/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package privdata
import (
"fmt"
"time"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
privdatacommon "github.com/hyperledger/fabric/gossip/privdata/common"
"github.com/hyperledger/fabric/protos/common"
"github.com/hyperledger/fabric/protos/ledger/rwset"
"github.com/hyperledger/fabric/protos/ledger/rwset/kvrwset"
"github.com/hyperledger/fabric/protos/msp"
"github.com/hyperledger/fabric/protos/peer"
"github.com/spf13/viper"
)
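// txValidationFlags mirrors the per-transaction validation codes kept in the
// block metadata TRANSACTIONS_FILTER entry: one byte per transaction.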
type txValidationFlags []uint8
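// blockFactory accumulates endorser transactions via the AddTxn* methods and
// assembles them into a *common.Block with create().
//
// Hypothetical usage in a test:
//   block := (&blockFactory{channelID: "testchannel"}).AddTxn("tx1", "ns1", []byte("hash"), "col1").create()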
type blockFactory struct {
channelID string
transactions [][]byte
metadataSize int
lacksMetadata bool
invalidTxns map[int]struct{}
}
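// AddTxn appends an endorser transaction that reads and writes the given
// collections under namespace nsName, without attaching an endorser identity.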
func (bf *blockFactory) AddTxn(txID string, nsName string, hash []byte, collections ...string) *blockFactory {
return bf.AddTxnWithEndorsement(txID, nsName, hash, "", true, collections...)
}
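// AddReadOnlyTxn appends an endorser transaction whose collection read-write
// sets contain hashed reads only (no hashed writes).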
func (bf *blockFactory) AddReadOnlyTxn(txID string, nsName string, hash []byte, collections ...string) *blockFactory {
return bf.AddTxnWithEndorsement(txID, nsName, hash, "", false, collections...)
}
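// AddTxnWithEndorsement appends an endorser transaction for namespace nsName.
// If org is non-empty, a single endorsement carrying a serialized identity of
// that MSP is attached; hasWrites controls whether the collection read-write
// sets include hashed writes.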
func (bf *blockFactory) AddTxnWithEndorsement(txID string, nsName string, hash []byte, org string, hasWrites bool, collections ...string) *blockFactory {
txn := &peer.Transaction{
Actions: []*peer.TransactionAction{
{},
},
}
nsRWSet := sampleNsRwSet(nsName, hash, collections...)
if !hasWrites {
nsRWSet = sampleReadOnlyNsRwSet(nsName, hash, collections...)
}
txrws := rwsetutil.TxRwSet{
NsRwSets: []*rwsetutil.NsRwSet{nsRWSet},
}
b, err := txrws.ToProtoBytes()
if err != nil {
panic(err)
}
ccAction := &peer.ChaincodeAction{
Results: b,
}
ccActionBytes, err := proto.Marshal(ccAction)
if err != nil {
panic(err)
}
pRespPayload := &peer.ProposalResponsePayload{
Extension: ccActionBytes,
}
respPayloadBytes, err := proto.Marshal(pRespPayload)
if err != nil {
panic(err)
}
ccPayload := &peer.ChaincodeActionPayload{
Action: &peer.ChaincodeEndorsedAction{
ProposalResponsePayload: respPayloadBytes,
},
}
if org != "" {
sID := &msp.SerializedIdentity{Mspid: org, IdBytes: []byte(fmt.Sprintf("p0%s", org))}
b, _ := proto.Marshal(sID)
ccPayload.Action.Endorsements = []*peer.Endorsement{
{
Endorser: b,
},
}
}
ccPayloadBytes, err := proto.Marshal(ccPayload)
if err != nil {
panic(err)
}
txn.Actions[0].Payload = ccPayloadBytes
txBytes, _ := proto.Marshal(txn)
cHdr := &common.ChannelHeader{
TxId: txID,
Type: int32(common.HeaderType_ENDORSER_TRANSACTION),
ChannelId: bf.channelID,
}
cHdrBytes, _ := proto.Marshal(cHdr)
commonPayload := &common.Payload{
Header: &common.Header{
ChannelHeader: cHdrBytes,
},
Data: txBytes,
}
payloadBytes, _ := proto.Marshal(commonPayload)
envp := &common.Envelope{
Payload: payloadBytes,
}
envelopeBytes, _ := proto.Marshal(envp)
bf.transactions = append(bf.transactions, envelopeBytes)
return bf
}
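// create assembles the accumulated transactions into a block with header
// number 1 and resets the factory, keeping only the channel ID.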
func (bf *blockFactory) create() *common.Block {
defer func() {
*bf = blockFactory{channelID: bf.channelID}
}()
block := &common.Block{
Header: &common.BlockHeader{
Number: 1,
},
Data: &common.BlockData{
Data: bf.transactions,
},
}
if bf.lacksMetadata {
return block
}
block.Metadata = &common.BlockMetadata{
Metadata: make([][]byte, common.BlockMetadataIndex_TRANSACTIONS_FILTER+1),
}
if bf.metadataSize > 0 {
block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = make([]uint8, bf.metadataSize)
} else {
block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = make([]uint8, len(block.Data.Data))
}
for txSeqInBlock := range bf.invalidTxns {
block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER][txSeqInBlock] = uint8(peer.TxValidationCode_INVALID_ENDORSER_TRANSACTION)
}
return block
}
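// withoutMetadata makes create() return a block without a metadata section.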
func (bf *blockFactory) withoutMetadata() *blockFactory {
bf.lacksMetadata = true
return bf
}
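// withMetadataSize overrides the length of the transactions filter metadata so
// it can differ from the number of transactions in the block.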
func (bf *blockFactory) withMetadataSize(mdSize int) *blockFactory {
bf.metadataSize = mdSize
return bf
}
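// withInvalidTxns marks the given transaction positions as invalid endorser
// transactions in the transactions filter metadata.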
func (bf *blockFactory) withInvalidTxns(sequences ...int) *blockFactory {
bf.invalidTxns = make(map[int]struct{})
for _, seq := range sequences {
bf.invalidTxns[seq] = struct{}{}
}
return bf
}
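// sampleNsRwSet returns a namespace read-write set with a sample public
// KVRWSet and one hashed collection read-write set (reads and writes) per
// collection.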
func sampleNsRwSet(ns string, hash []byte, collections ...string) *rwsetutil.NsRwSet {
nsRwSet := &rwsetutil.NsRwSet{NameSpace: ns,
KvRwSet: sampleKvRwSet(),
}
for _, col := range collections {
nsRwSet.CollHashedRwSets = append(nsRwSet.CollHashedRwSets, sampleCollHashedRwSet(col, hash, true))
}
return nsRwSet
}
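// sampleReadOnlyNsRwSet is like sampleNsRwSet, but its hashed collection
// read-write sets contain reads only.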
func sampleReadOnlyNsRwSet(ns string, hash []byte, collections ...string) *rwsetutil.NsRwSet {
nsRwSet := &rwsetutil.NsRwSet{NameSpace: ns,
KvRwSet: sampleKvRwSet(),
}
for _, col := range collections {
nsRwSet.CollHashedRwSets = append(nsRwSet.CollHashedRwSets, sampleCollHashedRwSet(col, hash, false))
}
return nsRwSet
}
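// sampleKvRwSet returns a public KVRWSet with a single read, a single write
// and a range query recorded as raw reads; a second range query with a Merkle
// summary is built but not included in the returned set.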
func sampleKvRwSet() *kvrwset.KVRWSet {
rqi1 := &kvrwset.RangeQueryInfo{StartKey: "k0", EndKey: "k9", ItrExhausted: true}
rqi1.SetRawReads([]*kvrwset.KVRead{
{Key: "k1", Version: &kvrwset.Version{BlockNum: 1, TxNum: 1}},
{Key: "k2", Version: &kvrwset.Version{BlockNum: 1, TxNum: 2}},
})
rqi2 := &kvrwset.RangeQueryInfo{StartKey: "k00", EndKey: "k90", ItrExhausted: true}
rqi2.SetMerkelSummary(&kvrwset.QueryReadsMerkleSummary{MaxDegree: 5, MaxLevel: 4, MaxLevelHashes: [][]byte{[]byte("Hash-1"), []byte("Hash-2")}})
return &kvrwset.KVRWSet{
Reads: []*kvrwset.KVRead{{Key: "key1", Version: &kvrwset.Version{BlockNum: 1, TxNum: 1}}},
RangeQueriesInfo: []*kvrwset.RangeQueryInfo{rqi1},
Writes: []*kvrwset.KVWrite{{Key: "key2", IsDelete: false, Value: []byte("value2")}},
}
}
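// sampleCollHashedRwSet returns a hashed collection read-write set with two
// hashed reads, the supplied private rwset hash and, when hasWrites is true,
// two hashed writes.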
func sampleCollHashedRwSet(collectionName string, hash []byte, hasWrites bool) *rwsetutil.CollHashedRwSet {
collHashedRwSet := &rwsetutil.CollHashedRwSet{
CollectionName: collectionName,
HashedRwSet: &kvrwset.HashedRWSet{
HashedReads: []*kvrwset.KVReadHash{
{KeyHash: []byte("Key-1-hash"), Version: &kvrwset.Version{BlockNum: 1, TxNum: 2}},
{KeyHash: []byte("Key-2-hash"), Version: &kvrwset.Version{BlockNum: 2, TxNum: 3}},
},
},
PvtRwSetHash: hash,
}
if hasWrites {
collHashedRwSet.HashedRwSet.HashedWrites = []*kvrwset.KVWriteHash{
{KeyHash: []byte("Key-3-hash"), ValueHash: []byte("value-3-hash"), IsDelete: false},
{KeyHash: []byte("Key-4-hash"), ValueHash: []byte("value-4-hash"), IsDelete: true},
}
}
return collHashedRwSet
}
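// extractCollectionConfig scans the config package for a static collection
// config with the given name and returns it; it returns nil if the name is not
// found or a non-static config entry is encountered first.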
func extractCollectionConfig(configPackage *common.CollectionConfigPackage, collectionName string) *common.CollectionConfig {
for _, config := range configPackage.Config {
switch cconf := config.Payload.(type) {
case *common.CollectionConfig_StaticCollectionConfig:
if cconf.StaticCollectionConfig.Name == collectionName {
return config
}
default:
return nil
}
}
return nil
}
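// pvtDataFactory builds []*ledger.TxPvtData fixtures: addRWSet() starts a new
// transaction-level private write set and addNSRWSet() appends a namespace
// with pre-image bytes for each named collection.
//
// Hypothetical usage in a test:
//   pvtData := (&pvtDataFactory{}).addRWSet().addNSRWSet("ns1", "col1", "col2").create()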
type pvtDataFactory struct {
data []*ledger.TxPvtData
}
func (df *pvtDataFactory) addRWSet() *pvtDataFactory {
seqInBlock := uint64(len(df.data))
df.data = append(df.data, &ledger.TxPvtData{
SeqInBlock: seqInBlock,
WriteSet: &rwset.TxPvtReadWriteSet{},
})
return df
}
func (df *pvtDataFactory) addNSRWSet(namespace string, collections ...string) *pvtDataFactory {
nsrws := &rwset.NsPvtReadWriteSet{
Namespace: namespace,
}
for _, col := range collections {
nsrws.CollectionPvtRwset = append(nsrws.CollectionPvtRwset, &rwset.CollectionPvtReadWriteSet{
CollectionName: col,
Rwset: []byte("rws-pre-image"),
})
}
df.data[len(df.data)-1].WriteSet.NsPvtRwset = append(df.data[len(df.data)-1].WriteSet.NsPvtRwset, nsrws)
return df
}
func (df *pvtDataFactory) create() []*ledger.TxPvtData {
defer func() {
df.data = nil
}()
return df.data
}
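// digestsAndSourceFactory builds a dig2sources map: mapDigest() selects the
// digest key and toSources() associates it with endorsements whose Endorser
// field is the given peer name.
//
// Hypothetical usage in a test (dig being a *privdatacommon.DigKey):
//   d2s := (&digestsAndSourceFactory{}).mapDigest(dig).toSources("peer1", "peer2").create()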
type digestsAndSourceFactory struct {
d2s dig2sources
lastDig *privdatacommon.DigKey
}
func (f *digestsAndSourceFactory) mapDigest(dig *privdatacommon.DigKey) *digestsAndSourceFactory {
f.lastDig = dig
return f
}
func (f *digestsAndSourceFactory) toSources(peers ...string) *digestsAndSourceFactory {
if f.d2s == nil {
f.d2s = make(dig2sources)
}
var endorsements []*peer.Endorsement
for _, p := range peers {
endorsements = append(endorsements, &peer.Endorsement{
Endorser: []byte(p),
})
}
f.d2s[*f.lastDig] = endorsements
return f
}
func (f *digestsAndSourceFactory) create() dig2sources {
return f.d2s
}
const btlPullMarginDefault = 10
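// GetBtlPullMargin returns the configured peer.gossip.pvtData.btlPullMargin,
// falling back to btlPullMarginDefault when the key is unset or negative.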
func GetBtlPullMargin() uint64 {
var result uint64
if viper.IsSet("peer.gossip.pvtData.btlPullMargin") {
btlMarginVal := viper.GetInt("peer.gossip.pvtData.btlPullMargin")
if btlMarginVal < 0 {
result = btlPullMarginDefault
} else {
result = uint64(btlMarginVal)
}
} else {
result = btlPullMarginDefault
}
return result
}
const (
reconcileSleepIntervalConfigKey = "peer.gossip.pvtData.reconcileSleepInterval"
reconcileSleepIntervalDefault = time.Minute * 1
reconcileBatchSizeConfigKey = "peer.gossip.pvtData.reconcileBatchSize"
reconcileBatchSizeDefault = 10
reconciliationEnabledConfigKey = "peer.gossip.pvtData.reconciliationEnabled"
)
// GetReconcilerConfig reads the reconciler configuration values from core.yaml and returns a ReconcilerConfig.
func GetReconcilerConfig() *ReconcilerConfig {
reconcileSleepInterval := viper.GetDuration(reconcileSleepIntervalConfigKey)
if reconcileSleepInterval == 0 {
logger.Warning("Configuration key", rreconcileSleepIntervalConfigKey, "isn't set, defaulting to", reconcileSleepIntervalDefault)
reconcileSleepInterval = reconcileSleepIntervalDefault
}
reconcileBatchSize := viper.GetInt(reconcileBatchSizeConfigKey)
if reconcileBatchSize == 0 {
logger.Warning("Configuration key", reconcileBatchSizeConfigKey, "isn't set, defaulting to", reconcileBatchSizeDefault)
reconcileBatchSize = reconcileBatchSizeDefault
}
isEnabled := viper.GetBool(reconciliationEnabledConfigKey)
return &ReconcilerConfig{SleepInterval: reconcileSleepInterval, BatchSize: reconcileBatchSize, IsEnabled: isEnabled}
}
const (
transientBlockRetentionConfigKey = "peer.gossip.pvtData.transientstoreMaxBlockRetention"
TransientBlockRetentionDefault = 1000
)
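// GetTransientBlockRetention returns the configured
// peer.gossip.pvtData.transientstoreMaxBlockRetention, falling back to
// TransientBlockRetentionDefault when the key is unset or zero.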
func GetTransientBlockRetention() uint64 {
transientBlockRetention := uint64(viper.GetInt(transientBlockRetentionConfigKey))
if transientBlockRetention == 0 {
logger.Warning("Configuration key", transientBlockRetentionConfigKey, "isn't set, defaulting to", TransientBlockRetentionDefault)
transientBlockRetention = TransientBlockRetentionDefault
}
return transientBlockRetention
}