参考了该篇博文https://zhuanlan.zhihu.com/p/25358777,自己又做了一些梳理,由衷感谢作者给我的启发

通信接口

protos/orderer/ab.proto

service AtomicBroadcast {
    // Broadcast receives messages from peers and has the orderer assemble them into blocks.
    // Flow: a peer (client) connects to the orderer over gRPC and sends messages.
    // The orderer receives each message via Recv and pushes it to Kafka; an orderer
    // consumer then consumes the Kafka messages and orders them. A new block is cut
    // when either of two conditions is met: (1) the next-block timer expires — the
    // orderer posts a timer message to Kafka and acts on it when it is consumed back —
    // or (2) after each consumed data message, the pending batch's total byte size or
    // message count reaches the configured limit. The block is then created
    // (CreateNextBlock) and, on success, written to the ledger (WriteBlock).
    rpc Broadcast(stream common.Envelope) returns (stream BroadcastResponse) {}

    // Deliver lets a peer fetch blocks from the orderer.
    // Flow: a peer connects to the orderer over gRPC and issues a deliver request.
    // The orderer receives it via Recv and inspects the requested start position
    // (SeekPosition_Oldest: from the first block; SeekPosition_Newest: from the
    // latest block; SeekPosition_Specified: from a given block number). It then
    // obtains a block iterator from the ledger and streams each block back to the
    // peer as it is fetched, until all requested blocks have been delivered,
    // finally returning a success status to the peer.
    rpc Deliver(stream common.Envelope) returns (stream DeliverResponse) {}
}

数据结构

protos/common/common.proto

message BlockHeader {
    uint64 number = 1; // position of this block in the chain, numbered from 0
    bytes previous_hash = 2;  // hash of the previous block's header
    bytes data_hash = 3;  // hash of the BlockData, computed via a Merkle tree
}
message SeekInfo {
    enum SeekBehavior {
        BLOCK_UNTIL_READY = 0;  // on a missing block, wait until it is produced
        FAIL_IF_NOT_READY = 1;   // on a missing block, return an error immediately
    }
    SeekPosition start = 1;    // position to start delivering blocks from
    SeekPosition stop = 2;     // position to stop delivering blocks at
    SeekBehavior behavior = 3;  // behavior when a requested block is missing
}

orderer进程启动

genesisBlock 是随着Orderer的启动而自动创建的初始块,主要在Orderer运行过程中提供基础配置,包括网络、channel等信息。

默认orderer启动时不带任何参数,默认参数启动,加载orderer.yaml配置文件

main函数在order/main.go

    //加载配置orderer.yaml
    conf := config.Load()
    //如果开启,启动分析服务。除非发生错误,否则ListenAndServe()调用不会返回。
    //安全服务相关配置开启的话,执行一定请求处理

	// 创建GRPC服务
	grpcServer, err := comm.NewGRPCServerFromListener(lis, secureConfig)
//加载本地MSP,Fabric 1.0使用MSP(成员管理服务)来描述主体principal,该MSP用于验证签名者的身份以及签名者在该MSP内所具有的权限。目前,支持两种角色:成员和管理员。 主体Principals的通用表现形式是MSP.ROLE,其中MSP是指MSP 的ID,ROLE是member 或admin。 一个有效主体的示例是“Org0.admin”(Org0 MSP的任意管理员)或“Org1.member”(Org1 MSP的任意成员)
err = mspmgmt.LoadLocalMsp(conf.General.LocalMSPDir, conf.General.BCCSP, conf.General.LocalMSPID)
// 创建ledger工厂,目前默认是ram
lf, _ := createLedgerFactory(conf)
if len(lf.ChainIDs()) == 0 {
        switch conf.General.GenesisMethod {
		case "provisional": //通过配置动态加载配置方式,创建创世块,缺省方式
			genesisBlock = provisional.New(genesisconfig.Load(conf.General.GenesisProfile)).GenesisBlock()
		case "file":  //通过加载文件形式创建创世块(从固定文件./genesisblock创建)
			genesisBlock = file.New(conf.General.GenesisFile).GenesisBlock() 
          
         //然后找到创世块的chainID,创建sc
         chainID, err := utils.GetChainIDFromBlock(genesisBlock)
         gl, err := lf.GetOrCreate(chainID)
         //把创世块添加到sc
         err = gl.Append(genesisBlock)
}
//共识策略
consenters := make(map[string]multichain.Consenter)
consenters["solo"] = solo.New()
consenters["kafka"] = kafka.New(conf.Kafka.Version, conf.Kafka.Retry, conf.Kafka.TLS)
consenters["sbft"] = sbft.New(makeSbftConsensusConfig(conf), makeSbftStackConfig(conf))
//NewSigner返回一个基于msp的LocalSigner的新实例。它假定本地msp已经被初始化了。
signer := localmsp.NewSigner()
//NewManagerImpl生成一个Manager的实例
manager := multichain.NewManagerImpl(lf, consenters, signer)
	//在之前已经创建了GRPC服务
	grpcServer, err := comm.NewGRPCServerFromListener(lis, secureConfig)
	
	//在函数的最后,注册广播组,可以接收peer的广播消息
	ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)

    //在本文最后详细讲述grpc服务的启动过程
	grpcServer.Start()

provision方式下的区块创建过程

// New builds a provisional genesis-block Generator from the given profile.
// It assembles the channel-level config groups (hashing algorithm, block-data
// hashing structure, orderer addresses, default policies), then — when the
// corresponding profile sections are present — the orderer groups (consensus
// type, batching limits, per-org MSPs, Kafka brokers) and the application
// groups (default policies, per-org MSPs and anchor peers).
// Panics if an org's MSP configuration cannot be loaded or the consensus
// type is unknown.
func New(conf *genesisconfig.Profile) Generator {
	bs := &bootstrapper{
		channelGroups: []*cb.ConfigGroup{
			// Chain Config Types
			config.DefaultHashingAlgorithm(),
			config.DefaultBlockDataHashingStructure(),
			config.TemplateOrdererAddresses(conf.Orderer.Addresses), // TODO, move to conf.Channel when it exists

			// Default policies
			policies.TemplateImplicitMetaAnyPolicy([]string{}, configvaluesmsp.ReadersPolicyKey),
			policies.TemplateImplicitMetaAnyPolicy([]string{}, configvaluesmsp.WritersPolicyKey),
			policies.TemplateImplicitMetaMajorityPolicy([]string{}, configvaluesmsp.AdminsPolicyKey),

			// Temporary AcceptAllPolicy XXX, remove
			cauthdsl.TemplatePolicy(AcceptAllPolicyKey, cauthdsl.AcceptAllPolicy),
		},
	}

	if conf.Orderer != nil {
		bs.ordererGroups = []*cb.ConfigGroup{
			// Orderer Config Types
			config.TemplateConsensusType(conf.Orderer.OrdererType),
			config.TemplateBatchSize(&ab.BatchSize{
				// Maximum number of messages permitted in a single batch.
				MaxMessageCount:   conf.Orderer.BatchSize.MaxMessageCount,
				// Hard upper bound on the serialized size of one batch.
				AbsoluteMaxBytes:  conf.Orderer.BatchSize.AbsoluteMaxBytes,
				// Preferred (soft) upper bound on the batch size.
				PreferredMaxBytes: conf.Orderer.BatchSize.PreferredMaxBytes,
			}),
			config.TemplateBatchTimeout(conf.Orderer.BatchTimeout.String()),
			config.TemplateChannelRestrictions(conf.Orderer.MaxChannels),

			// Initialize the default Reader/Writer/Admins orderer policies, as well as block validation policy
			policies.TemplateImplicitMetaPolicyWithSubPolicy([]string{config.OrdererGroupKey}, BlockValidationPolicyKey, configvaluesmsp.WritersPolicyKey, cb.ImplicitMetaPolicy_ANY),
			policies.TemplateImplicitMetaAnyPolicy([]string{config.OrdererGroupKey}, configvaluesmsp.ReadersPolicyKey),
			policies.TemplateImplicitMetaAnyPolicy([]string{config.OrdererGroupKey}, configvaluesmsp.WritersPolicyKey),
			policies.TemplateImplicitMetaMajorityPolicy([]string{config.OrdererGroupKey}, configvaluesmsp.AdminsPolicyKey),
		}

		// Register each orderer organization's verifying MSP configuration.
		for _, org := range conf.Orderer.Organizations {
			mspConfig, err := msp.GetVerifyingMspConfig(resolveMSPDir(org.MSPDir), org.BCCSP, org.ID)
			if err != nil {
				logger.Panicf("Error loading MSP configuration for org %s: %s", org.Name, err)
			}
			bs.ordererGroups = append(bs.ordererGroups, configvaluesmsp.TemplateGroupMSP([]string{config.OrdererGroupKey, org.Name}, mspConfig))
		}

		// Kafka additionally needs its broker list; solo/sbft need nothing extra.
		switch conf.Orderer.OrdererType {
		case ConsensusTypeSolo, ConsensusTypeSbft:
		case ConsensusTypeKafka:
			bs.ordererGroups = append(bs.ordererGroups, config.TemplateKafkaBrokers(conf.Orderer.Kafka.Brokers))
		default:
			panic(fmt.Errorf("Wrong consenter type value given: %s", conf.Orderer.OrdererType))
		}

		bs.ordererSystemChannelGroups = []*cb.ConfigGroup{
			// Policies
			config.TemplateChainCreationPolicyNames(DefaultChainCreationPolicyNames),
		}
	}

	if conf.Application != nil {

		bs.applicationGroups = []*cb.ConfigGroup{
			// Initialize the default Reader/Writer/Admins application policies
			policies.TemplateImplicitMetaAnyPolicy([]string{config.ApplicationGroupKey}, configvaluesmsp.ReadersPolicyKey),
			policies.TemplateImplicitMetaAnyPolicy([]string{config.ApplicationGroupKey}, configvaluesmsp.WritersPolicyKey),
			policies.TemplateImplicitMetaMajorityPolicy([]string{config.ApplicationGroupKey}, configvaluesmsp.AdminsPolicyKey),
		}
		// Register each application organization's MSP config and anchor peers.
		for _, org := range conf.Application.Organizations {
			mspConfig, err := msp.GetVerifyingMspConfig(resolveMSPDir(org.MSPDir), org.BCCSP, org.ID)
			if err != nil {
				logger.Panicf("Error loading MSP configuration for org %s: %s", org.Name, err)
			}

			bs.applicationGroups = append(bs.applicationGroups, configvaluesmsp.TemplateGroupMSP([]string{config.ApplicationGroupKey, org.Name}, mspConfig))
			var anchorProtos []*pb.AnchorPeer
			for _, anchorPeer := range org.AnchorPeers {
				anchorProtos = append(anchorProtos, &pb.AnchorPeer{
					Host: anchorPeer.Host,
					Port: int32(anchorPeer.Port),
				})
			}

			bs.applicationGroups = append(bs.applicationGroups, config.TemplateAnchorPeers(org.Name, anchorProtos))
		}

	}

	return bs
}

创世块创建主入口函数

// GenesisBlock builds the genesis block for the test chain ID. It composes
// the orderer system-channel template with the channel template, wraps the
// result in a mod-policy-setting template keyed by the admins policy, and
// asks a genesis factory for the block. Panics if block creation fails.
func (bs *bootstrapper) GenesisBlock() *cb.Block {
	systemTemplate := configtx.NewSimpleTemplate(bs.ordererSystemChannelGroups...)
	composite := configtx.NewCompositeTemplate(systemTemplate, bs.ChannelTemplate())
	template := configtx.NewModPolicySettingTemplate(configvaluesmsp.AdminsPolicyKey, composite)

	block, err := genesis.NewFactoryImpl(template).Block(TestChainID)
	if err != nil {
		panic(err)
	}
	return block
}

multichain

封装了Chain操作相关接口。Chain目前有四种实现方式:SystemChain以及其它Chain(Kafka、Solo、Sbft)。其中SystemChain为必须,其他三个根据配置启用。

1、manager:主要负责chain的创建与访问控制。

2、chainsupport:主要负责chain的操作接口,包括新增签名,数据块写入等。

3、systemchain: 封装了systemchain的相关操作,systemchain主要用于管理配置以及安全相关控制。

NewManagerImpl生成一个Manager的实例,调用了接口定义在orderer/multichain/manager.go

//遍历所有的chainID
existingChains := ledgerFactory.ChainIDs()
            //创建ledger中的所有chain
			chain := newChainSupport(createSystemChainFilters(ml, ledgerResources),
				ledgerResources,
				consenters,
				signer)

详细看下函数newChainSupport的处理

cutter := blockcutter.NewReceiverImpl(ledgerResources.SharedConfig(), filters)

lastBlock := ledger.GetBlock(cs.Reader(), cs.Reader().Height()-1)
metadata, err := utils.GetMetadataFromBlock(lastBlock, cb.BlockMetadataIndex_ORDERER)
//处理chain的创建
cs.chain, err = consenter.HandleChain(cs, metadata)
// HandleChain creates a new SBFT-backed Chain for the given consenter
// support. The metadata argument is not used by this implementation, and
// the returned error is always nil.
func (sbft *consenter) HandleChain(support multichain.ConsenterSupport, metadata *cb.Metadata) (multichain.Chain, error) {
       return newChain(sbft, support), nil
}

最终调用创建chain的接口

// newChain registers an SBFT peer for the given chain and returns a chain
// handle that shares the consenter-wide consensus stack.
func newChain(sbft *consenter, support multichain.ConsenterSupport) *chain {
	chainID := support.ChainID()
	logger.Infof("Starting a chain: %d", chainID)

	// Lazily create the shared consensus stack and the peer registry the
	// first time this consenter handles a chain.
	if sbft.sbftPeers == nil {
		sbft.consensusStack = createConsensusStack(sbft)
		sbft.sbftPeers = make(map[string]*simplebft.SBFT)
	}
	sbft.sbftPeers[chainID] = initSbftPeer(chainID, sbft, support)

	return &chain{
		chainID:        chainID,
		consensusStack: sbft.consensusStack,
	}
}

createConsensusStack的处理流程

persist := persist.New(sbft.sbftStackConfig.DataDir)
backend, err := backend.NewBackend(sbft.config.Peers, conn, persist)

ledger

orderer对外服务过程中,允许客户端(例如peer)批量获取消息, 所以orderer需要维护历史消息拷贝,消息数量可以是有限的也可以是无限的,通过配置进行修改。这些历史消息维护在Orderer的Ledger模块中。目前Orderer存储支持多种方式:(Orderer的ledger非Fabric整个系统的Ledger模块,此Ledger特指Orderer自身的数据存储引擎)

1、File:实现了以json格式为存储方式的文件系统账本。该账本实现方式支持容灾,但性能不高。

2、Ram : Ram实现了一个简单的基于内存存储的账本,可以自由调整数据存储相关参数,例如历史存储大小等。该账本不支持容灾,每次系统挂掉,内存数据将会消失,重启后,将会重新建立新的账本。

以ram方式举例

数据块存储结构

// ramLedger is an in-memory block store kept as a singly linked list,
// bounded to maxSize entries; once the bound is exceeded the oldest block
// is dropped (see appendBlock).
type ramLedger struct {
	maxSize int         // maximum number of blocks retained in memory
	size    int         // number of blocks currently held
	oldest  *simpleList // head of the list: the oldest block still retained
	newest  *simpleList // tail of the list: the most recently appended block
}

主要的函数接口在orderer/ledger/ram/impl.go,包括

1)新加一个块到ledger

// Append adds a block to the in-memory ledger. It verifies that the block's
// number immediately follows the current newest block and — except for the
// genesis insertion — that its PreviousHash matches the hash of the newest
// block's header. Returns a descriptive error when either check fails.
func (rl *ramLedger) Append(block *cb.Block) error {
	expected := rl.newest.block.Header.Number + 1

	if block.Header.Number != expected {
		return fmt.Errorf("Block number should have been %d but was %d",
			expected, block.Header.Number)
	}

	// The previous-hash check is skipped for genesis block insertion,
	// identified by the expected number being 0.
	if expected != 0 {
		if !bytes.Equal(block.Header.PreviousHash, rl.newest.block.Header.Hash()) {
			return fmt.Errorf("Block should have had previous hash of %x but was %x",
				rl.newest.block.Header.Hash(), block.Header.PreviousHash)
		}
	}

	rl.appendBlock(block)
	return nil
}

2)将新的数据块追加到ledger链表的尾部,必要时淘汰最旧的块

// appendBlock links block onto the tail of the list, signals any readers
// waiting on the previous tail that it now has a successor, and evicts the
// oldest entry when the configured maximum size is exceeded.
func (rl *ramLedger) appendBlock(block *cb.Block) {
	successor := &simpleList{
		signal: make(chan struct{}),
		block:  block,
	}
	rl.newest.next = successor

	// Closing the old tail's channel wakes readers blocked on it.
	previousSignal := rl.newest.signal
	logger.Debugf("Sending signal that block %d has a successor", rl.newest.block.Header.Number)
	rl.newest = successor
	close(previousSignal)

	rl.size++

	if rl.size > rl.maxSize {
		logger.Debugf("RAM ledger max size about to be exceeded, removing oldest item: %d",
			rl.oldest.block.Header.Number)
		rl.oldest = rl.oldest.next
		rl.size--
	}
}

block

数据块生成是Orderer的核心功能,它对从客户端接收过来的消息进行统一的排序,并按照一定的组织方式进行存储。Orderer将接收到的消息按消息记录数或者消息的大小组成一个固定的数据块,这些数据块存储在RAM或者FILE中,提高了fabric系统对消息转发以及使用的效率。

数据块生成主要分成两个接口,在orderer/common/blockcutter/blockcutter.go

1)消息的排序

// Ordered enqueues msg for batching and returns zero or more completed
// batches along with their committers. The boolean result is false when
// the filters reject the message.
//
// A batch is cut when: the message must be isolated (requested by its
// committer, or it alone exceeds PreferredMaxBytes), adding it would push
// the pending batch past PreferredMaxBytes, or the pending message count
// reaches MaxMessageCount.
func (r *receiver) Ordered(msg *cb.Envelope) ([][]*cb.Envelope, [][]filter.Committer, bool) {
	// Re-apply the filters: configuration may have changed since the
	// message was first received.
	cmt, err := r.filters.Apply(msg)
	if err != nil {
		logger.Debugf("Rejecting message: %s", err)
		return nil, nil, false
	}

	msgSize := messageSizeBytes(msg)

	if cmt.Isolated() || msgSize > r.sharedConfigManager.BatchSize().PreferredMaxBytes {

		if cmt.Isolated() {
			logger.Debugf("Found message which requested to be isolated, cutting into its own batch")
		} else {
			logger.Debugf("The current message, with %v bytes, is larger than the preferred batch size of %v bytes and will be isolated.", msgSize, r.sharedConfigManager.BatchSize().PreferredMaxBytes)
		}

		var msgBatches [][]*cb.Envelope
		var cmtBatches [][]filter.Committer

		// Flush the pending batch first, if any, so ordering is preserved.
		if len(r.pendingBatch) > 0 {
			batch, batchCommitters := r.Cut()
			msgBatches = append(msgBatches, batch)
			cmtBatches = append(cmtBatches, batchCommitters)
		}

		// The isolated message forms a batch of its own.
		msgBatches = append(msgBatches, []*cb.Envelope{msg})
		cmtBatches = append(cmtBatches, []filter.Committer{cmt})

		return msgBatches, cmtBatches, true
	}

	var msgBatches [][]*cb.Envelope
	var cmtBatches [][]filter.Committer

	// Cut the pending batch first if adding this message would push it
	// past the preferred byte limit.
	if r.pendingBatchSizeBytes+msgSize > r.sharedConfigManager.BatchSize().PreferredMaxBytes {
		batch, batchCommitters := r.Cut()
		msgBatches = append(msgBatches, batch)
		cmtBatches = append(cmtBatches, batchCommitters)
	}

	logger.Debugf("Enqueuing message into batch")
	r.pendingBatch = append(r.pendingBatch, msg)
	r.pendingBatchSizeBytes += msgSize
	r.pendingCommitters = append(r.pendingCommitters, cmt)

	// Cut when the pending message count reaches the configured maximum.
	if uint32(len(r.pendingBatch)) >= r.sharedConfigManager.BatchSize().MaxMessageCount {
		logger.Debugf("Batch size met, cutting batch")
		batch, batchCommitters := r.Cut()
		msgBatches = append(msgBatches, batch)
		cmtBatches = append(cmtBatches, batchCommitters)
	}

	// Callers expect nil rather than empty slices when nothing was cut.
	if len(msgBatches) == 0 {
		return nil, nil, true
	}

	return msgBatches, cmtBatches, true
}

2)返回待切块的数据,并将原结构数据清空,以接收下一个块的数据

// Cut returns the currently pending batch and its committers, then resets
// the pending state so a new batch can start accumulating.
func (r *receiver) Cut() ([]*cb.Envelope, []filter.Committer) {
	batch, committers := r.pendingBatch, r.pendingCommitters
	r.pendingBatch = nil
	r.pendingCommitters = nil
	r.pendingBatchSizeBytes = 0
	return batch, committers
}

kafka

Kafka消息收发处理主流程:通过主循环loop接口,接收两类消息事件来进行对消息排序并写入区块:

1、下一数据块生成定时器事件:系统会设置一个定时的数据块生成定时器,每当定时器到期,都会检查是否有新的数据块需要生成, 如果需要就生成新的数据块。

2、 新消息事件:每到一条新的消息,系统都会重新判断是否需要有新的数据块需要生成,如果有,就将消息进行排序并生成一块新的数据块。

在上文提到grpc服务的启动,接口函数的实现在orderer/kafka/orderer.go

// Start posts a CONNECT message to the chain's Kafka partition, creates the
// consumer for that partition, and launches the main processing loop in its
// own goroutine.
// NOTE(review): this excerpt elides the error handling after the consumer
// is created; `consumer` and `err` appear unused here.
func (ch *chainImpl) Start() {
    // Post a CONNECT message so that a not-yet-created partition exists
    // before we seek on it, avoiding a panic.
    ch.producer.Send(ch.partition, utils.MarshalOrPanic(newConnectMessage()))
  
    // Create the consumer for this partition, resuming from the offset
    // immediately after the last one persisted.
  	consumer, err := ch.consenter.consFunc()(ch.support.SharedConfig().KafkaBrokers(), ch.consenter.kafkaVersion(), ch.consenter.tlsConfig(), ch.partition, ch.lastOffsetPersisted+1)
  
    // Run the main loop that watches for updates on the chain.
    go ch.loop()
}

loop函数就是主操作流程

func (ch *chainImpl) loop() {
        //1创建消息队列,参数从配置文件中读取
       msg := new(ab.KafkaMessage)
// New returns a Kafka-based consenter built with the package's default
// broker/producer/consumer constructor values (bfValue, pfValue, cfValue).
func New(kv sarama.KafkaVersion, ro config.Retry, tls config.TLS) multichain.Consenter {
       return newConsenter(kv, ro, tls, bfValue, pfValue, cfValue)
}
// newConsenter bundles the Kafka version, retry/TLS settings, and the
// injected constructor functions into a consenterImpl. Kept separate from
// New so tests can substitute the constructors.
func newConsenter(kv sarama.KafkaVersion, ro config.Retry, tls config.TLS, bf bfType, pf pfType, cf cfType) multichain.Consenter {
       return &consenterImpl{kv, ro, tls, bf, pf, cf}
}

然后就是主循环

	for {
		select {
		case in := <-ch.consumer.Recv():
          ...
        case <-timer: //推送KafkaMessage_TimeToCut,即区块切分定时任务到期消息到kafka
          ...
        case <-ch.exitChan: // Halt()函数被调用,退出循环

从ch.consumer.Recv()接受到的消息包含以下几条:

1、kafka连接信息,直接跳过

			switch msg.Type.(type) {
			case *ab.KafkaMessage_Connect: 
				logger.Debug("It's a connect message - ignoring")
				continue		

2、区块切分定时任务到期

     		case *ab.KafkaMessage_TimeToCut: 
				ttcNumber = msg.GetTimeToCut().BlockNumber
				logger.Debug("It's a time-to-cut message for block", ttcNumber)
				if ttcNumber == ch.lastCutBlock+1 {
				    //获取可生成下一个区块的数据
					timer = nil
					batch, committers := ch.support.BlockCutter().Cut()
					if len(batch) == 0 { //没有数据
						return
					}
					block := ch.support.CreateNextBlock(batch) //创建一个新的数据块
					encodedLastOffsetPersisted = utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: in.Offset})
					//将新的区块数据写入到ledger中(RAM 或者FILE)
					ch.support.WriteBlock(block, committers, encodedLastOffsetPersisted)
					ch.lastCutBlock++
					logger.Debug("Proper time-to-cut received, just cut block", ch.lastCutBlock)
					continue

3、收到一条新的KafkaMessage_Regular消息

			case *ab.KafkaMessage_Regular:
				env := new(cb.Envelope)
				if err := proto.Unmarshal(msg.GetRegular().Payload, env); err != nil { //解析数据
					continue
				}
				// 获取可生成下一个区块的数据
				batches, committers, ok := ch.support.BlockCutter().Ordered(env)
				if ok && len(batches) == 0 && timer == nil {
				    //重新设置下一个区块生成定时器
					timer = time.After(ch.batchTimeout)
					continue
				}
				// If !ok, batches == nil, so this will be skipped
				for i, batch := range batches {
					block := ch.support.CreateNextBlock(batch)  //创建一个新的数据块
					encodedLastOffsetPersisted = utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: in.Offset})
					//将新的区块数据写入到ledger中(RAM 或者FILE)
					ch.support.WriteBlock(block, committers[i], encodedLastOffsetPersisted)
					ch.lastCutBlock++
					logger.Debug("Batch filled, just cut block", ch.lastCutBlock)
				}
				if len(batches) > 0 {
					timer = nil
				}
			}

WriteBlock函数将区块数据写入到ledger中

// WriteBlock runs every committer, stamps the block with orderer metadata
// and signatures, and appends it to the ledger. It panics if the ledger
// append fails, and returns the (mutated) block.
func (cs *chainSupport) WriteBlock(block *cb.Block, committers []filter.Committer, encodedMetadataValue []byte) *cb.Block {
	// Let each committer apply its side effects before the block is persisted.
	for _, c := range committers {
		c.Commit()
	}

	// Attach the orderer-specific metadata, if any was supplied.
	if encodedMetadataValue != nil {
		block.Metadata.Metadata[cb.BlockMetadataIndex_ORDERER] = utils.MarshalOrPanic(&cb.Metadata{Value: encodedMetadataValue})
	}

	cs.addBlockSignature(block)
	cs.addLastConfigSignature(block)

	// Persist the block; failure to append is unrecoverable.
	if err := cs.ledger.Append(block); err != nil {
		logger.Panicf("[channel: %s] Could not append block: %s", cs.ChainID(), err)
	}
	return block
}

Commit的处理

// Commit applies the committer's configuration envelope to the chain's
// config manager (it does not itself write a block to the ledger).
// NOTE(review): the error from Apply is ignored in this excerpt.
func (cc *configCommitter) Commit() {
       // Apply the configuration transaction to the config manager.
       err := cc.manager.Apply(cc.configEnvelope)
}

orderer.yaml配置文件

在目录/etc/hyperledger/fabric/orderer/orderer.yaml下

################################################################################
#
#   Orderer Configuration
#
#   - This controls the type and configuration for the orderer which is started
#   - This controls the type and configuration for the ordererledger if needed
#
################################################################################
General:

    # Ledger Type: The ledger type to provide to the orderer (if needed)
    # Available types are "ram", "file".
    LedgerType: ram

    # Queue Size: The maximum number of messages to allow pending from a gRPC
    # client.
    QueueSize: 10

    # Max Window Size: The maximum number of messages to for the orderer Deliver
    # to allow before acknowledgement must be received from the client.
    MaxWindowSize: 1000

    # Listen address: The IP on which to bind to listen
    ListenAddress: 127.0.0.1

    # Listen port: The port on which to bind to listen
    ListenPort: 7050

    # TLS: TLS settings for the GRPC server
    TLS:
        Enabled: false
        ServerKey:
        ServerCertificate:
        ServerRootCAs:
        ClientAuthEnabled: false
        ClientRootCAs:


    # Log Level: The level at which to log.  This accepts logging specifications
    # per fabric/docs/Setup/logging-control.md
    LogLevel: info

    # Genesis method: The method by which to retrieve/generate the genesis
    # block. Available values are "provisional", "file". Provisional utilizes
    # the parameters in the Genesis section to dynamically generate a new
    # genesis block. File uses the file provided by GenesisFile as the genesis
    # block.
    GenesisMethod: provisional

    # Genesis file: The file containing the genesis block. Used by the orderer
    # when GenesisMethod is set to "file".
    GenesisFile: ./genesisblock

    # Enable an HTTP service for Go "pprof" profiling as documented at:
    # https://golang.org/pkg/net/http/pprof
    Profile:
        Enabled: false
        Address: 0.0.0.0:6060

################################################################################
#
#   SECTION: RAM Ledger
#
#   - This section applies to the configuration of the RAM ledger
#
################################################################################
RAMLedger:

    # History Size: The number of blocks that the RAM ledger is set to retain
    HistorySize: 1000


################################################################################
#
#   SECTION: File Ledger
#
#   - This section applies to the configuration of the file ledger
#
################################################################################
FileLedger:

    # Location: The directory to store the blocks in
    # NOTE: If this is unset, a temporary location will be chosen using
    # the prefix specified by Prefix
    Location:

    # The prefix to use when generating a ledger directory in temporary space
    # Otherwise, this value is ignored
    Prefix: hyperledger-fabric-ordererledger

################################################################################
#
#   SECTION: Kafka
#
#   - This section applies to the configuration of the Kafka-backed orderer
#
################################################################################
Kafka:

    # Retry: What to do if none of the Kafka brokers are available
    Retry:
        # The producer should attempt to reconnect every <Period>
        Period: 3s
        # Panic if <Stop> has elapsed and no connection has been established
        Stop: 60s

    # Verbose: Turn on logging for sarama, the client library that we use to
    # interact with the Kafka cluster
    Verbose: false

    # Brokers: A list of Kafka brokers to which the orderer connects
    # NOTE: Use IP:port notation
    Brokers:
        - 127.0.0.1:9092

################################################################################
#
#   SECTION: Sbft
#
#   - This section applies to the configuration of the Sbft-backed orderer
#
################################################################################
Sbft:
    # Address to use for SBFT internal communication
    PeerCommAddr: ":6101"
    CertFile: "sbft/testdata/cert1.pem"
    KeyFile: "sbft/testdata/key.pem"
    # Directory for SBFT data (persistence)
    DataDir: "/tmp"
    # Number of peers
    "N": 1
    # Fault tolerance
    F: 0
    BatchDurationNsec: 1000
    BatchSizeBytes: 1000000000
    RequestTimeoutNsec: 1000000000
    # Peers (PeerCommAddr) with the path of their cert
    Peers:
        ":6101": "sbft/testdata/cert1.pem"

################################################################################
#
#   SECTION: Genesis
#
#   - This section applies to the configuration for the provisional bootstrapper
#
################################################################################
Genesis:

    # Orderer Type: The orderer implementation to start
    # Available types are "solo" and "kafka"
    OrdererType: solo

    # Batch Timeout: The amount of time to wait before creating a batch
    BatchTimeout: 10s

    # Batch Size: Controls the number of messages batched into a block
    BatchSize:

        # Max Message Count: The maximum number of messages to permit in a batch
        MaxMessageCount: 10

        # Absolute Max Bytes: The absolute maximum number of bytes allowed for
        # the serialized messages in a batch.
        AbsoluteMaxBytes: 99 MB

        # Preferred Max Bytes: The preferred maximum number of bytes allowed for
        # the serialized messages in a batch. A message larger than the preferred
        # max bytes will result in a batch larger than preferred max bytes.
        PreferredMaxBytes: 512 KB
posted on 2017-04-15 14:08  云中大卫  阅读(514)  评论(0)    收藏  举报