【HyperLedger Fabric 源码解读】orderer

排序服务启动流程

排序服务节点启动通过cmd/orderer/main.go的main()方法实现,该方法会进一步调用orderer/common/server/main.go#Main()方法。

# fabric-release-2.2\cmd\orderer\main.go
package main

import "github.com/hyperledger/fabric/orderer/common/server"

// main is the orderer process entry point. All startup logic lives in
// orderer/common/server; this wrapper only delegates to server.Main().
func main() {
	server.Main()
}
  1. 读取本地配置
    server.Main()主函数先调用config.Load()函数,加载指定目录下的orderer.yaml配置文件,解析获取Orderer配置信息,并将配置中的相对文件路径补全为绝对路径,填充未指定配置项为默认值,保存在Orderer配置对象conf(TopLevel类型)中。
# orderer/common/server/main.go文件
 func Main() {
     //解析用户命令行
	fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))

	// "version" command 
	if fullCmd == version.FullCommand() {
		fmt.Println(metadata.GetVersionInfo())
		return
	}

	//1.加载本地orderer.yaml配置文件
	conf, err := localconfig.Load() 
	if err != nil {
		logger.Error("failed to parse config: ", err)
		os.Exit(1)
	}
	···
# orderer/common/localconfig/config.go文件
// Load parses the orderer YAML file and environment, producing
// a struct suitable for config use, returning error on failure.
// It delegates to the package-level configCache, so repeated calls
// reuse the already-parsed configuration for the same config file.
func Load() (*TopLevel, error) {
	return cache.load()
}

// configCache stores marshalled bytes of config structures that produced from
// EnhancedExactUnmarshal. Cache key is the path of the configuration file that was used.
// The mutex guards the cache map against concurrent load() calls.
type configCache struct {
	mutex sync.Mutex
	cache map[string][]byte // config file path -> JSON-serialized TopLevel
}

// cache is the package-level singleton backing Load(); its zero value is
// usable because load() lazily initializes the map.
var cache = &configCache{}

// Load will load the configuration and cache it on the first call; subsequent
// calls will return a clone of the configuration that was previously loaded.
func (c *configCache) load() (*TopLevel, error) {
	var uconf TopLevel

	// Configure viper to locate orderer.yaml and let ORDERER_-prefixed
	// environment variables (with '.' replaced by '_') override file values.
	config := viper.New()
	coreconfig.InitViper(config, "orderer")
	config.SetEnvPrefix(Prefix)
	config.AutomaticEnv()
	replacer := strings.NewReplacer(".", "_")
	config.SetEnvKeyReplacer(replacer)

	if err := config.ReadInConfig(); err != nil {
		return nil, fmt.Errorf("Error reading configuration: %s", err)
	}

	c.mutex.Lock()
	defer c.mutex.Unlock()
	serializedConf, ok := c.cache[config.ConfigFileUsed()]
	if !ok {
		// First load for this config file: unmarshal strictly, then memoize
		// the JSON form so later calls skip the expensive unmarshal.
		err := viperutil.EnhancedExactUnmarshal(config, &uconf)
		if err != nil {
			return nil, fmt.Errorf("Error unmarshaling config into struct: %s", err)
		}

		serializedConf, err = json.Marshal(uconf)
		if err != nil {
			return nil, err
		}

		// Lazily initialize the map (the zero-value configCache has a nil map).
		if c.cache == nil {
			c.cache = map[string][]byte{}
		}
		c.cache[config.ConfigFileUsed()] = serializedConf
	}

	// Deserialize from the cached bytes so each caller receives an
	// independent clone rather than a shared *TopLevel.
	err := json.Unmarshal(serializedConf, &uconf)
	if err != nil {
		return nil, err
	}
	// Fill defaults and resolve relative paths against the config directory.
	uconf.completeInitialization(filepath.Dir(config.ConfigFileUsed()))

	return &uconf, nil
}
  2. 读取环境变量配置,初始化日志
    读取环境变量FABRIC_LOGGING_SPEC和FABRIC_LOGGING_FORMAT,初始化日志模块数据结构。注意配置文件中不再包括日志相关配置信息。
# orderer/common/server/main.go文件
//2.读取日志环境变量FABRIC_LOGGING_SPEC 和 FABRIC_LOGGING_FORMAT ,初始化日志数据结构
	initializeLogging()
# orderer/common/server/main.go文件

// initializeLogging configures the global flogging module directly from the
// FABRIC_LOGGING_FORMAT and FABRIC_LOGGING_SPEC environment variables,
// writing log output to stderr. Note that logging is no longer configured
// via orderer.yaml — only via these environment variables.
func initializeLogging() {
	flogging.Init(flogging.Config{
		Format:  os.Getenv("FABRIC_LOGGING_FORMAT"),
		Writer:  os.Stderr,
		LogSpec: os.Getenv("FABRIC_LOGGING_SPEC"),
	})
}
  3. 创建并启动运维支持服务
    创建并启动运维支持服务运维支持服务允许管理员通过RESTful API来获取节点的运行时状态信息,主要通过operations.System结构体来实现。主要代码如下所示:
# orderer/common/server/main.go文件
// 3.创建并启动运维支持服务
	opsSystem := newOperationsSystem(conf.Operations, conf.Metrics)
	metricsProvider := opsSystem.Provider
	logObserver := floggingmetrics.NewObserver(metricsProvider)
	flogging.SetObserver(logObserver)
	// initializeServerConfig初始化gRPC服务器配置
	serverConfig := initializeServerConfig(conf, metricsProvider)
	// initializeGrpcServer初始化gRPC服务器实例
	grpcServer := initializeGrpcServer(conf, serverConfig)
	caMgr := &caManager{
		appRootCAsByChain:     make(map[string][][]byte),
		ordererRootCAsByChain: make(map[string][][]byte),
		clientRootCAs:         serverConfig.SecOpts.ClientRootCAs,
	}
# orderer/common/server/main.go文件
//上述newOperationsSystem相关代码
// newOperationsSystem builds the operations (RESTful admin/metrics) service
// from the Operations and Metrics sections of the local orderer config,
// wiring up the statsd sink, the metrics provider, and TLS for the
// operations listener.
func newOperationsSystem(ops localconfig.Operations, metrics localconfig.Metrics) *operations.System {
	statsdOpts := &operations.Statsd{
		Network:       metrics.Statsd.Network,
		Address:       metrics.Statsd.Address,
		WriteInterval: metrics.Statsd.WriteInterval,
		Prefix:        metrics.Statsd.Prefix,
	}

	tlsOpts := operations.TLS{
		Enabled:            ops.TLS.Enabled,
		CertFile:           ops.TLS.Certificate,
		KeyFile:            ops.TLS.PrivateKey,
		ClientCertRequired: ops.TLS.ClientAuthRequired,
		ClientCACertFiles:  ops.TLS.ClientRootCAs,
	}

	opts := operations.Options{
		Logger:        flogging.MustGetLogger("orderer.operations"),
		ListenAddress: ops.ListenAddress,
		Metrics: operations.MetricsOptions{
			Provider: metrics.Provider,
			Statsd:   statsdOpts,
		},
		TLS:     tlsOpts,
		Version: metadata.Version,
	}

	return operations.NewSystem(opts)
}
  4. 创建账本工厂结构
    主要实现下面的createLedgerFactory方法。该方法会按照配置中指定的账本类型创建对应结构。目前仅支持文件类型账本工厂,主要调用orderer\common\server\util.go包中对应的方法,生成工厂结构,包括对本地文件进行操作的结构和账本读写结构。另外,按照区块序号生成索引信息。
// 4 初始化账本工厂结构
	lf, _, err := createLedgerFactory(conf, metricsProvider)
	if err != nil {
		logger.Panicf("Failed to create ledger factory: %v", err) 
	}
//orderer\common\server\util.go
// createLedgerFactory builds the file-based block ledger factory used by the
// orderer. When no ledger location is configured, a temporary directory
// (named with the configured prefix) is created instead. It returns the
// factory together with the directory it operates on.
func createLedgerFactory(conf *config.TopLevel, metricsProvider metrics.Provider) (blockledger.Factory, string, error) {
	dir := conf.FileLedger.Location
	if dir == "" {
		tmpDir, mkErr := ioutil.TempDir("", conf.FileLedger.Prefix)
		if mkErr != nil {
			logger.Panic("Error creating temp dir:", mkErr)
		}
		dir = tmpDir
	}

	logger.Debug("Ledger dir:", dir)

	factory, err := fileledger.New(dir, metricsProvider)
	if err != nil {
		return nil, "", errors.WithMessage(err, "Error in opening ledger factory")
	}

	return factory, dir, nil
}
  5. 加载启动区块,尝试同步本地账本
    Orderer服务启动时,需要从启动区块中读取网络的相关配置并进行校验,例如是否包括联盟信息等,之后检查是否需要进行账本同步。
// 5 如果指定了启动区块,则解析并进行必要的启动(加载启动区块并同步账本)
	var bootstrapBlock *cb.Block
	if conf.General.BootstrapMethod == "file" {  
		bootstrapBlock = file.New(conf.General.BootstrapFile).GenesisBlock()
		if err := onboarding.ValidateBootstrapBlock(bootstrapBlock, cryptoProvider); err != nil {
			logger.Panicf("Failed validating bootstrap block: %v", err)
		}

		// Are we bootstrapping with a genesis block (i.e. bootstrap block number = 0)?
		// If yes, generate the system channel with a genesis block.
		if len(lf.ChannelIDs()) == 0 && bootstrapBlock.Header.Number == 0 {
			logger.Info("Bootstrapping the system channel")
			//初始化系统通道的源码
			initializeBootstrapChannel(bootstrapBlock, lf)
		} else if len(lf.ChannelIDs()) > 0 {
			logger.Info("Not bootstrapping the system channel because of existing channels")
		} else {
			logger.Infof("Not bootstrapping the system channel because the bootstrap block number is %d (>0), replication is needed", bootstrapBlock.Header.Number)
		}
	} else if conf.General.BootstrapMethod != "none" {
		logger.Panicf("Unknown bootstrap method: %s", conf.General.BootstrapMethod)
	}

  6. 初始化负责共识的Registrar结构:
    • 创建系统链(如果是首次启动);
    • 创建共识器(kafka、etcdraft)
// 6 初始化通道管理器
	//initializeMultichannelRegistrar初始化多通道注册管理器registrar对象
	manager := initializeMultichannelRegistrar(
		clusterBootBlock,
		repInitiator,
		clusterDialer,
		clusterServerConfig,
		clusterGRPCServer,
		conf,
		signer,
		metricsProvider,
		opsSystem,
		lf,
		cryptoProvider,
		tlsCallback,
	)
  7. 初始化gRPC服务结构,完成绑定并启动监听。主要逻辑如下:
  • 利用Registrar、Deliver、Broadcast的处理器、计量服务、回调等,创建对外主服务结构
  • AtomicBroadcastServer
  • 将AtomicBroadcastServer绑定到gRPC服务器上并启动监听

Broadcast调用

共识

Orderer排序服务器上的通道共识组件链对象利用Golang通道(Solo共识组件)或Kafka集群(Kafka共识组件)作为共识排序后端,对经过通道消息处理器过滤的合法交易消息进行排序,对交易顺序等达成一致性观点。

执行流程

  1. Orderer排序服务器基于Broadcast()接口接收交易广播服务请求,调用Broadcast服务处理句柄的Handle()方法进行处理,建立消息处理循环,接收与处理客户端提交的普通交易消息、配置交易消息等请求消息(Envelope类型,通道头部类型是ENDORSER_TRANSACTION、CONFIG_UPDATE等)
  2. 经过滤后发送至通道绑定的共识组件链对象(Solo类型、Kafka类型等)进行排序。
    -------共识----------
  3. 再将排序后的交易添加到本地待处理的缓存交易消息列表,包括配置交易消息、普通交易消息等;
  4. 并按照交易出块规则构造新区块,提交到Orderer节点指定通道账本的区块数据文件中,同时负责创建新的应用通道、更新通道配置等通道管理工作

本地待处理的缓存交易列表

solo共识

  1. 检查与过滤合法消息
    Solo共识组件链对象的chain.main()方法首先获取当前通道的最新配置序号seq,阻塞等待sendChan通道中的普通交易消息,如代码所示。Solo共识组件链对象的main()方法处理普通交易消息的源码示例
## https://github.com/hyperledger/fabric/blob/release-2.2/orderer/consensus/solo/consensus.go
## 添加消息到缓存交易消息列表,并按出块规则切割成批量交易集合列表batches
 batches, _ := ch.support.BlockCutter().Ordered(msg.normalMsg)
# /orderer/consensus/solo/consensus.go
func (ch *chain) main() {
	var timer <-chan time.Time
	var err error

	for { // 消息循环处理
		seq := ch.support.Sequence()  // 获取当前通道的配置序号
		err = nil
		select {
			// 检查sendChan通道的消息
		case msg := <-ch.sendChan:
			if msg.configMsg == nil {   //  普通交易消息
				// NormalMsg
				if msg.configSeq < seq {
					 // 检查消息中的配置序号是否小于目前通道的配置序号,
                    // 若是则说明通道配置已发生更新,需要重新过滤验证该消息
					_, err = ch.support.ProcessNormalMsg(msg.normalMsg)
					if err != nil {
						// 若发现错误,则丢弃该消息,跳转继续循环
						logger.Warningf("Discarding bad normal message: %s", err)
						continue
					}
				}

				// 添加消息到缓存交易消息列表,并按出块规则切割成批量交易集合列表batches
				batches, pending := ch.support.BlockCutter().Ordered(msg.normalMsg)
				
				####reording#####
				
				 // 检查等待打包出块的批量交易集合列表
				for _, batch := range batches { // 遍历批量交易集合列表
					block := ch.support.CreateNextBlock(batch) // 创建新区块
					ch.support.WriteBlock(block, nil)  // 将区块写入账本
				}

				switch {
				case timer != nil && !pending:
					// Timer is already running but there are no messages pending, stop the timer
					 // 若存在批量交易集合列表,则取消定时器
					timer = nil
				case timer == nil && pending:
					// Timer is not already running and there are messages pending, so start it
					// 若结果中不存在出块消息且未设置定时器,则设置定时器
					timer = time.After(ch.support.SharedConfig().BatchTimeout())
					logger.Debugf("Just began %s batch timer", ch.support.SharedConfig().BatchTimeout().String())
				default:
					// Do nothing when:
					// 1. Timer is already running and there are messages pending
					// 2. Timer is not set and there are no messages pending
				}

			} else { // 配置交易消息:创建新的应用通道或更新通道配置
				// ConfigMsg 
				if msg.configSeq < seq {
					msg.configMsg, _, err = ch.support.ProcessConfigMsg(msg.configMsg)
					if err != nil {
						logger.Warningf("Discarding bad config message: %s", err)
						continue
					}
				}
				batch := ch.support.BlockCutter().Cut()
				if batch != nil {
					block := ch.support.CreateNextBlock(batch)
					ch.support.WriteBlock(block, nil)
				}

				block := ch.support.CreateNextBlock([]*cb.Envelope{msg.configMsg})
				ch.support.WriteConfigBlock(block, nil)
				timer = nil
			}

		// 生成区块超时
		case <-timer:   
			//clear the timer
			timer = nil

			batch := ch.support.BlockCutter().Cut()
			if len(batch) == 0 {
				logger.Warningf("Batch timer expired with no pending requests, this might indicate a bug")
				continue
			}
			logger.Debugf("Batch timer expired, creating block")
			block := ch.support.CreateNextBlock(batch)
			ch.support.WriteBlock(block, nil)

		// 若接收到退出消息,则退出消息处理循环
		case <-ch.exitChan: // 若接收到退出消息,则退出消息处理循环
			logger.Debugf("Exiting")
			return
		}
	}
}
// Ordered enqueues msg into the pending batch and applies the block-cutting
// rules, returning zero, one, or two cut batches plus a flag indicating
// whether any messages remain pending afterwards. At most two batches are
// returned, and the second contains exactly one (oversized) message.
func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, pending bool) {
	if len(r.pendingBatch) == 0 {
		// We are beginning a new batch, mark the time
		r.PendingBatchStartTime = time.Now()
	}

	ordererConfig, ok := r.sharedConfigFetcher.OrdererConfig()
	if !ok {
		logger.Panicf("Could not retrieve orderer config to query batch parameters, block cutting is not possible")
	}

	batchSize := ordererConfig.BatchSize()

	// Rule 1: a message larger than PreferredMaxBytes is isolated in its own
	// batch; any pending batch is cut first, so up to two batches result.
	messageSizeBytes := messageSizeBytes(msg)
	if messageSizeBytes > batchSize.PreferredMaxBytes {
		logger.Debugf("The current message, with %v bytes, is larger than the preferred batch size of %v bytes and will be isolated.", messageSizeBytes, batchSize.PreferredMaxBytes)

		// cut pending batch, if it has any messages
		if len(r.pendingBatch) > 0 {
			messageBatch := r.Cut()
			messageBatches = append(messageBatches, messageBatch)
		}

		// create new batch with single message
		messageBatches = append(messageBatches, []*cb.Envelope{msg})

		// Record that this batch took no time to fill
		r.Metrics.BlockFillDuration.With("channel", r.ChannelID).Observe(0)

		return
	}

	// Rule 2: if adding msg would push the pending batch past
	// PreferredMaxBytes, cut the pending batch before enqueuing msg.
	messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes > batchSize.PreferredMaxBytes

	if messageWillOverflowBatchSizeBytes {
		logger.Debugf("The current message, with %v bytes, will overflow the pending batch of %v bytes.", messageSizeBytes, r.pendingBatchSizeBytes)
		logger.Debugf("Pending batch would overflow if current message is added, cutting batch now.")
		messageBatch := r.Cut()
		r.PendingBatchStartTime = time.Now()
		messageBatches = append(messageBatches, messageBatch)
	}

	logger.Debugf("Enqueuing message into batch")
	r.pendingBatch = append(r.pendingBatch, msg)
	r.pendingBatchSizeBytes += messageSizeBytes
	pending = true

	// Rule 3: once the pending batch reaches MaxMessageCount messages,
	// cut it immediately; nothing is left pending in that case.
	if uint32(len(r.pendingBatch)) >= batchSize.MaxMessageCount {
		logger.Debugf("Batch size met, cutting batch")
		messageBatch := r.Cut()
		messageBatches = append(messageBatches, messageBatch)
		pending = false
	}

	return
}

2 打包出块并提交到账本
chain.main()方法调用ch.support.BlockCutter().Ordered(msg.normalMsg)→receiver.Ordered()方法,如代码清单2-37所示,即通过消息切割组件receiver将当前接收的普通交易消息添加到缓存交易消息列表中,按打包出块规则切割成批量交易集合列表batches([][]*cb.Envelope类型)。其中,batches最多包含两个批量交易集合,并且第2个批量交易集合batch最多包含1个交易。

          orderer/common/blockcutter/blockcutter.go文件
func  (r  *receiver)  Ordered(msg  *cb.Envelope)  (messageBatches  [][]*cb.Envelope,
   pending bool) {
   messageSizeBytes := messageSizeBytes(msg)         // 获取该消息的字节数
   // 检查当前消息的字节数是否超过推荐的消息最大字节数
   if messageSizeBytes > r.sharedConfigManager.BatchSize().PreferredMaxBytes {
         ……
       // 如果存在缓存交易消息列表,则切割出批量交易集合
       if len(r.pendingBatch) > 0 {
           messageBatch := r.Cut()                   // 切割批量交易集合
           messageBatches = append(messageBatches, messageBatch)
                                                     // 添加到messageBatches列表
       }

       // 将msg构造为单独的批量交易集合,并添加到messageBatches列表中
       messageBatches = append(messageBatches, []*cb.Envelope{msg})
       return                                        // 返回消息处理循环
   }
   // 如果添加msg消息后的消息长度超过推荐的消息最大字节数,则先清空当前缓存交易消息列表
   messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes
       > r.sharedConfigManager.BatchSize().PreferredMaxBytes
   if messageWillOverflowBatchSizeBytes {
         ……
       messageBatch := r.Cut()                       // 切割批量交易集合
       // 添加到messageBatches列表
       messageBatches = append(messageBatches, messageBatch)
   }
   ……
   r.pendingBatch = append(r.pendingBatch, msg) // 将消息msg添加到缓存交易消息列表
   r.pendingBatchSizeBytes += messageSizeBytes       // 调整缓存交易消息列表的消息字节数
   pending = true
   // 检查调整后的缓存交易消息列表的消息个数是否超过了预设的最大消息数
   if uint32(len(r.pendingBatch))  >=  r.sharedConfigManager.BatchSize().
       MaxMessageCount {
       logger.Debugf("Batch size met, cutting batch")
       messageBatch := r.Cut()                    // 切割批量交易集合
       messageBatches = append(messageBatches, messageBatch) // 添加到批量交易集合列表
       pending = false
   }
   return
}
  • 其中,receiver.Ordered()方法首先获取普通交易消息msg的消息字节数。如果超过了Orderer配置推荐的最大消息字节数PreferredMaxBytes(默认为512KB),则继续检查。如果当前缓存交易消息列表的消息数量len(r.pendingBatch)大于0,则调用receiver组件的r.Cut()方法,清空该列表以切割出第1个批量交易集合messageBatch([]*cb.Envelope类型),并添加到批量交易消息集合messageBatches中。同时,将普通交易消息msg构造为单独的批量交易消息[]*cb.Envelope{msg},再添加到messageBatches列表中,这种情况下会形成两个批量交易集合。接着,receiver.Ordered()方法计算当前缓存交易消息列表在添加msg后的消息字节数。如果该字节数超过了推荐的最大消息字节数PreferredMaxBytes,则调用r.Cut()方法切割出批量交易集合messageBatch,再添加到messageBatches列表中,此时会形成第1个批量交易集合。
  • 否则,直接添加msg到当前缓存交易消息列表pendingBatch中,计算获取调整后缓存交易消息列表中的消息个数。如果该消息个数超过了Orderer配置的最大消息数MaxMessageCount(默认是10个),则调用r.Cut()方法切割出批量交易集合messageBatch,并添加到messageBatches列表中,将其返回到chain.main()方法中的batches变量。这种情况下会形成第2个批量交易集合,否则,只会形成第1个批量交易集合
  • 然后chain.main()方法检查批量交易集合列表batches。如果batches中不存在任何缓存消息,同时没有设置定时器timer(nil),则设置定时器触发周期事件为Orderer配置的出块超时时间(默认为2秒),负责周期性地发送打包出块消息(TIMETOCUT类型)。
  • 最后,chain.main()方法遍历batches中的每个对象batch,调用CreateNextBlock(batch)方法,基于该对象创建新区块block,再调用WriteBlock(block, nil)方法,将新区块写入当前通道账本的区块数据文件中。如果batches存在任何批量交易集合,则取消定时器(nil)。至此,chain.main()方法处理普通交易消息的流程结束,并返回到消息处理循环中

Deliver()区块分发服务

Orderer排序服务器提供Deliver()区块分发服务接口,将接收的服务请求交由Deliver服务处理句柄的Handle()方法处理,建立消息处理循环,负责接收与处理客户端提交的区块请求消息(Envelope类型,通道头部类型是DELIVER_SEEK_INFO、CONFIG_UPDATE等),封装了指定区块请求范围的区块搜索信息(SeekInfo类型)。接着,Deliver服务处理句柄循环从本地账本获取区块数据,依次发送给请求节点(如Leader主节点)。如果账本中还未生成指定区块,则Deliver服务处理句柄默认一直阻塞等待,直到该区块创建完成并提交账本后再回复给请求节点。

你可能感兴趣的:(hyperledger,fabric,fabric,服务器)