排序服务节点启动通过cmd/orderer/main.go的main()方法实现,该方法会进一步调用orderer/common/server/main.go#Main()方法。
# fabric-release-2.2\cmd\orderer\main.go
package main
import "github.com/hyperledger/fabric/orderer/common/server"
// main is the orderer process entry point; it delegates all startup work
// to server.Main in orderer/common/server.
func main() {
	server.Main()
}
# orderer/common/server/main.go文件
func Main() {
//解析用户命令行
fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
// "version" command
if fullCmd == version.FullCommand() {
fmt.Println(metadata.GetVersionInfo())
return
}
//1.加载本地orderer.yaml配置文件
conf, err := localconfig.Load()
if err != nil {
logger.Error("failed to parse config: ", err)
os.Exit(1)
}
···
# orderer/common/localconfig/config.go文件
// Load reads the orderer YAML configuration file together with any
// environment-variable overrides, returning a populated TopLevel struct,
// or an error when the configuration cannot be read or parsed.
func Load() (*TopLevel, error) {
	conf, err := cache.load()
	if err != nil {
		return nil, err
	}
	return conf, nil
}
// configCache stores marshalled bytes of config structures that produced from
// EnhancedExactUnmarshal. Cache key is the path of the configuration file that was used.
type configCache struct {
	mutex sync.Mutex        // guards cache against concurrent load() calls
	cache map[string][]byte // config-file path -> JSON-serialized TopLevel
}

// cache is the package-level singleton consulted by Load.
var cache = &configCache{}
// Load will load the configuration and cache it on the first call; subsequent
// calls will return a clone of the configuration that was previously loaded.
// Cloning via a JSON round-trip ensures callers never share mutable state.
func (c *configCache) load() (*TopLevel, error) {
	var uconf TopLevel
	// Set up viper to locate the "orderer" config file and let
	// Prefix-prefixed environment variables (with '.' mapped to '_')
	// override file values.
	config := viper.New()
	coreconfig.InitViper(config, "orderer")
	config.SetEnvPrefix(Prefix)
	config.AutomaticEnv()
	replacer := strings.NewReplacer(".", "_")
	config.SetEnvKeyReplacer(replacer)
	if err := config.ReadInConfig(); err != nil {
		return nil, fmt.Errorf("Error reading configuration: %s", err)
	}
	// The cache is keyed by the config file path viper resolved; hold the
	// mutex for the whole cache-check/populate/clone sequence.
	c.mutex.Lock()
	defer c.mutex.Unlock()
	serializedConf, ok := c.cache[config.ConfigFileUsed()]
	if !ok {
		// Cache miss: perform the expensive unmarshal once, then store the
		// JSON-serialized form so later calls can cheaply clone it.
		err := viperutil.EnhancedExactUnmarshal(config, &uconf)
		if err != nil {
			return nil, fmt.Errorf("Error unmarshaling config into struct: %s", err)
		}
		serializedConf, err = json.Marshal(uconf)
		if err != nil {
			return nil, err
		}
		// Lazily allocate the map the first time anything is cached.
		if c.cache == nil {
			c.cache = map[string][]byte{}
		}
		c.cache[config.ConfigFileUsed()] = serializedConf
	}
	// Deserialize into a fresh struct so every caller gets its own copy.
	err := json.Unmarshal(serializedConf, &uconf)
	if err != nil {
		return nil, err
	}
	// Complete initialization relative to the config file's directory
	// (presumably resolving relative paths/defaults — see completeInitialization).
	uconf.completeInitialization(filepath.Dir(config.ConfigFileUsed()))
	return &uconf, nil
}
# orderer/common/server/main.go文件
//2.读取日志环境变量FABRIC_LOGGING_SPEC 和 FABRIC_LOGGING_FORMAT ,初始化日志数据结构
initializeLogging()
# orderer/common/server/main.go文件
// initializeLogging configures the global flogging backend from the
// FABRIC_LOGGING_SPEC and FABRIC_LOGGING_FORMAT environment variables,
// writing log output to stderr.
func initializeLogging() {
	flogging.Init(flogging.Config{
		Format:  os.Getenv("FABRIC_LOGGING_FORMAT"),
		Writer:  os.Stderr,
		LogSpec: os.Getenv("FABRIC_LOGGING_SPEC"),
	})
}
# orderer/common/server/main.go文件
// 3.创建并启动运维支持服务
opsSystem := newOperationsSystem(conf.Operations, conf.Metrics)
metricsProvider := opsSystem.Provider
logObserver := floggingmetrics.NewObserver(metricsProvider)
flogging.SetObserver(logObserver)
// initializeServerConfig初始化grpc服务器配置
serverConfig := initializeServerConfig(conf, metricsProvider)
// initializeGrpcServer初始化grpc服务器实例
grpcServer := initializeGrpcServer(conf, serverConfig)
caMgr := &caManager{
appRootCAsByChain: make(map[string][][]byte),
ordererRootCAsByChain: make(map[string][][]byte),
clientRootCAs: serverConfig.SecOpts.ClientRootCAs,
}
# orderer/common/server/main.go文件
//上述newOperationsSystem相关代码
// newOperationsSystem builds the operations (metrics/health) subsystem from
// the orderer's local Operations and Metrics configuration sections.
func newOperationsSystem(ops localconfig.Operations, metrics localconfig.Metrics) *operations.System {
	statsdOpts := &operations.Statsd{
		Network:       metrics.Statsd.Network,
		Address:       metrics.Statsd.Address,
		WriteInterval: metrics.Statsd.WriteInterval,
		Prefix:        metrics.Statsd.Prefix,
	}
	tlsOpts := operations.TLS{
		Enabled:            ops.TLS.Enabled,
		CertFile:           ops.TLS.Certificate,
		KeyFile:            ops.TLS.PrivateKey,
		ClientCertRequired: ops.TLS.ClientAuthRequired,
		ClientCACertFiles:  ops.TLS.ClientRootCAs,
	}
	opts := operations.Options{
		Logger:        flogging.MustGetLogger("orderer.operations"),
		ListenAddress: ops.ListenAddress,
		Metrics: operations.MetricsOptions{
			Provider: metrics.Provider,
			Statsd:   statsdOpts,
		},
		TLS:     tlsOpts,
		Version: metadata.Version,
	}
	return operations.NewSystem(opts)
}
// 4 初始化账本工厂结构
lf, _, err := createLedgerFactory(conf, metricsProvider)
if err != nil {
logger.Panicf("Failed to create ledger factory: %v", err)
}
//orderer\common\server\util.go
// createLedgerFactory opens (or creates) the file-based block ledger under
// conf.FileLedger.Location. When no location is configured, a fresh temporary
// directory (named with conf.FileLedger.Prefix) is used instead. It returns
// the ledger factory together with the directory actually used.
func createLedgerFactory(conf *config.TopLevel, metricsProvider metrics.Provider) (blockledger.Factory, string, error) {
	ld := conf.FileLedger.Location
	if ld == "" {
		// No location configured: fall back to a throwaway temp directory.
		var err error
		if ld, err = ioutil.TempDir("", conf.FileLedger.Prefix); err != nil {
			// Return the error instead of panicking so the caller decides how
			// to fail, consistent with the factory-open error path below.
			return nil, "", errors.WithMessage(err, "Error creating temp dir")
		}
	}
	logger.Debug("Ledger dir:", ld)
	lf, err := fileledger.New(ld, metricsProvider)
	if err != nil {
		return nil, "", errors.WithMessage(err, "Error in opening ledger factory")
	}
	return lf, ld, nil
}
// 5 如果指定了启动区块,则解析并进行必要的启动(加载启动区块并同步账本)
var bootstrapBlock *cb.Block
if conf.General.BootstrapMethod == "file" {
bootstrapBlock = file.New(conf.General.BootstrapFile).GenesisBlock()
if err := onboarding.ValidateBootstrapBlock(bootstrapBlock, cryptoProvider); err != nil {
logger.Panicf("Failed validating bootstrap block: %v", err)
}
// Are we bootstrapping with a genesis block (i.e. bootstrap block number = 0)?
// If yes, generate the system channel with a genesis block.
if len(lf.ChannelIDs()) == 0 && bootstrapBlock.Header.Number == 0 {
logger.Info("Bootstrapping the system channel")
//初始化系统通道的源码
initializeBootstrapChannel(bootstrapBlock, lf)
} else if len(lf.ChannelIDs()) > 0 {
logger.Info("Not bootstrapping the system channel because of existing channels")
} else {
logger.Infof("Not bootstrapping the system channel because the bootstrap block number is %d (>0), replication is needed", bootstrapBlock.Header.Number)
}
} else if conf.General.BootstrapMethod != "none" {
logger.Panicf("Unknown bootstrap method: %s", conf.General.BootstrapMethod)
}
// 6 初始化通道管理器
//initializeMultichannelRegistrar初始化多通道注册管理器registrar对象
manager := initializeMultichannelRegistrar(
clusterBootBlock,
repInitiator,
clusterDialer,
clusterServerConfig,
clusterGRPCServer,
conf,
signer,
metricsProvider,
opsSystem,
lf,
cryptoProvider,
tlsCallback,
)
Orderer排序服务器上的通道共识组件链对象利用Golang通道(Solo共识组件)或Kafka集群(Kafka共识组件)作为共识排序后端,对经过通道消息处理器过滤的合法交易消息进行排序,对交易顺序等达成一致性观点。
执行流程
本地待处理的缓存交易列表
## https://github.com/hyperledger/fabric/blob/release-2.2/orderer/consensus/solo/consensus.go
## 添加消息到缓存交易消息列表,并按出块规则切割成批量交易集合列表batches
batches, _ := ch.support.BlockCutter().Ordered(msg.normalMsg)
# /orderer/consensus/solo/consensus.go
// main is the Solo chain's single message-processing loop. It serializes all
// ordering work: normal transactions are fed through the block cutter,
// config transactions cut a block of their own immediately, and a batch
// timer enforces BatchTimeout on partially filled batches.
func (ch *chain) main() {
	var timer <-chan time.Time
	var err error
	for { // message-processing loop
		seq := ch.support.Sequence() // current config sequence of the channel
		err = nil
		select {
		// Check for a message on the sendChan channel.
		case msg := <-ch.sendChan:
			if msg.configMsg == nil { // a normal (non-config) transaction
				// NormalMsg
				if msg.configSeq < seq {
					// The message was filtered under an older channel config;
					// the config has since changed, so re-validate it.
					_, err = ch.support.ProcessNormalMsg(msg.normalMsg)
					if err != nil {
						// On error, discard the message and continue the loop.
						logger.Warningf("Discarding bad normal message: %s", err)
						continue
					}
				}
				// Enqueue the message; the cutter returns zero or more full
				// batches plus whether any messages remain pending.
				batches, pending := ch.support.BlockCutter().Ordered(msg.normalMsg)
				for _, batch := range batches { // one block per returned batch
					block := ch.support.CreateNextBlock(batch) // build the next block
					ch.support.WriteBlock(block, nil)          // commit it to the ledger
				}
				switch {
				case timer != nil && !pending:
					// Timer is already running but there are no messages pending, stop the timer
					timer = nil
				case timer == nil && pending:
					// Timer is not already running and there are messages pending, so start it
					timer = time.After(ch.support.SharedConfig().BatchTimeout())
					logger.Debugf("Just began %s batch timer", ch.support.SharedConfig().BatchTimeout().String())
				default:
					// Do nothing when:
					// 1. Timer is already running and there are messages pending
					// 2. Timer is not set and there are no messages pending
				}
			} else { // a config transaction: new channel creation or config update
				// ConfigMsg
				if msg.configSeq < seq {
					// Re-process the config message under the updated config.
					msg.configMsg, _, err = ch.support.ProcessConfigMsg(msg.configMsg)
					if err != nil {
						logger.Warningf("Discarding bad config message: %s", err)
						continue
					}
				}
				// Flush any pending normal transactions first so the config
				// transaction gets a block of its own.
				batch := ch.support.BlockCutter().Cut()
				if batch != nil {
					block := ch.support.CreateNextBlock(batch)
					ch.support.WriteBlock(block, nil)
				}
				block := ch.support.CreateNextBlock([]*cb.Envelope{msg.configMsg})
				ch.support.WriteConfigBlock(block, nil)
				timer = nil
			}
		// Batch timeout fired.
		case <-timer:
			//clear the timer
			timer = nil
			// Cut whatever is pending into a block.
			batch := ch.support.BlockCutter().Cut()
			if len(batch) == 0 {
				logger.Warningf("Batch timer expired with no pending requests, this might indicate a bug")
				continue
			}
			logger.Debugf("Batch timer expired, creating block")
			block := ch.support.CreateNextBlock(batch)
			ch.support.WriteBlock(block, nil)
		case <-ch.exitChan: // exit signal received: leave the processing loop
			logger.Debugf("Exiting")
			return
		}
	}
}
// Ordered enqueues msg and returns any batches that became complete as a
// result, plus whether messages remain pending afterwards. At most two
// batches can be returned: the cut pending batch, and a second batch holding
// only an isolated oversized message.
func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, pending bool) {
	if len(r.pendingBatch) == 0 {
		// We are beginning a new batch, mark the time
		r.PendingBatchStartTime = time.Now()
	}
	ordererConfig, ok := r.sharedConfigFetcher.OrdererConfig()
	if !ok {
		logger.Panicf("Could not retrieve orderer config to query batch parameters, block cutting is not possible")
	}
	batchSize := ordererConfig.BatchSize()
	messageSizeBytes := messageSizeBytes(msg)
	// A message larger than PreferredMaxBytes is isolated into its own
	// single-message batch rather than joining the pending batch.
	if messageSizeBytes > batchSize.PreferredMaxBytes {
		logger.Debugf("The current message, with %v bytes, is larger than the preferred batch size of %v bytes and will be isolated.", messageSizeBytes, batchSize.PreferredMaxBytes)
		// cut pending batch, if it has any messages
		if len(r.pendingBatch) > 0 {
			messageBatch := r.Cut()
			messageBatches = append(messageBatches, messageBatch)
		}
		// create new batch with single message
		messageBatches = append(messageBatches, []*cb.Envelope{msg})
		// Record that this batch took no time to fill
		r.Metrics.BlockFillDuration.With("channel", r.ChannelID).Observe(0)
		return
	}
	// If adding msg would push the pending batch past PreferredMaxBytes, cut
	// the pending batch now and start a fresh one for msg.
	messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes > batchSize.PreferredMaxBytes
	if messageWillOverflowBatchSizeBytes {
		logger.Debugf("The current message, with %v bytes, will overflow the pending batch of %v bytes.", messageSizeBytes, r.pendingBatchSizeBytes)
		logger.Debugf("Pending batch would overflow if current message is added, cutting batch now.")
		messageBatch := r.Cut()
		r.PendingBatchStartTime = time.Now()
		messageBatches = append(messageBatches, messageBatch)
	}
	logger.Debugf("Enqueuing message into batch")
	r.pendingBatch = append(r.pendingBatch, msg)
	r.pendingBatchSizeBytes += messageSizeBytes
	pending = true
	// Cut immediately once the configured MaxMessageCount is reached.
	if uint32(len(r.pendingBatch)) >= batchSize.MaxMessageCount {
		logger.Debugf("Batch size met, cutting batch")
		messageBatch := r.Cut()
		messageBatches = append(messageBatches, messageBatch)
		pending = false
	}
	return
}
2 打包出块并提交到账本
chain.main()方法调用ch.support.BlockCutter().Ordered(msg.normalMsg)→receiver.Ordered()方法,如代码清单2-37所示,即通过消息切割组件receiver将当前接收的普通交易消息添加到缓存交易消息列表中,按打包出块规则切割成批量交易集合列表batches([][]*cb.Envelope类型)。其中,batches最多包含两个批量交易集合,并且第2个批量交易集合batch最多包含1个交易。
orderer/common/blockcutter/blockcutter.go文件
func (r *receiver) Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope,
pending bool) {
messageSizeBytes := messageSizeBytes(msg) // 获取该消息的字节数
// 检查当前消息的字节数是否超过推荐的消息最大字节数
if messageSizeBytes > r.sharedConfigManager.BatchSize().PreferredMaxBytes {
……
// 如果存在缓存交易消息列表,则切割出批量交易集合
if len(r.pendingBatch) > 0 {
messageBatch := r.Cut() // 切割批量交易集合
messageBatches = append(messageBatches, messageBatch)
// 添加到messageBatches列表
}
// 将msg构造为单独的批量交易集合,并添加到messageBatches列表中
messageBatches = append(messageBatches, []*cb.Envelope{msg})
return // 返回消息处理循环
}
// 如果添加msg消息后的消息长度超过推荐的消息最大字节数,则先清空当前缓存交易消息列表
messageWillOverflowBatchSizeBytes := r.pendingBatchSizeBytes+messageSizeBytes
> r.sharedConfigManager.BatchSize().PreferredMaxBytes
if messageWillOverflowBatchSizeBytes {
……
messageBatch := r.Cut() // 切割批量交易集合
// 添加到messageBatches列表
messageBatches = append(messageBatches, messageBatch)
}
……
r.pendingBatch = append(r.pendingBatch, msg) // 将消息msg添加到缓存交易消息列表
r.pendingBatchSizeBytes += messageSizeBytes // 调整缓存交易消息列表的消息字节数
pending = true
// 检查调整后的缓存交易消息列表的消息个数是否超过了预设的最大消息数
if uint32(len(r.pendingBatch)) >= r.sharedConfigManager.BatchSize().
MaxMessageCount {
logger.Debugf("Batch size met, cutting batch")
messageBatch := r.Cut() // 切割批量交易集合
messageBatches = append(messageBatches, messageBatch) // 添加到批量交易集合列表
pending = false
}
return
}
Orderer排序服务器提供Deliver()区块分发服务接口,将接收的服务请求交由Deliver服务处理句柄的Handle()方法处理,建立消息处理循环,负责接收与处理客户端提交的区块请求消息(Envelope类型,通道头部类型是DELIVER_SEEK_INFO、CONFIG_UPDATE等),封装了指定区块请求范围的区块搜索信息(SeekInfo类型)。接着,Deliver服务处理句柄循环从本地账本获取区块数据,依次发送给请求节点(如Leader主节点)。如果账本中还未生成指定区块,则Deliver服务处理句柄默认一直阻塞等待,直到该区块创建完成并提交账本后再回复给请求节点。