Fabric's solo consensus is deliberately simple; it exists mainly so Fabric developers can experiment. Because it is a simple, locally runnable consensus, developers can work on the non-consensus parts of the code on a single machine. For the same reason, we can run solo consensus locally to analyze the rest of Fabric's code.
Here we use the solo consensus unit tests and the surrounding helper code to analyze Fabric's consensus flow and system logic.
Files involved in the fabric solo unit tests:
fabric/orderer/consensus/solo/consensus_test.go
fabric/orderer/consensus/solo/consensus.go
fabric/orderer/mocks/common/blockcutter/blockcutter.go
fabric/orderer/mocks/common/multichannel/multichannel.go
After a channel is closed with close(chan), reads can still be attempted: they return immediately with the element type's zero value. Writes to a closed channel, however, panic. See the solo test function TestStart.
package main

import "fmt"

func main() {
    ch := make(chan struct{})
    close(ch)
    defer func() {
        // Sending on a closed channel would panic:
        // ch <- struct{}{}
        // Receiving from a closed channel, however, is fine: it does not block
        // and returns the zero value immediately (struct{}{} here).
        a := <-ch
        fmt.Println("a", a)
    }()
}
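And to answer the question in the comment above: sending on a closed channel panics at runtime. A minimal standalone sketch (my own example, not from the Fabric source) that makes the panic visible via recover:

package main

import "fmt"

func main() {
    ch := make(chan struct{})
    close(ch)
    defer func() {
        // recover catches the runtime panic raised by the send below
        if r := recover(); r != nil {
            fmt.Println("recovered:", r) // "send on closed channel"
        }
    }()
    ch <- struct{}{} // panics: send on closed channel
}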
fabric/orderer/consensus/solo/consensus_test.go (source):
// What is this test checking? It looks much like TestHaltBeforeTimeout, but here
// CutNext = true, so ordering a single message is expected to produce a block on support.Blocks.
func TestStart(t *testing.T) {
    batchTimeout, _ := time.ParseDuration("1ms")
    support := &mockmultichannel.ConsenterSupport{
        Blocks:          make(chan *cb.Block),
        BlockCutterVal:  mockblockcutter.NewReceiver(),
        SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout},
    }
    // Close the mock Receiver.Block channel up front. close itself never blocks,
    // and a closed channel can still be read: every receive returns the zero value
    // immediately, so the mock's deferred <-mbc.Block in Ordered will not block.
    close(support.BlockCutterVal.Block)
    // Create the chain here
    bs, _ := New().HandleChain(support, nil)
    // Start solo; Start spawns the main goroutine
    bs.Start()
    defer bs.Halt()
    // CutNext controls batch cutting in the mock block cutter, allowing flexible block division:
    // CutNext = true  -> every subsequent Ordered call cuts a batch (one block per message)
    // CutNext = false -> subsequent Ordered calls do not cut a batch
    support.BlockCutterVal.CutNext = true
    // Call bs.Order and pass its return value to assert.Nil to check the error is nil.
    // There is no Receiver.Block <- struct{}{} here; wouldn't the mock's <-Receiver.Block block?
    // No: because the channel was closed above, the mock's <-Block returns immediately.
    assert.Nil(t, bs.Order(testMessage, 0))
    select {
    case <-support.Blocks:
        // We receive the block that was written, i.e. what ch.support.WriteBlock(block, nil)
        // in solo delivers; with the mock support this happens in WriteBlock in
        // fabric/orderer/mocks/common/multichannel/multichannel.go
        log.Println("support blocks")
    case <-bs.Errored():
        t.Fatalf("Expected not to exit")
    }
}
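To make CutNext concrete, here is a simplified, illustrative sketch of a block-cutter-like receiver (the type and fields below are made up for this note; the real mock lives in fabric/orderer/mocks/common/blockcutter): with CutNext set, every Ordered call returns the pending messages as a batch, so each ordered message immediately becomes a block.

package main

import "fmt"

// toyReceiver is an illustrative stand-in for the mock block cutter.
type toyReceiver struct {
    CutNext bool
    pending []string
}

// Ordered queues msg and, if CutNext is set, cuts the current batch immediately.
func (r *toyReceiver) Ordered(msg string) (batches [][]string, pending bool) {
    r.pending = append(r.pending, msg)
    if r.CutNext {
        batches = [][]string{r.pending}
        r.pending = nil
        return batches, false
    }
    return nil, true
}

func main() {
    r := &toyReceiver{CutNext: true}
    fmt.Println(r.Ordered("tx1")) // [[tx1]] false : a batch is cut, so a block will be written

    r.CutNext = false
    fmt.Println(r.Ordered("tx2")) // [] true : nothing is cut, the message stays pending
}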
The analysis below uses the somewhat clumsy approach of early returns plus logging, with two tricks: running a single test verbosely with `go test -v -run <TestName>`, and what I call "circuit-break testing" (cutting the flow with return/continue plus log statements to see which path the data takes). A sample run:
localhost:solo liu$ go test -v -run TestHaltBeforeTimeout
=== RUN TestHaltBeforeTimeout
2019/04/17 22:13:22 here
2019/04/17 22:13:22 chan order sendchan
2019/04/17 22:13:22 mock ordered
2019/04/17 22:13:22 sync will blocking
2019-04-17 22:13:22.875 CST [orderer/consensus/solo] main -> DEBU 001 Exiting
2019/04/17 22:13:22 solo finished
--- PASS: TestHaltBeforeTimeout (0.00s)
PASS
ok github.com/hyperledger/fabric/orderer/consensus/solo 0.044s
localhost:solo liu$
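As a minimal illustration of the second trick, "circuit-break" testing (everything below is a made-up example, not Fabric code): drop a log line plus an early return into the branch you suspect, rerun the single test, and the output both proves the branch is reached and shows what stops working once the flow is severed.

package main

import "log"

// suspectBranch stands in for a branch of solo's main loop that we want to verify.
func suspectBranch(msg string) {
    log.Println("circuit-break: reached with", msg) // the log proves this branch actually runs
    return                                          // the early return severs the flow here
    // process(msg) // downstream work that is temporarily cut off
}

func main() {
    suspectBranch("tx1")
}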
The synchronization around the mock block cutter's Block channel spans several files. The test side sends the signal:
bc.Block <- struct{}{}
//interface:
// Receiver defines a sink for the ordered broadcast messages
type Receiver interface {
    // Ordered should be invoked sequentially as messages are ordered
    // Each batch in `messageBatches` will be wrapped into a block.
    // `pending` indicates if there are still messages pending in the receiver. It
    // is useful for Kafka orderer to determine the `LastOffsetPersisted` of block.
    Ordered(msg *cb.Envelope) (messageBatches [][]*cb.Envelope, pending bool)

    // Cut returns the current batch and starts a new one
    Cut() []*cb.Envelope
}
//...
//solo:
batches, _ := ch.support.BlockCutter().Ordered(msg.normalMsg)
func NewReceiver() *Receiver {
    return &Receiver{
        IsolatedTx:   false,
        CutAncestors: false,
        CutNext:      false,
        Block:        make(chan struct{}),
    }
}
//...
// mock: the object that is actually used in the tests
// Ordered will add or cut the batch according to the state of Receiver, it blocks reading from Block on return
func (mbc *Receiver) Ordered(env *cb.Envelope) ([][]*cb.Envelope, bool) {
    log.Println("mock ordered")
    defer func() {
        <-mbc.Block
    }()
    ...
}
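What ties these pieces together is a plain rendezvous on an unbuffered chan struct{}: the send bc.Block <- struct{}{} in the test can only complete once the mock's deferred <-mbc.Block runs, i.e. once Ordered has finished. A standalone sketch of the same handshake (illustrative names, not Fabric code):

package main

import "fmt"

func main() {
    block := make(chan struct{}) // unbuffered, like mockblockcutter.Receiver.Block

    // Stands in for the solo main goroutine calling the mock's Ordered.
    go func() {
        defer func() { <-block }() // like the mock Ordered: receive from Block on return
        fmt.Println("ordered: simulated processing")
    }()

    // Stands in for syncQueueMessage: this send can only complete once the
    // goroutine's deferred receive runs, i.e. once "Ordered" has returned.
    block <- struct{}{}
    fmt.Println("syncQueueMessage: mock processing finished")
}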
Here we analyze the data flow of TestHaltBeforeTimeout(t *testing.T), which involves three main parts: the test code itself, the solo consensus code, and the mocks it drives.
The detailed analysis is in the comments in the code below.
fabric/orderer/consensus/solo/consensus_test.go:
func syncQueueMessage(msg *cb.Envelope, chain *chain, bc *mockblockcutter.Receiver) {
    chain.Order(msg, 0)
    bc.Block <- struct{}{}
    // What is this send for? It is a synchronization point: bc.Block is an unbuffered
    // chan struct{} used purely as a signal (hence the struct{}{} value).
    // Who does it synchronize with? Found via "circuit-break testing" (my own name for
    // cutting the flow with return/continue plus logs): the mock Ordered in
    // fabric/orderer/mocks/common/blockcutter/blockcutter.go receives from Receiver.Block
    // in a defer, so this send only completes once Ordered has returned. When the send
    // goes through, syncQueueMessage knows the simulated processing has finished.
}
type waitableGo struct {
    done chan struct{}
}

func goWithWait(target func()) *waitableGo {
    wg := &waitableGo{
        done: make(chan struct{}),
    }
    go func() {
        target()       // the goroutine blocks here, running the solo main loop and handling the messages queued via syncQueueMessage
        close(wg.done) // closing done tells the outside that target has returned
    }()
    // Note: returning from goWithWait does not stop the goroutine;
    // it keeps running until target() itself returns.
    return wg
}
// This test checks that if consenter is halted before a timer fires, nothing is actually written.
func TestHaltBeforeTimeout(t *testing.T) {
    batchTimeout, _ := time.ParseDuration("1ms")
    // Build the mock ConsenterSupport the chain runs against
    support := &mockmultichannel.ConsenterSupport{
        Blocks:          make(chan *cb.Block), // written blocks are delivered here
        BlockCutterVal:  mockblockcutter.NewReceiver(),
        SharedConfigVal: &mockconfig.Orderer{BatchTimeoutVal: batchTimeout},
    }
    defer close(support.BlockCutterVal.Block)
    bs := newChain(support)
    // bs.main is the solo entry point: an endless processing loop.
    // goWithWait starts it in a goroutine and signals its completion via a channel.
    wg := goWithWait(bs.main)
    defer bs.Halt()
    log.Println("here")
    // Feed the message into solo for ordering. I first feared sync would block forever
    // and nothing after it would run; in fact syncQueueMessage only blocks until the
    // mock has processed the message, then returns.
    syncQueueMessage(testMessage, bs, support.BlockCutterVal)
    log.Println("sync will blocking")
    bs.Halt() // halt the solo consenter
    select {
    case <-support.Blocks:
        // This would be the written block, i.e. what ch.support.WriteBlock(block, nil) in solo
        // delivers via WriteBlock in fabric/orderer/mocks/common/multichannel/multichannel.go.
        // Nothing should be written in this test, so the test must not exit via this branch.
        t.Fatalf("Expected no invocations of Append")
    case <-wg.done: // the consensus goroutine has exited
        log.Println("solo finished")
    }
}
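To answer the question about goWithWait: the goroutine is not tied to the lifetime of goWithWait; it keeps running until target() returns, and close(done) is simply the completion signal that makes the select's <-wg.done case fire. A minimal standalone sketch of that pattern (illustrative, not Fabric code):

package main

import (
    "fmt"
    "time"
)

func goWithWait(target func()) chan struct{} {
    done := make(chan struct{})
    go func() {
        target()    // keeps running even after goWithWait has returned
        close(done) // completion signal: every receive on done now succeeds
    }()
    return done
}

func main() {
    exit := make(chan struct{})
    done := goWithWait(func() { <-exit }) // stands in for bs.main blocking until halted

    close(exit) // stands in for bs.Halt(): tell the "main loop" to return
    select {
    case <-done:
        fmt.Println("worker finished")
    case <-time.After(time.Second):
        fmt.Println("timed out waiting for worker")
    }
}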
fabric/orderer/consensus/solo/consensus.go:
// Order accepts normal messages for ordering
func (ch *chain) Order(env *cb.Envelope, configSeq uint64) error {
    // Callers enqueue messages by calling Order.
    // Why a select with two cases? The normal path is the send on sendChan, but if the
    // system is shutting down, exitChan tells us to bail out instead of blocking here
    // forever waiting for the main loop to receive, so the outer service can exit smoothly.
    select {
    case ch.sendChan <- &message{
        configSeq: configSeq,
        normalMsg: env,
    }:
        log.Println("chan order sendchan")
        return nil
    case <-ch.exitChan: // exit signal
        return fmt.Errorf("Exiting")
    }
}
// The main body of the solo consensus algorithm; only one data path is followed here, other branches are omitted.
func (ch *chain) main() {
    var timer <-chan time.Time
    var err error
    for {
        seq := ch.support.Sequence()
        err = nil
        select {
        case msg := <-ch.sendChan: // receive a message submitted via Order
            log.Println("chan receive msg")
            // continue  // my clumsy review trick again: logging plus circuit-breaking.
            // It confirmed that the message queued by syncQueueMessage arrives here.
            if msg.configMsg == nil {
                // NormalMsg
                if msg.configSeq < seq {
                    _, err = ch.support.ProcessNormalMsg(msg.normalMsg)
                    if err != nil {
                        logger.Warningf("Discarding bad normal message: %s", err)
                        continue
                    }
                }
                // Several circuit-break returns pinned it down: the sync signal is consumed
                // here, when the mock Ordered simulates processing the message.
                batches, _ := ch.support.BlockCutter().Ordered(msg.normalMsg)
                if len(batches) == 0 && timer == nil {
                    // In the normal case the message's journey ends here:
                    // no batch was cut, so arm the batch timer and wait.
                    timer = time.After(ch.support.SharedConfig().BatchTimeout())
                    continue
                }
                ...
            } else {
                ...
            }
        case <-timer:
            ...
        case <-ch.exitChan:
            logger.Debugf("Exiting")
            return
        }
    }
}
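The omitted case <-timer branch is where the batch timeout comes in: when Ordered cuts no batch, main arms timer = time.After(BatchTimeout) and, if it fires before the batch is cut by other means, cuts and writes whatever is pending. A simplified standalone sketch of that timer pattern (made-up names and types, not the real solo code):

package main

import (
    "fmt"
    "time"
)

func main() {
    sendChan := make(chan string)
    exitChan := make(chan struct{})
    batchTimeout := 50 * time.Millisecond

    go func() {
        var pending []string
        var timer <-chan time.Time
        for {
            select {
            case msg := <-sendChan:
                pending = append(pending, msg)
                if timer == nil {
                    // First pending message: arm the batch timer, like solo's main loop.
                    timer = time.After(batchTimeout)
                }
            case <-timer:
                // Timeout fired: cut whatever is pending into a "block".
                fmt.Println("timer fired, cutting batch:", pending)
                pending, timer = nil, nil
            case <-exitChan:
                fmt.Println("exiting without writing pending batch:", pending)
                return
            }
        }
    }()

    sendChan <- "tx1"
    time.Sleep(2 * batchTimeout) // let the timer fire and the batch get cut
    close(exitChan)
    time.Sleep(10 * time.Millisecond) // give the goroutine a moment to print "exiting"
}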
fabric/orderer/mocks/common/blockcutter/blockcutter.go:
// Ordered will add or cut the batch according to the state of Receiver, it blocks reading from Block on return
func (mbc *Receiver) Ordered(env *cb.Envelope) ([][]*cb.Envelope, bool) {
    defer func() {
        // When the simulated processing finishes, this receive completes the send
        // in syncQueueMessage, telling it the mock is done.
        <-mbc.Block
    }()
    ...
}

// Cut terminates the current batch, returning it
func (mbc *Receiver) Cut() []*cb.Envelope {
    ...
}
fabric/orderer/mocks/common/multichannel/multichannel.go:
// WriteBlock writes data to the Blocks channel
func (mcs *ConsenterSupport) WriteBlock(block *cb.Block, encodedMetadataValue []byte) {
    if encodedMetadataValue != nil {
        block.Metadata.Metadata[cb.BlockMetadataIndex_ORDERER] = utils.MarshalOrPanic(&cb.Metadata{Value: encodedMetadataValue})
    }
    mcs.HeightVal++
    mcs.Blocks <- block // here: the test's `case <-support.Blocks` receives this block
}