Today we wrap up the EtcdRaft source code analysis series with configuration changes. Horizontal scalability is a decisive measure of a distributed system's quality, and the ability to change cluster membership easily, quickly, effectively, and promptly is key to it. Let's take a look at how EtcdRaft implements it.
Interface
type Node interface {
...
// ProposeConfChange proposes config change.
// At most one ConfChange can be in the process of going through consensus.
// Application needs to call ApplyConfChange when applying EntryConfChange type entry.
ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
...
// ApplyConfChange applies config change to the local node.
// Returns an opaque ConfState protobuf which must be recorded
// in snapshots. Will never return nil; it returns a pointer only
// to match MemoryStorage.Compact.
ApplyConfChange(cc pb.ConfChange) *pb.ConfState
...
}
As you can see, two methods are related to configuration changes. If you have followed the earlier articles, you will recognize the way external code interacts with Raft: first propose, then wait for internal consensus, then apply.
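To make that two-step contract concrete, here is a minimal sketch from the application's point of view (assuming `n` is the application's raft.Node and `ctx` a context.Context; this is illustrative, not etcd's own code):

// Step 1: propose. This only starts consensus; nothing changes yet,
// and at most one ConfChange may be in flight at a time.
cc := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 4}
if err := n.ProposeConfChange(ctx, cc); err != nil {
    log.Printf("propose conf change: %v", err)
}

// Step 2: later, once the entry comes back committed through Ready,
// apply it and record the returned ConfState in future snapshots.
confState := n.ApplyConfChange(cc)
log.Printf("new membership: %v", confState.Nodes)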
struct
type ConfChange struct {
ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
const (
ConfChangeAddNode ConfChangeType = 0
ConfChangeRemoveNode ConfChangeType = 1
ConfChangeUpdateNode ConfChangeType = 2
ConfChangeAddLearnerNode ConfChangeType = 3
)
The above is the proposal payload. It is quite clear, but note one thing: only one node can be changed per proposal. As for why, the Raft paper explains it (changing several members at once could allow two disjoint majorities); read it if you are interested. The sketch below shows what this constraint means in practice.
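Because each proposal may change only a single node, something like replacing member 2 with member 5 has to be issued as two sequential proposals, each waiting until the previous one has been applied. A sketch, where `waitApplied` is a hypothetical helper that blocks until the local node has applied the change:

// Replace node 2 with node 5: two separate ConfChanges, never one.
add := raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 5}
if err := n.ProposeConfChange(ctx, add); err != nil {
    return err
}
waitApplied(add) // hypothetical: block until the add is applied locally

rm := raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2}
if err := n.ProposeConfChange(ctx, rm); err != nil {
    return err
}
waitApplied(rm)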
Propose
func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
data, err := cc.Marshal()
if err != nil {
return err
}
return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
}
- This basically follows the normal proposal path; the only thing to note is that pb.EntryConfChange marks the entry to distinguish it from ordinary proposals.
- Next, let's walk through the commit flow for this entry.
Leader
...
for i, e := range m.Entries {
if e.Type == pb.EntryConfChange {
if r.pendingConfIndex > r.raftLog.applied {
r.logger.Infof("propose conf %s ignored since pending unapplied configuration [index %d, applied %d]",
e.String(), r.pendingConfIndex, r.raftLog.applied)
m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
} else {
r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1
}
}
}
...
if !r.appendEntry(m.Entries...) {
return ErrProposalDropped
}
r.bcastAppend()
- If a previous configuration change has not been applied yet, this new change is dropped and replaced with an empty normal entry.
- Otherwise, the position of this configuration change is recorded in pendingConfIndex.
- From there it is the same as any proposal: append to the local log and broadcast to the other members.
- The question is: once the change has been replicated to the members, how do we learn that it is committed and can be applied? As you might guess, the application layer is notified through the CommittedEntries of a Ready, as sketched below.
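A sketch of the standard Ready loop that delivers the committed change back to the application (`storage`, `transport`, and the `done` channel are placeholders for whatever the application uses):

for {
    select {
    case rd := <-n.Ready():
        storage.Save(rd.HardState, rd.Entries) // placeholder: persist first
        transport.Send(rd.Messages)            // placeholder: then send
        for _, ent := range rd.CommittedEntries {
            if ent.Type == raftpb.EntryConfChange {
                var cc raftpb.ConfChange
                if err := cc.Unmarshal(ent.Data); err != nil {
                    log.Fatalf("unmarshal conf change: %v", err)
                }
                // only a committed entry reaches this point,
                // so it is now safe to apply the change
                n.ApplyConfChange(cc)
            }
        }
        n.Advance()
    case <-done:
        return
    }
}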
apply
Application layer
case raftpb.EntryConfChange:
var cc raftpb.ConfChange
if err := cc.Unmarshal(ents[i].Data); err != nil {
c.logger.Warnf("Failed to unmarshal ConfChange data: %s", err)
continue
}
c.confState = *c.Node.ApplyConfChange(cc)
switch cc.Type {
case raftpb.ConfChangeAddNode:
c.logger.Infof("Applied config change to add node %d, current nodes in channel: %+v", cc.NodeID, c.confState.Nodes)
case raftpb.ConfChangeRemoveNode:
c.logger.Infof("Applied config change to remove node %d, current nodes in channel: %+v", cc.NodeID, c.confState.Nodes)
default:
c.logger.Panic("Programming error, encountered unsupported raft config change")
}
// This ConfChange was introduced by a previously committed config block,
// we can now unblock submitC to accept envelopes.
if c.confChangeInProgress != nil &&
c.confChangeInProgress.NodeID == cc.NodeID &&
c.confChangeInProgress.Type == cc.Type {
if err := c.configureComm(); err != nil {
c.logger.Panicf("Failed to configure communication: %s", err)
}
c.confChangeInProgress = nil
c.configInflight = false
// report the new cluster size
c.Metrics.ClusterSize.Set(float64(len(c.opts.RaftMetadata.Consenters)))
}
if cc.Type == raftpb.ConfChangeRemoveNode && cc.NodeID == c.raftID {
c.logger.Infof("Current node removed from replica set for channel %s", c.channelID)
// calling goroutine, since otherwise it will be blocked
// trying to write into haltC
go c.Halt()
}
}
- Here I am using Fabric as the example; just focus on the key flow.
- On receiving a committed ConfChange from Raft, the first thing to do is call Node.ApplyConfChange(cc).
- Raft's transport layer is managed by the application, so the work is not done once Raft itself has updated its configuration.
- Fabric has to set up gRPC connections according to the latest cluster membership (sketched after this list).
- If a node was removed, that member also has to be shut down; we will get to that below.
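Reduced to its essence, the application-side bookkeeping after ApplyConfChange looks roughly like this (`peers`, `connect`, and `disconnect` are hypothetical application-level constructs, not Fabric's actual API):

// Reconcile the application's connections with the new membership.
switch cc.Type {
case raftpb.ConfChangeAddNode:
    if _, ok := peers[cc.NodeID]; !ok {
        // e.g. dial a gRPC connection using the address in cc.Context
        peers[cc.NodeID] = connect(cc.NodeID, cc.Context)
    }
case raftpb.ConfChangeRemoveNode:
    if conn, ok := peers[cc.NodeID]; ok {
        disconnect(conn) // tear down the gRPC connection
        delete(peers, cc.NodeID)
    }
}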
Raft
ApplyConfChange
case cc := <-n.confc:
if cc.NodeID == None {
select {
case n.confstatec <- pb.ConfState{
Nodes: r.nodes(),
Learners: r.learnerNodes()}:
case <-n.done:
}
break
}
switch cc.Type {
case pb.ConfChangeAddNode:
r.addNode(cc.NodeID)
case pb.ConfChangeAddLearnerNode:
r.addLearner(cc.NodeID)
case pb.ConfChangeRemoveNode:
// block incoming proposal when local node is
// removed
if cc.NodeID == r.id {
propc = nil
}
r.removeNode(cc.NodeID)
case pb.ConfChangeUpdateNode:
default:
panic("unexpected conf type")
}
select {
case n.confstatec <- pb.ConfState{
Nodes: r.nodes(),
Learners: r.learnerNodes()}:
case <-n.done:
}
- First, note a calling trick: if the ConfChange passed in has NodeID set to None, no change is made and the current Raft membership is returned.
- The sketch below shows that trick from the caller's side; after it, let's look at what each change type actually does.
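A minimal sketch of that query trick, assuming `n` is the application's raft.Node:

// An empty ConfChange has NodeID == None, so no change is made;
// the call simply reports the current voters and learners.
state := n.ApplyConfChange(raftpb.ConfChange{})
fmt.Printf("voters: %v, learners: %v\n", state.Nodes, state.Learners)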
addNode&addLearner
func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
pr := r.getProgress(id)
if pr == nil {
r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
} else {
if isLearner && !pr.IsLearner {
// can only change Learner to Voter
r.logger.Infof("%x ignored addLearner: do not support changing %x from raft peer to learner.", r.id, id)
return
}
if isLearner == pr.IsLearner {
// Ignore any redundant addNode calls (which can happen because the
// initial bootstrapping entries are applied twice).
return
}
// change Learner to Voter, use origin Learner progress
delete(r.learnerPrs, id)
pr.IsLearner = false
r.prs[id] = pr
}
if r.id == id {
r.isLearner = isLearner
}
// When a node is first added, we should mark it as recently active.
// Otherwise, CheckQuorum may cause us to step down if it is invoked
// before the added node has a chance to communicate with us.
pr = r.getProgress(id)
pr.RecentActive = true
}
- For a brand-new node, a Progress is initialized with match=0 and next=r.raftLog.lastIndex()+1.
- If the node was previously a learner, it is moved from the learner map into the voter map; the sketch below shows that promotion flow end to end.
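That second branch is exactly the path a learner takes when promoted: it is added as a learner first, catches up on the log, and is then re-proposed as a voter. A sketch, where `learnerCaughtUp` is a hypothetical check on the learner's replication progress:

// 1. Add node 4 as a learner: it replicates the log but cannot vote.
n.ProposeConfChange(ctx, raftpb.ConfChange{
    Type:   raftpb.ConfChangeAddLearnerNode,
    NodeID: 4,
})

// 2. Once it has caught up, promote it to a voter; inside raft this
//    moves its Progress from learnerPrs to prs.
if learnerCaughtUp(4) {
    n.ProposeConfChange(ctx, raftpb.ConfChange{
        Type:   raftpb.ConfChangeAddNode,
        NodeID: 4,
    })
}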
removeNode
func (r *raft) removeNode(id uint64) {
r.delProgress(id)
// do not try to commit or abort transferring if there is no nodes in the cluster.
if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
return
}
// The quorum size is now smaller, so see if any pending entries can
// be committed.
if r.maybeCommit() {
r.bcastAppend()
}
// If the removed node is the leadTransferee, then abort the leadership transferring.
if r.state == StateLeader && r.leadTransferee == id {
r.abortLeaderTransfer()
}
}
- Is simply deleting the Progress really enough? Think about what keeps the whole system running.
- Remember that the application layer drives the ticks? The leader relies on them to send heartbeats, and non-leaders rely on them for election timeouts.
- As a reminder, that tick loop lives in the application (sketched below); with that in mind, let's review what else the application layer does on removal.
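The clock is just a timer owned by the application, along these lines (the interval and the `done` channel are illustrative):

// The application owns the clock: each Tick drives the leader's
// heartbeats or, on followers, the election timeout.
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
    select {
    case <-ticker.C:
        n.Tick()
    case <-done:
        return // once ticking stops, this node stops participating
    }
}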
Application layer
// in the chain's apply path (the same snippet as above):
if cc.Type == raftpb.ConfChangeRemoveNode && cc.NodeID == c.raftID {
// calling goroutine, since otherwise it will be blocked
// trying to write into haltC
go c.Halt()
}

// in the node's run loop:
case <-n.chain.haltC:
ticker.Stop()
n.Stop()
n.storage.Close()
n.logger.Infof("Raft node stopped")
close(n.chain.doneC) // close after all the artifacts are closed
return
}
- As you can see, if the committed change removes the current node, it ultimately leads to ticker.Stop on that node, and so the node finally drops out of the Raft cluster.