Ethereum Source Code (3): The Node Discovery Protocol (Kademlia)


3. P2P Node Discovery

(1) Distributed Hash Tables (DHT)

A DHT (Distributed Hash Table) is a method of distributed storage. Without any central server, each client is responsible for a small range of the routing space and stores a small share of the data; together, the nodes provide addressing and storage for the whole network. DHTs grew out of the needs of p2p networking: node lookup in second-generation p2p file-sharing systems was difficult and network-intensive, which pushed third-generation systems to adopt DHTs for fast lookup of both nodes and resources.

A distributed hash table shares with an ordinary hash table the ability to look up entries quickly. It differs in two ways: 1) an ordinary hash table is local, used for fast in-memory insertion and lookup, whereas a DHT effectively scatters the hash table's buckets across different machines; 2) growing or shrinking an ordinary hash table forces all data to be rehashed, whereas a DHT supports a dynamic number of nodes, which may join or leave at will.
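
To make the addressing idea concrete, here is a minimal, self-contained sketch (my own illustration, not go-ethereum code; SHA-256 stands in for Keccak): a key lives on the node whose ID is XOR-closest to the key's hash, so when nodes join or leave, only ownership of nearby keys moves and nothing is globally rehashed.

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
)

// xorDistance returns the bytewise XOR of two IDs; comparing these
// byte strings lexicographically compares XOR distances.
func xorDistance(a, b [32]byte) []byte {
    d := make([]byte, 32)
    for i := range a {
        d[i] = a[i] ^ b[i]
    }
    return d
}

// ownerOf returns the ID of the node responsible for storing key.
func ownerOf(key string, nodeIDs [][32]byte) [32]byte {
    target := sha256.Sum256([]byte(key))
    best := nodeIDs[0]
    for _, id := range nodeIDs[1:] {
        if bytes.Compare(xorDistance(id, target), xorDistance(best, target)) < 0 {
            best = id
        }
    }
    return best
}

func main() {
    nodes := [][32]byte{
        sha256.Sum256([]byte("node-a")),
        sha256.Sum256([]byte("node-b")),
        sha256.Sum256([]byte("node-c")),
    }
    fmt.Printf("owner of \"some-key\": %x\n", ownerOf("some-key", nodes))
}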

In Ethereum, the DHT uses the Kademlia (KAD) protocol.
Source: https://blog.csdn.net/lj900911/article/details/83861438

In a Kad network, every node is treated as a leaf of a binary tree, and each node's position is uniquely determined by the shortest unique prefix of its ID.

The ID is a 256-bit address obtained by hashing (Keccak-256) the node's 512-bit public key.
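
As a hedged sketch of that derivation (the helper name nodeID is mine; geth's own version lives in the p2p/enode package, and golang.org/x/crypto/sha3 stands in here for geth's crypto helpers):

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"

    "golang.org/x/crypto/sha3"
)

// nodeID derives the 256-bit table ID: Keccak-256 over the 64-byte
// X||Y encoding of the public key (the "512-bit public key").
func nodeID(pub *ecdsa.PublicKey) [32]byte {
    e := make([]byte, 64)
    pub.X.FillBytes(e[:32]) // 32-byte big-endian X coordinate
    pub.Y.FillBytes(e[32:]) // 32-byte big-endian Y coordinate
    h := sha3.NewLegacyKeccak256()
    h.Write(e)
    var id [32]byte
    copy(id[:], h.Sum(nil))
    return id
}

func main() {
    // P-256 keeps the demo in the standard library; Ethereum uses secp256k1.
    key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    fmt.Printf("node ID: %x\n", nodeID(&key.PublicKey))
}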

1. How an ID Maps onto the Binary Tree

How is a node mapped onto the binary tree?

1) Write the key (the node ID) in binary and take its shortest unique prefix;
2) the n-th bit of that binary string selects the branch at depth n of the tree, so concatenating the branches along the path gives the ID's complete binary representation;
3) a "1" means descend into the left subtree and a "0" into the right (the opposite convention works just as well);
4) the leaf reached after these steps is the node corresponding to the key.
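The practical consequence of this tree view is the XOR "log-distance": two IDs that share a longer prefix sit in a smaller common subtree. A minimal runnable sketch of the metric (geth's equivalent is enode.LogDist):

package main

import (
    "fmt"
    "math/bits"
)

// logDist returns 256 minus the number of leading zero bits of a XOR b,
// i.e. the height of the smallest subtree containing both IDs. IDs that
// share a longer prefix have a smaller log-distance.
func logDist(a, b [32]byte) int {
    lz := 0
    for i := range a {
        x := a[i] ^ b[i]
        if x == 0 {
            lz += 8
            continue
        }
        lz += bits.LeadingZeros8(x)
        break
    }
    return len(a)*8 - lz
}

func main() {
    a := [32]byte{0b10000000} // differs from b in the very first bit
    b := [32]byte{0b00000000}
    fmt.Println(logDist(a, b)) // 256: no common prefix
    fmt.Println(logDist(a, a)) // 0: identical IDs
}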

In Ethereum, the core KAD logic is implemented in p2p/discover/table.go. The Kademlia protocol defines four RPC types: PING, STORE, FINDNODE and FINDVALUE. Ethereum's KAD implements only PING and FINDNODE.

First, creating a new Table:

func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
    // newTable takes a transport (which provides the KAD operations),
    // the node database, and the bootstrap nodes.
    tab := &Table{
        net:        t,
        db:         db,
        refreshReq: make(chan chan struct{}),
        initDone:   make(chan struct{}),
        closeReq:   make(chan struct{}),
        closed:     make(chan struct{}),
        rand:       mrand.New(mrand.NewSource(0)),
        ips:        netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
    }
    // register the bootstrap (fallback) nodes
    if err := tab.setFallbackNodes(bootnodes); err != nil {
        return nil, err
    }
    // create a bucket object for each buckets[i]
    for i := range tab.buckets {
        tab.buckets[i] = &bucket{
            ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
        }
    }
    // seed the PRNG; it is used later when sampling random nodes
    // from the table (see seedRand below)
    tab.seedRand()
    // load seed nodes from the node database
    tab.loadSeedNodes()
    // this goroutine refreshes the table and handles shutdown
    go tab.loop()
    return tab, nil
}
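
A quick aside on how a node ID lands in one of these buckets: the table keys buckets by log-distance from our own ID, collapsing all very-close distances into bucket 0. In the same file this looks roughly like the following (the exact constants can differ between geth versions):

const (
    hashBits          = len(common.Hash{}) * 8 // 256
    nBuckets          = hashBits / 15          // number of buckets (17)
    bucketMinDistance = hashBits - nBuckets    // log-distance of the closest bucket
)

// bucket returns the bucket a given node ID belongs to.
func (tab *Table) bucket(id enode.ID) *bucket {
    d := enode.LogDist(tab.self().ID(), id)
    if d <= bucketMinDistance {
        return tab.buckets[0]
    }
    return tab.buckets[d-bucketMinDistance-1]
}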

// seedRand re-seeds the table's math/rand source with entropy from crypto/rand.
func (tab *Table) seedRand() {
    var b [8]byte
    crand.Read(b[:])

    tab.mutex.Lock()
    tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
    tab.mutex.Unlock()
}

// loadSeedNodes pulls previously-seen nodes from the database, appends the
// bootstrap nodes (tab.nursery), and re-adds them all to the table.
func (tab *Table) loadSeedNodes() {
    seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
    seeds = append(seeds, tab.nursery...)
    for i := range seeds {
        seed := seeds[i]
        age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
        log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
        // add each seed to the table via addSeenNode
        tab.addSeenNode(seed)
    }
}

Let's take a look at tab.loop():


// loop schedules refresh, revalidate runs and coordinates shutdown.
func (tab *Table) loop() {
    var (
        revalidate     = time.NewTimer(tab.nextRevalidateTime())
        refresh        = time.NewTicker(refreshInterval)
        copyNodes      = time.NewTicker(copyNodesInterval)
        refreshDone    = make(chan struct{})           // where doRefresh reports completion
        revalidateDone chan struct{}                   // where doRevalidate reports completion
        waiting        = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
    )
    defer refresh.Stop()
    defer revalidate.Stop()
    defer copyNodes.Stop()

    // Start initial refresh.
    go tab.doRefresh(refreshDone)

loop:
    for {
        select {
            // periodic refresh of the table (every refreshInterval)
        case <-refresh.C:
            tab.seedRand()
            if refreshDone == nil {
                refreshDone = make(chan struct{})
                go tab.doRefresh(refreshDone)
            }
            // an explicit refresh request also triggers a refresh
        case req := <-tab.refreshReq:
            waiting = append(waiting, req)
            if refreshDone == nil {
                refreshDone = make(chan struct{})
                go tab.doRefresh(refreshDone)
            }
            // refresh finished: notify waiters by closing their channels
        case <-refreshDone:
            for _, ch := range waiting {
                close(ch)
            }
            waiting, refreshDone = nil, nil
            // check whether the last node of a random bucket is still alive
        case <-revalidate.C:
            revalidateDone = make(chan struct{})
            go tab.doRevalidate(revalidateDone)
            // revalidation done: schedule the next run
        case <-revalidateDone:
            revalidate.Reset(tab.nextRevalidateTime())
            revalidateDone = nil
            // nodes that have stayed alive longer than seedMinTableTime are persisted to the db
        case <-copyNodes.C:
            go tab.copyLiveNodes()
            // shutdown requested: stop accepting requests and exit the loop
        case <-tab.closeReq:
            break loop
        }
    }

    if refreshDone != nil {
        <-refreshDone
    }
    for _, ch := range waiting {
        close(ch)
    }
    if revalidateDone != nil {
        <-revalidateDone
    }
    close(tab.closed)
}
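
doRevalidate (scheduled above at randomized intervals via nextRevalidateTime) pings the last, i.e. least recently seen, node of a random non-empty bucket: a live node is bumped back to the front, a dead one is swapped for an entry from the bucket's replacement list. A paraphrased sketch of that flow (not the verbatim source; nodeToRevalidate, bump and replace are the helpers in table.go):

func (tab *Table) doRevalidate(done chan<- struct{}) {
    defer func() { done <- struct{}{} }()

    // Pick the least recently seen node: the last entry of a
    // randomly chosen non-empty bucket.
    last, bi := tab.nodeToRevalidate()
    if last == nil {
        return // no non-empty bucket, nothing to do
    }

    // Ping the node and wait for a pong.
    err := tab.net.ping(last.ID(), last.addr())

    tab.mutex.Lock()
    defer tab.mutex.Unlock()
    b := tab.buckets[bi]
    if err == nil {
        // The node responded: move it back to the front of the bucket.
        b.bump(last)
        return
    }
    // No reply: promote a node from the bucket's replacement list,
    // or delete the dead node if there is none.
    tab.replace(b, last)
}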

Next, the doRefresh function, which performs the actual refresh:

func (tab *Table) doRefresh(done chan struct{}) {
    defer close(done)

    // Load nodes from the database and insert
    // them. This should yield a few previously seen nodes that are
    // (hopefully) still alive.
    tab.loadSeedNodes()

    // Run self lookup to discover new neighbor nodes.
    // We can only do this if we have a secp256k1 identity.
    var key ecdsa.PublicKey
    if err := tab.self().Load((*enode.Secp256k1)(&key)); err == nil {
        tab.lookup(encodePubkey(&key), false)
    }

    // The Kademlia paper specifies that the bucket refresh should
    // perform a lookup in the least recently used bucket. We cannot
    // adhere to this because the findnode target is a 512bit value
    // (not hash-sized) and it is not easily possible to generate a
    // sha3 preimage that falls into a chosen bucket.
    // We perform a few lookups with a random target instead.
    for i := 0; i < 3; i++ {
        var target encPubkey
        crand.Read(target[:])
        tab.lookup(target, false)
    }
}
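
Refreshes can also be requested on demand: tab.refresh(), called from lookup below when the table is empty, is just the producer side of the refreshReq channel that loop() services. Roughly, in the same file:

// refresh asks loop() to run a refresh and returns a channel that is
// closed once the refresh is done (or the table has been shut down).
func (tab *Table) refresh() <-chan struct{} {
    done := make(chan struct{})
    select {
    case tab.refreshReq <- done:
    case <-tab.closed:
        close(done)
    }
    return done
}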

Node lookup is done mainly by tab.lookup(). Let's look at the lookup function:

func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
    var (
        target         = enode.ID(crypto.Keccak256Hash(targetKey[:]))
        asked          = make(map[enode.ID]bool)
        seen           = make(map[enode.ID]bool)
        reply          = make(chan []*node, alpha)
        pendingQueries = 0
        result         *nodesByDistance
    )
    // don't query further if we hit ourself.
    // unlikely to happen often in practice.
    asked[tab.self().ID()] = true

    for {
        tab.mutex.Lock()
        // generate initial result set
        // collect the nodes closest to target, at most bucketSize = 16 of them
        result = tab.closest(target, bucketSize)
        tab.mutex.Unlock()
        if len(result.entries) > 0 || !refreshIfEmpty {
            break
        }
        // The result set is empty, all nodes were dropped, refresh.
        // We actually wait for the refresh to complete here. The very
        // first query will hit this case and run the bootstrapping
        // logic.
        <-tab.refresh()
        refreshIfEmpty = false
    }

    for {
        // ask the alpha closest nodes that we haven't asked yet
        // send findnode requests to up to alpha = 3 not-yet-asked nodes from
        // the result set, asking each for the nodes it knows closest to target
        for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
            n := result.entries[i]
            if !asked[n.ID()] {
                asked[n.ID()] = true
                pendingQueries++
                // issue the findnode request
                go tab.findnode(n, targetKey, reply)
            }
        }
        // every candidate has been asked
        if pendingQueries == 0 {
            // we have asked all closest nodes, stop the search
            break
        }
        select {
            // merge the replied nodes into the result set
        case nodes := <-reply:
            for _, n := range nodes {
                if n != nil && !seen[n.ID()] {
                    seen[n.ID()] = true
                    // insert in order of distance to target
                    result.push(n, bucketSize)
                }
            }
            // shutdown requested
        case <-tab.closeReq:
            return nil // shutdown, no need to continue.
        }
        pendingQueries--
    }
    return result.entries
}
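
The result variable is a nodesByDistance, a small helper that keeps at most bucketSize entries sorted by XOR distance to the target. Its push method (roughly, from the same file) does a sorted insert and silently drops nodes farther than everything in a full list:

// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
    entries []*node
    target  enode.ID
}

// push adds n to the list, keeping at most maxElems entries.
func (h *nodesByDistance) push(n *node, maxElems int) {
    // find the insertion point that keeps entries sorted by distance
    ix := sort.Search(len(h.entries), func(i int) bool {
        return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
    })
    if len(h.entries) < maxElems {
        h.entries = append(h.entries, n)
    }
    if ix == len(h.entries) {
        // farther away than all nodes we already have;
        // if there was room for it, the node is now the last element
    } else {
        // slide existing entries down to make room; when the list is
        // full this overwrites (drops) the farthest entry
        copy(h.entries[ix+1:], h.entries[ix:])
        h.entries[ix] = n
    }
}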

Now findnode:

func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
    // number of consecutive findnode failures recorded for this node
    fails := tab.db.FindFails(n.ID(), n.IP())
    // send the findnode request over UDP
    r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
    if err == errClosed {
        // Avoid recording failures on shutdown.
        reply <- nil
        return
    } else if len(r) == 0 {
        fails++
        tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
        log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
        // too many failures: drop the node from the table
        if fails >= maxFindnodeFailures {
            log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
            tab.delete(n)
        }
    } else if fails > 0 {
        tab.db.UpdateFindFails(n.ID(), n.IP(), fails-1)
    }

    // Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
    // just remove those again during revalidation.

    // add as many of the returned nodes to the Table as possible
    for _, n := range r {
        tab.addSeenNode(n)
    }
    reply <- r
}

Finally, let's look at Table.addSeenNode():

// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
// added to the replacements list.
//
// The caller must not hold tab.mutex.
func (tab *Table) addSeenNode(n *node) {
    // never add ourselves to the table
    if n.ID() == tab.self().ID() {
        return
    }

    tab.mutex.Lock()
    defer tab.mutex.Unlock()

    
    b := tab.bucket(n.ID())
    if contains(b.entries, n.ID()) {
        // Already in bucket, don't add.
        return
    }
    // bucket full: try to park the node on the replacement list instead
    if len(b.entries) >= bucketSize {
        // Bucket full, maybe add as replacement.
        tab.addReplacement(b, n)
        return
    }
    if !tab.addIP(b, n.IP()) {
        // Can't add: IP limit reached.
        return
    }
    // Add to the end of the bucket and remove n from the replacement list:
    b.entries = append(b.entries, n)
    b.replacements = deleteNode(b.replacements, n)
    n.addedAt = time.Now()
    if tab.nodeAddedHook != nil {
        tab.nodeAddedHook(n)
    }
}
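
For completeness, addReplacement (called above when the bucket is full) parks the node on the bucket's replacement list, newest first, capped at maxReplacements and subject to the same per-bucket IP limit. Roughly:

func (tab *Table) addReplacement(b *bucket, n *node) {
    for _, e := range b.replacements {
        if e.ID() == n.ID() {
            return // already in the replacement list
        }
    }
    if !tab.addIP(b, n.IP()) {
        return // per-bucket IP limit reached
    }
    // pushNode prepends n and trims the list to maxReplacements,
    // returning the entry that fell off the end (if any).
    var removed *node
    b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
    if removed != nil {
        tab.removeIP(b, removed.IP())
    }
}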

This completes the core KAD logic. A few detail paths were skipped, but the overall flow is as described above.
The actual PING/PONG and FINDNODE packet handling lives in p2p/discover/udp.go; I'll look at that another time.
This post only analyzes KAD-based node discovery.
