go-libp2p-Kad-dht Source Code Analysis

    Package dht implements a distributed hash table that satisfies the ipfs routing interface. This DHT is modeled after Kademlia, with S/Kademlia modifications.
    Package query implements a query manager that drives concurrent workers to query the DHT. A query is set up with a target key, a queryFunc that tasks workers with communicating with a peer, and a set of initial peers. As the query progresses, queryFunc can return closer peers, which are used to navigate closer to the target key in the DHT until an answer is reached.

      Let's start with dht.go, which defines the IpfsDHT struct. IpfsDHT is an implementation of Kademlia with S/Kademlia modifications. It is used to implement the base IpfsRouting module.

type IpfsDHT struct {
	host      host.Host        // the network services we need
	self      peer.ID          // Local peer (yourself)
	peerstore pstore.Peerstore // Peer Registry

	datastore ds.Datastore // Local data

	routingTable *kb.RoutingTable // Array of routing tables for differently distanced nodes
	providers    *providers.ProviderManager

	birth time.Time // When this peer started up

	Validator record.Validator

	ctx  context.Context
	proc goprocess.Process

	strmap map[peer.ID]*messageSender
	smlk   sync.Mutex

	plk sync.Mutex

	protocols []protocol.ID // DHT protocols
}
There are three constructors: New, NewDHT, and NewDHTClient.
1. New creates a new DHT with the given host and options.
func New(ctx context.Context, h host.Host, options ...opts.Option) (*IpfsDHT, error) {
	var cfg opts.Options
	if err := cfg.Apply(append([]opts.Option{opts.Defaults}, options...)...); err != nil {
		return nil, err
	}
	dht := makeDHT(ctx, h, cfg.Datastore, cfg.Protocols)

	// register for network notifs.
	dht.host.Network().Notify((*netNotifiee)(dht))

	dht.proc = goprocessctx.WithContextAndTeardown(ctx, func() error {
		// remove ourselves from network notifs.
		dht.host.Network().StopNotify((*netNotifiee)(dht))
		return nil
	})

	dht.proc.AddChild(dht.providers.Process())
	dht.Validator = cfg.Validator

	if !cfg.Client {
		for _, p := range cfg.Protocols {
			h.SetStreamHandler(p, dht.handleNewStream)
		}
	}
	return dht, nil
}

2. NewDHT creates a new DHT object with the given peer as the "local" host.
An IpfsDHT initialized with this function will respond to DHT requests, whereas one initialized with NewDHTClient will not.
func NewDHT(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
	dht, err := New(ctx, h, opts.Datastore(dstore))
	if err != nil {
		panic(err)
	}
	return dht
}

3. NewDHTClient creates a new DHT object with the given peer as the "local" host.
An IpfsDHT client initialized with this function will not respond to DHT requests. If you need the peer to respond to DHT requests, use NewDHT instead.
func NewDHTClient(ctx context.Context, h host.Host, dstore ds.Batching) *IpfsDHT {
	dht, err := New(ctx, h, opts.Datastore(dstore), opts.Client(true))
	if err != nil {
		panic(err)
	}
	return dht
}
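
As a hedged usage sketch (not code from the package itself): constructing a libp2p host and putting a DHT on top of it. The libp2p.New call and the sync-wrapped map datastore are assumptions based on the go-libp2p / go-datastore APIs of the same era; the in-memory datastore is assumed to satisfy ds.Batching, as in the package's own tests.

package main

import (
	"context"

	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	libp2p "github.com/libp2p/go-libp2p"
	dht "github.com/libp2p/go-libp2p-kad-dht"
)

func main() {
	ctx := context.Background()

	// Create a libp2p host (assumption: default options are enough here).
	h, err := libp2p.New(ctx)
	if err != nil {
		panic(err)
	}

	// In-memory datastore for the DHT's local records.
	store := dssync.MutexWrap(ds.NewMapDatastore())

	// Full DHT node: responds to DHT requests from other peers.
	kad := dht.NewDHT(ctx, h, store)

	// Client-only node: queries the DHT but never serves requests.
	// kad := dht.NewDHTClient(ctx, h, store)

	_ = kad
}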

Creating a DHT calls makeDHT:
func makeDHT(ctx context.Context, h host.Host, dstore ds.Batching, protocols []protocol.ID) *IpfsDHT {
	// create a new routing table
	rt := kb.NewRoutingTable(KValue, kb.ConvertPeerID(h.ID()), time.Minute, h.Peerstore())

	cmgr := h.ConnManager()
	rt.PeerAdded = func(p peer.ID) {
		cmgr.TagPeer(p, "kbucket", 5)
	}
	rt.PeerRemoved = func(p peer.ID) {
		cmgr.UntagPeer(p, "kbucket")
	}

	return &IpfsDHT{
		datastore:    dstore,
		self:         h.ID(),
		peerstore:    h.Peerstore(),
		host:         h,
		strmap:       make(map[peer.ID]*messageSender),
		ctx:          ctx,
		providers:    providers.NewProviderManager(ctx, h.ID(), dstore),
		birth:        time.Now(),
		routingTable: rt,
		protocols:    protocols,
	}
}

    Next come some methods for putting values to peers and getting values back from them:

func (dht *IpfsDHT) putValueToPeer(ctx context.Context, p peer.ID, rec *recpb.Record) error
// getValueOrPeers returns the record for the key from this peer, or a set of closer peers.
func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p peer.ID, key string) (*recpb.Record, []*pstore.PeerInfo, error)
func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.ID, key string) (*pb.Message, error)
func (dht *IpfsDHT) getLocal(key string) (*recpb.Record, error)

    There are also some methods for getting closer peers; they are wrappers around the routing table methods:

func (dht *IpfsDHT) nearestPeersToQuery(pmes *pb.Message, count int) []peer.ID
// betterPeerToQuery returns nearestPeersToQuery, but iff closer than self.
func (dht *IpfsDHT) betterPeersToQuery(pmes *pb.Message, p peer.ID, count int) []peer.ID

dht_bootstrap.go — let's look at the entry point first:
Bootstrap ensures the dht routing table remains healthy as peers come and go. It builds up a list of peers by requesting random peer IDs. The Bootstrap process runs a number of queries each time, and runs every time the signal fires; these parameters are configurable. Unlike BootstrapWithConfig, Bootstrap satisfies the routing interface.
func (dht *IpfsDHT) Bootstrap(ctx context.Context) error {
	proc, err := dht.BootstrapWithConfig(DefaultBootstrapConfig)
	if err != nil {
		return err
	}

	// wait till ctx or dht.Context exits.
	// we have to do it this way to satisfy the Routing interface (contexts)
	go func() {
		defer proc.Close()
		select {
		case <-ctx.Done():
		case <-dht.Context().Done():
		}
	}()

	return nil
}

    BootstrapWithConfig likewise ensures the dht routing table remains healthy as peers come and go. It builds up a list of peers by requesting random peer IDs. The Bootstrap process runs a number of queries each time, and runs every time the signal fires; these parameters are configurable. BootstrapWithConfig returns a process, so the caller can stop it.

func (dht *IpfsDHT) BootstrapWithConfig(cfg BootstrapConfig) (goprocess.Process, error) {
	if cfg.Queries <= 0 {
		return nil, fmt.Errorf("invalid number of queries: %d", cfg.Queries)
	}

	proc := dht.Process().Go(func(p goprocess.Process) {
		<-p.Go(dht.bootstrapWorker(cfg)).Closed()
		for {
			select {
			case <-time.After(cfg.Period):
				<-p.Go(dht.bootstrapWorker(cfg)).Closed()
			case <-p.Closing():
				return
			}
		}
	})

	return proc, nil
}
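
For reference, BootstrapConfig carries the tunables used above. A hedged sketch of its shape, inferred from the fields referenced in the quoted code (the exact field set is an assumption; DefaultBootstrapConfig supplies the package defaults):

type BootstrapConfig struct {
	Queries int           // how many random-ID lookups to run per round
	Period  time.Duration // how often a bootstrap round is triggered
	Timeout time.Duration // per-lookup timeout inside runBootstrap
}

// A caller could, for example, tune the defaults before starting the process:
//
//	cfg := DefaultBootstrapConfig
//	cfg.Queries = 3
//	proc, err := dht.BootstrapWithConfig(cfg)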

    The process above executes the bootstrapWorker method, which in turn calls runBootstrap: it generates a random ID and then looks that peer up in order to fill its own buckets.

func (dht *IpfsDHT) runBootstrap(ctx context.Context, cfg BootstrapConfig) error {
	bslog := func(msg string) {
		log.Debugf("DHT %s dhtRunBootstrap %s -- routing table size: %d", dht.self, msg, dht.routingTable.Size())
	}
	bslog("start")
	defer bslog("end")
	defer log.EventBegin(ctx, "dhtRunBootstrap").Done()

	var merr u.MultiErr

	randomID := func() peer.ID {
		// 16 random bytes is not a valid peer id. it may be fine becuase
		// the dht will rehash to its own keyspace anyway.
		id := make([]byte, 16)
		rand.Read(id)
		id = u.Hash(id)
		return peer.ID(id)
	}

	// bootstrap sequentially, as results will compound
	runQuery := func(ctx context.Context, id peer.ID) {
		ctx, cancel := context.WithTimeout(ctx, cfg.Timeout)
		defer cancel()

		p, err := dht.FindPeer(ctx, id)
		if err == routing.ErrNotFound {
			// this isn't an error. this is precisely what we expect.
		} else if err != nil {
			merr = append(merr, err)
		} else {
			// woah, actually found a peer with that ID? this shouldn't happen normally
			// (as the ID we use is not a real ID). this is an odd error worth logging.
			err := fmt.Errorf("Bootstrap peer error: Actually FOUND peer. (%s, %s)", id, p)
			log.Warningf("%s", err)
			merr = append(merr, err)
		}
	}

	// these should be parallel normally. but can make them sequential for debugging.
	// note that the core/bootstrap context deadline should be extended too for that.
	for i := 0; i < cfg.Queries; i++ {
		id := randomID()
		log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, cfg.Queries, id)
		runQuery(ctx, id)
	}

	// Find self to distribute peer info to our neighbors.
	// Do this after bootstrapping.
	log.Debugf("Bootstrapping query to self: %s", dht.self)
	runQuery(ctx, dht.self)

	if len(merr) > 0 {
		return merr
	}
	return nil
}

    The function for looking up a peer is FindPeer. It first checks locally via FindLocal; if nothing is found, it searches the routing table by calling NearestPeers.

func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID {
	cpl := commonPrefixLen(id, rt.local)

	rt.tabLock.RLock()

	// Get bucket at cpl index or last bucket
	var bucket *Bucket
	if cpl >= len(rt.Buckets) {
		cpl = len(rt.Buckets) - 1
	}
	bucket = rt.Buckets[cpl]

	peerArr := make(peerSorterArr, 0, count)
	peerArr = copyPeersFromList(id, peerArr, bucket.list)
	if len(peerArr) < count {
		// In the case of an unusual split, one bucket may be short or empty.
		// if this happens, search both surrounding buckets for nearby peers
		if cpl > 0 {
			plist := rt.Buckets[cpl-1].list
			peerArr = copyPeersFromList(id, peerArr, plist)
		}
		if cpl < len(rt.Buckets)-1 {
			plist := rt.Buckets[cpl+1].list
			peerArr = copyPeersFromList(id, peerArr, plist)
		}
	}
	rt.tabLock.RUnlock()

	// Sort by distance to local peer
	sort.Sort(peerArr)

	if count < len(peerArr) {
		peerArr = peerArr[:count]
	}

	out := make([]peer.ID, 0, len(peerArr))
	for _, p := range peerArr {
		out = append(out, p.p)
	}

	return out
}
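
NearestPeers starts from the bucket indexed by the common prefix length between the target ID and the local ID. As an illustration of what commonPrefixLen computes, here is a self-contained sketch (commonPrefixLenSketch is a hypothetical name; the real helper lives in the kbucket package and operates on the XOR of the two keys):

// commonPrefixLenSketch counts the leading bits shared by two IDs, i.e. the
// number of leading zero bits in XOR(a, b).
func commonPrefixLenSketch(a, b []byte) int {
	n := 0
	for i := 0; i < len(a) && i < len(b); i++ {
		x := a[i] ^ b[i]
		if x == 0 {
			n += 8 // whole byte identical
			continue
		}
		// count leading zero bits in the first differing byte
		for m := byte(0x80); m > 0 && x&m == 0; m >>= 1 {
			n++
		}
		return n
	}
	return n
}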

If the peer is found it is returned directly; otherwise dhtQueryRunner.queryPeer() is called to search for it. Finally, after bootstrapping, we look up our own ID so that our peer info is distributed to our neighbors.
func (r *dhtQueryRunner) queryPeer(proc process.Process, p peer.ID)

dht_net.go defines the message structures and the send/receive logic.

    The protobuf writer performs multiple small writes when writing a message. We need to buffer these writes to ensure that we don't send a new packet for every single write.

type bufferedDelimitedWriter struct {
	*bufio.Writer
	ggio.WriteCloser
}
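
handleNewMessage below obtains such a writer via newBufferedDelimitedWriter. A hedged sketch of that constructor and the Flush pass-through (the exact signature and return type are assumptions):

func newBufferedDelimitedWriter(str io.Writer) *bufferedDelimitedWriter {
	w := bufio.NewWriter(str)
	return &bufferedDelimitedWriter{
		Writer:      w,
		WriteCloser: ggio.NewDelimitedWriter(w),
	}
}

// Flush drains the bufio layer so a complete message is pushed to the stream
// at once: WriteMsg writes into the buffer, Flush sends it.
func (w *bufferedDelimitedWriter) Flush() error {
	return w.Writer.Flush()
}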
handleNewStream is called to process an incoming stream; it calls handleNewMessage, which receives the message, updates the peer, looks up and dispatches the handler for the message type, and sends the response once handling is done.
func (dht *IpfsDHT) handleNewMessage(s inet.Stream) {
	ctx := dht.Context()
	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := newBufferedDelimitedWriter(cw)
	mPeer := s.Conn().RemotePeer()

	for {
		// receive msg
		pmes := new(pb.Message)
		switch err := r.ReadMsg(pmes); err {
		case io.EOF:
			s.Close()
			return
		case nil:
		default:
			s.Reset()
			log.Debugf("Error unmarshaling data: %s", err)
			return
		}

		// update the peer (on valid msgs only)
		dht.updateFromMessage(ctx, mPeer, pmes)

		// get handler for this msg type.
		handler := dht.handlerForMsgType(pmes.GetType())
		if handler == nil {
			s.Reset()
			log.Debug("got back nil handler from handlerForMsgType")
			return
		}

		// dispatch handler.
		rpmes, err := handler(ctx, mPeer, pmes)
		if err != nil {
			s.Reset()
			log.Debugf("handle message error: %s", err)
			return
		}

		// if nil response, return it before serializing
		if rpmes == nil {
			log.Debug("got back nil response from request")
			continue
		}

		// send out response msg
		err = w.WriteMsg(rpmes)
		if err == nil {
			err = w.Flush()
		}
		if err != nil {
			s.Reset()
			log.Debugf("send response error: %s", err)
			return
		}
	}
}

    Next, sending a request: sendRequest sends out a request, but also makes sure to measure the round-trip time for latency bookkeeping.

func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	ms, err := dht.messageSenderForPeer(p)
	if err != nil {
		return nil, err
	}

	start := time.Now()

	rpmes, err := ms.SendRequest(ctx, pmes)
	if err != nil {
		return nil, err
	}

	// update the peer (on valid msgs only)
	dht.updateFromMessage(ctx, p, rpmes)

	dht.peerstore.RecordLatency(p, time.Since(start))
	log.Event(ctx, "dhtReceivedMessage", dht.self, p, rpmes)
	return rpmes, nil
}
Similarly, sendMessage first obtains a messageSender for the peer and then sends the message:
func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error

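A hedged sketch of the messageSender fields, listing only what the quoted methods below actually reference (the real struct may hold more, e.g. a buffered writer used by writeMsg):

type messageSender struct {
	s   inet.Stream     // open stream to the remote peer, re-opened by prep on error
	r   ggio.ReadCloser // delimited reader over the stream
	lk  sync.Mutex      // serializes use of the stream
	p   peer.ID         // the remote peer
	dht *IpfsDHT        // back-reference to the DHT

	singleMes int // sends that needed a retry; past streamReuseTries the stream is not reused
}
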
messageSender wraps the stream, the delimited reader, the remote peer, and a back-reference to the DHT. It has several methods; for example, sending a message:
func (ms *messageSender) SendMessage(ctx context.Context, pmes *pb.Message) error {
	ms.lk.Lock()
	defer ms.lk.Unlock()
	retry := false
	for {
		if err := ms.prep(); err != nil {
			return err
		}

		if err := ms.writeMsg(pmes); err != nil {
			ms.s.Reset()
			ms.s = nil

			if retry {
				log.Info("error writing message, bailing: ", err)
				return err
			} else {
				log.Info("error writing message, trying again: ", err)
				retry = true
				continue
			}
		}

		log.Event(ctx, "dhtSentMessage", ms.dht.self, ms.p, pmes)

		if ms.singleMes > streamReuseTries {
			go inet.FullClose(ms.s)
			ms.s = nil
		} else if retry {
			ms.singleMes++
		}

		return nil
	}
}

Reading a message:
func (ms *messageSender) ctxReadMsg(ctx context.Context, mes *pb.Message) error {
	errc := make(chan error, 1)
	go func(r ggio.ReadCloser) {
		errc <- r.ReadMsg(mes)
	}(ms.r)

	t := time.NewTimer(dhtReadMessageTimeout)
	defer t.Stop()

	select {
	case err := <-errc:
		return err
	case <-ctx.Done():
		return ctx.Err()
	case <-t.C:
		return ErrReadTimeout
	}
}

handlers.go contains the concrete handler logic, for example finding a peer:
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
	ctx = log.Start(ctx, "handleFindPeer")
	defer func() { log.FinishWithErr(ctx, _err) }()
	log.SetTag(ctx, "peer", p)
	resp := pb.NewMessage(pmes.GetType(), nil, pmes.GetClusterLevel())
	var closest []peer.ID

	// if looking for self... special case where we send it on CloserPeers.
	targetPid := peer.ID(pmes.GetKey())
	if targetPid == dht.self {
		closest = []peer.ID{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)

		// Never tell a peer about itself.
		if targetPid != p {
			// If we're connected to the target peer, report their
			// peer info. This makes FindPeer work even if the
			// target peer isn't in our routing table.
			//
			// Alternatively, we could just check our peerstore.
			// However, we don't want to return out of date
			// information. We can change this in the future when we
			// add a progressive, asynchronous SearchPeer function
			// and improve peer routing in the host.
			switch dht.host.Network().Connectedness(targetPid) {
			case inet.Connected, inet.CanConnect:
				closest = append(closest, targetPid)
			}
		}
	}

	if closest == nil {
		log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
		return resp, nil
	}

	closestinfos := pstore.PeerInfos(dht.peerstore, closest)
	// possibly an over-allocation but this array is temporary anyways.
	withAddresses := make([]pstore.PeerInfo, 0, len(closestinfos))
	for _, pi := range closestinfos {
		if len(pi.Addrs) > 0 {
			withAddresses = append(withAddresses, pi)
			log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
		}
	}

	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
	return resp, nil
}
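
handleNewMessage dispatches to these handlers through handlerForMsgType. A hedged sketch of that dispatch table, assuming the standard message types defined in the pb package:

type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)

func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
	switch t {
	case pb.Message_GET_VALUE:
		return dht.handleGetValue
	case pb.Message_PUT_VALUE:
		return dht.handlePutValue
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	case pb.Message_ADD_PROVIDER:
		return dht.handleAddProvider
	case pb.Message_GET_PROVIDERS:
		return dht.handleGetProviders
	case pb.Message_PING:
		return dht.handlePing
	default:
		return nil // unknown type: handleNewMessage resets the stream
	}
}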

lookup.go normalizes keys so they can be converted into node IDs. It also contains an important method, GetClosestPeers: this is the Kademlia node-lookup operation, returning the K peers closest to the given key.
func (dht *IpfsDHT) GetClosestPeers(ctx context.Context, key string) (<-chan peer.ID, error) {
	e := log.EventBegin(ctx, "getClosestPeers", loggableKey(key))
	tablepeers := dht.routingTable.NearestPeers(kb.ConvertKey(key), AlphaValue)
	if len(tablepeers) == 0 {
		return nil, kb.ErrLookupFailure
	}

	out := make(chan peer.ID, KValue)

	// since the query doesnt actually pass our context down
	// we have to hack this here. whyrusleeping isnt a huge fan of goprocess
	parent := ctx
	query := dht.newQuery(key, func(ctx context.Context, p peer.ID) (*dhtQueryResult, error) {
		// For DHT query command
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type: notif.SendingQuery,
			ID:   p,
		})

		pmes, err := dht.findPeerSingle(ctx, p, peer.ID(key))
		if err != nil {
			log.Debugf("error getting closer peers: %s", err)
			return nil, err
		}
		peers := pb.PBPeersToPeerInfos(pmes.GetCloserPeers())

		// For DHT query command
		notif.PublishQueryEvent(parent, &notif.QueryEvent{
			Type:      notif.PeerResponse,
			ID:        p,
			Responses: peers,
		})

		return &dhtQueryResult{closerPeers: peers}, nil
	})

	go func() {
		defer close(out)
		defer e.Done()
		// run it!
		res, err := query.Run(ctx, tablepeers)
		if err != nil {
			log.Debugf("closestPeers query run error: %s", err)
		}

		if res != nil && res.queriedSet != nil {
			sorted := kb.SortClosestPeers(res.queriedSet.Peers(), kb.ConvertKey(key))
			if len(sorted) > KValue {
				sorted = sorted[:KValue]
			}

			for _, p := range sorted {
				out <- p
			}
		}
	}()

	return out, nil
}
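
A hedged caller-side sketch of consuming GetClosestPeers (findClosest is a hypothetical helper, assumed to live somewhere with access to *IpfsDHT):

func findClosest(ctx context.Context, kad *IpfsDHT, key string) ([]peer.ID, error) {
	ch, err := kad.GetClosestPeers(ctx, key)
	if err != nil {
		return nil, err
	}
	var out []peer.ID
	// The channel is closed after at most KValue peers have been sent,
	// sorted by XOR distance to the key.
	for p := range ch {
		out = append(out, p)
	}
	return out, nil
}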

notif.go mainly contains two methods, Connected and Disconnected:
func (nn *netNotifiee) Connected(n inet.Network, v inet.Conn) {
	dht := nn.DHT()
	select {
	case <-dht.Process().Closing():
		return
	default:
	}

	p := v.RemotePeer()
	protos, err := dht.peerstore.SupportsProtocols(p, dht.protocolStrs()...)
	if err == nil && len(protos) != 0 {
		// We lock here for consistency with the lock in testConnection.
		// This probably isn't necessary because (dis)connect notifications
		// are serialized, but it's nice to be consistent.
		dht.plk.Lock()
		defer dht.plk.Unlock()
		if dht.host.Network().Connectedness(p) == inet.Connected {
			dht.Update(dht.Context(), p)
		}
		return
	}

	// Note: Unfortunately, the peerstore may not yet know that this peer is
	// a DHT server. So, if it didn't return a positive response above, test
	// manually.
	go nn.testConnection(v)
}

func (nn *netNotifiee) testConnection(v inet.Conn) {
	dht := nn.DHT()
	p := v.RemotePeer()

	// Forcibly use this connection. Otherwise, if we have two connections, we could:
	// 1. Test it twice.
	// 2. Have it closed from under us leaving the second (open) connection untested.
	s, err := v.NewStream()
	if err != nil {
		// Connection error
		return
	}
	defer inet.FullClose(s)

	selected, err := mstream.SelectOneOf(dht.protocolStrs(), s)
	if err != nil {
		// Doesn't support the protocol
		return
	}
	// Remember this choice (makes subsequent negotiations faster)
	dht.peerstore.AddProtocols(p, selected)

	// We lock here as we race with disconnect. If we didn't lock, we could
	// finish processing a connect after handling the associated disconnect
	// event and add the peer to the routing table after removing it.
	dht.plk.Lock()
	defer dht.plk.Unlock()
	if dht.host.Network().Connectedness(p) == inet.Connected {
		dht.Update(dht.Context(), p)
	}
}
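
The matching Disconnected handler is not quoted here. A hedged sketch of its core logic, under the assumption that it simply drops the peer from the routing table once the last connection to it is gone (mirroring the locking comments above):

func (nn *netNotifiee) Disconnected(n inet.Network, v inet.Conn) {
	dht := nn.DHT()
	select {
	case <-dht.Process().Closing():
		return
	default:
	}
	p := v.RemotePeer()

	// Lock against a racing Connected/testConnection for the same peer.
	dht.plk.Lock()
	defer dht.plk.Unlock()
	if dht.host.Network().Connectedness(p) == inet.Connected {
		// Still connected via another connection; keep the peer.
		return
	}
	dht.routingTable.Remove(p)
}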

query.go implements a query manager that drives concurrent workers to query the DHT. A query is set up with a target key, a queryFunc that tasks workers with communicating with a peer, and a set of initial peers. As the query progresses, queryFunc can return closer peers, which are used to navigate closer to the target key in the DHT until an answer is reached.
type dhtQuery struct {
	dht         *IpfsDHT
	key         string    // the key we're querying for
	qfunc       queryFunc // the function to execute per peer
	concurrency int       // the concurrency parameter
}
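
newQuery, referenced from GetClosestPeers above, presumably just fills in this struct; a hedged sketch (an assumption, using the package's default concurrency AlphaValue):

func (dht *IpfsDHT) newQuery(k string, f queryFunc) *dhtQuery {
	return &dhtQuery{
		key:         k,
		dht:         dht,
		qfunc:       f,
		concurrency: AlphaValue,
	}
}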

type dhtQueryResult struct {
	value         []byte             // GetValue
	peer          pstore.PeerInfo    // FindPeer
	providerPeers []pstore.PeerInfo  // GetProviders
	closerPeers   []*pstore.PeerInfo // *
	success       bool

	finalSet   *pset.PeerSet
	queriedSet *pset.PeerSet
}
Run executes the query against a set of starting peers:
func (r *dhtQueryRunner) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	r.log = log
	r.runCtx = ctx

	if len(peers) == 0 {
		log.Warning("Running query with no peers!")
		return nil, nil
	}

	// setup concurrency rate limiting
	for i := 0; i < r.query.concurrency; i++ {
		r.rateLimit <- struct{}{}
	}

	// add all the peers we got first.
	for _, p := range peers {
		r.addPeerToQuery(p)
	}

	// go do this thing.
	// do it as a child proc to make sure Run exits
	// ONLY AFTER spawn workers has exited.
	r.proc.Go(r.spawnWorkers)

	// so workers are working.
	// wait until they're done.
	err := routing.ErrNotFound

	// now, if the context finishes, close the proc.
	// we have to do it here because the logic before is setup, which
	// should run without closing the proc.
	ctxproc.CloseAfterContext(r.proc, ctx)

	select {
	case <-r.peersRemaining.Done():
		r.proc.Close()
		r.RLock()
		defer r.RUnlock()

		err = routing.ErrNotFound

		// if every query to every peer failed, something must be very wrong.
		if len(r.errs) > 0 && len(r.errs) == r.peersSeen.Size() {
			log.Debugf("query errs: %s", r.errs)
			err = r.errs[0]
		}

	case <-r.proc.Closed():
		r.RLock()
		defer r.RUnlock()
		err = context.DeadlineExceeded
	}

	if r.result != nil && r.result.success {
		return r.result, nil
	}

	return &dhtQueryResult{
		finalSet:   r.peersSeen,
		queriedSet: r.peersQueried,
	}, err
}

records.go contains methods for fetching public keys, while routing.go implements the Routing interface for the IpfsDHT struct, including PutValue, GetValue, SearchValue, and so on. util.go holds some simple helper functions.
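
A hedged caller-side sketch of the Routing methods implemented in routing.go (the "/v/hello" key is purely illustrative; in practice the key's namespace must be one the DHT's Validator accepts, such as /pk/ or /ipns/):

func putAndGet(ctx context.Context, kad *IpfsDHT) ([]byte, error) {
	// Store a record: validated locally, then pushed to the closest peers
	// found via GetClosestPeers.
	if err := kad.PutValue(ctx, "/v/hello", []byte("world")); err != nil {
		return nil, err
	}
	// Look the record back up: GetValue queries the network and returns
	// the best validated value it finds.
	return kad.GetValue(ctx, "/v/hello")
}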
