putValue/getValue are used by the IPNS naming system (public-key and IPNS records). The records look like:
/pk/encoded(key+value) : timestamp
/ipns/encoded(key+value) : timestamp
Provider records are the DHT's key/value mapping from content keys to the peers that serve them. The data format is:
/providers/encoded(key)/encoded(value) : encoded(unixnano)
example: /providers/CIQIF7JNQBSUDCM4ZDQWGNFQFCATUJHDRKVLLAV7XWM7CKIU7C6LVEY/CIQEWHPUHMAV572SCJNKBRDA2KSS4M7KV2U24OJD3TO3OSE2BWVUXFY
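As a sketch of how such a datastore key is most likely assembled (the helper name here is illustrative, and the unpadded base32 alphabet is an assumption that matches the all-uppercase, '='-free example above):

package main

import (
	"encoding/base32"
	"fmt"
)

// provKeySketch builds /providers/encoded(key)/encoded(peerID) using unpadded
// base32, matching the shape of the example record above. This is a sketch of
// the assumed encoding, not the library's own helper.
func provKeySketch(key, peerID []byte) string {
	enc := base32.RawStdEncoding
	return "/providers/" + enc.EncodeToString(key) + "/" + enc.EncodeToString(peerID)
}

func main() {
	key := []byte("some-multihash-bytes") // hypothetical content key
	pid := []byte("some-peer-id-bytes")   // hypothetical provider peer ID
	fmt.Println(provKeySketch(key, pid))
}

Incoming DHT messages are routed to a handler per message type: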
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
	switch t {
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	case pb.Message_PING:
		return dht.handlePing
	}

	if dht.enableValues {
		switch t {
		case pb.Message_GET_VALUE:
			return dht.handleGetValue
		case pb.Message_PUT_VALUE:
			return dht.handlePutValue
		}
	}

	if dht.enableProviders {
		switch t {
		case pb.Message_ADD_PROVIDER:
			return dht.handleAddProvider
		case pb.Message_GET_PROVIDERS:
			return dht.handleGetProviders
		}
	}

	return nil
}
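For context, here is a rough sketch of how the returned handler would be invoked once a message has been read off the wire (the dispatch function name is illustrative; the real stream-handling code also deals with I/O, metrics and error reporting):

// dispatchSketch: illustrative only. Look up the handler for the message type
// and invoke it; a nil handler means the type is unknown or the corresponding
// subsystem (values/providers) is disabled on this node.
func (dht *IpfsDHT) dispatchSketch(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	handler := dht.handlerForMsgType(pmes.GetType())
	if handler == nil {
		return nil, fmt.Errorf("no handler for message type %v", pmes.GetType())
	}
	return handler(ctx, p, pmes)
}

The GET_PROVIDERS handler is the one we care about here: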
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
	ctx = logger.Start(ctx, "handleGetProviders")
	defer func() { logger.FinishWithErr(ctx, _err) }()
	logger.SetTag(ctx, "peer", p)

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := pmes.GetKey()
	if len(key) > 80 {
		return nil, fmt.Errorf("handleGetProviders key size too large")
	}
	logger.SetTag(ctx, "key", key)

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	logger.Debugf("%s begin", reqDesc)
	defer logger.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	// providersKeyPrefix + base32.RawStdEncoding.EncodeToString(k)
	has, err := dht.datastore.Has(convertToDsKey(key))
	if err != nil && err != ds.ErrNotFound {
		logger.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		logger.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if len(providers) > 0 {
		// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
		infos := pstore.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		logger.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize)
	if closer != nil {
		// TODO: pstore.PeerInfos should move to core (=> peerstore.AddrInfos).
		infos := pstore.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		logger.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}
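On the querying side, the response carries two lists: ProviderPeers (peers that claim to have the content) and CloserPeers (peers to continue the iterative lookup with). A hedged sketch of how a caller might unpack them, assuming the pb package's inverse helper PBPeersToPeerInfos (the exact helper and return type can differ between versions):

// Sketch: unpack a GET_PROVIDERS response. ProviderPeers are candidate content
// holders; CloserPeers drive the next round of the lookup.
func unpackProvidersResponse(resp *pb.Message) (provs, closer []*peer.AddrInfo) {
	provs = pb.PBPeersToPeerInfos(resp.GetProviderPeers())
	closer = pb.PBPeersToPeerInfos(resp.GetCloserPeers())
	return provs, closer
}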
See the flow chart: handleGetProviders → GetProviders → getProvSet → loadProvSet.
First, GetProviders sends a getProv message to the ProviderManager (pm) worker and waits for its reply:
func (pm *ProviderManager) GetProviders(ctx context.Context, k []byte) []peer.ID {
	gp := &getProv{
		k:    k,
		resp: make(chan []peer.ID, 1), // buffered to prevent sender from blocking
	}
	select {
	case <-ctx.Done():
		return nil
	case pm.getprovs <- gp:
	}
	select {
	case <-ctx.Done():
		return nil
	case peers := <-gp.resp:
		return peers
	}
}
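On the other end of pm.getprovs sits the ProviderManager's single worker goroutine, which serializes access to the cache and the datastore. A condensed sketch of the relevant branch of its event loop (the real loop also handles addProv requests and periodic garbage collection):

// runSketch shows only the getProv branch of the ProviderManager's event loop.
func (pm *ProviderManager) runSketch(ctx context.Context) {
	for {
		select {
		case gp := <-pm.getprovs:
			var peers []peer.ID
			set, err := pm.getProvSet(gp.k)
			if err == nil && set != nil {
				peers = set.providers
			}
			// gp.resp is buffered with capacity 1, so this never blocks the worker.
			gp.resp <- peers
		case <-ctx.Done():
			return
		}
	}
}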
The pm worker then calls getProvSet and eventually replies on getProv.resp:
func (pm *ProviderManager) getProvSet(k []byte) (*providerSet, error) {
	// check the LRU cache first
	cached, ok := pm.providers.Get(string(k))
	if ok {
		return cached.(*providerSet), nil
	}

	// otherwise, load from the datastore
	pset, err := loadProvSet(pm.dstore, k)
	if err != nil {
		return nil, err
	}

	if len(pset.providers) > 0 {
		pm.providers.Add(string(k), pset)
	}

	return pset, nil
}
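For reference, the providerSet that comes back is roughly a slice of provider peer IDs plus a map of last-seen timestamps used to expire stale records (the exact field layout is an assumption, consistent with the pset.providers access above):

// Approximate shape of providerSet (field layout assumed).
type providerSet struct {
	providers []peer.ID             // provider peer IDs, in insertion order
	set       map[peer.ID]time.Time // last announce time per provider, for expiry
}

When the set is not cached, loadProvSet rebuilds it from the datastore: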
func loadProvSet(dstore ds.Datastore, k []byte) (*providerSet, error) {
	// query the datastore by prefix to get all entries stored under this key
	res, err := dstore.Query(dsq.Query{Prefix: mkProvKey(k)})
	...
This query returns every entry stored under the same key prefix; in other words, the list of peer IDs providing the same CID.
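To make that last step concrete, here is a hedged sketch of how one of those query results could be turned back into a peer ID: each entry's key ends in the base32-encoded peer ID (per the /providers/encoded(key)/encoded(peerID) layout above), so stripping the prefix and decoding the last path component is enough. The exact parsing inside loadProvSet may differ.

// Sketch: recover the provider's peer ID from a datastore key of the form
// /providers/<base32(key)>/<base32(peerID)>. Parsing details are assumed.
func peerIDFromProvKey(dsKey string) (peer.ID, error) {
	idx := strings.LastIndexByte(dsKey, '/')
	if idx < 0 {
		return "", fmt.Errorf("malformed provider key: %s", dsKey)
	}
	raw, err := base32.RawStdEncoding.DecodeString(dsKey[idx+1:])
	if err != nil {
		return "", err
	}
	return peer.ID(raw), nil
}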