【go-libp2p源码剖析】DHT Kademlia协议消息处理

简介

dht网络中的peer节点都必须支持dht协议(协议id为/kad/1.0.0),双方才能通信。dht本质就是C/S程序,peer可能既是客户端又是服务端,双方交换的数据格式为Protobuf。不同于gRPC,dht底层走的不是HTTP2,而是libp2p里定义的各种Transport;HTTP2所提供的多路复用、长连接(服务端推送数据到客户端),libp2p的muxer(yamux/mplex)同样都能支持。

Protobuf结构定义

// Message types: identifies which DHT RPC a Message carries.
const (
	Message_PUT_VALUE     Message_MessageType = 0 // store a record at a peer
	Message_GET_VALUE     Message_MessageType = 1 // fetch a record by key
	Message_ADD_PROVIDER  Message_MessageType = 2 // announce the sender provides a key
	Message_GET_PROVIDERS Message_MessageType = 3 // look up providers for a key
	Message_FIND_NODE     Message_MessageType = 4 // find the peers closest to an ID
	Message_PING          Message_MessageType = 5 // liveness check
)

// Connection state of a peer, as observed by the message sender.
const (
	// sender does not have a connection to peer, and no extra information (default)
	Message_NOT_CONNECTED Message_ConnectionType = 0
	// sender has a live connection to peer
	Message_CONNECTED Message_ConnectionType = 1
	// sender recently connected to peer
	Message_CAN_CONNECT Message_ConnectionType = 2
	// sender recently tried to connect to peer repeatedly but failed to connect
	// ("try" here is loose, but this should signal "made strong effort, failed")
	Message_CANNOT_CONNECT Message_ConnectionType = 3
)
// Message_Peer is a peer entry returned in responses, carrying the
// peer's multiaddrs and the sender's view of its connection state.
type Message_Peer struct {
    
    
	// ID of a given peer.
	Id byteString `protobuf:"bytes,1,opt,name=id,proto3,customtype=byteString" json:"id"`
	// multiaddrs for a given peer
	Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"`
	// used to signal the sender's connection capabilities to the peer
	Connection           Message_ConnectionType `protobuf:"varint,3,opt,name=connection,proto3,enum=dht.pb.Message_ConnectionType" json:"connection,omitempty"`
}
// Record is the key/value payload used by GET_VALUE and PUT_VALUE.
type Record struct {
    
    
	// The key that references this record
	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	// The actual value this record is storing
	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	// Time the record was received, set by receiver
	TimeReceived  string   `protobuf:"bytes,5,opt,name=timeReceived,proto3" json:"timeReceived,omitempty"`
}
// Message is the top-level protobuf envelope exchanged over /kad/1.0.0.
// Which fields are populated depends on Type (see the per-field notes).
type Message struct {
    
    
	// defines what type of message it is.
	Type Message_MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=dht.pb.Message_MessageType" json:"type,omitempty"`
	// defines what coral cluster level this query/response belongs to.
	// in case we want to implement coral's cluster rings in the future.
	ClusterLevelRaw int32 `protobuf:"varint,10,opt,name=clusterLevelRaw,proto3" json:"clusterLevelRaw,omitempty"`
	// Used to specify the key associated with this message.
	// PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	// Used to return a value
	// PUT_VALUE, GET_VALUE
	Record *pb.Record `protobuf:"bytes,3,opt,name=record,proto3" json:"record,omitempty"`
	// Used to return peers closer to a key in a query
	// GET_VALUE, GET_PROVIDERS, FIND_NODE
	CloserPeers []Message_Peer `protobuf:"bytes,8,rep,name=closerPeers,proto3" json:"closerPeers"`
	// Used to return Providers
	// GET_VALUE, ADD_PROVIDER, GET_PROVIDERS
	ProviderPeers []Message_Peer `protobuf:"bytes,9,rep,name=providerPeers,proto3" json:"providerPeers"`
}

Protobuf消息组装、发送(客户端)

ClusterLevelRaw:按Message结构体中的注释,这是为将来实现Coral协议的cluster rings预留的层级字段;目前所有调用处都固定传0,尚未实际使用。

// NewMessage builds a DHT protobuf message with the given type and key,
// and stamps the coral cluster level onto it.
func NewMessage(typ Message_MessageType, key []byte, level int) *Message {
	msg := new(Message)
	msg.Type = typ
	msg.Key = key
	msg.SetClusterLevel(level)
	return msg
}

dht.getValueSingle(dht.go)

根据key从指定peer上获取value值

// getValueSingle issues a GET_VALUE request for key to peer p and
// returns the peer's response.
func (dht *IpfsDHT) getValueSingle(ctx context.Context, p peer.ID, key string) (*pb.Message, error) {
	msg := pb.NewMessage(pb.Message_GET_VALUE, []byte(key), 0)
	return dht.sendRequest(ctx, p, msg)
}

dht.putValueToPeer(dht.go)

将给定的key/value对存储在指定peer上

// putValueToPeer stores the given key/value pair at the peer 'p' by
// sending a PUT_VALUE request and checking the echoed record matches.
func (dht *IpfsDHT) putValueToPeer(ctx context.Context, p peer.ID, rec *recpb.Record) error {
	req := pb.NewMessage(pb.Message_PUT_VALUE, rec.Key, 0)
	req.Record = rec

	resp, err := dht.sendRequest(ctx, p, req)
	if err != nil {
		logger.Debugw("failed to put value to peer", "to", p, "key", loggableRecordKeyBytes(rec.Key), "error", err)
		return err
	}

	// The remote echoes the stored record back; a mismatch means the
	// value was not stored as sent.
	if !bytes.Equal(resp.GetRecord().Value, req.GetRecord().Value) {
		logger.Infow("value not put correctly", "put-message", req, "get-message", resp)
		return errors.New("value not put correctly")
	}

	return nil
}

dht.findProvidersSingle(dht.go)

根据key(cid的hash值)向指定peer查询该内容的providers(提供者列表)

// findProvidersSingle asks peer p for the providers of the given
// multihash (content hash of a cid).
func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p peer.ID, key multihash.Multihash) (*pb.Message, error) {
	msg := pb.NewMessage(pb.Message_GET_PROVIDERS, key, 0)
	return dht.sendRequest(ctx, p, msg)
}

dht.findPeerSingle(dht.go)

询问peer“p”是否知道ID为“id”的peer在哪里

// findPeerSingle asks peer 'p' whether it knows where the peer with the
// given 'id' is, via a FIND_NODE request.
func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p peer.ID, id peer.ID) (*pb.Message, error) {
	msg := pb.NewMessage(pb.Message_FIND_NODE, []byte(id), 0)
	return dht.sendRequest(ctx, p, msg)
}

dht.Ping(dht.go)

向peer发送ping消息,然后等待响应。

// Ping sends a PING message to the peer 'p' and waits for a response;
// any reply that is not itself a PING is treated as a failure.
func (dht *IpfsDHT) Ping(ctx context.Context, p peer.ID) error {
	resp, err := dht.sendRequest(ctx, p, pb.NewMessage(pb.Message_PING, nil, 0))
	if err != nil {
		return fmt.Errorf("sending request: %w", err)
	}
	if resp.Type == pb.Message_PING {
		return nil
	}
	return fmt.Errorf("got unexpected response type: %v", resp.Type)
}

dht.Provide(routing.go)

// Provide announces that this node can provide the value for key,
// pushing an ADD_PROVIDER record to the chosen peers (excerpt; the
// elided parts locate those peers and do local bookkeeping).
func (dht *IpfsDHT) Provide(ctx context.Context, key cid.Cid, brdcst bool) (err error) {
    
    
    ...
    mes, err := dht.makeProvRecord(keyMH)
	...

	// Fan the provider record out to every selected peer concurrently.
	// ADD_PROVIDER expects no response, so sendMessage (one-way) is used.
	for p := range peers {
    
    
		wg.Add(1)
		go func(p peer.ID) {
    
    
			defer wg.Done()
			logger.Debugf("putProvider(%s, %s)", loggableProviderRecordBytes(keyMH), p)
			err := dht.sendMessage(ctx, p, mes)
			if err != nil {
    
    
				// Best-effort: a failed send to one peer is only logged.
				logger.Debug(err)
			}
		}(p)
	}
	...
}

// makeProvRecord builds an ADD_PROVIDER message for key whose
// ProviderPeers field contains this node's own ID and listen addresses.
func (dht *IpfsDHT) makeProvRecord(key []byte) (*pb.Message, error) {
    
    
	pi := peer.AddrInfo{
    
    
		ID:    dht.self,
		Addrs: dht.host.Addrs(),
	}

	...

	pmes := pb.NewMessage(pb.Message_ADD_PROVIDER, key, 0)
	pmes.ProviderPeers = pb.RawPeerInfosToPBPeers([]peer.AddrInfo{
    
    pi})
	return pmes, nil
}

Protobuf消息发送

组装消息后,调用dht.sendRequest或dht.sendMessage,发送消息到远程节点,最终调用的是messageSender.SendRequest或messageSender.SendMessage,这两个方法很像,只有一点差别。

  1. messageSender.SendRequest
    打开一个流调用writeMsg写pb消息,再调用ctxReadMsg读响应消息
    用于GET_VALUE/PUT_VALUE/PING/GET_PROVIDERS/FIND_NODE消息
  2. messageSender.SendMessage
    打开一个流,调用writeMsg写pb消息
    用于ADD_PROVIDER消息

两者都有重试机制:每次发送(或读响应)失败后,会在新打开的流上最多重试一次;另外,如果同一条stream上累计的重试发送次数超过streamReuseTries(3)次,本次发送完成后会关闭该stream,下次再重新打开。

dht_net.go

// sendRequest tags the context with the message type (for metrics),
// sends pmes to p through its cached messageSender, records the
// round-trip latency in the peerstore, and returns the response.
func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	ctx, _ = tag.New(ctx, metrics.UpsertMessageType(pmes))

	sender, err := dht.messageSenderForPeer(ctx, p)
	if err != nil {
		return nil, err
	}

	began := time.Now()
	resp, err := sender.SendRequest(ctx, pmes)
	if err != nil {
		return nil, err
	}

	// Remember how long this peer took to answer.
	dht.peerstore.RecordLatency(p, time.Since(began))
	return resp, nil
}

// sendMessage tags the context with the message type (for metrics) and
// sends pmes to p one-way, without waiting for a response.
func (dht *IpfsDHT) sendMessage(ctx context.Context, p peer.ID, pmes *pb.Message) error {
	ctx, _ = tag.New(ctx, metrics.UpsertMessageType(pmes))

	sender, err := dht.messageSenderForPeer(ctx, p)
	if err != nil {
		return err
	}
	return sender.SendMessage(ctx, pmes)
}


// messageSenderForPeer returns the (cached) messageSender for p,
// creating and initializing one when absent. Concurrent callers may
// race to create a sender; the strmap under dht.smlk decides the winner
// and losers fall back to whatever ended up in the map.
func (dht *IpfsDHT) messageSenderForPeer(ctx context.Context, p peer.ID) (*messageSender, error) {
    
    
	dht.smlk.Lock()
	ms, ok := dht.strmap[p]
	if ok {
    
    
		// Fast path: a sender for this peer already exists.
		dht.smlk.Unlock()
		return ms, nil
	}
	ms = &messageSender{
    
    p: p, dht: dht, lk: newCtxMutex()}
	dht.strmap[p] = ms
	dht.smlk.Unlock()

	// Open the stream outside the map lock; on failure, undo our entry
	// unless someone else has already replaced it.
	if err := ms.prepOrInvalidate(ctx); err != nil {
    
    
		dht.smlk.Lock()
		defer dht.smlk.Unlock()

		if msCur, ok := dht.strmap[p]; ok {
    
    
			// Changed. Use the new one, old one is invalid and
			// not in the map so we can just throw it away.
			if ms != msCur {
    
    
				return msCur, nil
			}
			// Not changed, remove the now invalid stream from the
			// map.
			delete(dht.strmap, p)
		}
		// Invalid but not in map. Must have been removed by a disconnect.
		return nil, err
	}
	// All ready to go.
	return ms, nil
}

dht_net.go

// streamReuseTries bounds how long a stream that has needed retried
// sends keeps being reused: once singleMes exceeds it, the stream is
// closed after the current send completes.
const streamReuseTries = 3

// SendMessage writes pmes to the peer's stream without waiting for a
// response (fire-and-forget, used for ADD_PROVIDER). A failed write is
// retried exactly once on a freshly prepared stream.
func (ms *messageSender) SendMessage(ctx context.Context, pmes *pb.Message) error {
    
    
	if err := ms.lk.Lock(ctx); err != nil {
    
    
		return err
	}
	defer ms.lk.Unlock()

	retry := false
	for {
    
    
		// (Re)open the stream if it was reset or never opened.
		if err := ms.prep(ctx); err != nil {
    
    
			return err
		}

		if err := ms.writeMsg(pmes); err != nil {
    
    
			_ = ms.s.Reset()
			ms.s = nil

			if retry {
    
    
				// Second consecutive failure: give up.
				logger.Debugw("error writing message", "error", err)
				return err
			}
			logger.Debugw("error writing message", "error", err, "retrying", true)
			retry = true
			continue
		}

		// Retire streams that have accumulated too many retried sends;
		// otherwise just count this send if it needed a retry.
		var err error
		if ms.singleMes > streamReuseTries {
    
    
			err = ms.s.Close()
			ms.s = nil
		} else if retry {
    
    
			ms.singleMes++
		}

		return err
	}
}

// SendRequest writes pmes to the peer's stream and waits for a single
// response message on the same stream. Both the write and the read are
// retried at most once each on a freshly prepared stream.
func (ms *messageSender) SendRequest(ctx context.Context, pmes *pb.Message) (*pb.Message, error) {
    
    
	if err := ms.lk.Lock(ctx); err != nil {
    
    
		return nil, err
	}
	defer ms.lk.Unlock()

	retry := false
	for {
    
    
		// (Re)open the stream if it was reset or never opened.
		if err := ms.prep(ctx); err != nil {
    
    
			return nil, err
		}

		if err := ms.writeMsg(pmes); err != nil {
    
    
			_ = ms.s.Reset()
			ms.s = nil

			if retry {
    
    
				// Second consecutive failure: give up.
				logger.Debugw("error writing message", "error", err)
				return nil, err
			}
			logger.Debugw("error writing message", "error", err, "retrying", true)
			retry = true
			continue
		}

		// Read the peer's response off the same stream.
		mes := new(pb.Message)
		if err := ms.ctxReadMsg(ctx, mes); err != nil {
    
    
			_ = ms.s.Reset()
			ms.s = nil

			if retry {
    
    
				logger.Debugw("error reading message", "error", err)
				return nil, err
			}
			logger.Debugw("error reading message", "error", err, "retrying", true)
			retry = true
			continue
		}

		// Retire streams that have accumulated too many retried sends;
		// otherwise just count this exchange if it needed a retry.
		var err error
		if ms.singleMes > streamReuseTries {
    
    
			err = ms.s.Close()
			ms.s = nil
		} else if retry {
    
    
			ms.singleMes++
		}

		return mes, err
	}
}

Protobuf消息解码处理(服务端)

入口,只有 ModeAutoServer, ModeServer模式才会启用

// New constructs an IpfsDHT (excerpt). The configured mode decides
// whether this node answers inbound queries: only the server modes
// register the /kad stream handlers via moveToServerMode here.
func New(ctx context.Context, h host.Host, options ...Option) (*IpfsDHT, error) {
    
    
...
    dht.auto = cfg.mode
	switch cfg.mode {
    
    
	case ModeAuto, ModeClient:
		// ModeAuto initially behaves as a client here.
		dht.mode = modeClient
	case ModeAutoServer, ModeServer:
		dht.mode = modeServer
	default:
		return nil, fmt.Errorf("invalid dht mode %d", cfg.mode)
	}

	if dht.mode == modeServer {
    
    
		if err := dht.moveToServerMode(); err != nil {
    
    
			return nil, err
		}
	}
...
}

最终调用SetStreamHandler处理

// moveToServerMode switches the node into server mode and registers the
// DHT stream handler for every server protocol, so that inbound /kad
// queries get answered.
func (dht *IpfsDHT) moveToServerMode() error {
	dht.mode = modeServer
	for _, proto := range dht.serverProtocols {
		dht.host.SetStreamHandler(proto, dht.handleNewStream)
	}
	return nil
}

// handleNewStream implements the network.StreamHandler
func (dht *IpfsDHT) handleNewStream(s network.Stream) {
	ok := dht.handleNewMessage(s)
	if !ok {
		// An error occurred while handling; reset to signal it.
		_ = s.Reset()
		return
	}
	// Orderly completion: close the stream gracefully.
	_ = s.Close()
}

处理消息逻辑。这里会将远程peer加入路由表。

// Returns true on orderly completion of writes (so we can Close the stream).
// handleNewMessage loops reading length-prefixed protobuf requests off
// the stream, dispatching each to its handler, and writing the response
// back (excerpt; error/metrics branches elided).
func (dht *IpfsDHT) handleNewMessage(s network.Stream) bool {
    
    
	ctx := dht.ctx
	// Wrap the stream in a varint-delimited reader for protobuf frames.
	r := msgio.NewVarintReaderSize(s, network.MessageSizeMax)

	mPeer := s.Conn().RemotePeer()

    ...

	for {
    
    
		if dht.getMode() != modeServer {
    
    
			...
		}

		var req pb.Message
		msgbytes, err := r.ReadMsg()
		msgLen := len(msgbytes)
		if err != nil {
    
    
			...
		}
		// Decode the frame into the request struct, then hand the
		// buffer back to the reader's pool.
		err = req.Unmarshal(msgbytes)
		r.ReleaseMsg(msgbytes)
		if err != nil {
    
    
			...
		}

		timer.Reset(dhtStreamIdleTimeout)

		startTime := time.Now()
		...
        // Look up the handler for this message type.
		handler := dht.handlerForMsgType(req.GetType())
		if handler == nil {
    
    
			...
		}

		// Add the connecting remote peer to the routing table first.
		dht.peerFound(dht.ctx, mPeer, true)
    
		resp, err := handler(ctx, mPeer, &req)

		if resp == nil {
    
    
			continue
		}

		// Write the handler's result back on the same stream.
		err = writeMsg(s, resp)
		if err != nil {
    
    
		    ...
		}

		elapsedTime := time.Since(startTime)

		...
	}
}


// handlerForMsgType maps a message type to its server-side handler.
// It returns nil when the type is unknown or when the relevant
// subsystem (values / providers) is disabled.
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
	switch t {
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	case pb.Message_PING:
		return dht.handlePing
	case pb.Message_GET_VALUE:
		if dht.enableValues {
			return dht.handleGetValue
		}
	case pb.Message_PUT_VALUE:
		if dht.enableValues {
			return dht.handlePutValue
		}
	case pb.Message_ADD_PROVIDER:
		if dht.enableProviders {
			return dht.handleAddProvider
		}
	case pb.Message_GET_PROVIDERS:
		if dht.enableProviders {
			return dht.handleGetProviders
		}
	}
	return nil
}

handleFindPeer

// handleFindPeer answers a FIND_NODE query: it returns the peers from
// the local routing table that are closest to the requested ID (plus
// the target itself when it is not already among them), together with
// their known addresses and connection states.
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, from peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
	resp := pb.NewMessage(pmes.GetType(), nil, pmes.GetClusterLevel())
	var closest []peer.ID

	if len(pmes.GetKey()) == 0 {
		return nil, fmt.Errorf("handleFindPeer with empty key")
	}

	target := peer.ID(pmes.GetKey())
	if target == dht.self {
		// The queried node is this server itself: answer with just us.
		closest = []peer.ID{dht.self}
	} else {
		// Pull the bucketSize closest peers out of the routing table.
		closest = dht.betterPeersToQuery(pmes, from, dht.bucketSize)

		// When the target is not the requester itself, make sure the
		// target appears in the answer (append it if the routing table
		// did not already produce it).
		if target != from {
			known := false
			for _, candidate := range closest {
				if candidate == target {
					known = true
					break
				}
			}
			if !known {
				closest = append(closest, target)
			}
		}
	}

	if closest == nil {
		return resp, nil
	}

	// Attach addresses; peers we have no addresses for are dropped.
	infos := pstore.PeerInfos(dht.peerstore, closest)
	dialable := make([]peer.AddrInfo, 0, len(infos))
	for _, info := range infos {
		if len(info.Addrs) > 0 {
			dialable = append(dialable, info)
		}
	}

	// Passing the swarm lets the server report each peer's
	// ConnectionType alongside its addresses.
	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), dialable)
	return resp, nil
}

handlePing

这里什么也没做,只是在debug日志打印了一句话,将pb.Message原路返回。能连接到服务端,并支持dht协议,能正常收发消息,说明PING成功。

// handlePing answers a PING by echoing the request message back; being
// able to exchange a DHT message at all is the liveness proof.
func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
    
    
	logger.Debugf("%s Responding to ping from %s!\n", dht.self, p)
	return pmes, nil
}

handleGetValue

// handleGetValue answers a GET_VALUE query: it returns the locally
// stored record for the key (if any) plus the closest peers from the
// routing table so the client can keep searching (excerpt; elided).
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
    
    
	...

	// Build a fresh response of the same type/key.
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
    // Look the key up in the local datastore.
	rec, err := dht.checkLocalDatastore(k)
	if err != nil {
    
    
		return nil, err
	}
	// rec may be nil: this node holds no record for the key.
	resp.Record = rec

	// Find the bucketSize closest peers in the routing table.
	closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize)
	if len(closer) > 0 {
    
    
        
		closerinfos := pstore.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
    
    
			....
		}
        // Return the closest peers so the client can continue the search.
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}



// checkLocalDatastore looks up k in the local datastore and returns the
// stored record, or (nil, nil) when the key is absent or the record is
// stale/corrupt.
//
// Only the receive timestamp is checked here; full record validation is
// deliberately left to the client, because it can be computationally
// expensive.
func (dht *IpfsDHT) checkLocalDatastore(k []byte) (*recpb.Record, error) {
	dskey := convertToDsKey(k)
	buf, err := dht.datastore.Get(dskey)

	if err == ds.ErrNotFound {
		// Missing key is not an error; there is simply no record.
		return nil, nil
	}

	if err != nil {
		return nil, err
	}

	rec := new(recpb.Record)
	if err := proto.Unmarshal(buf, rec); err != nil {
		return nil, err
	}

	// A record is bad when its timestamp fails to parse as RFC3339 or
	// when it is older than maxRecordAge (36h by default).
	var recordIsBad bool
	recvtime, err := u.ParseRFC3339(rec.GetTimeReceived())
	if err != nil {
		recordIsBad = true
	}

	if time.Since(recvtime) > dht.maxRecordAge {
		recordIsBad = true
	}

	if recordIsBad {
		// Drop the bad record. The original code silently ignored the
		// Delete error in an empty branch; log it instead — failure to
		// delete is non-fatal but should be visible.
		if err := dht.datastore.Delete(dskey); err != nil {
			logger.Debugw("failed to delete bad record from datastore", "key", dskey, "error", err)
		}
		return nil, nil
	}
	return rec, nil
}

handlePutValue

在peer的本地存储中存储一个值

// handlePutValue stores the record carried in a PUT_VALUE message into
// this node's local datastore, after checking key consistency, record
// validity, and that it does not regress an existing better record
// (excerpt; some steps elided).
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, err error) {
    
    
	...

	rec := pmes.GetRecord()
	if rec == nil {
    
    
		return nil, errors.New("nil record")
	}

	// The message key and the record key must agree.
	if !bytes.Equal(pmes.GetKey(), rec.GetKey()) {
    
    
		return nil, errors.New("put key doesn't match record key")
	}

	cleanRecord(rec)

	// Make sure the record is valid (not expired, valid signature, etc.).
	if err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue()); err != nil {
    
    
		return nil, err
	}

	dskey := convertToDsKey(rec.GetKey())

	// Striped locking: pick one of the put-locks by the key's last byte
	// (presumably a 256-entry array — confirm against the struct), so
	// concurrent puts to keys in the same stripe are serialized.
	var indexForLock byte
	if len(rec.GetKey()) == 0 {
    
    
		indexForLock = 0
	} else {
    
    
		indexForLock = rec.GetKey()[len(rec.GetKey())-1]
	}
	lk := &dht.stripedPutLocks[indexForLock]
	lk.Lock()
	defer lk.Unlock()

	// Make sure the new record is "better" than what we already hold.
	// This prevents a record with e.g. a lower sequence number from
	// overwriting one with a higher sequence number.
	existing, err := dht.getRecordFromDatastore(dskey)
	if err != nil {
    
    
		return nil, err
	}

	if existing != nil {
    
    
		recs := [][]byte{
    
    rec.GetValue(), existing.GetValue()}
		// Validator.Select returns the index of the best record among
		// recs; if the incoming record (index 0) does not win, reject
		// the put as stale.
		i, err := dht.Validator.Select(string(rec.GetKey()), recs)
		if err != nil {
    
    
			return nil, err
		}
		if i != 0 {
    
    
			return nil, errors.New("old record")
		}
	}

	// Record the time at which we received this record.
	rec.TimeReceived = u.FormatRFC3339(time.Now())

    // Serialize the record for storage.
	data, err := proto.Marshal(rec)
	if err != nil {
    
    
		return nil, err
	}
    // Persist into the datastore; echo the request back on success.
	err = dht.datastore.Put(dskey, data)
	return pmes, err
}

// cleanRecord strips sender-set metadata from an incoming record;
// currently it only clears TimeReceived, which the storing node stamps.
func cleanRecord(rec *recpb.Record) {
    
    
	rec.TimeReceived = ""
}

// getRecordFromDatastore fetches and validates the record stored at
// dskey. Missing, corrupt, or invalid records deliberately yield
// (nil, nil) rather than an error, so that callers simply overwrite them.
func (dht *IpfsDHT) getRecordFromDatastore(dskey ds.Key) (*recpb.Record, error) {
    
    
	buf, err := dht.datastore.Get(dskey)
	if err == ds.ErrNotFound {
    
    
		return nil, nil
	}
	if err != nil {
    
    
		return nil, err
	}
	rec := new(recpb.Record)
	err = proto.Unmarshal(buf, rec)
	if err != nil {
    
    
		// Corrupt bytes on disk: treat as missing so it gets rewritten.
		return nil, nil
	}

	err = dht.Validator.Validate(string(rec.GetKey()), rec.GetValue())
	if err != nil {
    
    
		// Invalid record — possibly expired. Not an error; we'll just
		// overwrite it.
		return nil, nil
	}

	return rec, nil
}

handleAddProvider

// handleAddProvider records the sender as a provider for the given key.
// Only provider entries whose ID equals the connected peer are accepted,
// so peers cannot register third parties (excerpt; elided).
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
    
    
	key := pmes.GetKey()
	...

	// ProviderPeers usually holds a single entry whose ID equals p.
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
    
    
		if pi.ID != p {
    
    
			// Ignore this provider record: it is not from the sender.
			// (p must appear in GetProviderPeers.)
			continue
		}
		// Skip entries with no addresses — nothing dialable to store.
		if len(pi.Addrs) < 1 {
    
    
			continue
		}
		// Cache the provider's addresses in the peerstore...
		if pi.ID != dht.self {
    
     // ...but never add ourselves.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peerstore.ProviderAddrTTL)
		}
		// Store the mapping locally; one key can map to many providers.
		dht.ProviderManager.AddProvider(ctx, key, p)
	}
	// ADD_PROVIDER has no response message.
	return nil, nil
}

handleGetProviders


// handleGetProviders answers a GET_PROVIDERS query with the locally
// known providers for the key, plus the closest peers from the routing
// table so the client can continue its search elsewhere.
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (_ *pb.Message, _err error) {
	key := pmes.GetKey()
	// Rebuild the message from the request so stale fields are cleared.
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// Providers recorded locally for this key, with their addresses and
	// connection states.
	if providers := dht.ProviderManager.GetProviders(ctx, key); len(providers) > 0 {
		infos := pstore.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
	}

	// The bucketSize closest peers from the routing table.
	if closer := dht.betterPeersToQuery(pmes, p, dht.bucketSize); closer != nil {
		infos := pstore.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
	}

	return resp, nil
}

猜你喜欢

转载自blog.csdn.net/kk3909/article/details/111028319