Example 1
// peerWantsBlock checks whether we have the block in question and, if we do,
// consults the ledger to decide whether we should send it.
func (bs *BitSwap) peerWantsBlock(p *peer.Peer, want string) {
	u.DOut("peer [%s] wants block [%s]\n", p.ID.Pretty(), u.Key(want).Pretty())
	ledger := bs.getLedger(p)

	dsk := ds.NewKey(want)
	blkI, err := bs.datastore.Get(dsk)
	if err != nil {
		if err == ds.ErrNotFound {
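			// We don't have the block; record that the peer wants it.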
			ledger.Wants(u.Key(want))
		}
		u.PErr("datastore get error: %v\n", err)
		return
	}

	blk, ok := blkI.([]byte)
	if !ok {
		u.PErr("data conversion error.\n")
		return
	}

	if ledger.ShouldSend() {
		u.DOut("Sending block to peer.\n")
		bblk, err := blocks.NewBlock(blk)
		if err != nil {
			u.PErr("newBlock error: %v\n", err)
			return
		}
		bs.SendBlock(p, bblk)
		ledger.SentBytes(len(blk))
	} else {
		u.DOut("Decided not to send block.")
	}
}
Example 2
func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) {
	resp := Message{
		Type:     pmes.GetType(),
		ID:       pmes.GetId(),
		Response: true,
	}
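	// Send the response on the way out, whichever return path we take below.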
	defer func() {
		mes := swarm.NewMessage(p, resp.ToProtobuf())
		dht.netChan.Outgoing <- mes
	}()
	// Default to cluster level 0 if no level was specified in the Value field,
	// mirroring the guard in handleGetValue.
	level := 0
	if len(pmes.GetValue()) > 0 {
		level = int(pmes.GetValue()[0])
	}
	u.DOut("handleFindPeer: searching for '%s'\n", peer.ID(pmes.GetKey()).Pretty())
	closest := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
	if closest == nil {
		u.PErr("handleFindPeer: could not find anything.\n")
		return
	}

	if len(closest.Addresses) == 0 {
		u.PErr("handleFindPeer: no addresses for connected peer...\n")
		return
	}

	// If the peer we found is further from the key than we are, return nothing.
	if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) {
		return
	}

	u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty())
	resp.Peers = []*peer.Peer{closest}
	resp.Success = true
}
Example 3
func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *PBDHTMessage) {
	u.DOut("handleGetValue for key: %s\n", pmes.GetKey())
	dskey := ds.NewKey(pmes.GetKey())
	resp := &Message{
		Response: true,
		ID:       pmes.GetId(),
		Key:      pmes.GetKey(),
	}
	iVal, err := dht.datastore.Get(dskey)
	if err == nil {
		u.DOut("handleGetValue success!\n")
		resp.Success = true
		resp.Value = iVal.([]byte)
	} else if err == ds.ErrNotFound {
		// Check if we know any providers for the requested value
		provs := dht.providers.GetProviders(u.Key(pmes.GetKey()))
		if len(provs) > 0 {
			u.DOut("handleGetValue returning %d provider[s]\n", len(provs))
			resp.Peers = provs
			resp.Success = true
		} else {
			// No providers?
			// Find closest peer on given cluster to desired key and reply with that info

			level := 0
			if len(pmes.GetValue()) < 1 {
				// TODO: maybe return an error? Defaulting isn't a good idea IMO
				u.PErr("handleGetValue: no routing level specified, assuming 0\n")
			} else {
				level = int(pmes.GetValue()[0]) // Using value field to specify cluster level
			}
			u.DOut("handleGetValue searching level %d clusters\n", level)

			closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))

			if closer.ID.Equal(dht.self.ID) {
				u.DOut("Attempted to return self! this shouldnt happen...\n")
				resp.Peers = nil
				goto out
			}
			// If this peer is closer than the one from the table, return nil
			if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
				resp.Peers = nil
				u.DOut("handleGetValue could not find a closer node than myself.\n")
			} else {
				u.DOut("handleGetValue returning a closer peer: '%s'\n", closer.ID.Pretty())
				resp.Peers = []*peer.Peer{closer}
			}
		}
	} else {
		// temp: what other errors can a datastore return?
		panic(err)
	}

out:
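	// Send whichever response was built above.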
	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.netChan.Outgoing <- mes
}
Example 4
func refCmd(c *commander.Command, inp []string) error {
	if len(inp) < 1 {
		u.POut(c.Long)
		return nil
	}

	n, err := localNode(false)
	if err != nil {
		return err
	}

	recursive := c.Flag.Lookup("r").Value.Get().(bool)
	unique := c.Flag.Lookup("u").Value.Get().(bool)
	refsSeen := map[u.Key]bool{}

	printRef := func(h mh.Multihash) {
		if unique {
			_, found := refsSeen[u.Key(h)]
			if found {
				return
			}
			refsSeen[u.Key(h)] = true
		}

		u.POut("%s\n", h.B58String())
	}

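	// Declare the variable first so the closure can call itself recursively.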
	var printRefs func(nd *mdag.Node, recursive bool)
	printRefs = func(nd *mdag.Node, recursive bool) {

		for _, link := range nd.Links {
			printRef(link.Hash)

			if recursive {
				nd, err := n.DAG.Get(u.Key(link.Hash))
				if err != nil {
					u.PErr("error: cannot retrieve %s (%s)\n", link.Hash.B58String(), err)
					return
				}

				printRefs(nd, recursive)
			}
		}
	}

	for _, fn := range inp {
		nd, err := n.Resolver.ResolvePath(fn)
		if err != nil {
			return err
		}

		printRefs(nd, recursive)
	}
	return nil
}
Example 5
// Drop removes the given peer from the swarm and closes our connection to it.
func (s *Swarm) Drop(p *peer.Peer) error {
	u.DOut("Dropping peer: [%s]\n", p.ID.Pretty())
	// Look up and delete under a single write lock so no other goroutine can
	// touch the map between the check and the delete.
	s.connsLock.Lock()
	conn, found := s.conns[u.Key(p.ID)]
	if !found {
		s.connsLock.Unlock()
		return u.ErrNotFound
	}
	delete(s.conns, u.Key(p.ID))
	s.connsLock.Unlock()

	return conn.Close()
}
Example 6
func (dr *DagReader) precalcNextBuf() error {
	if dr.position >= len(dr.node.Links) {
		return io.EOF
	}
	nxtLink := dr.node.Links[dr.position]
	nxt := nxtLink.Node
	if nxt == nil {
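		// The link's target isn't cached in memory; fetch it from the service.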
		nxtNode, err := dr.serv.Get(u.Key(nxtLink.Hash))
		if err != nil {
			return err
		}
		nxt = nxtNode
	}
	pb := new(PBData)
	err := proto.Unmarshal(nxt.Data, pb)
	if err != nil {
		return err
	}
	dr.position++

	switch pb.GetType() {
	case PBData_Directory:
		panic("Why is there a directory under a file?")
	case PBData_File:
		//TODO: this *should* work, needs testing first
		//return NewDagReader(nxt, dr.serv)
		panic("Not yet handling different layers of indirection!")
	case PBData_Raw:
		dr.buf = bytes.NewBuffer(pb.GetData())
		return nil
	default:
		panic("Unrecognized node type!")
	}
}
Example 7
// ResolvePath fetches the node for given path. It uses the first
// path component as a hash (key) of the first node, then resolves
// all other components walking the links, with ResolveLinks.
func (s *Resolver) ResolvePath(fpath string) (*merkledag.Node, error) {
	u.DOut("Resolve: '%s'\n", fpath)
	fpath = path.Clean(fpath)

	parts := strings.Split(fpath, "/")

	// skip over empty first elem
	if len(parts[0]) == 0 {
		parts = parts[1:]
	}

	// if nothing, bail.
	if len(parts) == 0 {
		return nil, fmt.Errorf("ipfs path must contain at least one component")
	}

	// first element in the path is a b58 hash (for now)
	h, err := mh.FromB58String(parts[0])
	if err != nil {
		return nil, err
	}

	u.DOut("Resolve dag get.\n")
	nd, err := s.DAG.Get(u.Key(h))
	if err != nil {
		return nil, err
	}

	return s.ResolveLinks(nd, parts[1:])
}
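A minimal usage sketch (hypothetical helper, not part of the original code; it assumes a Resolver wired to a working DAGService):

// resolveAndPrint shows how ResolvePath is called: the first path component
// is a base58-encoded multihash, the remaining components are link names.
func resolveAndPrint(s *Resolver, fpath string) error {
	nd, err := s.ResolvePath(fpath)
	if err != nil {
		return err
	}
	k, err := nd.Key()
	if err != nil {
		return err
	}
	u.POut("resolved %s to %s\n", fpath, k.Pretty())
	return nil
}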
Example 8
// ResolveLinks iteratively resolves names by walking the link hierarchy.
// Every node is fetched from the DAGService, resolving the next name.
// Returns the last node found.
//
// ResolveLinks(nd, []string{"foo", "bar", "baz"})
// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
func (s *Resolver) ResolveLinks(ndd *merkledag.Node, names []string) (
	nd *merkledag.Node, err error) {

	nd = ndd // dup arg workaround

	// for each of the path components
	for _, name := range names {

		var next u.Key
		// for each of the links in nd, the current object
		for _, link := range nd.Links {
			if link.Name == name {
				next = u.Key(link.Hash)
				break
			}
		}

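		// u.Key is a string type, so the empty string means no link matched.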
		if next == "" {
			h1, _ := nd.Multihash()
			h2 := h1.B58String()
			return nil, fmt.Errorf("no link named %q under %s", name, h2)
		}

		// fetch object for link and assign to nd
		nd, err = s.DAG.Get(next)
		if err != nil {
			return nd, err
		}
	}
	return
}
Example 9
func Put(n *core.IpfsNode, cmdparts []string) (string, error) {
	if len(cmdparts) < 4 {
		return fmt.Sprintln("put: '# put key val'"), ErrArgCount
	}
	msg := fmt.Sprintf("putting value: '%s' for key '%s'\n", cmdparts[3], cmdparts[2])
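	// Note: the CancelFunc returned by WithDeadline is discarded here (and in
	// the similar helpers below); calling it would release the timer early.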
	ctx, _ := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	return msg, n.Routing.PutValue(ctx, u.Key(cmdparts[2]), []byte(cmdparts[3]))
}
Example 10
func TestProvides(t *testing.T) {
	u.Debug = false

	addrs, _, dhts := setupDHTS(4, t)

	_, err := dhts[0].Connect(addrs[1])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(addrs[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(addrs[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].Provide(u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}

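	// Give the provide announcement a moment to propagate.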
	time.Sleep(time.Millisecond * 60)

	provs, err := dhts[0].FindProviders(u.Key("hello"), time.Second)
	if err != nil {
		t.Fatal(err)
	}

	if len(provs) != 1 {
		t.Fatal("Didnt get back providers")
	}

	for i := 0; i < 4; i++ {
		dhts[i].Halt()
	}
}
Example 11
// If fewer than K nodes are in the entire network, a GET RPC should fail
// when nobody has the value.
func TestLessThanKResponses(t *testing.T) {
	u.Debug = false
	fn := newFauxNet()
	fn.Listen()

	local := new(peer.Peer)
	local.ID = peer.ID("test_peer")

	d := NewDHT(local, fn, ds.NewMapDatastore())
	d.Start()

	var ps []*peer.Peer
	for i := 0; i < 5; i++ {
		ps = append(ps, _randPeer())
		d.Update(ps[i])
	}
	other := _randPeer()

	// Reply with random peers to every message
	fn.AddHandler(func(mes *swarm.Message) *swarm.Message {
		pmes := new(PBDHTMessage)
		err := proto.Unmarshal(mes.Data, pmes)
		if err != nil {
			t.Fatal(err)
		}

		switch pmes.GetType() {
		case PBDHTMessage_GET_VALUE:
			resp := Message{
				Type:     pmes.GetType(),
				ID:       pmes.GetId(),
				Response: true,
				Success:  false,
				Peers:    []*peer.Peer{other},
			}

			return swarm.NewMessage(mes.Peer, resp.ToProtobuf())
		default:
			panic("Shouldnt recieve this.")
		}

	})

	_, err := d.GetValue(u.Key("hello"), time.Second*30)
	if err != nil {
		switch err {
		case u.ErrNotFound:
			// Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to recieve an error.")
}
Example 12
func TestAppendWanted(t *testing.T) {
	const str = "foo"
	m := newMessage()
	m.AppendWanted(u.Key(str))

	if !contains(m.ToProto().GetWantlist(), str) {
		t.Fail()
	}
}
Example 13
func TestCopyProtoByValue(t *testing.T) {
	const str = "foo"
	m := newMessage()
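	// ToProto should copy by value: a later append must not mutate this copy.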
	protoBeforeAppend := m.ToProto()
	m.AppendWanted(u.Key(str))
	if contains(protoBeforeAppend.GetWantlist(), str) {
		t.Fail()
	}
}
Example 14
func TestLayeredGet(t *testing.T) {
	u.Debug = false
	addrs, _, dhts := setupDHTS(4, t)

	_, err := dhts[0].Connect(addrs[1])
	if err != nil {
		t.Fatalf("Failed to connect: %s", err)
	}

	_, err = dhts[1].Connect(addrs[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(addrs[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].Provide(u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	val, err := dhts[0].GetValue(u.Key("hello"), time.Second)
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatal("Got incorrect value.")
	}

	for i := 0; i < 4; i++ {
		dhts[i].Halt()
	}
}
Example 15
func (dht *IpfsDHT) loadProvidableKeys() error {
	kl, err := dht.datastore.KeyList()
	if err != nil {
		return err
	}
	for _, k := range kl {
		dht.providers.AddProvider(u.Key(k.Bytes()), dht.self)
	}
	return nil
}
Example 16
func Get(n *core.IpfsNode, cmdparts []string) (string, error) {
	if len(cmdparts) < 3 {
		return fmt.Sprintln("get: '# get key'"), ErrArgCount
	}
	ctx, _ := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	val, err := n.Routing.GetValue(ctx, u.Key(cmdparts[2]))
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("Got value: '%s'\n", string(val)), nil
}
Example 17
func Store(n *core.IpfsNode, cmdparts []string) (string, error) {
	if len(cmdparts) < 4 {
		return fmt.Sprintln("store: '# store key val'"), ErrArgCount
	}
	err := n.Datastore.Put(u.Key(cmdparts[2]).DsKey(), []byte(cmdparts[3]))
	if err != nil {
		return "", err
	}

	return "", nil
}
Example 18
func Provide(n *core.IpfsNode, cmdparts []string) (string, error) {
	if len(cmdparts) < 3 {
		return fmt.Sprintln("provide: '# provide key'"), ErrArgCount
	}
	ctx, _ := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	err := n.Routing.Provide(ctx, u.Key(cmdparts[2]))
	if err != nil {
		return "", err
	}
	return "", nil
}
Example 19
func TestProviderManager(t *testing.T) {
	mid := peer.ID("testing")
	p := NewProviderManager(mid)
	a := u.Key("test")
	p.AddProvider(a, &peer.Peer{})
	resp := p.GetProviders(a)
	if len(resp) != 1 {
		t.Fatal("Could not retrieve provider.")
	}
	p.Halt()
}
Example 20
func TestBlocks(t *testing.T) {
	d := ds.NewMapDatastore()
	bs, err := NewBlockService(d, nil)
	if err != nil {
		t.Error("failed to construct block service", err)
		return
	}

	b, err := blocks.NewBlock([]byte("beep boop"))
	if err != nil {
		t.Error("failed to construct block", err)
		return
	}

	h, err := u.Hash([]byte("beep boop"))
	if err != nil {
		t.Error("failed to hash data", err)
		return
	}

	if !bytes.Equal(b.Multihash, h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != u.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key", err)
	}

	b2, err := bs.GetBlock(b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data, b2.Data) {
		t.Error("Block data is not equal.")
	}
}
Example 21
func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *PBDHTMessage) {
	resp := Message{
		Type:     PBDHTMessage_GET_PROVIDERS,
		Key:      pmes.GetKey(),
		ID:       pmes.GetId(),
		Response: true,
	}

	has, err := dht.datastore.Has(ds.NewKey(pmes.GetKey()))
	if err != nil {
		dht.netChan.Errors <- err
	}

	providers := dht.providers.GetProviders(u.Key(pmes.GetKey()))
	if has {
		providers = append(providers, dht.self)
	}
	if len(providers) == 0 {
		level := 0
		if len(pmes.GetValue()) > 0 {
			level = int(pmes.GetValue()[0])
		}

		closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey())))
		if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) {
			resp.Peers = nil
		} else {
			resp.Peers = []*peer.Peer{closer}
		}
	} else {
		resp.Peers = providers
		resp.Success = true
	}

	mes := swarm.NewMessage(p, resp.ToProtobuf())
	dht.netChan.Outgoing <- mes
}
Example 22
func TestNode(t *testing.T) {

	n1 := &Node{Data: []byte("beep")}
	n2 := &Node{Data: []byte("boop")}
	n3 := &Node{Data: []byte("beep boop")}
	if err := n3.AddNodeLink("beep-link", n1); err != nil {
		t.Error(err)
	}
	if err := n3.AddNodeLink("boop-link", n2); err != nil {
		t.Error(err)
	}

	printn := func(name string, n *Node) {
		fmt.Println(">", name)
		fmt.Println("data:", string(n.Data))

		fmt.Println("links:")
		for _, l := range n.Links {
			fmt.Println("-", l.Name, l.Size, l.Hash)
		}

		e, err := n.Encoded(false)
		if err != nil {
			t.Error(err)
		} else {
			fmt.Println("encoded:", e)
		}

		h, err := n.Multihash()
		if err != nil {
			t.Error(err)
		} else {
			fmt.Println("hash:", h)
		}

		k, err := n.Key()
		if err != nil {
			t.Error(err)
		} else if k != u.Key(h) {
			t.Error("Key is not equivalent to multihash")
		} else {
			fmt.Println("key: ", k)
		}
	}

	printn("beep", n1)
	printn("boop", n2)
	printn("beep boop", n3)
}
Example 23
func AssertGet(n *core.IpfsNode, key, exp string) bool {
	ctx, _ := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	val, err := n.Routing.GetValue(ctx, u.Key(key))
	if err != nil {
		fmt.Printf("Get error: %s\n", err)
		return false
	}

	if string(val) != exp {
		fmt.Printf("expected '%s' but got '%s' instead.\n", exp, string(val))
		return false
	}

	if !logquiet {
		fmt.Println("Expectation Successful!")
	}
	return true
}
Example 24
func FindProv(n *core.IpfsNode, cmdparts []string) (string, error) {
	if len(cmdparts) < 3 {
		return fmt.Sprintln("findprov: '# findprov key [count]'"), ErrArgCount
	}
	count := 1
	var err error
	if len(cmdparts) >= 4 {
		count, err = strconv.Atoi(cmdparts[3])
		if err != nil {
			return "", err
		}
	}
	ctx, _ := context.WithDeadline(context.TODO(), time.Now().Add(time.Second*5))
	pchan := n.Routing.FindProvidersAsync(ctx, u.Key(cmdparts[2]), count)

	out := new(bytes.Buffer)
	fmt.Fprintf(out, "Providers of '%s'\n", cmdparts[2])
	for p := range pchan {
		fmt.Fprintf(out, "\t%s\n", p)
	}
	return out.String(), nil
}
Example 25
// TODO: Could be done async
func (dht *IpfsDHT) addPeerList(key u.Key, peers []*PBDHTMessage_PBPeer) []*peer.Peer {
	var provArr []*peer.Peer
	for _, prov := range peers {
		// Don't add ourselves to the list
		if peer.ID(prov.GetId()).Equal(dht.self.ID) {
			continue
		}
		// Reuse the peer object if we already know this peer; otherwise connect
		p := dht.network.Find(u.Key(prov.GetId()))
		if p == nil {
			u.DOut("given provider %s was not in our network already.\n", peer.ID(prov.GetId()).Pretty())
			var err error
			p, err = dht.peerFromInfo(prov)
			if err != nil {
				u.PErr("error connecting to new peer: %s\n", err)
				continue
			}
		}
		dht.providers.AddProvider(key, p)
		provArr = append(provArr, p)
	}
	return provArr
}
Example 26
func TestGetFailures(t *testing.T) {
	fn := newFauxNet()
	fn.Listen()

	local := new(peer.Peer)
	local.ID = peer.ID("test_peer")

	d := NewDHT(local, fn, ds.NewMapDatastore())

	other := &peer.Peer{ID: peer.ID("other_peer")}

	d.Start()

	d.Update(other)

	// This one should time out
	_, err := d.GetValue(u.Key("test"), time.Millisecond*10)
	if err != nil {
		if err != u.ErrTimeout {
			t.Fatal("Got different error than we expected.")
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	// Reply with failures to every message
	fn.AddHandler(func(mes *swarm.Message) *swarm.Message {
		pmes := new(PBDHTMessage)
		err := proto.Unmarshal(mes.Data, pmes)
		if err != nil {
			t.Fatal(err)
		}

		resp := Message{
			Type:     pmes.GetType(),
			ID:       pmes.GetId(),
			Response: true,
			Success:  false,
		}
		return swarm.NewMessage(mes.Peer, resp.ToProtobuf())
	})

	// This one should fail with NotFound
	_, err = d.GetValue(u.Key("test"), time.Millisecond*1000)
	if err != nil {
		if err != u.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	success := make(chan struct{})
	fn.handlers = nil
	fn.AddHandler(func(mes *swarm.Message) *swarm.Message {
		resp := new(PBDHTMessage)
		err := proto.Unmarshal(mes.Data, resp)
		if err != nil {
			t.Fatal(err)
		}
		if resp.GetSuccess() {
			t.Fatal("Get returned success when it shouldnt have.")
		}
		success <- struct{}{}
		return nil
	})

	// Now we test this DHT's handleGetValue failure
	req := Message{
		Type:  PBDHTMessage_GET_VALUE,
		Key:   "hello",
		ID:    swarm.GenerateMessageID(),
		Value: []byte{0},
	}
	fn.Chan.Incoming <- swarm.NewMessage(other, req.ToProtobuf())

	<-success
}
Example 27
func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *PBDHTMessage) {
	key := u.Key(pmes.GetKey())
	u.DOut("[%s] Adding [%s] as a provider for '%s'\n", dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty())
	dht.providers.AddProvider(key, p)
}
Example 28
// Key returns the block's Multihash as a Key value.
func (b *Block) Key() u.Key {
	return u.Key(b.Multihash)
}
Example 29
// Key returns the Multihash as a key, for maps.
func (n *Node) Key() (u.Key, error) {
	h, err := n.Multihash()
	return u.Key(h), err
}
Example 30
// Key returns the ID as a Key (string) for maps.
func (p *Peer) Key() u.Key {
	return u.Key(p.ID)
}
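Because u.Key is a string type, its values can index maps directly, which is what refsSeen in the refCmd example relies on. A minimal sketch (hypothetical helper, not from the original code):

// firstVisit reports whether p is being seen for the first time, and marks
// it as seen. It works because u.Key, being a string, is a valid map key.
func firstVisit(seen map[u.Key]bool, p *peer.Peer) bool {
	if seen[p.Key()] {
		return false
	}
	seen[p.Key()] = true
	return true
}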