Example #1
func (p *printer) MocknetLinks(mn Mocknet) {
	links := mn.Links()

	fmt.Fprintf(p.w, "Mocknet link map:\n")
	for p1, lm := range links {
		fmt.Fprintf(p.w, "\t%s linked to:\n", peer.ID(p1))
		for p2, l := range lm {
			fmt.Fprintf(p.w, "\t\t%s (%d links)\n", peer.ID(p2), len(l))
		}
	}
	fmt.Fprintf(p.w, "\n")
}
Example #2
func TestProviderManager(t *testing.T) {
	ctx := context.Background()
	mid := peer.ID("testing")
	p := NewProviderManager(ctx, mid)
	a := key.Key("test")
	p.AddProvider(ctx, a, peer.ID("testingprovider"))
	resp := p.GetProviders(ctx, a)
	if len(resp) != 1 {
		t.Fatal("Could not retrieve provider.")
	}
	p.proc.Close()
}
Example #3
func TestQueue(t *testing.T) {

	// these peer IDs aren't strictly valid (they would need to be hex-decoded),
	// but they work for this test.
	p1 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")
	p2 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32")
	p3 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
	p4 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34")
	p5 := peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")

	// these are the peer.IDs' XORKeySpace Key values:
	// [228 47 151 130 156 102 222 232 218 31 132 94 170 208 80 253 120 103 55 35 91 237 48 157 81 245 57 247 66 150 9 40]
	// [26 249 85 75 54 49 25 30 21 86 117 62 85 145 48 175 155 194 210 216 58 14 241 143 28 209 129 144 122 28 163 6]
	// [78 135 26 216 178 181 224 181 234 117 2 248 152 115 255 103 244 34 4 152 193 88 9 225 8 127 216 158 226 8 236 246]
	// [125 135 124 6 226 160 101 94 192 57 39 12 18 79 121 140 190 154 147 55 44 83 101 151 63 255 94 179 51 203 241 51]

	pq := NewXORDistancePQ(key.Key("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"))
	pq.Enqueue(p3)
	pq.Enqueue(p1)
	pq.Enqueue(p2)
	pq.Enqueue(p4)
	pq.Enqueue(p5)
	pq.Enqueue(p1)

	// should come out as: p1/p5 (three copies of the same value), then p4, p3, p2

	if d := pq.Dequeue(); d != p1 && d != p5 {
		t.Error("ordering failed")
	}

	if d := pq.Dequeue(); d != p1 && d != p5 {
		t.Error("ordering failed")
	}

	if d := pq.Dequeue(); d != p1 && d != p5 {
		t.Error("ordering failed")
	}

	if pq.Dequeue() != p4 {
		t.Error("ordering failed")
	}

	if pq.Dequeue() != p3 {
		t.Error("ordering failed")
	}

	if pq.Dequeue() != p2 {
		t.Error("ordering failed")
	}

}
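The keyspace values listed in the comment inside the test are simply the peer IDs hashed into the XOR keyspace. Below is a minimal sketch of recomputing them, assuming ConvertPeerID from the routing-table package (imported as rtable in Example #10) is the same SHA-256-based conversion the queue's XORKeySpace uses:

func printXORKeySpaceKeys() {
	ids := []peer.ID{
		peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31"),
		peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32"),
		peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"),
		peer.ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34"),
	}
	for _, id := range ids {
		// hash each ID into the keyspace and print the resulting byte slice
		fmt.Println(rtable.ConvertPeerID(id))
	}
}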
Example #4
func (dht *IpfsDHT) closerPeersSingle(ctx context.Context, key key.Key, p peer.ID) ([]peer.ID, error) {
	pmes, err := dht.findPeerSingle(ctx, p, peer.ID(key))
	if err != nil {
		return nil, err
	}

	var out []peer.ID
	for _, pbp := range pmes.GetCloserPeers() {
		pid := peer.ID(pbp.GetId())
		if pid != dht.self { // don't add self
			dht.peerstore.AddAddrs(pid, pbp.Addresses(), peer.TempAddrTTL)
			out = append(out, pid)
		}
	}
	return out, nil
}
Example #5
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleFindPeer", p).Done()
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.ID

	// if looking for self... special case where we send it on CloserPeers.
	if peer.ID(pmes.GetKey()) == dht.self {
		closest = []peer.ID{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	}

	if closest == nil {
		log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
		return resp, nil
	}

	var withAddresses []peer.PeerInfo
	closestinfos := peer.PeerInfos(dht.peerstore, closest)
	for _, pi := range closestinfos {
		if len(pi.Addrs) > 0 {
			withAddresses = append(withAddresses, pi)
			log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
		}
	}

	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
	return resp, nil
}
Example #6
func ReadHeader(r io.Reader) (src, dst peer.ID, err error) {

	mhr := mh.NewReader(r)

	s, err := mhr.ReadMultihash()
	if err != nil {
		return "", "", err
	}

	d, err := mhr.ReadMultihash()
	if err != nil {
		return "", "", err
	}

	return peer.ID(s), peer.ID(d), nil
}
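A hedged usage sketch, assuming RandPeerID from Example #7 produces IDs whose bytes are raw multihashes (as the multihash reader above expects), and that the reader consumes exactly one multihash per call:

func ExampleReadHeader() {
	src, _ := RandPeerID() // see Example #7
	dst, _ := RandPeerID()

	// two multihashes written back to back form a readable header stream
	buf := append([]byte(src), []byte(dst)...)
	s, d, err := ReadHeader(bytes.NewReader(buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(s == src, d == dst) // expected: true true
}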
Example #7
// RandPeerID generates random "valid" peer IDs. It does not need to generate
// real keys, because it is as if we lost the key right away; it is fine to read
// randomness and hash it. To generate proper keys and a corresponding PeerID, use:
//  sk, pk, _ := testutil.RandKeyPair()
//  id, _ := peer.IDFromPublicKey(pk)
func RandPeerID() (peer.ID, error) {
	buf := make([]byte, 16)
	if _, err := io.ReadFull(u.NewTimeSeededRand(), buf); err != nil {
		return "", err
	}
	h := u.Hash(buf)
	return peer.ID(h), nil
}
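For comparison, a minimal sketch of the key-backed route the comment above points to, assuming the testutil.RandKeyPair and peer.IDFromPublicKey calls quoted there:

func RandKeyBackedPeerID() (peer.ID, error) {
	// generate a throwaway key pair; real code would keep the secret key around
	_, pk, err := testutil.RandKeyPair()
	if err != nil {
		return "", err
	}
	// derive the ID from the public key rather than from raw randomness
	return peer.IDFromPublicKey(pk)
}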
Example #8
func newEngine(ctx context.Context, idStr string) peerAndEngine {
	return peerAndEngine{
		Peer: peer.ID(idStr),
		//Strategy: New(true),
		Engine: NewEngine(ctx,
			blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))),
	}
}
Example #9
// ID returns the ID of a given Conn.
func ID(c Conn) string {
	l := fmt.Sprintf("%s/%s", c.LocalMultiaddr(), c.LocalPeer().Pretty())
	r := fmt.Sprintf("%s/%s", c.RemoteMultiaddr(), c.RemotePeer().Pretty())
	lh := u.Hash([]byte(l))
	rh := u.Hash([]byte(r))
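	// XOR is commutative, so both endpoints of the connection derive the same ID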
	ch := u.XOR(lh, rh)
	return peer.ID(ch).Pretty()
}
Example #10
func GetGraphJson(dinfo []*DiagInfo) []byte {
	out := make(map[string]interface{})
	names := make(map[string]int)
	var nodes []*node
	for _, di := range dinfo {
		names[di.ID] = len(nodes)
		val := di.BwIn + di.BwOut + 10
		// include the routing table key, for proper routing table display
		rtk := peer.ID(rtable.ConvertPeerID(peer.ID(di.ID))).Pretty()
		nodes = append(nodes, &node{Name: di.ID, Value: val, RtKey: rtk})
	}

	var links []*link
	linkexists := make([][]bool, len(nodes))
	for i := range linkexists {
		linkexists[i] = make([]bool, len(nodes))
	}

	for _, di := range dinfo {
		myid := names[di.ID]
		for _, con := range di.Connections {
			thisid := names[con.ID]
			if !linkexists[thisid][myid] {
				links = append(links, &link{
					Source: myid,
					Target: thisid,
					Value:  3,
				})
				linkexists[myid][thisid] = true
			}
		}
	}

	out["nodes"] = nodes
	out["links"] = links

	b, err := json.Marshal(out)
	if err != nil {
		panic(err)
	}

	return b
}
Example #11
func storeProvidersToPeerstore(ps peer.Peerstore, p peer.ID, providers []*dhtpb.Message_Peer) {
	for _, provider := range providers {
		providerID := peer.ID(provider.GetId())
		if providerID != p {
			log.Errorf("provider message came from third-party %s", p)
			continue
		}
		for _, maddr := range provider.Addresses() {
			// as a router, we want to store addresses for peers who have provided
			ps.AddAddr(p, maddr, peer.AddressTTL)
		}
	}
}
Example #12
func GetPublicKey(r IpfsRouting, ctx context.Context, pkhash []byte) (ci.PubKey, error) {
	if dht, ok := r.(PubKeyFetcher); ok {
		// If we have a DHT as our routing system, use optimized fetcher
		return dht.GetPublicKey(ctx, peer.ID(pkhash))
	} else {
		key := key.Key("/pk/" + string(pkhash))
		pkval, err := r.GetValue(ctx, key)
		if err != nil {
			return nil, err
		}

		// get PublicKey from node.Data
		return ci.UnmarshalPublicKey(pkval)
	}
}
Example #13
func TestCallbacksWork(t *testing.T) {
	fake := new(FakeStream)

	var sent int64
	var recv int64

	sentCB := func(n int64, proto protocol.ID, p peer.ID) {
		sent += n
	}

	recvCB := func(n int64, proto protocol.ID, p peer.ID) {
		recv += n
	}

	ms := newMeteredStream(fake, protocol.ID("TEST"), peer.ID("PEER"), recvCB, sentCB)

	toWrite := int64(100000)
	toRead := int64(100000)

	fake.ReadBuf = io.LimitReader(u.NewTimeSeededRand(), toRead)
	writeData := io.LimitReader(u.NewTimeSeededRand(), toWrite)

	n, err := io.Copy(ms, writeData)
	if err != nil {
		t.Fatal(err)
	}

	if n != toWrite {
		t.Fatal("incorrect write amount")
	}

	if toWrite != sent {
		t.Fatal("incorrectly reported writes", toWrite, sent)
	}

	n, err = io.Copy(ioutil.Discard, ms)
	if err != nil {
		t.Fatal(err)
	}

	if n != toRead {
		t.Fatal("incorrect read amount")
	}

	if toRead != recv {
		t.Fatal("incorrectly reported reads")
	}
}
Example #14
func verify(ps peer.Peerstore, r *dhtpb.Record) error {
	v := make(record.Validator)
	v["pk"] = record.PublicKeyValidator
	p := peer.ID(r.GetAuthor())
	pk := ps.PubKey(p)
	if pk == nil {
		return fmt.Errorf("do not have public key for %s", p)
	}
	if err := record.CheckRecordSig(r, pk); err != nil {
		return err
	}
	if err := v.VerifyRecord(r); err != nil {
		return err
	}
	return nil
}
Example #15
func (n *IpfsNode) loadID() error {
	if n.Identity != "" {
		return errors.New("identity already loaded")
	}

	cid := n.Repo.Config().Identity.PeerID
	if cid == "" {
		return errors.New("Identity was not set in config (was ipfs init run?)")
	}
	if len(cid) == 0 {
		return errors.New("No peer ID in config! (was ipfs init run?)")
	}

	n.Identity = peer.ID(b58.Decode(cid))
	return nil
}
Example #16
// verifyRecordLocally attempts to verify a record. if we do not have the public
// key, we fail. we do not search the dht.
func (dht *IpfsDHT) verifyRecordLocally(r *pb.Record) error {

	if len(r.Signature) > 0 {
		// First, validate the signature
		p := peer.ID(r.GetAuthor())
		pk := dht.peerstore.PubKey(p)
		if pk == nil {
			return fmt.Errorf("do not have public key for %s", p)
		}

		if err := record.CheckRecordSig(r, pk); err != nil {
			return err
		}
	}

	return dht.Validator.VerifyRecord(r)
}
Example #17
// verifyRecordOnline verifies a record, searching the DHT for the public key
// if necessary. The reason for keeping this separate from verifyRecordLocally is
// that retrieving arbitrary public keys from the DHT as a result of passively
// receiving records (e.g. through a PUT_VALUE or ADD_PROVIDER) can cause a
// massive amplification attack on the DHT. Use with care.
func (dht *IpfsDHT) verifyRecordOnline(ctx context.Context, r *pb.Record) error {

	if len(r.Signature) > 0 {
		// get the public key, search for it if necessary.
		p := peer.ID(r.GetAuthor())
		pk, err := dht.GetPublicKey(ctx, p)
		if err != nil {
			return err
		}

		err = record.CheckRecordSig(r, pk)
		if err != nil {
			return err
		}
	}

	return dht.Validator.VerifyRecord(r)
}
Example #18
// Looks for race conditions in table operations. For a more 'certain'
// test, increase the loop counter from 1000 to a much higher number
// and set GOMAXPROCS above 1
func TestTableMultithreaded(t *testing.T) {
	local := peer.ID("localPeer")
	m := peer.NewMetrics()
	tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m)
	var peers []peer.ID
	for i := 0; i < 500; i++ {
		peers = append(peers, tu.RandPeerIDFatal(t))
	}

	done := make(chan struct{})
	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Update(peers[n])
		}
		done <- struct{}{}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Update(peers[n])
		}
		done <- struct{}{}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Find(peers[n])
		}
		done <- struct{}{}
	}()
	<-done
	<-done
	<-done
}
Example #19
	Arguments: []cmds.Argument{
		cmds.StringArg("peerid", false, false, "peer.ID of node to look up").EnableStdin(),
	},
	Options: []cmds.Option{
		cmds.StringOption("format", "f", "optional output format"),
	},
	Run: func(req cmds.Request, res cmds.Response) {
		node, err := req.InvocContext().GetNode()
		if err != nil {
			res.SetError(err, cmds.ErrNormal)
			return
		}

		var id peer.ID
		if len(req.Arguments()) > 0 {
			id = peer.ID(b58.Decode(req.Arguments()[0]))
			if len(id) == 0 {
				res.SetError(cmds.ClientError("Invalid peer id"), cmds.ErrClient)
				return
			}
		} else {
			id = node.Identity
		}

		if id == node.Identity {
			output, err := printSelf(node)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
			res.SetOutput(output)
Example #20
func (dht *IpfsDHT) checkLocalDatastore(k key.Key) (*pb.Record, error) {
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := k.DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	if err == ds.ErrNotFound {
		return nil, nil
	}

	// if we got an unexpected error, bail.
	if err != nil {
		return nil, err
	}

	// if we have the value, send it back
	log.Debugf("%s handleGetValue success!", dht.self)

	byts, ok := iVal.([]byte)
	if !ok {
		return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
	}

	rec := new(pb.Record)
	err = proto.Unmarshal(byts, rec)
	if err != nil {
		log.Debug("Failed to unmarshal dht record from datastore")
		return nil, err
	}

	// if it's our record, don't bother checking the times on it
	if peer.ID(rec.GetAuthor()) == dht.self {
		return rec, nil
	}

	var recordIsBad bool
	recvtime, err := u.ParseRFC3339(rec.GetTimeReceived())
	if err != nil {
		log.Info("either no receive time set on record, or it was invalid: ", err)
		recordIsBad = true
	}

	if time.Now().Sub(recvtime) > MaxRecordAge {
		log.Debug("old record found, tossing.")
		recordIsBad = true
	}

	// NOTE: we do not verify the record here beyond checking these timestamps.
	// we put the burden of checking the records on the requester as checking a record
	// may be computationally expensive

	if recordIsBad {
		err := dht.datastore.Delete(dskey)
		if err != nil {
			log.Error("Failed to delete bad record from datastore: ", err)
		}

		return nil, nil // can treat this as not having the record at all
	}

	return rec, nil
}
Example #21
// runBootstrap builds up a list of peers by requesting random peer IDs
func (dht *IpfsDHT) runBootstrap(ctx context.Context, cfg BootstrapConfig) error {
	bslog := func(msg string) {
		log.Debugf("DHT %s dhtRunBootstrap %s -- routing table size: %d", dht.self, msg, dht.routingTable.Size())
	}
	bslog("start")
	defer bslog("end")
	defer log.EventBegin(ctx, "dhtRunBootstrap").Done()

	var merr u.MultiErr

	randomID := func() peer.ID {
		// 16 random bytes is not a valid peer id. it may be fine because
		// the dht will rehash to its own keyspace anyway.
		id := make([]byte, 16)
		rand.Read(id)
		id = u.Hash(id)
		return peer.ID(id)
	}

	// bootstrap sequentially, as results will compound
	ctx, cancel := context.WithTimeout(ctx, cfg.Timeout)
	defer cancel()
	runQuery := func(ctx context.Context, id peer.ID) {
		p, err := dht.FindPeer(ctx, id)
		if err == routing.ErrNotFound {
			// this isn't an error. this is precisely what we expect.
		} else if err != nil {
			merr = append(merr, err)
		} else {
			// woah, actually found a peer with that ID? this shouldn't happen normally
			// (as the ID we use is not a real ID). this is an odd error worth logging.
			err := fmt.Errorf("Bootstrap peer error: Actually FOUND peer. (%s, %s)", id, p)
			log.Warningf("%s", err)
			merr = append(merr, err)
		}
	}

	sequential := true
	if sequential {
		// these should be parallel normally. but can make them sequential for debugging.
		// note that the core/bootstrap context deadline should be extended too for that.
		for i := 0; i < cfg.Queries; i++ {
			id := randomID()
			log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, cfg.Queries, id)
			runQuery(ctx, id)
		}

	} else {
		// note on parallelism here: the context is passed in to the queries, so they
		// **should** exit when it exceeds, making this function exit on ctx cancel.
		// normally, we should be selecting on ctx.Done() here too, but this gets
		// complicated to do with WaitGroup, and doesn't wait for the children to exit.
		var wg sync.WaitGroup
		for i := 0; i < cfg.Queries; i++ {
			wg.Add(1)
			go func(i int) { // pass i to avoid capturing the loop variable in the log below
				defer wg.Done()

				id := randomID()
				log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, cfg.Queries, id)
				runQuery(ctx, id)
			}(i)
		}
		wg.Wait()
	}

	if len(merr) > 0 {
		return merr
	}
	return nil
}
Example #22
func (s *Server) handleMessage(
	ctx context.Context, p peer.ID, req *dhtpb.Message) (peer.ID, *dhtpb.Message) {

	defer log.EventBegin(ctx, "routingMessageReceived", req, p).Done()

	var response = dhtpb.NewMessage(req.GetType(), req.GetKey(), req.GetClusterLevel())
	switch req.GetType() {

	case dhtpb.Message_GET_VALUE:
		rawRecord, err := getRoutingRecord(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.Record = rawRecord
		return p, response

	case dhtpb.Message_PUT_VALUE:
		// FIXME: verify complains that the peer's ID is not present in the
		// peerstore. Mocknet problem?
		// if err := verify(s.peerstore, req.GetRecord()); err != nil {
		// 	log.Event(ctx, "validationFailed", req, p)
		// 	return "", nil
		// }
		putRoutingRecord(s.routingBackend, key.Key(req.GetKey()), req.GetRecord())
		return p, req

	case dhtpb.Message_FIND_NODE:
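		// note: this p refers to the peer being looked up, shadowing the requesting peer p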
		p := s.peerstore.PeerInfo(peer.ID(req.GetKey()))
		pri := []dhtpb.PeerRoutingInfo{
			{
				PeerInfo: p,
				// Connectedness: TODO
			},
		}
		response.CloserPeers = dhtpb.PeerRoutingInfosToPBPeers(pri)
		return p.ID, response

	case dhtpb.Message_ADD_PROVIDER:
		for _, provider := range req.GetProviderPeers() {
			providerID := peer.ID(provider.GetId())
			if providerID == p {
				store := []*dhtpb.Message_Peer{provider}
				storeProvidersToPeerstore(s.peerstore, p, store)
				if err := putRoutingProviders(s.routingBackend, key.Key(req.GetKey()), store); err != nil {
					return "", nil
				}
			} else {
				log.Event(ctx, "addProviderBadRequest", p, req)
			}
		}
		return "", nil

	case dhtpb.Message_GET_PROVIDERS:
		providers, err := getRoutingProviders(s.routingBackend, key.Key(req.GetKey()))
		if err != nil {
			return "", nil
		}
		response.ProviderPeers = providers
		return p, response

	case dhtpb.Message_PING:
		return p, req
	default:
	}
	return "", nil
}
Example #23
File: id.go Project: avbalu/go-ipfs
			return
		}

		if len(req.Arguments()) == 0 {
			output, err := printSelf(node)
			if err != nil {
				res.SetError(err, cmds.ErrNormal)
				return
			}
			res.SetOutput(output)
			return
		}

		pid := req.Arguments()[0]

		id := peer.ID(b58.Decode(pid))
		if len(id) == 0 {
			res.SetError(cmds.ClientError("Invalid peer id"), cmds.ErrClient)
			return
		}

		// TODO handle offline mode with polymorphism instead of conditionals
		if !node.OnlineMode() {
			res.SetError(errors.New(offlineIdErrorMessage), cmds.ErrClient)
			return
		}

		p, err := node.Routing.FindPeer(req.Context().Context, id)
		if err == kb.ErrLookupFailure {
			res.SetError(errors.New(offlineIdErrorMessage), cmds.ErrClient)
			return
Example #24
func newPeerTime(t time.Time) peer.ID {
	s := fmt.Sprintf("hmmm time: %v", t)
	h := u.Hash([]byte(s))
	return peer.ID(h)
}
Example #25
// PBPeerToPeerInfo turns a *Message_Peer into its peer.PeerInfo counterpart
func PBPeerToPeerInfo(pbp *Message_Peer) peer.PeerInfo {
	return peer.PeerInfo{
		ID:    peer.ID(pbp.GetId()),
		Addrs: pbp.Addresses(),
	}
}