func TestValueGetSet(t *testing.T) {
	// t.Skip("skipping test to debug another")

	ctx := context.Background()
	u.Debug = false
	addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/11235")
	if err != nil {
		t.Fatal(err)
	}
	addrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/15679")
	if err != nil {
		t.Fatal(err)
	}

	peerA := makePeer(addrA)
	peerB := makePeer(addrB)

	dhtA := setupDHT(ctx, t, peerA)
	dhtB := setupDHT(ctx, t, peerB)

	vf := func(u.Key, []byte) error {
		return nil
	}
	dhtA.Validators["v"] = vf
	dhtB.Validators["v"] = vf

	defer dhtA.Close()
	defer dhtB.Close()
	defer dhtA.dialer.(inet.Network).Close()
	defer dhtB.dialer.(inet.Network).Close()

	_, err = dhtA.Connect(ctx, peerB)
	if err != nil {
		t.Fatal(err)
	}

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	dhtA.PutValue(ctxT, "/v/hello", []byte("world"))

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err := dhtA.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err = dhtB.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}
}
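The "/v/hello" key above is namespaced: the path segment after the first slash ("v") selects which entry in Validators vets the value. A minimal sketch of that dispatch, assuming keys of the form /<namespace>/<rest>; the helper and its signature are illustrative, not the DHT's actual lookup:

func validatorFor(validators map[string]func(u.Key, []byte) error, key string) (func(u.Key, []byte) error, bool) {
	// "/v/hello" splits into ["", "v", "hello"]; the middle segment keys the map.
	parts := strings.SplitN(key, "/", 3)
	if len(parts) < 3 || parts[1] == "" {
		return nil, false
	}
	vf, ok := validators[parts[1]]
	return vf, ok
}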
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {

	net := tn.VirtualNetwork()
	rs := mock.VirtualRoutingServer()
	block := blocks.NewBlock([]byte("block"))
	g := NewSessionGenerator(net, rs)

	hasBlock := g.Next()

	if err := hasBlock.blockstore.Put(block); err != nil {
		t.Fatal(err)
	}
	if err := hasBlock.exchange.HasBlock(context.Background(), *block); err != nil {
		t.Fatal(err)
	}

	wantsBlock := g.Next()

	ctx, _ := context.WithTimeout(context.Background(), time.Second)
	received, err := wantsBlock.exchange.Block(ctx, block.Key())
	if err != nil {
		t.Log(err)
		t.Fatal("Expected to succeed")
	}

	if !bytes.Equal(block.Data, received.Data) {
		t.Fatal("Data doesn't match")
	}
}
// newSingleConn constructs a new connection wrapping the given multiaddr connection
func newSingleConn(ctx context.Context, local, remote peer.Peer,
	maconn manet.Conn) (Conn, error) {

	conn := &singleConn{
		local:  local,
		remote: remote,
		maconn: maconn,
		msgio:  newMsgioPipe(10, BufferPool),
	}

	conn.ContextCloser = ctxc.NewContextCloser(ctx, conn.close)

	log.Infof("newSingleConn: %v to %v", local, remote)

	// setup the various io goroutines
	conn.Children().Add(1)
	go func() {
		conn.msgio.outgoing.WriteTo(maconn)
		conn.Children().Done()
	}()
	conn.Children().Add(1)
	go func() {
		conn.msgio.incoming.ReadFrom(maconn, MaxMessageSize)
		conn.Children().Done()
	}()

	// version handshake
	ctxT, _ := context.WithTimeout(ctx, HandshakeTimeout)
	if err := Handshake1(ctxT, conn); err != nil {
		conn.Close()
		return nil, fmt.Errorf("Handshake1 failed: %s", err)
	}

	return conn, nil
}
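The constructor above ties each IO goroutine to conn.Children() so that closing the connection can wait for both pumps to exit. A stdlib-only sketch of that bookkeeping with sync.WaitGroup (names are illustrative; the real ContextCloser layers context plumbing on top of this):

type pumps struct {
	wg sync.WaitGroup
}

// start registers both IO goroutines before launching them, mirroring the
// Children().Add(1) / Children().Done() pairs in newSingleConn.
func (p *pumps) start(read, write func()) {
	p.wg.Add(2)
	go func() { defer p.wg.Done(); write() }()
	go func() { defer p.wg.Done(); read() }()
}

// wait blocks until both pumps have exited, as Close would.
func (p *pumps) wait() { p.wg.Wait() }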
func TestServiceRequestTimeout(t *testing.T) {
	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond)
	s1 := NewService(ctx, &ReverseHandler{})
	s2 := NewService(ctx, &ReverseHandler{})
	peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa")

	// patch services together
	go func() {
		for {
			<-time.After(time.Millisecond)
			select {
			case m := <-s1.GetPipe().Outgoing:
				s2.GetPipe().Incoming <- m
			case m := <-s2.GetPipe().Outgoing:
				s1.GetPipe().Incoming <- m
			case <-ctx.Done():
				return
			}
		}
	}()

	m1 := msg.New(peer1, []byte("beep"))
	m2, err := s1.SendRequest(ctx, m1)
	if err == nil || m2 != nil {
		t.Error("should've timed out")
	}
}
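The anonymous goroutine above is a simple message bridge: it shuttles whatever either service emits into the other's inbox until the context expires. A stdlib-only sketch of that shape, with plain byte-slice channels standing in for the services' pipes:

func bridge(ctx context.Context, aOut <-chan []byte, aIn chan<- []byte,
	bOut <-chan []byte, bIn chan<- []byte) {
	for {
		select {
		case m := <-aOut:
			bIn <- m // a production bridge would also select on ctx.Done() here
		case m := <-bOut:
			aIn <- m
		case <-ctx.Done():
			return
		}
	}
	// The test above additionally rate-limits each iteration with
	// <-time.After(time.Millisecond); omitted here for brevity.
}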
func TestProvides(t *testing.T) {
	// t.Skip("skipping test to debug another")
	ctx := context.Background()

	u.Debug = false

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].dialer.(inet.Network).Close()
		}
	}()

	_, err := dhts[0].Connect(ctx, peers[1])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	bits, err := dhts[3].getLocal(u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bits, []byte("world")) {
		t.Fatal("value stored locally does not round-trip")
	}

	err = dhts[3].Provide(ctx, u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	provchan := dhts[0].FindProvidersAsync(ctxT, u.Key("hello"), 1)

	after := time.After(time.Second)
	select {
	case prov := <-provchan:
		if prov == nil {
			t.Fatal("Got back nil provider")
		}
	case <-after:
		t.Fatal("Did not get a provider back.")
	}
}
func TestPing(t *testing.T) {
	// t.Skip("skipping test to debug another")
	ctx := context.Background()
	u.Debug = false
	addrA, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/2222")
	if err != nil {
		t.Fatal(err)
	}
	addrB, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5678")
	if err != nil {
		t.Fatal(err)
	}

	peerA := makePeer(addrA)
	peerB := makePeer(addrB)

	dhtA := setupDHT(ctx, t, peerA)
	dhtB := setupDHT(ctx, t, peerB)

	defer dhtA.Close()
	defer dhtB.Close()
	defer dhtA.dialer.(inet.Network).Close()
	defer dhtB.dialer.(inet.Network).Close()

	_, err = dhtA.Connect(ctx, peerB)
	if err != nil {
		t.Fatal(err)
	}

	//Test that we can ping the node
	ctxT, _ := context.WithTimeout(ctx, 100*time.Millisecond)
	err = dhtA.Ping(ctxT, peerB)
	if err != nil {
		t.Fatal(err)
	}

	ctxT, _ = context.WithTimeout(ctx, 100*time.Millisecond)
	err = dhtB.Ping(ctxT, peerA)
	if err != nil {
		t.Fatal(err)
	}
}
func TestLayeredGet(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	u.Debug = false
	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].dialer.(inet.Network).Close()
		}
	}()

	_, err := dhts[0].Connect(ctx, peers[1])
	if err != nil {
		t.Fatalf("Failed to connect: %s", err)
	}

	_, err = dhts[1].Connect(ctx, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("/v/hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].Provide(ctx, u.Key("/v/hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	val, err := dhts[0].GetValue(ctxT, u.Key("/v/hello"))
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatal("Got incorrect value.")
	}

}
func TestCarryOnWhenDeadlineExpires(t *testing.T) {

	impossibleDeadline := time.Nanosecond
	fastExpiringCtx, _ := context.WithTimeout(context.Background(), impossibleDeadline)

	n := New()
	defer n.Shutdown()
	block := blocks.NewBlock([]byte("A Missed Connection"))
	blockChannel := n.Subscribe(fastExpiringCtx, block.Key())

	assertBlockChannelNil(t, blockChannel)
}
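assertBlockChannelNil is defined elsewhere in the package; a plausible shape for it, inferred from the test's intent (hypothetical, and the channel's element type, pointer vs. value, depends on the package vintage): once the subscriber's context expires, the notifier should close the channel, so the first receive reports not-ok.

func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) {
	_, ok := <-blockChannel
	if ok {
		t.Fatal("expected channel to be closed with no blocks delivered")
	}
}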
func ExampleWithTimeout() {
	// Pass a context with a timeout to tell a blocking function that it
	// should abandon its work after the timeout elapses.
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}
	// Output:
	// context deadline exceeded
}
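One nit worth flagging: the snippets on this page uniformly discard the CancelFunc that context.WithTimeout returns, which leaves the deadline timer running until it fires. Later versions of the context package recommend retaining it and deferring the call; a sketch of the same example with that change (the function name is illustrative):

func ExampleWithTimeoutAndCancel() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel() // releases the timer as soon as we return
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err())
	}
	// Output:
	// context deadline exceeded
}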
// Get retrieves a node from the dagService, fetching the block in the BlockService
func (n *dagService) Get(k u.Key) (*Node, error) {
	if n == nil {
		return nil, fmt.Errorf("dagService is nil")
	}

	ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
	b, err := n.Blocks.GetBlock(ctx, k)
	if err != nil {
		return nil, err
	}

	return Decoded(b.Data)
}
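Get derives its five-second window from context.TODO(), so no caller deadline can flow through. Worth knowing when threading a real caller context instead: WithTimeout never extends a parent's deadline, it only tightens it. A small self-contained illustration (assumes the standard-library context, time, and fmt packages):

func deadlineClamping() {
	parent, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// Asking for 5s under a 1s parent still yields (at most) the 1s deadline.
	child, cancel2 := context.WithTimeout(parent, 5*time.Second)
	defer cancel2()
	d, _ := child.Deadline()
	fmt.Println(time.Until(d) <= time.Second) // true: the parent's 1s wins
}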
func TestGetBlockTimeout(t *testing.T) {

	net := tn.VirtualNetwork()
	rs := mock.VirtualRoutingServer()
	g := NewSessionGenerator(net, rs)

	self := g.Next()

	ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond)
	block := blocks.NewBlock([]byte("block"))
	_, err := self.exchange.Block(ctx, block.Key())

	if err != context.DeadlineExceeded {
		t.Fatal("Expected DeadlineExceeded error")
	}
}
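The sentinel comparison err != context.DeadlineExceeded works here because nothing wraps the error on the way out. With the modern standard library, where errors are often wrapped, errors.Is is the robust check; a sketch of a reusable helper under that assumption:

func expectDeadlineExceeded(t *testing.T, err error) {
	t.Helper()
	if !errors.Is(err, context.DeadlineExceeded) {
		t.Fatalf("expected context.DeadlineExceeded, got %v", err)
	}
}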
func TestFindPeer(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	u.Debug = false

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].dialer.(inet.Network).Close()
		}
	}()

	_, err := dhts[0].Connect(ctx, peers[1])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[3])
	if err != nil {
		t.Fatal(err)
	}

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	p, err := dhts[0].FindPeer(ctxT, peers[2].ID())
	if err != nil {
		t.Fatal(err)
	}

	if p == nil {
		t.Fatal("Failed to find peer.")
	}

	if !p.ID().Equal(peers[2].ID()) {
		t.Fatal("Didnt find expected peer.")
	}
}
func TestProviderForKeyButNetworkCannotFind(t *testing.T) {

	net := tn.VirtualNetwork()
	rs := mock.VirtualRoutingServer()
	g := NewSessionGenerator(net, rs)

	block := blocks.NewBlock([]byte("block"))
	rs.Announce(peer.WithIDString("testing"), block.Key()) // but not on network

	solo := g.Next()

	ctx, _ := context.WithTimeout(context.Background(), time.Nanosecond)
	_, err := solo.exchange.Block(ctx, block.Key())

	if err != context.DeadlineExceeded {
		t.Fatal("Expected DeadlineExceeded error")
	}
}
func TestBlocks(t *testing.T) {
	d := ds.NewMapDatastore()
	bs, err := NewBlockService(d, nil)
	if err != nil {
		t.Error("failed to construct block service", err)
		return
	}

	b := blocks.NewBlock([]byte("beep boop"))
	h := u.Hash([]byte("beep boop"))
	if !bytes.Equal(b.Multihash, h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != u.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
	b2, err := bs.GetBlock(ctx, b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data, b2.Data) {
		t.Error("Block data is not equal.")
	}
}
func (dht *IpfsDHT) getPublicKey(pid peer.ID) (ci.PubKey, error) {
	log.Debug("getPublicKey for: %s", pid)
	p, err := dht.peerstore.Get(pid)
	if err == nil {
		return p.PubKey(), nil
	}

	log.Debug("not in peerstore, searching dht.")
	ctxT, _ := context.WithTimeout(dht.ContextCloser.Context(), time.Second*5)
	val, err := dht.GetValue(ctxT, u.Key("/pk/"+string(pid)))
	if err != nil {
		log.Warning("Failed to find requested public key.")
		return nil, err
	}

	pubkey, err := ci.UnmarshalPublicKey(val)
	if err != nil {
		log.Errorf("Failed to unmarshal public key: %s", err)
		return nil, err
	}
	return pubkey, nil
}
// PingRoutine periodically pings nearest neighbors.
func (dht *IpfsDHT) PingRoutine(t time.Duration) {
	defer dht.Children().Done()

	tick := time.Tick(t)
	for {
		select {
		case <-tick:
			id := make([]byte, 16)
			rand.Read(id)
			peers := dht.routingTables[0].NearestPeers(kb.ConvertKey(u.Key(id)), 5)
			for _, p := range peers {
				ctx, _ := context.WithTimeout(dht.Context(), time.Second*5)
				err := dht.Ping(ctx, p)
				if err != nil {
					log.Errorf("Ping error: %s", err)
				}
			}
		case <-dht.Closing():
			return
		}
	}
}
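One caveat in PingRoutine: time.Tick never stops its underlying ticker, which matters for a goroutine that can exit via Closing(). A stdlib-only sketch of the same loop shape with a stoppable ticker (names are illustrative; pingOnce stands in for dht.Ping):

func pingLoop(done <-chan struct{}, interval time.Duration, pingOnce func(context.Context)) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop() // unlike time.Tick, this releases the ticker on exit
	for {
		select {
		case <-ticker.C:
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			pingOnce(ctx)
			cancel()
		case <-done:
			return
		}
	}
}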
// connSetup takes a new connection, performs the IPFS handshake (handshake3)
// and then adds it to the appropriate MultiConn.
func (s *Swarm) connSetup(c conn.Conn) (conn.Conn, error) {
	if c == nil {
		return nil, errors.New("Tried to start nil connection.")
	}

	log.Event(context.TODO(), "connSetupBegin", c.LocalPeer(), c.RemotePeer())

	// add address of connection to Peer. Maybe it should happen in connSecure.
	// NOT adding this address here, because the incoming address in TCP
	// is an EPHEMERAL address, and not the address we want to keep around.
	// addresses should be figured out through the DHT.
	// c.Remote.AddAddress(c.Conn.RemoteMultiaddr())

	// handshake3
	ctxT, _ := context.WithTimeout(c.Context(), conn.HandshakeTimeout)
	h3result, err := conn.Handshake3(ctxT, c)
	if err != nil {
		c.Close()
		return nil, fmt.Errorf("Handshake3 failed: %s", err)
	}

	// check for nats. you know, just in case.
	if h3result.LocalObservedAddress != nil {
		s.checkNATWarning(h3result.LocalObservedAddress)
	} else {
		log.Warningf("Received nil observed address from %s", c.RemotePeer())
	}

	// add to conns
	mc, err := s.peerMultiConn(c.RemotePeer())
	if err != nil {
		c.Close()
		return nil, err
	}
	mc.Add(c)
	log.Event(context.TODO(), "connSetupSuccess", c.LocalPeer(), c.RemotePeer())
	return c, nil
}
func TestProvidesAsync(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	u.Debug = false

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			defer dhts[i].dialer.(inet.Network).Close()
		}
	}()

	_, err := dhts[0].Connect(ctx, peers[1])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	_, err = dhts[1].Connect(ctx, peers[3])
	if err != nil {
		t.Fatal(err)
	}

	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
	if err != nil {
		t.Fatal(err)
	}

	bits, err := dhts[3].getLocal(u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bits, []byte("world")) {
		t.Fatal("value stored locally does not round-trip")
	}

	err = dhts[3].Provide(ctx, u.Key("hello"))
	if err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Millisecond * 60)

	ctxT, _ := context.WithTimeout(ctx, time.Millisecond*300)
	provs := dhts[0].FindProvidersAsync(ctxT, u.Key("hello"), 5)
	select {
	case p, ok := <-provs:
		if !ok {
			t.Fatal("Provider channel was closed...")
		}
		if p == nil {
			t.Fatal("Got back nil provider!")
		}
		if !p.ID().Equal(dhts[3].self.ID()) {
			t.Fatalf("got a provider, but not the right one. %s", p)
		}
	case <-ctxT.Done():
		t.Fatal("Didnt get back providers")
	}
}
// TODO simplify this test. get to the _essence_!
func TestSendToWantingPeer(t *testing.T) {
	net := tn.VirtualNetwork()
	rs := mock.VirtualRoutingServer()
	sg := NewSessionGenerator(net, rs)
	bg := NewBlockGenerator()

	me := sg.Next()
	w := sg.Next()
	o := sg.Next()

	t.Logf("Session %v\n", me.peer)
	t.Logf("Session %v\n", w.peer)
	t.Logf("Session %v\n", o.peer)

	alpha := bg.Next()

	const timeout = 1 * time.Millisecond // FIXME don't depend on time

	t.Logf("Peer %v attempts to get %v. NB: not available\n", w.peer, alpha.Key())
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	_, err := w.exchange.Block(ctx, alpha.Key())
	if err == nil {
		t.Fatalf("Expected %v to NOT be available", alpha.Key())
	}

	beta := bg.Next()
	t.Logf("Peer %v announes availability  of %v\n", w.peer, beta.Key())
	ctx, _ = context.WithTimeout(context.Background(), timeout)
	if err := w.blockstore.Put(&beta); err != nil {
		t.Fatal(err)
	}
	w.exchange.HasBlock(ctx, beta)

	t.Logf("%v gets %v from %v and discovers it wants %v\n", me.peer, beta.Key(), w.peer, alpha.Key())
	ctx, _ = context.WithTimeout(context.Background(), timeout)
	if _, err := me.exchange.Block(ctx, beta.Key()); err != nil {
		t.Fatal(err)
	}

	t.Logf("%v announces availability of %v\n", o.peer, alpha.Key())
	ctx, _ = context.WithTimeout(context.Background(), timeout)
	if err := o.blockstore.Put(&alpha); err != nil {
		t.Fatal(err)
	}
	o.exchange.HasBlock(ctx, alpha)

	t.Logf("%v requests %v\n", me.peer, alpha.Key())
	ctx, _ = context.WithTimeout(context.Background(), timeout)
	if _, err := me.exchange.Block(ctx, alpha.Key()); err != nil {
		t.Fatal(err)
	}

	t.Logf("%v should now have %v\n", w.peer, alpha.Key())
	block, err := w.blockstore.Get(alpha.Key())
	if err != nil {
		t.Fatal("Should not have received an error")
	}
	if block.Key() != alpha.Key() {
		t.Fatal("Expected to receive alpha from me")
	}
}
// If less than K nodes are in the entire network, it should fail when we make
// a GET rpc and nobody has the value
func TestLessThanKResponses(t *testing.T) {
	// t.Skip("skipping test because it makes a lot of output")

	ctx := context.Background()
	u.Debug = false
	fn := &fauxNet{}
	fs := &fauxSender{}
	local := makePeer(nil)
	peerstore := peer.NewPeerstore()
	peerstore.Add(local)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())

	var ps []peer.Peer
	for i := 0; i < 5; i++ {
		ps = append(ps, _randPeer())
		d.Update(ctx, ps[i])
	}
	other := _randPeer()

	// Reply with random peers to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		switch pmes.GetType() {
		case pb.Message_GET_VALUE:
			resp := &pb.Message{
				Type:        pmes.Type,
				CloserPeers: pb.PeersToPBPeers([]peer.Peer{other}),
			}

			mes, err := msg.FromObject(mes.Peer(), resp)
			if err != nil {
				t.Error(err)
			}
			return mes
		default:
			panic("Shouldnt recieve this.")
		}

	})

	ctx, _ = context.WithTimeout(ctx, time.Second*30)
	_, err := d.GetValue(ctx, u.Key("hello"))
	if err != nil {
		switch err {
		case routing.ErrNotFound:
			//Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to recieve an error.")
}
func TestNotFound(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	fn := &fauxNet{}
	fs := &fauxSender{}

	local := makePeer(nil)
	peerstore := peer.NewPeerstore()
	peerstore.Add(local)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())

	var ps []peer.Peer
	for i := 0; i < 5; i++ {
		ps = append(ps, _randPeer())
		d.Update(ctx, ps[i])
	}

	// Reply with random peers to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		switch pmes.GetType() {
		case pb.Message_GET_VALUE:
			resp := &pb.Message{Type: pmes.Type}

			peers := []peer.Peer{}
			for i := 0; i < 7; i++ {
				peers = append(peers, _randPeer())
			}
			resp.CloserPeers = pb.PeersToPBPeers(peers)
			mes, err := msg.FromObject(mes.Peer(), resp)
			if err != nil {
				t.Error(err)
			}
			return mes
		default:
			panic("Shouldnt recieve this.")
		}

	})

	ctx, _ = context.WithTimeout(ctx, time.Second*5)
	v, err := d.GetValue(ctx, u.Key("hello"))
	log.Debugf("get value got %v", v)
	if err != nil {
		switch err {
		case routing.ErrNotFound:
			//Success!
			return
		case u.ErrTimeout:
			t.Fatal("Should not have gotten timeout!")
		default:
			t.Fatalf("Got unexpected error: %s", err)
		}
	}
	t.Fatal("Expected to recieve an error.")
}
func TestGetFailures(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()
	fn := &fauxNet{}
	fs := &fauxSender{}

	peerstore := peer.NewPeerstore()
	local := makePeer(nil)

	d := NewDHT(ctx, local, peerstore, fn, fs, ds.NewMapDatastore())
	other := makePeer(nil)
	d.Update(ctx, other)

	// This one should time out
	// u.POut("Timout Test\n")
	ctx1, _ := context.WithTimeout(context.Background(), time.Second)
	_, err := d.GetValue(ctx1, u.Key("test"))
	if err != nil {
		if err != context.DeadlineExceeded {
			t.Fatal("Got different error than we expected", err)
		}
	} else {
		t.Fatal("Did not get expected error!")
	}

	// u.POut("NotFound Test\n")
	// Reply with failures to every message
	fs.AddHandler(func(mes msg.NetMessage) msg.NetMessage {
		pmes := new(pb.Message)
		err := proto.Unmarshal(mes.Data(), pmes)
		if err != nil {
			t.Fatal(err)
		}

		resp := &pb.Message{
			Type: pmes.Type,
		}
		m, err := msg.FromObject(mes.Peer(), resp)
		if err != nil {
			t.Error(err)
		}
		return m
	})

	// This one should fail with NotFound
	ctx2, _ := context.WithTimeout(context.Background(), time.Second)
	_, err = d.GetValue(ctx2, u.Key("test"))
	if err != nil {
		if err != routing.ErrNotFound {
			t.Fatalf("Expected ErrNotFound, got: %s", err)
		}
	} else {
		t.Fatal("expected error, got none.")
	}

	fs.handlers = nil
	// Now we test this DHT's handleGetValue failure
	typ := pb.Message_GET_VALUE
	str := "hello"
	rec, err := d.makePutRecord(u.Key(str), []byte("blah"))
	if err != nil {
		t.Fatal(err)
	}
	req := pb.Message{
		Type:   &typ,
		Key:    &str,
		Record: rec,
	}

	// u.POut("handleGetValue Test\n")
	mes, err := msg.FromObject(other, &req)
	if err != nil {
		t.Error(err)
	}

	mes = d.HandleMessage(ctx, mes)

	pmes := new(pb.Message)
	err = proto.Unmarshal(mes.Data(), pmes)
	if err != nil {
		t.Fatal(err)
	}
	if pmes.GetRecord() != nil {
		t.Fatal("shouldnt have value")
	}
	if pmes.GetProviderPeers() != nil {
		t.Fatal("shouldnt have provider peers")
	}

}
		if err != nil {
			return nil, err
		}

		if len(req.Arguments()) == 0 {
			return printPeer(node.Identity)
		}

		pid := req.Arguments()[0]

		id := peer.ID(b58.Decode(pid))
		if len(id) == 0 {
			return nil, cmds.ClientError("Invalid peer id")
		}

		ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
		if node.Routing == nil {
			return nil, errors.New(offlineIdErrorMessage)
		}

		p, err := node.Routing.FindPeer(ctx, id)
		if err == kb.ErrLookupFailure {
			return nil, errors.New(offlineIdErrorMessage)
		}
		if err != nil {
			return nil, err
		}
		return printPeer(p)
	},
	Marshalers: cmds.MarshalerMap{
		cmds.Text: func(res cmds.Response) ([]byte, error) {