Example No. 1
// AddBlock adds a particular block to the service, putting it into the datastore.
func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) {
	k := b.Key()
	log.Debugf("blockservice: storing [%s] in datastore", k)
	// TODO(brian): define a block datastore with a Put method which accepts a
	// block parameter

	// check if we have it before adding. this is an extra read, but large writes
	// are more expensive.
	// TODO(jbenet) cheaper has. https://github.com/jbenet/go-datastore/issues/6
	has, err := s.Datastore.Has(k.DsKey())
	if err != nil {
		return k, err
	}
	if has {
		log.Debugf("blockservice: storing [%s] in datastore (already stored)", k)
	} else {
		log.Debugf("blockservice: storing [%s] in datastore", k)
		err := s.Datastore.Put(k.DsKey(), b.Data)
		if err != nil {
			return k, err
		}
	}

	if s.Remote != nil {
		ctx := context.TODO()
		err = s.Remote.HasBlock(ctx, *b)
	}
	return k, err
}
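
The has-before-put check above trades a cheap read for a potentially expensive rewrite of a large block. A minimal stand-alone sketch of that pattern, using a hypothetical Store interface (the names below are illustrative, not part of the go-ipfs API):

package example

// Store is a hypothetical key-value store, used only to illustrate the
// check-before-write pattern from AddBlock above.
type Store interface {
	Has(key string) (bool, error)
	Put(key string, value []byte) error
}

// putIfAbsent writes value under key only when the key is not already
// present, skipping a redundant (and possibly large) write.
func putIfAbsent(s Store, key string, value []byte) error {
	has, err := s.Has(key)
	if err != nil {
		return err
	}
	if has {
		return nil // already stored
	}
	return s.Put(key, value)
}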
Example No. 2
// Resolve implements Resolver. Uses the IPFS routing system to resolve SFS-like
// names.
func (r *routingResolver) Resolve(name string) (string, error) {
	log.Debugf("RoutingResolve: '%s'", name)
	ctx := context.TODO()
	hash, err := mh.FromB58String(name)
	if err != nil {
		log.Warning("RoutingResolve: bad input hash: [%s]\n", name)
		return "", err
	}
	// name should be a multihash. if it isn't, error out here.

	// use the routing system to get the name.
	// /ipns/<name>
	h := []byte("/ipns/" + string(hash))

	ipnsKey := u.Key(h)
	val, err := r.routing.GetValue(ctx, ipnsKey)
	if err != nil {
		log.Warning("RoutingResolve get failed.")
		return "", err
	}

	entry := new(pb.IpnsEntry)
	err = proto.Unmarshal(val, entry)
	if err != nil {
		return "", err
	}

	// name should be a public key retrievable from ipfs
	// /ipfs/<name>
	key := u.Key("/pk/" + string(hash))
	pkval, err := r.routing.GetValue(ctx, key)
	if err != nil {
		log.Warning("RoutingResolve PubKey Get failed.")
		return "", err
	}

	// get PublicKey from node.Data
	pk, err := ci.UnmarshalPublicKey(pkval)
	if err != nil {
		return "", err
	}
	hsh, _ := pk.Hash()
	log.Debugf("pk hash = %s", u.Key(hsh))

	// check sig with pk
	if ok, err := pk.Verify(ipnsEntryDataForSig(entry), entry.GetSignature()); err != nil || !ok {
		return "", fmt.Errorf("Invalid value. Not signed by PrivateKey corresponding to %v", pk)
	}

	// ok sig checks out. this is a valid name.
	return string(entry.GetValue()), nil
}
Example No. 3
// Get retrieves a node from the dagService, fetching the block in the BlockService
func (n *dagService) Get(k u.Key) (*Node, error) {
	if n == nil {
		return nil, fmt.Errorf("dagService is nil")
	}

	ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
	b, err := n.Blocks.GetBlock(ctx, k)
	if err != nil {
		return nil, err
	}

	return Decoded(b.Data)
}
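
The snippet above discards the CancelFunc returned by context.WithTimeout. With today's standard-library context package, the usual idiom is to keep it and defer it so the timer is released as soon as the call returns; a small sketch (the get callback is just an illustrative stand-in for a context-aware fetch such as Blocks.GetBlock):

package example

import (
	"context"
	"time"
)

// fetchWithTimeout bounds a single fetch to five seconds and releases
// the timeout's resources via defer cancel().
func fetchWithTimeout(get func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return get(ctx)
}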
Example No. 4
func runEncryptBenchmark(b *testing.B) {
	pstore := peer.NewPeerstore()
	ctx := context.TODO()
	bufsize := 1024 * 1024

	pa := getPeer(b)
	pb := getPeer(b)
	duplexa := pipes.NewDuplex(16)
	duplexb := pipes.NewDuplex(16)

	go bindDuplexNoCopy(duplexa, duplexb)

	var spb *SecurePipe
	var errb error
	done := make(chan struct{})
	go func() {
		// note: b.Fatal may only be called from the benchmark goroutine,
		// so record the error here and check it after <-done below.
		spb, errb = NewSecurePipe(ctx, bufsize, pb, pstore, duplexb)
		done <- struct{}{}
	}()

	spa, err := NewSecurePipe(ctx, bufsize, pa, pstore, duplexa)
	if err != nil {
		b.Fatal(err)
	}

	<-done
	if errb != nil {
		b.Fatal(errb)
	}

	go func() {
		for range spa.In {
			// Throw it all away,
			// all of your hopes and dreams
			// piped out to /dev/null...
			done <- struct{}{}
		}
	}()

	data := make([]byte, 1024*512)
	util.NewTimeSeededRand().Read(data)
	// Begin actual benchmarking
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		b.SetBytes(int64(len(data)))
		spb.Out <- data
		<-done
	}

}
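
The benchmark above follows the usual testing.B shape: build fixtures first, call b.ResetTimer, then record throughput with b.SetBytes so `go test -bench` reports MB/s. A self-contained sketch of that shape with no secure pipes involved (BenchmarkCopy is hypothetical):

package example

import (
	"crypto/rand"
	"testing"
)

// BenchmarkCopy measures raw copy throughput; setup happens before
// ResetTimer so it is excluded from the timing.
func BenchmarkCopy(b *testing.B) {
	data := make([]byte, 512*1024)
	if _, err := rand.Read(data); err != nil {
		b.Fatal(err)
	}
	dst := make([]byte, len(data))

	b.SetBytes(int64(len(data))) // bytes processed per iteration
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		copy(dst, data)
	}
}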
Example No. 5
// connSetup takes a new connection, performs the IPFS handshake (handshake3)
// and then adds it to the appropriate MultiConn.
func (s *Swarm) connSetup(c conn.Conn) (conn.Conn, error) {
	if c == nil {
		return nil, errors.New("Tried to start nil connection.")
	}

	log.Event(context.TODO(), "connSetupBegin", c.LocalPeer(), c.RemotePeer())

	// add address of connection to Peer. Maybe it should happen in connSecure.
	// NOT adding this address here, because the incoming address in TCP
	// is an EPHEMERAL address, and not the address we want to keep around.
	// addresses should be figured out through the DHT.
	// c.Remote.AddAddress(c.Conn.RemoteMultiaddr())

	// handshake3
	ctxT, _ := context.WithTimeout(c.Context(), conn.HandshakeTimeout)
	h3result, err := conn.Handshake3(ctxT, c)
	if err != nil {
		c.Close()
		return nil, fmt.Errorf("Handshake3 failed: %s", err)
	}

	// check for nats. you know, just in case.
	if h3result.LocalObservedAddress != nil {
		s.checkNATWarning(h3result.LocalObservedAddress)
	} else {
		log.Warningf("Received nil observed address from %s", c.RemotePeer())
	}

	// add to conns
	mc, err := s.peerMultiConn(c.RemotePeer())
	if err != nil {
		c.Close()
		return nil, err
	}
	mc.Add(c)
	log.Event(context.TODO(), "connSetupSuccess", c.LocalPeer(), c.RemotePeer())
	return c, nil
}
Example No. 6
// Publish implements Publisher. Accepts a keypair and a value,
// and publishes it out to the routing system
func (p *ipnsPublisher) Publish(k ci.PrivKey, value string) error {
	log.Debugf("namesys: Publish %s", value)

	// validate `value` is a ref (multihash)
	_, err := mh.FromB58String(value)
	if err != nil {
		log.Errorf("hash cast failed: %s", value)
		return fmt.Errorf("publish value must be str multihash. %v", err)
	}

	ctx := context.TODO()
	data, err := createRoutingEntryData(k, value)
	if err != nil {
		log.Error("entry creation failed.")
		return err
	}
	pubkey := k.GetPublic()
	pkbytes, err := pubkey.Bytes()
	if err != nil {
		log.Error("pubkey getbytes failed.")
		return err
	}

	nameb := u.Hash(pkbytes)
	namekey := u.Key("/pk/" + string(nameb))

	log.Debugf("Storing pubkey at: %s", namekey)
	// Store associated public key
	timectx, _ := context.WithDeadline(ctx, time.Now().Add(time.Second*4))
	err = p.routing.PutValue(timectx, namekey, pkbytes)
	if err != nil {
		return err
	}

	ipnskey := u.Key("/ipns/" + string(nameb))

	log.Debugf("Storing ipns entry at: %s", ipnskey)
	// Store ipns entry at "/ipns/"+b58(h(pubkey))
	timectx, _ = context.WithDeadline(ctx, time.Now().Add(time.Second*4))
	err = p.routing.PutValue(timectx, ipnskey, data)
	if err != nil {
		return err
	}

	return nil
}
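
Publish stores two records, each under its own short deadline. A sketch of that per-call-deadline pattern against a hypothetical valueStore interface (the interface and function names are illustrative, not the go-ipfs routing API):

package example

import (
	"context"
	"time"
)

// valueStore is a hypothetical stand-in for the routing system's
// PutValue used in Publish above.
type valueStore interface {
	PutValue(ctx context.Context, key string, value []byte) error
}

// putWithDeadline bounds one store operation to four seconds and
// releases the deadline's resources when the call returns.
func putWithDeadline(ctx context.Context, s valueStore, key string, value []byte) error {
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(4*time.Second))
	defer cancel()
	return s.PutValue(ctx, key, value)
}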
Example No. 7
// Dial connects to a peer.
//
// The idea is that the client of Swarm does not need to know what network
// the connection will happen over. Swarm can use whichever it chooses.
// This allows us to use various transport protocols, do NAT traversal/relay,
// etc. to achieve connection.
//
// For now, Dial uses only TCP. This will be extended.
func (s *Swarm) Dial(peer peer.Peer) (conn.Conn, error) {
	if peer.ID().Equal(s.local.ID()) {
		return nil, errors.New("Attempted connection to self!")
	}

	// check if we already have an open connection first
	c := s.GetConnection(peer.ID())
	if c != nil {
		return c, nil
	}

	// check if we don't have the peer in Peerstore
	peer, err := s.peers.Add(peer)
	if err != nil {
		return nil, err
	}

	// open connection to peer
	d := &conn.Dialer{
		LocalPeer: s.local,
		Peerstore: s.peers,
	}

	// try to connect to one of the peer's known addresses.
	// for simplicity, we do this sequentially.
	// A future commit will do this asynchronously.
	for _, addr := range peer.Addresses() {
		c, err = d.DialAddr(s.Context(), addr, peer)
		if err == nil {
			break
		}
	}
	if err != nil {
		return nil, err
	}

	c, err = s.connSetup(c)
	if err != nil {
		c.Close()
		return nil, err
	}

	// TODO replace the TODO ctx with a context passed in from caller
	log.Event(context.TODO(), "dial", peer)
	return c, nil
}
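
Dial walks the peer's known addresses in order and keeps the first connection that succeeds. The same sequential-fallback shape, sketched with the standard library's net.Dialer over plain host:port strings rather than multiaddrs (dialFirst is illustrative only):

package example

import (
	"context"
	"errors"
	"net"
)

// dialFirst tries each address in turn and returns the first
// connection that succeeds, or the last error seen.
func dialFirst(ctx context.Context, addrs []string) (net.Conn, error) {
	var d net.Dialer
	var lastErr error
	for _, addr := range addrs {
		c, err := d.DialContext(ctx, "tcp", addr)
		if err == nil {
			return c, nil
		}
		lastErr = err
	}
	if lastErr == nil {
		lastErr = errors.New("no addresses to dial")
	}
	return nil, lastErr
}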
Example No. 8
func TestBlocks(t *testing.T) {
	d := ds.NewMapDatastore()
	bs, err := NewBlockService(d, nil)
	if err != nil {
		t.Error("failed to construct block service", err)
		return
	}

	b := blocks.NewBlock([]byte("beep boop"))
	h := u.Hash([]byte("beep boop"))
	if !bytes.Equal(b.Multihash, h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != u.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
	b2, err := bs.GetBlock(ctx, b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data, b2.Data) {
		t.Error("Block data is not equal.")
	}
}
Example No. 9
// resolveUnspecifiedAddresses expands unspecified ip addresses (/ip4/0.0.0.0, /ip6/::) to
// use the known local interfaces.
func resolveUnspecifiedAddresses(unspecifiedAddrs []ma.Multiaddr) ([]ma.Multiaddr, error) {
	var outputAddrs []ma.Multiaddr

	// todo optimize: only fetch these if we have an "any" addr.
	ifaceAddrs, err := interfaceAddresses()
	if err != nil {
		return nil, err
	}

	for _, a := range unspecifiedAddrs {

		// split address into its components
		split := ma.Split(a)

		// if first component (ip) is not unspecified, use it as is.
		if !manet.IsIPUnspecified(split[0]) {
			outputAddrs = append(outputAddrs, a)
			continue
		}

		// unspecified? add one address per interface.
		for _, ia := range ifaceAddrs {
			split[0] = ia
			joined := ma.Join(split...)
			outputAddrs = append(outputAddrs, joined)
		}
	}

	log.Event(context.TODO(), "interfaceListenAddresses", func() eventlog.Loggable {
		var addrs []string
		for _, addr := range outputAddrs {
			addrs = append(addrs, addr.String())
		}
		return eventlog.Metadata{"addresses": addrs}
	}())
	log.Info("InterfaceListenAddresses:", outputAddrs)
	return outputAddrs, nil
}
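
Expanding /ip4/0.0.0.0 or /ip6/:: needs the machine's own interface addresses. A standard-library-only sketch of gathering those IPs (the multiaddr splicing done above is omitted; localIPs is illustrative):

package example

import "net"

// localIPs returns the unicast IPs of all local interfaces, the raw
// material for expanding an unspecified listen address.
func localIPs() ([]net.IP, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, err
	}
	var ips []net.IP
	for _, a := range addrs {
		if ipnet, ok := a.(*net.IPNet); ok {
			ips = append(ips, ipnet.IP)
		}
	}
	return ips, nil
}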
Example No. 10
func (n *network) deliver(
	r bsnet.Receiver, from peer.Peer, message bsmsg.BitSwapMessage) error {
	if message == nil || from == nil {
		return errors.New("Invalid input")
	}

	nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message)

	if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) {
		return errors.New("Malformed client request")
	}

	if nextPeer == nil && nextMsg == nil {
		return nil
	}

	nextReceiver, ok := n.clients[nextPeer.Key()]
	if !ok {
		return errors.New("Cannot locate peer on network")
	}
	go n.deliver(nextReceiver, nextPeer, nextMsg)
	return nil
}
Example No. 11
// fanOut handles the unwrapping + sending of messages to the right connection.
func (s *Swarm) fanOut() {
	defer s.Children().Done()

	i := 0
	for {
		select {
		case <-s.Closing():
			return // told to close.

		case msg, ok := <-s.Outgoing:
			if !ok {
				log.Infof("%s outgoing channel closed", s)
				return
			}
			if len(msg.Data()) >= conn.MaxMessageSize {
				log.Critical("Attempted to send message bigger than max size.")
			}

			s.connsLock.RLock()
			c, found := s.conns[msg.Peer().Key()]
			s.connsLock.RUnlock()

			if !found {
				e := fmt.Errorf("Sent msg to peer without open conn: %v", msg.Peer())
				s.errChan <- e
				log.Error(e)
				continue
			}

			i++
			log.Debugf("%s sent message to %s (%d)", s.local, msg.Peer(), i)
			log.Event(context.TODO(), "sendMessage", s.local, msg)
			// queue it in the connection's buffer
			c.Out() <- msg.Data()
		}
	}
}
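
fanOut is a two-way select loop: exit on the close signal, otherwise drain the outgoing queue. A stripped-down sketch of that shape with plain channels and no swarm types (names are illustrative):

package example

// fanOut forwards messages from outgoing until either the channel is
// closed or closing fires, mirroring the select in Swarm.fanOut above.
func fanOut(closing <-chan struct{}, outgoing <-chan []byte, send func([]byte)) {
	for {
		select {
		case <-closing:
			return
		case msg, ok := <-outgoing:
			if !ok {
				return // outgoing channel closed
			}
			send(msg)
		}
	}
}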
Example No. 12
// TODO
func (n *network) SendRequest(
	ctx context.Context,
	from peer.Peer,
	to peer.Peer,
	message bsmsg.BitSwapMessage) (
	incoming bsmsg.BitSwapMessage, err error) {

	r, ok := n.clients[to.Key()]
	if !ok {
		return nil, errors.New("Cannot locate peer on network")
	}
	nextPeer, nextMsg := r.ReceiveMessage(context.TODO(), from, message)

	// TODO dedupe code
	if (nextPeer == nil && nextMsg != nil) || (nextMsg == nil && nextPeer != nil) {
		r.ReceiveError(errors.New("Malformed client request"))
		return nil, nil
	}

	// TODO dedupe code
	if nextPeer == nil && nextMsg == nil {
		return nil, nil
	}

	// TODO test when receiver doesn't immediately respond to the initiator of the request
	if !bytes.Equal(nextPeer.ID(), from.ID()) {
		go func() {
			nextReceiver, ok := n.clients[nextPeer.Key()]
			if !ok {
				// TODO log the error?
			}
			n.deliver(nextReceiver, nextPeer, nextMsg)
		}()
		return nil, nil
	}
	return nextMsg, nil
}
Example No. 13
		if err != nil {
			return nil, err
		}

		if len(req.Arguments()) == 0 {
			return printPeer(node.Identity)
		}

		pid := req.Arguments()[0]

		id := peer.ID(b58.Decode(pid))
		if len(id) == 0 {
			return nil, cmds.ClientError("Invalid peer id")
		}

		ctx, _ := context.WithTimeout(context.TODO(), time.Second*5)
		if node.Routing == nil {
			return nil, errors.New(offlineIdErrorMessage)
		}

		p, err := node.Routing.FindPeer(ctx, id)
		if err == kb.ErrLookupFailure {
			return nil, errors.New(offlineIdErrorMessage)
		}
		if err != nil {
			return nil, err
		}
		return printPeer(p)
	},
	Marshalers: cmds.MarshalerMap{
		cmds.Text: func(res cmds.Response) ([]byte, error) {
Example No. 14
var swarmConnectCmd = &cmds.Command{
	Helptext: cmds.HelpText{
		Tagline: "Open connection to a given peer",
		ShortDescription: `
'ipfs swarm connect' opens a connection to a peer address. The address format
is an ipfs multiaddr:

ipfs swarm connect /ip4/104.131.131.82/tcp/4001/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ
`,
	},
	Arguments: []cmds.Argument{
		cmds.StringArg("address", true, true, "address of peer to connect to"),
	},
	Run: func(req cmds.Request) (interface{}, error) {
		ctx := context.TODO()

		log.Debug("ipfs swarm connect")
		n, err := req.Context().GetNode()
		if err != nil {
			return nil, err
		}

		addrs := req.Arguments()

		if n.Network == nil {
			return nil, errNotOnline
		}

		peers, err := peersWithAddresses(n.Peerstore, addrs)
		if err != nil {
Example No. 15
// NewIpfsNode constructs a new IpfsNode based on the given config.
func NewIpfsNode(cfg *config.Config, online bool) (n *IpfsNode, err error) {
	success := false // flip to true after all sub-system inits succeed
	defer func() {
		if !success && n != nil {
			n.Close()
		}
	}()

	if cfg == nil {
		return nil, debugerror.Errorf("configuration required")
	}

	// derive this from a higher context.
	ctx := context.TODO()
	n = &IpfsNode{
		onlineMode: online,
		Config:     cfg,
	}
	n.ContextCloser = ctxc.NewContextCloser(ctx, n.teardown)

	// setup datastore.
	if n.Datastore, err = makeDatastore(cfg.Datastore); err != nil {
		return nil, debugerror.Wrap(err)
	}

	// setup peerstore + local peer identity
	n.Peerstore = peer.NewPeerstore()
	n.Identity, err = initIdentity(&n.Config.Identity, n.Peerstore, online)
	if err != nil {
		return nil, debugerror.Wrap(err)
	}

	// setup online services
	if online {

		dhtService := netservice.NewService(ctx, nil)      // nil handler for now, need to patch it
		exchangeService := netservice.NewService(ctx, nil) // nil handler for now, need to patch it
		diagService := netservice.NewService(ctx, nil)     // nil handler for now, need to patch it

		muxMap := &mux.ProtocolMap{
			mux.ProtocolID_Routing:    dhtService,
			mux.ProtocolID_Exchange:   exchangeService,
			mux.ProtocolID_Diagnostic: diagService,
			// add protocol services here.
		}

		// setup the network
		listenAddrs, err := listenAddresses(cfg)
		if err != nil {
			return nil, debugerror.Wrap(err)
		}

		n.Network, err = inet.NewIpfsNetwork(ctx, listenAddrs, n.Identity, n.Peerstore, muxMap)
		if err != nil {
			return nil, debugerror.Wrap(err)
		}
		n.AddCloserChild(n.Network)

		// setup diagnostics service
		n.Diagnostics = diag.NewDiagnostics(n.Identity, n.Network, diagService)
		diagService.SetHandler(n.Diagnostics)

		// setup routing service
		dhtRouting := dht.NewDHT(ctx, n.Identity, n.Peerstore, n.Network, dhtService, n.Datastore)
		dhtRouting.Validators[IpnsValidatorTag] = namesys.ValidateIpnsRecord

		// TODO(brian): perform this inside NewDHT factory method
		dhtService.SetHandler(dhtRouting) // wire the handler to the service.
		n.Routing = dhtRouting
		n.AddCloserChild(dhtRouting)

		// setup exchange service
		const alwaysSendToPeer = true // use YesManStrategy
		bitswapNetwork := bsnet.NewFromIpfsNetwork(exchangeService, n.Network)
		n.Exchange = bitswap.New(ctx, n.Identity, bitswapNetwork, n.Routing, n.Datastore, alwaysSendToPeer)

		go initConnections(ctx, n.Config, n.Peerstore, dhtRouting)
	}

	// TODO(brian): when offline instantiate the BlockService with a bitswap
	// session that simply doesn't return blocks
	n.Blocks, err = bserv.NewBlockService(n.Datastore, n.Exchange)
	if err != nil {
		return nil, debugerror.Wrap(err)
	}

	n.DAG = merkledag.NewDAGService(n.Blocks)
	n.Namesys = namesys.NewNameSystem(n.Routing)
	n.Pinning, err = pin.LoadPinner(n.Datastore, n.DAG)
	if err != nil {
		n.Pinning = pin.NewPinner(n.Datastore, n.DAG)
	}
	n.Resolver = &path.Resolver{DAG: n.DAG}

	success = true
	return n, nil
}