Example #1
File: wantmanager.go Project: rht/ipget
func (mq *msgQueue) doWork(ctx context.Context) {
	// allow ten minutes for connections
	// this includes looking them up in the dht
	// dialing them, and handshaking
	conctx, cancel := context.WithTimeout(ctx, time.Minute*10)
	defer cancel()

	err := mq.network.ConnectTo(conctx, mq.p)
	if err != nil {
		log.Infof("cant connect to peer %s: %s", mq.p, err)
	// TODO: can't connect, what now?
		return
	}

	// grab outgoing message
	mq.outlk.Lock()
	wlm := mq.out
	if wlm == nil || wlm.Empty() {
		mq.outlk.Unlock()
		return
	}
	mq.out = nil
	mq.outlk.Unlock()

	sendctx, cancel := context.WithTimeout(ctx, time.Minute*5)
	defer cancel()

	// send wantlist updates
	err = mq.network.SendMessage(sendctx, mq.p, wlm)
	if err != nil {
		log.Infof("bitswap send error: %s", err)
		// TODO: what do we do if this fails?
		return
	}
}
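
Every example on this page follows the same core shape: derive a bounded context with context.WithTimeout and release its timer with defer cancel() on every return path. A minimal, self-contained sketch of that shape (dial here is a hypothetical stand-in for a blocking call such as mq.network.ConnectTo, not code from this project):

package main

import (
	"context"
	"fmt"
	"time"
)

// dial pretends to be a blocking network call: it finishes after 50ms
// unless the context's deadline fires first.
func dial(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err() // context.DeadlineExceeded once the timeout expires
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel() // always release the timer, even on the success path

	if err := dial(ctx); err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	fmt.Println("connected")
}
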
Example #2
File: ping.go Project: rht/ipget
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()
		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
				break
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}

				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}
		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}
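
One detail in the receive loop above: a plain break inside a select only exits the select statement, not the enclosing for loop, which is why the done flag is needed. A labeled break is the other common way to write the same loop; the sketch below is generic, not code from this project, and assumes the usual context and time imports:

// receiveUpTo drains at most n values from results, stopping early if the
// channel is closed by the sender or the context expires.
func receiveUpTo(ctx context.Context, results <-chan time.Duration, n int) []time.Duration {
	var out []time.Duration
loop:
	for i := 0; i < n; i++ {
		select {
		case <-ctx.Done():
			break loop
		case v, ok := <-results:
			if !ok {
				break loop // sender closed the channel
			}
			out = append(out, v)
		}
	}
	return out
}
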
Example #3
File: routing.go Project: rht/ipget
// GetValue searches for the value corresponding to given Key.
func (dht *IpfsDHT) GetValue(ctx context.Context, key key.Key) ([]byte, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	vals, err := dht.GetValues(ctx, key, 16)
	if err != nil {
		return nil, err
	}

	var recs [][]byte
	for _, v := range vals {
		recs = append(recs, v.Val)
	}

	i, err := dht.Selector.BestRecord(key, recs)
	if err != nil {
		return nil, err
	}

	best := recs[i]
	log.Debugf("GetValue %v %v", key, best)
	if best == nil {
		log.Errorf("GetValue yielded correct record with nil value.")
		return nil, routing.ErrNotFound
	}

	fixupRec, err := record.MakePutRecord(dht.peerstore.PrivKey(dht.self), key, best, true)
	if err != nil {
		// probably shouldn't actually 'error' here as we have found a value we like,
		// but this call failing probably isn't something we want to ignore
		return nil, err
	}

	for _, v := range vals {
		// if someone sent us a different 'less-valid' record, let's correct them
		if !bytes.Equal(v.Val, best) {
			go func(v routing.RecvdVal) {
				ctx, cancel := context.WithTimeout(dht.Context(), time.Second*30)
				defer cancel()
				err := dht.putValueToPeer(ctx, v.From, key, fixupRec)
				if err != nil {
					log.Error("Error correcting DHT entry: ", err)
				}
			}(v)
		}
	}

	return best, nil
}
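
The correction step above detaches the fix-up writes from the caller: each goroutine derives its own 30-second timeout from dht.Context(), a long-lived base context, so the repairs are not cut short when the original request returns. The same pattern in isolation (fixupAsync and doFixup are placeholder names, the base context stands in for dht.Context(), and the usual context, log, and time imports are assumed):

// fixupAsync runs background repair work with its own deadline, independent
// of whatever request triggered it.
func fixupAsync(base context.Context, doFixup func(context.Context) error) {
	go func() {
		ctx, cancel := context.WithTimeout(base, 30*time.Second)
		defer cancel()
		if err := doFixup(ctx); err != nil {
			log.Println("fixup failed:", err)
		}
	}()
}
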
Example #4
File: diag.go Project: rht/ipget
// GetDiagnostic runs a diagnostics request across the entire network
func (d *Diagnostics) GetDiagnostic(ctx context.Context, timeout time.Duration) ([]*DiagInfo, error) {
	log.Debug("Getting diagnostic.")
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	diagID := newID()
	d.diagLock.Lock()
	d.diagMap[diagID] = time.Now()
	d.diagLock.Unlock()

	log.Debug("Begin Diagnostic")

	peers := d.getPeers()
	log.Debugf("Sending diagnostic request to %d peers.", len(peers))

	pmes := newMessage(diagID)

	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement) // decrease timeout per hop
	dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)
	if err != nil {
		return nil, fmt.Errorf("diagnostic from peers err: %s", err)
	}

	di := d.getDiagInfo()
	out := []*DiagInfo{di}
	for dpi := range dpeers {
		out = append(out, dpi)
	}
	return out, nil
}
Example #5
File: workers.go Project: rht/ipget
// connects to providers for the given keys
func (bs *Bitswap) providerConnector(parent context.Context) {
	defer log.Info("bitswap client worker shutting down...")

	for {
		log.Event(parent, "Bitswap.ProviderConnector.Loop")
		select {
		case req := <-bs.findKeys:
			keys := req.keys
			if len(keys) == 0 {
				log.Warning("Received batch request for zero blocks")
				continue
			}
			log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys})

			// NB: Optimization. Assumes that providers of key[0] are likely to
			// be able to provide for all keys. This currently holds true in most
			// every situation. Later, this assumption may not hold as true.
			child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
			providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
			for p := range providers {
				go bs.network.ConnectTo(req.ctx, p)
			}
			cancel()

		case <-parent.Done():
			return
		}
	}
}
Example #6
File: bitswap.go Project: rht/ipget
func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) {

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Get providers for all entries in wantlist (could take a while)
	wg := sync.WaitGroup{}
	for _, e := range entries {
		wg.Add(1)
		go func(k key.Key) {
			defer wg.Done()

			child, cancel := context.WithTimeout(ctx, providerRequestTimeout)
			defer cancel()
			providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
			for prov := range providers {
				go func(p peer.ID) {
					bs.network.ConnectTo(ctx, p)
				}(prov)
			}
		}(e.Key)
	}

	wg.Wait() // make sure all our children do finish.
}
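
Two details of the fan-out above are worth noting: the WaitGroup counter is incremented before each goroutine starts, and the loop value e.Key is passed into the closure as an argument so every goroutine works on its own copy (important before Go 1.22, when the loop variable was shared across iterations). The bare pattern looks like this (a generic sketch with string keys and a placeholder work function, assuming the sync import):

// fanOut runs work for every key concurrently and waits for all of them.
func fanOut(keys []string, work func(string)) {
	var wg sync.WaitGroup
	for _, k := range keys {
		wg.Add(1)           // count before spawning
		go func(k string) { // pass the loop value rather than capturing it
			defer wg.Done()
			work(k)
		}(k)
	}
	wg.Wait() // all workers have finished
}
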
Example #7
File: core.go Project: rht/ipget
func (n *IpfsNode) HandlePeerFound(p peer.PeerInfo) {
	log.Warning("trying peer info: ", p)
	ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout)
	defer cancel()
	if err := n.PeerHost.Connect(ctx, p); err != nil {
		log.Warning("Failed to connect to peer found by discovery: ", err)
	}
}
Example #8
File: net.go Project: rht/ipget
func Dial(nd *core.IpfsNode, p peer.ID, protocol string) (net.Stream, error) {
	ctx, cancel := context.WithTimeout(nd.Context(), time.Second*30)
	defer cancel()
	err := nd.PeerHost.Connect(ctx, peer.PeerInfo{ID: p})
	if err != nil {
		return nil, err
	}
	return nd.PeerHost.NewStream(pro.ID(protocol), p)
}
Example #9
File: diag.go Project: rht/ipget
func (d *Diagnostics) HandleMessage(ctx context.Context, s inet.Stream) error {

	cr := ctxio.NewReader(ctx, s)
	cw := ctxio.NewWriter(ctx, s)
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax) // maxsize
	w := ggio.NewDelimitedWriter(cw)

	// deserialize msg
	pmes := new(pb.Message)
	if err := r.ReadMsg(pmes); err != nil {
		log.Debugf("Failed to decode protobuf message: %v", err)
		return nil
	}

	// Print out diagnostic
	log.Infof("[peer: %s] Got message from [%s]\n",
		d.self.Pretty(), s.Conn().RemotePeer())

	// Make sure we haven't already handled this request to prevent loops
	if err := d.startDiag(pmes.GetDiagID()); err != nil {
		return nil
	}

	resp := newMessage(pmes.GetDiagID())
	resp.Data = d.getDiagInfo().Marshal()
	if err := w.WriteMsg(resp); err != nil {
		log.Debugf("Failed to write protobuf message over stream: %s", err)
		return err
	}

	timeout := pmes.GetTimeoutDuration()
	if timeout < HopTimeoutDecrement {
		return fmt.Errorf("timeout too short: %s", timeout)
	}
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement)

	dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)
	if err != nil {
		log.Debugf("diagnostic from peers err: %s", err)
		return err
	}
	for b := range dpeers {
		resp := newMessage(pmes.GetDiagID())
		resp.Data = b.Marshal()
		if err := w.WriteMsg(resp); err != nil {
			log.Debugf("Failed to write protobuf message over stream: %s", err)
			return err
		}
	}

	return nil
}
Example #10
File: fracctx.go Project: rht/ipget
// WithDeadlineFraction returns a Context with a fraction of the
// original context's timeout. This is useful in sequential pipelines
// of work, where one might try options and fall back to others
// depending on the time available, or failure to respond. For example:
//
//  // getPicture returns a picture from our encrypted database
//  // we have a pipeline of multiple steps. we need to:
//  // - get the data from a database
//  // - decrypt it
//  // - apply many transforms
//  //
//  // we **know** that each step takes increasingly more time.
//  // The transforms are much more expensive than decryption, and
//  // decryption is more expensive than the database lookup.
//  // If our database takes too long (i.e. >0.2 of available time),
//  // there's no use in continuing.
//  func getPicture(ctx context.Context, key string) ([]byte, error) {
//    // fractional timeout contexts to the rescue!
//
//    // try the database with 0.2 of remaining time.
//    ctx1, _ := ctxext.WithDeadlineFraction(ctx, 0.2)
//    val, err := db.Get(ctx1, key)
//    if err != nil {
//      return nil, err
//    }
//
//    // try decryption with 0.3 of remaining time.
//    ctx2, _ := ctxext.WithDeadlineFraction(ctx, 0.3)
//    if val, err = decryptor.Decrypt(ctx2, val); err != nil {
//      return nil, err
//    }
//
//    // try transforms with all remaining time. hopefully it's enough!
//    return transformer.Transform(ctx, val)
//  }
//
//
func WithDeadlineFraction(ctx context.Context, fraction float64) (
	context.Context, context.CancelFunc) {

	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}

	left := d.Sub(time.Now())
	if left < 0 { // already passed...
		return context.WithCancel(ctx)
	}

	left = time.Duration(float64(left) * fraction)
	return context.WithTimeout(ctx, left)
}
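
A quick usage sketch for the function above (not from the project): the child context's deadline lands at roughly the requested fraction of whatever time the parent has left, assuming WithDeadlineFraction is in scope along with the context, fmt, and time imports.

func main() {
	parent, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	child, cancelChild := WithDeadlineFraction(parent, 0.2)
	defer cancelChild()

	if d, ok := child.Deadline(); ok {
		fmt.Println("child deadline in about", time.Until(d)) // roughly 2s of the parent's 10s
	}
}
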
Example #11
File: publisher.go Project: rht/ipget
func PublishEntry(ctx context.Context, r routing.IpfsRouting, ipnskey key.Key, rec *pb.IpnsEntry) error {
	timectx, cancel := context.WithTimeout(ctx, PublishPutValTimeout)
	defer cancel()

	data, err := proto.Marshal(rec)
	if err != nil {
		return err
	}

	log.Debugf("Storing ipns entry at: %s", ipnskey)
	// Store ipns entry at "/ipns/"+b58(h(pubkey))
	if err := r.PutValue(timectx, ipnskey, data); err != nil {
		return err
	}

	return nil
}
Example #12
File: publisher.go Project: rht/ipget
func PublishPublicKey(ctx context.Context, r routing.IpfsRouting, k key.Key, pubk ci.PubKey) error {
	log.Debugf("Storing pubkey at: %s", k)
	pkbytes, err := pubk.Bytes()
	if err != nil {
		return err
	}

	// Store associated public key
	timectx, cancel := context.WithTimeout(ctx, PublishPutValTimeout)
	defer cancel()
	err = r.PutValue(timectx, k, pkbytes)
	if err != nil {
		return err
	}

	return nil
}
Example #13
File: bootstrap.go Project: rht/ipget
func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error {

	ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout)
	defer cancel()
	id := host.ID()

	// get bootstrap peers from config. retrieving them here makes
	// sure we remain observant of changes to client configuration.
	peers := cfg.BootstrapPeers()

	// determine how many bootstrap connections to open
	connected := host.Network().Peers()
	if len(connected) >= cfg.MinPeerThreshold {
		log.Event(ctx, "bootstrapSkip", id)
		log.Debugf("%s core bootstrap skipped -- connected to %d (> %d) nodes",
			id, len(connected), cfg.MinPeerThreshold)
		return nil
	}
	numToDial := cfg.MinPeerThreshold - len(connected)

	// filter out bootstrap nodes we are already connected to
	var notConnected []peer.PeerInfo
	for _, p := range peers {
		if host.Network().Connectedness(p.ID) != inet.Connected {
			notConnected = append(notConnected, p)
		}
	}

	// if connected to all bootstrap peer candidates, exit
	if len(notConnected) < 1 {
		log.Debugf("%s no more bootstrap peers to create %d connections", id, numToDial)
		return ErrNotEnoughBootstrapPeers
	}

	// connect to a random subset of bootstrap candidates
	randSubset := randomSubsetOfPeers(notConnected, numToDial)

	defer log.EventBegin(ctx, "bootstrapStart", id).Done()
	log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset)
	if err := bootstrapConnect(ctx, host, randSubset); err != nil {
		return err
	}
	return nil
}
Example #14
File: resolver.go Project: rht/ipget
// ResolveLinks iteratively resolves names by walking the link hierarchy.
// Every node is fetched from the DAGService, resolving the next name.
// Returns the list of nodes forming the path, starting with ndd. This list is
// guaranteed never to be empty.
//
// ResolveLinks(nd, []string{"foo", "bar", "baz"})
// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
func (s *Resolver) ResolveLinks(ctx context.Context, ndd *merkledag.Node, names []string) ([]*merkledag.Node, error) {

	result := make([]*merkledag.Node, 0, len(names)+1)
	result = append(result, ndd)
	nd := ndd // dup arg workaround

	// for each of the path components
	for _, name := range names {

		var next key.Key
		var nlink *merkledag.Link
		// for each of the links in nd, the current object
		for _, link := range nd.Links {
			if link.Name == name {
				next = key.Key(link.Hash)
				nlink = link
				break
			}
		}

		if next == "" {
			n, _ := nd.Multihash()
			return result, ErrNoLink{name: name, node: n}
		}

		if nlink.Node == nil {
			// fetch object for link and assign to nd
			ctx, cancel := context.WithTimeout(ctx, time.Minute)
			defer cancel()
			var err error
			nd, err = s.DAG.Get(ctx, next)
			if err != nil {
				return append(result, nd), err
			}
			nlink.Node = nd
		} else {
			nd = nlink.Node
		}

		result = append(result, nlink.Node)
	}
	return result, nil
}
Example #15
File: publisher.go Project: rht/ipget
func (p *ipnsPublisher) getPreviousSeqNo(ctx context.Context, ipnskey key.Key) (uint64, error) {
	prevrec, err := p.ds.Get(ipnskey.DsKey())
	if err != nil && err != ds.ErrNotFound {
		// None found, let's start at zero!
		return 0, err
	}
	var val []byte
	if err == nil {
		prbytes, ok := prevrec.([]byte)
		if !ok {
			return 0, fmt.Errorf("unexpected type returned from datastore: %#v", prevrec)
		}
		dhtrec := new(dhtpb.Record)
		err := proto.Unmarshal(prbytes, dhtrec)
		if err != nil {
			return 0, err
		}

		val = dhtrec.GetValue()
	} else {
		// try and check the dht for a record
		ctx, cancel := context.WithTimeout(ctx, time.Second*30)
		defer cancel()

		rv, err := p.routing.GetValue(ctx, ipnskey)
		if err != nil {
			// no such record found, start at zero!
			return 0, nil
		}

		val = rv
	}

	e := new(pb.IpnsEntry)
	err = proto.Unmarshal(val, e)
	if err != nil {
		return 0, err
	}

	return e.GetSequence(), nil
}
Example #16
File: workers.go Project: rht/ipget
func (bs *Bitswap) provideWorker(px process.Process) {

	limiter := ratelimit.NewRateLimiter(px, provideWorkerMax)

	limitedGoProvide := func(k key.Key, wid int) {
		ev := logging.LoggableMap{"ID": wid}
		limiter.LimitedGo(func(px process.Process) {

			ctx := procctx.OnClosingContext(px) // derive ctx from px
			defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done()

			ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
			defer cancel()

			if err := bs.network.Provide(ctx, k); err != nil {
				log.Error(err)
			}
		})
	}

	// worker spawner, reads from bs.provideKeys until it closes, spawning a
	// _ratelimited_ number of workers to handle each key.
	limiter.Go(func(px process.Process) {
		for wid := 2; ; wid++ {
			ev := logging.LoggableMap{"ID": 1}
			log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev)

			select {
			case <-px.Closing():
				return
			case k, ok := <-bs.provideKeys:
				if !ok {
					log.Debug("provideKeys channel closed")
					return
				}
				limitedGoProvide(k, wid)
			}
		}
	})
}
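
The limiter above (ratelimit.NewRateLimiter with LimitedGo, from the goprocess family of packages) caps how many provide calls run at once. The same cap can be sketched with only the standard library, using a buffered channel as a semaphore; everything below, including the worker count and timeout value, is an assumption for illustration rather than the project's configuration:

// provideAll announces every key read from keys, running at most maxWorkers
// provide calls concurrently and bounding each one with its own timeout.
func provideAll(ctx context.Context, keys <-chan string, provide func(context.Context, string) error) {
	const maxWorkers = 8
	sem := make(chan struct{}, maxWorkers)

	for k := range keys {
		sem <- struct{}{} // acquire a slot; blocks while maxWorkers are busy
		go func(k string) {
			defer func() { <-sem }() // release the slot

			ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
			defer cancel()
			if err := provide(ctx, k); err != nil {
				log.Println("provide:", err)
			}
		}(k)
	}

	// wait for the in-flight workers by filling every slot
	for i := 0; i < cap(sem); i++ {
		sem <- struct{}{}
	}
}
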
Example #17
File: protocol.go Project: rht/ipget
// runHandshake performs initial communication over insecure channel to share
// keys, IDs, and initiate communication, assigning all necessary params.
// requires the duplex channel to be a msgio.ReadWriter (for framed messaging)
func (s *secureSession) runHandshake() error {
	ctx, cancel := context.WithTimeout(s.ctx, HandshakeTimeout) // remove
	defer cancel()

	// =============================================================================
	// step 1. Propose -- propose cipher suite + send pubkeys + nonce

	// Generate and send Hello packet.
	// Hello = (rand, PublicKey, Supported)
	nonceOut := make([]byte, nonceSize)
	_, err := rand.Read(nonceOut)
	if err != nil {
		return err
	}

	defer log.EventBegin(ctx, "secureHandshake", s).Done()

	s.local.permanentPubKey = s.localKey.GetPublic()
	myPubKeyBytes, err := s.local.permanentPubKey.Bytes()
	if err != nil {
		return err
	}

	proposeOut := new(pb.Propose)
	proposeOut.Rand = nonceOut
	proposeOut.Pubkey = myPubKeyBytes
	proposeOut.Exchanges = &SupportedExchanges
	proposeOut.Ciphers = &SupportedCiphers
	proposeOut.Hashes = &SupportedHashes

	// log.Debugf("1.0 Propose: nonce:%s exchanges:%s ciphers:%s hashes:%s",
	// 	nonceOut, SupportedExchanges, SupportedCiphers, SupportedHashes)

	// Send Propose packet (respects ctx)
	proposeOutBytes, err := writeMsgCtx(ctx, s.insecureM, proposeOut)
	if err != nil {
		return err
	}

	// Receive + Parse their Propose packet and generate an Exchange packet.
	proposeIn := new(pb.Propose)
	proposeInBytes, err := readMsgCtx(ctx, s.insecureM, proposeIn)
	if err != nil {
		return err
	}

	// log.Debugf("1.0.1 Propose recv: nonce:%s exchanges:%s ciphers:%s hashes:%s",
	// 	proposeIn.GetRand(), proposeIn.GetExchanges(), proposeIn.GetCiphers(), proposeIn.GetHashes())

	// =============================================================================
	// step 1.1 Identify -- get identity from their key

	// get remote identity
	s.remote.permanentPubKey, err = ci.UnmarshalPublicKey(proposeIn.GetPubkey())
	if err != nil {
		return err
	}

	// get peer id
	s.remotePeer, err = peer.IDFromPublicKey(s.remote.permanentPubKey)
	if err != nil {
		return err
	}

	log.Debugf("1.1 Identify: %s Remote Peer Identified as %s", s.localPeer, s.remotePeer)

	// =============================================================================
	// step 1.2 Selection -- select/agree on best encryption parameters

	// to determine order, use cmp(H(remote_pubkey||local_rand), H(local_pubkey||remote_rand)).
	oh1 := u.Hash(append(proposeIn.GetPubkey(), nonceOut...))
	oh2 := u.Hash(append(myPubKeyBytes, proposeIn.GetRand()...))
	order := bytes.Compare(oh1, oh2)
	if order == 0 {
		return ErrEcho // talking to self (same socket. must be reuseport + dialing self)
	}

	s.local.curveT, err = selectBest(order, SupportedExchanges, proposeIn.GetExchanges())
	if err != nil {
		return err
	}

	s.local.cipherT, err = selectBest(order, SupportedCiphers, proposeIn.GetCiphers())
	if err != nil {
		return err
	}

	s.local.hashT, err = selectBest(order, SupportedHashes, proposeIn.GetHashes())
	if err != nil {
		return err
	}

	// we use the same params for both directions (must choose same curve)
	// WARNING: if they don't SelectBest the same way, this won't work...
	s.remote.curveT = s.local.curveT
	s.remote.cipherT = s.local.cipherT
	s.remote.hashT = s.local.hashT

	// log.Debugf("1.2 selection: exchange:%s cipher:%s hash:%s",
	// 	s.local.curveT, s.local.cipherT, s.local.hashT)

	// =============================================================================
	// step 2. Exchange -- exchange (signed) ephemeral keys. verify signatures.

	// Generate EphemeralPubKey
	var genSharedKey ci.GenSharedKey
	s.local.ephemeralPubKey, genSharedKey, err = ci.GenerateEKeyPair(s.local.curveT)

	// Gather corpus to sign.
	selectionOut := new(bytes.Buffer)
	selectionOut.Write(proposeOutBytes)
	selectionOut.Write(proposeInBytes)
	selectionOut.Write(s.local.ephemeralPubKey)
	selectionOutBytes := selectionOut.Bytes()

	// log.Debugf("2.0 exchange: %v", selectionOutBytes)
	exchangeOut := new(pb.Exchange)
	exchangeOut.Epubkey = s.local.ephemeralPubKey
	exchangeOut.Signature, err = s.localKey.Sign(selectionOutBytes)
	if err != nil {
		return err
	}

	// Send Propose packet (respects ctx)
	if _, err := writeMsgCtx(ctx, s.insecureM, exchangeOut); err != nil {
		return err
	}

	// Receive + Parse their Exchange packet.
	exchangeIn := new(pb.Exchange)
	if _, err := readMsgCtx(ctx, s.insecureM, exchangeIn); err != nil {
		return err
	}

	// =============================================================================
	// step 2.1. Verify -- verify their exchange packet is good.

	// get their ephemeral pub key
	s.remote.ephemeralPubKey = exchangeIn.GetEpubkey()

	selectionIn := new(bytes.Buffer)
	selectionIn.Write(proposeInBytes)
	selectionIn.Write(proposeOutBytes)
	selectionIn.Write(s.remote.ephemeralPubKey)
	selectionInBytes := selectionIn.Bytes()
	// log.Debugf("2.0.1 exchange recv: %v", selectionInBytes)

	// u.POut("Remote Peer Identified as %s\n", s.remote)
	sigOK, err := s.remote.permanentPubKey.Verify(selectionInBytes, exchangeIn.GetSignature())
	if err != nil {
		// log.Error("2.1 Verify: failed: %s", err)
		return err
	}

	if !sigOK {
		err := errors.New("Bad signature!")
		// log.Error("2.1 Verify: failed: %s", err)
		return err
	}
	// log.Debugf("2.1 Verify: signature verified.")

	// =============================================================================
	// step 2.2. Keys -- generate keys for mac + encryption

	// OK! seems like we're good to go.
	s.sharedSecret, err = genSharedKey(exchangeIn.GetEpubkey())
	if err != nil {
		return err
	}

	// generate two sets of keys (stretching)
	k1, k2 := ci.KeyStretcher(s.local.cipherT, s.local.hashT, s.sharedSecret)

	// use random nonces to decide order.
	switch {
	case order > 0:
		// just break
	case order < 0:
		k1, k2 = k2, k1 // swap
	default:
		// we should've bailed before this. but if not, bail here.
		return ErrEcho
	}
	s.local.keys = k1
	s.remote.keys = k2

	// log.Debug("2.2 keys:\n\tshared: %v\n\tk1: %v\n\tk2: %v",
	// 	s.sharedSecret, s.local.keys, s.remote.keys)

	// =============================================================================
	// step 2.3. MAC + Cipher -- prepare MAC + cipher

	if err := s.local.makeMacAndCipher(); err != nil {
		return err
	}

	if err := s.remote.makeMacAndCipher(); err != nil {
		return err
	}

	// log.Debug("2.3 mac + cipher.")

	// =============================================================================
	// step 3. Finish -- send expected message to verify encryption works (send local nonce)

	// setup ETM ReadWriter
	w := NewETMWriter(s.insecure, s.local.cipher, s.local.mac)
	r := NewETMReader(s.insecure, s.remote.cipher, s.remote.mac)
	s.secure = msgio.Combine(w, r).(msgio.ReadWriteCloser)

	// log.Debug("3.0 finish. sending: %v", proposeIn.GetRand())
	// send their Nonce.
	if _, err := s.secure.Write(proposeIn.GetRand()); err != nil {
		return fmt.Errorf("Failed to write Finish nonce: %s", err)
	}

	// read our Nonce
	nonceOut2 := make([]byte, len(nonceOut))
	if _, err := io.ReadFull(s.secure, nonceOut2); err != nil {
		return fmt.Errorf("Failed to read Finish nonce: %s", err)
	}

	// log.Debug("3.0 finish.\n\texpect: %v\n\tactual: %v", nonceOut, nonceOut2)
	if !bytes.Equal(nonceOut, nonceOut2) {
		return fmt.Errorf("Failed to read our encrypted nonce: %s != %s", nonceOut2, nonceOut)
	}

	// Whew! ok, that's all folks.
	return nil
}
Example #18
func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
	urlPath := r.URL.Path
	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		// FIXME HTTP error code
		webError(w, "Could not resolve name", err, http.StatusInternalServerError)
		return
	}

	k, err := ipfsNode.Key()
	if err != nil {
		webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError)
		return
	}

	h, components, err := path.SplitAbsPath(path.FromKey(k))
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h))
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	// TODO(cryptix): assumes len(pathNodes) > 1 - not found is an error above?
	err = pathNodes[len(pathNodes)-1].RemoveNodeLink(components[len(components)-1])
	if err != nil {
		webError(w, "Could not delete link", err, http.StatusBadRequest)
		return
	}

	newnode := pathNodes[len(pathNodes)-1]
	for i := len(pathNodes) - 2; i >= 0; i-- {
		newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	if err := i.node.DAG.AddRecursive(newnode); err != nil {
		webError(w, "Could not add recursively new node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	key, err := newnode.Key()
	if err != nil {
		webError(w, "Could not get key of new node", err, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", key.String())
	http.Redirect(w, r, ipfsPathPrefix+key.String()+"/"+strings.Join(components[:len(components)-1], "/"), http.StatusCreated)
}
Example #19
func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(cryptix): either ask mildred about the flow of this or rewrite it
	webErrorWithCode(w, "Sorry, PUT is bugged right now, closing request", errors.New("handler disabled"), http.StatusInternalServerError)
	return
	urlPath := r.URL.Path
	pathext := urlPath[5:]
	var err error
	if urlPath == ipfsPathPrefix+"QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn/" {
		i.putEmptyDirHandler(w, r)
		return
	}

	var newnode *dag.Node
	if pathext[len(pathext)-1] == '/' {
		newnode = uio.NewEmptyDirectory()
	} else {
		newnode, err = i.newDagFromReader(r.Body)
		if err != nil {
			webError(w, "Could not create DAG from request", err, http.StatusInternalServerError)
			return
		}
	}

	ctx, cancel := context.WithCancel(i.node.Context())
	defer cancel()

	ipfsNode, err := core.Resolve(ctx, i.node, path.Path(urlPath))
	if err != nil {
		// FIXME HTTP error code
		webError(w, "Could not resolve name", err, http.StatusInternalServerError)
		return
	}

	k, err := ipfsNode.Key()
	if err != nil {
		webError(w, "Could not get key from resolved node", err, http.StatusInternalServerError)
		return
	}

	h, components, err := path.SplitAbsPath(path.FromKey(k))
	if err != nil {
		webError(w, "Could not split path", err, http.StatusInternalServerError)
		return
	}

	if len(components) < 1 {
		err = fmt.Errorf("Cannot override existing object")
		webError(w, "http gateway", err, http.StatusBadRequest)
		return
	}

	tctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	// TODO(cryptix): could this be core.Resolve() too?
	rootnd, err := i.node.Resolver.DAG.Get(tctx, key.Key(h))
	if err != nil {
		webError(w, "Could not resolve root object", err, http.StatusBadRequest)
		return
	}

	// resolving path components into merkledag nodes. if a component does not
	// resolve, create empty directories (which will be linked and populated below.)
	pathNodes, err := i.node.Resolver.ResolveLinks(tctx, rootnd, components[:len(components)-1])
	if _, ok := err.(path.ErrNoLink); ok {
		// Create empty directories, links will be made further down the code
		for len(pathNodes) < len(components) {
			pathNodes = append(pathNodes, uio.NewDirectory(i.node.DAG).GetNode())
		}
	} else if err != nil {
		webError(w, "Could not resolve parent object", err, http.StatusBadRequest)
		return
	}

	for i := len(pathNodes) - 1; i >= 0; i-- {
		newnode, err = pathNodes[i].UpdateNodeLink(components[i], newnode)
		if err != nil {
			webError(w, "Could not update node links", err, http.StatusInternalServerError)
			return
		}
	}

	if err := i.node.DAG.AddRecursive(newnode); err != nil {
		webError(w, "Could not add recursively new node", err, http.StatusInternalServerError)
		return
	}

	// Redirect to new path
	key, err := newnode.Key()
	if err != nil {
		webError(w, "Could not get key of new node", err, http.StatusInternalServerError)
		return
	}

	i.addUserHeaders(w) // ok, _now_ write user's headers.
	w.Header().Set("IPFS-Hash", key.String())
	http.Redirect(w, r, ipfsPathPrefix+key.String()+"/"+strings.Join(components, "/"), http.StatusCreated)
}
Example #20
// runBootstrap builds up list of peers by requesting random peer IDs
func (dht *IpfsDHT) runBootstrap(ctx context.Context, cfg BootstrapConfig) error {
	bslog := func(msg string) {
		log.Debugf("DHT %s dhtRunBootstrap %s -- routing table size: %d", dht.self, msg, dht.routingTable.Size())
	}
	bslog("start")
	defer bslog("end")
	defer log.EventBegin(ctx, "dhtRunBootstrap").Done()

	var merr u.MultiErr

	randomID := func() peer.ID {
		// 16 random bytes is not a valid peer id. it may be fine because
		// the dht will rehash to its own keyspace anyway.
		id := make([]byte, 16)
		rand.Read(id)
		id = u.Hash(id)
		return peer.ID(id)
	}

	// bootstrap sequentially, as results will compound
	ctx, cancel := context.WithTimeout(ctx, cfg.Timeout)
	defer cancel()
	runQuery := func(ctx context.Context, id peer.ID) {
		p, err := dht.FindPeer(ctx, id)
		if err == routing.ErrNotFound {
			// this isn't an error. this is precisely what we expect.
		} else if err != nil {
			merr = append(merr, err)
		} else {
			// woah, actually found a peer with that ID? this shouldn't happen normally
			// (as the ID we use is not a real ID). this is an odd error worth logging.
			err := fmt.Errorf("Bootstrap peer error: Actually FOUND peer. (%s, %s)", id, p)
			log.Warningf("%s", err)
			merr = append(merr, err)
		}
	}

	sequential := true
	if sequential {
		// these should be parallel normally. but can make them sequential for debugging.
		// note that the core/bootstrap context deadline should be extended too for that.
		for i := 0; i < cfg.Queries; i++ {
			id := randomID()
			log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, cfg.Queries, id)
			runQuery(ctx, id)
		}

	} else {
		// note on parallelism here: the context is passed in to the queries, so they
		// **should** exit when it exceeds, making this function exit on ctx cancel.
		// normally, we should be selecting on ctx.Done() here too, but this gets
		// complicated to do with WaitGroup, and doesn't wait for the children to exit.
		var wg sync.WaitGroup
		for i := 0; i < cfg.Queries; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				id := randomID()
				log.Debugf("Bootstrapping query (%d/%d) to random ID: %s", i+1, cfg.Queries, id)
				runQuery(ctx, id)
			}()
		}
		wg.Wait()
	}

	if len(merr) > 0 {
		return merr
	}
	return nil
}