func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()

		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
				break
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}

				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}

		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}
func (mq *msgQueue) doWork(ctx context.Context) {
	// allow ten minutes for connections
	// this includes looking them up in the dht
	// dialing them, and handshaking
	conctx, cancel := context.WithTimeout(ctx, time.Minute*10)
	defer cancel()

	err := mq.network.ConnectTo(conctx, mq.p)
	if err != nil {
		log.Infof("cant connect to peer %s: %s", mq.p, err)
		// TODO: can't connect, what now?
		return
	}

	// grab outgoing message
	mq.outlk.Lock()
	wlm := mq.out
	if wlm == nil || wlm.Empty() {
		mq.outlk.Unlock()
		return
	}
	mq.out = nil
	mq.outlk.Unlock()

	sendctx, cancel := context.WithTimeout(ctx, time.Minute*5)
	defer cancel()

	// send wantlist updates
	err = mq.network.SendMessage(sendctx, mq.p, wlm)
	if err != nil {
		log.Infof("bitswap send error: %s", err)
		// TODO: what do we do if this fails?
		return
	}
}
func TestPinRecursiveFail(t *testing.T) {
	ctx := context.Background()
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bstore := blockstore.NewBlockstore(dstore)
	bserv := bs.New(bstore, offline.Exchange(bstore))

	dserv := mdag.NewDAGService(bserv)

	p := NewPinner(dstore, dserv)

	a, _ := randNode()
	b, _ := randNode()
	err := a.AddNodeLinkClean("child", b)
	if err != nil {
		t.Fatal(err)
	}

	// NOTE: This isn't a time based test, we expect the pin to fail
	mctx, _ := context.WithTimeout(ctx, time.Millisecond)
	err = p.Pin(mctx, a, true)
	if err == nil {
		t.Fatal("should have failed to pin here")
	}

	_, err = dserv.Add(b)
	if err != nil {
		t.Fatal(err)
	}

	// this one is time based... but shouldn't cause any issues
	mctx, _ = context.WithTimeout(ctx, time.Second)
	err = p.Pin(mctx, a, true)
	if err != nil {
		t.Fatal(err)
	}
}
// GetValue searches for the value corresponding to given Key.
func (dht *IpfsDHT) GetValue(ctx context.Context, key key.Key) ([]byte, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	vals, err := dht.GetValues(ctx, key, 16)
	if err != nil {
		return nil, err
	}

	var recs [][]byte
	for _, v := range vals {
		if v.Val != nil {
			recs = append(recs, v.Val)
		}
	}

	i, err := dht.Selector.BestRecord(key, recs)
	if err != nil {
		return nil, err
	}

	best := recs[i]
	log.Debugf("GetValue %v %v", key, best)
	if best == nil {
		log.Errorf("GetValue yielded correct record with nil value.")
		return nil, routing.ErrNotFound
	}

	fixupRec, err := record.MakePutRecord(dht.peerstore.PrivKey(dht.self), key, best, true)
	if err != nil {
		// probably shouldn't actually 'error' here as we have found a value we like,
		// but this call failing probably isn't something we want to ignore
		return nil, err
	}

	for _, v := range vals {
		// if someone sent us a different 'less-valid' record, let's correct them
		if !bytes.Equal(v.Val, best) {
			go func(v routing.RecvdVal) {
				if v.From == dht.self {
					err := dht.putLocal(key, fixupRec)
					if err != nil {
						log.Error("Error correcting local dht entry:", err)
					}
					return
				}
				ctx, cancel := context.WithTimeout(dht.Context(), time.Second*30)
				defer cancel()
				err := dht.putValueToPeer(ctx, v.From, key, fixupRec)
				if err != nil {
					log.Error("Error correcting DHT entry: ", err)
				}
			}(v)
		}
	}

	return best, nil
}
func TestValueGetSet(t *testing.T) {
	// t.Skip("skipping test to debug another")

	ctx := context.Background()

	dhtA := setupDHT(ctx, t)
	dhtB := setupDHT(ctx, t)

	defer dhtA.Close()
	defer dhtB.Close()
	defer dhtA.host.Close()
	defer dhtB.host.Close()

	vf := &record.ValidChecker{
		Func: func(key.Key, []byte) error {
			return nil
		},
		Sign: false,
	}
	nulsel := func(_ key.Key, bs [][]byte) (int, error) {
		return 0, nil
	}

	dhtA.Validator["v"] = vf
	dhtB.Validator["v"] = vf
	dhtA.Selector["v"] = nulsel
	dhtB.Selector["v"] = nulsel

	connect(t, ctx, dhtA, dhtB)

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	dhtA.PutValue(ctxT, "/v/hello", []byte("world"))

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err := dhtA.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}

	ctxT, _ = context.WithTimeout(ctx, time.Second*2)
	val, err = dhtB.GetValue(ctxT, "/v/hello")
	if err != nil {
		t.Fatal(err)
	}

	if string(val) != "world" {
		t.Fatalf("Expected 'world' got '%s'", string(val))
	}
}
// GetDiagnostic runs a diagnostics request across the entire network
func (d *Diagnostics) GetDiagnostic(ctx context.Context, timeout time.Duration) ([]*DiagInfo, error) {
	log.Debug("getting diagnostic")
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	diagID := newID()
	d.diagLock.Lock()
	d.diagMap[diagID] = time.Now()
	d.diagLock.Unlock()

	log.Debug("begin diagnostic")
	peers := d.getPeers()
	log.Debugf("Sending diagnostic request to %d peers.", len(peers))

	pmes := newMessage(diagID)

	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement) // decrease timeout per hop
	dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)
	if err != nil {
		return nil, fmt.Errorf("diagnostic from peers err: %s", err)
	}

	di := d.getDiagInfo()
	out := []*DiagInfo{di}
	for dpi := range dpeers {
		out = append(out, dpi)
	}
	return out, nil
}
// ResolveLinks iteratively resolves names by walking the link hierarchy.
// Every node is fetched from the DAGService, resolving the next name.
// Returns the list of nodes forming the path, starting with ndd. This list is
// guaranteed never to be empty.
//
// ResolveLinks(nd, []string{"foo", "bar", "baz"})
// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
func (s *Resolver) ResolveLinks(ctx context.Context, ndd *merkledag.Node, names []string) ([]*merkledag.Node, error) {

	result := make([]*merkledag.Node, 0, len(names)+1)
	result = append(result, ndd)
	nd := ndd // dup arg workaround

	// for each of the path components
	for _, name := range names {

		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, time.Minute)
		defer cancel()

		nextnode, err := nd.GetLinkedNode(ctx, s.DAG, name)
		if err == merkledag.ErrLinkNotFound {
			n, _ := nd.Multihash()
			return result, ErrNoLink{Name: name, Node: n}
		} else if err != nil {
			return append(result, nextnode), err
		}

		nd = nextnode
		result = append(result, nextnode)
	}
	return result, nil
}
// pipeStream relays over a stream to a remote peer. It's like `cat`
func (rs *RelayService) pipeStream(src, dst peer.ID, s inet.Stream) error {
	// TODO: find a good way to pass contexts into here
	nsctx, cancel := context.WithTimeout(context.TODO(), time.Second*30)
	defer cancel()

	s2, err := rs.openStreamToPeer(nsctx, dst)
	if err != nil {
		return fmt.Errorf("failed to open stream to peer: %s -- %s", dst, err)
	}
	cancel() // cancel here because this function might last a while

	if err := WriteHeader(s2, src, dst); err != nil {
		return err
	}

	// connect the series of tubes.
	done := make(chan retio, 2)
	go func() {
		n, err := io.Copy(s2, s)
		done <- retio{n, err}
	}()
	go func() {
		n, err := io.Copy(s, s2)
		done <- retio{n, err}
	}()

	r1 := <-done
	r2 := <-done
	log.Infof("%s relayed %d/%d bytes between %s and %s", rs.host.ID(), r1.n, r2.n, src, dst)

	if r1.err != nil {
		return r1.err
	}
	return r2.err
}
func TestHasIsBloomCached(t *testing.T) {
	cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
	bs := NewBlockstore(syncds.MutexWrap(cd))

	for i := 0; i < 1000; i++ {
		bs.Put(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i))))
	}
	ctx, _ := context.WithTimeout(context.Background(), 1*time.Second)
	cachedbs, err := testBloomCached(bs, ctx)
	if err != nil {
		t.Fatal(err)
	}

	select {
	case <-cachedbs.rebuildChan:
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for rebuild: %d", cachedbs.bloom.ElementsAdded())
	}

	cacheFails := 0
	cd.SetFunc(func() {
		cacheFails++
	})

	for i := 0; i < 1000; i++ {
		cachedbs.Has(blocks.NewBlock([]byte(fmt.Sprintf("data: %d", i+2000))).Key())
	}

	if float64(cacheFails)/float64(1000) > float64(0.05) {
		t.Fatal("Bloom filter has cache miss rate of more than 5%")
	}
}
func TestDeadlineFractionCancel(t *testing.T) {
	ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Millisecond)
	ctx2, cancel2 := WithDeadlineFraction(ctx1, 0.5)

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 ended too early")
	case <-ctx2.Done():
		t.Fatal("ctx2 ended too early")
	default:
	}

	cancel2()

	select {
	case <-ctx1.Done():
		t.Fatal("ctx1 should NOT be cancelled")
	case <-ctx2.Done():
	default:
		t.Fatal("ctx2 should be cancelled")
	}

	cancel1()

	select {
	case <-ctx1.Done():
	case <-ctx2.Done():
	default:
		t.Fatal("ctx1 should be cancelled")
	}
}
func TestBasicBitswap(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	t.Log("Test one node trying to get one block from another")

	instances := sg.Instances(2)
	blocks := bg.Blocks(1)
	err := instances[0].Exchange.HasBlock(blocks[0])
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Key())
	if err != nil {
		t.Fatal(err)
	}

	t.Log(blk)
	for _, inst := range instances {
		err := inst.Exchange.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}
func TestGetBlocksSequential(t *testing.T) {
	var servs = Mocks(4)
	for _, s := range servs {
		defer s.Close()
	}
	bg := blocksutil.NewBlockGenerator()
	blks := bg.Blocks(50)

	var keys []key.Key
	for _, blk := range blks {
		keys = append(keys, blk.Key())
		servs[0].AddBlock(blk)
	}

	t.Log("one instance at a time, get blocks concurrently")

	for i := 1; i < len(servs); i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*50)
		defer cancel()
		out := servs[i].GetBlocks(ctx, keys)
		gotten := make(map[key.Key]blocks.Block)
		for blk := range out {
			if _, ok := gotten[blk.Key()]; ok {
				t.Fatal("Got duplicate block!")
			}
			gotten[blk.Key()] = blk
		}
		if len(gotten) != len(blks) {
			t.Fatalf("Didn't get enough blocks back: %d/%d", len(gotten), len(blks))
		}
	}
}
func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) {
	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	block := blocks.NewBlock([]byte("block"))
	g := NewTestSessionGenerator(net)
	defer g.Close()

	peers := g.Instances(2)
	hasBlock := peers[0]
	defer hasBlock.Exchange.Close()

	if err := hasBlock.Exchange.HasBlock(block); err != nil {
		t.Fatal(err)
	}

	wantsBlock := peers[1]
	defer wantsBlock.Exchange.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	received, err := wantsBlock.Exchange.GetBlock(ctx, block.Key())
	if err != nil {
		t.Log(err)
		t.Fatal("Expected to succeed")
	}

	if !bytes.Equal(block.Data, received.Data) {
		t.Fatal("Data doesn't match")
	}
}
func TestFindPeer(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	p, err := dhts[0].FindPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	if p.ID == "" {
		t.Fatal("Failed to find peer.")
	}

	if p.ID != peers[2] {
		t.Fatal("Didn't find expected peer.")
	}
}
func (bs *Bitswap) connectToProviders(ctx context.Context, entries []wantlist.Entry) {

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Get providers for all entries in wantlist (could take a while)
	wg := sync.WaitGroup{}
	for _, e := range entries {
		wg.Add(1)
		go func(k key.Key) {
			defer wg.Done()

			child, cancel := context.WithTimeout(ctx, providerRequestTimeout)
			defer cancel()
			providers := bs.network.FindProvidersAsync(child, k, maxProvidersPerRequest)
			for prov := range providers {
				go func(p peer.ID) {
					bs.network.ConnectTo(ctx, p)
				}(prov)
			}
		}(e.Key)
	}

	wg.Wait() // make sure all our children do finish.
}
// connects to providers for the given keys
func (bs *Bitswap) providerConnector(parent context.Context) {
	defer log.Info("bitswap client worker shutting down...")

	for {
		log.Event(parent, "Bitswap.ProviderConnector.Loop")
		select {
		case req := <-bs.findKeys:
			keys := req.keys
			if len(keys) == 0 {
				log.Warning("Received batch request for zero blocks")
				continue
			}
			log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys})

			// NB: Optimization. Assumes that providers of key[0] are likely to
			// be able to provide for all keys. This currently holds true in most
			// every situation. Later, this assumption may not hold as true.
			child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
			providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
			for p := range providers {
				go bs.network.ConnectTo(req.ctx, p)
			}
			cancel()

		case <-parent.Done():
			return
		}
	}
}
func (n *IpfsNode) HandlePeerFound(p pstore.PeerInfo) {
	log.Warning("trying peer info: ", p)
	ctx, cancel := context.WithTimeout(n.Context(), discoveryConnTimeout)
	defer cancel()
	if err := n.PeerHost.Connect(ctx, p); err != nil {
		log.Warning("Failed to connect to peer found by discovery: ", err)
	}
}
func Dial(nd *core.IpfsNode, p peer.ID, protocol string) (net.Stream, error) {
	ctx, cancel := context.WithTimeout(nd.Context(), time.Second*30)
	defer cancel()
	err := nd.PeerHost.Connect(ctx, peer.PeerInfo{ID: p})
	if err != nil {
		return nil, err
	}
	return nd.PeerHost.NewStream(nd.Context(), pro.ID(protocol), p)
}
func (d *Diagnostics) HandleMessage(ctx context.Context, s inet.Stream) error {

	cr := ctxio.NewReader(ctx, s)
	cw := ctxio.NewWriter(ctx, s)
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax) // maxsize
	w := ggio.NewDelimitedWriter(cw)

	// deserialize msg
	pmes := new(pb.Message)
	if err := r.ReadMsg(pmes); err != nil {
		log.Debugf("Failed to decode protobuf message: %v", err)
		return nil
	}

	// Print out diagnostic
	log.Infof("[peer: %s] Got message from [%s]\n", d.self.Pretty(), s.Conn().RemotePeer())

	// Make sure we haven't already handled this request to prevent loops
	if err := d.startDiag(pmes.GetDiagID()); err != nil {
		return nil
	}

	resp := newMessage(pmes.GetDiagID())
	resp.Data = d.getDiagInfo().Marshal()
	if err := w.WriteMsg(resp); err != nil {
		log.Debugf("Failed to write protobuf message over stream: %s", err)
		return err
	}

	timeout := pmes.GetTimeoutDuration()
	if timeout < HopTimeoutDecrement {
		return fmt.Errorf("timeout too short: %s", timeout)
	}
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	pmes.SetTimeoutDuration(timeout - HopTimeoutDecrement)

	dpeers, err := d.getDiagnosticFromPeers(ctx, d.getPeers(), pmes)
	if err != nil {
		log.Debugf("diagnostic from peers err: %s", err)
		return err
	}
	for b := range dpeers {
		resp := newMessage(pmes.GetDiagID())
		resp.Data = b.Marshal()
		if err := w.WriteMsg(resp); err != nil {
			log.Debugf("Failed to write protobuf message over stream: %s", err)
			return err
		}
	}

	return nil
}
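// The following is NOT part of the original code. It is a small hypothetical
// helper sketching the per-hop timeout budgeting used in HandleMessage and
// GetDiagnostic above: a request refuses to fan out further once the remaining
// budget drops below one hop's worth (HopTimeoutDecrement), otherwise it
// forwards the reduced budget to the next hop. Names here are illustrative.
func nextHopTimeout(remaining, hopDecrement time.Duration) (time.Duration, error) {
	if remaining < hopDecrement {
		// not enough time left to make another hop worthwhile
		return 0, fmt.Errorf("timeout too short: %s", remaining)
	}
	// each hop gets the parent's budget minus one decrement
	return remaining - hopDecrement, nil
}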
// LoadPinner loads a pinner and its keysets from the given datastore
func LoadPinner(d ds.Datastore, dserv mdag.DAGService) (Pinner, error) {
	p := new(pinner)

	rootKeyI, err := d.Get(pinDatastoreKey)
	if err != nil {
		return nil, fmt.Errorf("cannot load pin state: %v", err)
	}
	rootKeyBytes, ok := rootKeyI.([]byte)
	if !ok {
		return nil, fmt.Errorf("cannot load pin state: %s was not bytes", pinDatastoreKey)
	}

	rootKey := key.Key(rootKeyBytes)

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second*5)
	defer cancel()

	root, err := dserv.Get(ctx, rootKey)
	if err != nil {
		return nil, fmt.Errorf("cannot find pinning root object: %v", err)
	}

	internalPin := map[key.Key]struct{}{
		rootKey: struct{}{},
	}
	recordInternal := func(k key.Key) {
		internalPin[k] = struct{}{}
	}

	{ // load recursive set
		recurseKeys, err := loadSet(ctx, dserv, root, linkRecursive, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load recursive pins: %v", err)
		}
		p.recursePin = set.SimpleSetFromKeys(recurseKeys)
	}

	{ // load direct set
		directKeys, err := loadSet(ctx, dserv, root, linkDirect, recordInternal)
		if err != nil {
			return nil, fmt.Errorf("cannot load direct pins: %v", err)
		}
		p.directPin = set.SimpleSetFromKeys(directKeys)
	}

	p.internalPin = internalPin

	// assign services
	p.dserv = dserv
	p.dstore = d

	return p, nil
}
func TestFindPeersConnectedToPeer(t *testing.T) {
	t.Skip("not quite correct (see note)")

	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	_, peers, dhts := setupDHTS(ctx, 4, t)
	defer func() {
		for i := 0; i < 4; i++ {
			dhts[i].Close()
			dhts[i].host.Close()
		}
	}()

	// topology:
	// 0-1, 1-2, 1-3, 2-3
	connect(t, ctx, dhts[0], dhts[1])
	connect(t, ctx, dhts[1], dhts[2])
	connect(t, ctx, dhts[1], dhts[3])
	connect(t, ctx, dhts[2], dhts[3])

	// fmt.Println("0 is", peers[0])
	// fmt.Println("1 is", peers[1])
	// fmt.Println("2 is", peers[2])
	// fmt.Println("3 is", peers[3])

	ctxT, _ := context.WithTimeout(ctx, time.Second)
	pchan, err := dhts[0].FindPeersConnectedToPeer(ctxT, peers[2])
	if err != nil {
		t.Fatal(err)
	}

	// shouldFind := []peer.ID{peers[1], peers[3]}
	found := []peer.PeerInfo{}
	for nextp := range pchan {
		found = append(found, nextp)
	}

	// fmt.Printf("querying 0 (%s) FindPeersConnectedToPeer 2 (%s)\n", peers[0], peers[2])
	// fmt.Println("should find 1, 3", shouldFind)
	// fmt.Println("found", found)

	// testPeerListsMatch(t, shouldFind, found)

	log.Warning("TestFindPeersConnectedToPeer is not quite correct")
	if len(found) == 0 {
		t.Fatal("didn't find any peers.")
	}
}
func TestCarryOnWhenDeadlineExpires(t *testing.T) {

	impossibleDeadline := time.Nanosecond
	fastExpiringCtx, cancel := context.WithTimeout(context.Background(), impossibleDeadline)
	defer cancel()

	n := New()
	defer n.Shutdown()
	block := blocks.NewBlock([]byte("A Missed Connection"))
	blockChannel := n.Subscribe(fastExpiringCtx, block.Key())

	assertBlockChannelNil(t, blockChannel)
}
func ExampleWithTimeout() {
	// Pass a context with a timeout to tell a blocking function that it
	// should abandon its work after the timeout elapses.
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}
	// Output:
	// context deadline exceeded
}
func TestBootstrap(t *testing.T) {
	// t.Skip("skipping test to debug another")
	if testing.Short() {
		t.SkipNow()
	}

	ctx := context.Background()

	nDHTs := 30
	_, _, dhts := setupDHTS(ctx, nDHTs, t)
	defer func() {
		for i := 0; i < nDHTs; i++ {
			dhts[i].Close()
			defer dhts[i].host.Close()
		}
	}()

	t.Logf("connecting %d dhts in a ring", nDHTs)
	for i := 0; i < nDHTs; i++ {
		connect(t, ctx, dhts[i], dhts[(i+1)%len(dhts)])
	}

	<-time.After(100 * time.Millisecond)
	// bootstrap a few times until we get good tables.
	stop := make(chan struct{})
	go func() {
		for {
			t.Logf("bootstrapping %d dhts so they find each other", nDHTs)
			ctxT, _ := context.WithTimeout(ctx, 5*time.Second)
			bootstrap(t, ctxT, dhts)

			select {
			case <-time.After(50 * time.Millisecond):
				continue // being explicit
			case <-stop:
				return
			}
		}
	}()

	waitForWellFormedTables(t, dhts, 7, 10, 20*time.Second)
	close(stop)

	if u.Debug {
		// the routing tables should be full now. let's inspect them.
		printRoutingTables(dhts)
	}
}
// TODO simplify this test. get to the _essence_!
func TestSendToWantingPeer(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay))
	sg := NewTestSessionGenerator(net)
	defer sg.Close()
	bg := blocksutil.NewBlockGenerator()

	prev := rebroadcastDelay.Set(time.Second / 2)
	defer func() { rebroadcastDelay.Set(prev) }()

	peers := sg.Instances(2)
	peerA := peers[0]
	peerB := peers[1]

	t.Logf("Session %v\n", peerA.Peer)
	t.Logf("Session %v\n", peerB.Peer)

	waitTime := time.Second * 5

	alpha := bg.Next()
	// peerA requests and waits for block alpha
	ctx, cancel := context.WithTimeout(context.Background(), waitTime)
	defer cancel()
	alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []key.Key{alpha.Key()})
	if err != nil {
		t.Fatal(err)
	}

	// peerB announces to the network that he has block alpha
	err = peerB.Exchange.HasBlock(alpha)
	if err != nil {
		t.Fatal(err)
	}

	// At some point, peerA should get alpha (or timeout)
	blkrecvd, ok := <-alphaPromise
	if !ok {
		t.Fatal("context timed out and broke promise channel!")
	}

	if blkrecvd.Key() != alpha.Key() {
		t.Fatal("Wrong block!")
	}
}
// WithDeadlineFraction returns a Context with a fraction of the
// original context's timeout. This is useful in sequential pipelines
// of work, where one might try options and fall back to others
// depending on the time available, or failure to respond. For example:
//
//  // getPicture returns a picture from our encrypted database
//  // we have a pipeline of multiple steps. we need to:
//  // - get the data from a database
//  // - decrypt it
//  // - apply many transforms
//  //
//  // we **know** that each step takes increasingly more time.
//  // The transforms are much more expensive than decryption, and
//  // decryption is more expensive than the database lookup.
//  // If our database takes too long (i.e. >0.2 of available time),
//  // there's no use in continuing.
//  func getPicture(ctx context.Context, key string) ([]byte, error) {
//    // fractional timeout contexts to the rescue!
//
//    // try the database with 0.2 of remaining time.
//    ctx1, _ := ctxext.WithDeadlineFraction(ctx, 0.2)
//    val, err := db.Get(ctx1, key)
//    if err != nil {
//      return nil, err
//    }
//
//    // try decryption with 0.3 of remaining time.
//    ctx2, _ := ctxext.WithDeadlineFraction(ctx, 0.3)
//    if val, err = decryptor.Decrypt(ctx2, val); err != nil {
//      return nil, err
//    }
//
//    // try transforms with all remaining time. hopefully it's enough!
//    return transformer.Transform(ctx, val)
//  }
//
func WithDeadlineFraction(ctx context.Context, fraction float64) (
	context.Context, context.CancelFunc) {

	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}

	left := d.Sub(time.Now())
	if left < 0 { // already passed...
		return context.WithCancel(ctx)
	}

	left = time.Duration(float64(left) * fraction)
	return context.WithTimeout(ctx, left)
}
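// The following is NOT part of the original package: a minimal Example-style
// sketch, assumed to live in the same package as WithDeadlineFraction (so that
// context, fmt, and time are imported), mirroring ExampleWithTimeout above. It
// shows that the child context's deadline lands at roughly the requested
// fraction of the parent's remaining budget, so the child expires first.
func ExampleWithDeadlineFraction() {
	// Parent budget: 100ms.
	parent, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// Child gets roughly half of whatever time is left on the parent.
	child, childCancel := WithDeadlineFraction(parent, 0.5)
	defer childCancel()

	select {
	case <-child.Done():
		fmt.Println("child expired first:", child.Err())
	case <-parent.Done():
		fmt.Println("parent expired first:", parent.Err())
	}
	// Output:
	// child expired first: context deadline exceeded
}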
func PublishEntry(ctx context.Context, r routing.IpfsRouting, ipnskey key.Key, rec *pb.IpnsEntry) error {
	timectx, cancel := context.WithTimeout(ctx, PublishPutValTimeout)
	defer cancel()

	data, err := proto.Marshal(rec)
	if err != nil {
		return err
	}

	log.Debugf("Storing ipns entry at: %s", ipnskey)
	// Store ipns entry at "/ipns/"+b58(h(pubkey))
	if err := r.PutValue(timectx, ipnskey, data); err != nil {
		return err
	}

	return nil
}
func PublishPublicKey(ctx context.Context, r routing.IpfsRouting, k key.Key, pubk ci.PubKey) error {
	log.Debugf("Storing pubkey at: %s", k)
	pkbytes, err := pubk.Bytes()
	if err != nil {
		return err
	}

	// Store associated public key
	timectx, cancel := context.WithTimeout(ctx, PublishPutValTimeout)
	defer cancel()
	err = r.PutValue(timectx, k, pkbytes)
	if err != nil {
		return err
	}
	return nil
}
func TestBlocks(t *testing.T) {
	bstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore()))
	bs := New(bstore, offline.Exchange(bstore))
	defer bs.Close()

	_, err := bs.GetBlock(context.Background(), key.Key(""))
	if err != ErrNotFound {
		t.Error("Empty String Key should error", err)
	}

	b := blocks.NewBlock([]byte("beep boop"))
	h := u.Hash([]byte("beep boop"))
	if !bytes.Equal(b.Multihash(), h) {
		t.Error("Block Multihash and data multihash not equal")
	}

	if b.Key() != key.Key(h) {
		t.Error("Block key and data multihash key not equal")
	}

	k, err := bs.AddBlock(b)
	if err != nil {
		t.Error("failed to add block to BlockService", err)
		return
	}

	if k != b.Key() {
		t.Error("returned key is not equal to block key", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	b2, err := bs.GetBlock(ctx, b.Key())
	if err != nil {
		t.Error("failed to retrieve block from BlockService", err)
		return
	}

	if b.Key() != b2.Key() {
		t.Error("Block keys not equal.")
	}

	if !bytes.Equal(b.Data(), b2.Data()) {
		t.Error("Block data is not equal.")
	}
}
func (bs *Bitswap) provideWorker(px process.Process) {

	limit := make(chan struct{}, provideWorkerMax)

	limitedGoProvide := func(k key.Key, wid int) {
		defer func() {
			// replace token when done
			<-limit
		}()
		ev := logging.LoggableMap{"ID": wid}

		ctx := procctx.OnClosingContext(px) // derive ctx from px
		defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done()

		ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
		defer cancel()

		if err := bs.network.Provide(ctx, k); err != nil {
			log.Warning(err)
		}
	}

	// worker spawner, reads from bs.provideKeys until it closes, spawning a
	// _ratelimited_ number of workers to handle each key.
	for wid := 2; ; wid++ {
		ev := logging.LoggableMap{"ID": 1}
		log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev)

		select {
		case <-px.Closing():
			return
		case k, ok := <-bs.provideKeys:
			if !ok {
				log.Debug("provideKeys channel closed")
				return
			}
			select {
			case <-px.Closing():
				return
			case limit <- struct{}{}:
				go limitedGoProvide(k, wid)
			}
		}
	}
}
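// The provideWorker above combines two patterns worth seeing in isolation: a
// buffered channel used as a semaphore to rate-limit goroutines, and a
// per-task WithTimeout context derived from a long-lived parent. The program
// below is NOT original code; it is a hypothetical, stripped-down, standalone
// sketch of that pattern. The names doTask, maxWorkers and taskTimeout are
// illustrative and do not come from the original package.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

const (
	maxWorkers  = 4                     // illustrative concurrency limit
	taskTimeout = 50 * time.Millisecond // illustrative per-task timeout
)

// doTask stands in for real per-key work such as bs.network.Provide.
func doTask(ctx context.Context, id int) {
	select {
	case <-time.After(10 * time.Millisecond):
		fmt.Println("task done:", id)
	case <-ctx.Done():
		fmt.Println("task cancelled:", id, ctx.Err())
	}
}

func main() {
	parent, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	limit := make(chan struct{}, maxWorkers) // semaphore: at most maxWorkers in flight
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		select {
		case <-parent.Done():
			wg.Wait()
			return
		case limit <- struct{}{}: // acquire a token (blocks while maxWorkers are busy)
		}

		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer func() { <-limit }() // release the token when done

			// every task gets its own deadline, derived from the parent
			ctx, cancel := context.WithTimeout(parent, taskTimeout)
			defer cancel()

			doTask(ctx, id)
		}(i)
	}
	wg.Wait()
}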