Code Example #1
File: workers.go Project: JeffreyRodriguez/go-ipfs
func (bs *Bitswap) provideCollector(ctx context.Context) {
	defer close(bs.provideKeys)
	var toProvide []key.Key
	var nextKey key.Key
	var keysOut chan key.Key

	for {
		select {
		case blk, ok := <-bs.newBlocks:
			if !ok {
				log.Debug("newBlocks channel closed")
				return
			}
			if keysOut == nil {
				nextKey = blk.Key()
				keysOut = bs.provideKeys
			} else {
				toProvide = append(toProvide, blk.Key())
			}
		case keysOut <- nextKey:
			if len(toProvide) > 0 {
				nextKey = toProvide[0]
				toProvide = toProvide[1:]
			} else {
				keysOut = nil
			}
		case <-ctx.Done():
			return
		}
	}
}
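
The provideCollector above leans on a standard Go idiom: a send on a nil channel never proceeds, so setting keysOut to nil disables that select case until there is a key to forward. Below is a minimal, standalone sketch of the same buffering pattern with illustrative names (forward, pending); it is not part of go-ipfs, and unlike provideCollector it also nils out the input channel so it can drain its queue after the input closes.

package main

import "fmt"

// forward buffers values from in and sends them on out, using nil channels to
// disable select cases: the send case while nothing is queued, and the receive
// case once the input has been closed.
func forward(in <-chan int, out chan<- int) {
	defer close(out)
	var pending []int
	var next int
	var outCh chan<- int // nil: send case disabled
	for {
		select {
		case v, ok := <-in:
			if !ok {
				if outCh == nil {
					return // input closed and nothing queued
				}
				in = nil // closed; a nil channel blocks forever, disabling this case
				continue
			}
			if outCh == nil {
				next = v
				outCh = out // enable the send case
			} else {
				pending = append(pending, v)
			}
		case outCh <- next:
			if len(pending) > 0 {
				next, pending = pending[0], pending[1:]
			} else {
				outCh = nil // queue drained; disable the send case
				if in == nil {
					return
				}
			}
		}
	}
}

func main() {
	in := make(chan int)
	out := make(chan int)
	go forward(in, out)
	go func() {
		for i := 1; i <= 3; i++ {
			in <- i
		}
		close(in)
	}()
	for v := range out {
		fmt.Println(v) // prints 1, 2, 3
	}
}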
Code Example #2
File: ipfs_impl.go Project: avbalu/go-ipfs
// FindProvidersAsync returns a channel of providers for the given key
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k key.Key, max int) <-chan peer.ID {

	// Since routing queries are expensive, give bitswap the peers to which we
	// have open connections. Note that this may cause issues if bitswap starts
	// precisely tracking which peers provide certain keys. This optimization
	// would be misleading. In the long run, this may not be the most
	// appropriate place for this optimization, but it won't cause any harm in
	// the short term.
	connectedPeers := bsnet.host.Network().Peers()
	out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers
	for _, id := range connectedPeers {
		if id == bsnet.host.ID() {
			continue // ignore self as provider
		}
		out <- id
	}

	go func() {
		defer close(out)
		providers := bsnet.routing.FindProvidersAsync(ctx, k, max)
		for info := range providers {
			if info.ID == bsnet.host.ID() {
				continue // ignore self as provider
			}
			bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peer.TempAddrTTL)
			select {
			case <-ctx.Done():
				return
			case out <- info.ID:
			}
		}
	}()
	return out
}
Code Example #3
// Subscribe returns a channel of blocks for the given |keys|. |blockChannel|
// is closed if the |ctx| times out or is cancelled, or after sending len(keys)
// blocks.
func (ps *impl) Subscribe(ctx context.Context, keys ...key.Key) <-chan *blocks.Block {

	blocksCh := make(chan *blocks.Block, len(keys))
	valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking
	if len(keys) == 0 {
		close(blocksCh)
		return blocksCh
	}
	ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...)
	go func() {
		defer close(blocksCh)
		defer ps.wrapped.Unsub(valuesCh) // with a len(keys) buffer, this is an optimization
		for {
			select {
			case <-ctx.Done():
				return
			case val, ok := <-valuesCh:
				if !ok {
					return
				}
				block, ok := val.(*blocks.Block)
				if !ok {
					return
				}
				select {
				case <-ctx.Done():
					return
				case blocksCh <- block: // continue
				}
			}
		}
	}()

	return blocksCh
}
Code Example #4
File: workers.go Project: JeffreyRodriguez/go-ipfs
func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
	idmap := logging.LoggableMap{"ID": id}
	defer log.Info("bitswap task worker shutting down...")
	for {
		log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
				log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{
					"ID":     id,
					"Target": envelope.Peer.Pretty(),
					"Block":  envelope.Block.Multihash.B58String(),
				})

				bs.wm.SendBlock(ctx, envelope)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
Code Example #5
File: bitswap.go Project: BlockchainOS/go-ipfs
// GetBlocks returns a channel where the caller may receive blocks that
// correspond to the provided |keys|. Returns an error if BitSwap is unable to
// begin this request within the deadline enforced by the context.
//
// NB: Your request remains open until the context expires. To conserve
// resources, provide a context with a reasonably short deadline (ie. not one
// that lasts throughout the lifetime of the server)
func (bs *Bitswap) GetBlocks(ctx context.Context, keys []key.Key) (<-chan *blocks.Block, error) {
	select {
	case <-bs.process.Closing():
		return nil, errors.New("bitswap is closed")
	default:
	}
	promise := bs.notifications.Subscribe(ctx, keys...)

	for _, k := range keys {
		log.Event(ctx, "Bitswap.GetBlockRequest.Start", &k)
	}

	bs.wm.WantBlocks(keys)

	req := &blockRequest{
		keys: keys,
		ctx:  ctx,
	}
	select {
	case bs.findKeys <- req:
		return promise, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
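
A hypothetical caller of GetBlocks, following the advice in the comment above: bound the request with a deadline and drain the promise channel. The function name, the 30-second deadline, and the error handling are illustrative, and the snippet assumes the same imports as the file above.

// collectBlocks requests keys over bitswap and gathers whatever arrives before
// the deadline. The promise channel is closed when the context expires or, per
// Subscribe's contract, once all requested blocks have been delivered.
func collectBlocks(bs *Bitswap, keys []key.Key) ([]*blocks.Block, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	ch, err := bs.GetBlocks(ctx, keys)
	if err != nil {
		return nil, err
	}

	var got []*blocks.Block
	for blk := range ch {
		got = append(got, blk)
	}
	return got, nil
}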
Code Example #6
File: ping.go Project: thomas-gardner/go-ipfs
func pingPeer(ctx context.Context, n *core.IpfsNode, pid peer.ID, numPings int) <-chan interface{} {
	outChan := make(chan interface{})
	go func() {
		defer close(outChan)

		if len(n.Peerstore.Addrs(pid)) == 0 {
			// Make sure we can find the node in question
			outChan <- &PingResult{
				Text: fmt.Sprintf("Looking up peer %s", pid.Pretty()),
			}

			ctx, cancel := context.WithTimeout(ctx, kPingTimeout)
			defer cancel()
			p, err := n.Routing.FindPeer(ctx, pid)
			if err != nil {
				outChan <- &PingResult{Text: fmt.Sprintf("Peer lookup error: %s", err)}
				return
			}
			n.Peerstore.AddAddrs(p.ID, p.Addrs, peer.TempAddrTTL)
		}

		outChan <- &PingResult{Text: fmt.Sprintf("PING %s.", pid.Pretty())}

		ctx, cancel := context.WithTimeout(ctx, kPingTimeout*time.Duration(numPings))
		defer cancel()
		pings, err := n.Ping.Ping(ctx, pid)
		if err != nil {
			log.Debugf("Ping error: %s", err)
			outChan <- &PingResult{Text: fmt.Sprintf("Ping error: %s", err)}
			return
		}

		var done bool
		var total time.Duration
		for i := 0; i < numPings && !done; i++ {
			select {
			case <-ctx.Done():
				done = true
				break
			case t, ok := <-pings:
				if !ok {
					done = true
					break
				}

				outChan <- &PingResult{
					Success: true,
					Time:    t,
				}
				total += t
				time.Sleep(time.Second)
			}
		}
		averagems := total.Seconds() * 1000 / float64(numPings)
		outChan <- &PingResult{
			Text: fmt.Sprintf("Average latency: %.2fms", averagems),
		}
	}()
	return outChan
}
Code Example #7
File: sync.go Project: avbalu/go-ipfs
func (cq *ChanQueue) process(ctx context.Context) {
	// construct the channels here to be able to use them bidirectionally
	enqChan := make(chan peer.ID)
	deqChan := make(chan peer.ID)

	cq.EnqChan = enqChan
	cq.DeqChan = deqChan

	go func() {
		log.Debug("processing")
		defer log.Debug("closed")
		defer close(deqChan)

		var next peer.ID
		var item peer.ID
		var more bool

		for {
			if cq.Queue.Len() == 0 {
				// log.Debug("wait for enqueue")
				select {
				case next, more = <-enqChan:
					if !more {
						return
					}
					// log.Debug("got", next)

				case <-ctx.Done():
					return
				}

			} else {
				next = cq.Queue.Dequeue()
				// log.Debug("peek", next)
			}

			select {
			case item, more = <-enqChan:
				if !more {
					if cq.Queue.Len() > 0 {
						return // we're done done.
					}
					enqChan = nil // closed, so no use.
				}
				// log.Debug("got", item)
				cq.Queue.Enqueue(item)
				cq.Queue.Enqueue(next) // order may have changed.
				next = ""

			case deqChan <- next:
				// log.Debug("dequeued", next)
				next = ""

			case <-ctx.Done():
				return
			}
		}

	}()
}
Code Example #8
File: merkledag.go Project: andradeandrey/go-ipfs
// FetchGraph asynchronously fetches all nodes that are children of the given
// node, and returns a channel that may be waited upon for the fetch to complete
func FetchGraph(ctx context.Context, root *Node, serv DAGService) chan struct{} {
	log.Warning("Untested.")
	var wg sync.WaitGroup
	done := make(chan struct{})

	for _, l := range root.Links {
		wg.Add(1)
		go func(lnk *Link) {

			// Signal child is done on way out
			defer wg.Done()
			// bail out early if the context has already been cancelled
			select {
			case <-ctx.Done():
				return
			default:
			}

			nd, err := lnk.GetNode(ctx, serv)
			if err != nil {
				log.Debug(err)
				return
			}

			// Wait for children to finish
			<-FetchGraph(ctx, nd, serv)
		}(l)
	}

	go func() {
		wg.Wait()
		done <- struct{}{}
	}()

	return done
}
Code Example #9
File: system.go Project: JeffreyRodriguez/go-ipfs
// Run is the main republisher loop
func (np *Republisher) Run(ctx context.Context) {
	for {
		select {
		case <-np.Publish:
			quick := time.After(np.TimeoutShort)
			longer := time.After(np.TimeoutLong)

		wait:
			select {
			case <-ctx.Done():
				return
			case <-np.Publish:
				quick = time.After(np.TimeoutShort)
				goto wait
			case <-quick:
			case <-longer:
			}

			log.Info("Publishing Changes!")
			err := np.root.Publish(ctx)
			if err != nil {
				log.Errorf("republishRoot error: %s", err)
			}

		case <-ctx.Done():
			return
		}
	}
}
Code Example #10
File: gc.go Project: nham/go-ipfs
func GarbageCollectAsync(n *core.IpfsNode, ctx context.Context) (<-chan *KeyRemoved, error) {

	keychan, err := n.Blockstore.AllKeysChan(ctx)
	if err != nil {
		return nil, err
	}

	output := make(chan *KeyRemoved)
	go func() {
		defer close(output)
		for {
			select {
			case k, ok := <-keychan:
				if !ok {
					return
				}
				if !n.Pinning.IsPinned(k) {
					err := n.Blockstore.DeleteBlock(k)
					if err != nil {
						log.Debugf("Error removing key from blockstore: %s", err)
						continue
					}
					select {
					case output <- &KeyRemoved{k}:
					case <-ctx.Done():
					}
				}
			case <-ctx.Done():
				return
			}
		}
	}()
	return output, nil
}
Code Example #11
File: merkledag.go Project: noffle/go-ipfs
func fetchNodes(ctx context.Context, ds DAGService, in <-chan []key.Key, out chan<- *Node, errs chan<- error) {
	defer close(out)

	get := func(g NodeGetter) {
		nd, err := g.Get(ctx)
		if err != nil {
			select {
			case errs <- err:
			case <-ctx.Done():
			}
			return
		}

		select {
		case out <- nd:
		case <-ctx.Done():
			return
		}
	}

	for ks := range in {
		ng := ds.GetNodes(ctx, ks)
		for _, g := range ng {
			go get(g)
		}
	}
}
Code Example #12
File: ping.go Project: heems/go-ipfs
func (ps *PingService) Ping(ctx context.Context, p peer.ID) (<-chan time.Duration, error) {
	s, err := ps.Host.NewStream(ID, p)
	if err != nil {
		return nil, err
	}

	out := make(chan time.Duration)
	go func() {
		defer close(out)
		for {
			select {
			case <-ctx.Done():
				return
			default:
				t, err := ping(s)
				if err != nil {
					log.Debugf("ping error: %s", err)
					return
				}

				select {
				case out <- t:
				case <-ctx.Done():
					return
				}
			}
		}
	}()

	return out, nil
}
Code Example #13
File: offline.go Project: avbalu/go-ipfs
func (e *offlineExchange) GetBlocks(ctx context.Context, ks []key.Key) (<-chan *blocks.Block, error) {
	out := make(chan *blocks.Block, 0)
	go func() {
		defer close(out)
		var misses []key.Key
		for _, k := range ks {
			hit, err := e.bs.Get(k)
			if err != nil {
				misses = append(misses, k)
				// a long line of misses should abort when context is cancelled.
				select {
				// TODO case send misses down channel
				case <-ctx.Done():
					return
				default:
					continue
				}
			}
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out, nil
}
Code Example #14
File: blockservice.go Project: noscripter/go-ipfs
// GetBlocks gets a list of blocks asynchronously and returns through
// the returned channel.
// NB: No guarantees are made about order.
func (s *BlockService) GetBlocks(ctx context.Context, ks []key.Key) <-chan *blocks.Block {
	out := make(chan *blocks.Block, 0)
	go func() {
		defer close(out)
		var misses []key.Key
		for _, k := range ks {
			hit, err := s.Blockstore.Get(k)
			if err != nil {
				misses = append(misses, k)
				continue
			}
			log.Debug("Blockservice: Got data in datastore.")
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}

		rblocks, err := s.Exchange.GetBlocks(ctx, misses)
		if err != nil {
			log.Debugf("Error with GetBlocks: %s", err)
			return
		}

		for b := range rblocks {
			select {
			case out <- b:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out
}
Code Example #15
File: workers.go Project: JeffreyRodriguez/go-ipfs
func (bs *Bitswap) rebroadcastWorker(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	defer cancel()

	broadcastSignal := time.NewTicker(rebroadcastDelay.Get())
	defer broadcastSignal.Stop()

	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()

	for {
		log.Event(ctx, "Bitswap.Rebroadcast.idle")
		select {
		case <-tick.C:
			n := bs.wm.wl.Len()
			if n > 0 {
				log.Debug(n, "keys in bitswap wantlist")
			}
		case <-broadcastSignal.C: // resend unfulfilled wantlist keys
			log.Event(ctx, "Bitswap.Rebroadcast.active")
			entries := bs.wm.wl.Entries()
			if len(entries) > 0 {
				bs.connectToProviders(ctx, entries)
			}
		case <-parent.Done():
			return
		}
	}
}
Code Example #16
File: workers.go Project: JeffreyRodriguez/go-ipfs
// connects to providers for the given keys
func (bs *Bitswap) providerConnector(parent context.Context) {
	defer log.Info("bitswap client worker shutting down...")

	for {
		log.Event(parent, "Bitswap.ProviderConnector.Loop")
		select {
		case req := <-bs.findKeys:
			keys := req.keys
			if len(keys) == 0 {
				log.Warning("Received batch request for zero blocks")
				continue
			}
			log.Event(parent, "Bitswap.ProviderConnector.Work", logging.LoggableMap{"Keys": keys})

			// NB: Optimization. Assumes that providers of key[0] are likely to
			// be able to provide for all keys. This currently holds true in most
			// every situation. Later, this assumption may not hold as true.
			child, cancel := context.WithTimeout(req.ctx, providerRequestTimeout)
			providers := bs.network.FindProvidersAsync(child, keys[0], maxProvidersPerRequest)
			for p := range providers {
				go bs.network.ConnectTo(req.ctx, p)
			}
			cancel()

		case <-parent.Done():
			return
		}
	}
}
Code Example #17
File: serve_test.go Project: andradeandrey/go-ipfs
func (it *interrupt) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	select {
	case it.hanging <- struct{}{}:
	default:
	}
	<-ctx.Done()
	return fuse.EINTR
}
Code Example #18
File: context.go Project: andradeandrey/go-ipfs
// LogError logs the error to the owner of the context.
//
// If this context was created with ContextWithErrorLog, then this method
// passes the error to context creator over an unbuffered channel.
//
// If this context was created by other means, this method is a no-op.
func LogError(ctx context.Context, err error) {
	v := ctx.Value(errLogKey)
	errs, ok := v.(privateChanType)
	if !ok {
		return
	}
	errs <- err
}
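
The comment above refers to ContextWithErrorLog as the way such a channel gets attached. One plausible shape for that creator side is sketched here; the signature is assumed rather than taken from go-ipfs, and it presumes privateChanType is a plain bidirectional chan error.

// Assumed sketch only: attach an unbuffered error channel under errLogKey and
// hand it to the caller, who must keep receiving from it, since LogError's
// send above will otherwise block.
func ContextWithErrorLog(ctx context.Context) (context.Context, <-chan error) {
	errs := make(privateChanType) // unbuffered, as documented above
	return context.WithValue(ctx, errLogKey, errs), errs
}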
Code Example #19
File: publisher.go Project: andradeandrey/go-ipfs
// setting the TTL on published records is an experimental feature.
// as such, i'm using the context to wire it through to avoid changing too
// much code along the way.
func checkCtxTTL(ctx context.Context) (time.Duration, bool) {
	v := ctx.Value("ipns-publish-ttl")
	if v == nil {
		return 0, false
	}

	d, ok := v.(time.Duration)
	return d, ok
}
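
The producer side implied by checkCtxTTL simply wires the TTL through the context under the same string key. A brief illustrative sketch (the function name and the 24-hour value are made up; the file's existing imports are assumed):

// exampleCtxTTL attaches a TTL under "ipns-publish-ttl" and reads it back.
func exampleCtxTTL(ctx context.Context) {
	ctx = context.WithValue(ctx, "ipns-publish-ttl", 24*time.Hour)
	if ttl, ok := checkCtxTTL(ctx); ok {
		log.Infof("publishing with ttl %s", ttl)
	}
}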
Code Example #20
File: swarm_dial.go Project: avbalu/go-ipfs
// gatedDialAttempt is an attempt to dial a node. It is gated by the swarm's
// dial synchronization systems: dialsync and dialbackoff.
func (s *Swarm) gatedDialAttempt(ctx context.Context, p peer.ID) (*Conn, error) {
	var logdial = lgbl.Dial("swarm", s.LocalPeer(), p, nil, nil)
	defer log.EventBegin(ctx, "swarmDialAttemptSync", logdial).Done()

	// check if we already have an open connection first
	conn := s.bestConnectionToPeer(p)
	if conn != nil {
		return conn, nil
	}

	// check if there's an ongoing dial to this peer
	if ok, wait := s.dsync.Lock(p); ok {
		// ok, we have been charged to dial! let's do it.
		// if it succeeds, dial will add the conn to the swarm itself.

		defer log.EventBegin(ctx, "swarmDialAttemptStart", logdial).Done()
		ctxT, cancel := context.WithTimeout(ctx, s.dialT)
		conn, err := s.dial(ctxT, p)
		cancel()
		s.dsync.Unlock(p)
		log.Debugf("dial end %s", conn)
		if err != nil {
			log.Event(ctx, "swarmDialBackoffAdd", logdial)
			s.backf.AddBackoff(p) // let others know to backoff

			// ok, we failed. try again. (if loop is done, our error is output)
			return nil, fmt.Errorf("dial attempt failed: %s", err)
		}
		log.Event(ctx, "swarmDialBackoffClear", logdial)
		s.backf.Clear(p) // okay, no longer need to backoff
		return conn, nil

	} else {
		// we did not dial. we must wait for someone else to dial.

		// check whether we should backoff first...
		if s.backf.Backoff(p) {
			log.Event(ctx, "swarmDialBackoff", logdial)
			return nil, ErrDialBackoff
		}

		defer log.EventBegin(ctx, "swarmDialWait", logdial).Done()
		select {
		case <-wait: // wait for that other dial to finish.

			// see if it worked, OR we got an incoming dial in the meantime...
			conn := s.bestConnectionToPeer(p)
			if conn != nil {
				return conn, nil
			}
			return nil, ErrDialFailed
		case <-ctx.Done(): // or we may have to bail...
			return nil, ctx.Err()
		}
	}
}
Code Example #21
File: context.go Project: avbalu/go-ipfs
func MetadataFromContext(ctx context.Context) (Metadata, error) {
	value := ctx.Value(metadataKey)
	if value != nil {
		metadata, ok := value.(Metadata)
		if ok {
			return metadata, nil
		}
	}
	return nil, errors.New("context contains no metadata")
}
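
The producer side would store the Metadata under the same unexported metadataKey. A sketch of such a companion helper follows; the name ContextWithMetadata is assumed here rather than taken from the file.

// Assumed companion sketch: attach Metadata so MetadataFromContext above can
// recover it.
func ContextWithMetadata(ctx context.Context, m Metadata) context.Context {
	return context.WithValue(ctx, metadataKey, m)
}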
Code Example #22
File: providers.go Project: andradeandrey/go-ipfs
func (pm *ProviderManager) AddProvider(ctx context.Context, k key.Key, val peer.ID) {
	prov := &addProv{
		k:   k,
		val: val,
	}
	select {
	case pm.newprovs <- prov:
	case <-ctx.Done():
	}
}
Code Example #23
File: diag.go Project: rdterner/go-ipfs
func (d *Diagnostics) getDiagnosticFromPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (<-chan *DiagInfo, error) {
	s, err := d.host.NewStream(ProtocolDiag, p)
	if err != nil {
		return nil, err
	}

	cr := ctxio.NewReader(ctx, s) // ok to use. we defer close stream in this func
	cw := ctxio.NewWriter(ctx, s) // ok to use. we defer close stream in this func
	r := ggio.NewDelimitedReader(cr, inet.MessageSizeMax)
	w := ggio.NewDelimitedWriter(cw)

	start := time.Now()

	if err := w.WriteMsg(pmes); err != nil {
		return nil, err
	}

	out := make(chan *DiagInfo)
	go func() {

		defer func() {
			close(out)
			s.Close()
			rtt := time.Since(start)
			log.Infof("diagnostic request took: %s", rtt.String())
		}()

		for {
			rpmes := new(pb.Message)
			if err := r.ReadMsg(rpmes); err != nil {
				log.Debugf("Error reading diagnostic from stream: %s", err)
				return
			}
			if rpmes == nil {
				log.Debug("Got no response back from diag request.")
				return
			}

			di, err := decodeDiagJson(rpmes.GetData())
			if err != nil {
				log.Debug(err)
				return
			}

			select {
			case out <- di:
			case <-ctx.Done():
				return
			}
		}

	}()

	return out, nil
}
Code Example #24
File: context.go Project: heems/go-ipfs
// WithContextAndTeardown is a helper function to set teardown at initiation
// of WithContext
func WithContextAndTeardown(ctx context.Context, tf goprocess.TeardownFunc) goprocess.Process {
	if ctx == nil {
		panic("nil Context")
	}
	p := goprocess.WithTeardown(tf)
	go func() {
		<-ctx.Done()
		p.Close()
	}()
	return p
}
Code Example #25
File: ipns_unix.go Project: rdterner/go-ipfs
// Fsync flushes the content in the file to disk, but does not
// update the dag tree internally
func (fi *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
	errs := make(chan error, 1)
	go func() {
		errs <- fi.fi.Sync()
	}()
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
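
The Fsync body above is an instance of a reusable pattern: run a blocking call in its own goroutine and race its result against context cancellation. A generic sketch of the pattern follows (the helper name is illustrative):

// runWithContext returns the blocking call's error or ctx.Err(), whichever
// comes first. The buffer of one lets the goroutine complete its send even if
// the caller has already given up, so the goroutine never leaks blocked on the
// channel.
func runWithContext(ctx context.Context, blocking func() error) error {
	errs := make(chan error, 1)
	go func() { errs <- blocking() }()
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}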
Code Example #26
File: wantmanager.go Project: andradeandrey/go-ipfs
func (mq *msgQueue) runQueue(ctx context.Context) {
	for {
		select {
		case <-mq.work: // there is work to be done
			mq.doWork(ctx)
		case <-mq.done:
			return
		case <-ctx.Done():
			return
		}
	}
}
Code Example #27
File: context.go Project: avbalu/go-ipfs
// WithContext constructs and returns a Process that respects
// given context. It is the equivalent of:
//
//   func ProcessWithContext(ctx context.Context) goprocess.Process {
//     p := goprocess.WithParent(goprocess.Background())
//     go func() {
//       <-ctx.Done()
//       p.Close()
//     }()
//     return p
//   }
//
func WithContext(ctx context.Context) goprocess.Process {
	if ctx == nil {
		panic("nil Context")
	}

	p := goprocess.WithParent(goprocess.Background())
	go func() {
		<-ctx.Done()
		p.Close()
	}()
	return p
}
Code Example #28
File: context.go Project: heems/go-ipfs
// CloseAfterContext schedules the process to close after the given
// context is done. It is the equivalent of:
//
//   func CloseAfterContext(p goprocess.Process, ctx context.Context) {
//     go func() {
//       <-ctx.Done()
//       p.Close()
//     }()
//   }
//
func CloseAfterContext(p goprocess.Process, ctx context.Context) {
	if p == nil {
		panic("nil Process")
	}
	if ctx == nil {
		panic("nil Context")
	}

	go func() {
		<-ctx.Done()
		p.Close()
	}()
}
Code Example #29
File: query.go Project: heems/go-ipfs
// Run runs the query at hand. pass in a list of peers to use first.
func (q *dhtQuery) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	runner := newQueryRunner(q)
	return runner.Run(ctx, peers)
}
Code Example #30
File: fracctx.go Project: avbalu/go-ipfs
func WithDeadlineFraction(ctx context.Context, fraction float64) (context.Context, context.CancelFunc) {
	d, found := ctx.Deadline()
	if !found { // no deadline
		return context.WithCancel(ctx)
	}

	left := d.Sub(time.Now())
	if left < 0 { // already passed...
		return context.WithCancel(ctx)
	}

	left = time.Duration(float64(left) * fraction)
	return context.WithTimeout(ctx, left)
}
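
A brief illustrative use of WithDeadlineFraction: give a sub-operation half of whatever time remains on the parent context (the function names here are made up for the example).

// lookupWithHalfBudget derives a child context with 50% of the parent's
// remaining time and runs the supplied lookup under it.
func lookupWithHalfBudget(parent context.Context, doLookup func(context.Context) error) error {
	ctx, cancel := WithDeadlineFraction(parent, 0.5)
	defer cancel()
	return doLookup(ctx)
}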