// FetchGraph asynchronously fetches all nodes that are children of the given // node, and returns a channel that may be waited upon for the fetch to complete func FetchGraph(ctx context.Context, root *Node, serv DAGService) chan struct{} { var wg sync.WaitGroup done := make(chan struct{}) for _, l := range root.Links { wg.Add(1) go func(lnk *Link) { // Signal child is done on way out defer wg.Done() select { case <-ctx.Done(): return } nd, err := lnk.GetNode(serv) if err != nil { log.Error(err) return } // Wait for children to finish <-FetchGraph(ctx, nd, serv) }(l) } go func() { wg.Wait() done <- struct{}{} }() return done }
// LogError logs the error to the owner of the context. // // If this context was created with ContextWithErrorLog, then this method // passes the error to context creator over an unbuffered channel. // // If this context was created by other means, this method is a no-op. func LogError(ctx context.Context, err error) { v := ctx.Value(errLogKey) errs, ok := v.(privateChanType) if !ok { return } errs <- err }
// GetBlock attempts to retrieve a particular block from peers within the
// deadline enforced by the context
//
// TODO ensure only one active request per key
func (bs *bitswap) Block(parent context.Context, k u.Key) (*blocks.Block, error) {
	log.Debugf("Get Block %v", k)
	now := time.Now()
	defer func() {
		log.Debugf("GetBlock took %f secs", time.Now().Sub(now).Seconds())
	}()

	// Child context is cancelled on return so the provider search and the
	// per-peer dial goroutines below are torn down with this call.
	ctx, cancelFunc := context.WithCancel(parent)
	defer cancelFunc()

	bs.wantlist.Add(k)
	// Subscribe before contacting providers so a block that arrives quickly
	// is not missed.
	promise := bs.notifications.Subscribe(ctx, k)

	const maxProviders = 20
	peersToQuery := bs.routing.FindProvidersAsync(ctx, k, maxProviders)

	go func() {
		// Snapshot the wantlist once; the same message is sent to every
		// provider discovered below.
		message := bsmsg.New()
		for _, wanted := range bs.wantlist.Keys() {
			message.AddWanted(wanted)
		}
		for peerToQuery := range peersToQuery {
			log.Debugf("bitswap got peersToQuery: %s", peerToQuery)
			// One goroutine per provider; peerToQuery is passed as an
			// argument to avoid loop-variable capture (pre-Go-1.22 semantics).
			go func(p peer.Peer) {
				log.Debugf("bitswap dialing peer: %s", p)
				err := bs.sender.DialPeer(ctx, p)
				if err != nil {
					log.Errorf("Error sender.DialPeer(%s)", p)
					return
				}

				response, err := bs.sender.SendRequest(ctx, p, message)
				if err != nil {
					log.Errorf("Error sender.SendRequest(%s) = %s", p, err)
					return
				}
				// FIXME ensure accounting is handled correctly when
				// communication fails. May require slightly different API to
				// get better guarantees. May need shared sequence numbers.
				bs.strategy.MessageSent(p, message)

				if response == nil {
					return
				}
				bs.ReceiveMessage(ctx, p, response)
			}(peerToQuery)
		}
	}()

	select {
	case block := <-promise:
		// Block obtained; withdraw our interest in the key.
		bs.wantlist.Remove(k)
		return &block, nil
	case <-parent.Done():
		// NOTE(review): wantlist entry is intentionally left in place here —
		// presumably so a later request can still be satisfied; confirm.
		return nil, parent.Err()
	}
}
func echo(ctx context.Context, c Conn) { for { select { case <-ctx.Done(): return case m := <-c.In(): c.Out() <- m } } }
func echoListen(ctx context.Context, listener Listener) { for { select { case <-ctx.Done(): return case c := <-listener.Accept(): go echo(ctx, c) } } }
func MetadataFromContext(ctx context.Context) (Metadata, error) { value := ctx.Value(metadataKey) if value != nil { metadata, ok := value.(Metadata) if ok { return metadata, nil } } return nil, errors.New("context contains no metadata") }
// process wires up cq's enqueue/dequeue channels and starts the goroutine
// that shuttles peers between them, backed by cq.Queue, until ctx is done
// or the enqueue channel is closed.
func (cq *ChanQueue) process(ctx context.Context) {
	// construct the channels here to be able to use them bidirectionally
	enqChan := make(chan peer.Peer, 10)
	deqChan := make(chan peer.Peer, 10)

	cq.EnqChan = enqChan
	cq.DeqChan = deqChan

	go func() {
		// Closing deqChan tells consumers the queue has shut down.
		defer close(deqChan)

		var next peer.Peer
		var item peer.Peer
		var more bool

		for {
			if cq.Queue.Len() == 0 {
				// Nothing to offer yet: block until something is enqueued
				// or we are cancelled.
				select {
				case next, more = <-enqChan:
					if !more {
						return
					}
				case <-ctx.Done():
					return
				}
			} else {
				next = cq.Queue.Dequeue()
			}

			select {
			case item, more = <-enqChan:
				if !more {
					return
				}
				// A new item arrived while we were holding `next` out of the
				// queue; push both back and let the queue re-establish order
				// on the next iteration.
				cq.Queue.Enqueue(item)
				cq.Queue.Enqueue(next)
				next = nil
			case deqChan <- next:
				// `next` was handed to a consumer.
				next = nil
			case <-ctx.Done():
				return
			}
		}
	}()
}
func pong(ctx context.Context, swarm *Swarm) { i := 0 for { select { case <-ctx.Done(): return case m1 := <-swarm.Incoming: if bytes.Equal(m1.Data(), []byte("ping")) { m2 := msg.New(m1.Peer(), []byte("pong")) i++ log.Debugf("%s pong %s (%d)", swarm.local, m1.Peer(), i) swarm.Outgoing <- m2 } } } }
func (mr *MockRouter) FindProvidersAsync(ctx context.Context, k u.Key, max int) <-chan peer.Peer { out := make(chan peer.Peer) go func() { defer close(out) for i, p := range mr.hashTable.Providers(k) { if max <= i { return } select { case out <- p: case <-ctx.Done(): return } } }() return out }
// Handshake3 exchanges local and remote service information func Handshake3(ctx context.Context, c Conn) (*handshake.Handshake3Result, error) { rpeer := c.RemotePeer() lpeer := c.LocalPeer() // setup + send the message to remote var remoteH, localH *hspb.Handshake3 localH = handshake.Handshake3Msg(lpeer, c.RemoteMultiaddr()) localB, err := proto.Marshal(localH) if err != nil { return nil, err } c.Out() <- localB log.Debugf("Handshake1: sent to %s", rpeer) // wait + listen for response select { case <-ctx.Done(): return nil, ctx.Err() case <-c.Closing(): return nil, errors.New("Handshake3: error remote connection closed") case remoteB, ok := <-c.In(): if !ok { return nil, fmt.Errorf("Handshake3 error receiving from conn: %v", rpeer) } remoteH = new(hspb.Handshake3) err = proto.Unmarshal(remoteB, remoteH) if err != nil { return nil, fmt.Errorf("Handshake3 could not decode remote msg: %q", err) } log.Debugf("Handshake3 received from %s", rpeer) } // actually update our state based on the new knowledge res, err := handshake.Handshake3Update(lpeer, rpeer, remoteH) if err != nil { log.Errorf("Handshake3 failed to update %s", rpeer) } res.RemoteObservedAddress = c.RemoteMultiaddr() return res, nil }
// Subscribe returns a one-time use |blockChannel|. |blockChannel| returns nil // if the |ctx| times out or is cancelled. Then channel is closed after the // block given by |k| is sent. func (ps *impl) Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block { topic := string(k) subChan := ps.wrapped.SubOnce(topic) blockChannel := make(chan blocks.Block, 1) // buffered so the sender doesn't wait on receiver go func() { defer close(blockChannel) select { case val := <-subChan: block, ok := val.(blocks.Block) if ok { blockChannel <- block } case <-ctx.Done(): ps.wrapped.Unsub(subChan, topic) } }() return blockChannel }
// Handshake1 exchanges local and remote versions and compares them // closes remote and returns an error in case of major difference func Handshake1(ctx context.Context, c Conn) error { rpeer := c.RemotePeer() lpeer := c.LocalPeer() var remoteH, localH *hspb.Handshake1 localH = handshake.Handshake1Msg() myVerBytes, err := proto.Marshal(localH) if err != nil { return err } c.Out() <- myVerBytes log.Debugf("Sent my version (%s) to %s", localH, rpeer) select { case <-ctx.Done(): return ctx.Err() case <-c.Closing(): return errors.New("remote closed connection during version exchange") case data, ok := <-c.In(): if !ok { return fmt.Errorf("error retrieving from conn: %v", rpeer) } remoteH = new(hspb.Handshake1) err = proto.Unmarshal(data, remoteH) if err != nil { return fmt.Errorf("could not decode remote version: %q", err) } log.Debugf("Received remote version (%s) from %s", remoteH, rpeer) } if err := handshake.Handshake1Compatible(localH, remoteH); err != nil { log.Infof("%s (%s) incompatible version with %s (%s)", lpeer, localH, rpeer, remoteH) return err } log.Debugf("%s version handshake compatible %s", lpeer, rpeer) return nil }
// sendMessage sends a message out (actual leg work. SendMessage is to export w/o rid) func (s *service) sendMessage(ctx context.Context, m msg.NetMessage, rid RequestID) error { // serialize ServiceMessage wrapper data, err := wrapData(m.Data(), rid) if err != nil { return err } // log.Debug("Service send message [to = %s]", m.Peer()) // send message m2 := msg.New(m.Peer(), data) select { case s.Outgoing <- m2: case <-ctx.Done(): return ctx.Err() } return nil }
func (f *fauxSender) SendRequest(ctx context.Context, m msg.NetMessage) (msg.NetMessage, error) { f.Lock() handlers := make([]mesHandleFunc, len(f.handlers)) copy(handlers, f.handlers) f.Unlock() for _, h := range handlers { reply := h(m) if reply != nil { return reply, nil } } // no reply? ok force a timeout select { case <-ctx.Done(): } return nil, ctx.Err() }
// New initializes a BitSwap instance that communicates over the // provided BitSwapNetwork. This function registers the returned instance as // the network delegate. // Runs until context is cancelled func New(ctx context.Context, p peer.Peer, network bsnet.BitSwapNetwork, routing bsnet.Routing, d ds.ThreadSafeDatastore, nice bool) exchange.Interface { notif := notifications.New() go func() { <-ctx.Done() notif.Shutdown() }() bs := &bitswap{ blockstore: blockstore.NewBlockstore(d), notifications: notif, strategy: strategy.New(nice), routing: routing, sender: network, wantlist: u.NewKeySet(), } network.SetDelegate(bs) return bs }
func ContextDo(ctx context.Context, f func() error) error { ch := make(chan error) go func() { select { case <-ctx.Done(): case ch <- f(): } }() select { case <-ctx.Done(): return ctx.Err() case val := <-ch: return val } return nil }
// SendRequest sends a request message out and awaits a response. func (s *service) SendRequest(ctx context.Context, m msg.NetMessage) (msg.NetMessage, error) { // check if we should bail given our contexts select { default: case <-s.Closing(): return nil, fmt.Errorf("service closed: %s", s.Context().Err()) case <-ctx.Done(): return nil, ctx.Err() } // create a request r, err := NewRequest(m.Peer().ID()) if err != nil { return nil, err } // register Request s.RequestsLock.Lock() s.Requests[r.Key()] = r s.RequestsLock.Unlock() // defer deleting this request defer func() { s.RequestsLock.Lock() delete(s.Requests, r.Key()) s.RequestsLock.Unlock() }() // check if we should bail after waiting for mutex select { default: case <-s.Closing(): return nil, fmt.Errorf("service closed: %s", s.Context().Err()) case <-ctx.Done(): return nil, ctx.Err() } // Send message s.sendMessage(ctx, m, r.ID) // wait for response m = nil err = nil select { case m = <-r.Response: case <-s.Closed(): err = fmt.Errorf("service closed: %s", s.Context().Err()) case <-ctx.Done(): err = ctx.Err() } if m == nil { return nil, ErrNoResponse } return m, err }