func (bs *Bitswap) provideWorker(px process.Process) {

	limiter := ratelimit.NewRateLimiter(px, provideWorkerMax)

	limitedGoProvide := func(k key.Key, wid int) {
		ev := eventlog.LoggableMap{"ID": wid}
		limiter.LimitedGo(func(px process.Process) {

			ctx := waitable.Context(px) // derive ctx from px
			defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done()

			ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
			defer cancel()

			if err := bs.network.Provide(ctx, k); err != nil {
				log.Error(err)
			}
		})
	}

	// worker spawner, reads from bs.provideKeys until it closes, spawning a
	// _ratelimited_ number of workers to handle each key.
	limiter.Go(func(px process.Process) {
		for wid := 2; ; wid++ {
			ev := eventlog.LoggableMap{"ID": 1}
			log.Event(waitable.Context(px), "Bitswap.ProvideWorker.Loop", ev)

			select {
			case <-px.Closing():
				return
			case k, ok := <-bs.provideKeys:
				if !ok {
					log.Debug("provideKeys channel closed")
					return
				}
				limitedGoProvide(k, wid)
			}
		}
	})
}
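// The rate-limited provide loop above caps how many Provide calls run at once.
// Below is a minimal, stdlib-only sketch of the same bounded-concurrency idea,
// using a buffered channel as a counting semaphore instead of the
// goprocess/ratelimit limiter. The provide function, string key type, and
// timeout value are placeholders assumed for illustration, not part of the
// bitswap code above.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// provide stands in for bs.network.Provide in this sketch.
func provide(ctx context.Context, key string) error {
	select {
	case <-time.After(100 * time.Millisecond): // pretend to announce the key
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// provideLoop reads keys until the channel closes, running at most maxWorkers
// provide calls concurrently, each bounded by its own timeout.
func provideLoop(ctx context.Context, keys <-chan string, maxWorkers int, timeout time.Duration) {
	sem := make(chan struct{}, maxWorkers) // counting semaphore bounds concurrency
	var wg sync.WaitGroup
	defer wg.Wait()
	for {
		select {
		case <-ctx.Done():
			return
		case k, ok := <-keys:
			if !ok {
				return
			}
			sem <- struct{}{} // blocks until a worker slot frees up
			wg.Add(1)
			go func(k string) {
				defer wg.Done()
				defer func() { <-sem }()
				cctx, cancel := context.WithTimeout(ctx, timeout)
				defer cancel()
				if err := provide(cctx, k); err != nil {
					fmt.Println("provide error:", err)
				}
			}(k)
		}
	}
}

func main() {
	keys := make(chan string)
	go func() {
		for _, k := range []string{"a", "b", "c", "d", "e"} {
			keys <- k
		}
		close(keys)
	}()
	provideLoop(context.Background(), keys, 2, time.Second)
}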
func (w *Worker) start(c Config) {

	workerChan := make(chan *blocks.Block, c.WorkerBufferSize)

	// clientWorker handles incoming blocks from |w.added| and sends to
	// |workerChan|. This will never block the client.
	w.process.Go(func(proc process.Process) {
		defer close(workerChan)

		var workQueue BlockList
		debugInfo := time.NewTicker(5 * time.Second)
		defer debugInfo.Stop()
		for {

			// take advantage of the fact that sending on nil channel always
			// blocks so that a message is only sent if a block exists
			sendToWorker := workerChan
			nextBlock := workQueue.Pop()
			if nextBlock == nil {
				sendToWorker = nil
			}

			select {

			// if worker is ready and there's a block to process, send the
			// block
			case sendToWorker <- nextBlock:

			case <-debugInfo.C:
				if workQueue.Len() > 0 {
					log.Debugf("%d blocks in blockservice provide queue...", workQueue.Len())
				}

			case block := <-w.added:
				if nextBlock != nil {
					workQueue.Push(nextBlock) // missed the chance to send it
				}
				// if the client sends another block, add it to the queue.
				workQueue.Push(block)

			case <-proc.Closing():
				return
			}
		}
	})

	// reads from |workerChan| until w.process closes
	limiter := ratelimit.NewRateLimiter(w.process, c.NumWorkers)
	limiter.Go(func(proc process.Process) {
		ctx := waitable.Context(proc) // shut down in-progress HasBlock when time to die
		for {
			select {
			case <-proc.Closing():
				return
			case block, ok := <-workerChan:
				if !ok {
					return
				}
				limiter.LimitedGo(func(proc process.Process) {
					if err := w.exchange.HasBlock(ctx, block); err != nil {
						log.Infof("blockservice worker error: %s", err)
					}
				})
			}
		}
	})
}
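// The clientWorker loop above never blocks the producer on |w.added|; it leans
// on the rule that a send on a nil channel blocks forever, so the send case is
// effectively disabled whenever the queue is empty. Below is a minimal,
// self-contained sketch of just that trick, with int values and a plain slice
// standing in for blocks and BlockList; the names pump/in/out are assumptions
// for illustration only.

package main

import "fmt"

// pump forwards queued values from in to out without ever blocking the
// producer. When the queue is empty, the destination channel is swapped for
// nil, which disables the send case in the select.
func pump(in <-chan int, out chan<- int) {
	defer close(out)
	var queue []int
	for {
		sendTo := out
		var next int
		if len(queue) == 0 {
			sendTo = nil // nil channel: this select case can never fire
		} else {
			next = queue[0]
		}

		select {
		case sendTo <- next: // only reachable when the queue is non-empty
			queue = queue[1:]
		case v, ok := <-in:
			if !ok {
				for _, q := range queue { // producer done: drain the backlog
					out <- q
				}
				return
			}
			queue = append(queue, v)
		}
	}
}

func main() {
	in := make(chan int)
	out := make(chan int)
	go pump(in, out)

	go func() {
		for i := 1; i <= 5; i++ {
			in <- i
		}
		close(in)
	}()

	for v := range out {
		fmt.Println("received", v)
	}
}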