Example #1
func (r *dhtQueryRunner) spawnWorkers(proc process.Process) {
	for {

		select {
		case <-r.peersRemaining.Done():
			return

		case <-r.proc.Closing():
			return

		case <-r.rateLimit:
			select {
			case p, more := <-r.peersToQuery.DeqChan:
				if !more {
					return // channel closed.
				}

				// do it as a child func to make sure Run exits
				// ONLY AFTER spawn workers has exited.
				proc.Go(func(proc process.Process) {
					r.queryPeer(proc, p)
				})
			case <-r.proc.Closing():
				return
			case <-r.peersRemaining.Done():
				return
			}
		}
	}
}
Example #2
func (bs *Bitswap) startWorkers(px process.Process, ctx context.Context) {
	// Start up a worker to handle block requests this node is making
	px.Go(func(px process.Process) {
		bs.providerConnector(ctx)
	})

	// Start up workers to handle requests from other nodes for the data on this node
	for i := 0; i < TaskWorkerCount; i++ {
		i := i
		px.Go(func(px process.Process) {
			bs.taskWorker(ctx, i)
		})
	}

	// Start up a worker to manage periodically resending our wantlist out to peers
	px.Go(func(px process.Process) {
		bs.rebroadcastWorker(ctx)
	})

	// Start up a worker to manage sending out provides messages
	px.Go(func(px process.Process) {
		bs.provideCollector(ctx)
	})

	// Spawn multiple workers to handle incoming blocks.
	// Consider increasing the number if providing blocks
	// bottlenecks file transfers.
	px.Go(bs.provideWorker)
}
Example #3
// WithProcessClosing returns a context.Context derived from ctx that
// is cancelled as p is Closing (after: <-p.Closing()). It is simply:
//
//   func WithProcessClosing(ctx context.Context, p goprocess.Process) context.Context {
//     ctx, cancel := context.WithCancel(ctx)
//     go func() {
//       <-p.Closing()
//       cancel()
//     }()
//     return ctx
//   }
//
func WithProcessClosing(ctx context.Context, p goprocess.Process) context.Context {
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		<-p.Closing()
		cancel()
	}()
	return ctx
}
Example #4
// CloseAfterContext schedules the process to close after the given
// context is done. It is the equivalent of:
//
//   func CloseAfterContext(p goprocess.Process, ctx context.Context) {
//     go func() {
//       <-ctx.Done()
//       p.Close()
//     }()
//   }
//
func CloseAfterContext(p goprocess.Process, ctx context.Context) {
	if p == nil {
		panic("nil Process")
	}
	if ctx == nil {
		panic("nil Context")
	}

	go func() {
		<-ctx.Done()
		p.Close()
	}()
}
Example #5
func (rp *Republisher) Run(proc goprocess.Process) {
	tick := time.NewTicker(rp.Interval)
	defer tick.Stop()

	for {
		select {
		case <-tick.C:
			err := rp.republishEntries(proc)
			if err != nil {
				log.Error("Republisher failed to republish: ", err)
			}
		case <-proc.Closing():
			return
		}
	}
}
Example #6
func (d *datastore) runQuery(worker goprocess.Process, qrb *dsq.ResultBuilder) {

	var rnge *util.Range
	if qrb.Query.Prefix != "" {
		rnge = util.BytesPrefix([]byte(qrb.Query.Prefix))
	}
	i := d.DB.NewIterator(rnge, nil)
	defer i.Release()

	// advance iterator for offset
	if qrb.Query.Offset > 0 {
		for j := 0; j < qrb.Query.Offset; j++ {
			i.Next()
		}
	}

	// iterate, and handle limit, too
	for sent := 0; i.Next(); sent++ {
		// end early if we hit the limit
		if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit {
			break
		}

		k := ds.NewKey(string(i.Key())).String()
		e := dsq.Entry{Key: k}

		if !qrb.Query.KeysOnly {
			buf := make([]byte, len(i.Value()))
			copy(buf, i.Value())
			e.Value = buf
		}

		select {
		case qrb.Output <- dsq.Result{Entry: e}: // we sent it out
		case <-worker.Closing(): // client told us to end early; stop iterating.
			return
		}
	}

	if err := i.Error(); err != nil {
		select {
		case qrb.Output <- dsq.Result{Error: err}: // client read our error
		case <-worker.Closing(): // client told us to end.
			return
		}
	}
}
Example #7
func (bs *Bitswap) provideWorker(px process.Process) {

	limit := make(chan struct{}, provideWorkerMax)

	limitedGoProvide := func(k key.Key, wid int) {
		defer func() {
			// replace token when done
			<-limit
		}()
		ev := logging.LoggableMap{"ID": wid}

		ctx := procctx.OnClosingContext(px) // derive ctx from px
		defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done()

		ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
		defer cancel()

		if err := bs.network.Provide(ctx, k); err != nil {
			log.Warning(err)
		}
	}

	// worker spawner, reads from bs.provideKeys until it closes, spawning a
	// _ratelimited_ number of workers to handle each key.
	for wid := 2; ; wid++ {
		ev := logging.LoggableMap{"ID": 1}
		log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev)

		select {
		case <-px.Closing():
			return
		case k, ok := <-bs.provideKeys:
			if !ok {
				log.Debug("provideKeys channel closed")
				return
			}
			select {
			case <-px.Closing():
				return
			case limit <- struct{}{}:
				go limitedGoProvide(k, wid)
			}
		}
	}
}
Example #8
// CloseAfterContext schedules the process to close after the given
// context is done. It is the equivalent of:
//
//   func CloseAfterContext(p goprocess.Process, ctx context.Context) {
//     go func() {
//       <-ctx.Done()
//       p.Close()
//     }()
//   }
//
func CloseAfterContext(p goprocess.Process, ctx context.Context) {
	if p == nil {
		panic("nil Process")
	}
	if ctx == nil {
		panic("nil Context")
	}

	// if ctx.Done() is nil (as with context.Background()), the context can
	// never be done. we check for this to avoid wasting a goroutine forever.
	if ctx.Done() == nil {
		return
	}

	go func() {
		<-ctx.Done()
		p.Close()
	}()
}
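
The CloseAfterContext helper above ties a process's lifetime to a context. A minimal usage sketch, assuming the helper lives in goprocess's context package (imported here as goprocessctx; the import paths are assumptions for illustration), using only the goprocess API shown elsewhere in these examples:

package main

import (
	"context"
	"time"

	goprocess "github.com/jbenet/goprocess"
	goprocessctx "github.com/jbenet/goprocess/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// a background process that runs until it is asked to stop
	p := goprocess.Go(func(p goprocess.Process) {
		<-p.Closing() // wait for the close signal, then clean up
	})

	// once ctx is done, CloseAfterContext calls p.Close() for us
	goprocessctx.CloseAfterContext(p, ctx)

	<-p.Closed() // blocks until the process has fully shut down
}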
Example #9
func (bs *Bitswap) provideWorker(px process.Process) {

	limiter := ratelimit.NewRateLimiter(px, provideWorkerMax)

	limitedGoProvide := func(k key.Key, wid int) {
		ev := logging.LoggableMap{"ID": wid}
		limiter.LimitedGo(func(px process.Process) {

			ctx := procctx.OnClosingContext(px) // derive ctx from px
			defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, &k).Done()

			ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
			defer cancel()

			if err := bs.network.Provide(ctx, k); err != nil {
				log.Error(err)
			}
		})
	}

	// worker spawner, reads from bs.provideKeys until it closes, spawning a
	// _ratelimited_ number of workers to handle each key.
	limiter.Go(func(px process.Process) {
		for wid := 2; ; wid++ {
			ev := logging.LoggableMap{"ID": 1}
			log.Event(procctx.OnClosingContext(px), "Bitswap.ProvideWorker.Loop", ev)

			select {
			case <-px.Closing():
				return
			case k, ok := <-bs.provideKeys:
				if !ok {
					log.Debug("provideKeys channel closed")
					return
				}
				limitedGoProvide(k, wid)
			}
		}
	})
}
Example #10
func (r *dhtQueryRunner) queryPeer(proc process.Process, p peer.ID) {
	// make sure we rate limit concurrency.
	select {
	case <-r.rateLimit:
	case <-proc.Closing():
		r.peersRemaining.Decrement(1)
		return
	}

	// ok let's do this!

	// create a context from our proc.
	ctx := ctxproc.WithProcessClosing(context.Background(), proc)

	// make sure we do this when we exit
	defer func() {
		// signal we're done processing peer p
		r.peersRemaining.Decrement(1)
		r.rateLimit <- struct{}{}
	}()

	// make sure we're connected to the peer.
	// FIXME abstract away into the network layer
	if conns := r.query.dht.host.Network().ConnsToPeer(p); len(conns) == 0 {
		log.Infof("not connected. dialing.")
		// while we dial, we do not take up a rate limit. this is to allow
		// forward progress during potentially very high latency dials.
		r.rateLimit <- struct{}{}

		pi := peer.PeerInfo{ID: p}

		if err := r.query.dht.host.Connect(ctx, pi); err != nil {
			log.Debugf("Error connecting: %s", err)

			notif.PublishQueryEvent(ctx, &notif.QueryEvent{
				Type:  notif.QueryError,
				Extra: err.Error(),
			})

			r.Lock()
			r.errs = append(r.errs, err)
			r.Unlock()
			<-r.rateLimit // need to grab it again, as we deferred.
			return
		}
		<-r.rateLimit // need to grab it again, as we deferred.
		log.Debugf("connected. dial success.")
	}

	// finally, run the query against this peer
	res, err := r.query.qfunc(ctx, p)

	if err != nil {
		log.Debugf("ERROR worker for: %v %v", p, err)
		r.Lock()
		r.errs = append(r.errs, err)
		r.Unlock()

	} else if res.success {
		log.Debugf("SUCCESS worker for: %v %s", p, res)
		r.Lock()
		r.result = res
		r.Unlock()
		go r.proc.Close() // signal to everyone that we're done.
		// must be async, as we're one of the children, and Close blocks.

	} else if len(res.closerPeers) > 0 {
		log.Debugf("PEERS CLOSER -- worker for: %v (%d closer peers)", p, len(res.closerPeers))
		for _, next := range res.closerPeers {
			if next.ID == r.query.dht.self { // don't add self.
				log.Debugf("PEERS CLOSER -- worker for: %v found self", p)
				continue
			}

			// add their addresses to the dialer's peerstore
			r.query.dht.peerstore.AddAddrs(next.ID, next.Addrs, peer.TempAddrTTL)
			r.addPeerToQuery(next.ID)
			log.Debugf("PEERS CLOSER -- worker for: %v added %v (%v)", p, next.ID, next.Addrs)
		}
	} else {
		log.Debugf("QUERY worker for: %v - not found, and no closer peers.", p)
	}
}
Example #11
// WaitForContext makes p WaitFor ctx. When Closing, p waits for
// ctx.Done(), before being Closed(). It is simply:
//
//   p.WaitFor(goprocess.WithContext(ctx))
//
func WaitForContext(ctx context.Context, p goprocess.Process) {
	p.WaitFor(WithContext(ctx))
}
Example #12
// OnClosedContext derives a context from a given goprocess that will
// be 'Done' when the process is closed
func OnClosedContext(p goprocess.Process) context.Context {
	return &procContext{
		done:  p.Closed(),
		which: closed,
	}
}
Example #13
// OnClosingContext derives a context from a given goprocess that will
// be 'Done' when the process is closing
func OnClosingContext(p goprocess.Process) context.Context {
	return &procContext{
		done:  p.Closing(),
		which: closing,
	}
}
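
Examples #12 and #13 build a procContext value that is not defined in these listings. A minimal sketch of what such a context.Context implementation might look like, assuming only the done/which fields and the closing/closed tags seen above; the package name and the rest of the code are illustrative, not the library's actual implementation:

package procctx

import (
	"context"
	"time"
)

const (
	closing = iota // done fires when the process is Closing
	closed         // done fires when the process is Closed
)

// procContext is a bare context.Context backed by a single done channel
// taken from a goprocess.Process (its Closing() or Closed() channel).
type procContext struct {
	done  <-chan struct{}
	which int
}

func (c *procContext) Done() <-chan struct{} { return c.done }

func (c *procContext) Deadline() (deadline time.Time, ok bool) { return }

func (c *procContext) Err() error {
	select {
	case <-c.done:
		return context.Canceled
	default:
		return nil
	}
}

func (c *procContext) Value(key interface{}) interface{} { return nil }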
Example #14
// transport will grab message arrival times, wait until that time, and
// then write the message out when it is scheduled to arrive
func (s *stream) transport(proc process.Process) {
	bufsize := 256
	buf := new(bytes.Buffer)
	ticker := time.NewTicker(time.Millisecond * 4)
	defer ticker.Stop()

	// drainBuf writes the contents of buf through to s.Writer.
	// done only when the arrival time makes sense.
	drainBuf := func() {
		if buf.Len() > 0 {
			_, err := s.Writer.Write(buf.Bytes())
			if err != nil {
				return
			}
			buf.Reset()
		}
	}

	// deliverOrWait is a helper func that processes
	// an incoming packet. it waits until the arrival time,
	// and then writes things out.
	deliverOrWait := func(o *transportObject) {
		buffered := len(o.msg) + buf.Len()

		now := time.Now()
		if now.Before(o.arrivalTime) {
			if buffered < bufsize {
				buf.Write(o.msg)
				return
			}

			// we do not buffer + return here, instead hanging the
			// call (i.e. not accepting any more transportObjects)
			// so that we apply back-pressure to the sender.
			// this sleep should wake up same time as ticker.
			time.Sleep(o.arrivalTime.Sub(now))
		}

		// ok, we waited our due time. now write the buf + msg.

		// drainBuf first, before we write this message.
		drainBuf()

		// write this message.
		_, err := s.Writer.Write(o.msg)
		if err != nil {
			log.Error("mock_stream", err)
		}
	}

	for {
		select {
		case <-proc.Closing():
			return // bail out of here.

		case o, ok := <-s.toDeliver:
			if !ok {
				return
			}
			deliverOrWait(o)

		case <-ticker.C: // ok, due to write it out.
			drainBuf()
		}
	}
}