Example 1
// Subscribe returns a channel of blocks for the given |keys|. The returned
// channel is closed if the |ctx| times out or is cancelled, or after sending
// len(keys) blocks.
func (ps *impl) Subscribe(ctx context.Context, keys ...*cid.Cid) <-chan blocks.Block {
	blocksCh := make(chan blocks.Block, len(keys))
	valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking
	if len(keys) == 0 {
		close(blocksCh)
		return blocksCh
	}
	ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...)
	go func() {
		defer close(blocksCh)
		defer ps.wrapped.Unsub(valuesCh) // with a len(keys) buffer, this is an optimization
		for {
			select {
			case <-ctx.Done():
				return
			case val, ok := <-valuesCh:
				if !ok {
					return
				}
				block, ok := val.(blocks.Block)
				if !ok {
					return
				}
				select {
				case <-ctx.Done():
					return
				case blocksCh <- block: // continue
				}
			}
		}
	}()

	return blocksCh
}
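A hedged usage sketch for Subscribe above; the subscriber ps, the keys slice, the 30-second timeout, and the handleBlock helper are all assumptions made for illustration.

// Hypothetical consumer: read blocks for a bounded time.
func consumeBlocks(ps *impl, keys []*cid.Cid) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	for block := range ps.Subscribe(ctx, keys...) {
		handleBlock(block) // hypothetical handler for each received block
	}
	// The loop ends after len(keys) blocks or when ctx expires, because
	// Subscribe closes the channel in both cases.
}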
Example 2
File: scanner.go Project: kego/ke
// ScanFilesToBytes takes a channel of files and emits their contents.
func ScanFilesToBytes(ctx context.Context, in chan File) chan Content {

	out := make(chan Content)

	go func() {

		defer close(out)

		for {
			select {
			case value, open := <-in:
				if !open {
					return
				}
				if value.Err != nil {
					out <- Content{"", nil, kerr.Wrap("PQUCOUYLJE", value.Err)}
					return
				}
				bytes, err := ProcessFile(value.File)
				// ProcessFile returns nil bytes for non-JSON files, so we skip them
				if bytes != nil || err != nil {
					out <- Content{value.File, bytes, err}
				}
			case <-ctx.Done():
				out <- Content{"", nil, kerr.Wrap("AFBJCTFOKX", ctx.Err())}
				return
			}
		}

	}()

	return out

}
Example 3
// Ping repeatedly pings the peer p over a new stream, recording and emitting
// each round-trip time on the returned channel until the context is cancelled
// or a ping fails.
func (ps *PingService) Ping(ctx context.Context, p peer.ID) (<-chan time.Duration, error) {
	s, err := ps.Host.NewStream(ctx, p, ID)
	if err != nil {
		return nil, err
	}

	out := make(chan time.Duration)
	go func() {
		defer close(out)
		defer s.Close()
		for {
			select {
			case <-ctx.Done():
				return
			default:
				t, err := ping(s)
				if err != nil {
					log.Debugf("ping error: %s", err)
					return
				}

				ps.Host.Peerstore().RecordLatency(p, t)
				select {
				case out <- t:
				case <-ctx.Done():
					return
				}
			}
		}
	}()

	return out, nil
}
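A hedged usage sketch for Ping above; the number of samples and the surrounding setup are assumptions.

// Hypothetical caller: take a few RTT samples, then stop the ping loop.
func sampleLatency(ps *PingService, p peer.ID) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // stops the ping loop; the service then closes the stream

	rtts, err := ps.Ping(ctx, p)
	if err != nil {
		return err
	}
	for i := 0; i < 3; i++ {
		rtt, ok := <-rtts
		if !ok {
			break // channel closed because a ping failed
		}
		fmt.Println("RTT:", rtt)
	}
	return nil
}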
Example 4
// GetBlocks returns a channel of the blocks found in the local blockstore for
// the given keys; keys that are not found locally are skipped.
func (e *offlineExchange) GetBlocks(ctx context.Context, ks []*cid.Cid) (<-chan blocks.Block, error) {
	out := make(chan blocks.Block)
	go func() {
		defer close(out)
		var misses []*cid.Cid
		for _, k := range ks {
			hit, err := e.bs.Get(k)
			if err != nil {
				misses = append(misses, k)
				// a long line of misses should abort when context is cancelled.
				select {
				// TODO case send misses down channel
				case <-ctx.Done():
					return
				default:
					continue
				}
			}
			select {
			case out <- hit:
			case <-ctx.Done():
				return
			}
		}
	}()
	return out, nil
}
Example 5
// FindProvidersAsync returns a channel of providers for the given key
func (bsnet *impl) FindProvidersAsync(ctx context.Context, k *cid.Cid, max int) <-chan peer.ID {

	// Since routing queries are expensive, give bitswap the peers to which we
	// have open connections. Note that this may cause issues if bitswap starts
	// precisely tracking which peers provide certain keys. This optimization
	// would be misleading. In the long run, this may not be the most
	// appropriate place for this optimization, but it won't cause any harm in
	// the short term.
	connectedPeers := bsnet.host.Network().Peers()
	out := make(chan peer.ID, len(connectedPeers)) // just enough buffer for these connectedPeers
	for _, id := range connectedPeers {
		if id == bsnet.host.ID() {
			continue // ignore self as provider
		}
		out <- id
	}

	go func() {
		defer close(out)
		providers := bsnet.routing.FindProvidersAsync(ctx, k, max)
		for info := range providers {
			if info.ID == bsnet.host.ID() {
				continue // ignore self as provider
			}
			bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, pstore.TempAddrTTL)
			select {
			case <-ctx.Done():
				return
			case out <- info.ID:
			}
		}
	}()
	return out
}
Example 6
func storageLoop(ctx context.Context, local *localNode, remotes *remoteNodes, notify <-chan struct{}, reply chan<- []*net.UDPAddr, done chan<- struct{}, client *s3.S3, bucket, prefix, localKey string, log *Log) {
	defer func() {
		updateStorage(local.empty(), client, bucket, localKey, log)
		close(done)
	}()

	timer := time.NewTimer(randomStorageInterval())

	for {
		var scan bool

		select {
		case <-notify:
			scan = false

		case <-timer.C:
			timer.Reset(randomStorageInterval())
			scan = true

		case <-ctx.Done():
			timer.Stop()
			return
		}

		if err := updateStorage(local, client, bucket, localKey, log); err != nil {
			log.Error(err)
		}

		if scan {
			if err := scanStorage(local, remotes, reply, client, bucket, prefix, log); err != nil {
				log.Error(err)
			}
		}
	}
}
Example 7
func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *NodeOption {
	out := make(chan *NodeOption, len(keys))
	blocks := ds.Blocks.GetBlocks(ctx, keys)
	var count int

	go func() {
		defer close(out)
		for {
			select {
			case b, ok := <-blocks:
				if !ok {
					if count != len(keys) {
						out <- &NodeOption{Err: fmt.Errorf("failed to fetch all nodes")}
					}
					return
				}

				nd, err := decodeBlock(b)
				if err != nil {
					out <- &NodeOption{Err: err}
					return
				}

				out <- &NodeOption{Node: nd}
				count++

			case <-ctx.Done():
				out <- &NodeOption{Err: ctx.Err()}
				return
			}
		}
	}()
	return out
}
Example 8
func paintLines(ctx context.Context, r io.Reader, source string) {
	scanner := bufio.NewScanner(r)
	doneC := ctx.Done()
scan:
	for scanner.Scan() {
		lsp.Println(source, scanner.Text())
		select {
		case <-doneC:
			// a bare break would only exit the select; label the loop so
			// cancellation actually stops the scan
			break scan
		default:
			// context not cancelled; keep scanning
		}
	}

	if err := ctx.Err(); err != nil {
		errC <- err
		return
	}

	if err := scanner.Err(); err != nil {
		errC <- err
		return
	}

	errC <- nil
}
Example 9
// bloomCached returns a Blockstore that caches Has requests using a Bloom
// filter. bloomSize is the size of the Bloom filter in bytes.
func bloomCached(bs Blockstore, ctx context.Context, bloomSize, hashCount int) (*bloomcache, error) {
	bl, err := bloom.New(float64(bloomSize), float64(hashCount))
	if err != nil {
		return nil, err
	}
	bc := &bloomcache{blockstore: bs, bloom: bl}
	bc.hits = metrics.NewCtx(ctx, "bloom.hits_total",
		"Number of cache hits in bloom cache").Counter()
	bc.total = metrics.NewCtx(ctx, "bloom_total",
		"Total number of requests to bloom cache").Counter()

	bc.Invalidate()
	go bc.Rebuild(ctx)
	if metrics.Active() {
		go func() {
			fill := metrics.NewCtx(ctx, "bloom_fill_ratio",
				"Ratio of bloom filter fullnes, (updated once a minute)").Gauge()

			<-bc.rebuildChan
			t := time.NewTicker(1 * time.Minute)
			for {
				select {
				case <-ctx.Done():
					t.Stop()
					return
				case <-t.C:
					fill.Set(bc.bloom.FillRatio())
				}
			}
		}()
	}
	return bc, nil
}
Example 10
func (e *apiClient) PerformRequests(ctx context.Context, queries []QueryToSend) (*tsdb.QueryResult, error) {
	queryResult := &tsdb.QueryResult{}

	queryCount := len(queries)
	jobsChan := make(chan QueryToSend, queryCount)
	resultChan := make(chan []*tsdb.TimeSeries, queryCount)
	errorsChan := make(chan error, 1)
	for w := 1; w <= MaxWorker; w++ {
		go e.spawnWorker(ctx, w, jobsChan, resultChan, errorsChan)
	}

	for _, v := range queries {
		jobsChan <- v
	}
	close(jobsChan)

	resultCounter := 0
	for {
		select {
		case timeseries := <-resultChan:
			queryResult.Series = append(queryResult.Series, timeseries...)
			resultCounter++

			if resultCounter == queryCount {
				close(resultChan)
				return queryResult, nil
			}
		case err := <-errorsChan:
			return nil, err
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}
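spawnWorker is not shown in the example above; a minimal sketch of a compatible worker follows, where executeQuery is a hypothetical stand-in for the real per-query API call.

// Sketch of a worker for the fan-out above (executeQuery is hypothetical).
func (e *apiClient) spawnWorker(ctx context.Context, id int, jobs <-chan QueryToSend,
	results chan<- []*tsdb.TimeSeries, errs chan<- error) {
	for job := range jobs {
		series, err := e.executeQuery(ctx, job) // hypothetical helper
		if err != nil {
			select {
			case errs <- err: // errorsChan holds one error; drop the rest
			default:
			}
			return
		}
		select {
		case results <- series: // resultChan is buffered with queryCount capacity
		case <-ctx.Done():
			return
		}
	}
}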
Example 11
func (mq *msgQueue) doWork(ctx context.Context) {
	if mq.sender == nil {
		err := mq.openSender(ctx)
		if err != nil {
			log.Infof("can't open message sender to peer %s: %s", mq.p, err)
			// TODO: can't connect, what now?
			return
		}
	}

	// grab outgoing message
	mq.outlk.Lock()
	wlm := mq.out
	if wlm == nil || wlm.Empty() {
		mq.outlk.Unlock()
		return
	}
	mq.out = nil
	mq.outlk.Unlock()

	// send wantlist updates
	for { // try to send this message until we fail.
		err := mq.sender.SendMsg(wlm)
		if err == nil {
			return
		}

		log.Infof("bitswap send error: %s", err)
		mq.sender.Close()
		mq.sender = nil

		select {
		case <-mq.done:
			return
		case <-ctx.Done():
			return
		case <-time.After(time.Millisecond * 100):
			// wait 100ms in case disconnect notifications are still propagating
			log.Warning("SendMsg errored but neither 'done' nor context.Done() were set")
		}

		err = mq.openSender(ctx)
		if err != nil {
			log.Errorf("couldn't open sender again after SendMsg(%s) failed: %s", mq.p, err)
			// TODO(why): what do we do now?
			// I think the *right* answer is to probably put the message we're
			// trying to send back, and then return to waiting for new work or
			// a disconnect.
			return
		}

		// TODO: Is this the same instance for the remote peer?
		// If it's not, we should resend our entire wantlist to them
		/*
			if mq.sender.InstanceID() != mq.lastSeenInstanceID {
				wlm = mq.getFullWantlistMessage()
			}
		*/
	}
}
Example 12
func cgoLookupPTR(ctx context.Context, addr string) (names []string, err error, completed bool) {
	var zone string
	ip := parseIPv4(addr)
	if ip == nil {
		ip, zone = parseIPv6(addr, true)
	}
	if ip == nil {
		return nil, &DNSError{Err: "invalid address", Name: addr}, true
	}
	sa, salen := cgoSockaddr(ip, zone)
	if sa == nil {
		return nil, &DNSError{Err: "invalid address " + ip.String(), Name: addr}, true
	}
	if ctx.Done() == nil {
		names, err := cgoLookupAddrPTR(addr, sa, salen)
		return names, err, true
	}
	result := make(chan reverseLookupResult, 1)
	go cgoReverseLookup(result, addr, sa, salen)
	select {
	case r := <-result:
		return r.names, r.err, true
	case <-ctx.Done():
		return nil, mapErr(ctx.Err()), false
	}
}
Example 13
func (bs *Bitswap) provideCollector(ctx context.Context) {
	defer close(bs.provideKeys)
	var toProvide []*cid.Cid
	var nextKey *cid.Cid
	// keysOut stays nil until a key is ready to send; a send on a nil channel
	// blocks forever, which keeps that select case disabled in the meantime.
	var keysOut chan *cid.Cid

	for {
		select {
		case blkey, ok := <-bs.newBlocks:
			if !ok {
				log.Debug("newBlocks channel closed")
				return
			}

			if keysOut == nil {
				nextKey = blkey
				keysOut = bs.provideKeys
			} else {
				toProvide = append(toProvide, blkey)
			}
		case keysOut <- nextKey:
			if len(toProvide) > 0 {
				nextKey = toProvide[0]
				toProvide = toProvide[1:]
			} else {
				keysOut = nil
			}
		case <-ctx.Done():
			return
		}
	}
}
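provideCollector above relies on the fact that sending on a nil channel blocks forever, so a nil keysOut disables that select case until a key is pending. A reduced sketch of the same trick with plain ints (all names here are made up):

// relay buffers values from in and feeds them to realOut one at a time,
// using a nil channel to switch the send case on and off.
func relay(ctx context.Context, in <-chan int, realOut chan<- int) {
	var pending []int
	var next int
	var out chan<- int // nil: the send case below cannot be selected

	for {
		select {
		case v, ok := <-in:
			if !ok {
				return
			}
			if out == nil {
				next = v
				out = realOut // something to send: enable the send case
			} else {
				pending = append(pending, v)
			}
		case out <- next:
			if len(pending) > 0 {
				next = pending[0]
				pending = pending[1:]
			} else {
				out = nil // nothing left: disable the send case again
			}
		case <-ctx.Done():
			return
		}
	}
}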
Example 14
func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
	idmap := logging.LoggableMap{"ID": id}
	defer log.Info("bitswap task worker shutting down...")
	for {
		log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
		select {
		case nextEnvelope := <-bs.engine.Outbox():
			select {
			case envelope, ok := <-nextEnvelope:
				if !ok {
					continue
				}
				log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{
					"ID":     id,
					"Target": envelope.Peer.Pretty(),
					"Block":  envelope.Block.Cid().String(),
				})

				bs.wm.SendBlock(ctx, envelope)
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
Example 15
func absFilepath(ctx context.Context, filename string) <-chan string {
	ch := make(chan string)
	go func() {
		var (
			err error
		)
		defer func() {
			if err != nil {
				// on failure, wait for the context to be cancelled, then
				// close the channel without ever sending a value
				<-ctx.Done()
				close(ch)
				return
			}
			select {
			case <-ctx.Done():
				close(ch)
			default:
				ch <- filename
				close(ch)
			}
		}()
		if !filepath.IsAbs(filename) {
			err = errors.New("filename is not abs")
			return
		}
		fi, err := os.Lstat(filename)
		if err != nil {
			return
		}
		if fi.IsDir() {
			err = ErrNotFile
			return
		}
	}()
	return ch
}
Example 16
func fetchNodes(ctx context.Context, ds DAGService, in <-chan []*cid.Cid, out chan<- *NodeOption) {
	var wg sync.WaitGroup
	defer func() {
		// wait for all 'get' calls to complete so we don't accidentally send
		// on a closed channel
		wg.Wait()
		close(out)
	}()

	get := func(ks []*cid.Cid) {
		defer wg.Done()
		nodes := ds.GetMany(ctx, ks)
		for opt := range nodes {
			select {
			case out <- opt:
			case <-ctx.Done():
				return
			}
		}
	}

	for ks := range in {
		wg.Add(1)
		go get(ks)
	}
}
Example 17
func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) {
	if ctx == nil || ctx.Done() == nil { // no context, or one that can never be cancelled
		return c.Client.Do(req)
	}

	var resc = make(chan *http.Response, 1)
	var errc = make(chan error, 1)

	// Perform request from separate routine.
	go func() {
		res, err := c.Client.Do(req)
		if err != nil {
			errc <- err
		} else {
			resc <- res
		}
	}()

	// Wait for request completion or context expiry.
	select {
	case <-ctx.Done():
		c.t.CancelRequest(req)
		return nil, ctx.Err()
	case err := <-errc:
		return nil, err
	case res := <-resc:
		return res, nil
	}
}
Example 18
func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) {
	if ctx == nil || ctx.Done() == nil { // no context, or one that can never be cancelled
		return c.Client.Do(req)
	}

	return c.Client.Do(req.WithContext(ctx))
}
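A hedged usage sketch for the context-aware do method above (from inside the same package); the timeout value and the wrapper function are assumptions.

// Hypothetical caller: bound the whole request, including reading the body,
// to two seconds.
func fetchWithTimeout(c *Client, url string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	res, err := c.do(ctx, req) // fails with a context error if the deadline hits first
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	return ioutil.ReadAll(res.Body)
}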
Example 19
// newContext creates a new context.Context for the HTTP request and
// a cancel function to call when the HTTP request is finished. The context's
// Done channel will be closed if the HTTP client closes the connection
// while the request is being processed. (This feature relies on w implementing
// the http.CloseNotifier interface).
//
// Note that the caller must ensure that the cancel function is called
// when the HTTP request is complete, or a goroutine leak could result.
func newContext(ctx context.Context, w http.ResponseWriter, r *http.Request) (context.Context, context.CancelFunc) {
	// TODO: the request r is not used in this function, and perhaps it should
	// be removed. Is there any reason to keep it? A future version of Go might
	// keep a context in the request object, so it is kept here for now.
	var cancelFunc context.CancelFunc
	if ctx == nil {
		ctx = context.Background()
	}

	// create a context without a timeout
	ctx, cancelFunc = context.WithCancel(ctx)

	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		// need to acquire the channel prior to entering
		// the go-routine, otherwise CloseNotify could be
		// called after the request is finished, which
		// results in a panic
		closeChan := closeNotifier.CloseNotify()
		go func() {
			select {
			case <-closeChan:
				cancelFunc()
				return
			case <-ctx.Done():
				return
			}
		}()
	}

	return ctx, cancelFunc
}
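A hedged sketch of how newContext might be used inside a handler; the handler itself and the five-second stand-in for work are assumptions.

// Hypothetical handler: derive the request context and always call cancel,
// as the comment above warns, to avoid leaking the CloseNotify goroutine.
func slowHandler(w http.ResponseWriter, r *http.Request) {
	ctx, cancel := newContext(context.Background(), w, r)
	defer cancel()

	select {
	case <-time.After(5 * time.Second): // stand-in for real work
		fmt.Fprintln(w, "done")
	case <-ctx.Done(): // client closed the connection
		return
	}
}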
Example 20
func (m *mdnsService) pollForEntries(ctx context.Context) {

	ticker := time.NewTicker(m.interval)
	defer ticker.Stop() // release the ticker when the loop returns
	for {
		select {
		case <-ticker.C:
			entriesCh := make(chan *mdns.ServiceEntry, 16)
			go func() {
				for entry := range entriesCh {
					m.handleEntry(entry)
				}
			}()

			log.Debug("starting mdns query")
			qp := &mdns.QueryParam{
				Domain:  "local",
				Entries: entriesCh,
				Service: ServiceTag,
				Timeout: time.Second * 5,
			}

			err := mdns.Query(qp)
			if err != nil {
				log.Error("mdns lookup error: ", err)
			}
			close(entriesCh)
			log.Debug("mdns query complete")
		case <-ctx.Done():
			log.Debug("mdns service halting")
			return
		}
	}
}
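A hedged sketch of starting and stopping the poller above; startPolling is a made-up wrapper.

// Hypothetical wrapper: run the poller until the returned stop func is called.
func startPolling(m *mdnsService) (stop func()) {
	ctx, cancel := context.WithCancel(context.Background())
	go m.pollForEntries(ctx)
	// Calling stop() makes the ctx.Done case fire and the loop return.
	return cancel
}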
Example 21
func dialPlan9(ctx context.Context, net string, laddr, raddr Addr) (fd *netFD, err error) {
	defer func() { fixErr(err) }()
	type res struct {
		fd  *netFD
		err error
	}
	resc := make(chan res)
	go func() {
		testHookDialChannel()
		fd, err := dialPlan9Blocking(ctx, net, laddr, raddr)
		select {
		case resc <- res{fd, err}:
		case <-ctx.Done():
			if fd != nil {
				fd.Close()
			}
		}
	}()
	select {
	case res := <-resc:
		return res.fd, res.err
	case <-ctx.Done():
		return nil, mapErr(ctx.Err())
	}
}
Example 22
File: waiter.go Project: vmware/vic
// WaitForResult wraps govmomi operations and wait the operation to complete.
// Return the operation result
// Sample usage:
//    info, err := WaitForResult(ctx, func(ctx context.Context) (Task, error) {
//       return vm, vm.Reconfigure(ctx, config)
//    })
func WaitForResult(ctx context.Context, f func(context.Context) (Task, error)) (*types.TaskInfo, error) {
	var err error
	var info *types.TaskInfo
	var backoffFactor int64 = 1

	for {
		var t Task
		if t, err = f(ctx); err == nil {
			info, err = t.WaitForResult(ctx, nil)
			if err == nil {
				return info, err
			}
		}

		if !isTaskInProgress(err) {
			return info, err
		}

		sleepValue := time.Duration(backoffFactor * (rand.Int63n(100) + int64(50)))
		select {
		case <-time.After(sleepValue * time.Millisecond):
			backoffFactor *= 2
			if backoffFactor > maxBackoffFactor {
				backoffFactor = maxBackoffFactor
			}
		case <-ctx.Done():
			return info, ctx.Err()
		}

		log.Warnf("retrying task")
	}
}
Example 23
func (d *Diagnostics) getDiagnosticFromPeers(ctx context.Context, peers map[peer.ID]int, pmes *pb.Message) (<-chan *DiagInfo, error) {
	respdata := make(chan *DiagInfo)
	wg := sync.WaitGroup{}
	for p := range peers {
		wg.Add(1)
		log.Debugf("Sending diagnostic request to peer: %s", p)
		go func(p peer.ID) {
			defer wg.Done()
			out, err := d.getDiagnosticFromPeer(ctx, p, pmes)
			if err != nil {
				log.Debugf("Error getting diagnostic from %s: %s", p, err)
				return
			}
			for d := range out {
				select {
				case respdata <- d:
				case <-ctx.Done():
					return
				}
			}
		}(p)
	}

	go func() {
		wg.Wait()
		close(respdata)
	}()

	return respdata, nil
}
Example 24
func ctxDriverBegin(ctx context.Context, ci driver.Conn) (driver.Tx, error) {
	if ciCtx, is := ci.(driver.ConnBeginContext); is {
		return ciCtx.BeginContext(ctx)
	}

	if ctx.Done() == context.Background().Done() {
		return ci.Begin()
	}

	// Check the transaction level in ctx. If set and non-default
	// then return an error here as the BeginContext driver value is not supported.
	if level, ok := driver.IsolationFromContext(ctx); ok && level != driver.IsolationLevel(LevelDefault) {
		return nil, errors.New("sql: driver does not support non-default isolation level")
	}

	// Check for a read-only parameter in ctx. If a read-only transaction is
	// requested return an error as the BeginContext driver value is not supported.
	if ro := driver.ReadOnlyFromContext(ctx); ro {
		return nil, errors.New("sql: driver does not support read-only transactions")
	}

	txi, err := ci.Begin()
	if err == nil {
		select {
		default:
		case <-ctx.Done():
			txi.Rollback()
			return nil, ctx.Err()
		}
	}
	return txi, err
}
Example 25
func (b *bloomcache) Rebuild(ctx context.Context) {
	evt := log.EventBegin(ctx, "bloomcache.Rebuild")
	defer evt.Done()

	ch, err := b.blockstore.AllKeysChan(ctx)
	if err != nil {
		log.Errorf("AllKeysChan failed in bloomcache rebuild with: %v", err)
		return
	}
	finish := false
	for !finish {
		select {
		case key, ok := <-ch:
			if ok {
				b.bloom.AddTS(key.Bytes()) // Use binary key, the more compact the better
			} else {
				finish = true
			}
		case <-ctx.Done():
			log.Warning("Cache rebuild halted by context cancellation.")
			return
		}
	}
	close(b.rebuildChan)
	atomic.StoreInt32(&b.active, 1)
}
Example 26
// lookupProtocol looks up an IP protocol name and returns the corresponding protocol number.
func lookupProtocol(ctx context.Context, name string) (int, error) {
	// GetProtoByName return value is stored in thread local storage.
	// Start new os thread before the call to prevent races.
	type result struct {
		proto int
		err   error
	}
	ch := make(chan result) // unbuffered
	go func() {
		acquireThread()
		defer releaseThread()
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		proto, err := getprotobyname(name)
		select {
		case ch <- result{proto: proto, err: err}:
		case <-ctx.Done():
		}
	}()
	select {
	case r := <-ch:
		if r.err != nil {
			if proto, err := lookupProtocolMap(name); err == nil {
				return proto, nil
			}
			r.err = &DNSError{Err: r.err.Error(), Name: name}
		}
		return r.proto, r.err
	case <-ctx.Done():
		return 0, mapErr(ctx.Err())
	}
}
Example 27
func relativeFilepath(ctx context.Context, base, filename string) <-chan string {
	ch := make(chan string)
	go func() {
		var (
			err error
		)
		defer func() {
			if err != nil {
				// on failure, wait for the context to be cancelled, then
				// close the channel without ever sending a value
				<-ctx.Done()
				close(ch)
				return
			}
			select {
			case <-ctx.Done():
				close(ch)
			default:
				ch <- filename
				close(ch)
			}
		}()
		// relative path check: resolve filename against base before stat'ing
		filename = filepath.Join(base, filename)
		fi, err := os.Lstat(filename)
		if err != nil {
			return
		}
		if fi.IsDir() {
			err = ErrNotFile
			return
		}
	}()
	return ch
}
Example 28
// populateShard gets the predicate data for the given group from the server
// behind pl and writes it to RocksDB.
func populateShard(ctx context.Context, pl *pool, group uint32) (int, error) {
	gkeys, err := generateGroup(group)
	if err != nil {
		return 0, x.Wrapf(err, "While generating keys group")
	}

	conn, err := pl.Get()
	if err != nil {
		return 0, err
	}
	defer pl.Put(conn)
	c := NewWorkerClient(conn)

	stream, err := c.PredicateData(context.Background(), gkeys)
	if err != nil {
		return 0, err
	}
	x.Trace(ctx, "Streaming data for group: %v", group)

	kvs := make(chan *task.KV, 1000)
	che := make(chan error)
	go writeBatch(ctx, kvs, che)

	// We can use count to check the number of posting lists returned in tests.
	count := 0
	for {
		kv, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			close(kvs)
			return count, err
		}
		count++

		// Send the value unless the context is done or the batch writer reported an error.
		select {
		case kvs <- kv:
			// OK
		case <-ctx.Done():
			x.TraceError(ctx, x.Errorf("Context timed out while streaming group: %v", group))
			close(kvs)
			return count, ctx.Err()
		case err := <-che:
			x.TraceError(ctx, x.Errorf("Error while doing a batch write for group: %v", group))
			close(kvs)
			return count, err
		}
	}
	close(kvs)

	if err := <-che; err != nil {
		x.TraceError(ctx, x.Errorf("Error while doing a batch write for group: %v", group))
		return count, err
	}
	x.Trace(ctx, "Streaming complete for group: %v", group)
	return count, nil
}
Example 29
func EnumerateChildrenAsync(ctx context.Context, ds DAGService, c *cid.Cid, visit func(*cid.Cid) bool) error {
	toprocess := make(chan []*cid.Cid, 8)
	nodes := make(chan *NodeOption, 8)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	defer close(toprocess)

	go fetchNodes(ctx, ds, toprocess, nodes)

	root, err := ds.Get(ctx, c)
	if err != nil {
		return err
	}

	nodes <- &NodeOption{Node: root}
	live := 1

	for {
		select {
		case opt, ok := <-nodes:
			if !ok {
				return nil
			}

			if opt.Err != nil {
				return opt.Err
			}

			nd := opt.Node

			// a node has been fetched
			live--

			var cids []*cid.Cid
			for _, lnk := range nd.Links() {
				c := lnk.Cid
				if visit(c) {
					live++
					cids = append(cids, c)
				}
			}

			if live == 0 {
				return nil
			}

			if len(cids) > 0 {
				select {
				case toprocess <- cids:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
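A hedged usage sketch for EnumerateChildrenAsync above; it assumes the Set type and its Visit method from the go-cid package, whose Visit reports whether a CID has not been seen before.

// Hypothetical caller: walk the DAG below root, visiting each CID at most once.
func walkOnce(ctx context.Context, ds DAGService, root *cid.Cid) error {
	seen := cid.NewSet() // assumed go-cid helper
	return EnumerateChildrenAsync(ctx, ds, root, seen.Visit)
}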
Example 30
func (it *interrupt) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	select {
	case it.hanging <- struct{}{}:
	default:
	}
	<-ctx.Done()
	return fuse.EINTR
}