Example #1
1
// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return distribution.Descriptor{}, errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return distribution.Descriptor{}, ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return distribution.Descriptor{}, errors.New("simulating retry")
	}

	return distribution.Descriptor{}, nil
}
Example #2
1
func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	r.ID = s.reqIDGen.Next()

	data, err := r.Marshal()
	if err != nil {
		return nil, err
	}

	if len(data) > maxRequestBytes {
		return nil, ErrRequestTooLarge
	}

	ch := s.w.Register(r.ID)

	s.r.Propose(ctx, data)

	select {
	case x := <-ch:
		return x.(*applyResult), nil
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return nil, ctx.Err()
	case <-s.done:
		return nil, ErrStopped
	}
}
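Examples #2, #7, and #28 follow the same shape: register a channel under a fresh request ID, propose the change through consensus, then select on the result channel, ctx.Done(), and a stop channel, triggering the registration with nil on cancellation so the entry can be garbage-collected. The wait registry itself (s.w above) is not shown; the sketch below is illustrative of how such a registry can work, not necessarily etcd's actual wait package (waitRegistry and newWaitRegistry are assumed names; only sync.Mutex from the standard library is used).

type waitRegistry struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaitRegistry() *waitRegistry {
	return &waitRegistry{m: make(map[uint64]chan interface{})}
}

// Register creates a buffered result channel for id and hands it to the waiter.
func (w *waitRegistry) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

// Trigger delivers x to the waiter registered under id (if any) and removes
// the entry; triggering with nil on ctx.Done() simply frees the slot.
func (w *waitRegistry) Trigger(id uint64, x interface{}) {
	w.mu.Lock()
	ch, ok := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ok {
		ch <- x
	}
}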
Example #3
1
func runExec(ctx context.Context, db *sql.DB, query string) error {
	// Buffered so the retry goroutine can send and exit even if the caller
	// has already returned on ctx.Done().
	done := make(chan struct{}, 1)
	var errMsg error
	go func() {
		for {
			if _, err := db.Exec(query); err != nil {
				errMsg = err
				time.Sleep(time.Second)
				continue
			} else {
				errMsg = nil
				done <- struct{}{}
				break
			}
		}
	}()
	select {
	case <-done:
		return errMsg
	case <-ctx.Done():
		return fmt.Errorf("runExec %s timed out with %v / %v", query, ctx.Err(), errMsg)
	}
}
Example #4
1
// waitForStateChange blocks until the state changes to something other than the sourceState.
func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	if sourceState != ac.state {
		return ac.state, nil
	}
	done := make(chan struct{})
	var err error
	go func() {
		select {
		case <-ctx.Done():
			ac.mu.Lock()
			err = ctx.Err()
			ac.stateCV.Broadcast()
			ac.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == ac.state {
		ac.stateCV.Wait()
		if err != nil {
			return ac.state, err
		}
	}
	return ac.state, nil
}
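Examples #4, #5, and #30 (below) share the same technique: sync.Cond has no way to select on ctx.Done(), so a helper goroutine converts cancellation into a Broadcast and the waiter re-checks the context after every wakeup. The following is a minimal self-contained sketch of that pattern with made-up names (stateWatcher, waitForChange); it is not code from any of the projects above.

type stateWatcher struct {
	mu    sync.Mutex
	cond  *sync.Cond // must be constructed with sync.NewCond(&w.mu)
	state int
}

// waitForChange blocks until w.state differs from old or ctx is done.
func (w *stateWatcher) waitForChange(ctx context.Context, old int) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	done := make(chan struct{})
	defer close(done)
	// sync.Cond cannot select on a channel, so this helper goroutine turns
	// ctx cancellation into a Broadcast that wakes the waiter below.
	go func() {
		select {
		case <-ctx.Done():
			w.mu.Lock()
			w.cond.Broadcast()
			w.mu.Unlock()
		case <-done:
		}
	}()
	for w.state == old {
		w.cond.Wait()
		// The wakeup may have come from the cancellation goroutine rather than
		// a real state change, so re-check the context on every iteration.
		if err := ctx.Err(); err != nil {
			return w.state, err
		}
	}
	return w.state, nil
}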
Example #5
0
// WaitSelect waits until at least one remote becomes available and then selects one.
func (s *persistentRemotes) WaitSelect(ctx context.Context) <-chan api.Peer {
	c := make(chan api.Peer, 1)
	s.RLock()
	done := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			s.c.Broadcast()
		case <-done:
		}
	}()
	go func() {
		defer s.RUnlock()
		defer close(c)
		defer close(done)
		for {
			if ctx.Err() != nil {
				return
			}
			p, err := s.Select()
			if err == nil {
				c <- p
				return
			}
			s.c.Wait()
		}
	}()
	return c
}
Example #6
0
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed.
func (ac *addrConn) wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		ac.mu.Lock()
		switch {
		case ac.state == Shutdown:
			ac.mu.Unlock()
			return nil, errConnClosing
		case ac.state == Ready:
			ct := ac.transport
			ac.mu.Unlock()
			return ct, nil
		default:
			ready := ac.ready
			if ready == nil {
				ready = make(chan struct{})
				ac.ready = ready
			}
			ac.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example #7
0
// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It will block
// until the change is performed or there is an error.
func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error {
	cc.ID = n.reqIDGen.Next()

	ctx, cancel := context.WithCancel(ctx)
	ch := n.wait.register(cc.ID, nil, cancel)

	if err := n.raftNode.ProposeConfChange(ctx, cc); err != nil {
		n.wait.cancel(cc.ID)
		return err
	}

	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.G(ctx).Panic("raft: configuration change error, return type should always be error")
		}
		return nil
	case <-ctx.Done():
		n.wait.cancel(cc.ID)
		return ctx.Err()
	}
}
Example #8
0
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		case cc.state == Ready:
			cc.mu.Unlock()
			return cc.transport, nil
		case cc.state == TransientFailure:
			cc.mu.Unlock()
			// Break out so that the caller gets a chance to pick another transport to
			// perform rpc instead of sticking to this transport.
			return nil, ErrTransientFailure
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example #9
0
func (i *localInstance) RunScript(ctx gocontext.Context, writer io.Writer) (*RunResult, error) {
	if i.scriptPath == "" {
		return &RunResult{Completed: false}, errNoScriptUploaded
	}

	cmd := exec.Command("bash", i.scriptPath)
	cmd.Stdout = writer
	cmd.Stderr = writer

	err := cmd.Start()
	if err != nil {
		return &RunResult{Completed: false}, err
	}

	errChan := make(chan error)
	go func() {
		errChan <- cmd.Wait()
	}()

	select {
	case err := <-errChan:
		if err != nil {
			return &RunResult{Completed: false}, err
		}
		return &RunResult{Completed: true}, nil
	case <-ctx.Done():
		err = ctx.Err()
		if err != nil {
			return &RunResult{Completed: false}, err
		}
		return &RunResult{Completed: true}, nil
	}
}
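Worth noting next to this example: since Go 1.7, os/exec provides exec.CommandContext, which kills the started process when the context is cancelled, so the extra goroutine and select are not needed. A minimal sketch under that assumption (runScript is an illustrative name, not part of the code above):

func runScript(ctx context.Context, scriptPath string, writer io.Writer) error {
	// CommandContext kills the process when ctx is cancelled, so Run returns
	// promptly without a separate goroutine and select.
	cmd := exec.CommandContext(ctx, "bash", scriptPath)
	cmd.Stdout = writer
	cmd.Stderr = writer
	if err := cmd.Run(); err != nil {
		// Report cancellation as the context error so callers can tell it
		// apart from a script failure.
		if ctxErr := ctx.Err(); ctxErr != nil {
			return ctxErr
		}
		return err
	}
	return nil
}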
Example #10
0
func (m *mongoStorage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)

	var b blobDoc
	var qry bson.M
	if after != "" {
		qry = bson.M{"key": bson.M{"$gt": after}}
	}
	iter := m.c.Find(qry).Limit(limit).Select(bson.M{"key": 1, "size": 1}).Sort("key").Iter()

	for iter.Next(&b) {
		br, ok := blob.Parse(b.Key)
		if !ok {
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: uint32(b.Size)}:
		case <-ctx.Done():
			// Close the iterator; log its error but return ctx.Err(), since we are already cancelling
			if err := iter.Close(); err != nil {
				log.Printf("Error closing iterator after enumerating: %v", err)
			}
			return ctx.Err()
		}
	}

	if err := iter.Close(); err != nil {
		return err
	}

	return nil
}
Example #11
0
// Retry invokes the given function, retrying it multiple times if the connection failed or
// the HTTP status response indicates the request should be attempted again. ctx may be nil.
func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
	for {
		resp, err := f()

		var status int
		if resp != nil {
			status = resp.StatusCode
		}

		// Return if we shouldn't retry.
		pause, retry := backoff.Pause()
		if !shouldRetry(status, err) || !retry {
			return resp, err
		}

		// Ensure the response body is closed, if any.
		if resp != nil && resp.Body != nil {
			resp.Body.Close()
		}

		// Pause, but still listen to ctx.Done if context is not nil.
		var done <-chan struct{}
		if ctx != nil {
			done = ctx.Done()
		}
		select {
		case <-done:
			return nil, ctx.Err()
		case <-time.After(pause):
		}
	}
}
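The BackoffStrategy above is exercised only through its Pause method, which returns the next delay and whether another attempt is allowed; its implementation is not shown. The sketch below is an illustrative exponential backoff consistent with that usage, not the library's actual type.

// exponentialBackoff is an illustrative BackoffStrategy: Pause returns the
// next delay and whether another attempt may be made.
type exponentialBackoff struct {
	next    time.Duration // delay before the next retry
	max     time.Duration // cap on the delay
	retries int           // attempts remaining
}

func (b *exponentialBackoff) Pause() (time.Duration, bool) {
	if b.retries <= 0 {
		return 0, false
	}
	b.retries--
	d := b.next
	b.next *= 2
	if b.next > b.max {
		b.next = b.max
	}
	return d, true
}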
Example #12
0
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
	cancel := canceler(client, req)

	type responseAndError struct {
		resp *http.Response
		err  error
	}
	result := make(chan responseAndError, 1)

	// Make local copies of test hooks closed over by goroutines below.
	// Prevents data races in tests.
	testHookDoReturned := testHookDoReturned
	testHookDidBodyClose := testHookDidBodyClose

	go func() {
		resp, err := client.Do(req)
		testHookDoReturned()
		result <- responseAndError{resp, err}
	}()

	var resp *http.Response

	select {
	case <-ctx.Done():
		testHookContextDoneBeforeHeaders()
		cancel()
		// Clean up after the goroutine calling client.Do:
		go func() {
			if r := <-result; r.resp != nil {
				testHookDidBodyClose()
				r.resp.Body.Close()
			}
		}()
		return nil, ctx.Err()
	case r := <-result:
		var err error
		resp, err = r.resp, r.err
		if err != nil {
			return resp, err
		}
	}

	c := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			cancel()
		case <-c:
			// The response's Body is closed.
		}
	}()
	resp.Body = &notifyingReader{resp.Body, c}

	return resp, nil
}
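The helper above has the same signature as Do in golang.org/x/net/context/ctxhttp. Assuming it is used through that package, a caller typically looks like the sketch below (fetchWithTimeout and the five-second timeout are illustrative):

func fetchWithTimeout(url string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	// A nil client means http.DefaultClient, per the documentation above.
	resp, err := ctxhttp.Do(ctx, nil, req)
	if err != nil {
		// On timeout or cancellation this is ctx.Err().
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}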
Example #13
0
func (h *testHandler) Handle(ctx context.Context, args *raw.Args) (*raw.Res, error) {
	h.Lock()
	h.format = args.Format
	h.caller = args.Caller
	h.Unlock()

	assert.Equal(h.t, args.Caller, CurrentCall(ctx).CallerName())

	switch args.Method {
	case "block":
		<-ctx.Done()
		h.blockErr <- ctx.Err()
		return &raw.Res{
			IsErr: true,
		}, nil
	case "echo":
		return &raw.Res{
			Arg2: args.Arg2,
			Arg3: args.Arg3,
		}, nil
	case "busy":
		return &raw.Res{
			SystemErr: ErrServerBusy,
		}, nil
	case "app-error":
		return &raw.Res{
			IsErr: true,
		}, nil
	}
	return nil, errors.New("unknown method")
}
Example #14
0
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	// Why do we wait here?
	for {
		cc.mu.Lock()
		switch {
		// 1. If the Conn has been shut down, fail immediately
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		// 2. If it is Ready, return the transport
		case cc.state == Ready:
			ct := cc.transport
			cc.mu.Unlock()
			return ct, nil
		default:
			// 3. Otherwise, wait until Ready or Done
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example #15
0
// WaitN blocks until lim permits n events to happen.
// It returns an error if n exceeds the Limiter's burst size, the Context is
// canceled, or the expected wait time exceeds the Context's Deadline.
func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
	if n > lim.burst {
		return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
	}
	// Check if ctx is already cancelled
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	// Determine wait limit
	now := time.Now()
	waitLimit := InfDuration
	if deadline, ok := ctx.Deadline(); ok {
		waitLimit = deadline.Sub(now)
	}
	// Reserve
	r := lim.reserveN(now, n, waitLimit)
	if !r.ok {
		return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n)
	}
	// Wait
	t := time.NewTimer(r.DelayFrom(now))
	defer t.Stop()
	select {
	case <-t.C:
		// We can proceed.
		return nil
	case <-ctx.Done():
		// Context was canceled before we could proceed.  Cancel the
		// reservation, which may permit other events to proceed sooner.
		r.Cancel()
		return ctx.Err()
	}
}
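This WaitN matches the limiter in golang.org/x/time/rate. Assuming that package, a caller-side sketch (sendBatches, the rate, and the burst size are illustrative):

func sendBatches(ctx context.Context, batches [][]byte, send func([]byte) error) error {
	// Allow 10 events per second with bursts of up to 5.
	lim := rate.NewLimiter(rate.Limit(10), 5)
	for _, b := range batches {
		// Block until a token is available, or return early if ctx is
		// cancelled or its deadline would be exceeded.
		if err := lim.WaitN(ctx, 1); err != nil {
			return err
		}
		if err := send(b); err != nil {
			return err
		}
	}
	return nil
}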
Example #16
// FetchCert retrieves an already issued certificate from the given url, in DER format.
// It retries the request until the certificate is successfully retrieved,
// the context is cancelled by the caller, or an error response is received.
//
// The returned value will also contain CA (the issuer) certificate if bundle is true.
func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) {
	for {
		res, err := c.httpClient().Get(url)
		if err != nil {
			return nil, err
		}
		defer res.Body.Close()
		if res.StatusCode == http.StatusOK {
			return responseCert(c.httpClient(), res, bundle)
		}
		if res.StatusCode > 299 {
			return nil, responseError(res)
		}
		d, err := retryAfter(res.Header.Get("retry-after"))
		if err != nil {
			d = 3 * time.Second
		}
		select {
		case <-time.After(d):
			// retry
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}
Example #17
0
func (s *session) heartbeat(ctx context.Context) error {
	log.G(ctx).Debugf("(*session).heartbeat")
	client := api.NewDispatcherClient(s.conn)
	heartbeat := time.NewTimer(1) // send out a heartbeat right away
	defer heartbeat.Stop()

	for {
		select {
		case <-heartbeat.C:
			heartbeatCtx, cancel := context.WithTimeout(ctx, dispatcherRPCTimeout)
			resp, err := client.Heartbeat(heartbeatCtx, &api.HeartbeatRequest{
				SessionID: s.sessionID,
			})
			cancel()
			if err != nil {
				if grpc.Code(err) == codes.NotFound {
					err = errNodeNotRegistered
				}

				return err
			}

			period, err := ptypes.Duration(&resp.Period)
			if err != nil {
				return err
			}

			heartbeat.Reset(period)
		case <-s.closed:
			return errSessionClosed
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
Example #18
0
// ConcatPacketChans concatenates packet chans in order.
func ConcatPacketChans(ctx context.Context, in <-chan *PacketChan) *PacketChan {
	out := NewPacketChan(100)
	go func() {
		for c := range in {
			c := c
			defer c.Discard()
		L:
			for c.Err() == nil {
				select {
				case pkt := <-c.Receive():
					if pkt == nil {
						break L
					}
					out.Send(pkt)
				case <-ctx.Done():
					out.Close(ctx.Err())
					return
				}
			}
			if err := c.Err(); err != nil {
				out.Close(err)
				return
			}
		}
		out.Close(nil)
	}()
	return out
}
Example #19
0
func (s rpcServer) Wait(ctx context.Context, req *pb.WaitReq) (*pb.WaitResp, error) {
	glog.Infof("Got request to wait for %d nodes for %q", req.Count, req.Key)
	rc := make(chan ssResp)
	s <- ssInput{
		respChan: rc,
		key:      req.Key,
		count:    req.Count,
		arrive:   true,
	}
	got := <-rc
	resp := pb.WaitResp{Count: got.count}
	select {
	case <-got.done:
		resp.Start = true
		return &resp, nil
	case <-ctx.Done():
		glog.Infof("Connection ended for %q", req.Key)
		s <- ssInput{
			respChan: nil,
			key:      req.Key,
			count:    req.Count,
			arrive:   false,
		}
		return nil, ctx.Err()
	}
}
Example #20
0
// WaitForCluster waits until the node observes that the cluster-wide config is
// committed to raft. This ensures that we can see and serve information
// related to the cluster.
func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
	watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), state.EventCreateCluster{})
	defer cancel()

	var clusters []*api.Cluster
	n.MemoryStore().View(func(readTx store.ReadTx) {
		clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
	})

	if err != nil {
		return nil, err
	}

	if len(clusters) == 1 {
		cluster = clusters[0]
	} else {
		select {
		case e := <-watch:
			cluster = e.(state.EventCreateCluster).Cluster
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	return cluster, nil
}
Example #21
0
// LeaderConn returns current connection to cluster leader or raftselector.ErrIsLeader
// if current machine is leader.
func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
	cc, err := n.getLeaderConn()
	if err == nil {
		return cc, nil
	}
	if err == raftselector.ErrIsLeader {
		return nil, err
	}
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			cc, err := n.getLeaderConn()
			if err == nil {
				return cc, nil
			}
			if err == raftselector.ErrIsLeader {
				return nil, err
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
}
Example #22
0
// Upload is a blocking function which ensures the listed layers are present on
// the remote registry. It uses the string returned by the Key method to
// deduplicate uploads.
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
	var (
		uploads          []*uploadTransfer
		dedupDescriptors = make(map[string]struct{})
	)

	for _, descriptor := range layers {
		progress.Update(progressOutput, descriptor.ID(), "Preparing")

		key := descriptor.Key()
		if _, present := dedupDescriptors[key]; present {
			continue
		}
		dedupDescriptors[key] = struct{}{}

		xferFunc := lum.makeUploadFunc(descriptor)
		upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput)
		defer upload.Release(watcher)
		uploads = append(uploads, upload.(*uploadTransfer))
	}

	for _, upload := range uploads {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-upload.Transfer.Done():
			if upload.err != nil {
				return upload.err
			}
		}
	}

	return nil
}
Example #23
0
func runQuery(ctx context.Context, db *sql.DB, query string) (*sql.Rows, error) {
	// Buffered so the retry goroutine can send and exit even if the caller
	// has already returned on ctx.Done().
	done := make(chan struct{}, 1)
	var (
		rows   *sql.Rows
		errMsg error
	)
	go func() {
		for {
			rs, err := db.Query(query)
			if err != nil {
				errMsg = err
				time.Sleep(time.Second)
				continue
			} else {
				rows = rs
				errMsg = nil
				done <- struct{}{}
				break
			}
		}
	}()
	select {
	case <-done:
		return rows, errMsg
	case <-ctx.Done():
		return nil, fmt.Errorf("runQuery %s timed out with %v / %v", query, ctx.Err(), errMsg)
	}
}
Example #24
0
// When wait returns, either the new transport is up or ClientConn is
// closing. Used to avoid working on a dying transport. It updates and
// returns the transport and its version when there is no error.
func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.closing:
			cc.mu.Unlock()
			return nil, 0, ErrClientConnClosing
		case ts < cc.transportSeq:
			// Worked on a dying transport. Try the new one immediately.
			defer cc.mu.Unlock()
			return cc.transport, cc.transportSeq, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, 0, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example #25
0
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
// iv) transport is in TransientFailure and the RPC is fail-fast.
func (ac *addrConn) wait(ctx context.Context, failFast bool) (transport.ClientTransport, error) {
	for {
		ac.mu.Lock()
		switch {
		case ac.state == Shutdown:
			ac.mu.Unlock()
			return nil, errConnClosing
		case ac.state == Ready:
			ct := ac.transport
			ac.mu.Unlock()
			return ct, nil
		case ac.state == TransientFailure && failFast:
			ac.mu.Unlock()
			return nil, Errorf(codes.Unavailable, "grpc: RPC failed fast due to transport failure")
		default:
			ready := ac.ready
			if ready == nil {
				ready = make(chan struct{})
				ac.ready = ready
			}
			ac.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, toRPCErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example #26
0
func (msr *MockSubnetRegistry) watchSubnet(ctx context.Context, network string, since uint64, sn ip.IP4Net) (Event, uint64, error) {
	msr.mux.Lock()
	n, ok := msr.networks[network]
	msr.mux.Unlock()

	if !ok {
		return Event{}, 0, fmt.Errorf("Network %s not found", network)
	}

	for {
		msr.mux.Lock()
		index := msr.index
		msr.mux.Unlock()

		if since < index {
			return Event{}, msr.index, etcd.Error{
				Code:    etcd.ErrorCodeEventIndexCleared,
				Cause:   "out of date",
				Message: "cursor is out of date",
				Index:   index,
			}
		}

		select {
		case <-ctx.Done():
			return Event{}, index, ctx.Err()

		case e := <-n.subnetEventsChan(sn):
			if e.index > since {
				return e.evt, index, nil
			}
		}
	}
}
Example #27
0
func (msr *MockSubnetRegistry) watchNetworks(ctx context.Context, since uint64) (Event, uint64, error) {
	msr.mux.Lock()
	index := msr.index
	msr.mux.Unlock()

	for {
		if since < index {
			return Event{}, 0, etcd.Error{
				Code:    etcd.ErrorCodeEventIndexCleared,
				Cause:   "out of date",
				Message: "cursor is out of date",
				Index:   index,
			}
		}

		select {
		case <-ctx.Done():
			return Event{}, 0, ctx.Err()

		case e := <-msr.networkEvents:
			if e.index > since {
				return e.evt, e.index, nil
			}
		}
	}
}
Example #28
0
// PushRatification implements the E2EKSVerification interface from proto/verifier.proto
func (ks *Keyserver) PushRatification(ctx context.Context, r *proto.SignedEpochHead) (*proto.Nothing, error) {
	verifierCertID, err := authenticateVerifier(ctx)
	if err != nil {
		log.Printf("PushRatification: %s", err)
		return nil, fmt.Errorf("PushRatification: %s", err)
	}
	for signerID := range r.Signatures {
		if signerID != verifierCertID {
			return nil, fmt.Errorf("PushRatification: not authorized: authenticated as %x but tried to write %x's signature", verifierCertID, signerID)
		}
	}
	uid := genUID()
	ch := ks.wr.Wait(uid)
	ks.log.Propose(ctx, replication.LogEntry{Data: proto.MustMarshal(&proto.KeyserverStep{
		UID:  uid,
		Type: &proto.KeyserverStep_VerifierSigned{VerifierSigned: r},
	})})
	select {
	case <-ctx.Done():
		ks.wr.Notify(uid, nil)
		return nil, ctx.Err()
	case <-ch:
		return nil, nil
	}
}
Example #29
0
func (sto *appengineStorage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)

	loan := ctxPool.Get()
	defer loan.Return()
	actx := loan

	prefix := sto.namespace + "|"
	keyBegin := datastore.NewKey(actx, memKind, prefix+after, 0, nil)
	keyEnd := datastore.NewKey(actx, memKind, sto.namespace+"~", 0, nil)

	q := datastore.NewQuery(memKind).Limit(int(limit)).Filter("__key__>", keyBegin).Filter("__key__<", keyEnd)
	it := q.Run(actx)
	var row memEnt
	for {
		key, err := it.Next(&row)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return err
		}
		select {
		case dest <- blob.SizedRef{Ref: blob.ParseOrZero(key.StringID()[len(prefix):]), Size: uint32(row.Size)}:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}
Example #30
0
func (n *Node) waitRole(ctx context.Context, role string) error {
	n.roleCond.L.Lock()
	if role == n.role {
		n.roleCond.L.Unlock()
		return nil
	}
	finishCh := make(chan struct{})
	defer close(finishCh)
	go func() {
		select {
		case <-finishCh:
		case <-ctx.Done():
			// call Broadcast to shut down this function
			n.roleCond.Broadcast()
		}
	}()
	defer n.roleCond.L.Unlock()
	for role != n.role {
		n.roleCond.Wait()
		select {
		case <-ctx.Done():
			if ctx.Err() != nil {
				return ctx.Err()
			}
		default:
		}
	}

	return nil
}