Example #1
File: clientconn.go Project: ikatson/etcd
// When wait returns, either the new transport is up or ClientConn is
// closing. Used to avoid working on a dying transport. It updates and
// returns the transport and its version when there is no error.
func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.closing:
			cc.mu.Unlock()
			return nil, 0, ErrClientConnClosing
		case ts < cc.transportSeq:
			// Worked on a dying transport. Try the new one immediately.
			defer cc.mu.Unlock()
			return cc.transport, cc.transportSeq, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, 0, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
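This retry loop is the recurring shape in these examples: re-check guarded state under a mutex and, if it is not ready yet, park on a "ready" channel that the producer closes on every state change, then loop and re-check. A minimal, self-contained sketch of that shape (hypothetical names; the producer is assumed to close ready after bumping seq):

package main

import (
	"context"
	"errors"
	"sync"
)

type conn struct {
	mu     sync.Mutex
	closed bool
	seq    int           // guarded state the caller waits on
	ready  chan struct{} // closed by the producer whenever seq changes
}

func (c *conn) wait(ctx context.Context, last int) (int, error) {
	for {
		c.mu.Lock()
		switch {
		case c.closed:
			c.mu.Unlock()
			return 0, errors.New("conn closing")
		case c.seq > last:
			seq := c.seq
			c.mu.Unlock()
			return seq, nil
		default:
			ready := c.ready
			if ready == nil {
				ready = make(chan struct{})
				c.ready = ready
			}
			c.mu.Unlock()
			select {
			case <-ctx.Done():
				return 0, ctx.Err()
			case <-ready: // closed by the producer; loop and re-check
			}
		}
	}
}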
Example #2
File: lease.go Project: lrita/etcd
func (l *lessor) keepAliveCtxCloser(id lease.LeaseID, ctx context.Context, donec <-chan struct{}) {
	select {
	case <-donec:
		return
	case <-l.donec:
		return
	case <-ctx.Done():
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	ka, ok := l.keepAlives[id]
	if !ok {
		return
	}

	// close channel and remove context if still associated with keep alive
	for i, c := range ka.ctxs {
		if c == ctx {
			close(ka.chs[i])
			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
			break
		}
	}
	// remove the keepalive entry if there are no more listeners
	if len(ka.chs) == 0 {
		delete(l.keepAlives, id)
	}
}
Example #3
File: watch.go Project: vsayer/etcd
// Watch posts a watch request to run() and waits for a new watcher channel
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
	ow := opWatch(key, opts...)

	wr := ow.toWatchRequest()
	wr.ctx = ctx

	retc := make(chan chan WatchResponse, 1)
	wr.retc = retc

	ok := false

	// submit request
	select {
	case w.reqc <- wr:
		ok = true
	case <-wr.ctx.Done():
	case <-w.donec:
	}

	// receive channel
	if ok {
		select {
		case ret := <-retc:
			return ret
		case <-ctx.Done():
		case <-w.donec:
		}
	}

	// couldn't create channel; return closed channel
	ch := make(chan WatchResponse)
	close(ch)
	return ch
}
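The closed-channel fallback at the end works because a receive from a closed channel never blocks and yields the zero value, so a caller ranging over the returned WatchChan simply observes it end. A tiny runnable illustration of that property (hypothetical names):

package main

import "fmt"

// failingSource models the fallback above: on failure it returns a
// closed channel instead of nil, so consumers terminate cleanly.
func failingSource() <-chan int {
	ch := make(chan int)
	close(ch)
	return ch
}

func main() {
	for v := range failingSource() {
		fmt.Println(v) // never runs; the loop exits immediately
	}
	fmt.Println("done")
}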
Example #4
File: clientconn.go Project: lrita/etcd
// WaitForStateChange blocks until the state changes to something other than the sourceState.
func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	if sourceState != cc.state {
		return cc.state, nil
	}
	done := make(chan struct{})
	var err error
	go func() {
		select {
		case <-ctx.Done():
			cc.mu.Lock()
			err = ctx.Err()
			cc.stateCV.Broadcast()
			cc.mu.Unlock()
		case <-done:
		}
	}()
	defer close(done)
	for sourceState == cc.state {
		cc.stateCV.Wait()
		if err != nil {
			return cc.state, err
		}
	}
	return cc.state, nil
}
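sync.Cond has no native way to abandon a Wait, so the goroutine above bridges context cancellation into the condition variable: it records ctx.Err() under the lock and Broadcasts so the waiting loop wakes up and notices the error. A stripped-down sketch of the same bridge (hypothetical names, simplified condition):

package main

import (
	"context"
	"sync"
	"time"
)

type waiter struct {
	mu   sync.Mutex
	cond *sync.Cond
	err  error
	done bool // the condition being waited for
}

// waitDone blocks until done turns true or ctx is cancelled.
func (w *waiter) waitDone(ctx context.Context) error {
	w.mu.Lock()
	defer w.mu.Unlock()

	stop := make(chan struct{})
	defer close(stop)
	go func() {
		select {
		case <-ctx.Done():
			w.mu.Lock()
			w.err = ctx.Err()
			w.cond.Broadcast() // wake the waiter so it can observe err
			w.mu.Unlock()
		case <-stop:
		}
	}()

	for !w.done {
		w.cond.Wait()
		if w.err != nil {
			return w.err
		}
	}
	return nil
}

func main() {
	w := &waiter{}
	w.cond = sync.NewCond(&w.mu)
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	_ = w.waitDone(ctx) // returns context.DeadlineExceeded
}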
Example #5
File: server.go Project: polvi/etcd
// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	r.ID = s.reqIDGen.Next()
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		var raftReq pb.InternalRaftRequest
		raftReq.V2 = &r
		data, err := raftReq.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)

		// TODO: benchmark the cost of time.Now()
		// (might be worth sampling instead?)
		start := time.Now()
		s.r.Propose(ctx, data)

		proposePending.Inc()
		defer proposePending.Dec()

		select {
		case x := <-ch:
			proposeDurations.Observe(float64(time.Since(start).Nanoseconds() / int64(time.Millisecond)))
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			proposeFailed.Inc()
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, parseCtxErr(ctx.Err())
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	case "HEAD":
		ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
		if err != nil {
			return Response{}, err
		}
		return Response{Event: ev}, nil
	default:
		return Response{}, ErrUnknownMethod
	}
}
Example #6
func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	r.ID = s.reqIDGen.Next()

	data, err := r.Marshal()
	if err != nil {
		return nil, err
	}

	if len(data) > maxRequestBytes {
		return nil, ErrRequestTooLarge
	}

	ch := s.w.Register(r.ID)

	s.r.Propose(ctx, data)

	select {
	case x := <-ch:
		return x.(*applyResult), nil
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return nil, ctx.Err()
	case <-s.done:
		return nil, ErrStopped
	}
}
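Here s.w is etcd's wait registry: Register(id) hands back a channel that a later Trigger(id, value) completes, and triggering with nil on the cancellation path just releases the entry (the "GC wait" comment). A simplified sketch of such a registry, not the actual pkg/wait code:

package wait

import "sync"

// waitRegistry maps request IDs to one-shot result channels.
type waitRegistry struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaitRegistry() *waitRegistry {
	return &waitRegistry{m: make(map[uint64]chan interface{})}
}

// Register returns a buffered channel that Trigger will complete.
func (w *waitRegistry) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

// Trigger delivers x to the registered waiter and frees the slot;
// the ctx.Done paths above call it with nil purely to release the entry.
func (w *waitRegistry) Trigger(id uint64, x interface{}) {
	w.mu.Lock()
	ch := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ch != nil {
		ch <- x
	}
}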
Example #7
File: clientconn.go Project: lrita/etcd
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
	for {
		cc.mu.Lock()
		switch {
		case cc.state == Shutdown:
			cc.mu.Unlock()
			return nil, ErrClientConnClosing
		case cc.state == Ready:
			ct := cc.transport
			cc.mu.Unlock()
			return ct, nil
		default:
			ready := cc.ready
			if ready == nil {
				ready = make(chan struct{})
				cc.ready = ready
			}
			cc.mu.Unlock()
			select {
			case <-ctx.Done():
				return nil, transport.ContextErr(ctx.Err())
			// Wait until the new transport is ready or failed.
			case <-ready:
			}
		}
	}
}
Example #8
File: client.go Project: balboah/etcd
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-ctx.Done():
		// cancel and wait for request to actually exit before continuing
		c.transport.CancelRequest(req)
		rtresp := <-rtchan
		resp = rtresp.resp
		err = ctx.Err()
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		err = resp.Body.Close()
		<-done
		if err == nil {
			err = ctx.Err()
		}
	case <-done:
	}

	return resp, body, err
}
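The second select is the standard trick for making a blocking ioutil.ReadAll cancellable: closing resp.Body forces the pending Read to fail, and the follow-up <-done guarantees the reader goroutine never leaks. The same idea as a reusable helper (a sketch, not part of the client above):

package httputil

import (
	"context"
	"io"
	"io/ioutil"
)

// readAllCtx drains rc, but closes it early if ctx ends, which unblocks
// the pending Read; it always waits for the reader goroutine to finish.
func readAllCtx(ctx context.Context, rc io.ReadCloser) ([]byte, error) {
	var (
		body []byte
		err  error
	)
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(rc)
		close(done)
	}()
	select {
	case <-ctx.Done():
		rc.Close() // force ReadAll to return
		<-done
		return nil, ctx.Err()
	case <-done:
		return body, err
	}
}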
Example #9
File: transport.go Project: rtewalt/etcd
// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from closing, it returns 0, ErrConnClosing.
// If it receives from proceed, it returns the received integer, nil.
func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ContextErr(ctx.Err())
	case <-closing:
		return 0, ErrConnClosing
	case i := <-proceed:
		return i, nil
	}
}
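Since all three outcomes collapse into a single select, callers treat the helper as a blocking receive with built-in cancellation. A runnable demo, with the transport-specific error helpers simplified to stand-ins:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errConnClosing = errors.New("conn closing") // stand-in for ErrConnClosing

// wait mirrors the helper above, with ContextErr simplified to ctx.Err().
func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-closing:
		return 0, errConnClosing
	case i := <-proceed:
		return i, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	closing := make(chan struct{})
	proceed := make(chan int, 1)
	proceed <- 42

	i, err := wait(ctx, closing, proceed)
	fmt.Println(i, err) // 42 <nil>

	_, err = wait(ctx, closing, proceed)
	fmt.Println(err) // context deadline exceeded
}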
Example #10
File: ctxhttp.go Project: jak-atx/vic
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
	cancel := canceler(client, req)

	type responseAndError struct {
		resp *http.Response
		err  error
	}
	result := make(chan responseAndError, 1)

	go func() {
		resp, err := client.Do(req)
		testHookDoReturned()
		result <- responseAndError{resp, err}
	}()

	var resp *http.Response

	select {
	case <-ctx.Done():
		testHookContextDoneBeforeHeaders()
		cancel()
		// Clean up after the goroutine calling client.Do:
		go func() {
			if r := <-result; r.resp != nil {
				testHookDidBodyClose()
				r.resp.Body.Close()
			}
		}()
		return nil, ctx.Err()
	case r := <-result:
		var err error
		resp, err = r.resp, r.err
		if err != nil {
			return resp, err
		}
	}

	c := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			cancel()
		case <-c:
			// The response's Body is closed.
		}
	}()
	resp.Body = &notifyingReader{resp.Body, c}

	return resp, nil
}
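Callers get plain *http.Response semantics plus cancellation; the notifyingReader keeps the watchdog goroutine alive until the body is closed. Typical usage, assuming the package is imported as ctxhttp (as in golang.org/x/net/context/ctxhttp):

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	resp, err := ctxhttp.Do(ctx, nil, req) // nil client => http.DefaultClient
	if err != nil {
		fmt.Println("request failed or timed out:", err)
		return
	}
	defer resp.Body.Close() // also releases the cancellation goroutine
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(len(body), "bytes")
}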
Example #11
File: server.go Project: robszumski/etcd
// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	if r.ID == 0 {
		log.Panicf("request ID should never be 0")
	}
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	switch r.Method {
	case "POST", "PUT", "DELETE", "QGET":
		data, err := r.Marshal()
		if err != nil {
			return Response{}, err
		}
		ch := s.w.Register(r.ID)
		s.node.Propose(ctx, data)
		select {
		case x := <-ch:
			resp := x.(Response)
			return resp, resp.err
		case <-ctx.Done():
			s.w.Trigger(r.ID, nil) // GC wait
			return Response{}, parseCtxErr(ctx.Err())
		case <-s.done:
			return Response{}, ErrStopped
		}
	case "GET":
		switch {
		case r.Wait:
			wc, err := s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
			if err != nil {
				return Response{}, err
			}
			return Response{Watcher: wc}, nil
		default:
			ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
			if err != nil {
				return Response{}, err
			}
			return Response{Event: ev}, nil
		}
	case "HEAD":
		ev, err := s.store.Get(r.Path, r.Recursive, r.Sorted)
		if err != nil {
			return Response{}, err
		}
		return Response{Event: ev}, nil
	default:
		return Response{}, ErrUnknownMethod
	}
}
Example #12
func (mn *multiNode) step(ctx context.Context, m multiMessage) error {
	ch := mn.recvc
	if m.msg.Type == pb.MsgProp {
		ch = mn.propc
	}

	select {
	case ch <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-mn.done:
		return ErrStopped
	}
}
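The select here generalizes to any "cancellable send": instead of blocking forever on ch <- m, race the send against the context and the component's done channel. As a runnable, generic sketch (hypothetical helper, simplified error):

package main

import (
	"context"
	"errors"
	"fmt"
)

var errStopped = errors.New("stopped") // stand-in for ErrStopped

// sendCtx tries to deliver v on ch, giving up if ctx ends or the
// component shuts down. This is the shape used by step() above.
func sendCtx(ctx context.Context, ch chan<- int, done <-chan struct{}, v int) error {
	select {
	case ch <- v:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-done:
		return errStopped
	}
}

func main() {
	ch := make(chan int, 1)
	done := make(chan struct{})
	fmt.Println(sendCtx(context.Background(), ch, done, 7)) // <nil>
	close(done)
	// Buffer is full, so the send would block; done wins the select.
	fmt.Println(sendCtx(context.Background(), ch, done, 8)) // stopped
}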
Example #13
File: client.go Project: jak-atx/vic
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		err := c.Sync(ctx)
		if err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
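AutoSync is meant to run for the lifetime of the client, with the caller owning the context and stopping it by cancelling. A hypothetical launch site (c is the httpClusterClient from above):

	// Run endpoint syncing in the background every ten seconds until shutdown.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // stops AutoSync, which then returns ctx.Err()

	go func() {
		if err := c.AutoSync(ctx, 10*time.Second); err != nil && err != context.Canceled {
			log.Println("auto sync failed:", err)
		}
	}()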
Example #14
File: node.go Project: dterei/etcd
// Step advances the state machine using m. If the context expires first,
// ctx.Err() is returned.
func (n *node) step(ctx context.Context, m pb.Message) error {
	ch := n.recvc
	if m.Type == pb.MsgProp {
		ch = n.propc
	}

	select {
	case ch <- m:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-n.done:
		return ErrStopped
	}
}
Example #15
File: mutex.go Project: youtube/doorman
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s, err := NewSession(m.client)
	if err != nil {
		return err
	}
	// put self in lock waiters via myKey; oldest waiter holds lock
	m.myKey, m.myRev, err = NewUniqueKey(ctx, m.client, m.pfx, v3.WithLease(s.Lease()))
	if err == nil {
		// wait for deletion of revisions prior to myKey
		err = waitDeletes(ctx, m.client, m.pfx, v3.WithPrefix(), v3.WithRev(m.myRev-1))
	}
	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock()
	default:
	}
	return err
}
Example #16
// Description:
//     A helper routine to renew the TTL of a given key. This routine starts a
//     timer and at every tick attempts to renew the TTL of the given key.
//
// Parameters:
//     @ctx     - A context object used to stop the renewal operation.
//     @key     - The key for which the TTL needs to be renewed.
//     @val     - The value associated with the key.
//     @ttl     - TTL value in seconds.
//     @refresh - Interval, in seconds, at which the TTL will be renewed.
//
// Return value:
//     @out - A channel on which errors will be reported back to the caller.
func (ec *EtcdConnector) RenewTTL(ctx context.Context, key, val string, ttl, refresh time.Duration) <-chan error {
	out := make(chan error)

	// Start a ticker to trigger at every @refresh seconds.
	ticker := time.NewTicker(refresh)

	// Launch a goroutine to handle the periodic TTL renewal of @key.
	go func() {
		opts := &client.SetOptions{PrevExist: client.PrevExist, TTL: ttl}

		// Wait for activity either on the ticker channel or @ctx's channel.
		for {
			select {
			case <-ticker.C:
				// Update the TTL of @key.
				_, err := ec.Set(ctx, key, val, opts)
				if err != nil {
					// On error, stop the ticker, report the error on the
					// outward channel, and return.
					ticker.Stop()
					out <- err
					close(out)
					return
				}
			case <-ctx.Done():
				// Getting here means the caller wants to stop the renewal
				// operation. So stop the ticker, delete @key, close the
				// outward channel, and return.
				ticker.Stop()

				// Deletion is best effort and an optimization. We end up here
				// only when the program is shutting down or a catastrophic
				// failure occurs. If we succeed in deleting @key, entities
				// watching it are notified right away; otherwise they have to
				// wait for the TTL to expire. The delete itself can fail, so
				// it remains best effort.
				ec.Delete(context.Background(), key, &client.DeleteOptions{})
				close(out)
				return
			}
		}
	}()

	return out
}
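The returned channel carries at most one error and is closed when the renewal loop exits, so the caller can drive everything from a single select. A hypothetical call site (key, value, intervals, and shutdownc invented for illustration):

	// Keep /services/api registered until shutdown.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	errc := ec.RenewTTL(ctx, "/services/api", "10.0.0.1:8080", 30*time.Second, 10*time.Second)

	select {
	case err := <-errc:
		log.Println("TTL renewal failed:", err) // renewal goroutine has exited
	case <-shutdownc:
		cancel() // best-effort delete of the key, then errc is closed
		<-errc
	}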
Example #17
File: client.go Project: salatamartin/etcd
func handleKeyWatch(ctx context.Context, w http.ResponseWriter, wa store.Watcher, stream bool, rt etcdserver.RaftTimer) {
	defer wa.Remove()
	ech := wa.EventChan()
	var nch <-chan bool
	if x, ok := w.(http.CloseNotifier); ok {
		nch = x.CloseNotify()
	}

	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
	w.Header().Set("X-Raft-Index", fmt.Sprint(rt.Index()))
	w.Header().Set("X-Raft-Term", fmt.Sprint(rt.Term()))
	w.WriteHeader(http.StatusOK)

	// Ensure headers are flushed early, in case of long polling
	w.(http.Flusher).Flush()

	for {
		select {
		case <-nch:
			// Client closed connection. Nothing to do.
			return
		case <-ctx.Done():
			// Timed out. net/http will close the connection for us, so nothing to do.
			return
		case ev, ok := <-ech:
			if !ok {
				// If the channel is closed, notifications may be arriving
				// faster than we can send them to the client in time. In
				// that case, simply end the stream.
				return
			}
			ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
			if err := json.NewEncoder(w).Encode(ev); err != nil {
				// Should never be reached
				plog.Warningf("error writing event (%v)", err)
				return
			}
			if !stream {
				return
			}
			w.(http.Flusher).Flush()
		}
	}
}
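Aside: this handler predates Go 1.8. In newer net/http, the request's own context is cancelled both when the client disconnects and when the server times the request out, so the CloseNotifier plumbing and the separate ctx argument can in principle collapse into r.Context(). A sketch of that modern shape (not the etcd code):

func handleKeyWatchModern(w http.ResponseWriter, r *http.Request) {
	for {
		select {
		case <-r.Context().Done():
			// Client went away or the request timed out.
			return
			// case ev, ok := <-ech:
			//	... encode and flush events as above ...
		}
	}
}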
Example #18
File: watch.go Project: khogeland/etcd
// watch posts a watch request to run() and waits for a new watcher channel
func (w *watcher) watch(ctx context.Context, key, prefix string, rev int64) <-chan WatchResponse {
	retc := make(chan chan WatchResponse, 1)
	wr := &watchRequest{ctx: ctx, key: key, prefix: prefix, rev: rev, retc: retc}
	// submit request
	select {
	case w.reqc <- wr:
	case <-wr.ctx.Done():
		return nil
	case <-w.donec:
		return nil
	}
	// receive channel
	select {
	case ret := <-retc:
		return ret
	case <-ctx.Done():
		return nil
	case <-w.donec:
		return nil
	}
}
Example #19
File: node.go Project: salatamartin/etcd
// Step advances the state machine using m. If the context expires first,
// ctx.Err() is returned.
func (n *node) step(ctx context.Context, m pb.Message) error {
	ch := n.recvc
	if m.Type == pb.MsgProp {
		ch = n.propc
	}

	select {
	case ch <- m:
		if m.Type == pb.MsgProp {
			//NOTE: this case is accessed before quorum check
			//plog.Infof("message is in channel")
		}
		return nil
	case <-ctx.Done():
		//plog.Infof("context is done, should not happen")
		return ctx.Err()
	case <-n.done:
		//plog.Infof("node stopped")
		return ErrStopped
	}
}
Example #20
File: mutex.go Project: vsayer/etcd
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s, err := NewSession(m.client)
	if err != nil {
		return err
	}
	// put self in lock waiters via myKey; oldest waiter holds lock
	m.myKey, m.myRev, err = NewUniqueKey(ctx, m.kv, m.pfx, v3.WithLease(s.Lease()))
	// wait for lock to become available
	for err == nil {
		// find oldest element in waiters via revision of insertion
		var resp *v3.GetResponse
		resp, err = m.kv.Get(ctx, m.pfx, v3.WithFirstRev()...)
		if err != nil {
			break
		}
		if m.myRev == resp.Kvs[0].CreateRevision {
			// myKey is oldest in waiters; myKey holds the lock now
			return nil
		}
		// otherwise myKey isn't lowest, so there must be a pfx prior to myKey
		opts := append(v3.WithLastRev(), v3.WithRev(m.myRev-1))
		resp, err = m.kv.Get(ctx, m.pfx, opts...)
		if err != nil {
			break
		}
		lastKey := string(resp.Kvs[0].Key)
		// wait for release on prior pfx
		err = waitUpdate(ctx, m.client, lastKey, v3.WithRev(m.myRev))
		// try again in case lastKey left the wait list before acquiring the lock;
		// myKey can only hold the lock if it's the oldest in the list
	}

	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock()
	default:
	}
	return err
}
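From the caller's side the contract is simple: Lock blocks until myKey is the oldest waiter or an error occurs, and a cancelled context leaves no stale lock entry behind. A hypothetical call site that bounds acquisition time:

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := m.Lock(ctx); err != nil {
		return err // timed out or cancelled; the stale lock entry was cleaned up
	}
	defer m.Unlock()
	// ... critical section ...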
Example #21
File: server.go Project: robszumski/etcd
// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error {
	ch := s.w.Register(cc.ID)
	if err := s.node.ProposeConfChange(ctx, cc); err != nil {
		s.w.Trigger(cc.ID, nil)
		return err
	}
	select {
	case x := <-ch:
		if err, ok := x.(error); ok {
			return err
		}
		if x != nil {
			log.Panicf("return type should always be error")
		}
		return nil
	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return parseCtxErr(ctx.Err())
	case <-s.done:
		return ErrStopped
	}
}
Example #22
func (s *EtcdServer) V3DemoDo(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
	r.ID = s.reqIDGen.Next()

	data, err := r.Marshal()
	if err != nil {
		return &pb.EmptyResponse{}, err
	}
	ch := s.w.Register(r.ID)

	s.r.Propose(ctx, data)

	select {
	case x := <-ch:
		resp := x.(proto.Message)
		return resp, nil
	case <-ctx.Done():
		s.w.Trigger(r.ID, nil) // GC wait
		return &pb.EmptyResponse{}, ctx.Err()
	case <-s.done:
		return &pb.EmptyResponse{}, ErrStopped
	}
}
Example #23
File: http.go Project: diffoperator/etcd
func (c *httpClient) Do(ctx context.Context, act HTTPAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-ctx.Done():
		c.transport.CancelRequest(req)
		// wait for request to actually exit before continuing
		<-rtchan
		err = ctx.Err()
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	body, err := ioutil.ReadAll(resp.Body)
	return resp, body, err
}
Example #24
File: watch.go Project: lrita/etcd
// Watch posts a watch request to run() and waits for a new watcher channel
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
	ow := opWatch(key, opts...)

	retc := make(chan chan WatchResponse, 1)
	wr := &watchRequest{
		ctx:            ctx,
		key:            string(ow.key),
		end:            string(ow.end),
		rev:            ow.rev,
		progressNotify: ow.progressNotify,
		retc:           retc,
	}

	ok := false

	// submit request
	select {
	case w.reqc <- wr:
		ok = true
	case <-wr.ctx.Done():
	case <-w.donec:
	}

	// receive channel
	if ok {
		select {
		case ret := <-retc:
			return ret
		case <-ctx.Done():
		case <-w.donec:
		}
	}

	// couldn't create channel; return closed channel
	ch := make(chan WatchResponse)
	close(ch)
	return ch
}
Example #25
File: election.go Project: lrita/etcd
// Campaign puts a value as eligible for the election. It blocks until
// it is elected, an error occurs, or the context is cancelled.
func (e *Election) Campaign(ctx context.Context, val string) error {
	s, serr := NewSession(e.client)
	if serr != nil {
		return serr
	}

	k, rev, err := NewUniqueKV(ctx, e.client, e.keyPrefix, val, v3.WithLease(s.Lease()))
	if err == nil {
		err = waitDeletes(ctx, e.client, e.keyPrefix, v3.WithPrefix(), v3.WithRev(rev-1))
	}

	if err != nil {
		// clean up in case of context cancel
		select {
		case <-ctx.Done():
			e.client.Delete(e.client.Ctx(), k)
		default:
		}
		return err
	}

	e.leaderKey, e.leaderRev, e.leaderSession = k, rev, s
	return nil
}
Example #26
func (n *nodeProposalBlockerRecorder) Propose(ctx context.Context, data []byte) error {
	<-ctx.Done()
	n.Record(testutil.Action{Name: "Propose blocked"})
	return nil
}
Example #27
File: client.go Project: nkhare/etcd
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	var hctx context.Context
	var hcancel context.CancelFunc
	if c.headerTimeout > 0 {
		hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
	} else {
		hctx, hcancel = context.WithCancel(ctx)
	}
	defer hcancel()

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-hctx.Done():
		// cancel and wait for request to actually exit before continuing
		c.transport.CancelRequest(req)
		rtresp := <-rtchan
		resp = rtresp.resp
		switch {
		case ctx.Err() != nil:
			err = ctx.Err()
		case hctx.Err() != nil:
			err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
		default:
			panic("failed to get error from context")
		}
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		resp.Body.Close()
		<-done
		return nil, nil, ctx.Err()
	case <-done:
	}

	return resp, body, err
}
Example #28
File: client.go Project: jak-atx/vic
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
	req := act.HTTPRequest(c.endpoint)

	if err := printcURL(req); err != nil {
		return nil, nil, err
	}

	isWait := false
	if req != nil && req.URL != nil {
		ws := req.URL.Query().Get("wait")
		if len(ws) != 0 {
			var err error
			isWait, err = strconv.ParseBool(ws)
			if err != nil {
				return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
			}
		}
	}

	var hctx context.Context
	var hcancel context.CancelFunc
	if !isWait && c.headerTimeout > 0 {
		hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
	} else {
		hctx, hcancel = context.WithCancel(ctx)
	}
	defer hcancel()

	reqcancel := requestCanceler(c.transport, req)

	rtchan := make(chan roundTripResponse, 1)
	go func() {
		resp, err := c.transport.RoundTrip(req)
		rtchan <- roundTripResponse{resp: resp, err: err}
		close(rtchan)
	}()

	var resp *http.Response
	var err error

	select {
	case rtresp := <-rtchan:
		resp, err = rtresp.resp, rtresp.err
	case <-hctx.Done():
		// cancel and wait for request to actually exit before continuing
		reqcancel()
		rtresp := <-rtchan
		resp = rtresp.resp
		switch {
		case ctx.Err() != nil:
			err = ctx.Err()
		case hctx.Err() != nil:
			err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
		default:
			panic("failed to get error from context")
		}
	}

	// always check for resp nil-ness to deal with possible
	// race conditions between channels above
	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if err != nil {
		return nil, nil, err
	}

	var body []byte
	done := make(chan struct{})
	go func() {
		body, err = ioutil.ReadAll(resp.Body)
		done <- struct{}{}
	}()

	select {
	case <-ctx.Done():
		resp.Body.Close()
		<-done
		return nil, nil, ctx.Err()
	case <-done:
	}

	return resp, body, err
}