Example #1
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
	req := &pb.AlarmRequest{
		Action:   pb.AlarmRequest_DEACTIVATE,
		MemberID: am.MemberID,
		Alarm:    am.Alarm,
	}

	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
		ar, err := m.AlarmList(ctx)
		if err != nil {
			return nil, rpctypes.Error(err)
		}
		ret := AlarmResponse{}
		for _, am := range ar.Alarms {
			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
			if derr != nil {
				return nil, rpctypes.Error(derr)
			}
			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
		}
		return &ret, nil
	}

	resp, err := m.getRemote().Alarm(ctx, req)
	if err == nil {
		return (*AlarmResponse)(resp), nil
	}
	if !isHaltErr(ctx, err) {
		m.rc.reconnect(err)
	}
	return nil, rpctypes.Error(err)
}
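When both MemberID and Alarm are zero, the request is treated as a wildcard: the method lists every active alarm and disarms each one recursively. A minimal caller sketch, assuming the exported clientv3 Maintenance API on an already-constructed client cli (the variable names here are illustrative, not part of the example above):

// Disarm every active alarm on every member by passing an empty AlarmMember.
resp, err := cli.AlarmDisarm(context.TODO(), &clientv3.AlarmMember{})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("disarmed %d alarm(s)\n", len(resp.Alarms))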
Example #2
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	stream, err := l.getRemote().LeaseKeepAlive(cctx)
	if err != nil {
		return nil, rpctypes.Error(err)
	}

	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
	if err != nil {
		return nil, rpctypes.Error(err)
	}

	resp, rerr := stream.Recv()
	if rerr != nil {
		return nil, rpctypes.Error(rerr)
	}

	karesp := &LeaseKeepAliveResponse{
		ResponseHeader: resp.GetHeader(),
		ID:             LeaseID(resp.ID),
		TTL:            resp.TTL,
	}
	return karesp, nil
}
Example #3
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
	conn, err := m.c.Dial(endpoint)
	if err != nil {
		return nil, rpctypes.Error(err)
	}
	remote := pb.NewMaintenanceClient(conn)
	resp, err := remote.Status(ctx, &pb.StatusRequest{})
	if err != nil {
		return nil, rpctypes.Error(err)
	}
	return (*StatusResponse)(resp), nil
}
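Status dials the given endpoint directly instead of going through the balancer, which makes it a natural per-endpoint health probe. A hypothetical sketch, assuming a clientv3 client cli whose Endpoints() returns the configured members:

// Query Status on every configured endpoint and report basic health info.
for _, ep := range cli.Endpoints() {
	st, err := cli.Status(context.TODO(), ep)
	if err != nil {
		fmt.Printf("%s: unreachable (%v)\n", ep, err)
		continue
	}
	fmt.Printf("%s: version=%s leader=%x dbSize=%d\n", ep, st.Version, st.Leader, st.DbSize)
}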
Example #4
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
	r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
	resp, err := c.getRemote().MemberAdd(ctx, r)
	if err == nil {
		return (*MemberAddResponse)(resp), nil
	}

	if isHaltErr(ctx, err) {
		return nil, rpctypes.Error(err)
	}

	c.rc.reconnect(err)
	return nil, rpctypes.Error(err)
}
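A hedged usage sketch, assuming the same Cluster API is exposed on a clientv3 client cli; the peer URL is purely illustrative:

// Add a new member by its peer URL(s); the response carries the new member's ID.
resp, err := cli.MemberAdd(context.TODO(), []string{"http://10.0.0.4:2380"})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("added member with ID %x\n", resp.Member.ID)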
Example #5
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
	r := &pb.MemberRemoveRequest{ID: id}
	resp, err := c.getRemote().MemberRemove(ctx, r)
	if err == nil {
		return (*MemberRemoveResponse)(resp), nil
	}

	if isHaltErr(ctx, err) {
		return nil, rpctypes.Error(err)
	}

	c.rc.reconnect(err)
	return nil, rpctypes.Error(err)
}
Example #6
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
	ss, err := m.getRemote().Snapshot(ctx, &pb.SnapshotRequest{})
	if err != nil {
		return nil, rpctypes.Error(err)
	}

	pr, pw := io.Pipe()
	go func() {
		for {
			resp, err := ss.Recv()
			if err != nil {
				pw.CloseWithError(err)
				return
			}
			if resp == nil && err == nil {
				break
			}
			if _, werr := pw.Write(resp.Blob); werr != nil {
				pw.CloseWithError(werr)
				return
			}
		}
		pw.Close()
	}()
	return pr, nil
}
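Because Snapshot returns an io.ReadCloser fed by the pipe above, a caller can stream the backup straight to disk. A hypothetical sketch, assuming a clientv3 client cli; the file name is illustrative:

rc, err := cli.Snapshot(context.TODO())
if err != nil {
	log.Fatal(err)
}
defer rc.Close()

f, err := os.Create("backup.db")
if err != nil {
	log.Fatal(err)
}
defer f.Close()

// io.Copy drains the pipe until the server closes the snapshot stream.
if _, err := io.Copy(f, rc); err != nil {
	log.Fatal(err)
}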
Example #7
func (lc *leaseChecker) checkShortLivedLease(ctx context.Context, leaseID int64) (err error) {
	// retry in case of a transient failure, or when the lease has expired but is not yet
	// revoked because the etcd cluster hasn't had enough time to delete it.
	var resp *pb.LeaseTimeToLiveResponse
	for i := 0; i < retries; i++ {
		resp, err = lc.getLeaseByID(ctx, leaseID)
		if rpctypes.Error(err) == rpctypes.ErrLeaseNotFound {
			return nil
		}
		if err != nil {
			plog.Debugf("retry %d. failed to retrieve lease %v error (%v)", i, leaseID, err)
			continue
		}
		if resp.TTL > 0 {
			plog.Debugf("lease %v is not expired. sleep for %d until it expires.", leaseID, resp.TTL)
			time.Sleep(time.Duration(resp.TTL) * time.Second)
		} else {
			plog.Debugf("retry %d. lease %v is expired but not yet revoked", i, leaseID)
			time.Sleep(time.Second)
		}
		if err = lc.checkLease(ctx, false, leaseID); err != nil {
			continue
		}
		return nil
	}
	return err
}
Example #8
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
	cctx, cancel := context.WithCancel(ctx)
	done := cancelWhenStop(cancel, l.stopCtx.Done())
	defer close(done)

	for {
		r := &pb.LeaseGrantRequest{TTL: ttl}
		resp, err := l.getRemote().LeaseGrant(cctx, r)
		if err == nil {
			gresp := &LeaseGrantResponse{
				ResponseHeader: resp.GetHeader(),
				ID:             LeaseID(resp.ID),
				TTL:            resp.TTL,
				Error:          resp.Error,
			}
			return gresp, nil
		}
		if isHaltErr(cctx, err) {
			return nil, rpctypes.Error(err)
		}

		if nerr := l.switchRemoteAndStream(err); nerr != nil {
			return nil, nerr
		}
	}
}
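A hedged usage sketch for the lease API above, assuming a clientv3 client cli: grant a short lease and attach it to a key so that the key is removed when the lease expires.

// Grant a 10-second lease.
lr, err := cli.Grant(context.TODO(), 10)
if err != nil {
	log.Fatal(err)
}
// Attach the lease to a key; etcd deletes the key once the lease expires.
if _, err := cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lr.ID)); err != nil {
	log.Fatal(err)
}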
Example #9
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
	// connect to grpc stream
	wc, err := w.openWatchClient()
	if err != nil {
		return nil, v3rpc.Error(err)
	}
	// mark all substreams as resuming
	if len(w.substreams)+len(w.resuming) > 0 {
		close(w.resumec)
		w.resumec = make(chan struct{})
		w.joinSubstreams()
		for _, ws := range w.substreams {
			ws.id = -1
			w.resuming = append(w.resuming, ws)
		}
		for _, ws := range w.resuming {
			if ws == nil || ws.closing {
				continue
			}
			ws.donec = make(chan struct{})
			go w.serveSubstream(ws, w.resumec)
		}
	}
	w.substreams = make(map[int64]*watcherStream)
	// receive data from new grpc stream
	go w.serveWatchClient(wc)
	return wc, nil
}
Example #10
func (c *Client) newRetryWrapper() retryRpcFunc {
	return func(rpcCtx context.Context, f rpcFunc) {
		for {
			err := f(rpcCtx)
			if err == nil {
				return
			}
			// only retry if unavailable
			if grpc.Code(err) != codes.Unavailable {
				return
			}
			// always stop retry on etcd errors
			eErr := rpctypes.Error(err)
			if _, ok := eErr.(rpctypes.EtcdError); ok {
				return
			}
			select {
			case <-c.balancer.ConnectNotify():
			case <-rpcCtx.Done():
			case <-c.ctx.Done():
				return
			}
		}
	}
}
Example #11
func (kv *kv) Compact(ctx context.Context, rev int64) error {
	remote, err := kv.getRemote(ctx)
	if err != nil {
		return rpctypes.Error(err)
	}
	defer kv.rc.release()
	_, err = remote.Compact(ctx, &pb.CompactionRequest{Revision: rev})
	if err == nil {
		return nil
	}
	if isHaltErr(ctx, err) {
		return rpctypes.Error(err)
	}
	kv.rc.reconnect(err)
	return rpctypes.Error(err)
}
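A hypothetical caller, assuming the Compact signature shown above is exposed on a clientv3 client cli: read the current revision from a Get response header, then compact the history up to it.

gresp, err := cli.Get(context.TODO(), "foo")
if err != nil {
	log.Fatal(err)
}
// Compact the keyspace history up to the revision observed by the Get.
if err := cli.Compact(context.TODO(), gresp.Header.Revision); err != nil {
	log.Fatal(err)
}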
Example #12
func (auth *auth) RoleGrant(ctx context.Context, name string, key string, permType PermissionType) (*AuthRoleGrantResponse, error) {
	perm := &authpb.Permission{
		Key:      []byte(key),
		PermType: authpb.Permission_Type(permType),
	}
	resp, err := auth.remote.RoleGrant(ctx, &pb.AuthRoleGrantRequest{Name: name, Perm: perm})
	return (*AuthRoleGrantResponse)(resp), rpctypes.Error(err)
}
Example #13
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
	// it is safe to retry on list.
	for {
		resp, err := c.getRemote().MemberList(ctx, &pb.MemberListRequest{})
		if err == nil {
			return (*MemberListResponse)(resp), nil
		}

		if isHaltErr(ctx, err) {
			return nil, rpctypes.Error(err)
		}

		if err = c.rc.reconnectWait(ctx, err); err != nil {
			return nil, rpctypes.Error(err)
		}
	}
}
Example #14
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
	// it is safe to retry on update.
	for {
		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
		resp, err := c.getRemote().MemberUpdate(ctx, r)
		if err == nil {
			return (*MemberUpdateResponse)(resp), nil
		}

		if isHaltErr(ctx, err) {
			return nil, rpctypes.Error(err)
		}

		if err = c.rc.reconnectWait(ctx, err); err != nil {
			return nil, rpctypes.Error(err)
		}
	}
}
Example #15
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
	for {
		resp, err := kv.do(ctx, op)
		if err == nil {
			return resp, nil
		}
		if isHaltErr(ctx, err) {
			return resp, rpctypes.Error(err)
		}
		// do not retry on modifications
		if op.isWrite() {
			kv.rc.reconnect(err)
			return resp, rpctypes.Error(err)
		}
		if nerr := kv.rc.reconnectWait(ctx, err); nerr != nil {
			return resp, rpctypes.Error(nerr)
		}
	}
}
Example #16
// resume creates a new WatchClient with all current watchers reestablished
func (w *watchGrpcStream) resume() (ws pb.Watch_WatchClient, err error) {
	for {
		if ws, err = w.openWatchClient(); err != nil {
			break
		} else if err = w.resumeWatchers(ws); err == nil {
			break
		}
	}
	return ws, v3rpc.Error(err)
}
Example #17
// Err is the error value if this WatchResponse holds an error.
func (wr *WatchResponse) Err() error {
	switch {
	case wr.closeErr != nil:
		return v3rpc.Error(wr.closeErr)
	case wr.CompactRevision != 0:
		return v3rpc.ErrCompacted
	case wr.Canceled:
		return v3rpc.ErrFutureRev
	}
	return nil
}
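A compacted or canceled watch is reported through Err() on the response, so a watch loop should check it on every WatchResponse before processing events. A hypothetical sketch, assuming a clientv3 client cli:

for wr := range cli.Watch(context.TODO(), "foo") {
	if err := wr.Err(); err != nil {
		// e.g. v3rpc.ErrCompacted when watching from an already-compacted revision.
		log.Printf("watch stopped: %v", err)
		break
	}
	for _, ev := range wr.Events {
		fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
	}
}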
Example #18
func (txn *txn) Commit() (*TxnResponse, error) {
	txn.mu.Lock()
	defer txn.mu.Unlock()
	for {
		resp, err := txn.commit()
		if err == nil {
			return resp, err
		}
		if isHaltErr(txn.ctx, err) {
			return nil, rpctypes.Error(err)
		}
		if txn.isWrite {
			txn.kv.rc.reconnect(err)
			return nil, rpctypes.Error(err)
		}
		if nerr := txn.kv.rc.reconnectWait(txn.ctx, err); nerr != nil {
			return nil, nerr
		}
	}
}
Example #19
func toErr(ctx context.Context, err error) error {
	if err == nil {
		return nil
	}
	err = rpctypes.Error(err)
	if ctx.Err() != nil && strings.Contains(err.Error(), "context") {
		err = ctx.Err()
	} else if strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()) {
		err = grpc.ErrClientConnClosing
	}
	return err
}
Example #20
func (l *lessor) switchRemoteAndStream(prevErr error) error {
	for {
		if prevErr != nil {
			err := l.rc.reconnectWait(l.stopCtx, prevErr)
			if err != nil {
				return rpctypes.Error(err)
			}
		}
		if prevErr = l.newStream(); prevErr == nil {
			return nil
		}
	}
}
Example #21
func (lc *leaseChecker) hasLeaseExpired(ctx context.Context, leaseID int64) (bool, error) {
	// keep retrying until the lease's state is known or ctx is canceled
	for ctx.Err() == nil {
		resp, err := lc.getLeaseByID(ctx, leaseID)
		if err == nil {
			return false, nil
		}
		if rpctypes.Error(err) == rpctypes.ErrLeaseNotFound {
			return true, nil
		}
		plog.Warningf("hasLeaseExpired %v resp %v error (%v)", leaseID, resp, err)
	}
	return false, ctx.Err()
}
Example #22
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
	if ctx != nil && ctx.Err() != nil {
		return true
	}
	if err == nil {
		return false
	}
	eErr := rpctypes.Error(err)
	if _, ok := eErr.(rpctypes.EtcdError); ok {
		return eErr != rpctypes.ErrStopped && eErr != rpctypes.ErrNoLeader
	}
	// treat etcdserver errors not recognized by the client as halting
	return isConnClosing(err) || strings.Contains(err.Error(), "etcdserver:")
}
Example #23
// randomlyDropLease drops the lease only when rand.Intn(2) returns 0,
// giving a 50% chance of dropping a lease.
func (ls *leaseStresser) randomlyDropLease(leaseID int64) (bool, error) {
	if rand.Intn(2) != 0 {
		return false, nil
	}
	// keep retrying until a lease is dropped or ctx is being canceled
	for ls.ctx.Err() == nil {
		_, err := ls.lc.LeaseRevoke(ls.ctx, &pb.LeaseRevokeRequest{ID: leaseID})
		if err == nil || rpctypes.Error(err) == rpctypes.ErrLeaseNotFound {
			return true, nil
		}
	}
	plog.Debugf("randomlyDropLease error: (%v)", ls.ctx.Err())
	return false, ls.ctx.Err()
}
Example #24
func toErr(ctx context.Context, err error) error {
	if err == nil {
		return nil
	}
	err = rpctypes.Error(err)
	switch {
	case ctx.Err() != nil && strings.Contains(err.Error(), "context"):
		err = ctx.Err()
	case strings.Contains(err.Error(), ErrNoAvailableEndpoints.Error()):
		err = ErrNoAvailableEndpoints
	case strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()):
		err = grpc.ErrClientConnClosing
	}
	return err
}
Example #25
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
	// mark all substreams as resuming
	close(w.resumec)
	w.resumec = make(chan struct{})
	w.joinSubstreams()
	for _, ws := range w.substreams {
		ws.id = -1
		w.resuming = append(w.resuming, ws)
	}
	// strip out nils, if any
	var resuming []*watcherStream
	for _, ws := range w.resuming {
		if ws != nil {
			resuming = append(resuming, ws)
		}
	}
	w.resuming = resuming
	w.substreams = make(map[int64]*watcherStream)

	// connect to grpc stream while accepting watcher cancelation
	stopc := make(chan struct{})
	donec := w.waitCancelSubstreams(stopc)
	wc, err := w.openWatchClient()
	close(stopc)
	<-donec

	// serve all non-closing streams, even if there's a client error,
	// so that the teardown path can shut down the streams as expected.
	for _, ws := range w.resuming {
		if ws.closing {
			continue
		}
		ws.donec = make(chan struct{})
		go w.serveSubstream(ws, w.resumec)
	}

	if err != nil {
		return nil, v3rpc.Error(err)
	}

	// receive data from new grpc stream
	go w.serveWatchClient(wc)
	return wc, nil
}
Example #26
// openWatchClient retries opening a watch client until it succeeds, hits a halt error, or the stream is stopped
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
	for {
		select {
		case <-w.stopc:
			if err == nil {
				err = context.Canceled
			}
			return nil, err
		default:
		}
		if ws, err = w.remote.Watch(w.ctx); ws != nil && err == nil {
			break
		}
		if isHaltErr(w.ctx, err) {
			return nil, v3rpc.Error(err)
		}
	}
	return ws, nil
}
Example #27
// openWatchClient retries opening a watch client until it succeeds, hits a halt error, or the stream's context is canceled
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
	for {
		select {
		case <-w.ctx.Done():
			if err == nil {
				return nil, w.ctx.Err()
			}
			return nil, err
		default:
		}
		if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
			break
		}
		if isHaltErr(w.ctx, err) {
			return nil, v3rpc.Error(err)
		}
	}
	return ws, nil
}
Example #28
func (l *lessor) newStream() error {
	sctx, cancel := context.WithCancel(l.stopCtx)
	stream, err := l.getRemote().LeaseKeepAlive(sctx)
	if err != nil {
		cancel()
		return rpctypes.Error(err)
	}

	l.mu.Lock()
	defer l.mu.Unlock()
	if l.stream != nil && l.streamCancel != nil {
		l.stream.CloseSend()
		l.streamCancel()
	}

	l.streamCancel = cancel
	l.stream = stream
	return nil
}
Example #29
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
	req := &pb.AlarmRequest{
		Action:   pb.AlarmRequest_GET,
		MemberID: 0,                 // all
		Alarm:    pb.AlarmType_NONE, // all
	}
	for {
		resp, err := m.getRemote().Alarm(ctx, req)
		if err == nil {
			return (*AlarmResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, rpctypes.Error(err)
		}
		if err = m.rc.reconnectWait(ctx, err); err != nil {
			return nil, err
		}
	}
}
Example #30
// openWatchClient retries opening a watch client until it succeeds, hits a halt error, or the stream is stopped
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
	for {
		w.mu.Lock()
		stopc := w.stopc
		w.mu.Unlock()
		if stopc == nil {
			if err == nil {
				err = context.Canceled
			}
			return nil, err
		}
		if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
			break
		}
		if isHaltErr(w.ctx, err) {
			return nil, v3rpc.Error(err)
		}
	}
	return ws, nil
}