Example #1
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
	ops := &sessionOptions{ttl: defaultSessionTTL}
	for _, opt := range opts {
		opt(ops)
	}

	resp, err := client.Grant(client.Ctx(), int64(ops.ttl))
	if err != nil {
		return nil, err
	}
	id := v3.LeaseID(resp.ID)

	ctx, cancel := context.WithCancel(client.Ctx())
	keepAlive, err := client.KeepAlive(ctx, id)
	if err != nil || keepAlive == nil {
		cancel() // release the keepalive context; the session never started
		return nil, err
	}

	donec := make(chan struct{})
	s := &Session{client: client, id: id, cancel: cancel, donec: donec}

	// keep the lease alive until client error or cancelled context
	go func() {
		defer close(donec)
		for range keepAlive {
			// eat messages until keep alive channel closes
		}
	}()

	return s, nil
}
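
For context, a minimal caller of this constructor might look like the sketch below. It is written as if it lived next to NewSession; the endpoint, the WithTTL option, and the Session.Close method are assumptions for illustration, not part of the example above.

func exampleUseSession() error {
	// Assumed endpoint; point this at a reachable etcd member.
	cli, err := v3.New(v3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		return err
	}
	defer cli.Close()

	// WithTTL is assumed to be a SessionOption that overrides defaultSessionTTL.
	s, err := NewSession(cli, WithTTL(30))
	if err != nil {
		return err
	}
	defer s.Close() // assumed to revoke the lease and stop the keepalive goroutine

	// The lease now stays alive until the client fails or its context is
	// cancelled, as implemented by the goroutine above.
	return nil
}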
Example #2
// NewWatchProxy creates a pb.WatchServer that forwards watch requests through
// the given etcd client.
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{
		cw:           c.Watcher,
		ctx:          clientv3.WithRequireLeader(c.Ctx()),
		retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond),
		leaderc:      make(chan struct{}),
	}
	wp.ranges = newWatchRanges(wp)
	go func() {
		// a new stream without any open watchers won't catch a lost leader
		// event, so keep a dedicated watch open to monitor it; the far-future
		// revision means the channel only closes (on leader loss or shutdown)
		// rather than delivering real events
		rev := int64((uint64(1) << 63) - 2)
		for wp.ctx.Err() == nil {
			wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev))
			for range wch {
			}
			wp.mu.Lock()
			close(wp.leaderc)
			wp.leaderc = make(chan struct{})
			wp.mu.Unlock()
			wp.retryLimiter.Wait(wp.ctx)
		}
		wp.mu.Lock()
		<-wp.ctx.Done()
		wp.mu.Unlock()
		wp.wg.Wait()
		wp.ranges.stop()
	}()
	return wp
}
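
Because the returned value implements pb.WatchServer, it can be registered on a gRPC server. The sketch below shows one way to wire it up; the listen address is a placeholder and the grpc/net plumbing is an assumption about the surrounding program, not part of the example.

func serveWatchProxy(c *clientv3.Client) error {
	srv := grpc.NewServer()
	pb.RegisterWatchServer(srv, NewWatchProxy(c))

	// Placeholder address; a real deployment would take this from configuration.
	lis, err := net.Listen("tcp", "127.0.0.1:23790")
	if err != nil {
		return err
	}
	return srv.Serve(lis)
}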
Example #3
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{
		cw: c.Watcher,
		wgs: watchergroups{
			cw:        c.Watcher,
			groups:    make(map[watchRange]*watcherGroup),
			idToGroup: make(map[receiverID]*watcherGroup),
			proxyCtx:  c.Ctx(),
		},
		ctx: c.Ctx(),
	}
	go func() {
		<-wp.ctx.Done()
		wp.wgs.stop()
	}()
	return wp
}
Example #4
// getLeader reads the PD leader record stored at path and returns the leader's
// address along with the etcd revision of the response.
func getLeader(etcdClient *clientv3.Client, path string) (string, int64, error) {
	kv := clientv3.NewKV(etcdClient)
	ctx, cancel := context.WithTimeout(etcdClient.Ctx(), requestTimeout)
	resp, err := kv.Get(ctx, path)
	cancel()
	if err != nil {
		return "", 0, errors.Trace(err)
	}
	if len(resp.Kvs) != 1 {
		return "", 0, errors.Errorf("invalid getLeader resp: %v", resp)
	}

	var leader pdpb.Leader
	if err = leader.Unmarshal(resp.Kvs[0].Value); err != nil {
		return "", 0, errors.Trace(err)
	}
	return leader.GetAddr(), resp.Header.Revision, nil
}
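
A hedged usage sketch for getLeader; the key path below is a placeholder for wherever the cluster actually stores its leader record.

func exampleGetLeader(etcdClient *clientv3.Client) {
	// "/pd/leader" is illustrative; use the path the PD cluster really writes.
	addr, rev, err := getLeader(etcdClient, "/pd/leader")
	if err != nil {
		log.Printf("get leader: %v", err)
		return
	}
	log.Printf("leader %s at revision %d", addr, rev)
}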
Example #5
// NewSession gets the leased session for a client.
func NewSession(client *v3.Client) (*Session, error) {
	clientSessions.mu.Lock()
	defer clientSessions.mu.Unlock()
	if s, ok := clientSessions.sessions[client]; ok {
		return s, nil
	}

	resp, err := client.Grant(client.Ctx(), sessionTTL)
	if err != nil {
		return nil, err
	}
	id := v3.LeaseID(resp.ID)

	ctx, cancel := context.WithCancel(client.Ctx())
	keepAlive, err := client.KeepAlive(ctx, id)
	if err != nil || keepAlive == nil {
		cancel() // release the keepalive context; the session never started
		return nil, err
	}

	donec := make(chan struct{})
	s := &Session{client: client, id: id, cancel: cancel, donec: donec}
	clientSessions.sessions[client] = s

	// keep the lease alive until client error or cancelled context
	go func() {
		defer func() {
			clientSessions.mu.Lock()
			delete(clientSessions.sessions, client)
			clientSessions.mu.Unlock()
			close(donec)
		}()
		for range keepAlive {
			// eat messages until keep alive channel closes
		}
	}()

	return s, nil
}
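
Unlike Example #1, this variant caches one session per client, so repeated calls share a single lease until the keepalive channel closes and the deferred cleanup removes the cache entry. A small illustrative sketch of that behavior:

func exampleSharedSession(cli *v3.Client) error {
	s1, err := NewSession(cli)
	if err != nil {
		return err
	}
	s2, err := NewSession(cli)
	if err != nil {
		return err
	}
	// Both calls return the cached *Session for cli, so they share one lease.
	fmt.Println(s1 == s2) // prints true while the session is still alive
	return nil
}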