Example #1
File: election.go Project: ringtail/etcd
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
	client := e.session.Client()

	defer close(ch)
	for {
		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
		if err != nil {
			return
		}

		var kv *mvccpb.KeyValue

		cctx, cancel := context.WithCancel(ctx)
		if len(resp.Kvs) == 0 {
			// wait for first key put on prefix
			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
			wch := client.Watch(cctx, e.keyPrefix, opts...)

			for kv == nil {
				wr, ok := <-wch
				if !ok || wr.Err() != nil {
					cancel()
					return
				}
				// only accept PUTs; a DELETE will make observe() spin
				for _, ev := range wr.Events {
					if ev.Type == mvccpb.PUT {
						kv = ev.Kv
						break
					}
				}
			}
		} else {
			kv = resp.Kvs[0]
		}

		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision))
		keyDeleted := false
		for !keyDeleted {
			wr, ok := <-wch
			if !ok {
				cancel() // release the watch context before returning
				return
			}
			for _, ev := range wr.Events {
				if ev.Type == mvccpb.DELETE {
					keyDeleted = true
					break
				}
				resp.Header = &wr.Header
				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
				select {
				case ch <- *resp:
				case <-cctx.Done():
					cancel()
					return
				}
			}
		}
		cancel()
	}
}
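The observe loop above feeds leader updates into ch. A minimal consumer sketch, assuming the exported Observe(ctx) wrapper from etcd's clientv3/concurrency package, which runs observe() in a goroutine and returns the receive side of the channel (import path as in pre-1.0 etcd):

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3/concurrency"
)

// followLeader is a sketch, not part of the snippet above: it assumes
// Election.Observe(ctx) starts observe() and returns <-chan v3.GetResponse.
func followLeader(ctx context.Context, e *concurrency.Election) {
	for resp := range e.Observe(ctx) {
		// each response carries the current leader's key and value
		fmt.Printf("leader %s = %s\n", resp.Kvs[0].Key, resp.Kvs[0].Value)
	}
	// the channel closes once ctx is cancelled or the watch is lost
}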
Example #2
// TestWatchWithRequireLeader checks that the watch channel closes when there is no leader.
func TestWatchWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Put a key for the watch without RequireLeader to read as an event.
	// The watchers will be on member[0]; put the key through member[0] to
	// ensure that it receives the update, so that a watch started after
	// killing quorum is guaranteed to have the key.
	liveClient := clus.Client(0)
	if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil {
		t.Fatal(err)
	}

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)

	// wait for election timeout, then member[0] will not have a leader.
	tickDuration := 10 * time.Millisecond
	time.Sleep(time.Duration(3*clus.Members[0].ElectionTicks) * tickDuration)

	chLeader := liveClient.Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
	chNoLeader := liveClient.Watch(context.TODO(), "foo", clientv3.WithRev(1))

	select {
	case resp, ok := <-chLeader:
		if !ok {
			t.Fatalf("expected %v watch channel, got closed channel", rpctypes.ErrNoLeader)
		}
		if resp.Err() != rpctypes.ErrNoLeader {
			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("watch without leader took too long to close")
	}

	select {
	case resp, ok := <-chLeader:
		if ok {
			t.Fatalf("expected closed channel, got response %v", resp)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("waited too long for channel to close")
	}

	if _, ok := <-chNoLeader; !ok {
		t.Fatalf("expected response, got closed channel")
	}
}
Example #3
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(cmd *cobra.Command, args []string) {
	if watchInteractive {
		watchInteractiveFunc(cmd, args)
		return
	}
	if len(args) < 1 || len(args) > 2 {
		ExitWithError(ExitBadArgs, fmt.Errorf("watch in non-interactive mode requires one or two arguments as key or prefix, with range end"))
	}

	opts := []clientv3.OpOption{clientv3.WithRev(watchRev)}
	key := args[0]
	if len(args) == 2 {
		if watchPrefix {
			ExitWithError(ExitBadArgs, fmt.Errorf("`range_end` and `--prefix` cannot be set at the same time, choose one"))
		}
		opts = append(opts, clientv3.WithRange(args[1]))
	}

	if watchPrefix {
		opts = append(opts, clientv3.WithPrefix())
	}
	c := mustClientFromCmd(cmd)
	wc := c.Watch(context.TODO(), key, opts...)
	printWatchCh(wc)
	err := c.Close()
	if err == nil {
		ExitWithError(ExitInterrupted, fmt.Errorf("watch is canceled by the server"))
	}
	ExitWithError(ExitBadConnection, err)
}
Example #4
File: example_kv_test.go Project: Zex/etcd
func ExampleKV_getWithRev() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	presp, err := cli.Put(context.TODO(), "foo", "bar1")
	if err != nil {
		log.Fatal(err)
	}
	_, err = cli.Put(context.TODO(), "foo", "bar2")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "foo", clientv3.WithRev(presp.Header.Revision))
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output: foo : bar1
}
Example #5
File: rwmutex.go Project: lrita/etcd
func (rwm *RWMutex) Lock() error {
	rk, err := NewUniqueEphemeralKey(rwm.client, rwm.key+"/write")
	if err != nil {
		return err
	}
	rwm.myKey = rk

	for {
		// any key with a lower revision number blocks the write lock
		opts := append(v3.WithLastRev(), v3.WithRev(rk.Revision()-1))
		resp, err := rwm.client.Get(rwm.ctx, rwm.key, opts...)
		if err != nil {
			return err
		}
		if len(resp.Kvs) == 0 {
			// no key with a revision before myKey; lock acquired
			break
		}
		if err := rwm.waitOnLowest(); err != nil {
			return err
		}
		// get the new lowest key, and so on, until this key is the only one left
	}

	return nil
}
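A write-lock usage sketch for the recipe above, in the same package as the snippet. The NewRWMutex constructor is hypothetical here; its exact signature varies across etcd versions, and this assumes a client-plus-key form with Unlock() deleting the held key:

// Sketch: guarding a writer with the RWMutex above. NewRWMutex(client, key)
// is an assumed constructor; Unlock() is assumed to delete the held key.
func withWriteLock(client *v3.Client, do func() error) error {
	rwm := NewRWMutex(client, "/rwlock")
	if err := rwm.Lock(); err != nil {
		return err
	}
	defer rwm.Unlock()
	// no key with a lower revision exists now; the writer is exclusive
	return do()
}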
Example #6
File: mutex.go Project: achanda/etcd
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s, serr := NewSession(m.client)
	if serr != nil {
		return serr
	}

	m.myKey = fmt.Sprintf("%s/%x", m.pfx, s.Lease())
	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
	// put self in lock waiters via myKey; oldest waiter holds lock
	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
	// reuse key in case this session already holds the lock
	get := v3.OpGet(m.myKey)
	resp, err := m.client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
	if err != nil {
		return err
	}
	m.myRev = resp.Header.Revision
	if !resp.Succeeded {
		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
	}

	// wait for deletion revisions prior to myKey
	err = waitDeletes(ctx, m.client, m.pfx, v3.WithPrefix(), v3.WithRev(m.myRev-1))
	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock()
	default:
	}
	return err
}
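A usage sketch for this Lock, in the same package as the snippet; it assumes the old-style NewMutex(client, pfx) constructor from this era of the concurrency package (Lock creates its own session, and Unlock takes no context):

// Sketch: serializing a critical section with the Mutex above. The
// NewMutex(client, pfx) constructor is assumed from the same etcd era.
func doExclusive(ctx context.Context, client *v3.Client) error {
	m := NewMutex(client, "/my-lock/")
	if err := m.Lock(ctx); err != nil {
		return err
	}
	defer m.Unlock()
	// critical section: only the oldest waiter under /my-lock/ gets here
	return nil
}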
Example #7
File: client.go Project: yubobo/tidb
func (c *client) watchLeader(leaderPath string, revision int64) {
	defer c.wg.Done()
WATCH:
	for {
		log.Infof("[pd] start watch pd leader on path %v, revision %v", leaderPath, revision)
		rch := c.etcdClient.Watch(context.Background(), leaderPath, clientv3.WithRev(revision))
		select {
		case resp := <-rch:
			if resp.Canceled {
				log.Warn("[pd] leader watcher canceled")
				continue WATCH
			}
			leaderAddr, rev, err := getLeader(c.etcdClient, leaderPath)
			if err != nil {
				log.Warn(err)
				continue WATCH
			}
			log.Infof("[pd] found new pd-server leader addr: %v", leaderAddr)
			c.workerMutex.Lock()
			c.worker.stop(errors.New("[pd] leader change"))
			c.worker = newRPCWorker(leaderAddr, c.clusterID)
			c.workerMutex.Unlock()
			revision = rev
		case <-c.quit:
			return
		}
	}
}
Example #8
File: key.go Project: siddontang/etcd
// waitDeletes efficiently waits until all keys matched by Get(key, opts...) are deleted
func waitDeletes(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
	getOpts := []v3.OpOption{v3.WithSort(v3.SortByCreatedRev, v3.SortAscend)}
	getOpts = append(getOpts, opts...)
	resp, err := client.Get(ctx, key, getOpts...)
	maxRev := int64(math.MaxInt64)
	getOpts = append(getOpts, v3.WithRev(0))
	for err == nil {
		for len(resp.Kvs) > 0 {
			i := len(resp.Kvs) - 1
			if resp.Kvs[i].CreateRevision <= maxRev {
				break
			}
			resp.Kvs = resp.Kvs[:i]
		}
		if len(resp.Kvs) == 0 {
			break
		}
		lastKV := resp.Kvs[len(resp.Kvs)-1]
		maxRev = lastKV.CreateRevision
		err = waitDelete(ctx, client, string(lastKV.Key), maxRev)
		if err != nil || len(resp.Kvs) == 1 {
			break
		}
		getOpts = append(getOpts, v3.WithLimit(int64(len(resp.Kvs)-1)))
		resp, err = client.Get(ctx, key, getOpts...)
	}
	return err
}
Example #9
File: watch.go Project: hongchaodeng/etcd
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{
		cw:           c.Watcher,
		ctx:          clientv3.WithRequireLeader(c.Ctx()),
		retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond),
		leaderc:      make(chan struct{}),
	}
	wp.ranges = newWatchRanges(wp)
	go func() {
		// a new stream without any open watchers won't catch a
		// lost leader event, so keep a special watch to monitor it
		// rev is math.MaxInt64 - 1: a far-future revision, so this watch
		// delivers no events and only ends on an error such as leader loss
		rev := int64((uint64(1) << 63) - 2)
		for wp.ctx.Err() == nil {
			wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev))
			for range wch {
			}
			wp.mu.Lock()
			close(wp.leaderc)
			wp.leaderc = make(chan struct{})
			wp.mu.Unlock()
			wp.retryLimiter.Wait(wp.ctx)
		}
		wp.mu.Lock()
		<-wp.ctx.Done()
		wp.mu.Unlock()
		wp.wg.Wait()
		wp.ranges.stop()
	}()
	return wp
}
Example #10
File: watch.go Project: tamird/etcd
// WaitEvents waits on a key until it observes the given events and returns the final one.
func WaitEvents(c *clientv3.Client, key string, rev int64, evs []storagepb.Event_EventType) (*storagepb.Event, error) {
	wc := c.Watch(context.Background(), key, clientv3.WithRev(rev))
	if wc == nil {
		return nil, ErrNoWatcher
	}
	return waitEvents(wc, evs), nil
}
Example #11
File: watch.go Project: achanda/etcd
func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
	wc := c.Watch(context.Background(), prefix, clientv3.WithPrefix(), clientv3.WithRev(rev))
	if wc == nil {
		return nil, ErrNoWatcher
	}
	return waitEvents(wc, evs), nil
}
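WaitEvents and WaitPrefixEvents both delegate to an unexported waitEvents helper that the snippets do not include. A plausible sketch of it, modeled on etcd's recipes package: scan watch responses, match the expected event types in sequence, and return the final matching event:

// Sketch of the waitEvents helper assumed by the two functions above.
// Returns the last matched event, or nil if the watch channel closes
// before the whole sequence of expected event types is observed.
func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event {
	i := 0
	for wresp := range wc {
		for _, ev := range wresp.Events {
			if ev.Type == evs[i] {
				i++
				if i == len(evs) {
					return ev
				}
			}
		}
	}
	return nil
}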
Example #12
func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) *watchBroadcast {
	cctx, cancel := context.WithCancel(wp.ctx)
	wb := &watchBroadcast{
		cancel:    cancel,
		nextrev:   w.nextrev,
		receivers: make(map[*watcher]struct{}),
		donec:     make(chan struct{}),
	}
	wb.add(w)
	go func() {
		defer close(wb.donec)
		// loop because leader loss will close channel
		for cctx.Err() == nil {
			wch := wp.cw.Watch(cctx, w.wr.key,
				clientv3.WithRange(w.wr.end),
				clientv3.WithProgressNotify(),
				clientv3.WithCreatedNotify(),
				clientv3.WithRev(wb.nextrev),
				clientv3.WithPrevKV(),
			)
			for wr := range wch {
				wb.bcast(wr)
				update(wb)
			}
			wp.retryLimiter.Wait(cctx)
		}
	}()
	return wb
}
Example #13
func (c *cluster) checkCompact(rev int64) error {
	if rev == 0 {
		return nil
	}
	for _, u := range c.GRPCURLs {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints:   []string{u},
			DialTimeout: 5 * time.Second,
		})
		if err != nil {
			return fmt.Errorf("%v (endpoint %s)", err, u)
		}

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		wch := cli.Watch(ctx, "\x00", clientv3.WithFromKey(), clientv3.WithRev(rev-1))
		wr, ok := <-wch
		cancel()

		cli.Close()

		if !ok {
			return fmt.Errorf("watch channel terminated (endpoint %s)", u)
		}
		if wr.CompactRevision != rev {
			return fmt.Errorf("got compact revision %v, wanted %v (endpoint %s)", wr.CompactRevision, rev, u)
		}
	}
	return nil
}
Example #14
// startWatching does:
// - get current objects if initialRev=0; set initialRev to current rev
// - watch on given key and send events to process.
func (wc *watchChan) startWatching(watchClosedCh chan struct{}) {
	if wc.initialRev == 0 {
		if err := wc.sync(); err != nil {
			glog.Errorf("failed to sync with latest state: %v", err)
			wc.sendError(err)
			return
		}
	}
	opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()}
	if wc.recursive {
		opts = append(opts, clientv3.WithPrefix())
	}
	wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...)
	for wres := range wch {
		if wres.Err() != nil {
			err := wres.Err()
			// If there is an error on the server (e.g. compaction), the channel returns it before closing.
			glog.Errorf("watch chan error: %v", err)
			wc.sendError(err)
			return
		}
		for _, e := range wres.Events {
			wc.sendEvent(parseEvent(e))
		}
	}
	// Reaching this point means the client side ended the watch,
	// e.g. by cancelling the context or closing the client.
	// If this watch chan broke while the context wasn't cancelled,
	// other goroutines would still hang, so notify the main thread
	// that this goroutine has exited.
	close(watchClosedCh)
}
Example #15
func TestCompact(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)
	client := cluster.RandClient()
	ctx := context.Background()

	putResp, err := client.Put(ctx, "/somekey", "data")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	putResp1, err := client.Put(ctx, "/somekey", "data2")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	_, _, err = compact(ctx, client, 0, putResp1.Header.Revision)
	if err != nil {
		t.Fatalf("compact failed: %v", err)
	}

	obj, err := client.Get(ctx, "/somekey", clientv3.WithRev(putResp.Header.Revision))
	if err != etcdrpc.ErrCompacted {
		t.Errorf("Expecting ErrCompacted, but get=%v err=%v", obj, err)
	}
}
Example #16
// startWatching does:
// - get current objects if initialRev=0; set initialRev to current rev
// - watch on given key and send events to process.
func (wc *watchChan) startWatching() {
	if wc.initialRev == 0 {
		if err := wc.sync(); err != nil {
			glog.Errorf("failed to sync with latest state: %v", err)
			wc.sendError(err)
			return
		}
	}
	opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1)}
	if wc.recursive {
		opts = append(opts, clientv3.WithPrefix())
	}
	wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...)
	for wres := range wch {
		if wres.Err() != nil {
			err := wres.Err()
			// If there is an error on the server (e.g. compaction), the channel returns it before closing.
			glog.Errorf("watch chan error: %v", err)
			wc.sendError(err)
			return
		}
		for _, e := range wres.Events {
			wc.sendEvent(parseEvent(e))
		}
	}
}
Example #17
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(cmd *cobra.Command, args []string) {
	if watchInteractive {
		watchInteractiveFunc(cmd, args)
		return
	}

	if len(args) != 1 {
		ExitWithError(ExitBadArgs, fmt.Errorf("watch in non-interactive mode requires an argument as key or prefix"))
	}

	c := mustClientFromCmd(cmd)
	w := clientv3.NewWatcher(c)

	opts := []clientv3.OpOption{clientv3.WithRev(watchRev)}
	if watchPrefix {
		opts = append(opts, clientv3.WithPrefix())
	}
	wc := w.Watch(context.TODO(), args[0], opts...)
	printWatchCh(wc)
	err := w.Close()
	if err == nil {
		ExitWithError(ExitInterrupted, fmt.Errorf("watch is canceled by the server"))
	}
	ExitWithError(ExitBadConnection, err)
}
Example #18
File: watch_test.go Project: pulcy/robin
func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// each unique context "%v" has a unique grpc stream
	n := 100
	ctxs, ctxc := make([]context.Context, 5), make([]chan struct{}, 5)
	for i := range ctxs {
		// make "%v" unique
		ctxs[i] = context.WithValue(context.TODO(), "key", i)
		// limits the maximum number of outstanding watchers per stream
		ctxc[i] = make(chan struct{}, 2)
	}

	// issue concurrent watches on "abc" with cancel
	cli := clus.RandClient()
	if _, err := cli.Put(context.TODO(), "abc", "def"); err != nil {
		t.Fatal(err)
	}
	ch := make(chan struct{}, n)
	for i := 0; i < n; i++ {
		go func() {
			defer func() { ch <- struct{}{} }()
			idx := rand.Intn(len(ctxs))
			ctx, cancel := context.WithCancel(ctxs[idx])
			ctxc[idx] <- struct{}{}
			wch := cli.Watch(ctx, "abc", clientv3.WithRev(1))
			f(clus)
			select {
			case _, ok := <-wch:
				if !ok {
					t.Fatalf("unexpected closed channel %p", wch)
				}
			// may take a second or two to reestablish a watcher because of
			// grpc backoff policies for disconnects
			case <-time.After(5 * time.Second):
				t.Errorf("timed out waiting for watch on %p", wch)
			}
			// randomize how cancel overlaps with watch creation
			if rand.Intn(2) == 0 {
				<-ctxc[idx]
				cancel()
			} else {
				cancel()
				<-ctxc[idx]
			}
		}()
	}
	// join on watches
	for i := 0; i < n; i++ {
		select {
		case <-ch:
		case <-time.After(5 * time.Second):
			t.Fatalf("timed out waiting for completed watch")
		}
	}
}
Example #19
File: syncer.go Project: vsayer/etcd
func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) {
	respchan := make(chan clientv3.GetResponse, 1024)
	errchan := make(chan error, 1)

	kapi := clientv3.NewKV(s.c)
	// if rev is not specified, we will choose the most recent revision.
	if s.rev == 0 {
		resp, err := kapi.Get(ctx, "foo")
		if err != nil {
			errchan <- err
			close(respchan)
			close(errchan)
			return respchan, errchan
		}
		s.rev = resp.Header.Revision
	}

	go func() {
		defer close(respchan)
		defer close(errchan)

		var key string

		opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)}

		if len(s.prefix) == 0 {
			// If len(s.prefix) == 0, we will sync the entire key-value space.
			// We then range from the smallest key (0x00) to the end.
			opts = append(opts, clientv3.WithFromKey())
			key = "\x00"
		} else {
			// If len(s.prefix) != 0, we will sync the key-value space with the given prefix.
			// We then range from the prefix to the next prefix if it exists, or from the
			// prefix to the end if the next prefix does not exist.
			opts = append(opts, clientv3.WithPrefix())
			key = s.prefix
		}

		for {
			resp, err := kapi.Get(ctx, key, opts...)
			if err != nil {
				errchan <- err
				return
			}

			respchan <- (clientv3.GetResponse)(*resp)

			if !resp.More {
				return
			}
			// move to next key
			key = string(append(resp.Kvs[len(resp.Kvs)-1].Key, 0))
		}
	}()

	return respchan, errchan
}
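A consumer sketch for SyncBase, assuming the Syncer interface from etcd's mirror package (with fmt imported); respchan closes when the paginated Get loop finishes, and errchan yields at most one error:

// Sketch: draining a base sync. Pages arrive on respchan until it closes;
// errchan then yields the Get error, or nil (closed empty) on success.
func dumpBase(ctx context.Context, s Syncer) error {
	respchan, errchan := s.SyncBase(ctx)
	for resp := range respchan {
		for _, kv := range resp.Kvs {
			fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
		}
	}
	return <-errchan
}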
Example #20
func watchInteractiveFunc(cmd *cobra.Command, args []string) {
	c := mustClientFromCmd(cmd)

	reader := bufio.NewReader(os.Stdin)

	for {
		l, err := reader.ReadString('\n')
		if err != nil {
			ExitWithError(ExitInvalidInput, fmt.Errorf("Error reading watch request line: %v", err))
		}
		l = strings.TrimSuffix(l, "\n")

		args := argify(l)
		if len(args) < 2 {
			fmt.Fprintf(os.Stderr, "Invalid command %s (command type or key is not provided)\n", l)
			continue
		}

		if args[0] != "watch" {
			fmt.Fprintf(os.Stderr, "Invalid command %s (only support watch)\n", l)
			continue
		}

		flagset := NewWatchCommand().Flags()
		err = flagset.Parse(args[1:])
		if err != nil {
			fmt.Fprintf(os.Stderr, "Invalid command %s (%v)\n", l, err)
			continue
		}
		moreargs := flagset.Args()
		if len(moreargs) < 1 || len(moreargs) > 2 {
			fmt.Fprintf(os.Stderr, "Invalid command %s (Too few or many arguments)\n", l)
			continue
		}
		var key string
		_, err = fmt.Sscanf(moreargs[0], "%q", &key)
		if err != nil {
			key = moreargs[0]
		}
		opts := []clientv3.OpOption{clientv3.WithRev(watchRev)}
		if len(moreargs) == 2 {
			if watchPrefix {
				fmt.Fprintf(os.Stderr, "`range_end` and `--prefix` cannot be set at the same time, choose one\n")
				continue
			}
			opts = append(opts, clientv3.WithRange(moreargs[1]))
		}
		if watchPrefix {
			opts = append(opts, clientv3.WithPrefix())
		}
		ch := c.Watch(context.TODO(), key, opts...)
		go printWatchCh(ch)
	}
}
Example #21
// snapshotToStdout streams a snapshot over stdout
func snapshotToStdout(c *clientv3.Client) {
	// must explicitly fetch first revision since no retry on stdout
	wr := <-c.Watch(context.TODO(), "", clientv3.WithPrefix(), clientv3.WithRev(1))
	if wr.Err() == nil {
		wr.CompactRevision = 1
	}
	if rev := snapshot(os.Stdout, c, wr.CompactRevision+1); rev != 0 {
		err := fmt.Errorf("snapshot interrupted by compaction %v", rev)
		ExitWithError(ExitInterrupted, err)
	}
	os.Stdout.Sync()
}
Example #22
// TestWatchResumeCompacted checks that the watcher gracefully closes when it
// tries to resume to a revision that has been compacted out of the store.
func TestWatchResumeCompacted(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// create a waiting watcher at rev 1
	w := clientv3.NewWatcher(clus.Client(0))
	defer w.Close()
	wch := w.Watch(context.Background(), "foo", clientv3.WithRev(1))
	select {
	case w := <-wch:
		t.Errorf("unexpected message from wch %v", w)
	default:
	}
	clus.Members[0].Stop(t)

	ticker := time.After(time.Second * 10)
	for clus.WaitLeader(t) <= 0 {
		select {
		case <-ticker:
			t.Fatalf("failed to wait for new leader")
		default:
			time.Sleep(10 * time.Millisecond)
		}
	}

	// put some data and compact away
	kv := clientv3.NewKV(clus.Client(1))
	for i := 0; i < 5; i++ {
		if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
			t.Fatal(err)
		}
	}
	if _, err := kv.Compact(context.TODO(), 3); err != nil {
		t.Fatal(err)
	}

	clus.Members[0].Restart(t)

	// get compacted error message
	wresp, ok := <-wch
	if !ok {
		t.Fatalf("expected wresp, but got closed channel")
	}
	if wresp.Err() != rpctypes.ErrCompacted {
		t.Fatalf("wresp.Err() expected %v, but got %v", rpctypes.ErrCompacted, wresp.Err())
	}
	// ensure the channel is closed
	if wresp, ok = <-wch; ok {
		t.Fatalf("expected closed channel, but got %v", wresp)
	}
}
Example #23
File: mutex.go Project: vsayer/etcd
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s, err := NewSession(m.client)
	if err != nil {
		return err
	}
	// put self in lock waiters via myKey; oldest waiter holds lock
	m.myKey, m.myRev, err = NewUniqueKey(ctx, m.kv, m.pfx, v3.WithLease(s.Lease()))
	// wait for lock to become available
	for err == nil {
		// find oldest element in waiters via revision of insertion
		var resp *v3.GetResponse
		resp, err = m.kv.Get(ctx, m.pfx, v3.WithFirstRev()...)
		if err != nil {
			break
		}
		if m.myRev == resp.Kvs[0].CreateRevision {
			// myKey is oldest in waiters; myKey holds the lock now
			return nil
		}
		// otherwise myKey isn't lowest, so there must be a key under pfx prior to myKey
		opts := append(v3.WithLastRev(), v3.WithRev(m.myRev-1))
		resp, err = m.kv.Get(ctx, m.pfx, opts...)
		if err != nil {
			break
		}
		lastKey := string(resp.Kvs[0].Key)
		// wait for release on prior pfx
		err = waitUpdate(ctx, m.client, lastKey, v3.WithRev(m.myRev))
		// try again in case lastKey left the wait list before acquiring the lock;
		// myKey can only hold the lock if it's the oldest in the list
	}

	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock()
	default:
	}
	return err
}
Example #24
File: watch_get.go Project: CliffYuan/etcd
func doUnsyncWatch(stream v3.Watcher, rev int64, f func()) {
	wch := stream.Watch(context.TODO(), "watchkey", v3.WithRev(rev))
	if wch == nil {
		panic("could not open watch channel")
	}
	firstWatch.Do(func() { go f() })
	i := 0
	for i < watchEvents {
		wev := <-wch
		i += len(wev.Events)
		bar.Add(len(wev.Events))
	}
	wg.Done()
}
Example #25
// snapshotToStdout streams a snapshot over stdout
func snapshotToStdout(c *clientv3.Client) {
	// must explicitly fetch first revision since no retry on stdout
	wapi := clientv3.NewWatcher(c)
	defer wapi.Close()
	wr := <-wapi.Watch(context.TODO(), "", clientv3.WithPrefix(), clientv3.WithRev(1))
	if len(wr.Events) > 0 {
		wr.CompactRevision = 1
	}
	if rev := snapshot(os.Stdout, c, wr.CompactRevision+1); rev != 0 {
		err := fmt.Errorf("snapshot interrupted by compaction %v", rev)
		ExitWithError(ExitInterrupted, err)
	}
	os.Stdout.Sync()
}
Example #26
File: rwmutex.go Project: lrita/etcd
func (rwm *RWMutex) waitOnLowest() error {
	// must block; fetch the key created just before our ephemeral key and wait on it
	opts := append(v3.WithLastRev(), v3.WithRev(rwm.myKey.Revision()-1))
	lastKey, err := rwm.client.Get(rwm.ctx, rwm.key, opts...)
	if err != nil {
		return err
	}
	// wait for release on prior key
	_, err = WaitEvents(
		rwm.client,
		string(lastKey.Kvs[0].Key),
		rwm.myKey.Revision(),
		[]storagepb.Event_EventType{storagepb.DELETE})
	return err
}
Example #27
File: key.go Project: siddontang/etcd
func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()
	wch := client.Watch(cctx, key, v3.WithRev(rev))
	for wr := range wch {
		for _, ev := range wr.Events {
			if ev.Type == storagepb.DELETE {
				return nil
			}
		}
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	return fmt.Errorf("lost watcher waiting for delete")
}
Example #28
File: watch.go Project: yuya008/etcd
func (sws *serverWatchStream) addDedicatedWatcher(w watcher, rev int64) {
	sws.mu.Lock()
	defer sws.mu.Unlock()

	ctx, cancel := context.WithCancel(context.Background())

	wch := sws.c.Watch(ctx,
		w.wr.key, clientv3.WithRange(w.wr.end),
		clientv3.WithRev(rev),
		clientv3.WithProgressNotify(),
	)

	ws := newWatcherSingle(wch, cancel, w, sws)
	sws.singles[w.id] = ws
	go ws.run()
}
Example #29
File: election.go Project: vsayer/etcd
func (e *Election) waitLeadership(tryKey *EphemeralKV) error {
	opts := append(v3.WithLastCreate(), v3.WithRev(tryKey.Revision()-1))
	resp, err := e.kv.Get(e.ctx, e.keyPrefix, opts...)
	if err != nil {
		return err
	} else if len(resp.Kvs) == 0 {
		// nothing before tryKey => have leadership
		return nil
	}
	_, err = WaitEvents(
		e.client,
		string(resp.Kvs[0].Key),
		tryKey.Revision(),
		[]storagepb.Event_EventType{storagepb.DELETE})
	return err
}
Example #30
File: mutex.go Project: youtube/doorman
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s, err := NewSession(m.client)
	if err != nil {
		return err
	}
	// put self in lock waiters via myKey; oldest waiter holds lock
	m.myKey, m.myRev, err = NewUniqueKey(ctx, m.client, m.pfx, v3.WithLease(s.Lease()))
	// wait for deletion revisions prior to myKey
	err = waitDeletes(ctx, m.client, m.pfx, v3.WithPrefix(), v3.WithRev(m.myRev-1))
	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock()
	default:
	}
	return err
}