Example #1
func NewRaftNode(name string, c *raft.Config, storage *raft.MemoryStorage, lookup NodeLookup, tr Transporter) *raftNode {
	var peers []raft.Peer
	node := raft.StartNode(c, peers)
	var mu sync.Mutex
	removed := make(map[uint64]bool)
	h := Jesteress([]byte(name))

	return &raftNode{
		index: 0,
		term:  0,
		lead:  0,
		hash:  h,

		lt:   time.Now(),
		Node: node,
		cfg:  *c,
		mu:   mu,

		lookup:  lookup,
		removed: removed,

		idgen:       idutil.NewGenerator(uint8(h), time.Now()),
		w:           wait.New(),
		ticker:      time.Tick(500 * time.Millisecond),
		raftStorage: storage,
		storage:     &NoopStorage{},
		transport:   tr,
		applyc:      make(chan apply),
		stopped:     make(chan struct{}),
		done:        make(chan struct{}),
	}
}
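All of the examples in this list funnel into the same constructor, so a minimal standalone sketch may be useful. It assumes etcd's pkg/idutil package as imported below (the import path, and the width of the member-ID parameter, uint8 in older releases and uint16 in newer ones, vary by etcd version); Next is the only method the examples rely on.

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/idutil" // import path varies by etcd version
)

func main() {
	// The member ID seeds the high-order bits of every generated ID,
	// keeping IDs from different members from colliding.
	gen := idutil.NewGenerator(0x12, time.Now())

	// Next returns a new unique uint64 on each call.
	fmt.Println(gen.Next())
	fmt.Println(gen.Next())
}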
Example #2
// TestApplyRepeat tests that the server handles repeated raft messages gracefully
func TestApplyRepeat(t *testing.T) {
	n := newNodeConfChangeCommitterStream()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(st)
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		cfg:      &ServerConfig{},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.applyV2 = &applierV2store{s}
	s.start()
	req := &pb.Request{Method: "QGET", ID: uint64(1)}
	ents := []raftpb.Entry{{Index: 1, Data: pbutil.MustMarshal(req)}}
	n.readyc <- raft.Ready{CommittedEntries: ents}
	// dup msg
	n.readyc <- raft.Ready{CommittedEntries: ents}

	// use a conf change to block until dup msgs are all processed
	cc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2}
	ents = []raftpb.Entry{{
		Index: 2,
		Type:  raftpb.EntryConfChange,
		Data:  pbutil.MustMarshal(cc),
	}}
	n.readyc <- raft.Ready{CommittedEntries: ents}
	// wait for conf change message
	act, err := n.Wait(1)
	// wait for stop message (async to avoid deadlock)
	stopc := make(chan error)
	go func() {
		_, werr := n.Wait(1)
		stopc <- werr
	}()
	s.Stop()

	// only want to confirm etcdserver won't panic; no data to check

	if err != nil {
		t.Fatal(err)
	}
	if len(act) == 0 {
		t.Fatalf("expected len(act)=0, got %d", len(act))
	}

	if err = <-stopc; err != nil {
		t.Fatalf("error on stop (%v)", err)
	}
}
Example #3
// Applied > SnapCount should trigger a SaveSnap event
func TestTriggerSnap(t *testing.T) {
	snapc := 10
	st := store.NewRecorder()
	p := &storageRecorder{}
	srv := &EtcdServer{
		cfg:       &ServerConfig{TickMs: 1},
		snapCount: uint64(snapc),
		r: raftNode{
			Node:        newNodeCommitter(),
			raftStorage: raft.NewMemoryStorage(),
			storage:     p,
			transport:   rafthttp.NewNopTransporter(),
		},
		store:    st,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.start()
	for i := 0; i < snapc+1; i++ {
		srv.Do(context.Background(), pb.Request{Method: "PUT"})
	}

	wcnt := 2 + snapc
	gaction, _ := p.Wait(wcnt)

	srv.Stop()

	// each operation is recorded as a Save
	// (SnapCount+1) * Puts + SaveSnap = (SnapCount+1) * Save + SaveSnap
	if len(gaction) != wcnt {
		t.Fatalf("len(action) = %d, want %d", len(gaction), wcnt)
	}
	if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "SaveSnap"}) {
		t.Errorf("action = %s, want SaveSnap", gaction[wcnt-1])
	}
}
Example #4
// TestPublishRetry tests that publish will keep retrying until it succeeds.
func TestPublishRetry(t *testing.T) {
	n := newNodeRecorderStream()
	srv := &EtcdServer{
		Cfg:      &ServerConfig{TickMs: 1},
		r:        raftNode{Node: n},
		w:        mockwait.NewNop(),
		stopping: make(chan struct{}),
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	// expect multiple proposals from retrying
	ch := make(chan struct{})
	go func() {
		defer close(ch)
		if action, err := n.Wait(2); err != nil {
			t.Errorf("len(action) = %d, want >= 2 (%v)", len(action), err)
		}
		close(srv.stopping)
		// drain remaining actions, if any, so publish can terminate
		for {
			select {
			case <-ch:
				return
			default:
				n.Action()
			}
		}
	}()
	srv.publish(10 * time.Nanosecond)
	ch <- struct{}{}
	<-ch
}
Example #5
// TestSync tests that sync 1. is non-blocking and 2. proposes a SYNC request.
func TestSync(t *testing.T) {
	n := newNodeRecorder()
	srv := &EtcdServer{
		r:        raftNode{Node: n},
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	// check that sync is non-blocking
	done := make(chan struct{})
	go func() {
		srv.sync(10 * time.Second)
		done <- struct{}{}
	}()

	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("sync should be non-blocking but did not return after 1s!")
	}

	action, _ := n.Wait(1)
	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var r pb.Request
	if err := r.Unmarshal(data); err != nil {
		t.Fatalf("unmarshal request error: %v", err)
	}
	if r.Method != "SYNC" {
		t.Errorf("method = %s, want SYNC", r.Method)
	}
}
Example #6
// TestSyncTimeout tests that sync 1. is non-blocking and 2. cancels the
// request after its timeout elapses
func TestSyncTimeout(t *testing.T) {
	n := newProposalBlockerRecorder()
	srv := &EtcdServer{
		r:        raftNode{Node: n},
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.applyV2 = &applierV2store{srv}

	// check that sync is non-blocking
	done := make(chan struct{})
	go func() {
		srv.sync(0)
		done <- struct{}{}
	}()

	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("sync should be non-blocking but did not return after 1s!")
	}

	w := []testutil.Action{{Name: "Propose blocked"}}
	if g, _ := n.Wait(1); !reflect.DeepEqual(g, w) {
		t.Errorf("action = %v, want %v", g, w)
	}
}
Example #7
func NewLessor(lessorID uint8, dr DeleteableRange) *lessor {
	return &lessor{
		leaseMap: make(map[uint64]*lease),
		dr:       dr,
		idgen:    idutil.NewGenerator(lessorID, time.Now()),
	}
}
Example #8
// TestDoLocalAction tests requests which do not need to go through raft to be applied,
// and are served through local data.
func TestDoLocalAction(t *testing.T) {
	tests := []struct {
		req pb.Request

		wresp    Response
		werr     error
		wactions []testutil.Action
	}{
		{
			pb.Request{Method: "GET", ID: 1, Wait: true},
			Response{Watcher: store.NewNopWatcher()}, nil, []testutil.Action{{Name: "Watch"}},
		},
		{
			pb.Request{Method: "GET", ID: 1},
			Response{Event: &store.Event{}}, nil,
			[]testutil.Action{
				{
					Name:   "Get",
					Params: []interface{}{"", false, false},
				},
			},
		},
		{
			pb.Request{Method: "HEAD", ID: 1},
			Response{Event: &store.Event{}}, nil,
			[]testutil.Action{
				{
					Name:   "Get",
					Params: []interface{}{"", false, false},
				},
			},
		},
		{
			pb.Request{Method: "BADMETHOD", ID: 1},
			Response{}, ErrUnknownMethod, []testutil.Action{},
		},
	}
	for i, tt := range tests {
		st := mockstore.NewRecorder()
		srv := &EtcdServer{
			store:    st,
			reqIDGen: idutil.NewGenerator(0, time.Time{}),
		}
		resp, err := srv.Do(context.TODO(), tt.req)

		if err != tt.werr {
			t.Fatalf("#%d: err = %+v, want %+v", i, err, tt.werr)
		}
		if !reflect.DeepEqual(resp, tt.wresp) {
			t.Errorf("#%d: resp = %+v, want %+v", i, resp, tt.wresp)
		}
		gaction := st.Action()
		if !reflect.DeepEqual(gaction, tt.wactions) {
			t.Errorf("#%d: action = %+v, want %+v", i, gaction, tt.wactions)
		}
	}
}
Example #9
// NewNode generates a new Raft node
func NewNode(opts NodeOptions) *Node {
	cfg := opts.Config
	if cfg == nil {
		cfg = DefaultNodeConfig()
	}
	if opts.TickInterval == 0 {
		opts.TickInterval = time.Second
	}
	if opts.SendTimeout == 0 {
		opts.SendTimeout = 2 * time.Second
	}

	raftStore := raft.NewMemoryStorage()

	n := &Node{
		cluster:   membership.NewCluster(2 * cfg.ElectionTick),
		raftStore: raftStore,
		opts:      opts,
		Config: &raft.Config{
			ElectionTick:    cfg.ElectionTick,
			HeartbeatTick:   cfg.HeartbeatTick,
			Storage:         raftStore,
			MaxSizePerMsg:   cfg.MaxSizePerMsg,
			MaxInflightMsgs: cfg.MaxInflightMsgs,
			Logger:          cfg.Logger,
		},
		doneCh:              make(chan struct{}),
		removeRaftCh:        make(chan struct{}),
		stopped:             make(chan struct{}),
		leadershipBroadcast: watch.NewQueue(),
		lastSendToMember:    make(map[uint64]chan struct{}),
		keyRotator:          opts.KeyRotator,
	}
	n.memoryStore = store.NewMemoryStore(n)

	if opts.ClockSource == nil {
		n.ticker = clock.NewClock().NewTicker(opts.TickInterval)
	} else {
		n.ticker = opts.ClockSource.NewTicker(opts.TickInterval)
	}

	n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
	n.wait = newWait()

	n.removeRaftFunc = func(n *Node) func() {
		var removeRaftOnce sync.Once
		return func() {
			removeRaftOnce.Do(func() {
				close(n.removeRaftCh)
			})
		}
	}(n)

	return n
}
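removeRaftFunc above wraps close(n.removeRaftCh) in a sync.Once so that overlapping shutdown paths cannot close the channel twice. A standalone sketch of that idempotent-close pattern, with hypothetical names rather than the swarmkit code:

package main

import (
	"fmt"
	"sync"
)

// closeOnce returns a function that closes ch exactly once, no matter
// how many times it is invoked.
func closeOnce(ch chan struct{}) func() {
	var once sync.Once
	return func() {
		once.Do(func() { close(ch) })
	}
}

func main() {
	done := make(chan struct{})
	stop := closeOnce(done)
	stop()
	stop() // safe: the second call is a no-op instead of a double-close panic
	_, open := <-done
	fmt.Println(open) // false: the channel is closed
}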
Example #10
// handleBackup performs a backup of the etcd data directory.
func handleBackup(c *cli.Context) {
	srcSnap := path.Join(c.String("data-dir"), "member", "snap")
	destSnap := path.Join(c.String("backup-dir"), "member", "snap")
	srcWAL := path.Join(c.String("data-dir"), "member", "wal")
	destWAL := path.Join(c.String("backup-dir"), "member", "wal")

	if err := os.MkdirAll(destSnap, 0700); err != nil {
		log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
	}
	ss := snap.New(srcSnap)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		newss := snap.New(destSnap)
		if err = newss.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}

	w, err := wal.OpenForRead(srcWAL, walsnap)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	wmetadata, state, ents, err := w.ReadAll()
	switch err {
	case nil:
	case wal.ErrSnapshotNotFound:
		fmt.Printf("Failed to find the match snapshot record %+v in wal %v.", walsnap, srcWAL)
		fmt.Printf("etcdctl will add it back. Start auto fixing...")
	default:
		log.Fatal(err)
	}
	var metadata etcdserverpb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	idgen := idutil.NewGenerator(0, time.Now())
	metadata.NodeID = idgen.Next()
	metadata.ClusterID = idgen.Next()

	neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
	if err != nil {
		log.Fatal(err)
	}
	defer neww.Close()
	if err := neww.Save(state, ents); err != nil {
		log.Fatal(err)
	}
	if err := neww.SaveSnapshot(walsnap); err != nil {
		log.Fatal(err)
	}
}
Example #11
func NewLessor(lessorID uint8, b backend.Backend, dr DeleteableRange) *lessor {
	l := &lessor{
		leaseMap: make(map[LeaseID]*lease),
		b:        b,
		dr:       dr,
		idgen:    idutil.NewGenerator(lessorID, time.Now()),
	}
	l.initAndRecover()

	return l
}
Example #12
// NewNode generates a new Raft node
func NewNode(ctx context.Context, opts NewNodeOptions) *Node {
	cfg := opts.Config
	if cfg == nil {
		cfg = DefaultNodeConfig()
	}
	if opts.TickInterval == 0 {
		opts.TickInterval = time.Second
	}

	raftStore := raft.NewMemoryStorage()

	ctx, cancel := context.WithCancel(ctx)

	n := &Node{
		Ctx:            ctx,
		cancel:         cancel,
		cluster:        membership.NewCluster(),
		tlsCredentials: opts.TLSCredentials,
		raftStore:      raftStore,
		Address:        opts.Addr,
		opts:           opts,
		Config: &raft.Config{
			ElectionTick:    cfg.ElectionTick,
			HeartbeatTick:   cfg.HeartbeatTick,
			Storage:         raftStore,
			MaxSizePerMsg:   cfg.MaxSizePerMsg,
			MaxInflightMsgs: cfg.MaxInflightMsgs,
			Logger:          cfg.Logger,
		},
		forceNewCluster:     opts.ForceNewCluster,
		stopCh:              make(chan struct{}),
		doneCh:              make(chan struct{}),
		removeRaftCh:        make(chan struct{}),
		StateDir:            opts.StateDir,
		joinAddr:            opts.JoinAddr,
		sendTimeout:         2 * time.Second,
		leadershipBroadcast: events.NewBroadcaster(),
	}
	n.memoryStore = store.NewMemoryStore(n)

	if opts.ClockSource == nil {
		n.ticker = clock.NewClock().NewTicker(opts.TickInterval)
	} else {
		n.ticker = opts.ClockSource.NewTicker(opts.TickInterval)
	}
	if opts.SendTimeout != 0 {
		n.sendTimeout = opts.SendTimeout
	}

	n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
	n.wait = newWait()

	return n
}
Example #13
func TestDoProposalTimeout(t *testing.T) {
	srv := &EtcdServer{
		cfg:      &ServerConfig{TickMs: 1},
		r:        raftNode{Node: newNodeNop()},
		w:        mockwait.NewNop(),
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	ctx, cancel := context.WithTimeout(context.Background(), 0)
	defer cancel()
	_, err := srv.Do(ctx, pb.Request{Method: "PUT"})
	if err != ErrTimeout {
		t.Fatalf("err = %v, want %v", err, ErrTimeout)
	}
}
Example #14
// Applied > SnapCount should trigger a SaveSnap event
func TestTriggerSnap(t *testing.T) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	defer func() {
		os.RemoveAll(tmpPath)
	}()

	snapc := 10
	st := mockstore.NewRecorder()
	p := mockstorage.NewStorageRecorderStream("")
	srv := &EtcdServer{
		cfg:       &ServerConfig{TickMs: 1},
		snapCount: uint64(snapc),
		r: raftNode{
			Node:        newNodeCommitter(),
			raftStorage: raft.NewMemoryStorage(),
			storage:     p,
			transport:   rafthttp.NewNopTransporter(),
		},
		store:    st,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.applyV2 = &applierV2store{srv}

	srv.kv = mvcc.New(be, &lease.FakeLessor{}, &srv.consistIndex)
	srv.be = be

	srv.start()

	donec := make(chan struct{})
	go func() {
		wcnt := 2 + snapc
		gaction, _ := p.Wait(wcnt)

		// each operation is recorded as a Save
		// (SnapCount+1) * Puts + SaveSnap = (SnapCount+1) * Save + SaveSnap
		if len(gaction) != wcnt {
			t.Fatalf("len(action) = %d, want %d", len(gaction), wcnt)
		}
		if !reflect.DeepEqual(gaction[wcnt-1], testutil.Action{Name: "SaveSnap"}) {
			t.Errorf("action = %s, want SaveSnap", gaction[wcnt-1])
		}
		close(donec)
	}()

	for i := 0; i < snapc+1; i++ {
		srv.Do(context.Background(), pb.Request{Method: "PUT"})
	}

	srv.Stop()
	<-donec
}
Example #15
func TestDoProposalStopped(t *testing.T) {
	srv := &EtcdServer{
		cfg:      &ServerConfig{TickMs: 1},
		r:        raftNode{Node: newNodeNop()},
		w:        mockwait.NewNop(),
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	srv.done = make(chan struct{})
	close(srv.done)
	_, err := srv.Do(context.Background(), pb.Request{Method: "PUT", ID: 1})
	if err != ErrStopped {
		t.Errorf("err = %v, want %v", err, ErrStopped)
	}
}
Example #16
// TestDoBadLocalAction tests server requests which do not need to go through consensus,
// and return errors when they fetch from local data.
func TestDoBadLocalAction(t *testing.T) {
	storeErr := fmt.Errorf("bah")
	tests := []struct {
		req pb.Request

		wactions []testutil.Action
	}{
		{
			pb.Request{Method: "GET", ID: 1, Wait: true},
			[]testutil.Action{{Name: "Watch"}},
		},
		{
			pb.Request{Method: "GET", ID: 1},
			[]testutil.Action{
				{
					Name:   "Get",
					Params: []interface{}{"", false, false},
				},
			},
		},
		{
			pb.Request{Method: "HEAD", ID: 1},
			[]testutil.Action{
				{
					Name:   "Get",
					Params: []interface{}{"", false, false},
				},
			},
		},
	}
	for i, tt := range tests {
		st := mockstore.NewErrRecorder(storeErr)
		srv := &EtcdServer{
			store:    st,
			reqIDGen: idutil.NewGenerator(0, time.Time{}),
		}
		resp, err := srv.Do(context.Background(), tt.req)

		if err != storeErr {
			t.Fatalf("#%d: err = %+v, want %+v", i, err, storeErr)
		}
		if !reflect.DeepEqual(resp, Response{}) {
			t.Errorf("#%d: resp = %+v, want %+v", i, resp, Response{})
		}
		gaction := st.Action()
		if !reflect.DeepEqual(gaction, tt.wactions) {
			t.Errorf("#%d: action = %+v, want %+v", i, gaction, tt.wactions)
		}
	}
}
Example #17
// TestSyncTrigger tests that the server proposes a SYNC request when its sync timer ticks
func TestSyncTrigger(t *testing.T) {
	n := newReadyNode()
	st := make(chan time.Time, 1)
	srv := &EtcdServer{
		cfg: &ServerConfig{TickMs: 1},
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			transport:   rafthttp.NewNopTransporter(),
			storage:     mockstorage.NewStorageRecorder(""),
		},
		store:      mockstore.NewNop(),
		SyncTicker: st,
		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
	}

	// trigger the server to become a leader and accept sync requests
	go func() {
		srv.start()
		n.readyc <- raft.Ready{
			SoftState: &raft.SoftState{
				RaftState: raft.StateLeader,
			},
		}
		// trigger a sync request
		st <- time.Time{}
	}()

	action, _ := n.Wait(1)
	go srv.Stop()

	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var req pb.Request
	if err := req.Unmarshal(data); err != nil {
		t.Fatalf("error unmarshalling data: %v", err)
	}
	if req.Method != "SYNC" {
		t.Fatalf("unexpected proposed request: %#v", req.Method)
	}

	// wait on stop message
	<-n.Chan()
}
Example #18
// TestPublishStopped tests that publish will be stopped if the server is stopped.
func TestPublishStopped(t *testing.T) {
	srv := &EtcdServer{
		cfg: &ServerConfig{TickMs: 1},
		r: raftNode{
			Node:      newNodeNop(),
			transport: rafthttp.NewNopTransporter(),
		},
		cluster:  &membership.RaftCluster{},
		w:        mockwait.NewNop(),
		done:     make(chan struct{}),
		stop:     make(chan struct{}),
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	close(srv.done)
	srv.publish(time.Hour)
}
Example #19
func newLessor(lessorID uint8, b backend.Backend, dr DeleteableRange) *lessor {
	// Ensure the most significant bit of lessorID is 0 so that all the IDs
	// generated by the id generator are greater than 0.
	if int8(lessorID) < 0 {
		lessorID = uint8(-int8(lessorID))
	}

	l := &lessor{
		leaseMap: make(map[LeaseID]*Lease),
		b:        b,
		dr:       dr,
		idgen:    idutil.NewGenerator(lessorID, time.Now()),
	}
	l.initAndRecover()

	return l
}
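The sign normalization at the top of newLessor is easier to see in isolation: with a uint8 prefix, lessorID becomes the high byte of every generated uint64 ID, so a set sign bit would make each LeaseID negative when read as a signed integer. A hypothetical illustration of just that step, not part of the lessor code:

package main

import "fmt"

func main() {
	lessorID := uint8(0x90) // most significant bit set

	// The normalization from newLessor: negate the int8 view, which
	// clears the sign bit for any value other than 0x80.
	if int8(lessorID) < 0 {
		lessorID = uint8(-int8(lessorID))
	}
	fmt.Printf("%#x\n", lessorID) // 0x70: generated IDs stay positive
}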
Example #20
func TestPublish(t *testing.T) {
	n := newNodeRecorder()
	ch := make(chan interface{}, 1)
	// simulate that request has gone through consensus
	ch <- Response{}
	w := wait.NewWithResponse(ch)
	srv := &EtcdServer{
		readych:    make(chan struct{}),
		cfg:        &ServerConfig{TickMs: 1},
		id:         1,
		r:          raftNode{Node: n},
		attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
		cluster:    &membership.RaftCluster{},
		w:          w,
		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
	}
	srv.publish(time.Hour)

	action := n.Action()
	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var r pb.Request
	if err := r.Unmarshal(data); err != nil {
		t.Fatalf("unmarshal request error: %v", err)
	}
	if r.Method != "PUT" {
		t.Errorf("method = %s, want PUT", r.Method)
	}
	wm := membership.Member{ID: 1, Attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}}}
	if wpath := membership.MemberAttributesStorePath(wm.ID); r.Path != wpath {
		t.Errorf("path = %s, want %s", r.Path, wpath)
	}
	var gattr membership.Attributes
	if err := json.Unmarshal([]byte(r.Val), &gattr); err != nil {
		t.Fatalf("unmarshal val error: %v", err)
	}
	if !reflect.DeepEqual(gattr, wm.Attributes) {
		t.Errorf("member = %v, want %v", gattr, wm.Attributes)
	}
}
Example #21
// TestPublishRetry tests that publish will keep retrying until it succeeds.
func TestPublishRetry(t *testing.T) {
	n := newNodeRecorder()
	srv := &EtcdServer{
		cfg:      &ServerConfig{TickMs: 1},
		r:        raftNode{Node: n},
		w:        mockwait.NewNop(),
		done:     make(chan struct{}),
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	// TODO: use fakeClockwork
	time.AfterFunc(10*time.Millisecond, func() { close(srv.done) })
	srv.publish(10 * time.Nanosecond)

	action := n.Action()
	// multiple Proposes
	if cnt := len(action); cnt < 2 {
		t.Errorf("len(action) = %d, want >= 2", cnt)
	}
}
Example #22
func TestDoProposalCancelled(t *testing.T) {
	wait := mockwait.NewRecorder()
	srv := &EtcdServer{
		cfg:      &ServerConfig{TickMs: 1},
		r:        raftNode{Node: newNodeNop()},
		w:        wait,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err := srv.Do(ctx, pb.Request{Method: "PUT"})

	if err != ErrCanceled {
		t.Fatalf("err = %v, want %v", err, ErrCanceled)
	}
	w := []testutil.Action{{Name: "Register"}, {Name: "Trigger"}}
	if !reflect.DeepEqual(wait.Action(), w) {
		t.Errorf("wait.action = %+v, want %+v", wait.Action(), w)
	}
}
Example #23
func newLessor(lessorID uint8, b backend.Backend) *lessor {
	// Ensure the most significant bit of lessorID is 0 so that all the IDs
	// generated by the id generator are greater than 0.
	if int8(lessorID) < 0 {
		lessorID = uint8(-int8(lessorID))
	}

	l := &lessor{
		leaseMap: make(map[LeaseID]*Lease),
		b:        b,
		// expiredC is a small buffered chan to avoid unnecessary blocking.
		expiredC: make(chan []*Lease, 16),
		idgen:    idutil.NewGenerator(lessorID, time.Now()),
	}
	l.initAndRecover()

	go l.runLoop()

	return l
}
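The buffered expiredC above exists so the lessor can hand off expired leases without stalling when the receiver briefly falls behind. A sketch of the non-blocking hand-off such a buffer enables; the helper name and lease type are assumptions, not the actual runLoop:

package main

import "fmt"

// notify tries to hand a batch to the channel without ever blocking;
// with a buffer of 16 this only fails when the receiver is far behind.
func notify(ch chan []string, batch []string) bool {
	select {
	case ch <- batch:
		return true
	default:
		return false // buffer full; retry on the next loop iteration
	}
}

func main() {
	expiredC := make(chan []string, 16)
	fmt.Println(notify(expiredC, []string{"lease-1", "lease-2"})) // true
}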
Example #24
func TestUpdateVersion(t *testing.T) {
	n := newNodeRecorder()
	ch := make(chan interface{}, 1)
	// simulate that request has gone through consensus
	ch <- Response{}
	w := wait.NewWithResponse(ch)
	srv := &EtcdServer{
		id:         1,
		cfg:        &ServerConfig{TickMs: 1},
		r:          raftNode{Node: n},
		attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}},
		cluster:    &membership.RaftCluster{},
		w:          w,
		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
	}
	srv.updateClusterVersion("2.0.0")

	action := n.Action()
	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var r pb.Request
	if err := r.Unmarshal(data); err != nil {
		t.Fatalf("unmarshal request error: %v", err)
	}
	if r.Method != "PUT" {
		t.Errorf("method = %s, want PUT", r.Method)
	}
	if wpath := path.Join(StoreClusterPrefix, "version"); r.Path != wpath {
		t.Errorf("path = %s, want %s", r.Path, wpath)
	}
	if r.Val != "2.0.0" {
		t.Errorf("val = %s, want %s", r.Val, "2.0.0")
	}
}
Example #25
func TestDoProposal(t *testing.T) {
	tests := []pb.Request{
		{Method: "POST", ID: 1},
		{Method: "PUT", ID: 1},
		{Method: "DELETE", ID: 1},
		{Method: "GET", ID: 1, Quorum: true},
	}
	for i, tt := range tests {
		st := mockstore.NewRecorder()
		srv := &EtcdServer{
			cfg: &ServerConfig{TickMs: 1},
			r: raftNode{
				Node:        newNodeCommitter(),
				storage:     mockstorage.NewStorageRecorder(""),
				raftStorage: raft.NewMemoryStorage(),
				transport:   rafthttp.NewNopTransporter(),
			},
			store:    st,
			reqIDGen: idutil.NewGenerator(0, time.Time{}),
		}
		srv.applyV2 = &applierV2store{srv}
		srv.start()
		resp, err := srv.Do(context.Background(), tt)
		srv.Stop()

		action := st.Action()
		if len(action) != 1 {
			t.Errorf("#%d: len(action) = %d, want 1", i, len(action))
		}
		if err != nil {
			t.Fatalf("#%d: err = %v, want nil", i, err)
		}
		wresp := Response{Event: &store.Event{}}
		if !reflect.DeepEqual(resp, wresp) {
			t.Errorf("#%d: resp = %v, want %v", i, resp, wresp)
		}
	}
}
Example #26
// TestUpdateMember tests that UpdateMember can propose and perform a node update.
func TestUpdateMember(t *testing.T) {
	n := newNodeConfChangeCommitterRecorder()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(st)
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.start()
	wm := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://127.0.0.1:1"}}}
	err := s.UpdateMember(context.TODO(), wm)
	gaction := n.Action()
	s.Stop()

	if err != nil {
		t.Fatalf("UpdateMember error: %v", err)
	}
	wactions := []testutil.Action{{Name: "ProposeConfChange:ConfChangeUpdateNode"}, {Name: "ApplyConfChange:ConfChangeUpdateNode"}}
	if !reflect.DeepEqual(gaction, wactions) {
		t.Errorf("action = %v, want %v", gaction, wactions)
	}
	if !reflect.DeepEqual(cl.Member(1234), &wm) {
		t.Errorf("member = %v, want %v", cl.Member(1234), &wm)
	}
}
Example #27
// TestRemoveMember tests that RemoveMember can propose and perform node removal.
func TestRemoveMember(t *testing.T) {
	n := newNodeConfChangeCommitterRecorder()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(st)
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		cfg:      &ServerConfig{},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.start()
	err := s.RemoveMember(context.TODO(), 1234)
	gaction := n.Action()
	s.Stop()

	if err != nil {
		t.Fatalf("RemoveMember error: %v", err)
	}
	wactions := []testutil.Action{{Name: "ProposeConfChange:ConfChangeRemoveNode"}, {Name: "ApplyConfChange:ConfChangeRemoveNode"}}
	if !reflect.DeepEqual(gaction, wactions) {
		t.Errorf("action = %v, want %v", gaction, wactions)
	}
	if cl.Member(1234) != nil {
		t.Errorf("member with id 1234 is not removed")
	}
}
Example #28
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
	st := store.New(StoreClusterPrefix, StoreKeysPrefix)

	var (
		w  *wal.WAL
		n  raft.Node
		s  *raft.MemoryStorage
		id types.ID
		cl *membership.RaftCluster
	)

	if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
		return nil, fmt.Errorf("cannot access data directory: %v", terr)
	}

	haveWAL := wal.Exist(cfg.WALDir())

	if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	ss := snap.New(cfg.SnapDir())

	bepath := path.Join(cfg.SnapDir(), databaseFilename)
	beExist := fileutil.Exist(bepath)

	var be backend.Backend
	beOpened := make(chan struct{})
	go func() {
		be = backend.NewDefaultBackend(bepath)
		beOpened <- struct{}{}
	}()

	select {
	case <-beOpened:
	case <-time.After(time.Second):
		plog.Warningf("another etcd process is running with the same data dir and holding the file lock.")
		plog.Warningf("waiting for it to exit before starting...")
		<-beOpened
	}

	defer func() {
		if err != nil {
			be.Close()
		}
	}()

	prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
	if err != nil {
		return nil, err
	}
	var (
		remotes  []*membership.Member
		snapshot *raftpb.Snapshot
	)

	switch {
	case !haveWAL && !cfg.NewCluster:
		if err = cfg.VerifyJoinExisting(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
		if gerr != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
		}
		if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
			return nil, fmt.Errorf("incompatible with current running cluster")
		}

		remotes = existingCluster.Members()
		cl.SetID(existingCluster.ID())
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.Print()
		id, n, s, w = startNode(cfg, cl, nil)
	case !haveWAL && cfg.NewCluster:
		if err = cfg.VerifyBootstrap(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		m := cl.MemberByName(cfg.Name)
		if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			var str string
			str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
			if err != nil {
				return nil, &DiscoveryError{Op: "join", Err: err}
			}
			var urlsmap types.URLsMap
			urlsmap, err = types.NewURLsMap(str)
			if err != nil {
				return nil, err
			}
			if checkDuplicateURL(urlsmap) {
				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
			}
			if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
				return nil, err
			}
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
	case haveWAL:
		if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}

		if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
			return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
		}

		if cfg.ShouldDiscover() {
			plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		snapshot, err = ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err = st.Recovery(snapshot.Data); err != nil {
				plog.Panicf("recovered store from snapshot error: %v", err)
			}
			plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		cfg.Print()
		if !cfg.ForceNewCluster {
			id, cl, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cl.Recover(api.UpdateCapability)
		if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
			os.RemoveAll(bepath)
			return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
		return nil, fmt.Errorf("cannot access member directory: %v", terr)
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	sstats.Initialize()
	lstats := stats.NewLeaderStats(id.String())

	heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
	srv = &EtcdServer{
		readych:   make(chan struct{}),
		Cfg:       cfg,
		snapCount: cfg.SnapCount,
		errorc:    make(chan error, 1),
		store:     st,
		r: raftNode{
			isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
			Node:        n,
			ticker:      time.Tick(heartbeat),
			// set up contention detectors for raft heartbeat message.
			// expect to send a heartbeat within 2 heartbeat intervals.
			td:          contention.NewTimeoutDetector(2 * heartbeat),
			heartbeat:   heartbeat,
			raftStorage: s,
			storage:     NewStorage(w, ss),
			msgSnapC:    make(chan raftpb.Message, maxInFlightMsgSnap),
			readStateC:  make(chan raft.ReadState, 1),
		},
		id:            id,
		attributes:    membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		cluster:       cl,
		stats:         sstats,
		lstats:        lstats,
		SyncTicker:    time.Tick(500 * time.Millisecond),
		peerRt:        prt,
		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
		forceVersionC: make(chan struct{}),
	}

	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}

	srv.be = be
	minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat

	// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
	// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
	srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
	srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
	if beExist {
		kvindex := srv.kv.ConsistentIndex()
		// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
		// etcd from pre-3.0 release.
		if snapshot != nil && kvindex < snapshot.Metadata.Index {
			if kvindex != 0 {
				return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index)
			}
			plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
		}
	}
	srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())

	srv.authStore = auth.NewAuthStore(srv.be)
	if h := cfg.AutoCompactionRetention; h != 0 {
		srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
		srv.compactor.Run()
	}

	srv.applyV3Base = &applierV3backend{srv}
	if err = srv.restoreAlarms(); err != nil {
		return nil, err
	}

	// TODO: move transport initialization near the definition of remote
	tr := &rafthttp.Transport{
		TLSInfo:     cfg.PeerTLSInfo,
		DialTimeout: cfg.peerDialTimeout(),
		ID:          id,
		URLs:        cfg.PeerURLs,
		ClusterID:   cl.ID(),
		Raft:        srv,
		Snapshotter: ss,
		ServerStats: sstats,
		LeaderStats: lstats,
		ErrorC:      srv.errorc,
	}
	if err = tr.Start(); err != nil {
		return nil, err
	}
	// add all remotes into transport
	for _, m := range remotes {
		if m.ID != id {
			tr.AddRemote(m.ID, m.PeerURLs)
		}
	}
	for _, m := range cl.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr

	return srv, nil
}
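This version of NewServer opens the backend in a goroutine so that startup can warn when another process still holds the file lock instead of hanging silently. A minimal sketch of that shape, with a hypothetical helper name rather than an etcd API:

package main

import (
	"fmt"
	"time"
)

// openWithWarning runs a possibly slow open in a goroutine, warns once
// if it overruns the deadline, then keeps waiting instead of failing.
func openWithWarning(open func(), warnAfter time.Duration) {
	done := make(chan struct{})
	go func() {
		open()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(warnAfter):
		fmt.Println("open is taking long; another process may hold the file lock")
		<-done
	}
}

func main() {
	openWithWarning(func() { time.Sleep(10 * time.Millisecond) }, time.Second)
	fmt.Println("backend opened")
}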
Example #29
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
	st := store.New(StoreClusterPrefix, StoreKeysPrefix)

	var (
		w  *wal.WAL
		n  raft.Node
		s  *raft.MemoryStorage
		id types.ID
		cl *membership.RaftCluster
	)

	if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
		return nil, fmt.Errorf("cannot access data directory: %v", terr)
	}

	// Run the migrations.
	dataVer, err := version.DetectDataDir(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if err = upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
		return nil, err
	}

	haveWAL := wal.Exist(cfg.WALDir())

	if err = os.MkdirAll(cfg.SnapDir(), privateDirMode); err != nil && !os.IsExist(err) {
		plog.Fatalf("create snapshot directory error: %v", err)
	}
	ss := snap.New(cfg.SnapDir())
	be := backend.NewDefaultBackend(path.Join(cfg.SnapDir(), databaseFilename))
	defer func() {
		if err != nil {
			be.Close()
		}
	}()

	prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
	if err != nil {
		return nil, err
	}
	var remotes []*membership.Member
	switch {
	case !haveWAL && !cfg.NewCluster:
		if err = cfg.VerifyJoinExisting(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
		if gerr != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
		}
		if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
			return nil, fmt.Errorf("incomptible with current running cluster")
		}

		remotes = existingCluster.Members()
		cl.SetID(existingCluster.ID())
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.Print()
		id, n, s, w = startNode(cfg, cl, nil)
	case !haveWAL && cfg.NewCluster:
		if err = cfg.VerifyBootstrap(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		m := cl.MemberByName(cfg.Name)
		if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			var str string
			str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
			if err != nil {
				return nil, &DiscoveryError{Op: "join", Err: err}
			}
			var urlsmap types.URLsMap
			urlsmap, err = types.NewURLsMap(str)
			if err != nil {
				return nil, err
			}
			if checkDuplicateURL(urlsmap) {
				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
			}
			if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
				return nil, err
			}
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
	case haveWAL:
		if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}

		if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
			return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
		}

		if cfg.ShouldDiscover() {
			plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		var snapshot *raftpb.Snapshot
		snapshot, err = ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err = st.Recovery(snapshot.Data); err != nil {
				plog.Panicf("recovered store from snapshot error: %v", err)
			}
			plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		cfg.Print()
		if !cfg.ForceNewCluster {
			id, cl, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cl.Recover()
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
		return nil, fmt.Errorf("cannot access member directory: %v", terr)
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	sstats.Initialize()
	lstats := stats.NewLeaderStats(id.String())

	srv = &EtcdServer{
		readych:   make(chan struct{}),
		Cfg:       cfg,
		snapCount: cfg.SnapCount,
		errorc:    make(chan error, 1),
		store:     st,
		r: raftNode{
			Node:        n,
			ticker:      time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
			raftStorage: s,
			storage:     NewStorage(w, ss),
		},
		id:            id,
		attributes:    membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		cluster:       cl,
		stats:         sstats,
		lstats:        lstats,
		SyncTicker:    time.Tick(500 * time.Millisecond),
		peerRt:        prt,
		reqIDGen:      idutil.NewGenerator(uint16(id), time.Now()),
		forceVersionC: make(chan struct{}),
		msgSnapC:      make(chan raftpb.Message, maxInFlightMsgSnap),
	}

	srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}

	srv.be = be
	srv.lessor = lease.NewLessor(srv.be)
	srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
	srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
	srv.authStore = auth.NewAuthStore(srv.be)
	if h := cfg.AutoCompactionRetention; h != 0 {
		srv.compactor = compactor.NewPeriodic(h, srv.kv, srv)
		srv.compactor.Run()
	}

	if err = srv.restoreAlarms(); err != nil {
		return nil, err
	}

	// TODO: move transport initialization near the definition of remote
	tr := &rafthttp.Transport{
		TLSInfo:     cfg.PeerTLSInfo,
		DialTimeout: cfg.peerDialTimeout(),
		ID:          id,
		URLs:        cfg.PeerURLs,
		ClusterID:   cl.ID(),
		Raft:        srv,
		Snapshotter: ss,
		ServerStats: sstats,
		LeaderStats: lstats,
		ErrorC:      srv.errorc,
	}
	if err = tr.Start(); err != nil {
		return nil, err
	}
	// add all remotes into transport
	for _, m := range remotes {
		if m.ID != id {
			tr.AddRemote(m.ID, m.PeerURLs)
		}
	}
	for _, m := range cl.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr

	return srv, nil
}
Example #30
func NewLessor(lessorID uint8) *lessor {
	return &lessor{
		leaseMap: make(map[uint64]*lease),
		idgen:    idutil.NewGenerator(lessorID, time.Now()),
	}
}
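One detail that separates the test examples from the production constructors: the tests build the generator as idutil.NewGenerator(0, time.Time{}), seeding it with the zero time, presumably so the ID sequence is reproducible between runs, while production code seeds with time.Now(). A short sketch of the contrast, under the same idutil assumptions as the first sketch above:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/idutil" // import path varies by etcd version
)

func main() {
	// Zero member ID, zero time: the same sequence on every run, which
	// is what the table-driven server tests rely on.
	testGen := idutil.NewGenerator(0, time.Time{})
	fmt.Println(testGen.Next(), testGen.Next())

	// Production seeds with the wall clock, so IDs differ across restarts.
	prodGen := idutil.NewGenerator(1, time.Now())
	fmt.Println(prodGen.Next())
}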