Example #1
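An early revision of etcd's EtcdServer.run loop: each iteration ticks the raft node, persists the hard state and new entries, sends outbound messages, applies committed entries in order, and keeps the sync ticker enabled only while this node is leader (the nil-channel idiom it uses for syncC is sketched after the code).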
func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	for {
		select {
		case <-s.Ticker:
			s.Node.Tick()
		case rd := <-s.Node.Ready():
			s.Save(rd.HardState, rd.Entries)
			s.Send(rd.Messages)

			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			for _, e := range rd.CommittedEntries {
				var r pb.Request
				if err := r.Unmarshal(e.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				s.w.Trigger(r.Id, s.apply(r))
			}

			if rd.SoftState != nil {
				if rd.RaftState == raft.StateLeader {
					syncC = s.SyncTicker
				} else {
					syncC = nil
				}
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-s.done:
			return
		}
	}
}
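
The syncC gating above relies on a standard Go idiom: receiving from a nil channel blocks forever, so a select case on a nil channel is effectively disabled. A minimal, standalone demonstration of that idiom (not etcd code):

package main

import (
	"fmt"
	"time"
)

func main() {
	// syncC starts nil, so the sync case below can never fire.
	var syncC <-chan time.Time
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for i := 0; i < 6; i++ {
		select {
		case <-ticker.C:
			fmt.Println("tick")
			if i == 2 {
				// "Became leader": enable the sync case.
				syncC = time.Tick(5 * time.Millisecond)
			}
		case <-syncC:
			fmt.Println("sync")
		}
	}
}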
Example #2
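A unit test checking that EtcdServer.sync returns without blocking and that it proposes a SYNC request through the recorded raft node.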
// TestSync tests that sync 1) returns without blocking and 2) proposes a SYNC request.
func TestSync(t *testing.T) {
	n := newNodeRecorder()
	srv := &EtcdServer{
		r:        raftNode{Node: n},
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	// check that sync is non-blocking
	done := make(chan struct{})
	go func() {
		srv.sync(10 * time.Second)
		done <- struct{}{}
	}()

	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("sync should be non-blocking but did not return after 1s!")
	}

	action, _ := n.Wait(1)
	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var r pb.Request
	if err := r.Unmarshal(data); err != nil {
		t.Fatalf("unmarshal request error: %v", err)
	}
	if r.Method != "SYNC" {
		t.Errorf("method = %s, want SYNC", r.Method)
	}
}
Example #3
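A test that starts the server, feeds it a Ready that promotes it to leader, fires the sync ticker, and asserts that a SYNC request is proposed.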
// TestSyncTrigger tests that the server proposes a SYNC request when its sync timer ticks.
func TestSyncTrigger(t *testing.T) {
	n := newReadyNode()
	st := make(chan time.Time, 1)
	srv := &EtcdServer{
		cfg: &ServerConfig{TickMs: 1},
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			transport:   rafthttp.NewNopTransporter(),
			storage:     mockstorage.NewStorageRecorder(""),
		},
		store:      mockstore.NewNop(),
		SyncTicker: st,
		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
	}

	// trigger the server to become a leader and accept sync requests
	go func() {
		srv.start()
		n.readyc <- raft.Ready{
			SoftState: &raft.SoftState{
				RaftState: raft.StateLeader,
			},
		}
		// trigger a sync request
		st <- time.Time{}
	}()

	action, _ := n.Wait(1)
	go srv.Stop()

	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var req pb.Request
	if err := req.Unmarshal(data); err != nil {
		t.Fatalf("error unmarshalling data: %v", err)
	}
	if req.Method != "SYNC" {
		t.Fatalf("unexpected proposed request: %#v", req.Method)
	}

	// wait on stop message
	<-n.Chan()
}
Example #4
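A small command-line tool that decodes and prints the raft log entries in a v4 or v5 etcd data directory, one line per entry, handling both normal requests and configuration changes.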
func main() {
	version := flag.Int("version", 5, "4 or 5")
	from := flag.String("data-dir", "", "")
	flag.Parse()

	if *from == "" {
		log.Fatal("Must provide -data-dir flag")
	}

	var ents []raftpb.Entry
	var err error
	switch *version {
	case 4:
		ents, err = dump4(*from)
	case 5:
		ents, err = dump5(*from)
	default:
		err = errors.New("value of -version flag must be 4 or 5")
	}

	if err != nil {
		log.Fatalf("Failed decoding log: %v", err)
	}

	for _, e := range ents {
		msg := fmt.Sprintf("%2d %5d: ", e.Term, e.Index)
		switch e.Type {
		case raftpb.EntryNormal:
			msg = fmt.Sprintf("%s norm", msg)
			var r etcdserverpb.Request
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s ???", msg)
			} else {
				msg = fmt.Sprintf("%s %s %s %s", msg, r.Method, r.Path, r.Val)
			}
		case raftpb.EntryConfChange:
			msg = fmt.Sprintf("%s conf", msg)
			var r raftpb.ConfChange
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s ???", msg)
			} else {
				msg = fmt.Sprintf("%s %s %s %s", msg, r.Type, types.ID(r.NodeID), r.Context)
			}
		}
		fmt.Println(msg)
	}
}
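
Run against a data directory, e.g. (assuming the file builds to a binary named dump): dump -version 5 -data-dir /var/lib/etcd. Each output line is the entry's term and index, its type (norm or conf), and the decoded payload.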
Example #5
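A test that EtcdServer.publish proposes a PUT of the member's attributes at that member's attribute store path.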
func TestPublish(t *testing.T) {
	n := newNodeRecorder()
	ch := make(chan interface{}, 1)
	// simulate that request has gone through consensus
	ch <- Response{}
	w := wait.NewWithResponse(ch)
	srv := &EtcdServer{
		readych:    make(chan struct{}),
		cfg:        &ServerConfig{TickMs: 1},
		id:         1,
		r:          raftNode{Node: n},
		attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
		cluster:    &membership.RaftCluster{},
		w:          w,
		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
	}
	srv.publish(time.Hour)

	action := n.Action()
	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var r pb.Request
	if err := r.Unmarshal(data); err != nil {
		t.Fatalf("unmarshal request error: %v", err)
	}
	if r.Method != "PUT" {
		t.Errorf("method = %s, want PUT", r.Method)
	}
	wm := membership.Member{ID: 1, Attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}}}
	if wpath := membership.MemberAttributesStorePath(wm.ID); r.Path != wpath {
		t.Errorf("path = %s, want %s", r.Path, wpath)
	}
	var gattr membership.Attributes
	if err := json.Unmarshal([]byte(r.Val), &gattr); err != nil {
		t.Fatalf("unmarshal val error: %v", err)
	}
	if !reflect.DeepEqual(gattr, wm.Attributes) {
		t.Errorf("member = %v, want %v", gattr, wm.Attributes)
	}
}
Example #6
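A test that updateClusterVersion proposes a PUT of the new cluster version under StoreClusterPrefix/version.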
func TestUpdateVersion(t *testing.T) {
	n := newNodeRecorder()
	ch := make(chan interface{}, 1)
	// simulate that request has gone through consensus
	ch <- Response{}
	w := wait.NewWithResponse(ch)
	srv := &EtcdServer{
		id:         1,
		cfg:        &ServerConfig{TickMs: 1},
		r:          raftNode{Node: n},
		attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}},
		cluster:    &membership.RaftCluster{},
		w:          w,
		reqIDGen:   idutil.NewGenerator(0, time.Time{}),
	}
	srv.updateClusterVersion("2.0.0")

	action := n.Action()
	if len(action) != 1 {
		t.Fatalf("len(action) = %d, want 1", len(action))
	}
	if action[0].Name != "Propose" {
		t.Fatalf("action = %s, want Propose", action[0].Name)
	}
	data := action[0].Params[0].([]byte)
	var r pb.Request
	if err := r.Unmarshal(data); err != nil {
		t.Fatalf("unmarshal request error: %v", err)
	}
	if r.Method != "PUT" {
		t.Errorf("method = %s, want PUT", r.Method)
	}
	if wpath := path.Join(StoreClusterPrefix, "version"); r.Path != wpath {
		t.Errorf("path = %s, want %s", r.Path, wpath)
	}
	if r.Val != "2.0.0" {
		t.Errorf("val = %s, want %s", r.Val, "2.0.0")
	}
}
Example #7
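An apparently even earlier Server.run loop: no SoftState handling and no sync ticker, just tick, save, send, and apply. The wait-list pattern behind s.w.Trigger is sketched after the code.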
func (s *Server) run() {
	for {
		select {
		case <-s.Ticker:
			s.Node.Tick()
		case rd := <-s.Node.Ready():
			s.Save(rd.State, rd.Entries)
			s.Send(rd.Messages)

			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			for _, e := range rd.CommittedEntries {
				var r pb.Request
				if err := r.Unmarshal(e.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				s.w.Trigger(r.Id, s.apply(r))
			}
		case <-s.done:
			return
		}
	}
}
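
The s.w.Trigger(r.Id, s.apply(r)) line pairs with a wait list on the propose side: before proposing, the caller registers the request ID and blocks on the returned channel until this loop triggers it with the apply result. etcd keeps that mechanism in its wait package; the stand-in below (hypothetical names, minimal semantics, not etcd's actual implementation) shows the shape of it:

package main

import (
	"fmt"
	"sync"
)

// waitList maps a request ID to the channel its proposer is blocked on.
type waitList struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaitList() *waitList {
	return &waitList{m: make(map[uint64]chan interface{})}
}

// Register is called by the proposer before proposing.
func (w *waitList) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

// Trigger is called by the apply loop once the entry is applied.
func (w *waitList) Trigger(id uint64, x interface{}) {
	w.mu.Lock()
	ch := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ch != nil {
		ch <- x
		close(ch)
	}
}

func main() {
	w := newWaitList()
	ch := w.Register(1)        // proposer side
	go w.Trigger(1, "applied") // apply-loop side
	fmt.Println(<-ch)          // prints "applied"
}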
Example #8
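A later revision of the WAL dump tool: it can start from the latest snapshot, a named snapshot file, or an explicit index, and it decodes both InternalRaftRequest and legacy v2 Request entries. A sketch of the unshown excerpt helper follows the code.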
func main() {
	from := flag.String("data-dir", "", "")
	snapfile := flag.String("start-snap", "", "The base name of snapshot file to start dumping")
	index := flag.Uint64("start-index", 0, "The index to start dumping")
	flag.Parse()
	if *from == "" {
		log.Fatal("Must provide -data-dir flag.")
	}
	if *snapfile != "" && *index != 0 {
		log.Fatal("start-snap and start-index flags cannot be used together.")
	}

	var (
		walsnap  walpb.Snapshot
		snapshot *raftpb.Snapshot
		err      error
	)

	isIndex := *index != 0

	if isIndex {
		fmt.Printf("Start dumping log entries from index %d.\n", *index)
		walsnap.Index = *index
	} else {
		if *snapfile == "" {
			ss := snap.New(snapDir(*from))
			snapshot, err = ss.Load()
		} else {
			snapshot, err = snap.Read(path.Join(snapDir(*from), *snapfile))
		}

		switch err {
		case nil:
			walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
			nodes := genIDSlice(snapshot.Metadata.ConfState.Nodes)
			fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s\n",
				walsnap.Term, walsnap.Index, nodes)
		case snap.ErrNoSnapshot:
			fmt.Printf("Snapshot:\nempty\n")
		default:
			log.Fatalf("Failed loading snapshot: %v", err)
		}
		fmt.Println("Start dupmping log entries from snapshot.")
	}

	w, err := wal.OpenForRead(walDir(*from), walsnap)
	if err != nil {
		log.Fatalf("Failed opening WAL: %v", err)
	}
	wmetadata, state, ents, err := w.ReadAll()
	w.Close()
	if err != nil && (!isIndex || err != wal.ErrSnapshotNotFound) {
		log.Fatalf("Failed reading WAL: %v", err)
	}
	id, cid := parseWALMetadata(wmetadata)
	vid := types.ID(state.Vote)
	fmt.Printf("WAL metadata:\nnodeID=%s clusterID=%s term=%d commitIndex=%d vote=%s\n",
		id, cid, state.Term, state.Commit, vid)

	fmt.Printf("WAL entries:\n")
	fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index)
	fmt.Printf("%4s\t%10s\ttype\tdata\n", "term", "index")
	for _, e := range ents {
		msg := fmt.Sprintf("%4d\t%10d", e.Term, e.Index)
		switch e.Type {
		case raftpb.EntryNormal:
			msg = fmt.Sprintf("%s\tnorm", msg)

			var rr etcdserverpb.InternalRaftRequest
			if err := rr.Unmarshal(e.Data); err == nil {
				msg = fmt.Sprintf("%s\t%s", msg, rr.String())
				break
			}

			var r etcdserverpb.Request
			if err := r.Unmarshal(e.Data); err == nil {
				switch r.Method {
				case "":
					msg = fmt.Sprintf("%s\tnoop", msg)
				case "SYNC":
					msg = fmt.Sprintf("%s\tmethod=SYNC time=%q", msg, time.Unix(0, r.Time))
				case "QGET", "DELETE":
					msg = fmt.Sprintf("%s\tmethod=%s path=%s", msg, r.Method, excerpt(r.Path, 64, 64))
				default:
					msg = fmt.Sprintf("%s\tmethod=%s path=%s val=%s", msg, r.Method, excerpt(r.Path, 64, 64), excerpt(r.Val, 128, 0))
				}
				break
			}
			msg = fmt.Sprintf("%s\t???", msg)
		case raftpb.EntryConfChange:
			msg = fmt.Sprintf("%s\tconf", msg)
			var r raftpb.ConfChange
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s\t???", msg)
			} else {
				msg = fmt.Sprintf("%s\tmethod=%s id=%s", msg, r.Type, types.ID(r.NodeID))
			}
		}
		fmt.Println(msg)
	}
}
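
This tool and the one in Example #9 both call an excerpt helper whose definition is not included in these excerpts. A minimal sketch of what such a helper could look like (an assumption, not the tool's actual code): quote short strings whole, otherwise keep the first pre bytes and, when suf > 0, the last suf bytes.

// excerpt is a hypothetical sketch of the truncation helper assumed
// above; it needs only the fmt package.
func excerpt(str string, pre, suf int) string {
	if pre+suf >= len(str) {
		return fmt.Sprintf("%q", str)
	}
	if suf == 0 {
		return fmt.Sprintf("%q...", str[:pre])
	}
	return fmt.Sprintf("%q...%q", str[:pre], str[len(str)-suf:])
}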
Example #9
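A simpler revision of the same dump tool: it always starts from the latest snapshot and decodes only the legacy v2 Request and ConfChange entry formats.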
func main() {
	from := flag.String("data-dir", "", "")
	flag.Parse()
	if *from == "" {
		log.Fatal("Must provide -data-dir flag")
	}

	ss := snap.New(snapDir(*from))
	snapshot, err := ss.Load()
	var walsnap walpb.Snapshot
	switch err {
	case nil:
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		nodes := genIDSlice(snapshot.Metadata.ConfState.Nodes)
		fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s\n",
			walsnap.Term, walsnap.Index, nodes)
	case snap.ErrNoSnapshot:
		fmt.Printf("Snapshot:\nempty\n")
	default:
		log.Fatalf("Failed loading snapshot: %v", err)
	}

	w, err := wal.Open(walDir(*from), walsnap)
	if err != nil {
		log.Fatalf("Failed opening WAL: %v", err)
	}
	wmetadata, state, ents, err := w.ReadAll()
	w.Close()
	if err != nil {
		log.Fatalf("Failed reading WAL: %v", err)
	}
	id, cid := parseWALMetadata(wmetadata)
	vid := types.ID(state.Vote)
	fmt.Printf("WAL metadata:\nnodeID=%s clusterID=%s term=%d commitIndex=%d vote=%s\n",
		id, cid, state.Term, state.Commit, vid)

	fmt.Printf("WAL entries:\n")
	fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index)
	fmt.Printf("%4s\t%10s\ttype\tdata\n", "term", "index")
	for _, e := range ents {
		msg := fmt.Sprintf("%4d\t%10d", e.Term, e.Index)
		switch e.Type {
		case raftpb.EntryNormal:
			msg = fmt.Sprintf("%s\tnorm", msg)
			var r etcdserverpb.Request
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s\t???", msg)
				break
			}
			switch r.Method {
			case "":
				msg = fmt.Sprintf("%s\tnoop", msg)
			case "SYNC":
				msg = fmt.Sprintf("%s\tmethod=SYNC time=%q", msg, time.Unix(0, r.Time))
			case "QGET", "DELETE":
				msg = fmt.Sprintf("%s\tmethod=%s path=%s", msg, r.Method, excerpt(r.Path, 64, 64))
			default:
				msg = fmt.Sprintf("%s\tmethod=%s path=%s val=%s", msg, r.Method, excerpt(r.Path, 64, 64), excerpt(r.Val, 128, 0))
			}
		case raftpb.EntryConfChange:
			msg = fmt.Sprintf("%s\tconf", msg)
			var r raftpb.ConfChange
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s\t???", msg)
			} else {
				msg = fmt.Sprintf("%s\tmethod=%s id=%s", msg, r.Type, types.ID(r.NodeID))
			}
		}
		fmt.Println(msg)
	}
}
Example #10
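A fuller EtcdServer.run loop: in addition to the duties from Example #1 it saves snapshots, applies configuration changes, tracks the applied and snapshot indices, recovers the store from a snapshot that is ahead of the applied index, and triggers a new snapshot once more than snapCount entries have been applied since the last one.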
func (s *EtcdServer) run() {
	var syncC <-chan time.Time
	// snapi indicates the index of the last submitted snapshot request
	var snapi, appliedi int64
	for {
		select {
		case <-s.ticker:
			s.node.Tick()
		case rd := <-s.node.Ready():
			s.storage.Save(rd.HardState, rd.Entries)
			s.storage.SaveSnap(rd.Snapshot)
			s.send(rd.Messages)

			// TODO(bmizerany): do this in the background, but take
			// care to apply entries in a single goroutine, and not
			// race them.
			// TODO: apply configuration change into ClusterStore.
			for _, e := range rd.CommittedEntries {
				switch e.Type {
				case raftpb.EntryNormal:
					var r pb.Request
					if err := r.Unmarshal(e.Data); err != nil {
						panic("TODO: this is bad, what do we do about it?")
					}
					s.w.Trigger(r.ID, s.apply(r))
				case raftpb.EntryConfChange:
					var cc raftpb.ConfChange
					if err := cc.Unmarshal(e.Data); err != nil {
						panic("TODO: this is bad, what do we do about it?")
					}
					s.node.ApplyConfChange(cc)
					s.w.Trigger(cc.ID, nil)
				default:
					panic("unexpected entry type")
				}
				atomic.StoreInt64(&s.raftIndex, e.Index)
				atomic.StoreInt64(&s.raftTerm, e.Term)
				appliedi = e.Index
			}

			if rd.Snapshot.Index > snapi {
				snapi = rd.Snapshot.Index
			}

			// recover from snapshot if it is more updated than current applied
			if rd.Snapshot.Index > appliedi {
				if err := s.store.Recovery(rd.Snapshot.Data); err != nil {
					panic("TODO: this is bad, what do we do about it?")
				}
				appliedi = rd.Snapshot.Index
			}

			if appliedi-snapi > s.snapCount {
				s.snapshot()
				snapi = appliedi
			}

			if rd.SoftState != nil {
				if rd.RaftState == raft.StateLeader {
					syncC = s.syncTicker
				} else {
					syncC = nil
				}
				if rd.SoftState.ShouldStop {
					s.Stop()
					return
				}
			}
		case <-syncC:
			s.sync(defaultSyncTimeout)
		case <-s.done:
			return
		}
	}
}