func TestTransportAdd(t *testing.T) { ls := stats.NewLeaderStats("") tr := &transport{ roundTripper: &roundTripperRecorder{}, leaderStats: ls, peers: make(map[types.ID]Peer), } tr.AddPeer(1, []string{"http://localhost:7001"}) defer tr.Stop() if _, ok := ls.Followers["1"]; !ok { t.Errorf("FollowerStats[1] is nil, want exists") } s, ok := tr.peers[types.ID(1)] if !ok { t.Fatalf("senders[1] is nil, want exists") } // duplicate AddPeer is ignored tr.AddPeer(1, []string{"http://localhost:7001"}) ns := tr.peers[types.ID(1)] if s != ns { t.Errorf("sender = %v, want %v", ns, s) } }
func TestTransportErrorc(t *testing.T) { errorc := make(chan error, 1) tr := &transport{ roundTripper: newRespRoundTripper(http.StatusForbidden, nil), leaderStats: stats.NewLeaderStats(""), peers: make(map[types.ID]Peer), errorc: errorc, } tr.AddPeer(1, []string{"http://localhost:7001"}) defer tr.Stop() select { case <-errorc: t.Fatalf("received unexpected from errorc") case <-time.After(10 * time.Millisecond): } tr.peers[1].Send(raftpb.Message{}) testutil.ForceGosched() select { case <-errorc: default: t.Fatalf("cannot receive error from errorc") } }
func TestTransportRemove(t *testing.T) { tr := &transport{ roundTripper: &roundTripperRecorder{}, leaderStats: stats.NewLeaderStats(""), peers: make(map[types.ID]Peer), } tr.AddPeer(1, []string{"http://localhost:7001"}) tr.RemovePeer(types.ID(1)) defer tr.Stop() if _, ok := tr.peers[types.ID(1)]; ok { t.Fatalf("senders[1] exists, want removed") } }
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
//
// The bootstrap path is chosen from the on-disk WAL state and cfg.NewCluster:
// join an existing cluster, bootstrap a brand-new cluster (optionally via
// discovery), or restart from an existing WAL (optionally as a forced
// single-node cluster).
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New(StoreAdminPrefix, StoreKeysPrefix)

	var w *wal.WAL
	var n raft.Node
	var s *raft.MemoryStorage
	var id types.ID

	// Detect the on-disk WAL version; its presence/absence selects the
	// bootstrap branch below.
	walVersion, err := wal.DetectVersion(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if walVersion == wal.WALUnknown {
		return nil, fmt.Errorf("unknown wal version in data dir %s", cfg.DataDir)
	}
	haveWAL := walVersion != wal.WALNotExist
	ss := snap.New(cfg.SnapDir())

	switch {
	case !haveWAL && !cfg.NewCluster:
		// Join an existing cluster: fetch the remote cluster config and
		// validate it against the local peer-URL configuration before
		// starting a fresh node.
		existingCluster, err := GetClusterFromRemotePeers(getRemotePeerURLs(cfg.Cluster, cfg.Name), cfg.Transport)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cfg.Cluster, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		cfg.Cluster.UpdateIndex(existingCluster.index)
		cfg.Cluster.SetID(existingCluster.id)
		cfg.Cluster.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, nil)
	case !haveWAL && cfg.NewCluster:
		// Bootstrap a brand-new cluster, optionally resolving the member
		// list through the discovery service.
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			return nil, err
		}
		m := cfg.Cluster.MemberByName(cfg.Name)
		if isMemberBootstrapped(cfg.Cluster, cfg.Name, cfg.Transport) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			str, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
			if err != nil {
				return nil, err
			}
			// Replace the configured cluster with the one agreed on via
			// discovery, then sanity-check it.
			if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, str); err != nil {
				return nil, err
			}
			if err := cfg.Cluster.Validate(); err != nil {
				return nil, fmt.Errorf("bad discovery cluster: %v", err)
			}
		}
		cfg.Cluster.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cfg.Cluster.MemberIDs())
	case haveWAL:
		// Restart from existing state. Run the migrations.
		if err := upgradeWAL(cfg.DataDir, cfg.Name, walVersion); err != nil {
			return nil, err
		}

		if err := fileutil.IsDirWriteable(cfg.DataDir); err != nil {
			return nil, fmt.Errorf("cannot write to data directory: %v", err)
		}
		if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}

		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		// Recover the store from the latest snapshot, if one exists;
		// ErrNoSnapshot is the normal "no snapshot yet" case.
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				log.Panicf("etcdserver: recovered store from snapshot error: %v", err)
			}
			log.Printf("etcdserver: recovered store from snapshot at index %d", snapshot.Metadata.Index)
		}
		// Cluster membership is authoritative from the recovered store,
		// not from the static configuration.
		cfg.Cluster = NewClusterFromStore(cfg.Cluster.token, st)
		cfg.Print()
		if snapshot != nil {
			log.Printf("etcdserver: loaded cluster information from store: %s", cfg.Cluster)
		}
		if !cfg.ForceNewCluster {
			id, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	lstats := stats.NewLeaderStats(id.String())

	srv := &EtcdServer{
		cfg:       cfg,
		snapCount: cfg.SnapCount,
		errorc:    make(chan error, 1),
		store:     st,
		r: raftNode{
			Node:        n,
			ticker:      time.Tick(time.Duration(cfg.TickMs) * time.Millisecond),
			raftStorage: s,
			storage:     NewStorage(w, ss),
		},
		id:         id,
		attributes: Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		Cluster:    cfg.Cluster,
		stats:      sstats,
		lstats:     lstats,
		SyncTicker: time.Tick(500 * time.Millisecond),
		reqIDGen:   idutil.NewGenerator(uint8(id), time.Now()),
	}

	tr := rafthttp.NewTransporter(cfg.Transport, id, cfg.Cluster.ID(), srv, srv.errorc, sstats, lstats)
	// add all the remote members into sendhub
	for _, m := range cfg.Cluster.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr
	return srv, nil
}
// TestSendMessageWhenStreamIsBroken tests that message can be sent to the // remote in a limited time when all underlying connections are broken. func TestSendMessageWhenStreamIsBroken(t *testing.T) { // member 1 tr := NewTransporter(&http.Transport{}, types.ID(1), types.ID(1), &fakeRaft{}, nil, newServerStats(), stats.NewLeaderStats("1")) srv := httptest.NewServer(tr.Handler()) defer srv.Close() // member 2 recvc := make(chan raftpb.Message, 1) p := &fakeRaft{recvc: recvc} tr2 := NewTransporter(&http.Transport{}, types.ID(2), types.ID(1), p, nil, newServerStats(), stats.NewLeaderStats("2")) srv2 := httptest.NewServer(tr2.Handler()) defer srv2.Close() tr.AddPeer(types.ID(2), []string{srv2.URL}) defer tr.Stop() tr2.AddPeer(types.ID(1), []string{srv.URL}) defer tr2.Stop() if !waitStreamWorking(tr.(*transport).Get(types.ID(2)).(*peer)) { t.Fatalf("stream from 1 to 2 is not in work as expected") } // break the stream srv.CloseClientConnections() srv2.CloseClientConnections() var n int for { select { // TODO: remove this resend logic when we add retry logic into the code case <-time.After(time.Millisecond): n++ tr.Send([]raftpb.Message{{Type: raftpb.MsgHeartbeat, From: 1, To: 2, Term: 1, Commit: 3}}) case <-recvc: if n > 10 { t.Errorf("disconnection time = %dms, want < 10ms", n) } return } } }
func TestSendMessage( // member 1 t *testing.T) { tr := NewTransporter(&http.Transport{}, types.ID(1), types.ID(1), &fakeRaft{}, nil, newServerStats(), stats.NewLeaderStats("1")) srv := httptest.NewServer(tr.Handler()) defer srv.Close() // member 2 recvc := make(chan raftpb.Message, 1) p := &fakeRaft{recvc: recvc} tr2 := NewTransporter(&http.Transport{}, types.ID(2), types.ID(1), p, nil, newServerStats(), stats.NewLeaderStats("2")) srv2 := httptest.NewServer(tr2.Handler()) defer srv2.Close() tr.AddPeer(types.ID(2), []string{srv2.URL}) defer tr.Stop() tr2.AddPeer(types.ID(1), []string{srv.URL}) defer tr2.Stop() if !waitStreamWorking(tr.(*transport).Get(types.ID(2)).(*peer)) { t.Fatalf("stream from 1 to 2 is not in work as expected") } data := []byte("some data") tests := []raftpb.Message{ // these messages are set to send to itself, which faciliates testing. {Type: raftpb.MsgProp, From: 1, To: 2, Entries: []raftpb.Entry{{Data: data}}}, // TODO: send out MsgApp which fits msgapp stream but the term doesn't match {Type: raftpb.MsgApp, From: 1, To: 2, Term: 1, Index: 3, LogTerm: 0, Entries: []raftpb.Entry{{Index: 4, Term: 1, Data: data}}, Commit: 3}, {Type: raftpb.MsgAppResp, From: 1, To: 2, Term: 1, Index: 3}, {Type: raftpb.MsgVote, From: 1, To: 2, Term: 1, Index: 3, LogTerm: 0}, {Type: raftpb.MsgVoteResp, From: 1, To: 2, Term: 1}, {Type: raftpb.MsgSnap, From: 1, To: 2, Term: 1, Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1000, Term: 1}, Data: data}}, {Type: raftpb.MsgHeartbeat, From: 1, To: 2, Term: 1, Commit: 3}, {Type: raftpb.MsgHeartbeatResp, From: 1, To: 2, Term: 1}, } for i, tt := range tests { tr.Send([]raftpb.Message{tt}) msg := <-recvc if !reflect.DeepEqual(msg, tt) { t.Errorf("#%d: msg = %+v, want %+v", i, msg, tt) } } }