// TODO (xiangli): reasonable retry logic
//
// Send enqueues a raft message for delivery to the remote peer. It first
// tears down any stream belonging to an older term, lazily initializes a
// stream client when the message warrants one, and then tries — in order —
// batching, the stream, and finally the buffered pipeline queue. It returns
// an error only when the pipeline queue is full.
func (s *sender) Send(m raftpb.Message) error {
	// Drop the current stream if the message carries a newer term.
	s.maybeStopStream(m.Term)
	if shouldInitStream(m) && !s.hasStreamClient() {
		s.initStream(types.ID(m.From), types.ID(m.To), m.Term)
		s.batcher.Reset(time.Now())
	}
	// With an active stream, batchable messages may be absorbed silently.
	if canBatch(m) && s.hasStreamClient() {
		if s.batcher.ShouldBatch(time.Now()) {
			return nil
		}
	}
	if canUseStream(m) {
		if ok := s.tryStream(m); ok {
			return nil
		}
	}
	// TODO: don't block. we should be able to have 1000s
	// of messages out at a time.
	data := pbutil.MustMarshal(&m)
	select {
	case s.q <- data:
		return nil
	default:
		// Pipeline queue is full: report back-pressure to the caller.
		log.Printf("sender: reach the maximal serving to %s", s.u)
		return fmt.Errorf("reach maximal serving")
	}
}
func TestClusterUpdateAttributes(t *testing.T) { name := "etcd" clientURLs := []string{"http://127.0.0.1:4001"} tests := []struct { mems []*Member removed map[types.ID]bool wmems []*Member }{ // update attributes of existing member { []*Member{ newTestMember(1, nil, "", nil), }, nil, []*Member{ newTestMember(1, nil, name, clientURLs), }, }, // update attributes of removed member { nil, map[types.ID]bool{types.ID(1): true}, nil, }, } for i, tt := range tests { c := newTestCluster(tt.mems) c.removed = tt.removed c.UpdateAttributes(types.ID(1), Attributes{Name: name, ClientURLs: clientURLs}) if g := c.Members(); !reflect.DeepEqual(g, tt.wmems) { t.Errorf("#%d: members = %+v, want %+v", i, g, tt.wmems) } } }
func parseWALMetadata(b []byte) (id, cid types.ID) { var metadata etcdserverpb.Metadata pbutil.MustUnmarshal(&metadata, b) id = types.ID(metadata.NodeID) cid = types.ID(metadata.ClusterID) return }
// parseProposeCtxErr converts a context error from a proposal into a more
// specific etcd error, using the proposal start time and the current leader
// state to distinguish leader-failure timeouts from connectivity timeouts.
func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
	switch err {
	case context.Canceled:
		return ErrCanceled
	case context.DeadlineExceeded:
		curLeadElected := s.r.leadElectedTime()
		// The previous leader is assumed lost no later than two full
		// election timeouts before the current leader was elected.
		prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
		if start.After(prevLeadLost) && start.Before(curLeadElected) {
			// The proposal started during a leader transition.
			return ErrTimeoutDueToLeaderFail
		}
		lead := types.ID(atomic.LoadUint64(&s.r.lead))
		switch lead {
		case types.ID(raft.None):
			// TODO: return error to specify it happens because the cluster does not have leader now
		case s.ID():
			// We are the leader: blame lost quorum connectivity if applicable.
			if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
				return ErrTimeoutDueToConnectionLost
			}
		default:
			// Another member leads: check connectivity to it since start.
			if !isConnectedSince(s.r.transport, start, lead) {
				return ErrTimeoutDueToConnectionLost
			}
		}
		return ErrTimeout
	default:
		return err
	}
}
func TestStreamReaderDialRequest(t *testing.T) { for i, tt := range []streamType{streamTypeMsgApp, streamTypeMessage, streamTypeMsgAppV2} { tr := &roundTripperRecorder{} sr := &streamReader{ tr: tr, picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), local: types.ID(1), remote: types.ID(2), cid: types.ID(1), msgAppTerm: 1, } sr.dial(tt) req := tr.Request() wurl := fmt.Sprintf("http://localhost:2380" + tt.endpoint() + "/1") if req.URL.String() != wurl { t.Errorf("#%d: url = %s, want %s", i, req.URL.String(), wurl) } if w := "GET"; req.Method != w { t.Errorf("#%d: method = %s, want %s", i, req.Method, w) } if g := req.Header.Get("X-Etcd-Cluster-ID"); g != "1" { t.Errorf("#%d: header X-Etcd-Cluster-ID = %s, want 1", i, g) } if g := req.Header.Get("X-Raft-To"); g != "2" { t.Errorf("#%d: header X-Raft-To = %s, want 2", i, g) } if g := req.Header.Get("X-Raft-Term"); tt == streamTypeMsgApp && g != "1" { t.Errorf("#%d: header X-Raft-Term = %s, want 1", i, g) } } }
func TestTransportCutMend(t *testing.T) { ss := &stats.ServerStats{} ss.Initialize() peer1 := newFakePeer() peer2 := newFakePeer() tr := &Transport{ ServerStats: ss, peers: map[types.ID]Peer{types.ID(1): peer1, types.ID(2): peer2}, } tr.CutPeer(types.ID(1)) wmsgsTo := []raftpb.Message{ // good message {Type: raftpb.MsgProp, To: 1}, {Type: raftpb.MsgApp, To: 1}, } tr.Send(wmsgsTo) if len(peer1.msgs) > 0 { t.Fatalf("msgs expected to be ignored, got %+v", peer1.msgs) } tr.MendPeer(types.ID(1)) tr.Send(wmsgsTo) if !reflect.DeepEqual(peer1.msgs, wmsgsTo) { t.Errorf("msgs to peer 1 = %+v, want %+v", peer1.msgs, wmsgsTo) } }
func TestTransportAdd(t *testing.T) { ls := stats.NewLeaderStats("") tr := &Transport{ LeaderStats: ls, streamRt: &roundTripperRecorder{}, peers: make(map[types.ID]Peer), prober: probing.NewProber(nil), } tr.AddPeer(1, []string{"http://localhost:2380"}) if _, ok := ls.Followers["1"]; !ok { t.Errorf("FollowerStats[1] is nil, want exists") } s, ok := tr.peers[types.ID(1)] if !ok { tr.Stop() t.Fatalf("senders[1] is nil, want exists") } // duplicate AddPeer is ignored tr.AddPeer(1, []string{"http://localhost:2380"}) ns := tr.peers[types.ID(1)] if s != ns { t.Errorf("sender = %v, want %v", ns, s) } tr.Stop() }
func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { var ( err error wmetadata []byte ) repaired := false for { if w, err = wal.Open(waldir, snap); err != nil { plog.Fatalf("open wal error: %v", err) } if wmetadata, st, ents, err = w.ReadAll(); err != nil { w.Close() // we can only repair ErrUnexpectedEOF and we never repair twice. if repaired || err != io.ErrUnexpectedEOF { plog.Fatalf("read wal error (%v) and cannot be repaired", err) } if !wal.Repair(waldir) { plog.Fatalf("WAL error (%v) cannot be repaired", err) } else { plog.Infof("repaired WAL error (%v)", err) repaired = true } continue } break } var metadata pb.Metadata pbutil.MustUnmarshal(&metadata, wmetadata) id = types.ID(metadata.NodeID) cid = types.ID(metadata.ClusterID) return }
func TestTransportAdd(t *testing.T) { ls := stats.NewLeaderStats("") term := uint64(10) tr := &transport{ roundTripper: &roundTripperRecorder{}, leaderStats: ls, term: term, peers: make(map[types.ID]Peer), prober: probing.NewProber(nil), } tr.AddPeer(1, []string{"http://localhost:2380"}) if _, ok := ls.Followers["1"]; !ok { t.Errorf("FollowerStats[1] is nil, want exists") } s, ok := tr.peers[types.ID(1)] if !ok { tr.Stop() t.Fatalf("senders[1] is nil, want exists") } // duplicate AddPeer is ignored tr.AddPeer(1, []string{"http://localhost:2380"}) ns := tr.peers[types.ID(1)] if s != ns { t.Errorf("sender = %v, want %v", ns, s) } tr.Stop() if g := s.(*peer).msgAppReader.msgAppTerm; g != term { t.Errorf("peer.term = %d, want %d", g, term) } }
// TestStreamReaderStopOnDial tests a stream reader closes the connection on stop.
func TestStreamReaderStopOnDial(t *testing.T) {
	defer testutil.AfterTest(t)
	h := http.Header{}
	h.Add("X-Server-Version", version.Version)
	// respWaitRoundTripper lets the test run code (onResp) at the exact
	// moment the dial's RoundTrip produces its response.
	tr := &respWaitRoundTripper{rrt: &respRoundTripper{code: http.StatusOK, header: h}}
	sr := &streamReader{
		peerID: types.ID(2),
		tr:     &Transport{streamRt: tr, ClusterID: types.ID(1)},
		picker: mustNewURLPicker(t, []string{"http://localhost:2380"}),
		errorc: make(chan error, 1),
		typ:    streamTypeMessage,
		status: newPeerStatus(types.ID(2)),
	}
	tr.onResp = func() {
		// stop() waits for the run() goroutine to exit, but that exit
		// needs a response from RoundTrip() first; use goroutine
		go sr.stop()
		// wait so that stop() is blocked on run() exiting
		time.Sleep(10 * time.Millisecond)
		// sr.run() completes dialing then begins decoding while stopped
	}
	sr.start()
	// the reader must observe the stop and close down within a second
	select {
	case <-sr.done:
	case <-time.After(time.Second):
		t.Fatal("streamReader did not stop in time")
	}
}
func TestPipelinePost(t *testing.T) { tr := &roundTripperRecorder{} picker := mustNewURLPicker(t, []string{"http://localhost:2380"}) p := newPipeline(tr, picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), nil, &fakeRaft{}, nil) if err := p.post([]byte("some data")); err != nil { t.Fatalf("unexpect post error: %v", err) } p.stop() if g := tr.Request().Method; g != "POST" { t.Errorf("method = %s, want %s", g, "POST") } if g := tr.Request().URL.String(); g != "http://localhost:2380/raft" { t.Errorf("url = %s, want %s", g, "http://localhost:2380/raft") } if g := tr.Request().Header.Get("Content-Type"); g != "application/protobuf" { t.Errorf("content type = %s, want %s", g, "application/protobuf") } if g := tr.Request().Header.Get("X-Server-Version"); g != version.Version { t.Errorf("version = %s, want %s", g, version.Version) } if g := tr.Request().Header.Get("X-Min-Cluster-Version"); g != version.MinClusterVersion { t.Errorf("min version = %s, want %s", g, version.MinClusterVersion) } if g := tr.Request().Header.Get("X-Etcd-Cluster-ID"); g != "1" { t.Errorf("cluster id = %s, want %s", g, "1") } b, err := ioutil.ReadAll(tr.Request().Body) if err != nil { t.Fatalf("unexpected ReadAll error: %v", err) } if string(b) != "some data" { t.Errorf("body = %s, want %s", b, "some data") } }
func TestSenderPost(t *testing.T) { tr := &roundTripperRecorder{} s := NewSender(tr, "http://10.0.0.1", types.ID(1), types.ID(1), &nopProcessor{}, nil, nil) if err := s.post([]byte("some data")); err != nil { t.Fatalf("unexpect post error: %v", err) } s.Stop() if g := tr.Request().Method; g != "POST" { t.Errorf("method = %s, want %s", g, "POST") } if g := tr.Request().URL.String(); g != "http://10.0.0.1" { t.Errorf("url = %s, want %s", g, "http://10.0.0.1") } if g := tr.Request().Header.Get("Content-Type"); g != "application/protobuf" { t.Errorf("content type = %s, want %s", g, "application/protobuf") } if g := tr.Request().Header.Get("X-Etcd-Cluster-ID"); g != "1" { t.Errorf("cluster id = %s, want %s", g, "1") } b, err := ioutil.ReadAll(tr.Request().Body) if err != nil { t.Fatalf("unexpected ReadAll error: %v", err) } if string(b) != "some data" { t.Errorf("body = %s, want %s", b, "some data") } }
func TestSenderPostBad(t *testing.T) { tests := []struct { u string code int err error }{ // bad url {":bad url", http.StatusNoContent, nil}, // RoundTrip returns error {"http://10.0.0.1", 0, errors.New("blah")}, // unexpected response status code {"http://10.0.0.1", http.StatusOK, nil}, {"http://10.0.0.1", http.StatusCreated, nil}, } for i, tt := range tests { shouldstop := make(chan struct{}) s := NewSender(newRespRoundTripper(tt.code, tt.err), tt.u, types.ID(1), types.ID(1), &nopProcessor{}, nil, shouldstop) err := s.post([]byte("some data")) s.Stop() if err == nil { t.Errorf("#%d: err = nil, want not nil", i) } } }
// TestSenderExceedMaximalServing verifies that Send fails once the sender's
// outgoing buffer is full, and succeeds again after the buffer drains.
func TestSenderExceedMaximalServing(t *testing.T) {
	// a blocking round tripper keeps every in-flight request pending
	tr := newRoundTripperBlocker()
	fs := &stats.FollowerStats{}
	s := NewSender(tr, "http://10.0.0.1", types.ID(1), types.ID(1), &nopProcessor{}, fs, nil)

	// keep the sender busy and make the buffer full
	// nothing can go out as we block the sender
	for i := 0; i < connPerSender+senderBufSize; i++ {
		if err := s.Send(raftpb.Message{}); err != nil {
			t.Errorf("send err = %v, want nil", err)
		}
		// force the sender to grab data
		testutil.ForceGosched()
	}

	// try to send a data when we are sure the buffer is full
	if err := s.Send(raftpb.Message{}); err == nil {
		t.Errorf("unexpect send success")
	}

	// unblock the senders and force them to send out the data
	tr.unblock()
	testutil.ForceGosched()

	// It could send new data after previous ones succeed
	if err := s.Send(raftpb.Message{}); err != nil {
		t.Errorf("send err = %v, want nil", err)
	}
	s.Stop()
}
func TestStreamReaderDialRequest(t *testing.T) { for i, tt := range []streamType{streamTypeMessage, streamTypeMsgAppV2} { tr := &roundTripperRecorder{rec: &testutil.RecorderBuffered{}} sr := &streamReader{ peerID: types.ID(2), tr: &Transport{streamRt: tr, ClusterID: types.ID(1), ID: types.ID(1)}, picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), } sr.dial(tt) act, err := tr.rec.Wait(1) if err != nil { t.Fatal(err) } req := act[0].Params[0].(*http.Request) wurl := fmt.Sprintf("http://localhost:2380" + tt.endpoint() + "/1") if req.URL.String() != wurl { t.Errorf("#%d: url = %s, want %s", i, req.URL.String(), wurl) } if w := "GET"; req.Method != w { t.Errorf("#%d: method = %s, want %s", i, req.Method, w) } if g := req.Header.Get("X-Etcd-Cluster-ID"); g != "1" { t.Errorf("#%d: header X-Etcd-Cluster-ID = %s, want 1", i, g) } if g := req.Header.Get("X-Raft-To"); g != "2" { t.Errorf("#%d: header X-Raft-To = %s, want 2", i, g) } } }
// TestStreamWriterAttachOutgoingConn tests that outgoingConn can be attached // to streamWriter. After that, streamWriter can use it to send messages // continuously, and closes it when stopped. func TestStreamWriterAttachOutgoingConn(t *testing.T) { sw := startStreamWriter(types.ID(1), newPeerStatus(types.ID(1)), &stats.FollowerStats{}, &fakeRaft{}) // the expected initial state of streamWriter is not working if _, ok := sw.writec(); ok != false { t.Errorf("initial working status = %v, want false", ok) } // repeat tests to ensure streamWriter can use last attached connection var wfc *fakeWriteFlushCloser for i := 0; i < 3; i++ { prevwfc := wfc wfc = &fakeWriteFlushCloser{} sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc}) // sw.attach happens asynchronously. Waits for its result in a for loop to make the // test more robust on slow CI. for j := 0; j < 3; j++ { testutil.WaitSchedule() // previous attached connection should be closed if prevwfc != nil && prevwfc.Closed() != true { continue } // write chan is available if _, ok := sw.writec(); ok != true { continue } } // previous attached connection should be closed if prevwfc != nil && prevwfc.Closed() != true { t.Errorf("#%d: close of previous connection = %v, want true", i, prevwfc.Closed()) } // write chan is available if _, ok := sw.writec(); ok != true { t.Errorf("#%d: working status = %v, want true", i, ok) } sw.msgc <- raftpb.Message{} testutil.WaitSchedule() // write chan is available if _, ok := sw.writec(); ok != true { t.Errorf("#%d: working status = %v, want true", i, ok) } if wfc.Written() == 0 { t.Errorf("#%d: failed to write to the underlying connection", i) } } sw.stop() // write chan is unavailable since the writer is stopped. if _, ok := sw.writec(); ok != false { t.Errorf("working status after stop = %v, want false", ok) } if wfc.Closed() != true { t.Errorf("failed to close the underlying connection") } }
// startRaft initializes the raft node for this member: it (re)creates the
// snapshot directory and snapshotter, replays any existing WAL, starts or
// restarts the raft state machine, and wires up the HTTP transport to the
// configured peers.
func (rc *raftNode) startRaft() {
	if !fileutil.Exist(rc.snapdir) {
		if err := os.Mkdir(rc.snapdir, 0750); err != nil {
			log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
		}
	}
	rc.snapshotter = snap.New(rc.snapdir)
	// signal snapshotter readiness to whoever is waiting on the channel
	rc.snapshotterReady <- rc.snapshotter

	oldwal := wal.Exist(rc.waldir)
	rc.wal = rc.replayWAL()

	// peers are numbered 1..len(peers); IDs must agree across the cluster
	rpeers := make([]raft.Peer, len(rc.peers))
	for i := range rpeers {
		rpeers[i] = raft.Peer{ID: uint64(i + 1)}
	}
	c := &raft.Config{
		ID:              uint64(rc.id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         rc.raftStorage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}

	if oldwal {
		// an existing WAL means this node is restarting with prior state
		rc.node = raft.RestartNode(c)
	} else {
		startPeers := rpeers
		if rc.join {
			// joining an existing cluster: do not bootstrap with peers
			startPeers = nil
		}
		rc.node = raft.StartNode(c, startPeers)
	}

	ss := &stats.ServerStats{}
	ss.Initialize()

	rc.transport = &rafthttp.Transport{
		ID:          types.ID(rc.id),
		ClusterID:   0x1000,
		Raft:        rc,
		ServerStats: ss,
		LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)),
		ErrorC:      make(chan error),
	}

	rc.transport.Start()
	// register every peer except ourselves
	for i := range rc.peers {
		if i+1 != rc.id {
			rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]})
		}
	}

	go rc.serveRaft()
	go rc.serveChannels()
}
func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { if s.cluster.IsIDRemoved(types.ID(m.From)) { plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") } if m.Type == raftpb.MsgApp { s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size()) } return s.r.Step(ctx, m) }
func TestServeRaftStreamPrefix(t *testing.T) { tests := []struct { path string wtype streamType }{ { RaftStreamPrefix + "/message/1", streamTypeMessage, }, { RaftStreamPrefix + "/msgapp/1", streamTypeMsgAppV2, }, // backward compatibility { RaftStreamPrefix + "/1", streamTypeMsgApp, }, } for i, tt := range tests { req, err := http.NewRequest("GET", "http://localhost:2380"+tt.path, nil) if err != nil { t.Fatalf("#%d: could not create request: %#v", i, err) } req.Header.Set("X-Etcd-Cluster-ID", "1") req.Header.Set("X-Server-Version", version.Version) req.Header.Set("X-Raft-To", "2") wterm := "1" req.Header.Set("X-Raft-Term", wterm) peer := newFakePeer() peerGetter := &fakePeerGetter{peers: map[types.ID]Peer{types.ID(1): peer}} h := newStreamHandler(peerGetter, &fakeRaft{}, types.ID(2), types.ID(1)) rw := httptest.NewRecorder() go h.ServeHTTP(rw, req) var conn *outgoingConn select { case conn = <-peer.connc: case <-time.After(time.Second): t.Fatalf("#%d: failed to attach outgoingConn", i) } if g := rw.Header().Get("X-Server-Version"); g != version.Version { t.Errorf("#%d: X-Server-Version = %s, want %s", i, g, version.Version) } if conn.t != tt.wtype { t.Errorf("#%d: type = %s, want %s", i, conn.t, tt.wtype) } if conn.termStr != wterm { t.Errorf("#%d: term = %s, want %s", i, conn.termStr, wterm) } conn.Close() } }
// decodeLoop reads raft messages from rc using the decoder matching stream
// type t and forwards them to the reader's receive channels until decoding
// fails (e.g. the connection is closed). It returns the decode error.
func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
	var dec decoder
	cr.mu.Lock()
	switch t {
	case streamTypeMsgAppV2:
		dec = newMsgAppV2Decoder(rc, cr.local, cr.remote)
	case streamTypeMessage:
		dec = &messageDecoder{r: rc}
	default:
		plog.Panicf("unhandled stream type %s", t)
	}
	// remember the connection so stop()/close() can tear it down
	cr.closer = rc
	cr.mu.Unlock()

	for {
		m, err := dec.decode()
		if err != nil {
			cr.mu.Lock()
			cr.close()
			cr.mu.Unlock()
			return err
		}

		receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size()))

		// read the paused flag under the lock, act on it outside
		cr.mu.Lock()
		paused := cr.paused
		cr.mu.Unlock()

		// while paused, messages are drained from the stream and dropped
		if paused {
			continue
		}

		if isLinkHeartbeatMessage(m) {
			// raft is not interested in link layer
			// heartbeat message, so we should ignore
			// it.
			continue
		}

		// proposals go to their dedicated channel; everything else to recvc
		recvc := cr.recvc
		if m.Type == raftpb.MsgProp {
			recvc = cr.propc
		}

		select {
		case recvc <- m:
		default:
			// receiver is overloaded: drop rather than block the stream
			if cr.status.isActive() {
				plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From))
			}
			plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From))
		}
	}
}
func TestSendHubRemove(t *testing.T) { cl := newTestCluster(nil) ls := stats.NewLeaderStats("") h := newSendHub(nil, cl, nil, nil, ls) m := newTestMember(1, []string{"http://a"}, "", nil) h.Add(m) h.Remove(types.ID(1)) if _, ok := h.senders[types.ID(1)]; ok { t.Fatalf("senders[1] exists, want removed") } }
func TestTransportUpdate(t *testing.T) { peer := newFakePeer() tr := &transport{ peers: map[types.ID]Peer{types.ID(1): peer}, } u := "http://localhost:2380" tr.UpdatePeer(types.ID(1), []string{u}) wurls := types.URLs(testutil.MustNewURLs(t, []string{"http://localhost:2380"})) if !reflect.DeepEqual(peer.urls, wurls) { t.Errorf("urls = %+v, want %+v", peer.urls, wurls) } }
func TestTransportRemove(t *testing.T) { tr := &transport{ leaderStats: stats.NewLeaderStats(""), peers: make(map[types.ID]*peer), } tr.AddPeer(1, []string{"http://a"}) tr.RemovePeer(types.ID(1)) if _, ok := tr.peers[types.ID(1)]; ok { t.Fatalf("senders[1] exists, want removed") } }
func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember { t := a.types[newAlarm.Alarm] if t == nil { t = make(alarmSet) a.types[newAlarm.Alarm] = t } m := t[types.ID(newAlarm.MemberID)] if m != nil { return m } t[types.ID(newAlarm.MemberID)] = newAlarm return newAlarm }
func startTestPipeline(tr *Transport, picker *urlPicker) *pipeline { p := &pipeline{ peerID: types.ID(1), tr: tr, picker: picker, status: newPeerStatus(types.ID(1)), raft: &fakeRaft{}, followerStats: &stats.FollowerStats{}, errorc: make(chan error, 1), } p.start() return p }
func GuessNodeID(nodes map[string]uint64, snap4 *Snapshot4, cfg *Config4, name string) uint64 { var snapNodes map[string]uint64 if snap4 != nil { snapNodes = snap4.GetNodesFromStore() } // First, use the flag, if set. if name != "" { log.Printf("Using suggested name %s", name) if val, ok := nodes[name]; ok { log.Printf("Found ID %s", types.ID(val)) return val } if snapNodes != nil { if val, ok := snapNodes[name]; ok { log.Printf("Found ID %s", types.ID(val)) return val } } log.Printf("Name not found, autodetecting...") } // Next, look at the snapshot peers, if that exists. if snap4 != nil { //snapNodes := make(map[string]uint64) //for _, p := range snap4.Peers { //m := generateNodeMember(p.Name, p.ConnectionString, "") //snapNodes[p.Name] = uint64(m.ID) //} for _, p := range cfg.Peers { log.Printf(p.Name) delete(snapNodes, p.Name) } if len(snapNodes) == 1 { for name, id := range nodes { log.Printf("Autodetected from snapshot: name %s", name) return id } } } // Then, try and deduce from the log. for _, p := range cfg.Peers { delete(nodes, p.Name) } if len(nodes) == 1 { for name, id := range nodes { log.Printf("Autodetected name %s", name) return id } } return 0 }
func TestTransportRemove(t *testing.T) { tr := &transport{ roundTripper: &roundTripperRecorder{}, leaderStats: stats.NewLeaderStats(""), peers: make(map[types.ID]Peer), } tr.AddPeer(1, []string{"http://localhost:2380"}) tr.RemovePeer(types.ID(1)) defer tr.Stop() if _, ok := tr.peers[types.ID(1)]; ok { t.Fatalf("senders[1] exists, want removed") } }
// TestStreamWriterAttachOutgoingConn tests that outgoingConn can be attached
// to streamWriter. After that, streamWriter can use it to send messages
// continuously, and closes it when stopped.
func TestStreamWriterAttachOutgoingConn(t *testing.T) {
	sw := startStreamWriter(types.ID(1), newPeerStatus(types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
	// the expected initial state of streamWriter is not working
	if _, ok := sw.writec(); ok {
		t.Errorf("initial working status = %v, want false", ok)
	}

	// repeat tests to ensure streamWriter can use last attached connection
	var wfc *fakeWriteFlushCloser
	for i := 0; i < 3; i++ {
		prevwfc := wfc
		wfc = newFakeWriteFlushCloser(nil)
		sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})

		// previous attached connection should be closed
		if prevwfc != nil {
			select {
			case <-prevwfc.closed:
			case <-time.After(time.Second):
				t.Errorf("#%d: close of previous connection timed out", i)
			}
		}

		// if prevwfc != nil, the new msgc is ready since prevwfc has closed
		// if prevwfc == nil, the first connection may be pending, but the first
		// msgc is already available since it's set on calling startStreamwriter
		msgc, _ := sw.writec()
		msgc <- raftpb.Message{}

		// the message must reach the newly attached connection
		select {
		case <-wfc.writec:
		case <-time.After(time.Second):
			t.Errorf("#%d: failed to write to the underlying connection", i)
		}
		// write chan is still available
		if _, ok := sw.writec(); !ok {
			t.Errorf("#%d: working status = %v, want true", i, ok)
		}
	}

	sw.stop()
	// write chan is unavailable since the writer is stopped.
	if _, ok := sw.writec(); ok {
		t.Errorf("working status after stop = %v, want false", ok)
	}
	// stopping the writer must close the last attached connection
	if !wfc.Closed() {
		t.Errorf("failed to close the underlying connection")
	}
}
func readWAL(waldir string, index uint64) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { var err error if w, err = wal.Open(waldir, index); err != nil { log.Fatalf("etcdserver: open wal error: %v", err) } var wmetadata []byte if wmetadata, st, ents, err = w.ReadAll(); err != nil { log.Fatalf("etcdserver: read wal error: %v", err) } var metadata pb.Metadata pbutil.MustUnmarshal(&metadata, wmetadata) id = types.ID(metadata.NodeID) cid = types.ID(metadata.ClusterID) return }
// TestPipelineSendFailed tests that when send func meets the post error, // it increases fail count in stats. func TestPipelineSendFailed(t *testing.T) { picker := mustNewURLPicker(t, []string{"http://localhost:2380"}) fs := &stats.FollowerStats{} p := newPipeline(newRespRoundTripper(0, errors.New("blah")), picker, types.ID(2), types.ID(1), types.ID(1), newPeerStatus(types.ID(1)), fs, &fakeRaft{}, nil) p.msgc <- raftpb.Message{Type: raftpb.MsgApp} testutil.WaitSchedule() p.stop() fs.Lock() defer fs.Unlock() if fs.Counts.Fail != 1 { t.Errorf("fail = %d, want 1", fs.Counts.Fail) } }