func TestNewDirExpirationTTL(t *testing.T) {
	nd, _ := newTestNodeDir()

	if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > expiration.Nanoseconds() {
		t.Errorf("ttl = %d, want %d < %d", ttl, ttl, expiration.Nanoseconds())
	}

	newExpiration := time.Hour
	nd.UpdateTTL(time.Now().Add(newExpiration))
	if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > newExpiration.Nanoseconds() {
		t.Errorf("ttl = %d, want %d < %d", ttl, ttl, newExpiration.Nanoseconds())
	}
}

func TestSnapshotStoreCreateSnap(t *testing.T) {
	snap := raftpb.Snapshot{
		Metadata: raftpb.SnapshotMetadata{Index: 1},
	}
	ss := newSnapshotStore("", &nopKV{})
	fakeClock := clockwork.NewFakeClock()
	ss.clock = fakeClock
	go func() {
		<-ss.reqsnapc
		ss.raftsnapc <- snap
	}()

	// create snapshot
	ss.createSnap()
	if !reflect.DeepEqual(ss.snap.raft(), snap) {
		t.Errorf("raftsnap = %+v, want %+v", ss.snap.raft(), snap)
	}

	// unused snapshot is cleared after clearUnusedSnapshotInterval
	fakeClock.BlockUntil(1)
	fakeClock.Advance(clearUnusedSnapshotInterval)
	testutil.WaitSchedule()
	ss.mu.Lock()
	if ss.snap != nil {
		t.Errorf("snap = %+v, want %+v", ss.snap, nil)
	}
	ss.mu.Unlock()
}

// newFakeClock creates a new FakeClock that has been advanced to at least minExpireTime
func newFakeClock() clockwork.FakeClock {
	fc := clockwork.NewFakeClock()
	for minExpireTime.After(fc.Now()) {
		// advance in the largest steps that still fit in a time.Duration
		fc.Advance((0x1 << 62) * time.Nanosecond)
	}
	return fc
}

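// An illustrative use of newFakeClock (a sketch, not part of the original
// suite): expiration tests must start from newFakeClock rather than a zero
// clock, because any TTL at or below minExpireTime is treated as permanent
// (see TestMinExpireTime below):
//
//	fc := newFakeClock()
//	s := newStore()
//	s.clock = fc
//	s.Create("/foo", false, "bar", false, fc.Now().Add(time.Second))
//	fc.Advance(2 * time.Second)
//	s.DeleteExpiredKeys(fc.Now()) // clock is past minExpireTime, so "/foo" really expires
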
func TestNewDirReadWriteListReprClone(t *testing.T) {
	nd, _ := newTestNodeDir()

	if _, err := nd.Read(); err == nil {
		t.Errorf("err = %v, want err != nil", err)
	}
	if err := nd.Write(val, nd.CreatedIndex+1); err == nil {
		t.Errorf("err = %v, want err != nil", err)
	}
	if ns, err := nd.List(); ns == nil || err != nil {
		t.Errorf("nodes = %v and err = %v, want nodes != nil and err == nil", ns, err)
	}

	en := nd.Repr(false, false, clockwork.NewFakeClock())
	if en.Key != nd.Path {
		t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path)
	}

	cn := nd.Clone()
	if cn.Path != nd.Path {
		t.Errorf("cn.Path = %s, want = %s", cn.Path, nd.Path)
	}
}

func TestPeriodic(t *testing.T) {
	fc := clockwork.NewFakeClock()
	compactable := &fakeCompactable{testutil.NewRecorderStream()}
	tb := &Periodic{
		clock:        fc,
		periodInHour: 1,
		rg:           &fakeRevGetter{},
		c:            compactable,
	}

	tb.Run()
	defer tb.Stop()

	n := int(time.Hour / checkCompactionInterval)
	for i := 0; i < 3; i++ {
		for j := 0; j < n; j++ {
			time.Sleep(5 * time.Millisecond)
			fc.Advance(checkCompactionInterval)
		}

		a, err := compactable.Wait(1)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: int64(i*n) + 1}) {
			t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: int64(i*n) + 1})
		}
	}
}

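// TestPeriodic and TestPeriodicPause depend on two fakes whose definitions
// live elsewhere in this package. The sketch below is one plausible shape for
// them, inferred from how the tests use them; the field layout and the exact
// Compactable/RevGetter method signatures (including the context parameter)
// are assumptions, not the actual definitions.

// fakeCompactable records every Compact call so the tests can wait for and
// inspect the issued CompactionRequest via the embedded recorder's Wait/Chan.
type fakeCompactable struct {
	testutil.Recorder
}

func (fc *fakeCompactable) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	fc.Record(testutil.Action{Name: "c", Params: []interface{}{r}})
	return &pb.CompactionResponse{}, nil
}

// fakeRevGetter returns a revision that grows by one per call; the compactor
// samples it once per checkCompactionInterval, which is why TestPeriodic
// expects the compacted revision to advance by n per simulated hour.
type fakeRevGetter struct {
	rev int64
}

func (fr *fakeRevGetter) Rev() int64 {
	fr.rev++
	return fr.rev
}
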
func TestNewKVExpiration(t *testing.T) {
	nd := newTestNode()

	if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > expiration.Nanoseconds() {
		t.Errorf("ttl = %d, want %d < %d", ttl, ttl, expiration.Nanoseconds())
	}

	newExpiration := time.Hour
	nd.UpdateTTL(time.Now().Add(newExpiration))
	if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > newExpiration.Nanoseconds() {
		t.Errorf("ttl = %d, want %d < %d", ttl, ttl, newExpiration.Nanoseconds())
	}
	if ns, err := nd.List(); ns != nil || err == nil {
		t.Errorf("nodes = %v and err = %v, want nodes = nil and err != nil", ns, err)
	}

	en := nd.Repr(false, false, clockwork.NewFakeClock())
	if en.Key != nd.Path {
		t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path)
	}
	if *(en.Value) != nd.Value {
		t.Errorf("*(en.Value) = %s, want = %s", *(en.Value), nd.Value)
	}
}

// Ensure that any TTL <= minExpireTime becomes Permanent
func TestMinExpireTime(t *testing.T) {
	s := newStore()
	fc := clockwork.NewFakeClock()
	s.clock = fc
	// FakeClock starts at 0, so minExpireTime should be far in the future... but just in case
	assert.True(t, minExpireTime.After(fc.Now()), "minExpireTime should be ahead of FakeClock!")

	s.Create("/foo", false, "Y", false, fc.Now().Add(3*time.Second))
	fc.Advance(5 * time.Second)
	// Ensure it hasn't expired
	s.DeleteExpiredKeys(fc.Now())

	var eidx uint64 = 1
	e, err := s.Get("/foo", true, false)
	assert.Nil(t, err, "")
	assert.Equal(t, e.EtcdIndex, eidx, "")
	assert.Equal(t, e.Action, "get", "")
	assert.Equal(t, e.Node.Key, "/foo", "")
	assert.Equal(t, e.Node.TTL, 0)
}

func TestSnapshotStoreGetSnap(t *testing.T) {
	snap := raftpb.Snapshot{
		Metadata: raftpb.SnapshotMetadata{Index: 1},
	}
	ss := newSnapshotStore("", &nopKV{})
	fakeClock := clockwork.NewFakeClock()
	ss.clock = fakeClock
	ss.tr = &nopTransporter{}
	go func() {
		<-ss.reqsnapc
		ss.raftsnapc <- snap
	}()

	// get snap when no snapshot stored
	_, err := ss.getSnap()
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		t.Fatalf("getSnap error = %v, want %v", err, raft.ErrSnapshotTemporarilyUnavailable)
	}

	// wait for asynchronous snapshot creation to finish
	testutil.WaitSchedule()
	// get the created snapshot
	s, err := ss.getSnap()
	if err != nil {
		t.Fatalf("getSnap error = %v, want nil", err)
	}
	if !reflect.DeepEqual(s.raft(), snap) {
		t.Errorf("raftsnap = %+v, want %+v", s.raft(), snap)
	}
	if !ss.inUse {
		t.Errorf("inUse = %v, want true", ss.inUse)
	}

	// get snap while the stored snapshot is still in use
	_, err = ss.getSnap()
	if err != raft.ErrSnapshotTemporarilyUnavailable {
		t.Fatalf("getSnap error = %v, want %v", err, raft.ErrSnapshotTemporarilyUnavailable)
	}

	// clean up
	fakeClock.Advance(clearUnusedSnapshotInterval)
}

func TestRetryFailure(t *testing.T) {
	cluster := "1000"
	c := &clientWithRetry{failTimes: 4}
	fc := clockwork.NewFakeClock()
	d := discovery{
		cluster: cluster,
		id:      1,
		c:       c,
		clock:   fc,
	}
	go func() {
		for i := uint(1); i <= nRetries; i++ {
			fc.BlockUntil(1)
			fc.Advance(time.Second * (0x1 << i))
		}
	}()
	if _, _, _, err := d.checkCluster(); err != ErrTooManyRetries {
		t.Errorf("err = %v, want %v", err, ErrTooManyRetries)
	}
}

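// clientWithRetry (used above) and clientWithResp (used in TestCheckCluster
// and TestWaitNodes below) are defined elsewhere in this file. A minimal
// sketch follows, under the assumptions that the discovery client interface
// exposes Create/Get returning (*client.Response, error), that its watcher is
// a single-method Next interface, and that checkCluster treats the sentinel
// error below as retryable; all of these names and signatures are guesses
// based on how the tests drive the fakes, not the actual definitions.

// errFakeTimeout stands in for whatever retryable sentinel the real fakes return.
var errFakeTimeout = errors.New("fake client: timed out")

// watcher is the assumed watch interface the discovery code consumes.
type watcher interface {
	Next(context.Context) (*client.Response, error)
}

// clientWithResp replays canned responses in order, one per call.
type clientWithResp struct {
	rs []*client.Response
	w  watcher
}

func (c *clientWithResp) Create(ctx context.Context, key, value string) (*client.Response, error) {
	if len(c.rs) == 0 {
		return &client.Response{}, nil
	}
	r := c.rs[0]
	c.rs = c.rs[1:]
	return r, nil
}

func (c *clientWithResp) Get(ctx context.Context, key string) (*client.Response, error) {
	return c.Create(ctx, key, "")
}

// clientWithRetry fails its first failTimes calls, forcing the discovery code
// through its exponential backoff, which the tests unblock by advancing the
// fake clock.
type clientWithRetry struct {
	clientWithResp
	failCount int
	failTimes int
}

func (c *clientWithRetry) Create(ctx context.Context, key, value string) (*client.Response, error) {
	if c.failCount < c.failTimes {
		c.failCount++
		return nil, errFakeTimeout
	}
	return c.clientWithResp.Create(ctx, key, value)
}

func (c *clientWithRetry) Get(ctx context.Context, key string) (*client.Response, error) {
	if c.failCount < c.failTimes {
		c.failCount++
		return nil, errFakeTimeout
	}
	return c.clientWithResp.Get(ctx, key)
}
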
func TestNewKVListReprCompareClone(t *testing.T) {
	nd := newTestNode()

	if ns, err := nd.List(); ns != nil || err == nil {
		t.Errorf("nodes = %v and err = %v, want nodes = nil and err != nil", ns, err)
	}

	en := nd.Repr(false, false, clockwork.NewFakeClock())
	if en.Key != nd.Path {
		t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path)
	}
	if *(en.Value) != nd.Value {
		t.Errorf("*(en.Value) = %s, want = %s", *(en.Value), nd.Value)
	}

	cn := nd.Clone()
	if cn.Path != nd.Path {
		t.Errorf("cn.Path = %s, want = %s", cn.Path, nd.Path)
	}
	if cn.Value != nd.Value {
		t.Errorf("cn.Value = %s, want = %s", cn.Value, nd.Value)
	}
}

func TestPeriodicPause(t *testing.T) {
	fc := clockwork.NewFakeClock()
	compactable := &fakeCompactable{testutil.NewRecorderStream()}
	tb := &Periodic{
		clock:        fc,
		periodInHour: 1,
		rg:           &fakeRevGetter{},
		c:            compactable,
	}

	tb.Run()
	tb.Pause()

	n := int(time.Hour / checkCompactionInterval)
	for i := 0; i < 3*n; i++ {
		time.Sleep(5 * time.Millisecond)
		fc.Advance(checkCompactionInterval)
	}
	// tb is paused, so no compaction request should be issued
	select {
	case a := <-compactable.Chan():
		t.Fatalf("unexpected action %v", a)
	case <-time.After(10 * time.Millisecond):
	}

	tb.Resume()
	fc.Advance(checkCompactionInterval)
	a, err := compactable.Wait(1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: int64(2*n) + 2}) {
		t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: int64(2*n) + 2})
	}
}

func TestCheckCluster(t *testing.T) {
	cluster := "/prefix/1000"
	self := "/1000/1"
	tests := []struct {
		nodes []*client.Node
		index uint64
		werr  error
		wsize int
	}{
		{
			// self is in the size range
			[]*client.Node{
				{Key: "/1000/_config/size", Value: "3", CreatedIndex: 1},
				{Key: "/1000/_config/"},
				{Key: self, CreatedIndex: 2},
				{Key: "/1000/2", CreatedIndex: 3},
				{Key: "/1000/3", CreatedIndex: 4},
				{Key: "/1000/4", CreatedIndex: 5},
			},
			5, nil, 3,
		},
		{
			// self is in the size range
			[]*client.Node{
				{Key: "/1000/_config/size", Value: "3", CreatedIndex: 1},
				{Key: "/1000/_config/"},
				{Key: "/1000/2", CreatedIndex: 2},
				{Key: "/1000/3", CreatedIndex: 3},
				{Key: self, CreatedIndex: 4},
				{Key: "/1000/4", CreatedIndex: 5},
			},
			5, nil, 3,
		},
		{
			// self is out of the size range
			[]*client.Node{
				{Key: "/1000/_config/size", Value: "3", CreatedIndex: 1},
				{Key: "/1000/_config/"},
				{Key: "/1000/2", CreatedIndex: 2},
				{Key: "/1000/3", CreatedIndex: 3},
				{Key: "/1000/4", CreatedIndex: 4},
				{Key: self, CreatedIndex: 5},
			},
			5, ErrFullCluster, 3,
		},
		{
			// self is not in the cluster
			[]*client.Node{
				{Key: "/1000/_config/size", Value: "3", CreatedIndex: 1},
				{Key: "/1000/_config/"},
				{Key: "/1000/2", CreatedIndex: 2},
				{Key: "/1000/3", CreatedIndex: 3},
			},
			3, nil, 3,
		},
		{
			[]*client.Node{
				{Key: "/1000/_config/size", Value: "3", CreatedIndex: 1},
				{Key: "/1000/_config/"},
				{Key: "/1000/2", CreatedIndex: 2},
				{Key: "/1000/3", CreatedIndex: 3},
				{Key: "/1000/4", CreatedIndex: 4},
			},
			3, ErrFullCluster, 3,
		},
		{
			// bad size key
			[]*client.Node{
				{Key: "/1000/_config/size", Value: "bad", CreatedIndex: 1},
			},
			0, ErrBadSizeKey, 0,
		},
		{
			// no size key
			[]*client.Node{},
			0, ErrSizeNotFound, 0,
		},
	}

	for i, tt := range tests {
		rs := make([]*client.Response, 0)
		if len(tt.nodes) > 0 {
			rs = append(rs, &client.Response{Node: tt.nodes[0], Index: tt.index})
			rs = append(rs, &client.Response{
				Node: &client.Node{
					Key:   cluster,
					Nodes: tt.nodes[1:],
				},
				Index: tt.index,
			})
		}
		c := &clientWithResp{rs: rs}
		dBase := discovery{cluster: cluster, id: 1, c: c}

		cRetry := &clientWithRetry{failTimes: 3}
		cRetry.rs = rs
		fc := clockwork.NewFakeClock()
		dRetry := discovery{cluster: cluster, id: 1, c: cRetry, clock: fc}

		for _, d := range []discovery{dBase, dRetry} {
			go func() {
				for i := uint(1); i <= maxRetryInTest; i++ {
					fc.BlockUntil(1)
					fc.Advance(time.Second * (0x1 << i))
				}
			}()
			ns, size, index, err := d.checkCluster()
			if err != tt.werr {
				t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
			}
			if !reflect.DeepEqual(ns, tt.nodes) {
				t.Errorf("#%d: nodes = %v, want %v", i, ns, tt.nodes)
			}
			if size != tt.wsize {
				t.Errorf("#%d: size = %v, want %d", i, size, tt.wsize)
			}
			if index != tt.index {
				t.Errorf("#%d: index = %v, want %d", i, index, tt.index)
			}
		}
	}
}

func TestWaitNodes(t *testing.T) {
	all := []*client.Node{
		0: {Key: "/1000/1", CreatedIndex: 2},
		1: {Key: "/1000/2", CreatedIndex: 3},
		2: {Key: "/1000/3", CreatedIndex: 4},
	}

	tests := []struct {
		nodes []*client.Node
		rs    []*client.Response
	}{
		{
			all,
			[]*client.Response{},
		},
		{
			all[:1],
			[]*client.Response{
				{Node: &client.Node{Key: "/1000/2", CreatedIndex: 3}},
				{Node: &client.Node{Key: "/1000/3", CreatedIndex: 4}},
			},
		},
		{
			all[:2],
			[]*client.Response{
				{Node: &client.Node{Key: "/1000/3", CreatedIndex: 4}},
			},
		},
		{
			append(all, &client.Node{Key: "/1000/4", CreatedIndex: 5}),
			[]*client.Response{
				{Node: &client.Node{Key: "/1000/3", CreatedIndex: 4}},
			},
		},
	}

	for i, tt := range tests {
		// Basic case
		c := &clientWithResp{rs: nil, w: &watcherWithResp{rs: tt.rs}}
		dBase := &discovery{cluster: "1000", c: c}

		// Retry case
		retryScanResp := make([]*client.Response, 0)
		if len(tt.nodes) > 0 {
			retryScanResp = append(retryScanResp, &client.Response{
				Node: &client.Node{
					Key:   "1000",
					Value: strconv.Itoa(3),
				},
			})
			retryScanResp = append(retryScanResp, &client.Response{
				Node: &client.Node{
					Nodes: tt.nodes,
				},
			})
		}
		cRetry := &clientWithResp{
			rs: retryScanResp,
			w:  &watcherWithRetry{rs: tt.rs, failTimes: 2},
		}
		fc := clockwork.NewFakeClock()
		dRetry := &discovery{
			cluster: "1000",
			c:       cRetry,
			clock:   fc,
		}

		for _, d := range []*discovery{dBase, dRetry} {
			go func() {
				for i := uint(1); i <= maxRetryInTest; i++ {
					fc.BlockUntil(1)
					fc.Advance(time.Second * (0x1 << i))
				}
			}()
			g, err := d.waitNodes(tt.nodes, 3, 0) // we do not care about index in this test
			if err != nil {
				t.Errorf("#%d: err = %v, want %v", i, err, nil)
			}
			if !reflect.DeepEqual(g, all) {
				t.Errorf("#%d: all = %v, want %v", i, g, all)
			}
		}
	}
}

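// watcherWithResp and watcherWithRetry are likewise defined elsewhere in this
// file; the sketch below follows the same assumptions as the client fakes
// above (the Next signature, the errFakeTimeout sentinel, and the
// empty-response fallback are guesses based on how the tests drive them).

// watcherWithResp replays canned watch responses in order.
type watcherWithResp struct {
	rs []*client.Response
}

func (w *watcherWithResp) Next(ctx context.Context) (*client.Response, error) {
	if len(w.rs) == 0 {
		return &client.Response{}, nil
	}
	r := w.rs[0]
	w.rs = w.rs[1:]
	return r, nil
}

// watcherWithRetry fails its first failTimes Next calls, exercising the
// backoff path in waitNodes before behaving like watcherWithResp.
type watcherWithRetry struct {
	rs        []*client.Response
	failCount int
	failTimes int
}

func (w *watcherWithRetry) Next(ctx context.Context) (*client.Response, error) {
	if w.failCount < w.failTimes {
		w.failCount++
		return nil, errFakeTimeout
	}
	if len(w.rs) == 0 {
		return &client.Response{}, nil
	}
	r := w.rs[0]
	w.rs = w.rs[1:]
	return r, nil
}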