func TestNewDirExpirationTTL(t *testing.T) {
	nd, _ := newTestNodeDir()
	if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > expiration.Nanoseconds() {
		t.Errorf("ttl = %d, want %d < %d", ttl, ttl, expiration.Nanoseconds())
	}

	newExpiration := time.Hour
	nd.UpdateTTL(time.Now().Add(newExpiration))
	if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > newExpiration.Nanoseconds() {
		t.Errorf("ttl = %d, want %d < %d", ttl, ttl, newExpiration.Nanoseconds())
	}
}
func TestNewDirReadWriteListReprClone(t *testing.T) {
	nd, _ := newTestNodeDir()

	if _, err := nd.Read(); err == nil {
		t.Errorf("err = %v, want err != nil", err)
	}
	if err := nd.Write(val, nd.CreatedIndex+1); err == nil {
		t.Errorf("err = %v, want err != nil", err)
	}
	if ns, err := nd.List(); ns == nil && err != nil {
		t.Errorf("nodes = %v and err = %v, want nodes != nil and err == nil", ns, err)
	}

	en := nd.Repr(false, false, clockwork.NewFakeClock())
	if en.Key != nd.Path {
		t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path)
	}

	cn := nd.Clone()
	if cn.Path != nd.Path {
		t.Errorf("cn.Path = %s, want = %s", cn.Path, nd.Path)
	}
}
func TestWaitForProviderConfigImmediateSuccess(t *testing.T) {
	cfg := newValidProviderConfig()
	b, err := json.Marshal(&cfg)
	if err != nil {
		t.Fatalf("Failed marshaling provider config")
	}

	resp := http.Response{Body: ioutil.NopCloser(bytes.NewBuffer(b))}
	hc := &fakeClient{&resp}
	fc := clockwork.NewFakeClock()

	reschan := make(chan ProviderConfig)
	go func() {
		reschan <- waitForProviderConfig(hc, cfg.Issuer.String(), fc)
	}()

	var got ProviderConfig
	select {
	case got = <-reschan:
	case <-time.After(time.Second):
		t.Fatalf("Did not receive result within 1s")
	}

	if !reflect.DeepEqual(cfg, got) {
		t.Fatalf("Received incorrect provider config: want=%#v got=%#v", cfg, got)
	}
}
func TestPrivateKeyManagerExpiresAt(t *testing.T) {
	fc := clockwork.NewFakeClock()
	now := fc.Now().UTC()

	k := generatePrivateKeyStatic(t, 17)
	km := &privateKeyManager{
		clock: fc,
	}

	want := fc.Now().UTC()
	got := km.ExpiresAt()
	if want != got {
		t.Fatalf("Incorrect expiration time: want=%v got=%v", want, got)
	}

	err := km.Set(&PrivateKeySet{
		keys:        []*PrivateKey{k},
		ActiveKeyID: k.KeyID,
		expiresAt:   now.Add(2 * time.Minute),
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	want = fc.Now().UTC().Add(2 * time.Minute)
	got = km.ExpiresAt()
	if want != got {
		t.Fatalf("Incorrect expiration time: want=%v got=%v", want, got)
	}
}
func makeTestSessionKeyRepoDB(dsn string) func() (session.SessionKeyRepo, clockwork.FakeClock) {
	return func() (session.SessionKeyRepo, clockwork.FakeClock) {
		c := initDB(dsn)
		fc := clockwork.NewFakeClock()
		return db.NewSessionKeyRepoWithClock(c, fc), fc
	}
}
func TestPeriodic(t *testing.T) {
	fc := clockwork.NewFakeClock()
	rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
	compactable := &fakeCompactable{testutil.NewRecorderStream()}
	tb := &Periodic{
		clock:        fc,
		periodInHour: 1,
		rg:           rg,
		c:            compactable,
	}

	tb.Run()
	defer tb.Stop()

	n := int(time.Hour / checkCompactionInterval)
	// collect 3 hours of revisions
	for i := 0; i < 3; i++ {
		// advance one hour, one revision for each interval
		for j := 0; j < n; j++ {
			fc.Advance(checkCompactionInterval)
			rg.Wait(1)
		}

		// ready to acknowledge hour "i"; unblock clock
		fc.Advance(checkCompactionInterval)
		a, err := compactable.Wait(1)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: int64(i*n) + 1}) {
			t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: int64(i*n) + 1})
		}
	}
}
func TestPeriodic(t *testing.T) {
	fc := clockwork.NewFakeClock()
	compactable := &fakeCompactable{testutil.NewRecorderStream()}
	tb := &Periodic{
		clock:        fc,
		periodInHour: 1,
		rg:           &fakeRevGetter{},
		c:            compactable,
	}

	tb.Run()
	defer tb.Stop()

	n := int(time.Hour / checkCompactionInterval)
	for i := 0; i < 3; i++ {
		for j := 0; j < n; j++ {
			time.Sleep(5 * time.Millisecond)
			fc.Advance(checkCompactionInterval)
		}

		a, err := compactable.Wait(1)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: int64(i*n) + 1}) {
			t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: int64(i*n) + 1})
		}
	}
}
// newFakeClock creates a new FakeClock that has been advanced to at least minExpireTime
func newFakeClock() clockwork.FakeClock {
	fc := clockwork.NewFakeClock()
	for minExpireTime.After(fc.Now()) {
		fc.Advance((0x1 << 62) * time.Nanosecond)
	}
	return fc
}
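// The tests collected here lean on the same jonboulle/clockwork pattern
// throughout: the code under test waits on the fake clock, the test calls
// BlockUntil to learn that a waiter is registered, and Advance then moves time
// forward deterministically. The function below is a minimal illustrative
// sketch of that pattern, not taken from any of the projects quoted here;
// waitThenSignal is a hypothetical name.
func waitThenSignal() {
	fc := clockwork.NewFakeClock()
	done := make(chan struct{})

	go func() {
		// the "code under test": sleeps an hour on the fake clock
		fc.Sleep(time.Hour)
		close(done)
	}()

	fc.BlockUntil(1)      // block until the goroutine is actually sleeping
	fc.Advance(time.Hour) // advance fake time; the sleeper wakes immediately
	<-done                // completes without any real-time delay
}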
func TestNewPasswordReset(t *testing.T) {
	clock = clockwork.NewFakeClock()
	defer func() {
		clock = clockwork.NewRealClock()
	}()

	now := clock.Now()

	issuer, _ := url.Parse("http://example.com")
	clientID := "myclient"
	usr := User{ID: "123456", Email: "*****@*****.**"}
	callback := "http://client.example.com/callback"
	expires := time.Hour * 3
	password := Password("passy")

	tests := []struct {
		user     User
		password Password
		issuer   url.URL
		clientID string
		callback string
		expires  time.Duration
		want     jose.Claims
	}{
		{
			issuer:   *issuer,
			clientID: clientID,
			user:     usr,
			callback: callback,
			expires:  expires,
			password: password,
			want: map[string]interface{}{
				"iss": issuer.String(),
				"aud": clientID,
				ClaimPasswordResetCallback: callback,
				ClaimPasswordResetPassword: string(password),
				"exp": float64(now.Add(expires).Unix()),
				"sub": usr.ID,
				"iat": float64(now.Unix()),
			},
		},
	}

	for i, tt := range tests {
		cbURL, err := url.Parse(tt.callback)
		if err != nil {
			t.Fatalf("case %d: non-nil err: %q", i, err)
		}
		ev := NewPasswordReset(tt.user, tt.password, tt.issuer, tt.clientID, *cbURL, tt.expires)

		if diff := pretty.Compare(tt.want, ev.claims); diff != "" {
			t.Errorf("case %d: Compare(want, got): %v", i, diff)
		}

		if diff := pretty.Compare(ev.Password(), password); diff != "" {
			t.Errorf("case %d: Compare(want, got): %v", i, diff)
		}
	}
}
// A rekey is needed, but the user closes the rekey status window.
func TestRekeyNeededUserClose(t *testing.T) {
	tc := libkb.SetupTest(t, "gregor", 1)
	defer tc.Cleanup()

	rkeyui := &fakeRekeyUI{}
	rkeyui.notifyRefresh = make(chan bool, 10)
	router := fakeUIRouter{
		rekeyUI: rkeyui,
	}
	tc.G.SetUIRouter(&router)

	clock := clockwork.NewFakeClock()
	tc.G.SetClock(clock)

	gUID, h, rekeyHandler := rekeySetup(tc)

	rekeyBroadcast(tc, gUID, h, problemSet)

	select {
	case <-rekeyHandler.notifyStart:
	case <-time.After(20 * time.Second):
		t.Fatal("timeout waiting for rekeyHandler.notifyStart")
	}

	// since this is testing that the user closes a rekey status window,
	// wait for the refresh call:
	select {
	case <-rkeyui.notifyRefresh:
	case <-time.After(20 * time.Second):
		t.Fatal("timeout waiting for rekeyui.notifyRefresh")
	}

	// now call finish
	outcome, err := h.RekeyStatusFinish(context.Background(), rkeyui.sessionID)
	if err != nil {
		t.Fatal(err)
	}
	if outcome != keybase1.Outcome_IGNORED {
		t.Fatalf("RekeyStatusFinish outcome: %v, expected %v", outcome, keybase1.Outcome_IGNORED)
	}

	clock.Advance(3 * time.Second)

	select {
	case <-rekeyHandler.notifyComplete:
	case <-time.After(20 * time.Second):
		t.Fatal("timeout waiting for rekeyHandler.notifyComplete")
	}

	// there should be one call to refresh to bring the window up, but then the RekeyStatusFinish call above
	// should close the window and stop the loop.
	if len(rkeyui.refreshArgs) != 1 {
		t.Fatalf("rkeyui refresh calls: %d, expected 1", len(rkeyui.refreshArgs))
	}
	if len(rkeyui.refreshArgs[0].ProblemSetDevices.ProblemSet.Tlfs) != 1 {
		t.Errorf("first refresh call, tlf count = %d, expected 1", len(rkeyui.refreshArgs[0].ProblemSetDevices.ProblemSet.Tlfs))
	}
}
func newSessionKeyRepo(t *testing.T) (session.SessionKeyRepo, clockwork.FakeClock) {
	clock := clockwork.NewFakeClock()
	if os.Getenv("DEX_TEST_DSN") == "" {
		return db.NewSessionKeyRepoWithClock(db.NewMemDB(), clock), clock
	}
	dbMap := connect(t)
	return db.NewSessionKeyRepoWithClock(dbMap, clock), clock
}
func TestPrivateKeyRotatorRun(t *testing.T) {
	fc := clockwork.NewFakeClock()
	now := fc.Now().UTC()

	k1 := generatePrivateKeyStatic(t, 1)
	k2 := generatePrivateKeyStatic(t, 2)
	k3 := generatePrivateKeyStatic(t, 3)
	k4 := generatePrivateKeyStatic(t, 4)

	kRepo := NewPrivateKeySetRepo()
	krot := NewPrivateKeyRotator(kRepo, 4*time.Second)
	krot.clock = fc
	krot.generateKey = generatePrivateKeySerialFunc(t)

	steps := []*PrivateKeySet{
		&PrivateKeySet{
			keys:        []*PrivateKey{k1},
			ActiveKeyID: k1.KeyID,
			expiresAt:   now.Add(4 * time.Second),
		},
		&PrivateKeySet{
			keys:        []*PrivateKey{k2, k1},
			ActiveKeyID: k2.KeyID,
			expiresAt:   now.Add(6 * time.Second),
		},
		&PrivateKeySet{
			keys:        []*PrivateKey{k3, k2},
			ActiveKeyID: k3.KeyID,
			expiresAt:   now.Add(8 * time.Second),
		},
		&PrivateKeySet{
			keys:        []*PrivateKey{k4, k3},
			ActiveKeyID: k4.KeyID,
			expiresAt:   now.Add(10 * time.Second),
		},
	}

	stop := krot.Run()
	defer close(stop)

	for i, st := range steps {
		// wait for the rotater to get sleepy
		fc.BlockUntil(1)

		got, err := kRepo.Get()
		if err != nil {
			t.Fatalf("step %d: unexpected error: %v", i, err)
		}
		if !reflect.DeepEqual(st, got) {
			t.Fatalf("step %d: unexpected state: want=%#v got=%#v", i, st, got)
		}
		fc.Advance(2 * time.Second)
	}
}
func TestHandleKeysFunc(t *testing.T) {
	fc := clockwork.NewFakeClock()
	exp := fc.Now().Add(13 * time.Second)
	km := &StaticKeyManager{
		expiresAt: exp,
		keys: []jose.JWK{
			jose.JWK{
				ID:       "1234",
				Type:     "RSA",
				Alg:      "RS256",
				Use:      "sig",
				Exponent: 65537,
				Modulus:  big.NewInt(int64(5716758339926702)),
			},
			jose.JWK{
				ID:       "5678",
				Type:     "RSA",
				Alg:      "RS256",
				Use:      "sig",
				Exponent: 65537,
				Modulus:  big.NewInt(int64(1234294715519622)),
			},
		},
	}

	req, err := http.NewRequest("GET", "http://server.example.com", nil)
	if err != nil {
		t.Fatalf("Failed creating HTTP request: err=%v", err)
	}

	w := httptest.NewRecorder()
	hdlr := handleKeysFunc(km, fc)
	hdlr.ServeHTTP(w, req)

	if w.Code != http.StatusOK {
		t.Fatalf("Incorrect status code: want=200 got=%d", w.Code)
	}

	wantHeader := http.Header{
		"Content-Type":  []string{"application/json"},
		"Cache-Control": []string{"public, max-age=13"},
		"Expires":       []string{exp.Format(time.RFC1123)},
	}
	gotHeader := w.Header()
	if !reflect.DeepEqual(wantHeader, gotHeader) {
		t.Fatalf("Incorrect headers: want=%#v got=%#v", wantHeader, gotHeader)
	}

	wantBody := `{"keys":[{"kid":"1234","kty":"RSA","alg":"RS256","use":"sig","e":"AQAB","n":"FE9chh46rg=="},{"kid":"5678","kty":"RSA","alg":"RS256","use":"sig","e":"AQAB","n":"BGKVohEShg=="}]}`
	gotBody := w.Body.String()
	if wantBody != gotBody {
		t.Fatalf("Incorrect body: want=%s got=%s", wantBody, gotBody)
	}
}
func TestPrivateKeyRotatorExpiresAt(t *testing.T) {
	fc := clockwork.NewFakeClock()
	krot := &PrivateKeyRotator{
		clock: fc,
		ttl:   time.Minute,
	}
	got := krot.expiresAt()
	want := fc.Now().UTC().Add(time.Minute)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("Incorrect expiration time: want=%v got=%v", want, got)
	}
}
func TestHealthy(t *testing.T) {
	fc := clockwork.NewFakeClock()
	now := fc.Now().UTC()

	tests := []struct {
		expiresAt time.Time
		numKeys   int
		expected  error
	}{
		{
			expiresAt: now.Add(time.Hour),
			numKeys:   2,
			expected:  nil,
		},
		{
			expiresAt: now.Add(time.Hour),
			numKeys:   -1,
			expected:  ErrorNoKeys,
		},
		{
			expiresAt: now.Add(time.Hour),
			numKeys:   0,
			expected:  ErrorNoKeys,
		},
		{
			expiresAt: now.Add(-time.Hour),
			numKeys:   2,
			expected:  ErrorPrivateKeysExpired,
		},
	}

	for i, tt := range tests {
		kRepo := NewPrivateKeySetRepo()
		krot := NewPrivateKeyRotator(kRepo, time.Hour)
		krot.clock = fc

		pks := &PrivateKeySet{
			expiresAt: tt.expiresAt,
		}
		if tt.numKeys != -1 {
			for n := 0; n < tt.numKeys; n++ {
				pks.keys = append(pks.keys, generatePrivateKeyStatic(t, n))
			}
			err := kRepo.Set(pks)
			if err != nil {
				t.Fatalf("case %d: unexpected error: %v", i, err)
			}
		}

		if err := krot.Healthy(); err != tt.expected {
			t.Errorf("case %d: got==%q, want==%q", i, err, tt.expected)
		}
	}
}
func (s *HooksSuite) TestSafeForConcurrentAccess(c *C) {
	logger := log.New()
	logger.Out = ioutil.Discard
	entry := logger.WithFields(log.Fields{"foo": "bar"})
	logger.Hooks.Add(&UDPHook{Clock: clockwork.NewFakeClock()})
	for i := 0; i < 3; i++ {
		go func(entry *log.Entry) {
			for i := 0; i < 1000; i++ {
				entry.Infof("test")
			}
		}(entry)
	}
}
func TestSyncFail(t *testing.T) {
	tests := []error{
		nil,
		errors.New("fail!"),
	}

	for i, tt := range tests {
		from := &staticReadableKeySetRepo{ks: nil, err: tt}
		to := NewPrivateKeySetRepo()

		if _, err := sync(from, to, clockwork.NewFakeClock()); err == nil {
			t.Errorf("case %d: expected non-nil error", i)
		}
	}
}
func TestNewKVExpiration(t *testing.T) {
	nd := newTestNode()
	if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > expiration.Nanoseconds() {
		t.Errorf("ttl = %d, want %d < %d", ttl, ttl, expiration.Nanoseconds())
	}

	newExpiration := time.Hour
	nd.UpdateTTL(time.Now().Add(newExpiration))
	if _, ttl := nd.expirationAndTTL(clockwork.NewFakeClock()); ttl > newExpiration.Nanoseconds() {
		t.Errorf("ttl = %d, want %d < %d", ttl, ttl, newExpiration.Nanoseconds())
	}

	if ns, err := nd.List(); ns != nil || err == nil {
		t.Errorf("nodes = %v and err = %v, want nodes = nil and err != nil", ns, err)
	}

	en := nd.Repr(false, false, clockwork.NewFakeClock())
	if en.Key != nd.Path {
		t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path)
	}
	if *(en.Value) != nd.Value {
		t.Errorf("*(en.Value) = %s, want = %s", *(en.Value), nd.Value)
	}
}
func TestWatcherEmitsOnChange(t *testing.T) {
	var wg sync.WaitGroup
	watcher := NewWatcher("test-url-does-not-exist")
	clock := clockwork.NewFakeClock()
	count := 0

	MockifyWatcher(watcher, &MockJsonAccessor{
		eras: []MockJsonEra{
			MockJsonEra{contentToReturn: fixture1},
			MockJsonEra{contentToReturn: fixture1},
			MockJsonEra{contentToReturn: fixture1},
			MockJsonEra{contentToReturn: fixture1},
			MockJsonEra{contentToReturn: fixture2},
		},
	}, clock)

	ctx, _ := context.WithCancel(context.Background())

	go func() {
		for {
			select {
			case <-watcher.Updated:
				count++
				wg.Done()
			case <-watcher.OnError:
				t.Error("No errors should have been returned.")
			}
		}
	}()

	wg.Add(1)
	watcher.Watch(ctx)
	wg.Wait()

	clock.Advance(watcher.periodInSeconds)
	wg.Add(1)
	wg.Wait()

	if count != 2 {
		t.Error("Should have received 2 events")
	}
}
func makeTestFixtures() *testFixtures {
	f := &testFixtures{}
	f.clock = clockwork.NewFakeClock()

	f.ur = user.NewUserRepoFromUsers([]user.UserWithRemoteIdentities{
		{
			User: user.User{
				ID:    "ID-1",
				Email: "*****@*****.**",
			},
			RemoteIdentities: []user.RemoteIdentity{
				{
					ConnectorID: "local",
					ID:          "1",
				},
			},
		},
		{
			User: user.User{
				ID:            "ID-2",
				Email:         "*****@*****.**",
				EmailVerified: true,
			},
			RemoteIdentities: []user.RemoteIdentity{
				{
					ConnectorID: "local",
					ID:          "2",
				},
			},
		},
	})
	f.pwr = user.NewPasswordInfoRepoFromPasswordInfos([]user.PasswordInfo{
		{
			UserID:   "ID-1",
			Password: []byte("password-1"),
		},
		{
			UserID:   "ID-2",
			Password: []byte("password-2"),
		},
	})
	f.ccr = connector.NewConnectorConfigRepoFromConfigs([]connector.ConnectorConfig{
		&connector.LocalConnectorConfig{ID: "local"},
	})
	f.mgr = NewUserManager(f.ur, f.pwr, f.ccr, repo.InMemTransactionFactory, ManagerOptions{})
	f.mgr.Clock = f.clock
	return f
}
// Ensure that any TTL <= minExpireTime becomes Permanent
func TestMinExpireTime(t *testing.T) {
	s := newStore()
	fc := clockwork.NewFakeClock()
	s.clock = fc
	// FakeClock starts at 0, so minExpireTime should be far in the future.. but just in case
	assert.True(t, minExpireTime.After(fc.Now()), "minExpireTime should be ahead of FakeClock!")

	s.Create("/foo", false, "Y", false, TTLOptionSet{ExpireTime: fc.Now().Add(3 * time.Second)})
	fc.Advance(5 * time.Second)

	// Ensure it hasn't expired
	s.DeleteExpiredKeys(fc.Now())

	var eidx uint64 = 1
	e, err := s.Get("/foo", true, false)
	assert.Nil(t, err, "")
	assert.Equal(t, e.EtcdIndex, eidx, "")
	assert.Equal(t, e.Action, "get", "")
	assert.Equal(t, e.Node.Key, "/foo", "")
	assert.Equal(t, e.Node.TTL, 0)
}
func TestPeriodicPause(t *testing.T) {
	fc := clockwork.NewFakeClock()
	compactable := &fakeCompactable{testutil.NewRecorderStream()}
	rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
	tb := &Periodic{
		clock:        fc,
		periodInHour: 1,
		rg:           rg,
		c:            compactable,
	}

	tb.Run()
	tb.Pause()

	// tb will collect 3 hours of revisions but not compact since paused
	n := int(time.Hour / checkCompactionInterval)
	for i := 0; i < 3*n; i++ {
		fc.Advance(checkCompactionInterval)
		rg.Wait(1)
	}

	// tb ends up waiting for the clock
	select {
	case a := <-compactable.Chan():
		t.Fatalf("unexpected action %v", a)
	case <-time.After(10 * time.Millisecond):
	}

	// tb resumes to being blocked on the clock
	tb.Resume()

	// unblock clock, will kick off a compaction at hour 3
	fc.Advance(checkCompactionInterval)
	a, err := compactable.Wait(1)
	if err != nil {
		t.Fatal(err)
	}

	// compact the revision from hour 2
	wreq := &pb.CompactionRequest{Revision: int64(2*n + 1)}
	if !reflect.DeepEqual(a[0].Params[0], wreq) {
		t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision)
	}
}
func TestLoginSessionTimeout(t *testing.T) {
	tc := SetupTest(t, "login_session_test")
	tc.G.API = &FakeAPI{}
	c := clockwork.NewFakeClock()
	tc.G.Clock = c
	defer tc.Cleanup()

	sesh := NewLoginSession("logintest", tc.G)
	err := sesh.Load()
	if err != nil {
		t.Fatal(err)
	}

	if !sesh.NotExpired() {
		t.Fatal("Fresh LoginSession says expired")
	}

	c.Advance(LoginSessionMemoryTimeout + 1*time.Second)
	tc.G.Clock = c // ??

	if sesh.NotExpired() {
		t.Fatal("Stale LoginSession says not expired")
	}
}
func TestNextSyncAfter(t *testing.T) {
	fc := clockwork.NewFakeClock()

	tests := []struct {
		exp  time.Time
		want time.Duration
	}{
		{
			exp:  fc.Now().Add(time.Hour),
			want: 30 * time.Minute,
		},
		// override large values with the maximum
		{
			exp:  fc.Now().Add(168 * time.Hour), // one week
			want: 24 * time.Hour,
		},
		// override "now" values with the minimum
		{
			exp:  fc.Now(),
			want: time.Minute,
		},
		// override negative values with the minimum
		{
			exp:  fc.Now().Add(-1 * time.Minute),
			want: time.Minute,
		},
		// zero-value Time results in maximum sync interval
		{
			exp:  time.Time{},
			want: 24 * time.Hour,
		},
	}

	for i, tt := range tests {
		got := nextSyncAfter(tt.exp, fc)
		if tt.want != got {
			t.Errorf("case %d: want=%v got=%v", i, tt.want, got)
		}
	}
}
func TestProviderConfigSyncerSyncFailure(t *testing.T) {
	fc := clockwork.NewFakeClock()

	tests := []struct {
		from *staticProviderConfigGetter
		to   *staticProviderConfigSetter

		// want indicates what ProviderConfig should be passed to Set.
		// If nil, the Set should not be called.
		want *ProviderConfig
	}{
		// generic Get failure
		{
			from: &staticProviderConfigGetter{err: errors.New("fail")},
			to:   &staticProviderConfigSetter{},
			want: nil,
		},
		// generic Set failure
		{
			from: &staticProviderConfigGetter{cfg: ProviderConfig{ExpiresAt: fc.Now().Add(time.Minute)}},
			to:   &staticProviderConfigSetter{err: errors.New("fail")},
			want: &ProviderConfig{ExpiresAt: fc.Now().Add(time.Minute)},
		},
	}

	for i, tt := range tests {
		pcs := &ProviderConfigSyncer{
			from:  tt.from,
			to:    tt.to,
			clock: fc,
		}
		_, err := pcs.sync()
		if err == nil {
			t.Errorf("case %d: expected non-nil error", i)
		}

		if !reflect.DeepEqual(tt.want, tt.to.cfg) {
			t.Errorf("case %d: Set mismatch: want=%#v got=%#v", i, tt.want, tt.to.cfg)
		}
	}
}
func TestPeriodicPause(t *testing.T) {
	fc := clockwork.NewFakeClock()
	compactable := &fakeCompactable{testutil.NewRecorderStream()}
	rg := &fakeRevGetter{testutil.NewRecorderStream(), 0}
	tb := &Periodic{
		clock:        fc,
		periodInHour: 1,
		rg:           rg,
		c:            compactable,
	}

	tb.Run()
	tb.Pause()

	n := int(time.Hour / checkCompactionInterval)
	for i := 0; i < 3*n; i++ {
		rg.Wait(1)
		fc.Advance(checkCompactionInterval)
	}

	select {
	case a := <-compactable.Chan():
		t.Fatalf("unexpected action %v", a)
	case <-time.After(10 * time.Millisecond):
	}

	tb.Resume()
	rg.Wait(1)
	fc.Advance(checkCompactionInterval)

	a, err := compactable.Wait(1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: int64(2*n) + 2}) {
		t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: int64(2*n) + 2})
	}
}
func TestRetryFailure(t *testing.T) {
	nRetries = maxRetryInTest
	defer func() { nRetries = math.MaxUint32 }()

	cluster := "1000"
	c := &clientWithRetry{failTimes: 4}
	fc := clockwork.NewFakeClock()
	d := discovery{
		cluster: cluster,
		id:      1,
		c:       c,
		clock:   fc,
	}
	go func() {
		for i := uint(1); i <= maxRetryInTest; i++ {
			fc.BlockUntil(1)
			fc.Advance(time.Second * (0x1 << i))
		}
	}()
	if _, _, _, err := d.checkCluster(); err != ErrTooManyRetries {
		t.Errorf("err = %v, want %v", err, ErrTooManyRetries)
	}
}
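// TestRetryFailure above shows the common shape for testing retry/backoff code
// with a fake clock: a helper goroutine repeatedly waits (BlockUntil) for the
// retrying code to park itself on the clock, then Advances by at least the
// expected backoff so the next attempt runs. A generic, hedged sketch of that
// driver follows; driveBackoff, attempts, and op are illustrative names and
// are not part of the etcd discovery package.
func driveBackoff(fc clockwork.FakeClock, attempts uint, op func() error) error {
	errc := make(chan error, 1)
	go func() { errc <- op() }() // op is expected to call fc.Sleep between attempts

	go func() {
		for i := uint(1); i <= attempts; i++ {
			fc.BlockUntil(1)                   // wait for op to start its backoff sleep
			fc.Advance(time.Second * (1 << i)) // release it; 2s, 4s, 8s, ...
		}
	}()
	return <-errc
}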
func TestNewKVListReprCompareClone(t *testing.T) {
	nd := newTestNode()

	if ns, err := nd.List(); ns != nil || err == nil {
		t.Errorf("nodes = %v and err = %v, want nodes = nil and err != nil", ns, err)
	}

	en := nd.Repr(false, false, clockwork.NewFakeClock())
	if en.Key != nd.Path {
		t.Errorf("en.Key = %s, want = %s", en.Key, nd.Path)
	}
	if *(en.Value) != nd.Value {
		t.Errorf("*(en.Value) = %s, want = %s", *(en.Value), nd.Value)
	}

	cn := nd.Clone()
	if cn.Path != nd.Path {
		t.Errorf("cn.Path = %s, want = %s", cn.Path, nd.Path)
	}
	if cn.Value != nd.Value {
		t.Errorf("cn.Value = %s, want = %s", cn.Value, nd.Value)
	}
}
// TestPeriodicReconcilerRun attempts to validate the behaviour of the central Run
// loop of the PeriodicReconciler
func TestPeriodicReconcilerRun(t *testing.T) {
	ival := 5 * time.Hour
	fclock := clockwork.NewFakeClock()
	fes := &fakeEventStream{make(chan Event)}
	called := make(chan struct{})
	rec := func() {
		go func() {
			called <- struct{}{}
		}()
	}
	pr := &reconciler{
		ival:    ival,
		rFunc:   rec,
		eStream: fes,
		clock:   fclock,
	}

	// launch the PeriodicReconciler in the background
	prDone := make(chan struct{})
	stop := make(chan struct{})
	go func() {
		pr.Run(stop)
		close(prDone)
	}()

	// reconcile should have occurred once at start-up
	select {
	case <-called:
	case <-time.After(time.Second):
		t.Fatalf("rFunc() not called at start-up as expected!")
	}
	// no further reconciles yet expected
	select {
	case <-called:
		t.Fatalf("rFunc() called unexpectedly!")
	default:
	}

	// now, send an event on the EventStream and ensure rFunc occurs
	fes.trigger()
	select {
	case <-called:
	case <-time.After(time.Second):
		t.Fatalf("rFunc() not called after trigger!")
	}
	// assert rFunc was only called once
	select {
	case <-called:
		t.Fatalf("rFunc() called unexpectedly!")
	default:
	}

	// another event should work OK
	fes.trigger()
	select {
	case <-called:
	case <-time.After(time.Second):
		t.Fatalf("rFunc() not called after trigger!")
	}
	// again, assert rFunc was only called once
	select {
	case <-called:
		t.Fatalf("rFunc() called unexpectedly!")
	default:
	}

	// now check that time changes have the expected effect
	fclock.Advance(2 * time.Hour)
	select {
	case <-called:
		t.Fatalf("rFunc() called unexpectedly!")
	default:
	}

	fclock.Advance(3 * time.Hour)
	select {
	case <-called:
	case <-time.After(time.Second):
		t.Fatalf("rFunc() not called after time event!")
	}

	// stop the PeriodicReconciler
	close(stop)

	// now, sending an event should do nothing
	fes.trigger()
	select {
	case <-called:
		t.Fatalf("rFunc() called unexpectedly!")
	default:
	}

	// and nor should changes in time
	fclock.Advance(10 * time.Hour)
	select {
	case <-called:
		t.Fatalf("rFunc() called unexpectedly!")
	default:
	}

	// and the PeriodicReconciler should have shut down
	select {
	case <-prDone:
	case <-time.After(time.Second):
		t.Fatalf("PeriodicReconciler.Run did not return after stop signal!")
	}
}
"net/http" "net/http/httptest" "net/url" "github.com/coreos/go-oidc/key" "github.com/go-gorp/gorp" "github.com/jonboulle/clockwork" "github.com/coreos/dex/connector" "github.com/coreos/dex/db" "github.com/coreos/dex/user" "github.com/coreos/dex/user/manager" ) var ( clock = clockwork.NewFakeClock() testIssuerURL = url.URL{Scheme: "https", Host: "auth.example.com"} testClientID = "client.example.com" testClientSecret = base64.URLEncoding.EncodeToString([]byte("secret")) testRedirectURL = url.URL{Scheme: "https", Host: "client.example.com", Path: "/redirect"} testBadRedirectURL = url.URL{Scheme: "https", Host: "bad.example.com", Path: "/redirect"} testResetPasswordURL = url.URL{Scheme: "https", Host: "auth.example.com", Path: "/resetPassword"} testPrivKey, _ = key.GeneratePrivateKey() ) type tokenHandlerTransport struct { Handler http.Handler Token string }