func (s *IsolatedWorkerSuite) TestConfigValidation(c *gc.C) { tests := []struct { cfg meterstatus.IsolatedConfig expected string }{{ cfg: meterstatus.IsolatedConfig{ Runner: &stubRunner{stub: s.stub}, StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), }, expected: "clock not provided", }, { cfg: meterstatus.IsolatedConfig{ Clock: coretesting.NewClock(time.Now()), StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), }, expected: "hook runner not provided", }, { cfg: meterstatus.IsolatedConfig{ Clock: coretesting.NewClock(time.Now()), Runner: &stubRunner{stub: s.stub}, }, expected: "state file not provided", }} for i, test := range tests { c.Logf("running test %d", i) err := test.cfg.Validate() c.Assert(err, gc.ErrorMatches, test.expected) } }
// SetUpTest patches state's clock, creates a hosted model to act as the
// migration source, and builds a plausible migration spec for tests to use.
func (s *ModelMigrationSuite) SetUpTest(c *gc.C) {
	s.ConnSuite.SetUpTest(c)
	// Truncate to whole seconds — presumably so times survive a round
	// trip through persistence without sub-second drift; TODO confirm.
	s.clock = coretesting.NewClock(time.Now().Truncate(time.Second))
	// All state code asking for the clock gets the controllable one.
	s.PatchValue(&state.GetClock, func() clock.Clock {
		return s.clock
	})

	// Create a hosted model to migrate.
	s.State2 = s.Factory.MakeModel(c, nil)
	s.AddCleanup(func(*gc.C) { s.State2.Close() })

	// A random UUID stands in for the target controller's model tag.
	targetControllerTag := names.NewModelTag(utils.MustNewUUID().String())

	// Plausible migration arguments to test with.
	s.stdSpec = state.ModelMigrationSpec{
		InitiatedBy: names.NewUserTag("admin"),
		TargetInfo: migration.TargetInfo{
			ControllerTag: targetControllerTag,
			Addrs:         []string{"1.2.3.4:5555", "4.3.2.1:6666"},
			CACert:        "cert",
			AuthTag:       names.NewUserTag("user"),
			Password:      "******",
		},
	}
}
func (s *IsolatedWorkerSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.stub = &testing.Stub{} s.dataDir = c.MkDir() s.hookRan = make(chan struct{}) s.triggersCreated = make(chan struct{}) triggerFactory := func(state meterstatus.WorkerState, status string, disconectedAt time.Time, clk clock.Clock, amber time.Duration, red time.Duration) (<-chan time.Time, <-chan time.Time) { select { case s.triggersCreated <- struct{}{}: case <-time.After(coretesting.LongWait): c.Fatalf("failed to signal trigger creation") } return meterstatus.GetTriggers(state, status, disconectedAt, clk, amber, red) } s.clk = coretesting.NewClock(time.Now()) wrk, err := meterstatus.NewIsolatedStatusWorker( meterstatus.IsolatedConfig{ Runner: &stubRunner{stub: s.stub, ran: s.hookRan}, StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), Clock: s.clk, AmberGracePeriod: AmberGracePeriod, RedGracePeriod: RedGracePeriod, TriggerFactory: triggerFactory, }) c.Assert(err, jc.ErrorIsNil) c.Assert(wrk, gc.NotNil) s.worker = wrk }
// newFixture returns a workerFixture using a mock revision updater and a
// testing clock initialized to the current time, with the given period.
func newFixture(period time.Duration) workerFixture {
	fix := workerFixture{
		period: period,
	}
	fix.revisionUpdater = newMockRevisionUpdater()
	fix.clock = coretesting.NewClock(time.Now())
	return fix
}
func (s *ValidationSuite) TestMissingClient(c *gc.C) { manager, err := leadership.NewManager(leadership.ManagerConfig{ Clock: coretesting.NewClock(time.Now()), }) c.Check(err, gc.ErrorMatches, "missing client") c.Check(manager, gc.IsNil) }
func (*scheduleSuite) TestAdd(c *gc.C) { clock := coretesting.NewClock(time.Time{}) s := schedule.NewSchedule(clock) op0 := operation{"k0", "v0", 3 * time.Second} op1 := operation{"k1", "v1", 1500 * time.Millisecond} op2 := operation{"k2", "v2", 2 * time.Second} s.Add(op0) s.Add(op1) s.Add(op2) clock.Advance(time.Second) // T+1 assertReady(c, s, clock /* nothing */) clock.Advance(time.Second) // T+2 assertReady(c, s, clock, op1, op2) assertReady(c, s, clock /* nothing */) clock.Advance(500 * time.Millisecond) // T+2.5 assertReady(c, s, clock /* nothing */) clock.Advance(time.Second) // T+3.5 assertReady(c, s, clock, op0) }
func (s *SecurityGroupSuite) TestDeleteSecurityGroupFewCalls(c *gc.C) { t0 := time.Time{} clock := autoAdvancingClock{coretesting.NewClock(t0)} count := 0 maxCalls := 4 expectedTimes := []time.Time{ t0, t0.Add(time.Second), t0.Add(3 * time.Second), t0.Add(7 * time.Second), t0.Add(15 * time.Second), } s.instanceStub.deleteSecurityGroup = func(group amzec2.SecurityGroup) (resp *amzec2.SimpleResp, err error) { c.Assert(clock.Now(), gc.Equals, expectedTimes[count]) if count < maxCalls { count++ return nil, &amzec2.Error{Code: "keep going"} } return nil, nil } err := s.deleteFunc(s.instanceStub, amzec2.SecurityGroup{}, clock) c.Assert(err, jc.ErrorIsNil) expectedCalls := make([]string, maxCalls+1) for i := 0; i < maxCalls+1; i++ { expectedCalls[i] = "DeleteSecurityGroup" } s.instanceStub.CheckCallNames(c, expectedCalls...) }
func (*scheduleSuite) TestNext(c *gc.C) { clock := coretesting.NewClock(time.Time{}) s := schedule.NewSchedule(clock) op0 := operation{"k0", "v0", 3 * time.Second} op1 := operation{"k1", "v1", 1500 * time.Millisecond} op2 := operation{"k2", "v2", 2 * time.Second} op3 := operation{"k3", "v3", 2500 * time.Millisecond} s.Add(op0) s.Add(op1) s.Add(op2) s.Add(op3) assertNextOp(c, s, clock, 1500*time.Millisecond) clock.Advance(1500 * time.Millisecond) assertReady(c, s, clock, op1) clock.Advance(500 * time.Millisecond) assertNextOp(c, s, clock, 0) assertReady(c, s, clock, op2) s.Remove("k3") clock.Advance(2 * time.Second) // T+4 assertNextOp(c, s, clock, 0) assertReady(c, s, clock, op0) }
func (s *SingularSuite) SetUpTest(c *gc.C) { s.clock = coretesting.NewClock(time.Now()) s.PatchValue(&state.GetClock, func() clock.Clock { return s.clock }) s.ConnSuite.SetUpTest(c) }
func (s *cmdControllerSuite) TestSystemKillCallsEnvironDestroyOnHostedEnviron(c *gc.C) { st := s.Factory.MakeEnvironment(c, &factory.EnvParams{ Name: "foo", }) defer st.Close() st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") st.Close() opc := make(chan dummy.Operation, 200) dummy.Listen(opc) conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(*gc.C) { conn.Close() }) client := undertakerapi.NewClient(conn) startTime := time.Date(2015, time.September, 1, 17, 2, 1, 0, time.UTC) mClock := testing.NewClock(startTime) undertaker.NewUndertaker(client, mClock) store, err := configstore.Default() _, err = store.ReadInfo("dummyenv") c.Assert(err, jc.ErrorIsNil) s.run(c, "kill-controller", "dummyenv", "-y") // Ensure that Destroy was called on the hosted environment ... opRecvTimeout(c, st, opc, dummy.OpDestroy{}) // ... and that the configstore was removed. _, err = store.ReadInfo("dummyenv") c.Assert(err, jc.Satisfies, errors.IsNotFound) }
func (s *SecurityGroupSuite) TestDeleteSecurityGroupInvalidGroupNotFound(c *gc.C) { s.instanceStub.deleteSecurityGroup = func(group amzec2.SecurityGroup) (resp *amzec2.SimpleResp, err error) { return nil, &amzec2.Error{Code: "InvalidGroup.NotFound"} } err := s.deleteFunc(s.instanceStub, amzec2.SecurityGroup{}, coretesting.NewClock(time.Time{})) c.Assert(err, jc.ErrorIsNil) s.instanceStub.CheckCallNames(c, "DeleteSecurityGroup") }
func (s *ValidationSuite) TestMissingMaxSleep(c *gc.C) { manager, err := leadership.NewManager(leadership.ManagerConfig{ Client: NewClient(nil, nil), Clock: coretesting.NewClock(time.Now()), }) c.Check(err, gc.ErrorMatches, "non-positive MaxSleep not valid") c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(manager, gc.IsNil) }
func (s *ValidationSuite) TestMissingClient(c *gc.C) { manager, err := leadership.NewManager(leadership.ManagerConfig{ Clock: coretesting.NewClock(time.Now()), MaxSleep: time.Minute, }) c.Check(err, gc.ErrorMatches, "nil Client not valid") c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(manager, gc.IsNil) }
func (s *LeadershipSuite) SetUpTest(c *gc.C) { s.clock = coretesting.NewClock(time.Now()) s.PatchValue(&state.GetClock, func() clock.Clock { return s.clock }) s.ConnSuite.SetUpTest(c) s.checker = s.State.LeadershipChecker() s.claimer = s.State.LeadershipClaimer() }
func (s *ValidationSuite) TestNegativeMaxSleep(c *gc.C) { manager, err := lease.NewManager(lease.ManagerConfig{ Client: NewClient(nil, nil), Clock: coretesting.NewClock(time.Now()), Secretary: struct{ lease.Secretary }{}, MaxSleep: -time.Nanosecond, }) c.Check(err, gc.ErrorMatches, "non-positive MaxSleep not valid") c.Check(err, jc.Satisfies, errors.IsNotValid) c.Check(manager, gc.IsNil) }
func (s *MachineManifoldSuite) TestMachine(c *gc.C) { config := storageprovisioner.MachineManifoldConfig{ PostUpgradeManifoldConfig: workertesting.PostUpgradeManifoldTestConfig(), Clock: coretesting.NewClock(defaultClockStart), } _, err := workertesting.RunPostUpgradeManifold( storageprovisioner.MachineManifold(config), &fakeAgent{tag: names.NewMachineTag("42")}, &fakeAPIConn{}) c.Assert(err, jc.ErrorIsNil) c.Assert(s.newCalled, jc.IsTrue) }
func (s *MachineManifoldSuite) TestNonAgent(c *gc.C) { config := storageprovisioner.MachineManifoldConfig{ PostUpgradeManifoldConfig: workertesting.PostUpgradeManifoldTestConfig(), Clock: coretesting.NewClock(defaultClockStart), } _, err := workertesting.RunPostUpgradeManifold( storageprovisioner.MachineManifold(config), &fakeAgent{tag: names.NewUserTag("foo")}, &fakeAPIConn{}) c.Assert(err, gc.ErrorMatches, "expected ModelTag or MachineTag, got names.UserTag") c.Assert(s.newCalled, jc.IsFalse) }
func (*scheduleSuite) TestRemove(c *gc.C) { clock := coretesting.NewClock(time.Time{}) now := clock.Now() s := schedule.NewSchedule(clock) s.Add("k0", "v0", now.Add(3*time.Second)) s.Add("k1", "v1", now.Add(2*time.Second)) s.Remove("k0") assertReady(c, s, clock /* nothing */) clock.Advance(3 * time.Second) assertReady(c, s, clock, "v1") }
// SetUpTest assembles a mock uniter backend for unit mysql/0, a mock
// leadership tracker, and a testing clock, then starts the remote-state
// watcher under test.
func (s *WatcherSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)
	// Mock state for a single alive unit of the mysql service. All watcher
	// channels are buffered (capacity 1) so tests can inject change events
	// without blocking.
	s.st = mockState{
		unit: mockUnit{
			tag:  names.NewUnitTag("mysql/0"),
			life: params.Alive,
			service: mockService{
				tag:            names.NewServiceTag("mysql"),
				life:           params.Alive,
				curl:           charm.MustParseURL("cs:trusty/mysql"),
				serviceWatcher: mockNotifyWatcher{changes: make(chan struct{}, 1)},
				leaderSettingsWatcher: mockNotifyWatcher{
					changes: make(chan struct{}, 1),
				},
				relationsWatcher: mockStringsWatcher{
					changes: make(chan []string, 1),
				},
			},
			unitWatcher:           mockNotifyWatcher{changes: make(chan struct{}, 1)},
			addressesWatcher:      mockNotifyWatcher{changes: make(chan struct{}, 1)},
			configSettingsWatcher: mockNotifyWatcher{changes: make(chan struct{}, 1)},
			storageWatcher:        mockStringsWatcher{changes: make(chan []string, 1)},
			actionWatcher:         mockStringsWatcher{changes: make(chan []string, 1)},
		},
		relations:                 make(map[names.RelationTag]*mockRelation),
		storageAttachment:         make(map[params.StorageAttachmentId]params.StorageAttachment),
		relationUnitsWatchers:     make(map[names.RelationTag]*mockRelationUnitsWatcher),
		storageAttachmentWatchers: make(map[names.StorageTag]*mockStorageAttachmentWatcher),
	}

	// The second mockTicket field is set true for all tickets — presumably
	// meaning the ticket resolves successfully; confirm against mockTicket.
	s.leadership = mockLeadershipTracker{
		claimTicket:  mockTicket{make(chan struct{}, 1), true},
		leaderTicket: mockTicket{make(chan struct{}, 1), true},
		minionTicket: mockTicket{make(chan struct{}, 1), true},
	}

	// Drive update-status ticks from the testing clock so tests control
	// exactly when the watcher's periodic work fires.
	s.clock = testing.NewClock(time.Now())
	statusTicker := func() <-chan time.Time {
		return s.clock.After(statusTickDuration)
	}

	w, err := remotestate.NewWatcher(remotestate.WatcherConfig{
		State:               &s.st,
		LeadershipTracker:   &s.leadership,
		UnitTag:             s.st.unit.tag,
		UpdateStatusChannel: statusTicker,
	})
	c.Assert(err, jc.ErrorIsNil)
	s.watcher = w
}
func (*scheduleSuite) TestRemove(c *gc.C) { clock := coretesting.NewClock(time.Time{}) s := schedule.NewSchedule(clock) op0 := operation{"k0", "v0", 3 * time.Second} op1 := operation{"k1", "v1", 2 * time.Second} s.Add(op0) s.Add(op1) s.Remove("k0") assertReady(c, s, clock /* nothing */) clock.Advance(3 * time.Second) assertReady(c, s, clock, op1) }
func (s *MachineManifoldSuite) SetUpTest(c *gc.C) { s.newCalled = false s.PatchValue(&storageprovisioner.NewStorageProvisioner, func(config storageprovisioner.Config) (worker.Worker, error) { s.newCalled = true return nil, nil }, ) config := workertesting.AgentApiManifoldTestConfig() s.config = storageprovisioner.MachineManifoldConfig{ AgentName: config.AgentName, APICallerName: config.APICallerName, Clock: coretesting.NewClock(defaultClockStart), } }
func (s *ContextFactorySuite) SetUpTest(c *gc.C) { s.HookContextSuite.SetUpTest(c) s.paths = runnertesting.NewRealPaths(c) s.membership = map[int][]string{} contextFactory, err := context.NewContextFactory( s.uniter, s.unit.Tag().(names.UnitTag), runnertesting.FakeTracker{}, s.getRelationInfos, s.storage, s.paths, coretesting.NewClock(time.Time{}), ) c.Assert(err, jc.ErrorIsNil) s.factory = contextFactory }
// RunTest sets up a Manager and a Clock and passes them into the supplied
// test function. The manager will be cleaned up afterwards.
func (fix *Fixture) RunTest(c *gc.C, test func(leadership.ManagerWorker, *testing.Clock)) {
	clock := testing.NewClock(defaultClockStart)
	client := NewClient(fix.leases, fix.expectCalls)
	manager, err := leadership.NewManager(leadership.ManagerConfig{
		Clock:  clock,
		Client: client,
	})
	c.Assert(err, jc.ErrorIsNil)
	defer func() {
		// Dirty tests will probably have stopped the manager anyway, but no
		// sense leaving them around if things aren't exactly as we expect.
		manager.Kill()
		err := manager.Wait()
		if !fix.expectDirty {
			c.Check(err, jc.ErrorIsNil)
		}
	}()
	// Deferred last so it runs FIRST (LIFO): the client's expected calls
	// are verified before the manager above is killed.
	defer client.Wait(c)
	test(manager, clock)
}
func run(c *gc.C, stub *testing.Stub, test FixtureTest) { context := &context{ c: c, stub: stub, clock: coretesting.NewClock(time.Now()), timeout: time.After(time.Second), starts: make(chan worker.Worker, 1000), } defer context.checkCleanedUp() worker, err := presence.New(presence.Config{ Identity: names.NewMachineTag("1"), Start: context.startPinger, Clock: context.clock, RetryDelay: fiveSeconds, }) c.Assert(err, jc.ErrorIsNil) defer workertest.CleanKill(c, worker) test(context, worker) }
func (*scheduleSuite) TestAdd(c *gc.C) { clock := coretesting.NewClock(time.Time{}) now := clock.Now() s := schedule.NewSchedule(clock) s.Add("k0", "v0", now.Add(3*time.Second)) s.Add("k1", "v1", now.Add(1500*time.Millisecond)) s.Add("k2", "v2", now.Add(2*time.Second)) clock.Advance(time.Second) // T+1 assertReady(c, s, clock /* nothing */) clock.Advance(time.Second) // T+2 assertReady(c, s, clock, "v1", "v2") assertReady(c, s, clock /* nothing */) clock.Advance(500 * time.Millisecond) // T+2.5 assertReady(c, s, clock /* nothing */) clock.Advance(time.Second) // T+3.5 assertReady(c, s, clock, "v0") }
func (fix fixture) run(c *gc.C, test func(worker.Worker, *coretesting.Clock)) *testing.Stub { stub := &testing.Stub{} environ := &mockEnviron{ stub: stub, } facade := &mockFacade{ stub: stub, info: fix.info, } clock := coretesting.NewClock(time.Now()) stub.SetErrors(fix.errors...) w, err := undertaker.NewUndertaker(undertaker.Config{ Facade: facade, Environ: environ, Clock: clock, RemoveDelay: RIPTime, }) c.Assert(err, jc.ErrorIsNil) defer fix.cleanup(c, w) test(w, clock) return stub }
func (fix *fixture) Run(c *gc.C, test testFunc) { facade := newStubFacade(&fix.Stub) clock := coretesting.NewClock(time.Now()) flagWorker, err := singular.NewFlagWorker(singular.FlagConfig{ Facade: facade, Clock: clock, Duration: time.Minute, }) c.Assert(err, jc.ErrorIsNil) done := make(chan struct{}) go func() { defer close(done) defer worker.Stop(flagWorker) defer facade.unblock() test(flagWorker, clock, facade.unblock) }() select { case <-done: case <-time.After(coretesting.LongWait): c.Fatalf("test timed out") } }
func (*scheduleSuite) TestNext(c *gc.C) { clock := coretesting.NewClock(time.Time{}) now := clock.Now() s := schedule.NewSchedule(clock) s.Add("k0", "v0", now.Add(3*time.Second)) s.Add("k1", "v1", now.Add(1500*time.Millisecond)) s.Add("k2", "v2", now.Add(2*time.Second)) s.Add("k3", "v3", now.Add(2500*time.Millisecond)) assertNextOp(c, s, clock, 1500*time.Millisecond) clock.Advance(1500 * time.Millisecond) assertReady(c, s, clock, "v1") clock.Advance(500 * time.Millisecond) assertNextOp(c, s, clock, 0) assertReady(c, s, clock, "v2") s.Remove("k3") clock.Advance(2 * time.Second) // T+4 assertNextOp(c, s, clock, 0) assertReady(c, s, clock, "v0") }
func (s *userAuthenticatorSuite) TestCreateLocalLoginMacaroon(c *gc.C) { service := mockBakeryService{} clock := coretesting.NewClock(time.Time{}) authenticator := &authentication.UserAuthenticator{ Service: &service, Clock: clock, } _, err := authenticator.CreateLocalLoginMacaroon(names.NewUserTag("bobbrown")) c.Assert(err, jc.ErrorIsNil) service.CheckCallNames(c, "ExpireStorageAt", "NewMacaroon", "AddCaveat") calls := service.Calls() c.Assert(calls[0].Args, jc.DeepEquals, []interface{}{clock.Now().Add(24 * time.Hour)}) c.Assert(calls[1].Args, jc.DeepEquals, []interface{}{ "", []byte(nil), []checkers.Caveat{ checkers.DeclaredCaveat("username", "bobbrown@local"), }, }) c.Assert(calls[2].Args, jc.DeepEquals, []interface{}{ &macaroon.Macaroon{}, checkers.TimeBeforeCaveat(clock.Now().Add(24 * time.Hour)), }) }
func (*scheduleSuite) TestExponentialBackoff(c *gc.C) { clock := coretesting.NewClock(time.Time{}) now := clock.Now() s := schedule.NewSchedule(clock) op := &exponentialBackoffOperation{key: "key"} expectedTimes := []time.Time{ now, now.Add(30 * time.Second), now.Add(1 * time.Minute), now.Add(2 * time.Minute), now.Add(4 * time.Minute), now.Add(8 * time.Minute), now.Add(16 * time.Minute), now.Add(30 * time.Minute), // truncated now.Add(30 * time.Minute), } for i, expected := range expectedTimes { c.Logf("%d: expect %s", i, expected) t := s.Add(op) c.Assert(t, gc.DeepEquals, expected) s.Remove(op.Key()) } }