func (s *Suite) TestSUCCESSMinionWaitTimeout(c *gc.C) {
	// The SUCCESS phase is special in that even if some minions fail
	// to report the migration should continue. There's no turning
	// back from SUCCESS.
	s.facade.queueStatus(s.makeStatus(coremigration.SUCCESS))

	worker, err := migrationmaster.New(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, worker)

	select {
	case <-s.clock.Alarms():
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for clock.After call")
	}

	// Move time ahead in order to trigger timeout.
	s.clock.Advance(15 * time.Minute)

	err = workertest.CheckKilled(c, worker)
	c.Assert(err, gc.Equals, migrationmaster.ErrMigrated)

	s.stub.CheckCalls(c, joinCalls(
		watchStatusLockdownCalls,
		[]jujutesting.StubCall{
			{"facade.WatchMinionReports", nil},
			{"facade.SetPhase", []interface{}{coremigration.LOGTRANSFER}},
			{"facade.SetPhase", []interface{}{coremigration.REAP}},
			{"facade.Reap", nil},
			{"facade.SetPhase", []interface{}{coremigration.DONE}},
		},
	))
}
func (s *Suite) TestDirtyKillTimeout(c *gc.C) {
	w := workertest.NewForeverWorker(nil)
	defer w.ReallyKill()

	workertest.DirtyKill(c, w)
	s.CheckFailed(c)
}
// cleanup stops the supplied worker, tolerating an error result when
// the fixture expects a dirty shutdown.
func (fix fixture) cleanup(c *gc.C, w worker.Worker) {
	if fix.dirty {
		workertest.DirtyKill(c, w)
	} else {
		workertest.CleanKill(c, w)
	}
}
// newTrackerDirtyKill returns a tracker that will be dirty-killed at
// test cleanup.
func (s *TrackerSuite) newTrackerDirtyKill() *leadership.Tracker {
	tracker := s.newTrackerInner()
	s.AddCleanup(func(c *gc.C) {
		workertest.DirtyKill(c, tracker)
	})
	return tracker
}
// kill stops the engine, tolerating an error result when the fixture
// expects a dirty shutdown.
func (fix *engineFixture) kill(c *gc.C, engine *dependency.Engine) {
	if fix.dirty {
		workertest.DirtyKill(c, engine)
	} else {
		workertest.CleanKill(c, engine)
	}
}
// newServerDirtyKill starts an API server and registers a cleanup that
// dirty-kills it.
func (s *apiserverBaseSuite) newServerDirtyKill(c *gc.C, config apiserver.ServerConfig) *apiserver.Server {
	srv := s.newServerNoCleanup(c, config)
	s.AddCleanup(func(c *gc.C) {
		workertest.DirtyKill(c, srv)
	})
	return srv
}
func (s *Suite) TestCheckKillSuccess(c *gc.C) {
	expect := errors.New("fledbon")
	w := workertest.NewErrorWorker(expect)
	defer workertest.DirtyKill(c, w)

	err := workertest.CheckKill(c, w)
	c.Check(err, gc.Equals, expect)
}
func (s *Suite) TestCheckKilledSuccess(c *gc.C) {
	expect := errors.New("snifplog")
	w := workertest.NewErrorWorker(expect)
	defer workertest.DirtyKill(c, w)

	w.Kill()
	err := workertest.CheckKilled(c, w)
	c.Check(err, gc.Equals, expect)
}
// Run wires up a runContext with the fixture's config, watcher and
// observer errors, and passes it to the supplied test func.
func (fix *fixture) Run(c *gc.C, test func(*runContext)) {
	watcher := newNotifyWatcher(fix.watcherErr)
	defer workertest.DirtyKill(c, watcher)

	context := &runContext{
		config:  newModelConfig(c, fix.initialConfig),
		watcher: watcher,
	}
	context.stub.SetErrors(fix.observerErrs...)
	test(context)
}
func (*FlagSuite) TestFlagUnlockError(c *gc.C) {
	lock := gate.NewLock()
	worker, err := gate.NewFlag(lock)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, worker)
	workertest.CheckAlive(c, worker)

	lock.Unlock()
	err = workertest.CheckKilled(c, worker)
	c.Check(err, gc.Equals, gate.ErrUnlocked)
}
// RunDumb starts a DumbWorkers inside a fresh Context and supplies it
// to a test func.
func (fix Fixture) RunDumb(c *gc.C, test func(Context, *workers.DumbWorkers)) {
	fix.Run(c, func(ctx Context) {
		dw, err := workers.NewDumbWorkers(workers.DumbConfig{
			Factory: ctx.Factory(),
			Logger:  loggo.GetLogger("test"),
		})
		c.Assert(err, jc.ErrorIsNil)
		defer workertest.DirtyKill(c, dw)
		test(ctx, dw)
	})
}
// FailDumb verifies that a DumbWorkers cannot start successfully, and
// checks that the returned error matches.
func (fix Fixture) FailDumb(c *gc.C, match string) {
	fix.Run(c, func(ctx Context) {
		dw, err := workers.NewDumbWorkers(workers.DumbConfig{
			Factory: ctx.Factory(),
			Logger:  loggo.GetLogger("test"),
		})
		if !c.Check(dw, gc.IsNil) {
			workertest.DirtyKill(c, dw)
		}
		c.Check(err, gc.ErrorMatches, match)
	})
}
// RunRestart starts a RestartWorkers inside a fresh Context and
// supplies it to a test func.
func (fix Fixture) RunRestart(c *gc.C, test func(Context, *workers.RestartWorkers)) {
	fix.Run(c, func(ctx Context) {
		rw, err := workers.NewRestartWorkers(workers.RestartConfig{
			Factory: ctx.Factory(),
			Logger:  loggo.GetLogger("test"),
			Clock:   ctx.Clock(),
			Delay:   fiveSeconds,
		})
		c.Assert(err, jc.ErrorIsNil)
		defer workertest.DirtyKill(c, rw)
		test(ctx, rw)
	})
}
func (s *LogForwarderSuite) TestOne(c *gc.C) {
	s.stream.addRecords(c, s.rec)
	lf, err := logforwarder.NewLogForwarder(s.newLogForwarderArgs(c, s.stream, s.sender))
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, lf)

	s.sender.waitForSend(c)
	workertest.CleanKill(c, lf)

	s.sender.stub.CheckCalls(c, []testing.StubCall{
		{"Send", []interface{}{[]logfwd.Record{s.rec}}},
		{"Close", nil},
	})
}
// FailRestart verifies that a RestartWorkers cannot start successfully, and
// checks that the returned error matches.
func (fix Fixture) FailRestart(c *gc.C, match string) {
	fix.Run(c, func(ctx Context) {
		rw, err := workers.NewRestartWorkers(workers.RestartConfig{
			Factory: ctx.Factory(),
			Logger:  loggo.GetLogger("test"),
			Clock:   ctx.Clock(),
			Delay:   fiveSeconds,
		})
		if !c.Check(rw, gc.IsNil) {
			workertest.DirtyKill(c, rw)
		}
		c.Check(err, gc.ErrorMatches, match)
	})
}
// checkInvalidRestartConfig checks that both Validate and
// NewRestartWorkers reject the supplied config with the expected
// NotValid error.
func checkInvalidRestartConfig(c *gc.C, config workers.RestartConfig, match string) {
	check := func(err error) {
		c.Check(err, jc.Satisfies, errors.IsNotValid)
		c.Check(err, gc.ErrorMatches, match)
	}

	err := config.Validate()
	check(err)

	rw, err := workers.NewRestartWorkers(config)
	if !c.Check(rw, gc.IsNil) {
		workertest.DirtyKill(c, rw)
	}
	check(err)
}
func (s *TrackerSuite) TestWatchCloses(c *gc.C) {
	fix := &fixture{}
	fix.Run(c, func(context *runContext) {
		tracker, err := environ.NewTracker(environ.Config{
			Observer: context,
		})
		c.Assert(err, jc.ErrorIsNil)
		defer workertest.DirtyKill(c, tracker)

		context.CloseNotify()
		err = workertest.CheckKilled(c, tracker)
		c.Check(err, gc.ErrorMatches, "environ config watch closed")
		context.CheckCallNames(c, "EnvironConfig", "WatchForEnvironConfigChanges")
	})
}
func (*HousingSuite) TestOccupySuccess(c *gc.C) {
	expectWorker := workertest.NewErrorWorker(errors.New("ignored"))
	defer workertest.DirtyKill(c, expectWorker)

	manifold := util.Housing{
		Occupy: "fortress",
	}.Decorate(dependency.Manifold{
		Start: func(dependency.Context) (worker.Worker, error) {
			return expectWorker, nil
		},
	})
	guest := newGuest(true)
	context := dt.StubContext(nil, map[string]interface{}{
		"fortress": guest,
	})

	// wait for the start func to complete
	started := make(chan struct{})
	go func() {
		defer close(started)
		worker, err := manifold.Start(context)
		c.Check(worker, gc.Equals, expectWorker)
		c.Check(err, jc.ErrorIsNil)
	}()
	select {
	case <-started:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out")
	}

	// check the worker's alive
	workertest.CheckAlive(c, expectWorker)

	// check the visit keeps running...
	select {
	case <-time.After(coretesting.ShortWait):
	case <-guest.done:
		c.Fatalf("visit finished early")
	}

	// ...until the worker stops
	expectWorker.Kill()
	select {
	case <-guest.done:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out")
	}
}
func (s *LogForwarderSuite) TestStreamError(c *gc.C) {
	failure := errors.New("<failure>")
	s.stream.stub.SetErrors(nil, failure)
	s.stream.addRecords(c, s.rec)

	lf, err := logforwarder.NewLogForwarder(s.newLogForwarderArgs(c, s.stream, s.sender))
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, lf)

	err = workertest.CheckKilled(c, lf)
	c.Check(errors.Cause(err), gc.Equals, failure)

	s.sender.stub.CheckCalls(c, []testing.StubCall{
		{"Send", []interface{}{[]logfwd.Record{s.rec}}},
		{"Close", nil},
	})
}
// Run builds a resumer worker against a mock facade and a manual test
// clock, runs the supplied test func, and returns the stub so callers
// can verify the recorded calls.
func (fix fixture) Run(c *gc.C, test TestFunc) *testing.Stub {
	stub := &testing.Stub{}
	stub.SetErrors(fix.errors...)
	clock := testing.NewClock(time.Now())
	facade := newMockFacade(stub)

	worker, err := resumer.NewResumer(resumer.Config{
		Facade:   facade,
		Interval: time.Hour,
		Clock:    clock,
	})
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, worker)

	test(clock, worker)
	return stub
}
func (s *TrackerSuite) TestWatchFails(c *gc.C) {
	fix := &fixture{
		observerErrs: []error{
			nil, errors.New("grrk splat"),
		},
	}
	fix.Run(c, func(context *runContext) {
		tracker, err := environ.NewTracker(environ.Config{
			Observer: context,
		})
		c.Assert(err, jc.ErrorIsNil)
		defer workertest.DirtyKill(c, tracker)

		err = workertest.CheckKilled(c, tracker)
		c.Check(err, gc.ErrorMatches, "cannot watch environ config: grrk splat")
		context.CheckCallNames(c, "EnvironConfig", "WatchForEnvironConfigChanges")
	})
}
func (s *TrackerSuite) TestWatchedModelConfigFails(c *gc.C) {
	fix := &fixture{
		observerErrs: []error{
			nil, nil, errors.New("blam ouch"),
		},
	}
	fix.Run(c, func(context *runContext) {
		tracker, err := environ.NewTracker(environ.Config{
			Observer: context,
		})
		c.Check(err, jc.ErrorIsNil)
		defer workertest.DirtyKill(c, tracker)

		context.SendNotify()
		err = workertest.CheckKilled(c, tracker)
		c.Check(err, gc.ErrorMatches, "cannot read environ config: blam ouch")
		context.CheckCallNames(c, "ModelConfig", "WatchForModelConfigChanges", "ModelConfig")
	})
}
func (s *TrackerSuite) TestWatchedEnvironConfigIncompatible(c *gc.C) {
	fix := &fixture{
		initialConfig: coretesting.Attrs{
			"broken": "SetConfig",
		},
	}
	fix.Run(c, func(context *runContext) {
		tracker, err := environ.NewTracker(environ.Config{
			Observer: context,
		})
		c.Check(err, jc.ErrorIsNil)
		defer workertest.DirtyKill(c, tracker)

		context.SendNotify()
		err = workertest.CheckKilled(c, tracker)
		c.Check(err, gc.ErrorMatches, "cannot update environ config: dummy.SetConfig is broken")
		context.CheckCallNames(c, "EnvironConfig", "WatchForEnvironConfigChanges", "EnvironConfig")
	})
}
func (s *TrackerSuite) TestWatchedModelConfigIncompatible(c *gc.C) {
	fix := &fixture{}
	fix.Run(c, func(context *runContext) {
		tracker, err := environ.NewTracker(environ.Config{
			Observer: context,
			NewEnvironFunc: func(environs.OpenParams) (environs.Environ, error) {
				env := &mockEnviron{}
				env.SetErrors(errors.New("SetConfig is broken"))
				return env, nil
			},
		})
		c.Check(err, jc.ErrorIsNil)
		defer workertest.DirtyKill(c, tracker)

		context.SendModelConfigNotify()
		err = workertest.CheckKilled(c, tracker)
		c.Check(err, gc.ErrorMatches, "cannot update environ config: SetConfig is broken")
		context.CheckCallNames(c, "ModelConfig", "CloudSpec", "WatchForModelConfigChanges", "ModelConfig")
	})
}
func (s *LogForwarderSuite) TestConfigChange(c *gc.C) {
	rec0 := s.rec
	rec1 := s.rec
	rec1.ID = 11

	api := &mockLogForwardConfig{
		enabled: true,
		host:    "10.0.0.1",
	}
	lf, err := logforwarder.NewLogForwarder(s.newLogForwarderArgsWithAPI(c, api, s.stream, s.sender))
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, lf)

	// Send the first record.
	s.stream.addRecords(c, rec0)
	s.sender.waitForSend(c)

	// Config change.
	api.host = "10.0.0.2"
	api.changes <- struct{}{}
	s.sender.waitForClose(c)

	// Send the second record.
	s.stream.addRecords(c, rec1)
	s.sender.waitForSend(c)

	workertest.CleanKill(c, lf)

	// Check that both records were sent with the config change
	// applied for the second send.
	rec1.Message = "send to 10.0.0.2"
	s.sender.stub.CheckCalls(c, []testing.StubCall{
		{"Send", []interface{}{[]logfwd.Record{rec0}}},
		{"Close", nil},
		{"Send", []interface{}{[]logfwd.Record{rec1}}},
		{"Close", nil},
	})
}
// runWorker starts a migrationmaster worker and returns the error it
// is eventually killed with.
func (s *Suite) runWorker(c *gc.C) error {
	w, err := migrationmaster.New(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, w)
	return workertest.CheckKilled(c, w)
}
func (s *Suite) TestDirtyKillSuccess(c *gc.C) {
	w := workertest.NewErrorWorker(errors.New("hifstit"))
	workertest.DirtyKill(c, w)
}