func (*RestartWorkersSuite) TestSingularManagerRestart(c *gc.C) { fix := BasicFixture() fix.SW_errors = []error{errors.New("oof"), nil} fix.RunRestart(c, func(ctx Context, rw *workers.RestartWorkers) { origw := rw.SingularManager() w := NextWorker(c, ctx.SWs()) c.Assert(w, gc.NotNil) AssertWorker(c, rw.SingularManager(), w) w.Kill() clock := ctx.Clock() WaitAlarms(c, clock, 1) clock.Advance(fiveSeconds) w2 := NextWorker(c, ctx.SWs()) c.Assert(w, gc.NotNil) WaitWorker(c, SM_getter(rw), w2) // The new worker should underlie the originally // acquired singular manager, so that restarts // do not require callers to acquire a new manager AssertWorker(c, origw, w2) workertest.CleanKill(c, rw) }) }
// TestNoPollWhenNotProvisioned verifies that the machine poll loop never
// queries instance info while the machine has no instance id (i.e. is not
// yet provisioned), and that polling begins on the next interval once an
// instance id is set.
func (s *machineSuite) TestNoPollWhenNotProvisioned(c *gc.C) {
	// Make the short poll interval effectively immediate and push the
	// long interval out of reach for the duration of the test.
	s.PatchValue(&ShortPoll, 1*time.Millisecond)
	s.PatchValue(&LongPoll, coretesting.LongWait)
	// Receives a (non-blocking) signal each time instance info is fetched.
	polled := make(chan struct{}, 1)
	getInstanceInfo := func(id instance.Id) (instanceInfo, error) {
		select {
		case polled <- struct{}{}:
		default:
		}
		return instanceInfo{testAddrs, instance.InstanceStatus{Status: status.Unknown, Message: "pending"}}, nil
	}
	context := &testMachineContext{
		getInstanceInfo: getInstanceInfo,
		dyingc:          make(chan struct{}),
	}
	// Empty instance id: the machine starts out unprovisioned.
	m := &testMachine{
		tag:        names.NewMachineTag("99"),
		instanceId: instance.Id(""),
		refresh:    func() error { return nil },
		addresses:  testAddrs,
		life:       params.Alive,
		status:     "pending",
	}
	died := make(chan machine)
	clock := gitjujutesting.NewClock(time.Time{})
	changed := make(chan struct{})
	go runMachine(context, m, changed, died, clock)
	// expectPoll waits for the loop to set its timer, then fires it.
	expectPoll := func() {
		// worker should be waiting for ShortPoll
		select {
		case <-clock.Alarms():
		case <-time.After(coretesting.LongWait):
			c.Fatalf("expected time-based polling")
		}
		clock.Advance(ShortPoll)
	}
	expectPoll()
	expectPoll()
	// While unprovisioned, firing the timer must not trigger a poll.
	select {
	case <-polled:
		c.Fatalf("unexpected instance poll")
	case <-time.After(coretesting.ShortWait):
	}
	// Provision the machine: the next interval should poll the instance.
	m.setInstanceId("inst-ance")
	expectPoll()
	select {
	case <-polled:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("expected instance poll")
	}
	killMachineLoop(c, m, context.dyingc, died)
	c.Assert(context.killErr, gc.Equals, nil)
}
func (*RestartWorkersSuite) TestLeadershipManagerDelay(c *gc.C) { fix := BasicFixture() fix.LW_errors = []error{errors.New("oof")} fix.RunRestart(c, func(ctx Context, rw *workers.RestartWorkers) { w := NextWorker(c, ctx.LWs()) c.Assert(w, gc.NotNil) AssertWorker(c, rw.LeadershipManager(), w) w.Kill() clock := ctx.Clock() WaitAlarms(c, clock, 1) clock.Advance(almostFiveSeconds) AssertWorker(c, rw.LeadershipManager(), w) err := workertest.CheckKill(c, rw) c.Check(err, gc.ErrorMatches, "error stopping leadership lease manager: oof") }) }
func (*RestartWorkersSuite) TestPresenceWatcherRestart(c *gc.C) { fix := BasicFixture() fix.PW_errors = []error{errors.New("oof"), nil} fix.RunRestart(c, func(ctx Context, rw *workers.RestartWorkers) { w := NextWorker(c, ctx.PWs()) c.Assert(w, gc.NotNil) AssertWorker(c, rw.PresenceWatcher(), w) w.Kill() clock := ctx.Clock() WaitAlarms(c, clock, 1) clock.Advance(fiveSeconds) w2 := NextWorker(c, ctx.PWs()) c.Assert(w, gc.NotNil) WaitWorker(c, PW_getter(rw), w2) workertest.CleanKill(c, rw) }) }