func (s *workerJujuConnSuite) TestPublisherSetsAPIHostPorts(c *gc.C) {
	st := newFakeState()
	initState(c, st, 3)

	watcher := s.State.WatchAPIHostPorts()
	cwatch := statetesting.NewNotifyWatcherC(c, s.State, watcher)
	cwatch.AssertOneChange()

	statePublish := newPublisher(s.State)
	// Wrap the publisher so that we can call StartSync immediately
	// after the publishAPIServers method is called.
	publish := func(apiServers [][]instance.HostPort, instanceIds []instance.Id) error {
		err := statePublish.publishAPIServers(apiServers, instanceIds)
		s.State.StartSync()
		return err
	}

	w := newWorker(st, publisherFunc(publish))
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()

	cwatch.AssertOneChange()
	hps, err := s.State.APIHostPorts()
	c.Assert(err, gc.IsNil)
	assertAPIHostPorts(c, hps, expectedAPIHostPorts(3))
}
func (s *singularSuite) TestWithIsMasterTrue(c *gc.C) {
	// When IsMaster returns true, workers get started on the underlying
	// runner as usual.
	s.PatchValue(&singular.PingInterval, 1*time.Millisecond)

	underlyingRunner := newRunner()
	conn := &fakeConn{
		isMaster: true,
	}
	r, err := singular.New(underlyingRunner, conn)
	c.Assert(err, gc.IsNil)

	started := make(chan struct{}, 1)
	err = r.StartWorker("worker", func() (worker.Worker, error) {
		return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
			started <- struct{}{}
			<-stop
			return nil
		}), nil
	})
	c.Assert(err, gc.IsNil)

	select {
	case <-started:
	case <-time.After(testing.LongWait):
		c.Fatalf("timed out waiting for worker to start")
	}
	err = worker.Stop(r)
	c.Assert(err, gc.IsNil)
}
func (s *workerSuite) TestStateServersArePublished(c *gc.C) {
	publishCh := make(chan [][]instance.HostPort)
	publish := func(apiServers [][]instance.HostPort, instanceIds []instance.Id) error {
		publishCh <- apiServers
		return nil
	}

	st := newFakeState()
	initState(c, st, 3)
	w := newWorker(st, publisherFunc(publish))
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()

	select {
	case servers := <-publishCh:
		assertAPIHostPorts(c, servers, expectedAPIHostPorts(3))
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for publish")
	}

	// Change one of the servers' API addresses and check that it's published.
	newMachine10APIHostPorts := addressesWithPort(apiPort, "0.2.8.124")
	st.machine("10").setAPIHostPorts(newMachine10APIHostPorts)
	select {
	case servers := <-publishCh:
		expected := expectedAPIHostPorts(3)
		expected[0] = newMachine10APIHostPorts
		assertAPIHostPorts(c, servers, expected)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for publish")
	}
}
func (s *workerSuite) TestWorkerPublishesInstanceIds(c *gc.C) {
	s.PatchValue(&pollInterval, coretesting.LongWait+time.Second)
	s.PatchValue(&initialRetryInterval, 5*time.Millisecond)
	s.PatchValue(&maxRetryInterval, initialRetryInterval)

	publishCh := make(chan []instance.Id, 100)
	publish := func(apiServers [][]instance.HostPort, instanceIds []instance.Id) error {
		publishCh <- instanceIds
		return nil
	}

	st := newFakeState()
	initState(c, st, 3)
	w := newWorker(st, publisherFunc(publish))
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()

	select {
	case instanceIds := <-publishCh:
		c.Assert(instanceIds, jc.SameContents, []instance.Id{"id-10", "id-11", "id-12"})
	case <-time.After(coretesting.LongWait):
		c.Errorf("timed out waiting for publish")
	}
}
func (*runnerSuite) TestOneWorkerStartWhenStopping(c *gc.C) {
	worker.RestartDelay = 3 * time.Second
	runner := worker.NewRunner(allFatal, noImportance)
	starter := newTestWorkerStarter()
	starter.stopWait = make(chan struct{})

	err := runner.StartWorker("id", testWorkerStart(starter))
	c.Assert(err, gc.IsNil)
	starter.assertStarted(c, true)
	err = runner.StopWorker("id")
	c.Assert(err, gc.IsNil)
	err = runner.StartWorker("id", testWorkerStart(starter))
	c.Assert(err, gc.IsNil)

	close(starter.stopWait)
	starter.assertStarted(c, false)
	// Check that the task is restarted immediately without
	// the usual restart timeout delay.
	t0 := time.Now()
	starter.assertStarted(c, true)
	restartDuration := time.Since(t0)
	if restartDuration > 1*time.Second {
		c.Fatalf("task did not restart immediately")
	}
	c.Assert(worker.Stop(runner), gc.IsNil)
}
func (s *workerSuite) TestAddressChange(c *gc.C) {
	st := newFakeState()
	initState(c, st, 3)

	memberWatcher := st.session.members.Watch()
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0v"))

	logger.Infof("starting worker")
	w := newWorker(st, noPublisher{})
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()

	// Wait for the worker to set the initial members.
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0v 1 2"))

	// Change an address and wait for it to be changed in the
	// members.
	st.machine("11").setStateHostPort("0.1.99.99:9876")
	mustNext(c, memberWatcher)
	expectMembers := mkMembers("0v 1 2")
	expectMembers[1].Address = "0.1.99.99:9876"
	assertMembers(c, memberWatcher.Value(), expectMembers)
}
func (s *notifyWorkerSuite) TestStop(c *gc.C) {
	err := worker.Stop(s.worker)
	c.Assert(err, gc.IsNil)
	// After stop, Wait should return right away.
	err = waitShort(c, s.worker)
	c.Assert(err, gc.IsNil)
}
func (s *MachinerSuite) TestSetDead(c *gc.C) {
	mr := s.makeMachiner()
	defer worker.Stop(mr)
	c.Assert(s.machine.Destroy(), gc.IsNil)
	s.State.StartSync()
	c.Assert(mr.Wait(), gc.Equals, worker.ErrTerminateAgent)
	c.Assert(s.machine.Refresh(), gc.IsNil)
	c.Assert(s.machine.Life(), gc.Equals, state.Dead)
}
func (*runnerSuite) TestOneWorkerStart(c *gc.C) {
	runner := worker.NewRunner(noneFatal, noImportance)
	starter := newTestWorkerStarter()
	err := runner.StartWorker("id", testWorkerStart(starter))
	c.Assert(err, gc.IsNil)
	starter.assertStarted(c, true)
	c.Assert(worker.Stop(runner), gc.IsNil)
	starter.assertStarted(c, false)
}
func (s *MachineEnvironmentWatcherSuite) TestInitialStateLocalMachine1(c *gc.C) {
	proxySettings, aptProxySettings := s.updateConfig(c)

	agentConfig := agentConfig("1", provider.Local)
	envWorker := s.makeWorker(c, agentConfig)
	defer worker.Stop(envWorker)

	s.waitProxySettings(c, proxySettings)
	s.waitForFile(c, s.proxyFile, proxySettings.AsScriptEnvironment()+"\n")
	s.waitForFile(c, utils.AptConfFile, utils.AptProxyContent(aptProxySettings)+"\n")
}
func (s *MachinerSuite) TestStartSetsStatus(c *gc.C) {
	status, info, _, err := s.machine.Status()
	c.Assert(err, gc.IsNil)
	c.Assert(status, gc.Equals, params.StatusPending)
	c.Assert(info, gc.Equals, "")

	mr := s.makeMachiner()
	defer worker.Stop(mr)
	s.waitMachineStatus(c, s.machine, params.StatusStarted)
}
func (s *MachineEnvironmentWatcherSuite) TestRespondsToEvents(c *gc.C) {
	agentConfig := agentConfig("0", "ec2")
	envWorker := s.makeWorker(c, agentConfig)
	defer worker.Stop(envWorker)
	s.waitForPostSetup(c)

	proxySettings, aptProxySettings := s.updateConfig(c)

	s.waitProxySettings(c, proxySettings)
	s.waitForFile(c, s.proxyFile, proxySettings.AsScriptEnvironment()+"\n")
	s.waitForFile(c, utils.AptConfFile, utils.AptProxyContent(aptProxySettings)+"\n")
}
func (s *MachineEnvironmentWatcherSuite) TestInitialStateLocalMachine0(c *gc.C) {
	proxySettings, _ := s.updateConfig(c)

	agentConfig := agentConfig("0", provider.Local)
	envWorker := s.makeWorker(c, agentConfig)
	defer worker.Stop(envWorker)
	s.waitForPostSetup(c)
	s.waitProxySettings(c, proxySettings)

	c.Assert(utils.AptConfFile, jc.DoesNotExist)
	c.Assert(s.proxyFile, jc.DoesNotExist)
}
func (s *stringsWorkerSuite) TestChangesTriggerHandler(c *gc.C) {
	s.actor.CheckActions(c, "setup")
	s.actor.watcher.TriggerChange(c, []string{"aa", "bb"})
	waitForHandledStrings(c, s.actor.handled, []string{"aa", "bb"})
	s.actor.CheckActions(c, "setup", "handler")

	s.actor.watcher.TriggerChange(c, []string{"cc", "dd"})
	waitForHandledStrings(c, s.actor.handled, []string{"cc", "dd"})
	s.actor.watcher.TriggerChange(c, []string{"ee", "ff"})
	waitForHandledStrings(c, s.actor.handled, []string{"ee", "ff"})

	s.actor.CheckActions(c, "setup", "handler", "handler", "handler")
	c.Assert(worker.Stop(s.worker), gc.IsNil)
	s.actor.CheckActions(c, "setup", "handler", "handler", "handler", "teardown")
}
func (s *notifyWorkerSuite) TestChangesTriggerHandler(c *gc.C) {
	s.actor.CheckActions(c, "setup")
	s.actor.watcher.TriggerChange(c)
	waitForHandledNotify(c, s.actor.handled)
	s.actor.CheckActions(c, "setup", "handler")

	s.actor.watcher.TriggerChange(c)
	waitForHandledNotify(c, s.actor.handled)
	s.actor.watcher.TriggerChange(c)
	waitForHandledNotify(c, s.actor.handled)

	s.actor.CheckActions(c, "setup", "handler", "handler", "handler")
	c.Assert(worker.Stop(s.worker), gc.IsNil)
	s.actor.CheckActions(c, "setup", "handler", "handler", "handler", "teardown")
}
// During teardown we try to stop the worker, but don't hang the test
// suite if Stop never returns.
func (s *notifyWorkerSuite) stopWorker(c *gc.C) {
	if s.worker == nil {
		return
	}
	done := make(chan error)
	go func() {
		done <- worker.Stop(s.worker)
	}()
	err := waitForTimeout(c, done, coretesting.LongWait)
	c.Check(err, gc.IsNil)
	s.actor = nil
	s.worker = nil
}
// loop starts an environment observer and an aggregator, then runs the
// machine-watching loop until it fails or the worker is stopped. The
// observer is stopped on exit; its stop error is returned only if no
// other error occurred first.
func (u *updaterWorker) loop() (err error) {
	u.observer, err = worker.NewEnvironObserver(u.st)
	if err != nil {
		return err
	}
	u.aggregator = newAggregator(u.observer.Environ())
	logger.Infof("instance poller received initial environment configuration")
	defer func() {
		obsErr := worker.Stop(u.observer)
		if err == nil {
			err = obsErr
		}
	}()
	return watchMachinesLoop(u, u.st.WatchEnvironMachines())
}
func (s *LoggerSuite) TestInitialState(c *gc.C) {
	config, err := s.State.EnvironConfig()
	c.Assert(err, gc.IsNil)
	expected := config.LoggingConfig()

	initial := "<root>=DEBUG;wibble=ERROR"
	c.Assert(expected, gc.Not(gc.Equals), initial)

	loggo.ResetLoggers()
	err = loggo.ConfigureLoggers(initial)
	c.Assert(err, gc.IsNil)

	loggingWorker, _ := s.makeLogger(c)
	defer worker.Stop(loggingWorker)

	s.waitLoggingInfo(c, expected)
}
func (*runnerSuite) TestOneWorkerRestart(c *gc.C) {
	runner := worker.NewRunner(noneFatal, noImportance)
	starter := newTestWorkerStarter()
	err := runner.StartWorker("id", testWorkerStart(starter))
	c.Assert(err, gc.IsNil)
	starter.assertStarted(c, true)

	// Check that it restarts a few times.
	for i := 0; i < 3; i++ {
		starter.die <- fmt.Errorf("an error")
		starter.assertStarted(c, false)
		starter.assertStarted(c, true)
	}

	c.Assert(worker.Stop(runner), gc.IsNil)
	starter.assertStarted(c, false)
}
func (s *CleanerSuite) TestCleaner(c *gc.C) {
	cr := cleaner.NewCleaner(s.State)
	defer func() {
		c.Assert(worker.Stop(cr), gc.IsNil)
	}()

	needed, err := s.State.NeedsCleanup()
	c.Assert(err, gc.IsNil)
	c.Assert(needed, gc.Equals, false)

	s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
	s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql"))
	eps, err := s.State.InferEndpoints([]string{"wordpress", "mysql"})
	c.Assert(err, gc.IsNil)
	relM, err := s.State.AddRelation(eps...)
	c.Assert(err, gc.IsNil)

	needed, err = s.State.NeedsCleanup()
	c.Assert(err, gc.IsNil)
	c.Assert(needed, gc.Equals, false)

	// Observe destroying of the relation with a watcher.
	cw := s.State.WatchCleanups()
	defer func() {
		c.Assert(cw.Stop(), gc.IsNil)
	}()

	err = relM.Destroy()
	c.Assert(err, gc.IsNil)

	timeout := time.After(coretesting.LongWait)
	for {
		s.State.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			continue
		case <-timeout:
			c.Fatalf("timed out waiting for cleanup")
		case <-cw.Changes():
			needed, err = s.State.NeedsCleanup()
			c.Assert(err, gc.IsNil)
			if needed {
				continue
			}
		}
		break
	}
}
func (s *minUnitsWorkerSuite) TestMinUnitsWorker(c *gc.C) {
	mu := minunitsworker.NewMinUnitsWorker(s.State)
	defer func() {
		c.Assert(worker.Stop(mu), gc.IsNil)
	}()

	// Set up services and units for later use.
	wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
	mysql := s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql"))
	unit, err := wordpress.AddUnit()
	c.Assert(err, gc.IsNil)
	_, err = wordpress.AddUnit()
	c.Assert(err, gc.IsNil)

	// Set up minimum units for services.
	err = wordpress.SetMinUnits(3)
	c.Assert(err, gc.IsNil)
	err = mysql.SetMinUnits(2)
	c.Assert(err, gc.IsNil)

	// Remove a unit for a service.
	err = unit.Destroy()
	c.Assert(err, gc.IsNil)

	timeout := time.After(coretesting.LongWait)
	for {
		s.State.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			wordpressUnits, err := wordpress.AllUnits()
			c.Assert(err, gc.IsNil)
			mysqlUnits, err := mysql.AllUnits()
			c.Assert(err, gc.IsNil)
			wordpressCount := len(wordpressUnits)
			mysqlCount := len(mysqlUnits)
			if wordpressCount == 3 && mysqlCount == 2 {
				return
			}
			logger.Infof("wordpress units: %d; mysql units: %d", wordpressCount, mysqlCount)
		case <-timeout:
			c.Fatalf("timed out waiting for minunits events")
		}
	}
}
func (s *workerSuite) TestSetMembersErrorIsNotFatal(c *gc.C) {
	st := newFakeState()
	initState(c, st, 3)
	st.session.setStatus(mkStatuses("0p 1s 2s"))
	var isSet voyeur.Value
	count := 0
	setErrorFuncFor("Session.Set", func() error {
		isSet.Set(count)
		count++
		return errors.New("sample")
	})
	s.PatchValue(&initialRetryInterval, 10*time.Microsecond)
	s.PatchValue(&maxRetryInterval, coretesting.ShortWait/4)

	expectedIterations := 0
	for d := initialRetryInterval; d < maxRetryInterval*2; d *= 2 {
		expectedIterations++
	}

	w := newWorker(st, noPublisher{})
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()
	isSetWatcher := isSet.Watch()

	n0 := mustNext(c, isSetWatcher).(int)
	time.Sleep(maxRetryInterval * 2)
	n1 := mustNext(c, isSetWatcher).(int)

	// The worker should have backed off exponentially...
	c.Assert(n1-n0, jc.LessThan, expectedIterations+1)
	c.Logf("actual iterations %d; expected iterations %d", n1-n0, expectedIterations)

	// ... but only up to the maximum retry interval.
	n0 = mustNext(c, isSetWatcher).(int)
	time.Sleep(maxRetryInterval * 2)
	n1 = mustNext(c, isSetWatcher).(int)
	c.Assert(n1-n0, jc.LessThan, 3)
}
func (s *workerSuite) TestWorkerRetriesOnPublishError(c *gc.C) {
	s.PatchValue(&pollInterval, coretesting.LongWait+time.Second)
	s.PatchValue(&initialRetryInterval, 5*time.Millisecond)
	s.PatchValue(&maxRetryInterval, initialRetryInterval)

	publishCh := make(chan [][]instance.HostPort, 100)
	count := 0
	publish := func(apiServers [][]instance.HostPort, instanceIds []instance.Id) error {
		publishCh <- apiServers
		count++
		if count <= 3 {
			return fmt.Errorf("publish error")
		}
		return nil
	}
	st := newFakeState()
	initState(c, st, 3)
	w := newWorker(st, publisherFunc(publish))
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()

	for i := 0; i < 4; i++ {
		select {
		case servers := <-publishCh:
			assertAPIHostPorts(c, servers, expectedAPIHostPorts(3))
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for publish #%d", i)
		}
	}
	select {
	case <-publishCh:
		c.Errorf("unexpected publish event")
	case <-time.After(coretesting.ShortWait):
	}
}
func (s *MachinerSuite) TestMachineAddresses(c *gc.C) {
	s.PatchValue(machiner.InterfaceAddrs, func() ([]net.Addr, error) {
		addrs := []net.Addr{
			&net.IPAddr{IP: net.IPv4(10, 0, 0, 1)},
			&net.IPAddr{IP: net.IPv4(127, 0, 0, 1)},
			&net.IPAddr{IP: net.IPv6loopback},
			&net.UnixAddr{}, // not IP, ignored
			&net.IPNet{IP: net.ParseIP("2001:db8::1")},
		}
		return addrs, nil
	})
	mr := s.makeMachiner()
	defer worker.Stop(mr)
	c.Assert(s.machine.Destroy(), gc.IsNil)
	s.State.StartSync()
	c.Assert(mr.Wait(), gc.Equals, worker.ErrTerminateAgent)
	c.Assert(s.machine.Refresh(), gc.IsNil)
	c.Assert(s.machine.MachineAddresses(), gc.DeepEquals, []instance.Address{
		instance.NewAddress("10.0.0.1", instance.NetworkCloudLocal),
		instance.NewAddress("127.0.0.1", instance.NetworkMachineLocal),
		instance.NewAddress("::1", instance.NetworkMachineLocal),
		instance.NewAddress("2001:db8::1", instance.NetworkUnknown),
	})
}
func (*runnerSuite) TestStopWorkerWhenDead(c *gc.C) {
	runner := worker.NewRunner(allFatal, noImportance)
	c.Assert(worker.Stop(runner), gc.IsNil)
	c.Assert(runner.StopWorker("foo"), gc.Equals, worker.ErrDead)
}
func (s *LoggerSuite) TestRunStop(c *gc.C) {
	loggingWorker, _ := s.makeLogger(c)
	c.Assert(worker.Stop(loggingWorker), gc.IsNil)
}
func stop(c *gc.C, w worker.Worker) {
	c.Assert(worker.Stop(w), gc.IsNil)
}
func (n *notifier) Stop() error {
	return worker.Stop(n)
}
func (s *workerSuite) TestSetsAndUpdatesMembers(c *gc.C) {
	s.PatchValue(&pollInterval, 5*time.Millisecond)

	st := newFakeState()
	initState(c, st, 3)

	memberWatcher := st.session.members.Watch()
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0v"))

	logger.Infof("starting worker")
	w := newWorker(st, noPublisher{})
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()

	// Wait for the worker to set the initial members.
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0v 1 2"))

	// Update the status of the new members
	// and check that they become voting.
	c.Logf("updating new member status")
	st.session.setStatus(mkStatuses("0p 1s 2s"))
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0v 1v 2v"))

	c.Logf("adding another machine")
	// Add another machine.
	m13 := st.addMachine("13", false)
	m13.setStateHostPort(fmt.Sprintf("0.1.2.%d:%d", 13, mongoPort))
	st.setStateServers("10", "11", "12", "13")

	c.Logf("waiting for new member to be added")
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0v 1v 2v 3"))

	// Remove the vote from an existing member and give it to the new
	// machine. Also set the status of the new machine to healthy.
	c.Logf("removing vote from machine 10 and adding it to machine 13")
	st.machine("10").setWantsVote(false)
	st.machine("13").setWantsVote(true)
	st.session.setStatus(mkStatuses("0p 1s 2s 3s"))

	// Check that the new machine gets the vote and the
	// old machine loses it.
	c.Logf("waiting for vote switch")
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0 1v 2v 3v"))

	c.Logf("removing old machine")
	// Remove the old machine.
	st.removeMachine("10")
	st.setStateServers("11", "12", "13")

	// Check that it's removed from the members.
	c.Logf("waiting for removal")
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("1v 2v 3v"))
}
func (s *workerJujuConnSuite) TestStartStop(c *gc.C) {
	w, err := New(s.State)
	c.Assert(err, gc.IsNil)
	err = worker.Stop(w)
	c.Assert(err, gc.IsNil)
}