func (s *ResumerSuite) TestResumerCalls(c *C) {
	// Shorter interval and mock help to count
	// the resumer calls in a given timespan.
	testInterval := 10 * time.Millisecond
	resumer.SetInterval(testInterval)
	defer resumer.RestoreInterval()

	tr := &transactionResumerMock{[]time.Time{}}
	rr := resumer.NewResumer(tr)
	defer func() { c.Assert(rr.Stop(), IsNil) }()

	time.Sleep(10 * testInterval)

	// Check that a number of calls have happened with a time
	// difference somewhere between the interval and twice the
	// interval. A more precise time behavior cannot be
	// specified due to the load during the test.
	c.Assert(len(tr.timestamps) > 0, Equals, true)
	for i := 1; i < len(tr.timestamps); i++ {
		diff := tr.timestamps[i].Sub(tr.timestamps[i-1])
		c.Assert(diff >= testInterval, Equals, true)
		c.Assert(diff <= 2*testInterval, Equals, true)
	}
}
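// A minimal sketch of the mock assumed by TestResumerCalls above: it only
// needs to satisfy the resumer's TransactionResumer interface and record
// when each call happened. The ResumeTransactions method name is an
// assumption for illustration; the timestamps field matches the usage in
// the test.
type transactionResumerMock struct {
	timestamps []time.Time
}

func (tr *transactionResumerMock) ResumeTransactions() error {
	// Record the time of each resume call so the test can check
	// the spacing between consecutive calls.
	tr.timestamps = append(tr.timestamps, time.Now())
	return nil
}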
func (s *ResumerSuite) TestRunStopWithState(c *C) {
	// Test with state ensures that state fulfills the
	// TransactionResumer interface.
	rr := resumer.NewResumer(s.State)

	c.Assert(rr.Stop(), IsNil)
}
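// For context, a minimal sketch of the interface the tests above rely on,
// assuming the resumer package declares it roughly as follows; the exact
// definition is not part of this section. Both *state.State and
// transactionResumerMock would satisfy it, which is what
// TestRunStopWithState exercises.
type TransactionResumer interface {
	// ResumeTransactions resumes all pending transactions.
	ResumeTransactions() error
}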
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	st, entity, err := openState(a.Conf.Conf, a)
	if err != nil {
		return nil, err
	}
	// If this fails, other bits will fail, so we just log the error, and
	// let the other failures actually restart runners.
	if err := EnsureAPIInfo(a.Conf.Conf, st, entity); err != nil {
		log.Warningf("failed to EnsureAPIInfo: %v", err)
	}
	reportOpenedState(st)
	m := entity.(*state.Machine)
	// TODO(rog) use more discriminating test for errors
	// rather than taking everything down indiscriminately.
	dataDir := a.Conf.DataDir
	runner := worker.NewRunner(allFatal, moreImportant)
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		// TODO(rog) use id instead of *Machine (or introduce Clone method)
		return NewUpgrader(st, m, dataDir), nil
	})
	// At this stage, since we don't embed lxc containers, just start an lxc
	// provisioner task for non-lxc containers. Since we have only LXC
	// containers and normal machines, this effectively means that we only
	// have an LXC provisioner when we have a normally provisioned machine
	// (through the environ-provisioner). With the upcoming advent of KVM
	// containers, it is likely that we will want an LXC provisioner on a KVM
	// machine, and once we get nested LXC containers, we can remove this
	// check.
	providerType := os.Getenv("JUJU_PROVIDER_TYPE")
	if providerType != provider.Local && m.ContainerType() != instance.LXC {
		workerName := fmt.Sprintf("%s-provisioner", provisioner.LXC)
		runner.StartWorker(workerName, func() (worker.Worker, error) {
			return provisioner.NewProvisioner(provisioner.LXC, st, a.MachineId, dataDir), nil
		})
	}
	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	if providerType == provider.Local && m.Id() == bootstrapMachineId {
		runner.StartWorker("local-storage", func() (worker.Worker, error) {
			return localstorage.NewWorker(), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			runner.StartWorker("deployer", func() (worker.Worker, error) {
				return newDeployer(st, m.Id(), dataDir), nil
			})
		case state.JobManageEnviron:
			runner.StartWorker("environ-provisioner", func() (worker.Worker, error) {
				return provisioner.NewProvisioner(provisioner.ENVIRON, st, a.MachineId, dataDir), nil
			})
			runner.StartWorker("firewaller", func() (worker.Worker, error) {
				return firewaller.NewFirewaller(st), nil
			})
		case state.JobManageState:
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				if len(a.Conf.StateServerCert) == 0 || len(a.Conf.StateServerKey) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				return apiserver.NewServer(st, fmt.Sprintf(":%d", a.Conf.APIPort), a.Conf.StateServerCert, a.Conf.StateServerKey)
			})
			runner.StartWorker("cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			runner.StartWorker("resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
		default:
			log.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}
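// A minimal sketch of a worker that could be registered via
// runner.StartWorker above, assuming worker.Worker exposes Kill and Wait as
// the start funcs imply. The noopWorker name, its constructor, and the use
// of a tomb are illustrative assumptions, not part of the code above.
type noopWorker struct {
	tomb tomb.Tomb
}

func newNoopWorker() *noopWorker {
	w := &noopWorker{}
	go func() {
		defer w.tomb.Done()
		// Block until the runner asks the worker to stop.
		<-w.tomb.Dying()
	}()
	return w
}

// Kill asks the worker to stop.
func (w *noopWorker) Kill() { w.tomb.Kill(nil) }

// Wait blocks until the worker has stopped and reports its final error.
func (w *noopWorker) Wait() error { return w.tomb.Wait() }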