Example #1
// A failure from WatchCleanups is fatal: no further state calls are expected
// and the worker stops with that error.
func (s *CleanerSuite) TestWatchCleanupsError(c *gc.C) {
	s.mockState.err = []error{errors.New("hello")}
	cln := cleaner.NewCleaner(s.mockState)

	s.AssertReceived(c, "WatchCleanups")
	s.AssertEmpty(c)
	err := worker.Stop(cln)
	c.Assert(err, gc.ErrorMatches, "hello")
}
Example #2
// The worker runs a cleanup pass on startup and another one for every
// change reported by the cleanup watcher.
func (s *CleanerSuite) TestCleaner(c *gc.C) {
	cln := cleaner.NewCleaner(s.mockState)
	defer func() { c.Assert(worker.Stop(cln), jc.ErrorIsNil) }()

	s.AssertReceived(c, "WatchCleanups")
	s.AssertReceived(c, "Cleanup")

	s.mockState.watcher.Change()
	s.AssertReceived(c, "Cleanup")
}
Example #3
// A Cleanup failure is logged but does not kill the worker: Stop still
// returns nil.
func (s *CleanerSuite) TestCleanupError(c *gc.C) {
	s.mockState.err = []error{nil, errors.New("hello")}
	cln := cleaner.NewCleaner(s.mockState)

	s.AssertReceived(c, "WatchCleanups")
	s.AssertReceived(c, "Cleanup")
	err := worker.Stop(cln)
	c.Assert(err, jc.ErrorIsNil)
	log := c.GetTestLog()
	c.Assert(log, jc.Contains, "ERROR juju.worker.cleaner cannot cleanup state: hello")
}
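
Examples #1 to #3 lean on suite scaffolding that the snippets do not show: a mockState, its fake cleanup watcher, and the AssertReceived/AssertEmpty helpers. The sketch below is only an illustration of what such scaffolding might look like; the type shapes, the channel-based call recording, and the error queue are assumptions inferred from the tests, not juju's actual test code.

// Hypothetical scaffolding sketch; names mirror the tests above, but the
// implementation is assumed rather than taken from juju.
type mockWatcher struct {
	changes chan struct{}
}

func (w *mockWatcher) Changes() <-chan struct{} { return w.changes }
func (w *mockWatcher) Stop() error              { return nil }

// Change simulates state reporting that new cleanup documents exist.
func (w *mockWatcher) Change() { w.changes <- struct{}{} }

type mockState struct {
	calls   chan string // method names, read back by AssertReceived
	err     []error     // queued results; one entry is consumed per call
	watcher *mockWatcher
}

// popErr returns the next queued error, or nil when the queue is empty.
func (m *mockState) popErr() error {
	if len(m.err) == 0 {
		return nil
	}
	err := m.err[0]
	m.err = m.err[1:]
	return err
}

func (m *mockState) WatchCleanups() (*mockWatcher, error) {
	m.calls <- "WatchCleanups"
	return m.watcher, m.popErr()
}

func (m *mockState) Cleanup() error {
	m.calls <- "Cleanup"
	return m.popErr()
}

AssertReceived(c, name) would then read one entry from calls (failing on a timeout), and AssertEmpty(c) would assert that nothing more arrives within a short wait.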
Example #4
// Integration variant against a real State: destroying a relation queues a
// cleanup, which the running cleaner worker then processes.
func (s *CleanerSuite) TestCleaner(c *gc.C) {
	cr := cleaner.NewCleaner(s.State)
	defer func() { c.Assert(worker.Stop(cr), gc.IsNil) }()

	needed, err := s.State.NeedsCleanup()
	c.Assert(err, gc.IsNil)
	c.Assert(needed, gc.Equals, false)

	s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
	s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql"))
	eps, err := s.State.InferEndpoints([]string{"wordpress", "mysql"})
	c.Assert(err, gc.IsNil)
	relM, err := s.State.AddRelation(eps...)
	c.Assert(err, gc.IsNil)

	needed, err = s.State.NeedsCleanup()
	c.Assert(err, gc.IsNil)
	c.Assert(needed, gc.Equals, false)

	// Observe destroying of the relation with a watcher.
	cw := s.State.WatchCleanups()
	defer func() { c.Assert(cw.Stop(), gc.IsNil) }()

	err = relM.Destroy()
	c.Assert(err, gc.IsNil)

	// Sync and poll until the cleaner has processed the cleanup queued by the
	// relation's destruction, i.e. until NeedsCleanup reports false again.
	timeout := time.After(coretesting.LongWait)
	for {
		s.State.StartSync()
		select {
		case <-time.After(coretesting.ShortWait):
			continue
		case <-timeout:
			c.Fatalf("timed out waiting for cleanup")
		case <-cw.Changes():
			needed, err = s.State.NeedsCleanup()
			c.Assert(err, gc.IsNil)
			if needed {
				continue
			}
		}
		break
	}
}
Example #5
File: machine.go  Project: zhouqt/juju
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	// Create system-identity file.
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return nil, err
	}

	// Start MongoDB server and dial.
	if err := a.ensureMongoServer(agentConfig); err != nil {
		return nil, err
	}
	st, m, err := openState(agentConfig, stateWorkerDialOpts)
	if err != nil {
		return nil, err
	}
	reportOpenedState(st)

	singularStateConn := singularStateConn{st.MongoSession(), m}
	runner := newRunner(connectionIsFatal(st), moreImportant)
	singularRunner, err := newSingularRunner(runner, singularStateConn)
	if err != nil {
		return nil, fmt.Errorf("cannot make singular State Runner: %v", err)
	}

	// Take advantage of the special knowledge that the storage provider is
	// only ever wanted on one machine: the "bootstrap" node.
	providerType := agentConfig.Value(agent.ProviderType)
	if (providerType == provider.Local || provider.IsManual(providerType)) && m.Id() == bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "local-storage", func() (worker.Worker, error) {
			// TODO(axw) 2013-09-24 bug #1229507
			// Make another job to enable storage.
			// There's nothing special about this.
			return localstorage.NewWorker(agentConfig), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented in APIWorker.
		case state.JobManageEnviron:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "instancepoller", func() (worker.Worker, error) {
				return instancepoller.NewWorker(st), nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				return peergrouperNew(st)
			})
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				info, ok := agentConfig.StateServingInfo()
				if !ok {
					return nil, &fatalError{"StateServingInfo not available and we need it"}
				}
				cert := []byte(info.Cert)
				key := []byte(info.PrivateKey)

				if len(cert) == 0 || len(key) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				dataDir := agentConfig.DataDir()
				logDir := agentConfig.LogDir()

				endpoint := net.JoinHostPort("", strconv.Itoa(info.APIPort))
				listener, err := net.Listen("tcp", endpoint)
				if err != nil {
					return nil, err
				}
				return apiserver.NewServer(st, listener, apiserver.ServerConfig{
					Cert:      cert,
					Key:       key,
					DataDir:   dataDir,
					LogDir:    logDir,
					Validator: a.limitLoginsDuringUpgrade,
				})
			})
			a.startWorkerAfterUpgrade(singularRunner, "cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "minunitsworker", func() (worker.Worker, error) {
				return minunitsworker.NewMinUnitsWorker(st), nil
			})
		case state.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			logger.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}
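
Taken together, the mock-based tests pin down the worker's contract: a WatchCleanups failure stops the worker with that error, a cleanup pass runs on startup and after every change notification, and Cleanup errors are only logged. Below is a minimal sketch of a loop with those properties, written purely as an illustration of that behaviour: the StateCleaner and NotifyWatcher shapes, the plain stop channel, and the use of the standard library's log package (in place of juju's loggo logger) are all assumptions, not the real worker that StateWorker above starts via cleaner.NewCleaner.

// Sketch only; see the caveats above.
type StateCleaner interface {
	Cleanup() error
	WatchCleanups() (NotifyWatcher, error)
}

type NotifyWatcher interface {
	Changes() <-chan struct{}
	Stop() error
}

func cleanerLoop(st StateCleaner, stop <-chan struct{}) error {
	w, err := st.WatchCleanups()
	if err != nil {
		// Example #1: a watch failure stops the worker with that error.
		return err
	}
	defer w.Stop()

	for {
		if err := st.Cleanup(); err != nil {
			// Example #3: cleanup failures are logged, never fatal.
			log.Printf("cannot cleanup state: %v", err)
		}
		select {
		case <-stop:
			return nil
		case <-w.Changes():
			// Example #2: each change notification triggers another pass.
		}
	}
}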