Example #1
func (s *ResumerSuite) TestResumerCalls(c *gc.C) {
	// A shorter interval and a mock help to count
	// the resumer calls in a given timespan.
	testInterval := 10 * time.Millisecond
	resumer.SetInterval(testInterval)
	defer resumer.RestoreInterval()

	tr := &transactionResumerMock{[]time.Time{}}
	rr := resumer.NewResumer(tr)
	defer func() { c.Assert(rr.Stop(), gc.IsNil) }()

	time.Sleep(10 * testInterval)

	// Check that a number of calls has happened with a time
	// difference somewhere between the interval and four times
	// the interval. A more precise time behavior cannot be
	// specified due to the load during the test.
	c.Assert(len(tr.timestamps) > 0, gc.Equals, true)
	for i := 1; i < len(tr.timestamps); i++ {
		diff := tr.timestamps[i].Sub(tr.timestamps[i-1])

		c.Assert(diff >= testInterval, gc.Equals, true)
		c.Assert(diff <= 4*testInterval, gc.Equals, true)
	}
}
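
The test above only relies on the mock recording when each resume attempt happens. A minimal sketch of such a mock, assuming the worker drives it through a single ResumeTransactions() error method (names inferred from these tests, not taken from the original source):

// transactionResumerMock records the time of every resume attempt so the
// test can inspect the spacing between calls. No locking is shown here;
// the real mock may synchronise access to the slice.
type transactionResumerMock struct {
	timestamps []time.Time
}

// ResumeTransactions satisfies the assumed TransactionResumer interface by
// appending the current time instead of touching a real transaction log.
func (tr *transactionResumerMock) ResumeTransactions() error {
	tr.timestamps = append(tr.timestamps, time.Now())
	return nil
}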
Example #2
func (s *ResumerSuite) TestRunStopWithState(c *gc.C) {
	// Test with state ensures that state fulfills the
	// TransactionResumer interface.
	rr := resumer.NewResumer(s.State)

	c.Assert(rr.Stop(), gc.IsNil)
}
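
The comment above refers to the TransactionResumer interface; judging from the other examples it is probably a single-method interface along these lines (a sketch, not the verbatim declaration):

// TransactionResumer is the assumed contract the resumer worker drives:
// anything that can resume incomplete transactions, e.g. *state.State or
// the mocks used in these tests.
type TransactionResumer interface {
	// ResumeTransactions resumes all pending transactions.
	ResumeTransactions() error
}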
Example #3
func checkInvalid(c *gc.C, config resumer.Config, match string) {
	check := func(err error) {
		c.Check(err, jc.Satisfies, errors.IsNotValid)
		c.Check(err, gc.ErrorMatches, match)
	}
	check(config.Validate())

	worker, err := resumer.NewResumer(config)
	workertest.CheckNilOrKill(c, worker)
	check(err)
}
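
A caller would hand checkInvalid a deliberately broken Config; the field names below come from Example #6, while the suite name and the error text are only hypothetical illustrations:

func (s *ValidateSuite) TestMissingFacade(c *gc.C) {
	config := resumer.Config{
		// Facade deliberately left nil to make Validate() fail.
		Interval: time.Hour,
		Clock:    testing.NewClock(time.Now()),
	}
	// The match string is hypothetical; the real message depends on Validate().
	checkInvalid(c, config, "nil Facade not valid")
}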
Example #4
func (s *ResumerSuite) TestResumerCalls(c *gc.C) {
	// A shorter interval and a mock help to count
	// the resumer calls in a given timespan.
	testInterval := coretesting.ShortWait
	resumer.SetInterval(testInterval)
	defer resumer.RestoreInterval()

	rr := resumer.NewResumer(s.mockState)
	defer func() { c.Assert(rr.Stop(), gc.IsNil) }()

	time.Sleep(10 * testInterval)

	s.mockState.CheckTimestamps(c, testInterval)
}
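
CheckTimestamps presumably factors out the timing loop shown in Example #1; a sketch of what the mock state's helper might look like, with a hypothetical receiver type name:

// CheckTimestamps verifies that at least one resume attempt was recorded and
// that consecutive attempts are spaced between one and four intervals apart,
// mirroring the inline assertions of Example #1.
func (m *resumerStateMock) CheckTimestamps(c *gc.C, interval time.Duration) {
	c.Assert(len(m.timestamps) > 0, gc.Equals, true)
	for i := 1; i < len(m.timestamps); i++ {
		diff := m.timestamps[i].Sub(m.timestamps[i-1])
		c.Assert(diff >= interval, gc.Equals, true)
		c.Assert(diff <= 4*interval, gc.Equals, true)
	}
}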
Example #5
func (s *ResumerSuite) TestResumeTransactionsFailure(c *gc.C) {
	// Force the first call to ResumeTransactions() to fail; the
	// remaining calls return no error.
	s.mockState.SetErrors(errors.New("boom!"))

	// A shorter interval and a mock help to count
	// the resumer calls in a given timespan.
	testInterval := coretesting.ShortWait
	resumer.SetInterval(testInterval)
	defer resumer.RestoreInterval()

	rr := resumer.NewResumer(s.mockState)
	defer func() { c.Assert(rr.Stop(), gc.IsNil) }()

	// Over 4 intervals, between 2 and 3 calls should be made.
	time.Sleep(4 * testInterval)
	s.mockState.CheckNumCallsBetween(c, 2, 3)
}
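
CheckNumCallsBetween likely just bounds the number of recorded attempts; another sketch using the same hypothetical mock type:

// CheckNumCallsBetween asserts that the number of resume attempts recorded
// while the worker ran falls within the expected range.
func (m *resumerStateMock) CheckNumCallsBetween(c *gc.C, minCalls, maxCalls int) {
	n := len(m.timestamps)
	c.Assert(n >= minCalls, gc.Equals, true)
	c.Assert(n <= maxCalls, gc.Equals, true)
}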
Example #6
func (fix fixture) Run(c *gc.C, test TestFunc) *testing.Stub {

	stub := &testing.Stub{}
	stub.SetErrors(fix.errors...)
	clock := testing.NewClock(time.Now())
	facade := newMockFacade(stub)

	worker, err := resumer.NewResumer(resumer.Config{
		Facade:   facade,
		Interval: time.Hour,
		Clock:    clock,
	})
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, worker)

	test(clock, worker)
	return stub
}
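
A test built on this fixture would advance the fake clock past the configured interval and then inspect the stub. A sketch, assuming TestFunc is func(*testing.Clock, worker.Worker) and that the suite and test names are hypothetical; since the exact number of facade calls depends on the worker's loop, only a lower bound is checked here:

func (s *ResumerSuite) TestTriggersOnClockAdvance(c *gc.C) {
	fix := fixture{}
	stub := fix.Run(c, func(clock *testing.Clock, w worker.Worker) {
		// Wait for the worker to register its timer before advancing,
		// otherwise the advance can race with the alarm being set.
		select {
		case <-clock.Alarms():
		case <-time.After(coretesting.LongWait):
			c.Fatalf("worker never set a timer")
		}
		clock.Advance(time.Hour)
		// Crude synchronisation; the real tests likely use a tighter helper.
		time.Sleep(coretesting.ShortWait)
	})
	c.Check(len(stub.Calls()) > 0, gc.Equals, true)
}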
Example #7
func (s *ResumerSuite) TestRunStopWithMockState(c *gc.C) {
	w := resumer.NewResumer(s.mockState)
	c.Assert(worker.Stop(w), gc.IsNil)
}
Example #8
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	// Create system-identity file.
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return nil, err
	}

	// Start MongoDB server and dial.
	if err := a.ensureMongoServer(agentConfig); err != nil {
		return nil, err
	}
	st, m, err := openState(agentConfig, stateWorkerDialOpts)
	if err != nil {
		return nil, err
	}
	reportOpenedState(st)

	singularStateConn := singularStateConn{st.MongoSession(), m}
	runner := newRunner(connectionIsFatal(st), moreImportant)
	singularRunner, err := newSingularRunner(runner, singularStateConn)
	if err != nil {
		return nil, fmt.Errorf("cannot make singular State Runner: %v", err)
	}

	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	providerType := agentConfig.Value(agent.ProviderType)
	if (providerType == provider.Local || provider.IsManual(providerType)) && m.Id() == bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "local-storage", func() (worker.Worker, error) {
			// TODO(axw) 2013-09-24 bug #1229507
			// Make another job to enable storage.
			// There's nothing special about this.
			return localstorage.NewWorker(agentConfig), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented in APIWorker.
		case state.JobManageEnviron:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "instancepoller", func() (worker.Worker, error) {
				return instancepoller.NewWorker(st), nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				return peergrouperNew(st)
			})
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				info, ok := agentConfig.StateServingInfo()
				if !ok {
					return nil, &fatalError{"StateServingInfo not available and we need it"}
				}
				cert := []byte(info.Cert)
				key := []byte(info.PrivateKey)

				if len(cert) == 0 || len(key) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				dataDir := agentConfig.DataDir()
				logDir := agentConfig.LogDir()

				endpoint := net.JoinHostPort("", strconv.Itoa(info.APIPort))
				listener, err := net.Listen("tcp", endpoint)
				if err != nil {
					return nil, err
				}
				return apiserver.NewServer(st, listener, apiserver.ServerConfig{
					Cert:      cert,
					Key:       key,
					DataDir:   dataDir,
					LogDir:    logDir,
					Validator: a.limitLoginsDuringUpgrade,
				})
			})
			a.startWorkerAfterUpgrade(singularRunner, "cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "minunitsworker", func() (worker.Worker, error) {
				return minunitsworker.NewMinUnitsWorker(st), nil
			})
		case state.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			logger.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}