Example #1
func (s *TxnPrunerSuite) TestPrunes(c *gc.C) {
	fakePruner := newFakeTransactionPruner()
	testClock := testing.NewClock(time.Now())
	interval := time.Minute
	p := txnpruner.New(fakePruner, interval, testClock)
	defer p.Kill()

	select {
	case <-testClock.Alarms():
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for worker to stat")
	}
	c.Logf("pruner running and waiting: %s (%s)", testClock.Now(), time.Now())
	// Show that we prune every minute
	for i := 0; i < 5; i++ {
		testClock.Advance(interval)
		c.Logf("loop %d: %s (%s)", i, testClock.Now(), time.Now())
		select {
		case <-fakePruner.pruneCh:
		case <-time.After(coretesting.LongWait):
			c.Fatal("timed out waiting for pruning to happen")
		}
		// Now we need to wait for the txn pruner to call clock.After again
		// before we advance the clock, or it will be waiting for the wrong time.
		select {
		case <-testClock.Alarms():
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for worker to loop around")
		}
	}
}
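
The tests above (and the ones that follow) rely on a fake transaction pruner that is not shown in the listing. A minimal sketch of what it might look like, assuming the worker only requires a single MaybePruneTransactions method and that pruneCh is the channel the tests read from:

type fakeTransactionPruner struct {
	pruneCh chan struct{}
}

func newFakeTransactionPruner() *fakeTransactionPruner {
	// Unbuffered, so the worker blocks on each prune until the test
	// observes it; buffering here is a design choice, not a requirement.
	return &fakeTransactionPruner{pruneCh: make(chan struct{})}
}

// MaybePruneTransactions signals the test that a prune was requested.
func (p *fakeTransactionPruner) MaybePruneTransactions() error {
	p.pruneCh <- struct{}{}
	return nil
}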
Example #2
func (s *TxnPrunerSuite) TestStops(c *gc.C) {
	success := make(chan bool)
	check := func() {
		p := txnpruner.New(newFakeTransactionPruner(), time.Minute, clock.WallClock)
		p.Kill()
		c.Check(p.Wait(), jc.ErrorIsNil)
		success <- true
	}
	go check()

	select {
	case <-success:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for worker to stop")
	}
}
Example #3
func (s *TxnPrunerSuite) TestPrunes(c *gc.C) {
	fakePruner := newFakeTransactionPruner()
	interval := 10 * time.Millisecond
	p := txnpruner.New(fakePruner, interval)
	defer p.Kill()

	var t0 time.Time
	for i := 0; i < 5; i++ {
		select {
		case <-fakePruner.pruneCh:
			t1 := time.Now()
			if i > 0 {
				// Check that pruning runs at the expected interval
				// (but not the first time around as we don't know
				// when the worker actually started).
				td := t1.Sub(t0)
				c.Assert(td >= interval, jc.IsTrue, gc.Commentf("td=%s", td))
			}
			t0 = t1
		case <-time.After(testing.LongWait):
			c.Fatal("timed out waiting for pruning to happen")
		}
	}
}
Example #4
// startStateWorkers returns a worker running all the workers that
// require a *state.State connection.
func (a *MachineAgent) startStateWorkers(st *state.State) (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	m, err := getMachine(st, agentConfig.Tag())
	if err != nil {
		return nil, errors.Annotate(err, "machine lookup")
	}

	runner := newConnRunner(st)
	singularRunner, err := newSingularStateRunner(runner, st, m)
	if err != nil {
		return nil, errors.Trace(err)
	}

	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented elsewhere with workers that use the API.
		case state.JobManageNetworking:
			// Not used by state workers.
		case state.JobManageModel:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "model worker manager", func() (worker.Worker, error) {
				w, err := modelworkermanager.New(modelworkermanager.Config{
					Backend:    st,
					NewWorker:  a.startModelWorkers,
					ErrorDelay: worker.RestartDelay,
				})
				if err != nil {
					return nil, errors.Annotate(err, "cannot start model worker manager")
				}
				return w, nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				w, err := peergrouperNew(st)
				if err != nil {
					return nil, errors.Annotate(err, "cannot start peergrouper worker")
				}
				return w, nil
			})
			a.startWorkerAfterUpgrade(runner, "restore", func() (worker.Worker, error) {
				w, err := a.newRestoreStateWatcherWorker(st)
				if err != nil {
					return nil, errors.Annotate(err, "cannot start backup-restorer worker")
				}
				return w, nil
			})
			a.startWorkerAfterUpgrade(runner, "mongoupgrade", func() (worker.Worker, error) {
				return newUpgradeMongoWorker(st, a.machineId, a.maybeStopMongo)
			})

			// certChangedChan is shared by multiple workers, so it's up
			// to the agent to close it rather than any one of the
			// workers. It is possible that multiple cert changes
			// come in before the apiserver is up to receive them.
			// Specify a bigger buffer to prevent deadlock when
			// the apiserver isn't up yet.  Use a size of 10 since we
			// allow up to 7 controllers, and might also update the
			// addresses of the local machine (127.0.0.1, ::1, etc).
			//
			// TODO(cherylj/waigani) Remove this workaround when
			// certupdater and apiserver can properly manage dependencies
			// through the dependency engine.
			//
			// TODO(ericsnow) For now we simply do not close the channel.
			certChangedChan := make(chan params.StateServingInfo, 10)
			// Each time the apiserver worker is restarted we need a fresh copy of
			// state, because state holds lease managers which are killed and need
			// to be reset.
			stateOpener := func() (*state.State, error) {
				logger.Debugf("opening state for apiserver worker")
				st, _, err := openState(agentConfig, stateWorkerDialOpts)
				return st, err
			}
			runner.StartWorker("apiserver", a.apiserverWorkerStarter(stateOpener, certChangedChan))
			var stateServingSetter certupdater.StateServingInfoSetter = func(info params.StateServingInfo, done <-chan struct{}) error {
				return a.ChangeConfig(func(config agent.ConfigSetter) error {
					config.SetStateServingInfo(info)
					logger.Infof("update apiserver worker with new certificate")
					select {
					case certChangedChan <- info:
						return nil
					case <-done:
						return nil
					}
				})
			}
			a.startWorkerAfterUpgrade(runner, "certupdater", func() (worker.Worker, error) {
				return newCertificateUpdater(m, agentConfig, st, st, stateServingSetter), nil
			})

			a.startWorkerAfterUpgrade(singularRunner, "dblogpruner", func() (worker.Worker, error) {
				return dblogpruner.New(st, dblogpruner.NewLogPruneParams()), nil
			})

			a.startWorkerAfterUpgrade(singularRunner, "txnpruner", func() (worker.Worker, error) {
				return txnpruner.New(st, time.Hour*2), nil
			})
		default:
			return nil, errors.Errorf("unknown job type %q", job)
		}
	}
	return runner, nil
}
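
For context, the worker exercised by the tests above boils down to a periodic loop: wait for the interval on a clock, ask the pruner to prune, and repeat until killed. The sketch below illustrates that shape; the TransactionPruner interface, the tomb-based Kill/Wait lifecycle, the constructor name, and the import paths are assumptions for illustration, not necessarily the real txnpruner implementation.

package sketch

import (
	"time"

	"github.com/juju/utils/clock"
	"gopkg.in/tomb.v1"
)

// TransactionPruner is the single capability the worker needs from
// state; this interface shape is an assumption for the sketch.
type TransactionPruner interface {
	MaybePruneTransactions() error
}

// pruneWorker periodically asks its pruner to prune, using tomb.v1 to
// provide the Kill/Wait semantics the tests above depend on.
type pruneWorker struct {
	tomb     tomb.Tomb
	pruner   TransactionPruner
	interval time.Duration
	clock    clock.Clock
}

func newPruneWorker(p TransactionPruner, interval time.Duration, c clock.Clock) *pruneWorker {
	w := &pruneWorker{pruner: p, interval: interval, clock: c}
	go func() {
		defer w.tomb.Done()
		w.tomb.Kill(w.loop())
	}()
	return w
}

func (w *pruneWorker) Kill()       { w.tomb.Kill(nil) }
func (w *pruneWorker) Wait() error { return w.tomb.Wait() }

func (w *pruneWorker) loop() error {
	for {
		select {
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case <-w.clock.After(w.interval):
			// Each tick triggers exactly one prune; an error stops the worker.
			if err := w.pruner.MaybePruneTransactions(); err != nil {
				return err
			}
		}
	}
}

Against a worker of this shape, a test like TestStops would pass because Kill and Wait return promptly even before the first tick, and TestPrunes would pass because every advance of the test clock by the interval yields exactly one MaybePruneTransactions call.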