Example #1
// New returns a worker.Worker for history Pruner.
func New(st *state.State, params *HistoryPrunerParams) worker.Worker {
	w := &pruneWorker{
		st:     st,
		params: params,
	}
	return worker.NewSimpleWorker(w.loop)
}
Example #2
// New starts a logsender worker which reads log message structs from
// a channel and sends them to the JES via the logsink API.
func New(logs chan *LogRecord, apiInfo *api.Info) worker.Worker {
	loop := func(stop <-chan struct{}) error {
		logger.Debugf("starting logsender worker")

		conn, err := dialLogsinkAPI(apiInfo)
		if err != nil {
			return errors.Annotate(err, "logsender dial failed")
		}
		defer conn.Close()

		for {
			select {
			case rec := <-logs:
				err := websocket.JSON.Send(conn, &apiserver.LogMessage{
					Time:     rec.Time,
					Module:   rec.Module,
					Location: rec.Location,
					Level:    rec.Level,
					Message:  rec.Message,
				})
				if err != nil {
					// Note: due to the fire-and-forget nature of the
					// logsink API, it is possible that when the
					// connection dies, any logs that were "in-flight"
					// will not be recorded on the server side.
					return errors.Annotate(err, "logsink connection failed")
				}
			case <-stop:
				return nil
			}
		}
	}
	return worker.NewSimpleWorker(loop)
}
Example #3
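// mongoWorker dials mongo at the agent's host/port, wraps the session in a
// mongoConn, and returns a runner on which a singular worker is started to
// run a.worker against that session.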
func (a *agent) mongoWorker() (worker.Worker, error) {
	dialInfo := gitjujutesting.MgoDialInfo(coretesting.Certs, a.hostPort)
	session, err := mgo.DialWithInfo(dialInfo)
	if err != nil {
		return nil, err
	}
	mc := &mongoConn{
		localHostPort: a.hostPort,
		session:       session,
	}

	fn := func(err0, err1 error) bool { return true }
	runner := worker.NewRunner(connectionIsFatal(mc), fn, worker.RestartDelay)
	singularRunner, err := singular.New(runner, mc)
	if err != nil {
		return nil, fmt.Errorf("cannot start singular runner: %v", err)
	}
	a.notify.workerConnected()
	singularRunner.StartWorker(fmt.Sprint("worker-", a.notify.id), func() (worker.Worker, error) {
		return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
			return a.worker(session, stop)
		}), nil
	})
	return runner, nil
}
Example #4
// upgradeWaiterWorker runs the specified worker after upgrades have completed.
func (a *MachineAgent) upgradeWaiterWorker(name string, start func() (worker.Worker, error)) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		// Wait for the agent upgrade and upgrade steps to complete (or for us to be stopped).
		for _, ch := range []<-chan struct{}{
			a.upgradeComplete.Unlocked(),
			a.initialUpgradeCheckComplete.Unlocked(),
		} {
			select {
			case <-stop:
				return nil
			case <-ch:
			}
		}
		logger.Debugf("upgrades done, starting worker %q", name)

		// Upgrades are done, start the worker.
		w, err := start()
		if err != nil {
			return err
		}
		// Wait for worker to finish or for us to be stopped.
		done := make(chan error, 1)
		go func() {
			done <- w.Wait()
		}()
		select {
		case err := <-done:
			return errors.Annotatef(err, "worker %q exited", name)
		case <-stop:
			logger.Debugf("stopping so killing worker %q", name)
			return worker.Stop(w)
		}
	})
}
Example #5
func (s *singularSuite) TestWithIsMasterTrue(c *gc.C) {
	// When IsMaster returns true, workers get started on the underlying
	// runner as usual.
	s.PatchValue(&singular.PingInterval, 1*time.Millisecond)
	underlyingRunner := newRunner()
	conn := &fakeConn{
		isMaster: true,
	}
	r, err := singular.New(underlyingRunner, conn)
	c.Assert(err, gc.IsNil)

	started := make(chan struct{}, 1)
	err = r.StartWorker("worker", func() (worker.Worker, error) {
		return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
			started <- struct{}{}
			<-stop
			return nil
		}), nil
	})
	c.Assert(err, gc.IsNil)
	select {
	case <-started:
	case <-time.After(testing.LongWait):
		c.Fatalf("timed out waiting for worker to start")
	}

	err = worker.Stop(r)
	c.Assert(err, gc.IsNil)
}
Example #6
// upgradeWaiterWorker runs the specified worker after upgrades have completed.
func (a *MachineAgent) upgradeWaiterWorker(start func() (worker.Worker, error)) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		// Wait for the upgrade to complete (or for us to be stopped).
		select {
		case <-stop:
			return nil
		case <-a.upgradeWorkerContext.UpgradeComplete:
		}
		// Upgrades are done, start the worker.
		worker, err := start()
		if err != nil {
			return err
		}
		// Wait for worker to finish or for us to be stopped.
		waitCh := make(chan error)
		go func() {
			waitCh <- worker.Wait()
		}()
		select {
		case err := <-waitCh:
			return err
		case <-stop:
			worker.Kill()
		}
		return <-waitCh // Ensure worker has stopped before returning.
	})
}
Example #7
// NewUndertaker returns a worker which processes a dying environment.
func NewUndertaker(client apiundertaker.UndertakerClient, clock uc.Clock) worker.Worker {
	f := func(stopCh <-chan struct{}) error {
		result, err := client.EnvironInfo()
		if err != nil {
			return errors.Trace(err)
		}
		if result.Error != nil {
			return errors.Trace(result.Error)
		}
		envInfo := result.Result

		if envInfo.Life == params.Alive {
			return errors.Errorf("undertaker worker should not be started for an alive environment: %q", envInfo.GlobalName)
		}

		if envInfo.Life == params.Dying {
			// Process the dying environment. This blocks until the environment
			// is dead.
			processDyingEnv(client, clock, stopCh)
		}

		// If environ is not alive or dying, it must be dead.

		if envInfo.IsSystem {
			// Nothing to do. We don't remove environment docs for a state server
			// environment.
			return nil
		}

		cfg, err := client.EnvironConfig()
		if err != nil {
			return errors.Trace(err)
		}

		env, err := environs.New(cfg)
		if err != nil {
			return errors.Trace(err)
		}

		err = env.Destroy()
		if err != nil {
			return errors.Trace(err)
		}

		tod := clock.Now()
		if envInfo.TimeOfDeath != nil {
			// If TimeOfDeath is not nil, the environment was already dead
			// before the worker was started. So we use the recorded time of
			// death. This may happen if the system is rebooted after an
			// environment is set to dead, but before the environ docs are
			// removed.
			tod = *envInfo.TimeOfDeath
		}

		// Process the dead environment
		return processDeadEnv(client, clock, tod, stopCh)
	}
	return worker.NewSimpleWorker(f)
}
Example #8
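// Worker stores the agent, API connection and machine jobs on the upgrade
// context and returns a simple worker that executes c.run.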
func (c *upgradeWorkerContext) Worker(
	agent upgradingMachineAgent,
	apiState *api.State,
	jobs []params.MachineJob,
) worker.Worker {
	c.agent = agent
	c.apiState = apiState
	c.jobs = jobs
	return worker.NewSimpleWorker(c.run)
}
Example #9
func (c *upgradeWorkerContext) Worker(
	agent upgradingMachineAgent,
	apiState api.Connection,
	jobs []multiwatcher.MachineJob,
) worker.Worker {
	c.agent = agent
	c.apiState = apiState
	c.jobs = jobs
	return worker.NewSimpleWorker(c.run)
}
Example #10
// New starts a logsender worker which reads log message structs from
// a channel and sends them to the JES via the logsink API.
func New(logs LogRecordCh, apiInfoGate gate.Waiter, agent agent.Agent) worker.Worker {
	loop := func(stop <-chan struct{}) error {
		logger.Debugf("started log-sender worker; waiting for api info")
		select {
		case <-apiInfoGate.Unlocked():
		case <-stop:
			return nil
		}

		logger.Debugf("dialing log-sender connection")
		apiInfo := agent.CurrentConfig().APIInfo()
		conn, err := dialLogsinkAPI(apiInfo)
		if err != nil {
			return errors.Annotate(err, "logsender dial failed")
		}
		defer conn.Close()

		for {
			select {
			case rec := <-logs:
				err := sendLogRecord(conn, rec.Time, rec.Module, rec.Location, rec.Level, rec.Message)
				if err != nil {
					return errors.Trace(err)
				}
				if rec.DroppedAfter > 0 {
					// If messages were dropped after this one, report
					// the count (the source of the log messages -
					// BufferedLogWriter - handles the actual dropping
					// and counting).
					//
					// Any logs indicated as dropped here will
					// never end up in the logs DB in the JES
					// (although will still be in the local agent log
					// file). Message dropping by the
					// BufferedLogWriter is last resort protection
					// against memory exhaustion and should only
					// happen if API connectivity is lost for extended
					// periods. The maximum in-memory log buffer is
					// quite large (see the InstallBufferedLogWriter
					// call in jujuDMain).
					err := sendLogRecord(conn, rec.Time, loggerName, "", loggo.WARNING,
						fmt.Sprintf("%d log messages dropped due to lack of API connectivity", rec.DroppedAfter))
					if err != nil {
						return errors.Trace(err)
					}
				}

			case <-stop:
				return nil
			}
		}
	}
	return worker.NewSimpleWorker(loop)
}
Example #11
File: worker.go Project: bac/juju
// New starts a logsender worker which reads log message structs from
// a channel and sends them to the JES via the logsink API.
func New(logs LogRecordCh, logSenderAPI *logsender.API) worker.Worker {
	loop := func(stop <-chan struct{}) error {
		logWriter, err := logSenderAPI.LogWriter()
		if err != nil {
			return errors.Annotate(err, "logsender dial failed")
		}
		defer logWriter.Close()
		for {
			select {
			case rec := <-logs:
				err := logWriter.WriteLog(&params.LogRecord{
					Time:     rec.Time,
					Module:   rec.Module,
					Location: rec.Location,
					Level:    rec.Level.String(),
					Message:  rec.Message,
				})
				if err != nil {
					return errors.Trace(err)
				}
				if rec.DroppedAfter > 0 {
					// If messages were dropped after this one, report
					// the count (the source of the log messages -
					// BufferedLogWriter - handles the actual dropping
					// and counting).
					//
					// Any logs indicated as dropped here will
					// never end up in the logs DB in the JES
					// (although will still be in the local agent log
					// file). Message dropping by the
					// BufferedLogWriter is last resort protection
					// against memory exhaustion and should only
					// happen if API connectivity is lost for extended
					// periods. The maximum in-memory log buffer is
					// quite large (see the InstallBufferedLogWriter
					// call in jujuDMain).
					err := logWriter.WriteLog(&params.LogRecord{
						Time:    rec.Time,
						Module:  loggerName,
						Level:   loggo.WARNING.String(),
						Message: fmt.Sprintf("%d log messages dropped due to lack of API connectivity", rec.DroppedAfter),
					})
					if err != nil {
						return errors.Trace(err)
					}
				}

			case <-stop:
				return nil
			}
		}
	}
	return worker.NewSimpleWorker(loop)
}
Example #12
// upgradeWorker runs the required upgrade operations to upgrade to the current Juju version.
func (a *MachineAgent) upgradeWorker(
	apiState *api.State,
	jobs []params.MachineJob,
	agentConfig agent.Config,
) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		select {
		case <-a.upgradeComplete:
			// Our work is already done (we're probably being restarted
			// because the API connection has gone down), so do nothing.
			<-stop
			return nil
		default:
		}
		// If the machine agent is a state server, flag that state
		// needs to be opened before running upgrade steps
		needsState := false
		for _, job := range jobs {
			if job == params.JobManageEnviron {
				needsState = true
			}
		}
		// We need a *state.State for upgrades. We open it independently
		// of StateWorker, because we have no guarantees about when
		// and how often StateWorker might run.
		var st *state.State
		if needsState {
			if err := a.ensureMongoServer(agentConfig); err != nil {
				return err
			}
			var err error
			info, ok := agentConfig.StateInfo()
			if !ok {
				return fmt.Errorf("no state info available")
			}
			st, err = state.Open(info, mongo.DialOpts{}, environs.NewStatePolicy())
			if err != nil {
				return err
			}
			defer st.Close()
		}
		err := a.runUpgrades(st, apiState, jobs, agentConfig)
		if err != nil {
			return err
		}
		logger.Infof("upgrade to %v completed.", version.Current)
		close(a.upgradeComplete)
		<-stop
		return nil
	})
}
Example #13
// New returns a worker which periodically prunes the data for
// completed transactions.
func New(tp TransactionPruner, interval time.Duration, clock clock.Clock) worker.Worker {
	return worker.NewSimpleWorker(func(stopCh <-chan struct{}) error {
		for {
			select {
			case <-clock.After(interval):
				err := tp.MaybePruneTransactions()
				if err != nil {
					return errors.Annotate(err, "pruning failed, txnpruner stopping")
				}
			case <-stopCh:
				return nil
			}
		}
	})
}
Example #14
func (r *runner) StartWorker(id string, startFunc func() (worker.Worker, error)) error {
	if r.isMaster {
		// We are master; the started workers shouldn't
		// encounter an error as they do what they're supposed
		// to do - we can just start the worker in the
		// underlying runner.
		logger.Infof("starting %q", id)
		return r.Runner.StartWorker(id, startFunc)
	}
	logger.Infof("standby %q", id)
	// We're not master, so don't start the worker, but start a pinger so
	// that we know when the connection master changes.
	r.startPingerOnce.Do(func() {
		go r.pinger()
	})
	return r.Runner.StartWorker(id, func() (worker.Worker, error) {
		return worker.NewSimpleWorker(r.waitPinger), nil
	})
}
Example #15
// New returns a worker which periodically prunes the data for
// completed transactions.
func New(tp TransactionPruner, interval time.Duration) worker.Worker {
	return worker.NewSimpleWorker(func(stopCh <-chan struct{}) error {
		// Use a timer rather than a ticker because pruning could
		// sometimes take a while and we don't want pruning attempts
		// to occur back-to-back.
		timer := time.NewTimer(interval)
		defer timer.Stop()
		for {
			select {
			case <-timer.C:
				err := tp.MaybePruneTransactions()
				if err != nil {
					return errors.Annotate(err, "pruning failed, txnpruner stopping")
				}
				timer.Reset(interval)
			case <-stopCh:
				return nil
			}
		}
	})
}
Example #16
// upgradeWaiterWorker runs the specified worker after upgrades have completed.
func (a *MachineAgent) upgradeWaiterWorker(name string, start func() (worker.Worker, error)) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		// Wait for the agent upgrade and upgrade steps to complete (or for us to be stopped).
		for _, ch := range []<-chan struct{}{
			a.upgradeComplete.Unlocked(),
			a.initialUpgradeCheckComplete.Unlocked(),
		} {
			select {
			case <-stop:
				return nil
			case <-ch:
			}
		}
		logger.Debugf("upgrades done, starting worker %q", name)

		// Upgrades are done, start the worker.
		worker, err := start()
		if err != nil {
			return err
		}
		// Wait for worker to finish or for us to be stopped.
		waitCh := make(chan error)
		go func() {
			waitCh <- worker.Wait()
		}()
		select {
		case err := <-waitCh:
			logger.Debugf("worker %q exited with %v", name, err)
			return err
		case <-stop:
			logger.Debugf("stopping so killing worker %q", name)
			worker.Kill()
		}
		return <-waitCh // Ensure worker has stopped before returning.
	})
}
Example #17
// upgradeWorker runs the required upgrade operations to upgrade to the current Juju version.
func (a *MachineAgent) upgradeWorker(
	apiState *api.State,
	jobs []params.MachineJob,
	agentConfig agent.Config,
) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		select {
		case <-a.upgradeComplete:
			// Our work is already done (we're probably being restarted
			// because the API connection has gone down), so do nothing.
			<-stop
			return nil
		default:
		}
		// If the machine agent is a state server, flag that state
		// needs to be opened before running upgrade steps
		needsState := false
		for _, job := range jobs {
			if job == params.JobManageEnviron {
				needsState = true
			}
		}
		// We need a *state.State for upgrades. We open it independently
		// of StateWorker, because we have no guarantees about when
		// and how often StateWorker might run.
		var st *state.State
		if needsState {
			if err := a.ensureMongoServer(agentConfig); err != nil {
				return err
			}
			var err error
			info, ok := agentConfig.MongoInfo()
			if !ok {
				return fmt.Errorf("no state info available")
			}
			st, err = state.Open(info, mongo.DialOpts{}, environs.NewStatePolicy())
			if err != nil {
				return err
			}
			defer st.Close()
		}
		err := a.runUpgrades(st, apiState, jobs, agentConfig)
		if err == nil {
			// Only signal that the upgrade is complete if no error
			// was returned.
			close(a.upgradeComplete)
		} else if !isFatal(err) {
			// Only non-fatal errors are returned (this will trigger
			// the worker to be restarted).
			//
			// Fatal upgrade errors are not returned because user
			// intervention is required at that point. We don't want
			// the upgrade worker or the agent to restart. Status
			// output and the logs will report that the upgrade has
			// failed.
			return err
		}
		<-stop
		return nil
	})
}
Example #18
// newStateStarterWorker wraps stateStarter in a simple worker for use in
// a.runner.StartWorker.
func (a *MachineAgent) newStateStarterWorker() (worker.Worker, error) {
	return worker.NewSimpleWorker(a.stateStarter), nil
}
Example #19
File: manifold.go Project: bac/juju
		return nil, errors.New("this manifold may only be used inside a machine agent")
	}

	// Get the machine agent's jobs.
	entity, err := apiagent.NewState(apiCaller).Entity(tag)
	if err != nil {
		return nil, err
	}

	var isModelManager bool
	for _, job := range entity.Jobs() {
		if job == multiwatcher.JobManageModel {
			isModelManager = true
			break
		}
	}

	if !isModelManager {
		return nil, dependency.ErrMissing
	}

	return NewWorker(cfg)
}

var NewWorker = func(agentConfig agent.Config) (worker.Worker, error) {
	inner := func(<-chan struct{}) error {
		return agent.WriteSystemIdentityFile(agentConfig)
	}
	return worker.NewSimpleWorker(inner), nil
}
Example #20
// New returns a worker, or an error in case of failure.
// The worker watches the machine's mongo-upgrade information in state
// and changes the agent configuration accordingly.
func New(st *state.State, machineID string, maybeStopMongo StopMongo) (worker.Worker, error) {
	upgradeWorker := func(stopch <-chan struct{}) error {
		return upgradeMongoWatcher(st, stopch, machineID, maybeStopMongo)
	}
	return worker.NewSimpleWorker(upgradeWorker), nil
}
Example #21
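// newDummyWorker returns a do-nothing worker that simply blocks until it
// is asked to stop.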
func newDummyWorker() worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		<-stop
		return nil
	})
}
Example #22
// newRestoreStateWatcherWorker returns a worker, or an error if there
// is a failure. The worker watches the state of the restoreInfo doc
// and puts the agent into the appropriate restore mode.
func (a *MachineAgent) newRestoreStateWatcherWorker(st *state.State) (worker.Worker, error) {
	rWorker := func(stopch <-chan struct{}) error {
		return a.restoreStateWatcher(st, stopch)
	}
	return worker.NewSimpleWorker(rWorker), nil
}
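Every example above follows the same basic shape: worker.NewSimpleWorker wraps a single loop function of the form func(stop <-chan struct{}) error; the loop returns nil when the stop channel fires (a clean shutdown) and returns a non-nil error to report failure to whatever runner started it. The following is a minimal sketch of that pattern. The doWork callback, the polling interval and the import paths are illustrative assumptions chosen to match the era of these examples, not code taken from any of them.

package example

import (
	"time"

	"github.com/juju/errors"
	"github.com/juju/juju/worker"
)

// newPollingWorker is a sketch of the worker.NewSimpleWorker pattern:
// run doWork on a fixed interval until the worker is stopped.
func newPollingWorker(doWork func() error, interval time.Duration) worker.Worker {
	loop := func(stop <-chan struct{}) error {
		for {
			select {
			case <-time.After(interval):
				if err := doWork(); err != nil {
					// A non-nil return stops the worker; the runner that
					// started it decides whether to restart.
					return errors.Trace(err)
				}
			case <-stop:
				// A nil return reports a clean shutdown.
				return nil
			}
		}
	}
	return worker.NewSimpleWorker(loop)
}

As in the examples above, a caller would typically hand the result to a runner via StartWorker, or shut it down directly with worker.Stop.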