Example 1
// NewConnectedStatusWorker creates a new worker that monitors the meter status of the
// unit and runs the meter-status-changed hook appropriately.
func NewConnectedStatusWorker(cfg ConnectedConfig) (worker.Worker, error) {
	handler, err := NewConnectedStatusHandler(cfg)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: handler,
	})
}
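Every example below follows the same shape: build a handler, pass it to watcher.NewNotifyWorker via NotifyConfig, and let the returned worker drive it. For orientation, here is a simplified sketch of the contract the Handler field is assumed to satisfy; the method set mirrors Juju's watcher package, but treat these declarations as illustrative rather than the exact API.

// Simplified, illustrative declarations of the notify-worker contract;
// the real Juju interfaces differ in detail (for instance, the watcher
// is itself a worker with Kill/Wait).
type NotifyWatcher interface {
	// Changes emits an event whenever something of interest may have
	// changed; the event carries no payload.
	Changes() <-chan struct{}
}

type NotifyHandler interface {
	// SetUp starts the watch and returns the watcher whose Changes
	// channel drives Handle.
	SetUp() (NotifyWatcher, error)
	// Handle is called once per change event; returning an error
	// stops the worker.
	Handle(abort <-chan struct{}) error
	// TearDown releases anything acquired in SetUp.
	TearDown() error
}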
Example 2
// NewCleaner returns a worker.Worker that runs state.Cleanup()
// if the CleanupWatcher signals documents marked for deletion.
func NewCleaner(st StateCleaner) (worker.Worker, error) {
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: &Cleaner{st: st},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example 3
// NewWorker returns a machine undertaker worker that will watch for
// machines that need to be removed and remove them, cleaning up any
// necessary provider-level resources first.
func NewWorker(api Facade, env environs.Environ) (worker.Worker, error) {
	// envNetworking is nil when the environ does not support
	// networking; the Undertaker is expected to handle that case.
	envNetworking, _ := environs.SupportsNetworking(env)
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: &Undertaker{API: api, Releaser: envNetworking},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example 4
// NewRetryStrategyWorker returns a worker.Worker that returns the current
// retry strategy and bounces when it changes.
func NewRetryStrategyWorker(config WorkerConfig) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: retryStrategyHandler{config},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &RetryStrategyWorker{w, config.RetryStrategy}, nil
}
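Unlike the other constructors, this one does not return the notify worker directly: it wraps it so callers can also read the strategy the worker was started with. A plausible shape for that wrapper, with the embedded worker supplying Kill and Wait (the field and method names here are assumptions, not necessarily Juju's):

// Hypothetical sketch of the wrapper type constructed above. The
// embedded worker.Worker provides the Kill/Wait lifecycle; the extra
// field exposes the strategy the worker was created with.
type RetryStrategyWorker struct {
	worker.Worker
	retryStrategy params.RetryStrategy
}

// RetryStrategy returns the strategy the worker was started with.
func (w *RetryStrategyWorker) RetryStrategy() params.RetryStrategy {
	return w.retryStrategy
}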
Example 5
// NewAPIAddressUpdater returns a worker.Worker that watches for changes to
// API addresses and then sets them on the APIAddressSetter.
// TODO(fwereade): this should have a config struct, and some validation.
func NewAPIAddressUpdater(addresser APIAddresser, setter APIAddressSetter) (worker.Worker, error) {
	handler := &APIAddressUpdater{
		addresser: addresser,
		setter:    setter,
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: handler,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example 6
// NewMachiner returns a Worker that will wait for the identified machine
// to become Dying and make it Dead; or until the machine becomes Dead by
// other means.
//
// The machineDead function will be called immediately after the machine's
// lifecycle is updated to Dead.
func NewMachiner(cfg Config) (worker.Worker, error) {
	if err := cfg.Validate(); err != nil {
		return nil, errors.Annotate(err, "validating config")
	}
	handler := &Machiner{config: cfg}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: handler,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example 7
func NewReboot(st reboot.State, agentConfig agent.Config, machineLock *fslock.Lock) (worker.Worker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.Errorf("expected names.MachineTag, got %T: %v", agentConfig.Tag(), agentConfig.Tag())
	}
	r := &Reboot{
		st:          st,
		tag:         tag,
		machineLock: machineLock,
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: r,
	})
	return w, errors.Trace(err)
}
Example 8
// NewLogger returns a worker.Worker that uses the notify watcher returned
// from the setup.
func NewLogger(api *logger.State, agentConfig agent.Config) (worker.Worker, error) {
	logger := &Logger{
		api:         api,
		agentConfig: agentConfig,
		lastConfig:  loggo.LoggerInfo(),
	}
	log.Debugf("initial log config: %q", logger.lastConfig)
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: logger,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example 9
// NewWorker returns a worker that keeps track of
// the machine's authorised ssh keys and ensures the
// ~/.ssh/authorized_keys file is up to date.
func NewWorker(st *keyupdater.State, agentConfig agent.Config) (worker.Worker, error) {
	machineTag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		return nil, errors.NotValidf("machine tag %v", agentConfig.Tag())
	}
	if os.HostOS() == os.Windows {
		return worker.NewNoOpWorker(), nil
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: &keyupdaterWorker{
			st:  st,
			tag: machineTag,
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
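Note that on Windows the worker is replaced with a no-op rather than omitted, so the caller still receives a live worker.Worker to manage. A self-contained sketch of what such a no-op worker amounts to (Juju's worker.NewNoOpWorker is the real implementation; this is only illustrative, and assumes "sync" is imported):

// noOpWorker satisfies the worker.Worker interface (Kill/Wait) while
// doing nothing: it simply waits until it is killed.
type noOpWorker struct {
	once sync.Once
	dead chan struct{}
}

func newNoOpWorker() *noOpWorker {
	return &noOpWorker{dead: make(chan struct{})}
}

// Kill is safe to call more than once.
func (w *noOpWorker) Kill() { w.once.Do(func() { close(w.dead) }) }

// Wait blocks until Kill is called and always reports success.
func (w *noOpWorker) Wait() error {
	<-w.dead
	return nil
}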
Example 10
func newNotifyHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*notifyHandler, worker.Worker) {
	nh := &notifyHandler{
		actions:       nil,
		handled:       make(chan struct{}, 1),
		setupError:    setupError,
		teardownError: teardownError,
		handlerError:  handlerError,
		watcher:       newTestNotifyWatcher(),
		setupDone:     make(chan struct{}),
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{Handler: nh})
	c.Assert(err, jc.ErrorIsNil)
	select {
	case <-nh.setupDone:
	case <-time.After(coretesting.ShortWait):
		c.Error("Failed waiting for notifyHandler.Setup to be called during SetUpTest")
	}
	return nh, w
}
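A test built on this helper would inject a change event through the fake watcher and then wait on the handled channel. The sketch below is hypothetical: the changes field on the test watcher and the suite type are assumptions inferred from the fields visible above, not taken from the real test suite.

// Hypothetical test using the helper above.
func (s *notifyWorkerSuite) TestHandleCalledOnChange(c *gc.C) {
	nh, w := newNotifyHandlerWorker(c, nil, nil, nil)
	defer worker.Stop(w)

	nh.watcher.changes <- struct{}{} // inject one change event
	select {
	case <-nh.handled:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("handler was not invoked after a change event")
	}
}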
Example 11
	// first marks the worker's initial pass: the first time through,
	// even if the proxies are empty, we write the files to disk.
	first bool
}

// NewWorker returns a worker.Worker that updates proxy environment variables for the
// process; and, if writeSystemFiles is true, for the whole machine.
var NewWorker = func(api *apiproxyupdater.Facade, writeSystemFiles bool) (worker.Worker, error) {
	logger.Debugf("write system files: %v", writeSystemFiles)
	envWorker := &proxyWorker{
		api:              api,
		writeSystemFiles: writeSystemFiles,
		first:            true,
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: envWorker,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}

func (w *proxyWorker) writeEnvironmentFile() error {
	// Writing the environment file is handled by executing the script for two
	// primary reasons:
	//
	// 1: In order to have the local provider specify the environment settings
	// for the machine agent running on the host, this worker needs to run,
	// but it shouldn't be touching any files on the disk.  If however there is
	// an ubuntu user, it will. This shouldn't be a problem.
Example 12
// startAPIWorkers is called to start workers which rely on the
// machine agent's API connection (via the apiworkers manifold). It
// returns a Runner with a number of workers attached to it.
//
// The workers started here need to be converted to run under the
// dependency engine. Once they have all been converted, this method -
// and the apiworkers manifold - can be removed.
func (a *MachineAgent) startAPIWorkers(apiConn api.Connection) (_ worker.Worker, outErr error) {
	agentConfig := a.CurrentConfig()

	entity, err := apiagent.NewState(apiConn).Entity(a.Tag())
	if err != nil {
		return nil, errors.Trace(err)
	}

	var isModelManager bool
	for _, job := range entity.Jobs() {
		switch job {
		case multiwatcher.JobManageModel:
			isModelManager = true
		default:
			// TODO(dimitern): Once all workers moved over to using
			// the API, report "unknown job type" here.
		}
	}

	runner := newConnRunner(apiConn)
	defer func() {
		// If startAPIWorkers exits early with an error, stop the
		// runner so that any already started workers aren't leaked.
		if outErr != nil {
			worker.Stop(runner)
		}
	}()

	modelConfig, err := apiagent.NewState(apiConn).ModelConfig()
	if err != nil {
		return nil, fmt.Errorf("cannot read model config: %v", err)
	}

	// Perform the operations needed to set up hosting for containers.
	if err := a.setupContainerSupport(runner, apiConn, agentConfig); err != nil {
		cause := errors.Cause(err)
		if params.IsCodeDead(cause) || cause == worker.ErrTerminateAgent {
			return nil, worker.ErrTerminateAgent
		}
		return nil, fmt.Errorf("setting up container support: %v", err)
	}

	if isModelManager {
		// Published image metadata for some providers is in simple streams.
		// Providers that do not depend on simple streams do not need this worker.
		env, err := newEnvirons(modelConfig)
		if err != nil {
			return nil, errors.Annotate(err, "getting environ")
		}
		if _, ok := env.(simplestreams.HasRegion); ok {
			// Start worker that stores published image metadata in state.
			runner.StartWorker("imagemetadata", func() (worker.Worker, error) {
				return newMetadataUpdater(apiConn.MetadataUpdater()), nil
			})
		}

		// The bootstrap machine is the only one without instance info
		// and network config set at this point, so update it now. All
		// the other machines will have instance info, including
		// network config, set at provisioning time.
		if err := a.setControllerNetworkConfig(apiConn); err != nil {
			return nil, errors.Annotate(err, "setting controller network config")
		}
	} else {
		runner.StartWorker("stateconverter", func() (worker.Worker, error) {
			// TODO(fwereade): this worker needs its own facade.
			facade := apimachiner.NewState(apiConn)
			handler := conv2state.New(facade, a)
			w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
				Handler: handler,
			})
			if err != nil {
				return nil, errors.Annotate(err, "cannot start controller promoter worker")
			}
			return w, nil
		})
	}
	return runner, nil
}
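Stripped of Juju's machinery, the idiom shared by all twelve examples reduces to a small event loop: set up a watcher, run the handler once per payload-free change event, and tear down on exit. The following self-contained sketch shows that shape; it is not Juju code (Juju's loop also integrates tomb-based lifecycle management), just a runnable illustration.

package main

import "fmt"

// handler mirrors the SetUp/Handle/TearDown contract used by the
// examples above, simplified so the sketch runs on its own.
type handler interface {
	SetUp() (<-chan struct{}, error)
	Handle() error
	TearDown() error
}

// run drives a handler from its change channel until done is closed.
func run(h handler, done <-chan struct{}) error {
	changes, err := h.SetUp()
	if err != nil {
		return err
	}
	defer h.TearDown()
	for {
		select {
		case <-changes:
			if err := h.Handle(); err != nil {
				return err
			}
		case <-done:
			return nil
		}
	}
}

// printer is a toy handler: it reports one change, then asks to stop.
type printer struct{ done chan struct{} }

func (p printer) SetUp() (<-chan struct{}, error) {
	ch := make(chan struct{}, 1)
	ch <- struct{}{} // queue one synthetic change event
	return ch, nil
}

func (p printer) Handle() error {
	fmt.Println("change observed")
	close(p.done) // stop the loop after the first event, for the demo
	return nil
}

func (p printer) TearDown() error { return nil }

func main() {
	done := make(chan struct{})
	_ = run(printer{done: done}, done)
}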