func (s *RunTestSuite) TestNoContextWithLock(c *gc.C) {
	lock, err := cmdutil.HookExecutionLock(cmdutil.DataDir)
	c.Assert(err, jc.ErrorIsNil)
	lock.Lock("juju-run test")
	defer lock.Unlock() // in case of failure

	channel := startRunAsync(c, []string{"--no-context", "echo done"})
	ctx, err := waitForResult(channel, testing.ShortWait)
	c.Assert(err, gc.ErrorMatches, "timeout")

	lock.Unlock()
	ctx, err = waitForResult(channel, testing.LongWait)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(strings.TrimRight(testing.Stdout(ctx), "\r\n"), gc.Equals, "done")
}
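// For context: the test above relies on a waitForResult helper that reads a
// *cmd.Context off the channel or gives up after a timeout. A minimal sketch
// of such a helper follows; the exact signature and error text are
// assumptions, not the real test scaffolding.
func waitForResultSketch(channel <-chan *cmd.Context, timeout time.Duration) (*cmd.Context, error) {
	select {
	case ctx := <-channel:
		// The command completed while we were waiting.
		return ctx, nil
	case <-time.After(timeout):
		// No result within the allotted time; the test above matches
		// on this error string.
		return nil, errors.New("timeout")
	}
}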
// newWorker trivially wraps NewReboot for use in a util.PostUpgradeManifold.
//
// TODO(mjs) - It's not tested at the moment, because the scaffolding
// necessary is too unwieldy/distracting to introduce at this point.
func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) {
	apiConn, ok := apiCaller.(api.Connection)
	if !ok {
		return nil, errors.New("unable to obtain api.Connection")
	}
	rebootState, err := apiConn.Reboot()
	if err != nil {
		return nil, errors.Trace(err)
	}
	lock, err := cmdutil.HookExecutionLock(cmdutil.DataDir)
	if err != nil {
		return nil, errors.Trace(err)
	}
	w, err := NewReboot(rebootState, a.CurrentConfig(), lock)
	if err != nil {
		return nil, errors.Annotate(err, "cannot start reboot worker")
	}
	return w, nil
}
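// A minimal sketch of how newWorker would plug into the dependency engine
// via util.PostUpgradeManifold, per the doc comment above. The exported
// Manifold wrapper shown here is an assumption based on that pattern, not
// verified against the surrounding package.
func Manifold(config util.PostUpgradeManifoldConfig) dependency.Manifold {
	// PostUpgradeManifold defers calling newWorker until the upgrade
	// steps have completed, handing it the agent and API connection.
	return util.PostUpgradeManifold(config, newWorker)
}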
func (c *RunCommand) executeNoContext() (*exec.ExecResponse, error) {
	// Acquire the uniter hook execution lock to make sure we don't
	// stomp on each other.
	lock, err := cmdutil.HookExecutionLock(cmdutil.DataDir)
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = lock.Lock("juju-run")
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer lock.Unlock()

	runCmd := c.appendProxyToCommands()

	return exec.RunCommands(
		exec.RunParams{
			Commands: runCmd,
		})
}
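// A hedged usage sketch: how a caller might surface the ExecResponse from
// executeNoContext. The runNoContextSketch name and the cmd.Context plumbing
// are illustrative assumptions, not the real RunCommand.Run implementation.
func runNoContextSketch(c *RunCommand, ctx *cmd.Context) error {
	result, err := c.executeNoContext()
	if err != nil {
		return errors.Trace(err)
	}
	// Relay the captured output to the command's own streams.
	ctx.Stdout.Write(result.Stdout)
	ctx.Stderr.Write(result.Stderr)
	if result.Code != 0 {
		// Propagate the remote exit code as our own exit code.
		return cmd.NewRcPassthroughError(result.Code)
	}
	return nil
}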
// updateSupportedContainers records in state that a machine can run the specified containers.
// It starts a watcher and when a container of a given type is first added to the machine,
// the watcher is killed, the machine is set up to be able to start containers of the given type,
// and a suitable provisioner is started.
func (a *MachineAgent) updateSupportedContainers(
	runner worker.Runner,
	st api.Connection,
	containers []instance.ContainerType,
	agentConfig agent.Config,
) error {
	pr := st.Provisioner()
	tag := agentConfig.Tag().(names.MachineTag)
	machine, err := pr.Machine(tag)
	if errors.IsNotFound(err) || err == nil && machine.Life() == params.Dead {
		return worker.ErrTerminateAgent
	}
	if err != nil {
		return errors.Annotatef(err, "cannot load machine %s from state", tag)
	}
	if len(containers) == 0 {
		if err := machine.SupportsNoContainers(); err != nil {
			return errors.Annotatef(err, "clearing supported containers for %s", tag)
		}
		return nil
	}
	if err := machine.SetSupportedContainers(containers...); err != nil {
		return errors.Annotatef(err, "setting supported containers for %s", tag)
	}
	initLock, err := cmdutil.HookExecutionLock(agentConfig.DataDir())
	if err != nil {
		return err
	}
	// Start the watcher to fire when a container is first requested on the machine.
	modelUUID, err := st.ModelTag()
	if err != nil {
		return err
	}
	watcherName := fmt.Sprintf("%s-container-watcher", machine.Id())
	// There may not be a CA certificate private key available, and without
	// it we can't ensure that other Juju nodes can connect securely, so only
	// use an image URL getter if there's a private key.
	var imageURLGetter container.ImageURLGetter
	if agentConfig.Value(agent.AllowsSecureConnection) == "true" {
		cfg, err := pr.ModelConfig()
		if err != nil {
			return errors.Annotate(err, "unable to get environ config")
		}
		imageURLGetter = container.NewImageURLGetter(
			// Explicitly call the non-named constructor so if anyone
			// adds additional fields, this fails.
			container.ImageURLGetterConfig{
				ServerRoot:        st.Addr(),
				ModelUUID:         modelUUID.Id(),
				CACert:            []byte(agentConfig.CACert()),
				CloudimgBaseUrl:   cfg.CloudImageBaseURL(),
				Stream:            cfg.ImageStream(),
				ImageDownloadFunc: container.ImageDownloadURL,
			})
	}
	params := provisioner.ContainerSetupParams{
		Runner:              runner,
		WorkerName:          watcherName,
		SupportedContainers: containers,
		ImageURLGetter:      imageURLGetter,
		Machine:             machine,
		Provisioner:         pr,
		Config:              agentConfig,
		InitLock:            initLock,
	}
	handler := provisioner.NewContainerSetupHandler(params)
	a.startWorkerAfterUpgrade(runner, watcherName, func() (worker.Worker, error) {
		w, err := watcher.NewStringsWorker(watcher.StringsConfig{
			Handler: handler,
		})
		if err != nil {
			return nil, errors.Annotatef(err, "cannot start %s worker", watcherName)
		}
		return w, nil
	})
	return nil
}
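// For illustration: a caller would typically decide which container types
// this machine can host and then record them via updateSupportedContainers.
// A minimal sketch, assuming LXC and KVM are the supported types (the
// sketch's name and the hard-coded type list are assumptions; real callers
// detect support at runtime):
func (a *MachineAgent) setupContainerSupportSketch(
	runner worker.Runner,
	st api.Connection,
	agentConfig agent.Config,
) error {
	// Record both container types; passing an empty slice instead would
	// take the SupportsNoContainers branch above.
	supported := []instance.ContainerType{instance.LXC, instance.KVM}
	return a.updateSupportedContainers(runner, st, supported, agentConfig)
}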
func (a *UnitAgent) APIWorkers() (_ worker.Worker, err error) {
	agentConfig := a.CurrentConfig()
	dataDir := agentConfig.DataDir()
	hookLock, err := cmdutil.HookExecutionLock(dataDir)
	if err != nil {
		return nil, err
	}
	st, entity, err := OpenAPIState(agentConfig, a)
	if err != nil {
		return nil, err
	}
	unitTag, err := names.ParseUnitTag(entity.Tag())
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Ensure that the environment uuid is stored in the agent config.
	// Luckily the API has it recorded for us after we connect.
	if agentConfig.Environment().Id() == "" {
		err := a.ChangeConfig(func(setter agent.ConfigSetter) error {
			environTag, err := st.EnvironTag()
			if err != nil {
				return errors.Annotate(err, "no environment uuid set on api")
			}
			return setter.Migrate(agent.MigrateParams{
				Environment: environTag,
			})
		})
		if err != nil {
			logger.Warningf("unable to save environment uuid: %v", err)
			// Not really fatal, just annoying.
		}
	}

	defer func() {
		if err != nil {
			st.Close()
			reportClosedUnitAPI(st)
		}
	}()

	// Before starting any workers, ensure we record the Juju version this unit
	// agent is running.
	currentTools := &tools.Tools{Version: version.Current}
	apiStateUpgrader := a.getUpgrader(st)
	if err := apiStateUpgrader.SetVersion(agentConfig.Tag().String(), currentTools.Version); err != nil {
		return nil, errors.Annotate(err, "cannot set unit agent version")
	}

	runner := worker.NewRunner(cmdutil.ConnectionIsFatal(logger, st), cmdutil.MoreImportant)
	// start proxyupdater first to ensure proxy settings are correct
	runner.StartWorker("proxyupdater", func() (worker.Worker, error) {
		return proxyupdater.New(st.Environment(), false), nil
	})
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		return upgrader.NewAgentUpgrader(
			st.Upgrader(),
			agentConfig,
			agentConfig.UpgradedToVersion(),
			func() bool { return false },
			a.initialAgentUpgradeCheckComplete,
		), nil
	})
	runner.StartWorker("logger", func() (worker.Worker, error) {
		return workerlogger.NewLogger(st.Logger(), agentConfig), nil
	})
	runner.StartWorker("uniter", func() (worker.Worker, error) {
		uniterFacade, err := st.Uniter()
		if err != nil {
			return nil, errors.Trace(err)
		}
		uniterParams := uniter.UniterParams{
			uniterFacade,
			unitTag,
			leadership.NewClient(st),
			dataDir,
			hookLock,
			uniter.NewMetricsTimerChooser(),
			uniter.NewUpdateStatusTimer(),
		}
		return uniter.NewUniter(&uniterParams), nil
	})
	runner.StartWorker("apiaddressupdater", func() (worker.Worker, error) {
		uniterFacade, err := st.Uniter()
		if err != nil {
			return nil, errors.Trace(err)
		}
		return apiaddressupdater.NewAPIAddressUpdater(uniterFacade, a), nil
	})
	runner.StartWorker("rsyslog", func() (worker.Worker, error) {
		return cmdutil.NewRsyslogConfigWorker(st.Rsyslog(), agentConfig, rsyslog.RsyslogModeForwarding)
	})
	return cmdutil.NewCloseWorker(logger, runner, st), nil
}
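// A minimal usage sketch: the CloseWorker returned by APIWorkers would
// typically be registered under the agent's own top-level runner, so the API
// connection is torn down when the worker stack dies. The "api" worker key
// and this wrapper function are assumptions for illustration.
func (a *UnitAgent) startAPIWorkersSketch(runner worker.Runner) error {
	return runner.StartWorker("api", func() (worker.Worker, error) {
		// APIWorkers opens the API connection, starts the per-unit
		// workers, and returns a worker that closes the connection
		// when the runner stops it.
		return a.APIWorkers()
	})
}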