func (env *localEnviron) writeBootstrapAgentConfFile(cert, key []byte) error { info, apiInfo, err := env.StateInfo() if err != nil { logger.Errorf("failed to get state info to write bootstrap agent file: %v", err) return err } tag := state.MachineTag("0") info.Tag = tag apiInfo.Tag = tag conf := &agent.Conf{ DataDir: env.config.rootDir(), StateInfo: info, APIInfo: apiInfo, StateServerCert: cert, StateServerKey: key, StatePort: env.config.StatePort(), APIPort: env.config.APIPort(), MachineNonce: state.BootstrapNonce, } if err := conf.Write(); err != nil { logger.Errorf("failed to write bootstrap agent file: %v", err) return err } return nil }
// CanDeploy returns if the currently authenticated entity (a machine // agent) can deploy each passed unit entity. func (d *DeployerAPI) CanDeploy(args params.Entities) (params.BoolResults, error) { result := params.BoolResults{ Results: make([]params.BoolResult, len(args.Entities)), } for i, entity := range args.Entities { unitName := state.UnitNameFromTag(entity.Tag) unit, err := d.st.Unit(unitName) if errors.IsNotFoundError(err) { // Unit not found, so no need to continue. continue } else if err != nil { // Any other error get reported back. result.Results[i].Error = common.ServerError(err) continue } machineId, err := unit.AssignedMachineId() if err != nil && !state.IsNotAssigned(err) && !errors.IsNotFoundError(err) { // Any other errors get reported back. result.Results[i].Error = common.ServerError(err) continue } else if err != nil { // This means the unit wasn't assigned to the machine // agent or it wasn't found. In both cases we just return // false so as not to leak information about the existence // of a unit to a potentially rogue machine agent. continue } // Finally, check if we're allowed to access this unit. // When assigned machineId == "" it will fail. result.Results[i].Result = d.authorizer.AuthOwner(state.MachineTag(machineId)) } return result, nil }
// FakeAPIInfo holds information about no state - it will always // give an error when connected to. The machine id gives the machine id // of the machine to be started. func FakeAPIInfo(machineId string) *api.Info { return &api.Info{ Addrs: []string{"0.1.2.3:1234"}, Tag: state.MachineTag(machineId), Password: "******", CACert: []byte(testing.CACert), } }
func (p *Provisioner) startMachine(m *state.Machine) error { // TODO(dfc) the state.Info passed to environ.StartInstance remains contentious // however as the PA only knows one state.Info, and that info is used by MAs and // UAs to locate the state for this environment, it is logical to use the same // state.Info as the PA. stateInfo, apiInfo, err := p.setupAuthentication(m) if err != nil { return err } cons, err := m.Constraints() if err != nil { return err } // Generate a unique nonce for the new instance. uuid, err := utils.NewUUID() if err != nil { return err } // Generated nonce has the format: "machine-#:UUID". The first // part is a badge, specifying the tag of the machine the provisioner // is running on, while the second part is a random UUID. nonce := fmt.Sprintf("%s:%s", state.MachineTag(p.machineId), uuid.String()) inst, err := p.environ.StartInstance(m.Id(), nonce, m.Series(), cons, stateInfo, apiInfo) if err != nil { // Set the state to error, so the machine will be skipped next // time until the error is resolved, but don't return an // error; just keep going with the other machines. log.Errorf("worker/provisioner: cannot start instance for machine %q: %v", m, err) if err1 := m.SetStatus(params.StatusError, err.Error()); err1 != nil { // Something is wrong with this machine, better report it back. log.Errorf("worker/provisioner: cannot set error status for machine %q: %v", m, err1) return err1 } return nil } if err := m.SetProvisioned(inst.Id(), nonce); err != nil { // The machine is started, but we can't record the mapping in // state. It'll keep running while we fail out and restart, // but will then be detected by findUnknownInstances and // killed again. // // TODO(dimitern) Stop the instance right away here. // // Multiple instantiations of a given machine (with the same // machine ID) cannot coexist, because findUnknownInstances is // called before startMachines. 
However, if the first machine // had started to do work before being replaced, we may // encounter surprising problems. return err } // populate the local cache p.instances[m.Id()] = inst p.machines[inst.Id()] = m.Id() log.Noticef("worker/provisioner: started machine %s as instance %s", m, inst.Id()) return nil }
func (cfg *MachineConfig) addLogging(c *cloudinit.Config) error { var configRenderer syslog.SyslogConfigRenderer if cfg.StateServer { configRenderer = syslog.NewAccumulateConfig( state.MachineTag(cfg.MachineId)) } else { configRenderer = syslog.NewForwardConfig( state.MachineTag(cfg.MachineId), cfg.stateHostAddrs()) } content, err := configRenderer.Render() if err != nil { return err } addScripts(c, fmt.Sprintf("cat > /etc/rsyslog.d/25-juju.conf << 'EOF'\n%sEOF\n", string(content)), ) c.AddRunCmd("restart rsyslog") return nil }
func (s *MachineSuite) TestMachineIdFromTag(c *C) { c.Assert(state.MachineIdFromTag("machine-10"), Equals, "10") // Check a container id. c.Assert(state.MachineIdFromTag("machine-10-lxc-1"), Equals, "10/lxc/1") // Check reversability. nested := "2/kvm/0/lxc/3" c.Assert(state.MachineIdFromTag(state.MachineTag(nested)), Equals, nested) // Try with an invalid tag format. c.Assert(state.MachineIdFromTag("foo"), Equals, "") }
// makeMachineConfig produces a valid cloudinit machine config. func makeMachineConfig(c *gc.C) *cloudinit.MachineConfig { dir := c.MkDir() machineID := "0" return &cloudinit.MachineConfig{ MachineId: machineID, MachineNonce: "gxshasqlnng", DataDir: dir, Tools: &tools.Tools{URL: "file://" + dir}, StateInfo: &state.Info{ CACert: []byte(testing.CACert), Addrs: []string{"127.0.0.1:123"}, Tag: state.MachineTag(machineID), }, APIInfo: &api.Info{ CACert: []byte(testing.CACert), Addrs: []string{"127.0.0.1:123"}, Tag: state.MachineTag(machineID), }, ProviderType: "azure", } }
func (s *CommonProvisionerSuite) checkStartInstanceCustom(c *C, m *state.Machine, secret string, cons constraints.Value) (inst instance.Instance) { s.State.StartSync() for { select { case o := <-s.op: switch o := o.(type) { case dummy.OpStartInstance: inst = o.Instance s.waitInstanceId(c, m, inst.Id()) // Check the instance was started with the expected params. c.Assert(o.MachineId, Equals, m.Id()) nonceParts := strings.SplitN(o.MachineNonce, ":", 2) c.Assert(nonceParts, HasLen, 2) c.Assert(nonceParts[0], Equals, state.MachineTag("0")) c.Assert(nonceParts[1], checkers.Satisfies, utils.IsValidUUIDString) c.Assert(o.Secret, Equals, secret) c.Assert(o.Constraints, DeepEquals, cons) // Check we can connect to the state with // the machine's entity name and password. info := s.StateInfo(c) info.Tag = m.Tag() c.Assert(o.Info.Password, Not(HasLen), 0) info.Password = o.Info.Password c.Assert(o.Info, DeepEquals, info) // Check we can connect to the state with // the machine's entity name and password. st, err := state.Open(o.Info, state.DefaultDialOpts()) c.Assert(err, IsNil) // All provisioned machines in this test suite have their hardware characteristics // attributes set to the same values as the constraints due to the dummy environment being used. hc, err := m.HardwareCharacteristics() c.Assert(err, IsNil) c.Assert(*hc, DeepEquals, instance.HardwareCharacteristics{ Arch: cons.Arch, Mem: cons.Mem, CpuCores: cons.CpuCores, CpuPower: cons.CpuPower, }) st.Close() return default: c.Logf("ignoring unexpected operation %#v", o) } case <-time.After(2 * time.Second): c.Fatalf("provisioner did not start an instance") return } } return }
func (env *localEnviron) setupLocalMachineAgent(cons constraints.Value) error { dataDir := env.config.rootDir() toolList, err := environs.FindBootstrapTools(env, cons) if err != nil { return err } // ensure we have at least one valid tools if len(toolList) == 0 { return fmt.Errorf("No bootstrap tools found") } // unpack the first tools into the agent dir. agentTools := toolList[0] logger.Debugf("tools: %#v", agentTools) // brutally abuse our knowledge of storage to directly open the file toolsUrl, err := url.Parse(agentTools.URL) toolsLocation := filepath.Join(env.config.storageDir(), toolsUrl.Path) logger.Infof("tools location: %v", toolsLocation) toolsFile, err := os.Open(toolsLocation) defer toolsFile.Close() // Again, brutally abuse our knowledge here. // The tools that FindBootstrapTools has returned us are based on the // default series in the config. However we are running potentially on a // different series. When the machine agent is started, it will be // looking based on the current series, so we need to override the series // returned in the tools to be the current series. 
agentTools.Binary.Series = version.CurrentSeries() err = tools.UnpackTools(dataDir, agentTools, toolsFile) machineId := "0" // Always machine 0 tag := state.MachineTag(machineId) toolsDir := tools.SharedToolsDir(dataDir, agentTools.Binary) logDir := env.config.logDir() logConfig := "--debug" // TODO(thumper): specify loggo config agent := upstart.MachineAgentUpstartService( env.machineAgentServiceName(), toolsDir, dataDir, logDir, tag, machineId, logConfig, env.config.Type()) agent.Env["USER"] = env.config.user agent.Env["HOME"] = os.Getenv("HOME") agent.Env["JUJU_STORAGE_DIR"] = env.config.storageDir() agent.Env["JUJU_STORAGE_ADDR"] = env.config.storageAddr() agent.Env["JUJU_SHARED_STORAGE_DIR"] = env.config.sharedStorageDir() agent.Env["JUJU_SHARED_STORAGE_ADDR"] = env.config.sharedStorageAddr() agent.InitDir = upstartScriptLocation logger.Infof("installing service %s to %s", env.machineAgentServiceName(), agent.InitDir) if err := agent.Install(); err != nil { logger.Errorf("could not install machine agent service: %v", err) return err } return nil }
func (task *provisionerTask) startMachine(machine *state.Machine) error { stateInfo, apiInfo, err := task.auth.SetupAuthentication(machine) if err != nil { logger.Errorf("failed to setup authentication: %v", err) return err } cons, err := machine.Constraints() if err != nil { return err } // Generate a unique nonce for the new instance. uuid, err := utils.NewUUID() if err != nil { return err } // Generated nonce has the format: "machine-#:UUID". The first // part is a badge, specifying the tag of the machine the provisioner // is running on, while the second part is a random UUID. nonce := fmt.Sprintf("%s:%s", state.MachineTag(task.machineId), uuid.String()) inst, metadata, err := task.broker.StartInstance(machine.Id(), nonce, machine.Series(), cons, stateInfo, apiInfo) if err != nil { // Set the state to error, so the machine will be skipped next // time until the error is resolved, but don't return an // error; just keep going with the other machines. logger.Errorf("cannot start instance for machine %q: %v", machine, err) if err1 := machine.SetStatus(params.StatusError, err.Error()); err1 != nil { // Something is wrong with this machine, better report it back. logger.Errorf("cannot set error status for machine %q: %v", machine, err1) return err1 } return nil } if err := machine.SetProvisioned(inst.Id(), nonce, metadata); err != nil { logger.Errorf("cannot register instance for machine %v: %v", machine, err) // The machine is started, but we can't record the mapping in // state. It'll keep running while we fail out and restart, // but will then be detected by findUnknownInstances and // killed again. // // TODO(dimitern) Stop the instance right away here. // // Multiple instantiations of a given machine (with the same // machine ID) cannot coexist, because findUnknownInstances is // called before startMachines. However, if the first machine // had started to do work before being replaced, we may // encounter surprising problems. 
return err } logger.Infof("started machine %s as instance %s with hardware %q", machine, inst.Id(), metadata) return nil }
// primeAgent adds a new Machine to run the given jobs, and sets up the // machine agent's directory. It returns the new machine, the // agent's configuration and the tools currently running. func (s *MachineSuite) primeAgent(c *C, jobs ...state.MachineJob) (*state.Machine, *agent.Conf, *state.Tools) { m, err := s.State.InjectMachine("series", constraints.Value{}, "ardbeg-0", instance.HardwareCharacteristics{}, jobs...) c.Assert(err, IsNil) err = m.SetMongoPassword("machine-password") c.Assert(err, IsNil) err = m.SetPassword("machine-password") c.Assert(err, IsNil) conf, tools := s.agentSuite.primeAgent(c, state.MachineTag(m.Id()), "machine-password") conf.MachineNonce = state.BootstrapNonce conf.APIInfo.Nonce = conf.MachineNonce err = conf.Write() c.Assert(err, IsNil) return m, conf, tools }
func (s *commonSuite) SetUpTest(c *C) { s.JujuConnSuite.SetUpTest(c) var err error s.machine0, err = s.State.AddMachine("series", state.JobManageEnviron, state.JobManageState) c.Assert(err, IsNil) s.machine1, err = s.State.AddMachine("series", state.JobHostUnits) c.Assert(err, IsNil) // Create a FakeAuthorizer so we can check permissions, // set up assuming machine 1 has logged in. s.authorizer = apiservertesting.FakeAuthorizer{ Tag: state.MachineTag(s.machine1.Id()), LoggedIn: true, Manager: false, MachineAgent: true, } }
func (s *ProvisionerSuite) checkStartInstanceCustom(c *C, m *state.Machine, secret string, cons constraints.Value) { s.State.StartSync() for { select { case o := <-s.op: switch o := o.(type) { case dummy.OpStartInstance: s.waitInstanceId(c, m, o.Instance.Id()) // Check the instance was started with the expected params. c.Assert(o.MachineId, Equals, m.Id()) nonceParts := strings.SplitN(o.MachineNonce, ":", 2) c.Assert(nonceParts, HasLen, 2) c.Assert(nonceParts[0], Equals, state.MachineTag("0")) c.Assert(utils.IsValidUUIDString(nonceParts[1]), Equals, true) c.Assert(o.Secret, Equals, secret) c.Assert(o.Constraints, DeepEquals, cons) // Check we can connect to the state with // the machine's entity name and password. info := s.StateInfo(c) info.Tag = m.Tag() c.Assert(o.Info.Password, Not(HasLen), 0) info.Password = o.Info.Password c.Assert(o.Info, DeepEquals, info) // Check we can connect to the state with // the machine's entity name and password. st, err := state.Open(o.Info, state.DefaultDialOpts()) c.Assert(err, IsNil) st.Close() return default: c.Logf("ignoring unexpected operation %#v", o) } case <-time.After(2 * time.Second): c.Fatalf("provisioner did not start an instance") return } } }
func verifyConfig(cfg *MachineConfig) (err error) { defer utils.ErrorContextf(&err, "invalid machine configuration") if !state.IsMachineId(cfg.MachineId) { return fmt.Errorf("invalid machine id") } if cfg.DataDir == "" { return fmt.Errorf("missing var directory") } if cfg.Tools == nil { return fmt.Errorf("missing tools") } if cfg.Tools.URL == "" { return fmt.Errorf("missing tools URL") } if cfg.StateInfo == nil { return fmt.Errorf("missing state info") } if len(cfg.StateInfo.CACert) == 0 { return fmt.Errorf("missing CA certificate") } if cfg.APIInfo == nil { return fmt.Errorf("missing API info") } if len(cfg.APIInfo.CACert) == 0 { return fmt.Errorf("missing API CA certificate") } if cfg.ProviderType == "" { return fmt.Errorf("missing provider type") } if cfg.StateServer { if cfg.Config == nil { return fmt.Errorf("missing environment configuration") } if cfg.StateInfo.Tag != "" { return fmt.Errorf("entity tag must be blank when starting a state server") } if cfg.APIInfo.Tag != "" { return fmt.Errorf("entity tag must be blank when starting a state server") } if len(cfg.StateServerCert) == 0 { return fmt.Errorf("missing state server certificate") } if len(cfg.StateServerKey) == 0 { return fmt.Errorf("missing state server private key") } if cfg.StatePort == 0 { return fmt.Errorf("missing state port") } if cfg.APIPort == 0 { return fmt.Errorf("missing API port") } } else { if len(cfg.StateInfo.Addrs) == 0 { return fmt.Errorf("missing state hosts") } if cfg.StateInfo.Tag != state.MachineTag(cfg.MachineId) { return fmt.Errorf("entity tag must match started machine") } if len(cfg.APIInfo.Addrs) == 0 { return fmt.Errorf("missing API hosts") } if cfg.APIInfo.Tag != state.MachineTag(cfg.MachineId) { return fmt.Errorf("entity tag must match started machine") } } if cfg.MachineNonce == "" { return fmt.Errorf("missing machine nonce") } return nil }
// Tag returns the machine agent's entity tag, e.g. "machine-0".
func (a *MachineAgent) Tag() string {
	return state.MachineTag(a.MachineId)
}
func (s *MachineSuite) TestMachineTag(c *C) { c.Assert(state.MachineTag("10"), Equals, "machine-10") // Check a container id. c.Assert(state.MachineTag("10/lxc/1"), Equals, "machine-10-lxc-1") }
func Configure(cfg *MachineConfig, c *cloudinit.Config) (*cloudinit.Config, error) { if err := verifyConfig(cfg); err != nil { return nil, err } c.AddSSHAuthorizedKeys(cfg.AuthorizedKeys) c.AddPackage("git") // Perfectly reasonable to install lxc on environment instances and kvm // containers. if cfg.MachineContainerType != instance.LXC { c.AddPackage("lxc") } addScripts(c, "set -xe", // ensure we run all the scripts or abort. fmt.Sprintf("mkdir -p %s", cfg.DataDir), "mkdir -p /var/log/juju") // Make a directory for the tools to live in, then fetch the // tools and unarchive them into it. addScripts(c, "bin="+shquote(cfg.jujuTools()), "mkdir -p $bin", fmt.Sprintf("wget --no-verbose -O - %s | tar xz -C $bin", shquote(cfg.Tools.URL)), fmt.Sprintf("echo -n %s > $bin/downloaded-url.txt", shquote(cfg.Tools.URL)), ) // TODO (thumper): work out how to pass the logging config to the children debugFlag := "" // TODO: disable debug mode by default when the system is stable. if true { debugFlag = " --debug" } if err := cfg.addLogging(c); err != nil { return nil, err } // We add the machine agent's configuration info // before running bootstrap-state so that bootstrap-state // has a chance to rerwrite it to change the password. // It would be cleaner to change bootstrap-state to // be responsible for starting the machine agent itself, // but this would not be backwardly compatible. machineTag := state.MachineTag(cfg.MachineId) _, err := cfg.addAgentInfo(c, machineTag) if err != nil { return nil, err } if cfg.StateServer { if cfg.NeedMongoPPA() { c.AddAptSource("ppa:juju/experimental", "1024R/C8068B11") } c.AddPackage("mongodb-server") certKey := string(cfg.StateServerCert) + string(cfg.StateServerKey) addFile(c, cfg.dataFile("server.pem"), certKey, 0600) if err := cfg.addMongoToBoot(c); err != nil { return nil, err } // We temporarily give bootstrap-state a directory // of its own so that it can get the state info via the // same mechanism as other jujud commands. 
acfg, err := cfg.addAgentInfo(c, "bootstrap") if err != nil { return nil, err } addScripts(c, fmt.Sprintf("echo %s > %s", shquote(cfg.StateInfoURL), BootstrapStateURLFile), cfg.jujuTools()+"/jujud bootstrap-state"+ " --data-dir "+shquote(cfg.DataDir)+ " --env-config "+shquote(base64yaml(cfg.Config))+ " --constraints "+shquote(cfg.Constraints.String())+ debugFlag, "rm -rf "+shquote(acfg.Dir()), ) } if err := cfg.addMachineAgentToBoot(c, machineTag, cfg.MachineId, debugFlag); err != nil { return nil, err } // general options c.SetAptUpgrade(true) c.SetAptUpdate(true) c.SetOutput(cloudinit.OutAll, "| tee -a /var/log/cloud-init-output.log", "") return c, nil }
func (s *deployerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) // The two known machines now contain the following units: // machine 0 (not authorized): mysql/1 (principal1) // machine 1 (authorized): mysql/0 (principal0), logging/0 (subordinate0) var err error s.machine0, err = s.State.AddMachine("series", state.JobManageState, state.JobHostUnits) c.Assert(err, gc.IsNil) s.machine1, err = s.State.AddMachine("series", state.JobHostUnits) c.Assert(err, gc.IsNil) s.service0, err = s.State.AddService("mysql", s.AddTestingCharm(c, "mysql")) c.Assert(err, gc.IsNil) s.service1, err = s.State.AddService("logging", s.AddTestingCharm(c, "logging")) c.Assert(err, gc.IsNil) eps, err := s.State.InferEndpoints([]string{"mysql", "logging"}) c.Assert(err, gc.IsNil) rel, err := s.State.AddRelation(eps...) c.Assert(err, gc.IsNil) s.principal0, err = s.service0.AddUnit() c.Assert(err, gc.IsNil) err = s.principal0.AssignToMachine(s.machine1) c.Assert(err, gc.IsNil) s.principal1, err = s.service0.AddUnit() c.Assert(err, gc.IsNil) err = s.principal1.AssignToMachine(s.machine0) c.Assert(err, gc.IsNil) relUnit0, err := rel.Unit(s.principal0) c.Assert(err, gc.IsNil) err = relUnit0.EnterScope(nil) c.Assert(err, gc.IsNil) s.subordinate0, err = s.service1.Unit("logging/0") c.Assert(err, gc.IsNil) // Create a FakeAuthorizer so we can check permissions, // set up assuming machine 1 has logged in. s.authorizer = apiservertesting.FakeAuthorizer{ Tag: state.MachineTag(s.machine1.Id()), LoggedIn: true, Manager: false, MachineAgent: true, } // Create the resource registry separately to track invocations to // Register. s.resources = common.NewResources() // Create a deployer API for machine 1. deployer, err := deployer.NewDeployerAPI( s.State, s.resources, s.authorizer, ) c.Assert(err, gc.IsNil) s.deployer = deployer }
func (manager *containerManager) StartContainer( machineId, series, nonce string, tools *tools.Tools, environConfig *config.Config, stateInfo *state.Info, apiInfo *api.Info) (instance.Instance, error) { name := state.MachineTag(machineId) if manager.name != "" { name = fmt.Sprintf("%s-%s", manager.name, name) } // Note here that the lxcObjectFacotry only returns a valid container // object, and doesn't actually construct the underlying lxc container on // disk. container := lxcObjectFactory.New(name) // Create the cloud-init. directory := jujuContainerDirectory(name) logger.Tracef("create directory: %s", directory) if err := os.MkdirAll(directory, 0755); err != nil { logger.Errorf("failed to create container directory: %v", err) return nil, err } logger.Tracef("write cloud-init") userDataFilename, err := writeUserData(directory, machineId, nonce, tools, environConfig, stateInfo, apiInfo) if err != nil { logger.Errorf("failed to write user data: %v", err) return nil, err } logger.Tracef("write the lxc.conf file") configFile, err := writeLxcConfig(directory, manager.logdir) if err != nil { logger.Errorf("failed to write config file: %v", err) return nil, err } templateParams := []string{ "--debug", // Debug errors in the cloud image "--userdata", userDataFilename, // Our groovey cloud-init "--hostid", name, // Use the container name as the hostid "-r", series, } // Create the container. logger.Tracef("create the container") if err := container.Create(configFile, defaultTemplate, templateParams...); err != nil { logger.Errorf("lxc container creation failed: %v", err) return nil, err } // Make sure that the mount dir has been created. logger.Tracef("make the mount dir for the shard logs") if err := os.MkdirAll(internalLogDir(name), 0755); err != nil { logger.Errorf("failed to create internal /var/log/juju mount dir: %v", err) return nil, err } logger.Tracef("lxc container created") // Now symlink the config file into the restart directory. 
containerConfigFile := filepath.Join(lxcContainerDir, name, "config") if err := os.Symlink(containerConfigFile, restartSymlink(name)); err != nil { return nil, err } logger.Tracef("auto-restart link created") // Start the lxc container with the appropriate settings for grabbing the // console output and a log file. consoleFile := filepath.Join(directory, "console.log") container.SetLogFile(filepath.Join(directory, "container.log"), golxc.LogDebug) logger.Tracef("start the container") // We explicitly don't pass through the config file to the container.Start // method as we have passed it through at container creation time. This // is necessary to get the appropriate rootfs reference without explicitly // setting it ourselves. if err = container.Start("", consoleFile); err != nil { logger.Errorf("container failed to start: %v", err) return nil, err } logger.Tracef("container started") return &lxcInstance{name}, nil }
// machineFullName returns the provider-level name for the given machine,
// namespaced by the environment name, e.g. "juju-myenv-machine-0".
func (e *environ) machineFullName(machineId string) string {
	return fmt.Sprintf("juju-%s-%s", e.Name(), state.MachineTag(machineId))
}
func (e *environ) StartInstance(machineId, machineNonce string, series string, cons constraints.Value, info *state.Info, apiInfo *api.Info) (instance.Instance, *instance.HardwareCharacteristics, error) { defer delay() log.Infof("environs/dummy: dummy startinstance, machine %s", machineId) if err := e.checkBroken("StartInstance"); err != nil { return nil, nil, err } possibleTools, err := environs.FindInstanceTools(e, series, cons) if err != nil { return nil, nil, err } err = environs.CheckToolsSeries(possibleTools, series) if err != nil { return nil, nil, err } log.Infof("environs/dummy: would pick tools from %s", possibleTools) e.state.mu.Lock() defer e.state.mu.Unlock() if machineNonce == "" { return nil, nil, fmt.Errorf("cannot start instance: missing machine nonce") } if _, ok := e.Config().CACert(); !ok { return nil, nil, fmt.Errorf("no CA certificate in environment configuration") } if info.Tag != state.MachineTag(machineId) { return nil, nil, fmt.Errorf("entity tag must match started machine") } if apiInfo.Tag != state.MachineTag(machineId) { return nil, nil, fmt.Errorf("entity tag must match started machine") } i := &dummyInstance{ state: e.state, id: instance.Id(fmt.Sprintf("%s-%d", e.state.name, e.state.maxId)), ports: make(map[instance.Port]bool), machineId: machineId, series: series, } var hc *instance.HardwareCharacteristics // To match current system capability, only provide hardware characteristics for // environ machines, not containers. if state.ParentId(machineId) == "" { // We will just assume the instance hardware characteristics exactly matches // the supplied constraints (if specified). hc = &instance.HardwareCharacteristics{ Arch: cons.Arch, Mem: cons.Mem, CpuCores: cons.CpuCores, CpuPower: cons.CpuPower, } // Fill in some expected instance hardware characteristics if constraints not specified. 
if hc.Arch == nil { arch := "amd64" hc.Arch = &arch } if hc.Mem == nil { mem := uint64(1024) hc.Mem = &mem } if hc.CpuCores == nil { cores := uint64(1) hc.CpuCores = &cores } } e.state.insts[i.id] = i e.state.maxId++ e.state.ops <- OpStartInstance{ Env: e.state.name, MachineId: machineId, MachineNonce: machineNonce, Constraints: cons, Instance: i, Info: info, APIInfo: apiInfo, Secret: e.ecfg().secret(), } return i, hc, nil }