// newAgent returns a new MachineAgent instance.
func (s *commonMachineSuite) newAgent(c *gc.C, m *state.Machine) *MachineAgent {
	a := &MachineAgent{}
	s.initAgent(c, a, "--machine-id", m.Id())
	err := a.ReadConfig(m.Tag())
	c.Assert(err, gc.IsNil)
	return a
}
// runMachineUpdate connects via ssh to the machine and runs the update script.
func runMachineUpdate(m *state.Machine, sshArg string) error {
	progress("updating machine: %v\n", m)
	addr := instance.SelectPublicAddress(m.Addresses())
	if addr == "" {
		return fmt.Errorf("no appropriate public address found")
	}
	return runViaSsh(addr, sshArg)
}
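// exampleUpdateMachines is a hypothetical sketch, not part of the original
// source, showing how runMachineUpdate might be driven over several machines.
// The machines slice and updateScript value are assumed for illustration.
func exampleUpdateMachines(machines []*state.Machine, updateScript string) error {
	for _, m := range machines {
		// A machine with no public address makes runMachineUpdate fail.
		if err := runMachineUpdate(m, updateScript); err != nil {
			return err
		}
	}
	return nil
}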
// assertAssignUnit adds a unit to the wordpress service and checks that
// assigning it reuses the expected machine, which is then no longer clean.
func (s *assignCleanSuite) assertAssignUnit(c *gc.C, expectedMachine *state.Machine) {
	unit, err := s.wordpress.AddUnit()
	c.Assert(err, gc.IsNil)
	reusedMachine, err := s.assignUnit(unit)
	c.Assert(err, gc.IsNil)
	c.Assert(reusedMachine.Id(), gc.Equals, expectedMachine.Id())
	c.Assert(reusedMachine.Clean(), jc.IsFalse)
}
// expectStarted waits for the mock container manager to report a Started
// event for the machine and returns the new instance id.
func (s *kvmProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	s.State.StartSync()
	event := <-s.events
	c.Assert(event.Action, gc.Equals, mock.Started)
	err := machine.Refresh()
	c.Assert(err, gc.IsNil)
	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}
// waitRemoved waits for the supplied machine to be removed from state.
func (s *CommonProvisionerSuite) waitRemoved(c *gc.C, m *state.Machine) {
	s.waitMachine(c, m, func() bool {
		err := m.Refresh()
		if errors.IsNotFound(err) {
			return true
		}
		c.Assert(err, gc.IsNil)
		c.Logf("machine %v is still %s", m, m.Life())
		return false
	})
}
// waitInstanceId waits until the supplied machine has an instance id, then
// asserts it is as expected.
func (s *CommonProvisionerSuite) waitInstanceId(c *gc.C, m *state.Machine, expect instance.Id) {
	s.waitHardwareCharacteristics(c, m, func() bool {
		if actual, err := m.InstanceId(); err == nil {
			c.Assert(actual, gc.Equals, expect)
			return true
		} else if !state.IsNotProvisionedError(err) {
			// We don't expect any errors.
			panic(err)
		}
		c.Logf("machine %v is still unprovisioned", m)
		return false
	})
}
// checkStartInstanceCustom waits for the dummy provider to report a started
// instance for the machine and verifies the parameters it was started with.
func (s *CommonProvisionerSuite) checkStartInstanceCustom(
	c *gc.C, m *state.Machine, secret string, cons constraints.Value,
	includeNetworks, excludeNetworks []string, networkInfo []network.Info,
	waitInstanceId bool,
) (inst instance.Instance) {
	s.BackingState.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				inst = o.Instance
				if waitInstanceId {
					s.waitInstanceId(c, m, inst.Id())
				}
				// Check the instance was started with the expected params.
				c.Assert(o.MachineId, gc.Equals, m.Id())
				nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
				c.Assert(nonceParts, gc.HasLen, 2)
				c.Assert(nonceParts[0], gc.Equals, names.MachineTag("0"))
				c.Assert(nonceParts[1], jc.Satisfies, utils.IsValidUUIDString)
				c.Assert(o.Secret, gc.Equals, secret)
				c.Assert(o.IncludeNetworks, jc.DeepEquals, includeNetworks)
				c.Assert(o.ExcludeNetworks, jc.DeepEquals, excludeNetworks)
				c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo)
				// All provisioned machines in this test suite have
				// their hardware characteristics attributes set to
				// the same values as the constraints due to the dummy
				// environment being used.
				if !constraints.IsEmpty(&cons) {
					c.Assert(o.Constraints, gc.DeepEquals, cons)
					hc, err := m.HardwareCharacteristics()
					c.Assert(err, gc.IsNil)
					c.Assert(*hc, gc.DeepEquals, instance.HardwareCharacteristics{
						Arch:     cons.Arch,
						Mem:      cons.Mem,
						RootDisk: cons.RootDisk,
						CpuCores: cons.CpuCores,
						CpuPower: cons.CpuPower,
						Tags:     cons.Tags,
					})
				}
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not start an instance")
			return
		}
	}
}
// setFakeMachineAddresses gives the machine a fake address, both in state
// and in the dummy environ.
func (s *commonMachineSuite) setFakeMachineAddresses(c *gc.C, machine *state.Machine) {
	addrs := []instance.Address{
		instance.NewAddress("0.1.2.3", instance.NetworkUnknown),
	}
	err := machine.SetAddresses(addrs...)
	c.Assert(err, gc.IsNil)
	// Set the addresses in the environ instance as well so that if the
	// instance poller runs it won't overwrite them.
	instId, err := machine.InstanceId()
	c.Assert(err, gc.IsNil)
	insts, err := s.Conn.Environ.Instances([]instance.Id{instId})
	c.Assert(err, gc.IsNil)
	dummy.SetInstanceAddresses(insts[0], addrs)
}
// remoteParamsForMachine returns a filled in RemoteExec instance
// based on the machine, command and timeout params. If the machine
// does not have an internal address, the Host is empty. This is caught
// by the function that actually tries to execute the command.
func remoteParamsForMachine(machine *state.Machine, command string, timeout time.Duration) *RemoteExec {
	// magic boolean parameters are bad :-(
	address := instance.SelectInternalAddress(machine.Addresses(), false)
	execParams := &RemoteExec{
		ExecParams: ssh.ExecParams{
			Command: command,
			Timeout: timeout,
		},
		MachineId: machine.Id(),
	}
	if address != "" {
		execParams.Host = fmt.Sprintf("ubuntu@%s", address)
	}
	return execParams
}
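// exampleRemoteUptime is a hypothetical usage sketch, not part of the
// original source: it builds the exec parameters for an "uptime" run with
// an assumed 30-second timeout.
func exampleRemoteUptime(machine *state.Machine) *RemoteExec {
	execParams := remoteParamsForMachine(machine, "uptime", 30*time.Second)
	if execParams.Host == "" {
		// The machine has no internal address yet: Host stays empty and
		// the function that executes the command reports the error.
	}
	return execParams
}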
// newMachineToolWaiter returns a toolsWaiter that signals on changes to the
// given machine.
func newMachineToolWaiter(m *state.Machine) *toolsWaiter {
	w := m.Watch()
	waiter := &toolsWaiter{
		changes: make(chan struct{}, 1),
		watcher: w,
		tooler:  m,
	}
	go func() {
		for range w.Changes() {
			waiter.changes <- struct{}{}
		}
		close(waiter.changes)
	}()
	return waiter
}
// assertStartInstance waits for the provisioner to start an instance for the
// machine and checks that the environment knows about it.
func (t *LiveTests) assertStartInstance(c *gc.C, m *state.Machine) {
	// Wait for machine to get an instance id.
	for a := waitAgent.Start(); a.Next(); {
		err := m.Refresh()
		c.Assert(err, gc.IsNil)
		instId, err := m.InstanceId()
		if err != nil {
			c.Assert(err, jc.Satisfies, state.IsNotProvisionedError)
			continue
		}
		_, err = t.Env.Instances([]instance.Id{instId})
		c.Assert(err, gc.IsNil)
		return
	}
	c.Fatalf("provisioner failed to start machine after %v", waitAgent.Total)
}
// waitMachineStatus polls until the machine's status matches expectStatus,
// failing the test if the worst-case timeout expires first.
func (s *MachinerSuite) waitMachineStatus(c *gc.C, m *state.Machine, expectStatus params.Status) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("timeout while waiting for machine status to change")
		case <-time.After(10 * time.Millisecond):
			status, _, _, err := m.Status()
			c.Assert(err, gc.IsNil)
			if status != expectStatus {
				c.Logf("machine %q status is %s, still waiting", m, status)
				continue
			}
			return
		}
	}
}
// commonServiceInstances returns instances with
// services in common with the specified machine.
func commonServiceInstances(st *state.State, m *state.Machine) ([]instance.Id, error) {
	units, err := m.Units()
	if err != nil {
		return nil, err
	}
	var instanceIdSet set.Strings
	for _, unit := range units {
		if !unit.IsPrincipal() {
			continue
		}
		service, err := unit.Service()
		if err != nil {
			return nil, err
		}
		allUnits, err := service.AllUnits()
		if err != nil {
			return nil, err
		}
		for _, unit := range allUnits {
			machineId, err := unit.AssignedMachineId()
			if state.IsNotAssigned(err) {
				continue
			} else if err != nil {
				return nil, err
			}
			machine, err := st.Machine(machineId)
			if err != nil {
				return nil, err
			}
			instanceId, err := machine.InstanceId()
			if err == nil {
				instanceIdSet.Add(string(instanceId))
			} else if state.IsNotProvisionedError(err) {
				continue
			} else {
				return nil, err
			}
		}
	}
	instanceIds := make([]instance.Id, instanceIdSet.Size())
	// Sort values to simplify testing.
	for i, instanceId := range instanceIdSet.SortedValues() {
		instanceIds[i] = instance.Id(instanceId)
	}
	return instanceIds, nil
}
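// exampleCommonInstances is a hypothetical usage sketch, not part of the
// original source: it gathers the instances sharing services with machine
// "1". The state value and machine id are assumed for illustration.
func exampleCommonInstances(st *state.State) ([]instance.Id, error) {
	m, err := st.Machine("1")
	if err != nil {
		return nil, err
	}
	// The result is de-duplicated and sorted by instance id.
	return commonServiceInstances(st, m)
}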
// waitHardwareCharacteristics waits for changes to the machine's hardware
// characteristics until check returns true, resyncing state periodically.
func (s *CommonProvisionerSuite) waitHardwareCharacteristics(c *gc.C, m *state.Machine, check func() bool) {
	w := m.WatchHardwareCharacteristics()
	defer stop(c, w)
	timeout := time.After(coretesting.LongWait)
	resync := time.After(0)
	for {
		select {
		case <-w.Changes():
			if check() {
				return
			}
		case <-resync:
			resync = time.After(coretesting.ShortWait)
			s.BackingState.StartSync()
		case <-timeout:
			c.Fatalf("hardware characteristics for machine %v wait timed out", m)
		}
	}
}
// assertContainerProvisionerStarted patches the provisioner start hook and
// checks that creating a container starts a provisioner on the host.
func (s *ContainerSetupSuite) assertContainerProvisionerStarted(
	c *gc.C, host *state.Machine, ctype instance.ContainerType) {
	// A stub worker callback to record what happens.
	provisionerStarted := false
	startProvisionerWorker := func(runner worker.Runner, containerType instance.ContainerType,
		pr *apiprovisioner.State, cfg agent.Config, broker environs.InstanceBroker) error {
		c.Assert(containerType, gc.Equals, ctype)
		c.Assert(cfg.Tag(), gc.Equals, host.Tag())
		provisionerStarted = true
		return nil
	}
	s.PatchValue(&provisioner.StartProvisioner, startProvisionerWorker)
	s.createContainer(c, host, ctype)
	// Consume the apt command used to initialise the container.
	<-s.aptCmdChan
	// The container worker should have created the provisioner.
	c.Assert(provisionerStarted, jc.IsTrue)
}
// assertInstanceId asserts that the machine has an instance id
// that matches that of the given instance. If the instance is nil,
// it asserts that the instance id is unset.
func assertInstanceId(c *gc.C, m *state.Machine, inst instance.Instance) {
	var wantId, gotId instance.Id
	var err error
	if inst != nil {
		wantId = inst.Id()
	}
	for a := waitAgent.Start(); a.Next(); {
		err = m.Refresh()
		c.Assert(err, gc.IsNil)
		gotId, err = m.InstanceId()
		if err != nil {
			c.Assert(err, jc.Satisfies, state.IsNotProvisionedError)
			if inst == nil {
				return
			}
			continue
		}
		break
	}
	c.Assert(err, gc.IsNil)
	c.Assert(gotId, gc.Equals, wantId)
}
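// exampleAssertInstanceId is a hypothetical usage sketch, not part of the
// original source: it asserts that a machine starts out unprovisioned and
// later matches a started instance. The receiver and its Env field are
// assumed to match the LiveTests suite above.
func (t *LiveTests) exampleAssertInstanceId(c *gc.C, m *state.Machine) {
	// Before provisioning, the machine must report no instance id.
	assertInstanceId(c, m, nil)
	inst, hc := testing.AssertStartInstance(c, t.Env, m.Id())
	err := m.SetProvisioned(inst.Id(), "fake_nonce", hc)
	c.Assert(err, gc.IsNil)
	// Now the machine's instance id must match the started instance.
	assertInstanceId(c, m, inst)
}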
// waitMachine waits for machine change events until check returns true,
// resyncing state periodically.
func (s *CommonProvisionerSuite) waitMachine(c *gc.C, m *state.Machine, check func() bool) {
	// TODO(jam): We need to grow a new method on NotifyWatcherC
	// that calls StartSync while waiting for changes, then
	// waitMachine and waitHardwareCharacteristics can use that
	// instead.
	w := m.Watch()
	defer stop(c, w)
	timeout := time.After(coretesting.LongWait)
	resync := time.After(0)
	for {
		select {
		case <-w.Changes():
			if check() {
				return
			}
		case <-resync:
			resync = time.After(coretesting.ShortWait)
			s.BackingState.StartSync()
		case <-timeout:
			c.Fatalf("machine %v wait timed out", m)
		}
	}
}
// APILogin logs in to the API as the given machine, marking it as
// provisioned with a fake instance id and nonce first.
func (s *CommonProvisionerSuite) APILogin(c *gc.C, machine *state.Machine) {
	if s.st != nil {
		c.Assert(s.st.Close(), gc.IsNil)
	}
	password, err := utils.RandomPassword()
	c.Assert(err, gc.IsNil)
	err = machine.SetPassword(password)
	c.Assert(err, gc.IsNil)
	err = machine.SetProvisioned("i-fake", "fake_nonce", nil)
	c.Assert(err, gc.IsNil)
	s.st = s.OpenAPIAsMachine(c, machine.Tag(), password, "fake_nonce")
	c.Assert(s.st, gc.NotNil)
	c.Logf("API: login as %q successful", machine.Tag())
	s.provisioner = s.st.Provisioner()
	c.Assert(s.provisioner, gc.NotNil)
}
// SetMachineAddresses records in state the addresses reported by each
// machine agent, checking permissions per entry.
func (api *MachinerAPI) SetMachineAddresses(args params.SetMachinesAddresses) (params.ErrorResults, error) {
	results := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.MachineAddresses)),
	}
	canModify, err := api.getCanModify()
	if err != nil {
		return params.ErrorResults{}, err
	}
	for i, arg := range args.MachineAddresses {
		err := common.ErrPerm
		if canModify(arg.Tag) {
			var m *state.Machine
			m, err = api.getMachine(arg.Tag)
			if err == nil {
				err = m.SetMachineAddresses(arg.Addresses...)
			} else if errors.IsNotFound(err) {
				err = common.ErrPerm
			}
		}
		results.Results[i].Error = common.ServerError(err)
	}
	return results, nil
}
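// exampleSetMachineAddresses is a hypothetical sketch, not part of the
// original source: a bulk call with one entry per machine, with each entry
// getting its own error slot. The element struct name, its Tag and
// Addresses fields, and the tag and address values are assumed for
// illustration.
func exampleSetMachineAddresses(api *MachinerAPI) error {
	args := params.SetMachinesAddresses{
		MachineAddresses: []params.MachineAddresses{{
			Tag: "machine-0",
			Addresses: []instance.Address{
				instance.NewAddress("10.0.0.1", instance.NetworkCloudLocal),
			},
		}},
	}
	results, err := api.SetMachineAddresses(args)
	if err != nil {
		return err
	}
	// A nil per-entry error means that entry succeeded.
	for _, r := range results.Results {
		if r.Error != nil {
			return r.Error
		}
	}
	return nil
}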
// getProvisioningInfo assembles the provisioning parameters for the machine.
func getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) {
	cons, err := m.Constraints()
	if err != nil {
		return nil, err
	}
	// TODO(dimitern) For now, since network names and
	// provider ids are the same, we return what we got
	// from state. In the future, when networks can be
	// added before provisioning, we should convert both
	// slices from juju network names to provider-specific
	// ids before returning them.
	includeNetworks, excludeNetworks, err := m.RequestedNetworks()
	if err != nil {
		return nil, err
	}
	return &params.ProvisioningInfo{
		Constraints:     cons,
		Series:          m.Series(),
		Placement:       m.Placement(),
		IncludeNetworks: includeNetworks,
		ExcludeNetworks: excludeNetworks,
	}, nil
}
// createContainer adds a container of the given type to the host machine and
// checks that the host agent does not try to provision it itself.
func (s *ContainerSetupSuite) createContainer(c *gc.C, host *state.Machine, ctype instance.ContainerType) {
	inst := s.checkStartInstance(c, host)
	s.setupContainerWorker(c, host.Tag())

	// Make a container on the host machine.
	template := state.MachineTemplate{
		Series: coretesting.FakeDefaultSeries,
		Jobs:   []state.MachineJob{state.JobHostUnits},
	}
	container, err := s.State.AddMachineInsideMachine(template, host.Id(), ctype)
	c.Assert(err, gc.IsNil)

	// The host machine agent should not attempt to create the container.
	s.checkNoOperations(c)

	// Cleanup.
	c.Assert(container.EnsureDead(), gc.IsNil)
	c.Assert(container.Remove(), gc.IsNil)
	c.Assert(host.EnsureDead(), gc.IsNil)
	s.checkStopInstances(c, inst)
	s.waitRemoved(c, host)
}
// startInstance starts a new instance for the given machine.
func (s *FirewallerSuite) startInstance(c *gc.C, m *state.Machine) instance.Instance {
	inst, hc := testing.AssertStartInstance(c, s.Conn.Environ, m.Id())
	err := m.SetProvisioned(inst.Id(), "fake_nonce", hc)
	c.Assert(err, gc.IsNil)
	return inst
}
// setAddresses gives the machine a public and a cloud-local address.
func (s *SSHCommonSuite) setAddresses(m *state.Machine, c *gc.C) {
	addrPub := instance.NewAddress(
		fmt.Sprintf("dummyenv-%s.dns", m.Id()), instance.NetworkPublic)
	addrPriv := instance.NewAddress(
		fmt.Sprintf("dummyenv-%s.internal", m.Id()), instance.NetworkCloudLocal)
	err := m.SetAddresses(addrPub, addrPriv)
	c.Assert(err, gc.IsNil)
}
// makeMachineStatus assembles the API status for a single machine.
func (context *statusContext) makeMachineStatus(machine *state.Machine) (status api.MachineStatus) {
	status.Id = machine.Id()
	status.Life, status.AgentVersion, status.AgentState, status.AgentStateInfo, status.Err = processAgent(machine)
	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		status.InstanceState, err = machine.InstanceStatus()
		if err != nil {
			status.InstanceState = "error"
		}
		status.DNSName = instance.SelectPublicAddress(machine.Addresses())
	} else {
		if state.IsNotProvisionedError(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
		// There's no point in reporting a pending agent state
		// if the machine hasn't been provisioned. This
		// also makes unprovisioned machines visually distinct
		// in the output.
		status.AgentState = ""
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]api.MachineStatus)
	return
}
// Run initializes state for an environment.
func (c *BootstrapCommand) Run(_ *cmd.Context) error {
	envCfg, err := config.New(config.NoDefaults, c.EnvConfig)
	if err != nil {
		return err
	}
	err = c.ReadConfig("machine-0")
	if err != nil {
		return err
	}
	agentConfig := c.CurrentConfig()

	// agent.Jobs is an optional field in the agent config, and was
	// introduced after 1.17.2. We default to allowing units on
	// machine-0 if missing.
	jobs := agentConfig.Jobs()
	if len(jobs) == 0 {
		jobs = []params.MachineJob{
			params.JobManageEnviron,
			params.JobHostUnits,
		}
	}

	// Get the bootstrap machine's addresses from the provider.
	env, err := environs.New(envCfg)
	if err != nil {
		return err
	}
	instanceId := instance.Id(c.InstanceId)
	instances, err := env.Instances([]instance.Id{instanceId})
	if err != nil {
		return err
	}
	addrs, err := instances[0].Addresses()
	if err != nil {
		return err
	}

	// Create the system-identity file.
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return err
	}

	// Generate a shared secret for the Mongo replica set, and write it out.
	sharedSecret, err := mongo.GenerateSharedSecret()
	if err != nil {
		return err
	}
	info, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("bootstrap machine config has no state serving info")
	}
	info.SharedSecret = sharedSecret
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) {
		agentConfig.SetStateServingInfo(info)
	})
	if err != nil {
		return fmt.Errorf("cannot write agent config: %v", err)
	}
	agentConfig = c.CurrentConfig()

	if err := c.startMongo(addrs, agentConfig); err != nil {
		return err
	}
	logger.Infof("started mongo")

	// Initialise state, and store any agent config (e.g. password) changes.
	var st *state.State
	var m *state.Machine
	err = nil
	writeErr := c.ChangeConfig(func(agentConfig agent.ConfigSetter) {
		st, m, err = agent.InitializeState(
			agentConfig,
			envCfg,
			agent.BootstrapMachineConfig{
				Addresses:       addrs,
				Constraints:     c.Constraints,
				Jobs:            jobs,
				InstanceId:      instanceId,
				Characteristics: c.Hardware,
				SharedSecret:    sharedSecret,
			},
			state.DefaultDialOpts(),
			environs.NewStatePolicy(),
		)
	})
	if writeErr != nil {
		return fmt.Errorf("cannot write initial configuration: %v", writeErr)
	}
	if err != nil {
		return err
	}
	defer st.Close()

	// The bootstrap machine always gets the vote.
	return m.SetHasVote(true)
}
// assertMachineNotEmpty checks that the machine hosts at least one container.
func (s *assignCleanSuite) assertMachineNotEmpty(c *gc.C, machine *state.Machine) {
	containers, err := machine.Containers()
	c.Assert(err, gc.IsNil)
	c.Assert(len(containers), gc.Not(gc.Equals), 0)
}