func getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) {
	cons, err := m.Constraints()
	if err != nil {
		return nil, err
	}
	// TODO(dimitern) For now, since network names and
	// provider ids are the same, we return what we got
	// from state. In the future, when networks can be
	// added before provisioning, we should convert both
	// slices from juju network names to provider-specific
	// ids before returning them.
	networks, err := m.RequestedNetworks()
	if err != nil {
		return nil, err
	}
	var jobs []params.MachineJob
	for _, job := range m.Jobs() {
		jobs = append(jobs, job.ToParams())
	}
	return &params.ProvisioningInfo{
		Constraints: cons,
		Series:      m.Series(),
		Placement:   m.Placement(),
		Networks:    networks,
		Jobs:        jobs,
	}, nil
}
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
	var err error
	status.Id = machine.Id()
	agentStatus := processMachine(machine)
	status.AgentStatus = agentStatus
	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	sInfo, err := machine.InstanceStatus()
	populateStatusFromStatusInfoAndErr(&status.InstanceStatus, sInfo, err)
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		addr, err := machine.PublicAddress()
		if err != nil {
			// Usually this indicates that no addresses have been set on the
			// machine yet.
			addr = network.Address{}
			logger.Debugf("error fetching public address: %q", err)
		}
		status.DNSName = addr.Value

		mAddrs := machine.Addresses()
		if len(mAddrs) == 0 {
			logger.Debugf("no IP addresses fetched for machine %q", instid)
			// At least give it the newly created DNSName address, if it exists.
			if addr.Value != "" {
				mAddrs = append(mAddrs, addr)
			}
		}
		for _, mAddr := range mAddrs {
			switch mAddr.Scope {
			case network.ScopeMachineLocal, network.ScopeLinkLocal:
				continue
			}
			status.IPAddresses = append(status.IPAddresses, mAddr.Value)
		}
	} else {
		if errors.IsNotProvisioned(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]params.MachineStatus)
	return
}
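// The makeMachineStatus variant above calls populateStatusFromStatusInfoAndErr,
// which is not shown in this section. Below is a minimal sketch of what that
// helper might look like, inferred from the call site alone: the
// params.DetailedStatus target fields (Err, Status, Info, Data, Since) and the
// status.StatusInfo source fields are assumptions, not confirmed signatures.
func populateStatusFromStatusInfoAndErr(agent *params.DetailedStatus, statusInfo status.StatusInfo, err error) {
	// Copy the raw lookup error (if any) alongside the status details, so
	// callers can report both in one struct.
	agent.Err = err
	agent.Status = string(statusInfo.Status)
	agent.Info = statusInfo.Message
	agent.Data = statusInfo.Data
	agent.Since = statusInfo.Since
}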
func (p *ProvisionerAPI) getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) {
	cons, err := m.Constraints()
	if err != nil {
		return nil, err
	}
	volumes, err := p.machineVolumeParams(m)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// TODO(dimitern) Drop this once we only use spaces for
	// deployments.
	networks, err := m.RequestedNetworks()
	if err != nil {
		return nil, err
	}
	var jobs []multiwatcher.MachineJob
	for _, job := range m.Jobs() {
		jobs = append(jobs, job.ToParams())
	}
	tags, err := p.machineTags(m, jobs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	subnetsToZones, err := p.machineSubnetsAndZones(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot match subnets to zones")
	}
	endpointBindings, err := p.machineEndpointBindings(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine machine endpoint bindings")
	}
	imageMetadata, err := p.availableImageMetadata(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get available image metadata")
	}
	return &params.ProvisioningInfo{
		Constraints:      cons,
		Series:           m.Series(),
		Placement:        m.Placement(),
		Networks:         networks,
		Jobs:             jobs,
		Volumes:          volumes,
		Tags:             tags,
		SubnetsToZones:   subnetsToZones,
		EndpointBindings: endpointBindings,
		ImageMetadata:    imageMetadata,
	}, nil
}
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
	status.Id = machine.Id()
	agentStatus, compatStatus := processMachine(machine)
	status.Agent = agentStatus

	// These legacy status values will be deprecated for Juju 2.0.
	status.AgentState = compatStatus.Status
	status.AgentStateInfo = compatStatus.Info
	status.AgentVersion = compatStatus.Version
	status.Life = compatStatus.Life
	status.Err = compatStatus.Err

	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		status.InstanceState, err = machine.InstanceStatus()
		if err != nil {
			status.InstanceState = "error"
		}
		addr, err := machine.PublicAddress()
		if err != nil {
			// Usually this indicates that no addresses have been set on the
			// machine yet.
			addr = network.Address{}
			logger.Warningf("error fetching public address: %q", err)
		}
		status.DNSName = addr.Value
	} else {
		if errors.IsNotProvisioned(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
		// There's no point in reporting a pending agent state
		// if the machine hasn't been provisioned. This
		// also makes unprovisioned machines visually distinct
		// in the output.
		status.AgentState = ""
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]params.MachineStatus)
	return
}
func (p *ProvisionerAPI) getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) {
	cons, err := m.Constraints()
	if err != nil {
		return nil, errors.Trace(err)
	}
	volumes, err := p.machineVolumeParams(m)
	if err != nil {
		return nil, errors.Trace(err)
	}
	var jobs []multiwatcher.MachineJob
	for _, job := range m.Jobs() {
		jobs = append(jobs, job.ToParams())
	}
	tags, err := p.machineTags(m, jobs)
	if err != nil {
		return nil, errors.Trace(err)
	}
	subnetsToZones, err := p.machineSubnetsAndZones(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot match subnets to zones")
	}
	endpointBindings, err := p.machineEndpointBindings(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine machine endpoint bindings")
	}
	imageMetadata, err := p.availableImageMetadata(m)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get available image metadata")
	}
	controllerCfg, err := p.st.ControllerConfig()
	if err != nil {
		return nil, errors.Annotate(err, "cannot get controller configuration")
	}
	return &params.ProvisioningInfo{
		Constraints:      cons,
		Series:           m.Series(),
		Placement:        m.Placement(),
		Jobs:             jobs,
		Volumes:          volumes,
		Tags:             tags,
		SubnetsToZones:   subnetsToZones,
		EndpointBindings: endpointBindings,
		ImageMetadata:    imageMetadata,
		ControllerConfig: controllerCfg,
	}, nil
}
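// getProvisioningInfo is a per-machine helper; the snippets here never show
// how it is invoked. Below is a sketch of how a bulk facade method might wrap
// it, following the usual params.Entities bulk-call convention in this API
// style. The getAuthFunc and getMachine helpers, and the exact result types,
// are assumptions for illustration rather than confirmed signatures.
func (p *ProvisionerAPI) ProvisioningInfo(args params.Entities) (params.ProvisioningInfoResults, error) {
	result := params.ProvisioningInfoResults{
		Results: make([]params.ProvisioningInfoResult, len(args.Entities)),
	}
	canAccess, err := p.getAuthFunc()
	if err != nil {
		return result, errors.Trace(err)
	}
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			// Hide the existence of machines the caller may not name.
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		machine, err := p.getMachine(canAccess, tag)
		if err == nil {
			result.Results[i].Result, err = p.getProvisioningInfo(machine)
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}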
// AssertMachineEqual checks that a machine imported by model migration
// matches the original machine field by field.
func (s *MigrationImportSuite) AssertMachineEqual(c *gc.C, newMachine, oldMachine *state.Machine) {
	c.Assert(newMachine.Id(), gc.Equals, oldMachine.Id())
	c.Assert(newMachine.Principals(), jc.DeepEquals, oldMachine.Principals())
	c.Assert(newMachine.Series(), gc.Equals, oldMachine.Series())
	c.Assert(newMachine.ContainerType(), gc.Equals, oldMachine.ContainerType())
	newHardware, err := newMachine.HardwareCharacteristics()
	c.Assert(err, jc.ErrorIsNil)
	oldHardware, err := oldMachine.HardwareCharacteristics()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(newHardware, jc.DeepEquals, oldHardware)
	c.Assert(newMachine.Jobs(), jc.DeepEquals, oldMachine.Jobs())
	c.Assert(newMachine.Life(), gc.Equals, oldMachine.Life())
	newTools, err := newMachine.AgentTools()
	c.Assert(err, jc.ErrorIsNil)
	oldTools, err := oldMachine.AgentTools()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(newTools, jc.DeepEquals, oldTools)
}
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
	status.Id = machine.Id()
	agentStatus := processMachine(machine)
	status.Agent = agentStatus
	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		status.InstanceState, err = machine.InstanceStatus()
		if err != nil {
			status.InstanceState = "error"
		}
		addr, err := machine.PublicAddress()
		if err != nil {
			// Usually this indicates that no addresses have been set on the
			// machine yet.
			addr = network.Address{}
			logger.Debugf("error fetching public address: %q", err)
		}
		status.DNSName = addr.Value
	} else {
		if errors.IsNotProvisioned(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]params.MachineStatus)
	return
}
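// Every makeMachineStatus variant above calls paramsJobsFromJobs, which is
// not shown in this section. A minimal sketch is below: it maps each
// state.MachineJob to its wire representation via ToParams. Note the element
// type differs across the versions shown (params.MachineJob in the older
// snippets, multiwatcher.MachineJob in the newer ones); this sketch assumes
// the multiwatcher form.
func paramsJobsFromJobs(jobs []state.MachineJob) []multiwatcher.MachineJob {
	paramsJobs := make([]multiwatcher.MachineJob, len(jobs))
	for i, machineJob := range jobs {
		paramsJobs[i] = machineJob.ToParams()
	}
	return paramsJobs
}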
func (context *statusContext) makeMachineStatus(machine *state.Machine) (status api.MachineStatus) {
	status.Id = machine.Id()
	status.Agent, status.AgentState, status.AgentStateInfo = processAgent(machine)
	status.AgentVersion = status.Agent.Version
	status.Life = status.Agent.Life
	status.Err = status.Agent.Err
	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		status.InstanceState, err = machine.InstanceStatus()
		if err != nil {
			status.InstanceState = "error"
		}
		status.DNSName = network.SelectPublicAddress(machine.Addresses())
	} else {
		if state.IsNotProvisionedError(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
		// There's no point in reporting a pending agent state
		// if the machine hasn't been provisioned. This
		// also makes unprovisioned machines visually distinct
		// in the output.
		status.AgentState = ""
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]api.MachineStatus)
	return
}
func (s *CommonProvisionerSuite) checkStartInstanceCustom(
	c *gc.C, m *state.Machine,
	secret string,
	cons constraints.Value,
	networks []string,
	networkInfo []network.InterfaceInfo,
	subnetsToZones map[network.Id][]string,
	volumes []storage.Volume,
	secureServerConnection bool,
	checkPossibleTools coretools.List,
	waitInstanceId bool,
) (
	inst instance.Instance,
) {
	s.BackingState.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				inst = o.Instance
				if waitInstanceId {
					s.waitInstanceId(c, m, inst.Id())
				}

				// Check the instance was started with the expected params.
				c.Assert(o.MachineId, gc.Equals, m.Id())
				nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
				c.Assert(nonceParts, gc.HasLen, 2)
				c.Assert(nonceParts[0], gc.Equals, names.NewMachineTag("0").String())
				c.Assert(nonceParts[1], jc.Satisfies, utils.IsValidUUIDString)
				c.Assert(o.Secret, gc.Equals, secret)
				c.Assert(o.SubnetsToZones, jc.DeepEquals, subnetsToZones)
				c.Assert(o.Networks, jc.DeepEquals, networks)
				c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo)
				c.Assert(o.Volumes, jc.DeepEquals, volumes)
				c.Assert(o.AgentEnvironment["SECURE_STATESERVER_CONNECTION"], gc.Equals,
					strconv.FormatBool(secureServerConnection))

				var jobs []multiwatcher.MachineJob
				for _, job := range m.Jobs() {
					jobs = append(jobs, job.ToParams())
				}
				c.Assert(o.Jobs, jc.SameContents, jobs)

				if checkPossibleTools != nil {
					for _, t := range o.PossibleTools {
						url := fmt.Sprintf("https://%s/environment/%s/tools/%s",
							s.st.Addr(), coretesting.EnvironmentTag.Id(), t.Version)
						c.Check(t.URL, gc.Equals, url)
						t.URL = ""
					}
					for _, t := range checkPossibleTools {
						t.URL = ""
					}
					c.Assert(o.PossibleTools, gc.DeepEquals, checkPossibleTools)
				}

				// All provisioned machines in this test suite have
				// their hardware characteristics attributes set to
				// the same values as the constraints due to the dummy
				// environment being used.
				if !constraints.IsEmpty(&cons) {
					c.Assert(o.Constraints, gc.DeepEquals, cons)
					hc, err := m.HardwareCharacteristics()
					c.Assert(err, jc.ErrorIsNil)
					c.Assert(*hc, gc.DeepEquals, instance.HardwareCharacteristics{
						Arch:     cons.Arch,
						Mem:      cons.Mem,
						RootDisk: cons.RootDisk,
						CpuCores: cons.CpuCores,
						CpuPower: cons.CpuPower,
						Tags:     cons.Tags,
					})
				}
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not start an instance")
			return
		}
	}
}
func (s *CommonProvisionerSuite) checkStartInstanceCustom(
	c *gc.C, m *state.Machine,
	secret string,
	cons constraints.Value,
	networks []string,
	networkInfo []network.Info,
	checkPossibleTools coretools.List,
	waitInstanceId bool,
) (
	inst instance.Instance,
) {
	s.BackingState.StartSync()
	for {
		select {
		case o := <-s.op:
			switch o := o.(type) {
			case dummy.OpStartInstance:
				inst = o.Instance
				if waitInstanceId {
					s.waitInstanceId(c, m, inst.Id())
				}

				// Check the instance was started with the expected params.
				c.Assert(o.MachineId, gc.Equals, m.Id())
				nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
				c.Assert(nonceParts, gc.HasLen, 2)
				c.Assert(nonceParts[0], gc.Equals, names.NewMachineTag("0").String())
				c.Assert(nonceParts[1], jc.Satisfies, utils.IsValidUUIDString)
				c.Assert(o.Secret, gc.Equals, secret)
				c.Assert(o.Networks, jc.DeepEquals, networks)
				c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo)

				var jobs []params.MachineJob
				for _, job := range m.Jobs() {
					jobs = append(jobs, job.ToParams())
				}
				c.Assert(o.Jobs, jc.SameContents, jobs)

				if checkPossibleTools != nil {
					c.Assert(o.PossibleTools, gc.DeepEquals, checkPossibleTools)
				}

				// All provisioned machines in this test suite have
				// their hardware characteristics attributes set to
				// the same values as the constraints due to the dummy
				// environment being used.
				if !constraints.IsEmpty(&cons) {
					c.Assert(o.Constraints, gc.DeepEquals, cons)
					hc, err := m.HardwareCharacteristics()
					c.Assert(err, gc.IsNil)
					c.Assert(*hc, gc.DeepEquals, instance.HardwareCharacteristics{
						Arch:     cons.Arch,
						Mem:      cons.Mem,
						RootDisk: cons.RootDisk,
						CpuCores: cons.CpuCores,
						CpuPower: cons.CpuPower,
						Tags:     cons.Tags,
					})
				}
				return
			default:
				c.Logf("ignoring unexpected operation %#v", o)
			}
		case <-time.After(2 * time.Second):
			c.Fatalf("provisioner did not start an instance")
			return
		}
	}
}