// newAgent returns a new MachineAgent instance
func (s *commonMachineSuite) newAgent(c *gc.C, m *state.Machine) *MachineAgent {
    a := &MachineAgent{}
    s.initAgent(c, a, "--machine-id", m.Id())
    err := a.ReadConfig(m.Tag().String())
    c.Assert(err, gc.IsNil)
    return a
}
// machineVolumeParams retrieves VolumeParams for the volumes that should be
// provisioned with, and attached to, the machine. The client should ignore
// parameters that it does not know how to handle.
func (p *ProvisionerAPI) machineVolumeParams(m *state.Machine) ([]params.VolumeParams, error) {
    volumeAttachments, err := m.VolumeAttachments()
    if err != nil {
        return nil, err
    }
    if len(volumeAttachments) == 0 {
        return nil, nil
    }
    envConfig, err := p.st.EnvironConfig()
    if err != nil {
        return nil, err
    }
    poolManager := poolmanager.New(state.NewStateSettings(p.st))
    allVolumeParams := make([]params.VolumeParams, 0, len(volumeAttachments))
    for _, volumeAttachment := range volumeAttachments {
        volumeTag := volumeAttachment.Volume()
        volume, err := p.st.Volume(volumeTag)
        if err != nil {
            return nil, errors.Annotatef(err, "getting volume %q", volumeTag.Id())
        }
        storageInstance, err := storagecommon.MaybeAssignedStorageInstance(
            volume.StorageInstance, p.st.StorageInstance,
        )
        if err != nil {
            return nil, errors.Annotatef(err, "getting volume %q storage instance", volumeTag.Id())
        }
        volumeParams, err := storagecommon.VolumeParams(volume, storageInstance, envConfig, poolManager)
        if err != nil {
            return nil, errors.Annotatef(err, "getting volume %q parameters", volumeTag.Id())
        }
        provider, err := registry.StorageProvider(storage.ProviderType(volumeParams.Provider))
        if err != nil {
            return nil, errors.Annotate(err, "getting storage provider")
        }
        if provider.Dynamic() {
            // Leave dynamic storage to the storage provisioner.
            continue
        }
        volumeAttachmentParams, ok := volumeAttachment.Params()
        if !ok {
            // The attachment is already provisioned; this should never
            // happen here, so do not proceed with the volume.
            return nil, errors.Errorf(
                "volume %s already attached to machine %s",
                volumeTag.Id(), m.Id(),
            )
        }
        // Not provisioned yet, so ask the cloud provisioner to do it.
        volumeParams.Attachment = &params.VolumeAttachmentParams{
            volumeTag.String(),
            m.Tag().String(),
            "", // we're creating the volume, so it has no volume ID.
            "", // we're creating the machine, so it has no instance ID.
            volumeParams.Provider,
            volumeAttachmentParams.ReadOnly,
        }
        allVolumeParams = append(allVolumeParams, volumeParams)
    }
    return allVolumeParams, nil
}
// primeAgentWithMachine starts the machine's agent presence pinger,
// registers a cleanup to stop it, and returns the configured machine
// along with its agent config and tools.
func (s *commonMachineSuite) primeAgentWithMachine(c *gc.C, m *state.Machine, vers version.Binary) (*state.Machine, agent.ConfigSetterWriter, *tools.Tools) {
    pinger, err := m.SetAgentPresence()
    c.Assert(err, jc.ErrorIsNil)
    s.AddCleanup(func(c *gc.C) {
        c.Assert(worker.Stop(pinger), jc.ErrorIsNil)
    })
    return s.configureMachine(c, m.Id(), vers)
}
// assertAssignUnit adds a wordpress unit, assigns it, and checks that it
// lands on the expected machine, which is then no longer clean.
func (s *assignCleanSuite) assertAssignUnit(c *gc.C, expectedMachine *state.Machine) {
    unit, err := s.wordpress.AddUnit()
    c.Assert(err, jc.ErrorIsNil)
    reusedMachine, err := s.assignUnit(unit)
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(reusedMachine.Id(), gc.Equals, expectedMachine.Id())
    c.Assert(reusedMachine.Clean(), jc.IsFalse)
}
// machineSubnetsAndZones returns a map of subnet provider-specific id
// to list of availability zone names for that subnet. The result can
// be empty if there are no spaces constraints specified for the
// machine, or there's an error fetching them.
func (p *ProvisionerAPI) machineSubnetsAndZones(m *state.Machine) (map[string][]string, error) {
    mcons, err := m.Constraints()
    if err != nil {
        return nil, errors.Annotate(err, "cannot get machine constraints")
    }
    includeSpaces := mcons.IncludeSpaces()
    if len(includeSpaces) < 1 {
        // Nothing to do.
        return nil, nil
    }
    // TODO(dimitern): For the network model MVP we only use the first
    // included space and ignore the rest.
    //
    // LKK Card: https://canonical.leankit.com/Boards/View/101652562/117352306
    // LP Bug: http://pad.lv/1498232
    spaceName := includeSpaces[0]
    if len(includeSpaces) > 1 {
        logger.Debugf(
            "using space %q from constraints for machine %q (ignoring remaining: %v)",
            spaceName, m.Id(), includeSpaces[1:],
        )
    }
    space, err := p.st.Space(spaceName)
    if err != nil {
        return nil, errors.Trace(err)
    }
    subnets, err := space.Subnets()
    if err != nil {
        return nil, errors.Trace(err)
    }
    if len(subnets) == 0 {
        return nil, errors.Errorf("cannot use space %q as deployment target: no subnets", spaceName)
    }
    subnetsToZones := make(map[string][]string, len(subnets))
    for _, subnet := range subnets {
        warningPrefix := fmt.Sprintf(
            "not using subnet %q in space %q for machine %q provisioning: ",
            subnet.CIDR(), spaceName, m.Id(),
        )
        providerId := subnet.ProviderId()
        if providerId == "" {
            logger.Warningf(warningPrefix + "no ProviderId set")
            continue
        }
        // TODO(dimitern): Once state.Subnet supports multiple zones,
        // use all of them below.
        //
        // LKK Card: https://canonical.leankit.com/Boards/View/101652562/119979611
        zone := subnet.AvailabilityZone()
        if zone == "" {
            logger.Warningf(warningPrefix + "no availability zone(s) set")
            continue
        }
        subnetsToZones[string(providerId)] = []string{zone}
    }
    return subnetsToZones, nil
}
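// For illustration: a single constrained space containing one subnet with
// provider id "subnet-1234" in zone "zone-a" (both values invented) would
// make the function above return:
//
//     map[string][]string{"subnet-1234": {"zone-a"}}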
// makeMachineStatus composes a params.MachineStatus for the given machine,
// including its agent, instance, address, and hardware details.
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
    var err error
    status.Id = machine.Id()
    agentStatus := processMachine(machine)
    status.AgentStatus = agentStatus
    status.Series = machine.Series()
    status.Jobs = paramsJobsFromJobs(machine.Jobs())
    status.WantsVote = machine.WantsVote()
    status.HasVote = machine.HasVote()
    sInfo, err := machine.InstanceStatus()
    populateStatusFromStatusInfoAndErr(&status.InstanceStatus, sInfo, err)
    instid, err := machine.InstanceId()
    if err == nil {
        status.InstanceId = instid
        addr, err := machine.PublicAddress()
        if err != nil {
            // Usually this indicates that no addresses have been set on the
            // machine yet.
            addr = network.Address{}
            logger.Debugf("error fetching public address: %q", err)
        }
        status.DNSName = addr.Value
        mAddrs := machine.Addresses()
        if len(mAddrs) == 0 {
            logger.Debugf("no IP addresses fetched for machine %q", instid)
            // At least give it the newly created DNSName address, if it exists.
            if addr.Value != "" {
                mAddrs = append(mAddrs, addr)
            }
        }
        for _, mAddr := range mAddrs {
            switch mAddr.Scope {
            case network.ScopeMachineLocal, network.ScopeLinkLocal:
                continue
            }
            status.IPAddresses = append(status.IPAddresses, mAddr.Value)
        }
    } else {
        if errors.IsNotProvisioned(err) {
            status.InstanceId = "pending"
        } else {
            status.InstanceId = "error"
        }
    }
    hc, err := machine.HardwareCharacteristics()
    if err != nil {
        if !errors.IsNotFound(err) {
            status.Hardware = "error"
        }
    } else {
        status.Hardware = hc.String()
    }
    status.Containers = make(map[string]params.MachineStatus)
    return
}
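// A minimal standalone sketch of the address filtering above, assuming the
// same network.Address type and scope constants used elsewhere in this file:
// addresses reachable only from the machine itself are dropped, and the
// values of the rest are kept.
func reachableAddressValues(addrs []network.Address) []string {
    var values []string
    for _, addr := range addrs {
        switch addr.Scope {
        case network.ScopeMachineLocal, network.ScopeLinkLocal:
            // Skip addresses that other hosts cannot reach.
            continue
        }
        values = append(values, addr.Value)
    }
    return values
}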
// makeMachineStatus composes a params.MachineStatus for the given machine,
// including the legacy agent-state fields retained for older clients.
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
    status.Id = machine.Id()
    agentStatus, compatStatus := processMachine(machine)
    status.Agent = agentStatus
    // These legacy status values will be deprecated for Juju 2.0.
    status.AgentState = compatStatus.Status
    status.AgentStateInfo = compatStatus.Info
    status.AgentVersion = compatStatus.Version
    status.Life = compatStatus.Life
    status.Err = compatStatus.Err
    status.Series = machine.Series()
    status.Jobs = paramsJobsFromJobs(machine.Jobs())
    status.WantsVote = machine.WantsVote()
    status.HasVote = machine.HasVote()
    instid, err := machine.InstanceId()
    if err == nil {
        status.InstanceId = instid
        status.InstanceState, err = machine.InstanceStatus()
        if err != nil {
            status.InstanceState = "error"
        }
        addr, err := machine.PublicAddress()
        if err != nil {
            // Usually this indicates that no addresses have been set on the
            // machine yet.
            addr = network.Address{}
            logger.Warningf("error fetching public address: %q", err)
        }
        status.DNSName = addr.Value
    } else {
        if errors.IsNotProvisioned(err) {
            status.InstanceId = "pending"
        } else {
            status.InstanceId = "error"
        }
        // There's no point in reporting a pending agent state
        // if the machine hasn't been provisioned. This
        // also makes unprovisioned machines visually distinct
        // in the output.
        status.AgentState = ""
    }
    hc, err := machine.HardwareCharacteristics()
    if err != nil {
        if !errors.IsNotFound(err) {
            status.Hardware = "error"
        }
    } else {
        status.Hardware = hc.String()
    }
    status.Containers = make(map[string]params.MachineStatus)
    return
}
func (s *SSHCommonSuite) setAddresses(m *state.Machine, c *gc.C) { addrPub := network.NewScopedAddress( fmt.Sprintf("admin-%s.dns", m.Id()), network.ScopePublic, ) addrPriv := network.NewScopedAddress( fmt.Sprintf("admin-%s.internal", m.Id()), network.ScopeCloudLocal, ) err := m.SetProviderAddresses(addrPub, addrPriv) c.Assert(err, jc.ErrorIsNil) }
// setAddresses gives the machine a public and a cloud-local provider address.
func (s *SSHCommonSuite) setAddresses(c *gc.C, m *state.Machine) {
    addrPub := network.NewScopedAddress(
        fmt.Sprintf("%s.public", m.Id()),
        network.ScopePublic,
    )
    addrPriv := network.NewScopedAddress(
        fmt.Sprintf("%s.private", m.Id()),
        network.ScopeCloudLocal,
    )
    err := m.SetProviderAddresses(addrPub, addrPriv)
    c.Assert(err, jc.ErrorIsNil)
}
// machineSubnetsAndZones returns a map of subnet provider-specific id
// to list of availability zone names for that subnet. The result can
// be empty if there are no spaces constraints specified for the
// machine, or there's an error fetching them.
func (p *ProvisionerAPI) machineSubnetsAndZones(m *state.Machine) (map[string][]string, error) {
    mcons, err := m.Constraints()
    if err != nil {
        return nil, errors.Annotate(err, "cannot get machine constraints")
    }
    includeSpaces := mcons.IncludeSpaces()
    if len(includeSpaces) < 1 {
        // Nothing to do.
        return nil, nil
    }
    // TODO(dimitern): For the network model MVP we only use the first
    // included space and ignore the rest.
    spaceName := includeSpaces[0]
    if len(includeSpaces) > 1 {
        logger.Debugf(
            "using space %q from constraints for machine %q (ignoring remaining: %v)",
            spaceName, m.Id(), includeSpaces[1:],
        )
    }
    space, err := p.st.Space(spaceName)
    if err != nil {
        return nil, errors.Trace(err)
    }
    subnets, err := space.Subnets()
    if err != nil {
        return nil, errors.Trace(err)
    }
    subnetsToZones := make(map[string][]string, len(subnets))
    for _, subnet := range subnets {
        warningPrefix := fmt.Sprintf(
            "not using subnet %q in space %q for machine %q provisioning: ",
            subnet.CIDR(), spaceName, m.Id(),
        )
        // TODO(dimitern): state.Subnet.ProviderId needs to be of type
        // network.Id.
        providerId := subnet.ProviderId()
        if providerId == "" {
            logger.Warningf(warningPrefix + "no ProviderId set")
            continue
        }
        // TODO(dimitern): Once state.Subnet supports multiple zones,
        // use all of them below.
        zone := subnet.AvailabilityZone()
        if zone == "" {
            logger.Warningf(warningPrefix + "no availability zone(s) set")
            continue
        }
        subnetsToZones[providerId] = []string{zone}
    }
    return subnetsToZones, nil
}
// checkStartInstanceCustom waits for the provisioner to start an instance
// for the machine and verifies that it was started with the expected
// parameters.
func (s *CommonProvisionerSuite) checkStartInstanceCustom(c *gc.C, m *state.Machine, secret string, cons constraints.Value, networks []string, networkInfo []network.Info, waitInstanceId bool) (inst instance.Instance) {
    s.BackingState.StartSync()
    for {
        select {
        case o := <-s.op:
            switch o := o.(type) {
            case dummy.OpStartInstance:
                inst = o.Instance
                if waitInstanceId {
                    s.waitInstanceId(c, m, inst.Id())
                }

                // Check the instance was started with the expected params.
                c.Assert(o.MachineId, gc.Equals, m.Id())
                nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
                c.Assert(nonceParts, gc.HasLen, 2)
                c.Assert(nonceParts[0], gc.Equals, names.NewMachineTag("0").String())
                c.Assert(nonceParts[1], jc.Satisfies, utils.IsValidUUIDString)
                c.Assert(o.Secret, gc.Equals, secret)
                c.Assert(o.Networks, jc.DeepEquals, networks)
                c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo)

                // All provisioned machines in this test suite have
                // their hardware characteristics attributes set to
                // the same values as the constraints due to the dummy
                // environment being used.
                if !constraints.IsEmpty(&cons) {
                    c.Assert(o.Constraints, gc.DeepEquals, cons)
                    hc, err := m.HardwareCharacteristics()
                    c.Assert(err, gc.IsNil)
                    c.Assert(*hc, gc.DeepEquals, instance.HardwareCharacteristics{
                        Arch:     cons.Arch,
                        Mem:      cons.Mem,
                        RootDisk: cons.RootDisk,
                        CpuCores: cons.CpuCores,
                        CpuPower: cons.CpuPower,
                        Tags:     cons.Tags,
                    })
                }
                return
            default:
                c.Logf("ignoring unexpected operation %#v", o)
            }
        case <-time.After(2 * time.Second):
            c.Fatalf("provisioner did not start an instance")
            return
        }
    }
}
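// The machine nonce asserted above is expected to have the form
// "<creator-machine-tag>:<uuid>", e.g. "machine-0:" followed by a UUID.
// A minimal sketch of that parse, using only the standard library (the
// helper name is invented for illustration):
func splitNonce(nonce string) (creatorTag, uuid string, ok bool) {
    parts := strings.SplitN(nonce, ":", 2)
    if len(parts) != 2 {
        return "", "", false
    }
    return parts[0], parts[1], true
}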
// waitForRemovalMark waits for the supplied machine to be marked for removal.
func (s *CommonProvisionerSuite) waitForRemovalMark(c *gc.C, m *state.Machine) {
    w := s.BackingState.WatchMachineRemovals()
    name := fmt.Sprintf("machine %v marked for removal", m)
    s.waitForWatcher(c, w, name, func() bool {
        removals, err := s.BackingState.AllMachineRemovals()
        c.Assert(err, jc.ErrorIsNil)
        for _, removal := range removals {
            if removal == m.Id() {
                return true
            }
        }
        return false
    })
}
// matchMachineId reports whether any of the patterns matches the machine:
// a valid machine-id pattern matches the machine itself or, as a "<id>/"
// prefix, any container hosted on it. The second result reports whether
// at least one pattern was a valid machine id.
func matchMachineId(m *state.Machine, patterns []string) (bool, bool, error) {
    var anyValid bool
    for _, p := range patterns {
        if !names.IsValidMachine(p) {
            continue
        }
        anyValid = true
        if m.Id() == p || strings.HasPrefix(m.Id(), p+"/") {
            // The pattern matches the machine, or the container's
            // host machine.
            return true, true, nil
        }
    }
    return false, anyValid, nil
}
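// A standalone sketch of the matching rule above, with names.IsValidMachine
// elided and the ids invented for illustration:
//
//	idMatches("2/lxd/0", "2/lxd/0") // true: exact match
//	idMatches("2/lxd/0", "2")       // true: pattern names the host machine
//	idMatches("2/lxd/0", "3")       // false
func idMatches(id, pattern string) bool {
    return id == pattern || strings.HasPrefix(id, pattern+"/")
}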
// remoteParamsForMachine returns a filled in RemoteExec instance
// based on the machine, command and timeout params. If the machine
// does not have an internal address, the Host is empty. This is caught
// by the function that actually tries to execute the command.
func remoteParamsForMachine(machine *state.Machine, command string, timeout time.Duration) *RemoteExec {
    // magic boolean parameters are bad :-(
    address, ok := network.SelectInternalAddress(machine.Addresses(), false)
    execParams := &RemoteExec{
        ExecParams: ssh.ExecParams{
            Command: command,
            Timeout: timeout,
        },
        MachineId: machine.Id(),
    }
    if ok {
        execParams.Host = fmt.Sprintf("ubuntu@%s", address.Value)
    }
    return execParams
}
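// For a machine with id "0" and internal address 10.0.1.5 (an invented
// example), the result above would look like:
//
//	&RemoteExec{
//	    ExecParams: ssh.ExecParams{Command: command, Timeout: timeout},
//	    MachineId:  "0",
//	    Host:       "ubuntu@10.0.1.5",
//	}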
// setOneMachineNetworkConfig translates the given network config into state
// link-layer device and address arguments and applies them to the machine.
func (api *MachinerAPI) setOneMachineNetworkConfig(m *state.Machine, networkConfig []params.NetworkConfig) error {
    devicesArgs, devicesAddrs := networkingcommon.NetworkConfigsToStateArgs(networkConfig)

    logger.Debugf("setting devices: %+v", devicesArgs)
    if err := m.SetParentLinkLayerDevicesBeforeTheirChildren(devicesArgs); err != nil {
        return errors.Trace(err)
    }

    logger.Debugf("setting addresses: %+v", devicesAddrs)
    if err := m.SetDevicesAddressesIdempotently(devicesAddrs); err != nil {
        return errors.Trace(err)
    }

    logger.Debugf("updated machine %q network config", m.Id())
    return nil
}
// AssertMachineEqual checks that the imported machine matches the original
// machine's identity, hardware, jobs, life, and agent tools.
func (s *MigrationImportSuite) AssertMachineEqual(c *gc.C, newMachine, oldMachine *state.Machine) {
    c.Assert(newMachine.Id(), gc.Equals, oldMachine.Id())
    c.Assert(newMachine.Principals(), jc.DeepEquals, oldMachine.Principals())
    c.Assert(newMachine.Series(), gc.Equals, oldMachine.Series())
    c.Assert(newMachine.ContainerType(), gc.Equals, oldMachine.ContainerType())
    newHardware, err := newMachine.HardwareCharacteristics()
    c.Assert(err, jc.ErrorIsNil)
    oldHardware, err := oldMachine.HardwareCharacteristics()
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(newHardware, jc.DeepEquals, oldHardware)
    c.Assert(newMachine.Jobs(), jc.DeepEquals, oldMachine.Jobs())
    c.Assert(newMachine.Life(), gc.Equals, oldMachine.Life())
    newTools, err := newMachine.AgentTools()
    c.Assert(err, jc.ErrorIsNil)
    oldTools, err := oldMachine.AgentTools()
    c.Assert(err, jc.ErrorIsNil)
    c.Assert(newTools, jc.DeepEquals, oldTools)
}
// makeMachineStatus composes a params.MachineStatus for the given machine.
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
    status.Id = machine.Id()
    agentStatus := processMachine(machine)
    status.Agent = agentStatus
    status.Series = machine.Series()
    status.Jobs = paramsJobsFromJobs(machine.Jobs())
    status.WantsVote = machine.WantsVote()
    status.HasVote = machine.HasVote()
    instid, err := machine.InstanceId()
    if err == nil {
        status.InstanceId = instid
        status.InstanceState, err = machine.InstanceStatus()
        if err != nil {
            status.InstanceState = "error"
        }
        addr, err := machine.PublicAddress()
        if err != nil {
            // Usually this indicates that no addresses have been set on the
            // machine yet.
            addr = network.Address{}
            logger.Debugf("error fetching public address: %q", err)
        }
        status.DNSName = addr.Value
    } else {
        if errors.IsNotProvisioned(err) {
            status.InstanceId = "pending"
        } else {
            status.InstanceId = "error"
        }
    }
    hc, err := machine.HardwareCharacteristics()
    if err != nil {
        if !errors.IsNotFound(err) {
            status.Hardware = "error"
        }
    } else {
        status.Hardware = hc.String()
    }
    status.Containers = make(map[string]params.MachineStatus)
    return
}
// makeMachineStatus composes an api.MachineStatus for the given machine,
// including the legacy agent-state fields.
func (context *statusContext) makeMachineStatus(machine *state.Machine) (status api.MachineStatus) {
    status.Id = machine.Id()
    status.Agent, status.AgentState, status.AgentStateInfo = processAgent(machine)
    status.AgentVersion = status.Agent.Version
    status.Life = status.Agent.Life
    status.Err = status.Agent.Err
    status.Series = machine.Series()
    status.Jobs = paramsJobsFromJobs(machine.Jobs())
    status.WantsVote = machine.WantsVote()
    status.HasVote = machine.HasVote()
    instid, err := machine.InstanceId()
    if err == nil {
        status.InstanceId = instid
        status.InstanceState, err = machine.InstanceStatus()
        if err != nil {
            status.InstanceState = "error"
        }
        status.DNSName = network.SelectPublicAddress(machine.Addresses())
    } else {
        if state.IsNotProvisionedError(err) {
            status.InstanceId = "pending"
        } else {
            status.InstanceId = "error"
        }
        // There's no point in reporting a pending agent state
        // if the machine hasn't been provisioned. This
        // also makes unprovisioned machines visually distinct
        // in the output.
        status.AgentState = ""
    }
    hc, err := machine.HardwareCharacteristics()
    if err != nil {
        if !errors.IsNotFound(err) {
            status.Hardware = "error"
        }
    } else {
        status.Hardware = hc.String()
    }
    status.Containers = make(map[string]api.MachineStatus)
    return
}
// createContainer starts the host machine's instance, runs a container
// worker for it, and adds a container that the host agent must not try
// to create itself.
func (s *ContainerSetupSuite) createContainer(c *gc.C, host *state.Machine, ctype instance.ContainerType) {
    inst := s.checkStartInstanceNoSecureConnection(c, host)
    s.setupContainerWorker(c, host.Tag().(names.MachineTag))

    // Make a container on the host machine.
    template := state.MachineTemplate{
        Series: coretesting.FakeDefaultSeries,
        Jobs:   []state.MachineJob{state.JobHostUnits},
    }
    container, err := s.State.AddMachineInsideMachine(template, host.Id(), ctype)
    c.Assert(err, jc.ErrorIsNil)

    // The host machine agent should not attempt to create the container.
    s.checkNoOperations(c)

    // Cleanup.
    c.Assert(container.EnsureDead(), gc.IsNil)
    c.Assert(container.Remove(), gc.IsNil)
    c.Assert(host.EnsureDead(), gc.IsNil)
    s.checkStopInstances(c, inst)
    s.waitRemoved(c, host)
}
// allocateAddress tries to pick an address out of the given subnet and
// allocates it to the container.
func (p *ProvisionerAPI) allocateAddress(
    environ environs.NetworkingEnviron,
    subnet *state.Subnet,
    host, container *state.Machine,
    instId instance.Id,
    macAddress string,
) (*state.IPAddress, error) {
    subnetId := network.Id(subnet.ProviderId())
    name := names.NewMachineTag(container.Id()).String()
    for {
        addr, err := subnet.PickNewAddress()
        if err != nil {
            return nil, err
        }
        logger.Tracef("picked new address %q on subnet %q", addr.String(), subnetId)
        // Attempt to allocate with environ.
        err = environ.AllocateAddress(instId, subnetId, addr.Address(), macAddress, name)
        if err != nil {
            logger.Warningf(
                "allocating address %q on instance %q and subnet %q failed: %v (retrying)",
                addr.String(), instId, subnetId, err,
            )
            // It's as good as unavailable for us, so mark it as such.
            err = setAddrState(addr, state.AddressStateUnavailable)
            if err != nil {
                logger.Warningf(
                    "cannot set address %q to %q: %v (ignoring and retrying)",
                    addr.String(), state.AddressStateUnavailable, err,
                )
                continue
            }
            logger.Tracef(
                "setting address %q to %q and retrying",
                addr.String(), state.AddressStateUnavailable,
            )
            continue
        }
        logger.Infof(
            "allocated address %q on instance %q and subnet %q",
            addr.String(), instId, subnetId,
        )
        err = p.setAllocatedOrRelease(addr, environ, instId, container, subnetId, macAddress)
        if err != nil {
            // Something went wrong - retry.
            continue
        }
        return addr, nil
    }
}
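// A stripped-down sketch of the allocation loop above, to make the control
// flow easier to follow; pick, allocate, and markUnavailable are hypothetical
// stand-ins for the subnet, environ, and state calls used in allocateAddress:
func allocateWithRetry(
    pick func() (string, error),
    allocate func(string) error,
    markUnavailable func(string),
) (string, error) {
    for {
        addr, err := pick()
        if err != nil {
            // No more candidate addresses to try.
            return "", err
        }
        if err := allocate(addr); err != nil {
            // The address is as good as unavailable; mark it and retry.
            markUnavailable(addr)
            continue
        }
        return addr, nil
    }
}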
// checkStartInstanceCustom waits for the provisioner to start an instance
// for the machine and verifies that it was started with the expected
// parameters, jobs, and tools.
func (s *CommonProvisionerSuite) checkStartInstanceCustom(
    c *gc.C, m *state.Machine,
    secret string,
    cons constraints.Value,
    networks []string,
    networkInfo []network.InterfaceInfo,
    subnetsToZones map[network.Id][]string,
    volumes []storage.Volume,
    secureServerConnection bool,
    checkPossibleTools coretools.List,
    waitInstanceId bool,
) (
    inst instance.Instance,
) {
    s.BackingState.StartSync()
    for {
        select {
        case o := <-s.op:
            switch o := o.(type) {
            case dummy.OpStartInstance:
                inst = o.Instance
                if waitInstanceId {
                    s.waitInstanceId(c, m, inst.Id())
                }

                // Check the instance was started with the expected params.
                c.Assert(o.MachineId, gc.Equals, m.Id())
                nonceParts := strings.SplitN(o.MachineNonce, ":", 2)
                c.Assert(nonceParts, gc.HasLen, 2)
                c.Assert(nonceParts[0], gc.Equals, names.NewMachineTag("0").String())
                c.Assert(nonceParts[1], jc.Satisfies, utils.IsValidUUIDString)
                c.Assert(o.Secret, gc.Equals, secret)
                c.Assert(o.SubnetsToZones, jc.DeepEquals, subnetsToZones)
                c.Assert(o.Networks, jc.DeepEquals, networks)
                c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo)
                c.Assert(o.Volumes, jc.DeepEquals, volumes)
                c.Assert(o.AgentEnvironment["SECURE_STATESERVER_CONNECTION"], gc.Equals, strconv.FormatBool(secureServerConnection))

                var jobs []multiwatcher.MachineJob
                for _, job := range m.Jobs() {
                    jobs = append(jobs, job.ToParams())
                }
                c.Assert(o.Jobs, jc.SameContents, jobs)

                if checkPossibleTools != nil {
                    for _, t := range o.PossibleTools {
                        url := fmt.Sprintf("https://%s/environment/%s/tools/%s",
                            s.st.Addr(), coretesting.EnvironmentTag.Id(), t.Version)
                        c.Check(t.URL, gc.Equals, url)
                        t.URL = ""
                    }
                    for _, t := range checkPossibleTools {
                        t.URL = ""
                    }
                    c.Assert(o.PossibleTools, gc.DeepEquals, checkPossibleTools)
                }

                // All provisioned machines in this test suite have
                // their hardware characteristics attributes set to
                // the same values as the constraints due to the dummy
                // environment being used.
                if !constraints.IsEmpty(&cons) {
                    c.Assert(o.Constraints, gc.DeepEquals, cons)
                    hc, err := m.HardwareCharacteristics()
                    c.Assert(err, jc.ErrorIsNil)
                    c.Assert(*hc, gc.DeepEquals, instance.HardwareCharacteristics{
                        Arch:     cons.Arch,
                        Mem:      cons.Mem,
                        RootDisk: cons.RootDisk,
                        CpuCores: cons.CpuCores,
                        CpuPower: cons.CpuPower,
                        Tags:     cons.Tags,
                    })
                }
                return
            default:
                c.Logf("ignoring unexpected operation %#v", o)
            }
        case <-time.After(2 * time.Second):
            c.Fatalf("provisioner did not start an instance")
            return
        }
    }
}
// newAgent returns a new MachineAgent instance
func (s *commonMachineSuite) newAgent(c *gc.C, m *state.Machine) *MachineAgent {
    agentConf := agentConf{dataDir: s.DataDir()}
    err := agentConf.ReadConfig(names.NewMachineTag(m.Id()).String())
    c.Assert(err, jc.ErrorIsNil)
    machineAgentFactory := NewTestMachineAgentFactory(&agentConf, nil, c.MkDir())
    return machineAgentFactory(m.Id())
}
// startInstance starts a new instance for the given machine.
func (s *FirewallerSuite) startInstance(c *gc.C, m *state.Machine) instance.Instance {
    inst, hc := testing.AssertStartInstance(c, s.Environ, m.Id())
    err := m.SetProvisioned(inst.Id(), "fake_nonce", hc)
    c.Assert(err, gc.IsNil)
    return inst
}
func (s *SSHCommonSuite) setAddresses(m *state.Machine, c *gc.C) { addrPub := network.NewAddress(fmt.Sprintf("dummyenv-%s.dns", m.Id()), network.ScopePublic) addrPriv := network.NewAddress(fmt.Sprintf("dummyenv-%s.internal", m.Id()), network.ScopeCloudLocal) err := m.SetAddresses(addrPub, addrPriv) c.Assert(err, gc.IsNil) }
// setKeys records fake DSA and RSA SSH host keys for the machine.
func (s *SSHCommonSuite) setKeys(c *gc.C, m *state.Machine) {
    id := m.Id()
    keys := state.SSHHostKeys{"dsa-" + id, "rsa-" + id}
    err := s.State.SetSSHHostKeys(m.MachineTag(), keys)
    c.Assert(err, jc.ErrorIsNil)
}
// newAgent returns a new MachineAgent for the given machine.
// TODO(mjs) - the following should maybe be part of AgentSuite.
func (s *upgradeSuite) newAgent(c *gc.C, m *state.Machine) *agentcmd.MachineAgent {
    agentConf := agentcmd.NewAgentConf(s.DataDir())
    err := agentConf.ReadConfig(m.Tag().String())
    c.Assert(err, jc.ErrorIsNil)
    machineAgentFactory := agentcmd.MachineAgentFactoryFn(agentConf, nil, c.MkDir())
    return machineAgentFactory(m.Id())
}