// newAgent returns a new MachineAgent instance.
func (s *commonMachineSuite) newAgent(c *gc.C, m *state.Machine) *MachineAgent {
	a := &MachineAgent{}
	s.initAgent(c, a, "--machine-id", m.Id())
	err := a.ReadConfig(m.Tag().String())
	c.Assert(err, gc.IsNil)
	return a
}
// machineVolumeParams retrieves VolumeParams for the volumes that should be
// provisioned with, and attached to, the machine. The client should ignore
// parameters that it does not know how to handle.
func (p *ProvisionerAPI) machineVolumeParams(m *state.Machine) ([]params.VolumeParams, error) {
	volumeAttachments, err := m.VolumeAttachments()
	if err != nil {
		return nil, err
	}
	if len(volumeAttachments) == 0 {
		return nil, nil
	}
	envConfig, err := p.st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	poolManager := poolmanager.New(state.NewStateSettings(p.st))
	allVolumeParams := make([]params.VolumeParams, 0, len(volumeAttachments))
	for _, volumeAttachment := range volumeAttachments {
		volumeTag := volumeAttachment.Volume()
		volume, err := p.st.Volume(volumeTag)
		if err != nil {
			return nil, errors.Annotatef(err, "getting volume %q", volumeTag.Id())
		}
		storageInstance, err := storagecommon.MaybeAssignedStorageInstance(
			volume.StorageInstance, p.st.StorageInstance,
		)
		if err != nil {
			return nil, errors.Annotatef(err, "getting volume %q storage instance", volumeTag.Id())
		}
		volumeParams, err := storagecommon.VolumeParams(volume, storageInstance, envConfig, poolManager)
		if err != nil {
			return nil, errors.Annotatef(err, "getting volume %q parameters", volumeTag.Id())
		}
		provider, err := registry.StorageProvider(storage.ProviderType(volumeParams.Provider))
		if err != nil {
			return nil, errors.Annotate(err, "getting storage provider")
		}
		if provider.Dynamic() {
			// Leave dynamic storage to the storage provisioner.
			continue
		}
		volumeAttachmentParams, ok := volumeAttachment.Params()
		if !ok {
			// Attachment is already provisioned; this is an insane
			// state, so we should not proceed with the volume.
			return nil, errors.Errorf(
				"volume %s already attached to machine %s",
				volumeTag.Id(), m.Id(),
			)
		}
		// Not provisioned yet, so ask the cloud provisioner to do it.
		volumeParams.Attachment = &params.VolumeAttachmentParams{
			volumeTag.String(),
			m.Tag().String(),
			"", // we're creating the volume, so it has no volume ID.
			"", // we're creating the machine, so it has no instance ID.
			volumeParams.Provider,
			volumeAttachmentParams.ReadOnly,
		}
		allVolumeParams = append(allVolumeParams, volumeParams)
	}
	return allVolumeParams, nil
}
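// A minimal, hypothetical caller sketch (not part of the Juju API): it shows
// how provisioning code might consume machineVolumeParams while honouring the
// doc comment's contract that parameters with an unrecognised provider are
// skipped rather than treated as errors. The name volumesForProvisioning and
// the knownProvider predicate are assumptions made for illustration.
func (p *ProvisionerAPI) volumesForProvisioning(m *state.Machine, knownProvider func(string) bool) ([]params.VolumeParams, error) {
	all, err := p.machineVolumeParams(m)
	if err != nil {
		return nil, errors.Trace(err)
	}
	known := make([]params.VolumeParams, 0, len(all))
	for _, vp := range all {
		if !knownProvider(vp.Provider) {
			// Per the contract above, ignore parameters we do not
			// know how to handle.
			continue
		}
		known = append(known, vp)
	}
	return known, nil
}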
func (s *clientSuite) assertRetryProvisioning(c *gc.C, machine *state.Machine) {
	_, err := s.APIState.Client().RetryProvisioning(machine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	statusInfo, err := machine.Status()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(statusInfo.Status, gc.Equals, status.Error)
	c.Assert(statusInfo.Message, gc.Equals, "error")
	c.Assert(statusInfo.Data["transient"], jc.IsTrue)
}
func (s *CommonProvisionerSuite) APILogin(c *gc.C, machine *state.Machine) {
	if s.st != nil {
		c.Assert(s.st.Close(), gc.IsNil)
	}
	password, err := utils.RandomPassword()
	c.Assert(err, gc.IsNil)
	err = machine.SetPassword(password)
	c.Assert(err, gc.IsNil)
	err = machine.SetProvisioned("i-fake", "fake_nonce", nil)
	c.Assert(err, gc.IsNil)
	s.st = s.OpenAPIAsMachine(c, machine.Tag(), password, "fake_nonce")
	c.Assert(s.st, gc.NotNil)
	c.Logf("API: login as %q successful", machine.Tag())
	s.provisioner = s.st.Provisioner()
	c.Assert(s.provisioner, gc.NotNil)
}
func (s *ContainerSetupSuite) assertContainerProvisionerStarted(
	c *gc.C, host *state.Machine, ctype instance.ContainerType) {

	// A stub worker callback to record what happens.
	provisionerStarted := false
	startProvisionerWorker := func(runner worker.Runner, containerType instance.ContainerType,
		pr *apiprovisioner.State, cfg agent.Config, broker environs.InstanceBroker) error {
		c.Assert(containerType, gc.Equals, ctype)
		c.Assert(cfg.Tag(), gc.Equals, host.Tag())
		provisionerStarted = true
		return nil
	}
	s.PatchValue(&provisioner.StartProvisioner, startProvisionerWorker)

	s.createContainer(c, host, ctype)
	// Consume the apt command used to initialise the container.
	<-s.aptCmdChan

	// The container worker should have created the provisioner.
	c.Assert(provisionerStarted, jc.IsTrue)
}
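// For readers unfamiliar with the stubbing technique above: PatchValue swaps
// a package-level function variable (here provisioner.StartProvisioner) for a
// test double and restores the original on teardown. A stripped-down,
// self-contained sketch of the same idea using only the standard testing
// package follows; every name in it is invented for illustration.
package stub_test

import "testing"

// startWorker stands in for a package-level hook such as
// provisioner.StartProvisioner.
var startWorker = func(id string) error { return nil }

func TestStartWorkerIsCalled(t *testing.T) {
	called := false
	orig := startWorker
	startWorker = func(id string) error {
		called = true
		return nil
	}
	defer func() { startWorker = orig }() // restore, like PatchValue's cleanup

	_ = startWorker("0") // the code under test would invoke the hook
	if !called {
		t.Fatal("expected stub to be called")
	}
}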
func (s *ContainerSetupSuite) createContainer(c *gc.C, host *state.Machine, ctype instance.ContainerType) {
	inst := s.checkStartInstanceNoSecureConnection(c, host)
	s.setupContainerWorker(c, host.Tag().(names.MachineTag))

	// Make a container on the host machine.
	template := state.MachineTemplate{
		Series: coretesting.FakeDefaultSeries,
		Jobs:   []state.MachineJob{state.JobHostUnits},
	}
	container, err := s.State.AddMachineInsideMachine(template, host.Id(), ctype)
	c.Assert(err, jc.ErrorIsNil)

	// The host machine agent should not attempt to create the container.
	s.checkNoOperations(c)

	// Cleanup.
	c.Assert(container.EnsureDead(), gc.IsNil)
	c.Assert(container.Remove(), gc.IsNil)
	c.Assert(host.EnsureDead(), gc.IsNil)
	s.checkStopInstances(c, inst)
	s.waitRemoved(c, host)
}
func (s *rebootSuite) setUpMachine(c *gc.C, machine *state.Machine) *machines {
	// Create a FakeAuthorizer so we can check permissions;
	// set it up as if we had logged in as a machine agent.
	authorizer := apiservertesting.FakeAuthorizer{
		Tag: machine.Tag(),
	}
	resources := common.NewResources()
	rebootAPI, err := reboot.NewRebootAPI(s.State, resources, authorizer)
	c.Assert(err, jc.ErrorIsNil)

	args := params.Entities{Entities: []params.Entity{
		{Tag: machine.Tag().String()},
	}}

	resultMachine, err := rebootAPI.WatchForRebootEvent()
	c.Assert(err, jc.ErrorIsNil)
	c.Check(resultMachine.NotifyWatcherId, gc.Not(gc.Equals), "")
	c.Check(resultMachine.Error, gc.IsNil)

	resourceMachine := resources.Get(resultMachine.NotifyWatcherId)
	c.Check(resourceMachine, gc.NotNil)

	w := resourceMachine.(state.NotifyWatcher)
	wc := statetesting.NewNotifyWatcherC(c, s.State, w)
	wc.AssertNoChange()

	return &machines{
		machine:    machine,
		authorizer: authorizer,
		resources:  resources,
		rebootAPI:  rebootAPI,
		args:       args,
		w:          w,
		wc:         wc,
	}
}
// allocateAddress tries to pick an address out of the given subnet and
// allocate it to the container.
func (p *ProvisionerAPI) allocateAddress(
	environ environs.NetworkingEnviron,
	subnet *state.Subnet,
	host, container *state.Machine,
	instId instance.Id,
	macAddress string,
) (*state.IPAddress, error) {
	hostname := containerHostname(container.Tag())

	if !environs.AddressAllocationEnabled() {
		// Even if the address allocation feature flag is not enabled, we
		// might be running on MAAS 1.8+ with devices support, which we can
		// use to register containers getting IPs via DHCP. However, most of
		// the usual allocation code can be bypassed; we just need the parent
		// instance ID and a MAC address (no subnet or IP address).
		allocatedAddress := network.Address{}
		err := environ.AllocateAddress(instId, network.AnySubnet, &allocatedAddress, macAddress, hostname)
		if err != nil {
			// Not using MAAS 1.8+ or some other error.
			return nil, errors.Trace(err)
		}
		logger.Infof(
			"allocated address %q on instance %q for container %q",
			allocatedAddress.String(), instId, hostname,
		)
		// Add the address to state, so we can look it up later by MAC address.
		stateAddr, err := p.st.AddIPAddress(allocatedAddress, string(network.AnySubnet))
		if err != nil {
			return nil, errors.Annotatef(err, "failed to save address %q", allocatedAddress)
		}
		err = p.setAllocatedOrRelease(stateAddr, environ, instId, container, network.AnySubnet, macAddress)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return stateAddr, nil
	}

	subnetId := network.Id(subnet.ProviderId())
	for {
		addr, err := subnet.PickNewAddress()
		if err != nil {
			return nil, err
		}
		netAddr := addr.Address()
		logger.Tracef("picked new address %q on subnet %q", addr.String(), subnetId)
		// Attempt to allocate with environ.
		err = environ.AllocateAddress(instId, subnetId, &netAddr, macAddress, hostname)
		if err != nil {
			logger.Warningf(
				"allocating address %q on instance %q and subnet %q failed: %v (retrying)",
				addr.String(), instId, subnetId, err,
			)
			// It's as good as unavailable for us, so mark it as such.
			err = setAddrState(addr, state.AddressStateUnavailable)
			if err != nil {
				logger.Warningf(
					"cannot set address %q to %q: %v (ignoring and retrying)",
					addr.String(), state.AddressStateUnavailable, err,
				)
				continue
			}
			logger.Tracef(
				"setting address %q to %q and retrying",
				addr.String(), state.AddressStateUnavailable,
			)
			continue
		}
		logger.Infof(
			"allocated address %q on instance %q and subnet %q",
			addr.String(), instId, subnetId,
		)
		err = p.setAllocatedOrRelease(addr, environ, instId, container, subnetId, macAddress)
		if err != nil {
			// Something went wrong - retry.
			continue
		}
		return addr, nil
	}
}
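// The loop above follows a pick / try-to-allocate / mark-unavailable / retry
// pattern. A stripped-down, self-contained sketch of the same control flow,
// detached from the Juju types; every name here is hypothetical and purely
// for illustration:
func allocateWithRetry(
	pick func() (string, error), // yields candidate addresses
	allocate func(string) error, // attempts the provider-side allocation
	markBad func(string), // records a candidate as unavailable
) (string, error) {
	for {
		addr, err := pick()
		if err != nil {
			// No more candidates: give up and surface the error.
			return "", err
		}
		if err := allocate(addr); err != nil {
			// The address is as good as unavailable for us, so
			// record that and try the next candidate.
			markBad(addr)
			continue
		}
		return addr, nil
	}
}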
func (s *clientSuite) assertRetryProvisioningBlocked(c *gc.C, machine *state.Machine, msg string) {
	_, err := s.APIState.Client().RetryProvisioning(machine.Tag().(names.MachineTag))
	s.AssertBlocked(c, err, msg)
}
// TODO(mjs) - the following should maybe be part of AgentSuite
func (s *upgradeSuite) newAgent(c *gc.C, m *state.Machine) *agentcmd.MachineAgent {
	agentConf := agentcmd.NewAgentConf(s.DataDir())
	err := agentConf.ReadConfig(m.Tag().String())
	c.Assert(err, jc.ErrorIsNil)
	machineAgentFactory := agentcmd.MachineAgentFactoryFn(agentConf, nil, c.MkDir())
	return machineAgentFactory(m.Id())
}