// createParamsStorageAttachment builds a params.StorageDetails for the given
// storage attachment, seeded from the storage-instance details si. The status
// starts as "pending" and is upgraded to "attached" once a non-empty location
// is known for the attachment.
//
// NOTE(review): the tag mismatch check panics rather than returning an error;
// it is treated as a programming-error invariant — confirm that is intended.
func (api *API) createParamsStorageAttachment(si params.StorageDetails, sa state.StorageAttachment) (params.StorageDetails, error) {
	result := params.StorageDetails{Status: "pending"}
	result.StorageTag = sa.StorageInstance().String()
	if result.StorageTag != si.StorageTag {
		panic("attachment does not belong to storage instance")
	}
	result.UnitTag = sa.Unit().String()
	result.OwnerTag = si.OwnerTag
	result.Kind = si.Kind
	result.Persistent = si.Persistent
	// TODO(axw) set status according to whether storage has been provisioned.

	// This is only for provisioned attachments
	machineTag, err := api.storage.UnitAssignedMachine(sa.Unit())
	if err != nil {
		return params.StorageDetails{}, errors.Annotate(err, "getting unit for storage attachment")
	}
	info, err := common.StorageAttachmentInfo(api.storage, sa, machineTag)
	if err != nil {
		if errors.IsNotProvisioned(err) {
			// If Info returns an error, then the storage has not yet been provisioned.
			return result, nil
		}
		return params.StorageDetails{}, errors.Annotate(err, "getting storage attachment info")
	}
	result.Location = info.Location
	if result.Location != "" {
		result.Status = "attached"
	}
	return result, nil
}
// ServiceInstances returns the instance IDs of provisioned // machines that are assigned units of the specified service. func ServiceInstances(st *State, service string) ([]instance.Id, error) { units, err := allUnits(st, service) if err != nil { return nil, err } instanceIds := make([]instance.Id, 0, len(units)) for _, unit := range units { machineId, err := unit.AssignedMachineId() if errors.IsNotAssigned(err) { continue } else if err != nil { return nil, err } machine, err := st.Machine(machineId) if err != nil { return nil, err } instanceId, err := machine.InstanceId() if err == nil { instanceIds = append(instanceIds, instanceId) } else if errors.IsNotProvisioned(err) { continue } else { return nil, err } } return instanceIds, nil }
// isVolumeInherentlyMachineBound reports whether or not the volume with the
// specified tag is inherently bound to the lifetime of the machine, and will
// be removed along with it, leaving no resources dangling.
func isVolumeInherentlyMachineBound(st *State, tag names.VolumeTag) (bool, error) {
	volume, err := st.Volume(tag)
	if err != nil {
		return false, errors.Trace(err)
	}
	volumeInfo, err := volume.Info()
	if errors.IsNotProvisioned(err) {
		// Unprovisioned: decide from the creation parameters instead.
		// NOTE(review): the ok result of Params() is discarded; an
		// unprovisioned volume is presumably guaranteed to have params —
		// confirm.
		params, _ := volume.Params()
		_, provider, err := poolStorageProvider(st, params.Pool)
		if err != nil {
			return false, errors.Trace(err)
		}
		if provider.Scope() == storage.ScopeMachine {
			// Any storage created by a machine must be destroyed
			// along with the machine.
			return true, nil
		}
		if provider.Dynamic() {
			// We don't know ahead of time whether the storage
			// will be Persistent, so we assume it will be, and
			// rely on the environment-level storage provisioner
			// to clean up.
			return false, nil
		}
		// Volume is static, so even if it is provisioned, it will
		// be tied to the machine.
		return true, nil
	} else if err != nil {
		return false, errors.Trace(err)
	}
	// If volume does not outlive machine it can be removed.
	return !volumeInfo.Persistent, nil
}
// SetFilesystemStatus sets the status of the specified filesystem.
//
// Attaching/attached/detaching/detached/destroying are always accepted;
// StatusError requires a non-empty info message; StatusPending is accepted
// only while the filesystem is still unprovisioned (to allow a retry).
// Anything else is rejected as invalid.
func (st *State) SetFilesystemStatus(tag names.FilesystemTag, status Status, info string, data map[string]interface{}) error {
	switch status {
	case StatusAttaching, StatusAttached, StatusDetaching, StatusDetached, StatusDestroying:
	case StatusError:
		if info == "" {
			return errors.Errorf("cannot set status %q without info", status)
		}
	case StatusPending:
		// If a filesystem is not yet provisioned, we allow its status
		// to be set back to pending (when a retry is to occur).
		v, err := st.Filesystem(tag)
		if err != nil {
			return errors.Trace(err)
		}
		_, err = v.Info()
		if errors.IsNotProvisioned(err) {
			break
		}
		// Already provisioned (or Info failed for another reason):
		// pending is no longer a legal status.
		return errors.Errorf("cannot set status %q", status)
	default:
		return errors.Errorf("cannot set invalid status %q", status)
	}
	return setStatus(st, setStatusParams{
		badge:     "filesystem",
		globalKey: filesystemGlobalKey(tag.Id()),
		status:    status,
		message:   info,
		rawData:   data,
	})
}
// upgradingFilesystemStatus derives a display status for a filesystem:
// If the filesystem has not been provisioned, then it should be Pending;
// if it has been provisioned, but there is an unprovisioned attachment, then
// it should be Attaching; otherwise it is Attached.
func upgradingFilesystemStatus(st *State, filesystem Filesystem) (status.Status, error) {
	if _, err := filesystem.Info(); errors.IsNotProvisioned(err) {
		return status.StatusPending, nil
	}
	attachments, err := st.FilesystemAttachments(filesystem.FilesystemTag())
	if err != nil {
		return "", errors.Trace(err)
	}
	for _, attachment := range attachments {
		// NOTE(review): errors from attachment.Info() other than
		// not-provisioned are silently ignored here — confirm intended.
		_, err := attachment.Info()
		if errors.IsNotProvisioned(err) {
			return status.StatusAttaching, nil
		}
	}
	return status.StatusAttached, nil
}
// SetStatus sets the status of the machine.
//
// Started/stopped are always accepted; StatusError requires a non-empty info
// message; StatusPending is accepted only while the machine is unprovisioned
// (to allow a retry); StatusDown can never be set directly. Anything else is
// rejected as invalid.
func (m *Machine) SetStatus(status Status, info string, data map[string]interface{}) error {
	switch status {
	case StatusStarted, StatusStopped:
	case StatusError:
		if info == "" {
			return errors.Errorf("cannot set status %q without info", status)
		}
	case StatusPending:
		// If a machine is not yet provisioned, we allow its status
		// to be set back to pending (when a retry is to occur).
		_, err := m.InstanceId()
		allowPending := errors.IsNotProvisioned(err)
		if allowPending {
			break
		}
		// Provisioned machine: deliberately fall through to the
		// same rejection used for StatusDown.
		fallthrough
	case StatusDown:
		return errors.Errorf("cannot set status %q", status)
	default:
		return errors.Errorf("cannot set invalid status %q", status)
	}
	return setStatus(m.st, setStatusParams{
		badge:     "machine",
		globalKey: m.globalKey(),
		status:    status,
		message:   info,
		rawData:   data,
	})
}
func (api *MachinerAPI) SetObservedNetworkConfig(args params.SetMachineNetworkConfig) error { m, err := api.getMachineForSettingNetworkConfig(args.Tag) if err != nil { return errors.Trace(err) } if m.IsContainer() { return nil } observedConfig := args.Config logger.Tracef("observed network config of machine %q: %+v", m.Id(), observedConfig) if len(observedConfig) == 0 { logger.Infof("not updating machine network config: no observed network config found") return nil } providerConfig, err := api.getOneMachineProviderNetworkConfig(m) if errors.IsNotProvisioned(err) { logger.Infof("not updating provider network config: %v", err) return nil } if err != nil { return errors.Trace(err) } if len(providerConfig) == 0 { logger.Infof("not updating machine network config: no provider network config found") return nil } mergedConfig := networkingcommon.MergeProviderAndObservedNetworkConfigs(providerConfig, observedConfig) logger.Tracef("merged observed and provider network config: %+v", mergedConfig) return api.setOneMachineNetworkConfig(m, mergedConfig) }
// isVolumeInherentlyMachineBound reports whether or not the volume with the
// specified tag is inherently bound to the lifetime of the machine, and will
// be removed along with it, leaving no resources dangling.
func isVolumeInherentlyMachineBound(st *State, tag names.VolumeTag) (bool, error) {
	volume, err := st.Volume(tag)
	if err != nil {
		return false, errors.Trace(err)
	}
	volumeInfo, err := volume.Info()
	if errors.IsNotProvisioned(err) {
		// Unprovisioned: decide from the creation parameters instead.
		// NOTE(review): the ok result of Params() is discarded — confirm
		// an unprovisioned volume always has params.
		params, _ := volume.Params()
		_, provider, err := poolStorageProvider(st, params.Pool)
		if err != nil {
			return false, errors.Trace(err)
		}
		if provider.Dynamic() {
			// Even machine-scoped storage could be provisioned
			// while the machine is Dying, and we don't know at
			// this layer whether or not it will be Persistent.
			//
			// TODO(axw) extend storage provider interface to
			// determine up-front whether or not a volume will
			// be persistent. This will have to depend on the
			// machine type, since, e.g., loop devices will
			// outlive LXC containers.
			return false, nil
		}
		// Volume is static, so even if it is provisioned, it will
		// be tied to the machine.
		return true, nil
	} else if err != nil {
		return false, errors.Trace(err)
	}
	// If volume does not outlive machine it can be removed.
	return !volumeInfo.Persistent, nil
}
// ServerError returns an error suitable for returning to an API // client, with an error code suitable for various kinds of errors // generated in packages outside the API. func ServerError(err error) *params.Error { if err == nil { return nil } logger.Tracef("server RPC error %v", errors.Details(err)) msg := err.Error() // Skip past annotations when looking for the code. err = errors.Cause(err) code, ok := singletonCode(err) var info *params.ErrorInfo switch { case ok: case errors.IsUnauthorized(err): code = params.CodeUnauthorized case errors.IsNotFound(err): code = params.CodeNotFound case errors.IsUserNotFound(err): code = params.CodeUserNotFound case errors.IsAlreadyExists(err): code = params.CodeAlreadyExists case errors.IsNotAssigned(err): code = params.CodeNotAssigned case state.IsHasAssignedUnitsError(err): code = params.CodeHasAssignedUnits case state.IsHasHostedModelsError(err): code = params.CodeHasHostedModels case isNoAddressSetError(err): code = params.CodeNoAddressSet case errors.IsNotProvisioned(err): code = params.CodeNotProvisioned case IsUpgradeInProgressError(err): code = params.CodeUpgradeInProgress case state.IsHasAttachmentsError(err): code = params.CodeMachineHasAttachedStorage case isUnknownModelError(err): code = params.CodeModelNotFound case errors.IsNotSupported(err): code = params.CodeNotSupported case errors.IsBadRequest(err): code = params.CodeBadRequest case errors.IsMethodNotAllowed(err): code = params.CodeMethodNotAllowed default: if err, ok := err.(*DischargeRequiredError); ok { code = params.CodeDischargeRequired info = ¶ms.ErrorInfo{ Macaroon: err.Macaroon, // One macaroon fits all. MacaroonPath: "/", } break } code = params.ErrCode(err) } return ¶ms.Error{ Message: msg, Code: code, Info: info, } }
// SetVolumeStatus sets the status of the specified volume.
//
// Attaching/attached/detaching/detached/destroying are always accepted;
// Error requires a non-empty info message; Pending is accepted only while
// the volume is still unprovisioned (to allow a retry). Anything else is
// rejected as invalid.
func (st *State) SetVolumeStatus(tag names.VolumeTag, volumeStatus status.Status, info string, data map[string]interface{}, updated *time.Time) error {
	switch volumeStatus {
	case status.Attaching, status.Attached, status.Detaching, status.Detached, status.Destroying:
	case status.Error:
		if info == "" {
			return errors.Errorf("cannot set status %q without info", volumeStatus)
		}
	case status.Pending:
		// If a volume is not yet provisioned, we allow its status
		// to be set back to pending (when a retry is to occur).
		v, err := st.Volume(tag)
		if err != nil {
			return errors.Trace(err)
		}
		_, err = v.Info()
		if errors.IsNotProvisioned(err) {
			break
		}
		// Already provisioned: pending is no longer a legal status.
		return errors.Errorf("cannot set status %q", volumeStatus)
	default:
		return errors.Errorf("cannot set invalid status %q", volumeStatus)
	}
	return setStatus(st, setStatusParams{
		badge:     "volume",
		globalKey: volumeGlobalKey(tag.Id()),
		status:    volumeStatus,
		message:   info,
		rawData:   data,
		updated:   updated,
	})
}
// reconcileInstances compares the initially started watcher for machines,
// units and services with the opened and closed ports of the instances and
// opens and closes the appropriate ports for each instance.
func (fw *Firewaller) reconcileInstances() error {
	for _, machined := range fw.machineds {
		m, err := machined.machine()
		if params.IsCodeNotFound(err) {
			// Machine has gone away: drop our record of it.
			if err := fw.forgetMachine(machined); err != nil {
				return err
			}
			continue
		}
		if err != nil {
			return err
		}
		instanceId, err := m.InstanceId()
		if errors.IsNotProvisioned(err) {
			logger.Errorf("Machine not yet provisioned: %v", err)
			continue
		}
		if err != nil {
			return err
		}
		instances, err := fw.environ.Instances([]instance.Id{instanceId})
		if err == environs.ErrNoInstances {
			// NOTE(review): this returns nil and therefore ends
			// reconciliation for all remaining machines, not just this
			// one — confirm that is the intended behavior.
			return nil
		}
		if err != nil {
			return err
		}
		machineId := machined.tag.Id()
		initialPortRanges, err := instances[0].Ports(machineId)
		if err != nil {
			return err
		}
		// Check which ports to open or to close.
		toOpen := diffRanges(machined.openedPorts, initialPortRanges)
		toClose := diffRanges(initialPortRanges, machined.openedPorts)
		if len(toOpen) > 0 {
			logger.Infof("opening instance port ranges %v for %q",
				toOpen, machined.tag)
			if err := instances[0].OpenPorts(machineId, toOpen); err != nil {
				// TODO(mue) Add local retry logic.
				return err
			}
			network.SortPortRanges(toOpen)
		}
		if len(toClose) > 0 {
			logger.Infof("closing instance port ranges %v for %q",
				toClose, machined.tag)
			if err := instances[0].ClosePorts(machineId, toClose); err != nil {
				// TODO(mue) Add local retry logic.
				return err
			}
			network.SortPortRanges(toClose)
		}
	}
	return nil
}
// makeMachineStatus assembles the API-facing status of a single machine:
// agent status, instance status, addresses, and hardware characteristics.
// An unprovisioned machine reports InstanceId "pending"; any other
// InstanceId error reports "error".
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
	var err error
	status.Id = machine.Id()
	agentStatus := processMachine(machine)
	status.AgentStatus = agentStatus

	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	sInfo, err := machine.InstanceStatus()
	populateStatusFromStatusInfoAndErr(&status.InstanceStatus, sInfo, err)
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		addr, err := machine.PublicAddress()
		if err != nil {
			// Usually this indicates that no addresses have been set on the
			// machine yet.
			addr = network.Address{}
			logger.Debugf("error fetching public address: %q", err)
		}
		status.DNSName = addr.Value

		mAddrs := machine.Addresses()
		if len(mAddrs) == 0 {
			logger.Debugf("no IP addresses fetched for machine %q", instid)
			// At least give it the newly created DNSName address, if it exists.
			if addr.Value != "" {
				mAddrs = append(mAddrs, addr)
			}
		}
		for _, mAddr := range mAddrs {
			// Machine- and link-local addresses are not interesting
			// to API clients; skip them.
			switch mAddr.Scope {
			case network.ScopeMachineLocal, network.ScopeLinkLocal:
				continue
			}
			status.IPAddresses = append(status.IPAddresses, mAddr.Value)
		}
	} else {
		if errors.IsNotProvisioned(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]params.MachineStatus)
	return
}
// ModelMachineInfo returns information about machine hardware for
// alive top level machines (not containers).
//
// Containers are still listed, but without hardware details; hardware
// characteristics are reported only for physical (top-level) machines.
func ModelMachineInfo(st ModelManagerBackend) (machineInfo []params.ModelMachineInfo, _ error) {
	machines, err := st.AllMachines()
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, m := range machines {
		if m.Life() != state.Alive {
			continue
		}
		var status string
		statusInfo, err := MachineStatus(m)
		if err == nil {
			status = string(statusInfo.Status)
		} else {
			// Surface the status-lookup failure as the status text.
			status = err.Error()
		}
		mInfo := params.ModelMachineInfo{
			Id:        m.Id(),
			HasVote:   m.HasVote(),
			WantsVote: m.WantsVote(),
			Status:    status,
		}
		instId, err := m.InstanceId()
		switch {
		case err == nil:
			mInfo.InstanceId = string(instId)
		case errors.IsNotProvisioned(err):
			// ok, but no instance ID to get.
		default:
			return nil, errors.Trace(err)
		}
		if m.ContainerType() != "" && m.ContainerType() != instance.NONE {
			// Container: report it without hardware details.
			machineInfo = append(machineInfo, mInfo)
			continue
		}
		// Only include cores for physical machines.
		hw, err := m.HardwareCharacteristics()
		if err != nil && !errors.IsNotFound(err) {
			return nil, errors.Trace(err)
		}
		if hw != nil && hw.String() != "" {
			hwParams := &params.MachineHardware{
				Cores:            hw.CpuCores,
				Arch:             hw.Arch,
				Mem:              hw.Mem,
				RootDisk:         hw.RootDisk,
				CpuPower:         hw.CpuPower,
				Tags:             hw.Tags,
				AvailabilityZone: hw.AvailabilityZone,
			}
			mInfo.Hardware = hwParams
		}
		machineInfo = append(machineInfo, mInfo)
	}
	return machineInfo, nil
}
// AllocateTo sets the machine ID, MAC address and interface ID of the IP address.
// It will fail if the state is not AddressStateUnknown. On success,
// the address state will also change to AddressStateAllocated.
func (i *IPAddress) AllocateTo(machineId, interfaceId, macAddress string) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot allocate IP address %q to machine %q, interface %q", i, machineId, interfaceId)

	var instId instance.Id
	machine, err := i.st.Machine(machineId)
	if err != nil {
		return errors.Annotatef(err, "cannot get allocated machine %q", machineId)
	} else {
		instId, err = machine.InstanceId()

		if errors.IsNotProvisioned(err) {
			// The machine is not yet provisioned. The instance ID will be
			// set on provisioning.
			instId = instance.UnknownId
		} else if err != nil {
			return errors.Annotatef(err, "cannot get machine %q instance ID", machineId)
		}
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			// NOTE(review): the generic "err != nil" branch is checked
			// last, after the Life/State branches, so a non-NotFound
			// Refresh error is evaluated against possibly stale document
			// state first — confirm this ordering is intended.
			if err := i.Refresh(); errors.IsNotFound(err) {
				return nil, err
			} else if i.Life() == Dead {
				return nil, errors.New("address is dead")
			} else if i.State() != AddressStateUnknown {
				return nil, errors.Errorf("already allocated or unavailable")
			} else if err != nil {
				return nil, err
			}
		}
		return []txn.Op{{
			C:      ipaddressesC,
			Id:     i.doc.DocID,
			Assert: append(isAliveDoc, bson.DocElem{"state", AddressStateUnknown}),
			Update: bson.D{{"$set", bson.D{
				{"machineid", machineId},
				{"interfaceid", interfaceId},
				{"instanceid", instId},
				{"macaddress", macAddress},
				{"state", string(AddressStateAllocated)},
			}}},
		}}, nil
	}

	err = i.st.run(buildTxn)
	if err != nil {
		return err
	}
	// Transaction succeeded: mirror the new values on the in-memory doc.
	i.doc.MachineId = machineId
	i.doc.MACAddress = macAddress
	i.doc.InterfaceId = interfaceId
	i.doc.State = AddressStateAllocated
	i.doc.InstanceId = string(instId)
	return nil
}
// makeMachineStatus assembles the API-facing status of a single machine,
// including the legacy compatibility fields. An unprovisioned machine
// reports InstanceId "pending" and a blank AgentState; any other InstanceId
// error reports "error".
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
	status.Id = machine.Id()
	agentStatus, compatStatus := processMachine(machine)
	status.Agent = agentStatus

	// These legacy status values will be deprecated for Juju 2.0.
	status.AgentState = compatStatus.Status
	status.AgentStateInfo = compatStatus.Info
	status.AgentVersion = compatStatus.Version
	status.Life = compatStatus.Life
	status.Err = compatStatus.Err

	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		status.InstanceState, err = machine.InstanceStatus()
		if err != nil {
			status.InstanceState = "error"
		}
		addr, err := machine.PublicAddress()
		if err != nil {
			// Usually this indicates that no addresses have been set on the
			// machine yet.
			addr = network.Address{}
			logger.Warningf("error fetching public address: %q", err)
		}
		status.DNSName = addr.Value
	} else {
		if errors.IsNotProvisioned(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
		// There's no point in reporting a pending agent state
		// if the machine hasn't been provisioned. This
		// also makes unprovisioned machines visually distinct
		// in the output.
		status.AgentState = ""
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]params.MachineStatus)
	return
}
// waitInstanceId waits until the supplied machine has an instance id, then
// asserts it is as expected.
//
// Test helper: polls via waitHardwareCharacteristics; any InstanceId error
// other than not-provisioned is treated as a test bug and panics.
func (s *CommonProvisionerSuite) waitInstanceId(c *gc.C, m *state.Machine, expect instance.Id) {
	s.waitHardwareCharacteristics(c, m, func() bool {
		if actual, err := m.InstanceId(); err == nil {
			c.Assert(actual, gc.Equals, expect)
			return true
		} else if !errors.IsNotProvisioned(err) {
			// We don't expect any errors.
			panic(err)
		}
		c.Logf("machine %v is still unprovisioned", m)
		return false
	})
}
func storageAttachmentInfo(st storageAccess, a state.StorageAttachment) (_ names.MachineTag, location string, _ error) { machineTag, err := st.UnitAssignedMachine(a.Unit()) if errors.IsNotAssigned(err) { return names.MachineTag{}, "", nil } else if err != nil { return names.MachineTag{}, "", errors.Trace(err) } info, err := storagecommon.StorageAttachmentInfo(st, a, machineTag) if errors.IsNotProvisioned(err) { return machineTag, "", nil } else if err != nil { return names.MachineTag{}, "", errors.Trace(err) } return machineTag, info.Location, nil }
// makeMachineStatus assembles the API-facing status of a single machine:
// agent status, instance state, public address, and hardware
// characteristics. An unprovisioned machine reports InstanceId "pending";
// any other InstanceId error reports "error".
func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) {
	status.Id = machine.Id()
	agentStatus := processMachine(machine)
	status.Agent = agentStatus

	status.Series = machine.Series()
	status.Jobs = paramsJobsFromJobs(machine.Jobs())
	status.WantsVote = machine.WantsVote()
	status.HasVote = machine.HasVote()
	instid, err := machine.InstanceId()
	if err == nil {
		status.InstanceId = instid
		status.InstanceState, err = machine.InstanceStatus()
		if err != nil {
			status.InstanceState = "error"
		}
		addr, err := machine.PublicAddress()
		if err != nil {
			// Usually this indicates that no addresses have been set on the
			// machine yet.
			addr = network.Address{}
			logger.Debugf("error fetching public address: %q", err)
		}
		status.DNSName = addr.Value
	} else {
		if errors.IsNotProvisioned(err) {
			status.InstanceId = "pending"
		} else {
			status.InstanceId = "error"
		}
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		if !errors.IsNotFound(err) {
			status.Hardware = "error"
		}
	} else {
		status.Hardware = hc.String()
	}
	status.Containers = make(map[string]params.MachineStatus)
	return
}
// environManagerInstances returns all environ manager instances. func environManagerInstances(st *state.State) ([]instance.Id, error) { info, err := st.ControllerInfo() if err != nil { return nil, err } instances := make([]instance.Id, 0, len(info.MachineIds)) for _, id := range info.MachineIds { machine, err := st.Machine(id) if err != nil { return nil, err } instanceId, err := machine.InstanceId() if err == nil { instances = append(instances, instanceId) } else if !errors.IsNotProvisioned(err) { return nil, err } } return instances, nil }
// SetConstraints sets the exact constraints to apply when provisioning an
// instance for the machine. It will fail if the machine is Dead, or if it
// is already provisioned.
func (m *Machine) SetConstraints(cons constraints.Value) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set constraints")
	unsupported, err := m.st.validateConstraints(cons)
	if len(unsupported) > 0 {
		// Unsupported constraints are warned about, not rejected.
		logger.Warningf(
			"setting constraints on machine %q: unsupported constraints: %v", m.Id(), strings.Join(unsupported, ","))
	} else if err != nil {
		return err
	}
	// A machine is "not provisioned yet" while its nonce is empty.
	notSetYet := bson.D{{"nonce", ""}}
	ops := []txn.Op{
		{
			C:      machinesC,
			Id:     m.doc.DocID,
			Assert: append(isAliveDoc, notSetYet...),
		},
		setConstraintsOp(m.st, m.globalKey(), cons),
	}
	// make multiple attempts to push the ErrExcessiveContention case out of the
	// realm of plausibility: it implies local state indicating unprovisioned,
	// and remote state indicating provisioned (reasonable); but which changes
	// back to unprovisioned and then to provisioned again with *very* specific
	// timing in the course of this loop.
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			// Re-fetch the machine so local state matches remote state.
			if m, err = m.st.Machine(m.doc.Id); err != nil {
				return nil, err
			}
		}
		if m.doc.Life != Alive {
			return nil, errNotAlive
		}
		if _, err := m.InstanceId(); err == nil {
			return nil, fmt.Errorf("machine is already provisioned")
		} else if !errors.IsNotProvisioned(err) {
			return nil, err
		}
		return ops, nil
	}
	return m.st.run(buildTxn)
}
// SetStatus sets the status of the machine.
//
// Validation of the status value is delegated to newMachineStatusDoc;
// "pending" is permitted only while the machine is unprovisioned.
func (m *Machine) SetStatus(status Status, info string, data map[string]interface{}) error {
	// If a machine is not yet provisioned, we allow its status
	// to be set back to pending (when a retry is to occur).
	_, err := m.InstanceId()
	allowPending := errors.IsNotProvisioned(err)
	doc, err := newMachineStatusDoc(status, info, data, allowPending)
	if err != nil {
		return err
	}
	ops := []txn.Op{{
		C:      machinesC,
		Id:     m.doc.DocID,
		Assert: notDeadDoc,
	},
		updateStatusOp(m.st, m.globalKey(), doc.statusDoc),
	}
	if err = m.st.runTransaction(ops); err != nil {
		// An aborted transaction here means the machine is no longer alive.
		return fmt.Errorf("cannot set status of machine %q: %v", m, onAbort(err, errNotAlive))
	}
	return nil
}
// ServerError returns an error suitable for returning to an API // client, with an error code suitable for various kinds of errors // generated in packages outside the API. func ServerError(err error) *params.Error { if err == nil { return nil } msg := err.Error() // Skip past annotations when looking for the code. err = errors.Cause(err) code, ok := singletonCode(err) switch { case ok: case errors.IsUnauthorized(err): code = params.CodeUnauthorized case errors.IsNotFound(err): code = params.CodeNotFound case errors.IsAlreadyExists(err): code = params.CodeAlreadyExists case errors.IsNotAssigned(err): code = params.CodeNotAssigned case state.IsHasAssignedUnitsError(err): code = params.CodeHasAssignedUnits case IsNoAddressSetError(err): code = params.CodeNoAddressSet case errors.IsNotProvisioned(err): code = params.CodeNotProvisioned case state.IsUpgradeInProgressError(err): code = params.CodeUpgradeInProgress case state.IsHasAttachmentsError(err): code = params.CodeMachineHasAttachedStorage case IsUnknownEnviromentError(err): code = params.CodeNotFound case errors.IsNotSupported(err): code = params.CodeNotSupported default: code = params.ErrCode(err) } return ¶ms.Error{ Message: msg, Code: code, } }
func addInstanceTags(env environs.Environ, machines []*state.Machine) error { cfg := env.Config() tagger, ok := env.(environs.InstanceTagger) if !ok { logger.Debugf("environment type %q does not support instance tagging", cfg.Type()) return nil } // Tag each top-level, provisioned machine. logger.Infof("adding tags to existing machine instances") for _, m := range machines { if names.IsContainerMachine(m.Id()) { continue } instId, err := m.InstanceId() if errors.IsNotProvisioned(err) { continue } else if err != nil { return errors.Annotatef(err, "getting instance ID for machine %v", m.Id()) } stateMachineJobs := m.Jobs() paramsMachineJobs := make([]multiwatcher.MachineJob, len(stateMachineJobs)) for i, job := range stateMachineJobs { paramsMachineJobs[i] = job.ToParams() } tags := instancecfg.InstanceTags(cfg, paramsMachineJobs) logger.Infof("tagging instance %v: %v", instId, tags) if err := tagger.TagInstance(instId, tags); err != nil { return errors.Annotatef(err, "tagging instance %v for machine %v", instId, m.Id()) } } return nil }
// VolumeParams returns the parameters for creating or destroying // the volumes with the specified tags. func (s *StorageProvisionerAPI) VolumeParams(args params.Entities) (params.VolumeParamsResults, error) { canAccess, err := s.getStorageEntityAuthFunc() if err != nil { return params.VolumeParamsResults{}, err } envConfig, err := s.st.EnvironConfig() if err != nil { return params.VolumeParamsResults{}, err } results := params.VolumeParamsResults{ Results: make([]params.VolumeParamsResult, len(args.Entities)), } poolManager := poolmanager.New(s.settings) one := func(arg params.Entity) (params.VolumeParams, error) { tag, err := names.ParseVolumeTag(arg.Tag) if err != nil || !canAccess(tag) { return params.VolumeParams{}, common.ErrPerm } volume, err := s.st.Volume(tag) if errors.IsNotFound(err) { return params.VolumeParams{}, common.ErrPerm } else if err != nil { return params.VolumeParams{}, err } volumeAttachments, err := s.st.VolumeAttachments(tag) if err != nil { return params.VolumeParams{}, err } storageInstance, err := storagecommon.MaybeAssignedStorageInstance( volume.StorageInstance, s.st.StorageInstance, ) if err != nil { return params.VolumeParams{}, err } volumeParams, err := storagecommon.VolumeParams(volume, storageInstance, envConfig, poolManager) if err != nil { return params.VolumeParams{}, err } if len(volumeAttachments) == 1 { // There is exactly one attachment to be made, so make // it immediately. Otherwise we will defer attachments // until later. volumeAttachment := volumeAttachments[0] volumeAttachmentParams, ok := volumeAttachment.Params() if !ok { return params.VolumeParams{}, errors.Errorf( "volume %q is already attached to machine %q", volumeAttachment.Volume().Id(), volumeAttachment.Machine().Id(), ) } machineTag := volumeAttachment.Machine() instanceId, err := s.st.MachineInstanceId(machineTag) if errors.IsNotProvisioned(err) { // Leave the attachment until later. 
instanceId = "" } else if err != nil { return params.VolumeParams{}, err } volumeParams.Attachment = ¶ms.VolumeAttachmentParams{ tag.String(), machineTag.String(), "", // volume ID string(instanceId), volumeParams.Provider, volumeAttachmentParams.ReadOnly, } } return volumeParams, nil } for i, arg := range args.Entities { var result params.VolumeParamsResult volumeParams, err := one(arg) if err != nil { result.Error = common.ServerError(err) } else { result.Result = volumeParams } results.Results[i] = result } return results, nil }
// prepareOrGetContainerInterfaceInfo prepares (or, when maintain is true,
// refuses to re-prepare) the network interface configuration for each
// container entity in args, allocating addresses from the host machine's
// provider. One result is produced per entity; per-entity failures are
// recorded in the result rather than aborting the whole call.
func (p *ProvisionerAPI) prepareOrGetContainerInterfaceInfo(args params.Entities, maintain bool) (params.MachineNetworkConfigResults, error) {
	result := params.MachineNetworkConfigResults{
		Results: make([]params.MachineNetworkConfigResult, len(args.Entities)),
	}
	netEnviron, hostMachine, canAccess, err := p.prepareContainerAccessEnvironment()
	if err != nil {
		return result, errors.Trace(err)
	}
	instId, err := hostMachine.InstanceId()
	if errors.IsNotProvisioned(err) {
		// The host itself must be provisioned before containers can be.
		err = errors.NotProvisionedf("cannot prepare container network config: host machine %q", hostMachine)
		return result, err
	} else if err != nil {
		return result, errors.Trace(err)
	}

	for i, entity := range args.Entities {
		machineTag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		// The auth function (canAccess) checks that the machine is a
		// top level machine (we filter those out next) or that the
		// machine has the host as a parent.
		container, err := p.getMachine(canAccess, machineTag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if !container.IsContainer() {
			err = errors.Errorf("cannot prepare network config for %q: not a container", machineTag)
			result.Results[i].Error = common.ServerError(err)
			continue
			// NOTE(review): "maintain == true" is equivalent to "maintain";
			// left as-is to keep this a documentation-only change.
		} else if ciid, cerr := container.InstanceId(); maintain == true && cerr == nil {
			// Since we want to configure and create NICs on the
			// container before it starts, it must also be not
			// provisioned yet.
			err = errors.Errorf("container %q already provisioned as %q", container, ciid)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if cerr != nil && !errors.IsNotProvisioned(cerr) {
			// Any other error needs to be reported.
			result.Results[i].Error = common.ServerError(cerr)
			continue
		}

		if err := hostMachine.SetContainerLinkLayerDevices(container); err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		containerDevices, err := container.AllLinkLayerDevices()
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		preparedInfo := make([]network.InterfaceInfo, len(containerDevices))
		preparedOK := true
		for j, device := range containerDevices {
			parentDevice, err := device.ParentDevice()
			if err != nil || parentDevice == nil {
				err = errors.Errorf(
					"cannot get parent %q of container device %q: %v",
					device.ParentName(), device.Name(), err,
				)
				result.Results[i].Error = common.ServerError(err)
				preparedOK = false
				break
			}
			parentAddrs, err := parentDevice.Addresses()
			if err != nil {
				result.Results[i].Error = common.ServerError(err)
				preparedOK = false
				break
			}

			info := network.InterfaceInfo{
				InterfaceName:       device.Name(),
				MACAddress:          device.MACAddress(),
				ConfigType:          network.ConfigManual,
				InterfaceType:       network.InterfaceType(device.Type()),
				NoAutoStart:         !device.IsAutoStart(),
				Disabled:            !device.IsUp(),
				MTU:                 int(device.MTU()),
				ParentInterfaceName: parentDevice.Name(),
			}

			if len(parentAddrs) > 0 {
				// Derive static config from the parent device's first
				// address and its subnet.
				logger.Infof("host machine device %q has addresses %v", parentDevice.Name(), parentAddrs)
				firstAddress := parentAddrs[0]
				parentDeviceSubnet, err := firstAddress.Subnet()
				if err != nil {
					err = errors.Annotatef(err,
						"cannot get subnet %q used by address %q of host machine device %q",
						firstAddress.SubnetCIDR(), firstAddress.Value(), parentDevice.Name(),
					)
					result.Results[i].Error = common.ServerError(err)
					preparedOK = false
					break
				}
				info.ConfigType = network.ConfigStatic
				info.CIDR = parentDeviceSubnet.CIDR()
				info.ProviderSubnetId = parentDeviceSubnet.ProviderId()
				info.VLANTag = parentDeviceSubnet.VLANTag()
			} else {
				logger.Infof("host machine device %q has no addresses %v", parentDevice.Name(), parentAddrs)
			}

			logger.Tracef("prepared info for container interface %q: %+v", info.InterfaceName, info)
			preparedOK = true
			preparedInfo[j] = info
		}

		if !preparedOK {
			// Error result is already set.
			continue
		}

		allocatedInfo, err := netEnviron.AllocateContainerAddresses(instId, machineTag, preparedInfo)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		logger.Debugf("got allocated info from provider: %+v", allocatedInfo)

		allocatedConfig := networkingcommon.NetworkConfigFromInterfaceInfo(allocatedInfo)
		logger.Tracef("allocated network config: %+v", allocatedConfig)
		result.Results[i].Config = allocatedConfig
	}
	return result, nil
}
// VolumeAttachmentParams returns the parameters for creating the volume // attachments with the specified IDs. func (s *StorageProvisionerAPI) VolumeAttachmentParams( args params.MachineStorageIds, ) (params.VolumeAttachmentParamsResults, error) { canAccess, err := s.getAttachmentAuthFunc() if err != nil { return params.VolumeAttachmentParamsResults{}, common.ServerError(common.ErrPerm) } results := params.VolumeAttachmentParamsResults{ Results: make([]params.VolumeAttachmentParamsResult, len(args.Ids)), } poolManager := poolmanager.New(s.settings) one := func(arg params.MachineStorageId) (params.VolumeAttachmentParams, error) { volumeAttachment, err := s.oneVolumeAttachment(arg, canAccess) if err != nil { return params.VolumeAttachmentParams{}, err } instanceId, err := s.st.MachineInstanceId(volumeAttachment.Machine()) if errors.IsNotProvisioned(err) { // The worker must watch for machine provisioning events. instanceId = "" } else if err != nil { return params.VolumeAttachmentParams{}, err } volume, err := s.st.Volume(volumeAttachment.Volume()) if err != nil { return params.VolumeAttachmentParams{}, err } var volumeId string var pool string if volumeParams, ok := volume.Params(); ok { pool = volumeParams.Pool } else { volumeInfo, err := volume.Info() if err != nil { return params.VolumeAttachmentParams{}, err } volumeId = volumeInfo.VolumeId pool = volumeInfo.Pool } providerType, _, err := storagecommon.StoragePoolConfig(pool, poolManager) if err != nil { return params.VolumeAttachmentParams{}, errors.Trace(err) } var readOnly bool if volumeAttachmentParams, ok := volumeAttachment.Params(); ok { readOnly = volumeAttachmentParams.ReadOnly } else { // Attachment parameters may be requested even if the // attachment exists; i.e. for reattachment. 
volumeAttachmentInfo, err := volumeAttachment.Info() if err != nil { return params.VolumeAttachmentParams{}, errors.Trace(err) } readOnly = volumeAttachmentInfo.ReadOnly } return params.VolumeAttachmentParams{ volumeAttachment.Volume().String(), volumeAttachment.Machine().String(), volumeId, string(instanceId), string(providerType), readOnly, }, nil } for i, arg := range args.Ids { var result params.VolumeAttachmentParamsResult volumeAttachment, err := one(arg) if err != nil { result.Error = common.ServerError(err) } else { result.Result = volumeAttachment } results.Results[i] = result } return results, nil }
// FilesystemAttachmentParams returns the parameters for creating the filesystem // attachments with the specified IDs. func (s *StorageProvisionerAPI) FilesystemAttachmentParams( args params.MachineStorageIds, ) (params.FilesystemAttachmentParamsResults, error) { canAccess, err := s.getAttachmentAuthFunc() if err != nil { return params.FilesystemAttachmentParamsResults{}, common.ServerError(common.ErrPerm) } results := params.FilesystemAttachmentParamsResults{ Results: make([]params.FilesystemAttachmentParamsResult, len(args.Ids)), } poolManager := poolmanager.New(s.settings) one := func(arg params.MachineStorageId) (params.FilesystemAttachmentParams, error) { filesystemAttachment, err := s.oneFilesystemAttachment(arg, canAccess) if err != nil { return params.FilesystemAttachmentParams{}, err } instanceId, err := s.st.MachineInstanceId(filesystemAttachment.Machine()) if errors.IsNotProvisioned(err) { // The worker must watch for machine provisioning events. instanceId = "" } else if err != nil { return params.FilesystemAttachmentParams{}, err } filesystem, err := s.st.Filesystem(filesystemAttachment.Filesystem()) if err != nil { return params.FilesystemAttachmentParams{}, err } var filesystemId string var pool string if filesystemParams, ok := filesystem.Params(); ok { pool = filesystemParams.Pool } else { filesystemInfo, err := filesystem.Info() if err != nil { return params.FilesystemAttachmentParams{}, err } filesystemId = filesystemInfo.FilesystemId pool = filesystemInfo.Pool } providerType, _, err := storagecommon.StoragePoolConfig(pool, poolManager) if err != nil { return params.FilesystemAttachmentParams{}, errors.Trace(err) } var location string var readOnly bool if filesystemAttachmentParams, ok := filesystemAttachment.Params(); ok { location = filesystemAttachmentParams.Location readOnly = filesystemAttachmentParams.ReadOnly } else { // Attachment parameters may be requested even if the // attachment exists; i.e. for reattachment. 
filesystemAttachmentInfo, err := filesystemAttachment.Info() if err != nil { return params.FilesystemAttachmentParams{}, errors.Trace(err) } location = filesystemAttachmentInfo.MountPoint readOnly = filesystemAttachmentInfo.ReadOnly } return params.FilesystemAttachmentParams{ filesystemAttachment.Filesystem().String(), filesystemAttachment.Machine().String(), filesystemId, string(instanceId), string(providerType), // TODO(axw) dealias MountPoint. We now have // Path, MountPoint and Location in different // parts of the codebase. location, readOnly, }, nil } for i, arg := range args.Ids { var result params.FilesystemAttachmentParamsResult filesystemAttachment, err := one(arg) if err != nil { result.Error = common.ServerError(err) } else { result.Result = filesystemAttachment } results.Results[i] = result } return results, nil }
// legacyPrepareOrGetContainerInterfaceInfo optionally allocates an address and // returns information for configuring networking on a container. It accepts // container tags as arguments. func (p *ProvisionerAPI) legacyPrepareOrGetContainerInterfaceInfo( args params.Entities, provisionContainer bool, ) ( params.MachineNetworkConfigResults, error, ) { result := params.MachineNetworkConfigResults{ Results: make([]params.MachineNetworkConfigResult, len(args.Entities)), } // Some preparations first. environ, host, canAccess, err := p.prepareContainerAccessEnvironment() if err != nil { return result, errors.Trace(err) } instId, err := host.InstanceId() if err != nil && errors.IsNotProvisioned(err) { // If the host machine is not provisioned yet, we have nothing // to do. NotProvisionedf will append " not provisioned" to // the message. err = errors.NotProvisionedf("cannot allocate addresses: host machine %q", host) return result, err } var subnet *state.Subnet var subnetInfo network.SubnetInfo var interfaceInfo network.InterfaceInfo if environs.AddressAllocationEnabled() { // We don't need a subnet unless we need to allocate a static IP. subnet, subnetInfo, interfaceInfo, err = p.prepareAllocationNetwork(environ, instId) if err != nil { return result, errors.Annotate(err, "cannot allocate addresses") } } else { var allInterfaceInfos []network.InterfaceInfo allInterfaceInfos, err = environ.NetworkInterfaces(instId) if err != nil { return result, errors.Annotatef(err, "cannot instance %q interfaces", instId) } else if len(allInterfaceInfos) == 0 { return result, errors.New("no interfaces available") } // Currently we only support a single NIC per container, so we only need // the information from the host instance's first NIC. logger.Tracef("interfaces for instance %q: %v", instId, allInterfaceInfos) interfaceInfo = allInterfaceInfos[0] } // Loop over the passed container tags. 
for i, entity := range args.Entities { tag, err := names.ParseMachineTag(entity.Tag) if err != nil { result.Results[i].Error = common.ServerError(err) continue } // The auth function (canAccess) checks that the machine is a // top level machine (we filter those out next) or that the // machine has the host as a parent. container, err := p.getMachine(canAccess, tag) if err != nil { result.Results[i].Error = common.ServerError(err) continue } else if !container.IsContainer() { err = errors.Errorf("cannot allocate address for %q: not a container", tag) result.Results[i].Error = common.ServerError(err) continue } else if ciid, cerr := container.InstanceId(); provisionContainer == true && cerr == nil { // Since we want to configure and create NICs on the // container before it starts, it must also be not // provisioned yet. err = errors.Errorf("container %q already provisioned as %q", container, ciid) result.Results[i].Error = common.ServerError(err) continue } else if cerr != nil && !errors.IsNotProvisioned(cerr) { // Any other error needs to be reported. result.Results[i].Error = common.ServerError(cerr) continue } var macAddress string var address *state.IPAddress if provisionContainer { // Allocate and set an address. macAddress = generateMACAddress() address, err = p.allocateAddress(environ, subnet, host, container, instId, macAddress) if err != nil { err = errors.Annotatef(err, "failed to allocate an address for %q", container) result.Results[i].Error = common.ServerError(err) continue } } else { id := container.Id() addresses, err := p.st.AllocatedIPAddresses(id) if err != nil { logger.Warningf("failed to get Id for container %q: %v", tag, err) result.Results[i].Error = common.ServerError(err) continue } // TODO(dooferlad): if we get more than 1 address back, we ignore everything after // the first. The calling function expects exactly one result though, // so we don't appear to have a way of allocating >1 address to a // container... 
if len(addresses) != 1 { logger.Warningf("got %d addresses for container %q - expected 1: %v", len(addresses), tag, err) result.Results[i].Error = common.ServerError(err) continue } address = addresses[0] macAddress = address.MACAddress() } // Store it on the machine, construct and set an interface result. dnsServers := make([]string, len(interfaceInfo.DNSServers)) for l, dns := range interfaceInfo.DNSServers { dnsServers[l] = dns.Value } if macAddress == "" { macAddress = interfaceInfo.MACAddress } interfaceType := string(interfaceInfo.InterfaceType) if interfaceType == "" { interfaceType = string(network.EthernetInterface) } // TODO(dimitern): Support allocating one address per NIC on // the host, effectively creating the same number of NICs in // the container. result.Results[i] = params.MachineNetworkConfigResult{ Config: []params.NetworkConfig{{ DeviceIndex: interfaceInfo.DeviceIndex, MACAddress: macAddress, CIDR: subnetInfo.CIDR, NetworkName: interfaceInfo.NetworkName, ProviderId: string(interfaceInfo.ProviderId), ProviderSubnetId: string(subnetInfo.ProviderId), VLANTag: interfaceInfo.VLANTag, InterfaceType: interfaceType, InterfaceName: interfaceInfo.InterfaceName, Disabled: interfaceInfo.Disabled, NoAutoStart: interfaceInfo.NoAutoStart, DNSServers: dnsServers, ConfigType: string(network.ConfigStatic), Address: address.Value(), GatewayAddress: interfaceInfo.GatewayAddress.Value, ExtraConfig: interfaceInfo.ExtraConfig, }}, } } return result, nil }