// Authenticate authenticates the provided entity. // It takes an entityfinder and the tag used to find the entity that requires authentication. func (*AgentAuthenticator) Authenticate(entityFinder EntityFinder, tag names.Tag, req params.LoginRequest) (state.Entity, error) { entity, err := entityFinder.FindEntity(tag) if errors.IsNotFound(err) { return nil, errors.Trace(common.ErrBadCreds) } if err != nil { return nil, errors.Trace(err) } authenticator, ok := entity.(taggedAuthenticator) if !ok { return nil, errors.Trace(common.ErrBadRequest) } if !authenticator.PasswordValid(req.Credentials) { return nil, errors.Trace(common.ErrBadCreds) } // If this is a machine agent connecting, we need to check the // nonce matches, otherwise the wrong agent might be trying to // connect. if machine, ok := authenticator.(*state.Machine); ok { if !machine.CheckProvisioned(req.Nonce) { return nil, errors.NotProvisionedf("machine %v", machine.Id()) } } return entity, nil }
// TestProviderAddressesFailure verifies that ProviderAddresses maps
// per-machine failures to individual result slots instead of failing the
// whole bulk call: a FindEntity error, a success, and a NotProvisioned
// error (whose error-code wrapping must be preserved).
func (s *InstancePollerSuite) TestProviderAddressesFailure(c *gc.C) {
	// Errors are consumed by the stub in call order; nil means success.
	s.st.SetErrors(
		errors.New("pow!"),                   // m1 := FindEntity("1")
		nil,                                  // m2 := FindEntity("2")
		errors.New("FAIL"),                   // m2.ProviderAddresses()- unused
		errors.NotProvisionedf("machine 42"), // FindEntity("3") (ensure wrapping is preserved)
	)
	s.st.SetMachineInfo(c, machineInfo{id: "1"})
	s.st.SetMachineInfo(c, machineInfo{id: "2"})

	result, err := s.api.ProviderAddresses(s.machineEntities)
	// The bulk call itself must succeed; failures surface per result.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, jc.DeepEquals, params.MachineAddressesResults{
		Results: []params.MachineAddressesResult{
			{Error: apiservertesting.ServerError("pow!")},
			{Addresses: nil},
			{Error: apiservertesting.NotProvisionedError("42")},
		}},
	)

	// Verify the exact sequence of state calls made by the facade.
	s.st.CheckFindEntityCall(c, 0, "1")
	s.st.CheckFindEntityCall(c, 1, "2")
	s.st.CheckCall(c, 2, "ProviderAddresses")
	s.st.CheckFindEntityCall(c, 3, "3")
}
// Authenticate authenticates the provided entity.
// It takes an entityfinder and the tag used to find the entity that
// requires authentication. It returns the authenticated entity, or an
// error mapping to bad credentials, a bad request, or a machine that is
// not yet provisioned.
func (*AgentAuthenticator) Authenticate(entityFinder EntityFinder, tag names.Tag, req params.LoginRequest) (state.Entity, error) {
	entity, err := entityFinder.FindEntity(tag)
	if errors.IsNotFound(err) {
		// An unknown tag is reported as bad credentials so callers
		// cannot distinguish "wrong password" from "no such entity".
		return nil, errors.Trace(common.ErrBadCreds)
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	authenticator, ok := entity.(taggedAuthenticator)
	if !ok {
		// The entity exists but cannot be password-authenticated.
		return nil, errors.Trace(common.ErrBadRequest)
	}
	if !authenticator.PasswordValid(req.Credentials) {
		return nil, errors.Trace(common.ErrBadCreds)
	}

	// If this is a machine agent connecting, we need to check the
	// nonce matches, otherwise the wrong agent might be trying to
	// connect.
	//
	// NOTE(axw) with the current implementation of Login, it is
	// important that we check the password before checking the
	// nonce, or an unprovisioned machine in a hosted model will
	// prevent a controller machine from logging into the hosted
	// model.
	if machine, ok := authenticator.(*state.Machine); ok {
		if !machine.CheckProvisioned(req.Nonce) {
			return nil, errors.NotProvisionedf("machine %v", machine.Id())
		}
	}

	return entity, nil
}
// TestSetInstanceStatusFailure verifies per-entity error handling for
// SetInstanceStatus: a FindEntity failure, a SetInstanceStatus failure,
// and a NotProvisioned FindEntity error each map to their own result.
func (s *InstancePollerSuite) TestSetInstanceStatusFailure(c *gc.C) {
	// Errors are consumed by the stub in call order; nil means success.
	s.st.SetErrors(
		errors.New("pow!"),                   // m1 := FindEntity("1")
		nil,                                  // m2 := FindEntity("2")
		errors.New("FAIL"),                   // m2.SetInstanceStatus()
		errors.NotProvisionedf("machine 42"), // FindEntity("3") (ensure wrapping is preserved)
	)
	s.st.SetMachineInfo(c, machineInfo{id: "1", instanceStatus: statusInfo("foo")})
	s.st.SetMachineInfo(c, machineInfo{id: "2", instanceStatus: statusInfo("")})

	result, err := s.api.SetInstanceStatus(params.SetStatus{
		Entities: []params.EntityStatusArgs{
			{Tag: "machine-1", Status: "new"},
			{Tag: "machine-2", Status: "invalid"},
			{Tag: "machine-3", Status: ""},
		}},
	)
	// The bulk call itself must succeed; failures surface per result.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, jc.DeepEquals, s.machineErrorResults)

	// Verify the exact sequence of state calls made by the facade.
	s.st.CheckFindEntityCall(c, 0, "1")
	s.st.CheckFindEntityCall(c, 1, "2")
	// The status update is stamped with the suite's test clock time.
	now := s.clock.Now()
	s.st.CheckCall(c, 2, "SetInstanceStatus", status.StatusInfo{Status: "invalid", Since: &now})
	s.st.CheckFindEntityCall(c, 3, "3")
}
// TestInstanceIdFailure verifies per-entity error handling for InstanceId:
// a FindEntity failure, an InstanceId failure, and a NotProvisioned
// FindEntity error each map to their own result slot.
func (s *InstancePollerSuite) TestInstanceIdFailure(c *gc.C) {
	// Errors are consumed by the stub in call order; nil means success.
	s.st.SetErrors(
		errors.New("pow!"),                   // m1 := FindEntity("1"); InstanceId not called
		nil,                                  // m2 := FindEntity("2")
		errors.New("FAIL"),                   // m2.InstanceId()
		errors.NotProvisionedf("machine 42"), // FindEntity("3") (ensure wrapping is preserved)
	)
	s.st.SetMachineInfo(c, machineInfo{id: "1", instanceId: ""})
	s.st.SetMachineInfo(c, machineInfo{id: "2", instanceId: "i-bar"})

	result, err := s.api.InstanceId(s.machineEntities)
	// The bulk call itself must succeed; failures surface per result.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, jc.DeepEquals, params.StringResults{
		Results: []params.StringResult{
			{Error: apiservertesting.ServerError("pow!")},
			{Error: apiservertesting.ServerError("FAIL")},
			{Error: apiservertesting.NotProvisionedError("42")},
		}},
	)

	// Verify the exact sequence of state calls made by the facade.
	s.st.CheckFindEntityCall(c, 0, "1")
	s.st.CheckFindEntityCall(c, 1, "2")
	s.st.CheckCall(c, 2, "InstanceId")
	s.st.CheckFindEntityCall(c, 3, "3")
}
// TestAreManuallyProvisionedFailure verifies per-entity error handling for
// AreManuallyProvisioned: a FindEntity failure, an IsManual failure, and a
// NotProvisioned FindEntity error each map to their own result slot.
func (s *InstancePollerSuite) TestAreManuallyProvisionedFailure(c *gc.C) {
	// Errors are consumed by the stub in call order; nil means success.
	s.st.SetErrors(
		errors.New("pow!"),                   // m1 := FindEntity("1")
		nil,                                  // m2 := FindEntity("2")
		errors.New("FAIL"),                   // m2.IsManual()
		errors.NotProvisionedf("machine 42"), // FindEntity("3") (ensure wrapping is preserved)
	)
	s.st.SetMachineInfo(c, machineInfo{id: "1", isManual: true})
	s.st.SetMachineInfo(c, machineInfo{id: "2", isManual: false})

	result, err := s.api.AreManuallyProvisioned(s.machineEntities)
	// The bulk call itself must succeed; failures surface per result.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, jc.DeepEquals, params.BoolResults{
		Results: []params.BoolResult{
			{Error: apiservertesting.ServerError("pow!")},
			{Error: apiservertesting.ServerError("FAIL")},
			{Error: apiservertesting.NotProvisionedError("42")},
		}},
	)

	// Verify the exact sequence of state calls made by the facade.
	s.st.CheckFindEntityCall(c, 0, "1")
	s.st.CheckFindEntityCall(c, 1, "2")
	s.st.CheckCall(c, 2, "IsManual")
	s.st.CheckFindEntityCall(c, 3, "3")
}
func (s *provisionerSuite) TestVolumesEnviron(c *gc.C) { s.setupVolumes(c) s.authorizer.Tag = names.NewMachineTag("2") // neither 0 nor 1 results, err := s.api.Volumes(params.Entities{ Entities: []params.Entity{ {"volume-0-0"}, {"volume-1"}, {"volume-2"}, {"volume-42"}, }, }) c.Assert(err, jc.ErrorIsNil) c.Assert(results, gc.DeepEquals, params.VolumeResults{ Results: []params.VolumeResult{ {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: common.ServerError(errors.NotProvisionedf(`volume "1"`))}, {Result: params.Volume{ VolumeTag: "volume-2", Info: params.VolumeInfo{ VolumeId: "def", HardwareId: "456", Size: 4096, }, }}, {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) }
func (s *provisionerSuite) TestFilesystems(c *gc.C) { s.setupFilesystems(c) s.authorizer.Tag = names.NewMachineTag("2") // neither 0 nor 1 results, err := s.api.Filesystems(params.Entities{ Entities: []params.Entity{ {"filesystem-0-0"}, {"filesystem-1"}, {"filesystem-2"}, {"filesystem-42"}, }, }) c.Assert(err, jc.ErrorIsNil) c.Assert(results, jc.DeepEquals, params.FilesystemResults{ Results: []params.FilesystemResult{ {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: common.ServerError(errors.NotProvisionedf(`filesystem "1"`))}, {Result: params.Filesystem{ FilesystemTag: "filesystem-2", Info: params.FilesystemInfo{ FilesystemId: "def", Size: 4096, }, }}, {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) }
// TestLifeFailure verifies per-entity error handling for Life: a
// FindEntity failure, a successful lookup, and a NotProvisioned
// FindEntity error each map to their own result slot.
func (s *InstancePollerSuite) TestLifeFailure(c *gc.C) {
	// Errors are consumed by the stub in call order; nil means success.
	s.st.SetErrors(
		errors.New("pow!"),                   // m1 := FindEntity("1"); Life not called
		nil,                                  // m2 := FindEntity("2")
		errors.New("FAIL"),                   // m2.Life() - unused
		errors.NotProvisionedf("machine 42"), // FindEntity("3") (ensure wrapping is preserved)
	)
	s.st.SetMachineInfo(c, machineInfo{id: "1", life: state.Alive})
	s.st.SetMachineInfo(c, machineInfo{id: "2", life: state.Dead})
	s.st.SetMachineInfo(c, machineInfo{id: "3", life: state.Dying})

	result, err := s.api.Life(s.machineEntities)
	// The bulk call itself must succeed; failures surface per result.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, jc.DeepEquals, params.LifeResults{
		Results: []params.LifeResult{
			{Error: apiservertesting.ServerError("pow!")},
			{Life: params.Dead},
			{Error: apiservertesting.NotProvisionedError("42")},
		}},
	)

	// Verify the exact sequence of state calls made by the facade.
	s.st.CheckFindEntityCall(c, 0, "1")
	s.st.CheckFindEntityCall(c, 1, "2")
	s.st.CheckCall(c, 2, "Life")
	s.st.CheckFindEntityCall(c, 3, "3")
}
// TestSetProviderAddressesFailure verifies per-entity error handling for
// SetProviderAddresses, and that a failed update leaves the machine's
// stored addresses untouched.
func (s *InstancePollerSuite) TestSetProviderAddressesFailure(c *gc.C) {
	// Errors are consumed by the stub in call order; nil means success.
	s.st.SetErrors(
		errors.New("pow!"),                   // m1 := FindEntity("1")
		nil,                                  // m2 := FindEntity("2")
		errors.New("FAIL"),                   // m2.SetProviderAddresses()
		errors.NotProvisionedf("machine 42"), // FindEntity("3") (ensure wrapping is preserved)
	)
	oldAddrs := network.NewAddresses("0.1.2.3", "127.0.0.1", "8.8.8.8")
	newAddrs := network.NewAddresses("1.2.3.4", "8.4.4.8", "2001:db8::")
	s.st.SetMachineInfo(c, machineInfo{id: "1", providerAddresses: oldAddrs})
	s.st.SetMachineInfo(c, machineInfo{id: "2", providerAddresses: nil})

	result, err := s.api.SetProviderAddresses(params.SetMachinesAddresses{
		MachineAddresses: []params.MachineAddresses{
			{Tag: "machine-1"},
			{Tag: "machine-2", Addresses: params.FromNetworkAddresses(newAddrs...)},
			{Tag: "machine-3"},
		}},
	)
	// The bulk call itself must succeed; failures surface per result.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, jc.DeepEquals, s.machineErrorResults)

	// Verify the exact sequence of state calls made by the facade.
	s.st.CheckFindEntityCall(c, 0, "1")
	s.st.CheckFindEntityCall(c, 1, "2")
	s.st.CheckSetProviderAddressesCall(c, 2, newAddrs)
	s.st.CheckFindEntityCall(c, 3, "3")

	// Ensure machine 2 wasn't updated.
	machine, err := s.st.Machine("2")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(machine.ProviderAddresses(), gc.HasLen, 0)
}
func (v *fakeVolume) Info() (state.VolumeInfo, error) { if !v.provisioned { return state.VolumeInfo{}, errors.NotProvisionedf("volume %v", v.tag.Id()) } return state.VolumeInfo{ Pool: "loop", Size: 1024, }, nil }
// InstanceStatus returns the provider specific instance status for this machine, // or a NotProvisionedError if instance is not yet provisioned. func (m *Machine) InstanceStatus() (string, error) { instData, err := getInstanceData(m.st, m.Id()) if errors.IsNotFound(err) { err = errors.NotProvisionedf("machine %v", m.Id()) } if err != nil { return "", err } return instData.Status, err }
func checkForValidMachineAgent(entity state.Entity, req params.LoginRequest) error { // If this is a machine agent connecting, we need to check the // nonce matches, otherwise the wrong agent might be trying to // connect. if machine, ok := entity.(*state.Machine); ok { if !machine.CheckProvisioned(req.Nonce) { return errors.NotProvisionedf("machine %v", machine.Id()) } } return nil }
func (v *mockVolumeAccessor) Volumes(volumes []names.VolumeTag) ([]params.VolumeResult, error) { var result []params.VolumeResult for _, tag := range volumes { if vol, ok := v.provisionedVolumes[tag.String()]; ok { result = append(result, params.VolumeResult{Result: vol}) } else { result = append(result, params.VolumeResult{ Error: common.ServerError(errors.NotProvisionedf("volume %q", tag.Id())), }) } } return result, nil }
func (v *mockFilesystemAccessor) Filesystems(filesystems []names.FilesystemTag) ([]params.FilesystemResult, error) { var result []params.FilesystemResult for _, tag := range filesystems { if vol, ok := v.provisionedFilesystems[tag.String()]; ok { result = append(result, params.FilesystemResult{Result: vol}) } else { result = append(result, params.FilesystemResult{ Error: common.ServerError(errors.NotProvisionedf("filesystem %q", tag.Id())), }) } } return result, nil }
func (v *mockFilesystemAccessor) FilesystemAttachments(ids []params.MachineStorageId) ([]params.FilesystemAttachmentResult, error) { var result []params.FilesystemAttachmentResult for _, id := range ids { if att, ok := v.provisionedAttachments[id]; ok { result = append(result, params.FilesystemAttachmentResult{Result: att}) } else { result = append(result, params.FilesystemAttachmentResult{ Error: common.ServerError(errors.NotProvisionedf("filesystem attachment %v", id)), }) } } return result, nil }
// volumeStorageAttachmentInfo assembles block-device attachment
// information for a volume-backed storage instance attached to the given
// machine. It reports NotProvisioned until the volume's block device is
// actually visible on the machine.
func volumeStorageAttachmentInfo(
	st StorageInterface,
	storageInstance state.StorageInstance,
	machineTag names.MachineTag,
) (*storage.StorageAttachmentInfo, error) {
	storageTag := storageInstance.StorageTag()
	// Resolve the chain: storage instance -> volume -> attachment ->
	// machine block devices. Each step annotates its own failure.
	volume, err := st.StorageInstanceVolume(storageTag)
	if err != nil {
		return nil, errors.Annotate(err, "getting volume")
	}
	volumeInfo, err := volume.Info()
	if err != nil {
		return nil, errors.Annotate(err, "getting volume info")
	}
	volumeAttachment, err := st.VolumeAttachment(machineTag, volume.VolumeTag())
	if err != nil {
		return nil, errors.Annotate(err, "getting volume attachment")
	}
	volumeAttachmentInfo, err := volumeAttachment.Info()
	if err != nil {
		return nil, errors.Annotate(err, "getting volume attachment info")
	}
	blockDevices, err := st.BlockDevices(machineTag)
	if err != nil {
		return nil, errors.Annotate(err, "getting block devices")
	}
	blockDevice, ok := MatchingBlockDevice(
		blockDevices,
		volumeInfo,
		volumeAttachmentInfo,
	)
	if !ok {
		// We must not say that a block-kind storage attachment is
		// provisioned until its block device has shown up on the
		// machine, otherwise the charm may attempt to use it and
		// fail.
		return nil, errors.NotProvisionedf("%v", names.ReadableString(storageTag))
	}
	devicePath, err := volumeAttachmentDevicePath(
		volumeInfo,
		volumeAttachmentInfo,
		*blockDevice,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &storage.StorageAttachmentInfo{
		storage.StorageKindBlock,
		devicePath,
	}, nil
}
// AvailabilityZone returns the provider-specific instance availability
// zone in which the machine was provisioned, or a NotProvisioned error
// (wrapped with a trace) when the machine has no instance data yet.
func (m *Machine) AvailabilityZone() (string, error) {
	instData, err := getInstanceData(m.st, m.Id())
	if errors.IsNotFound(err) {
		// No instance data: the machine has not been provisioned yet.
		return "", errors.Trace(errors.NotProvisionedf("machine %v", m.Id()))
	}
	if err != nil {
		return "", errors.Trace(err)
	}
	// The zone is optional; report the empty string when unset.
	var zone string
	if instData.AvailZone != nil {
		zone = *instData.AvailZone
	}
	return zone, nil
}
// SetInstanceStatus sets the provider specific instance status for a
// machine. It returns a NotProvisioned error when the machine has no
// instance-data document to update.
func (m *Machine) SetInstanceStatus(status string) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set instance status for machine %q", m)

	// Assert the instance-data document exists so an update on an
	// unprovisioned machine aborts instead of silently doing nothing.
	ops := []txn.Op{
		{
			C:      instanceDataC,
			Id:     m.doc.DocID,
			Assert: txn.DocExists,
			Update: bson.D{{"$set", bson.D{{"status", status}}}},
		},
	}

	if err = m.st.runTransaction(ops); err == nil {
		return nil
	} else if err != txn.ErrAborted {
		return err
	}
	// ErrAborted means the DocExists assertion failed: no instance data,
	// so the machine is not provisioned.
	return errors.NotProvisionedf("machine %v", m.Id())
}
// Authenticate authenticates the provided entity and returns an error on authentication failure. func (*AgentAuthenticator) Authenticate(entity state.Entity, password, nonce string) error { authenticator, ok := entity.(taggedAuthenticator) if !ok { return common.ErrBadRequest } if !authenticator.PasswordValid(password) { return common.ErrBadCreds } // If this is a machine agent connecting, we need to check the // nonce matches, otherwise the wrong agent might be trying to // connect. if machine, ok := authenticator.(*state.Machine); ok { if !machine.CheckProvisioned(nonce) { return errors.NotProvisionedf("machine %v", machine.Id()) } } return nil }
code: params.CodeNoAddressSet, helperFunc: params.IsCodeNoAddressSet, }, { err: common.ErrBadCreds, code: params.CodeUnauthorized, helperFunc: params.IsCodeUnauthorized, }, { err: common.ErrPerm, code: params.CodeUnauthorized, helperFunc: params.IsCodeUnauthorized, }, { err: common.ErrNotLoggedIn, code: params.CodeUnauthorized, helperFunc: params.IsCodeUnauthorized, }, { err: errors.NotProvisionedf("machine 0"), code: params.CodeNotProvisioned, helperFunc: params.IsCodeNotProvisioned, }, { err: errors.AlreadyExistsf("blah"), code: params.CodeAlreadyExists, helperFunc: params.IsCodeAlreadyExists, }, { err: common.ErrUnknownWatcher, code: params.CodeNotFound, helperFunc: params.IsCodeNotFound, }, { err: errors.NotAssignedf("unit mysql/0"), code: params.CodeNotAssigned, helperFunc: params.IsCodeNotAssigned, }, {
// Info is required to implement FilesystemAttachment. func (f *filesystemAttachment) Info() (FilesystemAttachmentInfo, error) { if f.doc.Info == nil { return FilesystemAttachmentInfo{}, errors.NotProvisionedf("filesystem attachment %q on %q", f.doc.Filesystem, f.doc.Machine) } return *f.doc.Info, nil }
// Info is required to implement Filesystem. func (f *filesystem) Info() (FilesystemInfo, error) { if f.doc.Info == nil { return FilesystemInfo{}, errors.NotProvisionedf("filesystem %q", f.doc.FilesystemId) } return *f.doc.Info, nil }
// prepareOrGetContainerInterfaceInfo prepares (or, when maintain is true,
// refuses to re-prepare) network configuration for the given container
// entities. For each container it mirrors the host's link-layer devices
// onto the container, asks the provider to allocate addresses for them,
// and returns the resulting network config. Failures are reported
// per-entity in the results; only environment-level problems fail the
// whole call.
func (p *ProvisionerAPI) prepareOrGetContainerInterfaceInfo(args params.Entities, maintain bool) (params.MachineNetworkConfigResults, error) {
	result := params.MachineNetworkConfigResults{
		Results: make([]params.MachineNetworkConfigResult, len(args.Entities)),
	}
	netEnviron, hostMachine, canAccess, err := p.prepareContainerAccessEnvironment()
	if err != nil {
		return result, errors.Trace(err)
	}
	// The host must be provisioned before any container networking can
	// be set up on it.
	instId, err := hostMachine.InstanceId()
	if errors.IsNotProvisioned(err) {
		err = errors.NotProvisionedf("cannot prepare container network config: host machine %q", hostMachine)
		return result, err
	} else if err != nil {
		return result, errors.Trace(err)
	}
	for i, entity := range args.Entities {
		machineTag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		// The auth function (canAccess) checks that the machine is a
		// top level machine (we filter those out next) or that the
		// machine has the host as a parent.
		container, err := p.getMachine(canAccess, machineTag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if !container.IsContainer() {
			err = errors.Errorf("cannot prepare network config for %q: not a container", machineTag)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if ciid, cerr := container.InstanceId(); maintain == true && cerr == nil {
			// Since we want to configure and create NICs on the
			// container before it starts, it must also be not
			// provisioned yet.
			err = errors.Errorf("container %q already provisioned as %q", container, ciid)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if cerr != nil && !errors.IsNotProvisioned(cerr) {
			// Any other error needs to be reported.
			result.Results[i].Error = common.ServerError(cerr)
			continue
		}

		// Mirror the host's devices onto the container before
		// enumerating them below.
		if err := hostMachine.SetContainerLinkLayerDevices(container); err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		containerDevices, err := container.AllLinkLayerDevices()
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		// Build one InterfaceInfo per container device; any failure
		// aborts preparation for this entity.
		preparedInfo := make([]network.InterfaceInfo, len(containerDevices))
		preparedOK := true
		for j, device := range containerDevices {
			parentDevice, err := device.ParentDevice()
			if err != nil || parentDevice == nil {
				err = errors.Errorf(
					"cannot get parent %q of container device %q: %v",
					device.ParentName(), device.Name(), err,
				)
				result.Results[i].Error = common.ServerError(err)
				preparedOK = false
				break
			}
			parentAddrs, err := parentDevice.Addresses()
			if err != nil {
				result.Results[i].Error = common.ServerError(err)
				preparedOK = false
				break
			}

			info := network.InterfaceInfo{
				InterfaceName:       device.Name(),
				MACAddress:          device.MACAddress(),
				ConfigType:          network.ConfigManual,
				InterfaceType:       network.InterfaceType(device.Type()),
				NoAutoStart:         !device.IsAutoStart(),
				Disabled:            !device.IsUp(),
				MTU:                 int(device.MTU()),
				ParentInterfaceName: parentDevice.Name(),
			}

			if len(parentAddrs) > 0 {
				// Use the parent device's first address to derive the
				// subnet for static configuration of the container NIC.
				logger.Infof("host machine device %q has addresses %v", parentDevice.Name(), parentAddrs)
				firstAddress := parentAddrs[0]
				parentDeviceSubnet, err := firstAddress.Subnet()
				if err != nil {
					err = errors.Annotatef(err,
						"cannot get subnet %q used by address %q of host machine device %q",
						firstAddress.SubnetCIDR(), firstAddress.Value(), parentDevice.Name(),
					)
					result.Results[i].Error = common.ServerError(err)
					preparedOK = false
					break
				}
				info.ConfigType = network.ConfigStatic
				info.CIDR = parentDeviceSubnet.CIDR()
				info.ProviderSubnetId = parentDeviceSubnet.ProviderId()
				info.VLANTag = parentDeviceSubnet.VLANTag()
			} else {
				logger.Infof("host machine device %q has no addresses %v", parentDevice.Name(), parentAddrs)
			}

			logger.Tracef("prepared info for container interface %q: %+v",
				info.InterfaceName, info)
			preparedOK = true
			preparedInfo[j] = info
		}

		if !preparedOK {
			// Error result is already set.
			continue
		}

		allocatedInfo, err := netEnviron.AllocateContainerAddresses(instId, machineTag, preparedInfo)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}
		logger.Debugf("got allocated info from provider: %+v", allocatedInfo)

		allocatedConfig := networkingcommon.NetworkConfigFromInterfaceInfo(allocatedInfo)
		logger.Tracef("allocated network config: %+v", allocatedConfig)
		result.Results[i].Config = allocatedConfig
	}
	return result, nil
}
func (va *mockVolumeAttachment) Info() (state.VolumeAttachmentInfo, error) { if va.info != nil { return *va.info, nil } return state.VolumeAttachmentInfo{}, errors.NotProvisionedf("volume attachment") }
func (m *mockFilesystemAttachment) Info() (state.FilesystemAttachmentInfo, error) { if m.info != nil { return *m.info, nil } return state.FilesystemAttachmentInfo{}, errors.NotProvisionedf("filesystem attachment") }
func (m *mockFilesystem) Info() (state.FilesystemInfo, error) { if m.info != nil { return *m.info, nil } return state.FilesystemInfo{}, errors.NotProvisionedf("filesystem") }
// legacyPrepareOrGetContainerInterfaceInfo optionally allocates an address and
// returns information for configuring networking on a container. It accepts
// container tags as arguments. When provisionContainer is true a new address
// is allocated for each container; otherwise previously allocated addresses
// are looked up. Failures are reported per-entity; only environment-level
// problems fail the whole call.
func (p *ProvisionerAPI) legacyPrepareOrGetContainerInterfaceInfo(
	args params.Entities,
	provisionContainer bool,
) (
	params.MachineNetworkConfigResults,
	error,
) {
	result := params.MachineNetworkConfigResults{
		Results: make([]params.MachineNetworkConfigResult, len(args.Entities)),
	}

	// Some preparations first.
	environ, host, canAccess, err := p.prepareContainerAccessEnvironment()
	if err != nil {
		return result, errors.Trace(err)
	}
	instId, err := host.InstanceId()
	if err != nil && errors.IsNotProvisioned(err) {
		// If the host machine is not provisioned yet, we have nothing
		// to do. NotProvisionedf will append " not provisioned" to
		// the message.
		err = errors.NotProvisionedf("cannot allocate addresses: host machine %q", host)
		return result, err
	}

	var subnet *state.Subnet
	var subnetInfo network.SubnetInfo
	var interfaceInfo network.InterfaceInfo
	if environs.AddressAllocationEnabled() {
		// We don't need a subnet unless we need to allocate a static IP.
		subnet, subnetInfo, interfaceInfo, err = p.prepareAllocationNetwork(environ, instId)
		if err != nil {
			return result, errors.Annotate(err, "cannot allocate addresses")
		}
	} else {
		var allInterfaceInfos []network.InterfaceInfo
		allInterfaceInfos, err = environ.NetworkInterfaces(instId)
		if err != nil {
			return result, errors.Annotatef(err, "cannot instance %q interfaces", instId)
		} else if len(allInterfaceInfos) == 0 {
			return result, errors.New("no interfaces available")
		}
		// Currently we only support a single NIC per container, so we only need
		// the information from the host instance's first NIC.
		logger.Tracef("interfaces for instance %q: %v", instId, allInterfaceInfos)
		interfaceInfo = allInterfaceInfos[0]
	}

	// Loop over the passed container tags.
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		// The auth function (canAccess) checks that the machine is a
		// top level machine (we filter those out next) or that the
		// machine has the host as a parent.
		container, err := p.getMachine(canAccess, tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if !container.IsContainer() {
			err = errors.Errorf("cannot allocate address for %q: not a container", tag)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if ciid, cerr := container.InstanceId(); provisionContainer == true && cerr == nil {
			// Since we want to configure and create NICs on the
			// container before it starts, it must also be not
			// provisioned yet.
			err = errors.Errorf("container %q already provisioned as %q", container, ciid)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if cerr != nil && !errors.IsNotProvisioned(cerr) {
			// Any other error needs to be reported.
			result.Results[i].Error = common.ServerError(cerr)
			continue
		}

		var macAddress string
		var address *state.IPAddress
		if provisionContainer {
			// Allocate and set an address.
			macAddress = generateMACAddress()
			address, err = p.allocateAddress(environ, subnet, host, container, instId, macAddress)
			if err != nil {
				err = errors.Annotatef(err, "failed to allocate an address for %q", container)
				result.Results[i].Error = common.ServerError(err)
				continue
			}
		} else {
			// Reuse the container's previously allocated address.
			id := container.Id()
			addresses, err := p.st.AllocatedIPAddresses(id)
			if err != nil {
				logger.Warningf("failed to get Id for container %q: %v", tag, err)
				result.Results[i].Error = common.ServerError(err)
				continue
			}
			// TODO(dooferlad): if we get more than 1 address back, we ignore everything after
			// the first. The calling function expects exactly one result though,
			// so we don't appear to have a way of allocating >1 address to a
			// container...
			if len(addresses) != 1 {
				logger.Warningf("got %d addresses for container %q - expected 1: %v", len(addresses), tag, err)
				result.Results[i].Error = common.ServerError(err)
				continue
			}
			address = addresses[0]
			macAddress = address.MACAddress()
		}

		// Store it on the machine, construct and set an interface result.
		dnsServers := make([]string, len(interfaceInfo.DNSServers))
		for l, dns := range interfaceInfo.DNSServers {
			dnsServers[l] = dns.Value
		}

		if macAddress == "" {
			macAddress = interfaceInfo.MACAddress
		}

		interfaceType := string(interfaceInfo.InterfaceType)
		if interfaceType == "" {
			interfaceType = string(network.EthernetInterface)
		}

		// TODO(dimitern): Support allocating one address per NIC on
		// the host, effectively creating the same number of NICs in
		// the container.
		result.Results[i] = params.MachineNetworkConfigResult{
			Config: []params.NetworkConfig{{
				DeviceIndex:      interfaceInfo.DeviceIndex,
				MACAddress:       macAddress,
				CIDR:             subnetInfo.CIDR,
				NetworkName:      interfaceInfo.NetworkName,
				ProviderId:       string(interfaceInfo.ProviderId),
				ProviderSubnetId: string(subnetInfo.ProviderId),
				VLANTag:          interfaceInfo.VLANTag,
				InterfaceType:    interfaceType,
				InterfaceName:    interfaceInfo.InterfaceName,
				Disabled:         interfaceInfo.Disabled,
				NoAutoStart:      interfaceInfo.NoAutoStart,
				DNSServers:       dnsServers,
				ConfigType:       string(network.ConfigStatic),
				Address:          address.Value(),
				GatewayAddress:   interfaceInfo.GatewayAddress.Value,
				ExtraConfig:      interfaceInfo.ExtraConfig,
			}},
		}
	}
	return result, nil
}
// TestConfigureContainerNetwork exercises ConfigureContainerNetwork end to
// end with patched host interfaces, addresses, and resolv.conf: no-op
// cases with pre-populated ifaceInfo, the NotProvisioned error path, and
// successful population of the interface config from the API.
func (s *lxcBrokerSuite) TestConfigureContainerNetwork(c *gc.C) {
	// All the pieces used by this func are separately tested, we just
	// test the integration between them.
	s.PatchValue(provisioner.NetInterfaces, func() ([]net.Interface, error) {
		return []net.Interface{{
			Index: 0,
			Name:  "fake0",
			Flags: net.FlagUp,
		}}, nil
	})
	s.PatchValue(provisioner.InterfaceAddrs, func(i *net.Interface) ([]net.Addr, error) {
		return []net.Addr{&fakeAddr{"0.1.2.1/24"}}, nil
	})
	// Point the code at a fake resolv.conf so DNS settings are predictable.
	fakeResolvConf := filepath.Join(c.MkDir(), "resolv.conf")
	err := ioutil.WriteFile(fakeResolvConf, []byte("nameserver ns1.dummy\n"), 0644)
	c.Assert(err, jc.ErrorIsNil)
	s.PatchValue(provisioner.ResolvConf, fakeResolvConf)

	// When ifaceInfo is not empty it shouldn't do anything and both
	// the error and the result are nil.
	ifaceInfo := []network.InterfaceInfo{{DeviceIndex: 0}}
	// First call as if we are configuring the container for the first time
	result, err := provisioner.ConfigureContainerNetwork("42", "bridge", s.api, ifaceInfo, true, false)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, gc.IsNil)
	s.api.CheckCalls(c, []gitjujutesting.StubCall{})

	// Next call as if the container has already been configured.
	s.api.ResetCalls()
	result, err = provisioner.ConfigureContainerNetwork("42", "bridge", s.api, ifaceInfo, false, false)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, gc.IsNil)
	s.api.CheckCalls(c, []gitjujutesting.StubCall{})

	// Call as if the container already has a network configuration, but doesn't.
	s.api.ResetCalls()
	s.api.SetErrors(errors.NotProvisionedf("machine-42 has no network provisioning info"))
	ifaceInfo = []network.InterfaceInfo{}
	result, err = provisioner.ConfigureContainerNetwork("42", "bridge", s.api, ifaceInfo, false, false)
	// NotProvisionedf appends " not provisioned" to the message.
	c.Assert(err, gc.ErrorMatches, "machine-42 has no network provisioning info not provisioned")
	c.Assert(result, jc.DeepEquals, []network.InterfaceInfo{})
	s.api.CheckCalls(c, []gitjujutesting.StubCall{{
		FuncName: "GetContainerInterfaceInfo",
		Args:     []interface{}{names.NewMachineTag("42")},
	}})

	// When it's not empty, result should be populated as expected.
	s.api.ResetCalls()
	result, err = provisioner.ConfigureContainerNetwork("42", "bridge", s.api, ifaceInfo, false, false)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, gc.HasLen, 1)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, jc.DeepEquals, []network.InterfaceInfo{{
		DeviceIndex:      0,
		CIDR:             "0.1.2.0/24",
		ConfigType:       network.ConfigStatic,
		InterfaceName:    "eth0", // generated from the device index.
		MACAddress:       "aa:bb:cc:dd:ee:ff",
		DNSServers:       network.NewAddresses("ns1.dummy"),
		DNSSearchDomains: []string{""},
		Address:          network.NewAddress("0.1.2.3"),
		GatewayAddress:   network.NewAddress("0.1.2.1"),
		NetworkName:      network.DefaultPrivate,
		ProviderId:       network.DefaultProviderId,
	}})
	s.api.CheckCalls(c, []gitjujutesting.StubCall{{
		FuncName: "GetContainerInterfaceInfo",
		Args:     []interface{}{names.NewMachineTag("42")},
	}})

	// A repeat call yields the same populated result and API usage.
	s.api.ResetCalls()
	result, err = provisioner.ConfigureContainerNetwork("42", "bridge", s.api, ifaceInfo, false, false)
	c.Assert(result, gc.HasLen, 1)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result, jc.DeepEquals, []network.InterfaceInfo{{
		DeviceIndex:      0,
		CIDR:             "0.1.2.0/24",
		ConfigType:       network.ConfigStatic,
		InterfaceName:    "eth0", // generated from the device index.
		MACAddress:       "aa:bb:cc:dd:ee:ff",
		DNSServers:       network.NewAddresses("ns1.dummy"),
		DNSSearchDomains: []string{""},
		Address:          network.NewAddress("0.1.2.3"),
		GatewayAddress:   network.NewAddress("0.1.2.1"),
		NetworkName:      network.DefaultPrivate,
		ProviderId:       network.DefaultProviderId,
	}})
	s.api.CheckCalls(c, []gitjujutesting.StubCall{{
		FuncName: "GetContainerInterfaceInfo",
		Args:     []interface{}{names.NewMachineTag("42")},
	}})
}
func (m *mockVolume) Info() (state.VolumeInfo, error) { if m.info != nil { return *m.info, nil } return state.VolumeInfo{}, errors.NotProvisionedf("%v", m.tag) }