// MaintainInstance checks that the container's host has the required
// iptables and routing rules to make the container visible to both the
// host and other machines on the same subnet.
func (broker *kvmBroker) MaintainInstance(args environs.StartInstanceParams) error {
	machineId := args.InstanceConfig.MachineId
	if !environs.AddressAllocationEnabled() {
		kvmLogger.Debugf("address allocation disabled: Not running maintenance for kvm with machineId: %s", machineId)
		return nil
	}
	kvmLogger.Debugf("running maintenance for kvm with machineId: %s", machineId)

	// Default to using the host network until we can configure.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = kvm.DefaultKvmBridge
	}
	_, err := configureContainerNetwork(
		machineId,
		bridgeDevice,
		broker.api,
		args.NetworkInfo,
		false, // don't allocate a new address.
		broker.enableNAT,
	)
	return err
}
// legacyAddressAllocationSupported reports whether the legacy address
// allocation feature is enabled for the current model's provider type.
func (p *ProvisionerAPI) legacyAddressAllocationSupported() (bool, error) {
	config, err := p.st.ModelConfig()
	if err != nil {
		return false, errors.Trace(err)
	}
	return environs.AddressAllocationEnabled(config.Type()), nil
}
// maybeReleaseContainerAddresses releases any addresses registered for the
// given container instance (e.g. as a MAAS device), unless the address
// allocation feature flag is enabled, in which case the addresser worker
// handles releasing them.
func maybeReleaseContainerAddresses(
	api APICalls,
	instanceID instance.Id,
	namespace string,
	log loggo.Logger,
) {
	if environs.AddressAllocationEnabled() {
		// The addresser worker will take care of the addresses.
		return
	}
	// If we're not using addressable containers, we might still have used a
	// MAAS 1.8+ device to register the container when provisioning. In that
	// case we need to attempt releasing the device, but ignore a NotSupported
	// error (when we're not using MAAS 1.8+).
	namespacePrefix := fmt.Sprintf("%s-", namespace)
	tagString := strings.TrimPrefix(string(instanceID), namespacePrefix)
	containerTag, err := names.ParseMachineTag(tagString)
	if err != nil {
		// Not a reason to cause StopInstances to fail though.
		log.Warningf("unexpected container tag %q: %v", instanceID, err)
		return
	}
	err = api.ReleaseContainerAddresses(containerTag)
	switch {
	case err == nil:
		log.Infof("released all addresses for container %q", containerTag.Id())
	case errors.IsNotSupported(err):
		log.Warningf("not releasing all addresses for container %q: %v", containerTag.Id(), err)
	default:
		log.Warningf(
			"unexpected error trying to release container %q addresses: %v",
			containerTag.Id(), err,
		)
	}
}
// AllocateAddress requests an address to be allocated for the
// given instance on the given subnet.
func (env *environ) AllocateAddress(instId instance.Id, subnetId network.Id, addr network.Address, macAddress, hostname string) error {
	if !environs.AddressAllocationEnabled() {
		return errors.NotSupportedf("address allocation")
	}
	if err := env.checkBroken("AllocateAddress"); err != nil {
		return err
	}
	estate, err := env.state()
	if err != nil {
		return err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	estate.maxAddr++
	estate.ops <- OpAllocateAddress{
		Env:        env.name,
		InstanceId: instId,
		SubnetId:   subnetId,
		Address:    addr,
		MACAddress: macAddress,
		HostName:   hostname,
	}
	return nil
}
// runInitialiser runs the container initialiser with the initialisation hook held.
func (cs *ContainerSetup) runInitialiser(containerType instance.ContainerType, initialiser container.Initialiser) error {
	logger.Debugf("running initialiser for %s containers", containerType)
	if err := cs.initLock.Lock(fmt.Sprintf("initialise-%s", containerType)); err != nil {
		return errors.Annotate(err, "failed to acquire initialization lock")
	}
	defer cs.initLock.Unlock()

	// Only tweak default LXC network config when the address allocation
	// feature flag is enabled.
	if environs.AddressAllocationEnabled() {
		// In order to guarantee stable statically assigned IP addresses
		// for LXC containers, we need to install a custom version of
		// /etc/default/lxc-net before we install the lxc package. The
		// custom version of lxc-net is almost the same as the original,
		// but the defined LXC_DHCP_RANGE (used by dnsmasq to give away
		// 10.0.3.x addresses to containers bound to lxcbr0) has infinite
		// lease time. This is necessary, because with the default lease
		// time of 1h, dhclient running inside each container will request
		// a renewal from dnsmasq and replace our statically configured IP
		// address within an hour after starting the container.
		err := maybeOverrideDefaultLXCNet(containerType, cs.addressableContainers)
		if err != nil {
			return errors.Trace(err)
		}
	}

	if err := initialiser.Initialise(); err != nil {
		return errors.Trace(err)
	}

	return nil
}
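// Illustrative sketch only (not part of the original source): one plausible
// shape for the /etc/default/lxc-net override that maybeOverrideDefaultLXCNet
// is described as installing. The constant name and exact file contents are
// assumptions; only the idea of giving the dnsmasq DHCP range an infinite
// lease time comes from the comment above.
const exampleLXCNetOverride = `
USE_LXC_BRIDGE="true"
LXC_BRIDGE="lxcbr0"
LXC_ADDR="10.0.3.1"
LXC_NETMASK="255.255.255.0"
LXC_NETWORK="10.0.3.0/24"
# dnsmasq's dhcp-range accepts an optional lease time as the last field;
# "infinite" stops dhclient inside containers from renewing the lease and
# overwriting the statically configured address.
LXC_DHCP_RANGE="10.0.3.2,10.0.3.254,infinite"
LXC_DHCP_MAX="253"
`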
// GetContainerInterfaceInfo returns information to configure networking
// for a container. It accepts container tags as arguments. When the address
// allocation feature flag is enabled, it falls back to the legacy
// implementation.
func (p *ProvisionerAPI) GetContainerInterfaceInfo(args params.Entities) (
	params.MachineNetworkConfigResults, error) {
	if environs.AddressAllocationEnabled() {
		logger.Warningf("address allocation enabled - using legacyPrepareOrGetContainerInterfaceInfo(false)")
		return p.legacyPrepareOrGetContainerInterfaceInfo(args, false)
	}
	return p.prepareOrGetContainerInterfaceInfo(args, false)
}
// ContainerManagerConfig returns information from the environment config that is
// needed for configuring the container manager.
func (p *ProvisionerAPI) ContainerManagerConfig(args params.ContainerManagerConfigParams) (params.ContainerManagerConfig, error) {
	var result params.ContainerManagerConfig
	config, err := p.st.EnvironConfig()
	if err != nil {
		return result, err
	}
	cfg := make(map[string]string)
	cfg[container.ConfigName] = container.DefaultNamespace

	switch args.Type {
	case instance.LXC:
		if useLxcClone, ok := config.LXCUseClone(); ok {
			cfg["use-clone"] = fmt.Sprint(useLxcClone)
		}
		if useLxcCloneAufs, ok := config.LXCUseCloneAUFS(); ok {
			cfg["use-aufs"] = fmt.Sprint(useLxcCloneAufs)
		}
		if lxcDefaultMTU, ok := config.LXCDefaultMTU(); ok {
			logger.Debugf("using default MTU %v for all LXC containers NICs", lxcDefaultMTU)
			cfg[container.ConfigLXCDefaultMTU] = fmt.Sprintf("%d", lxcDefaultMTU)
		}
	}

	if !environs.AddressAllocationEnabled() {
		// No need to even try checking the environ for support.
		logger.Debugf("address allocation feature flag not enabled")
		result.ManagerConfig = cfg
		return result, nil
	}

	// Create an environment to verify networking support.
	env, err := environs.New(config)
	if err != nil {
		return result, err
	}
	if netEnv, ok := environs.SupportsNetworking(env); ok {
		// Passing network.AnySubnet below should be interpreted by
		// the provider as "does ANY subnet support this".
		supported, err := netEnv.SupportsAddressAllocation(network.AnySubnet)
		if err == nil && supported {
			cfg[container.ConfigIPForwarding] = "true"
		} else if err != nil {
			// We log the error, but it's safe to ignore as it's not critical.
			logger.Debugf("address allocation not supported (%v)", err)
		}
		// AWS requires NAT in place in order for hosted containers to
		// reach outside.
		if config.Type() == provider.EC2 {
			cfg[container.ConfigEnableNAT] = "true"
		}
	}

	result.ManagerConfig = cfg
	return result, nil
}
// SupportsAddressAllocation is specified on environs.Networking.
func (env *environ) SupportsAddressAllocation(subnetId network.Id) (bool, error) {
	if !environs.AddressAllocationEnabled() {
		return false, errors.NotSupportedf("address allocation")
	}
	if err := env.checkBroken("SupportsAddressAllocation"); err != nil {
		return false, err
	}
	// Any subnetId starting with "noalloc-" will cause this to return
	// false, so it can be used in tests.
	if strings.HasPrefix(string(subnetId), "noalloc-") {
		return false, nil
	}
	return true, nil
}
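// Illustrative sketch only (not part of the original source): how a test
// might exercise the "noalloc-" convention mentioned above. The suite name,
// the prepareEnviron helper, and the gc/jc checker identifiers are
// assumptions, not taken from the original code.
func (s *environSuite) TestSupportsAddressAllocationNoAllocPrefix(c *gc.C) {
	env := s.prepareEnviron(c)
	supported, err := env.SupportsAddressAllocation(network.Id("noalloc-1"))
	c.Assert(err, jc.ErrorIsNil)
	c.Check(supported, jc.IsFalse)
}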
// AllocateAddress requests an address to be allocated for the
// given instance on the given subnet.
func (env *environ) AllocateAddress(instId instance.Id, subnetId network.Id, addr *network.Address, macAddress, hostname string) error {
	if !environs.AddressAllocationEnabled("dummy") {
		// Any instId starting with "i-alloc-" when the feature flag is off will
		// still work, in order to be able to test a MAAS 1.8+ environment where
		// we can use devices for containers.
		if !strings.HasPrefix(string(instId), "i-alloc-") {
			return errors.NotSupportedf("address allocation")
		}
		// Also, in this case we expect addr to be non-nil, but empty, so it can
		// be used as an output argument (same as in provider/maas).
		if addr == nil || addr.Value != "" {
			return errors.NewNotValid(nil, "invalid address: nil or non-empty")
		}
	}
	if err := env.checkBroken("AllocateAddress"); err != nil {
		return err
	}
	estate, err := env.state()
	if err != nil {
		return err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	estate.maxAddr++
	if addr.Value == "" {
		*addr = network.NewAddress(fmt.Sprintf("0.10.0.%v", estate.maxAddr))
	}
	estate.ops <- OpAllocateAddress{
		Env:        env.name,
		InstanceId: instId,
		SubnetId:   subnetId,
		Address:    *addr,
		MACAddress: macAddress,
		HostName:   hostname,
	}
	return nil
}
// ReleaseAddress releases a specific address previously allocated with
// AllocateAddress.
func (env *environ) ReleaseAddress(instId instance.Id, subnetId network.Id, addr network.Address) error {
	if !environs.AddressAllocationEnabled() {
		return errors.NotSupportedf("address allocation")
	}
	if err := env.checkBroken("ReleaseAddress"); err != nil {
		return err
	}
	estate, err := env.state()
	if err != nil {
		return err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	estate.maxAddr--
	estate.ops <- OpReleaseAddress{
		Env:        env.name,
		InstanceId: instId,
		SubnetId:   subnetId,
		Address:    addr,
	}
	return nil
}
// legacyPrepareOrGetContainerInterfaceInfo optionally allocates an address and
// returns information for configuring networking on a container. It accepts
// container tags as arguments.
func (p *ProvisionerAPI) legacyPrepareOrGetContainerInterfaceInfo(
	args params.Entities,
	provisionContainer bool,
) (
	params.MachineNetworkConfigResults,
	error,
) {
	result := params.MachineNetworkConfigResults{
		Results: make([]params.MachineNetworkConfigResult, len(args.Entities)),
	}

	// Some preparations first.
	environ, host, canAccess, err := p.prepareContainerAccessEnvironment()
	if err != nil {
		return result, errors.Trace(err)
	}
	instId, err := host.InstanceId()
	if err != nil && errors.IsNotProvisioned(err) {
		// If the host machine is not provisioned yet, we have nothing
		// to do. NotProvisionedf will append " not provisioned" to
		// the message.
		err = errors.NotProvisionedf("cannot allocate addresses: host machine %q", host)
		return result, err
	}

	var subnet *state.Subnet
	var subnetInfo network.SubnetInfo
	var interfaceInfo network.InterfaceInfo
	if environs.AddressAllocationEnabled() {
		// We don't need a subnet unless we need to allocate a static IP.
		subnet, subnetInfo, interfaceInfo, err = p.prepareAllocationNetwork(environ, instId)
		if err != nil {
			return result, errors.Annotate(err, "cannot allocate addresses")
		}
	} else {
		var allInterfaceInfos []network.InterfaceInfo
		allInterfaceInfos, err = environ.NetworkInterfaces(instId)
		if err != nil {
			return result, errors.Annotatef(err, "cannot get instance %q interfaces", instId)
		} else if len(allInterfaceInfos) == 0 {
			return result, errors.New("no interfaces available")
		}
		// Currently we only support a single NIC per container, so we only need
		// the information from the host instance's first NIC.
		logger.Tracef("interfaces for instance %q: %v", instId, allInterfaceInfos)
		interfaceInfo = allInterfaceInfos[0]
	}

	// Loop over the passed container tags.
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		// The auth function (canAccess) checks that the machine is a
		// top level machine (we filter those out next) or that the
		// machine has the host as a parent.
		container, err := p.getMachine(canAccess, tag)
		if err != nil {
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if !container.IsContainer() {
			err = errors.Errorf("cannot allocate address for %q: not a container", tag)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if ciid, cerr := container.InstanceId(); provisionContainer && cerr == nil {
			// Since we want to configure and create NICs on the
			// container before it starts, it must also be not
			// provisioned yet.
			err = errors.Errorf("container %q already provisioned as %q", container, ciid)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if cerr != nil && !errors.IsNotProvisioned(cerr) {
			// Any other error needs to be reported.
			result.Results[i].Error = common.ServerError(cerr)
			continue
		}

		var macAddress string
		var address *state.IPAddress
		if provisionContainer {
			// Allocate and set an address.
			macAddress = generateMACAddress()
			address, err = p.allocateAddress(environ, subnet, host, container, instId, macAddress)
			if err != nil {
				err = errors.Annotatef(err, "failed to allocate an address for %q", container)
				result.Results[i].Error = common.ServerError(err)
				continue
			}
		} else {
			id := container.Id()
			addresses, err := p.st.AllocatedIPAddresses(id)
			if err != nil {
				logger.Warningf("failed to get allocated addresses for container %q: %v", tag, err)
				result.Results[i].Error = common.ServerError(err)
				continue
			}
			// TODO(dooferlad): if we get more than 1 address back, we ignore everything after
			// the first. The calling function expects exactly one result though,
			// so we don't appear to have a way of allocating >1 address to a
			// container...
			if len(addresses) != 1 {
				err = errors.Errorf("expected exactly 1 allocated address for container %q, got %d", tag, len(addresses))
				logger.Warningf("%v", err)
				result.Results[i].Error = common.ServerError(err)
				continue
			}
			address = addresses[0]
			macAddress = address.MACAddress()
		}

		// Store it on the machine, construct and set an interface result.
		dnsServers := make([]string, len(interfaceInfo.DNSServers))
		for l, dns := range interfaceInfo.DNSServers {
			dnsServers[l] = dns.Value
		}

		if macAddress == "" {
			macAddress = interfaceInfo.MACAddress
		}

		interfaceType := string(interfaceInfo.InterfaceType)
		if interfaceType == "" {
			interfaceType = string(network.EthernetInterface)
		}

		// TODO(dimitern): Support allocating one address per NIC on
		// the host, effectively creating the same number of NICs in
		// the container.
		result.Results[i] = params.MachineNetworkConfigResult{
			Config: []params.NetworkConfig{{
				DeviceIndex:      interfaceInfo.DeviceIndex,
				MACAddress:       macAddress,
				CIDR:             subnetInfo.CIDR,
				NetworkName:      interfaceInfo.NetworkName,
				ProviderId:       string(interfaceInfo.ProviderId),
				ProviderSubnetId: string(subnetInfo.ProviderId),
				VLANTag:          interfaceInfo.VLANTag,
				InterfaceType:    interfaceType,
				InterfaceName:    interfaceInfo.InterfaceName,
				Disabled:         interfaceInfo.Disabled,
				NoAutoStart:      interfaceInfo.NoAutoStart,
				DNSServers:       dnsServers,
				ConfigType:       string(network.ConfigStatic),
				Address:          address.Value(),
				GatewayAddress:   interfaceInfo.GatewayAddress.Value,
				ExtraConfig:      interfaceInfo.ExtraConfig,
			}},
		}
	}
	return result, nil
}
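// Illustrative sketch only (not part of the original source): one plausible
// implementation of the generateMACAddress helper used above, producing a MAC
// in the "00:16:3e" (Xensource) prefix range commonly used for virtual
// machines. The exact prefix and the use of math/rand are assumptions;
// requires the "fmt" and "math/rand" imports.
const macAddressTemplate = "00:16:3e:%02x:%02x:%02x"

func generateMACAddress() string {
	digits := make([]interface{}, 3)
	for i := range digits {
		digits[i] = rand.Intn(256)
	}
	return fmt.Sprintf(macAddressTemplate, digits...)
}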
// allocateAddress tries to pick an address out of the given subnet and
// allocates it to the container.
func (p *ProvisionerAPI) allocateAddress(
	environ environs.NetworkingEnviron,
	subnet *state.Subnet,
	host, container *state.Machine,
	instId instance.Id,
	macAddress string,
) (*state.IPAddress, error) {
	hostname := containerHostname(container.Tag())

	if !environs.AddressAllocationEnabled() {
		// Even if the address allocation feature flag is not enabled, we might
		// be running on MAAS 1.8+ with devices support, which we can use to
		// register containers getting IPs via DHCP. However, most of the usual
		// allocation code can be bypassed, we just need the parent instance ID
		// and a MAC address (no subnet or IP address).
		allocatedAddress := network.Address{}
		err := environ.AllocateAddress(instId, network.AnySubnet, &allocatedAddress, macAddress, hostname)
		if err != nil {
			// Not using MAAS 1.8+ or some other error.
			return nil, errors.Trace(err)
		}
		logger.Infof(
			"allocated address %q on instance %q for container %q",
			allocatedAddress.String(), instId, hostname,
		)

		// Add the address to state, so we can look it up later by MAC address.
		stateAddr, err := p.st.AddIPAddress(allocatedAddress, string(network.AnySubnet))
		if err != nil {
			return nil, errors.Annotatef(err, "failed to save address %q", allocatedAddress)
		}
		err = p.setAllocatedOrRelease(stateAddr, environ, instId, container, network.AnySubnet, macAddress)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return stateAddr, nil
	}

	subnetId := network.Id(subnet.ProviderId())
	for {
		addr, err := subnet.PickNewAddress()
		if err != nil {
			return nil, err
		}
		netAddr := addr.Address()
		logger.Tracef("picked new address %q on subnet %q", addr.String(), subnetId)
		// Attempt to allocate with environ.
		err = environ.AllocateAddress(instId, subnetId, &netAddr, macAddress, hostname)
		if err != nil {
			logger.Warningf(
				"allocating address %q on instance %q and subnet %q failed: %v (retrying)",
				addr.String(), instId, subnetId, err,
			)
			// It's as good as unavailable for us, so mark it as such.
			err = setAddrState(addr, state.AddressStateUnavailable)
			if err != nil {
				logger.Warningf(
					"cannot set address %q to %q: %v (ignoring and retrying)",
					addr.String(), state.AddressStateUnavailable, err,
				)
				continue
			}
			logger.Tracef(
				"setting address %q to %q and retrying",
				addr.String(), state.AddressStateUnavailable,
			)
			continue
		}
		logger.Infof(
			"allocated address %q on instance %q and subnet %q",
			addr.String(), instId, subnetId,
		)
		err = p.setAllocatedOrRelease(addr, environ, instId, container, subnetId, macAddress)
		if err != nil {
			// Something went wrong - retry.
			continue
		}
		return addr, nil
	}
}
// prepareOrGetContainerInterfaceInfo either allocates or maintains a static IP
// for the container (when the address allocation feature flag is enabled for
// the provider), or prepares the container's network configuration via the
// API, falling back to the host's DNS settings when none are returned.
func prepareOrGetContainerInterfaceInfo(
	api APICalls,
	machineID string,
	bridgeDevice string,
	allocateOrMaintain bool,
	enableNAT bool,
	startingNetworkInfo []network.InterfaceInfo,
	log loggo.Logger,
	providerType string,
) ([]network.InterfaceInfo, error) {
	maintain := !allocateOrMaintain

	if environs.AddressAllocationEnabled(providerType) {
		if maintain {
			log.Debugf("running maintenance for container %q", machineID)
		} else {
			log.Debugf("trying to allocate static IP for container %q", machineID)
		}

		allocatedInfo, err := configureContainerNetwork(
			machineID,
			bridgeDevice,
			api,
			startingNetworkInfo,
			allocateOrMaintain,
			enableNAT,
		)
		if err != nil && !maintain {
			log.Infof("not allocating static IP for container %q: %v", machineID, err)
		}
		return allocatedInfo, err
	}

	if maintain {
		log.Debugf("address allocation disabled: Not running maintenance for machine %q", machineID)
		return nil, nil
	}

	log.Debugf("address allocation feature flag not enabled; using multi-bridge networking for container %q", machineID)

	// In case we're running on MAAS 1.8+ with devices support, we'll still
	// call PrepareContainerInterfaceInfo(), but we'll ignore a NotSupported
	// error if we get it (which means we're not using MAAS 1.8+).
	containerTag := names.NewMachineTag(machineID)
	preparedInfo, err := api.PrepareContainerInterfaceInfo(containerTag)
	if err != nil && errors.IsNotSupported(err) {
		log.Warningf("new container %q not registered as device: not running on MAAS 1.8+", machineID)
		return nil, nil
	} else if err != nil {
		return nil, errors.Trace(err)
	}
	log.Tracef("PrepareContainerInterfaceInfo returned %+v", preparedInfo)

	dnsServersFound := false
	for _, info := range preparedInfo {
		if len(info.DNSServers) > 0 {
			dnsServersFound = true
			break
		}
	}
	if !dnsServersFound {
		logger.Warningf("no DNS settings found, discovering the host settings")
		dnsServers, searchDomain, err := localDNSServers()
		if err != nil {
			return nil, errors.Trace(err)
		}

		// Since the result is sorted, the first entry is the primary NIC.
		preparedInfo[0].DNSServers = dnsServers
		preparedInfo[0].DNSSearchDomains = []string{searchDomain}
		logger.Debugf(
			"setting DNS servers %+v and domains %+v on container interface %q",
			preparedInfo[0].DNSServers, preparedInfo[0].DNSSearchDomains, preparedInfo[0].InterfaceName,
		)
	}
	return preparedInfo, nil
}
func prepareOrGetContainerInterfaceInfo(
	api APICalls,
	machineID string,
	bridgeDevice string,
	allocateOrMaintain bool,
	enableNAT bool,
	startingNetworkInfo []network.InterfaceInfo,
	log loggo.Logger,
) ([]network.InterfaceInfo, error) {
	maintain := !allocateOrMaintain

	if environs.AddressAllocationEnabled() {
		if maintain {
			log.Debugf("running maintenance for container %q", machineID)
		} else {
			log.Debugf("trying to allocate static IP for container %q", machineID)
		}

		allocatedInfo, err := configureContainerNetwork(
			machineID,
			bridgeDevice,
			api,
			startingNetworkInfo,
			allocateOrMaintain,
			enableNAT,
		)
		if err != nil && !maintain {
			log.Infof("not allocating static IP for container %q: %v", machineID, err)
		}
		return allocatedInfo, err
	}

	if maintain {
		log.Debugf("address allocation disabled: Not running maintenance for machine %q", machineID)
		return nil, nil
	}

	log.Debugf("address allocation feature flag not enabled; using DHCP for container %q", machineID)

	// In case we're running on MAAS 1.8+ with devices support, we'll still
	// call PrepareContainerInterfaceInfo(), but we'll ignore a NotSupported
	// error if we get it (which means we're not using MAAS 1.8+).
	containerTag := names.NewMachineTag(machineID)
	preparedInfo, err := api.PrepareContainerInterfaceInfo(containerTag)
	if err != nil && errors.IsNotSupported(err) {
		log.Warningf("new container %q not registered as device: not running on MAAS 1.8+", machineID)
		return nil, nil
	} else if err != nil {
		return nil, errors.Trace(err)
	}

	dnsServers, searchDomain, dnsErr := localDNSServers()
	if dnsErr != nil {
		return nil, errors.Trace(dnsErr)
	}
	for i := range preparedInfo {
		preparedInfo[i].DNSServers = dnsServers
		preparedInfo[i].DNSSearch = searchDomain
	}
	log.Tracef("PrepareContainerInterfaceInfo returned %#v", preparedInfo)

	// Most likely there will be only one item in the list, but check
	// all of them for forward compatibility.
	macAddresses := set.NewStrings()
	for _, prepInfo := range preparedInfo {
		macAddresses.Add(prepInfo.MACAddress)
	}
	log.Infof(
		"new container %q registered as a MAAS device with MAC address(es) %v",
		machineID, macAddresses.SortedValues(),
	)
	return preparedInfo, nil
}
// ReleaseContainerAddresses finds addresses allocated to a container
// and marks them as Dead, to be released and removed. It accepts
// container tags as arguments. If the address allocation feature flag is
// not enabled, it returns a NotSupported error.
func (p *ProvisionerAPI) ReleaseContainerAddresses(args params.Entities) (params.ErrorResults, error) {
	result := params.ErrorResults{
		Results: make([]params.ErrorResult, len(args.Entities)),
	}
	if !environs.AddressAllocationEnabled() {
		return result, errors.NotSupportedf("address allocation")
	}

	canAccess, err := p.getAuthFunc()
	if err != nil {
		logger.Errorf("failed to get an authorisation function: %v", err)
		return result, errors.Trace(err)
	}

	// Loop over the passed container tags.
	for i, entity := range args.Entities {
		tag, err := names.ParseMachineTag(entity.Tag)
		if err != nil {
			logger.Warningf("failed to parse machine tag %q: %v", entity.Tag, err)
			result.Results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}

		// The auth function (canAccess) checks that the machine is a
		// top level machine (we filter those out next) or that the
		// machine has the host as a parent.
		container, err := p.getMachine(canAccess, tag)
		if err != nil {
			logger.Warningf("failed to get machine %q: %v", tag, err)
			result.Results[i].Error = common.ServerError(err)
			continue
		} else if !container.IsContainer() {
			err = errors.Errorf("cannot mark addresses for removal for %q: not a container", tag)
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		id := container.Id()
		addresses, err := p.st.AllocatedIPAddresses(id)
		if err != nil {
			logger.Warningf("failed to get allocated addresses for container %q: %v", tag, err)
			result.Results[i].Error = common.ServerError(err)
			continue
		}

		deadErrors := []error{}
		logger.Debugf("for container %q found addresses %v", tag, addresses)
		for _, addr := range addresses {
			err = addr.EnsureDead()
			if err != nil {
				deadErrors = append(deadErrors, err)
				continue
			}
		}
		if len(deadErrors) != 0 {
			err = errors.Errorf("failed to mark all addresses for removal for %q: %v", tag, deadErrors)
			result.Results[i].Error = common.ServerError(err)
		}
	}

	return result, nil
}
// StartInstance is specified in the Broker interface.
func (broker *kvmBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting kvm containers with networks is not supported yet")
	}
	// TODO: refactor common code out of the container brokers.
	machineId := args.InstanceConfig.MachineId
	kvmLogger.Infof("starting kvm container for machineId: %s", machineId)

	// TODO: Default to using the host network until we can configure. Yes,
	// this is using the LxcBridge value, we should put it in the api call for
	// container config.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = kvm.DefaultKvmBridge
	}
	if !environs.AddressAllocationEnabled() {
		logger.Debugf(
			"address allocation feature flag not enabled; using DHCP for container %q",
			machineId,
		)
	} else {
		logger.Debugf("trying to allocate static IP for container %q", machineId)
		allocatedInfo, err := configureContainerNetwork(
			machineId,
			bridgeDevice,
			broker.api,
			args.NetworkInfo,
			true, // allocate a new address.
			broker.enableNAT,
		)
		if err != nil {
			// It's fine, just ignore it. The effect will be that the
			// container won't have a static address configured.
			logger.Infof("not allocating static IP for container %q: %v", machineId, err)
		} else {
			args.NetworkInfo = allocatedInfo
		}
	}

	// Unlike with LXC, we don't override the default MTU to use.
	network := container.BridgeNetworkConfig(bridgeDevice, 0, args.NetworkInfo)

	series := args.Tools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.KVM
	args.InstanceConfig.Tools = args.Tools[0]

	config, err := broker.api.ContainerConfig()
	if err != nil {
		kvmLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}
	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.PreferIPv6,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		kvmLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	storageConfig := &container.StorageConfig{
		AllowMount: true,
	}
	inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig)
	if err != nil {
		kvmLogger.Errorf("failed to start container: %v", err)
		return nil, err
	}
	kvmLogger.Infof("started kvm container for machineId: %s, %s, %s", machineId, inst.Id(), hardware.String())
	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: network.Interfaces,
	}, nil
}
// shutdownInitCommands returns the commands needed to install and start a
// one-shot service that cleans up the template instance and shuts it down.
func shutdownInitCommands(initSystem, series string) ([]string, error) {
	// These files are removed just before the template shuts down.
	cleanupOnShutdown := []string{
		// We remove any dhclient lease files so there's no chance a
		// clone will reuse a lease from the template it was cloned
		// from.
		"/var/lib/dhcp/dhclient*",
		// Both of these sets of files below are recreated on boot and
		// if we leave them in the template's rootfs, boot logs coming
		// from cloned containers will be appended. It's better to
		// keep clean logs for diagnosing issues / debugging.
		"/var/log/cloud-init*.log",
	}

	// Using EOC below as the template shutdown script is itself
	// passed through cat > ... < EOF.
	replaceNetConfCmd := fmt.Sprintf(
		"/bin/cat > /etc/network/interfaces << EOC%sEOC\n ",
		defaultEtcNetworkInterfaces,
	)
	paths := strings.Join(cleanupOnShutdown, " ")
	removeCmd := fmt.Sprintf("/bin/rm -fr %s\n ", paths)
	shutdownCmd := "/sbin/shutdown -h now"
	name := "juju-template-restart"
	desc := "juju shutdown job"

	execStart := shutdownCmd
	if environs.AddressAllocationEnabled() {
		// Only do the cleanup and replacement of /e/n/i when the address
		// allocation feature flag is enabled.
		execStart = replaceNetConfCmd + removeCmd + shutdownCmd
	}

	conf := common.Conf{
		Desc:         desc,
		Transient:    true,
		AfterStopped: "cloud-final",
		ExecStart:    execStart,
	}
	// systemd uses targets for synchronization of services.
	if initSystem == service.InitSystemSystemd {
		conf.AfterStopped = "cloud-config.target"
	}
	svc, err := service.NewService(name, conf, series)
	if err != nil {
		return nil, errors.Trace(err)
	}

	cmds, err := svc.InstallCommands()
	if err != nil {
		return nil, errors.Trace(err)
	}

	startCommands, err := svc.StartCommands()
	if err != nil {
		return nil, errors.Trace(err)
	}
	cmds = append(cmds, startCommands...)

	return cmds, nil
}
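// Illustrative sketch only (not part of the original source): a plausible
// value for the defaultEtcNetworkInterfaces constant referenced above, i.e. a
// minimal /etc/network/interfaces that returns the template to plain DHCP on
// its primary NIC. The exact contents are an assumption.
const defaultEtcNetworkInterfaces = `
# loopback interface
auto lo
iface lo inet loopback

# primary interface
auto eth0
iface eth0 inet dhcp
`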
// StartInstance is specified in the Broker interface.
func (broker *lxcBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting lxc containers with networks is not supported yet")
	}
	// TODO: refactor common code out of the container brokers.
	machineId := args.InstanceConfig.MachineId
	lxcLogger.Infof("starting lxc container for machineId: %s", machineId)

	// Default to using the host network until we can configure.
	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
	if bridgeDevice == "" {
		bridgeDevice = lxc.DefaultLxcBridge
	}
	if !environs.AddressAllocationEnabled() {
		logger.Debugf(
			"address allocation feature flag not enabled; using DHCP for container %q",
			machineId,
		)
	} else {
		logger.Debugf("trying to allocate static IP for container %q", machineId)
		allocatedInfo, err := configureContainerNetwork(
			machineId,
			bridgeDevice,
			broker.api,
			args.NetworkInfo,
			true, // allocate a new address.
			broker.enableNAT,
		)
		if err != nil {
			// It's fine, just ignore it. The effect will be that the
			// container won't have a static address configured.
			logger.Infof("not allocating static IP for container %q: %v", machineId, err)
		} else {
			args.NetworkInfo = allocatedInfo
		}
	}
	network := container.BridgeNetworkConfig(bridgeDevice, broker.defaultMTU, args.NetworkInfo)

	// The provisioner worker will provide all tools it knows about
	// (after applying explicitly specified constraints), which may
	// include tools for architectures other than the host's. We
	// must constrain to the host's architecture for LXC.
	archTools, err := args.Tools.Match(tools.Filter{
		Arch: version.Current.Arch,
	})
	if err == tools.ErrNoMatches {
		return nil, errors.Errorf(
			"need tools for arch %s, only found %s",
			version.Current.Arch,
			args.Tools.Arches(),
		)
	}

	series := archTools.OneSeries()
	args.InstanceConfig.MachineContainerType = instance.LXC
	args.InstanceConfig.Tools = archTools[0]

	config, err := broker.api.ContainerConfig()
	if err != nil {
		lxcLogger.Errorf("failed to get container config: %v", err)
		return nil, err
	}
	storageConfig := &container.StorageConfig{
		AllowMount: config.AllowLXCLoopMounts,
	}

	if err := instancecfg.PopulateInstanceConfig(
		args.InstanceConfig,
		config.ProviderType,
		config.AuthorizedKeys,
		config.SSLHostnameVerification,
		config.Proxy,
		config.AptProxy,
		config.AptMirror,
		config.PreferIPv6,
		config.EnableOSRefreshUpdate,
		config.EnableOSUpgrade,
	); err != nil {
		lxcLogger.Errorf("failed to populate machine config: %v", err)
		return nil, err
	}

	inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig)
	if err != nil {
		lxcLogger.Errorf("failed to start container: %v", err)
		return nil, err
	}
	lxcLogger.Infof("started lxc container for machineId: %s, %s, %s", machineId, inst.Id(), hardware.String())
	return &environs.StartInstanceResult{
		Instance:    inst,
		Hardware:    hardware,
		NetworkInfo: network.Interfaces,
	}, nil
}