// getInstanceNetworkInterfaces returns a map of interface MAC address
// to ifaceInfo for each network interface of the given instance, as
// discovered during the commissioning phase.
func (environ *maasEnviron) getInstanceNetworkInterfaces(inst instance.Instance) (map[string]ifaceInfo, error) {
	maasInst := inst.(*maasInstance)
	maasObj := maasInst.maasObject
	result, err := maasObj.CallGet("details", nil)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Get the node's lldp / lshw details discovered at commissioning.
	data, err := result.GetBytes()
	if err != nil {
		return nil, errors.Trace(err)
	}
	var parsed map[string]interface{}
	if err := bson.Unmarshal(data, &parsed); err != nil {
		return nil, errors.Trace(err)
	}
	lshwData, ok := parsed["lshw"]
	if !ok {
		return nil, errors.Errorf("no hardware information available for node %q", inst.Id())
	}
	lshwXML, ok := lshwData.([]byte)
	if !ok {
		return nil, errors.Errorf("invalid hardware information for node %q", inst.Id())
	}
	// Now we have the lshw XML data, parse it to extract and return NICs.
	return extractInterfaces(inst, lshwXML)
}
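
// A hypothetical consumer sketch (logInterfaces is illustrative, not part
// of the provider): how the map returned above might be iterated.
func logInterfaces(environ *maasEnviron, inst instance.Instance) error {
	interfaces, err := environ.getInstanceNetworkInterfaces(inst)
	if err != nil {
		return errors.Trace(err)
	}
	for mac, info := range interfaces {
		logger.Debugf("MAC %q -> device %d (%q), disabled=%v",
			mac, info.DeviceIndex, info.InterfaceName, info.Disabled)
	}
	return nil
}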
// handleBootstrapError cleans up after a failed bootstrap.
func handleBootstrapError(err error, ctx environs.BootstrapContext, inst instance.Instance, env environs.Environ) {
	if err == nil {
		return
	}
	logger.Errorf("bootstrap failed: %v", err)
	ch := make(chan os.Signal, 1)
	ctx.InterruptNotify(ch)
	// Stop signal notifications before closing the channel, so a
	// late signal cannot be delivered to a closed channel.
	defer close(ch)
	defer ctx.StopInterruptNotify(ch)
	go func() {
		for range ch {
			fmt.Fprintln(ctx.GetStderr(), "Cleaning up failed bootstrap")
		}
	}()
	if inst != nil {
		fmt.Fprintln(ctx.GetStderr(), "Stopping instance...")
		if stoperr := env.StopInstances(inst.Id()); stoperr != nil {
			logger.Errorf("cannot stop failed bootstrap instance %q: %v", inst.Id(), stoperr)
		} else {
			// Set to nil so we know we can safely delete the state file.
			inst = nil
		}
	}
	// We only delete the bootstrap state file if either we didn't
	// start an instance, or we managed to cleanly stop it.
	if inst == nil {
		if rmerr := bootstrap.DeleteStateFile(env.Storage()); rmerr != nil {
			logger.Errorf("cannot delete bootstrap state file: %v", rmerr)
		}
	}
}
func (s *lxcBrokerSuite) assertDefaultNetworkConfig(c *gc.C, lxc instance.Instance) {
	lxc_conf := filepath.Join(s.ContainerDir, string(lxc.Id()), "lxc.conf")
	expect := []string{
		"lxc.network.type = veth",
		"lxc.network.link = lxcbr0",
	}
	AssertFileContains(c, lxc_conf, expect...)
}
func (c *rackspaceFirewaller) getInstanceConfigurator(inst instance.Instance) ([]network.Address, common.InstanceConfigurator, error) {
	addresses, err := inst.Addresses()
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	if len(addresses) == 0 {
		return addresses, nil, errors.New("no addresses found")
	}
	client := common.NewSshInstanceConfigurator(addresses[0].Value)
	return addresses, client, nil
}
// extractInterfaces parses the XML output of lshw and extracts all
// network interfaces, returning a map of MAC address to ifaceInfo.
func extractInterfaces(inst instance.Instance, lshwXML []byte) (map[string]ifaceInfo, error) {
	type Node struct {
		Id          string `xml:"id,attr"`
		Disabled    bool   `xml:"disabled,attr,omitempty"`
		Description string `xml:"description"`
		Serial      string `xml:"serial"`
		LogicalName string `xml:"logicalname"`
		Children    []Node `xml:"node"`
	}
	type List struct {
		Nodes []Node `xml:"node"`
	}
	var lshw List
	if err := xml.Unmarshal(lshwXML, &lshw); err != nil {
		return nil, errors.Annotatef(err, "cannot parse lshw XML details for node %q", inst.Id())
	}
	interfaces := make(map[string]ifaceInfo)
	var processNodes func(nodes []Node) error
	var baseIndex int
	processNodes = func(nodes []Node) error {
		for _, node := range nodes {
			if strings.HasPrefix(node.Id, "network") {
				index := baseIndex
				if strings.HasPrefix(node.Id, "network:") {
					// There is an index suffix, parse it.
					var err error
					index, err = strconv.Atoi(strings.TrimPrefix(node.Id, "network:"))
					if err != nil {
						return errors.Annotatef(err, "lshw output for node %q has invalid ID suffix for %q", inst.Id(), node.Id)
					}
				} else {
					baseIndex++
				}
				if node.Disabled {
					logger.Debugf("node %q skipping disabled network interface %q", inst.Id(), node.LogicalName)
				}
				interfaces[node.Serial] = ifaceInfo{
					DeviceIndex:   index,
					InterfaceName: node.LogicalName,
					Disabled:      node.Disabled,
				}
			}
			if err := processNodes(node.Children); err != nil {
				return err
			}
		}
		return nil
	}
	err := processNodes(lshw.Nodes)
	return interfaces, err
}
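
// A minimal sketch (values illustrative, not from a real node) of the lshw
// XML shape the parser above expects: network nodes may carry an index
// suffix ("network:0") and a disabled attribute, and the <serial> element
// holds the interface's MAC address.
const sampleLSHW = `
<list>
  <node id="core">
    <node id="network:0">
      <description>Ethernet interface</description>
      <serial>aa:bb:cc:dd:ee:f0</serial>
      <logicalname>eth0</logicalname>
    </node>
    <node id="network:1" disabled="true">
      <description>Ethernet interface</description>
      <serial>aa:bb:cc:dd:ee:f1</serial>
      <logicalname>eth1</logicalname>
    </node>
  </node>
</list>
`

// extractInterfaces(inst, []byte(sampleLSHW)) would map both MACs to their
// device indices, with eth1 recorded as Disabled.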
// instInfo returns the instance info for the given id
// and instance. If inst is nil, it returns a not-found error.
func (*aggregator) instInfo(id instance.Id, inst instance.Instance) (instanceInfo, error) {
	if inst == nil {
		return instanceInfo{}, errors.NotFoundf("instance %v", id)
	}
	addr, err := inst.Addresses()
	if err != nil {
		return instanceInfo{}, err
	}
	return instanceInfo{
		addr,
		inst.Status(),
	}, nil
}
// Bootstrap is a common implementation of the Bootstrap method defined on
// environs.Environ; we strongly recommend that this implementation be used
// when writing a new provider.
func Bootstrap(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, err error) {
	// TODO make safe in the case of racing Bootstraps.
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	var inst instance.Instance
	defer func() { handleBootstrapError(err, ctx, inst, env) }()

	// First thing, ensure we have tools otherwise there's no point.
	series = config.PreferredSeries(env.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return "", "", nil, err
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return "", "", nil, fmt.Errorf("no SSH client available")
	}

	machineConfig, err := environs.NewBootstrapMachineConfig(args.Constraints, series)
	if err != nil {
		return "", "", nil, err
	}
	machineConfig.EnableOSRefreshUpdate = env.Config().EnableOSRefreshUpdate()
	machineConfig.EnableOSUpgrade = env.Config().EnableOSUpgrade()

	fmt.Fprintln(ctx.GetStderr(), "Launching instance")
	inst, hw, _, err := env.StartInstance(environs.StartInstanceParams{
		Constraints:   args.Constraints,
		Tools:         availableTools,
		MachineConfig: machineConfig,
		Placement:     args.Placement,
	})
	if err != nil {
		return "", "", nil, fmt.Errorf("cannot start bootstrap instance: %v", err)
	}
	fmt.Fprintf(ctx.GetStderr(), " - %s\n", inst.Id())

	err = SaveState(env.Storage(), &BootstrapState{
		StateInstances: []instance.Id{inst.Id()},
	})
	if err != nil {
		return "", "", nil, fmt.Errorf("cannot save state: %v", err)
	}

	finalize := func(ctx environs.BootstrapContext, mcfg *cloudinit.MachineConfig) error {
		mcfg.InstanceId = inst.Id()
		mcfg.HardwareCharacteristics = hw
		if err := environs.FinishMachineConfig(mcfg, env.Config()); err != nil {
			return err
		}
		return FinishBootstrap(ctx, client, inst, mcfg)
	}
	return *hw.Arch, series, finalize, nil
}
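
// A hypothetical provider sketch (exampleEnviron is illustrative): an
// environs.Environ implementation would typically delegate its Bootstrap
// method to the common implementation above.
func (env *exampleEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (string, string, environs.BootstrapFinalizer, error) {
	return common.Bootstrap(ctx, env, args)
}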
// Bootstrap is a common implementation of the Bootstrap method defined on
// environs.Environ; we strongly recommend that this implementation be used
// when writing a new provider.
func Bootstrap(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams) (err error) {
	// TODO make safe in the case of racing Bootstraps.
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	var inst instance.Instance
	defer func() { handleBootstrapError(err, ctx, inst, env) }()

	network.InitializeFromConfig(env.Config())

	// First thing, ensure we have tools otherwise there's no point.
	selectedTools, err := EnsureBootstrapTools(ctx, env, config.PreferredSeries(env.Config()), args.Constraints.Arch)
	if err != nil {
		return err
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return fmt.Errorf("no SSH client available")
	}

	privateKey, err := GenerateSystemSSHKey(env)
	if err != nil {
		return err
	}
	machineConfig := environs.NewBootstrapMachineConfig(privateKey)

	fmt.Fprintln(ctx.GetStderr(), "Launching instance")
	inst, hw, _, err := env.StartInstance(environs.StartInstanceParams{
		Constraints:   args.Constraints,
		Tools:         selectedTools,
		MachineConfig: machineConfig,
		Placement:     args.Placement,
	})
	if err != nil {
		return fmt.Errorf("cannot start bootstrap instance: %v", err)
	}
	fmt.Fprintf(ctx.GetStderr(), " - %s\n", inst.Id())
	machineConfig.InstanceId = inst.Id()
	machineConfig.HardwareCharacteristics = hw

	err = bootstrap.SaveState(
		env.Storage(),
		&bootstrap.BootstrapState{
			StateInstances: []instance.Id{inst.Id()},
		})
	if err != nil {
		return fmt.Errorf("cannot save state: %v", err)
	}
	return FinishBootstrap(ctx, client, inst, machineConfig)
}
// setupNetworks prepares a []network.InterfaceInfo for the given instance. Any
// disabled network interfaces (as discovered from the lshw output for the node)
// will stay disabled.
func (environ *maasEnviron) setupNetworks(inst instance.Instance) ([]network.InterfaceInfo, error) {
	// Get the instance network interfaces first.
	interfaces, err := environ.getInstanceNetworkInterfaces(inst)
	if err != nil {
		return nil, errors.Annotatef(err, "getInstanceNetworkInterfaces failed")
	}
	logger.Debugf("node %q has network interfaces %v", inst.Id(), interfaces)
	networks, err := environ.getInstanceNetworks(inst)
	if err != nil {
		return nil, errors.Annotatef(err, "getInstanceNetworks failed")
	}
	logger.Debugf("node %q has networks %v", inst.Id(), networks)
	var tempInterfaceInfo []network.InterfaceInfo
	for _, netw := range networks {
		netCIDR := &net.IPNet{
			IP:   net.ParseIP(netw.IP),
			Mask: net.IPMask(net.ParseIP(netw.Mask)),
		}
		macs, err := environ.getNetworkMACs(netw.Name)
		if err != nil {
			return nil, errors.Annotatef(err, "getNetworkMACs failed")
		}
		logger.Debugf("network %q has MACs: %v", netw.Name, macs)
		var defaultGateway network.Address
		if netw.DefaultGateway != "" {
			defaultGateway = network.NewAddress(netw.DefaultGateway)
		}
		for _, mac := range macs {
			if ifinfo, ok := interfaces[mac]; ok {
				tempInterfaceInfo = append(tempInterfaceInfo, network.InterfaceInfo{
					MACAddress:     mac,
					InterfaceName:  ifinfo.InterfaceName,
					DeviceIndex:    ifinfo.DeviceIndex,
					CIDR:           netCIDR.String(),
					VLANTag:        netw.VLANTag,
					ProviderId:     network.Id(netw.Name),
					NetworkName:    netw.Name,
					Disabled:       ifinfo.Disabled,
					GatewayAddress: defaultGateway,
				})
			}
		}
	}
	// Verify we filled in everything for all networks/interfaces
	// and drop incomplete records.
	var interfaceInfo []network.InterfaceInfo
	for _, info := range tempInterfaceInfo {
		if info.ProviderId == "" || info.NetworkName == "" || info.CIDR == "" {
			logger.Infof("ignoring interface %q: missing subnet info", info.InterfaceName)
			continue
		}
		if info.MACAddress == "" || info.InterfaceName == "" {
			logger.Infof("ignoring subnet %q: missing interface info", info.ProviderId)
			continue
		}
		interfaceInfo = append(interfaceInfo, info)
	}
	logger.Debugf("node %q network information: %#v", inst.Id(), interfaceInfo)
	return interfaceInfo, nil
}
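
// A minimal sketch of the CIDR derivation used above, assuming MAAS reports
// the mask in dotted-quad form (e.g. "255.255.255.0"); net.IPNet.String
// normalizes the 16-byte representation that net.ParseIP returns for IPv4.
func exampleCIDR() {
	netCIDR := &net.IPNet{
		IP:   net.ParseIP("192.168.1.0"),
		Mask: net.IPMask(net.ParseIP("255.255.255.0")),
	}
	fmt.Println(netCIDR.String()) // prints "192.168.1.0/24"
}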
// assertPorts retrieves the open ports of the instance and compares them
// to the expected.
func (s *FirewallerSuite) assertPorts(c *gc.C, inst instance.Instance, machineId string, expected []network.Port) {
	s.BackingState.StartSync()
	start := time.Now()
	for {
		got, err := inst.Ports(machineId)
		if err != nil {
			c.Fatal(err)
			return
		}
		network.SortPorts(got)
		network.SortPorts(expected)
		if reflect.DeepEqual(got, expected) {
			c.Succeed()
			return
		}
		if time.Since(start) > coretesting.LongWait {
			c.Fatalf("timed out: expected %q; got %q", expected, got)
			return
		}
		time.Sleep(coretesting.ShortWait)
	}
}
// assertInstanceId asserts that the machine has an instance id
// that matches that of the given instance. If the instance is nil,
// it asserts that the instance id is unset.
func assertInstanceId(c *gc.C, m *state.Machine, inst instance.Instance) {
	var wantId, gotId instance.Id
	var err error
	if inst != nil {
		wantId = inst.Id()
	}
	for a := waitAgent.Start(); a.Next(); {
		// Assign to the outer err rather than shadowing it, so the
		// assertion after the loop sees the last error we observed.
		err = m.Refresh()
		c.Assert(err, gc.IsNil)
		gotId, err = m.InstanceId()
		if err != nil {
			c.Assert(err, jc.Satisfies, state.IsNotProvisionedError)
			if inst == nil {
				return
			}
			continue
		}
		break
	}
	c.Assert(err, gc.IsNil)
	c.Assert(gotId, gc.Equals, wantId)
}
// createInstance creates all of the Azure entities necessary for a
// new instance. This includes Cloud Service, Deployment and Role.
//
// If serviceName is non-empty, then createInstance will assign to
// the Cloud Service with that name. Otherwise, a new Cloud Service
// will be created.
func (env *azureEnviron) createInstance(azure *gwacl.ManagementAPI, role *gwacl.Role, serviceName string, stateServer bool) (resultInst instance.Instance, resultErr error) {
	var inst instance.Instance
	defer func() {
		if inst != nil && resultErr != nil {
			if err := env.StopInstances(inst.Id()); err != nil {
				// Failure upon failure. Log it, but return the original error.
				logger.Errorf("error releasing failed instance: %v", err)
			}
		}
	}()
	var err error
	var service *gwacl.HostedService
	if serviceName != "" {
		logger.Debugf("creating instance in existing cloud service %q", serviceName)
		service, err = azure.GetHostedServiceProperties(serviceName, true)
	} else {
		logger.Debugf("creating instance in new cloud service")
		// If we're creating a cloud service for state servers,
		// we will want to open additional ports. We need to
		// record this against the cloud service, so we use a
		// special label for the purpose.
		var label string
		if stateServer {
			label = stateServerLabel
		}
		service, err = newHostedService(azure, env.getEnvPrefix(), env.getAffinityGroupName(), label)
	}
	if err != nil {
		return nil, err
	}
	if len(service.Deployments) == 0 {
		// This is a newly created cloud service, so we
		// should destroy it if anything below fails.
		defer func() {
			if resultErr != nil {
				azure.DeleteHostedService(service.ServiceName)
				// Destroying the hosted service destroys the instance,
				// so ensure StopInstances isn't called.
				inst = nil
			}
		}()
		// Create an initial deployment.
		deployment := gwacl.NewDeploymentForCreateVMDeployment(
			deploymentNameV2(service.ServiceName),
			deploymentSlot,
			deploymentNameV2(service.ServiceName),
			[]gwacl.Role{*role},
			env.getVirtualNetworkName(),
		)
		if err := azure.AddDeployment(deployment, service.ServiceName); err != nil {
			return nil, errors.Annotate(err, "error creating VM deployment")
		}
		service.Deployments = append(service.Deployments, *deployment)
	} else {
		// Update the deployment.
		deployment := &service.Deployments[0]
		if err := azure.AddRole(&gwacl.AddRoleRequest{
			ServiceName:      service.ServiceName,
			DeploymentName:   deployment.Name,
			PersistentVMRole: (*gwacl.PersistentVMRole)(role),
		}); err != nil {
			return nil, err
		}
		deployment.RoleList = append(deployment.RoleList, *role)
	}
	return env.getInstance(service, role.RoleName)
}
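
// A minimal sketch (all names hypothetical) of the deferred-cleanup pattern
// used above: naming the error result lets the defer observe whether any
// later step failed and roll back the partially created resource.
func createWithCleanup() (resultErr error) {
	res, err := acquireResource() // hypothetical helper
	if err != nil {
		return err
	}
	defer func() {
		if resultErr != nil {
			res.Release() // hypothetical: tear down on any later failure
		}
	}()
	return configureResource(res) // hypothetical helper
}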
func (s *lxcBrokerSuite) lxcRemovedContainerDir(inst instance.Instance) string {
	return filepath.Join(s.RemovedDir, string(inst.Id()))
}
func (s *lxcBrokerSuite) assertDefaultStorageConfig(c *gc.C, lxc instance.Instance) {
	config := filepath.Join(s.LxcDir, string(lxc.Id()), "config")
	AssertFileContents(c, gc.Not(jc.Contains), config,
		"lxc.aa_profile = lxc-container-default-with-mounting")
}
func (s *kvmBrokerSuite) kvmContainerDir(inst instance.Instance) string {
	return filepath.Join(s.ContainerDir, string(inst.Id()))
}