// InstanceTags returns the minimum set of tags that should be set on a // machine instance, if the provider supports them. func InstanceTags(cfg *config.Config, jobs []multiwatcher.MachineJob) map[string]string { instanceTags := tags.ResourceTags(names.NewModelTag(cfg.UUID()), cfg) if multiwatcher.AnyJobNeedsState(jobs...) { instanceTags[tags.JujuController] = "true" } return instanceTags }
func (c *environClient) generateSigmaComponents(baseName string, constraints *sigmaConstraints, args environs.StartInstanceParams, drv gosigma.Drive, userData []byte) (cc gosigma.Components, err error) { cc.SetName(baseName) cc.SetDescription(baseName) cc.SetSMP(constraints.cores) cc.SetCPU(constraints.power) cc.SetMem(constraints.mem) vncpass, err := utils.RandomPassword() if err != nil { err = errors.Errorf("error generating password: %v", err) return } cc.SetVNCPassword(vncpass) logger.Debugf("Setting ssh key: %s end", c.config.AuthorizedKeys()) cc.SetSSHPublicKey(c.config.AuthorizedKeys()) cc.AttachDrive(1, "0:0", "virtio", drv.UUID()) cc.NetworkDHCP4(gosigma.ModelVirtio) if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) { cc.SetMeta(jujuMetaInstance, jujuMetaInstanceStateServer) } else { cc.SetMeta(jujuMetaInstance, jujuMetaInstanceServer) } cc.SetMeta(jujuMetaEnvironment, c.uuid) cc.SetMeta(jujuMetaCoudInit, string(userData)) cc.SetMeta(jujuMetaBase64, jujuMetaCoudInit) return cc, nil }
// InstanceTags returns the minimum set of tags that should be set on a // machine instance, if the provider supports them. func InstanceTags(cfg *config.Config, jobs []multiwatcher.MachineJob) map[string]string { uuid, _ := cfg.UUID() instanceTags := tags.ResourceTags(names.NewEnvironTag(uuid), cfg) if multiwatcher.AnyJobNeedsState(jobs...) { instanceTags[tags.JujuStateServer] = "true" } return instanceTags }
// InstanceTags returns the minimum set of tags that should be set on a // machine instance, if the provider supports them. func InstanceTags(modelUUID, controllerUUID string, tagger tags.ResourceTagger, jobs []multiwatcher.MachineJob) map[string]string { instanceTags := tags.ResourceTags( names.NewModelTag(modelUUID), names.NewControllerTag(controllerUUID), tagger, ) if multiwatcher.AnyJobNeedsState(jobs...) { instanceTags[tags.JujuIsController] = "true" } return instanceTags }
// Handle implements NotifyWatchHandler's Handle method. If the change means // that the machine is now expected to manage the environment, we change its // password (to set its password in mongo) and restart the agent. func (c *converter) Handle(_ <-chan struct{}) error { results, err := c.machine.Jobs() if err != nil { return errors.Annotate(err, "can't get jobs for machine") } if !multiwatcher.AnyJobNeedsState(results.Jobs...) { return nil } return errors.Trace(c.agent.Restart()) }
func (task *provisionerTask) constructInstanceConfig( machine *apiprovisioner.Machine, auth authentication.AuthenticationProvider, pInfo *params.ProvisioningInfo, ) (*instancecfg.InstanceConfig, error) { stateInfo, apiInfo, err := auth.SetupAuthentication(machine) if err != nil { return nil, errors.Annotate(err, "failed to setup authentication") } // Generated a nonce for the new instance, with the format: "machine-#:UUID". // The first part is a badge, specifying the tag of the machine the provisioner // is running on, while the second part is a random UUID. uuid, err := utils.NewUUID() if err != nil { return nil, errors.Annotate(err, "failed to generate a nonce for machine "+machine.Id()) } nonce := fmt.Sprintf("%s:%s", task.machineTag, uuid) instanceConfig, err := instancecfg.NewInstanceConfig( names.NewControllerTag(controller.Config(pInfo.ControllerConfig).ControllerUUID()), machine.Id(), nonce, task.imageStream, pInfo.Series, apiInfo, ) if err != nil { return nil, errors.Trace(err) } instanceConfig.Tags = pInfo.Tags if len(pInfo.Jobs) > 0 { instanceConfig.Jobs = pInfo.Jobs } if multiwatcher.AnyJobNeedsState(instanceConfig.Jobs...) { publicKey, err := simplestreams.UserPublicSigningKey() if err != nil { return nil, err } instanceConfig.Controller = &instancecfg.ControllerConfig{ PublicImageSigningKey: publicKey, MongoInfo: stateInfo, } instanceConfig.Controller.Config = make(map[string]interface{}) for k, v := range pInfo.ControllerConfig { instanceConfig.Controller.Config[k] = v } } return instanceConfig, nil }
// StartInstance is specified in the InstanceBroker interface.
//
// It provisions a new Azure role (VM): it finalizes the instance config,
// composes user data, selects an instance type and OS image, picks (or
// reuses) a cloud service for availability-set affinity, creates the OS
// disk, and starts the instance, returning the instance handle and its
// hardware characteristics.
func (env *azureEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}
	err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config())
	if err != nil {
		return nil, err
	}

	// Pick envtools. Needed for the custom data (which is what we normally
	// call userdata).
	args.InstanceConfig.Tools = args.Tools[0]
	logger.Infof("picked tools %q", args.InstanceConfig.Tools)

	// Compose userdata.
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, AzureRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot compose user data")
	}

	snapshot := env.getSnapshot()
	location := snapshot.ecfg.location()
	instanceType, sourceImageName, err := env.selectInstanceTypeAndImage(&instances.InstanceConstraint{
		Region:      location,
		Series:      args.Tools.OneSeries(),
		Arches:      args.Tools.Arches(),
		Constraints: args.Constraints,
	})
	if err != nil {
		return nil, err
	}

	// We use the cloud service label as a way to group instances with
	// the same affinity, so that machines can be allocated to the
	// same availability set.
	var cloudServiceName string
	if args.DistributionGroup != nil && snapshot.ecfg.availabilitySetsEnabled() {
		instanceIds, err := args.DistributionGroup()
		if err != nil {
			return nil, err
		}
		// Reuse the cloud service of the first distribution-group member
		// whose instance id yields one.
		for _, id := range instanceIds {
			cloudServiceName, _ = env.splitInstanceId(id)
			if cloudServiceName != "" {
				break
			}
		}
	}

	vhd, err := env.newOSDisk(sourceImageName, args.InstanceConfig.Series)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// If we're creating machine-0, we'll want to expose port 22.
	// All other machines get an auto-generated public port for SSH.
	stateServer := multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...)
	role, err := env.newRole(instanceType.Id, vhd, stateServer, string(userData), args.InstanceConfig.Series, snapshot)
	if err != nil {
		return nil, errors.Trace(err)
	}

	inst, err := createInstance(env, snapshot.api, role, cloudServiceName, stateServer)
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Report the hardware the instance type provides; the architecture is
	// only known when the instance type supports exactly one.
	hc := &instance.HardwareCharacteristics{
		Mem:      &instanceType.Mem,
		RootDisk: &instanceType.RootDisk,
		CpuCores: &instanceType.CpuCores,
	}
	if len(instanceType.Arches) == 1 {
		hc.Arch = &instanceType.Arches[0]
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hc,
	}, nil
}
// StartInstance is specified in the InstanceBroker interface.
//
// It provisions a new Azure (Resource Manager) virtual machine: it
// snapshots all config-derived values under the environ lock, selects an
// instance spec and matching tools, finalizes the instance config, and
// creates the VM with its tags, subnet and network security group. On a
// creation failure the partially-created machine is destroyed.
func (env *azureEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	// Get the required configuration and config-dependent information
	// required to create the instance. We take the lock just once, to
	// ensure we obtain all information based on the same configuration.
	env.mu.Lock()
	location := env.config.location
	// The error from ResourceTags is deliberately discarded here —
	// NOTE(review): confirm it cannot fail for an already-validated config.
	envTags, _ := env.config.ResourceTags()
	apiPort := env.config.APIPort()
	vmClient := compute.VirtualMachinesClient{env.compute}
	availabilitySetClient := compute.AvailabilitySetsClient{env.compute}
	networkClient := env.network
	vmImagesClient := compute.VirtualMachineImagesClient{env.compute}
	vmExtensionClient := compute.VirtualMachineExtensionsClient{env.compute}
	subscriptionId := env.config.subscriptionId
	imageStream := env.config.ImageStream()
	storageEndpoint := env.config.storageEndpoint
	storageAccountName := env.config.storageAccount
	instanceTypes, err := env.getInstanceTypesLocked()
	if err != nil {
		env.mu.Unlock()
		return nil, errors.Trace(err)
	}
	internalNetworkSubnet, err := env.getInternalSubnetLocked()
	if err != nil {
		env.mu.Unlock()
		return nil, errors.Trace(err)
	}
	env.mu.Unlock()

	// Identify the instance type and image to provision.
	instanceSpec, err := findInstanceSpec(
		vmImagesClient,
		instanceTypes,
		&instances.InstanceConstraint{
			Region:      location,
			Series:      args.Tools.OneSeries(),
			Arches:      args.Tools.Arches(),
			Constraints: args.Constraints,
		},
		imageStream,
	)
	if err != nil {
		return nil, err
	}

	// Pick tools by filtering the available tools down to the architecture
	// of the image that will be provisioned.
	selectedTools, err := args.Tools.Match(tools.Filter{
		Arch: instanceSpec.Image.Arch,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	logger.Infof("picked tools %q", selectedTools[0].Version)

	// Finalize the instance config, which we'll render to CustomData below.
	if err := args.InstanceConfig.SetTools(selectedTools); err != nil {
		return nil, errors.Trace(err)
	}
	if err := instancecfg.FinishInstanceConfig(
		args.InstanceConfig, env.Config(),
	); err != nil {
		return nil, err
	}

	machineTag := names.NewMachineTag(args.InstanceConfig.MachineId)
	vmName := resourceName(machineTag)
	vmTags := make(map[string]string)
	for k, v := range args.InstanceConfig.Tags {
		vmTags[k] = v
	}
	// jujuMachineNameTag identifies the VM name, in which is encoded
	// the Juju machine name. We tag all resources related to the
	// machine with this.
	vmTags[jujuMachineNameTag] = vmName

	// If the machine will run a controller, then we need to open the
	// API port for it.
	var apiPortPtr *int
	if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) {
		apiPortPtr = &apiPort
	}

	// Construct the network security group ID for the environment.
	nsgID := path.Join(
		"/subscriptions", subscriptionId,
		"resourceGroups", env.resourceGroup,
		"providers", "Microsoft.Network",
		"networkSecurityGroups", internalSecurityGroupName,
	)

	vm, err := createVirtualMachine(
		env.resourceGroup, location, vmName,
		vmTags, envTags,
		instanceSpec, args.InstanceConfig,
		args.DistributionGroup,
		env.Instances,
		apiPortPtr, internalNetworkSubnet, nsgID,
		storageEndpoint, storageAccountName,
		networkClient, vmClient,
		availabilitySetClient, vmExtensionClient,
	)
	if err != nil {
		// Best-effort cleanup of whatever was partially created; the
		// original error is what we report to the caller.
		logger.Errorf("creating instance failed, destroying: %v", err)
		if err := env.StopInstances(instance.Id(vmName)); err != nil {
			logger.Errorf("could not destroy failed virtual machine: %v", err)
		}
		return nil, errors.Annotatef(err, "creating virtual machine %q", vmName)
	}

	// Note: the instance is initialised without addresses to keep the
	// API chatter down. We will refresh the instance if we need to know
	// the addresses.
	inst := &azureInstance{vm, env, nil, nil}
	amd64 := arch.AMD64
	hc := &instance.HardwareCharacteristics{
		Arch:     &amd64,
		Mem:      &instanceSpec.InstanceType.Mem,
		RootDisk: &instanceSpec.InstanceType.RootDisk,
		CpuCores: &instanceSpec.InstanceType.CpuCores,
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hc,
	}, nil
}
// isStateServer reports whether the instance described by icfg will run
// any job that requires access to state.
func isStateServer(icfg *instancecfg.InstanceConfig) bool {
	jobs := icfg.Jobs
	return multiwatcher.AnyJobNeedsState(jobs...)
}
// StartInstance provisions a new Joyent machine: it selects an instance
// spec and tools, finalizes the instance config, injects a networking
// workaround via cloud-init, creates the machine through CloudAPI, polls
// until it is running, and records state instances in provider storage.
func (env *joyentEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}

	series := args.Tools.OneSeries()
	arches := args.Tools.Arches()
	spec, err := env.FindInstanceSpec(&instances.InstanceConstraint{
		Region:      env.Ecfg().Region(),
		Series:      series,
		Arches:      arches,
		Constraints: args.Constraints,
	}, args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	args.InstanceConfig.Tools = tools[0]
	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config()); err != nil {
		return nil, err
	}

	// This is a hack that ensures that instances can communicate over
	// the internal network. Joyent sometimes gives instances
	// different 10.x.x.x/21 networks and adding this route allows
	// them to talk despite this. See:
	// https://bugs.launchpad.net/juju-core/+bug/1401130
	cloudcfg, err := cloudinit.New(args.InstanceConfig.Series)
	if err != nil {
		return nil, errors.Annotate(err, "cannot create cloudinit template")
	}
	// NOTE(review): raw-string interior reconstructed from collapsed
	// source — confirm line breaks against upstream before relying on it.
	ifupScript := `
#!/bin/bash

# These guards help to ensure that this hack only runs if Joyent's
# internal network still works as it does at time of writing.
[ "$IFACE" == "eth1" ] || [ "$IFACE" == "--all" ] || exit 0
/sbin/ip -4 --oneline addr show dev eth1 | fgrep --quiet " inet 10." || exit 0

/sbin/ip route add 10.0.0.0/8 dev eth1
`[1:]
	cloudcfg.AddBootTextFile("/etc/network/if-up.d/joyent", ifupScript, 0755)

	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, JoyentRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("joyent user data: %d bytes", len(userData))

	var machine *cloudapi.Machine
	machine, err = env.compute.cloudapi.CreateMachine(cloudapi.CreateMachineOpts{
		//Name: env.machineFullName(machineConf.MachineId),
		Package:  spec.InstanceType.Name,
		Image:    spec.Image.Id,
		Metadata: map[string]string{"metadata.cloud-init:user-data": string(userData)},
		Tags:     map[string]string{"tag.group": "juju", "tag.env": env.Config().Name()},
	})
	if err != nil {
		return nil, errors.Annotate(err, "cannot create instances")
	}
	machineId := machine.Id

	logger.Infof("provisioning instance %q", machineId)

	machine, err = env.compute.cloudapi.GetMachine(machineId)
	if err != nil {
		return nil, errors.Annotate(err, "cannot start instances")
	}

	// wait for machine to start
	// NOTE(review): this polls once a second with no upper bound; a
	// machine stuck in a non-running, non-error state would block forever.
	for !strings.EqualFold(machine.State, "running") {
		time.Sleep(1 * time.Second)
		machine, err = env.compute.cloudapi.GetMachine(machineId)
		if err != nil {
			return nil, errors.Annotate(err, "cannot start instances")
		}
	}
	logger.Infof("started instance %q", machineId)

	inst := &joyentInstance{
		machine: machine,
		env:     env,
	}

	// State-hosting machines are recorded in provider-state; failure to
	// record is logged but deliberately not fatal.
	if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) {
		if err := common.AddStateInstance(env.Storage(), inst.Id()); err != nil {
			logger.Errorf("could not record instance in provider-state: %v", err)
		}
	}

	disk64 := uint64(machine.Disk)
	hc := instance.HardwareCharacteristics{
		Arch:     &spec.Image.Arch,
		Mem:      &spec.InstanceType.Mem,
		CpuCores: &spec.InstanceType.CpuCores,
		CpuPower: spec.InstanceType.CpuPower,
		RootDisk: &disk64,
	}

	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: &hc,
	}, nil
}
// isController reports whether the instance described by mcfg will run
// any job that requires access to state.
func isController(mcfg *instancecfg.InstanceConfig) bool {
	jobs := mcfg.Jobs
	return multiwatcher.AnyJobNeedsState(jobs...)
}