func (c *Client) addOneMachine(p params.AddMachineParams) (*state.Machine, error) {
	if p.ParentId != "" && p.ContainerType == "" {
		return nil, fmt.Errorf("parent machine specified without container type")
	}
	if p.ContainerType != "" && p.Placement != nil {
		return nil, fmt.Errorf("container type and placement are mutually exclusive")
	}
	if p.Placement != nil {
		// Extract container type and parent from container placement directives.
		containerType, err := instance.ParseContainerType(p.Placement.Scope)
		if err == nil {
			p.ContainerType = containerType
			p.ParentId = p.Placement.Directive
			p.Placement = nil
		}
	}

	if p.ContainerType != "" || p.Placement != nil {
		// Guard against dubious client by making sure that
		// the following attributes can only be set when we're
		// not using placement.
		p.InstanceId = ""
		p.Nonce = ""
		p.HardwareCharacteristics = instance.HardwareCharacteristics{}
		p.Addrs = nil
	}

	if p.Series == "" {
		conf, err := c.api.state.EnvironConfig()
		if err != nil {
			return nil, err
		}
		p.Series = config.PreferredSeries(conf)
	}

	var placementDirective string
	if p.Placement != nil {
		env, err := c.api.state.Environment()
		if err != nil {
			return nil, err
		}
		// For 1.21 we should support both UUID and name, and with 1.22
		// just support UUID.
		if p.Placement.Scope != env.Name() && p.Placement.Scope != env.UUID() {
			return nil, fmt.Errorf("invalid environment name %q", p.Placement.Scope)
		}
		placementDirective = p.Placement.Directive
	}

	jobs, err := common.StateJobs(p.Jobs)
	if err != nil {
		return nil, err
	}
	template := state.MachineTemplate{
		Series:                  p.Series,
		Constraints:             p.Constraints,
		InstanceId:              p.InstanceId,
		Jobs:                    jobs,
		Nonce:                   p.Nonce,
		HardwareCharacteristics: p.HardwareCharacteristics,
		Addresses:               params.NetworkAddresses(p.Addrs),
		Placement:               placementDirective,
	}
	if p.ContainerType == "" {
		return c.api.state.AddOneMachine(template)
	}
	if p.ParentId != "" {
		return c.api.state.AddMachineInsideMachine(template, p.ParentId, p.ContainerType)
	}
	return c.api.state.AddMachineInsideNewMachine(template, template, p.ContainerType)
}

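
The placement Scope is what selects the branch taken at the bottom of addOneMachine: a scope that parses as a container type is folded into ContainerType/ParentId, a scope matching the environment name or UUID leaves the Directive to be passed through to the provider, and anything else is rejected. The standalone sketch below mirrors that dispatch; the placement struct, the containerScopes set, and the sample name/UUID are illustrative stand-ins, not Juju types.

package main

import "fmt"

// placement mirrors the Scope/Directive pair consumed by addOneMachine above.
type placement struct {
	Scope, Directive string
}

// containerScopes stands in for instance.ParseContainerType succeeding.
var containerScopes = map[string]bool{"lxc": true, "kvm": true, "lxd": true}

// classify reproduces the dispatch rule: container scope -> container inside
// an existing or new machine, environment scope -> provider placement
// directive, anything else -> error.
func classify(envName, envUUID string, p placement) (string, error) {
	switch {
	case containerScopes[p.Scope] && p.Directive != "":
		return fmt.Sprintf("create a %s container inside machine %s", p.Scope, p.Directive), nil
	case containerScopes[p.Scope]:
		return fmt.Sprintf("create a %s container inside a new machine", p.Scope), nil
	case p.Scope == envName || p.Scope == envUUID:
		return fmt.Sprintf("create a machine, passing %q to the provider", p.Directive), nil
	default:
		return "", fmt.Errorf("invalid environment name %q", p.Scope)
	}
}

func main() {
	const envName, envUUID = "testenv", "142b5f66-0dbd-4b1d-8000-000000000000"
	for _, p := range []placement{
		{Scope: "lxc", Directive: "4"},
		{Scope: envUUID, Directive: "zone=us-east-1a"},
		{Scope: "bogus", Directive: "3"},
	} {
		fmt.Println(classify(envName, envUUID, p))
	}
}
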
// addMachine creates a new top-level machine or container in the environment.
func (h *bundleHandler) addMachine(id string, p bundlechanges.AddMachineParams) error {
	services := h.servicesForMachineChange(id)
	// Note that we always have at least one service that justifies the
	// creation of this machine.
	msg := services[0] + " unit"
	svcLen := len(services)
	if svcLen != 1 {
		msg = strings.Join(services[:svcLen-1], ", ") + " and " + services[svcLen-1] + " units"
	}
	// Check whether the desired number of units already exist in the
	// environment, in which case avoid adding other machines to host those
	// service units.
	machine := h.chooseMachine(services...)
	if machine != "" {
		h.results[id] = machine
		notify := make([]string, 0, svcLen)
		for _, service := range services {
			if !h.ignoredMachines[service] {
				h.ignoredMachines[service] = true
				notify = append(notify, service)
			}
		}
		svcLen = len(notify)
		switch svcLen {
		case 0:
			return nil
		case 1:
			msg = notify[0]
		default:
			msg = strings.Join(notify[:svcLen-1], ", ") + " and " + notify[svcLen-1]
		}
		h.log.Infof("avoid creating other machines to host %s units", msg)
		return nil
	}
	cons, err := constraints.Parse(p.Constraints)
	if err != nil {
		// This should never happen, as the bundle is already verified.
		return errors.Annotate(err, "invalid constraints for machine")
	}
	machineParams := params.AddMachineParams{
		Constraints: cons,
		Series:      p.Series,
		Jobs:        []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
	}
	if p.ContainerType != "" {
		containerType, err := instance.ParseContainerType(p.ContainerType)
		if err != nil {
			return errors.Annotatef(err, "cannot create machine for holding %s", msg)
		}
		machineParams.ContainerType = containerType
		if p.ParentId != "" {
			machineParams.ParentId, err = h.resolveMachine(p.ParentId)
			if err != nil {
				return errors.Annotatef(err, "cannot retrieve parent placement for %s", msg)
			}
		}
	}
	r, err := h.client.AddMachines([]params.AddMachineParams{machineParams})
	if err != nil {
		return errors.Annotatef(err, "cannot create machine for holding %s", msg)
	}
	if r[0].Error != nil {
		return errors.Annotatef(r[0].Error, "cannot create machine for holding %s", msg)
	}
	machine = r[0].Machine
	if p.ContainerType == "" {
		h.log.Infof("created new machine %s for holding %s", machine, msg)
	} else if p.ParentId == "" {
		h.log.Infof("created %s container in new machine for holding %s", machine, msg)
	} else {
		h.log.Infof("created %s container in machine %s for holding %s", machine, machineParams.ParentId, msg)
	}
	h.results[id] = machine
	return nil
}

// addMachine creates a new top-level machine or container in the environment.
func (h *bundleHandler) addMachine(id string, p bundlechanges.AddMachineParams) error {
	services := h.servicesForMachineChange(id)
	// Note that we always have at least one application that justifies the
	// creation of this machine.
	msg := services[0] + " unit"
	svcLen := len(services)
	if svcLen != 1 {
		msg = strings.Join(services[:svcLen-1], ", ") + " and " + services[svcLen-1] + " units"
	}
	// Check whether the desired number of units already exist in the
	// environment, in which case avoid adding other machines to host those
	// application units.
	machine := h.chooseMachine(services...)
	if machine != "" {
		h.results[id] = machine
		notify := make([]string, 0, svcLen)
		for _, application := range services {
			if !h.ignoredMachines[application] {
				h.ignoredMachines[application] = true
				notify = append(notify, application)
			}
		}
		svcLen = len(notify)
		switch svcLen {
		case 0:
			return nil
		case 1:
			msg = notify[0]
		default:
			msg = strings.Join(notify[:svcLen-1], ", ") + " and " + notify[svcLen-1]
		}
		h.log.Infof("avoid creating other machines to host %s units", msg)
		return nil
	}
	cons, err := constraints.Parse(p.Constraints)
	if err != nil {
		// This should never happen, as the bundle is already verified.
		return errors.Annotate(err, "invalid constraints for machine")
	}
	machineParams := params.AddMachineParams{
		Constraints: cons,
		Series:      p.Series,
		Jobs:        []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
	}
	if ct := p.ContainerType; ct != "" {
		// For backwards compatibility with 1.x bundles, we treat lxc
		// placement directives as lxd.
		if ct == "lxc" {
			if !h.warnedLXC {
				h.log.Infof("Bundle has one or more containers specified as lxc. lxc containers are deprecated in Juju 2.0. lxd containers will be deployed instead.")
				h.warnedLXC = true
			}
			ct = string(instance.LXD)
		}
		containerType, err := instance.ParseContainerType(ct)
		if err != nil {
			return errors.Annotatef(err, "cannot create machine for holding %s", msg)
		}
		machineParams.ContainerType = containerType
		if p.ParentId != "" {
			machineParams.ParentId, err = h.resolveMachine(p.ParentId)
			if err != nil {
				return errors.Annotatef(err, "cannot retrieve parent placement for %s", msg)
			}
		}
	}
	r, err := h.api.AddMachines([]params.AddMachineParams{machineParams})
	if err != nil {
		return errors.Annotatef(err, "cannot create machine for holding %s", msg)
	}
	if r[0].Error != nil {
		return errors.Annotatef(r[0].Error, "cannot create machine for holding %s", msg)
	}
	machine = r[0].Machine
	if p.ContainerType == "" {
		logger.Debugf("created new machine %s for holding %s", machine, msg)
	} else if p.ParentId == "" {
		logger.Debugf("created %s container in new machine for holding %s", machine, msg)
	} else {
		logger.Debugf("created %s container in machine %s for holding %s", machine, machineParams.ParentId, msg)
	}
	h.results[id] = machine
	return nil
}

func (c *addCommand) Run(ctx *cmd.Context) error {
	client, err := c.getClientAPI()
	if err != nil {
		return errors.Trace(err)
	}
	defer client.Close()

	var machineManager MachineManagerAPI
	if len(c.Disks) > 0 {
		machineManager, err = c.getMachineManagerAPI()
		if err != nil {
			return errors.Trace(err)
		}
		defer machineManager.Close()
		if machineManager.BestAPIVersion() < 1 {
			return errors.New("cannot add machines with disks: not supported by the API server")
		}
	}

	logger.Infof("load config")
	var config *config.Config
	if defaultStore, err := configstore.Default(); err != nil {
		return err
	} else if config, err = c.Config(defaultStore, client); err != nil {
		return err
	}

	if c.Placement != nil && c.Placement.Scope == "ssh" {
		logger.Infof("manual provisioning")
		args := manual.ProvisionMachineArgs{
			Host:   c.Placement.Directive,
			Client: client,
			Stdin:  ctx.Stdin,
			Stdout: ctx.Stdout,
			Stderr: ctx.Stderr,
			UpdateBehavior: &params.UpdateBehavior{
				config.EnableOSRefreshUpdate(),
				config.EnableOSUpgrade(),
			},
		}
		machineId, err := manualProvisioner(args)
		if err == nil {
			ctx.Infof("created machine %v", machineId)
		}
		return err
	}

	logger.Infof("environment provisioning")
	if c.Placement != nil && c.Placement.Scope == "env-uuid" {
		c.Placement.Scope = client.EnvironmentUUID()
	}

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits}

	envVersion, err := envcmd.GetEnvironmentVersion(client)
	if err != nil {
		return err
	}

	// Servers before 1.21-alpha2 don't have the networker, so don't
	// try to use JobManageNetworking with them.
	//
	// For MAAS and Joyent, JobManageNetworking is not added, so that the
	// networker starts non-intrusively, as above for manual provisioning.
	// See this related Joyent bug: http://pad.lv/1401423
	if envVersion.Compare(version.MustParse("1.21-alpha2")) >= 0 &&
		config.Type() != provider.MAAS &&
		config.Type() != provider.Joyent {
		jobs = append(jobs, multiwatcher.JobManageNetworking)
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        jobs,
		Disks:       c.Disks,
	}
	machines := make([]params.AddMachineParams, c.NumMachines)
	for i := 0; i < c.NumMachines; i++ {
		machines[i] = machineParams
	}

	var results []params.AddMachinesResult
	// If storage is specified, we attempt to use the new machine manager facade.
	if len(c.Disks) > 0 {
		results, err = machineManager.AddMachines(machines)
	} else {
		results, err = client.AddMachines(machines)
		if params.IsCodeNotImplemented(err) {
			if c.Placement != nil {
				containerType, parseErr := instance.ParseContainerType(c.Placement.Scope)
				if parseErr != nil {
					// The user specified a non-container placement directive:
					// return the original API not implemented error.
					return err
				}
				machineParams.ContainerType = containerType
				machineParams.ParentId = c.Placement.Directive
				machineParams.Placement = nil
			}
			logger.Infof(
				"AddMachinesWithPlacement not supported by the API server, " +
					"falling back to 1.18 compatibility mode",
			)
			results, err = client.AddMachines1dot18([]params.AddMachineParams{machineParams})
		}
	}
	if params.IsCodeOperationBlocked(err) {
		return block.ProcessBlockedError(err, block.BlockChange)
	}
	if err != nil {
		return errors.Trace(err)
	}

	errs := []error{}
	for _, machineInfo := range results {
		if machineInfo.Error != nil {
			errs = append(errs, machineInfo.Error)
			continue
		}
		machineId := machineInfo.Machine
		if names.IsContainerMachine(machineId) {
			ctx.Infof("created container %v", machineId)
		} else {
			ctx.Infof("created machine %v", machineId)
		}
	}
	if len(errs) == 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create 1 machine\n")
		return errs[0]
	}
	if len(errs) > 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create %d machines\n", len(errs))
		returnErr := []string{}
		for _, e := range errs {
			returnErr = append(returnErr, e.Error())
		}
		return errors.New(strings.Join(returnErr, ", "))
	}
	return nil
}

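
The compatibility fallback above only helps when the placement can be expressed in pre-placement terms: an "ssh" scope never reaches it (manual provisioning returns earlier), a container scope such as lxc:4 can be rewritten into ContainerType/ParentId for AddMachines1dot18, and any other directive surfaces the original not-implemented error. A minimal sketch of that decision, using cut-down stand-ins for the Juju types:

package main

import (
	"errors"
	"fmt"
)

// containerScopes stands in for instance.ParseContainerType succeeding.
var containerScopes = map[string]bool{"lxc": true, "kvm": true}

// addParams is an illustrative stand-in for params.AddMachineParams; Scope
// and Directive stand in for the *instance.Placement field.
type addParams struct {
	ContainerType, ParentId string
	Scope, Directive        string
}

// fallbackParams mirrors the retry decision above: only a container scope
// can be translated for the old AddMachines1dot18 call; anything else keeps
// the original "not implemented" error from the server.
func fallbackParams(p addParams, notImplemented error) (addParams, error) {
	if !containerScopes[p.Scope] {
		return addParams{}, notImplemented
	}
	p.ContainerType, p.ParentId = p.Scope, p.Directive
	p.Scope, p.Directive = "", ""
	return p, nil
}

func main() {
	errNotImpl := errors.New("AddMachinesWithPlacement not implemented")
	fmt.Println(fallbackParams(addParams{Scope: "lxc", Directive: "4"}, errNotImpl))
	fmt.Println(fallbackParams(addParams{Scope: "142b5f66-0dbd-4b1d-8000-000000000000", Directive: "zone=us-east-1a"}, errNotImpl))
}
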
func (mm *MachineManagerAPI) addOneMachine(p params.AddMachineParams) (*state.Machine, error) {
	if p.ParentId != "" && p.ContainerType == "" {
		return nil, fmt.Errorf("parent machine specified without container type")
	}
	if p.ContainerType != "" && p.Placement != nil {
		return nil, fmt.Errorf("container type and placement are mutually exclusive")
	}
	if p.Placement != nil {
		// Extract container type and parent from container placement directives.
		containerType, err := instance.ParseContainerType(p.Placement.Scope)
		if err == nil {
			p.ContainerType = containerType
			p.ParentId = p.Placement.Directive
			p.Placement = nil
		}
	}

	if p.ContainerType != "" || p.Placement != nil {
		// Guard against dubious client by making sure that
		// the following attributes can only be set when we're
		// not using placement.
		p.InstanceId = ""
		p.Nonce = ""
		p.HardwareCharacteristics = instance.HardwareCharacteristics{}
		p.Addrs = nil
	}

	if p.Series == "" {
		conf, err := mm.st.ModelConfig()
		if err != nil {
			return nil, err
		}
		p.Series = config.PreferredSeries(conf)
	}

	var placementDirective string
	if p.Placement != nil {
		env, err := mm.st.Model()
		if err != nil {
			return nil, err
		}
		// For 1.21 we should support both UUID and name, and with 1.22
		// just support UUID.
		if p.Placement.Scope != env.Name() && p.Placement.Scope != env.UUID() {
			return nil, fmt.Errorf("invalid model name %q", p.Placement.Scope)
		}
		placementDirective = p.Placement.Directive
	}

	volumes := make([]state.MachineVolumeParams, 0, len(p.Disks))
	for _, cons := range p.Disks {
		if cons.Count == 0 {
			return nil, errors.Errorf("invalid volume params: count not specified")
		}
		// Pool and Size are validated by AddMachineX.
		volumeParams := state.VolumeParams{
			Pool: cons.Pool,
			Size: cons.Size,
		}
		volumeAttachmentParams := state.VolumeAttachmentParams{}
		for i := uint64(0); i < cons.Count; i++ {
			volumes = append(volumes, state.MachineVolumeParams{
				volumeParams, volumeAttachmentParams,
			})
		}
	}

	jobs, err := common.StateJobs(p.Jobs)
	if err != nil {
		return nil, err
	}
	template := state.MachineTemplate{
		Series:                  p.Series,
		Constraints:             p.Constraints,
		Volumes:                 volumes,
		InstanceId:              p.InstanceId,
		Jobs:                    jobs,
		Nonce:                   p.Nonce,
		HardwareCharacteristics: p.HardwareCharacteristics,
		Addresses:               params.NetworkAddresses(p.Addrs),
		Placement:               placementDirective,
	}
	if p.ContainerType == "" {
		return mm.st.AddOneMachine(template)
	}
	if p.ParentId != "" {
		return mm.st.AddMachineInsideMachine(template, p.ParentId, p.ContainerType)
	}
	return mm.st.AddMachineInsideNewMachine(template, template, p.ContainerType)
}
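
The Disks loop above fans each storage constraint out into Count identical volumes, all attached to the new machine. A small sketch of that expansion with illustrative stand-in types (not the state package's own):

package main

import "fmt"

// cons mirrors the fields of a disk constraint used above: a storage pool,
// a size per volume (MiB), and how many volumes to create.
type cons struct {
	Pool  string
	Size  uint64
	Count uint64
}

// volume is a stand-in for state.MachineVolumeParams.
type volume struct {
	Pool string
	Size uint64
}

// expand reproduces the fan-out: one volume per Count for every constraint,
// rejecting a zero count just as addOneMachine does.
func expand(disks []cons) ([]volume, error) {
	var volumes []volume
	for _, c := range disks {
		if c.Count == 0 {
			return nil, fmt.Errorf("invalid volume params: count not specified")
		}
		for i := uint64(0); i < c.Count; i++ {
			volumes = append(volumes, volume{Pool: c.Pool, Size: c.Size})
		}
	}
	return volumes, nil
}

func main() {
	// Two 32 GiB volumes from the "ebs" pool plus one 8 GiB volume from "loop"
	// yield three volume params on the machine template.
	vols, err := expand([]cons{{Pool: "ebs", Size: 32768, Count: 2}, {Pool: "loop", Size: 8192, Count: 1}})
	fmt.Println(len(vols), err) // 3 <nil>
}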