func (s *InstanceSuite) TestParseContainerType(c *gc.C) {
	ctype, err := instance.ParseContainerType("lxc")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctype, gc.Equals, instance.LXC)

	ctype, err = instance.ParseContainerType("kvm")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ctype, gc.Equals, instance.KVM)

	_, err = instance.ParseContainerType("none")
	c.Assert(err, gc.ErrorMatches, `invalid container type "none"`)

	_, err = instance.ParseContainerType("omg")
	c.Assert(err, gc.ErrorMatches, `invalid container type "omg"`)
}
// makePlacement makes a placement directive for the given machineIdSpec.
func makePlacement(machineIdSpec string) ([]*instance.Placement, error) {
	if machineIdSpec == "" {
		return nil, nil
	}
	mid := machineIdSpec
	scope := instance.MachineScope
	var containerType instance.ContainerType
	specParts := strings.SplitN(machineIdSpec, ":", 2)
	if len(specParts) > 1 {
		firstPart := specParts[0]
		var err error
		if containerType, err = instance.ParseContainerType(firstPart); err == nil {
			mid = specParts[1]
			scope = string(containerType)
		}
	}
	if !names.IsValidMachine(mid) {
		return nil, errors.Errorf("invalid force machine id %q", mid)
	}
	return []*instance.Placement{
		{
			Scope:     scope,
			Directive: mid,
		},
	}, nil
}
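// Illustrative sketch only (not part of the original sources): how makePlacement
// is expected to interpret its argument, given the parsing logic above. The suite
// type and test name are hypothetical; the assertions mirror the gocheck/jc style
// of TestParseContainerType.
func (s *placementSuite) TestMakePlacementSketch(c *gc.C) {
	// A bare machine id maps to the machine scope.
	p, err := makePlacement("0")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(p[0].Scope, gc.Equals, instance.MachineScope)
	c.Assert(p[0].Directive, gc.Equals, "0")

	// A "<container>:<machine>" spec maps to the container scope.
	p, err = makePlacement("lxc:4")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(p[0].Scope, gc.Equals, string(instance.LXC))
	c.Assert(p[0].Directive, gc.Equals, "4")

	// An empty spec yields no placement at all.
	p, err = makePlacement("")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(p, gc.IsNil)

	// Anything that is neither a container spec nor a valid machine id fails.
	_, err = makePlacement("omg")
	c.Assert(err, gc.ErrorMatches, `invalid force machine id "omg"`)
}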
// AddUnits starts n units of the given service and allocates machines
// to them as necessary.
func AddUnits(st *state.State, svc *state.Service, n int, machineIdSpec string) ([]*state.Unit, error) {
	if machineIdSpec != "" && n != 1 {
		return nil, errors.Errorf("cannot add multiple units of service %q to a single machine", svc.Name())
	}
	var placement []*instance.Placement
	if machineIdSpec != "" {
		mid := machineIdSpec
		scope := instance.MachineScope
		var containerType instance.ContainerType
		specParts := strings.SplitN(machineIdSpec, ":", 2)
		if len(specParts) > 1 {
			firstPart := specParts[0]
			var err error
			if containerType, err = instance.ParseContainerType(firstPart); err == nil {
				mid = specParts[1]
				scope = string(containerType)
			}
		}
		if !names.IsValidMachine(mid) {
			return nil, fmt.Errorf("invalid force machine id %q", mid)
		}
		placement = []*instance.Placement{
			{
				Scope:     scope,
				Directive: mid,
			},
		}
	}
	return AddUnitsWithPlacement(st, svc, n, placement)
}
func addMachineForUnit(st *state.State, unit *state.Unit, placement *instance.Placement, networks []string) (*state.Machine, error) {
	unitCons, err := unit.Constraints()
	if err != nil {
		return nil, err
	}
	var containerType instance.ContainerType
	var mid, placementDirective string
	// Extract container type and parent from container placement directives.
	if containerType, err = instance.ParseContainerType(placement.Scope); err == nil {
		mid = placement.Directive
	} else {
		switch placement.Scope {
		case st.EnvironUUID():
			placementDirective = placement.Directive
		case instance.MachineScope:
			mid = placement.Directive
		default:
			return nil, errors.Errorf("invalid environment UUID %q", placement.Scope)
		}
	}
	// Create any new machine marked as dirty so that
	// nothing else will grab it before we assign the unit to it.
	// If a container is to be used, create it.
	if containerType != "" {
		template := state.MachineTemplate{
			Series:            unit.Series(),
			Jobs:              []state.MachineJob{state.JobHostUnits},
			Dirty:             true,
			Constraints:       *unitCons,
			RequestedNetworks: networks,
		}
		return st.AddMachineInsideMachine(template, mid, containerType)
	}
	// If a placement directive is to be used, do that here.
	if placementDirective != "" {
		template := state.MachineTemplate{
			Series:            unit.Series(),
			Jobs:              []state.MachineJob{state.JobHostUnits},
			Dirty:             true,
			Constraints:       *unitCons,
			RequestedNetworks: networks,
			Placement:         placementDirective,
		}
		return st.AddOneMachine(template)
	}
	// Otherwise use an existing machine.
	return st.Machine(mid)
}
func (c *Client) addOneMachine(p params.AddMachineParams) (*state.Machine, error) {
	if p.ParentId != "" && p.ContainerType == "" {
		return nil, fmt.Errorf("parent machine specified without container type")
	}
	if p.ContainerType != "" && p.Placement != nil {
		return nil, fmt.Errorf("container type and placement are mutually exclusive")
	}
	if p.Placement != nil {
		// Extract container type and parent from container placement directives.
		containerType, err := instance.ParseContainerType(p.Placement.Scope)
		if err == nil {
			p.ContainerType = containerType
			p.ParentId = p.Placement.Directive
			p.Placement = nil
		}
	}

	if p.ContainerType != "" || p.Placement != nil {
		// Guard against dubious client by making sure that
		// the following attributes can only be set when we're
		// not using placement.
		p.InstanceId = ""
		p.Nonce = ""
		p.HardwareCharacteristics = instance.HardwareCharacteristics{}
		p.Addrs = nil
	}

	if p.Series == "" {
		conf, err := c.api.state.EnvironConfig()
		if err != nil {
			return nil, err
		}
		p.Series = config.PreferredSeries(conf)
	}

	var placementDirective string
	if p.Placement != nil {
		env, err := c.api.state.Environment()
		if err != nil {
			return nil, err
		}
		// For 1.21 we should support both UUID and name, and with 1.22
		// just support UUID.
		if p.Placement.Scope != env.Name() && p.Placement.Scope != env.UUID() {
			return nil, fmt.Errorf("invalid environment name %q", p.Placement.Scope)
		}
		placementDirective = p.Placement.Directive
	}

	jobs, err := common.StateJobs(p.Jobs)
	if err != nil {
		return nil, err
	}
	template := state.MachineTemplate{
		Series:                  p.Series,
		Constraints:             p.Constraints,
		InstanceId:              p.InstanceId,
		Jobs:                    jobs,
		Nonce:                   p.Nonce,
		HardwareCharacteristics: p.HardwareCharacteristics,
		Addresses:               params.NetworkAddresses(p.Addrs),
		Placement:               placementDirective,
	}
	if p.ContainerType == "" {
		return c.api.state.AddOneMachine(template)
	}
	if p.ParentId != "" {
		return c.api.state.AddMachineInsideMachine(template, p.ParentId, p.ContainerType)
	}
	return c.api.state.AddMachineInsideNewMachine(template, template, p.ContainerType)
}
// addMachine creates a new top-level machine or container in the environment.
func (h *bundleHandler) addMachine(id string, p bundlechanges.AddMachineParams) error {
	services := h.servicesForMachineChange(id)
	// Note that we always have at least one service that justifies the
	// creation of this machine.
	msg := services[0] + " unit"
	svcLen := len(services)
	if svcLen != 1 {
		msg = strings.Join(services[:svcLen-1], ", ") + " and " + services[svcLen-1] + " units"
	}
	// Check whether the desired number of units already exist in the
	// environment, in which case avoid adding other machines to host those
	// service units.
	machine := h.chooseMachine(services...)
	if machine != "" {
		h.results[id] = machine
		notify := make([]string, 0, svcLen)
		for _, service := range services {
			if !h.ignoredMachines[service] {
				h.ignoredMachines[service] = true
				notify = append(notify, service)
			}
		}
		svcLen = len(notify)
		switch svcLen {
		case 0:
			return nil
		case 1:
			msg = notify[0]
		default:
			msg = strings.Join(notify[:svcLen-1], ", ") + " and " + notify[svcLen-1]
		}
		h.log.Infof("avoid creating other machines to host %s units", msg)
		return nil
	}
	cons, err := constraints.Parse(p.Constraints)
	if err != nil {
		// This should never happen, as the bundle is already verified.
		return errors.Annotate(err, "invalid constraints for machine")
	}
	machineParams := params.AddMachineParams{
		Constraints: cons,
		Series:      p.Series,
		Jobs:        []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
	}
	if p.ContainerType != "" {
		containerType, err := instance.ParseContainerType(p.ContainerType)
		if err != nil {
			return errors.Annotatef(err, "cannot create machine for holding %s", msg)
		}
		machineParams.ContainerType = containerType
		if p.ParentId != "" {
			machineParams.ParentId, err = h.resolveMachine(p.ParentId)
			if err != nil {
				return errors.Annotatef(err, "cannot retrieve parent placement for %s", msg)
			}
		}
	}
	r, err := h.client.AddMachines([]params.AddMachineParams{machineParams})
	if err != nil {
		return errors.Annotatef(err, "cannot create machine for holding %s", msg)
	}
	if r[0].Error != nil {
		return errors.Annotatef(r[0].Error, "cannot create machine for holding %s", msg)
	}
	machine = r[0].Machine
	if p.ContainerType == "" {
		h.log.Infof("created new machine %s for holding %s", machine, msg)
	} else if p.ParentId == "" {
		h.log.Infof("created %s container in new machine for holding %s", machine, msg)
	} else {
		h.log.Infof("created %s container in machine %s for holding %s", machine, machineParams.ParentId, msg)
	}
	h.results[id] = machine
	return nil
}
func (c *addCommand) Run(ctx *cmd.Context) error {
	client, err := c.getClientAPI()
	if err != nil {
		return errors.Trace(err)
	}
	defer client.Close()

	var machineManager MachineManagerAPI
	if len(c.Disks) > 0 {
		machineManager, err = c.getMachineManagerAPI()
		if err != nil {
			return errors.Trace(err)
		}
		defer machineManager.Close()
		if machineManager.BestAPIVersion() < 1 {
			return errors.New("cannot add machines with disks: not supported by the API server")
		}
	}

	logger.Infof("load config")
	var config *config.Config
	if defaultStore, err := configstore.Default(); err != nil {
		return err
	} else if config, err = c.Config(defaultStore, client); err != nil {
		return err
	}

	if c.Placement != nil && c.Placement.Scope == "ssh" {
		logger.Infof("manual provisioning")
		args := manual.ProvisionMachineArgs{
			Host:   c.Placement.Directive,
			Client: client,
			Stdin:  ctx.Stdin,
			Stdout: ctx.Stdout,
			Stderr: ctx.Stderr,
			UpdateBehavior: &params.UpdateBehavior{
				config.EnableOSRefreshUpdate(),
				config.EnableOSUpgrade(),
			},
		}
		machineId, err := manualProvisioner(args)
		if err == nil {
			ctx.Infof("created machine %v", machineId)
		}
		return err
	}

	logger.Infof("environment provisioning")
	if c.Placement != nil && c.Placement.Scope == "env-uuid" {
		c.Placement.Scope = client.EnvironmentUUID()
	}

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits}

	envVersion, err := envcmd.GetEnvironmentVersion(client)
	if err != nil {
		return err
	}

	// Servers before 1.21-alpha2 don't have the networker so don't
	// try to use JobManageNetworking with them.
	//
	// In case of MAAS and Joyent JobManageNetworking is not added
	// to ensure the non-intrusive start of a networker like above
	// for the manual provisioning. See this related joyent bug
	// http://pad.lv/1401423
	if envVersion.Compare(version.MustParse("1.21-alpha2")) >= 0 &&
		config.Type() != provider.MAAS &&
		config.Type() != provider.Joyent {
		jobs = append(jobs, multiwatcher.JobManageNetworking)
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        jobs,
		Disks:       c.Disks,
	}
	machines := make([]params.AddMachineParams, c.NumMachines)
	for i := 0; i < c.NumMachines; i++ {
		machines[i] = machineParams
	}

	var results []params.AddMachinesResult
	// If storage is specified, we attempt to use a new API on the service facade.
	if len(c.Disks) > 0 {
		results, err = machineManager.AddMachines(machines)
	} else {
		results, err = client.AddMachines(machines)
		if params.IsCodeNotImplemented(err) {
			if c.Placement != nil {
				containerType, parseErr := instance.ParseContainerType(c.Placement.Scope)
				if parseErr != nil {
					// The user specified a non-container placement directive:
					// return original API not implemented error.
					return err
				}
				machineParams.ContainerType = containerType
				machineParams.ParentId = c.Placement.Directive
				machineParams.Placement = nil
			}
			logger.Infof(
				"AddMachinesWithPlacement not supported by the API server, " +
					"falling back to 1.18 compatibility mode",
			)
			results, err = client.AddMachines1dot18([]params.AddMachineParams{machineParams})
		}
	}
	if params.IsCodeOperationBlocked(err) {
		return block.ProcessBlockedError(err, block.BlockChange)
	}
	if err != nil {
		return errors.Trace(err)
	}

	errs := []error{}
	for _, machineInfo := range results {
		if machineInfo.Error != nil {
			errs = append(errs, machineInfo.Error)
			continue
		}
		machineId := machineInfo.Machine
		if names.IsContainerMachine(machineId) {
			ctx.Infof("created container %v", machineId)
		} else {
			ctx.Infof("created machine %v", machineId)
		}
	}
	if len(errs) == 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create 1 machine\n")
		return errs[0]
	}
	if len(errs) > 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create %d machines\n", len(errs))
		returnErr := []string{}
		for _, e := range errs {
			returnErr = append(returnErr, e.Error())
		}
		return errors.New(strings.Join(returnErr, ", "))
	}
	return nil
}
// AddUnits starts n units of the given service and allocates machines
// to them as necessary.
func AddUnits(st *state.State, svc *state.Service, n int, machineIdSpec string) ([]*state.Unit, error) {
	units := make([]*state.Unit, n)
	// Hard code for now till we implement a different approach.
	policy := state.AssignCleanEmpty
	// All units should have the same networks as the service.
	networks, err := svc.Networks()
	if err != nil {
		return nil, fmt.Errorf("cannot get service %q networks: %v", svc.Name(), err)
	}
	// TODO what do we do if we fail half-way through this process?
	for i := 0; i < n; i++ {
		unit, err := svc.AddUnit()
		if err != nil {
			return nil, fmt.Errorf("cannot add unit %d/%d to service %q: %v", i+1, n, svc.Name(), err)
		}
		if machineIdSpec != "" {
			if n != 1 {
				return nil, fmt.Errorf("cannot add multiple units of service %q to a single machine", svc.Name())
			}
			// machineIdSpec may be an existing machine or container, eg 3/lxc/2
			// or a new container on a machine, eg lxc:3
			mid := machineIdSpec
			var containerType instance.ContainerType
			specParts := strings.SplitN(machineIdSpec, ":", 2)
			if len(specParts) > 1 {
				firstPart := specParts[0]
				var err error
				if containerType, err = instance.ParseContainerType(firstPart); err == nil {
					mid = specParts[1]
				} else {
					mid = machineIdSpec
				}
			}
			if !names.IsMachine(mid) {
				return nil, fmt.Errorf("invalid force machine id %q", mid)
			}
			var unitCons *constraints.Value
			unitCons, err = unit.Constraints()
			if err != nil {
				return nil, err
			}
			var err error
			var m *state.Machine
			// If a container is to be used, create it.
			if containerType != "" {
				// Create the new machine marked as dirty so that
				// nothing else will grab it before we assign the unit to it.
				template := state.MachineTemplate{
					Series:            unit.Series(),
					Jobs:              []state.MachineJob{state.JobHostUnits},
					Dirty:             true,
					Constraints:       *unitCons,
					RequestedNetworks: networks,
				}
				m, err = st.AddMachineInsideMachine(template, mid, containerType)
			} else {
				m, err = st.Machine(mid)
			}
			if err != nil {
				return nil, fmt.Errorf("cannot assign unit %q to machine: %v", unit.Name(), err)
			}
			err = unit.AssignToMachine(m)
			if err != nil {
				return nil, err
			}
		} else if err := st.AssignUnit(unit, policy); err != nil {
			return nil, err
		}
		units[i] = unit
	}
	return units, nil
}
// addMachine creates a new top-level machine or container in the environment.
func (h *bundleHandler) addMachine(id string, p bundlechanges.AddMachineParams) error {
	services := h.servicesForMachineChange(id)
	// Note that we always have at least one application that justifies the
	// creation of this machine.
	msg := services[0] + " unit"
	svcLen := len(services)
	if svcLen != 1 {
		msg = strings.Join(services[:svcLen-1], ", ") + " and " + services[svcLen-1] + " units"
	}
	// Check whether the desired number of units already exist in the
	// environment, in which case avoid adding other machines to host those
	// application units.
	machine := h.chooseMachine(services...)
	if machine != "" {
		h.results[id] = machine
		notify := make([]string, 0, svcLen)
		for _, application := range services {
			if !h.ignoredMachines[application] {
				h.ignoredMachines[application] = true
				notify = append(notify, application)
			}
		}
		svcLen = len(notify)
		switch svcLen {
		case 0:
			return nil
		case 1:
			msg = notify[0]
		default:
			msg = strings.Join(notify[:svcLen-1], ", ") + " and " + notify[svcLen-1]
		}
		h.log.Infof("avoid creating other machines to host %s units", msg)
		return nil
	}
	cons, err := constraints.Parse(p.Constraints)
	if err != nil {
		// This should never happen, as the bundle is already verified.
		return errors.Annotate(err, "invalid constraints for machine")
	}
	machineParams := params.AddMachineParams{
		Constraints: cons,
		Series:      p.Series,
		Jobs:        []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
	}
	if ct := p.ContainerType; ct != "" {
		// For backwards compatibility with 1.x bundles, we treat lxc
		// placement directives as lxd.
		if ct == "lxc" {
			if !h.warnedLXC {
				h.log.Infof("Bundle has one or more containers specified as lxc. lxc containers are deprecated in Juju 2.0. lxd containers will be deployed instead.")
				h.warnedLXC = true
			}
			ct = string(instance.LXD)
		}
		containerType, err := instance.ParseContainerType(ct)
		if err != nil {
			return errors.Annotatef(err, "cannot create machine for holding %s", msg)
		}
		machineParams.ContainerType = containerType
		if p.ParentId != "" {
			machineParams.ParentId, err = h.resolveMachine(p.ParentId)
			if err != nil {
				return errors.Annotatef(err, "cannot retrieve parent placement for %s", msg)
			}
		}
	}
	r, err := h.api.AddMachines([]params.AddMachineParams{machineParams})
	if err != nil {
		return errors.Annotatef(err, "cannot create machine for holding %s", msg)
	}
	if r[0].Error != nil {
		return errors.Annotatef(r[0].Error, "cannot create machine for holding %s", msg)
	}
	machine = r[0].Machine
	if p.ContainerType == "" {
		logger.Debugf("created new machine %s for holding %s", machine, msg)
	} else if p.ParentId == "" {
		logger.Debugf("created %s container in new machine for holding %s", machine, msg)
	} else {
		logger.Debugf("created %s container in machine %s for holding %s", machine, machineParams.ParentId, msg)
	}
	h.results[id] = machine
	return nil
}
func (mm *MachineManagerAPI) addOneMachine(p params.AddMachineParams) (*state.Machine, error) {
	if p.ParentId != "" && p.ContainerType == "" {
		return nil, fmt.Errorf("parent machine specified without container type")
	}
	if p.ContainerType != "" && p.Placement != nil {
		return nil, fmt.Errorf("container type and placement are mutually exclusive")
	}
	if p.Placement != nil {
		// Extract container type and parent from container placement directives.
		containerType, err := instance.ParseContainerType(p.Placement.Scope)
		if err == nil {
			p.ContainerType = containerType
			p.ParentId = p.Placement.Directive
			p.Placement = nil
		}
	}

	if p.ContainerType != "" || p.Placement != nil {
		// Guard against dubious client by making sure that
		// the following attributes can only be set when we're
		// not using placement.
		p.InstanceId = ""
		p.Nonce = ""
		p.HardwareCharacteristics = instance.HardwareCharacteristics{}
		p.Addrs = nil
	}

	if p.Series == "" {
		conf, err := mm.st.ModelConfig()
		if err != nil {
			return nil, err
		}
		p.Series = config.PreferredSeries(conf)
	}

	var placementDirective string
	if p.Placement != nil {
		env, err := mm.st.Model()
		if err != nil {
			return nil, err
		}
		// For 1.21 we should support both UUID and name, and with 1.22
		// just support UUID.
		if p.Placement.Scope != env.Name() && p.Placement.Scope != env.UUID() {
			return nil, fmt.Errorf("invalid model name %q", p.Placement.Scope)
		}
		placementDirective = p.Placement.Directive
	}

	volumes := make([]state.MachineVolumeParams, 0, len(p.Disks))
	for _, cons := range p.Disks {
		if cons.Count == 0 {
			return nil, errors.Errorf("invalid volume params: count not specified")
		}
		// Pool and Size are validated by AddMachineX.
		volumeParams := state.VolumeParams{
			Pool: cons.Pool,
			Size: cons.Size,
		}
		volumeAttachmentParams := state.VolumeAttachmentParams{}
		for i := uint64(0); i < cons.Count; i++ {
			volumes = append(volumes, state.MachineVolumeParams{
				volumeParams, volumeAttachmentParams,
			})
		}
	}

	jobs, err := common.StateJobs(p.Jobs)
	if err != nil {
		return nil, err
	}
	template := state.MachineTemplate{
		Series:                  p.Series,
		Constraints:             p.Constraints,
		Volumes:                 volumes,
		InstanceId:              p.InstanceId,
		Jobs:                    jobs,
		Nonce:                   p.Nonce,
		HardwareCharacteristics: p.HardwareCharacteristics,
		Addresses:               params.NetworkAddresses(p.Addrs),
		Placement:               placementDirective,
	}
	if p.ContainerType == "" {
		return mm.st.AddOneMachine(template)
	}
	if p.ParentId != "" {
		return mm.st.AddMachineInsideMachine(template, p.ParentId, p.ContainerType)
	}
	return mm.st.AddMachineInsideNewMachine(template, template, p.ContainerType)
}
func (c *AddMachineCommand) Run(ctx *cmd.Context) error {
	if c.Placement != nil && c.Placement.Scope == "ssh" {
		args := manual.ProvisionMachineArgs{
			Host:    c.Placement.Directive,
			EnvName: c.EnvName,
			Stdin:   ctx.Stdin,
			Stdout:  ctx.Stdout,
			Stderr:  ctx.Stderr,
		}
		_, err := manual.ProvisionMachine(args)
		return err
	}

	client, err := juju.NewAPIClientFromName(c.EnvName)
	if err != nil {
		return err
	}
	defer client.Close()

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        []params.MachineJob{params.JobHostUnits},
	}
	results, err := client.AddMachines([]params.AddMachineParams{machineParams})
	if params.IsCodeNotImplemented(err) {
		if c.Placement != nil {
			containerType, parseErr := instance.ParseContainerType(c.Placement.Scope)
			if parseErr != nil {
				// The user specified a non-container placement directive:
				// return original API not implemented error.
				return err
			}
			machineParams.ContainerType = containerType
			machineParams.ParentId = c.Placement.Directive
			machineParams.Placement = nil
		}
		logger.Infof(
			"AddMachinesWithPlacement not supported by the API server, " +
				"falling back to 1.18 compatibility mode",
		)
		results, err = client.AddMachines1dot18([]params.AddMachineParams{machineParams})
	}
	if err != nil {
		return err
	}

	// Currently, only one machine is added, but in future there may be several added in one call.
	machineInfo := results[0]
	if machineInfo.Error != nil {
		return machineInfo.Error
	}
	machineId := machineInfo.Machine
	if names.IsContainerMachine(machineId) {
		ctx.Infof("created container %v", machineId)
	} else {
		ctx.Infof("created machine %v", machineId)
	}
	return nil
}
func (c *AddMachineCommand) Run(ctx *cmd.Context) error {
	if c.Placement != nil && c.Placement.Scope == "ssh" {
		args := manual.ProvisionMachineArgs{
			Host:    c.Placement.Directive,
			EnvName: c.EnvName,
			Stdin:   ctx.Stdin,
			Stdout:  ctx.Stdout,
			Stderr:  ctx.Stderr,
		}
		_, err := manual.ProvisionMachine(args)
		return err
	}

	client, err := getAddMachineAPI(c.EnvName)
	if err != nil {
		return err
	}
	defer client.Close()

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        []params.MachineJob{params.JobHostUnits},
	}
	machines := make([]params.AddMachineParams, c.NumMachines)
	for i := 0; i < c.NumMachines; i++ {
		machines[i] = machineParams
	}

	results, err := client.AddMachines(machines)
	if params.IsCodeNotImplemented(err) {
		if c.Placement != nil {
			containerType, parseErr := instance.ParseContainerType(c.Placement.Scope)
			if parseErr != nil {
				// The user specified a non-container placement directive:
				// return original API not implemented error.
				return err
			}
			machineParams.ContainerType = containerType
			machineParams.ParentId = c.Placement.Directive
			machineParams.Placement = nil
		}
		logger.Infof(
			"AddMachinesWithPlacement not supported by the API server, " +
				"falling back to 1.18 compatibility mode",
		)
		results, err = client.AddMachines1dot18([]params.AddMachineParams{machineParams})
	}
	if err != nil {
		return err
	}

	errs := []error{}
	for _, machineInfo := range results {
		if machineInfo.Error != nil {
			errs = append(errs, machineInfo.Error)
			continue
		}
		machineId := machineInfo.Machine
		if names.IsContainerMachine(machineId) {
			ctx.Infof("created container %v", machineId)
		} else {
			ctx.Infof("created machine %v", machineId)
		}
	}
	if len(errs) == 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create 1 machine\n")
		return errs[0]
	}
	if len(errs) > 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create %d machines\n", len(errs))
		returnErr := []string{}
		for _, e := range errs {
			returnErr = append(returnErr, fmt.Sprintf("%s", e))
		}
		return errors.New(strings.Join(returnErr, ", "))
	}
	return nil
}