Example #1
func (c *ensureAvailabilityCommand) Init(args []string) error {
	if c.NumStateServers < 0 || (c.NumStateServers%2 != 1 && c.NumStateServers != 0) {
		return fmt.Errorf("must specify a number of state servers odd and non-negative")
	}
	if c.PlacementSpec != "" {
		placementSpecs := strings.Split(c.PlacementSpec, ",")
		c.Placement = make([]string, len(placementSpecs))
		for i, spec := range placementSpecs {
			p, err := instance.ParsePlacement(strings.TrimSpace(spec))
			if err == nil && names.IsContainerMachine(p.Directive) {
				return errors.New("ensure-availability cannot be used with container placement directives")
			}
			if err == nil && p.Scope == instance.MachineScope {
				// Targeting machines is ok.
				c.Placement[i] = p.String()
				continue
			}
			if err != instance.ErrPlacementScopeMissing {
				return fmt.Errorf("unsupported ensure-availability placement directive %q", spec)
			}
			c.Placement[i] = spec
		}
	}
	return cmd.CheckEmpty(args)
}
Example #2
func (s *machineSuite) TestMachineIdFormats(c *gc.C) {
	for i, test := range machineIdTests {
		c.Logf("test %d: %q", i, test.pattern)
		c.Assert(names.IsValidMachine(test.pattern), gc.Equals, test.valid)
		c.Assert(names.IsContainerMachine(test.pattern), gc.Equals, test.container)
	}
}
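
The machineIdTests table driving this loop is not shown here. Below is a minimal sketch of what such entries could look like, assuming a struct with the pattern, valid and container fields the test references; the rows are illustrative, not the real table.

// Hypothetical reconstruction of the table used by TestMachineIdFormats.
var machineIdTests = []struct {
	pattern   string // candidate machine id
	valid     bool   // expected names.IsValidMachine result
	container bool   // expected names.IsContainerMachine result
}{
	{"0", true, false},            // top-level machine
	{"42", true, false},           // top-level machine
	{"1/lxc/0", true, true},       // container hosted on machine 1
	{"1/lxc/0/kvm/2", true, true}, // nested container
	{"1/lxc/", false, false},      // malformed: missing container index
	{"foo", false, false},         // not a machine id at all
}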
Example #3
func (c *retryProvisioningCommand) Init(args []string) error {
	if len(args) == 0 {
		return errors.Errorf("no machine specified")
	}
	c.Machines = make([]names.MachineTag, len(args))
	for i, arg := range args {
		if !names.IsValidMachine(arg) {
			return errors.Errorf("invalid machine %q", arg)
		}
		if names.IsContainerMachine(arg) {
			return errors.Errorf("invalid machine %q retry-provisioning does not support containers", arg)
		}
		c.Machines[i] = names.NewMachineTag(arg)
	}
	return nil
}
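
For a runnable illustration of the same checks, here is a self-contained sketch that uses only the names package calls already shown above (IsValidMachine, IsContainerMachine, NewMachineTag). The helper name, the main function and the import path are illustrative assumptions, not part of the original command.

package main

import (
	"fmt"

	"github.com/juju/names" // import path may differ between Juju releases
)

// parseMachineArgs mirrors the checks in retryProvisioningCommand.Init:
// every argument must be a valid, top-level (non-container) machine id.
func parseMachineArgs(args []string) ([]names.MachineTag, error) {
	if len(args) == 0 {
		return nil, fmt.Errorf("no machine specified")
	}
	tags := make([]names.MachineTag, len(args))
	for i, arg := range args {
		if !names.IsValidMachine(arg) {
			return nil, fmt.Errorf("invalid machine %q", arg)
		}
		if names.IsContainerMachine(arg) {
			return nil, fmt.Errorf("invalid machine %q: retry-provisioning does not support containers", arg)
		}
		tags[i] = names.NewMachineTag(arg)
	}
	return tags, nil
}

func main() {
	for _, args := range [][]string{{"0", "3"}, {"1/lxc/0"}, {"bogus"}} {
		tags, err := parseMachineArgs(args)
		fmt.Printf("%v -> %v, err: %v\n", args, tags, err)
	}
}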
Example #4
// Manifold returns a dependency manifold that runs a machiner worker, using
// the resource names defined in the supplied config.
func Manifold(config ManifoldConfig) dependency.Manifold {

	// TODO(waigani) This function is currently covered by functional tests
	// under the machine agent. Add unit tests once infrastructure to do so is
	// in place.

	// newWorker non-trivially wraps NewMachiner to specialise a PostUpgradeManifold.
	var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) {
		currentConfig := a.CurrentConfig()

		// TODO(fwereade): this functionality should be on the
		// deployer facade instead.
		agentFacade := apiagent.NewState(apiCaller)
		envConfig, err := agentFacade.ModelConfig()
		if err != nil {
			return nil, errors.Errorf("cannot read environment config: %v", err)
		}

		ignoreMachineAddresses, _ := envConfig.IgnoreMachineAddresses()
		// Containers only have machine addresses, so we can't ignore them.
		tag := currentConfig.Tag()
		if names.IsContainerMachine(tag.Id()) {
			ignoreMachineAddresses = false
		}
		if ignoreMachineAddresses {
			logger.Infof("machine addresses not used, only addresses from provider")
		}
		accessor := APIMachineAccessor{apimachiner.NewState(apiCaller)}
		w, err := NewMachiner(Config{
			MachineAccessor: accessor,
			Tag:             tag.(names.MachineTag),
			ClearMachineAddressesOnStart: ignoreMachineAddresses,
			NotifyMachineDead: func() error {
				return agent.SetCanUninstall(a)
			},
		})
		if err != nil {
			return nil, errors.Annotate(err, "cannot start machiner worker")
		}
		return w, err
	}

	return util.PostUpgradeManifold(config.PostUpgradeManifoldConfig, newWorker)
}
Example #5
// newWorker non-trivially wraps NewMachiner to specialise a util.AgentApiManifold.
//
// TODO(waigani) This function is currently covered by functional tests
// under the machine agent. Add unit tests once infrastructure to do so is
// in place.
func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) {
	currentConfig := a.CurrentConfig()

	// TODO(fwereade): this functionality should be on the
	// machiner facade instead -- or, better yet, separate
	// the networking concerns from the lifecycle ones and
	// have completely separate workers.
	//
	// (With their own facades.)
	agentFacade := apiagent.NewState(apiCaller)
	envConfig, err := agentFacade.ModelConfig()
	if err != nil {
		return nil, errors.Errorf("cannot read environment config: %v", err)
	}

	ignoreMachineAddresses, _ := envConfig.IgnoreMachineAddresses()
	// Containers only have machine addresses, so we can't ignore them.
	tag := currentConfig.Tag()
	if names.IsContainerMachine(tag.Id()) {
		ignoreMachineAddresses = false
	}
	if ignoreMachineAddresses {
		logger.Infof("machine addresses not used, only addresses from provider")
	}
	accessor := APIMachineAccessor{apimachiner.NewState(apiCaller)}
	w, err := NewMachiner(Config{
		MachineAccessor: accessor,
		Tag:             tag.(names.MachineTag),
		ClearMachineAddressesOnStart: ignoreMachineAddresses,
		NotifyMachineDead: func() error {
			return agent.SetCanUninstall(a)
		},
	})
	if err != nil {
		return nil, errors.Annotate(err, "cannot start machiner worker")
	}
	return w, err
}
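
The container override above is small enough to isolate. A minimal sketch of just that decision, with an illustrative helper name and import path (not part of the original worker):

package machiner

import "github.com/juju/names" // import path may differ between Juju releases

// shouldClearMachineAddresses reports whether machine-reported addresses
// should be ignored on start. Containers only have machine addresses, so
// the ignore-machine-addresses setting is overridden for them.
func shouldClearMachineAddresses(ignoreMachineAddresses bool, machineId string) bool {
	if names.IsContainerMachine(machineId) {
		return false
	}
	return ignoreMachineAddresses
}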
Example #6
func addInstanceTags(env environs.Environ, machines []*state.Machine) error {
	cfg := env.Config()
	tagger, ok := env.(environs.InstanceTagger)
	if !ok {
		logger.Debugf("environment type %q does not support instance tagging", cfg.Type())
		return nil
	}

	// Tag each top-level, provisioned machine.
	logger.Infof("adding tags to existing machine instances")
	for _, m := range machines {
		if names.IsContainerMachine(m.Id()) {
			continue
		}
		instId, err := m.InstanceId()
		if errors.IsNotProvisioned(err) {
			continue
		} else if err != nil {
			return errors.Annotatef(err, "getting instance ID for machine %v", m.Id())
		}

		stateMachineJobs := m.Jobs()
		paramsMachineJobs := make([]multiwatcher.MachineJob, len(stateMachineJobs))
		for i, job := range stateMachineJobs {
			paramsMachineJobs[i] = job.ToParams()
		}

		tags := instancecfg.InstanceTags(cfg, paramsMachineJobs)
		logger.Infof("tagging instance %v: %v", instId, tags)
		if err := tagger.TagInstance(instId, tags); err != nil {
			return errors.Annotatef(err, "tagging instance %v for machine %v", instId, m.Id())
		}
	}

	return nil
}
Example #7
// ensureAvailabilityIntentions returns what we would like
// to do to maintain the availability of the existing servers
// mentioned in the given info, including:
//   demoting unavailable, voting machines;
//   removing unavailable, non-voting, non-vote-holding machines;
//   gathering available, non-voting machines that may be promoted.
func (st *State) ensureAvailabilityIntentions(info *StateServerInfo, placement []string) (*ensureAvailabilityIntent, error) {
	var intent ensureAvailabilityIntent
	for _, s := range placement {
		// TODO(natefinch): unscoped placements shouldn't ever get here (though
		// they do currently).  We should fix up the CLI to always add a scope
		// to placements and then we can remove the need to deal with unscoped
		// placements.
		p, err := instance.ParsePlacement(s)
		if err == instance.ErrPlacementScopeMissing {
			intent.placement = append(intent.placement, s)
			continue
		}
		if err == nil && p.Scope == instance.MachineScope {
			// TODO(natefinch) add env provider policy to check if conversion is
			// possible (e.g. cannot be supported by Azure in HA mode).

			if names.IsContainerMachine(p.Directive) {
				return nil, errors.New("container placement directives not supported")
			}

			m, err := st.Machine(p.Directive)
			if err != nil {
				return nil, errors.Annotatef(err, "can't find machine for placement directive %q", s)
			}
			if m.IsManager() {
				return nil, errors.Errorf("machine for placement directive %q is already a state server", s)
			}
			intent.convert = append(intent.convert, m)
			intent.placement = append(intent.placement, s)
			continue
		}
		return nil, errors.Errorf("unsupported placement directive %q", s)
	}

	for _, mid := range info.MachineIds {
		m, err := st.Machine(mid)
		if err != nil {
			return nil, err
		}
		available, err := stateServerAvailable(m)
		if err != nil {
			return nil, err
		}
		logger.Infof("machine %q, available %v, wants vote %v, has vote %v", m, available, m.WantsVote(), m.HasVote())
		if available {
			if m.WantsVote() {
				intent.maintain = append(intent.maintain, m)
			} else {
				intent.promote = append(intent.promote, m)
			}
			continue
		}
		if m.WantsVote() {
			// The machine wants to vote, so we simply set novote and allow it
			// to run its course to have its vote removed by the worker that
			// maintains the replicaset. We will replace it with an existing
			// non-voting state server if there is one, starting a new one if
			// not.
			intent.demote = append(intent.demote, m)
		} else if m.HasVote() {
			// The machine still has a vote, so keep it around for now.
			intent.maintain = append(intent.maintain, m)
		} else {
			// The machine neither wants to nor has a vote, so remove its
			// JobManageEnviron job immediately.
			intent.remove = append(intent.remove, m)
		}
	}
	logger.Infof("initial intentions: promote %v; maintain %v; demote %v; remove %v; convert: %v",
		intent.promote, intent.maintain, intent.demote, intent.remove, intent.convert)
	return &intent, nil
}
Example #8
func (c *addCommand) Run(ctx *cmd.Context) error {
	client, err := c.getClientAPI()
	if err != nil {
		return errors.Trace(err)
	}
	defer client.Close()

	var machineManager MachineManagerAPI
	if len(c.Disks) > 0 {
		machineManager, err = c.getMachineManagerAPI()
		if err != nil {
			return errors.Trace(err)
		}
		defer machineManager.Close()
		if machineManager.BestAPIVersion() < 1 {
			return errors.New("cannot add machines with disks: not supported by the API server")
		}
	}

	logger.Infof("load config")
	configAttrs, err := client.ModelGet()
	if err != nil {
		return errors.Trace(err)
	}
	config, err := config.New(config.NoDefaults, configAttrs)
	if err != nil {
		return errors.Trace(err)
	}

	if c.Placement != nil && c.Placement.Scope == "ssh" {
		logger.Infof("manual provisioning")
		args := manual.ProvisionMachineArgs{
			Host:   c.Placement.Directive,
			Client: client,
			Stdin:  ctx.Stdin,
			Stdout: ctx.Stdout,
			Stderr: ctx.Stderr,
			UpdateBehavior: &params.UpdateBehavior{
				config.EnableOSRefreshUpdate(),
				config.EnableOSUpgrade(),
			},
		}
		machineId, err := manualProvisioner(args)
		if err == nil {
			ctx.Infof("created machine %v", machineId)
		}
		return err
	}

	logger.Infof("model provisioning")
	if c.Placement != nil && c.Placement.Scope == "model-uuid" {
		c.Placement.Scope = client.ModelUUID()
	}

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits}

	// In the case of MAAS and Joyent, JobManageNetworking is not added,
	// so that the networker starts non-intrusively, as with manual
	// provisioning above. See the related Joyent bug:
	// http://pad.lv/1401423
	if config.Type() != provider.MAAS && config.Type() != provider.Joyent {
		jobs = append(jobs, multiwatcher.JobManageNetworking)
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        jobs,
		Disks:       c.Disks,
	}
	machines := make([]params.AddMachineParams, c.NumMachines)
	for i := 0; i < c.NumMachines; i++ {
		machines[i] = machineParams
	}

	var results []params.AddMachinesResult
	// If disks are specified, we attempt to use the newer MachineManager facade.
	if len(c.Disks) > 0 {
		results, err = machineManager.AddMachines(machines)
	} else {
		results, err = client.AddMachines(machines)
	}
	if params.IsCodeOperationBlocked(err) {
		return block.ProcessBlockedError(err, block.BlockChange)
	}
	if err != nil {
		return errors.Trace(err)
	}

	errs := []error{}
	for _, machineInfo := range results {
		if machineInfo.Error != nil {
			errs = append(errs, machineInfo.Error)
			continue
		}
		machineId := machineInfo.Machine

		if names.IsContainerMachine(machineId) {
			ctx.Infof("created container %v", machineId)
		} else {
			ctx.Infof("created machine %v", machineId)
		}
	}
	if len(errs) == 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create 1 machine\n")
		return errs[0]
	}
	if len(errs) > 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create %d machines\n", len(errs))
		returnErr := []string{}
		for _, e := range errs {
			returnErr = append(returnErr, e.Error())
		}
		return errors.New(strings.Join(returnErr, ", "))
	}
	return nil
}
Example #9
func (c *addCommand) Run(ctx *cmd.Context) error {
	client, err := c.getClientAPI()
	if err != nil {
		return errors.Trace(err)
	}
	defer client.Close()

	var machineManager MachineManagerAPI
	if len(c.Disks) > 0 {
		machineManager, err = c.getMachineManagerAPI()
		if err != nil {
			return errors.Trace(err)
		}
		defer machineManager.Close()
		if machineManager.BestAPIVersion() < 1 {
			return errors.New("cannot add machines with disks: not supported by the API server")
		}
	}

	logger.Infof("load config")
	var config *config.Config
	if defaultStore, err := configstore.Default(); err != nil {
		return err
	} else if config, err = c.Config(defaultStore, client); err != nil {
		return err
	}

	if c.Placement != nil && c.Placement.Scope == "ssh" {
		logger.Infof("manual provisioning")
		args := manual.ProvisionMachineArgs{
			Host:   c.Placement.Directive,
			Client: client,
			Stdin:  ctx.Stdin,
			Stdout: ctx.Stdout,
			Stderr: ctx.Stderr,
			UpdateBehavior: &params.UpdateBehavior{
				config.EnableOSRefreshUpdate(),
				config.EnableOSUpgrade(),
			},
		}
		machineId, err := manualProvisioner(args)
		if err == nil {
			ctx.Infof("created machine %v", machineId)
		}
		return err
	}

	logger.Infof("environment provisioning")
	if c.Placement != nil && c.Placement.Scope == "env-uuid" {
		c.Placement.Scope = client.EnvironmentUUID()
	}

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits}

	envVersion, err := envcmd.GetEnvironmentVersion(client)
	if err != nil {
		return err
	}

	// Servers before 1.21-alpha2 don't have the networker, so don't
	// try to use JobManageNetworking with them.
	//
	// In the case of MAAS and Joyent, JobManageNetworking is not added,
	// so that the networker starts non-intrusively, as with manual
	// provisioning above. See the related Joyent bug:
	// http://pad.lv/1401423
	if envVersion.Compare(version.MustParse("1.21-alpha2")) >= 0 &&
		config.Type() != provider.MAAS &&
		config.Type() != provider.Joyent {
		jobs = append(jobs, multiwatcher.JobManageNetworking)
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        jobs,
		Disks:       c.Disks,
	}
	machines := make([]params.AddMachineParams, c.NumMachines)
	for i := 0; i < c.NumMachines; i++ {
		machines[i] = machineParams
	}

	var results []params.AddMachinesResult
	// If disks are specified, we attempt to use the newer MachineManager facade.
	if len(c.Disks) > 0 {
		results, err = machineManager.AddMachines(machines)
	} else {
		results, err = client.AddMachines(machines)
		if params.IsCodeNotImplemented(err) {
			if c.Placement != nil {
				containerType, parseErr := instance.ParseContainerType(c.Placement.Scope)
				if parseErr != nil {
					// The user specified a non-container placement directive:
					// return original API not implemented error.
					return err
				}
				machineParams.ContainerType = containerType
				machineParams.ParentId = c.Placement.Directive
				machineParams.Placement = nil
			}
			logger.Infof(
				"AddMachinesWithPlacement not supported by the API server, " +
					"falling back to 1.18 compatibility mode",
			)
			results, err = client.AddMachines1dot18([]params.AddMachineParams{machineParams})
		}
	}
	if params.IsCodeOperationBlocked(err) {
		return block.ProcessBlockedError(err, block.BlockChange)
	}
	if err != nil {
		return errors.Trace(err)
	}

	errs := []error{}
	for _, machineInfo := range results {
		if machineInfo.Error != nil {
			errs = append(errs, machineInfo.Error)
			continue
		}
		machineId := machineInfo.Machine

		if names.IsContainerMachine(machineId) {
			ctx.Infof("created container %v", machineId)
		} else {
			ctx.Infof("created machine %v", machineId)
		}
	}
	if len(errs) == 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create 1 machine\n")
		return errs[0]
	}
	if len(errs) > 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create %d machines\n", len(errs))
		returnErr := []string{}
		for _, e := range errs {
			returnErr = append(returnErr, e.Error())
		}
		return errors.New(strings.Join(returnErr, ", "))
	}
	return nil
}
Example #10
func (c *AddMachineCommand) Run(ctx *cmd.Context) error {
	client, err := getAddMachineAPI(c)
	if err != nil {
		return errors.Trace(err)
	}
	defer client.Close()

	if c.Placement != nil && c.Placement.Scope == "ssh" {

		var config *config.Config
		if defaultStore, err := configstore.Default(); err != nil {
			return err
		} else if config, err = c.Config(defaultStore); err != nil {
			return err
		}

		args := manual.ProvisionMachineArgs{
			Host:   c.Placement.Directive,
			Client: client,
			Stdin:  ctx.Stdin,
			Stdout: ctx.Stdout,
			Stderr: ctx.Stderr,
			UpdateBehavior: &params.UpdateBehavior{
				config.EnableOSRefreshUpdate(),
				config.EnableOSUpgrade(),
			},
		}
		machineId, err := manualProvisioner(args)
		if err == nil {
			ctx.Infof("created machine %v", machineId)
		}
		return err
	}

	if c.Placement != nil && c.Placement.Scope == "env-uuid" {
		c.Placement.Scope = client.EnvironmentUUID()
	}

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        []params.MachineJob{params.JobHostUnits},
	}
	machines := make([]params.AddMachineParams, c.NumMachines)
	for i := 0; i < c.NumMachines; i++ {
		machines[i] = machineParams
	}

	results, err := client.AddMachines(machines)
	if err != nil {
		return errors.Trace(err)
	}

	errs := []error{}
	for _, machineInfo := range results {
		if machineInfo.Error != nil {
			errs = append(errs, machineInfo.Error)
			continue
		}
		machineId := machineInfo.Machine

		if names.IsContainerMachine(machineId) {
			ctx.Infof("created container %v", machineId)
		} else {
			ctx.Infof("created machine %v", machineId)
		}
	}
	if len(errs) == 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create 1 machine\n")
		return errs[0]
	}
	if len(errs) > 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create %d machines\n", len(errs))
		returnErr := []string{}
		for _, e := range errs {
			returnErr = append(returnErr, e.Error())
		}
		return errors.New(strings.Join(returnErr, ", "))
	}
	return nil
}
Example #11
func (c *AddMachineCommand) Run(ctx *cmd.Context) error {
	if c.Placement != nil && c.Placement.Scope == "ssh" {
		args := manual.ProvisionMachineArgs{
			Host:    c.Placement.Directive,
			EnvName: c.EnvName,
			Stdin:   ctx.Stdin,
			Stdout:  ctx.Stdout,
			Stderr:  ctx.Stderr,
		}
		_, err := manual.ProvisionMachine(args)
		return err
	}

	client, err := juju.NewAPIClientFromName(c.EnvName)
	if err != nil {
		return err
	}
	defer client.Close()

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        []params.MachineJob{params.JobHostUnits},
	}
	results, err := client.AddMachines([]params.AddMachineParams{machineParams})
	if params.IsCodeNotImplemented(err) {
		if c.Placement != nil {
			containerType, parseErr := instance.ParseContainerType(c.Placement.Scope)
			if parseErr != nil {
				// The user specified a non-container placement directive:
				// return original API not implemented error.
				return err
			}
			machineParams.ContainerType = containerType
			machineParams.ParentId = c.Placement.Directive
			machineParams.Placement = nil
		}
		logger.Infof(
			"AddMachinesWithPlacement not supported by the API server, " +
				"falling back to 1.18 compatibility mode",
		)
		results, err = client.AddMachines1dot18([]params.AddMachineParams{machineParams})
	}
	if err != nil {
		return err
	}

	// Currently, only one machine is added, but in future there may be several added in one call.
	machineInfo := results[0]
	if machineInfo.Error != nil {
		return machineInfo.Error
	}
	machineId := machineInfo.Machine

	if names.IsContainerMachine(machineId) {
		ctx.Infof("created container %v", machineId)
	} else {
		ctx.Infof("created machine %v", machineId)
	}
	return nil
}
Example #12
func (c *AddMachineCommand) Run(ctx *cmd.Context) error {
	if c.Placement != nil && c.Placement.Scope == "ssh" {
		args := manual.ProvisionMachineArgs{
			Host:    c.Placement.Directive,
			EnvName: c.EnvName,
			Stdin:   ctx.Stdin,
			Stdout:  ctx.Stdout,
			Stderr:  ctx.Stderr,
		}
		_, err := manual.ProvisionMachine(args)
		return err
	}

	client, err := getAddMachineAPI(c.EnvName)
	if err != nil {
		return err
	}
	defer client.Close()

	if c.Placement != nil && c.Placement.Scope == instance.MachineScope {
		// It does not make sense to add-machine <id>.
		return fmt.Errorf("machine-id cannot be specified when adding machines")
	}

	machineParams := params.AddMachineParams{
		Placement:   c.Placement,
		Series:      c.Series,
		Constraints: c.Constraints,
		Jobs:        []params.MachineJob{params.JobHostUnits},
	}
	machines := make([]params.AddMachineParams, c.NumMachines)
	for i := 0; i < c.NumMachines; i++ {
		machines[i] = machineParams
	}

	results, err := client.AddMachines(machines)
	if params.IsCodeNotImplemented(err) {
		if c.Placement != nil {
			containerType, parseErr := instance.ParseContainerType(c.Placement.Scope)
			if parseErr != nil {
				// The user specified a non-container placement directive:
				// return original API not implemented error.
				return err
			}
			machineParams.ContainerType = containerType
			machineParams.ParentId = c.Placement.Directive
			machineParams.Placement = nil
		}
		logger.Infof(
			"AddMachinesWithPlacement not supported by the API server, " +
				"falling back to 1.18 compatibility mode",
		)
		results, err = client.AddMachines1dot18([]params.AddMachineParams{machineParams})
	}
	if err != nil {
		return err
	}

	errs := []error{}
	for _, machineInfo := range results {
		if machineInfo.Error != nil {
			errs = append(errs, machineInfo.Error)
			continue
		}
		machineId := machineInfo.Machine

		if names.IsContainerMachine(machineId) {
			ctx.Infof("created container %v", machineId)
		} else {
			ctx.Infof("created machine %v", machineId)
		}
	}
	if len(errs) == 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create 1 machine\n")
		return errs[0]
	}
	if len(errs) > 1 {
		fmt.Fprintf(ctx.Stderr, "failed to create %d machines\n", len(errs))
		returnErr := []string{}
		for _, e := range errs {
			returnErr = append(returnErr, e.Error())
		}
		return errors.New(strings.Join(returnErr, ", "))
	}
	return nil
}