Example #1
File: bootstrap.go Project: kapilt/juju
func ConfigureMachine(ctx environs.BootstrapContext, client ssh.Client, host string, machineConfig *cloudinit.MachineConfig) error {
	// Bootstrap is synchronous, and will spawn a subprocess
	// to complete the procedure. If the user hits Ctrl-C,
	// SIGINT is sent to the foreground process attached to
	// the terminal, which will be the ssh subprocess at this
	// point. For that reason, we do not call StopInterruptNotify
	// until this function completes.
	cloudcfg := coreCloudinit.New()
	cloudcfg.SetAptUpdate(machineConfig.EnableOSRefreshUpdate)
	cloudcfg.SetAptUpgrade(machineConfig.EnableOSUpgrade)

	udata, err := cloudinit.NewUserdataConfig(machineConfig, cloudcfg)
	if err != nil {
		return err
	}
	if err := udata.ConfigureJuju(); err != nil {
		return err
	}
	configScript, err := sshinit.ConfigureScript(cloudcfg)
	if err != nil {
		return err
	}
	script := shell.DumpFileOnErrorScript(machineConfig.CloudInitOutputLog) + configScript
	return sshinit.RunConfigureScript(script, sshinit.ConfigureParams{
		Host:           "ubuntu@" + host,
		Client:         client,
		Config:         cloudcfg,
		ProgressWriter: ctx.GetStderr(),
	})
}
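
The script assembled above prepends the output of shell.DumpFileOnErrorScript to the generated configure script, so that a failure during configuration surfaces the cloud-init log. Below is a minimal sketch of that composition; the dumpOnErrorPrefix helper and its trap body are illustrative assumptions, not Juju's shell package, and the script is run locally instead of over SSH.

package main

import (
	"fmt"
	"os/exec"
)

// dumpOnErrorPrefix returns a bash snippet that prints the given log file if
// any later command fails, playing the role of shell.DumpFileOnErrorScript
// (name and implementation are assumptions for illustration only).
func dumpOnErrorPrefix(logPath string) string {
	return fmt.Sprintf("set -e\ntrap 'cat %q || true' ERR\n", logPath)
}

func main() {
	configScript := "echo configuring...\nfalse # simulate a configuration failure\n"
	script := dumpOnErrorPrefix("/var/log/cloud-init-output.log") + configScript

	// Run the composed script locally; ConfigureMachine instead streams it to
	// the remote host via sshinit.RunConfigureScript.
	out, err := exec.Command("/bin/bash", "-c", script).CombinedOutput()
	fmt.Printf("%s(err: %v)\n", out, err)
}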
Example #2
// handleBootstrapError cleans up after a failed bootstrap.
func handleBootstrapError(err error, ctx environs.BootstrapContext, inst instance.Instance, env environs.Environ) {
	if err == nil {
		return
	}

	logger.Errorf("bootstrap failed: %v", err)
	ch := make(chan os.Signal, 1)
	ctx.InterruptNotify(ch)
	defer ctx.StopInterruptNotify(ch)
	defer close(ch)
	go func() {
		for range ch {
			fmt.Fprintln(ctx.GetStderr(), "Cleaning up failed bootstrap")
		}
	}()

	if inst != nil {
		fmt.Fprintln(ctx.GetStderr(), "Stopping instance...")
		if stoperr := env.StopInstances(inst.Id()); stoperr != nil {
			logger.Errorf("cannot stop failed bootstrap instance %q: %v", inst.Id(), stoperr)
		} else {
			// set to nil so we know we can safely delete the state file
			inst = nil
		}
	}
	// We only delete the bootstrap state file if either we didn't
	// start an instance, or we managed to cleanly stop it.
	if inst == nil {
		if rmerr := bootstrap.DeleteStateFile(env.Storage()); rmerr != nil {
			logger.Errorf("cannot delete bootstrap state file: %v", rmerr)
		}
	}
}
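
handleBootstrapError keeps interrupt notification registered while cleanup runs and drains the signal channel in a goroutine, so pressing Ctrl-C during cleanup only prints a notice instead of aborting. The sketch below shows the same pattern standalone, with the standard library's signal.Notify and signal.Stop standing in for ctx.InterruptNotify and ctx.StopInterruptNotify.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"time"
)

// cleanupWithInterruptNotice registers for SIGINT, drains the channel in a
// goroutine, performs cleanup work, and then unregisters and shuts the
// goroutine down cleanly.
func cleanupWithInterruptNotice() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	defer signal.Stop(ch)

	done := make(chan struct{})
	go func() {
		defer close(done)
		for range ch {
			// Acknowledge Ctrl-C, but keep cleaning up rather than aborting.
			fmt.Fprintln(os.Stderr, "Cleaning up failed bootstrap")
		}
	}()

	// ... stop instances, delete state files, etc. ...
	time.Sleep(100 * time.Millisecond)

	// Stop forwarding signals, then let the drain goroutine exit.
	signal.Stop(ch)
	close(ch)
	<-done
}

func main() {
	cleanupWithInterruptNotice()
}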
Example #3
File: bootstrap.go Project: kapilt/juju
// Bootstrap is a common implementation of the Bootstrap method defined on
// environs.Environ; we strongly recommend that this implementation be used
// when writing a new provider.
func Bootstrap(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, err error) {
	// TODO make safe in the case of racing Bootstraps
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	var inst instance.Instance
	defer func() { handleBootstrapError(err, ctx, inst, env) }()

	// First thing, ensure we have tools otherwise there's no point.
	series = config.PreferredSeries(env.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return "", "", nil, err
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return "", "", nil, fmt.Errorf("no SSH client available")
	}

	machineConfig, err := environs.NewBootstrapMachineConfig(args.Constraints, series)
	if err != nil {
		return "", "", nil, err
	}
	machineConfig.EnableOSRefreshUpdate = env.Config().EnableOSRefreshUpdate()
	machineConfig.EnableOSUpgrade = env.Config().EnableOSUpgrade()

	fmt.Fprintln(ctx.GetStderr(), "Launching instance")
	inst, hw, _, err := env.StartInstance(environs.StartInstanceParams{
		Constraints:   args.Constraints,
		Tools:         availableTools,
		MachineConfig: machineConfig,
		Placement:     args.Placement,
	})
	if err != nil {
		return "", "", nil, fmt.Errorf("cannot start bootstrap instance: %v", err)
	}
	fmt.Fprintf(ctx.GetStderr(), " - %s\n", inst.Id())

	err = SaveState(env.Storage(), &BootstrapState{
		StateInstances: []instance.Id{inst.Id()},
	})
	if err != nil {
		return "", "", nil, fmt.Errorf("cannot save state: %v", err)
	}
	finalize := func(ctx environs.BootstrapContext, mcfg *cloudinit.MachineConfig) error {
		mcfg.InstanceId = inst.Id()
		mcfg.HardwareCharacteristics = hw
		if err := environs.FinishMachineConfig(mcfg, env.Config()); err != nil {
			return err
		}
		return FinishBootstrap(ctx, client, inst, mcfg)
	}
	return *hw.Arch, series, finalize, nil
}
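
Note how Bootstrap declares err as a named result and registers handleBootstrapError in a deferred closure: the deferred call sees whatever error the function ultimately returns, from any return path. Here is a minimal sketch of that idiom, with cleanupOnError as an illustrative stand-in for handleBootstrapError.

package main

import (
	"errors"
	"fmt"
)

// cleanupOnError only acts when the surrounding function is returning a
// non-nil error, mirroring handleBootstrapError's early return on err == nil.
func cleanupOnError(err error, resource string) {
	if err == nil {
		return
	}
	fmt.Printf("cleaning up %s after failure: %v\n", resource, err)
}

// provision uses a named error result plus a deferred closure, so the cleanup
// observes the final returned error.
func provision(fail bool) (err error) {
	resource := "instance-0"
	defer func() { cleanupOnError(err, resource) }()

	if fail {
		return errors.New("cannot start bootstrap instance")
	}
	return nil
}

func main() {
	fmt.Println("ok:", provision(false))
	fmt.Println("failed:", provision(true))
}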
Example #4
// waitSSH waits for the instance to be assigned a routable
// address, then waits until we can connect to it via SSH.
//
// waitSSH attempts to connect to all addresses returned by the instance
// in parallel; the first succeeding one wins. We ensure that
// private addresses are for the correct machine by checking
// the presence of a file on the machine that contains the
// machine's nonce. The "checkHostScript" is a bash script
// that performs this file check.
func waitSSH(ctx environs.BootstrapContext, interrupted <-chan os.Signal, client ssh.Client, checkHostScript string, inst addresser, timeout config.SSHTimeoutOpts) (addr string, err error) {
	globalTimeout := time.After(timeout.Timeout)
	pollAddresses := time.NewTimer(0)

	// checker checks each address in a loop, in parallel,
	// until one succeeds, the global timeout is reached,
	// or the tomb is killed.
	checker := parallelHostChecker{
		Try:             parallel.NewTry(0, nil),
		client:          client,
		stderr:          ctx.GetStderr(),
		active:          make(map[network.Address]chan struct{}),
		checkDelay:      timeout.RetryDelay,
		checkHostScript: checkHostScript,
	}
	defer checker.wg.Wait()
	defer checker.Kill()

	fmt.Fprintln(ctx.GetStderr(), "Waiting for address")
	for {
		select {
		case <-pollAddresses.C:
			pollAddresses.Reset(timeout.AddressesDelay)
			if err := inst.Refresh(); err != nil {
				return "", fmt.Errorf("refreshing addresses: %v", err)
			}
			addresses, err := inst.Addresses()
			if err != nil {
				return "", fmt.Errorf("getting addresses: %v", err)
			}
			checker.UpdateAddresses(addresses)
		case <-globalTimeout:
			checker.Close()
			lastErr := checker.Wait()
			format := "waited for %v "
			args := []interface{}{timeout.Timeout}
			if len(checker.active) == 0 {
				format += "without getting any addresses"
			} else {
				format += "without being able to connect"
			}
			if lastErr != nil && lastErr != parallel.ErrStopped {
				format += ": %v"
				args = append(args, lastErr)
			}
			return "", fmt.Errorf(format, args...)
		case <-interrupted:
			return "", fmt.Errorf("interrupted")
		case <-checker.Dead():
			result, err := checker.Result()
			if err != nil {
				return "", err
			}
			return result.(*hostChecker).addr.Value, nil
		}
	}
}
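
The control flow of waitSSH is a select loop over three events: a resettable polling timer, a global timeout, and the interrupt channel. The sketch below strips that loop down to its skeleton; pollUntil and its stubbed check function are illustrative stand-ins for the parallel host checker, not Juju API.

package main

import (
	"errors"
	"fmt"
	"os"
	"os/signal"
	"time"
)

// pollUntil polls check on a resettable timer, gives up after a global
// timeout, and bails out early on interrupt.
func pollUntil(check func() (string, bool), retry, timeout time.Duration) (string, error) {
	interrupted := make(chan os.Signal, 1)
	signal.Notify(interrupted, os.Interrupt)
	defer signal.Stop(interrupted)

	globalTimeout := time.After(timeout)
	poll := time.NewTimer(0) // fire immediately on the first iteration
	defer poll.Stop()

	for {
		select {
		case <-poll.C:
			if addr, ok := check(); ok {
				return addr, nil
			}
			poll.Reset(retry)
		case <-globalTimeout:
			return "", fmt.Errorf("waited for %v without being able to connect", timeout)
		case <-interrupted:
			return "", errors.New("interrupted")
		}
	}
}

func main() {
	attempts := 0
	addr, err := pollUntil(func() (string, bool) {
		attempts++
		return "10.0.0.1", attempts >= 3 // pretend the host becomes reachable on the third poll
	}, 50*time.Millisecond, 2*time.Second)
	fmt.Println(addr, err)
}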
Example #5
// Bootstrap is a common implementation of the Bootstrap method defined on
// environs.Environ; we strongly recommend that this implementation be used
// when writing a new provider.
func Bootstrap(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams) (err error) {
	// TODO make safe in the case of racing Bootstraps
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	var inst instance.Instance
	defer func() { handleBootstrapError(err, ctx, inst, env) }()

	network.InitializeFromConfig(env.Config())

	// First thing, ensure we have tools otherwise there's no point.
	selectedTools, err := EnsureBootstrapTools(ctx, env, config.PreferredSeries(env.Config()), args.Constraints.Arch)
	if err != nil {
		return err
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return fmt.Errorf("no SSH client available")
	}

	privateKey, err := GenerateSystemSSHKey(env)
	if err != nil {
		return err
	}
	machineConfig := environs.NewBootstrapMachineConfig(privateKey)

	fmt.Fprintln(ctx.GetStderr(), "Launching instance")
	inst, hw, _, err := env.StartInstance(environs.StartInstanceParams{
		Constraints:   args.Constraints,
		Tools:         selectedTools,
		MachineConfig: machineConfig,
		Placement:     args.Placement,
	})
	if err != nil {
		return fmt.Errorf("cannot start bootstrap instance: %v", err)
	}
	fmt.Fprintf(ctx.GetStderr(), " - %s\n", inst.Id())
	machineConfig.InstanceId = inst.Id()
	machineConfig.HardwareCharacteristics = hw

	err = bootstrap.SaveState(
		env.Storage(),
		&bootstrap.BootstrapState{
			StateInstances: []instance.Id{inst.Id()},
		})
	if err != nil {
		return fmt.Errorf("cannot save state: %v", err)
	}
	return FinishBootstrap(ctx, client, inst, machineConfig)
}
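
Before handing off to FinishBootstrap, this variant records which instance was started via bootstrap.SaveState, so a later cleanup (or DeleteStateFile in handleBootstrapError) knows what to tear down. The sketch below only illustrates the idea by writing a JSON record to a local file; the file name, layout, and helper are assumptions, since Juju's real SaveState writes to provider storage in its own format.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// bootstrapState mirrors the shape of the record passed to bootstrap.SaveState
// above; this JSON layout is purely illustrative.
type bootstrapState struct {
	StateInstances []string `json:"state-instances"`
}

// saveState persists the record so a later cleanup can find the instance.
func saveState(dir string, st bootstrapState) error {
	data, err := json.Marshal(st)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, "provider-state"), data, 0o644)
}

func main() {
	dir, err := os.MkdirTemp("", "bootstrap-state")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(dir)

	if err := saveState(dir, bootstrapState{StateInstances: []string{"i-0123"}}); err != nil {
		fmt.Println("cannot save state:", err)
		return
	}
	fmt.Println("state saved under", dir)
}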
Example #6
File: bootstrap.go Project: kat-co/juju
// BootstrapInstance creates a new instance with the series of its choice,
// constrained to those of the available tools, and
// returns the instance result, series, and a function that
// must be called to finalize the bootstrap process by transferring
// the tools and installing the initial Juju controller.
// This method is called by Bootstrap above, which implements environs.Bootstrap, but
// is also exported so that providers can manipulate the started instance.
func BootstrapInstance(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams,
) (_ *environs.StartInstanceResult, selectedSeries string, _ environs.BootstrapFinalizer, err error) {
	// TODO make safe in the case of racing Bootstraps
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	// First thing, ensure we have tools otherwise there's no point.
	if args.BootstrapSeries != "" {
		selectedSeries = args.BootstrapSeries
	} else {
		selectedSeries = config.PreferredSeries(env.Config())
	}
	availableTools, err := args.AvailableTools.Match(coretools.Filter{
		Series: selectedSeries,
	})
	if err != nil {
		return nil, "", nil, err
	}

	// Filter image metadata to the selected series.
	var imageMetadata []*imagemetadata.ImageMetadata
	seriesVersion, err := series.SeriesVersion(selectedSeries)
	if err != nil {
		return nil, "", nil, errors.Trace(err)
	}
	for _, m := range args.ImageMetadata {
		if m.Version != seriesVersion {
			continue
		}
		imageMetadata = append(imageMetadata, m)
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return nil, "", nil, fmt.Errorf("no SSH client available")
	}

	publicKey, err := simplestreams.UserPublicSigningKey()
	if err != nil {
		return nil, "", nil, err
	}
	envCfg := env.Config()
	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(
		args.ControllerConfig, args.BootstrapConstraints, args.ModelConstraints, selectedSeries, publicKey,
	)
	if err != nil {
		return nil, "", nil, err
	}
	instanceConfig.EnableOSRefreshUpdate = env.Config().EnableOSRefreshUpdate()
	instanceConfig.EnableOSUpgrade = env.Config().EnableOSUpgrade()

	instanceConfig.Tags = instancecfg.InstanceTags(envCfg.UUID(), args.ControllerConfig.ControllerUUID(), envCfg, instanceConfig.Jobs)
	maybeSetBridge := func(icfg *instancecfg.InstanceConfig) {
		// If we need to override the default bridge name, do it now. When
		// args.ContainerBridgeName is empty, the default names for LXC
		// (lxcbr0) and KVM (virbr0) will be used.
		if args.ContainerBridgeName != "" {
			logger.Debugf("using %q as network bridge for all container types", args.ContainerBridgeName)
			if icfg.AgentEnvironment == nil {
				icfg.AgentEnvironment = make(map[string]string)
			}
			icfg.AgentEnvironment[agent.LxcBridge] = args.ContainerBridgeName
		}
	}
	maybeSetBridge(instanceConfig)

	cloudRegion := args.CloudName
	if args.CloudRegion != "" {
		cloudRegion += "/" + args.CloudRegion
	}
	fmt.Fprintf(ctx.GetStderr(), "Launching controller instance(s) on %s...\n", cloudRegion)
	// instanceStatus prints status-change reports during provisioning.
	// Note the carriage returns, meaning subsequent prints are to the same
	// line of stderr, not a new line.
	instanceStatus := func(settableStatus status.Status, info string, data map[string]interface{}) error {
		// The data arg is not expected to be used in this case, but
		// print it, rather than ignore it, if we get something.
		dataString := ""
		if len(data) > 0 {
			dataString = fmt.Sprintf(" %v", data)
		}
		fmt.Fprintf(ctx.GetStderr(), " - %s%s\r", info, dataString)
		return nil
	}
	// Likely used after the final instanceStatus call to white-out the
	// current stderr line before the next use, removing any residual status
	// reporting output.
	statusCleanup := func(info string) error {
		// The leading spaces account for the leading characters
		// emitted by instanceStatus above.
		fmt.Fprintf(ctx.GetStderr(), "   %s\r", info)
		return nil
	}
	result, err := env.StartInstance(environs.StartInstanceParams{
		ControllerUUID:  args.ControllerConfig.ControllerUUID(),
		Constraints:     args.BootstrapConstraints,
		Tools:           availableTools,
		InstanceConfig:  instanceConfig,
		Placement:       args.Placement,
		ImageMetadata:   imageMetadata,
		StatusCallback:  instanceStatus,
		CleanupCallback: statusCleanup,
	})
	if err != nil {
		return nil, "", nil, errors.Annotate(err, "cannot start bootstrap instance")
	}
	// We need some padding below to overwrite any previous messages. We'll use a width of 40.
	msg := fmt.Sprintf(" - %s", result.Instance.Id())
	if len(msg) < 40 {
		padding := make([]string, 40-len(msg))
		msg += strings.Join(padding, " ")
	}
	fmt.Fprintln(ctx.GetStderr(), msg)

	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig, opts environs.BootstrapDialOpts) error {
		icfg.Bootstrap.BootstrapMachineInstanceId = result.Instance.Id()
		icfg.Bootstrap.BootstrapMachineHardwareCharacteristics = result.Hardware
		envConfig := env.Config()
		if result.Config != nil {
			updated, err := envConfig.Apply(result.Config.UnknownAttrs())
			if err != nil {
				return errors.Trace(err)
			}
			envConfig = updated
		}
		if err := instancecfg.FinishInstanceConfig(icfg, envConfig); err != nil {
			return err
		}
		maybeSetBridge(icfg)
		return FinishBootstrap(ctx, client, env, result.Instance, icfg, opts)
	}
	return result, selectedSeries, finalize, nil
}
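
The instanceStatus and statusCleanup callbacks rely on trailing carriage returns so successive progress reports overwrite a single stderr line rather than scrolling, and the final padded instance-ID message wipes out any residue. A small sketch of that terminal trick follows; the helper names are illustrative.

package main

import (
	"fmt"
	"os"
	"strings"
	"time"
)

// report mimics the instanceStatus callback: the trailing carriage return
// keeps rewriting the same stderr line while provisioning progresses.
func report(info string) {
	fmt.Fprintf(os.Stderr, " - %s\r", info)
}

// clearLine mimics statusCleanup: overwrite the status line with spaces so no
// residue remains before the final message is printed on its own line.
func clearLine(width int) {
	fmt.Fprintf(os.Stderr, "%s\r", strings.Repeat(" ", width))
}

func main() {
	for _, info := range []string{"allocating", "booting", "running"} {
		report(info)
		time.Sleep(200 * time.Millisecond)
	}
	clearLine(40)
	fmt.Fprintln(os.Stderr, " - i-0123456789") // final line, like the padded instance ID print above
}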
Example #7
// BootstrapInstance creates a new instance with the series and architecture
// of its choice, constrained to those of the available tools, and
// returns the instance result, series, and a function that
// must be called to finalize the bootstrap process by transferring
// the tools and installing the initial Juju state server.
// This method is called by Bootstrap above, which implements environs.Bootstrap, but
// is also exported so that providers can manipulate the started instance.
func BootstrapInstance(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams,
) (_ *environs.StartInstanceResult, series string, _ environs.BootstrapFinalizer, err error) {
	// TODO make safe in the case of racing Bootstraps
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	// First thing, ensure we have tools otherwise there's no point.
	series = config.PreferredSeries(env.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return nil, "", nil, err
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return nil, "", nil, fmt.Errorf("no SSH client available")
	}

	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(args.Constraints, series)
	if err != nil {
		return nil, "", nil, err
	}
	instanceConfig.EnableOSRefreshUpdate = env.Config().EnableOSRefreshUpdate()
	instanceConfig.EnableOSUpgrade = env.Config().EnableOSUpgrade()
	instanceConfig.Tags = instancecfg.InstanceTags(env.Config(), instanceConfig.Jobs)
	maybeSetBridge := func(icfg *instancecfg.InstanceConfig) {
		// If we need to override the default bridge name, do it now. When
		// args.ContainerBridgeName is empty, the default names for LXC
		// (lxcbr0) and KVM (virbr0) will be used.
		if args.ContainerBridgeName != "" {
			logger.Debugf("using %q as network bridge for all container types", args.ContainerBridgeName)
			if icfg.AgentEnvironment == nil {
				icfg.AgentEnvironment = make(map[string]string)
			}
			icfg.AgentEnvironment[agent.LxcBridge] = args.ContainerBridgeName
		}
	}
	maybeSetBridge(instanceConfig)

	fmt.Fprintln(ctx.GetStderr(), "Launching instance")
	result, err := env.StartInstance(environs.StartInstanceParams{
		Constraints:    args.Constraints,
		Tools:          availableTools,
		InstanceConfig: instanceConfig,
		Placement:      args.Placement,
	})
	if err != nil {
		return nil, "", nil, errors.Annotate(err, "cannot start bootstrap instance")
	}
	fmt.Fprintf(ctx.GetStderr(), " - %s\n", result.Instance.Id())

	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		icfg.InstanceId = result.Instance.Id()
		icfg.HardwareCharacteristics = result.Hardware
		if err := instancecfg.FinishInstanceConfig(icfg, env.Config()); err != nil {
			return err
		}
		maybeSetBridge(icfg)
		return FinishBootstrap(ctx, client, result.Instance, icfg)
	}
	return result, series, finalize, nil
}
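
Rather than configuring the machine inline, BootstrapInstance returns a finalizer closure that captures the StartInstance result; the caller invokes it later to finish provisioning. The toy sketch below shows that split with stand-in types replacing the environs interfaces; none of these names are Juju's.

package main

import "fmt"

// instanceResult stands in for environs.StartInstanceResult: just enough to
// show what the finalizer closure captures.
type instanceResult struct {
	ID       string
	Hardware string
}

// startInstance is a stub for env.StartInstance.
func startInstance() *instanceResult {
	return &instanceResult{ID: "i-0123", Hardware: "arch=amd64 mem=4G"}
}

// bootstrapInstance starts the instance now but defers the expensive
// configuration into a closure that captures the result, letting the caller
// decide when to finalize.
func bootstrapInstance() (*instanceResult, func() error) {
	result := startInstance()
	finalize := func() error {
		// In the real code this fills in the instance config and calls
		// FinishBootstrap over SSH.
		fmt.Println("finalizing", result.ID, "on", result.Hardware)
		return nil
	}
	return result, finalize
}

func main() {
	result, finalize := bootstrapInstance()
	fmt.Println("started", result.ID)
	if err := finalize(); err != nil {
		fmt.Println("finalize failed:", err)
	}
}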
Example #8
// BootstrapInstance creates a new instance with the series and architecture
// of its choice, constrained to those of the available tools, and
// returns the instance result, series, and a function that
// must be called to finalize the bootstrap process by transferring
// the tools and installing the initial Juju controller.
// This method is called by Bootstrap above, which implements environs.Bootstrap, but
// is also exported so that providers can manipulate the started instance.
func BootstrapInstance(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams,
) (_ *environs.StartInstanceResult, selectedSeries string, _ environs.BootstrapFinalizer, err error) {
	// TODO make safe in the case of racing Bootstraps
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	// First thing, ensure we have tools otherwise there's no point.
	if args.BootstrapSeries != "" {
		selectedSeries = args.BootstrapSeries
	} else {
		selectedSeries = config.PreferredSeries(env.Config())
	}
	availableTools, err := args.AvailableTools.Match(coretools.Filter{
		Series: selectedSeries,
	})
	if err != nil {
		return nil, "", nil, err
	}

	// Filter image metadata to the selected series.
	var imageMetadata []*imagemetadata.ImageMetadata
	seriesVersion, err := series.SeriesVersion(selectedSeries)
	if err != nil {
		return nil, "", nil, errors.Trace(err)
	}
	for _, m := range args.ImageMetadata {
		if m.Version != seriesVersion {
			continue
		}
		imageMetadata = append(imageMetadata, m)
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return nil, "", nil, fmt.Errorf("no SSH client available")
	}

	publicKey, err := simplestreams.UserPublicSigningKey()
	if err != nil {
		return nil, "", nil, err
	}
	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(
		args.BootstrapConstraints, args.ModelConstraints, selectedSeries, publicKey,
	)
	if err != nil {
		return nil, "", nil, err
	}
	instanceConfig.EnableOSRefreshUpdate = env.Config().EnableOSRefreshUpdate()
	instanceConfig.EnableOSUpgrade = env.Config().EnableOSUpgrade()
	instanceConfig.Tags = instancecfg.InstanceTags(env.Config(), instanceConfig.Jobs)
	maybeSetBridge := func(icfg *instancecfg.InstanceConfig) {
		// If we need to override the default bridge name, do it now. When
		// args.ContainerBridgeName is empty, the default names for LXC
		// (lxcbr0) and KVM (virbr0) will be used.
		if args.ContainerBridgeName != "" {
			logger.Debugf("using %q as network bridge for all container types", args.ContainerBridgeName)
			if icfg.AgentEnvironment == nil {
				icfg.AgentEnvironment = make(map[string]string)
			}
			icfg.AgentEnvironment[agent.LxcBridge] = args.ContainerBridgeName
		}
	}
	maybeSetBridge(instanceConfig)

	fmt.Fprintln(ctx.GetStderr(), "Launching instance")
	instanceStatus := func(settableStatus status.Status, info string, data map[string]interface{}) error {
		fmt.Fprintf(ctx.GetStderr(), "%s      \r", info)
		return nil
	}
	result, err := env.StartInstance(environs.StartInstanceParams{
		Constraints:    args.BootstrapConstraints,
		Tools:          availableTools,
		InstanceConfig: instanceConfig,
		Placement:      args.Placement,
		ImageMetadata:  imageMetadata,
		StatusCallback: instanceStatus,
	})
	if err != nil {
		return nil, "", nil, errors.Annotate(err, "cannot start bootstrap instance")
	}
	fmt.Fprintf(ctx.GetStderr(), " - %s\n", result.Instance.Id())

	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		icfg.InstanceId = result.Instance.Id()
		icfg.HardwareCharacteristics = result.Hardware
		envConfig := env.Config()
		if result.Config != nil {
			updated, err := envConfig.Apply(result.Config.UnknownAttrs())
			if err != nil {
				return errors.Trace(err)
			}
			envConfig = updated
		}
		if err := instancecfg.FinishInstanceConfig(icfg, envConfig); err != nil {
			return err
		}
		maybeSetBridge(icfg)
		return FinishBootstrap(ctx, client, env, result.Instance, icfg)
	}
	return result, selectedSeries, finalize, nil
}
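
maybeSetBridge illustrates a pattern repeated across these variants: wrap an optional override in a closure and apply it both to the initial instance config and again inside the finalizer, which rebuilds part of the config. A compact sketch of that pattern with stand-in types follows; the map key is illustrative, not the real agent.LxcBridge constant.

package main

import "fmt"

// config is a stand-in for the AgentEnvironment map on
// instancecfg.InstanceConfig.
type config struct {
	AgentEnvironment map[string]string
}

// bridgeOverride returns a helper in the spirit of maybeSetBridge: it applies
// an optional setting only when the override is non-empty, and can be reused
// on any config it is handed.
func bridgeOverride(bridgeName string) func(*config) {
	return func(c *config) {
		if bridgeName == "" {
			return // keep the provider defaults (e.g. lxcbr0 / virbr0)
		}
		if c.AgentEnvironment == nil {
			c.AgentEnvironment = make(map[string]string)
		}
		c.AgentEnvironment["LXC_BRIDGE"] = bridgeName // illustrative key
	}
}

func main() {
	maybeSetBridge := bridgeOverride("br0")

	initial := &config{}
	maybeSetBridge(initial) // before StartInstance

	rebuilt := &config{} // the finalizer works on a fresh config in the real code
	maybeSetBridge(rebuilt)

	fmt.Println(initial.AgentEnvironment, rebuilt.AgentEnvironment)
}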