Example #1
// Bootstrap is specified on the Environ interface.
func (e *manualEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) {
	envConfig := e.envConfig()
	host := envConfig.bootstrapHost()
	provisioned, err := manualCheckProvisioned(host)
	if err != nil {
		return nil, errors.Annotate(err, "failed to check provisioned status")
	}
	if provisioned {
		return nil, manual.ErrProvisioned
	}
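	// Probe the configured bootstrap host for its series and hardware characteristics.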
	hc, series, err := manualDetectSeriesAndHardwareCharacteristics(host)
	if err != nil {
		return nil, err
	}
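	// finalize runs once the bootstrap machine is in place: it completes the
	// instance config and then provisions the host over SSH.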
	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		icfg.InstanceId = BootstrapInstanceId
		icfg.HardwareCharacteristics = &hc
		if err := instancecfg.FinishInstanceConfig(icfg, e.Config()); err != nil {
			return err
		}
		return common.ConfigureMachine(ctx, ssh.DefaultClient, host, icfg)
	}

	result := &environs.BootstrapResult{
		Arch:     *hc.Arch,
		Series:   series,
		Finalize: finalize,
	}
	return result, nil
}
Example #2
func (s *configureSuite) getCloudConfig(c *gc.C, stateServer bool, vers version.Binary) cloudinit.CloudConfig {
	var icfg *instancecfg.InstanceConfig
	var err error
	if stateServer {
		icfg, err = instancecfg.NewBootstrapInstanceConfig(constraints.Value{}, vers.Series)
		c.Assert(err, jc.ErrorIsNil)
		icfg.InstanceId = "instance-id"
		icfg.Jobs = []multiwatcher.MachineJob{multiwatcher.JobManageEnviron, multiwatcher.JobHostUnits}
	} else {
		icfg, err = instancecfg.NewInstanceConfig("0", "ya", imagemetadata.ReleasedStream, vers.Series, true, nil, nil, nil)
		c.Assert(err, jc.ErrorIsNil)
		icfg.Jobs = []multiwatcher.MachineJob{multiwatcher.JobHostUnits}
	}
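	// Attach placeholder tools metadata; the reserved .invalid TLD guarantees
	// the URL is never actually fetched.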
	icfg.Tools = &tools.Tools{
		Version: vers,
		URL:     "http://testing.invalid/tools.tar.gz",
	}
	environConfig := testConfig(c, stateServer, vers)
	err = instancecfg.FinishInstanceConfig(icfg, environConfig)
	c.Assert(err, jc.ErrorIsNil)
	cloudcfg, err := cloudinit.New(icfg.Series)
	c.Assert(err, jc.ErrorIsNil)
	udata, err := cloudconfig.NewUserdataConfig(icfg, cloudcfg)
	c.Assert(err, jc.ErrorIsNil)
	err = udata.Configure()
	c.Assert(err, jc.ErrorIsNil)
	return cloudcfg
}
Example #3
// StartInstance is specified in the InstanceBroker interface.
func (env *localEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, fmt.Errorf("starting instances with networks is not supported yet.")
	}
	series := args.Tools.OneSeries()
	logger.Debugf("StartInstance: %q, %s", args.InstanceConfig.MachineId, series)
	args.InstanceConfig.Tools = args.Tools[0]

	args.InstanceConfig.MachineContainerType = env.config.container()
	logger.Debugf("tools: %#v", args.InstanceConfig.Tools)
	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.config.Config); err != nil {
		return nil, err
	}
	// TODO: evaluate the impact of setting the constraints on the
	// instanceConfig for all machines rather than just state server nodes.
	// This limitation is why the constraints are assigned directly here.
	args.InstanceConfig.Constraints = args.Constraints
	args.InstanceConfig.AgentEnvironment[agent.Namespace] = env.config.namespace()
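	// With the config complete, ask the local backend to create the container.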
	inst, hardware, err := createContainer(env, args)
	if err != nil {
		return nil, err
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hardware,
	}, nil
}
Example #4
func (env *environ) finishInstanceConfig(args environs.StartInstanceParams) error {
	// TODO(natefinch): This is only correct so long as the lxd is running on
	// the local machine.  If/when we support a remote lxd environment, we'll
	// need to change this to match the arch of the remote machine.
	tools, err := args.Tools.Match(tools.Filter{Arch: arch.HostArch()})
	if err != nil {
		return errors.Trace(err)
	}
	if len(tools) == 0 {
		return errors.Errorf("No tools available for architecture %q", arch.HostArch())
	}
	if err := args.InstanceConfig.SetTools(tools); err != nil {
		return errors.Trace(err)
	}
	logger.Debugf("tools: %#v", args.InstanceConfig.ToolsList())

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.ecfg.Config); err != nil {
		return errors.Trace(err)
	}

	// TODO: evaluate the impact of setting the constraints on the
	// instanceConfig for all machines rather than just controller nodes.
	// This limitation is why the constraints are assigned directly here.
	args.InstanceConfig.Constraints = args.Constraints

	args.InstanceConfig.AgentEnvironment[agent.Namespace] = env.ecfg.namespace()

	return nil
}
Example #5
func (s *CloudInitSuite) TestFinishInstanceConfigNonDefault(c *gc.C) {
	userTag := names.NewLocalUserTag("not-touched")
	attrs := dummySampleConfig().Merge(testing.Attrs{
		"authorized-keys":           "we-are-the-keys",
		"ssl-hostname-verification": false,
	})
	cfg, err := config.New(config.NoDefaults, attrs)
	c.Assert(err, jc.ErrorIsNil)
	icfg := &instancecfg.InstanceConfig{
		MongoInfo: &mongo.MongoInfo{Tag: userTag},
		APIInfo:   &api.Info{Tag: userTag},
	}
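	// FinishInstanceConfig fills in the remaining fields from the model config.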
	err = instancecfg.FinishInstanceConfig(icfg, cfg)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(icfg, jc.DeepEquals, &instancecfg.InstanceConfig{
		AuthorizedKeys: "we-are-the-keys",
		AgentEnvironment: map[string]string{
			agent.ProviderType:  "dummy",
			agent.ContainerType: "",
		},
		MongoInfo: &mongo.MongoInfo{Tag: userTag},
		APIInfo:   &api.Info{Tag: userTag},
		DisableSSLHostnameVerification: true,
		PreferIPv6:                     true,
		EnableOSRefreshUpdate:          true,
		EnableOSUpgrade:                true,
	})
}
Example #6
func createContainer(c *gc.C, manager container.Manager, machineId string) instance.Instance {
	machineNonce := "fake-nonce"
	stateInfo := jujutesting.FakeStateInfo(machineId)
	apiInfo := jujutesting.FakeAPIInfo(machineId)
	instanceConfig, err := instancecfg.NewInstanceConfig(machineId, machineNonce, imagemetadata.ReleasedStream, "quantal", "", true, stateInfo, apiInfo)
	c.Assert(err, jc.ErrorIsNil)
	network := container.BridgeNetworkConfig("virbr0", 0, nil)

	err = instanceConfig.SetTools(tools.List{
		&tools.Tools{
			Version: version.MustParseBinary("2.3.4-foo-bar"),
			URL:     "http://tools.testing.invalid/2.3.4-foo-bar.tgz",
		},
	})
	c.Assert(err, jc.ErrorIsNil)
	environConfig := dummyConfig(c)
	err = instancecfg.FinishInstanceConfig(instanceConfig, environConfig)
	c.Assert(err, jc.ErrorIsNil)
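	// A no-op status callback satisfies the CreateContainer signature in tests.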
	callback := func(settableStatus status.Status, info string, data map[string]interface{}) error { return nil }
	inst, hardware, err := manager.CreateContainer(instanceConfig, "precise", network, nil, callback)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(hardware, gc.NotNil)
	expected := fmt.Sprintf("arch=%s cpu-cores=1 mem=512M root-disk=8192M", arch.HostArch())
	c.Assert(hardware.String(), gc.Equals, expected)
	return inst
}
Example #7
// Bootstrap is part of the Environ interface.
func (e *manualEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) {
	provisioned, err := manualCheckProvisioned(e.host)
	if err != nil {
		return nil, errors.Annotate(err, "failed to check provisioned status")
	}
	if provisioned {
		return nil, manual.ErrProvisioned
	}
	hw, series, err := e.seriesAndHardwareCharacteristics()
	if err != nil {
		return nil, err
	}
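	// finalize completes the instance config for the chosen machine and then
	// provisions the host over SSH.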
	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig, _ environs.BootstrapDialOpts) error {
		icfg.Bootstrap.BootstrapMachineInstanceId = BootstrapInstanceId
		icfg.Bootstrap.BootstrapMachineHardwareCharacteristics = hw
		if err := instancecfg.FinishInstanceConfig(icfg, e.Config()); err != nil {
			return err
		}
		return common.ConfigureMachine(ctx, ssh.DefaultClient, e.host, icfg)
	}

	result := &environs.BootstrapResult{
		Arch:     *hw.Arch,
		Series:   series,
		Finalize: finalize,
	}
	return result, nil
}
Example #8
func (s *configureSuite) getCloudConfig(c *gc.C, controller bool, vers version.Binary) cloudinit.CloudConfig {
	var icfg *instancecfg.InstanceConfig
	var err error
	modelConfig := testConfig(c, controller, vers)
	if controller {
		icfg, err = instancecfg.NewBootstrapInstanceConfig(
			coretesting.FakeControllerConfig(),
			constraints.Value{}, constraints.Value{},
			vers.Series, "",
		)
		c.Assert(err, jc.ErrorIsNil)
		icfg.APIInfo = &api.Info{
			Password: "******",
			CACert:   coretesting.CACert,
			ModelTag: coretesting.ModelTag,
		}
		icfg.Controller.MongoInfo = &mongo.MongoInfo{
			Password: "******", Info: mongo.Info{CACert: coretesting.CACert},
		}
		icfg.Bootstrap.ControllerModelConfig = modelConfig
		icfg.Bootstrap.BootstrapMachineInstanceId = "instance-id"
		icfg.Bootstrap.HostedModelConfig = map[string]interface{}{
			"name": "hosted-model",
		}
		icfg.Bootstrap.StateServingInfo = params.StateServingInfo{
			Cert:         coretesting.ServerCert,
			PrivateKey:   coretesting.ServerKey,
			CAPrivateKey: coretesting.CAKey,
			StatePort:    123,
			APIPort:      456,
		}
		icfg.Jobs = []multiwatcher.MachineJob{multiwatcher.JobManageModel, multiwatcher.JobHostUnits}
	} else {
		icfg, err = instancecfg.NewInstanceConfig(coretesting.ControllerTag, "0", "ya", imagemetadata.ReleasedStream, vers.Series, nil)
		c.Assert(err, jc.ErrorIsNil)
		icfg.Jobs = []multiwatcher.MachineJob{multiwatcher.JobHostUnits}
	}
	err = icfg.SetTools(tools.List{
		&tools.Tools{
			Version: vers,
			URL:     "http://testing.invalid/tools.tar.gz",
		},
	})
	c.Assert(err, jc.ErrorIsNil)
	err = instancecfg.FinishInstanceConfig(icfg, modelConfig)
	c.Assert(err, jc.ErrorIsNil)
	cloudcfg, err := cloudinit.New(icfg.Series)
	c.Assert(err, jc.ErrorIsNil)
	udata, err := cloudconfig.NewUserdataConfig(icfg, cloudcfg)
	c.Assert(err, jc.ErrorIsNil)
	err = udata.Configure()
	c.Assert(err, jc.ErrorIsNil)
	return cloudcfg
}
Example #9
func (s *CloudInitSuite) TestFinishInstanceConfig(c *gc.C) {
	userTag := names.NewLocalUserTag("not-touched")

	expectedMcfg := &instancecfg.InstanceConfig{
		AuthorizedKeys: "we-are-the-keys",
		AgentEnvironment: map[string]string{
			agent.ProviderType:  "dummy",
			agent.ContainerType: "",
		},
		MongoInfo: &mongo.MongoInfo{Tag: userTag},
		APIInfo:   &api.Info{Tag: userTag},
		DisableSSLHostnameVerification: false,
		PreferIPv6:                     true,
		EnableOSRefreshUpdate:          true,
		EnableOSUpgrade:                true,
	}

	cfg, err := config.New(config.NoDefaults, dummySampleConfig().Merge(testing.Attrs{
		"authorized-keys": "we-are-the-keys",
	}))
	c.Assert(err, jc.ErrorIsNil)

	icfg := &instancecfg.InstanceConfig{
		MongoInfo: &mongo.MongoInfo{Tag: userTag},
		APIInfo:   &api.Info{Tag: userTag},
	}
	err = instancecfg.FinishInstanceConfig(icfg, cfg)

	c.Assert(err, jc.ErrorIsNil)
	c.Assert(icfg, jc.DeepEquals, expectedMcfg)

	// Test when updates/upgrades are set to false.
	cfg, err = config.New(config.NoDefaults, dummySampleConfig().Merge(testing.Attrs{
		"authorized-keys":          "we-are-the-keys",
		"enable-os-refresh-update": false,
		"enable-os-upgrade":        false,
	}))
	c.Assert(err, jc.ErrorIsNil)
	err = instancecfg.FinishInstanceConfig(icfg, cfg)
	c.Assert(err, jc.ErrorIsNil)
	expectedMcfg.EnableOSRefreshUpdate = false
	expectedMcfg.EnableOSUpgrade = false
	c.Assert(icfg, jc.DeepEquals, expectedMcfg)
}
Example #10
// finishInstanceConfig updates args.InstanceConfig in place, setting up
// the API, state-serving, and SSH-keys information.
func (env *environ) finishInstanceConfig(args environs.StartInstanceParams, spec *instances.InstanceSpec) error {
	envTools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	args.InstanceConfig.Tools = envTools[0]
	return instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config())
}
Example #11
// StartInstance asks for a new instance to be created, associated with
// the provided instance config in args. The given config describes the juju
// state for the new instance to connect to. The config MachineNonce, which must be
// unique within an environment, is used by juju to protect against the
// consequences of multiple instances being started with the same machine id.
func (env *environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	logger.Infof("sigmaEnviron.StartInstance...")

	if args.InstanceConfig == nil {
		return nil, errors.New("instance configuration is nil")
	}

	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}

	if len(args.Tools) == 0 {
		return nil, errors.New("tools not found")
	}

	img, err := findInstanceImage(args.ImageMetadata)
	if err != nil {
		return nil, err
	}

	tools, err := args.Tools.Match(tools.Filter{Arch: img.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", img.Arch, args.Tools.Arches())
	}

	args.InstanceConfig.Tools = tools[0]
	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config()); err != nil {
		return nil, err
	}
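	// Render the completed instance config into cloud-init user data.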
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, CloudSigmaRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}

	logger.Debugf("cloudsigma user data; %d bytes", len(userData))

	client := env.client
	server, rootdrive, arch, err := client.newInstance(args, img, userData)
	if err != nil {
		return nil, errors.Errorf("failed start instance: %v", err)
	}

	inst := &sigmaInstance{server: server}

	// prepare hardware characteristics
	hwch, err := inst.hardware(arch, rootdrive.Size())
	if err != nil {
		return nil, err
	}

	logger.Debugf("hardware: %v", hwch)
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hwch,
	}, nil
}
Example #12
func (*cloudinitSuite) createInstanceConfig(c *gc.C, environConfig *config.Config) *instancecfg.InstanceConfig {
	machineId := "42"
	machineNonce := "fake-nonce"
	stateInfo := jujutesting.FakeStateInfo(machineId)
	apiInfo := jujutesting.FakeAPIInfo(machineId)
	instanceConfig, err := instancecfg.NewInstanceConfig(machineId, machineNonce, imagemetadata.ReleasedStream, "quantal", "", true, nil, stateInfo, apiInfo)
	c.Assert(err, jc.ErrorIsNil)
	instanceConfig.Tools = &tools.Tools{
		Version: version.MustParseBinary("2.3.4-quantal-amd64"),
		URL:     "http://tools.testing.invalid/2.3.4-quantal-amd64.tgz",
	}
	err = instancecfg.FinishInstanceConfig(instanceConfig, environConfig)
	c.Assert(err, jc.ErrorIsNil)
	return instanceConfig
}
Example #13
func (env *environ) finishInstanceConfig(args environs.StartInstanceParams) error {
	args.InstanceConfig.Tools = args.Tools[0]
	logger.Debugf("tools: %#v", args.InstanceConfig.Tools)

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.ecfg.Config); err != nil {
		return errors.Trace(err)
	}

	// TODO: evaluate the impact of setting the constraints on the
	// instanceConfig for all machines rather than just state server nodes.
	// This limitation is why the constraints are assigned directly here.
	args.InstanceConfig.Constraints = args.Constraints

	args.InstanceConfig.AgentEnvironment[agent.Namespace] = env.ecfg.namespace()

	return nil
}
Example #14
func (env *environ) finishInstanceConfig(args environs.StartInstanceParams) error {
	// TODO(natefinch): This is only correct so long as the lxd is running on
	// the local machine.  If/when we support a remote lxd environment, we'll
	// need to change this to match the arch of the remote machine.
	tools, err := args.Tools.Match(tools.Filter{Arch: arch.HostArch()})
	if err != nil {
		return errors.Trace(err)
	}
	if err := args.InstanceConfig.SetTools(tools); err != nil {
		return errors.Trace(err)
	}

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.ecfg.Config); err != nil {
		return errors.Trace(err)
	}

	return nil
}
Example #15
func (s *CloudInitSuite) TestFinishBootstrapConfig(c *gc.C) {
	attrs := dummySampleConfig().Merge(testing.Attrs{
		"authorized-keys": "we-are-the-keys",
		"admin-secret":    "lisboan-pork",
		"agent-version":   "1.2.3",
		"state-server":    false,
	})
	cfg, err := config.New(config.NoDefaults, attrs)
	c.Assert(err, jc.ErrorIsNil)
	oldAttrs := cfg.AllAttrs()
	icfg := &instancecfg.InstanceConfig{
		Bootstrap: true,
	}
	err = instancecfg.FinishInstanceConfig(icfg, cfg)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(icfg.AuthorizedKeys, gc.Equals, "we-are-the-keys")
	c.Check(icfg.DisableSSLHostnameVerification, jc.IsFalse)
	password := utils.UserPasswordHash("lisboan-pork", utils.CompatSalt)
	c.Check(icfg.APIInfo, gc.DeepEquals, &api.Info{
		Password: password, CACert: testing.CACert,
		ModelTag: testing.ModelTag,
	})
	c.Check(icfg.MongoInfo, gc.DeepEquals, &mongo.MongoInfo{
		Password: password, Info: mongo.Info{CACert: testing.CACert},
	})
	c.Check(icfg.StateServingInfo.StatePort, gc.Equals, cfg.StatePort())
	c.Check(icfg.StateServingInfo.APIPort, gc.Equals, cfg.APIPort())
	c.Check(icfg.StateServingInfo.CAPrivateKey, gc.Equals, oldAttrs["ca-private-key"])

	oldAttrs["ca-private-key"] = ""
	oldAttrs["admin-secret"] = ""
	c.Check(icfg.Config.AllAttrs(), gc.DeepEquals, oldAttrs)
	srvCertPEM := icfg.StateServingInfo.Cert
	srvKeyPEM := icfg.StateServingInfo.PrivateKey
	_, _, err = cert.ParseCertAndKey(srvCertPEM, srvKeyPEM)
	c.Check(err, jc.ErrorIsNil)

	err = cert.Verify(srvCertPEM, testing.CACert, time.Now())
	c.Assert(err, jc.ErrorIsNil)
	err = cert.Verify(srvCertPEM, testing.CACert, time.Now().AddDate(9, 0, 0))
	c.Assert(err, jc.ErrorIsNil)
	err = cert.Verify(srvCertPEM, testing.CACert, time.Now().AddDate(10, 0, 1))
	c.Assert(err, gc.NotNil)
}
Example #16
func (e *manualEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, _ error) {
	// Set "use-sshstorage" to false, so agents know not to use sshstorage.
	cfg, err := e.Config().Apply(map[string]interface{}{"use-sshstorage": false})
	if err != nil {
		return "", "", nil, err
	}
	if err := e.SetConfig(cfg); err != nil {
		return "", "", nil, err
	}
	agentEnv, err := localstorage.StoreConfig(e)
	if err != nil {
		return "", "", nil, err
	}
	envConfig := e.envConfig()
	// TODO(axw) consider how we can use placement to override bootstrap-host.
	host := envConfig.bootstrapHost()
	provisioned, err := manualCheckProvisioned(host)
	if err != nil {
		return "", "", nil, errors.Annotate(err, "failed to check provisioned status")
	}
	if provisioned {
		return "", "", nil, manual.ErrProvisioned
	}
	hc, series, err := manualDetectSeriesAndHardwareCharacteristics(host)
	if err != nil {
		return "", "", nil, err
	}
	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		icfg.InstanceId = BootstrapInstanceId
		icfg.HardwareCharacteristics = &hc
		if err := instancecfg.FinishInstanceConfig(icfg, e.Config()); err != nil {
			return err
		}
		for k, v := range agentEnv {
			icfg.AgentEnvironment[k] = v
		}
		return common.ConfigureMachine(ctx, ssh.DefaultClient, host, icfg)
	}
	return *hc.Arch, series, finalize, nil
}
Example #17
// InstanceConfig returns information from the environment config that
// is needed for machine cloud-init (for non-controllers only). It
// is exposed for testing purposes.
// TODO(rog) fix environs/manual tests so they do not need to call this, or move this elsewhere.
func InstanceConfig(st *state.State, machineId, nonce, dataDir string) (*instancecfg.InstanceConfig, error) {
	environConfig, err := st.ModelConfig()
	if err != nil {
		return nil, err
	}

	// Get the machine so we can get its series and arch.
	// If the Arch is not set in hardware-characteristics,
	// an error is returned.
	machine, err := st.Machine(machineId)
	if err != nil {
		return nil, err
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		return nil, err
	}
	if hc.Arch == nil {
		return nil, fmt.Errorf("arch is not set for %q", machine.Tag())
	}

	// Find the appropriate tools information.
	agentVersion, ok := environConfig.AgentVersion()
	if !ok {
		return nil, errors.New("no agent version set in model configuration")
	}
	environment, err := st.Model()
	if err != nil {
		return nil, err
	}
	urlGetter := common.NewToolsURLGetter(environment.UUID(), st)
	toolsFinder := common.NewToolsFinder(st, st, urlGetter)
	findToolsResult, err := toolsFinder.FindTools(params.FindToolsParams{
		Number:       agentVersion,
		MajorVersion: -1,
		MinorVersion: -1,
		Series:       machine.Series(),
		Arch:         *hc.Arch,
	})
	if err != nil {
		return nil, err
	}
	if findToolsResult.Error != nil {
		return nil, findToolsResult.Error
	}
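	// The finder already filtered on series and arch, so the first match will do.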
	tools := findToolsResult.List[0]

	// Find the API endpoints.
	env, err := environs.New(environConfig)
	if err != nil {
		return nil, err
	}
	apiInfo, err := environs.APIInfo(env)
	if err != nil {
		return nil, err
	}

	auth := authentication.NewAuthenticator(st.MongoConnectionInfo(), apiInfo)
	mongoInfo, apiInfo, err := auth.SetupAuthentication(machine)
	if err != nil {
		return nil, err
	}

	// Find requested networks.
	networks, err := machine.RequestedNetworks()
	if err != nil {
		return nil, err
	}

	// Figure out if secure connections are supported.
	info, err := st.StateServingInfo()
	if err != nil {
		return nil, err
	}
	secureServerConnection := info.CAPrivateKey != ""
	icfg, err := instancecfg.NewInstanceConfig(machineId, nonce, env.Config().ImageStream(), machine.Series(), "",
		secureServerConnection, networks, mongoInfo, apiInfo,
	)
	if err != nil {
		return nil, err
	}
	if dataDir != "" {
		icfg.DataDir = dataDir
	}
	icfg.Tools = tools
	err = instancecfg.FinishInstanceConfig(icfg, environConfig)
	if err != nil {
		return nil, err
	}
	return icfg, nil
}
Example #18
func (env *joyentEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {

	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}

	series := args.Tools.OneSeries()
	arches := args.Tools.Arches()
	spec, err := env.FindInstanceSpec(&instances.InstanceConstraint{
		Region:      env.Ecfg().Region(),
		Series:      series,
		Arches:      arches,
		Constraints: args.Constraints,
	}, args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	args.InstanceConfig.Tools = tools[0]

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config()); err != nil {
		return nil, err
	}

	// This is a hack that ensures that instances can communicate over
	// the internal network. Joyent sometimes gives instances
	// different 10.x.x.x/21 networks and adding this route allows
	// them to talk despite this. See:
	// https://bugs.launchpad.net/juju-core/+bug/1401130
	cloudcfg, err := cloudinit.New(args.InstanceConfig.Series)
	if err != nil {
		return nil, errors.Annotate(err, "cannot create cloudinit template")
	}
	ifupScript := `
#!/bin/bash

# These guards help to ensure that this hack only runs if Joyent's
# internal network still works as it does at time of writing.
[ "$IFACE" == "eth1" ] || [ "$IFACE" == "--all" ] || exit 0
/sbin/ip -4 --oneline addr show dev eth1 | fgrep --quiet " inet 10." || exit 0

/sbin/ip route add 10.0.0.0/8 dev eth1
`[1:]
	cloudcfg.AddBootTextFile("/etc/network/if-up.d/joyent", ifupScript, 0755)

	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, JoyentRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("joyent user data: %d bytes", len(userData))

	var machine *cloudapi.Machine
	machine, err = env.compute.cloudapi.CreateMachine(cloudapi.CreateMachineOpts{
		//Name:	 env.machineFullName(machineConf.MachineId),
		Package:  spec.InstanceType.Name,
		Image:    spec.Image.Id,
		Metadata: map[string]string{"metadata.cloud-init:user-data": string(userData)},
		Tags:     map[string]string{"tag.group": "juju", "tag.env": env.Config().Name()},
	})
	if err != nil {
		return nil, errors.Annotate(err, "cannot create instances")
	}
	machineId := machine.Id

	logger.Infof("provisioning instance %q", machineId)

	machine, err = env.compute.cloudapi.GetMachine(machineId)
	if err != nil {
		return nil, errors.Annotate(err, "cannot start instances")
	}

	// wait for machine to start
	for !strings.EqualFold(machine.State, "running") {
		time.Sleep(1 * time.Second)

		machine, err = env.compute.cloudapi.GetMachine(machineId)
		if err != nil {
			return nil, errors.Annotate(err, "cannot start instances")
		}
	}

	logger.Infof("started instance %q", machineId)

	inst := &joyentInstance{
		machine: machine,
		env:     env,
	}

	if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) {
		if err := common.AddStateInstance(env.Storage(), inst.Id()); err != nil {
			logger.Errorf("could not record instance in provider-state: %v", err)
		}
	}

	disk64 := uint64(machine.Disk)
	hc := instance.HardwareCharacteristics{
		Arch:     &spec.Image.Arch,
		Mem:      &spec.InstanceType.Mem,
		CpuCores: &spec.InstanceType.CpuCores,
		CpuPower: spec.InstanceType.CpuPower,
		RootDisk: &disk64,
	}

	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: &hc,
	}, nil
}
Example #19
// InstanceConfig returns information from the environment config that
// is needed for machine cloud-init (for non-controllers only). It
// is exposed for testing purposes.
// TODO(rog) fix environs/manual tests so they do not need to call this, or move this elsewhere.
func InstanceConfig(st *state.State, machineId, nonce, dataDir string) (*instancecfg.InstanceConfig, error) {
	environConfig, err := st.ModelConfig()
	if err != nil {
		return nil, errors.Annotate(err, "getting model config")
	}

	// Get the machine so we can get its series and arch.
	// If the Arch is not set in hardware-characteristics,
	// an error is returned.
	machine, err := st.Machine(machineId)
	if err != nil {
		return nil, errors.Annotate(err, "getting machine")
	}
	hc, err := machine.HardwareCharacteristics()
	if err != nil {
		return nil, errors.Annotate(err, "getting machine hardware characteristics")
	}
	if hc.Arch == nil {
		return nil, fmt.Errorf("arch is not set for %q", machine.Tag())
	}

	// Find the appropriate tools information.
	agentVersion, ok := environConfig.AgentVersion()
	if !ok {
		return nil, errors.New("no agent version set in model configuration")
	}
	environment, err := st.Model()
	if err != nil {
		return nil, errors.Annotate(err, "getting state model")
	}
	urlGetter := common.NewToolsURLGetter(environment.UUID(), st)
	toolsFinder := common.NewToolsFinder(st, st, urlGetter)
	findToolsResult, err := toolsFinder.FindTools(params.FindToolsParams{
		Number:       agentVersion,
		MajorVersion: -1,
		MinorVersion: -1,
		Series:       machine.Series(),
		Arch:         *hc.Arch,
	})
	if err != nil {
		return nil, errors.Annotate(err, "finding tools")
	}
	if findToolsResult.Error != nil {
		return nil, errors.Annotate(findToolsResult.Error, "finding tools")
	}
	tools := findToolsResult.List[0]

	// Get the API connection info; attempt all API addresses.
	apiHostPorts, err := st.APIHostPorts()
	if err != nil {
		return nil, errors.Annotate(err, "getting API addresses")
	}
	apiAddrs := make(set.Strings)
	for _, hostPorts := range apiHostPorts {
		for _, hp := range hostPorts {
			apiAddrs.Add(hp.NetAddr())
		}
	}
	apiInfo := &api.Info{
		Addrs:    apiAddrs.SortedValues(),
		CACert:   st.CACert(),
		ModelTag: st.ModelTag(),
	}

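	// SetupAuthentication fills in this machine's credentials for mongo and the API.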
	auth := authentication.NewAuthenticator(st.MongoConnectionInfo(), apiInfo)
	mongoInfo, apiInfo, err := auth.SetupAuthentication(machine)
	if err != nil {
		return nil, errors.Annotate(err, "setting up machine authentication")
	}

	// Find requested networks.
	networks, err := machine.RequestedNetworks()
	if err != nil {
		return nil, errors.Annotate(err, "getting requested networks for machine")
	}

	// Figure out if secure connections are supported.
	info, err := st.StateServingInfo()
	if err != nil {
		return nil, errors.Annotate(err, "getting state serving info")
	}
	secureServerConnection := info.CAPrivateKey != ""
	icfg, err := instancecfg.NewInstanceConfig(machineId, nonce, environConfig.ImageStream(), machine.Series(), "",
		secureServerConnection, networks, mongoInfo, apiInfo,
	)
	if err != nil {
		return nil, errors.Annotate(err, "initializing instance config")
	}
	if dataDir != "" {
		icfg.DataDir = dataDir
	}
	icfg.Tools = tools
	err = instancecfg.FinishInstanceConfig(icfg, environConfig)
	if err != nil {
		return nil, errors.Annotate(err, "finishing instance config")
	}
	return icfg, nil
}
Example #20
// BootstrapInstance creates a new instance with the series of its choice,
// constrained to those of the available tools, and
// returns the instance result, series, and a function that
// must be called to finalize the bootstrap process by transferring
// the tools and installing the initial Juju controller.
// This method is called by Bootstrap above, which implements environs.Bootstrap, but
// is also exported so that providers can manipulate the started instance.
func BootstrapInstance(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams,
) (_ *environs.StartInstanceResult, selectedSeries string, _ environs.BootstrapFinalizer, err error) {
	// TODO make safe in the case of racing Bootstraps
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	// First thing, ensure we have tools otherwise there's no point.
	if args.BootstrapSeries != "" {
		selectedSeries = args.BootstrapSeries
	} else {
		selectedSeries = config.PreferredSeries(env.Config())
	}
	availableTools, err := args.AvailableTools.Match(coretools.Filter{
		Series: selectedSeries,
	})
	if err != nil {
		return nil, "", nil, err
	}

	// Filter image metadata to the selected series.
	var imageMetadata []*imagemetadata.ImageMetadata
	seriesVersion, err := series.SeriesVersion(selectedSeries)
	if err != nil {
		return nil, "", nil, errors.Trace(err)
	}
	for _, m := range args.ImageMetadata {
		if m.Version != seriesVersion {
			continue
		}
		imageMetadata = append(imageMetadata, m)
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return nil, "", nil, fmt.Errorf("no SSH client available")
	}

	publicKey, err := simplestreams.UserPublicSigningKey()
	if err != nil {
		return nil, "", nil, err
	}
	envCfg := env.Config()
	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(
		args.ControllerConfig, args.BootstrapConstraints, args.ModelConstraints, selectedSeries, publicKey,
	)
	if err != nil {
		return nil, "", nil, err
	}
	instanceConfig.EnableOSRefreshUpdate = env.Config().EnableOSRefreshUpdate()
	instanceConfig.EnableOSUpgrade = env.Config().EnableOSUpgrade()

	instanceConfig.Tags = instancecfg.InstanceTags(envCfg.UUID(), args.ControllerConfig.ControllerUUID(), envCfg, instanceConfig.Jobs)
	maybeSetBridge := func(icfg *instancecfg.InstanceConfig) {
		// If we need to override the default bridge name, do it now. When
		// args.ContainerBridgeName is empty, the default names for LXC
		// (lxcbr0) and KVM (virbr0) will be used.
		if args.ContainerBridgeName != "" {
			logger.Debugf("using %q as network bridge for all container types", args.ContainerBridgeName)
			if icfg.AgentEnvironment == nil {
				icfg.AgentEnvironment = make(map[string]string)
			}
			icfg.AgentEnvironment[agent.LxcBridge] = args.ContainerBridgeName
		}
	}
	maybeSetBridge(instanceConfig)

	cloudRegion := args.CloudName
	if args.CloudRegion != "" {
		cloudRegion += "/" + args.CloudRegion
	}
	fmt.Fprintf(ctx.GetStderr(), "Launching controller instance(s) on %s...\n", cloudRegion)
	// instanceStatus prints status changes during provisioning.
	// Note the carriage returns, meaning subsequent prints are to the same
	// line of stderr, not a new line.
	instanceStatus := func(settableStatus status.Status, info string, data map[string]interface{}) error {
		// The data arg is not expected to be used in this case, but
		// print it, rather than ignore it, if we get something.
		dataString := ""
		if len(data) > 0 {
			dataString = fmt.Sprintf(" %v", data)
		}
		fmt.Fprintf(ctx.GetStderr(), " - %s%s\r", info, dataString)
		return nil
	}
	// Likely used after the final instanceStatus call to white-out the
	// current stderr line before the next use, removing any residual status
	// reporting output.
	statusCleanup := func(info string) error {
		// The leading spaces account for the leading characters
		// emitted by instanceStatus above.
		fmt.Fprintf(ctx.GetStderr(), "   %s\r", info)
		return nil
	}
	result, err := env.StartInstance(environs.StartInstanceParams{
		ControllerUUID:  args.ControllerConfig.ControllerUUID(),
		Constraints:     args.BootstrapConstraints,
		Tools:           availableTools,
		InstanceConfig:  instanceConfig,
		Placement:       args.Placement,
		ImageMetadata:   imageMetadata,
		StatusCallback:  instanceStatus,
		CleanupCallback: statusCleanup,
	})
	if err != nil {
		return nil, "", nil, errors.Annotate(err, "cannot start bootstrap instance")
	}
	// We need some padding below to overwrite any previous messages. We'll use a width of 40.
	msg := fmt.Sprintf(" - %s", result.Instance.Id())
	if len(msg) < 40 {
		padding := make([]string, 40-len(msg))
		msg += strings.Join(padding, " ")
	}
	fmt.Fprintln(ctx.GetStderr(), msg)

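	// finalize records the started instance in the instance config and
	// completes provisioning.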
	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig, opts environs.BootstrapDialOpts) error {
		icfg.Bootstrap.BootstrapMachineInstanceId = result.Instance.Id()
		icfg.Bootstrap.BootstrapMachineHardwareCharacteristics = result.Hardware
		envConfig := env.Config()
		if result.Config != nil {
			updated, err := envConfig.Apply(result.Config.UnknownAttrs())
			if err != nil {
				return errors.Trace(err)
			}
			envConfig = updated
		}
		if err := instancecfg.FinishInstanceConfig(icfg, envConfig); err != nil {
			return err
		}
		maybeSetBridge(icfg)
		return FinishBootstrap(ctx, client, env, result.Instance, icfg, opts)
	}
	return result, selectedSeries, finalize, nil
}
Example #21
// StartInstance is specified in the InstanceBroker interface.
func (env *azureEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	// Get the required configuration and config-dependent information
	// required to create the instance. We take the lock just once, to
	// ensure we obtain all information based on the same configuration.
	env.mu.Lock()
	location := env.config.location
	envTags, _ := env.config.ResourceTags()
	apiPort := env.config.APIPort()
	vmClient := compute.VirtualMachinesClient{env.compute}
	availabilitySetClient := compute.AvailabilitySetsClient{env.compute}
	networkClient := env.network
	vmImagesClient := compute.VirtualMachineImagesClient{env.compute}
	vmExtensionClient := compute.VirtualMachineExtensionsClient{env.compute}
	subscriptionId := env.config.subscriptionId
	imageStream := env.config.ImageStream()
	storageEndpoint := env.config.storageEndpoint
	storageAccountName := env.config.storageAccount
	instanceTypes, err := env.getInstanceTypesLocked()
	if err != nil {
		env.mu.Unlock()
		return nil, errors.Trace(err)
	}
	internalNetworkSubnet, err := env.getInternalSubnetLocked()
	if err != nil {
		env.mu.Unlock()
		return nil, errors.Trace(err)
	}
	env.mu.Unlock()

	// Identify the instance type and image to provision.
	instanceSpec, err := findInstanceSpec(
		vmImagesClient,
		instanceTypes,
		&instances.InstanceConstraint{
			Region:      location,
			Series:      args.Tools.OneSeries(),
			Arches:      args.Tools.Arches(),
			Constraints: args.Constraints,
		},
		imageStream,
	)
	if err != nil {
		return nil, err
	}

	// Pick tools by filtering the available tools down to the architecture of
	// the image that will be provisioned.
	selectedTools, err := args.Tools.Match(tools.Filter{
		Arch: instanceSpec.Image.Arch,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	logger.Infof("picked tools %q", selectedTools[0].Version)

	// Finalize the instance config, which we'll render to CustomData below.
	if err := args.InstanceConfig.SetTools(selectedTools); err != nil {
		return nil, errors.Trace(err)
	}
	if err := instancecfg.FinishInstanceConfig(
		args.InstanceConfig, env.Config(),
	); err != nil {
		return nil, err
	}

	machineTag := names.NewMachineTag(args.InstanceConfig.MachineId)
	vmName := resourceName(machineTag)
	vmTags := make(map[string]string)
	for k, v := range args.InstanceConfig.Tags {
		vmTags[k] = v
	}
	// jujuMachineNameTag identifies the VM name, in which is encoded
	// the Juju machine name. We tag all resources related to the
	// machine with this.
	vmTags[jujuMachineNameTag] = vmName

	// If the machine will run a controller, then we need to open the
	// API port for it.
	var apiPortPtr *int
	if multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...) {
		apiPortPtr = &apiPort
	}

	// Construct the network security group ID for the environment.
	nsgID := path.Join(
		"/subscriptions", subscriptionId, "resourceGroups",
		env.resourceGroup, "providers", "Microsoft.Network",
		"networkSecurityGroups", internalSecurityGroupName,
	)

	vm, err := createVirtualMachine(
		env.resourceGroup, location, vmName,
		vmTags, envTags,
		instanceSpec, args.InstanceConfig,
		args.DistributionGroup,
		env.Instances,
		apiPortPtr, internalNetworkSubnet, nsgID,
		storageEndpoint, storageAccountName,
		networkClient, vmClient,
		availabilitySetClient, vmExtensionClient,
	)
	if err != nil {
		logger.Errorf("creating instance failed, destroying: %v", err)
		if err := env.StopInstances(instance.Id(vmName)); err != nil {
			logger.Errorf("could not destroy failed virtual machine: %v", err)
		}
		return nil, errors.Annotatef(err, "creating virtual machine %q", vmName)
	}

	// Note: the instance is initialised without addresses to keep the
	// API chatter down. We will refresh the instance if we need to know
	// the addresses.
	inst := &azureInstance{vm, env, nil, nil}
	amd64 := arch.AMD64
	hc := &instance.HardwareCharacteristics{
		Arch:     &amd64,
		Mem:      &instanceSpec.InstanceType.Mem,
		RootDisk: &instanceSpec.InstanceType.RootDisk,
		CpuCores: &instanceSpec.InstanceType.CpuCores,
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hc,
	}, nil
}
Example #22
// BootstrapInstance creates a new instance with the series and architecture
// of its choice, constrained to those of the available tools, and
// returns the instance result, series, and a function that
// must be called to finalize the bootstrap process by transferring
// the tools and installing the initial Juju controller.
// This method is called by Bootstrap above, which implements environs.Bootstrap, but
// is also exported so that providers can manipulate the started instance.
func BootstrapInstance(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams,
) (_ *environs.StartInstanceResult, selectedSeries string, _ environs.BootstrapFinalizer, err error) {
	// TODO make safe in the case of racing Bootstraps
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	// First thing, ensure we have tools otherwise there's no point.
	if args.BootstrapSeries != "" {
		selectedSeries = args.BootstrapSeries
	} else {
		selectedSeries = config.PreferredSeries(env.Config())
	}
	availableTools, err := args.AvailableTools.Match(coretools.Filter{
		Series: selectedSeries,
	})
	if err != nil {
		return nil, "", nil, err
	}

	// Filter image metadata to the selected series.
	var imageMetadata []*imagemetadata.ImageMetadata
	seriesVersion, err := series.SeriesVersion(selectedSeries)
	if err != nil {
		return nil, "", nil, errors.Trace(err)
	}
	for _, m := range args.ImageMetadata {
		if m.Version != seriesVersion {
			continue
		}
		imageMetadata = append(imageMetadata, m)
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return nil, "", nil, fmt.Errorf("no SSH client available")
	}

	publicKey, err := simplestreams.UserPublicSigningKey()
	if err != nil {
		return nil, "", nil, err
	}
	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(
		args.BootstrapConstraints, args.ModelConstraints, selectedSeries, publicKey,
	)
	if err != nil {
		return nil, "", nil, err
	}
	instanceConfig.EnableOSRefreshUpdate = env.Config().EnableOSRefreshUpdate()
	instanceConfig.EnableOSUpgrade = env.Config().EnableOSUpgrade()
	instanceConfig.Tags = instancecfg.InstanceTags(env.Config(), instanceConfig.Jobs)
	maybeSetBridge := func(icfg *instancecfg.InstanceConfig) {
		// If we need to override the default bridge name, do it now. When
		// args.ContainerBridgeName is empty, the default names for LXC
		// (lxcbr0) and KVM (virbr0) will be used.
		if args.ContainerBridgeName != "" {
			logger.Debugf("using %q as network bridge for all container types", args.ContainerBridgeName)
			if icfg.AgentEnvironment == nil {
				icfg.AgentEnvironment = make(map[string]string)
			}
			icfg.AgentEnvironment[agent.LxcBridge] = args.ContainerBridgeName
		}
	}
	maybeSetBridge(instanceConfig)

	fmt.Fprintln(ctx.GetStderr(), "Launching instance")
	instanceStatus := func(settableStatus status.Status, info string, data map[string]interface{}) error {
		fmt.Fprintf(ctx.GetStderr(), "%s      \r", info)
		return nil
	}
	result, err := env.StartInstance(environs.StartInstanceParams{
		Constraints:    args.BootstrapConstraints,
		Tools:          availableTools,
		InstanceConfig: instanceConfig,
		Placement:      args.Placement,
		ImageMetadata:  imageMetadata,
		StatusCallback: instanceStatus,
	})
	if err != nil {
		return nil, "", nil, errors.Annotate(err, "cannot start bootstrap instance")
	}
	fmt.Fprintf(ctx.GetStderr(), " - %s\n", result.Instance.Id())

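	// finalize stamps the instance config with the started instance and
	// completes provisioning.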
	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		icfg.InstanceId = result.Instance.Id()
		icfg.HardwareCharacteristics = result.Hardware
		envConfig := env.Config()
		if result.Config != nil {
			updated, err := envConfig.Apply(result.Config.UnknownAttrs())
			if err != nil {
				return errors.Trace(err)
			}
			envConfig = updated
		}
		if err := instancecfg.FinishInstanceConfig(icfg, envConfig); err != nil {
			return err
		}
		maybeSetBridge(icfg)
		return FinishBootstrap(ctx, client, env, result.Instance, icfg)
	}
	return result, selectedSeries, finalize, nil
}
Example #23
// StartInstance is specified in the InstanceBroker interface.
func (env *azureEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.InstanceConfig.HasNetworks() {
		return nil, errors.New("starting instances with networks is not supported yet")
	}

	err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.Config())
	if err != nil {
		return nil, err
	}

	// Pick envtools.  Needed for the custom data (which is what we normally
	// call userdata).
	args.InstanceConfig.Tools = args.Tools[0]
	logger.Infof("picked tools %q", args.InstanceConfig.Tools)

	// Compose userdata.
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, AzureRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot compose user data")
	}

	snapshot := env.getSnapshot()
	location := snapshot.ecfg.location()
	instanceType, sourceImageName, err := env.selectInstanceTypeAndImage(&instances.InstanceConstraint{
		Region:      location,
		Series:      args.Tools.OneSeries(),
		Arches:      args.Tools.Arches(),
		Constraints: args.Constraints,
	})
	if err != nil {
		return nil, err
	}

	// We use the cloud service label as a way to group instances with
	// the same affinity, so that machines can be allocated to the
	// same availability set.
	var cloudServiceName string
	if args.DistributionGroup != nil && snapshot.ecfg.availabilitySetsEnabled() {
		instanceIds, err := args.DistributionGroup()
		if err != nil {
			return nil, err
		}
		for _, id := range instanceIds {
			cloudServiceName, _ = env.splitInstanceId(id)
			if cloudServiceName != "" {
				break
			}
		}
	}

	vhd, err := env.newOSDisk(sourceImageName, args.InstanceConfig.Series)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// If we're creating machine-0, we'll want to expose port 22.
	// All other machines get an auto-generated public port for SSH.
	stateServer := multiwatcher.AnyJobNeedsState(args.InstanceConfig.Jobs...)
	role, err := env.newRole(instanceType.Id, vhd, stateServer, string(userData), args.InstanceConfig.Series, snapshot)
	if err != nil {
		return nil, errors.Trace(err)
	}
	inst, err := createInstance(env, snapshot.api, role, cloudServiceName, stateServer)
	if err != nil {
		return nil, errors.Trace(err)
	}
	hc := &instance.HardwareCharacteristics{
		Mem:      &instanceType.Mem,
		RootDisk: &instanceType.RootDisk,
		CpuCores: &instanceType.CpuCores,
	}
	if len(instanceType.Arches) == 1 {
		hc.Arch = &instanceType.Arches[0]
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hc,
	}, nil
}
Example #24
// finishBootstrap converts the machine config to cloud-config,
// converts that to a script, and then executes it locally.
func (env *localEnviron) finishBootstrap(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
	icfg.InstanceId = bootstrapInstanceId
	icfg.DataDir = env.config.rootDir()
	icfg.LogDir = fmt.Sprintf("/var/log/juju-%s", env.config.namespace())
	icfg.CloudInitOutputLog = filepath.Join(icfg.DataDir, "cloud-init-output.log")

	// No JobManageNetworking added in order not to change the network
	// configuration of the user's machine.
	icfg.Jobs = []multiwatcher.MachineJob{multiwatcher.JobManageEnviron}

	icfg.MachineAgentServiceName = env.machineAgentServiceName()
	icfg.AgentEnvironment = map[string]string{
		agent.Namespace: env.config.namespace(),
		agent.LxcBridge: env.config.networkBridge(),

		// The local provider only supports a single state server,
		// so we set the oplog size to a small value. This makes
		// the preallocation faster with no disadvantage.
		agent.MongoOplogSize: "1", // 1MB
	}

	if err := instancecfg.FinishInstanceConfig(icfg, env.Config()); err != nil {
		return errors.Trace(err)
	}

	// Since Juju's state machine is currently the host machine
	// for local providers, don't stomp on it.
	cfgAttrs := env.config.AllAttrs()
	if val, ok := cfgAttrs["enable-os-refresh-update"].(bool); !ok {
		logger.Infof("local provider; disabling refreshing OS updates.")
		icfg.EnableOSRefreshUpdate = false
	} else {
		icfg.EnableOSRefreshUpdate = val
	}
	if val, ok := cfgAttrs["enable-os-upgrade"].(bool); !ok {
		logger.Infof("local provider; disabling OS upgrades.")
		icfg.EnableOSUpgrade = false
	} else {
		icfg.EnableOSUpgrade = val
	}

	// don't write proxy or mirror settings for local machine
	icfg.AptProxySettings = proxy.Settings{}
	icfg.ProxySettings = proxy.Settings{}
	icfg.AptMirror = ""

	cloudcfg, err := cloudinit.New(icfg.Series)
	if err != nil {
		return errors.Trace(err)
	}
	cloudcfg.SetSystemUpdate(icfg.EnableOSRefreshUpdate)
	cloudcfg.SetSystemUpgrade(icfg.EnableOSUpgrade)

	localLogDir := filepath.Join(icfg.DataDir, "log")
	if err := os.RemoveAll(localLogDir); err != nil {
		return errors.Trace(err)
	}
	if err := symlink.New(icfg.LogDir, localLogDir); err != nil {
		return errors.Trace(err)
	}
	if err := os.Remove(icfg.CloudInitOutputLog); err != nil && !os.IsNotExist(err) {
		return errors.Trace(err)
	}
	cloudcfg.AddScripts(
		fmt.Sprintf("rm -fr %s", icfg.LogDir),
	)
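	// ConfigureJuju renders only the Juju-specific cloud-init sections; the
	// resulting script is then executed directly on the local host.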
	udata, err := cloudconfig.NewUserdataConfig(icfg, cloudcfg)
	if err != nil {
		return errors.Trace(err)
	}
	if err := udata.ConfigureJuju(); err != nil {
		return errors.Trace(err)
	}
	return executeCloudConfig(ctx, icfg, cloudcfg)
}
Example #25
// StartInstance is specified in the InstanceBroker interface.
func (e *Environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	var availabilityZones []string
	if args.Placement != "" {
		placement, err := e.parsePlacement(args.Placement)
		if err != nil {
			return nil, err
		}
		if !placement.availabilityZone.State.Available {
			return nil, errors.Errorf("availability zone %q is unavailable", placement.availabilityZone.Name)
		}
		availabilityZones = append(availabilityZones, placement.availabilityZone.Name)
	}

	// If no availability zone is specified, then automatically spread across
	// the known zones for optimal spread across the instance distribution
	// group.
	if len(availabilityZones) == 0 {
		var group []instance.Id
		var err error
		if args.DistributionGroup != nil {
			group, err = args.DistributionGroup()
			if err != nil {
				return nil, err
			}
		}
		zoneInstances, err := availabilityZoneAllocations(e, group)
		if errors.IsNotImplemented(err) {
			// Availability zones are an extension, so we may get a
			// not implemented error; ignore these.
		} else if err != nil {
			return nil, err
		} else {
			for _, zone := range zoneInstances {
				availabilityZones = append(availabilityZones, zone.ZoneName)
			}
		}
		if len(availabilityZones) == 0 {
			// No explicitly selectable zones available, so use an unspecified zone.
			availabilityZones = []string{""}
		}
	}

	series := args.Tools.OneSeries()
	arches := args.Tools.Arches()
	spec, err := findInstanceSpec(e, &instances.InstanceConstraint{
		Region:      e.ecfg().region(),
		Series:      series,
		Arches:      arches,
		Constraints: args.Constraints,
	}, args.ImageMetadata)
	if err != nil {
		return nil, err
	}
	tools, err := args.Tools.Match(tools.Filter{Arch: spec.Image.Arch})
	if err != nil {
		return nil, errors.Errorf("chosen architecture %v not present in %v", spec.Image.Arch, arches)
	}

	if err := args.InstanceConfig.SetTools(tools); err != nil {
		return nil, errors.Trace(err)
	}

	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, e.Config()); err != nil {
		return nil, err
	}
	cloudcfg, err := e.configurator.GetCloudConfig(args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, OpenstackRenderer{})
	if err != nil {
		return nil, errors.Annotate(err, "cannot make user data")
	}
	logger.Debugf("openstack user data; %d bytes", len(userData))

	var networks = e.firewaller.InitialNetworks()
	usingNetwork := e.ecfg().network()
	if usingNetwork != "" {
		networkId, err := e.resolveNetwork(usingNetwork)
		if err != nil {
			return nil, err
		}
		logger.Debugf("using network id %q", networkId)
		networks = append(networks, nova.ServerNetworks{NetworkId: networkId})
	}
	withPublicIP := e.ecfg().useFloatingIP()
	var publicIP *nova.FloatingIP
	if withPublicIP {
		logger.Debugf("allocating public IP address for openstack node")
		if fip, err := e.allocatePublicIP(); err != nil {
			return nil, errors.Annotate(err, "cannot allocate a public IP as needed")
		} else {
			publicIP = fip
			logger.Infof("allocated public IP %s", publicIP.IP)
		}
	}

	cfg := e.Config()
	var groupNames = make([]nova.SecurityGroupName, 0)
	groups, err := e.firewaller.SetUpGroups(args.InstanceConfig.MachineId, cfg.APIPort())
	if err != nil {
		return nil, errors.Annotate(err, "cannot set up groups")
	}

	for _, g := range groups {
		groupNames = append(groupNames, nova.SecurityGroupName{g.Name})
	}
	machineName := resourceName(
		names.NewMachineTag(args.InstanceConfig.MachineId),
		e.Config().UUID(),
	)

	tryStartNovaInstance := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
	) (server *nova.Entity, err error) {
		for a := attempts.Start(); a.Next(); {
			server, err = client.RunServer(instanceOpts)
			if err == nil || gooseerrors.IsNotFound(err) == false {
				break
			}
		}
		return server, err
	}

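	// Walk the candidate zones in order, moving on whenever Nova reports
	// "no valid hosts" for the current zone.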
	tryStartNovaInstanceAcrossAvailZones := func(
		attempts utils.AttemptStrategy,
		client *nova.Client,
		instanceOpts nova.RunServerOpts,
		availabilityZones []string,
	) (server *nova.Entity, err error) {
		for _, zone := range availabilityZones {
			instanceOpts.AvailabilityZone = zone
			e.configurator.ModifyRunServerOptions(&instanceOpts)
			server, err = tryStartNovaInstance(attempts, client, instanceOpts)
			if err == nil || isNoValidHostsError(err) == false {
				break
			}

			logger.Infof("no valid hosts available in zone %q, trying another availability zone", zone)
		}

		if err != nil {
			err = errors.Annotate(err, "cannot run instance")
		}

		return server, err
	}

	var opts = nova.RunServerOpts{
		Name:               machineName,
		FlavorId:           spec.InstanceType.Id,
		ImageId:            spec.Image.Id,
		UserData:           userData,
		SecurityGroupNames: groupNames,
		Networks:           networks,
		Metadata:           args.InstanceConfig.Tags,
	}
	server, err := tryStartNovaInstanceAcrossAvailZones(shortAttempt, e.nova(), opts, availabilityZones)
	if err != nil {
		return nil, errors.Trace(err)
	}

	detail, err := e.nova().GetServer(server.Id)
	if err != nil {
		return nil, errors.Annotate(err, "cannot get started instance")
	}

	inst := &openstackInstance{
		e:            e,
		serverDetail: detail,
		arch:         &spec.Image.Arch,
		instType:     &spec.InstanceType,
	}
	logger.Infof("started instance %q", inst.Id())
	if withPublicIP {
		if err := e.assignPublicIP(publicIP, string(inst.Id())); err != nil {
			if err := e.terminateInstances([]instance.Id{inst.Id()}); err != nil {
				// ignore the failure at this stage, just log it
				logger.Debugf("failed to terminate instance %q: %v", inst.Id(), err)
			}
			return nil, errors.Annotatef(err, "cannot assign public address %s to instance %q", publicIP.IP, inst.Id())
		}
		inst.floatingIP = publicIP
		logger.Infof("assigned public IP %s to %q", publicIP.IP, inst.Id())
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: inst.hardwareCharacteristics(),
	}, nil
}
Example #26
// StartInstance is specified in the InstanceBroker interface.
func (env *azureEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
	if args.ControllerUUID == "" {
		return nil, errors.New("missing controller UUID")
	}

	// Get the required configuration and config-dependent information
	// required to create the instance. We take the lock just once, to
	// ensure we obtain all information based on the same configuration.
	env.mu.Lock()
	envTags := tags.ResourceTags(
		names.NewModelTag(env.config.Config.UUID()),
		names.NewControllerTag(args.ControllerUUID),
		env.config,
	)
	storageAccountType := env.config.storageAccountType
	imageStream := env.config.ImageStream()
	instanceTypes, err := env.getInstanceTypesLocked()
	if err != nil {
		env.mu.Unlock()
		return nil, errors.Trace(err)
	}
	env.mu.Unlock()

	// If the user has not specified a root-disk size, then
	// set a sensible default.
	var rootDisk uint64
	if args.Constraints.RootDisk != nil {
		rootDisk = *args.Constraints.RootDisk
	} else {
		rootDisk = defaultRootDiskSize
		args.Constraints.RootDisk = &rootDisk
	}

	// Identify the instance type and image to provision.
	series := args.Tools.OneSeries()
	instanceSpec, err := findInstanceSpec(
		compute.VirtualMachineImagesClient{env.compute},
		instanceTypes,
		&instances.InstanceConstraint{
			Region:      env.location,
			Series:      series,
			Arches:      args.Tools.Arches(),
			Constraints: args.Constraints,
		},
		imageStream,
	)
	if err != nil {
		return nil, err
	}
	if rootDisk < uint64(instanceSpec.InstanceType.RootDisk) {
		// The InstanceType's RootDisk is set to the maximum
		// OS disk size; override it with the user-specified
		// or default root disk size.
		instanceSpec.InstanceType.RootDisk = rootDisk
	}

	// Windows images are 127GiB, and cannot be made smaller.
	const windowsMinRootDiskMB = 127 * 1024
	seriesOS, err := jujuseries.GetOSFromSeries(series)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if seriesOS == os.Windows {
		if instanceSpec.InstanceType.RootDisk < windowsMinRootDiskMB {
			instanceSpec.InstanceType.RootDisk = windowsMinRootDiskMB
		}
	}

	// Pick tools by filtering the available tools down to the architecture of
	// the image that will be provisioned.
	selectedTools, err := args.Tools.Match(tools.Filter{
		Arch: instanceSpec.Image.Arch,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	logger.Infof("picked tools %q", selectedTools[0].Version)

	// Finalize the instance config, which we'll render to CustomData below.
	if err := args.InstanceConfig.SetTools(selectedTools); err != nil {
		return nil, errors.Trace(err)
	}
	if err := instancecfg.FinishInstanceConfig(
		args.InstanceConfig, env.Config(),
	); err != nil {
		return nil, err
	}

	machineTag := names.NewMachineTag(args.InstanceConfig.MachineId)
	vmName := resourceName(machineTag)
	vmTags := make(map[string]string)
	for k, v := range args.InstanceConfig.Tags {
		vmTags[k] = v
	}
	// jujuMachineNameTag identifies the VM name, which encodes the
	// Juju machine name. We tag all resources related to the machine
	// with it.
	vmTags[jujuMachineNameTag] = vmName

	if err := env.createVirtualMachine(
		vmName, vmTags, envTags,
		instanceSpec, args.InstanceConfig,
		storageAccountType,
	); err != nil {
		logger.Errorf("creating instance failed, destroying: %v", err)
		if err := env.StopInstances(instance.Id(vmName)); err != nil {
			logger.Errorf("could not destroy failed virtual machine: %v", err)
		}
		return nil, errors.Annotatef(err, "creating virtual machine %q", vmName)
	}

	// Note: the instance is initialised without addresses to keep the
	// API chatter down. We will refresh the instance if we need to know
	// the addresses.
	inst := &azureInstance{vmName, "Creating", env, nil, nil}
	amd64 := arch.AMD64
	hc := &instance.HardwareCharacteristics{
		Arch:     &amd64,
		Mem:      &instanceSpec.InstanceType.Mem,
		RootDisk: &instanceSpec.InstanceType.RootDisk,
		CpuCores: &instanceSpec.InstanceType.CpuCores,
	}
	return &environs.StartInstanceResult{
		Instance: inst,
		Hardware: hc,
	}, nil
}
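The tools-selection step is the part that ties the image to the instance config: the available tools are first narrowed to the architecture of the chosen image, then attached to the config before FinishInstanceConfig runs. A minimal sketch of that sequence, reusing only the calls shown in the example above (the tools.List name for the tools collection is assumed):

func setToolsAndFinish(
	icfg *instancecfg.InstanceConfig,
	cfg *config.Config,
	available tools.List,
	imageArch string,
) error {
	// Narrow the tools to the image architecture, as above.
	matched, err := available.Match(tools.Filter{Arch: imageArch})
	if err != nil {
		return errors.Trace(err)
	}
	if err := icfg.SetTools(matched); err != nil {
		return errors.Trace(err)
	}
	// Finalize the config only once the tools are known.
	return instancecfg.FinishInstanceConfig(icfg, cfg)
}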
Example #27
// BootstrapInstance creates a new instance with the series and architecture
// of its choice, constrained to those of the available tools, and
// returns the instance result, series, and a function that
// must be called to finalize the bootstrap process by transferring
// the tools and installing the initial Juju state server.
// This method is called by Bootstrap above, which implements environs.Bootstrap, but
// is also exported so that providers can manipulate the started instance.
func BootstrapInstance(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams,
) (_ *environs.StartInstanceResult, series string, _ environs.BootstrapFinalizer, err error) {
	// TODO make safe in the case of racing Bootstraps
	// If two Bootstraps are called concurrently, there's
	// no way to make sure that only one succeeds.

	// First thing, ensure we have tools otherwise there's no point.
	series = config.PreferredSeries(env.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return nil, "", nil, err
	}

	// Get the bootstrap SSH client. Do this early, so we know
	// not to bother with any of the below if we can't finish the job.
	client := ssh.DefaultClient
	if client == nil {
		// This should never happen: if we don't have OpenSSH, then
		// go.crypto/ssh should be used with an auto-generated key.
		return nil, "", nil, fmt.Errorf("no SSH client available")
	}

	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(args.Constraints, series)
	if err != nil {
		return nil, "", nil, err
	}
	instanceConfig.EnableOSRefreshUpdate = env.Config().EnableOSRefreshUpdate()
	instanceConfig.EnableOSUpgrade = env.Config().EnableOSUpgrade()
	instanceConfig.Tags = instancecfg.InstanceTags(env.Config(), instanceConfig.Jobs)
	maybeSetBridge := func(icfg *instancecfg.InstanceConfig) {
		// If we need to override the default bridge name, do it now. When
		// args.ContainerBridgeName is empty, the default names for LXC
		// (lxcbr0) and KVM (virbr0) will be used.
		if args.ContainerBridgeName != "" {
			logger.Debugf("using %q as network bridge for all container types", args.ContainerBridgeName)
			if icfg.AgentEnvironment == nil {
				icfg.AgentEnvironment = make(map[string]string)
			}
			icfg.AgentEnvironment[agent.LxcBridge] = args.ContainerBridgeName
		}
	}
	maybeSetBridge(instanceConfig)

	fmt.Fprintln(ctx.GetStderr(), "Launching instance")
	result, err := env.StartInstance(environs.StartInstanceParams{
		Constraints:    args.Constraints,
		Tools:          availableTools,
		InstanceConfig: instanceConfig,
		Placement:      args.Placement,
	})
	if err != nil {
		return nil, "", nil, errors.Annotate(err, "cannot start bootstrap instance")
	}
	fmt.Fprintf(ctx.GetStderr(), " - %s\n", result.Instance.Id())

	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		icfg.InstanceId = result.Instance.Id()
		icfg.HardwareCharacteristics = result.Hardware
		if err := instancecfg.FinishInstanceConfig(icfg, env.Config()); err != nil {
			return err
		}
		maybeSetBridge(icfg)
		return FinishBootstrap(ctx, client, result.Instance, icfg)
	}
	return result, series, finalize, nil
}
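Because BootstrapInstance is exported, a provider's Bootstrap can be little more than a wrapper around it. A minimal sketch, assuming the signatures above and that the returned Hardware always has Arch set:

func Bootstrap(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams,
) (*environs.BootstrapResult, error) {
	result, series, finalizer, err := BootstrapInstance(ctx, env, args)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &environs.BootstrapResult{
		Arch:     *result.Hardware.Arch,
		Series:   series,
		Finalize: finalizer,
	}, nil
}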
Example #28
func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) {
	series := config.PreferredSeries(e.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return nil, err
	}
	arch := availableTools.Arches()[0]

	defer delay()
	if err := e.checkBroken("Bootstrap"); err != nil {
		return nil, err
	}
	if _, ok := args.ControllerConfig.CACert(); !ok {
		return nil, errors.New("no CA certificate in controller configuration")
	}

	logger.Infof("would pick tools from %s", availableTools)

	estate, err := e.state()
	if err != nil {
		return nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if estate.bootstrapped {
		return nil, errors.New("model is already bootstrapped")
	}

	// Create an instance for the bootstrap node.
	logger.Infof("creating bootstrap instance")
	i := &dummyInstance{
		id:           BootstrapInstanceId,
		addresses:    network.NewAddresses("localhost"),
		ports:        make(map[network.PortRange]bool),
		machineId:    agent.BootstrapMachineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
		controller:   true,
	}
	estate.insts[i.id] = i
	estate.bootstrapped = true
	estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args}

	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig, _ environs.BootstrapDialOpts) error {
		if e.ecfg().controller() {
			icfg.Bootstrap.BootstrapMachineInstanceId = BootstrapInstanceId
			if err := instancecfg.FinishInstanceConfig(icfg, e.Config()); err != nil {
				return err
			}

			adminUser := names.NewUserTag("admin@local")
			var cloudCredentialTag names.CloudCredentialTag
			if icfg.Bootstrap.ControllerCloudCredentialName != "" {
				cloudCredentialTag = names.NewCloudCredentialTag(fmt.Sprintf(
					"%s/%s/%s",
					icfg.Bootstrap.ControllerCloudName,
					adminUser.Id(),
					icfg.Bootstrap.ControllerCloudCredentialName,
				))
			}

			cloudCredentials := make(map[names.CloudCredentialTag]cloud.Credential)
			if icfg.Bootstrap.ControllerCloudCredential != nil && icfg.Bootstrap.ControllerCloudCredentialName != "" {
				cloudCredentials[cloudCredentialTag] = *icfg.Bootstrap.ControllerCloudCredential
			}

			info := stateInfo()
			// Since the admin user isn't set up until later, the
			// password in the info structure is empty, so the admin
			// user is constructed with an empty password here.
			// It is set just below.
			st, err := state.Initialize(state.InitializeParams{
				Clock:            clock.WallClock,
				ControllerConfig: icfg.Controller.Config,
				ControllerModelArgs: state.ModelArgs{
					Owner:                   adminUser,
					Config:                  icfg.Bootstrap.ControllerModelConfig,
					Constraints:             icfg.Bootstrap.BootstrapMachineConstraints,
					CloudName:               icfg.Bootstrap.ControllerCloudName,
					CloudRegion:             icfg.Bootstrap.ControllerCloudRegion,
					CloudCredential:         cloudCredentialTag,
					StorageProviderRegistry: e,
				},
				Cloud:            icfg.Bootstrap.ControllerCloud,
				CloudName:        icfg.Bootstrap.ControllerCloudName,
				CloudCredentials: cloudCredentials,
				MongoInfo:        info,
				MongoDialOpts:    mongotest.DialOpts(),
				NewPolicy:        estate.newStatePolicy,
			})
			if err != nil {
				return err
			}
			if err := st.SetModelConstraints(args.ModelConstraints); err != nil {
				return err
			}
			if err := st.SetAdminMongoPassword(icfg.Controller.MongoInfo.Password); err != nil {
				return err
			}
			if err := st.MongoSession().DB("admin").Login("admin", icfg.Controller.MongoInfo.Password); err != nil {
				return err
			}
			env, err := st.Model()
			if err != nil {
				return err
			}
			owner, err := st.User(env.Owner())
			if err != nil {
				return err
			}
			// We log this for test purposes only. No one can use the
			// dummy provider for anything other than testing, so logging
			// the password here is fine.
			logger.Debugf("setting password for %q to %q", owner.Name(), icfg.Controller.MongoInfo.Password)
			if err := owner.SetPassword(icfg.Controller.MongoInfo.Password); err != nil {
				return err
			}

			estate.apiStatePool = state.NewStatePool(st)

			estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{
				Clock:       clock.WallClock,
				Cert:        testing.ServerCert,
				Key:         testing.ServerKey,
				Tag:         names.NewMachineTag("0"),
				DataDir:     DataDir,
				LogDir:      LogDir,
				StatePool:   estate.apiStatePool,
				NewObserver: func() observer.Observer { return &fakeobserver.Instance{} },
				// Should never be used but prevent external access just in case.
				AutocertURL: "https://0.1.2.3/no-autocert-here",
			})
			if err != nil {
				panic(err)
			}
			estate.apiState = st
		}
		estate.ops <- OpFinalizeBootstrap{Context: ctx, Env: e.name, InstanceConfig: icfg}
		return nil
	}

	bsResult := &environs.BootstrapResult{
		Arch:     arch,
		Series:   series,
		Finalize: finalize,
	}
	return bsResult, nil
}
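Since estate.bootstrapped is checked under the state mutex, bootstrapping the same dummy model twice must fail with "model is already bootstrapped". A sketch of a gocheck-style assertion for that guard (assertBootstrapOnce is a hypothetical test helper, not part of the provider):

func assertBootstrapOnce(c *gc.C, env environs.Environ, ctx environs.BootstrapContext, args environs.BootstrapParams) {
	// The first bootstrap succeeds and marks the model as bootstrapped.
	_, err := env.Bootstrap(ctx, args)
	c.Assert(err, jc.ErrorIsNil)

	// A second attempt against the same model is rejected.
	_, err = env.Bootstrap(ctx, args)
	c.Assert(err, gc.ErrorMatches, "model is already bootstrapped")
}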