Example #1
func (s *AddressSuite) TestSelectInternalMachineAddress(c *gc.C) {
	oldValue := network.GetPreferIPv6()
	defer func() {
		network.SetPreferIPv6(oldValue)
	}()
	for i, t := range selectInternalMachineTests {
		c.Logf("test %d: %s", i, t.about)
		network.SetPreferIPv6(t.preferIPv6)
		c.Check(network.SelectInternalAddress(t.addresses, true), gc.Equals, t.expected())
	}
}
Example #2
func (s *AddressSuite) TestSelectInternalHostPorts(c *gc.C) {
	oldValue := network.PreferIPv6()
	defer func() {
		network.SetPreferIPv6(oldValue)
	}()
	for i, t := range selectInternalHostPortsTests {
		c.Logf("test %d: %s", i, t.about)
		network.SetPreferIPv6(t.preferIPv6)
		c.Check(network.SelectInternalHostPorts(t.addresses, false), gc.DeepEquals, t.expected)
	}
}
Example #3
func (s *PortSuite) TestSelectInternalMachineHostPort(c *gc.C) {
	oldValue := network.GetPreferIPv6()
	defer func() {
		network.SetPreferIPv6(oldValue)
	}()
	for i, t0 := range selectInternalMachineTests {
		t := t0.hostPortTest()
		c.Logf("test %d: %s", i, t.about)
		network.SetPreferIPv6(t.preferIPv6)
		c.Check(network.SelectInternalHostPort(t.hostPorts, true), gc.DeepEquals, t.expected())
	}
}
Example #4
func (*HostPortSuite) TestSelectPublicHostPort(c *gc.C) {
	oldValue := network.GetPreferIPv6()
	defer func() {
		network.SetPreferIPv6(oldValue)
	}()
	for i, t0 := range selectPublicTests {
		t := t0.hostPortTest()
		c.Logf("test %d: %s", i, t.about)
		network.SetPreferIPv6(t.preferIPv6)
		c.Check(network.SelectPublicHostPort(t.hostPorts), jc.DeepEquals, t.expected())
	}
}
Example #5
func (s *AddressSuite) TestSelectInternalAddress(c *gc.C) {
	oldValue := network.PreferIPv6()
	defer func() {
		network.SetPreferIPv6(oldValue)
	}()
	for i, t := range selectInternalTests {
		c.Logf("test %d: %s", i, t.about)
		network.SetPreferIPv6(t.preferIPv6)
		expectAddr, expectOK := t.expected()
		actualAddr, actualOK := network.SelectInternalAddress(t.addresses, false)
		c.Check(actualOK, gc.Equals, expectOK)
		c.Check(actualAddr, gc.Equals, expectAddr)
	}
}
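The five tests above all repeat the same save-and-restore dance around the package-level prefer-ipv6 flag. Below is a minimal sketch of how that could be factored into a helper; it is not taken from the juju sources, and withPreferIPv6 is a hypothetical name, but it relies only on the network.PreferIPv6 and network.SetPreferIPv6 calls already used in these tests.

package network_test

import "github.com/juju/juju/network"

// withPreferIPv6 runs fn with the package-level prefer-ipv6 flag set to
// value, then restores whatever value was in effect before the call.
func withPreferIPv6(value bool, fn func()) {
	oldValue := network.PreferIPv6()
	defer network.SetPreferIPv6(oldValue)
	network.SetPreferIPv6(value)
	fn()
}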
Example #6
func (*NetworkSuite) TestInitializeFromConfig(c *gc.C) {
	c.Check(network.PreferIPv6(), jc.IsFalse)

	envConfig := testing.CustomModelConfig(c, testing.Attrs{
		"prefer-ipv6": true,
	})
	network.SetPreferIPv6(envConfig.PreferIPv6())
	c.Check(network.PreferIPv6(), jc.IsTrue)

	envConfig = testing.CustomModelConfig(c, testing.Attrs{
		"prefer-ipv6": false,
	})
	network.SetPreferIPv6(envConfig.PreferIPv6())
	c.Check(network.PreferIPv6(), jc.IsFalse)
}
Example #7
func (*AddressSuite) TestExactScopeMatchHonoursPreferIPv6(c *gc.C) {
	network.SetPreferIPv6(true)
	addr := network.NewScopedAddress("10.0.0.2", network.ScopeCloudLocal)
	match := network.ExactScopeMatch(addr, network.ScopeCloudLocal)
	c.Assert(match, jc.IsFalse)
	match = network.ExactScopeMatch(addr, network.ScopePublic)
	c.Assert(match, jc.IsFalse)

	addr = network.NewScopedAddress("8.8.8.8", network.ScopePublic)
	match = network.ExactScopeMatch(addr, network.ScopeCloudLocal)
	c.Assert(match, jc.IsFalse)
	match = network.ExactScopeMatch(addr, network.ScopePublic)
	c.Assert(match, jc.IsFalse)

	addr = network.NewScopedAddress("2001:db8::ff00:42:8329", network.ScopePublic)
	match = network.ExactScopeMatch(addr, network.ScopeCloudLocal)
	c.Assert(match, jc.IsFalse)
	match = network.ExactScopeMatch(addr, network.ScopePublic)
	c.Assert(match, jc.IsTrue)

	addr = network.NewScopedAddress("fc00::1", network.ScopeCloudLocal)
	match = network.ExactScopeMatch(addr, network.ScopeCloudLocal)
	c.Assert(match, jc.IsTrue)
	match = network.ExactScopeMatch(addr, network.ScopePublic)
	c.Assert(match, jc.IsFalse)
}
Example #8
File: unit.go Project: makyo/juju
// Run runs a unit agent.
func (a *UnitAgent) Run(ctx *cmd.Context) error {
	defer a.tomb.Done()
	if err := a.ReadConfig(a.Tag().String()); err != nil {
		return err
	}
	agentConfig := a.CurrentConfig()

	agentLogger.Infof("unit agent %v start (%s [%s])", a.Tag().String(), jujuversion.Current, runtime.Compiler)
	if flags := featureflag.String(); flags != "" {
		logger.Warningf("developer feature flags enabled: %s", flags)
	}
	network.SetPreferIPv6(agentConfig.PreferIPv6())

	// Sometimes there are upgrade steps that are needed for each unit.
	// There are plans afoot to unify the unit and machine agents. When
	// this happens, there will be a simple helper function for the upgrade
	// steps to run something for each unit on the machine. Until then, we
	// need to have the uniter do it, as the overhead of getting a full
	// upgrade process in the unit agent outweighs the current benefits.
	// So, since the upgrade steps are all idempotent, we will just call
	// the upgrade steps when we start the uniter. To be clear, these
	// should move back to the upgrade package when we do unify the agents.
	runUpgrades(agentConfig.Tag(), agentConfig.DataDir())

	a.runner.StartWorker("api", a.APIWorkers)
	err := cmdutil.AgentDone(logger, a.runner.Wait())
	a.tomb.Kill(err)
	return err
}
Example #9
func (s *BaseSuite) SetUpTest(c *gc.C) {
	s.CleanupSuite.SetUpTest(c)
	s.LoggingSuite.SetUpTest(c)
	s.JujuOSEnvSuite.SetUpTest(c)

	// We do this to isolate invocations of bash from pulling in the
	// ambient user environment, and potentially affecting the tests.
	// We can't always just use IsolationSuite because we still need
	// PATH and possibly a couple other envars.
	s.PatchEnvironment("BASH_ENV", "")
	network.SetPreferIPv6(false)
}
Example #10
// Run runs a machine agent.
func (a *MachineAgent) Run(*cmd.Context) error {

	defer a.tomb.Done()
	if err := a.ReadConfig(a.Tag().String()); err != nil {
		return fmt.Errorf("cannot read agent configuration: %v", err)
	}

	logger.Infof("machine agent %v start (%s [%s])", a.Tag(), jujuversion.Current, runtime.Compiler)
	if flags := featureflag.String(); flags != "" {
		logger.Warningf("developer feature flags enabled: %s", flags)
	}

	// Before doing anything else, we need to make sure the certificate generated for
	// use by mongo to validate controller connections is correct. This needs to be done
	// before any possible restart of the mongo service.
	// See bug http://pad.lv/1434680
	if err := a.upgradeCertificateDNSNames(); err != nil {
		return errors.Annotate(err, "error upgrading server certificate")
	}

	if upgradeComplete, err := upgradesteps.NewLock(a); err != nil {
		return errors.Annotate(err, "error during creating upgrade completion channel")
	} else {
		a.upgradeComplete = upgradeComplete
	}

	agentConfig := a.CurrentConfig()
	createEngine := a.makeEngineCreator(agentConfig.UpgradedToVersion())
	network.SetPreferIPv6(agentConfig.PreferIPv6())
	charmrepo.CacheDir = filepath.Join(agentConfig.DataDir(), "charmcache")
	if err := a.createJujudSymlinks(agentConfig.DataDir()); err != nil {
		return err
	}
	a.runner.StartWorker("engine", createEngine)

	// At this point, all workers will have been configured to start
	close(a.workersStarted)
	err := a.runner.Wait()
	switch errors.Cause(err) {
	case worker.ErrTerminateAgent:
		err = a.uninstallAgent()
	case worker.ErrRebootMachine:
		logger.Infof("Caught reboot error")
		err = a.executeRebootOrShutdown(params.ShouldReboot)
	case worker.ErrShutdownMachine:
		logger.Infof("Caught shutdown error")
		err = a.executeRebootOrShutdown(params.ShouldShutdown)
	}
	err = cmdutil.AgentDone(logger, err)
	a.tomb.Kill(err)
	return err
}
Example #11
func (*AddressSuite) TestExactScopeMatch(c *gc.C) {
	network.SetPreferIPv6(false)
	addr := network.NewScopedAddress("10.0.0.2", network.ScopeCloudLocal)
	match := network.ExactScopeMatch(addr, network.ScopeCloudLocal)
	c.Assert(match, jc.IsTrue)
	match = network.ExactScopeMatch(addr, network.ScopePublic)
	c.Assert(match, jc.IsFalse)

	addr = network.NewScopedAddress("8.8.8.8", network.ScopePublic)
	match = network.ExactScopeMatch(addr, network.ScopeCloudLocal)
	c.Assert(match, jc.IsFalse)
	match = network.ExactScopeMatch(addr, network.ScopePublic)
	c.Assert(match, jc.IsTrue)
}
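Read together, Example #7 and Example #11 show that ExactScopeMatch for the same IPv4 cloud-local address depends entirely on the global prefer-ipv6 flag. The standalone sketch below only restates the assertions from those two tests using the same network API; the main wrapper is added here purely for illustration.

package main

import (
	"fmt"

	"github.com/juju/juju/network"
)

func main() {
	addr := network.NewScopedAddress("10.0.0.2", network.ScopeCloudLocal)

	// With prefer-ipv6 disabled the cloud-local scope matches exactly,
	// as asserted in TestExactScopeMatch.
	network.SetPreferIPv6(false)
	fmt.Println(network.ExactScopeMatch(addr, network.ScopeCloudLocal)) // true

	// With prefer-ipv6 enabled the same IPv4 address no longer matches,
	// as asserted in TestExactScopeMatchHonoursPreferIPv6.
	network.SetPreferIPv6(true)
	fmt.Println(network.ExactScopeMatch(addr, network.ScopeCloudLocal)) // false
}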
Example #12
// Run initializes state for an environment.
func (c *BootstrapCommand) Run(_ *cmd.Context) error {
	envCfg, err := config.New(config.NoDefaults, c.ControllerModelConfig)
	if err != nil {
		return err
	}
	err = c.ReadConfig("machine-0")
	if err != nil {
		return err
	}
	agentConfig := c.CurrentConfig()
	network.SetPreferIPv6(agentConfig.PreferIPv6())

	// agent.Jobs is an optional field in the agent config, and was
	// introduced after 1.17.2. We default to allowing units on
	// machine-0 if missing.
	jobs := agentConfig.Jobs()
	if len(jobs) == 0 {
		jobs = []multiwatcher.MachineJob{
			multiwatcher.JobManageModel,
			multiwatcher.JobHostUnits,
			multiwatcher.JobManageNetworking,
		}
	}

	// Get the bootstrap machine's addresses from the provider.
	env, err := environs.New(envCfg)
	if err != nil {
		return err
	}
	newConfigAttrs := make(map[string]interface{})

	// Check to see if a newer agent version has been requested
	// by the bootstrap client.
	desiredVersion, ok := envCfg.AgentVersion()
	if ok && desiredVersion != jujuversion.Current {
		// If we have been asked for a newer version, ensure the newer
		// tools can actually be found, or else bootstrap won't complete.
		stream := envtools.PreferredStream(&desiredVersion, envCfg.Development(), envCfg.AgentStream())
		logger.Infof("newer tools requested, looking for %v in stream %v", desiredVersion, stream)
		filter := tools.Filter{
			Number: desiredVersion,
			Arch:   arch.HostArch(),
			Series: series.HostSeries(),
		}
		_, toolsErr := envtools.FindTools(env, -1, -1, stream, filter)
		if toolsErr == nil {
			logger.Infof("tools are available, upgrade will occur after bootstrap")
		}
		if errors.IsNotFound(toolsErr) {
			// Newer tools not available, so revert to using the tools
			// matching the current agent version.
			logger.Warningf("newer tools for %q not available, sticking with version %q", desiredVersion, jujuversion.Current)
			newConfigAttrs["agent-version"] = jujuversion.Current.String()
		} else if toolsErr != nil {
			logger.Errorf("cannot find newer tools: %v", toolsErr)
			return toolsErr
		}
	}

	instanceId := instance.Id(c.InstanceId)
	instances, err := env.Instances([]instance.Id{instanceId})
	if err != nil {
		return err
	}
	addrs, err := instances[0].Addresses()
	if err != nil {
		return err
	}

	// When machine addresses are reported from state, they have
	// duplicates removed.  We should do the same here so that
	// there is not unnecessary churn in the mongo replicaset.
	// TODO (cherylj) Add explicit unit tests for this - tracked
	// by bug #1544158.
	addrs = network.MergedAddresses([]network.Address{}, addrs)

	// Generate a private SSH key for the controllers, and add
	// the public key to the environment config. We'll add the
	// private key to StateServingInfo below.
	privateKey, publicKey, err := sshGenerateKey(config.JujuSystemKey)
	if err != nil {
		return errors.Annotate(err, "failed to generate system key")
	}
	authorizedKeys := config.ConcatAuthKeys(envCfg.AuthorizedKeys(), publicKey)
	newConfigAttrs[config.AuthKeysConfig] = authorizedKeys

	// Generate a shared secret for the Mongo replica set, and write it out.
	sharedSecret, err := mongo.GenerateSharedSecret()
	if err != nil {
		return err
	}
	info, ok := agentConfig.StateServingInfo()
	if !ok {
		return fmt.Errorf("bootstrap machine config has no state serving info")
	}
	info.SharedSecret = sharedSecret
	info.SystemIdentity = privateKey
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		agentConfig.SetStateServingInfo(info)
		return nil
	})
	if err != nil {
		return fmt.Errorf("cannot write agent config: %v", err)
	}

	err = c.ChangeConfig(func(config agent.ConfigSetter) error {
		// We'll try for mongo 3.2 first and fallback to
		// mongo 2.4 if the newer binaries are not available.
		if mongo.BinariesAvailable(mongo.Mongo32wt) {
			config.SetMongoVersion(mongo.Mongo32wt)
		} else {
			config.SetMongoVersion(mongo.Mongo24)
		}
		return nil
	})
	if err != nil {
		return errors.Annotate(err, "cannot set mongo version")
	}

	agentConfig = c.CurrentConfig()

	// Create system-identity file
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return err
	}

	if err := c.startMongo(addrs, agentConfig); err != nil {
		return err
	}

	logger.Infof("started mongo")
	// Initialise state, and store any agent config (e.g. password) changes.
	envCfg, err = env.Config().Apply(newConfigAttrs)
	if err != nil {
		return errors.Annotate(err, "failed to update model config")
	}
	var st *state.State
	var m *state.Machine
	err = c.ChangeConfig(func(agentConfig agent.ConfigSetter) error {
		var stateErr error
		dialOpts := mongo.DefaultDialOpts()

		// Set a longer socket timeout than usual, as the machine
		// will be starting up and disk I/O slower than usual. This
		// has been known to cause timeouts in queries.
		timeouts := envCfg.BootstrapSSHOpts()
		dialOpts.SocketTimeout = timeouts.Timeout
		if dialOpts.SocketTimeout < minSocketTimeout {
			dialOpts.SocketTimeout = minSocketTimeout
		}

		// We shouldn't attempt to dial peers until we have some.
		dialOpts.Direct = true

		adminTag := names.NewLocalUserTag(c.AdminUsername)
		st, m, stateErr = agentInitializeState(
			adminTag,
			agentConfig,
			envCfg, c.HostedModelConfig,
			agentbootstrap.BootstrapMachineConfig{
				Addresses:            addrs,
				BootstrapConstraints: c.BootstrapConstraints,
				ModelConstraints:     c.ModelConstraints,
				Jobs:                 jobs,
				InstanceId:           instanceId,
				Characteristics:      c.Hardware,
				SharedSecret:         sharedSecret,
			},
			dialOpts,
			environs.NewStatePolicy(),
		)
		return stateErr
	})
	if err != nil {
		return err
	}
	defer st.Close()

	// Populate the tools catalogue.
	if err := c.populateTools(st, env); err != nil {
		return err
	}

	// Populate the GUI archive catalogue.
	if err := c.populateGUIArchive(st, env); err != nil {
		// Do not stop the bootstrapping process for Juju GUI archive errors.
		logger.Warningf("cannot set up Juju GUI: %s", err)
	} else {
		logger.Debugf("Juju GUI successfully set up")
	}

	// Add custom image metadata to environment storage.
	if c.ImageMetadataDir != "" {
		if err := c.saveCustomImageMetadata(st, env); err != nil {
			return err
		}

		stor := newStateStorage(st.ModelUUID(), st.MongoSession())
		if err := c.storeCustomImageMetadata(stor); err != nil {
			return err
		}
	}

	// Populate the storage pools.
	if err = c.populateDefaultStoragePools(st); err != nil {
		return err
	}

	// bootstrap machine always gets the vote
	return m.SetHasVote(true)
}
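Examples #8, #10 and #12 all follow the same shape: load the agent configuration, then call network.SetPreferIPv6 with its PreferIPv6() value before any address selection takes place. The sketch below distills that shared step; applyNetworkPreferences, preferIPv6Source and the package name are illustrative inventions rather than juju API, and only the PreferIPv6()/SetPreferIPv6 calls are taken from the examples.

package netprefs

import "github.com/juju/juju/network"

// preferIPv6Source stands in for the configuration values used above
// (agent.Config, model config), which expose a PreferIPv6() bool getter.
type preferIPv6Source interface {
	PreferIPv6() bool
}

// applyNetworkPreferences propagates the configured preference to the
// package-level flag consulted by the address-selection helpers.
func applyNetworkPreferences(cfg preferIPv6Source) {
	network.SetPreferIPv6(cfg.PreferIPv6())
}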
Example #13
// Bootstrap bootstraps the given environment. The supplied constraints are
// used to provision the instance, and are also set within the bootstrapped
// environment.
func Bootstrap(ctx environs.BootstrapContext, environ environs.Environ, args BootstrapParams) error {
	cfg := environ.Config()
	network.SetPreferIPv6(cfg.PreferIPv6())
	if secret := cfg.AdminSecret(); secret == "" {
		return errors.Errorf("model configuration has no admin-secret")
	}
	if authKeys := ssh.SplitAuthorisedKeys(cfg.AuthorizedKeys()); len(authKeys) == 0 {
		// Apparently this can never happen, so it's not tested. But, one day,
		// Config will act differently (it's pretty crazy that, AFAICT, the
		// authorized-keys are optional config settings... but it's impossible
		// to actually *create* a config without them)... and when it does,
		// we'll be here to catch this problem early.
		return errors.Errorf("model configuration has no authorized-keys")
	}
	if _, hasCACert := cfg.CACert(); !hasCACert {
		return errors.Errorf("model configuration has no ca-cert")
	}
	if _, hasCAKey := cfg.CAPrivateKey(); !hasCAKey {
		return errors.Errorf("model configuration has no ca-private-key")
	}

	// Set default tools metadata source, add image metadata source,
	// then verify constraints. Providers may rely on image metadata
	// for constraint validation.
	var customImageMetadata []*imagemetadata.ImageMetadata
	if args.MetadataDir != "" {
		var err error
		customImageMetadata, err = setPrivateMetadataSources(environ, args.MetadataDir)
		if err != nil {
			return err
		}
	}
	if err := validateConstraints(environ, args.ModelConstraints); err != nil {
		return err
	}
	if err := validateConstraints(environ, args.BootstrapConstraints); err != nil {
		return err
	}

	constraintsValidator, err := environ.ConstraintsValidator()
	if err != nil {
		return err
	}
	bootstrapConstraints, err := constraintsValidator.Merge(
		args.ModelConstraints, args.BootstrapConstraints,
	)
	if err != nil {
		return err
	}

	_, supportsNetworking := environs.SupportsNetworking(environ)

	var bootstrapSeries *string
	if args.BootstrapSeries != "" {
		bootstrapSeries = &args.BootstrapSeries
	}

	ctx.Infof("Bootstrapping model %q", cfg.Name())
	logger.Debugf("model %q supports service/machine networks: %v", cfg.Name(), supportsNetworking)
	disableNetworkManagement, _ := cfg.DisableNetworkManagement()
	logger.Debugf("network management by juju enabled: %v", !disableNetworkManagement)
	availableTools, err := findAvailableTools(
		environ, args.AgentVersion, bootstrapConstraints.Arch,
		bootstrapSeries, args.UploadTools, args.BuildToolsTarball != nil,
	)
	if errors.IsNotFound(err) {
		return errors.New(noToolsMessage)
	} else if err != nil {
		return err
	}

	if lxcMTU, ok := cfg.LXCDefaultMTU(); ok {
		logger.Debugf("using MTU %v for all created LXC containers' network interfaces", lxcMTU)
	}

	imageMetadata, err := bootstrapImageMetadata(
		environ, availableTools,
		args.BootstrapImage,
		&customImageMetadata,
	)
	if err != nil {
		return errors.Trace(err)
	}

	// If we're uploading, we must override agent-version;
	// if we're not uploading, we want to ensure we have an
	// agent-version set anyway, to appease FinishInstanceConfig.
	// In the latter case, setBootstrapTools will later set
	// agent-version to the correct thing.
	agentVersion := jujuversion.Current
	if args.AgentVersion != nil {
		agentVersion = *args.AgentVersion
	}
	if cfg, err = cfg.Apply(map[string]interface{}{
		"agent-version": agentVersion.String(),
	}); err != nil {
		return err
	}
	if err = environ.SetConfig(cfg); err != nil {
		return err
	}

	ctx.Infof("Starting new instance for initial controller")
	result, err := environ.Bootstrap(ctx, environs.BootstrapParams{
		ModelConstraints:     args.ModelConstraints,
		BootstrapConstraints: args.BootstrapConstraints,
		BootstrapSeries:      args.BootstrapSeries,
		Placement:            args.Placement,
		AvailableTools:       availableTools,
		ImageMetadata:        imageMetadata,
	})
	if err != nil {
		return err
	}

	matchingTools, err := availableTools.Match(coretools.Filter{
		Arch:   result.Arch,
		Series: result.Series,
	})
	if err != nil {
		return err
	}
	selectedToolsList, err := setBootstrapTools(environ, matchingTools)
	if err != nil {
		return err
	}
	havePrepackaged := false
	for i, selectedTools := range selectedToolsList {
		if selectedTools.URL != "" {
			havePrepackaged = true
			continue
		}
		ctx.Infof("Building tools to upload (%s)", selectedTools.Version)
		builtTools, err := args.BuildToolsTarball(&selectedTools.Version.Number, cfg.AgentStream())
		if err != nil {
			return errors.Annotate(err, "cannot upload bootstrap tools")
		}
		defer os.RemoveAll(builtTools.Dir)
		filename := filepath.Join(builtTools.Dir, builtTools.StorageName)
		selectedTools.URL = fmt.Sprintf("file://%s", filename)
		selectedTools.Size = builtTools.Size
		selectedTools.SHA256 = builtTools.Sha256Hash
		selectedToolsList[i] = selectedTools
	}
	if !havePrepackaged && !args.UploadTools {
		// There are no prepackaged agents, so we must upload
		// even though the user didn't ask for it. We only do
		// this when the image-stream is not "released" and
		// the agent version hasn't been specified.
		logger.Warningf("no prepackaged tools available")
	}

	ctx.Infof("Installing Juju agent on bootstrap instance")
	publicKey, err := userPublicSigningKey()
	if err != nil {
		return err
	}
	instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(
		args.BootstrapConstraints, args.ModelConstraints, result.Series, publicKey,
	)
	if err != nil {
		return err
	}
	if err := instanceConfig.SetTools(selectedToolsList); err != nil {
		return errors.Trace(err)
	}
	instanceConfig.CustomImageMetadata = customImageMetadata
	instanceConfig.HostedModelConfig = args.HostedModelConfig

	instanceConfig.GUI = guiArchive(args.GUIDataSourceBaseURL, func(msg string) {
		ctx.Infof(msg)
	})

	if err := result.Finalize(ctx, instanceConfig); err != nil {
		return err
	}
	ctx.Infof("Bootstrap agent installed")
	return nil
}
Example #14
func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) {
	series := config.PreferredSeries(e.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return nil, err
	}
	arch := availableTools.Arches()[0]

	defer delay()
	if err := e.checkBroken("Bootstrap"); err != nil {
		return nil, err
	}
	network.SetPreferIPv6(e.Config().PreferIPv6())
	password := e.Config().AdminSecret()
	if password == "" {
		return nil, fmt.Errorf("admin-secret is required for bootstrap")
	}
	if _, ok := e.Config().CACert(); !ok {
		return nil, fmt.Errorf("no CA certificate in model configuration")
	}

	logger.Infof("would pick tools from %s", availableTools)
	cfg, err := environs.BootstrapConfig(e.Config())
	if err != nil {
		return nil, fmt.Errorf("cannot make bootstrap config: %v", err)
	}

	estate, err := e.state()
	if err != nil {
		return nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if estate.bootstrapped {
		return nil, fmt.Errorf("model is already bootstrapped")
	}
	estate.preferIPv6 = e.Config().PreferIPv6()

	// Create an instance for the bootstrap node.
	logger.Infof("creating bootstrap instance")
	i := &dummyInstance{
		id:           BootstrapInstanceId,
		addresses:    network.NewAddresses("localhost"),
		ports:        make(map[network.PortRange]bool),
		machineId:    agent.BootstrapMachineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
		controller:   true,
	}
	estate.insts[i.id] = i

	if e.ecfg().controller() {
		// TODO(rog) factor out relevant code from cmd/jujud/bootstrap.go
		// so that we can call it here.

		info := stateInfo(estate.preferIPv6)
		// Since the admin user isn't setup until after here,
		// the password in the info structure is empty, so the admin
		// user is constructed with an empty password here.
		// It is set just below.
		st, err := state.Initialize(
			names.NewUserTag("admin@local"), info, cfg,
			mongotest.DialOpts(), estate.statePolicy)
		if err != nil {
			panic(err)
		}
		if err := st.SetModelConstraints(args.ModelConstraints); err != nil {
			panic(err)
		}
		if err := st.SetAdminMongoPassword(password); err != nil {
			panic(err)
		}
		if err := st.MongoSession().DB("admin").Login("admin", password); err != nil {
			panic(err)
		}
		env, err := st.Model()
		if err != nil {
			panic(err)
		}
		owner, err := st.User(env.Owner())
		if err != nil {
			panic(err)
		}
		// We log this out for test purposes only. No one in real life can use
		// a dummy provider for anything other than testing, so logging the password
		// here is fine.
		logger.Debugf("setting password for %q to %q", owner.Name(), password)
		owner.SetPassword(password)

		estate.apiStatePool = state.NewStatePool(st)

		estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{
			Cert:      []byte(testing.ServerCert),
			Key:       []byte(testing.ServerKey),
			Tag:       names.NewMachineTag("0"),
			DataDir:   DataDir,
			LogDir:    LogDir,
			StatePool: estate.apiStatePool,
		})
		if err != nil {
			panic(err)
		}
		estate.apiState = st
	}
	estate.bootstrapped = true
	estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args}
	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		estate.ops <- OpFinalizeBootstrap{Context: ctx, Env: e.name, InstanceConfig: icfg}
		return nil
	}

	bsResult := &environs.BootstrapResult{
		Arch:     arch,
		Series:   series,
		Finalize: finalize,
	}
	return bsResult, nil
}