Code example #1
File: cloudinit_test.go  Project: kapilt/juju
func (*cloudinitSuite) TestWindowsCloudInit(c *gc.C) {
	for i, test := range windowsCloudinitTests {
		c.Logf("test %d", i)
		dataDir, err := paths.DataDir(test.cfg.Series)
		c.Assert(err, gc.IsNil)
		logDir, err := paths.LogDir(test.cfg.Series)
		c.Assert(err, gc.IsNil)

		test.cfg.DataDir = dataDir
		test.cfg.LogDir = path.Join(logDir, "juju")

		ci := coreCloudinit.New()
		udata, err := cloudinit.NewUserdataConfig(&test.cfg, ci)

		c.Assert(err, gc.IsNil)
		err = udata.Configure()

		c.Assert(err, gc.IsNil)
		c.Check(ci, gc.NotNil)
		data, err := udata.Render()
		c.Assert(err, gc.IsNil)

		stringData := strings.Replace(string(data), "\r\n", "\n", -1)
		stringData = strings.Replace(stringData, "\t", " ", -1)
		stringData = strings.TrimSpace(stringData)

		compareString := strings.Replace(string(test.expectScripts), "\r\n", "\n", -1)
		compareString = strings.Replace(compareString, "\t", " ", -1)
		compareString = strings.TrimSpace(compareString)

		c.Assert(stringData, gc.Equals, compareString)

	}
}
Code example #2
func (s *format_1_16Suite) TestMissingAttributes(c *gc.C) {
	logDir, err := paths.LogDir(series.HostSeries())
	c.Assert(err, jc.ErrorIsNil)
	realDataDir, err := paths.DataDir(series.HostSeries())
	c.Assert(err, jc.ErrorIsNil)

	realDataDir = filepath.FromSlash(realDataDir)
	logPath := filepath.Join(logDir, "juju")
	logPath = filepath.FromSlash(logPath)

	dataDir := c.MkDir()
	formatPath := filepath.Join(dataDir, legacyFormatFilename)
	err = utils.AtomicWriteFile(formatPath, []byte(legacyFormatFileContents), 0600)
	c.Assert(err, jc.ErrorIsNil)
	configPath := filepath.Join(dataDir, agentConfigFilename)

	err = utils.AtomicWriteFile(configPath, []byte(configDataWithoutNewAttributes), 0600)
	c.Assert(err, jc.ErrorIsNil)
	readConfig, err := ReadConfig(configPath)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(readConfig.UpgradedToVersion(), gc.Equals, version.MustParse("1.16.0"))
	configLogDir := filepath.FromSlash(readConfig.LogDir())
	configDataDir := filepath.FromSlash(readConfig.DataDir())

	c.Assert(configLogDir, gc.Equals, logPath)
	c.Assert(configDataDir, gc.Equals, realDataDir)
	// Test data doesn't include a StateServerKey so StateServingInfo
	// should *not* be available
	_, available := readConfig.StateServingInfo()
	c.Assert(available, jc.IsFalse)
}
Code example #3
func (s *format_1_18Suite) TestMissingAttributes(c *gc.C) {
	logDir, err := paths.LogDir(series.HostSeries())
	c.Assert(err, jc.ErrorIsNil)
	realDataDir, err := paths.DataDir(series.HostSeries())
	c.Assert(err, jc.ErrorIsNil)

	realDataDir = filepath.FromSlash(realDataDir)
	logPath := filepath.Join(logDir, "juju")
	logPath = filepath.FromSlash(logPath)

	dataDir := c.MkDir()
	configPath := filepath.Join(dataDir, agentConfigFilename)
	err = utils.AtomicWriteFile(configPath, []byte(configData1_18WithoutUpgradedToVersion), 0600)
	c.Assert(err, jc.ErrorIsNil)
	readConfig, err := ReadConfig(configPath)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(readConfig.UpgradedToVersion(), gc.Equals, version.MustParse("1.16.0"))
	configLogDir := filepath.FromSlash(readConfig.LogDir())
	configDataDir := filepath.FromSlash(readConfig.DataDir())
	c.Assert(configLogDir, gc.Equals, logPath)
	c.Assert(configDataDir, gc.Equals, realDataDir)
	c.Assert(readConfig.PreferIPv6(), jc.IsFalse)
	// The api info doesn't have the environment tag set.
	apiInfo, ok := readConfig.APIInfo()
	c.Assert(ok, jc.IsTrue)
	c.Assert(apiInfo.EnvironTag.Id(), gc.Equals, "")
}
Code example #4
File: service_test.go  Project: Pankov404/juju
func (s *initSystemSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)

	dataDir, err := paths.DataDir("vivid")
	c.Assert(err, jc.ErrorIsNil)
	s.dataDir = dataDir
	// Patch things out.
	s.ch = systemd.PatchNewChan(s)

	s.stub = &testing.Stub{}
	s.conn = systemd.PatchNewConn(s, s.stub)
	s.fops = systemd.PatchFileOps(s, s.stub)
	s.exec = systemd.PatchExec(s, s.stub)

	// Set up the service.
	tagStr := "machine-0"
	tag, err := names.ParseTag(tagStr)
	c.Assert(err, jc.ErrorIsNil)
	s.tag = tag
	s.name = "jujud-" + tagStr
	s.conf = common.Conf{
		Desc:      "juju agent for " + tagStr,
		ExecStart: jujud + " " + tagStr,
	}
	s.service = s.newService(c)

	// Reset any incidental calls.
	s.stub.ResetCalls()
}
Code example #5
File: service.go  Project: ktsakalozos/juju
func newService(name string, conf common.Conf, initSystem, series string) (Service, error) {
	switch initSystem {
	case InitSystemWindows:
		svc, err := windows.NewService(name, conf)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to wrap service %q", name)
		}
		return svc, nil
	case InitSystemUpstart:
		return upstart.NewService(name, conf), nil
	case InitSystemSystemd:
		dataDir, err := paths.DataDir(series)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to find juju data dir for service %q", name)
		}

		svc, err := systemd.NewService(name, conf, dataDir)
		if err != nil {
			return nil, errors.Annotatef(err, "failed to wrap service %q", name)
		}
		return svc, nil
	default:
		return nil, errors.NotFoundf("init system %q", initSystem)
	}
}
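The switch above is easiest to read from a call site. Below is a minimal sketch under assumed values: the service name, Conf fields, and series are illustrative and do not come from the excerpt; Install is called on the returned Service the same way the Restore examples later in this list do.

// installAgentService is a hypothetical helper showing one way the
// dispatcher above could be driven. All literals are illustrative.
func installAgentService() error {
	conf := common.Conf{
		Desc:      "juju agent for machine-0",
		ExecStart: "/var/lib/juju/tools/machine-0/jujud machine --machine-id 0",
	}
	svc, err := newService("jujud-machine-0", conf, InitSystemSystemd, "xenial")
	if err != nil {
		return errors.Trace(err)
	}
	// Install comes from the returned Service, as used in the Restore examples below.
	return errors.Trace(svc.Install())
}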
Code example #6
File: providerinit_test.go  Project: bac/juju
func (s *CloudInitSuite) TestWindowsUserdataEncoding(c *gc.C) {
	series := "win8"
	metricsSpoolDir := must(paths.MetricsSpoolDir("win8"))
	toolsList := tools.List{
		&tools.Tools{
			URL:     "http://foo.com/tools/released/juju1.2.3-win8-amd64.tgz",
			Version: version.MustParseBinary("1.2.3-win8-amd64"),
			Size:    10,
			SHA256:  "1234",
		},
	}
	dataDir, err := paths.DataDir(series)
	c.Assert(err, jc.ErrorIsNil)
	logDir, err := paths.LogDir(series)
	c.Assert(err, jc.ErrorIsNil)

	cfg := instancecfg.InstanceConfig{
		ControllerTag:    testing.ControllerTag,
		MachineId:        "10",
		AgentEnvironment: map[string]string{agent.ProviderType: "dummy"},
		Series:           series,
		Jobs:             []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
		MachineNonce:     "FAKE_NONCE",
		APIInfo: &api.Info{
			Addrs:    []string{"state-addr.testing.invalid:54321"},
			Password: "******",
			CACert:   "CA CERT\n" + testing.CACert,
			Tag:      names.NewMachineTag("10"),
			ModelTag: testing.ModelTag,
		},
		MachineAgentServiceName: "jujud-machine-10",
		DataDir:                 dataDir,
		LogDir:                  path.Join(logDir, "juju"),
		MetricsSpoolDir:         metricsSpoolDir,
		CloudInitOutputLog:      path.Join(logDir, "cloud-init-output.log"),
	}
	err = cfg.SetTools(toolsList)
	c.Assert(err, jc.ErrorIsNil)

	ci, err := cloudinit.New("win8")
	c.Assert(err, jc.ErrorIsNil)

	udata, err := cloudconfig.NewUserdataConfig(&cfg, ci)
	c.Assert(err, jc.ErrorIsNil)

	err = udata.Configure()
	c.Assert(err, jc.ErrorIsNil)

	data, err := ci.RenderYAML()
	c.Assert(err, jc.ErrorIsNil)

	cicompose, err := cloudinit.New("win8")
	c.Assert(err, jc.ErrorIsNil)

	base64Data := base64.StdEncoding.EncodeToString(utils.Gzip(data))
	got := []byte(fmt.Sprintf(cloudconfig.UserDataScript, base64Data))
	expected, err := providerinit.ComposeUserData(&cfg, cicompose, openstack.OpenstackRenderer{})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(string(got), gc.Equals, string(expected))
}
Code example #7
File: instancecfg.go  Project: imoapps/juju
// NewInstanceConfig sets up a basic machine configuration, for a
// non-bootstrap node. You'll still need to supply more information,
// but this takes care of the fixed entries and the ones that are
// always needed.
func NewInstanceConfig(
	machineID,
	machineNonce,
	imageStream,
	series,
	publicImageSigningKey string,
	secureServerConnections bool,
	networks []string,
	mongoInfo *mongo.MongoInfo,
	apiInfo *api.Info,
) (*InstanceConfig, error) {
	dataDir, err := paths.DataDir(series)
	if err != nil {
		return nil, err
	}
	logDir, err := paths.LogDir(series)
	if err != nil {
		return nil, err
	}
	metricsSpoolDir, err := paths.MetricsSpoolDir(series)
	if err != nil {
		return nil, err
	}
	cloudInitOutputLog := path.Join(logDir, "cloud-init-output.log")
	icfg := &InstanceConfig{
		// Fixed entries.
		DataDir:                 dataDir,
		LogDir:                  path.Join(logDir, "juju"),
		MetricsSpoolDir:         metricsSpoolDir,
		Jobs:                    []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
		CloudInitOutputLog:      cloudInitOutputLog,
		MachineAgentServiceName: "jujud-" + names.NewMachineTag(machineID).String(),
		Series:                  series,
		Tags:                    map[string]string{},

		// Parameter entries.
		MachineId:             machineID,
		MachineNonce:          machineNonce,
		Networks:              networks,
		MongoInfo:             mongoInfo,
		APIInfo:               apiInfo,
		ImageStream:           imageStream,
		PublicImageSigningKey: publicImageSigningKey,
		AgentEnvironment: map[string]string{
			agent.AllowsSecureConnection: strconv.FormatBool(secureServerConnections),
		},
	}
	return icfg, nil
}
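For orientation, here is a hedged call-site sketch: the constructor only fills in the fixed directories and jobs, so the caller still sets provider-specific fields afterwards. Every literal below (machine id, nonce, stream, series) is a placeholder, and mongoInfo/apiInfo are assumed to be built elsewhere.

	icfg, err := NewInstanceConfig(
		"10",         // machineID (placeholder)
		"fake-nonce", // machineNonce (placeholder)
		"released",   // imageStream (placeholder)
		"trusty",     // series (placeholder)
		"",           // publicImageSigningKey
		true,         // secureServerConnections
		nil,          // networks
		mongoInfo,    // *mongo.MongoInfo, assumed to exist
		apiInfo,      // *api.Info, assumed to exist
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// icfg.DataDir, icfg.LogDir and icfg.MetricsSpoolDir now hold the
	// series defaults resolved through juju/paths.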
Code example #8
File: util_test.go  Project: howbazaar/juju
func (*utilSuite) TestMachineInfoCloudinitRunCmd(c *gc.C) {
	hostname := "hostname"
	info := machineInfo{hostname}
	filename := "/var/lib/juju/MAASmachine.txt"
	dataDir, err := paths.DataDir("quantal")
	c.Assert(err, jc.ErrorIsNil)
	cloudcfg, err := cloudinit.New("quantal")
	c.Assert(err, jc.ErrorIsNil)
	script, err := info.cloudinitRunCmd(cloudcfg)
	c.Assert(err, jc.ErrorIsNil)
	yaml, err := goyaml.Marshal(info)
	c.Assert(err, jc.ErrorIsNil)
	expected := fmt.Sprintf("mkdir -p '%s'\ncat > '%s' << 'EOF'\n'%s'\nEOF\nchmod 0755 '%s'", dataDir, filename, yaml, filename)
	c.Check(script, gc.Equals, expected)
}
Code example #9
File: agent_test.go  Project: exekias/juju
// CheckAgentCommand is a utility function for verifying that common agent
// options are handled by a Command; it returns an instance of that
// command pre-parsed, with any mandatory flags added.
func CheckAgentCommand(c *gc.C, create acCreator, args []string) cmd.Command {
	com, conf := create()
	err := coretesting.InitCommand(com, args)
	dataDir, err := paths.DataDir(series.HostSeries())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(conf.DataDir(), gc.Equals, dataDir)
	badArgs := append(args, "--data-dir", "")
	com, _ = create()
	err = coretesting.InitCommand(com, badArgs)
	c.Assert(err, gc.ErrorMatches, "--data-dir option must be set")

	args = append(args, "--data-dir", "jd")
	com, conf = create()
	c.Assert(coretesting.InitCommand(com, args), gc.IsNil)
	c.Assert(conf.DataDir(), gc.Equals, "jd")
	return com
}
Code example #10
File: util.go  Project: imoapps/juju
// cloudinitRunCmd returns the shell command that, when run, will create the
// "machine info" file containing the hostname of a machine.
// That command is destined to be used by cloudinit.
func (info *machineInfo) cloudinitRunCmd(cloudcfg cloudinit.CloudConfig) (string, error) {
	dataDir, err := paths.DataDir(cloudcfg.GetSeries())
	if err != nil {
		return "", errors.Trace(err)
	}
	yaml, err := goyaml.Marshal(info)
	if err != nil {
		return "", errors.Trace(err)
	}
	renderer := cloudcfg.ShellRenderer()
	fileName := renderer.Join(renderer.FromSlash(dataDir), "MAASmachine.txt")
	script := renderer.MkdirAll(dataDir)
	contents := renderer.Quote(string(yaml))
	script = append(script, renderer.WriteFile(fileName, []byte(contents))...)
	script = append(script, renderer.Chmod(fileName, 0755)...)
	return strings.Join(script, "\n"), nil
}
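For context, the rendered script is normally handed straight back to the cloud-init config. A minimal call-site sketch follows, reusing constructors that appear in the other examples here; the series and hostname are illustrative.

	cloudcfg, err := cloudinit.New("trusty") // cloudinit.New is shown in the test examples above
	if err != nil {
		return errors.Trace(err)
	}
	info := &machineInfo{Hostname: "maas-node-1"} // illustrative hostname
	script, err := info.cloudinitRunCmd(cloudcfg)
	if err != nil {
		return errors.Trace(err)
	}
	// AddRunCmd is the same call used in the providerinit test example further down.
	cloudcfg.AddRunCmd(script)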
Code example #11
File: util.go  Project: kapilt/juju
// cloudinitRunCmd returns the shell command that, when run, will create the
// "machine info" file containing the hostname of a machine.
// That command is destined to be used by cloudinit.
func (info *machineInfo) cloudinitRunCmd(series string) (string, error) {
	dataDir, err := paths.DataDir(series)
	if err != nil {
		return "", err
	}
	renderer, err := cloudinit.NewRenderer(series)
	if err != nil {
		return "", err
	}

	yaml, err := goyaml.Marshal(info)
	if err != nil {
		return "", err
	}
	fileName := renderer.PathJoin(renderer.FromSlash(dataDir), "MAASmachine.txt")
	script := renderer.Mkdir(dataDir)
	contents := utils.ShQuote(string(yaml))
	script = append(script, renderer.WriteFile(fileName, contents, 0755)...)
	return strings.Join(script, "\n"), nil
}
Code example #12
File: instancecfg.go  Project: bac/juju
// NewInstanceConfig sets up a basic machine configuration, for a
// non-bootstrap node. You'll still need to supply more information,
// but this takes care of the fixed entries and the ones that are
// always needed.
func NewInstanceConfig(
	controllerTag names.ControllerTag,
	machineID,
	machineNonce,
	imageStream,
	series string,
	apiInfo *api.Info,
) (*InstanceConfig, error) {
	dataDir, err := paths.DataDir(series)
	if err != nil {
		return nil, err
	}
	logDir, err := paths.LogDir(series)
	if err != nil {
		return nil, err
	}
	metricsSpoolDir, err := paths.MetricsSpoolDir(series)
	if err != nil {
		return nil, err
	}
	cloudInitOutputLog := path.Join(logDir, "cloud-init-output.log")
	icfg := &InstanceConfig{
		// Fixed entries.
		DataDir:                 dataDir,
		LogDir:                  path.Join(logDir, "juju"),
		MetricsSpoolDir:         metricsSpoolDir,
		Jobs:                    []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
		CloudInitOutputLog:      cloudInitOutputLog,
		MachineAgentServiceName: "jujud-" + names.NewMachineTag(machineID).String(),
		Series:                  series,
		Tags:                    map[string]string{},

		// Parameter entries.
		ControllerTag: controllerTag,
		MachineId:     machineID,
		MachineNonce:  machineNonce,
		APIInfo:       apiInfo,
		ImageStream:   imageStream,
	}
	return icfg, nil
}
Code example #13
File: cloudinit.go  Project: kapilt/juju
// NewMachineConfig sets up a basic machine configuration, for a
// non-bootstrap node. You'll still need to supply more information,
// but this takes care of the fixed entries and the ones that are
// always needed.
func NewMachineConfig(
	machineID,
	machineNonce,
	imageStream,
	series string,
	networks []string,
	mongoInfo *mongo.MongoInfo,
	apiInfo *api.Info,
) (*cloudinit.MachineConfig, error) {
	dataDir, err := paths.DataDir(series)
	if err != nil {
		return nil, err
	}
	logDir, err := paths.LogDir(series)
	if err != nil {
		return nil, err
	}
	cloudInitOutputLog := path.Join(logDir, "cloud-init-output.log")
	mcfg := &cloudinit.MachineConfig{
		// Fixed entries.
		DataDir:                 dataDir,
		LogDir:                  path.Join(logDir, "juju"),
		Jobs:                    []params.MachineJob{params.JobHostUnits},
		CloudInitOutputLog:      cloudInitOutputLog,
		MachineAgentServiceName: "jujud-" + names.NewMachineTag(machineID).String(),
		Series:                  series,

		// Parameter entries.
		MachineId:    machineID,
		MachineNonce: machineNonce,
		Networks:     networks,
		MongoInfo:    mongoInfo,
		APIInfo:      apiInfo,
		ImageStream:  imageStream,
	}
	return mcfg, nil
}
Code example #14
File: backups_linux.go  Project: kat-co/juju
// Restore handles either returning or creating a controller to a backed up status:
// * extracts the content of the given backup file and:
// * runs mongorestore with the backed up mongo dump
// * updates and writes configuration files
// * updates existing db entries to make sure they hold no references to
// old instances
// * updates config in all agents.
func (b *backups) Restore(backupId string, dbInfo *DBInfo, args RestoreArgs) (names.Tag, error) {
	meta, backupReader, err := b.Get(backupId)
	if err != nil {
		return nil, errors.Annotatef(err, "could not fetch backup %q", backupId)
	}

	defer backupReader.Close()

	workspace, err := NewArchiveWorkspaceReader(backupReader)
	if err != nil {
		return nil, errors.Annotate(err, "cannot unpack backup file")
	}
	defer workspace.Close()

	// This might actually work, but we don't have a guarantee so we don't allow it.
	if meta.Origin.Series != args.NewInstSeries {
		return nil, errors.Errorf("cannot restore a backup made in a machine with series %q into a machine with series %q, %#v", meta.Origin.Series, args.NewInstSeries, meta)
	}

	// TODO(perrito666) Create a compatibility table of sorts.
	vers := meta.Origin.Version
	if vers.Major != 2 {
		return nil, errors.Errorf("Juju version %v cannot restore backups made using Juju version %v", version.Current.Minor, vers)
	}
	backupMachine := names.NewMachineTag(meta.Origin.Machine)

	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	oldDatadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}

	var oldAgentConfig agent.ConfigSetterWriter
	oldAgentConfigFile := agent.ConfigPath(oldDatadir, args.NewInstTag)
	if oldAgentConfig, err = agent.ReadConfig(oldAgentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load old agent config from disk")
	}

	logger.Infof("stopping juju-db")
	if err = mongo.StopService(); err != nil {
		return nil, errors.Annotate(err, "failed to stop mongo")
	}

	// delete all the files to be replaced
	if err := PrepareMachineForRestore(oldAgentConfig.MongoVersion()); err != nil {
		return nil, errors.Annotate(err, "cannot delete existing files")
	}
	logger.Infof("deleted old files to place new")

	if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil {
		return nil, errors.Annotate(err, "cannot obtain system files from backup")
	}
	logger.Infof("placed new restore files")

	var agentConfig agent.ConfigSetterWriter
	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	datadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}
	agentConfigFile := agent.ConfigPath(datadir, backupMachine)
	if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load agent config from disk")
	}
	ssi, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot determine state serving info")
	}
	APIHostPorts := network.NewHostPorts(ssi.APIPort, args.PrivateAddress, args.PublicAddress)
	agentConfig.SetAPIHostPorts([][]network.HostPort{APIHostPorts})
	if err := agentConfig.Write(); err != nil {
		return nil, errors.Annotate(err, "cannot write new agent configuration")
	}
	logger.Infof("wrote new agent config for restore")

	if backupMachine.Id() != "0" {
		logger.Infof("extra work needed backup belongs to %q machine", backupMachine.String())
		serviceName := "jujud-" + agentConfig.Tag().String()
		aInfo := service.NewMachineAgentInfo(
			agentConfig.Tag().Id(),
			datadir,
			paths.MustSucceed(paths.LogDir(args.NewInstSeries)),
		)

		// TODO(perrito666) renderer should have a RendererForSeries, for the moment
		// restore only works on linuxes.
		renderer, _ := shell.NewRenderer("bash")
		serviceAgentConf := service.AgentConf(aInfo, renderer)
		svc, err := service.NewService(serviceName, serviceAgentConf, args.NewInstSeries)
		if err != nil {
			return nil, errors.Annotate(err, "cannot generate service for the restored agent.")
		}
		if err := svc.Install(); err != nil {
			return nil, errors.Annotate(err, "cannot install service for the restored agent.")
		}
		logger.Infof("new machine service")
	}

	logger.Infof("mongo service will be reinstalled to ensure its presence")
	if err := ensureMongoService(agentConfig); err != nil {
		return nil, errors.Annotate(err, "failed to reinstall service for juju-db")
	}

	dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information")
	}

	oldDialInfo, err := newDialInfo(args.PrivateAddress, oldAgentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information for existing mongo")
	}

	logger.Infof("new mongo will be restored")
	mgoVer := agentConfig.MongoVersion()

	tagUser, tagUserPassword, err := tagUserCredentials(agentConfig)
	if err != nil {
		return nil, errors.Trace(err)
	}
	rArgs := RestorerArgs{
		DialInfo:        dialInfo,
		Version:         mgoVer,
		TagUser:         tagUser,
		TagUserPassword: tagUserPassword,
		RunCommandFn:    runCommand,
		StartMongo:      mongo.StartService,
		StopMongo:       mongo.StopService,
		NewMongoSession: NewMongoSession,
		GetDB:           GetDB,
	}

	// Restore mongodb from backup
	restorer, err := NewDBRestorer(rArgs)
	if err != nil {
		return nil, errors.Annotate(err, "error preparing for restore")
	}
	if err := restorer.Restore(workspace.DBDumpDir, oldDialInfo); err != nil {
		return nil, errors.Annotate(err, "error restoring state from backup")
	}

	// Re-start replicaset with the new value for server address
	logger.Infof("restarting replicaset")
	memberHostPort := net.JoinHostPort(args.PrivateAddress, strconv.Itoa(ssi.StatePort))
	err = resetReplicaSet(dialInfo, memberHostPort)
	if err != nil {
		return nil, errors.Annotate(err, "cannot reset replicaSet")
	}

	err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update mongo entries")
	}

	// From here we work with the restored controller
	mgoInfo, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, errors.Errorf("cannot retrieve info to connect to mongo")
	}

	st, err := newStateConnection(agentConfig.Controller(), agentConfig.Model(), mgoInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer st.Close()

	machine, err := st.Machine(backupMachine.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}

	logger.Infof("updating local machine addresses")
	err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update api server machine addresses")
	}
	// Update the APIHostPorts as well. Under normal circumstances the API
	// Host Ports are only set during bootstrap and by the peergrouper worker.
	// Unfortunately right now, the peer grouper is busy restarting and isn't
	// guaranteed to set the host ports before the remote machines we are
	// about to tell about us. If it doesn't, the remote machine gets its
	// agent.conf file updated with this new machine's IP address, it then
	// starts, and the "api-address-updater" worker asks for the api host
	// ports, and gets told the old IP address of the machine that was backed
	// up. It then writes this incorrect file to its agent.conf file, which
	// causes it to attempt to reconnect to the api server. Unfortunately it
	// now has the wrong address and can never get the correct one.
	// So, we set it explicitly here.
	if err := st.SetAPIHostPorts([][]network.HostPort{APIHostPorts}); err != nil {
		return nil, errors.Annotate(err, "cannot update api server host ports")
	}

	// update all agents known to the new controller.
	// TODO(perrito666): We should never stop process because of this.
	// updateAllMachines will not return errors for individual
	// agent update failures
	models, err := st.AllModels()
	if err != nil {
		return nil, errors.Trace(err)
	}
	machines := []machineModel{}
	for _, model := range models {
		machinesForModel, err := st.AllMachinesFor(model.UUID())
		if err != nil {
			return nil, errors.Trace(err)
		}
		for _, machine := range machinesForModel {
			machines = append(machines, machineModel{machine: machine, model: model})
		}
	}
	logger.Infof("updating other machine addresses")
	if err := updateAllMachines(args.PrivateAddress, args.PublicAddress, machines); err != nil {
		return nil, errors.Annotate(err, "cannot update agents")
	}

	// Mark restoreInfo as Finished so upon restart of the apiserver
	// the client can reconnect and determine if we were successful.
	info := st.RestoreInfo()
	// In mongo 3.2, even though the backup is made with --oplog, there
	// are stale transactions in this collection.
	if err := info.PurgeTxn(); err != nil {
		return nil, errors.Annotate(err, "cannot purge stale transactions")
	}
	if err = info.SetStatus(state.RestoreFinished); err != nil {
		return nil, errors.Annotate(err, "failed to set status to finished")
	}

	return backupMachine, nil
}
Code example #15
File: customdata_test.go  Project: ktsakalozos/juju
type customDataSuite struct {
	testing.BaseSuite
}

var _ = gc.Suite(&customDataSuite{})

func must(s string, err error) string {
	if err != nil {
		panic(err)
	}
	return s
}

var logDir = must(paths.LogDir("precise"))
var metricsSpoolDir = must(paths.MetricsSpoolDir("precise"))
var dataDir = must(paths.DataDir("precise"))
var cloudInitOutputLog = path.Join(logDir, "cloud-init-output.log")

// makeInstanceConfig produces a valid cloudinit machine config.
func makeInstanceConfig(c *gc.C) *instancecfg.InstanceConfig {
	machineId := "0"
	machineTag := names.NewMachineTag(machineId)
	return &instancecfg.InstanceConfig{
		MachineId:       machineId,
		MachineNonce:    "gxshasqlnng",
		DataDir:         dataDir,
		LogDir:          logDir,
		MetricsSpoolDir: metricsSpoolDir,
		Jobs: []multiwatcher.MachineJob{
			multiwatcher.JobManageEnviron,
			multiwatcher.JobHostUnits,
Code example #16
File: agent.go  Project: klyachin/juju
	"github.com/juju/juju/juju/paths"
	"github.com/juju/juju/mongo"
	"github.com/juju/juju/network"
	"github.com/juju/juju/state/api"
	"github.com/juju/juju/state/api/params"
	"github.com/juju/juju/version"
)

var logger = loggo.GetLogger("juju.agent")

// logDir returns a filesystem path to the location where juju
// may create a folder containing its logs
var logDir = paths.MustSucceed(paths.LogDir(version.Current.Series))

// dataDir returns the default data directory for this running system
var dataDir = paths.MustSucceed(paths.DataDir(version.Current.Series))

// DefaultLogDir defines the default log directory for juju agents.
// It's defined as a variable so it could be overridden in tests.
var DefaultLogDir = path.Join(logDir, "juju")

// DefaultDataDir defines the default data directory for juju agents.
// It's defined as a variable so it could be overridden in tests.
var DefaultDataDir = dataDir

// SystemIdentity is the name of the file where the environment SSH key is kept.
const SystemIdentity = "system-identity"

const (
	LxcBridge        = "LXC_BRIDGE"
	ProviderType     = "PROVIDER_TYPE"
Code example #17
File: backups_linux.go  Project: Pankov404/juju
// Restore handles either returning or creating a state server to a backed up status:
// * extracts the content of the given backup file and:
// * runs mongorestore with the backed up mongo dump
// * updates and writes configuration files
// * updates existing db entries to make sure they hold no references to
// old instances
// * updates config in all agents.
func (b *backups) Restore(backupId string, args RestoreArgs) error {
	meta, backupReader, err := b.Get(backupId)
	if err != nil {
		return errors.Annotatef(err, "could not fetch backup %q", backupId)
	}

	defer backupReader.Close()

	workspace, err := NewArchiveWorkspaceReader(backupReader)
	if err != nil {
		return errors.Annotate(err, "cannot unpack backup file")
	}
	defer workspace.Close()

	// TODO(perrito666) Create a compatibility table of sorts.
	version := meta.Origin.Version
	backupMachine := names.NewMachineTag(meta.Origin.Machine)

	// delete all the files to be replaced
	if err := PrepareMachineForRestore(); err != nil {
		return errors.Annotate(err, "cannot delete existing files")
	}

	if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil {
		return errors.Annotate(err, "cannot obtain system files from backup")
	}

	if err := updateBackupMachineTag(backupMachine, args.NewInstTag); err != nil {
		return errors.Annotate(err, "cannot update paths to reflect current machine id")
	}

	var agentConfig agent.ConfigSetterWriter
	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	datadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}
	agentConfigFile := agent.ConfigPath(datadir, args.NewInstTag)
	if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil {
		return errors.Annotate(err, "cannot load agent config from disk")
	}
	ssi, ok := agentConfig.StateServingInfo()
	if !ok {
		return errors.Errorf("cannot determine state serving info")
	}
	// The machine tag might have changed, we update it.
	agentConfig.SetValue("tag", args.NewInstTag.String())
	apiHostPorts := [][]network.HostPort{
		network.NewHostPorts(ssi.APIPort, args.PrivateAddress),
	}
	agentConfig.SetAPIHostPorts(apiHostPorts)
	if err := agentConfig.Write(); err != nil {
		return errors.Annotate(err, "cannot write new agent configuration")
	}

	// Restore mongodb from backup
	if err := placeNewMongo(workspace.DBDumpDir, version); err != nil {
		return errors.Annotate(err, "error restoring state from backup")
	}

	// Re-start replicaset with the new value for server address
	dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig)
	if err != nil {
		return errors.Annotate(err, "cannot produce dial information")
	}

	memberHostPort := fmt.Sprintf("%s:%d", args.PrivateAddress, ssi.StatePort)
	err = resetReplicaSet(dialInfo, memberHostPort)
	if err != nil {
		return errors.Annotate(err, "cannot reset replicaSet")
	}

	err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo)
	if err != nil {
		return errors.Annotate(err, "cannot update mongo entries")
	}

	// From here we work with the restored state server
	mgoInfo, ok := agentConfig.MongoInfo()
	if !ok {
		return errors.Errorf("cannot retrieve info to connect to mongo")
	}

	st, err := newStateConnection(mgoInfo)
	if err != nil {
		return errors.Trace(err)
	}
	defer st.Close()

	machine, err := st.Machine(args.NewInstTag.Id())
	if err != nil {
		return errors.Trace(err)
	}

	err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress)
	if err != nil {
		return errors.Annotate(err, "cannot update api server machine addresses")
	}

	// update all agents known to the new state server.
	// TODO(perrito666): We should never stop process because of this.
	// updateAllMachines will not return errors for individual
	// agent update failures
	machines, err := st.AllMachines()
	if err != nil {
		return errors.Trace(err)
	}
	if err = updateAllMachines(args.PrivateAddress, machines); err != nil {
		return errors.Annotate(err, "cannot update agents")
	}

	info, err := st.RestoreInfoSetter()

	if err != nil {
		return errors.Trace(err)
	}

	// Mark restoreInfo as Finished so upon restart of the apiserver
	// the client can reconnect and determine if we were successful.
	err = info.SetStatus(state.RestoreFinished)

	return errors.Annotate(err, "failed to set status to finished")
}
Code example #18
File: upgrade_mongo.go  Project: makyo/juju
func (u *UpgradeMongoCommand) run() (err error) {
	dataDir, err := paths.DataDir(u.series)
	if err != nil {
		return errors.Annotatef(err, "cannot determine data dir for %q", u.series)
	}
	if u.configFilePath == "" {
		machineTag, err := names.ParseMachineTag(u.machineTag)
		if err != nil {
			return errors.Annotatef(err, "%q is not a valid machine tag", u.machineTag)
		}
		u.configFilePath = agent.ConfigPath(dataDir, machineTag)
	}
	u.agentConfig, err = agent.ReadConfig(u.configFilePath)
	if err != nil {
		return errors.Annotatef(err, "cannot read config file in %q", u.configFilePath)
	}

	current := u.agentConfig.MongoVersion()

	agentServiceName := u.agentConfig.Value(agent.AgentServiceName)
	if agentServiceName == "" {
		// For backwards compatibility, handle lack of AgentServiceName.
		agentServiceName = u.osGetenv("UPSTART_JOB")
	}
	if agentServiceName == "" {
		return errors.New("cannot determine juju service name")
	}
	svc, err := u.discoverService(agentServiceName, common.Conf{})
	if err != nil {
		return errors.Annotate(err, "cannot determine juju service")
	}
	if err := svc.Stop(); err != nil {
		return errors.Annotate(err, "cannot stop juju to begin migration")
	}
	defer func() {
		svcErr := svc.Start()
		if err != nil {
			err = errors.Annotatef(err, "failed upgrade and juju start after rollbacking upgrade: %v", svcErr)
		} else {
			err = errors.Annotate(svcErr, "could not start juju after upgrade")
		}
	}()
	if !u.slave {
		defer u.replicaAdd()
	}
	if u.rollback {
		origin := u.agentConfig.Value(KeyUpgradeBackup)
		if origin == "" {
			return errors.New("no available backup")
		}
		return u.rollbackCopyBackup(dataDir, origin)
	}

	u.tmpDir, err = u.createTempDir()
	if err != nil {
		return errors.Annotate(err, "could not create a temporary directory for the migration")
	}

	logger.Infof("begin migration to mongo 3")

	if err := u.satisfyPrerequisites(u.series); err != nil {
		return errors.Annotate(err, "cannot satisfy pre-requisites for the migration")
	}
	if current == mongo.Mongo24 || current == mongo.MongoUpgrade {
		if u.slave {
			return u.upgradeSlave(dataDir)
		}
		u.replicaRemove()
		if err := u.maybeUpgrade24to26(dataDir); err != nil {
			defer func() {
				if u.backupPath == "" {
					return
				}
				logger.Infof("will roll back after failed 2.6 upgrade")
				if err := u.rollbackCopyBackup(dataDir, u.backupPath); err != nil {
					logger.Errorf("could not rollback the upgrade: %v", err)
				}
			}()
			return errors.Annotate(err, "cannot upgrade from mongo 2.4 to 2.6")
		}
		current = mongo.Mongo26
	}
	if current == mongo.Mongo26 || current.StorageEngine != mongo.WiredTiger {
		if err := u.maybeUpgrade26to3x(dataDir); err != nil {
			defer func() {
				if u.backupPath == "" {
					return
				}
				logger.Infof("will roll back after failed 3.0 upgrade")
				if err := u.rollbackCopyBackup(dataDir, u.backupPath); err != nil {
					logger.Errorf("could not rollback the upgrade: %v", err)
				}
			}()
			return errors.Annotate(err, "cannot upgrade from mongo 2.6 to 3")
		}
	}
	return nil
}
Code example #19
File: util.go  Project: kapilt/juju
	}
	return values
}

// machineInfo is the structure used to pass information between the provider
// and the agent running on a node.
// When a node is started, the provider code creates a machineInfo object
// containing information about the node being started and configures
// cloudinit to get a YAML representation of that object written on the node's
// filesystem during its first startup.  That file is then read by the juju
// agent running on the node and converted back into a machineInfo object.
type machineInfo struct {
	Hostname string `yaml:",omitempty"`
}

var maasDataDir = paths.MustSucceed(paths.DataDir(config.LatestLtsSeries()))
var _MAASInstanceFilename = path.Join(maasDataDir, "MAASmachine.txt")

// cloudinitRunCmd returns the shell command that, when run, will create the
// "machine info" file containing the hostname of a machine.
// That command is destined to be used by cloudinit.
func (info *machineInfo) cloudinitRunCmd(series string) (string, error) {
	dataDir, err := paths.DataDir(series)
	if err != nil {
		return "", err
	}
	renderer, err := cloudinit.NewRenderer(series)
	if err != nil {
		return "", err
	}
Code example #20
File: backups_linux.go  Project: exekias/juju
// Restore handles either returning or creating a controller to a backed up status:
// * extracts the content of the given backup file and:
// * runs mongorestore with the backed up mongo dump
// * updates and writes configuration files
// * updates existing db entries to make sure they hold no references to
// old instances
// * updates config in all agents.
func (b *backups) Restore(backupId string, args RestoreArgs) (names.Tag, error) {
	meta, backupReader, err := b.Get(backupId)
	if err != nil {
		return nil, errors.Annotatef(err, "could not fetch backup %q", backupId)
	}

	defer backupReader.Close()

	workspace, err := NewArchiveWorkspaceReader(backupReader)
	if err != nil {
		return nil, errors.Annotate(err, "cannot unpack backup file")
	}
	defer workspace.Close()

	// TODO(perrito666) Create a compatibility table of sorts.
	version := meta.Origin.Version
	backupMachine := names.NewMachineTag(meta.Origin.Machine)

	if err := mongo.StopService(); err != nil {
		return nil, errors.Annotate(err, "cannot stop mongo to replace files")
	}

	// delete all the files to be replaced
	if err := PrepareMachineForRestore(); err != nil {
		return nil, errors.Annotate(err, "cannot delete existing files")
	}
	logger.Infof("deleted old files to place new")

	if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil {
		return nil, errors.Annotate(err, "cannot obtain system files from backup")
	}
	logger.Infof("placed new files")

	var agentConfig agent.ConfigSetterWriter
	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	datadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}
	agentConfigFile := agent.ConfigPath(datadir, backupMachine)
	if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load agent config from disk")
	}
	ssi, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot determine state serving info")
	}
	APIHostPorts := network.NewHostPorts(ssi.APIPort, args.PrivateAddress)
	agentConfig.SetAPIHostPorts([][]network.HostPort{APIHostPorts})
	if err := agentConfig.Write(); err != nil {
		return nil, errors.Annotate(err, "cannot write new agent configuration")
	}
	logger.Infof("wrote new agent config")

	if backupMachine.Id() != "0" {
		logger.Infof("extra work needed backup belongs to %q machine", backupMachine.String())
		serviceName := "jujud-" + agentConfig.Tag().String()
		aInfo := service.NewMachineAgentInfo(
			agentConfig.Tag().Id(),
			datadir,
			paths.MustSucceed(paths.LogDir(args.NewInstSeries)),
		)

		// TODO(perrito666) renderer should have a RendererForSeries, for the moment
		// restore only works on linuxes.
		renderer, _ := shell.NewRenderer("bash")
		serviceAgentConf := service.AgentConf(aInfo, renderer)
		svc, err := service.NewService(serviceName, serviceAgentConf, args.NewInstSeries)
		if err != nil {
			return nil, errors.Annotate(err, "cannot generate service for the restored agent.")
		}
		if err := svc.Install(); err != nil {
			return nil, errors.Annotate(err, "cannot install service for the restored agent.")
		}
		logger.Infof("new machine service")
	}

	logger.Infof("mongo service will be reinstalled to ensure its presence")
	if err := ensureMongoService(agentConfig); err != nil {
		return nil, errors.Annotate(err, "failed to reinstall service for juju-db")
	}

	logger.Infof("new mongo will be restored")
	// Restore mongodb from backup
	if err := placeNewMongoService(workspace.DBDumpDir, version); err != nil {
		return nil, errors.Annotate(err, "error restoring state from backup")
	}

	// Re-start replicaset with the new value for server address
	dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information")
	}

	logger.Infof("restarting replicaset")
	memberHostPort := net.JoinHostPort(args.PrivateAddress, strconv.Itoa(ssi.StatePort))
	err = resetReplicaSet(dialInfo, memberHostPort)
	if err != nil {
		return nil, errors.Annotate(err, "cannot reset replicaSet")
	}

	err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update mongo entries")
	}

	// From here we work with the restored controller
	mgoInfo, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, errors.Errorf("cannot retrieve info to connect to mongo")
	}

	st, err := newStateConnection(agentConfig.Model(), mgoInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer st.Close()

	machine, err := st.Machine(backupMachine.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}

	err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update api server machine addresses")
	}

	// update all agents known to the new controller.
	// TODO(perrito666): We should never stop process because of this.
	// updateAllMachines will not return errors for individual
	// agent update failures
	machines, err := st.AllMachines()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err = updateAllMachines(args.PrivateAddress, machines); err != nil {
		return nil, errors.Annotate(err, "cannot update agents")
	}

	info, err := st.RestoreInfoSetter()
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Mark restoreInfo as Finished so upon restart of the apiserver
	// the client can reconnect and determine if we were successful.
	err = info.SetStatus(state.RestoreFinished)

	return backupMachine, errors.Annotate(err, "failed to set status to finished")
}
Code example #21
File: userdatacfg_test.go  Project: imoapps/juju
func jujuDataDir(series string) string {
	return must(paths.DataDir(series))
}
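On Ubuntu series this resolves to the same hard-coded /var/lib/juju prefix that appears in the literal paths elsewhere in these examples, so a typical use is just the following sketch (the series value is illustrative):

	dataDir := jujuDataDir("trusty")                 // "/var/lib/juju" on Ubuntu series
	noncePath := filepath.Join(dataDir, "nonce.txt") // matches the path written by the cloud-init runcmd example below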
Code example #22
File: util.go  Project: bac/juju
	}
	return values
}

// machineInfo is the structure used to pass information between the provider
// and the agent running on a node.
// When a node is started, the provider code creates a machineInfo object
// containing information about the node being started and configures
// cloudinit to get a YAML representation of that object written on the node's
// filesystem during its first startup.  That file is then read by the juju
// agent running on the node and converted back into a machineInfo object.
type machineInfo struct {
	Hostname string `yaml:",omitempty"`
}

var maasDataDir = paths.MustSucceed(paths.DataDir(series.LatestLts()))
var _MAASInstanceFilename = path.Join(maasDataDir, "MAASmachine.txt")

// cloudinitRunCmd returns the shell command that, when run, will create the
// "machine info" file containing the hostname of a machine.
// That command is destined to be used by cloudinit.
func (info *machineInfo) cloudinitRunCmd(cloudcfg cloudinit.CloudConfig) (string, error) {
	dataDir, err := paths.DataDir(cloudcfg.GetSeries())
	if err != nil {
		return "", errors.Trace(err)
	}
	yaml, err := goyaml.Marshal(info)
	if err != nil {
		return "", errors.Trace(err)
	}
	renderer := cloudcfg.ShellRenderer()
Code example #23
File: agent.go  Project: imoapps/juju
	"github.com/juju/juju/version"
)

var logger = loggo.GetLogger("juju.agent")

const (
	// UninstallAgentFile is the name of the file inside the data
	// dir that, if it exists, will cause a machine agent to uninstall
	// when it receives the termination signal.
	UninstallAgentFile = "uninstall-agent"
)

// These are base values used for the corresponding defaults.
var (
	logDir          = paths.MustSucceed(paths.LogDir(series.HostSeries()))
	dataDir         = paths.MustSucceed(paths.DataDir(series.HostSeries()))
	confDir         = paths.MustSucceed(paths.ConfDir(series.HostSeries()))
	metricsSpoolDir = paths.MustSucceed(paths.MetricsSpoolDir(series.HostSeries()))
)

// Agent exposes the agent's configuration to other components. This
// interface should probably be segregated (agent.ConfigGetter and
// agent.ConfigChanger?) but YAGNI *currently* advises against same.
type Agent interface {

	// CurrentConfig returns a copy of the agent's configuration. No
	// guarantees regarding ongoing correctness are made.
	CurrentConfig() Config

	// ChangeConfig allows clients to change the agent's configuration
	// by supplying a callback that applies the changes.
Code example #24
File: providerinit_test.go  Project: pmatulis/juju
func (*CloudInitSuite) testUserData(c *gc.C, series string, bootstrap bool) {
	testJujuHome := c.MkDir()
	defer osenv.SetJujuHome(osenv.SetJujuHome(testJujuHome))
	// Use actual series paths instead of local defaults
	logDir := must(paths.LogDir(series))
	metricsSpoolDir := must(paths.MetricsSpoolDir(series))
	dataDir := must(paths.DataDir(series))
	tools := &tools.Tools{
		URL:     "http://tools.testing/tools/released/juju.tgz",
		Version: version.Binary{version.MustParse("1.2.3"), "quantal", "amd64"},
	}
	envConfig, err := config.New(config.NoDefaults, dummySampleConfig())
	c.Assert(err, jc.ErrorIsNil)

	allJobs := []multiwatcher.MachineJob{
		multiwatcher.JobManageModel,
		multiwatcher.JobHostUnits,
		multiwatcher.JobManageNetworking,
	}
	cfg := &instancecfg.InstanceConfig{
		MachineId:    "10",
		MachineNonce: "5432",
		Tools:        tools,
		Series:       series,
		MongoInfo: &mongo.MongoInfo{
			Info: mongo.Info{
				Addrs:  []string{"127.0.0.1:1234"},
				CACert: "CA CERT\n" + testing.CACert,
			},
			Password: "******",
			Tag:      names.NewMachineTag("10"),
		},
		APIInfo: &api.Info{
			Addrs:    []string{"127.0.0.1:1234"},
			Password: "******",
			CACert:   "CA CERT\n" + testing.CACert,
			Tag:      names.NewMachineTag("10"),
			ModelTag: testing.ModelTag,
		},
		DataDir:                 dataDir,
		LogDir:                  path.Join(logDir, "juju"),
		MetricsSpoolDir:         metricsSpoolDir,
		Jobs:                    allJobs,
		CloudInitOutputLog:      path.Join(logDir, "cloud-init-output.log"),
		Config:                  envConfig,
		AgentEnvironment:        map[string]string{agent.ProviderType: "dummy"},
		AuthorizedKeys:          "wheredidileavemykeys",
		MachineAgentServiceName: "jujud-machine-10",
		EnableOSUpgrade:         true,
	}
	if bootstrap {
		cfg.Bootstrap = true
		cfg.StateServingInfo = &params.StateServingInfo{
			StatePort:    envConfig.StatePort(),
			APIPort:      envConfig.APIPort(),
			Cert:         testing.ServerCert,
			PrivateKey:   testing.ServerKey,
			CAPrivateKey: testing.CAKey,
		}
	}
	script1 := "script1"
	script2 := "script2"
	cloudcfg, err := cloudinit.New(series)
	c.Assert(err, jc.ErrorIsNil)
	cloudcfg.AddRunCmd(script1)
	cloudcfg.AddRunCmd(script2)
	result, err := providerinit.ComposeUserData(cfg, cloudcfg, &openstack.OpenstackRenderer{})
	c.Assert(err, jc.ErrorIsNil)

	unzipped, err := utils.Gunzip(result)
	c.Assert(err, jc.ErrorIsNil)

	config := make(map[interface{}]interface{})
	err = goyaml.Unmarshal(unzipped, &config)
	c.Assert(err, jc.ErrorIsNil)

	// The scripts given to userData were added as the first
	// commands to be run.
	runCmd := config["runcmd"].([]interface{})
	c.Check(runCmd[0], gc.Equals, script1)
	c.Check(runCmd[1], gc.Equals, script2)

	if bootstrap {
		// The cloudinit config should have nothing but the basics:
		// SSH authorized keys, the additional runcmds, and log output.
		//
		// Note: the additional runcmds *do* belong here, at least
		// for MAAS. MAAS needs to configure and then bounce the
		// network interfaces, which would sever the SSH connection
		// in the synchronous bootstrap phase.
		expected := map[interface{}]interface{}{
			"output": map[interface{}]interface{}{
				"all": "| tee -a /var/log/cloud-init-output.log",
			},
			"runcmd": []interface{}{
				"script1", "script2",
				"set -xe",
				"install -D -m 644 /dev/null '/etc/init/juju-clean-shutdown.conf'",
				"printf '%s\\n' '\nauthor \"Juju Team <*****@*****.**>\"\ndescription \"Stop all network interfaces on shutdown\"\nstart on runlevel [016]\ntask\nconsole output\n\nexec /sbin/ifdown -a -v --force\n' > '/etc/init/juju-clean-shutdown.conf'",
				"install -D -m 644 /dev/null '/var/lib/juju/nonce.txt'",
				"printf '%s\\n' '5432' > '/var/lib/juju/nonce.txt'",
			},
		}
		// Series with old cloudinit versions don't support adding
		// users, so they need the old way to set SSH authorized keys.
		if series == "precise" {
			expected["ssh_authorized_keys"] = []interface{}{
				"wheredidileavemykeys",
			}
		} else {
			expected["users"] = []interface{}{
				map[interface{}]interface{}{
					"name":        "ubuntu",
					"lock_passwd": true,
					"groups": []interface{}{"adm", "audio",
						"cdrom", "dialout", "dip",
						"floppy", "netdev", "plugdev",
						"sudo", "video"},
					"shell":               "/bin/bash",
					"sudo":                []interface{}{"ALL=(ALL) NOPASSWD:ALL"},
					"ssh-authorized-keys": []interface{}{"wheredidileavemykeys"},
				},
			}
		}
		c.Check(config, jc.DeepEquals, expected)
	} else {
		// Just check that the cloudinit config looks good,
		// and that there are more runcmds than the additional
		// ones we passed into ComposeUserData.
		c.Check(config["package_upgrade"], jc.IsTrue)
		c.Check(len(runCmd) > 2, jc.IsTrue)
	}
}