Example No. 1
func (s *UnitSuite) RunTestOpenAPIState(c *gc.C, ent state.AgentEntity, agentCmd Agent, initialPassword string) {
	conf, err := agent.ReadConfig(agent.ConfigPath(s.DataDir(), ent.Tag()))
	c.Assert(err, jc.ErrorIsNil)

	conf.SetPassword("")
	err = conf.Write()
	c.Assert(err, jc.ErrorIsNil)

	// Check that it starts initially and changes the password
	assertOpen := func(conf agent.Config) {
		st, gotEnt, err := OpenAPIState(conf, agentCmd)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(st, gc.NotNil)
		st.Close()
		c.Assert(gotEnt.Tag(), gc.Equals, ent.Tag().String())
	}
	assertOpen(conf)

	// Check that the initial password is no longer valid.
	err = ent.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ent.PasswordValid(initialPassword), jc.IsFalse)

	// Read the configuration and check that we can connect with it.
	conf, err = agent.ReadConfig(agent.ConfigPath(conf.DataDir(), conf.Tag()))
	c.Assert(err, gc.IsNil)
	// Check we can open the API with the new configuration.
	assertOpen(conf)
}
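All of the examples on this page revolve around the same pair of calls: agent.ConfigPath builds the path to an agent's configuration file under a data directory, and agent.ReadConfig loads and parses that file. The following is a minimal standalone sketch of that pattern, not taken from the Juju tree: the import paths are assumptions (newer releases use gopkg.in/juju/names.v2), and several of the older examples below pass the tag as a plain string rather than a names.Tag.

package main

import (
	"fmt"
	"log"

	"github.com/juju/juju/agent" // assumed import path
	"github.com/juju/names"      // assumed import path
)

func main() {
	// agent.DefaultDataDir is the conventional data directory used in some
	// of the examples below; any data directory works.
	tag := names.NewMachineTag("0")
	conf, err := agent.ReadConfig(agent.ConfigPath(agent.DefaultDataDir, tag))
	if err != nil {
		log.Fatalf("cannot read agent config: %v", err)
	}
	fmt.Println(conf.Tag(), conf.DataDir())
}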
Example No. 2
func (s *UpgradeSuite) assertUpgradeSteps(c *gc.C, job state.MachineJob) {
	s.agentSuite.PatchValue(&version.Current, s.upgradeToVersion)
	err := s.State.SetEnvironAgentVersion(s.upgradeToVersion.Number)
	c.Assert(err, gc.IsNil)

	oldVersion := s.upgradeToVersion
	oldVersion.Major = 1
	oldVersion.Minor = 16
	var oldConfig agent.Config
	s.machine, oldConfig, _ = s.primeAgent(c, oldVersion, job)

	a := s.newAgent(c, s.machine)
	go func() { c.Check(a.Run(nil), gc.IsNil) }()
	defer func() { c.Check(a.Stop(), gc.IsNil) }()

	// Wait for upgrade steps to run.
	success := false
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		conf, err := agent.ReadConfig(agent.ConfigPath(oldConfig.DataDir(), s.machine.Tag()))
		c.Assert(err, gc.IsNil)
		success = conf.UpgradedToVersion() == s.upgradeToVersion.Number
		if success {
			break
		}
	}
	// Upgrade worker has completed ok.
	c.Assert(success, jc.IsTrue)
}
Example No. 3
func (s *migrateLocalProviderAgentConfigSuite) assertConfigNotProcessed(c *gc.C) {
	envConfig, err := s.State.EnvironConfig()
	c.Assert(err, jc.ErrorIsNil)
	allAttrs := envConfig.AllAttrs()

	namespace, _ := allAttrs["namespace"].(string)
	c.Assert(namespace, gc.Equals, "")
	container, _ := allAttrs["container"].(string)
	c.Assert(container, gc.Equals, "")

	rootDir, _ := allAttrs["root-dir"].(string)
	expectedSharedStorageDir := filepath.Join(rootDir, "shared-storage")
	_, err = os.Lstat(expectedSharedStorageDir)
	c.Assert(err, jc.ErrorIsNil)
	tag := s.ctx.AgentConfig().Tag()

	// We need to read the actual migrated agent config.
	configFilePath := agent.ConfigPath(agent.DefaultDataDir, tag)
	agentConfig, err := agent.ReadConfig(configFilePath)
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(agentConfig.DataDir(), gc.Equals, agent.DefaultDataDir)
	c.Assert(agentConfig.LogDir(), gc.Equals, agent.DefaultLogDir)
	c.Assert(agentConfig.Jobs(), gc.HasLen, 0)
	c.Assert(agentConfig.Value("SHARED_STORAGE_ADDR"), gc.Equals, "blah")
	c.Assert(agentConfig.Value("SHARED_STORAGE_DIR"), gc.Equals, expectedSharedStorageDir)
	c.Assert(agentConfig.Value(agent.Namespace), gc.Equals, "")
	c.Assert(agentConfig.Value(agent.AgentServiceName), gc.Equals, "")
	c.Assert(agentConfig.Value(agent.ContainerType), gc.Equals, "")
}
Example No. 4
func (s *migrateAgentEnvUUIDSuite) removeEnvUUIDFromAgentConfig(c *gc.C) {
	// Read the file in as simple map[string]interface{} and delete
	// the element, and write it back out again.

	// First step, read the file contents.
	filename := agent.ConfigPath(agent.DefaultDataDir, s.machine.Tag())
	data, err := ioutil.ReadFile(filename)
	c.Assert(err, jc.ErrorIsNil)
	c.Logf("Data in:\n\n%s\n", data)

	// Parse it into the map.
	var content map[string]interface{}
	err = goyaml.Unmarshal(data, &content)
	c.Assert(err, jc.ErrorIsNil)

	// Remove the environment value, and marshal back into bytes.
	delete(content, "environment")
	data, err = goyaml.Marshal(content)
	c.Assert(err, jc.ErrorIsNil)

	// Write the yaml back out remembering to add the format prefix.
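	// (Assumption: ReadConfig relies on this leading "# format" marker to
	// select the on-disk format, so the file would not read back below
	// without re-adding it.)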
	data = append([]byte("# format 1.18\n"), data...)
	c.Logf("Data out:\n\n%s\n", data)
	err = ioutil.WriteFile(filename, data, 0644)
	c.Assert(err, jc.ErrorIsNil)

	// Reset test attributes.
	cfg, err := agent.ReadConfig(filename)
	c.Assert(err, jc.ErrorIsNil)
	s.ctx.realAgentConfig = cfg
}
Example No. 5
func (c *MigrateCommand) Run(ctx *cmd.Context) (err error) {

	defer func() {
		if err != nil {
			fmt.Fprintf(ctx.Stdout, "error stack:\n%s", errors.ErrorStack(err))
		}
	}()

	loggo.GetLogger("juju").SetLogLevel(loggo.DEBUG)
	conf, err := agent.ReadConfig(agent.ConfigPath(c.dataDir, c.machineTag))
	if err != nil {
		return err
	}

	info, ok := conf.MongoInfo()
	if !ok {
		return errors.Errorf("no state info available")
	}
	st, err := state.Open(conf.Model(), info, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	if err != nil {
		return err
	}
	defer st.Close()

	if c.operation == "export" {
		return c.exportModel(ctx, st)
	}

	return c.importModel(ctx, st)

}
Example No. 6
func (s *migrateLocalProviderAgentConfigSuite) assertConfigProcessed(c *gc.C) {
	envConfig, err := s.State.EnvironConfig()
	c.Assert(err, jc.ErrorIsNil)
	allAttrs := envConfig.AllAttrs()

	namespace, _ := allAttrs["namespace"].(string)
	c.Assert(namespace, gc.Equals, "user-dummyenv")
	container, _ := allAttrs["container"].(string)
	c.Assert(container, gc.Equals, "lxc")

	expectedDataDir, _ := allAttrs["root-dir"].(string)
	expectedSharedStorageDir := filepath.Join(expectedDataDir, "shared-storage")
	_, err = os.Lstat(expectedSharedStorageDir)
	c.Assert(err, gc.NotNil)
	c.Assert(err, jc.Satisfies, os.IsNotExist)
	expectedLogDir := filepath.Join(*upgrades.RootLogDir, "juju-"+namespace)
	expectedJobs := []multiwatcher.MachineJob{multiwatcher.JobManageEnviron}
	tag := s.ctx.AgentConfig().Tag()

	// We need to read the actual migrated agent config.
	configFilePath := agent.ConfigPath(expectedDataDir, tag)
	agentConfig, err := agent.ReadConfig(configFilePath)
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(agentConfig.DataDir(), gc.Equals, expectedDataDir)
	c.Assert(agentConfig.LogDir(), gc.Equals, expectedLogDir)
	c.Assert(agentConfig.Jobs(), gc.DeepEquals, expectedJobs)
	c.Assert(agentConfig.Value("SHARED_STORAGE_ADDR"), gc.Equals, "")
	c.Assert(agentConfig.Value("SHARED_STORAGE_DIR"), gc.Equals, "")
	c.Assert(agentConfig.Value(agent.Namespace), gc.Equals, namespace)
	agentService := "juju-agent-user-dummyenv"
	c.Assert(agentConfig.Value(agent.AgentServiceName), gc.Equals, agentService)
	c.Assert(agentConfig.Value(agent.ContainerType), gc.Equals, "")
}
Example No. 7
func (s *UpgradeSuite) getMachine0Config(c *gc.C) agent.Config {
	conf, err := agent.ReadConfig(agent.ConfigPath(
		s.machine0Config.DataDir(),
		s.machine0.Tag(),
	))
	c.Assert(err, gc.IsNil)
	return conf
}
Example No. 8
func (s *agentSuite) assertCanOpenState(c *gc.C, tag, dataDir string) {
	config, err := agent.ReadConfig(agent.ConfigPath(dataDir, tag))
	c.Assert(err, gc.IsNil)
	info, ok := config.StateInfo()
	c.Assert(ok, jc.IsTrue)
	st, err := state.Open(info, mongo.DialOpts{}, environs.NewStatePolicy())
	c.Assert(err, gc.IsNil)
	st.Close()
}
Example No. 9
func (s *AgentSuite) AssertCanOpenState(c *gc.C, tag names.Tag, dataDir string) {
	config, err := agent.ReadConfig(agent.ConfigPath(dataDir, tag))
	c.Assert(err, jc.ErrorIsNil)
	info, ok := config.MongoInfo()
	c.Assert(ok, jc.IsTrue)
	st, err := state.Open(config.Model(), info, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	c.Assert(err, jc.ErrorIsNil)
	st.Close()
}
Example No. 10
func (s *BootstrapSuite) TestInitialPassword(c *gc.C) {
	machineConf, cmd, err := s.initBootstrapCommand(c, nil,
		"--model-config", s.b64yamlControllerModelConfig,
		"--hosted-model-config", s.b64yamlHostedModelConfig,
		"--instance-id", string(s.instanceId),
	)
	c.Assert(err, jc.ErrorIsNil)

	err = cmd.Run(nil)
	c.Assert(err, jc.ErrorIsNil)

	info := &mongo.MongoInfo{
		Info: mongo.Info{
			Addrs:  []string{gitjujutesting.MgoServer.Addr()},
			CACert: testing.CACert,
		},
	}

	// Check we can log in to mongo as admin.
	// TODO(dfc) does passing nil for the admin user name make your skin crawl ? mine too.
	info.Tag, info.Password = nil, testPassword
	st, err := state.Open(testing.ModelTag, info, mongotest.DialOpts(), environs.NewStatePolicy())
	c.Assert(err, jc.ErrorIsNil)
	defer st.Close()

	// We're running Mongo with --noauth; let's explicitly verify
	// that we can login as that user. Even with --noauth, an
	// explicit Login will still be verified.
	adminDB := st.MongoSession().DB("admin")
	err = adminDB.Login("admin", "invalid-password")
	c.Assert(err, gc.ErrorMatches, "(auth|(.*Authentication)) fail(s|ed)\\.?")
	err = adminDB.Login("admin", info.Password)
	c.Assert(err, jc.ErrorIsNil)

	// Check that the admin user has been given an appropriate
	// password
	u, err := st.User(names.NewLocalUserTag("admin"))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(u.PasswordValid(testPassword), jc.IsTrue)

	// Check that the machine configuration has been given a new
	// password and that we can connect to mongo as that machine
	// and that the in-mongo password also verifies correctly.
	machineConf1, err := agent.ReadConfig(agent.ConfigPath(machineConf.DataDir(), names.NewMachineTag("0")))
	c.Assert(err, jc.ErrorIsNil)

	stateinfo, ok := machineConf1.MongoInfo()
	c.Assert(ok, jc.IsTrue)
	st, err = state.Open(testing.ModelTag, stateinfo, mongotest.DialOpts(), environs.NewStatePolicy())
	c.Assert(err, jc.ErrorIsNil)
	defer st.Close()

	m, err := st.Machine("0")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(m.HasVote(), jc.IsTrue)
}
Example No. 11
func (c *AgentConf) ReadConfig(tag string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	conf, err := agent.ReadConfig(agent.ConfigPath(c.dataDir, tag))
	if err != nil {
		return err
	}
	c._config = conf
	return nil
}
Example No. 12
File: agent.go Project: bac/juju
func (s *AgentSuite) AssertCanOpenState(c *gc.C, tag names.Tag, dataDir string) {
	config, err := agent.ReadConfig(agent.ConfigPath(dataDir, tag))
	c.Assert(err, jc.ErrorIsNil)
	info, ok := config.MongoInfo()
	c.Assert(ok, jc.IsTrue)
	st, err := state.Open(config.Model(), config.Controller(), info, mongotest.DialOpts(), stateenvirons.GetNewPolicyFunc(
		stateenvirons.GetNewEnvironFunc(environs.New),
	))
	c.Assert(err, jc.ErrorIsNil)
	st.Close()
}
Example No. 13
func (*suite) TestWriteAndRead(c *gc.C) {
	testParams := attributeParams
	testParams.Paths.DataDir = c.MkDir()
	testParams.Paths.LogDir = c.MkDir()
	conf, err := agent.NewAgentConfig(testParams)
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(conf.Write(), gc.IsNil)
	reread, err := agent.ReadConfig(agent.ConfigPath(conf.DataDir(), conf.Tag()))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(reread, jc.DeepEquals, conf)
}
Example No. 14
func (s *UnitSuite) TestOpenAPIState(c *gc.C) {
	_, unit, conf, _ := s.primeAgent(c)
	configPath := agent.ConfigPath(conf.DataDir(), conf.Tag())

	// Set an invalid password (but the old initial password will still work).
	// This test is a sort of unsophisticated simulation of what might happen
	// if a previous cycle had picked, and locally recorded, a new password;
	// but failed to set it on the state server. Would be better to test that
	// code path explicitly in future, but this suffices for now.
	confW, err := agent.ReadConfig(configPath)
	c.Assert(err, gc.IsNil)
	confW.SetPassword("nonsense-borken")
	err = confW.Write()
	c.Assert(err, jc.ErrorIsNil)

	// Check that it successfully connects (with the conf's old password).
	assertOpen := func() {
		agent := NewAgentConf(conf.DataDir())
		err := agent.ReadConfig(conf.Tag().String())
		c.Assert(err, jc.ErrorIsNil)
		st, gotEntity, err := apicaller.OpenAPIState(agent)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(st, gc.NotNil)
		st.Close()
		c.Assert(gotEntity.Tag(), gc.Equals, unit.Tag().String())
	}
	assertOpen()

	// Check that the old password has been invalidated.
	assertPassword := func(password string, valid bool) {
		err := unit.Refresh()
		c.Assert(err, jc.ErrorIsNil)
		c.Check(unit.PasswordValid(password), gc.Equals, valid)
	}
	assertPassword(initialUnitPassword, false)

	// Read the stored password and check it's valid.
	confR, err := agent.ReadConfig(configPath)
	c.Assert(err, gc.IsNil)
	apiInfo, ok := confR.APIInfo()
	c.Assert(ok, jc.IsTrue)
	newPassword := apiInfo.Password
	assertPassword(newPassword, true)

	// Double-check that we can open a fresh connection with the stored
	// conf ... and that the password hasn't been changed again.
	assertOpen()
	assertPassword(newPassword, true)
}
Example No. 15
func (s *UpgradeSuite) waitForUpgradeToFinish(c *gc.C) {
	success := false
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		conf, err := agent.ReadConfig(agent.ConfigPath(
			s.machine0Config.DataDir(),
			s.machine0.Tag().String(),
		))
		c.Assert(err, gc.IsNil)
		success = conf.UpgradedToVersion() == s.upgradeToVersion.Number
		if success {
			break
		}
	}
	c.Assert(success, jc.IsTrue)
}
Example No. 16
func (s *BootstrapSuite) TestInitialPassword(c *gc.C) {
	machineConf, cmd, err := s.initBootstrapCommand(c, nil, "--env-config", s.envcfg, "--instance-id", string(s.instanceId))
	c.Assert(err, gc.IsNil)

	err = cmd.Run(nil)
	c.Assert(err, gc.IsNil)

	// Check that we cannot now connect to the state without a
	// password.
	info := &authentication.MongoInfo{
		Info: mongo.Info{
			Addrs:  []string{gitjujutesting.MgoServer.Addr()},
			CACert: testing.CACert,
		},
	}
	testOpenState(c, info, errors.Unauthorizedf(""))

	// Check we can log in to mongo as admin.
	// TODO(dfc) does passing nil for the admin user name make your skin crawl ? mine too.
	info.Tag, info.Password = nil, testPasswordHash()
	st, err := state.Open(info, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	c.Assert(err, gc.IsNil)
	// Reset password so the tests can continue to use the same server.
	defer st.Close()
	defer st.SetAdminMongoPassword("")

	// Check that the admin user has been given an appropriate
	// password
	u, err := st.User("admin")
	c.Assert(err, gc.IsNil)
	c.Assert(u.PasswordValid(testPassword), gc.Equals, true)

	// Check that the machine configuration has been given a new
	// password and that we can connect to mongo as that machine
	// and that the in-mongo password also verifies correctly.
	machineConf1, err := agent.ReadConfig(agent.ConfigPath(machineConf.DataDir(), names.NewMachineTag("0")))
	c.Assert(err, gc.IsNil)

	stateinfo, ok := machineConf1.MongoInfo()
	c.Assert(ok, jc.IsTrue)
	st, err = state.Open(stateinfo, mongo.DialOpts{}, environs.NewStatePolicy())
	c.Assert(err, gc.IsNil)
	defer st.Close()

	m, err := st.Machine("0")
	c.Assert(err, gc.IsNil)
	c.Assert(m.HasVote(), jc.IsTrue)
}
Example No. 17
func (fix *SimpleToolsFixture) checkUnitInstalled(c *gc.C, name, password string) {
	tag := names.NewUnitTag(name)

	svcName := "jujud-" + tag.String()
	assertContains(c, fix.data.InstalledNames(), svcName)

	svcConf := fix.data.GetInstalled(svcName).Conf()
	// TODO(ericsnow) For now we just use upstart serialization.
	uconfData, err := upstart.Serialize(svcName, svcConf)
	c.Assert(err, jc.ErrorIsNil)
	uconf := string(uconfData)

	regex := regexp.MustCompile("(?m)(?:^\\s)*exec\\s.+$")
	execs := regex.FindAllString(uconf, -1)

	if nil == execs {
		c.Fatalf("no command found in conf:\n%s", uconf)
	} else if len(execs) > 1 {
		c.Fatalf("Test is not built to handle more than one exec line.")
	}

	_, toolsDir := fix.paths(tag)
	jujudPath := filepath.Join(toolsDir, "jujud"+cmdSuffix)

	logPath := filepath.Join(fix.logDir, tag.String()+".log")

	for _, pat := range []string{
		"^exec " + quote + jujudPath + quote + " unit ",
		" --unit-name " + name + " ",
		" >> " + logPath + " 2>&1$",
	} {
		match, err := regexp.MatchString(pat, execs[0])
		c.Assert(err, jc.ErrorIsNil)
		if !match {
			c.Fatalf("failed to match:\n%s\nin:\n%s", pat, execs[0])
		}
	}

	conf, err := agent.ReadConfig(agent.ConfigPath(fix.dataDir, tag))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(conf.Tag(), gc.Equals, tag)
	c.Assert(conf.DataDir(), gc.Equals, fix.dataDir)

	jujudData, err := ioutil.ReadFile(jujudPath)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(string(jujudData), gc.Equals, fakeJujud)
}
Example No. 18
func stateInfo() (*state.Info, error) {
	dataDir := agent.DefaultDataDir
	tag, err := machineAgentTag(dataDir)
	if err != nil {
		return nil, err
	}
	cfgPath := agent.ConfigPath(dataDir, tag)
	cfg, err := agent.ReadConfig(cfgPath)
	if err != nil {
		return nil, err
	}
	info, ok := cfg.StateInfo()
	if !ok {
		return nil, fmt.Errorf("no state info found")
	}
	return info, nil
}
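A hedged sketch of how a helper like stateInfo might be consumed in this older API generation; the three-argument state.Open signature matches Examples No. 8 and No. 16, while later releases shown elsewhere on this page also take model/controller arguments:

func openStateFromAgentConfig() (*state.State, error) {
	info, err := stateInfo()
	if err != nil {
		return nil, err
	}
	// Dial state (Mongo) with the credentials recorded in the agent config.
	return state.Open(info, mongo.DefaultDialOpts(), environs.NewStatePolicy())
}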
Example No. 19
func (fix *SimpleToolsFixture) checkUnitInstalled(c *gc.C, name, password string) {
	tag := names.NewUnitTag(name)
	uconfPath, _, toolsDir := fix.paths(tag)
	uconfData, err := ioutil.ReadFile(uconfPath)
	c.Assert(err, gc.IsNil)
	uconf := string(uconfData)

	regex := regexp.MustCompile("(?m)(?:^\\s)*exec\\s.+$")
	execs := regex.FindAllString(uconf, -1)

	if nil == execs {
		c.Fatalf("no command found in %s:\n%s", uconfPath, uconf)
	} else if len(execs) > 1 {
		c.Fatalf("Test is not built to handle more than one exec line.")
	}

	logPath := filepath.Join(fix.logDir, tag.String()+".log")
	jujudPath := filepath.Join(toolsDir, "jujud")

	for _, pat := range []string{
		"^exec " + jujudPath + " unit ",
		" --unit-name " + name + " ",
		" >> " + logPath + " 2>&1$",
	} {
		match, err := regexp.MatchString(pat, execs[0])
		c.Assert(err, gc.IsNil)
		if !match {
			c.Fatalf("failed to match:\n%s\nin:\n%s", pat, execs[0])
		}
	}

	conf, err := agent.ReadConfig(agent.ConfigPath(fix.dataDir, tag))
	c.Assert(err, gc.IsNil)
	c.Assert(conf.Tag(), gc.Equals, tag)
	c.Assert(conf.DataDir(), gc.Equals, fix.dataDir)

	jujudData, err := ioutil.ReadFile(jujudPath)
	c.Assert(err, gc.IsNil)
	c.Assert(string(jujudData), gc.Equals, fakeJujud)
}
Example No. 20
func (fix *SimpleToolsFixture) checkUnitInstalled(c *gc.C, name, password string) {
	tag := names.NewUnitTag(name).String()
	uconfPath, _, toolsDir := fix.paths(tag)
	uconfData, err := ioutil.ReadFile(uconfPath)
	c.Assert(err, gc.IsNil)
	uconf := string(uconfData)
	var execLine string
	for _, line := range strings.Split(uconf, "\n") {
		if strings.HasPrefix(line, "exec ") {
			execLine = line
			break
		}
	}
	if execLine == "" {
		c.Fatalf("no command found in %s:\n%s", uconfPath, uconf)
	}
	logPath := filepath.Join(fix.logDir, tag+".log")
	jujudPath := filepath.Join(toolsDir, "jujud")
	for _, pat := range []string{
		"^exec " + jujudPath + " unit ",
		" --unit-name " + name + " ",
		" >> " + logPath + " 2>&1$",
	} {
		match, err := regexp.MatchString(pat, execLine)
		c.Assert(err, gc.IsNil)
		if !match {
			c.Fatalf("failed to match:\n%s\nin:\n%s", pat, execLine)
		}
	}

	conf, err := agent.ReadConfig(agent.ConfigPath(fix.dataDir, tag))
	c.Assert(err, gc.IsNil)
	c.Assert(conf.Tag(), gc.Equals, tag)
	c.Assert(conf.DataDir(), gc.Equals, fix.dataDir)

	jujudData, err := ioutil.ReadFile(jujudPath)
	c.Assert(err, gc.IsNil)
	c.Assert(string(jujudData), gc.Equals, fakeJujud)
}
Example No. 21
func (c *FixitCommand) Run(ctx *cmd.Context) error {

	loggo.GetLogger("juju").SetLogLevel(loggo.DEBUG)
	conf, err := agent.ReadConfig(agent.ConfigPath(c.dataDir, c.machineTag))
	if err != nil {
		return err
	}

	info, ok := conf.MongoInfo()
	if !ok {
		return errors.Errorf("no state info available")
	}
	st, err := state.Open(conf.Environment(), info, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	if err != nil {
		return err
	}
	defer st.Close()

	ctx.Infof("\nStep 1: migrate individual unit ports to openedPorts collection")

	if err := state.MigrateUnitPortsToOpenedPorts(st); err != nil {
		return err
	}

	ctx.Infof("\nStep 2: create entries in meter status collection for existing units")

	if err := state.CreateUnitMeterStatus(st); err != nil {
		return err
	}

	ctx.Infof("\nStep 3: migrate machine jobs into ones with JobManageNetworking based on rules")

	if err := state.MigrateJobManageNetworking(st); err != nil {
		return err
	}

	return nil
}
Example No. 22
func (s *bootstrapSuite) testInitializeState(c *gc.C, fakeLocalEnv bool) {
	dataDir := c.MkDir()

	lxcFakeNetConfig := filepath.Join(c.MkDir(), "lxc-net")
	netConf := []byte(`
  # comments ignored
LXC_BR= ignored
LXC_ADDR = "fooo"
LXC_BRIDGE="foobar" # detected
anything else ignored
LXC_BRIDGE="ignored"`[1:])
	err := ioutil.WriteFile(lxcFakeNetConfig, netConf, 0644)
	c.Assert(err, jc.ErrorIsNil)
	s.PatchValue(&network.InterfaceByNameAddrs, func(name string) ([]net.Addr, error) {
		c.Assert(name, gc.Equals, "foobar")
		return []net.Addr{
			&net.IPAddr{IP: net.IPv4(10, 0, 3, 1)},
			&net.IPAddr{IP: net.IPv4(10, 0, 3, 4)},
		}, nil
	})
	s.PatchValue(&network.LXCNetDefaultConfig, lxcFakeNetConfig)
	s.PatchValue(agent.IsLocalEnv, func(*config.Config) bool {
		c.Logf("fakeLocalEnv=%v", fakeLocalEnv)
		return fakeLocalEnv
	})

	pwHash := utils.UserPasswordHash(testing.DefaultMongoPassword, utils.CompatSalt)
	configParams := agent.AgentConfigParams{
		Paths:             agent.Paths{DataDir: dataDir},
		Tag:               names.NewMachineTag("0"),
		UpgradedToVersion: version.Current.Number,
		StateAddresses:    []string{s.mgoInst.Addr()},
		CACert:            testing.CACert,
		Password:          pwHash,
		Environment:       testing.EnvironmentTag,
	}
	servingInfo := params.StateServingInfo{
		Cert:           testing.ServerCert,
		PrivateKey:     testing.ServerKey,
		CAPrivateKey:   testing.CAKey,
		APIPort:        1234,
		StatePort:      s.mgoInst.Port(),
		SystemIdentity: "def456",
	}

	cfg, err := agent.NewStateMachineConfig(configParams, servingInfo)
	c.Assert(err, jc.ErrorIsNil)

	_, available := cfg.StateServingInfo()
	c.Assert(available, jc.IsTrue)
	expectConstraints := constraints.MustParse("mem=1024M")
	expectHW := instance.MustParseHardware("mem=2048M")
	initialAddrs := network.NewAddresses(
		"zeroonetwothree",
		"0.1.2.3",
		"10.0.3.1", // lxc bridge address filtered (when fakeLocalEnv=false).
		"10.0.3.4", // lxc bridge address filtered (-"-).
		"10.0.3.3", // not a lxc bridge address
	)
	mcfg := agent.BootstrapMachineConfig{
		Addresses:       initialAddrs,
		Constraints:     expectConstraints,
		Jobs:            []multiwatcher.MachineJob{multiwatcher.JobManageEnviron},
		InstanceId:      "i-bootstrap",
		Characteristics: expectHW,
		SharedSecret:    "abc123",
	}
	filteredAddrs := network.NewAddresses(
		"zeroonetwothree",
		"0.1.2.3",
		"10.0.3.3",
	)
	if fakeLocalEnv {
		// For local environments - no filtering.
		filteredAddrs = append([]network.Address{}, initialAddrs...)
	}
	envAttrs := dummy.SampleConfig().Delete("admin-secret").Merge(testing.Attrs{
		"agent-version": version.Current.Number.String(),
		"state-id":      "1", // needed so policy can Open config
	})
	envCfg, err := config.New(config.NoDefaults, envAttrs)
	c.Assert(err, jc.ErrorIsNil)

	adminUser := names.NewLocalUserTag("agent-admin")
	st, m, err := agent.InitializeState(adminUser, cfg, envCfg, mcfg, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	c.Assert(err, jc.ErrorIsNil)
	defer st.Close()

	err = cfg.Write()
	c.Assert(err, jc.ErrorIsNil)

	// Check that the environment has been set up.
	env, err := st.Environment()
	c.Assert(err, jc.ErrorIsNil)
	uuid, ok := envCfg.UUID()
	c.Assert(ok, jc.IsTrue)
	c.Assert(env.UUID(), gc.Equals, uuid)

	// Check that initial admin user has been set up correctly.
	envTag := env.Tag().(names.EnvironTag)
	s.assertCanLogInAsAdmin(c, envTag, pwHash)
	user, err := st.User(env.Owner())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(user.PasswordValid(testing.DefaultMongoPassword), jc.IsTrue)

	// Check that environment configuration has been added.
	newEnvCfg, err := st.EnvironConfig()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(newEnvCfg.AllAttrs(), gc.DeepEquals, envCfg.AllAttrs())

	// Check that the bootstrap machine looks correct.
	c.Assert(m.Id(), gc.Equals, "0")
	c.Assert(m.Jobs(), gc.DeepEquals, []state.MachineJob{state.JobManageEnviron})
	c.Assert(m.Series(), gc.Equals, series.HostSeries())
	c.Assert(m.CheckProvisioned(agent.BootstrapNonce), jc.IsTrue)
	c.Assert(m.Addresses(), jc.DeepEquals, filteredAddrs)
	gotConstraints, err := m.Constraints()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(gotConstraints, gc.DeepEquals, expectConstraints)
	c.Assert(err, jc.ErrorIsNil)
	gotHW, err := m.HardwareCharacteristics()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(*gotHW, gc.DeepEquals, expectHW)

	// Check that the API host ports are initialised correctly.
	apiHostPorts, err := st.APIHostPorts()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(apiHostPorts, jc.DeepEquals, [][]network.HostPort{
		network.AddressesWithPort(filteredAddrs, 1234),
	})

	// Check that the state serving info is initialised correctly.
	stateServingInfo, err := st.StateServingInfo()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(stateServingInfo, jc.DeepEquals, state.StateServingInfo{
		APIPort:        1234,
		StatePort:      s.mgoInst.Port(),
		Cert:           testing.ServerCert,
		PrivateKey:     testing.ServerKey,
		CAPrivateKey:   testing.CAKey,
		SharedSecret:   "abc123",
		SystemIdentity: "def456",
	})

	// Check that the machine agent's config has been written
	// and that we can use it to connect to the state.
	machine0 := names.NewMachineTag("0")
	newCfg, err := agent.ReadConfig(agent.ConfigPath(dataDir, machine0))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(newCfg.Tag(), gc.Equals, machine0)
	c.Assert(agent.Password(newCfg), gc.Not(gc.Equals), pwHash)
	c.Assert(agent.Password(newCfg), gc.Not(gc.Equals), testing.DefaultMongoPassword)
	info, ok := cfg.MongoInfo()
	c.Assert(ok, jc.IsTrue)
	st1, err := state.Open(newCfg.Environment(), info, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	c.Assert(err, jc.ErrorIsNil)
	defer st1.Close()
}
Example No. 23
func readConfigFromDisk(c *gc.C, dir string, tag names.Tag) agent.Config {
	conf, err := agent.ReadConfig(agent.ConfigPath(dir, tag))
	c.Assert(err, jc.ErrorIsNil)
	return conf
}
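A hypothetical call site for a helper like this, reusing names that appear elsewhere on this page (readConfigFromDisk itself is defined just above), might look like:

	conf := readConfigFromDisk(c, dataDir, names.NewMachineTag("0"))
	c.Assert(conf.UpgradedToVersion(), gc.Equals, version.Current.Number)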
Example No. 24
// Restore brings a controller back to a backed-up state, either by restoring the existing controller or by creating a new one:
// * extracts the content of the given backup file and:
// * runs mongorestore with the backed up mongo dump
// * updates and writes configuration files
// * updates existing db entries to make sure they hold no references to
// old instances
// * updates config in all agents.
func (b *backups) Restore(backupId string, dbInfo *DBInfo, args RestoreArgs) (names.Tag, error) {
	meta, backupReader, err := b.Get(backupId)
	if err != nil {
		return nil, errors.Annotatef(err, "could not fetch backup %q", backupId)
	}

	defer backupReader.Close()

	workspace, err := NewArchiveWorkspaceReader(backupReader)
	if err != nil {
		return nil, errors.Annotate(err, "cannot unpack backup file")
	}
	defer workspace.Close()

	// This might actually work, but we don't have a guarantee so we don't allow it.
	if meta.Origin.Series != args.NewInstSeries {
		return nil, errors.Errorf("cannot restore a backup made in a machine with series %q into a machine with series %q, %#v", meta.Origin.Series, args.NewInstSeries, meta)
	}

	// TODO(perrito666) Create a compatibility table of sorts.
	vers := meta.Origin.Version
	if vers.Major != 2 {
		return nil, errors.Errorf("Juju version %v cannot restore backups made using Juju version %v", version.Current, vers)
	}
	backupMachine := names.NewMachineTag(meta.Origin.Machine)

	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	oldDatadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}

	var oldAgentConfig agent.ConfigSetterWriter
	oldAgentConfigFile := agent.ConfigPath(oldDatadir, args.NewInstTag)
	if oldAgentConfig, err = agent.ReadConfig(oldAgentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load old agent config from disk")
	}

	logger.Infof("stopping juju-db")
	if err = mongo.StopService(); err != nil {
		return nil, errors.Annotate(err, "failed to stop mongo")
	}

	// delete all the files to be replaced
	if err := PrepareMachineForRestore(oldAgentConfig.MongoVersion()); err != nil {
		return nil, errors.Annotate(err, "cannot delete existing files")
	}
	logger.Infof("deleted old files to place new")

	if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil {
		return nil, errors.Annotate(err, "cannot obtain system files from backup")
	}
	logger.Infof("placed new restore files")

	var agentConfig agent.ConfigSetterWriter
	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	datadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}
	agentConfigFile := agent.ConfigPath(datadir, backupMachine)
	if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load agent config from disk")
	}
	ssi, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot determine state serving info")
	}
	APIHostPorts := network.NewHostPorts(ssi.APIPort, args.PrivateAddress, args.PublicAddress)
	agentConfig.SetAPIHostPorts([][]network.HostPort{APIHostPorts})
	if err := agentConfig.Write(); err != nil {
		return nil, errors.Annotate(err, "cannot write new agent configuration")
	}
	logger.Infof("wrote new agent config for restore")

	if backupMachine.Id() != "0" {
		logger.Infof("extra work needed backup belongs to %q machine", backupMachine.String())
		serviceName := "jujud-" + agentConfig.Tag().String()
		aInfo := service.NewMachineAgentInfo(
			agentConfig.Tag().Id(),
			dataDir,
			paths.MustSucceed(paths.LogDir(args.NewInstSeries)),
		)

		// TODO(perrito666) renderer should have a RendererForSeries, for the moment
		// restore only works on linuxes.
		renderer, _ := shell.NewRenderer("bash")
		serviceAgentConf := service.AgentConf(aInfo, renderer)
		svc, err := service.NewService(serviceName, serviceAgentConf, args.NewInstSeries)
		if err != nil {
			return nil, errors.Annotate(err, "cannot generate service for the restored agent.")
		}
		if err := svc.Install(); err != nil {
			return nil, errors.Annotate(err, "cannot install service for the restored agent.")
		}
		logger.Infof("new machine service")
	}

	logger.Infof("mongo service will be reinstalled to ensure its presence")
	if err := ensureMongoService(agentConfig); err != nil {
		return nil, errors.Annotate(err, "failed to reinstall service for juju-db")
	}

	dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information")
	}

	oldDialInfo, err := newDialInfo(args.PrivateAddress, oldAgentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information for existing mongo")
	}

	logger.Infof("new mongo will be restored")
	mgoVer := agentConfig.MongoVersion()

	tagUser, tagUserPassword, err := tagUserCredentials(agentConfig)
	if err != nil {
		return nil, errors.Trace(err)
	}
	rArgs := RestorerArgs{
		DialInfo:        dialInfo,
		Version:         mgoVer,
		TagUser:         tagUser,
		TagUserPassword: tagUserPassword,
		RunCommandFn:    runCommand,
		StartMongo:      mongo.StartService,
		StopMongo:       mongo.StopService,
		NewMongoSession: NewMongoSession,
		GetDB:           GetDB,
	}

	// Restore mongodb from backup
	restorer, err := NewDBRestorer(rArgs)
	if err != nil {
		return nil, errors.Annotate(err, "error preparing for restore")
	}
	if err := restorer.Restore(workspace.DBDumpDir, oldDialInfo); err != nil {
		return nil, errors.Annotate(err, "error restoring state from backup")
	}

	// Re-start replicaset with the new value for server address
	logger.Infof("restarting replicaset")
	memberHostPort := net.JoinHostPort(args.PrivateAddress, strconv.Itoa(ssi.StatePort))
	err = resetReplicaSet(dialInfo, memberHostPort)
	if err != nil {
		return nil, errors.Annotate(err, "cannot reset replicaSet")
	}

	err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update mongo entries")
	}

	// From here we work with the restored controller
	mgoInfo, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, errors.Errorf("cannot retrieve info to connect to mongo")
	}

	st, err := newStateConnection(agentConfig.Controller(), agentConfig.Model(), mgoInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer st.Close()

	machine, err := st.Machine(backupMachine.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}

	logger.Infof("updating local machine addresses")
	err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update api server machine addresses")
	}
	// Update the APIHostPorts as well. Under normal circumstances the API
	// Host Ports are only set during bootstrap and by the peergrouper worker.
	// Unfortunately right now, the peer grouper is busy restarting and isn't
	// guaranteed to set the host ports before the remote machines we are
	// about to tell about us. If it doesn't, the remote machine gets its
	// agent.conf file updated with this new machine's IP address, it then
	// starts, and the "api-address-updater" worker asks for the api host
	// ports, and gets told the old IP address of the machine that was backed
	// up. It then writes this incorrect file to its agent.conf file, which
	// causes it to attempt to reconnect to the api server. Unfortunately it
	// now has the wrong address and can never get the  correct one.
	// So, we set it explicitly here.
	if err := st.SetAPIHostPorts([][]network.HostPort{APIHostPorts}); err != nil {
		return nil, errors.Annotate(err, "cannot update api server host ports")
	}

	// update all agents known to the new controller.
	// TODO(perrito666): We should never stop process because of this.
	// updateAllMachines will not return errors for individual
	// agent update failures
	models, err := st.AllModels()
	if err != nil {
		return nil, errors.Trace(err)
	}
	machines := []machineModel{}
	for _, model := range models {
		machinesForModel, err := st.AllMachinesFor(model.UUID())
		if err != nil {
			return nil, errors.Trace(err)
		}
		for _, machine := range machinesForModel {
			machines = append(machines, machineModel{machine: machine, model: model})
		}
	}
	logger.Infof("updating other machine addresses")
	if err := updateAllMachines(args.PrivateAddress, args.PublicAddress, machines); err != nil {
		return nil, errors.Annotate(err, "cannot update agents")
	}

	// Mark restoreInfo as Finished so upon restart of the apiserver
	// the client can reconnect and determine if we were successful.
	info := st.RestoreInfo()
	// In mongo 3.2, even though the backup is made with --oplog, there
	// are stale transactions in this collection.
	if err := info.PurgeTxn(); err != nil {
		return nil, errors.Annotate(err, "cannot purge stale transactions")
	}
	if err = info.SetStatus(state.RestoreFinished); err != nil {
		return nil, errors.Annotate(err, "failed to set status to finished")
	}

	return backupMachine, nil
}
Example No. 25
func (*suite) TestMigrate(c *gc.C) {
	initialParams := agent.AgentConfigParams{
		Paths: agent.Paths{
			DataDir: c.MkDir(),
			LogDir:  c.MkDir(),
		},
		Tag:               names.NewMachineTag("1"),
		Nonce:             "nonce",
		Password:          "******",
		UpgradedToVersion: version.MustParse("1.16.5"),
		Jobs: []multiwatcher.MachineJob{
			multiwatcher.JobManageModel,
			multiwatcher.JobHostUnits,
		},
		CACert:         "ca cert",
		Model:          testing.ModelTag,
		StateAddresses: []string{"localhost:1234"},
		APIAddresses:   []string{"localhost:4321"},
		Values: map[string]string{
			"key1": "value1",
			"key2": "value2",
			"key3": "value3",
		},
	}

	migrateTests := []struct {
		comment      string
		fields       []string
		newParams    agent.MigrateParams
		expectValues map[string]string
		expectErr    string
	}{{
		comment:   "nothing to change",
		fields:    nil,
		newParams: agent.MigrateParams{},
	}, {
		fields: []string{"Paths"},
		newParams: agent.MigrateParams{
			Paths: agent.Paths{DataDir: c.MkDir()},
		},
	}, {
		fields: []string{"Paths"},
		newParams: agent.MigrateParams{
			Paths: agent.Paths{
				DataDir: c.MkDir(),
				LogDir:  c.MkDir(),
			},
		},
	}, {
		fields: []string{"Jobs"},
		newParams: agent.MigrateParams{
			Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits},
		},
	}, {
		comment:   "invalid/immutable field specified",
		fields:    []string{"InvalidField"},
		newParams: agent.MigrateParams{},
		expectErr: `unknown field "InvalidField"`,
	}, {
		comment: "Values can be added, changed or removed",
		fields:  []string{"Values", "DeleteValues"},
		newParams: agent.MigrateParams{
			DeleteValues: []string{"key2", "key3"}, // delete
			Values: map[string]string{
				"key1":     "new value1", // change
				"new key3": "value3",     // add
				"empty":    "",           // add empty val
			},
		},
		expectValues: map[string]string{
			"key1":     "new value1",
			"new key3": "value3",
			"empty":    "",
		},
	}}
	for i, test := range migrateTests {
		summary := "migrate fields"
		if test.comment != "" {
			summary += " (" + test.comment + ") "
		}
		c.Logf("test %d: %s %v", i, summary, test.fields)

		initialConfig, err := agent.NewAgentConfig(initialParams)
		c.Assert(err, jc.ErrorIsNil)

		newConfig, err := agent.NewAgentConfig(initialParams)
		c.Assert(err, jc.ErrorIsNil)

		c.Assert(initialConfig.Write(), gc.IsNil)
		c.Assert(agent.ConfigFileExists(initialConfig), jc.IsTrue)

		err = newConfig.Migrate(test.newParams)
		c.Assert(err, jc.ErrorIsNil)
		err = newConfig.Write()
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(agent.ConfigFileExists(newConfig), jc.IsTrue)

		// Make sure we can read it back successfully and it
		// matches what we wrote.
		configPath := agent.ConfigPath(newConfig.DataDir(), newConfig.Tag())
		c.Logf("new config path: %v", configPath)
		readConfig, err := agent.ReadConfig(configPath)
		c.Check(err, jc.ErrorIsNil)
		c.Check(newConfig, jc.DeepEquals, readConfig)

		// Make sure only the specified fields were changed and
		// the rest matches.
		for _, field := range test.fields {
			switch field {
			case "Values":
				err = agent.PatchConfig(initialConfig, field, test.expectValues)
				c.Check(err, jc.ErrorIsNil)
			case "DeleteValues":
				err = agent.PatchConfig(initialConfig, field, test.newParams.DeleteValues)
				c.Check(err, jc.ErrorIsNil)
			default:
				value := reflect.ValueOf(test.newParams).FieldByName(field)
				if value.IsValid() && test.expectErr == "" {
					err = agent.PatchConfig(initialConfig, field, value.Interface())
					c.Check(err, jc.ErrorIsNil)
				} else {
					err = agent.PatchConfig(initialConfig, field, value)
					c.Check(err, gc.ErrorMatches, test.expectErr)
				}
			}
		}
		c.Check(newConfig, jc.DeepEquals, initialConfig)
	}
}
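Distilled from the table-driven test above, the core Migrate round trip is roughly the sketch below, where conf is assumed to be a writable agent config as produced by agent.NewAgentConfig and newDataDir is a fresh directory:

	// Point the config at a new data directory, persist it, and confirm the
	// written file reads back identically from the new location.
	err := conf.Migrate(agent.MigrateParams{
		Paths: agent.Paths{DataDir: newDataDir},
	})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(conf.Write(), gc.IsNil)

	reread, err := agent.ReadConfig(agent.ConfigPath(conf.DataDir(), conf.Tag()))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(reread, jc.DeepEquals, conf)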
Example No. 26
func (s *agentSuite) assertCannotOpenState(c *gc.C, tag, dataDir string) {
	config, err := agent.ReadConfig(agent.ConfigPath(dataDir, tag))
	c.Assert(err, gc.IsNil)
	_, ok := config.StateInfo()
	c.Assert(ok, jc.IsFalse)
}
Example No. 27
func refreshConfig(c *gc.C, config agent.Config) agent.ConfigSetterWriter {
	config1, err := agent.ReadConfig(agent.ConfigPath(config.DataDir(), config.Tag()))
	c.Assert(err, gc.IsNil)
	return config1
}
Example No. 28
func (s *bootstrapSuite) TestInitializeState(c *gc.C) {
	dataDir := c.MkDir()

	pwHash := utils.UserPasswordHash(testing.DefaultMongoPassword, utils.CompatSalt)
	configParams := agent.AgentConfigParams{
		DataDir:           dataDir,
		Tag:               names.NewMachineTag("0"),
		UpgradedToVersion: version.Current.Number,
		StateAddresses:    []string{s.mgoInst.Addr()},
		CACert:            testing.CACert,
		Password:          pwHash,
	}
	servingInfo := params.StateServingInfo{
		Cert:           testing.ServerCert,
		PrivateKey:     testing.ServerKey,
		APIPort:        1234,
		StatePort:      s.mgoInst.Port(),
		SystemIdentity: "def456",
	}

	cfg, err := agent.NewStateMachineConfig(configParams, servingInfo)
	c.Assert(err, gc.IsNil)

	_, available := cfg.StateServingInfo()
	c.Assert(available, gc.Equals, true)
	expectConstraints := constraints.MustParse("mem=1024M")
	expectHW := instance.MustParseHardware("mem=2048M")
	mcfg := agent.BootstrapMachineConfig{
		Addresses:       network.NewAddresses("zeroonetwothree", "0.1.2.3"),
		Constraints:     expectConstraints,
		Jobs:            []params.MachineJob{params.JobManageEnviron},
		InstanceId:      "i-bootstrap",
		Characteristics: expectHW,
		SharedSecret:    "abc123",
	}
	envAttrs := dummy.SampleConfig().Delete("admin-secret").Merge(testing.Attrs{
		"agent-version": version.Current.Number.String(),
		"state-id":      "1", // needed so policy can Open config
	})
	envCfg, err := config.New(config.NoDefaults, envAttrs)
	c.Assert(err, gc.IsNil)

	st, m, err := agent.InitializeState(cfg, envCfg, mcfg, mongo.DialOpts{}, environs.NewStatePolicy())
	c.Assert(err, gc.IsNil)
	defer st.Close()

	err = cfg.Write()
	c.Assert(err, gc.IsNil)

	// Check that the environment has been set up.
	env, err := st.Environment()
	c.Assert(err, gc.IsNil)
	uuid, ok := envCfg.UUID()
	c.Assert(ok, jc.IsTrue)
	c.Assert(env.UUID(), gc.Equals, uuid)

	// Check that initial admin user has been set up correctly.
	s.assertCanLogInAsAdmin(c, pwHash)
	user, err := st.User(env.Owner())
	c.Assert(err, gc.IsNil)
	c.Assert(user.PasswordValid(testing.DefaultMongoPassword), jc.IsTrue)

	// Check that environment configuration has been added.
	newEnvCfg, err := st.EnvironConfig()
	c.Assert(err, gc.IsNil)
	c.Assert(newEnvCfg.AllAttrs(), gc.DeepEquals, envCfg.AllAttrs())

	// Check that the bootstrap machine looks correct.
	c.Assert(m.Id(), gc.Equals, "0")
	c.Assert(m.Jobs(), gc.DeepEquals, []state.MachineJob{state.JobManageEnviron})
	c.Assert(m.Series(), gc.Equals, version.Current.Series)
	c.Assert(m.CheckProvisioned(agent.BootstrapNonce), jc.IsTrue)
	c.Assert(m.Addresses(), gc.DeepEquals, mcfg.Addresses)
	gotConstraints, err := m.Constraints()
	c.Assert(err, gc.IsNil)
	c.Assert(gotConstraints, gc.DeepEquals, expectConstraints)
	c.Assert(err, gc.IsNil)
	gotHW, err := m.HardwareCharacteristics()
	c.Assert(err, gc.IsNil)
	c.Assert(*gotHW, gc.DeepEquals, expectHW)
	gotAddrs := m.Addresses()
	c.Assert(gotAddrs, gc.DeepEquals, mcfg.Addresses)

	// Check that the API host ports are initialised correctly.
	apiHostPorts, err := st.APIHostPorts()
	c.Assert(err, gc.IsNil)
	c.Assert(apiHostPorts, jc.DeepEquals, [][]network.HostPort{
		network.AddressesWithPort(
			network.NewAddresses("zeroonetwothree", "0.1.2.3"),
			1234),
	})

	// Check that the state serving info is initialised correctly.
	stateServingInfo, err := st.StateServingInfo()
	c.Assert(err, gc.IsNil)
	c.Assert(stateServingInfo, jc.DeepEquals, state.StateServingInfo{
		APIPort:        1234,
		StatePort:      s.mgoInst.Port(),
		Cert:           testing.ServerCert,
		PrivateKey:     testing.ServerKey,
		SharedSecret:   "abc123",
		SystemIdentity: "def456",
	})

	// Check that the machine agent's config has been written
	// and that we can use it to connect to the state.
	machine0 := names.NewMachineTag("0")
	newCfg, err := agent.ReadConfig(agent.ConfigPath(dataDir, machine0))
	c.Assert(err, gc.IsNil)
	c.Assert(newCfg.Tag(), gc.Equals, machine0)
	c.Assert(agent.Password(newCfg), gc.Not(gc.Equals), pwHash)
	c.Assert(agent.Password(newCfg), gc.Not(gc.Equals), testing.DefaultMongoPassword)
	info, ok := cfg.MongoInfo()
	c.Assert(ok, jc.IsTrue)
	st1, err := state.Open(info, mongo.DialOpts{}, environs.NewStatePolicy())
	c.Assert(err, gc.IsNil)
	defer st1.Close()
}
Example No. 29
// Restore brings a controller back to a backed-up state, either by restoring the existing controller or by creating a new one:
// * extracts the content of the given backup file and:
// * runs mongorestore with the backed up mongo dump
// * updates and writes configuration files
// * updates existing db entries to make sure they hold no references to
// old instances
// * updates config in all agents.
func (b *backups) Restore(backupId string, args RestoreArgs) (names.Tag, error) {
	meta, backupReader, err := b.Get(backupId)
	if err != nil {
		return nil, errors.Annotatef(err, "could not fetch backup %q", backupId)
	}

	defer backupReader.Close()

	workspace, err := NewArchiveWorkspaceReader(backupReader)
	if err != nil {
		return nil, errors.Annotate(err, "cannot unpack backup file")
	}
	defer workspace.Close()

	// TODO(perrito666) Create a compatibility table of sorts.
	version := meta.Origin.Version
	backupMachine := names.NewMachineTag(meta.Origin.Machine)

	if err := mongo.StopService(); err != nil {
		return nil, errors.Annotate(err, "cannot stop mongo to replace files")
	}

	// delete all the files to be replaced
	if err := PrepareMachineForRestore(); err != nil {
		return nil, errors.Annotate(err, "cannot delete existing files")
	}
	logger.Infof("deleted old files to place new")

	if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil {
		return nil, errors.Annotate(err, "cannot obtain system files from backup")
	}
	logger.Infof("placed new files")

	var agentConfig agent.ConfigSetterWriter
	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	datadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}
	agentConfigFile := agent.ConfigPath(datadir, backupMachine)
	if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load agent config from disk")
	}
	ssi, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot determine state serving info")
	}
	APIHostPorts := network.NewHostPorts(ssi.APIPort, args.PrivateAddress)
	agentConfig.SetAPIHostPorts([][]network.HostPort{APIHostPorts})
	if err := agentConfig.Write(); err != nil {
		return nil, errors.Annotate(err, "cannot write new agent configuration")
	}
	logger.Infof("wrote new agent config")

	if backupMachine.Id() != "0" {
		logger.Infof("extra work needed backup belongs to %q machine", backupMachine.String())
		serviceName := "jujud-" + agentConfig.Tag().String()
		aInfo := service.NewMachineAgentInfo(
			agentConfig.Tag().Id(),
			dataDir,
			paths.MustSucceed(paths.LogDir(args.NewInstSeries)),
		)

		// TODO(perrito666) renderer should have a RendererForSeries, for the moment
		// restore only works on linuxes.
		renderer, _ := shell.NewRenderer("bash")
		serviceAgentConf := service.AgentConf(aInfo, renderer)
		svc, err := service.NewService(serviceName, serviceAgentConf, args.NewInstSeries)
		if err != nil {
			return nil, errors.Annotate(err, "cannot generate service for the restored agent.")
		}
		if err := svc.Install(); err != nil {
			return nil, errors.Annotate(err, "cannot install service for the restored agent.")
		}
		logger.Infof("new machine service")
	}

	logger.Infof("mongo service will be reinstalled to ensure its presence")
	if err := ensureMongoService(agentConfig); err != nil {
		return nil, errors.Annotate(err, "failed to reinstall service for juju-db")
	}

	logger.Infof("new mongo will be restored")
	// Restore mongodb from backup
	if err := placeNewMongoService(workspace.DBDumpDir, version); err != nil {
		return nil, errors.Annotate(err, "error restoring state from backup")
	}

	// Re-start replicaset with the new value for server address
	dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information")
	}

	logger.Infof("restarting replicaset")
	memberHostPort := net.JoinHostPort(args.PrivateAddress, strconv.Itoa(ssi.StatePort))
	err = resetReplicaSet(dialInfo, memberHostPort)
	if err != nil {
		return nil, errors.Annotate(err, "cannot reset replicaSet")
	}

	err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update mongo entries")
	}

	// From here we work with the restored controller
	mgoInfo, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, errors.Errorf("cannot retrieve info to connect to mongo")
	}

	st, err := newStateConnection(agentConfig.Model(), mgoInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer st.Close()

	machine, err := st.Machine(backupMachine.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}

	err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update api server machine addresses")
	}

	// update all agents known to the new controller.
	// TODO(perrito666): We should never stop process because of this.
	// updateAllMachines will not return errors for individual
	// agent update failures
	machines, err := st.AllMachines()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err = updateAllMachines(args.PrivateAddress, machines); err != nil {
		return nil, errors.Annotate(err, "cannot update agents")
	}

	info, err := st.RestoreInfoSetter()
	if err != nil {
		return nil, errors.Trace(err)
	}

	// Mark restoreInfo as Finished so upon restart of the apiserver
	// the client can reconnect and determine if we were successful.
	err = info.SetStatus(state.RestoreFinished)

	return backupMachine, errors.Annotate(err, "failed to set status to finished")
}
Example No. 30
func (s *AgentSuite) AssertCannotOpenState(c *gc.C, tag names.Tag, dataDir string) {
	config, err := agent.ReadConfig(agent.ConfigPath(dataDir, tag))
	c.Assert(err, jc.ErrorIsNil)
	_, ok := config.MongoInfo()
	c.Assert(ok, jc.IsFalse)
}