func (*agentSuite) TestContainerAgentConf(c *gc.C) {
	dataDir := c.MkDir()
	logDir := c.MkDir()
	info := service.NewUnitAgentInfo("wordpress/0", dataDir, logDir)
	renderer, err := shell.NewRenderer("")
	c.Assert(err, jc.ErrorIsNil)
	conf := service.ContainerAgentConf(info, renderer, "cont")

	jujud := filepath.Join(dataDir, "tools", "unit-wordpress-0", "jujud"+cmdSuffix)
	cmd := strings.Join([]string{
		shquote(jujud),
		"unit",
		"--data-dir", shquote(dataDir),
		"--unit-name", "wordpress/0",
		"--debug",
	}, " ")
	serviceBinary := jujud
	serviceArgs := []string{
		"unit",
		"--data-dir", dataDir,
		"--unit-name", "wordpress/0",
		"--debug",
	}
	env := osenv.FeatureFlags()
	env[osenv.JujuContainerTypeEnvKey] = "cont"
	c.Check(conf, jc.DeepEquals, common.Conf{
		Desc:          "juju unit agent for wordpress/0",
		ExecStart:     cmd,
		Logfile:       filepath.Join(logDir, "unit-wordpress-0.log"),
		Env:           env,
		Timeout:       300,
		ServiceBinary: serviceBinary,
		ServiceArgs:   serviceArgs,
	})
}
func (s rendererSuite) TestNewRendererUnix(c *gc.C) {
	for _, os := range utils.OSUnix {
		c.Logf("trying %q", os)
		renderer, err := shell.NewRenderer(os)
		c.Assert(err, jc.ErrorIsNil)
		s.checkRenderer(c, renderer, "bash")
	}
}
func (s rendererSuite) TestNewRendererDistros(c *gc.C) {
	distros := []string{"ubuntu"}
	for _, distro := range distros {
		c.Logf("trying %q", distro)
		renderer, err := shell.NewRenderer(distro)
		c.Assert(err, jc.ErrorIsNil)
		s.checkRenderer(c, renderer, "bash")
	}
}
// New returns a new Config with no options set.
func New(series string) (CloudConfig, error) {
	os, err := version.GetOSFromSeries(series)
	if err != nil {
		return nil, err
	}
	switch os {
	case version.Windows:
		renderer, _ := shell.NewRenderer("powershell")
		return &windowsCloudConfig{
			&cloudConfig{
				series:   series,
				renderer: renderer,
				attrs:    make(map[string]interface{}),
			},
		}, nil
	case version.Ubuntu:
		renderer, _ := shell.NewRenderer("bash")
		return &ubuntuCloudConfig{
			&cloudConfig{
				series:    series,
				paccmder:  commands.NewAptPackageCommander(),
				pacconfer: config.NewAptPackagingConfigurer(series),
				renderer:  renderer,
				attrs:     make(map[string]interface{}),
			},
		}, nil
	case version.CentOS:
		renderer, _ := shell.NewRenderer("bash")
		return &centOSCloudConfig{
			&cloudConfig{
				series:    series,
				paccmder:  commands.NewYumPackageCommander(),
				pacconfer: config.NewYumPackagingConfigurer(series),
				renderer:  renderer,
				attrs:     make(map[string]interface{}),
			},
		}, nil
	default:
		return nil, errors.NotFoundf("cloudconfig for series %q", series)
	}
}
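// A minimal caller-side sketch of New. The errors.IsNotFound branch follows
// from the NotFoundf return above; the RenderYAML call is an assumption drawn
// from the CloudConfig interface used elsewhere in this tree, not verified here.
func exampleNewCloudConfig() ([]byte, error) {
	cfg, err := New("centos7")
	if err != nil {
		if errors.IsNotFound(err) {
			// An unsupported series surfaces as errors.NotFoundf, so callers
			// can branch on errors.IsNotFound instead of matching strings.
			return nil, errors.Annotate(err, "series not supported")
		}
		return nil, err
	}
	// The concrete config carries the appropriate package commander (apt vs
	// yum) and shell renderer (bash vs powershell) for the series.
	return cfg.RenderYAML()
}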
func (s rendererSuite) TestNewRendererGOOS(c *gc.C) {
	// All possible values of runtime.GOOS should be supported.
	renderer, err := shell.NewRenderer(runtime.GOOS)
	c.Assert(err, jc.ErrorIsNil)
	switch runtime.GOOS {
	case "windows":
		s.checkRenderer(c, renderer, "powershell")
	default:
		s.checkRenderer(c, renderer, "bash")
	}
}
func (*agentSuite) TestAgentConfMachineLocal(c *gc.C) {
	// We use two distinct directories to ensure the paths don't get
	// mixed up during the call.
	dataDir := c.MkDir()
	logDir := c.MkDir()
	info := service.NewMachineAgentInfo("0", dataDir, logDir)
	renderer, err := shell.NewRenderer("")
	c.Assert(err, jc.ErrorIsNil)
	conf := service.AgentConf(info, renderer)

	jujud := filepath.Join(dataDir, "tools", "machine-0", "jujud"+cmdSuffix)
	cmd := strings.Join([]string{
		shquote(jujud),
		"machine",
		"--data-dir", shquote(dataDir),
		"--machine-id", "0",
		"--debug",
	}, " ")
	serviceBinary := jujud
	serviceArgs := []string{
		"machine",
		"--data-dir", dataDir,
		"--machine-id", "0",
		"--debug",
	}
	c.Check(conf, jc.DeepEquals, common.Conf{
		Desc:      "juju agent for machine-0",
		ExecStart: cmd,
		Logfile:   filepath.Join(logDir, "machine-0.log"),
		Env:       osenv.FeatureFlags(),
		Limit: map[string]int{
			"nofile": 20000,
		},
		Timeout:       300,
		ServiceBinary: serviceBinary,
		ServiceArgs:   serviceArgs,
	})
}
func (*agentSuite) TestAgentConfMachineUbuntu(c *gc.C) {
	dataDir := "/var/lib/juju"
	logDir := "/var/log/juju"
	info := service.NewMachineAgentInfo("0", dataDir, logDir)
	renderer, err := shell.NewRenderer("ubuntu")
	c.Assert(err, jc.ErrorIsNil)
	conf := service.AgentConf(info, renderer)

	jujud := dataDir + "/tools/machine-0/jujud"
	cmd := strings.Join([]string{
		shquote(jujud),
		"machine",
		"--data-dir", shquote(dataDir),
		"--machine-id", "0",
		"--debug",
	}, " ")
	serviceBinary := jujud
	serviceArgs := []string{
		"machine",
		"--data-dir", dataDir,
		"--machine-id", "0",
		"--debug",
	}
	c.Check(conf, jc.DeepEquals, common.Conf{
		Desc:      "juju agent for machine-0",
		ExecStart: cmd,
		Logfile:   logDir + "/machine-0.log",
		Env:       osenv.FeatureFlags(),
		Limit: map[string]int{
			"nofile": 20000,
		},
		Timeout:       300,
		ServiceBinary: serviceBinary,
		ServiceArgs:   serviceArgs,
	})
}
func (*agentSuite) TestAgentConfMachineWindows(c *gc.C) {
	dataDir := `C:\Juju\lib\juju`
	logDir := `C:\Juju\logs\juju`
	info := service.NewMachineAgentInfo("0", dataDir, logDir)
	renderer, err := shell.NewRenderer("windows")
	c.Assert(err, jc.ErrorIsNil)
	conf := service.AgentConf(info, renderer)

	jujud := dataDir + `\tools\machine-0\jujud.exe`
	cmd := strings.Join([]string{
		shquote(jujud),
		"machine",
		"--data-dir", shquote(dataDir),
		"--machine-id", "0",
		"--debug",
	}, " ")
	serviceBinary := jujud
	serviceArgs := []string{
		"machine",
		"--data-dir", dataDir,
		"--machine-id", "0",
		"--debug",
	}
	c.Check(conf, jc.DeepEquals, common.Conf{
		Desc:      "juju agent for machine-0",
		ExecStart: cmd,
		Logfile:   logDir + `\machine-0.log`,
		Env:       osenv.FeatureFlags(),
		Limit: map[string]int{
			"nofile": 20000,
		},
		Timeout:       300,
		ServiceBinary: serviceBinary,
		ServiceArgs:   serviceArgs,
	})
}
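// The three machine-agent tests above differ only in renderer and path
// conventions. A table-driven sketch of how they could be kept in sync
// (reusing the suite's helpers; the expectation is condensed to the
// service binary rather than the full Conf comparison):
func (*agentSuite) TestAgentConfMachineRendererTable(c *gc.C) {
	for _, t := range []struct {
		os, dataDir, jujud string
	}{
		{"ubuntu", "/var/lib/juju", "/var/lib/juju/tools/machine-0/jujud"},
		{"windows", `C:\Juju\lib\juju`, `C:\Juju\lib\juju\tools\machine-0\jujud.exe`},
	} {
		c.Logf("trying %q", t.os)
		info := service.NewMachineAgentInfo("0", t.dataDir, c.MkDir())
		renderer, err := shell.NewRenderer(t.os)
		c.Assert(err, jc.ErrorIsNil)
		conf := service.AgentConf(info, renderer)
		c.Check(conf.ServiceBinary, gc.Equals, t.jujud)
	}
}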
// Restore returns a controller to a backed-up state. It:
// * extracts the content of the given backup file,
// * runs mongorestore with the backed-up mongo dump,
// * updates and writes configuration files,
// * updates existing db entries to make sure they hold no references to
//   old instances, and
// * updates config in all agents.
func (b *backups) Restore(backupId string, dbInfo *DBInfo, args RestoreArgs) (names.Tag, error) {
	meta, backupReader, err := b.Get(backupId)
	if err != nil {
		return nil, errors.Annotatef(err, "could not fetch backup %q", backupId)
	}
	defer backupReader.Close()

	workspace, err := NewArchiveWorkspaceReader(backupReader)
	if err != nil {
		return nil, errors.Annotate(err, "cannot unpack backup file")
	}
	defer workspace.Close()

	// This might actually work, but we don't have a guarantee so we don't allow it.
	if meta.Origin.Series != args.NewInstSeries {
		return nil, errors.Errorf("cannot restore a backup made in a machine with series %q into a machine with series %q, %#v", meta.Origin.Series, args.NewInstSeries, meta)
	}

	// TODO(perrito666) Create a compatibility table of sorts.
	vers := meta.Origin.Version
	if vers.Major != 2 {
		return nil, errors.Errorf("Juju version %v cannot restore backups made using Juju version %v", version.Current, vers)
	}
	backupMachine := names.NewMachineTag(meta.Origin.Machine)

	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	oldDatadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}

	var oldAgentConfig agent.ConfigSetterWriter
	oldAgentConfigFile := agent.ConfigPath(oldDatadir, args.NewInstTag)
	if oldAgentConfig, err = agent.ReadConfig(oldAgentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load old agent config from disk")
	}

	logger.Infof("stopping juju-db")
	if err = mongo.StopService(); err != nil {
		return nil, errors.Annotate(err, "failed to stop mongo")
	}

	// Delete all the files to be replaced.
	if err := PrepareMachineForRestore(oldAgentConfig.MongoVersion()); err != nil {
		return nil, errors.Annotate(err, "cannot delete existing files")
	}
	logger.Infof("deleted old files to place new")

	if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil {
		return nil, errors.Annotate(err, "cannot obtain system files from backup")
	}
	logger.Infof("placed new restore files")

	var agentConfig agent.ConfigSetterWriter
	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	datadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}
	agentConfigFile := agent.ConfigPath(datadir, backupMachine)
	if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load agent config from disk")
	}
	ssi, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot determine state serving info")
	}
	APIHostPorts := network.NewHostPorts(ssi.APIPort, args.PrivateAddress, args.PublicAddress)
	agentConfig.SetAPIHostPorts([][]network.HostPort{APIHostPorts})
	if err := agentConfig.Write(); err != nil {
		return nil, errors.Annotate(err, "cannot write new agent configuration")
	}
	logger.Infof("wrote new agent config for restore")

	if backupMachine.Id() != "0" {
		logger.Infof("extra work needed: backup belongs to machine %q", backupMachine.String())
		serviceName := "jujud-" + agentConfig.Tag().String()
		aInfo := service.NewMachineAgentInfo(
			agentConfig.Tag().Id(),
			datadir,
			paths.MustSucceed(paths.LogDir(args.NewInstSeries)),
		)

		// TODO(perrito666) renderer should have a RendererForSeries; for the
		// moment restore only works on linuxes.
		renderer, _ := shell.NewRenderer("bash")
		serviceAgentConf := service.AgentConf(aInfo, renderer)
		svc, err := service.NewService(serviceName, serviceAgentConf, args.NewInstSeries)
		if err != nil {
			return nil, errors.Annotate(err, "cannot generate service for the restored agent")
		}
		if err := svc.Install(); err != nil {
			return nil, errors.Annotate(err, "cannot install service for the restored agent")
		}
		logger.Infof("new machine service")
	}

	logger.Infof("mongo service will be reinstalled to ensure its presence")
	if err := ensureMongoService(agentConfig); err != nil {
		return nil, errors.Annotate(err, "failed to reinstall service for juju-db")
	}

	dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information")
	}

	oldDialInfo, err := newDialInfo(args.PrivateAddress, oldAgentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information for existing mongo")
	}

	logger.Infof("new mongo will be restored")
	mgoVer := agentConfig.MongoVersion()

	tagUser, tagUserPassword, err := tagUserCredentials(agentConfig)
	if err != nil {
		return nil, errors.Trace(err)
	}
	rArgs := RestorerArgs{
		DialInfo:        dialInfo,
		Version:         mgoVer,
		TagUser:         tagUser,
		TagUserPassword: tagUserPassword,
		RunCommandFn:    runCommand,
		StartMongo:      mongo.StartService,
		StopMongo:       mongo.StopService,
		NewMongoSession: NewMongoSession,
		GetDB:           GetDB,
	}

	// Restore mongodb from backup.
	restorer, err := NewDBRestorer(rArgs)
	if err != nil {
		return nil, errors.Annotate(err, "error preparing for restore")
	}
	if err := restorer.Restore(workspace.DBDumpDir, oldDialInfo); err != nil {
		return nil, errors.Annotate(err, "error restoring state from backup")
	}

	// Re-start replicaset with the new value for server address.
	logger.Infof("restarting replicaset")
	memberHostPort := net.JoinHostPort(args.PrivateAddress, strconv.Itoa(ssi.StatePort))
	err = resetReplicaSet(dialInfo, memberHostPort)
	if err != nil {
		return nil, errors.Annotate(err, "cannot reset replicaSet")
	}

	err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update mongo entries")
	}

	// From here we work with the restored controller.
	mgoInfo, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, errors.Errorf("cannot retrieve info to connect to mongo")
	}

	st, err := newStateConnection(agentConfig.Controller(), agentConfig.Model(), mgoInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer st.Close()

	machine, err := st.Machine(backupMachine.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}

	logger.Infof("updating local machine addresses")
	err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update api server machine addresses")
	}
	// Update the APIHostPorts as well. Under normal circumstances the API
	// Host Ports are only set during bootstrap and by the peergrouper worker.
	// Unfortunately right now, the peer grouper is busy restarting and isn't
	// guaranteed to set the host ports before the remote machines we are
	// about to tell about us. If it doesn't, the remote machine gets its
	// agent.conf file updated with this new machine's IP address, it then
	// starts, and the "api-address-updater" worker asks for the api host
	// ports, and gets told the old IP address of the machine that was backed
	// up. It then writes this incorrect file to its agent.conf file, which
	// causes it to attempt to reconnect to the api server. Unfortunately it
	// now has the wrong address and can never get the correct one.
	// So, we set it explicitly here.
	if err := st.SetAPIHostPorts([][]network.HostPort{APIHostPorts}); err != nil {
		return nil, errors.Annotate(err, "cannot update api server host ports")
	}

	// Update all agents known to the new controller.
	// TODO(perrito666): We should never stop the process because of this;
	// updateAllMachines will not return errors for individual
	// agent update failures.
	models, err := st.AllModels()
	if err != nil {
		return nil, errors.Trace(err)
	}
	machines := []machineModel{}
	for _, model := range models {
		machinesForModel, err := st.AllMachinesFor(model.UUID())
		if err != nil {
			return nil, errors.Trace(err)
		}
		for _, machine := range machinesForModel {
			machines = append(machines, machineModel{machine: machine, model: model})
		}
	}
	logger.Infof("updating other machine addresses")
	if err := updateAllMachines(args.PrivateAddress, args.PublicAddress, machines); err != nil {
		return nil, errors.Annotate(err, "cannot update agents")
	}

	// Mark restoreInfo as Finished so that upon restart of the apiserver
	// the client can reconnect and determine if we were successful.
	info := st.RestoreInfo()
	// In mongo 3.2, even though the backup is made with --oplog, there
	// are stale transactions in this collection.
	if err := info.PurgeTxn(); err != nil {
		return nil, errors.Annotate(err, "cannot purge stale transactions")
	}
	if err = info.SetStatus(state.RestoreFinished); err != nil {
		return nil, errors.Annotate(err, "failed to set status to finished")
	}

	return backupMachine, nil
}
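// A caller-side sketch of Restore. The argument values are hypothetical;
// the RestoreArgs fields shown are exactly the ones Restore reads above.
func exampleRestore(b *backups, backupId string, dbInfo *DBInfo) (names.Tag, error) {
	return b.Restore(backupId, dbInfo, RestoreArgs{
		PrivateAddress: "10.0.0.1",     // new controller's private address
		PublicAddress:  "203.0.113.10", // new controller's public address
		NewInstId:      "i-restore",    // provider instance id of the new machine
		NewInstTag:     names.NewMachineTag("0"),
		NewInstSeries:  "xenial", // must match meta.Origin.Series
	})
}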
// Restore returns a controller to a backed-up state. It:
// * extracts the content of the given backup file,
// * runs mongorestore with the backed-up mongo dump,
// * updates and writes configuration files,
// * updates existing db entries to make sure they hold no references to
//   old instances, and
// * updates config in all agents.
func (b *backups) Restore(backupId string, args RestoreArgs) (names.Tag, error) {
	meta, backupReader, err := b.Get(backupId)
	if err != nil {
		return nil, errors.Annotatef(err, "could not fetch backup %q", backupId)
	}
	defer backupReader.Close()

	workspace, err := NewArchiveWorkspaceReader(backupReader)
	if err != nil {
		return nil, errors.Annotate(err, "cannot unpack backup file")
	}
	defer workspace.Close()

	// TODO(perrito666) Create a compatibility table of sorts.
	version := meta.Origin.Version
	backupMachine := names.NewMachineTag(meta.Origin.Machine)

	if err := mongo.StopService(); err != nil {
		return nil, errors.Annotate(err, "cannot stop mongo to replace files")
	}

	// Delete all the files to be replaced.
	if err := PrepareMachineForRestore(); err != nil {
		return nil, errors.Annotate(err, "cannot delete existing files")
	}
	logger.Infof("deleted old files to place new")

	if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil {
		return nil, errors.Annotate(err, "cannot obtain system files from backup")
	}
	logger.Infof("placed new files")

	var agentConfig agent.ConfigSetterWriter
	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	datadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}
	agentConfigFile := agent.ConfigPath(datadir, backupMachine)
	if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load agent config from disk")
	}
	ssi, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot determine state serving info")
	}
	APIHostPorts := network.NewHostPorts(ssi.APIPort, args.PrivateAddress)
	agentConfig.SetAPIHostPorts([][]network.HostPort{APIHostPorts})
	if err := agentConfig.Write(); err != nil {
		return nil, errors.Annotate(err, "cannot write new agent configuration")
	}
	logger.Infof("wrote new agent config")

	if backupMachine.Id() != "0" {
		logger.Infof("extra work needed: backup belongs to machine %q", backupMachine.String())
		serviceName := "jujud-" + agentConfig.Tag().String()
		aInfo := service.NewMachineAgentInfo(
			agentConfig.Tag().Id(),
			datadir,
			paths.MustSucceed(paths.LogDir(args.NewInstSeries)),
		)

		// TODO(perrito666) renderer should have a RendererForSeries; for the
		// moment restore only works on linuxes.
		renderer, _ := shell.NewRenderer("bash")
		serviceAgentConf := service.AgentConf(aInfo, renderer)
		svc, err := service.NewService(serviceName, serviceAgentConf, args.NewInstSeries)
		if err != nil {
			return nil, errors.Annotate(err, "cannot generate service for the restored agent")
		}
		if err := svc.Install(); err != nil {
			return nil, errors.Annotate(err, "cannot install service for the restored agent")
		}
		logger.Infof("new machine service")
	}

	logger.Infof("mongo service will be reinstalled to ensure its presence")
	if err := ensureMongoService(agentConfig); err != nil {
		return nil, errors.Annotate(err, "failed to reinstall service for juju-db")
	}
	logger.Infof("new mongo will be restored")
	// Restore mongodb from backup.
	if err := placeNewMongoService(workspace.DBDumpDir, version); err != nil {
		return nil, errors.Annotate(err, "error restoring state from backup")
	}

	// Re-start replicaset with the new value for server address.
	dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information")
	}
	logger.Infof("restarting replicaset")
	memberHostPort := net.JoinHostPort(args.PrivateAddress, strconv.Itoa(ssi.StatePort))
	err = resetReplicaSet(dialInfo, memberHostPort)
	if err != nil {
		return nil, errors.Annotate(err, "cannot reset replicaSet")
	}

	err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update mongo entries")
	}

	// From here we work with the restored controller.
	mgoInfo, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, errors.Errorf("cannot retrieve info to connect to mongo")
	}

	st, err := newStateConnection(agentConfig.Model(), mgoInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer st.Close()

	machine, err := st.Machine(backupMachine.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}

	err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update api server machine addresses")
	}

	// Update all agents known to the new controller.
	// TODO(perrito666): We should never stop the process because of this;
	// updateAllMachines will not return errors for individual
	// agent update failures.
	machines, err := st.AllMachines()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err = updateAllMachines(args.PrivateAddress, machines); err != nil {
		return nil, errors.Annotate(err, "cannot update agents")
	}

	info, err := st.RestoreInfoSetter()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Mark restoreInfo as Finished so that upon restart of the apiserver
	// the client can reconnect and determine if we were successful.
	err = info.SetStatus(state.RestoreFinished)
	return backupMachine, errors.Annotate(err, "failed to set status to finished")
}
func (ctx *SimpleContext) DeployUnit(unitName, initialPassword string) (err error) {
	// Check sanity.
	renderer, err := shell.NewRenderer("")
	if err != nil {
		return errors.Trace(err)
	}
	svc, err := ctx.service(unitName, renderer)
	if err != nil {
		return errors.Trace(err)
	}
	installed, err := svc.Installed()
	if err != nil {
		return errors.Trace(err)
	}
	if installed {
		return fmt.Errorf("unit %q is already deployed", unitName)
	}

	// Link the current tools for use by the new agent.
	tag := names.NewUnitTag(unitName)
	dataDir := ctx.agentConfig.DataDir()
	logDir := ctx.agentConfig.LogDir()
	current := version.Binary{
		Number: version.Current,
		Arch:   arch.HostArch(),
		Series: series.HostSeries(),
	}
	toolsDir := tools.ToolsDir(dataDir, tag.String())
	defer removeOnErr(&err, toolsDir)
	_, err = tools.ChangeAgentTools(dataDir, tag.String(), current)
	if err != nil {
		return errors.Trace(err)
	}

	result, err := ctx.api.ConnectionInfo()
	if err != nil {
		return errors.Trace(err)
	}
	logger.Debugf("state addresses: %q", result.StateAddresses)
	logger.Debugf("API addresses: %q", result.APIAddresses)
	containerType := ctx.agentConfig.Value(agent.ContainerType)
	namespace := ctx.agentConfig.Value(agent.Namespace)
	conf, err := agent.NewAgentConfig(
		agent.AgentConfigParams{
			Paths: agent.Paths{
				DataDir:         dataDir,
				LogDir:          logDir,
				MetricsSpoolDir: agent.DefaultPaths.MetricsSpoolDir,
			},
			UpgradedToVersion: version.Current,
			Tag:               tag,
			Password:          initialPassword,
			Nonce:             "unused",
			Model:             ctx.agentConfig.Model(),
			// TODO: remove the state addresses here and test when api only.
			StateAddresses: result.StateAddresses,
			APIAddresses:   result.APIAddresses,
			CACert:         ctx.agentConfig.CACert(),
			Values: map[string]string{
				agent.ContainerType: containerType,
				agent.Namespace:     namespace,
			},
		})
	if err != nil {
		return errors.Trace(err)
	}
	if err := conf.Write(); err != nil {
		return err
	}
	defer removeOnErr(&err, conf.Dir())

	// Install an init service that runs the unit agent.
	if err := service.InstallAndStart(svc); err != nil {
		return errors.Trace(err)
	}
	return nil
}
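// DeployUnit relies on a cleanup-on-error pattern: because err is a named
// return value, the deferred removeOnErr calls see the final error after
// the function body runs, so the tools dir and config dir are removed only
// when the deploy fails. A minimal sketch of how such a helper is commonly
// written (an illustration assuming an os import; not necessarily this
// package's actual definition):
func removeOnErrSketch(err *error, path string) {
	if *err != nil {
		if rmErr := os.RemoveAll(path); rmErr != nil {
			logger.Errorf("installer: cannot remove %q: %v", path, rmErr)
		}
	}
}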
func (s rendererSuite) TestNewRendererUnknown(c *gc.C) {
	_, err := shell.NewRenderer("<unknown OS>")
	c.Check(err, jc.Satisfies, errors.IsNotFound)
}
func (s rendererSuite) TestNewRendererWindows(c *gc.C) {
	renderer, err := shell.NewRenderer("windows")
	c.Assert(err, jc.ErrorIsNil)
	s.checkRenderer(c, renderer, "powershell")
}