"path/filepath"
	"runtime"
	"strings"
	"sync"

	"github.com/juju/loggo"
	"github.com/juju/utils/series"

	"github.com/juju/juju/juju/paths"
)

var (
	// enabledMu serializes access to the package-level wrench state
	// (presumably the enabled flag below — confirm at the use sites).
	enabledMu sync.Mutex
	enabled   = true

	// dataDir is the host's Juju data directory for this series;
	// MustSucceed panics at init time if it cannot be determined.
	dataDir = paths.MustSucceed(paths.DataDir(series.HostSeries()))
	// wrenchDir is the directory checked for "wrench files"
	// (<datadir>/wrench).
	wrenchDir = filepath.Join(dataDir, "wrench")
	// jujuUid is the uid this process runs as, captured once at init.
	jujuUid = os.Getuid()
)

var logger = loggo.GetLogger("juju.wrench")

// IsActive returns true if a "wrench" of a certain category and
// feature should be "dropped in the works".
//
// This function may be called at specific points in the Juju codebase
// to introduce otherwise hard to induce failure modes for the
// purposes of manual or CI testing. The "<juju_datadir>/wrench/"
// directory will be checked for "wrench files" which this function
// looks for.
//
"github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/version" ) var logger = loggo.GetLogger("juju.agent") const ( // UninstallAgentFile is the name of the file inside the data // dir that, if it exists, will cause a machine agent to uninstall // when it receives the termination signal. UninstallAgentFile = "uninstall-agent" ) // These are base values used for the corresponding defaults. var ( logDir = paths.MustSucceed(paths.LogDir(series.HostSeries())) dataDir = paths.MustSucceed(paths.DataDir(series.HostSeries())) confDir = paths.MustSucceed(paths.ConfDir(series.HostSeries())) metricsSpoolDir = paths.MustSucceed(paths.MetricsSpoolDir(series.HostSeries())) ) // Agent exposes the agent's configuration to other components. This // interface should probably be segregated (agent.ConfigGetter and // agent.ConfigChanger?) but YAGNI *currently* advises against same. type Agent interface { // CurrentConfig returns a copy of the agent's configuration. No // guarantees regarding ongoing correctness are made. CurrentConfig() Config // ChangeConfig allows clients to change the agent's configuration
"github.com/juju/utils"

	"github.com/juju/juju/environmentserver/authentication"
	"github.com/juju/juju/juju/paths"
	"github.com/juju/juju/mongo"
	"github.com/juju/juju/network"
	"github.com/juju/juju/state/api"
	"github.com/juju/juju/state/api/params"
	"github.com/juju/juju/version"
)

var logger = loggo.GetLogger("juju.agent")

// logDir is the filesystem path to the location where juju
// may create a folder containing its logs; MustSucceed panics at
// init time if the path cannot be determined for the current series.
var logDir = paths.MustSucceed(paths.LogDir(version.Current.Series))

// dataDir is the default data directory for this running system.
var dataDir = paths.MustSucceed(paths.DataDir(version.Current.Series))

// DefaultLogDir defines the default log directory for juju agents.
// It's defined as a variable so it could be overridden in tests.
// NOTE(review): this builds a filesystem path with path.Join (always
// forward slashes) rather than filepath.Join — fine on *nix; confirm
// intent if Windows agents must be supported by this package.
var DefaultLogDir = path.Join(logDir, "juju")

// DefaultDataDir defines the default data directory for juju agents.
// It's defined as a variable so it could be overridden in tests.
var DefaultDataDir = dataDir

// SystemIdentity is the name of the file where the environment SSH key is kept.
const SystemIdentity = "system-identity"
// Restore handles either returning or creating a controller to a backed up status: // * extracts the content of the given backup file and: // * runs mongorestore with the backed up mongo dump // * updates and writes configuration files // * updates existing db entries to make sure they hold no references to // old instances // * updates config in all agents. func (b *backups) Restore(backupId string, dbInfo *DBInfo, args RestoreArgs) (names.Tag, error) { meta, backupReader, err := b.Get(backupId) if err != nil { return nil, errors.Annotatef(err, "could not fetch backup %q", backupId) } defer backupReader.Close() workspace, err := NewArchiveWorkspaceReader(backupReader) if err != nil { return nil, errors.Annotate(err, "cannot unpack backup file") } defer workspace.Close() // This might actually work, but we don't have a guarantee so we don't allow it. if meta.Origin.Series != args.NewInstSeries { return nil, errors.Errorf("cannot restore a backup made in a machine with series %q into a machine with series %q, %#v", meta.Origin.Series, args.NewInstSeries, meta) } // TODO(perrito666) Create a compatibility table of sorts. vers := meta.Origin.Version if vers.Major != 2 { return nil, errors.Errorf("Juju version %v cannot restore backups made using Juju version %v", version.Current.Minor, vers) } backupMachine := names.NewMachineTag(meta.Origin.Machine) // The path for the config file might change if the tag changed // and also the rest of the path, so we assume as little as possible. 
oldDatadir, err := paths.DataDir(args.NewInstSeries) if err != nil { return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine") } var oldAgentConfig agent.ConfigSetterWriter oldAgentConfigFile := agent.ConfigPath(oldDatadir, args.NewInstTag) if oldAgentConfig, err = agent.ReadConfig(oldAgentConfigFile); err != nil { return nil, errors.Annotate(err, "cannot load old agent config from disk") } logger.Infof("stopping juju-db") if err = mongo.StopService(); err != nil { return nil, errors.Annotate(err, "failed to stop mongo") } // delete all the files to be replaced if err := PrepareMachineForRestore(oldAgentConfig.MongoVersion()); err != nil { return nil, errors.Annotate(err, "cannot delete existing files") } logger.Infof("deleted old files to place new") if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil { return nil, errors.Annotate(err, "cannot obtain system files from backup") } logger.Infof("placed new restore files") var agentConfig agent.ConfigSetterWriter // The path for the config file might change if the tag changed // and also the rest of the path, so we assume as little as possible. 
datadir, err := paths.DataDir(args.NewInstSeries) if err != nil { return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine") } agentConfigFile := agent.ConfigPath(datadir, backupMachine) if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil { return nil, errors.Annotate(err, "cannot load agent config from disk") } ssi, ok := agentConfig.StateServingInfo() if !ok { return nil, errors.Errorf("cannot determine state serving info") } APIHostPorts := network.NewHostPorts(ssi.APIPort, args.PrivateAddress, args.PublicAddress) agentConfig.SetAPIHostPorts([][]network.HostPort{APIHostPorts}) if err := agentConfig.Write(); err != nil { return nil, errors.Annotate(err, "cannot write new agent configuration") } logger.Infof("wrote new agent config for restore") if backupMachine.Id() != "0" { logger.Infof("extra work needed backup belongs to %q machine", backupMachine.String()) serviceName := "jujud-" + agentConfig.Tag().String() aInfo := service.NewMachineAgentInfo( agentConfig.Tag().Id(), dataDir, paths.MustSucceed(paths.LogDir(args.NewInstSeries)), ) // TODO(perrito666) renderer should have a RendererForSeries, for the moment // restore only works on linuxes. 
renderer, _ := shell.NewRenderer("bash") serviceAgentConf := service.AgentConf(aInfo, renderer) svc, err := service.NewService(serviceName, serviceAgentConf, args.NewInstSeries) if err != nil { return nil, errors.Annotate(err, "cannot generate service for the restored agent.") } if err := svc.Install(); err != nil { return nil, errors.Annotate(err, "cannot install service for the restored agent.") } logger.Infof("new machine service") } logger.Infof("mongo service will be reinstalled to ensure its presence") if err := ensureMongoService(agentConfig); err != nil { return nil, errors.Annotate(err, "failed to reinstall service for juju-db") } dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig) if err != nil { return nil, errors.Annotate(err, "cannot produce dial information") } oldDialInfo, err := newDialInfo(args.PrivateAddress, oldAgentConfig) if err != nil { return nil, errors.Annotate(err, "cannot produce dial information for existing mongo") } logger.Infof("new mongo will be restored") mgoVer := agentConfig.MongoVersion() tagUser, tagUserPassword, err := tagUserCredentials(agentConfig) if err != nil { return nil, errors.Trace(err) } rArgs := RestorerArgs{ DialInfo: dialInfo, Version: mgoVer, TagUser: tagUser, TagUserPassword: tagUserPassword, RunCommandFn: runCommand, StartMongo: mongo.StartService, StopMongo: mongo.StopService, NewMongoSession: NewMongoSession, GetDB: GetDB, } // Restore mongodb from backup restorer, err := NewDBRestorer(rArgs) if err != nil { return nil, errors.Annotate(err, "error preparing for restore") } if err := restorer.Restore(workspace.DBDumpDir, oldDialInfo); err != nil { return nil, errors.Annotate(err, "error restoring state from backup") } // Re-start replicaset with the new value for server address logger.Infof("restarting replicaset") memberHostPort := net.JoinHostPort(args.PrivateAddress, strconv.Itoa(ssi.StatePort)) err = resetReplicaSet(dialInfo, memberHostPort) if err != nil { return nil, errors.Annotate(err, 
"cannot reset replicaSet") } err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo) if err != nil { return nil, errors.Annotate(err, "cannot update mongo entries") } // From here we work with the restored controller mgoInfo, ok := agentConfig.MongoInfo() if !ok { return nil, errors.Errorf("cannot retrieve info to connect to mongo") } st, err := newStateConnection(agentConfig.Controller(), agentConfig.Model(), mgoInfo) if err != nil { return nil, errors.Trace(err) } defer st.Close() machine, err := st.Machine(backupMachine.Id()) if err != nil { return nil, errors.Trace(err) } logger.Infof("updating local machine addresses") err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress) if err != nil { return nil, errors.Annotate(err, "cannot update api server machine addresses") } // Update the APIHostPorts as well. Under normal circumstances the API // Host Ports are only set during bootstrap and by the peergrouper worker. // Unfortunately right now, the peer grouper is busy restarting and isn't // guaranteed to set the host ports before the remote machines we are // about to tell about us. If it doesn't, the remote machine gets its // agent.conf file updated with this new machine's IP address, it then // starts, and the "api-address-updater" worker asks for the api host // ports, and gets told the old IP address of the machine that was backed // up. It then writes this incorrect file to its agent.conf file, which // causes it to attempt to reconnect to the api server. Unfortunately it // now has the wrong address and can never get the correct one. // So, we set it explicitly here. if err := st.SetAPIHostPorts([][]network.HostPort{APIHostPorts}); err != nil { return nil, errors.Annotate(err, "cannot update api server host ports") } // update all agents known to the new controller. // TODO(perrito666): We should never stop process because of this. 
// updateAllMachines will not return errors for individual // agent update failures models, err := st.AllModels() if err != nil { return nil, errors.Trace(err) } machines := []machineModel{} for _, model := range models { machinesForModel, err := st.AllMachinesFor(model.UUID()) if err != nil { return nil, errors.Trace(err) } for _, machine := range machinesForModel { machines = append(machines, machineModel{machine: machine, model: model}) } } logger.Infof("updating other machine addresses") if err := updateAllMachines(args.PrivateAddress, args.PublicAddress, machines); err != nil { return nil, errors.Annotate(err, "cannot update agents") } // Mark restoreInfo as Finished so upon restart of the apiserver // the client can reconnect and determine if we where succesful. info := st.RestoreInfo() // In mongo 3.2, even though the backup is made with --oplog, there // are stale transactions in this collection. if err := info.PurgeTxn(); err != nil { return nil, errors.Annotate(err, "cannot purge stale transactions") } if err = info.SetStatus(state.RestoreFinished); err != nil { return nil, errors.Annotate(err, "failed to set status to finished") } return backupMachine, nil }
"github.com/juju/juju/worker/deployer"
	"github.com/juju/juju/worker/gate"
	"github.com/juju/juju/worker/imagemetadataworker"
	"github.com/juju/juju/worker/logsender"
	"github.com/juju/juju/worker/modelworkermanager"
	"github.com/juju/juju/worker/mongoupgrader"
	"github.com/juju/juju/worker/peergrouper"
	"github.com/juju/juju/worker/provisioner"
	"github.com/juju/juju/worker/singular"
	"github.com/juju/juju/worker/txnpruner"
	"github.com/juju/juju/worker/upgradesteps"
)

var (
	logger = loggo.GetLogger("juju.cmd.jujud")

	// Paths to the juju-run and juju-dumplogs binaries for this host's
	// series; MustSucceed panics at init time if they cannot be resolved.
	jujuRun      = paths.MustSucceed(paths.JujuRun(series.HostSeries()))
	jujuDumpLogs = paths.MustSucceed(paths.JujuDumpLogs(series.HostSeries()))

	// The following are defined as variables to allow the tests to
	// intercept calls to the functions.
	useMultipleCPUs       = utils.UseMultipleCPUs
	modelManifolds        = model.Manifolds
	newSingularRunner     = singular.New
	peergrouperNew        = peergrouper.New
	newCertificateUpdater = certupdater.NewCertificateUpdater
	newMetadataUpdater    = imagemetadataworker.NewWorker
	newUpgradeMongoWorker = mongoupgrader.New
	// reportOpenedState is a test hook invoked when state is opened;
	// the default is a no-op.
	reportOpenedState = func(io.Closer) {}
)

// Variable to override in tests, default is true
package api import ( "crypto/x509" "io/ioutil" "os" "path/filepath" "github.com/juju/errors" "github.com/juju/utils/series" "github.com/juju/juju/cert" "github.com/juju/juju/juju/paths" ) var certDir = filepath.FromSlash(paths.MustSucceed(paths.CertDir(series.HostSeries()))) // CreateCertPool creates a new x509.CertPool and adds in the caCert passed // in. All certs from the cert directory (/etc/juju/cert.d on ubuntu) are // also added. func CreateCertPool(caCert string) (*x509.CertPool, error) { pool := x509.NewCertPool() if caCert != "" { xcert, err := cert.ParseCert(caCert) if err != nil { return nil, errors.Trace(err) } pool.AddCert(xcert) }
if cfg.APIInfo.Tag != names.NewMachineTag(cfg.MachineId) { return errors.New("entity tag must match started machine") } if cfg.StateServingInfo != nil { return errors.New("state serving info unexpectedly present") } } if cfg.MachineNonce == "" { return errors.New("missing machine nonce") } return nil } // logDir returns a filesystem path to the location where applications // may create a folder containing logs var logDir = paths.MustSucceed(paths.LogDir(series.HostSeries())) // DefaultBridgeName is the network bridge device name used for LXC and KVM // containers const DefaultBridgeName = "juju-br0" // NewInstanceConfig sets up a basic machine configuration, for a // non-bootstrap node. You'll still need to supply more information, // but this takes care of the fixed entries and the ones that are // always needed. func NewInstanceConfig( machineID, machineNonce, imageStream, series, publicImageSigningKey string,
"strings" "github.com/juju/errors" "github.com/juju/utils" "github.com/juju/utils/series" lxdshared "github.com/lxc/lxd/shared" "github.com/juju/juju/environs" jujupaths "github.com/juju/juju/juju/paths" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" "github.com/juju/juju/tools/lxdclient" ) var ( jujuConfDir = jujupaths.MustSucceed(jujupaths.ConfDir(series.LatestLts())) clientCertPath = path.Join(jujuConfDir, "lxd-client.crt") clientKeyPath = path.Join(jujuConfDir, "lxd-client.key") serverCertPath = path.Join(jujuConfDir, "lxd-server.crt") ) type rawProvider struct { lxdCerts lxdConfig lxdInstances lxdProfiles lxdImages common.Firewaller } type lxdCerts interface {
} return values } // machineInfo is the structure used to pass information between the provider // and the agent running on a node. // When a node is started, the provider code creates a machineInfo object // containing information about the node being started and configures // cloudinit to get a YAML representation of that object written on the node's // filesystem during its first startup. That file is then read by the juju // agent running on the node and converted back into a machineInfo object. type machineInfo struct { Hostname string `yaml:,omitempty` } var maasDataDir = paths.MustSucceed(paths.DataDir(config.LatestLtsSeries())) var _MAASInstanceFilename = path.Join(maasDataDir, "MAASmachine.txt") // cloudinitRunCmd returns the shell command that, when run, will create the // "machine info" file containing the hostname of a machine. // That command is destined to be used by cloudinit. func (info *machineInfo) cloudinitRunCmd(series string) (string, error) { dataDir, err := paths.DataDir(series) if err != nil { return "", err } renderer, err := cloudinit.NewRenderer(series) if err != nil { return "", err }
// Restore handles either returning or creating a controller to a backed up status:
// * extracts the content of the given backup file and:
// * runs mongorestore with the backed up mongo dump
// * updates and writes configuration files
// * updates existing db entries to make sure they hold no references to
//   old instances
// * updates config in all agents.
//
// It returns the tag of the machine the backup was taken on, which is
// the machine the restored controller now identifies as.
func (b *backups) Restore(backupId string, args RestoreArgs) (names.Tag, error) {
	// Fetch the backup metadata and an open reader over the archive.
	meta, backupReader, err := b.Get(backupId)
	if err != nil {
		return nil, errors.Annotatef(err, "could not fetch backup %q", backupId)
	}
	defer backupReader.Close()
	workspace, err := NewArchiveWorkspaceReader(backupReader)
	if err != nil {
		return nil, errors.Annotate(err, "cannot unpack backup file")
	}
	defer workspace.Close()

	// TODO(perrito666) Create a compatibility table of sorts.
	// NOTE(review): this local shadows any "version" package imported by
	// this file for the rest of the function body.
	version := meta.Origin.Version
	backupMachine := names.NewMachineTag(meta.Origin.Machine)

	// Mongo must be stopped before its on-disk files are replaced below.
	if err := mongo.StopService(); err != nil {
		return nil, errors.Annotate(err, "cannot stop mongo to replace files")
	}

	// delete all the files to be replaced
	if err := PrepareMachineForRestore(); err != nil {
		return nil, errors.Annotate(err, "cannot delete existing files")
	}
	logger.Infof("deleted old files to place new")

	if err := workspace.UnpackFilesBundle(filesystemRoot()); err != nil {
		return nil, errors.Annotate(err, "cannot obtain system files from backup")
	}
	logger.Infof("placed new files")

	var agentConfig agent.ConfigSetterWriter
	// The path for the config file might change if the tag changed
	// and also the rest of the path, so we assume as little as possible.
	datadir, err := paths.DataDir(args.NewInstSeries)
	if err != nil {
		return nil, errors.Annotate(err, "cannot determine DataDir for the restored machine")
	}
	agentConfigFile := agent.ConfigPath(datadir, backupMachine)
	if agentConfig, err = agent.ReadConfig(agentConfigFile); err != nil {
		return nil, errors.Annotate(err, "cannot load agent config from disk")
	}
	ssi, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, errors.Errorf("cannot determine state serving info")
	}
	// Point the API host/ports at the restored machine's private address
	// and persist the updated agent configuration.
	APIHostPorts := network.NewHostPorts(ssi.APIPort, args.PrivateAddress)
	agentConfig.SetAPIHostPorts([][]network.HostPort{APIHostPorts})
	if err := agentConfig.Write(); err != nil {
		return nil, errors.Annotate(err, "cannot write new agent configuration")
	}
	logger.Infof("wrote new agent config")

	if backupMachine.Id() != "0" {
		// The backup was taken on a machine other than machine 0, so the
		// init-system service for its agent must be (re)installed here.
		logger.Infof("extra work needed backup belongs to %q machine", backupMachine.String())
		serviceName := "jujud-" + agentConfig.Tag().String()
		aInfo := service.NewMachineAgentInfo(
			agentConfig.Tag().Id(),
			dataDir,
			paths.MustSucceed(paths.LogDir(args.NewInstSeries)),
		)

		// TODO(perrito666) renderer should have a RendererForSeries, for the moment
		// restore only works on linuxes.
		renderer, _ := shell.NewRenderer("bash")
		serviceAgentConf := service.AgentConf(aInfo, renderer)
		svc, err := service.NewService(serviceName, serviceAgentConf, args.NewInstSeries)
		if err != nil {
			return nil, errors.Annotate(err, "cannot generate service for the restored agent.")
		}
		if err := svc.Install(); err != nil {
			return nil, errors.Annotate(err, "cannot install service for the restored agent.")
		}
		logger.Infof("new machine service")
	}

	logger.Infof("mongo service will be reinstalled to ensure its presence")
	if err := ensureMongoService(agentConfig); err != nil {
		return nil, errors.Annotate(err, "failed to reinstall service for juju-db")
	}
	logger.Infof("new mongo will be restored")

	// Restore mongodb from backup
	if err := placeNewMongoService(workspace.DBDumpDir, version); err != nil {
		return nil, errors.Annotate(err, "error restoring state from backup")
	}

	// Re-start replicaset with the new value for server address
	dialInfo, err := newDialInfo(args.PrivateAddress, agentConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot produce dial information")
	}
	logger.Infof("restarting replicaset")
	memberHostPort := net.JoinHostPort(args.PrivateAddress, strconv.Itoa(ssi.StatePort))
	err = resetReplicaSet(dialInfo, memberHostPort)
	if err != nil {
		return nil, errors.Annotate(err, "cannot reset replicaSet")
	}

	err = updateMongoEntries(args.NewInstId, args.NewInstTag.Id(), backupMachine.Id(), dialInfo)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update mongo entries")
	}

	// From here we work with the restored controller
	mgoInfo, ok := agentConfig.MongoInfo()
	if !ok {
		return nil, errors.Errorf("cannot retrieve info to connect to mongo")
	}
	st, err := newStateConnection(agentConfig.Model(), mgoInfo)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer st.Close()

	machine, err := st.Machine(backupMachine.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}
	err = updateMachineAddresses(machine, args.PrivateAddress, args.PublicAddress)
	if err != nil {
		return nil, errors.Annotate(err, "cannot update api server machine addresses")
	}

	// update all agents known to the new controller.
	// TODO(perrito666): We should never stop process because of this.
	// updateAllMachines will not return errors for individual
	// agent update failures
	machines, err := st.AllMachines()
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err = updateAllMachines(args.PrivateAddress, machines); err != nil {
		return nil, errors.Annotate(err, "cannot update agents")
	}

	info, err := st.RestoreInfoSetter()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Mark restoreInfo as Finished so upon restart of the apiserver
	// the client can reconnect and determine if we were successful.
	err = info.SetStatus(state.RestoreFinished)
	return backupMachine, errors.Annotate(err, "failed to set status to finished")
}
package api import ( "crypto/x509" "io/ioutil" "os" "path/filepath" "github.com/juju/errors" "github.com/juju/juju/cert" "github.com/juju/juju/juju/paths" "github.com/juju/juju/version" ) var certDir = filepath.FromSlash(paths.MustSucceed(paths.CertDir(version.Current.Series))) // CreateCertPool creates a new x509.CertPool and adds in the caCert passed // in. All certs from the cert directory (/etc/juju/cert.d on ubuntu) are // also added. func CreateCertPool(caCert string) (*x509.CertPool, error) { pool := x509.NewCertPool() if caCert != "" { xcert, err := cert.ParseCert(caCert) if err != nil { return nil, errors.Trace(err) } pool.AddCert(xcert) }
"github.com/juju/juju/worker/terminationworker" "github.com/juju/juju/worker/upgrader" ) var logger = loggo.GetLogger("juju.cmd.jujud") var newRunner = worker.NewRunner const bootstrapMachineId = "0" // eitherState can be either a *state.State or a *api.State. type eitherState interface{} var ( retryDelay = 3 * time.Second jujuRun = paths.MustSucceed(paths.JujuRun(version.Current.Series)) useMultipleCPUs = utils.UseMultipleCPUs // The following are defined as variables to // allow the tests to intercept calls to the functions. ensureMongoServer = mongo.EnsureServer maybeInitiateMongoServer = peergrouper.MaybeInitiateMongoServer ensureMongoAdminUser = mongo.EnsureAdminUser newSingularRunner = singular.New peergrouperNew = peergrouper.New newNetworker = networker.NewNetworker newSafeNetworker = networker.NewSafeNetworker // reportOpenedAPI is exposed for tests to know when // the State has been successfully opened. reportOpenedState = func(eitherState) {}
"github.com/juju/juju/agent" apirsyslog "github.com/juju/juju/api/rsyslog" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/juju/paths" "github.com/juju/juju/mongo" "github.com/juju/juju/state" "github.com/juju/juju/version" "github.com/juju/juju/worker" "github.com/juju/juju/worker/rsyslog" "github.com/juju/juju/worker/upgrader" ) var ( logger = loggo.GetLogger("juju.cmd.jujud.util") DataDir = paths.MustSucceed(paths.DataDir(version.Current.Series)) EnsureMongoServer = mongo.EnsureServer ) // requiredError is useful when complaining about missing command-line options. func RequiredError(name string) error { return fmt.Errorf("--%s option must be set", name) } // IsFatal determines if an error is fatal to the process. func IsFatal(err error) bool { err = errors.Cause(err) switch err { case worker.ErrTerminateAgent, worker.ErrRebootMachine, worker.ErrShutdownMachine: return true }
"github.com/juju/utils/shell" "github.com/juju/juju/api" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/juju/paths" "github.com/juju/juju/mongo" "github.com/juju/juju/network" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/version" ) var logger = loggo.GetLogger("juju.agent") // These are base values used for the corresponding defaults. var ( logDir = paths.MustSucceed(paths.LogDir(version.Current.Series)) dataDir = paths.MustSucceed(paths.DataDir(version.Current.Series)) confDir = paths.MustSucceed(paths.ConfDir(version.Current.Series)) ) // Agent exposes the agent's configuration to other components. This // interface should probably be segregated (agent.ConfigGetter and // agent.ConfigChanger?) but YAGNI *currently* advises against same. type Agent interface { // CurrentConfig returns a copy of the agent's configuration. No // guarantees regarding ongoing correctness are made. CurrentConfig() Config // ChangeConfig allows clients to change the agent's configuration // by supplying a callback that applies the changes.
} return values } // machineInfo is the structure used to pass information between the provider // and the agent running on a node. // When a node is started, the provider code creates a machineInfo object // containing information about the node being started and configures // cloudinit to get a YAML representation of that object written on the node's // filesystem during its first startup. That file is then read by the juju // agent running on the node and converted back into a machineInfo object. type machineInfo struct { Hostname string `yaml:",omitempty"` } var maasDataDir = paths.MustSucceed(paths.DataDir(series.LatestLts())) var _MAASInstanceFilename = path.Join(maasDataDir, "MAASmachine.txt") // cloudinitRunCmd returns the shell command that, when run, will create the // "machine info" file containing the hostname of a machine. // That command is destined to be used by cloudinit. func (info *machineInfo) cloudinitRunCmd(cloudcfg cloudinit.CloudConfig) (string, error) { dataDir, err := paths.DataDir(cloudcfg.GetSeries()) if err != nil { return "", errors.Trace(err) } yaml, err := goyaml.Marshal(info) if err != nil { return "", errors.Trace(err) } renderer := cloudcfg.ShellRenderer()