// WaitForModels will wait for the models to bring themselves down nicely. // It will return the UUIDs of any models that need to be removed forceably. func (c *killCommand) WaitForModels(ctx *cmd.Context, api destroyControllerAPI, uuid string) error { thirtySeconds := (time.Second * 30) updateStatus := newTimedStatusUpdater(ctx, api, uuid, c.clock) ctrStatus, modelsStatus := updateStatus(0) lastStatus := ctrStatus lastChange := c.clock.Now().Truncate(time.Second) deadline := lastChange.Add(c.timeout) for ; hasUnDeadModels(modelsStatus) && (deadline.After(c.clock.Now())); ctrStatus, modelsStatus = updateStatus(5 * time.Second) { now := c.clock.Now().Truncate(time.Second) if ctrStatus != lastStatus { lastStatus = ctrStatus lastChange = now deadline = lastChange.Add(c.timeout) } timeSinceLastChange := now.Sub(lastChange) timeUntilDestruction := deadline.Sub(now) warning := "" // We want to show the warning if it has been more than 30 seconds since // the last change, or we are within 30 seconds of our timeout. if timeSinceLastChange > thirtySeconds || timeUntilDestruction < thirtySeconds { warning = fmt.Sprintf(", will kill machines directly in %s", timeUntilDestruction) } ctx.Infof("%s%s", fmtCtrStatus(ctrStatus), warning) for _, modelStatus := range modelsStatus { ctx.Verbosef(fmtModelStatus(modelStatus)) } } if hasUnDeadModels(modelsStatus) { return errors.New("timed out") } else { ctx.Infof("All hosted models reclaimed, cleaning up controller machines") } return nil }
// Run implements Command.Run func (c *destroyCommand) Run(ctx *cmd.Context) error { store, err := configstore.Default() if err != nil { return errors.Annotate(err, "cannot open controller info storage") } cfgInfo, err := store.ReadInfo(c.ModelName()) if err != nil { return errors.Annotate(err, "cannot read controller info") } // Verify that we're destroying a controller apiEndpoint := cfgInfo.APIEndpoint() if apiEndpoint.ServerUUID != "" && apiEndpoint.ModelUUID != apiEndpoint.ServerUUID { return errors.Errorf("%q is not a controller; use juju model destroy to destroy it", c.ModelName()) } if !c.assumeYes { if err = confirmDestruction(ctx, c.ModelName()); err != nil { return err } } // Attempt to connect to the API. If we can't, fail the destroy. Users will // need to use the controller kill command if we can't connect. api, err := c.getControllerAPI() if err != nil { return c.ensureUserFriendlyErrorLog(errors.Annotate(err, "cannot connect to API"), ctx, nil) } defer api.Close() // Obtain bootstrap / controller environ information controllerEnviron, err := c.getControllerEnviron(cfgInfo, api) if err != nil { return errors.Annotate(err, "cannot obtain bootstrap information") } // Attempt to destroy the controller. err = api.DestroyController(c.destroyEnvs) if err != nil { return c.ensureUserFriendlyErrorLog(errors.Annotate(err, "cannot destroy controller"), ctx, api) } ctx.Infof("Destroying controller %q", c.ModelName()) if c.destroyEnvs { ctx.Infof("Waiting for hosted model resources to be reclaimed.") updateStatus := newTimedStatusUpdater(ctx, api, apiEndpoint.ModelUUID) for ctrStatus, envsStatus := updateStatus(0); hasUnDeadEnvirons(envsStatus); ctrStatus, envsStatus = updateStatus(2 * time.Second) { ctx.Infof(fmtCtrStatus(ctrStatus)) for _, envStatus := range envsStatus { ctx.Verbosef(fmtEnvStatus(envStatus)) } } ctx.Infof("All hosted models reclaimed, cleaning up controller machines") } return environs.Destroy(controllerEnviron, store) }
// Run implements Command.Run func (c *killCommand) Run(ctx *cmd.Context) error { controllerName := c.ControllerName() store := c.ClientStore() if !c.assumeYes { if err := confirmDestruction(ctx, controllerName); err != nil { return err } } // Attempt to connect to the API. api, err := c.getControllerAPI() switch { case err == nil: defer api.Close() case errors.Cause(err) == common.ErrPerm: return errors.Annotate(err, "cannot destroy controller") default: if errors.Cause(err) != modelcmd.ErrConnTimedOut { logger.Debugf("unable to open api: %s", err) } ctx.Infof("Unable to open API: %s\n", err) api = nil } // Obtain controller environ so we can clean up afterwards. controllerEnviron, err := c.getControllerEnviron(ctx, store, controllerName, api) if err != nil { return errors.Annotate(err, "getting controller environ") } // If we were unable to connect to the API, just destroy the controller through // the environs interface. if api == nil { ctx.Infof("Unable to connect to the API server. Destroying through provider.") return environs.Destroy(controllerName, controllerEnviron, store) } // Attempt to destroy the controller and all environments. err = api.DestroyController(true) if err != nil { ctx.Infof("Unable to destroy controller through the API: %s. Destroying through provider.", err) return environs.Destroy(controllerName, controllerEnviron, store) } ctx.Infof("Destroying controller %q\nWaiting for resources to be reclaimed", controllerName) updateStatus := newTimedStatusUpdater(ctx, api, controllerEnviron.Config().UUID()) for ctrStatus, envsStatus := updateStatus(0); hasUnDeadModels(envsStatus); ctrStatus, envsStatus = updateStatus(2 * time.Second) { ctx.Infof(fmtCtrStatus(ctrStatus)) for _, envStatus := range envsStatus { ctx.Verbosef(fmtModelStatus(envStatus)) } } ctx.Infof("All hosted models reclaimed, cleaning up controller machines") return environs.Destroy(controllerName, controllerEnviron, store) }
// WaitForAgentInitialisation polls the bootstrapped controller with a read-only
// command which will fail until the controller is fully initialised.
// TODO(wallyworld) - add a bespoke command to maybe the admin facade for this purpose.
func WaitForAgentInitialisation(ctx *cmd.Context, c *modelcmd.ModelCommandBase, controllerName, hostedModelName string) error {
	// TODO(katco): 2016-08-09: lp:1611427
	attempts := utils.AttemptStrategy{
		Min:   bootstrapReadyPollCount,
		Delay: bootstrapReadyPollDelay,
	}
	var (
		apiAttempts int
		err         error
	)

	// Make a best effort to find the new controller address so we can print it.
	// Any error here is deliberately ignored: the address is informational only.
	addressInfo := ""
	controller, err := c.ClientStore().ControllerByName(controllerName)
	if err == nil && len(controller.APIEndpoints) > 0 {
		addr, err := network.ParseHostPort(controller.APIEndpoints[0])
		if err == nil {
			addressInfo = fmt.Sprintf(" at %s", addr.Address.Value)
		}
	}

	ctx.Infof("Contacting Juju controller%s to verify accessibility...", addressInfo)
	apiAttempts = 1
	// Keep polling until the API responds, a non-retryable error occurs,
	// or the attempt strategy is exhausted.
	for attempt := attempts.Start(); attempt.Next(); apiAttempts++ {
		err = tryAPI(c)
		if err == nil {
			ctx.Infof("Bootstrap complete, %q controller now available.", controllerName)
			ctx.Infof("Controller machines are in the %q model.", bootstrap.ControllerModelName)
			ctx.Infof("Initial model %q added.", hostedModelName)
			break
		}
		// As the API server is coming up, it goes through a number of steps.
		// Initially the upgrade steps run, but the api server allows some
		// calls to be processed during the upgrade, but not the list blocks.
		// Logins are also blocked during space discovery.
		// It is also possible that the underlying database causes connections
		// to be dropped as it is initialising, or reconfiguring. These can
		// lead to EOF or "connection is shut down" error messages. We skip
		// these too, hoping that things come back up before the end of the
		// retry poll count.
		errorMessage := errors.Cause(err).Error()
		switch {
		case errors.Cause(err) == io.EOF,
			strings.HasSuffix(errorMessage, "connection is shut down"),
			strings.HasSuffix(errorMessage, "no api connection available"),
			strings.Contains(errorMessage, "spaces are still being discovered"):
			// Transient startup conditions, matched by error text — retry.
			ctx.Verbosef("Still waiting for API to become available")
			continue
		case params.ErrCode(err) == params.CodeUpgradeInProgress:
			ctx.Verbosef("Still waiting for API to become available: %v", err)
			continue
		}
		// Any other error is treated as fatal: stop retrying.
		break
	}
	// Annotatef returns nil when err is nil, so success falls through cleanly.
	return errors.Annotatef(err, "unable to contact api server after %d attempts", apiAttempts)
}
// Run implements Command.Run func (c *destroyCommand) Run(ctx *cmd.Context) error { controllerName := c.ControllerName() store := c.ClientStore() controllerDetails, err := store.ControllerByName(controllerName) if err != nil { return errors.Annotate(err, "cannot read controller info") } if !c.assumeYes { if err = confirmDestruction(ctx, c.ControllerName()); err != nil { return err } } // Attempt to connect to the API. If we can't, fail the destroy. Users will // need to use the controller kill command if we can't connect. api, err := c.getControllerAPI() if err != nil { return c.ensureUserFriendlyErrorLog(errors.Annotate(err, "cannot connect to API"), ctx, nil) } defer api.Close() // Obtain controller environ so we can clean up afterwards. controllerEnviron, err := c.getControllerEnviron(store, controllerName, api) if err != nil { return errors.Annotate(err, "getting controller environ") } // Attempt to destroy the controller. err = api.DestroyController(c.destroyModels) if err != nil { return c.ensureUserFriendlyErrorLog(errors.Annotate(err, "cannot destroy controller"), ctx, api) } ctx.Infof("Destroying controller %q", c.ControllerName()) if c.destroyModels { ctx.Infof("Waiting for hosted model resources to be reclaimed.") updateStatus := newTimedStatusUpdater(ctx, api, controllerDetails.ControllerUUID) for ctrStatus, modelsStatus := updateStatus(0); hasUnDeadModels(modelsStatus); ctrStatus, modelsStatus = updateStatus(2 * time.Second) { ctx.Infof(fmtCtrStatus(ctrStatus)) for _, model := range modelsStatus { ctx.Verbosef(fmtModelStatus(model)) } } ctx.Infof("All hosted models reclaimed, cleaning up controller machines") } return environs.Destroy(c.ControllerName(), controllerEnviron, store) }
// watchDebugLog1dot18 runs in case of an older API server and uses ssh // but with server-side grep. func (c *DebugLogCommand) watchDebugLog1dot18(ctx *cmd.Context) error { ctx.Infof("Server does not support new stream log, falling back to tail") ctx.Verbosef("filters are not supported with tail") sshCmd := &SSHCommand{} tailCmd := fmt.Sprintf("tail -n -%d -f %s", c.params.Backlog, DefaultLogLocation) // If the api doesn't support WatchDebugLog, then it won't be running in // HA either, so machine 0 is where it is all at. args := []string{"0", tailCmd} err := sshCmd.Init(args) if err != nil { return err } sshCmd.EnvName = c.EnvName return runSSHCommand(sshCmd, ctx) }
// ReadAuthorizedKeys implements the standard juju behaviour for finding // authorized_keys. It returns a set of keys in in authorized_keys format // (see sshd(8) for a description). If path is non-empty, it names the // file to use; otherwise the user's .ssh directory will be searched. // Home directory expansion will be performed on the path if it starts with // a ~; if the expanded path is relative, it will be interpreted relative // to $HOME/.ssh. // // The result of utils/ssh.PublicKeyFiles will always be prepended to the // result. In practice, this means ReadAuthorizedKeys never returns an // error when the call originates in the CLI. // // If no SSH keys are found, ReadAuthorizedKeys returns // ErrNoAuthorizedKeys. func ReadAuthorizedKeys(ctx *cmd.Context, path string) (string, error) { files := ssh.PublicKeyFiles() if path == "" { files = append(files, "id_dsa.pub", "id_rsa.pub", "identity.pub") } else { files = append(files, path) } var firstError error var keyData []byte for _, f := range files { f, err := utils.NormalizePath(f) if err != nil { if firstError == nil { firstError = err } continue } if !filepath.IsAbs(f) { f = filepath.Join(utils.Home(), ".ssh", f) } data, err := ioutil.ReadFile(f) if err != nil { if firstError == nil && !os.IsNotExist(err) { firstError = err } continue } keyData = append(keyData, bytes.Trim(data, "\n")...) keyData = append(keyData, '\n') ctx.Verbosef("Adding contents of %q to authorized-keys", f) } if len(keyData) == 0 { if firstError == nil { firstError = ErrNoAuthorizedKeys } return "", firstError } return string(keyData), nil }
// Run connects to the environment specified on the command line and bootstraps
// a juju in that environment if none already exists. If there is as yet no environments.yaml file,
// the user is informed how to create one.
func (c *bootstrapCommand) Run(ctx *cmd.Context) (resultErr error) {
	bootstrapFuncs := getBootstrapFuncs()

	// Get the cloud definition identified by c.Cloud. If c.Cloud does not
	// identify a cloud in clouds.yaml, but is the name of a provider, and
	// that provider implements environs.CloudRegionDetector, we'll
	// synthesise a Cloud structure with the detected regions and no auth-
	// types.
	cloud, err := jujucloud.CloudByName(c.Cloud)
	if errors.IsNotFound(err) {
		ctx.Verbosef("cloud %q not found, trying as a provider name", c.Cloud)
		provider, err := environs.Provider(c.Cloud)
		if errors.IsNotFound(err) {
			return errors.NewNotFound(nil, fmt.Sprintf("unknown cloud %q, please try %q", c.Cloud, "juju update-clouds"))
		} else if err != nil {
			return errors.Trace(err)
		}
		detector, ok := provider.(environs.CloudRegionDetector)
		if !ok {
			ctx.Verbosef(
				"provider %q does not support detecting regions",
				c.Cloud,
			)
			return errors.NewNotFound(nil, fmt.Sprintf("unknown cloud %q, please try %q", c.Cloud, "juju update-clouds"))
		}
		regions, err := detector.DetectRegions()
		if err != nil && !errors.IsNotFound(err) {
			// It's not an error to have no regions.
			return errors.Annotatef(err,
				"detecting regions for %q cloud provider",
				c.Cloud,
			)
		}
		cloud = &jujucloud.Cloud{
			Type:    c.Cloud,
			Regions: regions,
		}
	} else if err != nil {
		return errors.Trace(err)
	}
	if err := checkProviderType(cloud.Type); errors.IsNotFound(err) {
		// This error will get handled later.
	} else if err != nil {
		return errors.Trace(err)
	}

	// Get the credentials and region name.
	store := c.ClientStore()
	credential, credentialName, regionName, err := modelcmd.GetCredentials(
		store, c.Region, c.CredentialName, c.Cloud, cloud.Type,
	)
	if errors.IsNotFound(err) && c.CredentialName == "" {
		// No credential was explicitly specified, and no credential
		// was found in credentials.yaml; have the provider detect
		// credentials from the environment.
		ctx.Verbosef("no credentials found, checking environment")
		detected, err := modelcmd.DetectCredential(c.Cloud, cloud.Type)
		if errors.Cause(err) == modelcmd.ErrMultipleCredentials {
			return ambiguousCredentialError
		} else if err != nil {
			return errors.Trace(err)
		}
		// We have one credential so extract it from the map.
		// (The empty-bodied range loop grabs the single map value.)
		var oneCredential jujucloud.Credential
		for _, oneCredential = range detected.AuthCredentials {
		}
		credential = &oneCredential
		regionName = c.Region
		if regionName == "" {
			regionName = detected.DefaultRegion
		}
		logger.Tracef("authenticating with region %q and %v", regionName, credential)
	} else if err != nil {
		return errors.Trace(err)
	}

	region, err := getRegion(cloud, c.Cloud, regionName)
	if err != nil {
		return errors.Trace(err)
	}

	hostedModelUUID, err := utils.NewUUID()
	if err != nil {
		return errors.Trace(err)
	}
	controllerUUID, err := utils.NewUUID()
	if err != nil {
		return errors.Trace(err)
	}

	// Create an environment config from the cloud and credentials.
	configAttrs := map[string]interface{}{
		"type":                   cloud.Type,
		"name":                   environs.ControllerModelName,
		config.UUIDKey:           controllerUUID.String(),
		config.ControllerUUIDKey: controllerUUID.String(),
	}
	userConfigAttrs, err := c.config.ReadAttrs(ctx)
	if err != nil {
		return errors.Trace(err)
	}
	for k, v := range userConfigAttrs {
		configAttrs[k] = v
	}
	logger.Debugf("preparing controller with config: %v", configAttrs)

	// Read existing current controller, account, model so we can clean up on error.
	var oldCurrentController string
	oldCurrentController, err = modelcmd.ReadCurrentController()
	if err != nil {
		return errors.Annotate(err, "error reading current controller")
	}

	// On failure (other than "already exists"), restore the previous
	// current controller and remove the half-created controller's details.
	defer func() {
		if resultErr == nil || errors.IsAlreadyExists(resultErr) {
			return
		}
		if oldCurrentController != "" {
			if err := modelcmd.WriteCurrentController(oldCurrentController); err != nil {
				logger.Warningf(
					"cannot reset current controller to %q: %v",
					oldCurrentController, err,
				)
			}
		}
		if err := store.RemoveController(c.controllerName); err != nil {
			logger.Warningf(
				"cannot destroy newly created controller %q details: %v",
				c.controllerName, err,
			)
		}
	}()

	environ, err := environsPrepare(
		modelcmd.BootstrapContext(ctx), store, environs.PrepareParams{
			BaseConfig:           configAttrs,
			ControllerName:       c.controllerName,
			CloudName:            c.Cloud,
			CloudRegion:          region.Name,
			CloudEndpoint:        region.Endpoint,
			CloudStorageEndpoint: region.StorageEndpoint,
			Credential:           *credential,
			CredentialName:       credentialName,
		},
	)
	if err != nil {
		return errors.Trace(err)
	}

	// Set the current model to the initial hosted model.
	accountName, err := store.CurrentAccount(c.controllerName)
	if err != nil {
		return errors.Trace(err)
	}
	if err := store.UpdateModel(c.controllerName, accountName, c.hostedModelName, jujuclient.ModelDetails{
		hostedModelUUID.String(),
	}); err != nil {
		return errors.Trace(err)
	}
	if err := store.SetCurrentModel(c.controllerName, accountName, c.hostedModelName); err != nil {
		return errors.Trace(err)
	}

	// Set the current controller so "juju status" can be run while
	// bootstrapping is underway.
	if err := modelcmd.WriteCurrentController(c.controllerName); err != nil {
		return errors.Trace(err)
	}

	cloudRegion := c.Cloud
	if region.Name != "" {
		cloudRegion = fmt.Sprintf("%s/%s", cloudRegion, region.Name)
	}
	ctx.Infof(
		"Creating Juju controller %q on %s",
		c.controllerName, cloudRegion,
	)

	// If we error out for any reason, clean up the environment.
	defer func() {
		if resultErr != nil {
			if c.KeepBrokenEnvironment {
				logger.Warningf(`
bootstrap failed but --keep-broken was specified so model is not being destroyed.
When you are finished diagnosing the problem, remember to run juju destroy-model --force
to clean up the model.`[1:])
			} else {
				handleBootstrapError(ctx, resultErr, func() error {
					return environsDestroy(
						c.controllerName, environ, store,
					)
				})
			}
		}
	}()

	// Block interruption during bootstrap. Providers may also
	// register for interrupt notification so they can exit early.
	interrupted := make(chan os.Signal, 1)
	defer close(interrupted)
	ctx.InterruptNotify(interrupted)
	defer ctx.StopInterruptNotify(interrupted)
	go func() {
		for _ = range interrupted {
			ctx.Infof("Interrupt signalled: waiting for bootstrap to exit")
		}
	}()

	// If --metadata-source is specified, override the default tools metadata source so
	// SyncTools can use it, and also upload any image metadata.
	var metadataDir string
	if c.MetadataSource != "" {
		metadataDir = ctx.AbsPath(c.MetadataSource)
	}

	// Merge environ and bootstrap-specific constraints.
	constraintsValidator, err := environ.ConstraintsValidator()
	if err != nil {
		return errors.Trace(err)
	}
	bootstrapConstraints, err := constraintsValidator.Merge(
		c.Constraints, c.BootstrapConstraints,
	)
	if err != nil {
		return errors.Trace(err)
	}
	logger.Infof("combined bootstrap constraints: %v", bootstrapConstraints)

	hostedModelConfig := map[string]interface{}{
		"name":         c.hostedModelName,
		config.UUIDKey: hostedModelUUID.String(),
	}

	// We copy across any user supplied attributes to the hosted model config.
	// But only if the attributes have not been removed from the controller
	// model config as part of preparing the controller model.
	controllerConfigAttrs := environ.Config().AllAttrs()
	for k, v := range userConfigAttrs {
		if _, ok := controllerConfigAttrs[k]; ok {
			hostedModelConfig[k] = v
		}
	}
	// Ensure that certain config attributes are not included in the hosted
	// model config. These attributes may be modified during bootstrap; by
	// removing them from this map, we ensure the modified values are
	// inherited.
	delete(hostedModelConfig, config.AuthKeysConfig)
	delete(hostedModelConfig, config.AgentVersionKey)

	// Check whether the Juju GUI must be installed in the controller.
	// Leaving this value empty means no GUI will be installed.
	var guiDataSourceBaseURL string
	if !c.noGUI {
		guiDataSourceBaseURL = common.GUIDataSourceBaseURL()
	}

	err = bootstrapFuncs.Bootstrap(modelcmd.BootstrapContext(ctx), environ, bootstrap.BootstrapParams{
		ModelConstraints:     c.Constraints,
		BootstrapConstraints: bootstrapConstraints,
		BootstrapSeries:      c.BootstrapSeries,
		BootstrapImage:       c.BootstrapImage,
		Placement:            c.Placement,
		UploadTools:          c.UploadTools,
		AgentVersion:         c.AgentVersion,
		MetadataDir:          metadataDir,
		HostedModelConfig:    hostedModelConfig,
		GUIDataSourceBaseURL: guiDataSourceBaseURL,
	})
	if err != nil {
		return errors.Annotate(err, "failed to bootstrap model")
	}

	if err := c.SetModelName(c.hostedModelName); err != nil {
		return errors.Trace(err)
	}
	err = c.setBootstrapEndpointAddress(environ)
	if err != nil {
		return errors.Annotate(err, "saving bootstrap endpoint address")
	}

	// To avoid race conditions when running scripted bootstraps, wait
	// for the controller's machine agent to be ready to accept commands
	// before exiting this bootstrap command.
	return c.waitForAgentInitialisation(ctx)
}
// Run changes the version proposed for the juju envtools.
func (c *upgradeJujuCommand) Run(ctx *cmd.Context) (err error) {
	client, err := getUpgradeJujuAPI(c)
	if err != nil {
		return err
	}
	defer client.Close()
	modelConfigClient, err := getModelConfigAPI(c)
	if err != nil {
		return err
	}
	defer modelConfigClient.Close()
	controllerClient, err := getControllerAPI(c)
	if err != nil {
		return err
	}
	defer controllerClient.Close()
	// errUpToDate is informational, not a failure: report it and clear the
	// named return value so the command exits successfully.
	defer func() {
		if err == errUpToDate {
			ctx.Infof(err.Error())
			err = nil
		}
	}()

	// Determine the version to upgrade to, uploading tools if necessary.
	attrs, err := modelConfigClient.ModelGet()
	if err != nil {
		return err
	}
	cfg, err := config.New(config.NoDefaults, attrs)
	if err != nil {
		return err
	}

	controllerModelConfig, err := controllerClient.ModelConfig()
	if err != nil {
		return err
	}
	isControllerModel := cfg.UUID() == controllerModelConfig[config.UUIDKey]
	if c.BuildAgent && !isControllerModel {
		// For UploadTools, model must be the "controller" model,
		// that is, modelUUID == controllerUUID
		return errors.Errorf("--build-agent can only be used with the controller model")
	}

	agentVersion, ok := cfg.AgentVersion()
	if !ok {
		// Can't happen. In theory.
		return errors.New("incomplete model configuration")
	}

	if c.BuildAgent && c.Version == version.Zero {
		// Currently, uploading tools assumes the version to be
		// the same as jujuversion.Current if not specified with
		// --agent-version.
		c.Version = jujuversion.Current
	}
	warnCompat := false

	// Classify the requested upgrade relative to the running agent version.
	switch {
	case !canUpgradeRunningVersion(agentVersion):
		// This version of upgrade-juju cannot upgrade the running
		// environment version (can't guarantee API compatibility).
		return errors.Errorf("cannot upgrade a %s model with a %s client",
			agentVersion, jujuversion.Current)
	case c.Version != version.Zero && c.Version.Major < agentVersion.Major:
		// The specified version would downgrade the environment.
		// Don't upgrade and return an error.
		return errors.Errorf(downgradeErrMsg, agentVersion, c.Version)
	case agentVersion.Major != jujuversion.Current.Major:
		// Running environment is the previous major version (a higher major
		// version wouldn't have passed the check in canUpgradeRunningVersion).
		if c.Version == version.Zero || c.Version.Major == agentVersion.Major {
			// Not requesting an upgrade across major release boundary.
			// Warn of incompatible CLI and filter on the prior major version
			// when searching for available tools.
			// TODO(cherylj) Add in a suggestion to upgrade to 2.0 if
			// no matching tools are found (bug 1532670)
			warnCompat = true
			break
		}
		// User requested an upgrade to the next major version.
		// Fallthrough to the next case to verify that the upgrade
		// conditions are met.
		fallthrough
	case c.Version.Major > agentVersion.Major:
		// User is requesting an upgrade to a new major number
		// Only upgrade to a different major number if:
		// 1 - Explicitly requested with --agent-version or using --build-agent, and
		// 2 - The environment is running a valid version to upgrade from, and
		// 3 - The upgrade is to a minor version of 0.
		minVer, ok := c.minMajorUpgradeVersion[c.Version.Major]
		if !ok {
			return errors.Errorf("unknown version %q", c.Version)
		}
		retErr := false
		if c.Version.Minor != 0 {
			ctx.Infof("upgrades to %s must first go through juju %d.0",
				c.Version, c.Version.Major)
			retErr = true
		}
		if comp := agentVersion.Compare(minVer); comp < 0 {
			ctx.Infof("upgrades to a new major version must first go through %s",
				minVer)
			retErr = true
		}
		if retErr {
			return errors.New("unable to upgrade to requested version")
		}
	}

	context, err := c.initVersions(client, cfg, agentVersion, warnCompat)
	if err != nil {
		return err
	}
	// If we're running a custom build or the user has asked for a new agent
	// to be built, upload a local jujud binary if possible.
	uploadLocalBinary := isControllerModel && c.Version == version.Zero && tryImplicitUpload(agentVersion)
	if !warnCompat && (uploadLocalBinary || c.BuildAgent) && !c.DryRun {
		if err := context.uploadTools(c.BuildAgent); err != nil {
			// If we've explicitly asked to build an agent binary, or the upload failed
			// because changes were blocked, we'll return an error.
			if err2 := block.ProcessBlockedError(err, block.BlockChange); c.BuildAgent || err2 == cmd.ErrSilent {
				return err2
			}
		}
		builtMsg := ""
		if c.BuildAgent {
			builtMsg = " (built from source)"
		}
		fmt.Fprintf(ctx.Stdout, "no prepackaged tools available, using local agent binary %v%s\n", context.chosen, builtMsg)
	}
	// If there was an error implicitly uploading a binary, we'll still look for any packaged binaries
	// since there may still be a valid upgrade and the user didn't ask for any local binary.
	if err := context.validate(); err != nil {
		return err
	}
	// TODO(fwereade): this list may be incomplete, pending envtools.Upload change.
	ctx.Verbosef("available tools:\n%s", formatTools(context.tools))
	ctx.Verbosef("best version:\n %s", context.chosen)
	if warnCompat {
		fmt.Fprintf(ctx.Stderr, "version %s incompatible with this client (%s)\n", context.chosen, jujuversion.Current)
	}
	if c.DryRun {
		fmt.Fprintf(ctx.Stderr, "upgrade to this version by running\n juju upgrade-juju --agent-version=\"%s\"\n", context.chosen)
	} else {
		if c.ResetPrevious {
			// Confirm (unless --yes) before aborting an in-flight upgrade.
			if ok, err := c.confirmResetPreviousUpgrade(ctx); !ok || err != nil {
				const message = "previous upgrade not reset and no new upgrade triggered"
				if err != nil {
					return errors.Annotate(err, message)
				}
				return errors.New(message)
			}
			if err := client.AbortCurrentUpgrade(); err != nil {
				return block.ProcessBlockedError(err, block.BlockChange)
			}
		}
		if err := client.SetModelAgentVersion(context.chosen); err != nil {
			if params.IsCodeUpgradeInProgress(err) {
				return errors.Errorf("%s\n\n"+
					"Please wait for the upgrade to complete or if there was a problem with\n"+
					"the last upgrade that has been resolved, consider running the\n"+
					"upgrade-juju command with the --reset-previous-upgrade flag.",
					err,
				)
			} else {
				return block.ProcessBlockedError(err, block.BlockChange)
			}
		}
		fmt.Fprintf(ctx.Stdout, "started upgrade to %s\n", context.chosen)
	}
	return nil
}
// Run connects to the environment specified on the command line and bootstraps // a juju in that environment if none already exists. If there is as yet no environments.yaml file, // the user is informed how to create one. func (c *bootstrapCommand) Run(ctx *cmd.Context) (resultErr error) { if err := c.parseConstraints(ctx); err != nil { return err } if c.BootstrapImage != "" { if c.BootstrapSeries == "" { return errors.Errorf("--bootstrap-image must be used with --bootstrap-series") } cons, err := constraints.Merge(c.Constraints, c.BootstrapConstraints) if err != nil { return errors.Trace(err) } if !cons.HasArch() { return errors.Errorf("--bootstrap-image must be used with --bootstrap-constraints, specifying architecture") } } if c.interactive { if err := c.runInteractive(ctx); err != nil { return errors.Trace(err) } // now run normal bootstrap using info gained above. } if c.showClouds { return printClouds(ctx, c.ClientStore()) } if c.showRegionsForCloud != "" { return printCloudRegions(ctx, c.showRegionsForCloud) } bootstrapFuncs := getBootstrapFuncs() // Get the cloud definition identified by c.Cloud. If c.Cloud does not // identify a cloud in clouds.yaml, but is the name of a provider, and // that provider implements environs.CloudRegionDetector, we'll // synthesise a Cloud structure with the detected regions and no auth- // types. 
cloud, err := jujucloud.CloudByName(c.Cloud) if errors.IsNotFound(err) { ctx.Verbosef("cloud %q not found, trying as a provider name", c.Cloud) provider, err := environs.Provider(c.Cloud) if errors.IsNotFound(err) { return errors.NewNotFound(nil, fmt.Sprintf("unknown cloud %q, please try %q", c.Cloud, "juju update-clouds")) } else if err != nil { return errors.Trace(err) } detector, ok := bootstrapFuncs.CloudRegionDetector(provider) if !ok { ctx.Verbosef( "provider %q does not support detecting regions", c.Cloud, ) return errors.NewNotFound(nil, fmt.Sprintf("unknown cloud %q, please try %q", c.Cloud, "juju update-clouds")) } var cloudEndpoint string regions, err := detector.DetectRegions() if errors.IsNotFound(err) { // It's not an error to have no regions. If the // provider does not support regions, then we // reinterpret the supplied region name as the // cloud's endpoint. This enables the user to // supply, for example, maas/<IP> or manual/<IP>. if c.Region != "" { ctx.Verbosef("interpreting %q as the cloud endpoint", c.Region) cloudEndpoint = c.Region c.Region = "" } } else if err != nil { return errors.Annotatef(err, "detecting regions for %q cloud provider", c.Cloud, ) } schemas := provider.CredentialSchemas() authTypes := make([]jujucloud.AuthType, 0, len(schemas)) for authType := range schemas { authTypes = append(authTypes, authType) } // Since we are iterating over a map, lets sort the authTypes so // they are always in a consistent order. sort.Sort(jujucloud.AuthTypes(authTypes)) cloud = &jujucloud.Cloud{ Type: c.Cloud, AuthTypes: authTypes, Endpoint: cloudEndpoint, Regions: regions, } } else if err != nil { return errors.Trace(err) } if err := checkProviderType(cloud.Type); errors.IsNotFound(err) { // This error will get handled later. 
} else if err != nil { return errors.Trace(err) } provider, err := environs.Provider(cloud.Type) if err != nil { return errors.Trace(err) } // Custom clouds may not have explicitly declared support for any auth- // types, in which case we'll assume that they support everything that // the provider supports. if len(cloud.AuthTypes) == 0 { for authType := range provider.CredentialSchemas() { cloud.AuthTypes = append(cloud.AuthTypes, authType) } } // Get the credentials and region name. store := c.ClientStore() var detectedCredentialName string credential, credentialName, regionName, err := modelcmd.GetCredentials( ctx, store, modelcmd.GetCredentialsParams{ Cloud: *cloud, CloudName: c.Cloud, CloudRegion: c.Region, CredentialName: c.CredentialName, }, ) if errors.Cause(err) == modelcmd.ErrMultipleCredentials { return ambiguousCredentialError } if errors.IsNotFound(err) && c.CredentialName == "" { // No credential was explicitly specified, and no credential // was found in credentials.yaml; have the provider detect // credentials from the environment. ctx.Verbosef("no credentials found, checking environment") detected, err := modelcmd.DetectCredential(c.Cloud, cloud.Type) if errors.Cause(err) == modelcmd.ErrMultipleCredentials { return ambiguousDetectedCredentialError } else if err != nil { return errors.Trace(err) } // We have one credential so extract it from the map. 
var oneCredential jujucloud.Credential for detectedCredentialName, oneCredential = range detected.AuthCredentials { } credential = &oneCredential regionName = c.Region if regionName == "" { regionName = detected.DefaultRegion } logger.Debugf( "authenticating with region %q and credential %q (%v)", regionName, detectedCredentialName, credential.Label, ) logger.Tracef("credential: %v", credential) } else if err != nil { return errors.Trace(err) } region, err := getRegion(cloud, c.Cloud, regionName) if err != nil { fmt.Fprintf(ctx.GetStderr(), "%s\n\nSpecify an alternative region, or try %q.", err, "juju update-clouds", ) return cmd.ErrSilent } controllerModelUUID, err := utils.NewUUID() if err != nil { return errors.Trace(err) } hostedModelUUID, err := utils.NewUUID() if err != nil { return errors.Trace(err) } controllerUUID, err := utils.NewUUID() if err != nil { return errors.Trace(err) } // Create a model config, and split out any controller // and bootstrap config attributes. modelConfigAttrs := map[string]interface{}{ "type": cloud.Type, "name": bootstrap.ControllerModelName, config.UUIDKey: controllerModelUUID.String(), } userConfigAttrs, err := c.config.ReadAttrs(ctx) if err != nil { return errors.Trace(err) } // The provider may define some custom attributes specific // to the provider. These will be added to the model config. 
providerAttrs := make(map[string]interface{}) if ps, ok := provider.(config.ConfigSchemaSource); ok { for attr := range ps.ConfigSchema() { if v, ok := userConfigAttrs[attr]; ok { providerAttrs[attr] = v } } fields := schema.FieldMap(ps.ConfigSchema(), ps.ConfigDefaults()) if coercedAttrs, err := fields.Coerce(providerAttrs, nil); err != nil { return errors.Annotatef(err, "invalid attribute value(s) for %v cloud", cloud.Type) } else { providerAttrs = coercedAttrs.(map[string]interface{}) } } logger.Debugf("provider attrs: %v", providerAttrs) for k, v := range userConfigAttrs { modelConfigAttrs[k] = v } // Provider specific attributes are either already specified in model // config (but may have been coerced), or were not present. Either way, // copy them in. for k, v := range providerAttrs { modelConfigAttrs[k] = v } bootstrapConfigAttrs := make(map[string]interface{}) controllerConfigAttrs := make(map[string]interface{}) // Based on the attribute names in clouds.yaml, create // a map of shared config for all models on this cloud. 
inheritedControllerAttrs := make(map[string]interface{}) for k, v := range cloud.Config { switch { case bootstrap.IsBootstrapAttribute(k): bootstrapConfigAttrs[k] = v continue case controller.ControllerOnlyAttribute(k): controllerConfigAttrs[k] = v continue } inheritedControllerAttrs[k] = v } for k, v := range modelConfigAttrs { switch { case bootstrap.IsBootstrapAttribute(k): bootstrapConfigAttrs[k] = v delete(modelConfigAttrs, k) case controller.ControllerOnlyAttribute(k): controllerConfigAttrs[k] = v delete(modelConfigAttrs, k) } } bootstrapConfig, err := bootstrap.NewConfig(bootstrapConfigAttrs) if err != nil { return errors.Annotate(err, "constructing bootstrap config") } controllerConfig, err := controller.NewConfig( controllerUUID.String(), bootstrapConfig.CACert, controllerConfigAttrs, ) if err != nil { return errors.Annotate(err, "constructing controller config") } if err := common.FinalizeAuthorizedKeys(ctx, modelConfigAttrs); err != nil { return errors.Annotate(err, "finalizing authorized-keys") } logger.Debugf("preparing controller with config: %v", modelConfigAttrs) // Read existing current controller so we can clean up on error. 
var oldCurrentController string oldCurrentController, err = store.CurrentController() if errors.IsNotFound(err) { oldCurrentController = "" } else if err != nil { return errors.Annotate(err, "error reading current controller") } defer func() { if resultErr == nil || errors.IsAlreadyExists(resultErr) { return } if oldCurrentController != "" { if err := store.SetCurrentController(oldCurrentController); err != nil { logger.Errorf( "cannot reset current controller to %q: %v", oldCurrentController, err, ) } } if err := store.RemoveController(c.controllerName); err != nil { logger.Errorf( "cannot destroy newly created controller %q details: %v", c.controllerName, err, ) } }() bootstrapModelConfig := make(map[string]interface{}) for k, v := range inheritedControllerAttrs { bootstrapModelConfig[k] = v } for k, v := range modelConfigAttrs { bootstrapModelConfig[k] = v } // Add in any default attribute values if not already // specified, making the recorded bootstrap config // immutable to changes in Juju. for k, v := range config.ConfigDefaults() { if _, ok := bootstrapModelConfig[k]; !ok { bootstrapModelConfig[k] = v } } environ, err := bootstrapPrepare( modelcmd.BootstrapContext(ctx), store, bootstrap.PrepareParams{ ModelConfig: bootstrapModelConfig, ControllerConfig: controllerConfig, ControllerName: c.controllerName, Cloud: environs.CloudSpec{ Type: cloud.Type, Name: c.Cloud, Region: region.Name, Endpoint: region.Endpoint, IdentityEndpoint: region.IdentityEndpoint, StorageEndpoint: region.StorageEndpoint, Credential: credential, }, CredentialName: credentialName, AdminSecret: bootstrapConfig.AdminSecret, }, ) if err != nil { return errors.Trace(err) } // Set the current model to the initial hosted model. 
if err := store.UpdateModel(c.controllerName, c.hostedModelName, jujuclient.ModelDetails{ hostedModelUUID.String(), }); err != nil { return errors.Trace(err) } if err := store.SetCurrentModel(c.controllerName, c.hostedModelName); err != nil { return errors.Trace(err) } // Set the current controller so "juju status" can be run while // bootstrapping is underway. if err := store.SetCurrentController(c.controllerName); err != nil { return errors.Trace(err) } cloudRegion := c.Cloud if region.Name != "" { cloudRegion = fmt.Sprintf("%s/%s", cloudRegion, region.Name) } ctx.Infof( "Creating Juju controller %q on %s", c.controllerName, cloudRegion, ) // If we error out for any reason, clean up the environment. defer func() { if resultErr != nil { if c.KeepBrokenEnvironment { ctx.Infof(` bootstrap failed but --keep-broken was specified so resources are not being destroyed. When you have finished diagnosing the problem, remember to clean up the failed controller. See `[1:] + "`juju kill-controller`" + `.`) } else { handleBootstrapError(ctx, resultErr, func() error { return environsDestroy( c.controllerName, environ, store, ) }) } } }() // Block interruption during bootstrap. Providers may also // register for interrupt notification so they can exit early. interrupted := make(chan os.Signal, 1) defer close(interrupted) ctx.InterruptNotify(interrupted) defer ctx.StopInterruptNotify(interrupted) go func() { for _ = range interrupted { ctx.Infof("Interrupt signalled: waiting for bootstrap to exit") } }() // If --metadata-source is specified, override the default tools metadata source so // SyncTools can use it, and also upload any image metadata. var metadataDir string if c.MetadataSource != "" { metadataDir = ctx.AbsPath(c.MetadataSource) } // Merge environ and bootstrap-specific constraints. 
constraintsValidator, err := environ.ConstraintsValidator() if err != nil { return errors.Trace(err) } bootstrapConstraints, err := constraintsValidator.Merge( c.Constraints, c.BootstrapConstraints, ) if err != nil { return errors.Trace(err) } logger.Infof("combined bootstrap constraints: %v", bootstrapConstraints) hostedModelConfig := map[string]interface{}{ "name": c.hostedModelName, config.UUIDKey: hostedModelUUID.String(), } for k, v := range inheritedControllerAttrs { hostedModelConfig[k] = v } // We copy across any user supplied attributes to the hosted model config. // But only if the attributes have not been removed from the controller // model config as part of preparing the controller model. controllerModelConfigAttrs := environ.Config().AllAttrs() for k, v := range userConfigAttrs { if _, ok := controllerModelConfigAttrs[k]; ok { hostedModelConfig[k] = v } } // Ensure that certain config attributes are not included in the hosted // model config. These attributes may be modified during bootstrap; by // removing them from this map, we ensure the modified values are // inherited. delete(hostedModelConfig, config.AuthorizedKeysKey) delete(hostedModelConfig, config.AgentVersionKey) // Check whether the Juju GUI must be installed in the controller. // Leaving this value empty means no GUI will be installed. var guiDataSourceBaseURL string if !c.noGUI { guiDataSourceBaseURL = common.GUIDataSourceBaseURL() } if credentialName == "" { // credentialName will be empty if the credential was detected. // We must supply a name for the credential in the database, // so choose one. 
credentialName = detectedCredentialName } err = bootstrapFuncs.Bootstrap(modelcmd.BootstrapContext(ctx), environ, bootstrap.BootstrapParams{ ModelConstraints: c.Constraints, BootstrapConstraints: bootstrapConstraints, BootstrapSeries: c.BootstrapSeries, BootstrapImage: c.BootstrapImage, Placement: c.Placement, BuildAgent: c.BuildAgent, BuildAgentTarball: sync.BuildAgentTarball, AgentVersion: c.AgentVersion, MetadataDir: metadataDir, Cloud: *cloud, CloudName: c.Cloud, CloudRegion: region.Name, CloudCredential: credential, CloudCredentialName: credentialName, ControllerConfig: controllerConfig, ControllerInheritedConfig: inheritedControllerAttrs, RegionInheritedConfig: cloud.RegionConfig, HostedModelConfig: hostedModelConfig, GUIDataSourceBaseURL: guiDataSourceBaseURL, AdminSecret: bootstrapConfig.AdminSecret, CAPrivateKey: bootstrapConfig.CAPrivateKey, DialOpts: environs.BootstrapDialOpts{ Timeout: bootstrapConfig.BootstrapTimeout, RetryDelay: bootstrapConfig.BootstrapRetryDelay, AddressesDelay: bootstrapConfig.BootstrapAddressesDelay, }, }) if err != nil { return errors.Annotate(err, "failed to bootstrap model") } if err := c.SetModelName(modelcmd.JoinModelName(c.controllerName, c.hostedModelName)); err != nil { return errors.Trace(err) } agentVersion := jujuversion.Current if c.AgentVersion != nil { agentVersion = *c.AgentVersion } err = common.SetBootstrapEndpointAddress(c.ClientStore(), c.controllerName, agentVersion, controllerConfig.APIPort(), environ) if err != nil { return errors.Annotate(err, "saving bootstrap endpoint address") } // To avoid race conditions when running scripted bootstraps, wait // for the controller's machine agent to be ready to accept commands // before exiting this bootstrap command. return waitForAgentInitialisation(ctx, &c.ModelCommandBase, c.controllerName, c.hostedModelName) }
// Run implements Command.Run func (c *killCommand) Run(ctx *cmd.Context) error { store, err := configstore.Default() if err != nil { return errors.Annotate(err, "cannot open controller info storage") } cfgInfo, err := store.ReadInfo(c.ModelName()) if err != nil { return errors.Annotate(err, "cannot read controller info") } // Verify that we're destroying a controller apiEndpoint := cfgInfo.APIEndpoint() if apiEndpoint.ServerUUID != "" && apiEndpoint.ModelUUID != apiEndpoint.ServerUUID { return errors.Errorf("%q is not a controller; use juju model destroy to destroy it", c.ModelName()) } if !c.assumeYes { if err = confirmDestruction(ctx, c.ModelName()); err != nil { return err } } // Attempt to connect to the API. api, err := c.getControllerAPI() switch { case err == nil: defer api.Close() case errors.Cause(err) == common.ErrPerm: return errors.Annotate(err, "cannot destroy controller") default: if errors.Cause(err) != modelcmd.ErrConnTimedOut { logger.Debugf("unable to open api: %s", err) } ctx.Infof("Unable to open API: %s\n", err) api = nil } // Obtain bootstrap / controller environ information controllerEnviron, err := c.getControllerEnviron(cfgInfo, api) if err != nil { return errors.Annotate(err, "cannot obtain bootstrap information") } // If we were unable to connect to the API, just destroy the controller through // the environs interface. if api == nil { ctx.Infof("Unable to connect to the API server. Destroying through provider.") return environs.Destroy(controllerEnviron, store) } // Attempt to destroy the controller and all environments. err = api.DestroyController(true) if err != nil { ctx.Infof("Unable to destroy controller through the API: %s. 
Destroying through provider.", err) return environs.Destroy(controllerEnviron, store) } ctx.Infof("Destroying controller %q\nWaiting for resources to be reclaimed", c.ModelName()) updateStatus := newTimedStatusUpdater(ctx, api, apiEndpoint.ModelUUID) for ctrStatus, envsStatus := updateStatus(0); hasUnDeadEnvirons(envsStatus); ctrStatus, envsStatus = updateStatus(2 * time.Second) { ctx.Infof(fmtCtrStatus(ctrStatus)) for _, envStatus := range envsStatus { ctx.Verbosef(fmtEnvStatus(envStatus)) } } ctx.Infof("All hosted models reclaimed, cleaning up controller machines") return environs.Destroy(controllerEnviron, store) }
// Run implements Command.Run func (c *destroyCommand) Run(ctx *cmd.Context) error { controllerName := c.ControllerName() store := c.ClientStore() if !c.assumeYes { if err := confirmDestruction(ctx, c.ControllerName()); err != nil { return err } } // Attempt to connect to the API. If we can't, fail the destroy. Users will // need to use the controller kill command if we can't connect. api, err := c.getControllerAPI() if err != nil { return c.ensureUserFriendlyErrorLog(errors.Annotate(err, "cannot connect to API"), ctx, nil) } defer api.Close() // Obtain controller environ so we can clean up afterwards. controllerEnviron, err := c.getControllerEnviron(ctx, store, controllerName, api) if err != nil { return errors.Annotate(err, "getting controller environ") } for { // Attempt to destroy the controller. ctx.Infof("Destroying controller") var hasHostedModels bool err = api.DestroyController(c.destroyModels) if err != nil { if params.IsCodeHasHostedModels(err) { hasHostedModels = true } else { return c.ensureUserFriendlyErrorLog( errors.Annotate(err, "cannot destroy controller"), ctx, api, ) } } updateStatus := newTimedStatusUpdater(ctx, api, controllerEnviron.Config().UUID(), clock.WallClock) ctrStatus, modelsStatus := updateStatus(0) if !c.destroyModels { if err := c.checkNoAliveHostedModels(ctx, modelsStatus); err != nil { return errors.Trace(err) } if hasHostedModels && !hasUnDeadModels(modelsStatus) { // When we called DestroyController before, we were // informed that there were hosted models remaining. // When we checked just now, there were none. We should // try destroying again. continue } } // Even if we've not just requested for hosted models to be destroyed, // there may be some being destroyed already. We need to wait for them. 
ctx.Infof("Waiting for hosted model resources to be reclaimed") for ; hasUnDeadModels(modelsStatus); ctrStatus, modelsStatus = updateStatus(2 * time.Second) { ctx.Infof(fmtCtrStatus(ctrStatus)) for _, model := range modelsStatus { ctx.Verbosef(fmtModelStatus(model)) } } ctx.Infof("All hosted models reclaimed, cleaning up controller machines") return environs.Destroy(c.ControllerName(), controllerEnviron, store) } }