// ProvisioningScript generates a bash script that can be
// executed on a remote host to carry out the cloud-init
// configuration.
func ProvisioningScript(mcfg *cloudinit.MachineConfig) (string, error) {
    cloudcfg := coreCloudinit.New()
    cloudcfg.SetAptUpdate(mcfg.EnableOSRefreshUpdate)
    cloudcfg.SetAptUpgrade(mcfg.EnableOSUpgrade)

    udata, err := cloudinit.NewUserdataConfig(mcfg, cloudcfg)
    if err != nil {
        return "", errors.Annotate(err, "error generating cloud-config")
    }
    if err := udata.ConfigureJuju(); err != nil {
        return "", errors.Annotate(err, "error generating cloud-config")
    }

    configScript, err := sshinit.ConfigureScript(cloudcfg)
    if err != nil {
        return "", errors.Annotate(err, "error converting cloud-config to script")
    }

    var buf bytes.Buffer
    // Always remove the cloud-init-output.log file first, if it exists.
    fmt.Fprintf(&buf, "rm -f %s\n", utils.ShQuote(mcfg.CloudInitOutputLog))
    // If something goes wrong, dump cloud-init-output.log to stderr.
    buf.WriteString(shell.DumpFileOnErrorScript(mcfg.CloudInitOutputLog))
    buf.WriteString(configScript)
    return buf.String(), nil
}
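// Illustrative sketch (not part of the package above): roughly what the
// assembled preamble looks like. shQuote and the dump-on-error fragment are
// simplified stand-ins for utils.ShQuote and shell.DumpFileOnErrorScript,
// and the log path is hypothetical.
package main

import (
    "bytes"
    "fmt"
    "strings"
)

// shQuote does minimal single-quote escaping, enough for this example.
func shQuote(s string) string {
    return "'" + strings.Replace(s, "'", `'"'"'`, -1) + "'"
}

func main() {
    logFile := "/var/log/cloud-init-output.log" // hypothetical path
    var buf bytes.Buffer
    // Remove any stale log, then arrange for it to be dumped on failure.
    fmt.Fprintf(&buf, "rm -f %s\n", shQuote(logFile))
    fmt.Fprintf(&buf, "trap 'cat %s >&2' ERR\n", shQuote(logFile)) // simplified dump-on-error
    buf.WriteString("echo '<cloud-config script would follow here>'\n")
    fmt.Print(buf.String())
}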
func (st *State) migrationFromQuery(query mongo.Query) (ModelMigration, error) {
    var doc modelMigDoc
    err := query.One(&doc)
    if err == mgo.ErrNotFound {
        return nil, errors.NotFoundf("migration")
    } else if err != nil {
        return nil, errors.Annotate(err, "migration lookup failed")
    }

    statusColl, closer := st.getCollection(migrationsStatusC)
    defer closer()
    var statusDoc modelMigStatusDoc
    err = statusColl.FindId(doc.Id).One(&statusDoc)
    if err == mgo.ErrNotFound {
        return nil, errors.NotFoundf("migration status")
    } else if err != nil {
        return nil, errors.Annotate(err, "migration status lookup failed")
    }

    return &modelMigration{
        doc:       doc,
        statusDoc: statusDoc,
        st:        st,
    }, nil
}
// AddEnvironmentUser adds a new user to the database.
func (st *State) AddEnvironmentUser(user, createdBy names.UserTag, displayName string) (*EnvironmentUser, error) {
    // Ensure local user exists in state before adding them as an environment user.
    if user.IsLocal() {
        localUser, err := st.User(user)
        if err != nil {
            return nil, errors.Annotate(err, fmt.Sprintf("user %q does not exist locally", user.Name()))
        }
        if displayName == "" {
            displayName = localUser.DisplayName()
        }
    }

    // Ensure local createdBy user exists.
    if createdBy.IsLocal() {
        if _, err := st.User(createdBy); err != nil {
            return nil, errors.Annotate(err, fmt.Sprintf("createdBy user %q does not exist locally", createdBy.Name()))
        }
    }

    envuuid := st.EnvironUUID()
    op, doc := createEnvUserOpAndDoc(envuuid, user, createdBy, displayName)
    err := st.runTransaction([]txn.Op{op})
    if err == txn.ErrAborted {
        err = errors.AlreadyExistsf("environment user %q", user.Username())
    }
    if err != nil {
        return nil, errors.Trace(err)
    }
    return &EnvironmentUser{st: st, doc: *doc}, nil
}
func addUUIDToSecurityGroupNames(e *Environ) error {
    nova := e.nova()
    groups, err := nova.ListSecurityGroups()
    if err != nil {
        return errors.Annotate(err, "upgrading instance names")
    }
    cfg := e.Config()
    eName := cfg.Name()
    eUUID, ok := cfg.UUID()
    if !ok {
        return errors.NotFoundf("model uuid for model %q", eName)
    }
    for _, group := range groups {
        newName, ok, err := replaceNameWithID(group.Name, eName, eUUID)
        if err != nil {
            return errors.Annotate(err, "generating the new security group name")
        }
        if !ok {
            continue
        }
        // Name should have uuid instead of name
        _, err = nova.UpdateSecurityGroup(group.Id, newName, group.Description)
        if err != nil {
            return errors.Annotatef(err, "upgrading security group name from %q to %q", group.Name, newName)
        }
    }
    return nil
}
// populateGUIArchive stores the uploaded Juju GUI archive in provider storage,
// updates the GUI metadata and sets the current Juju GUI version.
func (c *BootstrapCommand) populateGUIArchive(st *state.State, env environs.Environ) error {
    agentConfig := c.CurrentConfig()
    dataDir := agentConfig.DataDir()
    guistorage, err := st.GUIStorage()
    if err != nil {
        return errors.Trace(err)
    }
    defer guistorage.Close()
    gui, err := agenttools.ReadGUIArchive(dataDir)
    if err != nil {
        return errors.Annotate(err, "cannot fetch GUI info")
    }
    f, err := os.Open(filepath.Join(agenttools.SharedGUIDir(dataDir), "gui.tar.bz2"))
    if err != nil {
        return errors.Annotate(err, "cannot read GUI archive")
    }
    defer f.Close()
    if err := guistorage.Add(f, binarystorage.Metadata{
        Version: gui.Version.String(),
        Size:    gui.Size,
        SHA256:  gui.SHA256,
    }); err != nil {
        return errors.Annotate(err, "cannot store GUI archive")
    }
    if err = st.GUISetVersion(gui.Version); err != nil {
        return errors.Annotate(err, "cannot set current GUI version")
    }
    return nil
}
// UpdateCloudCredential adds or updates a cloud credential with the given tag.
func (st *State) UpdateCloudCredential(tag names.CloudCredentialTag, credential cloud.Credential) error {
    credentials := map[names.CloudCredentialTag]cloud.Credential{tag: credential}
    buildTxn := func(attempt int) ([]txn.Op, error) {
        cloudName := tag.Cloud().Id()
        cloud, err := st.Cloud(cloudName)
        if err != nil {
            return nil, errors.Trace(err)
        }
        ops, err := validateCloudCredentials(cloud, cloudName, credentials)
        if err != nil {
            return nil, errors.Annotate(err, "validating cloud credentials")
        }
        _, err = st.CloudCredential(tag)
        if err != nil && !errors.IsNotFound(err) {
            return nil, errors.Maskf(err, "fetching cloud credentials")
        }
        if err == nil {
            ops = append(ops, updateCloudCredentialOp(tag, credential))
        } else {
            ops = append(ops, createCloudCredentialOp(tag, credential))
        }
        return ops, nil
    }
    if err := st.run(buildTxn); err != nil {
        return errors.Annotate(err, "updating cloud credentials")
    }
    return nil
}
// Destroy sets the environment's lifecycle to Dying, preventing
// addition of services or machines to state.
func (e *Environment) Destroy() (err error) {
    defer errors.DeferredAnnotatef(&err, "failed to destroy environment")

    if e.Life() != Alive {
        return nil
    }
    if err := e.ensureDestroyable(); err != nil {
        return errors.Trace(err)
    }
    if err := e.startDestroy(); err != nil {
        if abortErr := e.abortDestroy(); abortErr != nil {
            return errors.Annotate(abortErr, err.Error())
        }
        return errors.Trace(err)
    }
    // Check that no new environments or machines were added between the first
    // check and the Environment.startDestroy().
    if err := e.ensureDestroyable(); err != nil {
        if abortErr := e.abortDestroy(); abortErr != nil {
            return errors.Annotate(abortErr, err.Error())
        }
        return errors.Trace(err)
    }
    if err := e.finishDestroy(); err != nil {
        if abortErr := e.abortDestroy(); abortErr != nil {
            return errors.Annotate(abortErr, err.Error())
        }
        return errors.Trace(err)
    }
    return nil
}
// processGet handles a tools GET request.
func (h *toolsDownloadHandler) processGet(r *http.Request, st *state.State) ([]byte, error) {
    version, err := version.ParseBinary(r.URL.Query().Get(":version"))
    if err != nil {
        return nil, errors.Annotate(err, "error parsing version")
    }
    storage, err := st.ToolsStorage()
    if err != nil {
        return nil, errors.Annotate(err, "error getting tools storage")
    }
    defer storage.Close()
    _, reader, err := storage.Open(version.String())
    if errors.IsNotFound(err) {
        // Tools could not be found in tools storage,
        // so look for them in simplestreams, fetch
        // them and cache in tools storage.
        logger.Infof("%v tools not found locally, fetching", version)
        reader, err = h.fetchAndCacheTools(version, storage, st)
        if err != nil {
            err = errors.Annotate(err, "error fetching tools")
        }
    }
    if err != nil {
        return nil, err
    }
    defer reader.Close()
    data, err := ioutil.ReadAll(reader)
    if err != nil {
        return nil, errors.Annotate(err, "failed to read tools tarball")
    }
    return data, nil
}
// WatchStorageAttachment returns a state.NotifyWatcher that reacts to changes
// to the VolumeAttachmentInfo or FilesystemAttachmentInfo corresponding to the
// tags specified.
func WatchStorageAttachment(
    st StorageInterface,
    storageTag names.StorageTag,
    machineTag names.MachineTag,
    unitTag names.UnitTag,
) (state.NotifyWatcher, error) {
    storageInstance, err := st.StorageInstance(storageTag)
    if err != nil {
        return nil, errors.Annotate(err, "getting storage instance")
    }
    var w state.NotifyWatcher
    switch storageInstance.Kind() {
    case state.StorageKindBlock:
        volume, err := st.StorageInstanceVolume(storageTag)
        if err != nil {
            return nil, errors.Annotate(err, "getting storage volume")
        }
        w = st.WatchVolumeAttachment(machineTag, volume.VolumeTag())
    case state.StorageKindFilesystem:
        filesystem, err := st.StorageInstanceFilesystem(storageTag)
        if err != nil {
            return nil, errors.Annotate(err, "getting storage filesystem")
        }
        w = st.WatchFilesystemAttachment(machineTag, filesystem.FilesystemTag())
    default:
        return nil, errors.Errorf("invalid storage kind %v", storageInstance.Kind())
    }
    w2 := st.WatchStorageAttachment(storageTag, unitTag)
    return common.NewMultiNotifyWatcher(w, w2), nil
}
func dialLogsinkAPI(apiInfo *api.Info) (*websocket.Conn, error) {
    // TODO(mjs) Most of this should be extracted to be shared for
    // connections to both /log (debuglog) and /logsink.
    header := utils.BasicAuthHeader(apiInfo.Tag.String(), apiInfo.Password)
    header.Set("X-Juju-Nonce", apiInfo.Nonce)
    conn, err := api.Connect(apiInfo, "/logsink", header, api.DialOpts{})
    if err != nil {
        return nil, errors.Annotate(err, "failed to connect to logsink API")
    }

    // Read the initial error and translate to a real error.
    // Read up to the first new line character. We can't use bufio here as it
    // reads too much from the reader.
    line := make([]byte, 4096)
    n, err := conn.Read(line)
    if err != nil {
        return nil, errors.Annotate(err, "unable to read initial response")
    }
    line = line[0:n]

    var errResult params.ErrorResult
    err = json.Unmarshal(line, &errResult)
    if err != nil {
        return nil, errors.Annotate(err, "unable to unmarshal initial response")
    }
    if errResult.Error != nil {
        // Annotate the error reported by the server; err is nil at this point.
        return nil, errors.Annotatef(errResult.Error, "initial server error")
    }
    return conn, nil
}
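// Illustrative sketch (standalone): how the first line read from a logsink-style
// connection might be decoded into an error result. The wire shape used here,
// {"error": {...}}, only loosely mirrors params.ErrorResult and is an assumption
// for the example; the field names are hypothetical.
package main

import (
    "encoding/json"
    "fmt"
)

type serverError struct {
    Message string `json:"message"`
    Code    string `json:"code"`
}

type errorResult struct {
    Error *serverError `json:"error"`
}

func main() {
    // A sample initial response line as it might arrive from the server.
    line := []byte(`{"error": {"message": "invalid nonce", "code": "unauthorized"}}`)

    var res errorResult
    if err := json.Unmarshal(line, &res); err != nil {
        fmt.Println("unable to unmarshal initial response:", err)
        return
    }
    if res.Error != nil {
        fmt.Printf("initial server error: %s (%s)\n", res.Error.Message, res.Error.Code)
        return
    }
    fmt.Println("no initial error; connection is usable")
}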
func cleanupDyingMachineResources(m *Machine) error {
    volumeAttachments, err := m.st.MachineVolumeAttachments(m.MachineTag())
    if err != nil {
        return errors.Annotate(err, "getting machine volume attachments")
    }
    for _, va := range volumeAttachments {
        if err := m.st.DetachVolume(va.Machine(), va.Volume()); err != nil {
            if IsContainsFilesystem(err) {
                // The volume will be destroyed when the
                // contained filesystem is removed, whose
                // destruction is initiated below.
                continue
            }
            return errors.Trace(err)
        }
    }
    filesystemAttachments, err := m.st.MachineFilesystemAttachments(m.MachineTag())
    if err != nil {
        return errors.Annotate(err, "getting machine filesystem attachments")
    }
    for _, fsa := range filesystemAttachments {
        if err := m.st.DetachFilesystem(fsa.Machine(), fsa.Filesystem()); err != nil {
            return errors.Trace(err)
        }
    }
    return nil
}
// New starts a logsender worker which reads log message structs from
// a channel and sends them to the JES via the logsink API.
func New(logs chan *LogRecord, apiInfo *api.Info) worker.Worker {
    loop := func(stop <-chan struct{}) error {
        logger.Debugf("starting logsender worker")
        conn, err := dialLogsinkAPI(apiInfo)
        if err != nil {
            return errors.Annotate(err, "logsender dial failed")
        }
        defer conn.Close()
        for {
            select {
            case rec := <-logs:
                err := websocket.JSON.Send(conn, &apiserver.LogMessage{
                    Time:     rec.Time,
                    Module:   rec.Module,
                    Location: rec.Location,
                    Level:    rec.Level,
                    Message:  rec.Message,
                })
                if err != nil {
                    // Note: due to the fire-and-forget nature of the
                    // logsink API, it is possible that when the
                    // connection dies, any logs that were "in-flight"
                    // will not be recorded on the server side.
                    return errors.Annotate(err, "logsink connection failed")
                }
            case <-stop:
                return nil
            }
        }
    }
    return worker.NewSimpleWorker(loop)
}
// startFunc returns a StartFunc that creates a worker based on the manifolds
// named in the supplied config.
func startFunc(config ManifoldConfig) dependency.StartFunc {
    return func(getResource dependency.GetResourceFunc) (worker.Worker, error) {
        // Get dependencies and open a connection.
        var a agent.Agent
        if err := getResource(config.AgentName, &a); err != nil {
            return nil, err
        }
        conn, err := openConnection(a)
        if err != nil {
            return nil, errors.Annotate(err, "cannot open api")
        }

        // Add the environment uuid to agent config if not present.
        currentConfig := a.CurrentConfig()
        if currentConfig.Environment().Id() == "" {
            err := a.ChangeConfig(func(setter agent.ConfigSetter) error {
                environTag, err := conn.EnvironTag()
                if err != nil {
                    return errors.Annotate(err, "no environment uuid set on api")
                }
                return setter.Migrate(agent.MigrateParams{
                    Environment: environTag,
                })
            })
            if err != nil {
                logger.Warningf("unable to save environment uuid: %v", err)
                // Not really fatal, just annoying.
            }
        }

        // Return the worker.
        return newApiConnWorker(conn)
    }
}
func restoreBootstrapMachine(st api.Connection, backupFile string, agentConf agentConfig) (addr string, err error) {
    client := st.Client()
    addr, err = client.PublicAddress("0")
    if err != nil {
        return "", errors.Annotate(err, "cannot get public address of bootstrap machine")
    }
    paddr, err := client.PrivateAddress("0")
    if err != nil {
        return "", errors.Annotate(err, "cannot get private address of bootstrap machine")
    }
    status, err := client.Status(nil)
    if err != nil {
        return "", errors.Annotate(err, "cannot get environment status")
    }
    info, ok := status.Machines["0"]
    if !ok {
        return "", fmt.Errorf("cannot find bootstrap machine in status")
    }
    newInstId := instance.Id(info.InstanceId)

    progress("copying backup file to bootstrap host")
    if err := sendViaScp(backupFile, addr, "~/juju-backup.tgz"); err != nil {
        return "", errors.Annotate(err, "cannot copy backup file to bootstrap instance")
    }
    progress("updating bootstrap machine")
    if err := runViaSsh(addr, updateBootstrapMachineScript(newInstId, agentConf, addr, paddr)); err != nil {
        return "", errors.Annotate(err, "update script failed")
    }
    return addr, nil
}
// StoreCharmArchive stores a charm archive in environment storage.
func StoreCharmArchive(st *state.State, curl *charm.URL, ch charm.Charm, r io.Reader, size int64, sha256 string) error {
    storage := newStateStorage(st.EnvironUUID(), st.MongoSession())
    storagePath, err := charmArchiveStoragePath(curl)
    if err != nil {
        return errors.Annotate(err, "cannot generate charm archive name")
    }
    if err := storage.Put(storagePath, r, size); err != nil {
        return errors.Annotate(err, "cannot add charm to storage")
    }

    // Now update the charm data in state and mark it as no longer pending.
    _, err = st.UpdateUploadedCharm(ch, curl, storagePath, sha256)
    if err != nil {
        alreadyUploaded := err == state.ErrCharmRevisionAlreadyModified ||
            errors.Cause(err) == state.ErrCharmRevisionAlreadyModified ||
            state.IsCharmAlreadyUploadedError(err)
        if err := storage.Remove(storagePath); err != nil {
            if alreadyUploaded {
                logger.Errorf("cannot remove duplicated charm archive from storage: %v", err)
            } else {
                logger.Errorf("cannot remove unsuccessfully recorded charm archive from storage: %v", err)
            }
        }
        if alreadyUploaded {
            // Somebody else managed to upload and update the charm in
            // state before us. This is not an error.
            return nil
        }
    }
    // Propagate any other update failure rather than swallowing it
    // (Trace returns nil when err is nil).
    return errors.Trace(err)
}
func volumeStorageAttachmentInfo(
    st StorageInterface,
    storageInstance state.StorageInstance,
    machineTag names.MachineTag,
) (*storage.StorageAttachmentInfo, error) {
    storageTag := storageInstance.StorageTag()
    volume, err := st.StorageInstanceVolume(storageTag)
    if err != nil {
        return nil, errors.Annotate(err, "getting volume")
    }
    volumeInfo, err := volume.Info()
    if err != nil {
        return nil, errors.Annotate(err, "getting volume info")
    }
    volumeAttachment, err := st.VolumeAttachment(machineTag, volume.VolumeTag())
    if err != nil {
        return nil, errors.Annotate(err, "getting volume attachment")
    }
    volumeAttachmentInfo, err := volumeAttachment.Info()
    if err != nil {
        return nil, errors.Annotate(err, "getting volume attachment info")
    }
    devicePath, err := volumeAttachmentDevicePath(
        volumeInfo,
        volumeAttachmentInfo,
    )
    if err != nil {
        return nil, errors.Trace(err)
    }
    return &storage.StorageAttachmentInfo{
        storage.StorageKindBlock,
        devicePath,
    }, nil
}
func (task *provisionerTask) constructInstanceConfig(
    machine *apiprovisioner.Machine,
    auth authentication.AuthenticationProvider,
    pInfo *params.ProvisioningInfo,
) (*instancecfg.InstanceConfig, error) {
    stateInfo, apiInfo, err := auth.SetupAuthentication(machine)
    if err != nil {
        return nil, errors.Annotate(err, "failed to setup authentication")
    }

    // Generate a nonce for the new instance, with the format: "machine-#:UUID".
    // The first part is a badge, specifying the tag of the machine the provisioner
    // is running on, while the second part is a random UUID.
    uuid, err := utils.NewUUID()
    if err != nil {
        return nil, errors.Annotate(err, "failed to generate a nonce for machine "+machine.Id())
    }

    nonce := fmt.Sprintf("%s:%s", task.machineTag, uuid)
    return instancecfg.NewInstanceConfig(
        machine.Id(),
        nonce,
        task.imageStream,
        pInfo.Series,
        task.secureServerConnection,
        nil,
        stateInfo,
        apiInfo,
    )
}
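// Illustrative sketch (standalone): the "machine-#:UUID" nonce shape described
// above, using crypto/rand in place of utils.NewUUID. The machine tag value is
// hypothetical and only stands in for task.machineTag.
package main

import (
    "crypto/rand"
    "fmt"
)

func main() {
    // Build a random version-4-style UUID from 16 random bytes.
    b := make([]byte, 16)
    if _, err := rand.Read(b); err != nil {
        panic(err)
    }
    b[6] = (b[6] & 0x0f) | 0x40 // version 4
    b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
    uuid := fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])

    machineTag := "machine-0" // hypothetical provisioner machine tag
    nonce := fmt.Sprintf("%s:%s", machineTag, uuid)
    fmt.Println(nonce) // e.g. machine-0:9f1c3e0a-...
}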
// addFilesystemOps returns txn.Ops to create a new filesystem with the
// specified parameters. If the storage source cannot create filesystems
// directly, a volume will be created and Juju will manage a filesystem
// on it.
func (st *State) addFilesystemOps(params FilesystemParams, machineId string) ([]txn.Op, names.FilesystemTag, names.VolumeTag, error) {
    if params.binding == nil {
        params.binding = names.NewMachineTag(machineId)
    }
    params, err := st.filesystemParamsWithDefaults(params)
    if err != nil {
        return nil, names.FilesystemTag{}, names.VolumeTag{}, errors.Trace(err)
    }
    machineId, err = st.validateFilesystemParams(params, machineId)
    if err != nil {
        return nil, names.FilesystemTag{}, names.VolumeTag{}, errors.Annotate(err, "validating filesystem params")
    }

    filesystemId, err := newFilesystemId(st, machineId)
    if err != nil {
        return nil, names.FilesystemTag{}, names.VolumeTag{}, errors.Annotate(err, "cannot generate filesystem name")
    }
    filesystemTag := names.NewFilesystemTag(filesystemId)

    // Check if the filesystem needs a volume.
    var volumeId string
    var volumeTag names.VolumeTag
    var ops []txn.Op
    _, provider, err := poolStorageProvider(st, params.Pool)
    if err != nil {
        return nil, names.FilesystemTag{}, names.VolumeTag{}, errors.Trace(err)
    }
    if !provider.Supports(storage.StorageKindFilesystem) {
        var volumeOp txn.Op
        volumeParams := VolumeParams{
            params.storage,
            filesystemTag, // volume is bound to filesystem
            params.Pool,
            params.Size,
        }
        volumeOp, volumeTag, err = st.addVolumeOp(volumeParams, machineId)
        if err != nil {
            return nil, names.FilesystemTag{}, names.VolumeTag{}, errors.Annotate(err, "creating backing volume")
        }
        volumeId = volumeTag.Id()
        ops = append(ops, volumeOp)
    }

    filesystemOp := txn.Op{
        C:      filesystemsC,
        Id:     filesystemId,
        Assert: txn.DocMissing,
        Insert: &filesystemDoc{
            FilesystemId: filesystemId,
            VolumeId:     volumeId,
            StorageId:    params.storage.Id(),
            Binding:      params.binding.String(),
            Params:       &params,
            // Every filesystem is created with one attachment.
            AttachmentCount: 1,
        },
    }
    ops = append(ops, filesystemOp)
    return ops, filesystemTag, volumeTag, nil
}
// getAvailableRoleSizes returns the role sizes available for the configured
// location.
func (env *azureEnviron) getAvailableRoleSizes() (_ set.Strings, err error) {
    defer errors.DeferredAnnotatef(&err, "cannot get available role sizes")

    snap := env.getSnapshot()
    if snap.availableRoleSizes != nil {
        return snap.availableRoleSizes, nil
    }
    locations, err := snap.api.ListLocations()
    if err != nil {
        return nil, errors.Annotate(err, "cannot list locations")
    }
    var available set.Strings
    for _, location := range locations {
        if location.Name != snap.ecfg.location() {
            continue
        }
        if location.ComputeCapabilities == nil {
            // err is nil here, so report a new error rather than annotating it.
            return nil, errors.Errorf("cannot determine compute capabilities")
        }
        available = set.NewStrings(location.ComputeCapabilities.VirtualMachineRoleSizes...)
        break
    }
    if available == nil {
        return nil, errors.NotFoundf("location %q", snap.ecfg.location())
    }
    env.Lock()
    env.availableRoleSizes = available
    env.Unlock()
    return available, nil
}
func (api *API) createParamsStorageAttachment(si params.StorageDetails, sa state.StorageAttachment) (params.StorageDetails, error) {
    result := params.StorageDetails{Status: "pending"}
    result.StorageTag = sa.StorageInstance().String()
    if result.StorageTag != si.StorageTag {
        panic("attachment does not belong to storage instance")
    }
    result.UnitTag = sa.Unit().String()
    result.OwnerTag = si.OwnerTag
    result.Kind = si.Kind
    result.Persistent = si.Persistent
    // TODO(axw) set status according to whether storage has been provisioned.

    // This is only for provisioned attachments
    machineTag, err := api.storage.UnitAssignedMachine(sa.Unit())
    if err != nil {
        return params.StorageDetails{}, errors.Annotate(err, "getting unit for storage attachment")
    }
    info, err := common.StorageAttachmentInfo(api.storage, sa, machineTag)
    if err != nil {
        if errors.IsNotProvisioned(err) {
            // If Info returns an error, then the storage has not yet been provisioned.
            return result, nil
        }
        return params.StorageDetails{}, errors.Annotate(err, "getting storage attachment info")
    }
    result.Location = info.Location
    if result.Location != "" {
        result.Status = "attached"
    }
    return result, nil
}
// SourcePrecheck checks the state of the source controller to make
// sure that the preconditions for model migration are met. The
// backend provided must be for the model to be migrated.
func SourcePrecheck(backend PrecheckBackend) error {
    if err := checkModel(backend); err != nil {
        return errors.Trace(err)
    }
    if err := checkMachines(backend); err != nil {
        return errors.Trace(err)
    }
    if err := checkApplications(backend); err != nil {
        return errors.Trace(err)
    }
    if cleanupNeeded, err := backend.NeedsCleanup(); err != nil {
        return errors.Annotate(err, "checking cleanups")
    } else if cleanupNeeded {
        return errors.New("cleanup needed")
    }

    // Check the source controller.
    controllerBackend, err := backend.ControllerBackend()
    if err != nil {
        return errors.Trace(err)
    }
    if err := checkController(controllerBackend); err != nil {
        return errors.Annotate(err, "controller")
    }
    return nil
}
func (dw *discoverspacesWorker) createSpacesFromArgs(createSpacesArgs params.CreateSpacesParams) error {
    facade := dw.config.Facade
    expectedNumCreated := len(createSpacesArgs.Spaces)
    if expectedNumCreated > 0 {
        result, err := facade.CreateSpaces(createSpacesArgs)
        if err != nil {
            return errors.Annotate(err, "creating spaces failed")
        }
        if len(result.Results) != expectedNumCreated {
            return errors.Errorf(
                "unexpected response from CreateSpaces: expected %d results, got %d",
                expectedNumCreated, len(result.Results),
            )
        }
        for _, res := range result.Results {
            if res.Error != nil {
                return errors.Annotate(res.Error, "creating space failed")
            }
        }
        logger.Debugf("discovered and imported %d spaces: %v", expectedNumCreated, createSpacesArgs)
    } else {
        logger.Debugf("no unknown spaces discovered for import")
    }
    return nil
}
func addUUIDToMachineNames(e *Environ) error {
    nova := e.nova()
    servers, err := nova.ListServers(oldMachinesFilter(e))
    if err != nil {
        return errors.Annotate(err, "upgrading server names")
    }
    cfg := e.Config()
    eName := cfg.Name()
    eUUID, ok := cfg.UUID()
    if !ok {
        return errors.NotFoundf("model uuid for model %q", eName)
    }
    for _, server := range servers {
        newName, ok, err := replaceNameWithID(server.Name, eName, eUUID)
        if err != nil {
            return errors.Annotate(err, "generating the new server name")
        }
        if !ok {
            continue
        }
        // Name should have uuid instead of name
        _, err = nova.UpdateServerName(server.Id, newName)
        if err != nil {
            return errors.Annotatef(err, "upgrading machine name from %q to %q", server.Name, newName)
        }
    }
    return nil
}
func (dw *discoverspacesWorker) addSubnetsFromArgs(addSubnetsArgs params.AddSubnetsParams) error {
    facade := dw.config.Facade
    expectedNumAdded := len(addSubnetsArgs.Subnets)
    if expectedNumAdded > 0 {
        result, err := facade.AddSubnets(addSubnetsArgs)
        if err != nil {
            return errors.Annotate(err, "adding subnets failed")
        }
        if len(result.Results) != expectedNumAdded {
            return errors.Errorf(
                "unexpected response from AddSubnets: expected %d results, got %d",
                expectedNumAdded, len(result.Results),
            )
        }
        for _, res := range result.Results {
            if res.Error != nil {
                return errors.Annotate(res.Error, "adding subnet failed")
            }
        }
        logger.Debugf("discovered and imported %d subnets: %v", expectedNumAdded, addSubnetsArgs)
    } else {
        logger.Debugf("no unknown subnets discovered for import")
    }
    return nil
}
func (mi *maas1Instance) hardwareCharacteristics() (*instance.HardwareCharacteristics, error) {
    nodeArch, _, err := mi.architecture()
    if err != nil {
        return nil, errors.Annotate(err, "error determining architecture")
    }
    nodeCpuCount, err := mi.cpuCount()
    if err != nil {
        return nil, errors.Annotate(err, "error determining cpu count")
    }
    nodeMemoryMB, err := mi.memory()
    if err != nil {
        return nil, errors.Annotate(err, "error determining available memory")
    }
    zone, err := mi.zone()
    if err != nil {
        return nil, errors.Annotate(err, "error determining availability zone")
    }
    hc := &instance.HardwareCharacteristics{
        Arch:             &nodeArch,
        CpuCores:         &nodeCpuCount,
        Mem:              &nodeMemoryMB,
        AvailabilityZone: &zone,
    }
    nodeTags, err := mi.tagNames()
    if err != nil && !errors.IsNotFound(err) {
        return nil, errors.Annotate(err, "error determining tag names")
    }
    if len(nodeTags) > 0 {
        hc.Tags = &nodeTags
    }
    return hc, nil
}
func (env *environ) destroyHostedModelResources() error {
    // Destroy all instances where juju-controller-uuid,
    // but not juju-model-uuid, matches env.uuid.
    prefix := common.EnvFullName("")
    instances, err := env.prefixedInstances(prefix)
    if err != nil {
        return errors.Annotate(err, "listing instances")
    }
    logger.Debugf("instances: %v", instances)

    var names []string
    for _, inst := range instances {
        metadata := inst.raw.Metadata()
        if metadata[tags.JujuModel] == env.uuid {
            continue
        }
        if metadata[tags.JujuController] != env.uuid {
            continue
        }
        names = append(names, string(inst.Id()))
    }
    if err := env.raw.RemoveInstances(prefix, names...); err != nil {
        return errors.Annotate(err, "removing hosted model instances")
    }
    return nil
}
// setPrivateMetadataSources sets the default tools metadata source
// for tools syncing, and adds an image metadata source after verifying
// the contents.
func setPrivateMetadataSources(env environs.Environ, metadataDir string) ([]*imagemetadata.ImageMetadata, error) {
    logger.Infof("Setting default tools and image metadata sources: %s", metadataDir)
    tools.DefaultBaseURL = metadataDir

    imageMetadataDir := filepath.Join(metadataDir, storage.BaseImagesPath)
    if _, err := os.Stat(imageMetadataDir); err != nil {
        if !os.IsNotExist(err) {
            return nil, errors.Annotate(err, "cannot access image metadata")
        }
        return nil, nil
    }

    baseURL := fmt.Sprintf("file://%s", filepath.ToSlash(imageMetadataDir))
    datasource := simplestreams.NewURLDataSource("bootstrap metadata", baseURL, utils.NoVerifySSLHostnames)

    // Read the image metadata, as we'll want to upload it to the environment.
    imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{})
    existingMetadata, _, err := imagemetadata.Fetch(
        []simplestreams.DataSource{datasource}, imageConstraint, false)
    if err != nil && !errors.IsNotFound(err) {
        return nil, errors.Annotate(err, "cannot read image metadata")
    }

    // Add an image metadata datasource for constraint validation, etc.
    // TODO (anastasiamac 2015-09-26) Delete when search path is modified to look
    // into state first.
    environs.RegisterUserImageDataSourceFunc("bootstrap metadata", func(environs.Environ) (simplestreams.DataSource, error) {
        return datasource, nil
    })
    logger.Infof("custom image metadata added to search path")
    return existingMetadata, nil
}
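// Illustrative sketch (standalone): how a local metadata directory is turned
// into the file:// base URL used for the simplestreams data source above.
// The directory path is hypothetical.
package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    imageMetadataDir := filepath.Join("/var/lib/juju/metadata", "images")
    baseURL := fmt.Sprintf("file://%s", filepath.ToSlash(imageMetadataDir))
    fmt.Println(baseURL) // file:///var/lib/juju/metadata/images
}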
// Validate ensures that the origin is correct.
func (o Origin) Validate() error {
    if o.ControllerUUID == "" {
        return errors.NewNotValid(nil, "empty ControllerUUID")
    }
    if !names.IsValidModel(o.ControllerUUID) {
        return errors.NewNotValid(nil, fmt.Sprintf("ControllerUUID %q not a valid UUID", o.ControllerUUID))
    }

    if o.ModelUUID == "" {
        return errors.NewNotValid(nil, "empty ModelUUID")
    }
    if !names.IsValidModel(o.ModelUUID) {
        return errors.NewNotValid(nil, fmt.Sprintf("ModelUUID %q not a valid UUID", o.ModelUUID))
    }

    if err := o.Type.Validate(); err != nil {
        return errors.Annotate(err, "invalid Type")
    }
    if o.Name == "" && o.Type != OriginTypeUnknown {
        return errors.NewNotValid(nil, "empty Name")
    }
    if err := o.Type.ValidateName(o.Name); err != nil {
        return errors.Annotatef(err, "invalid Name %q", o.Name)
    }

    if !o.Software.isZero() {
        if err := o.Software.Validate(); err != nil {
            return errors.Annotate(err, "invalid Software")
        }
    }
    return nil
}
// Run implements Command.Run.
func (c *changePasswordCommand) Run(ctx *cmd.Context) error {
    if c.api == nil {
        api, err := c.NewUserManagerAPIClient()
        if err != nil {
            return errors.Trace(err)
        }
        c.api = api
        defer c.api.Close()
    }

    password, err := c.generateOrReadPassword(ctx, c.Generate)
    if err != nil {
        return errors.Trace(err)
    }

    var writer EnvironInfoCredsWriter
    var creds configstore.APICredentials
    if c.User == "" {
        // We get the creds writer before changing the password just to
        // minimise the things that could go wrong after changing the password
        // in the server.
        if c.writer == nil {
            writer, err = c.ConnectionInfo()
            if err != nil {
                return errors.Trace(err)
            }
        } else {
            writer = c.writer
        }
        creds = writer.APICredentials()
    } else {
        creds.User = c.User
    }

    oldPassword := creds.Password
    creds.Password = password
    if err = c.api.SetPassword(creds.User, password); err != nil {
        return block.ProcessBlockedError(err, block.BlockChange)
    }

    if c.User != "" {
        return writeServerFile(c, ctx, c.User, password, c.OutPath)
    }
    writer.SetAPICredentials(creds)
    if err := writer.Write(); err != nil {
        logger.Errorf("updating the cached credentials failed, reverting to original password")
        setErr := c.api.SetPassword(creds.User, oldPassword)
        if setErr != nil {
            logger.Errorf("failed to set password back, you will need to edit your environments file by hand to specify the password: %q", password)
            return errors.Annotate(setErr, "failed to set password back")
        }
        return errors.Annotate(err, "failed to write new password to environments file")
    }
    ctx.Infof("Your password has been updated.")
    return nil
}
// SetHostedEnvironCount is an upgrade step that sets hostedEnvCountDoc.Count
// to the number of hosted environments.
func SetHostedEnvironCount(st *State) error {
    environments, closer := st.getCollection(environmentsC)
    defer closer()
    envCount, err := environments.Find(nil).Count()
    if err != nil {
        return errors.Annotate(err, "failed to read environments")
    }

    stateServers, closer := st.getCollection(stateServersC)
    defer closer()
    count, err := stateServers.FindId(hostedEnvCountKey).Count()
    if err != nil {
        return errors.Annotate(err, "failed to read state server")
    }

    hostedCount := envCount - 1 // -1 as we don't count the system environment
    op := txn.Op{
        C:  stateServersC,
        Id: hostedEnvCountKey,
    }
    if count == 0 {
        op.Assert = txn.DocMissing
        op.Insert = &hostedEnvCountDoc{hostedCount}
    } else {
        op.Update = bson.D{{"$set", bson.D{{"refcount", hostedCount}}}}
    }

    return st.runTransaction([]txn.Op{op})
}