// obliterateUnit removes a unit from state completely. It is not safe or
// sane to obliterate any unit in isolation; its only reasonable use is in
// the context of machine obliteration, in which we can be sure that unclean
// shutdown of units is not going to leave a machine in a difficult state.
func (st *State) obliterateUnit(unitName string) error {
	unit, err := st.Unit(unitName)
	if errors.IsNotFound(err) {
		return nil
	} else if err != nil {
		return err
	}
	// Unlike the machine, we *can* always destroy the unit, and (at least)
	// prevent further dependencies being added. If we're really lucky, the
	// unit will be removed immediately.
	if err := unit.Destroy(); err != nil {
		return err
	}
	if err := unit.Refresh(); errors.IsNotFound(err) {
		return nil
	} else if err != nil {
		return err
	}
	for _, subName := range unit.SubordinateNames() {
		if err := st.obliterateUnit(subName); err != nil {
			return err
		}
	}
	if err := unit.EnsureDead(); err != nil {
		return err
	}
	return unit.Remove()
}

// Destroy, when called on an Alive unit, advances its lifecycle as far as
// possible; it otherwise has no effect. In most situations, the unit's
// life is just set to Dying; but if a principal unit that is not assigned
// to a provisioned machine is Destroyed, it will be removed from state
// directly.
func (u *Unit) Destroy() (err error) {
	defer func() {
		if err == nil {
			// This is a white lie; the document might actually be removed.
			u.doc.Life = Dying
		}
	}()
	unit := &Unit{st: u.st, doc: u.doc}
	for i := 0; i < 5; i++ {
		switch ops, err := unit.destroyOps(); err {
		case errRefresh:
		case errAlreadyDying:
			return nil
		case nil:
			if err := unit.st.runTransaction(ops); err != txn.ErrAborted {
				return err
			}
		default:
			return err
		}
		if err := unit.Refresh(); errors.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}

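// The five-attempt loop above is a recurring idiom in this code (it appears
// again in Relation.Destroy, Service.Destroy and Unit.Remove below): build
// transaction operations from the current in-memory document, and treat an
// aborted transaction as "someone changed the document under us", so refresh
// and rebuild rather than fail. What follows is a minimal, self-contained
// sketch of that retry pattern only; runTxn, buildOps, refresh and the
// sentinel errors are hypothetical stand-ins, not the real state API.
package main

import (
	"errors"
	"fmt"
)

var (
	errAborted             = errors.New("transaction aborted")
	errExcessiveContention = errors.New("state changing too quickly; try again soon")
)

// destroyWithRetry recomputes the operations from fresh state on every
// attempt: an abort means an assertion failed because the document changed,
// so we refresh and try again, giving up after a fixed number of attempts.
func destroyWithRetry(buildOps func() ([]string, error), runTxn func([]string) error, refresh func() error) error {
	for i := 0; i < 5; i++ {
		ops, err := buildOps()
		if err != nil {
			return err
		}
		if err := runTxn(ops); err != errAborted {
			// Success (nil) and real failures both end the loop;
			// only contention (errAborted) is retried.
			return err
		}
		if err := refresh(); err != nil {
			return err
		}
	}
	return errExcessiveContention
}

func main() {
	attempts := 0
	err := destroyWithRetry(
		func() ([]string, error) { return []string{"set-dying"}, nil },
		func(ops []string) error {
			// Simulate two aborts before the assertions finally hold.
			if attempts++; attempts < 3 {
				return errAborted
			}
			return nil
		},
		func() error { return nil },
	)
	fmt.Println(err, "after", attempts, "attempts") // <nil> after 3 attempts
}
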
// Destroy ensures that the relation will be removed at some point; if no units
// are currently in scope, it will be removed immediately.
func (r *Relation) Destroy() (err error) {
	defer errors.Maskf(&err, "cannot destroy relation %q", r)
	if len(r.doc.Endpoints) == 1 && r.doc.Endpoints[0].Role == charm.RolePeer {
		return fmt.Errorf("is a peer relation")
	}
	defer func() {
		if err == nil {
			// This is a white lie; the document might actually be removed.
			r.doc.Life = Dying
		}
	}()
	rel := &Relation{r.st, r.doc}
	// In this context, aborted transactions indicate that the number of units
	// in scope has changed between 0 and not-0. The chances of 5 successive
	// attempts each hitting this change -- which is itself an unlikely one --
	// are considered to be extremely small.
	for attempt := 0; attempt < 5; attempt++ {
		ops, _, err := rel.destroyOps("")
		if err == errAlreadyDying {
			return nil
		} else if err != nil {
			return err
		}
		if err := rel.st.runTransaction(ops); err != txn.ErrAborted {
			return err
		}
		if err := rel.Refresh(); errors.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}

func (m *machine) refresh() (bool, error) {
	if err := m.stm.Refresh(); err != nil {
		if errors.IsNotFound(err) {
			// We want to be robust when the machine
			// state is out of date with respect to the
			// state server info, so if the machine
			// has been removed, just assume that
			// no change has happened - the machine
			// loop will be stopped very soon anyway.
			return false, nil
		}
		return false, err
	}
	changed := false
	if wantsVote := m.stm.WantsVote(); wantsVote != m.wantsVote {
		m.wantsVote = wantsVote
		changed = true
	}
	if hps := m.stm.MongoHostPorts(); !hostPortsEqual(hps, m.mongoHostPorts) {
		m.mongoHostPorts = hps
		changed = true
	}
	if hps := m.stm.APIHostPorts(); !hostPortsEqual(hps, m.apiHostPorts) {
		m.apiHostPorts = hps
		changed = true
	}
	return changed, nil
}

func openState(agentConfig agent.Config) (_ *state.State, _ *state.Machine, err error) {
	info, ok := agentConfig.StateInfo()
	if !ok {
		return nil, nil, fmt.Errorf("no state info available")
	}
	st, err := state.Open(info, state.DialOpts{}, environs.NewStatePolicy())
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if err != nil {
			st.Close()
		}
	}()
	m0, err := st.FindEntity(agentConfig.Tag())
	if err != nil {
		if errors.IsNotFound(err) {
			err = worker.ErrTerminateAgent
		}
		return nil, nil, err
	}
	m := m0.(*state.Machine)
	if m.Life() == state.Dead {
		return nil, nil, worker.ErrTerminateAgent
	}
	// Check the machine nonce as provisioned matches the agent.Conf value.
	if !m.CheckProvisioned(agentConfig.Nonce()) {
		// The agent is running on a different machine to the one it
		// should be according to state. It must stop immediately.
		logger.Errorf("running machine %v agent on inappropriate instance", m)
		return nil, nil, worker.ErrTerminateAgent
	}
	return st, m, nil
}

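// openState works because err is a *named* return value: the deferred
// closure observes the final error of every return path and closes st only
// on failure. A compact, self-contained illustration of that cleanup-on-error
// idiom follows; Resource, acquire and openAndCheck are hypothetical
// stand-ins for the real state types.
package main

import (
	"errors"
	"fmt"
)

type Resource struct{ open bool }

func (r *Resource) Close() { r.open = false; fmt.Println("closed") }

func acquire() (*Resource, error) { return &Resource{open: true}, nil }

func openAndCheck(fail bool) (_ *Resource, err error) {
	r, err := acquire()
	if err != nil {
		return nil, err
	}
	defer func() {
		// Runs after every return below; releases the resource only
		// when some later validation step has failed.
		if err != nil {
			r.Close()
		}
	}()
	if fail {
		return nil, errors.New("validation failed")
	}
	return r, nil
}

func main() {
	if _, err := openAndCheck(true); err != nil {
		fmt.Println("got:", err) // the deferred hook has already closed the resource
	}
}
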
// WatchAuthorisedKeys starts a watcher to track changes to the authorised ssh keys
// for the specified machines.
// The current implementation relies on global authorised keys being stored in the environment config.
// This will change as new user management and authorisation functionality is added.
func (api *KeyUpdaterAPI) WatchAuthorisedKeys(arg params.Entities) (params.NotifyWatchResults, error) {
	results := make([]params.NotifyWatchResult, len(arg.Entities))
	canRead, err := api.getCanRead()
	if err != nil {
		return params.NotifyWatchResults{}, err
	}
	for i, entity := range arg.Entities {
		// 1. Check permissions.
		if !canRead(entity.Tag) {
			results[i].Error = common.ServerError(common.ErrPerm)
			continue
		}
		// 2. Check that the entity exists.
		if _, err := api.state.FindEntity(entity.Tag); err != nil {
			if errors.IsNotFound(err) {
				results[i].Error = common.ServerError(common.ErrPerm)
			} else {
				results[i].Error = common.ServerError(err)
			}
			continue
		}
		// 3. Watch for changes.
		var err error
		watch := api.state.WatchForEnvironConfigChanges()
		// Consume the initial event.
		if _, ok := <-watch.Changes(); ok {
			results[i].NotifyWatcherId = api.resources.Register(watch)
		} else {
			err = watcher.MustErr(watch)
		}
		results[i].Error = common.ServerError(err)
	}
	return params.NotifyWatchResults{Results: results}, nil
}

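// The "consume the initial event" step reflects a common watcher contract:
// a watcher delivers one initial event describing current state, then one
// event per subsequent change. The server swallows the initial event before
// registering the watcher, so the client's first receive means "something
// changed", not "the watcher started". A small sketch of that idiom,
// assuming this contract; the NotifyWatcher type here is hypothetical.
package main

import "fmt"

type NotifyWatcher struct{ ch chan struct{} }

func (w *NotifyWatcher) Changes() <-chan struct{} { return w.ch }

func newWatcher() *NotifyWatcher {
	w := &NotifyWatcher{ch: make(chan struct{}, 1)}
	w.ch <- struct{}{} // the mandatory initial event
	return w
}

func main() {
	w := newWatcher()
	// Server side: swallow the initial event before handing the watcher
	// to a client. A closed channel here (ok == false) would mean the
	// watcher died before delivering it.
	if _, ok := <-w.Changes(); !ok {
		fmt.Println("watcher failed to deliver its initial event")
		return
	}
	fmt.Println("watcher registered; later events now signal real changes")
}
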
// NextTools returns the next changed tools, waiting
// until the tools are actually set.
func (w *toolsWaiter) NextTools(c *gc.C) (*coretools.Tools, error) {
	for _ = range w.changes {
		err := w.tooler.Refresh()
		if err != nil {
			return nil, fmt.Errorf("cannot refresh: %v", err)
		}
		if w.tooler.Life() == state.Dead {
			return nil, fmt.Errorf("object is dead")
		}
		tools, err := w.tooler.AgentTools()
		if errors.IsNotFound(err) {
			c.Logf("tools not yet set")
			continue
		}
		if err != nil {
			return nil, err
		}
		changed := w.lastTools == nil || *tools != *w.lastTools
		w.lastTools = tools
		if changed {
			return tools, nil
		}
		c.Logf("found same tools")
	}
	return nil, fmt.Errorf("watcher closed prematurely: %v", w.watcher.Err())
}

// Destroy ensures that the service and all its relations will be removed at
// some point; if the service has no units, and no relation involving the
// service has any units in scope, they are all removed immediately.
func (s *Service) Destroy() (err error) {
	defer errors.Maskf(&err, "cannot destroy service %q", s)
	defer func() {
		if err == nil {
			// This is a white lie; the document might actually be removed.
			s.doc.Life = Dying
		}
	}()
	svc := &Service{st: s.st, doc: s.doc}
	for i := 0; i < 5; i++ {
		switch ops, err := svc.destroyOps(); err {
		case errRefresh:
		case errAlreadyDying:
			return nil
		case nil:
			if err := svc.st.runTransaction(ops); err != txn.ErrAborted {
				return err
			}
		default:
			return err
		}
		if err := svc.Refresh(); errors.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}

// ConfigForName returns the configuration for the environment with
// the given name from the default environments file. If the name is
// blank, the default environment will be used. If the configuration
// is not found, an errors.NotFoundError is returned. If the given
// store contains an entry for the environment and it has associated
// bootstrap config, that configuration will be returned.
// ConfigForName also returns where the configuration was sourced from
// (this is also valid even when there is an error).
func ConfigForName(name string, store configstore.Storage) (*config.Config, ConfigSource, error) {
	envs, err := ReadEnvirons("")
	if err != nil {
		return nil, ConfigFromNowhere, err
	}
	if name == "" {
		name = envs.Default
	}
	// TODO(rog) 2013-10-04 https://bugs.github.com/wallyworld/core/+bug/1235217
	// Don't fall back to reading from environments.yaml
	// when we can be sure that everyone has a
	// .jenv file for their currently bootstrapped environments.
	if name != "" {
		info, err := store.ReadInfo(name)
		if err == nil {
			if len(info.BootstrapConfig()) == 0 {
				return nil, ConfigFromNowhere, EmptyConfig{fmt.Errorf("environment has no bootstrap configuration data")}
			}
			logger.Debugf("ConfigForName found bootstrap config %#v", info.BootstrapConfig())
			cfg, err := config.New(config.NoDefaults, info.BootstrapConfig())
			return cfg, ConfigFromInfo, err
		}
		if err != nil && !errors.IsNotFound(err) {
			return nil, ConfigFromInfo, fmt.Errorf("cannot read environment info for %q: %v", name, err)
		}
	}
	cfg, err := envs.Config(name)
	return cfg, ConfigFromEnvirons, err
}

func (p *updater) startMachines(ids []string) error {
	for _, id := range ids {
		if c := p.machines[id]; c == nil {
			// We don't know about the machine - start
			// a goroutine to deal with it.
			m, err := p.context.getMachine(id)
			if errors.IsNotFound(err) {
				logger.Warningf("watcher gave notification of non-existent machine %q", id)
				continue
			}
			if err != nil {
				return err
			}
			// We don't poll manual machines.
			isManual, err := m.IsManual()
			if err != nil {
				return err
			}
			if isManual {
				continue
			}
			c = make(chan struct{})
			p.machines[id] = c
			go runMachine(p.context.newMachineContext(), m, c, p.machineDead)
		} else {
			c <- struct{}{}
		}
	}
	return nil
}

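// startMachines keeps a map from machine ID to channel: a nil entry means
// "no goroutine yet, start one", and an existing entry means "already
// running, signal it via the channel". Below is a self-contained sketch of
// that bookkeeping pattern under simplified assumptions (a buffered channel
// and a trivial worker loop); pool, ensure and the worker body are
// illustrative, not the real updater.
package main

import (
	"fmt"
	"sync"
)

type pool struct {
	mu      sync.Mutex
	workers map[string]chan struct{}
	wg      sync.WaitGroup
}

// ensure starts a worker goroutine for id if none exists, and otherwise
// pokes the existing one through its channel.
func (p *pool) ensure(id string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c := p.workers[id]; c != nil {
		c <- struct{}{} // already running: just nudge it
		return
	}
	c := make(chan struct{}, 1)
	p.workers[id] = c
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		for range c {
			fmt.Println("worker", id, "poked")
		}
	}()
}

func main() {
	p := &pool{workers: make(map[string]chan struct{})}
	p.ensure("0") // starts a goroutine
	p.ensure("0") // pokes the existing one
	p.mu.Lock()
	for _, c := range p.workers {
		close(c) // shut the workers down
	}
	p.mu.Unlock()
	p.wg.Wait()
}
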
// CharmArchiveURL returns the URL corresponding to the charm archive
// (bundle) in the provider storage for each given charm URL, along
// with the DisableSSLHostnameVerification flag.
func (u *UniterAPI) CharmArchiveURL(args params.CharmURLs) (params.CharmArchiveURLResults, error) {
	result := params.CharmArchiveURLResults{
		Results: make([]params.CharmArchiveURLResult, len(args.URLs)),
	}
	// Get the SSL hostname verification environment setting.
	envConfig, err := u.st.EnvironConfig()
	if err != nil {
		return result, err
	}
	// SSLHostnameVerification defaults to true, so we need to
	// invert that, for backwards-compatibility (older versions
	// will have DisableSSLHostnameVerification: false by default).
	disableSSLHostnameVerification := !envConfig.SSLHostnameVerification()
	for i, arg := range args.URLs {
		curl, err := charm.ParseURL(arg.URL)
		if err != nil {
			err = common.ErrPerm
		} else {
			var sch *state.Charm
			sch, err = u.st.Charm(curl)
			if errors.IsNotFound(err) {
				err = common.ErrPerm
			}
			if err == nil {
				result.Results[i].Result = sch.BundleURL().String()
				result.Results[i].DisableSSLHostnameVerification = disableSSLHostnameVerification
			}
		}
		result.Results[i].Error = common.ServerError(err)
	}
	return result, nil
}

func (w *settingsWatcher) loop(key string) (err error) {
	ch := make(chan watcher.Change)
	revno := int64(-1)
	settings, err := readSettings(w.st, key)
	if err == nil {
		revno = settings.txnRevno
	} else if !errors.IsNotFound(err) {
		return err
	}
	w.st.watcher.Watch(w.st.settings.Name, key, revno, ch)
	defer w.st.watcher.Unwatch(w.st.settings.Name, key, ch)
	out := w.out
	if revno == -1 {
		out = nil
	}
	for {
		select {
		case <-w.st.watcher.Dead():
			return stateWatcherDeadError(w.st.watcher.Err())
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case <-ch:
			settings, err = readSettings(w.st, key)
			if err != nil {
				return err
			}
			out = w.out
		case out <- settings:
			out = nil
		}
	}
}

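// The loop above relies on the nil-channel trick: sending on a nil channel
// blocks forever, so setting out = nil disables the send case in the select
// until a fresh value arrives, and out = w.out re-enables it. A minimal,
// runnable demonstration of just that trick follows; the channel names and
// int payload are illustrative, not the real watcher types.
package main

import "fmt"

func main() {
	updates := make(chan int)
	results := make(chan int)

	go func() {
		for _, v := range []int{1, 2, 3} {
			updates <- v
		}
		close(updates)
	}()

	go func() {
		var out chan int // nil: the send case starts disabled
		var latest int
		for {
			select {
			case v, ok := <-updates:
				if !ok {
					close(results)
					return
				}
				latest = v
				out = results // fresh value: enable the send case
			case out <- latest:
				out = nil // delivered: disable it again
			}
		}
	}()

	// The consumer sees the latest value; intermediate updates may be
	// coalesced, which matches the watcher's "latest wins" semantics.
	for v := range results {
		fmt.Println("got", v)
	}
}
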
// FindToolsForCloud returns a List containing all tools with a given
// major.minor version number and cloudSpec, filtered by filter.
// If minorVersion = -1, then only majorVersion is considered.
// If no *available* tools have the supplied major.minor version number, or match the
// supplied filter, the function returns a *NotFoundError.
func FindToolsForCloud(sources []simplestreams.DataSource, cloudSpec simplestreams.CloudSpec, majorVersion, minorVersion int, filter coretools.Filter) (list coretools.List, err error) {
	toolsConstraint, err := makeToolsConstraint(cloudSpec, majorVersion, minorVersion, filter)
	if err != nil {
		return nil, err
	}
	toolsMetadata, _, err := Fetch(sources, simplestreams.DefaultIndexPath, toolsConstraint, false)
	if err != nil {
		if errors.IsNotFound(err) {
			err = ErrNoTools
		}
		return nil, err
	}
	if len(toolsMetadata) == 0 {
		return nil, coretools.ErrNoMatches
	}
	list = make(coretools.List, len(toolsMetadata))
	for i, metadata := range toolsMetadata {
		list[i] = &coretools.Tools{
			Version: metadata.binary(),
			URL:     metadata.FullPath,
			Size:    metadata.Size,
			SHA256:  metadata.SHA256,
		}
	}
	if filter.Series != "" {
		if err := checkToolsSeries(list, filter.Series); err != nil {
			return nil, err
		}
	}
	return list, err
}

// ServerError returns an error suitable for returning to an API
// client, with an error code suitable for various kinds of errors
// generated in packages outside the API.
func ServerError(err error) *params.Error {
	if err == nil {
		return nil
	}
	code, ok := singletonCode(err)
	switch {
	case ok:
	case errors.IsUnauthorized(err):
		code = params.CodeUnauthorized
	case errors.IsNotFound(err):
		code = params.CodeNotFound
	case errors.IsAlreadyExists(err):
		code = params.CodeAlreadyExists
	case state.IsNotAssigned(err):
		code = params.CodeNotAssigned
	case state.IsHasAssignedUnitsError(err):
		code = params.CodeHasAssignedUnits
	case IsNoAddressSetError(err):
		code = params.CodeNoAddressSet
	case state.IsNotProvisionedError(err):
		code = params.CodeNotProvisioned
	default:
		code = params.ErrCode(err)
	}
	return &params.Error{
		Message: err.Error(),
		Code:    code,
	}
}

// GetIndexWithFormat returns a simplestreams index of the specified format.
// Exported for testing.
func GetIndexWithFormat(source DataSource, indexPath, indexFormat string, requireSigned bool, cloudSpec CloudSpec, params ValueParams) (*IndexReference, error) {
	data, url, err := fetchData(source, indexPath, requireSigned, params.PublicKey)
	if err != nil {
		if errors.IsNotFound(err) || errors.IsUnauthorized(err) {
			return nil, err
		}
		return nil, fmt.Errorf("cannot read index data, %v", err)
	}
	var indices Indices
	err = json.Unmarshal(data, &indices)
	if err != nil {
		logger.Errorf("bad JSON index data at URL %q: %v", url, string(data))
		return nil, fmt.Errorf("cannot unmarshal JSON index metadata at URL %q: %v", url, err)
	}
	if indices.Format != indexFormat {
		return nil, fmt.Errorf(
			"unexpected index file format %q, expected %q at URL %q", indices.Format, indexFormat, url)
	}
	mirrors, url, err := getMirrorRefs(source, mirrorsPath, requireSigned, params)
	if err != nil && !errors.IsNotFound(err) && !errors.IsUnauthorized(err) {
		return nil, fmt.Errorf("cannot load mirror metadata at URL %q: %v", url, err)
	}
	indexRef := &IndexReference{
		Source:      source,
		Indices:     indices,
		valueParams: params,
	}
	// Apply any mirror information to the source.
	if params.MirrorContentId != "" {
		mirrorInfo, err := getMirror(
			source, mirrors, params.DataType, params.MirrorContentId, cloudSpec, requireSigned, params.PublicKey)
		if err == nil {
			logger.Debugf("using mirrored products path: %s", path.Join(mirrorInfo.MirrorURL, mirrorInfo.Path))
			indexRef.Source = NewURLDataSource("mirror", mirrorInfo.MirrorURL, utils.VerifySSLHostnames)
			indexRef.MirroredProductsPath = mirrorInfo.Path
		} else {
			logger.Debugf("no mirror information available for %s: %v", cloudSpec, err)
		}
	}
	return indexRef, nil
}

// constraints is a helper function to return a unit's deployment constraints.
func (u *Unit) constraints() (*constraints.Value, error) {
	cons, err := readConstraints(u.st, u.globalKey())
	if errors.IsNotFound(err) {
		// Lack of constraints indicates lack of unit.
		return nil, errors.NotFoundf("unit")
	} else if err != nil {
		return nil, err
	}
	return &cons, nil
}

// LoadState reads state from the given storage.
func LoadState(stor storage.StorageReader) (*BootstrapState, error) {
	r, err := storage.Get(stor, StateFile)
	if err != nil {
		if coreerrors.IsNotFound(err) {
			return nil, environs.ErrNotBootstrapped
		}
		return nil, err
	}
	return loadState(r)
}

func isRemoved(st *state.State, name string) func(*gc.C) bool {
	return func(c *gc.C) bool {
		_, err := st.Unit(name)
		if errors.IsNotFound(err) {
			return true
		}
		c.Assert(err, gc.IsNil)
		return false
	}
}

// removeOps returns the operations necessary to remove the unit, assuming
// the supplied asserts apply to the unit document.
func (u *Unit) removeOps(asserts bson.D) ([]txn.Op, error) {
	svc, err := u.st.Service(u.doc.Service)
	if errors.IsNotFound(err) {
		// If the service has been removed, the unit must already have been.
		return nil, errAlreadyRemoved
	} else if err != nil {
		return nil, err
	}
	return svc.removeUnitOps(u, asserts)
}

// waitRemoved waits for the supplied machine to be removed from state.
func (s *CommonProvisionerSuite) waitRemoved(c *gc.C, m *state.Machine) {
	s.waitMachine(c, m, func() bool {
		err := m.Refresh()
		if errors.IsNotFound(err) {
			return true
		}
		c.Assert(err, gc.IsNil)
		c.Logf("machine %v is still %s", m, m.Life())
		return false
	})
}

// readMetadata reads the image metadata from metadataStore.
func readMetadata(metadataStore storage.Storage) ([]*ImageMetadata, error) {
	// Read any existing metadata so we can merge the new image metadata with what's there.
	dataSource := storage.NewStorageSimpleStreamsDataSource("existing metadata", metadataStore, storage.BaseImagesPath)
	imageConstraint := NewImageConstraint(simplestreams.LookupParams{})
	existingMetadata, _, err := Fetch(
		[]simplestreams.DataSource{dataSource}, simplestreams.DefaultIndexPath, imageConstraint, false)
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}
	return existingMetadata, nil
}

// getMaybeSignedMetadata returns metadata records matching the specified constraint.
func getMaybeSignedMetadata(source DataSource, baseIndexPath string, cons LookupConstraint, signed bool, params ValueParams) ([]interface{}, *ResolveInfo, error) {
	resolveInfo := &ResolveInfo{}
	indexPath := baseIndexPath + UnsignedSuffix
	if signed {
		indexPath = baseIndexPath + signedSuffix
	}
	var items []interface{}
	indexURL, err := source.URL(indexPath)
	if err != nil {
		// Some providers return an error if asked for the URL of a non-existent file.
		// So the best we can do is use the relative path for the URL when logging messages.
		indexURL = indexPath
	}
	resolveInfo.Source = source.Description()
	resolveInfo.Signed = signed
	resolveInfo.IndexURL = indexURL
	indexRef, err := GetIndexWithFormat(source, indexPath, "index:1.0", signed, cons.Params().CloudSpec, params)
	if err != nil {
		if errors.IsNotFound(err) || errors.IsUnauthorized(err) {
			logger.Debugf("cannot load index %q: %v", indexURL, err)
		}
		return nil, resolveInfo, err
	}
	logger.Debugf("read metadata index at %q", indexURL)
	items, err = indexRef.getLatestMetadataWithFormat(cons, "products:1.0", signed)
	if err != nil {
		if errors.IsNotFound(err) {
			logger.Debugf("skipping index because of error getting latest metadata %q: %v", indexURL, err)
			return nil, resolveInfo, err
		}
		if _, ok := err.(*noMatchingProductsError); ok {
			logger.Debugf("%v", err)
		}
	}
	if indexRef.Source.Description() == "mirror" {
		resolveInfo.MirrorURL = indexRef.Source.(*urlDataSource).baseURL
	}
	return items, resolveInfo, err
}

// cleanupMachine systematically destroys and removes all entities that
// depend upon the supplied machine, and removes the machine from state. It's
// expected to be used in response to destroy-machine --force.
func (st *State) cleanupMachine(machineId string) error {
	machine, err := st.Machine(machineId)
	if errors.IsNotFound(err) {
		return nil
	} else if err != nil {
		return err
	}
	// In an ideal world, we'd call machine.Destroy() here, and thus prevent
	// new dependencies being added while we clean up the ones we know about.
	// But machine destruction is unsophisticated, and doesn't allow for
	// destruction while dependencies exist; so we just have to deal with that
	// possibility below.
	if err := st.cleanupContainers(machine); err != nil {
		return err
	}
	for _, unitName := range machine.doc.Principals {
		if err := st.obliterateUnit(unitName); err != nil {
			return err
		}
	}
	// We need to refresh the machine at this point, because the local copy
	// of the document will not reflect changes caused by the unit cleanups
	// above, and may thus fail immediately.
	if err := machine.Refresh(); errors.IsNotFound(err) {
		return nil
	} else if err != nil {
		return err
	}
	// TODO(fwereade): 2013-11-11 bug 1250104
	// If this fails, it's *probably* due to a race in which new dependencies
	// were added while we cleaned up the old ones. If the cleanup doesn't run
	// again -- which it *probably* will anyway -- the issue can be resolved by
	// force-destroying the machine again; that's better than adding layer
	// upon layer of complication here.
	return machine.EnsureDead()

	// Note that we do *not* remove the machine entirely: we leave it for the
	// provisioner to clean up, so that we don't end up with an unreferenced
	// instance that would otherwise be ignored when in provisioner-safe-mode.
}

// DestroyInfo destroys the configuration data for the named
// environment from the given store.
func DestroyInfo(envName string, store configstore.Storage) error {
	info, err := store.ReadInfo(envName)
	if err != nil {
		if errors.IsNotFound(err) {
			return nil
		}
		return err
	}
	if err := info.Destroy(); err != nil {
		return errgo.Annotate(err, "cannot destroy environment configuration information")
	}
	return nil
}

// ReadMetadata returns the tools metadata from the given storage.
func ReadMetadata(store storage.StorageReader) ([]*ToolsMetadata, error) {
	dataSource := storage.NewStorageSimpleStreamsDataSource("existing metadata", store, storage.BaseToolsPath)
	toolsConstraint, err := makeToolsConstraint(simplestreams.CloudSpec{}, -1, -1, coretools.Filter{})
	if err != nil {
		return nil, err
	}
	metadata, _, err := Fetch(
		[]simplestreams.DataSource{dataSource}, simplestreams.DefaultIndexPath, toolsConstraint, false)
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}
	return metadata, nil
}

// cleanupContainers recursively calls cleanupMachine on the supplied
// machine's containers, and removes them from state entirely.
func (st *State) cleanupContainers(machine *Machine) error {
	containerIds, err := machine.Containers()
	if errors.IsNotFound(err) {
		return nil
	} else if err != nil {
		return err
	}
	for _, containerId := range containerIds {
		if err := st.cleanupMachine(containerId); err != nil {
			return err
		}
		container, err := st.Machine(containerId)
		if errors.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
		if err := container.Remove(); err != nil {
			return err
		}
	}
	return nil
}

// EnsureToolsAvailability verifies the tools are available. If no tools are
// found, it will automatically synchronize them.
func EnsureToolsAvailability(ctx environs.BootstrapContext, env environs.Environ, series string, toolsArch *string) (coretools.List, error) {
	cfg := env.Config()
	var vers *version.Number
	if agentVersion, ok := cfg.AgentVersion(); ok {
		vers = &agentVersion
	}
	logger.Debugf(
		"looking for bootstrap tools: series=%q, arch=%v, version=%v",
		series, toolsArch, vers,
	)
	params := envtools.BootstrapToolsParams{
		Version: vers,
		Arch:    toolsArch,
		Series:  series,
		// If vers.Build>0, the tools may have been uploaded in this session.
		// Allow retries, so we wait until the storage has caught up.
		AllowRetry: vers != nil && vers.Build > 0,
	}
	toolsList, err := envtools.FindBootstrapTools(env, params)
	if err == nil {
		return toolsList, nil
	} else if !errors.IsNotFound(err) {
		return nil, err
	}
	// Only automatically upload tools for dev versions.
	if !version.Current.IsDev() {
		return nil, fmt.Errorf("cannot upload bootstrap tools: %v", noToolsNoUploadMessage)
	}
	// No tools available so our only hope is to build locally and upload.
	logger.Warningf("no prepackaged tools available")
	uploadSeries := SeriesToUpload(cfg, nil)
	if series != "" {
		uploadSeries = append(uploadSeries, series)
	}
	if err := UploadTools(ctx, env, toolsArch, false, uploadSeries...); err != nil {
		logger.Errorf("%s", noToolsMessage)
		return nil, fmt.Errorf("cannot upload bootstrap tools: %v", err)
	}
	// TODO(axw) have uploadTools return the list of tools in the target, and use that.
	params.AllowRetry = true
	if toolsList, err = envtools.FindBootstrapTools(env, params); err != nil {
		return nil, fmt.Errorf("cannot find bootstrap tools: %v", err)
	}
	return toolsList, nil
}

// Initialize sets up an initial empty state and returns it.
// This needs to be performed only once for a given environment.
// It returns unauthorizedError if access is unauthorized.
func Initialize(info *Info, cfg *config.Config, opts DialOpts, policy Policy) (rst *State, err error) {
	st, err := Open(info, opts, policy)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			st.Close()
		}
	}()
	// A valid environment is used as a signal that the
	// state has already been initialized. If this is the case
	// do nothing.
	if _, err := st.Environment(); err == nil {
		return st, nil
	} else if !errors.IsNotFound(err) {
		return nil, err
	}
	logger.Infof("initializing environment")
	if err := checkEnvironConfig(cfg); err != nil {
		return nil, err
	}
	uuid, err := utils.NewUUID()
	if err != nil {
		return nil, fmt.Errorf("environment UUID cannot be created: %v", err)
	}
	ops := []txn.Op{
		createConstraintsOp(st, environGlobalKey, constraints.Value{}),
		createSettingsOp(st, environGlobalKey, cfg.AllAttrs()),
		createEnvironmentOp(st, cfg.Name(), uuid.String()),
		{
			C:      st.stateServers.Name,
			Id:     environGlobalKey,
			Insert: &stateServersDoc{},
		}, {
			C:      st.stateServers.Name,
			Id:     apiHostPortsKey,
			Insert: &apiHostPortsDoc{},
		},
	}
	if err := st.runTransaction(ops); err == txn.ErrAborted {
		// The config was created in the meantime.
		return st, nil
	} else if err != nil {
		return nil, err
	}
	return st, nil
}

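// Note the inverse of the earlier retry pattern: here txn.ErrAborted is
// treated as *success*, because the only assertion that can fail is "not yet
// initialized", which means a concurrent caller already did the work. A tiny
// self-contained sketch of that idempotent-initialization idiom; runTxn,
// initOnce and errAborted are illustrative stand-ins, not the real API.
package main

import (
	"errors"
	"fmt"
)

var errAborted = errors.New("transaction aborted")

var initialized bool

// runTxn simulates a transaction asserting "not yet initialized".
func runTxn() error {
	if initialized {
		return errAborted
	}
	initialized = true
	return nil
}

func initOnce() error {
	if err := runTxn(); err == errAborted {
		// Another caller initialized concurrently; that's fine.
		return nil
	} else if err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(initOnce(), initOnce()) // <nil> <nil>; the second call hit the abort path
}
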
// Remove removes the unit from state, and may remove its service as well, if
// the service is Dying and no other references to it exist. It will fail if
// the unit is not Dead.
func (u *Unit) Remove() (err error) {
	defer errors.Maskf(&err, "cannot remove unit %q", u)
	if u.doc.Life != Dead {
		return stderrors.New("unit is not dead")
	}
	// Now the unit is Dead, we can be sure that it's impossible for it to
	// enter relation scopes (once it's Dying, we can be sure of this; but
	// EnsureDead does not require that it already be Dying, so this is the
	// only point at which we can safely backstop lp:1233457 and mitigate
	// the impact of unit agent bugs that leave relation scopes occupied).
	relations, err := serviceRelations(u.st, u.doc.Service)
	if err != nil {
		return err
	}
	for _, rel := range relations {
		ru, err := rel.Unit(u)
		if err != nil {
			return err
		}
		if err := ru.LeaveScope(); err != nil {
			return err
		}
	}
	// Now we're sure we haven't left any scopes occupied by this unit, we
	// can safely remove the document.
	unit := &Unit{st: u.st, doc: u.doc}
	for i := 0; i < 5; i++ {
		switch ops, err := unit.removeOps(isDeadDoc); err {
		case errRefresh:
		case errAlreadyRemoved:
			return nil
		case nil:
			if err := u.st.runTransaction(ops); err != txn.ErrAborted {
				return err
			}
		default:
			return err
		}
		if err := unit.Refresh(); errors.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}

// CheckEnvironment checks if an environment has a bootstrap-verify
// that is written by juju-core commands (as compared to one being
// written by Python juju).
//
// If there is no bootstrap-verify file in the storage, it is still
// considered to be a Juju-core environment since early versions have
// not written it out.
//
// Returns InvalidEnvironmentError on failure, nil otherwise.
func CheckEnvironment(environ Environ) error {
	stor := environ.Storage()
	reader, err := storage.Get(stor, VerificationFilename)
	if errors.IsNotFound(err) {
		// When verification file does not exist, this is a juju-core
		// environment.
		return nil
	} else if err != nil {
		return err
	} else if content, err := ioutil.ReadAll(reader); err != nil {
		return err
	} else if string(content) != verificationContent {
		return InvalidEnvironmentError
	}
	return nil
}