// SetResolved marks the unit as having had any previous state transition
// problems resolved, and informs the unit that it may attempt to
// reestablish normal workflow. The resolved mode parameter informs
// whether to attempt to reexecute previous failed hooks or to continue
// as if they had succeeded before.
func (u *Unit) SetResolved(mode ResolvedMode) (err error) {
	defer utils.ErrorContextf(&err, "cannot set resolved mode for unit %q", u)
	switch mode {
	case ResolvedRetryHooks, ResolvedNoHooks:
	default:
		return fmt.Errorf("invalid error resolution mode: %q", mode)
	}
	// TODO(fwereade): assert unit has error status.
	resolvedNotSet := D{{"resolved", ResolvedNone}}
	ops := []txn.Op{{
		C:      u.st.units.Name,
		Id:     u.doc.Name,
		Assert: append(notDeadDoc, resolvedNotSet...),
		Update: D{{"$set", D{{"resolved", mode}}}},
	}}
	if err := u.st.runTransaction(ops); err == nil {
		u.doc.Resolved = mode
		return nil
	} else if err != txn.ErrAborted {
		return err
	}
	if ok, err := isNotDead(u.st.units, u.doc.Name); err != nil {
		return err
	} else if !ok {
		return errDead
	}
	// For now, the only remaining assert is that resolved was unset.
	return fmt.Errorf("already resolved")
}
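// Illustrative usage sketch (not from the original source): how a caller might
// clear a unit's error state. The unit value and the surrounding error handling
// are assumed to come from the caller's own context.
func exampleResolveUnit(u *Unit) error {
	// Ask the unit to re-run the hooks that previously failed; pass
	// ResolvedNoHooks instead to continue as if they had succeeded.
	return u.SetResolved(ResolvedRetryHooks)
}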
// Destroy ensures that the service and all its relations will be removed at
// some point; if the service has no units, and no relation involving the
// service has any units in scope, they are all removed immediately.
func (s *Service) Destroy() (err error) {
	defer utils.ErrorContextf(&err, "cannot destroy service %q", s)
	defer func() {
		if err == nil {
			// This is a white lie; the document might actually be removed.
			s.doc.Life = Dying
		}
	}()
	svc := &Service{st: s.st, doc: s.doc}
	for i := 0; i < 5; i++ {
		switch ops, err := svc.destroyOps(); err {
		case errRefresh:
		case errAlreadyDying:
			return nil
		case nil:
			if err := svc.st.runTransaction(ops); err != txn.ErrAborted {
				return err
			}
		default:
			return err
		}
		if err := svc.Refresh(); errors.IsNotFoundError(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
// Remove removes the unit from state, and may remove its service as well, if
// the service is Dying and no other references to it exist. It will fail if
// the unit is not Dead.
func (u *Unit) Remove() (err error) {
	defer utils.ErrorContextf(&err, "cannot remove unit %q", u)
	if u.doc.Life != Dead {
		return stderrors.New("unit is not dead")
	}
	unit := &Unit{st: u.st, doc: u.doc}
	for i := 0; i < 5; i++ {
		switch ops, err := unit.removeOps(isDeadDoc); err {
		case errRefresh:
		case errAlreadyRemoved:
			return nil
		case nil:
			if err := u.st.runTransaction(ops); err != txn.ErrAborted {
				return err
			}
		default:
			return err
		}
		if err := unit.Refresh(); errors.IsNotFoundError(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
// validate returns an error if the state violates expectations.
func (st State) validate() (err error) {
	defer utils.ErrorContextf(&err, "invalid uniter state")
	hasHook := st.Hook != nil
	hasCharm := st.CharmURL != nil
	switch st.Op {
	case Install:
		if hasHook {
			return fmt.Errorf("unexpected hook info")
		}
		fallthrough
	case Upgrade:
		if !hasCharm {
			return fmt.Errorf("missing charm URL")
		}
	case Continue, RunHook:
		if !hasHook {
			return fmt.Errorf("missing hook info")
		} else if hasCharm {
			return fmt.Errorf("unexpected charm URL")
		}
	default:
		return fmt.Errorf("unknown operation %q", st.Op)
	}
	switch st.OpStep {
	case Queued, Pending, Done:
	default:
		return fmt.Errorf("unknown operation step %q", st.OpStep)
	}
	if hasHook {
		return st.Hook.Validate()
	}
	return nil
}
// SetConstraints sets the exact constraints to apply when provisioning an
// instance for the machine. It will fail if the machine is Dead, or if it
// is already provisioned.
func (m *Machine) SetConstraints(cons constraints.Value) (err error) {
	defer utils.ErrorContextf(&err, "cannot set constraints")
	notSetYet := D{{"nonce", ""}}
	ops := []txn.Op{
		{
			C:      m.st.machines.Name,
			Id:     m.doc.Id,
			Assert: append(isAliveDoc, notSetYet...),
		},
		setConstraintsOp(m.st, m.globalKey(), cons),
	}
	// 3 attempts is enough to push the ErrExcessiveContention case out of the
	// realm of plausibility: it implies local state indicating unprovisioned,
	// and remote state indicating provisioned (reasonable); but which changes
	// back to unprovisioned and then to provisioned again with *very* specific
	// timing in the course of this loop.
	for i := 0; i < 3; i++ {
		if m.doc.Life != Alive {
			return errNotAlive
		}
		if _, err := m.InstanceId(); err == nil {
			return fmt.Errorf("machine is already provisioned")
		} else if !IsNotProvisionedError(err) {
			return err
		}
		if err := m.st.runTransaction(ops); err != txn.ErrAborted {
			return err
		}
		if m, err = m.st.Machine(m.doc.Id); err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
// Remove removes the machine from state. It will fail if the machine is not
// Dead.
func (m *Machine) Remove() (err error) {
	defer utils.ErrorContextf(&err, "cannot remove machine %s", m.doc.Id)
	if m.doc.Life != Dead {
		return fmt.Errorf("machine is not dead")
	}
	ops := []txn.Op{
		{
			C:      m.st.machines.Name,
			Id:     m.doc.Id,
			Assert: txn.DocExists,
			Remove: true,
		},
		{
			C:      m.st.instanceData.Name,
			Id:     m.doc.Id,
			Remove: true,
		},
		removeStatusOp(m.st, m.globalKey()),
		removeConstraintsOp(m.st, m.globalKey()),
		annotationRemoveOp(m.st, m.globalKey()),
	}
	ops = append(ops, removeContainerRefOps(m.st, m.Id())...)
	// The only abort conditions in play indicate that the machine has already
	// been removed.
	return onAbort(m.st.runTransaction(ops), nil)
}
// Validate returns an error if the supplied hook.Info does not represent
// a valid change to the relation state. Hooks must always be validated
// against the current state before they are run, to ensure that the system
// meets its guarantees about hook execution order.
func (s *State) Validate(hi hook.Info) (err error) {
	defer utils.ErrorContextf(&err, "inappropriate %q for %q", hi.Kind, hi.RemoteUnit)
	if hi.RelationId != s.RelationId {
		return fmt.Errorf("expected relation %d, got relation %d", s.RelationId, hi.RelationId)
	}
	if s.Members == nil {
		return fmt.Errorf(`relation is broken and cannot be changed further`)
	}
	unit, kind := hi.RemoteUnit, hi.Kind
	if kind == hooks.RelationBroken {
		if len(s.Members) == 0 {
			return nil
		}
		return fmt.Errorf(`cannot run "relation-broken" while units still present`)
	}
	if s.ChangedPending != "" {
		if unit != s.ChangedPending || kind != hooks.RelationChanged {
			return fmt.Errorf(`expected "relation-changed" for %q`, s.ChangedPending)
		}
	} else if _, joined := s.Members[unit]; joined && kind == hooks.RelationJoined {
		return fmt.Errorf("unit already joined")
	} else if !joined && kind != hooks.RelationJoined {
		return fmt.Errorf("unit has not joined")
	}
	return nil
}
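// Illustrative usage sketch (not from the original source): since hooks must be
// validated against the current relation state before they are run, a caller
// would typically gate execution on Validate. The runHook parameter is a
// hypothetical stand-in for whatever actually executes the hook.
func exampleRunRelationHook(s *State, hi hook.Info, runHook func(hook.Info) error) error {
	if err := s.Validate(hi); err != nil {
		return err
	}
	return runHook(hi)
}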
// Destroy ensures that the relation will be removed at some point; if no units
// are currently in scope, it will be removed immediately.
func (r *Relation) Destroy() (err error) {
	defer utils.ErrorContextf(&err, "cannot destroy relation %q", r)
	if len(r.doc.Endpoints) == 1 && r.doc.Endpoints[0].Role == charm.RolePeer {
		return fmt.Errorf("is a peer relation")
	}
	defer func() {
		if err == nil {
			// This is a white lie; the document might actually be removed.
			r.doc.Life = Dying
		}
	}()
	rel := &Relation{r.st, r.doc}
	// In this context, aborted transactions indicate that the number of units
	// in scope has changed between 0 and not-0. The chances of 5 successive
	// attempts each hitting this change -- which is itself an unlikely one --
	// are considered to be extremely small.
	for attempt := 0; attempt < 5; attempt++ {
		ops, _, err := rel.destroyOps("")
		if err == errAlreadyDying {
			return nil
		} else if err != nil {
			return err
		}
		if err := rel.st.runTransaction(ops); err != txn.ErrAborted {
			return err
		}
		if err := rel.Refresh(); errors.IsNotFoundError(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
// Write atomically writes to disk the relation state change in hi.
// It must be called after the respective hook was executed successfully.
// Write doesn't validate hi but guarantees that successive writes of
// the same hi are idempotent.
func (d *StateDir) Write(hi hook.Info) (err error) {
	defer utils.ErrorContextf(&err, "failed to write %q hook info for %q on state directory", hi.Kind, hi.RemoteUnit)
	if hi.Kind == hooks.RelationBroken {
		return d.Remove()
	}
	name := strings.Replace(hi.RemoteUnit, "/", "-", 1)
	path := filepath.Join(d.path, name)
	if hi.Kind == hooks.RelationDeparted {
		if err = os.Remove(path); err != nil && !os.IsNotExist(err) {
			return err
		}
		// If atomic delete succeeded, update own state.
		delete(d.state.Members, hi.RemoteUnit)
		return nil
	}
	di := diskInfo{&hi.ChangeVersion, hi.Kind == hooks.RelationJoined}
	if err := utils.WriteYaml(path, &di); err != nil {
		return err
	}
	// If write was successful, update own state.
	d.state.Members[hi.RemoteUnit] = hi.ChangeVersion
	if hi.Kind == hooks.RelationJoined {
		d.state.ChangedPending = hi.RemoteUnit
	} else {
		d.state.ChangedPending = ""
	}
	return nil
}
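// Illustrative usage sketch (not from the original source): recording a
// successfully executed relation hook. The field values are placeholders; the
// field names are the ones referenced by Validate and Write above.
func exampleRecordJoined(d *StateDir, relationId int, remoteUnit string, version int64) error {
	hi := hook.Info{
		Kind:          hooks.RelationJoined,
		RelationId:    relationId,
		RemoteUnit:    remoteUnit,
		ChangeVersion: version,
	}
	// Write is idempotent for repeated writes of the same hi, so it is
	// safe to retry after a partial failure.
	return d.Write(hi)
}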
// ReadAllStateDirs loads and returns every StateDir persisted directly inside
// the supplied dirPath. If dirPath does not exist, no error is returned.
func ReadAllStateDirs(dirPath string) (dirs map[int]*StateDir, err error) {
	defer utils.ErrorContextf(&err, "cannot load relations state from %q", dirPath)
	if _, err := os.Stat(dirPath); os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	fis, err := ioutil.ReadDir(dirPath)
	if err != nil {
		return nil, err
	}
	dirs = map[int]*StateDir{}
	for _, fi := range fis {
		// Entries with integer names must be directories containing StateDir
		// data; all other names will be ignored.
		relationId, err := strconv.Atoi(fi.Name())
		if err != nil {
			// This doesn't look like a relation.
			continue
		}
		dir, err := ReadStateDir(dirPath, relationId)
		if err != nil {
			return nil, err
		}
		dirs[relationId] = dir
	}
	return dirs, nil
}
// AddService creates a new service, running the supplied charm, with the
// supplied name (which must be unique). If the charm defines peer relations,
// they will be created automatically.
func (st *State) AddService(name string, ch *Charm) (service *Service, err error) {
	defer utils.ErrorContextf(&err, "cannot add service %q", name)
	// Sanity checks.
	if !names.IsService(name) {
		return nil, fmt.Errorf("invalid name")
	}
	if ch == nil {
		return nil, fmt.Errorf("charm is nil")
	}
	if exists, err := isNotDead(st.services, name); err != nil {
		return nil, err
	} else if exists {
		return nil, fmt.Errorf("service already exists")
	}
	// Create the service addition operations.
	peers := ch.Meta().Peers
	svcDoc := &serviceDoc{
		Name:          name,
		Series:        ch.URL().Series,
		Subordinate:   ch.Meta().Subordinate,
		CharmURL:      ch.URL(),
		RelationCount: len(peers),
		Life:          Alive,
	}
	svc := newService(st, svcDoc)
	ops := []txn.Op{
		createConstraintsOp(st, svc.globalKey(), constraints.Value{}),
		createSettingsOp(st, svc.settingsKey(), nil),
		{
			C:      st.settingsrefs.Name,
			Id:     svc.settingsKey(),
			Assert: txn.DocMissing,
			Insert: settingsRefsDoc{1},
		},
		{
			C:      st.services.Name,
			Id:     name,
			Assert: txn.DocMissing,
			Insert: svcDoc,
		},
	}
	// Collect peer relation addition operations.
	peerOps, err := st.addPeerRelationsOps(name, peers)
	if err != nil {
		return nil, err
	}
	ops = append(ops, peerOps...)
	// Run the transaction; happily, there's never any reason to retry,
	// because all the possible failed assertions imply that the service
	// already exists.
	if err := st.runTransaction(ops); err == txn.ErrAborted {
		return nil, fmt.Errorf("service already exists")
	} else if err != nil {
		return nil, err
	}
	// Refresh to pick the txn-revno.
	if err = svc.Refresh(); err != nil {
		return nil, err
	}
	return svc, nil
}
// SetMinUnits changes the minimum number of units required by the service.
func (s *Service) SetMinUnits(minUnits int) (err error) {
	defer utils.ErrorContextf(&err, "cannot set minimum units for service %q", s)
	defer func() {
		if err == nil {
			s.doc.MinUnits = minUnits
		}
	}()
	if minUnits < 0 {
		return errors.New("cannot set a negative minimum number of units")
	}
	service := &Service{st: s.st, doc: s.doc}
	// Removing the document never fails. Racing clients trying to create the
	// document generate one failure, but the second attempt should succeed.
	// If one client tries to update the document, and a racing client removes
	// it, the former should be able to re-create the document in the second
	// attempt. If the referred-to service advanced its life cycle to a not
	// alive state, an error is returned after the first failing attempt.
	for i := 0; i < 2; i++ {
		if service.doc.Life != Alive {
			return errors.New("service is no longer alive")
		}
		if minUnits == service.doc.MinUnits {
			return nil
		}
		ops := setMinUnitsOps(service, minUnits)
		if err := s.st.runTransaction(ops); err != txn.ErrAborted {
			return err
		}
		if err := service.Refresh(); err != nil {
			return err
		}
	}
	return ErrExcessiveContention
}
// AssignUnit places the unit on a machine. Depending on the policy, and the
// state of the environment, this may lead to new instances being launched
// within the environment.
func (st *State) AssignUnit(u *Unit, policy AssignmentPolicy) (err error) {
	if !u.IsPrincipal() {
		return fmt.Errorf("subordinate unit %q cannot be assigned directly to a machine", u)
	}
	defer utils.ErrorContextf(&err, "cannot assign unit %q to machine", u)
	var m *Machine
	switch policy {
	case AssignLocal:
		m, err = st.Machine("0")
		if err != nil {
			return err
		}
		return u.AssignToMachine(m)
	case AssignClean:
		if _, err = u.AssignToCleanMachine(); err != noCleanMachines {
			return err
		}
		return u.AssignToNewMachineOrContainer()
	case AssignCleanEmpty:
		if _, err = u.AssignToCleanEmptyMachine(); err != noCleanMachines {
			return err
		}
		return u.AssignToNewMachineOrContainer()
	case AssignNew:
		return u.AssignToNewMachine()
	}
	return fmt.Errorf("unknown unit assignment policy: %q", policy)
}
// Relations returns a Relation for every relation the service is in.
func (s *Service) Relations() (relations []*Relation, err error) {
	defer utils.ErrorContextf(&err, "can't get relations for service %q", s)
	docs := []relationDoc{}
	err = s.st.relations.Find(D{{"endpoints.servicename", s.doc.Name}}).All(&docs)
	if err != nil {
		return nil, err
	}
	for _, v := range docs {
		relations = append(relations, newRelation(s.st, &v))
	}
	return relations, nil
}
// EnsureMinUnits adds new units if the service's MinUnits value is greater
// than the number of alive units.
func (s *Service) EnsureMinUnits() (err error) {
	defer utils.ErrorContextf(&err, "cannot ensure minimum units for service %q", s)
	service := &Service{st: s.st, doc: s.doc}
	for {
		// Ensure the service is alive.
		if service.doc.Life != Alive {
			return errors.New("service is not alive")
		}
		// Exit without errors if the MinUnits for the service is not set.
		if service.doc.MinUnits == 0 {
			return nil
		}
		// Retrieve the number of alive units for the service.
		aliveUnits, err := aliveUnitsCount(service)
		if err != nil {
			return err
		}
		// Calculate the number of required units to be added.
		missing := service.doc.MinUnits - aliveUnits
		if missing <= 0 {
			return nil
		}
		name, ops, err := ensureMinUnitsOps(service)
		if err != nil {
			return err
		}
		// Add missing unit.
		switch err := s.st.runTransaction(ops); err {
		case nil:
			// Assign the new unit.
			unit, err := service.Unit(name)
			if err != nil {
				return err
			}
			if err := service.st.AssignUnit(unit, AssignNew); err != nil {
				return err
			}
			// No need to proceed and refresh the service if this was the
			// last/only missing unit.
			if missing == 1 {
				return nil
			}
		case txn.ErrAborted:
			// Refresh the service and restart the loop.
		default:
			return err
		}
		if err := service.Refresh(); err != nil {
			return err
		}
	}
	panic("unreachable")
}
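// Illustrative usage sketch (not from the original source): raising a service's
// minimum unit count and immediately topping up its alive units to match. The
// service value is assumed to come from the caller's own context.
func exampleRaiseMinUnits(s *Service, minUnits int) error {
	if err := s.SetMinUnits(minUnits); err != nil {
		return err
	}
	return s.EnsureMinUnits()
}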
// SetAnnotations adds key/value pairs to annotations in MongoDB.
func (a *annotator) SetAnnotations(pairs map[string]string) (err error) {
	defer utils.ErrorContextf(&err, "cannot update annotations on %s", a.tag)
	if len(pairs) == 0 {
		return nil
	}
	// Collect in separate maps pairs to be inserted/updated or removed.
	toRemove := make(map[string]bool)
	toInsert := make(map[string]string)
	toUpdate := make(map[string]string)
	for key, value := range pairs {
		if strings.Contains(key, ".") {
			return fmt.Errorf("invalid key %q", key)
		}
		if value == "" {
			toRemove["annotations."+key] = true
		} else {
			toInsert[key] = value
			toUpdate["annotations."+key] = value
		}
	}
	// Two attempts should be enough to update annotations even with racing
	// clients - if the document does not already exist, one of the clients
	// will create it and the others will fail, then all the rest of the
	// clients should succeed on their second attempt. If the referred-to
	// entity has disappeared, and removed its annotations in the meantime,
	// we consider that worthy of an error (will be fixed when new entities
	// can never share names with old ones).
	for i := 0; i < 2; i++ {
		var ops []txn.Op
		if count, err := a.st.annotations.FindId(a.globalKey).Count(); err != nil {
			return err
		} else if count == 0 {
			// Check that the annotator entity was not previously destroyed.
			if i != 0 {
				return fmt.Errorf("%s no longer exists", a.tag)
			}
			ops, err = a.insertOps(toInsert)
			if err != nil {
				return err
			}
		} else {
			ops = a.updateOps(toUpdate, toRemove)
		}
		if err := a.st.runTransaction(ops); err == nil {
			return nil
		} else if err != txn.ErrAborted {
			return err
		}
	}
	return ErrExcessiveContention
}
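// Illustrative usage sketch (not from the original source): the annotation keys
// and values here are placeholders. An empty value removes the key; a non-empty
// value inserts or updates it, as described above.
func exampleAnnotate(a *annotator) error {
	return a.SetAnnotations(map[string]string{
		"owner":      "admin", // insert or update "owner"
		"deprecated": "",      // remove "deprecated" if present
	})
}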
// SetProvisioned sets the provider-specific machine id, nonce and also metadata for
// this machine. Once set, the instance id cannot be changed.
func (m *Machine) SetProvisioned(id instance.Id, nonce string, characteristics *instance.HardwareCharacteristics) (err error) {
	defer utils.ErrorContextf(&err, "cannot set instance data for machine %q", m)

	if id == "" || nonce == "" {
		return fmt.Errorf("instance id and nonce cannot be empty")
	}

	if characteristics == nil {
		characteristics = &instance.HardwareCharacteristics{}
	}
	hc := &instanceData{
		Id:         m.doc.Id,
		InstanceId: id,
		Arch:       characteristics.Arch,
		Mem:        characteristics.Mem,
		CpuCores:   characteristics.CpuCores,
		CpuPower:   characteristics.CpuPower,
	}
	// SCHEMACHANGE
	// TODO(wallyworld) - do not check instanceId on machineDoc after schema is upgraded
	notSetYet := D{{"instanceid", ""}, {"nonce", ""}}
	ops := []txn.Op{
		{
			C:      m.st.machines.Name,
			Id:     m.doc.Id,
			Assert: append(isAliveDoc, notSetYet...),
			Update: D{{"$set", D{{"instanceid", id}, {"nonce", nonce}}}},
		},
		{
			C:      m.st.instanceData.Name,
			Id:     m.doc.Id,
			Assert: txn.DocMissing,
			Insert: hc,
		},
	}

	if err = m.st.runTransaction(ops); err == nil {
		m.doc.Nonce = nonce
		// SCHEMACHANGE
		// TODO(wallyworld) - remove this backward compatibility code when schema upgrades are possible
		// (InstanceId is stored on the instanceData document but we duplicate the value on the machineDoc).
		m.doc.InstanceId = id
		return nil
	} else if err != txn.ErrAborted {
		return err
	} else if alive, err := isAlive(m.st.machines, m.doc.Id); err != nil {
		return err
	} else if !alive {
		return errNotAlive
	}
	return fmt.Errorf("already set")
}
// FinishMachineConfig sets fields on a MachineConfig that can be determined by
// inspecting a plain config.Config and the machine constraints at the last
// moment before bootstrapping. It assumes that the supplied Config comes from
// an environment that has passed through all the validation checks in the
// Bootstrap func, and that has set an agent-version (via FindBootstrapTools,
// or otherwise).
// TODO(fwereade) This function is not meant to be "good" in any serious way:
// it is better that this functionality be collected in one place here than
// that it be spread out across 3 or 4 providers, but this is its only
// redeeming feature.
func FinishMachineConfig(mcfg *cloudinit.MachineConfig, cfg *config.Config, cons constraints.Value) (err error) {
	defer utils.ErrorContextf(&err, "cannot complete machine configuration")

	// Everything needs the environment's authorized keys.
	authKeys := cfg.AuthorizedKeys()
	if authKeys == "" {
		return fmt.Errorf("environment configuration has no authorized-keys")
	}
	mcfg.AuthorizedKeys = authKeys
	if mcfg.MachineEnvironment == nil {
		mcfg.MachineEnvironment = make(map[string]string)
	}
	mcfg.MachineEnvironment[osenv.JujuProviderType] = cfg.Type()
	if !mcfg.StateServer {
		return nil
	}

	// These settings are only appropriate at bootstrap time. At the
	// moment, the only state server is the bootstrap node, but this
	// will probably change.
	if mcfg.APIInfo != nil || mcfg.StateInfo != nil {
		return fmt.Errorf("machine configuration already has api/state info")
	}
	caCert, hasCACert := cfg.CACert()
	if !hasCACert {
		return fmt.Errorf("environment configuration has no ca-cert")
	}
	password := cfg.AdminSecret()
	if password == "" {
		return fmt.Errorf("environment configuration has no admin-secret")
	}
	passwordHash := utils.PasswordHash(password)
	mcfg.APIInfo = &api.Info{Password: passwordHash, CACert: caCert}
	mcfg.StateInfo = &state.Info{Password: passwordHash, CACert: caCert}
	mcfg.StatePort = cfg.StatePort()
	mcfg.APIPort = cfg.APIPort()
	mcfg.Constraints = cons
	if mcfg.Config, err = BootstrapConfig(cfg); err != nil {
		return err
	}

	// These really are directly relevant to running a state server.
	cert, key, err := cfg.GenerateStateServerCertAndKey()
	if err != nil {
		return fmt.Errorf("cannot generate state server certificate: %v", err)
	}
	mcfg.StateServerCert = cert
	mcfg.StateServerKey = key
	return nil
}
// ReadSettings returns a map holding the settings of the unit with the
// supplied name within this relation. An error will be returned if the
// relation no longer exists, or if the unit's service is not part of the
// relation, or the settings are invalid; but mere non-existence of the
// unit is not grounds for an error, because the unit settings are
// guaranteed to persist for the lifetime of the relation, regardless
// of the lifetime of the unit.
func (ru *RelationUnit) ReadSettings(uname string) (m map[string]interface{}, err error) {
	defer utils.ErrorContextf(&err, "cannot read settings for unit %q in relation %q", uname, ru.relation)
	if !IsUnitName(uname) {
		return nil, fmt.Errorf("%q is not a valid unit name", uname)
	}
	key, err := ru.key(uname)
	if err != nil {
		return nil, err
	}
	node, err := readSettings(ru.st, key)
	if err != nil {
		return nil, err
	}
	return node.Map(), nil
}
// ReadStateDir loads a StateDir from the subdirectory of dirPath named
// for the supplied RelationId. If the directory does not exist, no error
// is returned.
func ReadStateDir(dirPath string, relationId int) (d *StateDir, err error) {
	d = &StateDir{
		filepath.Join(dirPath, strconv.Itoa(relationId)),
		State{relationId, map[string]int64{}, ""},
	}
	defer utils.ErrorContextf(&err, "cannot load relation state from %q", d.path)
	if _, err := os.Stat(d.path); os.IsNotExist(err) {
		return d, nil
	} else if err != nil {
		return nil, err
	}
	fis, err := ioutil.ReadDir(d.path)
	if err != nil {
		return nil, err
	}
	for _, fi := range fis {
		// Entries with names ending in "-" followed by an integer must be
		// files containing valid unit data; all other names are ignored.
		name := fi.Name()
		i := strings.LastIndex(name, "-")
		if i == -1 {
			continue
		}
		svcName := name[:i]
		unitId := name[i+1:]
		if _, err := strconv.Atoi(unitId); err != nil {
			continue
		}
		unitName := svcName + "/" + unitId
		var info diskInfo
		if err = utils.ReadYaml(filepath.Join(d.path, name), &info); err != nil {
			return nil, fmt.Errorf("invalid unit file %q: %v", name, err)
		}
		if info.ChangeVersion == nil {
			return nil, fmt.Errorf(`invalid unit file %q: "changed-version" not set`, name)
		}
		d.state.Members[unitName] = *info.ChangeVersion
		if info.ChangedPending {
			if d.state.ChangedPending != "" {
				return nil, fmt.Errorf("%q and %q both have pending changed hooks", d.state.ChangedPending, unitName)
			}
			d.state.ChangedPending = unitName
		}
	}
	return d, nil
}
// newTempCertFile stores the given x509 certificate in a temporary file,
// which only the current user will be allowed to access.
// You *must* clean up the file after use, by calling its Delete method.
func newTempCertFile(data []byte) (certFile *tempCertFile, err error) {
	// Add context to any error we may return.
	defer utils.ErrorContextf(&err, "failed while writing temporary certificate file")

	// Access permissions for these temporary files:
	const (
		// Owner can read/write temporary files. Not backed up.
		fileMode = 0600 | os.ModeTemporary | os.ModeExclusive
		// Temporary dirs are like files, but owner also has "x"
		// permission.
		dirMode = fileMode | 0100
	)

	certFile = &tempCertFile{}

	// We'll randomize the file's name, so that even someone with access
	// to the temporary directory (perhaps a group member sneaking in
	// just before we close access to the directory) won't be able to
	// guess its name and inject their own file.
	certFile.filename = fmt.Sprintf("x509-%d.cert", rand.Int31())

	// To guarantee that nobody else will be able to access the file, even
	// by predicting or guessing its name, we create the file in its own
	// private directory.
	certFile.tempDir, err = ioutil.TempDir("", "juju-azure")
	if err != nil {
		return nil, err
	}
	err = os.Chmod(certFile.tempDir, dirMode)
	if err != nil {
		return nil, err
	}

	// Now, at last, write the file. WriteFile could have done most of
	// the work on its own, but it doesn't guarantee that nobody creates
	// a file of the same name first. When that happens, you get a file
	// but not with the requested permissions.
	err = ioutil.WriteFile(certFile.Path(), data, fileMode)
	if err != nil {
		os.RemoveAll(certFile.tempDir)
		return nil, err
	}

	return certFile, nil
}
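// Illustrative usage sketch (not from the original source): the doc comment
// above requires explicit cleanup via Delete, so callers would typically defer
// it immediately after creation. The use callback is a hypothetical stand-in
// for whatever consumes the certificate path.
func exampleWithTempCert(data []byte, use func(path string) error) error {
	certFile, err := newTempCertFile(data)
	if err != nil {
		return err
	}
	defer certFile.Delete()
	return use(certFile.Path())
}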
// SetAgentTools sets the tools that the agent is currently running.
func (m *Machine) SetAgentTools(t *tools.Tools) (err error) {
	defer utils.ErrorContextf(&err, "cannot set agent tools for machine %v", m)
	if t.Version.Series == "" || t.Version.Arch == "" {
		return fmt.Errorf("empty series or arch")
	}
	ops := []txn.Op{{
		C:      m.st.machines.Name,
		Id:     m.doc.Id,
		Assert: notDeadDoc,
		Update: D{{"$set", D{{"tools", t}}}},
	}}
	if err := m.st.runTransaction(ops); err != nil {
		return onAbort(err, errDead)
	}
	tools := *t
	m.doc.Tools = &tools
	return nil
}
// AddUnit adds a new principal unit to the service.
func (s *Service) AddUnit() (unit *Unit, err error) {
	defer utils.ErrorContextf(&err, "cannot add unit to service %q", s)
	name, ops, err := s.addUnitOps("", nil)
	if err != nil {
		return nil, err
	}
	if err := s.st.runTransaction(ops); err == txn.ErrAborted {
		if alive, err := isAlive(s.st.services, s.doc.Name); err != nil {
			return nil, err
		} else if !alive {
			return nil, fmt.Errorf("service is not alive")
		}
		return nil, fmt.Errorf("inconsistent state")
	} else if err != nil {
		return nil, err
	}
	return s.Unit(name)
}
// SetConstraints replaces the current service constraints.
func (s *Service) SetConstraints(cons constraints.Value) (err error) {
	if s.doc.Subordinate {
		return ErrSubordinateConstraints
	}
	defer utils.ErrorContextf(&err, "cannot set constraints")
	if s.doc.Life != Alive {
		return errNotAlive
	}
	ops := []txn.Op{
		{
			C:      s.st.services.Name,
			Id:     s.doc.Name,
			Assert: isAliveDoc,
		},
		setConstraintsOp(s.st, s.globalKey(), cons),
	}
	return onAbort(s.st.runTransaction(ops), errNotAlive)
}
// SetAgentTools sets the tools that the agent is currently running.
func (u *Unit) SetAgentTools(t *tools.Tools) (err error) {
	defer utils.ErrorContextf(&err, "cannot set agent tools for unit %q", u)
	if t.Series == "" || t.Arch == "" {
		return fmt.Errorf("empty series or arch")
	}
	ops := []txn.Op{{
		C:      u.st.units.Name,
		Id:     u.doc.Name,
		Assert: notDeadDoc,
		Update: D{{"$set", D{{"tools", t}}}},
	}}
	if err := u.st.runTransaction(ops); err != nil {
		return onAbort(err, errDead)
	}
	tools := *t
	u.doc.Tools = &tools
	return nil
}
// addMachine implements AddMachine and InjectMachine.
func (st *State) addMachine(params *AddMachineParams) (m *Machine, err error) {
	msg := "cannot add a new machine"
	if params.ParentId != "" || params.ContainerType != "" {
		msg = "cannot add a new container"
	}
	defer utils.ErrorContextf(&err, msg)

	cons, err := st.EnvironConstraints()
	if err != nil {
		return nil, err
	}
	cons = params.Constraints.WithFallbacks(cons)

	ops, instData, containerParams, err := st.addMachineContainerOps(params, cons)
	if err != nil {
		return nil, err
	}
	mdoc := &machineDoc{
		Series:        params.Series,
		ContainerType: string(params.ContainerType),
		Jobs:          params.Jobs,
		Clean:         true,
	}
	if mdoc.ContainerType == "" {
		mdoc.InstanceId = params.instanceId
		mdoc.Nonce = params.nonce
	}
	mdoc, machineOps, err := st.addMachineOps(mdoc, instData, cons, containerParams)
	if err != nil {
		return nil, err
	}
	ops = append(ops, machineOps...)

	err = st.runTransaction(ops)
	if err != nil {
		return nil, err
	}
	// Refresh to pick the txn-revno.
	m = newMachine(st, mdoc)
	if err = m.Refresh(); err != nil {
		return nil, err
	}
	return m, nil
}
// WaitAgentAlive blocks until the respective agent is alive.
func (m *Machine) WaitAgentAlive(timeout time.Duration) (err error) {
	defer utils.ErrorContextf(&err, "waiting for agent of machine %v", m)
	ch := make(chan presence.Change)
	m.st.pwatcher.Watch(m.globalKey(), ch)
	defer m.st.pwatcher.Unwatch(m.globalKey(), ch)
	for i := 0; i < 2; i++ {
		select {
		case change := <-ch:
			if change.Alive {
				return nil
			}
		case <-time.After(timeout):
			return fmt.Errorf("still not alive after timeout")
		case <-m.st.pwatcher.Dead():
			return m.st.pwatcher.Err()
		}
	}
	panic(fmt.Sprintf("presence reported dead status twice in a row for machine %v", m))
}
// Units returns all the units that have been assigned to the machine.
func (m *Machine) Units() (units []*Unit, err error) {
	defer utils.ErrorContextf(&err, "cannot get units assigned to machine %v", m)
	pudocs := []unitDoc{}
	err = m.st.units.Find(D{{"machineid", m.doc.Id}}).All(&pudocs)
	if err != nil {
		return nil, err
	}
	for _, pudoc := range pudocs {
		units = append(units, newUnit(m.st, &pudoc))
		docs := []unitDoc{}
		err = m.st.units.Find(D{{"principal", pudoc.Name}}).All(&docs)
		if err != nil {
			return nil, err
		}
		for _, doc := range docs {
			units = append(units, newUnit(m.st, &doc))
		}
	}
	return units, nil
}
// download fetches the supplied charm and checks that it has the correct sha256
// hash, then copies it into the directory. If a value is received on abort, the
// download will be stopped.
func (d *BundlesDir) download(sch *state.Charm, abort <-chan struct{}) (err error) {
	defer utils.ErrorContextf(&err, "failed to download charm %q from %q", sch.URL(), sch.BundleURL())
	dir := d.downloadsPath()
	if err := os.MkdirAll(dir, 0755); err != nil {
		return err
	}
	burl := sch.BundleURL().String()
	log.Infof("worker/uniter/charm: downloading %s from %s", sch.URL(), burl)
	dl := downloader.New(burl, dir)
	defer dl.Stop()
	for {
		select {
		case <-abort:
			log.Infof("worker/uniter/charm: download aborted")
			return fmt.Errorf("aborted")
		case st := <-dl.Done():
			if st.Err != nil {
				return st.Err
			}
			log.Infof("worker/uniter/charm: download complete")
			defer st.File.Close()
			hash := sha256.New()
			if _, err = io.Copy(hash, st.File); err != nil {
				return err
			}
			actualSha256 := hex.EncodeToString(hash.Sum(nil))
			if actualSha256 != sch.BundleSha256() {
				return fmt.Errorf(
					"expected sha256 %q, got %q",
					sch.BundleSha256(), actualSha256,
				)
			}
			log.Infof("worker/uniter/charm: download verified")
			if err := os.MkdirAll(d.path, 0755); err != nil {
				return err
			}
			return os.Rename(st.File.Name(), d.bundlePath(sch))
		}
	}
	panic("unreachable")
}
// ClosePort sets the policy of the port with protocol and number to be closed.
func (u *Unit) ClosePort(protocol string, number int) (err error) {
	port := instance.Port{Protocol: protocol, Number: number}
	defer utils.ErrorContextf(&err, "cannot close port %v for unit %q", port, u)
	ops := []txn.Op{{
		C:      u.st.units.Name,
		Id:     u.doc.Name,
		Assert: notDeadDoc,
		Update: D{{"$pull", D{{"ports", port}}}},
	}}
	err = u.st.runTransaction(ops)
	if err != nil {
		return onAbort(err, errDead)
	}
	newPorts := make([]instance.Port, 0, len(u.doc.Ports))
	for _, p := range u.doc.Ports {
		if p != port {
			newPorts = append(newPorts, p)
		}
	}
	u.doc.Ports = newPorts
	return nil
}