// SetAPIHostPorts sets the addresses of the API server instances.
// Each server is represented by one element in the top level slice.
// If the prefer-ipv6 environment setting is true, the addresses will
// be sorted before setting them to bring IPv6 addresses on top (if
// available).
func (st *State) SetAPIHostPorts(hps [][]network.HostPort) error {
	envConfig, err := st.EnvironConfig()
	if err != nil {
		return err
	}
	for i := range hps {
		network.SortHostPorts(hps[i], envConfig.PreferIPv6())
	}
	doc := apiHostPortsDoc{
		APIHostPorts: instanceHostPortsToHostPorts(hps),
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		existing, err := st.APIHostPorts()
		if err != nil {
			return nil, err
		}
		op := txn.Op{
			C:  st.stateServers.Name,
			Id: apiHostPortsKey,
			Assert: bson.D{{
				"apihostports", instanceHostPortsToHostPorts(existing),
			}},
		}
		if !hostPortsEqual(hps, existing) {
			op.Update = bson.D{{
				"$set", bson.D{{"apihostports", doc.APIHostPorts}},
			}}
		}
		return []txn.Op{op}, nil
	}
	if err := st.run(buildTxn); err != nil {
		return errors.Annotate(err, "cannot set API addresses")
	}
	return nil
}
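The interesting part here is the `buildTxn` closure: it is rebuilt from fresh data on every attempt, with the `Assert` field pinning the document state that was observed. The sketch below illustrates the retry contract this assumes, in the style of the jujutxn runner behind `st.run`; `runTxn` and `nrRetries` are stand-ins for illustration, not the real implementation.

// A minimal sketch of the retry contract assumed by st.run: runTxn is a
// stand-in for a function that applies ops via mgo/txn and returns
// txn.ErrAborted when an assertion fails.
func run(buildTxn func(attempt int) ([]txn.Op, error), runTxn func([]txn.Op) error) error {
	const nrRetries = 3 // illustrative retry budget, not the real constant
	for attempt := 0; attempt < nrRetries; attempt++ {
		ops, err := buildTxn(attempt)
		if err != nil {
			return err
		}
		if err := runTxn(ops); err != txn.ErrAborted {
			// nil (success) or any non-abort error is final.
			return err
		}
		// An aborted transaction means an assertion failed because the
		// data changed underneath us; loop so buildTxn can re-read state
		// and rebuild the operations.
	}
	return jujutxn.ErrExcessiveContention
}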
// removeOps returns the operations necessary to remove the relation. If
// ignoreService is not empty, no operations affecting that service will be
// included; if departingUnit is not nil, this implies that the relation's
// services may be Dying and otherwise unreferenced, and may thus require
// removal themselves.
func (r *Relation) removeOps(ignoreService string, departingUnit *Unit) ([]txn.Op, error) {
	relOp := txn.Op{
		C:      relationsC,
		Id:     r.doc.Key,
		Remove: true,
	}
	if departingUnit != nil {
		relOp.Assert = bson.D{{"life", Dying}, {"unitcount", 1}}
	} else {
		relOp.Assert = bson.D{{"life", Alive}, {"unitcount", 0}}
	}
	ops := []txn.Op{relOp}
	for _, ep := range r.doc.Endpoints {
		if ep.ServiceName == ignoreService {
			continue
		}
		var asserts bson.D
		hasRelation := bson.D{{"relationcount", bson.D{{"$gt", 0}}}}
		if departingUnit == nil {
			// We're constructing a destroy operation, either of the relation
			// or one of its services, and can therefore be assured that both
			// services are Alive.
			asserts = append(hasRelation, isAliveDoc...)
		} else if ep.ServiceName == departingUnit.ServiceName() {
			// This service must have at least one unit -- the one that's
			// departing the relation -- so it cannot be ready for removal.
			cannotDieYet := bson.D{{"unitcount", bson.D{{"$gt", 0}}}}
			asserts = append(hasRelation, cannotDieYet...)
		} else {
			// This service may require immediate removal.
			services, closer := r.st.getCollection(servicesC)
			defer closer()
			svc := &Service{st: r.st}
			hasLastRef := bson.D{{"life", Dying}, {"unitcount", 0}, {"relationcount", 1}}
			removable := append(bson.D{{"_id", ep.ServiceName}}, hasLastRef...)
			if err := services.Find(removable).One(&svc.doc); err == nil {
				ops = append(ops, svc.removeOps(hasLastRef)...)
				continue
			} else if err != mgo.ErrNotFound {
				return nil, err
			}
			// If not, we must check that this is still the case when the
			// transaction is applied.
			asserts = bson.D{{"$or", []bson.D{
				{{"life", Alive}},
				{{"unitcount", bson.D{{"$gt", 0}}}},
				{{"relationcount", bson.D{{"$gt", 1}}}},
			}}}
		}
		ops = append(ops, txn.Op{
			C:      servicesC,
			Id:     ep.ServiceName,
			Assert: asserts,
			Update: bson.D{{"$inc", bson.D{{"relationcount", -1}}}},
		})
	}
	cleanupOp := r.st.newCleanupOp(cleanupRelationSettings, fmt.Sprintf("r#%d#", r.Id()))
	return append(ops, cleanupOp), nil
}
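The life-check assertions appended above (`isAliveDoc`, and `notDeadDoc` in later excerpts) are shared documents rather than inline literals. Their likely definitions, sketched here from how they are used rather than copied from the package:

// Plausible definitions of the shared life assertions, consistent with
// how they are appended to other bson.D asserts in these excerpts.
var (
	isAliveDoc = bson.D{{"life", Alive}}
	notDeadDoc = bson.D{{"life", bson.D{{"$ne", Dead}}}}
)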
// removeUnitOps returns the operations necessary to remove the supplied unit,
// assuming the supplied asserts apply to the unit document.
func (s *Service) removeUnitOps(u *Unit, asserts bson.D) ([]txn.Op, error) {
	ops, err := u.destroyHostOps(s)
	if err != nil {
		return nil, err
	}
	observedFieldsMatch := bson.D{
		{"charmurl", u.doc.CharmURL},
		{"machineid", u.doc.MachineId},
	}
	ops = append(ops, txn.Op{
		C:      s.st.units.Name,
		Id:     u.doc.Name,
		Assert: append(observedFieldsMatch, asserts...),
		Remove: true,
	},
		removeConstraintsOp(s.st, u.globalKey()),
		removeStatusOp(s.st, u.globalKey()),
		annotationRemoveOp(s.st, u.globalKey()),
		s.st.newCleanupOp(cleanupRemovedUnit, u.doc.Name),
	)
	if u.doc.CharmURL != nil {
		decOps, err := settingsDecRefOps(s.st, s.doc.Name, u.doc.CharmURL)
		if errors.IsNotFound(err) {
			return nil, errRefresh
		} else if err != nil {
			return nil, err
		}
		ops = append(ops, decOps...)
	}
	if s.doc.Life == Dying && s.doc.RelationCount == 0 && s.doc.UnitCount == 1 {
		hasLastRef := bson.D{{"life", Dying}, {"relationcount", 0}, {"unitcount", 1}}
		return append(ops, s.removeOps(hasLastRef)...), nil
	}
	svcOp := txn.Op{
		C:      s.st.services.Name,
		Id:     s.doc.Name,
		Update: bson.D{{"$inc", bson.D{{"unitcount", -1}}}},
	}
	if s.doc.Life == Alive {
		svcOp.Assert = bson.D{{"life", Alive}, {"unitcount", bson.D{{"$gt", 0}}}}
	} else {
		svcOp.Assert = bson.D{
			{"life", Dying},
			{"$or", []bson.D{
				{{"unitcount", bson.D{{"$gt", 1}}}},
				{{"relationcount", bson.D{{"$gt", 0}}}},
			}},
		}
	}
	ops = append(ops, svcOp)
	return ops, nil
}
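This version factors the principal/machine cleanup into `u.destroyHostOps(s)`, where the older variants further below inline the same `$pull` operations. A sketch of its likely core, mirroring those inline operations; the real method also has to consider whether the host machine itself becomes removable, which is omitted here.

// A sketch of the core of destroyHostOps, assuming it mirrors the inline
// $pull operations visible in the older removeUnitOps variants below.
func (u *Unit) destroyHostOps(s *Service) ([]txn.Op, error) {
	if s.doc.Subordinate {
		// Detach the subordinate from its principal unit.
		return []txn.Op{{
			C:      s.st.units.Name,
			Id:     u.doc.Principal,
			Assert: txn.DocExists,
			Update: bson.D{{"$pull", bson.D{{"subordinates", u.doc.Name}}}},
		}}, nil
	} else if u.doc.MachineId != "" {
		// Detach the principal unit from its host machine.
		return []txn.Op{{
			C:      s.st.machines.Name,
			Id:     u.doc.MachineId,
			Assert: txn.DocExists,
			Update: bson.D{{"$pull", bson.D{{"principals", u.doc.Name}}}},
		}}, nil
	}
	return nil, nil
}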
// setAddresses updates the machine's addresses (either Addresses or
// MachineAddresses, depending on the field argument).
func (m *Machine) setAddresses(addresses []network.Address, field *[]address, fieldName string) error {
	var changed bool
	envConfig, err := m.st.EnvironConfig()
	if err != nil {
		return err
	}
	network.SortAddresses(addresses, envConfig.PreferIPv6())
	stateAddresses := instanceAddressesToAddresses(addresses)
	buildTxn := func(attempt int) ([]txn.Op, error) {
		changed = false
		if attempt > 0 {
			if err := m.Refresh(); err != nil {
				return nil, err
			}
		}
		if m.doc.Life == Dead {
			return nil, errDead
		}
		op := txn.Op{
			C:      m.st.machines.Name,
			Id:     m.doc.Id,
			Assert: append(bson.D{{fieldName, *field}}, notDeadDoc...),
		}
		if !addressesEqual(addresses, addressesToInstanceAddresses(*field)) {
			op.Update = bson.D{{"$set", bson.D{{fieldName, stateAddresses}}}}
			changed = true
		}
		return []txn.Op{op}, nil
	}
	switch err := m.st.run(buildTxn); err {
	case nil:
	case jujutxn.ErrExcessiveContention:
		return errors.Annotatef(err, "cannot set %s for machine %s", fieldName, m)
	default:
		return err
	}
	if !changed {
		return nil
	}
	*field = stateAddresses
	return nil
}
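The `addressesEqual` guard is what lets unchanged attempts degrade into a pure assertion (no `Update`), avoiding a pointless write. A minimal sketch, assuming a deep structural comparison is sufficient; note that order matters, which is why the slice is sorted before comparison.

// A sketch of the addressesEqual helper, assuming reflect.DeepEqual
// semantics; requires the standard library "reflect" package.
func addressesEqual(a, b []network.Address) bool {
	return reflect.DeepEqual(a, b)
}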
// removeUnitOps returns the operations necessary to remove the supplied unit
// (an earlier variant: no caller-supplied asserts, and no error return).
func (s *Service) removeUnitOps(u *Unit) []txn.Op {
	var ops []txn.Op
	if u.doc.Principal != "" {
		ops = append(ops, txn.Op{
			C:      s.st.units.Name,
			Id:     u.doc.Principal,
			Assert: txn.DocExists,
			Update: D{{"$pull", D{{"subordinates", u.doc.Name}}}},
		})
	} else if u.doc.MachineId != "" {
		ops = append(ops, txn.Op{
			C:      s.st.machines.Name,
			Id:     u.doc.MachineId,
			Assert: txn.DocExists,
			Update: D{{"$pull", D{{"principals", u.doc.Name}}}},
		})
	}
	ops = append(ops, txn.Op{
		C:      s.st.units.Name,
		Id:     u.doc.Name,
		Assert: txn.DocExists,
		Remove: true,
	})
	if s.doc.Life == Dying && s.doc.RelationCount == 0 && s.doc.UnitCount == 1 {
		hasLastRef := D{{"life", Dying}, {"relationcount", 0}, {"unitcount", 1}}
		return append(ops, s.removeOps(hasLastRef)...)
	}
	svcOp := txn.Op{
		C:      s.st.services.Name,
		Id:     s.doc.Name,
		Update: D{{"$inc", D{{"unitcount", -1}}}},
	}
	if s.doc.Life == Alive {
		svcOp.Assert = D{{"life", Alive}, {"unitcount", D{{"$gt", 0}}}}
	} else {
		svcOp.Assert = D{{"life", Dying}, {"unitcount", D{{"$gt", 1}}}}
	}
	return append(ops, svcOp)
}
// advanceLifecycle ensures that the machine's lifecycle is no earlier
// than the supplied value. If the machine already has that lifecycle
// value, or a later one, no changes will be made to remote state. If
// the machine has any responsibilities that preclude a valid change in
// lifecycle, it will return an error.
func (original *Machine) advanceLifecycle(life Life) (err error) {
	containers, err := original.Containers()
	if err != nil {
		return err
	}
	if len(containers) > 0 {
		return &HasContainersError{
			MachineId:    original.doc.Id,
			ContainerIds: containers,
		}
	}
	m := original
	defer func() {
		if err == nil {
			// The machine's lifecycle is known to have advanced; it may be
			// known to have already advanced further than requested, in
			// which case we set the latest known valid value.
			if m == nil {
				life = Dead
			} else if m.doc.Life > life {
				life = m.doc.Life
			}
			original.doc.Life = life
		}
	}()
	// op describes the update that advances the lifecycle; its assertions
	// are filled in per-attempt below.
	op := txn.Op{
		C:      m.st.machines.Name,
		Id:     m.doc.Id,
		Update: bson.D{{"$set", bson.D{{"life", life}}}},
	}
	advanceAsserts := bson.D{
		{"jobs", bson.D{{"$nin", []MachineJob{JobManageEnviron}}}},
		{"$or", []bson.D{
			{{"principals", bson.D{{"$size", 0}}}},
			{{"principals", bson.D{{"$exists", false}}}},
		}},
		{"hasvote", bson.D{{"$ne", true}}},
	}
	// 3 attempts: one with original data, one with refreshed data, and a final
	// one intended to determine the cause of failure of the preceding attempt.
	for i := 0; i < 3; i++ {
		// If the transaction was aborted, grab a fresh copy of the machine data.
		// We don't write to original, because the expectation is that state-
		// changing methods only set the requested change on the receiver; a case
		// could perhaps be made that this is not a helpful convention in the
		// context of the new state API, but we maintain consistency in the
		// face of uncertainty.
		if i != 0 {
			if m, err = m.st.Machine(m.doc.Id); errors.IsNotFound(err) {
				return nil
			} else if err != nil {
				return err
			}
		}
		// Check that the life change is sane, and collect the assertions
		// necessary to determine that it remains so.
		switch life {
		case Dying:
			if m.doc.Life != Alive {
				return nil
			}
			op.Assert = append(advanceAsserts, isAliveDoc...)
		case Dead:
			if m.doc.Life == Dead {
				return nil
			}
			op.Assert = append(advanceAsserts, notDeadDoc...)
		default:
			panic(fmt.Errorf("cannot advance lifecycle to %v", life))
		}
		// Check that the machine does not have any responsibilities that
		// prevent a lifecycle change.
		if hasJob(m.doc.Jobs, JobManageEnviron) {
			// (NOTE: When we enable multiple JobManageEnviron machines,
			// this restriction will be lifted, but we will assert that the
			// machine is not voting.)
			return fmt.Errorf("machine %s is required by the environment", m.doc.Id)
		}
		if m.doc.HasVote {
			return fmt.Errorf("machine %s is a voting replica set member", m.doc.Id)
		}
		if len(m.doc.Principals) != 0 {
			return &HasAssignedUnitsError{
				MachineId: m.doc.Id,
				UnitNames: m.doc.Principals,
			}
		}
		// Run the transaction...
		if err := m.st.runTransaction([]txn.Op{op}); err != txn.ErrAborted {
			return err
		}
		// ...and retry on abort.
	}
	// In very rare circumstances, the final iteration above will have determined
	// no cause of failure, and attempted a final transaction: if this also failed,
	// we can be sure that the machine document is changing very fast, in a somewhat
	// surprising fashion, and that it is sensible to back off for now.
	return fmt.Errorf("machine %s cannot advance lifecycle: %v", m, ErrExcessiveContention)
}
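The in-memory responsibility checks mirror the `advanceAsserts` document: each client-side check has a corresponding server-side assertion, so a race between check and transaction still aborts safely. The `hasJob` helper used above is straightforward; a sketch, assuming a simple linear scan:

// A sketch of the hasJob helper, assuming it reports whether job is
// present in the machine's job list.
func hasJob(jobs []MachineJob, job MachineJob) bool {
	for _, j := range jobs {
		if j == job {
			return true
		}
	}
	return false
}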
// removeUnitOps returns the operations necessary to remove the supplied unit,
// assuming the supplied asserts apply to the unit document.
func (s *Service) removeUnitOps(u *Unit, asserts D) ([]txn.Op, error) {
	var ops []txn.Op
	if s.doc.Subordinate {
		ops = append(ops, txn.Op{
			C:      s.st.units.Name,
			Id:     u.doc.Principal,
			Assert: txn.DocExists,
			Update: D{{"$pull", D{{"subordinates", u.doc.Name}}}},
		})
	} else if u.doc.MachineId != "" {
		ops = append(ops, txn.Op{
			C:      s.st.machines.Name,
			Id:     u.doc.MachineId,
			Assert: txn.DocExists,
			Update: D{{"$pull", D{{"principals", u.doc.Name}}}},
		})
	}
	observedFieldsMatch := D{
		{"charmurl", u.doc.CharmURL},
		{"machineid", u.doc.MachineId},
	}
	ops = append(ops, txn.Op{
		C:      s.st.units.Name,
		Id:     u.doc.Name,
		Assert: append(observedFieldsMatch, asserts...),
		Remove: true,
	},
		removeConstraintsOp(s.st, u.globalKey()),
		removeStatusOp(s.st, u.globalKey()),
		annotationRemoveOp(s.st, u.globalKey()),
	)
	if u.doc.CharmURL != nil {
		decOps, err := settingsDecRefOps(s.st, s.doc.Name, u.doc.CharmURL)
		if errors.IsNotFoundError(err) {
			return nil, errRefresh
		} else if err != nil {
			return nil, err
		}
		ops = append(ops, decOps...)
	}
	if s.doc.Life == Dying && s.doc.RelationCount == 0 && s.doc.UnitCount == 1 {
		hasLastRef := D{{"life", Dying}, {"relationcount", 0}, {"unitcount", 1}}
		return append(ops, s.removeOps(hasLastRef)...), nil
	}
	svcOp := txn.Op{
		C:      s.st.services.Name,
		Id:     s.doc.Name,
		Update: D{{"$inc", D{{"unitcount", -1}}}},
	}
	if s.doc.Life == Alive {
		svcOp.Assert = D{{"life", Alive}, {"unitcount", D{{"$gt", 0}}}}
	} else {
		svcOp.Assert = D{
			{"life", Dying},
			{"$or", []D{
				{{"unitcount", D{{"$gt", 1}}}},
				{{"relationcount", D{{"$gt", 0}}}},
			}},
		}
	}
	return append(ops, svcOp), nil
}
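Returning `errRefresh` when `settingsDecRefOps` reports not-found tells the caller that the cached unit data went stale mid-build. A hypothetical caller-side loop showing how that signal is meant to be consumed; `removeUnit`, `isDeadDoc`, and the retry shape are illustrative stand-ins, not the actual call site.

// Hypothetical caller: refresh and rebuild the ops on errRefresh rather
// than submit a transaction constructed from stale data.
func removeUnit(svc *Service, unit *Unit) error {
	for {
		ops, err := svc.removeUnitOps(unit, isDeadDoc) // isDeadDoc: assumed life == Dead assert
		if err == errRefresh {
			if err := unit.Refresh(); err != nil {
				return err
			}
			continue
		} else if err != nil {
			return err
		}
		return svc.st.runTransaction(ops)
	}
}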