// NewPaths returns the set of filesystem paths that the supplied unit should
// use, given the supplied root juju data directory path.
func NewPaths(dataDir string, unitTag names.UnitTag) Paths {
	join := filepath.Join
	baseDir := join(dataDir, "agents", unitTag.String())
	stateDir := join(baseDir, "state")

	socket := func(name string, abstract bool) string {
		if version.Current.OS == version.Windows {
			return fmt.Sprintf(`\\.\pipe\%s-%s`, unitTag, name)
		}
		path := join(baseDir, name+".socket")
		if abstract {
			path = "@" + path
		}
		return path
	}

	toolsDir := tools.ToolsDir(dataDir, unitTag.String())
	return Paths{
		ToolsDir: filepath.FromSlash(toolsDir),
		Runtime: RuntimePaths{
			JujuRunSocket:     socket("run", false),
			JujucServerSocket: socket("agent", true),
		},
		State: StatePaths{
			CharmDir:        join(baseDir, "charm"),
			OperationsFile:  join(stateDir, "uniter"),
			RelationsDir:    join(stateDir, "relations"),
			BundlesDir:      join(stateDir, "bundles"),
			DeployerDir:     join(stateDir, "deployer"),
			StorageDir:      join(stateDir, "storage"),
			MetricsSpoolDir: join(stateDir, "spool", "metrics"),
		},
	}
}
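// A minimal usage sketch (illustrative only; the data dir and unit name are
// hypothetical, and the values assume a non-Windows host given the socket
// logic above):
//
//	paths := NewPaths("/var/lib/juju", names.NewUnitTag("mysql/0"))
//	// paths.State.CharmDir            == "/var/lib/juju/agents/unit-mysql-0/charm"
//	// paths.State.MetricsSpoolDir     == "/var/lib/juju/agents/unit-mysql-0/state/spool/metrics"
//	// paths.Runtime.JujuRunSocket     == "/var/lib/juju/agents/unit-mysql-0/run.socket"
//	// paths.Runtime.JujucServerSocket == "@/var/lib/juju/agents/unit-mysql-0/agent.socket"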
// FormatDetailResource converts the arguments into a FormattedDetailResource.
func FormatDetailResource(tag names.UnitTag, svc, unit resource.Resource, progress int64) (FormattedDetailResource, error) {
	// Note that the unit resource can be a zero value here, to indicate that
	// the unit has not downloaded that resource yet.
	unitNum, err := unitNum(tag)
	if err != nil {
		return FormattedDetailResource{}, errors.Trace(err)
	}
	progressStr := ""
	fUnit := FormatSvcResource(unit)
	expected := FormatSvcResource(svc)
	revProgress := expected.combinedRevision
	if progress >= 0 {
		progressStr = "100%"
		if expected.Size > 0 {
			progressStr = fmt.Sprintf("%.f%%", float64(progress)*100.0/float64(expected.Size))
		}
		if fUnit.combinedRevision != expected.combinedRevision {
			revProgress = fmt.Sprintf("%s (fetching: %s)", expected.combinedRevision, progressStr)
		}
	}
	return FormattedDetailResource{
		UnitID:      tag.Id(),
		unitNumber:  unitNum,
		Unit:        fUnit,
		Expected:    expected,
		Progress:    progress,
		progress:    progressStr,
		revProgress: revProgress,
	}, nil
}
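// Worked example of the progress math above (hypothetical numbers): with
// expected.Size == 200 and progress == 150, progressStr is rendered as "75%"
// by the "%.f%%" format; and if the unit's combinedRevision differs from the
// expected one, revProgress becomes e.g. "3 (fetching: 75%)".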
// WatchRelationUnits returns a watcher that notifies of changes to the // counterpart units in the relation for the given unit. func (st *State) WatchRelationUnits( relationTag names.RelationTag, unitTag names.UnitTag, ) (watcher.RelationUnitsWatcher, error) { var results params.RelationUnitsWatchResults args := params.RelationUnits{ RelationUnits: []params.RelationUnit{{ Relation: relationTag.String(), Unit: unitTag.String(), }}, } err := st.facade.FacadeCall("WatchRelationUnits", args, &results) if err != nil { return nil, err } if len(results.Results) != 1 { return nil, fmt.Errorf("expected 1 result, got %d", len(results.Results)) } result := results.Results[0] if result.Error != nil { return nil, result.Error } w := apiwatcher.NewRelationUnitsWatcher(st.facade.RawAPICaller(), result) return w, nil }
// UnitStorageAttachments returns the StorageAttachments for the specified unit. func (st *State) UnitStorageAttachments(unit names.UnitTag) ([]StorageAttachment, error) { query := bson.D{{"unitid", unit.Id()}} attachments, err := st.storageAttachments(query) if err != nil { return nil, errors.Annotatef(err, "cannot get storage attachments for unit %s", unit.Id()) } return attachments, nil }
func tryClosePorts(
	protocol string,
	fromPort, toPort int,
	unitTag names.UnitTag,
	machinePorts map[network.PortRange]params.RelationUnit,
	pendingPorts map[PortRange]PortRangeInfo,
) error {
	// TODO(dimitern) Once port ranges are linked to relations in
	// addition to networks, refactor this function and test it
	// better to ensure it handles relations properly.
	relationId := -1

	// Validate the given range.
	newRange, err := validatePortRange(protocol, fromPort, toPort)
	if err != nil {
		return err
	}
	rangeKey := PortRange{
		Ports:      newRange,
		RelationId: relationId,
	}

	rangeInfo, isKnown := pendingPorts[rangeKey]
	if isKnown {
		if rangeInfo.ShouldOpen {
			// If the same range is already pending to be opened, just
			// remove it from pending.
			delete(pendingPorts, rangeKey)
		}
		return nil
	}

	// Ensure the range we're trying to close is opened on the
	// machine.
	relUnit, found := machinePorts[newRange]
	if !found {
		// Trying to close a range which is not open is ignored.
		return nil
	} else if relUnit.Unit != unitTag.String() {
		relUnitTag, err := names.ParseUnitTag(relUnit.Unit)
		if err != nil {
			return errors.Annotatef(
				err,
				"machine ports %v contain invalid unit tag",
				newRange,
			)
		}
		return errors.Errorf(
			"cannot close %v (opened by %q) from %q",
			newRange, relUnitTag.Id(), unitTag.Id(),
		)
	}

	rangeInfo = pendingPorts[rangeKey]
	rangeInfo.ShouldOpen = false
	pendingPorts[rangeKey] = rangeInfo
	return nil
}
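// Illustrative behaviour (hypothetical inputs): closing a range that is open
// on the machine and owned by this unit records a pending close; closing a
// range that is only pending open (see tryOpenPorts below) cancels the
// pending entry instead; closing a range that is not open at all is a no-op.
//
//	pending := map[PortRange]PortRangeInfo{}
//	err := tryClosePorts("tcp", 8080, 8090, unitTag, machinePorts, pending)
//	// err == nil; if the range was open, the pending entry now has
//	// ShouldOpen == false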
func destroyStorageAttachmentOps(storage names.StorageTag, unit names.UnitTag) []txn.Op {
	ops := []txn.Op{{
		C:      storageAttachmentsC,
		Id:     storageAttachmentId(unit.Id(), storage.Id()),
		Assert: isAliveDoc,
		Update: bson.D{{"$set", bson.D{{"life", Dying}}}},
	}}
	return ops
}
// createStorageAttachmentOp returns a txn.Op for creating a storage attachment.
// The caller is responsible for updating the attachmentcount field of the storage
// instance.
func createStorageAttachmentOp(storage names.StorageTag, unit names.UnitTag) txn.Op {
	return txn.Op{
		C:      storageAttachmentsC,
		Id:     storageAttachmentId(unit.Id(), storage.Id()),
		Assert: txn.DocMissing,
		Insert: &storageAttachmentDoc{
			Unit:            unit.Id(),
			StorageInstance: storage.Id(),
		},
	}
}
// UnitAssignedMachine returns the tag of the machine that the unit
// is assigned to, or an error if the unit cannot be obtained or is
// not assigned to a machine.
func (s stateShim) UnitAssignedMachine(tag names.UnitTag) (names.MachineTag, error) {
	unit, err := s.Unit(tag.Id())
	if err != nil {
		return names.MachineTag{}, errors.Trace(err)
	}
	mid, err := unit.AssignedMachineId()
	if err != nil {
		return names.MachineTag{}, errors.Trace(err)
	}
	return names.NewMachineTag(mid), nil
}
func (m *MeterStatusAPI) watchOneUnitMeterStatus(tag names.UnitTag) (string, error) {
	unit, err := m.state.Unit(tag.Id())
	if err != nil {
		return "", err
	}
	watch := unit.WatchMeterStatus()
	// Consume the initial event before registering the watcher; if the
	// channel is closed instead, surface the watcher's error.
	if _, ok := <-watch.Changes(); ok {
		return m.resources.Register(watch), nil
	}
	return "", watcher.EnsureErr(watch)
}
func (s *StorageStateSuiteBase) obliterateUnit(c *gc.C, tag names.UnitTag) {
	u, err := s.State.Unit(tag.Id())
	c.Assert(err, jc.ErrorIsNil)
	err = u.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	s.obliterateUnitStorage(c, tag)
	err = u.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	err = u.Remove()
	c.Assert(err, jc.ErrorIsNil)
}
func (st *State) storageAttachment(storage names.StorageTag, unit names.UnitTag) (*storageAttachment, error) {
	coll, closer := st.getCollection(storageAttachmentsC)
	defer closer()
	var s storageAttachment
	err := coll.FindId(storageAttachmentId(unit.Id(), storage.Id())).One(&s.doc)
	if err == mgo.ErrNotFound {
		return nil, errors.NotFoundf("storage attachment %s:%s", storage.Id(), unit.Id())
	} else if err != nil {
		return nil, errors.Annotatef(err, "cannot get storage attachment %s:%s", storage.Id(), unit.Id())
	}
	return &s, nil
}
// UnitStorageConstraints returns storage constraints for this unit,
// or an error if the unit or its constraints cannot be obtained.
func (s storageStateShim) UnitStorageConstraints(u names.UnitTag) (map[string]state.StorageConstraints, error) {
	unit, err := s.Unit(u.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}
	cons, err := unit.StorageConstraints()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return cons, nil
}
// FormatDetailResource converts the arguments into a FormattedDetailResource.
func FormatDetailResource(tag names.UnitTag, svc, unit resource.Resource) (FormattedDetailResource, error) {
	// Note that the unit resource can be a zero value here, to indicate that
	// the unit has not downloaded that resource yet.
	unitNum, err := unitNum(tag)
	if err != nil {
		return FormattedDetailResource{}, errors.Trace(err)
	}
	return FormattedDetailResource{
		UnitID:     tag.Id(),
		unitNumber: unitNum,
		Unit:       FormatSvcResource(unit),
		Expected:   FormatSvcResource(svc),
	}, nil
}
func (st *mockState) StorageAttachment(
	storageTag names.StorageTag,
	unitTag names.UnitTag,
) (params.StorageAttachment, error) {
	if unitTag != st.unit.tag {
		return params.StorageAttachment{}, &params.Error{Code: params.CodeNotFound}
	}
	attachment, ok := st.storageAttachment[params.StorageAttachmentId{
		UnitTag:    unitTag.String(),
		StorageTag: storageTag.String(),
	}]
	if !ok {
		return params.StorageAttachment{}, &params.Error{Code: params.CodeNotFound}
	}
	if attachment.Kind == params.StorageKindUnknown {
		return params.StorageAttachment{}, &params.Error{Code: params.CodeNotProvisioned}
	}
	return attachment, nil
}
// WatchUnitStorageAttachments starts a watcher for changes to storage
// attachments related to the unit. The watcher will return the
// IDs of the corresponding storage instances.
func (sa *StorageAccessor) WatchUnitStorageAttachments(unitTag names.UnitTag) (watcher.StringsWatcher, error) {
	var results params.StringsWatchResults
	args := params.Entities{
		Entities: []params.Entity{{Tag: unitTag.String()}},
	}
	err := sa.facade.FacadeCall("WatchUnitStorageAttachments", args, &results)
	if err != nil {
		return nil, err
	}
	if len(results.Results) != 1 {
		return nil, errors.Errorf("expected 1 result, got %d", len(results.Results))
	}
	result := results.Results[0]
	if result.Error != nil {
		return nil, result.Error
	}
	w := apiwatcher.NewStringsWatcher(sa.facade.RawAPICaller(), result)
	return w, nil
}
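// A minimal consumption sketch (assumes the StringsWatcher's Changes channel
// yields batches of storage instance IDs as []string, per
// apiwatcher.NewStringsWatcher; error handling elided):
//
//	w, err := sa.WatchUnitStorageAttachments(unitTag)
//	if err != nil {
//		return err
//	}
//	defer w.Stop()
//	for storageIds := range w.Changes() {
//		// react to attachment changes for each storage instance ID
//		_ = storageIds
//	}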
// addMetrics adds a new batch of metrics to the database.
// A UUID for the batch will be generated and the new MetricBatch will be returned.
func (st *State) addMetrics(unitTag names.UnitTag, charmUrl *charm.URL, metrics []*Metric) (*MetricBatch, error) {
	if len(metrics) == 0 {
		return nil, errors.New("cannot add a batch of 0 metrics")
	}
	uuid, err := utils.NewUUID()
	if err != nil {
		return nil, err
	}
	metricDocs := make([]metricDoc, len(metrics))
	for i, m := range metrics {
		metricDocs[i] = metricDoc{
			Key:         m.Key(),
			Value:       m.Value(),
			Time:        m.Time(),
			Credentials: m.Credentials(),
		}
	}
	metric := &MetricBatch{
		st: st,
		doc: metricBatchDoc{
			UUID:     uuid.String(),
			Unit:     unitTag.Id(),
			CharmUrl: charmUrl.String(),
			Sent:     false,
			Metrics:  metricDocs,
		},
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			notDead, err := isNotDead(st.db, unitsC, unitTag.Id())
			if err != nil || !notDead {
				return nil, errors.NotFoundf(unitTag.Id())
			}
		}
		ops := []txn.Op{{
			C:      unitsC,
			Id:     unitTag.Id(),
			Assert: notDeadDoc,
		}, {
			C:      metricsC,
			Id:     metric.UUID(),
			Assert: txn.DocMissing,
			Insert: &metric.doc,
		}}
		return ops, nil
	}
	err = st.run(buildTxn)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return metric, nil
}
// AddStorageForUnit adds storage instances to the given unit as specified.
// Missing storage constraints are populated based on environment defaults.
// The storage store name is used to retrieve existing storage instances for
// that store; the combination of existing and anticipated additional storage
// instances is validated against the store as specified in the charm.
func (st *State) AddStorageForUnit(
	tag names.UnitTag, name string, cons StorageConstraints,
) error {
	u, err := st.Unit(tag.Id())
	if err != nil {
		return errors.Trace(err)
	}
	s, err := u.Service()
	if err != nil {
		return errors.Annotatef(err, "getting service for unit %v", u.Tag().Id())
	}
	ch, _, err := s.Charm()
	if err != nil {
		return errors.Annotatef(err, "getting charm for unit %q", u.Tag().Id())
	}
	return st.addStorageForUnit(ch, u, name, cons)
}
// NewTrackerWorker returns a TrackerWorker that attempts to claim and retain
// service leadership for the supplied unit. It will claim leadership for twice
// the supplied duration, and once it's leader it will renew leadership every
// time the duration elapses.
// Thus, successful leadership claims on the resulting Tracker will guarantee
// leadership for the duration supplied here without generating additional calls
// to the supplied manager (which may very well be on the other side of a
// network connection).
func NewTrackerWorker(tag names.UnitTag, leadership leadership.LeadershipManager, duration time.Duration) TrackerWorker {
	unitName := tag.Id()
	serviceName, _ := names.UnitService(unitName)
	t := &tracker{
		unitName:          unitName,
		serviceName:       serviceName,
		leadership:        leadership,
		duration:          duration,
		claimTickets:      make(chan chan bool),
		waitLeaderTickets: make(chan chan bool),
		waitMinionTickets: make(chan chan bool),
	}
	go func() {
		defer t.tomb.Done()
		defer func() {
			for _, ticketCh := range t.waitingLeader {
				close(ticketCh)
			}
			for _, ticketCh := range t.waitingMinion {
				close(ticketCh)
			}
		}()
		err := t.loop()
		// TODO: jam 2015-04-02 is this the most elegant way to make
		// sure we shutdown cleanly? Essentially the lowest level sees
		// that we are dying, and propagates an ErrDying up to us so
		// that we shut down, which we then are passing back into
		// Tomb.Kill().
		// Tomb.Kill() special cases the exact object ErrDying, and has
		// no idea about errors.Cause and the general errors.Trace
		// mechanisms that we use.
		// So we explicitly unwrap before calling tomb.Kill() else
		// tomb.Stop() thinks that we have a genuine error.
		switch cause := errors.Cause(err); cause {
		case tomb.ErrDying:
			err = cause
		}
		t.tomb.Kill(err)
	}()
	return t
}
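// A minimal usage sketch, assuming the Tracker side of TrackerWorker exposes
// ClaimLeader() returning a ticket with Wait() (as the claimTickets channel
// above suggests); the manager value and unit name are hypothetical:
//
//	tracker := NewTrackerWorker(names.NewUnitTag("mysql/0"), manager, 30*time.Second)
//	defer tracker.Kill()
//	if tracker.ClaimLeader().Wait() {
//		// this unit holds service leadership for at least 30s
//	}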
// DestroyUnitStorageAttachments ensures that the specified unit's storage
// attachments will be removed at some point in the future.
func (sa *StorageAccessor) DestroyUnitStorageAttachments(unitTag names.UnitTag) error {
	if sa.facade.BestAPIVersion() < 2 {
		return errors.NotImplementedf("DestroyUnitStorageAttachments() (need V2+)")
	}
	args := params.Entities{
		Entities: []params.Entity{{Tag: unitTag.String()}},
	}
	var results params.ErrorResults
	err := sa.facade.FacadeCall("DestroyUnitStorageAttachments", args, &results)
	if err != nil {
		return errors.Trace(err)
	}
	if len(results.Results) != 1 {
		panic(errors.Errorf("expected 1 result, got %d", len(results.Results)))
	}
	result := results.Results[0]
	if result.Error != nil {
		return result.Error
	}
	return nil
}
// RemoveStorageAttachment removes the storage attachment with the
// specified unit and storage tags from state. This method is only
// expected to succeed if the storage attachment is Dead.
func (sa *StorageAccessor) RemoveStorageAttachment(storageTag names.StorageTag, unitTag names.UnitTag) error {
	var results params.ErrorResults
	args := params.StorageAttachmentIds{
		Ids: []params.StorageAttachmentId{{
			StorageTag: storageTag.String(),
			UnitTag:    unitTag.String(),
		}},
	}
	err := sa.facade.FacadeCall("RemoveStorageAttachments", args, &results)
	if err != nil {
		return err
	}
	if len(results.Results) != 1 {
		return errors.Errorf("expected 1 result, got %d", len(results.Results))
	}
	result := results.Results[0]
	if result.Error != nil {
		return result.Error
	}
	return nil
}
// WatchStorageAttachment starts a watcher for changes to the info
// of the storage attachment with the specified unit and storage tags.
func (sa *StorageAccessor) WatchStorageAttachment(storageTag names.StorageTag, unitTag names.UnitTag) (watcher.NotifyWatcher, error) {
	var results params.NotifyWatchResults
	args := params.StorageAttachmentIds{
		Ids: []params.StorageAttachmentId{{
			StorageTag: storageTag.String(),
			UnitTag:    unitTag.String(),
		}},
	}
	err := sa.facade.FacadeCall("WatchStorageAttachments", args, &results)
	if err != nil {
		return nil, err
	}
	if len(results.Results) != 1 {
		return nil, errors.Errorf("expected 1 result, got %d", len(results.Results))
	}
	result := results.Results[0]
	if result.Error != nil {
		return nil, result.Error
	}
	w := apiwatcher.NewNotifyWatcher(sa.facade.RawAPICaller(), result)
	return w, nil
}
// DestroyUnitStorageAttachments ensures that the existing storage attachments
// of the specified unit are removed at some point.
func (st *State) DestroyUnitStorageAttachments(unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot destroy unit %s storage attachments", unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		attachments, err := st.UnitStorageAttachments(unit)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops := make([]txn.Op, 0, len(attachments))
		for _, attachment := range attachments {
			if attachment.Life() != Alive {
				continue
			}
			ops = append(ops, destroyStorageAttachmentOps(
				attachment.StorageInstance(), unit,
			)...)
		}
		if len(ops) == 0 {
			return nil, jujutxn.ErrNoOperations
		}
		return ops, nil
	}
	return st.run(buildTxn)
}
// StorageAttachment returns the storage attachment with the specified
// unit and storage tags.
func (sa *StorageAccessor) StorageAttachment(storageTag names.StorageTag, unitTag names.UnitTag) (params.StorageAttachment, error) {
	if sa.facade.BestAPIVersion() < 2 {
		return params.StorageAttachment{}, errors.NotImplementedf("StorageAttachment() (need V2+)")
	}
	args := params.StorageAttachmentIds{
		Ids: []params.StorageAttachmentId{{
			StorageTag: storageTag.String(),
			UnitTag:    unitTag.String(),
		}},
	}
	var results params.StorageAttachmentResults
	err := sa.facade.FacadeCall("StorageAttachments", args, &results)
	if err != nil {
		return params.StorageAttachment{}, errors.Trace(err)
	}
	if len(results.Results) != 1 {
		panic(errors.Errorf("expected 1 result, got %d", len(results.Results)))
	}
	result := results.Results[0]
	if result.Error != nil {
		return params.StorageAttachment{}, result.Error
	}
	return result.Result, nil
}
// unitNum returns the unit number parsed from the tag's ID, which is
// expected to have the "name/number" form.
func unitNum(unit names.UnitTag) (int, error) {
	vals := strings.SplitN(unit.Id(), "/", 2)
	if len(vals) != 2 {
		return 0, errors.Errorf("%q is not a valid unit ID", unit.Id())
	}
	num, err := strconv.Atoi(vals[1])
	if err != nil {
		return 0, errors.Annotatef(err, "%q is not a valid unit ID", unit.Id())
	}
	return num, nil
}
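// For example (hypothetical tag): unitNum(names.NewUnitTag("mysql/3"))
// returns 3, while an ID whose suffix is not an integer yields a
// "not a valid unit ID" error.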
func (u *UniterAPI) getUnit(tag names.UnitTag) (*state.Unit, error) {
	return u.st.Unit(tag.Id())
}
func tryOpenPorts(
	protocol string,
	fromPort, toPort int,
	unitTag names.UnitTag,
	machinePorts map[network.PortRange]params.RelationUnit,
	pendingPorts map[PortRange]PortRangeInfo,
) error {
	// TODO(dimitern) Once port ranges are linked to relations in
	// addition to networks, refactor this function and test it
	// better to ensure it handles relations properly.
	relationId := -1

	// Validate the given range.
	newRange, err := validatePortRange(protocol, fromPort, toPort)
	if err != nil {
		return err
	}
	rangeKey := PortRange{
		Ports:      newRange,
		RelationId: relationId,
	}

	rangeInfo, isKnown := pendingPorts[rangeKey]
	if isKnown {
		if !rangeInfo.ShouldOpen {
			// If the same range is already pending to be closed, just
			// mark it as pending to be opened.
			rangeInfo.ShouldOpen = true
			pendingPorts[rangeKey] = rangeInfo
		}
		return nil
	}

	// Ensure there are no conflicts with existing ports on the
	// machine.
	for portRange, relUnit := range machinePorts {
		relUnitTag, err := names.ParseUnitTag(relUnit.Unit)
		if err != nil {
			return errors.Annotatef(
				err,
				"machine ports %v contain invalid unit tag",
				portRange,
			)
		}
		if newRange.ConflictsWith(portRange) {
			if portRange == newRange && relUnitTag == unitTag {
				// The same unit trying to open the same range is just
				// ignored.
				return nil
			}
			return errors.Errorf(
				"cannot open %v (unit %q): conflicts with existing %v (unit %q)",
				newRange, unitTag.Id(), portRange, relUnitTag.Id(),
			)
		}
	}
	// Ensure other pending port ranges do not conflict with this one.
	for rangeKey, rangeInfo := range pendingPorts {
		if newRange.ConflictsWith(rangeKey.Ports) && rangeInfo.ShouldOpen {
			return errors.Errorf(
				"cannot open %v (unit %q): conflicts with %v requested earlier",
				newRange, unitTag.Id(), rangeKey.Ports,
			)
		}
	}

	rangeInfo = pendingPorts[rangeKey]
	rangeInfo.ShouldOpen = true
	pendingPorts[rangeKey] = rangeInfo
	return nil
}
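// Conflict-handling sketch (hypothetical inputs and error text): opening a
// range that overlaps one held by another unit fails, while re-opening a
// range this unit already holds is silently ignored.
//
//	pending := map[PortRange]PortRangeInfo{}
//	if err := tryOpenPorts("tcp", 8080, 8090, unitTag, machinePorts, pending); err != nil {
//		// e.g. cannot open 8080-8090/tcp (unit "mysql/0"): conflicts with
//		// existing 8085-8095/tcp (unit "wordpress/0")
//	}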
// RemoveStorageAttachment removes the storage attachment from state, and may
// remove its storage instance as well, if the storage instance is Dying and
// no other references to it exist. It will fail if the storage attachment is
// not Dead.
func (st *State) RemoveStorageAttachment(storage names.StorageTag, unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot remove storage attachment %s:%s", storage.Id(), unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		s, err := st.storageAttachment(storage, unit)
		if errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		inst, err := st.storageInstance(storage)
		if errors.IsNotFound(err) {
			// This implies that the attachment was removed
			// after the call to st.storageAttachment.
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		ops, err := removeStorageAttachmentOps(st, s, inst)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return ops, nil
	}
	return st.run(buildTxn)
}
// DestroyStorageAttachment ensures that the storage attachment will be
// removed at some point.
func (st *State) DestroyStorageAttachment(storage names.StorageTag, unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot destroy storage attachment %s:%s", storage.Id(), unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		s, err := st.storageAttachment(storage, unit)
		if errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		if s.doc.Life == Dying {
			return nil, jujutxn.ErrNoOperations
		}
		return destroyStorageAttachmentOps(storage, unit), nil
	}
	return st.run(buildTxn)
}
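// Illustrative lifecycle tying the two state operations above together (the
// Dead transition happens elsewhere, e.g. once the storage has been detached;
// RemoveStorageAttachment only succeeds on a Dead attachment):
//
//	err := st.DestroyStorageAttachment(storageTag, unitTag) // Alive -> Dying
//	// ... the attachment is detached and advanced to Dead ...
//	err = st.RemoveStorageAttachment(storageTag, unitTag)   // Dead -> removed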