Example 1
// FormatDetailResource converts the arguments into a FormattedDetailResource.
func FormatDetailResource(tag names.UnitTag, svc, unit resource.Resource, progress int64) (FormattedDetailResource, error) {
	// note that the unit resource can be a zero value here, to indicate that
	// the unit has not downloaded that resource yet.

	unitNum, err := unitNum(tag)
	if err != nil {
		return FormattedDetailResource{}, errors.Trace(err)
	}
	progressStr := ""
	fUnit := FormatSvcResource(unit)
	expected := FormatSvcResource(svc)
	revProgress := expected.combinedRevision
	if progress >= 0 {
		progressStr = "100%"
		if expected.Size > 0 {
			progressStr = fmt.Sprintf("%.f%%", float64(progress)*100.0/float64(expected.Size))
		}
		if fUnit.combinedRevision != expected.combinedRevision {
			revProgress = fmt.Sprintf("%s (fetching: %s)", expected.combinedRevision, progressStr)
		}
	}
	return FormattedDetailResource{
		UnitID:      tag.Id(),
		unitNumber:  unitNum,
		Unit:        fUnit,
		Expected:    expected,
		Progress:    progress,
		progress:    progressStr,
		revProgress: revProgress,
	}, nil
}
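A minimal standalone sketch of the percentage arithmetic above; progressPercent is a hypothetical helper that mirrors the logic, with the "100%" fallback covering resources whose expected size is zero so the division is never attempted:

// progressPercent is a hypothetical standalone helper mirroring the logic
// above: report "100%" when the expected size is zero, otherwise compute
// downloaded bytes over expected bytes.
func progressPercent(progress, size int64) string {
	if size > 0 {
		return fmt.Sprintf("%.f%%", float64(progress)*100.0/float64(size))
	}
	return "100%"
}

// progressPercent(512, 2048) == "25%"; progressPercent(100, 0) == "100%".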
Example 2
// UnitStorageAttachments returns the StorageAttachments for the specified unit.
func (st *State) UnitStorageAttachments(unit names.UnitTag) ([]StorageAttachment, error) {
	query := bson.D{{"unitid", unit.Id()}}
	attachments, err := st.storageAttachments(query)
	if err != nil {
		return nil, errors.Annotatef(err, "cannot get storage attachments for unit %s", unit.Id())
	}
	return attachments, nil
}
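A hedged usage sketch: listStorageIDs is a hypothetical caller in the same package, collecting the storage instance IDs attached to a unit via the StorageInstance tags carried by each attachment.

// listStorageIDs is a hypothetical helper showing one way a caller might
// enumerate the storage instances attached to a unit.
func listStorageIDs(st *State, unit names.UnitTag) ([]string, error) {
	attachments, err := st.UnitStorageAttachments(unit)
	if err != nil {
		return nil, errors.Trace(err)
	}
	ids := make([]string, 0, len(attachments))
	for _, a := range attachments {
		ids = append(ids, a.StorageInstance().Id())
	}
	return ids, nil
}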
Example 3
func tryClosePorts(
	protocol string,
	fromPort, toPort int,
	unitTag names.UnitTag,
	machinePorts map[network.PortRange]params.RelationUnit,
	pendingPorts map[PortRange]PortRangeInfo,
) error {
	// TODO(dimitern) Once port ranges are linked to relations in
	// addition to networks, refactor this function and test it
	// better to ensure it handles relations properly.
	relationId := -1

	// Validate the given range.
	newRange, err := validatePortRange(protocol, fromPort, toPort)
	if err != nil {
		return err
	}
	rangeKey := PortRange{
		Ports:      newRange,
		RelationId: relationId,
	}

	rangeInfo, isKnown := pendingPorts[rangeKey]
	if isKnown {
		if rangeInfo.ShouldOpen {
			// If the same range is already pending to be opened, just
			// remove it from pending.
			delete(pendingPorts, rangeKey)
		}
		return nil
	}

	// Ensure the range we're trying to close is opened on the
	// machine.
	relUnit, found := machinePorts[newRange]
	if !found {
		// Trying to close a range which is not open is ignored.
		return nil
	} else if relUnit.Unit != unitTag.String() {
		relUnitTag, err := names.ParseUnitTag(relUnit.Unit)
		if err != nil {
			return errors.Annotatef(
				err,
				"machine ports %v contain invalid unit tag",
				newRange,
			)
		}
		return errors.Errorf(
			"cannot close %v (opened by %q) from %q",
			newRange, relUnitTag.Id(), unitTag.Id(),
		)
	}

	rangeInfo = pendingPorts[rangeKey]
	rangeInfo.ShouldOpen = false
	pendingPorts[rangeKey] = rangeInfo
	return nil
}
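A sketch of how tryClosePorts might be driven; hypotheticalClose, the unit name, and the port numbers are illustrative, and the package-internal PortRange and PortRangeInfo types are assumed from the signature above.

// hypotheticalClose closes 8080-8090/tcp for mysql/0, which is permitted
// because the machine shows that range as opened by the same unit.
func hypotheticalClose() error {
	unitTag := names.NewUnitTag("mysql/0")
	openRange := network.PortRange{Protocol: "tcp", FromPort: 8080, ToPort: 8090}
	machinePorts := map[network.PortRange]params.RelationUnit{
		openRange: {Unit: unitTag.String()},
	}
	pendingPorts := make(map[PortRange]PortRangeInfo)
	// On success, pendingPorts records the range with ShouldOpen == false.
	return tryClosePorts("tcp", 8080, 8090, unitTag, machinePorts, pendingPorts)
}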
Example 4
func destroyStorageAttachmentOps(storage names.StorageTag, unit names.UnitTag) []txn.Op {
	ops := []txn.Op{{
		C:      storageAttachmentsC,
		Id:     storageAttachmentId(unit.Id(), storage.Id()),
		Assert: isAliveDoc,
		Update: bson.D{{"$set", bson.D{{"life", Dying}}}},
	}}
	return ops
}
Example 5
// UnitAssignedMachine returns the tag of the machine that the unit
// is assigned to, or an error if the unit cannot be obtained or is
// not assigned to a machine.
func (s stateShim) UnitAssignedMachine(tag names.UnitTag) (names.MachineTag, error) {
	unit, err := s.Unit(tag.Id())
	if err != nil {
		return names.MachineTag{}, errors.Trace(err)
	}
	mid, err := unit.AssignedMachineId()
	if err != nil {
		return names.MachineTag{}, errors.Trace(err)
	}
	return names.NewMachineTag(mid), nil
}
Example 6
func (m *MeterStatusAPI) watchOneUnitMeterStatus(tag names.UnitTag) (string, error) {
	unit, err := m.state.Unit(tag.Id())
	if err != nil {
		return "", err
	}
	watch := unit.WatchMeterStatus()
	if _, ok := <-watch.Changes(); ok {
		return m.resources.Register(watch), nil
	}
	return "", watcher.EnsureErr(watch)
}
Example 7
func (s *StorageStateSuiteBase) obliterateUnit(c *gc.C, tag names.UnitTag) {
	u, err := s.State.Unit(tag.Id())
	c.Assert(err, jc.ErrorIsNil)
	err = u.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	s.obliterateUnitStorage(c, tag)
	err = u.EnsureDead()
	c.Assert(err, jc.ErrorIsNil)
	err = u.Remove()
	c.Assert(err, jc.ErrorIsNil)
}
Example 8
// createStorageAttachmentOp returns a txn.Op for creating a storage
// attachment. The caller is responsible for updating the attachmentcount
// field of the storage instance.
func createStorageAttachmentOp(storage names.StorageTag, unit names.UnitTag) txn.Op {
	return txn.Op{
		C:      storageAttachmentsC,
		Id:     storageAttachmentId(unit.Id(), storage.Id()),
		Assert: txn.DocMissing,
		Insert: &storageAttachmentDoc{
			Unit:            unit.Id(),
			StorageInstance: storage.Id(),
		},
	}
}
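Per the comment, the caller pairs this op with an update of the storage instance's attachmentcount. A hedged sketch of that pairing; the storageInstancesC collection name and the $inc target field are assumptions drawn from the comment, not confirmed API:

// hypotheticalAttachOps creates the attachment and bumps the instance's
// reference count in the same transaction.
func hypotheticalAttachOps(storage names.StorageTag, unit names.UnitTag) []txn.Op {
	return []txn.Op{
		createStorageAttachmentOp(storage, unit),
		{
			C:      storageInstancesC, // assumed collection name
			Id:     storage.Id(),
			Assert: isAliveDoc,
			Update: bson.D{{"$inc", bson.D{{"attachmentcount", 1}}}},
		},
	}
}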
Example 9
func (st *State) storageAttachment(storage names.StorageTag, unit names.UnitTag) (*storageAttachment, error) {
	coll, closer := st.getCollection(storageAttachmentsC)
	defer closer()
	var s storageAttachment
	err := coll.FindId(storageAttachmentId(unit.Id(), storage.Id())).One(&s.doc)
	if err == mgo.ErrNotFound {
		return nil, errors.NotFoundf("storage attachment %s:%s", storage.Id(), unit.Id())
	} else if err != nil {
		return nil, errors.Annotatef(err, "cannot get storage attachment %s:%s", storage.Id(), unit.Id())
	}
	return &s, nil
}
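The mgo.ErrNotFound branch maps the driver sentinel onto a typed juju/errors NotFound, so callers test errors.IsNotFound rather than comparing against mgo.ErrNotFound, as the destroy/remove functions later on this page do. A small illustrative fragment:

// exampleNotFound is a hypothetical caller branching on the typed error.
func exampleNotFound(st *State, storage names.StorageTag, unit names.UnitTag) {
	if _, err := st.storageAttachment(storage, unit); errors.IsNotFound(err) {
		// The attachment is already gone; nothing to do.
	}
}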
Example 10
// UnitStorageConstraints returns storage constraints for this unit,
// or an error if the unit or its constraints cannot be obtained.
func (s storageStateShim) UnitStorageConstraints(u names.UnitTag) (map[string]state.StorageConstraints, error) {
	unit, err := s.Unit(u.Id())
	if err != nil {
		return nil, errors.Trace(err)
	}

	cons, err := unit.StorageConstraints()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return cons, nil
}
Example 11
// FormatDetailResource converts the arguments into a FormattedDetailResource.
func FormatDetailResource(tag names.UnitTag, svc, unit resource.Resource) (FormattedDetailResource, error) {
	// note that the unit resource can be a zero value here, to indicate that
	// the unit has not downloaded that resource yet.

	unitNum, err := unitNum(tag)
	if err != nil {
		return FormattedDetailResource{}, errors.Trace(err)
	}
	return FormattedDetailResource{
		UnitID:     tag.Id(),
		unitNumber: unitNum,
		Unit:       FormatSvcResource(unit),
		Expected:   FormatSvcResource(svc),
	}, nil
}
Example 12
// addMetrics adds a new batch of metrics to the database. A UUID for the
// batch is generated and the new MetricBatch is returned.
func (st *State) addMetrics(unitTag names.UnitTag, charmUrl *charm.URL, metrics []*Metric) (*MetricBatch, error) {
	if len(metrics) == 0 {
		return nil, errors.New("cannot add a batch of 0 metrics")
	}
	uuid, err := utils.NewUUID()
	if err != nil {
		return nil, err
	}

	metricDocs := make([]metricDoc, len(metrics))
	for i, m := range metrics {
		metricDocs[i] = metricDoc{
			Key:         m.Key(),
			Value:       m.Value(),
			Time:        m.Time(),
			Credentials: m.Credentials(),
		}
	}
	metric := &MetricBatch{
		st: st,
		doc: metricBatchDoc{
			UUID:     uuid.String(),
			Unit:     unitTag.Id(),
			CharmUrl: charmUrl.String(),
			Sent:     false,
			Metrics:  metricDocs,
		}}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		if attempt > 0 {
			notDead, err := isNotDead(st.db, unitsC, unitTag.Id())
			if err != nil || !notDead {
				return nil, errors.NotFoundf(unitTag.Id())
			}
		}
		ops := []txn.Op{{
			C:      unitsC,
			Id:     unitTag.Id(),
			Assert: notDeadDoc,
		}, {
			C:      metricsC,
			Id:     metric.UUID(),
			Assert: txn.DocMissing,
			Insert: &metric.doc,
		}}
		return ops, nil
	}
	err = st.run(buildTxn)
	if err != nil {
		return nil, errors.Trace(err)
	}

	return metric, nil
}
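The buildTxn closure relies on the retry contract assumed by st.run: attempt 0 runs optimistically against the asserted state, and if the assertions fail the closure is invoked again with attempt > 0 so it can re-check (here, that the unit is still not dead). A simplified sketch of that contract, not the actual jujutxn implementation:

// runWithRetries sketches the assumed contract: rebuild and reapply the ops
// a bounded number of times, re-invoking buildTxn with a higher attempt.
func runWithRetries(buildTxn func(attempt int) ([]txn.Op, error), apply func([]txn.Op) error) error {
	for attempt := 0; attempt < 3; attempt++ {
		ops, err := buildTxn(attempt)
		if err != nil {
			return err
		}
		if err := apply(ops); err == nil {
			return nil
		}
	}
	return errors.New("transaction aborted after retries")
}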
Example 13
// AddStorageForUnit adds storage instances to the given unit as specified.
// Missing storage constraints are populated from environment defaults. The
// storage name identifies a storage store in the charm; the combination of
// existing storage instances for that store and the anticipated additions
// is validated against the store's definition in the charm.
func (st *State) AddStorageForUnit(
	tag names.UnitTag, name string, cons StorageConstraints,
) error {
	u, err := st.Unit(tag.Id())
	if err != nil {
		return errors.Trace(err)
	}

	s, err := u.Service()
	if err != nil {
		return errors.Annotatef(err, "getting service for unit %v", u.Tag().Id())
	}
	ch, _, err := s.Charm()
	if err != nil {
		return errors.Annotatef(err, "getting charm for unit %q", u.Tag().Id())
	}

	return st.addStorageForUnit(ch, u, name, cons)
}
Example 14
// NewTrackerWorker returns a TrackerWorker that attempts to claim and retain
// service leadership for the supplied unit. It will claim leadership for twice
// the supplied duration, and once it's leader it will renew leadership every
// time the duration elapses.
// Thus, successful leadership claims on the resulting Tracker will guarantee
// leadership for the duration supplied here without generating additional calls
// to the supplied manager (which may very well be on the other side of a
// network connection).
func NewTrackerWorker(tag names.UnitTag, leadership leadership.LeadershipManager, duration time.Duration) TrackerWorker {
	unitName := tag.Id()
	serviceName, _ := names.UnitService(unitName)
	t := &tracker{
		unitName:          unitName,
		serviceName:       serviceName,
		leadership:        leadership,
		duration:          duration,
		claimTickets:      make(chan chan bool),
		waitLeaderTickets: make(chan chan bool),
		waitMinionTickets: make(chan chan bool),
	}
	go func() {
		defer t.tomb.Done()
		defer func() {
			for _, ticketCh := range t.waitingLeader {
				close(ticketCh)
			}
			for _, ticketCh := range t.waitingMinion {
				close(ticketCh)
			}
		}()
		err := t.loop()
		// TODO: jam 2015-04-02 is this the most elegant way to make
		// sure we shutdown cleanly? Essentially the lowest level sees
		// that we are dying, and propagates an ErrDying up to us so
		// that we shut down, which we then are passing back into
		// Tomb.Kill().
		// Tomb.Kill() special cases the exact object ErrDying, and has
		// no idea about errors.Cause and the general errors.Trace
		// mechanisms that we use.
		// So we explicitly unwrap before calling tomb.Kill() else
		// tomb.Stop() thinks that we have a genuine error.
		switch cause := errors.Cause(err); cause {
		case tomb.ErrDying:
			err = cause
		}
		t.tomb.Kill(err)
	}()
	return t
}
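The switch at the end is worth isolating: tomb.Kill special-cases the identity of tomb.ErrDying, so a traced or wrapped error must be reduced to its cause first. killCleanly is a hypothetical extraction of that pattern:

// killCleanly unwraps a traced tomb.ErrDying before handing it to the tomb,
// so a clean shutdown is not misreported as a genuine error.
func killCleanly(t *tomb.Tomb, err error) {
	if errors.Cause(err) == tomb.ErrDying {
		err = tomb.ErrDying
	}
	t.Kill(err)
}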
Example 15
// DestroyUnitStorageAttachments ensures that the existing storage
// attachments of the specified unit are removed at some point.
func (st *State) DestroyUnitStorageAttachments(unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot destroy unit %s storage attachments", unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		attachments, err := st.UnitStorageAttachments(unit)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ops := make([]txn.Op, 0, len(attachments))
		for _, attachment := range attachments {
			if attachment.Life() != Alive {
				continue
			}
			ops = append(ops, destroyStorageAttachmentOps(
				attachment.StorageInstance(), unit,
			)...)
		}
		if len(ops) == 0 {
			return nil, jujutxn.ErrNoOperations
		}
		return ops, nil
	}
	return st.run(buildTxn)
}
Example 16
func unitNum(unit names.UnitTag) (int, error) {
	vals := strings.SplitN(unit.Id(), "/", 2)
	if len(vals) != 2 {
		return 0, errors.Errorf("%q is not a valid unit ID", unit.Id())
	}
	num, err := strconv.Atoi(vals[1])
	if err != nil {
		return 0, errors.Annotatef(err, "%q is not a valid unit ID", unit.Id())
	}
	return num, nil
}
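An illustrative call, assuming the helper lives in the same package; the unit name is arbitrary:

// exampleUnitNum extracts the numeric suffix of a unit ID.
func exampleUnitNum() {
	num, err := unitNum(names.NewUnitTag("mysql/3"))
	if err != nil {
		panic(err) // unreachable: "mysql/3" is a well-formed unit ID
	}
	fmt.Println(num) // prints 3
}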
Example 17
func (u *UniterAPI) getUnit(tag names.UnitTag) (*state.Unit, error) {
	return u.st.Unit(tag.Id())
}
Example 18
// RemoveStorageAttachment removes the storage attachment from state, and may
// remove its storage instance as well, if the storage instance is Dying and
// no other references to it exist. It will fail if the storage attachment is
// not Dead.
func (st *State) RemoveStorageAttachment(storage names.StorageTag, unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot remove storage attachment %s:%s", storage.Id(), unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		s, err := st.storageAttachment(storage, unit)
		if errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		inst, err := st.storageInstance(storage)
		if errors.IsNotFound(err) {
			// This implies that the attachment was removed
			// after the call to st.storageAttachment.
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		ops, err := removeStorageAttachmentOps(st, s, inst)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return ops, nil
	}
	return st.run(buildTxn)
}
Example 19
func tryOpenPorts(
	protocol string,
	fromPort, toPort int,
	unitTag names.UnitTag,
	machinePorts map[network.PortRange]params.RelationUnit,
	pendingPorts map[PortRange]PortRangeInfo,
) error {
	// TODO(dimitern) Once port ranges are linked to relations in
	// addition to networks, refactor this function and test it
	// better to ensure it handles relations properly.
	relationId := -1

	// Validate the given range.
	newRange, err := validatePortRange(protocol, fromPort, toPort)
	if err != nil {
		return err
	}
	rangeKey := PortRange{
		Ports:      newRange,
		RelationId: relationId,
	}

	rangeInfo, isKnown := pendingPorts[rangeKey]
	if isKnown {
		if !rangeInfo.ShouldOpen {
			// If the same range is already pending to be closed, just
			// mark it as pending to be opened.
			rangeInfo.ShouldOpen = true
			pendingPorts[rangeKey] = rangeInfo
		}
		return nil
	}

	// Ensure there are no conflicts with existing ports on the
	// machine.
	for portRange, relUnit := range machinePorts {
		relUnitTag, err := names.ParseUnitTag(relUnit.Unit)
		if err != nil {
			return errors.Annotatef(
				err,
				"machine ports %v contain invalid unit tag",
				portRange,
			)
		}
		if newRange.ConflictsWith(portRange) {
			if portRange == newRange && relUnitTag == unitTag {
				// The same unit trying to open the same range is just
				// ignored.
				return nil
			}
			return errors.Errorf(
				"cannot open %v (unit %q): conflicts with existing %v (unit %q)",
				newRange, unitTag.Id(), portRange, relUnitTag.Id(),
			)
		}
	}
	// Ensure other pending port ranges do not conflict with this one.
	for rangeKey, rangeInfo := range pendingPorts {
		if newRange.ConflictsWith(rangeKey.Ports) && rangeInfo.ShouldOpen {
			return errors.Errorf(
				"cannot open %v (unit %q): conflicts with %v requested earlier",
				newRange, unitTag.Id(), rangeKey.Ports,
			)
		}
	}

	rangeInfo = pendingPorts[rangeKey]
	rangeInfo.ShouldOpen = true
	pendingPorts[rangeKey] = rangeInfo
	return nil
}
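A sketch of the conflict case; hypotheticalOpen and its literals are illustrative. Here wordpress/0 asks for a sub-range of one already opened by mysql/0, so the function returns the conflict error rather than mutating pendingPorts:

// hypotheticalOpen fails: 8050-8060/tcp overlaps 8000-8100/tcp, which was
// opened by a different unit.
func hypotheticalOpen() error {
	machinePorts := map[network.PortRange]params.RelationUnit{
		{Protocol: "tcp", FromPort: 8000, ToPort: 8100}: {Unit: "unit-mysql-0"},
	}
	pendingPorts := make(map[PortRange]PortRangeInfo)
	return tryOpenPorts(
		"tcp", 8050, 8060, names.NewUnitTag("wordpress/0"),
		machinePorts, pendingPorts,
	)
}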
Example 20
// DestroyStorageAttachment ensures that the storage attachment will be
// removed at some point.
func (st *State) DestroyStorageAttachment(storage names.StorageTag, unit names.UnitTag) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot destroy storage attachment %s:%s", storage.Id(), unit.Id())
	buildTxn := func(attempt int) ([]txn.Op, error) {
		s, err := st.storageAttachment(storage, unit)
		if errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, errors.Trace(err)
		}
		if s.doc.Life == Dying {
			return nil, jujutxn.ErrNoOperations
		}
		return destroyStorageAttachmentOps(storage, unit), nil
	}
	return st.run(buildTxn)
}
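Together with RemoveStorageAttachment above, teardown is two-phase: this function marks the attachment Dying, and removal is only legal once the attachment has advanced to Dead. A hedged sketch of a caller; the wait step is elided because it belongs to the storage provisioner:

// hypotheticalDetach sequences the two phases; real code must wait for the
// attachment to reach Dead between the calls.
func hypotheticalDetach(st *State, storage names.StorageTag, unit names.UnitTag) error {
	if err := st.DestroyStorageAttachment(storage, unit); err != nil {
		return errors.Trace(err)
	}
	// ... the attachment must become Dead here ...
	return st.RemoveStorageAttachment(storage, unit)
}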