Example #1
// SetAPIHostPorts sets the addresses of the API server instances.
// Each server is represented by one element in the top level slice.
func (st *State) SetAPIHostPorts(netHostsPorts [][]network.HostPort) error {
	controllers, closer := st.getCollection(controllersC)
	defer closer()
	doc := apiHostPortsDoc{
		APIHostPorts: fromNetworkHostsPorts(netHostsPorts),
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		var existingDoc apiHostPortsDoc
		err := controllers.Find(bson.D{{"_id", apiHostPortsKey}}).One(&existingDoc)
		if err != nil {
			return nil, err
		}
		op := txn.Op{
			C:  controllersC,
			Id: apiHostPortsKey,
			Assert: bson.D{{
				"txn-revno", existingDoc.TxnRevno,
			}},
		}
		hostPorts := networkHostsPorts(existingDoc.APIHostPorts)
		if !hostsPortsEqual(netHostsPorts, hostPorts) {
			op.Update = bson.D{{
				"$set", bson.D{{"apihostports", doc.APIHostPorts}},
			}}
		} else {
			return nil, statetxn.ErrNoOperations
		}
		return []txn.Op{op}, nil
	}
	if err := st.run(buildTxn); err != nil {
		return errors.Annotate(err, "cannot set API addresses")
	}
	logger.Debugf("setting API hostPorts: %v", netHostsPorts)
	return nil
}
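
The hostsPortsEqual helper used above is not shown on this page. A minimal sketch of what such a comparison could look like, assuming network.HostPort is a plain comparable struct (a hypothetical reconstruction, not the juju original):

// hostsPortsEqual reports whether two [][]network.HostPort values are
// element-wise equal. Sketch only: assumes HostPort supports ==.
func hostsPortsEqual(a, b [][]network.HostPort) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if len(a[i]) != len(b[i]) {
			return false
		}
		for j := range a[i] {
			if a[i][j] != b[i][j] {
				return false
			}
		}
	}
	return true
}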
Example #2
// removeOps returns the operations necessary to remove the relation. If
// ignoreService is not empty, no operations affecting that service will be
// included; if departingUnit is not nil, this implies that the relation's
// services may be Dying and otherwise unreferenced, and may thus require
// removal themselves.
func (r *Relation) removeOps(ignoreService string, departingUnit *Unit) ([]txn.Op, error) {
	relOp := txn.Op{
		C:      relationsC,
		Id:     r.doc.Key,
		Remove: true,
	}
	if departingUnit != nil {
		relOp.Assert = bson.D{{"life", Dying}, {"unitcount", 1}}
	} else {
		relOp.Assert = bson.D{{"life", Alive}, {"unitcount", 0}}
	}
	ops := []txn.Op{relOp}
	for _, ep := range r.doc.Endpoints {
		if ep.ServiceName == ignoreService {
			continue
		}
		var asserts bson.D
		hasRelation := bson.D{{"relationcount", bson.D{{"$gt", 0}}}}
		if departingUnit == nil {
			// We're constructing a destroy operation, either of the relation
			// or one of its services, and can therefore be assured that both
			// services are Alive.
			asserts = append(hasRelation, isAliveDoc...)
		} else if ep.ServiceName == departingUnit.ServiceName() {
			// This service must have at least one unit -- the one that's
			// departing the relation -- so it cannot be ready for removal.
			cannotDieYet := bson.D{{"unitcount", bson.D{{"$gt", 0}}}}
			asserts = append(hasRelation, cannotDieYet...)
		} else {
			// This service may require immediate removal.
			services, closer := r.st.getCollection(servicesC)
			defer closer()

			svc := &Service{st: r.st}
			hasLastRef := bson.D{{"life", Dying}, {"unitcount", 0}, {"relationcount", 1}}
			removable := append(bson.D{{"_id", ep.ServiceName}}, hasLastRef...)
			if err := services.Find(removable).One(&svc.doc); err == nil {
				ops = append(ops, svc.removeOps(hasLastRef)...)
				continue
			} else if err != mgo.ErrNotFound {
				return nil, err
			}
			// If not, we must check that this is still the case when the
			// transaction is applied.
			asserts = bson.D{{"$or", []bson.D{
				{{"life", Alive}},
				{{"unitcount", bson.D{{"$gt", 0}}}},
				{{"relationcount", bson.D{{"$gt", 1}}}},
			}}}
		}
		ops = append(ops, txn.Op{
			C:      servicesC,
			Id:     ep.ServiceName,
			Assert: asserts,
			Update: bson.D{{"$inc", bson.D{{"relationcount", -1}}}},
		})
	}
	cleanupOp := r.st.newCleanupOp(cleanupRelationSettings, fmt.Sprintf("r#%d#", r.Id()))
	return append(ops, cleanupOp), nil
}
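
The ops returned by removeOps are not run here; a caller folds them into a larger transaction. A hedged sketch of such a caller (the helper is hypothetical; runTransaction appears in other examples on this page):

// destroyRelationTxn shows how removeOps output might be consumed when a
// relation with no remaining units is torn down. Sketch, not juju source.
func destroyRelationTxn(r *Relation) error {
	ops, err := r.removeOps("", nil)
	if err != nil {
		return err
	}
	return r.st.runTransaction(ops)
}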
Example #3
// SetAPIHostPorts sets the addresses of the API server instances.
// Each server is represented by one element in the top level slice.
func (st *State) SetAPIHostPorts(netHostsPorts [][]network.HostPort) error {
	doc := apiHostPortsDoc{
		APIHostPorts: fromNetworkHostsPorts(netHostsPorts),
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		existing, err := st.APIHostPorts()
		if err != nil {
			return nil, err
		}
		op := txn.Op{
			C:  stateServersC,
			Id: apiHostPortsKey,
			Assert: bson.D{{
				"apihostports", fromNetworkHostsPorts(existing),
			}},
		}
		if !hostsPortsEqual(netHostsPorts, existing) {
			op.Update = bson.D{{
				"$set", bson.D{{"apihostports", doc.APIHostPorts}},
			}}
		}
		return []txn.Op{op}, nil
	}
	if err := st.run(buildTxn); err != nil {
		return errors.Annotate(err, "cannot set API addresses")
	}
	logger.Debugf("setting API hostPorts: %v", netHostsPorts)
	return nil
}
Example #4
File: model.go Project: bac/juju
// validateCloudRegion validates the given region name against the
// provided Cloud definition, and returns a txn.Op to include in a
// transaction to assert the same.
func validateCloudRegion(cloud jujucloud.Cloud, cloudName, regionName string) (txn.Op, error) {
	// Ensure that the cloud region is valid, or if one is not specified,
	// that the cloud does not support regions.
	assertCloudRegionOp := txn.Op{
		C:  cloudsC,
		Id: cloudName,
	}
	if regionName != "" {
		region, err := jujucloud.RegionByName(cloud.Regions, regionName)
		if err != nil {
			return txn.Op{}, errors.Trace(err)
		}
		assertCloudRegionOp.Assert = bson.D{
			{"regions." + region.Name, bson.D{{"$exists", true}}},
		}
	} else {
		if len(cloud.Regions) > 0 {
			return txn.Op{}, errors.NotValidf("missing CloudRegion")
		}
		assertCloudRegionOp.Assert = bson.D{
			{"regions", bson.D{{"$exists", false}}},
		}
	}
	return assertCloudRegionOp, nil
}
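
The returned op carries an Assert but no mutation; its only job is to abort the transaction if the cloud's regions change between validation and commit. A sketch of folding it into a model-creation transaction (the surrounding helper is hypothetical):

func modelCreationOps(cloud jujucloud.Cloud, cloudName, regionName string) ([]txn.Op, error) {
	assertCloudRegionOp, err := validateCloudRegion(cloud, cloudName, regionName)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The assert-only op goes first; ops inserting the model documents
	// themselves would follow.
	return []txn.Op{assertCloudRegionOp}, nil
}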
Example #5
// SetAPIHostPorts sets the addresses of the API server instances.
// Each server is represented by one element in the top level slice.
func (st *State) SetAPIHostPorts(hps [][]network.HostPort) error {
	doc := apiHostPortsDoc{
		APIHostPorts: instanceHostPortsToHostPorts(hps),
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		existing, err := st.APIHostPorts()
		if err != nil {
			return nil, err
		}
		op := txn.Op{
			C:  stateServersC,
			Id: apiHostPortsKey,
			Assert: bson.D{{
				"apihostports", instanceHostPortsToHostPorts(existing),
			}},
		}
		if !hostPortsEqual(hps, existing) {
			op.Update = bson.D{{
				"$set", bson.D{{"apihostports", doc.APIHostPorts}},
			}}
		}
		return []txn.Op{op}, nil
	}
	if err := st.run(buildTxn); err != nil {
		return errors.Annotate(err, "cannot set API addresses")
	}
	return nil
}
Example #6
// SetHostedEnvironCount is an upgrade step that sets hostedEnvCountDoc.Count
// to the number of hosted environments.
func SetHostedEnvironCount(st *State) error {
	environments, closer := st.getCollection(environmentsC)
	defer closer()

	envCount, err := environments.Find(nil).Count()
	if err != nil {
		return errors.Annotate(err, "failed to read environments")
	}

	stateServers, closer := st.getCollection(stateServersC)
	defer closer()

	count, err := stateServers.FindId(hostedEnvCountKey).Count()
	if err != nil {
		return errors.Annotate(err, "failed to read state server")
	}

	hostedCount := envCount - 1 // -1 as we don't count the system environment
	op := txn.Op{
		C:  stateServersC,
		Id: hostedEnvCountKey,
	}
	if count == 0 {
		op.Assert = txn.DocMissing
		op.Insert = &hostedEnvCountDoc{hostedCount}
	} else {
		op.Update = bson.D{{"$set", bson.D{{"refcount", hostedCount}}}}
	}

	return st.runTransaction([]txn.Op{op})
}
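
The read-then-branch shape above (count the doc, then pick DocMissing+Insert or Update) is a recurring upsert idiom. A generic sketch with hypothetical names:

// upsertOp chooses between insert and update based on a prior existence
// check. The DocMissing assert catches a concurrent insert between the
// check and the transaction; an update on a vanished doc likewise aborts.
func upsertOp(coll string, id interface{}, exists bool, doc interface{}, update bson.D) txn.Op {
	op := txn.Op{C: coll, Id: id}
	if exists {
		op.Update = update
	} else {
		op.Assert = txn.DocMissing
		op.Insert = doc
	}
	return op
}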
Example #7
// removeUnitOps returns the operations necessary to remove the supplied unit,
// assuming the supplied asserts apply to the unit document.
func (s *Service) removeUnitOps(u *Unit, asserts bson.D) ([]txn.Op, error) {
	ops, err := u.destroyHostOps(s)
	if err != nil {
		return nil, err
	}

	observedFieldsMatch := bson.D{
		{"charmurl", u.doc.CharmURL},
		{"machineid", u.doc.MachineId},
	}
	ops = append(ops, txn.Op{
		C:      unitsC,
		Id:     u.doc.Name,
		Assert: append(observedFieldsMatch, asserts...),
		Remove: true,
	},
		removeConstraintsOp(s.st, u.globalKey()),
		removeStatusOp(s.st, u.globalKey()),
		annotationRemoveOp(s.st, u.globalKey()),
		s.st.newCleanupOp(cleanupRemovedUnit, u.doc.Name),
	)
	if u.doc.CharmURL != nil {
		decOps, err := settingsDecRefOps(s.st, s.doc.Name, u.doc.CharmURL)
		if errors.IsNotFound(err) {
			return nil, errRefresh
		} else if err != nil {
			return nil, err
		}
		ops = append(ops, decOps...)
	}
	if s.doc.Life == Dying && s.doc.RelationCount == 0 && s.doc.UnitCount == 1 {
		hasLastRef := bson.D{{"life", Dying}, {"relationcount", 0}, {"unitcount", 1}}
		return append(ops, s.removeOps(hasLastRef)...), nil
	}
	svcOp := txn.Op{
		C:      servicesC,
		Id:     s.doc.Name,
		Update: bson.D{{"$inc", bson.D{{"unitcount", -1}}}},
	}
	if s.doc.Life == Alive {
		svcOp.Assert = bson.D{{"life", Alive}, {"unitcount", bson.D{{"$gt", 0}}}}
	} else {
		svcOp.Assert = bson.D{
			{"life", Dying},
			{"$or", []bson.D{
				{{"unitcount", bson.D{{"$gt", 1}}}},
				{{"relationcount", bson.D{{"$gt", 0}}}},
			}},
		}
	}
	ops = append(ops, svcOp)

	return ops, nil
}
Example #8
// JustRemoveOp returns a txn.Op that deletes a refcount doc so long as
// the refcount matches count. You should avoid using this method in
// most cases.
func (ns nsRefcounts_) JustRemoveOp(collName, key string, count int) txn.Op {
	op := txn.Op{
		C:      collName,
		Id:     key,
		Remove: true,
	}
	if count >= 0 {
		op.Assert = bson.D{{"refcount", count}}
	}
	return op
}
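
A hedged usage sketch: remove a refcount doc only while its count is still zero, so a concurrent increment aborts the transaction. It assumes the package exposes an nsRefcounts instance of nsRefcounts_ (as the receiver suggests) and a refcountsC collection name:

// removeSettingsRefcountOp deletes a refcount doc that has dropped to
// zero. Passing a negative count would skip the assertion entirely,
// which is what makes JustRemoveOp risky in the general case.
func removeSettingsRefcountOp(key string) txn.Op {
	return nsRefcounts.JustRemoveOp(refcountsC, key, 0)
}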
Example #9
// incUnitCountOp returns the operation to increment the service's unit count.
func (s *Service) incUnitCountOp(asserts bson.D) txn.Op {
	op := txn.Op{
		C:      servicesC,
		Id:     s.doc.DocID,
		Update: bson.D{{"$inc", bson.D{{"unitcount", 1}}}},
	}
	if len(asserts) > 0 {
		op.Assert = asserts
	}
	return op
}
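
A usage sketch: make the increment conditional on the service still being Alive. isAliveDoc appears in other examples on this page; the wrapper itself is hypothetical:

// addUnitCountOp bumps the unit count only while the service is Alive.
// Passing an empty bson.D would skip the assertion entirely.
func (s *Service) addUnitCountOp() txn.Op {
	return s.incUnitCountOp(isAliveDoc)
}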
Example #10
// SaveMetadata implements Storage.SaveMetadata and behaves as save-or-update.
func (s *storage) SaveMetadata(metadata []Metadata) error {
	if len(metadata) == 0 {
		return nil
	}

	newDocs := make([]imagesMetadataDoc, len(metadata))
	for i, m := range metadata {
		newDoc := s.mongoDoc(m)
		if err := validateMetadata(&newDoc); err != nil {
			return err
		}
		newDocs[i] = newDoc
	}

	buildTxn := func(attempt int) ([]txn.Op, error) {
		var ops []txn.Op
		for _, newDoc := range newDocs {
			newDocCopy := newDoc
			op := txn.Op{
				C:  s.collection,
				Id: newDocCopy.Id,
			}

			// Check if this image metadata is already known.
			existing, err := s.getMetadata(newDocCopy.Id)
			if errors.IsNotFound(err) {
				op.Assert = txn.DocMissing
				op.Insert = &newDocCopy
				ops = append(ops, op)
				logger.Debugf("inserting cloud image metadata for %v", newDocCopy.Id)
			} else if err != nil {
				return nil, errors.Trace(err)
			} else if existing.ImageId != newDocCopy.ImageId {
				// need to update imageId
				op.Assert = txn.DocExists
				op.Update = bson.D{{"$set", bson.D{{"image_id", newDocCopy.ImageId}}}}
				ops = append(ops, op)
				logger.Debugf("updating cloud image id for metadata %v", newDocCopy.Id)
			}
		}
		if len(ops) == 0 {
			return nil, jujutxn.ErrNoOperations
		}
		return ops, nil
	}

	err := s.store.RunTransaction(buildTxn)
	if err != nil {
		return errors.Annotate(err, "cannot save cloud image metadata")
	}
	return nil
}
Example #11
// updateEndpointBindingsOp returns an op that merges the existing bindings with
// givenMap, using newMeta to validate the merged bindings, and asserting the
// existing ones haven't changed since we fetched them.
func updateEndpointBindingsOp(st *State, key string, givenMap map[string]string, newMeta *charm.Meta) (txn.Op, error) {
	// Fetch existing bindings.
	existingMap, txnRevno, err := readEndpointBindings(st, key)
	if err != nil && !errors.IsNotFound(err) {
		return txn.Op{}, errors.Trace(err)
	}

	// Merge existing with given as needed.
	updatedMap, removedKeys, err := mergeBindings(givenMap, existingMap, newMeta)
	if err != nil {
		return txn.Op{}, errors.Trace(err)
	}

	// Validate the bindings before updating.
	if err := validateEndpointBindingsForCharm(st, updatedMap, newMeta); err != nil {
		return txn.Op{}, errors.Trace(err)
	}

	// Prepare the update operations.
	sanitize := inSubdocEscapeReplacer("bindings")
	changes := make(bson.M, len(updatedMap))
	for endpoint, space := range updatedMap {
		changes[sanitize(endpoint)] = space
	}
	deletes := make(bson.M, len(removedKeys))
	for _, endpoint := range removedKeys {
		deletes[sanitize(endpoint)] = 1
	}

	var update bson.D
	if len(changes) != 0 {
		update = append(update, bson.DocElem{Name: "$set", Value: changes})
	}
	if len(deletes) != 0 {
		update = append(update, bson.DocElem{Name: "$unset", Value: deletes})
	}
	if len(update) == 0 {
		return txn.Op{}, jujutxn.ErrNoOperations
	}
	updateOp := txn.Op{
		C:      endpointBindingsC,
		Id:     key,
		Update: update,
	}
	if existingMap != nil {
		// Only assert existing haven't changed when they actually exist.
		updateOp.Assert = bson.D{{"txn-revno", txnRevno}}
	}
	return updateOp, nil
}
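
Because this function returns jujutxn.ErrNoOperations itself, a direct caller must treat that value as "nothing to do" rather than a failure. A sketch of such a caller (the wrapper is hypothetical):

// setBindings merges the given bindings (e.g. during a charm upgrade)
// and runs the single resulting op. Sketch only.
func setBindings(st *State, key string, given map[string]string, meta *charm.Meta) error {
	op, err := updateEndpointBindingsOp(st, key, given, meta)
	if err == jujutxn.ErrNoOperations {
		return nil
	} else if err != nil {
		return errors.Trace(err)
	}
	return st.runTransaction([]txn.Op{op})
}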
Example #12
// trackOp returns a txn.Op that will either insert or update the
// supplied payload, and fail if the observed precondition changes.
func (nsPayloads_) trackOp(payloads mongo.Collection, doc payloadDoc) (txn.Op, error) {
	docID := nsPayloads.docID(doc.UnitID, doc.Name)
	payloadOp := txn.Op{
		C:  payloads.Name(),
		Id: docID,
	}
	count, err := payloads.FindId(docID).Count()
	if err != nil {
		return txn.Op{}, errors.Trace(err)
	} else if count == 0 {
		payloadOp.Assert = txn.DocMissing
		payloadOp.Insert = doc
	} else {
		payloadOp.Assert = txn.DocExists
		payloadOp.Update = bson.D{{"$set", doc}}
	}
	return payloadOp, nil
}
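
Since trackOp queries the collection before building the op, it belongs inside a buildTxn closure that is rebuilt on every attempt. A hedged sketch, assuming a jujutxn.Runner as in github.com/juju/txn (the wrapper is hypothetical):

func trackPayload(runner jujutxn.Runner, payloads mongo.Collection, doc payloadDoc) error {
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// Re-query on each attempt so the DocMissing/DocExists assert
		// matches the state the transaction will actually see.
		op, err := nsPayloads.trackOp(payloads, doc)
		if err != nil {
			return nil, errors.Trace(err)
		}
		return []txn.Op{op}, nil
	}
	return runner.Run(buildTxn)
}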
Example #13
// setAddresses updates the machine's addresses (either Addresses or
// MachineAddresses, depending on the field argument).
func (m *Machine) setAddresses(addresses []network.Address, field *[]address, fieldName string) error {
	var changed bool
	envConfig, err := m.st.EnvironConfig()
	if err != nil {
		return err
	}
	network.SortAddresses(addresses, envConfig.PreferIPv6())
	stateAddresses := instanceAddressesToAddresses(addresses)
	buildTxn := func(attempt int) ([]txn.Op, error) {
		changed = false
		if attempt > 0 {
			if err := m.Refresh(); err != nil {
				return nil, err
			}
		}
		if m.doc.Life == Dead {
			return nil, ErrDead
		}
		op := txn.Op{
			C:      machinesC,
			Id:     m.doc.Id,
			Assert: append(bson.D{{fieldName, *field}}, notDeadDoc...),
		}
		if !addressesEqual(addresses, addressesToInstanceAddresses(*field)) {
			op.Update = bson.D{{"$set", bson.D{{fieldName, stateAddresses}}}}
			changed = true
		}
		return []txn.Op{op}, nil
	}
	switch err := m.st.run(buildTxn); err {
	case nil:
	case jujutxn.ErrExcessiveContention:
		return errors.Annotatef(err, "cannot set %s for machine %s", fieldName, m)
	default:
		return err
	}
	if !changed {
		return nil
	}
	*field = stateAddresses
	return nil
}
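
The juju source exposes setAddresses through exported wrappers; a hedged reconstruction of what they look like (exact names and doc fields are assumptions here):

// SetProviderAddresses records addresses reported by the provider.
func (m *Machine) SetProviderAddresses(addresses ...network.Address) error {
	return m.setAddresses(addresses, &m.doc.Addresses, "addresses")
}

// SetMachineAddresses records addresses observed on the machine itself.
func (m *Machine) SetMachineAddresses(addresses ...network.Address) error {
	return m.setAddresses(addresses, &m.doc.MachineAddresses, "machineaddresses")
}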
Example #14
// SetAPIHostPorts sets the addresses of the API server instances.
// Each server is represented by one element in the top level slice.
func (st *State) SetAPIHostPorts(netHostsPorts [][]network.HostPort) error {
	// Filter any addresses not on the default space, if possible.
	// All API servers need to be accessible there.
	var hpsToSet [][]network.HostPort
	for _, hps := range netHostsPorts {
		defaultSpaceHP, ok := network.SelectHostPortBySpace(hps, network.DefaultSpace)
		if !ok {
			logger.Warningf("cannot determine API addresses in space %q to use as API endpoints; using all addresses", network.DefaultSpace)
			hpsToSet = netHostsPorts
			break
		}
		hpsToSet = append(hpsToSet, []network.HostPort{defaultSpaceHP})
	}

	doc := apiHostPortsDoc{
		APIHostPorts: fromNetworkHostsPorts(hpsToSet),
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		existing, err := st.APIHostPorts()
		if err != nil {
			return nil, err
		}
		op := txn.Op{
			C:  controllersC,
			Id: apiHostPortsKey,
			Assert: bson.D{{
				"apihostports", fromNetworkHostsPorts(existing),
			}},
		}
		if !hostsPortsEqual(netHostsPorts, existing) {
			op.Update = bson.D{{
				"$set", bson.D{{"apihostports", doc.APIHostPorts}},
			}}
		}
		return []txn.Op{op}, nil
	}
	if err := st.run(buildTxn); err != nil {
		return errors.Annotate(err, "cannot set API addresses")
	}
	logger.Debugf("setting API hostPorts: %v", hpsToSet)
	return nil
}
Example #15
// SaveMetadata implements Storage.SaveMetadata and behaves as save-or-update.
func (s *storage) SaveMetadata(metadata Metadata) error {
	newDoc := s.mongoDoc(metadata)

	buildTxn := func(attempt int) ([]txn.Op, error) {
		op := txn.Op{
			C:  s.collection,
			Id: newDoc.Id,
		}

		// Check if this image metadata is already known.
		existing, err := s.getMetadata(newDoc.Id)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if existing.MetadataAttributes == metadata.MetadataAttributes {
			// may need to update the imageId
			if existing.ImageId != metadata.ImageId {
				op.Assert = txn.DocExists
				op.Update = bson.D{{"$set", bson.D{{"image_id", metadata.ImageId}}}}
				logger.Debugf("updating cloud image id for metadata %v", newDoc.Id)
			} else {
				return nil, jujutxn.ErrNoOperations
			}
		} else {
			op.Assert = txn.DocMissing
			op.Insert = &newDoc
			logger.Debugf("inserting cloud image metadata for %v", newDoc.Id)
		}
		return []txn.Op{op}, nil
	}

	err := s.store.RunTransaction(buildTxn)
	if err != nil {
		return errors.Annotatef(err, "cannot save metadata for cloud image %v", newDoc.ImageId)
	}
	return nil
}
Example #16
func removeStorageAttachmentOps(
	st *State,
	s *storageAttachment,
	si *storageInstance,
) ([]txn.Op, error) {
	if s.doc.Life != Dying {
		return nil, errors.New("storage attachment is not dying")
	}
	ops := []txn.Op{{
		C:      storageAttachmentsC,
		Id:     storageAttachmentId(s.doc.Unit, s.doc.StorageInstance),
		Assert: bson.D{{"life", Dying}},
		Remove: true,
	}, {
		C:      unitsC,
		Id:     s.doc.Unit,
		Assert: txn.DocExists,
		Update: bson.D{{"$inc", bson.D{{"storageattachmentcount", -1}}}},
	}}
	if si.doc.AttachmentCount == 1 {
		var hasLastRef bson.D
		if si.doc.Life == Dying {
			hasLastRef = bson.D{{"life", Dying}, {"attachmentcount", 1}}
		} else if si.doc.Owner == names.NewUnitTag(s.doc.Unit).String() {
			hasLastRef = bson.D{{"attachmentcount", 1}}
		}
		if len(hasLastRef) > 0 {
			// Either the storage instance is dying, or its owner
			// is a unit; in either case, no more attachments can
			// be added to the instance, so it can be removed.
			siOps, err := removeStorageInstanceOps(st, si.StorageTag(), hasLastRef)
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, siOps...)
			return ops, nil
		}
	}
	decrefOp := txn.Op{
		C:      storageInstancesC,
		Id:     si.doc.Id,
		Update: bson.D{{"$inc", bson.D{{"attachmentcount", -1}}}},
	}
	if si.doc.Life == Alive {
		// This may be the last reference, but the storage instance is
		// still alive. The storage instance will be removed when its
		// Destroy method is called, if it has no attachments.
		decrefOp.Assert = bson.D{
			{"life", Alive},
			{"attachmentcount", bson.D{{"$gt", 0}}},
		}
	} else {
		// If it's not the last reference when we checked, we want to
		// allow for concurrent attachment removals but want to ensure
		// that we don't drop to zero without removing the storage
		// instance.
		decrefOp.Assert = bson.D{
			{"life", Dying},
			{"attachmentcount", bson.D{{"$gt", 1}}},
		}
	}
	ops = append(ops, decrefOp)
	return ops, nil
}
Example #17
// advanceLifecycle ensures that the machine's lifecycle is no earlier
// than the supplied value. If the machine already has that lifecycle
// value, or a later one, no changes will be made to remote state. If
// the machine has any responsibilities that preclude a valid change in
// lifecycle, it will return an error.
func (original *Machine) advanceLifecycle(life Life) (err error) {
	containers, err := original.Containers()
	if err != nil {
		return err
	}
	if len(containers) > 0 {
		return &HasContainersError{
			MachineId:    original.doc.Id,
			ContainerIds: containers,
		}
	}
	m := original
	defer func() {
		if err == nil {
			// The machine's lifecycle is known to have advanced; it may be
			// known to have already advanced further than requested, in
			// which case we set the latest known valid value.
			if m == nil {
				life = Dead
			} else if m.doc.Life > life {
				life = m.doc.Life
			}
			original.doc.Life = life
		}
	}()
	// op applies the life change; its assertions are completed below.
	op := txn.Op{
		C:      machinesC,
		Id:     m.doc.Id,
		Update: bson.D{{"$set", bson.D{{"life", life}}}},
	}
	advanceAsserts := bson.D{
		{"jobs", bson.D{{"$nin", []MachineJob{JobManageEnviron}}}},
		{"$or", []bson.D{
			{{"principals", bson.D{{"$size", 0}}}},
			{{"principals", bson.D{{"$exists", false}}}},
		}},
		{"hasvote", bson.D{{"$ne", true}}},
	}
	// multiple attempts: one with original data, one with refreshed data, and a final
	// one intended to determine the cause of failure of the preceding attempt.
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// If the transaction was aborted, grab a fresh copy of the machine data.
		// We don't write to original, because the expectation is that state-
		// changing methods only set the requested change on the receiver; a case
		// could perhaps be made that this is not a helpful convention in the
		// context of the new state API, but we maintain consistency in the
		// face of uncertainty.
		if attempt != 0 {
			if m, err = m.st.Machine(m.doc.Id); errors.IsNotFound(err) {
				return nil, jujutxn.ErrNoOperations
			} else if err != nil {
				return nil, err
			}
		}
		// Check that the life change is sane, and collect the assertions
		// necessary to determine that it remains so.
		switch life {
		case Dying:
			if m.doc.Life != Alive {
				return nil, jujutxn.ErrNoOperations
			}
			op.Assert = append(advanceAsserts, isAliveDoc...)
		case Dead:
			if m.doc.Life == Dead {
				return nil, jujutxn.ErrNoOperations
			}
			op.Assert = append(advanceAsserts, notDeadDoc...)
		default:
			panic(fmt.Errorf("cannot advance lifecycle to %v", life))
		}
		// Check that the machine does not have any responsibilities that
		// prevent a lifecycle change.
		if hasJob(m.doc.Jobs, JobManageEnviron) {
			// (NOTE: When we enable multiple JobManageEnviron machines,
			// this restriction will be lifted, but we will assert that the
			// machine is not voting)
			return nil, fmt.Errorf("machine %s is required by the environment", m.doc.Id)
		}
		if m.doc.HasVote {
			return nil, fmt.Errorf("machine %s is a voting replica set member", m.doc.Id)
		}
		if len(m.doc.Principals) != 0 {
			return nil, &HasAssignedUnitsError{
				MachineId: m.doc.Id,
				UnitNames: m.doc.Principals,
			}
		}
		return []txn.Op{op}, nil
	}
	if err = m.st.run(buildTxn); err == jujutxn.ErrExcessiveContention {
		err = errors.Annotatef(err, "machine %s cannot advance lifecycle", m)
	}
	return err
}
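
advanceLifecycle is unexported; the juju source drives it through exported wrappers along these lines (a hedged reconstruction):

// Destroy advances the machine's lifecycle to Dying, if permitted.
func (m *Machine) Destroy() error {
	return m.advanceLifecycle(Dying)
}

// EnsureDead advances the machine's lifecycle to Dead, if permitted.
func (m *Machine) EnsureDead() error {
	return m.advanceLifecycle(Dead)
}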
Example #18
// AddImage is defined on the Storage interface.
func (s *imageStorage) AddImage(r io.Reader, metadata *Metadata) (resultErr error) {
	session := s.blobDb.Session.Copy()
	defer session.Close()
	managedStorage := s.getManagedStorage(session)
	path := imagePath(metadata.Kind, metadata.Series, metadata.Arch, metadata.SHA256)
	if err := managedStorage.PutForEnvironment(s.envUUID, path, r, metadata.Size); err != nil {
		return errors.Annotate(err, "cannot store image")
	}
	defer func() {
		if resultErr == nil {
			return
		}
		err := managedStorage.RemoveForEnvironment(s.envUUID, path)
		if err != nil {
			logger.Errorf("failed to remove image blob: %v", err)
		}
	}()

	newDoc := imageMetadataDoc{
		Id:        docId(metadata),
		EnvUUID:   s.envUUID,
		Kind:      metadata.Kind,
		Series:    metadata.Series,
		Arch:      metadata.Arch,
		Size:      metadata.Size,
		SHA256:    metadata.SHA256,
		SourceURL: metadata.SourceURL,
		Path:      path,
		Created:   time.Now(),
	}

	// Add or replace metadata. If replacing, record the
	// existing path so we can remove the blob later.
	var oldPath string
	buildTxn := func(attempt int) ([]txn.Op, error) {
		op := txn.Op{
			C:  imagemetadataC,
			Id: newDoc.Id,
		}

		// On the first attempt we assume we're adding a new image blob.
		// Subsequent attempts will fetch the existing
		// doc, record the old path, and attempt to update the
		// size, path and hash fields.
		if attempt == 0 {
			op.Assert = txn.DocMissing
			op.Insert = &newDoc
		} else {
			oldDoc, err := s.imageMetadataDoc(metadata.EnvUUID, metadata.Kind, metadata.Series, metadata.Arch)
			if err != nil {
				return nil, err
			}
			oldPath = oldDoc.Path
			op.Assert = bson.D{{"path", oldPath}}
			if oldPath != path {
				op.Update = bson.D{{
					"$set", bson.D{
						{"size", metadata.Size},
						{"sha256", metadata.SHA256},
						{"path", path},
					},
				}}
			}
		}
		return []txn.Op{op}, nil
	}
	txnRunner := s.txnRunner(session)
	err := txnRunner.Run(buildTxn)
	if err != nil {
		return errors.Annotate(err, "cannot store image metadata")
	}

	if oldPath != "" && oldPath != path {
		// Attempt to remove the old path. Failure is non-fatal.
		err := managedStorage.RemoveForEnvironment(s.envUUID, oldPath)
		if err != nil {
			logger.Errorf("failed to remove old image blob: %v", err)
		} else {
			logger.Debugf("removed old image blob")
		}
	}
	return nil
}
Example #19
// advanceLifecycle ensures that the machine's lifecycle is no earlier
// than the supplied value. If the machine already has that lifecycle
// value, or a later one, no changes will be made to remote state. If
// the machine has any responsibilities that preclude a valid change in
// lifecycle, it will return an error.
func (original *Machine) advanceLifecycle(life Life) (err error) {
	containers, err := original.Containers()
	if err != nil {
		return err
	}
	if len(containers) > 0 {
		return &HasContainersError{
			MachineId:    original.doc.Id,
			ContainerIds: containers,
		}
	}
	m := original
	defer func() {
		if err == nil {
			// The machine's lifecycle is known to have advanced; it may be
			// known to have already advanced further than requested, in
			// which case we set the latest known valid value.
			if m == nil {
				life = Dead
			} else if m.doc.Life > life {
				life = m.doc.Life
			}
			original.doc.Life = life
		}
	}()
	// op applies the life change; its assertions are rebuilt on each attempt below.
	op := txn.Op{
		C:      machinesC,
		Id:     m.doc.DocID,
		Update: bson.D{{"$set", bson.D{{"life", life}}}},
	}
	// noUnits asserts that the machine has no principal units.
	noUnits := bson.DocElem{
		"$or", []bson.D{
			{{"principals", bson.D{{"$size", 0}}}},
			{{"principals", bson.D{{"$exists", false}}}},
		},
	}
	cleanupOp := m.st.newCleanupOp(cleanupDyingMachine, m.doc.Id)
	// multiple attempts: one with original data, one with refreshed data, and a final
	// one intended to determine the cause of failure of the preceding attempt.
	buildTxn := func(attempt int) ([]txn.Op, error) {
		advanceAsserts := bson.D{
			{"jobs", bson.D{{"$nin", []MachineJob{JobManageEnviron}}}},
			{"hasvote", bson.D{{"$ne", true}}},
		}
		// Grab a fresh copy of the machine data.
		// We don't write to original, because the expectation is that state-
		// changing methods only set the requested change on the receiver; a case
		// could perhaps be made that this is not a helpful convention in the
		// context of the new state API, but we maintain consistency in the
		// face of uncertainty.
		if m, err = m.st.Machine(m.doc.Id); errors.IsNotFound(err) {
			return nil, jujutxn.ErrNoOperations
		} else if err != nil {
			return nil, err
		}
		// Check that the life change is sane, and collect the assertions
		// necessary to determine that it remains so.
		switch life {
		case Dying:
			if m.doc.Life != Alive {
				return nil, jujutxn.ErrNoOperations
			}
			advanceAsserts = append(advanceAsserts, isAliveDoc...)
		case Dead:
			if m.doc.Life == Dead {
				return nil, jujutxn.ErrNoOperations
			}
			advanceAsserts = append(advanceAsserts, notDeadDoc...)
		default:
			panic(fmt.Errorf("cannot advance lifecycle to %v", life))
		}
		// Check that the machine does not have any responsibilities that
		// prevent a lifecycle change.
		if hasJob(m.doc.Jobs, JobManageEnviron) {
			// (NOTE: When we enable multiple JobManageEnviron machines,
			// this restriction will be lifted, but we will assert that the
			// machine is not voting)
			return nil, fmt.Errorf("machine %s is required by the environment", m.doc.Id)
		}
		if m.doc.HasVote {
			return nil, fmt.Errorf("machine %s is a voting replica set member", m.doc.Id)
		}
		// If there are no alive units left on the machine, or all the services are dying,
		// then the machine may soon be destroyed by a cleanup worker.
		// In that case, we don't want to return any error about not being able to
		// destroy a machine with units, as it would be a lie.
		if life == Dying {
			canDie := true
			var principalUnitnames []string
			for _, principalUnit := range m.doc.Principals {
				principalUnitnames = append(principalUnitnames, principalUnit)
				u, err := m.st.Unit(principalUnit)
				if err != nil {
					return nil, errors.Annotatef(err, "reading machine %s principal unit %v", m, m.doc.Principals[0])
				}
				svc, err := u.Service()
				if err != nil {
					return nil, errors.Annotatef(err, "reading machine %s principal unit service %v", m, u.doc.Service)
				}
				if u.Life() == Alive && svc.Life() == Alive {
					canDie = false
					break
				}
			}
			if canDie {
				containers, err := m.Containers()
				if err != nil {
					return nil, errors.Annotatef(err, "reading machine %s containers", m)
				}
				canDie = len(containers) == 0
			}
			if canDie {
				checkUnits := bson.DocElem{
					"$or", []bson.D{
						{{"principals", principalUnitnames}},
						{{"principals", bson.D{{"$size", 0}}}},
						{{"principals", bson.D{{"$exists", false}}}},
					},
				}
				op.Assert = append(advanceAsserts, checkUnits)
				containerCheck := txn.Op{
					C:  containerRefsC,
					Id: m.doc.DocID,
					Assert: bson.D{{"$or", []bson.D{
						{{"children", bson.D{{"$size", 0}}}},
						{{"children", bson.D{{"$exists", false}}}},
					}}},
				}
				return []txn.Op{op, containerCheck, cleanupOp}, nil
			}
		}

		if len(m.doc.Principals) > 0 {
			return nil, &HasAssignedUnitsError{
				MachineId: m.doc.Id,
				UnitNames: m.doc.Principals,
			}
		}
		advanceAsserts = append(advanceAsserts, noUnits)

		if life == Dead {
			// A machine may not become Dead until it has no more
			// attachments to inherently machine-bound storage.
			storageAsserts, err := m.assertNoPersistentStorage()
			if err != nil {
				return nil, errors.Trace(err)
			}
			advanceAsserts = append(advanceAsserts, storageAsserts...)
		}

		// Add the additional asserts needed for this transaction.
		op.Assert = advanceAsserts
		return []txn.Op{op, cleanupOp}, nil
	}
	if err = m.st.run(buildTxn); err == jujutxn.ErrExcessiveContention {
		err = errors.Annotatef(err, "machine %s cannot advance lifecycle", m)
	}
	return err
}
Example #20
// Add implements Storage.Add.
func (s *binaryStorage) Add(r io.Reader, metadata Metadata) (resultErr error) {
	// Add the binary file to storage.
	path := fmt.Sprintf("tools/%s-%s", metadata.Version, metadata.SHA256)
	if err := s.managedStorage.PutForBucket(s.modelUUID, path, r, metadata.Size); err != nil {
		return errors.Annotate(err, "cannot store binary file")
	}
	defer func() {
		if resultErr == nil {
			return
		}
		err := s.managedStorage.RemoveForBucket(s.modelUUID, path)
		if err != nil {
			logger.Errorf("failed to remove binary blob: %v", err)
		}
	}()

	newDoc := metadataDoc{
		Id:      metadata.Version,
		Version: metadata.Version,
		Size:    metadata.Size,
		SHA256:  metadata.SHA256,
		Path:    path,
	}

	// Add or replace metadata. If replacing, record the existing path so we
	// can remove it later.
	var oldPath string
	buildTxn := func(attempt int) ([]txn.Op, error) {
		op := txn.Op{
			C:  s.metadataCollection.Name,
			Id: newDoc.Id,
		}

		// On the first attempt we assume we're adding new binary files.
		// Subsequent attempts to add files will fetch the existing
		// doc, record the old path, and attempt to update the
		// size, path and hash fields.
		if attempt == 0 {
			op.Assert = txn.DocMissing
			op.Insert = &newDoc
		} else {
			oldDoc, err := s.findMetadata(metadata.Version)
			if err != nil {
				return nil, err
			}
			oldPath = oldDoc.Path
			op.Assert = bson.D{{"path", oldPath}}
			if oldPath != path {
				op.Update = bson.D{{
					"$set", bson.D{
						{"size", metadata.Size},
						{"sha256", metadata.SHA256},
						{"path", path},
					},
				}}
			}
		}
		return []txn.Op{op}, nil
	}
	err := s.txnRunner.Run(buildTxn)
	if err != nil {
		return errors.Annotate(err, "cannot store binary metadata")
	}

	if oldPath != "" && oldPath != path {
		// Attempt to remove the old path. Failure is non-fatal.
		err := s.managedStorage.RemoveForBucket(s.modelUUID, oldPath)
		if err != nil {
			logger.Errorf("failed to remove old binary blob: %v", err)
		} else {
			logger.Debugf("removed old binary blob")
		}
	}
	return nil
}
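
Note the write-first shape: attempt 0 optimistically asserts DocMissing and inserts, and only a retry pays for the read; this is the inverse of the read-first upsert in Example #6. A hedged usage sketch (the caller and Metadata field values are illustrative):

// addAgentBinary stores one agent binary and its metadata.
// Hypothetical caller; fields follow the metadataDoc above.
func addAgentBinary(s *binaryStorage, r io.Reader, vers string, size int64, sha256 string) error {
	return s.Add(r, Metadata{
		Version: vers,
		Size:    size,
		SHA256:  sha256,
	})
}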
Example #21
File: volume.go Project: bac/juju
// machineStorageDecrefOp returns a txn.Op that will decrement the attachment
// count for a given machine storage entity (volume or filesystem), given its
// current attachment count and lifecycle state. If the attachment count goes
// to zero, then the entity should become Dead.
func machineStorageDecrefOp(
	collection, id string,
	attachmentCount int, life Life,
	machine names.MachineTag,
	binding string,
) txn.Op {
	op := txn.Op{
		C:  collection,
		Id: id,
	}
	if life == Dying {
		if attachmentCount == 1 {
			// This is the last attachment: the volume can be
			// marked Dead. There can be no concurrent attachments
			// since it is Dying.
			op.Assert = bson.D{
				{"life", Dying},
				{"attachmentcount", 1},
			}
			op.Update = bson.D{
				{"$inc", bson.D{{"attachmentcount", -1}}},
				{"$set", bson.D{{"life", Dead}}},
			}
		} else {
			// This is not the last attachment; just decref,
			// allowing for concurrent attachment removals but
			// ensuring we don't drop to zero without marking
			// the volume Dead.
			op.Assert = bson.D{
				{"life", Dying},
				{"attachmentcount", bson.D{{"$gt", 1}}},
			}
			op.Update = bson.D{
				{"$inc", bson.D{{"attachmentcount", -1}}},
			}
		}
	} else {
		// The volume is still Alive: decref, retrying if the
		// volume is destroyed concurrently or the binding changes.
		// If the volume is bound to the machine, advance it to
		// Dead; binding storage to a machine and attaching the
		// storage to multiple machines will be mutually exclusive.
		//
		// Otherwise, when DestroyVolume is called, the volume will
		// be marked Dead if it has no attachments.
		update := bson.D{
			{"$inc", bson.D{{"attachmentcount", -1}}},
		}
		if binding == machine.String() {
			update = append(update, bson.DocElem{
				"$set", bson.D{{"life", Dead}},
			})
		}
		op.Assert = bson.D{
			{"life", Alive},
			{"binding", binding},
			{"attachmentcount", bson.D{{"$gt", 0}}},
		}
		op.Update = update
	}
	return op
}
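
A usage sketch: build the decref op when removing one machine's volume attachment. The collection name, doc fields, and surrounding flow are assumptions:

// volumeAttachmentDecrefOp decrements the volume's attachment count as
// part of removing a single machine attachment. Sketch only.
func volumeAttachmentDecrefOp(v *volume, machine names.MachineTag) txn.Op {
	return machineStorageDecrefOp(
		volumesC, v.doc.Name,
		v.doc.AttachmentCount, v.doc.Life,
		machine, v.doc.Binding,
	)
}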