// SetHostedEnvironCount is an upgrade step that sets hostedEnvCountDoc.Count
// to the number of hosted environments.
func SetHostedEnvironCount(st *State) error {
	environments, closer := st.getCollection(environmentsC)
	defer closer()

	envCount, err := environments.Find(nil).Count()
	if err != nil {
		return errors.Annotate(err, "failed to read environments")
	}

	stateServers, closer := st.getCollection(stateServersC)
	defer closer()

	count, err := stateServers.FindId(hostedEnvCountKey).Count()
	if err != nil {
		return errors.Annotate(err, "failed to read state server")
	}

	hostedCount := envCount - 1 // -1 as we don't count the system environment
	op := txn.Op{
		C:  stateServersC,
		Id: hostedEnvCountKey,
	}
	if count == 0 {
		op.Assert = txn.DocMissing
		op.Insert = &hostedEnvCountDoc{hostedCount}
	} else {
		op.Update = bson.D{{"$set", bson.D{{"refcount", hostedCount}}}}
	}

	return st.runTransaction([]txn.Op{op})
}
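// A minimal sketch of the refcount document written above, assuming a single
// counter field. The Go field name is an assumption inferred from the
// "refcount" key in the $set update and the positional literal
// &hostedEnvCountDoc{hostedCount}; it is not copied from the real definition.
type hostedEnvCountDoc struct {
	// RefCount holds the number of hosted environments, i.e. every
	// environment except the system environment.
	RefCount int `bson:"refcount"`
}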
// SaveMetadata implements Storage.SaveMetadata and behaves as save-or-update.
func (s *storage) SaveMetadata(metadata []Metadata) error {
	if len(metadata) == 0 {
		return nil
	}

	newDocs := make([]imagesMetadataDoc, len(metadata))
	for i, m := range metadata {
		newDoc := s.mongoDoc(m)
		if err := validateMetadata(&newDoc); err != nil {
			return err
		}
		newDocs[i] = newDoc
	}

	buildTxn := func(attempt int) ([]txn.Op, error) {
		var ops []txn.Op
		for _, newDoc := range newDocs {
			newDocCopy := newDoc
			op := txn.Op{
				C:  s.collection,
				Id: newDocCopy.Id,
			}

			// Check if this image metadata is already known.
			existing, err := s.getMetadata(newDocCopy.Id)
			if errors.IsNotFound(err) {
				op.Assert = txn.DocMissing
				op.Insert = &newDocCopy
				ops = append(ops, op)
				logger.Debugf("inserting cloud image metadata for %v", newDocCopy.Id)
			} else if err != nil {
				return nil, errors.Trace(err)
			} else if existing.ImageId != newDocCopy.ImageId {
				// need to update imageId
				op.Assert = txn.DocExists
				op.Update = bson.D{{"$set", bson.D{{"image_id", newDocCopy.ImageId}}}}
				ops = append(ops, op)
				logger.Debugf("updating cloud image id for metadata %v", newDocCopy.Id)
			}
		}
		if len(ops) == 0 {
			return nil, jujutxn.ErrNoOperations
		}
		return ops, nil
	}

	err := s.store.RunTransaction(buildTxn)
	if err != nil {
		return errors.Annotate(err, "cannot save cloud image metadata")
	}
	return nil
}
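// The newDocCopy assignment above matters because op.Insert stores a pointer.
// Assuming this code targets the pre-Go 1.22 loop semantics, the range
// variable is reused on every iteration, so taking &newDoc directly would
// leave every insert op pointing at the same (final) document. A minimal,
// self-contained illustration of the same idiom:
func pointerPerElement(values []int) []*int {
	var out []*int
	for _, v := range values {
		v := v // copy the loop variable so each pointer is distinct
		out = append(out, &v)
	}
	return out
}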
// trackOp returns a txn.Op that will either insert or update the
// supplied payload, and fail if the observed precondition changes.
func (nsPayloads_) trackOp(payloads mongo.Collection, doc payloadDoc) (txn.Op, error) {
	docID := nsPayloads.docID(doc.UnitID, doc.Name)
	payloadOp := txn.Op{
		C:  payloads.Name(),
		Id: docID,
	}
	count, err := payloads.FindId(docID).Count()
	if err != nil {
		return txn.Op{}, errors.Trace(err)
	} else if count == 0 {
		payloadOp.Assert = txn.DocMissing
		payloadOp.Insert = doc
	} else {
		payloadOp.Assert = txn.DocExists
		payloadOp.Update = bson.D{{"$set", doc}}
	}
	return payloadOp, nil
}
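// A hedged usage sketch: build one insert-or-update op per payload and run
// them as a single transaction, so the batch aborts (and can be rebuilt) if
// any document's observed missing/exists state changes in the meantime. The
// runTxn parameter is a hypothetical stand-in for whatever transaction
// runner the caller holds; it is not part of the original code.
func trackAll(payloads mongo.Collection, docs []payloadDoc, runTxn func([]txn.Op) error) error {
	ops := make([]txn.Op, 0, len(docs))
	for _, doc := range docs {
		op, err := nsPayloads.trackOp(payloads, doc)
		if err != nil {
			return errors.Trace(err)
		}
		ops = append(ops, op)
	}
	return runTxn(ops)
}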
// SaveMetadata implements Storage.SaveMetadata and behaves as save-or-update.
func (s *storage) SaveMetadata(metadata Metadata) error {
	newDoc := s.mongoDoc(metadata)

	buildTxn := func(attempt int) ([]txn.Op, error) {
		op := txn.Op{
			C:  s.collection,
			Id: newDoc.Id,
		}

		// Check if this image metadata is already known.
		existing, err := s.getMetadata(newDoc.Id)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if existing.MetadataAttributes == metadata.MetadataAttributes {
			// May need to update the image id.
			if existing.ImageId != metadata.ImageId {
				op.Assert = txn.DocExists
				op.Update = bson.D{{"$set", bson.D{{"image_id", metadata.ImageId}}}}
				logger.Debugf("updating cloud image id for metadata %v", newDoc.Id)
			} else {
				return nil, jujutxn.ErrNoOperations
			}
		} else {
			op.Assert = txn.DocMissing
			op.Insert = &newDoc
			logger.Debugf("inserting cloud image metadata for %v", newDoc.Id)
		}
		return []txn.Op{op}, nil
	}

	err := s.store.RunTransaction(buildTxn)
	if err != nil {
		return errors.Annotatef(err, "cannot save metadata for cloud image %v", newDoc.ImageId)
	}
	return nil
}
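// Unlike the bulk variant above, this version never checks errors.IsNotFound,
// so it only works if getMetadata returns a zero Metadata with a nil error
// when the id is absent; the "attributes differ" branch then doubles as the
// "not found" branch and inserts with a DocMissing assert. A hedged sketch of
// that assumed contract; findOne and docToMetadata are hypothetical stand-ins
// for the underlying mgo query and the doc-to-Metadata conversion.
func getMetadataSketch(findOne func(id string, out *imagesMetadataDoc) error, id string) (Metadata, error) {
	var doc imagesMetadataDoc
	err := findOne(id, &doc)
	if err == mgo.ErrNotFound {
		// Absent: zero value and nil error, so the caller takes the insert path.
		return Metadata{}, nil
	} else if err != nil {
		return Metadata{}, errors.Trace(err)
	}
	return docToMetadata(doc), nil
}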
// Add implements Storage.Add.
func (s *binaryStorage) Add(r io.Reader, metadata Metadata) (resultErr error) {
	// Add the binary file to storage.
	path := fmt.Sprintf("tools/%s-%s", metadata.Version, metadata.SHA256)
	if err := s.managedStorage.PutForBucket(s.modelUUID, path, r, metadata.Size); err != nil {
		return errors.Annotate(err, "cannot store binary file")
	}
	defer func() {
		if resultErr == nil {
			return
		}
		err := s.managedStorage.RemoveForBucket(s.modelUUID, path)
		if err != nil {
			logger.Errorf("failed to remove binary blob: %v", err)
		}
	}()

	newDoc := metadataDoc{
		Id:      metadata.Version,
		Version: metadata.Version,
		Size:    metadata.Size,
		SHA256:  metadata.SHA256,
		Path:    path,
	}

	// Add or replace metadata. If replacing, record the existing path so we
	// can remove it later.
	var oldPath string
	buildTxn := func(attempt int) ([]txn.Op, error) {
		op := txn.Op{
			C:  s.metadataCollection.Name,
			Id: newDoc.Id,
		}

		// On the first attempt we assume we're adding new binary files.
		// Subsequent attempts to add files will fetch the existing
		// doc, record the old path, and attempt to update the
		// size, path and hash fields.
		if attempt == 0 {
			op.Assert = txn.DocMissing
			op.Insert = &newDoc
		} else {
			oldDoc, err := s.findMetadata(metadata.Version)
			if err != nil {
				return nil, err
			}
			oldPath = oldDoc.Path
			op.Assert = bson.D{{"path", oldPath}}
			if oldPath != path {
				op.Update = bson.D{{
					"$set", bson.D{
						{"size", metadata.Size},
						{"sha256", metadata.SHA256},
						{"path", path},
					},
				}}
			}
		}
		return []txn.Op{op}, nil
	}
	err := s.txnRunner.Run(buildTxn)
	if err != nil {
		return errors.Annotate(err, "cannot store binary metadata")
	}

	if oldPath != "" && oldPath != path {
		// Attempt to remove the old path. Failure is non-fatal.
		err := s.managedStorage.RemoveForBucket(s.modelUUID, oldPath)
		if err != nil {
			logger.Errorf("failed to remove old binary blob: %v", err)
		} else {
			logger.Debugf("removed old binary blob")
		}
	}
	return nil
}
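// A hedged caller sketch: Add needs the blob's size and SHA-256 up front,
// since the size is passed to PutForBucket and both values end up in the
// metadata doc, so callers typically hash the payload before streaming it.
// The field types used here (Version string, Size int64, SHA256 string) are
// assumptions based on how the fields are used above.
func addFromBytes(store *binaryStorage, version string, data []byte) error {
	hash := sha256.Sum256(data)
	metadata := Metadata{
		Version: version,
		Size:    int64(len(data)),
		SHA256:  fmt.Sprintf("%x", hash),
	}
	return store.Add(bytes.NewReader(data), metadata)
}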
// AddImage is defined on the Storage interface.
func (s *imageStorage) AddImage(r io.Reader, metadata *Metadata) (resultErr error) {
	session := s.blobDb.Session.Copy()
	defer session.Close()
	managedStorage := s.getManagedStorage(session)
	path := imagePath(metadata.Kind, metadata.Series, metadata.Arch, metadata.SHA256)
	if err := managedStorage.PutForEnvironment(s.envUUID, path, r, metadata.Size); err != nil {
		return errors.Annotate(err, "cannot store image")
	}
	defer func() {
		if resultErr == nil {
			return
		}
		err := managedStorage.RemoveForEnvironment(s.envUUID, path)
		if err != nil {
			logger.Errorf("failed to remove image blob: %v", err)
		}
	}()

	newDoc := imageMetadataDoc{
		Id:        docId(metadata),
		EnvUUID:   s.envUUID,
		Kind:      metadata.Kind,
		Series:    metadata.Series,
		Arch:      metadata.Arch,
		Size:      metadata.Size,
		SHA256:    metadata.SHA256,
		SourceURL: metadata.SourceURL,
		Path:      path,
		Created:   time.Now(),
	}

	// Add or replace metadata. If replacing, record the
	// existing path so we can remove the blob later.
	var oldPath string
	buildTxn := func(attempt int) ([]txn.Op, error) {
		op := txn.Op{
			C:  imagemetadataC,
			Id: newDoc.Id,
		}

		// On the first attempt we assume we're adding a new image blob.
		// Subsequent attempts to add image will fetch the existing
		// doc, record the old path, and attempt to update the
		// size, path and hash fields.
		if attempt == 0 {
			op.Assert = txn.DocMissing
			op.Insert = &newDoc
		} else {
			oldDoc, err := s.imageMetadataDoc(metadata.EnvUUID, metadata.Kind, metadata.Series, metadata.Arch)
			if err != nil {
				return nil, err
			}
			oldPath = oldDoc.Path
			op.Assert = bson.D{{"path", oldPath}}
			if oldPath != path {
				op.Update = bson.D{{
					"$set", bson.D{
						{"size", metadata.Size},
						{"sha256", metadata.SHA256},
						{"path", path},
					},
				}}
			}
		}
		return []txn.Op{op}, nil
	}
	txnRunner := s.txnRunner(session)
	err := txnRunner.Run(buildTxn)
	if err != nil {
		return errors.Annotate(err, "cannot store image metadata")
	}

	if oldPath != "" && oldPath != path {
		// Attempt to remove the old path. Failure is non-fatal.
		err := managedStorage.RemoveForEnvironment(s.envUUID, oldPath)
		if err != nil {
			logger.Errorf("failed to remove old image blob: %v", err)
		} else {
			logger.Debugf("removed old image blob")
		}
	}
	return nil
}
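// Hedged sketches of the two helpers called above; the exact formats are
// assumptions, not the real implementations. They illustrate the asymmetry
// that drives the cleanup code: docId keys the metadata doc by what the image
// is (env/kind/series/arch), while imagePath keys the blob by its content
// hash, so replacing an image's content changes the path but not the doc id,
// leaving an old blob that must be removed.
func docId(metadata *Metadata) string {
	return fmt.Sprintf("%s-%s-%s-%s", metadata.EnvUUID, metadata.Kind, metadata.Series, metadata.Arch)
}

func imagePath(kind, series, arch, checksum string) string {
	return fmt.Sprintf("images/%s-%s-%s:%s", kind, series, arch, checksum)
}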