Example #1
// Create creates a new file with the provided name in the GridFS.  If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods.  If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
//     func check(err error) {
//         if err != nil {
//             panic(err)
//         }
//     }
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     n, err := file.Write([]byte("Hello world!"))
//     check(err)
//     err = file.Close()
//     check(err)
//     fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used to
// help with file creation. For example:
//
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     messages, err := os.Open("/var/log/messages")
//     check(err)
//     defer messages.Close()
//     _, err = io.Copy(file, messages)
//     check(err)
//     err = file.Close()
//     check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
	file = gfs.newFile()
	file.mode = gfsWriting
	file.wsum = md5.New()
	file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
	return
}
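
A hedged usage sketch, not part of the mgo documentation: reading the latest
version of the file back with Open, which the up-to-date uploadDate described
above makes visible atomically. The session variable, the "mydb" database name,
and the check helper are assumptions carried over from the doc comment; the
snippet also relies on the io/ioutil and fmt packages.

	gfs := session.DB("mydb").GridFS("fs")
	file, err := gfs.Open("myfile.txt")
	check(err)
	data, err := ioutil.ReadAll(file) // *GridFile also implements io.Reader
	check(err)
	err = file.Close()
	check(err)
	fmt.Printf("%q\n", string(data))
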
Example #2
// Save stores the scan object in the database. On creation, the scan object
// receives the id that refers to its entry in the database
func (dao ScanDAO) Save(scan *model.Scan) error {
	// Check if the programmer forgot to set the database in scanDAO object
	if dao.Database == nil {
		return ErrScanDAOUndefinedDatabase
	}

	// When creating a new scan object, the id is probably still unset (an empty
	// bson.ObjectId), so we must initialize it
	if len(scan.Id.Hex()) == 0 {
		scan.Id = bson.NewObjectId()
	}

	// Every time we modify a scan object we increase the revision counter to identify
	// changes in high-level structures. A better approach might be to do this on the
	// MongoDB server side with the increment operator; see "Use the Increment Operator
	// to Perform Operations Server-Side" in
	// http://docs.mongodb.org/manual/tutorial/optimize-query-performance-with-indexes-and-projections/
	scan.Revision += 1

	// Store the last time that the object was modified
	scan.LastModifiedAt = time.Now().UTC()

	// Upsert tries to update the collection entry if it exists; if not, it creates a
	// new entry. All scan objects go into the "scan" collection. We also avoid
	// concurrent updates by including the previous revision in the selector: if
	// another writer got there first, the selector misses and the implied insert
	// collides on _id, so the caller gets an error instead of a silent overwrite
	_, err := dao.Database.C(scanDAOCollection).Upsert(bson.M{
		"_id":      scan.Id,
		"revision": scan.Revision - 1,
	}, scan)

	return err
}
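
A hedged usage sketch, not part of the original DAO: because the upsert selector
matches on the previous revision, a concurrent writer makes the selector miss and
the implied insert collide on _id, which surfaces as a duplicate-key error the
caller can retry. The saveScanWithRetry helper, the retry limit, and the reload
via FindId are assumptions; the snippet relies on the mgo and errors packages.

func saveScanWithRetry(dao ScanDAO, scan *model.Scan) error {
	for attempt := 0; attempt < 3; attempt++ {
		err := dao.Save(scan)
		if err == nil {
			return nil
		}
		if !mgo.IsDup(err) {
			return err
		}
		// Lost the race: reload the stored revision and try again.
		if err := dao.Database.C(scanDAOCollection).FindId(scan.Id).One(scan); err != nil {
			return err
		}
	}
	return errors.New("scan: too many concurrent modifications")
}
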
Example #3
// Run creates a new transaction with ops and runs it immediately.
// The id parameter specifies the transaction id, and may be written
// down ahead of time to later verify the success of the change and
// resume it if the procedure is interrupted for any reason. If
// empty, a random id will be generated.
// The info parameter, if not nil, is included under the "i"
// field of the transaction document.
//
// Operations across documents are not atomically applied, but are
// guaranteed to be eventually all applied in the order provided or
// all aborted, as long as the affected documents are only modified
// through transactions. If documents are simultaneously modified
// by transactions and out of transactions the behavior is undefined.
//
// If Run returns no errors, all operations were applied successfully.
// If it returns ErrAborted, one or more operations could not be applied
// and the transaction was entirely aborted with no changes performed.
// Otherwise, if the transaction is interrupted while running for any
// reason, it may be resumed explicitly or by attempting to apply
// another transaction on any of the documents targeted by ops, as
// long as the interruption was made after the transaction document
// itself was inserted. Call Resume with the obtained transaction id
// to confirm whether the transaction was applied or not.
//
// Any number of transactions may be run concurrently, with one
// runner or many.
func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) {
	const efmt = "error in transaction op %d: %s"
	// Validate every op before anything is inserted into the transaction collection.
	for i := range ops {
		op := &ops[i]
		if op.C == "" || op.Id == nil {
			return fmt.Errorf(efmt, i, "C or Id missing")
		}
		changes := 0
		if op.Insert != nil {
			changes++
		}
		if op.Update != nil {
			changes++
		}
		if op.Remove {
			changes++
		}
		if changes > 1 {
			return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set")
		}
		if changes == 0 && op.Assert == nil {
			return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set")
		}
	}
	if id == "" {
		id = bson.NewObjectId()
	}

	// Insert transaction sooner rather than later, to stay on the safer side.
	t := transaction{
		Id:    id,
		Ops:   ops,
		State: tpreparing,
		Info:  info,
	}
	if err = r.tc.Insert(&t); err != nil {
		return err
	}
	if err = flush(r, &t); err != nil {
		return err
	}
	if t.State == taborted {
		return ErrAborted
	} else if t.State != tapplied {
		panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
	}
	return nil
}
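
A hedged usage sketch, not from the txn package documentation: a two-document
transfer in which both updates are eventually applied or, if the assertion
fails, neither is. The runner, the "accounts" and "txns" collections, the
document ids, and the balances are illustrative assumptions.

	// runner := txn.NewRunner(db.C("txns"))
	ops := []txn.Op{{
		C:      "accounts",
		Id:     "alice",
		Assert: bson.M{"balance": bson.M{"$gte": 100}},
		Update: bson.M{"$inc": bson.M{"balance": -100}},
	}, {
		C:      "accounts",
		Id:     "bob",
		Update: bson.M{"$inc": bson.M{"balance": 100}},
	}}
	id := bson.NewObjectId() // write this down to Resume after an interruption
	err := runner.Run(ops, id, nil)
	if err == txn.ErrAborted {
		// The assertion failed; no changes were made to either account.
	}
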
Example #4
// insertChunk hands one chunk of file data to a background goroutine for
// insertion into the chunks collection, blocking while a megabyte or more
// of chunk data is still pending.
func (file *GridFile) insertChunk(data []byte) {
	n := file.chunk
	file.chunk++
	debugf("GridFile %p: adding to checksum: %q", file, string(data))
	file.wsum.Write(data)

	for file.doc.ChunkSize*file.wpending >= 1024*1024 {
		// Hold on.. we got a MB pending.
		file.c.Wait()
		if file.err != nil {
			return
		}
	}

	file.wpending++

	debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))

	// We may not own the memory of data, so rather than
	// simply copying it, we'll marshal the document ahead of time.
	data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
	if err != nil {
		file.err = err
		return
	}

	go func() {
		err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
		file.m.Lock()
		file.wpending--
		if err != nil && file.err == nil {
			file.err = err
		}
		file.c.Broadcast()
		file.m.Unlock()
	}()
}
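
A hedged sketch of the flow-control pattern above in isolation; every name here
is illustrative and not part of mgo. A sync.Cond caps how much data may be in
flight at once: producers block once the budget is spent and resume when a
background worker broadcasts completion, mirroring the wpending counter, the
c.Wait loop, and the Broadcast in the insert goroutine above. Requires only the
sync package.

type boundedWrites struct {
	m       sync.Mutex
	c       *sync.Cond
	pending int // bytes currently being written in the background
	limit   int // e.g. 1024 * 1024, as in insertChunk
}

func newBoundedWrites(limit int) *boundedWrites {
	b := &boundedWrites{limit: limit}
	b.c = sync.NewCond(&b.m)
	return b
}

// acquire blocks until n more bytes may be in flight.
func (b *boundedWrites) acquire(n int) {
	b.m.Lock()
	for b.pending+n > b.limit {
		b.c.Wait() // releases b.m while waiting, like file.c.Wait above
	}
	b.pending += n
	b.m.Unlock()
}

// release retires n bytes and wakes any blocked producers.
func (b *boundedWrites) release(n int) {
	b.m.Lock()
	b.pending -= n
	b.c.Broadcast()
	b.m.Unlock()
}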