Example #1
func (s *S) TestNewObjectId(c *C) {
	// Generate 10 ids
	ids := make([]bson.ObjectId, 10)
	for i := 0; i < 10; i++ {
		ids[i] = bson.NewObjectId()
	}
	for i := 1; i < 10; i++ {
		prevId := ids[i-1]
		id := ids[i]
		// Test for uniqueness among all other 9 generated ids
		for j, tid := range ids {
			if j != i {
				c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique"))
			}
		}
		// Check that the timestamp did not go backwards and is within 30 seconds of the previous id's
		secs := id.Time().Sub(prevId.Time()).Seconds()
		c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId"))
		// Check that machine ids are the same
		c.Assert(id.Machine(), DeepEquals, prevId.Machine())
		// Check that pids are the same
		c.Assert(id.Pid(), Equals, prevId.Pid())
		// Test for proper increment
		delta := int(id.Counter() - prevId.Counter())
		c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId"))
	}
}
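The accessors exercised in this test correspond to the fixed layout of a BSON ObjectId (4-byte timestamp, 3-byte machine id, 2-byte process id, 3-byte counter). A quick illustrative sketch, not part of the test suite:

id := bson.NewObjectId()
fmt.Println(id.Time())    // 4-byte creation time, second precision
fmt.Println(id.Machine()) // 3-byte machine identifier
fmt.Println(id.Pid())     // 2-byte process id
fmt.Println(id.Counter()) // 3-byte counter, incremented for each generated id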
Example #2
// Create creates a new file with the provided name in the GridFS.  If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods.  If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
//     func check(err error) {
//         if err != nil {
//             panic(err)
//         }
//     }
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     n, err := file.Write([]byte("Hello world!"))
//     check(err)
//     err = file.Close()
//     check(err)
//     fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used to
// write the file's contents.  For example:
//
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     messages, err := os.Open("/var/log/messages")
//     check(err)
//     defer messages.Close()
//     _, err = io.Copy(file, messages)
//     check(err)
//     err = file.Close()
//     check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
	file = gfs.newFile()
	file.mode = gfsWriting
	file.wsum = md5.New()
	file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 256 * 1024, Filename: name}
	return
}
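For symmetry with the Create examples in the doc comment above, here is a brief sketch of reading the file back through Open; it is illustrative only and reuses the check helper and db value from those examples. *GridFile also implements io.Reader, so io.Copy can stream its contents:

file, err := db.GridFS("fs").Open("myfile.txt")
check(err)
_, err = io.Copy(os.Stdout, file)
check(err)
err = file.Close()
check(err)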
Example #3
File: txn.go  Project: gs412/mgo
// Run creates a new transaction with ops and runs it immediately.
// The id parameter specifies the transaction id, and may be written
// down ahead of time to later verify the success of the change and
// resume it if the procedure is interrupted for any reason. If
// empty, a random id will be generated.
// The info parameter, if not nil, is included under the "i"
// field of the transaction document.
//
// Operations across documents are not atomically applied, but are
// guaranteed to be eventually all applied or all aborted, as long as
// the affected documents are only modified through transactions.
// If documents are modified both through transactions and outside of
// them, the behavior is undefined.
//
// If Run returns no errors, all operations were applied successfully.
// If it returns ErrAborted, one or more operations could not be applied
// and the transaction was entirely aborted with no changes performed.
// Otherwise, if the transaction is interrupted while running for any
// reason, it may be resumed explicitly or by attempting to apply
// another transaction on any of the documents targeted by ops, as
// long as the interruption was made after the transaction document
// itself was inserted. Call Resume with the obtained transaction id
// to confirm whether the transaction was applied or aborted.
//
// Any number of transactions may be run concurrently, with one
// runner or many.
func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) {
	const efmt = "error in transaction op %d: %s"
	for i := range ops {
		op := &ops[i]
		if op.C == "" || op.Id == nil {
			return fmt.Errorf(efmt, i, "C or Id missing")
		}
		changes := 0
		if op.Insert != nil {
			changes++
		}
		if op.Update != nil {
			changes++
		}
		if op.Remove {
			changes++
		}
		if changes > 1 {
			return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set")
		}
		if changes == 0 && op.Assert == nil {
			return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set")
		}
	}
	if id == "" {
		id = bson.NewObjectId()
	}

	// Insert transaction sooner rather than later, to stay on the safer side.
	t := transaction{
		Id:    id,
		Ops:   ops,
		State: tpreparing,
		Info:  info,
	}
	if err = r.tc.Insert(&t); err != nil {
		return err
	}
	if err = flush(r, &t); err != nil {
		return err
	}
	if t.State == taborted {
		return ErrAborted
	} else if t.State != tapplied {
		panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
	}
	return nil
}
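To make the doc comment concrete, here is a minimal usage sketch of Run; the "accounts" collection, the document ids, and the runner value are assumptions made for the example, not part of the package:

// Move 100 from one account document to another, asserting that the
// origin has sufficient funds and that the target exists.
ops := []txn.Op{{
	C:      "accounts",
	Id:     "alice",
	Assert: bson.M{"balance": bson.M{"$gte": 100}},
	Update: bson.M{"$inc": bson.M{"balance": -100}},
}, {
	C:      "accounts",
	Id:     "bob",
	Assert: txn.DocExists,
	Update: bson.M{"$inc": bson.M{"balance": 100}},
}}
id := bson.NewObjectId() // record this id so an interrupted run can be resumed
switch err := runner.Run(ops, id, nil); err {
case nil:
	// All operations were applied.
case txn.ErrAborted:
	// An assertion failed; no changes were performed.
default:
	// Interrupted for some other reason; runner.Resume(id) settles the outcome.
}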
Example #4
func (s *S) TestInfo(c *C) {
	ops := []txn.Op{{
		C:      "accounts",
		Id:     0,
		Assert: txn.DocMissing,
	}}

	id := bson.NewObjectId()
	err := s.runner.Run(ops, id, M{"n": 42})
	c.Assert(err, IsNil)

	var t struct{ I struct{ N int } }
	err = s.tc.FindId(id).One(&t)
	c.Assert(err, IsNil)
	c.Assert(t.I.N, Equals, 42)
}
Example #5
func (s *S) TestQueueStashing(c *C) {
	txn.SetChaos(txn.Chaos{
		KillChance: 1,
		Breakpoint: "set-applying",
	})

	opses := [][]txn.Op{{{
		C:      "accounts",
		Id:     0,
		Insert: M{"balance": 100},
	}}, {{
		C:      "accounts",
		Id:     0,
		Remove: true,
	}}, {{
		C:      "accounts",
		Id:     0,
		Insert: M{"balance": 200},
	}}, {{
		C:      "accounts",
		Id:     0,
		Update: M{"$inc": M{"balance": 100}},
	}}}

	var last bson.ObjectId
	for _, ops := range opses {
		last = bson.NewObjectId()
		err := s.runner.Run(ops, last, nil)
		c.Assert(err, Equals, txn.ErrChaos)
	}

	txn.SetChaos(txn.Chaos{})
	err := s.runner.Resume(last)
	c.Assert(err, IsNil)

	var account Account
	err = s.accounts.FindId(0).One(&account)
	c.Assert(err, IsNil)
	c.Assert(account.Balance, Equals, 300)
}
Example #6
func (file *GridFile) insertChunk(data []byte) {
	n := file.chunk
	file.chunk++
	debugf("GridFile %p: adding to checksum: %q", file, string(data))
	file.wsum.Write(data)

	for file.doc.ChunkSize*file.wpending >= 1024*1024 {
		// Hold on... at least 1 MB of chunk data is pending insertion.
		file.c.Wait()
		if file.err != nil {
			return
		}
	}

	file.wpending++

	debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))

	// We may not own the memory of data, so rather than
	// simply copying it, we'll marshal the document ahead of time.
	data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
	if err != nil {
		file.err = err
		return
	}

	go func() {
		err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
		file.m.Lock()
		file.wpending--
		if err != nil && file.err == nil {
			file.err = err
		}
		file.c.Broadcast()
		file.m.Unlock()
	}()
}
Example #7
func (s *S) TestChangeLog(c *C) {
	chglog := s.db.C("chglog")
	s.runner.ChangeLog(chglog)

	ops := []txn.Op{{
		C:      "debts",
		Id:     0,
		Assert: txn.DocMissing,
	}, {
		C:      "accounts",
		Id:     0,
		Insert: M{"balance": 300},
	}, {
		C:      "accounts",
		Id:     1,
		Insert: M{"balance": 300},
	}, {
		C:      "people",
		Id:     "joe",
		Insert: M{"accounts": []int64{0, 1}},
	}}
	id := bson.NewObjectId()
	err := s.runner.Run(ops, id, nil)
	c.Assert(err, IsNil)

	type IdList []interface{}
	type Log struct {
		Docs   IdList  "d"
		Revnos []int64 "r"
	}
	var m map[string]*Log
	err = chglog.FindId(id).One(&m)
	c.Assert(err, IsNil)

	c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}})
	c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}})
	c.Assert(m["debts"], IsNil)

	ops = []txn.Op{{
		C:      "accounts",
		Id:     0,
		Update: M{"$inc": M{"balance": 100}},
	}, {
		C:      "accounts",
		Id:     1,
		Update: M{"$inc": M{"balance": 100}},
	}}
	id = bson.NewObjectId()
	err = s.runner.Run(ops, id, nil)
	c.Assert(err, IsNil)

	m = nil
	err = chglog.FindId(id).One(&m)
	c.Assert(err, IsNil)

	c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}})
	c.Assert(m["people"], IsNil)

	ops = []txn.Op{{
		C:      "accounts",
		Id:     0,
		Remove: true,
	}, {
		C:      "people",
		Id:     "joe",
		Remove: true,
	}}
	id = bson.NewObjectId()
	err = s.runner.Run(ops, id, nil)
	c.Assert(err, IsNil)

	m = nil
	err = chglog.FindId(id).One(&m)
	c.Assert(err, IsNil)

	c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}})
	c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}})
}
Example #8
func simulate(c *C, params params) {
	seed := *seed
	if seed == 0 {
		seed = time.Now().UnixNano()
	}
	rand.Seed(seed)
	c.Logf("Seed: %v", seed)

	txn.SetChaos(txn.Chaos{
		KillChance:     params.killChance,
		SlowdownChance: params.slowdownChance,
		Slowdown:       params.slowdown,
	})
	defer txn.SetChaos(txn.Chaos{})

	session, err := mgo.Dial(mgoaddr)
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("test")
	tc := db.C("tc")

	runner := txn.NewRunner(tc)

	tclog := db.C("tc.log")
	if params.changelog {
		info := mgo.CollectionInfo{
			Capped:   true,
			MaxBytes: 1000000,
		}
		err := tclog.Create(&info)
		c.Assert(err, IsNil)
		runner.ChangeLog(tclog)
	}

	accounts := db.C("accounts")
	for i := 0; i < params.accounts; i++ {
		err := accounts.Insert(M{"_id": i, "balance": 300})
		c.Assert(err, IsNil)
	}
	var stop time.Time
	if params.changes <= 0 {
		stop = time.Now().Add(*duration)
	}

	max := params.accounts
	if params.reinsertCopy || params.reinsertZeroed {
		max = int(float64(params.accounts) * 1.5)
	}

	changes := make(chan balanceChange, 1024)

	//session.SetMode(mgo.Eventual, true)
	for i := 0; i < params.workers; i++ {
		go func() {
			n := 0
			for {
				if n > 0 && n == params.changes {
					break
				}
				if !stop.IsZero() && time.Now().After(stop) {
					break
				}

				change := balanceChange{
					id:     bson.NewObjectId(),
					origin: rand.Intn(max),
					target: rand.Intn(max),
					amount: 100,
				}

				var old Account
				var oldExists bool
				if params.reinsertCopy || params.reinsertZeroed {
					if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound {
						c.Check(err, IsNil)
						change.amount = old.Balance
						oldExists = true
					}
				}

				var ops []txn.Op
				switch {
				case params.reinsertCopy && oldExists:
					ops = []txn.Op{{
						C:      "accounts",
						Id:     change.origin,
						Assert: M{"balance": change.amount},
						Remove: true,
					}, {
						C:      "accounts",
						Id:     change.target,
						Assert: txn.DocMissing,
						Insert: M{"balance": change.amount},
					}}
				case params.reinsertZeroed && oldExists:
					ops = []txn.Op{{
						C:      "accounts",
						Id:     change.target,
						Assert: txn.DocMissing,
						Insert: M{"balance": 0},
					}, {
						C:      "accounts",
						Id:     change.origin,
						Assert: M{"balance": change.amount},
						Remove: true,
					}, {
						C:      "accounts",
						Id:     change.target,
						Assert: txn.DocExists,
						Update: M{"$inc": M{"balance": change.amount}},
					}}
				case params.changeHalf:
					ops = []txn.Op{{
						C:      "accounts",
						Id:     change.origin,
						Assert: M{"balance": M{"$gte": change.amount}},
						Update: M{"$inc": M{"balance": -change.amount / 2}},
					}, {
						C:      "accounts",
						Id:     change.target,
						Assert: txn.DocExists,
						Update: M{"$inc": M{"balance": change.amount / 2}},
					}, {
						C:      "accounts",
						Id:     change.origin,
						Update: M{"$inc": M{"balance": -change.amount / 2}},
					}, {
						C:      "accounts",
						Id:     change.target,
						Update: M{"$inc": M{"balance": change.amount / 2}},
					}}
				default:
					ops = []txn.Op{{
						C:      "accounts",
						Id:     change.origin,
						Assert: M{"balance": M{"$gte": change.amount}},
						Update: M{"$inc": M{"balance": -change.amount}},
					}, {
						C:      "accounts",
						Id:     change.target,
						Assert: txn.DocExists,
						Update: M{"$inc": M{"balance": change.amount}},
					}}
				}

				err := runner.Run(ops, change.id, nil)
				if err != nil && err != txn.ErrAborted && err != txn.ErrChaos {
					c.Check(err, IsNil)
				}
				n++
				changes <- change
			}
			changes <- balanceChange{}
		}()
	}

	alive := params.workers
	changeLog := make([]balanceChange, 0, 1024)
	for alive > 0 {
		change := <-changes
		if change.id == "" {
			alive--
		} else {
			changeLog = append(changeLog, change)
		}
	}
	c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted."))

	txn.SetChaos(txn.Chaos{})
	err = runner.ResumeAll()
	c.Assert(err, IsNil)

	n, err := accounts.Count()
	c.Check(err, IsNil)
	c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed."))

	n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count()
	c.Check(err, IsNil)
	c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n))

	globalBalance := 0
	iter := accounts.Find(nil).Iter()
	account := Account{}
	for iter.Next(&account) {
		globalBalance += account.Balance
	}
	c.Check(iter.Err(), IsNil)
	c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant."))

	// Compute and verify the exact final state of all accounts.
	balance := make(map[int]int)
	for i := 0; i < params.accounts; i++ {
		balance[i] += 300
	}
	var applied, aborted int
	for _, change := range changeLog {
		err := runner.Resume(change.id)
		if err == txn.ErrAborted {
			aborted++
			continue
		} else if err != nil {
			c.Fatalf("resuming %s failed: %v", change.id, err)
		}
		balance[change.origin] -= change.amount
		balance[change.target] += change.amount
		applied++
	}
	iter = accounts.Find(nil).Iter()
	for iter.Next(&account) {
		c.Assert(account.Balance, Equals, balance[account.Id])
	}
	c.Check(iter.Err(), IsNil)
	c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted)

	if params.changelog {
		n, err := tclog.Count()
		c.Assert(err, IsNil)
		// Insert a dummy document to detect whether the capped collection rolled over.
		dummy := make([]byte, 1024)
		tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy})
		m, err := tclog.Count()
		c.Assert(err, IsNil)
		if m == n+1 {
			// Wasn't full, so it must have seen it all.
			c.Assert(err, IsNil)
			c.Assert(n, Equals, applied)
		}
	}
}