func (s *S) TestNewObjectId(c *C) {
    // Generate 10 ids
    ids := make([]bson.ObjectId, 10)
    for i := 0; i < 10; i++ {
        ids[i] = bson.NewObjectId()
    }
    for i := 1; i < 10; i++ {
        prevId := ids[i-1]
        id := ids[i]
        // Test for uniqueness among all other 9 generated ids
        for j, tid := range ids {
            if j != i {
                c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique"))
            }
        }
        // Check that timestamp was incremented and is within 30 seconds of the previous one
        secs := id.Time().Sub(prevId.Time()).Seconds()
        c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId"))
        // Check that machine ids are the same
        c.Assert(id.Machine(), DeepEquals, prevId.Machine())
        // Check that pids are the same
        c.Assert(id.Pid(), Equals, prevId.Pid())
        // Test for proper increment
        delta := int(id.Counter() - prevId.Counter())
        c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId"))
    }
}
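// For reference, the four properties checked above correspond to the four
// components of the 12-byte ObjectId layout. A minimal sketch of decoding
// them (assumes the usual fmt and gopkg.in/mgo.v2/bson imports; illustrative
// only, not part of the test suite):
func inspectObjectId() {
    id := bson.NewObjectId()
    fmt.Println(id.Time())    // 4-byte creation timestamp (second precision)
    fmt.Println(id.Machine()) // 3-byte machine identifier
    fmt.Println(id.Pid())     // 2-byte process id
    fmt.Println(id.Counter()) // 3-byte per-process counter, hence delta == 1 above
}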
// Create creates a new file with the provided name in the GridFS. If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods. If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
//     func check(err error) {
//         if err != nil {
//             panic(err.Error())
//         }
//     }
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     n, err := file.Write([]byte("Hello world!"))
//     check(err)
//     err = file.Close()
//     check(err)
//     fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used to
// help on the file creation. For example:
//
//     file, err := db.GridFS("fs").Create("myfile.txt")
//     check(err)
//     messages, err := os.Open("/var/log/messages")
//     check(err)
//     defer messages.Close()
//     _, err = io.Copy(file, messages)
//     check(err)
//     err = file.Close()
//     check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
    file = gfs.newFile()
    file.mode = gfsWriting
    file.wsum = md5.New()
    file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
    return
}
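// Reading a file back is symmetric: Open returns the version with the most
// recent uploadDate for the given name. A minimal sketch reusing the check
// helper from the comment above (db is assumed to be an *mgo.Database, and
// the io/ioutil import is assumed):
func openExample(db *mgo.Database) {
    file, err := db.GridFS("fs").Open("myfile.txt")
    check(err)
    data, err := ioutil.ReadAll(file) // *GridFile implements io.Reader too
    check(err)
    check(file.Close()) // always check Close, as stressed above
    fmt.Printf("%d bytes read\n", len(data))
}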
func (s *S) TestInfo(c *C) {
    ops := []txn.Op{{
        C:      "accounts",
        Id:     0,
        Assert: txn.DocMissing,
    }}

    id := bson.NewObjectId()
    err := s.runner.Run(ops, id, M{"n": 42})
    c.Assert(err, IsNil)

    var t struct{ I struct{ N int } }
    err = s.tc.FindId(id).One(&t)
    c.Assert(err, IsNil)

    c.Assert(t.I.N, Equals, 42)
}
func (s *S) TestDirect(c *C) {
    session, err := mgo.Dial("localhost:40012?connect=direct")
    c.Assert(err, IsNil)
    defer session.Close()

    // We know that server is a slave.
    session.SetMode(mgo.Monotonic, true)

    result := &struct{ Host string }{}
    err = session.Run("serverStatus", result)
    c.Assert(err, IsNil)
    c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)

    stats := mgo.GetStats()
    c.Assert(stats.SocketsAlive, Equals, 1)
    c.Assert(stats.SocketsInUse, Equals, 1)
    c.Assert(stats.SocketRefs, Equals, 1)

    // We've got no master, so it'll timeout.
    session.SetSyncTimeout(5e8 * time.Nanosecond)

    coll := session.DB("mydb").C("mycoll")
    err = coll.Insert(M{"test": 1})
    c.Assert(err, ErrorMatches, "no reachable servers")

    // Writing to the local database is okay.
    coll = session.DB("local").C("mycoll")
    defer coll.RemoveAll(nil)
    id := bson.NewObjectId()
    err = coll.Insert(M{"_id": id})
    c.Assert(err, IsNil)

    // Data was stored in the right server.
    n, err := coll.Find(M{"_id": id}).Count()
    c.Assert(err, IsNil)
    c.Assert(n, Equals, 1)

    // Server hasn't changed.
    result.Host = ""
    err = session.Run("serverStatus", result)
    c.Assert(err, IsNil)
    c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)
}
func (s *S) TestQueueStashing(c *C) {
    txn.SetChaos(txn.Chaos{
        KillChance: 1,
        Breakpoint: "set-applying",
    })

    opses := [][]txn.Op{{{
        C:      "accounts",
        Id:     0,
        Insert: M{"balance": 100},
    }}, {{
        C:      "accounts",
        Id:     0,
        Remove: true,
    }}, {{
        C:      "accounts",
        Id:     0,
        Insert: M{"balance": 200},
    }}, {{
        C:      "accounts",
        Id:     0,
        Update: M{"$inc": M{"balance": 100}},
    }}}

    var last bson.ObjectId
    for _, ops := range opses {
        last = bson.NewObjectId()
        err := s.runner.Run(ops, last, nil)
        c.Assert(err, Equals, txn.ErrChaos)
    }

    txn.SetChaos(txn.Chaos{})
    err := s.runner.Resume(last)
    c.Assert(err, IsNil)

    var account Account
    err = s.accounts.FindId(0).One(&account)
    c.Assert(err, IsNil)
    c.Assert(account.Balance, Equals, 300)
}
func (file *GridFile) insertChunk(data []byte) {
    n := file.chunk
    file.chunk++
    debugf("GridFile %p: adding to checksum: %q", file, string(data))
    file.wsum.Write(data)

    for file.doc.ChunkSize*file.wpending >= 1024*1024 {
        // Hold on.. we got a MB pending.
        file.c.Wait()
        if file.err != nil {
            return
        }
    }

    file.wpending++

    debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))

    // We may not own the memory of data, so rather than
    // simply copying it, we'll marshal the document ahead of time.
    data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
    if err != nil {
        file.err = err
        return
    }

    go func() {
        err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
        file.m.Lock()
        file.wpending--
        if err != nil && file.err == nil {
            file.err = err
        }
        file.c.Broadcast()
        file.m.Unlock()
    }()
}
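// The flow-control idiom above (block the producer while too much data is in
// flight, and have each completing write broadcast on a condition variable)
// can be shown in isolation. A simplified sketch that counts bytes rather
// than chunks; all names here are illustrative, not part of mgo:
package main

import (
    "fmt"
    "sync"
    "time"
)

type boundedWriter struct {
    m       sync.Mutex
    c       *sync.Cond
    pending int // bytes currently being written asynchronously
    limit   int // cap on in-flight bytes, like insertChunk's 1MB cap
}

func newBoundedWriter(limit int) *boundedWriter {
    w := &boundedWriter{limit: limit}
    w.c = sync.NewCond(&w.m)
    return w
}

func (w *boundedWriter) write(data []byte) {
    w.m.Lock()
    for w.pending+len(data) > w.limit {
        w.c.Wait() // parked until a completing write broadcasts
    }
    w.pending += len(data)
    w.m.Unlock()

    go func() {
        time.Sleep(10 * time.Millisecond) // stand-in for the slow insert
        w.m.Lock()
        w.pending -= len(data)
        w.c.Broadcast() // wake any producers blocked in write
        w.m.Unlock()
    }()
}

func main() {
    w := newBoundedWriter(1 << 20)
    for i := 0; i < 8; i++ {
        w.write(make([]byte, 256<<10)) // blocks once 1MB is in flight
    }
    // Drain: wait until every asynchronous write has completed.
    w.m.Lock()
    for w.pending > 0 {
        w.c.Wait()
    }
    w.m.Unlock()
    fmt.Println("all writes completed")
}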
func (s *S) TestPurgeMissingPipelineSizeLimit(c *C) {
    // This test ensures that PurgeMissing can handle very large
    // txn-queue fields. Previous iterations of PurgeMissing would
    // trigger a 16MB aggregation pipeline result size limit when run
    // against documents or stashes with large numbers of txn-queue
    // entries. PurgeMissing no longer uses aggregation pipelines,
    // which works around this limit.

    // The pipeline result size limitation was removed from MongoDB in 2.6,
    // so this test is only run for older MongoDB versions.
    build, err := s.session.BuildInfo()
    c.Assert(err, IsNil)
    if build.VersionAtLeast(2, 6) {
        c.Skip("This tests a problem that can only happen with MongoDB < 2.6")
    }

    // Insert a single document to work with.
    err = s.accounts.Insert(M{"_id": 0, "balance": 100})
    c.Assert(err, IsNil)

    ops := []txn.Op{{
        C:      "accounts",
        Id:     0,
        Update: M{"$inc": M{"balance": 100}},
    }}

    // Generate one successful transaction.
    good := bson.NewObjectId()
    c.Logf("---- Running ops under transaction %q", good.Hex())
    err = s.runner.Run(ops, good, nil)
    c.Assert(err, IsNil)

    // Generate another transaction which will go missing.
    missing := bson.NewObjectId()
    c.Logf("---- Running ops under transaction %q (which will go missing)", missing.Hex())
    err = s.runner.Run(ops, missing, nil)
    c.Assert(err, IsNil)

    err = s.tc.RemoveId(missing)
    c.Assert(err, IsNil)

    // Generate a txn-queue on the test document that's large enough
    // that it used to cause PurgeMissing to exceed MongoDB's 16MB
    // pipeline result size limit (MongoDB 2.4 and older only).
    //
    // The contents of the txn-queue field don't matter, only that
    // it's big enough to trigger the size limit. The required size
    // can also be achieved by using multiple documents as long as the
    // cumulative size of all the txn-queue fields exceeds the
    // pipeline limit. A single document is easier to work with for
    // this test however.
    //
    // The txn id of the successful transaction is used to fill the
    // txn-queue because this takes advantage of a short circuit in
    // PurgeMissing, dramatically speeding up the test run time.
    const fakeQueueLen = 250000
    fakeTxnQueue := make([]string, fakeQueueLen)
    token := good.Hex() + "_12345678" // txn id + nonce
    for i := 0; i < fakeQueueLen; i++ {
        fakeTxnQueue[i] = token
    }

    err = s.accounts.UpdateId(0, bson.M{
        "$set": bson.M{"txn-queue": fakeTxnQueue},
    })
    c.Assert(err, IsNil)

    // PurgeMissing could hit the same pipeline result size limit when
    // processing the txn-queue fields of stash documents, so insert
    // the large txn-queue there too to ensure that no longer happens.
    err = s.sc.Insert(
        bson.D{{"c", "accounts"}, {"id", 0}},
        bson.M{"txn-queue": fakeTxnQueue},
    )
    c.Assert(err, IsNil)

    c.Logf("---- Purging missing transactions")
    err = s.runner.PurgeMissing("accounts")
    c.Assert(err, IsNil)
}
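// Back-of-envelope arithmetic for the sizes involved, as a rough sketch only:
// the exact per-element BSON overhead depends on the array index keys, and
// the 16MB figure is the server's pipeline result cap, not a client constant.
// Assumes the fmt import:
func approxQueueSize() {
    const tokenLen = 24 + 1 + 8 // hex ObjectId + "_" separator + 8-char nonce
    const queueLen = 250000
    raw := tokenLen * queueLen // ≈ 8MB of raw string payload per field
    fmt.Printf("raw token bytes: ~%.2f MB\n", float64(raw)/(1<<20))
    // With BSON framing added, and with both the account document and the
    // stash document carrying a copy of the field, the old aggregation-based
    // PurgeMissing could push a pipeline result past the 16MB cap.
}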
func (s *S) TestPurgeMissing(c *C) {
    txn.SetChaos(txn.Chaos{
        KillChance: 1,
        Breakpoint: "set-applying",
    })

    err := s.accounts.Insert(M{"_id": 0, "balance": 100})
    c.Assert(err, IsNil)
    err = s.accounts.Insert(M{"_id": 1, "balance": 100})
    c.Assert(err, IsNil)

    ops1 := []txn.Op{{
        C:      "accounts",
        Id:     3,
        Insert: M{"balance": 100},
    }}

    ops2 := []txn.Op{{
        C:      "accounts",
        Id:     0,
        Remove: true,
    }, {
        C:      "accounts",
        Id:     1,
        Update: M{"$inc": M{"balance": 100}},
    }, {
        C:      "accounts",
        Id:     2,
        Insert: M{"balance": 100},
    }}

    first := bson.NewObjectId()
    c.Logf("---- Running ops1 under transaction %q, to be canceled by chaos", first.Hex())
    err = s.runner.Run(ops1, first, nil)
    c.Assert(err, Equals, txn.ErrChaos)

    last := bson.NewObjectId()
    c.Logf("---- Running ops2 under transaction %q, to be canceled by chaos", last.Hex())
    err = s.runner.Run(ops2, last, nil)
    c.Assert(err, Equals, txn.ErrChaos)

    c.Logf("---- Removing transaction %q", last.Hex())
    err = s.tc.RemoveId(last)
    c.Assert(err, IsNil)

    c.Logf("---- Disabling chaos and attempting to resume all")
    txn.SetChaos(txn.Chaos{})
    err = s.runner.ResumeAll()
    c.Assert(err, IsNil)

    again := bson.NewObjectId()
    c.Logf("---- Running ops2 again under transaction %q, to fail for missing transaction", again.Hex())
    err = s.runner.Run(ops2, again, nil)
    c.Assert(err, ErrorMatches, "cannot find transaction .*")

    c.Logf("---- Purging missing transactions")
    err = s.runner.PurgeMissing("accounts")
    c.Assert(err, IsNil)

    c.Logf("---- Resuming pending transactions")
    err = s.runner.ResumeAll()
    c.Assert(err, IsNil)

    expect := []struct{ Id, Balance int }{
        {0, -1},
        {1, 200},
        {2, 100},
        {3, 100},
    }
    var got Account
    for _, want := range expect {
        err = s.accounts.FindId(want.Id).One(&got)
        if want.Balance == -1 {
            if err != mgo.ErrNotFound {
                c.Errorf("Account %d should not exist, find got err=%#v", want.Id, err)
            }
        } else if err != nil {
            c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance)
        } else if got.Balance != want.Balance {
            c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance)
        }
    }
}
func (s *S) TestChangeLog(c *C) {
    chglog := s.db.C("chglog")
    s.runner.ChangeLog(chglog)

    ops := []txn.Op{{
        C:      "debts",
        Id:     0,
        Assert: txn.DocMissing,
    }, {
        C:      "accounts",
        Id:     0,
        Insert: M{"balance": 300},
    }, {
        C:      "accounts",
        Id:     1,
        Insert: M{"balance": 300},
    }, {
        C:      "people",
        Id:     "joe",
        Insert: M{"accounts": []int64{0, 1}},
    }}
    id := bson.NewObjectId()
    err := s.runner.Run(ops, id, nil)
    c.Assert(err, IsNil)

    type IdList []interface{}
    type Log struct {
        Docs   IdList  "d"
        Revnos []int64 "r"
    }
    var m map[string]*Log
    err = chglog.FindId(id).One(&m)
    c.Assert(err, IsNil)

    c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}})
    c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}})
    c.Assert(m["debts"], IsNil)

    ops = []txn.Op{{
        C:      "accounts",
        Id:     0,
        Update: M{"$inc": M{"balance": 100}},
    }, {
        C:      "accounts",
        Id:     1,
        Update: M{"$inc": M{"balance": 100}},
    }}
    id = bson.NewObjectId()
    err = s.runner.Run(ops, id, nil)
    c.Assert(err, IsNil)

    m = nil
    err = chglog.FindId(id).One(&m)
    c.Assert(err, IsNil)

    c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}})
    c.Assert(m["people"], IsNil)

    ops = []txn.Op{{
        C:      "accounts",
        Id:     0,
        Remove: true,
    }, {
        C:      "people",
        Id:     "joe",
        Remove: true,
    }}
    id = bson.NewObjectId()
    err = s.runner.Run(ops, id, nil)
    c.Assert(err, IsNil)

    m = nil
    err = chglog.FindId(id).One(&m)
    c.Assert(err, IsNil)

    c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}})
    c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}})
}
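// The assertions above imply a changelog document keyed by transaction id,
// with one {"d": docs, "r": revnos} entry per collection touched, and a
// negative revno marking a removal. This shape is inferred from the test, not
// from a format specification; dumping a raw entry for inspection is simple
// (assumes the fmt import and the s.db/chglog handles from this suite):
func dumpChangeLogEntry(chglog *mgo.Collection, id bson.ObjectId) {
    var raw bson.M
    if err := chglog.FindId(id).One(&raw); err == nil {
        // e.g. {"_id": <txn id>, "accounts": {"d": [0, 1], "r": [2, 2]},
        //       "people": {"d": ["joe"], "r": [2]}}
        fmt.Printf("%#v\n", raw)
    }
}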
func simulate(c *C, params params) {
    seed := *seed
    if seed == 0 {
        seed = time.Now().UnixNano()
    }
    rand.Seed(seed)
    c.Logf("Seed: %v", seed)

    txn.SetChaos(txn.Chaos{
        KillChance:     params.killChance,
        SlowdownChance: params.slowdownChance,
        Slowdown:       params.slowdown,
    })
    defer txn.SetChaos(txn.Chaos{})

    session, err := mgo.Dial(mgoaddr)
    c.Assert(err, IsNil)
    defer session.Close()

    db := session.DB("test")
    tc := db.C("tc")

    runner := txn.NewRunner(tc)

    tclog := db.C("tc.log")
    if params.changelog {
        info := mgo.CollectionInfo{
            Capped:   true,
            MaxBytes: 1000000,
        }
        err := tclog.Create(&info)
        c.Assert(err, IsNil)
        runner.ChangeLog(tclog)
    }

    accounts := db.C("accounts")
    for i := 0; i < params.accounts; i++ {
        err := accounts.Insert(M{"_id": i, "balance": 300})
        c.Assert(err, IsNil)
    }
    var stop time.Time
    if params.changes <= 0 {
        stop = time.Now().Add(*duration)
    }

    max := params.accounts
    if params.reinsertCopy || params.reinsertZeroed {
        max = int(float64(params.accounts) * 1.5)
    }

    changes := make(chan balanceChange, 1024)

    //session.SetMode(mgo.Eventual, true)
    for i := 0; i < params.workers; i++ {
        go func() {
            n := 0
            for {
                if n > 0 && n == params.changes {
                    break
                }
                if !stop.IsZero() && time.Now().After(stop) {
                    break
                }

                change := balanceChange{
                    id:     bson.NewObjectId(),
                    origin: rand.Intn(max),
                    target: rand.Intn(max),
                    amount: 100,
                }

                var old Account
                var oldExists bool
                if params.reinsertCopy || params.reinsertZeroed {
                    if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound {
                        c.Check(err, IsNil)
                        change.amount = old.Balance
                        oldExists = true
                    }
                }

                var ops []txn.Op
                switch {
                case params.reinsertCopy && oldExists:
                    ops = []txn.Op{{
                        C:      "accounts",
                        Id:     change.origin,
                        Assert: M{"balance": change.amount},
                        Remove: true,
                    }, {
                        C:      "accounts",
                        Id:     change.target,
                        Assert: txn.DocMissing,
                        Insert: M{"balance": change.amount},
                    }}
                case params.reinsertZeroed && oldExists:
                    ops = []txn.Op{{
                        C:      "accounts",
                        Id:     change.target,
                        Assert: txn.DocMissing,
                        Insert: M{"balance": 0},
                    }, {
                        C:      "accounts",
                        Id:     change.origin,
                        Assert: M{"balance": change.amount},
                        Remove: true,
                    }, {
                        C:      "accounts",
                        Id:     change.target,
                        Assert: txn.DocExists,
                        Update: M{"$inc": M{"balance": change.amount}},
                    }}
                case params.changeHalf:
                    ops = []txn.Op{{
                        C:      "accounts",
                        Id:     change.origin,
                        Assert: M{"balance": M{"$gte": change.amount}},
                        Update: M{"$inc": M{"balance": -change.amount / 2}},
                    }, {
                        C:      "accounts",
                        Id:     change.target,
                        Assert: txn.DocExists,
                        Update: M{"$inc": M{"balance": change.amount / 2}},
                    }, {
                        C:      "accounts",
                        Id:     change.origin,
                        Update: M{"$inc": M{"balance": -change.amount / 2}},
                    }, {
                        C:      "accounts",
                        Id:     change.target,
                        Update: M{"$inc": M{"balance": change.amount / 2}},
                    }}
                default:
                    ops = []txn.Op{{
                        C:      "accounts",
                        Id:     change.origin,
                        Assert: M{"balance": M{"$gte": change.amount}},
                        Update: M{"$inc": M{"balance": -change.amount}},
                    }, {
                        C:      "accounts",
                        Id:     change.target,
                        Assert: txn.DocExists,
                        Update: M{"$inc": M{"balance": change.amount}},
                    }}
                }

                // Use a worker-local err rather than the enclosing one, so
                // concurrent workers don't race on a shared variable.
                err := runner.Run(ops, change.id, nil)
                if err != nil && err != txn.ErrAborted && err != txn.ErrChaos {
                    c.Check(err, IsNil)
                }
                n++
                changes <- change
            }
            changes <- balanceChange{}
        }()
    }

    alive := params.workers
    changeLog := make([]balanceChange, 0, 1024)
    for alive > 0 {
        change := <-changes
        if change.id == "" {
            alive--
        } else {
            changeLog = append(changeLog, change)
        }
    }
    c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted."))

    txn.SetChaos(txn.Chaos{})
    err = runner.ResumeAll()
    c.Assert(err, IsNil)

    n, err := accounts.Count()
    c.Check(err, IsNil)
    c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed."))

    n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count()
    c.Check(err, IsNil)
    c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n))

    globalBalance := 0
    iter := accounts.Find(nil).Iter()
    account := Account{}
    for iter.Next(&account) {
        globalBalance += account.Balance
    }
    c.Check(iter.Close(), IsNil)
    c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant."))

    // Compute and verify the exact final state of all accounts.
    balance := make(map[int]int)
    for i := 0; i < params.accounts; i++ {
        balance[i] += 300
    }
    var applied, aborted int
    for _, change := range changeLog {
        err := runner.Resume(change.id)
        if err == txn.ErrAborted {
            aborted++
            continue
        } else if err != nil {
            c.Fatalf("resuming %s failed: %v", change.id, err)
        }
        balance[change.origin] -= change.amount
        balance[change.target] += change.amount
        applied++
    }
    iter = accounts.Find(nil).Iter()
    for iter.Next(&account) {
        c.Assert(account.Balance, Equals, balance[account.Id])
    }
    c.Check(iter.Close(), IsNil)
    c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted)

    if params.changelog {
        n, err := tclog.Count()
        c.Assert(err, IsNil)
        // Check if the capped collection is full.
        dummy := make([]byte, 1024)
        tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy})
        m, err := tclog.Count()
        c.Assert(err, IsNil)
        if m == n+1 {
            // Wasn't full, so it must have seen it all.
            c.Assert(n, Equals, applied)
        }
    }
}