func (s *S) TestSafeInsert(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	// Insert an element with a predefined key.
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	mgo.ResetStats()

	// Session should be safe by default, so inserting it again must fail.
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, Matches, "E11000 duplicate.*")
	c.Assert(err.(*mgo.LastError).Code, Equals, 11000)

	// It must have sent two operations (INSERT_OP + getLastError QUERY_OP).
	stats := mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 2)

	mgo.ResetStats()

	// If we disable safety, though, it won't complain.
	session.SetSafe(nil)
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Must have sent a single operation this time (just the INSERT_OP).
	stats = mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 1)
}
func (s *S) TestTopologySyncWithSlaveSeed(c *C) {
	// That's supposed to be a slave. Must run discovery
	// and find out master to insert successfully.
	session, err := mgo.Mongo("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	coll.Insert(M{"a": 1, "b": 2})

	result := struct{ Ok bool }{}
	err = session.Run("getLastError", &result)
	c.Assert(err, IsNil)
	c.Assert(result.Ok, Equals, true)

	// One connection to each during discovery. Master
	// socket recycled for insert.
	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 1)
	c.Assert(stats.SlaveConns, Equals, 2)

	// Only one socket reference alive, in the master socket owned
	// by the above session.
	c.Assert(stats.SocketsInUse, Equals, 1)

	// Refresh it, and it must be gone.
	session.Refresh()
	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 0)
}
func (s *S) TestPrefetching(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	docs := make([]interface{}, 200)
	for i := 0; i != 200; i++ {
		docs[i] = M{"n": i}
	}
	coll.Insert(docs...)

	// Same test three times. Once with prefetching via query, then with the
	// default prefetching, and a third time tweaking the default settings in
	// the session.
	for testi := 0; testi != 3; testi++ {
		mgo.ResetStats()

		var iter *mgo.Iter
		var nextn int

		switch testi {
		case 0: // First, using query methods.
			iter, err = coll.Find(M{}).Prefetch(0.27).Batch(100).Iter()
			c.Assert(err, IsNil)
			nextn = 73

		case 1: // Then, the default session value.
			session.SetBatch(100)
			iter, err = coll.Find(M{}).Iter()
			c.Assert(err, IsNil)
			nextn = 75

		case 2: // Then, tweaking the session value.
			session.SetBatch(100)
			session.SetPrefetch(0.27)
			iter, err = coll.Find(M{}).Iter()
			c.Assert(err, IsNil)
			nextn = 73
		}

		result := struct{ N int }{}
		for i := 0; i != nextn; i++ {
			iter.Next(&result)
		}

		stats := mgo.GetStats()
		c.Assert(stats.ReceivedDocs, Equals, 100)

		iter.Next(&result)

		// Ping the database just to wait for the fetch above
		// to get delivered.
		session.Run("ping", M{}) // XXX Should support nil here.

		stats = mgo.GetStats()
		c.Assert(stats.ReceivedDocs, Equals, 201) // 200 + the ping result
	}
}
func (s *S) TestNewSession(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	// Do a dummy operation to wait for connection.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Tweak safety and query settings to ensure other has copied those.
	session.SetSafe(nil)
	session.SetBatch(-1)
	other := session.New()
	defer other.Close()
	session.SetSafe(&mgo.Safe{})

	// The new session was copied while the original was unsafe, so no errors.
	otherColl := other.DB("mydb").C("mycoll")
	err = otherColl.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Original session was made safe again.
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, NotNil)

	// With New(), each session has its own socket now.
	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 2)
	c.Assert(stats.SocketsInUse, Equals, 2)

	// Ensure query parameters were copied.
	err = otherColl.Insert(M{"_id": 2})
	c.Assert(err, IsNil)

	// Ping the database to ensure the nonce has been received already.
	c.Assert(other.Ping(), IsNil)

	mgo.ResetStats()

	iter := otherColl.Find(M{}).Iter()
	c.Assert(err, IsNil)

	m := M{}
	ok := iter.Next(m)
	c.Assert(ok, Equals, true)
	err = iter.Err()
	c.Assert(err, IsNil)

	// If Batch(-1) is in effect, a single document must have been received.
	stats = mgo.GetStats()
	c.Assert(stats.ReceivedDocs, Equals, 1)
}
func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) {
	// Now verify that logouts are properly flushed if they
	// are not revalidated after leaving the pool.
	session, err := mgo.Dial("localhost:40002")
	c.Assert(err, IsNil)
	defer session.Close()

	admindb := session.DB("admin")
	err = admindb.Login("root", "rapadura")
	c.Assert(err, IsNil)

	// Add another user to test the logout case at the same time.
	mydb := session.DB("mydb")
	err = mydb.AddUser("myuser", "mypass", true)
	c.Assert(err, IsNil)

	err = mydb.Login("myuser", "mypass")
	c.Assert(err, IsNil)

	// Just some data to query later.
	err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
	c.Assert(err, IsNil)

	// Give socket back to pool.
	session.Refresh()

	// Brand new session, should use socket from the pool.
	other := session.New()
	defer other.Close()

	oldStats := mgo.GetStats()

	err = other.DB("mydb").Login("myuser", "mypass")
	c.Assert(err, IsNil)

	// Login was cached, so no ops.
	newStats := mgo.GetStats()
	c.Assert(newStats.SentOps, Equals, oldStats.SentOps)

	// Can't write, since root has been implicitly logged out
	// when the socket went back into the pool, and not revalidated.
	err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
	c.Assert(err, ErrorMatches, "unauthorized")

	// But can read due to the revalidated myuser login.
	result := struct{ N int }{}
	err = other.DB("mydb").C("mycoll").Find(nil).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result.N, Equals, 1)
}
func (s *S) TestAuthLoginCachingAcrossPool(c *C) {
	// Logins are cached even when the connection goes back
	// into the pool.
	session, err := mgo.Dial("localhost:40002")
	c.Assert(err, IsNil)
	defer session.Close()

	admindb := session.DB("admin")
	err = admindb.Login("root", "rapadura")
	c.Assert(err, IsNil)

	// Add another user to test the logout case at the same time.
	mydb := session.DB("mydb")
	err = mydb.AddUser("myuser", "mypass", false)
	c.Assert(err, IsNil)

	err = mydb.Login("myuser", "mypass")
	c.Assert(err, IsNil)

	// Logout root explicitly, to test both cases.
	admindb.Logout()

	// Give socket back to pool.
	session.Refresh()

	// Brand new session, should use socket from the pool.
	other := session.New()
	defer other.Close()

	oldStats := mgo.GetStats()

	err = other.DB("admin").Login("root", "rapadura")
	c.Assert(err, IsNil)
	err = other.DB("mydb").Login("myuser", "mypass")
	c.Assert(err, IsNil)

	// Both logins were cached, so no ops.
	newStats := mgo.GetStats()
	c.Assert(newStats.SentOps, Equals, oldStats.SentOps)

	// And they actually worked.
	err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
	c.Assert(err, IsNil)

	other.DB("admin").Logout()

	err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
	c.Assert(err, IsNil)
}
func (s *S) TestPreserveSocketCountOnSync(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	stats := mgo.GetStats()
	for stats.MasterConns+stats.SlaveConns != 3 {
		stats = mgo.GetStats()
		c.Log("Waiting for all connections to be established...")
		time.Sleep(5e8)
	}

	c.Assert(stats.SocketsAlive, Equals, 3)

	// Kill the master (with rs1, 'a' is always the master).
	s.Stop("localhost:40011")

	// Wait for the logic to run for a bit and bring it back.
	go func() {
		time.Sleep(5e9)
		s.StartAll()
	}()

	// Do an action to kick the resync logic in, and also to
	// wait until the cluster recognizes the server is back.
	result := struct{ Ok bool }{}
	err = session.Run("getLastError", &result)
	c.Assert(err, IsNil)
	c.Assert(result.Ok, Equals, true)

	for i := 0; i != 20; i++ {
		stats = mgo.GetStats()
		if stats.SocketsAlive == 3 {
			break
		}
		c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive)
		time.Sleep(5e8)
	}

	// Ensure the number of sockets is preserved after syncing.
	stats = mgo.GetStats()
	c.Assert(stats.SocketsAlive, Equals, 3)
	c.Assert(stats.SocketsInUse, Equals, 1)
	c.Assert(stats.SocketRefs, Equals, 1)
}
func (s *S) TestSetModeMonotonicAfterStrong(c *C) {
	// Test that a strong session shifting to a monotonic
	// one preserves the socket untouched.
	session, err := mgo.Mongo("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	// Insert something to force a connection to the master.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	session.SetMode(mgo.Monotonic, false)

	// Wait since the sync also uses sockets.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	// Master socket should still be reserved.
	stats := mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 1)

	// Confirm it's the master even though it's Monotonic by now.
	result := M{}
	cmd := session.DB("admin").C("$cmd")
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, true)
}
func (s *S) TestDirect(c *C) {
	session, err := mgo.Mongo("localhost:40012?connect=direct")
	c.Assert(err, IsNil)
	defer session.Close()

	// We know that server is a slave.
	session.SetMode(mgo.Monotonic, true)

	result := &struct{ Host string }{}
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)

	stats := mgo.GetStats()
	c.Assert(stats.SocketsAlive, Equals, 1)
	c.Assert(stats.SocketsInUse, Equals, 1)
	c.Assert(stats.SocketRefs, Equals, 1)

	// We've got no master, so it'll timeout.
	session.SetSyncTimeout(5e8)

	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"test": 1})
	c.Assert(err, Matches, "no reachable servers")

	// Slave is still reachable.
	result.Host = ""
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)
}
func (s *S) TestAuthLoginTwiceDoesNothing(c *C) {
	session, err := mgo.Dial("localhost:40002")
	c.Assert(err, IsNil)
	defer session.Close()

	admindb := session.DB("admin")
	err = admindb.Login("root", "rapadura")
	c.Assert(err, IsNil)

	oldStats := mgo.GetStats()

	err = admindb.Login("root", "rapadura")
	c.Assert(err, IsNil)

	newStats := mgo.GetStats()
	c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
}
func (s *S) TestFindIterLimitWithBatch(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	ns := []int{40, 41, 42, 43, 44, 45, 46}
	for _, n := range ns {
		coll.Insert(M{"n": n})
	}

	// Ping the database to ensure the nonce has been received already.
	c.Assert(session.Ping(), IsNil)

	session.Refresh() // Release socket.

	mgo.ResetStats()

	query := coll.Find(M{"n": M{"$gte": 42}}).Sort(M{"$natural": 1}).Limit(3).Batch(2)
	iter, err := query.Iter()
	c.Assert(err, IsNil)

	result := struct{ N int }{}
	for i := 2; i < 5; i++ {
		err = iter.Next(&result)
		c.Assert(err, IsNil)
		c.Assert(result.N, Equals, ns[i])
		if i == 3 {
			stats := mgo.GetStats()
			c.Assert(stats.ReceivedDocs, Equals, 2)
		}
	}

	err = iter.Next(&result)
	c.Assert(err == mgo.NotFound, Equals, true)

	session.Refresh() // Release socket.

	stats := mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 2)     // 1*QUERY_OP + 1*GET_MORE_OP
	c.Assert(stats.ReceivedOps, Equals, 2) // and its REPLY_OPs
	c.Assert(stats.ReceivedDocs, Equals, 3)
	c.Assert(stats.SocketsInUse, Equals, 0)
}
func (s *S) TestSocketLimit(c *C) {
	if *fast {
		c.Skip("-fast")
	}
	const socketLimit = 64
	restore := mgo.HackSocketsPerServer(socketLimit)
	defer restore()

	session, err := mgo.Dial("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	stats := mgo.GetStats()
	for stats.MasterConns+stats.SlaveConns != 3 {
		stats = mgo.GetStats()
		c.Log("Waiting for all connections to be established...")
		time.Sleep(5e8)
	}
	c.Assert(stats.SocketsAlive, Equals, 3)

	// Consume the whole limit for the master.
	var master []*mgo.Session
	for i := 0; i < socketLimit; i++ {
		s := session.Copy()
		defer s.Close()
		err := s.Ping()
		c.Assert(err, IsNil)
		master = append(master, s)
	}

	before := time.Now()
	go func() {
		time.Sleep(3e9)
		master[0].Refresh()
	}()

	// Now a single ping must block, since it would need another
	// connection to the master, over the limit. Once the goroutine
	// above releases its socket, it should move on.
	session.Ping()
	delay := time.Now().Sub(before)
	c.Assert(delay > 3e9, Equals, true)
	c.Assert(delay < 6e9, Equals, true)
}
func (s *S) TestSetModeMonotonic(c *C) {
	// Must necessarily connect to a slave, otherwise the
	// master connection will be available first.
	session, err := mgo.Mongo("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Monotonic, false)

	c.Assert(session.Mode(), Equals, mgo.Monotonic)

	result := M{}
	cmd := session.DB("admin").C("$cmd")
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, false)

	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	result = M{}
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, true)

	// Wait since the sync also uses sockets.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 1)
	c.Assert(stats.SlaveConns, Equals, 2)
	c.Assert(stats.SocketsInUse, Equals, 1)

	session.SetMode(mgo.Monotonic, true)

	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 0)
}
func (s *S) TestFindForOnIter(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	ns := []int{40, 41, 42, 43, 44, 45, 46}
	for _, n := range ns {
		coll.Insert(M{"n": n})
	}

	session.Refresh() // Release socket.

	mgo.ResetStats()

	query := coll.Find(M{"n": M{"$gte": 42}}).Sort(M{"$natural": 1}).Prefetch(0).Batch(2)
	iter, err := query.Iter()
	c.Assert(err, IsNil)

	i := 2
	var result *struct{ N int }
	err = iter.For(&result, func() os.Error {
		c.Assert(i < 7, Equals, true)
		c.Assert(result.N, Equals, ns[i])
		if i == 3 { // The batch boundary.
			stats := mgo.GetStats()
			c.Assert(stats.ReceivedDocs, Equals, 2)
		}
		i++
		return nil
	})
	c.Assert(err, IsNil)

	session.Refresh() // Release socket.

	stats := mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 3)     // 1*QUERY_OP + 2*GET_MORE_OP
	c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs.
	c.Assert(stats.ReceivedDocs, Equals, 5)
	c.Assert(stats.SocketsInUse, Equals, 0)
}
func (s *S) TestEnsureIndexCaching(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	err = coll.EnsureIndexKey([]string{"a"})
	c.Assert(err, IsNil)

	mgo.ResetStats()

	// Second EnsureIndex should be cached and do nothing.
	err = coll.EnsureIndexKey([]string{"a"})
	c.Assert(err, IsNil)

	stats := mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 0)

	// Resetting the cache should make it contact the server again.
	session.ResetIndexCache()

	err = coll.EnsureIndexKey([]string{"a"})
	c.Assert(err, IsNil)

	stats = mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 2)

	// Dropping the index should also drop the cached index key.
	err = coll.DropIndex([]string{"a"})
	c.Assert(err, IsNil)

	mgo.ResetStats()

	err = coll.EnsureIndexKey([]string{"a"})
	c.Assert(err, IsNil)

	stats = mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 2)
}
// Connect to the master of a deployment with a single server,
// run an insert, and then ensure the insert worked and that a
// single connection was established.
func (s *S) TestTopologySyncWithSingleMaster(c *C) {
	// Use hostname here rather than IP, to make things trickier.
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1, "b": 2})
	c.Assert(err, IsNil)

	// One connection used for discovery. Master socket recycled for
	// insert. Socket is reserved after insert.
	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 1)
	c.Assert(stats.SlaveConns, Equals, 0)
	c.Assert(stats.SocketsInUse, Equals, 1)

	// Refresh session and socket must be released.
	session.Refresh()
	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 0)
}
func (s *S) TestSetModeStrong(c *C) {
	session, err := mgo.Dial("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Monotonic, false)
	session.SetMode(mgo.Strong, false)

	c.Assert(session.Mode(), Equals, mgo.Strong)

	result := M{}
	cmd := session.DB("admin").C("$cmd")
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, true)

	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	// Wait since the sync also uses sockets.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 1)
	c.Assert(stats.SlaveConns, Equals, 2)
	c.Assert(stats.SocketsInUse, Equals, 1)

	session.SetMode(mgo.Strong, true)

	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 0)
}
func (s *S) TearDownTest(c *C) {
	if s.stopped {
		s.StartAll()
	}
	for i := 0; ; i++ {
		stats := mgo.GetStats()
		if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
			break
		}
		if i == 20 {
			c.Fatal("Test left sockets in a dirty state")
		}
		c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
		time.Sleep(5e8)
	}
}
func (s *S) TestPing(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	// Just ensure the nonce has been received.
	result := struct{}{}
	err = session.Run("ping", &result)

	mgo.ResetStats()

	err = session.Ping()
	c.Assert(err, IsNil)

	// Pretty boring.
	stats := mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 1)
	c.Assert(stats.ReceivedOps, Equals, 1)
}
func (s *S) TestSetModeStrongAfterMonotonic(c *C) {
	// Test that shifting from Monotonic to Strong while
	// using a slave socket will keep the socket reserved
	// until the master socket is necessary, so that no
	// switch over occurs unless it's actually necessary.

	// Must necessarily connect to a slave, otherwise the
	// master connection will be available first.
	session, err := mgo.Mongo("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Monotonic, false)

	// Ensure we're talking to a slave, and reserve the socket.
	result := M{}
	err = session.Run("ismaster", &result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, false)

	// Switch to a Strong session.
	session.SetMode(mgo.Strong, false)

	// Wait since the sync also uses sockets.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	// Slave socket should still be reserved.
	stats := mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 1)

	// But any operation will switch it to the master.
	result = M{}
	err = session.Run("ismaster", &result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, true)
}
func (s *S) TestFindIterLimit(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	coll := session.DB("mydb").C("mycoll")

	ns := []int{40, 41, 42, 43, 44, 45, 46}
	for _, n := range ns {
		coll.Insert(M{"n": n})
	}

	session.Refresh() // Release socket.

	mgo.ResetStats()

	query := coll.Find(M{"n": M{"$gte": 42}}).Sort(M{"$natural": 1}).Limit(3)
	iter, err := query.Iter()
	c.Assert(err, IsNil)

	result := struct{ N int }{}
	for i := 2; i < 5; i++ {
		err = iter.Next(&result)
		c.Assert(err, IsNil)
		c.Assert(result.N, Equals, ns[i])
	}

	err = iter.Next(&result)
	c.Assert(err == mgo.NotFound, Equals, true)

	session.Refresh() // Release socket.

	stats := mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 1)     // 1*QUERY_OP
	c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP
	c.Assert(stats.ReceivedDocs, Equals, 3)
	c.Assert(stats.SocketsInUse, Equals, 0)
}
// Test tailable cursors in a situation where Next never gets to sleep once
// to respect the timeout requested on Tail.
func (s *S) TestFindTailTimeoutNoSleep(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	cresult := struct{ ErrMsg string }{}

	db := session.DB("mydb")
	err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult)
	c.Assert(err, IsNil)
	c.Assert(cresult.ErrMsg, Equals, "")
	coll := db.C("mycoll")

	ns := []int{40, 41, 42, 43, 44, 45, 46}
	for _, n := range ns {
		coll.Insert(M{"n": n})
	}

	session.Refresh() // Release socket.

	mgo.ResetStats()

	const timeout = 1

	query := coll.Find(M{"n": M{"$gte": 42}}).Sort(M{"$natural": 1}).Prefetch(0).Batch(2)
	iter, err := query.Tail(timeout)
	c.Assert(err, IsNil)

	n := len(ns)
	result := struct{ N int }{}
	for i := 2; i != n; i++ {
		err = iter.Next(&result)
		c.Assert(err, IsNil)
		c.Assert(result.N, Equals, ns[i])
		if i == 3 { // The batch boundary.
			stats := mgo.GetStats()
			c.Assert(stats.ReceivedDocs, Equals, 2)
		}
	}

	mgo.ResetStats()

	// The following call to Next will block.
	go func() {
		// The internal AwaitData timing of MongoDB is around 2 seconds,
		// so this item should arrive within the AwaitData threshold.
		time.Sleep(5e8)
		session := session.New()
		defer session.Close()
		coll := session.DB("mydb").C("mycoll")
		coll.Insert(M{"n": 47})
	}()

	c.Log("Will wait for Next with N=47...")
	err = iter.Next(&result)
	c.Assert(err, IsNil)
	c.Assert(result.N, Equals, 47)
	c.Log("Got Next with N=47!")

	// The following may break because it depends a bit on the internal
	// timing used by MongoDB's AwaitData logic. If it does, the problem
	// will be observed as more GET_MORE_OPs than predicted:
	// 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next +
	// 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47
	stats := mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 4)
	c.Assert(stats.ReceivedOps, Equals, 3)  // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP
	c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response

	c.Log("Will wait for a result which will never come...")

	started := time.Nanoseconds()
	err = iter.Next(&result)
	c.Assert(time.Nanoseconds()-started > timeout*1e9, Equals, true)
	c.Assert(err == mgo.TailTimeout, Equals, true)
}
// Test tailable cursors when a negative timeout is given to Tail, so that
// Next blocks indefinitely until new data arrives or the session is closed.
func (s *S) TestFindTailNoTimeout(c *C) {
	if *fast {
		c.Skip("-fast")
	}
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	cresult := struct{ ErrMsg string }{}

	db := session.DB("mydb")
	err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult)
	c.Assert(err, IsNil)
	c.Assert(cresult.ErrMsg, Equals, "")
	coll := db.C("mycoll")

	ns := []int{40, 41, 42, 43, 44, 45, 46}
	for _, n := range ns {
		coll.Insert(M{"n": n})
	}

	session.Refresh() // Release socket.

	mgo.ResetStats()

	query := coll.Find(M{"n": M{"$gte": 42}}).Sort(M{"$natural": 1}).Prefetch(0).Batch(2)
	iter, err := query.Tail(-1)
	c.Assert(err, IsNil)

	n := len(ns)
	result := struct{ N int }{}
	for i := 2; i != n; i++ {
		err = iter.Next(&result)
		c.Assert(err, IsNil)
		c.Assert(result.N, Equals, ns[i])
		if i == 3 { // The batch boundary.
			stats := mgo.GetStats()
			c.Assert(stats.ReceivedDocs, Equals, 2)
		}
	}

	mgo.ResetStats()

	// The following call to Next will block.
	go func() {
		time.Sleep(5e8)
		session := session.New()
		defer session.Close()
		coll := session.DB("mydb").C("mycoll")
		coll.Insert(M{"n": 47})
	}()

	c.Log("Will wait for Next with N=47...")
	err = iter.Next(&result)
	c.Assert(err, IsNil)
	c.Assert(result.N, Equals, 47)
	c.Log("Got Next with N=47!")

	// The following may break because it depends a bit on the internal
	// timing used by MongoDB's AwaitData logic. If it does, the problem
	// will be observed as more GET_MORE_OPs than predicted:
	// 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next +
	// 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47
	stats := mgo.GetStats()
	c.Assert(stats.SentOps, Equals, 4)
	c.Assert(stats.ReceivedOps, Equals, 3)  // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP
	c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response

	c.Log("Will wait for a result which will never come...")

	gotNext := make(chan os.Error)
	go func() {
		err := iter.Next(&result)
		gotNext <- err
	}()

	select {
	case err := <-gotNext:
		c.Fatal("Next returned: " + err.String())
	case <-time.After(3e9):
		// Good. Should still be sleeping at that point.
	}

	// Closing the session should cause Next to return.
	session.Close()

	select {
	case err := <-gotNext:
		c.Assert(err, Matches, "Closed explicitly")
	case <-time.After(1e9):
		c.Fatal("Closing the session did not unblock Next")
	}
}
func (s *S) TestCloneSession(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	// Do a dummy operation to wait for connection.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Tweak safety and query settings to ensure clone is copying those.
	session.SetSafe(nil)
	session.SetBatch(-1)
	clone := session.Clone()
	defer clone.Close()
	session.SetSafe(&mgo.Safe{})

	// Clone was copied while session was unsafe, so no errors.
	cloneColl := clone.DB("mydb").C("mycoll")
	err = cloneColl.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Original session was made safe again.
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, NotNil)

	// With Clone(), same socket is shared between sessions now.
	stats := mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 1)
	c.Assert(stats.SocketRefs, Equals, 2)

	// Refreshing one of them should let the original socket go,
	// while preserving the safety settings.
	clone.Refresh()
	err = cloneColl.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Must have used another connection now.
	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 2)
	c.Assert(stats.SocketRefs, Equals, 2)

	// Ensure query parameters were cloned.
	err = cloneColl.Insert(M{"_id": 2})
	c.Assert(err, IsNil)

	// Ping the database to ensure the nonce has been received already.
	c.Assert(clone.Ping(), IsNil)

	mgo.ResetStats()

	iter := cloneColl.Find(M{}).Iter()
	c.Assert(err, IsNil)

	m := M{}
	ok := iter.Next(m)
	c.Assert(ok, Equals, true)
	err = iter.Err()
	c.Assert(err, IsNil)

	// If Batch(-1) is in effect, a single document must have been received.
	stats = mgo.GetStats()
	c.Assert(stats.ReceivedDocs, Equals, 1)
}
func (s *S) TestGridFSSeek(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	b := make([]byte, 5)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	o, err := file.Seek(3, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(3))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("defgh"))

	o, err = file.Seek(5, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(13))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("nopqr"))

	o, err = file.Seek(-10, os.SEEK_END)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(12))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("mnopq"))

	o, err = file.Seek(8, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(8))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("ijklm"))

	// Trivial seek forward within same chunk. Already
	// got the data, shouldn't touch the database.
	sent := mgo.GetStats().SentOps
	o, err = file.Seek(1, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(14))
	c.Assert(mgo.GetStats().SentOps, Equals, sent)
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("opqrs"))

	// Try seeking past end of file.
	file.Seek(3, os.SEEK_SET)
	o, err = file.Seek(23, os.SEEK_SET)
	c.Assert(err, Matches, "Seek past end of file")
	c.Assert(o, Equals, int64(3))
}