Code example #1
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSOpen(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()

	file, err = gfs.Open("myfile.txt")
	c.Assert(err, IsNil)
	defer file.Close()

	var b [1]byte

	_, err = file.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "2")
}
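The test above exercises the basic GridFS round trip: create a named file, write to it, close it, then open the newest version by that name and read it back. Below is a minimal, hedged sketch of the same flow outside the test harness, using only the calls shown in these examples (mgo.Mongo, Session.DB, DB.GridFS, Create, Open, Read). The server address, database name, file name, and import path are placeholders, not taken from the project.

package main

import (
	"fmt"

	"launchpad.net/mgo" // placeholder import path; use wherever this mgo checkout is installed
)

func main() {
	// Connect with the same call the tests use; the address is an assumption.
	session, err := mgo.Mongo("localhost:27017")
	if err != nil {
		panic(err)
	}
	defer session.Close()

	gfs := session.DB("mydb").GridFS("fs")

	// Create and write a file, as in the test.
	file, err := gfs.Create("greeting.txt")
	if err != nil {
		panic(err)
	}
	file.Write([]byte("hello"))
	file.Close()

	// Open the most recent file with that name and read it back.
	file, err = gfs.Open("greeting.txt")
	if err != nil {
		panic(err)
	}
	defer file.Close()

	buf := make([]byte, 5)
	n, _ := file.Read(buf)
	fmt.Println(string(buf[:n]))
}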
Code example #2
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestPrimaryShutdownStrong(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Mongo("localhost:40021")
	c.Assert(err, IsNil)
	defer session.Close()

	// With strong consistency, this will open a socket to the master.
	result := &struct{ Host string }{}
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)

	// Kill the master.
	host := result.Host
	s.Stop(host)

	// This must fail, since the connection was broken.
	err = session.Run("serverStatus", result)
	c.Assert(err, Equals, os.EOF)

	// With strong consistency, it fails again until reset.
	err = session.Run("serverStatus", result)
	c.Assert(err, Equals, os.EOF)

	session.Refresh()

	// Now we should be able to talk to the new master.
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(result.Host, Not(Equals), host)
}
Code example #3
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestPrimaryShutdownEventual(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Mongo("localhost:40021")
	c.Assert(err, IsNil)
	defer session.Close()

	result := &struct{ Host string }{}
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	master := result.Host

	session.SetMode(mgo.Eventual, true)

	// Should connect to the master when needed.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	// Kill the master.
	s.Stop(master)

	// Should still work, with the new master now.
	coll = session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(result.Host, Not(Equals), master)
}
Code example #4
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestDirect(c *C) {
	session, err := mgo.Mongo("localhost:40012?connect=direct")
	c.Assert(err, IsNil)
	defer session.Close()

	// We know that server is a slave.
	session.SetMode(mgo.Monotonic, true)

	result := &struct{ Host string }{}
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)

	stats := mgo.GetStats()
	c.Assert(stats.SocketsAlive, Equals, 1)
	c.Assert(stats.SocketsInUse, Equals, 1)
	c.Assert(stats.SocketRefs, Equals, 1)

	// We've got no master, so it'll time out.
	session.SetSyncTimeout(5e8)

	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"test": 1})
	c.Assert(err, Matches, "no reachable servers")

	// Slave is still reachable.
	result.Host = ""
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)
}
Code example #5
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSRemove(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	file.Close()

	err = gfs.Remove("myfile.txt")
	c.Assert(err, IsNil)

	_, err = gfs.Open("myfile.txt")
	c.Assert(err == mgo.NotFound, Equals, true)

	n, err := db.C("fs.chunks").Find(nil).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}
Code example #6
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestSetModeMonotonicAfterStrong(c *C) {
	// Test that a strong session shifting to a monotonic
	// one preserves the socket untouched.

	session, err := mgo.Mongo("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	// Insert something to force a connection to the master.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	session.SetMode(mgo.Monotonic, false)

	// Wait since the sync also uses sockets.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	// Master socket should still be reserved.
	stats := mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 1)

	// Confirm it's the master even though it's Monotonic by now.
	result := M{}
	cmd := session.DB("admin").C("$cmd")
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, true)
}
Code example #7
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSReadChunking(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	b := make([]byte, 30)

	// Smaller than the chunk size.
	n, err = file.Read(b[:3])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)
	c.Assert(b[:3], Equals, []byte("abc"))

	// Boundary in the middle.
	n, err = file.Read(b[:4])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)
	c.Assert(b[:4], Equals, []byte("defg"))

	// Boundary at the end.
	n, err = file.Read(b[:3])
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)
	c.Assert(b[:3], Equals, []byte("hij"))

	// Larger than the chunk size, with 3 chunks.
	n, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 12)
	c.Assert(b[:12], Equals, []byte("klmnopqrstuv"))

	n, err = file.Read(b)
	c.Assert(n, Equals, 0)
	c.Assert(err == os.EOF, Equals, true)

	err = file.Close()
	c.Assert(err, IsNil)
}
Code example #8
File: cluster_test.go Project: CloudMarc/mgo
func getOpCounters(server string) (c *OpCounters, err os.Error) {
	session, err := mgo.Mongo(server + "?connect=direct")
	if err != nil {
		return nil, err
	}
	defer session.Close()
	session.SetMode(mgo.Monotonic, true)
	result := struct{ OpCounters }{}
	err = session.Run("serverStatus", &result)
	return &result.OpCounters, err
}
Code example #9
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestNewSession(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	// Do a dummy operation to wait for connection.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Tweak safety and query settings to ensure other has copied those.
	session.SetSafe(nil)
	session.SetBatch(-1)
	other := session.New()
	defer other.Close()
	session.SetSafe(&mgo.Safe{})

	// The new session was copied while the original was unsafe, so no errors.
	otherColl := other.DB("mydb").C("mycoll")
	err = otherColl.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Original session was made safe again.
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, NotNil)

	// With New(), each session has its own socket now.
	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 2)
	c.Assert(stats.SocketsInUse, Equals, 2)

	// Ensure query parameters were cloned.
	err = otherColl.Insert(M{"_id": 2})
	c.Assert(err, IsNil)

	// Ping the database to ensure the nonce has been received already.
	c.Assert(other.Ping(), IsNil)

	mgo.ResetStats()

	iter, err := otherColl.Find(M{}).Iter()
	c.Assert(err, IsNil)

	m := M{}
	err = iter.Next(m)
	c.Assert(err, IsNil)

	// If Batch(-1) is in effect, a single document must have been received.
	stats = mgo.GetStats()
	c.Assert(stats.ReceivedDocs, Equals, 1)
}
Code example #10
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestPreserveSocketCountOnSync(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	stats := mgo.GetStats()
	for stats.MasterConns+stats.SlaveConns != 3 {
		stats = mgo.GetStats()
		c.Log("Waiting for all connections to be established...")
		time.Sleep(5e8)
	}

	c.Assert(stats.SocketsAlive, Equals, 3)

	// Kill the master (with rs1, 'a' is always the master).
	s.Stop("localhost:40011")

	// Wait for the logic to run for a bit and bring it back.
	go func() {
		time.Sleep(5e9)
		s.StartAll()
	}()

	// Do an action to kick the resync logic in, and also to
	// wait until the cluster recognizes the server is back.
	result := struct{ Ok bool }{}
	err = session.Run("getLastError", &result)
	c.Assert(err, IsNil)
	c.Assert(result.Ok, Equals, true)

	for i := 0; i != 20; i++ {
		stats = mgo.GetStats()
		if stats.SocketsAlive == 3 {
			break
		}
		c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive)
		time.Sleep(5e8)
	}

	// Ensure the number of sockets is preserved after syncing.
	stats = mgo.GetStats()
	c.Assert(stats.SocketsAlive, Equals, 3)
	c.Assert(stats.SocketsInUse, Equals, 1)
	c.Assert(stats.SocketRefs, Equals, 1)
}
Code example #11
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSOpenNotFound(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.OpenId("non-existent")
	c.Assert(err == mgo.NotFound, Equals, true)
	c.Assert(file, IsNil)

	file, err = gfs.Open("non-existent")
	c.Assert(err == mgo.NotFound, Equals, true)
	c.Assert(file, IsNil)
}
Code example #12
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestSetModeMonotonic(c *C) {
	// Must necessarily connect to a slave, otherwise the
	// master connection will be available first.
	session, err := mgo.Mongo("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Monotonic, false)

	c.Assert(session.Mode(), Equals, mgo.Monotonic)

	result := M{}
	cmd := session.DB("admin").C("$cmd")
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, false)

	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	result = M{}
	err = cmd.Find(M{"ismaster": 1}).One(&result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, true)

	// Wait since the sync also uses sockets.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	stats := mgo.GetStats()
	c.Assert(stats.MasterConns, Equals, 1)
	c.Assert(stats.SlaveConns, Equals, 2)
	c.Assert(stats.SocketsInUse, Equals, 1)

	session.SetMode(mgo.Monotonic, true)

	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 0)
}
Code example #13
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestSetModeStrongAfterMonotonic(c *C) {
	// Test that shifting from Monotonic to Strong while
	// using a slave socket keeps that socket reserved until
	// the master socket is actually necessary, so that no
	// premature switchover occurs.

	// Must necessarily connect to a slave, otherwise the
	// master connection will be available first.
	session, err := mgo.Mongo("localhost:40012")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Monotonic, false)

	// Ensure we're talking to a slave, and reserve the socket.
	result := M{}
	err = session.Run("ismaster", &result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, false)

	// Switch to a Strong session.
	session.SetMode(mgo.Strong, false)

	// Wait since the sync also uses sockets.
	for len(session.LiveServers()) != 3 {
		c.Log("Waiting for cluster sync to finish...")
		time.Sleep(5e8)
	}

	// Slave socket should still be reserved.
	stats := mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 1)

	// But any operation will switch it to the master.
	result = M{}
	err = session.Run("ismaster", &result)
	c.Assert(err, IsNil)
	c.Assert(result["ismaster"], Equals, true)
}
Code example #14
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestPrimaryShutdownMonotonic(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Mongo("localhost:40021")
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Monotonic, true)

	// Insert something to force a switch to the master.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	result := &struct{ Host string }{}
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)

	// Kill the master.
	host := result.Host
	s.Stop(host)

	// This must fail, since the connection was broken.
	err = session.Run("serverStatus", result)
	c.Assert(err, Equals, os.EOF)

	// With monotonic consistency, it fails again until reset.
	err = session.Run("serverStatus", result)
	c.Assert(err, Equals, os.EOF)

	session.Refresh()

	// Now we should be able to talk to the new master.
	err = session.Run("serverStatus", result)
	c.Assert(err, IsNil)
	c.Assert(result.Host, Not(Equals), host)
}
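The cluster tests above revolve around the session consistency modes and the recovery path after a primary shutdown: Strong and Monotonic sessions keep returning os.EOF until Refresh is called, while an Eventual session simply carries on against the newly elected master. The snippet below is a condensed, hedged sketch of that API surface as these tests use it; the address mirrors the test replica set and error handling is reduced to panics.

package main

import (
	"launchpad.net/mgo" // placeholder import path for this mgo checkout
)

func main() {
	session, err := mgo.Mongo("localhost:40021")
	if err != nil {
		panic(err)
	}
	defer session.Close()

	// Strong is the default mode: operations go to the master over a
	// reserved socket (see TestPrimaryShutdownStrong).

	// Eventual: after the old master goes away, inserts and commands simply
	// proceed against whichever member takes over (see TestPrimaryShutdownEventual).
	session.SetMode(mgo.Eventual, true)

	// Monotonic: reads start on a slave and move to the master once a write
	// is issued (see TestSetModeMonotonic).
	session.SetMode(mgo.Monotonic, true)

	// When a primary shutdown breaks the reserved socket, Strong and Monotonic
	// sessions keep failing with os.EOF; Refresh drops the dead socket so the
	// next operation can reach the new master.
	session.Refresh()
}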
Code example #15
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestSyncTimeout(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	// 40009 isn't used by the test servers.
	session, err := mgo.Mongo("localhost:40009")
	c.Assert(err, IsNil)
	defer session.Close()

	timeout := int64(3e9)

	session.SetSyncTimeout(timeout)

	started := time.Nanoseconds()

	// Do something.
	result := struct{ Ok bool }{}
	err = session.Run("getLastError", &result)
	c.Assert(err, Matches, "no reachable servers")
	c.Assert(time.Nanoseconds()-started > timeout, Equals, true)
	c.Assert(time.Nanoseconds()-started < timeout*2, Equals, true)
}
Code example #16
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSRemoveId(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'1'})
	file.Close()

	file, err = gfs.Create("myfile.txt")
	c.Assert(err, IsNil)
	file.Write([]byte{'2'})
	id := file.Id()
	file.Close()

	err = gfs.RemoveId(id)
	c.Assert(err, IsNil)

	file, err = gfs.Open("myfile.txt")
	c.Assert(err, IsNil)
	defer file.Close()

	var b [1]byte

	_, err = file.Read(b[:])
	c.Assert(err, IsNil)
	c.Assert(string(b[:]), Equals, "1")

	n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}
Code example #17
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSReadAll(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	b := make([]byte, 30)
	n, err = file.Read(b)
	c.Assert(n, Equals, 22)
	c.Assert(err, IsNil)

	n, err = file.Read(b)
	c.Assert(n, Equals, 0)
	c.Assert(err == os.EOF, Equals, true)

	err = file.Close()
	c.Assert(err, IsNil)
}
Code example #18
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestPrimaryShutdownMonotonicWithSlave(c *C) {
	if *fast {
		c.Skip("-fast")
	}

	session, err := mgo.Mongo("localhost:40021")
	c.Assert(err, IsNil)
	defer session.Close()

	ssresult := &struct{ Host string }{}
	imresult := &struct{ IsMaster bool }{}

	// Figure the master while still using the strong session.
	err = session.Run("serverStatus", ssresult)
	c.Assert(err, IsNil)
	err = session.Run("isMaster", imresult)
	c.Assert(err, IsNil)
	master := ssresult.Host
	c.Assert(imresult.IsMaster, Equals, true, Bug("%s is not the master", master))

	// Create new monotonic session with an explicit address to ensure
	// a slave is synchronized before the master, otherwise a connection
	// with the master may be used below for lack of other options.
	var addr string
	switch {
	case strings.HasSuffix(ssresult.Host, ":40021"):
		addr = "localhost:40022"
	case strings.HasSuffix(ssresult.Host, ":40022"):
		addr = "localhost:40021"
	case strings.HasSuffix(ssresult.Host, ":40023"):
		addr = "localhost:40021"
	default:
		c.Fatal("Unknown host: ", ssresult.Host)
	}

	session, err = mgo.Mongo(addr)
	c.Assert(err, IsNil)
	defer session.Close()

	session.SetMode(mgo.Monotonic, true)

	// Check the address of the socket associated with the monotonic session.
	c.Log("Running serverStatus and isMaster with monotonic session")
	err = session.Run("serverStatus", ssresult)
	c.Assert(err, IsNil)
	err = session.Run("isMaster", imresult)
	c.Assert(err, IsNil)
	slave := ssresult.Host
	c.Assert(imresult.IsMaster, Equals, false, Bug("%s is not a slave", slave))

	c.Assert(master, Not(Equals), slave)

	// Kill the master.
	s.Stop(master)

	// Session must still be good, since we were talking to a slave.
	err = session.Run("serverStatus", ssresult)
	c.Assert(err, IsNil)

	c.Assert(ssresult.Host, Equals, slave,
		Bug("Monotonic session moved from %s to %s", slave, ssresult.Host))

	// If we try to insert something, it will have to wait until the new
	// master is available, move the connection over, and then work correctly.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"a": 1})
	c.Assert(err, IsNil)

	// Must now be talking to the new master.
	err = session.Run("serverStatus", ssresult)
	c.Assert(err, IsNil)
	err = session.Run("isMaster", imresult)
	c.Assert(err, IsNil)
	c.Assert(imresult.IsMaster, Equals, true, Bug("%s is not the master", master))

	// ... which is not the old one, since it's still dead.
	c.Assert(ssresult.Host, Not(Equals), master)
}
Code example #19
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSCreate(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	before := bson.Now()

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	n, err := file.Write([]byte("some data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 9)

	err = file.Close()
	c.Assert(err, IsNil)

	after := bson.Now()

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	fileId, ok := result["_id"].(bson.ObjectId)
	c.Assert(ok, Equals, true)
	c.Assert(fileId.Valid(), Equals, true)
	result["_id"] = "<id>"

	fileTs, ok := result["uploadDate"].(bson.Timestamp)
	c.Assert(ok, Equals, true)
	c.Assert(fileTs >= before && fileTs <= after, Equals, true)
	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":        "<id>",
		"length":     9,
		"chunkSize":  262144,
		"uploadDate": "<timestamp>",
		"md5":        "1e50210a0202497fb79bc38b6ade6c34",
	}
	c.Assert(result, Equals, expected)

	// Check the chunk.
	result = M{}
	err = db.C("fs.chunks").Find(nil).One(result)
	c.Assert(err, IsNil)

	chunkId, ok := result["_id"].(bson.ObjectId)
	c.Assert(ok, Equals, true)
	c.Assert(chunkId.Valid(), Equals, true)
	result["_id"] = "<id>"

	expected = M{
		"_id":      "<id>",
		"files_id": fileId,
		"n":        0,
		"data":     []byte("some data"),
	}
	c.Assert(result, Equals, expected)

	// Check that an index was created.
	indexes, err := db.C("fs.chunks").Indexes()
	c.Assert(err, IsNil)
	c.Assert(len(indexes), Equals, 2)
	c.Assert(indexes[1].Key, Equals, []string{"files_id", "n"})
}
Code example #20
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSCreateWithChunking(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	file.SetChunkSize(5)

	// Smaller than the chunk size.
	n, err := file.Write([]byte("abc"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)

	// Boundary in the middle.
	n, err = file.Write([]byte("defg"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)

	// Boundary at the end.
	n, err = file.Write([]byte("hij"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 3)

	// Larger than the chunk size, with 3 chunks.
	n, err = file.Write([]byte("klmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 12)

	err = file.Close()
	c.Assert(err, IsNil)

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	fileId, _ := result["_id"].(bson.ObjectId)
	c.Assert(fileId.Valid(), Equals, true)
	result["_id"] = "<id>"
	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":        "<id>",
		"length":     22,
		"chunkSize":  5,
		"uploadDate": "<timestamp>",
		"md5":        "44a66044834cbe55040089cabfc102d5",
	}
	c.Assert(result, Equals, expected)

	// Check the chunks.
	iter, err := db.C("fs.chunks").Find(nil).Sort(M{"n": 1}).Iter()
	c.Assert(err, IsNil)

	dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}

	for i := 0; ; i++ {
		result = M{}
		err := iter.Next(result)
		if err == mgo.NotFound {
			if i != 5 {
				c.Fatalf("Expected 5 chunks, got %d", i)
			}
			break
		}

		result["_id"] = "<id>"

		expected = M{
			"_id":      "<id>",
			"files_id": fileId,
			"n":        i,
			"data":     []byte(dataChunks[i]),
		}
		c.Assert(result, Equals, expected)
	}
}
Code example #21
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) {
	session, err := mgo.Mongo("localhost:40021")
	c.Assert(err, IsNil)
	defer session.Close()

	ssresult := &struct{ Host string }{}
	imresult := &struct{ IsMaster bool }{}

	// Figure the master while still using the strong session.
	err = session.Run("serverStatus", ssresult)
	c.Assert(err, IsNil)
	err = session.Run("isMaster", imresult)
	c.Assert(err, IsNil)
	master := ssresult.Host
	c.Assert(imresult.IsMaster, Equals, true, Bug("%s is not the master", master))

	// Collect op counters for everyone.
	opc21a, err := getOpCounters("localhost:40021")
	c.Assert(err, IsNil)
	opc22a, err := getOpCounters("localhost:40022")
	c.Assert(err, IsNil)
	opc23a, err := getOpCounters("localhost:40023")
	c.Assert(err, IsNil)

	// Do a SlaveOk query through MongoS

	mongos, err := mgo.Mongo("localhost:40202")
	c.Assert(err, IsNil)
	defer mongos.Close()

	mongos.SetMode(mgo.Monotonic, true)

	coll := mongos.DB("mydb").C("mycoll")
	result := &struct{}{}
	for i := 0; i != 5; i++ {
		err := coll.Find(nil).One(result)
		c.Assert(err, Equals, mgo.NotFound)
	}

	// Collect op counters for everyone again.
	opc21b, err := getOpCounters("localhost:40021")
	c.Assert(err, IsNil)
	opc22b, err := getOpCounters("localhost:40022")
	c.Assert(err, IsNil)
	opc23b, err := getOpCounters("localhost:40023")
	c.Assert(err, IsNil)

	masterPort := master[strings.Index(master, ":")+1:]

	var masterDelta, slaveDelta int
	switch masterPort {
	case "40021":
		masterDelta = opc21b.Query - opc21a.Query
		slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query)
	case "40022":
		masterDelta = opc22b.Query - opc22a.Query
		slaveDelta = (opc21b.Query - opc21a.Query) + (opc23b.Query - opc23a.Query)
	case "40023":
		masterDelta = opc23b.Query - opc23a.Query
		slaveDelta = (opc21b.Query - opc21a.Query) + (opc22b.Query - opc22a.Query)
	default:
		c.Fatal("Uh?")
	}

	c.Check(masterDelta, Equals, 0) // Just the counting itself.
	c.Check(slaveDelta, Equals, 5)  // The counting for both, plus 5 queries above.
}
Code example #22
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSSeek(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)
	id := file.Id()

	file.SetChunkSize(5)

	n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 22)

	err = file.Close()
	c.Assert(err, IsNil)

	b := make([]byte, 5)

	file, err = gfs.OpenId(id)
	c.Assert(err, IsNil)

	o, err := file.Seek(3, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(3))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("defgh"))

	o, err = file.Seek(5, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(13))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("nopqr"))

	o, err = file.Seek(-10, os.SEEK_END)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(12))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("mnopq"))

	o, err = file.Seek(8, os.SEEK_SET)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(8))
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("ijklm"))

	// Trivial seek forward within same chunk. Already
	// got the data, shouldn't touch the database.
	sent := mgo.GetStats().SentOps
	o, err = file.Seek(1, os.SEEK_CUR)
	c.Assert(err, IsNil)
	c.Assert(o, Equals, int64(14))
	c.Assert(mgo.GetStats().SentOps, Equals, sent)
	_, err = file.Read(b)
	c.Assert(err, IsNil)
	c.Assert(b, Equals, []byte("opqrs"))

	// Try seeking past end of file.
	file.Seek(3, os.SEEK_SET)
	o, err = file.Seek(23, os.SEEK_SET)
	c.Assert(err, Matches, "Seek past end of file")
	c.Assert(o, Equals, int64(3))
}
Code example #23
File: cluster_test.go Project: CloudMarc/mgo
func (s *S) TestCloneSession(c *C) {
	session, err := mgo.Mongo("localhost:40001")
	c.Assert(err, IsNil)
	defer session.Close()

	// Do a dummy operation to wait for connection.
	coll := session.DB("mydb").C("mycoll")
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Tweak safety and query settings to ensure clone is copying those.
	session.SetSafe(nil)
	session.SetBatch(-1)
	clone := session.Clone()
	defer clone.Close()
	session.SetSafe(&mgo.Safe{})

	// Clone was copied while session was unsafe, so no errors.
	cloneColl := clone.DB("mydb").C("mycoll")
	err = cloneColl.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Original session was made safe again.
	err = coll.Insert(M{"_id": 1})
	c.Assert(err, NotNil)

	// With Clone(), same socket is shared between sessions now.
	stats := mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 1)
	c.Assert(stats.SocketRefs, Equals, 2)

	// Refreshing one of them should let the original socket go,
	// while preserving the safety settings.
	clone.Refresh()
	err = cloneColl.Insert(M{"_id": 1})
	c.Assert(err, IsNil)

	// Must have used another connection now.
	stats = mgo.GetStats()
	c.Assert(stats.SocketsInUse, Equals, 2)
	c.Assert(stats.SocketRefs, Equals, 2)

	// Ensure query parameters were cloned.
	err = cloneColl.Insert(M{"_id": 2})
	c.Assert(err, IsNil)

	// Ping the database to ensure the nonce has been received already.
	c.Assert(clone.Ping(), IsNil)

	mgo.ResetStats()

	iter, err := cloneColl.Find(M{}).Iter()
	c.Assert(err, IsNil)

	m := M{}
	err = iter.Next(m)
	c.Assert(err, IsNil)

	// If Batch(-1) is in effect, a single document must have been received.
	stats = mgo.GetStats()
	c.Assert(stats.ReceivedDocs, Equals, 1)
}
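TestNewSession and TestCloneSession contrast the two ways of deriving a session: both copy the safety and query settings in effect at the moment of the call, but New acquires its own socket while Clone shares the parent's reserved socket until one of them is refreshed. Below is a hedged sketch of how that difference shows up, again using the legacy API from these examples; the address and variable names are placeholders.

package main

import "launchpad.net/mgo" // placeholder import path for this mgo checkout

func main() {
	root, err := mgo.Mongo("localhost:40001") // same address as the tests above
	if err != nil {
		panic(err)
	}
	defer root.Close()

	// Clone copies safety and query settings and shares root's reserved socket;
	// GetStats would report one socket in use with two references, as
	// TestCloneSession asserts.
	clone := root.Clone()
	defer clone.Close()

	// New copies the settings too, but acquires its own connection to the
	// master; GetStats would report two sockets in use, as TestNewSession asserts.
	fresh := root.New()
	defer fresh.Close()

	// Refreshing the clone releases the shared socket; its next operation picks
	// up a connection of its own while keeping the copied settings.
	clone.Refresh()
}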
Code example #24
File: gridfs_test.go Project: CloudMarc/mgo
func (s *S) TestGridFSFileDetails(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	gfs := db.GridFS("fs")

	file, err := gfs.Create("myfile1.txt")
	c.Assert(err, IsNil)

	n, err := file.Write([]byte("some"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 4)

	c.Assert(file.Size(), Equals, int64(4))

	n, err = file.Write([]byte(" data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 5)

	c.Assert(file.Size(), Equals, int64(9))

	id, _ := file.Id().(bson.ObjectId)
	c.Assert(id.Valid(), Equals, true)
	c.Assert(file.Name(), Equals, "myfile1.txt")
	c.Assert(file.ContentType(), Equals, "")

	var info interface{}
	err = file.GetInfo(&info)
	c.Assert(err, IsNil)
	c.Assert(info, IsNil)

	file.SetId("myid")
	file.SetName("myfile2.txt")
	file.SetContentType("text/plain")
	file.SetInfo(M{"any": "thing"})

	c.Assert(file.Id(), Equals, "myid")
	c.Assert(file.Name(), Equals, "myfile2.txt")
	c.Assert(file.ContentType(), Equals, "text/plain")

	err = file.GetInfo(&info)
	c.Assert(err, IsNil)
	c.Assert(info, Equals, bson.M{"any": "thing"})

	err = file.Close()
	c.Assert(err, IsNil)

	c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")

	c.Assert(file.UploadDate() < time.Nanoseconds(), Equals, true)
	c.Assert(file.UploadDate() > time.Nanoseconds()-3e9, Equals, true)

	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":         "myid",
		"length":      9,
		"chunkSize":   262144,
		"uploadDate":  "<timestamp>",
		"md5":         "1e50210a0202497fb79bc38b6ade6c34",
		"filename":    "myfile2.txt",
		"contentType": "text/plain",
		"metadata":    bson.M{"any": "thing"},
	}
	c.Assert(result, Equals, expected)
}