func ping(c *Context) { defer pingRecover(c.w) p := parser{r: c.r} var ip net.IP p.IP(&ip) if p.err != nil { panic(p.err) } co := c.DB.C("users") var user *User co.Find(M{"ips": ip.String()}).One(&user) if user == nil { //insert a new user! co.Insert(M{ "lastseen": bson.Now(), "lastip": ip.String(), "ips": []interface{}{ip.String()}, }) fmt.Fprintln(c.w, "You have been added to the user database") return } fmt.Fprintln(c.w, "You were already in here") }
func (s *S) TestNow(c *C) { before := time.Nanoseconds() time.Sleep(1e6) now := bson.Now() time.Sleep(1e6) after := time.Nanoseconds() c.Assert(reflect.TypeOf(now), Equals, reflect.TypeOf(bson.Timestamp(00))) c.Assert(int64(now) > before && int64(now) < after, Equals, true, Bug("now=%d, before=%d, after=%d", now, before, after)) }
// insertFile finalizes a GridFS upload: it waits until every outstanding
// chunk insert has completed, then writes the file's metadata document to
// the Files collection and ensures the chunk index exists.
// NOTE(review): file.c.Wait() requires that the condition variable's
// associated mutex is held by the caller — confirm insertFile is only
// invoked with that lock taken.
func (file *GridFile) insertFile() {
	// Checksum of all data written so far; every chunk has already been
	// handed off (only their inserts may still be pending), so summing
	// before the wait loop is safe.
	hexsum := hex.EncodeToString(file.wsum.Sum())
	for file.wpending > 0 {
		debugf("GridFile %p: waiting for %d pending chunks to insert file", file, file.wpending)
		file.c.Wait()
	}
	// Only record the metadata document if no chunk insert failed.
	if file.err == nil {
		file.doc.UploadDate = bson.Now()
		file.doc.MD5 = hexsum
		file.err = file.gfs.Files.Insert(file.doc)
		// Best-effort (files_id, n) index; its error is deliberately not
		// stored in file.err — presumably to avoid masking the insert result.
		file.gfs.Chunks.EnsureIndexKey([]string{"files_id", "n"})
	}
}
func announce(c *Context) { defer announceRecover(c.w) log.Print(c.r.RawURL) a, err := ParseAnnounce(c.r) if err != nil { log.Panic("ERROR: ", err) } //grab the user by ip co := c.DB.C("users") var user *User co.Find(M{"ips": a.IP.String()}).One(&user) if user == nil { log.Panic("Unauthorized IP: ", a.IP) } //update the user in the database user.LastIP = a.IP.String() user.LastSeen = bson.Now() user.Update(a.InfoHash, a.Event, a.Left) co.Update(M{"_id": user.ID}, user) var peers []StructPeer query := co.Find(M{"info.infohash": a.InfoHash, "_id": M{"$ne": user.ID}}) query.Limit(min(50, max(0, a.Numwant))) //bound between 0 <= n <= 50 selector := M{"lastip": 1, "port": 1} if !a.NoPeerId { selector["peerid"] = 1 } query.Select(selector) query.All(&peers) if query.Iter().Err() != nil { panic(query.Iter().Err()) } enc := bencode.NewEncoder(c.w) //build the response if a.Compact { response := CompactAnnounceResponse{ Interval: 30, Complete: 0, Incomplete: 0, Peers: make([]string, len(peers)), } for i := range peers { response.Peers[i] = peers[i].Compact() } enc.Encode(response) } else { response := AnnounceResponse{ Interval: 30, Complete: 0, Incomplete: 0, Peers: peers, } enc.Encode(response) } fmt.Fprintln(c.w, "") }
// TestGridFSCreate exercises the full GridFS write path against a live
// mongod on localhost:40011: create a file, write data, close it, then
// verify the resulting fs.files and fs.chunks documents and the chunk index.
func (s *S) TestGridFSCreate(c *C) {
	session, err := mgo.Mongo("localhost:40011")
	c.Assert(err, IsNil)
	defer session.Close()

	db := session.DB("mydb")

	// Bracket the upload with timestamps so uploadDate can be range-checked.
	before := bson.Now()

	gfs := db.GridFS("fs")
	file, err := gfs.Create("")
	c.Assert(err, IsNil)

	n, err := file.Write([]byte("some data"))
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 9)

	err = file.Close()
	c.Assert(err, IsNil)

	after := bson.Now()

	// Check the file information.
	result := M{}
	err = db.C("fs.files").Find(nil).One(result)
	c.Assert(err, IsNil)

	// _id and uploadDate are unpredictable: validate them individually,
	// then replace them with placeholders so the document can be compared
	// against a fixed expected value.
	fileId, ok := result["_id"].(bson.ObjectId)
	c.Assert(ok, Equals, true)
	c.Assert(fileId.Valid(), Equals, true)
	result["_id"] = "<id>"
	fileTs, ok := result["uploadDate"].(bson.Timestamp)
	c.Assert(ok, Equals, true)
	c.Assert(fileTs >= before && fileTs <= after, Equals, true)
	result["uploadDate"] = "<timestamp>"

	expected := M{
		"_id":        "<id>",
		"length":     9,
		"chunkSize":  262144,
		"uploadDate": "<timestamp>",
		"md5":        "1e50210a0202497fb79bc38b6ade6c34",
	}
	c.Assert(result, Equals, expected)

	// Check the chunk.
	result = M{}
	err = db.C("fs.chunks").Find(nil).One(result)
	c.Assert(err, IsNil)

	chunkId, ok := result["_id"].(bson.ObjectId)
	c.Assert(ok, Equals, true)
	c.Assert(chunkId.Valid(), Equals, true)
	result["_id"] = "<id>"

	expected = M{
		"_id":      "<id>",
		"files_id": fileId,
		"n":        0,
		"data":     []byte("some data"),
	}
	c.Assert(result, Equals, expected)

	// Check that an index was created (beyond the default _id index).
	indexes, err := db.C("fs.chunks").Indexes()
	c.Assert(err, IsNil)
	c.Assert(len(indexes), Equals, 2)
	c.Assert(indexes[1].Key, Equals, []string{"files_id", "n"})
}