func (ps *ProxyStorage) GetByIpPort(ip string, port int) (Proxy, error) { var query interface{} queryString := fmt.Sprintf(`[{"eq" : "%s", "in" : ["Ip"] },{"eq" : "%n", "in" : ["Port"] }]`, ip, port) json.Unmarshal([]byte(queryString), &query) queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys var proxy Proxy if err := db.EvalQuery(query, ps.proxyCol, &queryResult); err != nil { logger.Errf("Error while query eval : %s. Error : %s", query, err) return proxy, err } if len(queryResult) == 0 { return proxy, errors.New("No proxy found") } for k, _ := range queryResult { //expecting single key r, err := ps.proxyCol.Read(k) if err != nil { return proxy, err } err = mapstructure.Decode(r, &proxy) if err != nil { return proxy, err } } return proxy, nil }
// benchmark runs a document CRUD benchmark (insert/read/query/update/delete)
// against a throwaway collection under /tmp, intended for catching performance
// regressions. Timing/reporting is done by the average() helper; benchSize and
// benchCleanup are package-level knobs.
func benchmark() {
	ids := make([]int, 0, benchSize)
	// Prepare a collection with several indexes (nested paths plus array attributes).
	tmp := "/tmp/tiedot_bench"
	os.RemoveAll(tmp)
	if benchCleanup {
		defer os.RemoveAll(tmp)
	}
	benchDB, col := mkTmpDBAndCol(tmp, "tmp")
	defer benchDB.Close()
	col.Index([]string{"nested", "nested", "str"})
	col.Index([]string{"nested", "nested", "int"})
	col.Index([]string{"nested", "nested", "float"})
	col.Index([]string{"strs"})
	col.Index([]string{"ints"})
	col.Index([]string{"floats"})
	// Benchmark document insert.
	average("insert", func() {
		if _, err := col.Insert(sampleDoc()); err != nil {
			fmt.Println("Insert error", err)
		}
	})
	// Collect all document IDs, then benchmark document read on random IDs.
	col.ForEachDoc(func(id int, _ []byte) bool {
		ids = append(ids, id)
		return true
	})
	average("read", func() {
		doc, err := col.Read(ids[rand.Intn(benchSize)])
		if doc == nil || err != nil {
			fmt.Println("Read error", doc, err)
		}
	})
	// Benchmark lookup query (two attributes).
	average("lookup", func() {
		result := make(map[int]struct{})
		if err := db.EvalQuery(sampleQuery(), col, &result); err != nil {
			fmt.Println("Query error", err)
		}
	})
	// Benchmark document update on random IDs.
	average("update", func() {
		if err := col.Update(ids[rand.Intn(benchSize)], sampleDoc()); err != nil {
			fmt.Println("Update error", err)
		}
	})
	// Benchmark document delete. The counter is incremented atomically —
	// presumably average() may run the workload concurrently; TODO confirm.
	var delCount int64
	average("delete", func() {
		if err := col.Delete(ids[rand.Intn(benchSize)]); err == nil {
			atomic.AddInt64(&delCount, 1)
		}
	})
	fmt.Printf("Deleted %d documents\n", delCount)
}
func SearchQuestionListByCourseId(res http.ResponseWriter, req *http.Request) string { courseId := req.URL.Query()["courseId"] fmt.Println(courseId) var query interface{} json.Unmarshal([]byte(`[{"eq": "`+courseId[0]+`", "in": ["courseId"]}]`), &query) queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys if err := db.EvalQuery(query, qlCl, &queryResult); err != nil { panic(err) } fmt.Println("result length", len(queryResult)) questionList := make([]map[string]interface{}, 0) for docId, _ := range queryResult { // Read document var readBack map[string]interface{} var err error if readBack, err = qlCl.Read(docId); err != nil { return "questionList not found" } readBack["questionListId"] = string(strconv.AppendInt(nil, int64(docId), 10)) fmt.Println("read data", readBack) questionList = append(questionList, readBack) } ret, _ := json.Marshal(questionList) res.WriteHeader(http.StatusOK) return string(ret) }
func GetBandsByGenre(id string) []DocWithID { database := GetDB() defer database.Close() collection := database.Use(tiedotmartini2.BAND_COL) var query interface{} // id2, _ := strconv.ParseInt(id, 10, 64) q := `{"in": ["albums", "genre_id"], "eq": "` + id + `"}` // json.Unmarshal([]byte(`"all"`), &query) err := json.Unmarshal([]byte(q), &query) if err != nil { fmt.Println("Unmarshall error on genre search:", err) } result := make(map[uint64]struct{}) err = db.EvalQuery(query, collection, &result) if err != nil { fmt.Println("Eval error:", err) os.Exit(1) } fmt.Println("Ran query") var docs []DocWithID for id2 := range result { fmt.Println("Found", id2) var readback map[string]interface{} collection.Read(id2, &readback) doc := DocWithID{DocKey: id2, Value: readback} docs = append(docs, doc) } if docs == nil { fmt.Println("Returning empty value") } return docs }
func GetAll() (pastes []Paste) { myDB, err1 := GetDb() if err1 != nil { log.Fatal("Error on database start - GetAll():", err1) } col := myDB.Use(PASTES) var query interface{} result := make(map[int]struct{}) err := json.Unmarshal([]byte(`"all"`), &query) // err := json.Unmarshal([]byte(`{"limit": 5}`), &query) if err != nil { log.Fatal("json error:", err) } db.EvalQuery(query, col, &result) var docs []Paste for id := range result { doc, _ := col.Read(id) theTime, _ := time.Parse(time.RFC3339, doc[CREATED].(string)) docObj := Paste{Id: id, Title: doc[TITLE].(string), Content: doc[CONTENT].(string), CreatedOn: theTime} docs = append(docs, docObj) } sort.Sort(ByCreated(docs)) q := docs[0:5] return q }
func (ps *ProxyStorage) SaveProxy(toSave *Proxy, update bool) (int, error) { logger.Tracef("Saving proxy to storage: %s", toSave) var query interface{} queryString := fmt.Sprintf(`[{"eq" : "%s", "in" : ["Ip"] },{"eq" : "%n", "in" : ["Port"] }]`, toSave.Ip, toSave.Port) json.Unmarshal([]byte(queryString), &query) queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys if err := db.EvalQuery(query, ps.proxyCol, &queryResult); err != nil { panic(err) } if len(queryResult) > 0 { logger.Tracef("Exists in database: %s", toSave) if update { logger.Tracef("Updating %s", toSave) for k, _ := range queryResult { //expecting single key return k, ps.proxyCol.Update(k, structs.Map(toSave)) } } } return ps.proxyCol.Insert(structs.Map(toSave)) }
// Execute a query and return number of documents from the result. func Count(w http.ResponseWriter, r *http.Request) { w.Header().Set("Cache-Control", "must-revalidate") w.Header().Set("Content-Type", "text/plain") var col, q string if !Require(w, r, "col", &col) { return } if !Require(w, r, "q", &q) { return } var qJson interface{} if err := json.Unmarshal([]byte(q), &qJson); err != nil { http.Error(w, fmt.Sprintf("'%v' is not valid JSON.", q), 400) return } dbcol := HttpDB.Use(col) if dbcol == nil { http.Error(w, fmt.Sprintf("Collection '%s' does not exist.", col), 400) return } queryResult := make(map[int]struct{}) if err := db.EvalQuery(qJson, dbcol, &queryResult); err != nil { http.Error(w, fmt.Sprint(err), 400) return } w.Write([]byte(strconv.Itoa(len(queryResult)))) }
func (t *TiedotEngine) All(collectionName string) (map[uint64]struct{}, error) { r := make(map[uint64]struct{}) if err := tiedot.EvalQuery("all", t.tiedot.Use(collectionName), &r); err != nil { log.Error("Error executing TiedotEngine.All() err=%s", err.Error()) return nil, err } return r, nil }
// getJWT verifies the identity of the requesting user against the JWT identity
// collection and, on a successful password match, hands out a signed RS256 JWT
// (72-hour expiry) via the Authorization response header.
func getJWT(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	addCommonJwtRespHeaders(w, r)
	// Verify identity: the user name is mandatory.
	user := r.FormValue(JWT_USER_ATTR)
	if user == "" {
		http.Error(w, "Please pass JWT 'user' parameter", http.StatusBadRequest)
		return
	}
	jwtCol := HttpDB.Use(JWT_COL_NAME)
	if jwtCol == nil {
		http.Error(w, "Server is missing JWT identity collection, please restart the server.", http.StatusInternalServerError)
		return
	}
	// Look up the user record via the indexed user attribute.
	userQuery := map[string]interface{}{
		"eq": user,
		"in": []interface{}{JWT_USER_ATTR}}
	userQueryResult := make(map[int]struct{})
	if err := db.EvalQuery(userQuery, jwtCol, &userQueryResult); err != nil {
		tdlog.CritNoRepeat("Query failed in JWT identity collection : %v", err)
		http.Error(w, "Query failed in JWT identity collection", http.StatusInternalServerError)
		return
	}
	// Verify password. A break (read failure or mismatch) falls through to the
	// "Invalid password" response at the bottom.
	pass := r.FormValue(JWT_PASS_ATTR)
	for recID := range userQueryResult {
		rec, err := jwtCol.Read(recID)
		if err != nil {
			break
		}
		if rec[JWT_PASS_ATTR] != pass {
			tdlog.CritNoRepeat("JWT: identitify verification failed from request sent by %s", r.RemoteAddr)
			break
		}
		// Successful password match: mint and sign the token with the claims
		// copied from the identity record.
		token := jwt.New(jwt.GetSigningMethod("RS256"))
		token.Claims = jwt.MapClaims{
			JWT_USER_ATTR:        rec[JWT_USER_ATTR],
			JWT_COLLECTIONS_ATTR: rec[JWT_COLLECTIONS_ATTR],
			JWT_ENDPOINTS_ATTR:   rec[JWT_ENDPOINTS_ATTR],
			JWT_EXPIRY:           time.Now().Add(time.Hour * 72).Unix(),
		}
		var tokenString string
		var e error
		if tokenString, e = token.SignedString(privateKey); e != nil {
			panic(e)
		}
		w.Header().Set("Authorization", "Bearer "+tokenString)
		w.WriteHeader(http.StatusOK)
		return
	}
	// Unknown user, unreadable record, or password mismatch.
	http.Error(w, "Invalid password", http.StatusUnauthorized)
}
func (repo *GenericRepository) Query(query interface{}) []GenericModel { col := repo.database.Use(repo.coll) result := make(map[int]struct{}) if err := db.EvalQuery(query, col, &result); err != nil { panic(err) } generics := make([]GenericModel, 0) for id := range result { generics = append(generics, repo.Fetch(id)) } return generics }
// Query runs querystr (a tiedot JSON query) against collection col and returns
// the values of all attributes whose key does not contain "id", from every
// matching document, marshalled together as one JSON array.
func (tdb *DBTiedot) Query(col, querystr string) ([]byte, error) {
	var query interface{}
	var data []interface{}
	// NOTE(review): the unmarshal error is discarded; a malformed query string
	// leaves query nil.
	json.Unmarshal([]byte(querystr), &query)
	queryResult := make(map[uint64]struct{})
	colh := tdb.Db.Use(col)
	if err := tiedot.EvalQuery(query, colh, &queryResult); err != nil {
		return nil, err
	}
	for id := range queryResult {
		var intf interface{}
		var subintf interface{}
		// Read errors are only logged; the document is then skipped by the
		// reflection walk below (intf stays nil).
		_, err := colh.Read(id, &intf)
		if err != nil {
			fmt.Println("Read back failed ", err)
		}
		//FIXME: wtf do we have to do this?
		//fmt.Println("Value of: %s\n",reflect.ValueOf(intf))
		// Walk the decoded document map via reflection, collecting the value of
		// every key that does not contain the substring "id".
		val := reflect.ValueOf(intf)
		keys := val.MapKeys()
		for k := range keys {
			if strings.Contains(keys[k].String(), "id") {
				continue
			}
			fmt.Println("We want uuid: ", keys[k].String())
			subintf = val.MapIndex(keys[k]).Interface()
			data = append(data, subintf)
		}
		//subintf = val.Elem(;
	}
	dataBytes, err := json.Marshal(data)
	if err != nil {
		fmt.Println("Failed to marshal interface{} to raw []bytes: ", err)
		return nil, err
	}
	return dataBytes, nil
}
func Query(w http.ResponseWriter, r *http.Request) { w.Header().Set("Cache-Control", "must-revalidate") w.Header().Set("Content-Type", "text/plain") var col, q string if !Require(w, r, "col", &col) { return } if !Require(w, r, "q", &q) { return } var qJson interface{} if err := json.Unmarshal([]byte(q), &qJson); err != nil { http.Error(w, fmt.Sprintf("'%v' is not valid JSON.", q), 400) return } V3Sync.RLock() defer V3Sync.RUnlock() dbcol := V3DB.Use(col) if dbcol == nil { http.Error(w, fmt.Sprintf("Collection '%s' does not exist.", col), 400) return } // Evaluate the query queryResult := make(map[uint64]struct{}) if err := db.EvalQuery(qJson, dbcol, &queryResult); err != nil { http.Error(w, fmt.Sprint(err), 400) return } // Construct array of result resultDocs := make([]map[string]interface{}, len(queryResult)) counter := 0 for docID := range queryResult { var doc map[string]interface{} dbcol.Read(docID, &doc) if doc != nil { resultDocs[counter] = doc counter++ } } // Serialize the array resp, err := json.Marshal(resultDocs) if err != nil { http.Error(w, fmt.Sprintf("Server error: query returned invalid structure"), 500) return } w.Write([]byte(string(resp))) }
func GetAll(docType string) []DocWithID { database := GetDB() defer database.Close() collection := database.Use(docType) var query interface{} result := make(map[uint64]struct{}) json.Unmarshal([]byte(`"all"`), &query) db.EvalQuery(query, collection, &result) var docs []DocWithID for id := range result { var doc MyDoc collection.Read(id, &doc) docObj := DocWithID{DocKey: id, Value: doc} docs = append(docs, docObj) } return docs }
func (ds *DataStore) GetUserForNick(nick string) (recs []Record, err error) { query := createQuery(nick, "nick") queryResult := make(map[uint64]struct{}) err = db.EvalQuery(query, ds.users, &queryResult) if err != nil { return } if len(queryResult) == 0 { err = errors.New("No User Found") return } recs = ds.getUsersForIDs(queryResult) return }
func home(w http.ResponseWriter, req *http.Request) { // local vars var query interface{} entries := Entries{} queryResults := make(map[int]struct{}) // open database d, err := db.OpenDB(DBdir) if err != nil { panic(err) } // use collection docEntries := d.Use("Entries") // build query from json and convert to interface{} json.Unmarshal([]byte(`"all"`), &query) // execute query and pass results to queryResults if err := db.EvalQuery(query, docEntries, &queryResults); err != nil { panic(err) } // queryResults contains []int of IDs for id := range queryResults { entry := Entry{} readBack, _ := docEntries.Read(id) // map[string]interface{} TO struct hack j, _ := json.Marshal(readBack) // struct to json json.Unmarshal(j, &entry) // json to actual type entries = append(entries, &entry) } // compile template with data if err := index.Execute(w, entries); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } }
// Lookup the user record for the given email address func lookupUser(users *db.Col, email string) dbRow { var query interface{} queryStr := `{"eq": "` + email + `", "in": ["Email"]}` json.Unmarshal([]byte(queryStr), &query) queryResult := make(map[int]struct{}) if err := db.EvalQuery(query, users, &queryResult); err != nil { panic(err) } for id := range queryResult { results, err := users.Read(id) if err != nil { panic(err) } return results } return nil }
// jwtInitSetup prepares the DB for JWT features: it creates the JWT identity
// collection and the index on the user attribute when missing, and inserts the
// default/special user identity "admin" (empty password, no collection or
// endpoint grants) when it does not yet exist.
func jwtInitSetup() {
	// Create the identity collection only if it is missing.
	if HttpDB.Use(JWT_COL_NAME) == nil {
		if err := HttpDB.Create(JWT_COL_NAME); err != nil {
			tdlog.Panicf("JWT: failed to create JWT identity collection - %v", err)
		}
	}
	jwtCol := HttpDB.Use(JWT_COL_NAME)
	// Create the index on the user attribute only if it is missing.
	indexPaths := make(map[string]struct{})
	for _, oneIndex := range jwtCol.AllIndexes() {
		indexPaths[strings.Join(oneIndex, db.INDEX_PATH_SEP)] = struct{}{}
	}
	if _, exists := indexPaths[JWT_USER_ATTR]; !exists {
		if err := jwtCol.Index([]string{JWT_USER_ATTR}); err != nil {
			tdlog.Panicf("JWT: failed to create collection index - %v", err)
		}
	}
	// Create the default user "admin" only if absent.
	adminQuery := map[string]interface{}{
		"eq": JWT_USER_ADMIN,
		"in": []interface{}{JWT_USER_ATTR}}
	adminQueryResult := make(map[int]struct{})
	if err := db.EvalQuery(adminQuery, jwtCol, &adminQueryResult); err != nil {
		tdlog.Panicf("JWT: failed to query admin user ID - %v", err)
	}
	if len(adminQueryResult) == 0 {
		if _, err := jwtCol.Insert(map[string]interface{}{
			JWT_USER_ATTR:        JWT_USER_ADMIN,
			JWT_PASS_ATTR:        "",
			JWT_COLLECTIONS_ATTR: []interface{}{},
			JWT_ENDPOINTS_ATTR:   []interface{}{}}); err != nil {
			tdlog.Panicf("JWT: failed to create default admin user - %v", err)
		}
		tdlog.Notice("JWT: successfully initialized DB for JWT features. The default user 'admin' has been created.")
	}
}
func (self *Database) Query(colName string, query Query, result interface{}) error { resultv := reflect.ValueOf(result) if resultv.Kind() != reflect.Ptr || resultv.Elem().Kind() != reflect.Slice { panic("result argument must be a slice address") } slicev := resultv.Elem() //slicev = slicev.Slice(0, slicev.Cap()) elemt := slicev.Type().Elem() col := self.db.Use(colName) var out []Query for k, v := range query { q := Query{} q["eq"] = v q["in"] = []string{k} out = append(out, q) } queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys if err := db.EvalQuery(out, col, &queryResult); err != nil { return err } fmt.Printf("%v", queryResult) for id := range queryResult { elemp := reflect.New(elemt) if err := self.Get(colName, fmt.Sprintf("%d", id), elemp.Interface()); err != nil { return err } slicev = reflect.Append(slicev, elemp.Elem()) } return nil }
// TestAll (example.go written in test case style) exercises the embedded
// tiedot API end to end: collection management (create/rename/drop/scrub),
// document CRUD, index management, and a two-clause union query whose results
// are read back and verified.
func TestAll(t *testing.T) {
	myDBDir := "/tmp/tiedot_test_embeddedExample"
	os.RemoveAll(myDBDir)
	defer os.RemoveAll(myDBDir)
	myDB, err := db.OpenDB(myDBDir)
	if err != nil {
		t.Fatal(err)
	}
	if err := myDB.Create("Feeds"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Create("Votes"); err != nil {
		t.Fatal(err)
	}
	// Only the two freshly created collections may exist.
	for _, name := range myDB.AllCols() {
		if name != "Feeds" && name != "Votes" {
			t.Fatal(myDB.AllCols())
		}
	}
	if err := myDB.Rename("Votes", "Points"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Drop("Points"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Scrub("Feeds"); err != nil {
		t.Fatal(err)
	}
	feeds := myDB.Use("Feeds")
	// Document CRUD round trip: insert, read back, verify, update, verify.
	docID, err := feeds.Insert(map[string]interface{}{
		"name": "Go 1.2 is released", "url": "golang.org"})
	if err != nil {
		t.Fatal(err)
	}
	readBack, err := feeds.Read(docID)
	if err != nil {
		t.Fatal(err)
	}
	if !sameMap(readBack, map[string]interface{}{
		"name": "Go 1.2 is released", "url": "golang.org"}) {
		t.Fatal(readBack)
	}
	err = feeds.Update(docID, map[string]interface{}{
		"name": "Go is very popular", "url": "google.com"})
	if err != nil {
		panic(err)
	}
	feeds.ForEachDoc(func(id int, docContent []byte) (willMoveOn bool) {
		var doc map[string]interface{}
		if json.Unmarshal(docContent, &doc) != nil {
			t.Fatal("cannot deserialize")
		}
		if !sameMap(doc, map[string]interface{}{
			"name": "Go is very popular", "url": "google.com"}) {
			t.Fatal(doc)
		}
		return true
	})
	if err := feeds.Delete(docID); err != nil {
		t.Fatal(err)
	}
	// Deleting the same document twice must yield ErrorNoDoc.
	if err := feeds.Delete(docID); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal(err)
	}
	// Index management: create three indexes and verify exactly those exist.
	if err := feeds.Index([]string{"author", "name", "first_name"}); err != nil {
		t.Fatal(err)
	}
	if err := feeds.Index([]string{"Title"}); err != nil {
		t.Fatal(err)
	}
	if err := feeds.Index([]string{"Source"}); err != nil {
		t.Fatal(err)
	}
	for _, path := range feeds.AllIndexes() {
		joint := strings.Join(path, "")
		if joint != "authornamefirst_name" && joint != "Title" && joint != "Source" {
			t.Fatal(feeds.AllIndexes())
		}
	}
	if err := feeds.Unindex([]string{"author", "name", "first_name"}); err != nil {
		t.Fatal(err)
	}
	// ****************** Queries ******************
	// Prepare some documents for the query
	if _, err := feeds.Insert(map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3}); err != nil {
		t.Fatal("failure")
	}
	if _, err := feeds.Insert(map[string]interface{}{"Title": "Kitkat is here", "Source": "google.com", "Age": 2}); err != nil {
		t.Fatal("failure")
	}
	if _, err := feeds.Insert(map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1}); err != nil {
		t.Fatal("failure")
	}
	// Union query: Title == "New Go release" OR Source == "slackware.com".
	var query interface{}
	if json.Unmarshal([]byte(`[{"eq": "New Go release", "in": ["Title"]}, {"eq": "slackware.com", "in": ["Source"]}]`), &query) != nil {
		t.Fatal("failure")
	}
	queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys
	if err := db.EvalQuery(query, feeds, &queryResult); err != nil {
		t.Fatal(err)
	}
	// Query result are document IDs
	for id := range queryResult {
		// To get query result document, simply read it
		readBack, err := feeds.Read(id)
		if err != nil {
			panic(err)
		}
		if !sameMap(readBack, map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3}) && !sameMap(readBack, map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1}) {
			t.Fatal(readBack)
		}
	}
	if err := myDB.Close(); err != nil {
		t.Fatal(err)
	}
}
func (q *Query) eval() (RawResultSet, error) { query := prepQuery(q.q) res := make(map[uint64]struct{}) err := tiedot.EvalQuery(query, q.col, &res) return res, err }
// embeddedExample demonstrates the embedded tiedot API: collection management,
// document CRUD, index management, queries, Sync and a graceful Close. All
// work happens in a throwaway database under /tmp.
func embeddedExample() {
	// ****************** Collection Management ******************
	myDBDir := "/tmp/MyDatabase"
	os.RemoveAll(myDBDir)
	defer os.RemoveAll(myDBDir)
	// (Create if not exist) open a database
	myDB, err := db.OpenDB(myDBDir)
	if err != nil {
		panic(err)
	}
	// Create two collections: Feeds and Votes
	if err := myDB.Create("Feeds"); err != nil {
		panic(err)
	}
	if err := myDB.Create("Votes"); err != nil {
		panic(err)
	}
	// What collections do I now have?
	for _, name := range myDB.AllCols() {
		fmt.Printf("I have a collection called %s\n", name)
	}
	// Rename collection "Votes" to "Points"
	if err := myDB.Rename("Votes", "Points"); err != nil {
		panic(err)
	}
	// Drop (delete) collection "Points"
	if err := myDB.Drop("Points"); err != nil {
		panic(err)
	}
	// Scrub (repair and compact) "Feeds"
	if err := myDB.Scrub("Feeds"); err != nil {
		panic(err)
	}
	// ****************** Document Management ******************
	// Start using a collection (the reference is valid until DB schema changes or Scrub is carried out)
	feeds := myDB.Use("Feeds")
	// Insert document (afterwards the docID uniquely identifies the document and will never change)
	docID, err := feeds.Insert(map[string]interface{}{
		"name": "Go 1.2 is released", "url": "golang.org"})
	if err != nil {
		panic(err)
	}
	// Read document
	readBack, err := feeds.Read(docID)
	if err != nil {
		panic(err)
	}
	fmt.Println("Document", docID, "is", readBack)
	// Update document
	err = feeds.Update(docID, map[string]interface{}{
		"name": "Go is very popular", "url": "google.com"})
	if err != nil {
		panic(err)
	}
	// Process all documents (note that document order is undetermined)
	feeds.ForEachDoc(func(id int, docContent []byte) (willMoveOn bool) {
		fmt.Println("Document", id, "is", string(docContent))
		return true // move on to the next document; returning false would stop iteration
	})
	// Delete document
	if err := feeds.Delete(docID); err != nil {
		panic(err)
	}
	// ****************** Index Management ******************
	// Indexes assist in many types of queries
	// Create index (path leads to document JSON attribute)
	if err := feeds.Index([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Title"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Source"}); err != nil {
		panic(err)
	}
	// What indexes do I have on collection A?
	for _, path := range feeds.AllIndexes() {
		fmt.Printf("I have an index on path %v\n", path)
	}
	// Remove index
	if err := feeds.Unindex([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}
	// ****************** Queries ******************
	// Prepare some documents for the query
	feeds.Insert(map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3})
	feeds.Insert(map[string]interface{}{"Title": "Kitkat is here", "Source": "google.com", "Age": 2})
	feeds.Insert(map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1})
	// Union query: Title == "New Go release" OR Source == "slackware.com".
	var query interface{}
	json.Unmarshal([]byte(`[{"eq": "New Go release", "in": ["Title"]}, {"eq": "slackware.com", "in": ["Source"]}]`), &query)
	queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys
	if err := db.EvalQuery(query, feeds, &queryResult); err != nil {
		panic(err)
	}
	// Query result are document IDs
	for id := range queryResult {
		// To get query result document, simply read it
		readBack, err := feeds.Read(id)
		if err != nil {
			panic(err)
		}
		fmt.Printf("Query returned document %v\n", readBack)
	}
	// Make sure important transactions are persisted (very expensive call: do NOT invoke too often)
	// (A background goroutine is already doing it for you every few seconds)
	if err := myDB.Sync(); err != nil {
		panic(err)
	}
	// Gracefully close database, you should call Close on all opened databases
	// Otherwise background goroutines will prevent program shutdown
	if err := myDB.Close(); err != nil {
		panic(err)
	}
}
// embeddedExample demonstrates an older tiedot embedded API (partitioned
// Create, uint64 document IDs, Read into an out-parameter, StrCol/SecIndexes
// fields): collection management, document CRUD, index management and queries.
func embeddedExample() {
	// ****************** Collection Management ******************
	// It is very important to initialize random number generator seed!
	rand.Seed(time.Now().UTC().UnixNano())
	// Create and open database
	dir := "/tmp/MyDatabase"
	os.RemoveAll(dir)
	defer os.RemoveAll(dir)
	myDB, err := db.OpenDB(dir)
	if err != nil {
		panic(err)
	}
	// Create two collections Feeds and Votes
	// "2" means collection data and indexes are divided into two halves, allowing concurrent access from two threads
	if err := myDB.Create("Feeds", 2); err != nil {
		panic(err)
	}
	if err := myDB.Create("Votes", 2); err != nil {
		panic(err)
	}
	// What collections do I now have?
	for name := range myDB.StrCol {
		fmt.Printf("I have a collection called %s\n", name)
	}
	// Rename collection "Votes" to "Points"
	if err := myDB.Rename("Votes", "Points"); err != nil {
		panic(err)
	}
	// Drop (delete) collection "Points"
	if err := myDB.Drop("Points"); err != nil {
		panic(err)
	}
	// Scrub (repair and compact) "Feeds"
	myDB.Scrub("Feeds")
	// ****************** Document Management ******************
	// Start using a collection
	feeds := myDB.Use("Feeds")
	// Insert document (document must be map[string]interface{})
	docID, err := feeds.Insert(map[string]interface{}{
		"name": "Go 1.2 is released", "url": "golang.org"})
	if err != nil {
		panic(err)
	}
	// Read document
	var readBack interface{}
	feeds.Read(docID, &readBack) // pass in document's physical ID
	fmt.Println(readBack)
	// Update document (document must be map[string]interface{})
	err = feeds.Update(docID, map[string]interface{}{
		"name": "Go is very popular", "url": "google.com"})
	if err != nil {
		panic(err)
	}
	// Delete document
	feeds.Delete(docID)
	// Delete document
	feeds.Delete(123) // An ID which does not exist does no harm
	// ****************** Index Management ******************
	// Secondary indexes assist in many types of queries
	// Create index (path leads to document JSON attribute)
	if err := feeds.Index([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Title"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Source"}); err != nil {
		panic(err)
	}
	// What indexes do I have on collection A?
	for path := range feeds.SecIndexes {
		fmt.Printf("I have an index on path %s\n", path)
	}
	// Remove index
	if err := feeds.Unindex([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}
	// ****************** Queries ******************
	// Let's prepare a number of docments for a start
	feeds.Insert(map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3})
	feeds.Insert(map[string]interface{}{"Title": "Kitkat is here", "Source": "google.com", "Age": 2})
	feeds.Insert(map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1})
	// Union query: Title == "New Go release" OR Source == "slackware.com".
	queryStr := `[{"eq": "New Go release", "in": ["Title"]}, {"eq": "slackware.com", "in": ["Source"]}]`
	var query interface{}
	json.Unmarshal([]byte(queryStr), &query)
	queryResult := make(map[uint64]struct{}) // query result (document IDs) goes into map keys
	if err := db.EvalQuery(query, feeds, &queryResult); err != nil {
		panic(err)
	}
	// Query results are physical document IDs
	for id := range queryResult {
		fmt.Printf("Query returned document ID %d\n", id)
	}
	// To use the document itself, simply read it back
	for id := range queryResult {
		feeds.Read(id, &readBack)
		fmt.Printf("Query returned document %v\n", readBack)
	}
	// Gracefully close database
	myDB.Close()
}
// embeddedExample demonstrates the embedded tiedot API: collection management,
// document CRUD, index management, ForEachDoc iteration and queries, ending
// with Sync and a graceful Close.
func embeddedExample() {
	// ****************** Collection Management ******************
	myDBDir := "/tmp/MyDatabase"
	os.RemoveAll(myDBDir)
	defer os.RemoveAll(myDBDir)
	// (Create if not exist) open a database
	myDB, err := db.OpenDB(myDBDir)
	if err != nil {
		panic(err)
	}
	// Create two collections: Feeds and Votes
	if err := myDB.Create("Feeds"); err != nil {
		panic(err)
	}
	if err := myDB.Create("Votes"); err != nil {
		panic(err)
	}
	// What collections do I now have?
	for _, name := range myDB.AllCols() {
		fmt.Printf("I have a collection called %s\n", name)
	}
	// Rename collection "Votes" to "Points"
	if err := myDB.Rename("Votes", "Points"); err != nil {
		panic(err)
	}
	// Drop (delete) collection "Points"
	if err := myDB.Drop("Points"); err != nil {
		panic(err)
	}
	// Scrub (repair and compact) "Feeds"
	myDB.Scrub("Feeds")
	// ****************** Document Management ******************
	// Start using a collection
	// (The reference is valid until DB schema changes or Scrub is carried out)
	feeds := myDB.Use("Feeds")
	// Insert document (document must be map[string]interface{})
	docID, err := feeds.Insert(map[string]interface{}{
		"name": "Go 1.2 is released", "url": "golang.org"})
	if err != nil {
		panic(err)
	}
	// Read document
	// NOTE(review): unlike the surrounding calls, the read error is discarded here.
	readBack, err := feeds.Read(docID)
	fmt.Println(readBack)
	// Update document (document must be map[string]interface{})
	err = feeds.Update(docID, map[string]interface{}{
		"name": "Go is very popular", "url": "google.com"})
	if err != nil {
		panic(err)
	}
	// Delete document
	feeds.Delete(docID)
	// ****************** Index Management ******************
	// Secondary indexes assist in many types of queries
	// Create index (path leads to document JSON attribute)
	if err := feeds.Index([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Title"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Source"}); err != nil {
		panic(err)
	}
	// What indexes do I have on collection A?
	for _, path := range feeds.AllIndexes() {
		fmt.Printf("I have an index on path %v\n", path)
	}
	// Remove index
	if err := feeds.Unindex([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}
	// ****************** Queries ******************
	// Prepare some documents for the query
	feeds.Insert(map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3})
	feeds.Insert(map[string]interface{}{"Title": "Kitkat is here", "Source": "google.com", "Age": 2})
	feeds.Insert(map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1})
	feeds.ForEachDoc(true, func(id int, doc []byte) bool {
		var data map[string]interface{}
		json.Unmarshal(doc, &data)
		fmt.Println("data is")
		fmt.Println(data)
		fmt.Println("data was")
		return true
	})
	// Union query: Title == "New Go release" OR Source == "slackware.com".
	var query interface{}
	json.Unmarshal([]byte(`[{"eq": "New Go release", "in": ["Title"]}, {"eq": "slackware.com", "in": ["Source"]}]`), &query)
	queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys
	if err := db.EvalQuery(query, feeds, &queryResult); err != nil {
		panic(err)
	}
	// Query results are document IDs
	for id := range queryResult {
		fmt.Printf("Query returned document ID %d\n", id)
	}
	// To use the document itself, simply read it back
	for id := range queryResult {
		readBack, err := feeds.Read(id)
		if err != nil {
			panic(err)
		}
		fmt.Printf("Query returned document %v\n", readBack)
	}
	// Make sure important transactions are persisted (very expensive call: do NOT invoke too often)
	// (A background goroutine is already doing it for you every few seconds)
	if err := myDB.Sync(); err != nil {
		panic(err)
	}
	// Gracefully close database
	if err := myDB.Close(); err != nil {
		panic(err)
	}
}
// benchmark2 written in test case style func TestBenchmark2(t *testing.T) { return runtime.GOMAXPROCS(runtime.NumCPU()) rand.Seed(time.Now().UnixNano()) docs := make([]int, 0, benchTestSize*2+1000) wp := new(sync.WaitGroup) numThreads := runtime.GOMAXPROCS(-1) // There are goroutines doing document operations: insert, read, query, update, delete wp.Add(5 * numThreads) // And one more changing schema and stuff wp.Add(1) // Prepare a collection with two indexes tmp := "/tmp/tiedot_test_bench2" os.RemoveAll(tmp) defer os.RemoveAll(tmp) benchdb, col := mkTmpDBAndCol(tmp, "tmp") defer benchdb.Close() col.Index([]string{"nested", "nested", "str"}) col.Index([]string{"nested", "nested", "int"}) col.Index([]string{"nested", "nested", "float"}) col.Index([]string{"strs"}) col.Index([]string{"ints"}) col.Index([]string{"floats"}) // Insert 1000 documents to make a start for j := 0; j < 1000; j++ { if newID, err := col.Insert(sampleDoc()); err == nil { docs = append(docs, newID) } else { panic(err) } } start := float64(time.Now().UTC().UnixNano()) // Insert benchTestSize * 2 documents for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Insert thread %d starting\n", i) defer wp.Done() for j := 0; j < benchTestSize/numThreads*2; j++ { if newID, err := col.Insert(sampleDoc()); err == nil { docs = append(docs, newID) } else { panic(err) } } fmt.Printf("Insert thread %d completed\n", i) }(i) } // Read benchTestSize * 2 documents var readCount int64 for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Read thread %d starting\n", i) defer wp.Done() for j := 0; j < benchTestSize/numThreads*2; j++ { if _, err := col.Read(docs[rand.Intn(len(docs))]); err == nil { atomic.AddInt64(&readCount, 1) } } fmt.Printf("Read thread %d completed\n", i) }(i) } // Query benchTestSize times (lookup on two attributes) for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Query thread %d starting\n", i) defer wp.Done() var err error for j := 0; j < 
benchTestSize/numThreads; j++ { result := make(map[int]struct{}) if err = db.EvalQuery(sampleQuery(), col, &result); err != nil { panic(err) } } fmt.Printf("Query thread %d completed\n", i) }(i) } // Update benchTestSize documents var updateCount int64 for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Update thread %d starting\n", i) defer wp.Done() for j := 0; j < benchTestSize/numThreads; j++ { if err := col.Update(docs[rand.Intn(len(docs))], sampleDoc()); err == nil { atomic.AddInt64(&updateCount, 1) } } fmt.Printf("Update thread %d completed\n", i) }(i) } // Delete benchTestSize documents var delCount int64 for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Delete thread %d starting\n", i) defer wp.Done() for j := 0; j < benchTestSize/numThreads; j++ { if err := col.Delete(docs[rand.Intn(len(docs))]); err == nil { atomic.AddInt64(&delCount, 1) } } fmt.Printf("Delete thread %d completed\n", i) }(i) } // This one does a bunch of schema-changing stuff, testing the engine while document operations are busy go func() { time.Sleep(500 * time.Millisecond) if err := benchdb.Create("foo"); err != nil { panic(err) } else if err := benchdb.Rename("foo", "bar"); err != nil { panic(err) } else if err := benchdb.Truncate("bar"); err != nil { panic(err) } else if err := benchdb.Scrub("bar"); err != nil { panic(err) } else if benchdb.Use("bar") == nil { panic("Missing collection") } for _, colName := range benchdb.AllCols() { if colName != "bar" && colName != "tmp" { panic("Wrong collections in benchmark db") } } os.RemoveAll("/tmp/tiedot_test_bench2_dump") defer os.RemoveAll("/tmp/tiedot_test_bench2_dump") if err := benchdb.Dump("/tmp/tiedot_test_bench2_dump"); err != nil { panic(err) } else if err := benchdb.Drop("bar"); err != nil { panic(err) } defer wp.Done() }() // Wait for all goroutines to finish, then print summary wp.Wait() end := float64(time.Now().UTC().UnixNano()) fmt.Printf("Total operations %d: %d ns/iter, %d iter/sec\n", 
benchTestSize*7, int((end-start)/float64(benchTestSize)/7), int(1000000000/((end-start)/float64(benchTestSize)/7))) fmt.Printf("Read %d documents\n", readCount) fmt.Printf("Updated %d documents\n", updateCount) fmt.Printf("Deleted %d documents\n", delCount) if readCount < int64(benchTestSize/3) { t.Fatal("Did not read enough documents") } if updateCount < int64(benchTestSize/8) { t.Fatal("Did not update enough documents") } if delCount < int64(benchTestSize/8) { t.Fatal("Did not delete enough documents") } }
// Run document operations (insert, read, query, update and delete) all at once. func benchmark2(benchSize int) { docs := make([]int, 0, benchSize*2+1000) wp := new(sync.WaitGroup) numThreads := runtime.GOMAXPROCS(-1) wp.Add(5 * numThreads) // There are 5 goroutines: insert, read, query, update and delete // Prepare a collection with two indexes tmp := "/tmp/tiedot_bench" defer os.RemoveAll(tmp) col := mkTmpDBAndCol(tmp, "tmp") col.Index([]string{"a"}) col.Index([]string{"b"}) // Insert 1000 documents to make a start var docToInsert map[string]interface{} for j := 0; j < 1000; j++ { if err := json.Unmarshal([]byte( `{"a": `+strconv.Itoa(rand.Intn(benchSize))+`, "b": `+strconv.Itoa(rand.Intn(benchSize))+`, "more": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed mi sem, ultrices mollis nisl quis, convallis volutpat ante. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Proin interdum egestas risus, imperdiet vulputate est. Cras semper risus sit amet dolor facilisis malesuada. Nunc velit augue, accumsan id facilisis ultricies, vehicula eget massa. Ut non dui eu magna egestas aliquam. Fusce in pellentesque risus. Aliquam ornare pharetra lacus in rhoncus. In eu commodo nibh. Praesent at lacinia quam. Curabitur laoreet pellentesque mollis. Maecenas mollis bibendum neque. Pellentesque semper justo ac purus auctor cursus. In egestas sodales metus sed dictum. Vivamus at elit nunc. Phasellus sit amet augue sed augue rhoncus congue. Aenean et molestie augue. Aliquam blandit lacus eu nunc rhoncus, vitae varius mauris placerat. Quisque velit urna, pretium quis dolor et, blandit sodales libero. 
Nulla sollicitudin est vel dolor feugiat viverra massa nunc."}`), &docToInsert); err != nil { panic("json error") } if newID, err := col.Insert(docToInsert); err == nil { docs = append(docs, newID) } else { fmt.Println("Insert error", err) } } start := float64(time.Now().UTC().UnixNano()) // Insert benchSize * 2 documents for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Insert thread %d starting\n", i) defer wp.Done() var docToInsert map[string]interface{} for j := 0; j < benchSize/numThreads*2; j++ { if err := json.Unmarshal([]byte( `{"a": `+strconv.Itoa(rand.Intn(benchSize))+`, "b": `+strconv.Itoa(rand.Intn(benchSize))+`, "more": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed mi sem, ultrices mollis nisl quis, convallis volutpat ante. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Proin interdum egestas risus, imperdiet vulputate est. Cras semper risus sit amet dolor facilisis malesuada. Nunc velit augue, accumsan id facilisis ultricies, vehicula eget massa. Ut non dui eu magna egestas aliquam. Fusce in pellentesque risus. Aliquam ornare pharetra lacus in rhoncus. In eu commodo nibh. Praesent at lacinia quam. Curabitur laoreet pellentesque mollis. Maecenas mollis bibendum neque. Pellentesque semper justo ac purus auctor cursus. In egestas sodales metus sed dictum. Vivamus at elit nunc. Phasellus sit amet augue sed augue rhoncus congue. Aenean et molestie augue. Aliquam blandit lacus eu nunc rhoncus, vitae varius mauris placerat. Quisque velit urna, pretium quis dolor et, blandit sodales libero. 
Nulla sollicitudin est vel dolor feugiat viverra massa nunc."}`), &docToInsert); err != nil { panic("json error") } if newID, err := col.Insert(docToInsert); err == nil { docs = append(docs, newID) } else { fmt.Println("Insert error", err) } } fmt.Printf("Insert thread %d completed\n", i) }(i) } // Read benchSize * 2 documents for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Read thread %d starting\n", i) defer wp.Done() for j := 0; j < benchSize/numThreads*2; j++ { col.Read(docs[rand.Intn(len(docs))]) } fmt.Printf("Read thread %d completed\n", i) }(i) } // Query benchSize times (lookup on two attributes) for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Query thread %d starting\n", i) defer wp.Done() var query interface{} var err error for j := 0; j < benchSize/numThreads; j++ { if err = json.Unmarshal([]byte(`{"c": [{"eq": `+strconv.Itoa(rand.Intn(benchSize))+`, "in": ["a"], "limit": 1}, `+ `{"eq": `+strconv.Itoa(rand.Intn(benchSize))+`, "in": ["b"], "limit": 1}]}`), &query); err != nil { panic("json error") } result := make(map[int]struct{}) if err = db.EvalQuery(query, col, &result); err != nil { fmt.Println("Query error", err) } } fmt.Printf("Query thread %d completed\n", i) }(i) } // Update benchSize documents for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Update thread %d starting\n", i) defer wp.Done() var updated map[string]interface{} for j := 0; j < benchSize/numThreads; j++ { if err := json.Unmarshal([]byte( `{"a": `+strconv.Itoa(rand.Intn(benchSize))+`, "b": `+strconv.Itoa(rand.Intn(benchSize))+`, "more": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed mi sem, ultrices mollis nisl quis, convallis volutpat ante. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Proin interdum egestas risus, imperdiet vulputate est. Cras semper risus sit amet dolor facilisis malesuada. Nunc velit augue, accumsan id facilisis ultricies, vehicula eget massa. 
Ut non dui eu magna egestas aliquam. Fusce in pellentesque risus. Aliquam ornare pharetra lacus in rhoncus. In eu commodo nibh. Praesent at lacinia quam. Curabitur laoreet pellentesque mollis. Maecenas mollis bibendum neque. Pellentesque semper justo ac purus auctor cursus. In egestas sodales metus sed dictum. Vivamus at elit nunc. Phasellus sit amet augue sed augue rhoncus congue. Aenean et molestie augue. Aliquam blandit lacus eu nunc rhoncus, vitae varius mauris placerat. Quisque velit urna, pretium quis dolor et, blandit sodales libero. Nulla sollicitudin est vel dolor feugiat viverra massa nunc."}`), &updated); err != nil { panic("json error") } col.Update(docs[rand.Intn(len(docs))], updated) } fmt.Printf("Update thread %d completed\n", i) }(i) } // Delete benchSize documents for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Delete thread %d starting\n", i) defer wp.Done() for j := 0; j < benchSize/numThreads; j++ { col.Delete(docs[rand.Intn(len(docs))]) } fmt.Printf("Delete thread %d completed\n", i) }(i) } // Wait for all goroutines to finish, then print summary wp.Wait() end := float64(time.Now().UTC().UnixNano()) fmt.Printf("Total operations %d: %d ns/iter, %d iter/sec\n", benchSize*7, int((end-start)/float64(benchSize)/7), int(1000000000/((end-start)/float64(benchSize)/7))) }
// Benchmark individual document operation: insert, read, query, update and delete. func benchmark(benchSize int) { ids := make([]int, 0, benchSize) // Prepare a collection with two indexes tmp := "/tmp/tiedot_bench2" defer os.RemoveAll(tmp) col := mkTmpDBAndCol(tmp, "tmp") col.Index([]string{"a"}) col.Index([]string{"b"}) // Benchmark document insert average("insert", benchSize, func() {}, func() { var doc map[string]interface{} if err := json.Unmarshal([]byte( `{"a": `+strconv.Itoa(rand.Intn(benchSize))+`, "b": `+strconv.Itoa(rand.Intn(benchSize))+`, "more": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed mi sem, ultrices mollis nisl quis, convallis volutpat ante. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Proin interdum egestas risus, imperdiet vulputate est. Cras semper risus sit amet dolor facilisis malesuada. Nunc velit augue, accumsan id facilisis ultricies, vehicula eget massa. Ut non dui eu magna egestas aliquam. Fusce in pellentesque risus. Aliquam ornare pharetra lacus in rhoncus. In eu commodo nibh. Praesent at lacinia quam. Curabitur laoreet pellentesque mollis. Maecenas mollis bibendum neque. Pellentesque semper justo ac purus auctor cursus. In egestas sodales metus sed dictum. Vivamus at elit nunc. Phasellus sit amet augue sed augue rhoncus congue. Aenean et molestie augue. Aliquam blandit lacus eu nunc rhoncus, vitae varius mauris placerat. Quisque velit urna, pretium quis dolor et, blandit sodales libero. 
Nulla sollicitudin est vel dolor feugiat viverra massa nunc."}`), &doc); err != nil { panic("json error") } if _, err := col.Insert(doc); err != nil { fmt.Println("Insert error", err) } }) // Collect all document IDs and benchmark document read average("read", benchSize, func() { col.ForEachDoc(false, func(id int, _ []byte) bool { ids = append(ids, id) return true }) }, func() { doc, err := col.Read(ids[rand.Intn(benchSize)]) if doc == nil || err != nil { fmt.Println("Read error", doc, err) } }) // Benchmark lookup query (two attributes) average("lookup", benchSize, func() {}, func() { var query interface{} if err := json.Unmarshal([]byte(`{"c": [{"eq": `+strconv.Itoa(rand.Intn(benchSize))+`, "in": ["a"], "limit": 1}, `+ `{"eq": `+strconv.Itoa(rand.Intn(benchSize))+`, "in": ["b"], "limit": 1}]}`), &query); err != nil { panic("json error") } result := make(map[int]struct{}) if err := db.EvalQuery(query, col, &result); err != nil { fmt.Println("Query error", err) } }) // Benchmark document update average("update", benchSize, func() {}, func() { var doc map[string]interface{} if err := json.Unmarshal([]byte( `{"a": `+strconv.Itoa(rand.Intn(benchSize))+`, "b": `+strconv.Itoa(rand.Intn(benchSize))+`, "more": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed mi sem, ultrices mollis nisl quis, convallis volutpat ante. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Proin interdum egestas risus, imperdiet vulputate est. Cras semper risus sit amet dolor facilisis malesuada. Nunc velit augue, accumsan id facilisis ultricies, vehicula eget massa. Ut non dui eu magna egestas aliquam. Fusce in pellentesque risus. Aliquam ornare pharetra lacus in rhoncus. In eu commodo nibh. Praesent at lacinia quam. Curabitur laoreet pellentesque mollis. Maecenas mollis bibendum neque. Pellentesque semper justo ac purus auctor cursus. In egestas sodales metus sed dictum. Vivamus at elit nunc. 
Phasellus sit amet augue sed augue rhoncus congue. Aenean et molestie augue. Aliquam blandit lacus eu nunc rhoncus, vitae varius mauris placerat. Quisque velit urna, pretium quis dolor et, blandit sodales libero. Nulla sollicitudin est vel dolor feugiat viverra massa nunc."}`), &doc); err != nil { panic("json error") } if err := col.Update(ids[rand.Intn(benchSize)], doc); err != nil { fmt.Println("Update error", err) } }) // Benchmark document delete average("delete", benchSize, func() {}, func() { col.Delete(ids[rand.Intn(benchSize)]) }) col.Close() }
// benchmark(1) written in test case style func TestBenchmark1(t *testing.T) { return runtime.GOMAXPROCS(runtime.NumCPU()) rand.Seed(time.Now().UnixNano()) ids := make([]int, 0, benchTestSize) // Prepare a collection with two indexes tmp := "/tmp/tiedot_test_bench1" os.RemoveAll(tmp) defer os.RemoveAll(tmp) benchDB, col := mkTmpDBAndCol(tmp, "tmp") defer benchDB.Close() col.Index([]string{"nested", "nested", "str"}) col.Index([]string{"nested", "nested", "int"}) col.Index([]string{"nested", "nested", "float"}) col.Index([]string{"strs"}) col.Index([]string{"ints"}) col.Index([]string{"floats"}) // Benchmark document insert average("insert", func() { if _, err := col.Insert(sampleDoc()); err != nil { panic(err) } }) // Collect all document IDs and benchmark document read col.ForEachDoc(func(id int, _ []byte) bool { ids = append(ids, id) return true }) average("read", func() { doc, err := col.Read(ids[rand.Intn(benchTestSize)]) if doc == nil || err != nil { panic(err) } }) // Benchmark lookup query (two attributes) average("lookup", func() { result := make(map[int]struct{}) if err := db.EvalQuery(sampleQuery(), col, &result); err != nil { panic(err) } }) // Benchmark document update average("update", func() { if err := col.Update(ids[rand.Intn(benchTestSize)], sampleDoc()); err != nil && !strings.Contains(err.Error(), "locked") { panic(err) } }) // Benchmark document delete var delCount int64 average("delete", func() { if err := col.Delete(ids[rand.Intn(benchTestSize)]); err == nil { atomic.AddInt64(&delCount, 1) } }) if delCount < int64(benchTestSize/2) { t.Fatal("Did not delete enough") } }
// Document CRUD operations running in parallel, intended for catching concurrency related bugs. func benchmark2() { docs := make([]int, 0, benchSize*2+1000) wp := new(sync.WaitGroup) numThreads := runtime.GOMAXPROCS(-1) wp.Add(5 * numThreads) // There are 5 goroutines: insert, read, query, update and delete // Prepare a collection with two indexes tmp := "/tmp/tiedot_bench2" if benchCleanup { defer os.RemoveAll(tmp) } col := mkTmpDBAndCol(tmp, "tmp") col.Index([]string{"nested", "nested", "str"}) col.Index([]string{"nested", "nested", "int"}) col.Index([]string{"nested", "nested", "float"}) col.Index([]string{"strs"}) col.Index([]string{"ints"}) col.Index([]string{"floats"}) // Insert 1000 documents to make a start for j := 0; j < 1000; j++ { if newID, err := col.Insert(sampleDoc()); err == nil { docs = append(docs, newID) } else { fmt.Println("Insert error", err) } } start := float64(time.Now().UTC().UnixNano()) // Insert benchSize * 2 documents for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Insert thread %d starting\n", i) defer wp.Done() for j := 0; j < benchSize/numThreads*2; j++ { if newID, err := col.Insert(sampleDoc()); err == nil { docs = append(docs, newID) } else { fmt.Println("Insert error", err) } } fmt.Printf("Insert thread %d completed\n", i) }(i) } // Read benchSize * 2 documents var readCount int64 for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Read thread %d starting\n", i) defer wp.Done() for j := 0; j < benchSize/numThreads*2; j++ { if _, err := col.Read(docs[rand.Intn(len(docs))]); err == nil { atomic.AddInt64(&readCount, 1) } } fmt.Printf("Read thread %d completed\n", i) }(i) } // Query benchSize times (lookup on two attributes) for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Query thread %d starting\n", i) defer wp.Done() var err error for j := 0; j < benchSize/numThreads; j++ { result := make(map[int]struct{}) if err = db.EvalQuery(sampleQuery(), col, &result); err != nil { fmt.Println("Query 
error", err) } } fmt.Printf("Query thread %d completed\n", i) }(i) } // Update benchSize documents var updateCount int64 for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Update thread %d starting\n", i) defer wp.Done() for j := 0; j < benchSize/numThreads; j++ { if err := col.Update(docs[rand.Intn(len(docs))], sampleDoc()); err == nil { atomic.AddInt64(&updateCount, 1) } } fmt.Printf("Update thread %d completed\n", i) }(i) } // Delete benchSize documents var delCount int64 for i := 0; i < numThreads; i++ { go func(i int) { fmt.Printf("Delete thread %d starting\n", i) defer wp.Done() for j := 0; j < benchSize/numThreads; j++ { if err := col.Delete(docs[rand.Intn(len(docs))]); err == nil { atomic.AddInt64(&delCount, 1) } } fmt.Printf("Delete thread %d completed\n", i) }(i) } // Wait for all goroutines to finish, then print summary wp.Wait() end := float64(time.Now().UTC().UnixNano()) fmt.Printf("Total operations %d: %d ns/iter, %d iter/sec\n", benchSize*7, int((end-start)/float64(benchSize)/7), int(1000000000/((end-start)/float64(benchSize)/7))) fmt.Printf("Read %d documents\n", readCount) fmt.Printf("Updated %d documents\n", updateCount) fmt.Printf("Deleted %d documents\n", delCount) }