// Execute a query and return number of documents from the result.
func Count(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Cache-Control", "must-revalidate")
	w.Header().Set("Content-Type", "text/plain")
	var col, q string
	if !Require(w, r, "col", &col) {
		return
	}
	if !Require(w, r, "q", &q) {
		return
	}
	var qJson interface{}
	if err := json.Unmarshal([]byte(q), &qJson); err != nil {
		http.Error(w, fmt.Sprintf("'%v' is not valid JSON.", q), 400)
		return
	}
	dbcol := HttpDB.Use(col)
	if dbcol == nil {
		http.Error(w, fmt.Sprintf("Collection '%s' does not exist.", col), 400)
		return
	}
	queryResult := make(map[int]struct{})
	if err := db.EvalQuery(qJson, dbcol, &queryResult); err != nil {
		http.Error(w, fmt.Sprint(err), 400)
		return
	}
	w.Write([]byte(strconv.Itoa(len(queryResult))))
}
// Execute a query and return documents from the result.
func Query(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Cache-Control", "must-revalidate")
	w.Header().Set("Content-Type", "application/json")
	var col, q string
	if !Require(w, r, "col", &col) {
		return
	}
	if !Require(w, r, "q", &q) {
		return
	}
	var qJson interface{}
	if err := json.Unmarshal([]byte(q), &qJson); err != nil {
		http.Error(w, fmt.Sprintf("'%v' is not valid JSON.", q), 400)
		return
	}
	dbcol := HttpDB.Use(col)
	if dbcol == nil {
		http.Error(w, fmt.Sprintf("Collection '%s' does not exist.", col), 400)
		return
	}
	// Evaluate the query
	queryResult := make(map[int]struct{})
	if err := db.EvalQuery(qJson, dbcol, &queryResult); err != nil {
		http.Error(w, fmt.Sprint(err), 400)
		return
	}
	// Collect the result documents into a map keyed by document ID
	resultDocs := make(map[string]interface{}, len(queryResult))
	for docID := range queryResult {
		doc, _ := dbcol.Read(docID)
		if doc != nil {
			resultDocs[strconv.Itoa(docID)] = doc
		}
	}
	// Serialize the result
	resp, err := json.Marshal(resultDocs)
	if err != nil {
		http.Error(w, "Server error: query returned invalid structure", 500)
		return
	}
	w.Write(resp)
}
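// Note: the Require helper and the package-level HttpDB handle used by the two
// handlers above are defined elsewhere and are not part of this excerpt. As a
// rough, hypothetical sketch (an assumption, not the actual implementation),
// Require could read a request parameter into val and report an HTTP 400 when
// the parameter is missing:
func Require(w http.ResponseWriter, r *http.Request, name string, val *string) bool {
	*val = r.FormValue(name)
	if *val == "" {
		http.Error(w, fmt.Sprintf("Please pass parameter '%s'.", name), 400)
		return false
	}
	return true
}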
func embeddedExample() {
	// ****************** Collection Management ******************
	myDBDir := "/tmp/MyDatabase"
	os.RemoveAll(myDBDir)
	defer os.RemoveAll(myDBDir)

	// (Create if not exist) open a database
	myDB, err := db.OpenDB(myDBDir)
	if err != nil {
		panic(err)
	}
	// Create two collections: Feeds and Votes
	if err := myDB.Create("Feeds"); err != nil {
		panic(err)
	}
	if err := myDB.Create("Votes"); err != nil {
		panic(err)
	}
	// What collections do I now have?
	for _, name := range myDB.AllCols() {
		fmt.Printf("I have a collection called %s\n", name)
	}
	// Rename collection "Votes" to "Points"
	if err := myDB.Rename("Votes", "Points"); err != nil {
		panic(err)
	}
	// Drop (delete) collection "Points"
	if err := myDB.Drop("Points"); err != nil {
		panic(err)
	}
	// Scrub (repair and compact) "Feeds"
	if err := myDB.Scrub("Feeds"); err != nil {
		panic(err)
	}

	// ****************** Document Management ******************
	// Start using a collection (the reference is valid until the DB schema changes or Scrub is carried out)
	feeds := myDB.Use("Feeds")
	// Insert document (afterwards the docID uniquely identifies the document and will never change)
	docID, err := feeds.Insert(map[string]interface{}{
		"name": "Go 1.2 is released",
		"url":  "golang.org"})
	if err != nil {
		panic(err)
	}
	// Read document
	readBack, err := feeds.Read(docID)
	if err != nil {
		panic(err)
	}
	fmt.Println("Document", docID, "is", readBack)
	// Update document
	err = feeds.Update(docID, map[string]interface{}{
		"name": "Go is very popular",
		"url":  "google.com"})
	if err != nil {
		panic(err)
	}
	// Process all documents (note that document order is undetermined)
	feeds.ForEachDoc(func(id int, docContent []byte) (willMoveOn bool) {
		fmt.Println("Document", id, "is", string(docContent))
		return true // return true to move on to the next document, false to stop
	})
	// Delete document
	if err := feeds.Delete(docID); err != nil {
		panic(err)
	}
	// More complicated error handling - identify the error type.
	// In this example, the error code tells that the document no longer exists.
	if err := feeds.Delete(docID); dberr.Type(err) == dberr.ErrorNoDoc {
		fmt.Println("The document was already deleted")
	}

	// ****************** Index Management ******************
	// Indexes assist in many types of queries
	// Create indexes (each path leads to a document JSON attribute)
	if err := feeds.Index([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Title"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Source"}); err != nil {
		panic(err)
	}
	// What indexes do I have on the Feeds collection?
	for _, path := range feeds.AllIndexes() {
		fmt.Printf("I have an index on path %v\n", path)
	}
	// Remove index
	if err := feeds.Unindex([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}

	// ****************** Queries ******************
	// Prepare some documents for the query
	feeds.Insert(map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3})
	feeds.Insert(map[string]interface{}{"Title": "Kitkat is here", "Source": "google.com", "Age": 2})
	feeds.Insert(map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1})
	var query interface{}
	json.Unmarshal([]byte(`[{"eq": "New Go release", "in": ["Title"]}, {"eq": "slackware.com", "in": ["Source"]}]`), &query)
	queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys
	if err := db.EvalQuery(query, feeds, &queryResult); err != nil {
		panic(err)
	}
	// Query results are document IDs
	for id := range queryResult {
		// To get the query result document, simply read it
		readBack, err := feeds.Read(id)
		if err != nil {
			panic(err)
		}
		fmt.Printf("Query returned document %v\n", readBack)
	}

	// Gracefully close database
	if err := myDB.Close(); err != nil {
		panic(err)
	}
}
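// A minimal sketch of wiring the example above into a runnable program
// (assumption: it lives in package main, with the tiedot packages imported as
// "github.com/HouzuoGuo/tiedot/db" and "github.com/HouzuoGuo/tiedot/dberr"):
func main() {
	embeddedExample()
}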
// example.go written in test case style
func TestAll(t *testing.T) {
	myDBDir := "/tmp/tiedot_test_embeddedExample"
	os.RemoveAll(myDBDir)
	defer os.RemoveAll(myDBDir)

	myDB, err := db.OpenDB(myDBDir)
	if err != nil {
		t.Fatal(err)
	}
	if err := myDB.Create("Feeds"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Create("Votes"); err != nil {
		t.Fatal(err)
	}
	for _, name := range myDB.AllCols() {
		if name != "Feeds" && name != "Votes" {
			t.Fatal(myDB.AllCols())
		}
	}
	if err := myDB.Rename("Votes", "Points"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Drop("Points"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Scrub("Feeds"); err != nil {
		t.Fatal(err)
	}

	feeds := myDB.Use("Feeds")
	docID, err := feeds.Insert(map[string]interface{}{
		"name": "Go 1.2 is released",
		"url":  "golang.org"})
	if err != nil {
		t.Fatal(err)
	}
	readBack, err := feeds.Read(docID)
	if err != nil {
		t.Fatal(err)
	}
	if !sameMap(readBack, map[string]interface{}{
		"name": "Go 1.2 is released",
		"url":  "golang.org"}) {
		t.Fatal(readBack)
	}
	err = feeds.Update(docID, map[string]interface{}{
		"name": "Go is very popular",
		"url":  "google.com"})
	if err != nil {
		t.Fatal(err)
	}
	feeds.ForEachDoc(func(id int, docContent []byte) (willMoveOn bool) {
		var doc map[string]interface{}
		if json.Unmarshal(docContent, &doc) != nil {
			t.Fatal("cannot deserialize")
		}
		if !sameMap(doc, map[string]interface{}{
			"name": "Go is very popular",
			"url":  "google.com"}) {
			t.Fatal(doc)
		}
		return true
	})
	if err := feeds.Delete(docID); err != nil {
		t.Fatal(err)
	}
	if err := feeds.Delete(docID); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal(err)
	}

	if err := feeds.Index([]string{"author", "name", "first_name"}); err != nil {
		t.Fatal(err)
	}
	if err := feeds.Index([]string{"Title"}); err != nil {
		t.Fatal(err)
	}
	if err := feeds.Index([]string{"Source"}); err != nil {
		t.Fatal(err)
	}
	for _, path := range feeds.AllIndexes() {
		joint := strings.Join(path, "")
		if joint != "authornamefirst_name" && joint != "Title" && joint != "Source" {
			t.Fatal(feeds.AllIndexes())
		}
	}
	if err := feeds.Unindex([]string{"author", "name", "first_name"}); err != nil {
		t.Fatal(err)
	}

	// ****************** Queries ******************
	// Prepare some documents for the query
	if _, err := feeds.Insert(map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3}); err != nil {
		t.Fatal("failure")
	}
	if _, err := feeds.Insert(map[string]interface{}{"Title": "Kitkat is here", "Source": "google.com", "Age": 2}); err != nil {
		t.Fatal("failure")
	}
	if _, err := feeds.Insert(map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1}); err != nil {
		t.Fatal("failure")
	}
	var query interface{}
	if json.Unmarshal([]byte(`[{"eq": "New Go release", "in": ["Title"]}, {"eq": "slackware.com", "in": ["Source"]}]`), &query) != nil {
		t.Fatal("failure")
	}
	queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys
	if err := db.EvalQuery(query, feeds, &queryResult); err != nil {
		t.Fatal(err)
	}
	// Query results are document IDs
	for id := range queryResult {
		// To get the query result document, simply read it
		readBack, err := feeds.Read(id)
		if err != nil {
			t.Fatal(err)
		}
		if !sameMap(readBack, map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3}) &&
			!sameMap(readBack, map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1}) {
			t.Fatal(readBack)
		}
	}
	if err := myDB.Close(); err != nil {
		t.Fatal(err)
	}
}
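// The sameMap helper used in TestAll above is not shown in this excerpt. One
// possible sketch (an assumption, not the original helper): compare two
// documents by their JSON serialization, which sidesteps int-versus-float64
// differences between literal test data and documents read back from the
// database (encoding/json sorts map keys, so equal content yields equal bytes).
func sameMap(a, b map[string]interface{}) bool {
	aJSON, errA := json.Marshal(a)
	bJSON, errB := json.Marshal(b)
	return errA == nil && errB == nil && string(aJSON) == string(bJSON)
}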
// benchmark 1 written in test case style
func TestBenchmark1(t *testing.T) {
	return // the benchmark is skipped by default; remove this line to run it

	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UnixNano())
	ids := make([]int, 0, benchTestSize)

	// Prepare a collection with several indexes
	tmp := "/tmp/tiedot_test_bench1"
	os.RemoveAll(tmp)
	defer os.RemoveAll(tmp)
	benchDB, col := mkTmpDBAndCol(tmp, "tmp")
	defer benchDB.Close()
	col.Index([]string{"nested", "nested", "str"})
	col.Index([]string{"nested", "nested", "int"})
	col.Index([]string{"nested", "nested", "float"})
	col.Index([]string{"strs"})
	col.Index([]string{"ints"})
	col.Index([]string{"floats"})

	// Benchmark document insert
	average("insert", func() {
		if _, err := col.Insert(sampleDoc()); err != nil {
			panic(err)
		}
	})
	// Collect all document IDs and benchmark document read
	col.ForEachDoc(func(id int, _ []byte) bool {
		ids = append(ids, id)
		return true
	})
	average("read", func() {
		doc, err := col.Read(ids[rand.Intn(benchTestSize)])
		if doc == nil || err != nil {
			panic(err)
		}
	})
	// Benchmark lookup query (two attributes)
	average("lookup", func() {
		result := make(map[int]struct{})
		if err := db.EvalQuery(sampleQuery(), col, &result); err != nil {
			panic(err)
		}
	})
	// Benchmark document update
	average("update", func() {
		if err := col.Update(ids[rand.Intn(benchTestSize)], sampleDoc()); err != nil && !strings.Contains(err.Error(), "locked") {
			panic(err)
		}
	})
	// Benchmark document delete
	var delCount int64
	average("delete", func() {
		if err := col.Delete(ids[rand.Intn(benchTestSize)]); err == nil {
			atomic.AddInt64(&delCount, 1)
		}
	})
	if delCount < int64(benchTestSize/2) {
		t.Fatal("Did not delete enough")
	}
}
// benchmark 2 written in test case style
func TestBenchmark2(t *testing.T) {
	return // the benchmark is skipped by default; remove this line to run it

	runtime.GOMAXPROCS(runtime.NumCPU())
	rand.Seed(time.Now().UnixNano())
	docs := make([]int, 0, benchTestSize*2+1000)
	wp := new(sync.WaitGroup)
	numThreads := runtime.GOMAXPROCS(-1)
	// There are goroutines doing document operations: insert, read, query, update, delete
	wp.Add(5 * numThreads)
	// And one more changing schema and stuff
	wp.Add(1)

	// Prepare a collection with several indexes
	tmp := "/tmp/tiedot_test_bench2"
	os.RemoveAll(tmp)
	defer os.RemoveAll(tmp)
	benchdb, col := mkTmpDBAndCol(tmp, "tmp")
	defer benchdb.Close()
	col.Index([]string{"nested", "nested", "str"})
	col.Index([]string{"nested", "nested", "int"})
	col.Index([]string{"nested", "nested", "float"})
	col.Index([]string{"strs"})
	col.Index([]string{"ints"})
	col.Index([]string{"floats"})

	// Insert 1000 documents to make a start
	for j := 0; j < 1000; j++ {
		if newID, err := col.Insert(sampleDoc()); err == nil {
			docs = append(docs, newID)
		} else {
			panic(err)
		}
	}
	start := float64(time.Now().UTC().UnixNano())

	// Insert benchTestSize * 2 documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Insert thread %d starting\n", i)
			defer wp.Done()
			for j := 0; j < benchTestSize/numThreads*2; j++ {
				if newID, err := col.Insert(sampleDoc()); err == nil {
					docs = append(docs, newID)
				} else {
					panic(err)
				}
			}
			fmt.Printf("Insert thread %d completed\n", i)
		}(i)
	}
	// Read benchTestSize * 2 documents
	var readCount int64
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Read thread %d starting\n", i)
			defer wp.Done()
			for j := 0; j < benchTestSize/numThreads*2; j++ {
				if _, err := col.Read(docs[rand.Intn(len(docs))]); err == nil {
					atomic.AddInt64(&readCount, 1)
				}
			}
			fmt.Printf("Read thread %d completed\n", i)
		}(i)
	}
	// Query benchTestSize times (lookup on two attributes)
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Query thread %d starting\n", i)
			defer wp.Done()
			var err error
			for j := 0; j < benchTestSize/numThreads; j++ {
				result := make(map[int]struct{})
				if err = db.EvalQuery(sampleQuery(), col, &result); err != nil {
					panic(err)
				}
			}
			fmt.Printf("Query thread %d completed\n", i)
		}(i)
	}
	// Update benchTestSize documents
	var updateCount int64
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Update thread %d starting\n", i)
			defer wp.Done()
			for j := 0; j < benchTestSize/numThreads; j++ {
				if err := col.Update(docs[rand.Intn(len(docs))], sampleDoc()); err == nil {
					atomic.AddInt64(&updateCount, 1)
				}
			}
			fmt.Printf("Update thread %d completed\n", i)
		}(i)
	}
	// Delete benchTestSize documents
	var delCount int64
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Delete thread %d starting\n", i)
			defer wp.Done()
			for j := 0; j < benchTestSize/numThreads; j++ {
				if err := col.Delete(docs[rand.Intn(len(docs))]); err == nil {
					atomic.AddInt64(&delCount, 1)
				}
			}
			fmt.Printf("Delete thread %d completed\n", i)
		}(i)
	}
	// This one does a bunch of schema-changing stuff, testing the engine while document operations are busy
	go func() {
		time.Sleep(500 * time.Millisecond)
		if err := benchdb.Create("foo"); err != nil {
			panic(err)
		} else if err := benchdb.Rename("foo", "bar"); err != nil {
			panic(err)
		} else if err := benchdb.Truncate("bar"); err != nil {
			panic(err)
		} else if err := benchdb.Scrub("bar"); err != nil {
			panic(err)
		} else if benchdb.Use("bar") == nil {
			panic("Missing collection")
		}
		for _, colName := range benchdb.AllCols() {
			if colName != "bar" && colName != "tmp" {
				panic("Wrong collections in benchmark db")
			}
		}
		os.RemoveAll("/tmp/tiedot_test_bench2_dump")
		defer os.RemoveAll("/tmp/tiedot_test_bench2_dump")
		if err := benchdb.Dump("/tmp/tiedot_test_bench2_dump"); err != nil {
			panic(err)
		} else if err := benchdb.Drop("bar"); err != nil {
			panic(err)
		}
		defer wp.Done()
	}()

	// Wait for all goroutines to finish, then print summary
	wp.Wait()
	end := float64(time.Now().UTC().UnixNano())
	fmt.Printf("Total operations %d: %d ns/iter, %d iter/sec\n",
		benchTestSize*7,
		int((end-start)/float64(benchTestSize)/7),
		int(1000000000/((end-start)/float64(benchTestSize)/7)))
	fmt.Printf("Read %d documents\n", readCount)
	fmt.Printf("Updated %d documents\n", updateCount)
	fmt.Printf("Deleted %d documents\n", delCount)
	if readCount < int64(benchTestSize/3) {
		t.Fatal("Did not read enough documents")
	}
	if updateCount < int64(benchTestSize/8) {
		t.Fatal("Did not update enough documents")
	}
	if delCount < int64(benchTestSize/8) {
		t.Fatal("Did not delete enough documents")
	}
}
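// The benchmark scaffolding referenced in TestBenchmark1 and TestBenchmark2
// above (benchTestSize, mkTmpDBAndCol, sampleDoc, sampleQuery, average) is
// defined elsewhere and not shown here. The sketches below are assumptions for
// illustration only, not the original helpers: a hypothetical test size, a
// helper that opens a temporary DB with one collection, generators for a
// random sample document and a two-attribute lookup query, and a crude timing
// wrapper that runs a function benchTestSize times.
const benchTestSize = 100000 // hypothetical value

func mkTmpDBAndCol(dbDir string, colName string) (*db.DB, *db.Col) {
	tmpDB, err := db.OpenDB(dbDir)
	if err != nil {
		panic(err)
	}
	if err := tmpDB.Create(colName); err != nil {
		panic(err)
	}
	return tmpDB, tmpDB.Use(colName)
}

func sampleDoc() map[string]interface{} {
	return map[string]interface{}{
		"nested": map[string]interface{}{
			"nested": map[string]interface{}{
				"str":   strconv.Itoa(rand.Intn(benchTestSize)),
				"int":   rand.Intn(benchTestSize),
				"float": rand.Float64(),
			},
		},
		"strs":   []interface{}{strconv.Itoa(rand.Intn(benchTestSize))},
		"ints":   []interface{}{rand.Intn(benchTestSize)},
		"floats": []interface{}{rand.Float64()},
	}
}

func sampleQuery() interface{} {
	var q interface{}
	json.Unmarshal([]byte(fmt.Sprintf(
		`[{"eq": %d, "in": ["nested", "nested", "int"]}, {"eq": %d, "in": ["ints"]}]`,
		rand.Intn(benchTestSize), rand.Intn(benchTestSize))), &q)
	return q
}

// average runs fun benchTestSize times and prints rough throughput figures.
func average(name string, fun func()) {
	start := time.Now()
	for i := 0; i < benchTestSize; i++ {
		fun()
	}
	elapsed := time.Since(start)
	fmt.Printf("%s: %d ns/iter, %d iter/sec\n",
		name, elapsed.Nanoseconds()/int64(benchTestSize),
		int(float64(benchTestSize)/elapsed.Seconds()))
}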