Example #1
0
// TestInsertDeleteRead exercises basic collection CRUD: insert three
// documents, read one back, delete one, verify the deleted document is
// gone, and confirm that deleting an out-of-range location returns
// ErrorNoDoc instead of panicking.
func TestInsertDeleteRead(t *testing.T) {
	tmp := "/tmp/tiedot_test_col"
	os.Remove(tmp)
	defer os.Remove(tmp)
	col, err := OpenCollection(tmp)
	if err != nil {
		// Fatalf stops the test via runtime.Goexit; the old trailing
		// "return" here was unreachable and has been removed.
		t.Fatalf("Failed to open: %v", err)
	}
	defer col.Close()
	docs := [][]byte{
		[]byte("abc"),
		[]byte("1234"),
		[]byte("2345")}
	ids := [3]int{}
	// Insert all three documents, remembering the ID of each.
	for i := range docs {
		if ids[i], err = col.Insert(docs[i]); err != nil {
			t.Fatalf("Failed to insert: %v", err)
		}
	}
	// Documents are space-padded on disk, so trim before comparing.
	if doc0 := col.Read(ids[0]); doc0 == nil || strings.TrimSpace(string(doc0)) != string(docs[0]) {
		t.Fatalf("Failed to read")
	}
	if err = col.Delete(ids[1]); err != nil {
		t.Fatal(err)
	}
	if doc1 := col.Read(ids[1]); doc1 != nil {
		t.Fatalf("Did not delete")
	}
	if doc2 := col.Read(ids[2]); doc2 == nil || strings.TrimSpace(string(doc2)) != string(docs[2]) {
		t.Fatalf("Failed to read")
	}
	// it shall not panic
	if err = col.Delete(col.Size); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("did not error")
	}
}
Example #2
0
// TestQuery exercises the query engine against a two-partition database:
// ID expansion, hash-index lookups (with and without limit), lookups of
// values inside arrays and of null, existence tests, integer range scans
// (forward and reversed), "all", union, intersection and complement, plus
// the error paths for malformed queries and scans that would require an
// index.
func TestQuery(t *testing.T) {
	os.RemoveAll(TEST_DATA_DIR)
	defer os.RemoveAll(TEST_DATA_DIR)
	if err := os.MkdirAll(TEST_DATA_DIR, 0700); err != nil {
		t.Fatal(err)
	}
	// Force two partitions so queries span more than one partition file.
	if err := ioutil.WriteFile(TEST_DATA_DIR+"/number_of_partitions", []byte("2"), 0600); err != nil {
		t.Fatal(err)
	}
	db, err := OpenDB(TEST_DATA_DIR)
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	// Prepare collection and index
	if err = db.Create("col"); err != nil {
		t.Fatal(err)
	}
	col := db.Use("col")
	docs := []string{
		`{"a": {"b": [1]}, "c": 1, "d": 1, "f": 1, "g": 1, "special": {"thing": null}, "h": 1}`,
		`{"a": {"b": 1}, "c": [1], "d": 2, "f": 2, "g": 2}`,
		`{"a": [{"b": [2]}], "c": 2, "d": 1, "f": 3, "g": 3, "h": 3}`,
		`{"a": {"b": 3}, "c": [3], "d": 2, "f": 4, "g": 4}`,
		`{"a": {"b": [4]}, "c": 4, "d": 1, "f": 5, "g": 5}`,
		`{"a": [{"b": 5}, {"b": 6}], "c": 4, "d": 1, "f": 5, "g": 5, "h": 2}`,
		`{"a": [{"b": "val1"}, {"b": "val2"}]}`,
		`{"a": [{"b": "val3"}, {"b": ["val4", "val5"]}]}`}
	ids := make([]int, len(docs))
	for i, doc := range docs {
		var jsonDoc map[string]interface{}
		if err := json.Unmarshal([]byte(doc), &jsonDoc); err != nil {
			panic(err)
		}
		// t.Fatal stops the test; the previously unreachable "return"
		// that followed it has been removed.
		if ids[i], err = col.Insert(jsonDoc); err != nil {
			t.Fatal(err)
		}
	}
	q, err := runQuery(`["all"]`, col)
	if err != nil {
		t.Fatal(err)
	}
	// Index every path the queries below rely on (errors now checked;
	// they were previously discarded).
	for _, path := range [][]string{{"a", "b"}, {"f"}, {"h"}, {"special"}, {"e"}} {
		if err := col.Index(path); err != nil {
			t.Fatal(err)
		}
	}
	// expand numbers
	q, err = runQuery(`["1", "2", ["3", "4"], "5"]`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, 1, 2, 3, 4, 5) {
		t.Fatal(q)
	}
	// hash scan
	q, err = runQuery(`{"eq": 1, "in": ["a", "b"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[0], ids[1]) {
		t.Fatal(q)
	}
	q, err = runQuery(`{"eq": 5, "in": ["a", "b"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[5]) {
		t.Fatal(q)
	}
	q, err = runQuery(`{"eq": 6, "in": ["a", "b"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[5]) {
		t.Fatal(q)
	}
	q, err = runQuery(`{"eq": 1, "limit": 1, "in": ["a", "b"]}`, col)
	if err != nil {
		// Previously this only printed the error and carried on; a
		// failed query must fail the test.
		fmt.Println(err)
		t.Fatal(err)
	}
	// With limit 1 either matching document may be returned.
	if !ensureMapHasKeys(q, ids[1]) && !ensureMapHasKeys(q, ids[0]) {
		t.Fatal(q, ids[1], ids[0])
	}
	// collection scan
	q, err = runQuery(`{"eq": 1, "in": ["c"]}`, col)
	if dberr.Type(err) != dberr.ErrorNeedIndex {
		t.Fatal("Collection scan should not happen")
	}
	// lookup on "special" (null)
	q, err = runQuery(`{"eq": {"thing": null},  "in": ["special"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[0]) {
		t.Fatal(q)
	}
	// lookup in list
	q, err = runQuery(`{"eq": "val1",  "in": ["a", "b"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[6]) {
		t.Fatal(q)
	}
	q, err = runQuery(`{"eq": "val5",  "in": ["a", "b"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[7]) {
		t.Fatal(q)
	}
	// "e" should not exist
	q, err = runQuery(`{"has": ["e"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q) {
		t.Fatal(q)
	}
	// existence test, hash scan, with limit
	q, err = runQuery(`{"has": ["h"], "limit": 2}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[0], ids[2]) && !ensureMapHasKeys(q, ids[2], ids[5]) && !ensureMapHasKeys(q, ids[5], ids[0]) {
		t.Fatal(q, ids[0], ids[1], ids[2])
	}
	// existence test with incorrect input
	q, err = runQuery(`{"has": ["c"], "limit": "a"}`, col)
	if dberr.Type(err) != dberr.ErrorExpectingInt {
		t.Fatal(err)
	}
	// existence test, collection scan & PK
	q, err = runQuery(`{"has": ["c"], "limit": 2}`, col)
	if dberr.Type(err) != dberr.ErrorNeedIndex {
		t.Fatal("Existence test should return error")
	}
	q, err = runQuery(`{"has": ["@id"], "limit": 2}`, col)
	if dberr.Type(err) != dberr.ErrorNeedIndex {
		t.Fatal("Existence test should return error")
	}
	// int range scan with incorrect input
	q, err = runQuery(`{"int-from": "a", "int-to": 4, "in": ["f"], "limit": 1}`, col)
	if dberr.Type(err) != dberr.ErrorExpectingInt {
		t.Fatal(err)
	}
	q, err = runQuery(`{"int-from": 1, "int-to": "a", "in": ["f"], "limit": 1}`, col)
	if dberr.Type(err) != dberr.ErrorExpectingInt {
		t.Fatal(err)
	}
	q, err = runQuery(`{"int-from": 1, "int-to": 2, "in": ["f"], "limit": "a"}`, col)
	if dberr.Type(err) != dberr.ErrorExpectingInt {
		t.Fatal(err)
	}
	// int range scan
	q, err = runQuery(`{"int-from": 2, "int-to": 4, "in": ["f"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[1], ids[2], ids[3]) {
		t.Fatal(q)
	}
	q, err = runQuery(`{"int-from": 2, "int-to": 4, "in": ["f"], "limit": 2}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[1], ids[2]) {
		t.Fatal(q, ids[1], ids[2])
	}
	// int hash scan using reversed range and limit
	q, err = runQuery(`{"int-from": 10, "int-to": 0, "in": ["f"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[5], ids[4], ids[3], ids[2], ids[1], ids[0]) {
		t.Fatal(q)
	}
	q, err = runQuery(`{"int-from": 10, "int-to": 0, "in": ["f"], "limit": 2}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[5], ids[4]) {
		t.Fatal(q)
	}
	// all documents
	q, err = runQuery(`"all"`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[0], ids[1], ids[2], ids[3], ids[4], ids[5], ids[6], ids[7]) {
		t.Fatal(q)
	}
	// union
	if err := col.Index([]string{"c"}); err != nil {
		t.Fatal(err)
	}
	q, err = runQuery(`[{"eq": 4, "limit": 1, "in": ["a", "b"]}, {"eq": 1, "limit": 1, "in": ["c"]}]`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[0], ids[4]) && !ensureMapHasKeys(q, ids[1], ids[4]) {
		t.Fatal(q)
	}
	// intersection
	if err := col.Index([]string{"d"}); err != nil {
		t.Fatal(err)
	}
	q, err = runQuery(`{"n": [{"eq": 2, "in": ["d"]}, "all"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[1], ids[3]) {
		t.Fatal(q)
	}
	// intersection with incorrect input
	// NOTE(review): the query uses "c" (the complement operator), same as
	// the complement error case below — possibly "n" was intended; both
	// yield ErrorExpectingSubQuery, so behavior is kept as-is.
	q, err = runQuery(`{"c": null}`, col)
	if dberr.Type(err) != dberr.ErrorExpectingSubQuery {
		t.Fatal(err)
	}
	// complement
	q, err = runQuery(`{"c": [{"eq": 4,  "in": ["c"]}, {"eq": 2, "in": ["d"]}, "all"]}`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[0], ids[2], ids[6], ids[7]) {
		t.Fatal(q)
	}
	// complement with incorrect input
	q, err = runQuery(`{"c": null}`, col)
	if dberr.Type(err) != dberr.ErrorExpectingSubQuery {
		t.Fatal(err)
	}
	// union of intersection (error check added; it was dropped before)
	q, err = runQuery(`[{"n": [{"eq": 3, "in": ["c"]}]}, {"n": [{"eq": 2, "in": ["c"]}]}]`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[2], ids[3]) {
		t.Fatal(q)
	}
	// union of complement (error check added)
	q, err = runQuery(`[{"c": [{"eq": 3, "in": ["c"]}]}, {"c": [{"eq": 2, "in": ["c"]}]}]`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[2], ids[3]) {
		t.Fatal(q)
	}
	// union of complement of intersection (error check added)
	q, err = runQuery(`[{"c": [{"n": [{"eq": 1, "in": ["d"]},{"eq": 1, "in": ["c"]}]},{"eq": 1, "in": ["d"]}]},{"eq": 2, "in": ["c"]}]`, col)
	if err != nil {
		t.Fatal(err)
	}
	if !ensureMapHasKeys(q, ids[2], ids[4], ids[5]) {
		t.Fatal(q)
	}
}
Example #3
0
// embeddedExample demonstrates the embedded API end to end: collection
// management (create/rename/drop/scrub), document CRUD, index management,
// and a query whose results are read back by document ID.
func embeddedExample() {
	// ****************** Collection Management ******************

	myDBDir := "/tmp/MyDatabase"
	os.RemoveAll(myDBDir)
	defer os.RemoveAll(myDBDir)

	// (Create if not exist) open a database
	myDB, err := db.OpenDB(myDBDir)
	if err != nil {
		panic(err)
	}

	// Create two collections: Feeds and Votes
	if err := myDB.Create("Feeds"); err != nil {
		panic(err)
	}
	if err := myDB.Create("Votes"); err != nil {
		panic(err)
	}

	// What collections do I now have?
	for _, name := range myDB.AllCols() {
		fmt.Printf("I have a collection called %s\n", name)
	}

	// Rename collection "Votes" to "Points"
	if err := myDB.Rename("Votes", "Points"); err != nil {
		panic(err)
	}

	// Drop (delete) collection "Points"
	if err := myDB.Drop("Points"); err != nil {
		panic(err)
	}

	// Scrub (repair and compact) "Feeds"
	if err := myDB.Scrub("Feeds"); err != nil {
		panic(err)
	}

	// ****************** Document Management ******************

	// Start using a collection (the reference is valid until DB schema changes or Scrub is carried out)
	feeds := myDB.Use("Feeds")

	// Insert document (afterwards the docID uniquely identifies the document and will never change)
	docID, err := feeds.Insert(map[string]interface{}{
		"name": "Go 1.2 is released",
		"url":  "golang.org"})
	if err != nil {
		panic(err)
	}

	// Read document
	readBack, err := feeds.Read(docID)
	if err != nil {
		panic(err)
	}
	fmt.Println("Document", docID, "is", readBack)

	// Update document
	err = feeds.Update(docID, map[string]interface{}{
		"name": "Go is very popular",
		"url":  "google.com"})
	if err != nil {
		panic(err)
	}

	// Process all documents (note that document order is undetermined)
	feeds.ForEachDoc(func(id int, docContent []byte) (willMoveOn bool) {
		fmt.Println("Document", id, "is", string(docContent))
		// Return true to move on to the next document; returning false
		// would stop the iteration. (The old second, unreachable return
		// statement was removed.)
		return true
	})

	// Delete document
	if err := feeds.Delete(docID); err != nil {
		panic(err)
	}

	// More complicated error handing - identify the error Type.
	// In this example, the error code tells that the document no longer exists.
	if err := feeds.Delete(docID); dberr.Type(err) == dberr.ErrorNoDoc {
		fmt.Println("The document was already deleted")
	}

	// ****************** Index Management ******************
	// Indexes assist in many types of queries
	// Create index (path leads to document JSON attribute)
	if err := feeds.Index([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Title"}); err != nil {
		panic(err)
	}
	if err := feeds.Index([]string{"Source"}); err != nil {
		panic(err)
	}

	// What indexes do I have on collection A?
	for _, path := range feeds.AllIndexes() {
		fmt.Printf("I have an index on path %v\n", path)
	}

	// Remove index
	if err := feeds.Unindex([]string{"author", "name", "first_name"}); err != nil {
		panic(err)
	}

	// ****************** Queries ******************
	// Prepare some documents for the query (insert errors are now checked,
	// consistent with the rest of the example)
	if _, err := feeds.Insert(map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3}); err != nil {
		panic(err)
	}
	if _, err := feeds.Insert(map[string]interface{}{"Title": "Kitkat is here", "Source": "google.com", "Age": 2}); err != nil {
		panic(err)
	}
	if _, err := feeds.Insert(map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1}); err != nil {
		panic(err)
	}

	var query interface{}
	if err := json.Unmarshal([]byte(`[{"eq": "New Go release", "in": ["Title"]}, {"eq": "slackware.com", "in": ["Source"]}]`), &query); err != nil {
		panic(err)
	}

	queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys

	if err := db.EvalQuery(query, feeds, &queryResult); err != nil {
		panic(err)
	}

	// Query result are document IDs
	for id := range queryResult {
		// To get query result document, simply read it
		readBack, err := feeds.Read(id)
		if err != nil {
			panic(err)
		}
		fmt.Printf("Query returned document %v\n", readBack)
	}

	// Gracefully close database
	if err := myDB.Close(); err != nil {
		panic(err)
	}
}
Example #4
0
// example.go written in test case style.
// TestAll mirrors the embedded example as a test: collection management,
// document CRUD, index management, and a query whose results are read
// back and compared.
func TestAll(t *testing.T) {
	myDBDir := "/tmp/tiedot_test_embeddedExample"
	os.RemoveAll(myDBDir)
	defer os.RemoveAll(myDBDir)

	myDB, err := db.OpenDB(myDBDir)
	if err != nil {
		t.Fatal(err)
	}
	if err := myDB.Create("Feeds"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Create("Votes"); err != nil {
		t.Fatal(err)
	}
	for _, name := range myDB.AllCols() {
		if name != "Feeds" && name != "Votes" {
			t.Fatal(myDB.AllCols())
		}
	}
	if err := myDB.Rename("Votes", "Points"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Drop("Points"); err != nil {
		t.Fatal(err)
	}
	if err := myDB.Scrub("Feeds"); err != nil {
		t.Fatal(err)
	}

	feeds := myDB.Use("Feeds")
	docID, err := feeds.Insert(map[string]interface{}{
		"name": "Go 1.2 is released",
		"url":  "golang.org"})
	if err != nil {
		t.Fatal(err)
	}
	readBack, err := feeds.Read(docID)
	if err != nil {
		t.Fatal(err)
	}
	if !sameMap(readBack, map[string]interface{}{
		"name": "Go 1.2 is released",
		"url":  "golang.org"}) {
		t.Fatal(readBack)
	}
	err = feeds.Update(docID, map[string]interface{}{
		"name": "Go is very popular",
		"url":  "google.com"})
	if err != nil {
		// Was panic(err); t.Fatal is consistent with the rest of the test.
		t.Fatal(err)
	}
	feeds.ForEachDoc(func(id int, docContent []byte) (willMoveOn bool) {
		var doc map[string]interface{}
		if json.Unmarshal(docContent, &doc) != nil {
			t.Fatal("cannot deserialize")
		}
		if !sameMap(doc, map[string]interface{}{
			"name": "Go is very popular",
			"url":  "google.com"}) {
			t.Fatal(doc)
		}
		return true
	})
	if err := feeds.Delete(docID); err != nil {
		t.Fatal(err)
	}
	// Deleting the same document again must report ErrorNoDoc.
	if err := feeds.Delete(docID); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal(err)
	}

	if err := feeds.Index([]string{"author", "name", "first_name"}); err != nil {
		t.Fatal(err)
	}
	if err := feeds.Index([]string{"Title"}); err != nil {
		t.Fatal(err)
	}
	if err := feeds.Index([]string{"Source"}); err != nil {
		t.Fatal(err)
	}
	for _, path := range feeds.AllIndexes() {
		joint := strings.Join(path, "")
		if joint != "authornamefirst_name" && joint != "Title" && joint != "Source" {
			t.Fatal(feeds.AllIndexes())
		}
	}
	if err := feeds.Unindex([]string{"author", "name", "first_name"}); err != nil {
		t.Fatal(err)
	}

	// ****************** Queries ******************
	// Prepare some documents for the query
	if _, err := feeds.Insert(map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3}); err != nil {
		t.Fatal("failure")
	}
	if _, err := feeds.Insert(map[string]interface{}{"Title": "Kitkat is here", "Source": "google.com", "Age": 2}); err != nil {
		t.Fatal("failure")
	}
	if _, err := feeds.Insert(map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1}); err != nil {
		t.Fatal("failure")
	}
	var query interface{}
	if json.Unmarshal([]byte(`[{"eq": "New Go release", "in": ["Title"]}, {"eq": "slackware.com", "in": ["Source"]}]`), &query) != nil {
		t.Fatal("failure")
	}
	queryResult := make(map[int]struct{}) // query result (document IDs) goes into map keys
	if err := db.EvalQuery(query, feeds, &queryResult); err != nil {
		t.Fatal(err)
	}
	// Query result are document IDs
	for id := range queryResult {
		// To get query result document, simply read it
		readBack, err := feeds.Read(id)
		if err != nil {
			// Was panic(err); t.Fatal is consistent with the rest of the test.
			t.Fatal(err)
		}
		if !sameMap(readBack, map[string]interface{}{"Title": "New Go release", "Source": "golang.org", "Age": 3}) &&
			!sameMap(readBack, map[string]interface{}{"Title": "Good Slackware", "Source": "slackware.com", "Age": 1}) {
			t.Fatal(readBack)
		}
	}

	if err := myDB.Close(); err != nil {
		t.Fatal(err)
	}
}
Example #5
0
// TestDocCrudAndIdx runs a document CRUD cycle over 2011 documents and
// verifies index consistency after every phase: insert, update, delete,
// index rebuild, scrub, whole-collection iteration, and paged iteration.
// NOTE(review): the DB is only closed on the success path at the end; a
// mid-test Fatal leaves it open — acceptable for a test, but worth knowing.
func TestDocCrudAndIdx(t *testing.T) {
	os.RemoveAll(TEST_DATA_DIR)
	defer os.RemoveAll(TEST_DATA_DIR)
	if err := os.MkdirAll(TEST_DATA_DIR, 0700); err != nil {
		t.Fatal(err)
	}
	// Two partitions, so documents spread across two partition files.
	if err := ioutil.WriteFile(TEST_DATA_DIR+"/number_of_partitions", []byte("2"), 0600); err != nil {
		t.Fatal(err)
	}
	db, err := OpenDB(TEST_DATA_DIR)
	if err != nil {
		t.Fatal(err)
	}
	// Prepare collection and index
	if err = db.Create("col"); err != nil {
		t.Fatal(err)
	}
	col := db.Use("col")
	if err = col.Index([]string{"a", "b"}); err != nil {
		t.Fatal(err)
	}
	numDocs := 2011
	docIDs := make([]int, numDocs)
	// Insert documents: document i is {"a": {"b": i}}.
	for i := 0; i < numDocs; i++ {
		if docIDs[i], err = col.Insert(map[string]interface{}{"a": map[string]interface{}{"b": i}}); err != nil {
			t.Fatal(err)
		}
	}
	// Read documents and verify index
	if _, err = col.Read(123456); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("Did not error")
	}
	for i, docID := range docIDs {
		// JSON numbers decode as float64, hence the float64 comparison.
		if doc, err := col.Read(docID); err != nil || doc["a"].(map[string]interface{})["b"].(float64) != float64(i) {
			t.Fatal(docID, doc)
		}
		if err = idxHas(col, []string{"a", "b"}, i, docID); err != nil {
			t.Fatal(err)
		}
	}
	// Update document
	if err = col.Update(654321, map[string]interface{}{}); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("Did not error")
	}
	for i, docID := range docIDs {
		// i -> i * 2
		if err = col.Update(docID, map[string]interface{}{"a": map[string]interface{}{"b": i * 2}}); err != nil {
			t.Fatal(err)
		}
	}
	// After update - verify documents and index
	for i, docID := range docIDs {
		if doc, err := col.Read(docID); err != nil || doc["a"].(map[string]interface{})["b"].(float64) != float64(i*2) {
			t.Fatal(docID, doc)
		}
		if i == 0 {
			// 0*2 == 0, so the old index entry is also the new one.
			if err = idxHas(col, []string{"a", "b"}, 0, docID); err != nil {
				t.Fatal(err)
			}
		} else {
			// The old value i must be unindexed, the new value i*2 indexed.
			if err = idxHasNot(col, []string{"a", "b"}, i, docID); err != nil {
				t.Fatal(err)
			}
			if err = idxHas(col, []string{"a", "b"}, i*2, docID); err != nil {
				t.Fatal(err)
			}
		}
	}
	// Delete half of those documents
	if err = col.Delete(654321); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("Did not error")
	}
	for i := 0; i < numDocs/2+1; i++ {
		if err := col.Delete(docIDs[i]); err != nil {
			t.Fatal(err)
		}
		// A second delete of the same document must report ErrorNoDoc.
		if err := col.Delete(docIDs[i]); dberr.Type(err) != dberr.ErrorNoDoc {
			t.Fatal("Did not error")
		}
	}
	// After delete - verify
	for i, docID := range docIDs {
		if i < numDocs/2+1 {
			// After delete - verify deleted documents and index
			if _, err := col.Read(docID); dberr.Type(err) != dberr.ErrorNoDoc {
				t.Fatal("Did not delete", i, docID)
			}
			if err = idxHasNot(col, []string{"a", "b"}, i*2, docID); err != nil {
				t.Fatal(err)
			}
		} else {
			// After delete - verify unaffected documents and index
			if doc, err := col.Read(docID); err != nil || doc["a"].(map[string]interface{})["b"].(float64) != float64(i*2) {
				t.Fatal(docID, doc)
			}
			if err = idxHas(col, []string{"a", "b"}, i*2, docID); err != nil {
				t.Fatal(err)
			}
		}
	}
	// Recreate index and verify
	if err = col.Unindex([]string{"a", "b"}); err != nil {
		t.Fatal(err)
	}
	if err = col.Index([]string{"a", "b"}); err != nil {
		t.Fatal(err)
	}
	for i := numDocs/2 + 1; i < numDocs; i++ {
		if doc, err := col.Read(docIDs[i]); err != nil || doc["a"].(map[string]interface{})["b"].(float64) != float64(i*2) {
			t.Fatal(doc, err)
		}
		if err = idxHas(col, []string{"a", "b"}, i*2, docIDs[i]); err != nil {
			t.Fatal(err)
		}
	}

	// Verify that there are approximately 1000 documents
	// (2011 - (2011/2 + 1) = 1005 remain; the estimate may be off by a lot).
	t.Log("ApproxDocCount", col.ApproxDocCount())
	if col.ApproxDocCount() < 600 || col.ApproxDocCount() > 1400 {
		t.Fatal("Approximate is way off", col.ApproxDocCount())
	}

	// Scrub and verify
	if err = db.Scrub("col"); err != nil {
		t.Fatal(err)
	}
	// Scrub invalidates the old collection reference - fetch a fresh one.
	col = db.Use("col")
	for i := numDocs/2 + 1; i < numDocs; i++ {
		if doc, err := col.Read(docIDs[i]); err != nil || doc["a"].(map[string]interface{})["b"].(float64) != float64(i*2) {
			t.Fatal(doc, err)
		}
		if err = idxHas(col, []string{"a", "b"}, i*2, docIDs[i]); err != nil {
			t.Fatal(err)
		}
	}

	// Iterate over all documents 10 times
	start := time.Now().UnixNano()
	for i := 0; i < 10; i++ {
		col.ForEachDoc(func(_ int, _ []byte) bool {
			return true
		})
	}
	timediff := time.Now().UnixNano() - start
	t.Log("It took", timediff/1000000, "milliseconds")

	// Verify again that there are approximately 1000 documents
	t.Log("ApproxDocCount", col.ApproxDocCount())
	if col.ApproxDocCount() < 600 || col.ApproxDocCount() > 1400 {
		t.Fatal("Approximate is way off", col.ApproxDocCount())
	}

	// Read back all documents page by page
	totalPage := col.ApproxDocCount() / 100
	collectedIDs := make(map[int]struct{})
	for page := 0; page < totalPage; page++ {
		col.ForEachDocInPage(page, totalPage, func(id int, _ []byte) bool {
			collectedIDs[id] = struct{}{}
			return true
		})
		t.Log("Went through page ", page, " got ", len(collectedIDs), " documents so far")
	}
	// NOTE(review): numDocs/2 == 1005 here because numDocs is odd, matching
	// the 1005 surviving documents.
	if len(collectedIDs) != numDocs/2 {
		t.Fatal("Wrong number of docs", len(collectedIDs))
	}

	if err = db.Close(); err != nil {
		t.Fatal(err)
	}
}
Example #6
0
// TestCollectionGrowAndOutOfBoundAccess verifies the collection's used-size
// accounting, that Read/Update/Delete on invalid locations fail with
// ErrorNoDoc (never panic), and that inserting past the initial file size
// triggers exactly one growth.
func TestCollectionGrowAndOutOfBoundAccess(t *testing.T) {
	tmp := "/tmp/tiedot_test_col"
	os.Remove(tmp)
	defer os.Remove(tmp)
	col, err := OpenCollection(tmp)
	if err != nil {
		t.Fatalf("Failed to open: %v", err)
		return
	}
	// NOTE(review): col is closed both here via defer and explicitly at the
	// end of the test; presumably a double Close is tolerated — confirm.
	defer col.Close()
	// Insert several documents
	docs := [][]byte{
		[]byte("abc"),
		[]byte("1234"),
		[]byte("2345")}
	if _, err = col.Insert(docs[0]); err != nil {
		t.Fatalf("Failed to insert: %v", err)
	}
	if _, err = col.Insert(docs[1]); err != nil {
		t.Fatalf("Failed to insert: %v", err)
	}
	if _, err = col.Insert(docs[2]); err != nil {
		t.Fatalf("Failed to insert: %v", err)
	}
	// Test UsedSize: this formula assumes each document's room is twice its
	// content length plus a header (3-byte doc, then two 4-byte docs).
	calculatedUsedSize := (DOC_HEADER + 3*2) + (DOC_HEADER+4*2)*2
	if col.Used != calculatedUsedSize {
		t.Fatalf("Invalid UsedSize")
	}
	// Read invalid location: 1 is inside the first document's room but not
	// a document start; Used/Size/999999999 are at or past the data end.
	if doc := col.Read(1); doc != nil {
		t.Fatalf("Read invalid location")
	}
	if doc := col.Read(col.Used); doc != nil {
		t.Fatalf("Read invalid location")
	}
	if doc := col.Read(col.Size); doc != nil {
		t.Fatalf("Read invalid location")
	}
	if doc := col.Read(999999999); doc != nil {
		t.Fatalf("Read invalid location")
	}
	// Update invalid location
	if _, err := col.Update(1, []byte{}); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatalf("Update invalid location")
	}
	if _, err := col.Update(col.Used, []byte{}); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatalf("Update invalid location")
	}
	if _, err := col.Update(col.Size, []byte{}); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatalf("Update invalid location")
	}
	if _, err := col.Update(999999999, []byte{}); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatalf("Update invalid location")
	}
	// Delete invalid location
	if err = col.Delete(1); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("did not error")
	}
	if err = col.Delete(col.Used); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("did not error")
	}
	if err = col.Delete(col.Size); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("did not error")
	}
	if err = col.Delete(999999999); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("did not error")
	}
	// Insert - not enough room: fill the initial file, then one more insert
	// forces the file to grow by col.Growth.
	count := 0
	for i := 0; i < COL_FILE_GROWTH; i += DOC_MAX_ROOM {
		if _, err := col.Insert(make([]byte, DOC_MAX_ROOM/2)); err != nil {
			t.Fatal(err)
		}
		count++
	}
	if _, err := col.Insert(make([]byte, DOC_MAX_ROOM/2)); err != nil {
		t.Fatal(err)
	}
	count++
	// Each DOC_MAX_ROOM/2 document occupies DOC_HEADER + DOC_MAX_ROOM
	// (content doubled for padding room).
	calculatedUsedSize += count * (DOC_HEADER + DOC_MAX_ROOM)
	if col.Used != calculatedUsedSize {
		t.Fatalf("Wrong UsedSize %d %d", col.Used, calculatedUsedSize)
	}
	if col.Size != COL_FILE_GROWTH+col.Growth {
		t.Fatalf("Size changed?! %d %d %d", col.Size, COL_FILE_GROWTH, col.Growth)
	}
	if err = col.Close(); err != nil {
		t.Fatal(err)
	}
}
Example #7
0
// TestPartitionDocCRUD verifies insert/read/update/delete, the error paths
// for missing documents, per-document update locking, and ForEachDoc
// iteration on a single partition.
func TestPartitionDocCRUD(t *testing.T) {
	colPath := "/tmp/tiedot_test_col"
	htPath := "/tmp/tiedot_test_ht"
	os.Remove(colPath)
	os.Remove(htPath)
	defer os.Remove(colPath)
	defer os.Remove(htPath)
	p, err := OpenPartition(colPath, htPath)
	if err != nil {
		t.Fatal(err)
	}
	// Insert two documents and read both back (stored content is padded).
	if _, err := p.Insert(1, []byte("1")); err != nil {
		t.Fatal(err)
	}
	if _, err := p.Insert(2, []byte("2")); err != nil {
		t.Fatal(err)
	}
	if got, err := p.Read(1); err != nil || string(got) != "1 " {
		t.Fatal(err, got)
	}
	if got, err := p.Read(2); err != nil || string(got) != "2 " {
		t.Fatal(err, got)
	}
	// Update an existing document, then a missing one (must error).
	if err := p.Update(1, []byte("abcdef")); err != nil {
		t.Fatal(err)
	}
	if err := p.Update(1234, []byte("abcdef")); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("Did not error")
	}
	if got, err := p.Read(1); err != nil || string(got) != "abcdef      " {
		t.Fatal(err, got)
	}
	// Delete document 1, confirm it is gone, and confirm deleting an
	// unknown ID reports ErrorNoDoc.
	if err := p.Delete(1); err != nil {
		t.Fatal(err)
	}
	if _, err := p.Read(1); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("Did not error")
	}
	if err := p.Delete(123); dberr.Type(err) != dberr.ErrorNoDoc {
		t.Fatal("Did not error")
	}
	// A held update lock blocks a second lock until it is released.
	if err := p.LockUpdate(123); err != nil {
		t.Fatal(err)
	}
	if err := p.LockUpdate(123); dberr.Type(err) != dberr.ErrorDocLocked {
		t.Fatal("Did not error")
	}
	p.UnlockUpdate(123)
	if err := p.LockUpdate(123); err != nil {
		t.Fatal(err)
	}
	// Iterate: only document 2 should remain in the partition.
	p.ForEachDoc(0, 1, func(id int, doc []byte) bool {
		if id != 2 || string(doc) != "2 " {
			t.Fatal("ID 2 should be the only remaining document")
		}
		return true
	})
	// Clear the partition, then close it.
	if err := p.Clear(); err != nil {
		t.Fatal(err)
	}
	if err := p.Close(); err != nil {
		t.Fatal(err)
	}
}