Example #1
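// Count is an HTTP endpoint handler: it evaluates query "q" against collection "col"
// and responds with the number of matching documents as plain text.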
func Count(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Cache-Control", "must-revalidate")
	w.Header().Set("Content-Type", "text/plain")
	var col, q string
	if !Require(w, r, "col", &col) {
		return
	}
	if !Require(w, r, "q", &q) {
		return
	}
	var qJson interface{}
	if err := json.Unmarshal([]byte(q), &qJson); err != nil {
		http.Error(w, fmt.Sprintf("'%v' is not valid JSON.", q), 400)
		return
	}
	V2Sync.RLock()
	defer V2Sync.RUnlock()
	dbcol := V2DB.Use(col)
	if dbcol == nil {
		http.Error(w, fmt.Sprintf("Collection '%s' does not exist.", col), 400)
		return
	}
	queryResult := make(map[uint64]struct{})
	if err := db.EvalQueryV2(qJson, dbcol, &queryResult); err != nil {
		http.Error(w, fmt.Sprint(err), 400)
		return
	}
	w.Write([]byte(strconv.Itoa(len(queryResult))))
}
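For context, a handler like Count can be exercised directly with net/http/httptest. The sketch below is a hedged illustration only: it assumes Require reads the "col" and "q" values from the request's query string (Require itself is not shown in these examples), and the collection name "feeds" is purely made up.

func countHandlerSketch() {
	// Build a request carrying the assumed "col" and "q" parameters;
	// %22all%22 decodes to the JSON query "all" used elsewhere in these examples.
	req := httptest.NewRequest("GET", "/count?col=feeds&q=%22all%22", nil)
	rec := httptest.NewRecorder()
	Count(rec, req)
	// On success the recorder's body holds the match count as plain text.
	fmt.Println(rec.Code, rec.Body.String())
}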
Example #2
// Benchmark document operations involving UID.
func benchmark3() {
	// initialization
	rand.Seed(time.Now().UTC().UnixNano())
	// prepare benchmark data
	docs := [BENCH_SIZE]interface{}{}
	for i := range docs {
		if err := json.Unmarshal([]byte(
			`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(BENCH_SIZE))+`}},`+
				`"c": {"d": `+strconv.Itoa(rand.Intn(BENCH_SIZE))+`},`+
				`"more": "abcdefghijklmnopqrstuvwxyz"}`), &docs[i]); err != nil {
			panic("json error")
		}
	}
	// prepare collection
	tmp := "/tmp/tiedot_bench"
	os.RemoveAll(tmp)
	defer os.RemoveAll(tmp)
	col, err := db.OpenCol(tmp)
	if err != nil {
		panic(err)
	}
	col.Index([]string{"a", "b", "c"})
	col.Index([]string{"c", "d"})
	uidsMutex := new(sync.Mutex)
	uids := make([]string, 0, BENCH_SIZE)
	// start benchmarks
	average("insert", BENCH_SIZE, func() {}, func() {
		if _, uid, err := col.InsertWithUID(docs[rand.Intn(BENCH_SIZE)]); err == nil {
			uidsMutex.Lock()
			uids = append(uids, uid)
			uidsMutex.Unlock()
		} else {
			panic("insert error")
		}
	})

	average("read", BENCH_SIZE, func() {
	}, func() {
		var doc interface{}
		col.ReadByUID(uids[rand.Intn(BENCH_SIZE)], &doc)
	})
	average("lookup", BENCH_SIZE, func() {}, func() {
		var query interface{}
		if err := json.Unmarshal([]byte(`{"c": [{"eq": `+strconv.Itoa(rand.Intn(BENCH_SIZE))+`, "in": ["a", "b", "c"], "limit": 1}, `+
			`{"eq": `+strconv.Itoa(rand.Intn(BENCH_SIZE))+`, "in": ["c", "d"], "limit": 1}]}`), &query); err != nil {
			panic("json error")
		}
		result := make(map[uint64]struct{})
		if err := db.EvalQueryV2(query, col, &result); err != nil {
			panic("query error")
		}
	})
	average("update", BENCH_SIZE, func() {}, func() {
		col.UpdateByUID(uids[rand.Intn(BENCH_SIZE)], docs[rand.Intn(BENCH_SIZE)])
	})
	average("delete", BENCH_SIZE, func() {}, func() {
		col.DeleteByUID(uids[rand.Intn(BENCH_SIZE)])
	})
	col.Close()
}
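The average helper used by this benchmark is not part of the listing. A minimal sketch of such a helper, under the assumption that it runs the preparation function once and then times total invocations of the work function (the real helper in tiedot may differ), could look like:

func average(name string, total int, prepare func(), work func()) {
	prepare()
	start := float64(time.Now().UTC().UnixNano())
	for i := 0; i < total; i++ {
		work()
	}
	end := float64(time.Now().UTC().UnixNano())
	// Report average cost per iteration and throughput.
	perIter := (end - start) / float64(total)
	fmt.Printf("%s %d: %d ns/iter, %d iter/sec\n", name, total, int(perIter), int(1000000000/perIter))
}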
Example #3
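// Query is an HTTP endpoint handler: it evaluates query "q" against collection "col"
// and writes each matching document as a line of JSON.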
func Query(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Cache-Control", "must-revalidate")
	w.Header().Set("Content-Type", "text/plain")
	var col, q string
	if !Require(w, r, "col", &col) {
		return
	}
	if !Require(w, r, "q", &q) {
		return
	}
	var qJson interface{}
	if err := json.Unmarshal([]byte(q), &qJson); err != nil {
		http.Error(w, fmt.Sprintf("'%v' is not valid JSON.", q), 400)
		return
	}
	V2Sync.RLock()
	defer V2Sync.RUnlock()
	dbcol := V2DB.Use(col)
	if dbcol == nil {
		http.Error(w, fmt.Sprintf("Collection '%s' does not exist.", col), 400)
		return
	}
	// evaluate the query
	queryResult := make(map[uint64]struct{})
	if err := db.EvalQueryV2(qJson, dbcol, &queryResult); err != nil {
		http.Error(w, fmt.Sprint(err), 400)
		return
	}
	// write each document on a new line
	for k := range queryResult {
		var doc interface{}
		dbcol.Read(k, &doc)
		if doc == nil {
			continue
		}
		resp, err := json.Marshal(doc)
		if err != nil {
			log.Printf("Query returned invalid JSON '%v'", doc)
			continue
		}
		w.Write([]byte(string(resp) + "\r\n"))
	}
}
Example #4
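// embeddedExample demonstrates embedded use of tiedot: collection management,
// document insert/read/update/delete, indexing, and query evaluation.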
func embeddedExample() {
	dir := "/tmp/MyDatabase"
	os.RemoveAll(dir)
	defer os.RemoveAll(dir)

	// Open database
	myDB, err := db.OpenDB(dir)
	if err != nil {
		panic(err)
	}

	// Create collection
	if err := myDB.Create("A"); err != nil {
		panic(err)
	}
	if err := myDB.Create("B"); err != nil {
		panic(err)
	}

	// Rename collection
	if err := myDB.Rename("B", "C"); err != nil {
		panic(err)
	}

	// Which collections do I have?
	for name := range myDB.StrCol {
		fmt.Printf("I have a collection called %s\n", name)
	}

	// Drop collection
	if err := myDB.Drop("C"); err != nil {
		panic(err)
	}

	// Start using collection
	A := myDB.Use("A")

	// Collection insert/update/delete operations require the document to be a map[string]interface{},
	// otherwise indexes may not work.
	docID, err := A.Insert(map[string]interface{}{"Url": "http://google.com", "Owner": "Google Inc."})
	if err != nil {
		panic(err)
	}
	fmt.Printf("Inserted document at %d (document ID)\n", docID)

	// Update document (you can still use a struct, but this example uses a generic map[string]interface{})
	var doc map[string]interface{}
	json.Unmarshal([]byte(`{"Url": "http://www.google.com.au", "Owner": "Google Inc."}`), &doc)
	newID, err := A.Update(docID, doc) // newID may or may not be the same!
	if err != nil {
		panic(err)
	}
	fmt.Printf("Updated document %d to %v, new ID is %d\n", docID, doc, newID)

	// Read document
	var readback map[string]interface{}
	if err := A.Read(newID, &readback); err != nil {
		panic(err)
	}
	fmt.Printf("Read document ID %d: %v\n", newID, readback)

	// Delete document
	A.Delete(123) // passing an invalid ID will not harm your data

	/*
	   Collection insert/update/delete have their dedicated "durable" calls:
	   - durableInsert
	   - durableUpdate
	   - durableDelete
	   Those operations ensure a disk flush after each call to guarantee data durability on disk.
	   However, those operations are 10000x more expensive than ordinary insert/update/delete!
	*/
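	// For illustration only: the "flush after every write" behaviour described
	// above corresponds to syncing the underlying file after each write. A
	// generic standard-library sketch (not tiedot's own code) would be:
	//   f, _ := os.OpenFile("/tmp/data", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
	//   f.Write([]byte("document bytes"))
	//   f.Sync() // ask the OS to commit the write to stable storage
	//   f.Close()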

	// Create index
	if err := A.Index([]string{"a", "b", "c"}); err != nil {
		panic(err)
	}

	// Which indexes do I have on collection A?
	for path := range A.StrHT {
		fmt.Printf("I have an index on path %s\n", path)
	}

	// Remove index
	if err := A.Unindex([]string{"a", "b", "c"}); err != nil {
		panic(err)
	}

	// Execute query
	result := make(map[uint64]struct{})
	var query interface{}
	json.Unmarshal([]byte(`"all"`), &query)
	if err := db.EvalQueryV2(query, A, &result); err != nil {
		panic(err)
	}
	for id := range result {
		// the map keys are the query results: the matching document IDs
		fmt.Printf("Query returned document ID %d\n", id)
	}

	// Gracefully close database
	myDB.Close()
}
Example #5
// Insert, read, update, delete and query operations all running at once.
func benchmark2() {
	numThreads := runtime.GOMAXPROCS(-1)
	rand.Seed(time.Now().UTC().UnixNano())
	// prepare collection
	tmp := "/tmp/tiedot_bench"
	os.RemoveAll(tmp)
	col, err := db.OpenCol(tmp)
	if err != nil {
		panic(err)
	}
	col.Index([]string{"a", "b", "c"})
	col.Index([]string{"c", "d"})
	docs := make([]uint64, 0, BENCH2_SIZE*2+1000)
	docsMutex := new(sync.Mutex)
	// Prepare 1000 docs as a start
	var docToInsert interface{}
	for j := 0; j < 1000; j++ {
		if err = json.Unmarshal([]byte(
			`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`}},`+
				`"c": {"d": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`},`+
				`"more": "abcdefghijklmnopqrstuvwxyz"}`), &docToInsert); err != nil {
			panic(err)
		}
		if newID, err := col.Insert(docToInsert); err == nil {
			docs = append(docs, newID)
		} else {
			panic(err)
		}
	}
	// benchmark begins
	wp := new(sync.WaitGroup)
	wp.Add(5 * numThreads) // (CRUD + query) * number of benchmark threads
	start := float64(time.Now().UTC().UnixNano())
	// insert BENCH2_SIZE * 2 documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Insert thread %d starting\n", i)
			defer wp.Done()
			var docToInsert interface{}
			var err error
			for j := 0; j < BENCH2_SIZE/numThreads*2; j++ {
				if err = json.Unmarshal([]byte(
					`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`}},`+
						`"c": {"d": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`},`+
						`"more": "abcdefghijklmnopqrstuvwxyz"}`), &docToInsert); err != nil {
					panic(err)
				}
				if newID, err := col.Insert(docToInsert); err == nil {
					docsMutex.Lock()
					docs = append(docs, newID)
					docsMutex.Unlock()
				} else {
					panic(err)
				}
			}
			fmt.Printf("Insert thread %d completed\n", i)
		}(i)
	}
	// read BENCH2_SIZE * 2 documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Read thread %d starting\n", i)
			defer wp.Done()
			var doc interface{}
			for j := 0; j < BENCH2_SIZE/numThreads*2; j++ {
				col.Read(docs[uint64(rand.Intn(len(docs)))], &doc)
			}
			fmt.Printf("Read thread %d completed\n", i)
		}(i)
	}
	// query BENCH2_SIZE times
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Query thread %d starting\n", i)
			defer wp.Done()
			var query interface{}
			var err error
			for j := 0; j < BENCH2_SIZE/numThreads; j++ {
				if err = json.Unmarshal([]byte(`{"c": [{"eq": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`, "in": ["a", "b", "c"], "limit": 1}, `+
					`{"eq": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`, "in": ["c", "d"], "limit": 1}]}`), &query); err != nil {
					panic("json error")
				}
				result := make(map[uint64]struct{})
				if err = db.EvalQueryV2(query, col, &result); err != nil {
					panic("query error")
				}
			}
			fmt.Printf("Query thread %d completed\n", i)
		}(i)
	}
	// update BENCH2_SIZE documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Update thread %d starting\n", i)
			defer wp.Done()
			var updated interface{}
			var err error
			for j := 0; j < BENCH2_SIZE/numThreads; j++ {
				if err = json.Unmarshal([]byte(
					`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`}},`+
						`"c": {"d": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`},`+
						`"more": "abcdefghijklmnopqrstuvwxyz"}`), &updated); err != nil {
					panic(err)
				}
				if _, err = col.Update(docs[uint64(rand.Intn(len(docs)))], updated); err != nil {
					// "does not exist" indicates that a deleted document is being updated, it is safe to ignore
					if !strings.Contains(fmt.Sprint(err), "does not exist") {
						fmt.Println(err)
					}
				}
			}
			fmt.Printf("Update thread %d completed\n", i)
		}(i)
	}
	// delete BENCH2_SIZE documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Delete thread %d starting\n", i)
			defer wp.Done()
			for j := 0; j < BENCH2_SIZE/numThreads; j++ {
				col.Delete(docs[uint64(rand.Intn(len(docs)))])
			}
			fmt.Printf("Delete thread %d completed\n", i)
		}(i)
	}
	wp.Wait()
	end := float64(time.Now().UTC().UnixNano())
	fmt.Printf("Total operations %d: %d ns/iter, %d iter/sec\n", BENCH2_SIZE*7, int((end-start)/BENCH2_SIZE/7), int(1000000000/((end-start)/BENCH2_SIZE/7)))
}