Example #1
// Benchmark document operations involving UID.
func benchmark3() {
	// initialization
	rand.Seed(time.Now().UTC().UnixNano())
	// prepare benchmark data
	docs := [BENCH_SIZE]interface{}{}
	for i := range docs {
		if err := json.Unmarshal([]byte(
			`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(BENCH_SIZE))+`}},`+
				`"c": {"d": `+strconv.Itoa(rand.Intn(BENCH_SIZE))+`},`+
				`"more": "abcdefghijklmnopqrstuvwxyz"}`), &docs[i]); err != nil {
			panic("json error")
		}
	}
	// prepare collection
	tmp := "/tmp/tiedot_bench"
	os.RemoveAll(tmp)
	defer os.RemoveAll(tmp)
	col, err := db.OpenCol(tmp)
	if err != nil {
		panic(err)
	}
	col.Index([]string{"a", "b", "c"})
	col.Index([]string{"c", "d"})
	uidsMutex := new(sync.Mutex)
	uids := make([]string, 0, BENCH_SIZE)
	// start benchmarks
	average("insert", BENCH_SIZE, func() {}, func() {
		if _, uid, err := col.InsertWithUID(docs[rand.Intn(BENCH_SIZE)]); err == nil {
			uidsMutex.Lock()
			uids = append(uids, uid)
			uidsMutex.Unlock()
		} else {
			panic("insert error")
		}
	})

	average("read", BENCH_SIZE, func() {
	}, func() {
		var doc interface{}
		col.ReadByUID(uids[rand.Intn(BENCH_SIZE)], &doc)
	})
	average("lookup", BENCH_SIZE, func() {}, func() {
		var query interface{}
		if err := json.Unmarshal([]byte(`{"c": [{"eq": `+strconv.Itoa(rand.Intn(BENCH_SIZE))+`, "in": ["a", "b", "c"], "limit": 1}, `+
			`{"eq": `+strconv.Itoa(rand.Intn(BENCH_SIZE))+`, "in": ["c", "d"], "limit": 1}]}`), &query); err != nil {
			panic("json error")
		}
		result := make(map[uint64]struct{})
		if err := db.EvalQueryV2(query, col, &result); err != nil {
			panic("query error")
		}
	})
	average("update", BENCH_SIZE, func() {}, func() {
		col.UpdateByUID(uids[rand.Intn(BENCH_SIZE)], docs[rand.Intn(BENCH_SIZE)])
	})
	average("delete", BENCH_SIZE, func() {}, func() {
		col.DeleteByUID(uids[rand.Intn(BENCH_SIZE)])
	})
	col.Close()
}
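The average helper called throughout these benchmarks is not part of this excerpt. Inferred from its call sites, it runs the prepare closure once, spreads the given number of invocations of the benchmarked closure across GOMAXPROCS goroutines, and prints the mean latency. A minimal sketch under that assumption:

// Sketch of the average helper, reconstructed from how it is called above:
// run prepare once, execute do a total of `total` times across GOMAXPROCS
// goroutines, then report mean ns/iter and iter/sec.
func average(name string, total int, prepare func(), do func()) {
	numThreads := runtime.GOMAXPROCS(-1)
	prepare()
	wp := new(sync.WaitGroup)
	start := float64(time.Now().UTC().UnixNano())
	for i := 0; i < numThreads; i++ {
		wp.Add(1)
		go func() {
			defer wp.Done()
			// total/numThreads drops any remainder; close enough for a benchmark sketch.
			for j := 0; j < total/numThreads; j++ {
				do()
			}
		}()
	}
	wp.Wait()
	end := float64(time.Now().UTC().UnixNano())
	perIter := (end - start) / float64(total)
	fmt.Printf("%s %d: %d ns/iter, %d iter/sec\n", name, total, int(perIter), int(1e9/perIter))
}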
// Benchmark durable operations.
func durableBenchmark() {
	// initialization
	rand.Seed(time.Now().UTC().UnixNano())
	// prepare benchmark data
	docs := [DURABLE_BENCH_SIZE]interface{}{}
	for i := range docs {
		if err := json.Unmarshal([]byte(
			`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(DURABLE_BENCH_SIZE))+`}},`+
				`"c": {"d": `+strconv.Itoa(rand.Intn(DURABLE_BENCH_SIZE))+`},`+
				`"more": "abcdefghijklmnopqrstuvwxyz"}`), &docs[i]); err != nil {
			panic("json error")
		}
	}
	// prepare collection
	tmp := "/tmp/tiedot_bench"
	os.RemoveAll(tmp)
	defer os.RemoveAll(tmp)
	col, err := db.OpenCol(tmp)
	if err != nil {
		panic(err)
	}
	col.Index([]string{"a", "b", "c"})
	col.Index([]string{"c", "d"})
	// start benchmarks
	average("insert", DURABLE_BENCH_SIZE, func() {}, func() {
		if _, err := col.DurableInsert(docs[rand.Intn(DURABLE_BENCH_SIZE)]); err != nil {
			panic("insert error")
		}
	})
	ids := make([]uint64, 0, DURABLE_BENCH_SIZE)
	average("read", DURABLE_BENCH_SIZE, func() {
		col.ForAll(func(id uint64, doc interface{}) bool {
			ids = append(ids, id)
			return true
		})
	}, func() {
		var doc interface{}
		if err := col.Read(ids[rand.Intn(DURABLE_BENCH_SIZE)], &doc); err != nil || doc == nil {
			panic("read error")
		}
	})
	average("lookup", DURABLE_BENCH_SIZE, func() {}, func() {
		var query interface{}
		if err := json.Unmarshal([]byte(`["c", ["=", {"eq": `+strconv.Itoa(rand.Intn(DURABLE_BENCH_SIZE))+`, "in": ["a", "b", "c"], "limit": 1}],`+
			`["=", {"eq": `+strconv.Itoa(rand.Intn(DURABLE_BENCH_SIZE))+`, "in": ["c", "d"], "limit": 1}]]`), &query); err != nil {
			panic("json error")
		}
		result := make(map[uint64]struct{})
		if err := db.EvalQuery(query, col, &result); err != nil {
			panic("query error")
		}
	})
	average("update", DURABLE_BENCH_SIZE, func() {}, func() {
		if _, err := col.DurableUpdate(ids[rand.Intn(DURABLE_BENCH_SIZE)], docs[rand.Intn(DURABLE_BENCH_SIZE)]); err != nil {
			panic("update error")
		}
	})
	average("delete", DURABLE_BENCH_SIZE, func() {}, func() {
		if err := col.DurableDelete(ids[rand.Intn(DURABLE_BENCH_SIZE)]); err != nil {
			panic("delete error")
		}
	})
	col.Close()
}
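Note that the two examples above use different query syntaxes: durableBenchmark feeds the older array form to db.EvalQuery, while benchmark3 (and benchmark2 below) feeds the object form to db.EvalQueryV2. Either way, building the query as native Go values would avoid the string concatenation and the repeated json.Unmarshal. A hypothetical lookupQuery helper sketching this for the V2 form; the float64 values mirror what json.Unmarshal would have produced:

// lookupQuery builds the same two-clause lookup as the JSON strings above,
// as plain maps and slices. json.Unmarshal decodes all numbers to float64,
// so numeric values are float64 here to match what the evaluator normally sees.
func lookupQuery(v1, v2 int) interface{} {
	return map[string]interface{}{
		"c": []interface{}{
			map[string]interface{}{"eq": float64(v1), "in": []interface{}{"a", "b", "c"}, "limit": float64(1)},
			map[string]interface{}{"eq": float64(v2), "in": []interface{}{"c", "d"}, "limit": float64(1)},
		},
	}
}

A lookup iteration would then be result := make(map[uint64]struct{}) followed by db.EvalQueryV2(lookupQuery(rand.Intn(BENCH_SIZE), rand.Intn(BENCH_SIZE)), col, &result).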
Example #3
// Insert/update/delete/query all running at once.
func benchmark2() {
	numThreads := runtime.GOMAXPROCS(-1)
	rand.Seed(time.Now().UTC().UnixNano())
	// prepare collection
	tmp := "/tmp/tiedot_bench"
	os.RemoveAll(tmp)
	defer os.RemoveAll(tmp)
	col, err := db.OpenCol(tmp)
	if err != nil {
		panic(err)
	}
	col.Index([]string{"a", "b", "c"})
	col.Index([]string{"c", "d"})
	docs := make([]uint64, 0, BENCH2_SIZE*2+1000)
	docsMutex := new(sync.Mutex)
	// Prepare 1000 docs as a start
	var docToInsert interface{}
	for j := 0; j < 1000; j++ {
		if err = json.Unmarshal([]byte(
			`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`}},`+
				`"c": {"d": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`},`+
				`"more": "abcdefghijklmnopqrstuvwxyz"}`), &docToInsert); err != nil {
			panic(err)
		}
		if newID, err := col.Insert(docToInsert); err == nil {
			docs = append(docs, newID)
		} else {
			panic(err)
		}
	}
	// benchmark begins
	wp := new(sync.WaitGroup)
	wp.Add(5 * numThreads) // (CRUD + query) * number of benchmark threads
	start := float64(time.Now().UTC().UnixNano())
	// insert BENCH2_SIZE * 2 documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Insert thread %d starting\n", i)
			defer wp.Done()
			var docToInsert interface{}
			var err error
			for j := 0; j < BENCH2_SIZE/numThreads*2; j++ {
				if err = json.Unmarshal([]byte(
					`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`}},`+
						`"c": {"d": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`},`+
						`"more": "abcdefghijklmnopqrstuvwxyz"}`), &docToInsert); err != nil {
					panic(err)
				}
				if newID, err := col.Insert(docToInsert); err == nil {
					docsMutex.Lock()
					docs = append(docs, newID)
					docsMutex.Unlock()
				} else {
					panic(err)
				}
			}
			fmt.Printf("Insert thread %d completed\n", i)
		}(i)
	}
	// read BENCH2_SIZE * 2 documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Read thread %d starting\n", i)
			defer wp.Done()
			var doc interface{}
			for j := 0; j < BENCH2_SIZE/numThreads*2; j++ {
				// Pick the ID under the mutex: insert threads append to docs concurrently.
				docsMutex.Lock()
				id := docs[rand.Intn(len(docs))]
				docsMutex.Unlock()
				col.Read(id, &doc)
			}
			fmt.Printf("Read thread %d completed\n", i)
		}(i)
	}
	// query BENCH2_SIZE times
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Query thread %d starting\n", i)
			defer wp.Done()
			var query interface{}
			var err error
			for j := 0; j < BENCH2_SIZE/numThreads; j++ {
				if err = json.Unmarshal([]byte(`{"c": [{"eq": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`, "in": ["a", "b", "c"], "limit": 1}, `+
					`{"eq": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`, "in": ["c", "d"], "limit": 1}]}`), &query); err != nil {
					panic("json error")
				}
				result := make(map[uint64]struct{})
				if err = db.EvalQueryV2(query, col, &result); err != nil {
					panic("query error")
				}
			}
			fmt.Printf("Query thread %d completed\n", i)
		}(i)
	}
	// update BENCH2_SIZE documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Update thread %d starting\n", i)
			defer wp.Done()
			var updated interface{}
			var err error
			for j := 0; j < BENCH2_SIZE/numThreads; j++ {
				if err = json.Unmarshal([]byte(
					`{"a": {"b": {"c": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`}},`+
						`"c": {"d": `+strconv.Itoa(rand.Intn(BENCH2_SIZE))+`},`+
						`"more": "abcdefghijklmnopqrstuvwxyz"}`), &updated); err != nil {
					panic(err)
				}
				// Pick the ID under the mutex: insert threads append to docs concurrently.
				docsMutex.Lock()
				id := docs[rand.Intn(len(docs))]
				docsMutex.Unlock()
				if _, err = col.Update(id, updated); err != nil {
					// "does not exist" indicates that a deleted document is being updated, it is safe to ignore
					if !strings.Contains(fmt.Sprint(err), "does not exist") {
						fmt.Println(err)
					}
				}
			}
			fmt.Printf("Update thread %d completed\n", i)
		}(i)
	}
	// delete BENCH2_SIZE documents
	for i := 0; i < numThreads; i++ {
		go func(i int) {
			fmt.Printf("Delete thread %d starting\n", i)
			defer wp.Done()
			for j := 0; j < BENCH2_SIZE/numThreads; j++ {
				// Pick the ID under the mutex: insert threads append to docs concurrently.
				docsMutex.Lock()
				id := docs[rand.Intn(len(docs))]
				docsMutex.Unlock()
				col.Delete(id)
			}
			fmt.Printf("Delete thread %d completed\n", i)
		}(i)
	}
	wp.Wait()
	end := float64(time.Now().UTC().UnixNano())
	fmt.Printf("Total operations %d: %d ns/iter, %d iter/sec\n", BENCH2_SIZE*7, int((end-start)/BENCH2_SIZE/7), int(1000000000/((end-start)/BENCH2_SIZE/7)))
}
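These benchmark functions rely on file-level declarations outside this excerpt. A hypothetical scaffold that would let them compile, assuming the usual tiedot import path; the constant values are placeholders, as the real sizes are defined elsewhere in the benchmark file:

package main

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/HouzuoGuo/tiedot/db"
)

// Placeholder sizes; the originals are defined elsewhere in the benchmark file.
const (
	BENCH_SIZE         = 100000
	BENCH2_SIZE        = 100000
	DURABLE_BENCH_SIZE = 10000
)

func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	benchmark3()
	durableBenchmark()
	benchmark2()
}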