func streamCollection(s []interface{}) chan ptrval { ch := make(chan ptrval) go func() { defer close(ch) t := time.Unix(1347255646, 418514126).UTC() for _, r := range s { t = t.Add(time.Second) ts := t.Format(time.RFC3339Nano) ch <- ptrval{gouchstore.NewDocumentInfo(ts), r, true} } t = t.Add(time.Second) ts := t.Format(time.RFC3339Nano) ch <- ptrval{gouchstore.NewDocumentInfo(ts), nextValue, false} }() return ch }
func deleteBulk(args []string, w http.ResponseWriter, req *http.Request) { // Parse the params req.ParseForm() compactAfter := strings.ToLower(req.FormValue("compact")) from, err := cleanupRangeParam(req.FormValue("from"), "") if err != nil { emitError(400, w, "Bad from value", err.Error()) return } to, err := cleanupRangeParam(req.FormValue("to"), "") if err != nil { emitError(400, w, "Bad to value", err.Error()) return } db, err := dbopen(args[0]) if err != nil { emitError(400, w, "Unable to open Database", err.Error()) return } bulk := db.Bulk() deleteCount := 0 commitThreshold := 10000 err = dbwalkKeys(args[0], from, to, func(k string) error { bulk.Delete(gouchstore.NewDocumentInfo(k)) deleteCount++ if deleteCount >= commitThreshold { bulk.Commit() deleteCount = 0 } return err }) if deleteCount > 0 { bulk.Commit() } bulk.Close() if compactAfter == "true" { err = dbcompact(args[0]) } w.WriteHeader(201) }
func TestPairRateConversion(t *testing.T) { ch := make(chan ptrval, 2) rch := convertTofloat64Rate(ch) tm := time.Now().UTC() val1 := "20" ch <- ptrval{gouchstore.NewDocumentInfo(tm.Format(time.RFC3339Nano)), val1, true} tm = tm.Add(5 * time.Second) val2 := "25" ch <- ptrval{gouchstore.NewDocumentInfo(tm.Format(time.RFC3339Nano)), val2, false} close(ch) exp := 1.0 val, got := <-rch if !got { t.Fatalf("Expected value, got empty channel") } if val != exp { t.Fatalf("Expected %v, got %v", exp, val) } }
func TestPointers(t *testing.T) { docID := "2013-02-22T16:29:19.750264Z" di := gouchstore.NewDocumentInfo(docID) tests := []struct { pointer string exp interface{} }{ {"/kind", "Listing"}, {"_id", docID}, } for _, test := range tests { chans := make([]chan ptrval, 0, 1) chans = append(chans, make(chan ptrval)) go processDoc(di, chans, bigInput, []string{test.pointer}, []string{}, []string{}, true) got := <-chans[0] if test.exp != got.val { t.Errorf("Expected %v for %v, got %v", test.exp, test.pointer, got.val) } } }
// dbWriteLoop is the single writer goroutine for one open database. It
// drains mutation requests from dq.ch into a gouchstore bulk writer and
// flushes either when *maxOpQueue items have accumulated or when the
// *flushTime timer fires. A separate *liveTime ticker shuts the writer
// down (by closing dq.quit) when a full interval passes with no queued
// items and no operations. Runs until dq.quit is closed.
func dbWriteLoop(dq *dbWriter) {
	defer dbWg.Done()
	queued := 0
	bulk := dq.db.Bulk()

	// Flush timer, reset after every flush; idle-detection ticker.
	t := time.NewTimer(*flushTime)
	defer t.Stop()
	liveTracker := time.NewTicker(*liveTime)
	defer liveTracker.Stop()
	liveOps := 0

	dbst := dbStats.getOrCreate(dq.dbname)
	// On exit: zero the published queue length and count the close.
	defer atomic.StoreUint32(&dbst.qlen, 0)
	defer atomic.AddUint32(&dbst.closes, 1)

	for {
		// Publish current queue depth for the stats reader.
		atomic.StoreUint32(&dbst.qlen, uint32(queued))
		select {
		case <-dq.quit:
			// Shutdown: flush remaining items, close the handle,
			// and deregister this connection.
			sdt := time.Now()
			bulk.Commit()
			bulk.Close()
			closeDBConn(dq.db)
			dbRemoveConn(dq.dbname)
			log.Printf("Closed %v with %v items in %v",
				dq.dbname, queued, time.Since(sdt))
			return
		case <-liveTracker.C:
			// A whole live interval with no pending items and no ops:
			// trigger shutdown via our own quit channel.
			if queued == 0 && liveOps == 0 {
				log.Printf("Closing idle DB: %v", dq.dbname)
				close(dq.quit)
			}
			liveOps = 0
		case qi := <-dq.ch:
			liveOps++
			switch qi.op {
			case opStoreItem:
				bulk.Set(gouchstore.NewDocumentInfo(qi.k),
					gouchstore.NewDocument(qi.k, qi.data))
				queued++
			case opDeleteItem:
				queued++
				bulk.Delete(gouchstore.NewDocumentInfo(qi.k))
			case opCompact:
				// dbCompact flushes and hands back a fresh bulk
				// writer; the request waits on qi.cherr for the result.
				var err error
				bulk, err = dbCompact(dq, bulk, queued, qi)
				qi.cherr <- err
				atomic.AddUint64(&dbst.written, uint64(queued))
				queued = 0
			default:
				log.Panicf("Unhandled case: %v", qi.op)
			}
			// Size-triggered flush.
			if queued >= *maxOpQueue {
				start := time.Now()
				bulk.Commit()
				if *verbose {
					log.Printf("Flush of %d items took %v",
						queued, time.Since(start))
				}
				atomic.AddUint64(&dbst.written, uint64(queued))
				queued = 0
				t.Reset(*flushTime)
			}
		case <-t.C:
			// Time-triggered flush of whatever has accumulated.
			if queued > 0 {
				start := time.Now()
				bulk.Commit()
				if *verbose {
					log.Printf("Flush of %d items from timer took %v",
						queued, time.Since(start))
				}
				atomic.AddUint64(&dbst.written, uint64(queued))
				queued = 0
			}
			t.Reset(*flushTime)
		}
	}
}
func main() { flag.Parse() if flag.NArg() < 1 { fmt.Println("Must specify path to a use") return } db, err := gouchstore.Open(flag.Args()[0], gouchstore.OPEN_CREATE) if err != nil { fmt.Println(err) return } defer db.Close() var docCount uint64 = 0 var deletedCount uint64 = 0 nextDocId := 0 docs := make(map[string]*gouchstore.Document) deletedDocs := make(map[string]bool) docInfos := make(map[string]*gouchstore.DocumentInfo) docKeys := make([]string, 0) docsBySeq := make(map[uint64]*gouchstore.Document) deletedDocsBySeq := make(map[uint64]bool) // iterate through this many operations for i := 0; i < *numOps; i++ { operation := rand.Intn(100) if operation < 70 { // add doc docId := "doc-" + strconv.Itoa(nextDocId) doc := &gouchstore.Document{ ID: docId, Body: []byte(`{"content":123,"filler":"hf7BB83mKJ5APQ0OBqrj1wnYw2mjArEFmkuPaiNIYbBa4cMkOdI/JuTsGZTddqW5cxLYT1md1kWPxwuNUYliPAcrY/eoibeOMJ/3HaBUu88S/unTrR/P8iVMR1RcV0jyVzw475E3ckff8AqomvglH3ij7gsJyrAMmF2TjLlJU2EURrp+Sq5sM3bboeHf7iyuG+LnUL1hKbYju+yxC7AaoWDoiUKB2KlrZrcKwolFQnTKkZcSMEuI9sXsUvdaoq+1MNMsFKnnlQsuricwTTfcFIFQFHlyGrV+ghrljAl7RY6kxNpPtFft8s0x8xYR8Po8SxTLgA/YNEa2ERbHPUCdheAZZ3MDMSeBlv9Sy7UOCgKVH1tW8S9kidJQLwnYoDcDlz1HCxrvvakdyrYPoeohZ5Ub44xo1/2hhO0Pypk11dWbTqO8MV/3Z+BIQWdVWbvx7zrLSG+kzxh1tMBnrluuZWXjT5w/+ZUSBnLU3Ru17TpIGu5I8pp4Xh2vjWgbRsdKHGga7li5tqtO5fb59rhA+8q+lLlBTcL1AR1oZiPL/O7O+2jqHE10gE7CO4GFqfX/iM4LhrxzcnPd3taeiVGp3/zTx1JJKiYn9H8yn7e4/KkpNObag7k+bqrpE64GPWOHvlYXwm7L8kVIesaBfZEwenlOWJgK73IleBzhoYVt6iKgS+MPZqNvQBN/ndXXffoyUoM9dbiV/dYEt7pmP14N3UVqJh3N/7Kz3bXrrQH0msmso1FnfkoZTUnPEK3toXgnub1dEPkbmJnwbiXBaNhHc7KPBnMYH7HlVWN4h7R0tH9u0zgAzemdBkARViJidkYhieig5K/31OfVSVFjbqbdWNtanAU1kSzcqX+O9+3H3kFQxB8Ez7Lo08Zbx2IojmjmVnJUH+oNs0qhYwlrmhH1lK2/TOIdaEASnYsANTxnlJvYWpSbNfpk2jXcyBQCvhaIOO2E8Hq+eRhdMtdqLUwoOtI8+Q0lJVaCxuM4RhVEtvBm7frqC3fm5fCJjYmjNOAF6QGWhRWnkeaEQgA0j40DbvRQypSPGr2y6mLOpsCeELtxXKpm929CngxLzWRsGZWvmCOZNtpH6zqHGmn5PTcm2KyTC1jhoD3maicGvGF/S7MJI5Pftuzp8GVx+EieqONwPZXln6g0L66Md2V8igH+Z134RXMN4mk2j6MGhZiZ4oUokb
BZ9WR/QvGUGdxzoyfFnGt1Bh+9uUaydq1l6Lk1OHH3XrRdkexMPT2CFEpX+0JonOtNwaXTmdbclFjJIA9ObfNww0iBfxQ7TZYZSv4O0mHIBXcCkhM2/kqlFbomL3/eJXhJhDaGsc+KoVBQCU4x9wydSiiXGFmskX5gWV37bA=="}`), } nextDocId++ docInfo := gouchstore.NewDocumentInfo(docId) docInfo.Rev = 1 err = db.SaveDocument(doc, docInfo) if err != nil { fmt.Printf("err: %v\n", err) return } docs[docId] = doc docsBySeq[docInfo.Seq] = doc docInfos[docId] = docInfo docKeys = append(docKeys, docId) docCount++ } else if operation < 90 { // update doc if len(docKeys) > 0 { docIdIndexToUpdate := rand.Intn(len(docKeys)) docIdToUpdate := docKeys[docIdIndexToUpdate] // get the old seq oldSeq := docInfos[docIdToUpdate].Seq // bump the rev of this doc docInfos[docIdToUpdate].Rev++ // now udpate it err = db.SaveDocument(docs[docIdToUpdate], docInfos[docIdToUpdate]) if err != nil { fmt.Printf("err: %v\n", err) return } delete(docsBySeq, oldSeq) docsBySeq[docInfos[docIdToUpdate].Seq] = docs[docIdToUpdate] } else { continue } } else { // delete doc if len(docKeys) > 0 { docIdIndexToDelete := rand.Intn(len(docKeys)) docIdToDelete := docKeys[docIdIndexToDelete] // get the old seq oldSeq := docInfos[docIdToDelete].Seq err = db.SaveDocument(nil, docInfos[docIdToDelete]) if err != nil { fmt.Printf("err: %v\n", err) return } oldInfo := docInfos[docIdToDelete] docKeys = append(docKeys[:docIdIndexToDelete], docKeys[docIdIndexToDelete+1:]...) delete(docs, docIdToDelete) deletedDocs[docIdToDelete] = true delete(docInfos, docIdToDelete) docCount-- deletedCount++ delete(docsBySeq, oldSeq) deletedDocsBySeq[oldInfo.Seq] = true } else { continue } } if i%*commitEvery == 0 { err = db.Commit() if err != nil { fmt.Printf("error committing %d: %v\n", i, err) return } } // final commit if *commitAtEnd { err = db.Commit() if err != nil { fmt.Printf("error committing end: %v\n", err) return } } } }