// batchIndexingWorker drains workChan, applying either a prepared batch or a
// single document to the index, and periodically logs indexing throughput.
func batchIndexingWorker(index bleve.Index, workChan chan *Work, start time.Time) {
	for {
		select {
		case work, ok := <-workChan:
			if !ok {
				return // channel closed: no more work
			}
			workSize := 1
			if work.batch != nil {
				err := index.Batch(work.batch)
				if err != nil {
					log.Fatalf("indexer worker fatal: %v", err)
				}
				workSize = work.batch.Size()
			} else {
				err := index.Index(work.id, work.doc)
				if err != nil {
					log.Fatalf("indexer worker fatal: %v", err)
				}
			}
			elapsedTime := time.Since(start) / time.Millisecond
			updatedTotal := atomic.AddUint64(&totalIndexed, uint64(workSize))
			atomic.AddUint64(&totalPlainTextIndexed, work.plainTextBytes)
			if updatedTotal%uint64(*printCount) == 0 {
				log.Printf("%d,%d", updatedTotal, elapsedTime)
			}
		}
	}
}
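// Both batchIndexingWorker variants in this listing lean on package-level
// declarations that are not shown. The sketch below is inferred from how the
// workers use them; the exact field types and the flag name/default are
// assumptions (imports assumed: flag, github.com/blevesearch/bleve).

// Work describes one unit of indexing work: either a prepared batch, or a
// single (id, doc) pair when batch is nil.
type Work struct {
	batch          *bleve.Batch
	id             string
	doc            interface{}
	plainTextBytes uint64 // plain-text bytes this unit represents, for stats
}

var (
	totalIndexed          uint64 // documents indexed so far, updated atomically
	totalPlainTextIndexed uint64 // plain-text bytes indexed so far, updated atomically

	// printCount is dereferenced as *printCount above, so it is presumably a flag.
	printCount = flag.Int("printCount", 1000, "log progress every N documents (assumed)")
)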
// indexSite walks the accession JSON files under htdocs/repositories and
// indexes them, doubling the batch size up to maxBatchSize as it goes.
func indexSite(index bleve.Index, maxBatchSize int) error {
	startT := time.Now()
	count := 0
	batch := index.NewBatch()
	batchSize := 10
	log.Printf("Walking %s", path.Join(htdocs, "repositories"))
	err := filepath.Walk(path.Join(htdocs, "repositories"), func(p string, f os.FileInfo, err error) error {
		if strings.Contains(p, "/accessions/") && strings.HasSuffix(p, ".json") {
			src, err := ioutil.ReadFile(p)
			if err != nil {
				log.Printf("Can't read %s, %s", p, err)
				return nil
			}
			view := new(cait.NormalizedAccessionView)
			err = json.Unmarshal(src, view)
			if err != nil {
				log.Printf("Can't parse %s, %s", p, err)
				return nil
			}
			// Trim the htdocs prefix and trailing .json extension to form the document ID.
			//log.Printf("Queued %s", p)
			err = batch.Index(strings.TrimSuffix(strings.TrimPrefix(p, htdocs), ".json"), view)
			if err != nil {
				log.Printf("Indexing error %s, %s", p, err)
				return nil
			}
			if batch.Size() >= batchSize {
				log.Printf("Indexing %d items", batch.Size())
				err := index.Batch(batch)
				if err != nil {
					log.Fatal(err)
				}
				count += batch.Size()
				batch = index.NewBatch()
				log.Printf("Indexed: %d items, batch size %d, running %s\n", count, batchSize, time.Since(startT))
				// Grow the batch geometrically, capped at maxBatchSize.
				if batchSize < maxBatchSize {
					batchSize = batchSize * 2
				}
				if batchSize > maxBatchSize {
					batchSize = maxBatchSize
				}
			}
		}
		return nil
	})
	// Flush any remaining partial batch.
	if batch.Size() > 0 {
		log.Printf("Indexing %d items", batch.Size())
		err := index.Batch(batch)
		if err != nil {
			log.Fatal(err)
		}
		count += batch.Size()
		log.Printf("Indexed: %d items, batch size %d, running %s\n", count, batchSize, time.Since(startT))
	}
	log.Printf("Total indexed: %d items, total run time %s\n", count, time.Since(startT))
	return err
}
// indexBeer indexes every JSON file in *jsonDir, using the filename (minus
// its extension) as the document ID and flushing in batches of *batchSize.
func indexBeer(i bleve.Index) error {
	// open the directory
	dirEntries, err := ioutil.ReadDir(*jsonDir)
	if err != nil {
		return err
	}
	// walk the directory entries for indexing
	log.Printf("Indexing...")
	count := 0
	startTime := time.Now()
	batch := i.NewBatch()
	batchCount := 0
	for _, dirEntry := range dirEntries {
		filename := dirEntry.Name()
		// read the bytes
		jsonBytes, err := ioutil.ReadFile(*jsonDir + "/" + filename)
		if err != nil {
			return err
		}
		// shred them into a document
		ext := filepath.Ext(filename)
		docID := filename[:len(filename)-len(ext)]
		err = batch.Index(docID, jsonBytes)
		if err != nil {
			return err
		}
		batchCount++
		if batchCount >= *batchSize {
			err = i.Batch(batch)
			if err != nil {
				return err
			}
			batch = i.NewBatch()
			batchCount = 0
		}
		count++
		if count%1000 == 0 {
			indexDuration := time.Since(startTime)
			indexDurationSeconds := float64(indexDuration) / float64(time.Second)
			timePerDoc := float64(indexDuration) / float64(count)
			log.Printf("Indexed %d documents, in %.2fs (average %.2fms/doc)", count, indexDurationSeconds, timePerDoc/float64(time.Millisecond))
		}
	}
	// flush the last batch
	if batchCount > 0 {
		err = i.Batch(batch)
		if err != nil {
			log.Fatal(err)
		}
	}
	indexDuration := time.Since(startTime)
	indexDurationSeconds := float64(indexDuration) / float64(time.Second)
	timePerDoc := float64(indexDuration) / float64(count)
	log.Printf("Indexed %d documents, in %.2fs (average %.2fms/doc)", count, indexDurationSeconds, timePerDoc/float64(time.Millisecond))
	return nil
}
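// indexBeer dereferences *jsonDir and *batchSize, so both are presumably
// command-line flags defined elsewhere in the package. A plausible sketch;
// the defaults and help strings are invented (import assumed: flag).
var (
	jsonDir   = flag.String("jsonDir", "data/", "directory containing one JSON document per file")
	batchSize = flag.Int("batchSize", 100, "maximum number of documents per index batch")
)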
func batchIndexingWorker(index bleve.Index, workChan chan *Work, timeStart time.Time) {
	for work := range workChan {
		workSize := 1
		if work.batch != nil {
			err := index.Batch(work.batch)
			if err != nil {
				log.Fatalf("indexer worker fatal: %v", err)
			}
			workSize = work.batch.Size()
		} else {
			err := index.Index(work.id, work.doc)
			if err != nil {
				log.Fatalf("indexer worker fatal: %v", err)
			}
		}
		atomic.AddUint64(&totalIndexed, uint64(workSize))
		atomic.AddUint64(&totalPlainTextIndexed, work.plainTextBytes)
	}
}
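// Neither worker variant starts goroutines or closes workChan itself, so a
// producer is expected to do both. A minimal wiring sketch under the Work
// definition assumed earlier; runWorkers, docs, and numWorkers are
// hypothetical names (imports assumed: sync, time).
func runWorkers(index bleve.Index, docs map[string]interface{}, numWorkers int) {
	workChan := make(chan *Work)
	start := time.Now()
	var wg sync.WaitGroup
	for n := 0; n < numWorkers; n++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			batchIndexingWorker(index, workChan, start)
		}()
	}
	for id, doc := range docs {
		workChan <- &Work{id: id, doc: doc}
	}
	close(workChan) // lets each worker's receive loop terminate
	wg.Wait()
}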
// BatchIndex indexes the given documents in a single batch and returns the
// elapsed time in milliseconds. It creates the index if it does not exist,
// otherwise opens the existing one.
func (be *BleveEngine) BatchIndex(documents []*Document) (int64, error) {
	start := time.Now().UnixNano() / int64(time.Millisecond)
	mapping := bleve.NewIndexMapping()
	index, err := bleve.New(INDEX, mapping)
	if err != nil {
		// The index likely already exists; fall back to opening it.
		index, err = bleve.Open(INDEX)
		if err != nil {
			return 0, err
		}
	}
	batch := index.NewBatch()
	for _, document := range documents {
		if err := batch.Index(document.Id, document.Data); err != nil {
			return 0, err
		}
	}
	if err := index.Batch(batch); err != nil {
		return 0, err
	}
	if err := index.Close(); err != nil {
		return 0, err
	}
	return time.Now().UnixNano()/int64(time.Millisecond) - start, nil
}
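// A usage sketch for BatchIndex; indexSampleDocuments is a hypothetical
// caller, and it assumes Document is a plain struct with exported Id and Data
// fields (as the loop above implies) and that a zero-value BleveEngine is
// usable. Note that BatchIndex creates or opens the index and closes it again
// on every call, so repeated calls pay that setup cost each time; a
// longer-lived engine would hold the index open instead.
func indexSampleDocuments() {
	docs := []*Document{
		{Id: "doc-1", Data: map[string]interface{}{"title": "first"}},
		{Id: "doc-2", Data: map[string]interface{}{"title": "second"}},
	}
	elapsedMs, err := (&BleveEngine{}).BatchIndex(docs)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("batch indexed %d docs in %dms", len(docs), elapsedMs)
}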
// indexTPB streams a gzipped, pipe-delimited dump and indexes each row as a
// tpbDoc, flushing in batches of *batchSize up to an optional *indexLimit.
func indexTPB(i bleve.Index) error {
	batch := i.NewBatch()
	batchCount := 0
	gzDumpFile, err := os.Open(*dump)
	if err != nil {
		return err
	}
	defer gzDumpFile.Close()
	dumpFile, err := gzip.NewReader(gzDumpFile)
	if err != nil {
		return err
	}
	reader := csv.NewReader(dumpFile)
	reader.FieldsPerRecord = 7
	reader.Comma = '|'
	count := 0
	startTime := time.Now()
	log.Printf("Indexing...")
	for {
		r, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			// skip malformed rows
			continue
		}
		size, err := strconv.ParseInt(r[1], 10, 0)
		if err != nil {
			log.Printf("can't parse size %q, defaulting to 0", r[1])
			size = 0
		}
		err = batch.Index(r[2], tpbDoc{
			Name:     r[0],
			Size:     size,
			Hash:     r[2],
			Category: r[4],
			Type:     "torrent",
		})
		if err != nil {
			return err
		}
		batchCount++
		if batchCount >= *batchSize {
			err = i.Batch(batch)
			if err != nil {
				return err
			}
			batch = i.NewBatch()
			batchCount = 0
		}
		count++
		if count%1000 == 0 {
			indexDuration := time.Since(startTime)
			indexDurationSeconds := float64(indexDuration) / float64(time.Second)
			timePerDoc := float64(indexDuration) / float64(count)
			log.Printf("Indexed %d documents in %.2fs (average %.2fms/doc)", count, indexDurationSeconds, timePerDoc/float64(time.Millisecond))
		}
		if *indexLimit > 0 && count >= *indexLimit {
			break
		}
	}
	// flush the last batch
	if batchCount > 0 {
		err := i.Batch(batch)
		if err != nil {
			log.Fatal(err)
		}
	}
	indexDuration := time.Since(startTime)
	indexDurationSeconds := float64(indexDuration) / float64(time.Second)
	timePerDoc := float64(indexDuration) / float64(count)
	log.Printf("Finished indexing %d documents in %.2fs (average %.2fms/doc)", count, indexDurationSeconds, timePerDoc/float64(time.Millisecond))
	log.Printf("Still listening on http://%v", bind)
	return nil
}
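// The tpbDoc type is not shown, but its shape follows directly from the
// composite literal above; Size is int64 because it comes from
// strconv.ParseInt. The struct tags are an assumption.
type tpbDoc struct {
	Name     string `json:"name"`
	Size     int64  `json:"size"`
	Hash     string `json:"hash"`
	Category string `json:"category"`
	Type     string `json:"type"`
}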