func (f *Firestorm) Update(doc *document.Document) (err error) {
	// assign this document a number
	doc.Number = atomic.AddUint64(&f.highDocNumber, 1)

	// do analysis before acquiring write lock
	analysisStart := time.Now()
	resultChan := make(chan *index.AnalysisResult)
	aw := index.NewAnalysisWork(f, doc, resultChan)

	// put the work on the queue
	f.analysisQueue.Queue(aw)

	// wait for the result
	result := <-resultChan
	close(resultChan)
	atomic.AddUint64(&f.stats.analysisTime, uint64(time.Since(analysisStart)))

	// start a writer for this update
	indexStart := time.Now()
	var kvwriter store.KVWriter
	kvwriter, err = f.store.Writer()
	if err != nil {
		return
	}
	// close the writer on return, propagating the close error only
	// if nothing else failed first
	defer func() {
		if cerr := kvwriter.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()

	var dictionaryDeltas map[string]int64
	dictionaryDeltas, err = f.batchRows(kvwriter, [][]index.IndexRow{result.Rows}, nil)
	if err != nil {
		// the deferred close above releases the writer; closing it
		// explicitly here as well would close it twice
		atomic.AddUint64(&f.stats.errors, 1)
		return
	}

	f.compensator.Mutate([]byte(doc.ID), doc.Number)
	f.lookuper.NotifyBatch([]*InFlightItem{{[]byte(doc.ID), doc.Number}})
	f.dictUpdater.NotifyBatch(dictionaryDeltas)

	atomic.AddUint64(&f.stats.indexTime, uint64(time.Since(indexStart)))
	return
}
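// For reference when reading Update and Batch: the InFlightItem literals
// above are built positionally from a document ID and a doc number. A minimal
// sketch of the struct reconstructed from that usage; the field names here
// (id, docNum) are illustrative guesses, and the real definition lives
// elsewhere in the firestorm package.
type InFlightItem struct {
	id     []byte // document ID being mutated
	docNum uint64 // assigned doc number; 0 for deletes
}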
func (udc *UpsideDownCouch) Batch(batch *index.Batch) (err error) {
	analysisStart := time.Now()

	resultChan := make(chan *index.AnalysisResult, len(batch.IndexOps))

	var numUpdates uint64
	for _, doc := range batch.IndexOps {
		if doc != nil {
			numUpdates++
		}
	}

	var detectedUnsafeMutex sync.RWMutex
	detectedUnsafe := false

	go func() {
		sofar := uint64(0)
		for _, doc := range batch.IndexOps {
			if doc != nil {
				sofar++
				if sofar > numUpdates {
					// more non-nil docs than we counted before this
					// goroutine started: the caller mutated the batch
					// after submitting it
					detectedUnsafeMutex.Lock()
					detectedUnsafe = true
					detectedUnsafeMutex.Unlock()
					return
				}
				aw := index.NewAnalysisWork(udc, doc, resultChan)
				// put the work on the queue
				udc.analysisQueue.Queue(aw)
			}
		}
	}()

	// retrieve back index rows concurrent with analysis
	var docBackIndexRowErr error
	docBackIndexRowCh := make(chan *docBackIndexRow, len(batch.IndexOps))

	udc.writeMutex.Lock()
	defer udc.writeMutex.Unlock()

	go func() {
		defer close(docBackIndexRowCh)

		// open a reader for backindex lookup; := shadows the outer
		// named return err so this goroutine never races on it, and
		// failures are reported via docBackIndexRowErr instead
		kvreader, err := udc.store.Reader()
		if err != nil {
			docBackIndexRowErr = err
			return
		}

		for docID, doc := range batch.IndexOps {
			backIndexRow, err := udc.backIndexRowForDoc(kvreader, docID)
			if err != nil {
				docBackIndexRowErr = err
				return
			}
			docBackIndexRowCh <- &docBackIndexRow{docID, doc, backIndexRow}
		}

		if err := kvreader.Close(); err != nil {
			docBackIndexRowErr = err
		}
	}()

	// wait for analysis results
	newRowsMap := make(map[string][]index.IndexRow)
	var itemsDeQueued uint64
	for itemsDeQueued < numUpdates {
		result := <-resultChan
		newRowsMap[result.DocID] = result.Rows
		itemsDeQueued++
	}
	close(resultChan)

	atomic.AddUint64(&udc.stats.analysisTime, uint64(time.Since(analysisStart)))

	docsAdded := uint64(0)
	docsDeleted := uint64(0)

	indexStart := time.Now()

	// prepare a list of rows
	var addRowsAll [][]UpsideDownCouchRow
	var updateRowsAll [][]UpsideDownCouchRow
	var deleteRowsAll [][]UpsideDownCouchRow

	// add the internal ops
	var updateRows []UpsideDownCouchRow
	var deleteRows []UpsideDownCouchRow
	for internalKey, internalValue := range batch.InternalOps {
		if internalValue == nil {
			// delete
			deleteInternalRow := NewInternalRow([]byte(internalKey), nil)
			deleteRows = append(deleteRows, deleteInternalRow)
		} else {
			updateInternalRow := NewInternalRow([]byte(internalKey), internalValue)
			updateRows = append(updateRows, updateInternalRow)
		}
	}
	if len(updateRows) > 0 {
		updateRowsAll = append(updateRowsAll, updateRows)
	}
	if len(deleteRows) > 0 {
		deleteRowsAll = append(deleteRowsAll, deleteRows)
	}

	// process back index rows as they arrive
	for dbir := range docBackIndexRowCh {
		if dbir.doc == nil && dbir.backIndexRow != nil {
			// delete
			deleteRows := udc.deleteSingle(dbir.docID, dbir.backIndexRow, nil)
			if len(deleteRows) > 0 {
				deleteRowsAll = append(deleteRowsAll, deleteRows)
			}
			docsDeleted++
		} else if dbir.doc != nil {
			addRows, updateRows, deleteRows := udc.mergeOldAndNew(dbir.backIndexRow, newRowsMap[dbir.docID])
			if len(addRows) > 0 {
				addRowsAll = append(addRowsAll, addRows)
			}
			if len(updateRows) > 0 {
				updateRowsAll = append(updateRowsAll, updateRows)
			}
			if len(deleteRows) > 0 {
				deleteRowsAll = append(deleteRowsAll, deleteRows)
			}
			if dbir.backIndexRow == nil {
				docsAdded++
			}
		}
	}

	if docBackIndexRowErr != nil {
		return docBackIndexRowErr
	}

	detectedUnsafeMutex.RLock()
	defer detectedUnsafeMutex.RUnlock()
	if detectedUnsafe {
		return UnsafeBatchUseDetected
	}

	// start a writer for this batch
	var kvwriter store.KVWriter
	kvwriter, err = udc.store.Writer()
	if err != nil {
		return
	}

	err = udc.batchRows(kvwriter, addRowsAll, updateRowsAll, deleteRowsAll)
	if err != nil {
		_ = kvwriter.Close()
		atomic.AddUint64(&udc.stats.errors, 1)
		return
	}

	err = kvwriter.Close()

	atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart)))

	if err == nil {
		udc.m.Lock()
		udc.docCount += docsAdded
		udc.docCount -= docsDeleted
		udc.m.Unlock()
		atomic.AddUint64(&udc.stats.updates, numUpdates)
		atomic.AddUint64(&udc.stats.deletes, docsDeleted)
		atomic.AddUint64(&udc.stats.batches, 1)
	} else {
		atomic.AddUint64(&udc.stats.errors, 1)
	}
	return
}
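// For reference when reading Batch: the docBackIndexRow carrier passed over
// docBackIndexRowCh is built positionally from a doc ID, the (possibly nil)
// new document, and the (possibly nil) existing back index row. A sketch of
// the struct reconstructed from that usage; the real definition lives
// elsewhere in the upside_down package.
type docBackIndexRow struct {
	docID        string
	doc          *document.Document // nil when this op is a delete
	backIndexRow *BackIndexRow      // nil when the doc was not indexed before
}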
func (udc *UpsideDownCouch) Update(doc *document.Document) (err error) {
	// do analysis before acquiring write lock
	analysisStart := time.Now()
	resultChan := make(chan *index.AnalysisResult)
	aw := index.NewAnalysisWork(udc, doc, resultChan)

	// put the work on the queue
	udc.analysisQueue.Queue(aw)

	// wait for the result
	result := <-resultChan
	close(resultChan)
	atomic.AddUint64(&udc.stats.analysisTime, uint64(time.Since(analysisStart)))

	udc.writeMutex.Lock()
	defer udc.writeMutex.Unlock()

	// open a reader for backindex lookup
	var kvreader store.KVReader
	kvreader, err = udc.store.Reader()
	if err != nil {
		return
	}

	// lookup the back index row for the doc id, if it exists
	var backIndexRow *BackIndexRow
	backIndexRow, err = udc.backIndexRowForDoc(kvreader, doc.ID)
	if err != nil {
		_ = kvreader.Close()
		atomic.AddUint64(&udc.stats.errors, 1)
		return
	}

	err = kvreader.Close()
	if err != nil {
		return
	}

	// start a writer for this update
	indexStart := time.Now()
	var kvwriter store.KVWriter
	kvwriter, err = udc.store.Writer()
	if err != nil {
		return
	}
	// close the writer on return, propagating the close error only
	// if nothing else failed first
	defer func() {
		if cerr := kvwriter.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()

	// prepare a list of rows
	var addRowsAll [][]UpsideDownCouchRow
	var updateRowsAll [][]UpsideDownCouchRow
	var deleteRowsAll [][]UpsideDownCouchRow

	addRows, updateRows, deleteRows := udc.mergeOldAndNew(backIndexRow, result.Rows)
	if len(addRows) > 0 {
		addRowsAll = append(addRowsAll, addRows)
	}
	if len(updateRows) > 0 {
		updateRowsAll = append(updateRowsAll, updateRows)
	}
	if len(deleteRows) > 0 {
		deleteRowsAll = append(deleteRowsAll, deleteRows)
	}

	err = udc.batchRows(kvwriter, addRowsAll, updateRowsAll, deleteRowsAll)
	if err == nil && backIndexRow == nil {
		// no existing back index row means this doc is new
		udc.m.Lock()
		udc.docCount++
		udc.m.Unlock()
	}
	atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart)))
	if err == nil {
		atomic.AddUint64(&udc.stats.updates, 1)
	} else {
		atomic.AddUint64(&udc.stats.errors, 1)
	}
	return
}
func (f *Firestorm) Batch(batch *index.Batch) (err error) {
	// acquire enough doc numbers for all updates in the batch
	// FIXME we actually waste doc numbers because deletes are in the
	// same map and we don't need numbers for them
	lastDocNumber := atomic.AddUint64(&f.highDocNumber, uint64(len(batch.IndexOps)))
	firstDocNumber := lastDocNumber - uint64(len(batch.IndexOps)) + 1

	analysisStart := time.Now()
	resultChan := make(chan *index.AnalysisResult)

	var docsUpdated uint64
	var docsDeleted uint64
	for _, doc := range batch.IndexOps {
		if doc != nil {
			doc.Number = firstDocNumber // actually assign doc numbers here
			firstDocNumber++
			docsUpdated++
		} else {
			docsDeleted++
		}
	}

	var detectedUnsafeMutex sync.RWMutex
	detectedUnsafe := false

	go func() {
		sofar := uint64(0)
		for _, doc := range batch.IndexOps {
			if doc != nil {
				sofar++
				if sofar > docsUpdated {
					detectedUnsafeMutex.Lock()
					detectedUnsafe = true
					detectedUnsafeMutex.Unlock()
					return
				}
				aw := index.NewAnalysisWork(f, doc, resultChan)
				// put the work on the queue
				f.analysisQueue.Queue(aw)
			}
		}
	}()

	// extra 1 capacity for internal updates.
	collectRows := make([][]index.IndexRow, 0, docsUpdated+1)

	// wait for the result
	var itemsDeQueued uint64
	for itemsDeQueued < docsUpdated {
		result := <-resultChan
		collectRows = append(collectRows, result.Rows)
		itemsDeQueued++
	}
	close(resultChan)

	detectedUnsafeMutex.RLock()
	defer detectedUnsafeMutex.RUnlock()
	if detectedUnsafe {
		return UnsafeBatchUseDetected
	}

	atomic.AddUint64(&f.stats.analysisTime, uint64(time.Since(analysisStart)))

	var deleteKeys [][]byte
	if len(batch.InternalOps) > 0 {
		// add the internal ops
		updateInternalRows := make([]index.IndexRow, 0, len(batch.InternalOps))
		for internalKey, internalValue := range batch.InternalOps {
			if internalValue == nil {
				// delete
				deleteInternalRow := NewInternalRow([]byte(internalKey), nil)
				deleteKeys = append(deleteKeys, deleteInternalRow.Key())
			} else {
				updateInternalRow := NewInternalRow([]byte(internalKey), internalValue)
				updateInternalRows = append(updateInternalRows, updateInternalRow)
			}
		}
		collectRows = append(collectRows, updateInternalRows)
	}

	inflightItems := make([]*InFlightItem, 0, len(batch.IndexOps))
	for docID, doc := range batch.IndexOps {
		if doc != nil {
			inflightItems = append(inflightItems, &InFlightItem{[]byte(docID), doc.Number})
		} else {
			inflightItems = append(inflightItems, &InFlightItem{[]byte(docID), 0})
		}
	}

	indexStart := time.Now()

	// start a writer for this batch
	var kvwriter store.KVWriter
	kvwriter, err = f.store.Writer()
	if err != nil {
		return
	}

	var dictionaryDeltas map[string]int64
	dictionaryDeltas, err = f.batchRows(kvwriter, collectRows, deleteKeys)
	if err != nil {
		_ = kvwriter.Close()
		atomic.AddUint64(&f.stats.errors, 1)
		return
	}

	f.compensator.MutateBatch(inflightItems, lastDocNumber)

	err = kvwriter.Close()

	f.lookuper.NotifyBatch(inflightItems)
	f.dictUpdater.NotifyBatch(dictionaryDeltas)

	atomic.AddUint64(&f.stats.indexTime, uint64(time.Since(indexStart)))

	if err == nil {
		atomic.AddUint64(&f.stats.updates, docsUpdated)
		atomic.AddUint64(&f.stats.deletes, docsDeleted)
		atomic.AddUint64(&f.stats.batches, 1)
	} else {
		atomic.AddUint64(&f.stats.errors, 1)
	}
	return
}
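// Usage sketch (illustrative, not part of the original sources): both the
// Firestorm and UpsideDownCouch implementations are driven through the same
// index.Index surface. This assumes the batch helpers from bleve's index
// package (index.NewBatch, Batch.Update, Batch.Delete, Batch.SetInternal),
// which populate the IndexOps/InternalOps maps consumed by the Batch methods
// above, and document.NewDocument from the document package; the function
// name exampleDrive is hypothetical.
func exampleDrive(idx index.Index) error {
	// single-document path: one analysis, one KV write
	if err := idx.Update(document.NewDocument("doc1")); err != nil {
		return err
	}

	// batched path: updates, deletes, and internal ops are flushed in
	// one KV write, with analysis fanned out across the analysis queue
	batch := index.NewBatch()
	batch.Update(document.NewDocument("doc2"))
	batch.Delete("doc1")
	batch.SetInternal([]byte("mapping"), []byte(`{"default":{}}`))
	return idx.Batch(batch)
}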