Example no. 1
0
func (im *IndexMapping) mapDocument(doc *document.Document, data interface{}) error {
	// see if the top level object is a byte array, and possibly run it through the converter
	byteArrayData, ok := data.([]byte)
	if ok {
		byteArrayConverterConstructor := registry.ByteArrayConverterByName(im.ByteArrayConverter)
		if byteArrayConverterConstructor != nil {
			byteArrayConverter, err := byteArrayConverterConstructor(nil, nil)
			if err == nil {
				convertedData, err := byteArrayConverter.Convert(byteArrayData)
				if err != nil {
					return err
				}
				data = convertedData
			} else {
				log.Printf("error creating byte array converter: %v", err)
			}
		} else {
			log.Printf("no byte array converter named: %s", im.ByteArrayConverter)
		}
	}

	docType := im.determineType(data)
	docMapping := im.mappingForType(docType)
	walkContext := im.newWalkContext(doc, docMapping)
	docMapping.walkDocument(data, []string{}, []uint64{}, walkContext)

	// see if the _all field was disabled
	allMapping := docMapping.documentMappingForPath("_all")
	if allMapping == nil || allMapping.Enabled {
		field := document.NewCompositeFieldWithIndexingOptions("_all", true, []string{}, walkContext.excludedFromAll, document.IndexField|document.IncludeTermVectors)
		doc.AddField(field)
	}

	return nil
}
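The byte-array branch above lets raw []byte input be decoded before the mapping walk. A minimal sketch of what such a converter could look like, with the interface shape inferred from the calls above and the type name purely hypothetical (imports omitted, matching the snippets):

// jsonByteArrayConverter decodes raw JSON bytes into a generic map so the
// normal mapping walk can process it. Sketch only; relies on encoding/json.
type jsonByteArrayConverter struct{}

func (c jsonByteArrayConverter) Convert(in []byte) (interface{}, error) {
	var out map[string]interface{}
	if err := json.Unmarshal(in, &out); err != nil {
		return nil, err
	}
	return out, nil
}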
Example no. 2
0
func (f *Firestorm) Update(doc *document.Document) (err error) {

	// assign this document a number
	doc.Number = atomic.AddUint64(&f.highDocNumber, 1)

	// do analysis before acquiring write lock
	analysisStart := time.Now()
	numPlainTextBytes := doc.NumPlainTextBytes()
	resultChan := make(chan *index.AnalysisResult)
	aw := index.NewAnalysisWork(f, doc, resultChan)

	// put the work on the queue
	f.analysisQueue.Queue(aw)

	// wait for the result
	result := <-resultChan
	close(resultChan)
	atomic.AddUint64(&f.stats.analysisTime, uint64(time.Since(analysisStart)))

	// start a writer for this update
	indexStart := time.Now()
	var kvwriter store.KVWriter
	kvwriter, err = f.store.Writer()
	if err != nil {
		return
	}
	defer func() {
		if cerr := kvwriter.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()

	var dictionaryDeltas map[string]int64
	dictionaryDeltas, err = f.batchRows(kvwriter, [][]index.IndexRow{result.Rows}, nil)
	if err != nil {
		// the deferred Close above will release the writer
		atomic.AddUint64(&f.stats.errors, 1)
		return
	}

	f.compensator.Mutate([]byte(doc.ID), doc.Number)
	f.lookuper.NotifyBatch([]*InFlightItem{{[]byte(doc.ID), doc.Number}})
	f.dictUpdater.NotifyBatch(dictionaryDeltas)

	atomic.AddUint64(&f.stats.indexTime, uint64(time.Since(indexStart)))
	atomic.AddUint64(&f.stats.numPlainTextBytesIndexed, numPlainTextBytes)
	return
}
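Both Update implementations use the same idiom for the KV writer: a deferred Close whose error is propagated through the named return value only when the body itself succeeded. A standalone sketch of that pattern, illustrative rather than taken from the package:

// writeThenClose demonstrates the deferred-Close idiom used above: the Close
// error is surfaced only when no earlier error was already set.
func writeThenClose(w io.WriteCloser, p []byte) (err error) {
	defer func() {
		if cerr := w.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()
	_, err = w.Write(p)
	return
}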
Example no. 3
0
func (im *IndexMapping) mapDocument(doc *document.Document, data interface{}) error {
	docType := im.determineType(data)
	docMapping := im.mappingForType(docType)
	walkContext := im.newWalkContext(doc, docMapping)
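	// only walk the document when the mapping for its type is enabled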
	if docMapping.Enabled {
		docMapping.walkDocument(data, []string{}, []uint64{}, walkContext)

		// see if the _all field was disabled
		allMapping := docMapping.documentMappingForPath("_all")
		if allMapping == nil || allMapping.Enabled {
			field := document.NewCompositeFieldWithIndexingOptions("_all", true, []string{}, walkContext.excludedFromAll, document.IndexField|document.IncludeTermVectors)
			doc.AddField(field)
		}
	}

	return nil
}
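In both mapping variants the catch-all _all composite field is added unless a sub-mapping registered for the "_all" path disables it. A minimal sketch of switching it off from the mapping side, assuming bleve's usual helpers (NewIndexMapping, NewDocumentDisabledMapping, AddSubDocumentMapping; exact names can vary between versions):

// Sketch only: disable the "_all" composite field for the default mapping.
indexMapping := bleve.NewIndexMapping()
indexMapping.DefaultMapping.AddSubDocumentMapping("_all", bleve.NewDocumentDisabledMapping())

With such a mapping in place, documentMappingForPath("_all") returns a disabled mapping and mapDocument skips adding the composite field.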
Example no. 4
0
func (udc *SmolderingCouch) Update(doc *document.Document) (err error) {

	// get the next available doc number
	doc.Number = atomic.AddUint64(&udc.maxInternalDocID, 1)

	// do analysis before acquiring the write lock
	analysisStart := time.Now()
	numPlainTextBytes := doc.NumPlainTextBytes()
	resultChan := make(chan *index.AnalysisResult)
	aw := index.NewAnalysisWork(udc, doc, resultChan)

	// put the work on the queue
	udc.analysisQueue.Queue(aw)

	// wait for the result
	result := <-resultChan
	close(resultChan)
	atomic.AddUint64(&udc.stats.analysisTime, uint64(time.Since(analysisStart)))

	udc.writeMutex.Lock()
	defer udc.writeMutex.Unlock()

	indexReader, err := udc.reader()
	if err != nil {
		return
	}

	// look up the back index row for this doc ID, if one already exists
	var backIndexRow *BackIndexRow
	if udc.cf.Lookup([]byte(doc.ID)) {
		backIndexRow, err = indexReader.backIndexRowForDoc(nil, doc.ID)
		if err != nil {
			_ = indexReader.Close()
			atomic.AddUint64(&udc.stats.errors, 1)
			return
		}
	}

	err = indexReader.Close()
	if err != nil {
		return
	}

	// start a writer for this update
	indexStart := time.Now()
	var kvwriter store.KVWriter
	kvwriter, err = udc.store.Writer()
	if err != nil {
		return
	}
	defer func() {
		if cerr := kvwriter.Close(); err == nil && cerr != nil {
			err = cerr
		}
	}()

	// prepare a list of rows
	var addRowsAll [][]SmolderingCouchRow
	var updateRowsAll [][]SmolderingCouchRow
	var deleteRowsAll [][]SmolderingCouchRow

	addRows, updateRows, deleteRows := udc.mergeOldAndNew(doc.ID, backIndexRow, result.Rows)
	if len(addRows) > 0 {
		addRowsAll = append(addRowsAll, addRows)
	}
	if len(updateRows) > 0 {
		updateRowsAll = append(updateRowsAll, updateRows)
	}
	if len(deleteRows) > 0 {
		deleteRowsAll = append(deleteRowsAll, deleteRows)
	}

	err = udc.batchRows(kvwriter, addRowsAll, updateRowsAll, deleteRowsAll)
	if err == nil && backIndexRow == nil {
		udc.m.Lock()
		udc.docCount++
		udc.m.Unlock()
	}
	atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart)))
	if err == nil {
		udc.cf.Insert([]byte(doc.ID))
		atomic.AddUint64(&udc.stats.updates, 1)
		atomic.AddUint64(&udc.stats.numPlainTextBytesIndexed, numPlainTextBytes)
	} else {
		atomic.AddUint64(&udc.stats.errors, 1)
	}
	return
}
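For context, a minimal caller sketch that works against either Update implementation, using bleve's document package; the document contents and the idx variable are hypothetical:

// Hypothetical usage: build a document and push it through Update.
doc := document.NewDocument("user-42")
doc.AddField(document.NewTextField("name", nil, []byte("Grace Hopper")))
if err := idx.Update(doc); err != nil {
	log.Printf("update failed: %v", err)
}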