func newLucene49NormsConsumer(state *SegmentWriteState, dataCodec, dataExtension, metaCodec, metaExtension string) (nc *NormsConsumer, err error) { assert(packed.PackedFormat(packed.PACKED_SINGLE_BLOCK).IsSupported(1)) assert(packed.PackedFormat(packed.PACKED_SINGLE_BLOCK).IsSupported(2)) assert(packed.PackedFormat(packed.PACKED_SINGLE_BLOCK).IsSupported(4)) nc = &NormsConsumer{maxDoc: state.SegmentInfo.DocCount()} var success = false defer func() { if !success { util.CloseWhileSuppressingError(nc) } }() dataName := util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, dataExtension) if nc.data, err = state.Directory.CreateOutput(dataName, state.Context); err != nil { return nil, err } if err = codec.WriteHeader(nc.data, dataCodec, VERSION_CURRENT); err != nil { return nil, err } metaName := util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, metaExtension) if nc.meta, err = state.Directory.CreateOutput(metaName, state.Context); err != nil { return nil, err } if err = codec.WriteHeader(nc.meta, metaCodec, VERSION_CURRENT); err != nil { return nil, err } success = true return nc, nil }
func (w *CompoundFileWriter) writeEntryTable(entries map[string]*FileEntry, entryOut IndexOutput) (err error) { if err = codec.WriteHeader(entryOut, CFD_ENTRY_CODEC, CFD_VERSION_CURRENT); err == nil { if err = entryOut.WriteVInt(int32(len(entries))); err == nil { var names []string for name, _ := range entries { names = append(names, name) } sort.Strings(names) for _, name := range names { // for _, fe := range entries { fe := entries[name] if err = Stream(entryOut). WriteString(util.StripSegmentName(fe.file)). WriteLong(fe.offset). WriteLong(fe.length). Close(); err != nil { break } } } } if err == nil { err = codec.WriteFooter(entryOut) } return err }
/* Writes this vector to the file name in Directory d, in a format that can be read by the constructor BitVector(Directory, String, IOContext) */ func (bv *BitVector) Write(d store.Directory, name string, ctx store.IOContext) (err error) { assert(reflect.TypeOf(d).Name() != "CompoundFileDirectory") var output store.IndexOutput if output, err = d.CreateOutput(name, ctx); err != nil { return err } defer func() { err = mergeError(err, output.Close()) }() if err = output.WriteInt(-2); err != nil { return err } if err = codec.WriteHeader(output, CODEC, BV_VERSION_CURRENT); err != nil { return err } if bv.isSparse() { // sparse bit-set more efficiently saved as d-gaps. err = bv.writeClearedDgaps(output) } else { err = bv.writeBits(output) } if err != nil { return err } if err = codec.WriteFooter(output); err != nil { return err } bv.assertCount() return nil }
func (w *Lucene41PostingsWriter) Init(termsOut store.IndexOutput) error { err := codec.WriteHeader(termsOut, LUCENE41_TERMS_CODEC, LUCENE41_VERSION_CURRENT) if err == nil { err = termsOut.WriteVInt(LUCENE41_BLOCK_SIZE) } return err }
func (w *WriterImpl) writeHeader() error { assert(w.valueCount != -1) err := codec.WriteHeader(w.out, PACKED_CODEC_NAME, VERSION_CURRENT) if err == nil { err = w.out.WriteVInt(int32(w.bitsPerValue)) if err == nil { err = w.out.WriteVInt(int32(w.valueCount)) if err == nil { err = w.out.WriteVInt(int32(PackedFormat(w.format).Id())) } } } return err }
// Write writes the .si file for the given segment: codec header, the
// Lucene version that created the segment, doc count, compound-file flag,
// diagnostics, an empty attributes map, and the segment's file set.
// On failure the partially-written file is deleted; on success the
// output's Close error is folded into the named return value.
func (w *Lucene40SegmentInfoWriter) Write(dir store.Directory, si *SegmentInfo, fis FieldInfos, ctx store.IOContext) (err error) {
	filename := util.SegmentFileName(si.Name, "", LUCENE40_SI_EXTENSION)
	// Register the .si file with the segment before writing it.
	si.AddFile(filename)
	var output store.IndexOutput
	output, err = dir.CreateOutput(filename, ctx)
	if err != nil {
		return err
	}
	var success = false
	defer func() {
		if !success {
			// Clean up the partial file; suppress secondary errors.
			util.CloseWhileSuppressingError(output)
			si.Dir.DeleteFile(filename) // ignore error
		} else {
			err = mergeError(err, output.Close())
		}
	}()
	err = codec.WriteHeader(output, LUCENE40_CODEC_NAME, LUCENE40_VERSION_CURRENT)
	if err != nil {
		return err
	}
	// Write the Lucene version that created this segment, since 3.1
	err = store.Stream(output).WriteString(si.Version().String()).
		WriteInt(int32(si.DocCount())).
		WriteByte(func() byte {
			if si.IsCompoundFile() {
				return SEGMENT_INFO_YES
			}
			return byte((SEGMENT_INFO_NO + 256) % 256) // Go byte is non-negative, unlike Java
		}()).WriteStringStringMap(si.Diagnostics()).
		// Empty attributes map kept for format compatibility.
		WriteStringStringMap(map[string]string{}).
		WriteStringSet(si.Files()).Close()
	if err != nil {
		return err
	}
	success = true
	return nil
}
func (w *CompoundFileWriter) output(ctx IOContext) (IndexOutput, error) { w.Lock() defer w.Unlock() if w.dataOut == nil { var success = false defer func() { if !success { util.CloseWhileSuppressingError(w.dataOut) } }() var err error w.dataOut, err = w.directory.CreateOutput(w.dataFileName, ctx) if err != nil { return nil, err } err = codec.WriteHeader(w.dataOut, CFD_DATA_CODEC, CFD_VERSION_CURRENT) if err != nil { return nil, err } success = true } return w.dataOut, nil }
// write writes the next segments_N file in the 4.9 format: header,
// index version/counter/segment-count, one record per segment (name,
// codec, deletion gen, del count, fieldInfos/docValues gens and files,
// doc-values-updates files), then the user-data map. The output is NOT
// closed here: it is parked in pendingSegnOutput so the two-phase commit
// can later finish or roll back. On error the truncated file is deleted.
func (sis *SegmentInfos) write(directory store.Directory) (err error) {
	segmentsFilename := sis.nextSegmentFilename()
	// Always advance the generation on write:
	if sis.generation == -1 {
		sis.generation = 1
	} else {
		sis.generation++
	}
	var segnOutput store.IndexOutput
	var success = false
	// var upgradedSIFiles = make(map[string]bool)
	defer func() {
		if !success {
			// We hit an error above; try to close the file but suppress
			// any errors
			util.CloseWhileSuppressingError(segnOutput)
			// for filename, _ := range upgradedSIFiles {
			// directory.DeleteFile(filename) // ignore error
			// }
			// Try not to leave a truncated segments_N file in the index:
			directory.DeleteFile(segmentsFilename) // ignore error
		}
	}()
	if segnOutput, err = directory.CreateOutput(segmentsFilename, store.IO_CONTEXT_DEFAULT); err != nil {
		return
	}
	if err = codec.WriteHeader(segnOutput, "segments", VERSION_49); err != nil {
		return
	}
	// Global index state: version, name counter, number of segments.
	if err = segnOutput.WriteLong(sis.version); err == nil {
		if err = segnOutput.WriteInt(int32(sis.counter)); err == nil {
			err = segnOutput.WriteInt(int32(len(sis.Segments)))
		}
	}
	if err != nil {
		return
	}
	for _, siPerCommit := range sis.Segments {
		si := siPerCommit.Info
		// Per-segment record; the nested ladder short-circuits on the
		// first write error, which is checked once below.
		if err = segnOutput.WriteString(si.Name); err == nil {
			if err = segnOutput.WriteString(si.Codec().(Codec).Name()); err == nil {
				if err = segnOutput.WriteLong(siPerCommit.DelGen()); err == nil {
					assert2(siPerCommit.DelCount() >= 0 && siPerCommit.DelCount() <= si.DocCount(),
						"cannot write segment: invalid docCount segment=%v docCount=%v delCount=%v",
						si.Name, si.DocCount(), siPerCommit.DelCount())
					if err = segnOutput.WriteInt(int32(siPerCommit.DelCount())); err == nil {
						if err = segnOutput.WriteLong(siPerCommit.FieldInfosGen()); err == nil {
							if err = segnOutput.WriteLong(siPerCommit.DocValuesGen()); err == nil {
								if err = segnOutput.WriteStringSet(siPerCommit.FieldInfosFiles()); err == nil {
									dvUpdatesFiles := siPerCommit.DocValuesUpdatesFiles()
									if err = segnOutput.WriteInt(int32(len(dvUpdatesFiles))); err == nil {
										for k, v := range dvUpdatesFiles {
											if err = segnOutput.WriteInt(int32(k)); err != nil {
												break
											}
											if err = segnOutput.WriteStringSet(v); err != nil {
												break
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
		if err != nil {
			return
		}
		assert(si.Dir == directory)
		// If this segment is pre-4.x, perform a one-time "upgrade" to
		// write the .si file for it:
		if version := si.Version(); len(version) == 0 || !version.OnOrAfter(util.VERSION_4_0) {
			panic("not implemented yet")
		}
	}
	if err = segnOutput.WriteStringStringMap(sis.userData); err != nil {
		return
	}
	// Keep the output open for the second phase of the commit.
	sis.pendingSegnOutput = segnOutput
	success = true
	return nil
}
// WriteIndexHeader writes the terms-index codec header to out.
func (w *BlockTreeTermsWriter) WriteIndexHeader(out store.IndexOutput) error { return codec.WriteHeader(out, TERMS_INDEX_CODEC_NAME, TERMS_VERSION_CURRENT) }
// NewCompressingStoredFieldsWriter creates a stored-fields writer for the
// given segment: it opens the fields index (.fdx) and fields data (.fdt)
// outputs, writes a codec header to each, wraps the index output in a
// StoredFieldsIndexWriter, and records the chunk size and packed-ints
// version in the data stream. On any failure the index stream is closed
// and the partially-built writer is aborted.
func NewCompressingStoredFieldsWriter(dir store.Directory, si *model.SegmentInfo, segmentSuffix string, ctx store.IOContext, formatName string, compressionMode CompressionMode, chunkSize int) (*CompressingStoredFieldsWriter, error) {
	assert(dir != nil)
	ans := &CompressingStoredFieldsWriter{
		directory:       dir,
		segment:         si.Name,
		segmentSuffix:   segmentSuffix,
		compressionMode: compressionMode,
		compressor:      compressionMode.NewCompressor(),
		chunkSize:       chunkSize,
		docBase:         0,
		bufferedDocs:    newGrowableByteArrayDataOutput(chunkSize),
		numStoredFields: make([]int, 16),
		endOffsets:      make([]int, 16),
		numBufferedDocs: 0,
	}
	var success = false
	indexStream, err := dir.CreateOutput(util.SegmentFileName(si.Name, segmentSuffix, lucene40.FIELDS_INDEX_EXTENSION), ctx)
	if err != nil {
		return nil, err
	}
	assert(indexStream != nil)
	defer func() {
		if !success {
			// indexStream is nil once ownership has passed to the
			// index writer (see below); closing nil is suppressed.
			util.CloseWhileSuppressingError(indexStream)
			ans.Abort()
		}
	}()
	ans.fieldsStream, err = dir.CreateOutput(util.SegmentFileName(si.Name, segmentSuffix, lucene40.FIELDS_EXTENSION), ctx)
	if err != nil {
		return nil, err
	}
	codecNameIdx := formatName + CODEC_SFX_IDX
	codecNameDat := formatName + CODEC_SFX_DAT
	err = codec.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT)
	if err != nil {
		return nil, err
	}
	err = codec.WriteHeader(ans.fieldsStream, codecNameDat, VERSION_CURRENT)
	if err != nil {
		return nil, err
	}
	// Both streams must sit exactly past their headers.
	assert(int64(codec.HeaderLength(codecNameIdx)) == indexStream.FilePointer())
	assert(int64(codec.HeaderLength(codecNameDat)) == ans.fieldsStream.FilePointer())
	ans.indexWriter, err = NewStoredFieldsIndexWriter(indexStream)
	if err != nil {
		return nil, err
	}
	assert(ans.indexWriter != nil)
	// Ownership of indexStream has transferred to indexWriter; clear the
	// local so the deferred cleanup does not double-close it.
	indexStream = nil
	err = ans.fieldsStream.WriteVInt(int32(chunkSize))
	if err != nil {
		return nil, err
	}
	err = ans.fieldsStream.WriteVInt(packed.VERSION_CURRENT)
	if err != nil {
		return nil, err
	}
	success = true
	return ans, nil
}
/* Creates a postings writer with the specified PackedInts overhead ratio */ func newLucene41PostingsWriter(state *SegmentWriteState, accetableOverheadRatio float32) (*Lucene41PostingsWriter, error) { docOut, err := state.Directory.CreateOutput( util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, LUCENE41_DOC_EXTENSION), state.Context) if err != nil { return nil, err } ans := new(Lucene41PostingsWriter) if err = func() error { var posOut store.IndexOutput var payOut store.IndexOutput var success = false defer func() { if !success { util.CloseWhileSuppressingError(docOut, posOut, payOut) } }() err := codec.WriteHeader(docOut, LUCENE41_DOC_CODEC, LUCENE41_VERSION_CURRENT) if err != nil { return err } ans.forUtil, err = NewForUtilInto(accetableOverheadRatio, docOut) if err != nil { return err } if state.FieldInfos.HasProx { ans.posDeltaBuffer = make([]int, MAX_DATA_SIZE) posOut, err = state.Directory.CreateOutput(util.SegmentFileName( state.SegmentInfo.Name, state.SegmentSuffix, LUCENE41_POS_EXTENSION), state.Context) if err != nil { return err } err = codec.WriteHeader(posOut, LUCENE41_POS_CODEC, LUCENE41_VERSION_CURRENT) if err != nil { return err } if state.FieldInfos.HasPayloads { ans.payloadBytes = make([]byte, 128) ans.payloadLengthBuffer = make([]int, MAX_DATA_SIZE) } if state.FieldInfos.HasOffsets { ans.offsetStartDeltaBuffer = make([]int, MAX_DATA_SIZE) ans.offsetLengthBuffer = make([]int, MAX_DATA_SIZE) } if state.FieldInfos.HasPayloads || state.FieldInfos.HasOffsets { payOut, err = state.Directory.CreateOutput(util.SegmentFileName( state.SegmentInfo.Name, state.SegmentSuffix, LUCENE41_PAY_EXTENSION), state.Context) if err != nil { return err } err = codec.WriteHeader(payOut, LUCENE41_PAY_CODEC, LUCENE41_VERSION_CURRENT) } } ans.payOut, ans.posOut = payOut, posOut ans.docOut = docOut success = true return nil }(); err != nil { return nil, err } ans.docDeltaBuffer = make([]int, MAX_DATA_SIZE) ans.freqBuffer = make([]int, MAX_DATA_SIZE) 
ans.encoded = make([]byte, MAX_ENCODED_SIZE) // TODO: should we try skipping every 2/4 blocks...? ans.skipWriter = NewSkipWriter( maxSkipLevels, LUCENE41_BLOCK_SIZE, state.SegmentInfo.DocCount(), ans.docOut, ans.posOut, ans.payOut) return ans, nil }
// NOTE(review): this span is a fragment — the enclosing func header and the
// remainder of the per-field loop are outside the visible chunk.
// It creates the field-infos (.fnm) file, writes its codec header and the
// field count, then begins encoding one bit-flag byte per field.
filename := util.SegmentFileName(segName, suffix, FI_EXTENSION)
var output store.IndexOutput
if output, err = dir.CreateOutput(filename, ctx); err != nil {
	return
}
var success = false
defer func() {
	if success {
		// Fold Close into the (named) error return.
		err = output.Close()
	} else {
		util.CloseWhileSuppressingError(output)
	}
}()
if err = codec.WriteHeader(output, FI_CODEC_NAME, FI_FORMAT_CURRENT); err != nil {
	return
}
if err = output.WriteVInt(int32(infos.Size())); err != nil {
	return
}
for _, fi := range infos.Values {
	indexOptions := fi.IndexOptions()
	// Accumulate per-field flags into a single byte.
	bits := byte(0)
	if fi.HasVectors() {
		bits |= FI_STORE_TERMVECTOR
	}
	if fi.OmitsNorms() {
		bits |= FI_OMIT_NORMS
	}
	if fi.HasPayloads() {
func (sis *SegmentInfos) write(directory store.Directory) error { segmentsFilename := sis.nextSegmentFilename() // Always advance the generation on write: if sis.generation == -1 { sis.generation = 1 } else { sis.generation++ } var segnOutput *store.ChecksumIndexOutput var success = false var upgradedSIFiles = make(map[string]bool) defer func() { if !success { // We hit an error above; try to close the file but suppress // any errors util.CloseWhileSuppressingError(segnOutput) for filename, _ := range upgradedSIFiles { directory.DeleteFile(filename) // ignore error } // Try not to leave a truncated segments_N fle in the index: directory.DeleteFile(segmentsFilename) // ignore error } }() out, err := directory.CreateOutput(segmentsFilename, store.IO_CONTEXT_DEFAULT) if err != nil { return err } segnOutput = store.NewChecksumIndexOutput(out) err = codec.WriteHeader(segnOutput, "segments", VERSION_40) if err != nil { return err } err = segnOutput.WriteLong(sis.version) if err == nil { err = segnOutput.WriteInt(int32(sis.counter)) if err == nil { err = segnOutput.WriteInt(int32(len(sis.Segments))) } } if err != nil { return err } for _, siPerCommit := range sis.Segments { si := siPerCommit.info err = segnOutput.WriteString(si.Name) if err == nil { err = segnOutput.WriteString(si.Codec().(Codec).Name()) if err == nil { err = segnOutput.WriteLong(siPerCommit.delGen) if err == nil { err = segnOutput.WriteInt(int32(siPerCommit.delCount)) } } } if err != nil { return err } assert(si.Dir == directory) assert(siPerCommit.delCount <= si.DocCount()) // If this segment is pre-4.x, perform a one-time "upgrade" to // write the .si file for it: if version := si.Version(); version == "" || versionLess(version, "4.0") { panic("not implemented yet") } } err = segnOutput.WriteStringStringMap(sis.userData) if err != nil { return err } sis.pendingSegnOutput = segnOutput success = true return nil }