/*
Writes this vector to the file `name` in Directory d, in a format that can be
read by the constructor BitVector(Directory, String, IOContext).
*/
func (bv *BitVector) Write(d store.Directory, name string, ctx store.IOContext) (err error) {
	assert(reflect.TypeOf(d).Name() != "CompoundFileDirectory")
	var output store.IndexOutput
	if output, err = d.CreateOutput(name, ctx); err != nil {
		return err
	}
	defer func() {
		err = mergeError(err, output.Close())
	}()
	if err = output.WriteInt(-2); err != nil {
		return err
	}
	if err = codec.WriteHeader(output, CODEC, BV_VERSION_CURRENT); err != nil {
		return err
	}
	if bv.isSparse() {
		// sparse bit-set is more efficiently saved as d-gaps
		err = bv.writeClearedDgaps(output)
	} else {
		err = bv.writeBits(output)
	}
	if err != nil {
		return err
	}
	if err = codec.WriteFooter(output); err != nil {
		return err
	}
	bv.assertCount()
	return nil
}
/* Write as a d-gaps list. */
func (bv *BitVector) writeClearedDgaps(output store.IndexOutput) error {
	err := store.Stream(output).
		WriteInt(-1). // mark using d-gaps
		WriteInt(int32(bv.size)).
		WriteInt(int32(bv.Count())).
		Close()
	if err != nil {
		return err
	}
	last, numCleared := 0, bv.size-bv.Count()
	for i, v := range bv.bits {
		if v == byte(0xff) {
			continue
		}
		err = output.WriteVInt(int32(i - last))
		if err == nil {
			err = output.WriteByte(v)
		}
		if err != nil {
			return err
		}
		last = i
		numCleared -= (8 - util.BitCount(v))
		assert(numCleared >= 0 ||
			i == len(bv.bits)-1 && numCleared == -(8-(bv.size&7)))
		if numCleared <= 0 {
			break
		}
	}
	return nil
}
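// The d-gap trick above is easier to see in isolation. The sketch below is
// not part of golucene; it is a minimal, self-contained illustration of how
// writeClearedDgaps compresses a sparse bit vector: only bytes that are not
// 0xff (i.e. bytes with at least one cleared bit) are emitted, and their
// positions are stored as deltas from the previously emitted byte, which
// keeps the VInt-encoded gaps small.
func dgapsExample(bits []byte) (gaps []int, values []byte) {
	last := 0
	for i, v := range bits {
		if v == 0xff { // fully-set byte: nothing cleared, skip it
			continue
		}
		gaps = append(gaps, i-last) // delta from the previously emitted index
		values = append(values, v)  // the byte itself, with its cleared bits
		last = i
	}
	return gaps, values
}

// For example, dgapsExample([]byte{0xff, 0xff, 0x7f, 0xff, 0xfe}) returns
// gaps [2 2] and values [0x7f 0xfe].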
func (w *Lucene41PostingsWriter) Init(termsOut store.IndexOutput) error {
	err := codec.WriteHeader(termsOut, LUCENE41_TERMS_CODEC, LUCENE41_VERSION_CURRENT)
	if err == nil {
		err = termsOut.WriteVInt(LUCENE41_BLOCK_SIZE)
	}
	return err
}
/*
A utility for writing the SEGMENTS_GEN file to a Directory.

NOTE: this is an internal utility which is kept public so that it's accessible
by code from other packages. You should avoid calling this method unless
you're absolutely sure what you're doing!
*/
func writeSegmentsGen(dir store.Directory, generation int64) {
	if err := func() (err error) {
		var genOutput store.IndexOutput
		genOutput, err = dir.CreateOutput(INDEX_FILENAME_SEGMENTS_GEN, store.IO_CONTEXT_READONCE)
		if err != nil {
			return err
		}
		defer func() {
			err = mergeError(err, genOutput.Close())
			err = mergeError(err, dir.Sync([]string{INDEX_FILENAME_SEGMENTS_GEN}))
		}()
		if err = genOutput.WriteInt(FORMAT_SEGMENTS_GEN_CURRENT); err == nil {
			if err = genOutput.WriteLong(generation); err == nil {
				if err = genOutput.WriteLong(generation); err == nil {
					err = codec.WriteFooter(genOutput)
				}
			}
		}
		return err
	}(); err != nil {
		// It's OK if we fail to write this file since it's used only as
		// one of the retry fallbacks.
		dir.DeleteFile(INDEX_FILENAME_SEGMENTS_GEN) // ignore error; this file is only used in a retry fallback on init
	}
}
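// The resulting segments.gen file is tiny: a format marker, the generation
// written twice (so a reader can detect a torn write by comparing the two
// copies), and a codec footer. The sketch below is illustrative only and is
// not golucene code; it models the body of the file (before the footer) with
// the standard library, relying on the fact that IndexOutput writes ints and
// longs big-endian. The format value passed in is a placeholder, not the real
// FORMAT_SEGMENTS_GEN_CURRENT. Requires "bytes" and "encoding/binary".
func encodeSegmentsGenBody(format int32, generation int64) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, format)     // format marker
	binary.Write(&buf, binary.BigEndian, generation) // generation, written twice so a
	binary.Write(&buf, binary.BigEndian, generation) // reader can cross-check both copies
	return buf.Bytes()
}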
func NewStoredFieldsIndexWriter(indexOutput store.IndexOutput) (*StoredFieldsIndexWriter, error) {
	err := indexOutput.WriteVInt(packed.VERSION_CURRENT)
	if err != nil {
		return nil, err
	}
	return &StoredFieldsIndexWriter{
		fieldsIndexOut:     indexOutput,
		blockChunks:        0,
		blockDocs:          0,
		firstStartPointer:  -1,
		totalDocs:          0,
		docBaseDeltas:      make([]int, BLOCK_SIZE),
		startPointerDeltas: make([]int64, BLOCK_SIZE),
	}, nil
}
func (w *Lucene40SegmentInfoWriter) Write(dir store.Directory, si *SegmentInfo,
	fis FieldInfos, ctx store.IOContext) (err error) {

	filename := util.SegmentFileName(si.Name, "", LUCENE40_SI_EXTENSION)
	si.AddFile(filename)

	var output store.IndexOutput
	output, err = dir.CreateOutput(filename, ctx)
	if err != nil {
		return err
	}
	var success = false
	defer func() {
		if !success {
			util.CloseWhileSuppressingError(output)
			si.Dir.DeleteFile(filename) // ignore error
		} else {
			err = mergeError(err, output.Close())
		}
	}()

	err = codec.WriteHeader(output, LUCENE40_CODEC_NAME, LUCENE40_VERSION_CURRENT)
	if err != nil {
		return err
	}
	// Write the Lucene version that created this segment, since 3.1
	err = store.Stream(output).
		WriteString(si.Version().String()).
		WriteInt(int32(si.DocCount())).
		WriteByte(func() byte {
			if si.IsCompoundFile() {
				return SEGMENT_INFO_YES
			}
			return byte((SEGMENT_INFO_NO + 256) % 256) // Go byte is non-negative, unlike Java
		}()).
		WriteStringStringMap(si.Diagnostics()).
		WriteStringStringMap(map[string]string{}).
		WriteStringSet(si.Files()).
		Close()
	if err != nil {
		return err
	}

	success = true
	return nil
}
func (sis *SegmentInfos) write(directory store.Directory) (err error) {
	segmentsFilename := sis.nextSegmentFilename()

	// Always advance the generation on write:
	if sis.generation == -1 {
		sis.generation = 1
	} else {
		sis.generation++
	}

	var segnOutput store.IndexOutput
	var success = false
	// var upgradedSIFiles = make(map[string]bool)
	defer func() {
		if !success {
			// We hit an error above; try to close the file but suppress
			// any errors
			util.CloseWhileSuppressingError(segnOutput)
			// for filename, _ := range upgradedSIFiles {
			// 	directory.DeleteFile(filename) // ignore error
			// }
			// Try not to leave a truncated segments_N file in the index:
			directory.DeleteFile(segmentsFilename) // ignore error
		}
	}()

	if segnOutput, err = directory.CreateOutput(segmentsFilename, store.IO_CONTEXT_DEFAULT); err != nil {
		return
	}
	if err = codec.WriteHeader(segnOutput, "segments", VERSION_49); err != nil {
		return
	}
	if err = segnOutput.WriteLong(sis.version); err == nil {
		if err = segnOutput.WriteInt(int32(sis.counter)); err == nil {
			err = segnOutput.WriteInt(int32(len(sis.Segments)))
		}
	}
	if err != nil {
		return
	}
	for _, siPerCommit := range sis.Segments {
		si := siPerCommit.Info
		if err = segnOutput.WriteString(si.Name); err == nil {
			if err = segnOutput.WriteString(si.Codec().(Codec).Name()); err == nil {
				if err = segnOutput.WriteLong(siPerCommit.DelGen()); err == nil {
					assert2(siPerCommit.DelCount() >= 0 && siPerCommit.DelCount() <= si.DocCount(),
						"cannot write segment: invalid docCount segment=%v docCount=%v delCount=%v",
						si.Name, si.DocCount(), siPerCommit.DelCount())
					if err = segnOutput.WriteInt(int32(siPerCommit.DelCount())); err == nil {
						if err = segnOutput.WriteLong(siPerCommit.FieldInfosGen()); err == nil {
							if err = segnOutput.WriteLong(siPerCommit.DocValuesGen()); err == nil {
								if err = segnOutput.WriteStringSet(siPerCommit.FieldInfosFiles()); err == nil {
									dvUpdatesFiles := siPerCommit.DocValuesUpdatesFiles()
									if err = segnOutput.WriteInt(int32(len(dvUpdatesFiles))); err == nil {
										for k, v := range dvUpdatesFiles {
											if err = segnOutput.WriteInt(int32(k)); err != nil {
												break
											}
											if err = segnOutput.WriteStringSet(v); err != nil {
												break
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
		if err != nil {
			return
		}
		assert(si.Dir == directory)

		// If this segment is pre-4.x, perform a one-time "upgrade" to
		// write the .si file for it:
		if version := si.Version(); len(version) == 0 || !version.OnOrAfter(util.VERSION_4_0) {
			panic("not implemented yet")
		}
	}
	if err = segnOutput.WriteStringStringMap(sis.userData); err != nil {
		return
	}
	sis.pendingSegnOutput = segnOutput
	success = true
	return nil
}
func writeBytesRef(out store.IndexOutput, bytes []byte) (err error) {
	if err = out.WriteVInt(int32(len(bytes))); err == nil {
		err = out.WriteBytes(bytes)
	}
	return
}
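// For symmetry, the read side of this length-prefixed encoding would look
// roughly like the helper below. readBytesRef is hypothetical (it is not part
// of this package); it assumes store.IndexInput exposes ReadVInt and
// ReadBytes mirroring the writer's calls.
func readBytesRef(in store.IndexInput) ([]byte, error) {
	length, err := in.ReadVInt() // VInt length prefix written by writeBytesRef
	if err != nil {
		return nil, err
	}
	buf := make([]byte, length)
	if err = in.ReadBytes(buf); err != nil { // the raw bytes that follow
		return nil, err
	}
	return buf, nil
}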
/* Writes the index file trailer. */
func (w *BlockTreeTermsWriter) writeIndexTrailer(indexOut store.IndexOutput, dirStart int64) error {
	return indexOut.WriteLong(dirStart)
}
func (w *SkipWriter) WriteSkipData(level int, skipBuffer store.IndexOutput) error {
	delta := w.curDoc - w.lastSkipDoc[level]
	var err error
	if err = skipBuffer.WriteVInt(int32(delta)); err != nil {
		return err
	}
	w.lastSkipDoc[level] = w.curDoc

	if err = skipBuffer.WriteVInt(int32(w.curDocPointer - w.lastSkipDocPointer[level])); err != nil {
		return err
	}
	w.lastSkipDocPointer[level] = w.curDocPointer

	if w.fieldHasPositions {
		if err = skipBuffer.WriteVInt(int32(w.curPosPointer - w.lastSkipPosPointer[level])); err != nil {
			return err
		}
		w.lastSkipPosPointer[level] = w.curPosPointer

		if err = skipBuffer.WriteVInt(int32(w.curPosBufferUpto)); err != nil {
			return err
		}
		if w.fieldHasPayloads {
			if err = skipBuffer.WriteVInt(int32(w.curPayloadByteUpto)); err != nil {
				return err
			}
		}
		if w.fieldHasOffsets || w.fieldHasPayloads {
			if err = skipBuffer.WriteVInt(int32(w.curPayPointer - w.lastSkipPayPointer[level])); err != nil {
				return err
			}
			w.lastSkipPayPointer[level] = w.curPayPointer
		}
	}
	return nil
}
func (w *MockDirectoryWrapper) _crash() error {
	w.crashed = true
	w.openFiles = make(map[string]int)
	w.openFilesForWrite = make(map[string]bool)
	w.openFilesDeleted = make(map[string]bool)
	files := w.unSyncedFiles
	w.unSyncedFiles = make(map[string]bool)
	// first force-close all files, so we can corrupt on windows etc.
	// clone the file map, as these guys want to remove themselves on close.
	m := make(map[io.Closer]error)
	for k, v := range w.openFileHandles {
		m[k] = v
	}
	for f := range m {
		f.Close() // ignore error
	}
	for name := range files {
		var action string
		var err error
		switch w.randomState.Intn(5) {
		case 0:
			action = "deleted"
			err = w.deleteFile(name, true)
		case 1:
			action = "zeroes"
			// Zero out file entirely
			var length int64
			length, err = w.FileLength(name)
			if err == nil {
				zeroes := make([]byte, 256)
				var upto int64 = 0
				var out store.IndexOutput
				out, err = w.BaseDirectoryWrapperImpl.CreateOutput(name, NewDefaultIOContext(w.randomState))
				if err == nil {
					for upto < length && err == nil {
						limit := length - upto
						if int64(len(zeroes)) < limit {
							limit = int64(len(zeroes))
						}
						err = out.WriteBytes(zeroes[:limit])
						upto += limit
					}
					if err == nil {
						err = out.Close()
					}
				}
			}
		case 2:
			action = "partially truncated"
			// Partially truncate the file:

			// First, make temp file and copy only half this file over:
			var tempFilename string
			for {
				tempFilename = fmt.Sprintf("%v", w.randomState.Int())
				if !w.BaseDirectoryWrapperImpl.FileExists(tempFilename) {
					break
				}
			}
			var tempOut store.IndexOutput
			if tempOut, err = w.BaseDirectoryWrapperImpl.CreateOutput(tempFilename, NewDefaultIOContext(w.randomState)); err == nil {
				var ii store.IndexInput
				if ii, err = w.BaseDirectoryWrapperImpl.OpenInput(name, NewDefaultIOContext(w.randomState)); err == nil {
					if err = tempOut.CopyBytes(ii, ii.Length()/2); err == nil {
						if err = tempOut.Close(); err == nil {
							if err = ii.Close(); err == nil {
								// Delete original and copy bytes back:
								if err = w.deleteFile(name, true); err == nil {
									var out store.IndexOutput
									if out, err = w.BaseDirectoryWrapperImpl.CreateOutput(name, NewDefaultIOContext(w.randomState)); err == nil {
										if ii, err = w.BaseDirectoryWrapperImpl.OpenInput(tempFilename, NewDefaultIOContext(w.randomState)); err == nil {
											if err = out.CopyBytes(ii, ii.Length()); err == nil {
												if err = out.Close(); err == nil {
													if err = ii.Close(); err == nil {
														err = w.deleteFile(tempFilename, true)
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		case 3:
			// the file survived intact:
			action = "didn't change"
		default:
			action = "fully truncated"
			// totally truncate the file to zero bytes
			if err = w.deleteFile(name, true); err == nil {
				var out store.IndexOutput
				if out, err = w.BaseDirectoryWrapperImpl.CreateOutput(name, NewDefaultIOContext(w.randomState)); err == nil {
					if err = out.SetLength(0); err == nil {
						err = out.Close()
					}
				}
			}
		}
		if err != nil {
			return err
		}
		if VERBOSE {
			log.Printf("MockDirectoryWrapper: %v unsynced file: %v", action, name)
		}
	}
	return nil
}
		return DOC_VALUES_TYPE_BINARY, nil
	case 3:
		return DOC_VALUES_TYPE_SORTED, nil
	case 4:
		return DOC_VALUES_TYPE_SORTED_SET, nil
	default:
		return DocValuesType(0), errors.New(
			fmt.Sprintf("invalid docvalues byte: %v (resource=%v)", b, input))
	}
}

var Lucene46FieldInfosWriter = func(dir store.Directory, segName, suffix string,
	infos FieldInfos, ctx store.IOContext) (err error) {

	filename := util.SegmentFileName(segName, suffix, FI_EXTENSION)
	var output store.IndexOutput
	if output, err = dir.CreateOutput(filename, ctx); err != nil {
		return
	}
	var success = false
	defer func() {
		if success {
			err = output.Close()
		} else {
			util.CloseWhileSuppressingError(output)
		}
	}()

	if err = codec.WriteHeader(output, FI_CODEC_NAME, FI_FORMAT_CURRENT); err != nil {
		return