// TODO support IndexCommit
func openStandardDirectoryReader(directory store.Directory,
	termInfosIndexDivisor int) (r DirectoryReader, err error) {
	log.Print("Initializing SegmentsFile...")
	obj, err := NewFindSegmentsFile(directory, func(segmentFileName string) (obj interface{}, err error) {
		sis := &SegmentInfos{}
		err = sis.Read(directory, segmentFileName)
		if err != nil {
			return nil, err
		}
		log.Printf("Found %v segments...", len(sis.Segments))

		// Open one SegmentReader per segment. If any open fails, close the
		// readers obtained so far while preserving the original error.
		readers := make([]AtomicReader, len(sis.Segments))
		for i := len(sis.Segments) - 1; i >= 0; i-- {
			sr, err := NewSegmentReader(sis.Segments[i], termInfosIndexDivisor, store.IO_CONTEXT_READ)
			readers[i] = sr
			if err != nil {
				rs := make([]io.Closer, len(readers))
				for i, v := range readers {
					rs[i] = v
				}
				return nil, util.CloseWhileHandlingError(err, rs...)
			}
		}
		log.Printf("Obtained %v SegmentReaders.", len(readers))
		return newStandardDirectoryReader(directory, readers, *sis, termInfosIndexDivisor, false), nil
	}).run()
	if err != nil {
		return nil, err
	}
	return obj.(*StandardDirectoryReader), err
}
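// A minimal, self-contained sketch of the cleanup pattern above: Go has no
// slice covariance, so a slice of readers (here stood in for by []*fakeReader)
// must be copied element by element into a []io.Closer before it can be passed
// to a variadic close-all helper. fakeReader and closeAll are illustrative
// stand-ins, not part of this codebase; closeAll only approximates what
// util.CloseWhileHandlingError is assumed to do (best-effort close of every
// non-nil closer while keeping the first error).
package main

import (
	"fmt"
	"io"
)

type fakeReader struct{ name string }

func (f *fakeReader) Close() error {
	fmt.Println("closing", f.name)
	return nil
}

// closeAll closes every non-nil closer and reports the first error seen.
func closeAll(first error, closers ...io.Closer) error {
	for _, c := range closers {
		if c == nil {
			continue // slots never filled (e.g. after a partial open) stay nil
		}
		if err := c.Close(); err != nil && first == nil {
			first = err
		}
	}
	return first
}

func main() {
	readers := []*fakeReader{{"_0"}, {"_1"}}
	rs := make([]io.Closer, len(readers))
	for i, v := range readers {
		rs[i] = v // explicit element-wise conversion, as in the code above
	}
	fmt.Println(closeAll(nil, rs...))
}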
log.Printf("Reading FieldInfos from %v...", dir) fi = FieldInfos{} fileName := util.SegmentFileName(segment, "", LUCENE42_FI_EXTENSION) log.Printf("Segment: %v", fileName) input, err := dir.OpenInput(fileName, context) if err != nil { return fi, err } log.Printf("Reading %v", input) success := false defer func() { if success { input.Close() } else { util.CloseWhileHandlingError(err, input) } }() _, err = codec.CheckHeader(input, LUCENE42_FI_CODEC_NAME, LUCENE42_FI_FORMAT_START, LUCENE42_FI_FORMAT_CURRENT) if err != nil { return fi, err } size, err := input.ReadVInt() //read in the size if err != nil { return fi, err }
func readEntries(handle IndexInputSlicer, dir Directory, name string) (mapping map[string]FileEntry, err error) {
	var stream, entriesStream IndexInput
	defer func() {
		err = util.CloseWhileHandlingError(err, stream, entriesStream)
	}()

	// Read the first VInt. If it is negative, it's the version number;
	// otherwise it's the count (pre-3.1 indexes).
	mapping = make(map[string]FileEntry)
	stream = handle.openFullSlice()
	log.Printf("Reading from stream: %v", stream)
	firstInt, err := stream.ReadVInt()
	if err != nil {
		return mapping, err
	}
	// Impossible for 3.0 to have 63 files in a .cfs: the CFS writer was not
	// visible and separate norms/etc. are outside of cfs.
	if firstInt == int32(CODEC_MAGIC_BYTE1) {
		// Verify the remaining three magic bytes of the codec header,
		// propagating any read error instead of silently dropping it.
		var secondByte, thirdByte, fourthByte byte
		if secondByte, err = stream.ReadByte(); err != nil {
			return mapping, err
		}
		if thirdByte, err = stream.ReadByte(); err != nil {
			return mapping, err
		}
		if fourthByte, err = stream.ReadByte(); err != nil {
			return mapping, err
		}
		if secondByte != CODEC_MAGIC_BYTE2 ||
			thirdByte != CODEC_MAGIC_BYTE3 ||
			fourthByte != CODEC_MAGIC_BYTE4 {
			return mapping, fmt.Errorf(
				"Illegal/impossible header for CFS file: %v,%v,%v",
				secondByte, thirdByte, fourthByte)
		}

		_, err = codec.CheckHeaderNoMagic(stream, CFD_DATA_CODEC, CFD_VERSION_START, CFD_VERSION_START)
		if err != nil {
			return mapping, err
		}

		// The per-file table of contents lives in a separate entries file.
		entriesFileName := util.SegmentFileName(util.StripExtension(name), "", COMPOUND_FILE_ENTRIES_EXTENSION)
		entriesStream, err = dir.OpenInput(entriesFileName, IO_CONTEXT_READONCE)
		if err != nil {
			return mapping, err
		}
		_, err = codec.CheckHeader(entriesStream, CFD_ENTRY_CODEC, CFD_VERSION_START, CFD_VERSION_START)
		if err != nil {
			return mapping, err
		}

		numEntries, err := entriesStream.ReadVInt()
		if err != nil {
			return mapping, err
		}
		log.Printf("Entries number: %v", numEntries)
		for i := int32(0); i < numEntries; i++ {
			id, err := entriesStream.ReadString()
			if err != nil {
				return mapping, err
			}
			if _, ok := mapping[id]; ok {
				return mapping, fmt.Errorf(
					"Duplicate cfs entry id=%v in CFS: %v", id, entriesStream)
			}
			log.Printf("Found entry: %v", id)
			offset, err := entriesStream.ReadLong()
			if err != nil {
				return mapping, err
			}
			length, err := entriesStream.ReadLong()
			if err != nil {
				return mapping, err
			}
			mapping[id] = FileEntry{offset, length}
		}
	} else {
		// TODO remove once 3.x is not supported anymore
		panic("not supported yet; will also be obsolete soon")
	}
	return mapping, nil
}
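// Why "63 files" is the telltale: assuming this codebase's CODEC_MAGIC_BYTE1..4
// correspond to Lucene's standard codec magic 0x3fd76c17, the first byte of a
// post-3.1 compound file is 0x3f == 63. A single-byte VInt below 0x80 decodes
// to its own value, so readEntries sees firstInt == 63 exactly when the file
// starts with a codec header rather than a pre-3.1 file count. A quick check:
package main

import "fmt"

func main() {
	var codecMagic uint32 = 0x3fd76c17 // Lucene's CodecUtil magic, written big-endian
	b1 := byte(codecMagic >> 24)
	b2 := byte(codecMagic >> 16)
	b3 := byte(codecMagic >> 8)
	b4 := byte(codecMagic)
	fmt.Println(b1, b2, b3, b4) // 63 215 108 23
}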