// TODO support IndexCommit
func openStandardDirectoryReader(directory store.Directory,
	termInfosIndexDivisor int) (r DirectoryReader, err error) {
	log.Print("Initializing SegmentsFile...")
	obj, err := NewFindSegmentsFile(directory, func(segmentFileName string) (obj interface{}, err error) {
		sis := &SegmentInfos{}
		err = sis.Read(directory, segmentFileName)
		if err != nil {
			return nil, err
		}
		log.Printf("Found %v segments...", len(sis.Segments))
		readers := make([]AtomicReader, len(sis.Segments))
		// Open a SegmentReader for each segment, in reverse order.
		for i := len(sis.Segments) - 1; i >= 0; i-- {
			sr, err := NewSegmentReader(sis.Segments[i], termInfosIndexDivisor, store.IO_CONTEXT_READ)
			readers[i] = sr
			if err != nil {
				// An open failed: close any readers opened so far while
				// keeping the open error as the one that is returned.
				rs := make([]io.Closer, len(readers))
				for j, v := range readers {
					rs[j] = v
				}
				return nil, util.CloseWhileHandlingError(err, rs...)
			}
		}
		log.Printf("Obtained %v SegmentReaders.", len(readers))
		return newStandardDirectoryReader(directory, readers, sis, termInfosIndexDivisor, false), nil
	}).run()
	if err != nil {
		return nil, err
	}
	return obj.(*StandardDirectoryReader), err
}
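The open loop above relies on a common cleanup idiom: collect everything opened so far and close it while keeping the first error. A minimal, self-contained sketch of that idiom using only the standard library; openSegment and openAll are hypothetical stand-ins, not part of this package.

package main

import (
	"fmt"
	"os"
)

// openSegment is a hypothetical stand-in for NewSegmentReader: it opens one
// file per "segment" and may fail partway through the list.
func openSegment(name string) (*os.File, error) {
	return os.Open(name)
}

// openAll opens every segment or none: on the first failure it closes the
// files opened so far and returns the original error.
func openAll(names []string) ([]*os.File, error) {
	files := make([]*os.File, 0, len(names))
	for _, name := range names {
		f, err := openSegment(name)
		if err != nil {
			// Close what we already opened; keep the open error as primary.
			for _, prev := range files {
				prev.Close() // ignore close errors while handling err
			}
			return nil, err
		}
		files = append(files, f)
	}
	return files, nil
}

func main() {
	files, err := openAll([]string{"_0.si", "_1.si"})
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	for _, f := range files {
		defer f.Close()
	}
	fmt.Println("opened", len(files), "segments")
}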
Example #2
/*
Copies the file src to 'to' under the new file name dest.

If you want to copy the entire source directory to the destination
one, you can do so like this:

		var to Directory // the directory to copy to
		for _, file := range dir.ListAll() {
			dir.Copy(to, file, newFile, IO_CONTEXT_DEFAULT)
			// newFile can be either file, or a new name
		}

NOTE: this method does not check whether dest exists and will
overwrite it if it does.
*/
func (d *DirectoryImpl) Copy(to Directory, src, dest string, ctx IOContext) (err error) {
	var os IndexOutput
	var is IndexInput
	defer func() {
		// Close both files, keeping the first error that occurred. If the
		// copy did not complete, best-effort delete the partially written
		// destination file so a failed Copy leaves nothing behind.
		err = util.CloseWhileHandlingError(err, os, is)
		if err != nil {
			to.DeleteFile(dest) // ignore error
		}
	}()

	os, err = to.CreateOutput(dest, ctx)
	if err != nil {
		return err
	}
	is, err = d.OpenInput(src, ctx)
	if err != nil {
		return err
	}
	err = os.CopyBytes(is, is.Length())
	return
}
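The same contract can be expressed with the standard library alone: create the destination, stream the bytes across, and remove the partially written destination if anything fails. A minimal sketch; copyFile and the file paths are illustrative, not part of this package.

package main

import (
	"fmt"
	"io"
	"os"
)

// copyFile copies src to dest, overwriting dest if it exists. If the copy
// fails for any reason, the partially written dest is removed.
func copyFile(src, dest string) (err error) {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dest) // overwrites an existing dest, like Copy above
	if err != nil {
		return err
	}
	defer func() {
		if cerr := out.Close(); err == nil {
			err = cerr
		}
		if err != nil {
			os.Remove(dest) // best effort; ignore error
		}
	}()

	_, err = io.Copy(out, in)
	return err
}

func main() {
	if err := copyFile("segments_1", "backup/segments_1"); err != nil {
		fmt.Println("copy failed:", err)
	}
}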
func (p *StoredFieldsProcessor) flush(state SegmentWriteState) (err error) {
	numDocs := state.segmentInfo.DocCount()
	if numDocs > 0 {
		// It's possible that all documents seen in this segment hit
		// non-aborting errors, in which case we will not have yet init'd
		// the FieldsWriter:
		if err = p.initFieldsWriter(state.context); err != nil {
			return err
		}
		if err = p.fill(numDocs); err != nil {
			return err
		}
	}
	if w := p.fieldsWriter; w != nil {
		var success = false
		defer func() {
			// On success, fold any close error into err; on failure, close
			// the writer without masking the error that got us here.
			if success {
				err = util.CloseWhileHandlingError(err, w)
			} else {
				util.CloseWhileSuppressingError(w)
			}
		}()

		err = w.Finish(state.fieldInfos, numDocs)
		if err != nil {
			return err
		}
		success = true
	}
	return
}
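The success-flag pattern in flush (and in several of the other examples) mirrors Java Lucene's try/finally with IOUtils: close normally when the body succeeded, and close while suppressing the secondary error when it already failed. A stripped-down sketch of the pattern against a plain io.Closer; writeAndClose and its parameters are illustrative only.

package main

import (
	"fmt"
	"io"
	"os"
)

// writeAndClose writes payload to w and then closes it. If the write
// succeeded, a close error is reported; if the write failed, the close
// error is suppressed so the original failure is what the caller sees.
func writeAndClose(w io.WriteCloser, payload []byte) (err error) {
	success := false
	defer func() {
		cerr := w.Close()
		if success && err == nil {
			err = cerr // only surface the close error on an otherwise clean run
		}
	}()

	if _, err = w.Write(payload); err != nil {
		return err
	}
	success = true
	return nil
}

func main() {
	f, err := os.Create("stored_fields.tmp")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(writeAndClose(f, []byte("example")))
}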
Example #4
func (out *FSIndexOutput) Close() (err error) {
	out.parent.onIndexOutputClosed(out)
	// only close the file if it has not been closed yet
	if out.isOpen {
		defer func() {
			out.isOpen = false
			// surface either the buffered-flush error or the file-close error
			err = util.CloseWhileHandlingError(err, out.file)
		}()
		err = out.BufferedIndexOutput.Close()
	}
	return
}
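Close above leans on a Go detail worth spelling out: a deferred closure can overwrite a named return value, which is how a close error can still reach the caller. A tiny self-contained illustration; closeBoth and its arguments are made up for the example.

package main

import (
	"errors"
	"fmt"
)

// closeBoth returns the flush error if there is one, otherwise the close
// error, by letting the deferred closure rewrite the named return value.
func closeBoth(flushErr, closeErr error) (err error) {
	defer func() {
		if err == nil {
			err = closeErr // surfaces only when the body itself succeeded
		}
	}()
	return flushErr
}

func main() {
	fmt.Println(closeBoth(nil, errors.New("close failed")))                        // close failed
	fmt.Println(closeBoth(errors.New("flush failed"), nil))                        // flush failed
	fmt.Println(closeBoth(errors.New("flush failed"), errors.New("close failed"))) // flush failed
}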
Example #5
	log.Printf("Reading FieldInfos from %v...", dir)
	fi = model.FieldInfos{}
	fileName := util.SegmentFileName(segment, "", LUCENE42_FI_EXTENSION)
	log.Printf("Segment: %v", fileName)
	input, err := dir.OpenInput(fileName, context)
	if err != nil {
		return fi, err
	}
	log.Printf("Reading %v", input)

	success := false
	defer func() {
		// Close the input; on failure, do so without masking the error
		// that is already being returned.
		if success {
			input.Close()
		} else {
			util.CloseWhileHandlingError(err, input)
		}
	}()

	_, err = codec.CheckHeader(input,
		LUCENE42_FI_CODEC_NAME,
		LUCENE42_FI_FORMAT_START,
		LUCENE42_FI_FORMAT_CURRENT)
	if err != nil {
		return fi, err
	}

	size, err := input.ReadVInt() // read in the size
	if err != nil {
		return fi, err
	}
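ReadVInt in this and the next example decodes Lucene's variable-length integer format: seven data bits per byte, least-significant group first, with the high bit set on every byte except the last. A minimal sketch of that decoding over an io.ByteReader; readVInt here is a local illustration, not the codec package's implementation.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// readVInt decodes a Lucene-style variable-length int: 7 data bits per byte,
// least-significant group first, high bit set on all but the final byte.
func readVInt(r io.ByteReader) (int32, error) {
	b, err := r.ReadByte()
	if err != nil {
		return 0, err
	}
	n := int32(b & 0x7F)
	for shift := uint(7); b&0x80 != 0; shift += 7 {
		if b, err = r.ReadByte(); err != nil {
			return 0, err
		}
		n |= int32(b&0x7F) << shift
	}
	return n, nil
}

func main() {
	// 300 = 0b1_0010_1100 encodes as 0xAC 0x02.
	n, err := readVInt(bytes.NewReader([]byte{0xAC, 0x02}))
	fmt.Println(n, err) // 300 <nil>
}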
Example #6
func readEntries(handle IndexInputSlicer, dir Directory, name string) (mapping map[string]FileEntry, err error) {
	var stream, entriesStream IndexInput = nil, nil
	defer func() {
		err = util.CloseWhileHandlingError(err, stream, entriesStream)
	}()
	// read the first VInt. If it is negative, it's the version number
	// otherwise it's the count (pre-3.1 indexes)
	mapping = make(map[string]FileEntry)
	stream = handle.openFullSlice()
	log.Printf("Reading from stream: %v", stream)
	firstInt, err := stream.ReadVInt()
	if err != nil {
		return mapping, err
	}
	// impossible for 3.0 to have 63 files in a .cfs, CFS writer was not visible
	// and separate norms/etc are outside of cfs.
	if firstInt == int32(CODEC_MAGIC_BYTE1) {
		// Read and verify the remaining three magic bytes, checking each
		// read error as it happens.
		var secondByte, thirdByte, fourthByte byte
		if secondByte, err = stream.ReadByte(); err != nil {
			return mapping, err
		}
		if thirdByte, err = stream.ReadByte(); err != nil {
			return mapping, err
		}
		if fourthByte, err = stream.ReadByte(); err != nil {
			return mapping, err
		}
		if secondByte != CODEC_MAGIC_BYTE2 ||
			thirdByte != CODEC_MAGIC_BYTE3 ||
			fourthByte != CODEC_MAGIC_BYTE4 {
			return mapping, fmt.Errorf(
				"Illegal/impossible header for CFS file: %v,%v,%v",
				secondByte, thirdByte, fourthByte)
		}

		_, err = codec.CheckHeaderNoMagic(stream, CFD_DATA_CODEC, CFD_VERSION_START, CFD_VERSION_START)
		if err != nil {
			return mapping, err
		}
		entriesFileName := util.SegmentFileName(util.StripExtension(name), "", COMPOUND_FILE_ENTRIES_EXTENSION)
		entriesStream, err = dir.OpenInput(entriesFileName, IO_CONTEXT_READONCE)
		if err != nil {
			return mapping, err
		}
		_, err = codec.CheckHeader(entriesStream, CFD_ENTRY_CODEC, CFD_VERSION_START, CFD_VERSION_START)
		if err != nil {
			return mapping, err
		}
		numEntries, err := entriesStream.ReadVInt()
		if err != nil {
			return mapping, err
		}
		log.Printf("Entries number: %v", numEntries)
		for i := int32(0); i < numEntries; i++ {
			id, err := entriesStream.ReadString()
			if err != nil {
				return mapping, err
			}
			if _, ok := mapping[id]; ok {
				return mapping, fmt.Errorf(
					"Duplicate cfs entry id=%v in CFS: %v", id, entriesStream)
			}
			log.Printf("Found entry: %v", id)
			offset, err := entriesStream.ReadLong()
			if err != nil {
				return mapping, err
			}
			length, err := entriesStream.ReadLong()
			if err != nil {
				return mapping, err
			}
			mapping[id] = FileEntry{offset, length}
		}
	} else {
		// TODO remove once 3.x is not supported anymore
		panic("not supported yet; will also be obsolete soon")
	}
	return mapping, nil
}
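Each FileEntry produced above is just an (offset, length) pair into the single compound file. A sketch of how such an entry could be used to expose one logical file as its own reader, using io.SectionReader over the underlying .cfs handle; the field names Offset and Length, and the example paths, are assumed for illustration.

package main

import (
	"fmt"
	"io"
	"os"
)

// FileEntry mirrors the (offset, length) pair stored in the entries table.
// The field names here are illustrative.
type FileEntry struct {
	Offset, Length int64
}

// openSlice exposes one logical file inside the compound file as a reader
// limited to that entry's byte range.
func openSlice(cfs *os.File, e FileEntry) *io.SectionReader {
	return io.NewSectionReader(cfs, e.Offset, e.Length)
}

func main() {
	cfs, err := os.Open("_0.cfs")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer cfs.Close()

	// Pretend the entries table said "_0.fnm" starts at byte 128 and is 64 bytes long.
	r := openSlice(cfs, FileEntry{Offset: 128, Length: 64})
	buf := make([]byte, r.Size())
	if _, err := io.ReadFull(r, buf); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("read %d bytes of _0.fnm\n", len(buf))
}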