Example #1
// Build builds a PackedLongValues instance that contains the values that
// have been added to this builder. This operation is destructive: the
// pending buffer is released and the builder must not be reused afterwards.
func (b *PackedLongValuesBuilderImpl) Build() PackedLongValues {
	b.finish()
	b.pending = nil
	values := make([]PackedIntsReader, b.valuesOff)
	copy(values, b.values[:b.valuesOff])
	ramBytesUsed := util.ShallowSizeOfInstance(reflect.TypeOf(&PackedLongValuesImpl{})) +
		util.SizeOf(values)
	return newPackedLongValues(b.pageShift, b.pageMask, values, b.size, ramBytesUsed)
}
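A typical call sequence, sketched below, would accumulate values and then freeze them. The Add and Get methods are assumptions modeled on Lucene's PackedLongValues API and are not shown in these examples; the snippet is assumed to live inside the same package, since the constructor is unexported.

func buildExample() {
	// Hypothetical usage: Add and Get are assumed to mirror Lucene's
	// Builder.add(long) and LongValues.get(long).
	b := newPackedLongValuesBuilder(1024, 0.2)
	for _, v := range []int64{3, 1, 4, 1, 5} {
		b.Add(v)
	}
	values := b.Build() // destructive: b must not be reused after this call
	_ = values.Get(2)
}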
Example #2
// NewDeltaPackedLongValuesBuilder creates a builder for delta-compressed
// packed values; mins is sized to match values and holds the per-page
// minimums that deltas are stored against.
func NewDeltaPackedLongValuesBuilder(pageSize int,
	acceptableOverheadRatio float32) *DeltaPackedLongValuesBuilderImpl {

	super := newPackedLongValuesBuilder(pageSize, acceptableOverheadRatio)
	ans := &DeltaPackedLongValuesBuilderImpl{
		PackedLongValuesBuilderImpl: super,
		mins: make([]int64, len(super.values)),
	}
	ans.ramBytesUsed += util.ShallowSizeOfInstance(reflect.TypeOf(&DeltaPackedLongValuesBuilderImpl{})) +
		util.SizeOf(ans.mins)
	return ans
}
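The mins slice parallels values: in the delta variant each packed page is stored relative to a per-page minimum kept in mins, so only small non-negative offsets need to be packed. A minimal, self-contained sketch of that encoding step (not the port's actual pack routine):

// deltaEncodePage is a hypothetical illustration of per-page delta encoding:
// the page minimum is recorded separately and every value is rewritten as a
// non-negative offset from it, which typically needs fewer bits per value.
func deltaEncodePage(page []int64) (min int64, deltas []int64) {
	if len(page) == 0 {
		return 0, nil
	}
	min = page[0]
	for _, v := range page[1:] {
		if v < min {
			min = v
		}
	}
	deltas = make([]int64, len(page))
	for i, v := range page {
		deltas[i] = v - min
	}
	return min, deltas
}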
Example #3
// newPackedLongValuesBuilder creates a builder that buffers incoming values
// in the pending page and tracks its own RAM usage.
func newPackedLongValuesBuilder(pageSize int,
	acceptableOverheadRatio float32) *PackedLongValuesBuilderImpl {

	ans := &PackedLongValuesBuilderImpl{
		pageShift:               checkBlockSize(pageSize, MIN_PAGE_SIZE, MAX_PAGE_SIZE),
		pageMask:                pageSize - 1,
		acceptableOverheadRatio: acceptableOverheadRatio,
		values:                  make([]PackedIntsReader, INITIAL_PAGE_COUNT),
		pending:                 make([]int64, pageSize),
	}
	ans.ramBytesUsed = util.ShallowSizeOfInstance(reflect.TypeOf(&PackedLongValuesBuilderImpl{})) +
		util.SizeOf(ans.pending) + util.ShallowSizeOf(ans.values)
	return ans
}
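The pageShift/pageMask pair only works when pageSize is a power of two, which is what checkBlockSize presumably enforces: the page index and the slot within the page are then recovered with a shift and a mask. A small standalone sketch of that addressing scheme:

// Hypothetical illustration of the addressing implied by pageShift and pageMask:
// for a power-of-two page size, the high bits of an index select the page and
// the low bits select the slot inside it.
const examplePageSize = 1024                // must be a power of two
const examplePageShift = 10                 // log2(examplePageSize)
const examplePageMask = examplePageSize - 1 // low bits pick the in-page slot

func pageAndSlot(index int64) (page, slot int) {
	return int(index >> examplePageShift), int(index & examplePageMask)
}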
Example #4
// newLucene49NormsProducer opens the norms metadata and data files for a
// segment, validating headers, footers, and version consistency along the way.
func newLucene49NormsProducer(state SegmentReadState,
	dataCodec, dataExtension, metaCodec, metaExtension string) (np *NormsProducer, err error) {

	np = &NormsProducer{
		Locker:       new(sync.Mutex),
		norms:        make(map[int]*NormsEntry),
		instances:    make(map[int]NumericDocValues),
		maxDoc:       state.SegmentInfo.DocCount(),
		ramBytesUsed: util.ShallowSizeOfInstance(reflect.TypeOf(np)),
	}
	metaName := util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, metaExtension)
	// Read in the entries from the metadata file.
	var in store.ChecksumIndexInput
	if in, err = state.Dir.OpenChecksumInput(metaName, state.Context); err != nil {
		return nil, err
	}

	if err = func() (err error) {
		var success = false
		defer func() {
			if success {
				// Assign through the inner named return so a Close error is
				// not overwritten when the closure's result is assigned back
				// to the outer err.
				err = util.Close(in)
			} else {
				util.CloseWhileSuppressingError(in)
			}
		}()

		if np.version, err = codec.CheckHeader(in, metaCodec, VERSION_START, VERSION_CURRENT); err != nil {
			return err
		}
		if err = np.readFields(in, state.FieldInfos); err != nil {
			return err
		}
		if _, err = codec.CheckFooter(in); err != nil {
			return err
		}
		success = true
		return nil
	}(); err != nil {
		return nil, err
	}

	dataName := util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, dataExtension)
	if np.data, err = state.Dir.OpenInput(dataName, state.Context); err != nil {
		return nil, err
	}
	var success = false
	defer func() {
		if !success {
			util.CloseWhileSuppressingError(np.data)
		}
	}()

	var version2 int32
	if version2, err = codec.CheckHeader(np.data, dataCodec, VERSION_START, VERSION_CURRENT); err != nil {
		return nil, err
	}
	if version2 != np.version {
		return nil, errors.New("Format versions mismatch")
	}

	// NOTE: the data file is too costly to verify checksum against all the
	// bytes on open, but for now we at least verify proper structure of the
	// checksum footer: which looks for FOOTER_MAGIC + algorithmID. This is
	// cheap and can detect some forms of corruption such as file truncation.
	if _, err = codec.RetrieveChecksum(np.data); err != nil {
		return nil, err
	}

	success = true

	return np, nil
}
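The success flag with deferred cleanup, used twice above, is the Go rendering of Lucene's close-while-suppressing-exception idiom. A stripped-down, standalone sketch of the same pattern, with plain os and a placeholder verify step standing in for the codec header/footer checks:

package main

import "os"

// openVerified opens a file and only hands it to the caller if verification
// succeeds; on any error path the deferred cleanup closes it, mirroring the
// success-flag pattern in newLucene49NormsProducer. The verify callback is a
// placeholder for checks such as codec.CheckHeader and codec.CheckFooter.
func openVerified(name string, verify func(*os.File) error) (f *os.File, err error) {
	if f, err = os.Open(name); err != nil {
		return nil, err
	}
	success := false
	defer func() {
		if !success {
			f.Close() // suppress the close error; the original error is returned
		}
	}()
	if err = verify(f); err != nil {
		return nil, err
	}
	success = true
	return f, nil
}

func main() {
	if f, err := openVerified("example.nvm", func(*os.File) error { return nil }); err == nil {
		f.Close()
	}
}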