Example #1
func (np *NormsProducer) loadNorms(field *FieldInfo) (NumericDocValues, error) {
	entry, ok := np.norms[int(field.Number)]
	assert(ok)
	switch entry.format {
	case CONST_COMPRESSED:
		// Every document shares a single norm value, stored in entry.offset.
		return func(int) int64 { return entry.offset }, nil
	case UNCOMPRESSED:
		panic("not implemented yet")
	case DELTA_COMPRESSED:
		panic("not implemented yet")
	case TABLE_COMPRESSED:
		if err := np.data.Seek(entry.offset); err != nil {
			return nil, err
		}
		packedVersion, err := np.data.ReadVInt()
		if err != nil {
			return nil, err
		}
		size, err := int32ToInt(np.data.ReadVInt())
		if err != nil {
			return nil, err
		}
		if size > 256 {
			return nil, fmt.Errorf(
				"TABLE_COMPRESSED cannot have more than 256 distinct values, input=%v",
				np.data)
		}
		// Read the table of distinct norm values, then the packed
		// per-document ordinals that index into it.
		decode := make([]int64, size)
		for i := range decode {
			if decode[i], err = np.data.ReadLong(); err != nil {
				return nil, err
			}
		}
		formatId, err := int32ToInt(np.data.ReadVInt())
		if err != nil {
			return nil, err
		}
		bitsPerValue, err := np.data.ReadVInt()
		if err != nil {
			return nil, err
		}
		ordsReader, err := packed.ReaderNoHeader(np.data,
			packed.PackedFormat(formatId), packedVersion,
			int32(np.maxDoc), uint32(bitsPerValue))
		if err != nil {
			return nil, err
		}
		atomic.AddInt64(&np.ramBytesUsed, util.SizeOf(decode)+ordsReader.RamBytesUsed())
		return func(docId int) int64 {
			return decode[int(ordsReader.Get(docId))]
		}, nil
	default:
		panic("assert fail")
	}
	panic("should not be here")
}
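
The TABLE_COMPRESSED branch deduplicates norms into a table of at most 256 distinct values and stores only a small per-document ordinal into that table. A minimal standalone sketch of that lookup scheme, not the library's API: NumericDocValues is redeclared here, and a plain byte slice stands in for packed.PackedIntsReader so the snippet runs on its own.

package main

import "fmt"

// NumericDocValues matches the shape loadNorms returns: docID -> value.
type NumericDocValues func(docID int) int64

// tableCompressed resolves each document's norm through a shared decode
// table, the same indirection loadNorms builds for TABLE_COMPRESSED.
func tableCompressed(decode []int64, ords []byte) NumericDocValues {
	return func(docID int) int64 {
		return decode[ords[docID]]
	}
}

func main() {
	decode := []int64{-127, 0, 42} // at most 256 distinct values
	ords := []byte{0, 2, 2, 1, 0}  // one small ordinal per document
	norms := tableCompressed(decode, ords)
	for doc := range ords {
		fmt.Printf("doc %d -> norm %d\n", doc, norms(doc))
	}
}

With at most 256 table entries an ordinal fits in 8 bits, which is why the real code rejects size > 256 and can pack the ords tightly.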
Example #2
func newCompressingStoredFieldsIndexReader(fieldsIndexIn store.IndexInput,
	si *model.SegmentInfo) (r *CompressingStoredFieldsIndexReader, err error) {

	r = &CompressingStoredFieldsIndexReader{}
	r.maxDoc = si.DocCount()
	r.docBases = make([]int, 0, 16)
	r.startPointers = make([]int64, 0, 16)
	r.avgChunkDocs = make([]int, 0, 16)
	r.avgChunkSizes = make([]int64, 0, 16)
	r.docBasesDeltas = make([]packed.PackedIntsReader, 0, 16)
	r.startPointersDeltas = make([]packed.PackedIntsReader, 0, 16)

	packedIntsVersion, err := fieldsIndexIn.ReadVInt()
	if err != nil {
		return nil, err
	}

	// The index is a sequence of blocks of chunk metadata; a chunk
	// count of 0 marks the end.
	for blockCount := 0; ; blockCount++ {
		numChunks, err := fieldsIndexIn.ReadVInt()
		if err != nil {
			return nil, err
		}
		if numChunks == 0 {
			break
		}

		{ // doc bases
			n, err := fieldsIndexIn.ReadVInt()
			if err != nil {
				return nil, err
			}
			r.docBases = append(r.docBases, int(n))
			n, err = fieldsIndexIn.ReadVInt()
			if err != nil {
				return nil, err
			}
			r.avgChunkDocs = append(r.avgChunkDocs, int(n))
			bitsPerDocBase, err := fieldsIndexIn.ReadVInt()
			if err != nil {
				return nil, err
			}
			if bitsPerDocBase > 32 {
				return nil, fmt.Errorf("corrupted bitsPerDocBase (resource=%v)", fieldsIndexIn)
			}
			pr, err := packed.ReaderNoHeader(fieldsIndexIn, packed.PACKED, packedIntsVersion, numChunks, uint32(bitsPerDocBase))
			if err != nil {
				return nil, err
			}
			r.docBasesDeltas = append(r.docBasesDeltas, pr)
		}

		{ // start pointers
			n, err := fieldsIndexIn.ReadVLong()
			if err != nil {
				return nil, err
			}
			r.startPointers = append(r.startPointers, n)
			n, err = fieldsIndexIn.ReadVLong()
			if err != nil {
				return nil, err
			}
			r.avgChunkSizes = append(r.avgChunkSizes, n)
			bitsPerStartPointer, err := fieldsIndexIn.ReadVInt()
			if err != nil {
				return nil, err
			}
			if bitsPerStartPointer > 64 {
				return nil, fmt.Errorf("corrupted bitsPerStartPointer (resource=%v)", fieldsIndexIn)
			}
			pr, err := packed.ReaderNoHeader(fieldsIndexIn, packed.PACKED, packedIntsVersion, numChunks, uint32(bitsPerStartPointer))
			if err != nil {
				return nil, err
			}
			r.startPointersDeltas = append(r.startPointersDeltas, pr)
		}
	}

	return r, nil
}
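
Each block read above stores an absolute first value, the average per-chunk increment, and packed per-chunk deviations from that average (docBases/avgChunkDocs/docBasesDeltas, and likewise for start pointers). A rough standalone sketch of how a chunk's value would be reconstructed from those three pieces; plain slices stand in for packed.PackedIntsReader, and any sign encoding of the stored deltas is ignored.

package main

import "fmt"

// chunkValue rebuilds one chunk's value from a block's base value, the
// average increment between chunks, and that chunk's deviation from the
// average: base + avg*idx + deltas[idx].
func chunkValue(base, avg int64, deltas []int64, idx int) int64 {
	return base + avg*int64(idx) + deltas[idx]
}

func main() {
	// One block of four chunks whose start pointers are, on average,
	// 1000 bytes apart; the deltas record how far each chunk strays.
	base, avg := int64(4096), int64(1000)
	deltas := []int64{0, 12, -7, 3}
	for i := range deltas {
		fmt.Printf("chunk %d starts at byte %d\n", i, chunkValue(base, avg, deltas, i))
	}
}

Storing only the deviation from a running average keeps the packed values small (few bits per chunk) while still allowing constant-time random access to any chunk's doc base or start pointer.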