func newPackedLongValuesBuilder(pageSize int,
	acceptableOverheadRatio float32) *PackedLongValuesBuilderImpl {

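	// pageSize must be a power of two in [MIN_PAGE_SIZE, MAX_PAGE_SIZE]; checkBlockSize
	// validates it and returns its log2 as the page shift, and pageMask extracts a
	// value's offset within its page.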
	ans := &PackedLongValuesBuilderImpl{
		pageShift:               checkBlockSize(pageSize, MIN_PAGE_SIZE, MAX_PAGE_SIZE),
		pageMask:                pageSize - 1,
		acceptableOverheadRatio: acceptableOverheadRatio,
		values:                  make([]PackedIntsReader, INITIAL_PAGE_COUNT),
		pending:                 make([]int64, pageSize),
	}
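	// RAM accounting: the builder's shallow size plus the backing arrays for the
	// pending buffer and the page readers.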
	ans.ramBytesUsed = util.ShallowSizeOfInstance(reflect.TypeOf(&PackedLongValuesBuilderImpl{})) +
		util.SizeOf(ans.pending) + util.ShallowSizeOf(ans.values)
	return ans
}
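
// freezeBufferedUpdates converts the mutable BufferedUpdates into an immutable
// FrozenBufferedUpdates packet: deleted terms are sorted and prefix-coded,
// delete queries and doc-values updates are flattened into slices, and the
// packet's total RAM usage is estimated.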
func freezeBufferedUpdates(deletes *BufferedUpdates, isPrivate bool) *FrozenBufferedUpdates {
	assert2(!isPrivate || len(deletes.terms) == 0,
		"segment private packet should only have del queries")
	var termsArray []*Term
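	// Copy the deleted terms out of the map, then sort and prefix-code them so the
	// frozen packet stores them compactly.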
	for k := range deletes.terms {
		termsArray = append(termsArray, k)
	}
	util.TimSort(TermSorter(termsArray))
	builder := newPrefixCodedTermsBuilder()
	for _, term := range termsArray {
		builder.add(term)
	}
	terms := builder.finish()

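	// Flatten the delete queries into parallel slices: the query itself and the
	// docID limit up to which it applies.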
	queries := make([]Query, len(deletes.queries))
	queryLimits := make([]int, len(deletes.queries))
	upto := 0
	for k, v := range deletes.queries {
		queries[upto] = k
		queryLimits[upto] = v
		upto++
	}

	// TODO if a Term affects multiple fields, we could keep the updates keyed by Term
	// so that it maps to all fields it affects, sorted by their docUpto, and traverse
	// that Term only once, applying the update to all fields that still need to be
	// updated.
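	// Flatten the per-field numeric doc-values updates into a single slice and
	// total up the bytes they consume.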
	var allNumericUpdates []*DocValuesUpdate
	numericUpdatesSize := 0
	for _, numericUpdates := range deletes.numericUpdates {
		for _, update := range numericUpdates {
			allNumericUpdates = append(allNumericUpdates, update)
			numericUpdatesSize += update.sizeInBytes()
		}
	}

	// TODO if a Term affects multiple fields, we could keep the updates keyed by Term
	// so that it maps to all fields it affects, sorted by their docUpto, and traverse
	// that Term only once, applying the update to all fields that still need to be
	// updated.
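	// Flatten the per-field binary doc-values updates the same way.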
	var allBinaryUpdates []*DocValuesUpdate
	binaryUpdatesSize := 0
	for _, binaryUpdates := range deletes.binaryUpdates {
		for _, update := range binaryUpdates {
			allBinaryUpdates = append(allBinaryUpdates, update)
			binaryUpdatesSize += update.sizeInBytes()
		}
	}

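	// Estimate RAM used by the frozen packet: the prefix-coded terms, a fixed
	// per-query overhead, and the doc-values updates plus their slice headers.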
	bytesUsed := int(terms.RamBytesUsed() +
		int64(len(queries))*BYTES_PER_DEL_QUERY +
		int64(numericUpdatesSize) + util.ShallowSizeOf(allNumericUpdates) +
		int64(binaryUpdatesSize) + util.ShallowSizeOf(allBinaryUpdates))

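	// gen stays -1 until a delete generation is assigned to this packet.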
	return &FrozenBufferedUpdates{
		gen:              -1,
		isSegmentPrivate: isPrivate,
		termCount:        len(termsArray),
		terms:            terms,
		_queries:         queries,
		queryLimits:      queryLimits,
		numericDVUpdates: allNumericUpdates,
		binaryDVUpdates:  allBinaryUpdates,
		bytesUsed:        bytesUsed,
		numTermDeletes:   int(atomic.LoadInt32(&deletes.numTermDeletes)),
	}
}