Example #1
func (self *LevelDbDatastore) fetchSinglePoint(database, series string, fields []*Field,
	query *parser.SelectQuery) (*protocol.Series, error) {
	fieldCount := len(fields)
	fieldNames := make([]string, 0, fieldCount)
	point := &protocol.Point{Values: make([]*protocol.FieldValue, 0, fieldCount)}
	timestampBuffer := bytes.NewBuffer(make([]byte, 0, 8))
	sequenceNumberBuffer := bytes.NewBuffer(make([]byte, 0, 8))
	timestamp := common.TimeToMicroseconds(query.GetStartTime())
	sequenceNumber, err := query.GetSinglePointQuerySequenceNumber()
	if err != nil {
		return nil, err
	}

	binary.Write(timestampBuffer, binary.BigEndian, self.convertTimestampToUint(&timestamp))
	binary.Write(sequenceNumberBuffer, binary.BigEndian, sequenceNumber)
	sequenceNumber_uint64 := uint64(sequenceNumber)
	point.SequenceNumber = &sequenceNumber_uint64
	point.SetTimestampInMicroseconds(timestamp)

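	// look up each field's value by its composite key: field id followed by
	// the big-endian timestamp and sequence number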
	for _, field := range fields {
		pointKey := append(append(field.Id, timestampBuffer.Bytes()...), sequenceNumberBuffer.Bytes()...)

		if data, err := self.db.Get(self.readOptions, pointKey); err != nil {
			return nil, err
		} else {
			fieldValue := &protocol.FieldValue{}
			err := proto.Unmarshal(data, fieldValue)
			if err != nil {
				return nil, err
			}
			if data != nil {
				fieldNames = append(fieldNames, field.Name)
				point.Values = append(point.Values, fieldValue)
			}
		}
	}

	result := &protocol.Series{Name: &series, Fields: fieldNames, Points: []*protocol.Point{point}}

	return result, nil
}
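The key layout is what makes this direct lookup possible: each field value is stored under the field id followed by the 8-byte big-endian timestamp and the 8-byte big-endian sequence number (the same layout that executeQueryForSeries below takes apart with key[8:16] and key[16:]). The following is a minimal, self-contained sketch of that layout; makePointKey and splitPointKey are hypothetical helpers, not part of the datastore, and field ids are assumed to be 8 bytes wide.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// makePointKey mirrors the key construction in fetchSinglePoint: field id,
// then big-endian timestamp, then big-endian sequence number. Big-endian
// keeps lexicographic key order identical to numeric time order, which the
// range iterators in executeQueryForSeries rely on.
func makePointKey(fieldId []byte, timestamp, sequence uint64) []byte {
	key := bytes.NewBuffer(make([]byte, 0, len(fieldId)+16))
	key.Write(fieldId)
	binary.Write(key, binary.BigEndian, timestamp)
	binary.Write(key, binary.BigEndian, sequence)
	return key.Bytes()
}

// splitPointKey is the inverse, matching the key[8:16]/key[16:] slicing in
// executeQueryForSeries (assuming an 8-byte field id).
func splitPointKey(key []byte) (fieldId []byte, timestamp, sequence uint64) {
	fieldId = key[:8]
	timestamp = binary.BigEndian.Uint64(key[8:16])
	sequence = binary.BigEndian.Uint64(key[16:24])
	return
}

func main() {
	key := makePointKey([]byte{0, 0, 0, 0, 0, 0, 0, 42}, 1386262529000000, 1)
	id, ts, seq := splitPointKey(key)
	fmt.Println(id, ts, seq) // [0 0 0 0 0 0 0 42] 1386262529000000 1
}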
Example #2
func (self *LevelDbDatastore) executeQueryForSeries(database, series string, columns []string,
	query *parser.SelectQuery, yield func(*protocol.Series) error,
	ringFilter func(database, series *string, time *int64) bool) error {

	startTimeBytes, endTimeBytes := self.byteArraysForStartAndEndTimes(common.TimeToMicroseconds(query.GetStartTime()), common.TimeToMicroseconds(query.GetEndTime()))
	emptyResult := &protocol.Series{Name: &series, Points: nil}

	fields, err := self.getFieldsForSeries(database, series, columns)
	if err != nil {
		// because a db is distributed across the cluster, it's possible we don't have the series indexed here. ignore
		switch err := err.(type) {
		case FieldLookupError:
			return yield(emptyResult)
		default:
			return err
		}
	}

	fieldCount := len(fields)
	rawColumnValues := make([]*rawColumnValue, fieldCount, fieldCount)

	if query.IsSinglePointQuery() {
		result, err := self.fetchSinglePoint(database, series, fields, query)
		if err != nil {
			return err
		}

		if err := yield(result); err != nil {
			return err
		}
		return nil
	}

	fieldNames, iterators := self.getIterators(fields, startTimeBytes, endTimeBytes, query.Ascending)
	result := &protocol.Series{Name: &series, Fields: fieldNames, Points: make([]*protocol.Point, 0)}

	limit := query.Limit
	shouldLimit := true
	if limit == 0 {
		limit = -1
		shouldLimit = false
	}
	resultByteCount := 0

	// TODO: clean up, this is super gnarly
	// optimize for the case where we're pulling back only a single column or aggregate
	for {
		isValid := false
		point := &protocol.Point{Values: make([]*protocol.FieldValue, fieldCount, fieldCount)}

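		// read the next key/value pair for every column whose buffered raw
		// value has been consumed and whose iterator is still valid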
		for i, it := range iterators {
			if rawColumnValues[i] != nil || !it.Valid() {
				continue
			}

			key := it.Key()
			if len(key) < 16 {
				continue
			}

			if !isPointInRange(fields[i].Id, startTimeBytes, endTimeBytes, key) {
				continue
			}

			value := it.Value()
			sequenceNumber := key[16:]

			rawTime := key[8:16]
			rawValue := &rawColumnValue{time: rawTime, sequence: sequenceNumber, value: value}
			rawColumnValues[i] = rawValue
		}

		var pointTimeRaw []byte
		var pointSequenceRaw []byte
		// choose the highest (or lowest in case of ascending queries) timestamp
		// and sequence number. that will become the timestamp and sequence of
		// the next point.
		for _, value := range rawColumnValues {
			if value == nil {
				continue
			}

			pointTimeRaw, pointSequenceRaw = value.updatePointTimeAndSequence(pointTimeRaw,
				pointSequenceRaw, query.Ascending)
		}

		for i, iterator := range iterators {
			// if the value is nil or doesn't match the point's timestamp and sequence number
			// then skip it
			if rawColumnValues[i] == nil ||
				!bytes.Equal(rawColumnValues[i].time, pointTimeRaw) ||
				!bytes.Equal(rawColumnValues[i].sequence, pointSequenceRaw) {

				point.Values[i] = &protocol.FieldValue{IsNull: &TRUE}
				continue
			}

			// if we emitted at least one column, then we should keep
			// trying to get more points
			isValid = true

			// advance the iterator to read a new value in the next iteration
			if query.Ascending {
				iterator.Next()
			} else {
				iterator.Prev()
			}

			fv := &protocol.FieldValue{}
			resultByteCount += len(rawColumnValues[i].value)
			err := proto.Unmarshal(rawColumnValues[i].value, fv)
			if err != nil {
				return err
			}
			point.Values[i] = fv
			rawColumnValues[i] = nil
		}

		var sequence uint64
		// set the point sequence number and timestamp
		binary.Read(bytes.NewBuffer(pointSequenceRaw), binary.BigEndian, &sequence)
		var t uint64
		binary.Read(bytes.NewBuffer(pointTimeRaw), binary.BigEndian, &t)
		time := self.convertUintTimestampToInt64(&t)
		point.SetTimestampInMicroseconds(time)
		point.SequenceNumber = &sequence

		// stop the loop if we ran out of points
		if !isValid {
			break
		}

		limit -= 1

		if ringFilter != nil && ringFilter(&database, &series, point.Timestamp) {
			continue
		}

		result.Points = append(result.Points, point)

		// add byte count for the timestamp and the sequence
		resultByteCount += 16

		// check if we should send the batch along
		if resultByteCount > MAX_SERIES_SIZE || (shouldLimit && limit == 0) {
			dropped, err := self.sendBatch(query, result, yield)
			if err != nil {
				return err
			}
			limit += dropped
			resultByteCount = 0
			result = &protocol.Series{Name: &series, Fields: fieldNames, Points: make([]*protocol.Point, 0)}
		}
		if shouldLimit && limit < 1 {
			break
		}
	}
	if _, err := self.sendBatch(query, result, yield); err != nil {
		return err
	}
	_, err = self.sendBatch(query, emptyResult, yield)
	return err
}
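The merge loop above depends on rawColumnValue.updatePointTimeAndSequence to decide which timestamp and sequence number the next emitted point gets; that method's body is not shown in this example. A plausible sketch is below, under the assumption that it compares the raw big-endian bytes directly (so byte order equals numeric order): ascending queries keep the smallest candidate seen so far, descending queries the largest.

package main

import (
	"bytes"
	"fmt"
)

// rawColumnValue mirrors the struct used above: raw (big-endian) time and
// sequence bytes plus the still-marshalled field value.
type rawColumnValue struct {
	time     []byte
	sequence []byte
	value    []byte
}

// updatePointTimeAndSequence is an assumed implementation: keep the currently
// chosen time/sequence unless this column's candidate is smaller (ascending)
// or larger (descending). Because the keys are big-endian, bytes.Compare
// yields the same ordering as comparing the decoded integers.
func (r *rawColumnValue) updatePointTimeAndSequence(currentTime, currentSequence []byte, ascending bool) ([]byte, []byte) {
	if currentTime == nil {
		return r.time, r.sequence
	}
	cmp := bytes.Compare(r.time, currentTime)
	if cmp == 0 {
		cmp = bytes.Compare(r.sequence, currentSequence)
	}
	if (ascending && cmp < 0) || (!ascending && cmp > 0) {
		return r.time, r.sequence
	}
	return currentTime, currentSequence
}

func main() {
	a := &rawColumnValue{time: []byte{0, 0, 0, 0, 0, 0, 0, 1}, sequence: []byte{0, 0, 0, 0, 0, 0, 0, 9}}
	b := &rawColumnValue{time: []byte{0, 0, 0, 0, 0, 0, 0, 2}, sequence: []byte{0, 0, 0, 0, 0, 0, 0, 1}}
	t, s := a.updatePointTimeAndSequence(nil, nil, true)
	t, s = b.updatePointTimeAndSequence(t, s, true)
	fmt.Println(t, s) // ascending: a's earlier timestamp and its sequence win
}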