Example #1
File: engine.go  Project: rramos/influxdb
// Returns false if the query should be stopped (either because of limit or error)
func (self *QueryEngine) YieldPoint(seriesName *string, fieldNames []string, point *protocol.Point) (shouldContinue bool) {
	shouldContinue = true
	series := self.seriesToPoints[*seriesName]
	if series == nil {
		series = &protocol.Series{Name: protocol.String(*seriesName), Fields: fieldNames, Points: make([]*protocol.Point, 0, POINT_BATCH_SIZE)}
		self.seriesToPoints[*seriesName] = series
	} else if len(series.Points) >= POINT_BATCH_SIZE {
		shouldContinue = self.yieldSeriesData(series)
		series = &protocol.Series{Name: protocol.String(*seriesName), Fields: fieldNames, Points: make([]*protocol.Point, 0, POINT_BATCH_SIZE)}
		self.seriesToPoints[*seriesName] = series
	}
	series.Points = append(series.Points, point)

	return shouldContinue
}
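A note on the helper every example on this page leans on: in Go protobuf code, optional scalar fields are pointer-typed, so a string literal cannot be assigned to them directly. Helpers like protocol.String (and proto.String in the protobuf runtime) just return the address of their argument. A minimal, self-contained sketch of that pattern, using a hypothetical Request struct instead of the real generated influxdb message:

package main

import "fmt"

// String returns a pointer to v, mirroring what helpers such as protocol.String do.
func String(v string) *string { return &v }

// Request stands in for a generated protobuf message with optional (pointer) fields.
type Request struct {
	Database *string
}

func main() {
	req := &Request{Database: String("some_db")}
	fmt.Println(*req.Database) // prints: some_db
}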
Example #2
func (self *ClusterServer) heartbeat() {
	defer func() {
		self.heartbeatStarted = false
	}()

	responseChan := make(chan *protocol.Response)
	heartbeatRequest := &protocol.Request{
		Type:     &HEARTBEAT_TYPE,
		Database: protocol.String(""),
	}
	for {
		heartbeatRequest.Id = nil
		self.MakeRequest(heartbeatRequest, responseChan)
		err := self.getHeartbeatResponse(responseChan)
		if err != nil {
			self.handleHeartbeatError(err)
			continue
		}

		// otherwise, reset the backoff and mark the server as up
		self.isUp = true
		self.Backoff = DEFAULT_BACKOFF
		<-time.After(self.HeartbeatInterval)
	}
}
Example #3
func (self *ClusterServer) heartbeat() {
	defer func() {
		self.heartbeatStarted = false
	}()

	heartbeatRequest := &protocol.Request{
		Type:     &HEARTBEAT_TYPE,
		Database: protocol.String(""),
	}
	for {
		// this chan is buffered and in the loop on purpose. This is so
		// that if reading a heartbeat times out, and the heartbeat then comes through
		// later, it will be dumped into this chan and not block the protobuf client reader.
		responseChan := make(chan *protocol.Response, 1)
		heartbeatRequest.Id = nil
		self.MakeRequest(heartbeatRequest, responseChan)
		err := self.getHeartbeatResponse(responseChan)
		if err != nil {
			self.handleHeartbeatError(err)
			continue
		}

		if !self.isUp {
			log.Warn("Server marked as up. Hearbeat succeeded")
		}
		// otherwise, reset the backoff and mark the server as up
		self.isUp = true
		self.Backoff = self.MinBackoff
		<-time.After(self.HeartbeatInterval)
	}
}
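Example #3 differs from Example #2 mainly in allocating a fresh responseChan with capacity 1 inside the loop, for the reason the comment gives: if reading the heartbeat times out, a response that arrives later can still be dropped into the buffer without blocking the goroutine that delivers it. A minimal, self-contained sketch of that behavior (the names here are hypothetical, not the influxdb API):

package main

import (
	"fmt"
	"time"
)

// deliver simulates the protobuf client handing back a late response.
func deliver(responseChan chan string, delay time.Duration) {
	time.Sleep(delay)
	responseChan <- "pong" // does not block: capacity 1 and nobody has to receive
	fmt.Println("late response delivered without blocking")
}

func main() {
	responseChan := make(chan string, 1)
	go deliver(responseChan, 200*time.Millisecond)

	select {
	case r := <-responseChan:
		fmt.Println("got", r)
	case <-time.After(50 * time.Millisecond):
		fmt.Println("heartbeat timed out")
	}
	time.Sleep(300 * time.Millisecond) // give the late delivery time to complete
}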
Example #4
File: wal_test.go  Project: sploit/influxdb
func (_ *WalSuite) TestRecoverWithNonWriteRequests(c *C) {
	wal := newWal(c)
	requestType := protocol.Request_QUERY
	request := &protocol.Request{
		Type:     &requestType,
		Database: protocol.String("some_db"),
	}
	wal.AssignSequenceNumbersAndLog(request, &MockServer{id: 1})
	c.Assert(wal.Close(), IsNil)
	wal, err := NewWAL(wal.config)
	c.Assert(err, IsNil)
	wal.SetServerId(1)
}
Example #5
func (self *ProtobufRequestHandler) handleWrites(request *protocol.Request, conn net.Conn) {
	shard := self.clusterConfig.GetLocalShardById(*request.ShardId)
	log.Debug("HANDLE: (%d):%d:%v", self.clusterConfig.LocalServer.Id, request.GetId(), shard)
	err := shard.WriteLocalOnly(request)
	var errorMsg *string
	if err != nil {
		log.Error("ProtobufRequestHandler: error writing local shard: %s", err)
		errorMsg = protocol.String(err.Error())
	}
	response := &protocol.Response{RequestId: request.Id, Type: &self.writeOk, ErrorMessage: errorMsg}
	if err := self.WriteResponse(conn, response); err != nil {
		log.Error("ProtobufRequestHandler: error writing local shard: %s", err)
	}
}
Example #6
func BenchmarkSingle(b *testing.B) {
	var HEARTBEAT_TYPE = protocol.Request_HEARTBEAT
	prs := FakeHearbeatServer()
	client := NewProtobufClient(prs.Listener.Addr().String(), time.Second)
	client.Connect()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		responseChan := make(chan *protocol.Response, 1)
		heartbeatRequest := &protocol.Request{
			Type:     &HEARTBEAT_TYPE,
			Database: protocol.String(""),
		}
		client.MakeRequest(heartbeatRequest, responseChan)
		<-responseChan
	}
}
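Example #6 measures a single request/response round trip and keeps the connection setup out of the measurement by calling b.ResetTimer after Connect. A minimal sketch of that benchmark structure (the work inside the loop is a hypothetical stand-in; this would live in a *_test.go file):

package example

import (
	"testing"
	"time"
)

// BenchmarkWithSetup mirrors the shape of Example #6: expensive setup runs
// before b.ResetTimer, so the timer only covers the loop body.
func BenchmarkWithSetup(b *testing.B) {
	time.Sleep(10 * time.Millisecond) // stand-in for dialing the server
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ch := make(chan struct{}, 1) // stand-in for one request/response round trip
		ch <- struct{}{}
		<-ch
	}
}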
Example #7
func ConvertToDataStoreSeries(s ApiSeries, precision TimePrecision) (*protocol.Series, error) {
	points := []*protocol.Point{}
	for _, point := range s.GetPoints() {
		values := []*protocol.FieldValue{}
		var timestamp *int64
		var sequence *uint64

		for idx, field := range s.GetColumns() {
			value := point[idx]
			if field == "time" {
				switch value.(type) {
				case float64:
					_timestamp := int64(value.(float64))
					switch precision {
					case SecondPrecision:
						_timestamp *= 1000
						fallthrough
					case MillisecondPrecision:
						_timestamp *= 1000
					}

					timestamp = &_timestamp
					continue
				default:
					return nil, fmt.Errorf("time field must be float but is %T (%v)", value, value)
				}
			}

			if field == "sequence_number" {
				switch value.(type) {
				case float64:
					_sequenceNumber := uint64(value.(float64))
					sequence = &_sequenceNumber
					continue
				default:
					return nil, fmt.Errorf("sequence_number field must be float but is %T (%v)", value, value)
				}
			}

			switch v := value.(type) {
			case string:
				values = append(values, &protocol.FieldValue{StringValue: &v})
			case float64:
				if i := int64(v); float64(i) == v {
					values = append(values, &protocol.FieldValue{Int64Value: &i})
				} else {
					values = append(values, &protocol.FieldValue{DoubleValue: &v})
				}
			case bool:
				values = append(values, &protocol.FieldValue{BoolValue: &v})
			case nil:
				values = append(values, &protocol.FieldValue{IsNull: &TRUE})
			default:
				// if we reached this line then the dynamic type didn't match
				return nil, fmt.Errorf("Unknown type %T", value)
			}
		}
		points = append(points, &protocol.Point{
			Values:         values,
			Timestamp:      timestamp,
			SequenceNumber: sequence,
		})
	}

	fields := removeTimestampFieldDefinition(s.GetColumns())

	series := &protocol.Series{
		Name:   protocol.String(s.GetName()),
		Fields: fields,
		Points: points,
	}
	return series, nil
}
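Example #7 receives every JSON number as a float64 and stores it as an Int64Value only when the value survives a round trip through int64 unchanged; anything else becomes a DoubleValue. A minimal sketch of just that classification step:

package main

import "fmt"

// classify reports how ConvertToDataStoreSeries would encode a float64 value.
func classify(v float64) string {
	if i := int64(v); float64(i) == v {
		return fmt.Sprintf("Int64Value %d", i)
	}
	return fmt.Sprintf("DoubleValue %g", v)
}

func main() {
	fmt.Println(classify(3))    // Int64Value 3
	fmt.Println(classify(3.25)) // DoubleValue 3.25
	fmt.Println(classify(1e15)) // Int64Value 1000000000000000
}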
Example #8
File: trie_test.go  Project: 9cat/influxdb
func (self *TrieTestSuite) TestTrie(c *C) {
	trie := NewTrie(2, 1)

	firstValue := []*protocol.FieldValue{
		&protocol.FieldValue{StringValue: protocol.String("some_value")},
		&protocol.FieldValue{Int64Value: protocol.Int64(1)},
	}
	firstNode := trie.GetNode(firstValue)
	c.Assert(firstNode, NotNil)
	c.Assert(trie.GetNode(firstValue), DeepEquals, firstNode)
	c.Assert(trie.CountLeafNodes(), Equals, 1)

	secondValue := []*protocol.FieldValue{
		&protocol.FieldValue{StringValue: protocol.String("some_value")},
		&protocol.FieldValue{Int64Value: protocol.Int64(2)},
	}
	secondNode := trie.GetNode(secondValue)
	c.Assert(secondNode, NotNil)
	c.Assert(trie.GetNode(secondValue), DeepEquals, secondNode)
	c.Assert(trie.CountLeafNodes(), Equals, 2)

	thirdValue := []*protocol.FieldValue{
		&protocol.FieldValue{StringValue: protocol.String("another_value")},
		&protocol.FieldValue{Int64Value: protocol.Int64(1)},
	}
	thirdNode := trie.GetNode(thirdValue)
	c.Assert(thirdNode, NotNil)
	c.Assert(trie.GetNode(thirdValue), DeepEquals, thirdNode)
	c.Assert(trie.CountLeafNodes(), Equals, 3)

	nodes := 0
	orderValues := [][]*protocol.FieldValue{thirdValue, firstValue, secondValue}
	c.Assert(trie.Traverse(func(v []*protocol.FieldValue, _ *Node) error {
		c.Assert(v, DeepEquals, orderValues[nodes])
		nodes++
		return nil
	}), IsNil)
	c.Assert(nodes, Equals, trie.CountLeafNodes())

	// make sure TraverseLevel work as expected
	ns := []*Node{}
	c.Assert(trie.TraverseLevel(0, func(_ []*protocol.FieldValue, n *Node) error {
		ns = append(ns, n)
		return nil
	}), IsNil)
	c.Assert(ns, HasLen, 1) // should return the root node only
	c.Assert(ns[0].value, IsNil)

	ns = []*Node{}
	c.Assert(trie.TraverseLevel(1, func(_ []*protocol.FieldValue, n *Node) error {
		ns = append(ns, n)
		return nil
	}), IsNil)
	c.Assert(ns, HasLen, 2) // should return the two first-level nodes
	c.Assert(ns[0].value.GetStringValue(), Equals, "another_value")
	c.Assert(ns[1].value.GetStringValue(), Equals, "some_value")

	c.Assert(ns[0].GetChildNode(&protocol.FieldValue{Int64Value: protocol.Int64(1)}).isLeaf, Equals, true)
	c.Assert(ns[0].GetChildNode(&protocol.FieldValue{Int64Value: protocol.Int64(2)}), IsNil)

	c.Assert(ns[1].GetChildNode(&protocol.FieldValue{Int64Value: protocol.Int64(1)}).isLeaf, Equals, true)
	c.Assert(ns[1].GetChildNode(&protocol.FieldValue{Int64Value: protocol.Int64(2)}).isLeaf, Equals, true)
}
Example #9
File: shard.go  Project: hanshenu/influxdb
func (self *Shard) executeQueryForSeries(querySpec *parser.QuerySpec, seriesName string, columns []string, processor cluster.QueryProcessor) error {
	startTimeBytes := self.byteArrayForTime(querySpec.GetStartTime())
	endTimeBytes := self.byteArrayForTime(querySpec.GetEndTime())

	fields, err := self.getFieldsForSeries(querySpec.Database(), seriesName, columns)
	if err != nil {
		// because a db is distributed across the cluster, it's possible we don't have the series indexed here. ignore
		switch err := err.(type) {
		case FieldLookupError:
			log.Debug("Cannot find fields %v", columns)
			return nil
		default:
			log.Error("Error looking up fields for %s: %s", seriesName, err)
			return fmt.Errorf("Error looking up fields for %s: %s", seriesName, err)
		}
	}

	fieldCount := len(fields)
	rawColumnValues := make([]rawColumnValue, fieldCount, fieldCount)
	query := querySpec.SelectQuery()

	aliases := query.GetTableAliases(seriesName)
	if querySpec.IsSinglePointQuery() {
		series, err := self.fetchSinglePoint(querySpec, seriesName, fields)
		if err != nil {
			log.Error("Error reading a single point: %s", err)
			return err
		}
		if len(series.Points) > 0 {
			processor.YieldPoint(series.Name, series.Fields, series.Points[0])
		}
		return nil
	}

	fieldNames, iterators := self.getIterators(fields, startTimeBytes, endTimeBytes, query.Ascending)
	defer func() {
		for _, it := range iterators {
			it.Close()
		}
	}()

	seriesOutgoing := &protocol.Series{Name: protocol.String(seriesName), Fields: fieldNames, Points: make([]*protocol.Point, 0, self.pointBatchSize)}

	// TODO: clean up, this is super gnarly
	// optimize for the case where we're pulling back only a single column or aggregate
	buffer := bytes.NewBuffer(nil)
	valueBuffer := proto.NewBuffer(nil)
	for {
		isValid := false
		point := &protocol.Point{Values: make([]*protocol.FieldValue, fieldCount, fieldCount)}

		for i, it := range iterators {
			if rawColumnValues[i].value != nil || !it.Valid() {
				if err := it.Error(); err != nil {
					return err
				}
				continue
			}

			key := it.Key()
			if len(key) < 16 {
				continue
			}

			if !isPointInRange(fields[i].Id, startTimeBytes, endTimeBytes, key) {
				continue
			}

			value := it.Value()
			sequenceNumber := key[16:]

			rawTime := key[8:16]
			rawColumnValues[i] = rawColumnValue{time: rawTime, sequence: sequenceNumber, value: value}
		}

		var pointTimeRaw []byte
		var pointSequenceRaw []byte
		// choose the highest (or lowest in case of ascending queries) timestamp
		// and sequence number. that will become the timestamp and sequence of
		// the next point.
		for _, value := range rawColumnValues {
			if value.value == nil {
				continue
			}

			pointTimeRaw, pointSequenceRaw = value.updatePointTimeAndSequence(pointTimeRaw,
				pointSequenceRaw, query.Ascending)
		}

		for i, iterator := range iterators {
			// if the value is nil or doesn't match the point's timestamp and sequence number
			// then skip it
			if rawColumnValues[i].value == nil ||
				!bytes.Equal(rawColumnValues[i].time, pointTimeRaw) ||
				!bytes.Equal(rawColumnValues[i].sequence, pointSequenceRaw) {

				point.Values[i] = &protocol.FieldValue{IsNull: &TRUE}
				continue
			}

			// if we emitted at least one column, then we should keep
			// trying to get more points
			isValid = true

			// advance the iterator to read a new value in the next iteration
			if query.Ascending {
				iterator.Next()
			} else {
				iterator.Prev()
			}

			fv := &protocol.FieldValue{}
			valueBuffer.SetBuf(rawColumnValues[i].value)
			err := valueBuffer.Unmarshal(fv)
			if err != nil {
				log.Error("Error while running query: %s", err)
				return err
			}
			point.Values[i] = fv
			rawColumnValues[i].value = nil
		}

		var sequence uint64
		var t uint64

		// set the point sequence number and timestamp
		buffer.Reset()
		buffer.Write(pointSequenceRaw)
		binary.Read(buffer, binary.BigEndian, &sequence)
		buffer.Reset()
		buffer.Write(pointTimeRaw)
		binary.Read(buffer, binary.BigEndian, &t)

		time := self.convertUintTimestampToInt64(&t)
		point.SetTimestampInMicroseconds(time)
		point.SequenceNumber = &sequence

		// stop the loop if we ran out of points
		if !isValid {
			break
		}

		shouldContinue := true

		seriesOutgoing.Points = append(seriesOutgoing.Points, point)

		if len(seriesOutgoing.Points) >= self.pointBatchSize {
			for _, alias := range aliases {
				series := &protocol.Series{
					Name:   proto.String(alias),
					Fields: fieldNames,
					Points: seriesOutgoing.Points,
				}
				if !processor.YieldSeries(series) {
					log.Info("Stopping processing")
					shouldContinue = false
				}
			}
			seriesOutgoing = &protocol.Series{Name: protocol.String(seriesName), Fields: fieldNames, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
		}

		if !shouldContinue {
			break
		}
	}

	// Yield remaining data
	for _, alias := range aliases {
		log.Debug("Final Flush %s", alias)
		series := &protocol.Series{Name: protocol.String(alias), Fields: seriesOutgoing.Fields, Points: seriesOutgoing.Points}
		if !processor.YieldSeries(series) {
			log.Debug("Cancelled...")
		}
	}

	log.Debug("Finished running query %s", query.GetQueryString())
	return nil
}
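Example #9 pulls the point timestamp and sequence number out of the storage key with a bytes.Buffer and binary.Read. The slicing (key[8:16] for the time, key[16:] for the sequence number, plus the len(key) < 16 guard) suggests a fixed-width big-endian key layout with an 8-byte field id prefix; that prefix is an assumption read off the length check, not something the example states. A minimal sketch of the same decoding using binary.BigEndian directly:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Build a 24-byte key with the assumed layout:
	// bytes 0-8 field id, bytes 8-16 timestamp, bytes 16-24 sequence number.
	key := make([]byte, 24)
	binary.BigEndian.PutUint64(key[0:8], 42)
	binary.BigEndian.PutUint64(key[8:16], 1700000000)
	binary.BigEndian.PutUint64(key[16:24], 7)

	t := binary.BigEndian.Uint64(key[8:16])
	seq := binary.BigEndian.Uint64(key[16:24])
	fmt.Println("time:", t, "sequence:", seq)
}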
Example #10
File: shard.go  Project: jhermann/influxdb
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryString())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryString(), func(err interface{}) {
		response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(fmt.Sprintf("%s", err))}
	})

	// This is only for queries that are deletes or drops. They need to be sent everywhere as opposed to just the local or one of the remote shards.
	// But this boolean should only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	if self.IsLocal {
		var processor QueryProcessor
		var err error

		if querySpec.IsListSeriesQuery() {
			processor = engine.NewListSeriesEngine(response)
		} else if querySpec.IsDeleteFromSeriesQuery() || querySpec.IsDropSeriesQuery() || querySpec.IsSinglePointQuery() {
			maxDeleteResults := 10000
			processor = engine.NewPassthroughEngine(response, maxDeleteResults)
		} else {
			query := querySpec.SelectQuery()
			if self.ShouldAggregateLocally(querySpec) {
				log.Debug("creating a query engine")
				processor, err = engine.NewQueryEngine(query, response)
				if err != nil {
					response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
					log.Error("Error while creating engine: %s", err)
					return
				}
				processor.SetShardInfo(int(self.Id()), self.IsLocal)
			} else if query.HasAggregates() {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine")
				processor = engine.NewPassthroughEngine(response, maxPointsToBufferBeforeSending)
			} else {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine with limit")
				processor = engine.NewPassthroughEngineWithLimit(response, maxPointsToBufferBeforeSending, query.Limit)
			}

			if query.GetFromClause().Type != parser.FromClauseInnerJoin {
				// Joins do their own filtering since we need to get all
				// points before filtering. This is due to the fact that some
				// where expressions will be difficult to compute before the
				// points are joined together, think where clause with
				// left.column = 'something' or right.column =
				// 'something_else'. We can't filter the individual series
				// separately. The filtering happens in merge.go:55

				processor = engine.NewFilteringEngine(query, processor)
			}
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)
		err = shard.Query(querySpec, processor)
		// if we call Close() in case of an error it will mask the error
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			return
		}
		processor.Close()
		response <- &p.Response{Type: &endStreamResponse}
		return
	}

	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}

	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{Type: &endStreamResponse, ErrorMessage: &message}
	log.Error(message)
}
Example #11
File: shard.go  Project: qq101/influxdb
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan *p.Response) {
	// This is only for queries that are deletes or drops. They need to be sent everywhere as opposed to just the local or one of the remote shards.
	// But this boolean should only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	if self.IsLocal {
		var processor QueryProcessor
		var err error

		if querySpec.IsListSeriesQuery() {
			processor = engine.NewListSeriesEngine(response)
		} else if querySpec.IsDeleteFromSeriesQuery() || querySpec.IsDropSeriesQuery() || querySpec.IsSinglePointQuery() {
			maxDeleteResults := 10000
			processor = engine.NewPassthroughEngine(response, maxDeleteResults)
		} else {
			query := querySpec.SelectQuery()
			if self.ShouldAggregateLocally(querySpec) {
				log.Debug("creating a query engine\n")
				processor, err = engine.NewQueryEngine(query, response)
				if err != nil {
					response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
					log.Error("Error while creating engine: %s", err)
					return
				}
				processor.SetShardInfo(int(self.Id()), self.IsLocal)
			} else if query.HasAggregates() {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine\n")
				processor = engine.NewPassthroughEngine(response, maxPointsToBufferBeforeSending)
			} else {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine with limit\n")
				processor = engine.NewPassthroughEngineWithLimit(response, maxPointsToBufferBeforeSending, query.Limit)
			}
			processor = engine.NewFilteringEngine(query, processor)
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)
		err = shard.Query(querySpec, processor)
		processor.Close()
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
		}
		response <- &p.Response{Type: &endStreamResponse}
		return
	}

	healthyServers := make([]*ClusterServer, 0, len(self.clusterServers))
	for _, s := range self.clusterServers {
		if !s.IsUp() {
			continue
		}
		healthyServers = append(healthyServers, s)
	}
	healthyCount := len(healthyServers)
	if healthyCount == 0 {
		message := fmt.Sprintf("No servers up to query shard %d", self.id)
		response <- &p.Response{Type: &endStreamResponse, ErrorMessage: &message}
		log.Error(message)
		return
	}
	randServerIndex := int(time.Now().UnixNano() % int64(healthyCount))
	server := healthyServers[randServerIndex]
	request := self.createRequest(querySpec)

	server.MakeRequest(request, response)
}
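Examples #10 and #11 differ mainly in how they pick a remote server: #11 filters the cluster servers inline and indexes into the healthy slice with time.Now().UnixNano() modulo the healthy count, while #10 hides the same idea behind randomHealthyServer. A minimal, self-contained sketch of what such a helper presumably does (Server is a hypothetical stand-in for ClusterServer):

package main

import (
	"fmt"
	"math/rand"
)

type Server struct {
	ID int
	Up bool
}

// randomHealthyServer returns a random server that reports up, or nil if none do.
func randomHealthyServer(servers []*Server) *Server {
	healthy := make([]*Server, 0, len(servers))
	for _, s := range servers {
		if s.Up {
			healthy = append(healthy, s)
		}
	}
	if len(healthy) == 0 {
		return nil
	}
	return healthy[rand.Intn(len(healthy))]
}

func main() {
	servers := []*Server{{ID: 1, Up: false}, {ID: 2, Up: true}, {ID: 3, Up: true}}
	if s := randomHealthyServer(servers); s != nil {
		fmt.Println("querying server", s.ID)
	} else {
		fmt.Println("no servers up")
	}
}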
Example #12
func ConvertToDataStoreSeries(s ApiSeries, precision TimePrecision) (*protocol.Series, error) {
	points := make([]*protocol.Point, 0, len(s.GetPoints()))
	for _, point := range s.GetPoints() {
		if len(point) != len(s.GetColumns()) {
			return nil, fmt.Errorf("invalid payload")
		}

		values := make([]*protocol.FieldValue, 0, len(point))
		var timestamp *int64
		var sequence *uint64

		for idx, field := range s.GetColumns() {

			value := point[idx]
			if field == "time" {
				switch x := value.(type) {
				case json.Number:
					f, err := x.Float64()
					if err != nil {
						return nil, err
					}
					_timestamp := int64(f)
					switch precision {
					case SecondPrecision:
						_timestamp *= 1000
						fallthrough
					case MillisecondPrecision:
						_timestamp *= 1000
					}

					timestamp = &_timestamp
					continue
				default:
					return nil, fmt.Errorf("time field must be float but is %T (%v)", value, value)
				}
			}

			if field == "sequence_number" {
				switch x := value.(type) {
				case json.Number:
					f, err := x.Float64()
					if err != nil {
						return nil, err
					}
					_sequenceNumber := uint64(f)
					sequence = &_sequenceNumber
					continue
				default:
					return nil, fmt.Errorf("sequence_number field must be float but is %T (%v)", value, value)
				}
			}

			switch v := value.(type) {
			case string:
				values = append(values, &protocol.FieldValue{StringValue: &v})
			case json.Number:
				i, err := v.Int64()
				if err == nil {
					values = append(values, &protocol.FieldValue{Int64Value: &i})
					break
				}
				f, err := v.Float64()
				if err != nil {
					return nil, err
				}
				values = append(values, &protocol.FieldValue{DoubleValue: &f})
			case bool:
				values = append(values, &protocol.FieldValue{BoolValue: &v})
			case nil:
				values = append(values, &protocol.FieldValue{IsNull: &TRUE})
			default:
				// if we reached this line then the dynamic type didn't match
				return nil, fmt.Errorf("Unknown type %T", value)
			}
		}
		points = append(points, &protocol.Point{
			Values:         values,
			Timestamp:      timestamp,
			SequenceNumber: sequence,
		})
	}

	fields := removeTimestampFieldDefinition(s.GetColumns())

	series := &protocol.Series{
		Name:   protocol.String(s.GetName()),
		Fields: fields,
		Points: points,
	}
	return series, nil
}
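Example #12 is the json.Number variant of Example #7: instead of float64, which silently loses precision for integers above 2^53, the values arrive as json.Number, which typically means the request body was decoded with json.Decoder.UseNumber(). A minimal sketch of that decoding side and of telling integers apart from floats:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	payload := []byte(`[[9223372036854775807, 1.5]]`)

	var points [][]interface{}
	dec := json.NewDecoder(bytes.NewReader(payload))
	dec.UseNumber() // numbers decode as json.Number instead of float64
	if err := dec.Decode(&points); err != nil {
		panic(err)
	}

	for _, v := range points[0] {
		n := v.(json.Number)
		if i, err := n.Int64(); err == nil {
			fmt.Println("Int64Value:", i) // 9223372036854775807 keeps full precision
			continue
		}
		f, _ := n.Float64()
		fmt.Println("DoubleValue:", f)
	}
}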