Code example #1
File: passthrough_engine.go  Project: 9cat/influxdb
func (self *PassthroughEngine) YieldSeries(seriesIncoming *protocol.Series) bool {
	log.Debug("PassthroughEngine YieldSeries %d", len(seriesIncoming.Points))
	if *seriesIncoming.Name == "explain query" {
		self.responseType = &explainQueryResponse
		log.Debug("Response Changed!")
	} else {
		self.responseType = &queryResponse
	}

	// let the limiter enforce the query's LIMIT: it may slice off excess
	// points, and if nothing remains there is nothing to forward
	self.limiter.calculateLimitAndSlicePoints(seriesIncoming)
	if len(seriesIncoming.Points) == 0 {
		log.Debug("Not sent == 0")
		return false
	}

	if self.response == nil {
		// first series seen: start a new buffered response
		self.response = &protocol.Response{
			Type:   self.responseType,
			Series: seriesIncoming,
		}
	} else if self.response.Series.GetName() != seriesIncoming.GetName() {
		// series name changed: flush the buffered response and start a new one
		self.responseChan <- self.response
		self.response = &protocol.Response{
			Type:   self.responseType,
			Series: seriesIncoming,
		}
	} else if len(self.response.Series.Points) > self.maxPointsInResponse {
		// buffered response is full: flush it and start a new one
		self.responseChan <- self.response
		self.response = &protocol.Response{
			Type:   self.responseType,
			Series: seriesIncoming,
		}
	} else {
		// same series and still under the size limit: merge the new points in
		self.response.Series = common.MergeSeries(self.response.Series, seriesIncoming)
	}
	return !self.limiter.hitLimit(seriesIncoming.GetName())
	//return true
}
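
This engine buffers incoming series into a single protocol.Response and only flushes it to responseChan when the series name changes or the buffered response grows past maxPointsInResponse; otherwise it folds the new points in with common.MergeSeries. Below is a minimal, self-contained sketch of that merge-or-flush batching. The types and the mergeSeries helper are simplified stand-ins (their shape is only inferred from the calls above), not the real protocol and common packages.

package main

import "fmt"

// Simplified stand-in for protocol.Series; the real type is protobuf-generated.
type Point struct{ Timestamp int64 }

type Series struct {
	Name   string
	Points []*Point
}

// mergeSeries is a hypothetical, simplified counterpart of common.MergeSeries:
// it appends b's points onto a, assuming both series share a name and fields.
func mergeSeries(a, b *Series) *Series {
	a.Points = append(a.Points, b.Points...)
	return a
}

func main() {
	const maxPointsInResponse = 4
	out := make(chan *Series, 8)
	var buffered *Series

	yield := func(incoming *Series) {
		switch {
		case buffered == nil:
			buffered = incoming
		case buffered.Name != incoming.Name,
			len(buffered.Points) > maxPointsInResponse:
			// name changed or buffer full: flush and start over
			out <- buffered
			buffered = incoming
		default:
			// same series, still small enough: merge the points in
			buffered = mergeSeries(buffered, incoming)
		}
	}

	yield(&Series{Name: "cpu", Points: []*Point{{1}, {2}}})
	yield(&Series{Name: "cpu", Points: []*Point{{3}}})
	yield(&Series{Name: "mem", Points: []*Point{{1}}}) // flushes the "cpu" batch
	fmt.Println((<-out).Name, "flushed")
}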
Code example #2
File: coordinator.go  Project: hanshenu/influxdb
func (self *CoordinatorImpl) CommitSeriesData(db string, serieses []*protocol.Series, sync bool) error {
	now := common.CurrentTime()

	// group the incoming points per destination shard, and per series name
	// within each shard
	shardToSerieses := map[uint32]map[string]*protocol.Series{}
	shardIdToShard := map[uint32]*cluster.ShardData{}

	for _, series := range serieses {
		if len(series.Points) == 0 {
			return fmt.Errorf("Can't write series with zero points.")
		}

		// fill in the current server time for any point that arrived without
		// a timestamp
		for _, point := range series.Points {
			if point.Timestamp == nil {
				point.Timestamp = &now
			}
		}

		// sort the points by timestamp
		// TODO: this isn't needed anymore
		series.SortPointsTimeDescending()

		for i := 0; i < len(series.Points); {
			if len(series.GetName()) == 0 {
				return fmt.Errorf("Series name cannot be empty")
			}

			shard, err := self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, series.GetName(), series.Points[i].GetTimestamp())
			if err != nil {
				return err
			}
			firstIndex := i
			timestamp := series.Points[i].GetTimestamp()
			for ; i < len(series.Points) && series.Points[i].GetTimestamp() == timestamp; i++ {
				// advance i past every point that shares this timestamp, so
				// they all end up in the same slice for this shard
			}
			newSeries := &protocol.Series{Name: series.Name, Fields: series.Fields, Points: series.Points[firstIndex:i:i]}

			shardIdToShard[shard.Id()] = shard
			shardSerieses := shardToSerieses[shard.Id()]
			if shardSerieses == nil {
				shardSerieses = map[string]*protocol.Series{}
				shardToSerieses[shard.Id()] = shardSerieses
			}
			seriesName := series.GetName()
			s := shardSerieses[seriesName]
			if s == nil {
				shardSerieses[seriesName] = newSeries
				continue
			}
			shardSerieses[seriesName] = common.MergeSeries(s, newSeries)
		}
	}

	// issue one write per shard, carrying all of that shard's series
	for id, serieses := range shardToSerieses {
		shard := shardIdToShard[id]

		seriesesSlice := make([]*protocol.Series, 0, len(serieses))
		for _, s := range serieses {
			seriesesSlice = append(seriesesSlice, s)
		}

		err := self.write(db, seriesesSlice, shard, sync)
		if err != nil {
			log.Error("COORD error writing: ", err)
			return err
		}
	}

	return nil
}
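
Here the coordinator splits each incoming series into runs of points that map to the same shard, then merges runs that target the same shard and series name before issuing one write per shard. The sketch below shows that group-and-merge step in isolation, with simplified stand-in types and a hypothetical pickShard function in place of GetShardToWriteToBySeriesAndTime; it is an illustration under those assumptions, not the real coordinator code.

package main

import "fmt"

// Simplified stand-in for protocol.Series: just a name plus point timestamps.
type Series struct {
	Name       string
	Timestamps []int64
}

// pickShard is a hypothetical replacement for
// clusterConfiguration.GetShardToWriteToBySeriesAndTime: it buckets points
// into two shards by timestamp parity.
func pickShard(ts int64) uint32 { return uint32(ts % 2) }

func main() {
	incoming := &Series{Name: "cpu", Timestamps: []int64{1, 2, 3, 4}}

	// shard id -> series name -> accumulated series, mirroring shardToSerieses
	shardToSeries := map[uint32]map[string]*Series{}
	for _, ts := range incoming.Timestamps {
		id := pickShard(ts)
		byName := shardToSeries[id]
		if byName == nil {
			byName = map[string]*Series{}
			shardToSeries[id] = byName
		}
		s := byName[incoming.Name]
		if s == nil {
			s = &Series{Name: incoming.Name}
			byName[incoming.Name] = s
		}
		// points headed for the same shard and series are merged into one batch
		s.Timestamps = append(s.Timestamps, ts)
	}

	// one write per shard, as in the final loop of CommitSeriesData
	for id, byName := range shardToSeries {
		for _, s := range byName {
			fmt.Printf("write to shard %d: %s %v\n", id, s.Name, s.Timestamps)
		}
	}
}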