Example #1
// Query InfluxDB and collect batches on batch collector.
func (b *BatchNode) doQuery() error {
	defer b.ins[0].Close()

	if b.et.tm.InfluxDBService == nil {
		return errors.New("InfluxDB not configured, cannot query InfluxDB for batch query")
	}

	tickC := b.ticker.Start()
	for {
		select {
		case <-b.closing:
			return nil
		case <-b.aborting:
			return errors.New("batch doQuery aborted")
		case now := <-tickC:

			// Update times for query
			stop := now.Add(-1 * b.b.Offset)
			b.query.Start(stop.Add(-1 * b.b.Period))
			b.query.Stop(stop)

			b.logger.Println("D! starting next batch query:", b.query.String())

			// Connect
			con, err := b.et.tm.InfluxDBService.NewClient()
			if err != nil {
				return err
			}
			q := client.Query{
				Command: b.query.String(),
			}

			// Execute query
			resp, err := con.Query(q)
			if err != nil {
				return err
			}

			if resp.Err != nil {
				return resp.Err
			}

			// Collect batches
			for _, res := range resp.Results {
				batches, err := models.ResultToBatches(res)
				if err != nil {
					return err
				}
				for _, bch := range batches {
					if err := b.ins[0].CollectBatch(bch); err != nil {
						return err
					}
				}
			}
		}
	}
}
Example #2
// Record a series of batch queries defined by a batch task
func (r *Service) doRecordBatch(rid uuid.UUID, t *kapacitor.Task, start, stop time.Time) error {
	et, err := kapacitor.NewExecutingTask(r.TaskMaster.New(), t)
	if err != nil {
		return err
	}

	batches, err := et.BatchQueries(start, stop)
	if err != nil {
		return err
	}

	if r.InfluxDBService == nil {
		return errors.New("InfluxDB not configured, cannot record batch query")
	}
	con, err := r.InfluxDBService.NewClient()
	if err != nil {
		return err
	}

	archive, err := r.newBatchArchive(rid)
	if err != nil {
		return err
	}

	for batchIdx, queries := range batches {
		w, err := archive.Create(batchIdx)
		if err != nil {
			return err
		}
		for _, q := range queries {
			query := client.Query{
				Command: q,
			}
			resp, err := con.Query(query)
			if err != nil {
				return err
			}
			if resp.Err != nil {
				return resp.Err
			}
			for _, res := range resp.Results {
				batches, err := models.ResultToBatches(res)
				if err != nil {
					return err
				}
				for _, b := range batches {
					if err := kapacitor.WriteBatchForRecording(w, b); err != nil {
						return err
					}
				}
			}
		}
	}
	return archive.Close()
}
Example #3
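// runQueryBatch executes the query against the given cluster and sends the resulting batches on the source channel.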
func (r *Service) runQueryBatch(source chan<- models.Batch, q string, cluster string) error {
	defer close(source)
	_, resp, err := r.execQuery(q, cluster)
	if err != nil {
		return err
	}
	// Write results to sources
	for _, res := range resp.Results {
		batches, err := models.ResultToBatches(res, false)
		if err != nil {
			return err
		}
		for _, batch := range batches {
			source <- batch
		}
	}
	return nil
}
Example #4
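// Record the result of a single query as either a stream or batch recording, depending on the task type.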
func (r *Service) doRecordQuery(rid uuid.UUID, q string, tt kapacitor.TaskType) error {
	// Parse query to determine dbrp
	var db, rp string
	s, err := influxql.ParseStatement(q)
	if err != nil {
		return err
	}
	if slct, ok := s.(*influxql.SelectStatement); ok && len(slct.Sources) == 1 {
		if m, ok := slct.Sources[0].(*influxql.Measurement); ok {
			db = m.Database
			rp = m.RetentionPolicy
		}
	}
	if db == "" || rp == "" {
		return errors.New("could not determine database and retention policy. Is the query fully qualified?")
	}
	// Query InfluxDB
	con, err := r.InfluxDBService.NewClient()
	if err != nil {
		return err
	}
	query := client.Query{
		Command: q,
	}
	resp, err := con.Query(query)
	if err != nil {
		return err
	}
	if resp.Err != nil {
		return resp.Err
	}
	// Open appropriate writer
	var w io.Writer
	var c io.Closer
	switch tt {
	case kapacitor.StreamTask:
		sw, err := r.newStreamWriter(rid)
		if err != nil {
			return err
		}
		w = sw
		c = sw
	case kapacitor.BatchTask:
		archive, err := r.newBatchArchive(rid)
		if err != nil {
			return err
		}
		w, err = archive.Create(0)
		if err != nil {
			return err
		}
		c = archive
	default:
		return errors.New("cannot record query for unknown task type")
	}
	// Write results to writer
	for _, res := range resp.Results {
		batches, err := models.ResultToBatches(res)
		if err != nil {
			c.Close()
			return err
		}
		for _, batch := range batches {
			switch tt {
			case kapacitor.StreamTask:
				for _, bp := range batch.Points {
					p := models.Point{
						Name:            batch.Name,
						Database:        db,
						RetentionPolicy: rp,
						Tags:            bp.Tags,
						Fields:          bp.Fields,
						Time:            bp.Time,
					}
					if err := kapacitor.WritePointForRecording(w, p, precision); err != nil {
						c.Close()
						return err
					}
				}
			case kapacitor.BatchTask:
				if err := kapacitor.WriteBatchForRecording(w, batch); err != nil {
					c.Close()
					return err
				}
			}
		}
	}
	return c.Close()
}
Example #5
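// runQueryStream executes the query against the given cluster and sends the resulting points on the source channel in time order.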
func (r *Service) runQueryStream(source chan<- models.Point, q, cluster string) error {
	defer close(source)
	dbrp, resp, err := r.execQuery(q, cluster)
	if err != nil {
		return err
	}
	// Write results to sources
	for _, res := range resp.Results {
		batches, err := models.ResultToBatches(res, false)
		if err != nil {
			return err
		}
		// Write points in order across batches

		// Find earliest time of first points
		current := time.Time{}
		for _, batch := range batches {
			if len(batch.Points) > 0 &&
				(current.IsZero() ||
					batch.Points[0].Time.Before(current)) {
				current = batch.Points[0].Time
			}
		}

		finishedCount := 0
		batchCount := len(batches)
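		// Each pass writes every point with time <= current across all batches,
		// then advances current to the next earliest remaining timestamp,
		// until every batch has been drained.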
		for finishedCount != batchCount {
			next := time.Time{}
			for b := range batches {
				l := len(batches[b].Points)
				if l == 0 {
					finishedCount++
					continue
				}
				i := 0
				for ; i < l; i++ {
					bp := batches[b].Points[i]
					if bp.Time.After(current) {
						if next.IsZero() || bp.Time.Before(next) {
							next = bp.Time
						}
						break
					}
					// Write point
					p := models.Point{
						Name:            batches[b].Name,
						Database:        dbrp.Database,
						RetentionPolicy: dbrp.RetentionPolicy,
						Tags:            bp.Tags,
						Fields:          bp.Fields,
						Time:            bp.Time,
					}
					source <- p
				}
				// Remove written points
				batches[b].Points = batches[b].Points[i:]
			}
			current = next
		}
	}
	return nil
}
Example #6
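// startRecordBatch runs the batch queries defined by a batch task, returning one channel of batches per set of queries and an error channel that reports the first failure.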
func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([]<-chan models.Batch, <-chan error, error) {
	// We do not open the task master so it does not need to be closed
	et, err := kapacitor.NewExecutingTask(s.TaskMaster.New(""), t)
	if err != nil {
		return nil, nil, err
	}

	batches, err := et.BatchQueries(start, stop)
	if err != nil {
		return nil, nil, err
	}

	if s.InfluxDBService == nil {
		return nil, nil, errors.New("InfluxDB not configured, cannot record batch query")
	}

	sources := make([]<-chan models.Batch, len(batches))
	errors := make(chan error, len(batches))

	for batchIndex, batchQueries := range batches {
		source := make(chan models.Batch)
		sources[batchIndex] = source
		go func(cluster string, queries []*kapacitor.Query, groupByName bool) {
			defer close(source)

			// Connect to the cluster
			cli, err := s.InfluxDBService.NewNamedClient(cluster)
			if err != nil {
				errors <- err
				return
			}
			// Run queries
			for _, q := range queries {
				query := influxdb.Query{
					Command: q.String(),
				}
				resp, err := cli.Query(query)
				if err != nil {
					errors <- err
					return
				}
				for _, res := range resp.Results {
					batches, err := models.ResultToBatches(res, groupByName)
					if err != nil {
						errors <- err
						return
					}
					for _, b := range batches {
						// Set stop time based off query bounds
						if b.TMax.IsZero() || !q.IsGroupedByTime() {
							b.TMax = q.StopTime()
						}
						source <- b
					}
				}
			}
			errors <- nil
		}(batchQueries.Cluster, batchQueries.Queries, batchQueries.GroupByMeasurement)
	}
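	// Aggregate the per-query errors: forward the first failure, or nil once every goroutine has finished.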
	errC := make(chan error, 1)
	go func() {
		for i := 0; i < cap(errors); i++ {
			err := <-errors
			if err != nil {
				errC <- err
				return
			}
		}
		errC <- nil
	}()
	return sources, errC, nil
}
Example #7
// Query InfluxDB and collect batches on batch collector.
func (b *QueryNode) doQuery() error {
	defer b.ins[0].Close()
	b.queryErrors = &expvar.Int{}
	b.batchesQueried = &expvar.Int{}
	b.pointsQueried = &expvar.Int{}

	b.statMap.Set(statsQueryErrors, b.queryErrors)
	b.statMap.Set(statsBatchesQueried, b.batchesQueried)
	b.statMap.Set(statsPointsQueried, b.pointsQueried)

	if b.et.tm.InfluxDBService == nil {
		return errors.New("InfluxDB not configured, cannot query InfluxDB for batch query")
	}

	con, err := b.et.tm.InfluxDBService.NewNamedClient(b.b.Cluster)
	if err != nil {
		return errors.Wrap(err, "failed to get InfluxDB client")
	}
	tickC := b.ticker.Start()
	for {
		select {
		case <-b.closing:
			return nil
		case <-b.aborting:
			return errors.New("batch doQuery aborted")
		case now := <-tickC:
			b.timer.Start()
			// Update times for query
			stop := now.Add(-1 * b.b.Offset)
			b.query.SetStartTime(stop.Add(-1 * b.b.Period))
			b.query.SetStopTime(stop)

			qStr := b.query.String()
			b.logger.Println("D! starting next batch query:", qStr)

			// Execute query
			q := influxdb.Query{
				Command: qStr,
			}
			resp, err := con.Query(q)
			if err != nil {
				b.queryErrors.Add(1)
				b.logger.Println("E!", err)
				b.timer.Stop()
				break
			}

			// Collect batches
			for _, res := range resp.Results {
				batches, err := models.ResultToBatches(res, b.byName)
				if err != nil {
					b.logger.Println("E! failed to understand query result:", err)
					b.queryErrors.Add(1)
					continue
				}
				for _, bch := range batches {
					// Set stop time based off query bounds
					if bch.TMax.IsZero() || !b.query.IsGroupedByTime() {
						bch.TMax = stop
					}
					b.batchesQueried.Add(1)
					b.pointsQueried.Add(int64(len(bch.Points)))
					b.timer.Pause()
					err := b.ins[0].CollectBatch(bch)
					if err != nil {
						return err
					}
					b.timer.Resume()
				}
			}
			b.timer.Stop()
		}
	}
}
Example #8
// Query InfluxDB and collect batches on batch collector.
func (b *QueryNode) doQuery() error {
	defer b.ins[0].Close()
	b.queryErrors = &expvar.Int{}
	b.connectErrors = &expvar.Int{}
	b.batchesQueried = &expvar.Int{}
	b.pointsQueried = &expvar.Int{}

	b.statMap.Set(statsQueryErrors, b.queryErrors)
	b.statMap.Set(statsConnectErrors, b.connectErrors)
	b.statMap.Set(statsBatchesQueried, b.batchesQueried)
	b.statMap.Set(statsPointsQueried, b.pointsQueried)

	if b.et.tm.InfluxDBService == nil {
		return errors.New("InfluxDB not configured, cannot query InfluxDB for batch query")
	}

	var con influxdb.Client
	tickC := b.ticker.Start()
	for {
		select {
		case <-b.closing:
			return nil
		case <-b.aborting:
			return errors.New("batch doQuery aborted")
		case now := <-tickC:
			b.timer.Start()

			// Update times for query
			stop := now.Add(-1 * b.b.Offset)
			b.query.Start(stop.Add(-1 * b.b.Period))
			b.query.Stop(stop)

			b.logger.Println("D! starting next batch query:", b.query.String())

			var err error
			if con == nil {
				if b.b.Cluster != "" {
					con, err = b.et.tm.InfluxDBService.NewNamedClient(b.b.Cluster)
				} else {
					con, err = b.et.tm.InfluxDBService.NewDefaultClient()
				}
				if err != nil {
					b.logger.Println("E! failed to connect to InfluxDB:", err)
					b.connectErrors.Add(1)
					// Ensure connection is nil
					con = nil
					b.timer.Stop()
					break
				}
			}
			q := influxdb.Query{
				Command: b.query.String(),
			}

			// Execute query
			resp, err := con.Query(q)
			if err != nil {
				b.logger.Println("E! query failed:", err)
				b.queryErrors.Add(1)
				// Get a new connection
				con = nil
				b.timer.Stop()
				break
			}

			if err := resp.Error(); err != nil {
				b.logger.Println("E! query returned error response:", err)
				b.queryErrors.Add(1)
				b.timer.Stop()
				break
			}

			// Collect batches
			for _, res := range resp.Results {
				batches, err := models.ResultToBatches(res, b.byName)
				if err != nil {
					b.logger.Println("E! failed to understand query result:", err)
					b.queryErrors.Add(1)
					continue
				}
				for _, bch := range batches {
					bch.TMax = stop
					b.batchesQueried.Add(1)
					b.pointsQueried.Add(int64(len(bch.Points)))
					b.timer.Pause()
					err := b.ins[0].CollectBatch(bch)
					if err != nil {
						return err
					}
					b.timer.Resume()
				}
			}
			b.timer.Stop()
		}
	}
}