Example #1
0
// Record a series of batch queries defined by a batch task
// doRecordBatch records the results of the batch queries defined by a batch
// task, writing each query's batches into an archive keyed by the recording
// ID. One archive entry is created per batch query in the task.
//
// Returns an error if the task cannot be prepared, InfluxDB is not
// configured, any query fails, or the archive cannot be written.
func (r *Service) doRecordBatch(rid uuid.UUID, t *kapacitor.Task, start, stop time.Time) error {
	et, err := kapacitor.NewExecutingTask(r.TaskMaster.New(), t)
	if err != nil {
		return err
	}

	batches, err := et.BatchQueries(start, stop)
	if err != nil {
		return err
	}

	if r.InfluxDBService == nil {
		return errors.New("InfluxDB not configured, cannot record batch query")
	}
	con, err := r.InfluxDBService.NewClient()
	if err != nil {
		return err
	}

	archive, err := r.newBatchArchive(rid)
	if err != nil {
		return err
	}

	for batchIdx, queries := range batches {
		w, err := archive.Create(batchIdx)
		if err != nil {
			return err
		}
		for _, q := range queries {
			query := client.Query{
				Command: q,
			}
			resp, err := con.Query(query)
			if err != nil {
				return err
			}
			if resp.Err != nil {
				return resp.Err
			}
			for _, res := range resp.Results {
				// Distinct name: previously this shadowed the outer
				// `batches` loop variable.
				resultBatches, err := models.ResultToBatches(res)
				if err != nil {
					return err
				}
				for _, b := range resultBatches {
					// Previously this error was silently dropped, which
					// could yield a corrupt or incomplete recording.
					if err := kapacitor.WriteBatchForRecording(w, b); err != nil {
						return err
					}
				}
			}
		}
	}
	return archive.Close()
}
Example #2
0
// saveBatchRecording archives one or more streams of batches, creating one
// archive entry per source channel. It drains each channel until it is
// closed by the producer.
//
// Returns an error if the archiver cannot be created or any batch fails
// to be written.
func (r *Service) saveBatchRecording(dataSource DataSource, sources []<-chan models.Batch) error {
	archiver, err := dataSource.BatchArchiver()
	if err != nil {
		return err
	}

	for batchIdx, batches := range sources {
		w, err := archiver.Archive(batchIdx)
		if err != nil {
			return err
		}
		for b := range batches {
			// Previously this error was silently dropped; check it as
			// saveBatchQuery does, so a failed write surfaces to the caller.
			if err := kapacitor.WriteBatchForRecording(w, b); err != nil {
				return err
			}
		}
	}
	return archiver.Close()
}
Example #3
0
// saveBatchQuery archives the single stream of batches produced by one
// query, writing every batch into archive entry 0. The channel is drained
// until the producer closes it.
func (r *Service) saveBatchQuery(dataSource DataSource, batches <-chan models.Batch) error {
	archiver, err := dataSource.BatchArchiver()
	if err != nil {
		return err
	}

	w, err := archiver.Archive(0)
	if err != nil {
		return err
	}

	for b := range batches {
		if werr := kapacitor.WriteBatchForRecording(w, b); werr != nil {
			return werr
		}
	}

	return archiver.Close()
}
Example #4
0
// doRecordQuery records the results of a single InfluxDB query as either a
// stream or a batch recording, depending on the task type tt.
//
// The query must be fully qualified (database and retention policy present
// in the measurement) so points can be attributed to a dbrp. Returns an
// error if the query cannot be parsed or attributed, InfluxDB is not
// configured, the task type is unknown, or any write fails.
func (r *Service) doRecordQuery(rid uuid.UUID, q string, tt kapacitor.TaskType) error {
	// Parse query to determine dbrp
	var db, rp string
	s, err := influxql.ParseStatement(q)
	if err != nil {
		return err
	}
	if slct, ok := s.(*influxql.SelectStatement); ok && len(slct.Sources) == 1 {
		if m, ok := slct.Sources[0].(*influxql.Measurement); ok {
			db = m.Database
			rp = m.RetentionPolicy
		}
	}
	if db == "" || rp == "" {
		return errors.New("could not determine database and retention policy. Is the query fully qualified?")
	}
	// Query InfluxDB
	if r.InfluxDBService == nil {
		// Consistent with doRecordBatch: fail fast with a clear error
		// instead of a nil-pointer panic on NewClient below.
		return errors.New("InfluxDB not configured, cannot record query")
	}
	con, err := r.InfluxDBService.NewClient()
	if err != nil {
		return err
	}
	query := client.Query{
		Command: q,
	}
	resp, err := con.Query(query)
	if err != nil {
		return err
	}
	if resp.Err != nil {
		return resp.Err
	}
	// Open appropriate writer
	var w io.Writer
	var c io.Closer
	switch tt {
	case kapacitor.StreamTask:
		sw, err := r.newStreamWriter(rid)
		if err != nil {
			return err
		}
		w = sw
		c = sw
	case kapacitor.BatchTask:
		archive, err := r.newBatchArchive(rid)
		if err != nil {
			return err
		}
		w, err = archive.Create(0)
		if err != nil {
			return err
		}
		c = archive
	default:
		// Previously an unknown task type left w and c nil, causing a
		// nil-pointer panic on the first write or on c.Close().
		return errors.New("unknown task type, cannot record query")
	}
	// Write results to writer, closing it on every error path.
	for _, res := range resp.Results {
		batches, err := models.ResultToBatches(res)
		if err != nil {
			c.Close()
			return err
		}
		for _, batch := range batches {
			switch tt {
			case kapacitor.StreamTask:
				for _, bp := range batch.Points {
					p := models.Point{
						Name:            batch.Name,
						Database:        db,
						RetentionPolicy: rp,
						Tags:            bp.Tags,
						Fields:          bp.Fields,
						Time:            bp.Time,
					}
					// Previously the write error was dropped.
					if err := kapacitor.WritePointForRecording(w, p, precision); err != nil {
						c.Close()
						return err
					}
				}
			case kapacitor.BatchTask:
				// Previously the write error was dropped.
				if err := kapacitor.WriteBatchForRecording(w, batch); err != nil {
					c.Close()
					return err
				}
			}
		}
	}
	return c.Close()
}