func (s *influxdbCluster) execQuery(cli influxdb.Client, q influxql.Statement) (*influxdb.Response, error) {
	query := influxdb.Query{
		Command: q.String(),
	}
	resp, err := cli.Query(query)
	if err != nil {
		return nil, err
	}
	if err := resp.Error(); err != nil {
		return nil, err
	}
	return resp, nil
}
func (s *Service) execQuery(q, cluster string) (kapacitor.DBRP, *influxdb.Response, error) {
	// Parse query to determine dbrp
	dbrp := kapacitor.DBRP{}
	stmt, err := influxql.ParseStatement(q)
	if err != nil {
		return dbrp, nil, err
	}
	if slct, ok := stmt.(*influxql.SelectStatement); ok && len(slct.Sources) == 1 {
		if m, ok := slct.Sources[0].(*influxql.Measurement); ok {
			dbrp.Database = m.Database
			dbrp.RetentionPolicy = m.RetentionPolicy
		}
	}
	if dbrp.Database == "" || dbrp.RetentionPolicy == "" {
		return dbrp, nil, errors.New("could not determine database and retention policy. Is the query fully qualified?")
	}
	if s.InfluxDBService == nil {
		return dbrp, nil, errors.New("InfluxDB not configured, cannot record query")
	}

	// Query InfluxDB
	var con influxdb.Client
	if cluster != "" {
		con, err = s.InfluxDBService.NewNamedClient(cluster)
	} else {
		con, err = s.InfluxDBService.NewDefaultClient()
	}
	if err != nil {
		return dbrp, nil, err
	}
	query := influxdb.Query{
		Command: q,
	}
	resp, err := con.Query(query)
	if err != nil {
		return dbrp, nil, err
	}
	if err := resp.Error(); err != nil {
		return dbrp, nil, err
	}
	return dbrp, resp, nil
}
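// exampleDBRPFromQuery is an illustrative sketch, not part of the service:
// it shows how the dbrp extraction in execQuery behaves for a fully
// qualified query, i.e. one whose FROM clause names both the database and
// the retention policy. The influxql calls and fields are the same ones
// used above; only this helper function is hypothetical.
func exampleDBRPFromQuery() (database, rp string, err error) {
	stmt, err := influxql.ParseStatement(`SELECT mean("value") FROM "telegraf"."default"."cpu"`)
	if err != nil {
		return "", "", err
	}
	slct, ok := stmt.(*influxql.SelectStatement)
	if !ok || len(slct.Sources) != 1 {
		return "", "", errors.New("expected a single-source SELECT statement")
	}
	m, ok := slct.Sources[0].(*influxql.Measurement)
	if !ok {
		return "", "", errors.New("expected a measurement source")
	}
	// For the query above this yields "telegraf" and "default".
	return m.Database, m.RetentionPolicy, nil
}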
func (s *Service) startRecordBatch(t *kapacitor.Task, start, stop time.Time) ([]<-chan models.Batch, <-chan error, error) {
	// We do not open the task master so it does not need to be closed
	et, err := kapacitor.NewExecutingTask(s.TaskMaster.New(""), t)
	if err != nil {
		return nil, nil, err
	}

	batches, err := et.BatchQueries(start, stop)
	if err != nil {
		return nil, nil, err
	}

	if s.InfluxDBService == nil {
		return nil, nil, errors.New("InfluxDB not configured, cannot record batch query")
	}

	sources := make([]<-chan models.Batch, len(batches))
	errors := make(chan error, len(batches))

	for batchIndex, batchQueries := range batches {
		source := make(chan models.Batch)
		sources[batchIndex] = source
		go func(cluster string, queries []string, groupByName bool) {
			defer close(source)

			// Connect to the cluster
			var con influxdb.Client
			var err error
			if cluster != "" {
				con, err = s.InfluxDBService.NewNamedClient(cluster)
			} else {
				con, err = s.InfluxDBService.NewDefaultClient()
			}
			if err != nil {
				errors <- err
				return
			}

			// Run queries
			for _, q := range queries {
				query := influxdb.Query{
					Command: q,
				}
				resp, err := con.Query(query)
				if err != nil {
					errors <- err
					return
				}
				if err := resp.Error(); err != nil {
					errors <- err
					return
				}
				for _, res := range resp.Results {
					batches, err := models.ResultToBatches(res, groupByName)
					if err != nil {
						errors <- err
						return
					}
					for _, b := range batches {
						source <- b
					}
				}
			}
			errors <- nil
		}(batchQueries.Cluster, batchQueries.Queries, batchQueries.GroupByMeasurement)
	}

	errC := make(chan error, 1)
	go func() {
		for i := 0; i < cap(errors); i++ {
			err := <-errors
			if err != nil {
				errC <- err
				return
			}
		}
		errC <- nil
	}()
	return sources, errC, nil
}
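// consumeRecordedBatches is an illustrative sketch, not part of the service:
// it shows how a caller might drain the channels returned by
// startRecordBatch. Each source channel is closed by its goroutine once its
// queries finish, and errC delivers the first error encountered, or nil when
// every goroutine completed cleanly, so the sources must be drained before
// waiting on errC. The handle parameter is hypothetical; a real caller would
// persist each batch to the recording being written.
func consumeRecordedBatches(
	sources []<-chan models.Batch,
	errC <-chan error,
	handle func(models.Batch) error,
) error {
	for _, source := range sources {
		for b := range source {
			if err := handle(b); err != nil {
				return err
			}
		}
	}
	return <-errC
}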
// Query InfluxDB and collect batches on batch collector.
func (b *QueryNode) doQuery() error {
	defer b.ins[0].Close()

	b.queryErrors = &expvar.Int{}
	b.connectErrors = &expvar.Int{}
	b.batchesQueried = &expvar.Int{}
	b.pointsQueried = &expvar.Int{}

	b.statMap.Set(statsQueryErrors, b.queryErrors)
	b.statMap.Set(statsConnectErrors, b.connectErrors)
	b.statMap.Set(statsBatchesQueried, b.batchesQueried)
	b.statMap.Set(statsPointsQueried, b.pointsQueried)

	if b.et.tm.InfluxDBService == nil {
		return errors.New("InfluxDB not configured, cannot query InfluxDB for batch query")
	}

	var con influxdb.Client
	tickC := b.ticker.Start()
	for {
		select {
		case <-b.closing:
			return nil
		case <-b.aborting:
			return errors.New("batch doQuery aborted")
		case now := <-tickC:
			b.timer.Start()

			// Update times for query
			stop := now.Add(-1 * b.b.Offset)
			b.query.Start(stop.Add(-1 * b.b.Period))
			b.query.Stop(stop)

			b.logger.Println("D! starting next batch query:", b.query.String())

			var err error
			if con == nil {
				if b.b.Cluster != "" {
					con, err = b.et.tm.InfluxDBService.NewNamedClient(b.b.Cluster)
				} else {
					con, err = b.et.tm.InfluxDBService.NewDefaultClient()
				}
				if err != nil {
					b.logger.Println("E! failed to connect to InfluxDB:", err)
					b.connectErrors.Add(1)
					// Ensure connection is nil
					con = nil
					b.timer.Stop()
					break
				}
			}
			q := influxdb.Query{
				Command: b.query.String(),
			}

			// Execute query
			resp, err := con.Query(q)
			if err != nil {
				b.logger.Println("E! query failed:", err)
				b.queryErrors.Add(1)
				// Get a new connection
				con = nil
				b.timer.Stop()
				break
			}
			if err := resp.Error(); err != nil {
				b.logger.Println("E! query returned error response:", err)
				b.queryErrors.Add(1)
				b.timer.Stop()
				break
			}

			// Collect batches
			for _, res := range resp.Results {
				batches, err := models.ResultToBatches(res, b.byName)
				if err != nil {
					b.logger.Println("E! failed to understand query result:", err)
					b.queryErrors.Add(1)
					continue
				}
				for _, bch := range batches {
					bch.TMax = stop
					b.batchesQueried.Add(1)
					b.pointsQueried.Add(int64(len(bch.Points)))

					b.timer.Pause()
					err := b.ins[0].CollectBatch(bch)
					if err != nil {
						return err
					}
					b.timer.Resume()
				}
			}
			b.timer.Stop()
		}
	}
}
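// exampleQueryWindow is an illustrative sketch, not part of QueryNode: it
// mirrors the window arithmetic performed on each tick in doQuery above.
// With a period of one minute and an offset of ten seconds, a tick at
// 12:01:10 queries the range from 12:00:00 to 12:01:00.
func exampleQueryWindow(now time.Time, period, offset time.Duration) (start, stop time.Time) {
	stop = now.Add(-1 * offset)
	start = stop.Add(-1 * period)
	return start, stop
}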