// NewServer returns a new instance of Server built from a config.
// It validates the config, constructs the Server value, opens the task
// master, and registers every Kapacitor service. The returned Server is
// not yet running; the caller is expected to open/start it.
func NewServer(c *Config, buildInfo *BuildInfo, logService logging.Interface) (*Server, error) {
	// Reject invalid configs up front, with a hint on how to generate a valid one.
	err := c.Validate()
	if err != nil {
		return nil, fmt.Errorf("%s. To generate a valid configuration file run `kapacitord config > kapacitor.generated.conf`.", err)
	}
	l := logService.NewLogger("[srv] ", log.LstdFlags)
	s := &Server{
		buildInfo:     *buildInfo,
		dataDir:       c.DataDir,
		hostname:      c.Hostname,
		err:           make(chan error),
		LogService:    logService,
		MetaStore:     &metastore{},
		QueryExecutor: &queryexecutor{},
		Logger:        l,
	}
	s.Logger.Println("I! Kapacitor hostname:", s.hostname)

	// Start Task Master
	s.TaskMaster = kapacitor.NewTaskMaster(logService)
	if err := s.TaskMaster.Open(); err != nil {
		return nil, err
	}

	// Append Kapacitor services.
	// NOTE(review): the append order below appears significant (see the
	// stats/reporting comment at the end) — do not reorder casually.
	s.appendUDFService(c.UDF)
	s.appendDeadmanService(c.Deadman)
	s.appendSMTPService(c.SMTP)
	s.appendHTTPDService(c.HTTP)
	s.appendInfluxDBService(c.InfluxDB, c.Hostname)
	s.appendTaskStoreService(c.Task)
	s.appendReplayStoreService(c.Replay)
	s.appendOpsGenieService(c.OpsGenie)
	s.appendVictorOpsService(c.VictorOps)
	s.appendPagerDutyService(c.PagerDuty)
	s.appendHipChatService(c.HipChat)
	s.appendAlertaService(c.Alerta)
	s.appendSlackService(c.Slack)

	// Append InfluxDB services
	s.appendCollectdService(c.Collectd)
	if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
		return nil, err
	}
	for _, g := range c.UDPs {
		s.appendUDPService(g)
	}
	for _, g := range c.Graphites {
		if err := s.appendGraphiteService(g); err != nil {
			return nil, err
		}
	}

	// append StatsService and ReportingService last so all stats are ready
	// to be reported
	s.appendStatsService(c.Stats)
	s.appendReportingService(c.Reporting)

	return s, nil
}
// Helper test function for streamer func testStreamer( t *testing.T, name, script string, ) ( clock.Setter, *kapacitor.ExecutingTask, <-chan error, *kapacitor.TaskMaster, ) { if testing.Verbose() { wlog.SetLevel(wlog.DEBUG) } else { wlog.SetLevel(wlog.OFF) } //Create the task task, err := kapacitor.NewStreamer(name, script, dbrps) if err != nil { t.Fatal(err) } // Load test data dir, err := os.Getwd() if err != nil { t.Fatal(err) } data, err := os.Open(path.Join(dir, "data", name+".srpl")) if err != nil { t.Fatal(err) } // Use 1971 so that we don't get true negatives on Epoch 0 collisions c := clock.New(time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC)) r := kapacitor.NewReplay(c) // Create a new execution env tm := kapacitor.NewTaskMaster(logService) tm.HTTPDService = httpService tm.Open() //Start the task et, err := tm.StartTask(task) if err != nil { t.Fatal(err) } // Replay test data to executor stream, err := tm.Stream(name) if err != nil { t.Fatal(err) } replayErr := r.ReplayStream(data, stream, false, "s") t.Log(string(et.Task.Dot())) return r.Setter, et, replayErr, tm }
// Helper test function for batcher func testBatcher(t *testing.T, name, script string) (clock.Setter, *kapacitor.ExecutingTask, <-chan error, *kapacitor.TaskMaster) { if testing.Verbose() { wlog.SetLevel(wlog.DEBUG) } else { wlog.SetLevel(wlog.OFF) } // Create a new execution env tm := kapacitor.NewTaskMaster(logService) tm.HTTPDService = httpService tm.TaskStore = taskStore{} tm.Open() scope := tm.CreateTICKScope() // Create task task, err := kapacitor.NewTask(name, script, kapacitor.BatchTask, dbrps, 0, scope) if err != nil { t.Fatal(err) } // Load test data var allData []io.ReadCloser var data io.ReadCloser for i := 0; err == nil; { f := fmt.Sprintf("%s.%d.brpl", name, i) data, err = os.Open(path.Join("data", f)) if err == nil { allData = append(allData, data) i++ } } if len(allData) == 0 { t.Fatal("could not find any data files for", name) } // Use 1971 so that we don't get true negatives on Epoch 0 collisions c := clock.New(time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC)) r := kapacitor.NewReplay(c) //Start the task et, err := tm.StartTask(task) if err != nil { t.Fatal(err) } // Replay test data to executor batches := tm.BatchCollectors(name) replayErr := r.ReplayBatch(allData, batches, false) t.Log(string(et.Task.Dot())) return r.Setter, et, replayErr, tm }
func TestBatch_InvalidQuery(t *testing.T) { // Create a new execution env tm := kapacitor.NewTaskMaster("invalidQuery", logService) tm.HTTPDService = httpService tm.TaskStore = taskStore{} tm.DeadmanService = deadman{} tm.Open() defer tm.Close() testCases := []struct { script string err string }{ { script: `batch|query('SELECT value FROM db.rp.m; DROP DATABASE _internal').every(1s)`, err: "query must be a single select statement, got 2 statements", }, { script: `batch|query('DROP DATABASE _internal').every(1s)`, err: `query is not a select statement "DROP DATABASE _internal"`, }, } for _, tc := range testCases { task, err := tm.NewTask("invalid", tc.script, kapacitor.BatchTask, dbrps, 0, nil) if err != nil { t.Error(err) continue } if _, err := tm.StartTask(task); err == nil { t.Errorf("expected error for invalid query %s", task.Dot()) } else if got := err.Error(); got != tc.err { t.Errorf("unexpected error got %s exp %s", got, tc.err) } } }
// New returns a new instance of Server built from a config.
// It validates the config, constructs the Server, sets up cluster and
// server IDs, publishes identifying vars, opens the main task master,
// and appends every service in dependency order. The returned Server is
// not yet running; the caller is expected to open/start it.
func New(c *Config, buildInfo BuildInfo, logService logging.Interface) (*Server, error) {
	// Reject invalid configs up front, with a hint on how to generate a valid one.
	err := c.Validate()
	if err != nil {
		return nil, fmt.Errorf("%s. To generate a valid configuration file run `kapacitord config > kapacitor.generated.conf`.", err)
	}
	l := logService.NewLogger("[srv] ", log.LstdFlags)
	s := &Server{
		config:    c,
		BuildInfo: buildInfo,
		dataDir:   c.DataDir,
		hostname:  c.Hostname,
		err:       make(chan error),
		// Buffered so producers of config updates are not blocked while
		// updates are being consumed.
		configUpdates:   make(chan config.ConfigUpdate, 100),
		LogService:      logService,
		MetaClient:      &kapacitor.NoopMetaClient{},
		QueryExecutor:   &Queryexecutor{},
		Logger:          l,
		ServicesByName:  make(map[string]int),
		DynamicServices: make(map[string]Updater),
	}
	s.Logger.Println("I! Kapacitor hostname:", s.hostname)

	// Setup IDs
	err = s.setupIDs()
	if err != nil {
		return nil, err
	}

	// Set published vars
	kapacitor.ClusterIDVar.Set(s.ClusterID)
	kapacitor.ServerIDVar.Set(s.ServerID)
	kapacitor.HostVar.Set(s.hostname)
	kapacitor.ProductVar.Set(kapacitor.Product)
	kapacitor.VersionVar.Set(s.BuildInfo.Version)
	s.Logger.Printf("I! ClusterID: %s ServerID: %s", s.ClusterID, s.ServerID)

	// Start Task Master
	s.TaskMasterLookup = kapacitor.NewTaskMasterLookup()
	s.TaskMaster = kapacitor.NewTaskMaster(kapacitor.MainTaskMaster, logService)
	s.TaskMaster.DefaultRetentionPolicy = c.DefaultRetentionPolicy
	s.TaskMasterLookup.Set(s.TaskMaster)
	if err := s.TaskMaster.Open(); err != nil {
		return nil, err
	}

	// Append Kapacitor services.
	// NOTE(review): the append order below is significant — several groups
	// explicitly depend on earlier ones (see comments) — do not reorder.
	s.initHTTPDService()
	s.appendStorageService()
	s.appendAuthService()
	s.appendConfigOverrideService()
	s.appendTesterService()

	// Append all dynamic services after the config override and tester services.
	s.appendUDFService()
	s.appendDeadmanService()

	if err := s.appendInfluxDBService(); err != nil {
		return nil, errors.Wrap(err, "influxdb service")
	}
	// Append these after InfluxDB because they depend on it
	s.appendTaskStoreService()
	s.appendReplayService()

	// Append Alert integration services
	s.appendAlertaService()
	s.appendHipChatService()
	s.appendOpsGenieService()
	s.appendPagerDutyService()
	s.appendSMTPService()
	s.appendSensuService()
	s.appendSlackService()
	s.appendTalkService()
	s.appendTelegramService()
	s.appendVictorOpsService()

	// Append third-party integrations
	if err := s.appendK8sService(); err != nil {
		return nil, errors.Wrap(err, "kubernetes service")
	}

	// Append extra input services
	s.appendCollectdService()
	s.appendUDPServices()
	if err := s.appendOpenTSDBService(); err != nil {
		return nil, errors.Wrap(err, "opentsdb service")
	}
	if err := s.appendGraphiteServices(); err != nil {
		return nil, errors.Wrap(err, "graphite service")
	}

	// Append StatsService and ReportingService after other services so all stats are ready
	// to be reported
	s.appendStatsService()
	s.appendReportingService()

	// Append HTTPD Service last so that the API is not listening till everything else succeeded.
	s.appendHTTPDService()

	return s, nil
}
// Generic Benchmark method func Bench(b *testing.B, tasksCount, pointCount, expectedProcessedCount int, tickScript, db, rp string, measurements ...string) { // Setup HTTPD service config := httpd.NewConfig() config.BindAddress = ":0" // Choose port dynamically config.LogEnabled = false httpdService := httpd.NewService(config, "localhost", logService.NewLogger("[http] ", log.LstdFlags), logService) httpdService.Handler.AuthService = noauth.NewService(logService.NewLogger("[noauth] ", log.LstdFlags)) err := httpdService.Open() if err != nil { b.Fatal(err) } writes := make([]struct { request *http.Request seeker io.Seeker }, len(measurements)) for i, m := range measurements { writes[i].request, writes[i].seeker = createWriteRequest(b, db, rp, m, pointCount) } dbrps := []kapacitor.DBRP{{Database: db, RetentionPolicy: rp}} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { // Do not time setup b.StopTimer() tm := kapacitor.NewTaskMaster("bench", &LogService{}) tm.HTTPDService = httpdService tm.UDFService = nil tm.TaskStore = taskStore{} tm.DeadmanService = deadman{} tm.Open() httpdService.Handler.PointsWriter = tm tasks := createTasks(b, tm, tasksCount, tickScript, dbrps) // Seek writes back to beginning for _, write := range writes { write.seeker.Seek(0, 0) } wg := sync.WaitGroup{} wg.Add(len(writes)) // Time how long it takes to process all data b.StartTimer() for _, write := range writes { go func(writeRequest *http.Request, seeker io.Seeker) { defer wg.Done() responseRecorder := httptest.NewRecorder() httpdService.Handler.ServeHTTP(responseRecorder, writeRequest) if responseRecorder.Code != http.StatusNoContent { b.Fatalf("failed to write test data %s", responseRecorder.Body.String()) } }(write.request, write.seeker) } wg.Wait() tm.Drain() for _, t := range tasks { t.Wait() } // Do not time cleanup b.StopTimer() // Validate that tasks did not error out and processed all points validateTasks(b, tm, tasks, expectedProcessedCount) tm.Close() } }