func (server *Server) startProviderWorkers() error { server.providerWorkers = worker.NewWorkerPool() logger.Log(logger.LevelDebug, "server", "declaring providers") for _, prov := range server.providers { connectorType, err := config.GetString(prov.Config.Connector, "type", true) if err != nil { return fmt.Errorf("provider `%s' connector: %s", prov.Name, err) } else if _, ok := connector.Connectors[connectorType]; !ok { return fmt.Errorf("provider `%s' uses unknown connector type `%s'", prov.Name, connectorType) } providerWorker := worker.NewWorker() providerWorker.RegisterEvent(eventInit, workerProviderInit) providerWorker.RegisterEvent(eventShutdown, workerProviderShutdown) providerWorker.RegisterEvent(eventRun, workerProviderRun) providerWorker.RegisterEvent(eventCatalogRefresh, workerProviderRefresh) if err := providerWorker.SendEvent(eventInit, false, prov, connectorType); err != nil { logger.Log(logger.LevelWarning, "server", "in provider `%s', %s", prov.Name, err) logger.Log(logger.LevelWarning, "server", "discarding provider `%s'", prov.Name) continue } // Add worker into pool if initialization went fine server.providerWorkers.Add(providerWorker) providerWorker.SendEvent(eventRun, true, nil) logger.Log(logger.LevelDebug, "server", "declared provider `%s'", prov.Name) } return nil }
func workerCatalogRun(w *worker.Worker, args ...interface{}) { var serverCatalog = w.Props[0].(*catalog.Catalog) defer w.Shutdown() logger.Log(logger.LevelDebug, "catalogWorker", "starting") w.State = worker.JobStarted for { select { case cmd := <-w.ReceiveJobSignals(): switch cmd { case jobSignalShutdown: logger.Log(logger.LevelInfo, "catalogWorker", "received shutdown command, stopping job") w.State = worker.JobStopped return default: logger.Log(logger.LevelNotice, "catalogWorker", "received unknown command, ignoring") } case record := <-serverCatalog.RecordChan: serverCatalog.Insert(record) } } }
// Stop stops the server. func (server *Server) Stop() { if server.stopping { return } logger.Log(logger.LevelNotice, "server", "shutting down server") server.stopping = true // Shutdown serve worker if err := server.serveWorker.SendEvent(eventShutdown, false, nil); err != nil { logger.Log(logger.LevelWarning, "server", "serve worker did not shut down successfully: %s", err) } // Shutdown running provider workers server.stopProviderWorkers() // Shutdown catalog worker if err := server.catalogWorker.SendEvent(eventShutdown, false, nil); err != nil { logger.Log(logger.LevelWarning, "server", "catalog worker did not shut down successfully: %s", err) } server.Catalog.Close() // Remove pid file if server.Config.PidFile != "" { logger.Log(logger.LevelDebug, "server", "removing `%s' pid file", server.Config.PidFile) os.Remove(server.Config.PidFile) } server.wg.Done() }
// Refresh triggers a full connector data update. func (connector *InfluxDBConnector) Refresh(originName string, outputChan chan<- *catalog.Record) error { seriesList, err := connector.client.Query("select * from /.*/ limit 1") if err != nil { return fmt.Errorf("influxdb[%s]: unable to fetch series list: %s", connector.name, err) } for _, series := range seriesList { var seriesName, sourceName, metricName string seriesName = series.GetName() seriesPoints := series.GetPoints() if len(seriesPoints) == 0 { logger.Log(logger.LevelInfo, "connector", "influxdb[%s]: series `%s' does not return sample data, ignoring", connector.name, seriesName, ) continue } for columnIndex, columnName := range series.GetColumns() { if columnName == "time" || columnName == "sequence_number" { continue } else if _, ok := seriesPoints[0][columnIndex].(float64); !ok { continue } seriesMatch, err := matchSeriesPattern(connector.re, seriesName+"."+columnName) if err != nil { logger.Log(logger.LevelInfo, "connector", "influxdb[%s]: series `%s' does not match pattern, ignoring", connector.name, seriesName, ) continue } sourceName, metricName = seriesMatch[0], seriesMatch[1] if _, ok := connector.series[sourceName]; !ok { connector.series[sourceName] = make(map[string][2]string) } connector.series[sourceName][metricName] = [2]string{seriesName, columnName} outputChan <- &catalog.Record{ Origin: originName, Source: sourceName, Metric: metricName, Connector: connector, } } } return nil }
func workerServeRun(w *worker.Worker, args ...interface{}) { var server = w.Props[0].(*Server) defer w.Shutdown() logger.Log(logger.LevelDebug, "serveWorker", "starting") // Prepare router router := NewRouter(server) router.HandleFunc(urlStaticPath, server.serveStatic) router.HandleFunc(urlCatalogPath, server.serveCatalog) router.HandleFunc(urlLibraryPath, server.serveLibrary) router.HandleFunc(urlAdminPath, server.serveAdmin) router.HandleFunc(urlBrowsePath, server.serveBrowse) router.HandleFunc(urlShowPath, server.serveShow) router.HandleFunc(urlReloadPath, server.serveReload) router.HandleFunc(urlStatsPath, server.serveStats) router.HandleFunc("/", server.serveBrowse) http.Handle("/", router) // Start serving HTTP requests listener, err := net.Listen("tcp", server.Config.BindAddr) if err != nil { w.ReturnErr(err) return } logger.Log(logger.LevelInfo, "serveWorker", "listening on %s", server.Config.BindAddr) go http.Serve(listener, nil) for { select { case cmd := <-w.ReceiveJobSignals(): switch cmd { case jobSignalShutdown: logger.Log(logger.LevelInfo, "serveWorker", "received shutdown command, stopping job") listener.Close() logger.Log(logger.LevelInfo, "serveWorker", "server listener closed") w.State = worker.JobStopped return default: logger.Log(logger.LevelInfo, "serveWorker", "received unknown command, ignoring") } } } w.ReturnErr(nil) }
func (library *Library) expandGroup(name string, groupType int, sourceName string) []string { item, err := library.GetItemByName(name, groupType) if err != nil { logger.Log(logger.LevelError, "library", "expand group: unknown group `%s': %s", name, err) return []string{} } // Parse group entries for patterns group := item.(*Group) result := []string{} for _, entry := range group.Entries { subResult := []string{} if groupType == LibraryItemSourceGroup { origin, err := library.Catalog.GetOrigin(entry.Origin) if err != nil { logger.Log(logger.LevelError, "library", "%s", err) continue } for _, source := range origin.GetSources() { if utils.FilterMatch(entry.Pattern, source.Name) { subResult = append(subResult, source.Name) } } } else { source, err := library.Catalog.GetSource(entry.Origin, sourceName) if err != nil { logger.Log(logger.LevelError, "library", "%s", err) continue } for _, metric := range source.GetMetrics() { if utils.FilterMatch(entry.Pattern, metric.Name) { subResult = append(subResult, metric.Name) } } } // Preserve manual ordering if grouped with `Single' matching type if strings.HasPrefix(entry.Pattern, "glob:") || strings.HasPrefix(entry.Pattern, "regexp:") { sort.Strings(subResult) } // Merge all group items subresults result = append(result, subResult...) } return result }
func (server *Server) serveAdmin(writer http.ResponseWriter, request *http.Request) { var err error if request.Method != "GET" && request.Method != "HEAD" { server.serveResponse(writer, nil, http.StatusMethodNotAllowed) return } setHTTPCacheHeaders(writer) if strings.HasPrefix(request.URL.Path, urlAdminPath+"sourcegroups/") || strings.HasPrefix(request.URL.Path, urlAdminPath+"metricgroups/") { err = server.serveAdminGroup(writer, request) } else if strings.HasPrefix(request.URL.Path, urlAdminPath+"graphs/") { err = server.serveAdminGraph(writer, request) } else if strings.HasPrefix(request.URL.Path, urlAdminPath+"collections/") { err = server.serveAdminCollection(writer, request) } else if request.URL.Path == urlAdminPath+"origins/" || request.URL.Path == urlAdminPath+"sources/" || request.URL.Path == urlAdminPath+"metrics/" { err = server.serveAdminCatalog(writer, request) } else if strings.HasPrefix(request.URL.Path, urlAdminPath+"scales/") { err = server.serveAdminScale(writer, request) } else if request.URL.Path == urlAdminPath { err = server.serveAdminIndex(writer, request) } else { err = os.ErrNotExist } if os.IsNotExist(err) { server.serveError(writer, http.StatusNotFound) } else if err != nil { logger.Log(logger.LevelError, "server", "%s", err) server.serveError(writer, http.StatusInternalServerError) } }
func workerProviderRefresh(w *worker.Worker, args ...interface{}) { var prov = w.Props[0].(*provider.Provider) logger.Log(logger.LevelDebug, "provider", "%s: refresh", prov.Name) w.SendJobSignal(jobSignalRefresh) }
// Insert inserts a new record in the catalog. func (catalog *Catalog) Insert(record *CatalogRecord) { logger.Log( logger.LevelDebug, "catalog", "appending metric `%s' to source `%s' via origin `%s'", record.Metric, record.Source, record.Origin, ) if _, ok := catalog.Origins[record.Origin]; !ok { catalog.Origins[record.Origin] = NewOrigin( record.Origin, record.OriginalOrigin, catalog, ) } if _, ok := catalog.Origins[record.Origin].Sources[record.Source]; !ok { catalog.Origins[record.Origin].Sources[record.Source] = NewSource( record.Source, record.OriginalSource, catalog.Origins[record.Origin], ) } if _, ok := catalog.Origins[record.Origin].Sources[record.Source].Metrics[record.Metric]; !ok { catalog.Origins[record.Origin].Sources[record.Source].Metrics[record.Metric] = NewMetric( record.Metric, record.OriginalMetric, catalog.Origins[record.Origin].Sources[record.Source], record.Connector, ) } }
func workerProviderShutdown(w *worker.Worker, args ...interface{}) { var prov = w.Props[0].(*provider.Provider) logger.Log(logger.LevelDebug, "provider", "%s: shutdown", prov.Name) w.SendJobSignal(jobSignalShutdown) }
func compileAggregatorPatterns(aggregators interface{}, connector string) []metricAggregator { var ( re *regexp.Regexp err error ) if aggregators == nil { return nil } list := aggregators.([]interface{}) out := make([]metricAggregator, 0) for _, a := range list { aggregator := a.(map[string]interface{}) if re, err = regexp.Compile(aggregator["metric"].(string)); err != nil { logger.Log(logger.LevelWarning, "connector", "kairosdb[%s]: can't compile `%s', skipping", connector, aggregator["metric"].(string)) continue } out = append(out, metricAggregator{ pattern: aggregator["metric"].(string), re: re, hook: aggregator["aggregator"], }) } return out }
func workerServeShutdown(w *worker.Worker, args ...interface{}) { logger.Log(logger.LevelDebug, "serveWorker", "shutdown") w.SendJobSignal(jobSignalShutdown) w.ReturnErr(nil) }
func (server *Server) serveGraphList(writer http.ResponseWriter, request *http.Request) { var offset, limit int if response, status := server.parseListRequest(writer, request, &offset, &limit); status != http.StatusOK { server.serveResponse(writer, response, status) return } graphSet := set.New(set.ThreadSafe) // Filter on collection if any if request.FormValue("collection") != "" { item, err := server.Library.GetItem(request.FormValue("collection"), library.LibraryItemCollection) if os.IsNotExist(err) { server.serveResponse(writer, serverResponse{mesgResourceNotFound}, http.StatusNotFound) return } else if err != nil { logger.Log(logger.LevelError, "server", "%s", err) server.serveResponse(writer, serverResponse{mesgUnhandledError}, http.StatusInternalServerError) return } collection := item.(*library.Collection) for _, graph := range collection.Entries { graphSet.Add(graph.ID) } } // Fill graphs list items := make(ItemListResponse, 0) for _, graph := range server.Library.Graphs { if !graphSet.IsEmpty() && !graphSet.Has(graph.ID) { continue } if request.FormValue("filter") != "" && !utils.FilterMatch(request.FormValue("filter"), graph.Name) { continue } items = append(items, &ItemResponse{ ID: graph.ID, Name: graph.Name, Description: graph.Description, Modified: graph.Modified.Format(time.RFC3339), }) } response := &listResponse{ list: items, offset: offset, limit: limit, } server.applyResponseLimit(writer, request, response) server.serveResponse(writer, response.list, http.StatusOK) }
func workerProviderRun(w *worker.Worker, args ...interface{}) { var ( prov = w.Props[0].(*provider.Provider) timeTicker *time.Ticker timeChan <-chan time.Time ) defer func() { w.State = worker.JobStopped }() defer w.Shutdown() logger.Log(logger.LevelDebug, "provider", "%s: starting", prov.Name) // If provider `refresh_interval` has been configured, set up a time ticker if prov.Config.RefreshInterval > 0 { timeTicker = time.NewTicker(time.Duration(prov.Config.RefreshInterval) * time.Second) timeChan = timeTicker.C } for { select { case _ = <-timeChan: logger.Log(logger.LevelDebug, "provider", "%s: performing refresh from connector", prov.Name) if err := prov.Connector.Refresh(prov.Name, prov.Filters.Input); err != nil { logger.Log(logger.LevelError, "provider", "%s: unable to refresh: %s", prov.Name, err) continue } prov.LastRefresh = time.Now() case cmd := <-w.ReceiveJobSignals(): switch cmd { case jobSignalRefresh: logger.Log(logger.LevelInfo, "provider", "%s: received refresh command", prov.Name) if err := prov.Connector.Refresh(prov.Name, prov.Filters.Input); err != nil { logger.Log(logger.LevelError, "provider", "%s: unable to refresh: %s", prov.Name, err) continue } prov.LastRefresh = time.Now() case jobSignalShutdown: logger.Log(logger.LevelInfo, "provider", "%s: received shutdown command, stopping job", prov.Name) w.State = worker.JobStopped if timeTicker != nil { // Stop refresh time ticker timeTicker.Stop() } return default: logger.Log(logger.LevelNotice, "provider", "%s: received unknown command, ignoring", prov.Name) } } } }
// Reload reloads the configuration and refreshes both catalog and library. func (server *Server) Reload(config bool) error { logger.Log(logger.LevelNotice, "server", "reloading") server.loading = true if config { if err := server.Config.Reload(); err != nil { logger.Log(logger.LevelError, "server", "unable to reload configuration: %s", err) return err } } server.providerWorkers.Broadcast(eventCatalogRefresh, nil) server.Library.Refresh() server.loading = false return nil }
// GetPlots retrieves time series data from provider based on a query and a time interval. func (c *KairosdbConnector) GetPlots(query *plot.Query) ([]*plot.Series, error) { var ( jsonResponse map[string][]metricQueryResponse results []*plot.Series ) if len(query.Series) == 0 { return nil, fmt.Errorf("kairosdb[%s]: requested series list is empty", c.name) } jsonQuery, err := kairosdbBuildJSONQuery(query, c.series) if err != nil { return nil, fmt.Errorf("kairosdb[%s]: unable to build or marshal JSON query: %s", c.name, err) } client := utils.NewHTTPClient(c.timeout, c.insecureTLS) logger.Log(logger.LevelDebug, "connector", "kairosdb[%s]: API Call to %s: %s", c.name, strings.TrimSuffix(c.url, "/")+kairosdbURLQueryMetric, string(jsonQuery)) r, err := http.NewRequest("POST", strings.TrimSuffix(c.url, "/")+kairosdbURLQueryMetric, bytes.NewBuffer(jsonQuery)) if err != nil { return nil, fmt.Errorf("kairosdb[%s]: unable to set up HTTP request: %s", c.name, err) } r.Header.Add("User-Agent", "Facette") r.Header.Add("X-Requested-With", "KairosDBConnector") r.Header.Set("Content-Type", "application/json") rsp, err := client.Do(r) if err != nil { return nil, fmt.Errorf("kairosdb[%s]: unable to perform HTTP request: %s", c.name, err) } defer rsp.Body.Close() if err = kairosdbCheckBackendResponse(rsp); err != nil { return nil, fmt.Errorf("kairosdb[%s]: invalid HTTP backend response: %s", c.name, err) } data, err := ioutil.ReadAll(rsp.Body) if err != nil { return nil, fmt.Errorf("kairosdb[%s]: unable to read HTTP response body: %s", c.name, err) } if err = json.Unmarshal(data, &jsonResponse); err != nil { return nil, fmt.Errorf("kairosdb[%s]: unable to unmarshal JSON data: %s", c.name, err) } if results, err = kairosdbExtractPlots(query, c.series, jsonResponse["queries"]); err != nil { return nil, fmt.Errorf("kairosdb[%s]: unable to extract plot values from backend response: %s", c.name, err) } return results, nil }
func workerCatalogInit(w *worker.Worker, args ...interface{}) { var catalog = args[0].(*catalog.Catalog) logger.Log(logger.LevelDebug, "catalogWorker", "init") // Worker properties: // 0: catalog instance (*catalog.Catalog) w.Props = append(w.Props, catalog) w.ReturnErr(nil) }
func workerServeInit(w *worker.Worker, args ...interface{}) { var server = args[0].(*Server) logger.Log(logger.LevelDebug, "serveWorker", "init") // Worker properties: // 0: server instance (*Server) w.Props = append(w.Props, server) w.ReturnErr(nil) }
func (server *Server) serveError(writer http.ResponseWriter, status int) { err := server.execTemplate( writer, status, struct { URLPrefix string Status int }{ URLPrefix: server.Config.URLPrefix, Status: status, }, path.Join(server.Config.BaseDir, "template", "layout.html"), path.Join(server.Config.BaseDir, "template", "error.html"), ) if err != nil { logger.Log(logger.LevelError, "server", "%s", err) server.serveResponse(writer, nil, status) } }
func (server *Server) serveWait(writer http.ResponseWriter, request *http.Request) { err := server.execTemplate( writer, http.StatusServiceUnavailable, struct { URLPrefix string }{ URLPrefix: server.Config.URLPrefix, }, path.Join(server.Config.BaseDir, "template", "layout.html"), path.Join(server.Config.BaseDir, "template", "wait.html"), ) if err != nil { if os.IsNotExist(err) { server.serveError(writer, http.StatusNotFound) } else { logger.Log(logger.LevelError, "server", "%s", err) server.serveError(writer, http.StatusInternalServerError) } } }
// Insert inserts a new record in the catalog. func (c *Catalog) Insert(record *Record) { c.Lock() defer c.Unlock() logger.Log( logger.LevelDebug, "catalog", "appending metric `%s' to source `%s' via origin `%s'", record.Metric, record.Source, record.Origin, ) if _, ok := c.origins[record.Origin]; !ok { c.origins[record.Origin] = NewOrigin( record.Origin, record.OriginalOrigin, c, ) } if _, ok := c.origins[record.Origin].sources[record.Source]; !ok { c.origins[record.Origin].sources[record.Source] = NewSource( record.Source, record.OriginalSource, c.origins[record.Origin], ) } if _, ok := c.origins[record.Origin].sources[record.Source].metrics[record.Metric]; !ok { c.origins[record.Origin].sources[record.Source].metrics[record.Metric] = NewMetric( record.Metric, record.OriginalMetric, c.origins[record.Origin].sources[record.Source], record.Connector, ) } }
func executeQueries(queries map[string]*providerQuery) (map[string][]plot.Series, error) { plotSeries := make(map[string][]plot.Series) for _, providerQuery := range queries { plots, err := providerQuery.connector.GetPlots(&providerQuery.query) if err != nil { logger.Log(logger.LevelError, "server", "%s", err) continue } // Re-arrange internal plot results according to original queries for plotsIndex, plotsItem := range plots { // Add metric name detail to series name is a source/metric group if providerQuery.queryMap[plotsIndex].fromSourceGroup || strings.HasPrefix(providerQuery.queryMap[plotsIndex].seriesName, library.LibraryGroupPrefix) { plotsItem.Name = fmt.Sprintf( "%s (%s)", providerQuery.queryMap[plotsIndex].sourceName, providerQuery.queryMap[plotsIndex].metricName, ) } else { plotsItem.Name = providerQuery.queryMap[plotsIndex].seriesName } if _, ok := plotSeries[providerQuery.queryMap[plotsIndex].seriesName]; !ok { plotSeries[providerQuery.queryMap[plotsIndex].seriesName] = make([]plot.Series, 0) } plotSeries[providerQuery.queryMap[plotsIndex].seriesName] = append( plotSeries[providerQuery.queryMap[plotsIndex].seriesName], *plotsItem, ) } } return plotSeries, nil }
func (server *Server) serveShow(writer http.ResponseWriter, request *http.Request) { var err error if request.Method != "GET" && request.Method != "HEAD" { server.serveResponse(writer, nil, http.StatusMethodNotAllowed) return } setHTTPCacheHeaders(writer) if strings.HasPrefix(request.URL.Path, urlShowPath+"graphs/") { err = server.serveShowGraph(writer, request) } else { err = os.ErrNotExist } if os.IsNotExist(err) { server.serveError(writer, http.StatusNotFound) } else if err != nil { logger.Log(logger.LevelError, "server", "%s", err) server.serveError(writer, http.StatusInternalServerError) } }
func workerProviderInit(w *worker.Worker, args ...interface{}) { var ( prov = args[0].(*provider.Provider) connectorType = args[1].(string) ) logger.Log(logger.LevelDebug, "provider", "%s: init", prov.Name) // Instanciate the connector according to its type conn, err := connector.Connectors[connectorType](prov.Name, prov.Config.Connector) if err != nil { w.ReturnErr(err) return } prov.Connector = conn.(connector.Connector) // Worker properties: // 0: provider instance (*provider.Provider) w.Props = append(w.Props, prov) w.ReturnErr(nil) }
func (server *Server) serveBrowse(writer http.ResponseWriter, request *http.Request) { var err error if request.Method != "GET" && request.Method != "HEAD" { server.serveResponse(writer, nil, http.StatusMethodNotAllowed) return } // Redirect to default location if request.URL.Path == "/" { http.Redirect(writer, request, server.Config.URLPrefix+urlBrowsePath, 301) return } setHTTPCacheHeaders(writer) if strings.HasPrefix(request.URL.Path, urlBrowsePath+"collections/") { err = server.serveBrowseCollection(writer, request) } else if strings.HasPrefix(request.URL.Path, urlBrowsePath+"graphs/") { err = server.serveBrowseGraph(writer, request) } else if request.URL.Path == urlBrowsePath+"search" || request.URL.Path == urlBrowsePath+"opensearch.xml" { err = server.serveBrowseSearch(writer, request) } else if request.URL.Path == urlBrowsePath { err = server.serveBrowseIndex(writer, request) } else { err = os.ErrNotExist } if os.IsNotExist(err) { server.serveError(writer, http.StatusNotFound) } else if err != nil { logger.Log(logger.LevelError, "server", "%s", err) server.serveError(writer, http.StatusInternalServerError) } }
func walkDir(dirPath string, linkPath string, walkFunc filepath.WalkFunc) error { if _, err := os.Stat(dirPath); err != nil { return err } // Search for files recursively return filepath.Walk(dirPath, func(filePath string, fileInfo os.FileInfo, err error) error { mode := fileInfo.Mode() & os.ModeType if mode == os.ModeSymlink { realPath, err := filepath.EvalSymlinks(filePath) if err != nil { logger.Log(logger.LevelWarning, "utils", "failed to resolve symlink %q: %v", filePath, err) return nil } return walkDir(realPath, filePath, walkFunc) } else if linkPath != "" { return walkFunc(linkPath+filePath[len(dirPath):], fileInfo, err) } else { return walkFunc(filePath, fileInfo, err) } }) }
// WriteHeader sends an HTTP response header with along with its status code. func (writer ResponseWriter) WriteHeader(status int) { writer.ResponseWriter.WriteHeader(status) logger.Log(logger.LevelDebug, "serveWorker", "\"%s %s %s\" %d", writer.request.Method, writer.request.URL, writer.request.Proto, status) }
// Run starts the server serving the HTTP responses.
//
// It sets up logging, reloads the configuration, writes the pid file if
// configured, spawns the catalog, provider and serve workers, then blocks
// until Stop() releases the wait group.
func (server *Server) Run() error {
	server.startTime = time.Now()

	// Set up server logging
	if server.Config.LogFile != "" && server.Config.LogFile != "-" {
		dirPath, _ := path.Split(server.Config.LogFile)
		// NOTE(review): MkdirAll errors are ignored here; a failure surfaces
		// just below when opening the log file
		os.MkdirAll(dirPath, 0755)

		serverOutput, err := os.OpenFile(server.Config.LogFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			logger.Log(logger.LevelError, "server", "unable to open log file: %s", err)
			return err
		}

		defer serverOutput.Close()

		logger.SetOutput(serverOutput)
	}

	logger.SetLevel(server.logLevel)

	// Load server configuration
	if err := server.Config.Reload(); err != nil {
		logger.Log(logger.LevelError, "server", "unable to load configuration: %s", err)
		return err
	}

	// Handle pid file creation if set
	if server.Config.PidFile != "" {
		fd, err := os.OpenFile(server.Config.PidFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
		if err != nil {
			return fmt.Errorf("unable to create pid file `%s'", server.Config.PidFile)
		}

		defer fd.Close()

		// NOTE(review): Write error is ignored — pid file is best-effort
		fd.Write([]byte(strconv.Itoa(os.Getpid()) + "\n"))
	}

	// Released by Stop(); Run() blocks on this below until shutdown
	server.wg.Add(1)

	// Create new catalog instance
	server.Catalog = catalog.NewCatalog()

	// Instantiate catalog worker
	server.catalogWorker = worker.NewWorker()
	server.catalogWorker.RegisterEvent(eventInit, workerCatalogInit)
	server.catalogWorker.RegisterEvent(eventShutdown, workerCatalogShutdown)
	server.catalogWorker.RegisterEvent(eventRun, workerCatalogRun)

	if err := server.catalogWorker.SendEvent(eventInit, false, server.Catalog); err != nil {
		return err
	}

	server.catalogWorker.SendEvent(eventRun, true, nil)

	// Instantiate providers
	for providerName, providerConfig := range server.Config.Providers {
		server.providers[providerName] = provider.NewProvider(providerName, providerConfig, server.Catalog)
	}

	if err := server.startProviderWorkers(); err != nil {
		return err
	}

	// Send initial catalog refresh event to provider workers
	server.providerWorkers.Broadcast(eventCatalogRefresh, nil)

	// Create library instance
	server.Library = library.NewLibrary(server.Config, server.Catalog)
	go server.Library.Refresh()

	// Instantiate serve worker
	server.serveWorker = worker.NewWorker()
	server.serveWorker.RegisterEvent(eventInit, workerServeInit)
	server.serveWorker.RegisterEvent(eventShutdown, workerServeShutdown)
	server.serveWorker.RegisterEvent(eventRun, workerServeRun)

	if err := server.serveWorker.SendEvent(eventInit, false, server); err != nil {
		return err
	} else if err := server.serveWorker.SendEvent(eventRun, false, nil); err != nil {
		return err
	}

	// Block until Stop() is invoked
	server.wg.Wait()

	return nil
}
// Refresh triggers a full connector data update.
//
// It walks the RRD base directory, matches each file path against the
// configured source/metric pattern, reads datasource and consolidation
// function information from the .rrd headers, and emits one catalog record
// per (datasource, consolidation function) pair found.
func (c *RRDConnector) Refresh(originName string, outputChan chan<- *catalog.Record) error {
	// Search for files and parse their path for source/metric pairs
	walkFunc := func(filePath string, fileInfo os.FileInfo, err error) error {
		var sourceName, metricName string

		// Report errors
		if err != nil {
			logger.Log(logger.LevelWarning, "connector", "rrd[%s]: error while walking: %s", c.name, err)
			return nil
		}

		// Skip non-files
		mode := fileInfo.Mode() & os.ModeType
		if mode != 0 {
			return nil
		}

		// Get pattern matches (path is made relative to the RRD base path)
		m, err := matchSeriesPattern(c.re, strings.TrimPrefix(filePath, c.path+"/"))
		if err != nil {
			logger.Log(logger.LevelInfo, "connector", "rrd[%s]: file `%s' does not match pattern, ignoring", c.name, filePath)
			return nil
		}

		sourceName, metricName = m[0], m[1]

		if _, ok := c.metrics[sourceName]; !ok {
			c.metrics[sourceName] = make(map[string]*rrdMetric)
		}

		// Extract metric information from .rrd file
		info, err := rrd.Info(filePath)
		if err != nil {
			logger.Log(logger.LevelWarning, "connector", "rrd[%s]: %s", c.name, err)
			return nil
		}

		// Extract consolidation functions list
		cfSet := set.New(set.ThreadSafe)

		if cf, ok := info["rra.cf"].([]interface{}); ok {
			for _, entry := range cf {
				if name, ok := entry.(string); ok {
					cfSet.Add(name)
				}
			}
		}

		cfList := set.StringSlice(cfSet)

		if _, ok := info["ds.index"]; ok {
			indexes, ok := info["ds.index"].(map[string]interface{})
			if !ok {
				// Unexpected header layout: silently skip this file
				return nil
			}

			for dsName := range indexes {
				for _, cfName := range cfList {
					// Metric name layout: <metric>/<datasource>/<cf>
					metricFullName := metricName + "/" + dsName + "/" + strings.ToLower(cfName)

					// NOTE(review): assumes info["step"] is always a uint —
					// a different underlying type would panic here; confirm
					// against the rrd library's Info() contract.
					c.metrics[sourceName][metricFullName] = &rrdMetric{
						Dataset:  dsName,
						FilePath: filePath,
						Step:     time.Duration(info["step"].(uint)) * time.Second,
						Cf:       cfName,
					}

					outputChan <- &catalog.Record{
						Origin:    originName,
						Source:    sourceName,
						Metric:    metricFullName,
						Connector: c,
					}
				}
			}
		}

		return nil
	}

	if err := utils.WalkDir(c.path, walkFunc); err != nil {
		return err
	}

	return nil
}
func (server *Server) serveGroupExpand(writer http.ResponseWriter, request *http.Request) { if request.Method != "POST" { server.serveResponse(writer, serverResponse{mesgMethodNotAllowed}, http.StatusMethodNotAllowed) return } body, _ := ioutil.ReadAll(request.Body) query := ExpandRequest{} if err := json.Unmarshal(body, &query); err != nil { logger.Log(logger.LevelError, "server", "%s", err) server.serveResponse(writer, serverResponse{mesgResourceInvalid}, http.StatusBadRequest) return } response := make([]ExpandRequest, 0) for _, entry := range query { item := ExpandRequest{} if _, ok := server.Catalog.Origins[entry[0]]; !ok { continue } if strings.HasPrefix(entry[1], library.LibraryGroupPrefix) { for _, sourceName := range server.Library.ExpandGroup( strings.TrimPrefix(entry[1], library.LibraryGroupPrefix), library.LibraryItemSourceGroup, ) { if _, ok := server.Catalog.Origins[entry[0]].Sources[sourceName]; !ok { continue } if strings.HasPrefix(entry[2], library.LibraryGroupPrefix) { for _, metricName := range server.Library.ExpandGroup( strings.TrimPrefix(entry[2], library.LibraryGroupPrefix), library.LibraryItemMetricGroup, ) { if _, ok := server.Catalog.Origins[entry[0]].Sources[sourceName].Metrics[metricName]; !ok { continue } item = append(item, [3]string{entry[0], sourceName, metricName}) } } else { if _, ok := server.Catalog.Origins[entry[0]].Sources[sourceName].Metrics[entry[2]]; !ok { continue } item = append(item, [3]string{entry[0], sourceName, entry[2]}) } } } else if strings.HasPrefix(entry[2], library.LibraryGroupPrefix) { for _, metricName := range server.Library.ExpandGroup( strings.TrimPrefix(entry[2], library.LibraryGroupPrefix), library.LibraryItemMetricGroup, ) { if _, ok := server.Catalog.Origins[entry[0]].Sources[entry[1]].Metrics[metricName]; !ok { continue } item = append(item, [3]string{entry[0], entry[1], metricName}) } } else { item = append(item, entry) } sort.Sort(item) response = append(response, item) } server.serveResponse(writer, 
response, http.StatusOK) }