func loadLdapConfig() {
	if !setting.LdapEnabled {
		return
	}

	log.Info("Login: Ldap enabled, reading config file: %s", setting.LdapConfigFile)

	_, err := toml.DecodeFile(setting.LdapConfigFile, &ldapCfg)
	if err != nil {
		log.Fatal(3, "Failed to load ldap config file: %s", err)
	}

	if len(ldapCfg.Servers) == 0 {
		log.Fatal(3, "ldap enabled but no ldap servers defined in config file: %s", setting.LdapConfigFile)
	}

	// set default org id
	for _, server := range ldapCfg.Servers {
		assertNotEmptyCfg(server.SearchFilter, "search_filter")
		assertNotEmptyCfg(server.SearchBaseDNs, "search_base_dns")

		for _, groupMap := range server.LdapGroups {
			if groupMap.OrgId == 0 {
				groupMap.OrgId = 1
			}
		}
	}
}
func Construct() {
	cache, err := lru.New(setting.ExecutorLRUSize)
	if err != nil {
		panic(fmt.Sprintf("Can't create LRU: %s", err.Error()))
	}

	if setting.AlertingHandler != "amqp" && setting.AlertingHandler != "builtin" {
		log.Fatal(0, "alerting handler must be either 'builtin' or 'amqp'")
	}

	if setting.AlertingHandler == "amqp" {
		sec := setting.Cfg.Section("event_publisher")
		if !sec.Key("enabled").MustBool(false) {
			log.Fatal(0, "alerting handler 'amqp' requires the event_publisher to be enabled")
		}
		url := sec.Key("rabbitmq_url").String()
		if err := distributed(url, cache); err != nil {
			log.Fatal(0, "failed to start amqp consumer.", err)
		}
		return
	}

	if !setting.EnableScheduler {
		log.Fatal(0, "Alerting in standalone mode requires a scheduler (enable_scheduler = true)")
	}
	if setting.Executors == 0 {
		log.Fatal(0, "Alerting in standalone mode requires at least 1 executor (try: executors = 10)")
	}

	standalone(cache)
}
func Publish(event *schema.ProbeEvent) error {
	if !enabled {
		return nil
	}

	version := uint8(msgFormatJson)
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.LittleEndian, version)
	if err != nil {
		log.Fatal(0, "binary.Write failed: %s", err.Error())
	}

	id := time.Now().UnixNano()
	err = binary.Write(buf, binary.BigEndian, id)
	if err != nil {
		log.Fatal(0, "binary.Write failed: %s", err.Error())
	}

	msg, err := json.Marshal(event)
	if err != nil {
		return fmt.Errorf("Failed to marshal event payload: %s", err)
	}

	_, err = buf.Write(msg)
	if err != nil {
		log.Fatal(0, "buf.Write failed: %s", err.Error())
	}

	collectorEventPublisherMsgs.Inc(1)
	err = globalProducer.Publish(topic, buf.Bytes())
	if err != nil {
		panic(fmt.Errorf("can't publish to nsqd: %s", err))
	}
	log.Info("event published to NSQ %d", id)

	//globalProducer.Stop()
	return nil
}
func Init(metrics met.Backend) {
	sec := setting.Cfg.Section("metric_publisher")
	if !sec.Key("enabled").MustBool(false) {
		return
	}

	addr := sec.Key("nsqd_addr").MustString("localhost:4150")
	topic = sec.Key("topic").MustString("metrics")

	cfg := nsq.NewConfig()
	cfg.UserAgent = "probe-ctrl"

	var err error
	globalProducer, err = nsq.NewProducer(addr, cfg)
	if err != nil {
		log.Fatal(0, "failed to initialize nsq producer.", err)
	}
	err = globalProducer.Ping()
	if err != nil {
		log.Fatal(0, "can't connect to nsqd: %s", err)
	}

	metricsPublished = metrics.NewCount("metricpublisher.metrics-published")
	messagesPublished = metrics.NewCount("metricpublisher.messages-published")
	messagesSize = metrics.NewMeter("metricpublisher.message_size", 0)
	metricsPerMessage = metrics.NewMeter("metricpublisher.metrics_per_message", 0)
	publishDuration = metrics.NewTimer("metricpublisher.publish_duration", 0)
}
func NewEngine() {
	x, err := getEngine()
	if err != nil {
		log.Fatal(3, "Sqlstore: Fail to connect to database: %v", err)
	}

	err = SetEngine(x, true)
	if err != nil {
		log.Fatal(3, "fail to initialize orm engine: %v", err)
	}
}
func assertNotEmptyCfg(val interface{}, propName string) {
	switch v := val.(type) {
	case string:
		if v == "" {
			log.Fatal(3, "LDAP config file is missing option: %s", propName)
		}
	case []string:
		if len(v) == 0 {
			log.Fatal(3, "LDAP config file is missing option: %s", propName)
		}
	default:
		fmt.Println("unknown")
	}
}
func SocketIO(c *middleware.Context) {
	if server == nil {
		log.Fatal(4, "socket.io server not initialized.", nil)
	}

	server.ServeHTTP(c.Resp, c.Req.Request)
}
func main() {
	buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64)

	setting.BuildVersion = version
	setting.BuildCommit = commit
	setting.BuildStamp = buildstampInt64

	go listenToSystemSignels()

	flag.Parse()
	writePIDFile()
	initRuntime()

	search.Init()
	login.Init()
	social.NewOAuthService()
	eventpublisher.Init()
	plugins.Init()

	if err := notifications.Init(); err != nil {
		log.Fatal(3, "Notification service failed to initialize", err)
	}

	if setting.ReportingEnabled {
		go metrics.StartUsageReportLoop()
	}

	cmd.StartServer()
	exitChan <- 0
}
func loadSpecifedConfigFile(configFile string) {
	if configFile == "" {
		configFile = filepath.Join(HomePath, "conf/custom.ini")
		// return without error if custom file does not exist
		if !pathExists(configFile) {
			return
		}
	}

	userConfig, err := ini.Load(configFile)
	if err != nil {
		log.Fatal(3, "Failed to parse %v, %v", configFile, err)
	}
	userConfig.BlockMode = false

	for _, section := range userConfig.Sections() {
		for _, key := range section.Keys() {
			if key.Value() == "" {
				continue
			}

			defaultSec, err := Cfg.GetSection(section.Name())
			if err != nil {
				defaultSec, _ = Cfg.NewSection(section.Name())
			}
			defaultKey, err := defaultSec.GetKey(key.Name())
			if err != nil {
				defaultKey, _ = defaultSec.NewKey(key.Name(), key.Value())
			}
			defaultKey.SetValue(key.Value())
		}
	}

	configFiles = append(configFiles, configFile)
}
func EnsureAdminUser() {
	statsQuery := m.GetSystemStatsQuery{}

	if err := bus.Dispatch(&statsQuery); err != nil {
		log.Fatal(3, "Could not determine if admin user exists: %v", err)
		return
	}

	if statsQuery.Result.UserCount > 0 {
		return
	}

	cmd := m.CreateUserCommand{}
	cmd.Login = setting.AdminUser
	cmd.Email = setting.AdminUser + "@localhost"
	cmd.Password = setting.AdminPassword
	cmd.IsAdmin = true

	if err := bus.Dispatch(&cmd); err != nil {
		log.Error(3, "Failed to create default admin user", err)
		return
	}

	log.Info("Created default admin user: %v", setting.AdminUser)
}
func InitCollectorController(metrics met.Backend) {
	sec := setting.Cfg.Section("event_publisher")

	cmd := &m.ClearCollectorSessionCommand{
		InstanceId: setting.InstanceId,
	}
	if err := bus.Dispatch(cmd); err != nil {
		log.Fatal(0, "failed to clear collectorSessions", err)
	}

	if sec.Key("enabled").MustBool(false) {
		url := sec.Key("rabbitmq_url").String()
		exchange := sec.Key("exchange").String()
		exch := rabbitmq.Exchange{
			Name:         exchange,
			ExchangeType: "topic",
			Durable:      true,
		}
		q := rabbitmq.Queue{
			Name:       "",
			Durable:    false,
			AutoDelete: true,
			Exclusive:  true,
		}
		consumer := rabbitmq.Consumer{
			Url:        url,
			Exchange:   &exch,
			Queue:      &q,
			BindingKey: []string{"INFO.monitor.*", "INFO.collector.*"},
		}
		err := consumer.Connect()
		if err != nil {
			log.Fatal(0, "failed to start event.consumer.", err)
		}
		consumer.Consume(eventConsumer)
	} else {
		// tap into the update/add/delete events emitted when monitors are modified.
		bus.AddEventListener(EmitUpdateMonitor)
		bus.AddEventListener(EmitAddMonitor)
		bus.AddEventListener(EmitDeleteMonitor)
		bus.AddEventListener(HandleCollectorConnected)
		bus.AddEventListener(HandleCollectorDisconnected)
	}

	metricsRecvd = metrics.NewCount("collector-ctrl.metrics-recv")
	bufCh = make(chan m.MetricDefinition, runtime.NumCPU()*100)
	go metricpublisher.ProcessBuffer(bufCh)
}
func writePIDFile() {
	if *pidFile == "" {
		return
	}

	// Ensure the required directory structure exists.
	err := os.MkdirAll(filepath.Dir(*pidFile), 0700)
	if err != nil {
		log.Fatal(3, "Failed to verify pid directory", err)
	}

	// Retrieve the PID and write it.
	pid := strconv.Itoa(os.Getpid())
	if err := ioutil.WriteFile(*pidFile, []byte(pid), 0644); err != nil {
		log.Fatal(3, "Failed to write pidfile", err)
	}
}
func getLogLevel(key string, defaultName string) (string, int) {
	levelName := Cfg.Section(key).Key("level").In(defaultName, []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"})

	level, ok := logLevels[levelName]
	if !ok {
		log.Fatal(4, "Unknown log level: %s", levelName)
	}

	return levelName, level
}
func Publish(metrics []*schema.MetricData) error {
	if globalProducer == nil {
		return nil
	}
	if len(metrics) == 0 {
		return nil
	}
	// typical metrics seem to be around 300B
	// nsqd allows <= 10MiB messages.
	// we ideally have 64kB ~ 1MiB messages (see benchmark https://gist.github.com/Dieterbe/604232d35494eae73f15)
	// at 300B, about 3500 metrics fit in 1MiB
	// in worst case, this allows messages up to 2871B
	// this could be made more robust of course

	// real world findings in dev-stack with env-load:
	// 159569B msg / 795 metrics per msg = 200B per metric
	// so peak message size is about 3500*200 = 700k (seen 711k)
	subslices := Reslice(metrics, 3500)

	for _, subslice := range subslices {
		id := time.Now().UnixNano()
		data, err := msg.CreateMsg(subslice, id, msg.FormatMetricDataArrayMsgp)
		if err != nil {
			log.Fatal(0, "Fatal error creating metric message: %s", err)
		}

		metricsPublished.Inc(int64(len(subslice)))
		messagesPublished.Inc(1)
		messagesSize.Value(int64(len(data)))
		metricsPerMessage.Value(int64(len(subslice)))

		pre := time.Now()
		err = globalProducer.Publish(topic, data)
		publishDuration.Value(time.Since(pre))
		if err != nil {
			log.Fatal(0, "can't publish to nsqd: %s", err)
		}
		log.Info("published metrics %d size=%d", id, len(data))
	}

	//globalProducer.Stop()
	return nil
}
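// Publish above depends on a Reslice helper (not shown here) that splits the input
// into chunks of at most 3500 metrics. The following is only a minimal sketch of what
// such a chunking helper could look like, assuming it cuts the slice into consecutive
// sub-slices of at most max elements; the real Reslice may differ.
func resliceSketch(in []*schema.MetricData, max int) [][]*schema.MetricData {
	out := make([][]*schema.MetricData, 0, len(in)/max+1)
	for len(in) > max {
		out = append(out, in[:max])
		in = in[max:]
	}
	if len(in) > 0 {
		out = append(out, in)
	}
	return out
}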
func main() {
	buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64)

	setting.BuildVersion = version
	setting.BuildCommit = commit
	setting.BuildStamp = buildstampInt64

	go listenToSystemSignels()

	flag.Parse()
	writePIDFile()
	initRuntime()

	if setting.ProfileHeapMB > 0 {
		errors := make(chan error)
		go func() {
			for e := range errors {
				log.Error(0, e.Error())
			}
		}()
		heap, _ := heap.New(setting.ProfileHeapDir, setting.ProfileHeapMB*1000000, setting.ProfileHeapWait, time.Duration(1)*time.Second, errors)
		go heap.Run()
	}

	search.Init()
	login.Init()
	social.NewOAuthService()
	eventpublisher.Init()
	plugins.Init()
	elasticstore.Init()

	metricsBackend, err := helper.New(setting.StatsdEnabled, setting.StatsdAddr, setting.StatsdType, "grafana", setting.InstanceId)
	if err != nil {
		log.Error(3, "Statsd client:", err)
	}
	metricpublisher.Init(metricsBackend)
	collectoreventpublisher.Init(metricsBackend)
	api.InitCollectorController(metricsBackend)

	if setting.AlertingEnabled {
		alerting.Init(metricsBackend)
		alerting.Construct()
	}

	if err := notifications.Init(); err != nil {
		log.Fatal(3, "Notification service failed to initialize", err)
	}

	if setting.ReportingEnabled {
		go metrics.StartUsageReportLoop()
	}

	cmd.StartServer()
	exitChan <- 0
}
func getEnv(s cfenv.Service, key string) string {
	data := s.Credentials[key]
	if str, ok := data.(string); ok {
		return str
	} else if f, ok := data.(float64); ok {
		return strconv.Itoa(int(f))
	} else {
		log.Fatal(3, "Failed to get env", key, ok)
		return ""
	}
}
func StartServer() {
	var err error
	m := newMacaron()
	api.Register(m)

	listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort)
	log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubUrl)

	switch setting.Protocol {
	case setting.HTTP:
		err = http.ListenAndServe(listenAddr, m)
	case setting.HTTPS:
		err = http.ListenAndServeTLS(listenAddr, setting.CertFile, setting.KeyFile, m)
	default:
		log.Fatal(4, "Invalid protocol: %s", setting.Protocol)
	}

	if err != nil {
		log.Fatal(4, "Fail to start server: %v", err)
	}
}
func loadConfiguration(args *CommandLineArgs) {
	var err error

	// load config defaults
	defaultConfigFile := path.Join(HomePath, "conf/defaults.ini")
	configFiles = append(configFiles, defaultConfigFile)

	Cfg, err = ini.Load(defaultConfigFile)
	if err != nil {
		log.Fatal(3, "Failed to parse defaults.ini, %v", err)
	}
	Cfg.BlockMode = false

	// command line props
	commandLineProps := getCommandLineProperties(args.Args)
	// load default overrides
	applyCommandLineDefaultProperties(commandLineProps)

	// load specified config file
	err = loadSpecifedConfigFile(args.Config)
	if err != nil {
		initLogging()
		log.Fatal(3, err.Error())
	}

	// apply environment overrides
	applyEnvVariableOverrides()

	// apply command line overrides
	applyCommandLineProperties(commandLineProps)

	// evaluate config values containing environment variables
	evalConfigValues()

	// update data path and logging config
	DataPath = makeAbsolute(Cfg.Section("paths").Key("data").String(), HomePath)
	initLogging()
}
func Decrypt(payload []byte, secret string) []byte {
	salt := payload[:saltLength]
	key := encryptionKeyToBytes(secret, string(salt))

	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(4, err.Error())
	}

	// The IV needs to be unique, but not secure. Therefore it's common to
	// include it at the beginning of the ciphertext.
	if len(payload) < saltLength+aes.BlockSize {
		log.Fatal(4, "payload too short")
	}
	iv := payload[saltLength : saltLength+aes.BlockSize]
	payload = payload[saltLength+aes.BlockSize:]

	stream := cipher.NewCFBDecrypter(block, iv)

	// XORKeyStream can work in-place if the two arguments are the same.
	stream.XORKeyStream(payload, payload)

	return payload
}
func Encrypt(payload []byte, secret string) []byte {
	salt := GetRandomString(saltLength)
	key := encryptionKeyToBytes(secret, salt)

	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(4, err.Error())
	}

	// The IV needs to be unique, but not secure. Therefore it's common to
	// include it at the beginning of the ciphertext.
	ciphertext := make([]byte, saltLength+aes.BlockSize+len(payload))
	copy(ciphertext[:saltLength], []byte(salt))
	iv := ciphertext[saltLength : saltLength+aes.BlockSize]
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		log.Fatal(4, err.Error())
	}

	stream := cipher.NewCFBEncrypter(block, iv)
	stream.XORKeyStream(ciphertext[saltLength+aes.BlockSize:], payload)

	return ciphertext
}
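// A usage sketch for the Encrypt/Decrypt pair above: the returned ciphertext is laid
// out as [salt | IV | encrypted payload], so Decrypt can recover the plaintext when
// given the same secret. The secret and message values here are purely illustrative.
func encryptRoundTripExample() {
	secret := "not-a-real-secret"
	ciphertext := Encrypt([]byte("hello world"), secret)
	plaintext := Decrypt(ciphertext, secret)
	fmt.Printf("recovered: %s\n", string(plaintext))
}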
func NewAggMetrics(chunkSpan, numChunks, chunkMaxStale, metricMaxStale uint32, aggSettings []aggSetting) *AggMetrics {
	ms := AggMetrics{
		Metrics:        make(map[string]*AggMetric),
		chunkSpan:      chunkSpan,
		numChunks:      numChunks,
		aggSettings:    aggSettings,
		chunkMaxStale:  chunkMaxStale,
		metricMaxStale: metricMaxStale,
	}

	// open data file
	dataFile, err := os.Open(*dumpFile)
	if err == nil {
		log.Info("loading aggMetrics from file " + *dumpFile)
		dataDecoder := gob.NewDecoder(dataFile)
		err = dataDecoder.Decode(&ms)
		if err != nil {
			log.Error(3, "failed to load aggMetrics from file. %s", err)
		}
		dataFile.Close()
		log.Info("aggMetrics loaded from file.")

		if ms.numChunks != numChunks {
			if ms.numChunks > numChunks {
				log.Fatal(3, "numChunks can not be decreased.")
			}
			log.Info("numChunks has changed. Updating memory structures.")
			sem := make(chan bool, *concurrency)
			for _, m := range ms.Metrics {
				sem <- true
				// pass m explicitly so each goroutine works on its own metric
				// rather than on the loop variable shared across iterations.
				go func(m *AggMetric) {
					m.GrowNumChunks(numChunks)
					<-sem
				}(m)
			}
			for i := 0; i < cap(sem); i++ {
				sem <- true
			}
			ms.numChunks = numChunks
			log.Info("memory structures updated.")
		}
	} else {
		log.Info("starting with fresh aggmetrics.")
	}

	go ms.stats()
	go ms.GC()

	return &ms
}
func parseAppUrlAndSubUrl(section *ini.Section) (string, string) {
	appUrl := section.Key("root_url").MustString("http://localhost:3000/")
	if appUrl[len(appUrl)-1] != '/' {
		appUrl += "/"
	}

	// Check if has app suburl.
	url, err := url.Parse(appUrl)
	if err != nil {
		log.Fatal(4, "Invalid root_url(%s): %s", appUrl, err)
	}
	appSubUrl := strings.TrimSuffix(url.Path, "/")

	return appUrl, appSubUrl
}
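// A small sketch of how the sub URL falls out of root_url parsing in the function
// above; the hostname and path below are hypothetical, not Grafana defaults.
func appSubUrlExample() {
	u, _ := url.Parse("http://play.example.com:3000/grafana/")
	fmt.Println(strings.TrimSuffix(u.Path, "/")) // prints "/grafana"; the default "http://localhost:3000/" yields ""
}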
func NewMetricMeta(name string, tagStrings []string) *MetricMeta {
	if len(tagStrings)%2 != 0 {
		log.Fatal(3, "Metrics: tags array is missing value for key, %v", tagStrings)
	}

	tags := make(map[string]string)
	for i := 0; i < len(tagStrings); i += 2 {
		tags[tagStrings[i]] = tagStrings[i+1]
	}

	return &MetricMeta{
		tags: tags,
		name: name,
	}
}
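// tagStrings is a flat list of alternating keys and values; an odd-length slice is a
// programming error and aborts. A hypothetical call (metric name and tags are
// illustrative only):
//
//	meta := NewMetricMeta("api.request.latency", []string{"method", "GET", "status", "200"})
//
// which produces tags {"method": "GET", "status": "200"}.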
func loadConfiguration(args *CommandLineArgs) {
	var err error

	// load config defaults
	defaultConfigFile := path.Join(HomePath, "conf/defaults.ini")
	configFiles = append(configFiles, defaultConfigFile)

	// check if config file exists
	if _, err := os.Stat(defaultConfigFile); os.IsNotExist(err) {
		fmt.Println("Grafana-server Init Failed: Could not find config defaults, make sure homepath command line parameter is set or working directory is homepath")
		os.Exit(1)
	}

	// load defaults
	Cfg, err = ini.Load(defaultConfigFile)
	if err != nil {
		fmt.Printf("Failed to parse defaults.ini, %v\n", err)
		os.Exit(1)
	}
	Cfg.BlockMode = false

	// command line props
	commandLineProps := getCommandLineProperties(args.Args)
	// load default overrides
	applyCommandLineDefaultProperties(commandLineProps)

	// load specified config file
	err = loadSpecifedConfigFile(args.Config)
	if err != nil {
		initLogging()
		log.Fatal(3, err.Error())
	}

	// apply environment overrides
	applyEnvVariableOverrides()

	// apply command line overrides
	applyCommandLineProperties(commandLineProps)

	// evaluate config values containing environment variables
	evalConfigValues()

	// update data path and logging config
	DataPath = makeAbsolute(Cfg.Section("paths").Key("data").String(), HomePath)
	initLogging()
}
func init() {
	contextCache = NewContextCache()

	var err error
	server, err = socketio.NewServer([]string{"polling", "websocket"})
	if err != nil {
		log.Fatal(4, "failed to initialize socketio.", err)
		return
	}

	server.On("connection", func(so socketio.Socket) {
		c, err := register(so)
		if err != nil {
			if err == m.ErrInvalidApiKey {
				log.Info("collector failed to authenticate.")
			} else if err.Error() == "invalid collector version. Please upgrade." {
				log.Info("collector is wrong version")
			} else {
				log.Error(0, "Failed to initialize collector.", err)
			}
			so.Emit("error", err.Error())
			return
		}
		log.Info("connection registered without error")

		// get list of monitorTypes
		cmd := &m.GetMonitorTypesQuery{}
		if err := bus.Dispatch(cmd); err != nil {
			log.Error(0, "Failed to initialize collector.", err)
			so.Emit("error", err)
			return
		}

		log.Info("sending ready event to collector %s", c.Collector.Name)
		readyPayload := map[string]interface{}{
			"collector":     c.Collector,
			"monitor_types": cmd.Result,
			"socket_id":     c.SocketId,
		}
		c.Socket.Emit("ready", readyPayload)

		log.Info("binding event handlers for collector %s owned by OrgId: %d", c.Collector.Name, c.OrgId)
		c.Socket.On("event", c.OnEvent)
		c.Socket.On("results", c.OnResults)
		c.Socket.On("disconnection", c.OnDisconnection)

		log.Info("calling refresh for collector %s owned by OrgId: %d", c.Collector.Name, c.OrgId)
	})

	server.On("error", func(so socketio.Socket, err error) {
		log.Error(0, "socket emitted error", err)
	})
}
func initRuntime() {
	err := setting.NewConfigContext(&setting.CommandLineArgs{
		Config:   *configFile,
		HomePath: *homePath,
		Args:     flag.Args(),
	})
	if err != nil {
		log.Fatal(3, err.Error())
	}

	logger := log.New("main")
	logger.Info("Starting Grafana", "version", version, "commit", commit, "compiled", time.Unix(setting.BuildStamp, 0))

	setting.LogConfigurationInfo()
}
func initRuntime() {
	err := setting.NewConfigContext(&setting.CommandLineArgs{
		Config:   *configFile,
		HomePath: *homePath,
		Args:     flag.Args(),
	})
	if err != nil {
		log.Fatal(3, err.Error())
	}

	log.Info("Starting Grafana")
	log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0))

	setting.LogConfigurationInfo()

	sqlstore.NewEngine()
	sqlstore.EnsureAdminUser()
}
func getCommandLineProperties(args []string) map[string]string {
	props := make(map[string]string)

	for _, arg := range args {
		if !strings.HasPrefix(arg, "cfg:") {
			continue
		}

		trimmed := strings.TrimPrefix(arg, "cfg:")
		parts := strings.Split(trimmed, "=")
		if len(parts) != 2 {
			log.Fatal(3, "Invalid command line argument", arg)
			return nil
		}

		props[parts[0]] = parts[1]
	}

	return props
}
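// Arguments are expected in the form "cfg:key=value"; anything prefixed with "cfg:"
// that does not contain exactly one "=" is fatal. A hypothetical invocation
// (key names are illustrative only):
//
//	grafana-server cfg:server.http_port=8080 cfg:paths.data=/tmp/grafana
//
// would produce {"server.http_port": "8080", "paths.data": "/tmp/grafana"}.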
func Init(metrics met.Backend) {
	sec := setting.Cfg.Section("collector_event_publisher")

	if !sec.Key("enabled").MustBool(false) {
		enabled = false
		return
	}
	enabled = true

	addr := sec.Key("nsqd_addr").MustString("localhost:4150")
	topic = sec.Key("topic").MustString("metrics")

	cfg := nsq.NewConfig()
	cfg.UserAgent = "probe-ctrl"

	var err error
	globalProducer, err = nsq.NewProducer(addr, cfg)
	if err != nil {
		log.Fatal(0, "failed to initialize nsq producer.", err)
	}

	collectorEventPublisherMsgs = metrics.NewCount("collectoreventpublisher.events-published")
}
func Init() {
	log.Info("statsdclient enabled:%t addr:%s", setting.StatsdEnabled, setting.StatsdAddr)

	s, err := statsd.NewClient(setting.StatsdEnabled, setting.StatsdAddr, "grafana")
	if err != nil {
		log.Error(3, "Statsd client:", err)
	}
	Stat = s

	sec := setting.Cfg.Section("event_publisher")
	if sec.Key("enabled").MustBool(false) {
		// rabbitmq is enabled, let's use it for our jobs.
		url := sec.Key("rabbitmq_url").String()
		if err := distributed(url); err != nil {
			log.Fatal(0, "failed to start amqp consumer.", err)
		}
		return
	}

	standalone()
}