func init() { metadata.AddMetricMeta( "bosun.email.sent", metadata.Counter, metadata.PerSecond, "The number of email notifications sent by Bosun.") metadata.AddMetricMeta( "bosun.email.sent_failed", metadata.Counter, metadata.PerSecond, "The number of email notifications that Bosun failed to send.") }
func init() { metadata.AddMetricMeta( "bosun.schedule.lock_time", metadata.Counter, metadata.MilliSecond, "Length of time spent waiting for or holding the schedule lock.") metadata.AddMetricMeta( "bosun.schedule.lock_count", metadata.Counter, metadata.Count, "Number of times the given caller acquired the lock.") }
func init() { metadata.AddMetricMeta("bosun.ping.resolved", metadata.Gauge, metadata.Bool, "1=Ping resolved to an IP Address. 0=Ping failed to resolve to an IP Address.") metadata.AddMetricMeta("bosun.ping.rtt", metadata.Gauge, metadata.MilliSecond, "The number of milliseconds for the echo reply to be received. Also known as Round Trip Time.") metadata.AddMetricMeta("bosun.ping.timeout", metadata.Gauge, metadata.Ok, "0=Ping responded before timeout. 1=Ping did not respond before 5 second timeout.") }
// AggregateMeta registers metadata for each aggregation of the given metric
// (avg, count, min, median, max, 95, 99). The count aggregation is always
// described as the number of samples per aggregation; the others reuse the
// given unit and description.
func AggregateMeta(metric string, unit metadata.Unit, desc string) {
	agStrings := []string{"avg", "count", "min", "median", "max", "95", "99"}
	for _, ag := range agStrings {
		if ag == "count" {
			metadata.AddMetricMeta(metric+"_"+ag, metadata.Gauge, metadata.Count, "The number of samples per aggregation.")
			continue
		}
		metadata.AddMetricMeta(metric+"_"+ag, metadata.Gauge, unit, desc)
	}
}
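// Illustrative usage, mirroring the collect.AggregateMeta("bosun.template.render", ...)
// calls later in this section: a single call registers bosun.template.render_avg,
// _min, _median, _max, _95, and _99 as millisecond gauges with the given description,
// plus bosun.template.render_count as a gauge counting samples per aggregation.
// The wrapper name exampleAggregateMeta is hypothetical; the call itself matches the
// source.
func exampleAggregateMeta() {
	collect.AggregateMeta("bosun.template.render", metadata.MilliSecond,
		"The amount of time it takes to render the specified alert template.")
}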
func init() { metadata.AddMetricMeta("bosun.statefile.size", metadata.Gauge, metadata.Bytes, "The total size of the Bosun state file.") metadata.AddMetricMeta("bosun.check.duration", metadata.Gauge, metadata.Second, "The number of seconds it took Bosun to check each alert rule.") metadata.AddMetricMeta("bosun.check.err", metadata.Gauge, metadata.Error, "The running count of the number of errors Bosun has received while trying to evaluate an alert expression.") metadata.AddMetricMeta("bosun.actions", metadata.Gauge, metadata.Count, "The running count of actions performed by individual users (Closed alert, Acknowledged alert, etc).") }
func init() { metadata.AddMetricMeta( "bosun.alerts.current_severity", metadata.Gauge, metadata.Alert, "The number of open alerts by current severity.") metadata.AddMetricMeta( "bosun.alerts.last_abnormal_severity", metadata.Gauge, metadata.Alert, "The number of open alerts by last abnormal severity.") metadata.AddMetricMeta( "bosun.alerts.acknowledgement_status", metadata.Gauge, metadata.Alert, "The number of open alerts by acknowledgement status.") metadata.AddMetricMeta( "bosun.alerts.active_status", metadata.Gauge, metadata.Alert, "The number of open alerts by active status.") }
func init() {
	miniprofiler.Position = "bottomleft"
	miniprofiler.StartHidden = true
	miniprofiler.Enable = func(r *http.Request) bool {
		return r.Header.Get(miniprofilerHeader) != ""
	}
	metadata.AddMetricMeta("bosun.search.puts_relayed", metadata.Counter, metadata.Request,
		"The count of API put requests sent to Bosun for relaying to the backend server.")
	metadata.AddMetricMeta("bosun.search.datapoints_relayed", metadata.Counter, metadata.Item,
		"The count of data points sent to Bosun for relaying to the backend server.")
	metadata.AddMetricMeta("bosun.relay.bytes", metadata.Counter, metadata.BytesPerSecond,
		"Bytes per second relayed from Bosun to the backend server.")
	metadata.AddMetricMeta("bosun.relay.response", metadata.Counter, metadata.PerSecond,
		"HTTP response codes from the backend server for requests relayed through Bosun.")
}
func init() { metadata.AddMetricMeta( "bosun.alerts.current_severity", metadata.Gauge, metadata.Alert, "The number of open alerts by current severity.") metadata.AddMetricMeta( "bosun.alerts.last_abnormal_severity", metadata.Gauge, metadata.Alert, "The number of open alerts by last abnormal severity.") metadata.AddMetricMeta( "bosun.alerts.acknowledgement_status", metadata.Gauge, metadata.Alert, "The number of open alerts by acknowledgement status.") metadata.AddMetricMeta( "bosun.alerts.active_status", metadata.Gauge, metadata.Alert, "The number of open alerts by active status.") collect.AggregateMeta("bosun.template.render", metadata.MilliSecond, "The amount of time it takes to render the specified alert template.") }
func init() { metadata.AddMetricMeta("bosun.statefile.size", metadata.Gauge, metadata.Bytes, "The total size of the Bosun state file.") metadata.AddMetricMeta("bosun.check.duration", metadata.Gauge, metadata.Second, "The number of seconds it took Bosun to check each alert rule.") metadata.AddMetricMeta("bosun.check.err", metadata.Gauge, metadata.Error, "The running count of the number of errors Bosun has received while trying to evaluate an alert expression.") metadata.AddMetricMeta("bosun.ping.resolved", metadata.Gauge, metadata.Bool, "1=Ping resolved to an IP Address. 0=Ping failed to resolve to an IP Address.") metadata.AddMetricMeta("bosun.ping.rtt", metadata.Gauge, metadata.MilliSecond, "The number of milliseconds for the echo reply to be received. Also known as Round Trip Time.") metadata.AddMetricMeta("bosun.ping.timeout", metadata.Gauge, metadata.Ok, "0=Ping responded before timeout. 1=Ping did not respond before 5 second timeout.") metadata.AddMetricMeta("bosun.actions", metadata.Gauge, metadata.Count, "The running count of actions performed by individual users (Closed alert, Acknowledged alert, etc).") }
func init() { metadata.AddMetricMeta( "bosun.alerts.current_severity", metadata.Gauge, metadata.Alert, "The number of open alerts by current severity.") metadata.AddMetricMeta( "bosun.alerts.last_abnormal_severity", metadata.Gauge, metadata.Alert, "The number of open alerts by last abnormal severity.") metadata.AddMetricMeta( "bosun.alerts.acknowledgement_status", metadata.Gauge, metadata.Alert, "The number of open alerts by acknowledgement status.") metadata.AddMetricMeta( "bosun.alerts.active_status", metadata.Gauge, metadata.Alert, "The number of open alerts by active status.") metadata.AddMetricMeta("alerts.acknowledgement_status_by_notification", metadata.Gauge, metadata.Alert, "The number of alerts by acknowledgement status and notification. Does not reflect escalation chains.") metadata.AddMetricMeta("alerts.oldest_unacked_by_notification", metadata.Gauge, metadata.Second, "How old the oldest unacknowledged notification is by notification.. Does not reflect escalation chains.") collect.AggregateMeta("bosun.template.render", metadata.MilliSecond, "The amount of time it takes to render the specified alert template.") }
// InitChan is similar to Init, but uses the given channel instead of creating a
// new one.
func InitChan(tsdbhost *url.URL, root string, ch chan *opentsdb.DataPoint) error {
	if tchan != nil {
		return fmt.Errorf("cannot init twice")
	}
	if err := checkClean(root, "metric root"); err != nil {
		return err
	}
	u, err := tsdbhost.Parse("/api/put")
	if err != nil {
		return err
	}
	if strings.HasPrefix(u.Host, ":") {
		u.Host = "localhost" + u.Host
	}
	tsdbURL = u.String()
	metricRoot = root + "."
	tchan = ch
	go queuer()
	go send()
	go collect()
	if DisableDefaultCollectors {
		return nil
	}
	Set("collect.dropped", Tags, func() (i interface{}) {
		slock.Lock()
		i = dropped
		slock.Unlock()
		return
	})
	Set("collect.sent", Tags, func() (i interface{}) {
		slock.Lock()
		i = sent
		slock.Unlock()
		return
	})
	Set("collect.queued", Tags, func() (i interface{}) {
		qlock.Lock()
		i = len(queue)
		qlock.Unlock()
		return
	})
	Set("collect.alloc", Tags, func() interface{} {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms)
		return ms.Alloc
	})
	Set("collect.goroutines", Tags, func() interface{} {
		return runtime.NumGoroutine()
	})
	AggregateMeta(metricRoot+"collect.post.batchsize", metadata.Count, descCollectPostBatchSize)
	AggregateMeta(metricRoot+"collect.post.duration", metadata.MilliSecond, descCollectPostDuration)
	metadata.AddMetricMeta(metricRoot+"collect.alloc", metadata.Gauge, metadata.Bytes, descCollectAlloc)
	metadata.AddMetricMeta(metricRoot+"collect.goroutines", metadata.Gauge, metadata.Count, descCollectGoRoutines)
	metadata.AddMetricMeta(metricRoot+"collect.post.bad_status", metadata.Counter, metadata.PerSecond, descCollectPostBad)
	metadata.AddMetricMeta(metricRoot+"collect.post.count", metadata.Counter, metadata.PerSecond, descCollectPostCount)
	metadata.AddMetricMeta(metricRoot+"collect.post.error", metadata.Counter, metadata.PerSecond, descCollectPostError)
	metadata.AddMetricMeta(metricRoot+"collect.post.restore", metadata.Counter, metadata.PerSecond, descCollectPostRestore)
	metadata.AddMetricMeta(metricRoot+"collect.post.total_bytes", metadata.Counter, metadata.Bytes, descCollectPostTotalBytes)
	metadata.AddMetricMeta(metricRoot+"collect.post.total_duration", metadata.Counter, metadata.MilliSecond, descCollectPostTotalDuration)
	metadata.AddMetricMeta(metricRoot+"collect.queued", metadata.Gauge, metadata.Item, descCollectQueued)
	metadata.AddMetricMeta(metricRoot+"collect.sent", metadata.Counter, metadata.PerSecond, descCollectSent)
	metadata.AddMetricMeta(metricRoot+"collect.dropped", metadata.Counter, metadata.PerSecond, descCollectDropped)
	// Make sure these get zeroed out instead of going unknown on restart
	Add("collect.post.error", Tags, 0)
	Add("collect.post.bad_status", Tags, 0)
	Add("collect.post.restore", Tags, 0)
	return nil
}
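// A minimal usage sketch (not part of the package above) showing how a caller that
// owns the datapoint channel might wire it into InitChan, mirroring the
// collect.InitChan(u, "scollector", cdp) calls in the main functions later in this
// section. The endpoint, channel capacity, metric root "example", and the name
// exampleInitChan are illustrative assumptions; it assumes the usual url, slog,
// collect, and opentsdb imports.
func exampleInitChan() {
	u, err := url.Parse("http://localhost:4242") // assumed OpenTSDB endpoint
	if err != nil {
		slog.Fatal(err)
	}
	// The caller keeps writing datapoints to ch; collect queues them and posts
	// batches to u's /api/put endpoint.
	ch := make(chan *opentsdb.DataPoint, 1024)
	if err := collect.InitChan(u, "example", ch); err != nil {
		slog.Fatal(err)
	}
}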
func init() { metadata.AddMetricMeta("tsdbrelay.udp.packets", metadata.Counter, metadata.Count, "Number of valid udp packets received") metadata.AddMetricMeta("tsdbrelay.puts.relayed", metadata.Counter, metadata.Count, "Number of successful puts relayed") metadata.AddMetricMeta("tsdbrelay.metadata.relayed", metadata.Counter, metadata.Count, "Number of successful metadata puts relayed") }
func init() { metadata.AddMetricMeta("tsdbrelay.puts.relayed", metadata.Counter, metadata.Count, "Number of successful puts relayed") metadata.AddMetricMeta("tsdbrelay.metadata.relayed", metadata.Counter, metadata.Count, "Number of successful metadata puts relayed") }
func main() {
	flag.Parse()
	if *flagToToml != "" {
		toToml(*flagToToml)
		fmt.Println("toml conversion complete; remove all empty values by hand (empty strings, 0)")
		return
	}
	if *flagPrint || *flagDebug {
		slog.Set(&slog.StdLog{Log: log.New(os.Stdout, "", log.LstdFlags)})
	}
	if *flagVersion {
		fmt.Println(version.GetVersionInfo("scollector"))
		os.Exit(0)
	}
	for _, m := range mains {
		m()
	}
	conf := readConf()
	if *flagHost != "" {
		conf.Host = *flagHost
	}
	if *flagFilter != "" {
		conf.Filter = strings.Split(*flagFilter, ",")
	}
	if !conf.Tags.Valid() {
		slog.Fatalf("invalid tags: %v", conf.Tags)
	} else if conf.Tags["host"] != "" {
		slog.Fatalf("host not supported in custom tags, use Hostname instead")
	}
	if conf.PProf != "" {
		go func() {
			slog.Infof("Starting pprof at http://%s/debug/pprof/", conf.PProf)
			slog.Fatal(http.ListenAndServe(conf.PProf, nil))
		}()
	}
	collectors.AddTags = conf.Tags
	util.FullHostname = conf.FullHost
	util.Set()
	if conf.Hostname != "" {
		util.Hostname = conf.Hostname
	}
	if err := collect.SetHostname(util.Hostname); err != nil {
		slog.Fatal(err)
	}
	if conf.ColDir != "" {
		collectors.InitPrograms(conf.ColDir)
	}
	var err error
	check := func(e error) {
		if e != nil {
			err = e
		}
	}
	collectors.Init(conf)
	for _, r := range conf.MetricFilters {
		check(collectors.AddMetricFilters(r))
	}
	for _, rmq := range conf.RabbitMQ {
		check(collectors.RabbitMQ(rmq.URL))
	}
	for _, cfg := range conf.SNMP {
		check(collectors.SNMP(cfg, conf.MIBS))
	}
	for _, i := range conf.ICMP {
		check(collectors.ICMP(i.Host))
	}
	for _, a := range conf.AWS {
		check(collectors.AWS(a.AccessKey, a.SecretKey, a.Region))
	}
	for _, v := range conf.Vsphere {
		check(collectors.Vsphere(v.User, v.Password, v.Host))
	}
	for _, p := range conf.Process {
		check(collectors.AddProcessConfig(p))
	}
	for _, p := range conf.ProcessDotNet {
		check(collectors.AddProcessDotNetConfig(p))
	}
	for _, h := range conf.HTTPUnit {
		if h.TOML != "" {
			check(collectors.HTTPUnitTOML(h.TOML))
		}
		if h.Hiera != "" {
			check(collectors.HTTPUnitHiera(h.Hiera))
		}
	}
	for _, r := range conf.Riak {
		check(collectors.Riak(r.URL))
	}
	for _, x := range conf.ExtraHop {
		check(collectors.ExtraHop(x.Host, x.APIKey, x.FilterBy, x.FilterPercent))
	}
	if err != nil {
		slog.Fatal(err)
	}
	collectors.KeepalivedCommunity = conf.KeepalivedCommunity
	// Add all process collectors. This is platform specific.
	collectors.WatchProcesses()
	collectors.WatchProcessesDotNet()
	if *flagFake > 0 {
		collectors.InitFake(*flagFake)
	}
	collect.Debug = *flagDebug
	util.Debug = *flagDebug
	collect.DisableDefaultCollectors = conf.DisableSelf
	c := collectors.Search(conf.Filter)
	if len(c) == 0 {
		slog.Fatalf("Filter %v matches no collectors.", conf.Filter)
	}
	for _, col := range c {
		col.Init()
	}
	u, err := parseHost(conf.Host)
	if *flagList {
		list(c)
		return
	} else if *flagPrint {
		u = &url.URL{Scheme: "http", Host: "localhost:0"}
	} else if err != nil {
		slog.Fatalf("invalid host %v: %v", conf.Host, err)
	}
	freq := time.Second * time.Duration(conf.Freq)
	if freq <= 0 {
		slog.Fatal("freq must be > 0")
	}
	collectors.DefaultFreq = freq
	collect.Freq = freq
	if conf.BatchSize < 0 {
		slog.Fatal("BatchSize must be > 0")
	}
	if conf.BatchSize != 0 {
		collect.BatchSize = conf.BatchSize
	}
	collect.Tags = conf.Tags.Copy().Merge(opentsdb.TagSet{"os": runtime.GOOS})
	if *flagPrint {
		collect.Print = true
	}
	if !*flagDisableMetadata {
		if err := metadata.Init(u, *flagDebug); err != nil {
			slog.Fatal(err)
		}
	}
	cdp, cquit := collectors.Run(c)
	if u != nil {
		slog.Infoln("OpenTSDB host:", u)
	}
	if err := collect.InitChan(u, "scollector", cdp); err != nil {
		slog.Fatal(err)
	}
	if version.VersionDate != "" {
		v, err := strconv.ParseInt(version.VersionDate, 10, 64)
		if err == nil {
			go func() {
				metadata.AddMetricMeta("scollector.version", metadata.Gauge, metadata.None,
					"Scollector version number, which indicates when scollector was built.")
				for {
					if err := collect.Put("version", collect.Tags, v); err != nil {
						slog.Error(err)
					}
					time.Sleep(time.Hour)
				}
			}()
		}
	}
	if *flagBatchSize > 0 {
		collect.BatchSize = *flagBatchSize
	}
	go func() {
		const maxMem = 500 * 1024 * 1024 // 500MB
		var m runtime.MemStats
		for range time.Tick(time.Minute) {
			runtime.ReadMemStats(&m)
			if m.Alloc > maxMem {
				panic("memory max reached")
			}
		}
	}()
	sChan := make(chan os.Signal)
	signal.Notify(sChan, os.Interrupt)
	<-sChan
	close(cquit)
	// try to flush all datapoints on sigterm, but quit after 5 seconds no matter what.
	time.AfterFunc(5*time.Second, func() { os.Exit(0) })
	collect.Flush()
}
func main() {
	flag.Parse()
	if *flagToToml != "" {
		toToml(*flagToToml)
		fmt.Println("toml conversion complete; remove all empty values by hand (empty strings, 0)")
		return
	}
	if *flagPrint || *flagDebug {
		slog.Set(&slog.StdLog{Log: log.New(os.Stdout, "", log.LstdFlags)})
	}
	if *flagVersion {
		fmt.Println(version.GetVersionInfo("scollector"))
		os.Exit(0)
	}
	for _, m := range mains {
		m()
	}
	conf := readConf()
	ua := "Scollector/" + version.ShortVersion()
	if conf.UserAgentMessage != "" {
		ua += fmt.Sprintf(" (%s)", conf.UserAgentMessage)
	}
	client := &http.Client{
		Transport: &scollectorHTTPTransport{
			ua,
			&httpcontrol.Transport{
				RequestTimeout: time.Minute,
			},
		},
	}
	http.DefaultClient = client
	collect.DefaultClient = client
	if *flagHost != "" {
		conf.Host = *flagHost
	}
	if *flagNtlm {
		conf.UseNtlm = *flagNtlm
	}
	if *flagFilter != "" {
		conf.Filter = strings.Split(*flagFilter, ",")
	}
	if !conf.Tags.Valid() {
		slog.Fatalf("invalid tags: %v", conf.Tags)
	} else if conf.Tags["host"] != "" {
		slog.Fatalf("host not supported in custom tags, use Hostname instead")
	}
	if conf.PProf != "" {
		go func() {
			slog.Infof("Starting pprof at http://%s/debug/pprof/", conf.PProf)
			slog.Fatal(http.ListenAndServe(conf.PProf, nil))
		}()
	}
	collectors.AddTags = conf.Tags
	util.FullHostname = conf.FullHost
	util.Set()
	if conf.Hostname != "" {
		util.Hostname = conf.Hostname
	}
	if err := collect.SetHostname(util.Hostname); err != nil {
		slog.Fatal(err)
	}
	if conf.ColDir != "" {
		collectors.InitPrograms(conf.ColDir)
	}
	if conf.SNMPTimeout > 0 {
		snmp.Timeout = conf.SNMPTimeout
	}
	var err error
	check := func(e error) {
		if e != nil {
			err = e
		}
	}
	collectors.Init(conf)
	for _, r := range conf.MetricFilters {
		slog.Infof("Adding MetricFilter: %v\n", r)
		check(collectors.AddMetricFilters(r))
	}
	for _, rmq := range conf.RabbitMQ {
		check(collectors.RabbitMQ(rmq.URL))
	}
	for _, cfg := range conf.SNMP {
		check(collectors.SNMP(cfg, conf.MIBS))
	}
	for _, i := range conf.ICMP {
		check(collectors.ICMP(i.Host))
	}
	for _, a := range conf.AWS {
		check(collectors.AWS(a.AccessKey, a.SecretKey, a.Region, a.BillingProductCodesRegex, a.BillingBucketName, a.BillingBucketPath, a.BillingPurgeDays))
	}
	for _, ea := range conf.AzureEA {
		check(collectors.AzureEABilling(ea.EANumber, ea.APIKey, ea.LogBillingDetails))
	}
	for _, v := range conf.Vsphere {
		check(collectors.Vsphere(v.User, v.Password, v.Host))
	}
	for _, p := range conf.Process {
		check(collectors.AddProcessConfig(p))
	}
	for _, p := range conf.ProcessDotNet {
		check(collectors.AddProcessDotNetConfig(p))
	}
	for _, h := range conf.HTTPUnit {
		var freq time.Duration
		var parseerr error
		if h.Freq == "" {
			freq = time.Minute * 5
		} else {
			freq, parseerr = time.ParseDuration(h.Freq)
			if parseerr != nil {
				slog.Fatal(parseerr)
			}
			if freq < time.Second {
				slog.Fatalf("Invalid HTTPUnit frequency %s, cannot be less than 1 second.", h.Freq)
			}
		}
		if h.TOML != "" {
			check(collectors.HTTPUnitTOML(h.TOML, freq))
		}
		if h.Hiera != "" {
			check(collectors.HTTPUnitHiera(h.Hiera, freq))
		}
	}
	for _, r := range conf.Riak {
		check(collectors.Riak(r.URL))
	}
	for _, x := range conf.ExtraHop {
		check(collectors.ExtraHop(x.Host, x.APIKey, x.FilterBy, x.FilterPercent, x.AdditionalMetrics, x.CertificateSubjectMatch, x.CertificateActivityGroup))
	}
	if err != nil {
		slog.Fatal(err)
	}
	collectors.KeepalivedCommunity = conf.KeepalivedCommunity
	// Add all process collectors. This is platform specific.
	collectors.WatchProcesses()
	collectors.WatchProcessesDotNet()
	if *flagFake > 0 {
		collectors.InitFake(*flagFake)
	}
	collect.Debug = *flagDebug
	util.Debug = *flagDebug
	collect.DisableDefaultCollectors = conf.DisableSelf
	c := collectors.Search(conf.Filter)
	if len(c) == 0 {
		slog.Fatalf("Filter %v matches no collectors.", conf.Filter)
	}
	for _, col := range c {
		col.Init()
	}
	err = collectors.AddTagOverrides(c, conf.TagOverride)
	if err != nil {
		slog.Fatalf("Error adding tag overrides: %s", err)
	}
	u, err := parseHost(conf.Host)
	if *flagList {
		list(c)
		return
	} else if *flagPrint {
		u = &url.URL{Scheme: "http", Host: "localhost:0"}
	} else if err != nil {
		slog.Fatalf("invalid host %v: %v", conf.Host, err)
	}
	freq := time.Second * time.Duration(conf.Freq)
	if freq <= 0 {
		slog.Fatal("freq must be > 0")
	}
	collectors.DefaultFreq = freq
	collect.Freq = freq
	if conf.BatchSize < 0 {
		slog.Fatal("BatchSize must be > 0")
	}
	if conf.BatchSize != 0 {
		collect.BatchSize = conf.BatchSize
	}
	collect.Tags = conf.Tags.Copy().Merge(opentsdb.TagSet{"os": runtime.GOOS})
	if *flagPrint {
		collect.Print = true
	}
	if !*flagDisableMetadata {
		if err := metadata.Init(u, *flagDebug); err != nil {
			slog.Fatal(err)
		}
	}
	cdp, cquit := collectors.Run(c)
	if u != nil {
		slog.Infoln("OpenTSDB host:", u)
	}
	collect.UseNtlm = conf.UseNtlm
	if err := collect.InitChan(u, "scollector", cdp); err != nil {
		slog.Fatal(err)
	}
	if !collect.DisableDefaultCollectors && version.VersionDate != "" {
		v, err := strconv.ParseInt(version.VersionDate, 10, 64)
		if err == nil {
			go func() {
				metadata.AddMetricMeta("scollector.version", metadata.Gauge, metadata.None,
					"Scollector version number, which indicates when scollector was built.")
				for {
					if err := collect.Put("version", collect.Tags, v); err != nil {
						slog.Error(err)
					}
					time.Sleep(time.Hour)
				}
			}()
		}
	}
	if *flagBatchSize > 0 {
		collect.BatchSize = *flagBatchSize
	}
	if conf.MaxQueueLen != 0 {
		if conf.MaxQueueLen < collect.BatchSize {
			slog.Fatalf("MaxQueueLen must be >= %d (BatchSize)", collect.BatchSize)
		}
		collect.MaxQueueLen = conf.MaxQueueLen
	}
	maxMemMB := uint64(500)
	if conf.MaxMem != 0 {
		maxMemMB = conf.MaxMem
	}
	go func() {
		var m runtime.MemStats
		for range time.Tick(time.Second * 30) {
			runtime.ReadMemStats(&m)
			allocMB := m.Alloc / 1024 / 1024
			if allocMB > maxMemMB {
				slog.Fatalf("memory max runtime reached: (current alloc: %v megabytes, max: %v megabytes)", allocMB, maxMemMB)
			}
			// See process_windows.go and process_linux.go for total process memory usage.
			// Note that in Linux the rss metric includes shared pages, whereas in
			// Windows the private working set does not include shared memory.
			// Total memory used seems to scale linearly with m.Alloc.
			// But we want this to catch a memory leak outside the runtime (WMI/CGO).
			// So for now just add any runtime allocations to the allowed total limit.
			maxMemTotalMB := maxMemMB + allocMB
			if collectors.TotalScollectorMemoryMB > maxMemTotalMB {
				slog.Fatalf("memory max total reached: (current total: %v megabytes, current runtime alloc: %v megabytes, max: %v megabytes)", collectors.TotalScollectorMemoryMB, allocMB, maxMemTotalMB)
			}
		}
	}()
	sChan := make(chan os.Signal)
	signal.Notify(sChan, os.Interrupt)
	<-sChan
	close(cquit)
	// try to flush all datapoints on sigterm, but quit after 5 seconds no matter what.
	time.AfterFunc(5*time.Second, func() { os.Exit(0) })
	collect.Flush()
}
func init() { metadata.AddMetricMeta("bosun.search.index_queue", metadata.Gauge, metadata.Count, "Number of datapoints queued for indexing to redis") metadata.AddMetricMeta("bosun.search.dropped", metadata.Counter, metadata.Count, "Number of datapoints discarded without being saved to redis") }
func main() {
	flag.Parse()
	if *flagToToml != "" {
		toToml(*flagToToml)
		fmt.Println("toml conversion complete; remove all empty values by hand (empty strings, 0)")
		return
	}
	if *flagPrint || *flagDebug {
		slog.Set(&slog.StdLog{Log: log.New(os.Stdout, "", log.LstdFlags)})
	}
	if *flagVersion {
		fmt.Println(version.GetVersionInfo("scollector"))
		os.Exit(0)
	}
	for _, m := range mains {
		m()
	}
	conf := readConf()
	if *flagHost != "" {
		conf.Host = *flagHost
	}
	if *flagFilter != "" {
		conf.Filter = strings.Split(*flagFilter, ",")
	}
	if !conf.Tags.Valid() {
		slog.Fatalf("invalid tags: %v", conf.Tags)
	} else if conf.Tags["host"] != "" {
		slog.Fatalf("host not supported in custom tags, use Hostname instead")
	}
	collectors.AddTags = conf.Tags
	util.FullHostname = conf.FullHost
	util.Set()
	if conf.Hostname != "" {
		util.Hostname = conf.Hostname
		if err := collect.SetHostname(conf.Hostname); err != nil {
			slog.Fatal(err)
		}
	}
	if conf.ColDir != "" {
		collectors.InitPrograms(conf.ColDir)
	}
	var err error
	check := func(e error) {
		if e != nil {
			err = e
		}
	}
	for _, h := range conf.HAProxy {
		for _, i := range h.Instances {
			collectors.HAProxy(h.User, h.Password, i.Tier, i.URL)
		}
	}
	for _, s := range conf.SNMP {
		check(collectors.SNMP(s.Community, s.Host))
	}
	for _, i := range conf.ICMP {
		check(collectors.ICMP(i.Host))
	}
	for _, a := range conf.AWS {
		check(collectors.AWS(a.AccessKey, a.SecretKey, a.Region))
	}
	for _, v := range conf.Vsphere {
		check(collectors.Vsphere(v.User, v.Password, v.Host))
	}
	for _, p := range conf.Process {
		check(collectors.AddProcessConfig(p))
	}
	for _, h := range conf.HTTPUnit {
		if h.TOML != "" {
			check(collectors.HTTPUnitTOML(h.TOML))
		}
		if h.Hiera != "" {
			check(collectors.HTTPUnitHiera(h.Hiera))
		}
	}
	if err != nil {
		slog.Fatal(err)
	}
	collectors.KeepalivedCommunity = conf.KeepalivedCommunity
	// Add all process collectors. This is platform specific.
	collectors.WatchProcesses()
	collectors.WatchProcessesDotNet()
	if *flagFake > 0 {
		collectors.InitFake(*flagFake)
	}
	collect.Debug = *flagDebug
	util.Debug = *flagDebug
	collect.DisableDefaultCollectors = conf.DisableSelf
	c := collectors.Search(conf.Filter)
	if len(c) == 0 {
		slog.Fatalf("Filter %v matches no collectors.", conf.Filter)
	}
	for _, col := range c {
		col.Init()
	}
	u, err := parseHost(conf.Host)
	if *flagList {
		list(c)
		return
	} else if err != nil {
		slog.Fatalf("invalid host %v: %v", conf.Host, err)
	}
	freq := time.Second * time.Duration(conf.Freq)
	if freq <= 0 {
		slog.Fatal("freq must be > 0")
	}
	collectors.DefaultFreq = freq
	collect.Freq = freq
	collect.Tags = opentsdb.TagSet{"os": runtime.GOOS}
	if *flagPrint {
		collect.Print = true
	}
	if !*flagDisableMetadata {
		if err := metadata.Init(u, *flagDebug); err != nil {
			slog.Fatal(err)
		}
	}
	cdp := collectors.Run(c)
	if u != nil {
		slog.Infoln("OpenTSDB host:", u)
	}
	if err := collect.InitChan(u, "scollector", cdp); err != nil {
		slog.Fatal(err)
	}
	if version.VersionDate != "" {
		v, err := strconv.ParseInt(version.VersionDate, 10, 64)
		if err == nil {
			go func() {
				metadata.AddMetricMeta("scollector.version", metadata.Gauge, metadata.None,
					"Scollector version number, which indicates when scollector was built.")
				for {
					if err := collect.Put("version", collect.Tags, v); err != nil {
						slog.Error(err)
					}
					time.Sleep(time.Hour)
				}
			}()
		}
	}
	if *flagBatchSize > 0 {
		collect.BatchSize = *flagBatchSize
	}
	go func() {
		const maxMem = 500 * 1024 * 1024 // 500MB
		var m runtime.MemStats
		for range time.Tick(time.Minute) {
			runtime.ReadMemStats(&m)
			if m.Alloc > maxMem {
				panic("memory max reached")
			}
		}
	}()
	select {}
}