Code example #1
// TODO update once we clean old data, then we should look at numChunks
func BenchmarkAggMetrics1000Metrics1Day(b *testing.B) {
	stats, _ := helper.New(false, "", "standard", "metrics_tank", "")
	initMetrics(stats)
	// we will store 10s metrics in 5 chunks of 2 hours
	// aggregate them into 5min buckets, stored in 1 chunk of 24 hours
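	// i.e. the raw 10s data covers 5 * 2h = 10h, while the 5min rollup covers 1 * 24h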
	chunkSpan := uint32(2 * 3600)
	numChunks := uint32(5)
	chunkMaxStale := uint32(3600)
	metricMaxStale := uint32(21600)
	aggSettings := []aggSetting{
		{
			span:      uint32(300),
			chunkSpan: uint32(24 * 3600),
			numChunks: uint32(1),
		},
	}

	keys := make([]string, 1000)
	for i := 0; i < 1000; i++ {
		keys[i] = fmt.Sprintf("hello.this.is.a.test.key.%d", i)
	}

	metrics := NewAggMetrics(chunkSpan, numChunks, chunkMaxStale, metricMaxStale, aggSettings)

	maxT := 3600 * 24 * uint32(b.N) // b.N in days
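	// one simulated day = 8640 ten-second steps, i.e. ~8.64M Add calls per day across the 1000 series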
	for t := uint32(1); t < maxT; t += 10 {
		for metricI := 0; metricI < 1000; metricI++ {
			m := metrics.GetOrCreate(keys[metricI])
			m.Add(t, float64(t))
		}
	}
}
Code example #2
File: main.go  Project: 0x20h/grafana
func main() {
	buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64)

	setting.BuildVersion = version
	setting.BuildCommit = commit
	setting.BuildStamp = buildstampInt64

	go listenToSystemSignels()

	flag.Parse()
	writePIDFile()
	initRuntime()

	if setting.ProfileHeapMB > 0 {
		errors := make(chan error)
		go func() {
			for e := range errors {
				log.Error(0, e.Error())
			}
		}()
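		// setting.ProfileHeapMB is converted to bytes (decimal megabytes) before being handed to heap.New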
		heap, _ := heap.New(setting.ProfileHeapDir, setting.ProfileHeapMB*1000000, setting.ProfileHeapWait, time.Duration(1)*time.Second, errors)
		go heap.Run()
	}

	search.Init()
	login.Init()
	social.NewOAuthService()
	eventpublisher.Init()
	plugins.Init()
	elasticstore.Init()

	metricsBackend, err := helper.New(setting.StatsdEnabled, setting.StatsdAddr, setting.StatsdType, "grafana", setting.InstanceId)
	if err != nil {
		log.Error(3, "Statsd client: %v", err)
	}
	metricpublisher.Init(metricsBackend)
	collectoreventpublisher.Init(metricsBackend)
	api.InitCollectorController(metricsBackend)
	if setting.AlertingEnabled {
		alerting.Init(metricsBackend)
		alerting.Construct()
	}

	if err := notifications.Init(); err != nil {
		log.Fatal(3, "Notification service failed to initialize: %v", err)
	}

	if setting.ReportingEnabled {
		go metrics.StartUsageReportLoop()
	}

	cmd.StartServer()
	exitChan <- 0
}
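
The listenToSystemSignels goroutine (spelling as in the source) is defined elsewhere in the project and not shown in this excerpt. A minimal standalone sketch of such a signal listener, modeled on the sigChan pattern used in code examples #4 and #5 below; the exitChan interaction is an assumption, not the project's actual implementation:

// signal_listener_sketch.go: illustrative only, not the grafana source.
package main

import (
	"os"
	"os/signal"
	"syscall"
)

var exitChan = make(chan int)

// listenToSystemSignels (sic, name kept as in the source) presumably blocks
// until SIGINT or SIGTERM arrives and then asks main to exit via exitChan.
func listenToSystemSignels() {
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
	<-sigChan
	exitChan <- 0
}

func main() {
	go listenToSystemSignels()
	code := <-exitChan // block until a signal arrives
	os.Exit(code)
}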
Code example #3
func TestAggMetric(t *testing.T) {
	stats, _ := helper.New(false, "", "standard", "metrics_tank", "")
	initMetrics(stats)

	c := NewChecker(t, NewAggMetric("foo", 100, 5, []aggSetting{}...))

	// basic case, single range
	c.Add(101, 101)
	c.Verify(100, 200, 101, 101)
	c.Add(105, 105)
	c.Verify(100, 199, 101, 105)
	c.Add(115, 115)
	c.Add(125, 125)
	c.Add(135, 135)
	c.Verify(100, 199, 101, 135)

	// add new ranges, aligned and unaligned
	c.Add(200, 200)
	c.Add(315, 315)
	c.Verify(100, 399, 101, 315)

	// get subranges
	c.Verify(120, 299, 101, 200)
	c.Verify(220, 299, 200, 200)
	c.Verify(312, 330, 315, 315)

	// border dancing. good for testing inclusivity and exclusivity
	c.Verify(100, 199, 101, 135)
	c.Verify(100, 200, 101, 135)
	c.Verify(100, 201, 101, 200)
	c.Verify(198, 199, 101, 135)
	c.Verify(199, 200, 101, 135)
	c.Verify(200, 201, 200, 200)
	c.Verify(201, 202, 200, 200)
	c.Verify(299, 300, 200, 200)
	c.Verify(300, 301, 315, 315)

	// skipping
	c.Add(510, 510)
	c.Add(512, 512)
	c.Verify(100, 599, 101, 512)

	// basic wraparound
	c.Add(610, 610)
	c.Add(612, 612)
	c.Add(710, 710)
	c.Add(712, 712)
	// TODO would be nice to test that it panics when requesting old range. something with recover?
	//c.Verify(100, 799, 101, 512)

	// largest range we have so far
	c.Verify(300, 799, 315, 712)
	// a smaller range
	c.Verify(502, 799, 510, 712)

	// the circular buffer had these ranges:
	// 100 200 300 skipped 500
	// then we made it:
	// 600 700 300 skipped 500
	// now we want to do another wraparound with a skip (the old data must have been cleared)
	// let's jump to the 1200 chunk: the accessible range should then be 800-1200
	// cleared 1200 cleared cleared cleared
	// we can't (and shouldn't, due to abstraction) test the clearing itself,
	// but we just check that we only get this point
	c.Add(1299, 1299)
	// TODO: implement skips and enable this
	//	c.Verify(800, 1299, 1299, 1299)
}
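
The border-dancing assertions above are consistent with reads working at chunk granularity: every 100s chunk whose span intersects the query window [from, to) is returned, with from inclusive and to exclusive, and Verify reports the first and last points of the returned chunks (which is why Verify(120, 299, ...) still starts at 101). A standalone sketch of that intersection rule, using illustrative names rather than the AggMetric implementation:

package main

import "fmt"

const chunkSpan = 100 // matches NewAggMetric("foo", 100, 5, ...) above

// chunksHit mirrors the convention the Verify calls exercise: a chunk with
// start t0 (covering [t0, t0+chunkSpan)) is returned when it intersects the
// query window [from, to), i.e. from is inclusive and to is exclusive.
func chunksHit(from, to uint32) []uint32 {
	hits := []uint32{}
	first := from - from%chunkSpan
	for t0 := first; t0 < to; t0 += chunkSpan {
		hits = append(hits, t0)
	}
	return hits
}

func main() {
	fmt.Println(chunksHit(100, 200)) // [100]     -> last point 135 in the test
	fmt.Println(chunksHit(100, 201)) // [100 200] -> last point 200
	fmt.Println(chunksHit(199, 200)) // [100]     -> still only the 100s chunk
	fmt.Println(chunksHit(200, 201)) // [200]
	fmt.Println(chunksHit(300, 301)) // [300]     -> contains the point at 315
}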
Code example #4
func main() {
	flag.Parse()

	// Only try to parse the conf file if it exists
	if _, err := os.Stat(*confFile); err == nil {
		conf, err := globalconf.NewWithOptions(&globalconf.Options{Filename: *confFile})
		if err != nil {
			log.Fatal(4, err.Error())
		}
		conf.ParseAll()
	}

	log.NewLogger(0, "console", fmt.Sprintf(`{"level": %d, "formatting":true}`, *logLevel))

	if *showVersion {
		fmt.Println("nsq_probe_events_to_elasticsearch")
		return
	}

	// NSQ channels whose names end in "#ephemeral" are not buffered to disk and
	// are deleted once their last client disconnects, so this consumer gets a
	// throwaway channel when none is configured.
	if *channel == "" {
		rand.Seed(time.Now().UnixNano())
		*channel = fmt.Sprintf("tail%06d#ephemeral", rand.Int()%999999)
	}

	if *topic == "" {
		log.Fatal(4, "--topic is required")
	}

	if *nsqdTCPAddrs == "" && *lookupdHTTPAddrs == "" {
		log.Fatal(4, "--nsqd-tcp-address or --lookupd-http-address required")
	}
	if *nsqdTCPAddrs != "" && *lookupdHTTPAddrs != "" {
		log.Fatal(4, "use --nsqd-tcp-address or --lookupd-http-address not both")
	}

	hostname, err := os.Hostname()
	if err != nil {
		log.Fatal(4, err.Error())
	}
	metrics, err := helper.New(true, *statsdAddr, *statsdType, "nsq_probe_events_to_elasticsearch", strings.Replace(hostname, ".", "_", -1))
	if err != nil {
		log.Fatal(4, err.Error())
	}

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	eventsToEsOK = metrics.NewCount("events_to_es.ok")
	eventsToEsFail = metrics.NewCount("events_to_es.fail")
	messagesSize = metrics.NewMeter("message_size", 0)
	msgsAge = metrics.NewMeter("message_age", 0)
	esPutDuration = metrics.NewTimer("es_put_duration", 0)
	msgsHandleOK = metrics.NewCount("handle.ok")
	msgsHandleFail = metrics.NewCount("handle.fail")

	cfg := nsq.NewConfig()
	cfg.UserAgent = "nsq_probe_events_to_elasticsearch"
	err = app.ParseOpts(cfg, *consumerOpts)
	if err != nil {
		log.Fatal(4, err.Error())
	}
	cfg.MaxInFlight = *maxInFlight

	consumer, err := insq.NewConsumer(*topic, *channel, cfg, "%s", metrics)
	if err != nil {
		log.Fatal(4, err.Error())
	}

	handler, err := NewESHandler()
	if err != nil {
		log.Fatal(4, err.Error())
	}

	consumer.AddConcurrentHandlers(handler, 80)

	nsqdAdds := strings.Split(*nsqdTCPAddrs, ",")
	if len(nsqdAdds) == 1 && nsqdAdds[0] == "" {
		nsqdAdds = []string{}
	}
	err = consumer.ConnectToNSQDs(nsqdAdds)
	if err != nil {
		log.Fatal(4, err.Error())
	}
	log.Info("connected to nsqd")

	lookupdAdds := strings.Split(*lookupdHTTPAddrs, ",")
	if len(lookupdAdds) == 1 && lookupdAdds[0] == "" {
		lookupdAdds = []string{}
	}
	err = consumer.ConnectToNSQLookupds(lookupdAdds)
	if err != nil {
		log.Fatal(4, err.Error())
	}
	go func() {
		log.Info("INFO starting listener for http/debug on %s", *listenAddr)
		httperr := http.ListenAndServe(*listenAddr, nil)
		if httperr != nil {
			log.Info(httperr.Error())
		}
	}()

	for {
		select {
		case <-consumer.StopChan:
			return
		case <-sigChan:
			consumer.Stop()
		}
	}
}
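
NewESHandler is not shown in this example; whatever it returns must satisfy go-nsq's Handler interface so that AddConcurrentHandlers can fan messages out over 80 goroutines. A bare-bones sketch of such a handler, illustrative only and not the project's Elasticsearch handler; the import path may differ by go-nsq vintage:

package main

import (
	"log"

	nsq "github.com/nsqio/go-nsq" // formerly github.com/bitly/go-nsq
)

// sketchHandler satisfies nsq.Handler: returning nil finishes the message,
// returning an error causes NSQ to requeue it.
type sketchHandler struct{}

func (h *sketchHandler) HandleMessage(m *nsq.Message) error {
	log.Printf("received %d bytes", len(m.Body))
	return nil
}

func main() {
	cfg := nsq.NewConfig()
	// topic and channel names here are placeholders.
	consumer, err := nsq.NewConsumer("probe_events", "sketch#ephemeral", cfg)
	if err != nil {
		log.Fatal(err)
	}
	consumer.AddConcurrentHandlers(&sketchHandler{}, 80)
	// ConnectToNSQD / ConnectToNSQLookupd would follow here, as in the example above.
}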
Code example #5
func main() {
	flag.Parse()

	// Only try to parse the conf file if it exists
	if _, err := os.Stat(*confFile); err == nil {
		conf, err := globalconf.NewWithOptions(&globalconf.Options{Filename: *confFile})
		if err != nil {
			log.Fatal(4, "error with configuration file: %s", err)
			os.Exit(1)
		}
		conf.ParseAll()
	}

	log.NewLogger(0, "console", fmt.Sprintf(`{"level": %d, "formatting":true}`, *logLevel))

	if *showVersion {
		fmt.Println("metrics_tank")
		return
	}
	if *instance == "" {
		log.Fatal(0, "instance can't be empty")
	}
	hostname, err := os.Hostname()
	if err != nil {
		log.Fatal(0, "failed to lookup hostname. %s", err)
	}
	stats, err := helper.New(true, *statsdAddr, *statsdType, "metric_tank", strings.Replace(hostname, ".", "_", -1))
	if err != nil {
		log.Fatal(0, "failed to initialize statsd. %s", err)
	}

	if *channel == "" {
		rand.Seed(time.Now().UnixNano())
		*channel = fmt.Sprintf("metric_tank%06d#ephemeral", rand.Int()%999999)
	}

	if *topic == "" {
		log.Fatal(0, "--topic is required")
	}

	if *nsqdTCPAddrs == "" && *lookupdHTTPAddrs == "" {
		log.Fatal(0, "--nsqd-tcp-address or --lookupd-http-address required")
	}
	if *nsqdTCPAddrs != "" && *lookupdHTTPAddrs != "" {
		log.Fatal(0, "use --nsqd-tcp-address or --lookupd-http-address not both")
	}
	// set default cassandra address if none is set.
	if *cassandraAddrs == "" {
		*cassandraAddrs = "localhost"
	}

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	cfg := nsq.NewConfig()
	cfg.UserAgent = "metrics_tank"
	err = app.ParseOpts(cfg, *consumerOpts)
	if err != nil {
		log.Fatal(0, "failed to parse nsq consumer options. %s", err)
	}
	cfg.MaxInFlight = *maxInFlight

	consumer, err := insq.NewConsumer(*topic, *channel, cfg, "%s", stats)
	if err != nil {
		log.Fatal(0, "Failed to create NSQ consumer. %s", err)
	}

	initMetrics(stats)

	err = InitCassandra()
	if err != nil {
		log.Fatal(4, "failed to initialize cassandra. %s", err)
	}

	// *aggSettings is a comma-separated list of aggregation bands, each written
	// as span:chunkSpan:numChunks (parsed field by field below)
	set := strings.Split(*aggSettings, ",")
	finalSettings := make([]aggSetting, 0)
	for _, v := range set {
		if v == "" {
			continue
		}
		fields := strings.Split(v, ":")
		if len(fields) != 3 {
			log.Fatal(0, "bad agg settings")
		}
		aggSpan, err := strconv.Atoi(fields[0])
		if err != nil {
			log.Fatal(0, "bad agg settings", err)
		}
		aggChunkSpan, err := strconv.Atoi(fields[1])
		if err != nil {
			log.Fatal(0, "bad agg settings", err)
		}
		aggNumChunks, err := strconv.Atoi(fields[2])
		if err != nil {
			log.Fatal(0, "bad agg settings", err)
		}
		finalSettings = append(finalSettings, aggSetting{uint32(aggSpan), uint32(aggChunkSpan), uint32(aggNumChunks)})
	}

	metrics = NewAggMetrics(uint32(*chunkSpan), uint32(*numChunks), uint32(*chunkMaxStale), uint32(*metricMaxStale), finalSettings)
	handler := NewHandler(metrics)
	consumer.AddConcurrentHandlers(handler, *concurrency)

	nsqdAdds := strings.Split(*nsqdTCPAddrs, ",")
	if len(nsqdAdds) == 1 && nsqdAdds[0] == "" {
		nsqdAdds = []string{}
	}
	err = consumer.ConnectToNSQDs(nsqdAdds)
	if err != nil {
		log.Fatal(4, "failed to connect to NSQDs. %s", err)
	}
	log.Info("connected to nsqd")

	lookupdAdds := strings.Split(*lookupdHTTPAddrs, ",")
	if len(lookupdAdds) == 1 && lookupdAdds[0] == "" {
		lookupdAdds = []string{}
	}
	err = consumer.ConnectToNSQLookupds(lookupdAdds)
	if err != nil {
		log.Fatal(4, "failed to connect to NSQLookupds. %s", err)
	}

	go func() {
		m := &runtime.MemStats{}
		for range time.Tick(time.Duration(1) * time.Second) {
			runtime.ReadMemStats(m)
			alloc.Value(int64(m.Alloc))
			totalAlloc.Value(int64(m.TotalAlloc))
			sysBytes.Value(int64(m.Sys))
		}
	}()

	go func() {
		http.HandleFunc("/get", Get)
		log.Info("starting listener for metrics and http/debug on %s", *listenAddr)
		log.Info("%s", http.ListenAndServe(*listenAddr, nil))
	}()

	for {
		select {
		case <-consumer.StopChan:
			err := metrics.Persist()
			if err != nil {
				log.Error(3, "failed to persist aggmetrics. %s", err)
			}
			log.Info("closing cassandra session.")
			cSession.Close()
			log.Info("terminating.")
			log.Close()
			return
		case <-sigChan:
			log.Info("Shutting down")
			consumer.Stop()
		}
	}
}