Example #1
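// Publish splits the given metrics into sub-slices of at most 3500 entries, encodes
// each sub-slice as a FormatMetricDataArrayMsgp message and publishes it to NSQ via
// the global producer, updating the publish counters and timers as it goes.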
func Publish(metrics []*schema.MetricData) error {
	if globalProducer == nil {
		log.Debug("dropping %d metrics as publishing is disabled", len(metrics))
		return nil
	}
	if len(metrics) == 0 {
		return nil
	}

	// split into sub-slices of at most 3500 metrics so each encoded message stays well under nsqd's 10MiB limit
	subslices := schema.Reslice(metrics, 3500)

	for _, subslice := range subslices {
		id := time.Now().UnixNano()
		data, err := msg.CreateMsg(subslice, id, msg.FormatMetricDataArrayMsgp)
		if err != nil {
			log.Fatal(4, "Fatal error creating metric message: %s", err)
		}
		metricsPublished.Inc(int64(len(subslice)))
		messagesPublished.Inc(1)
		messagesSize.Value(int64(len(data)))
		metricsPerMessage.Value(int64(len(subslice)))
		pre := time.Now()
		err = globalProducer.Publish(topic, data)
		publishDuration.Value(time.Since(pre))
		if err != nil {
			log.Fatal(4, "can't publish to nsqd: %s", err)
		}
		log.Info("published metrics: msg id=%d size=%d", id, len(data))
	}

	//globalProducer.Stop()
	return nil
}
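For reference, here is a minimal sketch of what a chunking helper like the schema.Reslice call above could look like (an assumption for illustration, not the actual schema.Reslice implementation): it splits a slice into sub-slices of at most max elements without copying the underlying metrics.

// Reslice splits in into sub-slices of at most max elements each.
// NOTE: illustrative sketch only; the real schema.Reslice may differ.
func Reslice(in []*schema.MetricData, max int) [][]*schema.MetricData {
	out := make([][]*schema.MetricData, 0, len(in)/max+1)
	for len(in) > max {
		out = append(out, in[:max])
		in = in[max:]
	}
	if len(in) > 0 {
		out = append(out, in)
	}
	return out
}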
Example #2
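// Flush copies the queued metrics out under the lock, encodes them as a single
// FormatMetricDataArrayMsgp message and POSTs it to the metrics API, retrying
// once per second until the request succeeds.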
func (q *WriteQueue) Flush() {
	q.Lock()
	if len(q.Metrics) == 0 {
		q.Unlock()
		return
	}
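	// copy the queued metrics and release the lock before doing the (potentially slow) network I/O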
	metrics := make([]*schema.MetricData, len(q.Metrics))
	copy(metrics, q.Metrics)
	q.Metrics = q.Metrics[:0]
	q.Unlock()
	// Write the metrics to our HTTP server.
	log.Printf("writing %d metrics to API", len(metrics))
	id := time.Now().UnixNano()
	body, err := msg.CreateMsg(metrics, id, msg.FormatMetricDataArrayMsgp)
	if err != nil {
		log.Printf("Error: unable to convert metrics to MetricDataArrayMsgp. %s", err)
		return
	}
	sent := false
	for !sent {
		if err = PostData("metrics", Token, body); err != nil {
			log.Printf("Error: %s", err)
			time.Sleep(time.Second)
		} else {
			sent = true
		}
	}
}
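A minimal usage sketch (assumed for illustration, not part of the original source): driving Flush from a background goroutine so the queue is drained at a fixed interval.

// RunFlusher is a hypothetical helper that flushes the queue every interval.
func RunFlusher(q *WriteQueue, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		q.Flush()
	}
}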
Example #3
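// Publish splits the given metrics into sub-slices of at most 3500 entries (see the
// sizing notes below), encodes each as a FormatMetricDataArrayMsgp message and
// publishes it to NSQ via the global producer.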
func Publish(metrics []*schema.MetricData) error {
	if globalProducer == nil {
		return nil
	}
	if len(metrics) == 0 {
		return nil
	}
	// typical metrics seem to be around 300B
	// nsqd allows <= 10MiB messages.
	// we ideally have 64kB ~ 1MiB messages (see benchmark https://gist.github.com/Dieterbe/604232d35494eae73f15)
	// at 300B, about 3500 metrics fit in 1MiB
	// in the worst case, this allows metrics of up to 2871B each
	// this could be made more robust of course

	// real world findings in dev-stack with env-load:
	// 159569B msg / 795 metrics per msg = ~200B per metric
	// so peak message size is about 3500*200 = 700k (seen 711k)

	subslices := Reslice(metrics, 3500)

	for _, subslice := range subslices {
		id := time.Now().UnixNano()
		data, err := msg.CreateMsg(subslice, id, msg.FormatMetricDataArrayMsgp)
		if err != nil {
			log.Fatal(0, "Fatal error creating metric message: %s", err)
		}
		metricsPublished.Inc(int64(len(subslice)))
		messagesPublished.Inc(1)
		messagesSize.Value(int64(len(data)))
		metricsPerMessage.Value(int64(len(subslice)))
		pre := time.Now()
		err = globalProducer.Publish(topic, data)
		publishDuration.Value(time.Since(pre))
		if err != nil {
			log.Fatal(0, "can't publish to nsqd: %s", err)
		}
		log.Info("published metrics: msg id=%d size=%d", id, len(data))
	}

	//globalProducer.Stop()
	return nil
}
Example #4
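// run is the main loop of the GrafanaNet route: it buffers incoming metrics and
// flushes them to the route's HTTP endpoint either once flushMaxNum metrics have
// accumulated or when the flushMaxWait ticker fires, retrying failed flushes with
// exponential backoff.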
func (route *RouteGrafanaNet) run() {
	metrics := make([]*schema.MetricData, 0, route.flushMaxNum)
	ticker := time.NewTicker(route.flushMaxWait)
	client := &http.Client{
		Timeout: 2 * time.Second,
	}

	// exponential backoff for failed flushes: start at 100ms, cap at 1 minute, factor 1.5, with jitter
	b := &backoff.Backoff{
		Min:    100 * time.Millisecond,
		Max:    time.Minute,
		Factor: 1.5,
		Jitter: true,
	}

	flush := func() {
		if len(metrics) == 0 {
			return
		}
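		// retry until the POST succeeds; only a 2xx response resets the backoff, clears the buffer and breaks out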
		for {
			pre := time.Now()
			mda := schema.MetricDataArray(metrics)
			data, err := msg.CreateMsg(mda, 0, msg.FormatMetricDataArrayMsgp)
			if err != nil {
				panic(err)
			}

			req, err := http.NewRequest("POST", route.addr, bytes.NewBuffer(data))
			if err != nil {
				panic(err)
			}
			req.Header.Add("Authorization", "Bearer "+route.apiKey)
			req.Header.Add("Content-Type", "rt-metric-binary")
			resp, err := client.Do(req)
			diff := time.Since(pre)
			if err == nil && resp.StatusCode >= 200 && resp.StatusCode < 300 {
				b.Reset()
				log.Info("GrafanaNet sent %d metrics in %s - msg size %d", len(metrics), diff, len(data))
				route.numOut.Inc(int64(len(metrics)))
				route.tickFlushSize.Update(int64(len(data)))
				route.durationTickFlush.Update(diff)
				metrics = metrics[:0]
				resp.Body.Close()
				break
			}
			route.numErrFlush.Inc(1)
			dur := b.Duration()
			if err != nil {
				log.Warning("GrafanaNet failed to submit data: %s - will try again in %s (this attempt took %s)", err, dur, diff)
			} else {
				buf := make([]byte, 300)
				n, _ := resp.Body.Read(buf)
				log.Warning("GrafanaNet failed to submit data: http %d - %s - will try again in %s (this attempt took %s)", resp.StatusCode, buf[:n], dur, diff)
				resp.Body.Close()
			}

			time.Sleep(dur)
		}
	}
	for {
		select {
		case buf := <-route.buf:
			route.numBuffered.Dec(1)
			md := parseMetric(buf, route.schemas)
			if md == nil {
				continue
			}
			md.SetId()
			metrics = append(metrics, md)
			if len(metrics) == route.flushMaxNum {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
}