// get retrieves all metric names for the given application ID.
func (m *MetricNames) get(api newRelicApi, appId int) error {
	log.Debugf("Requesting metrics names for application id %d.", appId)
	path := fmt.Sprintf("/v2/applications/%s/metrics.json", strconv.Itoa(appId))

	body, err := api.req(path, "")
	if err != nil {
		log.Print("Error getting metric names: ", err)
		return err
	}

	dec := json.NewDecoder(bytes.NewReader(body))

	// The response body may consist of several concatenated JSON
	// documents; decode them all and accumulate the metric names.
	for {
		var part MetricNames
		if err = dec.Decode(&part); err == io.EOF {
			break
		} else if err != nil {
			log.Print("Error decoding metric names: ", err)
			return err
		}
		m.Metrics = append(m.Metrics, part.Metrics...)
	}

	return nil
}
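// interruptHandler closes the given listener once SIGINT or SIGTERM is
// received, which in turn makes the HTTP server's Serve call return.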
func interruptHandler(l net.Listener) {
	// The channel must be buffered, as required by signal.Notify, so that
	// a signal arriving while the receiver is not ready is not dropped.
	notifier := make(chan os.Signal, 1)
	signal.Notify(notifier, os.Interrupt, syscall.SIGTERM)
	<-notifier
	log.Print("Received SIGINT/SIGTERM; exiting gracefully...")
	l.Close()
}
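// loop is the event loop of the DiskMetricStore. It serializes the processing
// of write requests and schedules persist runs, at most one per
// persistenceInterval.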
func (dms *DiskMetricStore) loop(persistenceInterval time.Duration) {
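	// State of the persist cycle: lastPersist and lastWrite track when the
	// last persist run started and when the last write request was
	// processed; persistScheduled guards against scheduling a second
	// persist run while one is pending.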
	lastPersist := time.Now()
	persistScheduled := false
	lastWrite := time.Time{}
	persistDone := make(chan time.Time)
	var persistTimer *time.Timer

	// checkPersist schedules a persist run if anything has been written
	// since the last one and no run is scheduled yet.
	checkPersist := func() {
		if !persistScheduled && lastWrite.After(lastPersist) {
			persistTimer = time.AfterFunc(
				persistenceInterval-lastWrite.Sub(lastPersist),
				func() {
					persistStarted := time.Now()
					if err := dms.persist(); err != nil {
						log.Print("Error persisting metrics: ", err)
					} else {
						log.Printf(
							"Metrics persisted to '%s'.",
							dms.persistenceFile,
						)
					}
					persistDone <- persistStarted
				},
			)
			persistScheduled = true
		}
	}

	// The event loop: write requests, completed persist runs, and the
	// drain signal are all handled sequentially in this goroutine.
	for {
		select {
		case wr := <-dms.writeQueue:
			dms.processWriteRequest(wr)
			lastWrite = time.Now()
			checkPersist()
		case lastPersist = <-persistDone:
			persistScheduled = false
			checkPersist() // In case something has been written in the meantime.
		case <-dms.drain:
			// Prevent a scheduled persist from firing later.
			if persistTimer != nil {
				persistTimer.Stop()
			}
			// Now draining...
			for {
				select {
				case wr := <-dms.writeQueue:
					dms.processWriteRequest(wr)
				default:
					dms.done <- dms.persist()
					return
				}
			}
		}
	}
}
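// get retrieves the list of applications from the NewRelic API and
// unmarshals the response into the receiver.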
func (a *AppList) get(api newRelicApi) error {
	log.Debugf("Requesting application list from %s.", api.server.String())
	body, err := api.req("/v2/applications.json", "")
	if err != nil {
		log.Print("Error getting application list: ", err)
		return err
	}

	return json.Unmarshal(body, a)
}
// NewDiskMetricStore returns a DiskMetricStore ready to use. To shut it down
// cleanly and free its resources, the Shutdown() method has to be called. If
// persistenceFile is the empty string, metrics are never persisted to disk.
// Otherwise, a file of that name is used for persisting metrics to disk. If
// the file already exists, metrics are read from it during start-up. Metrics
// are persisted on shutdown and after every write action, but the latter
// happens at most once per persistenceInterval.
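//
// A minimal usage sketch (the file name and interval here are made up):
//
//	ms := NewDiskMetricStore("/tmp/metrics.store", 5*time.Minute)
//	// ... send WriteRequests to the store ...
//	if err := ms.Shutdown(); err != nil {
//		log.Print("Problem shutting down metric storage: ", err)
//	}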
func NewDiskMetricStore(
	persistenceFile string,
	persistenceInterval time.Duration,
) *DiskMetricStore {
	dms := &DiskMetricStore{
		writeQueue:      make(chan WriteRequest, writeQueueCapacity),
		drain:           make(chan struct{}),
		done:            make(chan error),
		metricGroups:    GroupingKeyToMetricGroup{},
		persistenceFile: persistenceFile,
	}
	if err := dms.restore(); err != nil {
		log.Print("Could not load persisted metrics: ", err)
		log.Print("Retrying assuming legacy format for persisted metrics...")
		if err := dms.legacyRestore(); err != nil {
			log.Print("Could not load persisted metrics in legacy format: ", err)
		}
	}

	go dms.loop(persistenceInterval)
	return dms
}
func main() {
	var server, apikey, listenAddress, metricPath string
	var period int

	flag.StringVar(&apikey, "api.key", "", "NewRelic API key")
	flag.StringVar(&server, "api.server", "https://api.newrelic.com", "NewRelic API URL")
	flag.IntVar(&period, "api.period", 60, "Period of data to extract in seconds")

	flag.StringVar(&listenAddress, "web.listen-address", ":9126", "Address to listen on for web interface and telemetry.")
	flag.StringVar(&metricPath, "web.telemetry-path", "/metrics", "Path under which to expose metrics.")

	flag.Parse()

	api := NewNewRelicApi(server, apikey)
	api.period = period
	exporter := NewExporter()
	exporter.api = *api

	prometheus.MustRegister(exporter)

	http.Handle(metricPath, prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
<head><title>NewRelic exporter</title></head>
<body>
<h1>NewRelic exporter</h1>
<p><a href='` + metricPath + `'>Metrics</a></p>
</body>
</html>
`))
	})

	log.Printf("Listening on %s.", listenAddress)
	// http.ListenAndServe always returns a non-nil error.
	log.Fatal(http.ListenAndServe(listenAddress, nil))
}
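// get fetches metric data for the given application ID. The metric names are
// requested in chunks of ChunkSize and the results merged; errors in
// individual chunks are logged, and the affected chunk is dropped.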
func (m *MetricData) get(api newRelicApi, appId int, names MetricNames) error {
	path := fmt.Sprintf("/v2/applications/%d/metrics/data.json", appId)

	var nameList []string

	for i := range names.Metrics {
		// The API returns unencoded metric names but expects them
		// URL-encoded in requests; params.Encode() below takes care
		// of that.
		nameList = append(nameList, names.Metrics[i].Name)
	}
	log.Debugf("Requesting %d metrics for application id %d.", len(nameList), appId)

	// Because the Go client does not yet support 100-continue
	// (see issue #3665), we request the data in chunks to keep each
	// individual request small enough to be accepted.

	// One result channel per chunk; the results are collected in order
	// once all requests have been kicked off.
	chans := make([]chan MetricData, 0)

	for i := 0; i < len(nameList); i += ChunkSize {

		chans = append(chans, make(chan MetricData))

		var thisList []string

		if i+ChunkSize > len(nameList) {
			thisList = nameList[i:]
		} else {
			thisList = nameList[i : i+ChunkSize]
		}

		go func(names []string, ch chan<- MetricData) {

			var data MetricData

			params := url.Values{}

			for _, thisName := range names {
				params.Add("names[]", thisName)
			}

			params.Add("raw", "true")
			params.Add("summarize", "true")
			params.Add("period", strconv.Itoa(api.period))
			params.Add("from", api.from.Format(time.RFC3339))
			params.Add("to", api.to.Format(time.RFC3339))

			body, err := api.req(path, params.Encode())
			if err != nil {
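				// Close the channel without sending, so the
				// collector below receives an empty MetricData
				// for this chunk and the error only shows up
				// in the log.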
				log.Print("Error requesting metrics: ", err)
				close(ch)
				return
			}

			err = json.Unmarshal(body, &data)
			if err != nil {
				log.Print("Error decoding metrics data: ", err)
				close(ch)
				return
			}

			ch <- data
			close(ch)

		}(thisList, chans[len(chans)-1])

	}

	// Collect the results of all chunks, in request order.
	allData := m.Metric_Data.Metrics

	for _, ch := range chans {
		part := <-ch
		allData = append(allData, part.Metric_Data.Metrics...)
	}
	m.Metric_Data.Metrics = allData

	return nil
}
func main() {
	flag.Parse()
	versionInfoTmpl.Execute(os.Stdout, BuildInfo)
	flags := map[string]string{}
	flag.VisitAll(func(f *flag.Flag) {
		flags[f.Name] = f.Value.String()
	})

	ms := storage.NewDiskMetricStore(*persistenceFile, *persistenceInterval)
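	// Inject the metrics from the store into the exposition of the
	// default Prometheus handler.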
	prometheus.SetMetricFamilyInjectionHook(ms.GetMetricFamilies)
	// Enable collect checks for debugging.
	// prometheus.EnableCollectChecks(true)

	r := httprouter.New()
	r.Handler("GET", *metricsPath, prometheus.Handler())

	// Handlers for pushing and deleting metrics.
	r.PUT("/metrics/job/:job/*labels", handler.Push(ms, true))
	r.POST("/metrics/job/:job/*labels", handler.Push(ms, false))
	r.DELETE("/metrics/job/:job/*labels", handler.Delete(ms))
	r.PUT("/metrics/job/:job", handler.Push(ms, true))
	r.POST("/metrics/job/:job", handler.Push(ms, false))
	r.DELETE("/metrics/job/:job", handler.Delete(ms))

	// Handlers for the deprecated API.
	r.PUT("/metrics/jobs/:job/instances/:instance", handler.LegacyPush(ms, true))
	r.POST("/metrics/jobs/:job/instances/:instance", handler.LegacyPush(ms, false))
	r.DELETE("/metrics/jobs/:job/instances/:instance", handler.LegacyDelete(ms))
	r.PUT("/metrics/jobs/:job", handler.LegacyPush(ms, true))
	r.POST("/metrics/jobs/:job", handler.LegacyPush(ms, false))
	r.DELETE("/metrics/jobs/:job", handler.LegacyDelete(ms))

	r.Handler("GET", "/static/*filepath", prometheus.InstrumentHandler(
		"static",
		http.FileServer(
			&assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir},
		),
	))
	statusHandler := prometheus.InstrumentHandlerFunc("status", handler.Status(ms, Asset, flags, BuildInfo))
	r.Handler("GET", "/status", statusHandler)
	r.Handler("GET", "/", statusHandler)

	// Re-expose the pprof endpoints, which are lost by not using
	// http.DefaultServeMux.
	r.GET("/debug/pprof/*pprof", handlePprof)

	log.Printf("Listening on %s.", *listenAddress)
	l, err := net.Listen("tcp", *listenAddress)
	if err != nil {
		log.Fatal(err)
	}
	go interruptHandler(l)
	err = (&http.Server{Addr: *listenAddress, Handler: r}).Serve(l)
	log.Print("HTTP server stopped: ", err)
	// Give running connections a chance to submit their payload by waiting
	// for one second before shutting down the metric store, but do not
	// wait until all connections are done, as that could delay the
	// shutdown indefinitely.
	time.Sleep(time.Second)
	if err := ms.Shutdown(); err != nil {
		log.Print("Problem shutting down metric storage: ", err)
	}
}