Example #1
// Gather ...
func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error {
	if r.Client == nil {
		tlsCfg, err := internal.GetTLSConfig(
			r.SSLCert, r.SSLKey, r.SSLCA, r.InsecureSkipVerify)
		if err != nil {
			return err
		}
		tr := &http.Transport{
			ResponseHeaderTimeout: time.Duration(3 * time.Second),
			TLSClientConfig:       tlsCfg,
		}
		r.Client = &http.Client{
			Transport: tr,
			Timeout:   time.Duration(4 * time.Second),
		}
	}

	var wg sync.WaitGroup
	wg.Add(len(gatherFunctions))
	errChan := errchan.New(len(gatherFunctions))
	for _, f := range gatherFunctions {
		go func(gf gatherFunc) {
			defer wg.Done()
			gf(r, acc, errChan.C)
		}(f)
	}
	wg.Wait()

	return errChan.Error()
}
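
Every example in this collection funnels goroutine errors through the same errchan helper: New(n) sizes a buffered channel, each worker sends at most one error (possibly nil) on C, and Error() is called once after wg.Wait(). As a point of reference, here is a minimal sketch of what such a helper could look like, based only on the API visible in these examples; it is an illustration, not necessarily the Telegraf implementation.

package errchan

import (
	"errors"
	"strings"
)

// ErrChan collects errors from many goroutines over a buffered channel.
// Illustrative sketch only; the real package may differ.
type ErrChan struct {
	C chan error
}

// New returns an ErrChan whose channel holds up to n errors without blocking.
func New(n int) *ErrChan {
	return &ErrChan{C: make(chan error, n)}
}

// Error drains the channel and joins all non-nil errors into a single error,
// or returns nil if nothing was reported.
func (e *ErrChan) Error() error {
	close(e.C)
	var msgs []string
	for err := range e.C {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, "\n"))
}

The buffer size matters: because the channel is sized to the number of goroutines, every worker can send its result without blocking, so wg.Wait() cannot deadlock and Error() can safely close and drain the channel afterwards.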
Example #2
// Gather reads the stats from Elasticsearch and writes them to the
// Accumulator.
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
	errChan := errchan.New(len(e.Servers))
	var wg sync.WaitGroup
	wg.Add(len(e.Servers))

	for _, serv := range e.Servers {
		go func(s string, acc telegraf.Accumulator) {
			defer wg.Done()
			var url string
			if e.Local {
				url = s + statsPathLocal
			} else {
				url = s + statsPath
			}
			if err := e.gatherNodeStats(url, acc); err != nil {
				errChan.C <- err
				return
			}
			if e.ClusterHealth {
				e.gatherClusterStats(fmt.Sprintf("%s/_cluster/health?level=indices", s), acc)
			}
		}(serv, acc)
	}

	wg.Wait()
	return errChan.Error()
}
Example #3
// Reads stats from all configured servers.
func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
	if !validQuery[d.Type] {
		return fmt.Errorf("Error: %s is not a valid query type\n",
			d.Type)
	}

	if len(d.Servers) == 0 {
		d.Servers = append(d.Servers, "127.0.0.1:24242")
	}

	if len(d.Filters) <= 0 {
		d.Filters = append(d.Filters, "")
	}

	var wg sync.WaitGroup
	errChan := errchan.New(len(d.Servers) * len(d.Filters))
	for _, server := range d.Servers {
		for _, filter := range d.Filters {
			wg.Add(1)
			go func(s string, f string) {
				defer wg.Done()
				errChan.C <- d.gatherServer(s, acc, d.Type, f)
			}(server, filter)
		}
	}

	wg.Wait()
	return errChan.Error()
}
Example #4
// Reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (m *MongoDB) Gather(acc telegraf.Accumulator) error {
	if len(m.Servers) == 0 {
		m.gatherServer(m.getMongoServer(localhost), acc)
		return nil
	}

	var wg sync.WaitGroup
	errChan := errchan.New(len(m.Servers))
	for _, serv := range m.Servers {
		u, err := url.Parse(serv)
		if err != nil {
			return fmt.Errorf("Unable to parse address '%s': %s", serv, err)
		} else if u.Scheme == "" {
			u.Scheme = "mongodb"
			// fallback to simple string based address (i.e. "10.0.0.1:10000")
			u.Host = serv
			if u.Path == u.Host {
				u.Path = ""
			}
		}
		wg.Add(1)
		go func(srv *Server) {
			defer wg.Done()
			errChan.C <- m.gatherServer(srv, acc)
		}(m.getMongoServer(u))
	}

	wg.Wait()
	return errChan.Error()
}
Example #5
// Gather collects Kubernetes metrics from a given URL.
func (k *Kubernetes) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup
	errChan := errchan.New(1)
	wg.Add(1)
	go func(k *Kubernetes) {
		defer wg.Done()
		errChan.C <- k.gatherSummary(k.URL, acc)
	}(k)
	wg.Wait()
	return errChan.Error()
}
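
With only a single URL to poll, the WaitGroup and single-slot error channel above add ceremony without concurrency; an equivalent, simpler form (assuming gatherSummary returns the only error of interest) would be:

// Equivalent simplification of the example above: nothing runs concurrently,
// so the error from gatherSummary can be returned directly.
func (k *Kubernetes) Gather(acc telegraf.Accumulator) error {
	return k.gatherSummary(k.URL, acc)
}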
Example #6
File: exec.go  Project: mkuzmin/telegraf
func (e *Exec) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup
	// Legacy single command support
	if e.Command != "" {
		e.Commands = append(e.Commands, e.Command)
		e.Command = ""
	}

	commands := make([]string, 0, len(e.Commands))
	for _, pattern := range e.Commands {
		cmdAndArgs := strings.SplitN(pattern, " ", 2)
		if len(cmdAndArgs) == 0 {
			continue
		}

		matches, err := filepath.Glob(cmdAndArgs[0])
		if err != nil {
			return err
		}

		if len(matches) == 0 {
			// There were no matches with the glob pattern, so let's assume
			// that the command is in PATH and just run it as it is
			commands = append(commands, pattern)
		} else {
			// There were matches, so we'll append each match together with
			// the arguments to the commands slice
			for _, match := range matches {
				if len(cmdAndArgs) == 1 {
					commands = append(commands, match)
				} else {
					commands = append(commands,
						strings.Join([]string{match, cmdAndArgs[1]}, " "))
				}
			}
		}
	}

	errChan := errchan.New(len(commands))
	e.errChan = errChan.C

	wg.Add(len(commands))
	for _, command := range commands {
		go e.ProcessCommand(command, acc, &wg)
	}
	wg.Wait()
	return errChan.Error()
}
Example #7
// Gather reads stats from all configured servers and accumulates stats.
func (m *Memcached) Gather(acc telegraf.Accumulator) error {
	if len(m.Servers) == 0 && len(m.UnixSockets) == 0 {
		return m.gatherServer(":11211", false, acc)
	}

	errChan := errchan.New(len(m.Servers) + len(m.UnixSockets))
	for _, serverAddress := range m.Servers {
		errChan.C <- m.gatherServer(serverAddress, false, acc)
	}

	for _, unixAddress := range m.UnixSockets {
		errChan.C <- m.gatherServer(unixAddress, true, acc)
	}

	return errChan.Error()
}
Example #8
func (a *Aerospike) Gather(acc telegraf.Accumulator) error {
	if len(a.Servers) == 0 {
		return a.gatherServer("127.0.0.1:3000", acc)
	}

	var wg sync.WaitGroup
	errChan := errchan.New(len(a.Servers))
	wg.Add(len(a.Servers))
	for _, server := range a.Servers {
		go func(serv string) {
			defer wg.Done()
			errChan.C <- a.gatherServer(serv, acc)
		}(server)
	}

	wg.Wait()
	return errChan.Error()
}
Example #9
// Reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (g *haproxy) Gather(acc telegraf.Accumulator) error {
	if len(g.Servers) == 0 {
		return g.gatherServer("http://127.0.0.1:1936", acc)
	}

	var wg sync.WaitGroup
	errChan := errchan.New(len(g.Servers))
	wg.Add(len(g.Servers))
	for _, server := range g.Servers {
		go func(serv string) {
			defer wg.Done()
			errChan.C <- g.gatherServer(serv, acc)
		}(server)
	}

	wg.Wait()
	return errChan.Error()
}
Example #10
File: haproxy.go  Project: Wikia/telegraf
// Reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (g *haproxy) Gather(acc telegraf.Accumulator) error {
	if len(g.Servers) == 0 {
		return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc)
	}

	endpoints := make([]string, 0, len(g.Servers))

	for _, endpoint := range g.Servers {

		if strings.HasPrefix(endpoint, "http") {
			endpoints = append(endpoints, endpoint)
			continue
		}

		socketPath := getSocketAddr(endpoint)

		matches, err := filepath.Glob(socketPath)

		if err != nil {
			return err
		}

		if len(matches) == 0 {
			endpoints = append(endpoints, socketPath)
		} else {
			for _, match := range matches {
				endpoints = append(endpoints, match)
			}
		}
	}

	var wg sync.WaitGroup
	errChan := errchan.New(len(endpoints))
	wg.Add(len(endpoints))
	for _, server := range endpoints {
		go func(serv string) {
			defer wg.Done()
			errChan.C <- g.gatherServer(serv, acc)
		}(server)
	}

	wg.Wait()
	return errChan.Error()
}
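
getSocketAddr is not shown in this example. Assuming endpoints can be given either as bare socket paths or with a scheme prefix (for example unix:/run/haproxy.sock), a hypothetical sketch of such a helper could look like the following; it is an illustration written against the call above, not necessarily the helper from the haproxy plugin.

// Hypothetical helper matching the call above: it strips an optional scheme
// prefix and returns the filesystem path, which the caller then expands with
// filepath.Glob.
func getSocketAddr(sock string) string {
	socketAddr := strings.Split(sock, ":")
	if len(socketAddr) >= 2 {
		return socketAddr[1]
	}
	return socketAddr[0]
}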
Example #11
// Reads stats from all configured servers and accumulates stats.
// Returns one of the errors encountered while gathering stats (if any).
func (r *Redis) Gather(acc telegraf.Accumulator) error {
	if len(r.Servers) == 0 {
		url := &url.URL{
			Scheme: "tcp",
			Host:   ":6379",
		}
		r.gatherServer(url, acc)
		return nil
	}

	var wg sync.WaitGroup
	errChan := errchan.New(len(r.Servers))
	for _, serv := range r.Servers {
		if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") {
			serv = "tcp://" + serv
		}

		u, err := url.Parse(serv)
		if err != nil {
			return fmt.Errorf("Unable to parse address '%s': %s", serv, err)
		} else if u.Scheme == "" {
			// fallback to simple string based address (i.e. "10.0.0.1:10000")
			u.Scheme = "tcp"
			u.Host = serv
			u.Path = ""
		}
		if u.Scheme == "tcp" {
			_, _, err := net.SplitHostPort(u.Host)
			if err != nil {
				u.Host = u.Host + ":" + defaultPort
			}
		}

		wg.Add(1)
		go func(u *url.URL) {
			defer wg.Done()
			errChan.C <- r.gatherServer(u, acc)
		}(u)
	}

	wg.Wait()
	return errChan.Error()
}
Example #12
func (n *Nginx) Gather(acc telegraf.Accumulator) error {
	var wg sync.WaitGroup
	errChan := errchan.New(len(n.Urls))

	for _, u := range n.Urls {
		addr, err := url.Parse(u)
		if err != nil {
			return fmt.Errorf("Unable to parse address '%s': %s", u, err)
		}

		wg.Add(1)
		go func(addr *url.URL) {
			defer wg.Done()
			errChan.C <- n.gatherUrl(addr, acc)
		}(addr)
	}

	wg.Wait()
	return errChan.Error()
}
Example #13
func (d *DnsQuery) Gather(acc telegraf.Accumulator) error {
	d.setDefaultValues()

	errChan := errchan.New(len(d.Domains) * len(d.Servers))
	for _, domain := range d.Domains {
		for _, server := range d.Servers {
			dnsQueryTime, err := d.getDnsQueryTime(domain, server)
			errChan.C <- err
			tags := map[string]string{
				"server":      server,
				"domain":      domain,
				"record_type": d.RecordType,
			}

			fields := map[string]interface{}{"query_time_ms": dnsQueryTime}
			acc.AddFields("dns_query", fields, tags)
		}
	}

	return errChan.Error()
}
Example #14
func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error {
	if r.Client == nil {
		tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
		r.Client = &http.Client{
			Transport: tr,
			Timeout:   time.Duration(4 * time.Second),
		}
	}

	var wg sync.WaitGroup
	wg.Add(len(gatherFunctions))
	errChan := errchan.New(len(gatherFunctions))
	for _, f := range gatherFunctions {
		go func(gf gatherFunc) {
			defer wg.Done()
			gf(r, acc, errChan.C)
		}(f)
	}
	wg.Wait()

	return errChan.Error()
}
Example #15
File: mysql.go  Project: li-ang/telegraf
func (m *Mysql) Gather(acc telegraf.Accumulator) error {
	if len(m.Servers) == 0 {
		// default to localhost if nothing specified.
		return m.gatherServer(localhost, acc)
	}
	// Initialise additional query intervals
	if !initDone {
		m.InitMysql()
	}
	var wg sync.WaitGroup
	errChan := errchan.New(len(m.Servers))

	// Loop through each server and collect metrics
	for _, server := range m.Servers {
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			errChan.C <- m.gatherServer(s, acc)
		}(server)
	}

	wg.Wait()
	return errChan.Error()
}
Example #16
func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
	if c.client == nil {
		c.initializeCloudWatch()
	}

	var metrics []*cloudwatch.Metric

	// check for provided metric filter
	if c.Metrics != nil {
		metrics = []*cloudwatch.Metric{}
		for _, m := range c.Metrics {
			if !hasWilcard(m.Dimensions) {
				dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
				for k, d := range m.Dimensions {
					fmt.Printf("Dimension [%s]:[%s]\n", d.Name, d.Value)
					dimensions[k] = &cloudwatch.Dimension{
						Name:  aws.String(d.Name),
						Value: aws.String(d.Value),
					}
				}
				for _, name := range m.MetricNames {
					metrics = append(metrics, &cloudwatch.Metric{
						Namespace:  aws.String(c.Namespace),
						MetricName: aws.String(name),
						Dimensions: dimensions,
					})
				}
			} else {
				allMetrics, err := c.fetchNamespaceMetrics()
				if err != nil {
					return err
				}
				for _, name := range m.MetricNames {
					for _, metric := range allMetrics {
						if isSelected(metric, m.Dimensions) {
							metrics = append(metrics, &cloudwatch.Metric{
								Namespace:  aws.String(c.Namespace),
								MetricName: aws.String(name),
								Dimensions: metric.Dimensions,
							})
						}
					}
				}
			}

		}
	} else {
		var err error
		metrics, err = c.fetchNamespaceMetrics()
		if err != nil {
			return err
		}
	}

	metricCount := len(metrics)
	errChan := errchan.New(metricCount)

	now := time.Now()

	// limit concurrency or we can easily exhaust user connection limit
	// see cloudwatch API request limits:
	// http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html
	lmtr := limiter.NewRateLimiter(10, time.Second)
	defer lmtr.Stop()
	var wg sync.WaitGroup
	wg.Add(len(metrics))
	for _, m := range metrics {
		<-lmtr.C
		go func(inm *cloudwatch.Metric) {
			defer wg.Done()
			c.gatherMetric(acc, inm, now, errChan.C)
		}(m)
	}
	wg.Wait()

	return errChan.Error()
}
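
The CloudWatch example throttles its fan-out with a limiter that exposes only a token channel C and a Stop method. A minimal sketch of a rate limiter with that shape, written against just the usage shown above (not necessarily the actual limiter package), could look like this:

package limiter

import "time"

// RateLimiter hands out up to n tokens per period on C until Stop is called.
// Illustrative sketch based on the usage above.
type RateLimiter struct {
	C    chan bool
	done chan struct{}
}

// NewRateLimiter starts a background goroutine that refills the token channel
// once per period.
func NewRateLimiter(n int, period time.Duration) *RateLimiter {
	r := &RateLimiter{
		C:    make(chan bool, n),
		done: make(chan struct{}),
	}
	go r.refill(n, period)
	return r
}

// Stop terminates the refill goroutine.
func (r *RateLimiter) Stop() {
	close(r.done)
}

func (r *RateLimiter) refill(n int, period time.Duration) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		// Top up the buffered channel; skip tokens that would block because
		// the previous batch has not been consumed yet.
		for i := 0; i < n; i++ {
			select {
			case r.C <- true:
			default:
			}
		}
		select {
		case <-r.done:
			return
		case <-ticker.C:
		}
	}
}

Receiving from lmtr.C before launching each goroutine, as the example does, caps the launch rate at roughly ten requests per second, keeping the plugin inside the CloudWatch API request limits referenced in the comment.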
Example #17
func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error {
	l.Lock()
	defer l.Unlock()

	l.acc = acc
	l.lines = make(chan string, 1000)
	l.done = make(chan struct{})

	// Looks for fields which implement LogParser interface
	l.parsers = []LogParser{}
	s := reflect.ValueOf(l).Elem()
	for i := 0; i < s.NumField(); i++ {
		f := s.Field(i)

		if !f.CanInterface() {
			continue
		}

		if lpPlugin, ok := f.Interface().(LogParser); ok {
			if reflect.ValueOf(lpPlugin).IsNil() {
				continue
			}
			l.parsers = append(l.parsers, lpPlugin)
		}
	}

	if len(l.parsers) == 0 {
		return fmt.Errorf("ERROR: logparser input plugin: no parser defined.")
	}

	// compile log parser patterns:
	errChan := errchan.New(len(l.parsers))
	for _, parser := range l.parsers {
		if err := parser.Compile(); err != nil {
			errChan.C <- err
		}
	}
	if err := errChan.Error(); err != nil {
		return err
	}

	var seek tail.SeekInfo
	if !l.FromBeginning {
		seek.Whence = 2
		seek.Offset = 0
	}

	l.wg.Add(1)
	go l.parser()

	// Create a "tailer" for each file
	for _, filepath := range l.Files {
		g, err := globpath.Compile(filepath)
		if err != nil {
			log.Printf("E! Error Glob %s failed to compile, %s", filepath, err)
			continue
		}
		files := g.Match()
		errChan = errchan.New(len(files))
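		// Note: errChan is recreated for each glob pattern here, so the final
		// errChan.Error() below reports only errors from the last pattern's files.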
		for file := range files {
			tailer, err := tail.TailFile(file,
				tail.Config{
					ReOpen:    true,
					Follow:    true,
					Location:  &seek,
					MustExist: true,
				})
			errChan.C <- err

			// create a goroutine for each "tailer"
			l.wg.Add(1)
			go l.receiver(tailer)
			l.tailers = append(l.tailers, tailer)
		}
	}

	return errChan.Error()
}