Example No. 1
import (
	"time"

	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/packetbeat/config"
	"github.com/elastic/beats/packetbeat/publish"
)

type Flows struct {
	worker     *worker
	table      *flowMetaTable
	counterReg *counterReg
}

var debugf = logp.MakeDebug("flows")

const (
	defaultTimeout = 30 * time.Second
	defaultPeriod  = 10 * time.Second
)

func NewFlows(pub publish.Flows, config *config.Flows) (*Flows, error) {
	duration := func(s string, d time.Duration) (time.Duration, error) {
		if s == "" {
			return d, nil
		}
		return time.ParseDuration(s)
	}

	timeout, err := duration(config.Timeout, defaultTimeout)
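
All of the snippets on this page call logp.MakeDebug, which binds a printf-style debug function to a named selector; output appears only when that selector is enabled (for example by running with -d flows). A minimal, hypothetical usage sketch, not part of the original file:

// Hypothetical usage sketch: debugf only prints when the "flows" selector is
// enabled; the function name and message below are illustrative only.
func logFlowSettings(timeout, period time.Duration) {
	debugf("flows configured: timeout=%v period=%v", timeout, period)
}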
Example No. 2
package info

import (
	"strings"
	"time"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"

	rd "github.com/garyburd/redigo/redis"
)

var (
	debugf = logp.MakeDebug("redis-info")
)

func init() {
	if err := mb.Registry.AddMetricSet("redis", "info", New); err != nil {
		panic(err)
	}
}

// MetricSet for fetching Redis server information and statistics.
type MetricSet struct {
	mb.BaseMetricSet
	pool *rd.Pool
}

// New creates a new instance of MetricSet.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
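
The snippet is cut off inside New. Below is a hedged sketch, not the original continuation, of how such a metricset could fetch data through the redigo pool it holds; the Fetch signature and the INFO handling are assumptions for illustration.

// Hypothetical sketch only: read the raw INFO payload via the pool held by the
// MetricSet and report its size; the real metricset parses INFO into fields.
func (m *MetricSet) Fetch() (common.MapStr, error) {
	conn := m.pool.Get()
	defer conn.Close()

	info, err := rd.String(conn.Do("INFO"))
	if err != nil {
		return nil, err
	}
	debugf("redis INFO reply is %d bytes", len(info))
	return common.MapStr{"info_bytes": len(info)}, nil
}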
Example No. 3
	"github.com/elastic/beats/libbeat/cfgfile"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/filter"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/libbeat/paths"
	"github.com/elastic/beats/libbeat/publisher"
	svc "github.com/elastic/beats/libbeat/service"
	"github.com/satori/go.uuid"
)

var (
	printVersion = flag.Bool("version", false, "Print the version and exit")
)

var debugf = logp.MakeDebug("beat")

// GracefulExit is an error that signals to exit with a code of 0.
var GracefulExit = errors.New("graceful exit")

// Beater is the interface that must be implemented by every Beat. The full
// lifecycle of a Beat instance is managed through this interface.
//
// Life-cycle of Beater
//
// The four operational methods are always invoked serially in the following
// order:
//
//   Config -> Setup -> Run -> Cleanup
//
// The Stop() method is invoked the first time (and only the first time) a
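
The doc comment is truncated here. A minimal, hypothetical skeleton of a Beater following the lifecycle it describes; the method signatures are assumptions for illustration, not the actual interface definition.

// Hypothetical skeleton: one method per lifecycle step named above, plus Stop
// to unblock Run. Signatures are assumed, not taken from the real interface.
type exampleBeat struct {
	done chan struct{}
}

func (eb *exampleBeat) Config(b *Beat) error  { return nil }
func (eb *exampleBeat) Setup(b *Beat) error   { eb.done = make(chan struct{}); return nil }
func (eb *exampleBeat) Run(b *Beat) error     { <-eb.done; return nil }
func (eb *exampleBeat) Cleanup(b *Beat) error { return nil }
func (eb *exampleBeat) Stop()                 { close(eb.done) }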
Example No. 4
package beat

import (
	"sync"
	"time"

	cfg "github.com/elastic/beats/filebeat/config"
	"github.com/elastic/beats/filebeat/input"
	"github.com/elastic/beats/libbeat/logp"
)

var debugf = logp.MakeDebug("spooler")

// channelSize is the number of events Channel can buffer before blocking occurs.
const channelSize = 16

// Spooler aggregates the events and sends the aggregated data to the publisher.
type Spooler struct {
	Channel chan *input.FileEvent // Channel is the input to the Spooler.

	// Config
	idleTimeout time.Duration // How often to flush the spooler if spoolSize is not reached.
	spoolSize   uint64        // Maximum number of events that are stored before a flush occurs.

	exit          chan struct{}             // Channel used to signal shutdown.
	nextFlushTime time.Time                 // Scheduled time of the next flush.
	publisher     chan<- []*input.FileEvent // Channel used to publish events.
	spool         []*input.FileEvent        // FileEvents being held by the Spooler.
	wg            sync.WaitGroup            // WaitGroup used to control the shutdown.
}
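
A hedged sketch of the flush rule the comments describe: events are buffered in spool and handed to the publisher either when spoolSize is reached or when the idle timer expires. The helper names are hypothetical, not the original methods.

// Hypothetical sketch: queue buffers one event and flush hands the batch to
// the publisher channel, then resets the idle timer. Illustrative only.
func (s *Spooler) queue(event *input.FileEvent) {
	s.spool = append(s.spool, event)
	if uint64(len(s.spool)) >= s.spoolSize {
		s.flush()
	}
}

func (s *Spooler) flush() {
	if len(s.spool) == 0 {
		return
	}
	debugf("flushing spooler with %d events", len(s.spool))
	batch := make([]*input.FileEvent, len(s.spool))
	copy(batch, s.spool)
	s.publisher <- batch
	s.spool = s.spool[:0]
	s.nextFlushTime = time.Now().Add(s.idleTimeout)
}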
Example No. 5
// license that can be found in the LICENSE file.

package cassandra

import (
	"errors"
	"fmt"
	"runtime"
	"sync"

	"github.com/elastic/beats/libbeat/common/streambuf"
	"github.com/elastic/beats/libbeat/logp"
)

var (
	ErrFrameTooBig = errors.New("frame length is bigger than the maximum allowed")
	debugf         = logp.MakeDebug("cassandra")
)

type frameHeader struct {
	Version       protoVersion
	Flags         byte
	Stream        int
	Op            FrameOp
	BodyLength    int
	HeadLength    int
	CustomPayload map[string][]byte
}

func (f frameHeader) ToMap() map[string]interface{} {
	data := make(map[string]interface{})
	data["version"] = fmt.Sprintf("%d", f.Version.version())
Example No. 6
	"github.com/elastic/beats/libbeat/outputs"
	"github.com/elastic/beats/libbeat/outputs/mode"
)

type elasticsearchOutput struct {
	index string
	mode  mode.ConnectionMode
	topology
}

func init() {
	outputs.RegisterOutputPlugin("elasticsearch", New)
}

var (
	debug = logp.MakeDebug("elasticsearch")
)

var (
	// ErrNotConnected indicates failure due to the client having no valid connection
	ErrNotConnected = errors.New("not connected")

	// ErrJSONEncodeFailed indicates encoding failures
	ErrJSONEncodeFailed = errors.New("json encode failed")

	// ErrResponseRead indicates error parsing Elasticsearch response
	ErrResponseRead = errors.New("bulk item status parse failed.")
)

// New instantiates a new output plugin instance publishing to Elasticsearch.
func New(cfg *ucfg.Config, topologyExpire int) (outputs.Outputer, error) {
Example No. 7
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/libbeat/outputs"

	// load supported output plugins
	_ "github.com/elastic/beats/libbeat/outputs/console"
	_ "github.com/elastic/beats/libbeat/outputs/elasticsearch"
	_ "github.com/elastic/beats/libbeat/outputs/fileout"
	_ "github.com/elastic/beats/libbeat/outputs/kafka"
	_ "github.com/elastic/beats/libbeat/outputs/logstash"
	_ "github.com/elastic/beats/libbeat/outputs/redis"
)

// command line flags
var publishDisabled *bool

var debug = logp.MakeDebug("publish")

// EventPublisher provides the interface for beats to publish events.
type eventPublisher interface {
	PublishEvent(ctx Context, event common.MapStr) bool
	PublishEvents(ctx Context, events []common.MapStr) bool
}

type Context struct {
	publishOptions
	Signal outputs.Signaler
}

type publishOptions struct {
	Guaranteed bool
	Sync       bool
Example No. 8
package keyspace

import (
	"time"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"
	"github.com/elastic/beats/metricbeat/mb/parse"
	"github.com/elastic/beats/metricbeat/module/redis"

	rd "github.com/garyburd/redigo/redis"
)

var (
	debugf = logp.MakeDebug("redis-keyspace")
)

func init() {
	if err := mb.Registry.AddMetricSet("redis", "keyspace", New, parse.PassThruHostParser); err != nil {
		panic(err)
	}
}

// MetricSet for fetching Redis keyspace information and statistics.
type MetricSet struct {
	mb.BaseMetricSet
	pool *rd.Pool
}

// New creates a new instance of MetricSet.
Example No. 9
)

// Metrics that can be retrieved through the expvar web interface. Metrics must be
// enabled through configuration in order for the web service to be started.
var (
	publishedEvents = expvar.NewMap("publishedEvents")
	ignoredEvents   = expvar.NewMap("ignoredEvents")
)

func init() {
	expvar.Publish("uptime", expvar.Func(uptime))
}
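
A hedged sketch of the uptime callback registered above; the real winlogbeat version may report additional fields.

// Hypothetical sketch of the expvar.Func callback: report the start time and
// the elapsed uptime. The returned value only needs to be JSON-serializable.
func uptime() interface{} {
	now := time.Now().UTC()
	return map[string]interface{}{
		"start_time": startTime,
		"uptime":     now.Sub(startTime).String(),
	}
}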

// Debug logging functions for this package.
var (
	debugf    = logp.MakeDebug("winlogbeat")
	detailf   = logp.MakeDebug("winlogbeat_detail")
	memstatsf = logp.MakeDebug("memstats")
)

// Time the application was started.
var startTime = time.Now().UTC()

type log struct {
	config.EventLogConfig
	eventLog eventlog.EventLog
}

type Winlogbeat struct {
	beat       *beat.Beat             // Common beat information.
	config     *config.Settings       // Configuration settings.
Example No. 10
}

var (
	droppedBecauseOfGaps = expvar.NewInt("tcp.dropped_because_of_gaps")
)

type seqCompare int

const (
	seqLT seqCompare = -1
	seqEq seqCompare = 0
	seqGT seqCompare = 1
)

var (
	debugf  = logp.MakeDebug("tcp")
	isDebug = false
)

func (tcp *Tcp) getId() uint32 {
	tcp.id += 1
	return tcp.id
}

func (tcp *Tcp) decideProtocol(tuple *common.IPPortTuple) protos.Protocol {
	protocol, exists := tcp.portMap[tuple.SrcPort]
	if exists {
		return protocol
	}

	protocol, exists = tcp.portMap[tuple.DstPort]
Example No. 11
// +build darwin freebsd linux windows

package network

import (
	"strings"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"

	"github.com/pkg/errors"
	"github.com/shirou/gopsutil/net"
)

var debugf = logp.MakeDebug("system-network")

func init() {
	if err := mb.Registry.AddMetricSet("system", "network", New); err != nil {
		panic(err)
	}
}

// MetricSet for fetching system network IO metrics.
type MetricSet struct {
	mb.BaseMetricSet
	interfaces map[string]struct{}
}

// New is a mb.MetricSetFactory that returns a new MetricSet.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
Example No. 12
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"

	"github.com/joeshaw/multierror"
	"github.com/pkg/errors"
)

// Expvar metric names.
const (
	successesKey = "success"
	failuresKey  = "failures"
	eventsKey    = "events"
)

var (
	debugf      = logp.MakeDebug("metricbeat")
	fetchesLock = sync.Mutex{}
	fetches     = expvar.NewMap("fetches")
)
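
A hedged sketch of how a per-metricset entry in the fetches map could be updated under fetchesLock; the key scheme and the helper itself are hypothetical, not part of the original wrapper.

// Hypothetical helper: record one fetch outcome in the shared expvar map while
// holding fetchesLock. The real module wrapper keys the map per metricset.
func recordFetch(key string, err error, events int) {
	fetchesLock.Lock()
	defer fetchesLock.Unlock()

	stats, ok := fetches.Get(key).(*expvar.Map)
	if !ok || stats == nil {
		stats = new(expvar.Map).Init()
		fetches.Set(key, stats)
	}
	if err != nil {
		stats.Add(failuresKey, 1)
		return
	}
	stats.Add(successesKey, 1)
	stats.Add(eventsKey, int64(events))
}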

// ModuleWrapper contains the Module and the private data associated with
// running the Module and its MetricSets.
//
// Use NewModuleWrapper or NewModuleWrappers to construct new ModuleWrappers.
type ModuleWrapper struct {
	mb.Module
	filters    *filter.FilterList
	metricSets []*metricSetWrapper // List of pointers to its associated MetricSets.
}

// metricSetWrapper contains the MetricSet and the private data associated with
Example No. 13
File: stat.go Project: ruflin/beats
import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"
	"github.com/elastic/beats/metricbeat/module/haproxy"

	"github.com/pkg/errors"
)

const (
	statsMethod = "stat"
)

var (
	debugf = logp.MakeDebug("haproxy-stat")
)

// init registers the haproxy stat MetricSet.
func init() {
	if err := mb.Registry.AddMetricSet("haproxy", statsMethod, New, haproxy.HostParser); err != nil {
		panic(err)
	}
}

// MetricSet for haproxy stats.
type MetricSet struct {
	mb.BaseMetricSet
}

// New creates a new haproxy stat MetricSet.
Example No. 14
package decoder

import (
	"fmt"

	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/packetbeat/protos"
	"github.com/elastic/beats/packetbeat/protos/icmp"
	"github.com/elastic/beats/packetbeat/protos/tcp"
	"github.com/elastic/beats/packetbeat/protos/udp"

	"github.com/tsg/gopacket"
	"github.com/tsg/gopacket/layers"
)

var debugf = logp.MakeDebug("decoder")

type DecoderStruct struct {
	decoders         map[gopacket.LayerType]gopacket.DecodingLayer
	linkLayerDecoder gopacket.DecodingLayer
	linkLayerType    gopacket.LayerType

	sll       layers.LinuxSLL
	d1q       layers.Dot1Q
	lo        layers.Loopback
	eth       layers.Ethernet
	ip4       layers.IPv4
	ip6       layers.IPv6
	icmp4     layers.ICMPv4
	icmp6     layers.ICMPv6
	tcp       layers.TCP
Example No. 15
import (
	"expvar"
	"time"

	"github.com/elastic/go-lumber/log"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/common/op"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/libbeat/outputs"
	"github.com/elastic/beats/libbeat/outputs/mode"
	"github.com/elastic/beats/libbeat/outputs/mode/modeutil"
	"github.com/elastic/beats/libbeat/outputs/transport"
)

var debug = logp.MakeDebug("logstash")

// Metrics that can be retrieved through the expvar web interface.
var (
	ackedEvents            = expvar.NewInt("libbeat.logstash.published_and_acked_events")
	eventsNotAcked         = expvar.NewInt("libbeat.logstash.published_but_not_acked_events")
	publishEventsCallCount = expvar.NewInt("libbeat.logstash.call_count.PublishEvents")

	statReadBytes   = expvar.NewInt("libbeat.logstash.publish.read_bytes")
	statWriteBytes  = expvar.NewInt("libbeat.logstash.publish.write_bytes")
	statReadErrors  = expvar.NewInt("libbeat.logstash.publish.read_errors")
	statWriteErrors = expvar.NewInt("libbeat.logstash.publish.write_errors")
)

const (
	defaultWaitRetry = 1 * time.Second
Example No. 16
	PgsqlGetDataState
	PgsqlExtendedQueryState
)

const (
	SSLRequest = iota
	StartupMessage
	CancelRequest
)

var (
	errInvalidLength = errors.New("invalid length")
)

var (
	debugf    = logp.MakeDebug("pgsql")
	detailedf = logp.MakeDebug("pgsqldetailed")
)

var (
	unmatchedResponses = expvar.NewInt("pgsql.unmatched_responses")
)

type Pgsql struct {

	// config
	Ports         []int
	maxStoreRows  int
	maxRowLength  int
	Send_request  bool
	Send_response bool
Example No. 17
	"strconv"
	"strings"
	"time"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"

	"github.com/elastic/beats/packetbeat/protos"
	"github.com/elastic/beats/packetbeat/publish"

	mkdns "github.com/miekg/dns"
	"golang.org/x/net/publicsuffix"
)

var (
	debugf = logp.MakeDebug("dns")
)

const MaxDnsTupleRawSize = 16 + 16 + 2 + 2 + 4 + 1

// Constants used to associate the DNS QR flag with a meaningful value.
const (
	Query    = false
	Response = true
)

// Transport protocol.
type Transport uint8

var (
	unmatchedRequests  = expvar.NewInt("dns.unmatched_requests")
Example No. 18
// +build darwin linux openbsd windows

package fsstats

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"
	"github.com/elastic/beats/topbeat/system"

	"github.com/pkg/errors"
)

var debugf = logp.MakeDebug("system-fsstats")

func init() {
	if err := mb.Registry.AddMetricSet("system", "fsstats", New); err != nil {
		panic(err)
	}
}

// MetricSet for fetching a summary of filesystem stats.
type MetricSet struct {
	mb.BaseMetricSet
}

// New creates and returns a new instance of MetricSet.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	return &MetricSet{
		BaseMetricSet: base,
	}, nil
Example No. 19
	// error case. On return nextEvents contains all events not yet published.
	PublishEvents(events []common.MapStr) (nextEvents []common.MapStr, err error)

	// PublishEvent sends one event to the client's sink. On failure an error is
	// returned.
	PublishEvent(event common.MapStr) error
}

// AsyncProtocolClient is an output plugin specific client interface
// for asynchronously encoding and publishing events.
type AsyncProtocolClient interface {
	Connectable

	AsyncPublishEvents(cb func([]common.MapStr, error), events []common.MapStr) error

	AsyncPublishEvent(cb func(error), event common.MapStr) error
}

var (
	// ErrTempBulkFailure indicates a temporary PublishEvents failure that may be retried.
	ErrTempBulkFailure = errors.New("temporary bulk send failure")
)
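
A hedged sketch of how a caller can use the PublishEvents contract documented above, retrying only the events that were not yet published. It assumes the truncated interface above is libbeat's synchronous ProtocolClient; the loop itself is illustrative.

// Hypothetical retry loop: on a temporary failure, retry the returned
// remainder; on any other error, give up.
func publishAll(client ProtocolClient, events []common.MapStr) error {
	for len(events) > 0 {
		rest, err := client.PublishEvents(events)
		if err != nil && err != ErrTempBulkFailure {
			return err
		}
		if err == ErrTempBulkFailure {
			debug("temporary bulk failure, retrying %d events", len(rest))
		}
		events = rest
	}
	return nil
}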

var (
	debug = logp.MakeDebug("output")
)

func Dropped(i int) {
	messagesDropped.Add(int64(i))
}
Example No. 20
// +build darwin freebsd linux openbsd windows

package filesystem

import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"

	"github.com/pkg/errors"
)

var debugf = logp.MakeDebug("system-filesystem")

func init() {
	if err := mb.Registry.AddMetricSet("system", "filesystem", New); err != nil {
		panic(err)
	}
}

// MetricSet for fetching filesystem metrics.
type MetricSet struct {
	mb.BaseMetricSet
}

// New creates and returns a new instance of MetricSet.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	return &MetricSet{
		BaseMetricSet: base,
	}, nil
}
Example No. 21
import (
	"encoding"
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"
	"strings"

	"github.com/elastic/beats/libbeat/logp"

	"github.com/pkg/errors"
)

const eventDebugSelector = "event"

var eventDebugf = logp.MakeDebug(eventDebugSelector)

var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()

type Float float64

// ConvertToGenericEvent normalizes the types contained in the given MapStr.
//
// Nil values in maps are dropped during the conversion. Any unsupported types
// that are found in the MapStr are dropped and warnings are logged.
func ConvertToGenericEvent(m MapStr) MapStr {
	keys := make([]string, 0, 10)
	event, errs := normalizeMap(m, keys...)
	if len(errs) > 0 {
		logp.Warn("Unsuccessful conversion to generic event: %v errors: %v, "+
			"event=%#v", len(errs), errs, m)
Example No. 22
}

type memcacheString struct {
	raw []byte
}

type memcacheData struct {
	data []byte
}

type memcacheStat struct {
	Name  memcacheString `json:"name"`
	Value memcacheString `json:"value"`
}

var debug = logp.MakeDebug("memcache")

var (
	unmatchedRequests      = expvar.NewInt("memcache.unmatched_requests")
	unmatchedResponses     = expvar.NewInt("memcache.unmatched_responses")
	unfinishedTransactions = expvar.NewInt("memcache.unfinished_transactions")
)

func init() {
	protos.Register("memcache", New)
}

func New(
	testMode bool,
	results publish.Transactions,
	cfg *common.Config,
Example No. 23
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/common/op"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/libbeat/outputs"
	"github.com/elastic/beats/libbeat/outputs/mode"
	"github.com/elastic/beats/libbeat/outputs/mode/modeutil"
	"github.com/elastic/beats/libbeat/outputs/transport"
)

type redisOut struct {
	mode mode.ConnectionMode
	topology
}

var debugf = logp.MakeDebug("redis")

// Metrics that can be retrieved through the expvar web interface.
var (
	statReadBytes   = expvar.NewInt("libbeat.redis.publish.read_bytes")
	statWriteBytes  = expvar.NewInt("libbeat.redis.publish.write_bytes")
	statReadErrors  = expvar.NewInt("libbeat.redis.publish.read_errors")
	statWriteErrors = expvar.NewInt("libbeat.redis.publish.write_errors")
)

const (
	defaultWaitRetry    = 1 * time.Second
	defaultMaxWaitRetry = 60 * time.Second
)

func init() {
Example No. 24
)

// Metrics that can be retrieved through the expvar web interface. Metrics must be
// enabled through configuration in order for the web service to be started.
var (
	publishedEvents = expvar.NewMap("published_events")
	ignoredEvents   = expvar.NewMap("ignored_events")
)

func init() {
	expvar.Publish("uptime", expvar.Func(uptime))
}

// Debug logging functions for this package.
var (
	debugf    = logp.MakeDebug("winlogbeat")
	memstatsf = logp.MakeDebug("memstats")
)

// Time the application was started.
var startTime = time.Now().UTC()

// Winlogbeat is used to conform to the beat interface
type Winlogbeat struct {
	beat       *beat.Beat             // Common beat information.
	config     *config.Settings       // Configuration settings.
	eventLogs  []eventlog.EventLog    // List of all event logs being monitored.
	done       chan struct{}          // Channel to initiate shutdown of main event loop.
	client     publisher.Client       // Interface to publish event.
	checkpoint *checkpoint.Checkpoint // Persists event log state to disk.
}
Example No. 25
}

const (
	defaultWaitRetry = 1 * time.Second

	// NOTE: maxWaitRetry has no effect on the mode, as the logstash client
	// currently does not return ErrTempBulkFailure
	defaultMaxWaitRetry = 60 * time.Second
)

func init() {
	sarama.Logger = kafkaLogger{}
	outputs.RegisterOutputPlugin("kafka", New)
}

var debugf = logp.MakeDebug("kafka")

var (
	errNoTopicSet = errors.New("No topic configured")
	errNoHosts    = errors.New("No hosts configured")
)

var (
	compressionModes = map[string]sarama.CompressionCodec{
		"none":   sarama.CompressionNone,
		"no":     sarama.CompressionNone,
		"off":    sarama.CompressionNone,
		"gzip":   sarama.CompressionGZIP,
		"snappy": sarama.CompressionSnappy,
	}
)
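
A hypothetical helper showing how a configured compression name could be resolved through the table above; it assumes strings and fmt are imported, and the real output performs this check during config validation.

// Hypothetical lookup, case-insensitive; unknown names are rejected.
func compressionCodec(name string) (sarama.CompressionCodec, error) {
	codec, ok := compressionModes[strings.ToLower(name)]
	if !ok {
		return sarama.CompressionNone, fmt.Errorf("unknown compression mode: %v", name)
	}
	return codec, nil
}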
Example No. 26
import (
	"encoding/binary"
	"fmt"
	"time"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"

	"github.com/elastic/beats/packetbeat/protos"
	"github.com/elastic/beats/packetbeat/protos/tcp"
	"github.com/elastic/beats/packetbeat/publish"
)

var debugf = logp.MakeDebug("rpc")

const (
	RPC_LAST_FRAG = 0x80000000
	RPC_SIZE_MASK = 0x7fffffff
)

const (
	RPC_CALL  = 0
	RPC_REPLY = 1
)
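
The two masks follow ONC RPC record marking: the top bit of the 4-byte marker flags the last fragment and the remaining 31 bits carry the fragment length. A hedged sketch (the helper name is hypothetical):

// Hypothetical sketch: split a record-marking word into the last-fragment flag
// and the fragment size using the masks above.
func parseFragmentHeader(data []byte) (isLast bool, size uint32, err error) {
	if len(data) < 4 {
		return false, 0, fmt.Errorf("fragment header needs 4 bytes, got %d", len(data))
	}
	marker := binary.BigEndian.Uint32(data[:4])
	return marker&RPC_LAST_FRAG != 0, marker & RPC_SIZE_MASK, nil
}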

type RpcStream struct {
	tcpTuple *common.TCPTuple
	rawData  []byte
}
Example No. 27
const (
	// defaultScheme is the default scheme to use when it is not specified in
	// the host config.
	defaultScheme = "http"

	// defaultPath is the default path to the mod_status endpoint on the
	// Apache HTTPD server.
	defaultPath = "/server-status"

	// autoQueryParam is a query parameter added to the request so that
	// mod_status returns machine-readable output.
	autoQueryParam = "auto"
)
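
A hypothetical helper showing how the defaults above combine into the mod_status URL, for example http://example.com/server-status?auto; it assumes net/url is imported and is not part of the original file.

// Hypothetical sketch: build the status URL from the defaults; a configured
// scheme or path would override these in the real module.
func statusURL(host string) string {
	u := url.URL{
		Scheme:   defaultScheme,
		Host:     host,
		Path:     defaultPath,
		RawQuery: autoQueryParam,
	}
	return u.String()
}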

var (
	debugf = logp.MakeDebug("apache-status")
)

func init() {
	if err := helper.Registry.AddMetricSeter("apache", "status", New); err != nil {
		panic(err)
	}
}

// New creates a new instance of MetricSeter.
func New() helper.MetricSeter {
	return &MetricSeter{}
}

type MetricSeter struct {
	URLs map[string]string // Map of host to endpoint URL.
Example No. 28
package amqp

import (
	"strconv"
	"strings"
	"time"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/packetbeat/protos"
	"github.com/elastic/beats/packetbeat/protos/tcp"
	"github.com/elastic/beats/packetbeat/publish"
)

var (
	debugf    = logp.MakeDebug("amqp")
	detailedf = logp.MakeDebug("amqpdetailed")
)

type Amqp struct {
	Ports                     []int
	SendRequest               bool
	SendResponse              bool
	MaxBodyLength             int
	ParseHeaders              bool
	ParseArguments            bool
	HideConnectionInformation bool
	transactions              *common.Cache
	transactionTimeout        time.Duration
	results                   publish.Transactions
Example No. 29
File: info.go Project: ruflin/beats
import (
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"
	"github.com/elastic/beats/metricbeat/module/haproxy"

	"github.com/pkg/errors"
)

const (
	statsMethod = "info"
)

var (
	debugf = logp.MakeDebug("haproxy-info")
)

// init registers the haproxy info MetricSet.
func init() {
	if err := mb.Registry.AddMetricSet("haproxy", "info", New, haproxy.HostParser); err != nil {
		panic(err)
	}
}

// MetricSet for haproxy info.
type MetricSet struct {
	mb.BaseMetricSet
}

// New creates a haproxy info MetricSet.
Example No. 30
 * Complete dashboards
*/

import (
	"database/sql"

	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/metricbeat/mb"
	"github.com/elastic/beats/metricbeat/module/mysql"

	"github.com/pkg/errors"
)

var (
	debugf = logp.MakeDebug("mysql-status")
)

func init() {
	if err := mb.Registry.AddMetricSet("mysql", "status", New); err != nil {
		panic(err)
	}
}

// MetricSet for fetching MySQL server status.
type MetricSet struct {
	mb.BaseMetricSet
	dsn string
	db  *sql.DB
}