func TestAerospikeStatistics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	a := &Aerospike{
		Servers: []string{testutil.GetLocalHost() + ":3000"},
	}

	var acc testutil.Accumulator

	err := a.Gather(&acc)
	require.NoError(t, err)

	// Only use a few of the metrics
	asMetrics := []string{
		"transactions",
		"stat_write_errs",
		"stat_read_reqs",
		"stat_write_reqs",
	}

	for _, metric := range asMetrics {
		assert.True(t, acc.HasIntValue(metric), metric)
	}
}
func TestZookeeperGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	z := &Zookeeper{
		Servers: []string{testutil.GetLocalHost()},
	}

	var acc testutil.Accumulator

	err := z.Gather(&acc)
	require.NoError(t, err)

	intMetrics := []string{
		"avg_latency",
		"max_latency",
		"min_latency",
		"packets_received",
		"packets_sent",
		"outstanding_requests",
		"znode_count",
		"watch_count",
		"ephemerals_count",
		"approximate_data_size",
		"open_file_descriptor_count",
		"max_file_descriptor_count",
	}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric), metric)
	}
}
func TestAddNonReplStats(t *testing.T) {
	d := NewMongodbData(
		&StatLine{
			StorageEngine:  "",
			Time:           time.Now(),
			Insert:         0,
			Query:          0,
			Update:         0,
			Delete:         0,
			GetMore:        0,
			Command:        0,
			Flushes:        0,
			Virtual:        0,
			Resident:       0,
			QueuedReaders:  0,
			QueuedWriters:  0,
			ActiveReaders:  0,
			ActiveWriters:  0,
			NetIn:          0,
			NetOut:         0,
			NumConnections: 0,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.AddDefaultStats(&acc)

	for key := range DefaultStats {
		assert.True(t, acc.HasIntValue(key))
	}
}
func TestAddTableStats(t *testing.T) {
	var acc testutil.Accumulator

	err := server.addTableStats(&acc)
	require.NoError(t, err)

	for _, metric := range TableTracking {
		assert.True(t, acc.HasIntValue(metric))
	}

	keys := []string{
		"cache_bytes_in_use",
		"disk_read_bytes_per_sec",
		"disk_read_bytes_total",
		"disk_written_bytes_per_sec",
		"disk_written_bytes_total",
		"disk_usage_data_bytes",
		"disk_usage_garbage_bytes",
		"disk_usage_metadata_bytes",
		"disk_usage_preallocated_bytes",
	}

	for _, metric := range keys {
		assert.True(t, acc.HasIntValue(metric))
	}
}
func TestAddEngineStatsPartial(t *testing.T) {
	engine := &Engine{
		ClientConns:   0,
		ClientActive:  0,
		QueriesPerSec: 0,
		ReadsPerSec:   0,
		WritesPerSec:  0,
	}

	var acc testutil.Accumulator

	keys := []string{
		"active_clients",
		"clients",
		"queries_per_sec",
		"read_docs_per_sec",
		"written_docs_per_sec",
	}

	missingKeys := []string{
		"total_queries",
		"total_reads",
		"total_writes",
	}

	engine.AddEngineStats(keys, &acc, tags)

	for _, metric := range missingKeys {
		assert.False(t, acc.HasIntValue(metric))
	}
}
func TestMemcachedGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	m := &Memcached{
		Servers: []string{testutil.GetLocalHost()},
	}

	var acc testutil.Accumulator

	err := m.Gather(&acc)
	require.NoError(t, err)

	intMetrics := []string{
		"get_hits", "get_misses", "evictions", "limit_maxbytes",
		"bytes", "uptime", "curr_items", "total_items",
		"curr_connections", "total_connections", "connection_structures",
		"cmd_get", "cmd_set", "delete_hits", "delete_misses",
		"incr_hits", "incr_misses", "decr_hits", "decr_misses",
		"cas_hits", "cas_misses", "evictions", "bytes_read",
		"bytes_written", "threads", "conn_yields",
	}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric), metric)
	}
}
func TestMysqlGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	m := &Mysql{
		Servers: []string{fmt.Sprintf("root@tcp(%s:3306)/", testutil.GetLocalHost())},
	}

	var acc testutil.Accumulator

	err := m.Gather(&acc)
	require.NoError(t, err)

	prefixes := []struct {
		prefix string
		count  int
	}{
		{"commands", 139},
		{"handler", 16},
		{"bytes", 2},
		{"innodb", 46},
		{"threads", 4},
		{"aborted", 2},
		{"created", 3},
		{"key", 7},
		{"open", 7},
		{"opened", 3},
		{"qcache", 8},
		{"table", 1},
	}

	intMetrics := []string{
		"queries",
		"slow_queries",
		"connections",
	}

	for _, prefix := range prefixes {
		var count int
		for _, p := range acc.Points {
			if strings.HasPrefix(p.Measurement, prefix.prefix) {
				count++
			}
		}
		if prefix.count > count {
			t.Errorf("Expected at least %d measurements with prefix %s, got %d",
				prefix.count, prefix.prefix, count)
		}
	}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric))
	}
}
func TestAddMemberStats(t *testing.T) {
	var acc testutil.Accumulator

	err := server.addMemberStats(&acc)
	require.NoError(t, err)

	for _, metric := range MemberTracking {
		assert.True(t, acc.HasIntValue(metric))
	}
}
func TestPostgresqlGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &Postgresql{
		Servers: []*Server{
			{
				Address:   fmt.Sprintf("host=%s user=postgres sslmode=disable", testutil.GetLocalHost()),
				Databases: []string{"postgres"},
			},
		},
	}

	var acc testutil.Accumulator

	err := p.Gather(&acc)
	require.NoError(t, err)

	intMetrics := []string{
		"xact_commit",
		"xact_rollback",
		"blks_read",
		"blks_hit",
		"tup_returned",
		"tup_fetched",
		"tup_inserted",
		"tup_updated",
		"tup_deleted",
		"conflicts",
		"temp_files",
		"temp_bytes",
		"deadlocks",
	}

	floatMetrics := []string{
		"blk_read_time",
		"blk_write_time",
	}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric))
	}

	for _, metric := range floatMetrics {
		assert.True(t, acc.HasFloatValue(metric))
	}
}
func TestAddDefaultStats(t *testing.T) {
	var acc testutil.Accumulator

	err := server.gatherData(&acc)
	require.NoError(t, err)

	time.Sleep(time.Duration(1) * time.Second)

	// need to call this twice so it can perform the diff
	err = server.gatherData(&acc)
	require.NoError(t, err)

	for key := range DefaultStats {
		assert.True(t, acc.HasIntValue(key))
	}
}
func TestMemcachedGeneratesMetrics(t *testing.T) {
	m := &Memcached{
		Servers: []string{"localhost"},
	}

	var acc testutil.Accumulator

	err := m.Gather(&acc)
	require.NoError(t, err)

	intMetrics := []string{"get_hits", "get_misses", "evictions",
		"limit_maxbytes", "bytes"}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric), metric)
	}
}
func TestMysqlGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	m := &Mysql{
		Servers: []string{fmt.Sprintf("root@tcp(%s:3306)/", testutil.GetLocalHost())},
	}

	var acc testutil.Accumulator

	err := m.Gather(&acc)
	require.NoError(t, err)

	prefixes := []struct {
		prefix string
		count  int
	}{
		{"commands", 141},
		{"handler", 18},
		{"bytes", 2},
		{"innodb", 51},
		{"threads", 4},
	}

	intMetrics := []string{
		"queries",
		"slow_queries",
	}

	for _, prefix := range prefixes {
		var count int
		for _, p := range acc.Points {
			if strings.HasPrefix(p.Measurement, prefix.prefix) {
				count++
			}
		}
		assert.Equal(t, prefix.count, count)
	}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric))
	}
}
func TestAddReplStats(t *testing.T) {
	d := NewMongodbData(
		&StatLine{
			StorageEngine: "mmapv1",
			Mapped:        0,
			NonMapped:     0,
			Faults:        0,
		},
		tags,
	)

	var acc testutil.Accumulator

	d.AddDefaultStats(&acc)

	for key := range MmapStats {
		assert.True(t, acc.HasIntValue(key))
	}
}
func TestZfsPoolMetrics(t *testing.T) {
	err := os.MkdirAll(testKstatPath, 0755)
	require.NoError(t, err)

	err = os.MkdirAll(testKstatPath+"/HOME", 0755)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(pool_ioContents), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644)
	require.NoError(t, err)

	poolMetrics := getPoolMetrics()

	var acc testutil.Accumulator

	// one pool, all metrics
	tags := map[string]string{
		"pool": "HOME",
	}

	z := &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}}
	err = z.Gather(&acc)
	require.NoError(t, err)

	for _, metric := range poolMetrics {
		assert.False(t, acc.HasIntValue(metric.name), metric.name)
		assert.False(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
	}

	z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}, PoolMetrics: true}
	err = z.Gather(&acc)
	require.NoError(t, err)

	for _, metric := range poolMetrics {
		assert.True(t, acc.HasIntValue(metric.name), metric.name)
		assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
	}

	err = os.RemoveAll(os.TempDir() + "/telegraf")
	require.NoError(t, err)
}
func TestMysqlGeneratesMetrics(t *testing.T) {
	m := &Mysql{
		Servers: []string{""},
	}

	var acc testutil.Accumulator

	err := m.Gather(&acc)
	require.NoError(t, err)

	prefixes := []struct {
		prefix string
		count  int
	}{
		{"commands", 141},
		{"handler", 18},
		{"bytes", 2},
		{"innodb", 51},
		{"threads", 4},
	}

	intMetrics := []string{
		"queries",
		"slow_queries",
	}

	for _, prefix := range prefixes {
		var count int
		for _, p := range acc.Points {
			if strings.HasPrefix(p.Measurement, prefix.prefix) {
				count++
			}
		}
		assert.Equal(t, prefix.count, count)
	}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric))
	}
}
func TestAddStorageStats(t *testing.T) {
	storage := &Storage{
		Cache: Cache{
			BytesInUse: 0,
		},
		Disk: Disk{
			ReadBytesPerSec:  0,
			ReadBytesTotal:   0,
			WriteBytesPerSec: 0,
			WriteBytesTotal:  0,
			SpaceUsage: SpaceUsage{
				Data:     0,
				Garbage:  0,
				Metadata: 0,
				Prealloc: 0,
			},
		},
	}

	var acc testutil.Accumulator

	keys := []string{
		"cache_bytes_in_use",
		"disk_read_bytes_per_sec",
		"disk_read_bytes_total",
		"disk_written_bytes_per_sec",
		"disk_written_bytes_total",
		"disk_usage_data_bytes",
		"disk_usage_garbage_bytes",
		"disk_usage_metadata_bytes",
		"disk_usage_preallocated_bytes",
	}

	storage.AddStats(&acc, tags)

	for _, metric := range keys {
		assert.True(t, acc.HasIntValue(metric))
	}
}
func TestPostgresqlGeneratesMetrics(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	p := &Postgresql{
		Servers: []*Server{
			{
				Address:   fmt.Sprintf("host=%s user=postgres sslmode=disable", testutil.GetLocalHost()),
				Databases: []string{"postgres"},
			},
		},
	}

	var acc testutil.Accumulator

	err := p.Gather(&acc)
	require.NoError(t, err)

	availableColumns := make(map[string]bool)
	for _, col := range p.Servers[0].OrderedColumns {
		availableColumns[col] = true
	}

	intMetrics := []string{
		"xact_commit",
		"xact_rollback",
		"blks_read",
		"blks_hit",
		"tup_returned",
		"tup_fetched",
		"tup_inserted",
		"tup_updated",
		"tup_deleted",
		"conflicts",
		"temp_files",
		"temp_bytes",
		"deadlocks",
		"numbackends",
	}

	floatMetrics := []string{
		"blk_read_time",
		"blk_write_time",
	}

	metricsCounted := 0

	for _, metric := range intMetrics {
		if _, ok := availableColumns[metric]; ok {
			assert.True(t, acc.HasIntValue(metric))
			metricsCounted++
		}
	}

	for _, metric := range floatMetrics {
		if _, ok := availableColumns[metric]; ok {
			assert.True(t, acc.HasFloatValue(metric))
			metricsCounted++
		}
	}

	assert.True(t, metricsCounted > 0)
	assert.Equal(t, len(availableColumns)-len(p.IgnoredColumns()), metricsCounted)
}
func TestZfsGeneratesMetrics(t *testing.T) {
	err := os.MkdirAll(testKstatPath, 0755)
	require.NoError(t, err)

	err = os.MkdirAll(testKstatPath+"/HOME", 0755)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/vdev_cache_stats", []byte(vdev_cache_statsContents), 0644)
	require.NoError(t, err)

	intMetrics := []*metrics{
		{name: "arcstats_hits", value: 5968846374},
		{name: "arcstats_misses", value: 1659178751},
		{name: "arcstats_demand_data_hits", value: 4860247322},
		{name: "arcstats_demand_data_misses", value: 501499535},
		{name: "arcstats_demand_metadata_hits", value: 708608325},
		{name: "arcstats_demand_metadata_misses", value: 156591375},
		{name: "arcstats_prefetch_data_hits", value: 367047144},
		{name: "arcstats_prefetch_data_misses", value: 974529898},
		{name: "arcstats_prefetch_metadata_hits", value: 32943583},
		{name: "arcstats_prefetch_metadata_misses", value: 26557943},
		{name: "arcstats_mru_hits", value: 301176811},
		{name: "arcstats_mru_ghost_hits", value: 47066067},
		{name: "arcstats_mfu_hits", value: 5520612438},
		{name: "arcstats_mfu_ghost_hits", value: 45784009},
		{name: "arcstats_deleted", value: 1718937704},
		{name: "arcstats_recycle_miss", value: 481222994},
		{name: "arcstats_mutex_miss", value: 20575623},
		{name: "arcstats_evict_skip", value: 14655903906543},
		{name: "arcstats_evict_l2_cached", value: 145310202998272},
		{name: "arcstats_evict_l2_eligible", value: 16345402777088},
		{name: "arcstats_evict_l2_ineligible", value: 7437226893312},
		{name: "arcstats_hash_elements", value: 36617980},
		{name: "arcstats_hash_elements_max", value: 36618318},
		{name: "arcstats_hash_collisions", value: 554145157},
		{name: "arcstats_hash_chains", value: 4187651},
		{name: "arcstats_hash_chain_max", value: 26},
		{name: "arcstats_p", value: 13963222064},
		{name: "arcstats_c", value: 16381258376},
		{name: "arcstats_c_min", value: 4194304},
		{name: "arcstats_c_max", value: 16884125696},
		{name: "arcstats_size", value: 16319887096},
		{name: "arcstats_hdr_size", value: 42567864},
		{name: "arcstats_data_size", value: 60066304},
		{name: "arcstats_meta_size", value: 1701534208},
		{name: "arcstats_other_size", value: 1661543168},
		{name: "arcstats_anon_size", value: 94720},
		{name: "arcstats_anon_evict_data", value: 0},
		{name: "arcstats_anon_evict_metadata", value: 0},
		{name: "arcstats_mru_size", value: 973099008},
		{name: "arcstats_mru_evict_data", value: 9175040},
		{name: "arcstats_mru_evict_metadata", value: 32768},
		{name: "arcstats_mru_ghost_size", value: 32768},
		{name: "arcstats_mru_ghost_evict_data", value: 0},
		{name: "arcstats_mru_ghost_evict_metadata", value: 32768},
		{name: "arcstats_mfu_size", value: 788406784},
		{name: "arcstats_mfu_evict_data", value: 50881024},
		{name: "arcstats_mfu_evict_metadata", value: 81920},
		{name: "arcstats_mfu_ghost_size", value: 0},
		{name: "arcstats_mfu_ghost_evict_data", value: 0},
		{name: "arcstats_mfu_ghost_evict_metadata", value: 0},
		{name: "arcstats_l2_hits", value: 573868618},
		{name: "arcstats_l2_misses", value: 1085309718},
		{name: "arcstats_l2_feeds", value: 12182087},
		{name: "arcstats_l2_rw_clash", value: 9610},
		{name: "arcstats_l2_read_bytes", value: 32695938336768},
		{name: "arcstats_l2_write_bytes", value: 2826774778880},
		{name: "arcstats_l2_writes_sent", value: 4267687},
		{name: "arcstats_l2_writes_done", value: 4267687},
		{name: "arcstats_l2_writes_error", value: 0},
		{name: "arcstats_l2_writes_hdr_miss", value: 164},
		{name: "arcstats_l2_evict_lock_retry", value: 5},
		{name: "arcstats_l2_evict_reading", value: 0},
		{name: "arcstats_l2_free_on_write", value: 1606914},
		{name: "arcstats_l2_cdata_free_on_write", value: 1775},
		{name: "arcstats_l2_abort_lowmem", value: 83462},
		{name: "arcstats_l2_cksum_bad", value: 393860640},
		{name: "arcstats_l2_io_error", value: 53881460},
		{name: "arcstats_l2_size", value: 2471466648576},
		{name: "arcstats_l2_asize", value: 2461690072064},
		{name: "arcstats_l2_hdr_size", value: 12854175552},
		{name: "arcstats_l2_compress_successes", value: 12184849},
		{name: "arcstats_l2_compress_zeros", value: 0},
		{name: "arcstats_l2_compress_failures", value: 0},
		{name: "arcstats_memory_throttle_count", value: 0},
		{name: "arcstats_duplicate_buffers", value: 0},
		{name: "arcstats_duplicate_buffers_size", value: 0},
		{name: "arcstats_duplicate_reads", value: 0},
		{name: "arcstats_memory_direct_count", value: 5159942},
		{name: "arcstats_memory_indirect_count", value: 3034640},
		{name: "arcstats_arc_no_grow", value: 0},
		{name: "arcstats_arc_tempreserve", value: 0},
		{name: "arcstats_arc_loaned_bytes", value: 0},
		{name: "arcstats_arc_prune", value: 114554259559},
		{name: "arcstats_arc_meta_used", value: 16259820792},
		{name: "arcstats_arc_meta_limit", value: 12663094272},
		{name: "arcstats_arc_meta_max", value: 18327165696},
		{name: "zfetchstats_hits", value: 7812959060},
		{name: "zfetchstats_misses", value: 4154484207},
		{name: "zfetchstats_colinear_hits", value: 1366368},
		{name: "zfetchstats_colinear_misses", value: 4153117839},
		{name: "zfetchstats_stride_hits", value: 7309776732},
		{name: "zfetchstats_stride_misses", value: 222766182},
		{name: "zfetchstats_reclaim_successes", value: 107788388},
		{name: "zfetchstats_reclaim_failures", value: 4045329451},
		{name: "zfetchstats_streams_resets", value: 20989756},
		{name: "zfetchstats_streams_noresets", value: 503182328},
		{name: "zfetchstats_bogus_streams", value: 0},
		{name: "vdev_cache_stats_delegations", value: 0},
		{name: "vdev_cache_stats_hits", value: 0},
		{name: "vdev_cache_stats_misses", value: 0},
	}

	var acc testutil.Accumulator

	// one pool, all metrics
	tags := map[string]string{
		"pools": "HOME",
	}

	z := &Zfs{KstatPath: testKstatPath}
	err = z.Gather(&acc)
	require.NoError(t, err)

	for _, metric := range intMetrics {
		fmt.Println(metric.name)
		assert.True(t, acc.HasIntValue(metric.name), metric.name)
		assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
	}

	// two pools, all metrics
	err = os.MkdirAll(testKstatPath+"/STORAGE", 0755)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644)
	require.NoError(t, err)

	tags = map[string]string{
		"pools": "HOME::STORAGE",
	}

	z = &Zfs{KstatPath: testKstatPath}
	err = z.Gather(&acc)
	require.NoError(t, err)

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric.name), metric.name)
		assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
	}

	// two pools, one metric
	z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}}
	err = z.Gather(&acc)
	require.NoError(t, err)

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric.name), metric.name)
		assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
	}

	err = os.RemoveAll(os.TempDir() + "/telegraf")
	require.NoError(t, err)
}
func TestRabbitMQGeneratesMetrics(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var rsp string

		if r.URL.Path == "/api/overview" {
			rsp = sampleOverviewResponse
		} else if r.URL.Path == "/api/nodes" {
			rsp = sampleNodesResponse
		} else {
			panic("Cannot handle request")
		}

		fmt.Fprintln(w, rsp)
	}))
	defer ts.Close()

	r := &RabbitMQ{
		Servers: []*Server{
			{
				URL: ts.URL,
			},
		},
	}

	var acc testutil.Accumulator

	err := r.Gather(&acc)
	require.NoError(t, err)

	intMetrics := []string{
		"messages",
		"messages_ready",
		"messages_unacked",
		"messages_acked",
		"messages_delivered",
		"messages_published",
		"channels",
		"connections",
		"consumers",
		"exchanges",
		"queues",
	}

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric))
	}

	nodeIntMetrics := []string{
		"disk_free",
		"disk_free_limit",
		"fd_total",
		"fd_used",
		"mem_limit",
		"mem_used",
		"proc_total",
		"proc_used",
		"run_queue",
		"sockets_total",
		"sockets_used",
	}

	for _, metric := range nodeIntMetrics {
		assert.True(t, acc.HasIntValue(metric))
	}
}
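// The test above serves sampleOverviewResponse and sampleNodesResponse, which
// are fixtures defined elsewhere in the package. A minimal sketch of the shape
// they are assumed to have, based on the public RabbitMQ management API
// (/api/overview and /api/nodes), is below. The field names follow that API;
// the specific values and the exact contents of the real fixtures are
// illustrative assumptions, not the actual Telegraf test data.
const sampleOverviewResponse = `{
  "message_stats": {"ack": 5246, "deliver": 5246, "publish": 5258},
  "object_totals": {"channels": 44, "connections": 44, "consumers": 65, "exchanges": 43, "queues": 62},
  "queue_totals": {"messages": 5, "messages_ready": 5, "messages_unacknowledged": 0}
}`

const sampleNodesResponse = `[{
  "disk_free": 3776, "disk_free_limit": 50000000,
  "fd_total": 1024, "fd_used": 63,
  "mem_limit": 2503, "mem_used": 159707080,
  "proc_total": 1048576, "proc_used": 783,
  "run_queue": 0,
  "sockets_total": 829, "sockets_used": 45
}]`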
func TestZfsGeneratesMetrics(t *testing.T) {
	err := os.MkdirAll(testKstatPath, 0755)
	require.NoError(t, err)

	err = os.MkdirAll(testKstatPath+"/HOME", 0755)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/vdev_cache_stats", []byte(vdev_cache_statsContents), 0644)
	require.NoError(t, err)

	intMetrics := getKstatMetricsAll()

	var acc testutil.Accumulator

	// one pool, all metrics
	tags := map[string]string{
		"pools": "HOME",
	}

	z := &Zfs{KstatPath: testKstatPath}
	err = z.Gather(&acc)
	require.NoError(t, err)

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric.name), metric.name)
		assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
	}

	// two pools, all metrics
	err = os.MkdirAll(testKstatPath+"/STORAGE", 0755)
	require.NoError(t, err)

	err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644)
	require.NoError(t, err)

	tags = map[string]string{
		"pools": "HOME::STORAGE",
	}

	z = &Zfs{KstatPath: testKstatPath}
	acc = testutil.Accumulator{}
	err = z.Gather(&acc)
	require.NoError(t, err)

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric.name), metric.name)
		assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
	}

	intMetrics = getKstatMetricsArcOnly()

	// two pools, one metric
	z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}}
	acc = testutil.Accumulator{}
	err = z.Gather(&acc)
	require.NoError(t, err)

	for _, metric := range intMetrics {
		assert.True(t, acc.HasIntValue(metric.name), metric.name)
		assert.True(t, acc.CheckTaggedValue(metric.name, metric.value, tags))
	}

	err = os.RemoveAll(os.TempDir() + "/telegraf")
	require.NoError(t, err)
}
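// The ZFS tests reference a package-level metrics type and helpers such as
// getKstatMetricsAll() / getKstatMetricsArcOnly() that are defined elsewhere in
// the test file. A minimal sketch of what they are assumed to look like is
// below, inferred from how the tests use them; the field names, the int64
// width, and the helper structure are assumptions rather than the actual
// Telegraf source.
type metrics struct {
	name  string
	value int64
}

// getKstatMetricsArcOnly is assumed to return only the arcstats_* expectations
// (the same name/value pairs spelled out in the earlier TestZfsGeneratesMetrics).
func getKstatMetricsArcOnly() []*metrics {
	return []*metrics{
		{name: "arcstats_hits", value: 5968846374},
		{name: "arcstats_misses", value: 1659178751},
		// ... remaining arcstats_* entries from the fixture data ...
	}
}

// getKstatMetricsAll is assumed to extend the arcstats expectations with the
// zfetchstats_* and vdev_cache_stats_* entries.
func getKstatMetricsAll() []*metrics {
	return append(getKstatMetricsArcOnly(),
		&metrics{name: "zfetchstats_hits", value: 7812959060},
		&metrics{name: "vdev_cache_stats_misses", value: 0},
		// ... remaining zfetchstats_* and vdev_cache_stats_* entries ...
	)
}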