func TestExec(t *testing.T) { parser, _ := parsers.NewJSONParser("exec", []string{}, nil) e := &Exec{ runner: newRunnerMock([]byte(validJson), nil), Commands: []string{"testcommand arg1"}, parser: parser, } var acc testutil.Accumulator err := e.Gather(&acc) require.NoError(t, err) assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") fields := map[string]interface{}{ "num_processes": float64(82), "cpu_used": float64(8234), "cpu_free": float64(32), "percent": float64(0.81), "users_0": float64(0), "users_1": float64(1), "users_2": float64(2), "users_3": float64(3), } acc.AssertContainsFields(t, "exec", fields) }
func TestRunParser(t *testing.T) { var testmsg = []byte(testMsg) listener, in := newTestTcpListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) listener.parser, _ = parsers.NewInfluxParser() listener.wg.Add(1) go listener.tcpParser() in <- testmsg time.Sleep(time.Millisecond * 25) listener.Gather(&acc) if a := acc.NFields(); a != 1 { t.Errorf("got %v, expected %v", a, 1) } acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, ) }
func TestRunParser(t *testing.T) { log.SetOutput(ioutil.Discard) var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257") listener, in := newTestUdpListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) listener.parser, _ = parsers.NewInfluxParser() listener.wg.Add(1) go listener.udpParser() in <- testmsg time.Sleep(time.Millisecond * 25) listener.Gather(&acc) if a := acc.NFields(); a != 1 { t.Errorf("got %v, expected %v", a, 1) } acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, ) }
// Test response to malformed JSON func TestHttpJsonBadJson(t *testing.T) { graylog := genMockGrayLog(invalidJSON, 200) var acc testutil.Accumulator err := graylog[0].Gather(&acc) assert.NotNil(t, err) assert.Equal(t, 0, acc.NFields()) }
// Test response to malformed JSON func TestHttpJsonBadJson(t *testing.T) { httpjson := genMockHttpJson(invalidJSON, 200) var acc testutil.Accumulator err := httpjson[0].Gather(&acc) assert.NotNil(t, err) assert.Equal(t, 0, acc.NFields()) }
// Test response to empty string as response object
func TestHttpJsonEmptyResponse(t *testing.T) {
	httpjson := genMockHttpJson(empty, 200)
	var acc testutil.Accumulator
	err := httpjson[0].Gather(&acc)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}
// Test response to empty string as response object
func TestHttpJsonEmptyResponse(t *testing.T) {
	graylog := genMockGrayLog(empty, 200)
	var acc testutil.Accumulator
	err := graylog[0].Gather(&acc)
	assert.NotNil(t, err)
	assert.Equal(t, 0, acc.NFields())
}
// Test response to HTTP 405 func TestHttpJsonBadMethod(t *testing.T) { httpjson := genMockHttpJson(validJSON, 200) httpjson[0].Method = "NOT_A_REAL_METHOD" var acc testutil.Accumulator err := httpjson[0].Gather(&acc) assert.NotNil(t, err) assert.Equal(t, 0, acc.NFields()) }
func TestExecMalformed(t *testing.T) { e := &Exec{ runner: newRunnerMock([]byte(malformedJson), nil), Command: "badcommand arg1", } var acc testutil.Accumulator err := e.Gather(&acc) require.Error(t, err) assert.Equal(t, acc.NFields(), 0, "No new points should have been added") }
func TestCommandError(t *testing.T) { e := &Exec{ runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")), Command: "badcommand", } var acc testutil.Accumulator err := e.Gather(&acc) require.Error(t, err) assert.Equal(t, acc.NFields(), 0, "No new points should have been added") }
// Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { k, in := newTestKafka() acc := testutil.Accumulator{} k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(invalidMsg) time.Sleep(time.Millisecond * 5) assert.Equal(t, acc.NFields(), 0) }
func TestExecMalformed(t *testing.T) { parser, _ := parsers.NewJSONParser("exec", []string{}, nil) e := &Exec{ runner: newRunnerMock([]byte(malformedJson), nil), Commands: []string{"badcommand arg1"}, parser: parser, } var acc testutil.Accumulator err := e.Gather(&acc) require.Error(t, err) assert.Equal(t, acc.NFields(), 0, "No new points should have been added") }
func TestCommandError(t *testing.T) { parser, _ := parsers.NewJSONParser("exec", []string{}, nil) e := &Exec{ runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")), Commands: []string{"badcommand"}, parser: parser, } var acc testutil.Accumulator err := e.Gather(&acc) require.Error(t, err) assert.Equal(t, acc.NFields(), 0, "No new points should have been added") }
// Test that the parser parses NATS messages into metrics func TestRunParser(t *testing.T) { n, in := newTestNatsConsumer() acc := testutil.Accumulator{} n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- natsMsg(testMsg) time.Sleep(time.Millisecond * 25) if acc.NFields() != 1 { t.Errorf("got %v, expected %v", acc.NFields(), 1) } }
// Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { n, in := newTestMQTTConsumer() acc := testutil.Accumulator{} n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(invalidMsg) time.Sleep(time.Millisecond * 25) if a := acc.NFields(); a != 0 { t.Errorf("got %v, expected %v", a, 0) } }
// Waits for the metric that was sent to the kafka broker to arrive at the kafka // consumer func waitForPoint(acc *testutil.Accumulator, t *testing.T) { // Give the kafka container up to 2 seconds to get the point to the consumer ticker := time.NewTicker(5 * time.Millisecond) counter := 0 for { select { case <-ticker.C: counter++ if counter > 1000 { t.Fatal("Waited for 5s, point never arrived to consumer") } else if acc.NFields() == 1 { return } } } }
// Test that the parser parses kafka messages into points func TestRunParserAndGatherGraphite(t *testing.T) { k, in := newTestKafka() acc := testutil.Accumulator{} k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) go k.receiver() in <- saramaMsg(testMsgGraphite) time.Sleep(time.Millisecond * 5) k.Gather(&acc) assert.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) }
func TestRunParserInvalidMsg(t *testing.T) { var testmsg = []byte("cpu_load_short") listener, in := newTestTcpListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) listener.parser, _ = parsers.NewInfluxParser() listener.wg.Add(1) go listener.tcpParser() in <- testmsg time.Sleep(time.Millisecond * 25) if a := acc.NFields(); a != 0 { t.Errorf("got %v, expected %v", a, 0) } }
// Test that the parser parses kafka messages into points func TestRunParserAndGatherJSON(t *testing.T) { k, in := newTestKafka() acc := testutil.Accumulator{} k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil) go k.receiver() in <- saramaMsg(testMsgJSON) time.Sleep(time.Millisecond * 5) k.Gather(&acc) assert.Equal(t, acc.NFields(), 2) acc.AssertContainsFields(t, "kafka_json_test", map[string]interface{}{ "a": float64(5), "b_c": float64(6), }) }
// Test that the proper values are ignored or collected func TestHttpJson200(t *testing.T) { httpjson := genMockHttpJson(validJSON, 200) for _, service := range httpjson { var acc testutil.Accumulator err := service.Gather(&acc) require.NoError(t, err) assert.Equal(t, 12, acc.NFields()) // Set responsetime for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } for _, srv := range service.Servers { tags := map[string]string{"server": srv} mname := "httpjson_" + service.Name expectedFields["response_time"] = 1.0 acc.AssertContainsTaggedFields(t, mname, expectedFields, tags) } } }
// Test that the proper values are ignored or collected func TestHttpJson200Tags(t *testing.T) { httpjson := genMockHttpJson(validJSONTags, 200) for _, service := range httpjson { if service.Name == "other_webapp" { var acc testutil.Accumulator err := service.Gather(&acc) // Set responsetime for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } require.NoError(t, err) assert.Equal(t, 4, acc.NFields()) for _, srv := range service.Servers { tags := map[string]string{"server": srv, "role": "master", "build": "123"} fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)} mname := "httpjson_" + service.Name acc.AssertContainsTaggedFields(t, mname, fields, tags) } } } }
func TestIpmi(t *testing.T) { i := &Ipmi{ Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, runner: newRunnerMock(cmdReturn, nil), } var acc testutil.Accumulator err := i.Gather(&acc) require.NoError(t, err) assert.Equal(t, acc.NFields(), 266, "non-numeric measurements should be ignored") var tests = []struct { fields map[string]interface{} tags map[string]string }{ { map[string]interface{}{ "value": float64(20), "status": int(1), }, map[string]string{ "name": "ambient_temp", "server": "192.168.1.1", "unit": "degrees_c", }, }, { map[string]interface{}{ "value": float64(80), "status": int(1), }, map[string]string{ "name": "altitude", "server": "192.168.1.1", "unit": "feet", }, }, { map[string]interface{}{ "value": float64(210), "status": int(1), }, map[string]string{ "name": "avg_power", "server": "192.168.1.1", "unit": "watts", }, }, { map[string]interface{}{ "value": float64(4.9), "status": int(1), }, map[string]string{ "name": "planar_5v", "server": "192.168.1.1", "unit": "volts", }, }, { map[string]interface{}{ "value": float64(3.05), "status": int(1), }, map[string]string{ "name": "planar_vbat", "server": "192.168.1.1", "unit": "volts", }, }, { map[string]interface{}{ "value": float64(2610), "status": int(1), }, map[string]string{ "name": "fan_1a_tach", "server": "192.168.1.1", "unit": "rpm", }, }, { map[string]interface{}{ "value": float64(1775), "status": int(1), }, map[string]string{ "name": "fan_1b_tach", "server": "192.168.1.1", "unit": "rpm", }, }, } for _, test := range tests { acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags) } }
// TestIptables_Gather is table-driven: each case feeds `iptables -nvL`-style
// output through a stubbed lister and checks the tags/fields recorded on the
// accumulator (or the expected parse error). Cases with no table, no chains,
// or no expected tags must produce zero fields.
// NOTE(review): the raw-string fixtures below are whitespace-sensitive input
// for the iptables output parser — code left byte-identical, comments only.
func TestIptables_Gather(t *testing.T) { tests := []struct { table string chains []string values []string tags []map[string]string fields [][]map[string]interface{} err error }{ { // 1 - no configured table => no results values: []string{ `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) pkts bytes target prot opt in out source destination 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 `}, }, { // 2 - no configured chains => no results table: "filter", values: []string{ `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) pkts bytes target prot opt in out source destination 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 `}, }, { // 3 - pkts and bytes are gathered as integers table: "filter", chains: []string{"INPUT"}, values: []string{ `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) pkts bytes target prot opt in out source destination 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */ `}, tags: []map[string]string{map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}}, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, }, }, { // 4 - missing fields header => no results table: "filter", chains: []string{"INPUT"}, values: []string{`Chain INPUT (policy ACCEPT 58 packets, 5096 bytes)`}, }, { // 5 - invalid chain header => error table: "filter", chains: []string{"INPUT"}, values: []string{ `INPUT (policy ACCEPT 58 packets, 5096 bytes) pkts bytes target prot opt in out source destination 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 `}, err: errParse, }, { // 6 - invalid fields header => error table: "filter", chains: []string{"INPUT"}, values: []string{ `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 `}, err: errParse, }, { // 7 - invalid integer value => best effort, no error table: "filter", chains: []string{"INPUT"}, values: []string{ `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) pkts bytes target prot opt in out source 
destination K 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 `}, }, { // 8 - Multiple rows, multipe chains => no error table: "filter", chains: []string{"INPUT", "FORWARD"}, values: []string{ `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) pkts bytes target prot opt in out source destination 100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 200 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foo */ `, `Chain FORWARD (policy ACCEPT 58 packets, 5096 bytes) pkts bytes target prot opt in out source destination 300 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* bar */ 400 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 500 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */ `, }, tags: []map[string]string{ map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foo"}, map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "bar"}, map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}}, {map[string]interface{}{"pkts": uint64(300), "bytes": uint64(4520)}}, {map[string]interface{}{"pkts": uint64(500), "bytes": uint64(4520)}}, }, }, { // 9 - comments are used as ruleid if any table: "filter", chains: []string{"INPUT"}, values: []string{ `Chain INPUT (policy ACCEPT 58 packets, 5096 bytes) pkts bytes target prot opt in out source destination 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:22 /* foobar */ 100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 `}, tags: []map[string]string{ map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, }, }, } for i, tt := range tests { i++ ipt := &Iptables{ Table: tt.table, Chains: tt.chains, lister: func(table, chain string) (string, error) { if len(tt.values) > 0 { v := tt.values[0] tt.values = tt.values[1:] return v, nil } return "", nil }, } acc := 
new(testutil.Accumulator) err := ipt.Gather(acc) if !reflect.DeepEqual(tt.err, err) { t.Errorf("%d: expected error '%#v' got '%#v'", i, tt.err, err) } if tt.table == "" { n := acc.NFields() if n != 0 { t.Errorf("%d: expected 0 fields if empty table got %d", i, n) } continue } if len(tt.chains) == 0 { n := acc.NFields() if n != 0 { t.Errorf("%d: expected 0 fields if empty chains got %d", i, n) } continue } if len(tt.tags) == 0 { n := acc.NFields() if n != 0 { t.Errorf("%d: expected 0 values got %d", i, n) } continue } n := 0 for j, tags := range tt.tags { for k, fields := range tt.fields[j] { if len(acc.Metrics) < n+1 { t.Errorf("%d: expected at least %d values got %d", i, n+1, len(acc.Metrics)) break } m := acc.Metrics[n] if !reflect.DeepEqual(m.Measurement, measurement) { t.Errorf("%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement) } if !reflect.DeepEqual(m.Tags, tags) { t.Errorf("%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags) } if !reflect.DeepEqual(m.Fields, fields) { t.Errorf("%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields) } n++ } } } }
func TestDiskStats(t *testing.T) { var mps MockPS defer mps.AssertExpectations(t) var acc testutil.Accumulator var err error duAll := []*disk.DiskUsageStat{ { Path: "/", Fstype: "ext4", Total: 128, Free: 23, Used: 100, InodesTotal: 1234, InodesFree: 234, InodesUsed: 1000, }, { Path: "/home", Fstype: "ext4", Total: 256, Free: 46, Used: 200, InodesTotal: 2468, InodesFree: 468, InodesUsed: 2000, }, } duFiltered := []*disk.DiskUsageStat{ { Path: "/", Fstype: "ext4", Total: 128, Free: 23, Used: 100, InodesTotal: 1234, InodesFree: 234, InodesUsed: 1000, }, } mps.On("DiskUsage", []string(nil), []string(nil)).Return(duAll, nil) mps.On("DiskUsage", []string{"/", "/dev"}, []string(nil)).Return(duFiltered, nil) mps.On("DiskUsage", []string{"/", "/home"}, []string(nil)).Return(duAll, nil) err = (&DiskStats{ps: &mps}).Gather(&acc) require.NoError(t, err) numDiskMetrics := acc.NFields() expectedAllDiskMetrics := 14 assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ "path": "/", "fstype": "ext4", } tags2 := map[string]string{ "path": "/home", "fstype": "ext4", } fields1 := map[string]interface{}{ "total": uint64(128), "used": uint64(100), "free": uint64(23), "inodes_total": uint64(1234), "inodes_free": uint64(234), "inodes_used": uint64(1000), "used_percent": float64(81.30081300813008), } fields2 := map[string]interface{}{ "total": uint64(256), "used": uint64(200), "free": uint64(46), "inodes_total": uint64(2468), "inodes_free": uint64(468), "inodes_used": uint64(2000), "used_percent": float64(81.30081300813008), } acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) // We expect 6 more DiskMetrics to show up with an explicit match on "/" // and /home not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) // We should see all the diskpoints as MountPoints includes both 
// / and /home err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) }