func TestTailFromEnd(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n") require.NoError(t, err) tt := NewTail() tt.Files = []string{tmpfile.Name()} p, _ := parsers.NewInfluxParser() tt.SetParser(p) defer tt.Stop() defer tmpfile.Close() acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) time.Sleep(time.Millisecond * 100) _, err = tmpfile.WriteString("cpu,othertag=foo usage_idle=100\n") require.NoError(t, err) require.NoError(t, tt.Gather(&acc)) time.Sleep(time.Millisecond * 50) acc.AssertContainsTaggedFields(t, "cpu", map[string]interface{}{ "usage_idle": float64(100), }, map[string]string{ "othertag": "foo", }) assert.Len(t, acc.Metrics, 1) }
func TestConnectUDP(t *testing.T) { listener := UdpListener{ ServiceAddress: ":8127", AllowedPendingMessages: 10000, } listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() time.Sleep(time.Millisecond * 25) conn, err := net.Dial("udp", "127.0.0.1:8127") require.NoError(t, err) // send single message to socket fmt.Fprintf(conn, testMsg) time.Sleep(time.Millisecond * 15) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, ) // send multiple messages to socket fmt.Fprintf(conn, testMsgs) time.Sleep(time.Millisecond * 15) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} for _, hostTag := range hostTags { acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": hostTag}, ) } }
func TestRunParser(t *testing.T) { log.SetOutput(ioutil.Discard) var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257") listener, in := newTestUdpListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) listener.parser, _ = parsers.NewInfluxParser() listener.wg.Add(1) go listener.udpParser() in <- testmsg time.Sleep(time.Millisecond * 25) listener.Gather(&acc) if a := acc.NFields(); a != 1 { t.Errorf("got %v, expected %v", a, 1) } acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, ) }
func TestHighTrafficUDP(t *testing.T) { listener := UdpListener{ ServiceAddress: ":8126", AllowedPendingMessages: 100000, } listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{} // send multiple messages to socket err := listener.Start(acc) require.NoError(t, err) time.Sleep(time.Millisecond * 25) conn, err := net.Dial("udp", "127.0.0.1:8126") require.NoError(t, err) for i := 0; i < 20000; i++ { // arbitrary, just to give the OS buffer some slack handling the // packet storm. time.Sleep(time.Microsecond) fmt.Fprintf(conn, testMsgs) } time.Sleep(time.Millisecond) listener.Stop() // this is not an exact science, since UDP packets can easily get lost or // dropped, but assume that the OS will be able to // handle at least 90% of the sent UDP packets. assert.InDelta(t, 100000, len(acc.Metrics), 10000) }
func TestLineProtocolParseMultiple(t *testing.T) { parser, _ := parsers.NewInfluxParser() e := &Exec{ runner: newRunnerMock([]byte(lineProtocolMulti), nil), Commands: []string{"line-protocol"}, parser: parser, } var acc testutil.Accumulator err := e.Gather(&acc) require.NoError(t, err) fields := map[string]interface{}{ "usage_idle": float64(99), "usage_busy": float64(1), } tags := map[string]string{ "host": "foo", "datacenter": "us-east", } cpuTags := []string{"cpu0", "cpu1", "cpu2", "cpu3", "cpu4", "cpu5", "cpu6"} for _, cpu := range cpuTags { tags["cpu"] = cpu acc.AssertContainsTaggedFields(t, "cpu", fields, tags) } }
// Test that MaxTCPConections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := TcpListener{ ServiceAddress: ":8196", AllowedPendingMessages: 10000, MaxTCPConnections: 1, } listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() time.Sleep(time.Millisecond * 25) _, err := net.Dial("tcp", "127.0.0.1:8196") assert.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8196") assert.NoError(t, err) net.Dial("tcp", "127.0.0.1:8196") buf := make([]byte, 1500) n, err := conn.Read(buf) assert.NoError(t, err) assert.Equal(t, "Telegraf maximum concurrent TCP connections (1) reached, closing.\n"+ "You may want to increase max_tcp_connections in"+ " the Telegraf tcp listener configuration.\n", string(buf[:n])) _, err = conn.Write([]byte(testMsg)) assert.NoError(t, err) time.Sleep(time.Millisecond * 10) assert.Zero(t, acc.NFields()) }
// benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkTCP(b *testing.B) { listener := TcpListener{ ServiceAddress: ":8198", AllowedPendingMessages: 100000, MaxTCPConnections: 250, } listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{Discard: true} // send multiple messages to socket for n := 0; n < b.N; n++ { err := listener.Start(acc) if err != nil { panic(err) } time.Sleep(time.Millisecond * 25) conn, err := net.Dial("tcp", "127.0.0.1:8198") if err != nil { panic(err) } for i := 0; i < 100000; i++ { fmt.Fprintf(conn, testMsg) } // wait for 100,000 metrics to get added to accumulator time.Sleep(time.Millisecond) listener.Stop() } }
func TestRunParser(t *testing.T) { var testmsg = []byte(testMsg) listener, in := newTestTcpListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) listener.parser, _ = parsers.NewInfluxParser() listener.wg.Add(1) go listener.tcpParser() in <- testmsg time.Sleep(time.Millisecond * 25) listener.Gather(&acc) if a := acc.NFields(); a != 1 { t.Errorf("got %v, expected %v", a, 1) } acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, ) }
func TestTailFromBeginning(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) tt := NewTail() tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} p, _ := parsers.NewInfluxParser() tt.SetParser(p) defer tt.Stop() defer tmpfile.Close() acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n") require.NoError(t, err) require.NoError(t, tt.Gather(&acc)) // arbitrary sleep to wait for message to show up time.Sleep(time.Millisecond * 250) acc.AssertContainsTaggedFields(t, "cpu", map[string]interface{}{ "usage_idle": float64(100), }, map[string]string{ "mytag": "foo", }) }
// writes 25,000 metrics to the listener with 10 different writers func TestWriteHTTPHighTraffic(t *testing.T) { listener := &HttpListener{ServiceAddress: ":8286"} parser, _ := parsers.NewInfluxParser() listener.SetParser(parser) acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() time.Sleep(time.Millisecond * 25) // post many messages to listener var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func() { for i := 0; i < 500; i++ { resp, err := http.Post("http://localhost:8286/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) } wg.Done() }() } wg.Wait() time.Sleep(time.Millisecond * 50) listener.Gather(acc) require.Equal(t, int64(25000), int64(acc.NMetrics())) }
// Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { k, in := NewTestKafka() defer close(k.done) k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(invalidMsg) time.Sleep(time.Millisecond) assert.Equal(t, len(k.metricC), 0) }
// Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { k, in := newTestKafka() acc := testutil.Accumulator{} k.acc = &acc defer close(k.done) k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(invalidMsg) time.Sleep(time.Millisecond * 5) assert.Equal(t, acc.NFields(), 0) }
// Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { n, in := newTestNatsConsumer() defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- natsMsg(invalidMsg) time.Sleep(time.Millisecond) if a := len(n.metricC); a != 0 { t.Errorf("got %v, expected %v", a, 0) } }
// Test that points are dropped when we hit the buffer limit
func TestRunParserRespectsBuffer(t *testing.T) {
	k, in := NewTestKafka()
	defer close(k.done)

	k.parser, _ = parsers.NewInfluxParser()
	go k.receiver()

	// Overfill the channel by one message so at least one point is dropped.
	for i := 0; i < pointBuffer+1; i++ {
		in <- saramaMsg(testMsg)
	}
	time.Sleep(time.Millisecond)

	// NOTE(review): the expected count is a magic 5 — presumably
	// pointBuffer == 5 in this test's setup; confirm, or assert against
	// pointBuffer directly like the nats variant of this test does.
	assert.Equal(t, len(k.metricC), 5)
}
// This test is modeled after the kafka consumer integration test
func TestReadsMetricsFromNSQ(t *testing.T) {
	msgID := nsq.MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'}
	msg := nsq.NewMessage(msgID, []byte("cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"))

	// Scripted replies for the mock NSQ daemon: acknowledge SUB and
	// IDENTIFY, deliver the single message, then tell the mock to exit.
	script := []instruction{
		// SUB
		instruction{0, nsq.FrameTypeResponse, []byte("OK")},
		// IDENTIFY
		instruction{0, nsq.FrameTypeResponse, []byte("OK")},
		instruction{20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)},
		// needed to exit test
		instruction{100 * time.Millisecond, -1, []byte("exit")},
	}

	addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155")
	newMockNSQD(script, addr.String())

	consumer := &NSQConsumer{
		Server:      "127.0.0.1:4155",
		Topic:       "telegraf",
		Channel:     "consume",
		MaxInFlight: 1,
	}
	p, _ := parsers.NewInfluxParser()
	consumer.SetParser(p)

	var acc testutil.Accumulator
	// sanity: the fresh accumulator must start empty
	assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
	if err := consumer.Start(&acc); err != nil {
		t.Fatal(err.Error())
	} else {
		defer consumer.Stop()
	}

	// block until the mock-delivered point arrives (helper defined elsewhere)
	waitForPoint(&acc, t)

	// Exactly one point should have arrived; verify every component of it.
	if len(acc.Metrics) == 1 {
		point := acc.Metrics[0]
		assert.Equal(t, "cpu_load_short", point.Measurement)
		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
		assert.Equal(t, map[string]string{
			"host":      "server01",
			"direction": "in",
			"region":    "us-west",
		}, point.Tags)
		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
	} else {
		t.Errorf("No points found in accumulator, expected 1")
	}
}
func TestReceive404ForInvalidEndpoint(t *testing.T) { listener := newTestHttpListener() listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() time.Sleep(time.Millisecond * 25) // post single message to listener resp, err := http.Post("http://localhost:8186/foobar", "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) require.EqualValues(t, 404, resp.StatusCode) }
// Test that the parser ignores invalid messages func TestRunParserInvalidMsg(t *testing.T) { n, in := newTestMQTTConsumer() acc := testutil.Accumulator{} n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(invalidMsg) time.Sleep(time.Millisecond * 25) if a := acc.NFields(); a != 0 { t.Errorf("got %v, expected %v", a, 0) } }
// Test that metrics are dropped when we hit the buffer limit func TestRunParserRespectsBuffer(t *testing.T) { n, in := newTestNatsConsumer() defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() for i := 0; i < metricBuffer+1; i++ { in <- natsMsg(testMsg) } time.Sleep(time.Millisecond) if a := len(n.metricC); a != metricBuffer { t.Errorf("got %v, expected %v", a, metricBuffer) } }
// Test that the parser parses NATS messages into metrics func TestRunParser(t *testing.T) { n, in := newTestNatsConsumer() acc := testutil.Accumulator{} n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- natsMsg(testMsg) time.Sleep(time.Millisecond * 25) if acc.NFields() != 1 { t.Errorf("got %v, expected %v", acc.NFields(), 1) } }
// Test that the parser parses line format messages into metrics func TestRunParserAndGather(t *testing.T) { n, in := newTestMQTTConsumer() acc := testutil.Accumulator{} n.acc = &acc defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(testMsg) time.Sleep(time.Millisecond * 25) n.Gather(&acc) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) }
// Test that the parser parses kafka messages into points func TestRunParserAndGather(t *testing.T) { k, in := NewTestKafka() defer close(k.done) k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(testMsg) time.Sleep(time.Millisecond) acc := testutil.Accumulator{} k.Gather(&acc) assert.Equal(t, len(acc.Metrics), 1) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) }
func TestWriteHTTPEmpty(t *testing.T) { time.Sleep(time.Millisecond * 250) listener := newTestHttpListener() listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() time.Sleep(time.Millisecond * 25) // post single message to listener resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) }
// Test that the parser parses line format messages into metrics func TestRunParserAndGather(t *testing.T) { n, in := newTestNatsConsumer() defer close(n.done) n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- natsMsg(testMsg) time.Sleep(time.Millisecond) acc := testutil.Accumulator{} n.Gather(&acc) if a := len(acc.Metrics); a != 1 { t.Errorf("got %v, expected %v", a, 1) } acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) }
func TestRunParserInvalidMsg(t *testing.T) { var testmsg = []byte("cpu_load_short") listener, in := newTestTcpListener() acc := testutil.Accumulator{} listener.acc = &acc defer close(listener.done) listener.parser, _ = parsers.NewInfluxParser() listener.wg.Add(1) go listener.tcpParser() in <- testmsg time.Sleep(time.Millisecond * 25) if a := acc.NFields(); a != 0 { t.Errorf("got %v, expected %v", a, 0) } }
// Test that MaxTCPConections is respected func TestCloseConcurrentConns(t *testing.T) { listener := TcpListener{ ServiceAddress: ":8195", AllowedPendingMessages: 10000, MaxTCPConnections: 2, } listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) time.Sleep(time.Millisecond * 25) _, err := net.Dial("tcp", "127.0.0.1:8195") assert.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8195") assert.NoError(t, err) listener.Stop() }
func TestQueryAndPingHTTP(t *testing.T) { time.Sleep(time.Millisecond * 250) listener := newTestHttpListener() listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() time.Sleep(time.Millisecond * 25) // post query to listener resp, err := http.Post("http://localhost:8186/query?db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22", "", nil) require.NoError(t, err) require.EqualValues(t, 200, resp.StatusCode) // post ping to listener resp, err = http.Post("http://localhost:8186/ping", "", nil) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) }
func TestTailBadLine(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) tt := NewTail() tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} p, _ := parsers.NewInfluxParser() tt.SetParser(p) defer tt.Stop() defer tmpfile.Close() acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) _, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n") require.NoError(t, err) require.NoError(t, tt.Gather(&acc)) time.Sleep(time.Millisecond * 50) assert.Len(t, acc.Metrics, 0) }
func TestWriteHTTP(t *testing.T) { listener := newTestHttpListener() parser, _ := parsers.NewInfluxParser() listener.SetParser(parser) acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() time.Sleep(time.Millisecond * 25) // post single message to listener resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) time.Sleep(time.Millisecond * 15) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, ) // post multiple message to listener resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) time.Sleep(time.Millisecond * 15) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} for _, hostTag := range hostTags { acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": hostTag}, ) } }
func TestHighTrafficTCP(t *testing.T) { listener := TcpListener{ ServiceAddress: ":8199", AllowedPendingMessages: 100000, MaxTCPConnections: 250, } listener.parser, _ = parsers.NewInfluxParser() acc := &testutil.Accumulator{} // send multiple messages to socket err := listener.Start(acc) require.NoError(t, err) time.Sleep(time.Millisecond * 25) conn, err := net.Dial("tcp", "127.0.0.1:8199") require.NoError(t, err) for i := 0; i < 100000; i++ { fmt.Fprintf(conn, testMsg) } time.Sleep(time.Millisecond) listener.Stop() assert.Equal(t, 100000, len(acc.Metrics)) }
// TestReadsMetricsFromKafka is an integration test: it produces one message
// to a real Kafka broker and verifies the consumer parses it into a point.
// Requires Kafka/Zookeeper reachable via testutil.GetLocalHost(); skipped
// under `go test -short`.
func TestReadsMetricsFromKafka(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
	zkPeers := []string{testutil.GetLocalHost() + ":2181"}
	// unique topic per run so stale messages from earlier runs can't interfere
	testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())

	// Send a Kafka message to the kafka host
	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
	require.NoError(t, err)
	_, _, err = producer.SendMessage(
		&sarama.ProducerMessage{
			Topic: testTopic,
			Value: sarama.StringEncoder(msg),
		})
	require.NoError(t, err)
	defer producer.Close()

	// Start the Kafka Consumer
	k := &Kafka{
		ConsumerGroup:  "telegraf_test_consumers",
		Topics:         []string{testTopic},
		ZookeeperPeers: zkPeers,
		PointBuffer:    100000,
		// "oldest" so the consumer reads from the beginning of the topic
		// and is guaranteed to see the message produced above
		Offset: "oldest",
	}
	p, _ := parsers.NewInfluxParser()
	k.SetParser(p)
	if err := k.Start(); err != nil {
		t.Fatal(err.Error())
	} else {
		defer k.Stop()
	}

	// block until the point has been consumed (helper defined elsewhere)
	waitForPoint(k, t)

	// Verify that we can now gather the sent message
	var acc testutil.Accumulator

	// Sanity check
	assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")

	// Gather points
	err = k.Gather(&acc)
	require.NoError(t, err)
	// Exactly one point should be present; verify every component of it.
	if len(acc.Metrics) == 1 {
		point := acc.Metrics[0]
		assert.Equal(t, "cpu_load_short", point.Measurement)
		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
		assert.Equal(t, map[string]string{
			"host":      "server01",
			"direction": "in",
			"region":    "us-west",
		}, point.Tags)
		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
	} else {
		t.Errorf("No points found in accumulator, expected 1")
	}
}