func (d *EventGenerator) getNetworkEvent(container *docker.APIContainers, stats *docker.Stats) common.MapStr { newNetworkData := NetworkData{ stats.Read, stats.Network.RxBytes, stats.Network.RxDropped, stats.Network.RxErrors, stats.Network.RxPackets, stats.Network.TxBytes, stats.Network.TxDropped, stats.Network.TxErrors, stats.Network.TxPackets, } var event common.MapStr oldNetworkData, ok := d.networkStats[container.ID] if ok { calculator := NetworkCalculator{oldNetworkData, newNetworkData} event = common.MapStr{ "timestamp": common.Time(stats.Read), "type": "net", "containerID": container.ID, "containerNames": container.Names, "net": common.MapStr{ "rxBytes_ps": calculator.getRxBytesPerSecond(), "rxDropped_ps": calculator.getRxDroppedPerSecond(), "rxErrors_ps": calculator.getRxErrorsPerSecond(), "rxPackets_ps": calculator.getRxPacketsPerSecond(), "txBytes_ps": calculator.getTxBytesPerSecond(), "txDropped_ps": calculator.getTxDroppedPerSecond(), "txErrors_ps": calculator.getTxErrorsPerSecond(), "txPackets_ps": calculator.getTxPacketsPerSecond(), }, } } else { event = common.MapStr{ "timestamp": common.Time(stats.Read), "type": "net", "containerID": container.ID, "containerNames": container.Names, "net": common.MapStr{ "rxBytes_ps": 0, "rxDropped_ps": 0, "rxErrors_ps": 0, "rxPackets_ps": 0, "txBytes_ps": 0, "txDropped_ps": 0, "txErrors_ps": 0, "txPackets_ps": 0, }, } } d.networkStats[container.ID] = newNetworkData return event }
func (ab *AmqpBeat) newAmqpEvent(delivery *amqp.Delivery, typeTag, tsField, tsFormat *string) (*AmqpEvent, error) { m := common.MapStr{} err := json.Unmarshal(delivery.Body, &m) if err != nil { return nil, fmt.Errorf("error unmarshalling delivery %v: %v", delivery.Body, err) } now := time.Now() ts := common.Time(now) if tsField != nil && tsFormat != nil { var err error ts, err = extractTS(m, *tsField, *tsFormat, ts) if err != nil { logp.Warn("Failed to extract @timestamp for event, defaulting to delivery time ('%s'): %v", now, err) } } sanitize(m, ab.RbConfig.AmqpInput) m["type"] = *typeTag m["@timestamp"] = ts ev := &AmqpEvent{ deliveryTag: delivery.DeliveryTag, acknowledger: delivery.Acknowledger, body: m, } return ev, nil }
// extractTS resolves tsField (a nestingDelim-separated path) inside m,
// parses the string found at that path using the time layout tsFormat, and
// returns it as a common.Time. On any failure — missing path component,
// non-map intermediate value, non-string leaf, or parse error — the caller's
// default dflt is returned together with a descriptive error.
func extractTS(m common.MapStr, tsField, tsFormat string, dflt common.Time) (common.Time, error) {
	path := strings.Split(tsField, nestingDelim)
	submap := m
	var ok bool
	// Descend through every path component except the last, one submap at a time.
	for _, k := range path[:len(path)-1] {
		v, found := submap[k]
		if !found {
			return dflt, fmt.Errorf("did not find component '%s' of path '%s' in %v", k, tsField, m)
		}

		// careful not to shadow submap here (ie don't use ':=' )
		submap, ok = v.(map[string]interface{})
		if !ok {
			return dflt, fmt.Errorf("component '%s' of path '%s' is not a submap in %v", k, tsField, m)
		}
	}

	// The last path component must hold the timestamp as a string.
	tsValue, found := submap[path[len(path)-1]]
	if !found {
		return dflt, fmt.Errorf("no value found at path '%s' in %v", tsField, m)
	}
	tsStr, ok := tsValue.(string)
	if !ok {
		return dflt, fmt.Errorf("value '%v' at path '%s' is not a string, cannot parse as timestamp", tsValue, tsField)
	}

	ts, err := time.Parse(tsFormat, tsStr)
	if err != nil {
		return dflt, fmt.Errorf("failed to parse timestamp '%s' with layout '%s': %v", tsValue, tsFormat, err)
	}
	return common.Time(ts), nil
}
func TestSendMessageViaLogstash(t *testing.T) { if testing.Short() { t.Skip("Skipping in short mode. Requires Logstash and Elasticsearch") } test := "basic" ls := newTestLogstashOutput(t, test) defer ls.Cleanup() event := common.MapStr{ "timestamp": common.Time(time.Now()), "host": "test-host", "type": "log", "message": "hello world", } ls.PublishEvent(nil, time.Now(), event) // wait for logstash event flush + elasticsearch waitUntilTrue(5*time.Second, checkIndex(ls, 1)) // search value in logstash elasticsearch index resp, err := ls.Read() if err != nil { return } if len(resp) != 1 { t.Errorf("wrong number of results: %d", len(resp)) } }
func Publish(beat *beat.Beat, fb *Filebeat) { // Receives events from spool during flush for events := range fb.publisherChan { logp.Debug("filebeat", "Send events to output") pubEvents := make([]common.MapStr, 0, len(events)) for _, event := range events { bEvent := common.MapStr{ "timestamp": common.Time(time.Now()), "source": event.Source, "offset": event.Offset, "line": event.Line, "message": event.Text, "fields": event.Fields, "fileinfo": event.Fileinfo, "type": "log", } pubEvents = append(pubEvents, bEvent) } beat.Events.PublishEvents(pubEvents, publisher.Sync) logp.Debug("filebeat", "Events sent: %d", len(events)) // Tell the registrar that we've successfully sent these events fb.registrar.Channel <- events } }
func TestExtractTsFormats(t *testing.T) { type test struct { tsField string tsFormat string tsValue string expected string } tests := [...]test{ test{"field1", "2006-01-02 15:04:05 -0700", "2015-12-29 14:55:15 +0100", tsOutput}, test{"field2", "January 02, 2006 03:04:05PM -0700", "December 29, 2015 01:55:15PM -0000", tsOutput}, test{"field3", "20060102150405", "20151229135515", tsOutput}, test{"field4", "01/02 `06, 03:04:05.00000 -0700", "12/29 `15, 06:55:15.00000 -0700", tsOutput}, } for _, tst := range tests { m := make(common.MapStr) m[tst.tsField] = tst.tsValue ts, err := extractTS(m, tst.tsField, tst.tsFormat, common.Time(time.Now())) assert.Nil(t, err) bytes, _ := ts.MarshalJSON() str := string(bytes) assert.Equal(t, tst.expected, strings.Trim(str, "\"")) } }
func Publish(beat *beat.Beat, fb *Filebeat) { // Receives events from spool during flush for events := range fb.publisherChan { logp.Debug("filebeat", "Send events to output") for _, event := range events { bEvent := common.MapStr{ "timestamp": common.Time(time.Now()), "source": event.Source, "offset": event.Offset, "line": event.Line, "text": event.Text, "fields": event.Fields, "fileinfo": event.Fileinfo, "type": "log", } // Sends event to beat (outputs) beat.Events <- bEvent } logp.Debug("filebeat", "Events sent:", len(events)) // Tell the registrar that we've successfully sent these events fb.RegistrarChan <- events } }
func testSendMultipleViaLogstash(t *testing.T, name string, tls bool) { if testing.Short() { t.Skip("Skipping in short mode. Requires Logstash and Elasticsearch") } ls := newTestLogstashOutput(t, name, tls) defer ls.Cleanup() for i := 0; i < 10; i++ { event := common.MapStr{ "@timestamp": common.Time(time.Now()), "host": "test-host", "type": "log", "message": fmt.Sprintf("hello world - %v", i), } ls.PublishEvent(nil, time.Now(), event) } // wait for logstash event flush + elasticsearch waitUntilTrue(5*time.Second, checkIndex(ls, 10)) // search value in logstash elasticsearch index resp, err := ls.Read() if err != nil { return } if len(resp) != 10 { t.Errorf("wrong number of results: %d", len(resp)) } }
func (f *FileEvent) ToMapStr() common.MapStr { event := common.MapStr{ "@timestamp": common.Time(f.ReadTime), "source": f.Source, "offset": f.Offset, "line": f.Line, "message": f.Text, "fileinfo": f.Fileinfo, "type": f.DocumentType, "input_type": f.InputType, } if f.Fields != nil { if f.fieldsUnderRoot { for key, value := range *f.Fields { // in case of conflicts, overwrite _, found := event[key] if found { logp.Warn("Overwriting %s key", key) } event[key] = value } } else { event["fields"] = f.Fields } } return event }
func (redis *Redis) publishTransaction(t *RedisTransaction) { if redis.results == nil { return } event := common.MapStr{} event["type"] = "redis" if !t.IsError { event["status"] = common.OK_STATUS } else { event["status"] = common.ERROR_STATUS } event["responsetime"] = t.ResponseTime if redis.Send_request { event["request"] = t.Request_raw } if redis.Send_response { event["response"] = t.Response_raw } event["redis"] = common.MapStr(t.Redis) event["method"] = strings.ToUpper(t.Method) event["resource"] = t.Path event["query"] = t.Query event["bytes_in"] = uint64(t.BytesIn) event["bytes_out"] = uint64(t.BytesOut) event["timestamp"] = common.Time(t.ts) event["src"] = &t.Src event["dst"] = &t.Dst redis.results.PublishEvent(event) }
func (t *Topbeat) exportFileSystemStats() error { fss, err := GetFileSystemList() if err != nil { logp.Warn("Getting filesystem list: %v", err) return err } for _, fs := range fss { fs_stat, err := GetFileSystemStat(fs) if err != nil { logp.Debug("topbeat", "Skip filesystem %d: %v", fs_stat, err) continue } t.addFileSystemUsedPercentage(fs_stat) event := common.MapStr{ "timestamp": common.Time(time.Now()), "type": "filesystem", "fs": fs_stat, } t.events <- event } return nil }
func (mysql *Mysql) publishTransaction(t *MysqlTransaction) { if mysql.results == nil { return } logp.Debug("mysql", "mysql.results exists") event := common.MapStr{} event["type"] = "mysql" if t.Mysql["iserror"].(bool) { event["status"] = common.ERROR_STATUS } else { event["status"] = common.OK_STATUS } event["responsetime"] = t.ResponseTime if mysql.Send_request { event["request"] = t.Request_raw } if mysql.Send_response { event["response"] = t.Response_raw } event["method"] = t.Method event["query"] = t.Query event["mysql"] = t.Mysql event["path"] = t.Path event["bytes_out"] = t.BytesOut event["bytes_in"] = t.BytesIn if len(t.Notes) > 0 { event["notes"] = t.Notes } event["timestamp"] = common.Time(t.ts) event["src"] = &t.Src event["dst"] = &t.Dst mysql.results <- event }
// testEvent returns a new common.MapStr with the required fields // populated. func testEvent() common.MapStr { event := common.MapStr{} event["@timestamp"] = common.Time(time.Now()) event["type"] = "test" event["src"] = &common.Endpoint{} event["dst"] = &common.Endpoint{} return event }
// testEvent builds a minimal log-type event fixture for tests.
func testEvent() common.MapStr {
	event := common.MapStr{}
	event["@timestamp"] = common.Time(time.Now())
	event["type"] = "log"
	event["extra"] = 10
	event["message"] = "message"
	return event
}
// Run starts the pingbeat main loop: it registers every configured IPv4/IPv6
// target with a fastping pinger, publishes one "pingbeat" event per echo
// reply received, and repeats a ping round every p.period until p.isAlive is
// cleared.
func (p *Pingbeat) Run(b *beat.Beat) error {
	p.isAlive = true

	fp := fastping.NewPinger()

	errInput, err := fp.Network(p.pingType)
	if err != nil {
		// NOTE(review): os.Exit here bypasses any cleanup in the caller;
		// consider returning the error instead since Run already returns one.
		logp.Critical("Error: %v (input %v)", err, errInput)
		os.Exit(1)
	}

	// Register all configured targets with the pinger.
	if p.useIPv4 {
		for addr, details := range p.ipv4targets {
			logp.Debug("pingbeat", "Adding target IP: %s, Name: %s, Tag: %s\n", addr, details[0], details[1])
			fp.AddIP(addr)
		}
	}
	if p.useIPv6 {
		for addr, details := range p.ipv6targets {
			logp.Debug("pingbeat", "Adding target IP: %s, Name: %s, Tag: %s\n", addr, details[0], details[1])
			fp.AddIP(addr)
		}
	}

	// Publish an event for each echo reply.
	fp.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {
		var name, tag string
		ip := addr.IP
		if ip.To4() != nil {
			// NOTE(review): indexing assumes the replying address is always a
			// registered target with two details; a missing key would panic —
			// confirm fastping only delivers replies for added IPs.
			name = p.ipv4targets[addr.String()][0]
			tag = p.ipv4targets[addr.String()][1]
		} else {
			name = p.ipv6targets[addr.String()][0]
			tag = p.ipv6targets[addr.String()][1]
		}
		event := common.MapStr{
			"timestamp":   common.Time(time.Now()),
			"type":        "pingbeat",
			"target_name": name,
			"target_addr": addr.String(),
			"tag":         tag,
			"rtt":         milliSeconds(rtt),
		}
		p.events.PublishEvent(event)
	}
	// fp.OnIdle = func() {
	// 	fmt.Println("loop done")
	// }

	// One ping round per period until stopped (isAlive cleared elsewhere).
	for p.isAlive {
		time.Sleep(p.period)
		err := fp.Run()
		if err != nil {
			logp.Warn("Warning: %v", err)
		}
	}
	return nil
}
func (h *HttpEvent) ToMapStr() common.MapStr { event := common.MapStr{ "@timestamp": common.Time(h.ReadTime), "type": h.DocumentType, "request": h.Request, "response": h.Response, } return event }
func TestEventGeneratorGetContainerEvent(t *testing.T) { // GIVEN labels := make(map[string]string) labels["label1"] = "value1" labels["label2"] = "value2" var container = docker.APIContainers{ "container_id", "container_image", "container command", 9876543210, "Up", []docker.APIPort{docker.APIPort{1234, 4567, "portType", "123.456.879.1"}}, 123, 456, []string{"name1", "name2"}, labels, } timestamp := time.Now() var stats = new(docker.Stats) stats.Read = timestamp var eventGenerator = EventGenerator{nil} // expected output expectedEvent := common.MapStr{ "timestamp": common.Time(timestamp), "type": "container", "containerID": container.ID, "containerNames": container.Names, "container": common.MapStr{ "id": container.ID, "command": container.Command, "created": time.Unix(container.Created, 0), "image": container.Image, "labels": container.Labels, "names": container.Names, "ports": []map[string]interface{}{common.MapStr{ "ip": container.Ports[0].IP, "privatePort": container.Ports[0].PrivatePort, "publicPort": container.Ports[0].PublicPort, "type": container.Ports[0].Type, }}, "sizeRootFs": container.SizeRootFs, "sizeRw": container.SizeRw, "status": container.Status, }, } // WHEN event := eventGenerator.getContainerEvent(&container, stats) // THEN assert.Equal(t, expectedEvent, event) }
// publishTransaction converts a finished MongoDB transaction into a MapStr
// event and sends it on the results channel. Response documents can
// optionally be included, capped both in number (Max_docs) and in
// per-document length (Max_doc_length).
func (mongodb *Mongodb) publishTransaction(t *MongodbTransaction) {

	if mongodb.results == nil {
		logp.Debug("mongodb", "Try to publish transaction with null results")
		return
	}

	event := common.MapStr{}
	event["type"] = "mongodb"
	if t.error == "" {
		event["status"] = common.OK_STATUS
	} else {
		// Record the error inside the protocol-specific sub-document too.
		t.event["error"] = t.error
		event["status"] = common.ERROR_STATUS
	}
	event["mongodb"] = t.event
	event["method"] = t.method
	event["resource"] = t.resource
	event["query"] = reconstructQuery(t, false)
	event["responsetime"] = t.ResponseTime
	event["bytes_in"] = uint64(t.BytesIn)
	event["bytes_out"] = uint64(t.BytesOut)
	event["timestamp"] = common.Time(t.ts)
	event["src"] = &t.Src
	event["dst"] = &t.Dst

	if mongodb.Send_request {
		event["request"] = reconstructQuery(t, true)
	}
	if mongodb.Send_response {
		if len(t.documents) > 0 {
			// response field needs to be a string
			docs := make([]string, 0, len(t.documents))
			for i, doc := range t.documents {
				if mongodb.Max_docs > 0 && i >= mongodb.Max_docs {
					// Document cap reached: mark the truncation and stop.
					docs = append(docs, "[...]")
					break
				}
				str, err := doc2str(doc)
				if err != nil {
					// Unmarshalable documents are dropped from the response
					// with a warning rather than failing the whole event.
					logp.Warn("Failed to JSON marshal document from Mongo: %v (error: %v)", doc, err)
				} else {
					// Truncate overly long documents (byte-length cap).
					if mongodb.Max_doc_length > 0 && len(str) > mongodb.Max_doc_length {
						str = str[:mongodb.Max_doc_length] + " ..."
					}
					docs = append(docs, str)
				}
			}
			event["response"] = strings.Join(docs, "\n")
		}
	}

	mongodb.results <- event
}
// exportSystemStats gathers load, CPU (total and per-core), memory, and swap
// statistics, derives percentage values, and publishes them as a single
// "system" event. Each collection step aborts the export on error.
func (t *Topbeat) exportSystemStats() error {

	load_stat, err := GetSystemLoad()
	if err != nil {
		logp.Warn("Getting load statistics: %v", err)
		return err
	}
	cpu_stat, err := GetCpuTimes()
	if err != nil {
		logp.Warn("Getting cpu times: %v", err)
		return err
	}
	// Percentages are derived from deltas, so they must be computed right
	// after fetching the raw counters.
	t.addCpuPercentage(cpu_stat)

	cpu_core_stat, err := GetCpuTimesList()
	if err != nil {
		logp.Warn("Getting cpu core times: %v", err)
		return err
	}
	t.addCpuPercentageList(cpu_core_stat)

	mem_stat, err := GetMemory()
	if err != nil {
		logp.Warn("Getting memory details: %v", err)
		return err
	}
	t.addMemPercentage(mem_stat)

	swap_stat, err := GetSwap()
	if err != nil {
		logp.Warn("Getting swap details: %v", err)
		return err
	}
	// NOTE(review): swap is run through addMemPercentage; verify a
	// swap-specific helper isn't intended here.
	t.addMemPercentage(swap_stat)

	event := common.MapStr{
		"@timestamp": common.Time(time.Now()),
		"type":       "system",
		"load":       load_stat,
		"cpu":        cpu_stat,
		"mem":        mem_stat,
		"swap":       swap_stat,
	}

	// Per-core stats are flattened into the event as cpu0, cpu1, ...
	for coreNumber, core := range cpu_core_stat {
		event["cpu"+strconv.Itoa(coreNumber)] = core
	}

	t.events.PublishEvent(event)

	return nil
}
func testSendMultipleBatchesViaLogstash( t *testing.T, name string, numBatches int, batchSize int, tls bool, ) { if testing.Short() { t.Skip("Skipping in short mode. Requires Logstash and Elasticsearch") } ls := newTestLogstashOutput(t, name, tls) defer ls.Cleanup() batches := make([][]common.MapStr, 0, numBatches) for i := 0; i < numBatches; i++ { batch := make([]common.MapStr, 0, batchSize) for j := 0; j < batchSize; j++ { event := common.MapStr{ "@timestamp": common.Time(time.Now()), "host": "test-host", "type": "log", "message": fmt.Sprintf("batch hello world - %v", i*batchSize+j), } batch = append(batch, event) } batches = append(batches, batch) } for _, batch := range batches { sig := outputs.NewSyncSignal() ls.BulkPublish(sig, time.Now(), batch) ok := sig.Wait() assert.Equal(t, true, ok) } // wait for logstash event flush + elasticsearch ok := waitUntilTrue(5*time.Second, checkIndex(ls, numBatches*batchSize)) assert.True(t, ok) // check number of events matches total number of events // search value in logstash elasticsearch index resp, err := ls.Read() if err != nil { return } if len(resp) != 10 { t.Errorf("wrong number of results: %d", len(resp)) } }
func TestNestedTsField(t *testing.T) { m := common.MapStr{ "foo": map[string]interface{}{ "bar": map[string]interface{}{ "tsfield": "December 29, 2015 01:55:15PM -0000", }, }, } ts, err := extractTS(m, "foo[>]bar[>]tsfield", "January 02, 2006 03:04:05PM -0700", common.Time(time.Now())) assert.Nil(t, err) bytes, _ := ts.MarshalJSON() str := string(bytes) assert.Equal(t, tsOutput, strings.Trim(str, "\"")) }
// Event fills common event fields. func (t *Transaction) Event(event common.MapStr) error { event["type"] = t.Type event["@timestamp"] = common.Time(t.Ts.Ts) event["responsetime"] = t.ResponseTime event["src"] = &t.Src event["dst"] = &t.Dst event["transport"] = t.Transport.String() event["bytes_out"] = t.BytesOut event["bytes_in"] = t.BytesIn event["status"] = t.Status if len(t.Notes) > 0 { event["notes"] = t.Notes } return nil }
func (d *Dockerbeat) getMemoryEvent(container *docker.APIContainers, stats *docker.Stats) common.MapStr { event := common.MapStr{ "timestamp": common.Time(stats.Read), "type": "memory", "containerID": container.ID, "containerNames": container.Names, "memory": common.MapStr{ "failcnt": stats.MemoryStats.Failcnt, "limit": stats.MemoryStats.Limit, "maxUsage": stats.MemoryStats.MaxUsage, "usage": stats.MemoryStats.Usage, }, } return event }
func (d *Dockerbeat) getCpuEvent(container *docker.APIContainers, stats *docker.Stats) common.MapStr { event := common.MapStr{ "timestamp": common.Time(stats.Read), "type": "cpu", "containerID": container.ID, "containerNames": container.Names, "cpu": common.MapStr{ "percpuUsage": stats.CPUStats.CPUUsage.PercpuUsage, "totalUsage": stats.CPUStats.CPUUsage.TotalUsage, "usageInKernelmode": stats.CPUStats.CPUUsage.UsageInKernelmode, "usageInUsermode": stats.CPUStats.CPUUsage.UsageInUsermode, }, } return event }
func TestLogstashElasticOutputPluginBulkCompatibleMessage(t *testing.T) { if testing.Short() { t.Skip("Skipping in short mode. Requires Logstash and Elasticsearch") } test := "cmpbulk" timeout := 10 * time.Second ls := newTestLogstashOutput(t, test) defer ls.Cleanup() es := newTestElasticsearchOutput(t, test) defer es.Cleanup() ts := time.Now() events := []common.MapStr{ common.MapStr{ "timestamp": common.Time(ts), "host": "test-host", "type": "log", "message": "hello world", }, } es.BulkPublish(nil, ts, events) waitUntilTrue(timeout, checkIndex(es, 1)) ls.BulkPublish(nil, ts, events) waitUntilTrue(timeout, checkIndex(ls, 1)) // search value in logstash elasticsearch index lsResp, err := ls.Read() if err != nil { return } esResp, err := es.Read() if err != nil { return } // validate assert.Equal(t, len(lsResp), len(esResp)) if len(lsResp) != 1 { t.Fatalf("wrong number of results: %d", len(lsResp)) } checkEvent(t, lsResp[0], esResp[0]) }
func (hadoop *Hadoop) publishTransaction(m *HadoopMessage) { if hadoop.results == nil { return } //TODO Find a better int to String convertion event := common.MapStr{} event["nonce"] = uuid.NewRandom().String() event["timestamp"] = common.Time(m.Ts) event["src"] = &m.Src event["dst"] = &m.Dst event["type"] = "hadoop" event["fullsize"] = m.FullSize hadoop.results <- event }
func (f *FileEvent) ToMapStr() common.MapStr { event := common.MapStr{ "timestamp": common.Time(f.ReadTime), "source": f.Source, "offset": f.Offset, "line": f.Line, "message": f.Text, "fileinfo": f.Fileinfo, "type": "log", } if f.Fields != nil { event["fields"] = f.Fields } return event }
func (t *Topbeat) exportProcStats() error { if len(t.procs) == 0 { return nil } pids, err := Pids() if err != nil { logp.Warn("Getting the list of pids: %v", err) return err } newProcs := make(ProcsMap, len(pids)) for _, pid := range pids { process, err := GetProcess(pid) if err != nil { logp.Debug("topbeat", "Skip process %d: %v", pid, err) continue } if t.MatchProcess(process.Name) { t.addProcCpuPercentage(process) t.addProcMemPercentage(process, 0 /*read total mem usage */) newProcs[process.Pid] = process event := common.MapStr{ "@timestamp": common.Time(time.Now()), "type": "proc", "proc": common.MapStr{ "pid": process.Pid, "ppid": process.Ppid, "name": process.Name, "state": process.State, "mem": process.Mem, "cpu": process.Cpu, }, } t.events.PublishEvent(event) } } t.procsMap = newProcs return nil }
func collectFileSystemStats(fss []sigar.FileSystem) []common.MapStr { events := make([]common.MapStr, 0, len(fss)) for _, fs := range fss { fsStat, err := GetFileSystemStat(fs) if err != nil { logp.Debug("topbeat", "Skip filesystem %d: %v", fsStat, err) continue } addFileSystemUsedPercentage(fsStat) event := common.MapStr{ "@timestamp": common.Time(time.Now()), "type": "filesystem", "fs": fsStat, } events = append(events, event) } return events }
func (http *Http) publishTransaction(t *HttpTransaction) { if http.results == nil { return } event := common.MapStr{} event["type"] = "http" code := t.Http["code"].(uint16) if code < 400 { event["status"] = common.OK_STATUS } else { event["status"] = common.ERROR_STATUS } event["responsetime"] = t.ResponseTime if http.Send_request { event["request"] = t.Request_raw } if http.Send_response { event["response"] = t.Response_raw } event["http"] = t.Http if len(t.Real_ip) > 0 { event["real_ip"] = t.Real_ip } event["method"] = t.Method event["path"] = t.Path event["query"] = fmt.Sprintf("%s %s", t.Method, t.Path) event["params"] = t.Params event["bytes_out"] = t.BytesOut event["bytes_in"] = t.BytesIn event["timestamp"] = common.Time(t.ts) event["src"] = &t.Src event["dst"] = &t.Dst if len(t.Notes) > 0 { event["notes"] = t.Notes } http.results.PublishEvent(event) }