func (p *Probe) report() report.Report { reports := make(chan report.Report, len(p.reporters)) for _, rep := range p.reporters { go func(rep Reporter) { t := time.Now() timer := time.AfterFunc(p.spyInterval, func() { log.Warningf("%v reporter took longer than %v", rep.Name(), p.spyInterval) }) newReport, err := rep.Report() if !timer.Stop() { log.Warningf("%v reporter took %v (longer than %v)", rep.Name(), time.Now().Sub(t), p.spyInterval) } metrics.MeasureSince([]string{rep.Name(), "reporter"}, t) if err != nil { log.Errorf("error generating report: %v", err) newReport = report.MakeReport() // empty is OK to merge } reports <- newReport }(rep) } result := report.MakeReport() for i := 0; i < cap(reports); i++ { result = result.Merge(<-reports) } return result }
func TestTagger(t *testing.T) { oldProcessTree := docker.NewProcessTreeStub defer func() { docker.NewProcessTreeStub = oldProcessTree }() docker.NewProcessTreeStub = func(_ process.Walker) (process.Tree, error) { return &mockProcessTree{map[int]int{3: 2}}, nil } var ( pid1NodeID = report.MakeProcessNodeID("somehost.com", "2") pid2NodeID = report.MakeProcessNodeID("somehost.com", "3") wantNode = report.MakeNodeWith(map[string]string{docker.ContainerID: "ping"}) ) input := report.MakeReport() input.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"})) input.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"})) want := report.MakeReport() want.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"}).Merge(wantNode)) want.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"}).Merge(wantNode)) tagger := docker.NewTagger(mockRegistryInstance, nil) have, err := tagger.Tag(input) if err != nil { t.Errorf("%v", err) } if !reflect.DeepEqual(want, have) { t.Errorf("%s", test.Diff(want, have)) } }
func TestMerge(t *testing.T) { log.SetOutput(ioutil.Discard) var ( p1Addr = "localhost:7888" p2Addr = "localhost:7889" ) p1, err := xfer.NewTCPPublisher(p1Addr) if err != nil { t.Fatal(err) } defer p1.Close() p2, err := xfer.NewTCPPublisher(p2Addr) if err != nil { t.Fatal(err) } defer p2.Close() batchTime := 100 * time.Millisecond c := xfer.NewCollector(batchTime, "id") c.Add(p1Addr) c.Add(p2Addr) defer c.Stop() time.Sleep(batchTime / 10) // connect k1, k2 := report.MakeHostNodeID("p1"), report.MakeHostNodeID("p2") { r := report.MakeReport() r.Host.NodeMetadatas[k1] = report.NodeMetadata{"host_name": "test1"} p1.Publish(r) } { r := report.MakeReport() r.Host.NodeMetadatas[k2] = report.NodeMetadata{"host_name": "test2"} p2.Publish(r) } success := make(chan struct{}) go func() { defer close(success) for r := range c.Reports() { if r.Host.NodeMetadatas[k1]["host_name"] != "test1" { continue } if r.Host.NodeMetadatas[k2]["host_name"] != "test2" { continue } return } }() select { case <-success: case <-time.After(2 * batchTime): t.Errorf("collector didn't capture both reports") } }
func TestFilterUnconnectedPesudoNodes(t *testing.T) { // Test pseudo nodes that are made unconnected by filtering // are also removed. { nodes := render.RenderableNodes{ "foo": {ID: "foo", Node: report.MakeNode().WithAdjacent("bar")}, "bar": {ID: "bar", Node: report.MakeNode().WithAdjacent("baz")}, "baz": {ID: "baz", Node: report.MakeNode(), Pseudo: true}, } renderer := render.Filter{ FilterFunc: func(node render.RenderableNode) bool { return true }, Renderer: mockRenderer{RenderableNodes: nodes}, } want := nodes.Prune() have := renderer.Render(report.MakeReport()).Prune() if !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) } } { renderer := render.Filter{ FilterFunc: func(node render.RenderableNode) bool { return node.ID != "bar" }, Renderer: mockRenderer{RenderableNodes: render.RenderableNodes{ "foo": {ID: "foo", Node: report.MakeNode().WithAdjacent("bar")}, "bar": {ID: "bar", Node: report.MakeNode().WithAdjacent("baz")}, "baz": {ID: "baz", Node: report.MakeNode(), Pseudo: true}, }}, } want := render.RenderableNodes{ "foo": {ID: "foo", Node: report.MakeNode()}, } have := renderer.Render(report.MakeReport()).Prune() if !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) } } { renderer := render.Filter{ FilterFunc: func(node render.RenderableNode) bool { return node.ID != "bar" }, Renderer: mockRenderer{RenderableNodes: render.RenderableNodes{ "foo": {ID: "foo", Node: report.MakeNode()}, "bar": {ID: "bar", Node: report.MakeNode().WithAdjacent("foo")}, "baz": {ID: "baz", Node: report.MakeNode().WithAdjacent("bar"), Pseudo: true}, }}, } want := render.RenderableNodes{ "foo": {ID: "foo", Node: report.MakeNode()}, } have := renderer.Render(report.MakeReport()).Prune() if !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) } } }
func TestCollector(t *testing.T) { window := time.Millisecond c := xfer.NewCollector(window) r1 := report.MakeReport() r1.Endpoint.NodeMetadatas["foo"] = report.MakeNodeMetadata() r2 := report.MakeReport() r2.Endpoint.NodeMetadatas["bar"] = report.MakeNodeMetadata() if want, have := report.MakeReport(), c.Report(); !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) } c.Add(r1) if want, have := r1, c.Report(); !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) } c.Add(r2) merged := report.MakeReport() merged = merged.Merge(r1) merged = merged.Merge(r2) if want, have := merged, c.Report(); !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) } }
func TestTagger(t *testing.T) { oldProcessTree := docker.NewProcessTreeStub defer func() { docker.NewProcessTreeStub = oldProcessTree }() docker.NewProcessTreeStub = func(procRoot string) (process.Tree, error) { return &mockProcessTree{map[int]int{2: 1}}, nil } var ( pid1NodeID = report.MakeProcessNodeID("somehost.com", "1") pid2NodeID = report.MakeProcessNodeID("somehost.com", "2") wantNodeMetadata = report.NodeMetadata{docker.ContainerID: "ping"} ) input := report.MakeReport() input.Process.NodeMetadatas[pid1NodeID] = report.NodeMetadata{"pid": "1"} input.Process.NodeMetadatas[pid2NodeID] = report.NodeMetadata{"pid": "2"} want := report.MakeReport() want.Process.NodeMetadatas[pid1NodeID] = report.NodeMetadata{"pid": "1"}.Merge(wantNodeMetadata) want.Process.NodeMetadatas[pid2NodeID] = report.NodeMetadata{"pid": "2"}.Merge(wantNodeMetadata) tagger := docker.NewTagger(mockRegistryInstance, "/irrelevant") have, err := tagger.Tag(input) if err != nil { t.Errorf("%v", err) } if !reflect.DeepEqual(want, have) { t.Errorf("%s", test.Diff(want, have)) } }
func TestMemoise(t *testing.T) { calls := 0 r := renderFunc(func(rpt report.Report) render.RenderableNodes { calls++ return render.RenderableNodes{rpt.ID: render.NewRenderableNode(rpt.ID)} }) m := render.Memoise(r) rpt1 := report.MakeReport() result1 := m.Render(rpt1) // it should have rendered it. if _, ok := result1[rpt1.ID]; !ok { t.Errorf("Expected rendered report to contain a node, but got: %v", result1) } if calls != 1 { t.Errorf("Expected renderer to have been called the first time") } result2 := m.Render(rpt1) if !reflect.DeepEqual(result1, result2) { t.Errorf("Expected memoised result to be returned: %s", test.Diff(result1, result2)) } if calls != 1 { t.Errorf("Expected renderer to not have been called the second time") } rpt2 := report.MakeReport() result3 := m.Render(rpt2) if reflect.DeepEqual(result1, result3) { t.Errorf("Expected different result for different report, but were the same") } if calls != 2 { t.Errorf("Expected renderer to have been called again for a different report") } }
// Merge combines the given reports into a single report using a
// divide-and-conquer binary merge over the reports sorted by hashed ID.
func (s smartMerger) Merge(reports []report.Report) report.Report {
	// Start with a sorted list of leaves.
	// Note we must dedupe reports with the same ID to ensure the
	// algorithm below doesn't go into an infinite loop. This is
	// fine as reports with the same ID are assumed to be the same.
	nodes := []*node{}
	seen := map[uint64]struct{}{}
	for _, r := range reports {
		id := hash(r.ID)
		if _, ok := seen[id]; ok {
			continue
		}
		seen[id] = struct{}{}
		nodes = append(nodes, &node{
			id:  id,
			rpt: r,
		})
	}
	sort.Sort(byID(nodes))

	// Define how to merge two nodes together. The result of merging
	// two reports is cached.
	// NOTE(review): no cache is visible in this function — if caching
	// happens it must live in hash/node elsewhere; confirm the claim.
	merge := func(left, right *node) *node {
		return &node{
			id:  hash(left.rpt.ID, right.rpt.ID),
			rpt: report.MakeReport().Merge(left.rpt).Merge(right.rpt),
		}
	}

	// Define how to reduce n nodes to 1.
	// Min and max are both inclusive!
	var reduce func(min, max uint64, nodes []*node) *node
	reduce = func(min, max uint64, nodes []*node) *node {
		switch len(nodes) {
		case 0:
			// No leaves in this id range: contribute an empty report.
			return &node{rpt: report.MakeReport()}
		case 1:
			return nodes[0]
		case 2:
			return merge(nodes[0], nodes[1])
		}
		// Split the id space at its midpoint; sort.Search finds the first
		// node whose id falls in the upper half (nodes are sorted by id).
		partition := min + ((max - min) / 2)
		index := sort.Search(len(nodes), func(i int) bool {
			return nodes[i].id > partition
		})
		if index == len(nodes) {
			// Every id is in the lower half; recurse on it alone.
			return reduce(min, partition, nodes)
		} else if index == 0 {
			// Every id is in the upper half; recurse on it alone.
			return reduce(partition+1, max, nodes)
		}
		left := reduce(min, partition, nodes[:index])
		right := reduce(partition+1, max, nodes[index:])
		return merge(left, right)
	}

	return reduce(0, math.MaxUint64, nodes).rpt
}
// Report generates a Report containing Container and ContainerImage topologies func (r *Reporter) Report() (report.Report, error) { localAddrs, err := report.LocalAddresses() if err != nil { return report.MakeReport(), nil } result := report.MakeReport() result.Container = result.Container.Merge(r.containerTopology(localAddrs)) result.ContainerImage = result.ContainerImage.Merge(r.containerImageTopology()) return result, nil }
// loop consumes decoded packets from src, merging them into an
// in-progress report, while toggling capture on and off according to
// the on/off duty cycle. Completed reports are handed out on demand
// through s.reports; the loop ends when the source is exhausted.
func (s *Sniffer) loop(src gopacket.ZeroCopyPacketDataSource, on, off time.Duration) {
	var (
		process = uint64(1)               // initially enabled
		total   = uint64(0)               // total packets seen
		count   = uint64(0)               // count of packets captured
		packets = make(chan Packet, 1024) // decoded packets
		rpt     = report.MakeReport()     // the report we build
		turnOn  = (<-chan time.Time)(nil) // signal to start capture (initially enabled)
		turnOff = time.After(on)          // signal to stop capture
		done    = make(chan struct{})     // when src is finished, we're done too
	)

	// As a special case, if our off duty cycle is zero, i.e. 100% sample
	// rate, we simply disable the turn-off signal channel.
	if off == 0 {
		turnOff = nil
	}

	go func() {
		// read runs until src is exhausted, then we signal completion.
		// The counters are shared with this loop and accessed atomically.
		s.read(src, packets, &process, &total, &count)
		close(done)
	}()

	for {
		select {
		case p := <-packets:
			s.Merge(p, &rpt)

		case <-turnOn:
			atomic.StoreUint64(&process, 1) // enable packet capture
			turnOn = nil                    // disable the on switch
			turnOff = time.After(on)        // enable the off switch

		case <-turnOff:
			atomic.StoreUint64(&process, 0) // disable packet capture
			turnOn = time.After(off)        // enable the on switch
			turnOff = nil                   // disable the off switch

		case c := <-s.reports:
			// Snapshot sampling counters into the report, hand it out,
			// then reset state for the next reporting window.
			rpt.Sampling.Count = atomic.LoadUint64(&count)
			rpt.Sampling.Total = atomic.LoadUint64(&total)
			interpolateCounts(rpt)
			c <- rpt
			atomic.StoreUint64(&count, 0)
			atomic.StoreUint64(&total, 0)
			rpt = report.MakeReport()

		case <-done:
			return
		}
	}
}
func TestApply(t *testing.T) { var ( endpointNodeID = "c" addressNodeID = "d" endpointNode = report.MakeNodeWith(map[string]string{"5": "6"}) addressNode = report.MakeNodeWith(map[string]string{"7": "8"}) ) r := report.MakeReport() r.Endpoint.Nodes[endpointNodeID] = endpointNode r.Address.Nodes[addressNodeID] = addressNode r = Apply(r, []Tagger{newTopologyTagger()}) for _, tuple := range []struct { want report.Node from report.Topology via string }{ {endpointNode.Merge(report.MakeNodeWith(map[string]string{"topology": "endpoint"})), r.Endpoint, endpointNodeID}, {addressNode.Merge(report.MakeNodeWith(map[string]string{"topology": "address"})), r.Address, addressNodeID}, } { if want, have := tuple.want, tuple.from.Nodes[tuple.via]; !reflect.DeepEqual(want, have) { t.Errorf("want %+v, have %+v", want, have) } } }
func TestApply(t *testing.T) { var ( endpointNodeID = "c" addressNodeID = "d" endpointNodeMetadata = report.NodeMetadata{"5": "6"} addressNodeMetadata = report.NodeMetadata{"7": "8"} ) r := report.MakeReport() r.Endpoint.NodeMetadatas[endpointNodeID] = endpointNodeMetadata r.Address.NodeMetadatas[addressNodeID] = addressNodeMetadata r = tag.Apply(r, []tag.Tagger{tag.NewTopologyTagger()}) for _, tuple := range []struct { want report.NodeMetadata from report.Topology via string }{ {copy(endpointNodeMetadata).Merge(report.NodeMetadata{"topology": "endpoint"}), r.Endpoint, endpointNodeID}, {copy(addressNodeMetadata).Merge(report.NodeMetadata{"topology": "address"}), r.Address, addressNodeID}, } { if want, have := tuple.want, tuple.from.NodeMetadatas[tuple.via]; !reflect.DeepEqual(want, have) { t.Errorf("want %+v, have %+v", want, have) } } }
// Report implements Reporter. func (w *Weave) Report() (report.Report, error) { w.mtx.RLock() defer w.mtx.RUnlock() r := report.MakeReport() r.Container = r.Container.WithMetadataTemplates(report.MetadataTemplates{ WeaveMACAddress: {ID: WeaveMACAddress, Label: "Weave MAC", From: report.FromLatest, Priority: 17}, WeaveDNSHostname: {ID: WeaveDNSHostname, Label: "Weave DNS Name", From: report.FromLatest, Priority: 18}, }) for _, peer := range w.statusCache.Router.Peers { r.Overlay.AddNode(report.MakeNodeWith(report.MakeOverlayNodeID(peer.Name), map[string]string{ WeavePeerName: peer.Name, WeavePeerNickName: peer.NickName, })) } if w.statusCache.IPAM.DefaultSubnet != "" { r.Overlay.AddNode( report.MakeNode(report.MakeOverlayNodeID(w.statusCache.Router.Name)).WithSets( report.MakeSets().Add(host.LocalNetworks, report.MakeStringSet(w.statusCache.IPAM.DefaultSubnet)), ), ) } return r, nil }
// Report implements Reporter. func (r *Reporter) Report() (report.Report, error) { var ( rep = report.MakeReport() localCIDRs []string ) for _, localNet := range r.localNets { localCIDRs = append(localCIDRs, localNet.String()) } uptime, err := GetUptime() if err != nil { return rep, err } kernel, err := GetKernelVersion() if err != nil { return rep, err } rep.Host.AddNode(report.MakeHostNodeID(r.hostID), report.MakeNodeWith(map[string]string{ Timestamp: Now(), HostName: r.hostName, OS: runtime.GOOS, Load: GetLoad(), KernelVersion: kernel, Uptime: uptime.String(), }).WithSets(report.Sets{ LocalNetworks: report.MakeStringSet(localCIDRs...), })) return rep, nil }
// Report implements Reporter. func (r *Reporter) Report() (report.Report, error) { var ( rep = report.MakeReport() localCIDRs []string ) for _, localNet := range r.localNets { localCIDRs = append(localCIDRs, localNet.String()) } uptime, err := GetUptime() if err != nil { return rep, err } kernel, err := GetKernelVersion() if err != nil { return rep, err } rep.Host.NodeMetadatas[report.MakeHostNodeID(r.hostID)] = report.MakeNodeMetadataWith(map[string]string{ Timestamp: Now(), HostName: r.hostName, LocalNetworks: strings.Join(localCIDRs, " "), OS: runtime.GOOS, Load: GetLoad(), KernelVersion: kernel, Uptime: uptime.String(), }) return rep, nil }
func TestApply(t *testing.T) { var ( endpointNodeID = "c" addressNodeID = "d" endpointNode = report.MakeNodeWith(map[string]string{"5": "6"}) addressNode = report.MakeNodeWith(map[string]string{"7": "8"}) ) p := New(0, 0, nil) p.AddTagger(NewTopologyTagger()) r := report.MakeReport() r.Endpoint.AddNode(endpointNodeID, endpointNode) r.Address.AddNode(addressNodeID, addressNode) r = p.tag(r) for _, tuple := range []struct { want report.Node from report.Topology via string }{ {endpointNode.Merge(report.MakeNode().WithID("c").WithTopology(report.Endpoint)), r.Endpoint, endpointNodeID}, {addressNode.Merge(report.MakeNode().WithID("d").WithTopology(report.Address)), r.Address, addressNodeID}, } { if want, have := tuple.want, tuple.from.Nodes[tuple.via]; !reflect.DeepEqual(want, have) { t.Errorf("want %+v, have %+v", want, have) } } }
// TestReporter verifies that the docker Reporter produces Container and
// ContainerImage topologies matching the mock registry's contents.
func TestReporter(t *testing.T) {
	want := report.MakeReport()
	// Expected container topology: the single "ping" container.
	want.Container = report.Topology{
		Adjacency:     report.Adjacency{},
		EdgeMetadatas: report.EdgeMetadatas{},
		NodeMetadatas: report.NodeMetadatas{
			report.MakeContainerNodeID("", "ping"): report.NewNodeMetadata(map[string]string{
				docker.ContainerID:   "ping",
				docker.ContainerName: "pong",
				docker.ImageID:       "baz",
			}),
		},
	}
	// Expected image topology: the "baz" image backing that container.
	want.ContainerImage = report.Topology{
		Adjacency:     report.Adjacency{},
		EdgeMetadatas: report.EdgeMetadatas{},
		NodeMetadatas: report.NodeMetadatas{
			report.MakeContainerNodeID("", "baz"): report.NewNodeMetadata(map[string]string{
				docker.ImageID:   "baz",
				docker.ImageName: "bang",
			}),
		},
	}
	reporter := docker.NewReporter(mockRegistryInstance, "")
	have, _ := reporter.Report()
	if !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
// Report generates a Report containing Container and ContainerImage topologies func (r *Reporter) Report() (report.Report, error) { result := report.MakeReport() serviceTopology, services, err := r.serviceTopology() if err != nil { return result, err } hostTopology := r.hostTopology(services) if err != nil { return result, err } deploymentTopology, deployments, err := r.deploymentTopology(r.probeID) if err != nil { return result, err } replicaSetTopology, replicaSets, err := r.replicaSetTopology(r.probeID, deployments) if err != nil { return result, err } podTopology, err := r.podTopology(services, replicaSets) if err != nil { return result, err } result.Pod = result.Pod.Merge(podTopology) result.Service = result.Service.Merge(serviceTopology) result.Host = result.Host.Merge(hostTopology) result.Deployment = result.Deployment.Merge(deploymentTopology) result.ReplicaSet = result.ReplicaSet.Merge(replicaSetTopology) return result, nil }
// NewReportLIFO XXX func NewReportLIFO(r reporter, maxAge time.Duration) *ReportLIFO { l := ReportLIFO{ reports: []timedReport{}, requests: make(chan chan report.Report), quit: make(chan chan struct{}), } go func() { for { select { case report := <-r.Reports(): tr := timedReport{ Timestamp: time.Now(), Report: report, } l.reports = append(l.reports, tr) l.reports = cleanOld(l.reports, time.Now().Add(-maxAge)) case req := <-l.requests: report := report.MakeReport() for _, r := range l.reports { report.Merge(r.Report) } req <- report case q := <-l.quit: close(q) return } } }() return &l }
func TestHTTPPublisher(t *testing.T) { var ( token = "abcdefg" rpt = report.MakeReport() ) s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if want, have := xfer.AuthorizationHeader(token), r.Header.Get("Authorization"); want != have { t.Errorf("want %q, have %q", want, have) } var have report.Report if err := gob.NewDecoder(r.Body).Decode(&have); err != nil { t.Error(err) return } if want := rpt; !reflect.DeepEqual(want, have) { t.Error(test.Diff(want, have)) return } w.WriteHeader(http.StatusOK) })) defer s.Close() p, err := xfer.NewHTTPPublisher(s.URL, token) if err != nil { t.Fatal(err) } if err := p.Publish(rpt); err != nil { t.Error(err) } }
// Report implements Reporter. func (w Weave) Report() (report.Report, error) { r := report.MakeReport() resp, err := http.Get(w.url) if err != nil { log.Printf("Weave Tagger: %v", err) return r, err } defer resp.Body.Close() var status struct { Peers []struct { Name string `json:"Name"` NickName string `json:"NickName"` } `json:"Peers"` } if err := json.NewDecoder(resp.Body).Decode(&status); err != nil { log.Printf("Weave Tagger: %v", err) return r, err } for _, peer := range status.Peers { r.Overlay.NodeMetadatas[report.MakeOverlayNodeID(peer.Name)] = report.NewNodeMetadata(map[string]string{ WeavePeerName: peer.Name, WeavePeerNickName: peer.NickName, }) } return r, nil }
// TestAPITopologyAddsKubernetes checks that posting a report containing
// a Pod topology makes the "Pods" topology appear in /api/topology.
func TestAPITopologyAddsKubernetes(t *testing.T) {
	router := mux.NewRouter()
	c := app.NewCollector(1 * time.Minute)
	app.RegisterReportPostHandler(c, router)
	app.RegisterTopologyRoutes(router, c)
	ts := httptest.NewServer(router)
	defer ts.Close()

	// Baseline: four topologies before any kubernetes data arrives.
	body := getRawJSON(t, ts, "/api/topology")
	var topologies []app.APITopologyDesc
	decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
	if err := decoder.Decode(&topologies); err != nil {
		t.Fatalf("JSON parse error: %s", err)
	}
	equals(t, 4, len(topologies))

	// Enable the kubernetes topologies
	rpt := report.MakeReport()
	rpt.Pod = report.MakeTopology()
	rpt.Pod.Nodes[fixture.ClientPodNodeID] = kubernetes.NewPod(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "pong-a",
			Namespace: "ping",
			Labels:    map[string]string{"ponger": "true"},
		},
		Status: api.PodStatus{
			HostIP: "1.2.3.4",
			ContainerStatuses: []api.ContainerStatus{
				{ContainerID: "container1"},
				{ContainerID: "container2"},
			},
		},
	}).GetNode("")

	// Post the report msgpack-encoded.
	// NOTE(review): the encoder is msgpack but the error message below
	// says "GOB encoding error" — the message looks stale.
	buf := &bytes.Buffer{}
	encoder := codec.NewEncoder(buf, &codec.MsgpackHandle{})
	if err := encoder.Encode(rpt); err != nil {
		t.Fatalf("GOB encoding error: %s", err)
	}
	checkRequest(t, ts, "POST", "/api/report", buf.Bytes())

	// Re-fetch: still four topologies, but "Pods" should now be present.
	body = getRawJSON(t, ts, "/api/topology")
	decoder = codec.NewDecoderBytes(body, &codec.JsonHandle{})
	if err := decoder.Decode(&topologies); err != nil {
		t.Fatalf("JSON parse error: %s", err)
	}
	equals(t, 4, len(topologies))

	found := false
	for _, topology := range topologies {
		if topology.Name == "Pods" {
			found = true
			break
		}
	}
	if !found {
		t.Error("Could not find pods topology")
	}
}
// TestReporter verifies that the process Reporter turns a walked
// process table into the expected Process topology, one node per
// process, each carrying PID/PPID/name/cmdline/threads and a memory
// metric sampled at the frozen "now".
func TestReporter(t *testing.T) {
	walker := &mockWalker{
		processes: []process.Process{
			{PID: 1, PPID: 0, Name: "init"},
			{PID: 2, PPID: 1, Name: "bash"},
			{PID: 3, PPID: 1, Name: "apache", Threads: 2},
			{PID: 4, PPID: 2, Name: "ping", Cmdline: "ping foo.bar.local"},
			{PID: 5, PPID: 1, Cmdline: "tail -f /var/log/syslog"},
		},
	}
	// Stub jiffies so CPU deltas are deterministic (all zero).
	getDeltaTotalJiffies := func() (uint64, float64, error) { return 0, 0., nil }
	now := time.Now()
	mtime.NowForce(now) // freeze time so metric timestamps are comparable
	defer mtime.NowReset()
	reporter := process.NewReporter(walker, "", getDeltaTotalJiffies)

	want := report.MakeReport()
	want.Process = report.MakeTopology().AddNode(
		report.MakeProcessNodeID("", "1"), report.MakeNodeWith(map[string]string{
			process.PID:     "1",
			process.Name:    "init",
			process.Threads: "0",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	).AddNode(
		report.MakeProcessNodeID("", "2"), report.MakeNodeWith(map[string]string{
			process.PID:     "2",
			process.Name:    "bash",
			process.PPID:    "1",
			process.Threads: "0",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	).AddNode(
		report.MakeProcessNodeID("", "3"), report.MakeNodeWith(map[string]string{
			process.PID:     "3",
			process.Name:    "apache",
			process.PPID:    "1",
			process.Threads: "2",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	).AddNode(
		report.MakeProcessNodeID("", "4"), report.MakeNodeWith(map[string]string{
			process.PID:     "4",
			process.Name:    "ping",
			process.PPID:    "2",
			process.Cmdline: "ping foo.bar.local",
			process.Threads: "0",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	).AddNode(
		report.MakeProcessNodeID("", "5"), report.MakeNodeWith(map[string]string{
			process.PID:     "5",
			process.PPID:    "1",
			process.Cmdline: "tail -f /var/log/syslog",
			process.Threads: "0",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	)

	have, err := reporter.Report()
	if err != nil || !reflect.DeepEqual(want, have) {
		t.Errorf("%s (%v)", test.Diff(want, have), err)
	}
}
// TestReporter verifies the host Reporter's output: it stubs every
// host-level probe (kernel, load, uptime, CPU, memory), freezes time,
// and checks the emitted Host node's metadata, sets and metrics.
func TestReporter(t *testing.T) {
	var (
		release   = "release"
		version   = "version"
		network   = "192.168.0.0/16"
		hostID    = "hostid"
		hostname  = "hostname"
		timestamp = time.Now()
		load      = report.Metrics{
			host.Load1:    report.MakeMetric().Add(timestamp, 1.0),
			host.Load5:    report.MakeMetric().Add(timestamp, 5.0),
			host.Load15:   report.MakeMetric().Add(timestamp, 15.0),
			host.CPUUsage: report.MakeMetric().Add(timestamp, 30.0).WithMax(100.0),
			host.MemUsage: report.MakeMetric().Add(timestamp, 60.0).WithMax(100.0),
		}
		uptime      = "278h55m43s"
		kernel      = "release version"
		_, ipnet, _ = net.ParseCIDR(network)
		localNets   = report.Networks([]*net.IPNet{ipnet})
	)

	mtime.NowForce(timestamp) // freeze time so metric timestamps compare equal
	defer mtime.NowReset()

	// Save and restore all the package-level probe hooks we stub below.
	var (
		oldGetKernelVersion    = host.GetKernelVersion
		oldGetLoad             = host.GetLoad
		oldGetUptime           = host.GetUptime
		oldGetCPUUsagePercent  = host.GetCPUUsagePercent
		oldGetMemoryUsageBytes = host.GetMemoryUsageBytes
	)
	defer func() {
		host.GetKernelVersion = oldGetKernelVersion
		host.GetLoad = oldGetLoad
		host.GetUptime = oldGetUptime
		host.GetCPUUsagePercent = oldGetCPUUsagePercent
		host.GetMemoryUsageBytes = oldGetMemoryUsageBytes
	}()
	host.GetKernelVersion = func() (string, error) { return release + " " + version, nil }
	host.GetLoad = func(time.Time) report.Metrics { return load }
	host.GetUptime = func() (time.Duration, error) { return time.ParseDuration(uptime) }
	host.GetCPUUsagePercent = func() (float64, float64) { return 30.0, 100.0 }
	host.GetMemoryUsageBytes = func() (float64, float64) { return 60.0, 100.0 }

	want := report.MakeReport()
	want.Host.AddNode(report.MakeHostNodeID(hostID), report.MakeNodeWith(map[string]string{
		host.Timestamp:     timestamp.UTC().Format(time.RFC3339Nano),
		host.HostName:      hostname,
		host.OS:            runtime.GOOS,
		host.Uptime:        uptime,
		host.KernelVersion: kernel,
	}).WithSets(report.Sets{
		host.LocalNetworks: report.MakeStringSet(network),
	}).WithMetrics(load))

	have, _ := host.NewReporter(hostID, hostname, localNets).Report()
	if !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
// Tag implements Tagger. func (t *Tagger) Tag(r report.Report) (report.Report, error) { tree, err := NewProcessTreeStub(t.procWalker) if err != nil { return report.MakeReport(), err } t.tag(tree, &r.Process) return r, nil }
// Make sure we don't add a topology and miss it in the Topologies method. func TestReportTopologies(t *testing.T) { var ( reportType = reflect.TypeOf(report.MakeReport()) topologyType = reflect.TypeOf(report.NewTopology()) ) var want int for i := 0; i < reportType.NumField(); i++ { if reportType.Field(i).Type == topologyType { want++ } } if have := len(report.MakeReport().Topologies()); want != have { t.Errorf("want %d, have %d", want, have) } }
// TestReporter verifies the kubernetes Reporter: two pods backed by one
// service, with Container/Pod/Service topologies cross-linked via
// parent sets.
func TestReporter(t *testing.T) {
	want := report.MakeReport()
	pod1ID := report.MakePodNodeID("ping", "pong-a")
	pod2ID := report.MakePodNodeID("ping", "pong-b")
	serviceID := report.MakeServiceNodeID("ping", "pongservice")

	// Pod topology: both pods parented to the service.
	want.Pod = report.MakeTopology().AddNode(pod1ID, report.MakeNodeWith(map[string]string{
		kubernetes.PodID:           "ping/pong-a",
		kubernetes.PodName:         "pong-a",
		kubernetes.Namespace:       "ping",
		kubernetes.PodCreated:      pod1.Created(),
		kubernetes.PodContainerIDs: "container1 container2",
		kubernetes.ServiceIDs:      "ping/pongservice",
	}).WithParents(report.Sets{
		report.Service: report.MakeStringSet(serviceID),
	})).AddNode(pod2ID, report.MakeNodeWith(map[string]string{
		kubernetes.PodID:           "ping/pong-b",
		kubernetes.PodName:         "pong-b",
		kubernetes.Namespace:       "ping",
		kubernetes.PodCreated:      pod1.Created(),
		kubernetes.PodContainerIDs: "container3 container4",
		kubernetes.ServiceIDs:      "ping/pongservice",
	}).WithParents(report.Sets{
		report.Service: report.MakeStringSet(serviceID),
	}))

	// Service topology: the single service.
	want.Service = report.MakeTopology().AddNode(serviceID, report.MakeNodeWith(map[string]string{
		kubernetes.ServiceID:      "ping/pongservice",
		kubernetes.ServiceName:    "pongservice",
		kubernetes.Namespace:      "ping",
		kubernetes.ServiceCreated: pod1.Created(),
	}))

	// Container topology: four containers, each parented to its pod.
	want.Container = report.MakeTopology().AddNode(report.MakeContainerNodeID("container1"), report.MakeNodeWith(map[string]string{
		kubernetes.PodID:     "ping/pong-a",
		kubernetes.Namespace: "ping",
	}).WithParents(report.Sets{
		report.Pod: report.MakeStringSet(pod1ID),
	})).AddNode(report.MakeContainerNodeID("container2"), report.MakeNodeWith(map[string]string{
		kubernetes.PodID:     "ping/pong-a",
		kubernetes.Namespace: "ping",
	}).WithParents(report.Sets{
		report.Pod: report.MakeStringSet(pod1ID),
	})).AddNode(report.MakeContainerNodeID("container3"), report.MakeNodeWith(map[string]string{
		kubernetes.PodID:     "ping/pong-b",
		kubernetes.Namespace: "ping",
	}).WithParents(report.Sets{
		report.Pod: report.MakeStringSet(pod2ID),
	})).AddNode(report.MakeContainerNodeID("container4"), report.MakeNodeWith(map[string]string{
		kubernetes.PodID:     "ping/pong-b",
		kubernetes.Namespace: "ping",
	}).WithParents(report.Sets{
		report.Pod: report.MakeStringSet(pod2ID),
	}))

	reporter := kubernetes.NewReporter(mockClientInstance)
	have, _ := reporter.Report()
	if !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
// Report implements Reporter. func (r *Reporter) Report() (report.Report, error) { result := report.MakeReport() processes, err := r.processTopology() if err != nil { return result, err } result.Process = result.Process.Merge(processes) return result, nil }
func TestTagMissingID(t *testing.T) { const nodeID = "not-found" r := report.MakeReport() rpt, _ := NewTopologyTagger().Tag(r) _, ok := rpt.Endpoint.Nodes[nodeID] if ok { t.Error("TopologyTagger erroneously tagged a missing node ID") } }
// TestReporter verifies the process Reporter against a stubbed
// process.Walk: one NodeMetadata per walked process with
// PID/Comm/PPID/Cmdline/Threads rendered as strings.
func TestReporter(t *testing.T) {
	// Swap in a deterministic process walker; restore it afterwards.
	oldWalk := process.Walk
	defer func() { process.Walk = oldWalk }()
	process.Walk = func(_ string, f func(*process.Process)) error {
		for _, p := range []*process.Process{
			{PID: 1, PPID: 0, Comm: "init"},
			{PID: 2, PPID: 1, Comm: "bash"},
			{PID: 3, PPID: 1, Comm: "apache", Threads: 2},
			{PID: 4, PPID: 2, Comm: "ping", Cmdline: "ping foo.bar.local"},
		} {
			f(p)
		}
		return nil
	}

	reporter := process.NewReporter("", "")
	want := report.MakeReport()
	want.Process = report.Topology{
		Adjacency:     report.Adjacency{},
		EdgeMetadatas: report.EdgeMetadatas{},
		NodeMetadatas: report.NodeMetadatas{
			report.MakeProcessNodeID("", "1"): report.NodeMetadata{
				process.PID:     "1",
				process.Comm:    "init",
				process.Cmdline: "",
				process.Threads: "0",
			},
			report.MakeProcessNodeID("", "2"): report.NodeMetadata{
				process.PID:     "2",
				process.Comm:    "bash",
				process.PPID:    "1",
				process.Cmdline: "",
				process.Threads: "0",
			},
			report.MakeProcessNodeID("", "3"): report.NodeMetadata{
				process.PID:     "3",
				process.Comm:    "apache",
				process.PPID:    "1",
				process.Cmdline: "",
				process.Threads: "2",
			},
			report.MakeProcessNodeID("", "4"): report.NodeMetadata{
				process.PID:     "4",
				process.Comm:    "ping",
				process.PPID:    "2",
				process.Cmdline: "ping foo.bar.local",
				process.Threads: "0",
			},
		},
	}

	have, err := reporter.Report()
	if err != nil || !reflect.DeepEqual(want, have) {
		t.Errorf("%s (%v)", test.Diff(want, have), err)
	}
}