func TestMerger(t *testing.T) {
	// Use 3 reports to check the pair-wise merging in SmartMerger
	report1 := report.MakeReport()
	report1.Endpoint.AddNode(report.MakeNode("foo"))
	report2 := report.MakeReport()
	report2.Endpoint.AddNode(report.MakeNode("bar"))
	report3 := report.MakeReport()
	report3.Endpoint.AddNode(report.MakeNode("baz"))
	reports := []report.Report{
		report1, report2, report3,
	}
	want := report.MakeReport()
	want.Endpoint.
		AddNode(report.MakeNode("foo")).
		AddNode(report.MakeNode("bar")).
		AddNode(report.MakeNode("baz"))

	for _, merger := range []app.Merger{app.MakeDumbMerger(), app.NewSmartMerger()} {
		// Test the empty list case
		if have := merger.Merge([]report.Report{}); !reflect.DeepEqual(have, report.MakeReport()) {
			t.Errorf("Bad merge: %s", test.Diff(have, report.MakeReport()))
		}

		if have := merger.Merge(reports); !reflect.DeepEqual(have, want) {
			t.Errorf("Bad merge: %s", test.Diff(have, want))
		}

		// Repeat the above test to ensure caching works
		if have := merger.Merge(reports); !reflect.DeepEqual(have, want) {
			t.Errorf("Bad merge: %s", test.Diff(have, want))
		}
	}
}
func (s smartMerger) Merge(reports []report.Report) report.Report {
	// Start with a sorted list of leaves.
	// Note we must dedupe reports with the same ID to ensure the
	// algorithm below doesn't go into an infinite loop. This is
	// fine as reports with the same ID are assumed to be the same.
	nodes := []*node{}
	seen := map[uint64]struct{}{}
	for _, r := range reports {
		id := hash(r.ID)
		if _, ok := seen[id]; ok {
			continue
		}
		seen[id] = struct{}{}
		nodes = append(nodes, &node{
			id:  id,
			rpt: r,
		})
	}
	sort.Sort(byID(nodes))

	// Define how to merge two nodes together. The result of merging
	// two reports is cached.
	merge := func(left, right *node) *node {
		return &node{
			id:  hash(left.rpt.ID, right.rpt.ID),
			rpt: report.MakeReport().Merge(left.rpt).Merge(right.rpt),
		}
	}

	// Define how to reduce n nodes to 1.
	// Min and max are both inclusive!
	var reduce func(min, max uint64, nodes []*node) *node
	reduce = func(min, max uint64, nodes []*node) *node {
		switch len(nodes) {
		case 0:
			return &node{rpt: report.MakeReport()}
		case 1:
			return nodes[0]
		case 2:
			return merge(nodes[0], nodes[1])
		}

		partition := min + ((max - min) / 2)
		index := sort.Search(len(nodes), func(i int) bool {
			return nodes[i].id > partition
		})
		if index == len(nodes) {
			return reduce(min, partition, nodes)
		} else if index == 0 {
			return reduce(partition+1, max, nodes)
		}

		left := reduce(min, partition, nodes[:index])
		right := reduce(partition+1, max, nodes[index:])
		return merge(left, right)
	}

	return reduce(0, math.MaxUint64, nodes).rpt
}
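For orientation, here is a minimal usage sketch of the two mergers above. It assumes only the app and report APIs already shown (plus fmt); the function name is illustrative, not part of the Scope source.

// exampleMergeReports is a sketch only: it builds two small reports and
// merges them with both merger implementations shown above.
func exampleMergeReports() {
	r1 := report.MakeReport()
	r1.Endpoint.AddNode(report.MakeNode("a"))
	r2 := report.MakeReport()
	r2.Endpoint.AddNode(report.MakeNode("b"))

	for _, m := range []app.Merger{app.MakeDumbMerger(), app.NewSmartMerger()} {
		merged := m.Merge([]report.Report{r1, r2})
		fmt.Printf("merged report contains %d endpoint nodes\n", len(merged.Endpoint.Nodes))
	}
}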
// Report generates a Report containing Container and ContainerImage topologies
func (r *Reporter) Report() (report.Report, error) {
	localAddrs, err := report.LocalAddresses()
	if err != nil {
		// Fall back to an empty report rather than failing the probe cycle
		return report.MakeReport(), nil
	}

	result := report.MakeReport()
	result.Container = result.Container.Merge(r.containerTopology(localAddrs))
	result.ContainerImage = result.ContainerImage.Merge(r.containerImageTopology())
	return result, nil
}
func (s *Sniffer) loop(src gopacket.ZeroCopyPacketDataSource, on, off time.Duration) {
	var (
		process = uint64(1)               // initially enabled
		total   = uint64(0)               // total packets seen
		count   = uint64(0)               // count of packets captured
		packets = make(chan Packet, 1024) // decoded packets
		rpt     = report.MakeReport()     // the report we build
		turnOn  = (<-chan time.Time)(nil) // signal to start capture (initially enabled)
		turnOff = time.After(on)          // signal to stop capture
		done    = make(chan struct{})     // when src is finished, we're done too
	)

	// As a special case, if our off duty cycle is zero, i.e. 100% sample
	// rate, we simply disable the turn-off signal channel.
	if off == 0 {
		turnOff = nil
	}

	go func() {
		s.read(src, packets, &process, &total, &count)
		close(done)
	}()

	for {
		select {
		case p := <-packets:
			s.Merge(p, &rpt)

		case <-turnOn:
			atomic.StoreUint64(&process, 1) // enable packet capture
			turnOn = nil                    // disable the on switch
			turnOff = time.After(on)        // enable the off switch

		case <-turnOff:
			atomic.StoreUint64(&process, 0) // disable packet capture
			turnOn = time.After(off)        // enable the on switch
			turnOff = nil                   // disable the off switch

		case c := <-s.reports:
			rpt.Sampling.Count = atomic.LoadUint64(&count)
			rpt.Sampling.Total = atomic.LoadUint64(&total)
			interpolateCounts(rpt)
			c <- rpt
			atomic.StoreUint64(&count, 0)
			atomic.StoreUint64(&total, 0)
			rpt = report.MakeReport()

		case <-done:
			return
		}
	}
}
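The sampling loop above relies on a common Go idiom: a receive from a nil channel blocks forever, so keeping exactly one of turnOn/turnOff non-nil means only the active phase of the duty cycle can ever fire. The following self-contained sketch shows just that pattern in isolation; the names and durations are illustrative, not from the Scope source.

package main

import (
	"fmt"
	"time"
)

// dutyCycle toggles between "on" and "off" phases of the given durations,
// printing each transition for a few seconds. Only one of the two timer
// channels is ever non-nil, so the select cannot fire both phases at once.
func dutyCycle(on, off time.Duration) {
	var (
		turnOn  <-chan time.Time // nil: receives block, "on" transition disabled
		turnOff = time.After(on) // start in the "on" phase
		stop    = time.After(5 * time.Second)
	)
	for {
		select {
		case <-turnOn:
			fmt.Println("capture on")
			turnOn, turnOff = nil, time.After(on)
		case <-turnOff:
			fmt.Println("capture off")
			turnOn, turnOff = time.After(off), nil
		case <-stop:
			return
		}
	}
}

func main() {
	dutyCycle(300*time.Millisecond, 700*time.Millisecond)
}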
// Report implements Reporter.
func (w *Weave) Report() (report.Report, error) {
	w.mtx.RLock()
	defer w.mtx.RUnlock()

	r := report.MakeReport()
	r.Container = r.Container.WithMetadataTemplates(report.MetadataTemplates{
		WeaveMACAddress:  {ID: WeaveMACAddress, Label: "Weave MAC", From: report.FromLatest, Priority: 17},
		WeaveDNSHostname: {ID: WeaveDNSHostname, Label: "Weave DNS Name", From: report.FromLatest, Priority: 18},
	})
	for _, peer := range w.statusCache.Router.Peers {
		r.Overlay.AddNode(report.MakeNodeWith(report.MakeOverlayNodeID(peer.Name), map[string]string{
			WeavePeerName:     peer.Name,
			WeavePeerNickName: peer.NickName,
		}))
	}
	if w.statusCache.IPAM.DefaultSubnet != "" {
		r.Overlay.AddNode(
			report.MakeNode(report.MakeOverlayNodeID(w.statusCache.Router.Name)).WithSets(
				report.MakeSets().Add(host.LocalNetworks, report.MakeStringSet(w.statusCache.IPAM.DefaultSubnet)),
			),
		)
	}
	return r, nil
}
// Report generates a Report containing Pod, Service, Host, Deployment and ReplicaSet topologies
func (r *Reporter) Report() (report.Report, error) {
	result := report.MakeReport()

	serviceTopology, services, err := r.serviceTopology()
	if err != nil {
		return result, err
	}
	hostTopology := r.hostTopology(services)
	deploymentTopology, deployments, err := r.deploymentTopology(r.probeID)
	if err != nil {
		return result, err
	}
	replicaSetTopology, replicaSets, err := r.replicaSetTopology(r.probeID, deployments)
	if err != nil {
		return result, err
	}
	podTopology, err := r.podTopology(services, replicaSets)
	if err != nil {
		return result, err
	}

	result.Pod = result.Pod.Merge(podTopology)
	result.Service = result.Service.Merge(serviceTopology)
	result.Host = result.Host.Merge(hostTopology)
	result.Deployment = result.Deployment.Merge(deploymentTopology)
	result.ReplicaSet = result.ReplicaSet.Merge(replicaSetTopology)
	return result, nil
}
// ContainerUpdated should be called whenever a container is updated.
func (r *Reporter) ContainerUpdated(n report.Node) {
	// Publish a 'shortcut' report containing just this container
	rpt := report.MakeReport()
	rpt.Shortcut = true
	rpt.Container.AddNode(n)
	r.probe.Publish(rpt)
}
func TestTagger(t *testing.T) {
	var (
		hostID         = "foo"
		endpointNodeID = report.MakeEndpointNodeID(hostID, "1.2.3.4", "56789") // hostID ignored
		node           = report.MakeNodeWith(endpointNodeID, map[string]string{"foo": "bar"})
	)

	r := report.MakeReport()
	r.Process.AddNode(node)
	rpt, _ := host.NewTagger(hostID).Tag(r)
	have := rpt.Process.Nodes[endpointNodeID].Copy()

	// It should now have the host ID
	wantHostID := report.MakeHostNodeID(hostID)
	if hostID, ok := have.Latest.Lookup(report.HostNodeID); !ok || hostID != wantHostID {
		t.Errorf("Expected %q got %q", wantHostID, hostID)
	}

	// It should still have the other keys
	want := "bar"
	if have, ok := have.Latest.Lookup("foo"); !ok || have != want {
		t.Errorf("Expected %q got %q", want, have)
	}

	// It should have the host as a parent
	wantParent := report.MakeHostNodeID(hostID)
	if have, ok := have.Parents.Lookup(report.Host); !ok || len(have) != 1 || have[0] != wantParent {
		t.Errorf("Expected %q got %q", report.MakeStringSet(wantParent), have)
	}
}
func TestReportLocalNetworks(t *testing.T) {
	r := report.MakeReport().Merge(report.Report{
		Host: report.Topology{
			Nodes: report.Nodes{
				"nonets": report.MakeNode("nonets"),
				"foo": report.MakeNode("foo").WithSets(report.EmptySets.
					Add(host.LocalNetworks, report.MakeStringSet(
						"10.0.0.1/8", "192.168.1.1/24", "10.0.0.1/8", "badnet/33")),
				),
			},
		},
		Overlay: report.Topology{
			Nodes: report.Nodes{
				"router": report.MakeNode("router").WithSets(report.EmptySets.
					Add(host.LocalNetworks, report.MakeStringSet("10.32.0.1/12")),
				),
			},
		},
	})
	want := report.Networks([]*net.IPNet{
		mustParseCIDR("10.0.0.1/8"),
		mustParseCIDR("192.168.1.1/24"),
		mustParseCIDR("10.32.0.1/12"),
	})
	have := render.LocalNetworks(r)
	if !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
func TestApply(t *testing.T) {
	var (
		endpointNodeID = "c"
		endpointNode   = report.MakeNodeWith(endpointNodeID, map[string]string{"5": "6"})
	)

	p := New(0, 0, nil)
	p.AddTagger(NewTopologyTagger())

	r := report.MakeReport()
	r.Endpoint.AddNode(endpointNode)
	r = p.tag(r)

	for _, tuple := range []struct {
		want report.Node
		from report.Topology
		via  string
	}{
		{endpointNode.Merge(report.MakeNode("c").WithTopology(report.Endpoint)), r.Endpoint, endpointNodeID},
	} {
		if want, have := tuple.want, tuple.from.Nodes[tuple.via]; !reflect.DeepEqual(want, have) {
			t.Errorf("want %+v, have %+v", want, have)
		}
	}
}
func benchmarkMerger(b *testing.B, merger app.Merger) {
	makeReport := func() report.Report {
		rpt := report.MakeReport()
		for i := 0; i < 100; i++ {
			rpt.Endpoint.AddNode(report.MakeNode(fmt.Sprintf("%x", rand.Int63())))
		}
		return rpt
	}

	reports := []report.Report{}
	for i := 0; i < numHosts*5; i++ {
		reports = append(reports, makeReport())
	}

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Replace a third of the hosts' worth of reports, then merge them all
		for i := 0; i < numHosts/3; i++ {
			reports[rand.Intn(len(reports))] = makeReport()
		}
		merger.Merge(reports)
	}
}
func TestAPITopologyAddsKubernetes(t *testing.T) {
	router := mux.NewRouter()
	c := app.NewCollector(1 * time.Minute)
	app.RegisterReportPostHandler(c, router)
	app.RegisterTopologyRoutes(router, c)
	ts := httptest.NewServer(router)
	defer ts.Close()

	body := getRawJSON(t, ts, "/api/topology")
	var topologies []app.APITopologyDesc
	decoder := codec.NewDecoderBytes(body, &codec.JsonHandle{})
	if err := decoder.Decode(&topologies); err != nil {
		t.Fatalf("JSON parse error: %s", err)
	}
	equals(t, 4, len(topologies))

	// Enable the kubernetes topologies
	rpt := report.MakeReport()
	rpt.Pod = report.MakeTopology()
	rpt.Pod.Nodes[fixture.ClientPodNodeID] = kubernetes.NewPod(&api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      "pong-a",
			Namespace: "ping",
			Labels:    map[string]string{"ponger": "true"},
		},
		Status: api.PodStatus{
			HostIP: "1.2.3.4",
			ContainerStatuses: []api.ContainerStatus{
				{ContainerID: "container1"},
				{ContainerID: "container2"},
			},
		},
	}).GetNode("")
	buf := &bytes.Buffer{}
	encoder := codec.NewEncoder(buf, &codec.MsgpackHandle{})
	if err := encoder.Encode(rpt); err != nil {
		t.Fatalf("Msgpack encoding error: %s", err)
	}
	checkRequest(t, ts, "POST", "/api/report", buf.Bytes())

	body = getRawJSON(t, ts, "/api/topology")
	decoder = codec.NewDecoderBytes(body, &codec.JsonHandle{})
	if err := decoder.Decode(&topologies); err != nil {
		t.Fatalf("JSON parse error: %s", err)
	}
	equals(t, 4, len(topologies))

	found := false
	for _, topology := range topologies {
		if topology.Name == "Pods" {
			found = true
			break
		}
	}
	if !found {
		t.Error("Could not find pods topology")
	}
}
// Tag implements Tagger.
func (t *Tagger) Tag(r report.Report) (report.Report, error) {
	tree, err := NewProcessTreeStub(t.procWalker)
	if err != nil {
		return report.MakeReport(), err
	}
	t.tag(tree, &r.Process)
	return r, nil
}
// Make sure we don't add a topology and miss it in the Topologies method.
func TestReportTopologies(t *testing.T) {
	var (
		reportType   = reflect.TypeOf(report.MakeReport())
		topologyType = reflect.TypeOf(report.MakeTopology())
	)

	var want int
	for i := 0; i < reportType.NumField(); i++ {
		if reportType.Field(i).Type == topologyType {
			want++
		}
	}

	if have := len(report.MakeReport().Topologies()); want != have {
		t.Errorf("want %d, have %d", want, have)
	}
}
func (c *dynamoDBCollector) getRows(userid string, row int64, start, end time.Time, input report.Report) (report.Report, error) {
	rowKey := fmt.Sprintf("%s-%s", userid, strconv.FormatInt(row, 10))
	resp, err := c.db.Query(&dynamodb.QueryInput{
		TableName: aws.String(tableName),
		KeyConditions: map[string]*dynamodb.Condition{
			hourField: {
				AttributeValueList: []*dynamodb.AttributeValue{
					{S: aws.String(rowKey)},
				},
				ComparisonOperator: aws.String("EQ"),
			},
			tsField: {
				AttributeValueList: []*dynamodb.AttributeValue{
					{N: aws.String(strconv.FormatInt(start.UnixNano(), 10))},
					{N: aws.String(strconv.FormatInt(end.UnixNano(), 10))},
				},
				ComparisonOperator: aws.String("BETWEEN"),
			},
		},
	})
	if err != nil {
		return report.MakeReport(), err
	}

	result := input
	for _, item := range resp.Items {
		b := item[reportField].B
		if b == nil {
			log.Errorf("Empty row!")
			continue
		}
		buf := bytes.NewBuffer(b)
		reader, err := gzip.NewReader(buf)
		if err != nil {
			log.Errorf("Error gunzipping report: %v", err)
			continue
		}
		rep := report.MakeReport()
		if err := codec.NewDecoder(reader, &codec.MsgpackHandle{}).Decode(&rep); err != nil {
			log.Errorf("Failed to decode report: %v", err)
			continue
		}
		result = result.Merge(rep)
	}
	return result, nil
}
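Each DynamoDB item above stores a report as a gzipped, msgpack-encoded blob. The round-trip sketch below shows that encoding in isolation; it assumes the same codec package used above plus bytes and compress/gzip, and the helper names are hypothetical.

// encodeReport gzips a msgpack-encoded report, mirroring what getRows expects to read back.
func encodeReport(rpt report.Report) ([]byte, error) {
	buf := &bytes.Buffer{}
	gz := gzip.NewWriter(buf)
	if err := codec.NewEncoder(gz, &codec.MsgpackHandle{}).Encode(rpt); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil { // flush the gzip stream
		return nil, err
	}
	return buf.Bytes(), nil
}

// decodeReport reverses encodeReport, as in the loop over resp.Items above.
func decodeReport(b []byte) (report.Report, error) {
	rpt := report.MakeReport()
	gz, err := gzip.NewReader(bytes.NewBuffer(b))
	if err != nil {
		return rpt, err
	}
	err = codec.NewDecoder(gz, &codec.MsgpackHandle{}).Decode(&rpt)
	return rpt, err
}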
func TestReportTopology(t *testing.T) {
	r := report.MakeReport()
	if _, ok := r.Topology(report.Container); !ok {
		t.Errorf("Expected %s topology to be found", report.Container)
	}
	if _, ok := r.Topology("foo"); ok {
		t.Errorf("Expected %s topology not to be found", "foo")
	}
}
func TestTagMissingID(t *testing.T) {
	const nodeID = "not-found"
	r := report.MakeReport()
	rpt, _ := NewTopologyTagger().Tag(r)
	_, ok := rpt.Endpoint.Nodes[nodeID]
	if ok {
		t.Error("TopologyTagger erroneously tagged a missing node ID")
	}
}
// Report implements Reporter.
func (r *Reporter) Report() (report.Report, error) {
	result := report.MakeReport()
	processes, err := r.processTopology()
	if err != nil {
		return result, err
	}
	result.Process = result.Process.Merge(processes)
	return result, nil
}
func TestMemoise(t *testing.T) {
	calls := 0
	r := renderFunc(func(rpt report.Report) report.Nodes {
		calls++
		return report.Nodes{rpt.ID: report.MakeNode(rpt.ID)}
	})
	m := render.Memoise(r)
	rpt1 := report.MakeReport()

	result1 := m.Render(rpt1, nil)
	// it should have rendered it.
	if _, ok := result1[rpt1.ID]; !ok {
		t.Errorf("Expected rendered report to contain a node, but got: %v", result1)
	}
	if calls != 1 {
		t.Errorf("Expected renderer to have been called the first time")
	}

	result2 := m.Render(rpt1, nil)
	if !reflect.DeepEqual(result1, result2) {
		t.Errorf("Expected memoised result to be returned: %s", test.Diff(result1, result2))
	}
	if calls != 1 {
		t.Errorf("Expected renderer to not have been called the second time")
	}

	rpt2 := report.MakeReport()
	result3 := m.Render(rpt2, nil)
	if reflect.DeepEqual(result1, result3) {
		t.Errorf("Expected different result for different report, but were the same")
	}
	if calls != 2 {
		t.Errorf("Expected renderer to have been called again for a different report")
	}

	render.ResetCache()
	result4 := m.Render(rpt1, nil)
	if !reflect.DeepEqual(result1, result4) {
		t.Errorf("Expected original result to be returned: %s", test.Diff(result1, result4))
	}
	if calls != 3 {
		t.Errorf("Expected renderer to have been called again after cache reset")
	}
}
func (dumbMerger) Merge(reports []report.Report) report.Report {
	rpt := report.MakeReport()
	id := murmur3.New64()
	for _, r := range reports {
		rpt = rpt.Merge(r)
		id.Write([]byte(r.ID))
	}
	rpt.ID = fmt.Sprintf("%x", id.Sum64())
	return rpt
}
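dumbMerger gives the merged report a deterministic ID by hashing the constituent report IDs, so merging the same set of reports always yields the same ID. A tiny sketch of that idea on its own, assuming the murmur3 package used above (plus fmt); the helper name is hypothetical.

// mergedID hashes a list of report IDs into one stable identifier,
// mirroring how dumbMerger derives the ID of a merged report.
func mergedID(ids ...string) string {
	h := murmur3.New64()
	for _, id := range ids {
		h.Write([]byte(id))
	}
	return fmt.Sprintf("%x", h.Sum64())
}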
func (r *Reporter) podEvent(e Event, pod Pod) {
	switch e {
	case ADD:
		rpt := report.MakeReport()
		rpt.Shortcut = true
		rpt.Pod.AddNode(pod.GetNode(r.probeID))
		r.probe.Publish(rpt)
	case DELETE:
		rpt := report.MakeReport()
		rpt.Shortcut = true
		rpt.Pod.AddNode(
			report.MakeNodeWith(
				report.MakePodNodeID(pod.UID()),
				map[string]string{State: StateDeleted},
			),
		)
		r.probe.Publish(rpt)
	}
}
func TestNodeTables(t *testing.T) {
	inputs := []struct {
		name string
		rpt  report.Report
		node report.Node
		want []report.Table
	}{
		{
			name: "container",
			rpt: report.Report{
				Container: report.MakeTopology().
					WithTableTemplates(docker.ContainerTableTemplates),
			},
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID:            fixture.ClientContainerID,
				docker.LabelPrefix + "label1": "label1value",
				docker.ContainerState:         docker.StateRunning,
			}).WithTopology(report.Container).WithSets(report.EmptySets.
				Add(docker.ContainerIPs, report.MakeStringSet("10.10.10.0/24", "10.10.10.1/24")),
			),
			want: []report.Table{
				{
					ID:    docker.EnvPrefix,
					Label: "Environment Variables",
					Rows:  []report.MetadataRow{},
				},
				{
					ID:    docker.LabelPrefix,
					Label: "Docker Labels",
					Rows: []report.MetadataRow{
						{
							ID:    "label_label1",
							Label: "label1",
							Value: "label1value",
						},
					},
				},
			},
		},
		{
			name: "unknown topology",
			rpt:  report.MakeReport(),
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID: fixture.ClientContainerID,
			}).WithTopology("foobar"),
			want: nil,
		},
	}

	for _, input := range inputs {
		have := detailed.NodeTables(input.rpt, input.node)
		if !reflect.DeepEqual(input.want, have) {
			t.Errorf("%s: %s", input.name, test.Diff(input.want, have))
		}
	}
}
// Report gets the latest report from the plugin
func (p *Plugin) Report() (result report.Report, err error) {
	result = report.MakeReport()
	defer func() {
		p.setStatus(err)
		result.Plugins = result.Plugins.Add(p.PluginSpec)
		if err != nil {
			result = report.MakeReport()
			result.Plugins = xfer.MakePluginSpecs(p.PluginSpec)
		}
	}()

	if err := p.get("/report", p.handshakeMetadata, &result); err != nil {
		return result, err
	}
	if result.Plugins.Size() != 1 {
		return result, fmt.Errorf("report must contain exactly one plugin (found %d)", result.Plugins.Size())
	}

	key := result.Plugins.Keys()[0]
	spec, _ := result.Plugins.Lookup(key)
	p.PluginSpec = spec
	foundReporter := false
	for _, i := range spec.Interfaces {
		if i == "reporter" {
			foundReporter = true
			break
		}
	}
	switch {
	case spec.APIVersion != p.expectedAPIVersion:
		err = fmt.Errorf("incorrect API version: expected %q, got %q", p.expectedAPIVersion, spec.APIVersion)
	case spec.ID == "":
		err = fmt.Errorf("spec must contain an id")
	case spec.Label == "":
		err = fmt.Errorf("spec must contain a label")
	case !foundReporter:
		err = fmt.Errorf("spec must implement the \"reporter\" interface")
	}
	return result, err
}
func TestMerge(t *testing.T) {
	var (
		hostID = "xyz"
		src    = newMockSource([]byte{}, nil)
		on     = time.Millisecond
		off    = time.Millisecond
		rpt    = report.MakeReport()
		p      = sniff.Packet{
			SrcIP:     "1.0.0.0",
			SrcPort:   "1000",
			DstIP:     "2.0.0.0",
			DstPort:   "2000",
			Network:   512,
			Transport: 256,
		}

		_, ipnet, _ = net.ParseCIDR(p.SrcIP + "/24") // ;)
		localNets   = report.Networks([]*net.IPNet{ipnet})
	)

	sniff.New(hostID, localNets, src, on, off).Merge(p, &rpt)

	var (
		srcEndpointNodeID = report.MakeEndpointNodeID(hostID, p.SrcIP, p.SrcPort)
		dstEndpointNodeID = report.MakeEndpointNodeID(hostID, p.DstIP, p.DstPort)
	)
	if want, have := (report.Topology{
		Nodes: report.Nodes{
			srcEndpointNodeID: report.MakeNode(srcEndpointNodeID).WithEdge(dstEndpointNodeID, report.EdgeMetadata{
				EgressPacketCount: newu64(1),
				EgressByteCount:   newu64(256),
			}),
			dstEndpointNodeID: report.MakeNode(dstEndpointNodeID),
		},
	}), rpt.Endpoint; !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}

	var (
		srcAddressNodeID = report.MakeAddressNodeID(hostID, p.SrcIP)
		dstAddressNodeID = report.MakeAddressNodeID(hostID, p.DstIP)
	)
	if want, have := (report.Topology{
		Nodes: report.Nodes{
			srcAddressNodeID: report.MakeNode(srcAddressNodeID).WithEdge(dstAddressNodeID, report.EdgeMetadata{
				EgressPacketCount: newu64(1),
				EgressByteCount:   newu64(512),
			}),
			dstAddressNodeID: report.MakeNode(dstAddressNodeID),
		},
	}), rpt.Address; !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
// Report implements the Reporter interface
func (r *Registry) Report() (report.Report, error) {
	rpt := report.MakeReport()

	// All plugins are assumed to (and must) implement reporter
	r.ForEach(func(plugin *Plugin) {
		pluginReport, err := plugin.Report()
		if err != nil {
			log.Errorf("plugins: %s: /report error: %v", plugin.socket, err)
		}
		rpt = rpt.Merge(pluginReport)
	})

	return rpt, nil
}
func TestCollector(t *testing.T) {
	ctx := context.Background()
	window := 10 * time.Second
	c := app.NewCollector(window)

	r1 := report.MakeReport()
	r1.Endpoint.AddNode(report.MakeNode("foo"))

	r2 := report.MakeReport()
	r2.Endpoint.AddNode(report.MakeNode("foo"))

	have, err := c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := report.MakeReport(); !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}

	c.Add(ctx, r1)
	have, err = c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := r1; !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}

	c.Add(ctx, r2)

	merged := report.MakeReport()
	merged = merged.Merge(r1)
	merged = merged.Merge(r2)
	have, err = c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := merged; !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}
func TestCollectorExpire(t *testing.T) {
	now := time.Now()
	mtime.NowForce(now)
	defer mtime.NowReset()

	ctx := context.Background()
	window := 10 * time.Second
	c := app.NewCollector(window)

	// 1st check the collector is empty
	have, err := c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := report.MakeReport(); !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}

	// Now check an added report is returned
	r1 := report.MakeReport()
	r1.Endpoint.AddNode(report.MakeNode("foo"))
	c.Add(ctx, r1)
	have, err = c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := r1; !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}

	// Finally move time forward to expire the report
	mtime.NowForce(now.Add(window))
	have, err = c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := report.MakeReport(); !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}
func TestFilterUnconnectedSelf(t *testing.T) {
	// Test nodes that are only connected to themselves are filtered.
	{
		nodes := report.Nodes{
			"foo": report.MakeNode("foo").WithAdjacent("foo"),
		}
		renderer := mockRenderer{Nodes: nodes}
		have := renderer.Render(report.MakeReport(), render.FilterUnconnected)
		if len(have) > 0 {
			t.Error("expected node only connected to self to be removed")
		}
	}
}
func TestSmartMerger(t *testing.T) {
	// Use 3 reports _WITH SAME ID_
	report1 := report.MakeReport()
	report1.Endpoint.AddNode(report.MakeNode("foo"))
	report1.ID = "foo"
	report2 := report.MakeReport()
	report2.Endpoint.AddNode(report.MakeNode("bar"))
	report2.ID = "foo"
	report3 := report.MakeReport()
	report3.Endpoint.AddNode(report.MakeNode("baz"))
	report3.ID = "foo"
	reports := []report.Report{
		report1, report2, report3,
	}

	// Reports with the same ID are deduped, so only the first survives the merge
	want := report.MakeReport()
	want.Endpoint.AddNode(report.MakeNode("foo"))

	merger := app.NewSmartMerger()
	if have := merger.Merge(reports); !reflect.DeepEqual(have, want) {
		t.Errorf("Bad merge: %s", test.Diff(have, want))
	}
}
func loadReport() (report.Report, error) {
	if *benchReportFile == "" {
		return fixture.Report, nil
	}

	rpt := report.MakeReport()
	b, err := ioutil.ReadFile(*benchReportFile)
	if err != nil {
		return rpt, err
	}
	err = codec.NewDecoderBytes(b, &codec.JsonHandle{}).Decode(&rpt)
	return rpt, err
}