func BenchmarkNodeSetMerge(b *testing.B) { n, other := report.NodeSet{}, report.NodeSet{} for i := 0; i < 600; i++ { n = n.Add( report.MakeNodeWith(fmt.Sprint(i), map[string]string{ "a": "1", "b": "2", }), ) } for i := 400; i < 1000; i++ { other = other.Add( report.MakeNodeWith(fmt.Sprint(i), map[string]string{ "c": "1", "d": "2", }), ) } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { benchmarkResult = n.Merge(other) } }
func (r *Reporter) addConnection(rpt *report.Report, t fourTuple, extraFromNode, extraToNode map[string]string) { // Update endpoint topology if !r.includeProcesses { return } var ( fromEndpointNodeID = report.MakeEndpointNodeID(r.hostID, t.fromAddr, strconv.Itoa(int(t.fromPort))) toEndpointNodeID = report.MakeEndpointNodeID(r.hostID, t.toAddr, strconv.Itoa(int(t.toPort))) fromNode = report.MakeNodeWith(fromEndpointNodeID, map[string]string{ Addr: t.fromAddr, Port: strconv.Itoa(int(t.fromPort)), }).WithEdge(toEndpointNodeID, report.EdgeMetadata{}) toNode = report.MakeNodeWith(toEndpointNodeID, map[string]string{ Addr: t.toAddr, Port: strconv.Itoa(int(t.toPort)), }) ) // In case we have a reverse resolution for the IP, we can use it for // the name... if toNames, err := r.reverseResolver.get(t.toAddr); err == nil { toNode = toNode.WithSet(ReverseDNSNames, report.MakeStringSet(toNames...)) } if extraFromNode != nil { fromNode = fromNode.WithLatests(extraFromNode) } if extraToNode != nil { toNode = toNode.WithLatests(extraToNode) } rpt.Endpoint = rpt.Endpoint.AddNode(fromNode) rpt.Endpoint = rpt.Endpoint.AddNode(toNode) }
// TestMapProcess2Container runs MapProcess2Container over a few
// representative process nodes (empty, contained, uncontained).
func TestMapProcess2Container(t *testing.T) {
	inputs := []testcase{
		{"empty", report.MakeNode("empty"), true},
		{"basic process", report.MakeNodeWith("basic", map[string]string{process.PID: "201", docker.ContainerID: "a1b2c3"}), true},
		{"uncontained", report.MakeNodeWith("uncontained", map[string]string{process.PID: "201", report.HostNodeID: report.MakeHostNodeID("foo")}), true},
	}
	for _, input := range inputs {
		testMap(t, render.MapProcess2Container, input)
	}
}
// TestNodeTables checks detailed.NodeTables: a container node whose
// topology carries table templates yields its env/label tables, while a
// node in an unknown topology yields nil.
func TestNodeTables(t *testing.T) {
	inputs := []struct {
		name string
		rpt  report.Report
		node report.Node
		want []report.Table
	}{
		{
			name: "container",
			rpt: report.Report{
				Container: report.MakeTopology().
					WithTableTemplates(docker.ContainerTableTemplates),
			},
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID:            fixture.ClientContainerID,
				docker.LabelPrefix + "label1": "label1value",
				docker.ContainerState:         docker.StateRunning,
			}).WithTopology(report.Container).WithSets(report.EmptySets.
				Add(docker.ContainerIPs, report.MakeStringSet("10.10.10.0/24", "10.10.10.1/24")),
			),
			want: []report.Table{
				{
					// No env vars on the node, so the env table is empty.
					ID:    docker.EnvPrefix,
					Label: "Environment Variables",
					Rows:  []report.MetadataRow{},
				},
				{
					// The single docker label shows up as one row.
					ID:    docker.LabelPrefix,
					Label: "Docker Labels",
					Rows: []report.MetadataRow{
						{
							ID:    "label_label1",
							Label: "label1",
							Value: "label1value",
						},
					},
				},
			},
		},
		{
			// Unknown topologies have no table templates, so no tables.
			name: "unknown topology",
			rpt:  report.MakeReport(),
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID: fixture.ClientContainerID,
			}).WithTopology("foobar"),
			want: nil,
		},
	}
	for _, input := range inputs {
		have := detailed.NodeTables(input.rpt, input.node)
		if !reflect.DeepEqual(input.want, have) {
			t.Errorf("%s: %s", input.name, test.Diff(input.want, have))
		}
	}
}
// TestTagger verifies that the docker tagger decorates process nodes with
// the owning container's ID (walking up the process tree when the PID
// itself is not a container PID), plus container and container-image
// parents.
func TestTagger(t *testing.T) {
	mtime.NowForce(time.Now())
	defer mtime.NowReset()

	// Stub the process tree so that PID 3 is a child of PID 2.
	oldProcessTree := docker.NewProcessTreeStub
	defer func() { docker.NewProcessTreeStub = oldProcessTree }()
	docker.NewProcessTreeStub = func(_ process.Walker) (process.Tree, error) {
		return &mockProcessTree{map[int]int{3: 2}}, nil
	}

	var (
		pid1NodeID = report.MakeProcessNodeID("somehost.com", "2")
		pid2NodeID = report.MakeProcessNodeID("somehost.com", "3")
	)
	input := report.MakeReport()
	// NOTE(review): AddNode's return value is discarded here — this relies
	// on AddNode mutating the topology's Nodes map in place; confirm.
	input.Process.AddNode(report.MakeNodeWith(pid1NodeID, map[string]string{process.PID: "2"}))
	input.Process.AddNode(report.MakeNodeWith(pid2NodeID, map[string]string{process.PID: "3"}))

	have, err := docker.NewTagger(mockRegistryInstance, nil).Tag(input)
	if err != nil {
		t.Errorf("%v", err)
	}

	// Processes should be tagged with their container ID, and parents
	for _, nodeID := range []string{pid1NodeID, pid2NodeID} {
		node, ok := have.Process.Nodes[nodeID]
		if !ok {
			t.Errorf("Expected process node %s, but not found", nodeID)
		}

		// node should have the container id added
		if have, ok := node.Latest.Lookup(docker.ContainerID); !ok || have != "ping" {
			t.Errorf("Expected process node %s to have container id %q, got %q", nodeID, "ping", have)
		}

		// node should have the container as a parent
		if have, ok := node.Parents.Lookup(report.Container); !ok || !have.Contains(report.MakeContainerNodeID("ping")) {
			t.Errorf("Expected process node %s to have container %q as a parent, got %q", nodeID, "ping", have)
		}

		// node should have the container image as a parent
		if have, ok := node.Parents.Lookup(report.ContainerImage); !ok || !have.Contains(report.MakeContainerImageNodeID("baz")) {
			t.Errorf("Expected process node %s to have container image %q as a parent, got %q", nodeID, "baz", have)
		}
	}
}
func TestTagger(t *testing.T) { var ( hostID = "foo" endpointNodeID = report.MakeEndpointNodeID(hostID, "1.2.3.4", "56789") // hostID ignored node = report.MakeNodeWith(endpointNodeID, map[string]string{"foo": "bar"}) ) r := report.MakeReport() r.Process.AddNode(node) rpt, _ := host.NewTagger(hostID).Tag(r) have := rpt.Process.Nodes[endpointNodeID].Copy() // It should now have the host ID wantHostID := report.MakeHostNodeID(hostID) if hostID, ok := have.Latest.Lookup(report.HostNodeID); !ok || hostID != wantHostID { t.Errorf("Expected %q got %q", wantHostID, report.MakeHostNodeID(hostID)) } // It should still have the other keys want := "bar" if have, ok := have.Latest.Lookup("foo"); !ok || have != want { t.Errorf("Expected %q got %q", want, have) } // It should have the host as a parent wantParent := report.MakeHostNodeID(hostID) if have, ok := have.Parents.Lookup(report.Host); !ok || len(have) != 1 || have[0] != wantParent { t.Errorf("Expected %q got %q", report.MakeStringSet(wantParent), have) } }
func TestApply(t *testing.T) { var ( endpointNodeID = "c" endpointNode = report.MakeNodeWith(endpointNodeID, map[string]string{"5": "6"}) ) p := New(0, 0, nil) p.AddTagger(NewTopologyTagger()) r := report.MakeReport() r.Endpoint.AddNode(endpointNode) r = p.tag(r) for _, tuple := range []struct { want report.Node from report.Topology via string }{ {endpointNode.Merge(report.MakeNode("c").WithTopology(report.Endpoint)), r.Endpoint, endpointNodeID}, } { if want, have := tuple.want, tuple.from.Nodes[tuple.via]; !reflect.DeepEqual(want, have) { t.Errorf("want %+v, have %+v", want, have) } } }
// Report implements Reporter. func (w *Weave) Report() (report.Report, error) { w.mtx.RLock() defer w.mtx.RUnlock() r := report.MakeReport() r.Container = r.Container.WithMetadataTemplates(report.MetadataTemplates{ WeaveMACAddress: {ID: WeaveMACAddress, Label: "Weave MAC", From: report.FromLatest, Priority: 17}, WeaveDNSHostname: {ID: WeaveDNSHostname, Label: "Weave DNS Name", From: report.FromLatest, Priority: 18}, }) for _, peer := range w.statusCache.Router.Peers { r.Overlay.AddNode(report.MakeNodeWith(report.MakeOverlayNodeID(peer.Name), map[string]string{ WeavePeerName: peer.Name, WeavePeerNickName: peer.NickName, })) } if w.statusCache.IPAM.DefaultSubnet != "" { r.Overlay.AddNode( report.MakeNode(report.MakeOverlayNodeID(w.statusCache.Router.Name)).WithSets( report.MakeSets().Add(host.LocalNetworks, report.MakeStringSet(w.statusCache.IPAM.DefaultSubnet)), ), ) } return r, nil }
// TestContainerHostnameFilterRenderer adds a system container into the
// topology and ensures it is filtered out correctly.
func TestContainerHostnameFilterRenderer(t *testing.T) {
	input := fixture.Report.Copy()
	clientContainer2ID := "f6g7h8i9j1"
	clientContainer2NodeID := report.MakeContainerNodeID(clientContainer2ID)
	// The extra container shares the client container's hostname but
	// carries the "system" role label, so FilterApplication must drop it.
	input.Container.AddNode(report.MakeNodeWith(clientContainer2NodeID, map[string]string{
		docker.LabelPrefix + "works.weave.role": "system",
		docker.ContainerHostname:                fixture.ClientContainerHostname,
		report.HostNodeID:                       fixture.ClientHostNodeID,
	}).
		WithParents(report.EmptySets.
			Add("host", report.MakeStringSet(fixture.ClientHostNodeID)),
		).WithTopology(report.Container))
	have := Prune(render.ContainerHostnameRenderer.Render(input, render.FilterApplication))
	want := Prune(expected.RenderedContainerHostnames)
	// Test works by virtue of the RenderedContainerHostname only having a container
	// counter == 1
	if !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}
// MetaNode gets the node metadata func (m meta) MetaNode(id string) report.Node { return report.MakeNodeWith(id, map[string]string{ ID: m.ID(), Name: m.Name(), Namespace: m.Namespace(), Created: m.Created(), }).AddTable(LabelPrefix, m.Labels()) }
func (c *container) getBaseNode() report.Node { result := report.MakeNodeWith(report.MakeContainerNodeID(c.ID()), map[string]string{ ContainerID: c.ID(), ContainerCreated: c.container.Created.Format(time.RFC822), ContainerCommand: c.container.Path + " " + strings.Join(c.container.Args, " "), ImageID: c.Image(), ContainerHostname: c.Hostname(), }).WithParents(report.EmptySets. Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID(c.Image()))), ) result = result.AddTable(LabelPrefix, c.container.Config.Labels) result = result.AddTable(EnvPrefix, c.env()) return result }
func BenchmarkNodeSetAdd(b *testing.B) { n := report.EmptyNodeSet for i := 0; i < 600; i++ { n = n.Add( report.MakeNodeWith(fmt.Sprint(i), map[string]string{ "a": "1", "b": "2", }), ) } node := report.MakeNodeWith("401.5", map[string]string{ "a": "1", "b": "2", }) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { benchmarkResult = n.Add(node) } }
func BenchmarkMakeNodeSet(b *testing.B) { nodes := []report.Node{} for i := 1000; i >= 0; i-- { nodes = append(nodes, report.MakeNodeWith(fmt.Sprint(i), map[string]string{ "a": "1", "b": "2", })) } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { benchmarkResult = report.MakeNodeSet(nodes...) } }
// TestNodeMetadata checks detailed.NodeMetadata: a container node yields
// its prioritized metadata rows, while a node in an unknown topology
// yields nil.
func TestNodeMetadata(t *testing.T) {
	inputs := []struct {
		name string
		node report.Node
		want []report.MetadataRow
	}{
		{
			name: "container",
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID:            fixture.ClientContainerID,
				docker.LabelPrefix + "label1": "label1value",
				docker.ContainerStateHuman:    docker.StateRunning,
			}).WithTopology(report.Container).WithSets(report.EmptySets.
				Add(docker.ContainerIPs, report.MakeStringSet("10.10.10.0/24", "10.10.10.1/24")),
			),
			// Rows come back ordered by priority; the label has no
			// metadata template, so it produces no row.
			want: []report.MetadataRow{
				{ID: docker.ContainerID, Label: "ID", Value: fixture.ClientContainerID, Priority: 1},
				{ID: docker.ContainerStateHuman, Label: "State", Value: "running", Priority: 2},
				{ID: docker.ContainerIPs, Label: "IPs", Value: "10.10.10.0/24, 10.10.10.1/24", Priority: 14},
			},
		},
		{
			// Unknown topologies have no metadata templates, so no rows.
			name: "unknown topology",
			node: report.MakeNodeWith(fixture.ClientContainerNodeID, map[string]string{
				docker.ContainerID: fixture.ClientContainerID,
			}).WithTopology("foobar"),
			want: nil,
		},
	}
	for _, input := range inputs {
		have := detailed.NodeMetadata(fixture.Report, input.node)
		if !reflect.DeepEqual(input.want, have) {
			t.Errorf("%s: %s", input.name, test.Diff(input.want, have))
		}
	}
}
func TestTagger(t *testing.T) { rpt := report.MakeReport() rpt.Container.AddNode(report.MakeNodeWith("container1", map[string]string{ docker.LabelPrefix + "io.kubernetes.pod.uid": "123456", })) rpt, err := kubernetes.NewReporter(newMockClient(), nil, "", "", nil).Tag(rpt) if err != nil { t.Errorf("Unexpected error: %v", err) } have, ok := rpt.Container.Nodes["container1"].Parents.Lookup(report.Pod) want := report.EmptyStringSet.Add(report.MakePodNodeID("123456")) if !ok || !reflect.DeepEqual(have, want) { t.Errorf("Expected container to have pod parent %v %v", have, want) } }
// tag walks the topology's nodes, resolving each node's PID (or the
// nearest ancestor PID, via the process tree) to a running container,
// and decorates matching nodes with the container ID plus container and
// container-image parents.
func (t *Tagger) tag(tree process.Tree, topology *report.Topology) {
	for nodeID, node := range topology.Nodes {
		pidStr, ok := node.Latest.Lookup(process.PID)
		if !ok {
			continue
		}
		pid, err := strconv.ParseUint(pidStr, 10, 64)
		if err != nil {
			continue
		}
		var (
			c         Container
			candidate = int(pid)
		)
		// Walk up the process tree until we find a PID that belongs to a
		// container, or run out of ancestors (GetParent errors).
		t.registry.LockedPIDLookup(func(lookup func(int) Container) {
			for {
				c = lookup(candidate)
				if c != nil {
					break
				}
				candidate, err = tree.GetParent(candidate)
				if err != nil {
					break
				}
			}
		})
		// Skip when no container matched, the container is stopped, or
		// its PID is 1 — NOTE(review): presumably this excludes processes
		// that resolve to the root/init namespace; confirm.
		if c == nil || ContainerIsStopped(c) || c.PID() == 1 {
			continue
		}
		topology.AddNode(report.MakeNodeWith(nodeID, map[string]string{
			ContainerID: c.ID(),
		}).WithParents(report.EmptySets.
			Add(report.Container, report.MakeStringSet(report.MakeContainerNodeID(c.ID()))).
			Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID(c.Image()))),
		))
	}
}
func (r *Reporter) podEvent(e Event, pod Pod) { switch e { case ADD: rpt := report.MakeReport() rpt.Shortcut = true rpt.Pod.AddNode(pod.GetNode(r.probeID)) r.probe.Publish(rpt) case DELETE: rpt := report.MakeReport() rpt.Shortcut = true rpt.Pod.AddNode( report.MakeNodeWith( report.MakePodNodeID(pod.UID()), map[string]string{State: StateDeleted}, ), ) r.probe.Publish(rpt) } }
func (r *Reporter) containerImageTopology() report.Topology { result := report.MakeTopology(). WithMetadataTemplates(ContainerImageMetadataTemplates). WithTableTemplates(ContainerImageTableTemplates) r.registry.WalkImages(func(image *docker_client.APIImages) { imageID := trimImageID(image.ID) nodeID := report.MakeContainerImageNodeID(imageID) node := report.MakeNodeWith(nodeID, map[string]string{ ImageID: imageID, }) node = node.AddTable(ImageLabelPrefix, image.Labels) if len(image.RepoTags) > 0 { node = node.WithLatests(map[string]string{ImageName: image.RepoTags[0]}) } result.AddNode(node) }) return result }
// TestProbe starts a probe with a mock reporter and asserts that the
// published report equals the reporter's output.
func TestProbe(t *testing.T) {
	// marshalling->unmarshaling is not idempotent due to `json:"omitempty"`
	// tags, transforming empty slices into nils. So, we make DeepEqual
	// happy by setting empty `json:"omitempty"` entries to nil
	const probeID = "probeid"
	now := time.Now()
	mtime.NowForce(now)
	defer mtime.NowReset()

	want := report.MakeReport()
	node := report.MakeNodeWith("a", map[string]string{"b": "c"})
	node.Metrics = nil // omitempty
	// The Controls fields are omitempty too, so zero them all out.
	want.Endpoint.Controls = nil
	want.Process.Controls = nil
	want.Container.Controls = nil
	want.ContainerImage.Controls = nil
	want.Pod.Controls = nil
	want.Service.Controls = nil
	want.Deployment.Controls = nil
	want.ReplicaSet.Controls = nil
	want.Host.Controls = nil
	want.Overlay.Controls = nil
	want.Endpoint.AddNode(node)

	pub := mockPublisher{make(chan report.Report, 10)}
	p := New(10*time.Millisecond, 100*time.Millisecond, pub)
	p.AddReporter(mockReporter{want})
	p.Start()
	defer p.Stop()

	test.Poll(t, 300*time.Millisecond, want, func() interface{} {
		return <-pub.have
	})
}
func TestNode(t *testing.T) { { node := report.MakeNodeWith("foo", map[string]string{ "foo": "bar", }) if v, _ := node.Latest.Lookup("foo"); v != "bar" { t.Errorf("want foo, have %s", v) } } { node := report.MakeNode("foo").WithCounters( map[string]int{"foo": 1}, ) if value, _ := node.Counters.Lookup("foo"); value != 1 { t.Errorf("want foo, have %d", value) } } { node := report.MakeNode("foo").WithAdjacent("foo") if node.Adjacency[0] != "foo" { t.Errorf("want foo, have %v", node.Adjacency) } } { node := report.MakeNode("foo").WithEdge("foo", report.EdgeMetadata{ EgressPacketCount: newu64(13), }) if node.Adjacency[0] != "foo" { t.Errorf("want foo, have %v", node.Adjacency) } if v, ok := node.Edges.Lookup("foo"); ok && *v.EgressPacketCount != 13 { t.Errorf("want 13, have %v", node.Edges) } } }
			EgressPacketCount: newu64(60),
			EgressByteCount:   newu64(600),
		}),
		// Spied Google endpoint with no outgoing adjacency of its own.
		GoogleEndpointNodeID: report.MakeNode(GoogleEndpointNodeID).WithTopology(report.Endpoint).WithLatests(map[string]string{
			endpoint.Addr:      GoogleIP,
			endpoint.Port:      GooglePort,
			endpoint.Procspied: True,
		}),
	},
},
Process: report.Topology{
	Nodes: report.Nodes{
		// Client process 1: parented to its host, container, and image,
		// with CPU/memory metrics attached.
		ClientProcess1NodeID: report.MakeNodeWith(ClientProcess1NodeID, map[string]string{
			process.PID:        Client1PID,
			process.Name:       Client1Name,
			docker.ContainerID: ClientContainerID,
			report.HostNodeID:  ClientHostNodeID,
		}).
			WithTopology(report.Process).WithParents(report.EmptySets.
			Add("host", report.MakeStringSet(ClientHostNodeID)).
			Add("container", report.MakeStringSet(ClientContainerNodeID)).
			Add("container_image", report.MakeStringSet(ClientContainerImageNodeID)),
		).WithMetrics(report.Metrics{
			process.CPUUsage:    ClientProcess1CPUMetric,
			process.MemoryUsage: ClientProcess1MemoryMetric,
		}),
		// Client process 2: same container/host as process 1.
		ClientProcess2NodeID: report.MakeNodeWith(ClientProcess2NodeID, map[string]string{
			process.PID:        Client2PID,
			process.Name:       Client2Name,
			docker.ContainerID: ClientContainerID,
			report.HostNodeID:  ClientHostNodeID,
// demoReport builds a synthetic report with nodeCount hosts and
// 2*nodeCount random endpoint connections drawn from a small pool of
// plausible client/server process pairs.
func demoReport(nodeCount int) report.Report {
	r := report.MakeReport()

	// Make up some plausible IPv4 numbers.
	hosts := []string{}
	ip := [4]int{192, 168, 1, 1}
	for range make([]struct{}, nodeCount) {
		hosts = append(hosts, fmt.Sprintf("%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3]))
		ip[3]++
		if ip[3] > 200 {
			ip[2]++
			ip[3] = 1
		}
	}
	// Some non-local ones, too.
	hosts = append(hosts, []string{"1.2.3.4", "2.3.4.5"}...)

	_, localNet, err := net.ParseCIDR("192.168.0.0/16")
	if err != nil {
		panic(err)
	}

	type conn struct {
		srcProc, dstProc string
		dstPort          int
	}
	// Pool of connection shapes to sample from.
	procPool := []conn{
		{srcProc: "curl", dstPort: 80, dstProc: "apache"},
		{srcProc: "wget", dstPort: 80, dstProc: "apache"},
		{srcProc: "curl", dstPort: 80, dstProc: "nginx"},
		{srcProc: "curl", dstPort: 8080, dstProc: "app1"},
		{srcProc: "nginx", dstPort: 8080, dstProc: "app1"},
		{srcProc: "nginx", dstPort: 8080, dstProc: "app2"},
		{srcProc: "nginx", dstPort: 8080, dstProc: "app3"},
	}
	connectionCount := nodeCount * 2
	for i := 0; i < connectionCount; i++ {
		var (
			c         = procPool[rand.Intn(len(procPool))]
			src       = hosts[rand.Intn(len(hosts))]
			dst       = hosts[rand.Intn(len(hosts))]
			srcPort   = rand.Intn(50000) + 10000
			srcPortID = report.MakeEndpointNodeID("", src, strconv.Itoa(srcPort))
			dstPortID = report.MakeEndpointNodeID("", dst, strconv.Itoa(c.dstPort))
		)

		// Endpoint topology
		r.Endpoint = r.Endpoint.AddNode(report.MakeNodeWith(srcPortID, map[string]string{
			process.PID: "4000",
			"name":      c.srcProc,
			"domain":    "node-" + src,
		}).
			WithEdge(dstPortID, report.EdgeMetadata{}))
		r.Endpoint = r.Endpoint.AddNode(report.MakeNodeWith(dstPortID, map[string]string{
			process.PID: "4000",
			"name":      c.dstProc,
			"domain":    "node-" + dst,
		}).
			WithEdge(srcPortID, report.EdgeMetadata{}))

		// Host data
		// NOTE(review): every iteration re-adds the same "hostX" node —
		// presumably intentional (a single demo host); confirm.
		r.Host = r.Host.AddNode(report.MakeNodeWith("hostX", map[string]string{
			"ts":             time.Now().UTC().Format(time.RFC3339Nano),
			"host_name":      "host-x",
			"local_networks": localNet.String(),
			"os":             "linux",
		}))
	}
	return r
}
// TestNat tests that two containers, on the docker network, get their
// connections mapped correctly.
// The setup is this:
//
//   container2 (10.0.47.2:222222), host2 (2.3.4.5:22223) ->
//     host1 (1.2.3.4:80), container1 (10.0.47.1:80)
func TestNat(t *testing.T) {
	mtime.NowForce(mtime.Now())
	defer mtime.NowReset()

	// from the PoV of host1
	{
		f := makeFlow(updateType)
		addIndependant(&f, 1, "")
		f.Original = addMeta(&f, "original", "2.3.4.5", "1.2.3.4", 222222, 80)
		f.Reply = addMeta(&f, "reply", "10.0.47.1", "2.3.4.5", 80, 222222)
		ct := &mockFlowWalker{
			flows: []flow{f},
		}

		have := report.MakeReport()
		originalID := report.MakeEndpointNodeID("host1", "10.0.47.1", "80")
		have.Endpoint.AddNode(report.MakeNodeWith(originalID, map[string]string{
			Addr:        "10.0.47.1",
			Port:        "80",
			"foo":       "bar",
			Procspied:   "true",
		}))

		// The NAT mapper should add a copy of the container endpoint keyed
		// by the host address, with a "copy_of" pointer to the original.
		want := have.Copy()
		wantID := report.MakeEndpointNodeID("host1", "1.2.3.4", "80")
		want.Endpoint.AddNode(report.MakeNodeWith(wantID, map[string]string{
			Addr:        "1.2.3.4",
			Port:        "80",
			"copy_of":   originalID,
			"foo":       "bar",
			Procspied:   "true",
		}))

		makeNATMapper(ct).applyNAT(have, "host1")
		if !reflect.DeepEqual(want, have) {
			t.Fatal(test.Diff(want, have))
		}
	}

	// from the PoV of host2
	{
		f := makeFlow(updateType)
		addIndependant(&f, 2, "")
		f.Original = addMeta(&f, "original", "10.0.47.2", "1.2.3.4", 22222, 80)
		f.Reply = addMeta(&f, "reply", "1.2.3.4", "2.3.4.5", 80, 22223)
		ct := &mockFlowWalker{
			flows: []flow{f},
		}

		have := report.MakeReport()
		originalID := report.MakeEndpointNodeID("host2", "10.0.47.2", "22222")
		have.Endpoint.AddNode(report.MakeNodeWith(originalID, map[string]string{
			Addr:        "10.0.47.2",
			Port:        "22222",
			"foo":       "baz",
			Procspied:   "true",
		}))

		want := have.Copy()
		want.Endpoint.AddNode(report.MakeNodeWith(report.MakeEndpointNodeID("host2", "2.3.4.5", "22223"), map[string]string{
			Addr:        "2.3.4.5",
			Port:        "22223",
			"copy_of":   originalID,
			"foo":       "baz",
			Procspied:   "true",
		}))

		// NOTE(review): scope is "host1" here even though this case is
		// host2's point of view — confirm this is intentional.
		makeNATMapper(ct).applyNAT(have, "host1")
		if !reflect.DeepEqual(want, have) {
			t.Fatal(test.Diff(want, have))
		}
	}
}
// TestMergeNodes checks Nodes.Merge across the interesting cases: an
// empty side, disjoint IDs, conflicting IDs (the newer latest-value
// wins), and counter summation.
func TestMergeNodes(t *testing.T) {
	mtime.NowForce(time.Now())
	defer mtime.NowReset()

	for name, c := range map[string]struct {
		a, b, want report.Nodes
	}{
		"Empty a": {
			a: report.Nodes{},
			b: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Empty b": {
			a: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			b: report.Nodes{},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Simple merge": {
			a: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			b: report.Nodes{
				":192.168.1.2:12345": report.MakeNodeWith(":192.168.1.2:12345", map[string]string{
					PID:    "42",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
				":192.168.1.2:12345": report.MakeNodeWith(":192.168.1.2:12345", map[string]string{
					PID:    "42",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Merge conflict with rank difference": {
			a: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			b: report.Nodes{
				// Same ID, but the PID was recorded a minute earlier, so
				// the newer "23128" must win the merge.
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{ // <-- same ID
					Name:   "curl",
					Domain: "node-a.local",
				}).WithLatest(PID, time.Now().Add(-1*time.Minute), "0"),
			},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345",
					map[string]string{
						PID:    "23128",
						Name:   "curl",
						Domain: "node-a.local",
					}),
			},
		},
		"Merge conflict with no rank difference": {
			// NOTE(review): this fixture is identical to the "rank
			// difference" case above — confirm it was meant to differ.
			a: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			b: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{ // <-- same ID
					Name:   "curl",
					Domain: "node-a.local",
				}).WithLatest(PID, time.Now().Add(-1*time.Minute), "0"),
			},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(":192.168.1.1:12345", map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Counters": {
			a: report.Nodes{
				"1": report.MakeNode("1").WithCounters(map[string]int{
					"a": 13,
					"b": 57,
					"c": 89,
				}),
			},
			b: report.Nodes{
				"1": report.MakeNode("1").WithCounters(map[string]int{
					"a": 78,
					"b": 3,
					"d": 47,
				}),
			},
			// Counters with the same key are summed; others pass through.
			want: report.Nodes{
				"1": report.MakeNode("1").WithCounters(map[string]int{
					"a": 91,
					"b": 60,
					"c": 89,
					"d": 47,
				}),
			},
		},
	} {
		if have := c.a.Merge(c.b); !reflect.DeepEqual(c.want, have) {
			t.Errorf("%s: %s", name, test.Diff(c.want, have))
		}
	}
}
func (r *registry) updateContainerState(containerID string, intendedState *string) { r.Lock() defer r.Unlock() dockerContainer, err := r.client.InspectContainer(containerID) if err != nil { // Don't spam the logs if the container was short lived if _, ok := err.(*docker_client.NoSuchContainer); !ok { log.Errorf("Error processing event for container %s: %v", containerID, err) return } // Container doesn't exist anymore, so lets stop and remove it c, ok := r.containers.Get(containerID) if !ok { return } container := c.(Container) r.containers.Delete(containerID) delete(r.containersByPID, container.PID()) if r.collectStats { container.StopGatheringStats() } if intendedState != nil { node := report.MakeNodeWith(report.MakeContainerNodeID(containerID), map[string]string{ ContainerID: containerID, ContainerState: *intendedState, }) // Trigger anyone watching for updates for _, f := range r.watchers { f(node) } } return } // Container exists, ensure we have it o, ok := r.containers.Get(containerID) var c Container if !ok { c = NewContainerStub(dockerContainer, r.hostID) r.containers.Insert(containerID, c) } else { c = o.(Container) // potentially remove existing pid mapping. delete(r.containersByPID, c.PID()) c.UpdateState(dockerContainer) } // Update PID index if c.PID() > 1 { r.containersByPID[c.PID()] = c } // Trigger anyone watching for updates if err != nil { node := c.GetNode() for _, f := range r.watchers { f(node) } } // And finally, ensure we gather stats for it if r.collectStats { if dockerContainer.State.Running { if err := c.StartGatheringStats(); err != nil { log.Errorf("Error gather stats for container: %s", containerID) return } } else { c.StopGatheringStats() } } }
container2ID = "21b2c3d4e5" container2IP = duplicatedIP container2Name = "bar" container2NodeID = report.MakeContainerNodeID(container2ID) pauseContainerID = "31b2c3d4e5" pauseContainerIP = duplicatedIP pauseContainerName = "POD" pauseContainerNodeID = report.MakeContainerNodeID(pauseContainerID) rpt = report.Report{ Endpoint: report.Topology{ Nodes: report.Nodes{ randomEndpointNodeID: report.MakeNodeWith(randomEndpointNodeID, map[string]string{ endpoint.Addr: randomIP, endpoint.Port: randomPort, endpoint.Conntracked: "true", }). WithAdjacent(serverEndpointNodeID).WithTopology(report.Endpoint), serverEndpointNodeID: report.MakeNodeWith(serverEndpointNodeID, map[string]string{ endpoint.Addr: serverIP, endpoint.Port: serverPort, endpoint.Conntracked: "true", }). WithTopology(report.Endpoint), container1EndpointNodeID: report.MakeNodeWith(container1EndpointNodeID, map[string]string{ endpoint.Addr: container1IP, endpoint.Port: container1Port, endpoint.Conntracked: "true",
// TestContainer exercises the container stats plumbing with a stubbed
// docker connection: stats fed down the pipe must show up in the
// container's report node, network info, and extracted IPs.
func TestContainer(t *testing.T) {
	log.SetOutput(ioutil.Discard)

	// Stub the docker stats connection so we can feed stats in manually.
	oldDialStub, oldNewClientConnStub := docker.DialStub, docker.NewClientConnStub
	defer func() { docker.DialStub, docker.NewClientConnStub = oldDialStub, oldNewClientConnStub }()
	docker.DialStub = func(network, address string) (net.Conn, error) {
		return nil, nil
	}
	reader, writer := io.Pipe()
	connection := &mockConnection{reader}
	docker.NewClientConnStub = func(c net.Conn, r *bufio.Reader) docker.ClientConn {
		return connection
	}

	now := time.Unix(12345, 67890).UTC()
	mtime.NowForce(now)
	defer mtime.NowReset()

	const hostID = "scope"
	c := docker.NewContainer(container1, hostID)
	err := c.StartGatheringStats()
	if err != nil {
		t.Errorf("%v", err)
	}
	defer c.StopGatheringStats()

	// Send some stats to the docker container
	stats := &client.Stats{}
	stats.Read = now
	stats.MemoryStats.Usage = 12345
	stats.MemoryStats.Limit = 45678
	encoder := codec.NewEncoder(writer, &codec.JsonHandle{})
	if err = encoder.Encode(&stats); err != nil {
		t.Error(err)
	}

	// Now see if we got them
	{
		uptime := (now.Sub(startTime) / time.Second) * time.Second
		want := report.MakeNodeWith("ping;<container>", map[string]string{
			"docker_container_command":     " ",
			"docker_container_created":     "01 Jan 01 00:00 UTC",
			"docker_container_id":          "ping",
			"docker_container_name":        "pong",
			"docker_image_id":              "baz",
			"docker_label_foo1":            "bar1",
			"docker_label_foo2":            "bar2",
			"docker_container_state":       "running",
			"docker_container_state_human": "Up 6 years",
			"docker_container_uptime":      uptime.String(),
		}).
			WithControls(
				docker.RestartContainer, docker.StopContainer, docker.PauseContainer,
				docker.AttachContainer, docker.ExecContainer,
			).WithMetrics(report.Metrics{
			"docker_cpu_total_usage": report.MakeMetric(),
			"docker_memory_usage":    report.MakeMetric().Add(now, 12345).WithMax(45678),
		}).WithParents(report.EmptySets.
			Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID("baz"))),
		)

		// Poll until the node matches, dropping zero/empty latest entries
		// so the comparison only covers the fields we set above.
		test.Poll(t, 100*time.Millisecond, want, func() interface{} {
			node := c.GetNode()
			node.Latest.ForEach(func(k, v string) {
				if v == "0" || v == "" {
					node.Latest = node.Latest.Delete(k)
				}
			})
			return node
		})
	}

	{
		// Network info should carry the mapped ports and container IPs.
		want := report.EmptySets.
			Add("docker_container_ports", report.MakeStringSet("1.2.3.4:80->80/tcp", "81/tcp")).
			Add("docker_container_ips", report.MakeStringSet("1.2.3.4")).
			Add("docker_container_ips_with_scopes", report.MakeStringSet(";1.2.3.4"))
		test.Poll(t, 100*time.Millisecond, want, func() interface{} {
			return c.NetworkInfo([]net.IP{})
		})
	}

	if c.Image() != "baz" {
		t.Errorf("%s != baz", c.Image())
	}
	if c.PID() != 2 {
		t.Errorf("%d != 2", c.PID())
	}
	node := c.GetNode().WithSets(c.NetworkInfo([]net.IP{}))
	if have := docker.ExtractContainerIPs(node); !reflect.DeepEqual(have, []string{"1.2.3.4"}) {
		t.Errorf("%v != %v", have, []string{"1.2.3.4"})
	}
}
// TestWeaveTaggerOverlayTopology checks the weave overlay plugin: its
// Report must describe the local peer (name, nickname, local networks),
// and its Tag must decorate container nodes with weave DNS/MAC/IP info.
func TestWeaveTaggerOverlayTopology(t *testing.T) {
	w := overlay.NewWeave(mockHostID, weave.MockClient{})
	defer w.Stop()

	// Wait until the reporter reports some nodes
	test.Poll(t, 300*time.Millisecond, 1, func() interface{} {
		have, _ := w.Report()
		return len(have.Overlay.Nodes)
	})

	{
		// Overlay node should include peer name and nickname
		have, err := w.Report()
		if err != nil {
			t.Fatal(err)
		}

		nodeID := report.MakeOverlayNodeID(weave.MockWeavePeerName)
		node, ok := have.Overlay.Nodes[nodeID]
		if !ok {
			t.Errorf("Expected overlay node %q, but not found", nodeID)
		}
		if peerName, ok := node.Latest.Lookup(overlay.WeavePeerName); !ok || peerName != weave.MockWeavePeerName {
			t.Errorf("Expected weave peer name %q, got %q", weave.MockWeavePeerName, peerName)
		}
		if peerNick, ok := node.Latest.Lookup(overlay.WeavePeerNickName); !ok || peerNick != weave.MockWeavePeerNickName {
			t.Errorf("Expected weave peer nickname %q, got %q", weave.MockWeavePeerNickName, peerNick)
		}
		if localNetworks, ok := node.Sets.Lookup(host.LocalNetworks); !ok || !reflect.DeepEqual(localNetworks, report.MakeStringSet(weave.MockWeaveDefaultSubnet)) {
			t.Errorf("Expected weave node local_networks %q, got %q", report.MakeStringSet(weave.MockWeaveDefaultSubnet), localNetworks)
		}
	}

	{
		// Container nodes should be tagged with their overlay info
		nodeID := report.MakeContainerNodeID(weave.MockContainerID)
		have, err := w.Tag(report.Report{
			Container: report.MakeTopology().AddNode(report.MakeNodeWith(nodeID, map[string]string{
				docker.ContainerID: weave.MockContainerID,
			})),
		})
		if err != nil {
			t.Fatal(err)
		}

		node, ok := have.Container.Nodes[nodeID]
		if !ok {
			t.Errorf("Expected container node %q, but not found", nodeID)
		}

		// Should have Weave DNS Hostname
		if have, ok := node.Latest.Lookup(overlay.WeaveDNSHostname); !ok || have != weave.MockHostname {
			t.Errorf("Expected weave dns hostname %q, got %q", weave.MockHostname, have)
		}
		// Should have Weave MAC Address
		if have, ok := node.Latest.Lookup(overlay.WeaveMACAddress); !ok || have != weave.MockContainerMAC {
			t.Errorf("Expected weave mac address %q, got %q", weave.MockContainerMAC, have)
		}
		// Should have Weave container ip
		if have, ok := node.Sets.Lookup(docker.ContainerIPs); !ok || !have.Contains(weave.MockContainerIP) {
			t.Errorf("Expected container ips to include the weave IP %q, got %q", weave.MockContainerIP, have)
		}
		// Should have Weave container ip (with scope)
		if have, ok := node.Sets.Lookup(docker.ContainerIPsWithScopes); !ok || !have.Contains(mockContainerIPWithScope) {
			t.Errorf("Expected container ips to include the weave IP (with scope) %q, got %q", mockContainerIPWithScope, have)
		}
	}
}