// Render produces a container graph where the latest metadata contains the
// container image name, if found.
func (r containerWithImageNameRenderer) Render(rpt report.Report, dct Decorator) report.Nodes {
	containers := r.Renderer.Render(rpt, dct)
	images := SelectContainerImage.Render(rpt, dct)

	outputs := report.Nodes{}
	for id, c := range containers {
		outputs[id] = c
		imageID, ok := c.Latest.Lookup(docker.ImageID)
		if !ok {
			continue
		}
		image, ok := images[report.MakeContainerImageNodeID(imageID)]
		if !ok {
			continue
		}
		imageName, ok := image.Latest.Lookup(docker.ImageName)
		if !ok {
			continue
		}
		imageNameWithoutVersion := docker.ImageNameWithoutVersion(imageName)
		imageNodeID := report.MakeContainerImageNodeID(imageNameWithoutVersion)

		output := c.Copy()
		output = propagateLatest(docker.ImageName, image, output)
		output = propagateLatest(docker.ImageLabelPrefix+"works.weave.role", image, output)
		output.Parents = output.Parents.
			Delete(report.ContainerImage).
			Add(report.ContainerImage, report.MakeStringSet(imageNodeID))
		outputs[id] = output
	}
	return outputs
}
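// propagateLatest is called above but defined elsewhere in the render package.
// A minimal sketch of the assumed behaviour -- copy the keyed latest entry
// (value and timestamp) from one node to another, if present -- is given below;
// treat it as an illustration, not the canonical definition.
func propagateLatest(key string, from, to report.Node) report.Node {
	if value, timestamp, ok := from.Latest.LookupEntry(key); ok {
		to.Latest = to.Latest.Set(key, timestamp, value)
	}
	return to
}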
// MapContainer2ContainerImage maps container RenderableNodes to container
// image RenderableNodes.
//
// Pseudo nodes are passed through unchanged; container nodes without a
// docker_image_id are dropped.
//
// Otherwise, this function will produce a node with the correct ID
// format for a container image, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a container graph to get that info.
func MapContainer2ContainerImage(n RenderableNode, _ report.Networks) RenderableNodes {
	// Propagate all pseudo nodes
	if n.Pseudo {
		return RenderableNodes{n.ID: n}
	}

	// Otherwise, if for some reason the container doesn't have an image_id
	// (maybe slightly out of sync reports), just drop it
	imageID, ok := n.Node.Latest.Lookup(docker.ImageID)
	if !ok {
		return RenderableNodes{}
	}

	// Add container id key to the counters, which will later be counted to produce the minor label
	id := MakeContainerImageID(imageID)
	result := NewDerivedNode(id, n.WithParents(report.EmptySets))
	result.Node.Counters = result.Node.Counters.Add(ContainersKey, 1)

	// Add the container as a child of the new image node
	result.Children = result.Children.Add(n.Node)

	result.Node.Topology = "container_image"
	result.Node.ID = report.MakeContainerImageNodeID(imageID)

	return RenderableNodes{id: result}
}
func TestReporter(t *testing.T) {
	containerImageNodeID := report.MakeContainerImageNodeID("baz")
	rpt, err := docker.NewReporter(mockRegistryInstance, "host1", nil).Report()
	if err != nil {
		t.Fatal(err)
	}

	// Reporter should add a container
	{
		containerNodeID := report.MakeContainerNodeID("ping")
		node, ok := rpt.Container.Nodes[containerNodeID]
		if !ok {
			t.Fatalf("Expected report to have container %q, but not found", containerNodeID)
		}
		for k, want := range map[string]string{
			docker.ContainerID:   "ping",
			docker.ContainerName: "pong",
			docker.ImageID:       "baz",
		} {
			if have, ok := node.Latest.Lookup(k); !ok || have != want {
				t.Errorf("Expected container %s latest %q: %q, got %q", containerNodeID, k, want, have)
			}
		}

		// container should have controls
		if len(rpt.Container.Controls) == 0 {
			t.Errorf("Container should have some controls")
		}

		// container should have the image as a parent
		if parents, ok := node.Parents.Lookup(report.ContainerImage); !ok || !parents.Contains(containerImageNodeID) {
			t.Errorf("Expected container %s to have parent container image %q, got %q", containerNodeID, containerImageNodeID, parents)
		}
	}

	// Reporter should add a container image
	{
		node, ok := rpt.ContainerImage.Nodes[containerImageNodeID]
		if !ok {
			t.Fatalf("Expected report to have container image %q, but not found", containerImageNodeID)
		}
		for k, want := range map[string]string{
			docker.ImageID:   "baz",
			docker.ImageName: "bang",
		} {
			if have, ok := node.Latest.Lookup(k); !ok || have != want {
				t.Errorf("Expected container image %s latest %q: %q, got %q", containerImageNodeID, k, want, have)
			}
		}

		// container image should have no controls
		if len(rpt.ContainerImage.Controls) != 0 {
			t.Errorf("Container images should not have any controls")
		}
	}
}
func (t *Tagger) tag(tree process.Tree, topology *report.Topology) {
	for nodeID, node := range topology.Nodes {
		pidStr, ok := node.Latest.Lookup(process.PID)
		if !ok {
			continue
		}

		pid, err := strconv.ParseUint(pidStr, 10, 64)
		if err != nil {
			continue
		}

		var (
			c         Container
			candidate = int(pid)
		)
		t.registry.LockedPIDLookup(func(lookup func(int) Container) {
			for {
				c = lookup(candidate)
				if c != nil {
					break
				}

				candidate, err = tree.GetParent(candidate)
				if err != nil {
					break
				}
			}
		})

		if c == nil || ContainerIsStopped(c) || c.PID() == 1 {
			continue
		}

		node := report.MakeNodeWith(nodeID, map[string]string{
			ContainerID: c.ID(),
		}).WithParents(report.EmptySets.
			Add(report.Container, report.MakeStringSet(report.MakeContainerNodeID(c.ID()))),
		)

		// If we can work out the image name, add a parent tag for it
		image, ok := t.registry.GetContainerImage(c.Image())
		if ok && len(image.RepoTags) > 0 {
			imageName := ImageNameWithoutVersion(image.RepoTags[0])
			node = node.WithParents(report.EmptySets.
				Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID(imageName))),
			)
		}

		topology.AddNode(node)
	}
}
func (c *container) GetNode(hostID string, localAddrs []net.IP) report.Node {
	c.RLock()
	defer c.RUnlock()

	ips := append(c.container.NetworkSettings.SecondaryIPAddresses, c.container.NetworkSettings.IPAddress)
	// Treat all Docker IPs as local scoped.
	ipsWithScopes := []string{}
	for _, ip := range ips {
		ipsWithScopes = append(ipsWithScopes, report.MakeScopedAddressNodeID(hostID, ip))
	}

	state := c.State()

	result := report.MakeNodeWith(map[string]string{
		ContainerID:       c.ID(),
		ContainerName:     strings.TrimPrefix(c.container.Name, "/"),
		ContainerCreated:  c.container.Created.Format(time.RFC822),
		ContainerCommand:  c.container.Path + " " + strings.Join(c.container.Args, " "),
		ImageID:           c.container.Image,
		ContainerHostname: c.Hostname(),
		ContainerState:    state,
	}).WithSets(report.EmptySets.
		Add(ContainerPorts, c.ports(localAddrs)).
		Add(ContainerIPs, report.MakeStringSet(ips...)).
		Add(ContainerIPsWithScopes, report.MakeStringSet(ipsWithScopes...)),
	).WithMetrics(
		c.metrics(),
	).WithParents(report.EmptySets.
		Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID(c.container.Image))),
	)

	if c.container.State.Paused {
		result = result.WithControls(UnpauseContainer)
	} else if c.container.State.Running {
		uptime := (mtime.Now().Sub(c.container.State.StartedAt) / time.Second) * time.Second
		result = result.WithLatests(map[string]string{
			ContainerUptime:       uptime.String(),
			ContainerRestartCount: strconv.Itoa(c.container.RestartCount),
		})
		result = result.WithControls(
			RestartContainer, StopContainer, PauseContainer, AttachContainer, ExecContainer,
		)
	} else {
		result = result.WithControls(StartContainer)
	}

	result = AddLabels(result, c.container.Config.Labels)
	result = result.WithMetrics(c.metrics())
	return result
}
func (c *container) getBaseNode() report.Node {
	result := report.MakeNodeWith(report.MakeContainerNodeID(c.ID()), map[string]string{
		ContainerID:       c.ID(),
		ContainerCreated:  c.container.Created.Format(time.RFC822),
		ContainerCommand:  c.container.Path + " " + strings.Join(c.container.Args, " "),
		ImageID:           c.Image(),
		ContainerHostname: c.Hostname(),
	}).WithParents(report.EmptySets.
		Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID(c.Image()))),
	)
	result = result.AddTable(LabelPrefix, c.container.Config.Labels)
	result = result.AddTable(EnvPrefix, c.env())
	return result
}
func TestTagger(t *testing.T) {
	mtime.NowForce(time.Now())
	defer mtime.NowReset()

	oldProcessTree := docker.NewProcessTreeStub
	defer func() { docker.NewProcessTreeStub = oldProcessTree }()

	docker.NewProcessTreeStub = func(_ process.Walker) (process.Tree, error) {
		return &mockProcessTree{map[int]int{3: 2}}, nil
	}

	var (
		pid1NodeID = report.MakeProcessNodeID("somehost.com", "2")
		pid2NodeID = report.MakeProcessNodeID("somehost.com", "3")
	)
	input := report.MakeReport()
	input.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"}))
	input.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"}))

	have, err := docker.NewTagger(mockRegistryInstance, nil).Tag(input)
	if err != nil {
		t.Errorf("%v", err)
	}

	// Processes should be tagged with their container ID, and parents
	for _, nodeID := range []string{pid1NodeID, pid2NodeID} {
		node, ok := have.Process.Nodes[nodeID]
		if !ok {
			t.Errorf("Expected process node %s, but not found", nodeID)
		}

		// node should have the container id added
		if have, ok := node.Latest.Lookup(docker.ContainerID); !ok || have != "ping" {
			t.Errorf("Expected process node %s to have container id %q, got %q", nodeID, "ping", have)
		}

		// node should have the container as a parent
		if have, ok := node.Parents.Lookup(report.Container); !ok || !have.Contains(report.MakeContainerNodeID("ping")) {
			t.Errorf("Expected process node %s to have container %q as a parent, got %q", nodeID, "ping", have)
		}

		// node should have the container image as a parent
		if have, ok := node.Parents.Lookup(report.ContainerImage); !ok || !have.Contains(report.MakeContainerImageNodeID("baz")) {
			t.Errorf("Expected process node %s to have container image %q as a parent, got %q", nodeID, "baz", have)
		}
	}
}
func (r *Reporter) containerImageTopology() report.Topology {
	result := report.MakeTopology()

	r.registry.WalkImages(func(image *docker_client.APIImages) {
		nmd := report.MakeNodeWith(map[string]string{
			ImageID: image.ID,
		})
		AddLabels(nmd, image.Labels)

		if len(image.RepoTags) > 0 {
			nmd.Metadata[ImageName] = image.RepoTags[0]
		}

		nodeID := report.MakeContainerImageNodeID(image.ID)
		result.AddNode(nodeID, nmd)
	})

	return result
}
func (t *Tagger) tag(tree process.Tree, topology *report.Topology) {
	for nodeID, node := range topology.Nodes {
		pidStr, ok := node.Latest.Lookup(process.PID)
		if !ok {
			continue
		}

		pid, err := strconv.ParseUint(pidStr, 10, 64)
		if err != nil {
			continue
		}

		var (
			c         Container
			candidate = int(pid)
		)
		t.registry.LockedPIDLookup(func(lookup func(int) Container) {
			for {
				c = lookup(candidate)
				if c != nil {
					break
				}

				candidate, err = tree.GetParent(candidate)
				if err != nil {
					break
				}
			}
		})

		if c == nil || c.State() == StateStopped || c.PID() == 1 {
			continue
		}

		topology.AddNode(nodeID, report.MakeNodeWith(map[string]string{
			ContainerID: c.ID(),
		}).WithParents(report.EmptySets.
			Add(report.Container, report.MakeStringSet(report.MakeContainerNodeID(c.ID()))).
			Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID(c.Image()))),
		))
	}
}
func (r *Reporter) containerImageTopology() report.Topology {
	result := report.MakeTopology()

	r.registry.WalkImages(func(image *docker_client.APIImages) {
		node := report.MakeNodeWith(map[string]string{
			ImageID: image.ID,
		})
		node = AddLabels(node, image.Labels)

		if len(image.RepoTags) > 0 {
			node = node.WithLatests(map[string]string{ImageName: image.RepoTags[0]})
		}

		nodeID := report.MakeContainerImageNodeID(image.ID)
		result.AddNode(nodeID, node)
	})

	return result
}
// MapContainer2ContainerImage maps container Nodes to container
// image Nodes.
//
// Pseudo nodes are passed through unchanged; container nodes without a
// docker_image_id are dropped.
//
// Otherwise, this function will produce a node with the correct ID
// format for a container image, but without any Major or Minor labels.
// It does not have enough info to do that, and the resulting graph
// must be merged with a container graph to get that info.
func MapContainer2ContainerImage(n report.Node, _ report.Networks) report.Nodes {
	// Propagate all pseudo nodes
	if n.Topology == Pseudo {
		return report.Nodes{n.ID: n}
	}

	// Otherwise, if for some reason the container doesn't have an image_id
	// (maybe slightly out of sync reports), just drop it
	imageID, timestamp, ok := n.Latest.LookupEntry(docker.ImageID)
	if !ok {
		return report.Nodes{}
	}

	// Add container id key to the counters, which will later be counted to produce the minor label
	id := report.MakeContainerImageNodeID(imageID)
	result := NewDerivedNode(id, n).WithTopology(report.ContainerImage)
	result.Latest = result.Latest.Set(docker.ImageID, timestamp, imageID)
	result.Counters = result.Counters.Add(n.Topology, 1)
	return report.Nodes{id: result}
}
// MapContainerImage2Name maps container image nodes to their image name,
// ignoring the version (tag), so nodes for different versions of the same
// image merge.
func MapContainerImage2Name(n report.Node, _ report.Networks) report.Nodes {
	// Propagate all pseudo nodes
	if n.Topology == Pseudo {
		return report.Nodes{n.ID: n}
	}

	imageName, ok := n.Latest.Lookup(docker.ImageName)
	if !ok {
		return report.Nodes{}
	}

	imageNameWithoutVersion := docker.ImageNameWithoutVersion(imageName)
	output := n.Copy()
	output.ID = report.MakeContainerImageNodeID(imageNameWithoutVersion)

	if imageID, ok := report.ParseContainerImageNodeID(n.ID); ok {
		output.Sets = output.Sets.Add(docker.ImageID, report.EmptyStringSet.Add(imageID))
	}

	return report.Nodes{output.ID: output}
}
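// docker.ImageNameWithoutVersion is used above but not shown here. A minimal,
// hypothetical stand-in -- assuming it drops a trailing ":tag" from the final
// path segment while leaving registry host:port prefixes alone -- could look
// like the following; the real helper lives in the docker package.
func imageNameWithoutVersion(name string) string {
	parts := strings.Split(name, "/")
	last := parts[len(parts)-1]
	if i := strings.LastIndex(last, ":"); i >= 0 {
		parts[len(parts)-1] = last[:i]
	}
	// e.g. "registry:5000/weaveworks/scope:0.10.0" -> "registry:5000/weaveworks/scope"
	return strings.Join(parts, "/")
}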
func TestTagger(t *testing.T) {
	oldProcessTree := docker.NewProcessTreeStub
	defer func() { docker.NewProcessTreeStub = oldProcessTree }()

	docker.NewProcessTreeStub = func(_ process.Walker) (process.Tree, error) {
		return &mockProcessTree{map[int]int{3: 2}}, nil
	}

	var (
		pid1NodeID = report.MakeProcessNodeID("somehost.com", "2")
		pid2NodeID = report.MakeProcessNodeID("somehost.com", "3")
		wantNode   = report.MakeNodeWith(map[string]string{
			docker.ContainerID: "ping",
		}).WithParents(report.Sets{
			report.Container:      report.MakeStringSet(report.MakeContainerNodeID("ping")),
			report.ContainerImage: report.MakeStringSet(report.MakeContainerImageNodeID("baz")),
		})
	)

	input := report.MakeReport()
	input.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"}))
	input.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"}))

	want := report.MakeReport()
	want.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"}).Merge(wantNode))
	want.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"}).Merge(wantNode))

	tagger := docker.NewTagger(mockRegistryInstance, nil)
	have, err := tagger.Tag(input)
	if err != nil {
		t.Errorf("%v", err)
	}

	if !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
NonContainerNodeID   = report.MakeEndpointNodeID(ServerHostID, ServerIP, NonContainerClientPort)
GoogleEndpointNodeID = report.MakeEndpointNodeID(ServerHostID, GoogleIP, GooglePort)

ClientProcess1NodeID      = report.MakeProcessNodeID(ClientHostID, Client1PID)
ClientProcess2NodeID      = report.MakeProcessNodeID(ClientHostID, Client2PID)
ServerProcessNodeID       = report.MakeProcessNodeID(ServerHostID, ServerPID)
NonContainerProcessNodeID = report.MakeProcessNodeID(ServerHostID, NonContainerPID)

ClientContainerID     = "a1b2c3d4e5"
ServerContainerID     = "5e4d3c2b1a"
ClientContainerNodeID = report.MakeContainerNodeID(ClientContainerID)
ServerContainerNodeID = report.MakeContainerNodeID(ServerContainerID)

ClientContainerImageID     = "imageid123"
ServerContainerImageID     = "imageid456"
ClientContainerImageNodeID = report.MakeContainerImageNodeID(ClientContainerImageID)
ServerContainerImageNodeID = report.MakeContainerImageNodeID(ServerContainerImageID)
ClientContainerImageName   = "image/client"
ServerContainerImageName   = "image/server"

ClientAddressNodeID   = report.MakeAddressNodeID(ClientHostID, ClientIP)
ServerAddressNodeID   = report.MakeAddressNodeID(ServerHostID, ServerIP)
UnknownAddress1NodeID = report.MakeAddressNodeID(ServerHostID, UnknownClient1IP)
UnknownAddress2NodeID = report.MakeAddressNodeID(ServerHostID, UnknownClient2IP)
UnknownAddress3NodeID = report.MakeAddressNodeID(ServerHostID, UnknownClient3IP)
RandomAddressNodeID   = report.MakeAddressNodeID(ServerHostID, RandomClientIP) // this should become an internet node

KubernetesNamespace = "ping"
ClientPodID         = "ping/pong-a"
ServerPodID         = "ping/pong-b"
ClientPodNodeID     = report.MakePodNodeID(KubernetesNamespace, "pong-a")
func (c *container) GetNode(hostID string, localAddrs []net.IP) report.Node {
	c.RLock()
	defer c.RUnlock()

	ips := append(c.container.NetworkSettings.SecondaryIPAddresses, c.container.NetworkSettings.IPAddress)
	// Treat all Docker IPs as local scoped.
	ipsWithScopes := []string{}
	for _, ip := range ips {
		ipsWithScopes = append(ipsWithScopes, report.MakeScopedAddressNodeID(hostID, ip))
	}

	state := c.State()

	result := report.MakeNodeWith(map[string]string{
		ContainerID:       c.ID(),
		ContainerName:     strings.TrimPrefix(c.container.Name, "/"),
		ContainerCreated:  c.container.Created.Format(time.RFC822),
		ContainerCommand:  c.container.Path + " " + strings.Join(c.container.Args, " "),
		ImageID:           c.container.Image,
		ContainerHostname: c.Hostname(),
	}).WithSets(report.Sets{
		ContainerPorts:         c.ports(localAddrs),
		ContainerIPs:           report.MakeStringSet(ips...),
		ContainerIPsWithScopes: report.MakeStringSet(ipsWithScopes...),
	}).WithLatest(
		ContainerState, mtime.Now(), state,
	).WithMetrics(
		c.metrics(),
	).WithParents(report.Sets{
		report.ContainerImage: report.MakeStringSet(report.MakeContainerImageNodeID(c.container.Image)),
	})

	if c.container.State.Paused {
		result = result.WithControls(UnpauseContainer)
	} else if c.container.State.Running {
		result = result.WithControls(
			RestartContainer, StopContainer, PauseContainer, AttachContainer, ExecContainer,
		)
	} else {
		result = result.WithControls(StartContainer)
	}

	AddLabels(result, c.container.Config.Labels)

	if c.latestStats == nil {
		return result
	}

	result = result.WithMetadata(map[string]string{
		MemoryMaxUsage: strconv.FormatUint(c.latestStats.MemoryStats.MaxUsage, 10),
		MemoryUsage:    strconv.FormatUint(c.latestStats.MemoryStats.Usage, 10),
		MemoryFailcnt:  strconv.FormatUint(c.latestStats.MemoryStats.Failcnt, 10),
		MemoryLimit:    strconv.FormatUint(c.latestStats.MemoryStats.Limit, 10),

		// CPUPercpuUsage:       strconv.FormatUint(stats.CPUStats.CPUUsage.PercpuUsage, 10),
		CPUUsageInUsermode:   strconv.FormatUint(c.latestStats.CPUStats.CPUUsage.UsageInUsermode, 10),
		CPUTotalUsage:        strconv.FormatUint(c.latestStats.CPUStats.CPUUsage.TotalUsage, 10),
		CPUUsageInKernelmode: strconv.FormatUint(c.latestStats.CPUStats.CPUUsage.UsageInKernelmode, 10),
		CPUSystemCPUUsage:    strconv.FormatUint(c.latestStats.CPUStats.SystemCPUUsage, 10),
	}).WithMetrics(c.metrics())

	return result
}
func TestContainer(t *testing.T) {
	log.SetOutput(ioutil.Discard)

	oldDialStub, oldNewClientConnStub := docker.DialStub, docker.NewClientConnStub
	defer func() { docker.DialStub, docker.NewClientConnStub = oldDialStub, oldNewClientConnStub }()

	docker.DialStub = func(network, address string) (net.Conn, error) {
		return nil, nil
	}

	reader, writer := io.Pipe()
	connection := &mockConnection{reader}
	docker.NewClientConnStub = func(c net.Conn, r *bufio.Reader) docker.ClientConn {
		return connection
	}

	c := docker.NewContainer(container1)
	err := c.StartGatheringStats()
	if err != nil {
		t.Errorf("%v", err)
	}
	defer c.StopGatheringStats()

	now := time.Unix(12345, 67890).UTC()
	mtime.NowForce(now)
	defer mtime.NowReset()

	// Send some stats to the docker container
	stats := &client.Stats{}
	stats.Read = now
	stats.MemoryStats.Usage = 12345
	if err = json.NewEncoder(writer).Encode(&stats); err != nil {
		t.Error(err)
	}

	// Now see if we got them
	uptime := (now.Sub(startTime) / time.Second) * time.Second
	want := report.MakeNode().WithLatests(map[string]string{
		"docker_container_command": " ",
		"docker_container_created": "01 Jan 01 00:00 UTC",
		"docker_container_id":      "ping",
		"docker_container_name":    "pong",
		"docker_image_id":          "baz",
		"docker_label_foo1":        "bar1",
		"docker_label_foo2":        "bar2",
		"docker_container_state":   "running",
		"docker_container_uptime":  uptime.String(),
	}).WithSets(report.EmptySets.
		Add("docker_container_ports", report.MakeStringSet("1.2.3.4:80->80/tcp", "81/tcp")).
		Add("docker_container_ips", report.MakeStringSet("1.2.3.4")).
		Add("docker_container_ips_with_scopes", report.MakeStringSet("scope;1.2.3.4")),
	).WithControls(
		docker.RestartContainer, docker.StopContainer, docker.PauseContainer,
		docker.AttachContainer, docker.ExecContainer,
	).WithMetrics(report.Metrics{
		"docker_cpu_total_usage": report.MakeMetric(),
		"docker_memory_usage":    report.MakeMetric().Add(now, 12345),
	}).WithParents(report.EmptySets.
		Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID("baz"))),
	)

	test.Poll(t, 100*time.Millisecond, want, func() interface{} {
		node := c.GetNode("scope", []net.IP{})
		node.Latest.ForEach(func(k, v string) {
			if v == "0" || v == "" {
				node.Latest = node.Latest.Delete(k)
			}
		})
		return node
	})

	if c.Image() != "baz" {
		t.Errorf("%s != baz", c.Image())
	}
	if c.PID() != 2 {
		t.Errorf("%d != 2", c.PID())
	}
	if have := docker.ExtractContainerIPs(c.GetNode("", []net.IP{})); !reflect.DeepEqual(have, []string{"1.2.3.4"}) {
		t.Errorf("%v != %v", have, []string{"1.2.3.4"})
	}
}
func TestReporter(t *testing.T) {
	want := report.MakeReport()
	want.Container = report.Topology{
		Nodes: report.Nodes{
			report.MakeContainerNodeID("ping"): report.MakeNodeWith(map[string]string{
				docker.ContainerID:   "ping",
				docker.ContainerName: "pong",
				docker.ImageID:       "baz",
			}),
		},
		Controls: report.Controls{
			docker.RestartContainer: report.Control{
				ID:    docker.RestartContainer,
				Human: "Restart",
				Icon:  "fa-repeat",
			},
			docker.StartContainer: report.Control{
				ID:    docker.StartContainer,
				Human: "Start",
				Icon:  "fa-play",
			},
			docker.StopContainer: report.Control{
				ID:    docker.StopContainer,
				Human: "Stop",
				Icon:  "fa-stop",
			},
			docker.PauseContainer: report.Control{
				ID:    docker.PauseContainer,
				Human: "Pause",
				Icon:  "fa-pause",
			},
			docker.UnpauseContainer: report.Control{
				ID:    docker.UnpauseContainer,
				Human: "Unpause",
				Icon:  "fa-play",
			},
			docker.AttachContainer: report.Control{
				ID:    docker.AttachContainer,
				Human: "Attach",
				Icon:  "fa-desktop",
			},
			docker.ExecContainer: report.Control{
				ID:    docker.ExecContainer,
				Human: "Exec /bin/sh",
				Icon:  "fa-terminal",
			},
		},
	}
	want.ContainerImage = report.Topology{
		Nodes: report.Nodes{
			report.MakeContainerImageNodeID("baz"): report.MakeNodeWith(map[string]string{
				docker.ImageID:   "baz",
				docker.ImageName: "bang",
			}),
		},
		Controls: report.Controls{},
	}

	reporter := docker.NewReporter(mockRegistryInstance, "host1", nil)
	have, _ := reporter.Report()
	if !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}