func TestPipeTimeout(t *testing.T) {
    router := mux.NewRouter()
    pr := RegisterPipeRoutes(router)
    pr.Stop() // we don't want the loop running in the background

    mtime.NowForce(time.Now())
    defer mtime.NowReset()

    // create a new pipe.
    id := "foo"
    pipe, ok := pr.getOrCreate(id)
    if !ok {
        t.Fatalf("not ok")
    }

    // move time forward such that the new pipe should timeout
    mtime.NowForce(mtime.Now().Add(pipeTimeout))
    pr.timeout()
    if !pipe.Closed() {
        t.Fatalf("pipe didn't timeout")
    }

    // move time forward such that the pipe should be GCd
    mtime.NowForce(mtime.Now().Add(gcTimeout))
    pr.garbageCollect()
    if _, ok := pr.pipes[id]; ok {
        t.Fatalf("pipe not gc'd")
    }
}
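// The test above pins the wall clock with mtime.NowForce and advances it by
// hand. For context, a minimal sketch of such a swappable clock, modelled on
// (but not guaranteed to match) the mtime package used throughout this code:
package mtime

import "time"

// now is the active clock; tests swap it out via NowForce.
var now = time.Now

// Now returns the current time according to the active clock.
func Now() time.Time { return now() }

// NowForce pins the clock to the fixed instant t.
func NowForce(t time.Time) {
    now = func() time.Time { return t }
}

// NowReset restores the real clock.
func NowReset() { now = time.Now }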
func (c *container) GetNode() report.Node {
    c.RLock()
    defer c.RUnlock()
    latest := map[string]string{
        ContainerState:      c.StateString(),
        ContainerStateHuman: c.State(),
    }
    controls := []string{}
    if c.container.State.Paused {
        controls = append(controls, UnpauseContainer)
    } else if c.container.State.Running {
        // Round uptime down to whole seconds.
        uptime := (mtime.Now().Sub(c.container.State.StartedAt) / time.Second) * time.Second
        networkMode := ""
        if c.container.HostConfig != nil {
            networkMode = c.container.HostConfig.NetworkMode
        }
        latest[ContainerName] = strings.TrimPrefix(c.container.Name, "/")
        latest[ContainerUptime] = uptime.String()
        latest[ContainerRestartCount] = strconv.Itoa(c.container.RestartCount)
        latest[ContainerNetworkMode] = networkMode
        controls = append(controls, RestartContainer, StopContainer, PauseContainer, AttachContainer, ExecContainer)
    } else {
        controls = append(controls, StartContainer, RemoveContainer)
    }

    result := c.baseNode.WithLatests(latest)
    result = result.WithControls(controls...)
    result = result.WithMetrics(c.metrics())
    return result
}
// ColorConnected colors nodes with the IsConnected key if
// they have edges to or from them. Edges to/from yourself
// are not counted here (see #656).
func ColorConnected(r Renderer) Renderer {
    return CustomRenderer{
        Renderer: r,
        RenderFunc: func(input report.Nodes) report.Nodes {
            connected := map[string]struct{}{}
            void := struct{}{}
            for id, node := range input {
                if len(node.Adjacency) == 0 {
                    continue
                }
                for _, adj := range node.Adjacency {
                    if adj != id {
                        connected[id] = void
                        connected[adj] = void
                    }
                }
            }

            output := input.Copy()
            for id := range connected {
                output[id] = output[id].WithLatest(IsConnected, mtime.Now(), "true")
            }
            return output
        },
    }
}
// WithLatests returns a fresh copy of n, with the entries of m merged into
// its Latest values, all stamped with the current time.
func (n Node) WithLatests(m map[string]string) Node {
    result := n.Copy()
    ts := mtime.Now()
    for k, v := range m {
        result.Latest = result.Latest.Set(k, ts, v)
    }
    return result
}
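// For illustration, a minimal usage sketch of WithLatests; the node ID and
// keys here are hypothetical.
func ExampleWithLatests() {
    node := MakeNode("example-node-id").WithLatests(map[string]string{
        "host_name": "example-host",
        "os":        "linux",
    })
    // Both entries carry the same timestamp: mtime.Now() is read once.
    _ = node
}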
func (pr *PipeRouter) getOrCreate(id string) (*pipe, bool) {
    pr.Lock()
    defer pr.Unlock()
    p, ok := pr.pipes[id]
    if !ok {
        log.Printf("Creating pipe id %s", id)
        p = &pipe{
            ui:    end{lastUsedTime: mtime.Now()},
            probe: end{lastUsedTime: mtime.Now()},
            Pipe:  xfer.NewPipe(),
        }
        pr.pipes[id] = p
    }
    if p.Closed() {
        return nil, false
    }
    return p, true
}
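// getOrCreate and the functions that follow assume per-end bookkeeping on
// each pipe. A plausible sketch of those types, inferred from the field
// names used above (the xfer.Pipe embedding follows the composite literal):
type end struct {
    refCount     int       // references held by in-flight HTTP handlers
    lastUsedTime time.Time // last moment the refCount dropped to zero
}

type pipe struct {
    xfer.Pipe
    ui, probe     end
    tombstoneTime time.Time // set when the pipe is closed; zero while open
}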
func (pr *PipeRouter) delete(w http.ResponseWriter, r *http.Request) {
    pipeID := mux.Vars(r)["pipeID"]
    pipe, ok := pr.getOrCreate(pipeID)
    if ok && pr.retain(pipeID, pipe, &pipe.ui) {
        log.Printf("Closing pipe %s", pipeID)
        pipe.Close()
        pipe.tombstoneTime = mtime.Now()
        pr.release(pipeID, pipe, &pipe.ui)
    }
}
func (pr *PipeRouter) garbageCollect() {
    pr.Lock()
    defer pr.Unlock()
    now := mtime.Now()
    for pipeID, pipe := range pr.pipes {
        if pipe.Closed() && now.Sub(pipe.tombstoneTime) >= gcTimeout {
            delete(pr.pipes, pipeID)
        }
    }
}
// Report implements Reporter.
func (r *Reporter) Report() (report.Report, error) {
    var (
        rep        = report.MakeReport()
        localCIDRs []string
    )

    localNets, err := GetLocalNetworks()
    if err != nil {
        return rep, err
    }
    for _, localNet := range localNets {
        localCIDRs = append(localCIDRs, localNet.String())
    }

    uptime, err := GetUptime()
    if err != nil {
        return rep, err
    }

    kernel, err := GetKernelVersion()
    if err != nil {
        return rep, err
    }

    now := mtime.Now()
    metrics := GetLoad(now)
    cpuUsage, max := GetCPUUsagePercent()
    metrics[CPUUsage] = report.MakeMetric().Add(now, cpuUsage).WithMax(max)
    memoryUsage, max := GetMemoryUsageBytes()
    metrics[MemoryUsage] = report.MakeMetric().Add(now, memoryUsage).WithMax(max)

    rep.Host.AddNode(report.MakeHostNodeID(r.hostID), report.MakeNodeWith(map[string]string{
        Timestamp:     mtime.Now().UTC().Format(time.RFC3339Nano),
        HostName:      r.hostName,
        OS:            runtime.GOOS,
        KernelVersion: kernel,
        Uptime:        uptime.String(),
    }).WithSets(report.EmptySets.
        Add(LocalNetworks, report.MakeStringSet(localCIDRs...)),
    ).WithMetrics(metrics))

    return rep, nil
}
func (pr *localPipeRouter) Delete(_ context.Context, id string) error {
    pr.Lock()
    defer pr.Unlock()
    p, ok := pr.pipes[id]
    if !ok {
        return nil
    }
    p.Close()
    p.tombstoneTime = mtime.Now()
    return nil
}
func (pr *localPipeRouter) Get(_ context.Context, id string, e End) (xfer.Pipe, io.ReadWriter, error) {
    pr.Lock()
    defer pr.Unlock()
    p, ok := pr.pipes[id]
    if !ok {
        log.Infof("Creating pipe id %s", id)
        p = &pipe{
            ui:    end{lastUsedTime: mtime.Now()},
            probe: end{lastUsedTime: mtime.Now()},
            Pipe:  xfer.NewPipe(),
        }
        pr.pipes[id] = p
    }
    if p.Closed() {
        return nil, nil, fmt.Errorf("pipe %s closed", id)
    }
    end, endIO := p.end(e)
    end.refCount++
    return p, endIO, nil
}
func (pr *PipeRouter) release(id string, pipe *pipe, end *end) {
    pr.Lock()
    defer pr.Unlock()

    end.refCount--
    if end.refCount != 0 {
        return
    }

    if !pipe.Closed() {
        end.lastUsedTime = mtime.Now()
    }
}
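// The delete handler above pairs release with a retain call that is not
// shown in this excerpt. A plausible sketch of that counterpart, assuming
// it merely refuses closed pipes and takes a reference:
func (pr *PipeRouter) retain(id string, pipe *pipe, end *end) bool {
    pr.Lock()
    defer pr.Unlock()
    if pipe.Closed() {
        return false
    }
    end.refCount++
    return true
}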
func (c *container) GetNode(hostID string, localAddrs []net.IP) report.Node {
    c.RLock()
    defer c.RUnlock()

    ips := append(c.container.NetworkSettings.SecondaryIPAddresses, c.container.NetworkSettings.IPAddress)
    // Treat all Docker IPs as local scoped.
    ipsWithScopes := []string{}
    for _, ip := range ips {
        ipsWithScopes = append(ipsWithScopes, report.MakeScopedAddressNodeID(hostID, ip))
    }

    state := c.State()
    result := report.MakeNodeWith(map[string]string{
        ContainerID:       c.ID(),
        ContainerName:     strings.TrimPrefix(c.container.Name, "/"),
        ContainerCreated:  c.container.Created.Format(time.RFC822),
        ContainerCommand:  c.container.Path + " " + strings.Join(c.container.Args, " "),
        ImageID:           c.container.Image,
        ContainerHostname: c.Hostname(),
        ContainerState:    state,
    }).WithSets(report.EmptySets.
        Add(ContainerPorts, c.ports(localAddrs)).
        Add(ContainerIPs, report.MakeStringSet(ips...)).
        Add(ContainerIPsWithScopes, report.MakeStringSet(ipsWithScopes...)),
    ).WithMetrics(
        c.metrics(),
    ).WithParents(report.EmptySets.
        Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID(c.container.Image))),
    )

    if c.container.State.Paused {
        result = result.WithControls(UnpauseContainer)
    } else if c.container.State.Running {
        // Round uptime down to whole seconds.
        uptime := (mtime.Now().Sub(c.container.State.StartedAt) / time.Second) * time.Second
        result = result.WithLatests(map[string]string{
            ContainerUptime:       uptime.String(),
            ContainerRestartCount: strconv.Itoa(c.container.RestartCount),
        })
        result = result.WithControls(
            RestartContainer, StopContainer, PauseContainer, AttachContainer, ExecContainer,
        )
    } else {
        result = result.WithControls(StartContainer)
    }

    result = AddLabels(result, c.container.Config.Labels)
    return result
}
func (pr *PipeRouter) timeout() {
    pr.Lock()
    defer pr.Unlock()
    now := mtime.Now()
    for id, pipe := range pr.pipes {
        if pipe.Closed() || (pipe.ui.refCount > 0 && pipe.probe.refCount > 0) {
            continue
        }

        if (pipe.ui.refCount == 0 && now.Sub(pipe.ui.lastUsedTime) >= pipeTimeout) ||
            (pipe.probe.refCount == 0 && now.Sub(pipe.probe.lastUsedTime) >= pipeTimeout) {
            log.Printf("Timing out pipe %s", id)
            pipe.Close()
            pipe.tombstoneTime = now
        }
    }
}
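// timeout and garbageCollect are presumably driven by a background loop,
// which TestPipeTimeout stops via pr.Stop so it can step the clock by hand.
// A minimal sketch of such a loop; the gcInterval constant and the quit
// channel field are assumptions:
func (pr *PipeRouter) loop() {
    ticker := time.NewTicker(gcInterval)
    defer ticker.Stop()
    for {
        select {
        case <-pr.quit:
            return
        case <-ticker.C:
            pr.timeout()
            pr.garbageCollect()
        }
    }
}

// Stop terminates the background loop.
func (pr *PipeRouter) Stop() {
    close(pr.quit)
}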
// Tag adds pod parents to container nodes.
func (r *Reporter) Tag(rpt report.Report) (report.Report, error) {
    for id, n := range rpt.Container.Nodes {
        uid, ok := n.Latest.Lookup(docker.LabelPrefix + "io.kubernetes.pod.uid")
        if !ok {
            continue
        }

        // Tag the pause containers with "does-not-make-connections"
        if isPauseContainer(n, rpt) {
            n = n.WithLatest(report.DoesNotMakeConnections, mtime.Now(), "")
        }

        rpt.Container.Nodes[id] = n.WithParents(report.EmptySets.Add(
            report.Pod,
            report.EmptyStringSet.Add(report.MakePodNodeID(uid)),
        ))
    }
    return rpt, nil
}
func (r *Reporter) processTopology() (report.Topology, error) {
    t := report.MakeTopology().
        WithMetadataTemplates(MetadataTemplates).
        WithMetricTemplates(MetricTemplates)
    now := mtime.Now()
    deltaTotal, maxCPU, err := r.jiffies()
    if err != nil {
        return t, err
    }

    err = r.walker.Walk(func(p, prev Process) {
        pidstr := strconv.Itoa(p.PID)
        nodeID := report.MakeProcessNodeID(r.scope, pidstr)
        node := report.MakeNode(nodeID)
        for _, tuple := range []struct{ key, value string }{
            {PID, pidstr},
            {Name, p.Name},
            {Cmdline, p.Cmdline},
            {Threads, strconv.Itoa(p.Threads)},
        } {
            if tuple.value != "" {
                node = node.WithLatests(map[string]string{tuple.key: tuple.value})
            }
        }

        if p.PPID > 0 {
            node = node.WithLatests(map[string]string{PPID: strconv.Itoa(p.PPID)})
        }

        if deltaTotal > 0 {
            cpuUsage := float64(p.Jiffies-prev.Jiffies) / float64(deltaTotal) * 100.
            node = node.WithMetric(CPUUsage, report.MakeMetric().Add(now, cpuUsage).WithMax(maxCPU))
        }

        node = node.WithMetric(MemoryUsage, report.MakeMetric().Add(now, float64(p.RSSBytes)).WithMax(float64(p.RSSBytesLimit)))
        node = node.WithMetric(OpenFilesCount, report.MakeMetric().Add(now, float64(p.OpenFilesCount)).WithMax(float64(p.OpenFilesLimit)))
        t.AddNode(node)
    })
    return t, err
}
func (pr *localPipeRouter) Release(_ context.Context, id string, e End) error {
    pr.Lock()
    defer pr.Unlock()

    p, ok := pr.pipes[id]
    if !ok {
        return fmt.Errorf("pipe %s not found", id)
    }

    end, _ := p.end(e)
    end.refCount--
    if end.refCount > 0 {
        return nil
    }

    if !p.Closed() {
        end.lastUsedTime = mtime.Now()
    }

    return nil
}
// Add the new control IDs to this NodeControls, producing a fresh NodeControls.
func (nc NodeControls) Add(ids ...string) NodeControls {
    return NodeControls{
        Timestamp: mtime.Now(),
        Controls:  nc.Controls.Add(ids...),
    }
}
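// A hypothetical usage sketch, assuming a MakeNodeControls constructor for
// the empty value: each Add produces a fresh NodeControls stamped with the
// current mtime.Now().
func ExampleNodeControls_Add() {
    nc := MakeNodeControls().Add("example_control_id")
    _ = nc
}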
func (c *container) GetNode(hostID string, localAddrs []net.IP) report.Node {
    c.RLock()
    defer c.RUnlock()

    ips := append(c.container.NetworkSettings.SecondaryIPAddresses, c.container.NetworkSettings.IPAddress)
    // Treat all Docker IPs as local scoped.
    ipsWithScopes := []string{}
    for _, ip := range ips {
        ipsWithScopes = append(ipsWithScopes, report.MakeScopedAddressNodeID(hostID, ip))
    }

    state := c.State()
    result := report.MakeNodeWith(map[string]string{
        ContainerID:       c.ID(),
        ContainerName:     strings.TrimPrefix(c.container.Name, "/"),
        ContainerCreated:  c.container.Created.Format(time.RFC822),
        ContainerCommand:  c.container.Path + " " + strings.Join(c.container.Args, " "),
        ImageID:           c.container.Image,
        ContainerHostname: c.Hostname(),
    }).WithSets(report.Sets{
        ContainerPorts:         c.ports(localAddrs),
        ContainerIPs:           report.MakeStringSet(ips...),
        ContainerIPsWithScopes: report.MakeStringSet(ipsWithScopes...),
    }).WithLatest(
        ContainerState, mtime.Now(), state,
    ).WithMetrics(c.metrics())

    if c.container.State.Paused {
        result = result.WithControls(UnpauseContainer)
    } else if c.container.State.Running {
        result = result.WithControls(RestartContainer, StopContainer, PauseContainer)
    } else {
        result = result.WithControls(StartContainer)
    }

    result = AddLabels(result, c.container.Config.Labels)

    if c.latestStats == nil {
        return result
    }

    result = result.WithMetadata(map[string]string{
        NetworkRxDropped: strconv.FormatUint(c.latestStats.Network.RxDropped, 10),
        NetworkRxBytes:   strconv.FormatUint(c.latestStats.Network.RxBytes, 10),
        NetworkRxErrors:  strconv.FormatUint(c.latestStats.Network.RxErrors, 10),
        NetworkTxPackets: strconv.FormatUint(c.latestStats.Network.TxPackets, 10),
        NetworkTxDropped: strconv.FormatUint(c.latestStats.Network.TxDropped, 10),
        NetworkRxPackets: strconv.FormatUint(c.latestStats.Network.RxPackets, 10),
        NetworkTxErrors:  strconv.FormatUint(c.latestStats.Network.TxErrors, 10),
        NetworkTxBytes:   strconv.FormatUint(c.latestStats.Network.TxBytes, 10),

        MemoryMaxUsage: strconv.FormatUint(c.latestStats.MemoryStats.MaxUsage, 10),
        MemoryUsage:    strconv.FormatUint(c.latestStats.MemoryStats.Usage, 10),
        MemoryFailcnt:  strconv.FormatUint(c.latestStats.MemoryStats.Failcnt, 10),
        MemoryLimit:    strconv.FormatUint(c.latestStats.MemoryStats.Limit, 10),

        // CPUPercpuUsage: strconv.FormatUint(stats.CPUStats.CPUUsage.PercpuUsage, 10),
        CPUUsageInUsermode:   strconv.FormatUint(c.latestStats.CPUStats.CPUUsage.UsageInUsermode, 10),
        CPUTotalUsage:        strconv.FormatUint(c.latestStats.CPUStats.CPUUsage.TotalUsage, 10),
        CPUUsageInKernelmode: strconv.FormatUint(c.latestStats.CPUStats.CPUUsage.UsageInKernelmode, 10),
        CPUSystemCPUUsage:    strconv.FormatUint(c.latestStats.CPUStats.SystemCPUUsage, 10),
    })

    return result
}
                    docker.ContainerName: container2Name,
                    report.HostNodeID:    serverHostNodeID,
                }).
                    WithSets(report.EmptySets.
                        Add(docker.ContainerIPs, report.MakeStringSet(container2IP)).
                        Add(docker.ContainerIPsWithScopes, report.MakeStringSet(report.MakeAddressNodeID("", container2IP))),
                    ).WithTopology(report.Container),
                pauseContainerNodeID: report.MakeNodeWith(pauseContainerNodeID, map[string]string{
                    docker.ContainerID:   pauseContainerID,
                    docker.ContainerName: pauseContainerName,
                    report.HostNodeID:    serverHostNodeID,
                }).
                    WithSets(report.EmptySets.
                        Add(docker.ContainerIPs, report.MakeStringSet(pauseContainerIP)).
                        Add(docker.ContainerIPsWithScopes, report.MakeStringSet(report.MakeAddressNodeID("", pauseContainerIP))),
                    ).WithTopology(report.Container).WithLatest(report.DoesNotMakeConnections, mtime.Now(), ""),
            },
        },
        Host: report.Topology{
            Nodes: report.Nodes{
                serverHostNodeID: report.MakeNodeWith(serverHostNodeID, map[string]string{
                    report.HostNodeID: serverHostNodeID,
                }).
                    WithSets(report.EmptySets.
                        Add(host.LocalNetworks, report.MakeStringSet("192.168.0.0/16")),
                    ).WithTopology(report.Host),
            },
        },
    }
)
func TestNat(t *testing.T) {
    mtime.NowForce(mtime.Now())
    defer mtime.NowReset()

    // test that two containers, on the docker network, get their connections mapped
    // correctly.
    // the setup is this:
    //
    // container2 (10.0.47.2:22222), host2 (2.3.4.5:22223) ->
    //     host1 (1.2.3.4:80), container1 (10.0.47.1:80)

    // from the PoV of host1
    {
        f := makeFlow("")
        addIndependant(&f, 1, "")
        f.Original = addMeta(&f, "original", "2.3.4.5", "1.2.3.4", 22223, 80)
        f.Reply = addMeta(&f, "reply", "10.0.47.1", "2.3.4.5", 80, 22223)
        ct := &mockFlowWalker{
            flows: []flow{f},
        }

        have := report.MakeReport()
        originalID := report.MakeEndpointNodeID("host1", "10.0.47.1", "80")
        have.Endpoint.AddNode(originalID, report.MakeNodeWith(map[string]string{
            Addr:  "10.0.47.1",
            Port:  "80",
            "foo": "bar",
        }))

        want := have.Copy()
        want.Endpoint.AddNode(report.MakeEndpointNodeID("host1", "1.2.3.4", "80"), report.MakeNodeWith(map[string]string{
            Addr:      "1.2.3.4",
            Port:      "80",
            "copy_of": originalID,
            "foo":     "bar",
        }))

        makeNATMapper(ct).applyNAT(have, "host1")
        if !reflect.DeepEqual(want, have) {
            t.Fatal(test.Diff(want, have))
        }
    }

    // from the PoV of host2
    {
        f := makeFlow("")
        addIndependant(&f, 2, "")
        f.Original = addMeta(&f, "original", "10.0.47.2", "1.2.3.4", 22222, 80)
        f.Reply = addMeta(&f, "reply", "1.2.3.4", "2.3.4.5", 80, 22223)
        ct := &mockFlowWalker{
            flows: []flow{f},
        }

        have := report.MakeReport()
        originalID := report.MakeEndpointNodeID("host2", "10.0.47.2", "22222")
        have.Endpoint.AddNode(originalID, report.MakeNodeWith(map[string]string{
            Addr:  "10.0.47.2",
            Port:  "22222",
            "foo": "baz",
        }))

        want := have.Copy()
        want.Endpoint.AddNode(report.MakeEndpointNodeID("host2", "2.3.4.5", "22223"), report.MakeNodeWith(map[string]string{
            Addr:      "2.3.4.5",
            Port:      "22223",
            "copy_of": originalID,
            "foo":     "baz",
        }))

        makeNATMapper(ct).applyNAT(have, "host1")
        if !reflect.DeepEqual(want, have) {
            t.Fatal(test.Diff(want, have))
        }
    }
}
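// TestNat relies on a mockFlowWalker. A minimal sketch of what it can look
// like, assuming the flowWalker interface needs only walkFlows and stop:
type mockFlowWalker struct {
    flows []flow
}

// walkFlows replays the canned flows to the callback.
func (m *mockFlowWalker) walkFlows(f func(flow)) {
    for _, fl := range m.flows {
        f(fl)
    }
}

// stop is a no-op for the mock.
func (m *mockFlowWalker) stop() {}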