func (r *Reporter) addConnection(rpt *report.Report, t fourTuple, extraFromNode, extraToNode map[string]string) {
	// Update endpoint topology
	if !r.includeProcesses {
		return
	}
	var (
		fromEndpointNodeID = report.MakeEndpointNodeID(r.hostID, t.fromAddr, strconv.Itoa(int(t.fromPort)))
		toEndpointNodeID   = report.MakeEndpointNodeID(r.hostID, t.toAddr, strconv.Itoa(int(t.toPort)))

		fromNode = report.MakeNodeWith(fromEndpointNodeID, map[string]string{
			Addr: t.fromAddr,
			Port: strconv.Itoa(int(t.fromPort)),
		}).WithEdge(toEndpointNodeID, report.EdgeMetadata{})
		toNode = report.MakeNodeWith(toEndpointNodeID, map[string]string{
			Addr: t.toAddr,
			Port: strconv.Itoa(int(t.toPort)),
		})
	)

	// In case we have a reverse resolution for the IP, we can use it for
	// the name...
	if toNames, err := r.reverseResolver.get(t.toAddr); err == nil {
		toNode = toNode.WithSet(ReverseDNSNames, report.MakeStringSet(toNames...))
	}

	if extraFromNode != nil {
		fromNode = fromNode.WithLatests(extraFromNode)
	}
	if extraToNode != nil {
		toNode = toNode.WithLatests(extraToNode)
	}

	rpt.Endpoint = rpt.Endpoint.AddNode(fromNode)
	rpt.Endpoint = rpt.Endpoint.AddNode(toNode)
}
// interpolateCounts compensates for sampling by artificially inflating counts
// throughout the report. It should be run once for each report, within the
// probe, before it gets emitted into the rest of the system.
func interpolateCounts(r report.Report) {
	rate := r.Sampling.Rate()
	if rate >= 1.0 {
		return
	}
	factor := 1.0 / rate
	for _, topology := range r.Topologies() {
		for _, nmd := range topology.Nodes {
			nmd.Edges.ForEach(func(_ string, emd report.EdgeMetadata) {
				if emd.EgressPacketCount != nil {
					*emd.EgressPacketCount = uint64(float64(*emd.EgressPacketCount) * factor)
				}
				if emd.IngressPacketCount != nil {
					*emd.IngressPacketCount = uint64(float64(*emd.IngressPacketCount) * factor)
				}
				if emd.EgressByteCount != nil {
					*emd.EgressByteCount = uint64(float64(*emd.EgressByteCount) * factor)
				}
				if emd.IngressByteCount != nil {
					*emd.IngressByteCount = uint64(float64(*emd.IngressByteCount) * factor)
				}
			})
		}
	}
}
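
// A minimal standalone sketch of the same scaling arithmetic, independent of
// the report package (interpolateOne and its arguments are illustrative names,
// not Scope APIs): with a sampling rate of 0.25 the factor is 4, so an
// observed count of 10 becomes an estimated 40.
func interpolateOne(count *uint64, rate float64) {
	if count == nil || rate >= 1.0 {
		return
	}
	factor := 1.0 / rate
	*count = uint64(float64(*count) * factor)
}

// Usage sketch:
//	n := uint64(10)
//	interpolateOne(&n, 0.25)
//	// n == 40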
// groupNodeSummary renders the summary for a group node. n.Topology is
// expected to be of the form: group:container:hostname
func groupNodeSummary(base NodeSummary, r report.Report, n report.Node) (NodeSummary, bool) {
	parts := strings.Split(n.Topology, ":")
	if len(parts) != 3 {
		return NodeSummary{}, false
	}

	label, ok := n.Latest.Lookup(parts[2])
	if !ok {
		return NodeSummary{}, false
	}
	base.Label, base.Rank = label, label

	t, ok := r.Topology(parts[1])
	if ok && t.Label != "" {
		if count, ok := n.Counters.Lookup(parts[1]); ok {
			if count == 1 {
				base.LabelMinor = fmt.Sprintf("%d %s", count, t.Label)
			} else {
				base.LabelMinor = fmt.Sprintf("%d %s", count, t.LabelPlural)
			}
		}
	}

	base.Shape = t.GetShape()
	base.Stack = true
	return base, true
}
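
// Illustrative sketch of how the group:container:hostname form is taken apart
// above (the function name is hypothetical, and the strings/fmt imports used
// elsewhere in this package are assumed): parts[1] names the grouped topology
// whose counter and labels are consulted, and parts[2] is the Latest key whose
// value becomes the node's label and rank.
func exampleGroupTopologyID() {
	parts := strings.Split("group:container:hostname", ":")
	fmt.Println(parts[1]) // "container": topology looked up for counts and labels
	fmt.Println(parts[2]) // "hostname": key looked up in n.Latest for the label
}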
// NodeTables produces a list of tables (to be consumed directly by the UI)
// based on the report and the node. It uses the report to get the templates
// for the node's topology.
func NodeTables(r report.Report, n report.Node) []report.Table {
	if _, ok := n.Counters.Lookup(n.Topology); ok {
		// This is a group of nodes, so no tables!
		return nil
	}
	if topology, ok := r.Topology(n.Topology); ok {
		return topology.TableTemplates.Tables(n)
	}
	return nil
}
// NodeMetadata produces a list of metadata rows (to be consumed directly by
// the UI) based on a report.Node, which is (hopefully) a node in one of our
// topologies.
func NodeMetadata(r report.Report, n report.Node) []report.MetadataRow {
	if _, ok := n.Counters.Lookup(n.Topology); ok {
		// This is a group of nodes, so no metadata!
		return nil
	}
	if topology, ok := r.Topology(n.Topology); ok {
		return topology.MetadataTemplates.MetadataRows(n)
	}
	return nil
}
func baseNodeSummary(r report.Report, n report.Node) NodeSummary {
	t, _ := r.Topology(n.Topology)
	return NodeSummary{
		ID:        n.ID,
		Shape:     t.GetShape(),
		Linkable:  true,
		Metadata:  NodeMetadata(r, n),
		Metrics:   NodeMetrics(r, n),
		Tables:    NodeTables(r, n),
		Adjacency: n.Adjacency.Copy(),
	}
}
// NodeMetrics produces a list of metric rows (to be consumed directly by the
// UI) based on a report.Node, which is (hopefully) a node in one of our
// topologies.
func NodeMetrics(r report.Report, n report.Node) []report.MetricRow {
	if _, ok := n.Counters.Lookup(n.Topology); ok {
		// This is a group of nodes, so no metrics!
		return nil
	}
	topology, ok := r.Topology(n.Topology)
	if !ok {
		return nil
	}
	return topology.MetricTemplates.MetricRows(n)
}
func (p *Probe) drainAndPublish(rpt report.Report, rs chan report.Report) {
ForLoop:
	for {
		select {
		case r := <-rs:
			rpt = rpt.Merge(r)
		default:
			break ForLoop
		}
	}

	if err := p.publisher.Publish(rpt); err != nil {
		log.Infof("publish: %v", err)
	}
}
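
// The labeled select/default loop above is the standard non-blocking channel
// drain: it merges every report already queued on rs, then falls through to
// publish without waiting for more to arrive. A minimal sketch of the same
// pattern with plain ints (drainSum and pending are illustrative names, not
// Scope APIs):
func drainSum(pending chan int) int {
	sum := 0
Drain:
	for {
		select {
		case n := <-pending:
			sum += n
		default:
			break Drain // channel empty: stop without blocking
		}
	}
	return sum
}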
func controls(r report.Report, n report.Node) []ControlInstance {
	if t, ok := r.Topology(n.Topology); ok {
		return controlsFor(t, n.ID)
	}
	return []ControlInstance{}
}
// Merge puts the packet into the report.
//
// Note that, for the moment, we encode bidirectional traffic as ingress and
// egress traffic on a single edge whose src is local and dst is remote. That
// is, if we see a packet from the remote addr 9.8.7.6 to the local addr
// 1.2.3.4, we apply it as *ingress* on the edge (1.2.3.4 -> 9.8.7.6).
func (s *Sniffer) Merge(p Packet, rpt *report.Report) {
	if p.SrcIP == "" || p.DstIP == "" {
		return
	}

	// One end of the traffic has to be local. Otherwise, we don't know how to
	// construct the edge.
	//
	// If we need to get around this limitation, we may be able to change the
	// semantics of the report, and allow the src side of edges to be from
	// anywhere. But that will have ramifications throughout Scope (read: it
	// may violate implicit invariants) and needs to be thought through.
	var (
		srcLocal   = s.localNets.Contains(net.ParseIP(p.SrcIP))
		dstLocal   = s.localNets.Contains(net.ParseIP(p.DstIP))
		localIP    string
		remoteIP   string
		localPort  string
		remotePort string
		egress     bool
	)
	switch {
	case srcLocal && !dstLocal:
		localIP, localPort, remoteIP, remotePort, egress = p.SrcIP, p.SrcPort, p.DstIP, p.DstPort, true
	case !srcLocal && dstLocal:
		localIP, localPort, remoteIP, remotePort, egress = p.DstIP, p.DstPort, p.SrcIP, p.SrcPort, false
	case srcLocal && dstLocal:
		localIP, localPort, remoteIP, remotePort, egress = p.SrcIP, p.SrcPort, p.DstIP, p.DstPort, true // loopback
	case !srcLocal && !dstLocal:
		log.Printf("sniffer ignoring remote-to-remote (%s -> %s) traffic", p.SrcIP, p.DstIP)
		return
	}

	addAdjacency := func(t report.Topology, srcNodeID, dstNodeID string) report.Topology {
		result := t.AddNode(report.MakeNode(srcNodeID).WithAdjacent(dstNodeID))
		result = result.AddNode(report.MakeNode(dstNodeID))
		return result
	}

	// If we have ports, we can add to the endpoint topology, too.
	if p.SrcPort != "" && p.DstPort != "" {
		var (
			srcNodeID = report.MakeEndpointNodeID(s.hostID, localIP, localPort)
			dstNodeID = report.MakeEndpointNodeID(s.hostID, remoteIP, remotePort)
		)
		rpt.Endpoint = addAdjacency(rpt.Endpoint, srcNodeID, dstNodeID)

		node := rpt.Endpoint.Nodes[srcNodeID]
		emd, _ := node.Edges.Lookup(dstNodeID)
		if egress {
			if emd.EgressPacketCount == nil {
				emd.EgressPacketCount = new(uint64)
			}
			*emd.EgressPacketCount++
			if emd.EgressByteCount == nil {
				emd.EgressByteCount = new(uint64)
			}
			*emd.EgressByteCount += uint64(p.Transport)
		} else {
			if emd.IngressPacketCount == nil {
				emd.IngressPacketCount = new(uint64)
			}
			*emd.IngressPacketCount++
			if emd.IngressByteCount == nil {
				emd.IngressByteCount = new(uint64)
			}
			*emd.IngressByteCount += uint64(p.Transport)
		}
		rpt.Endpoint.Nodes[srcNodeID] = node.WithEdge(dstNodeID, emd)
	}
}
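
// Worked example of the edge orientation described in Merge's comment, as a
// standalone sketch using only the standard library (the CIDR, addresses, and
// function name are made up for illustration; the net and fmt imports are
// assumed): a packet from remote 9.8.7.6 to local 1.2.3.4 is applied as
// ingress on the edge whose src is the local address, so both directions of a
// conversation accumulate on one edge keyed by the local endpoint.
func exampleEdgeOrientation() {
	_, localNet, _ := net.ParseCIDR("1.2.3.0/24")
	src, dst := net.ParseIP("9.8.7.6"), net.ParseIP("1.2.3.4")
	srcLocal, dstLocal := localNet.Contains(src), localNet.Contains(dst)
	if !srcLocal && dstLocal {
		// The local end becomes the edge src; the packet counts as ingress.
		fmt.Printf("edge %s -> %s, ingress\n", dst, src) // edge 1.2.3.4 -> 9.8.7.6, ingress
	}
}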
// Render implements Renderer
func (t TopologySelector) Render(r report.Report, _ Decorator) report.Nodes {
	topology, _ := r.Topology(string(t))
	return topology.Nodes
}