func TestReporter(t *testing.T) {
	want := report.MakeReport()
	pod1ID := report.MakePodNodeID("ping", "pong-a")
	pod2ID := report.MakePodNodeID("ping", "pong-b")
	serviceID := report.MakeServiceNodeID("ping", "pongservice")
	want.Pod = report.MakeTopology().AddNode(pod1ID, report.MakeNodeWith(map[string]string{
		kubernetes.PodID:           "ping/pong-a",
		kubernetes.PodName:         "pong-a",
		kubernetes.Namespace:       "ping",
		kubernetes.PodCreated:      pod1.Created(),
		kubernetes.PodContainerIDs: "container1 container2",
		kubernetes.ServiceIDs:      "ping/pongservice",
	}).WithParents(report.Sets{
		report.Service: report.MakeStringSet(serviceID),
	})).AddNode(pod2ID, report.MakeNodeWith(map[string]string{
		kubernetes.PodID:           "ping/pong-b",
		kubernetes.PodName:         "pong-b",
		kubernetes.Namespace:       "ping",
		kubernetes.PodCreated:      pod1.Created(),
		kubernetes.PodContainerIDs: "container3 container4",
		kubernetes.ServiceIDs:      "ping/pongservice",
	}).WithParents(report.Sets{
		report.Service: report.MakeStringSet(serviceID),
	}))
	want.Service = report.MakeTopology().AddNode(serviceID, report.MakeNodeWith(map[string]string{
		kubernetes.ServiceID:      "ping/pongservice",
		kubernetes.ServiceName:    "pongservice",
		kubernetes.Namespace:      "ping",
		kubernetes.ServiceCreated: pod1.Created(),
	}))
	want.Container = report.MakeTopology().AddNode(report.MakeContainerNodeID("container1"), report.MakeNodeWith(map[string]string{
		kubernetes.PodID:     "ping/pong-a",
		kubernetes.Namespace: "ping",
	}).WithParents(report.Sets{
		report.Pod: report.MakeStringSet(pod1ID),
	})).AddNode(report.MakeContainerNodeID("container2"), report.MakeNodeWith(map[string]string{
		kubernetes.PodID:     "ping/pong-a",
		kubernetes.Namespace: "ping",
	}).WithParents(report.Sets{
		report.Pod: report.MakeStringSet(pod1ID),
	})).AddNode(report.MakeContainerNodeID("container3"), report.MakeNodeWith(map[string]string{
		kubernetes.PodID:     "ping/pong-b",
		kubernetes.Namespace: "ping",
	}).WithParents(report.Sets{
		report.Pod: report.MakeStringSet(pod2ID),
	})).AddNode(report.MakeContainerNodeID("container4"), report.MakeNodeWith(map[string]string{
		kubernetes.PodID:     "ping/pong-b",
		kubernetes.Namespace: "ping",
	}).WithParents(report.Sets{
		report.Pod: report.MakeStringSet(pod2ID),
	}))

	reporter := kubernetes.NewReporter(mockClientInstance)
	have, _ := reporter.Report()
	if !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
func TestReporter(t *testing.T) {
	pod1ID := report.MakePodNodeID("ping", "pong-a")
	pod2ID := report.MakePodNodeID("ping", "pong-b")
	serviceID := report.MakeServiceNodeID("ping", "pongservice")
	rpt, _ := kubernetes.NewReporter(mockClientInstance).Report()

	// Reporter should have added the following pods
	for _, pod := range []struct {
		id            string
		parentService string
		latest        map[string]string
	}{
		{pod1ID, serviceID, map[string]string{
			kubernetes.PodID:           "ping/pong-a",
			kubernetes.PodName:         "pong-a",
			kubernetes.Namespace:       "ping",
			kubernetes.PodCreated:      pod1.Created(),
			kubernetes.PodContainerIDs: "container1 container2",
			kubernetes.ServiceIDs:      "ping/pongservice",
		}},
		{pod2ID, serviceID, map[string]string{
			kubernetes.PodID:           "ping/pong-b",
			kubernetes.PodName:         "pong-b",
			kubernetes.Namespace:       "ping",
			kubernetes.PodCreated:      pod1.Created(),
			kubernetes.PodContainerIDs: "container3 container4",
			kubernetes.ServiceIDs:      "ping/pongservice",
		}},
	} {
		node, ok := rpt.Pod.Nodes[pod.id]
		if !ok {
			t.Errorf("Expected report to have pod %q, but not found", pod.id)
		}
		if parents, ok := node.Parents.Lookup(report.Service); !ok || !parents.Contains(pod.parentService) {
			t.Errorf("Expected pod %s to have parent service %q, got %q", pod.id, pod.parentService, parents)
		}
		for k, want := range pod.latest {
			if have, ok := node.Latest.Lookup(k); !ok || have != want {
				t.Errorf("Expected pod %s latest %q: %q, got %q", pod.id, k, want, have)
			}
		}
	}

	// Reporter should have added a service
	{
		node, ok := rpt.Service.Nodes[serviceID]
		if !ok {
			t.Errorf("Expected report to have service %q, but not found", serviceID)
		}
		for k, want := range map[string]string{
			kubernetes.ServiceID:      "ping/pongservice",
			kubernetes.ServiceName:    "pongservice",
			kubernetes.Namespace:      "ping",
			kubernetes.ServiceCreated: pod1.Created(),
		} {
			if have, ok := node.Latest.Lookup(k); !ok || have != want {
				t.Errorf("Expected service %s latest %q: %q, got %q", serviceID, k, want, have)
			}
		}
	}

	// Reporter should have tagged the containers
	for _, pod := range []struct {
		id, nodeID string
		containers []string
	}{
		{"ping/pong-a", pod1ID, []string{"container1", "container2"}},
		{"ping/pong-b", pod2ID, []string{"container3", "container4"}},
	} {
		for _, containerID := range pod.containers {
			node, ok := rpt.Container.Nodes[report.MakeContainerNodeID(containerID)]
			if !ok {
				t.Errorf("Expected report to have container %q, but not found", containerID)
			}
			// container should have pod id
			if have, ok := node.Latest.Lookup(kubernetes.PodID); !ok || have != pod.id {
				t.Errorf("Expected container %s latest %q: %q, got %q", containerID, kubernetes.PodID, pod.id, have)
			}
			// container should have namespace
			if have, ok := node.Latest.Lookup(kubernetes.Namespace); !ok || have != "ping" {
				t.Errorf("Expected container %s latest %q: %q, got %q", containerID, kubernetes.Namespace, "ping", have)
			}
			// container should have pod parent
			if parents, ok := node.Parents.Lookup(report.Pod); !ok || !parents.Contains(pod.nodeID) {
				t.Errorf("Expected container %s to have parent pod %q, got %q", containerID, pod.nodeID, parents)
			}
		}
	}
}
// Main runs the probe
func probeMain() {
	var (
		targets            = []string{fmt.Sprintf("localhost:%d", xfer.AppPort)}
		token              = flag.String("token", "default-token", "probe token")
		httpListen         = flag.String("http.listen", "", "listen address for HTTP profiling and instrumentation server")
		publishInterval    = flag.Duration("publish.interval", 3*time.Second, "publish (output) interval")
		spyInterval        = flag.Duration("spy.interval", time.Second, "spy (scan) interval")
		spyProcs           = flag.Bool("processes", true, "report processes (needs root)")
		procRoot           = flag.String("proc.root", "/proc", "location of the proc filesystem")
		useConntrack       = flag.Bool("conntrack", true, "also use conntrack to track connections")
		insecure           = flag.Bool("insecure", false, "(SSL) explicitly allow \"insecure\" SSL connections and transfers")
		logPrefix          = flag.String("log.prefix", "<probe>", "prefix for each log line")
		logLevel           = flag.String("log.level", "info", "logging threshold level: debug|info|warn|error|fatal|panic")
		dockerEnabled      = flag.Bool("docker", false, "collect Docker-related attributes for processes")
		dockerInterval     = flag.Duration("docker.interval", 10*time.Second, "how often to update Docker attributes")
		dockerBridge       = flag.String("docker.bridge", "docker0", "the docker bridge name")
		kubernetesEnabled  = flag.Bool("kubernetes", false, "collect kubernetes-related attributes for containers, should only be enabled on the master node")
		kubernetesAPI      = flag.String("kubernetes.api", "", "Address of kubernetes master api")
		kubernetesInterval = flag.Duration("kubernetes.interval", 10*time.Second, "how often to do a full resync of the kubernetes data")
		weaveRouterAddr    = flag.String("weave.router.addr", "127.0.0.1:6784", "IP address & port of the Weave router")
		weaveDNSTarget     = flag.String("weave.hostname", fmt.Sprintf("scope.weave.local:%d", xfer.AppPort), "Hostname to lookup in weaveDNS")
	)
	flag.Parse()

	setLogLevel(*logLevel)
	setLogFormatter(*logPrefix)

	// Setup in memory metrics sink
	inm := metrics.NewInmemSink(time.Minute, 2*time.Minute)
	sig := metrics.DefaultInmemSignal(inm)
	defer sig.Stop()
	metrics.NewGlobal(metrics.DefaultConfig("scope-probe"), inm)

	defer log.Info("probe exiting")

	if *spyProcs && os.Getegid() != 0 {
		log.Warn("-process=true, but that requires root to find everything")
	}

	rand.Seed(time.Now().UnixNano())
	probeID := strconv.FormatInt(rand.Int63(), 16)
	var (
		hostName = hostname.Get()
		hostID   = hostName // TODO(pb): we should sanitize the hostname
	)
	log.Infof("probe starting, version %s, ID %s", version, probeID)
	go check()

	if len(flag.Args()) > 0 {
		targets = flag.Args()
	}
	log.Infof("publishing to: %s", strings.Join(targets, ", "))

	probeConfig := appclient.ProbeConfig{
		Token:    *token,
		ProbeID:  probeID,
		Insecure: *insecure,
	}
	clients := appclient.NewMultiAppClient(func(hostname, endpoint string) (appclient.AppClient, error) {
		return appclient.NewAppClient(
			probeConfig, hostname, endpoint,
			xfer.ControlHandlerFunc(controls.HandleControlRequest),
		)
	})
	defer clients.Stop()

	resolver := appclient.NewResolver(targets, net.LookupIP, clients.Set)
	defer resolver.Stop()

	processCache := process.NewCachingWalker(process.NewWalker(*procRoot))
	scanner := procspy.NewConnectionScanner(processCache)

	endpointReporter := endpoint.NewReporter(hostID, hostName, *spyProcs, *useConntrack, scanner)
	defer endpointReporter.Stop()

	p := probe.New(*spyInterval, *publishInterval, clients)
	p.AddTicker(processCache)
	p.AddReporter(
		endpointReporter,
		host.NewReporter(hostID, hostName),
		process.NewReporter(processCache, hostID, process.GetDeltaTotalJiffies),
	)
	p.AddTagger(probe.NewTopologyTagger(), host.NewTagger(hostID, probeID))

	if *dockerEnabled {
		if err := report.AddLocalBridge(*dockerBridge); err != nil {
			log.Errorf("Docker: problem with bridge %s: %v", *dockerBridge, err)
		}
		if registry, err := docker.NewRegistry(*dockerInterval, clients); err == nil {
			defer registry.Stop()
			p.AddTagger(docker.NewTagger(registry, processCache))
			p.AddReporter(docker.NewReporter(registry, hostID, p))
		} else {
			log.Errorf("Docker: failed to start registry: %v", err)
		}
	}

	if *kubernetesEnabled {
		if client, err := kubernetes.NewClient(*kubernetesAPI, *kubernetesInterval); err == nil {
			defer client.Stop()
			p.AddReporter(kubernetes.NewReporter(client))
		} else {
			log.Errorf("Kubernetes: failed to start client: %v", err)
			log.Errorf("Kubernetes: make sure to run Scope inside a POD with a service account or provide a valid kubernetes.api url")
		}
	}

	if *weaveRouterAddr != "" {
		client := weave.NewClient(sanitize.URL("http://", 6784, "")(*weaveRouterAddr))
		weave := overlay.NewWeave(hostID, client)
		defer weave.Stop()
		p.AddTagger(weave)
		p.AddReporter(weave)

		dockerBridgeIP, err := getFirstAddressOf(*dockerBridge)
		if err != nil {
			log.Println("Error getting docker bridge ip:", err)
		} else {
			weaveDNSLookup := appclient.LookupUsing(dockerBridgeIP + ":53")
			weaveResolver := appclient.NewResolver([]string{*weaveDNSTarget}, weaveDNSLookup, clients.Set)
			defer weaveResolver.Stop()
		}
	}

	if *httpListen != "" {
		go func() {
			log.Infof("Profiling data being exported to %s", *httpListen)
			log.Infof("go tool pprof http://%s/debug/pprof/{profile,heap,block}", *httpListen)
			log.Infof("Profiling endpoint %s terminated: %v", *httpListen, http.ListenAndServe(*httpListen, nil))
		}()
	}

	p.Start()
	defer p.Stop()

	common.SignalHandlerLoop()
}
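// getFirstAddressOf is called above but not defined in this excerpt. A minimal
// sketch of what it could look like, assuming it returns the first IPv4 address
// of the named interface as a string; the helper actually used by the probe may
// differ.
func getFirstAddressOf(name string) (string, error) {
	iface, err := net.InterfaceByName(name)
	if err != nil {
		return "", err
	}
	addrs, err := iface.Addrs()
	if err != nil {
		return "", err
	}
	for _, addr := range addrs {
		// Keep only IPNet addresses carrying an IPv4 address.
		if ipNet, ok := addr.(*net.IPNet); ok {
			if ip := ipNet.IP.To4(); ip != nil {
				return ip.String(), nil
			}
		}
	}
	return "", fmt.Errorf("no IPv4 address found for interface %s", name)
}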
func main() {
	var (
		targets            = []string{fmt.Sprintf("localhost:%d", xfer.AppPort), fmt.Sprintf("scope.weave.local:%d", xfer.AppPort)}
		token              = flag.String("token", "default-token", "probe token")
		httpListen         = flag.String("http.listen", "", "listen address for HTTP profiling and instrumentation server")
		publishInterval    = flag.Duration("publish.interval", 3*time.Second, "publish (output) interval")
		spyInterval        = flag.Duration("spy.interval", time.Second, "spy (scan) interval")
		spyProcs           = flag.Bool("processes", true, "report processes (needs root)")
		dockerEnabled      = flag.Bool("docker", false, "collect Docker-related attributes for processes")
		dockerInterval     = flag.Duration("docker.interval", 10*time.Second, "how often to update Docker attributes")
		dockerBridge       = flag.String("docker.bridge", "docker0", "the docker bridge name")
		kubernetesEnabled  = flag.Bool("kubernetes", false, "collect kubernetes-related attributes for containers, should only be enabled on the master node")
		kubernetesAPI      = flag.String("kubernetes.api", "http://localhost:8080", "Address of kubernetes master api")
		kubernetesInterval = flag.Duration("kubernetes.interval", 10*time.Second, "how often to do a full resync of the kubernetes data")
		weaveRouterAddr    = flag.String("weave.router.addr", "", "IP address or FQDN of the Weave router")
		procRoot           = flag.String("proc.root", "/proc", "location of the proc filesystem")
		printVersion       = flag.Bool("version", false, "print version number and exit")
		useConntrack       = flag.Bool("conntrack", true, "also use conntrack to track connections")
		insecure           = flag.Bool("insecure", false, "(SSL) explicitly allow \"insecure\" SSL connections and transfers")
		logPrefix          = flag.String("log.prefix", "<probe>", "prefix for each log line")
	)
	flag.Parse()

	if *printVersion {
		fmt.Println(version)
		return
	}

	// Setup in memory metrics sink
	inm := metrics.NewInmemSink(time.Minute, 2*time.Minute)
	sig := metrics.DefaultInmemSignal(inm)
	defer sig.Stop()
	metrics.NewGlobal(metrics.DefaultConfig("scope-probe"), inm)

	if !strings.HasSuffix(*logPrefix, " ") {
		*logPrefix += " "
	}
	log.SetPrefix(*logPrefix)

	defer log.Print("probe exiting")

	if *spyProcs && os.Getegid() != 0 {
		log.Printf("warning: -process=true, but that requires root to find everything")
	}

	rand.Seed(time.Now().UnixNano())
	probeID := strconv.FormatInt(rand.Int63(), 16)
	var (
		hostName = probe.Hostname()
		hostID   = hostName // TODO(pb): we should sanitize the hostname
	)
	log.Printf("probe starting, version %s, ID %s", version, probeID)

	addrs, err := net.InterfaceAddrs()
	if err != nil {
		log.Fatal(err)
	}
	localNets := report.Networks{}
	for _, addr := range addrs {
		// Not all addrs are IPNets.
		if ipNet, ok := addr.(*net.IPNet); ok {
			localNets = append(localNets, ipNet)
		}
	}

	if len(flag.Args()) > 0 {
		targets = flag.Args()
	}
	log.Printf("publishing to: %s", strings.Join(targets, ", "))

	factory := func(hostname, endpoint string) (string, xfer.Publisher, error) {
		id, publisher, err := xfer.NewHTTPPublisher(hostname, endpoint, *token, probeID, *insecure)
		if err != nil {
			return "", nil, err
		}
		return id, xfer.NewBackgroundPublisher(publisher), nil
	}

	publishers := xfer.NewMultiPublisher(factory)
	defer publishers.Stop()

	clients := xfer.NewMultiAppClient(xfer.ProbeConfig{
		Token:    *token,
		ProbeID:  probeID,
		Insecure: *insecure,
	}, xfer.ControlHandlerFunc(controls.HandleControlRequest), xfer.NewAppClient)
	defer clients.Stop()

	resolver := xfer.NewStaticResolver(targets, publishers.Set, clients.Set)
	defer resolver.Stop()

	endpointReporter := endpoint.NewReporter(hostID, hostName, *spyProcs, *useConntrack)
	defer endpointReporter.Stop()

	processCache := process.NewCachingWalker(process.NewWalker(*procRoot))

	p := probe.New(*spyInterval, *publishInterval, publishers)
	p.AddTicker(processCache)
	p.AddReporter(
		endpointReporter,
		host.NewReporter(hostID, hostName, localNets),
		process.NewReporter(processCache, hostID),
	)
	p.AddTagger(probe.NewTopologyTagger(), host.NewTagger(hostID, probeID))

	if *dockerEnabled {
		if err := report.AddLocalBridge(*dockerBridge); err != nil {
			log.Printf("Docker: problem with bridge %s: %v", *dockerBridge, err)
		}
		if registry, err := docker.NewRegistry(*dockerInterval); err == nil {
			defer registry.Stop()
			p.AddTagger(docker.NewTagger(registry, processCache))
			p.AddReporter(docker.NewReporter(registry, hostID, p))
		} else {
			log.Printf("Docker: failed to start registry: %v", err)
		}
	}

	if *kubernetesEnabled {
		if client, err := kubernetes.NewClient(*kubernetesAPI, *kubernetesInterval); err == nil {
			defer client.Stop()
			p.AddReporter(kubernetes.NewReporter(client))
		} else {
			log.Printf("Kubernetes: failed to start client: %v", err)
		}
	}

	if *weaveRouterAddr != "" {
		weave := overlay.NewWeave(hostID, *weaveRouterAddr)
		defer weave.Stop()
		p.AddTicker(weave)
		p.AddTagger(weave)
		p.AddReporter(weave)
	}

	if *httpListen != "" {
		go func() {
			log.Printf("Profiling data being exported to %s", *httpListen)
			log.Printf("go tool pprof http://%s/debug/pprof/{profile,heap,block}", *httpListen)
			log.Printf("Profiling endpoint %s terminated: %v", *httpListen, http.ListenAndServe(*httpListen, nil))
		}()
	}

	p.Start()
	defer p.Stop()

	log.Printf("%s", <-interrupt())
}
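// interrupt is used above to block until shutdown but is not shown in this
// excerpt. A minimal sketch, assuming it converts SIGINT/SIGTERM into a channel
// receive (imports: os, os/signal, syscall); the real helper may differ.
func interrupt() chan os.Signal {
	c := make(chan os.Signal, 1)
	// Relay termination signals to the channel so main can log and exit.
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	return c
}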
// Main runs the probe
func probeMain(flags probeFlags) {
	setLogLevel(flags.logLevel)
	setLogFormatter(flags.logPrefix)

	// Setup in memory metrics sink
	inm := metrics.NewInmemSink(time.Minute, 2*time.Minute)
	sig := metrics.DefaultInmemSignal(inm)
	defer sig.Stop()
	metrics.NewGlobal(metrics.DefaultConfig("scope-probe"), inm)

	defer log.Info("probe exiting")

	if flags.spyProcs && os.Getegid() != 0 {
		log.Warn("--probe.process=true, but that requires root to find everything")
	}

	rand.Seed(time.Now().UnixNano())
	var (
		probeID  = strconv.FormatInt(rand.Int63(), 16)
		hostName = hostname.Get()
		hostID   = hostName // TODO(pb): we should sanitize the hostname
	)
	log.Infof("probe starting, version %s, ID %s", version, probeID)
	log.Infof("command line: %v", os.Args)
	checkpointFlags := map[string]string{}
	if flags.kubernetesEnabled {
		checkpointFlags["kubernetes_enabled"] = "true"
	}
	go check(checkpointFlags)

	var targets = []string{}
	if flags.token != "" {
		// service mode
		if len(flag.Args()) == 0 {
			targets = append(targets, defaultServiceHost)
		}
	} else if !flags.noApp {
		targets = append(targets, fmt.Sprintf("localhost:%d", xfer.AppPort))
	}
	targets = append(targets, flag.Args()...)
	log.Infof("publishing to: %s", strings.Join(targets, ", "))

	probeConfig := appclient.ProbeConfig{
		Token:        flags.token,
		ProbeVersion: version,
		ProbeID:      probeID,
		Insecure:     flags.insecure,
	}

	clients := appclient.NewMultiAppClient(func(hostname, endpoint string) (appclient.AppClient, error) {
		return appclient.NewAppClient(
			probeConfig, hostname, endpoint,
			xfer.ControlHandlerFunc(controls.HandleControlRequest),
		)
	})
	defer clients.Stop()

	dnsLookupFn := net.LookupIP
	if flags.resolver != "" {
		dnsLookupFn = appclient.LookupUsing(flags.resolver)
	}
	resolver := appclient.NewResolver(targets, dnsLookupFn, clients.Set)
	defer resolver.Stop()

	p := probe.New(flags.spyInterval, flags.publishInterval, clients)

	hostReporter := host.NewReporter(hostID, hostName, probeID, version, clients)
	defer hostReporter.Stop()
	p.AddReporter(hostReporter)
	p.AddTagger(probe.NewTopologyTagger(), host.NewTagger(hostID))

	var processCache *process.CachingWalker
	var scanner procspy.ConnectionScanner
	if flags.procEnabled {
		processCache = process.NewCachingWalker(process.NewWalker(flags.procRoot))
		scanner = procspy.NewConnectionScanner(processCache)
		p.AddTicker(processCache)
		p.AddReporter(process.NewReporter(processCache, hostID, process.GetDeltaTotalJiffies))
	}

	endpointReporter := endpoint.NewReporter(hostID, hostName, flags.spyProcs, flags.useConntrack, flags.procEnabled, scanner)
	defer endpointReporter.Stop()
	p.AddReporter(endpointReporter)

	if flags.dockerEnabled {
		// Don't add the bridge in Kubernetes since container IPs are global and
		// shouldn't be scoped
		if !flags.kubernetesEnabled {
			if err := report.AddLocalBridge(flags.dockerBridge); err != nil {
				log.Errorf("Docker: problem with bridge %s: %v", flags.dockerBridge, err)
			}
		}
		if registry, err := docker.NewRegistry(flags.dockerInterval, clients, true, hostID); err == nil {
			defer registry.Stop()
			if flags.procEnabled {
				p.AddTagger(docker.NewTagger(registry, processCache))
			}
			p.AddReporter(docker.NewReporter(registry, hostID, probeID, p))
		} else {
			log.Errorf("Docker: failed to start registry: %v", err)
		}
	}

	if flags.kubernetesEnabled {
		if client, err := kubernetes.NewClient(flags.kubernetesAPI, flags.kubernetesInterval); err == nil {
			defer client.Stop()
			reporter := kubernetes.NewReporter(client, clients, probeID, hostID, p)
			defer reporter.Stop()
			p.AddReporter(reporter)
			p.AddTagger(reporter)
		} else {
			log.Errorf("Kubernetes: failed to start client: %v", err)
			log.Errorf("Kubernetes: make sure to run Scope inside a POD with a service account or provide a valid kubernetes.api url")
		}
	}

	if flags.weaveAddr != "" {
		client := weave.NewClient(sanitize.URL("http://", 6784, "")(flags.weaveAddr))
		weave := overlay.NewWeave(hostID, client)
		defer weave.Stop()
		p.AddTagger(weave)
		p.AddReporter(weave)

		dockerBridgeIP, err := network.GetFirstAddressOf(flags.dockerBridge)
		if err != nil {
			log.Println("Error getting docker bridge ip:", err)
		} else {
			weaveDNSLookup := appclient.LookupUsing(dockerBridgeIP + ":53")
			weaveResolver := appclient.NewResolver([]string{flags.weaveHostname}, weaveDNSLookup, clients.Set)
			defer weaveResolver.Stop()
		}
	}

	pluginRegistry, err := plugins.NewRegistry(
		flags.pluginsRoot,
		pluginAPIVersion,
		map[string]string{
			"probe_id":    probeID,
			"api_version": pluginAPIVersion,
		},
	)
	if err != nil {
		log.Errorf("plugins: problem loading: %v", err)
	} else {
		defer pluginRegistry.Close()
		p.AddReporter(pluginRegistry)
	}

	if flags.httpListen != "" {
		go func() {
			log.Infof("Profiling data being exported to %s", flags.httpListen)
			log.Infof("go tool pprof http://%s/debug/pprof/{profile,heap,block}", flags.httpListen)
			log.Infof("Profiling endpoint %s terminated: %v", flags.httpListen, http.ListenAndServe(flags.httpListen, nil))
		}()
	}

	p.Start()
	defer p.Stop()

	common.SignalHandlerLoop()
}
func main() {
	var (
		targets            = []string{fmt.Sprintf("localhost:%d", xfer.AppPort), fmt.Sprintf("scope.weave.local:%d", xfer.AppPort)}
		token              = flag.String("token", "default-token", "probe token")
		httpListen         = flag.String("http.listen", "", "listen address for HTTP profiling and instrumentation server")
		publishInterval    = flag.Duration("publish.interval", 3*time.Second, "publish (output) interval")
		spyInterval        = flag.Duration("spy.interval", time.Second, "spy (scan) interval")
		prometheusEndpoint = flag.String("prometheus.endpoint", "/metrics", "Prometheus metrics exposition endpoint (requires -http.listen)")
		spyProcs           = flag.Bool("processes", true, "report processes (needs root)")
		dockerEnabled      = flag.Bool("docker", false, "collect Docker-related attributes for processes")
		dockerInterval     = flag.Duration("docker.interval", 10*time.Second, "how often to update Docker attributes")
		dockerBridge       = flag.String("docker.bridge", "docker0", "the docker bridge name")
		kubernetesEnabled  = flag.Bool("kubernetes", false, "collect kubernetes-related attributes for containers, should only be enabled on the master node")
		kubernetesAPI      = flag.String("kubernetes.api", "http://localhost:8080", "Address of kubernetes master api")
		kubernetesInterval = flag.Duration("kubernetes.interval", 10*time.Second, "how often to do a full resync of the kubernetes data")
		weaveRouterAddr    = flag.String("weave.router.addr", "", "IP address or FQDN of the Weave router")
		procRoot           = flag.String("proc.root", "/proc", "location of the proc filesystem")
		printVersion       = flag.Bool("version", false, "print version number and exit")
		useConntrack       = flag.Bool("conntrack", true, "also use conntrack to track connections")
		insecure           = flag.Bool("insecure", false, "(SSL) explicitly allow \"insecure\" SSL connections and transfers")
		logPrefix          = flag.String("log.prefix", "<probe>", "prefix for each log line")
	)
	flag.Parse()

	if *printVersion {
		fmt.Println(version)
		return
	}

	if !strings.HasSuffix(*logPrefix, " ") {
		*logPrefix += " "
	}
	log.SetPrefix(*logPrefix)

	defer log.Print("probe exiting")

	if *spyProcs && os.Getegid() != 0 {
		log.Printf("warning: -process=true, but that requires root to find everything")
	}

	rand.Seed(time.Now().UnixNano())
	probeID := strconv.FormatInt(rand.Int63(), 16)
	var (
		hostName = hostname()
		hostID   = hostName // TODO(pb): we should sanitize the hostname
	)
	log.Printf("probe starting, version %s, ID %s", version, probeID)

	addrs, err := net.InterfaceAddrs()
	if err != nil {
		log.Fatal(err)
	}
	localNets := report.Networks{}
	for _, addr := range addrs {
		// Not all addrs are IPNets.
		if ipNet, ok := addr.(*net.IPNet); ok {
			localNets = append(localNets, ipNet)
		}
	}

	if len(flag.Args()) > 0 {
		targets = flag.Args()
	}
	log.Printf("publishing to: %s", strings.Join(targets, ", "))

	factory := func(hostname, endpoint string) (string, xfer.Publisher, error) {
		id, publisher, err := xfer.NewHTTPPublisher(hostname, endpoint, *token, probeID, *insecure)
		if err != nil {
			return "", nil, err
		}
		return id, xfer.NewBackgroundPublisher(publisher), nil
	}

	publishers := xfer.NewMultiPublisher(factory)
	defer publishers.Stop()

	clients := xfer.NewMultiAppClient(xfer.ProbeConfig{
		Token:    *token,
		ProbeID:  probeID,
		Insecure: *insecure,
	}, xfer.ControlHandlerFunc(controls.HandleControlRequest), xfer.NewAppClient)
	defer clients.Stop()

	resolver := newStaticResolver(targets, publishers.Set, clients.Set)
	defer resolver.Stop()

	endpointReporter := endpoint.NewReporter(hostID, hostName, *spyProcs, *useConntrack)
	defer endpointReporter.Stop()

	processCache := process.NewCachingWalker(process.NewWalker(*procRoot))

	var (
		tickers   = []Ticker{processCache}
		reporters = []Reporter{endpointReporter, host.NewReporter(hostID, hostName, localNets), process.NewReporter(processCache, hostID)}
		taggers   = []Tagger{newTopologyTagger(), host.NewTagger(hostID, probeID)}
	)

	dockerTagger, dockerReporter, dockerRegistry := func() (*docker.Tagger, *docker.Reporter, docker.Registry) {
		if !*dockerEnabled {
			return nil, nil, nil
		}
		if err := report.AddLocalBridge(*dockerBridge); err != nil {
			log.Printf("Docker: problem with bridge %s: %v", *dockerBridge, err)
			return nil, nil, nil
		}
		registry, err := docker.NewRegistry(*dockerInterval)
		if err != nil {
			log.Printf("Docker: failed to start registry: %v", err)
			return nil, nil, nil
		}
		return docker.NewTagger(registry, processCache), docker.NewReporter(registry, hostID), registry
	}()
	if dockerTagger != nil {
		taggers = append(taggers, dockerTagger)
	}
	if dockerReporter != nil {
		reporters = append(reporters, dockerReporter)
	}
	if dockerRegistry != nil {
		defer dockerRegistry.Stop()
	}

	if *kubernetesEnabled {
		if client, err := kubernetes.NewClient(*kubernetesAPI, *kubernetesInterval); err == nil {
			defer client.Stop()
			reporters = append(reporters, kubernetes.NewReporter(client))
		} else {
			log.Printf("Kubernetes: failed to start client: %v", err)
		}
	}

	if *weaveRouterAddr != "" {
		weave := overlay.NewWeave(hostID, *weaveRouterAddr)
		tickers = append(tickers, weave)
		taggers = append(taggers, weave)
		reporters = append(reporters, weave)
	}

	if *httpListen != "" {
		go func() {
			log.Printf("Profiling data being exported to %s", *httpListen)
			log.Printf("go tool pprof http://%s/debug/pprof/{profile,heap,block}", *httpListen)
			if *prometheusEndpoint != "" {
				log.Printf("exposing Prometheus endpoint at %s%s", *httpListen, *prometheusEndpoint)
				http.Handle(*prometheusEndpoint, makePrometheusHandler())
			}
			log.Printf("Profiling endpoint %s terminated: %v", *httpListen, http.ListenAndServe(*httpListen, nil))
		}()
	}

	quit, done := make(chan struct{}), sync.WaitGroup{}
	done.Add(2)
	defer func() { done.Wait() }() // second, wait for the main loops to be killed
	defer close(quit)              // first, kill the main loops

	var rpt syncReport
	rpt.swap(report.MakeReport())

	go func() {
		defer done.Done()
		spyTick := time.Tick(*spyInterval)
		for {
			select {
			case <-spyTick:
				start := time.Now()
				for _, ticker := range tickers {
					if err := ticker.Tick(); err != nil {
						log.Printf("error doing ticker: %v", err)
					}
				}
				localReport := rpt.copy()
				localReport = localReport.Merge(doReport(reporters))
				localReport = Apply(localReport, taggers)
				rpt.swap(localReport)

				if took := time.Since(start); took > *spyInterval {
					log.Printf("report generation took too long (%s)", took)
				}
			case <-quit:
				return
			}
		}
	}()

	go func() {
		defer done.Done()
		var (
			pubTick = time.Tick(*publishInterval)
			p       = xfer.NewReportPublisher(publishers)
		)
		for {
			select {
			case <-pubTick:
				publishTicks.WithLabelValues().Add(1)
				localReport := rpt.swap(report.MakeReport())
				localReport.Window = *publishInterval
				if err := p.Publish(localReport); err != nil {
					log.Printf("publish: %v", err)
				}
			case <-quit:
				return
			}
		}
	}()

	log.Printf("%s", <-interrupt())
}
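// syncReport, used by the spy and publish goroutines above, is not defined in
// this excerpt. A minimal sketch of what it might look like, inferred from its
// usage (swap exchanges the held report and returns the old one, copy returns a
// deep copy); it assumes report.Report has a Copy method, and the real type in
// the probe may differ.
type syncReport struct {
	mtx sync.Mutex
	rpt report.Report
}

// swap replaces the held report with other and returns the previous report.
func (s *syncReport) swap(other report.Report) report.Report {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	old := s.rpt
	s.rpt = other
	return old
}

// copy returns a copy of the held report without replacing it.
func (s *syncReport) copy() report.Report {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.rpt.Copy()
}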