Example #1
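This example is the probe's main entry point: it sets up an in-memory metrics sink, builds app clients for each publish target, wires reporters and taggers for endpoints, processes, Docker, Kubernetes, Weave and plugins, and then runs until it receives a terminating signal.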
// probeMain runs the probe
func probeMain(flags probeFlags) {
	setLogLevel(flags.logLevel)
	setLogFormatter(flags.logPrefix)

	// Set up the in-memory metrics sink
	inm := metrics.NewInmemSink(time.Minute, 2*time.Minute)
	sig := metrics.DefaultInmemSignal(inm)
	defer sig.Stop()
	metrics.NewGlobal(metrics.DefaultConfig("scope-probe"), inm)

	defer log.Info("probe exiting")

	if flags.spyProcs && os.Geteuid() != 0 {
		log.Warn("--probe.process=true, but that requires root to find everything")
	}

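	// Derive a random hex ID for this probe instance; for now the hostname
	// doubles as the host ID.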
	rand.Seed(time.Now().UnixNano())
	var (
		probeID  = strconv.FormatInt(rand.Int63(), 16)
		hostName = hostname.Get()
		hostID   = hostName // TODO(pb): we should sanitize the hostname
	)
	log.Infof("probe starting, version %s, ID %s", version, probeID)
	log.Infof("command line: %v", os.Args)
	checkpointFlags := map[string]string{}
	if flags.kubernetesEnabled {
		checkpointFlags["kubernetes_enabled"] = "true"
	}
	go check(checkpointFlags)

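	// Collect publish targets: the local app by default, plus any extra
	// endpoints passed on the command line.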
	targets := []string{}
	if !flags.noApp {
		targets = append(targets, fmt.Sprintf("localhost:%d", xfer.AppPort))
	}
	targets = append(targets, flag.Args()...)
	log.Infof("publishing to: %s", strings.Join(targets, ", "))

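	// Maintain one app client per target; control requests coming back from
	// the apps are dispatched to the probe's control handlers.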
	probeConfig := appclient.ProbeConfig{
		Token:    flags.token,
		ProbeID:  probeID,
		Insecure: flags.insecure,
	}
	clients := appclient.NewMultiAppClient(func(hostname, endpoint string) (appclient.AppClient, error) {
		return appclient.NewAppClient(
			probeConfig, hostname, endpoint,
			xfer.ControlHandlerFunc(controls.HandleControlRequest),
		)
	})
	defer clients.Stop()

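	// Resolve target names in the background, optionally via a user-supplied
	// DNS server, feeding the resulting endpoints to the client set.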
	dnsLookupFn := net.LookupIP
	if flags.resolver != "" {
		dnsLookupFn = appclient.LookupUsing(flags.resolver)
	}
	resolver := appclient.NewResolver(targets, dnsLookupFn, clients.Set)
	defer resolver.Stop()

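	// Walk the process table under flags.procRoot through a cache, and scan
	// it for network connections.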
	processCache := process.NewCachingWalker(process.NewWalker(flags.procRoot))
	scanner := procspy.NewConnectionScanner(processCache)

	endpointReporter := endpoint.NewReporter(hostID, hostName, flags.spyProcs, flags.useConntrack, scanner)
	defer endpointReporter.Stop()

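	// Assemble the probe: reporters generate report data, taggers enrich it.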
	p := probe.New(flags.spyInterval, flags.publishInterval, clients)
	p.AddTicker(processCache)
	hostReporter := host.NewReporter(hostID, hostName, probeID, version, clients)
	defer hostReporter.Stop()
	p.AddReporter(
		endpointReporter,
		hostReporter,
		process.NewReporter(processCache, hostID, process.GetDeltaTotalJiffies),
	)
	p.AddTagger(probe.NewTopologyTagger(), host.NewTagger(hostID))

	if flags.dockerEnabled {
		// Don't add the bridge in Kubernetes since container IPs are global and
		// shouldn't be scoped
		if !flags.kubernetesEnabled {
			if err := report.AddLocalBridge(flags.dockerBridge); err != nil {
				log.Errorf("Docker: problem with bridge %s: %v", flags.dockerBridge, err)
			}
		}
		if registry, err := docker.NewRegistry(flags.dockerInterval, clients, true, hostID); err == nil {
			defer registry.Stop()
			p.AddTagger(docker.NewTagger(registry, processCache))
			p.AddReporter(docker.NewReporter(registry, hostID, probeID, p))
		} else {
			log.Errorf("Docker: failed to start registry: %v", err)
		}
	}

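	// The Kubernetes reporter acts as both reporter and tagger; it needs API
	// access, either in-cluster via a service account or via flags.kubernetesAPI.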
	if flags.kubernetesEnabled {
		if client, err := kubernetes.NewClient(flags.kubernetesAPI, flags.kubernetesInterval); err == nil {
			defer client.Stop()
			reporter := kubernetes.NewReporter(client, clients, probeID, hostID, p)
			defer reporter.Stop()
			p.AddReporter(reporter)
			p.AddTagger(reporter)
		} else {
			log.Errorf("Kubernetes: failed to start client: %v", err)
			log.Errorf("Kubernetes: make sure to run Scope inside a POD with a service account or provide a valid kubernetes.api url")
		}
	}

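	// With a Weave router address configured, report and tag the overlay
	// network, and resolve the Weave hostname through weaveDNS on the Docker
	// bridge.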
	if flags.weaveAddr != "" {
		client := weave.NewClient(sanitize.URL("http://", 6784, "")(flags.weaveAddr))
		weave := overlay.NewWeave(hostID, client)
		defer weave.Stop()
		p.AddTagger(weave)
		p.AddReporter(weave)

		dockerBridgeIP, err := network.GetFirstAddressOf(flags.dockerBridge)
		if err != nil {
			log.Errorf("Error getting docker bridge IP: %v", err)
		} else {
			weaveDNSLookup := appclient.LookupUsing(dockerBridgeIP + ":53")
			weaveResolver := appclient.NewResolver([]string{flags.weaveHostname}, weaveDNSLookup, clients.Set)
			defer weaveResolver.Stop()
		}
	}

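	// Load probe plugins from the plugins root; the registry reports on
	// their behalf.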
	pluginRegistry, err := plugins.NewRegistry(
		flags.pluginsRoot,
		pluginAPIVersion,
		map[string]string{
			"probe_id":    probeID,
			"api_version": pluginAPIVersion,
		},
	)
	if err != nil {
		log.Errorf("plugins: problem loading: %v", err)
	} else {
		defer pluginRegistry.Close()
		p.AddReporter(pluginRegistry)
	}

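	// Optionally expose the net/http/pprof profiling endpoints over HTTP.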
	if flags.httpListen != "" {
		go func() {
			log.Infof("Profiling data being exported to %s", flags.httpListen)
			log.Infof("go tool pprof http://%s/debug/pprof/{profile,heap,block}", flags.httpListen)
			log.Infof("Profiling endpoint %s terminated: %v", flags.httpListen, http.ListenAndServe(flags.httpListen, nil))
		}()
	}

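	// Start the spy/publish loops and block until a terminating signal arrives.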
	p.Start()
	defer p.Stop()

	common.SignalHandlerLoop()
}
Example #2
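This example unit-tests host.NewReporter: it stubs out the host data collectors with deterministic fakes, generates a report, and asserts on the host node's latest values, local networks, and metrics.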
func TestReporter(t *testing.T) {
	var (
		release   = "release"
		version   = "version"
		network   = "192.168.0.0/16"
		hostID    = "hostid"
		hostname  = "hostname"
		timestamp = time.Now()
		metrics   = report.Metrics{
			host.Load1:       report.MakeMetric().Add(timestamp, 1.0),
			host.CPUUsage:    report.MakeMetric().Add(timestamp, 30.0).WithMax(100.0),
			host.MemoryUsage: report.MakeMetric().Add(timestamp, 60.0).WithMax(100.0),
		}
		uptime      = "278h55m43s"
		kernel      = "release version"
		_, ipnet, _ = net.ParseCIDR(network)
	)

	mtime.NowForce(timestamp)
	defer mtime.NowReset()

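	// Swap the host data collectors for deterministic stubs, restoring the
	// originals when the test returns.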
	var (
		oldGetKernelVersion    = host.GetKernelVersion
		oldGetLoad             = host.GetLoad
		oldGetUptime           = host.GetUptime
		oldGetCPUUsagePercent  = host.GetCPUUsagePercent
		oldGetMemoryUsageBytes = host.GetMemoryUsageBytes
		oldGetLocalNetworks    = host.GetLocalNetworks
	)
	defer func() {
		host.GetKernelVersion = oldGetKernelVersion
		host.GetLoad = oldGetLoad
		host.GetUptime = oldGetUptime
		host.GetCPUUsagePercent = oldGetCPUUsagePercent
		host.GetMemoryUsageBytes = oldGetMemoryUsageBytes
		host.GetLocalNetworks = oldGetLocalNetworks
	}()
	host.GetKernelVersion = func() (string, error) { return release + " " + version, nil }
	host.GetLoad = func(time.Time) report.Metrics { return metrics }
	host.GetUptime = func() (time.Duration, error) { return time.ParseDuration(uptime) }
	host.GetCPUUsagePercent = func() (float64, float64) { return 30.0, 100.0 }
	host.GetMemoryUsageBytes = func() (float64, float64) { return 60.0, 100.0 }
	host.GetLocalNetworks = func() ([]*net.IPNet, error) { return []*net.IPNet{ipnet}, nil }

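	// Generate a report with the stubbed collectors in place.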
	rpt, err := host.NewReporter(hostID, hostname, "", "", nil).Report()
	if err != nil {
		t.Fatal(err)
	}

	nodeID := report.MakeHostNodeID(hostID)
	node, ok := rpt.Host.Nodes[nodeID]
	if !ok {
		t.Fatalf("Expected host node %q, but not found", nodeID)
	}

	// Should have a bunch of expected latest keys
	for _, tuple := range []struct {
		key, want string
	}{
		{host.Timestamp, timestamp.UTC().Format(time.RFC3339Nano)},
		{host.HostName, hostname},
		{host.OS, runtime.GOOS},
		{host.Uptime, uptime},
		{host.KernelVersion, kernel},
	} {
		if have, ok := node.Latest.Lookup(tuple.key); !ok || have != tuple.want {
			t.Errorf("Expected %s %q, got %q", tuple.key, tuple.want, have)
		}
	}

	// Should have the local network
	if have, ok := node.Sets.Lookup(host.LocalNetworks); !ok || !have.Contains(network) {
		t.Errorf("Expected host.LocalNetworks to include %q, got %q", network, have)
	}

	// Should have metrics
	for key, want := range metrics {
		wantSample := want.LastSample()
		if metric, ok := node.Metrics[key]; !ok {
			t.Errorf("Expected %s metric, but not found", key)
		} else if sample := metric.LastSample(); sample == nil {
			t.Errorf("Expected %s metric to have a sample, but there were none", key)
		} else if sample.Value != wantSample.Value {
			t.Errorf("Expected %s metric sample %f, got %f", key, wantSample.Value, sample.Value)
		}
	}
}