Code example #1
File: reporter_test.go  Project: dilgerma/scope
func TestTagger(t *testing.T) {
	rpt := report.MakeReport()
	rpt.Container.AddNode(report.MakeNodeWith("container1", map[string]string{
		docker.LabelPrefix + "io.kubernetes.pod.uid": "123456",
	}))

	rpt, err := kubernetes.NewReporter(newMockClient(), nil, "", "", nil).Tag(rpt)
	if err != nil {
		t.Errorf("Unexpected error: %v", err)
	}

	have, ok := rpt.Container.Nodes["container1"].Parents.Lookup(report.Pod)
	want := report.EmptyStringSet.Add(report.MakePodNodeID("123456"))
	if !ok || !reflect.DeepEqual(have, want) {
		t.Errorf("Expected container to have pod parent %v %v", have, want)
	}
}
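
The mock Kubernetes client used above is defined elsewhere in reporter_test.go and is not shown here. The mechanism the test exercises is the label-to-parent mapping: the kubelet attaches the owning pod's UID to every container it starts as a Docker label, and the tagger turns that label into a pod parent. Below is a minimal, self-contained sketch of that idea, not Scope's actual tagger; podParentFromLabels and the literal label prefix are illustrative assumptions.

package main

import "fmt"

// The tagger keys off the pod-UID label the kubelet puts on each container.
// In the test above the key is docker.LabelPrefix + "io.kubernetes.pod.uid";
// the concrete prefix used here is only an assumption for illustration.
const podUIDLabel = "docker_label_io.kubernetes.pod.uid"

// podParentFromLabels is a hypothetical helper: given a container's Docker
// labels, it derives an illustrative pod node ID for the container's parent
// (the real ID format comes from report.MakePodNodeID).
func podParentFromLabels(labels map[string]string) (string, bool) {
	uid, ok := labels[podUIDLabel]
	if !ok || uid == "" {
		return "", false
	}
	return fmt.Sprintf("%s;<pod>", uid), true
}

func main() {
	labels := map[string]string{podUIDLabel: "123456"}
	if id, ok := podParentFromLabels(labels); ok {
		fmt.Println("pod parent:", id)
	}
}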
Code example #2
File: probe.go  Project: dilgerma/scope
// Main runs the probe
func probeMain(flags probeFlags) {
	setLogLevel(flags.logLevel)
	setLogFormatter(flags.logPrefix)

	// Setup in memory metrics sink
	inm := metrics.NewInmemSink(time.Minute, 2*time.Minute)
	sig := metrics.DefaultInmemSignal(inm)
	defer sig.Stop()
	metrics.NewGlobal(metrics.DefaultConfig("scope-probe"), inm)

	defer log.Info("probe exiting")

	if flags.spyProcs && os.Getegid() != 0 {
		log.Warn("--probe.process=true, but that requires root to find everything")
	}

	rand.Seed(time.Now().UnixNano())
	var (
		probeID  = strconv.FormatInt(rand.Int63(), 16)
		hostName = hostname.Get()
		hostID   = hostName // TODO(pb): we should sanitize the hostname
	)
	log.Infof("probe starting, version %s, ID %s", version, probeID)
	log.Infof("command line: %v", os.Args)
	checkpointFlags := map[string]string{}
	if flags.kubernetesEnabled {
		checkpointFlags["kubernetes_enabled"] = "true"
	}
	go check(checkpointFlags)

	var targets = []string{}
	if !flags.noApp {
		targets = append(targets, fmt.Sprintf("localhost:%d", xfer.AppPort))
	}
	if len(flag.Args()) > 0 {
		targets = append(targets, flag.Args()...)
	}
	log.Infof("publishing to: %s", strings.Join(targets, ", "))

	probeConfig := appclient.ProbeConfig{
		Token:    flags.token,
		ProbeID:  probeID,
		Insecure: flags.insecure,
	}
	clients := appclient.NewMultiAppClient(func(hostname, endpoint string) (appclient.AppClient, error) {
		return appclient.NewAppClient(
			probeConfig, hostname, endpoint,
			xfer.ControlHandlerFunc(controls.HandleControlRequest),
		)
	})
	defer clients.Stop()

	dnsLookupFn := net.LookupIP
	if flags.resolver != "" {
		dnsLookupFn = appclient.LookupUsing(flags.resolver)
	}
	resolver := appclient.NewResolver(targets, dnsLookupFn, clients.Set)
	defer resolver.Stop()

	processCache := process.NewCachingWalker(process.NewWalker(flags.procRoot))
	scanner := procspy.NewConnectionScanner(processCache)

	endpointReporter := endpoint.NewReporter(hostID, hostName, flags.spyProcs, flags.useConntrack, scanner)
	defer endpointReporter.Stop()

	p := probe.New(flags.spyInterval, flags.publishInterval, clients)
	p.AddTicker(processCache)
	hostReporter := host.NewReporter(hostID, hostName, probeID, version, clients)
	defer hostReporter.Stop()
	p.AddReporter(
		endpointReporter,
		hostReporter,
		process.NewReporter(processCache, hostID, process.GetDeltaTotalJiffies),
	)
	p.AddTagger(probe.NewTopologyTagger(), host.NewTagger(hostID))

	if flags.dockerEnabled {
		// Don't add the bridge in Kubernetes since container IPs are global and
		// shouldn't be scoped
		if !flags.kubernetesEnabled {
			if err := report.AddLocalBridge(flags.dockerBridge); err != nil {
				log.Errorf("Docker: problem with bridge %s: %v", flags.dockerBridge, err)
			}
		}
		if registry, err := docker.NewRegistry(flags.dockerInterval, clients, true, hostID); err == nil {
			defer registry.Stop()
			p.AddTagger(docker.NewTagger(registry, processCache))
			p.AddReporter(docker.NewReporter(registry, hostID, probeID, p))
		} else {
			log.Errorf("Docker: failed to start registry: %v", err)
		}
	}

	if flags.kubernetesEnabled {
		if client, err := kubernetes.NewClient(flags.kubernetesAPI, flags.kubernetesInterval); err == nil {
			defer client.Stop()
			reporter := kubernetes.NewReporter(client, clients, probeID, hostID, p)
			defer reporter.Stop()
			p.AddReporter(reporter)
			p.AddTagger(reporter)
		} else {
			log.Errorf("Kubernetes: failed to start client: %v", err)
			log.Errorf("Kubernetes: make sure to run Scope inside a POD with a service account or provide a valid kubernetes.api url")
		}
	}

	if flags.weaveAddr != "" {
		client := weave.NewClient(sanitize.URL("http://", 6784, "")(flags.weaveAddr))
		weave := overlay.NewWeave(hostID, client)
		defer weave.Stop()
		p.AddTagger(weave)
		p.AddReporter(weave)

		dockerBridgeIP, err := network.GetFirstAddressOf(flags.dockerBridge)
		if err != nil {
			log.Println("Error getting docker bridge ip:", err)
		} else {
			weaveDNSLookup := appclient.LookupUsing(dockerBridgeIP + ":53")
			weaveResolver := appclient.NewResolver([]string{flags.weaveHostname}, weaveDNSLookup, clients.Set)
			defer weaveResolver.Stop()
		}
	}

	pluginRegistry, err := plugins.NewRegistry(
		flags.pluginsRoot,
		pluginAPIVersion,
		map[string]string{
			"probe_id":    probeID,
			"api_version": pluginAPIVersion,
		},
	)
	if err != nil {
		log.Errorf("plugins: problem loading: %v", err)
	} else {
		defer pluginRegistry.Close()
		p.AddReporter(pluginRegistry)
	}

	if flags.httpListen != "" {
		go func() {
			log.Infof("Profiling data being exported to %s", flags.httpListen)
			log.Infof("go tool pprof http://%s/debug/pprof/{profile,heap,block}", flags.httpListen)
			log.Infof("Profiling endpoint %s terminated: %v", flags.httpListen, http.ListenAndServe(flags.httpListen, nil))
		}()
	}

	p.Start()
	defer p.Stop()

	common.SignalHandlerLoop()
}
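
probeMain reads everything it needs from a single probeFlags value that is defined elsewhere in the probe package. The sketch below reconstructs only the fields this function touches, with types inferred from how they are used; the field types, and any fields not referenced above, are assumptions.

package main

import "time"

// probeFlags as inferred from probeMain above; types are guesses based on
// usage (time.Duration for intervals, bool for feature switches).
type probeFlags struct {
	token     string
	logLevel  string
	logPrefix string

	spyInterval     time.Duration
	publishInterval time.Duration
	spyProcs        bool
	procRoot        string
	useConntrack    bool

	noApp      bool
	insecure   bool
	resolver   string
	httpListen string

	dockerEnabled  bool
	dockerInterval time.Duration
	dockerBridge   string

	kubernetesEnabled  bool
	kubernetesAPI      string
	kubernetesInterval time.Duration

	weaveAddr     string
	weaveHostname string

	pluginsRoot string
}

func main() {
	// Zero value only; in the real probe these fields are populated from
	// command-line flags before probeMain runs.
	_ = probeFlags{}
}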
Code example #3
File: reporter_test.go  Project: dilgerma/scope
func TestReporterGetLogs(t *testing.T) {
	oldGetNodeName := kubernetes.GetNodeName
	defer func() { kubernetes.GetNodeName = oldGetNodeName }()
	kubernetes.GetNodeName = func(*kubernetes.Reporter) (string, error) {
		return nodeName, nil
	}

	client := newMockClient()
	pipes := mockPipeClient{}
	reporter := kubernetes.NewReporter(client, pipes, "", "", nil)

	// Should error on invalid IDs
	{
		resp := reporter.CapturePod(reporter.GetLogs)(xfer.Request{
			NodeID:  "invalidID",
			Control: kubernetes.GetLogs,
		})
		if want := "Invalid ID: invalidID"; resp.Error != want {
			t.Errorf("Expected error on invalid ID: %q, got %q", want, resp.Error)
		}
	}

	// Should pass through errors from k8s (e.g if pod does not exist)
	{
		resp := reporter.CapturePod(reporter.GetLogs)(xfer.Request{
			AppID:   "appID",
			NodeID:  report.MakePodNodeID("notfound"),
			Control: kubernetes.GetLogs,
		})
		if want := "Pod not found: notfound"; resp.Error != want {
			t.Errorf("Expected error on invalid ID: %q, got %q", want, resp.Error)
		}
	}

	podNamespaceAndID := "ping;pong-a"
	pod1Request := xfer.Request{
		AppID:   "appID",
		NodeID:  report.MakePodNodeID(pod1UID),
		Control: kubernetes.GetLogs,
	}

	// Inject our logs content, and watch for it to be closed
	closed := false
	wantContents := "logs: ping/pong-a"
	client.logs[podNamespaceAndID] = &callbackReadCloser{Reader: strings.NewReader(wantContents), close: func() error {
		closed = true
		return nil
	}}

	// Should create a new pipe for the stream
	resp := reporter.CapturePod(reporter.GetLogs)(pod1Request)
	if resp.Pipe == "" {
		t.Errorf("Expected pipe id to be returned, but got %#v", resp)
	}
	pipe, ok := pipes[resp.Pipe]
	if !ok {
		t.Fatalf("Expected pipe %q to have been created, but wasn't", resp.Pipe)
	}

	// Should push logs from k8s client into the pipe
	_, readWriter := pipe.Ends()
	contents, err := ioutil.ReadAll(readWriter)
	if err != nil {
		t.Error(err)
	}
	if string(contents) != wantContents {
		t.Errorf("Expected pipe to contain %q, but got %q", wantContents, string(contents))
	}

	// Should close the stream when the pipe closes
	if err := pipe.Close(); err != nil {
		t.Error(err)
	}
	if !closed {
		t.Errorf("Expected pipe to close the underlying log stream")
	}
}
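
The callbackReadCloser helper injected above is also defined elsewhere in reporter_test.go. From its use it embeds an io.Reader and runs a callback on Close, which is what lets the test observe that closing the Scope pipe also closes the underlying Kubernetes log stream. A minimal sketch consistent with that usage follows; the project's real helper may differ in detail.

package main

import (
	"fmt"
	"io"
	"strings"
)

// callbackReadCloser wraps a Reader and invokes a callback when closed,
// so a test can assert that the consumer shut the stream down.
type callbackReadCloser struct {
	io.Reader
	close func() error
}

func (c *callbackReadCloser) Close() error { return c.close() }

func main() {
	closed := false
	rc := &callbackReadCloser{
		Reader: strings.NewReader("logs: ping/pong-a"),
		close:  func() error { closed = true; return nil },
	}
	_ = rc.Close()
	fmt.Println("closed:", closed)
}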
Code example #4
File: reporter_test.go  Project: dilgerma/scope
func TestReporter(t *testing.T) {
	oldGetNodeName := kubernetes.GetNodeName
	defer func() { kubernetes.GetNodeName = oldGetNodeName }()
	kubernetes.GetNodeName = func(*kubernetes.Reporter) (string, error) {
		return nodeName, nil
	}

	pod1ID := report.MakePodNodeID(pod1UID)
	pod2ID := report.MakePodNodeID(pod2UID)
	serviceID := report.MakeServiceNodeID(serviceUID)
	rpt, _ := kubernetes.NewReporter(newMockClient(), nil, "", "foo", nil).Report()

	// Reporter should have added the following pods
	for _, pod := range []struct {
		id            string
		parentService string
		latest        map[string]string
	}{
		{pod1ID, serviceID, map[string]string{
			kubernetes.ID:        "ping/pong-a",
			kubernetes.Name:      "pong-a",
			kubernetes.Namespace: "ping",
			kubernetes.Created:   pod1.Created(),
		}},
		{pod2ID, serviceID, map[string]string{
			kubernetes.ID:        "ping/pong-b",
			kubernetes.Name:      "pong-b",
			kubernetes.Namespace: "ping",
			kubernetes.Created:   pod1.Created(),
		}},
	} {
		node, ok := rpt.Pod.Nodes[pod.id]
		if !ok {
			t.Errorf("Expected report to have pod %q, but not found", pod.id)
		}

		if parents, ok := node.Parents.Lookup(report.Service); !ok || !parents.Contains(pod.parentService) {
			t.Errorf("Expected pod %s to have parent service %q, got %q", pod.id, pod.parentService, parents)
		}

		for k, want := range pod.latest {
			if have, ok := node.Latest.Lookup(k); !ok || have != want {
				t.Errorf("Expected pod %s latest %q: %q, got %q", pod.id, k, want, have)
			}
		}
	}

	// Reporter should have added a service
	{
		node, ok := rpt.Service.Nodes[serviceID]
		if !ok {
			t.Errorf("Expected report to have service %q, but not found", serviceID)
		}

		for k, want := range map[string]string{
			kubernetes.ID:        "ping/pongservice",
			kubernetes.Name:      "pongservice",
			kubernetes.Namespace: "ping",
			kubernetes.Created:   pod1.Created(),
		} {
			if have, ok := node.Latest.Lookup(k); !ok || have != want {
				t.Errorf("Expected service %s latest %q: %q, got %q", serviceID, k, want, have)
			}
		}
	}
}
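
Both reporter tests above lean on package-level fixtures (nodeName, pod1UID, pod2UID, serviceUID, plus mock pod objects pod1 and pod2 that expose at least a Created() accessor) defined elsewhere in reporter_test.go. The block below only illustrates their shape; every value is a placeholder, not the project's actual test data.

// Hypothetical fixture values, shown only so the tests above can be read in
// isolation; the real reporter_test.go defines its own data and mock pods.
const (
	nodeName   = "node.example.com"
	pod1UID    = "pod1-uid-placeholder"
	pod2UID    = "pod2-uid-placeholder"
	serviceUID = "service-uid-placeholder"
)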