Example 1
func TestPipeTimeout(t *testing.T) {
	router := mux.NewRouter()
	pr := RegisterPipeRoutes(router)
	pr.Stop() // we don't want the loop running in the background

	mtime.NowForce(time.Now())
	defer mtime.NowReset()

	// create a new pipe.
	id := "foo"
	pipe, ok := pr.getOrCreate(id)
	if !ok {
		t.Fatalf("not ok")
	}

	// move time forward such that the new pipe should time out
	mtime.NowForce(mtime.Now().Add(pipeTimeout))
	pr.timeout()
	if !pipe.Closed() {
		t.Fatalf("pipe didn't timeout")
	}

	// move time forward such that the pipe should be GCd
	mtime.NowForce(mtime.Now().Add(gcTimeout))
	pr.garbageCollect()
	if _, ok := pr.pipes[id]; ok {
		t.Fatalf("pipe not gc'd")
	}
}
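
All of these tests pin the clock with mtime.NowForce and restore it with mtime.NowReset. As a rough sketch (an assumption about how such a clock-override helper can be built, not the actual mtime package), the mechanism is a swappable package-level clock function:

package mtime

import "time"

// now is the swappable clock; code under test calls mtime.Now() instead of time.Now().
var now = time.Now

// Now returns the current (possibly forced) time.
func Now() time.Time { return now() }

// NowForce pins the clock to a fixed instant.
func NowForce(t time.Time) { now = func() time.Time { return t } }

// NowReset restores the real clock.
func NowReset() { now = time.Now }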
Example 2
func TestReporter(t *testing.T) {
	var (
		release   = "release"
		version   = "version"
		network   = "192.168.0.0/16"
		hostID    = "hostid"
		hostname  = "hostname"
		timestamp = time.Now()
		load      = report.Metrics{
			host.Load1:    report.MakeMetric().Add(timestamp, 1.0),
			host.Load5:    report.MakeMetric().Add(timestamp, 5.0),
			host.Load15:   report.MakeMetric().Add(timestamp, 15.0),
			host.CPUUsage: report.MakeMetric().Add(timestamp, 30.0).WithMax(100.0),
			host.MemUsage: report.MakeMetric().Add(timestamp, 60.0).WithMax(100.0),
		}
		uptime      = "278h55m43s"
		kernel      = "release version"
		_, ipnet, _ = net.ParseCIDR(network)
		localNets   = report.Networks([]*net.IPNet{ipnet})
	)

	mtime.NowForce(timestamp)
	defer mtime.NowReset()

	var (
		oldGetKernelVersion    = host.GetKernelVersion
		oldGetLoad             = host.GetLoad
		oldGetUptime           = host.GetUptime
		oldGetCPUUsagePercent  = host.GetCPUUsagePercent
		oldGetMemoryUsageBytes = host.GetMemoryUsageBytes
	)
	defer func() {
		host.GetKernelVersion = oldGetKernelVersion
		host.GetLoad = oldGetLoad
		host.GetUptime = oldGetUptime
		host.GetCPUUsagePercent = oldGetCPUUsagePercent
		host.GetMemoryUsageBytes = oldGetMemoryUsageBytes
	}()
	host.GetKernelVersion = func() (string, error) { return release + " " + version, nil }
	host.GetLoad = func(time.Time) report.Metrics { return load }
	host.GetUptime = func() (time.Duration, error) { return time.ParseDuration(uptime) }
	host.GetCPUUsagePercent = func() (float64, float64) { return 30.0, 100.0 }
	host.GetMemoryUsageBytes = func() (float64, float64) { return 60.0, 100.0 }

	want := report.MakeReport()
	want.Host.AddNode(report.MakeHostNodeID(hostID), report.MakeNodeWith(map[string]string{
		host.Timestamp:     timestamp.UTC().Format(time.RFC3339Nano),
		host.HostName:      hostname,
		host.OS:            runtime.GOOS,
		host.Uptime:        uptime,
		host.KernelVersion: kernel,
	}).WithSets(report.Sets{
		host.LocalNetworks: report.MakeStringSet(network),
	}).WithMetrics(load))
	have, _ := host.NewReporter(hostID, hostname, localNets).Report()
	if !reflect.DeepEqual(want, have) {
		t.Errorf("%s", test.Diff(want, have))
	}
}
Example 3
func TestReporter(t *testing.T) {
	walker := &mockWalker{
		processes: []process.Process{
			{PID: 1, PPID: 0, Name: "init"},
			{PID: 2, PPID: 1, Name: "bash"},
			{PID: 3, PPID: 1, Name: "apache", Threads: 2},
			{PID: 4, PPID: 2, Name: "ping", Cmdline: "ping foo.bar.local"},
			{PID: 5, PPID: 1, Cmdline: "tail -f /var/log/syslog"},
		},
	}
	getDeltaTotalJiffies := func() (uint64, float64, error) { return 0, 0., nil }
	now := time.Now()
	mtime.NowForce(now)
	defer mtime.NowReset()

	reporter := process.NewReporter(walker, "", getDeltaTotalJiffies)
	want := report.MakeReport()
	want.Process = report.MakeTopology().AddNode(
		report.MakeProcessNodeID("", "1"), report.MakeNodeWith(map[string]string{
			process.PID:     "1",
			process.Name:    "init",
			process.Threads: "0",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	).AddNode(
		report.MakeProcessNodeID("", "2"), report.MakeNodeWith(map[string]string{
			process.PID:     "2",
			process.Name:    "bash",
			process.PPID:    "1",
			process.Threads: "0",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	).AddNode(
		report.MakeProcessNodeID("", "3"), report.MakeNodeWith(map[string]string{
			process.PID:     "3",
			process.Name:    "apache",
			process.PPID:    "1",
			process.Threads: "2",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	).AddNode(
		report.MakeProcessNodeID("", "4"), report.MakeNodeWith(map[string]string{
			process.PID:     "4",
			process.Name:    "ping",
			process.PPID:    "2",
			process.Cmdline: "ping foo.bar.local",
			process.Threads: "0",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	).AddNode(
		report.MakeProcessNodeID("", "5"), report.MakeNodeWith(map[string]string{
			process.PID:     "5",
			process.PPID:    "1",
			process.Cmdline: "tail -f /var/log/syslog",
			process.Threads: "0",
		}).WithMetric(process.MemoryUsage, report.MakeMetric().Add(now, 0.)),
	)

	have, err := reporter.Report()
	if err != nil || !reflect.DeepEqual(want, have) {
		t.Errorf("%s (%v)", test.Diff(want, have), err)
	}
}
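
The mockWalker used by this test is not shown. Assuming process.Walker exposes a Walk method that invokes a callback once per process (the two-argument callback signature is an assumption), a test double along these lines would do; it reuses the test file's process import:

// mockWalker is a hypothetical stand-in for process.Walker.
type mockWalker struct {
	processes []process.Process
}

// Walk calls f once for each canned process; the second argument stands in
// for the "previous" sample, which this stub leaves empty.
func (m *mockWalker) Walk(f func(process.Process, process.Process)) error {
	for _, p := range m.processes {
		f(p, process.Process{})
	}
	return nil
}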
Example 4
func TestTagger(t *testing.T) {
	mtime.NowForce(time.Now())
	defer mtime.NowReset()

	oldProcessTree := docker.NewProcessTreeStub
	defer func() { docker.NewProcessTreeStub = oldProcessTree }()

	docker.NewProcessTreeStub = func(_ process.Walker) (process.Tree, error) {
		return &mockProcessTree{map[int]int{3: 2}}, nil
	}

	var (
		pid1NodeID = report.MakeProcessNodeID("somehost.com", "2")
		pid2NodeID = report.MakeProcessNodeID("somehost.com", "3")
	)

	input := report.MakeReport()
	input.Process.AddNode(pid1NodeID, report.MakeNodeWith(map[string]string{process.PID: "2"}))
	input.Process.AddNode(pid2NodeID, report.MakeNodeWith(map[string]string{process.PID: "3"}))

	have, err := docker.NewTagger(mockRegistryInstance, nil).Tag(input)
	if err != nil {
		t.Errorf("%v", err)
	}

	// Processes should be tagged with their container ID, and have the container and image as parents
	for _, nodeID := range []string{pid1NodeID, pid2NodeID} {
		node, ok := have.Process.Nodes[nodeID]
		if !ok {
			t.Errorf("Expected process node %s, but not found", nodeID)
		}

		// node should have the container id added
		if have, ok := node.Latest.Lookup(docker.ContainerID); !ok || have != "ping" {
			t.Errorf("Expected process node %s to have container id %q, got %q", nodeID, "ping", have)
		}

		// node should have the container as a parent
		if have, ok := node.Parents.Lookup(report.Container); !ok || !have.Contains(report.MakeContainerNodeID("ping")) {
			t.Errorf("Expected process node %s to have container %q as a parent, got %q", nodeID, "ping", have)
		}

		// node should have the container image as a parent
		if have, ok := node.Parents.Lookup(report.ContainerImage); !ok || !have.Contains(report.MakeContainerImageNodeID("baz")) {
			t.Errorf("Expected process node %s to have container image %q as a parent, got %q", nodeID, "baz", have)
		}
	}
}
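
Neither mockProcessTree nor mockRegistryInstance is shown here. For the tree, assuming process.Tree only needs a parent lookup (a hypothetical interface shape), a stub could look like this; it reuses the test file's fmt import:

// mockProcessTree is a hypothetical stub for process.Tree, mapping a PID to its parent PID.
type mockProcessTree struct {
	parents map[int]int
}

// GetParent returns the canned parent for pid, or an error if none is known.
func (m *mockProcessTree) GetParent(pid int) (int, error) {
	parent, ok := m.parents[pid]
	if !ok {
		return 0, fmt.Errorf("no parent for pid %d", pid)
	}
	return parent, nil
}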
Example 5
func TestCollectorExpire(t *testing.T) {
	now := time.Now()
	mtime.NowForce(now)
	defer mtime.NowReset()

	ctx := context.Background()
	window := 10 * time.Second
	c := app.NewCollector(window)

	// First, check that the collector is empty
	have, err := c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := report.MakeReport(); !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}

	// Now check an added report is returned
	r1 := report.MakeReport()
	r1.Endpoint.AddNode(report.MakeNode("foo"))
	c.Add(ctx, r1)
	have, err = c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := r1; !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}

	// Finally move time forward to expire the report
	mtime.NowForce(now.Add(window))
	have, err = c.Report(ctx)
	if err != nil {
		t.Error(err)
	}
	if want := report.MakeReport(); !reflect.DeepEqual(want, have) {
		t.Error(test.Diff(want, have))
	}
}
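
The expiry behaviour being tested is that a report added at time now disappears once the clock reaches now + window. An illustrative sketch of that pruning rule, assuming the collector keeps timestamped reports oldest-first (this is not the actual app.Collector implementation):

// prune drops reports whose timestamp has fallen outside the window.
// timestamps and reports are parallel slices, oldest first.
func prune(timestamps []time.Time, reports []report.Report, window time.Duration) ([]time.Time, []report.Report) {
	cutoff := mtime.Now().Add(-window)
	i := 0
	for i < len(timestamps) && !timestamps[i].After(cutoff) {
		i++
	}
	return timestamps[i:], reports[i:]
}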
Example 6
func TestReporter(t *testing.T) {
	processes := []process.Process{
		{PID: 1, PPID: 0, Name: "init"},
		{PID: 2, PPID: 1, Name: "bash"},
		{PID: 3, PPID: 1, Name: "apache", Threads: 2},
		{PID: 4, PPID: 2, Name: "ping", Cmdline: "ping foo.bar.local"},
		{PID: 5, PPID: 1, Cmdline: "tail -f /var/log/syslog"},
	}
	walker := &mockWalker{processes: processes}
	getDeltaTotalJiffies := func() (uint64, float64, error) { return 0, 0., nil }
	now := time.Now()
	mtime.NowForce(now)
	defer mtime.NowReset()

	rpt, err := process.NewReporter(walker, "", getDeltaTotalJiffies).Report()
	if err != nil {
		t.Error(err)
	}

	// It reports the init process
	node, ok := rpt.Process.Nodes[report.MakeProcessNodeID("", "1")]
	if !ok {
		t.Errorf("Expected report to include the pid 1 init")
	}
	if name, ok := node.Latest.Lookup(process.Name); !ok || name != processes[0].Name {
		t.Errorf("Expected %q got %q", processes[0].Name, name)
	}

	// It reports plain processes (with parent pid, and metrics)
	node, ok = rpt.Process.Nodes[report.MakeProcessNodeID("", "2")]
	if !ok {
		t.Errorf("Expected report to include the pid 2 bash")
	}
	if name, ok := node.Latest.Lookup(process.Name); !ok || name != processes[1].Name {
		t.Errorf("Expected %q got %q", processes[1].Name, name)
	}
	if ppid, ok := node.Latest.Lookup(process.PPID); !ok || ppid != fmt.Sprint(processes[1].PPID) {
		t.Errorf("Expected %d got %q", processes[1].PPID, ppid)
	}
	if memoryUsage, ok := node.Metrics[process.MemoryUsage]; !ok {
		t.Errorf("Expected memory usage metric, but not found")
	} else if sample := memoryUsage.LastSample(); sample == nil {
		t.Errorf("Expected memory usage metric to have a sample, but there were none")
	} else if sample.Value != 0. {
		t.Errorf("Expected memory usage metric sample %f, got %f", 0., sample.Value)
	}

	// It reports thread-counts
	node, ok = rpt.Process.Nodes[report.MakeProcessNodeID("", "3")]
	if !ok {
		t.Errorf("Expected report to include the pid 3 apache")
	}
	if threads, ok := node.Latest.Lookup(process.Threads); !ok || threads != fmt.Sprint(processes[2].Threads) {
		t.Errorf("Expected %d got %q", processes[2].Threads, threads)
	}

	// It reports the Cmdline
	node, ok = rpt.Process.Nodes[report.MakeProcessNodeID("", "4")]
	if !ok {
		t.Errorf("Expected report to include the pid 4 ping")
	}
	if cmdline, ok := node.Latest.Lookup(process.Cmdline); !ok || cmdline != fmt.Sprint(processes[3].Cmdline) {
		t.Errorf("Expected %q got %q", processes[3].Cmdline, cmdline)
	}

	// It reports processes without a Name
	node, ok = rpt.Process.Nodes[report.MakeProcessNodeID("", "5")]
	if !ok {
		t.Errorf("Expected report to include the pid 5 tail")
	}
	if name, ok := node.Latest.Lookup(process.Name); ok {
		t.Errorf("Expected no name, but got %q", name)
	}
	if cmdline, ok := node.Latest.Lookup(process.Cmdline); !ok || cmdline != fmt.Sprint(processes[4].Cmdline) {
		t.Errorf("Expected %q got %q", processes[4].Cmdline, cmdline)
	}
}
Example 7
func TestMergeNodes(t *testing.T) {
	mtime.NowForce(time.Now())
	defer mtime.NowReset()

	for name, c := range map[string]struct {
		a, b, want report.Nodes
	}{
		"Empty a": {
			a: report.Nodes{},
			b: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Empty b": {
			a: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			b: report.Nodes{},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Simple merge": {
			a: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			b: report.Nodes{
				":192.168.1.2:12345": report.MakeNodeWith(map[string]string{
					PID:    "42",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
				":192.168.1.2:12345": report.MakeNodeWith(map[string]string{
					PID:    "42",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Merge conflict with rank difference": {
			a: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			b: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{ // <-- same ID
					Name:   "curl",
					Domain: "node-a.local",
				}).WithLatest(PID, time.Now().Add(-1*time.Minute), "0"),
			},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Merge conflict with no rank difference": {
			a: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
			b: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{ // <-- same ID
					Name:   "curl",
					Domain: "node-a.local",
				}).WithLatest(PID, time.Now().Add(-1*time.Minute), "0"),
			},
			want: report.Nodes{
				":192.168.1.1:12345": report.MakeNodeWith(map[string]string{
					PID:    "23128",
					Name:   "curl",
					Domain: "node-a.local",
				}),
			},
		},
		"Counters": {
			a: report.Nodes{
				"1": report.MakeNode().WithCounters(map[string]int{
					"a": 13,
					"b": 57,
					"c": 89,
				}),
			},
			b: report.Nodes{
				"1": report.MakeNode().WithCounters(map[string]int{
					"a": 78,
					"b": 3,
					"d": 47,
				}),
			},
			want: report.Nodes{
				"1": report.MakeNode().WithCounters(map[string]int{
					"a": 91,
					"b": 60,
					"c": 89,
					"d": 47,
				}),
			},
		},
	} {
		if have := c.a.Merge(c.b); !reflect.DeepEqual(c.want, have) {
			t.Errorf("%s: %s", name, test.Diff(c.want, have))
		}
	}
}
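
The "Counters" case pins down the merge rule for counters: values under the same key are summed (13+78=91, 57+3=60) and keys present on only one side are carried through unchanged. A standalone sketch of that rule (not the report package's own implementation):

// mergeCounters sums counters key by key; keys present in only one map are copied through.
func mergeCounters(a, b map[string]int) map[string]int {
	out := make(map[string]int, len(a)+len(b))
	for k, v := range a {
		out[k] = v
	}
	for k, v := range b {
		out[k] += v
	}
	return out
}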
Example 8
func TestContainer(t *testing.T) {
	log.SetOutput(ioutil.Discard)

	oldDialStub, oldNewClientConnStub := docker.DialStub, docker.NewClientConnStub
	defer func() { docker.DialStub, docker.NewClientConnStub = oldDialStub, oldNewClientConnStub }()

	docker.DialStub = func(network, address string) (net.Conn, error) {
		return nil, nil
	}

	reader, writer := io.Pipe()
	connection := &mockConnection{reader}

	docker.NewClientConnStub = func(c net.Conn, r *bufio.Reader) docker.ClientConn {
		return connection
	}

	c := docker.NewContainer(container1)
	err := c.StartGatheringStats()
	if err != nil {
		t.Errorf("%v", err)
	}
	defer c.StopGatheringStats()

	now := time.Unix(12345, 67890).UTC()
	mtime.NowForce(now)
	defer mtime.NowReset()

	// Send some stats to the docker container
	stats := &client.Stats{}
	stats.Read = now
	stats.MemoryStats.Usage = 12345
	if err = json.NewEncoder(writer).Encode(&stats); err != nil {
		t.Error(err)
	}

	// Now see if we got them
	uptime := (now.Sub(startTime) / time.Second) * time.Second
	want := report.MakeNode().WithLatests(map[string]string{
		"docker_container_command": " ",
		"docker_container_created": "01 Jan 01 00:00 UTC",
		"docker_container_id":      "ping",
		"docker_container_name":    "pong",
		"docker_image_id":          "baz",
		"docker_label_foo1":        "bar1",
		"docker_label_foo2":        "bar2",
		"docker_container_state":   "running",
		"docker_container_uptime":  uptime.String(),
	}).WithSets(report.EmptySets.
		Add("docker_container_ports", report.MakeStringSet("1.2.3.4:80->80/tcp", "81/tcp")).
		Add("docker_container_ips", report.MakeStringSet("1.2.3.4")).
		Add("docker_container_ips_with_scopes", report.MakeStringSet("scope;1.2.3.4")),
	).WithControls(
		docker.RestartContainer, docker.StopContainer, docker.PauseContainer,
		docker.AttachContainer, docker.ExecContainer,
	).WithMetrics(report.Metrics{
		"docker_cpu_total_usage": report.MakeMetric(),
		"docker_memory_usage":    report.MakeMetric().Add(now, 12345),
	}).WithParents(report.EmptySets.
		Add(report.ContainerImage, report.MakeStringSet(report.MakeContainerImageNodeID("baz"))),
	)

	test.Poll(t, 100*time.Millisecond, want, func() interface{} {
		node := c.GetNode("scope", []net.IP{})
		node.Latest.ForEach(func(k, v string) {
			if v == "0" || v == "" {
				node.Latest = node.Latest.Delete(k)
			}
		})
		return node
	})

	if c.Image() != "baz" {
		t.Errorf("%s != baz", c.Image())
	}
	if c.PID() != 2 {
		t.Errorf("%d != 2", c.PID())
	}
	if have := docker.ExtractContainerIPs(c.GetNode("", []net.IP{})); !reflect.DeepEqual(have, []string{"1.2.3.4"}) {
		t.Errorf("%v != %v", have, []string{"1.2.3.4"})
	}
}
Example 9
func TestNat(t *testing.T) {
	mtime.NowForce(mtime.Now())
	defer mtime.NowReset()

	// Test that two containers, on the docker network, get their connections mapped
	// correctly. The setup is this:
	//
	// container2 (10.0.47.2:222222), host2 (2.3.4.5:22223) ->
	//     host1 (1.2.3.4:80), container1 (10.0.47.1:80)

	// from the PoV of host1
	{
		f := makeFlow("")
		addIndependant(&f, 1, "")
		f.Original = addMeta(&f, "original", "2.3.4.5", "1.2.3.4", 222222, 80)
		f.Reply = addMeta(&f, "reply", "10.0.47.1", "2.3.4.5", 80, 222222)
		ct := &mockFlowWalker{
			flows: []flow{f},
		}

		have := report.MakeReport()
		originalID := report.MakeEndpointNodeID("host1", "10.0.47.1", "80")
		have.Endpoint.AddNode(originalID, report.MakeNodeWith(map[string]string{
			Addr:  "10.0.47.1",
			Port:  "80",
			"foo": "bar",
		}))

		want := have.Copy()
		want.Endpoint.AddNode(report.MakeEndpointNodeID("host1", "1.2.3.4", "80"), report.MakeNodeWith(map[string]string{
			Addr:      "1.2.3.4",
			Port:      "80",
			"copy_of": originalID,
			"foo":     "bar",
		}))

		makeNATMapper(ct).applyNAT(have, "host1")
		if !reflect.DeepEqual(want, have) {
			t.Fatal(test.Diff(want, have))
		}
	}

	// from the PoV of host2
	{
		f := makeFlow("")
		addIndependant(&f, 2, "")
		f.Original = addMeta(&f, "original", "10.0.47.2", "1.2.3.4", 22222, 80)
		f.Reply = addMeta(&f, "reply", "1.2.3.4", "2.3.4.5", 80, 22223)
		ct := &mockFlowWalker{
			flows: []flow{f},
		}

		have := report.MakeReport()
		originalID := report.MakeEndpointNodeID("host2", "10.0.47.2", "22222")
		have.Endpoint.AddNode(originalID, report.MakeNodeWith(map[string]string{
			Addr:  "10.0.47.2",
			Port:  "22222",
			"foo": "baz",
		}))

		want := have.Copy()
		want.Endpoint.AddNode(report.MakeEndpointNodeID("host2", "2.3.4.5", "22223"), report.MakeNodeWith(map[string]string{
			Addr:      "2.3.4.5",
			Port:      "22223",
			"copy_of": originalID,
			"foo":     "baz",
		}))

		makeNATMapper(ct).applyNAT(have, "host1")
		if !reflect.DeepEqual(want, have) {
			t.Fatal(test.Diff(want, have))
		}
	}
}
Example 10
func TestReporter(t *testing.T) {
	var (
		release   = "release"
		version   = "version"
		network   = "192.168.0.0/16"
		hostID    = "hostid"
		hostname  = "hostname"
		timestamp = time.Now()
		metrics   = report.Metrics{
			host.Load1:       report.MakeMetric().Add(timestamp, 1.0),
			host.Load5:       report.MakeMetric().Add(timestamp, 5.0),
			host.Load15:      report.MakeMetric().Add(timestamp, 15.0),
			host.CPUUsage:    report.MakeMetric().Add(timestamp, 30.0).WithMax(100.0),
			host.MemoryUsage: report.MakeMetric().Add(timestamp, 60.0).WithMax(100.0),
		}
		uptime      = "278h55m43s"
		kernel      = "release version"
		_, ipnet, _ = net.ParseCIDR(network)
	)

	mtime.NowForce(timestamp)
	defer mtime.NowReset()

	var (
		oldGetKernelVersion    = host.GetKernelVersion
		oldGetLoad             = host.GetLoad
		oldGetUptime           = host.GetUptime
		oldGetCPUUsagePercent  = host.GetCPUUsagePercent
		oldGetMemoryUsageBytes = host.GetMemoryUsageBytes
		oldGetLocalNetworks    = host.GetLocalNetworks
	)
	defer func() {
		host.GetKernelVersion = oldGetKernelVersion
		host.GetLoad = oldGetLoad
		host.GetUptime = oldGetUptime
		host.GetCPUUsagePercent = oldGetCPUUsagePercent
		host.GetMemoryUsageBytes = oldGetMemoryUsageBytes
		host.GetLocalNetworks = oldGetLocalNetworks
	}()
	host.GetKernelVersion = func() (string, error) { return release + " " + version, nil }
	host.GetLoad = func(time.Time) report.Metrics { return metrics }
	host.GetUptime = func() (time.Duration, error) { return time.ParseDuration(uptime) }
	host.GetCPUUsagePercent = func() (float64, float64) { return 30.0, 100.0 }
	host.GetMemoryUsageBytes = func() (float64, float64) { return 60.0, 100.0 }
	host.GetLocalNetworks = func() ([]*net.IPNet, error) { return []*net.IPNet{ipnet}, nil }

	rpt, err := host.NewReporter(hostID, hostname).Report()
	if err != nil {
		t.Fatal(err)
	}

	nodeID := report.MakeHostNodeID(hostID)
	node, ok := rpt.Host.Nodes[nodeID]
	if !ok {
		t.Errorf("Expected host node %q, but not found", nodeID)
	}

	// Should have a bunch of expected latest keys
	for _, tuple := range []struct {
		key, want string
	}{
		{host.Timestamp, timestamp.UTC().Format(time.RFC3339Nano)},
		{host.HostName, hostname},
		{host.OS, runtime.GOOS},
		{host.Uptime, uptime},
		{host.KernelVersion, kernel},
	} {
		if have, ok := node.Latest.Lookup(tuple.key); !ok || have != tuple.want {
			t.Errorf("Expected %s %q, got %q", tuple.key, tuple.want, have)
		}
	}

	// Should have the local network
	if have, ok := node.Sets.Lookup(host.LocalNetworks); !ok || !have.Contains(network) {
		t.Errorf("Expected host.LocalNetworks to include %q, got %q", network, have)
	}

	// Should have metrics
	for key, want := range metrics {
		wantSample := want.LastSample()
		if metric, ok := node.Metrics[key]; !ok {
			t.Errorf("Expected %s metric, but not found", key)
		} else if sample := metric.LastSample(); sample == nil {
			t.Errorf("Expected %s metric to have a sample, but there were none", key)
		} else if sample.Value != wantSample.Value {
			t.Errorf("Expected %s metric sample %f, got %f", key, wantSample, sample.Value)
		}
	}
}