Example #1
// Check the network ContainerStats.
func TestDockerContainerNetworkStats(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Wait for the container to show up.
	containerId := fm.Docker().RunBusybox("watch", "-n1", "wget", "http://www.google.com/")
	waitForContainer(containerId, fm)

	time.Sleep(10 * time.Second)
	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	require.NoError(t, err)
	sanityCheck(containerId, containerInfo, t)

	// Checks for NetworkStats.
	stat := containerInfo.Stats[0]
	assert := assert.New(t)
	// Compare against typed zero values: an untyped 0 never equals a uint64 field
	// under testify's type-aware comparison, which would make these checks vacuous.
	assert.NotEqual(uint64(0), stat.Network.TxBytes, "Network tx bytes should not be zero")
	assert.NotEqual(uint64(0), stat.Network.TxPackets, "Network tx packets should not be zero")
	assert.NotEqual(uint64(0), stat.Network.RxBytes, "Network rx bytes should not be zero")
	assert.NotEqual(uint64(0), stat.Network.RxPackets, "Network rx packets should not be zero")
	assert.NotEqual(stat.Network.RxBytes, stat.Network.TxBytes, "Network tx and rx bytes should not be equal")
	assert.NotEqual(stat.Network.RxPackets, stat.Network.TxPackets, "Network tx and rx packets should not be equal")
}
Example #2
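// Check the filesystem stats of a Docker container that writes to its own filesystem.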
func TestDockerFilesystemStats(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	storageDriver := fm.Docker().StorageDriver()
	switch storageDriver {
	case framework.Aufs, framework.Overlay:
		// Filesystem stats are supported for these storage drivers.
	default:
		t.Skip("skipping filesystem stats test")
	}
	// Wait for the container to show up.
	containerId := fm.Docker().RunBusybox("/bin/sh", "-c", "dd if=/dev/zero of=/file count=1 bs=1M & ping www.google.com")
	waitForContainer(containerId, fm)
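	// Give dd time to finish and cAdvisor time to collect filesystem stats.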
	time.Sleep(time.Minute)
	request := &v2.RequestOptions{
		IdType: v2.TypeDocker,
		Count:  1,
	}
	containerInfo, err := fm.Cadvisor().ClientV2().Stats(containerId, request)
	require.NoError(t, err)
	require.Len(t, containerInfo, 1)
	var info v2.ContainerInfo
	for _, cInfo := range containerInfo {
		info = cInfo
	}
	sanityCheckV2(containerId, info, t)
	require.NotNil(t, info.Stats[0].Filesystem.BaseUsageBytes)
	assert.True(t, *info.Stats[0].Filesystem.BaseUsageBytes > (1<<20), "expected base fs usage to be greater than 1MB")
	require.NotNil(t, info.Stats[0].Filesystem.TotalUsageBytes)
	assert.True(t, *info.Stats[0].Filesystem.TotalUsageBytes > (1<<20), "expected total fs usage to be greater than 1MB")
}
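// Check that machine-level stats (CPU, memory, network, and filesystem) are returned and contain sane values.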
func TestMachineStatsIsReturned(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	machineStats, err := fm.Cadvisor().ClientV2().MachineStats()
	if err != nil {
		t.Fatal(err)
	}

	as := assert.New(t)
	for _, stat := range machineStats {
		as.NotEqual(stat.Timestamp, time.Time{})
		as.True(stat.Cpu.Usage.Total > 0)
		as.True(len(stat.Cpu.Usage.PerCpu) > 0)
		if stat.CpuInst != nil {
			as.True(stat.CpuInst.Usage.Total > 0)
		}
		as.True(stat.Memory.Usage > 0)
		for _, nStat := range stat.Network.Interfaces {
			as.NotEqual(nStat.Name, "")
			as.NotEqual(uint64(0), nStat.RxBytes)
		}
		for _, fsStat := range stat.Filesystem {
			as.NotEqual(fsStat.Device, "")
			as.NotNil(fsStat.Capacity)
			as.NotNil(fsStat.Usage)
			as.NotNil(fsStat.ReadsCompleted)
			require.NotEmpty(t, fsStat.Type)
			if fsStat.Type == "vfs" {
				as.NotEmpty(fsStat.InodesFree)
			}
		}
	}
}
Example #4
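// Check that a deletion event is streamed when a short-lived container exits,
// and that a creation event was recorded for it.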
func TestStreamingEventInformationIsReturned(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Watch for container deletions
	einfo := make(chan *info.Event)
	go func() {
		err := fm.Cadvisor().Client().EventStreamingInfo("?deletion_events=true&stream=true&subcontainers=true", einfo)
		// Use assert here: require.NoError calls t.FailNow, which must only be
		// called from the goroutine running the test.
		assert.NoError(t, err)
	}()

	// Create a short-lived container.
	containerId := fm.Docker().RunBusybox("sleep", "2")

	// Wait for the deletion event.
	timeout := time.After(30 * time.Second)
	done := false
	for !done {
		select {
		case ev := <-einfo:
			if ev.EventType == info.EventContainerDeletion {
				if strings.Contains(ev.ContainerName, containerId) {
					done = true
				}
			}
		case <-timeout:
			t.Errorf(
				"timeout happened before destruction event was detected for container %q", containerId)
			done = true
		}
	}

	// We should have already received a creation event.
	waitForStaticEvent(containerId, "?creation_events=true&subcontainers=true", t, fm, info.EventContainerCreation)
}
Example #5
// Check expected properties of a Docker container.
func TestBasicDockerContainer(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	containerName := fmt.Sprintf("test-basic-docker-container-%d", os.Getpid())
	containerId := fm.Docker().Run(framework.DockerRunArgs{
		Image: "kubernetes/pause",
		Args: []string{
			"--name", containerName,
		},
	})

	// Wait for the container to show up.
	waitForContainer(containerId, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	require.NoError(t, err)

	// Check that the container is known by both its name and ID.
	sanityCheck(containerId, containerInfo, t)
	sanityCheck(containerName, containerInfo, t)

	assert.Empty(t, containerInfo.Subcontainers, "Should not have subcontainers")
	assert.Len(t, containerInfo.Stats, 1, "Should have exactly one stat")
}
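// Check that the machine info (cores, memory, filesystems) contains sane values.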
func TestMachineInformationIsReturned(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	machineInfo, err := fm.Cadvisor().Client().MachineInfo()
	if err != nil {
		t.Fatal(err)
	}

	// Check for "sane" values. Note these can change with time.
	if machineInfo.NumCores <= 0 || machineInfo.NumCores >= 1000000 {
		t.Errorf("Machine info has unexpected number of cores: %v", machineInfo.NumCores)
	}
	if machineInfo.MemoryCapacity <= 0 || machineInfo.MemoryCapacity >= (1<<50 /* 1PB */) {
		t.Errorf("Machine info has unexpected amount of memory: %v", machineInfo.MemoryCapacity)
	}
	if len(machineInfo.Filesystems) == 0 {
		t.Errorf("Expected to have some filesystems, found none")
	}
	for _, fs := range machineInfo.Filesystems {
		if fs.Device == "" {
			t.Errorf("Expected a non-empty device name in: %+v", fs)
		}
		if fs.Capacity < 0 || fs.Capacity >= (1<<60 /* 1 EB*/) {
			t.Errorf("Unexpected capacity in device %q: %v", fs.Device, fs.Capacity)
		}
	}
}
Example #7
// TODO(vmarmol): Handle if CPU or memory is not isolated on this system.
// Check the ContainerSpec.
func TestDockerContainerSpec(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	var (
		cpuShares   = uint64(2048)
		cpuMask     = "0"
		memoryLimit = uint64(1 << 30) // 1GB
		image       = "kubernetes/pause"
		env         = map[string]string{"test_var": "FOO"}
		labels      = map[string]string{"bar": "baz"}
	)

	cpusetArg := "--cpuset"
	if getDockerMinorVersion(fm) >= 10 {
		cpusetArg = "--cpuset-cpus"
	}
	containerId := fm.Docker().Run(framework.DockerRunArgs{
		Image: image,
		Args: []string{
			"--cpu-shares", strconv.FormatUint(cpuShares, 10),
			cpusetArg, cpuMask,
			"--memory", strconv.FormatUint(memoryLimit, 10),
			"--env", "TEST_VAR=FOO",
			"--label", "bar=baz",
		},
	})

	// Wait for the container to show up.
	waitForContainer(containerId, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	require.NoError(t, err)
	sanityCheck(containerId, containerInfo, t)

	assert := assert.New(t)

	assert.True(containerInfo.Spec.HasCpu, "CPU should be isolated")
	assert.Equal(cpuShares, containerInfo.Spec.Cpu.Limit, "Container should have %d shares, has %d", cpuShares, containerInfo.Spec.Cpu.Limit)
	assert.Equal(cpuMask, containerInfo.Spec.Cpu.Mask, "Cpu mask should be %q, but is %q", cpuMask, containerInfo.Spec.Cpu.Mask)
	assert.True(containerInfo.Spec.HasMemory, "Memory should be isolated")
	assert.Equal(memoryLimit, containerInfo.Spec.Memory.Limit, "Container should have memory limit of %d, has %d", memoryLimit, containerInfo.Spec.Memory.Limit)
	assert.True(containerInfo.Spec.HasNetwork, "Network should be isolated")
	assert.True(containerInfo.Spec.HasDiskIo, "Blkio should be isolated")

	assert.Equal(image, containerInfo.Spec.Image, "Spec should include container image")
	assert.Equal(env, containerInfo.Spec.Envs, "Spec should include environment variables")
	assert.Equal(labels, containerInfo.Spec.Labels, "Spec should include labels")
}
Example #8
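// Check that cAdvisor reports a well-formed Docker version attribute.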
func TestAttributeInformationIsReturned(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	attributes, err := fm.Cadvisor().ClientV2().Attributes()
	if err != nil {
		t.Fatal(err)
	}

	vp := `\d+\.\d+\.\d+`
	assert.Regexp(t, vp, attributes.DockerVersion,
		"Expected %s to match %s", attributes.DockerVersion, vp)
}
Example #9
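// Check that the healthz endpoint reports a healthy cAdvisor.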
func TestHealthzOk(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Ensure that /healthz returns "ok"
	resp, err := http.Get(fm.Host().FullHost() + "healthz")
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}

	if string(body) != "ok" {
		t.Fatalf("cAdvisor returned unexpected healthz status of %q", body)
	}
}
Example #10
// A Docker container in /docker/<ID>
func TestDockerContainerById(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	containerId := fm.Docker().RunPause()

	// Wait for the container to show up.
	waitForContainer(containerId, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	require.NoError(t, err)

	sanityCheck(containerId, containerInfo, t)
}
Example #11
// Check the memory ContainerStats.
func TestDockerContainerMemoryStats(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Wait for the container to show up.
	containerId := fm.Docker().RunBusybox("ping", "www.google.com")
	waitForContainer(containerId, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	require.NoError(t, err)
	sanityCheck(containerId, containerInfo, t)

	// Checks for MemoryStats.
	checkMemoryStats(t, containerInfo.Stats[0].Memory)
}
Example #12
// Check the CPU ContainerStats.
func TestDockerContainerCpuStats(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Wait for the container to show up.
	containerId := fm.Docker().RunBusybox("ping", "www.google.com")
	waitForContainer(containerId, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	if err != nil {
		t.Fatal(err)
	}
	sanityCheck(containerId, containerInfo, t)

	// Checks for CpuStats.
	checkCpuStats(t, containerInfo.Stats[0].Cpu)
}
Example #13
// A Docker container in /docker/<name>
func TestDockerContainerByName(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	containerName := fmt.Sprintf("test-docker-container-by-name-%d", os.Getpid())
	fm.Docker().Run(framework.DockerRunArgs{
		Image: "kubernetes/pause",
		Args:  []string{"--name", containerName},
	})

	// Wait for the container to show up.
	waitForContainer(containerName, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerName, request)
	require.NoError(t, err)

	sanityCheck(containerName, containerInfo, t)
}
Example #14
// Check the network ContainerStats.
func TestDockerContainerNetworkStats(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Wait for the container to show up.
	containerId := fm.Docker().RunBusybox("ping", "www.google.com")
	waitForContainer(containerId, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	require.NoError(t, err)
	sanityCheck(containerId, containerInfo, t)

	// Checks for NetworkStats.
	stat := containerInfo.Stats[0]
	assert.NotEqual(t, uint64(0), stat.Network.TxBytes, "Network tx bytes should not be zero")
	assert.NotEqual(t, uint64(0), stat.Network.TxPackets, "Network tx packets should not be zero")
	// TODO(vmarmol): Can probably do a better test with two containers pinging each other.
}
Example #15
// TODO(vmarmol): Handle if CPU or memory is not isolated on this system.
// Check the ContainerSpec.
func TestDockerContainerSpec(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	cpuShares := uint64(2048)
	cpuMask := "0"
	memoryLimit := uint64(1 << 30) // 1GB
	cpusetArg := "--cpuset"
	if getDockerMinorVersion(fm) >= 10 {
		cpusetArg = "--cpuset-cpus"
	}
	containerId := fm.Docker().Run(framework.DockerRunArgs{
		Image: "kubernetes/pause",
		Args: []string{
			"--cpu-shares", strconv.FormatUint(cpuShares, 10),
			cpusetArg, cpuMask,
			"--memory", strconv.FormatUint(memoryLimit, 10),
		},
	})

	// Wait for the container to show up.
	waitForContainer(containerId, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containerInfo, err := fm.Cadvisor().Client().DockerContainer(containerId, request)
	require.NoError(t, err)
	sanityCheck(containerId, containerInfo, t)

	assert := assert.New(t)

	assert.True(containerInfo.Spec.HasCpu, "CPU should be isolated")
	assert.Equal(cpuShares, containerInfo.Spec.Cpu.Limit, "Container should have %d shares, has %d", cpuShares, containerInfo.Spec.Cpu.Limit)
	assert.Equal(cpuMask, containerInfo.Spec.Cpu.Mask, "Cpu mask should be %q, but is %q", cpuMask, containerInfo.Spec.Cpu.Mask)
	assert.True(containerInfo.Spec.HasMemory, "Memory should be isolated")
	assert.Equal(memoryLimit, containerInfo.Spec.Memory.Limit, "Container should have memory limit of %d, has %d", memoryLimit, containerInfo.Spec.Memory.Limit)
	assert.True(containerInfo.Spec.HasNetwork, "Network should be isolated")
	assert.True(containerInfo.Spec.HasDiskIo, "Blkio should be isolated")
}
Example #16
// All Docker containers through /docker
func TestGetAllDockerContainers(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	// Wait for the containers to show up.
	containerId1 := fm.Docker().RunPause()
	containerId2 := fm.Docker().RunPause()
	waitForContainer(containerId1, fm)
	waitForContainer(containerId2, fm)

	request := &info.ContainerInfoRequest{
		NumStats: 1,
	}
	containersInfo, err := fm.Cadvisor().Client().AllDockerContainers(request)
	require.NoError(t, err)

	if len(containersInfo) < 2 {
		t.Fatalf("At least 2 Docker containers should exist, received %d: %+v", len(containersInfo), containersInfo)
	}
	sanityCheck(containerId1, findContainer(containerId1, containersInfo, t), t)
	sanityCheck(containerId2, findContainer(containerId2, containersInfo, t), t)
}
Example #17
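// Check the filesystem stats of a Docker container, polling until the dd write becomes visible.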
func TestDockerFilesystemStats(t *testing.T) {
	fm := framework.New(t)
	defer fm.Cleanup()

	storageDriver := fm.Docker().StorageDriver()
	if storageDriver == framework.DeviceMapper {
		// Filesystem stats not supported with devicemapper, yet
		return
	}

	const (
		ddUsage       = uint64(1 << 10) // 1 KiB
		sleepDuration = 10 * time.Second
	)
	// Wait for the container to show up.
	// FIXME: Tests should be bundled and run on the remote host instead of being run over ssh.
	// Escaping bash over ssh is ugly.
	// Once github issue 1130 is fixed, this logic can be removed.
	dockerCmd := fmt.Sprintf("dd if=/dev/zero of=/file count=2 bs=%d & ping google.com", ddUsage)
	if fm.Hostname().Host != "localhost" {
		dockerCmd = fmt.Sprintf("'%s'", dockerCmd)
	}
	containerId := fm.Docker().RunBusybox("/bin/sh", "-c", dockerCmd)
	waitForContainer(containerId, fm)
	request := &v2.RequestOptions{
		IdType: v2.TypeDocker,
		Count:  1,
	}
	needsBaseUsageCheck := false
	switch storageDriver {
	case framework.Aufs, framework.Overlay:
		needsBaseUsageCheck = true
	}
	pass := false
	// We need to wait for the `dd` operation to complete.
	for i := 0; i < 10; i++ {
		containerInfo, err := fm.Cadvisor().ClientV2().Stats(containerId, request)
		if err != nil {
			t.Logf("%v stats unavailable - %v", time.Now().String(), err)
			t.Logf("retrying after %s...", sleepDuration.String())
			time.Sleep(sleepDuration)

			continue
		}
		require.Equal(t, len(containerInfo), 1)
		var info v2.ContainerInfo
		// There is only one container in containerInfo. Since it is a map with unknown key,
		// use the value blindly.
		for _, cInfo := range containerInfo {
			info = cInfo
		}
		sanityCheckV2(containerId, info, t)

		require.NotNil(t, info.Stats[0], "got info: %+v", info)
		require.NotNil(t, info.Stats[0].Filesystem, "got info: %+v", info)
		require.NotNil(t, info.Stats[0].Filesystem.TotalUsageBytes, "got info: %+v", info.Stats[0].Filesystem)
		if *info.Stats[0].Filesystem.TotalUsageBytes >= ddUsage {
			if !needsBaseUsageCheck {
				pass = true
				break
			}
			require.NotNil(t, info.Stats[0].Filesystem.BaseUsageBytes)
			if *info.Stats[0].Filesystem.BaseUsageBytes >= ddUsage {
				pass = true
				break
			}
		}
		t.Logf("expected total usage %d bytes to be greater than %d bytes", *info.Stats[0].Filesystem.TotalUsageBytes, ddUsage)
		if needsBaseUsageCheck {
			t.Logf("expected base %d bytes to be greater than %d bytes", *info.Stats[0].Filesystem.BaseUsageBytes, ddUsage)
		}
		t.Logf("retrying after %s...", sleepDuration.String())
		time.Sleep(sleepDuration)
	}

	if !pass {
		t.Fail()
	}
}