func TestAppIsolatorCPU(t *testing.T) {
	isUnified, err := cgroup.IsCgroupUnified("/")
	if err != nil {
		t.Fatalf("Error determining the cgroup version: %v", err)
	}

	if isUnified {
		// TODO: for now kernel does not support cpu isolator in cgroup2.
		// Write a test when it does.
		t.Skip("kernel does not support cpu isolator in cgroup2.")
	}

	ok, err := cgroup.IsIsolatorSupported("cpu")
	if err != nil {
		t.Fatalf("Error checking cpu isolator support: %v", err)
	}
	if !ok {
		t.Skip("CPU isolator not supported.")
	}

	ctx := testutils.NewRktRunCtx()
	defer ctx.Cleanup()

	t.Logf("Running test: %v", cpuTest.testName)

	aciFileName := patchTestACI("rkt-inspect-isolators.aci", cpuTest.aciBuildArgs...)
	defer os.Remove(aciFileName)

	rktCmd := fmt.Sprintf("%s --insecure-options=image run --mds-register=false %s", ctx.Cmd(), aciFileName)
	expectedLine := "CPU Quota: " + strconv.Itoa(CPUQuota)
	runRktAndCheckOutput(t, rktCmd, expectedLine, false)

	rktCmd = fmt.Sprintf("%s --insecure-options=image run --mds-register=false %s --cpu 900m", ctx.Cmd(), aciFileName)
	expectedLine = "CPU Quota: " + strconv.Itoa(900)
	runRktAndCheckOutput(t, rktCmd, expectedLine, false)
}
func TestAppIsolatorCPU(t *testing.T) {
	ok := cgroup.IsIsolatorSupported("cpu")
	if !ok {
		t.Skip("CPU isolator not supported.")
	}

	ctx := newRktRunCtx()
	defer ctx.cleanup()

	t.Logf("Running test: %v", cpuTest.testName)

	aciFileName := patchTestACI("rkt-inspect-isolators.aci", cpuTest.aciBuildArgs...)
	defer os.Remove(aciFileName)

	rktCmd := fmt.Sprintf("%s --insecure-skip-verify run --mds-register=false %s", ctx.cmd(), aciFileName)
	t.Logf("Command: %v", rktCmd)
	child, err := gexpect.Spawn(rktCmd)
	if err != nil {
		t.Fatalf("Cannot exec rkt: %v", err)
	}

	expectedLine := "CPU Quota: " + strconv.Itoa(CPUQuota)
	if err := expectWithOutput(child, expectedLine); err != nil {
		t.Fatalf("Didn't receive expected output %q: %v", expectedLine, err)
	}

	err = child.Wait()
	if err != nil {
		t.Fatalf("rkt didn't terminate correctly: %v", err)
	}
}
func TestAppIsolatorMemory(t *testing.T) {
	ok := cgroup.IsIsolatorSupported("memory")
	if !ok {
		t.Skip("Memory isolator not supported.")
	}

	ctx := newRktRunCtx()
	defer ctx.cleanup()

	t.Logf("Running test: %v", memoryTest.testName)

	aciFileName := "rkt-inspect-isolators.aci"
	patchTestACI(aciFileName, memoryTest.aciBuildArgs...)
	defer os.Remove(aciFileName)

	rktCmd := fmt.Sprintf("%s %s", ctx.cmd(), memoryTest.rktArgs)
	t.Logf("Command: %v", rktCmd)
	child, err := gexpect.Spawn(rktCmd)
	if err != nil {
		t.Fatalf("Cannot exec rkt: %v", err)
	}

	expectedLine := "Memory Limit: " + strconv.Itoa(maxMemoryUsage)
	if err := expectWithOutput(child, expectedLine); err != nil {
		t.Fatalf("Didn't receive expected output %q: %v", expectedLine, err)
	}

	err = child.Wait()
	if err != nil {
		t.Fatalf("rkt didn't terminate correctly: %v", err)
	}
}
func TestAppIsolatorMemory(t *testing.T) {
	ok, err := cgroup.IsIsolatorSupported("memory")
	if err != nil {
		t.Fatalf("Error checking memory isolator support: %v", err)
	}
	if !ok {
		t.Skip("Memory isolator not supported.")
	}

	ctx := testutils.NewRktRunCtx()
	defer ctx.Cleanup()

	t.Logf("Running test: %v", memoryTest.testName)

	aciFileName := patchTestACI("rkt-inspect-isolators.aci", memoryTest.aciBuildArgs...)
	defer os.Remove(aciFileName)

	rktCmd := fmt.Sprintf("%s --insecure-options=image run --mds-register=false %s", ctx.Cmd(), aciFileName)
	expectedLine := "Memory Limit: " + strconv.Itoa(maxMemoryUsage)
	runRktAndCheckOutput(t, rktCmd, expectedLine, false)

	rktCmd = fmt.Sprintf("%s --insecure-options=image run --mds-register=false %s --memory 42Mi", ctx.Cmd(), aciFileName)
	expectedLine = "Memory Limit: " + strconv.Itoa(42*1024*1024)
	runRktAndCheckOutput(t, rktCmd, expectedLine, false)
}
func TestAppIsolatorCPU(t *testing.T) {
	ok := cgroup.IsIsolatorSupported("cpu")
	if !ok {
		t.Skip("CPU isolator not supported.")
	}

	ctx := newRktRunCtx()
	defer ctx.cleanup()

	t.Logf("Running test: %v", cpuTest.testName)

	aciFileName := patchTestACI("rkt-inspect-isolators.aci", cpuTest.aciBuildArgs...)
	defer os.Remove(aciFileName)

	rktCmd := fmt.Sprintf("%s --insecure-skip-verify run --mds-register=false %s", ctx.cmd(), aciFileName)
	expectedLine := "CPU Quota: " + strconv.Itoa(CPUQuota)
	runRktAndCheckOutput(t, rktCmd, expectedLine, false)
}
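// The isolator tests above all begin with the same guard: work out whether the cgroup
// hierarchy and the requested isolator can actually be exercised, and skip otherwise.
// Below is a minimal sketch of that guard factored into a single helper.
// requireIsolator is a hypothetical name; the sketch assumes the two-return
// cgroup.IsIsolatorSupported and cgroup.IsCgroupUnified signatures used in the newest
// variant above.
func requireIsolator(t *testing.T, isolator string) {
	isUnified, err := cgroup.IsCgroupUnified("/")
	if err != nil {
		t.Fatalf("Error determining the cgroup version: %v", err)
	}
	if isUnified && isolator == "cpu" {
		// cgroup2 does not expose the cpu isolator these tests rely on.
		t.Skip("kernel does not support cpu isolator in cgroup2.")
	}

	ok, err := cgroup.IsIsolatorSupported(isolator)
	if err != nil {
		t.Fatalf("Error checking %s isolator support: %v", isolator, err)
	}
	if !ok {
		t.Skipf("%s isolator not supported.", isolator)
	}
}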
// Test running pod manifests that contains just one app. // TODO(yifan): Figure out a way to test port mapping on single host. func TestPodManifest(t *testing.T) { ctx := testutils.NewRktRunCtx() defer ctx.Cleanup() tmpdir := createTempDirOrPanic("rkt-tests.") defer os.RemoveAll(tmpdir) boolFalse, boolTrue := false, true tests := []struct { // [image name]:[image patches] images []imagePatch podManifest *schema.PodManifest expectedExit int expectedResult string cgroup string }{ { // Special characters []imagePatch{ {"rkt-test-run-pod-manifest-special-characters.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-msg=\n'\"$"}, User: "******", Group: "0", }, }, }, }, 0, `'"[$]`, "", }, { // Working directory. []imagePatch{ {"rkt-test-run-pod-manifest-working-directory.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-cwd"}, User: "******", Group: "0", WorkingDirectory: "/dir1", }, }, }, }, 0, "cwd: /dir1", "", }, { // Simple read. []imagePatch{ {"rkt-test-run-pod-manifest-read.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, }, }, }, }, }, 0, "dir1", "", }, { // Simple read from read-only rootfs. []imagePatch{ {"rkt-test-run-read-only-rootfs-pod-manifest-read.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, }, }, ReadOnlyRootFS: true, }, }, }, 0, "dir1", "", }, { // Simple read after write with *empty* volume mounted. []imagePatch{ {"rkt-test-run-pod-manifest-empty-vol-rw.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "empty:foo"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: false, }, }, }, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "empty", Mode: stringP("0755"), UID: intP(0), GID: intP(0), }, }, }, 0, "empty:foo", "", }, { // Simple read from read-only rootfs after write with *empty* volume mounted. []imagePatch{ {"rkt-test-run-pod-manifest-read-only-rootfs-empty-vol-rw.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "empty:foo"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: false, }, }, }, ReadOnlyRootFS: true, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "empty", Mode: stringP("0755"), UID: intP(0), GID: intP(0), }, }, }, 0, "empty:foo", "", }, { // Stat directory in a *empty* volume mounted. 
[]imagePatch{ {"rkt-test-run-pod-manifest-empty-vol-stat.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--stat-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: false, }, }, }, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "empty", Mode: stringP("0123"), UID: intP(9991), GID: intP(9992), }, }, }, 0, "(?s)/dir1: mode: d--x-w--wx.*" + "/dir1: user: 9991.*" + "/dir1: group: 9992", "", }, { // Stat directory in a *empty* volume mounted using a read-only rootfs. []imagePatch{ {"rkt-test-run-pod-manifest-read-only-rootfs-empty-vol-stat.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--stat-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: false, }, }, }, ReadOnlyRootFS: true, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "empty", Mode: stringP("0123"), UID: intP(9991), GID: intP(9992), }, }, }, 0, "(?s)/dir1: mode: d--x-w--wx.*" + "/dir1: user: 9991.*" + "/dir1: group: 9992", "", }, { // Simple read after write with volume mounted. []imagePatch{ {"rkt-test-run-pod-manifest-vol-rw.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "host:foo"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: false, }, }, }, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 0, "host:foo", "", }, { // Simple read after write with volume mounted in a read-only rootfs. []imagePatch{ {"rkt-test-run-pod-manifest-read-only-rootfs-vol-rw.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "host:foo"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: false, }, }, }, ReadOnlyRootFS: true, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 0, "host:foo", "", }, { // Simple read after write with read-only mount point, should fail. []imagePatch{ {"rkt-test-run-pod-manifest-vol-ro.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "bar"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: true, }, }, }, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 254, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Simple read after write with read-only mount point in a read-only rootfs, should fail. 
[]imagePatch{ {"rkt-test-run-pod-manifest-read-only-rootfs-vol-ro.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "bar"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: true, }, }, }, ReadOnlyRootFS: true, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 254, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Simple read after write with volume mounted. // Override the image's mount point spec. This should fail as the volume is // read-only in pod manifest, (which will override the mount point in both image/pod manifest). []imagePatch{ {"rkt-test-run-pod-manifest-vol-rw-override.aci", []string{"--mounts=dir1,path=/dir1,readOnly=false"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "bar"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: false, }, }, }, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: &boolTrue, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 254, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Simple read after write with volume mounted in a read-only rootfs. // Override the image's mount point spec. This should fail as the volume is // read-only in pod manifest, (which will override the mount point in both image/pod manifest). []imagePatch{ {"rkt-test-run-pod-manifest-vol-rw-override.aci", []string{"--mounts=dir1,path=/dir1,readOnly=false"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "bar"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir1", ReadOnly: false, }, }, }, ReadOnlyRootFS: true, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: &boolTrue, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 254, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Simple read after write with volume mounted. // Override the image's mount point spec. []imagePatch{ {"rkt-test-run-pod-manifest-vol-rw-override.aci", []string{"--mounts=dir1,path=/dir1,readOnly=true"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir2/file"}, {"CONTENT", "host:bar"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir2", ReadOnly: false, }, }, }, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 0, "host:bar", "", }, { // Simple read after write with volume mounted in a read-only rootfs. // Override the image's mount point spec. 
[]imagePatch{ {"rkt-test-run-pod-manifest-read-only-rootfs-vol-rw-override.aci", []string{"--mounts=dir1,path=/dir1,readOnly=true"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir2/file"}, {"CONTENT", "host:bar"}, }, MountPoints: []types.MountPoint{ { Name: "dir1", Path: "/dir2", ReadOnly: false, }, }, }, ReadOnlyRootFS: true, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 0, "host:bar", "", }, { // Simple read after write with volume mounted, no apps in pod manifest. []imagePatch{ { "rkt-test-run-pod-manifest-vol-rw-no-app.aci", []string{ "--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=host:baw", "--mounts=dir1,path=/dir1,readOnly=false", }, }, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ {Name: baseAppName}, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 0, "host:baw", "", }, { // Simple read after write with volume mounted in a read-only rootfs, no apps in pod manifest. []imagePatch{ { "rkt-test-run-pod-manifest-read-only-rootfs-vol-rw-no-app.aci", []string{ "--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=host:baz", "--mounts=dir1,path=/dir1,readOnly=false", }, }, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, ReadOnlyRootFS: true, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 0, "host:baz", "", }, { // Simple read after write with volume mounted, no apps in pod manifest. // This should succeed even the mount point in image manifest is readOnly, // because it is overridden by the volume's readOnly. []imagePatch{ { "rkt-test-run-pod-manifest-vol-ro-no-app.aci", []string{ "--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=host:zaz", "--mounts=dir1,path=/dir1,readOnly=true", }, }, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ {Name: baseAppName}, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: &boolFalse, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 0, "host:zaz", "", }, { // Simple read after write with read-only volume mounted, no apps in pod manifest. // This should fail as the volume is read-only. []imagePatch{ { "rkt-test-run-pod-manifest-vol-ro-no-app.aci", []string{ "--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=baz", "--mounts=dir1,path=/dir1,readOnly=false", }, }, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ {Name: baseAppName}, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: &boolTrue, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 254, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Simple read after write in read-only rootfs with read-only volume mounted, no apps in pod manifest. // This should fail as the volume is read-only. 
[]imagePatch{ { "rkt-test-run-pod-manifest-read-only-rootfs-vol-ro-no-app.aci", []string{ "--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=baz", "--mounts=dir1,path=/dir1,readOnly=false", }, }, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, ReadOnlyRootFS: true, }, }, Volumes: []types.Volume{ { Name: "dir1", Kind: "host", Source: tmpdir, ReadOnly: &boolTrue, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 254, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Print CPU quota, which should be overwritten by the pod manifest. []imagePatch{ {"rkt-test-run-pod-manifest-cpu-isolator.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-cpuquota"}, User: "******", Group: "0", Isolators: []types.Isolator{ mustNewIsolator(`{ "name": "resource/cpu", "value": { "request": "100m", "limit": "100m"} }`), mustNewIsolator(`{ "name": "os/linux/capabilities-retain-set", "value": { "set": ["CAP_SYS_PTRACE"] } }`), }, }, }, }, }, 0, `CPU Quota: 100`, "cpu", }, { // Print memory limit, which should be overwritten by the pod manifest. []imagePatch{ {"rkt-test-run-pod-manifest-memory-isolator.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-memorylimit"}, User: "******", Group: "0", Isolators: []types.Isolator{ // 4MB. mustNewIsolator(`{ "name": "resource/memory", "value": { "request": "4194304", "limit": "4194304"} }`), mustNewIsolator(`{ "name": "os/linux/capabilities-retain-set", "value": { "set": ["CAP_SYS_PTRACE"] } }`), }, }, }, }, }, 0, `Memory Limit: 4194304`, "memory", }, { // Multiple apps (with same images) in the pod. The first app will read out the content // written by the second app. []imagePatch{ {"rkt-test-run-pod-manifest-app.aci", []string{"--name=aci1"}}, {"rkt-test-run-pod-manifest-app.aci", []string{"--name=aci2"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: "rkt-inspect-readapp", App: &types.App{ Exec: []string{"/inspect", "--pre-sleep=10", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir/file"}, }, MountPoints: []types.MountPoint{ { Name: "dir", Path: "/dir", ReadOnly: false, }, }, }, }, { Name: "rkt-inspect-writeapp", App: &types.App{ Exec: []string{"/inspect", "--write-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir/file"}, {"CONTENT", "host:foo"}, }, MountPoints: []types.MountPoint{ { Name: "dir", Path: "/dir", ReadOnly: false, }, }, }, }, }, Volumes: []types.Volume{ { Name: "dir", Kind: "host", Source: tmpdir, ReadOnly: nil, Recursive: nil, Mode: nil, UID: nil, GID: nil, }, }, }, 0, "host:foo", "", }, { // Pod manifest overwrites the image's capability. []imagePatch{ {"rkt-test-run-pod-manifest-cap.aci", []string{"--capability=CAP_NET_ADMIN"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-caps-pid=0"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"CAPABILITY", strconv.Itoa(int(capability.CAP_NET_ADMIN))}, }, }, }, }, }, 0, fmt.Sprintf("%v=disabled", capability.CAP_NET_ADMIN.String()), "", }, { // Pod manifest overwrites the image's capability. 
[]imagePatch{ {"rkt-test-run-pod-manifest-cap.aci", []string{"--capability=CAP_NET_BIND_SERVICE"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-caps-pid=0"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"CAPABILITY", strconv.Itoa(int(capability.CAP_NET_ADMIN))}, }, Isolators: []types.Isolator{ mustNewIsolator(`{ "name": "os/linux/capabilities-retain-set", "value": { "set": ["CAP_NET_ADMIN"] } }`), }, }, }, }, }, 0, fmt.Sprintf("%v=enabled", capability.CAP_NET_ADMIN.String()), "", }, { // Set valid numerical app user and group. []imagePatch{ {"rkt-test-run-pod-manifest-valid-numerical-user-group.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-user"}, User: "******", Group: "100", }, }, }, }, 0, "User: uid=1000 euid=1000 gid=100 egid=100", "", }, { // Set valid non-numerical app user and group. []imagePatch{ {"rkt-test-run-pod-manifest-valid-user-group.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-user"}, User: "******", Group: "group1", }, }, }, }, 0, "User: uid=1000 euid=1000 gid=100 egid=100", "", }, { // Set "root", it should work without it being present in // /etc/{passwd,group} []imagePatch{ {"rkt-test-run-pod-manifest-root-user-group.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-user"}, User: "******", Group: "root", }, }, }, }, 0, "User: uid=0 euid=0 gid=0 egid=0", "", }, { // Set invalid non-numerical app user. []imagePatch{ {"rkt-test-run-pod-manifest-invalid-user.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-user"}, User: "******", Group: "0", }, }, }, }, 254, `"user2" user not found`, "", }, { // Set invalid non-numerical app group. []imagePatch{ {"rkt-test-run-pod-manifest-invalid-group.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-user"}, User: "******", Group: "group2", }, }, }, }, 254, `"group2" group not found`, "", }, { // Set valid path-like app user and group. []imagePatch{ {"rkt-test-run-pod-manifest-valid-path-user-group.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-user"}, User: "******", Group: "/etc/group", }, }, }, }, 0, "User: uid=0 euid=0 gid=0 egid=0", "", }, { // Set invalid path-like app user. []imagePatch{ {"rkt-test-run-pod-manifest-invalid-path-user.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-user"}, User: "******", Group: "0", }, }, }, }, 254, `no such file or directory`, "", }, { // Set invalid path-like app group. 
[]imagePatch{ {"rkt-test-run-pod-manifest-invalid-path-group.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-user"}, User: "******", Group: "/etc/nofile", }, }, }, }, 254, `no such file or directory`, "", }, } for i, tt := range tests { if tt.cgroup != "" { ok, err := cgroup.IsIsolatorSupported(tt.cgroup) if err != nil { t.Fatalf("Error checking memory isolator support: %v", err) } if !ok { t.Logf("Skip test #%v: cgroup %s not supported", i, tt.cgroup) continue } } var hashesToRemove []string for j, v := range tt.images { hash, err := patchImportAndFetchHash(v.name, v.patches, t, ctx) if err != nil { t.Fatalf("%v", err) } hashesToRemove = append(hashesToRemove, hash) imgName := types.MustACIdentifier(v.name) imgID, err := types.NewHash(hash) if err != nil { t.Fatalf("Cannot generate types.Hash from %v: %v", hash, err) } ra := &tt.podManifest.Apps[j] ra.Image.Name = imgName ra.Image.ID = *imgID } tt.podManifest.ACKind = schema.PodManifestKind tt.podManifest.ACVersion = schema.AppContainerVersion manifestFile := generatePodManifestFile(t, tt.podManifest) defer os.Remove(manifestFile) // 1. Test 'rkt run'. runCmd := fmt.Sprintf("%s run --mds-register=false --pod-manifest=%s", ctx.Cmd(), manifestFile) t.Logf("Running 'run' test #%v", i) child := spawnOrFail(t, runCmd) if tt.expectedResult != "" { if _, out, err := expectRegexWithOutput(child, tt.expectedResult); err != nil { t.Errorf("Expected %q but not found: %v\n%s", tt.expectedResult, err, out) continue } } waitOrFail(t, child, tt.expectedExit) verifyHostFile(t, tmpdir, "file", i, tt.expectedResult) // 2. Test 'rkt prepare' + 'rkt run-prepared'. rktCmd := fmt.Sprintf("%s --insecure-options=image prepare --pod-manifest=%s", ctx.Cmd(), manifestFile) uuid := runRktAndGetUUID(t, rktCmd) runPreparedCmd := fmt.Sprintf("%s run-prepared --mds-register=false %s", ctx.Cmd(), uuid) t.Logf("Running 'run-prepared' test #%v", i) child = spawnOrFail(t, runPreparedCmd) if tt.expectedResult != "" { if _, out, err := expectRegexWithOutput(child, tt.expectedResult); err != nil { t.Errorf("Expected %q but not found: %v\n%s", tt.expectedResult, err, out) continue } } waitOrFail(t, child, tt.expectedExit) verifyHostFile(t, tmpdir, "file", i, tt.expectedResult) // we run the garbage collector and remove the imported images to save // space runGC(t, ctx) for _, h := range hashesToRemove { removeFromCas(t, ctx, h) } } }
// Test running pod manifests that contains just one app. // TODO(yifan): Figure out a way to test port mapping on single host. func TestPodManifest(t *testing.T) { ctx := testutils.NewRktRunCtx() defer ctx.Cleanup() tmpdir := createTempDirOrPanic("rkt-tests.") defer os.RemoveAll(tmpdir) boolFalse, boolTrue := false, true tests := []struct { // [image name]:[image patches] images []imagePatch podManifest *schema.PodManifest shouldSucceed bool expectedResult string cgroup string }{ { // Simple read. []imagePatch{ {"rkt-test-run-pod-manifest-read.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, }, }, }, }, }, true, "dir1", "", }, { // Simple read after write with volume mounted. []imagePatch{ {"rkt-test-run-pod-manifest-vol-rw.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "host:foo"}, }, MountPoints: []types.MountPoint{ {"dir1", "/dir1", false}, }, }, }, }, Volumes: []types.Volume{ {"dir1", "host", tmpdir, nil}, }, }, true, "host:foo", "", }, { // Simple read after write with read-only mount point, should fail. []imagePatch{ {"rkt-test-run-pod-manifest-vol-ro.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "bar"}, }, MountPoints: []types.MountPoint{ {"dir1", "/dir1", true}, }, }, }, }, Volumes: []types.Volume{ {"dir1", "host", tmpdir, nil}, }, }, false, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Simple read after write with volume mounted. // Override the image's mount point spec. This should fail as the volume is // read-only in pod manifest, (which will override the mount point in both image/pod manifest). []imagePatch{ {"rkt-test-run-pod-manifest-vol-rw-override.aci", []string{"--mounts=dir1,path=/dir1,readOnly=false"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir1/file"}, {"CONTENT", "bar"}, }, MountPoints: []types.MountPoint{ {"dir1", "/dir1", false}, }, }, }, }, Volumes: []types.Volume{ {"dir1", "host", tmpdir, &boolTrue}, }, }, false, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Simple read after write with volume mounted. // Override the image's mount point spec. []imagePatch{ {"rkt-test-run-pod-manifest-vol-rw-override.aci", []string{"--mounts=dir1,path=/dir1,readOnly=true"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--write-file", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir2/file"}, {"CONTENT", "host:bar"}, }, MountPoints: []types.MountPoint{ {"dir1", "/dir2", false}, }, }, }, }, Volumes: []types.Volume{ {"dir1", "host", tmpdir, nil}, }, }, true, "host:bar", "", }, { // Simple read after write with volume mounted, no apps in pod manifest. 
[]imagePatch{ { "rkt-test-run-pod-manifest-vol-rw-no-app.aci", []string{ "--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=host:baz", "--mounts=dir1,path=/dir1,readOnly=false", }, }, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ {Name: baseAppName}, }, Volumes: []types.Volume{ {"dir1", "host", tmpdir, nil}, }, }, true, "host:baz", "", }, { // Simple read after write with volume mounted, no apps in pod manifest. // This should succeed even the mount point in image manifest is readOnly, // because it is overrided by the volume's readOnly. []imagePatch{ { "rkt-test-run-pod-manifest-vol-ro-no-app.aci", []string{ "--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=host:zaz", "--mounts=dir1,path=/dir1,readOnly=true", }, }, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ {Name: baseAppName}, }, Volumes: []types.Volume{ {"dir1", "host", tmpdir, &boolFalse}, }, }, true, "host:zaz", "", }, { // Simple read after write with read-only volume mounted, no apps in pod manifest. // This should fail as the volume is read-only. []imagePatch{ { "rkt-test-run-pod-manifest-vol-ro-no-app.aci", []string{ "--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=baz", "--mounts=dir1,path=/dir1,readOnly=false", }, }, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ {Name: baseAppName}, }, Volumes: []types.Volume{ {"dir1", "host", tmpdir, &boolTrue}, }, }, false, `Cannot write to file "/dir1/file": open /dir1/file: read-only file system`, "", }, { // Print CPU quota, which should be overwritten by the pod manifest. []imagePatch{ {"rkt-test-run-pod-manifest-cpu-isolator.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-cpuquota"}, User: "******", Group: "0", Isolators: []types.Isolator{ { Name: "resource/cpu", ValueRaw: rawRequestLimit("100", "100"), }, }, }, }, }, }, true, `CPU Quota: 100`, "cpu", }, { // Print memory limit, which should be overwritten by the pod manifest. []imagePatch{ {"rkt-test-run-pod-manifest-memory-isolator.aci", []string{}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-memorylimit"}, User: "******", Group: "0", Isolators: []types.Isolator{ { Name: "resource/memory", // 4MB. ValueRaw: rawRequestLimit("4194304", "4194304"), }, }, }, }, }, }, true, `Memory Limit: 4194304`, "memory", }, { // Multiple apps (with same images) in the pod. The first app will read out the content // written by the second app. []imagePatch{ {"rkt-test-run-pod-manifest-app.aci", []string{"--name=aci1"}}, {"rkt-test-run-pod-manifest-app.aci", []string{"--name=aci2"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: "rkt-inspect-readapp", App: &types.App{ Exec: []string{"/inspect", "--pre-sleep=10", "--read-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir/file"}, }, MountPoints: []types.MountPoint{ {"dir", "/dir", false}, }, }, }, { Name: "rkt-inspect-writeapp", App: &types.App{ Exec: []string{"/inspect", "--write-file"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"FILE", "/dir/file"}, {"CONTENT", "host:foo"}, }, MountPoints: []types.MountPoint{ {"dir", "/dir", false}, }, }, }, }, Volumes: []types.Volume{ {"dir", "host", tmpdir, nil}, }, }, true, "host:foo", "", }, { // Pod manifest overwrites the image's capability. 
[]imagePatch{ {"rkt-test-run-pod-manifest-cap.aci", []string{"--capability=CAP_NET_ADMIN"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-caps-pid=0"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"CAPABILITY", strconv.Itoa(int(capability.CAP_NET_ADMIN))}, }, }, }, }, }, true, fmt.Sprintf("%v=disabled", capability.CAP_NET_ADMIN.String()), "", }, { // Pod manifest overwrites the image's capability. []imagePatch{ {"rkt-test-run-pod-manifest-cap.aci", []string{"--capability=CAP_NET_BIND_SERVICE"}}, }, &schema.PodManifest{ Apps: []schema.RuntimeApp{ { Name: baseAppName, App: &types.App{ Exec: []string{"/inspect", "--print-caps-pid=0"}, User: "******", Group: "0", Environment: []types.EnvironmentVariable{ {"CAPABILITY", strconv.Itoa(int(capability.CAP_NET_ADMIN))}, }, Isolators: []types.Isolator{ { Name: "os/linux/capabilities-retain-set", ValueRaw: rawValue(fmt.Sprintf(`{"set":["CAP_NET_ADMIN"]}`)), }, }, }, }, }, }, true, fmt.Sprintf("%v=enabled", capability.CAP_NET_ADMIN.String()), "", }, } for i, tt := range tests { if tt.cgroup != "" && !cgroup.IsIsolatorSupported(tt.cgroup) { t.Logf("Skip test #%v: cgroup %s not supported", i, tt.cgroup) continue } var hashesToRemove []string for j, v := range tt.images { hash := patchImportAndFetchHash(v.name, v.patches, t, ctx) hashesToRemove = append(hashesToRemove, hash) imgName := types.MustACIdentifier(v.name) imgID, err := types.NewHash(hash) if err != nil { t.Fatalf("Cannot generate types.Hash from %v: %v", hash, err) } ra := &tt.podManifest.Apps[j] ra.Image.Name = imgName ra.Image.ID = *imgID } tt.podManifest.ACKind = schema.PodManifestKind tt.podManifest.ACVersion = schema.AppContainerVersion manifestFile := generatePodManifestFile(t, tt.podManifest) defer os.Remove(manifestFile) // 1. Test 'rkt run'. runCmd := fmt.Sprintf("%s run --mds-register=false --pod-manifest=%s", ctx.Cmd(), manifestFile) t.Logf("Running 'run' test #%v", i) child := spawnOrFail(t, runCmd) if tt.expectedResult != "" { if err := expectWithOutput(child, tt.expectedResult); err != nil { t.Fatalf("Expected %q but not found: %v", tt.expectedResult, err) } } if err := child.Wait(); err != nil { if tt.shouldSucceed { t.Fatalf("rkt didn't terminate correctly: %v", err) } } verifyHostFile(t, tmpdir, "file", i, tt.expectedResult) // 2. Test 'rkt prepare' + 'rkt run-prepared'. rktCmd := fmt.Sprintf("%s --insecure-skip-verify prepare --pod-manifest=%s", ctx.Cmd(), manifestFile) uuid := runRktAndGetUUID(t, rktCmd) runPreparedCmd := fmt.Sprintf("%s run-prepared --mds-register=false %s", ctx.Cmd(), uuid) t.Logf("Running 'run-prepared' test #%v", i) child = spawnOrFail(t, runPreparedCmd) if tt.expectedResult != "" { if err := expectWithOutput(child, tt.expectedResult); err != nil { t.Fatalf("Expected %q but not found: %v", tt.expectedResult, err) } } if err := child.Wait(); err != nil { if tt.shouldSucceed { t.Fatalf("rkt didn't terminate correctly: %v", err) } } verifyHostFile(t, tmpdir, "file", i, tt.expectedResult) // we run the garbage collector and remove the imported images to save // space runGC(t, ctx) for _, h := range hashesToRemove { removeFromCas(t, ctx, h) } } }
func main() { globalFlagset.Parse(os.Args[1:]) args := globalFlagset.Args() if len(args) > 0 { fmt.Fprintln(os.Stderr, "Wrong parameters") os.Exit(1) } if globalFlags.PreSleep >= 0 { time.Sleep(time.Duration(globalFlags.PreSleep) * time.Second) } if globalFlags.ReadStdin { reader := bufio.NewReader(os.Stdin) fmt.Printf("Enter text:\n") text, _ := reader.ReadString('\n') fmt.Printf("Received text: %s\n", text) } if globalFlags.CheckTty { fd := int(os.Stdin.Fd()) var termios syscall.Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TCGETS, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) if err == 0 { fmt.Printf("stdin is a terminal\n") } else { fmt.Printf("stdin is not a terminal\n") } } if globalFlags.PrintExec { fmt.Fprintf(os.Stdout, "inspect execed as: %s\n", os.Args[0]) } if globalFlags.PrintMsg != "" { fmt.Fprintf(os.Stdout, "%s\n", globalFlags.PrintMsg) messageLoopStr := os.Getenv("MESSAGE_LOOP") messageLoop, err := strconv.Atoi(messageLoopStr) if err == nil { for i := 0; i < messageLoop; i++ { time.Sleep(time.Second) fmt.Fprintf(os.Stdout, "%s\n", globalFlags.PrintMsg) } } } if globalFlags.PrintEnv != "" { fmt.Fprintf(os.Stdout, "%s=%s\n", globalFlags.PrintEnv, os.Getenv(globalFlags.PrintEnv)) } if globalFlags.PrintCapsPid >= 0 { caps, err := capability.NewPid(globalFlags.PrintCapsPid) if err != nil { fmt.Fprintf(os.Stderr, "Cannot get caps: %v\n", err) os.Exit(1) } fmt.Printf("Capability set: effective: %s\n", caps.StringCap(capability.EFFECTIVE)) fmt.Printf("Capability set: permitted: %s\n", caps.StringCap(capability.PERMITTED)) fmt.Printf("Capability set: inheritable: %s\n", caps.StringCap(capability.INHERITABLE)) fmt.Printf("Capability set: bounding: %s\n", caps.StringCap(capability.BOUNDING)) if capStr := os.Getenv("CAPABILITY"); capStr != "" { capInt, err := strconv.Atoi(capStr) if err != nil { fmt.Fprintf(os.Stderr, "Environment variable $CAPABILITY is not a valid capability number: %v\n", err) os.Exit(1) } c := capability.Cap(capInt) if caps.Get(capability.BOUNDING, c) { fmt.Printf("%v=enabled\n", c.String()) } else { fmt.Printf("%v=disabled\n", c.String()) } } } if globalFlags.PrintUser { fmt.Printf("User: uid=%d euid=%d gid=%d egid=%d\n", os.Getuid(), os.Geteuid(), os.Getgid(), os.Getegid()) } if globalFlags.PrintGroups { gids, err := os.Getgroups() if err != nil { fmt.Fprintf(os.Stderr, "Error getting groups: %v\n", err) os.Exit(1) } // getgroups(2): It is unspecified whether the effective group ID of // the calling process is included in the returned list. (Thus, an // application should also call getegid(2) and add or remove the // resulting value.) 
egid := os.Getegid() if !in(gids, egid) { gids = append(gids, egid) sort.Ints(gids) } var b bytes.Buffer for _, gid := range gids { b.WriteString(fmt.Sprintf("%d ", gid)) } fmt.Printf("Groups: %s\n", b.String()) } if globalFlags.WriteFile { fileName := os.Getenv("FILE") if globalFlags.FileName != "" { fileName = globalFlags.FileName } content := os.Getenv("CONTENT") if globalFlags.Content != "" { content = globalFlags.Content } err := ioutil.WriteFile(fileName, []byte(content), 0600) if err != nil { fmt.Fprintf(os.Stderr, "Cannot write to file %q: %v\n", fileName, err) os.Exit(1) } } if globalFlags.ReadFile { fileName := os.Getenv("FILE") if globalFlags.FileName != "" { fileName = globalFlags.FileName } dat, err := ioutil.ReadFile(fileName) if err != nil { fmt.Fprintf(os.Stderr, "Cannot read file %q: %v\n", fileName, err) os.Exit(1) } fmt.Print("<<<") fmt.Print(string(dat)) fmt.Print(">>>\n") } if globalFlags.CheckCwd != "" { wd, err := os.Getwd() if err != nil { fmt.Fprintf(os.Stderr, "Cannot get working directory: %v\n", err) os.Exit(1) } if wd != globalFlags.CheckCwd { fmt.Fprintf(os.Stderr, "Working directory: %q. Expected: %q.\n", wd, globalFlags.CheckCwd) os.Exit(1) } } if globalFlags.Sleep >= 0 { time.Sleep(time.Duration(globalFlags.Sleep) * time.Second) } if globalFlags.PrintMemoryLimit { memCgroupPath, err := cgroup.GetOwnCgroupPath("memory") if err != nil { fmt.Fprintf(os.Stderr, "Error getting own memory cgroup path: %v\n", err) os.Exit(1) } // we use /proc/1/root to escape the chroot we're in and read our // memory limit limitPath := filepath.Join("/proc/1/root/sys/fs/cgroup/memory", memCgroupPath, "memory.limit_in_bytes") limit, err := ioutil.ReadFile(limitPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read memory.limit_in_bytes\n") os.Exit(1) } fmt.Printf("Memory Limit: %s\n", string(limit)) } if globalFlags.PrintCPUQuota { cpuCgroupPath, err := cgroup.GetOwnCgroupPath("cpu") if err != nil { fmt.Fprintf(os.Stderr, "Error getting own cpu cgroup path: %v\n", err) os.Exit(1) } // we use /proc/1/root to escape the chroot we're in and read our // cpu quota periodPath := filepath.Join("/proc/1/root/sys/fs/cgroup/cpu", cpuCgroupPath, "cpu.cfs_period_us") periodBytes, err := ioutil.ReadFile(periodPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read cpu.cpu_period_us\n") os.Exit(1) } quotaPath := filepath.Join("/proc/1/root/sys/fs/cgroup/cpu", cpuCgroupPath, "cpu.cfs_quota_us") quotaBytes, err := ioutil.ReadFile(quotaPath) if err != nil { fmt.Fprintf(os.Stderr, "Can't read cpu.cpu_quota_us\n") os.Exit(1) } period, err := strconv.Atoi(strings.Trim(string(periodBytes), "\n")) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } quota, err := strconv.Atoi(strings.Trim(string(quotaBytes), "\n")) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } quotaMilliCores := quota * 1000 / period fmt.Printf("CPU Quota: %s\n", strconv.Itoa(quotaMilliCores)) } if globalFlags.CheckCgroupMounts { rootCgroupPath := "/proc/1/root/sys/fs/cgroup" testPaths := []string{rootCgroupPath} // test a couple of controllers if they're available if cgroup.IsIsolatorSupported("memory") { testPaths = append(testPaths, filepath.Join(rootCgroupPath, "memory")) } if cgroup.IsIsolatorSupported("cpu") { testPaths = append(testPaths, filepath.Join(rootCgroupPath, "cpu")) } for _, p := range testPaths { if err := syscall.Mkdir(filepath.Join(p, "test"), 0600); err == nil || err != syscall.EROFS { fmt.Println("check-cgroups: FAIL") os.Exit(1) } } fmt.Println("check-cgroups: SUCCESS") } if 
globalFlags.PrintNetNS { ns, err := os.Readlink("/proc/self/ns/net") if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("NetNS: %s\n", ns) } if globalFlags.PrintIPv4 != "" { iface := globalFlags.PrintIPv4 ips, err := testutils.GetIPsv4(iface) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("%v IPv4: %s\n", iface, ips[0]) } if globalFlags.PrintDefaultGWv4 { gw, err := testutils.GetDefaultGWv4() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("DefaultGWv4: %s\n", gw) } if globalFlags.PrintDefaultGWv6 { gw, err := testutils.GetDefaultGWv6() if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("DefaultGWv6: %s\n", gw) } if globalFlags.PrintGWv4 != "" { // TODO: GetGW not implemented yet iface := globalFlags.PrintGWv4 gw, err := testutils.GetGWv4(iface) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("%v GWv4: %s\n", iface, gw) } if globalFlags.PrintIPv6 != "" { // TODO } if globalFlags.PrintGWv6 != "" { // TODO } if globalFlags.ServeHttp != "" { err := testutils.HttpServe(globalFlags.ServeHttp, globalFlags.ServeHttpTimeout) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } if globalFlags.GetHttp != "" { body, err := testutils.HttpGet(globalFlags.GetHttp) if err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } fmt.Printf("HTTP-Get received: %s\n", body) } os.Exit(globalFlags.ExitCode) }
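// Worked example for the --print-cpuquota arithmetic above: a 100m CPU limit is
// rendered as CPUQuota=10%, which with the kernel's default 100ms CFS period shows up
// as cpu.cfs_quota_us=10000 and cpu.cfs_period_us=100000, so inspect prints
// "CPU Quota: 100". The values below are illustrative, not read from a live cgroup.
func exampleCPUQuotaMillicores() {
	period := 100000 // cpu.cfs_period_us, in microseconds
	quota := 10000   // cpu.cfs_quota_us, in microseconds
	// Same arithmetic as the PrintCPUQuota branch: quota * 1000 / period.
	fmt.Printf("CPU Quota: %d\n", quota*1000/period) // prints "CPU Quota: 100"
}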
func (uw *UnitWriter) AppUnit( ra *schema.RuntimeApp, binPath, privateUsers string, insecureOptions Stage1InsecureOptions, opts ...*unit.UnitOption, ) { if uw.err != nil { return } flavor, systemdVersion, err := GetFlavor(uw.p) if err != nil { uw.err = errwrap.Wrap(errors.New("unable to determine stage1 flavor"), err) return } app := ra.App appName := ra.Name imgName := uw.p.AppNameToImageName(appName) if len(app.Exec) == 0 { uw.err = fmt.Errorf(`image %q has an empty "exec" (try --exec=BINARY)`, imgName) return } env := app.Environment env.Set("AC_APP_NAME", appName.String()) if uw.p.MetadataServiceURL != "" { env.Set("AC_METADATA_URL", uw.p.MetadataServiceURL) } envFilePath := EnvFilePath(uw.p.Root, appName) uidRange := user.NewBlankUidRange() if err := uidRange.Deserialize([]byte(privateUsers)); err != nil { uw.err = err return } if err := common.WriteEnvFile(env, uidRange, envFilePath); err != nil { uw.err = errwrap.Wrap(errors.New("unable to write environment file for systemd"), err) return } u, g, err := parseUserGroup(uw.p, ra, uidRange) if err != nil { uw.err = err return } if err := generateSysusers(uw.p, ra, u, g, uidRange); err != nil { uw.err = errwrap.Wrap(errors.New("unable to generate sysusers"), err) return } var supplementaryGroups []string for _, g := range app.SupplementaryGIDs { supplementaryGroups = append(supplementaryGroups, strconv.Itoa(g)) } capabilitiesStr, err := getAppCapabilities(app.Isolators) if err != nil { uw.err = err return } execStart := append([]string{binPath}, app.Exec[1:]...) execStartString := quoteExec(execStart) opts = append(opts, []*unit.UnitOption{ unit.NewUnitOption("Unit", "Description", fmt.Sprintf("Application=%v Image=%v", appName, imgName)), unit.NewUnitOption("Unit", "DefaultDependencies", "false"), unit.NewUnitOption("Unit", "Wants", fmt.Sprintf("reaper-%s.service", appName)), unit.NewUnitOption("Service", "Restart", "no"), unit.NewUnitOption("Service", "ExecStart", execStartString), unit.NewUnitOption("Service", "RootDirectory", common.RelAppRootfsPath(appName)), // MountFlags=shared creates a new mount namespace and (as unintuitive // as it might seem) makes sure the mount is slave+shared. unit.NewUnitOption("Service", "MountFlags", "shared"), unit.NewUnitOption("Service", "WorkingDirectory", app.WorkingDirectory), unit.NewUnitOption("Service", "EnvironmentFile", RelEnvFilePath(appName)), unit.NewUnitOption("Service", "User", strconv.Itoa(u)), unit.NewUnitOption("Service", "Group", strconv.Itoa(g)), // This helps working around a race // (https://github.com/systemd/systemd/issues/2913) that causes the // systemd unit name not getting written to the journal if the unit is // short-lived and runs as non-root. unit.NewUnitOption("Service", "SyslogIdentifier", appName.String()), }...) 
if len(supplementaryGroups) > 0 { opts = appendOptionsList(opts, "Service", "SupplementaryGroups", "", supplementaryGroups) } if supportsNotify(uw.p, appName.String()) { opts = append(opts, unit.NewUnitOption("Service", "Type", "notify")) } if !insecureOptions.DisableCapabilities { opts = append(opts, unit.NewUnitOption("Service", "CapabilityBoundingSet", strings.Join(capabilitiesStr, " "))) } noNewPrivileges := getAppNoNewPrivileges(app.Isolators) // Apply seccomp isolator, if any and not opt-ing out; // see https://www.freedesktop.org/software/systemd/man/systemd.exec.html#SystemCallFilter= if !insecureOptions.DisableSeccomp { var forceNoNewPrivileges bool unprivileged := (u != 0) opts, forceNoNewPrivileges, err = getSeccompFilter(opts, uw.p, unprivileged, app.Isolators) if err != nil { uw.err = err return } // Seccomp filters require NoNewPrivileges for unprivileged apps, that may override // manifest annotation. if forceNoNewPrivileges { noNewPrivileges = true } } opts = append(opts, unit.NewUnitOption("Service", "NoNewPrivileges", strconv.FormatBool(noNewPrivileges))) if ra.ReadOnlyRootFS { opts = append(opts, unit.NewUnitOption("Service", "ReadOnlyDirectories", common.RelAppRootfsPath(appName))) } // TODO(tmrts): Extract this logic into a utility function. vols := make(map[types.ACName]types.Volume) for _, v := range uw.p.Manifest.Volumes { vols[v.Name] = v } absRoot, err := filepath.Abs(uw.p.Root) // Absolute path to the pod's rootfs. if err != nil { uw.err = err return } appRootfs := common.AppRootfsPath(absRoot, appName) rwDirs := []string{} imageManifest := uw.p.Images[appName.String()] mounts := GenerateMounts(ra, vols, imageManifest) for _, m := range mounts { mntPath, err := EvaluateSymlinksInsideApp(appRootfs, m.Path) if err != nil { uw.err = err return } if !IsMountReadOnly(vols[m.Volume], app.MountPoints) { rwDirs = append(rwDirs, filepath.Join(common.RelAppRootfsPath(appName), mntPath)) } } if len(rwDirs) > 0 { opts = appendOptionsList(opts, "Service", "ReadWriteDirectories", "", rwDirs) } // Restrict access to sensitive paths (eg. procfs and sysfs entries). if !insecureOptions.DisablePaths { opts = protectKernelTunables(opts, appName, systemdVersion) } // Generate default device policy for the app, as well as the list of allowed devices. // For kvm flavor, devices are VM-specific and restricting them is not strictly needed. 
if !insecureOptions.DisablePaths && flavor != "kvm" { opts = append(opts, unit.NewUnitOption("Service", "DevicePolicy", "closed")) deviceAllows, err := generateDeviceAllows(common.Stage1RootfsPath(absRoot), appName, app.MountPoints, mounts, vols, uidRange) if err != nil { uw.err = err return } for _, dev := range deviceAllows { opts = append(opts, unit.NewUnitOption("Service", "DeviceAllow", dev)) } } // When an app fails, we shut down the pod opts = append(opts, unit.NewUnitOption("Unit", "OnFailure", "halt.target")) for _, eh := range app.EventHandlers { var typ string switch eh.Name { case "pre-start": typ = "ExecStartPre" case "post-stop": typ = "ExecStopPost" default: uw.err = fmt.Errorf("unrecognized eventHandler: %v", eh.Name) return } exec := quoteExec(eh.Exec) opts = append(opts, unit.NewUnitOption("Service", typ, exec)) } // Some pre-start jobs take a long time, set the timeout to 0 opts = append(opts, unit.NewUnitOption("Service", "TimeoutStartSec", "0")) var saPorts []types.Port for _, p := range app.Ports { if p.SocketActivated { saPorts = append(saPorts, p) } } doWithIsolator := func(isolator string, f func() error) bool { ok, err := cgroup.IsIsolatorSupported(isolator) if err != nil { uw.err = err return true } if !ok { fmt.Fprintf(os.Stderr, "warning: resource/%s isolator set but support disabled in the kernel, skipping\n", isolator) } if err := f(); err != nil { uw.err = err return true } return false } exit := false for _, i := range app.Isolators { if exit { return } switch v := i.Value().(type) { case *types.ResourceMemory: exit = doWithIsolator("memory", func() error { if v.Limit() == nil { return nil } opts = append(opts, unit.NewUnitOption("Service", "MemoryLimit", strconv.Itoa(int(v.Limit().Value())))) return nil }) case *types.ResourceCPU: exit = doWithIsolator("cpu", func() error { if v.Limit() == nil { return nil } if v.Limit().Value() > resource.MaxMilliValue { return fmt.Errorf("cpu limit exceeds the maximum millivalue: %v", v.Limit().String()) } quota := strconv.Itoa(int(v.Limit().MilliValue()/10)) + "%" opts = append(opts, unit.NewUnitOption("Service", "CPUQuota", quota)) return nil }) } } if len(saPorts) > 0 { sockopts := []*unit.UnitOption{ unit.NewUnitOption("Unit", "Description", fmt.Sprintf("Application=%v Image=%v %s", appName, imgName, "socket-activated ports")), unit.NewUnitOption("Unit", "DefaultDependencies", "false"), unit.NewUnitOption("Socket", "BindIPv6Only", "both"), unit.NewUnitOption("Socket", "Service", ServiceUnitName(appName)), } for _, sap := range saPorts { var proto string switch sap.Protocol { case "tcp": proto = "ListenStream" case "udp": proto = "ListenDatagram" default: uw.err = fmt.Errorf("unrecognized protocol: %v", sap.Protocol) return } // We find the host port for the pod's port and use that in the // socket unit file. // This is so because systemd inside the pod will match based on // the socket port number, and since the socket was created on the // host, it will have the host port number. 
port := findHostPort(*uw.p.Manifest, sap.Name) if port == 0 { log.Printf("warning: no --port option for socket-activated port %q, assuming port %d as specified in the manifest", sap.Name, sap.Port) port = sap.Port } sockopts = append(sockopts, unit.NewUnitOption("Socket", proto, fmt.Sprintf("%v", port))) } file, err := os.OpenFile(SocketUnitPath(uw.p.Root, appName), os.O_WRONLY|os.O_CREATE, 0644) if err != nil { uw.err = errwrap.Wrap(errors.New("failed to create socket file"), err) return } defer file.Close() if _, err = io.Copy(file, unit.Serialize(sockopts)); err != nil { uw.err = errwrap.Wrap(errors.New("failed to write socket unit file"), err) return } if err = os.Symlink(path.Join("..", SocketUnitName(appName)), SocketWantPath(uw.p.Root, appName)); err != nil { uw.err = errwrap.Wrap(errors.New("failed to link socket want"), err) return } opts = append(opts, unit.NewUnitOption("Unit", "Requires", SocketUnitName(appName))) } opts = append(opts, unit.NewUnitOption("Unit", "Requires", InstantiatedPrepareAppUnitName(appName))) opts = append(opts, unit.NewUnitOption("Unit", "After", InstantiatedPrepareAppUnitName(appName))) opts = append(opts, unit.NewUnitOption("Unit", "Requires", "sysusers.service")) opts = append(opts, unit.NewUnitOption("Unit", "After", "sysusers.service")) uw.WriteUnit(ServiceUnitPath(uw.p.Root, appName), "failed to create service unit file", opts...) uw.Activate(ServiceUnitName(appName), ServiceWantPath(uw.p.Root, appName)) }
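// A minimal sketch of how the isolator loop in AppUnit ends up in a systemd unit file:
// resource limits become [Service] options and go-systemd's unit.Serialize renders
// them. The concrete values here are illustrative; AppUnit derives them from the app's
// isolators and writes the full option list with uw.WriteUnit.
func writeExampleResourceOptions(w io.Writer) error {
	opts := []*unit.UnitOption{
		unit.NewUnitOption("Unit", "Description", "Application=example"),
		unit.NewUnitOption("Service", "MemoryLimit", "4194304"), // resource/memory limit in bytes
		unit.NewUnitOption("Service", "CPUQuota", "10%"),        // resource/cpu limit of 100m
	}
	_, err := io.Copy(w, unit.Serialize(opts))
	return err
}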
// Test running pod manifests that contain one or more apps.
// TODO(yifan): Figure out a way to test port mapping on single host.
func TestPodManifest(t *testing.T) {
	ctx := newRktRunCtx()
	defer ctx.cleanup()

	tmpdir, err := ioutil.TempDir("", "rkt-tests.")
	if err != nil {
		t.Fatalf("Cannot create temporary directory: %v", err)
	}
	defer os.RemoveAll(tmpdir)

	boolFalse, boolTrue := false, true

	tests := []struct {
		// [image name]:[image patches]
		images         map[string][]string
		podManifest    *schema.PodManifest
		shouldSuccess  bool
		expectedResult string
		cgroup         string
	}{
		{
			// Simple read.
			map[string][]string{
				"rkt-test-run-pod-manifest-read.aci": {},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--read-file"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"FILE", "/dir1/file"},
							},
						},
					},
				},
			},
			true,
			"dir1",
			"",
		},
		{
			// Simple read after write with volume mounted.
			map[string][]string{
				"rkt-test-run-pod-manifest-vol-rw.aci": {},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--write-file", "--read-file"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"FILE", "/dir1/file"},
								{"CONTENT", "host:foo"},
							},
							MountPoints: []types.MountPoint{
								{"dir1", "/dir1", false},
							},
						},
					},
				},
				Volumes: []types.Volume{
					{"dir1", "host", tmpdir, nil},
				},
			},
			true,
			"host:foo",
			"",
		},
		{
			// Simple read after write with read-only mount point, should fail.
			map[string][]string{
				"rkt-test-run-pod-manifest-vol-ro.aci": {},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--write-file", "--read-file"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"FILE", "/dir1/file"},
								{"CONTENT", "bar"},
							},
							MountPoints: []types.MountPoint{
								{"dir1", "/dir1", true},
							},
						},
					},
				},
				Volumes: []types.Volume{
					{"dir1", "host", tmpdir, nil},
				},
			},
			false,
			`Cannot write to file "/dir1/file": open /dir1/file: read-only file system`,
			"",
		},
		{
			// Simple read after write with volume mounted.
			// Override the image's mount point spec. This should fail as the volume
			// is read-only in the pod manifest, which overrides the mount point in
			// both the image and the pod manifest.
			map[string][]string{
				"rkt-test-run-pod-manifest-vol-rw-override.aci": {"--mounts=dir1,path=/dir1,readOnly=false"},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--write-file", "--read-file"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"FILE", "/dir1/file"},
								{"CONTENT", "bar"},
							},
							MountPoints: []types.MountPoint{
								{"dir1", "/dir1", false},
							},
						},
					},
				},
				Volumes: []types.Volume{
					{"dir1", "host", tmpdir, &boolTrue},
				},
			},
			false,
			`Cannot write to file "/dir1/file": open /dir1/file: read-only file system`,
			"",
		},
		{
			// Simple read after write with volume mounted.
			// Override the image's mount point spec. This should succeed as the
			// mount point in the pod manifest is read-write.
			map[string][]string{
				"rkt-test-run-pod-manifest-vol-rw-override.aci": {"--mounts=dir1,path=/dir1,readOnly=true"},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--write-file", "--read-file"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"FILE", "/dir2/file"},
								{"CONTENT", "host:bar"},
							},
							MountPoints: []types.MountPoint{
								{"dir1", "/dir2", false},
							},
						},
					},
				},
				Volumes: []types.Volume{
					{"dir1", "host", tmpdir, nil},
				},
			},
			true,
			"host:bar",
			"",
		},
		{
			// Simple read after write with volume mounted, no apps in pod manifest.
			map[string][]string{
				"rkt-test-run-pod-manifest-vol-rw-no-app.aci": {
					"--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=host:baz",
					"--mounts=dir1,path=/dir1,readOnly=false",
				},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{Name: baseAppName},
				},
				Volumes: []types.Volume{
					{"dir1", "host", tmpdir, nil},
				},
			},
			true,
			"host:baz",
			"",
		},
		{
			// Simple read after write with volume mounted, no apps in pod manifest.
			// This should succeed even if the mount point in the image manifest is
			// read-only, because it is overridden by the volume's readOnly field.
			map[string][]string{
				"rkt-test-run-pod-manifest-vol-ro-no-app.aci": {
					"--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=host:zaz",
					"--mounts=dir1,path=/dir1,readOnly=true",
				},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{Name: baseAppName},
				},
				Volumes: []types.Volume{
					{"dir1", "host", tmpdir, &boolFalse},
				},
			},
			true,
			"host:zaz",
			"",
		},
		{
			// Simple read after write with read-only volume mounted, no apps in pod manifest.
			// This should fail as the volume is read-only.
			map[string][]string{
				"rkt-test-run-pod-manifest-vol-ro-no-app.aci": {
					"--exec=/inspect --write-file --read-file --file-name=/dir1/file --content=baz",
					"--mounts=dir1,path=/dir1,readOnly=false",
				},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{Name: baseAppName},
				},
				Volumes: []types.Volume{
					{"dir1", "host", tmpdir, &boolTrue},
				},
			},
			false,
			`Cannot write to file "/dir1/file": open /dir1/file: read-only file system`,
			"",
		},
		{
			// Print CPU quota, which should be overwritten by the pod manifest.
			map[string][]string{
				"rkt-test-run-pod-manifest-cpu-isolator.aci": {},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--print-cpuquota"},
							User:  "******",
							Group: "0",
							Isolators: []types.Isolator{
								{
									Name:     "resource/cpu",
									ValueRaw: rawRequestLimit("100", "100"),
								},
							},
						},
					},
				},
			},
			true,
			`CPU Quota: 100`,
			"cpu",
		},
		{
			// Print memory limit, which should be overwritten by the pod manifest.
			map[string][]string{
				"rkt-test-run-pod-manifest-memory-isolator.aci": {},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--print-memorylimit"},
							User:  "******",
							Group: "0",
							Isolators: []types.Isolator{
								{
									Name: "resource/memory",
									// 4MB.
									ValueRaw: rawRequestLimit("4194304", "4194304"),
								},
							},
						},
					},
				},
			},
			true,
			`Memory Limit: 4194304`,
			"memory",
		},
		{
			// Multiple apps in the pod. The first app will read out the content
			// written by the second app.
			map[string][]string{
				"rkt-test-run-pod-manifest-app1.aci": {"--name=rkt-inspect-readapp"},
				"rkt-test-run-pod-manifest-app2.aci": {"--name=rkt-inspect-writeapp"},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: "rkt-inspect-readapp",
						App: &types.App{
							Exec:  []string{"/inspect", "--pre-sleep=10", "--read-file"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"FILE", "/dir/file"},
							},
							MountPoints: []types.MountPoint{
								{"dir", "/dir", false},
							},
						},
					},
					{
						Name: "rkt-inspect-writeapp",
						App: &types.App{
							Exec:  []string{"/inspect", "--write-file"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"FILE", "/dir/file"},
								{"CONTENT", "host:foo"},
							},
							MountPoints: []types.MountPoint{
								{"dir", "/dir", false},
							},
						},
					},
				},
				Volumes: []types.Volume{
					{"dir", "host", tmpdir, nil},
				},
			},
			true,
			"host:foo",
			"",
		},
		{
			// Pod manifest overwrites the image's capability.
			map[string][]string{
				"rkt-test-run-pod-manifest-cap.aci": {"--capability=CAP_NET_ADMIN"},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--print-caps-pid=0"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"CAPABILITY", strconv.Itoa(int(capability.CAP_NET_ADMIN))},
							},
						},
					},
				},
			},
			true,
			fmt.Sprintf("%v=disabled", capability.CAP_NET_ADMIN.String()),
			"",
		},
		{
			// Pod manifest overwrites the image's capability.
			map[string][]string{
				"rkt-test-run-pod-manifest-cap.aci": {"--capability=CAP_NET_ADMIN"},
			},
			&schema.PodManifest{
				Apps: []schema.RuntimeApp{
					{
						Name: baseAppName,
						App: &types.App{
							Exec:  []string{"/inspect", "--print-caps-pid=0"},
							User:  "******",
							Group: "0",
							Environment: []types.EnvironmentVariable{
								{"CAPABILITY", strconv.Itoa(int(capability.CAP_NET_BIND_SERVICE))},
							},
							Isolators: []types.Isolator{
								{
									Name:     "os/linux/capabilities-retain-set",
									ValueRaw: rawValue(`{"set":["CAP_NET_BIND_SERVICE"]}`),
								},
							},
						},
					},
				},
			},
			true,
			fmt.Sprintf("%v=enabled", capability.CAP_NET_BIND_SERVICE.String()),
			"",
		},
	}

	for i, tt := range tests {
		if tt.cgroup != "" && !cgroup.IsIsolatorSupported(tt.cgroup) {
			t.Logf("Skip test #%v: cgroup %s not supported", i, tt.cgroup)
			continue
		}

		// Build each test image and point the corresponding runtime app at it.
		j := 0
		for name, patches := range tt.images {
			imageFile := patchTestACI(name, patches...)
			hash := importImageAndFetchHash(t, ctx, imageFile)
			defer os.Remove(imageFile)

			imgName := types.MustACIdentifier(name)
			imgID, err := types.NewHash(hash)
			if err != nil {
				t.Fatalf("Cannot generate types.Hash from %v: %v", hash, err)
			}

			ra := &tt.podManifest.Apps[j]
			ra.Image.Name = imgName
			ra.Image.ID = *imgID
			j++
		}

		tt.podManifest.ACKind = schema.PodManifestKind
		tt.podManifest.ACVersion = schema.AppContainerVersion

		manifestFile := generatePodManifestFile(t, tt.podManifest)
		defer os.Remove(manifestFile)

		// 1. Test 'rkt run'.
		runCmd := fmt.Sprintf("%s run --mds-register=false --pod-manifest=%s", ctx.cmd(), manifestFile)
		t.Logf("Running 'run' test #%v: %v", i, runCmd)
		child, err := gexpect.Spawn(runCmd)
		if err != nil {
			t.Fatalf("Cannot exec rkt #%v: %v", i, err)
		}

		if tt.expectedResult != "" {
			if err := expectWithOutput(child, tt.expectedResult); err != nil {
				t.Fatalf("Expected %q but not found: %v", tt.expectedResult, err)
			}
		}
		if err := child.Wait(); err != nil {
			if tt.shouldSuccess {
				t.Fatalf("rkt didn't terminate correctly: %v", err)
			}
		}
		verifyHostFile(t, tmpdir, "file", i, tt.expectedResult)

		// 2. Test 'rkt prepare' + 'rkt run-prepared'.
		cmds := strings.Fields(ctx.cmd())
		prepareCmd := exec.Command(cmds[0], cmds[1:]...)
		prepareArg := fmt.Sprintf("--pod-manifest=%s", manifestFile)
		prepareCmd.Args = append(prepareCmd.Args, "--insecure-skip-verify", "prepare", prepareArg)
		output, err := prepareCmd.Output()
		if err != nil {
			t.Fatalf("Cannot read the output: %v", err)
		}

		podIDStr := strings.TrimSpace(string(output))
		podID, err := types.NewUUID(podIDStr)
		if err != nil {
			t.Fatalf("%q is not a valid UUID: %v", podIDStr, err)
		}

		runPreparedCmd := fmt.Sprintf("%s run-prepared --mds-register=false %s", ctx.cmd(), podID.String())
		t.Logf("Running 'run-prepared' test #%v: %v", i, runPreparedCmd)
		child, err = gexpect.Spawn(runPreparedCmd)
		if err != nil {
			t.Fatalf("Cannot exec rkt #%v: %v", i, err)
		}

		if tt.expectedResult != "" {
			if err := expectWithOutput(child, tt.expectedResult); err != nil {
				t.Fatalf("Expected %q but not found: %v", tt.expectedResult, err)
			}
		}
		if err := child.Wait(); err != nil {
			if tt.shouldSuccess {
				t.Fatalf("rkt didn't terminate correctly: %v", err)
			}
		}
		verifyHostFile(t, tmpdir, "file", i, tt.expectedResult)
	}
}
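TestPodManifest relies on a generatePodManifestFile helper to persist each test case's manifest before handing it to rkt. The actual helper in the test suite may differ; the following is a minimal sketch, assuming it simply marshals the schema.PodManifest to JSON into a temporary file (imports assumed: "encoding/json", "io/ioutil", "testing", and the appc schema package).

// generatePodManifestFile (illustrative sketch): writes the pod manifest as
// JSON to a temporary file and returns its path, so callers can pass it to
// `rkt run --pod-manifest=...` or `rkt prepare --pod-manifest=...`.
func generatePodManifestFile(t *testing.T, manifest *schema.PodManifest) string {
	f, err := ioutil.TempFile("", "rkt-pod-manifest-")
	if err != nil {
		t.Fatalf("Cannot create temporary manifest file: %v", err)
	}
	defer f.Close()

	data, err := json.Marshal(manifest)
	if err != nil {
		t.Fatalf("Cannot marshal pod manifest: %v", err)
	}
	if _, err := f.Write(data); err != nil {
		t.Fatalf("Cannot write pod manifest: %v", err)
	}
	return f.Name()
}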
func main() {
	globalFlagset.Parse(os.Args[1:])
	args := globalFlagset.Args()
	if len(args) > 0 {
		fmt.Fprintln(os.Stderr, "Wrong parameters")
		os.Exit(1)
	}

	if globalFlags.PreSleep >= 0 {
		time.Sleep(time.Duration(globalFlags.PreSleep) * time.Second)
	}

	if globalFlags.ReadStdin {
		reader := bufio.NewReader(os.Stdin)
		fmt.Printf("Enter text:\n")
		text, _ := reader.ReadString('\n')
		fmt.Printf("Received text: %s\n", text)
	}

	if globalFlags.CheckTty {
		fd := int(os.Stdin.Fd())
		var termios syscall.Termios
		_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TCGETS, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
		if err == 0 {
			fmt.Printf("stdin is a terminal\n")
		} else {
			fmt.Printf("stdin is not a terminal\n")
		}
	}

	if globalFlags.PrintMsg != "" {
		fmt.Fprintf(os.Stdout, "%s\n", globalFlags.PrintMsg)
		messageLoopStr := os.Getenv("MESSAGE_LOOP")
		messageLoop, err := strconv.Atoi(messageLoopStr)
		if err == nil {
			for i := 0; i < messageLoop; i++ {
				time.Sleep(time.Second)
				fmt.Fprintf(os.Stdout, "%s\n", globalFlags.PrintMsg)
			}
		}
	}

	if globalFlags.PrintEnv != "" {
		fmt.Fprintf(os.Stdout, "%s=%s\n", globalFlags.PrintEnv, os.Getenv(globalFlags.PrintEnv))
	}

	if globalFlags.PrintCapsPid >= 0 {
		caps, err := capability.NewPid(globalFlags.PrintCapsPid)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Cannot get caps: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("Capability set: effective: %s\n", caps.StringCap(capability.EFFECTIVE))
		fmt.Printf("Capability set: permitted: %s\n", caps.StringCap(capability.PERMITTED))
		fmt.Printf("Capability set: inheritable: %s\n", caps.StringCap(capability.INHERITABLE))
		fmt.Printf("Capability set: bounding: %s\n", caps.StringCap(capability.BOUNDING))

		if capStr := os.Getenv("CAPABILITY"); capStr != "" {
			capInt, err := strconv.Atoi(capStr)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Environment variable $CAPABILITY is not a valid capability number: %v\n", err)
				os.Exit(1)
			}
			c := capability.Cap(capInt)
			if caps.Get(capability.BOUNDING, c) {
				fmt.Printf("%v=enabled\n", c.String())
			} else {
				fmt.Printf("%v=disabled\n", c.String())
			}
		}
	}

	if globalFlags.PrintUser {
		fmt.Printf("User: uid=%d euid=%d gid=%d egid=%d\n", os.Getuid(), os.Geteuid(), os.Getgid(), os.Getegid())
	}

	if globalFlags.WriteFile {
		fileName := os.Getenv("FILE")
		if globalFlags.FileName != "" {
			fileName = globalFlags.FileName
		}
		content := os.Getenv("CONTENT")
		if globalFlags.Content != "" {
			content = globalFlags.Content
		}
		err := ioutil.WriteFile(fileName, []byte(content), 0600)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Cannot write to file %q: %v\n", fileName, err)
			os.Exit(1)
		}
	}

	if globalFlags.ReadFile {
		fileName := os.Getenv("FILE")
		if globalFlags.FileName != "" {
			fileName = globalFlags.FileName
		}
		dat, err := ioutil.ReadFile(fileName)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Cannot read file %q: %v\n", fileName, err)
			os.Exit(1)
		}
		fmt.Print("<<<")
		fmt.Print(string(dat))
		fmt.Print(">>>\n")
	}

	if globalFlags.CheckCwd != "" {
		wd, err := os.Getwd()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Cannot get working directory: %v\n", err)
			os.Exit(1)
		}
		if wd != globalFlags.CheckCwd {
			fmt.Fprintf(os.Stderr, "Working directory: %q. Expected: %q.\n", wd, globalFlags.CheckCwd)
			os.Exit(1)
		}
	}

	if globalFlags.Sleep >= 0 {
		time.Sleep(time.Duration(globalFlags.Sleep) * time.Second)
	}

	if globalFlags.PrintMemoryLimit {
		memCgroupPath, err := cgroup.GetOwnCgroupPath("memory")
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error getting own memory cgroup path: %v\n", err)
			os.Exit(1)
		}
		// We use /proc/1/root to escape the chroot we're in and read our
		// memory limit.
		limitPath := filepath.Join("/proc/1/root/sys/fs/cgroup/memory", memCgroupPath, "memory.limit_in_bytes")
		limit, err := ioutil.ReadFile(limitPath)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Can't read memory.limit_in_bytes\n")
			os.Exit(1)
		}
		fmt.Printf("Memory Limit: %s\n", string(limit))
	}

	if globalFlags.PrintCPUQuota {
		cpuCgroupPath, err := cgroup.GetOwnCgroupPath("cpu")
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error getting own cpu cgroup path: %v\n", err)
			os.Exit(1)
		}
		// We use /proc/1/root to escape the chroot we're in and read our
		// cpu quota.
		periodPath := filepath.Join("/proc/1/root/sys/fs/cgroup/cpu", cpuCgroupPath, "cpu.cfs_period_us")
		periodBytes, err := ioutil.ReadFile(periodPath)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Can't read cpu.cfs_period_us\n")
			os.Exit(1)
		}
		quotaPath := filepath.Join("/proc/1/root/sys/fs/cgroup/cpu", cpuCgroupPath, "cpu.cfs_quota_us")
		quotaBytes, err := ioutil.ReadFile(quotaPath)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Can't read cpu.cfs_quota_us\n")
			os.Exit(1)
		}

		period, err := strconv.Atoi(strings.Trim(string(periodBytes), "\n"))
		if err != nil {
			fmt.Fprintf(os.Stderr, "%v\n", err)
			os.Exit(1)
		}
		quota, err := strconv.Atoi(strings.Trim(string(quotaBytes), "\n"))
		if err != nil {
			fmt.Fprintf(os.Stderr, "%v\n", err)
			os.Exit(1)
		}

		// Report the CFS quota/period pair as milli-cores.
		quotaMilliCores := quota * 1000 / period
		fmt.Printf("CPU Quota: %s\n", strconv.Itoa(quotaMilliCores))
	}

	if globalFlags.CheckCgroupMounts {
		rootCgroupPath := "/proc/1/root/sys/fs/cgroup"
		testPaths := []string{rootCgroupPath}

		// Test a couple of controllers if they're available.
		if cgroup.IsIsolatorSupported("memory") {
			testPaths = append(testPaths, filepath.Join(rootCgroupPath, "memory"))
		}
		if cgroup.IsIsolatorSupported("cpu") {
			testPaths = append(testPaths, filepath.Join(rootCgroupPath, "cpu"))
		}

		// Each of these mounts must be read-only, so creating a directory
		// should fail with EROFS.
		for _, p := range testPaths {
			if err := syscall.Mkdir(filepath.Join(p, "test"), 0600); err == nil || err != syscall.EROFS {
				fmt.Println("check-cgroups: FAIL")
				os.Exit(1)
			}
		}

		fmt.Println("check-cgroups: SUCCESS")
	}

	os.Exit(globalFlags.ExitCode)
}