// dockerSetup does all of the basic setup you need to get a docker process
// up and running for testing. Use like:
//
//	task := taskTemplate()
//	// do custom task configuration
//	client, handle, cleanup := dockerSetup(t, task)
//	defer cleanup()
//	// do test stuff
//
// If there is a problem during setup this function will abort or skip the test
// and indicate the reason.
func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, DriverHandle, func()) {
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	client, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
	}

	driverCtx, execCtx := testDriverContexts(task)
	driver := NewDockerDriver(driverCtx)

	handle, err := driver.Start(execCtx, task)
	if err != nil {
		execCtx.AllocDir.Destroy()
		t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack())
	}
	if handle == nil {
		execCtx.AllocDir.Destroy()
		t.Fatalf("handle is nil\nStack\n%s", debug.Stack())
	}

	cleanup := func() {
		handle.Kill()
		execCtx.AllocDir.Destroy()
	}

	return client, handle, cleanup
}
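// The sketch below is not part of the original suite; it is a minimal,
// hypothetical example of how dockerSetup is meant to be used, assuming the
// dockerTask() helper defined elsewhere in this file. The test name is
// illustrative only.
func TestDockerDriver_SetupExample(t *testing.T) {
	task, _, _ := dockerTask()

	// dockerSetup skips or aborts the test itself if docker is unavailable
	// or the task fails to start, so no error handling is needed here.
	client, handle, cleanup := dockerSetup(t, task)
	defer cleanup()

	if client == nil {
		t.Fatalf("expected a docker client")
	}
	if handle.ID() == "" {
		t.Fatalf("expected a non-empty handle ID")
	}
}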
func TestDockerDriver_StartNVersions(t *testing.T) {
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	task1, _, _ := dockerTask()
	task1.Config["image"] = "busybox"
	task1.Config["load"] = []string{"busybox.tar"}

	task2, _, _ := dockerTask()
	task2.Config["image"] = "busybox:musl"
	task2.Config["load"] = []string{"busybox_musl.tar"}

	task3, _, _ := dockerTask()
	task3.Config["image"] = "busybox:glibc"
	task3.Config["load"] = []string{"busybox_glibc.tar"}

	taskList := []*structs.Task{task1, task2, task3}

	handles := make([]DriverHandle, len(taskList))

	t.Logf("Starting %d tasks", len(taskList))

	// Let's spin up a bunch of things
	var err error
	for idx, task := range taskList {
		driverCtx, execCtx := testDriverContexts(task)
		driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
		defer execCtx.AllocDir.Destroy()
		d := NewDockerDriver(driverCtx)

		copyImage(execCtx, task, "busybox.tar", t)
		copyImage(execCtx, task, "busybox_musl.tar", t)
		copyImage(execCtx, task, "busybox_glibc.tar", t)

		handles[idx], err = d.Start(execCtx, task)
		if err != nil {
			t.Errorf("Failed starting task #%d: %s", idx+1, err)
		}
	}

	t.Log("All tasks are started. Terminating...")

	for idx, handle := range handles {
		if handle == nil {
			t.Errorf("Bad handle for task #%d", idx+1)
			continue
		}

		err := handle.Kill()
		if err != nil {
			t.Errorf("Failed stopping task #%d: %s", idx+1, err)
		}
	}

	t.Log("Test complete!")
}
func TestDockerUser(t *testing.T) {
	t.Parallel()
	task := &structs.Task{
		Name: "redis-demo",
		User: "******",
		Config: map[string]interface{}{
			"image":   "redis",
			"command": "sleep",
			"args":    []string{"10000"},
		},
		Resources: &structs.Resources{
			MemoryMB: 256,
			CPU:      512,
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
	}

	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	driverCtx, execCtx := testDriverContexts(task)
	driver := NewDockerDriver(driverCtx)
	defer execCtx.AllocDir.Destroy()

	// It should fail because the user "alice" does not exist on the given
	// image.
	handle, err := driver.Start(execCtx, task)
	if err == nil {
		handle.Kill()
		t.Fatalf("Should've failed")
	}

	msgs := []string{
		"System error: Unable to find user alice",
		"linux spec user: Unable to find user alice",
	}
	var found bool
	for _, msg := range msgs {
		if strings.Contains(err.Error(), msg) {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("Expected failure string not found, found %q instead", err.Error())
	}
}
func TestDockerScriptCheck(t *testing.T) {
	if !testutil.DockerIsConnected(t) {
		return
	}
	client, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatalf("error creating docker client: %v", err)
	}

	if err := client.PullImage(docker.PullImageOptions{Repository: "busybox", Tag: "latest"},
		docker.AuthConfiguration{}); err != nil {
		t.Fatalf("error pulling busybox: %v", err)
	}

	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Config: &docker.Config{
			Image: "busybox",
			Cmd:   []string{"/bin/sleep", "1000"},
		},
	})
	if err != nil {
		t.Fatalf("error creating container: %v", err)
	}
	defer removeContainer(client, container.ID)

	if err := client.StartContainer(container.ID, container.HostConfig); err != nil {
		t.Fatalf("error starting container: %v", err)
	}

	check := &DockerScriptCheck{
		id:          "1",
		interval:    5 * time.Second,
		containerID: container.ID,
		logger:      log.New(os.Stdout, "", log.LstdFlags),
		cmd:         "/bin/echo",
		args:        []string{"hello", "world"},
	}

	res := check.Run()
	expectedOutput := "hello world"
	expectedExitCode := 0
	if res.Err != nil {
		t.Fatalf("err: %v", res.Err)
	}
	if strings.TrimSpace(res.Output) != expectedOutput {
		t.Fatalf("output expected: %v, actual: %v", expectedOutput, res.Output)
	}

	if res.ExitCode != expectedExitCode {
		t.Fatalf("exitcode expected: %v, actual: %v", expectedExitCode, res.ExitCode)
	}
}
func setupDockerVolumes(t *testing.T, cfg *config.Config) (*structs.Task, Driver, *ExecContext, string, func()) {
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	tmpvol, err := ioutil.TempDir("", "nomadtest_dockerdriver_volumes")
	if err != nil {
		t.Fatalf("error creating temporary dir: %v", err)
	}

	randfn := fmt.Sprintf("test-%d", rand.Int())
	hostpath := path.Join(tmpvol, randfn)
	contpath := path.Join("/mnt/vol", randfn)

	task := &structs.Task{
		Name: "ls",
		Config: map[string]interface{}{
			"image":   "busybox",
			"load":    []string{"busybox.tar"},
			"command": "touch",
			"args":    []string{contpath},
			"volumes": []string{fmt.Sprintf("%s:/mnt/vol", tmpvol)},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: basicResources,
	}

	allocDir := allocdir.NewAllocDir(filepath.Join(cfg.AllocDir, structs.GenerateUUID()), task.Resources.DiskMB)
	allocDir.Build([]*structs.Task{task})
	alloc := mock.Alloc()
	execCtx := NewExecContext(allocDir, alloc.ID)
	cleanup := func() {
		execCtx.AllocDir.Destroy()
		os.RemoveAll(tmpvol)
	}

	taskEnv, err := GetTaskEnv(allocDir, cfg.Node, task, alloc, "")
	if err != nil {
		cleanup()
		t.Fatalf("Failed to get task env: %v", err)
	}

	driverCtx := NewDriverContext(task.Name, cfg, cfg.Node, testLogger(), taskEnv)
	driver := NewDockerDriver(driverCtx)
	copyImage(execCtx, task, "busybox.tar", t)

	return task, driver, execCtx, hostpath, cleanup
}
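// A minimal sketch (not in the original file) of how a test might drive the
// helper above: start the task, wait for it to exit, and confirm the touched
// file shows up on the host side of the bind mount. testConfig() is assumed
// to be this package's usual *config.Config helper, and the test name is
// illustrative only.
func TestDockerDriver_VolumesExample(t *testing.T) {
	cfg := testConfig() // assumed package helper

	task, driver, execCtx, hostpath, cleanup := setupDockerVolumes(t, cfg)
	defer cleanup()

	handle, err := driver.Start(execCtx, task)
	if err != nil {
		t.Fatalf("Failed to start docker driver: %v", err)
	}
	defer handle.Kill()

	select {
	case res := <-handle.WaitCh():
		if !res.Successful() {
			t.Fatalf("unexpected err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
		t.Fatalf("timeout")
	}

	// The file touched inside the container should now exist on the host.
	if _, err := os.Stat(hostpath); err != nil {
		t.Fatalf("unexpected error checking %s: %v", hostpath, err)
	}
}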
func TestDocker_StartNVersions(t *testing.T) {
	t.Parallel()
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	task1, _, _ := dockerTask()
	task1.Config["image"] = "redis"

	task2, _, _ := dockerTask()
	task2.Config["image"] = "redis:latest"

	task3, _, _ := dockerTask()
	task3.Config["image"] = "redis:3.0"

	taskList := []*structs.Task{task1, task2, task3}

	handles := make([]DriverHandle, len(taskList))

	t.Logf("Starting %d tasks", len(taskList))

	// Let's spin up a bunch of things
	var err error
	for idx, task := range taskList {
		driverCtx, execCtx := testDriverContexts(task)
		defer execCtx.AllocDir.Destroy()
		d := NewDockerDriver(driverCtx)

		handles[idx], err = d.Start(execCtx, task)
		if err != nil {
			t.Errorf("Failed starting task #%d: %s", idx+1, err)
		}
	}

	t.Log("All tasks are started. Terminating...")

	for idx, handle := range handles {
		if handle == nil {
			t.Errorf("Bad handle for task #%d", idx+1)
			continue
		}

		err := handle.Kill()
		if err != nil {
			t.Errorf("Failed stopping task #%d: %s", idx+1, err)
		}
	}

	t.Log("Test complete!")
}
func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*structs.Task, Driver, *ExecContext, string, func()) {
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	randfn := fmt.Sprintf("test-%d", rand.Int())
	hostfile := filepath.Join(hostpath, randfn)
	containerPath := "/mnt/vol"
	containerFile := filepath.Join(containerPath, randfn)

	task := &structs.Task{
		Name: "ls",
		Env:  map[string]string{"VOL_PATH": containerPath},
		Config: map[string]interface{}{
			"image":   "busybox",
			"load":    []string{"busybox.tar"},
			"command": "touch",
			"args":    []string{containerFile},
			"volumes": []string{fmt.Sprintf("%s:${VOL_PATH}", hostpath)},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: basicResources,
	}

	allocDir := allocdir.NewAllocDir(filepath.Join(cfg.AllocDir, structs.GenerateUUID()))
	allocDir.Build([]*structs.Task{task})
	alloc := mock.Alloc()
	execCtx := NewExecContext(allocDir, alloc.ID)
	cleanup := func() {
		execCtx.AllocDir.Destroy()
		if filepath.IsAbs(hostpath) {
			os.RemoveAll(hostpath)
		}
	}

	taskEnv, err := GetTaskEnv(allocDir, cfg.Node, task, alloc, "")
	if err != nil {
		cleanup()
		t.Fatalf("Failed to get task env: %v", err)
	}

	driverCtx := NewDriverContext(task.Name, cfg, cfg.Node, testLogger(), taskEnv)
	driver := NewDockerDriver(driverCtx)
	copyImage(execCtx, task, "busybox.tar", t)

	return task, driver, execCtx, hostfile, cleanup
}
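// Another hypothetical sketch for the variant above that takes the host path
// from the caller; the temporary directory and the test name are assumptions.
// The ${VOL_PATH} placeholder in the volume spec is expected to be
// interpolated from the task's Env before the container starts.
func TestDockerDriver_VolumesInterpolationExample(t *testing.T) {
	cfg := testConfig() // assumed package helper

	tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumes")
	if err != nil {
		t.Fatalf("error creating temporary dir: %v", err)
	}

	task, driver, execCtx, hostfile, cleanup := setupDockerVolumes(t, cfg, tmpvol)
	defer cleanup()

	handle, err := driver.Start(execCtx, task)
	if err != nil {
		t.Fatalf("Failed to start docker driver: %v", err)
	}
	defer handle.Kill()

	select {
	case res := <-handle.WaitCh():
		if !res.Successful() {
			t.Fatalf("unexpected err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
		t.Fatalf("timeout")
	}

	// touch inside the container should have created the file on the host.
	if _, err := os.Stat(hostfile); err != nil {
		t.Fatalf("unexpected error checking %s: %v", hostfile, err)
	}
}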
// This test should always pass, even if docker daemon is not available
func TestDockerDriver_Fingerprint(t *testing.T) {
	driverCtx, _ := testDriverContexts(&structs.Task{Name: "foo"})
	d := NewDockerDriver(driverCtx)
	node := &structs.Node{
		Attributes: make(map[string]string),
	}
	apply, err := d.Fingerprint(&config.Config{}, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if apply != testutil.DockerIsConnected(t) {
		t.Fatalf("Fingerprinter should detect when docker is available")
	}
	if node.Attributes["driver.docker"] != "1" {
		t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.")
	}
	t.Logf("Found docker version %s", node.Attributes["driver.docker.version"])
}
func TestDockerDriver_StartOpen_Wait(t *testing.T) {
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	task := &structs.Task{
		Name: "nc-demo",
		Config: map[string]interface{}{
			"load":    []string{"busybox.tar"},
			"image":   "busybox",
			"command": "/bin/nc",
			"args":    []string{"-l", "127.0.0.1", "-p", "0"},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: basicResources,
	}

	driverCtx, execCtx := testDriverContexts(task)
	driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
	defer execCtx.AllocDir.Destroy()
	d := NewDockerDriver(driverCtx)
	copyImage(execCtx, task, "busybox.tar", t)

	handle, err := d.Start(execCtx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}
	defer handle.Kill()

	// Attempt to open
	handle2, err := d.Open(execCtx, handle.ID())
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle2 == nil {
		t.Fatalf("missing handle")
	}
}
func TestDockerDriver_User(t *testing.T) {
	task := &structs.Task{
		Name: "redis-demo",
		User: "******",
		Config: map[string]interface{}{
			"image":   "busybox",
			"load":    []string{"busybox.tar"},
			"command": "/bin/sleep",
			"args":    []string{"10000"},
		},
		Resources: &structs.Resources{
			MemoryMB: 256,
			CPU:      512,
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
	}

	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	driverCtx, execCtx := testDriverContexts(task)
	driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
	driver := NewDockerDriver(driverCtx)
	defer execCtx.AllocDir.Destroy()
	copyImage(execCtx, task, "busybox.tar", t)

	// It should fail because the user "alice" does not exist on the given
	// image.
	handle, err := driver.Start(execCtx, task)
	if err == nil {
		handle.Kill()
		t.Fatalf("Should've failed")
	}

	if !strings.Contains(err.Error(), "alice") {
		t.Fatalf("Expected failure string not found, found %q instead", err.Error())
	}
}
// This test should always pass, even if docker daemon is not available
func TestDockerDriver_Fingerprint(t *testing.T) {
	driverCtx, execCtx := testDriverContexts(&structs.Task{Name: "foo", Resources: basicResources})
	driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
	defer execCtx.AllocDir.Destroy()
	d := NewDockerDriver(driverCtx)
	node := &structs.Node{
		Attributes: make(map[string]string),
	}
	apply, err := d.Fingerprint(&config.Config{}, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if apply != testutil.DockerIsConnected(t) {
		t.Fatalf("Fingerprinter should detect when docker is available")
	}
	if node.Attributes["driver.docker"] != "1" {
		t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.")
	}
	t.Logf("Found docker version %s", node.Attributes["driver.docker.version"])
}
func TestDockerDriver_StartOpen_Wait(t *testing.T) {
	t.Parallel()
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}

	task := &structs.Task{
		Name: "redis-demo",
		Config: map[string]interface{}{
			"image": "redis",
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: basicResources,
	}

	driverCtx, execCtx := testDriverContexts(task)
	defer execCtx.AllocDir.Destroy()
	d := NewDockerDriver(driverCtx)

	handle, err := d.Start(execCtx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}
	defer handle.Kill()

	// Attempt to open
	handle2, err := d.Open(execCtx, handle.ID())
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle2 == nil {
		t.Fatalf("missing handle")
	}
}
func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}
	task := &structs.Task{
		Name: "busybox-demo",
		Config: map[string]interface{}{
			"image":   "127.0.1.1:32121/foo", // bad path
			"command": "/bin/echo",
			"args": []string{
				"hello",
			},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: &structs.Resources{
			MemoryMB: 256,
			CPU:      512,
		},
	}

	driverCtx, execCtx := testDriverContexts(task)
	driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
	defer execCtx.AllocDir.Destroy()
	d := NewDockerDriver(driverCtx)

	_, err := d.Start(execCtx, task)
	if err == nil {
		t.Fatalf("want err: %v", err)
	}

	if rerr, ok := err.(*structs.RecoverableError); !ok {
		t.Fatalf("want recoverable error: %+v", err)
	} else if !rerr.Recoverable {
		t.Fatalf("error not recoverable: %+v", err)
	}
}
func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
	t.Parallel()
	// This test requires that the alloc dir be mounted into docker as a volume.
	// Because this cannot happen when docker is run remotely, e.g. when running
	// docker in a VM, we skip this when we detect Docker is being run remotely.
	if !testutil.DockerIsConnected(t) || dockerIsRemote(t) {
		t.SkipNow()
	}

	exp := []byte{'w', 'i', 'n'}
	file := "output.txt"
	task := &structs.Task{
		Name: "redis-demo",
		Config: map[string]interface{}{
			"image":   "redis",
			"command": "/bin/bash",
			"args": []string{
				"-c",
				fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`, string(exp), env.AllocDir, file),
			},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: &structs.Resources{
			MemoryMB: 256,
			CPU:      512,
		},
	}

	driverCtx, execCtx := testDriverContexts(task)
	defer execCtx.AllocDir.Destroy()
	d := NewDockerDriver(driverCtx)

	handle, err := d.Start(execCtx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}
	defer handle.Kill()

	select {
	case res := <-handle.WaitCh():
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// Check that data was written to the shared alloc directory.
	outputFile := filepath.Join(execCtx.AllocDir.SharedDir, file)
	act, err := ioutil.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Couldn't read expected output: %v", err)
	}

	if !reflect.DeepEqual(act, exp) {
		t.Fatalf("Command outputted %v; want %v", act, exp)
	}
}
func TestDockerDriver_Start_LoadImage(t *testing.T) {
	t.Parallel()
	if !testutil.DockerIsConnected(t) {
		t.SkipNow()
	}
	task := &structs.Task{
		Name: "busybox-demo",
		Config: map[string]interface{}{
			"image":   "busybox",
			"load":    []string{"busybox.tar"},
			"command": "/bin/echo",
			"args": []string{
				"hello",
			},
		},
		LogConfig: &structs.LogConfig{
			MaxFiles:      10,
			MaxFileSizeMB: 10,
		},
		Resources: &structs.Resources{
			MemoryMB: 256,
			CPU:      512,
		},
	}

	driverCtx, execCtx := testDriverContexts(task)
	defer execCtx.AllocDir.Destroy()
	d := NewDockerDriver(driverCtx)

	// Copy the image tarball into the task's local directory so the driver
	// can load it.
	taskDir, _ := execCtx.AllocDir.TaskDirs[task.Name]
	dst := filepath.Join(taskDir, allocdir.TaskLocal, "busybox.tar")
	copyFile("./test-resources/docker/busybox.tar", dst, t)

	handle, err := d.Start(execCtx, task)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if handle == nil {
		t.Fatalf("missing handle")
	}
	defer handle.Kill()

	select {
	case res := <-handle.WaitCh():
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// Check that the command's output was written to the task's stdout log.
	outputFile := filepath.Join(execCtx.AllocDir.LogDir(), "busybox-demo.stdout.0")
	act, err := ioutil.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Couldn't read expected output: %v", err)
	}

	exp := "hello"
	if strings.TrimSpace(string(act)) != exp {
		t.Fatalf("Command outputted %v; want %v", act, exp)
	}
}