Example 1
func TestGetAllChildren(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))

	webapp, err := daemon.GetByName("/webapp")
	if err != nil {
		t.Fatal(err)
	}

	if webapp.ID != container.ID {
		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
	}

	config, _, _, err = runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	childContainer := daemon.Get(createTestContainer(eng, config, t))

	if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
		t.Fatal(err)
	}

	children, err := daemon.Children("/webapp")
	if err != nil {
		t.Fatal(err)
	}

	if children == nil {
		t.Fatal("Children should not be nil")
	}
	if len(children) == 0 {
		t.Fatal("Children should not be empty")
	}

	for key, value := range children {
		if key != "/webapp/db" {
			t.Fatalf("Expected /webapp/db got %s", key)
		}
		if value.ID != childContainer.ID {
			t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID)
		}
	}
}
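For reference, a standalone sketch of the result shape this test expects from daemon.Children: the value type is a stand-in struct (only the ID matters here) and the ID is hypothetical.

package main

import "fmt"

// Stand-in for the daemon.Container type used above; only ID is needed.
type container struct{ ID string }

func main() {
	child := &container{ID: "abc123"} // hypothetical child container ID

	// The alias "db" registered under "/webapp" comes back fully qualified.
	children := map[string]*container{
		"/webapp/db": child,
	}
	for name, c := range children {
		fmt.Println(name, c.ID) // /webapp/db abc123
	}
}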
Example 2
// Create a test container from the given daemon `r` and run arguments `args`.
// If the image name is "_" (e.g. []string{"-i", "-t", "_", "bash"}), it is
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
func mkContainer(r *daemon.Daemon, args []string, t *testing.T) (*daemon.Container, *runconfig.HostConfig, error) {
	config, hc, _, err := runconfig.Parse(args, nil)
	defer func() {
		if err != nil && t != nil {
			t.Fatal(err)
		}
	}()
	if err != nil {
		return nil, nil, err
	}
	if config.Image == "_" {
		config.Image = GetTestImage(r).ID
	}
	c, _, err := r.Create(config, "")
	if err != nil {
		return nil, nil, err
	}
	// NOTE: hostConfig is ignored.
	// If `args` specify privileged mode, custom lxc conf, external mount binds,
	// port redirects etc. they will be ignored.
	// This is because the correct way to set these things is to pass environment
	// to the `start` job.
	// FIXME: this helper function should be deprecated in favor of calling
	// `create` and `start` jobs directly.
	return c, hc, nil
}
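A minimal usage sketch for this helper, assuming a test that already has a daemon handle and a *testing.T, as in the other examples here; the "_" placeholder is replaced with the test image before runconfig.Parse ever sees it.

// Hypothetical call from a test; `runtime` and `t` come from the surrounding
// test setup (see TestMergeConfigOnCommit below for the same pattern).
container, _, _ := mkContainer(runtime, []string{"-i", "-t", "_", "/bin/cat"}, t)
defer runtime.Destroy(container)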
Example 3
// CmdCreate creates a new container from a given image.
//
// Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...]
func (cli *DockerCli) CmdCreate(args ...string) error {
	cmd := Cli.Subcmd("create", []string{"IMAGE [COMMAND] [ARG...]"}, "Create a new container", true)
	addTrustedFlags(cmd, true)

	// These are flags not stored in Config/HostConfig
	var (
		flName = cmd.String([]string{"-name"}, "", "Assign a name to the container")
	)

	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
	if err != nil {
		cmd.ReportError(err.Error(), true)
		os.Exit(1)
	}
	if config.Image == "" {
		cmd.Usage()
		return nil
	}
	response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
	if err != nil {
		return err
	}
	fmt.Fprintf(cli.out, "%s\n", response.ID)
	return nil
}
Example 4
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// 'sh -c' in the event there is only one argument. The difference in
// processing:
//
// RUN echo hi          # sh -c echo hi
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool, original string) error {
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to run")
	}

	args = handleJsonArgs(args, attributes)

	if !attributes["json"] {
		args = append([]string{"/bin/sh", "-c"}, args...)
	}

	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
	runCmd.SetOutput(ioutil.Discard)
	runCmd.Usage = nil

	config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...))
	if err != nil {
		return err
	}

	cmd := b.Config.Cmd
	// set Cmd manually, this is special case only for Dockerfiles
	b.Config.Cmd = config.Cmd
	runconfig.Merge(b.Config, config)

	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

	logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	c, err := b.create()
	if err != nil {
		return err
	}

	// Ensure that we keep the container mounted until the commit
	// to avoid unmounting and then mounting directly again
	c.Mount()
	defer c.Unmount()

	err = b.run(c)
	if err != nil {
		return err
	}
	if err := b.commit(c.ID, cmd, "run"); err != nil {
		return err
	}

	return nil
}
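To make the shell-form versus JSON-form distinction from the comment concrete, here is a standalone sketch of the argument slices that end up being handed to runconfig.Parse; the image name is a placeholder.

package main

import "fmt"

func main() {
	image := "busybox" // placeholder image name

	// Shell form, RUN echo hi: /bin/sh -c is prepended before the image name.
	shellForm := append([]string{image}, "/bin/sh", "-c", "echo hi")
	// Exec (JSON) form, RUN ["echo", "hi"]: the arguments are used verbatim.
	execForm := append([]string{image}, "echo", "hi")

	fmt.Println(shellForm) // [busybox /bin/sh -c echo hi]
	fmt.Println(execForm)  // [busybox echo hi]
}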
Example 5
func TestMergeConfigOnCommit(t *testing.T) {
	eng := NewTestEngine(t)
	runtime := mkDaemonFromEngine(eng, t)
	defer runtime.Nuke()

	container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t)
	defer runtime.Destroy(container1)

	config, _, _, err := runconfig.Parse([]string{container1.ID, "cat /tmp/foo"}, nil)
	if err != nil {
		t.Error(err)
	}

	job := eng.Job("commit", container1.ID)
	job.Setenv("repo", "testrepo")
	job.Setenv("tag", "testtag")
	job.SetenvJson("config", config)
	var outputBuffer = bytes.NewBuffer(nil)
	job.Stdout.Add(outputBuffer)
	if err := job.Run(); err != nil {
		t.Error(err)
	}

	container2, _, _ := mkContainer(runtime, []string{engine.Tail(outputBuffer, 1)}, t)
	defer runtime.Destroy(container2)

	job = eng.Job("container_inspect", container1.Name)
	baseContainer, _ := job.Stdout.AddEnv()
	if err := job.Run(); err != nil {
		t.Error(err)
	}

	job = eng.Job("container_inspect", container2.Name)
	commitContainer, _ := job.Stdout.AddEnv()
	if err := job.Run(); err != nil {
		t.Error(err)
	}

	baseConfig := baseContainer.GetSubEnv("Config")
	commitConfig := commitContainer.GetSubEnv("Config")

	if commitConfig.Get("Env") != baseConfig.Get("Env") {
		t.Fatalf("Env config in committed container should be %v, was %v",
			baseConfig.Get("Env"), commitConfig.Get("Env"))
	}

	if baseConfig.Get("Cmd") != "[\"echo test \\u003e /tmp/foo\"]" {
		t.Fatalf("Cmd in base container should be [\"echo test \\u003e /tmp/foo\"], was %s",
			baseConfig.Get("Cmd"))
	}

	if commitConfig.Get("Cmd") != "[\"cat /tmp/foo\"]" {
		t.Fatalf("Cmd in committed container should be [\"cat /tmp/foo\"], was %s",
			commitConfig.Get("Cmd"))
	}
}
Example 6
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// 'sh -c' in the event there is only one argument. The difference in
// processing:
//
// RUN echo hi          # sh -c echo hi
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to run")
	}

	args = handleJsonArgs(args, attributes)

	if len(args) == 1 {
		args = append([]string{"/bin/sh", "-c"}, args[0])
	}

	args = append([]string{b.image}, args...)

	config, _, _, err := runconfig.Parse(args, nil)
	if err != nil {
		return err
	}

	cmd := b.Config.Cmd
	// set Cmd manually, this is special case only for Dockerfiles
	b.Config.Cmd = config.Cmd
	runconfig.Merge(b.Config, config)

	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)

	log.Debugf("Command to be executed: %v", b.Config.Cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	c, err := b.create()
	if err != nil {
		return err
	}

	// Ensure that we keep the container mounted until the commit
	// to avoid unmounting and then mounting directly again
	c.Mount()
	defer c.Unmount()

	err = b.run(c)
	if err != nil {
		return err
	}
	if err := b.commit(c.ID, cmd, "run"); err != nil {
		return err
	}

	return nil
}
Example 7
func TestCreateNumberHostname(t *testing.T) {
	eng := NewTestEngine(t)
	defer mkDaemonFromEngine(eng, t).Nuke()

	config, _, _, err := runconfig.Parse([]string{"-h", "web.0", unitTestImageID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	createTestContainer(eng, config, t)
}
Example 8
func TestLinkChildContainer(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp"))

	webapp, err := daemon.GetByName("/webapp")
	if err != nil {
		t.Fatal(err)
	}

	if webapp.ID != container.ID {
		t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID)
	}

	config, _, _, err = runconfig.Parse([]string{GetTestImage(daemon).ID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	childContainer := daemon.Get(createTestContainer(eng, config, t))

	if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil {
		t.Fatal(err)
	}

	// Get the child by its new name
	db, err := daemon.GetByName("/webapp/db")
	if err != nil {
		t.Fatal(err)
	}
	if db.ID != childContainer.ID {
		t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID)
	}
}
Example 9
// Regression test for being able to untag an image with an existing
// container
func TestDeleteTagWithExistingContainers(t *testing.T) {
	eng := NewTestEngine(t)
	defer nuke(mkDaemonFromEngine(eng, t))

	srv := mkServerFromEngine(eng, t)

	// Tag the image
	if err := eng.Job("tag", unitTestImageID, "utest", "tag1").Run(); err != nil {
		t.Fatal(err)
	}

	// Create a container from the image
	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	id := createNamedTestContainer(eng, config, t, "testingtags")
	if id == "" {
		t.Fatal("No id returned")
	}

	job := srv.Eng.Job("containers")
	job.SetenvBool("all", true)
	outs, err := job.Stdout.AddListTable()
	if err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	if len(outs.Data) != 1 {
		t.Fatalf("Expected 1 container got %d", len(outs.Data))
	}

	// Try to remove the tag
	imgs := engine.NewTable("", 0)
	if err := srv.DeleteImage("utest:tag1", imgs, true, false, false); err != nil {
		t.Fatal(err)
	}

	if len(imgs.Data) != 1 {
		t.Fatalf("Should only have deleted one untag %d", len(imgs.Data))
	}

	if untag := imgs.Data[0].Get("Untagged"); untag != "utest:tag1" {
		t.Fatalf("Expected %s got %s", unitTestImageID, untag)
	}
}
Example 10
func TestContainerNameValidation(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	for _, test := range []struct {
		Name  string
		Valid bool
	}{
		{"abc-123_AAA.1", true},
		{"\000asdf", false},
	} {
		config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
		if err != nil {
			if !test.Valid {
				continue
			}
			t.Fatal(err)
		}

		var outputBuffer = bytes.NewBuffer(nil)
		job := eng.Job("create", test.Name)
		if err := job.ImportEnv(config); err != nil {
			t.Fatal(err)
		}
		job.Stdout.Add(outputBuffer)
		if err := job.Run(); err != nil {
			if !test.Valid {
				continue
			}
			t.Fatal(err)
		}

		container := daemon.Get(engine.Tail(outputBuffer, 1))

		if container.Name != "/"+test.Name {
			t.Fatalf("Expect /%s got %s", test.Name, container.Name)
		}

		if c := daemon.Get("/" + test.Name); c == nil {
			t.Fatalf("Couldn't retrieve test container as /%s", test.Name)
		} else if c.ID != container.ID {
			t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID)
		}
	}

}
Example 11
func (b *buildFile) CmdRun(args string) error {
	if b.image == "" {
		return fmt.Errorf("Please provide a source image with `from` prior to run")
	}
	config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
	if err != nil {
		return err
	}

	cmd := b.config.Cmd
	// set Cmd manually, this is special case only for Dockerfiles
	b.config.Cmd = config.Cmd
	runconfig.Merge(b.config, config)

	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)

	log.Debugf("Command to be executed: %v", b.config.Cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	c, err := b.create()
	if err != nil {
		return err
	}
	// Ensure that we keep the container mounted until the commit
	// to avoid unmounting and then mounting directly again
	c.Mount()
	defer c.Unmount()

	err = b.run(c)
	if err != nil {
		return err
	}
	if err := b.commit(c.ID, cmd, "run"); err != nil {
		return err
	}

	return nil
}
Example 12
func TestCommit(t *testing.T) {
	eng := NewTestEngine(t)
	defer mkDaemonFromEngine(eng, t).Nuke()

	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "/bin/cat"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	id := createTestContainer(eng, config, t)

	job := eng.Job("commit", id)
	job.Setenv("repo", "testrepo")
	job.Setenv("tag", "testtag")
	job.SetenvJson("config", config)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
}
Example 13
func (c *Container) parseCmd() {
	flags := flag.NewFlagSet("run", flag.ExitOnError)

	flRemove := flags.Bool([]string{"#rm", "-rm"}, false, "")
	flDetach := flags.Bool([]string{"d", "-detach"}, false, "")
	flName := flags.String([]string{"#name", "-name"}, "", "")

	args, err := shlex.Split(c.ContainerCfg.Cmd)
	if err != nil {
		c.Err = err
		return
	}

	log.Debugf("Parsing [%s]", strings.Join(args, ","))
	c.Config, c.HostConfig, _, c.Err = runconfig.Parse(flags, args)

	c.Name = *flName
	c.detach = *flDetach
	c.remove = *flRemove
}
Example 14
func TestRandomContainerName(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := runconfig.Parse([]string{GetTestImage(daemon).ID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	container := daemon.Get(createTestContainer(eng, config, t))
	containerID := container.ID

	if container.Name == "" {
		t.Fatalf("Expected not empty container name")
	}

	if c := daemon.Get(container.Name); c == nil {
		log.Fatalf("Could not lookup container %s by its name", container.Name)
	} else if c.ID != containerID {
		log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID)
	}
}
Example 15
func TestDefaultContainerName(t *testing.T) {
	eng := NewTestEngine(t)
	daemon := mkDaemonFromEngine(eng, t)
	defer nuke(daemon)

	config, _, _, err := runconfig.Parse([]string{unitTestImageID, "echo test"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	container := daemon.Get(createNamedTestContainer(eng, config, t, "some_name"))
	containerID := container.ID

	if container.Name != "/some_name" {
		t.Fatalf("Expect /some_name got %s", container.Name)
	}

	if c := daemon.Get("/some_name"); c == nil {
		t.Fatalf("Couldn't retrieve test container as /some_name")
	} else if c.ID != containerID {
		t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID)
	}
}
Example 16
func (cli *KraneCli) CmdRun(args ...string) error {
	// FIXME: just use runconfig.Parse already
	cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container")

	// These are flags not stored in Config/HostConfig
	var (
		flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
		flDetach     = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run the container in the background and print the new container ID")
		flSigProxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.")
		flName       = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
		flShip       = cmd.String([]string{"ship", "-ship"}, "", "Ship name to deploy at")
		flAttach     *opts.ListOpts

		ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
		ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
	)

	config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil)
	if err != nil {
		return err
	}
	if config.Image == "" || *flShip == "" {
		cmd.Usage()
		return nil
	}

	if *flDetach {
		if fl := cmd.Lookup("attach"); fl != nil {
			flAttach = fl.Value.(*opts.ListOpts)
			if flAttach.Len() != 0 {
				return ErrConflictAttachDetach
			}
		}
		if *flAutoRemove {
			return ErrConflictDetachAutoRemove
		}

		config.AttachStdin = false
		config.AttachStdout = false
		config.AttachStderr = false
		config.StdinOnce = false
	}

	// Disable flSigProxy when in TTY mode
	sigProxy := *flSigProxy
	if config.Tty {
		sigProxy = false
	}

	runResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName, *flShip)
	if err != nil {
		return err
	}

	if sigProxy {
		sigc := cli.forwardAllSignals(runResult.Get("Id"))
		defer signal.StopCatch(sigc)
	}

	var (
		waitDisplayId chan struct{}
		errCh         chan error
	)

	if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayId = make(chan struct{})
		go func() {
			defer close(waitDisplayId)
			fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id"))
		}()
	}

	if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
		return ErrConflictRestartPolicyAndAutoRemove
	}

	// We need to instantiate the chan because the select needs it. It can
	// be closed but can't be uninitialized.
	hijacked := make(chan io.Closer)

	// Block the return until the chan gets closed
	defer func() {
		log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
		if _, ok := <-hijacked; ok {
			log.Errorf("Hijack did not finish (chan still open)")
		}
	}()

	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
		var (
			out, stderr io.Writer
			in          io.ReadCloser
			v           = url.Values{}
		)
		v.Set("stream", "1")

		if config.AttachStdin {
			v.Set("stdin", "1")
			in = cli.in
		}
		if config.AttachStdout {
			v.Set("stdout", "1")
			out = cli.out
		}
		if config.AttachStderr {
			v.Set("stderr", "1")
			if config.Tty {
				stderr = cli.out
			} else {
				stderr = cli.err
			}
		}

		errCh = promise.Go(func() error {
			return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
		})
	} else {
		close(hijacked)
	}

	// Acknowledge the hijack before starting
	select {
	case closer := <-hijacked:
		// Make sure that the hijack gets closed when returning (results
		// in closing the hijack chan and freeing server's goroutines)
		if closer != nil {
			defer closer.Close()
		}
	case err := <-errCh:
		if err != nil {
			log.Debugf("Error hijack: %s", err)
			return err
		}
	}

	//start the container
	if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil {
		return err
	}

	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
		if err := cli.monitorTtySize(runResult.Get("Id"), false); err != nil {
			log.Errorf("Error monitoring TTY size: %s", err)
		}
	}

	if errCh != nil {
		if err := <-errCh; err != nil {
			log.Debugf("Error hijack: %s", err)
			return err
		}
	}

	// Detached mode: wait for the id to be displayed and return.
	if !config.AttachStdout && !config.AttachStderr {
		// Detached mode
		<-waitDisplayId
		return nil
	}

	var status int

	// Attached mode
	if *flAutoRemove {
		// Autoremove: wait for the container to finish, retrieve
		// the exit code and remove the container
		if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil {
			return err
		}
		if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
			return err
		}
		if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil {
			return err
		}
	} else {
		// No Autoremove: Simply retrieve the exit code
		if !config.Tty {
			// In non-TTY mode, we can't detach, so we must wait for container exit
			if status, err = waitForExit(cli, runResult.Get("Id")); err != nil {
				return err
			}
		} else {
			// In TTY mode, there is a race: if the process dies too slowly, the state could
			// be updated after the getExitCode call and result in the wrong exit code being reported
			if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
				return err
			}
		}
	}
	if status != 0 {
		return &utils.StatusError{StatusCode: status}
	}
	return nil
}
Example 17
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is
// only one argument. The difference in processing:
//
// RUN echo hi          # sh -c echo hi       (Linux)
// RUN echo hi          # cmd /S /C echo hi   (Windows)
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *builder, args []string, attributes map[string]bool, original string) error {
	if b.image == "" && !b.noBaseImage {
		return derr.ErrorCodeMissingFrom
	}

	if err := b.BuilderFlags.Parse(); err != nil {
		return err
	}

	args = handleJSONArgs(args, attributes)

	if !attributes["json"] {
		if runtime.GOOS != "windows" {
			args = append([]string{"/bin/sh", "-c"}, args...)
		} else {
			args = append([]string{"cmd", "/S /C"}, args...)
		}
	}

	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
	runCmd.SetOutput(ioutil.Discard)
	runCmd.Usage = nil

	config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...))
	if err != nil {
		return err
	}

	cmd := b.Config.Cmd
	// set Cmd manually, this is special case only for Dockerfiles
	b.Config.Cmd = config.Cmd
	runconfig.Merge(b.Config, config)

	defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)

	logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	c, err := b.create()
	if err != nil {
		return err
	}

	// Ensure that we keep the container mounted until the commit
	// to avoid unmounting and then mounting directly again
	c.Mount()
	defer c.Unmount()

	err = b.run(c)
	if err != nil {
		return err
	}
	if err := b.commit(c.ID, cmd, "run"); err != nil {
		return err
	}

	return nil
}
Example 18
File: run.go Project: rpinz/docker
// CmdRun runs a command in a new container.
//
// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
func (cli *DockerCli) CmdRun(args ...string) error {
	cmd := Cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, Cli.DockerCommands["run"].Description, true)
	addTrustedFlags(cmd, true)

	// These are flags not stored in Config/HostConfig
	var (
		flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits")
		flDetach     = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID")
		flSigProxy   = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process")
		flName       = cmd.String([]string{"-name"}, "", "Assign a name to the container")
		flDetachKeys = cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container")
		flAttach     *opts.ListOpts

		ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
		ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
	)

	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
	// just in case the Parse does not exit
	if err != nil {
		cmd.ReportError(err.Error(), true)
		os.Exit(125)
	}

	if hostConfig.OomKillDisable && hostConfig.Memory == 0 {
		fmt.Fprintf(cli.err, "WARNING: Dangerous only disable the OOM Killer on containers but not set the '-m/--memory' option\n")
	}

	if len(hostConfig.DNS) > 0 {
		// check the DNS settings passed via --dns against
		// localhost regexp to warn if they are trying to
		// set a DNS to a localhost address
		for _, dnsIP := range hostConfig.DNS {
			if dns.IsLocalhost(dnsIP) {
				fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
				break
			}
		}
	}
	if config.Image == "" {
		cmd.Usage()
		return nil
	}

	config.ArgsEscaped = false

	if !*flDetach {
		if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil {
			return err
		}
	} else {
		if fl := cmd.Lookup("-attach"); fl != nil {
			flAttach = fl.Value.(*opts.ListOpts)
			if flAttach.Len() != 0 {
				return ErrConflictAttachDetach
			}
		}
		if *flAutoRemove {
			return ErrConflictDetachAutoRemove
		}

		config.AttachStdin = false
		config.AttachStdout = false
		config.AttachStderr = false
		config.StdinOnce = false
	}

	// Disable flSigProxy when in TTY mode
	sigProxy := *flSigProxy
	if config.Tty {
		sigProxy = false
	}

	// Telling the Windows daemon the initial size of the tty during start makes
	// a far better user experience rather than relying on subsequent resizes
	// to cause things to catch up.
	if runtime.GOOS == "windows" {
		hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize()
	}

	createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
	if err != nil {
		cmd.ReportError(err.Error(), true)
		return runStartContainerErr(err)
	}
	if sigProxy {
		sigc := cli.forwardAllSignals(createResponse.ID)
		defer signal.StopCatch(sigc)
	}
	var (
		waitDisplayID chan struct{}
		errCh         chan error
	)
	if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayID = make(chan struct{})
		go func() {
			defer close(waitDisplayID)
			fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
		}()
	}
	if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
		return ErrConflictRestartPolicyAndAutoRemove
	}

	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
		var (
			out, stderr io.Writer
			in          io.ReadCloser
		)
		if config.AttachStdin {
			in = cli.in
		}
		if config.AttachStdout {
			out = cli.out
		}
		if config.AttachStderr {
			if config.Tty {
				stderr = cli.out
			} else {
				stderr = cli.err
			}
		}

		if *flDetachKeys != "" {
			cli.configFile.DetachKeys = *flDetachKeys
		}

		options := types.ContainerAttachOptions{
			ContainerID: createResponse.ID,
			Stream:      true,
			Stdin:       config.AttachStdin,
			Stdout:      config.AttachStdout,
			Stderr:      config.AttachStderr,
			DetachKeys:  cli.configFile.DetachKeys,
		}

		resp, err := cli.client.ContainerAttach(options)
		if err != nil {
			return err
		}
		errCh = promise.Go(func() error {
			return cli.holdHijackedConnection(config.Tty, in, out, stderr, resp)
		})
	}

	defer func() {
		if *flAutoRemove {
			options := types.ContainerRemoveOptions{
				ContainerID:   createResponse.ID,
				RemoveVolumes: true,
			}
			if err := cli.client.ContainerRemove(options); err != nil {
				fmt.Fprintf(cli.err, "Error deleting container: %s\n", err)
			}
		}
	}()

	//start the container
	if err := cli.client.ContainerStart(createResponse.ID); err != nil {
		cmd.ReportError(err.Error(), false)
		return runStartContainerErr(err)
	}

	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
		if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
			fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
		}
	}

	if errCh != nil {
		if err := <-errCh; err != nil {
			logrus.Debugf("Error hijack: %s", err)
			return err
		}
	}

	// Detached mode: wait for the id to be displayed and return.
	if !config.AttachStdout && !config.AttachStderr {
		// Detached mode
		<-waitDisplayID
		return nil
	}

	var status int

	// Attached mode
	if *flAutoRemove {
		// Autoremove: wait for the container to finish, retrieve
		// the exit code and remove the container
		if status, err = cli.client.ContainerWait(createResponse.ID); err != nil {
			return runStartContainerErr(err)
		}
		if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
			return err
		}
	} else {
		// No Autoremove: Simply retrieve the exit code
		if !config.Tty {
			// In non-TTY mode, we can't detach, so we must wait for container exit
			if status, err = cli.client.ContainerWait(createResponse.ID); err != nil {
				return err
			}
		} else {
			// In TTY mode, there is a race: if the process dies too slowly, the state could
			// be updated after the getExitCode call and result in the wrong exit code being reported
			if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
				return err
			}
		}
	}
	if status != 0 {
		return Cli.StatusError{StatusCode: status}
	}
	return nil
}
Example 19
// CmdRun runs a command in a new container.
//
// Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
func (cli *DockerCli) CmdRun(args ...string) error {
	cmd := cli.Subcmd("run", []string{"IMAGE [COMMAND] [ARG...]"}, "Run a command in a new container", true)

	// These are flags not stored in Config/HostConfig
	var (
		flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits")
		flDetach     = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID")
		flSigProxy   = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process")
		flName       = cmd.String([]string{"-name"}, "", "Assign a name to the container")
		flAttach     *opts.ListOpts

		ErrConflictAttachDetach               = fmt.Errorf("Conflicting options: -a and -d")
		ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
		ErrConflictDetachAutoRemove           = fmt.Errorf("Conflicting options: --rm and -d")
	)

	config, hostConfig, cmd, err := runconfig.Parse(cmd, args)
	// just in case the Parse does not exit
	if err != nil {
		cmd.ReportError(err.Error(), true)
		os.Exit(1)
	}

	if len(hostConfig.Dns) > 0 {
		// check the DNS settings passed via --dns against
		// localhost regexp to warn if they are trying to
		// set a DNS to a localhost address
		for _, dnsIP := range hostConfig.Dns {
			if dns.IsLocalhost(dnsIP) {
				fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
				break
			}
		}
	}
	if config.Image == "" {
		cmd.Usage()
		return nil
	}

	if !*flDetach {
		if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil {
			return err
		}
	} else {
		if fl := cmd.Lookup("-attach"); fl != nil {
			flAttach = fl.Value.(*opts.ListOpts)
			if flAttach.Len() != 0 {
				return ErrConflictAttachDetach
			}
		}
		if *flAutoRemove {
			return ErrConflictDetachAutoRemove
		}

		config.AttachStdin = false
		config.AttachStdout = false
		config.AttachStderr = false
		config.StdinOnce = false
	}

	// Disable flSigProxy when in TTY mode
	sigProxy := *flSigProxy
	if config.Tty {
		sigProxy = false
	}

	// Telling the Windows daemon the initial size of the tty during start makes
	// a far better user experience rather than relying on subsequent resizes
	// to cause things to catch up.
	if runtime.GOOS == "windows" {
		hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = cli.getTtySize()
	}

	createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
	if err != nil {
		return err
	}
	if sigProxy {
		sigc := cli.forwardAllSignals(createResponse.ID)
		defer signal.StopCatch(sigc)
	}
	var (
		waitDisplayID chan struct{}
		errCh         chan error
	)
	if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous to allow the client to write to stdin before having to read the ID
		waitDisplayID = make(chan struct{})
		go func() {
			defer close(waitDisplayID)
			fmt.Fprintf(cli.out, "%s\n", createResponse.ID)
		}()
	}
	if *flAutoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) {
		return ErrConflictRestartPolicyAndAutoRemove
	}
	// We need to instantiate the chan because the select needs it. It can
	// be closed but can't be uninitialized.
	hijacked := make(chan io.Closer)
	// Block the return until the chan gets closed
	defer func() {
		logrus.Debugf("End of CmdRun(), Waiting for hijack to finish.")
		if _, ok := <-hijacked; ok {
			fmt.Fprintln(cli.err, "Hijack did not finish (chan still open)")
		}
	}()
	if config.AttachStdin || config.AttachStdout || config.AttachStderr {
		var (
			out, stderr io.Writer
			in          io.ReadCloser
			v           = url.Values{}
		)
		v.Set("stream", "1")
		if config.AttachStdin {
			v.Set("stdin", "1")
			in = cli.in
		}
		if config.AttachStdout {
			v.Set("stdout", "1")
			out = cli.out
		}
		if config.AttachStderr {
			v.Set("stderr", "1")
			if config.Tty {
				stderr = cli.out
			} else {
				stderr = cli.err
			}
		}
		errCh = promise.Go(func() error {
			return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
		})
	} else {
		close(hijacked)
	}
	// Acknowledge the hijack before starting
	select {
	case closer := <-hijacked:
		// Make sure that the hijack gets closed when returning (results
		// in closing the hijack chan and freeing server's goroutines)
		if closer != nil {
			defer closer.Close()
		}
	case err := <-errCh:
		if err != nil {
			logrus.Debugf("Error hijack: %s", err)
			return err
		}
	}

	defer func() {
		if *flAutoRemove {
			if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, nil)); err != nil {
				fmt.Fprintf(cli.err, "Error deleting container: %s\n", err)
			}
		}
	}()

	//start the container
	if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, nil)); err != nil {
		return err
	}

	if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
		if err := cli.monitorTtySize(createResponse.ID, false); err != nil {
			fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err)
		}
	}

	if errCh != nil {
		if err := <-errCh; err != nil {
			logrus.Debugf("Error hijack: %s", err)
			return err
		}
	}

	// Detached mode: wait for the id to be displayed and return.
	if !config.AttachStdout && !config.AttachStderr {
		// Detached mode
		<-waitDisplayID
		return nil
	}

	var status int

	// Attached mode
	if *flAutoRemove {
		// Autoremove: wait for the container to finish, retrieve
		// the exit code and remove the container
		if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, nil)); err != nil {
			return err
		}
		if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
			return err
		}
	} else {
		// No Autoremove: Simply retrieve the exit code
		if !config.Tty {
			// In non-TTY mode, we can't detach, so we must wait for container exit
			if status, err = waitForExit(cli, createResponse.ID); err != nil {
				return err
			}
		} else {
			// In TTY mode, there is a race: if the process dies too slowly, the state could
			// be updated after the getExitCode call and result in the wrong exit code being reported
			if _, status, err = getExitCode(cli, createResponse.ID); err != nil {
				return err
			}
		}
	}
	if status != 0 {
		return StatusError{StatusCode: status}
	}
	return nil
}
Example 20
func parseRun(args []string, sysInfo *sysinfo.SysInfo) (*runconfig.Config, *runconfig.HostConfig, *flag.FlagSet, error) {
	cmd := flag.NewFlagSet("run", flag.ContinueOnError)
	cmd.SetOutput(ioutil.Discard)
	cmd.Usage = nil
	return runconfig.Parse(cmd, args, sysInfo)
}
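A usage sketch for this wrapper: a hypothetical test-style caller passing a docker-run-style argument list. Only flags that runconfig.Parse itself registers are used, and the image name and expectations are illustrative.

// Hypothetical caller; not part of the original source.
func TestParseRunSketch(t *testing.T) {
	config, hostConfig, _, err := parseRun([]string{"-i", "-t", "busybox", "/bin/sh"}, nil)
	if err != nil {
		t.Fatal(err)
	}
	if config.Image != "busybox" {
		t.Fatalf("expected image busybox, got %s", config.Image)
	}
	_ = hostConfig // binds, port mappings, restart policy, ...
}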
Example 21
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is
// only one argument. The difference in processing:
//
// RUN echo hi          # sh -c echo hi       (Linux)
// RUN echo hi          # cmd /S /C echo hi   (Windows)
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool, original string) error {
	if b.image == "" && !b.noBaseImage {
		return derr.ErrorCodeMissingFrom
	}

	if err := b.flags.Parse(); err != nil {
		return err
	}

	args = handleJSONArgs(args, attributes)

	if !attributes["json"] {
		if runtime.GOOS != "windows" {
			args = append([]string{"/bin/sh", "-c"}, args...)
		} else {
			args = append([]string{"cmd", "/S", "/C"}, args...)
		}
	}

	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
	runCmd.SetOutput(ioutil.Discard)
	runCmd.Usage = nil

	config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...))
	if err != nil {
		return err
	}

	// stash the cmd
	cmd := b.runConfig.Cmd
	runconfig.Merge(b.runConfig, config)
	// stash the config environment
	env := b.runConfig.Env

	defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)
	defer func(env []string) { b.runConfig.Env = env }(env)

	// derive the net build-time environment for this run. We let config
	// environment override the build time environment.
	// This means that we take the b.buildArgs list of env vars and remove
	// any of those variables that are defined as part of the container. In other
	// words, anything in b.Config.Env. What's left is the list of build-time env
	// vars that we need to add to each RUN command - note the list could be empty.
	//
	// We don't persist the build time environment with container's config
	// environment, but just sort and prepend it to the command string at time
	// of commit.
	// This helps with tracing back the image's actual environment at the time
	// of RUN, without leaking it to the final image. It also aids cache
	// lookup for same image built with same build time environment.
	cmdBuildEnv := []string{}
	configEnv := runconfig.ConvertKVStringsToMap(b.runConfig.Env)
	for key, val := range b.BuildArgs {
		if !b.isBuildArgAllowed(key) {
			// skip build-args that are not in allowed list, meaning they have
			// not been defined by an "ARG" Dockerfile command yet.
			// This is an error condition but only if there is no "ARG" in the entire
			// Dockerfile, so we'll generate any necessary errors after we parsed
			// the entire file (see 'leftoverArgs' processing in evaluator.go )
			continue
		}
		if _, ok := configEnv[key]; !ok {
			cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val))
		}
	}

	// derive the command to use for probeCache() and to commit in this container.
	// Note that we only do this if there are any build-time env vars.  Also, we
	// use the special argument "|#" at the start of the args array. This will
	// avoid conflicts with any RUN command since commands can not
	// start with | (vertical bar). The "#" (number of build envs) is there to
	// help ensure proper cache matches. We don't want a RUN command
	// that starts with "foo=abc" to be considered part of a build-time env var.
	saveCmd := config.Cmd
	if len(cmdBuildEnv) > 0 {
		sort.Strings(cmdBuildEnv)
		tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...)
		saveCmd = stringutils.NewStrSlice(append(tmpEnv, saveCmd.Slice()...)...)
	}

	b.runConfig.Cmd = saveCmd
	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	// set Cmd manually, this is special case only for Dockerfiles
	b.runConfig.Cmd = config.Cmd
	// set build-time environment for 'run'.
	b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...)
	// set config as already being escaped, this prevents double escaping on windows
	b.runConfig.ArgsEscaped = true

	logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd)

	c, err := b.create()
	if err != nil {
		return err
	}

	// Ensure that we keep the container mounted until the commit
	// to avoid unmounting and then mounting directly again
	b.docker.Mount(c)
	defer b.docker.Unmount(c)

	err = b.run(c)
	if err != nil {
		return err
	}

	// revert to original config environment and set the command string to
	// have the build-time env vars in it (if any) so that future cache look-ups
	// properly match it.
	b.runConfig.Env = env
	b.runConfig.Cmd = saveCmd
	if err := b.commit(c.ID, cmd, "run"); err != nil {
		return err
	}

	return nil
}
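The "|N" cache-key construction described in the comments above, as a standalone runnable sketch; the build-arg values are hypothetical and stringutils.StrSlice is replaced with a plain []string for brevity.

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Two hypothetical build-time variables in effect for this RUN step.
	cmdBuildEnv := []string{"FOO=bar", "BAZ=qux"}
	// The command produced for a shell-form `RUN echo hi`.
	runCmd := []string{"/bin/sh", "-c", "echo hi"}

	// Sort the build env and prepend "|<count>" so the cache key cannot be
	// confused with a plain RUN command (commands never start with '|').
	sort.Strings(cmdBuildEnv)
	saveCmd := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...)
	saveCmd = append(saveCmd, runCmd...)

	fmt.Println(saveCmd) // [|2 BAZ=qux FOO=bar /bin/sh -c echo hi]
}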
Example 22
func TestRestartKillWait(t *testing.T) {
	eng := NewTestEngine(t)
	runtime := mkDaemonFromEngine(eng, t)
	defer runtime.Nuke()

	config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	id := createTestContainer(eng, config, t)

	job := eng.Job("containers")
	job.SetenvBool("all", true)
	outs, err := job.Stdout.AddListTable()
	if err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	if len(outs.Data) != 1 {
		t.Errorf("Expected 1 container, %v found", len(outs.Data))
	}

	job = eng.Job("start", id)
	if err := job.ImportEnv(hostConfig); err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}
	job = eng.Job("kill", id)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	eng = newTestEngine(t, false, runtime.Config().Root)

	job = eng.Job("containers")
	job.SetenvBool("all", true)
	outs, err = job.Stdout.AddListTable()
	if err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	if len(outs.Data) != 1 {
		t.Errorf("Expected 1 container, %v found", len(outs.Data))
	}

	setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() {
		job = eng.Job("wait", outs.Data[0].Get("Id"))
		if err := job.Run(); err != nil {
			t.Fatal(err)
		}
	})
}
Example 23
func TestCreateStartRestartStopStartKillRm(t *testing.T) {
	eng := NewTestEngine(t)
	defer mkDaemonFromEngine(eng, t).Nuke()

	config, hostConfig, _, err := runconfig.Parse([]string{"-i", unitTestImageID, "/bin/cat"}, nil)
	if err != nil {
		t.Fatal(err)
	}

	id := createTestContainer(eng, config, t)

	job := eng.Job("containers")
	job.SetenvBool("all", true)
	outs, err := job.Stdout.AddListTable()
	if err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	if len(outs.Data) != 1 {
		t.Errorf("Expected 1 container, %v found", len(outs.Data))
	}

	job = eng.Job("start", id)
	if err := job.ImportEnv(hostConfig); err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	job = eng.Job("restart", id)
	job.SetenvInt("t", 15)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	job = eng.Job("stop", id)
	job.SetenvInt("t", 15)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	job = eng.Job("start", id)
	if err := job.ImportEnv(hostConfig); err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	if err := eng.Job("kill", id).Run(); err != nil {
		t.Fatal(err)
	}

	// FIXME: this failed once with a race condition ("Unable to remove filesystem for xxx: directory not empty")
	job = eng.Job("rm", id)
	job.SetenvBool("removeVolume", true)
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	job = eng.Job("containers")
	job.SetenvBool("all", true)
	outs, err = job.Stdout.AddListTable()
	if err != nil {
		t.Fatal(err)
	}
	if err := job.Run(); err != nil {
		t.Fatal(err)
	}

	if len(outs.Data) != 0 {
		t.Errorf("Expected 0 container, %v found", len(outs.Data))
	}
}