// Ensure that CIDFile gets deleted if it's empty
// Perform this test by making `docker run` fail
func TestRunCidFileCleanupIfEmpty(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
	if err != nil {
		t.Fatal(err)
	}
	tmpCidFile := path.Join(tmpDir, "cid")

	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	c := make(chan struct{})
	go func() {
		defer close(c)
		if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID); err == nil {
			t.Fatal("running without a command should have failed")
		}
		if _, err := os.Stat(tmpCidFile); err == nil {
			t.Fatalf("empty CIDFile '%s' should've been deleted", tmpCidFile)
		}
	}()
	defer os.RemoveAll(tmpDir)

	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
		<-c
	})
}
// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint
func TestHttpsInfo(t *testing.T) {
	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto,
		testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t))

	setTimeout(t, "Reading command output timed out", 10*time.Second, func() {
		if err := cli.CmdInfo(); err != nil {
			t.Fatal(err)
		}
	})
}
// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint
// by using a rogue client certificate and checks that it fails with the expected error.
func TestHttpsInfoRogueCert(t *testing.T) {
	cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto,
		testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t))

	setTimeout(t, "Reading command output timed out", 10*time.Second, func() {
		err := cli.CmdInfo()
		if err == nil {
			t.Fatal("Expected error but got nil")
		}
		if !strings.Contains(err.Error(), errBadCertificate) {
			t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err)
		}
	})
}
// #2098 - Docker cidFiles only contain short version of the containerId
// sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
// TestRunCidFileCheckIDLength tests that run --cidfile writes the long id
func TestRunCidFileCheckIDLength(t *testing.T) {
	stdout, stdoutPipe := io.Pipe()

	tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
	if err != nil {
		t.Fatal(err)
	}
	tmpCidFile := path.Join(tmpDir, "cid")

	cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	c := make(chan struct{})
	go func() {
		defer close(c)
		if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID, "ls"); err != nil {
			t.Fatal(err)
		}
	}()
	defer os.RemoveAll(tmpDir)

	setTimeout(t, "Reading command output timed out", 2*time.Second, func() {
		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
		if err != nil {
			t.Fatal(err)
		}
		if len(cmdOutput) < 1 {
			t.Fatalf("'ls' should return something, not '%s'", cmdOutput)
		}
		// Read the tmpCidFile
		buffer, err := ioutil.ReadFile(tmpCidFile)
		if err != nil {
			t.Fatal(err)
		}
		id := string(buffer)

		if len(id) != len("2bf44ea18873287bd9ace8a4cb536a7cbe134bed67e805fdf2f58a57f69b320c") {
			t.Fatalf("--cidfile should be a long id, not '%s'", id)
		}
		// Test that it's a valid cid? (though the container is gone..)
		// The file and dir are removed by the deferred cleanup above.
	})

	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
		<-c
	})
}
// Expected behaviour: the process stays alive when the client disconnects,
// but the client detaches.
func TestRunDisconnectTty(t *testing.T) {
	stdin, stdinPipe := io.Pipe()
	stdout, stdoutPipe := io.Pipe()

	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	c1 := make(chan struct{})
	go func() {
		defer close(c1)
		// We're simulating a disconnect so the return value doesn't matter. What matters is the
		// fact that CmdRun returns.
		if err := cli.CmdRun("-i", "-t", unitTestImageID, "/bin/cat"); err != nil {
			log.Debugf("Error CmdRun: %s", err)
		}
	}()

	container := waitContainerStart(t, 10*time.Second)

	state := setRaw(t, container)
	defer unsetRaw(t, container, state)

	// Client disconnect after run -i should keep stdin open in TTY mode
	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
			t.Fatal(err)
		}
	})

	// Close pipes (simulate disconnect)
	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
		t.Fatal(err)
	}

	// Wait for CmdRun to return
	setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() {
		<-c1
	})

	// In tty mode, we expect the process to stay alive even after the client's stdin closes.
	// Give the monitor some time to do its thing.
	container.State.WaitStop(500 * time.Millisecond)
	if !container.State.IsRunning() {
		t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
	}
}
// TestRunDetach checks attaching and detaching with the escape sequence.
func TestRunDetach(t *testing.T) {
	stdin, stdinPipe := io.Pipe()
	stdout, stdoutPipe := io.Pipe()

	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		cli.CmdRun("-i", "-t", unitTestImageID, "cat")
	}()

	container := waitContainerStart(t, 10*time.Second)

	state := setRaw(t, container)
	defer unsetRaw(t, container, state)

	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
			t.Fatal(err)
		}
	})

	// Send the detach escape sequence (Ctrl-P, Ctrl-Q)
	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
		stdinPipe.Write([]byte{16})
		time.Sleep(100 * time.Millisecond)
		stdinPipe.Write([]byte{17})
	})

	// Wait for CmdRun to return
	setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() {
		<-ch
	})
	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)

	time.Sleep(500 * time.Millisecond)
	if !container.State.IsRunning() {
		t.Fatal("The detached container should still be running")
	}

	setTimeout(t, "Waiting for container to die timed out", 20*time.Second, func() {
		container.Kill()
	})
}
// Expected behaviour: error out when attempting to bind mount non-existing source paths
func TestRunErrorBindNonExistingSource(t *testing.T) {
	cli := client.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	c := make(chan struct{})
	go func() {
		defer close(c)
		// This check is made at runtime, can't be "unit tested"
		if err := cli.CmdRun("-v", "/i/dont/exist:/tmp", unitTestImageID, "echo 'should fail'"); err == nil {
			t.Fatal("should have failed to run when using /i/dont/exist as a source for the bind mount")
		}
	}()

	setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
		<-c
	})
}
// Expected behaviour: the process dies when the client disconnects
func TestRunDisconnect(t *testing.T) {
	stdin, stdinPipe := io.Pipe()
	stdout, stdoutPipe := io.Pipe()

	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	c1 := make(chan struct{})
	go func() {
		// We're simulating a disconnect so the return value doesn't matter. What matters is the
		// fact that CmdRun returns.
		cli.CmdRun("-i", unitTestImageID, "/bin/cat")
		close(c1)
	}()

	setTimeout(t, "Read/Write assertion timed out", 2*time.Second, func() {
		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
			t.Fatal(err)
		}
	})

	// Close pipes (simulate disconnect)
	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
		t.Fatal(err)
	}

	// As the pipes are closed, we expect the process to die,
	// therefore CmdRun to unblock. Wait for CmdRun.
	setTimeout(t, "Waiting for CmdRun timed out", 2*time.Second, func() {
		<-c1
	})

	// Client disconnect after run -i should cause stdin to be closed, which should
	// cause /bin/cat to exit.
	setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
		container := globalDaemon.List()[0]
		container.State.WaitStop(-1 * time.Second)
		if container.State.IsRunning() {
			t.Fatalf("/bin/cat is still running after closing stdin")
		}
	})
}
// Expected behaviour: the container gets deleted automatically after exit
func TestRunAutoRemove(t *testing.T) {
	t.Skip("Fixme. Skipping test for now, race condition")
	stdout, stdoutPipe := io.Pipe()

	cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	c := make(chan struct{})
	go func() {
		defer close(c)
		if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil {
			t.Fatal(err)
		}
	}()

	var temporaryContainerID string
	setTimeout(t, "Reading command output timed out", 2*time.Second, func() {
		cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
		if err != nil {
			t.Fatal(err)
		}
		temporaryContainerID = cmdOutput
		if err := closeWrap(stdout, stdoutPipe); err != nil {
			t.Fatal(err)
		}
	})

	setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
		<-c
	})

	time.Sleep(500 * time.Millisecond)

	if len(globalDaemon.List()) > 0 {
		t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID)
	}
}
// Expected behaviour: the process stays alive when the client disconnects
func TestAttachDisconnect(t *testing.T) {
	stdin, stdinPipe := io.Pipe()
	stdout, stdoutPipe := io.Pipe()

	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	go func() {
		// Start a process in daemon mode
		if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil {
			log.Debugf("Error CmdRun: %s", err)
		}
	}()

	setTimeout(t, "Waiting for CmdRun timed out", 10*time.Second, func() {
		if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
			t.Fatal(err)
		}
	})

	setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() {
		for {
			l := globalDaemon.List()
			if len(l) == 1 && l[0].State.IsRunning() {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	})

	container := globalDaemon.List()[0]

	// Attach to it
	c1 := make(chan struct{})
	go func() {
		// We're simulating a disconnect so the return value doesn't matter. What matters is the
		// fact that CmdAttach returns.
		cli.CmdAttach(container.ID)
		close(c1)
	}()

	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
			t.Fatal(err)
		}
	})

	// Close pipes (client disconnects)
	if err := closeWrap(stdin, stdinPipe, stdout, stdoutPipe); err != nil {
		t.Fatal(err)
	}

	// Wait for attach to finish; the client disconnected, therefore Attach finished its job
	setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() {
		<-c1
	})

	// We closed stdin, expect /bin/cat to still be running.
	// Wait a little bit to make sure container.monitor() did its thing.
	_, err := container.State.WaitStop(500 * time.Millisecond)
	if err == nil || !container.State.IsRunning() {
		t.Fatalf("/bin/cat is not running after closing stdin")
	}

	// Try to avoid the timeout in destroy. Best effort, don't check error
	cStdin, _ := container.StdinPipe()
	cStdin.Close()
	container.State.WaitStop(-1 * time.Second)
}
// TestAttachDetach checks that attach in tty mode can be detached using the long container ID
func TestAttachDetach(t *testing.T) {
	stdin, stdinPipe := io.Pipe()
	stdout, stdoutPipe := io.Pipe()

	cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
	defer cleanup(globalEngine, t)

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil {
			t.Fatal(err)
		}
	}()

	container := waitContainerStart(t, 10*time.Second)

	setTimeout(t, "Reading container's id timed out", 10*time.Second, func() {
		buf := make([]byte, 1024)
		n, err := stdout.Read(buf)
		if err != nil {
			t.Fatal(err)
		}
		if strings.Trim(string(buf[:n]), " \r\n") != container.ID {
			t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n])
		}
	})
	setTimeout(t, "Starting container timed out", 10*time.Second, func() {
		<-ch
	})

	state := setRaw(t, container)
	defer unsetRaw(t, container, state)

	stdin, stdinPipe = io.Pipe()
	stdout, stdoutPipe = io.Pipe()
	cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)

	ch = make(chan struct{})
	go func() {
		defer close(ch)
		if err := cli.CmdAttach(container.ID); err != nil {
			if err != io.ErrClosedPipe {
				t.Fatal(err)
			}
		}
	}()

	setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() {
		if err := assertPipe("hello\n", "hello", stdout, stdinPipe, 150); err != nil {
			if err != io.ErrClosedPipe {
				t.Fatal(err)
			}
		}
	})

	// Send the detach escape sequence (Ctrl-P, Ctrl-Q)
	setTimeout(t, "Escape sequence timeout", 5*time.Second, func() {
		stdinPipe.Write([]byte{16})
		time.Sleep(100 * time.Millisecond)
		stdinPipe.Write([]byte{17})
	})

	// Wait for CmdAttach to return
	setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() {
		<-ch
	})

	closeWrap(stdin, stdinPipe, stdout, stdoutPipe)

	time.Sleep(500 * time.Millisecond)
	if !container.State.IsRunning() {
		t.Fatal("The detached container should still be running")
	}

	setTimeout(t, "Waiting for container to die timed out", 5*time.Second, func() {
		container.Kill()
	})
}
func main() {
	if reexec.Init() {
		return
	}
	flag.Parse()
	// FIXME: validate daemon flags here

	if *flVersion {
		showVersion()
		return
	}
	if *flDebug {
		os.Setenv("DEBUG", "1")
	}

	if len(flHosts) == 0 {
		defaultHost := os.Getenv("DOCKER_HOST")
		if defaultHost == "" || *flDaemon {
			// If we do not have a host, default to unix socket
			defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET)
		}
		if _, err := api.ValidateHost(defaultHost); err != nil {
			log.Fatal(err)
		}
		flHosts = append(flHosts, defaultHost)
	}

	if *flDaemon {
		mainDaemon()
		return
	}

	if len(flHosts) > 1 {
		log.Fatal("Please specify only one -H")
	}
	protoAddrParts := strings.SplitN(flHosts[0], "://", 2)

	var (
		cli       *client.DockerCli
		tlsConfig tls.Config
	)
	tlsConfig.InsecureSkipVerify = true

	// If we should verify the server, we need to load a trusted ca
	if *flTlsVerify {
		*flTls = true
		certPool := x509.NewCertPool()
		file, err := ioutil.ReadFile(*flCa)
		if err != nil {
			log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
		}
		certPool.AppendCertsFromPEM(file)
		tlsConfig.RootCAs = certPool
		tlsConfig.InsecureSkipVerify = false
	}

	// If tls is enabled, try to load and send client certificates
	if *flTls || *flTlsVerify {
		_, errCert := os.Stat(*flCert)
		_, errKey := os.Stat(*flKey)
		if errCert == nil && errKey == nil {
			*flTls = true
			cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
			if err != nil {
				log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
			}
			tlsConfig.Certificates = []tls.Certificate{cert}
		}
	}

	if *flTls || *flTlsVerify {
		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
	} else {
		cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil)
	}

	if err := cli.Cmd(flag.Args()...); err != nil {
		if sterr, ok := err.(*utils.StatusError); ok {
			if sterr.Status != "" {
				log.Println(sterr.Status)
			}
			os.Exit(sterr.StatusCode)
		}
		log.Fatal(err)
	}
}