func TestIPv6InterfaceAllocationAutoNetmaskLe80(t *testing.T) {
	input := engine.Env{}

	_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")

	// set global ipv6
	input.Set("globalIPv6Network", subnet.String())
	input.Set("RequestedMac", "ab:cd:ab:cd:ab:cd")

	output := newInterfaceAllocation(t, input)

	// ensure global ip with mac
	ip := net.ParseIP(output.Get("GlobalIPv6"))
	expected_ip := net.ParseIP("2001:db8:1234:1234:1234:abcd:abcd:abcd")
	if ip.String() != expected_ip.String() {
		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
	}

	// ensure link local format
	ip = net.ParseIP(output.Get("LinkLocalIPv6"))
	expected_ip = net.ParseIP("fe80::a9cd:abff:fecd:abcd")
	if ip.String() != expected_ip.String() {
		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
	}
}
func (cli *DockerCli) CmdCommit(args ...string) error {
	cmd := cli.Subcmd("commit", "CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes", true)
	flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
	flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
	flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <*****@*****.**>\")")
	flChanges := opts.NewListOpts(nil)
	cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image")
	// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
	flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
	cmd.Require(flag.Max, 2)
	cmd.Require(flag.Min, 1)
	utils.ParseFlags(cmd, args, true)

	var (
		name            = cmd.Arg(0)
		repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
	)

	// Check if the given image name can be resolved
	if repository != "" {
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
	}

	v := url.Values{}
	v.Set("container", name)
	v.Set("repo", repository)
	v.Set("tag", tag)
	v.Set("comment", *flComment)
	v.Set("author", *flAuthor)
	for _, change := range flChanges.GetAll() {
		v.Add("changes", change)
	}

	if !*flPause {
		v.Set("pause", "0")
	}

	var (
		config *runconfig.Config
		env    engine.Env
	)
	if *flConfig != "" {
		config = &runconfig.Config{}
		if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
			return err
		}
	}
	stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false)
	if err != nil {
		return err
	}

	if err := env.Decode(stream); err != nil {
		return err
	}

	fmt.Fprintf(cli.out, "%s\n", env.Get("Id"))
	return nil
}
// GetRegistryConfig returns the current registry configuration.
func (s *Service) GetRegistryConfig(job *engine.Job) error {
	out := engine.Env{}
	err := out.SetJson("config", s.Config)
	if err != nil {
		return err
	}
	out.WriteTo(job.Stdout)

	return nil
}
func waitForExit(cli *DockerCli, containerId string) (int, error) {
	stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
	if err != nil {
		return -1, err
	}

	var out engine.Env
	if err := out.Decode(stream); err != nil {
		return -1, err
	}
	return out.GetInt("StatusCode"), nil
}
func (cli *DockerCli) CmdPort(args ...string) error {
	cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true)
	cmd.Require(flag.Min, 1)
	utils.ParseFlags(cmd, args, true)

	stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
	if err != nil {
		return err
	}

	env := engine.Env{}
	if err := env.Decode(stream); err != nil {
		return err
	}
	ports := nat.PortMap{}
	if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil {
		return err
	}

	if cmd.NArg() == 2 {
		var (
			port  = cmd.Arg(1)
			proto = "tcp"
			parts = strings.SplitN(port, "/", 2)
		)

		if len(parts) == 2 && len(parts[1]) != 0 {
			port = parts[0]
			proto = parts[1]
		}
		natPort := port + "/" + proto
		if frontends, exists := ports[nat.Port(natPort)]; exists && frontends != nil {
			for _, frontend := range frontends {
				fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
			}
			return nil
		}
		return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0))
	}

	for from, frontends := range ports {
		for _, frontend := range frontends {
			fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort)
		}
	}

	return nil
}
func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}
	var (
		env          engine.Env
		stdoutBuffer = bytes.NewBuffer(nil)
		job          = eng.Job("wait", vars["name"])
	)
	job.Stdout.Add(stdoutBuffer)
	if err := job.Run(); err != nil {
		return err
	}

	env.Set("StatusCode", engine.Tail(stdoutBuffer, 1))
	return writeJSONEnv(w, http.StatusOK, env)
}
func (cli *DockerCli) CmdTop(args ...string) error {
	cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container", true)
	cmd.Require(flag.Min, 1)
	utils.ParseFlags(cmd, args, true)

	val := url.Values{}
	if cmd.NArg() > 1 {
		val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
	}

	stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false)
	if err != nil {
		return err
	}
	var procs engine.Env
	if err := procs.Decode(stream); err != nil {
		return err
	}
	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t"))
	processes := [][]string{}
	if err := procs.GetJson("Processes", &processes); err != nil {
		return err
	}
	for _, proc := range processes {
		fmt.Fprintln(w, strings.Join(proc, "\t"))
	}
	w.Flush()
	return nil
}
func TestIPv6InterfaceAllocationAutoNetmaskGt80(t *testing.T) {
	input := engine.Env{}

	_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/81")

	// set global ipv6
	input.Set("globalIPv6Network", subnet.String())

	output := newInterfaceAllocation(t, input)

	// ensure the global ip is a low, sequentially assigned address
	// (subnet is smaller than /80, so it is not derived from the MAC)
	ip := net.ParseIP(output.Get("GlobalIPv6"))
	_, subnet, _ = net.ParseCIDR(fmt.Sprintf("%s/%d", subnet.IP.String(), 120))
	if !subnet.Contains(ip) {
		t.Fatalf("Error ip %s not in subnet %s", ip.String(), subnet.String())
	}
}
// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
	stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if err != ErrConnectionRefused {
			return false, -1, err
		}
		return false, -1, nil
	}

	var result engine.Env
	if err := result.Decode(stream); err != nil {
		return false, -1, err
	}

	state := result.GetSubEnv("State")
	return state.GetBool("Running"), state.GetInt("ExitCode"), nil
}
// ResolveRepository splits a repository name into its components and
// returns the configuration of the associated registry.
func (s *Service) ResolveRepository(job *engine.Job) error {
	var (
		reposName = job.Args[0]
	)

	repoInfo, err := s.Config.NewRepositoryInfo(reposName)
	if err != nil {
		return err
	}

	out := engine.Env{}
	err = out.SetJson("repository", repoInfo)
	if err != nil {
		return err
	}
	out.WriteTo(job.Stdout)

	return nil
}
// ResolveIndex takes indexName and returns index info
func (s *Service) ResolveIndex(job *engine.Job) error {
	var (
		indexName = job.Args[0]
	)

	index, err := s.Config.NewIndexInfo(indexName)
	if err != nil {
		return err
	}

	out := engine.Env{}
	err = out.SetJson("index", index)
	if err != nil {
		return err
	}
	out.WriteTo(job.Stdout)

	return nil
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
	var imageID string
	if noTrunc {
		imageID = image.Get("Id")
	} else {
		imageID = stringid.TruncateID(image.Get("Id"))
	}

	fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize"))))
	if image.GetList("RepoTags")[0] != "<none>:<none>" {
		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", "))
	} else {
		fmt.Fprint(cli.out, "\n")
	}
}
func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	var (
		authConfig, err = ioutil.ReadAll(r.Body)
		job             = eng.Job("auth")
		stdoutBuffer    = bytes.NewBuffer(nil)
	)
	if err != nil {
		return err
	}
	job.Setenv("authConfig", string(authConfig))
	job.Stdout.Add(stdoutBuffer)
	if err = job.Run(); err != nil {
		return err
	}
	if status := engine.Tail(stdoutBuffer, 1); status != "" {
		return writeJSON(w, http.StatusOK, &types.AuthResponse{
			Status: status,
		})
	}
	w.WriteHeader(http.StatusNoContent)
	return nil
}
func (cli *DockerCli) CmdLogs(args ...string) error {
	var (
		cmd    = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true)
		follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
		times  = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
		tail   = cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs")
	)
	cmd.Require(flag.Exact, 1)

	utils.ParseFlags(cmd, args, true)

	name := cmd.Arg(0)

	stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false)
	if err != nil {
		return err
	}

	env := engine.Env{}
	if err := env.Decode(stream); err != nil {
		return err
	}

	if env.GetSubEnv("HostConfig").GetSubEnv("LogConfig").Get("Type") != "json-file" {
		return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver")
	}

	v := url.Values{}
	v.Set("stdout", "1")
	v.Set("stderr", "1")

	if *times {
		v.Set("timestamps", "1")
	}

	if *follow {
		v.Set("follow", "1")
	}
	v.Set("tail", *tail)

	return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil)
}
func (cli *DockerCli) CmdCp(args ...string) error {
	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data\nas a tar file to STDOUT.", true)
	cmd.Require(flag.Exact, 2)

	utils.ParseFlags(cmd, args, true)

	var copyData engine.Env
	info := strings.Split(cmd.Arg(0), ":")

	if len(info) != 2 {
		return fmt.Errorf("Error: Path not specified")
	}

	copyData.Set("Resource", info[1])
	copyData.Set("HostPath", cmd.Arg(1))

	stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false)
	if stream != nil {
		defer stream.Close()
	}
	if statusCode == 404 {
		return fmt.Errorf("No such container: %v", info[0])
	}
	if err != nil {
		return err
	}

	if statusCode == 200 {
		dest := copyData.Get("HostPath")
		if dest == "-" {
			_, err = io.Copy(cli.out, stream)
		} else {
			err = archive.Untar(stream, dest, &archive.TarOptions{NoLchown: true})
		}
		if err != nil {
			return err
		}
	}
	return nil
}
func TestIPv6InterfaceAllocationRequest(t *testing.T) {
	input := engine.Env{}

	_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")
	expected_ip := net.ParseIP("2001:db8:1234:1234:1234::1328")

	// set global ipv6
	input.Set("globalIPv6Network", subnet.String())
	input.Set("RequestedIPv6", expected_ip.String())

	output := newInterfaceAllocation(t, input)

	// ensure the explicitly requested global ip was allocated
	ip := net.ParseIP(output.Get("GlobalIPv6"))
	if ip.String() != expected_ip.String() {
		t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
	}

	// retry -> fails for duplicated address
	input.SetBool("expectFail", true)
	newInterfaceAllocation(t, input)
}
func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := parseForm(r); err != nil {
		return err
	}
	var (
		config       engine.Env
		env          engine.Env
		job          = eng.Job("commit", r.Form.Get("container"))
		stdoutBuffer = bytes.NewBuffer(nil)
	)

	if err := checkForJson(r); err != nil {
		return err
	}

	if err := config.Decode(r.Body); err != nil {
		log.Errorf("%s", err)
	}

	if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
		job.Setenv("pause", "1")
	} else {
		job.Setenv("pause", r.FormValue("pause"))
	}

	job.Setenv("repo", r.Form.Get("repo"))
	job.Setenv("tag", r.Form.Get("tag"))
	job.Setenv("author", r.Form.Get("author"))
	job.Setenv("comment", r.Form.Get("comment"))
	job.SetenvList("changes", r.Form["changes"])
	job.SetenvSubEnv("config", &config)

	job.Stdout.Add(stdoutBuffer)
	if err := job.Run(); err != nil {
		return err
	}

	env.Set("Id", engine.Tail(stdoutBuffer, 1))
	return writeJSONEnv(w, http.StatusCreated, env)
}
// Allocate a network interface
func Allocate(job *engine.Job) error {
	var (
		ip            net.IP
		mac           net.HardwareAddr
		err           error
		id            = job.Args[0]
		requestedIP   = net.ParseIP(job.Getenv("RequestedIP"))
		requestedIPv6 = net.ParseIP(job.Getenv("RequestedIPv6"))
		globalIPv6    net.IP
	)

	ip, err = ipAllocator.RequestIP(bridgeIPv4Network, requestedIP)
	if err != nil {
		return err
	}

	// If no explicit mac address was given, generate a random one.
	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
		mac = generateMacAddr(ip)
	}

	if globalIPv6Network != nil {
		// If globalIPv6Network is a /80 subnet or larger, derive the IPv6
		// address from the MAC address by embedding its six bytes in the
		// host part of the network address.
		netmask_ones, _ := globalIPv6Network.Mask.Size()
		if requestedIPv6 == nil && netmask_ones <= 80 {
			requestedIPv6 = make(net.IP, len(globalIPv6Network.IP))
			copy(requestedIPv6, globalIPv6Network.IP)
			for i, h := range mac {
				requestedIPv6[i+10] = h
			}
		}

		globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, requestedIPv6)
		if err != nil {
			log.Errorf("Allocator: RequestIP v6: %v", err)
			return err
		}
		log.Infof("Allocated IPv6 %s", globalIPv6)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeIPv4Network.Mask.String())
	out.Set("Gateway", bridgeIPv4Network.IP.String())
	out.Set("MacAddress", mac.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeIPv4Network.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	// Set the link-local IPv6 address derived from the MAC address
	localIPv6Net, err := linkLocalIPv6FromMac(mac.String())
	if err != nil {
		return err
	}
	localIPv6, _, _ := net.ParseCIDR(localIPv6Net)
	out.Set("LinkLocalIPv6", localIPv6.String())

	if globalIPv6Network != nil {
		out.Set("GlobalIPv6", globalIPv6.String())
		sizev6, _ := globalIPv6Network.Mask.Size()
		out.SetInt("GlobalIPv6PrefixLen", sizev6)
		out.Set("IPv6Gateway", bridgeIPv6Addr.String())
	}

	currentInterfaces.Set(id, &networkInterface{
		IP:   ip,
		IPv6: globalIPv6,
	})

	out.WriteTo(job.Stdout)

	return nil
}
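// The helper below is a minimal illustrative sketch (it is not part of the
// original package) of the MAC-embedding scheme Allocate uses above when the
// global IPv6 subnet is /80 or shorter: the six MAC bytes are copied into
// bytes 10..15 of the 16-byte network address. With subnet
// 2001:db8:1234:1234:1234::/80 and MAC ab:cd:ab:cd:ab:cd this yields
// 2001:db8:1234:1234:1234:abcd:abcd:abcd, the value that
// TestIPv6InterfaceAllocationAutoNetmaskLe80 expects. The name ipv6FromMac is
// hypothetical and exists only for illustration.
func ipv6FromMac(subnet *net.IPNet, mac net.HardwareAddr) net.IP {
	ip := make(net.IP, len(subnet.IP))
	copy(ip, subnet.IP) // start from the (masked) 16-byte network address
	for i, b := range mac {
		ip[i+10] = b // embed the MAC bytes in the host portion
	}
	return ip
}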
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          net.IP
		mac         net.HardwareAddr
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	// If no explicit mac address was given, generate a random one.
	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
		mac = generateMacAddr(ip)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("MacAddress", mac.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}
// Allocate an external port and map it to the interface
func AllocatePort(job *engine.Job) engine.Status {
	var (
		err error

		ip            = defaultBindingIP
		id            = job.Args[0]
		hostIP        = job.Getenv("HostIP")
		hostPort      = job.GetenvInt("HostPort")
		containerPort = job.GetenvInt("ContainerPort")
		proto         = job.Getenv("Proto")
		network       = currentInterfaces.Get(id)
	)

	if hostIP != "" {
		ip = net.ParseIP(hostIP)
		if ip == nil {
			return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
		}
	}

	// host ip, proto, and host port
	var container net.Addr
	switch proto {
	case "tcp":
		container = &net.TCPAddr{IP: network.IP, Port: containerPort}
	case "udp":
		container = &net.UDPAddr{IP: network.IP, Port: containerPort}
	default:
		return job.Errorf("unsupported address type %s", proto)
	}

	//
	// Try up to 10 times to get a port that's not already allocated.
	//
	// In the event of failure to bind, return the error that portmapper.Map
	// yields.
	//
	var host net.Addr
	for i := 0; i < MaxAllocatedPortAttempts; i++ {
		if host, err = portmapper.Map(container, ip, hostPort); err == nil {
			break
		}
		// There is no point in immediately retrying to map an explicitly
		// chosen port.
		if hostPort != 0 {
			job.Logf("Failed to allocate and map port %d: %s", hostPort, err)
			break
		}
		job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1)
	}

	if err != nil {
		return job.Error(err)
	}

	network.PortMappings = append(network.PortMappings, host)

	out := engine.Env{}
	switch netAddr := host.(type) {
	case *net.TCPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	case *net.UDPAddr:
		out.Set("HostIP", netAddr.IP.String())
		out.SetInt("HostPort", netAddr.Port)
	}
	if _, err := out.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
func (cli *DockerCli) CmdStart(args ...string) error {
	var (
		cErr chan error
		tty  bool

		cmd       = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Start one or more stopped containers", true)
		attach    = cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals")
		openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
	)

	cmd.Require(flag.Min, 1)
	utils.ParseFlags(cmd, args, true)

	if *attach || *openStdin {
		if cmd.NArg() > 1 {
			return fmt.Errorf("You cannot start and attach multiple containers at once.")
		}

		stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
		if err != nil {
			return err
		}

		env := engine.Env{}
		if err := env.Decode(stream); err != nil {
			return err
		}
		config := env.GetSubEnv("Config")
		tty = config.GetBool("Tty")

		if !tty {
			sigc := cli.forwardAllSignals(cmd.Arg(0))
			defer signal.StopCatch(sigc)
		}

		var in io.ReadCloser

		v := url.Values{}
		v.Set("stream", "1")

		if *openStdin && config.GetBool("OpenStdin") {
			v.Set("stdin", "1")
			in = cli.in
		}

		v.Set("stdout", "1")
		v.Set("stderr", "1")

		hijacked := make(chan io.Closer)
		// Block the return until the chan gets closed
		defer func() {
			log.Debugf("CmdStart() returned, defer waiting for hijack to finish.")
			if _, ok := <-hijacked; ok {
				log.Errorf("Hijack did not finish (chan still open)")
			}
			cli.in.Close()
		}()
		cErr = promise.Go(func() error {
			return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil)
		})

		// Acknowledge the hijack before starting
		select {
		case closer := <-hijacked:
			// Make sure that the hijack gets closed when returning (results
			// in closing the hijack chan and freeing server's goroutines)
			if closer != nil {
				defer closer.Close()
			}
		case err := <-cErr:
			if err != nil {
				return err
			}
		}
	}

	var encounteredError error
	for _, name := range cmd.Args() {
		_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false))
		if err != nil {
			if !*attach && !*openStdin {
				// When neither --attach nor --interactive is set, we may be
				// starting multiple containers: report this container's
				// failure and continue with the next one.
				fmt.Fprintf(cli.err, "%s\n", err)
				encounteredError = fmt.Errorf("Error: failed to start one or more containers")
			} else {
				encounteredError = err
			}
		} else {
			if !*attach && !*openStdin {
				fmt.Fprintf(cli.out, "%s\n", name)
			}
		}
	}
	if encounteredError != nil {
		return encounteredError
	}

	if *openStdin || *attach {
		if tty && cli.isTerminalOut {
			if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
				log.Errorf("Error monitoring TTY size: %s", err)
			}
		}
		if attachErr := <-cErr; attachErr != nil {
			return attachErr
		}
		_, status, err := getExitCode(cli, cmd.Arg(0))
		if err != nil {
			return err
		}
		if status != 0 {
			return &utils.StatusError{StatusCode: status}
		}
	}
	return nil
}
func newInterfaceAllocation(t *testing.T, input engine.Env) (output engine.Env) {
	eng := engine.New()
	eng.Logging = false

	done := make(chan bool)

	// set IPv6 global if given
	if input.Exists("globalIPv6Network") {
		_, globalIPv6Network, _ = net.ParseCIDR(input.Get("globalIPv6Network"))
	}

	job := eng.Job("allocate_interface", "container_id")
	job.Env().Init(&input)
	reader, _ := job.Stdout.AddPipe()
	go func() {
		output.Decode(reader)
		done <- true
	}()

	res := Allocate(job)
	job.Stdout.Close()
	<-done

	if input.Exists("expectFail") && input.GetBool("expectFail") {
		if res == nil {
			t.Fatal("Expected interface allocation to fail, but it succeeded")
		}
	} else {
		if res != nil {
			t.Fatal("Failed to allocate network interface")
		}
	}

	if input.Exists("globalIPv6Network") {
		// check for bug #11427
		_, subnet, _ := net.ParseCIDR(input.Get("globalIPv6Network"))
		if globalIPv6Network.IP.String() != subnet.IP.String() {
			t.Fatal("globalIPv6Network was modified during allocation")
		}
		// clean up IPv6 global
		globalIPv6Network = nil
	}

	return
}
// writeJSONEnv writes the engine.Env values to the http response stream as a
// json encoded body.
func writeJSONEnv(w http.ResponseWriter, code int, v engine.Env) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	return v.Encode(w)
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) {
	var (
		imageID  string
		parentID string
	)
	if noTrunc {
		imageID = image.Get("Id")
		parentID = image.Get("ParentId")
	} else {
		imageID = stringid.TruncateID(image.Get("Id"))
		parentID = stringid.TruncateID(image.Get("ParentId"))
	}
	if parentID == "" {
		fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
	} else {
		fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
	}
	if image.GetList("RepoTags")[0] != "<none>:<none>" {
		fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
			imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n"))
	}
}
func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if vars == nil {
		return fmt.Errorf("Missing parameter")
	}

	var copyData engine.Env

	if err := checkForJson(r); err != nil {
		return err
	}

	if err := copyData.Decode(r.Body); err != nil {
		return err
	}

	if copyData.Get("Resource") == "" {
		return fmt.Errorf("Path cannot be empty")
	}

	origResource := copyData.Get("Resource")
	if copyData.Get("Resource")[0] == '/' {
		copyData.Set("Resource", copyData.Get("Resource")[1:])
	}

	job := eng.Job("container_copy", vars["name"], copyData.Get("Resource"))
	job.Stdout.Add(w)
	w.Header().Set("Content-Type", "application/x-tar")
	if err := job.Run(); err != nil {
		log.Errorf("%v", err)
		if strings.Contains(strings.ToLower(err.Error()), "no such id") {
			w.WriteHeader(http.StatusNotFound)
		} else if strings.Contains(err.Error(), "no such file or directory") {
			return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
		}
	}
	return nil
}
func (cli *DockerCli) CmdAttach(args ...string) error {
	var (
		cmd     = cli.Subcmd("attach", "CONTAINER", "Attach to a running container", true)
		noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
		proxy   = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process")
	)
	cmd.Require(flag.Exact, 1)

	utils.ParseFlags(cmd, args, true)
	name := cmd.Arg(0)

	stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false)
	if err != nil {
		return err
	}

	env := engine.Env{}
	if err := env.Decode(stream); err != nil {
		return err
	}

	if !env.GetSubEnv("State").GetBool("Running") {
		return fmt.Errorf("You cannot attach to a stopped container, start it first")
	}

	var (
		config = env.GetSubEnv("Config")
		tty    = config.GetBool("Tty")
	)

	if err := cli.CheckTtyInput(!*noStdin, tty); err != nil {
		return err
	}

	if tty && cli.isTerminalOut {
		if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
			log.Debugf("Error monitoring TTY size: %s", err)
		}
	}

	var in io.ReadCloser

	v := url.Values{}
	v.Set("stream", "1")
	if !*noStdin && config.GetBool("OpenStdin") {
		v.Set("stdin", "1")
		in = cli.in
	}

	v.Set("stdout", "1")
	v.Set("stderr", "1")

	if *proxy && !tty {
		sigc := cli.forwardAllSignals(cmd.Arg(0))
		defer signal.StopCatch(sigc)
	}

	if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil); err != nil {
		return err
	}

	_, status, err := getExitCode(cli, cmd.Arg(0))
	if err != nil {
		return err
	}
	if status != 0 {
		return &utils.StatusError{StatusCode: status}
	}

	return nil
}