// CmdLookup returns an image encoded in JSON
func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		if job.GetenvBool("raw") {
			b, err := image.RawJson()
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", image.ID)
		out.Set("Parent", image.Parent)
		out.Set("Comment", image.Comment)
		out.SetAuto("Created", image.Created)
		out.Set("Container", image.Container)
		out.SetJson("ContainerConfig", image.ContainerConfig)
		out.Set("DockerVersion", image.DockerVersion)
		out.Set("Author", image.Author)
		out.SetJson("Config", image.Config)
		out.Set("Architecture", image.Architecture)
		out.Set("Os", image.OS)
		out.SetInt64("Size", image.Size)

		if _, err = out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
	images, _ := s.graph.Map()
	if images == nil {
		return engine.StatusOK
	}
	job.Stdout.Write([]byte("digraph docker {\n"))

	var (
		parentImage *image.Image
		err         error
	)
	for _, image := range images {
		parentImage, err = image.GetParent()
		if err != nil {
			return job.Errorf("Error while getting parent image: %v", err)
		}
		if parentImage != nil {
			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
		} else {
			job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
		}
	}

	for id, repos := range s.GetRepoRefs() {
		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
	}
	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
	return engine.StatusOK
}
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a goroutine to set up a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
		}
		go func() {
			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
			chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
		}()
	}

	for i := 0; i < len(protoAddrs); i++ {
		err := <-chErrors
		if err != nil {
			return job.Error(err)
		}
	}

	return engine.StatusOK
}
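// A minimal sketch of driving the handler above through the job engine,
// assuming it is registered under the name "serveapi" on an *engine.Engine
// (the "eng" instance is hypothetical). Each argument is one PROTO://ADDR
// listener:
//
//	job := eng.Job("serveapi", "unix:///var/run/docker.sock", "tcp://0.0.0.0:2375")
//	if err := job.Run(); err != nil {
//		log.Fatalf("serveapi failed: %v", err)
//	}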
func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		outs := engine.NewTable("", 0)
		changes, err := container.Changes()
		if err != nil {
			return job.Error(err)
		}
		for _, change := range changes {
			out := &engine.Env{}
			if err := out.Import(change); err != nil {
				return job.Error(err)
			}
			outs.Add(out)
		}
		if _, err := outs.WriteListTo(job.Stdout); err != nil {
			return job.Error(err)
		}
	} else {
		return job.Errorf("No such container: %s", name)
	}
	return engine.StatusOK
}
func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
	}
	name := job.Args[0]

	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	var (
		config    = container.Config
		newConfig runconfig.Config
	)

	if err := job.GetenvJson("config", &newConfig); err != nil {
		return job.Error(err)
	}

	if err := runconfig.Merge(&newConfig, config); err != nil {
		return job.Error(err)
	}

	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", img.ID)
	return engine.StatusOK
}
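// A sketch of a commit invocation, showing how the optional settings travel in
// the job environment rather than as arguments (the handler name "commit" and
// the engine instance "eng" are assumptions):
//
//	job := eng.Job("commit", "mycontainer")
//	job.Setenv("repo", "myuser/myapp")
//	job.Setenv("tag", "v1")
//	job.Setenv("author", "Jane Doe <jane@example.com>")
//	job.SetenvBool("pause", true)                 // pause the container during commit
//	job.SetenvJson("config", &runconfig.Config{}) // overrides merged into the container config
//	if err := job.Run(); err != nil {
//		log.Fatal(err)
//	}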
func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
	}

	var (
		name     = job.Args[0]
		resource = job.Args[1]
	)

	if container := daemon.Get(name); container != nil {
		data, err := container.Copy(resource)
		if err != nil {
			return job.Error(err)
		}
		defer data.Close()

		if _, err := io.Copy(job.Stdout, data); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
// FIXME: Allow interrupting the current push when a new push of the same image is started.
func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	var (
		localName   = job.Args[0]
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	tag := job.Getenv("tag")
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)
	if _, err := s.poolAdd("push", localName); err != nil {
		return job.Error(err)
	}
	defer s.poolRemove("push", localName)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	img, err := s.graph.Get(localName)
	r, err2 := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
	if err2 != nil {
		return job.Error(err2)
	}

	if err != nil {
		reposLen := 1
		if tag == "" {
			reposLen = len(s.Repositories[localName])
		}
		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
		// If it fails, try to get the repository
		if localRepo, exists := s.Repositories[localName]; exists {
			if err := s.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
				return job.Error(err)
			}
			return engine.StatusOK
		}
		return job.Error(err)
	}

	var token []string
	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
	if _, err := s.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (e *Events) Log(job *engine.Job) engine.Status {
	if len(job.Args) != 3 {
		return job.Errorf("usage: %s ACTION ID FROM", job.Name)
	}
	// not waiting for receivers
	go e.log(job.Args[0], job.Args[1], job.Args[2])
	return engine.StatusOK
}
func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 && n != 2 {
		return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
	}

	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)
	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", localName+":"+tag)
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
			<-c
			return engine.StatusOK
		}
		return job.Error(err)
	}
	defer s.poolRemove("pull", localName+":"+tag)

	// Resolve the Repository name from fqn to endpoint + name
	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
	if err != nil {
		return job.Error(err)
	}

	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return job.Error(err)
	}

	if endpoint == registry.IndexServerAddress() {
		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
		localName = remoteName
	}

	if err = s.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
		return job.Error(err)
	}

	return engine.StatusOK
}
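// A sketch of a pull invocation (the handler name "pull" and "eng" are
// assumptions). Credentials and extra headers are passed json-encoded in the
// environment, mirroring what CmdPull reads back with GetenvJson:
//
//	job := eng.Job("pull", "ubuntu", "14.04")
//	job.SetenvBool("json", true) // stream progress as JSON messages
//	job.SetenvJson("authConfig", &registry.AuthConfig{Username: "me", Password: "secret"})
//	job.SetenvJson("metaHeaders", map[string][]string{"X-Meta": {"value"}})
//	job.Stdout.Add(os.Stdout)
//	if err := job.Run(); err != nil {
//		log.Fatal(err)
//	}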
func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		container.Lock()
		defer container.Unlock()
		if job.GetenvBool("raw") {
			b, err := json.Marshal(&struct {
				*Container
				HostConfig *runconfig.HostConfig
			}{container, container.hostConfig})
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetAuto("Created", container.Created)
		out.Set("Path", container.Path)
		out.SetList("Args", container.Args)
		out.SetJson("Config", container.Config)
		out.SetJson("State", container.State)
		out.Set("Image", container.Image)
		out.SetJson("NetworkSettings", container.NetworkSettings)
		out.Set("ResolvConfPath", container.ResolvConfPath)
		out.Set("HostnamePath", container.HostnamePath)
		out.Set("HostsPath", container.HostsPath)
		out.Set("Name", container.Name)
		out.Set("Driver", container.Driver)
		out.Set("ExecDriver", container.ExecDriver)
		out.Set("MountLabel", container.MountLabel)
		out.Set("ProcessLabel", container.ProcessLabel)
		out.SetJson("Volumes", container.Volumes)
		out.SetJson("VolumesRW", container.VolumesRW)

		if children, err := daemon.Children(container.Name); err == nil {
			for linkAlias, child := range children {
				container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
			}
		}

		out.SetJson("HostConfig", container.hostConfig)

		container.hostConfig.Links = nil
		if _, err := out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		status, _ := container.State.WaitStop(-1 * time.Second)
		job.Printf("%d\n", status)
		return engine.StatusOK
	}
	return job.Errorf("%s: no such container: %s", job.Name, name)
}
// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job.
func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	var (
		name      = job.Args[0]
		container = daemon.Get(name)
	)

	if container == nil {
		return job.Errorf("No such container: %s", name)
	}

	if container.State.IsRunning() {
		return job.Errorf("Container already started")
	}

	// If no environment was set, then no hostconfig was passed.
	if len(job.Environ()) > 0 {
		hostConfig := runconfig.ContainerHostConfigFromJob(job)
		if err := daemon.setHostConfig(container, hostConfig); err != nil {
			return job.Error(err)
		}
	}

	if err := container.Start(); err != nil {
		return job.Errorf("Cannot start container %s: %s", name, err)
	}

	return engine.StatusOK
}
func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 2 && n != 3 {
		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
	}
	var (
		src     = job.Args[0]
		repo    = job.Args[1]
		tag     string
		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
		archive archive.ArchiveReader
		resp    *http.Response
	)
	if len(job.Args) > 2 {
		tag = job.Args[2]
	}

	if src == "-" {
		archive = job.Stdin
	} else {
		u, err := url.Parse(src)
		if err != nil {
			return job.Error(err)
		}
		if u.Scheme == "" {
			u.Scheme = "http"
			u.Host = src
			u.Path = ""
		}
		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
		resp, err = utils.Download(u.String())
		if err != nil {
			return job.Error(err)
		}
		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
		defer progressReader.Close()
		archive = progressReader
	}
	img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil)
	if err != nil {
		return job.Error(err)
	}
	// Optionally register the image at REPO/TAG
	if repo != "" {
		if err := s.Set(repo, tag, img.ID, true); err != nil {
			return job.Error(err)
		}
	}
	job.Stdout.Write(sf.FormatStatus("", img.ID))
	return engine.StatusOK
}
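// Two sketches of import invocations (the handler name "import" is an
// assumption). SRC may be "-" to read a tar stream from the job's stdin, or a
// URL to download from:
//
//	// From a local tarball via stdin:
//	f, _ := os.Open("rootfs.tar")
//	job := eng.Job("import", "-", "myuser/rootfs", "latest")
//	job.Stdin.Add(f)
//	job.Run()
//
//	// From a remote URL:
//	eng.Job("import", "http://example.com/rootfs.tar", "myuser/rootfs").Run()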
func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	imgs := engine.NewTable("", 0)
	if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
		return job.Error(err)
	}
	if len(imgs.Data) == 0 {
		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
	}
	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// CmdTag assigns a new name and tag to an existing image. If the tag already exists,
// it is changed and the image previously referenced by the tag loses that reference.
// This may cause the old image to be garbage-collected if its reference count reaches zero.
//
// Syntax: image_tag NEWNAME OLDNAME
// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0
func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name)
	}
	var (
		newName = job.Args[0]
		oldName = job.Args[1]
	)

	newRepo, newTag := parsers.ParseRepositoryTag(newName)
	// FIXME: Set should either parse both old and new name, or neither.
	// The current prototype is inconsistent.
	if err := s.Set(newRepo, newTag, oldName, true); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// ContainerKill sends a signal to the container.
// If no signal is given (sig 0), then Kill with SIGKILL and wait
// for the container to exit.
// If a signal is given, then just send it to the container and return.
func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
	if n := len(job.Args); n < 1 || n > 2 {
		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
	}
	var (
		name = job.Args[0]
		sig  uint64
		err  error
	)

	// If we have a signal, look at it. Otherwise, do nothing
	if len(job.Args) == 2 && job.Args[1] != "" {
		// Check if we passed the signal as a number:
		// The largest legal signal is 31, so let's parse on 5 bits
		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
		if err != nil {
			// The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
			sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
		}

		if sig == 0 {
			return job.Errorf("Invalid signal: %s", job.Args[1])
		}
	}

	if container := daemon.Get(name); container != nil {
		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
			if err := container.Kill(); err != nil {
				return job.Errorf("Cannot kill container %s: %s", name, err)
			}
			container.LogEvent("kill")
		} else {
			// Otherwise, just send the requested signal
			if err := container.KillSig(int(sig)); err != nil {
				return job.Errorf("Cannot kill container %s: %s", name, err)
			}
			// FIXME: Add event for signals
		}
	} else {
		return job.Errorf("No such container: %s", name)
	}
	return engine.StatusOK
}
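// A sketch of kill invocations (the handler name "kill" is an assumption).
// Numeric and symbolic signals are equivalent, and the "SIG" prefix is
// optional because it is stripped before the SignalMap lookup:
//
//	eng.Job("kill", "mycontainer", "15").Run()      // numeric SIGTERM
//	eng.Job("kill", "mycontainer", "SIGTERM").Run() // symbolic, prefixed
//	eng.Job("kill", "mycontainer", "TERM").Run()    // symbolic, unprefixed
//	eng.Job("kill", "mycontainer").Run()            // no signal: SIGKILL + wait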
func LinkContainers(job *engine.Job) engine.Status {
	var (
		action       = job.Args[0]
		childIP      = job.Getenv("ChildIP")
		parentIP     = job.Getenv("ParentIP")
		ignoreErrors = job.GetenvBool("IgnoreErrors")
		ports        = job.GetenvList("Ports")
	)
	split := func(p string) (string, string) {
		parts := strings.Split(p, "/")
		return parts[0], parts[1]
	}

	for _, p := range ports {
		port, proto := split(p)
		if output, err := iptables.Raw(action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", proto,
			"-s", parentIP,
			"--dport", port,
			"-d", childIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggling iptables forward: %s", output)
		}

		if output, err := iptables.Raw(action, "FORWARD",
			"-i", bridgeIface, "-o", bridgeIface,
			"-p", proto,
			"-s", childIP,
			"--sport", port,
			"-d", parentIP,
			"-j", "ACCEPT"); !ignoreErrors && err != nil {
			return job.Error(err)
		} else if len(output) != 0 {
			return job.Errorf("Error toggling iptables forward: %s", output)
		}
	}
	return engine.StatusOK
}
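// A sketch of the environment LinkContainers expects (the handler name "link"
// is an assumption). The single argument is passed straight to iptables as the
// action ("-I" to insert the FORWARD rules, "-D" to delete them), and each
// Ports entry is a "port/proto" pair:
//
//	job := eng.Job("link", "-I")
//	job.Setenv("ParentIP", "172.17.0.2")
//	job.Setenv("ChildIP", "172.17.0.3")
//	job.SetenvList("Ports", []string{"6379/tcp"})
//	job.Run()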
// Release releases an interface for a selected IP.
func Release(job *engine.Job) engine.Status {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
	)

	if containerInterface == nil {
		return job.Errorf("No network information to release for %s", id)
	}

	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Infof("Unable to unmap port %s: %s", nat, err)
		}
	}

	if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
		log.Infof("Unable to release IP: %s", err)
	}
	return engine.StatusOK
}
func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
	if len(job.Args) != 3 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
	}
	name := job.Args[0]
	height, err := strconv.Atoi(job.Args[1])
	if err != nil {
		return job.Error(err)
	}
	width, err := strconv.Atoi(job.Args[2])
	if err != nil {
		return job.Error(err)
	}
	if container := daemon.Get(name); container != nil {
		if err := container.Resize(height, width); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
// Search queries the public registry for images matching the specified
// search terms, and returns the results.
//
// Argument syntax: search TERM
//
// Option environment:
//	'authConfig': json-encoded credentials to authenticate against the registry.
//		The search extends to images only accessible via the credentials.
//
//	'metaHeaders': extra HTTP headers to include in the request to the registry.
//		The headers should be passed as a json-encoded dictionary.
//
// Output:
//	Results are sent as a collection of structured messages (using engine.Table).
//	Each result is sent as a separate message.
//	Results are ordered by number of stars on the public registry.
func (s *Service) Search(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s TERM", job.Name)
	}
	var (
		term        = job.Args[0]
		metaHeaders = map[string][]string{}
		authConfig  = &AuthConfig{}
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", metaHeaders)

	hostname, term, err := ResolveRepositoryName(term)
	if err != nil {
		return job.Error(err)
	}
	hostname, err = ExpandAndVerifyRegistryUrl(hostname)
	if err != nil {
		return job.Error(err)
	}
	r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), hostname, true)
	if err != nil {
		return job.Error(err)
	}
	results, err := r.SearchRepositories(term)
	if err != nil {
		return job.Error(err)
	}
	outs := engine.NewTable("star_count", 0)
	for _, result := range results.Results {
		out := &engine.Env{}
		out.Import(result)
		outs.Add(out)
	}
	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
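// A sketch of a search invocation matching the option environment documented
// above (the handler name "search" and "eng" are assumptions):
//
//	job := eng.Job("search", "redis")
//	job.SetenvJson("authConfig", &AuthConfig{Username: "me", Password: "secret"})
//	job.SetenvJson("metaHeaders", map[string][]string{"X-Meta": {"value"}})
//	job.Stdout.Add(os.Stdout) // receives the engine.Table, sorted by star_count
//	job.Run()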
// CmdTarLayer returns the tarLayer of the image
func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		fs, err := image.TarLayer()
		if err != nil {
			return job.Error(err)
		}
		defer fs.Close()

		if written, err := io.Copy(job.Stdout, fs); err != nil {
			return job.Error(err)
		} else {
			log.Debugf("rendered layer for %s of [%d] size", image.ID, written)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
// CmdSet stores a new image in the graph.
// Images are stored in the graph using 4 elements:
//	- A user-defined ID
//	- A collection of metadata describing the image
//	- A directory tree stored as a tar archive (also called the "layer")
//	- A reference to a "parent" ID on top of which the layer should be applied
//
// NOTE: even though the parent ID is only useful in relation to the layer and how
// to apply it (ie you could represent the full directory tree as 'parent_layer + layer'),
// it is treated as a top-level property of the image. This is an artifact of early
// design and should probably be cleaned up in the future to simplify the design.
//
// Syntax: image_set ID
// Input:
//	- Layer content must be streamed in tar format on stdin. An empty input is
//	  valid and represents a nil layer.
//
//	- Image metadata must be passed in the command environment.
//	  'json': a json-encoded object with all image metadata.
//	  It will be stored as-is, without any encoding/decoding artifacts.
//	  That is a requirement of the current registry client implementation,
//	  because a re-encoded json might invalidate the image checksum at
//	  the next upload, even with functionally identical content.
func (s *TagStore) CmdSet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	var (
		imgJSON = []byte(job.Getenv("json"))
		layer   = job.Stdin
	)
	if len(imgJSON) == 0 {
		return job.Errorf("mandatory key 'json' is not set")
	}
	// We have to pass an *image.Image object, even though it will be completely
	// ignored in favor of the redundant json data.
	// FIXME: the current prototype of Graph.Register is stupid and redundant.
	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		return job.Error(err)
	}

	if err := s.graph.Register(imgJSON, layer, img); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
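// A sketch of an image_set invocation following the syntax above: the raw
// image JSON travels in the 'json' environment key and the layer is streamed
// over stdin (the file names and the image ID here are hypothetical):
//
//	layer, _ := os.Open("layer.tar")
//	imgJSON, _ := ioutil.ReadFile("image.json")
//	job := eng.Job("image_set", "77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182")
//	job.Setenv("json", string(imgJSON))
//	job.Stdin.Add(layer)
//	job.Run()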
func (s *TagStore) CmdHistory(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	name := job.Args[0]
	foundImage, err := s.LookupImage(name)
	if err != nil {
		return job.Error(err)
	}

	lookupMap := make(map[string][]string)
	for name, repository := range s.Repositories {
		for tag, id := range repository {
			// Initialize the reverse-lookup entry for this ID if it doesn't
			// exist yet, then record the "name:tag" reference.
			if _, exists := lookupMap[id]; !exists {
				lookupMap[id] = []string{}
			}
			lookupMap[id] = append(lookupMap[id], name+":"+tag)
		}
	}

	outs := engine.NewTable("Created", 0)
	err = foundImage.WalkHistory(func(img *image.Image) error {
		out := &engine.Env{}
		out.Set("Id", img.ID)
		out.SetInt64("Created", img.Created.Unix())
		out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
		out.SetList("Tags", lookupMap[img.ID])
		out.SetInt64("Size", img.Size)
		outs.Add(out)
		return nil
	})
	if err != nil {
		return job.Error(err)
	}
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// CmdGet returns information about an image.
// If the image doesn't exist, an empty object is returned, to allow
// checking for an image's existence.
func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	res := &engine.Env{}
	img, err := s.LookupImage(name)
	// Note: if the image doesn't exist, LookupImage returns
	// nil, nil.
	if err != nil {
		return job.Error(err)
	}
	if img != nil {
		// We don't directly expose all fields of the Image objects,
		// to maintain a clean public API which we can maintain over
		// time even if the underlying structure changes.
		// We should have done this with the Image object to begin with...
		// but we didn't, so now we're doing it here.
		//
		// Fields that we're probably better off not including:
		//	- Config/ContainerConfig. Those structs have the same sprawl problem,
		//	  so we shouldn't include them wholesale either.
		//	- Comment: initially created to fulfill the "every image is a git commit"
		//	  metaphor, in practice people either ignore it or use it as a
		//	  generic description field which it isn't. On deprecation shortlist.
		res.SetAuto("Created", img.Created)
		res.Set("Author", img.Author)
		res.Set("Os", img.OS)
		res.Set("Architecture", img.Architecture)
		res.Set("DockerVersion", img.DockerVersion)
		res.Set("Id", img.ID)
		res.Set("Parent", img.Parent)
	}
	res.WriteTo(job.Stdout)
	return engine.StatusOK
}
func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		data, err := container.Export()
		if err != nil {
			return job.Errorf("%s: %s", name, err)
		}
		defer data.Close()

		// Stream the entire contents of the container (basically a volatile snapshot)
		if _, err := io.Copy(job.Stdout, data); err != nil {
			return job.Errorf("%s: %s", name, err)
		}
		// FIXME: factor job-specific LogEvent to engine.Job.Run()
		container.LogEvent("export")
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}
	if err := container.Pause(); err != nil {
		return job.Errorf("Cannot pause container %s: %s", name, err)
	}
	container.LogEvent("pause")
	return engine.StatusOK
}
func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if !container.State.IsRunning() {
			return job.Errorf("Container already stopped")
		}
		if err := container.Stop(int(t)); err != nil {
			return job.Errorf("Cannot stop container %s: %s\n", name, err)
		}
		container.LogEvent("stop")
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
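// A sketch of a stop invocation (the handler name "stop" is an assumption).
// The "t" variable is the grace period in seconds before the stop escalates
// to a kill; it defaults to 10 when unset:
//
//	job := eng.Job("stop", "mycontainer")
//	job.SetenvInt("t", 30)
//	job.Run()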
func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if err := container.Restart(int(t)); err != nil {
			return job.Errorf("Cannot restart container %s: %s\n", name, err)
		}
		container.LogEvent("restart")
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name   = job.Args[0]
		stdout = job.GetenvBool("stdout")
		stderr = job.GetenvBool("stderr")
		tail   = job.Getenv("tail")
		follow = job.GetenvBool("follow")
		times  = job.GetenvBool("timestamps")
		lines  = -1
		format string
	)
	if !(stdout || stderr) {
		return job.Errorf("You must choose at least one stream")
	}
	if times {
		format = time.RFC3339Nano
	}
	if tail == "" {
		tail = "all"
	}
	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}
	cLog, err := container.ReadLog("json")
	if err != nil && os.IsNotExist(err) {
		// Legacy logs
		log.Debugf("Old logs format")
		if stdout {
			cLog, err := container.ReadLog("stdout")
			if err != nil {
				log.Errorf("Error reading logs (stdout): %s", err)
			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
				log.Errorf("Error streaming logs (stdout): %s", err)
			}
		}
		if stderr {
			cLog, err := container.ReadLog("stderr")
			if err != nil {
				log.Errorf("Error reading logs (stderr): %s", err)
			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
				log.Errorf("Error streaming logs (stderr): %s", err)
			}
		}
	} else if err != nil {
		log.Errorf("Error reading logs (json): %s", err)
	} else {
		if tail != "all" {
			var err error
			lines, err = strconv.Atoi(tail)
			if err != nil {
				log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
				lines = -1
			}
		}
		if lines != 0 {
			if lines > 0 {
				f := cLog.(*os.File)
				ls, err := tailfile.TailFile(f, lines)
				if err != nil {
					return job.Error(err)
				}
				tmp := bytes.NewBuffer([]byte{})
				for _, l := range ls {
					fmt.Fprintf(tmp, "%s\n", l)
				}
				cLog = tmp
			}
			dec := json.NewDecoder(cLog)
			for {
				l := &jsonlog.JSONLog{}
				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					log.Errorf("Error streaming logs: %s", err)
					break
				}
				logLine := l.Log
				if times {
					logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine)
				}
				if l.Stream == "stdout" && stdout {
					fmt.Fprintf(job.Stdout, "%s", logLine)
				}
				if l.Stream == "stderr" && stderr {
					fmt.Fprintf(job.Stderr, "%s", logLine)
				}
			}
		}
	}
	if follow {
		errors := make(chan error, 2)
		if stdout {
			stdoutPipe := container.StdoutLogPipe()
			go func() {
				errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format)
			}()
		}
		if stderr {
			stderrPipe := container.StderrLogPipe()
			go func() {
				errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format)
			}()
		}
		err := <-errors
		if err != nil {
			log.Errorf("%s", err)
		}
	}
	return engine.StatusOK
}
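// A sketch of a logs invocation (the handler name "logs" is an assumption),
// showing the boolean and string options the handler reads from its
// environment:
//
//	job := eng.Job("logs", "mycontainer")
//	job.SetenvBool("stdout", true)     // at least one of stdout/stderr is required
//	job.SetenvBool("stderr", true)
//	job.Setenv("tail", "100")          // "all" (default) or a line count
//	job.SetenvBool("follow", true)     // keep streaming after the backlog
//	job.SetenvBool("timestamps", true) // prefix lines with RFC3339Nano timestamps
//	job.Stdout.Add(os.Stdout)
//	job.Stderr.Add(os.Stderr)
//	job.Run()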