// CmdViz renders the image graph as a GraphViz "digraph" document on stdout.
func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
	images, _ := s.graph.Map()
	if images == nil {
		return engine.StatusOK
	}
	job.Stdout.Write([]byte("digraph docker {\n"))

	var (
		parentImage *image.Image
		err         error
	)
	for _, image := range images {
		parentImage, err = image.GetParent()
		if err != nil {
			return job.Errorf("Error while getting parent image: %v", err)
		}
		if parentImage != nil {
			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
		} else {
			job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
		}
	}

	for id, repos := range s.GetRepoRefs() {
		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
	}
	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
	return engine.StatusOK
}
// ContainerChanges lists the filesystem changes (added, modified and deleted
// files) of a container and writes them to stdout as an engine table.
func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		outs := engine.NewTable("", 0)
		changes, err := container.Changes()
		if err != nil {
			return job.Error(err)
		}
		for _, change := range changes {
			out := &engine.Env{}
			if err := out.Import(change); err != nil {
				return job.Error(err)
			}
			outs.Add(out)
		}
		if _, err := outs.WriteListTo(job.Stdout); err != nil {
			return job.Error(err)
		}
	} else {
		return job.Errorf("No such container: %s", name)
	}
	return engine.StatusOK
}
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          *net.IP
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: *ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}
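// Illustrative sketch (not part of the original source): the env written above
// gives the caller everything needed to configure the container side of the
// interface. Serialization details aside, with the conventional docker0 defaults
// (172.17.42.1/16, assumed here only for the example) the key/value pairs would
// look roughly like:
//
//	"IP":          "172.17.0.5"
//	"Mask":        "ffff0000"
//	"Gateway":     "172.17.42.1"
//	"Bridge":      "docker0"
//	"IPPrefixLen": 16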
// CmdLookup returns an image encoded in JSON
func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		if job.GetenvBool("raw") {
			b, err := image.RawJson()
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", image.ID)
		out.Set("Parent", image.Parent)
		out.Set("Comment", image.Comment)
		out.SetAuto("Created", image.Created)
		out.Set("Container", image.Container)
		out.SetJson("ContainerConfig", image.ContainerConfig)
		out.Set("DockerVersion", image.DockerVersion)
		out.Set("Author", image.Author)
		out.SetJson("Config", image.Config)
		out.Set("Architecture", image.Architecture)
		out.Set("Os", image.OS)
		out.SetInt64("Size", image.Size)
		if _, err = out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
// CmdInfo reports system-wide information about the daemon: counters, storage
// and execution drivers, kernel and OS details, and init paths.
func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
	images, _ := daemon.Graph().Map()
	var imgcount int
	if images == nil {
		imgcount = 0
	} else {
		imgcount = len(images)
	}
	kernelVersion := "<unknown>"
	if kv, err := kernel.GetKernelVersion(); err == nil {
		kernelVersion = kv.String()
	}

	operatingSystem := "<unknown>"
	if s, err := operatingsystem.GetOperatingSystem(); err == nil {
		operatingSystem = s
	}
	if inContainer, err := operatingsystem.IsContainerized(); err != nil {
		log.Errorf("Could not determine if daemon is containerized: %v", err)
		operatingSystem += " (error determining if containerized)"
	} else if inContainer {
		operatingSystem += " (containerized)"
	}

	// if we still have the original dockerinit binary from before we copied it
	// locally, let's return the path to that, since that's more intuitive
	// (the copied path is trivial to derive by hand given VERSION)
	initPath := utils.DockerInitPath("")
	if initPath == "" {
		// if that fails, we'll just return the path from the daemon
		initPath = daemon.SystemInitPath()
	}

	cjob := job.Eng.Job("subscribers_count")
	env, _ := cjob.Stdout.AddEnv()
	if err := cjob.Run(); err != nil {
		return job.Error(err)
	}

	v := &engine.Env{}
	v.SetInt("Containers", len(daemon.List()))
	v.SetInt("Images", imgcount)
	v.Set("Driver", daemon.GraphDriver().String())
	v.SetJson("DriverStatus", daemon.GraphDriver().Status())
	v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit)
	v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit)
	v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled)
	v.SetBool("Debug", os.Getenv("DEBUG") != "")
	v.SetInt("NFd", utils.GetTotalUsedFds())
	v.SetInt("NGoroutines", runtime.NumGoroutine())
	v.Set("ExecutionDriver", daemon.ExecutionDriver().Name())
	v.SetInt("NEventsListener", env.GetInt("count"))
	v.Set("KernelVersion", kernelVersion)
	v.Set("OperatingSystem", operatingSystem)
	v.Set("IndexServerAddress", registry.IndexServerAddress())
	v.Set("InitSha1", dockerversion.INITSHA1)
	v.Set("InitPath", initPath)
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// Log broadcasts an event (action, id, from) to all subscribers without
// blocking the caller.
func (e *Events) Log(job *engine.Job) engine.Status {
	if len(job.Args) != 3 {
		return job.Errorf("usage: %s ACTION ID FROM", job.Name)
	}
	// not waiting for receivers
	go e.log(job.Args[0], job.Args[1], job.Args[2])
	return engine.StatusOK
}
// ContainerInspect writes low-level information about a container as an
// engine.Env, or the raw JSON document when the "raw" flag is set.
func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		container.Lock()
		defer container.Unlock()
		if job.GetenvBool("raw") {
			b, err := json.Marshal(&struct {
				*Container
				HostConfig *runconfig.HostConfig
			}{container, container.hostConfig})
			if err != nil {
				return job.Error(err)
			}
			job.Stdout.Write(b)
			return engine.StatusOK
		}

		out := &engine.Env{}
		out.Set("Id", container.ID)
		out.SetAuto("Created", container.Created)
		out.Set("Path", container.Path)
		out.SetList("Args", container.Args)
		out.SetJson("Config", container.Config)
		out.SetJson("State", container.State)
		out.Set("Image", container.Image)
		out.SetJson("NetworkSettings", container.NetworkSettings)
		out.Set("ResolvConfPath", container.ResolvConfPath)
		out.Set("HostnamePath", container.HostnamePath)
		out.Set("HostsPath", container.HostsPath)
		out.Set("Name", container.Name)
		out.Set("Driver", container.Driver)
		out.Set("ExecDriver", container.ExecDriver)
		out.Set("MountLabel", container.MountLabel)
		out.Set("ProcessLabel", container.ProcessLabel)
		out.SetJson("Volumes", container.Volumes)
		out.SetJson("VolumesRW", container.VolumesRW)

		if children, err := daemon.Children(container.Name); err == nil {
			for linkAlias, child := range children {
				container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
			}
		}

		out.SetJson("HostConfig", container.hostConfig)

		container.hostConfig.Links = nil
		if _, err := out.WriteTo(job.Stdout); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
// ContainerCopy streams a file or directory from a container's filesystem to stdout.
func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
	}

	var (
		name     = job.Args[0]
		resource = job.Args[1]
	)

	if container := daemon.Get(name); container != nil {
		data, err := container.Copy(resource)
		if err != nil {
			return job.Error(err)
		}
		defer data.Close()

		if _, err := io.Copy(job.Stdout, data); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
// builtin jobs independent of any subsystem
func dockerVersion(job *engine.Job) engine.Status {
	v := &engine.Env{}
	v.SetJson("Version", dockerversion.VERSION)
	v.SetJson("ApiVersion", api.APIVERSION)
	v.Set("GitCommit", dockerversion.GITCOMMIT)
	v.Set("GoVersion", runtime.Version())
	v.Set("Os", runtime.GOOS)
	v.Set("Arch", runtime.GOARCH)
	if kernelVersion, err := kernel.GetKernelVersion(); err == nil {
		v.Set("KernelVersion", kernelVersion.String())
	}
	if _, err := v.WriteTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// ImageDelete removes an image and writes the list of deleted and untagged
// entries to stdout as an engine table.
func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	imgs := engine.NewTable("", 0)
	if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
		return job.Error(err)
	}
	if len(imgs.Data) == 0 {
		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
	}
	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// CmdTag assigns a new name and tag to an existing image. If the tag already exists,
// it is changed and the image previously referenced by the tag loses that reference.
// This may cause the old image to be garbage-collected if its reference count reaches zero.
//
// Syntax: image_tag NEWNAME OLDNAME
// Example: image_tag shykes/myapp:latest shykes/myapp:1.42.0
func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
	if len(job.Args) != 2 {
		return job.Errorf("usage: %s NEWNAME OLDNAME", job.Name)
	}
	var (
		newName = job.Args[0]
		oldName = job.Args[1]
	)

	newRepo, newTag := parsers.ParseRepositoryTag(newName)
	// FIXME: Set should either parse both old and new name, or neither.
	// the current prototype is inconsistent.
	if err := s.Set(newRepo, newTag, oldName, true); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// ContainerKill sends a signal to the container.
// If no signal is given (sig 0), then Kill with SIGKILL and wait
// for the container to exit.
// If a signal is given, then just send it to the container and return.
func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
	if n := len(job.Args); n < 1 || n > 2 {
		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
	}
	var (
		name = job.Args[0]
		sig  uint64
		err  error
	)

	// If we have a signal, look at it. Otherwise, do nothing
	if len(job.Args) == 2 && job.Args[1] != "" {
		// Check if we passed the signal as a number:
		// The largest legal signal is 31, so let's parse on 5 bits
		sig, err = strconv.ParseUint(job.Args[1], 10, 5)
		if err != nil {
			// The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
			sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
		}

		if sig == 0 {
			return job.Errorf("Invalid signal: %s", job.Args[1])
		}
	}

	if container := daemon.Get(name); container != nil {
		// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
		if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
			if err := container.Kill(); err != nil {
				return job.Errorf("Cannot kill container %s: %s", name, err)
			}
			container.LogEvent("kill")
		} else {
			// Otherwise, just send the requested signal
			if err := container.KillSig(int(sig)); err != nil {
				return job.Errorf("Cannot kill container %s: %s", name, err)
			}
			// FIXME: Add event for signals
		}
	} else {
		return job.Errorf("No such container: %s", name)
	}
	return engine.StatusOK
}
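// Illustrative sketch (not part of the original source): the SIGNAL argument may
// be numeric or named, with or without the "SIG" prefix, so the following job
// invocations are equivalent. "kill" is the job name this handler is assumed to
// be registered under; Eng.Job and Run are used elsewhere in this code.
//
//	eng.Job("kill", containerID, "9").Run()
//	eng.Job("kill", containerID, "KILL").Run()
//	eng.Job("kill", containerID, "SIGKILL").Run()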
// ContainerRestart stops and starts a container again, waiting up to "t" seconds
// (10 by default) for it to stop before killing it.
func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if err := container.Restart(int(t)); err != nil {
			return job.Errorf("Cannot restart container %s: %s\n", name, err)
		}
		container.LogEvent("restart")
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
// Release an interface for a selected IP
func Release(job *engine.Job) engine.Status {
	var (
		id                 = job.Args[0]
		containerInterface = currentInterfaces.Get(id)
	)

	if containerInterface == nil {
		return job.Errorf("No network information to release for %s", id)
	}

	for _, nat := range containerInterface.PortMappings {
		if err := portmapper.Unmap(nat); err != nil {
			log.Infof("Unable to unmap port %s: %s", nat, err)
		}
	}

	if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
		log.Infof("Unable to release ip %s", err)
	}
	return engine.StatusOK
}
// ContainerResize changes the size of the TTY attached to a container.
func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
	if len(job.Args) != 3 {
		return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
	}
	name := job.Args[0]
	height, err := strconv.Atoi(job.Args[1])
	if err != nil {
		return job.Error(err)
	}
	width, err := strconv.Atoi(job.Args[2])
	if err != nil {
		return job.Error(err)
	}
	if container := daemon.Get(name); container != nil {
		if err := container.Resize(height, width); err != nil {
			return job.Error(err)
		}
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
// ServeApi loops through all of the protocols sent in to docker and spawns
// off a goroutine to set up a serving http.Server for each.
func ServeApi(job *engine.Job) engine.Status {
	if len(job.Args) == 0 {
		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
	}
	var (
		protoAddrs = job.Args
		chErrors   = make(chan error, len(protoAddrs))
	)
	activationLock = make(chan struct{})

	for _, protoAddr := range protoAddrs {
		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
		if len(protoAddrParts) != 2 {
			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
		}
		// Pass proto and addr as arguments so each goroutine serves the address
		// from its own iteration rather than the last value of the loop variable.
		go func(proto, addr string) {
			log.Infof("Listening for HTTP on %s (%s)", proto, addr)
			chErrors <- ListenAndServe(proto, addr, job)
		}(protoAddrParts[0], protoAddrParts[1])
	}

	for i := 0; i < len(protoAddrs); i++ {
		err := <-chErrors
		if err != nil {
			return job.Error(err)
		}
	}

	return engine.StatusOK
}
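// Illustrative sketch (not part of the original source): each argument is one
// listener in PROTO://ADDR form, so a single invocation can serve a unix socket
// and a TCP port at the same time. The addresses below are conventional Docker
// defaults used only as an example, and "serveapi" is the job name this handler
// is assumed to be registered under:
//
//	serveapi unix:///var/run/docker.sock tcp://0.0.0.0:2375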
// CmdSet stores a new image in the graph.
// Images are stored in the graph using 4 elements:
// - A user-defined ID
// - A collection of metadata describing the image
// - A directory tree stored as a tar archive (also called the "layer")
// - A reference to a "parent" ID on top of which the layer should be applied
//
// NOTE: even though the parent ID is only useful in relation to the layer and how
// to apply it (ie you could represent the full directory tree as 'parent_layer + layer'),
// it is treated as a top-level property of the image. This is an artifact of early
// design and should probably be cleaned up in the future to simplify the design.
//
// Syntax: image_set ID
// Input:
// - Layer content must be streamed in tar format on stdin. An empty input is
//   valid and represents a nil layer.
//
// - Image metadata must be passed in the command environment.
//   'json': a json-encoded object with all image metadata.
//     It will be stored as-is, without any encoding/decoding artifacts.
//     That is a requirement of the current registry client implementation,
//     because a re-encoded json might invalidate the image checksum at
//     the next upload, even with functionally identical content.
func (s *TagStore) CmdSet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	var (
		imgJSON = []byte(job.Getenv("json"))
		layer   = job.Stdin
	)
	if len(imgJSON) == 0 {
		return job.Errorf("mandatory key 'json' is not set")
	}
	// We have to pass an *image.Image object, even though it will be completely
	// ignored in favor of the redundant json data.
	// FIXME: the current prototype of Graph.Register is stupid and redundant.
	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		return job.Error(err)
	}
	if err := s.graph.Register(imgJSON, layer, img); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
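// Illustrative sketch (not part of the original source): driving "image_set"
// from another component. Setenv and Stdin.Add are assumed helpers of the
// engine package's Job and Input types (Eng.Job and Run appear elsewhere in
// this code); rawImgJSON and layerTar are hypothetical inputs.
//
//	job := eng.Job("image_set", img.ID)
//	job.Setenv("json", string(rawImgJSON)) // stored verbatim to keep the checksum stable
//	job.Stdin.Add(layerTar)                // tar stream; an empty reader means a nil layer
//	if err := job.Run(); err != nil {
//		return err
//	}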
// CmdGet returns information about an image.
// If the image doesn't exist, an empty object is returned, to allow
// checking for an image's existence.
func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	res := &engine.Env{}
	img, err := s.LookupImage(name)
	// Note: if the image doesn't exist, LookupImage returns
	// nil, nil.
	if err != nil {
		return job.Error(err)
	}
	if img != nil {
		// We don't directly expose all fields of the Image objects,
		// to maintain a clean public API which we can maintain over
		// time even if the underlying structure changes.
		// We should have done this with the Image object to begin with...
		// but we didn't, so now we're doing it here.
		//
		// Fields that we're probably better off not including:
		// - Config/ContainerConfig. Those structs have the same sprawl problem,
		//   so we shouldn't include them wholesale either.
		// - Comment: initially created to fulfill the "every image is a git commit"
		//   metaphor, in practice people either ignore it or use it as a
		//   generic description field which it isn't. On deprecation shortlist.
		res.SetAuto("Created", img.Created)
		res.Set("Author", img.Author)
		res.Set("Os", img.OS)
		res.Set("Architecture", img.Architecture)
		res.Set("DockerVersion", img.DockerVersion)
		res.Set("Id", img.ID)
		res.Set("Parent", img.Parent)
	}
	res.WriteTo(job.Stdout)
	return engine.StatusOK
}
// ContainerWait blocks until the container stops, then writes its exit status to stdout.
func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		status, _ := container.State.WaitStop(-1 * time.Second)
		job.Printf("%d\n", status)
		return engine.StatusOK
	}
	return job.Errorf("%s: no such container: %s", job.Name, name)
}
// FIXME: merge into CmdTag above, and merge "image_tag" and "tag" into a single job.
func (s *TagStore) CmdTagLegacy(job *engine.Job) engine.Status {
	if len(job.Args) != 2 && len(job.Args) != 3 {
		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
	}
	var tag string
	if len(job.Args) == 3 {
		tag = job.Args[2]
	}
	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// CmdTarLayer returns the tarLayer of the image
func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	name := job.Args[0]
	if image, err := s.LookupImage(name); err == nil && image != nil {
		fs, err := image.TarLayer()
		if err != nil {
			return job.Error(err)
		}
		defer fs.Close()

		if written, err := io.Copy(job.Stdout, fs); err != nil {
			return job.Error(err)
		} else {
			log.Debugf("rendered layer for %s of [%d] size", image.ID, written)
		}

		return engine.StatusOK
	}
	return job.Errorf("No such image: %s", name)
}
// ContainerExport streams the full filesystem of a container to stdout as an archive.
func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s container_id", job.Name)
	}
	name := job.Args[0]
	if container := daemon.Get(name); container != nil {
		data, err := container.Export()
		if err != nil {
			return job.Errorf("%s: %s", name, err)
		}
		defer data.Close()

		// Stream the entire contents of the container (basically a volatile snapshot)
		if _, err := io.Copy(job.Stdout, data); err != nil {
			return job.Errorf("%s: %s", name, err)
		}
		// FIXME: factor job-specific LogEvent to engine.Job.Run()
		container.LogEvent("export")
		return engine.StatusOK
	}
	return job.Errorf("No such container: %s", name)
}
// Auth contacts the public registry with the provided credentials,
// and returns OK if authentication was successful.
// It can be used to verify the validity of a client's credentials.
func (s *Service) Auth(job *engine.Job) engine.Status {
	var (
		err        error
		authConfig = &AuthConfig{}
	)

	job.GetenvJson("authConfig", authConfig)
	// TODO: this is only done here because auth and registry need to be merged into one pkg
	if addr := authConfig.ServerAddress; addr != "" && addr != IndexServerAddress() {
		addr, err = ExpandAndVerifyRegistryUrl(addr)
		if err != nil {
			return job.Error(err)
		}
		authConfig.ServerAddress = addr
	}
	status, err := Login(authConfig, HTTPRequestFactory(nil))
	if err != nil {
		return job.Error(err)
	}
	job.Printf("%s\n", status)
	return engine.StatusOK
}
// ContainerPause suspends all processes in a container.
func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER", job.Name)
	}
	name := job.Args[0]
	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}
	if err := container.Pause(); err != nil {
		return job.Errorf("Cannot pause container %s: %s", name, err)
	}
	container.LogEvent("pause")
	return engine.StatusOK
}
// Get streams events to the caller: it first replays stored events in the
// requested [since, until] interval, then forwards new events until the
// "until" timeout fires or the subscription is closed.
func (e *Events) Get(job *engine.Job) engine.Status {
	var (
		since   = job.GetenvInt64("since")
		until   = job.GetenvInt64("until")
		timeout = time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
	)

	// If no until, disable timeout
	if until == 0 {
		timeout.Stop()
	}

	listener := make(chan *utils.JSONMessage)
	e.subscribe(listener)
	defer e.unsubscribe(listener)

	job.Stdout.Write(nil)

	// Resend every event in the [since, until] time interval.
	if since != 0 {
		if err := e.writeCurrent(job, since, until); err != nil {
			return job.Error(err)
		}
	}

	for {
		select {
		case event, ok := <-listener:
			if !ok {
				return engine.StatusOK
			}
			if err := writeEvent(job, event); err != nil {
				return job.Error(err)
			}
		case <-timeout.C:
			return engine.StatusOK
		}
	}
}
// CmdHistory walks an image's ancestry and writes one table row per layer
// (id, creation time, creating command, tags and size).
func (s *TagStore) CmdHistory(job *engine.Job) engine.Status {
	if n := len(job.Args); n != 1 {
		return job.Errorf("Usage: %s IMAGE", job.Name)
	}
	name := job.Args[0]
	foundImage, err := s.LookupImage(name)
	if err != nil {
		return job.Error(err)
	}

	lookupMap := make(map[string][]string)
	for name, repository := range s.Repositories {
		for tag, id := range repository {
			// If the ID already has a reverse lookup, do not update it unless for "latest"
			if _, exists := lookupMap[id]; !exists {
				lookupMap[id] = []string{}
			}
			lookupMap[id] = append(lookupMap[id], name+":"+tag)
		}
	}

	outs := engine.NewTable("Created", 0)
	err = foundImage.WalkHistory(func(img *image.Image) error {
		out := &engine.Env{}
		out.Set("Id", img.ID)
		out.SetInt64("Created", img.Created.Unix())
		out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
		out.SetList("Tags", lookupMap[img.ID])
		out.SetInt64("Size", img.Size)
		outs.Add(out)
		return nil
	})
	if err != nil {
		return job.Error(err)
	}
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// ContainerStop stops a running container, waiting up to "t" seconds
// (10 by default) for it to exit before killing it.
func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}
	var (
		name = job.Args[0]
		t    = 10
	)
	if job.EnvExists("t") {
		t = job.GetenvInt("t")
	}
	if container := daemon.Get(name); container != nil {
		if !container.State.IsRunning() {
			return job.Errorf("Container already stopped")
		}
		if err := container.Stop(int(t)); err != nil {
			return job.Errorf("Cannot stop container %s: %s\n", name, err)
		}
		container.LogEvent("stop")
	} else {
		return job.Errorf("No such container: %s\n", name)
	}
	return engine.StatusOK
}
// CmdImages lists images, optionally restricted to a repository name filter,
// to dangling (untagged) images only, or expanded to all intermediate layers.
func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
	var (
		allImages   map[string]*image.Image
		err         error
		filt_tagged = true
	)

	imageFilters, err := filters.FromParam(job.Getenv("filters"))
	if err != nil {
		return job.Error(err)
	}
	if i, ok := imageFilters["dangling"]; ok {
		for _, value := range i {
			if strings.ToLower(value) == "true" {
				filt_tagged = false
			}
		}
	}

	if job.GetenvBool("all") && filt_tagged {
		allImages, err = s.graph.Map()
	} else {
		allImages, err = s.graph.Heads()
	}
	if err != nil {
		return job.Error(err)
	}
	lookup := make(map[string]*engine.Env)
	s.Lock()
	for name, repository := range s.Repositories {
		if job.Getenv("filter") != "" {
			if match, _ := path.Match(job.Getenv("filter"), name); !match {
				continue
			}
		}
		for tag, id := range repository {
			image, err := s.graph.Get(id)
			if err != nil {
				log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
				continue
			}

			if out, exists := lookup[id]; exists {
				if filt_tagged {
					out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
				}
			} else {
				// get the boolean list for if only the untagged images are requested
				delete(allImages, id)
				if filt_tagged {
					out := &engine.Env{}
					out.Set("ParentId", image.Parent)
					out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
					out.Set("Id", image.ID)
					out.SetInt64("Created", image.Created.Unix())
					out.SetInt64("Size", image.Size)
					out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
					lookup[id] = out
				}
			}
		}
	}
	s.Unlock()

	outs := engine.NewTable("Created", len(lookup))
	for _, value := range lookup {
		outs.Add(value)
	}

	// Display images which aren't part of a repository/tag
	if job.Getenv("filter") == "" {
		for _, image := range allImages {
			out := &engine.Env{}
			out.Set("ParentId", image.Parent)
			out.SetList("RepoTags", []string{"<none>:<none>"})
			out.Set("Id", image.ID)
			out.SetInt64("Created", image.Created.Unix())
			out.SetInt64("Size", image.Size)
			out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
			outs.Add(out)
		}
	}

	outs.ReverseSort()
	if _, err := outs.WriteListTo(job.Stdout); err != nil {
		return job.Error(err)
	}
	return engine.StatusOK
}
// ContainerLogs writes a container's stdout/stderr logs to the job's streams,
// optionally with timestamps, tailing only the last N lines, and/or following
// new output as it is produced.
func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
	}

	var (
		name   = job.Args[0]
		stdout = job.GetenvBool("stdout")
		stderr = job.GetenvBool("stderr")
		tail   = job.Getenv("tail")
		follow = job.GetenvBool("follow")
		times  = job.GetenvBool("timestamps")
		lines  = -1
		format string
	)
	if !(stdout || stderr) {
		return job.Errorf("You must choose at least one stream")
	}
	if times {
		format = time.RFC3339Nano
	}
	if tail == "" {
		tail = "all"
	}
	container := daemon.Get(name)
	if container == nil {
		return job.Errorf("No such container: %s", name)
	}
	cLog, err := container.ReadLog("json")
	if err != nil && os.IsNotExist(err) {
		// Legacy logs
		log.Debugf("Old logs format")
		if stdout {
			cLog, err := container.ReadLog("stdout")
			if err != nil {
				log.Errorf("Error reading logs (stdout): %s", err)
			} else if _, err := io.Copy(job.Stdout, cLog); err != nil {
				log.Errorf("Error streaming logs (stdout): %s", err)
			}
		}
		if stderr {
			cLog, err := container.ReadLog("stderr")
			if err != nil {
				log.Errorf("Error reading logs (stderr): %s", err)
			} else if _, err := io.Copy(job.Stderr, cLog); err != nil {
				log.Errorf("Error streaming logs (stderr): %s", err)
			}
		}
	} else if err != nil {
		log.Errorf("Error reading logs (json): %s", err)
	} else {
		if tail != "all" {
			var err error
			lines, err = strconv.Atoi(tail)
			if err != nil {
				log.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err)
				lines = -1
			}
		}
		if lines != 0 {
			if lines > 0 {
				f := cLog.(*os.File)
				ls, err := tailfile.TailFile(f, lines)
				if err != nil {
					return job.Error(err)
				}
				tmp := bytes.NewBuffer([]byte{})
				for _, l := range ls {
					fmt.Fprintf(tmp, "%s\n", l)
				}
				cLog = tmp
			}
			dec := json.NewDecoder(cLog)
			for {
				l := &jsonlog.JSONLog{}

				if err := dec.Decode(l); err == io.EOF {
					break
				} else if err != nil {
					log.Errorf("Error streaming logs: %s", err)
					break
				}
				logLine := l.Log
				if times {
					logLine = fmt.Sprintf("%s %s", l.Created.Format(format), logLine)
				}
				if l.Stream == "stdout" && stdout {
					fmt.Fprintf(job.Stdout, "%s", logLine)
				}
				if l.Stream == "stderr" && stderr {
					fmt.Fprintf(job.Stderr, "%s", logLine)
				}
			}
		}
	}
	if follow {
		errors := make(chan error, 2)
		if stdout {
			stdoutPipe := container.StdoutLogPipe()
			go func() {
				errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format)
			}()
		}
		if stderr {
			stderrPipe := container.StderrLogPipe()
			go func() {
				errors <- jsonlog.WriteLog(stderrPipe, job.Stderr, format)
			}()
		}
		err := <-errors
		if err != nil {
			log.Errorf("%s", err)
		}
	}
	return engine.StatusOK
}
// CmdImageExport exports all images with the given tag. All versions
// containing the same tag are exported. The resulting output is an
// uncompressed tarball.
// name is the set of tags to export.
// out is the writer where the images are written to.
func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s IMAGE\n", job.Name)
	}
	name := job.Args[0]
	// get image json
	tempdir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return job.Error(err)
	}
	defer os.RemoveAll(tempdir)

	log.Debugf("Serializing %s", name)

	rootRepoMap := map[string]Repository{}
	rootRepo, err := s.Get(name)
	if err != nil {
		return job.Error(err)
	}
	if rootRepo != nil {
		// this is a base repo name, like 'busybox'
		for _, id := range rootRepo {
			if err := s.exportImage(job.Eng, id, tempdir); err != nil {
				return job.Error(err)
			}
		}
		rootRepoMap[name] = rootRepo
	} else {
		img, err := s.LookupImage(name)
		if err != nil {
			return job.Error(err)
		}
		if img != nil {
			// This is a named image like 'busybox:latest'
			repoName, repoTag := parsers.ParseRepositoryTag(name)
			if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
				return job.Error(err)
			}
			// check this length, because a lookup of a truncated hash will not have a tag
			// and will not need to be added to this map
			if len(repoTag) > 0 {
				rootRepoMap[repoName] = Repository{repoTag: img.ID}
			}
		} else {
			// this must be an ID that didn't get looked up just right?
			if err := s.exportImage(job.Eng, name, tempdir); err != nil {
				return job.Error(err)
			}
		}
	}
	// write repositories, if there is something to write
	if len(rootRepoMap) > 0 {
		rootRepoJson, _ := json.Marshal(rootRepoMap)
		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
			return job.Error(err)
		}
	} else {
		log.Debugf("There were no repositories to write")
	}

	fs, err := archive.Tar(tempdir, archive.Uncompressed)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()

	if _, err := io.Copy(job.Stdout, fs); err != nil {
		return job.Error(err)
	}
	log.Debugf("End Serializing %s", name)
	return engine.StatusOK
}
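// Illustrative sketch (not part of the original source): for a repository with a
// single "latest" tag, the "repositories" file written above would contain JSON
// along these lines (placeholder ID, shown only to make the layout concrete):
//
//	{"busybox": {"latest": "<full image ID>"}}
//
// The per-image directories in the tarball are produced by exportImage, which is
// not shown here.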