func (d *Driver) Status() [][2]string {
	s := d.DeviceSet.Status()

	status := [][2]string{
		{"Pool Name", s.PoolName},
		{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))},
		{"Backing Filesystem", backingFs},
		{"Data file", s.DataFile},
		{"Metadata file", s.MetadataFile},
		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
	}
	if len(s.DataLoopback) > 0 {
		status = append(status, [2]string{"Data loop file", s.DataLoopback})
	}
	if len(s.MetadataLoopback) > 0 {
		status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
	}
	if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
		status = append(status, [2]string{"Library Version", vStr})
	}
	return status
}
// CommitContainer commits a Docker container
func (c *DockerClient) CommitContainer(s State, message string) (*docker.Image, error) {
	commitOpts := docker.CommitContainerOptions{
		Container: s.NoCache.ContainerID,
		Message:   message,
		Run:       &s.Config,
	}

	c.log.Debugf("Commit container: %# v", pretty.Formatter(commitOpts))

	image, err := c.client.CommitContainer(commitOpts)
	if err != nil {
		return nil, err
	}

	// Inspect the image to get the real size
	c.log.Debugf("Inspect image %s", image.ID)

	if image, err = c.client.InspectImage(image.ID); err != nil {
		return nil, err
	}

	size := fmt.Sprintf("%s (+%s)",
		units.HumanSize(float64(image.VirtualSize)),
		units.HumanSize(float64(image.Size)),
	)

	c.log.WithFields(logrus.Fields{
		"size": size,
	}).Infof("| Result image is %.12s", image.ID)

	return image, nil
}
func (p *JSONProgress) String() string {
	var (
		width       = 200
		pbBox       string
		numbersBox  string
		timeLeftBox string
	)

	ws, err := term.GetWinsize(p.terminalFd)
	if err == nil {
		width = int(ws.Width)
	}

	if p.Current <= 0 && p.Total <= 0 {
		return ""
	}
	current := units.HumanSize(float64(p.Current))
	if p.Total <= 0 {
		return fmt.Sprintf("%8v", current)
	}
	total := units.HumanSize(float64(p.Total))
	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
	if percentage > 50 {
		percentage = 50
	}
	if width > 110 {
		// this number can't be negative gh#7136
		numSpaces := 0
		if 50-percentage > 0 {
			numSpaces = 50 - percentage - 1
		}
		pbBox = fmt.Sprintf("[%s%s%s] ", strings.Repeat("~", percentage), p.Animal, strings.Repeat("~", numSpaces))
	}

	numbersBox = fmt.Sprintf("%8v/%v", current, total)

	if p.Current > p.Total {
		// remove total display if the reported current is wonky.
		numbersBox = fmt.Sprintf("%8v", current)
	}

	if p.Current > 0 && p.Start > 0 && percentage < 50 {
		fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
		perEntry := fromStart / time.Duration(p.Current)
		left := time.Duration(p.Total-p.Current) * perEntry
		left = (left / time.Second) * time.Second

		if width > 50 {
			timeLeftBox = " " + left.String()
		}
	}
	return pbBox + numbersBox + timeLeftBox
}
func (c *containerContext) Size() string {
	c.addHeader(sizeHeader)
	srw := units.HumanSize(float64(c.c.SizeRw))
	sv := units.HumanSize(float64(c.c.SizeRootFs))

	sf := srw
	if c.c.SizeRootFs > 0 {
		sf = fmt.Sprintf("%s (virtual %s)", srw, sv)
	}
	return sf
}
func (b *Build) probeCache(s State) (cachedState State, hit bool, err error) {
	if b.cache == nil || s.NoCache.CacheBusted {
		return s, false, nil
	}

	var s2 *State
	if s2, err = b.cache.Get(s); err != nil {
		return s, false, err
	}
	if s2 == nil {
		s.NoCache.CacheBusted = true
		log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached"))
		return s, false, nil
	}

	if b.cfg.ReloadCache {
		defer b.cache.Del(*s2)
		s.NoCache.CacheBusted = true
		log.Info(color.New(color.FgYellow).SprintFunc()("| Reload cache"))
		return s, false, nil
	}

	var img *docker.Image
	if img, err = b.client.InspectImage(s2.ImageID); err != nil {
		return s, true, err
	}
	if img == nil {
		defer b.cache.Del(*s2)
		s.NoCache.CacheBusted = true
		log.Info(color.New(color.FgYellow).SprintFunc()("| Not cached"))
		return s, false, nil
	}

	size := fmt.Sprintf("%s (+%s)",
		units.HumanSize(float64(img.VirtualSize)),
		units.HumanSize(float64(img.Size)),
	)

	log.WithFields(log.Fields{
		"size": size,
	}).Infof(color.New(color.FgGreen).SprintfFunc()("| Cached! Take image %.12s", s2.ImageID))

	// Store some stuff to the build
	b.ProducedSize += img.Size
	b.VirtualSize = img.VirtualSize

	// Keep items that should not be cached from the previous state
	s2.NoCache = s.NoCache

	// We don't want commits to go through the cache
	s2.CleanCommits()

	return *s2, true, nil
}
func (s *containerStats) Display(w io.Writer) error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.err != nil {
		return s.err
	}
	fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n",
		s.Name,
		s.CPUPercentage,
		units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit),
		s.MemoryPercentage,
		units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx))
	return nil
}
func (d *Driver) Status() [][2]string {
	s := d.DeviceSet.Status()

	status := [][2]string{
		{"Pool Name", s.PoolName},
		{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(int64(s.SectorSize)))},
		{"Data file", s.DataLoopback},
		{"Metadata file", s.MetadataLoopback},
		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Used)))},
		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Total)))},
		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))},
		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))},
	}
	return status
}
func (h *Handler) serveTemplate(w http.ResponseWriter, r *http.Request) {
	templateDir := path.Join("/src", "templates")
	lp := path.Join(templateDir, "layout.html")

	// set up custom functions
	funcMap := template.FuncMap{
		"ext": func(name string) string {
			return strings.TrimPrefix(filepath.Ext(name), ".")
		},
		"base": func(name string) string {
			parts := strings.Split(name, "/")
			return parts[len(parts)-1]
		},
		"size": func(s int64) string {
			return units.HumanSize(float64(s))
		},
	}

	// parse & execute the template
	tmpl := template.Must(template.New("").Funcs(funcMap).ParseFiles(lp))
	if err := tmpl.ExecuteTemplate(w, "layout", h.Files); err != nil {
		writeError(w, fmt.Sprintf("Execute template failed: %v", err))
		return
	}
}
func (e *Export) PrintHistory() {
	current := e.Root()
	order := []*ExportedImage{}
	for {
		order = append(order, current)
		current = e.ChildOf(current.LayerConfig.Id)
		if current == nil {
			break
		}
	}

	for i := 0; i < len(order); i++ {
		stat, err := os.Stat(order[i].LayerTarPath)
		size := int64(-1)
		if stat != nil && err == nil {
			size = stat.Size()
		}

		cmd := strings.Join(order[i].LayerConfig.ContainerConfig().Cmd, " ")
		if len(cmd) > 60 {
			cmd = cmd[0:57] + "..."
		}

		debug(" - ", order[i].LayerConfig.Id[0:12],
			humanDuration(time.Now().UTC().Sub(order[i].LayerConfig.Created.UTC())),
			cmd, units.HumanSize(float64(size)))
	}
}
// Print all the SUSE-based images, in a format that is as close to the
// `docker` command's output as possible.
func printImages(imgs []*dockerclient.Image) {
	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	fmt.Fprintf(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE\n")

	cache := getCacheFile()

	for counter, img := range imgs {
		fmt.Printf("Inspecting image %d/%d\r", (counter + 1), len(imgs))
		if cache.isSUSE(img.Id) {
			if len(img.RepoTags) < 1 {
				continue
			}
			id := stringid.TruncateID(img.Id)
			size := units.HumanSize(float64(img.VirtualSize))

			for _, tag := range img.RepoTags {
				t := strings.SplitN(tag, ":", 2)
				fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", t[0], t[1], id, timeAgo(img.Created), size)
			}
		}
	}
	fmt.Printf("\n")

	_ = w.Flush()
	cache.flush()
}
// CmdHistory shows the history of an image.
//
// Usage: docker history [OPTIONS] IMAGE
func (cli *DockerCli) CmdHistory(args ...string) error {
	cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true)
	human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format")
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
	cmd.Require(flag.Exact, 1)

	cmd.ParseFlags(args, true)

	serverResp, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil)
	if err != nil {
		return err
	}

	defer serverResp.body.Close()

	history := []types.ImageHistory{}
	if err := json.NewDecoder(serverResp.body).Decode(&history); err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	if !*quiet {
		fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
	}

	for _, entry := range history {
		if *noTrunc {
			fmt.Fprintf(w, entry.ID)
		} else {
			fmt.Fprintf(w, stringid.TruncateID(entry.ID))
		}
		if !*quiet {
			if *human {
				fmt.Fprintf(w, "\t%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))))
			} else {
				fmt.Fprintf(w, "\t%s\t", time.Unix(entry.Created, 0).Format(time.RFC3339))
			}

			if *noTrunc {
				fmt.Fprintf(w, "%s\t", strings.Replace(entry.CreatedBy, "\t", " ", -1))
			} else {
				fmt.Fprintf(w, "%s\t", stringutils.Truncate(strings.Replace(entry.CreatedBy, "\t", " ", -1), 45))
			}

			if *human {
				fmt.Fprintf(w, "%s\t", units.HumanSize(float64(entry.Size)))
			} else {
				fmt.Fprintf(w, "%d\t", entry.Size)
			}

			fmt.Fprintf(w, "%s", entry.Comment)
		}
		fmt.Fprintf(w, "\n")
	}
	w.Flush()
	return nil
}
// Execute runs the command
func (c *CommandFrom) Execute(b *Build) (s State, err error) {
	// TODO: for "scratch" image we may use /images/create

	if len(c.cfg.args) != 1 {
		return s, fmt.Errorf("FROM requires one argument")
	}

	var (
		img  *docker.Image
		name = c.cfg.args[0]
	)

	if name == "scratch" {
		s.NoBaseImage = true
		return s, nil
	}

	if img, err = b.lookupImage(name); err != nil {
		return s, fmt.Errorf("FROM error: %s", err)
	}
	if img == nil {
		return s, fmt.Errorf("FROM: image %s not found", name)
	}

	// We want to report the size of the FROM image. Ideally the client would
	// do this, but there is no obvious way to do so without duplicating
	// InspectImage calls and adding unnecessary functions.
	log.WithFields(log.Fields{
		"size": units.HumanSize(float64(img.VirtualSize)),
	}).Infof("| Image %.12s", img.ID)

	s = b.state
	s.ImageID = img.ID

	s.Config = docker.Config{}
	if img.Config != nil {
		s.Config = *img.Config
	}

	b.ProducedSize = 0
	b.VirtualSize = img.VirtualSize

	// If we don't have OnBuild triggers, then we are done
	if len(s.Config.OnBuild) == 0 {
		return s, nil
	}

	log.Infof("| Found %d ONBUILD triggers", len(s.Config.OnBuild))

	// Remove them from the config, since the config will be committed.
	s.InjectCommands = s.Config.OnBuild
	s.Config.OnBuild = []string{}

	return s, nil
}
func (d *Driver) Status() [][2]string {
	s := d.DeviceSet.Status()

	status := [][2]string{
		{"Pool Name", s.PoolName},
		{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(int64(s.SectorSize)))},
		{"Data file", s.DataLoopback},
		{"Metadata file", s.MetadataLoopback},
		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Used)))},
		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Data.Total)))},
		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Used)))},
		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(int64(s.Metadata.Total)))},
	}
	if vStr, err := GetLibraryVersion(); err == nil {
		status = append(status, [2]string{"Library Version", vStr})
	}
	return status
}
// CmdHistory shows the history of an image.
//
// Usage: docker history [OPTIONS] IMAGE
func (cli *DockerCli) CmdHistory(args ...string) error {
	cmd := Cli.Subcmd("history", []string{"IMAGE"}, Cli.DockerCommands["history"].Description, true)
	human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format")
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output")
	cmd.Require(flag.Exact, 1)

	cmd.ParseFlags(args, true)

	history, err := cli.client.ImageHistory(cmd.Arg(0))
	if err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)

	if *quiet {
		for _, entry := range history {
			if *noTrunc {
				fmt.Fprintf(w, "%s\n", entry.ID)
			} else {
				fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID))
			}
		}
		w.Flush()
		return nil
	}

	var imageID string
	var createdBy string
	var created string
	var size string

	fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
	for _, entry := range history {
		imageID = entry.ID
		createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1)
		if !*noTrunc {
			createdBy = stringutils.Truncate(createdBy, 45)
			imageID = stringid.TruncateID(entry.ID)
		}

		if *human {
			created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago"
			size = units.HumanSize(float64(entry.Size))
		} else {
			created = time.Unix(entry.Created, 0).Format(time.RFC3339)
			size = strconv.FormatInt(entry.Size, 10)
		}

		fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment)
	}
	w.Flush()
	return nil
}
func (c *containerData) housekeeping() {
	// Long housekeeping is either 100ms or half of the housekeeping interval.
	longHousekeeping := 100 * time.Millisecond
	if *HousekeepingInterval/2 < longHousekeeping {
		longHousekeeping = *HousekeepingInterval / 2
	}

	// Housekeep every second.
	glog.Infof("Start housekeeping for container %q\n", c.info.Name)
	lastHousekeeping := time.Now()
	for {
		select {
		case <-c.stop:
			// Stop housekeeping when signaled.
			return
		default:
			// Perform housekeeping.
			start := time.Now()
			c.housekeepingTick()

			// Log if housekeeping took too long.
			duration := time.Since(start)
			if duration >= longHousekeeping {
				glog.V(2).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
			}
		}

		// Log usage if asked to do so.
		if c.logUsage {
			stats, err := c.storageDriver.RecentStats(c.info.Name, 2)
			if err != nil {
				if c.allowErrorLogging() {
					glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
				}
			} else if len(stats) < 2 {
				// Ignore, not enough stats yet.
			} else {
				usageCpuNs := stats[1].Cpu.Usage.Total - stats[0].Cpu.Usage.Total
				usageMemory := stats[1].Memory.Usage

				usageInCores := float64(usageCpuNs) / float64(stats[1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
				usageInHuman := units.HumanSize(int64(usageMemory))
				glog.Infof("[%s] %.3f cores, %s of memory", c.info.Name, usageInCores, usageInHuman)
			}
		}

		// Schedule the next housekeeping. Sleep until that time.
		nextHousekeeping := c.nextHousekeeping(lastHousekeeping)
		if time.Now().Before(nextHousekeeping) {
			time.Sleep(nextHousekeeping.Sub(time.Now()))
		}
		lastHousekeeping = nextHousekeeping
	}
}
// CommitContainer commits a Docker container
func (c *DockerClient) CommitContainer(s *State) (*docker.Image, error) {
	commitOpts := docker.CommitContainerOptions{
		Container: s.NoCache.ContainerID,
		Run:       &s.Config,
	}

	c.log.Debugf("Commit container: %# v", pretty.Formatter(commitOpts))

	image, err := c.client.CommitContainer(commitOpts)
	if err != nil {
		return nil, err
	}

	// Inspect the image to get the real size
	c.log.Debugf("Inspect image %s", image.ID)

	if image, err = c.client.InspectImage(image.ID); err != nil {
		return nil, err
	}

	s.ParentSize = s.Size
	s.Size = image.VirtualSize

	fields := logrus.Fields{}
	if c.useHumanSize {
		size := fmt.Sprintf("%s (+%s)",
			units.HumanSize(float64(s.Size)),
			units.HumanSize(float64(s.Size-s.ParentSize)),
		)
		fields["size"] = size
	} else {
		fields["size"] = s.Size
		fields["delta"] = s.Size - s.ParentSize
	}

	c.log.WithFields(fields).Infof("| Result image is %.12s", image.ID)

	return image, nil
}
func (p *JSONProgress) String() string {
	var (
		width       = 200
		pbBox       string
		numbersBox  string
		timeLeftBox string
	)

	ws, err := term.GetWinsize(p.terminalFd)
	if err == nil {
		width = int(ws.Width)
	}

	if p.Current <= 0 && p.Total <= 0 {
		return ""
	}
	current := units.HumanSize(int64(p.Current))
	if p.Total <= 0 {
		return fmt.Sprintf("%8v", current)
	}
	total := units.HumanSize(int64(p.Total))
	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
	// Clamp so strings.Repeat below never receives a negative count
	// when the reported Current exceeds Total.
	if percentage > 50 {
		percentage = 50
	}
	if width > 110 {
		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", 50-percentage))
	}
	numbersBox = fmt.Sprintf("%8v/%v", current, total)

	if p.Current > 0 && p.Start > 0 && percentage < 50 {
		fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
		perEntry := fromStart / time.Duration(p.Current)
		left := time.Duration(p.Total-p.Current) * perEntry
		left = (left / time.Second) * time.Second

		if width > 50 {
			timeLeftBox = " " + left.String()
		}
	}
	return pbBox + numbersBox + timeLeftBox
}
// CmdHistory shows the history of an image.
//
// Usage: docker history [OPTIONS] IMAGE
func (cli *DockerCli) CmdHistory(args ...string) error {
	cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image", true)
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
	cmd.Require(flag.Exact, 1)

	utils.ParseFlags(cmd, args, true)

	body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil))
	if err != nil {
		return err
	}

	outs := engine.NewTable("Created", 0)
	if _, err := outs.ReadListFrom(body); err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	if !*quiet {
		fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
	}

	for _, out := range outs.Data {
		outID := out.Get("Id")
		if !*quiet {
			if *noTrunc {
				fmt.Fprintf(w, "%s\t", outID)
			} else {
				fmt.Fprintf(w, "%s\t", stringid.TruncateID(outID))
			}

			fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))

			if *noTrunc {
				fmt.Fprintf(w, "%s\t", out.Get("CreatedBy"))
			} else {
				fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45))
			}
			fmt.Fprintf(w, "%s\n", units.HumanSize(float64(out.GetInt64("Size"))))
		} else {
			if *noTrunc {
				fmt.Fprintln(w, outID)
			} else {
				fmt.Fprintln(w, stringid.TruncateID(outID))
			}
		}
	}
	w.Flush()
	return nil
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
	var imageID string
	if noTrunc {
		imageID = image.Get("Id")
	} else {
		imageID = stringid.TruncateID(image.Get("Id"))
	}

	fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize"))))
	if image.GetList("RepoTags")[0] != "<none>:<none>" {
		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", "))
	} else {
		fmt.Fprint(cli.out, "\n")
	}
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printTreeNode(noTrunc bool, image *types.Image, prefix string) {
	var imageID string
	if noTrunc {
		imageID = image.ID
	} else {
		imageID = stringid.TruncateID(image.ID)
	}

	fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.VirtualSize)))
	if image.RepoTags[0] != "<none>:<none>" {
		fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ", "))
	} else {
		fmt.Fprint(cli.out, "\n")
	}
}
func describeImage(image *imageapi.Image, imageName string) (string, error) {
	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, image.ObjectMeta)
		formatString(out, "Docker Image", image.DockerImageReference)
		if len(imageName) > 0 {
			formatString(out, "Image Name", imageName)
		}
		formatString(out, "Parent Image", image.DockerImageMetadata.Parent)
		formatString(out, "Layer Size", units.HumanSize(float64(image.DockerImageMetadata.Size)))
		formatString(out, "Image Created", fmt.Sprintf("%s ago", formatRelativeTime(image.DockerImageMetadata.Created.Time)))
		formatString(out, "Author", image.DockerImageMetadata.Author)
		formatString(out, "Arch", image.DockerImageMetadata.Architecture)
		describeDockerImage(out, image.DockerImageMetadata.Config)
		return nil
	})
}
// CmdHistory shows the history of an image.
//
// Usage: docker history [OPTIONS] IMAGE
func (cli *DockerCli) CmdHistory(args ...string) error {
	cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image", true)
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
	cmd.Require(flag.Exact, 1)

	cmd.ParseFlags(args, true)

	rdr, _, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, nil)
	if err != nil {
		return err
	}

	history := []types.ImageHistory{}
	err = json.NewDecoder(rdr).Decode(&history)
	if err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	if !*quiet {
		fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT")
	}

	for _, entry := range history {
		if *noTrunc {
			fmt.Fprintf(w, entry.ID)
		} else {
			fmt.Fprintf(w, stringid.TruncateID(entry.ID))
		}
		if !*quiet {
			fmt.Fprintf(w, "\t%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))))

			if *noTrunc {
				fmt.Fprintf(w, "%s\t", entry.CreatedBy)
			} else {
				fmt.Fprintf(w, "%s\t", utils.Trunc(entry.CreatedBy, 45))
			}

			fmt.Fprintf(w, "%s\t", units.HumanSize(float64(entry.Size)))
			fmt.Fprintf(w, "%s", entry.Comment)
		}
		fmt.Fprintf(w, "\n")
	}
	w.Flush()
	return nil
}
// Render container stats
func (r *statsRenderer) Render() string {
	s := r.stats
	buf := bytes.NewBufferString("")
	w := tabwriter.NewWriter(buf, 22, 0, 1, ' ', 0)
	io.WriteString(w, "<green>CONTAINER\tCOMMAND\t%%CPU\tMEM USAGE / LIMIT\t%%MEM\tNET I/O\tBLOCK I/O</>\n")
	io.WriteString(
		w,
		fmt.Sprintf("<white>%s\t%s\t%.2f\t%s / %s\t%.2f\t%s / %s\t%s / %s</>\n",
			s.CID,
			strutil.Resize(s.Command, 20),
			s.CPUPercentage,
			units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit),
			s.MemoryPercentage,
			units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx),
			units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite)))
	w.Flush()
	return buf.String()
}
func runHistory(cmd *types.Command, args []string) {
	if historyHelp {
		cmd.PrintUsage()
	}
	if len(args) != 1 {
		cmd.PrintShortUsage()
	}

	imageID := cmd.API.GetImageID(args[0], true)
	image, err := cmd.API.GetImage(imageID)
	if err != nil {
		log.Fatalf("Cannot get image %s: %v", imageID, err)
	}

	if imagesQ {
		fmt.Println(imageID)
		return
	}

	w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
	defer w.Flush()
	fmt.Fprintf(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\n")

	identifier := utils.TruncIf(image.Identifier, 8, !historyNoTrunc)

	creationDate, err := time.Parse("2006-01-02T15:04:05.000000+00:00", image.CreationDate)
	if err != nil {
		log.Fatalf("Unable to parse creation date from the Scaleway API: %v", err)
	}
	creationDateStr := units.HumanDuration(time.Now().UTC().Sub(creationDate))

	volumeName := utils.TruncIf(image.RootVolume.Name, 25, !historyNoTrunc)
	size := units.HumanSize(float64(image.RootVolume.Size))

	fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", identifier, creationDateStr, volumeName, size)
}
func describeImage(image *imageapi.Image, imageName string) (string, error) {
	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, image.ObjectMeta)
		formatString(out, "Docker Image", image.DockerImageReference)
		if len(imageName) > 0 {
			formatString(out, "Image Name", imageName)
		}
		switch l := len(image.DockerImageLayers); l {
		case 0:
			// legacy case, server does not know individual layers
			formatString(out, "Layer Size", units.HumanSize(float64(image.DockerImageMetadata.Size)))
		case 1:
			formatString(out, "Image Size", units.HumanSize(float64(image.DockerImageMetadata.Size)))
		default:
			info := []string{}
			if image.DockerImageLayers[0].LayerSize > 0 {
				info = append(info, fmt.Sprintf("first layer %s", units.HumanSize(float64(image.DockerImageLayers[0].LayerSize))))
			}
			for i := l - 1; i > 0; i-- {
				if image.DockerImageLayers[i].LayerSize == 0 {
					continue
				}
				info = append(info, fmt.Sprintf("last binary layer %s", units.HumanSize(float64(image.DockerImageLayers[i].LayerSize))))
				break
			}
			if len(info) > 0 {
				formatString(out, "Image Size", fmt.Sprintf("%s (%s)", units.HumanSize(float64(image.DockerImageMetadata.Size)), strings.Join(info, ", ")))
			} else {
				formatString(out, "Image Size", units.HumanSize(float64(image.DockerImageMetadata.Size)))
			}
		}
		//formatString(out, "Parent Image", image.DockerImageMetadata.Parent)
		formatString(out, "Image Created", fmt.Sprintf("%s ago", formatRelativeTime(image.DockerImageMetadata.Created.Time)))
		formatString(out, "Author", image.DockerImageMetadata.Author)
		formatString(out, "Arch", image.DockerImageMetadata.Architecture)
		describeDockerImage(out, image.DockerImageMetadata.Config)
		return nil
	})
}
// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified.
//
// Usage: docker images [OPTIONS] [REPOSITORY]
func (cli *DockerCli) CmdImages(args ...string) error {
	cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true)
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
	noTrunc := cmd.Bool([]string{"-no-trunc"}, false, "Don't truncate output")
	showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")

	flFilter := opts.NewListOpts(nil)
	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
	cmd.Require(flag.Max, 1)

	cmd.ParseFlags(args, true)

	// Consolidate all filter flags, and sanity check them early.
	// They'll get processed in the daemon/server.
	imageFilterArgs := filters.NewArgs()
	for _, f := range flFilter.GetAll() {
		var err error
		imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs)
		if err != nil {
			return err
		}
	}

	var matchName string
	if cmd.NArg() == 1 {
		matchName = cmd.Arg(0)
	}

	options := types.ImageListOptions{
		MatchName: matchName,
		All:       *all,
		Filters:   imageFilterArgs,
	}

	images, err := cli.client.ImageList(options)
	if err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	if !*quiet {
		if *showDigests {
			fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tSIZE")
		} else {
			fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tSIZE")
		}
	}

	for _, image := range images {
		ID := image.ID
		if !*noTrunc {
			ID = stringid.TruncateID(ID)
		}

		repoTags := image.RepoTags
		repoDigests := image.RepoDigests

		if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
			// dangling image - clear out either repoTags or repoDigests so we only show it once below
			repoDigests = []string{}
		}

		// combine the tags and digests lists
		tagsAndDigests := append(repoTags, repoDigests...)
		for _, repoAndRef := range tagsAndDigests {
			// default repo, tag, and digest to none - if there's a value, it'll be set below
			repo := "<none>"
			tag := "<none>"
			digest := "<none>"

			if !strings.HasPrefix(repoAndRef, "<none>") {
				ref, err := reference.ParseNamed(repoAndRef)
				if err != nil {
					return err
				}
				repo = ref.Name()

				switch x := ref.(type) {
				case reference.Digested:
					digest = x.Digest().String()
				case reference.Tagged:
					tag = x.Tag()
				}
			}

			if !*quiet {
				if *showDigests {
					fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.Size)))
				} else {
					fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.Size)))
				}
			} else {
				fmt.Fprintln(w, ID)
			}
		}
	}

	if !*quiet {
		w.Flush()
	}
	return nil
}
// TODO(vmarmol): Implement stats collecting as a custom collector.
func (c *containerData) housekeeping() {
	// Long housekeeping is either 100ms or half of the housekeeping interval.
	longHousekeeping := 100 * time.Millisecond
	if *HousekeepingInterval/2 < longHousekeeping {
		longHousekeeping = *HousekeepingInterval / 2
	}

	// Housekeep every second.
	glog.V(3).Infof("Start housekeeping for container %q\n", c.info.Name)
	lastHousekeeping := time.Now()
	for {
		select {
		case <-c.stop:
			// Cleanup container resources before stopping housekeeping.
			c.handler.Cleanup()
			// Stop housekeeping when signaled.
			return
		default:
			// Perform housekeeping.
			start := time.Now()
			c.housekeepingTick()

			// Log if housekeeping took too long.
			duration := time.Since(start)
			if duration >= longHousekeeping {
				glog.V(3).Infof("[%s] Housekeeping took %s", c.info.Name, duration)
			}
		}

		// Log usage if asked to do so.
		if c.logUsage {
			const numSamples = 60
			var empty time.Time
			stats, err := c.memoryCache.RecentStats(c.info.Name, empty, empty, numSamples)
			if err != nil {
				if c.allowErrorLogging() {
					glog.Infof("[%s] Failed to get recent stats for logging usage: %v", c.info.Name, err)
				}
			} else if len(stats) < numSamples {
				// Ignore, not enough stats yet.
			} else {
				usageCpuNs := uint64(0)
				for i := range stats {
					if i > 0 {
						usageCpuNs += (stats[i].Cpu.Usage.Total - stats[i-1].Cpu.Usage.Total)
					}
				}
				usageMemory := stats[numSamples-1].Memory.Usage

				instantUsageInCores := float64(stats[numSamples-1].Cpu.Usage.Total-stats[numSamples-2].Cpu.Usage.Total) / float64(stats[numSamples-1].Timestamp.Sub(stats[numSamples-2].Timestamp).Nanoseconds())
				usageInCores := float64(usageCpuNs) / float64(stats[numSamples-1].Timestamp.Sub(stats[0].Timestamp).Nanoseconds())
				usageInHuman := units.HumanSize(float64(usageMemory))
				glog.Infof("[%s] %.3f cores (average: %.3f cores), %s of memory", c.info.Name, instantUsageInCores, usageInCores, usageInHuman)
			}
		}

		next := c.nextHousekeeping(lastHousekeeping)

		// Schedule the next housekeeping. Sleep until that time.
		if time.Now().Before(next) {
			time.Sleep(next.Sub(time.Now()))
		} else {
			next = time.Now()
		}
		lastHousekeeping = next
	}
}
// CmdImages lists the images in a specified repository, or all top-level images if no repository is specified.
//
// Usage: docker images [OPTIONS] [REPOSITORY]
func (cli *DockerCli) CmdImages(args ...string) error {
	cmd := Cli.Subcmd("images", []string{"[REPOSITORY[:TAG]]"}, Cli.DockerCommands["images"].Description, true)
	quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
	all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)")
	noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
	showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests")

	flFilter := opts.NewListOpts(nil)
	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")
	cmd.Require(flag.Max, 1)

	cmd.ParseFlags(args, true)

	// Consolidate all filter flags, and sanity check them early.
	// They'll get processed in the daemon/server.
	imageFilterArgs := filters.Args{}
	for _, f := range flFilter.GetAll() {
		var err error
		imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs)
		if err != nil {
			return err
		}
	}

	matchName := cmd.Arg(0)

	v := url.Values{}
	if len(imageFilterArgs) > 0 {
		filterJSON, err := filters.ToParam(imageFilterArgs)
		if err != nil {
			return err
		}
		v.Set("filters", filterJSON)
	}

	if cmd.NArg() == 1 {
		// FIXME rename this parameter, to not be confused with the filters flag
		v.Set("filter", matchName)
	}
	if *all {
		v.Set("all", "1")
	}

	serverResp, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil)
	if err != nil {
		return err
	}

	defer serverResp.body.Close()

	images := []types.Image{}
	if err := json.NewDecoder(serverResp.body).Decode(&images); err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	if !*quiet {
		if *showDigests {
			fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
		} else {
			fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
		}
	}

	for _, image := range images {
		ID := image.ID
		if !*noTrunc {
			ID = stringid.TruncateID(ID)
		}

		repoTags := image.RepoTags
		repoDigests := image.RepoDigests

		if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
			// dangling image - clear out either repoTags or repoDigests so we only show it once below
			repoDigests = []string{}
		}

		// combine the tags and digests lists
		tagsAndDigests := append(repoTags, repoDigests...)
		for _, repoAndRef := range tagsAndDigests {
			repo, ref := parsers.ParseRepositoryTag(repoAndRef)
			// default tag and digest to none - if there's a value, it'll be set below
			tag := "<none>"
			digest := "<none>"
			if utils.DigestReference(ref) {
				digest = ref
			} else {
				tag = ref
			}

			if !*quiet {
				if *showDigests {
					fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
				} else {
					fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize)))
				}
			} else {
				fmt.Fprintln(w, ID)
			}
		}
	}

	if !*quiet {
		w.Flush()
	}
	return nil
}
// CmdPs outputs a list of Docker containers.
//
// Usage: docker ps [OPTIONS]
func (cli *DockerCli) CmdPs(args ...string) error {
	var (
		err error

		psFilterArgs = filters.Args{}
		v            = url.Values{}

		cmd      = cli.Subcmd("ps", nil, "List containers", true)
		quiet    = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
		size     = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes")
		all      = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)")
		noTrunc  = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
		nLatest  = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running")
		since    = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running")
		before   = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name")
		last     = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running")
		flFilter = opts.NewListOpts(nil)
	)
	cmd.Require(flag.Exact, 0)

	cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided")

	cmd.ParseFlags(args, true)
	if *last == -1 && *nLatest {
		*last = 1
	}

	if *all {
		v.Set("all", "1")
	}
	if *last != -1 {
		v.Set("limit", strconv.Itoa(*last))
	}
	if *since != "" {
		v.Set("since", *since)
	}
	if *before != "" {
		v.Set("before", *before)
	}
	if *size {
		v.Set("size", "1")
	}

	// Consolidate all filter flags, and sanity check them.
	// They'll get processed in the daemon/server.
	for _, f := range flFilter.GetAll() {
		if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil {
			return err
		}
	}

	if len(psFilterArgs) > 0 {
		filterJSON, err := filters.ToParam(psFilterArgs)
		if err != nil {
			return err
		}
		v.Set("filters", filterJSON)
	}

	serverResp, err := cli.call("GET", "/containers/json?"+v.Encode(), nil, nil)
	if err != nil {
		return err
	}

	defer serverResp.body.Close()

	containers := []types.Container{}
	if err := json.NewDecoder(serverResp.body).Decode(&containers); err != nil {
		return err
	}

	w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
	if !*quiet {
		fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")

		if *size {
			fmt.Fprintln(w, "\tSIZE")
		} else {
			fmt.Fprint(w, "\n")
		}
	}

	stripNamePrefix := func(ss []string) []string {
		for i, s := range ss {
			ss[i] = s[1:]
		}
		return ss
	}

	for _, container := range containers {
		ID := container.ID

		if !*noTrunc {
			ID = stringid.TruncateID(ID)
		}

		if *quiet {
			fmt.Fprintln(w, ID)
			continue
		}

		var (
			names       = stripNamePrefix(container.Names)
			command     = strconv.Quote(container.Command)
			displayPort string
		)

		if !*noTrunc {
			command = stringutils.Truncate(command, 20)

			// only display the default name for the container unless --no-trunc is passed
			for _, name := range names {
				if len(strings.Split(name, "/")) == 1 {
					names = []string{name}
					break
				}
			}
		}

		image := container.Image
		if image == "" {
			image = "<no image>"
		}

		if container.HostConfig.NetworkMode == "host" {
			displayPort = "*/tcp, */udp"
		} else {
			displayPort = api.DisplayablePorts(container.Ports)
		}

		fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", ID, image, command,
			units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(container.Created), 0))),
			container.Status, displayPort, strings.Join(names, ","))

		if *size {
			if container.SizeRootFs > 0 {
				fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(float64(container.SizeRw)), units.HumanSize(float64(container.SizeRootFs)))
			} else {
				fmt.Fprintf(w, "%s\n", units.HumanSize(float64(container.SizeRw)))
			}
			continue
		}

		fmt.Fprint(w, "\n")
	}

	if !*quiet {
		w.Flush()
	}
	return nil
}
func runImages(cmd *types.Command, args []string) {
	if imagesHelp {
		cmd.PrintUsage()
	}
	if len(args) != 0 {
		cmd.PrintShortUsage()
	}

	wg := sync.WaitGroup{}
	chEntries := make(chan api.ScalewayImageInterface)
	var entries = []api.ScalewayImageInterface{}

	wg.Add(1)
	go func() {
		defer wg.Done()
		images, err := cmd.API.GetImages()
		if err != nil {
			log.Fatalf("unable to fetch images from the Scaleway API: %v", err)
		}
		for _, val := range *images {
			creationDate, err := time.Parse("2006-01-02T15:04:05.000000+00:00", val.CreationDate)
			if err != nil {
				log.Fatalf("unable to parse creation date from the Scaleway API: %v", err)
			}
			chEntries <- api.ScalewayImageInterface{
				Type:         "image",
				CreationDate: creationDate,
				Identifier:   val.Identifier,
				Name:         val.Name,
				Public:       val.Public,
				Tag:          "latest",
				VirtualSize:  float64(val.RootVolume.Size),
			}
		}
	}()

	if imagesA {
		wg.Add(1)
		go func() {
			defer wg.Done()
			snapshots, err := cmd.API.GetSnapshots()
			if err != nil {
				log.Fatalf("unable to fetch snapshots from the Scaleway API: %v", err)
			}
			for _, val := range *snapshots {
				creationDate, err := time.Parse("2006-01-02T15:04:05.000000+00:00", val.CreationDate)
				if err != nil {
					log.Fatalf("unable to parse creation date from the Scaleway API: %v", err)
				}
				chEntries <- api.ScalewayImageInterface{
					Type:         "snapshot",
					CreationDate: creationDate,
					Identifier:   val.Identifier,
					Name:         val.Name,
					Tag:          "<snapshot>",
					VirtualSize:  float64(val.Size),
					Public:       false,
				}
			}
		}()

		wg.Add(1)
		go func() {
			defer wg.Done()
			bootscripts, err := cmd.API.GetBootscripts()
			if err != nil {
				log.Fatalf("unable to fetch bootscripts from the Scaleway API: %v", err)
			}
			for _, val := range *bootscripts {
				chEntries <- api.ScalewayImageInterface{
					Type:       "bootscript",
					Identifier: val.Identifier,
					Name:       val.Title,
					Tag:        "<bootscript>",
					Public:     false,
				}
			}
		}()

		wg.Add(1)
		go func() {
			defer wg.Done()
			volumes, err := cmd.API.GetVolumes()
			if err != nil {
				log.Fatalf("unable to fetch volumes from the Scaleway API: %v", err)
			}
			for _, val := range *volumes {
				creationDate, err := time.Parse("2006-01-02T15:04:05.000000+00:00", val.CreationDate)
				if err != nil {
					log.Fatalf("unable to parse creation date from the Scaleway API: %v", err)
				}
				chEntries <- api.ScalewayImageInterface{
					Type:         "volume",
					CreationDate: creationDate,
					Identifier:   val.Identifier,
					Name:         val.Name,
					Tag:          "<volume>",
					VirtualSize:  float64(val.Size),
					Public:       false,
				}
			}
		}()
	}

	go func() {
		wg.Wait()
		close(chEntries)
	}()

	done := false
	for {
		select {
		case entry, ok := <-chEntries:
			if !ok {
				done = true
				break
			}
			entries = append(entries, entry)
		}
		if done {
			break
		}
	}

	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	defer w.Flush()
	if !imagesQ {
		fmt.Fprintf(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE\n")
	}
	sort.Sort(api.ByCreationDate(entries))
	for _, image := range entries {
		if imagesQ {
			fmt.Fprintf(w, "%s\n", image.Identifier)
		} else {
			tag := image.Tag
			shortID := utils.TruncIf(image.Identifier, 8, !imagesNoTrunc)
			name := utils.Wordify(image.Name)
			if !image.Public && image.Type == "image" {
				name = "user/" + name
			}
			shortName := utils.TruncIf(name, 25, !imagesNoTrunc)
			var creationDate, virtualSize string
			if image.CreationDate.IsZero() {
				creationDate = "n/a"
			} else {
				creationDate = units.HumanDuration(time.Now().UTC().Sub(image.CreationDate))
			}
			if image.VirtualSize == 0 {
				virtualSize = "n/a"
			} else {
				virtualSize = units.HumanSize(image.VirtualSize)
			}
			fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", shortName, tag, shortID, creationDate, virtualSize)
		}
	}
}
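// All of the snippets above ultimately call the same two helpers, HumanSize and
// HumanDuration, from the go-units package. Below is a minimal, self-contained
// sketch of that shared call pattern; it is not taken from any of the projects
// above. The import path and the float64 signature are assumptions based on the
// current github.com/docker/go-units API (older vendored copies took an int64,
// as some snippets above still do).
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	// HumanSize renders a byte count with a decimal (base-1000) unit suffix.
	fmt.Println(units.HumanSize(float64(123456789))) // e.g. "123.5 MB"

	// HumanDuration renders an elapsed time in the coarse style used by
	// `docker ps` and `docker images` ("About an hour", "3 days", ...).
	created := time.Now().Add(-26 * time.Hour)
	fmt.Println(units.HumanDuration(time.Now().UTC().Sub(created)) + " ago")
}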