// runStats displays a live stream of resource usage statistics for one or more containers.
// This shows real-time information on CPU usage, memory usage, and network I/O.
func runStats(dockerCli *command.DockerCli, opts *statsOptions) error {
    showAll := len(opts.containers) == 0
    closeChan := make(chan error)

    ctx := context.Background()

    // monitorContainerEvents watches for container creation and removal (only
    // used when calling `docker stats` without arguments).
    monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) {
        f := filters.NewArgs()
        f.Add("type", "container")
        options := types.EventsOptions{
            Filters: f,
        }

        resBody, err := dockerCli.Client().Events(ctx, options)
        // Whether we successfully subscribed to events or not, we can now
        // unblock the main goroutine.
        close(started)
        if err != nil {
            closeChan <- err
            return
        }
        defer resBody.Close()

        system.DecodeEvents(resBody, func(event events.Message, err error) error {
            if err != nil {
                closeChan <- err
                return nil
            }
            c <- event
            return nil
        })
    }

    // waitFirst is a WaitGroup used to wait for the first stats sample to
    // arrive for each container.
    waitFirst := &sync.WaitGroup{}

    cStats := stats{}
    // getContainerList simulates creation events for all previously existing
    // containers (only used when calling `docker stats` without arguments).
    getContainerList := func() {
        options := types.ContainerListOptions{
            All: opts.all,
        }
        cs, err := dockerCli.Client().ContainerList(ctx, options)
        if err != nil {
            closeChan <- err
        }
        for _, container := range cs {
            s := &containerStats{Name: container.ID[:12]}
            if cStats.add(s) {
                waitFirst.Add(1)
                go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
            }
        }
    }

    if showAll {
        // If no names were specified, start a long-running goroutine which
        // monitors container events. We make sure we're subscribed before
        // retrieving the list of running containers to avoid a race where we
        // would "miss" a creation.
        started := make(chan struct{})
        eh := system.InitEventHandler()
        eh.Handle("create", func(e events.Message) {
            if opts.all {
                s := &containerStats{Name: e.ID[:12]}
                if cStats.add(s) {
                    waitFirst.Add(1)
                    go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
                }
            }
        })

        eh.Handle("start", func(e events.Message) {
            s := &containerStats{Name: e.ID[:12]}
            if cStats.add(s) {
                waitFirst.Add(1)
                go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
            }
        })

        eh.Handle("die", func(e events.Message) {
            if !opts.all {
                cStats.remove(e.ID[:12])
            }
        })

        eventChan := make(chan events.Message)
        go eh.Watch(eventChan)
        go monitorContainerEvents(started, eventChan)
        defer close(eventChan)
        <-started

        // Start a short-lived goroutine to retrieve the initial list of
        // containers.
        getContainerList()
    } else {
        // Artificially send creation events for the containers we were asked to
        // monitor (same code path as when monitoring all containers).
        for _, name := range opts.containers {
            s := &containerStats{Name: name}
            if cStats.add(s) {
                waitFirst.Add(1)
                go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
            }
        }

        // We don't expect any asynchronous errors: closeChan can be closed.
        close(closeChan)

        // Do a quick pause to detect any error with the provided list of
        // container names.
        time.Sleep(1500 * time.Millisecond)
        var errs []string
        cStats.mu.Lock()
        for _, c := range cStats.cs {
            c.mu.Lock()
            if c.err != nil {
                errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err))
            }
            c.mu.Unlock()
        }
        cStats.mu.Unlock()
        if len(errs) > 0 {
            return fmt.Errorf("%s", strings.Join(errs, ", "))
        }
    }

    // Before printing to screen, make sure each container has received at
    // least one valid stats sample.
    waitFirst.Wait()

    w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
    printHeader := func() {
        if !opts.noStream {
            fmt.Fprint(dockerCli.Out(), "\033[2J")
            fmt.Fprint(dockerCli.Out(), "\033[H")
        }
        io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n")
    }

    for range time.Tick(500 * time.Millisecond) {
        printHeader()
        toRemove := []string{}
        cStats.mu.Lock()
        for _, s := range cStats.cs {
            if err := s.Display(w); err != nil && !opts.noStream {
                logrus.Debugf("stats: got error for %s: %v", s.Name, err)
                if err == io.EOF {
                    toRemove = append(toRemove, s.Name)
                }
            }
        }
        cStats.mu.Unlock()
        for _, name := range toRemove {
            cStats.remove(name)
        }
        if len(cStats.cs) == 0 && !showAll {
            return nil
        }
        w.Flush()
        if opts.noStream {
            break
        }
        select {
        case err, ok := <-closeChan:
            if ok {
                if err != nil {
                    // Suppress "unexpected EOF" in the CLI when the daemon
                    // restarts, so that the command shuts down cleanly.
                    if err == io.ErrUnexpectedEOF {
                        return nil
                    }
                    return err
                }
            }
        default:
            // just skip
        }
    }
    return nil
}
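// Note: runStats relies on container-stats bookkeeping helpers (stats,
// containerStats) that are defined elsewhere in this package. A minimal
// sketch of their assumed shape, for illustration only; field and method
// names beyond those used above are assumptions:
//
//	// stats guards the list of monitored containers.
//	type stats struct {
//		mu sync.Mutex
//		cs []*containerStats
//	}
//
//	// add registers a container and reports whether it was newly added, so
//	// each container is collected only once; remove drops the entry whose
//	// Name matches the given id, e.g. after a "die" event.
//
//	// containerStats holds the latest sample and any collection error for a
//	// single container; Collect streams samples from the API and Display
//	// writes one formatted row to the tabwriter.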
func waitExitOrRemoved(dockerCli *command.DockerCli, ctx context.Context, containerID string, waitRemove bool) (chan int, error) {
    if len(containerID) == 0 {
        // containerID can never be empty
        panic("Internal Error: waitExitOrRemoved needs a containerID as parameter")
    }

    statusChan := make(chan int)
    exitCode := 125

    eventProcessor := func(e events.Message, err error) error {
        if err != nil {
            statusChan <- exitCode
            return fmt.Errorf("failed to decode event: %v", err)
        }

        stopProcessing := false
        switch e.Status {
        case "die":
            if v, ok := e.Actor.Attributes["exitCode"]; ok {
                code, cerr := strconv.Atoi(v)
                if cerr != nil {
                    logrus.Errorf("failed to convert exitcode %q to int: %v", v, cerr)
                } else {
                    exitCode = code
                }
            }
            if !waitRemove {
                stopProcessing = true
            }
        case "detach":
            exitCode = 0
            stopProcessing = true
        case "destroy":
            stopProcessing = true
        }

        if stopProcessing {
            statusChan <- exitCode
            // stop the loop processing
            return fmt.Errorf("done")
        }

        return nil
    }

    // Get events via Events API
    f := filters.NewArgs()
    f.Add("type", "container")
    f.Add("container", containerID)
    options := types.EventsOptions{
        Filters: f,
    }
    resBody, err := dockerCli.Client().Events(ctx, options)
    if err != nil {
        return nil, fmt.Errorf("can't get events from daemon: %v", err)
    }

    go system.DecodeEvents(resBody, eventProcessor)

    return statusChan, nil
}
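// Illustrative usage sketch (not part of the original file): how a caller such
// as `docker run` might consume the channel returned by waitExitOrRemoved. The
// surrounding setup (ctx, dockerCli, containerID) and the autoRemove flag are
// assumptions made for the example.
//
//	statusChan, err := waitExitOrRemoved(dockerCli, ctx, containerID, autoRemove)
//	if err != nil {
//		return err
//	}
//	// ... start and attach to the container ...
//	if status := <-statusChan; status != 0 {
//		return cli.StatusError{StatusCode: status}
//	}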