func main() { resources, err := getContainerResources(containerID) if err != nil { logrus.Fatalf("Getting container's configured resources failed: %v", err) } // create the writer w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0) printHeader := func() { fmt.Fprint(os.Stdout, "\033[2J") fmt.Fprint(os.Stdout, "\033[H") io.WriteString(w, "CPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n") } // collect the stats s := &containerStats{ clockTicksPerSecond: uint64(system.GetClockTicks()), bufReader: bufio.NewReaderSize(nil, 128), } go s.Collect(resources) for range time.Tick(5 * time.Second) { printHeader() if err := s.Display(w); err != nil { logrus.Errorf("Displaying stats failed: %v", err) } w.Flush() } }
// platformNewStatsCollector performs platform specific initialisation of the // statsCollector structure. func platformNewStatsCollector(s *statsCollector) { s.clockTicksPerSecond = uint64(system.GetClockTicks()) meminfo, err := sysinfo.ReadMemInfo() if err == nil && meminfo.MemTotal > 0 { s.machineMemory = uint64(meminfo.MemTotal) } }
// newStatsCollector returns a new statsCollector that collects
// network and cgroup stats for a registered container at the specified
// interval. The collector allows non-running containers to be added
// and will start processing stats when they are started.
func newStatsCollector(interval time.Duration) *statsCollector {
	s := &statsCollector{
		interval:            interval,
		publishers:          make(map[*Container]*pubsub.Publisher),
		clockTicksPerSecond: uint64(system.GetClockTicks()),
		// bufReader is a reusable scratch reader; Reset is presumably called
		// on it before each use — TODO confirm against statsCollector.run.
		bufReader: bufio.NewReaderSize(nil, 128),
	}
	// The collection loop runs in the background for the collector's lifetime.
	go s.run()
	return s
}
// newStatsCollector returns a new statsCollector that collects
// network and cgroup stats for a registered container at the specified
// interval. The collector allows non-running containers to be added
// and will start processing stats when they are started.
func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector {
	s := &statsCollector{
		interval:            interval,
		supervisor:          daemon,
		publishers:          make(map[*container.Container]*pubsub.Publisher),
		clockTicksPerSecond: uint64(system.GetClockTicks()),
		// bufReader is a reusable scratch reader; Reset is presumably called
		// on it before each use — TODO confirm against statsCollector.run.
		bufReader: bufio.NewReaderSize(nil, 128),
	}
	// Best effort: cache the machine's total memory for percentage
	// calculations; on failure machineMemory stays at its zero value.
	meminfo, err := sysinfo.ReadMemInfo()
	if err == nil && meminfo.MemTotal > 0 {
		s.machineMemory = uint64(meminfo.MemTotal)
	}
	// The collection loop runs in the background for the collector's lifetime.
	go s.run()
	return s
}
var ( // A mapping of directories on the host OS to attempt to embed inside each // task's chroot. chrootEnv = map[string]string{ "/bin": "/bin", "/etc": "/etc", "/lib": "/lib", "/lib32": "/lib32", "/lib64": "/lib64", "/run/resolvconf": "/run/resolvconf", "/sbin": "/sbin", "/usr": "******", } // clockTicks is the clocks per second of the machine clockTicks = uint64(system.GetClockTicks()) // The statistics the executor exposes when using cgroups ExecutorCgroupMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage", "Kernel Usage", "Kernel Max Usage"} ExecutorCgroupMeasuredCpuStats = []string{"System Mode", "User Mode", "Throttled Periods", "Throttled Time", "Percent"} ) // configureIsolation configures chroot and creates cgroups func (e *UniversalExecutor) configureIsolation() error { if e.command.FSIsolation { if err := e.configureChroot(); err != nil { return err } } if e.command.ResourceLimits {
package server

import (
	"fmt"

	"github.com/docker/containerd/api/grpc/types"
	"github.com/docker/containerd/specs"
	"github.com/docker/containerd/supervisor"
	"github.com/opencontainers/runc/libcontainer/system"
	ocs "github.com/opencontainers/runtime-spec/specs-go"
	"golang.org/x/net/context"
)

// clockTicksPerSecond is the machine's clock ticks per second, read once
// at package initialisation.
var clockTicksPerSecond = uint64(system.GetClockTicks())

// AddProcess builds an OCI process spec from the incoming gRPC request.
// NOTE(review): the method body is truncated in this view; the behavior
// after the field copies below cannot be confirmed from here.
func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) {
	// Copy the basic execution parameters from the request.
	process := &specs.ProcessSpec{
		Terminal: r.Terminal,
		Args:     r.Args,
		Env:      r.Env,
		Cwd:      r.Cwd,
	}
	// Translate the request's user identity into the OCI user struct.
	process.User = ocs.User{
		UID:            r.User.Uid,
		GID:            r.User.Gid,
		AdditionalGids: r.User.AdditionalGids,
	}
	// Carry over the security-related settings verbatim.
	process.Capabilities = r.Capabilities
	process.ApparmorProfile = r.ApparmorProfile
	process.SelinuxLabel = r.SelinuxLabel
	process.NoNewPrivileges = r.NoNewPrivileges