func execAction(context *cli.Context) {
	var exitCode int

	container, err := loadContainer()
	if err != nil {
		log.Fatal(err)
	}

	state, err := libcontainer.GetState(dataPath)
	if err != nil && !os.IsNotExist(err) {
		log.Fatalf("unable to read state.json: %s", err)
	}

	if state != nil {
		exitCode, err = runIn(container, state, []string(context.Args()))
	} else {
		exitCode, err = startContainer(container, dataPath, []string(context.Args()))
	}

	if err != nil {
		log.Fatalf("failed to exec: %s", err)
	}

	os.Exit(exitCode)
}
func Stats(stateFile string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
	state, err := libcontainer.GetState(stateFile)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, ErrNotRunning
		}
		return nil, err
	}

	now := time.Now()

	stats, err := libcontainer.GetStats(nil, state)
	if err != nil {
		return nil, err
	}

	// If the container does not have any memory limit specified, set the
	// limit to the machine's memory.
	memoryLimit := containerMemoryLimit
	if memoryLimit == 0 {
		memoryLimit = machineMemory
	}

	return &ResourceStats{
		Read:           now,
		ContainerStats: stats,
		MemoryLimit:    memoryLimit,
	}, nil
}
func (d *driver) Terminate(p *execdriver.Command) error {
	// Check the start time for the process.
	state, err := libcontainer.GetState(filepath.Join(d.root, p.ID))
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// TODO: Remove this part for version 1.2.0
		// This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0
		data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start"))
		if err != nil {
			// If we don't have the data on disk then we can assume the process is gone,
			// because this is only removed after we know the process has stopped.
			if os.IsNotExist(err) {
				return nil
			}
			return err
		}
		state = &libcontainer.State{InitStartTime: string(data)}
	}

	currentStartTime, err := system.GetProcessStartTime(p.ProcessConfig.Process.Pid)
	if err != nil {
		return err
	}

	if state.InitStartTime == currentStartTime {
		err = syscall.Kill(p.ProcessConfig.Process.Pid, 9)
		syscall.Wait4(p.ProcessConfig.Process.Pid, nil, 0, nil)
	}

	d.removeContainerRoot(p.ID)

	return err
}
// SetupContainer is run to set up mounts and networking related operations
// for a user namespace enabled process, as a user namespace root doesn't
// have permissions to perform these operations.
// The setup process joins all the namespaces of the user namespace enabled init
// except the user namespace, so it runs as root in the root user namespace
// to perform these operations.
func SetupContainer(container *libcontainer.Config, dataPath, uncleanRootfs, consolePath string) error {
	rootfs, err := utils.ResolveRootfs(uncleanRootfs)
	if err != nil {
		return err
	}

	// Clear the current process's environment and replace it with the
	// environment defined on the container.
	if err := LoadContainerEnvironment(container); err != nil {
		return err
	}

	state, err := libcontainer.GetState(dataPath)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("unable to read state: %s", err)
	}

	cloneFlags := GetNamespaceFlags(container.Namespaces)
	if (cloneFlags & syscall.CLONE_NEWNET) == 0 {
		if len(container.Networks) != 0 || len(container.Routes) != 0 {
			return fmt.Errorf("unable to apply network parameters without network namespace")
		}
	} else {
		if err := setupNetwork(container, &state.NetworkState); err != nil {
			return fmt.Errorf("setup networking %s", err)
		}
		if err := setupRoute(container); err != nil {
			return fmt.Errorf("setup route %s", err)
		}
	}

	label.Init()

	hostRootUid, err := GetHostRootUid(container)
	if err != nil {
		return fmt.Errorf("failed to get hostRootUid %s", err)
	}

	hostRootGid, err := GetHostRootGid(container)
	if err != nil {
		return fmt.Errorf("failed to get hostRootGid %s", err)
	}

	// InitializeMountNamespace() can be executed only for a new mount namespace.
	if (cloneFlags & syscall.CLONE_NEWNS) == 0 {
		if container.MountConfig != nil {
			return fmt.Errorf("mount config is set without mount namespace")
		}
	} else if err := mount.InitializeMountNamespace(rootfs, consolePath, container.RestrictSys, hostRootUid, hostRootGid, (*mount.MountConfig)(container.MountConfig)); err != nil {
		return fmt.Errorf("setup mount namespace %s", err)
	}

	return nil
}
// TODO(vishh): Add support for running in privileged mode and running as a different user.
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err != nil {
		return -1, fmt.Errorf("State unavailable for container with ID %s. The container may have been cleaned up already. Error: %s", c.ID, err)
	}

	var term execdriver.Terminal
	if processConfig.Tty {
		term, err = NewTtyConsole(processConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(processConfig, pipes)
	}
	processConfig.Terminal = term

	args := append([]string{processConfig.Entrypoint}, processConfig.Arguments...)

	return namespaces.ExecIn(active.container, state, args, os.Args[0], "exec",
		processConfig.Stdin, processConfig.Stdout, processConfig.Stderr, processConfig.Console,
		func(cmd *exec.Cmd) {
			if startCallback != nil {
				startCallback(&c.ProcessConfig, cmd.Process.Pid)
			}
		})
}
func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Command) error {
	if c.Network.HostNetworking {
		container.Namespaces["NEWNET"] = false
		return nil
	}

	container.Networks = []*libcontainer.Network{
		{
			Mtu:     c.Network.Mtu,
			Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0),
			Gateway: "localhost",
			Type:    "loopback",
		},
	}

	if c.Network.Interface != nil {
		vethNetwork := libcontainer.Network{
			Mtu:        c.Network.Mtu,
			Address:    fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
			MacAddress: c.Network.Interface.MacAddress,
			Gateway:    c.Network.Interface.Gateway,
			Type:       "veth",
			Bridge:     c.Network.Interface.Bridge,
			VethPrefix: "veth",
		}
		container.Networks = append(container.Networks, &vethNetwork)
	}

	if c.Network.ContainerID != "" {
		if d.driverType == execdriver.NativeBuiltin {
			d.Lock()
			active := d.activeContainers[c.Network.ContainerID]
			d.Unlock()

			if active == nil || active.cmd.Process == nil {
				return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
			}
			cmd := active.cmd

			nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
			container.Networks = append(container.Networks, &libcontainer.Network{
				Type:   "netns",
				NsPath: nspath,
			})
		} else {
			// external container
			state, err := libcontainer.GetState(filepath.Join(d.root, c.Network.ContainerID))
			if err != nil {
				return fmt.Errorf("Read container state error: %v", err)
			}

			nspath := filepath.Join("/proc", fmt.Sprint(state.InitPid), "ns", "net")
			container.Networks = append(container.Networks, &libcontainer.Network{
				Type:   "netns",
				NsPath: nspath,
			})
		}
	}

	return nil
}
func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
	c := d.activeContainers[id]

	state, err := libcontainer.GetState(filepath.Join(d.root, id))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, execdriver.ErrNotRunning
		}
		return nil, err
	}

	now := time.Now()

	stats, err := libcontainer.GetStats(nil, state)
	if err != nil {
		return nil, err
	}

	memoryLimit := c.container.Cgroups.Memory
	// If the container does not have any memory limit specified, set the
	// limit to the machine's memory.
	if memoryLimit == 0 {
		memoryLimit = d.machineMemory
	}

	return &execdriver.ResourceStats{
		Read:           now,
		ContainerStats: stats,
		MemoryLimit:    memoryLimit,
	}, nil
}
func execAction(context *cli.Context) {
	var exitCode int

	container, err := loadContainer()
	if err != nil {
		log.Fatal(err)
	}

	state, err := libcontainer.GetState(dataPath)
	if err != nil && !os.IsNotExist(err) {
		log.Fatalf("unable to read state.json: %s", err)
	}

	if state != nil {
		err = namespaces.ExecIn(container, state, []string(context.Args()))
	} else {
		term := namespaces.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty)
		exitCode, err = startContainer(container, term, dataPath, []string(context.Args()))
	}

	if err != nil {
		log.Fatalf("failed to exec: %s", err)
	}

	os.Exit(exitCode)
}
// IsRunning is determined by looking for the
// pid file for a container. If the file exists then the
// container is currently running.
func (i *info) IsRunning() bool {
	if _, err := libcontainer.GetState(filepath.Join(i.driver.root, i.ID)); err == nil {
		return true
	}
	// TODO: Remove this part for version 1.2.0
	// This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0
	if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil {
		return true
	}
	return false
}
func oomAction(context *cli.Context) {
	state, err := libcontainer.GetState(dataPath)
	if err != nil {
		log.Fatal(err)
	}
	n, err := libcontainer.NotifyOnOOM(state)
	if err != nil {
		log.Fatal(err)
	}
	for _ = range n {
		log.Printf("OOM notification received")
	}
}
func TestExecInRlimit(t *testing.T) {
	if testing.Short() {
		return
	}

	rootfs, err := newRootFs()
	if err != nil {
		t.Fatal(err)
	}
	defer remove(rootfs)

	config := newTemplateConfig(rootfs)
	if err := writeConfig(config); err != nil {
		t.Fatalf("failed to write config %s", err)
	}

	containerCmd, statePath, containerErr := startLongRunningContainer(config)
	defer func() {
		// kill the container
		if containerCmd.Process != nil {
			containerCmd.Process.Kill()
		}
		if err := <-containerErr; err != nil {
			t.Fatal(err)
		}
	}()

	// start the exec process
	state, err := libcontainer.GetState(statePath)
	if err != nil {
		t.Fatalf("failed to get state %s", err)
	}
	buffers := newStdBuffers()
	execErr := make(chan error, 1)
	go func() {
		_, err := namespaces.ExecIn(config, state, []string{"/bin/sh", "-c", "ulimit -n"},
			os.Args[0], "exec", buffers.Stdin, buffers.Stdout, buffers.Stderr, "", nil)
		execErr <- err
	}()
	if err := <-execErr; err != nil {
		t.Fatalf("exec finished with error %s", err)
	}

	out := buffers.Stdout.String()
	if limit := strings.TrimSpace(out); limit != "1024" {
		t.Fatalf("expected rlimit to be 1024, got %s", limit)
	}
}
func TestExecIn(t *testing.T) {
	if testing.Short() {
		return
	}

	rootfs, err := newRootFs()
	if err != nil {
		t.Fatal(err)
	}
	defer remove(rootfs)

	config := newTemplateConfig(rootfs)
	if err := writeConfig(config); err != nil {
		t.Fatalf("failed to write config %s", err)
	}

	containerCmd, statePath, containerErr := startLongRunningContainer(config)
	defer func() {
		// kill the container
		if containerCmd.Process != nil {
			containerCmd.Process.Kill()
		}
		if err := <-containerErr; err != nil {
			t.Fatal(err)
		}
	}()

	// start the exec process
	state, err := libcontainer.GetState(statePath)
	if err != nil {
		t.Fatalf("failed to get state %s", err)
	}
	buffers := newStdBuffers()
	execErr := make(chan error, 1)
	go func() {
		_, err := namespaces.ExecIn(config, state, []string{"ps"},
			os.Args[0], "exec", buffers.Stdin, buffers.Stdout, buffers.Stderr, "", nil)
		execErr <- err
	}()
	if err := <-execErr; err != nil {
		t.Fatalf("exec finished with error %s", err)
	}

	out := buffers.Stdout.String()
	if !strings.Contains(out, "sleep 10") || !strings.Contains(out, "ps") {
		t.Fatalf("unexpected running process, output %q", out)
	}
}
// getContainerStats reads usage data of a container from the cgroup fs.
func (collector *LibcontainerStatsCollector) getContainerStats(container *CronContainer) (*ContainerStats, error) {
	state, err := libcontainer.GetState(container.statePath)
	if err != nil {
		// The state file is not created immediately when a container starts.
		// Bubble up the error.
		return nil, err
	}

	// libcontainer.GetStats ignores the config argument. So, don't bother providing one.
	containerStats, err := libcontainer.GetStats(nil, state)
	if err != nil && !isNetworkStatsError(err) {
		log.Error("Error getting libcontainer stats", "err", err)
		return nil, err
	}

	cs := toContainerStats(*containerStats)
	return cs, nil
}
func statsAction(context *cli.Context) {
	container, err := loadContainer()
	if err != nil {
		log.Fatal(err)
	}
	runtimeCkpt, err := libcontainer.GetState(dataPath)
	if err != nil {
		log.Fatal(err)
	}
	stats, err := getStats(container, runtimeCkpt)
	if err != nil {
		log.Fatalf("Failed to get stats - %v\n", err)
	}
	fmt.Printf("Stats:\n%v\n", stats)
}
func statsAction(context *cli.Context) {
	container, err := loadContainer()
	if err != nil {
		log.Fatal(err)
	}
	state, err := libcontainer.GetState(dataPath)
	if err != nil {
		log.Fatal(err)
	}
	stats, err := libcontainer.GetStats(container, state)
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.MarshalIndent(stats, "", "\t")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", data)
}
func execAction(context *cli.Context) {
	if context.Bool("list") {
		w := tabwriter.NewWriter(os.Stdout, 10, 1, 3, ' ', 0)
		fmt.Fprint(w, "NAME\tUSAGE\n")

		for k, f := range argvs {
			fmt.Fprintf(w, "%s\t%s\n", k, f.Usage)
		}
		w.Flush()
		return
	}

	var exitCode int

	container, err := loadConfig()
	if err != nil {
		log.Fatal(err)
	}

	state, err := libcontainer.GetState(dataPath)
	if err != nil && !os.IsNotExist(err) {
		log.Fatalf("unable to read state.json: %s", err)
	}

	if state != nil {
		exitCode, err = startInExistingContainer(container, state, context.String("func"), context)
	} else {
		exitCode, err = startContainer(container, dataPath, []string(context.Args()))
	}

	if err != nil {
		log.Fatalf("failed to exec: %s", err)
	}

	os.Exit(exitCode)
}
func (d *driver) Terminate(p *execdriver.Command) error {
	// Check the start time for the process.
	state, err := libcontainer.GetState(filepath.Join(d.root, p.ID))
	if err != nil {
		// If we don't have the data on disk then we can assume the process is gone,
		// because this is only removed after we know the process has stopped.
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	currentStartTime, err := system.GetProcessStartTime(p.Process.Pid)
	if err != nil {
		return err
	}

	if state.InitStartTime == currentStartTime {
		err = syscall.Kill(p.Process.Pid, 9)
		syscall.Wait4(p.Process.Pid, nil, 0, nil)
	}

	d.removeContainerRoot(p.ID)

	return err
}
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var term execdriver.Terminal
	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer d.cleanContainer(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	execOutputChan := make(chan execOutput, 1)
	waitForStart := make(chan struct{})

	go func() {
		exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr,
			c.ProcessConfig.Console, dataPath, args,
			func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
				c.ProcessConfig.Path = d.initPath
				c.ProcessConfig.Args = append([]string{
					DriverName,
					"-console", console,
					"-pipe", "3",
					"-root", filepath.Join(d.root, c.ID),
					"--",
				}, args...)

				// set this to nil so that when we set the clone flags anything else is reset
				c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
					Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
				}
				c.ProcessConfig.ExtraFiles = []*os.File{child}

				c.ProcessConfig.Env = container.Env
				c.ProcessConfig.Dir = container.RootFs

				return &c.ProcessConfig.Cmd
			}, func() {
				close(waitForStart)
				if startCallback != nil {
					c.ContainerPid = c.ProcessConfig.Process.Pid
					startCallback(&c.ProcessConfig, c.ContainerPid)
				}
			})
		execOutputChan <- execOutput{exitCode, err}
	}()

	select {
	case execOutput := <-execOutputChan:
		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
	case <-waitForStart:
		break
	}

	oomKill := false
	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err == nil {
		oomKillNotification, err := libcontainer.NotifyOnOOM(state)
		if err == nil {
			_, oomKill = <-oomKillNotification
		} else {
			log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
		}
	} else {
		log.Warnf("Failed to get container state, oom notify will not work: %s", err)
	}

	// wait for the container to exit.
	execOutput := <-execOutputChan

	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
}
// IsRunning is determined by looking for the
// state file for a container. If the file exists then the
// container is currently running.
func (i *info) IsRunning() bool {
	if _, err := libcontainer.GetState(filepath.Join(i.driver.root, i.ID)); err == nil {
		return true
	}
	return false
}