// waitInPIDHost returns a wait function for a process sharing the host's PID
// namespace: once the process exits, any processes still running in the
// container are killed before the exit status is returned.
func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*os.ProcessState, error) {
	return func() (*os.ProcessState, error) {
		pid, err := p.Pid()
		if err != nil {
			return nil, err
		}

		process, err := os.FindProcess(pid)
		if err != nil {
			return nil, err
		}

		s, err := process.Wait()
		if err != nil {
			execErr, ok := err.(*exec.ExitError)
			if !ok {
				return s, err
			}
			// The error carries an exit status; recover the process state from it.
			s = execErr.ProcessState
		}

		// Kill any processes left in the container after the main process exited.
		processes, err := c.Processes()
		if err != nil {
			return s, err
		}
		for _, pid := range processes {
			process, err := os.FindProcess(pid)
			if err != nil {
				log.Errorf("Failed to kill process: %d", pid)
				continue
			}
			process.Kill()
		}

		p.Wait()
		return s, err
	}
}

func readonlyFs(container *libcontainer.Container, context interface{}, value string) error {
	switch value {
	case "1", "true":
		container.ReadonlyFs = true
	default:
		container.ReadonlyFs = false
	}
	return nil
}

// notifyOnOOM returns a channel that signals if the container received an OOM notification
// for any process. If it is unable to subscribe to OOM notifications then a closed
// channel is returned as it will be non-blocking and return the correct result when read.
func notifyOnOOM(container libcontainer.Container) <-chan struct{} {
	oom, err := container.NotifyOOM()
	if err != nil {
		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
		c := make(chan struct{})
		close(c)
		return c
	}
	return oom
}

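// A minimal usage sketch (illustrative only, not part of the original sources):
// the channel returned by notifyOnOOM is safe to select on even when OOM
// notifications are unsupported, because a closed channel is returned in that
// case. The waitForExit channel below is a hypothetical placeholder for
// whatever signals that the container has stopped.
//
//	oomNotification := notifyOnOOM(container)
//	go func() {
//		select {
//		case <-oomNotification:
//			logrus.Warnf("container %s received an OOM notification", container.ID())
//		case <-waitForExit:
//		}
//	}()
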
// killCgroupProcs pauses the container, kills every process in its cgroup,
// resumes it, and then waits for the killed processes to be reaped.
func killCgroupProcs(c libcontainer.Container) {
	var procs []*os.Process
	if err := c.Pause(); err != nil {
		logrus.Warn(err)
	}
	pids, err := c.Processes()
	if err != nil {
		// Ignore children we cannot list; this usually means the cgroup has
		// already been deleted.
		logrus.Warnf("Failed to get processes from container %s: %v", c.ID(), err)
	}
	for _, pid := range pids {
		if p, err := os.FindProcess(pid); err == nil {
			procs = append(procs, p)
			if err := p.Kill(); err != nil {
				logrus.Warn(err)
			}
		}
	}
	if err := c.Resume(); err != nil {
		logrus.Warn(err)
	}
	for _, p := range procs {
		if _, err := p.Wait(); err != nil {
			logrus.Warn(err)
		}
	}
}

// createNetwork configures the container's networking: host networking, a
// loopback device, an optional veth pair, and optionally joining another
// running container's network namespace.
func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error {
	if c.Network.HostNetworking {
		container.Namespaces["NEWNET"] = false
		return nil
	}

	container.Networks = []*libcontainer.Network{
		{
			Mtu:     c.Network.Mtu,
			Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0),
			Gateway: "localhost",
			Type:    "loopback",
			Context: libcontainer.Context{},
		},
	}

	if c.Network.Interface != nil {
		vethNetwork := libcontainer.Network{
			Mtu:     c.Network.Mtu,
			Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
			Gateway: c.Network.Interface.Gateway,
			Type:    "veth",
			Context: libcontainer.Context{
				"prefix": "veth",
				"bridge": c.Network.Interface.Bridge,
			},
		}
		container.Networks = append(container.Networks, &vethNetwork)
	}

	if c.Network.ContainerID != "" {
		d.Lock()
		active := d.activeContainers[c.Network.ContainerID]
		d.Unlock()

		if active == nil || active.cmd.Process == nil {
			return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
		}
		cmd := active.cmd

		nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
		container.Networks = append(container.Networks, &libcontainer.Network{
			Type: "netns",
			Context: libcontainer.Context{
				"nspath": nspath,
			},
		})
	}
	return nil
}

// setPrivileged relaxes the container's confinement: all capabilities, access
// to all host device nodes, no path restrictions, and an unconfined AppArmor
// profile.
func (d *driver) setPrivileged(container *libcontainer.Container) (err error) {
	container.Capabilities = libcontainer.GetAllCapabilities()
	container.Cgroups.AllowAllDevices = true

	hostDeviceNodes, err := devices.GetHostDeviceNodes()
	if err != nil {
		return err
	}
	container.DeviceNodes = hostDeviceNodes

	delete(container.Context, "restrictions")

	if apparmor.IsEnabled() {
		container.Context["apparmor_profile"] = "unconfined"
	}
	return nil
}

// dropCap removes a capability from the container's capability set.
func dropCap(container *libcontainer.Container, context interface{}, value string) error {
	// If the capability is specified multiple times, remove all instances.
	// Filtering in place avoids the index-shifting problems of deleting from
	// the slice while ranging over it.
	filtered := container.Capabilities[:0]
	for _, capability := range container.Capabilities {
		if capability != value {
			filtered = append(filtered, capability)
		}
	}
	container.Capabilities = filtered

	// A capability that was never present simply has nothing to drop; this is
	// not an error.
	return nil
}

func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error {
	for _, m := range c.Mounts {
		container.Mounts = append(container.Mounts, libcontainer.Mount{
			Type:        "bind",
			Source:      m.Source,
			Destination: m.Destination,
			Writable:    m.Writable,
			Private:     m.Private,
		})
	}
	return nil
}

// joinNetNamespace attaches the container to the network namespace of an
// already running container identified by value.
func joinNetNamespace(container *libcontainer.Container, context interface{}, value string) error {
	var (
		running = context.(map[string]*exec.Cmd)
		cmd     = running[value]
	)

	if cmd == nil || cmd.Process == nil {
		return fmt.Errorf("%s is not a valid running container to join", value)
	}

	nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
	container.Networks = append(container.Networks, &libcontainer.Network{
		Type: "netns",
		Context: libcontainer.Context{
			"nspath": nspath,
		},
	})
	return nil
}

Flags: []cli.Flag{
	cli.StringFlag{Name: "id", Value: "nsinit", Usage: "specify the ID for a container"},
	cli.StringFlag{Name: "image-path", Value: "", Usage: "path to criu image files for restoring"},
	cli.StringFlag{Name: "work-path", Value: "", Usage: "path for saving work files and logs"},
	cli.BoolFlag{Name: "tcp-established", Usage: "allow open tcp connections"},
	cli.BoolFlag{Name: "ext-unix-sk", Usage: "allow external unix sockets"},
	cli.BoolFlag{Name: "shell-job", Usage: "allow shell jobs"},
},
Action: func(context *cli.Context) {
	imagePath := context.String("image-path")
	if imagePath == "" {
		fatal(fmt.Errorf("The --image-path option isn't specified"))
	}

	var (
		container libcontainer.Container
		err       error
	)

	factory, err := loadFactory(context)
	if err != nil {
		fatal(err)
	}

	config, err := loadConfig(context)
	if err != nil {
		fatal(err)
	}

	created := false
	container, err = factory.Load(context.String("id"))
	if err != nil {

func addCap(container *libcontainer.Container, context interface{}, value string) error {
	container.Capabilities = append(container.Capabilities, value)
	return nil
}