// TestMain prepares the libcontainer factories shared by the test
// suite before running the tests: a plain cgroupfs-backed factory and,
// when systemd is available, an additional systemd-cgroup factory.
// Both factories store container state under the current directory.
func TestMain(m *testing.M) {
	var (
		err error
		ret int = 0
	)
	logrus.SetOutput(os.Stderr)
	logrus.SetLevel(logrus.InfoLevel)
	// factory is a package-level variable used by the tests.
	factory, err = libcontainer.New(".", libcontainer.Cgroupfs)
	if err != nil {
		logrus.Error(err)
		os.Exit(1)
	}
	// Only build the systemd factory when systemd is actually usable on
	// this host; tests that need it are expected to check availability.
	if systemd.UseSystemd() {
		systemdFactory, err = libcontainer.New(".", libcontainer.SystemdCgroups)
		if err != nil {
			logrus.Error(err)
			os.Exit(1)
		}
	}
	ret = m.Run()
	os.Exit(ret)
}
// loadFactory returns the configured factory instance for execing containers. func loadFactory(context *cli.Context) (libcontainer.Factory, error) { var ( debug = "false" root = context.GlobalString("root") ) if context.GlobalBool("debug") { debug = "true" } abs, err := filepath.Abs(root) if err != nil { return nil, err } logAbs, err := filepath.Abs(context.GlobalString("log")) if err != nil { return nil, err } return libcontainer.New(abs, libcontainer.Cgroupfs, func(l *libcontainer.LinuxFactory) error { l.CriuPath = context.GlobalString("criu") return nil }, libcontainer.InitArgs(os.Args[0], "--log", logAbs, "--log-format", context.GlobalString("log-format"), fmt.Sprintf("--debug=%s", debug), "init"), ) }
// init runs the libcontainer initialization code because of the busybox style needs // to work around the go runtime and the issues with forking func init() { if len(os.Args) < 2 || os.Args[1] != "init" { return } runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, err := libcontainer.New("") if err != nil { logrus.Fatalf("unable to initialize for container: %s", err) } if err := factory.StartInitialization(); err != nil { // return proper unix error codes if exerr, ok := err.(*exec.Error); ok { switch exerr.Err { case os.ErrPermission: fmt.Fprintln(os.Stderr, err) os.Exit(126) case exec.ErrNotFound: fmt.Fprintln(os.Stderr, err) os.Exit(127) default: if os.IsNotExist(exerr.Err) { fmt.Fprintf(os.Stderr, "exec: %s: %v\n", strconv.Quote(exerr.Name), os.ErrNotExist) os.Exit(127) } } } logrus.Fatal(err) } panic("init: init failed to start contianer") }
func createFactory() (libcontainer.Factory, error) { abs, err := filepath.Abs(runc_root) if err != nil { return nil, err } return libcontainer.New(abs, libcontainer.Cgroupfs, func(l *libcontainer.LinuxFactory) error { l.CriuPath = "criu" return nil }) }
func runInit() { runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, _ := libcontainer.New("") if err := factory.StartInitialization(); err != nil { // as the error is sent back to the parent there is no need to log // or write it to stderr because the parent process will handle this os.Exit(1) } panic("libcontainer: container init failed to exec") }
func init() { if len(os.Args) > 1 && os.Args[1] == "init" { runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, _ := libcontainer.New("") if err := factory.StartInitialization(); err != nil { fatal(err) } panic("libcontainer: container init failed to exec") } }
// loadFactory returns the configured factory instance for execing containers. func loadFactory(context *cli.Context) (libcontainer.Factory, error) { root := context.GlobalString("root") abs, err := filepath.Abs(root) if err != nil { return nil, err } return libcontainer.New(abs, libcontainer.Cgroupfs, func(l *libcontainer.LinuxFactory) error { l.CriuPath = context.GlobalString("criu") return nil }) }
func init() { if len(os.Args) > 1 && os.Args[1] == "init" { runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, _ := libcontainer.New("") if err := factory.StartInitialization(); err != nil { fatal(err) } panic("--this line should never been executed, congratulations--") } }
func init() { if len(os.Args) > 1 && os.Args[1] == "init" { goruntime.GOMAXPROCS(1) goruntime.LockOSThread() factory, _ := libcontainer.New("") if err := factory.StartInitialization(); err != nil { fmt.Fprint(os.Stderr, err) os.Exit(1) } panic("--this line should have never been executed, congratulations--") } }
func NewRuntime(stateDir string) (runtime.Runtime, error) { f, err := libcontainer.New(stateDir, libcontainer.Cgroupfs, func(l *libcontainer.LinuxFactory) error { //l.CriuPath = context.GlobalString("criu") return nil }) if err != nil { return nil, err } return &libcontainerRuntime{ factory: f, }, nil }
func getContainerResources(id string) (*specs.Resources, error) { specPath := specFile // if we are passed a containerID get the bundle dir to get the spec file if containerID != "" && root != "" { abs, err := filepath.Abs(root) if err != nil { return nil, err } // check to make sure a container exists with this ID s := path.Join(abs, id, stateFile) if _, err := os.Stat(s); os.IsNotExist(err) { return nil, fmt.Errorf("State file %s does not exist", s) } // create the factory factory, err := libcontainer.New(abs, libcontainer.Cgroupfs, func(l *libcontainer.LinuxFactory) error { return nil }) if err != nil { return nil, err } // get the container container, err := factory.Load(id) if err != nil { return nil, err } bundle := searchLabels(container.Config().Labels, "bundle") specPath = path.Join(bundle, specFile) } // read the runtime.json for the container so we know things like limits set // this is only if a container ID is not passed we assume we are in a directory // with a config.json containing the spec f, err := os.Open(specPath) if err != nil { if os.IsNotExist(err) { return nil, fmt.Errorf("JSON runtime config file %s not found", specFile) } logrus.Fatal(err) } defer f.Close() var spec specs.Spec if err = json.NewDecoder(f).Decode(&spec); err != nil { return nil, err } return spec.Linux.Resources, nil }
func initializer() { runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, err := libcontainer.New("") if err != nil { fatal(err) } if err := factory.StartInitialization(); err != nil { fatal(err) } panic("unreachable") }
// init runs the libcontainer initialization code because of the busybox style needs // to work around the go runtime and the issues with forking func init() { if len(os.Args) < 2 || os.Args[1] != "init" { return } runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, err := libcontainer.New("") if err != nil { logrus.Fatalf("unable to initialize for container: %s", err) } if err := factory.StartInitialization(); err != nil { logrus.Fatal(err) } }
// TestTmpfsCopyUp verifies that mounting a tmpfs over /etc with the
// EXT_COPYUP extension copies the existing files (here /etc/passwd)
// into the freshly mounted tmpfs instead of shadowing them.
func TestTmpfsCopyUp(t *testing.T) {
	if testing.Short() {
		return
	}
	root, err := newTestRoot()
	ok(t, err)
	defer os.RemoveAll(root)
	rootfs, err := newRootfs()
	ok(t, err)
	defer remove(rootfs)
	config := newTemplateConfig(rootfs)
	// Overlay /etc with a tmpfs; COPYUP should preserve its contents.
	config.Mounts = append(config.Mounts, &configs.Mount{
		Source:      "tmpfs",
		Destination: "/etc",
		Device:      "tmpfs",
		Extensions:  configs.EXT_COPYUP,
	})
	factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
	ok(t, err)
	container, err := factory.Create("test", config)
	ok(t, err)
	defer container.Destroy()
	var stdout bytes.Buffer
	// List the copied-up file from inside the container.
	pconfig := libcontainer.Process{
		Args:   []string{"ls", "/etc/passwd"},
		Env:    standardEnvironment,
		Stdin:  nil,
		Stdout: &stdout,
	}
	err = container.Run(&pconfig)
	ok(t, err)
	// Wait for process
	waitProcess(&pconfig, t)
	outputLs := string(stdout.Bytes())
	// Check that the ls output has /etc/passwd
	if !strings.Contains(outputLs, "/etc/passwd") {
		t.Fatalf("/etc/passwd not copied up as expected: %v", outputLs)
	}
}
// loadFactory returns the configured factory instance for execing containers. func loadFactory(context *cli.Context) (libcontainer.Factory, error) { root := context.GlobalString("root") abs, err := filepath.Abs(root) if err != nil { return nil, err } cgroupManager := libcontainer.Cgroupfs if context.GlobalBool("systemd-cgroup") { if systemd.UseSystemd() { cgroupManager = libcontainer.SystemdCgroups } else { return nil, fmt.Errorf("systemd cgroup flag passed, but systemd support for managing cgroups is not available") } } return libcontainer.New(abs, cgroupManager, libcontainer.CriuPath(context.GlobalString("criu"))) }
func init() { var err error if storeDir, err = filepath.Abs(".acbuild"); err != nil { log.Fatal("failed to get abspath: %v", err) } if len(os.Args) > 1 && os.Args[1] == "init" { runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, _ := libcontainer.New("") if err := factory.StartInitialization(); err != nil { log.Fatal(fmt.Errorf("failed to initialize factory, err: %v", err)) } panic("--this line should never been executed, congratulations--") } }
func TestAdditionalGroups(t *testing.T) { if testing.Short() { return } root, err := newTestRoot() ok(t, err) defer os.RemoveAll(root) rootfs, err := newRootfs() ok(t, err) defer remove(rootfs) config := newTemplateConfig(rootfs) config.AdditionalGroups = []string{"plugdev", "audio"} factory, err := libcontainer.New(root, libcontainer.Cgroupfs) ok(t, err) container, err := factory.Create("test", config) ok(t, err) defer container.Destroy() var stdout bytes.Buffer pconfig := libcontainer.Process{ Cwd: "/", Args: []string{"sh", "-c", "id", "-Gn"}, Env: standardEnvironment, Stdin: nil, Stdout: &stdout, } err = container.Start(&pconfig) ok(t, err) // Wait for process waitProcess(&pconfig, t) outputGroups := string(stdout.Bytes()) // Check that the groups output has the groups that we specified if !strings.Contains(outputGroups, "audio") { t.Fatalf("Listed groups do not contain the audio group as expected: %v", outputGroups) } if !strings.Contains(outputGroups, "plugdev") { t.Fatalf("Listed groups do not contain the plugdev group as expected: %v", outputGroups) } }
// loadFactory returns the configured factory instance for execing containers. func loadFactory(useSystemdCgroup bool) (libcontainer.Factory, error) { abs, err := filepath.Abs(root) if err != nil { return nil, err } cgroupManager := libcontainer.Cgroupfs if useSystemdCgroup { if systemd.UseSystemd() { cgroupManager = libcontainer.SystemdCgroups } else { return nil, fmt.Errorf("systemd cgroup flag passed, but systemd support for managing cgroups is not available") } } return libcontainer.New(abs, cgroupManager, func(l *libcontainer.LinuxFactory) error { return nil }) }
func (c *container) getLibctContainer() (libcontainer.Container, error) { runtimeRoot := "/run/runc" // Check that the root wasn't changed for _, opt := range c.runtimeArgs { if strings.HasPrefix(opt, "--root=") { runtimeRoot = strings.TrimPrefix(opt, "--root=") break } } f, err := libcontainer.New(runtimeRoot, libcontainer.Cgroupfs) if err != nil { return nil, err } return f.Load(c.id) }
// TestOomScoreAdj verifies that config.OomScoreAdj is applied to the
// container process by reading /proc/self/oom_score_adj from inside it.
func TestOomScoreAdj(t *testing.T) {
	if testing.Short() {
		return
	}
	root, err := newTestRoot()
	ok(t, err)
	defer os.RemoveAll(root)
	rootfs, err := newRootfs()
	ok(t, err)
	defer remove(rootfs)
	config := newTemplateConfig(rootfs)
	config.OomScoreAdj = 200
	factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
	ok(t, err)
	container, err := factory.Create("test", config)
	ok(t, err)
	defer container.Destroy()
	var stdout bytes.Buffer
	pconfig := libcontainer.Process{
		Cwd:    "/",
		Args:   []string{"sh", "-c", "cat /proc/self/oom_score_adj"},
		Env:    standardEnvironment,
		Stdin:  nil,
		Stdout: &stdout,
	}
	err = container.Start(&pconfig)
	ok(t, err)
	// Wait for process
	waitProcess(&pconfig, t)
	outputOomScoreAdj := strings.TrimSpace(string(stdout.Bytes()))
	// Check that the oom_score_adj matches the value that was set as part of config.
	if outputOomScoreAdj != strconv.Itoa(config.OomScoreAdj) {
		t.Fatalf("Expected oom_score_adj %d; got %q", config.OomScoreAdj, outputOomScoreAdj)
	}
}
// NewLibcontainerBackend constructs the libcontainer-based job backend:
// it creates the container factory, configures partition cgroups,
// creates a default tmpfs (deleted again on shutdown), and wires up the
// backend's in-memory state and HTTP client.
func NewLibcontainerBackend(config *LibcontainerConfig) (Backend, error) {
	factory, err := libcontainer.New(
		containerRoot,
		libcontainer.Cgroupfs,
		// The host binary re-execs itself with "libcontainer-init" as
		// the container init stage.
		libcontainer.InitArgs(os.Args[0], "libcontainer-init"),
	)
	if err != nil {
		return nil, err
	}
	if err := setupCGroups(config.PartitionCGroups); err != nil {
		return nil, err
	}
	defaultTmpfs, err := createTmpfs(resource.DefaultTempDiskSize)
	if err != nil {
		return nil, err
	}
	// Make sure the tmpfs is cleaned up when the process exits.
	shutdown.BeforeExit(func() { defaultTmpfs.Delete() })
	l := &LibcontainerBackend{
		LibcontainerConfig:  config,
		factory:             factory,
		logStreams:          make(map[string]map[string]*logmux.LogStream),
		containers:          make(map[string]*Container),
		defaultEnv:          make(map[string]string),
		resolvConf:          "/etc/resolv.conf",
		ipalloc:             ipallocator.New(),
		discoverdConfigured: make(chan struct{}),
		networkConfigured:   make(chan struct{}),
		globalState:         &libcontainerGlobalState{},
		defaultTmpfs:        defaultTmpfs,
	}
	// The HTTP client resolves hosts via discoverd, retrying dials
	// until discovery is configured.
	l.httpClient = &http.Client{Transport: &http.Transport{
		Dial: dialer.RetryDial(l.discoverdDial),
	}}
	return l, nil
}
func NewLibcontainerBackend(state *State, vman *volumemanager.Manager, bridgeName, initPath string, mux *logmux.Mux, partitionCGroups map[string]int64, logger log15.Logger) (Backend, error) { factory, err := libcontainer.New( containerRoot, libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "libcontainer-init"), ) pinkertonCtx, err := pinkerton.BuildContext("aufs", imageRoot) if err != nil { return nil, err } if err := setupCGroups(partitionCGroups); err != nil { return nil, err } return &LibcontainerBackend{ InitPath: initPath, factory: factory, state: state, vman: vman, pinkerton: pinkertonCtx, logStreams: make(map[string]map[string]*logmux.LogStream), containers: make(map[string]*Container), defaultEnv: make(map[string]string), resolvConf: "/etc/resolv.conf", mux: mux, ipalloc: ipallocator.New(), bridgeName: bridgeName, discoverdConfigured: make(chan struct{}), networkConfigured: make(chan struct{}), partitionCGroups: partitionCGroups, logger: logger, globalState: &libcontainerGlobalState{}, }, nil }
// NewDriver returns a new native driver, called from NewDriver of execdriver.
func NewDriver(root, initPath string, options []string) (*Driver, error) {
	meminfo, err := sysinfo.ReadMemInfo()
	if err != nil {
		return nil, err
	}
	if err := sysinfo.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	if apparmor.IsEnabled() {
		if err := installAppArmorProfile(); err != nil {
			apparmorProfiles := []string{"docker-default"}
			// Allow daemon to run if loading failed, but are active
			// (possibly through another run, manually, or via system startup)
			for _, policy := range apparmorProfiles {
				if err := hasAppArmorProfileLoaded(policy); err != nil {
					return nil, fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy)
				}
			}
		}
	}
	// choose cgroup manager
	// this makes sure there are no breaking changes to people
	// who upgrade from versions without native.cgroupdriver opt
	cgm := libcontainer.Cgroupfs
	if systemd.UseSystemd() {
		cgm = libcontainer.SystemdCgroups
	}
	// parse the options
	for _, option := range options {
		key, val, err := parsers.ParseKeyValueOpt(option)
		if err != nil {
			return nil, err
		}
		key = strings.ToLower(key)
		switch key {
		case "native.cgroupdriver":
			// override the default if they set options
			switch val {
			case "systemd":
				if systemd.UseSystemd() {
					cgm = libcontainer.SystemdCgroups
				} else {
					// warn them that they chose the wrong driver
					logrus.Warn("You cannot use systemd as native.cgroupdriver, using cgroupfs instead")
				}
			case "cgroupfs":
				cgm = libcontainer.Cgroupfs
			default:
				return nil, fmt.Errorf("Unknown native.cgroupdriver given %q. try cgroupfs or systemd", val)
			}
		default:
			return nil, fmt.Errorf("Unknown option %s\n", key)
		}
	}
	// The daemon re-execs itself (reexec.Self) with the driver name as
	// argv[0] to run container init.
	f, err := libcontainer.New(
		root,
		cgm,
		libcontainer.InitPath(reexec.Self(), DriverName),
	)
	if err != nil {
		return nil, err
	}
	return &Driver{
		root:             root,
		initPath:         initPath,
		activeContainers: make(map[string]libcontainer.Container),
		machineMemory:    meminfo.MemTotal,
		factory:          f,
	}, nil
}
// main is the flynn-host entry point: it first handles the internal
// libcontainer-init re-exec, then dispatches the requested subcommand,
// merging daemon arguments/environment from an optional config file.
func main() {
	// when starting a container with libcontainer, we first exec the
	// current binary with libcontainer-init as the first argument,
	// which triggers the following code to initialise the container
	// environment (namespaces, network etc.) then exec containerinit
	if len(os.Args) > 1 && os.Args[1] == "libcontainer-init" {
		runtime.GOMAXPROCS(1)
		runtime.LockOSThread()
		factory, _ := libcontainer.New("")
		if err := factory.StartInitialization(); err != nil {
			log.Fatal(err)
		}
	}

	defer shutdown.Exit()

	// NOTE(review): the exact whitespace layout of this usage string was
	// lost in transit and has been reconstructed; verify against the
	// original before relying on docopt's parse of it.
	usage := `usage: flynn-host [-h|--help] [--version] <command> [<args>...]

Options:
  -h, --help         Show this message
  --version          Show current version

Commands:
  help                Show usage for a specific command
  init                Create cluster configuration for daemon
  daemon              Start the daemon
  update              Update Flynn components
  download            Download container images
  bootstrap           Bootstrap layer 1
  inspect             Get low-level information about a job
  log                 Get the logs of a job
  ps                  List jobs
  stop                Stop running jobs
  signal              Signal a job
  destroy-volumes     Destroys the local volume database
  collect-debug-info  Collect debug information into an anonymous gist or tarball
  list                Lists ID and IP of each host
  version             Show current version
  fix                 Fix a broken cluster
  tags                Manage flynn-host daemon tags
  discover            Return low-level information about a service

See 'flynn-host help <command>' for more information on a specific command.
`
	args, _ := docopt.Parse(usage, nil, true, version.String(), true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn help`
			fmt.Println(usage)
			return
		} else { // `flynn help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if cmd == "daemon" {
		// merge in args and env from config file, if available
		var c *config.Config
		if n := os.Getenv("FLYNN_HOST_CONFIG"); n != "" {
			var err error
			c, err = config.Open(n)
			if err != nil {
				shutdown.Fatalf("error opening config file %s: %s", n, err)
			}
		} else {
			var err error
			c, err = config.Open(configFile)
			if err != nil && !os.IsNotExist(err) {
				shutdown.Fatalf("error opening config file %s: %s", configFile, err)
			}
			if c == nil {
				c = &config.Config{}
			}
		}
		cmdArgs = append(cmdArgs, c.Args...)
		for k, v := range c.Env {
			os.Setenv(k, v)
		}
	}

	if err := cli.Run(cmd, cmdArgs); err != nil {
		if err == cli.ErrInvalidCommand {
			fmt.Printf("ERROR: %q is not a valid command\n\n", cmd)
			fmt.Println(usage)
			shutdown.ExitWithCode(1)
		} else if _, ok := err.(cli.ErrAlreadyLogged); ok {
			shutdown.ExitWithCode(1)
		}
		shutdown.Fatal(err)
	}
}
// runCmdInDir runs the given command inside a container under dir func runCmdInDir(im *schema.ImageManifest, cmd, dir string, jail bool, mounts []*configs.Mount) error { exePath, err := osext.Executable() if err != nil { return fmt.Errorf("error getting path to the current executable: %v", err) } factory, err := libcontainer.New(dir, libcontainer.InitArgs(exePath, "init")) if err != nil { return fmt.Errorf("error creating a container factory: %v", err) } // The containter ID doesn't really matter here... using a UUID containerID := uuid.NewV4().String() var container libcontainer.Container if jail { config := &configs.Config{} if err := json.Unmarshal([]byte(LibcontainerDefaultConfig), config); err != nil { return fmt.Errorf("error unmarshalling default config: %v", err) } config.Rootfs = dir config.Readonlyfs = false container, err = factory.Create(containerID, config) if err != nil { return fmt.Errorf("error creating a container: %v", err) } } else { container, err = factory.Create(containerID, &configs.Config{ Rootfs: dir, Mounts: mounts, Cgroups: &configs.Cgroup{ Name: containerID, Parent: "system", AllowAllDevices: false, AllowedDevices: configs.DefaultAllowedDevices, }, }) if err != nil { return fmt.Errorf("error creating a container: %v", err) } } process := &libcontainer.Process{ Args: strings.Fields(cmd), User: "******", Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, } if im.App != nil { process.Env = util.ACIEnvironmentToList(im.App.Environment) } process.Env = []string{"PATH=/usr/bin:/sbin/:/bin"} if err := container.Start(process); err != nil { return fmt.Errorf("error starting the process inside the container: %v", err) } _, err = process.Wait() if err != nil { return fmt.Errorf("error running the process: %v", err) } if err := container.Destroy(); err != nil { return fmt.Errorf("error destroying the container: %v", err) } return nil }
func TestCheckpoint(t *testing.T) { if testing.Short() { return } root, err := newTestRoot() if err != nil { t.Fatal(err) } defer os.RemoveAll(root) rootfs, err := newRootfs() if err != nil { t.Fatal(err) } defer remove(rootfs) config := newTemplateConfig(rootfs) factory, err := libcontainer.New(root, libcontainer.Cgroupfs) if err != nil { t.Fatal(err) } container, err := factory.Create("test", config) if err != nil { t.Fatal(err) } defer container.Destroy() stdinR, stdinW, err := os.Pipe() if err != nil { t.Fatal(err) } var stdout bytes.Buffer pconfig := libcontainer.Process{ Args: []string{"cat"}, Env: standardEnvironment, Stdin: stdinR, Stdout: &stdout, } err = container.Start(&pconfig) stdinR.Close() defer stdinW.Close() if err != nil { t.Fatal(err) } pid, err := pconfig.Pid() if err != nil { t.Fatal(err) } process, err := os.FindProcess(pid) if err != nil { t.Fatal(err) } imagesDir, err := ioutil.TempDir("", "criu") if err != nil { t.Fatal(err) } defer os.RemoveAll(imagesDir) checkpointOpts := &libcontainer.CriuOpts{ ImagesDirectory: imagesDir, WorkDirectory: imagesDir, } if err := container.Checkpoint(checkpointOpts); err != nil { t.Fatal(err) } state, err := container.Status() if err != nil { t.Fatal(err) } if state != libcontainer.Checkpointed { t.Fatal("Unexpected state: ", state) } stdinW.Close() _, err = process.Wait() if err != nil { t.Fatal(err) } // reload the container container, err = factory.Load("test") if err != nil { t.Fatal(err) } restoreStdinR, restoreStdinW, err := os.Pipe() if err != nil { t.Fatal(err) } restoreProcessConfig := &libcontainer.Process{ Stdin: restoreStdinR, Stdout: &stdout, } err = container.Restore(restoreProcessConfig, &libcontainer.CriuOpts{ ImagesDirectory: imagesDir, }) restoreStdinR.Close() defer restoreStdinW.Close() state, err = container.Status() if err != nil { t.Fatal(err) } if state != libcontainer.Running { t.Fatal("Unexpected state: ", state) } pid, err = restoreProcessConfig.Pid() if err != nil { t.Fatal(err) } 
process, err = os.FindProcess(pid) if err != nil { t.Fatal(err) } _, err = restoreStdinW.WriteString("Hello!") if err != nil { t.Fatal(err) } restoreStdinW.Close() s, err := process.Wait() if err != nil { t.Fatal(err) } if !s.Success() { t.Fatal(s.String(), pid) } output := string(stdout.Bytes()) if !strings.Contains(output, "Hello!") { t.Fatal("Did not restore the pipe correctly:", output) } }
func main() { rootfs := os.Getenv("ROOTFS") factory, err := libcontainer.New(rootfs, libcontainer.Cgroupfs) if err != nil { fmt.Println(err) } defaultMountFlags := syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV config := &configs.Config{ Rootfs: rootfs, Capabilities: []string{ "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER", "CAP_MKNOD", "CAP_NET_RAW", "CAP_SETGID", "CAP_SETUID", "CAP_SETFCAP", "CAP_SETPCAP", "CAP_NET_BIND_SERVICE", "CAP_SYS_CHROOT", "CAP_KILL", "CAP_AUDIT_WRITE", }, Namespaces: configs.Namespaces([]configs.Namespace{ {Type: configs.NEWNS}, {Type: configs.NEWUTS}, {Type: configs.NEWIPC}, {Type: configs.NEWPID}, {Type: configs.NEWNET}, }), Cgroups: &configs.Cgroup{ Name: "test-container", Parent: "system", Resources: &configs.Resources{ MemorySwappiness: -1, AllowAllDevices: false, AllowedDevices: configs.DefaultAllowedDevices, }, }, MaskPaths: []string{ "/proc/kcore", }, ReadonlyPaths: []string{ "/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus", }, Devices: configs.DefaultAutoCreatedDevices, Hostname: "testing", Mounts: []*configs.Mount{ { Source: "proc", Destination: "/proc", Device: "proc", Flags: defaultMountFlags, }, { Source: "tmpfs", Destination: "/dev", Device: "tmpfs", Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, Data: "mode=755", }, { Source: "devpts", Destination: "/dev/pts", Device: "devpts", Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, Data: "newinstance,ptmxmode=0666,mode=0620,gid=5", }, { Device: "tmpfs", Source: "shm", Destination: "/dev/shm", Data: "mode=1777,size=65536k", Flags: defaultMountFlags, }, { Source: "mqueue", Destination: "/dev/mqueue", Device: "mqueue", Flags: defaultMountFlags, }, { Source: "sysfs", Destination: "/sys", Device: "sysfs", Flags: defaultMountFlags | syscall.MS_RDONLY, }, }, Networks: []*configs.Network{ { Type: "loopback", Address: "127.0.0.1/0", Gateway: "localhost", }, }, Rlimits: []configs.Rlimit{ { Type: syscall.RLIMIT_NOFILE, Hard: uint64(1025), Soft: 
uint64(1025), }, }, } container, err := factory.Create("abcde", config) if err != nil { fmt.Println(err) } process := &libcontainer.Process{ Args: []string{"/bin/sh"}, Env: []string{"PATH=/bin"}, Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, } err = container.Start(process) if err != nil { fmt.Println(err) } // wait for the process to finish. status, err := process.Wait() if err != nil { fmt.Println(err) } fmt.Println(status) container.Destroy() fmt.Println("done") }
// NewDriver returns a new native driver, called from NewDriver of execdriver.
func NewDriver(root, initPath string, options []string) (*Driver, error) {
	meminfo, err := sysinfo.ReadMemInfo()
	if err != nil {
		return nil, err
	}
	if err := sysinfo.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	// choose cgroup manager
	// this makes sure there are no breaking changes to people
	// who upgrade from versions without native.cgroupdriver opt
	cgm := libcontainer.Cgroupfs
	if systemd.UseSystemd() {
		cgm = libcontainer.SystemdCgroups
	}
	// parse the options
	for _, option := range options {
		key, val, err := parsers.ParseKeyValueOpt(option)
		if err != nil {
			return nil, err
		}
		key = strings.ToLower(key)
		switch key {
		case "native.cgroupdriver":
			// override the default if they set options
			switch val {
			case "systemd":
				if systemd.UseSystemd() {
					cgm = libcontainer.SystemdCgroups
				} else {
					// warn them that they chose the wrong driver
					logrus.Warn("You cannot use systemd as native.cgroupdriver, using cgroupfs instead")
				}
			case "cgroupfs":
				cgm = libcontainer.Cgroupfs
			default:
				return nil, fmt.Errorf("Unknown native.cgroupdriver given %q. try cgroupfs or systemd", val)
			}
		default:
			return nil, fmt.Errorf("Unknown option %s\n", key)
		}
	}
	// The daemon re-execs itself (reexec.Self) with the driver name as
	// argv[0] to run container init.
	f, err := libcontainer.New(
		root,
		cgm,
		libcontainer.InitPath(reexec.Self(), DriverName),
	)
	if err != nil {
		return nil, err
	}
	return &Driver{
		root:             root,
		initPath:         initPath,
		activeContainers: make(map[string]libcontainer.Container),
		machineMemory:    meminfo.MemTotal,
		factory:          f,
	}, nil
}
// TestCheckpoint exercises the CRIU checkpoint/restore round trip: a
// "cat" process is checkpointed (leaving the container running), the
// container state is reloaded, and the restored process must still read
// from a re-connected stdin pipe. CRIU dump/restore logs are shown on
// failure.
func TestCheckpoint(t *testing.T) {
	if testing.Short() {
		return
	}
	root, err := newTestRoot()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root)
	rootfs, err := newRootfs()
	if err != nil {
		t.Fatal(err)
	}
	defer remove(rootfs)
	config := newTemplateConfig(rootfs)
	config.Mounts = append(config.Mounts, &configs.Mount{
		Destination: "/sys/fs/cgroup",
		Device:      "cgroup",
		Flags:       defaultMountFlags | syscall.MS_RDONLY,
	})
	factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
	if err != nil {
		t.Fatal(err)
	}
	container, err := factory.Create("test", config)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Destroy()
	stdinR, stdinW, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	var stdout bytes.Buffer
	pconfig := libcontainer.Process{
		Cwd:    "/",
		Args:   []string{"cat"},
		Env:    standardEnvironment,
		Stdin:  stdinR,
		Stdout: &stdout,
	}
	err = container.Start(&pconfig)
	// the container owns the read end of the pipe now
	stdinR.Close()
	defer stdinW.Close()
	if err != nil {
		t.Fatal(err)
	}
	pid, err := pconfig.Pid()
	if err != nil {
		t.Fatal(err)
	}
	process, err := os.FindProcess(pid)
	if err != nil {
		t.Fatal(err)
	}
	imagesDir, err := ioutil.TempDir("", "criu")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(imagesDir)
	checkpointOpts := &libcontainer.CriuOpts{
		ImagesDirectory: imagesDir,
		WorkDirectory:   imagesDir,
	}
	// CRIU writes its diagnostics here; dumped on failure below.
	dumpLog := filepath.Join(checkpointOpts.WorkDirectory, "dump.log")
	restoreLog := filepath.Join(checkpointOpts.WorkDirectory, "restore.log")
	if err := container.Checkpoint(checkpointOpts); err != nil {
		showFile(t, dumpLog)
		t.Fatal(err)
	}
	state, err := container.Status()
	if err != nil {
		t.Fatal(err)
	}
	// This variant checkpoints without stopping, so the container must
	// still be Running afterwards.
	if state != libcontainer.Running {
		t.Fatal("Unexpected state checkpoint: ", state)
	}
	stdinW.Close()
	_, err = process.Wait()
	if err != nil {
		t.Fatal(err)
	}
	// reload the container
	container, err = factory.Load("test")
	if err != nil {
		t.Fatal(err)
	}
	restoreStdinR, restoreStdinW, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	restoreProcessConfig := &libcontainer.Process{
		Cwd:    "/",
		Stdin:  restoreStdinR,
		Stdout: &stdout,
	}
	err = container.Restore(restoreProcessConfig, checkpointOpts)
	restoreStdinR.Close()
	defer restoreStdinW.Close()
	if err != nil {
		showFile(t, restoreLog)
		t.Fatal(err)
	}
	state, err = container.Status()
	if err != nil {
		t.Fatal(err)
	}
	if state != libcontainer.Running {
		t.Fatal("Unexpected restore state: ", state)
	}
	pid, err = restoreProcessConfig.Pid()
	if err != nil {
		t.Fatal(err)
	}
	process, err = os.FindProcess(pid)
	if err != nil {
		t.Fatal(err)
	}
	_, err = restoreStdinW.WriteString("Hello!")
	if err != nil {
		t.Fatal(err)
	}
	restoreStdinW.Close()
	s, err := process.Wait()
	if err != nil {
		t.Fatal(err)
	}
	if !s.Success() {
		t.Fatal(s.String(), pid)
	}
	output := string(stdout.Bytes())
	if !strings.Contains(output, "Hello!") {
		t.Fatal("Did not restore the pipe correctly:", output)
	}
}
import (
	"os"
	"runtime"

	"github.com/opencontainers/runc/libcontainer"
	_ "github.com/opencontainers/runc/libcontainer/nsenter"
	"github.com/urfave/cli"
)

// init pins the process to a single OS thread as early as possible when
// running as the "init" subcommand, before the cli framework dispatches
// to initCommand below.
func init() {
	if len(os.Args) > 1 && os.Args[1] == "init" {
		runtime.GOMAXPROCS(1)
		runtime.LockOSThread()
	}
}

// initCommand runs the container initialization stage; on success
// StartInitialization execs the container process and never returns.
var initCommand = cli.Command{
	Name:  "init",
	Usage: `initialize the namespaces and launch the process (do not call it outside of runc)`,
	Action: func(context *cli.Context) error {
		factory, _ := libcontainer.New("")
		if err := factory.StartInitialization(); err != nil {
			// as the error is sent back to the parent there is no need to log
			// or write it to stderr because the parent process will handle this
			os.Exit(1)
		}
		panic("libcontainer: container init failed to exec")
	},
}