func (b *Builder) Build() error {
	logs.WithF(b.fields).Info("Building aci")

	lfd, err := rktcommon.GetRktLockFD()
	if err != nil {
		return errs.WithEF(err, b.fields, "can't get rkt lock fd")
	}

	if err := sys.CloseOnExec(lfd, true); err != nil {
		return errs.WithEF(err, b.fields, "can't set FD_CLOEXEC on rkt lock")
	}

	if err := b.runBuild(); err != nil {
		return err
	}

	if err := b.writeManifest(); err != nil {
		return err
	}

	if err := b.tarAci(); err != nil {
		return err
	}

	return nil
}
func run() int {
	lfd, err := common.GetRktLockFD()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get rkt lock fd: %v\n", err)
		return 1
	}

	if err := sys.CloseOnExec(lfd, true); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to set FD_CLOEXEC on rkt lock: %v\n", err)
		return 1
	}

	if err := stage1common.WritePpid(os.Getpid()); err != nil {
		fmt.Fprintf(os.Stderr, "write ppid: %v\n", err)
		return 1
	}

	fmt.Println("success, stub stage1 would at this point switch to stage2")

	return 0
}
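// NOTE: hedged sketch, not taken from the rkt sources. The stub above and the
// stage1 entry points below all call stage1common.WritePpid / WritePid before
// exec'ing into the next stage; a minimal implementation consistent with those
// call sites would persist the pid to a file in the pod directory (the callers
// run with the pod directory as their working directory, so a relative file
// name such as "ppid" or "pid" lands next to the pod manifest).
// Requires "io/ioutil" and "strconv".
func writePidFileSketch(pid int, filename string) error {
	// write the numeric pid so rkt's stage0 can later read it back
	return ioutil.WriteFile(filename, []byte(strconv.Itoa(pid)), 0644)
}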
func stage1() int {
	uuid, err := types.NewUUID(flag.Arg(0))
	if err != nil {
		log.PrintE("UUID is missing or malformed", err)
		return 1
	}

	root := "."
	p, err := stage1commontypes.LoadPod(root, uuid)
	if err != nil {
		log.PrintE("failed to load pod", err)
		return 1
	}

	// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed when invoking
	// network plugins
	lfd, err := common.GetRktLockFD()
	if err != nil {
		log.PrintE("failed to get rkt lock fd", err)
		return 1
	}

	if err := sys.CloseOnExec(lfd, true); err != nil {
		log.PrintE("failed to set FD_CLOEXEC on rkt lock", err)
		return 1
	}

	mirrorLocalZoneInfo(p.Root)

	flavor, _, err := stage1initcommon.GetFlavor(p)
	if err != nil {
		log.PrintE("failed to get stage1 flavor", err)
		return 3
	}

	var n *networking.Networking
	if netList.Contained() {
		fps, err := forwardedPorts(p)
		if err != nil {
			log.Error(err)
			return 6
		}

		n, err = networking.Setup(root, p.UUID, fps, netList, localConfig, flavor, debug)
		if err != nil {
			log.PrintE("failed to setup network", err)
			return 6
		}

		if err = n.Save(); err != nil {
			log.PrintE("failed to save networking state", err)
			n.Teardown(flavor, debug)
			return 6
		}

		if len(mdsToken) > 0 {
			hostIP, err := n.GetDefaultHostIP()
			if err != nil {
				log.PrintE("failed to get default Host IP", err)
				return 6
			}

			p.MetadataServiceURL = common.MetadataServicePublicURL(hostIP, mdsToken)
		}
	} else {
		if flavor == "kvm" {
			log.Print("flavor kvm requires private network configuration (try --net)")
			return 6
		}
		if len(mdsToken) > 0 {
			p.MetadataServiceURL = common.MetadataServicePublicURL(localhostIP, mdsToken)
		}
	}

	if err = stage1initcommon.WriteDefaultTarget(p); err != nil {
		log.PrintE("failed to write default.target", err)
		return 2
	}

	if err = stage1initcommon.WritePrepareAppTemplate(p); err != nil {
		log.PrintE("failed to write prepare-app service template", err)
		return 2
	}

	if err := stage1initcommon.SetJournalPermissions(p); err != nil {
		log.PrintE("warning: error setting journal ACLs, you'll need root to read the pod journal", err)
	}

	if flavor == "kvm" {
		if err := KvmPodToSystemd(p, n); err != nil {
			log.PrintE("failed to configure systemd for kvm", err)
			return 2
		}
	}

	if err = stage1initcommon.PodToSystemd(p, interactive, flavor, privateUsers); err != nil {
		log.PrintE("failed to configure systemd", err)
		return 2
	}

	args, env, err := getArgsEnv(p, flavor, debug, n)
	if err != nil {
		log.Error(err)
		return 3
	}

	// create a separate mount namespace so the cgroup filesystems
	// are unmounted when exiting the pod
	if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
		log.FatalE("error unsharing", err)
	}

	// we recursively make / a "shared and slave" so mount events from the
	// new namespace don't propagate to the host namespace but mount events
	// from the host propagate to the new namespace and are forwarded to
	// its peer group
	// See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
	if err := syscall.Mount("", "/", "none", syscall.MS_REC|syscall.MS_SLAVE, ""); err != nil {
		log.FatalE("error making / a slave mount", err)
	}
	if err := syscall.Mount("", "/", "none", syscall.MS_REC|syscall.MS_SHARED, ""); err != nil {
		log.FatalE("error making / a shared and slave mount", err)
	}

	enabledCgroups, err := cgroup.GetEnabledCgroups()
	if err != nil {
		log.FatalE("error getting cgroups", err)
		return 5
	}

	// mount host cgroups in the rkt mount namespace
	if err := mountHostCgroups(enabledCgroups); err != nil {
		log.FatalE("couldn't mount the host cgroups", err)
		return 5
	}

	var serviceNames []string
	for _, app := range p.Manifest.Apps {
		serviceNames = append(serviceNames, stage1initcommon.ServiceUnitName(app.Name))
	}

	s1Root := common.Stage1RootfsPath(p.Root)
	machineID := stage1initcommon.GetMachineID(p)
	subcgroup, err := getContainerSubCgroup(machineID)
	if err == nil {
		if err := mountContainerCgroups(s1Root, enabledCgroups, subcgroup, serviceNames); err != nil {
			log.PrintE("couldn't mount the container cgroups", err)
			return 5
		}
	} else {
		log.PrintE("continuing with per-app isolators disabled", err)
	}

	if err = stage1common.WritePpid(os.Getpid()); err != nil {
		log.Error(err)
		return 4
	}

	err = stage1common.WithClearedCloExec(lfd, func() error {
		return syscall.Exec(args[0], args, env)
	})
	if err != nil {
		log.PrintE(fmt.Sprintf("failed to execute %q", args[0]), err)
		return 7
	}

	return 0
}
func stage1() int {
	uuid, err := types.NewUUID(flag.Arg(0))
	if err != nil {
		log.FatalE("UUID is missing or malformed", err)
	}

	root := "."
	p, err := stage1commontypes.LoadPod(root, uuid)
	if err != nil {
		log.FatalE("failed to load pod", err)
	}

	// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed when invoking
	// network plugins
	lfd, err := common.GetRktLockFD()
	if err != nil {
		log.FatalE("failed to get rkt lock fd", err)
	}

	if err := sys.CloseOnExec(lfd, true); err != nil {
		log.FatalE("failed to set FD_CLOEXEC on rkt lock", err)
	}

	mirrorLocalZoneInfo(p.Root)

	flavor, _, err := stage1initcommon.GetFlavor(p)
	if err != nil {
		log.FatalE("failed to get stage1 flavor", err)
	}

	var n *networking.Networking
	if netList.Contained() {
		fps, err := commonnet.ForwardedPorts(p.Manifest)
		if err != nil {
			log.FatalE("error initializing forwarding ports", err)
		}

		noDNS := dnsConfMode.Pairs["resolv"] != "default" // force ignore CNI DNS results
		n, err = networking.Setup(root, p.UUID, fps, netList, localConfig, flavor, noDNS, debug)
		if err != nil {
			log.FatalE("failed to setup network", err)
		}

		if err = n.Save(); err != nil {
			log.PrintE("failed to save networking state", err)
			n.Teardown(flavor, debug)
			return 254
		}

		if len(mdsToken) > 0 {
			hostIP, err := n.GetForwardableNetHostIP()
			if err != nil {
				log.FatalE("failed to get default Host IP", err)
			}

			p.MetadataServiceURL = common.MetadataServicePublicURL(hostIP, mdsToken)
		}
	} else {
		if flavor == "kvm" {
			log.Fatal("flavor kvm requires private network configuration (try --net)")
		}
		if len(mdsToken) > 0 {
			p.MetadataServiceURL = common.MetadataServicePublicURL(localhostIP, mdsToken)
		}
	}

	insecureOptions := stage1initcommon.Stage1InsecureOptions{
		DisablePaths:        disablePaths,
		DisableCapabilities: disableCapabilities,
		DisableSeccomp:      disableSeccomp,
	}

	mnt := fs.NewLoggingMounter(
		fs.MounterFunc(syscall.Mount),
		fs.UnmounterFunc(syscall.Unmount),
		diag.Printf,
	)

	if dnsConfMode.Pairs["resolv"] == "host" {
		stage1initcommon.UseHostResolv(mnt, root)
	}

	if dnsConfMode.Pairs["hosts"] == "host" {
		stage1initcommon.UseHostHosts(mnt, root)
	}

	if mutable {
		if err = stage1initcommon.MutableEnv(p); err != nil {
			log.FatalE("cannot initialize mutable environment", err)
		}
	} else {
		if err = stage1initcommon.ImmutableEnv(p, interactive, privateUsers, insecureOptions); err != nil {
			log.FatalE("cannot initialize immutable environment", err)
		}
	}

	if err := stage1initcommon.SetJournalPermissions(p); err != nil {
		log.PrintE("warning: error setting journal ACLs, you'll need root to read the pod journal", err)
	}

	if flavor == "kvm" {
		kvm.InitDebug(debug)
		if err := KvmNetworkingToSystemd(p, n); err != nil {
			log.FatalE("failed to configure systemd for kvm", err)
		}
	}

	canMachinedRegister := false
	if flavor != "kvm" {
		// kvm doesn't register with systemd right now, see #2664.
		canMachinedRegister = machinedRegister()
	}
	diag.Printf("canMachinedRegister %t", canMachinedRegister)

	args, env, err := getArgsEnv(p, flavor, canMachinedRegister, debug, n, insecureOptions)
	if err != nil {
		log.FatalE("cannot get environment", err)
	}
	diag.Printf("args %q", args)
	diag.Printf("env %q", env)

	// create a separate mount namespace so the cgroup filesystems
	// are unmounted when exiting the pod
	if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
		log.FatalE("error unsharing", err)
	}

	// we recursively make / a "shared and slave" so mount events from the
	// new namespace don't propagate to the host namespace but mount events
	// from the host propagate to the new namespace and are forwarded to
	// its peer group
	// See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
	if err := mnt.Mount("", "/", "none", syscall.MS_REC|syscall.MS_SLAVE, ""); err != nil {
		log.FatalE("error making / a slave mount", err)
	}
	if err := mnt.Mount("", "/", "none", syscall.MS_REC|syscall.MS_SHARED, ""); err != nil {
		log.FatalE("error making / a shared and slave mount", err)
	}

	unifiedCgroup, err := cgroup.IsCgroupUnified("/")
	if err != nil {
		log.FatalE("error determining cgroup version", err)
	}
	diag.Printf("unifiedCgroup %t", unifiedCgroup)

	s1Root := common.Stage1RootfsPath(p.Root)
	machineID := stage1initcommon.GetMachineID(p)

	subcgroup, err := getContainerSubCgroup(machineID, canMachinedRegister, unifiedCgroup)
	if err != nil {
		log.FatalE("error getting container subcgroup", err)
	}
	diag.Printf("subcgroup %q", subcgroup)

	if err := ioutil.WriteFile(filepath.Join(p.Root, "subcgroup"),
		[]byte(fmt.Sprintf("%s", subcgroup)), 0644); err != nil {
		log.FatalE("cannot write subcgroup file", err)
	}

	if !unifiedCgroup {
		enabledCgroups, err := v1.GetEnabledCgroups()
		if err != nil {
			log.FatalE("error getting v1 cgroups", err)
		}
		diag.Printf("enabledCgroups %q", enabledCgroups)

		if err := mountHostV1Cgroups(mnt, enabledCgroups); err != nil {
			log.FatalE("couldn't mount the host v1 cgroups", err)
		}

		if !canMachinedRegister {
			if err := v1.JoinSubcgroup("systemd", subcgroup); err != nil {
				log.FatalE(fmt.Sprintf("error joining subcgroup %q", subcgroup), err)
			}
		}

		var serviceNames []string
		for _, app := range p.Manifest.Apps {
			serviceNames = append(serviceNames, stage1initcommon.ServiceUnitName(app.Name))
		}
		diag.Printf("serviceNames %q", serviceNames)

		if err := mountContainerV1Cgroups(mnt, s1Root, enabledCgroups, subcgroup, serviceNames, insecureOptions); err != nil {
			log.FatalE("couldn't mount the container v1 cgroups", err)
		}
	}

	// KVM flavor has a bit different logic in handling pid vs ppid, for details look into #2389
	// it doesn't require the existence of a "ppid", instead it registers the current pid (which
	// will be reused by lkvm binary) as a pod process pid used during entering
	pid_filename := "ppid"
	if flavor == "kvm" {
		pid_filename = "pid"
	}

	if err = stage1common.WritePid(os.Getpid(), pid_filename); err != nil {
		log.FatalE("error writing pid", err)
	}

	if flavor == "kvm" {
		if err := KvmPrepareMounts(s1Root, p); err != nil {
			log.FatalE("error preparing mounts", err)
		}
	}

	err = stage1common.WithClearedCloExec(lfd, func() error {
		return syscall.Exec(args[0], args, env)
	})
	if err != nil {
		log.FatalE(fmt.Sprintf("failed to execute %q", args[0]), err)
	}

	return 0
}
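// NOTE: hedged sketch, not copied from the rkt sources. Every stage1 variant
// here ends with stage1common.WithClearedCloExec(lfd, ...): the lock fd was
// marked FD_CLOEXEC earlier so it doesn't leak into network plugins, but it
// must survive the final exec so the pod keeps holding the rkt lock. An
// implementation consistent with that usage would temporarily clear the flag
// around the callback (using the same sys package seen above):
func withClearedCloExecSketch(lfd int, f func() error) error {
	// clear FD_CLOEXEC so the fd survives the exec performed inside f
	if err := sys.CloseOnExec(lfd, false); err != nil {
		return err
	}
	// restore the flag if f returns, i.e. the exec itself failed
	defer sys.CloseOnExec(lfd, true)

	return f()
}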
func stage1() int {
	uuid, err := types.NewUUID(flag.Arg(0))
	if err != nil {
		log.Print("UUID is missing or malformed\n")
		return 1
	}

	root := "."
	p, err := stage1commontypes.LoadPod(root, uuid)
	if err != nil {
		log.PrintE("can't load pod", err)
		return 1
	}

	// Sanity checks
	if len(p.Manifest.Apps) != 1 {
		log.Printf("flavor %q only supports 1 application per Pod for now", flavor)
		return 1
	}

	ra := p.Manifest.Apps[0]

	imgName := p.AppNameToImageName(ra.Name)
	args := ra.App.Exec
	if len(args) == 0 {
		log.Printf(`image %q has an empty "exec" (try --exec=BINARY)`, imgName)
		return 1
	}

	lfd, err := common.GetRktLockFD()
	if err != nil {
		log.PrintE("can't get rkt lock fd", err)
		return 1
	}

	// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed after execution is finished
	if err := sys.CloseOnExec(lfd, true); err != nil {
		log.PrintE("can't set FD_CLOEXEC on rkt lock", err)
		return 1
	}

	workDir := "/"
	if ra.App.WorkingDirectory != "" {
		workDir = ra.App.WorkingDirectory
	}

	env := []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}
	for _, e := range ra.App.Environment {
		env = append(env, e.Name+"="+e.Value)
	}

	rfs := filepath.Join(common.AppPath(p.Root, ra.Name), "rootfs")

	if err := copyResolv(p); err != nil {
		log.PrintE("can't copy /etc/resolv.conf", err)
		return 1
	}

	argFlyMounts, err := evaluateMounts(rfs, string(ra.Name), p)
	if err != nil {
		log.PrintE("can't evaluate mounts", err)
		return 1
	}

	effectiveMounts := append(
		[]flyMount{
			{"", "", "/dev", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/dev", rfs, "/dev", "none", syscall.MS_BIND | syscall.MS_REC},

			{"", "", "/proc", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/proc", rfs, "/proc", "none", syscall.MS_BIND | syscall.MS_REC},

			{"", "", "/sys", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/sys", rfs, "/sys", "none", syscall.MS_BIND | syscall.MS_REC},

			{"tmpfs", rfs, "/tmp", "tmpfs", 0},
		},
		argFlyMounts...,
	)

	for _, mount := range effectiveMounts {
		var (
			err            error
			hostPathInfo   os.FileInfo
			targetPathInfo os.FileInfo
		)

		if strings.HasPrefix(mount.HostPath, "/") {
			if hostPathInfo, err = os.Stat(mount.HostPath); err != nil {
				log.PrintE(fmt.Sprintf("stat of host path %s", mount.HostPath), err)
				return 1
			}
		} else {
			hostPathInfo = nil
		}

		absTargetPath := filepath.Join(mount.TargetPrefixPath, mount.RelTargetPath)
		if targetPathInfo, err = os.Stat(absTargetPath); err != nil && !os.IsNotExist(err) {
			log.PrintE(fmt.Sprintf("stat of target path %s", absTargetPath), err)
			return 1
		}

		switch {
		case targetPathInfo == nil:
			absTargetPathParent, _ := filepath.Split(absTargetPath)
			if err := os.MkdirAll(absTargetPathParent, 0755); err != nil {
				log.PrintE(fmt.Sprintf("can't create directory %q", absTargetPath), err)
				return 1
			}
			switch {
			case hostPathInfo == nil || hostPathInfo.IsDir():
				if err := os.Mkdir(absTargetPath, 0755); err != nil {
					log.PrintE(fmt.Sprintf("can't create directory %q", absTargetPath), err)
					return 1
				}
			case !hostPathInfo.IsDir():
				file, err := os.OpenFile(absTargetPath, os.O_CREATE, 0700)
				if err != nil {
					log.PrintE(fmt.Sprintf("can't create file %q", absTargetPath), err)
					return 1
				}
				file.Close()
			}
		case hostPathInfo != nil:
			switch {
			case hostPathInfo.IsDir() && !targetPathInfo.IsDir():
				log.Printf("can't mount because %q is a directory while %q is not", mount.HostPath, absTargetPath)
				return 1
			case !hostPathInfo.IsDir() && targetPathInfo.IsDir():
				log.Printf("can't mount because %q is not a directory while %q is", mount.HostPath, absTargetPath)
				return 1
			}
		}

		if err := syscall.Mount(mount.HostPath, absTargetPath, mount.Fs, mount.Flags, ""); err != nil {
			log.PrintE(fmt.Sprintf("can't mount %q on %q with flags %v", mount.HostPath, absTargetPath, mount.Flags), err)
			return 1
		}
	}

	if err = stage1common.WritePid(os.Getpid(), "pid"); err != nil {
		log.Error(err)
		return 1
	}

	var uidResolver, gidResolver user.Resolver
	var uid, gid int

	uidResolver, err = user.NumericIDs(ra.App.User)
	if err != nil {
		uidResolver, err = user.IDsFromStat(rfs, ra.App.User, nil)
	}

	if err != nil { // give up
		log.PrintE(fmt.Sprintf("invalid user %q", ra.App.User), err)
		return 1
	}

	if uid, _, err = uidResolver.IDs(); err != nil {
		log.PrintE(fmt.Sprintf("failed to configure user %q", ra.App.User), err)
		return 1
	}

	gidResolver, err = user.NumericIDs(ra.App.Group)
	if err != nil {
		gidResolver, err = user.IDsFromStat(rfs, ra.App.Group, nil)
	}

	if err != nil { // give up
		log.PrintE(fmt.Sprintf("invalid group %q", ra.App.Group), err)
		return 1
	}

	if _, gid, err = gidResolver.IDs(); err != nil {
		log.PrintE(fmt.Sprintf("failed to configure group %q", ra.App.Group), err)
		return 1
	}

	diag.Printf("chroot to %q", rfs)
	if err := syscall.Chroot(rfs); err != nil {
		log.PrintE("can't chroot", err)
		return 1
	}

	if err := os.Chdir(workDir); err != nil {
		log.PrintE(fmt.Sprintf("can't change to working directory %q", workDir), err)
		return 1
	}

	// lock the current goroutine to its current OS thread.
	// This will force the subsequent syscalls to be executed in the same OS thread as Setresuid, and Setresgid,
	// see https://github.com/golang/go/issues/1435#issuecomment-66054163.
	runtime.LockOSThread()

	diag.Printf("setting uid %d gid %d", uid, gid)

	if err := syscall.Setresgid(gid, gid, gid); err != nil {
		log.PrintE(fmt.Sprintf("can't set gid %d", gid), err)
		return 1
	}

	if err := syscall.Setresuid(uid, uid, uid); err != nil {
		log.PrintE(fmt.Sprintf("can't set uid %d", uid), err)
		return 1
	}

	diag.Printf("execing %q in %q", args, rfs)
	err = stage1common.WithClearedCloExec(lfd, func() error {
		return syscall.Exec(args[0], args, env)
	})
	if err != nil {
		log.PrintE(fmt.Sprintf("can't execute %q", args[0]), err)
		return 1
	}

	return 0
}
func stage1() int {
	uuid, err := types.NewUUID(flag.Arg(0))
	if err != nil {
		fmt.Fprintln(os.Stderr, "UUID is missing or malformed")
		return 1
	}

	root := "."
	p, err := LoadPod(root, uuid)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load pod: %v\n", err)
		return 1
	}

	// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed when invoking
	// network plugins
	lfd, err := common.GetRktLockFD()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get rkt lock fd: %v\n", err)
		return 1
	}

	if err := sys.CloseOnExec(lfd, true); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to set FD_CLOEXEC on rkt lock: %v\n", err)
		return 1
	}

	mirrorLocalZoneInfo(p.Root)

	flavor, _, err := p.getFlavor()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get stage1 flavor: %v\n", err)
		return 3
	}

	var n *networking.Networking
	if privNet.Any() {
		fps, err := forwardedPorts(p)
		if err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
			return 6
		}

		n, err = networking.Setup(root, p.UUID, fps, privNet, localConfig, flavor)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to setup network: %v\n", err)
			return 6
		}

		if err = n.Save(); err != nil {
			fmt.Fprintf(os.Stderr, "Failed to save networking state %v\n", err)
			n.Teardown(flavor)
			return 6
		}

		if len(mdsToken) > 0 {
			hostIP, err := n.GetDefaultHostIP()
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to get default Host IP: %v\n", err)
				return 6
			}

			p.MetadataServiceURL = common.MetadataServicePublicURL(hostIP, mdsToken)
		}
	} else {
		if flavor == "kvm" {
			fmt.Fprintf(os.Stderr, "Flavor kvm requires private network configuration (try --private-net).\n")
			return 6
		}
		if len(mdsToken) > 0 {
			p.MetadataServiceURL = common.MetadataServicePublicURL(localhostIP, mdsToken)
		}
	}

	if err = p.WritePrepareAppTemplate(); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write prepare-app service template: %v\n", err)
		return 2
	}

	if err = p.PodToSystemd(interactive, flavor); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to configure systemd: %v\n", err)
		return 2
	}

	args, env, err := getArgsEnv(p, flavor, debug, n)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		return 3
	}

	// create a separate mount namespace so the cgroup filesystems
	// are unmounted when exiting the pod
	if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
		log.Fatalf("error unsharing: %v", err)
	}

	// we recursively make / a "shared and slave" so mount events from the
	// new namespace don't propagate to the host namespace but mount events
	// from the host propagate to the new namespace and are forwarded to
	// its peer group
	// See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
	if err := syscall.Mount("", "/", "none", syscall.MS_REC|syscall.MS_SLAVE, ""); err != nil {
		log.Fatalf("error making / a slave mount: %v", err)
	}
	if err := syscall.Mount("", "/", "none", syscall.MS_REC|syscall.MS_SHARED, ""); err != nil {
		log.Fatalf("error making / a shared and slave mount: %v", err)
	}

	var serviceNames []string
	for _, app := range p.Manifest.Apps {
		serviceNames = append(serviceNames, ServiceUnitName(app.Name))
	}

	s1Root := common.Stage1RootfsPath(p.Root)
	machineID := p.GetMachineID()
	subcgroup, err := getContainerSubCgroup(machineID)
	if err == nil {
		if err := cgroup.CreateCgroups(s1Root, subcgroup, serviceNames); err != nil {
			fmt.Fprintf(os.Stderr, "Error creating cgroups: %v\n", err)
			return 5
		}
	} else {
		fmt.Fprintf(os.Stderr, "Continuing with per-app isolators disabled: %v\n", err)
	}

	if err = writePpid(os.Getpid()); err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		return 4
	}

	err = withClearedCloExec(lfd, func() error {
		return syscall.Exec(args[0], args, env)
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to execute %q: %v\n", args[0], err)
		return 7
	}

	return 0
}
func stage1() int {
	uuid, err := types.NewUUID(flag.Arg(0))
	if err != nil {
		log.Print("UUID is missing or malformed\n")
		return 1
	}

	root := "."
	p, err := stage1commontypes.LoadPod(root, uuid)
	if err != nil {
		log.PrintE("can't load pod", err)
		return 1
	}

	if len(p.Manifest.Apps) != 1 {
		log.Printf("flavor %q only supports 1 application per Pod for now", flavor)
		return 1
	}

	lfd, err := common.GetRktLockFD()
	if err != nil {
		log.PrintE("can't get rkt lock fd", err)
		return 1
	}

	// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed after execution is finished
	if err := sys.CloseOnExec(lfd, true); err != nil {
		log.PrintE("can't set FD_CLOEXEC on rkt lock", err)
		return 1
	}

	env := []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}
	for _, e := range p.Manifest.Apps[0].App.Environment {
		env = append(env, e.Name+"="+e.Value)
	}

	args := p.Manifest.Apps[0].App.Exec
	rfs := filepath.Join(common.AppPath(p.Root, p.Manifest.Apps[0].Name), "rootfs")

	argFlyMounts, err := evaluateMounts(rfs, string(p.Manifest.Apps[0].Name), p)
	if err != nil {
		log.PrintE("can't evaluate mounts", err)
		return 1
	}

	effectiveMounts := append(
		[]flyMount{
			{"", "", "/dev", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/dev", rfs, "/dev", "none", syscall.MS_BIND | syscall.MS_REC},

			{"", "", "/proc", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/proc", rfs, "/proc", "none", syscall.MS_BIND | syscall.MS_REC},

			{"", "", "/sys", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/sys", rfs, "/sys", "none", syscall.MS_BIND | syscall.MS_REC},

			{"tmpfs", rfs, "/tmp", "tmpfs", 0},
		},
		argFlyMounts...,
	)

	for _, mount := range effectiveMounts {
		var (
			err            error
			hostPathInfo   os.FileInfo
			targetPathInfo os.FileInfo
		)

		if strings.HasPrefix(mount.HostPath, "/") {
			if hostPathInfo, err = os.Stat(mount.HostPath); err != nil {
				log.PrintE(fmt.Sprintf("stat of host directory %s", mount.HostPath), err)
				return 1
			}
		} else {
			hostPathInfo = nil
		}

		absTargetPath := filepath.Join(mount.TargetPrefixPath, mount.RelTargetPath)
		if targetPathInfo, err = os.Stat(absTargetPath); err != nil && !os.IsNotExist(err) {
			log.PrintE(fmt.Sprintf("stat of target directory %s", absTargetPath), err)
			return 1
		}

		switch {
		case targetPathInfo == nil:
			absTargetPathParent, _ := filepath.Split(absTargetPath)
			if err := os.MkdirAll(absTargetPathParent, 0700); err != nil {
				log.PrintE(fmt.Sprintf("can't create directory %q", absTargetPath), err)
				return 1
			}
			switch {
			case hostPathInfo == nil || hostPathInfo.IsDir():
				if err := os.Mkdir(absTargetPath, 0700); err != nil {
					log.PrintE(fmt.Sprintf("can't create directory %q", absTargetPath), err)
					return 1
				}
			case !hostPathInfo.IsDir():
				file, err := os.OpenFile(absTargetPath, os.O_CREATE, 0700)
				if err != nil {
					log.PrintE(fmt.Sprintf("can't create file %q", absTargetPath), err)
					return 1
				}
				file.Close()
			}
		case hostPathInfo != nil:
			switch {
			case hostPathInfo.IsDir() && !targetPathInfo.IsDir():
				log.Printf("can't mount because %q is a directory while %q is not", mount.HostPath, absTargetPath)
				return 1
			case !hostPathInfo.IsDir() && targetPathInfo.IsDir():
				log.Printf("can't mount because %q is not a directory while %q is", mount.HostPath, absTargetPath)
				return 1
			}
		}

		if err := syscall.Mount(mount.HostPath, absTargetPath, mount.Fs, mount.Flags, ""); err != nil {
			log.PrintE(fmt.Sprintf("can't mount %q on %q with flags %v", mount.HostPath, absTargetPath, mount.Flags), err)
			return 1
		}
	}

	if err = stage1common.WritePpid(os.Getpid()); err != nil {
		log.Error(err)
		return 4
	}

	diag.Printf("chroot to %q", rfs)
	if err := syscall.Chroot(rfs); err != nil {
		log.PrintE("can't chroot", err)
		return 1
	}

	if err := os.Chdir("/"); err != nil {
		log.PrintE("can't change to new root directory", err)
		return 1
	}

	diag.Printf("execing %q in %q", args, rfs)
	err = stage1common.WithClearedCloExec(lfd, func() error {
		return syscall.Exec(args[0], args, env)
	})
	if err != nil {
		log.PrintE(fmt.Sprintf("can't execute %q", args[0]), err)
		return 7
	}

	return 0
}
func stage1(rp *stage1commontypes.RuntimePod) int {
	uuid, err := types.NewUUID(flag.Arg(0))
	if err != nil {
		log.Print("UUID is missing or malformed\n")
		return 254
	}

	root := "."
	p, err := stage1commontypes.LoadPod(root, uuid, rp)
	if err != nil {
		log.PrintE("can't load pod", err)
		return 254
	}

	if err := p.SaveRuntime(); err != nil {
		log.FatalE("failed to save runtime parameters", err)
	}

	// Sanity checks
	if len(p.Manifest.Apps) != 1 {
		log.Printf("flavor %q only supports 1 application per Pod for now", flavor)
		return 254
	}

	ra := p.Manifest.Apps[0]

	imgName := p.AppNameToImageName(ra.Name)
	args := ra.App.Exec
	if len(args) == 0 {
		log.Printf(`image %q has an empty "exec" (try --exec=BINARY)`, imgName)
		return 254
	}

	lfd, err := common.GetRktLockFD()
	if err != nil {
		log.PrintE("can't get rkt lock fd", err)
		return 254
	}

	// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed after execution is finished
	if err := sys.CloseOnExec(lfd, true); err != nil {
		log.PrintE("can't set FD_CLOEXEC on rkt lock", err)
		return 254
	}

	workDir := "/"
	if ra.App.WorkingDirectory != "" {
		workDir = ra.App.WorkingDirectory
	}

	env := []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}
	for _, e := range ra.App.Environment {
		env = append(env, e.Name+"="+e.Value)
	}

	rfs := filepath.Join(common.AppPath(p.Root, ra.Name), "rootfs")

	argFlyMounts, err := evaluateMounts(rfs, string(ra.Name), p)
	if err != nil {
		log.PrintE("can't evaluate mounts", err)
		return 254
	}

	effectiveMounts := append(
		[]flyMount{
			{"", "", "/dev", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/dev", rfs, "/dev", "none", syscall.MS_BIND | syscall.MS_REC},

			{"", "", "/proc", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/proc", rfs, "/proc", "none", syscall.MS_BIND | syscall.MS_REC},

			{"", "", "/sys", "none", syscall.MS_REC | syscall.MS_SHARED},
			{"/sys", rfs, "/sys", "none", syscall.MS_BIND | syscall.MS_REC},

			{"tmpfs", rfs, "/tmp", "tmpfs", 0},
		},
		argFlyMounts...,
	)

	/* Process DNS config files
	 *
	 * /etc/resolv.conf: four modes
	 * 'host' - bind-mount host's file
	 * 'stage0' - bind-mount the file created by stage0
	 * 'default' - do nothing (we would respect CNI if fly had networking)
	 * 'none' - do nothing
	 */
	switch p.ResolvConfMode {
	case "host":
		effectiveMounts = append(effectiveMounts,
			flyMount{"/etc/resolv.conf", rfs, "/etc/resolv.conf", "none", syscall.MS_BIND | syscall.MS_RDONLY})
	case "stage0":
		if err := copyResolv(p); err != nil {
			log.PrintE("can't copy /etc/resolv.conf", err)
			return 254
		}
	}

	/*
	 * /etc/hosts: three modes:
	 * 'host' - bind-mount host's file
	 * 'stage0' - bind mount the file created by stage1
	 * 'default' - create a stub /etc/hosts if needed
	 */
	switch p.EtcHostsMode {
	case "host":
		effectiveMounts = append(effectiveMounts,
			flyMount{"/etc/hosts", rfs, "/etc/hosts", "none", syscall.MS_BIND | syscall.MS_RDONLY})
	case "stage0":
		effectiveMounts = append(effectiveMounts, flyMount{
			filepath.Join(common.Stage1RootfsPath(p.Root), "etc", "rkt-hosts"),
			rfs, "/etc/hosts", "none", syscall.MS_BIND | syscall.MS_RDONLY})
	case "default":
		stage2HostsPath := filepath.Join(common.AppRootfsPath(p.Root, ra.Name), "etc", "hosts")
		if _, err := os.Stat(stage2HostsPath); err != nil && os.IsNotExist(err) {
			fallbackHosts := []byte("127.0.0.1 localhost localdomain\n")
			ioutil.WriteFile(stage2HostsPath, fallbackHosts, 0644)
		}
	}

	for _, mount := range effectiveMounts {
		diag.Printf("Processing %+v", mount)

		var (
			err            error
			hostPathInfo   os.FileInfo
			targetPathInfo os.FileInfo
		)

		if strings.HasPrefix(mount.HostPath, "/") {
			if hostPathInfo, err = os.Stat(mount.HostPath); err != nil {
				log.PrintE(fmt.Sprintf("stat of host path %s", mount.HostPath), err)
				return 254
			}
		} else {
			hostPathInfo = nil
		}

		absTargetPath := filepath.Join(mount.TargetPrefixPath, mount.RelTargetPath)
		if targetPathInfo, err = os.Stat(absTargetPath); err != nil && !os.IsNotExist(err) {
			log.PrintE(fmt.Sprintf("stat of target path %s", absTargetPath), err)
			return 254
		}

		switch {
		case (mount.Flags & syscall.MS_REMOUNT) != 0:
			{
				diag.Printf("don't attempt to create files for remount of %q", absTargetPath)
			}
		case targetPathInfo == nil:
			absTargetPathParent, _ := filepath.Split(absTargetPath)
			if err := os.MkdirAll(absTargetPathParent, 0755); err != nil {
				log.PrintE(fmt.Sprintf("can't create directory %q", absTargetPath), err)
				return 254
			}
			switch {
			case hostPathInfo == nil || hostPathInfo.IsDir():
				if err := os.Mkdir(absTargetPath, 0755); err != nil {
					log.PrintE(fmt.Sprintf("can't create directory %q", absTargetPath), err)
					return 254
				}
			case !hostPathInfo.IsDir():
				file, err := os.OpenFile(absTargetPath, os.O_CREATE, 0700)
				if err != nil {
					log.PrintE(fmt.Sprintf("can't create file %q", absTargetPath), err)
					return 254
				}
				file.Close()
			}
		case hostPathInfo != nil:
			switch {
			case hostPathInfo.IsDir() && !targetPathInfo.IsDir():
				log.Printf("can't mount because %q is a directory while %q is not", mount.HostPath, absTargetPath)
				return 254
			case !hostPathInfo.IsDir() && targetPathInfo.IsDir():
				log.Printf("can't mount because %q is not a directory while %q is", mount.HostPath, absTargetPath)
				return 254
			}
		}

		if err := syscall.Mount(mount.HostPath, absTargetPath, mount.Fs, mount.Flags, ""); err != nil {
			log.PrintE(fmt.Sprintf("can't mount %q on %q with flags %v", mount.HostPath, absTargetPath, mount.Flags), err)
			return 254
		}
	}

	if err = stage1common.WritePid(os.Getpid(), "pid"); err != nil {
		log.Error(err)
		return 254
	}

	var uidResolver, gidResolver user.Resolver
	var uid, gid int

	uidResolver, err = user.NumericIDs(ra.App.User)
	if err != nil {
		uidResolver, err = user.IDsFromStat(rfs, ra.App.User, nil)
	}

	if err != nil { // give up
		log.PrintE(fmt.Sprintf("invalid user %q", ra.App.User), err)
		return 254
	}

	if uid, _, err = uidResolver.IDs(); err != nil {
		log.PrintE(fmt.Sprintf("failed to configure user %q", ra.App.User), err)
		return 254
	}

	gidResolver, err = user.NumericIDs(ra.App.Group)
	if err != nil {
		gidResolver, err = user.IDsFromStat(rfs, ra.App.Group, nil)
	}

	if err != nil { // give up
		log.PrintE(fmt.Sprintf("invalid group %q", ra.App.Group), err)
		return 254
	}

	if _, gid, err = gidResolver.IDs(); err != nil {
		log.PrintE(fmt.Sprintf("failed to configure group %q", ra.App.Group), err)
		return 254
	}

	diag.Printf("chroot to %q", rfs)
	if err := syscall.Chroot(rfs); err != nil {
		log.PrintE("can't chroot", err)
		return 254
	}

	if err := os.Chdir(workDir); err != nil {
		log.PrintE(fmt.Sprintf("can't change to working directory %q", workDir), err)
		return 254
	}

	// lock the current goroutine to its current OS thread.
	// This will force the subsequent syscalls to be executed in the same OS thread as Setresuid, and Setresgid,
	// see https://github.com/golang/go/issues/1435#issuecomment-66054163.
	runtime.LockOSThread()

	diag.Printf("setting uid %d gid %d", uid, gid)

	if err := syscall.Setresgid(gid, gid, gid); err != nil {
		log.PrintE(fmt.Sprintf("can't set gid %d", gid), err)
		return 254
	}

	if err := syscall.Setresuid(uid, uid, uid); err != nil {
		log.PrintE(fmt.Sprintf("can't set uid %d", uid), err)
		return 254
	}

	diag.Printf("execing %q in %q", args, rfs)
	err = stage1common.WithClearedCloExec(lfd, func() error {
		return syscall.Exec(args[0], args, env)
	})
	if err != nil {
		log.PrintE(fmt.Sprintf("can't execute %q", args[0]), err)
		return 254
	}

	return 0
}
func stage1() int {
	uuid, err := types.NewUUID(flag.Arg(0))
	if err != nil {
		fmt.Fprintln(os.Stderr, "UUID is missing or malformed")
		return 1
	}

	root := "."
	p, err := LoadPod(root, uuid)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load pod: %v\n", err)
		return 1
	}

	// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed when invoking
	// network plugins
	lfd, err := common.GetRktLockFD()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get rkt lock fd: %v\n", err)
		return 1
	}

	if err := sys.CloseOnExec(lfd, true); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to set FD_CLOEXEC on rkt lock: %v\n", err)
		return 1
	}

	mirrorLocalZoneInfo(p.Root)

	if privNet {
		fps, err := forwardedPorts(p)
		if err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
			return 6
		}

		n, err := networking.Setup(root, p.UUID, fps)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to setup network: %v\n", err)
			return 6
		}
		defer n.Teardown()

		if err = n.Save(); err != nil {
			fmt.Fprintf(os.Stderr, "Failed to save networking state %v\n", err)
			return 6
		}

		p.MetadataServiceURL = common.MetadataServicePublicURL(n.GetDefaultHostIP())

		if err = registerPod(p, n.GetDefaultIP()); err != nil {
			fmt.Fprintf(os.Stderr, "Failed to register pod: %v\n", err)
			return 6
		}
		defer unregisterPod(p)
	}

	if err = p.PodToSystemd(interactive); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to configure systemd: %v\n", err)
		return 2
	}

	args, env, err := getArgsEnv(p, debug)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get execution parameters: %v\n", err)
		return 3
	}

	var execFn func() error

	if privNet {
		cmd := exec.Cmd{
			Path:   args[0],
			Args:   args,
			Stdin:  os.Stdin,
			Stdout: os.Stdout,
			Stderr: os.Stderr,
			Env:    env,
		}
		execFn = cmd.Run
	} else {
		execFn = func() error {
			return syscall.Exec(args[0], args, env)
		}
	}

	err = withClearedCloExec(lfd, execFn)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to execute nspawn: %v\n", err)
		return 5
	}

	return 0
}
// getArgsEnv returns the nspawn args and env according to the usr used
func getArgsEnv(p *Pod, debug bool) ([]string, []string, error) {
	args := []string{}
	env := os.Environ()

	flavor, err := os.Readlink(filepath.Join(common.Stage1RootfsPath(p.Root), "flavor"))
	if err != nil {
		return nil, nil, fmt.Errorf("unable to determine stage1 flavor: %v", err)
	}

	switch flavor {
	case "coreos":
		// when running the coreos-derived stage1 with unpatched systemd-nspawn we need some ld-linux hackery
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), interpBin))
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
		args = append(args, "--boot") // Launch systemd in the pod

		// Note: the coreos flavor uses systemd-nspawn v215 but machinedRegister()
		// checks for the nspawn registration method used since v216. So we will
		// not register when the host has systemd v215.
		if machinedRegister() {
			args = append(args, fmt.Sprintf("--register=true"))
		} else {
			args = append(args, fmt.Sprintf("--register=false"))
		}

		env = append(env, "LD_PRELOAD="+filepath.Join(common.Stage1RootfsPath(p.Root), "fakesdboot.so"))
		env = append(env, "LD_LIBRARY_PATH="+filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib"))

	case "src":
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
		args = append(args, "--boot") // Launch systemd in the pod

		out, err := os.Getwd()
		if err != nil {
			return nil, nil, err
		}

		lfd, err := common.GetRktLockFD()
		if err != nil {
			return nil, nil, err
		}

		args = append(args, fmt.Sprintf("--pid-file=%v", filepath.Join(out, "pid")))
		args = append(args, fmt.Sprintf("--keep-fd=%v", lfd))

		if machinedRegister() {
			args = append(args, fmt.Sprintf("--register=true"))
		} else {
			args = append(args, fmt.Sprintf("--register=false"))
		}

	default:
		return nil, nil, fmt.Errorf("unrecognized stage1 flavor: %q", flavor)
	}

	if !debug {
		args = append(args, "--quiet") // silence most nspawn output (log_warning is currently not covered by this)
	}

	nsargs, err := p.PodToNspawnArgs()
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to generate nspawn args: %v", err)
	}
	args = append(args, nsargs...)

	// Arguments to systemd
	args = append(args, "--")
	args = append(args, "--default-standard-output=tty") // redirect all service logs straight to tty
	if !debug {
		args = append(args, "--log-target=null") // silence systemd output inside pod
		args = append(args, "--show-status=0")   // silence systemd initialization status output
	}

	return args, env, nil
}
// getArgsEnv returns the nspawn args and env according to the usr used
func getArgsEnv(p *Pod, flavor string, systemdStage1Version string, debug bool) ([]string, []string, error) {
	args := []string{}
	env := os.Environ()

	switch flavor {
	case "coreos":
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), interpBin))
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
		args = append(args, "--boot") // Launch systemd in the pod

		if machinedRegister() {
			args = append(args, fmt.Sprintf("--register=true"))
		} else {
			args = append(args, fmt.Sprintf("--register=false"))
		}

		// use only dynamic libraries provided in the image
		env = append(env, "LD_LIBRARY_PATH="+filepath.Join(common.Stage1RootfsPath(p.Root), "usr/lib"))

	case "src":
		args = append(args, filepath.Join(common.Stage1RootfsPath(p.Root), nspawnBin))
		args = append(args, "--boot") // Launch systemd in the pod

		switch systemdStage1Version {
		case "v215":
			lfd, err := common.GetRktLockFD()
			if err != nil {
				return nil, nil, err
			}
			args = append(args, fmt.Sprintf("--keep-fd=%v", lfd))
		case "v219":
			// --keep-fd is not needed thanks to
			// stage1/rootfs/usr_from_src/patches/v219/0005-nspawn-close-extra-fds-before-execing-init.patch
		default:
			// since systemd-nspawn v220 (commit 6b7d2e, "nspawn: close extra fds
			// before execing init"), fds remain open, so --keep-fd is not needed.
		}

		if machinedRegister() {
			args = append(args, fmt.Sprintf("--register=true"))
		} else {
			args = append(args, fmt.Sprintf("--register=false"))
		}

	case "host":
		hostNspawnBin, err := lookupPath("systemd-nspawn", os.Getenv("PATH"))
		if err != nil {
			return nil, nil, err
		}

		// Check dynamically which version is installed on the host
		// Support version >= 220
		versionBytes, err := exec.Command(hostNspawnBin, "--version").CombinedOutput()
		if err != nil {
			return nil, nil, fmt.Errorf("unable to probe %s version: %v", hostNspawnBin, err)
		}
		versionStr := strings.SplitN(string(versionBytes), "\n", 2)[0]
		var version int
		n, err := fmt.Sscanf(versionStr, "systemd %d", &version)
		if err != nil {
			return nil, nil, fmt.Errorf("cannot parse version: %q", versionStr)
		}
		if n != 1 || version < 220 {
			return nil, nil, fmt.Errorf("rkt needs systemd-nspawn >= 220. %s version not supported: %v", hostNspawnBin, versionStr)
		}

		// Copy systemd, bash, etc. in stage1 at run-time
		if err := installAssets(); err != nil {
			return nil, nil, fmt.Errorf("cannot install assets from the host: %v", err)
		}

		args = append(args, hostNspawnBin)
		args = append(args, "--boot") // Launch systemd in the pod
		args = append(args, fmt.Sprintf("--register=true"))

	default:
		return nil, nil, fmt.Errorf("unrecognized stage1 flavor: %q", flavor)
	}

	// link journal only if the host is running systemd and stage1 supports
	// linking
	if util.IsRunningSystemd() && systemdSupportsJournalLinking(systemdStage1Version) {
		// we write /etc/machine-id here because systemd-nspawn needs it to link
		// the container's journal to the host
		mPath := filepath.Join(common.Stage1RootfsPath(p.Root), "etc", "machine-id")
		mId := strings.Replace(p.UUID.String(), "-", "", -1)

		if err := ioutil.WriteFile(mPath, []byte(mId), 0644); err != nil {
			log.Fatalf("error writing /etc/machine-id: %v\n", err)
		}

		args = append(args, "--link-journal=try-guest")
	}

	if !debug {
		args = append(args, "--quiet")             // silence most nspawn output (log_warning is currently not covered by this)
		env = append(env, "SYSTEMD_LOG_LEVEL=err") // silence log_warning too
	}

	keepUnit, err := isRunningFromUnitFile()
	if err != nil {
		return nil, nil, fmt.Errorf("error determining if we're running from a unit file: %v", err)
	}

	if keepUnit {
		args = append(args, "--keep-unit")
	}

	nsargs, err := p.PodToNspawnArgs()
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to generate nspawn args: %v", err)
	}
	args = append(args, nsargs...)

	// Arguments to systemd
	args = append(args, "--")
	args = append(args, "--default-standard-output=tty") // redirect all service logs straight to tty
	if !debug {
		args = append(args, "--log-target=null") // silence systemd output inside pod
		// TODO remove --log-level=warning when we update stage1 to systemd v222
		args = append(args, "--log-level=warning") // limit log output (systemd-shutdown ignores --log-target)
		args = append(args, "--show-status=0")     // silence systemd initialization status output
	}

	return args, env, nil
}
func stage1() int {
	uuid, err := types.NewUUID(flag.Arg(0))
	if err != nil {
		fmt.Fprintln(os.Stderr, "UUID is missing or malformed")
		return 1
	}

	root := "."
	p, err := LoadPod(root, uuid)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to load pod: %v\n", err)
		return 1
	}

	// set close-on-exec flag on RKT_LOCK_FD so it gets correctly closed when invoking
	// network plugins
	lfd, err := common.GetRktLockFD()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get rkt lock fd: %v\n", err)
		return 1
	}

	if err := sys.CloseOnExec(lfd, true); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to set FD_CLOEXEC on rkt lock: %v\n", err)
		return 1
	}

	mirrorLocalZoneInfo(p.Root)

	if privNet.Any() {
		fps, err := forwardedPorts(p)
		if err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
			return 6
		}

		n, err := networking.Setup(root, p.UUID, fps, privNet)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to setup network: %v\n", err)
			return 6
		}
		defer n.Teardown()

		if err = n.Save(); err != nil {
			fmt.Fprintf(os.Stderr, "Failed to save networking state %v\n", err)
			return 6
		}

		hostIP, err := n.GetDefaultHostIP()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to get default Host IP: %v\n", err)
			return 6
		}

		mdsToken, err := generateMDSToken()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to generate MDS token: %v\n", err)
			return 8
		}

		p.MetadataServiceURL = common.MetadataServicePublicURL(hostIP, mdsToken)

		if err = registerPod(p, mdsToken); err != nil {
			fmt.Fprintf(os.Stderr, "Failed to register pod: %v\n", err)
			return 8
		}
		defer unregisterPod(p)
	}

	flavor, systemdStage1Version, err := p.getFlavor()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to get stage1 flavor: %v\n", err)
		return 3
	}

	if err = p.WritePrepareAppTemplate(systemdStage1Version); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write prepare-app service template: %v\n", err)
		return 2
	}

	if err = p.PodToSystemd(interactive); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to configure systemd: %v\n", err)
		return 2
	}

	args, env, err := getArgsEnv(p, flavor, systemdStage1Version, debug)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		return 3
	}

	appHashes := p.GetAppHashes()
	s1Root := common.Stage1RootfsPath(p.Root)
	machineID := p.GetMachineID()
	subcgroup, err := getContainerSubCgroup(machineID)
	if err == nil {
		if err := cgroup.CreateCgroups(s1Root, subcgroup, appHashes); err != nil {
			fmt.Fprintf(os.Stderr, "Error creating cgroups: %v\n", err)
			return 5
		}
	} else {
		fmt.Fprintf(os.Stderr, "Continuing with per-app isolators disabled: %v\n", err)
	}

	var execFn func() error

	if privNet.Any() {
		cmd := exec.Cmd{
			Path:   args[0],
			Args:   args,
			Stdin:  os.Stdin,
			Stdout: os.Stdout,
			Stderr: os.Stderr,
			Env:    env,
		}
		execFn = func() error {
			err = cmd.Start()
			if err != nil {
				return fmt.Errorf("Failed to start nspawn: %v\n", err)
			}
			if err = writePpid(cmd.Process.Pid); err != nil {
				return err
			}
			return cmd.Wait()
		}
	} else {
		if err = writePpid(os.Getpid()); err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
			return 4
		}
		execFn = func() error {
			return syscall.Exec(args[0], args, env)
		}
	}

	err = withClearedCloExec(lfd, execFn)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to execute nspawn: %v\n", err)
		return 7
	}

	return 0
}