func verifyRange(name string, idmap shared.IdmapEntry, c containers) (int, int, error) {
	lineage := strings.Split(name, "/")

	// Top level containers map directly onto the host.
	if len(lineage) == 1 {
		return idmap.Hostid, idmap.Hostid + idmap.Maprange, nil
	}

	last := len(lineage) - 1
	pname := strings.Join(lineage[0:last], "/")
	parent, ok := c[pname]
	if !ok || parent.hostmin == -1 {
		return 0, 0, fmt.Errorf("Parent for %s (%s) is undefined", name, pname)
	}

	pidmap := parent.idmap.Idmap[0]
	if idmap.Nsid+idmap.Maprange >= pidmap.Nsid+pidmap.Maprange || idmap.Hostid < pidmap.Nsid {
		return 0, 0, fmt.Errorf("Mapping for %s exceeds its parent's, parent ids should be between %d - %d", name, pidmap.Nsid, pidmap.Nsid+pidmap.Maprange-1)
	}

	// Make an idmap shifting the parent's mapping straight onto the host.
	absstr := fmt.Sprintf("b:%d:%d:%d", pidmap.Nsid, parent.hostmin, pidmap.Maprange)
	m := shared.IdmapSet{}
	m, err := m.Append(absstr)
	if err != nil {
		return 0, 0, err
	}

	// Map the desired 'hostid' (which is really the parent-ns-id) onto the host.
	hoststart, _ := m.ShiftIntoNs(idmap.Hostid, idmap.Hostid)
	hostend := hoststart + idmap.Maprange
	return hoststart, hostend, nil
}
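The "b:%d:%d:%d" string above builds a single idmap entry that translates parent-namespace ids straight onto host ids. A minimal self-contained sketch of that arithmetic, assuming a simplified entry type; shiftIntoHost is a hypothetical stand-in for what ShiftIntoNs does with a one-entry set:

package main

import "fmt"

// entry is a hypothetical, simplified stand-in for shared.IdmapEntry:
// ids [Nsid, Nsid+Maprange) in the namespace map onto
// [Hostid, Hostid+Maprange) on the host.
type entry struct {
	Nsid, Hostid, Maprange int
}

// shiftIntoHost translates a parent-namespace id to a host id.
func shiftIntoHost(id int, e entry) (int, error) {
	if id < e.Nsid || id >= e.Nsid+e.Maprange {
		return -1, fmt.Errorf("id %d outside mapped range", id)
	}
	return e.Hostid + (id - e.Nsid), nil
}

func main() {
	// The parent maps its ids 0-65535 onto host ids starting at
	// 100000, i.e. the "b:0:100000:65536" case.
	parent := entry{Nsid: 0, Hostid: 100000, Maprange: 65536}

	// A child asking for parent id 1000 lands on host id 101000.
	host, err := shiftIntoHost(1000, parent)
	fmt.Println(host, err) // 101000 <nil>
}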
func ShiftIfNecessary(container container, srcIdmap *shared.IdmapSet) error {
	dstIdmap := container.IdmapSet()
	if dstIdmap == nil {
		dstIdmap = new(shared.IdmapSet)
	}

	if !reflect.DeepEqual(srcIdmap, dstIdmap) {
		if err := srcIdmap.UnshiftRootfs(container.Path()); err != nil {
			return err
		}

		if err := dstIdmap.ShiftRootfs(container.Path()); err != nil {
			return err
		}
	}

	return nil
}
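ShiftIfNecessary only rewrites file ownership when the two maps actually differ; reflect.DeepEqual compares the full entry lists. A small self-contained illustration of that gate, using hypothetical mini types rather than LXD's own:

package main

import (
	"fmt"
	"reflect"
)

// miniEntry and miniSet are hypothetical stand-ins for
// shared.IdmapEntry and shared.IdmapSet.
type miniEntry struct{ Nsid, Hostid, Maprange int }
type miniSet struct{ Idmap []miniEntry }

func main() {
	src := &miniSet{Idmap: []miniEntry{{0, 100000, 65536}}}
	dst := &miniSet{Idmap: []miniEntry{{0, 200000, 65536}}}

	// Equal maps mean the on-disk ids are already correct and the
	// expensive unshift/shift pass over the rootfs can be skipped.
	fmt.Println(reflect.DeepEqual(src, src)) // true: no shift needed
	fmt.Println(reflect.DeepEqual(src, dst)) // false: unshift, then shift
}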
func containerFilePut(pid int, r *http.Request, p string, idmapset *shared.IdmapSet) Response {
	uid, gid, mode, err := shared.ParseLXDFileHeaders(r.Header)
	if err != nil {
		return BadRequest(err)
	}

	uid, gid = idmapset.ShiftIntoNs(uid, gid)
	if uid == -1 || gid == -1 {
		return BadRequest(fmt.Errorf("unmapped uid or gid specified"))
	}

	temp, err := ioutil.TempFile("", "lxd_forkputfile_")
	if err != nil {
		return InternalError(err)
	}
	defer func() {
		temp.Close()
		os.Remove(temp.Name())
	}()

	_, err = io.Copy(temp, r.Body)
	if err != nil {
		return InternalError(err)
	}

	cmd := exec.Command(
		os.Args[0],
		"forkputfile",
		temp.Name(),
		fmt.Sprintf("%d", pid),
		p,
		fmt.Sprintf("%d", uid),
		fmt.Sprintf("%d", gid),
		fmt.Sprintf("%d", mode&os.ModePerm),
	)

	out, err := cmd.CombinedOutput()
	if err != nil {
		return InternalError(fmt.Errorf("%s: %s", err.Error(), string(out)))
	}

	return EmptySyncResponse
}
func run() error {
	if len(os.Args) < 3 {
		if len(os.Args) > 1 && (os.Args[1] == "-h" || os.Args[1] == "--help" || os.Args[1] == "help") {
			help(os.Args[0], 0)
		} else {
			help(os.Args[0], 1)
		}
	}

	directory := os.Args[1]
	idmap := shared.IdmapSet{}
	testmode := false
	reverse := false

	for pos := 2; pos < len(os.Args); pos++ {
		switch os.Args[pos] {
		case "-r", "--reverse":
			reverse = true
		case "t", "-t", "--test", "test":
			testmode = true
		default:
			var err error
			idmap, err = idmap.Append(os.Args[pos])
			if err != nil {
				return err
			}
		}
	}

	if idmap.Len() == 0 {
		fmt.Printf("No idmaps given\n")
		help(os.Args[0], 1)
	}

	if !testmode && os.Geteuid() != 0 {
		fmt.Printf("This must be run as root\n")
		os.Exit(1)
	}

	if reverse {
		return idmap.UidshiftFromContainer(directory, testmode)
	}

	return idmap.UidshiftIntoContainer(directory, testmode)
}
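The same flow can be driven programmatically. A sketch using only the calls seen above, assuming this repository's shared package import path; the "b:0:100000:65536" mapping and the /tmp/rootfs target are illustrative values, not defaults:

package main

import (
	"fmt"

	"github.com/lxc/lxd/shared"
)

func main() {
	idmap := shared.IdmapSet{}

	// "b:..." covers both uids and gids in one mapping string.
	idmap, err := idmap.Append("b:0:100000:65536")
	if err != nil {
		fmt.Println(err)
		return
	}

	if idmap.Len() == 0 {
		fmt.Println("No idmaps given")
		return
	}

	// testmode=true prints the planned ownership changes instead of
	// applying them, so this does not need to run as root.
	if err := idmap.UidshiftIntoContainer("/tmp/rootfs", true); err != nil {
		fmt.Println(err)
	}
}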
func containerFilePut(d *Daemon, pid int, r *http.Request, p string, idmapset *shared.IdmapSet) Response {
	uid, gid, mode := shared.ParseLXDFileHeaders(r.Header)

	if idmapset != nil {
		uid, gid = idmapset.ShiftIntoNs(uid, gid)
	}

	temp, err := ioutil.TempFile("", "lxd_forkputfile_")
	if err != nil {
		return InternalError(err)
	}
	defer func() {
		temp.Close()
		os.Remove(temp.Name())
	}()

	_, err = io.Copy(temp, r.Body)
	if err != nil {
		return InternalError(err)
	}

	cmd := exec.Command(
		d.execPath,
		"forkputfile",
		temp.Name(),
		fmt.Sprintf("%d", pid),
		p,
		fmt.Sprintf("%d", uid),
		fmt.Sprintf("%d", gid),
		fmt.Sprintf("%d", mode&os.ModePerm),
	)

	out, err := cmd.CombinedOutput()
	if err != nil {
		// Use an explicit format string so forkputfile output
		// containing '%' isn't misinterpreted by Errorf.
		return InternalError(fmt.Errorf("%s", strings.TrimRight(string(out), "\n")))
	}

	return EmptySyncResponse
}
func (c *migrationSink) do() error {
	var err error
	c.controlConn, err = c.connectWithSecret(c.controlSecret)
	if err != nil {
		return err
	}
	defer c.disconnect()

	c.fsConn, err = c.connectWithSecret(c.fsSecret)
	if err != nil {
		c.sendControl(err)
		return err
	}

	if c.live {
		c.criuConn, err = c.connectWithSecret(c.criuSecret)
		if err != nil {
			c.sendControl(err)
			return err
		}
	}

	header := MigrationHeader{}
	if err := c.recv(&header); err != nil {
		c.sendControl(err)
		return err
	}

	criuType := CRIUType_CRIU_RSYNC.Enum()
	if !c.live {
		criuType = nil
	}

	mySink := c.container.Storage().MigrationSink
	myType := c.container.Storage().MigrationType()
	resp := MigrationHeader{
		Fs:   &myType,
		Criu: criuType,
	}

	// If the storage type the source has doesn't match what we have, then
	// we have to use rsync.
	if *header.Fs != *resp.Fs {
		mySink = rsyncMigrationSink
		myType = MigrationFSType_RSYNC
		resp.Fs = &myType
	}

	if err := c.send(&resp); err != nil {
		c.sendControl(err)
		return err
	}

	restore := make(chan error)
	go func(c *migrationSink) {
		imagesDir := ""
		srcIdmap := new(shared.IdmapSet)

		snapshots := []container{}
		for _, snap := range header.Snapshots {
			// TODO: we need to propagate snapshot configurations
			// as well. Right now the container configuration is
			// done through the initial migration post. Should we
			// post the snapshots and their configs as well, or do
			// it some other way?
			name := c.container.Name() + shared.SnapshotDelimiter + snap
			args := containerArgs{
				Ctype:        cTypeSnapshot,
				Config:       c.container.LocalConfig(),
				Profiles:     c.container.Profiles(),
				Ephemeral:    c.container.IsEphemeral(),
				Architecture: c.container.Architecture(),
				Devices:      c.container.LocalDevices(),
				Name:         name,
			}

			ct, err := containerCreateEmptySnapshot(c.container.Daemon(), args)
			if err != nil {
				restore <- err
				return
			}
			snapshots = append(snapshots, ct)
		}

		for _, idmap := range header.Idmap {
			e := shared.IdmapEntry{
				Isuid:    *idmap.Isuid,
				Isgid:    *idmap.Isgid,
				Nsid:     int(*idmap.Nsid),
				Hostid:   int(*idmap.Hostid),
				Maprange: int(*idmap.Maprange)}
			srcIdmap.Idmap = shared.Extend(srcIdmap.Idmap, e)
		}

		/* We do the fs receive in parallel so we don't have to reason
		 * about when to receive what. The sending side is smart enough
		 * to send the filesystem bits that it can before it seizes the
		 * container to start checkpointing, so the total transfer time
		 * will be minimized even if we're dumb here.
		 */
		fsTransfer := make(chan error)
		go func() {
			if err := mySink(c.live, c.container, snapshots, c.fsConn); err != nil {
				fsTransfer <- err
				return
			}

			if err := ShiftIfNecessary(c.container, srcIdmap); err != nil {
				fsTransfer <- err
				return
			}

			fsTransfer <- nil
		}()

		if c.live {
			var err error
			imagesDir, err = ioutil.TempDir("", "lxd_restore_")
			if err != nil {
				os.RemoveAll(imagesDir)
				// Report the error; a bare return here would
				// leave the select below waiting forever.
				restore <- err
				return
			}

			defer func() {
				err := CollectCRIULogFile(c.container, imagesDir, "migration", "restore")
				/*
				 * If the checkpoint fails, we won't have any log to collect,
				 * so don't warn about that.
				 */
				if err != nil && !os.IsNotExist(err) {
					shared.Debugf("Error collecting migration log file %s", err)
				}
				os.RemoveAll(imagesDir)
			}()

			if err := RsyncRecv(shared.AddSlash(imagesDir), c.criuConn); err != nil {
				restore <- err
				return
			}

			/*
			 * For unprivileged containers we need to shift the
			 * perms on the images so that they can be opened by
			 * the process after it is in its user namespace.
			 */
			if !c.container.IsPrivileged() {
				if err := c.container.IdmapSet().ShiftRootfs(imagesDir); err != nil {
					restore <- err
					return
				}
			}
		}

		err := <-fsTransfer
		if err != nil {
			restore <- err
			return
		}

		if c.live {
			err := c.container.StartFromMigration(imagesDir)
			if err != nil {
				log, err2 := GetCRIULogErrors(imagesDir, "restore")

				/* restore failed before CRIU was invoked, give
				 * back the liblxc error */
				if err2 != nil {
					log = err.Error()
				}

				err = fmt.Errorf("restore failed:\n%s", log)
				restore <- err
				return
			}
		}

		for _, snap := range snapshots {
			if err := ShiftIfNecessary(snap, srcIdmap); err != nil {
				restore <- err
				return
			}
		}

		restore <- nil
	}(c)

	source := c.controlChannel()
	for {
		select {
		case err = <-restore:
			c.sendControl(err)
			return err
		case msg, ok := <-source:
			if !ok {
				c.disconnect()
				return fmt.Errorf("Got error reading source")
			}
			if !*msg.Success {
				c.disconnect()
				return fmt.Errorf(*msg.Message)
			} else {
				// The source can only tell us it failed (e.g. if
				// checkpointing failed). We have to tell the source
				// whether or not the restore was successful.
				shared.Debugf("Unknown message %v from source", msg)
			}
		}
	}
}
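The fsTransfer channel pattern above is worth isolating: the filesystem receive runs concurrently with the CRIU image receive, and the goroutine joins on the channel before attempting the restore. A minimal self-contained sketch of the same pattern; receiveFS and receiveImages are hypothetical stand-ins for the two rsync receives:

package main

import (
	"fmt"
	"time"
)

// Hypothetical stand-ins for the two rsync receives.
func receiveFS() error     { time.Sleep(50 * time.Millisecond); return nil }
func receiveImages() error { time.Sleep(10 * time.Millisecond); return nil }

func main() {
	fsTransfer := make(chan error)

	// Receive the filesystem in parallel with the CRIU images.
	go func() {
		fsTransfer <- receiveFS()
	}()

	if err := receiveImages(); err != nil {
		fmt.Println("images:", err)
		return
	}

	// Join on the filesystem transfer before restoring; every exit
	// path must send on the channel or the reader blocks forever.
	if err := <-fsTransfer; err != nil {
		fmt.Println("fs:", err)
		return
	}

	fmt.Println("both transfers done, safe to restore")
}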
func (c *migrationSink) do() error {
	var err error
	c.controlConn, err = c.connectWithSecret(c.controlSecret)
	if err != nil {
		return err
	}
	defer c.disconnect()

	c.fsConn, err = c.connectWithSecret(c.fsSecret)
	if err != nil {
		c.sendControl(err)
		return err
	}

	if c.live {
		c.criuConn, err = c.connectWithSecret(c.criuSecret)
		if err != nil {
			c.sendControl(err)
			return err
		}
	}

	// For now, we just ignore whatever the server sends us. We only
	// support RSYNC, so that's what we respond with.
	header := MigrationHeader{}
	if err := c.recv(&header); err != nil {
		c.sendControl(err)
		return err
	}

	criuType := CRIUType_CRIU_RSYNC.Enum()
	if !c.live {
		criuType = nil
	}

	resp := MigrationHeader{Fs: MigrationFSType_RSYNC.Enum(), Criu: criuType}
	if err := c.send(&resp); err != nil {
		c.sendControl(err)
		return err
	}

	restore := make(chan error)
	go func(c *migrationSink) {
		imagesDir := ""
		srcIdmap := new(shared.IdmapSet)
		dstIdmap := c.IdmapSet
		if dstIdmap == nil {
			dstIdmap = new(shared.IdmapSet)
		}

		if c.live {
			var err error
			imagesDir, err = ioutil.TempDir("", "lxd_migration_")
			if err != nil {
				os.RemoveAll(imagesDir)
				c.sendControl(err)
				// Report the error; a bare return here would
				// leave the select below waiting forever.
				restore <- err
				return
			}

			defer func() {
				err := CollectCRIULogFile(c.container, imagesDir, "migration", "restore")
				/*
				 * If the checkpoint fails, we won't have any log to collect,
				 * so don't warn about that.
				 */
				if err != nil && !os.IsNotExist(err) {
					shared.Debugf("Error collecting migration log file %s", err)
				}
				os.RemoveAll(imagesDir)
			}()

			if err := RsyncRecv(shared.AddSlash(imagesDir), c.criuConn); err != nil {
				restore <- err
				os.RemoveAll(imagesDir)
				c.sendControl(err)
				return
			}

			/*
			 * For unprivileged containers we need to shift the
			 * perms on the images so that they can be opened by
			 * the process after it is in its user namespace.
			 */
			if dstIdmap != nil {
				if err := dstIdmap.ShiftRootfs(imagesDir); err != nil {
					restore <- err
					os.RemoveAll(imagesDir)
					c.sendControl(err)
					return
				}
			}
		}

		fsDir := c.container.ConfigItem("lxc.rootfs")[0]
		if err := RsyncRecv(shared.AddSlash(fsDir), c.fsConn); err != nil {
			restore <- err
			c.sendControl(err)
			return
		}

		for _, idmap := range header.Idmap {
			e := shared.IdmapEntry{
				Isuid:    *idmap.Isuid,
				Isgid:    *idmap.Isgid,
				Nsid:     int(*idmap.Nsid),
				Hostid:   int(*idmap.Hostid),
				Maprange: int(*idmap.Maprange)}
			srcIdmap.Idmap = shared.Extend(srcIdmap.Idmap, e)
		}

		if !reflect.DeepEqual(srcIdmap, dstIdmap) {
			if err := srcIdmap.UnshiftRootfs(shared.VarPath("containers", c.container.Name())); err != nil {
				restore <- err
				c.sendControl(err)
				return
			}

			if err := dstIdmap.ShiftRootfs(shared.VarPath("containers", c.container.Name())); err != nil {
				restore <- err
				c.sendControl(err)
				return
			}
		}

		if c.live {
			f, err := ioutil.TempFile("", "lxd_lxc_migrateconfig_")
			if err != nil {
				restore <- err
				return
			}

			if err = f.Chmod(0600); err != nil {
				f.Close()
				os.Remove(f.Name())
				// Report the error instead of silently bailing out.
				restore <- err
				return
			}
			f.Close()

			if err := c.container.SaveConfigFile(f.Name()); err != nil {
				restore <- err
				return
			}

			cmd := exec.Command(
				os.Args[0],
				"forkmigrate",
				c.container.Name(),
				c.container.ConfigPath(),
				f.Name(),
				imagesDir,
			)

			err = cmd.Run()
			if err != nil {
				log := GetCRIULogErrors(imagesDir, "restore")
				err = fmt.Errorf("restore failed:\n%s", log)
			}

			restore <- err
		} else {
			restore <- nil
		}
	}(c)

	source := c.controlChannel()
	for {
		select {
		case err = <-restore:
			c.sendControl(err)
			return err
		case msg, ok := <-source:
			if !ok {
				c.disconnect()
				return fmt.Errorf("Got error reading source")
			}
			if !*msg.Success {
				c.disconnect()
				return fmt.Errorf(*msg.Message)
			} else {
				// The source can only tell us it failed (e.g. if
				// checkpointing failed). We have to tell the source
				// whether or not the restore was successful.
				shared.Debugf("Unknown message %v from source", msg)
			}
		}
	}
}
func (c *migrationSink) Do(migrateOp *operation) error {
	var err error

	if c.push {
		<-c.allConnected
	}

	disconnector := c.src.disconnect
	if c.push {
		disconnector = c.dest.disconnect
	}

	if c.push {
		defer disconnector()
	} else {
		c.src.controlConn, err = c.connectWithSecret(c.src.controlSecret)
		if err != nil {
			return err
		}
		defer c.src.disconnect()

		c.src.fsConn, err = c.connectWithSecret(c.src.fsSecret)
		if err != nil {
			c.src.sendControl(err)
			return err
		}

		if c.src.live {
			c.src.criuConn, err = c.connectWithSecret(c.src.criuSecret)
			if err != nil {
				c.src.sendControl(err)
				return err
			}
		}
	}

	receiver := c.src.recv
	if c.push {
		receiver = c.dest.recv
	}

	sender := c.src.send
	if c.push {
		sender = c.dest.send
	}

	controller := c.src.sendControl
	if c.push {
		controller = c.dest.sendControl
	}

	header := MigrationHeader{}
	if err := receiver(&header); err != nil {
		controller(err)
		return err
	}

	live := c.src.live
	if c.push {
		live = c.dest.live
	}

	criuType := CRIUType_CRIU_RSYNC.Enum()
	if !live {
		criuType = nil
	}

	mySink := c.src.container.Storage().MigrationSink
	myType := c.src.container.Storage().MigrationType()
	resp := MigrationHeader{
		Fs:   &myType,
		Criu: criuType,
	}

	// If the storage type the source has doesn't match what we have, then
	// we have to use rsync.
	if *header.Fs != *resp.Fs {
		mySink = rsyncMigrationSink
		myType = MigrationFSType_RSYNC
		resp.Fs = &myType
	}

	if err := sender(&resp); err != nil {
		controller(err)
		return err
	}

	restore := make(chan error)
	go func(c *migrationSink) {
		imagesDir := ""
		srcIdmap := new(shared.IdmapSet)

		for _, idmap := range header.Idmap {
			e := shared.IdmapEntry{
				Isuid:    *idmap.Isuid,
				Isgid:    *idmap.Isgid,
				Nsid:     int(*idmap.Nsid),
				Hostid:   int(*idmap.Hostid),
				Maprange: int(*idmap.Maprange)}
			srcIdmap.Idmap = shared.Extend(srcIdmap.Idmap, e)
		}

		/* We do the fs receive in parallel so we don't have to reason
		 * about when to receive what. The sending side is smart enough
		 * to send the filesystem bits that it can before it seizes the
		 * container to start checkpointing, so the total transfer time
		 * will be minimized even if we're dumb here.
		 */
		fsTransfer := make(chan error)
		go func() {
			snapshots := []*Snapshot{}

			/* Legacy: we only sent the snapshot names, so we just
			 * copy the container's config over, same as we used to
			 * do.
			 */
			if len(header.SnapshotNames) != len(header.Snapshots) {
				for _, name := range header.SnapshotNames {
					name := name // don't take the address of the loop variable
					base := snapshotToProtobuf(c.src.container)
					base.Name = &name
					snapshots = append(snapshots, base)
				}
			} else {
				snapshots = header.Snapshots
			}

			var fsConn *websocket.Conn
			if c.push {
				fsConn = c.dest.fsConn
			} else {
				fsConn = c.src.fsConn
			}

			if err := mySink(live, c.src.container, snapshots, fsConn, srcIdmap, migrateOp); err != nil {
				fsTransfer <- err
				return
			}

			if err := ShiftIfNecessary(c.src.container, srcIdmap); err != nil {
				fsTransfer <- err
				return
			}

			fsTransfer <- nil
		}()

		if live {
			var err error
			imagesDir, err = ioutil.TempDir("", "lxd_restore_")
			if err != nil {
				restore <- err
				return
			}

			defer os.RemoveAll(imagesDir)

			var criuConn *websocket.Conn
			if c.push {
				criuConn = c.dest.criuConn
			} else {
				criuConn = c.src.criuConn
			}

			if err := RsyncRecv(shared.AddSlash(imagesDir), criuConn, nil); err != nil {
				restore <- err
				return
			}
		}

		err := <-fsTransfer
		if err != nil {
			restore <- err
			return
		}

		if live {
			err = c.src.container.Migrate(lxc.MIGRATE_RESTORE, imagesDir, "migration", false, false)
			if err != nil {
				restore <- err
				return
			}
		}

		restore <- nil
	}(c)

	var source <-chan MigrationControl
	if c.push {
		source = c.dest.controlChannel()
	} else {
		source = c.src.controlChannel()
	}

	for {
		select {
		case err = <-restore:
			controller(err)
			return err
		case msg, ok := <-source:
			if !ok {
				disconnector()
				return fmt.Errorf("Got error reading source")
			}
			if !*msg.Success {
				disconnector()
				return fmt.Errorf(*msg.Message)
			} else {
				// The source can only tell us it failed (e.g. if
				// checkpointing failed). We have to tell the source
				// whether or not the restore was successful.
				shared.LogDebugf("Unknown message %v from source", msg)
			}
		}
	}
}
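The header negotiation in both migrationSink variants reduces to: advertise the native storage transport, and fall back to rsync when the peer's doesn't match. A compact self-contained sketch of that decision; the fsType constants here are illustrative stand-ins for the generated MigrationFSType values:

package main

import "fmt"

// Illustrative stand-ins for the generated MigrationFSType values.
type fsType int

const (
	fsRsync fsType = iota
	fsBtrfs
	fsZfs
)

// negotiate picks the transport the sink will use: the native type when
// both sides share it, otherwise the universal rsync fallback.
func negotiate(theirs, mine fsType) fsType {
	if theirs != mine {
		return fsRsync
	}
	return mine
}

func main() {
	names := map[fsType]string{fsRsync: "rsync", fsBtrfs: "btrfs", fsZfs: "zfs"}

	fmt.Println(names[negotiate(fsZfs, fsZfs)])   // "zfs": native send/recv path
	fmt.Println(names[negotiate(fsBtrfs, fsZfs)]) // "rsync": universal fallback
}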