func runCommand(container *lxc.Container, command []string, options lxc.AttachOptions) shared.OperationResult { status, err := container.RunCommandStatus(command, options) if err != nil { shared.Debugf("Failed running command: %q", err.Error()) return shared.OperationError(err) } metadata, err := json.Marshal(shared.Jmap{"return": status}) if err != nil { return shared.OperationError(err) } return shared.OperationResult{Metadata: metadata, Error: nil} }
func createFromCopy(d *Daemon, req *containerPostReq) Response { if req.Source.Source == "" { return BadRequest(fmt.Errorf("must specify a source container")) } source, err := containerLXDLoad(d, req.Source.Source) if err != nil { return SmartError(err) } sourceConfig := source.ConfigGet() if req.Config == nil { req.Config = make(map[string]string) } for key, value := range sourceConfig { if len(key) > 8 && key[0:8] == "volatile" && key[9:] != "base_image" { shared.Log.Debug("Skipping volatile key from copy source", log.Ctx{"key": key}) continue } req.Config[key] = value } if req.Profiles == nil { req.Profiles = source.ProfilesGet() } args := containerLXDArgs{ Ctype: cTypeRegular, Config: req.Config, Profiles: req.Profiles, Ephemeral: req.Ephemeral, BaseImage: req.Source.BaseImage, } run := func() shared.OperationResult { _, err := containerLXDCreateAsCopy(d, req.Name, args, source) if err != nil { return shared.OperationError(err) } return shared.OperationSuccess } resources := make(map[string][]string) resources["containers"] = []string{req.Name, req.Source.Source} return &asyncResponse{run: run, resources: resources} }
func createFromCopy(d *Daemon, req *containerPostReq) Response { if req.Source.Source == "" { return BadRequest(fmt.Errorf("must specify a source container")) } // Make sure the source exists. source, err := containerLXDLoad(d, req.Source.Source) if err != nil { return SmartError(err) } sourceConfig := source.ConfigGet() if req.Config == nil { config := make(map[string]string) for key, value := range sourceConfig.Config { if key[0:8] == "volatile" { shared.Debugf("Skipping configuration key: %s\n", key) continue } req.Config[key] = value } req.Config = config } if req.Profiles == nil { req.Profiles = sourceConfig.Profiles } args := containerLXDArgs{ Ctype: cTypeRegular, Config: req.Config, Profiles: req.Profiles, Ephemeral: req.Ephemeral, BaseImage: req.Source.BaseImage, } run := func() shared.OperationResult { _, err := containerLXDCreateAsCopy(d, req.Name, args, source) if err != nil { return shared.OperationError(err) } return shared.OperationSuccess } resources := make(map[string][]string) resources["containers"] = []string{req.Name, req.Source.Source} return &asyncResponse{run: run, resources: resources} }
func (s *migrationSourceWs) Do() shared.OperationResult { <-s.allConnected criuType := CRIUType_CRIU_RSYNC.Enum() if !s.live { criuType = nil } header := MigrationHeader{ Fs: MigrationFSType_RSYNC.Enum(), Criu: criuType, } if err := s.send(&header); err != nil { s.sendControl(err) return shared.OperationError(err) } if err := s.recv(&header); err != nil { s.sendControl(err) return shared.OperationError(err) } if *header.Fs != MigrationFSType_RSYNC { err := fmt.Errorf("formats other than rsync not understood") s.sendControl(err) return shared.OperationError(err) } if s.live { if header.Criu == nil { err := fmt.Errorf("got no CRIU socket type for live migration") s.sendControl(err) return shared.OperationError(err) } else if *header.Criu != CRIUType_CRIU_RSYNC { err := fmt.Errorf("formats other than criu rsync not understood") s.sendControl(err) return shared.OperationError(err) } checkpointDir, err := ioutil.TempDir("", "lxd_migration_") if err != nil { s.sendControl(err) return shared.OperationError(err) } defer os.RemoveAll(checkpointDir) opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true} err = s.container.Checkpoint(opts) if err2 := collectMigrationLogFile(s.container, checkpointDir, "dump"); err2 != nil { shared.Debugf("error collecting checkpoint log file %s", err) } if err != nil { s.sendControl(err) return shared.OperationError(err) } /* * We do the serially right now, but there's really no reason for us * to; since we have separate websockets, we can do it in parallel if * we wanted to. However, assuming we're network bound, there's really * no reason to do these in parallel. In the future when we're using * p.haul's protocol, it will make sense to do these in parallel. 
*/ if err := RsyncSend(AddSlash(checkpointDir), s.criuConn); err != nil { s.sendControl(err) return shared.OperationError(err) } } fsDir := s.container.ConfigItem("lxc.rootfs")[0] if err := RsyncSend(AddSlash(fsDir), s.fsConn); err != nil { s.sendControl(err) return shared.OperationError(err) } msg := MigrationControl{} if err := s.recv(&msg); err != nil { s.disconnect() return shared.OperationError(err) } // TODO: should we add some config here about automatically restarting // the container migrate failure? What about the failures above? if !*msg.Success { return shared.OperationError(fmt.Errorf(*msg.Message)) } return shared.OperationSuccess }
// createFromMigration creates a container by pulling its contents from a
// remote migration source over websockets. Only "pull" mode is implemented.
func createFromMigration(d *Daemon, req *containerPostReq) Response {
	if req.Source.Mode != "pull" {
		return NotImplemented
	}

	run := func() shared.OperationResult {
		createArgs := containerLXDArgs{
			Ctype:     cTypeRegular,
			Config:    req.Config,
			Profiles:  req.Profiles,
			Ephemeral: req.Ephemeral,
			BaseImage: req.Source.BaseImage,
		}

		var c container
		// If the base image is cached locally, create the container
		// from it so the sync only transfers the delta; otherwise
		// start from an empty container.
		if _, err := dbImageGet(d.db, req.Source.BaseImage, false, true); err == nil {
			c, err = containerLXDCreateFromImage(
				d, req.Name, createArgs, req.Source.BaseImage)
			if err != nil {
				return shared.OperationError(err)
			}
		} else {
			c, err = containerLXDCreateAsEmpty(d, req.Name, createArgs)
			if err != nil {
				return shared.OperationError(err)
			}
		}

		// From here on, failures must delete the partially-created
		// container to roll back.
		config, err := shared.GetTLSConfig(d.certf, d.keyf)
		if err != nil {
			c.Delete()
			return shared.OperationError(err)
		}

		args := MigrationSinkArgs{
			Url: req.Source.Operation,
			Dialer: websocket.Dialer{
				TLSClientConfig: config,
				NetDial:         shared.RFC3493Dialer},
			Container: c,
			Secrets:   req.Source.Websockets,
		}

		sink, err := NewMigrationSink(&args)
		if err != nil {
			c.Delete()
			return shared.OperationError(err)
		}

		// Start the storage for this container (LVM mount/umount).
		// NOTE(review): any error from StorageStart is ignored here —
		// confirm whether that is intentional.
		c.StorageStart()

		// And finally run the migration.
		err = sink()
		if err != nil {
			c.StorageStop()
			c.Delete()
			return shared.OperationError(fmt.Errorf("Error transferring container data: %s", err))
		}

		defer c.StorageStop()

		err = c.TemplateApply("copy")
		if err != nil {
			return shared.OperationError(err)
		}

		return shared.OperationError(nil)
	}

	resources := make(map[string][]string)
	resources["containers"] = []string{req.Name}

	return &asyncResponse{run: run, resources: resources}
}
// Do runs the exec session: it allocates either a single pty (interactive) or
// three pipes (non-interactive), wires them to the client websockets, runs
// the command inside the container, and tears everything down afterwards.
func (s *execWs) Do(id string) shared.OperationResult {
	// Wait for all expected websockets to connect before starting.
	<-s.allConnected

	var err error
	var ttys []*os.File
	var ptys []*os.File

	if s.interactive {
		// One pty shared by stdin/stdout/stderr; the container side
		// gets the tty end, we keep the pty end.
		ttys = make([]*os.File, 1)
		ptys = make([]*os.File, 1)
		ptys[0], ttys[0], err = shared.OpenPty(s.rootUid, s.rootGid)
		s.options.StdinFd = ttys[0].Fd()
		s.options.StdoutFd = ttys[0].Fd()
		s.options.StderrFd = ttys[0].Fd()
	} else {
		// Three separate pipes, one each for stdin/stdout/stderr.
		ttys = make([]*os.File, 3)
		ptys = make([]*os.File, 3)
		for i := 0; i < len(ttys); i++ {
			ptys[i], ttys[i], err = shared.Pipe()
			if err != nil {
				return shared.OperationError(err)
			}
		}
		s.options.StdinFd = ptys[0].Fd()
		s.options.StdoutFd = ttys[1].Fd()
		s.options.StderrFd = ttys[2].Fd()
	}

	controlExit := make(chan bool)
	var wgEOF sync.WaitGroup

	if s.interactive {
		wgEOF.Add(1)
		// Control-socket reader: handles out-of-band commands (window
		// resizes) until the control connection closes or we are told
		// to exit via controlExit.
		go func() {
			select {
			case <-s.controlConnected:
				break
			case <-controlExit:
				return
			}

			for {
				// s.conns[-1] is the control websocket.
				mt, r, err := s.conns[-1].NextReader()
				if mt == websocket.CloseMessage {
					break
				}
				if err != nil {
					shared.Debugf("Got error getting next reader %s", err)
					break
				}

				buf, err := ioutil.ReadAll(r)
				if err != nil {
					shared.Debugf("Failed to read message %s", err)
					break
				}

				command := shared.ContainerExecControl{}
				if err := json.Unmarshal(buf, &command); err != nil {
					shared.Debugf("Failed to unmarshal control socket command: %s", err)
					continue
				}

				if command.Command == "window-resize" {
					winchWidth, err := strconv.Atoi(command.Args["width"])
					if err != nil {
						shared.Debugf("Unable to extract window width: %s", err)
						continue
					}

					winchHeight, err := strconv.Atoi(command.Args["height"])
					if err != nil {
						shared.Debugf("Unable to extract window height: %s", err)
						continue
					}

					err = shared.SetSize(int(ptys[0].Fd()), winchWidth, winchHeight)
					if err != nil {
						shared.Debugf("Failed to set window size to: %dx%d", winchWidth, winchHeight)
						continue
					}
				}

				if err != nil {
					shared.Debugf("Got error writing to writer %s", err)
					break
				}
			}
		}()

		// Mirror the pty to/from the data websocket (s.conns[0]).
		go func() {
			<-shared.WebsocketMirror(s.conns[0], ptys[0], ptys[0])
			wgEOF.Done()
		}()
	} else {
		// One goroutine per stream; only stdout/stderr (i != 0) are
		// counted in wgEOF, hence len(ttys)-1.
		wgEOF.Add(len(ttys) - 1)
		for i := 0; i < len(ttys); i++ {
			go func(i int) {
				if i == 0 {
					// stdin: websocket -> pipe.
					<-shared.WebsocketRecvStream(ttys[i], s.conns[i])
					ttys[i].Close()
				} else {
					// stdout/stderr: pipe -> websocket.
					<-shared.WebsocketSendStream(s.conns[i], ptys[i])
					ptys[i].Close()
					wgEOF.Done()
				}
			}(i)
		}
	}

	result := runCommand(
		s.container,
		s.command,
		s.options,
	)

	// Close the container-side fds first so the forwarding goroutines see
	// EOF and terminate.
	for _, tty := range ttys {
		tty.Close()
	}

	if s.conns[-1] == nil {
		// Control socket never connected; unblock the control
		// goroutine's initial select so it can return.
		if s.interactive {
			controlExit <- true
		}
	} else {
		s.conns[-1].Close()
	}

	wgEOF.Wait()

	for _, pty := range ptys {
		pty.Close()
	}

	return result
}
// containerExecPost handles POST /1.0/containers/{name}/exec: it validates
// that the container is running, builds the attach environment, and either
// returns a websocket-based exec session or runs the command detached with
// all stdio pointed at /dev/null.
func containerExecPost(d *Daemon, r *http.Request) Response {
	name := mux.Vars(r)["name"]
	c, err := containerLXDLoad(d, name)
	if err != nil {
		return SmartError(err)
	}

	if !c.IsRunning() {
		return BadRequest(fmt.Errorf("Container is not running."))
	}

	if c.IsFrozen() {
		return BadRequest(fmt.Errorf("Container is frozen."))
	}

	post := commandPostContent{}
	buf, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return BadRequest(err)
	}

	if err := json.Unmarshal(buf, &post); err != nil {
		return BadRequest(err)
	}

	opts := lxc.DefaultAttachOptions
	opts.ClearEnv = true
	opts.Env = []string{}

	// Seed the environment from the container's "environment.*" config keys.
	for k, v := range c.Config() {
		if strings.HasPrefix(k, "environment.") {
			opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
		}
	}

	// Request-supplied environment overrides/extends; HOME also sets the
	// working directory for the command.
	if post.Environment != nil {
		for k, v := range post.Environment {
			if k == "HOME" {
				opts.Cwd = v
			}
			opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", k, v))
		}
	}

	if post.WaitForWS {
		ws := &execWs{}
		ws.fds = map[int]string{}
		idmapset := c.IdmapSet()
		if idmapset != nil {
			// Map the container's root uid/gid into our namespace for
			// pty ownership.
			ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0)
		}
		// Key -1 is the control socket; 0 is stdio (interactive) or
		// stdin, with 1/2 added for stdout/stderr (non-interactive).
		ws.conns = map[int]*websocket.Conn{}
		ws.conns[-1] = nil
		ws.conns[0] = nil
		if !post.Interactive {
			ws.conns[1] = nil
			ws.conns[2] = nil
		}
		ws.allConnected = make(chan bool, 1)
		ws.controlConnected = make(chan bool, 1)
		ws.interactive = post.Interactive
		ws.done = make(chan shared.OperationResult, 1)
		ws.options = opts
		// One secret per expected websocket, keyed -1..len-2 to match
		// ws.conns above.
		for i := -1; i < len(ws.conns)-1; i++ {
			ws.fds[i], err = shared.RandomCryptoString()
			if err != nil {
				return InternalError(err)
			}
		}

		ws.command = post.Command
		ws.container = c.LXContainerGet()

		return AsyncResponseWithWs(ws, nil)
	}

	// No websockets requested: run detached with stdio on /dev/null.
	run := func(id string) shared.OperationResult {
		nullDev, err := os.OpenFile(os.DevNull, os.O_RDWR, 0666)
		if err != nil {
			return shared.OperationError(err)
		}
		defer nullDev.Close()
		nullfd := nullDev.Fd()

		opts.StdinFd = nullfd
		opts.StdoutFd = nullfd
		opts.StderrFd = nullfd

		return runCommand(c.LXContainerGet(), post.Command, opts)
	}

	return AsyncResponse(run, nil)
}
func createFromCopy(d *Daemon, req *containerPostReq) Response { if req.Source.Source == "" { return BadRequest(fmt.Errorf("must specify a source container")) } // Make sure the source exists. source, err := newLxdContainer(req.Source.Source, d) if err != nil { return SmartError(err) } if req.Config == nil { config := make(map[string]string) for key, value := range source.config { if key[0:8] == "volatile" { shared.Debugf("skipping: %s\n", key) continue } req.Config[key] = value } req.Config = config } if req.Profiles == nil { req.Profiles = source.profiles } args := DbCreateContainerArgs{ d: d, name: req.Name, ctype: cTypeRegular, config: req.Config, profiles: req.Profiles, ephem: req.Ephemeral, baseImage: req.Source.BaseImage, } _, err = dbCreateContainer(args) if err != nil { return SmartError(err) } var oldPath string if shared.IsSnapshot(req.Source.Source) { snappieces := strings.SplitN(req.Source.Source, "/", 2) oldPath = migration.AddSlash(shared.VarPath("lxc", snappieces[0], "snapshots", snappieces[1], "rootfs")) } else { oldPath = migration.AddSlash(shared.VarPath("lxc", req.Source.Source, "rootfs")) } subvol := strings.TrimSuffix(oldPath, "rootfs/") dpath := shared.VarPath("lxc", req.Name) // Destination path if !btrfsIsSubvolume(subvol) { if err := os.MkdirAll(dpath, 0700); err != nil { removeContainer(d, req.Name) return InternalError(err) } if err := extractShiftIfExists(d, source, req.Source.BaseImage, req.Name); err != nil { removeContainer(d, req.Name) return InternalError(err) } } newPath := fmt.Sprintf("%s/%s", dpath, "rootfs") run := func() shared.OperationResult { if btrfsIsSubvolume(subvol) { /* * Copy by using btrfs snapshot */ output, err := btrfsSnapshot(subvol, dpath, false) if err != nil { shared.Debugf("Failed to create a BTRFS Snapshot of '%s' to '%s'.", subvol, dpath) shared.Debugf(string(output)) return shared.OperationError(err) } } else { /* * Copy by using rsync */ output, err := exec.Command("rsync", "-a", "--devices", oldPath, 
newPath).CombinedOutput() if err != nil { shared.Debugf("rsync failed:\n%s", output) return shared.OperationError(err) } } if !source.isPrivileged() { err = setUnprivUserAcl(source, dpath) if err != nil { shared.Debugf("Error adding acl for container root: falling back to chmod\n") output, err := exec.Command("chmod", "+x", dpath).CombinedOutput() if err != nil { shared.Debugf("Error chmoding the container root\n") shared.Debugf(string(output)) return shared.OperationError(err) } } } c, err := newLxdContainer(req.Name, d) if err != nil { return shared.OperationError(err) } err = templateApply(c, "copy") if err != nil { return shared.OperationError(err) } return shared.OperationError(nil) } resources := make(map[string][]string) resources["containers"] = []string{req.Name, req.Source.Source} return &asyncResponse{run: run, resources: resources} }
// createFromMigration creates a container locally and then pulls its contents
// from a remote migration source. Only "pull" mode is implemented. All
// pre-migration failures roll back by removing the newly created container.
func createFromMigration(d *Daemon, req *containerPostReq) Response {
	if req.Source.Mode != "pull" {
		return NotImplemented
	}

	createArgs := DbCreateContainerArgs{
		d:         d,
		name:      req.Name,
		ctype:     cTypeRegular,
		config:    req.Config,
		profiles:  req.Profiles,
		ephem:     req.Ephemeral,
		baseImage: req.Source.BaseImage,
	}

	_, err := dbCreateContainer(createArgs)
	if err != nil {
		return SmartError(err)
	}

	c, err := newLxdContainer(req.Name, d)
	if err != nil {
		removeContainer(d, req.Name)
		return SmartError(err)
	}

	// rsync complains if the parent directory for the rootfs sync doesn't
	// exist
	dpath := shared.VarPath("lxc", req.Name)
	if err := os.MkdirAll(dpath, 0700); err != nil {
		removeContainer(d, req.Name)
		return InternalError(err)
	}

	// Unpack the base image (if cached locally) so the sync only has to
	// transfer the delta.
	if err := extractShiftIfExists(d, c, req.Source.BaseImage, req.Name); err != nil {
		removeContainer(d, req.Name)
		return InternalError(err)
	}

	config, err := shared.GetTLSConfig(d.certf, d.keyf)
	if err != nil {
		removeContainer(d, req.Name)
		return InternalError(err)
	}

	args := migration.MigrationSinkArgs{
		Url: req.Source.Operation,
		Dialer: websocket.Dialer{
			TLSClientConfig: config,
			NetDial:         shared.RFC3493Dialer},
		Container: c.c,
		Secrets:   req.Source.Websockets,
		IdMapSet:  c.idmapset,
	}

	sink, err := migration.NewMigrationSink(&args)
	if err != nil {
		removeContainer(d, req.Name)
		return BadRequest(err)
	}

	// The actual transfer runs asynchronously.
	run := func() shared.OperationResult {
		err := sink()
		if err != nil {
			removeContainer(d, req.Name)
			return shared.OperationError(err)
		}

		// Reload the container now that its rootfs is populated.
		c, err := newLxdContainer(req.Name, d)
		if err != nil {
			return shared.OperationError(err)
		}

		err = templateApply(c, "copy")
		if err != nil {
			return shared.OperationError(err)
		}

		return shared.OperationError(nil)
	}

	resources := make(map[string][]string)
	resources["containers"] = []string{req.Name}

	return &asyncResponse{run: run, resources: resources}
}
func createFromCopy(d *Daemon, req *containerPostReq) Response { if req.Source.Source == "" { return BadRequest(fmt.Errorf("must specify a source container")) } // Make sure the source exists. source, err := newLxdContainer(req.Source.Source, d) if err != nil { return SmartError(err) } if req.Config == nil { config := make(map[string]string) for key, value := range source.config { if key[0:8] == "volatile" { shared.Debugf("skipping: %s\n", key) continue } req.Config[key] = value } req.Config = config } if req.Profiles == nil { req.Profiles = source.profiles } args := containerLXDArgs{ Ctype: cTypeRegular, Config: req.Config, Profiles: req.Profiles, Ephemeral: req.Ephemeral, BaseImage: req.Source.BaseImage, } _, err = dbContainerCreate(d.db, req.Name, args) if err != nil { return SmartError(err) } run := func() shared.OperationResult { c, err := newLxdContainer(req.Name, d) if err != nil { return shared.OperationError(err) } s, err := storageForContainer(d, source) if err != nil { return shared.OperationError(err) } if err := s.ContainerCopy(c, source); err != nil { removeContainer(d, c) return shared.OperationError(err) } return shared.OperationError(nil) } resources := make(map[string][]string) resources["containers"] = []string{req.Name, req.Source.Source} return &asyncResponse{run: run, resources: resources} }
func (s *migrationSourceWs) Do() shared.OperationResult { <-s.allConnected criuType := CRIUType_CRIU_RSYNC.Enum() if !s.live { criuType = nil defer s.container.StorageStop() } idmaps := make([]*IDMapType, 0) idmapset := s.container.IdmapSetGet() if idmapset != nil { for _, ctnIdmap := range idmapset.Idmap { idmap := IDMapType{ Isuid: proto.Bool(ctnIdmap.Isuid), Isgid: proto.Bool(ctnIdmap.Isgid), Hostid: proto.Int(ctnIdmap.Hostid), Nsid: proto.Int(ctnIdmap.Nsid), Maprange: proto.Int(ctnIdmap.Maprange), } idmaps = append(idmaps, &idmap) } } header := MigrationHeader{ Fs: MigrationFSType_RSYNC.Enum(), Criu: criuType, Idmap: idmaps, } if err := s.send(&header); err != nil { s.sendControl(err) return shared.OperationError(err) } if err := s.recv(&header); err != nil { s.sendControl(err) return shared.OperationError(err) } if *header.Fs != MigrationFSType_RSYNC { err := fmt.Errorf("Formats other than rsync not understood") s.sendControl(err) return shared.OperationError(err) } if s.live { if header.Criu == nil { err := fmt.Errorf("Got no CRIU socket type for live migration") s.sendControl(err) return shared.OperationError(err) } else if *header.Criu != CRIUType_CRIU_RSYNC { err := fmt.Errorf("Formats other than criu rsync not understood") s.sendControl(err) return shared.OperationError(err) } checkpointDir, err := ioutil.TempDir("", "lxd_migration_") if err != nil { s.sendControl(err) return shared.OperationError(err) } defer os.RemoveAll(checkpointDir) opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true} err = s.container.Checkpoint(opts) if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil { shared.Debugf("Error collecting checkpoint log file %s", err) } if err != nil { log := GetCRIULogErrors(checkpointDir, "dump") err = fmt.Errorf("checkpoint failed:\n%s", log) s.sendControl(err) return shared.OperationError(err) } /* * We do the serially right now, but there's really no reason for us * to; since we 
have separate websockets, we can do it in parallel if * we wanted to. However, assuming we're network bound, there's really * no reason to do these in parallel. In the future when we're using * p.haul's protocol, it will make sense to do these in parallel. */ if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil { s.sendControl(err) return shared.OperationError(err) } } fsDir := s.container.RootfsPathGet() if err := RsyncSend(shared.AddSlash(fsDir), s.fsConn); err != nil { s.sendControl(err) return shared.OperationError(err) } msg := MigrationControl{} if err := s.recv(&msg); err != nil { s.disconnect() return shared.OperationError(err) } // TODO: should we add some config here about automatically restarting // the container migrate failure? What about the failures above? if !*msg.Success { return shared.OperationError(fmt.Errorf(*msg.Message)) } return shared.OperationSuccess }