func NewMigrationSource(c container) (shared.OperationWebsocket, error) { ret := migrationSourceWs{migrationFields{container: c}, make(chan bool, 1)} var err error ret.controlSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } ret.fsSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } if c.IsRunning() { ret.live = true ret.criuSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } } else { c.StorageStart() } return &ret, nil }
func NewMigrationSource(c container) (*migrationSourceWs, error) { ret := migrationSourceWs{migrationFields{container: c}, make(chan bool, 1)} var err error ret.controlSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } ret.fsSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } if c.IsRunning() { if err := findCriu("source"); err != nil { return nil, err } ret.live = true ret.criuSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } } return &ret, nil }
func NewMigrationSink(args *MigrationSinkArgs) (*migrationSink, error) { sink := migrationSink{ src: migrationFields{container: args.Container}, url: args.Url, dialer: args.Dialer, push: args.Push, } if sink.push { sink.allConnected = make(chan bool, 1) } var ok bool var err error if sink.push { sink.dest.controlSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } sink.dest.fsSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } sink.dest.live = args.Live if sink.dest.live { sink.dest.criuSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } } } else { sink.src.controlSecret, ok = args.Secrets["control"] if !ok { return nil, fmt.Errorf("Missing control secret") } sink.src.fsSecret, ok = args.Secrets["fs"] if !ok { return nil, fmt.Errorf("Missing fs secret") } sink.src.criuSecret, ok = args.Secrets["criu"] sink.src.live = ok } err = findCriu("destination") if sink.push && sink.dest.live && err != nil { return nil, err } else if sink.src.live && err != nil { return nil, err } return &sink, nil }
func imageSecret(d *Daemon, r *http.Request) Response { fingerprint := mux.Vars(r)["fingerprint"] _, err := dbImageGet(d.db, fingerprint, false, false) if err != nil { return SmartError(err) } secret, err := shared.RandomCryptoString() if err != nil { return InternalError(err) } meta := shared.Jmap{} meta["secret"] = secret resources := map[string][]string{} resources["images"] = []string{fingerprint} op, err := operationCreate(operationClassToken, resources, meta, nil, nil, nil) if err != nil { return InternalError(err) } return OperationResponse(op) }
func NewMigrationSource(c *lxc.Container, idmapset *shared.IdmapSet) (shared.OperationWebsocket, error) { ret := migrationSourceWs{migrationFields{container: c, idmapset: idmapset}, make(chan bool, 1)} var err error ret.controlSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } ret.fsSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } if c.Running() { ret.live = true ret.criuSecret, err = shared.RandomCryptoString() if err != nil { return nil, err } } return &ret, nil }
func imageSecret(d *Daemon, r *http.Request) Response { fingerprint := mux.Vars(r)["fingerprint"] _, err := dbImageGet(d.db, fingerprint, false) if err != nil { return SmartError(err) } secret, err := shared.RandomCryptoString() if err != nil { return InternalError(err) } meta := shared.Jmap{} meta["secret"] = secret resources := make(map[string][]string) resources["images"] = []string{fingerprint} return &asyncResponse{resources: resources, metadata: meta} }
// containerExecPost handles POST /1.0/containers/{name}/exec: run a command
// inside a running container via liblxc attach. Depending on the request it
// either returns a websocket-backed async response (WaitForWS) wiring the
// command's fds to websockets, or runs the command detached with all stdio
// pointed at /dev/null.
func containerExecPost(d *Daemon, r *http.Request) Response {
	name := mux.Vars(r)["name"]
	c, err := containerLXDLoad(d, name)
	if err != nil {
		return SmartError(err)
	}

	// Exec only makes sense against a live, unfrozen container.
	if !c.IsRunning() {
		return BadRequest(fmt.Errorf("Container is not running."))
	}

	if c.IsFrozen() {
		return BadRequest(fmt.Errorf("Container is frozen."))
	}

	post := commandPostContent{}
	buf, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return BadRequest(err)
	}

	if err := json.Unmarshal(buf, &post); err != nil {
		return BadRequest(err)
	}

	// Build the attach environment from scratch: start empty, add the
	// container's "environment.*" config keys, then let request-supplied
	// variables override/extend them (appended later wins in lxc attach).
	opts := lxc.DefaultAttachOptions
	opts.ClearEnv = true
	opts.Env = []string{}

	for k, v := range c.Config() {
		if strings.HasPrefix(k, "environment.") {
			opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
		}
	}

	if post.Environment != nil {
		for k, v := range post.Environment {
			// Quirk kept for compatibility: a HOME variable also
			// sets the attach working directory.
			if k == "HOME" {
				opts.Cwd = v
			}
			opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", k, v))
		}
	}

	if post.WaitForWS {
		// Interactive/websocket mode: allocate one secret per fd so
		// the client can attach each stream separately.
		ws := &execWs{}
		ws.fds = map[int]string{}
		idmapset := c.IdmapSet()
		if idmapset != nil {
			// Map host uid/gid 0 into the container's namespace
			// so the pty can be chowned correctly.
			ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0)
		}
		ws.conns = map[int]*websocket.Conn{}
		ws.conns[-1] = nil // control connection
		ws.conns[0] = nil  // stdin (or the single pty in interactive mode)
		if !post.Interactive {
			ws.conns[1] = nil
			ws.conns[2] = nil
		}
		ws.allConnected = make(chan bool, 1)
		ws.controlConnected = make(chan bool, 1)
		ws.interactive = post.Interactive
		ws.done = make(chan shared.OperationResult, 1)
		ws.options = opts
		// Secrets are keyed -1 (control) through len(conns)-2.
		for i := -1; i < len(ws.conns)-1; i++ {
			ws.fds[i], err = shared.RandomCryptoString()
			if err != nil {
				return InternalError(err)
			}
		}

		ws.command = post.Command
		ws.container = c.LXContainerGet()

		return AsyncResponseWithWs(ws, nil)
	}

	// Detached mode: run with all three stdio fds pointed at /dev/null.
	run := func(id string) shared.OperationResult {
		nullDev, err := os.OpenFile(os.DevNull, os.O_RDWR, 0666)
		if err != nil {
			return shared.OperationError(err)
		}
		defer nullDev.Close()
		nullfd := nullDev.Fd()

		opts.StdinFd = nullfd
		opts.StdoutFd = nullfd
		opts.StderrFd = nullfd

		return runCommand(c.LXContainerGet(), post.Command, opts)
	}

	return AsyncResponse(run, nil)
}
func containerExecPost(d *Daemon, r *http.Request) Response { name := mux.Vars(r)["name"] c, err := containerLoadByName(d, name) if err != nil { return SmartError(err) } if !c.IsRunning() { return BadRequest(fmt.Errorf("Container is not running.")) } if c.IsFrozen() { return BadRequest(fmt.Errorf("Container is frozen.")) } post := commandPostContent{} buf, err := ioutil.ReadAll(r.Body) if err != nil { return BadRequest(err) } if err := json.Unmarshal(buf, &post); err != nil { return BadRequest(err) } env := map[string]string{} for k, v := range c.ExpandedConfig() { if strings.HasPrefix(k, "environment.") { env[strings.TrimPrefix(k, "environment.")] = v } } if post.Environment != nil { for k, v := range post.Environment { env[k] = v } } if post.WaitForWS { ws := &execWs{} ws.fds = map[int]string{} idmapset := c.IdmapSet() if idmapset != nil { ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0) } ws.conns = map[int]*websocket.Conn{} ws.conns[-1] = nil ws.conns[0] = nil if !post.Interactive { ws.conns[1] = nil ws.conns[2] = nil } ws.allConnected = make(chan bool, 1) ws.controlConnected = make(chan bool, 1) ws.interactive = post.Interactive for i := -1; i < len(ws.conns)-1; i++ { ws.fds[i], err = shared.RandomCryptoString() if err != nil { return InternalError(err) } } ws.command = post.Command ws.container = c ws.env = env ws.width = post.Width ws.height = post.Height resources := map[string][]string{} resources["containers"] = []string{ws.container.Name()} op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect) if err != nil { return InternalError(err) } return OperationResponse(op) } run := func(op *operation) error { nullDev, err := os.OpenFile(os.DevNull, os.O_RDWR, 0666) if err != nil { return err } defer nullDev.Close() _, cmdErr := c.Exec(post.Command, env, nil, nil, nil) return cmdErr } resources := map[string][]string{} resources["containers"] = []string{name} op, err := operationCreate(operationClassTask, 
resources, nil, run, nil, nil) if err != nil { return InternalError(err) } return OperationResponse(op) }
// Do runs the source side of a migration once all websockets are connected:
// it negotiates a header with the sink, streams the filesystem, and — for
// live migrations — performs a CRIU dump coordinated through an
// "action-script" callback operation, then transfers the checkpoint and any
// post-checkpoint filesystem delta.
func (s *migrationSourceWs) Do(migrateOp *operation) error {
	// Wait until control/fs (and criu, if live) sockets are all attached.
	<-s.allConnected

	criuType := CRIUType_CRIU_RSYNC.Enum()
	if !s.live {
		criuType = nil

		// Stopped container: mount its storage for the duration of the
		// transfer.
		err := s.container.StorageStart()
		if err != nil {
			return err
		}
		defer s.container.StorageStop()
	}

	// Serialize the container's idmap so the sink can shift ownership.
	idmaps := make([]*IDMapType, 0)

	idmapset := s.container.IdmapSet()
	if idmapset != nil {
		for _, ctnIdmap := range idmapset.Idmap {
			idmap := IDMapType{
				Isuid:    proto.Bool(ctnIdmap.Isuid),
				Isgid:    proto.Bool(ctnIdmap.Isgid),
				Hostid:   proto.Int(ctnIdmap.Hostid),
				Nsid:     proto.Int(ctnIdmap.Nsid),
				Maprange: proto.Int(ctnIdmap.Maprange),
			}

			idmaps = append(idmaps, &idmap)
		}
	}

	driver, fsErr := s.container.Storage().MigrationSource(s.container)
	/* the protocol says we have to send a header no matter what, so let's
	 * do that, but then immediately send an error.
	 */
	snapshots := []*Snapshot{}
	snapshotNames := []string{}
	if fsErr == nil {
		fullSnaps := driver.Snapshots()
		for _, snap := range fullSnaps {
			snapshots = append(snapshots, snapshotToProtobuf(snap))
			snapshotNames = append(snapshotNames, shared.ExtractSnapshotName(snap.Name()))
		}
	}

	// Offer our preferred storage migration type; the sink replies with
	// what it can actually accept.
	myType := s.container.Storage().MigrationType()
	header := MigrationHeader{
		Fs:            &myType,
		Criu:          criuType,
		Idmap:         idmaps,
		SnapshotNames: snapshotNames,
		Snapshots:     snapshots,
	}

	if err := s.send(&header); err != nil {
		s.sendControl(err)
		return err
	}

	if fsErr != nil {
		s.sendControl(fsErr)
		return fsErr
	}

	if err := s.recv(&header); err != nil {
		s.sendControl(err)
		return err
	}

	// If the sink can't take our native format, fall back to plain rsync.
	if *header.Fs != myType {
		myType = MigrationFSType_RSYNC
		header.Fs = &myType

		driver, _ = rsyncMigrationSource(s.container)
	}

	// All failure paths need to do a few things to correctly handle errors
	// before returning. Unfortunately, handling errors is not well-suited
	// to defer as the code depends on the status of driver and the error
	// value. The error value is especially tricky due to the common case
	// of creating a new err variable (intentional or not) due to scoping
	// and use of ":=". Capturing err in a closure for use in defer would
	// be fragile, which defeats the purpose of using defer. An abort
	// function reduces the odds of mishandling errors without introducing
	// the fragility of closing on err.
	abort := func(err error) error {
		driver.Cleanup()
		s.sendControl(err)
		return err
	}

	// First pass of the filesystem while the container is (possibly)
	// still running.
	if err := driver.SendWhileRunning(s.fsConn, migrateOp); err != nil {
		return abort(err)
	}

	restoreSuccess := make(chan bool, 1)
	dumpSuccess := make(chan error, 1)
	if s.live {
		if header.Criu == nil {
			return abort(fmt.Errorf("Got no CRIU socket type for live migration"))
		} else if *header.Criu != CRIUType_CRIU_RSYNC {
			return abort(fmt.Errorf("Formats other than criu rsync not understood"))
		}

		checkpointDir, err := ioutil.TempDir("", "lxd_checkpoint_")
		if err != nil {
			return abort(err)
		}

		if lxc.VersionAtLeast(2, 0, 4) {
			/* What happens below is slightly convoluted. Due to various
			 * complications with networking, there's no easy way for criu
			 * to exit and leave the container in a frozen state for us to
			 * somehow resume later.
			 *
			 * Instead, we use what criu calls an "action-script", which is
			 * basically a callback that lets us know when the dump is
			 * done. (Unfortunately, we can't pass arguments, just an
			 * executable path, so we write a custom action script with the
			 * real command we want to run.)
			 *
			 * This script then hangs until the migration operation either
			 * finishes successfully or fails, and exits 1 or 0, which
			 * causes criu to either leave the container running or kill it
			 * as we asked.
			 */
			dumpDone := make(chan bool, 1)
			actionScriptOpSecret, err := shared.RandomCryptoString()
			if err != nil {
				os.RemoveAll(checkpointDir)
				return abort(err)
			}

			actionScriptOp, err := operationCreate(
				operationClassWebsocket,
				nil,
				nil,
				// Run handler: block until we know whether the sink's
				// restore worked; the exit status tells criu whether to
				// resume or kill the container.
				func(op *operation) error {
					result := <-restoreSuccess
					if !result {
						return fmt.Errorf("restore failed, failing CRIU")
					}

					return nil
				},
				nil,
				// Connect handler: the action script dials in with the
				// shared secret to signal that the dump has finished.
				func(op *operation, r *http.Request, w http.ResponseWriter) error {
					secret := r.FormValue("secret")
					if secret == "" {
						return fmt.Errorf("missing secret")
					}

					if secret != actionScriptOpSecret {
						return os.ErrPermission
					}

					c, err := shared.WebsocketUpgrader.Upgrade(w, r, nil)
					if err != nil {
						return err
					}

					dumpDone <- true

					closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
					return c.WriteMessage(websocket.CloseMessage, closeMsg)
				},
			)
			if err != nil {
				os.RemoveAll(checkpointDir)
				return abort(err)
			}

			if err := writeActionScript(checkpointDir, actionScriptOp.url, actionScriptOpSecret); err != nil {
				os.RemoveAll(checkpointDir)
				return abort(err)
			}

			_, err = actionScriptOp.Run()
			if err != nil {
				os.RemoveAll(checkpointDir)
				return abort(err)
			}

			// Kick off the dump; its result arrives on dumpSuccess
			// after the whole migration settles.
			go func() {
				dumpSuccess <- s.container.Migrate(lxc.MIGRATE_DUMP, checkpointDir, "migration", true, true)
				os.RemoveAll(checkpointDir)
			}()

			select {
			/* the checkpoint failed, let's just abort */
			case err = <-dumpSuccess:
				return abort(err)
			/* the dump finished, let's continue on to the restore */
			case <-dumpDone:
				shared.LogDebugf("Dump finished, continuing with restore...")
			}
		} else {
			// Older liblxc: no action-script support, do a plain
			// stop-the-world dump.
			defer os.RemoveAll(checkpointDir)
			if err := s.container.Migrate(lxc.MIGRATE_DUMP, checkpointDir, "migration", true, false); err != nil {
				return abort(err)
			}
		}

		/* We do these serially right now, but there's really no reason
		 * for us to; since we have separate websockets, we can do it in
		 * parallel if we wanted to. However, assuming we're network
		 * bound, there's really no reason to do these in parallel. In
		 * the future when we're using p.haul's protocol, it will make
		 * sense to do these in parallel.
		 */
		if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn, nil); err != nil {
			return abort(err)
		}

		if err := driver.SendAfterCheckpoint(s.fsConn); err != nil {
			return abort(err)
		}
	}

	driver.Cleanup()

	// Wait for the sink's final verdict on the control connection.
	msg := MigrationControl{}
	if err := s.recv(&msg); err != nil {
		s.disconnect()
		return err
	}

	if s.live {
		// Unblock the action-script operation so criu can resume or
		// kill the container, then collect the dump's result.
		restoreSuccess <- *msg.Success
		err := <-dumpSuccess
		if err != nil {
			shared.LogErrorf("dump failed after successful restore?: %q", err)
		}
	}

	if !*msg.Success {
		return fmt.Errorf(*msg.Message)
	}

	return nil
}
// containerExecPost handles POST /1.0/containers/{name}/exec: run a command
// inside a running container. With WaitForWS the stdio is bridged over
// websockets; otherwise the command runs as a background task, optionally
// recording stdout/stderr to the container's log directory (RecordOutput)
// and publishing the exit code in the operation metadata.
func containerExecPost(d *Daemon, r *http.Request) Response {
	name := mux.Vars(r)["name"]
	c, err := containerLoadByName(d, name)
	if err != nil {
		return SmartError(err)
	}

	// Exec requires a live, unfrozen container.
	if !c.IsRunning() {
		return BadRequest(fmt.Errorf("Container is not running."))
	}

	if c.IsFrozen() {
		return BadRequest(fmt.Errorf("Container is frozen."))
	}

	post := commandPostContent{}
	buf, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return BadRequest(err)
	}

	if err := json.Unmarshal(buf, &post); err != nil {
		return BadRequest(err)
	}

	// Environment: container "environment.*" config keys first, then
	// request-supplied variables override them.
	env := map[string]string{}

	for k, v := range c.ExpandedConfig() {
		if strings.HasPrefix(k, "environment.") {
			env[strings.TrimPrefix(k, "environment.")] = v
		}
	}

	if post.Environment != nil {
		for k, v := range post.Environment {
			env[k] = v
		}
	}

	// Provide a sane default PATH when none was supplied, including
	// /snap/bin when the container's rootfs appears to have snapd.
	_, ok := env["PATH"]
	if !ok {
		env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
		if shared.PathExists(fmt.Sprintf("%s/snap/bin", c.RootfsPath())) {
			env["PATH"] = fmt.Sprintf("%s:/snap/bin", env["PATH"])
		}
	}

	if post.WaitForWS {
		// Websocket mode: one secret per stream so the client can
		// attach control/stdin/stdout/stderr individually.
		ws := &execWs{}
		ws.fds = map[int]string{}

		idmapset := c.IdmapSet()
		if idmapset != nil {
			// Map host root into the container's user namespace.
			ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0)
		}

		ws.conns = map[int]*websocket.Conn{}
		ws.conns[-1] = nil // control connection
		ws.conns[0] = nil  // stdin (single pty when interactive)
		if !post.Interactive {
			ws.conns[1] = nil
			ws.conns[2] = nil
		}
		ws.allConnected = make(chan bool, 1)
		ws.controlConnected = make(chan bool, 1)
		ws.interactive = post.Interactive
		// Secrets are keyed -1 (control) through len(conns)-2.
		for i := -1; i < len(ws.conns)-1; i++ {
			ws.fds[i], err = shared.RandomCryptoString()
			if err != nil {
				return InternalError(err)
			}
		}

		ws.command = post.Command
		ws.container = c
		ws.env = env
		ws.width = post.Width
		ws.height = post.Height

		resources := map[string][]string{}
		resources["containers"] = []string{ws.container.Name()}

		op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect)
		if err != nil {
			return InternalError(err)
		}

		return OperationResponse(op)
	}

	// Background task mode.
	run := func(op *operation) error {
		var cmdErr error
		var cmdResult int
		metadata := shared.Jmap{}

		if post.RecordOutput {
			// Prepare stdout and stderr recording
			stdout, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stdout", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
			if err != nil {
				return err
			}
			defer stdout.Close()

			stderr, err := os.OpenFile(filepath.Join(c.LogPath(), fmt.Sprintf("exec_%s.stderr", op.id)), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
			if err != nil {
				return err
			}
			defer stderr.Close()

			// Run the command
			cmdResult, cmdErr = c.Exec(post.Command, env, nil, stdout, stderr)

			// Update metadata with the right URLs
			metadata["return"] = cmdResult
			metadata["output"] = shared.Jmap{
				"1": fmt.Sprintf("/%s/containers/%s/logs/%s", shared.APIVersion, c.Name(), filepath.Base(stdout.Name())),
				"2": fmt.Sprintf("/%s/containers/%s/logs/%s", shared.APIVersion, c.Name(), filepath.Base(stderr.Name())),
			}
		} else {
			// Output is discarded; only the exit code is reported.
			cmdResult, cmdErr = c.Exec(post.Command, env, nil, nil, nil)
			metadata["return"] = cmdResult
		}

		// Best effort: a metadata failure is logged but doesn't mask
		// the command's own error.
		err = op.UpdateMetadata(metadata)
		if err != nil {
			shared.LogError("error updating metadata for cmd", log.Ctx{"err": err, "cmd": post.Command})
		}

		return cmdErr
	}

	resources := map[string][]string{}
	resources["containers"] = []string{name}

	op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil)
	if err != nil {
		return InternalError(err)
	}

	return OperationResponse(op)
}