Example 1
func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error {
	zfsRecv := func(zfsName string, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
		args := []string{"receive", "-F", "-u", zfsFsName}
		cmd := exec.Command("zfs", args...)

		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}

		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}

		if err := cmd.Start(); err != nil {
			return err
		}

		writePipe := io.WriteCloser(stdin)
		if writeWrapper != nil {
			writePipe = writeWrapper(stdin)
		}

		<-shared.WebsocketRecvStream(writePipe, conn)

		output, err := ioutil.ReadAll(stderr)
		if err != nil {
			shared.LogDebug("problem reading zfs recv stderr", log.Ctx{"err": err})
		}

		err = cmd.Wait()
		if err != nil {
			shared.LogError("problem with zfs recv", log.Ctx{"output": string(output)})
		}
		return err
	}

	/* In some versions of zfs we can write `zfs recv -F` to mounted
	 * filesystems, and in some versions we can't. So, let's always unmount
	 * this fs (it's empty anyway) before we zfs recv. N.B. that `zfs recv`
	 * of a snapshot also needs the actual fs that it has snapshotted
	 * unmounted, so we do this before receiving anything.
	 */
	zfsName := fmt.Sprintf("containers/%s", container.Name())
	err := s.zfsUnmount(zfsName)
	if err != nil {
		return err
	}

	for _, snap := range snapshots {
		args := snapshotProtobufToContainerArgs(container.Name(), snap)
		_, err := containerCreateEmptySnapshot(container.Daemon(), args)
		if err != nil {
			return err
		}

		wrapper := StorageProgressWriter(op, "fs_progress", snap.GetName())
		name := fmt.Sprintf("containers/%s@snapshot-%s", container.Name(), snap.GetName())
		if err := zfsRecv(name, wrapper); err != nil {
			return err
		}

		err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700)
		if err != nil {
			return err
		}

		err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", container.Name(), snap.GetName())))
		if err != nil {
			return err
		}
	}

	defer func() {
		/* clean up our migration-send snapshots that we got from recv. */
		zfsSnapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", container.Name()))
		if err != nil {
			shared.LogError("failed listing snapshots post migration", log.Ctx{"err": err})
			return
		}

		for _, snap := range zfsSnapshots {
			// If we received a bunch of snapshots, remove the migration-send-* ones, if not, wipe any snapshot we got
			if snapshots != nil && len(snapshots) > 0 && !strings.HasPrefix(snap, "migration-send") {
				continue
			}

			s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", container.Name()), snap)
		}
	}()

	/* finally, do the real container */
	wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
	if err := zfsRecv(zfsName, wrapper); err != nil {
		return err
	}

	if live {
		/* and again for the post-running snapshot if this was a live migration */
		wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
		if err := zfsRecv(zfsName, wrapper); err != nil {
			return err
		}
	}

	/* Sometimes, zfs recv mounts this anyway, even if we pass -u
	 * (https://forums.freebsd.org/threads/zfs-receive-u-shouldnt-mount-received-filesystem-right.36844/)
	 * but sometimes it doesn't. Let's try to mount, but not complain about
	 * failure.
	 */
	s.zfsMount(zfsName)
	return nil
}
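
The writeWrapper parameter of zfsRecv above lets the caller interpose on the stream that gets piped into `zfs receive`; StorageProgressWriter presumably returns such a wrapper so that transfer progress can be reported on the operation. As a rough illustration only (this is not LXD's implementation; the package and names are made up), a wrapper with the required func(io.WriteCloser) io.WriteCloser shape could be as simple as a byte counter:

package progress

import (
	"io"
	"sync/atomic"
)

// countingWriteCloser forwards writes to the zfs stdin pipe while keeping
// a running total of the bytes that have passed through.
type countingWriteCloser struct {
	dst   io.WriteCloser
	total int64
}

func (c *countingWriteCloser) Write(p []byte) (int, error) {
	n, err := c.dst.Write(p)
	atomic.AddInt64(&c.total, int64(n)) // bytes transferred so far
	return n, err
}

func (c *countingWriteCloser) Close() error {
	return c.dst.Close()
}

// Wrap matches the writeWrapper signature that zfsRecv expects.
func Wrap(w io.WriteCloser) io.WriteCloser {
	return &countingWriteCloser{dst: w}
}
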
Example 2
File: client.go Project: djibi2/lxd
// Exec runs a command inside the LXD container. For "interactive" use such as
// `lxc exec ...`, one should pass a controlHandler that talks over the control
// socket and handles things like SIGWINCH. If running non-interactive, passing
// a nil controlHandler will cause Exec to return when all of the command
// output is sent to the output buffers.
func (c *Client) Exec(name string, cmd []string, env map[string]string,
	stdin io.ReadCloser, stdout io.WriteCloser,
	stderr io.WriteCloser, controlHandler func(*Client, *websocket.Conn)) (int, error) {

	body := shared.Jmap{
		"command":            cmd,
		"wait-for-websocket": true,
		"interactive":        controlHandler != nil,
		"environment":        env,
	}

	resp, err := c.post(fmt.Sprintf("containers/%s/exec", name), body, Async)
	if err != nil {
		return -1, err
	}

	var fds shared.Jmap

	op, err := resp.MetadataAsOperation()
	if err == nil && op.Metadata != nil {
		fds, err = op.Metadata.GetMap("fds")
		if err != nil {
			return -1, err
		}
	} else {
		// FIXME: This is a backward compatibility codepath
		md := execMd{}
		if err := json.Unmarshal(resp.Metadata, &md); err != nil {
			return -1, err
		}

		fds, err = shared.ParseMetadata(md.FDs)
		if err != nil {
			return -1, err
		}
	}

	if controlHandler != nil {
		var control *websocket.Conn
		if wsControl, ok := fds["control"]; ok {
			control, err = c.websocket(resp.Operation, wsControl.(string))
			if err != nil {
				return -1, err
			}
			defer control.Close()

			go controlHandler(c, control)
		}

		conn, err := c.websocket(resp.Operation, fds["0"].(string))
		if err != nil {
			return -1, err
		}

		shared.WebsocketSendStream(conn, stdin)
		<-shared.WebsocketRecvStream(stdout, conn)
		conn.Close()

	} else {
		conns := make([]*websocket.Conn, 3)
		dones := make([]chan bool, 3)

		conns[0], err = c.websocket(resp.Operation, fds[strconv.Itoa(0)].(string))
		if err != nil {
			return -1, err
		}
		defer conns[0].Close()

		dones[0] = shared.WebsocketSendStream(conns[0], stdin)

		outputs := []io.WriteCloser{stdout, stderr}
		for i := 1; i < 3; i++ {
			conns[i], err = c.websocket(resp.Operation, fds[strconv.Itoa(i)].(string))
			if err != nil {
				return -1, err
			}
			defer conns[i].Close()

			dones[i] = shared.WebsocketRecvStream(outputs[i-1], conns[i])
		}

		/*
		 * We'll get a read signal from each of stdout, stderr when they've
		 * both died. We need to wait for these in addition to the operation,
		 * because the server may indicate that the operation is done before we
		 * can actually read the last bits of data off these sockets and print
		 * it to the screen.
		 *
		 * We don't wait for stdin here, because if we're interactive, the user
		 * may not have closed it (e.g. if the command exits but the user
		 * didn't ^D).
		 */
		for i := 1; i < 3; i++ {
			<-dones[i]
		}

		// Once we're done, we explicitly close stdin, to signal the websockets
		// we're done.
		stdin.Close()
	}

	// Now, get the operation's status too.
	op, err = c.WaitFor(resp.Operation)
	if err != nil {
		return -1, err
	}

	if op.StatusCode == shared.Failure {
		return -1, fmt.Errorf(op.Err)
	}

	if op.StatusCode != shared.Success {
		return -1, fmt.Errorf(i18n.G("got bad op status %s"), op.Status)
	}

	if op.Metadata == nil {
		return -1, fmt.Errorf(i18n.G("no metadata received"))
	}

	return op.Metadata.GetInt("return")
}
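
For the non-interactive case described in the doc comment, the caller passes a nil controlHandler and plain buffers for the output streams. A hedged usage sketch, assuming it is compiled in the same package as client.go (with bytes, fmt, io, io/ioutil and strings imported) and with a purely illustrative container name and command:

// nopWriteCloser adapts a plain io.Writer to the io.WriteCloser that Exec expects.
type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

func runNonInteractive(c *Client) error {
	var stdout, stderr bytes.Buffer
	stdin := ioutil.NopCloser(strings.NewReader(""))

	// A nil controlHandler selects the non-interactive path: Exec returns
	// once both output websockets have been drained.
	ret, err := c.Exec("mycontainer", []string{"ls", "-l", "/"}, nil,
		stdin, nopWriteCloser{&stdout}, nopWriteCloser{&stderr}, nil)
	if err != nil {
		return err
	}

	fmt.Printf("exit status: %d\nstdout:\n%s\nstderr:\n%s\n", ret, stdout.String(), stderr.String())
	return nil
}
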
Example 3
func (s *storageZfs) MigrationSink(container container, snapshots []container, conn *websocket.Conn) error {
	zfsRecv := func(zfsName string) error {
		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
		args := []string{"receive", "-F", "-u", zfsFsName}
		cmd := exec.Command("zfs", args...)

		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}

		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}

		if err := cmd.Start(); err != nil {
			return err
		}

		<-shared.WebsocketRecvStream(stdin, conn)

		output, err := ioutil.ReadAll(stderr)
		if err != nil {
			shared.Debugf("problem reading zfs recv stderr: %s", err)
		}

		err = cmd.Wait()
		if err != nil {
			shared.Log.Error("problem with zfs recv", "output", string(output))
		}
		return err
	}

	/* In some versions of zfs we can write `zfs recv -F` to mounted
	 * filesystems, and in some versions we can't. So, let's always unmount
	 * this fs (it's empty anyway) before we zfs recv. N.B. that `zfs recv`
	 * of a snapshot also needs the actual fs that it has snapshotted
	 * unmounted, so we do this before receiving anything.
	 *
	 * Further, `zfs unmount` doesn't actually unmount things right away,
	 * so we ask /proc/self/mountinfo whether or not this path is mounted
	 * before continuing so that we're sure the fs is actually unmounted
	 * before doing a recv.
	 */
	zfsName := fmt.Sprintf("containers/%s", container.Name())
	fsPath := shared.VarPath(fmt.Sprintf("containers/%s.zfs", container.Name()))
	for i := 0; i < 20; i++ {
		if shared.IsMountPoint(fsPath) || s.zfsMounted(zfsName) {
			if err := s.zfsUnmount(zfsName); err != nil {
				shared.Log.Error("zfs umount error for", "path", zfsName, "err", err)
			}
		} else {
			break
		}

		time.Sleep(500 * time.Millisecond)
	}

	for _, snap := range snapshots {
		fields := strings.SplitN(snap.Name(), shared.SnapshotDelimiter, 2)
		name := fmt.Sprintf("containers/%s@snapshot-%s", fields[0], fields[1])
		if err := zfsRecv(name); err != nil {
			return err
		}

		err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", fields[0])), 0700)
		if err != nil {
			return err
		}

		err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", fields[0], fields[1])))
		if err != nil {
			return err
		}
	}

	/* finally, do the real container */
	if err := zfsRecv(zfsName); err != nil {
		return err
	}

	/* Sometimes, zfs recv mounts this anyway, even if we pass -u
	 * (https://forums.freebsd.org/threads/zfs-receive-u-shouldnt-mount-received-filesystem-right.36844/)
	 * but sometimes it doesn't. Let's try to mount, but not complain about
	 * failure.
	 */
	s.zfsMount(zfsName)
	return nil
}
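
The comment above mentions asking /proc/self/mountinfo whether the path is still mounted before receiving. Field 5 of each mountinfo line is the mount point, so a standalone sketch of such a check (this is not LXD's shared.IsMountPoint, which may be implemented differently, and it ignores octal escapes such as \040 for spaces in paths) looks like this:

package mount

import (
	"bufio"
	"os"
	"strings"
)

// isMountPoint reports whether path appears as a mount point (field 5) in
// /proc/self/mountinfo.
func isMountPoint(path string) (bool, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return false, err
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) >= 5 && fields[4] == path {
			return true, nil
		}
	}
	return false, scanner.Err()
}
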
Example 4
func (s *execWs) Do(id string) shared.OperationResult {
	<-s.allConnected

	var err error
	var ttys []*os.File
	var ptys []*os.File

	if s.interactive {
		ttys = make([]*os.File, 1)
		ptys = make([]*os.File, 1)
		ptys[0], ttys[0], err = shared.OpenPty(s.rootUid, s.rootGid)
		if err != nil {
			return shared.OperationError(err)
		}

		s.options.StdinFd = ttys[0].Fd()
		s.options.StdoutFd = ttys[0].Fd()
		s.options.StderrFd = ttys[0].Fd()
	} else {
		ttys = make([]*os.File, 3)
		ptys = make([]*os.File, 3)
		for i := 0; i < len(ttys); i++ {
			ptys[i], ttys[i], err = shared.Pipe()
			if err != nil {
				return shared.OperationError(err)
			}
		}
		s.options.StdinFd = ptys[0].Fd()
		s.options.StdoutFd = ttys[1].Fd()
		s.options.StderrFd = ttys[2].Fd()
	}

	controlExit := make(chan bool)
	var wgEOF sync.WaitGroup

	if s.interactive {
		wgEOF.Add(1)
		go func() {
			select {
			case <-s.controlConnected:
				break

			case <-controlExit:
				return
			}

			for {
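				// Note (assumption, not visible in this snippet): s.conns appears to be
				// a map[int]*websocket.Conn rather than a slice, since a constant negative
				// index on a slice would not compile; key -1 holds the control websocket
				// and keys 0, 1, 2 hold the stdio websockets.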
				mt, r, err := s.conns[-1].NextReader()
				if mt == websocket.CloseMessage {
					break
				}

				if err != nil {
					shared.Debugf("Got error getting next reader %s", err)
					break
				}

				buf, err := ioutil.ReadAll(r)
				if err != nil {
					shared.Debugf("Failed to read message %s", err)
					break
				}

				command := shared.ContainerExecControl{}

				if err := json.Unmarshal(buf, &command); err != nil {
					shared.Debugf("Failed to unmarshal control socket command: %s", err)
					continue
				}

				if command.Command == "window-resize" {
					winchWidth, err := strconv.Atoi(command.Args["width"])
					if err != nil {
						shared.Debugf("Unable to extract window width: %s", err)
						continue
					}

					winchHeight, err := strconv.Atoi(command.Args["height"])
					if err != nil {
						shared.Debugf("Unable to extract window height: %s", err)
						continue
					}

					err = shared.SetSize(int(ptys[0].Fd()), winchWidth, winchHeight)
					if err != nil {
						shared.Debugf("Failed to set window size to: %dx%d", winchWidth, winchHeight)
						continue
					}
				}

				if err != nil {
					shared.Debugf("Got error writing to writer %s", err)
					break
				}
			}
		}()
		go func() {
			<-shared.WebsocketMirror(s.conns[0], ptys[0], ptys[0])
			wgEOF.Done()
		}()
	} else {
		wgEOF.Add(len(ttys) - 1)
		for i := 0; i < len(ttys); i++ {
			go func(i int) {
				if i == 0 {
					<-shared.WebsocketRecvStream(ttys[i], s.conns[i])
					ttys[i].Close()
				} else {
					<-shared.WebsocketSendStream(s.conns[i], ptys[i])
					ptys[i].Close()
					wgEOF.Done()
				}
			}(i)
		}
	}

	result := runCommand(
		s.container,
		s.command,
		s.options,
	)

	for _, tty := range ttys {
		tty.Close()
	}

	if s.conns[-1] == nil {
		if s.interactive {
			controlExit <- true
		}
	} else {
		s.conns[-1].Close()
	}

	wgEOF.Wait()

	for _, pty := range ptys {
		pty.Close()
	}

	return result
}
Example 5
func (c *Client) Exec(name string, cmd []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File) (int, error) {
	interactive := terminal.IsTerminal(int(stdin.Fd()))

	body := shared.Jmap{"command": cmd, "wait-for-websocket": true, "interactive": interactive, "environment": env}

	resp, err := c.post(fmt.Sprintf("containers/%s/exec", name), body, Async)
	if err != nil {
		return -1, err
	}

	md := execMd{}
	if err := json.Unmarshal(resp.Metadata, &md); err != nil {
		return -1, err
	}

	if interactive {
		if wsControl, ok := md.FDs["control"]; ok {
			go func() {
				control, err := c.websocket(resp.Operation, wsControl)
				if err != nil {
					return
				}

				for {
					width, height, err := terminal.GetSize(syscall.Stdout)
					if err != nil {
						continue
					}

					shared.Debugf("Window size is now: %dx%d", width, height)

					w, err := control.NextWriter(websocket.TextMessage)
					if err != nil {
						shared.Debugf("Got error getting next writer %s", err)
						break
					}

					msg := shared.ContainerExecControl{}
					msg.Command = "window-resize"
					msg.Args = make(map[string]string)
					msg.Args["width"] = strconv.Itoa(width)
					msg.Args["height"] = strconv.Itoa(height)

					buf, err := json.Marshal(msg)
					if err != nil {
						shared.Debugf("Failed to convert to json %s", err)
						break
					}
					_, err = w.Write(buf)

					w.Close()
					if err != nil {
						shared.Debugf("Got err writing %s", err)
						break
					}

					ch := make(chan os.Signal, 1)
					signal.Notify(ch, syscall.SIGWINCH)
					sig := <-ch

					shared.Debugf("Received '%s' signal, updating window geometry.", sig)
				}

				closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
				control.WriteMessage(websocket.CloseMessage, closeMsg)
			}()
		}

		conn, err := c.websocket(resp.Operation, md.FDs["0"])
		if err != nil {
			return -1, err
		}
		shared.WebsocketSendStream(conn, stdin)
		<-shared.WebsocketRecvStream(stdout, conn)
	} else {
		sources := []*os.File{stdin, stdout, stderr}
		conns := make([]*websocket.Conn, 3)
		dones := make([]chan bool, 3)
		for i := 0; i < 3; i++ {
			conns[i], err = c.websocket(resp.Operation, md.FDs[strconv.Itoa(i)])
			if err != nil {
				return -1, err
			}

			if i == 0 {
				dones[i] = shared.WebsocketSendStream(conns[i], sources[i])
			} else {
				dones[i] = shared.WebsocketRecvStream(sources[i], conns[i])
			}
		}

		/*
		 * We'll get a read signal from each of stdout, stderr when they've
		 * both died. We need to wait for these in addition to the operation,
		 * because the server may indicate that the operation is done before we
		 * can actually read the last bits of data off these sockets and print
		 * it to the screen.
		 *
		 * We don't wait for stdin here, because if we're interactive, the user
		 * may not have closed it (e.g. if the command exits but the user
		 * didn't ^D).
		 */
		for i := 1; i < 3; i++ {
			<-dones[i]
		}

		// Once we're done, we explicitly close stdin, to signal the websockets
		// we're done.
		sources[0].Close()
	}

	// Now, get the operation's status too.
	op, err := c.WaitFor(resp.Operation)
	if err != nil {
		return -1, err
	}

	if op.StatusCode == shared.Failure {
		return -1, op.GetError()
	}

	if op.StatusCode != shared.Success {
		return -1, fmt.Errorf(gettext.Gettext("got bad op status %s"), op.Status)
	}

	opMd, err := op.MetadataAsMap()
	if err != nil {
		return -1, err
	}

	return opMd.GetInt("return")
}
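
This older Exec variant takes *os.File arguments directly and decides between the interactive and non-interactive paths by checking whether stdin is a terminal. A hedged usage sketch, assuming the same package and an imported "os"; the container name and command are only illustrative:

func execShell(c *Client) (int, error) {
	// Wire the calling process's own stdio through; Exec picks the
	// interactive path when os.Stdin is a terminal.
	return c.Exec("mycontainer", []string{"/bin/sh"}, nil, os.Stdin, os.Stdout, os.Stderr)
}
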
Example 6
func (s *storageBtrfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet) error {
	if runningInUserns {
		return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap)
	}

	cName := container.Name()

	snapshotsPath := shared.VarPath(fmt.Sprintf("snapshots/%s", cName))
	if !shared.PathExists(snapshotsPath) {
		err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", cName)), 0700)
		if err != nil {
			return err
		}
	}

	btrfsRecv := func(btrfsPath string, targetPath string, isSnapshot bool) error {
		args := []string{"receive", "-e", btrfsPath}
		cmd := exec.Command("btrfs", args...)

		// Remove the existing pre-created subvolume
		err := s.subvolsDelete(targetPath)
		if err != nil {
			return err
		}

		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}

		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}

		if err := cmd.Start(); err != nil {
			return err
		}

		<-shared.WebsocketRecvStream(stdin, conn)

		output, err := ioutil.ReadAll(stderr)
		if err != nil {
			shared.LogDebugf("problem reading btrfs receive stderr %s", err)
		}

		err = cmd.Wait()
		if err != nil {
			shared.LogError("problem with btrfs receive", log.Ctx{"output": string(output)})
			return err
		}

		if !isSnapshot {
			cPath := containerPath(fmt.Sprintf("%s/.root", cName), true)

			err := s.subvolSnapshot(cPath, targetPath, false)
			if err != nil {
				shared.LogError("problem with btrfs snapshot", log.Ctx{"err": err})
				return err
			}

			err = s.subvolsDelete(cPath)
			if err != nil {
				shared.LogError("problem with btrfs delete", log.Ctx{"err": err})
				return err
			}
		}

		return nil
	}

	for _, snap := range snapshots {
		args := snapshotProtobufToContainerArgs(container.Name(), snap)
		// Use a distinct name for the snapshot container to avoid shadowing
		// the storage receiver s.
		snapContainer, err := containerCreateEmptySnapshot(container.Daemon(), args)
		if err != nil {
			return err
		}

		if err := btrfsRecv(containerPath(cName, true), snapContainer.Path(), true); err != nil {
			return err
		}
	}

	/* finally, do the real container */
	if err := btrfsRecv(containerPath(cName, true), container.Path(), false); err != nil {
		return err
	}

	if live {
		if err := btrfsRecv(containerPath(cName, true), container.Path(), false); err != nil {
			return err
		}
	}

	// Cleanup
	if ok, _ := shared.PathIsEmpty(snapshotsPath); ok {
		err := os.Remove(snapshotsPath)
		if err != nil {
			return err
		}
	}

	return nil
}
Example 7
func (s *execWs) Do(op *operation) error {
	<-s.allConnected

	var err error
	var ttys []*os.File
	var ptys []*os.File

	var stdin *os.File
	var stdout *os.File
	var stderr *os.File

	if s.interactive {
		ttys = make([]*os.File, 1)
		ptys = make([]*os.File, 1)
		ptys[0], ttys[0], err = shared.OpenPty(s.rootUid, s.rootGid)
		if err != nil {
			return err
		}

		stdin = ttys[0]
		stdout = ttys[0]
		stderr = ttys[0]

		if s.width > 0 && s.height > 0 {
			shared.SetSize(int(ptys[0].Fd()), s.width, s.height)
		}
	} else {
		ttys = make([]*os.File, 3)
		ptys = make([]*os.File, 3)
		for i := 0; i < len(ttys); i++ {
			ptys[i], ttys[i], err = shared.Pipe()
			if err != nil {
				return err
			}
		}

		stdin = ptys[0]
		stdout = ttys[1]
		stderr = ttys[2]
	}

	controlExit := make(chan bool)
	var wgEOF sync.WaitGroup

	if s.interactive {
		wgEOF.Add(1)
		go func() {
			select {
			case <-s.controlConnected:
				break

			case <-controlExit:
				return
			}

			for {
				mt, r, err := s.conns[-1].NextReader()
				if mt == websocket.CloseMessage {
					break
				}

				if err != nil {
					shared.Debugf("Got error getting next reader %s", err)
					break
				}

				buf, err := ioutil.ReadAll(r)
				if err != nil {
					shared.Debugf("Failed to read message %s", err)
					break
				}

				command := shared.ContainerExecControl{}

				if err := json.Unmarshal(buf, &command); err != nil {
					shared.Debugf("Failed to unmarshal control socket command: %s", err)
					continue
				}

				if command.Command == "window-resize" {
					winchWidth, err := strconv.Atoi(command.Args["width"])
					if err != nil {
						shared.Debugf("Unable to extract window width: %s", err)
						continue
					}

					winchHeight, err := strconv.Atoi(command.Args["height"])
					if err != nil {
						shared.Debugf("Unable to extract window height: %s", err)
						continue
					}

					err = shared.SetSize(int(ptys[0].Fd()), winchWidth, winchHeight)
					if err != nil {
						shared.Debugf("Failed to set window size to: %dx%d", winchWidth, winchHeight)
						continue
					}
				}
			}
		}()
		go func() {
			readDone, writeDone := shared.WebsocketMirror(s.conns[0], ptys[0], ptys[0])
			<-readDone
			<-writeDone
			s.conns[0].Close()
			wgEOF.Done()
		}()
	} else {
		wgEOF.Add(len(ttys) - 1)
		for i := 0; i < len(ttys); i++ {
			go func(i int) {
				if i == 0 {
					<-shared.WebsocketRecvStream(ttys[i], s.conns[i])
					ttys[i].Close()
				} else {
					<-shared.WebsocketSendStream(s.conns[i], ptys[i])
					ptys[i].Close()
					wgEOF.Done()
				}
			}(i)
		}
	}

	cmdResult, cmdErr := s.container.Exec(s.command, s.env, stdin, stdout, stderr)

	for _, tty := range ttys {
		tty.Close()
	}

	if s.conns[-1] == nil {
		if s.interactive {
			controlExit <- true
		}
	} else {
		s.conns[-1].Close()
	}

	wgEOF.Wait()

	for _, pty := range ptys {
		pty.Close()
	}

	metadata := shared.Jmap{"return": cmdResult}
	err = op.UpdateMetadata(metadata)
	if err != nil {
		return err
	}

	return cmdErr
}
Example 8
func (s *execWs) Do(op *operation) error {
	<-s.allConnected

	var err error
	var ttys []*os.File
	var ptys []*os.File

	var stdin *os.File
	var stdout *os.File
	var stderr *os.File

	if s.interactive {
		ttys = make([]*os.File, 1)
		ptys = make([]*os.File, 1)
		ptys[0], ttys[0], err = shared.OpenPty(s.rootUid, s.rootGid)
		if err != nil {
			return err
		}

		stdin = ttys[0]
		stdout = ttys[0]
		stderr = ttys[0]

		if s.width > 0 && s.height > 0 {
			shared.SetSize(int(ptys[0].Fd()), s.width, s.height)
		}
	} else {
		ttys = make([]*os.File, 3)
		ptys = make([]*os.File, 3)
		for i := 0; i < len(ttys); i++ {
			ptys[i], ttys[i], err = shared.Pipe()
			if err != nil {
				return err
			}
		}

		stdin = ptys[0]
		stdout = ttys[1]
		stderr = ttys[2]
	}

	controlExit := make(chan bool)
	receivePid := make(chan int)
	var wgEOF sync.WaitGroup

	if s.interactive {
		wgEOF.Add(1)
		go func() {
			receivedPid := <-receivePid
			select {
			case <-s.controlConnected:
				break

			case <-controlExit:
				return
			}

			for {
				mt, r, err := s.conns[-1].NextReader()
				if mt == websocket.CloseMessage {
					break
				}

				if err != nil {
					shared.LogDebugf("Got error getting next reader %s", err)
					break
				}

				buf, err := ioutil.ReadAll(r)
				if err != nil {
					shared.LogDebugf("Failed to read message %s", err)
					break
				}

				command := shared.ContainerExecControl{}

				if err := json.Unmarshal(buf, &command); err != nil {
					shared.LogDebugf("Failed to unmarshal control socket command: %s", err)
					continue
				}

				if command.Command == "window-resize" {
					winchWidth, err := strconv.Atoi(command.Args["width"])
					if err != nil {
						shared.LogDebugf("Unable to extract window width: %s", err)
						continue
					}

					winchHeight, err := strconv.Atoi(command.Args["height"])
					if err != nil {
						shared.LogDebugf("Unable to extract window height: %s", err)
						continue
					}

					err = shared.SetSize(int(ptys[0].Fd()), winchWidth, winchHeight)
					if err != nil {
						shared.LogDebugf("Failed to set window size to: %dx%d", winchWidth, winchHeight)
						continue
					}
				} else if command.Command == "signal" {
					if err := syscall.Kill(receivedPid, command.Signal); err != nil {
						shared.LogDebugf("Failed forwarding signal '%s' to PID %d.", command.Signal, receivedPid)
						continue
					}
					shared.LogDebugf("Forwarded signal '%s' to PID %d.", command.Signal, receivedPid)
				}
			}
		}()
		go func() {
			readDone, writeDone := shared.WebsocketMirror(s.conns[0], ptys[0], ptys[0])
			<-readDone
			<-writeDone
			s.conns[0].Close()
			wgEOF.Done()
		}()
	} else {
		wgEOF.Add(len(ttys) - 1)
		for i := 0; i < len(ttys); i++ {
			go func(i int) {
				if i == 0 {
					<-shared.WebsocketRecvStream(ttys[i], s.conns[i])
					ttys[i].Close()
				} else {
					<-shared.WebsocketSendStream(s.conns[i], ptys[i], -1)
					ptys[i].Close()
					wgEOF.Done()
				}
			}(i)
		}
	}

	finisher := func(cmdResult int, cmdErr error) error {
		for _, tty := range ttys {
			tty.Close()
		}

		if s.conns[-1] == nil {
			if s.interactive {
				controlExit <- true
			}
		} else {
			s.conns[-1].Close()
		}

		wgEOF.Wait()

		for _, pty := range ptys {
			pty.Close()
		}

		metadata := shared.Jmap{"return": cmdResult}
		err = op.UpdateMetadata(metadata)
		if err != nil {
			return err
		}

		return cmdErr
	}

	r, w, err := shared.Pipe()
	if err != nil {
		shared.LogErrorf("%s", err)
		return err
	}
	defer r.Close()

	cmd, err := s.container.ExecNoWait(s.command, s.env, stdin, stdout, stderr, w)
	if err != nil {
		w.Close()
		return err
	}

	err = cmd.Start()
	if err != nil {
		w.Close()
		return err
	}
	w.Close()

	attachedPid := -1
	if err := json.NewDecoder(r).Decode(&attachedPid); err != nil {
		shared.LogErrorf("Failed to retrieve PID of executing child process: %s", err)
		return finisher(-1, err)
	}

	if s.interactive {
		receivePid <- attachedPid
	}

	err = cmd.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if ok {
			status, ok := exitErr.Sys().(syscall.WaitStatus)
			if ok {
				return finisher(status.ExitStatus(), nil)
			}
		}
	}

	return finisher(0, nil)
}
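
The ExecNoWait call above is handed the write end of a pipe so the attached child can report its PID, which the parent then reads back with json.NewDecoder. A standalone sketch of that hand-off pattern (hypothetical demo code, not LXD's; a bare number is valid JSON, so decoding into an int works):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// The child sees w as file descriptor 3 (the first ExtraFiles entry)
	// and writes its PID there.
	cmd := exec.Command("sh", "-c", "echo $$ >&3")
	cmd.ExtraFiles = []*os.File{w}

	if err := cmd.Start(); err != nil {
		panic(err)
	}
	w.Close() // drop the parent's copy so the read end can reach EOF

	pid := -1
	if err := json.NewDecoder(r).Decode(&pid); err != nil {
		panic(err)
	}
	r.Close()

	fmt.Println("child reported pid:", pid)
	cmd.Wait()
}
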
Example 9
func (s *execWs) Do(op *operation) error {
	<-s.allConnected

	var err error
	var ttys []*os.File
	var ptys []*os.File

	var stdin *os.File
	var stdout *os.File
	var stderr *os.File

	if s.interactive {
		ttys = make([]*os.File, 1)
		ptys = make([]*os.File, 1)
		ptys[0], ttys[0], err = shared.OpenPty(s.rootUid, s.rootGid)
		if err != nil {
			return err
		}

		stdin = ttys[0]
		stdout = ttys[0]
		stderr = ttys[0]

		if s.width > 0 && s.height > 0 {
			shared.SetSize(int(ptys[0].Fd()), s.width, s.height)
		}
	} else {
		ttys = make([]*os.File, 3)
		ptys = make([]*os.File, 3)
		for i := 0; i < len(ttys); i++ {
			ptys[i], ttys[i], err = shared.Pipe()
			if err != nil {
				return err
			}
		}

		stdin = ptys[0]
		stdout = ttys[1]
		stderr = ttys[2]
	}

	controlExit := make(chan bool)
	receivePid := make(chan int)
	var wgEOF sync.WaitGroup

	if s.interactive {
		wgEOF.Add(1)
		go func() {
			receivedPid := <-receivePid
			select {
			case <-s.controlConnected:
				break

			case <-controlExit:
				return
			}

			for {
				mt, r, err := s.conns[-1].NextReader()
				if mt == websocket.CloseMessage {
					break
				}

				if err != nil {
					shared.LogDebugf("Got error getting next reader %s", err)
					break
				}

				buf, err := ioutil.ReadAll(r)
				if err != nil {
					shared.LogDebugf("Failed to read message %s", err)
					break
				}

				command := shared.ContainerExecControl{}

				if err := json.Unmarshal(buf, &command); err != nil {
					shared.LogDebugf("Failed to unmarshal control socket command: %s", err)
					continue
				}

				if command.Command == "window-resize" {
					winchWidth, err := strconv.Atoi(command.Args["width"])
					if err != nil {
						shared.LogDebugf("Unable to extract window width: %s", err)
						continue
					}

					winchHeight, err := strconv.Atoi(command.Args["height"])
					if err != nil {
						shared.LogDebugf("Unable to extract window height: %s", err)
						continue
					}

					err = shared.SetSize(int(ptys[0].Fd()), winchWidth, winchHeight)
					if err != nil {
						shared.LogDebugf("Failed to set window size to: %dx%d", winchWidth, winchHeight)
						continue
					}
				} else if command.Command == "signal" {
					if err := syscall.Kill(receivedPid, command.Signal); err != nil {
						shared.LogDebugf("Failed forwarding signal '%s' to PID %d.", command.Signal, receivedPid)
						continue
					}
					shared.LogDebugf("Forwarded signal '%s' to PID %d.", command.Signal, receivedPid)
				}
			}
		}()
		go func() {
			readDone, writeDone := shared.WebsocketMirror(s.conns[0], ptys[0], ptys[0])
			<-readDone
			<-writeDone
			s.conns[0].Close()
			wgEOF.Done()
		}()
	} else {
		wgEOF.Add(len(ttys) - 1)
		for i := 0; i < len(ttys); i++ {
			go func(i int) {
				if i == 0 {
					<-shared.WebsocketRecvStream(ttys[i], s.conns[i])
					ttys[i].Close()
				} else {
					<-shared.WebsocketSendStream(s.conns[i], ptys[i], -1)
					ptys[i].Close()
					wgEOF.Done()
				}
			}(i)
		}
	}

	finisher := func(cmdResult int, cmdErr error) error {
		for _, tty := range ttys {
			tty.Close()
		}

		if s.conns[-1] == nil {
			if s.interactive {
				controlExit <- true
			}
		} else {
			s.conns[-1].Close()
		}

		wgEOF.Wait()

		for _, pty := range ptys {
			pty.Close()
		}

		metadata := shared.Jmap{"return": cmdResult}
		err = op.UpdateMetadata(metadata)
		if err != nil {
			return err
		}

		return cmdErr
	}

	pid, attachedPid, err := s.container.Exec(s.command, s.env, stdin, stdout, stderr, false)
	if err != nil {
		return err
	}

	if s.interactive {
		receivePid <- attachedPid
	}

	proc, err := os.FindProcess(pid)
	if err != nil {
		return finisher(-1, fmt.Errorf("Failed finding process: %q", err))
	}

	procState, err := proc.Wait()
	if err != nil {
		return finisher(-1, fmt.Errorf("Failed waiting on process %d: %q", pid, err))
	}

	if procState.Success() {
		return finisher(0, nil)
	}

	status, ok := procState.Sys().(syscall.WaitStatus)
	if ok {
		if status.Exited() {
			return finisher(status.ExitStatus(), nil)
		}
		// Backwards compatible behavior. Report success when we exited
		// due to a signal. Otherwise this may break Jenkins, e.g. when
		// lxc exec foo reboot receives SIGTERM and status.ExitStatus()
		// would report -1.
		if status.Signaled() {
			return finisher(0, nil)
		}
	}

	return finisher(-1, nil)
}