Example #1
func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)
	pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
	exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
	if err != nil {
		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
	}
	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
	if ExecConfig.OpenStdin {
		if err := ExecConfig.streamConfig.stdin.Close(); err != nil {
			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
		}
	}
	if err := ExecConfig.streamConfig.stdout.Clean(); err != nil {
		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
	}
	if err := ExecConfig.streamConfig.stderr.Clean(); err != nil {
		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
	}
	if ExecConfig.ProcessConfig.Terminal != nil {
		if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
		}
	}
	// remove the exec command from the container's store only and not the
	// daemon's store so that the exec command can be inspected.
	container.execCommands.Delete(ExecConfig.ID)
	return err
}
Example #2
func (container *Container) monitor(callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
	exitCode, err = container.daemon.Run(container, pipes, callback)
	if err != nil {
		utils.Errorf("Error running container: %s", err)
	}
	container.State.SetStopped(exitCode)

	// Cleanup
	container.cleanup()

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}
	if container.daemon != nil && container.daemon.srv != nil {
		container.LogEvent("die")
	}
	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
		// FIXME: here is race condition between two RUN instructions in Dockerfile
		// because they share same runconfig and change image. Must be fixed
		// in builder/builder.go
		if err := container.toDisk(); err != nil {
			utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
		}
	}
	return err
}
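Examples #2 and #3 re-create the container's stdin with io.Pipe once the process has exited, so that a later attach can stream into a restarted process. As a standalone illustration of that pattern (the fakeContainer struct below is a stand-in, not the real Container type), a fresh pipe gives a synchronously connected reader/writer pair:

package main

import (
	"fmt"
	"io"
)

// fakeContainer is a stand-in for the real Container type; it only keeps
// the two ends of the stdin pipe that the monitor re-creates after exit.
type fakeContainer struct {
	stdin     io.ReadCloser  // read end, consumed by the container process
	stdinPipe io.WriteCloser // write end, fed by an attached client
}

func main() {
	c := &fakeContainer{}

	// Re-create a brand new stdin pipe, as the monitor does after exit:
	// io.Pipe returns a synchronously connected reader/writer pair.
	c.stdin, c.stdinPipe = io.Pipe()

	go func() {
		// an attached client would write here
		fmt.Fprintln(c.stdinPipe, "echo hello")
		c.stdinPipe.Close()
	}()

	// the container side reads what was written until the writer closes
	data, _ := io.ReadAll(c.stdin)
	fmt.Printf("process stdin received: %q\n", data)
}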
Example #3
func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
	if err != nil {
		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
	}

	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
	if execConfig.OpenStdin {
		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
		}
	}
	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
	}
	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
	}
	if execConfig.ProcessConfig.Terminal != nil {
		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
		}
	}

	return err
}
Example #4
func (container *Container) monitor(callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
	exitCode, err = container.daemon.Run(container, pipes, callback)
	if err != nil {
		utils.Errorf("Error running container: %s", err)
	}
	container.State.SetStopped(exitCode)

	// Cleanup
	container.cleanup()

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}
	container.LogEvent("die")
	// If the engine is shutting down, don't save the container state as stopped.
	// This will cause it to be restarted when the engine is restarted.
	if container.daemon != nil && container.daemon.eng != nil && !container.daemon.eng.IsShutdown() {
		if err := container.toDisk(); err != nil {
			utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
		}
	}
	return err
}
Example #5
func (d *Daemon) monitorExec(container *container.Container, execConfig *exec.Config, callback execdriver.DriverCallback) error {
	pipes := execdriver.NewPipes(execConfig.Stdin(), execConfig.Stdout(), execConfig.Stderr(), execConfig.OpenStdin)
	exitCode, err := d.Exec(container, execConfig, pipes, callback)
	if err != nil {
		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
	}
	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)

	if err := execConfig.CloseStreams(); err != nil {
		logrus.Errorf("%s: %s", container.ID, err)
	}

	if execConfig.ProcessConfig.Terminal != nil {
		if err := execConfig.WaitResize(); err != nil {
			logrus.Errorf("Error waiting for resize: %v", err)
		}
		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
		}
	}
	// remove the exec command from the container's store only and not the
	// daemon's store so that the exec command can be inspected.
	container.ExecCommands.Delete(execConfig.ID)
	return err
}
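Example #5 folds the per-stream Close/Clean calls of Examples #1 and #3 into a single CloseStreams() call so that failures surface as one error. Below is a minimal sketch of such a consolidation, assuming an illustrative streams type rather than Docker's actual StreamConfig implementation:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// streams is an illustrative stand-in bundling the three standard streams.
type streams struct {
	stdin, stdout, stderr io.Closer
}

// CloseStreams closes stdin, stdout and stderr in turn and folds any
// failures into a single error, so the caller can log one message.
func (s *streams) CloseStreams() error {
	var errs []string
	for name, c := range map[string]io.Closer{
		"stdin":  s.stdin,
		"stdout": s.stdout,
		"stderr": s.stderr,
	} {
		if c == nil {
			continue
		}
		if err := c.Close(); err != nil {
			errs = append(errs, fmt.Sprintf("error closing %s: %v", name, err))
		}
	}
	if len(errs) > 0 {
		return errors.New(strings.Join(errs, ", "))
	}
	return nil
}

func main() {
	s := &streams{} // all nil: nothing to close
	fmt.Println("close error:", s.CloseStreams())
}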
Example #6
func (m *externalMonitor) Start() error {
	var (
		err        error
		exitStatus int
		// this variable indicates where we are in the execution flow:
		// before Run or after
		afterRun bool
	)

	// ensure that when the monitor finally exits we release the networking and unmount the rootfs
	defer func() {
		if afterRun {
			// reset container
			m.resetContainer()

			m.container.setStopped(exitStatus)
			ws := m.container.State.ToWatchState()

			// if the docker daemon is detached, watching is stopped; Emit will handle it.
			m.watching.Emit(ws)

			// close watching
			m.watching.Stop()
			log.Debugf("external monitor container %s exited", m.container.ID)

			// notify monitor server exit
			close(m.stopSignal)
		}
	}()

	pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)

	if exitStatus, err = m.startContainer(m.container, pipes, m.callback); err != nil {
		return err
	}

	afterRun = true

	// TODO: handle container restartPolicy

	return nil
}
Example #7
// Start starts the container's process and monitors it according to the restart policy
func (m *containerMonitor) Start() error {
	var (
		err        error
		exitStatus execdriver.ExitStatus
		// this variable indicates where we are in the execution flow:
		// before Run or after
		afterRun bool
	)

	// ensure that when the monitor finally exits we release the networking and unmount the rootfs
	defer func() {
		if afterRun {
			m.container.Lock()
			m.container.setStopped(&exitStatus)
			defer m.container.Unlock()
		}
		m.Close()
	}()
	// reset stopped flag
	if m.container.HasBeenManuallyStopped {
		m.container.HasBeenManuallyStopped = false
	}

	// reset the restart count
	m.container.RestartCount = -1

	for {
		m.container.RestartCount++

		if err := m.supervisor.StartLogging(m.container); err != nil {
			m.resetContainer(false)

			return err
		}

		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)

		m.logEvent("start")

		m.lastStartTime = time.Now()

		if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil {
			// if we receive an internal error from the initial start of a container then let's
			// return it instead of entering the restart loop
			// set to 127 for container cmd not found / does not exist
			if strings.Contains(err.Error(), "executable file not found") ||
				strings.Contains(err.Error(), "no such file or directory") ||
				strings.Contains(err.Error(), "system cannot find the file specified") {
				if m.container.RestartCount == 0 {
					m.container.ExitCode = 127
					m.resetContainer(false)
					return derr.ErrorCodeCmdNotFound
				}
			}
			// set to 126 when the container cmd can't be invoked
			if strings.Contains(err.Error(), syscall.EACCES.Error()) {
				if m.container.RestartCount == 0 {
					m.container.ExitCode = 126
					m.resetContainer(false)
					return derr.ErrorCodeCmdCouldNotBeInvoked
				}
			}

			if m.container.RestartCount == 0 {
				m.container.ExitCode = -1
				m.resetContainer(false)

				return derr.ErrorCodeCantStart.WithArgs(m.container.ID, utils.GetErrorMessage(err))
			}

			logrus.Errorf("Error running container: %s", err)
		}

		// at this point container.Lock has already been released
		afterRun = true

		m.resetMonitor(err == nil && exitStatus.ExitCode == 0)

		if m.shouldRestart(exitStatus.ExitCode) {
			m.container.setRestarting(&exitStatus)
			m.logEvent("die")
			m.resetContainer(true)

			// sleep with a small time increment between each restart to help avoid issues caused by quickly
			// restarting the container because of some types of errors (networking cut out, etc.)
			m.waitForNextRestart()

			// we need to check this before reentering the loop because the waitForNextRestart could have
			// been terminated by a request from a user
			if m.shouldStop {
				return err
			}
			continue
		}

		m.logEvent("die")
		m.resetContainer(true)
		return err
	}
}
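Examples #7 to #9 pause between restarts (waitForNextRestart) with a growing time increment so a crash-looping container does not hammer the daemon. The exact backoff is not shown in these snippets; a rough sketch of one plausible incremental delay, with hypothetical field names, is:

package main

import (
	"fmt"
	"time"
)

// restartBackoff is a hypothetical holder for the monitor's restart timing state.
type restartBackoff struct {
	timeIncrement time.Duration // grows while the container keeps dying quickly
	lastStartTime time.Time     // when the container was last started
}

// nextDelay doubles the wait when the container died shortly after starting
// and resets it to the base once the container has run for a while.
func (b *restartBackoff) nextDelay(base, max time.Duration) time.Duration {
	if time.Since(b.lastStartTime) < 10*time.Second {
		switch {
		case b.timeIncrement == 0:
			b.timeIncrement = base
		case b.timeIncrement*2 <= max:
			b.timeIncrement *= 2
		}
	} else {
		b.timeIncrement = base
	}
	return b.timeIncrement
}

func main() {
	b := &restartBackoff{lastStartTime: time.Now()}
	for i := 0; i < 5; i++ {
		fmt.Println("waiting", b.nextDelay(100*time.Millisecond, time.Second))
	}
}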
Example #8
// Start starts the container's process and monitors it according to the restart policy
func (m *containerMonitor) Start(ctx context.Context) error {
	var (
		err        error
		exitStatus execdriver.ExitStatus
		// this variable indicates where we are in the execution flow:
		// before Run or after
		afterRun bool
	)

	// ensure that when the monitor finally exits we release the networking and unmount the rootfs
	defer func() {
		if afterRun {
			m.container.Lock()
			m.container.setStopped(&exitStatus)
			defer m.container.Unlock()
		}
		m.Close(ctx)
	}()
	// reset stopped flag
	if m.container.HasBeenManuallyStopped {
		m.container.HasBeenManuallyStopped = false
	}

	// reset the restart count
	m.container.RestartCount = -1

	for {
		m.container.RestartCount++

		if err := m.container.startLogging(); err != nil {
			m.resetContainer(false)

			return err
		}

		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)

		m.container.logEvent(ctx, "start")

		m.lastStartTime = time.Now()

		if exitStatus, err = m.container.daemon.run(ctx, m.container, pipes, m.callback); err != nil {
			// if we receive an internal error from the initial start of a container then let's
			// return it instead of entering the restart loop
			if m.container.RestartCount == 0 {
				m.container.ExitCode = -1
				m.resetContainer(false)

				return err
			}

			logrus.Errorf("Error running container: %s", err)
		}

		// at this point container.Lock has already been released
		afterRun = true

		m.resetMonitor(err == nil && exitStatus.ExitCode == 0)

		if m.shouldRestart(exitStatus.ExitCode) {
			m.container.setRestarting(&exitStatus)
			m.container.logEvent(ctx, "die")
			m.resetContainer(true)

			// sleep with a small time increment between each restart to help avoid issues caused by quickly
			// restarting the container because of some types of errors (networking cut out, etc.)
			m.waitForNextRestart()

			// we need to check this before reentering the loop because the waitForNextRestart could have
			// been terminated by a request from a user
			if m.shouldStop {
				return err
			}
			continue
		}

		m.container.logEvent(ctx, "die")
		m.resetContainer(true)
		return err
	}
}
Example #9
// Start starts the container's process and monitors it according to the restart policy
func (m *containerMonitor) Start() error {
	var (
		err        error
		exitStatus int
	)

	// ensure that when the monitor finally exits we release the networking and unmount the rootfs
	defer m.Close()

	// reset the restart count
	m.container.RestartCount = -1

	for {
		m.container.RestartCount++

		if err := m.container.startLoggingToDisk(); err != nil {
			m.resetContainer()

			return err
		}

		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)

		m.container.LogEvent("start")

		m.lastStartTime = time.Now()

		if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil {
			// if we receive an internal error from the initial start of a container then let's
			// return it instead of entering the restart loop
			if m.container.RestartCount == 0 {
				m.resetContainer()

				return err
			}

			log.Errorf("Error running container: %s", err)
		}

		m.resetMonitor(err == nil && exitStatus == 0)

		if m.shouldRestart(exitStatus) {
			m.container.State.SetRestarting(exitStatus)

			m.container.LogEvent("die")

			m.resetContainer()

			// sleep with a small time increment between each restart to help avoid issues caused by quickly
			// restarting the container because of some types of errors (networking cut out, etc.)
			m.waitForNextRestart()

			// we need to check this before reentering the loop because the waitForNextRestart could have
			// been terminated by a request from a user
			if m.shouldStop {
				m.container.State.SetStopped(exitStatus)

				return err
			}

			continue
		}

		m.container.State.SetStopped(exitStatus)

		m.container.LogEvent("die")

		m.resetContainer()

		break
	}

	return err
}
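Every variant ends by asking shouldRestart(exitCode) whether to loop again, which is driven by the configured restart policy. A simplified decision helper is sketched below; the policy names follow Docker's documented --restart values, but the logic is illustrative rather than the real implementation:

package main

import "fmt"

// restartPolicy mirrors the documented --restart values: "no", "always",
// and "on-failure" (optionally with a maximum retry count).
type restartPolicy struct {
	Name              string
	MaximumRetryCount int
}

// shouldRestart is an illustrative decision helper: never restart a manually
// stopped container, always restart under "always", and under "on-failure"
// restart only non-zero exits while the retry budget is not exhausted.
func shouldRestart(p restartPolicy, exitCode, restartCount int, manuallyStopped bool) bool {
	if manuallyStopped {
		return false
	}
	switch p.Name {
	case "always":
		return true
	case "on-failure":
		if exitCode == 0 {
			return false
		}
		return p.MaximumRetryCount == 0 || restartCount < p.MaximumRetryCount
	default: // "no" or unset
		return false
	}
}

func main() {
	p := restartPolicy{Name: "on-failure", MaximumRetryCount: 3}
	fmt.Println(shouldRestart(p, 1, 0, false)) // true: first failure, budget left
	fmt.Println(shouldRestart(p, 0, 0, false)) // false: clean exit
	fmt.Println(shouldRestart(p, 1, 3, false)) // false: retry budget exhausted
}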