Code Example #1
File: container.go  Project: hwpaas/docker
func (container *Container) monitor(callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
	exitCode, err = container.daemon.Run(container, pipes, callback)
	if err != nil {
		utils.Errorf("Error running container: %s", err)
	}
	container.State.SetStopped(exitCode)

	// Cleanup
	container.cleanup()

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}
	if container.daemon != nil && container.daemon.srv != nil {
		container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
	}
	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
		// FIXME: here is race condition between two RUN instructions in Dockerfile
		// because they share same runconfig and change image. Must be fixed
		// in server/buildfile.go
		if err := container.toDisk(); err != nil {
			utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
		}
	}
	return err
}
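
The function above runs the container through the exec driver, records the exit code, cleans up, and then rebuilds the container's stdin with io.Pipe so a later start gets a fresh pipe. As a minimal, self-contained sketch of that io.Pipe pattern (the goroutine and buffer below are illustrative only, not part of the Docker source):

package main

import (
	"fmt"
	"io"
)

func main() {
	// io.Pipe returns a connected reader/writer pair; creating a fresh pair
	// after a process exits gives the next run a clean stdin, as in the
	// OpenStdin branch of monitor above.
	stdin, stdinPipe := io.Pipe()

	go func() {
		// Writer side: what an attached client would normally feed in.
		fmt.Fprintln(stdinPipe, "hello")
		stdinPipe.Close()
	}()

	// Reader side: what would be wired to the container process.
	buf := make([]byte, 16)
	n, _ := stdin.Read(buf)
	fmt.Printf("read %q\n", buf[:n])
}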
Code Example #2
File: container.go  Project: newgoliath/docker
func (container *Container) monitor(callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)

	pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin)
	exitCode, err = container.daemon.Run(container, pipes, callback)
	if err != nil {
		utils.Errorf("Error running container: %s", err)
	}

	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
		container.State.SetStopped(exitCode)

		// FIXME: there is a race condition here which causes this to fail during the unit tests.
		// If another goroutine was waiting for Wait() to return before removing the container's root
		// from the filesystem... At this point it may already have done so.
		// This is because State.setStopped() has already been called, and has caused Wait()
		// to return.
		// FIXME: why are we serializing running state to disk in the first place?
		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
		if err := container.ToDisk(); err != nil {
			utils.Errorf("Error dumping container state to disk: %s\n", err)
		}
	}

	// Cleanup
	container.cleanup()

	// Re-create a brand new stdin pipe once the container exited
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	}

	if container.daemon != nil && container.daemon.srv != nil {
		container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
	}

	close(container.waitLock)

	return err
}
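
This second variant additionally closes container.waitLock so that anything blocked in Wait() is released after the exit code has been recorded. A minimal sketch of that close-to-broadcast channel pattern, assuming waitLock is a plain chan struct{} (the waiter goroutines below are illustrative only):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	waitLock := make(chan struct{})
	var wg sync.WaitGroup

	// Several waiters block on the channel, like callers of Wait().
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-waitLock // receives immediately once the channel is closed
			fmt.Printf("waiter %d released\n", id)
		}(i)
	}

	time.Sleep(10 * time.Millisecond) // let the waiters block
	close(waitLock)                   // analogue of close(container.waitLock)
	wg.Wait()
}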