Example #1
// Pull tells Docker to pull image referenced by `name`.
func (d Docker) Pull(name string) (builder.Image, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		return nil, err
	}
	ref = reference.WithDefaultTag(ref)

	pullRegistryAuth := &types.AuthConfig{}
	if len(d.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := d.Daemon.RegistryService.ResolveRepository(ref)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			d.AuthConfigs,
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	if err := d.Daemon.PullImage(ref, nil, pullRegistryAuth, ioutils.NopWriteCloser(d.OutOld)); err != nil {
		return nil, err
	}
	return d.GetImage(name)
}
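
All of the examples on this page pass a plain io.Writer through ioutils.NopWriteCloser so it satisfies an io.WriteCloser parameter without letting the callee close the underlying stream. Below is a minimal, self-contained sketch of that pattern (not the verbatim pkg/ioutils source; the helper name is chosen for this illustration):

package main

import (
	"fmt"
	"io"
	"os"
)

// nopWriteCloser adds a Close method that does nothing, so closing the
// wrapper never closes the wrapped writer.
type nopWriteCloser struct {
	io.Writer
}

func (nopWriteCloser) Close() error { return nil }

// newNopWriteCloser mirrors the shape of ioutils.NopWriteCloser: it turns any
// io.Writer into an io.WriteCloser with a no-op Close.
func newNopWriteCloser(w io.Writer) io.WriteCloser {
	return nopWriteCloser{w}
}

func main() {
	wc := newNopWriteCloser(os.Stdout)
	fmt.Fprintln(wc, "written through the wrapper")
	wc.Close() // no-op: os.Stdout stays open and usable afterwards
	fmt.Println("still able to write to os.Stdout")
}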
Example #2
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}
	job := b.Engine.Job("pull", remote, tag)
	pullRegistryAuth := b.AuthConfig
	if len(b.AuthConfigFile.Configs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := registry.ResolveRepositoryInfo(job, remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index)
		pullRegistryAuth = &resolvedAuth
	}
	job.SetenvBool("json", b.StreamFormatter.Json())
	job.SetenvBool("parallel", true)
	job.SetenvJson("authConfig", pullRegistryAuth)
	job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld))
	if err := job.Run(); err != nil {
		return nil, err
	}
	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
Example #3
// Pull tells Docker to pull image referenced by `name`.
func (d Docker) Pull(name string) (*image.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := &cliconfig.AuthConfig{}
	if len(d.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := d.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			&cliconfig.ConfigFile{AuthConfigs: d.AuthConfigs},
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	imagePullConfig := &graph.ImagePullConfig{
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(d.OutOld),
	}

	if err := d.Daemon.PullImage(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	return d.Daemon.GetImage(name)
}
Example #4
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(container *Container) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = new(broadcaster.Unbuffered)
	container.stdout = new(broadcaster.Unbuffered)
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	if container.IsRunning() {
		logrus.Debugf("killing old running container %s", container.ID)
		// Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit
		container.setStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
		// use the current driver and ensure that the container is dead x.x
		cmd := &execdriver.Command{
			ID: container.ID,
		}
		daemon.execDriver.Terminate(cmd)

		if err := container.unmountIpcMounts(); err != nil {
			logrus.Errorf("%s: Failed to umount ipc filesystems: %v", container.ID, err)
		}
		if err := container.Unmount(); err != nil {
			logrus.Debugf("unmount error %s", err)
		}
		if err := container.toDiskLocking(); err != nil {
			logrus.Errorf("Error saving stopped state to disk: %v", err)
		}
	}

	if err := daemon.verifyVolumesInfo(container); err != nil {
		return err
	}

	if err := container.prepareMountPoints(); err != nil {
		return err
	}

	return nil
}
Example #5
func (b *Builder) pullImage(name string) (*imagepkg.Image, error) {
	remote, tag := parsers.ParseRepositoryTag(name)
	if tag == "" {
		tag = "latest"
	}

	pullRegistryAuth := b.AuthConfig
	if len(b.ConfigFile.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote)
		if err != nil {
			return nil, err
		}
		resolvedAuth := registry.ResolveAuthConfig(b.ConfigFile, repoInfo.Index)
		pullRegistryAuth = &resolvedAuth
	}

	imagePullConfig := &graph.ImagePullConfig{
		AuthConfig: pullRegistryAuth,
		OutStream:  ioutils.NopWriteCloser(b.OutOld),
	}

	if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
		return nil, err
	}

	image, err := b.Daemon.Repositories().LookupImage(name)
	if err != nil {
		return nil, err
	}

	return image, nil
}
Example #6
// Job creates a new job which can later be executed.
// This function mimics `Command` from the standard os/exec package.
func (eng *Engine) Job(name string, args ...string) *Job {
	job := &Job{
		Eng:     eng,
		Name:    name,
		Args:    args,
		Stdin:   NewInput(),
		Stdout:  NewOutput(),
		Stderr:  NewOutput(),
		env:     &Env{},
		closeIO: true,

		cancelled: make(chan struct{}),
	}
	if eng.Logging {
		job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr))
	}

	// Catchall is shadowed by specific Register.
	if handler, exists := eng.handlers[name]; exists {
		job.handler = handler
	} else if eng.catchall != nil && name != "" {
		// empty job names are illegal, catchall or not.
		job.handler = eng.catchall
	}
	return job
}
Example #7
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	if err := daemon.verifyVolumesInfo(container); err != nil {
		return err
	}

	if err := container.prepareMountPoints(); err != nil {
		return err
	}

	if container.IsRunning() {
		logrus.Debugf("killing old running container %s", container.ID)

		container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})

		// use the current driver and ensure that the container is dead x.x
		cmd := &execdriver.Command{
			ID: container.ID,
		}
		daemon.execDriver.Terminate(cmd)

		if err := container.Unmount(); err != nil {
			logrus.Debugf("unmount error %s", err)
		}
		if err := container.ToDisk(); err != nil {
			logrus.Debugf("saving stopped state to disk %s", err)
		}
	}

	return nil
}
Example #8
func setupBaseImage() {
	eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase)
	job := eng.Job("image_inspect", unitTestImageName)
	img, _ := job.Stdout.AddEnv()
	// If the unit test is not found, try to download it.
	if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID {
		// Retrieve the Image
		job = eng.Job("pull", unitTestImageName)
		job.Stdout.Add(ioutils.NopWriteCloser(os.Stdout))
		if err := job.Run(); err != nil {
			log.Fatalf("Unable to pull the test image: %s", err)
		}
	}
}
Example #9
func InitDockerMonitor(cfg *MonitorConfig, w *StateWatcher) *DockerMonitor {
	containerId := cfg.ID
	dockerRoot = cfg.Root

	containerRoot := getContainerRoot(dockerRoot, containerId)

	container := &Container{
		root:  containerRoot,
		State: NewState(),
	}

	if err := container.FromDisk(); err != nil {
		log.Errorf("InitDockerMonitor: container from disk failed: %v", err)
		os.Exit(1)
	}

	if container.ID != containerId {
		log.Errorf("InitDockerMonitor: Container %s is stored at %s", container.ID, containerId)
		os.Exit(1)
	}

	if err := container.ReadCommandConfig(); err != nil {
		log.Errorf("InitDockerMonitor: command from disk failed: %v", err)
		os.Exit(1)
	}

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()

	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
	}
	monitor, err := newExternalMonitor(container, w)
	if err != nil {
		log.Errorf("external monitor initial error: %v", err)
		return nil
	}

	//container.exMonitor = monitor
	return &DockerMonitor{
		monitor,
	}
}
Example #10
File: pod.go Project: juito/hyper
// ContainerAttach attaches streams to the container cID. If stream is true, it streams the output.
func (d Docker) ContainerAttach(cId string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error {
	<-d.hyper.Ready

	err := d.Daemon.Attach(stdin, ioutils.NopWriteCloser(stdout), cId)
	if err != nil {
		return err
	}

	code, err := d.Daemon.ExitCode(cId, "")
	if err != nil {
		return err
	}

	if code == 0 {
		return nil
	}

	return &jsonmessage.JSONError{
		Message: fmt.Sprintf("The container '%s' returned a non-zero code: %d", cId, code),
		Code:    code,
	}
}
Example #11
// Pull tells Docker to pull image referenced by `name`.
func (d Docker) Pull(name string) (*image.Image, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		return nil, err
	}
	switch ref.(type) {
	case reference.Tagged:
	case reference.Digested:
	default:
		ref, err = reference.WithTag(ref, "latest")
		if err != nil {
			return nil, err
		}
	}

	pullRegistryAuth := &cliconfig.AuthConfig{}
	if len(d.AuthConfigs) > 0 {
		// The request came with a full auth config file, we prefer to use that
		repoInfo, err := d.Daemon.RegistryService.ResolveRepository(ref)
		if err != nil {
			return nil, err
		}

		resolvedConfig := registry.ResolveAuthConfig(
			&cliconfig.ConfigFile{AuthConfigs: d.AuthConfigs},
			repoInfo.Index,
		)
		pullRegistryAuth = &resolvedConfig
	}

	if err := d.Daemon.PullImage(ref, nil, pullRegistryAuth, ioutils.NopWriteCloser(d.OutOld)); err != nil {
		return nil, err
	}

	return d.Daemon.GetImage(name)
}
Example #12
File: exec.go Project: ch3lo/docker
func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	execConfig, err := d.getExecConfig(execName)
	if err != nil {
		return err
	}

	func() {
		execConfig.Lock()
		defer execConfig.Unlock()
		if execConfig.Running {
			err = fmt.Errorf("Error: Exec command %s is already running", execName)
		}
		execConfig.Running = true
	}()
	if err != nil {
		return err
	}

	logrus.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
	container := execConfig.Container

	container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))

	if execConfig.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debugf("Closing buffered stdin pipe")
			pools.Copy(w, stdin)
		}()
		cStdin = r
	}
	if execConfig.OpenStdout {
		cStdout = stdout
	}
	if execConfig.OpenStderr {
		cStderr = stderr
	}

	execConfig.StreamConfig.stderr = broadcastwriter.New()
	execConfig.StreamConfig.stdout = broadcastwriter.New()
	// Attach to stdin
	if execConfig.OpenStdin {
		execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
	} else {
		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)

	execErr := make(chan error)

	// Note, the execConfig data will be removed when the container
	// itself is deleted.  This allows us to query it (for things like
	// the exitStatus) even after the cmd is done running.

	go func() {
		if err := container.Exec(execConfig); err != nil {
			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
		}
	}()
	select {
	case err := <-attachErr:
		if err != nil {
			return fmt.Errorf("attach failed with error: %s", err)
		}
		return nil
	case err := <-execErr:
		if err == nil {
			return nil
		}

		// Maybe the container stopped while we were trying to exec
		if !container.IsRunning() {
			return fmt.Errorf("container stopped while running exec")
		}
		return err
	}
}
Example #13
func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s [options] exec", job.Name)
	}

	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
		execName         = job.Args[0]
	)

	execConfig, err := d.getExecConfig(execName)
	if err != nil {
		return job.Error(err)
	}

	func() {
		execConfig.Lock()
		defer execConfig.Unlock()
		if execConfig.Running {
			err = fmt.Errorf("Error: Exec command %s is already running", execName)
		}
		execConfig.Running = true
	}()
	if err != nil {
		return job.Error(err)
	}

	log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
	container := execConfig.Container

	if execConfig.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer log.Debugf("Closing buffered stdin pipe")
			io.Copy(w, job.Stdin)
		}()
		cStdin = r
	}
	if execConfig.OpenStdout {
		cStdout = job.Stdout
	}
	if execConfig.OpenStderr {
		cStderr = job.Stderr
	}

	execConfig.StreamConfig.stderr = broadcastwriter.New()
	execConfig.StreamConfig.stdout = broadcastwriter.New()
	// Attach to stdin
	if execConfig.OpenStdin {
		execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
	} else {
		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := d.attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)

	execErr := make(chan error)

	// Note, the execConfig data will be removed when the container
	// itself is deleted.  This allows us to query it (for things like
	// the exitStatus) even after the cmd is done running.

	go func() {
		err := container.Exec(execConfig)
		if err != nil {
			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
		}
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return job.Errorf("attach failed with error: %s", err)
		}
		break
	case err := <-execErr:
		return job.Error(err)
	}

	return engine.StatusOK
}
Example #14
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.IsRunning() {
		log.Debugf("killing old running container %s", container.ID)

		existingPid := container.Pid
		container.SetStopped(&execdriver.ExitStatus{ExitCode: 0})

		// We only have to handle this for lxc because the other drivers will ensure that
		// no processes are left when docker dies
		if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
			lxc.KillLxc(container.ID, 9)
		} else {
			// use the current driver and ensure that the container is dead x.x
			cmd := &execdriver.Command{
				ID: container.ID,
			}
			var err error
			cmd.ProcessConfig.Process, err = os.FindProcess(existingPid)
			if err != nil {
				log.Debugf("cannot find existing process for %d", existingPid)
			}
			daemon.execDriver.Terminate(cmd)
		}

		if err := container.Unmount(); err != nil {
			log.Debugf("unmount error %s", err)
		}
		if err := container.ToDisk(); err != nil {
			log.Debugf("saving stopped state to disk %s", err)
		}

		info := daemon.execDriver.Info(container.ID)
		if !info.IsRunning() {
			log.Debugf("Container %s was supposed to be running but is not.", container.ID)

			log.Debugf("Marking as stopped")

			container.SetStopped(&execdriver.ExitStatus{ExitCode: -127})
			if err := container.ToDisk(); err != nil {
				return err
			}
		}
	}
	return nil
}
Example #15
// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input.
func (streamConfig *StreamConfig) NewNopInputPipe() {
	streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
}
Example #16
func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s [options] exec", job.Name)
	}

	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
		cStdinCloser     io.Closer
		execName         = job.Args[0]
	)

	execConfig, err := d.getExecConfig(execName)
	if err != nil {
		return job.Error(err)
	}

	func() {
		execConfig.Lock()
		defer execConfig.Unlock()
		if execConfig.Running {
			err = fmt.Errorf("Error: Exec command %s is already running", execName)
		}
		execConfig.Running = true
	}()
	if err != nil {
		return job.Error(err)
	}

	log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
	container := execConfig.Container

	if execConfig.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			io.Copy(w, job.Stdin)
		}()
		cStdin = r
		cStdinCloser = job.Stdin
	}
	if execConfig.OpenStdout {
		cStdout = job.Stdout
	}
	if execConfig.OpenStderr {
		cStderr = job.Stderr
	}

	execConfig.StreamConfig.stderr = broadcastwriter.New()
	execConfig.StreamConfig.stdout = broadcastwriter.New()
	// Attach to stdin
	if execConfig.OpenStdin {
		execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
	} else {
		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := d.Attach(&execConfig.StreamConfig, execConfig.OpenStdin, false, execConfig.ProcessConfig.Tty, cStdin, cStdinCloser, cStdout, cStderr)

	execErr := make(chan error)

	// Remove exec from daemon and container.
	defer d.unregisterExecCommand(execConfig)

	go func() {
		err := container.Exec(execConfig)
		if err != nil {
			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
		}
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return job.Errorf("attach failed with error: %s", err)
		}
		break
	case err := <-execErr:
		return job.Error(err)
	}

	return engine.StatusOK
}
Example #17
// Ensure that a job within a job both using the same underlying standard
// output writer does not close the output of the outer job when the inner
// job's stdout is wrapped with a NopCloser. When not wrapped, it should
// close the outer job's output.
func TestNestedJobSharedOutput(t *testing.T) {
	var (
		outerHandler Handler
		innerHandler Handler
		wrapOutput   bool
	)

	outerHandler = func(job *Job) error {
		job.Stdout.Write([]byte("outer1"))

		innerJob := job.Eng.Job("innerJob")

		if wrapOutput {
			innerJob.Stdout.Add(ioutils.NopWriteCloser(job.Stdout))
		} else {
			innerJob.Stdout.Add(job.Stdout)
		}

		if err := innerJob.Run(); err != nil {
			t.Fatal(err)
		}

		// If wrapOutput was *false* this write will do nothing.
		// FIXME (jlhawn): It should cause an error to write to
		// closed output.
		job.Stdout.Write([]byte(" outer2"))

		return nil
	}

	innerHandler = func(job *Job) error {
		job.Stdout.Write([]byte(" inner"))

		return nil
	}

	eng := New()
	eng.Register("outerJob", outerHandler)
	eng.Register("innerJob", innerHandler)

	// wrapOutput starts *false* so the expected
	// output of running the outer job will be:
	//
	//     "outer1 inner"
	//
	outBuf := new(bytes.Buffer)
	outerJob := eng.Job("outerJob")
	outerJob.Stdout.Add(outBuf)

	if err := outerJob.Run(); err != nil {
		t.Fatal(err)
	}

	expectedOutput := "outer1 inner"
	if outBuf.String() != expectedOutput {
		t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String())
	}

	// Set wrapOutput to true so that the expected
	// output of running the outer job will be:
	//
	//     "outer1 inner outer2"
	//
	wrapOutput = true
	outBuf.Reset()
	outerJob = eng.Job("outerJob")
	outerJob.Stdout.Add(outBuf)

	if err := outerJob.Run(); err != nil {
		t.Fatal(err)
	}

	expectedOutput = "outer1 inner outer2"
	if outBuf.String() != expectedOutput {
		t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String())
	}
}
Example #18
// ContainerExecStart starts a previously set up exec instance. The
// std streams are set up.
func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	ec, err := d.getExecConfig(name)
	if err != nil {
		return derr.ErrorCodeNoExecID.WithArgs(name)
	}

	ec.Lock()
	if ec.Running {
		ec.Unlock()
		return derr.ErrorCodeExecRunning.WithArgs(ec.ID)
	}
	ec.Running = true
	ec.Unlock()

	logrus.Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID)
	container := ec.Container
	d.LogContainerEvent(container, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " "))

	if ec.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debugf("Closing buffered stdin pipe")
			pools.Copy(w, stdin)
		}()
		cStdin = r
	}
	if ec.OpenStdout {
		cStdout = stdout
	}
	if ec.OpenStderr {
		cStderr = stderr
	}

	ec.streamConfig.stderr = new(broadcaster.Unbuffered)
	ec.streamConfig.stdout = new(broadcaster.Unbuffered)
	// Attach to stdin
	if ec.OpenStdin {
		ec.streamConfig.stdin, ec.streamConfig.stdinPipe = io.Pipe()
	} else {
		ec.streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := attach(&ec.streamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr)

	execErr := make(chan error)

	// Note, the ExecConfig data will be removed when the container
	// itself is deleted.  This allows us to query it (for things like
	// the exitStatus) even after the cmd is done running.

	go func() {
		execErr <- d.containerExec(container, ec)
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return derr.ErrorCodeExecAttach.WithArgs(err)
		}
		return nil
	case err := <-execErr:
		if aErr := <-attachErr; aErr != nil && err == nil {
			return derr.ErrorCodeExecAttach.WithArgs(aErr)
		}
		if err == nil {
			return nil
		}

		// Maybe the container stopped while we were trying to exec
		if !container.IsRunning() {
			return derr.ErrorCodeExecContainerStopped
		}
		return derr.ErrorCodeExecCantRun.WithArgs(ec.ID, container.ID, err)
	}
}
Example #19
// register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) register(container *Container, updateSuffixarray bool) error {
	if container.daemon != nil || daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	container.daemon = daemon

	// Attach to stdout and stderr
	container.stderr = broadcastwriter.New()
	container.stdout = broadcastwriter.New()
	// Attach to stdin
	if container.Config.OpenStdin {
		container.stdin, container.stdinPipe = io.Pipe()
	} else {
		container.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}
	// done
	daemon.containers.Add(container.ID, container)

	// don't update the Suffixarray if we're starting up
	// we'll waste time if we update it for every container
	daemon.idIndex.Add(container.ID)

	// FIXME: if the container is supposed to be running but is not, auto restart it?
	//        if so, then we need to restart monitor and init a new lock
	// If the container is supposed to be running, make sure of it
	if container.IsRunning() {
		if container.MonitorDriver == MonitorBuiltin {
			log.Debugf("killing old running container %s", container.ID)

			existingPid := container.Pid
			container.SetStopped(0)

			// We only have to handle this for lxc because the other drivers will ensure that
			// no processes are left when docker dies
			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
				lxc.KillLxc(container.ID, 9)
			} else {
				// use the current driver and ensure that the container is dead x.x
				cmd := &execdriver.Command{
					ID: container.ID,
				}
				var err error
				cmd.ProcessConfig.Process, err = os.FindProcess(existingPid)
				if err != nil {
					log.Debugf("cannot find existing process for %d", existingPid)
				}
				daemon.execDriver.Terminate(cmd)
			}

			if err := container.Unmount(); err != nil {
				log.Debugf("unmount error %s", err)
			}
			if err := container.ToDisk(); err != nil {
				log.Debugf("saving stopped state to disk %s", err)
			}

			info := daemon.execDriver.Info(container.ID)
			if !info.IsRunning() {
				log.Debugf("Container %s was supposed to be running but is not.", container.ID)

				log.Debugf("Marking as stopped")

				container.SetStopped(-127)
				if err := container.ToDisk(); err != nil {
					return err
				}
			}
		} else {
			// restore external container
			log.Debugf("restore external container: %s", container.ID)
			_, err := container.daemon.callMonitorAPI(container, "GET", "_ping")
			if err != nil {
				log.Errorf("Call monitor _ping API failed: %v, mark container %s stopped", err, container.ID)

				// Think monitor is down, mark container is down
				container.SetStopped(-125)
				if err := container.ToDisk(); err != nil {
					return err
				}
			} else {
				obj, err := container.daemon.callMonitorAPI(container, "GET", "state")
				if err != nil {
					log.Errorf("Call monitor state API failed: %v", err)
					return err
				}

				m := make(map[string]*WatchState)
				if err = json.Unmarshal(obj, &m); err != nil {
					log.Errorf("Decode WatchState error: %v", err)
					return err
				}
				ws := m["WatchState"]
				log.Debugf("WatchState %v", ws)

				if !ws.Running {
					log.Errorf("Container %s is supposed be running, but not running in monitor, mark it stopped", container.ID)
					container.SetStopped(-125)
					if err = container.ToDisk(); err != nil {
						return err
					}

					// kill monitor server
					if err = syscall.Kill(container.monitorState.Pid, syscall.SIGTERM); err != nil {
						log.Errorf("kill monitor server with pid %v error: %v", container.monitorState.Pid, err)
						return err
					}

					// write monitor state
					container.monitorState.SetStopped(0)
					if err = container.WriteMonitorState(); err != nil {
						log.Errorf("write monitor state error: %v", err)
						return err
					}
				} else { // external container is running
					// register to graph driver
					if err := container.daemon.driver.Register(container.ID); err != nil {
						log.Errorf("register container to graph driver error: %v", err)
					}
					monitor := NewMonitorProxy(container, false)
					container.exMonitor = monitor
					if err := monitor.RunStatePoller(); err != nil {
						log.Errorf("Container %s run StatePoll failed: %v", container.ID, err)
						return err
					}
				}
			}
		}
	}
	return nil
}
Example #20
File: pod.go Project: ZJU-SEL/hyper
// ContainerAttach attaches streams to the container cID. If stream is true, it streams the output.
func (d Docker) ContainerAttach(cId string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error {
	tag := pod.RandStr(8, "alphanum")
	return d.Daemon.Attach(stdin, ioutils.NopWriteCloser(stdout), "container", cId, tag)
}
Example #21
// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input.
func (c *Config) NewNopInputPipe() {
	c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
}
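
Examples #15 and #21 (and the stdin handling in the register and exec functions above) show the other recurring use: when stdin is not opened, stdinPipe is pointed at a NopWriteCloser around ioutil.Discard so any input is silently dropped. Below is a minimal sketch of that behaviour using only the standard library; the streamConfig struct is a hypothetical stand-in for Docker's StreamConfig:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

// streamConfig is a hypothetical stand-in for the StreamConfig/Config types in
// the examples above: all it needs is somewhere to hang a stdin pipe.
type streamConfig struct {
	stdinPipe io.WriteCloser
}

// nopWriteCloser gives an io.Writer a no-op Close, mirroring what
// ioutils.NopWriteCloser provides in the examples.
type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

func main() {
	cfg := &streamConfig{}

	// Equivalent of: cfg.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard)
	cfg.stdinPipe = nopWriteCloser{ioutil.Discard}

	// Writes succeed but the data goes nowhere.
	n, err := cfg.stdinPipe.Write([]byte("this input is silently dropped\n"))
	fmt.Println(n, err) // 31 <nil>

	// Close never fails and never touches any real resource.
	fmt.Println(cfg.stdinPipe.Close()) // <nil>
}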