Example 1
// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig.
func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error {
	keys := []byte{}
	var err error
	if c.DetachKeys != "" {
		keys, err = term.ToBytes(c.DetachKeys)
		if err != nil {
			return fmt.Errorf("Invalid escape keys (%s) provided", c.DetachKeys)
		}
	}

	container, err := daemon.GetContainer(prefixOrName)
	if err != nil {
		return err
	}
	if container.IsPaused() {
		err := fmt.Errorf("Container %s is paused. Unpause the container before attach", prefixOrName)
		return errors.NewRequestConflictError(err)
	}

	inStream, outStream, errStream, err := c.GetStreams()
	if err != nil {
		return err
	}
	defer inStream.Close()

	if !container.Config.Tty && c.MuxStreams {
		errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr)
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	}

	var stdin io.ReadCloser
	var stdout, stderr io.Writer

	if c.UseStdin {
		stdin = inStream
	}
	if c.UseStdout {
		stdout = outStream
	}
	if c.UseStderr {
		stderr = errStream
	}

	if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, keys); err != nil {
		fmt.Fprintf(outStream, "Error attaching: %s\n", err)
	}
	return nil
}
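
For orientation, a minimal caller sketch. The config fields mirror the ones ContainerAttach reads above; the GetStreams wiring (a function-valued field with the four-value signature inferred from the call site) and the helper name are assumptions, not the backend package's documented API.

// attachInteractively is a hypothetical helper; the field names come from
// the usage above, and the GetStreams wiring is an assumption.
func attachInteractively(daemon *Daemon, name string) error {
	cfg := &backend.ContainerAttachConfig{
		UseStdin:   true,
		UseStdout:  true,
		UseStderr:  true,
		Stream:     true,
		MuxStreams: true,            // stdout/stderr multiplexed when no TTY
		DetachKeys: "ctrl-p,ctrl-q", // parsed by term.ToBytes above
		GetStreams: func() (io.ReadCloser, io.Writer, io.Writer, error) {
			return os.Stdin, os.Stdout, os.Stderr, nil
		},
	}
	return daemon.ContainerAttach(name, cfg)
}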
Example 2
func (daemon *Daemon) volumeRm(name string) error {
	v, err := daemon.volumes.Get(name)
	if err != nil {
		return err
	}

	if err := daemon.volumes.Remove(v); err != nil {
		if volumestore.IsInUse(err) {
			err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err)
			return errors.NewRequestConflictError(err)
		}
		return fmt.Errorf("Error while removing volume %s: %v", name, err)
	}
	daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()})
	return nil
}
Example 3
func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) {
	nc, err := daemon.GetContainer(connectedContainerID)
	if err != nil {
		return nil, err
	}
	if containerID == nc.ID {
		return nil, fmt.Errorf("cannot join own network")
	}
	if !nc.IsRunning() {
		err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID)
		return nil, derr.NewRequestConflictError(err)
	}
	if nc.IsRestarting() {
		return nil, errContainerIsRestarting(connectedContainerID)
	}
	return nc, nil
}
Example 4
// ContainerExecStart starts a previously set up exec instance and attaches
// the provided standard streams to it. If ctx is cancelled, the process is
// terminated.
func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	ec, err := d.getExecConfig(name)
	if err != nil {
		return errExecNotFound(name)
	}

	ec.Lock()
	if ec.ExitCode != nil {
		ec.Unlock()
		err := fmt.Errorf("Error: Exec command %s has already run", ec.ID)
		return errors.NewRequestConflictError(err)
	}

	if ec.Running {
		ec.Unlock()
		return fmt.Errorf("Error: Exec command %s is already running", ec.ID)
	}
	ec.Running = true
	defer func() {
		if err != nil {
			ec.Running = false
			exitCode := 126
			ec.ExitCode = &exitCode
		}
	}()
	ec.Unlock()

	c := d.containers.Get(ec.ContainerID)
	logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID)
	d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "))

	if ec.OpenStdin && stdin != nil {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debug("Closing buffered stdin pipe")
			pools.Copy(w, stdin)
		}()
		cStdin = r
	}
	if ec.OpenStdout {
		cStdout = stdout
	}
	if ec.OpenStderr {
		cStderr = stderr
	}

	if ec.OpenStdin {
		ec.NewInputPipes()
	} else {
		ec.NewNopInputPipe()
	}

	p := libcontainerd.Process{
		Args:     append([]string{ec.Entrypoint}, ec.Args...),
		Env:      ec.Env,
		Terminal: ec.Tty,
	}

	if err := execSetPlatformOpt(c, ec, &p); err != nil {
		return err
	}

	attachErr := container.AttachStreams(ctx, ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys)

	systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p)
	if err != nil {
		return err
	}
	ec.Lock()
	ec.Pid = systemPid
	ec.Unlock()

	select {
	case <-ctx.Done():
		logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID)
		d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"]))
		select {
		case <-time.After(termProcessTimeout * time.Second):
			logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout)
			d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"]))
		case <-attachErr:
			// TERM signal worked
		}
		return fmt.Errorf("context cancelled")
	case err := <-attachErr:
		if err != nil {
			if _, ok := err.(container.DetachError); !ok {
				return fmt.Errorf("exec attach failed with error: %v", err)
			}
			d.LogContainerEvent(c, "exec_detach")
		}
	}
	return nil
}
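
A minimal caller sketch, assuming only what the function signature and the select statement above establish: cancelling the context sends TERM to the exec'd process, escalating to KILL after termProcessTimeout. The helper name and timeout handling are hypothetical.

// execWithDeadline is a hypothetical helper: when the deadline fires, the
// cancelled context makes ContainerExecStart terminate the process.
func execWithDeadline(d *Daemon, execID string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return d.ContainerExecStart(ctx, execID, os.Stdin, os.Stdout, os.Stderr)
}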
Example 5
func conflictError(err error) error {
	return errors.NewRequestConflictError(err)
}
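
The point of wrapping errors this way is that the API layer can answer with HTTP 409 Conflict instead of a generic 500. A sketch of that mapping, assuming the wrapped error exposes its status code via a method (the interface and function names here are illustrative, not the package's actual API):

// statusCoder models an error that carries its own HTTP status code.
type statusCoder interface {
	HTTPErrorStatusCode() int
}

// httpStatus picks a response code for an error; conflict errors are
// assumed to report http.StatusConflict (409).
func httpStatus(err error) int {
	if sc, ok := err.(statusCoder); ok {
		return sc.HTTPErrorStatusCode()
	}
	return http.StatusInternalServerError
}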
Example 6
// ImageDelete deletes the image referenced by the given imageRef from this
// daemon. The given imageRef can be an image ID, ID prefix, or a repository
// reference (with an optional tag or digest, defaulting to the tag name
// "latest"). There is differing behavior depending on whether the given
// imageRef is a repository reference or not.
//
// If the given imageRef is a repository reference then that repository
// reference will be removed. However, if there exists any containers which
// were created using the same image reference then the repository reference
// cannot be removed unless either there are other repository references to the
// same image or force is true. After the repository reference is removed, an
// attempt is made to delete the referenced image itself as described below,
// but quietly, meaning any image delete conflicts prevent the image from
// being deleted and the conflict is not reported.
//
// There may be conflicts preventing deletion of an image and these conflicts
// are divided into two categories grouped by their severity:
//
// Hard Conflict:
// 	- a pull or build using the image.
// 	- any descendant image.
// 	- any running container using the image.
//
// Soft Conflict:
// 	- any stopped container using the image.
// 	- any repository tag or digest references to the image.
//
// The image cannot be removed if there are any hard conflicts and can be
// removed if there are soft conflicts only if force is true.
//
// If prune is true, an attempt is made to delete each ancestor image quietly,
// meaning any delete conflicts prevent the image from being deleted and the
// conflict is not reported.
//
// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph
// package. This would require that we no longer need the daemon to determine
// whether images are being used by a stopped or running container.
func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) {
	records := []types.ImageDelete{}

	imgID, err := daemon.GetImageID(imageRef)
	if err != nil {
		return nil, daemon.imageNotExistToErrcode(err)
	}

	repoRefs := daemon.referenceStore.References(imgID.Digest())

	var removedRepositoryRef bool
	if !isImageIDPrefix(imgID.String(), imageRef) {
		// A repository reference was given and should be removed
		// first. We can only remove this reference if either force is
		// true, there are multiple repository references to this
		// image, or there are no containers using the given reference.
		if !force && isSingleReference(repoRefs) {
			if container := daemon.getContainerUsingImage(imgID); container != nil {
				// If we removed the repository reference then
				// this image would remain "dangling" and since
				// we really want to avoid that the client must
				// explicitly force its removal.
				err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String()))
				return nil, errors.NewRequestConflictError(err)
			}
		}

		parsedRef, err := reference.ParseNamed(imageRef)
		if err != nil {
			return nil, err
		}

		parsedRef, err = daemon.removeImageRef(parsedRef)
		if err != nil {
			return nil, err
		}

		untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()}

		daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
		records = append(records, untaggedRecord)

		repoRefs = daemon.referenceStore.References(imgID.Digest())

		// If a tag reference was removed and the only remaining
		// references to the same repository are digest references,
		// then clean up those digest references.
		if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical {
			foundRepoTagRef := false
			for _, repoRef := range repoRefs {
				if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
					foundRepoTagRef = true
					break
				}
			}
			if !foundRepoTagRef {
				// Remove canonical references from same repository
				remainingRefs := []reference.Named{}
				for _, repoRef := range repoRefs {
					if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
						if _, err := daemon.removeImageRef(repoRef); err != nil {
							return records, err
						}

						untaggedRecord := types.ImageDelete{Untagged: repoRef.String()}
						records = append(records, untaggedRecord)
					} else {
						remainingRefs = append(remainingRefs, repoRef)
					}
				}
				repoRefs = remainingRefs
			}
		}

		// If the image still has other references, the untag alone completes the removal.
		if len(repoRefs) > 0 {
			return records, nil
		}

		removedRepositoryRef = true
	} else {
		// If an ID reference was given AND there is at most one tag
		// reference to the image AND all references are within one
		// repository, then remove all references.
		if isSingleReference(repoRefs) {
			c := conflictHard
			if !force {
				c |= conflictSoft &^ conflictActiveReference
			}
			if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
				return nil, conflict
			}

			for _, repoRef := range repoRefs {
				parsedRef, err := daemon.removeImageRef(repoRef)
				if err != nil {
					return nil, err
				}

				untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()}

				daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
				records = append(records, untaggedRecord)
			}
		}
	}

	return records, daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef)
}
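
A hedged usage sketch follows. Only the Untagged field of types.ImageDelete appears above; the Deleted field is an assumption, as is the helper itself.

// removeImageByTag is a hypothetical caller that prints what ImageDelete
// untagged or deleted.
func removeImageByTag(daemon *Daemon, ref string) error {
	records, err := daemon.ImageDelete(ref, false /*force*/, false /*prune*/)
	if err != nil {
		return err // may be a conflict error (e.g. a container still uses the image)
	}
	for _, r := range records {
		if r.Untagged != "" {
			fmt.Println("Untagged:", r.Untagged)
		}
		if r.Deleted != "" { // the Deleted field is an assumption
			fmt.Println("Deleted:", r.Deleted)
		}
	}
	return nil
}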
Example 7
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID)
			return errors.NewRequestConflictError(err)
		}
		if err := daemon.Kill(container); err != nil {
			return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
		}
	}

	// Stop collecting stats for the container, whether or not collection
	// is currently active.
	daemon.statsCollector.stopCollection(container)

	// Stop the container (3-second timeout) before removing it.
	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()

	// Save the container state to disk so that, if an error occurs before
	// the container's metadata file is removed from disk, a daemon restart
	// does not bring a dead container back to life.
	if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}

	// If force removal is required, delete container from various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			daemon.nameIndex.Delete(container.ID)
			daemon.linkIndex.delete(container)
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			if e := daemon.removeMountPoints(container, removeVolume); e != nil {
				logrus.Error(e)
			}
			daemon.LogContainerEvent(container, "destroy")
		}
	}()

	if err = os.RemoveAll(container.Root); err != nil {
		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
	}

	// When container creation fails and `RWLayer` has not been created yet,
	// we do not call `ReleaseRWLayer`.
	if container.RWLayer != nil {
		metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
		layer.LogReleaseMetadata(metadata)
		if err != nil && err != layer.ErrMountDoesNotExist {
			return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err)
		}
	}

	return nil
}
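
For completeness, a hypothetical entry-point sketch showing how user-facing force/volume flags would map onto cleanupContainer; the helper name is illustrative.

// containerRm is a hypothetical wrapper: resolve the container, then let
// cleanupContainer enforce the running/force semantics shown above.
func containerRm(daemon *Daemon, name string, force, removeVolumes bool) error {
	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}
	return daemon.cleanupContainer(ctr, force, removeVolumes)
}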
Example 8
func errExecPaused(id string) error {
	err := fmt.Errorf("Container %s is paused, unpause the container before exec", id)
	return errors.NewRequestConflictError(err)
}
Example 9
func errContainerIsRestarting(containerID string) error {
	err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID)
	return errors.NewRequestConflictError(err)
}