Example #1
// Cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together.  It also unmounts the container's root filesystem.
func (daemon *Daemon) Cleanup(container *container.Container) {
	daemon.releaseNetwork(container)

	container.UnmountIpcMounts(detachMounted)

	if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
		// FIXME: remove once reference counting for graphdrivers has been refactored
		// Ensure that all the mounts are gone
		if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil {
			daemon.cleanupMountsByID(mountid)
		}
	}

	if err := container.UnmountSecrets(); err != nil {
		logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err)
	}

	for _, eConfig := range container.ExecCommands.Commands() {
		daemon.unregisterExecCommand(container, eConfig)
	}

	if container.BaseFS != "" {
		if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil {
			logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
		}
	}
	container.CancelAttachContext()
}
Example #2
// containerStop halts a container by sending a stop signal, waiting for the given
// duration in seconds for the process to exit, and then sending SIGKILL and
// waiting for it to die. If a negative duration is given, Stop will wait
// for the initial signal forever. If the container is not running, Stop returns
// immediately.
func (daemon *Daemon) containerStop(container *container.Container, seconds int) error {
	if !container.IsRunning() {
		return nil
	}

	stopSignal := container.StopSignal()
	// 1. Send a stop signal
	if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil {
		logrus.Infof("Failed to send signal %d to the process, force killing", stopSignal)
		if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
			return err
		}
	}

	// 2. Wait for the process to exit on its own
	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
		logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal)
		// 3. If it doesn't, then send SIGKILL
		if err := daemon.Kill(container); err != nil {
			container.WaitStop(-1 * time.Second)
			logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
		}
	}

	daemon.LogContainerEvent(container, "stop")
	return nil
}
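The escalation in containerStop (stop signal, bounded wait, then force kill) is a general pattern. Below is a minimal standalone sketch of the same flow using os/exec; it is Unix-only, and the command and 2-second grace period are illustrative values, not Docker's.

package main

import (
	"log"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	// 1. Send a stop signal and give the process a grace period.
	cmd.Process.Signal(syscall.SIGTERM)

	select {
	case err := <-done:
		log.Printf("exited after SIGTERM: %v", err)
	case <-time.After(2 * time.Second):
		// 2. Grace period elapsed: escalate to SIGKILL and wait.
		log.Println("still running, using the force")
		cmd.Process.Kill()
		<-done
	}
}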
Example #3
// StartLogging initializes and starts the container logging stream.
func (daemon *Daemon) StartLogging(container *container.Container) error {
	cfg := daemon.getLogConfig(container.HostConfig.LogConfig)
	if cfg.Type == "none" {
		return nil // do not start logging routines
	}

	if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
		return err
	}
	l, err := container.StartLogger(cfg)
	if err != nil {
		return fmt.Errorf("Failed to initialize logging driver: %v", err)
	}

	copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
	container.LogCopier = copier
	copier.Run()
	container.LogDriver = l

	// set LogPath field only for json-file logdriver
	if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
		container.LogPath = jl.LogPath()
	}

	return nil
}
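logger.NewCopier pumps the container's stdout and stderr into the selected log driver. The underlying fan-in is easy to sketch with plain goroutines; copyStreams and its names below are illustrative, not Docker's API.

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
	"sync"
)

// copyStreams reads lines from each named source and hands them, tagged
// with the source name, to sink: roughly what a log copier does.
func copyStreams(sources map[string]io.Reader, sink func(source, line string)) {
	var wg sync.WaitGroup
	for name, r := range sources {
		wg.Add(1)
		go func(name string, r io.Reader) {
			defer wg.Done()
			s := bufio.NewScanner(r)
			for s.Scan() {
				sink(name, s.Text())
			}
		}(name, r)
	}
	wg.Wait()
}

func main() {
	copyStreams(map[string]io.Reader{
		"stdout": strings.NewReader("hello\nworld\n"),
		"stderr": strings.NewReader("oops\n"),
	}, func(source, line string) {
		fmt.Printf("[%s] %s\n", source, line)
	})
}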
Example #4
func addHooks(c *container.Container, spec *specs.Spec) error {
	prestart, err := loadHooks(prestartDir)
	if err != nil {
		return err
	}

	poststart, err := loadHooks(poststartDir)
	if err != nil {
		return err
	}

	poststop, err := loadHooks(poststopDir)
	if err != nil {
		return err
	}

	configPath, err := c.ConfigPath()
	if err != nil {
		return errors.Wrap(err, "config path")
	}

	hostConfigPath, err := c.HostConfigPath()
	if err != nil {
		return errors.Wrap(err, "host config path")
	}

	spec.Hooks.Prestart = appendHooksForContainer(configPath, hostConfigPath, c, prestart)
	spec.Hooks.Poststart = appendHooksForContainer(configPath, hostConfigPath, c, poststart)
	spec.Hooks.Poststop = appendHooksForContainer(configPath, hostConfigPath, c, poststop)

	return nil
}
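loadHooks is not shown here; presumably it scans a hooks directory for executables. The following is a hedged sketch of that kind of scan, where the directory path and the loadHooksDir helper are assumptions rather than the project's actual code.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// loadHooksDir lists the executable entries of dir, the sort of scan a
// loadHooks helper would perform. The helper name is an assumption.
func loadHooksDir(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil // no hooks configured
		}
		return nil, err
	}
	var hooks []string
	for _, e := range entries {
		info, err := e.Info()
		if err != nil {
			return nil, err
		}
		// Keep regular files with any execute bit set.
		if info.Mode().IsRegular() && info.Mode().Perm()&0111 != 0 {
			hooks = append(hooks, filepath.Join(dir, e.Name()))
		}
	}
	return hooks, nil
}

func main() {
	// The path is illustrative, not necessarily the project's hook dir.
	hooks, err := loadHooksDir("/usr/libexec/oci/hooks.d")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(hooks)
}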
Example #5
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	var layerID layer.ChainID
	if container.ImageID != "" {
		img, err := daemon.imageStore.Get(container.ImageID)
		if err != nil {
			return err
		}
		layerID = img.RootFS.ChainID()
	}
	rwlayer, err := daemon.layerStore.Mount(container.ID, layerID, container.GetMountLabel(), daemon.setupInitLayer)
	if err != nil {
		return err
	}
	dir, err := rwlayer.Path()
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != dir {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time.  This should only be an error
		// on non-Windows operating systems.
		if container.BaseFS != "" && runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.driver, container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	container.RWLayer = rwlayer
	return nil
}
Example #6
// Kill forcefully terminates a container.
func (daemon *Daemon) Kill(container *container.Container) error {
	if !container.IsRunning() {
		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
	}

	// 1. Send SIGKILL
	if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
		// While normally we might "return err" here, we're not going to,
		// because if we can't stop the container by this point then
		// it's probably because it's already stopped. Meaning, between
		// the time of the IsRunning() call above and now, it stopped.
		// Also, since the err return will be exec-driver specific, we can't
		// look for any particular (common) error that would indicate
		// that the process is already dead vs something else going wrong.
		// So instead we'll give it up to 2 more seconds to complete and if
		// by that time the container is still running, then the error
		// we got is probably valid and so we return it to the caller.

		if container.IsRunning() {
			container.WaitStop(2 * time.Second)
			if container.IsRunning() {
				return err
			}
		}
	}

	// 2. Wait for the process to die; as a last resort, try to kill the process directly
	if err := killProcessDirectly(container); err != nil {
		return err
	}

	container.WaitStop(-1 * time.Second)
	return nil
}
Example #7
// containerRestart attempts to gracefully stop and then start the
// container. When stopping, wait for the given duration in seconds to
// gracefully stop, before forcefully terminating the container. If
// given a negative duration, wait forever for a graceful stop.
func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error {
	// Avoid unnecessarily unmounting and then directly mounting
	// the container when the container stops and then starts
	// again
	if err := daemon.Mount(container); err == nil {
		defer daemon.Unmount(container)
	}

	if container.IsRunning() {
		// set AutoRemove flag to false before stop so the container won't be
		// removed during restart process
		autoRemove := container.HostConfig.AutoRemove

		container.HostConfig.AutoRemove = false
		err := daemon.containerStop(container, seconds)
		// restore AutoRemove irrespective of whether the stop worked or not
		container.HostConfig.AutoRemove = autoRemove
		// containerStop writes HostConfig to disk, so we must restore AutoRemove
		// on disk as well
		if toDiskErr := container.ToDiskLocking(); toDiskErr != nil {
			logrus.Errorf("Write container to disk error: %v", toDiskErr)
		}

		if err != nil {
			return err
		}
	}

	if err := daemon.containerStart(container, "", true); err != nil {
		return err
	}

	daemon.LogContainerEvent(container, "restart")
	return nil
}
Example #8
func (daemon *Daemon) setupIpcDirs(c *container.Container) error {
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if !c.HasMountFor("/dev/shm") {
		shmPath, err := c.ShmResourcePath()
		if err != nil {
			return err
		}

		if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil {
			return err
		}

		shmSize := container.DefaultSHMSize
		if c.HostConfig.ShmSize != 0 {
			shmSize = c.HostConfig.ShmSize
		}
		shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10)
		if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil {
			return fmt.Errorf("mounting shm tmpfs: %s", err)
		}
		if err := os.Chown(shmPath, rootUID, rootGID); err != nil {
			return err
		}
	}

	return nil
}
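The tmpfs mount assembles its data string from the configured shm size plus fixed mode bits. This Linux-only sketch builds the same option string and flag set but prints them instead of mounting, since mounting requires root.

package main

import (
	"fmt"
	"strconv"
	"syscall"
)

func main() {
	// 64MB is Docker's default shm size; the value here is illustrative.
	var shmSize int64 = 64 * 1024 * 1024

	// mode=1777 is world-writable with the sticky bit, matching /dev/shm.
	data := "mode=1777,size=" + strconv.FormatInt(shmSize, 10)
	flags := uintptr(syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV)

	// A dry run: print what would be passed to syscall.Mount.
	fmt.Printf("mount(\"shm\", shmPath, \"tmpfs\", %#x, %q)\n", flags, data)
}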
Example #9
// containerPause pauses the container execution without stopping the process.
// The execution can be resumed by calling containerUnpause.
func (daemon *Daemon) containerPause(container *container.Container) error {
	container.Lock()
	defer container.Unlock()

	// We cannot Pause the container which is not running
	if !container.Running {
		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
	}

	// We cannot Pause the container which is already paused
	if container.Paused {
		return derr.ErrorCodeAlreadyPaused.WithArgs(container.ID)
	}

	// We cannot Pause the container which is restarting
	if container.Restarting {
		return derr.ErrorCodeContainerRestarting.WithArgs(container.ID)
	}

	if err := daemon.execDriver.Pause(container.Command); err != nil {
		return derr.ErrorCodeCantPause.WithArgs(container.ID, err)
	}
	container.Paused = true
	daemon.LogContainerEvent(container, "pause")
	return nil
}
Example #10
// Update the container's Status.Health struct based on the latest probe's result.
func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult) {
	c.Lock()
	defer c.Unlock()

	retries := c.Config.Healthcheck.Retries
	if retries <= 0 {
		retries = defaultProbeRetries
	}

	h := c.State.Health
	oldStatus := h.Status

	if len(h.Log) >= maxLogEntries {
		h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result)
	} else {
		h.Log = append(h.Log, result)
	}

	if result.ExitCode == exitStatusHealthy {
		h.FailingStreak = 0
		h.Status = types.Healthy
	} else {
		// Failure (including invalid exit code)
		h.FailingStreak++
		if h.FailingStreak >= retries {
			h.Status = types.Unhealthy
		}
		// Else we're starting or healthy. Stay in that state.
	}

	if oldStatus != h.Status {
		d.LogContainerEvent(c, "health_status: "+h.Status)
	}
}
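The append on h.Log caps the slice at maxLogEntries by slicing off the oldest entries first. A self-contained check of that capped-append arithmetic follows; the cap of 5 is illustrative and int stands in for the entry type.

package main

import "fmt"

const maxLogEntries = 5 // illustrative cap

// appendCapped mirrors the h.Log bookkeeping: drop just enough old
// entries that exactly maxLogEntries remain after the append.
func appendCapped(log []int, entry int) []int {
	if len(log) >= maxLogEntries {
		log = log[len(log)+1-maxLogEntries:]
	}
	return append(log, entry)
}

func main() {
	var log []int
	for i := 1; i <= 8; i++ {
		log = appendCapped(log, i)
	}
	fmt.Println(log) // [4 5 6 7 8]
}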
Example #11
// containerPause pauses the container execution without stopping the process.
// The execution can be resumed by calling containerUnpause.
func (daemon *Daemon) containerPause(container *container.Container) error {
	container.Lock()
	defer container.Unlock()

	// We cannot Pause the container which is not running
	if !container.Running {
		return errNotRunning{container.ID}
	}

	// We cannot Pause the container which is already paused
	if container.Paused {
		return fmt.Errorf("Container %s is already paused", container.ID)
	}

	// We cannot Pause the container which is restarting
	if container.Restarting {
		return errContainerIsRestarting(container.ID)
	}

	if err := daemon.containerd.Pause(container.ID); err != nil {
		return fmt.Errorf("Cannot pause container %s: %s", container.ID, err)
	}

	return nil
}
Example #12
// killWithSignal sends the container the given signal. This wrapper for the
// host specific kill command prepares the container before attempting
// to send the signal. An error is returned if the container is paused
// or not running, or if there is a problem returned from the
// underlying kill command.
func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error {
	logrus.Debugf("Sending %d to %s", sig, container.ID)
	container.Lock()
	defer container.Unlock()

	// We could unpause the container for them rather than returning this error
	if container.Paused {
		return derr.ErrorCodeUnpauseContainer.WithArgs(container.ID)
	}

	if !container.Running {
		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
	}

	container.ExitOnNext()

	// if the container is currently restarting we do not need to send the signal
	// to the process. Telling the monitor that it should exit on its next event
	// loop is enough
	if container.Restarting {
		return nil
	}

	if err := daemon.kill(container, sig); err != nil {
		return err
	}

	attributes := map[string]string{
		"signal": fmt.Sprintf("%d", sig),
	}
	daemon.LogContainerEventWithAttributes(container, "kill", attributes)
	return nil
}
Example #13
func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
	var (
		labelOpts []string
		err       error
	)

	for _, opt := range config.SecurityOpt {
		con := strings.SplitN(opt, ":", 2)
		if len(con) == 1 {
			return fmt.Errorf("Invalid --security-opt: %q", opt)
		}
		switch con[0] {
		case "label":
			labelOpts = append(labelOpts, con[1])
		case "apparmor":
			container.AppArmorProfile = con[1]
		case "seccomp":
			container.SeccompProfile = con[1]
		default:
			return fmt.Errorf("Invalid --security-opt: %q", opt)
		}
	}

	container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
	return err
}
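The key detail is SplitN with a limit of 2: only the first colon separates key from value, so values that themselves contain colons (such as SELinux label specs) survive intact. A standalone sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	opts := []string{"label:user:bob", "apparmor:docker-default", "seccomp:unconfined", "bogus"}
	for _, opt := range opts {
		// Limit 2 splits only on the first ':', so values containing
		// ':' (like the SELinux "user:bob") survive intact.
		con := strings.SplitN(opt, ":", 2)
		if len(con) == 1 {
			fmt.Printf("invalid --security-opt: %q\n", opt)
			continue
		}
		fmt.Printf("key=%q value=%q\n", con[0], con[1])
	}
}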
Example #14
func (daemon *Daemon) initializeNetworking(container *container.Container) error {
	var err error

	if container.HostConfig.NetworkMode.IsContainer() {
		// we need to get the hosts files from the container to join
		nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer())
		if err != nil {
			return err
		}
		container.HostnamePath = nc.HostnamePath
		container.HostsPath = nc.HostsPath
		container.ResolvConfPath = nc.ResolvConfPath
		container.Config.Hostname = nc.Config.Hostname
		container.Config.Domainname = nc.Config.Domainname
		return nil
	}

	if container.HostConfig.NetworkMode.IsHost() {
		container.Config.Hostname, err = os.Hostname()
		if err != nil {
			return err
		}
	}

	if err := daemon.allocateNetwork(container); err != nil {
		return err
	}

	return container.BuildHostnameFile()
}
Example #15
// setupMounts iterates through each of the mount points for a container and
// calls Setup() on each. It also checks whether each mount is a network mount
// such as /etc/resolv.conf, and if it is not, appends it to the array of mounts.
func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) {
	var mounts []execdriver.Mount
	for _, m := range container.MountPoints {
		path, err := m.Setup()
		if err != nil {
			return nil, err
		}
		if !container.TrySetNetworkMount(m.Destination, path) {
			mounts = append(mounts, execdriver.Mount{
				Source:      path,
				Destination: m.Destination,
				Writable:    m.RW,
			})
		}
	}

	mounts = sortMounts(mounts)
	netMounts := container.NetworkMounts()
	// if we are going to mount any of the network files from container
	// metadata, the ownership must be set properly for potential container
	// remapped root (user namespaces)
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	for _, mount := range netMounts {
		if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
			return nil, err
		}
	}
	return append(mounts, netMounts...), nil
}
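sortMounts is not shown, but the intent is that parent destinations are set up before paths nested beneath them. Assuming depth-based ordering is what is wanted, sorting by the number of path segments is one way to get it:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	dests := []string{"/var/lib/app/data", "/var", "/etc/resolv.conf", "/var/lib"}
	// Fewer path segments means closer to the root; sorting by segment
	// count stacks parent mounts before anything nested under them.
	sort.SliceStable(dests, func(i, j int) bool {
		return strings.Count(dests[i], "/") < strings.Count(dests[j], "/")
	})
	fmt.Println(dests) // [/var /etc/resolv.conf /var/lib /var/lib/app/data]
}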
Example #16
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
	if !c.IsRunning() {
		return nil, errNotRunning{c.ID}
	}
	stats, err := daemon.containerd.Stats(c.ID)
	if err != nil {
		return nil, err
	}
	s := &types.StatsJSON{}
	cgs := stats.CgroupStats
	if cgs != nil {
		s.BlkioStats = types.BlkioStats{
			IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive),
			IoServicedRecursive:     copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive),
			IoQueuedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive),
			IoServiceTimeRecursive:  copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive),
			IoWaitTimeRecursive:     copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive),
			IoMergedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive),
			IoTimeRecursive:         copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive),
			SectorsRecursive:        copyBlkioEntry(cgs.BlkioStats.SectorsRecursive),
		}
		cpu := cgs.CpuStats
		s.CPUStats = types.CPUStats{
			CPUUsage: types.CPUUsage{
				TotalUsage:        cpu.CpuUsage.TotalUsage,
				PercpuUsage:       cpu.CpuUsage.PercpuUsage,
				UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode,
				UsageInUsermode:   cpu.CpuUsage.UsageInUsermode,
			},
			ThrottlingData: types.ThrottlingData{
				Periods:          cpu.ThrottlingData.Periods,
				ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods,
				ThrottledTime:    cpu.ThrottlingData.ThrottledTime,
			},
		}
		mem := cgs.MemoryStats.Usage
		s.MemoryStats = types.MemoryStats{
			Usage:    mem.Usage,
			MaxUsage: mem.MaxUsage,
			Stats:    cgs.MemoryStats.Stats,
			Failcnt:  mem.Failcnt,
			Limit:    mem.Limit,
		}
		// if the container does not have a memory limit set, use the machine memory
		if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 {
			s.MemoryStats.Limit = daemon.statsCollector.machineMemory
		}
		if cgs.PidsStats != nil {
			s.PidsStats = types.PidsStats{
				Current: cgs.PidsStats.Current,
			}
		}
	}
	s.Read, err = ptypes.Timestamp(stats.Timestamp)
	if err != nil {
		return nil, err
	}
	return s, nil
}
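When no memory limit is configured, the cgroup reports an effectively unlimited value, so anything above physical memory is clamped to the machine total. The fallback in isolation, with an assumed 8 GiB host:

package main

import "fmt"

// clampMemoryLimit mirrors the fallback above: cgroups report an
// effectively unlimited value when no limit is set, so anything above
// the machine's physical memory is replaced by it.
func clampMemoryLimit(limit, machineMemory uint64) uint64 {
	if limit > machineMemory && machineMemory > 0 {
		return machineMemory
	}
	return limit
}

func main() {
	const machine = 8 << 30 // assumed 8 GiB host
	fmt.Println(clampMemoryLimit(9223372036854771712, machine)) // unlimited -> 8589934592
	fmt.Println(clampMemoryLimit(1<<30, machine))               // 1 GiB passes through
}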
Example #17
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
	if !c.IsRunning() {
		return nil, errNotRunning{c.ID}
	}

	// Obtain the stats from HCS via libcontainerd
	stats, err := daemon.containerd.Stats(c.ID)
	if err != nil {
		return nil, err
	}

	// Start with an empty structure
	s := &types.StatsJSON{}

	// Populate the CPU/processor statistics
	s.CPUStats = types.CPUStats{
		CPUUsage: types.CPUUsage{
			TotalUsage:        stats.Processor.TotalRuntime100ns,
			UsageInKernelmode: stats.Processor.RuntimeKernel100ns,
			UsageInUsermode:   stats.Processor.RuntimeUser100ns,
		},
	}

	// Populate the memory statistics
	s.MemoryStats = types.MemoryStats{
		Commit:            stats.Memory.UsageCommitBytes,
		CommitPeak:        stats.Memory.UsageCommitPeakBytes,
		PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes,
	}

	// Populate the storage statistics
	s.StorageStats = types.StorageStats{
		ReadCountNormalized:  stats.Storage.ReadCountNormalized,
		ReadSizeBytes:        stats.Storage.ReadSizeBytes,
		WriteCountNormalized: stats.Storage.WriteCountNormalized,
		WriteSizeBytes:       stats.Storage.WriteSizeBytes,
	}

	// Populate the network statistics
	s.Networks = make(map[string]types.NetworkStats)

	for _, nstats := range stats.Network {
		s.Networks[nstats.EndpointId] = types.NetworkStats{
			RxBytes:   nstats.BytesReceived,
			RxPackets: nstats.PacketsReceived,
			RxDropped: nstats.DroppedPacketsIncoming,
			TxBytes:   nstats.BytesSent,
			TxPackets: nstats.PacketsSent,
			TxDropped: nstats.DroppedPacketsOutgoing,
		}
	}

	// Set the timestamp
	s.Stats.Read = stats.Timestamp
	s.Stats.NumProcs = platform.NumProcs()

	return s, nil
}
Example #18
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			return derr.ErrorCodeRmRunning
		}
		if err := daemon.Kill(container); err != nil {
			return derr.ErrorCodeRmFailed.WithArgs(err)
		}
	}

	// stop collection of stats for the container regardless of whether
	// stats are currently being collected.
	daemon.statsCollector.stopCollection(container)

	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()

	// Save container state to disk so that, if an error happens before the
	// container metadata file is removed from disk, a restart of
	// docker does not bring a dead container back to life.
	if err := container.ToDiskLocking(); err != nil {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}

	// If force removal is required, delete container from various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
				logrus.Debugf("Unable to remove container from link graph: %s", err)
			}
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			daemon.LogContainerEvent(container, "destroy")
		}
	}()

	if err = os.RemoveAll(container.Root); err != nil {
		return derr.ErrorCodeRmFS.WithArgs(container.ID, err)
	}

	metadata, err := daemon.layerStore.DeleteMount(container.ID)
	layer.LogReleaseMetadata(metadata)
	if err != nil && err != layer.ErrMountDoesNotExist {
		return derr.ErrorCodeRmDriverFS.WithArgs(daemon.driver, container.ID, err)
	}

	if err = daemon.execDriver.Clean(container.ID); err != nil {
		return derr.ErrorCodeRmExecDriver.WithArgs(container.ID, err)
	}

	return nil
}
Example #19
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID)
			return errors.NewRequestConflictError(err)
		}
		if err := daemon.Kill(container); err != nil {
			return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
		}
	}

	// stop collection of stats for the container regardless of whether
	// stats are currently being collected.
	daemon.statsCollector.stopCollection(container)

	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()

	// Save container state to disk so that, if an error happens before the
	// container metadata file is removed from disk, a restart of
	// docker does not bring a dead container back to life.
	if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}

	// If force removal is required, delete container from various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			daemon.nameIndex.Delete(container.ID)
			daemon.linkIndex.delete(container)
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			daemon.LogContainerEvent(container, "destroy")
		}
	}()

	if err = os.RemoveAll(container.Root); err != nil {
		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
	}

	// When container creation fails and `RWLayer` has not been created yet, we
	// do not call `ReleaseRWLayer`
	if container.RWLayer != nil {
		metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
		layer.LogReleaseMetadata(metadata)
		if err != nil && err != layer.ErrMountDoesNotExist {
			return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err)
		}
	}

	return nil
}
Example #20
func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) {
	if container.LogDriver != nil && container.IsRunning() {
		return container.LogDriver, nil
	}
	cfg := container.GetLogConfig(daemon.defaultLogConfig)
	if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
		return nil, err
	}
	return container.StartLogger(cfg)
}
Example #21
// createContainerPlatformSpecificSettings performs platform specific container create functionality
func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
	if err := daemon.Mount(container); err != nil {
		return err
	}
	defer daemon.Unmount(container)

	for spec := range config.Volumes {
		name := stringid.GenerateNonCryptoID()
		destination := filepath.Clean(spec)

		// Skip volumes for which we already have something mounted on that
		// destination because of a --volume-from.
		if container.IsDestinationMounted(destination) {
			continue
		}
		path, err := container.GetResourcePath(destination)
		if err != nil {
			return err
		}

		stat, err := os.Stat(path)
		if err == nil && !stat.IsDir() {
			return derr.ErrorCodeMountOverFile.WithArgs(path)
		}

		volumeDriver := hostConfig.VolumeDriver
		if destination != "" && img != nil {
			if _, ok := img.ContainerConfig.Volumes[destination]; ok {
				// if no bind mount was specified for this destination, fall back to the local driver
				if _, ok := container.MountPoints[destination]; !ok {
					volumeDriver = volume.DefaultDriverName
				}
			}
		}

		v, err := daemon.createVolume(name, volumeDriver, nil)
		if err != nil {
			return err
		}

		if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil {
			return err
		}

		// never attempt to copy existing content in a container FS to a shared volume
		if v.DriverName() == volume.DefaultDriverName {
			if err := container.CopyImagePathContent(v, destination); err != nil {
				return err
			}
		}

		container.AddMountPointWithVolume(destination, v, true)
	}
	return nil
}
Example #22
func (daemon *Daemon) containerAttach(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error {
	if logs {
		logDriver, err := daemon.getLogger(container)
		if err != nil {
			return err
		}
		cLog, ok := logDriver.(logger.LogReader)
		if !ok {
			return logger.ErrReadLogsNotSupported
		}
		logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1})

	LogLoop:
		for {
			select {
			case msg, ok := <-logs.Msg:
				if !ok {
					break LogLoop
				}
				if msg.Source == "stdout" && stdout != nil {
					stdout.Write(msg.Line)
				}
				if msg.Source == "stderr" && stderr != nil {
					stderr.Write(msg.Line)
				}
			case err := <-logs.Err:
				logrus.Errorf("Error streaming logs: %v", err)
				break LogLoop
			}
		}
	}

	daemon.LogContainerEvent(container, "attach")

	//stream
	if stream {
		var stdinPipe io.ReadCloser
		if stdin != nil {
			r, w := io.Pipe()
			go func() {
				defer w.Close()
				defer logrus.Debugf("Closing buffered stdin pipe")
				io.Copy(w, stdin)
			}()
			stdinPipe = r
		}
		<-container.Attach(stdinPipe, stdout, stderr, keys)
		// If we are in StdinOnce mode, wait for the process to end;
		// otherwise, simply return
		if container.Config.StdinOnce && !container.Config.Tty {
			container.WaitStop(-1 * time.Second)
		}
	}
	return nil
}
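The stdin handling decouples the caller's reader from the container through io.Pipe, giving the attach path a ReadCloser it can close independently of the caller's stream. The same pattern in isolation:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	stdin := strings.NewReader("echo hello\n")

	// io.Pipe hands the attach path a ReadCloser it owns; the goroutine
	// keeps pumping until the caller's stream is drained, then closes
	// the write end so the reader sees EOF.
	r, w := io.Pipe()
	go func() {
		defer w.Close()
		io.Copy(w, stdin)
	}()

	buf, _ := io.ReadAll(r)
	fmt.Printf("container saw: %q\n", string(buf))
}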
Example #23
File: kill.go Project: Mic92/docker
// killWithSignal sends the container the given signal. This wrapper for the
// host specific kill command prepares the container before attempting
// to send the signal. An error is returned if the container is paused
// or not running, or if there is a problem returned from the
// underlying kill command.
func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error {
	logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID)
	container.Lock()
	defer container.Unlock()

	// We could unpause the container for them rather than returning this error
	if container.Paused {
		return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID)
	}

	if !container.Running {
		return errNotRunning{container.ID}
	}

	if container.Config.StopSignal != "" {
		containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal)
		if err != nil {
			return err
		}
		if containerStopSignal == syscall.Signal(sig) {
			container.ExitOnNext()
		}
	} else {
		container.ExitOnNext()
	}

	if !daemon.IsShuttingDown() {
		container.HasBeenManuallyStopped = true
	}

	// if the container is currently restarting we do not need to send the signal
	// to the process. Telling the monitor that it should exit on its next event
	// loop is enough
	if container.Restarting {
		return nil
	}

	if err := daemon.kill(container, sig); err != nil {
		err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err)
		// if the container or process no longer exists, ignore the error
		if strings.Contains(err.Error(), "container not found") ||
			strings.Contains(err.Error(), "no such process") {
			logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error())
		} else {
			return err
		}
	}

	attributes := map[string]string{
		"signal": fmt.Sprintf("%d", sig),
	}
	daemon.LogContainerEventWithAttributes(container, "kill", attributes)
	return nil
}
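ExitOnNext is armed only when the incoming signal matches the container's configured stop signal. The comparison is sketched below; parseSignal is a hand-rolled stand-in for Docker's pkg/signal.ParseSignal covering only a few names.

package main

import (
	"fmt"
	"syscall"
)

// parseSignal is a tiny stand-in for docker/pkg/signal.ParseSignal.
func parseSignal(name string) (syscall.Signal, error) {
	table := map[string]syscall.Signal{
		"SIGINT":  syscall.SIGINT,
		"SIGTERM": syscall.SIGTERM,
		"SIGKILL": syscall.SIGKILL,
	}
	if s, ok := table[name]; ok {
		return s, nil
	}
	return 0, fmt.Errorf("unknown signal %q", name)
}

func main() {
	stopSignal := "SIGTERM" // what the image's STOPSIGNAL declared
	incoming := syscall.SIGTERM

	parsed, err := parseSignal(stopSignal)
	if err != nil {
		fmt.Println(err)
		return
	}
	if parsed == incoming {
		fmt.Println("stop signal matched: arm ExitOnNext")
	} else {
		fmt.Println("not the stop signal: leave the monitor running")
	}
}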
Example #24
// populateVolumes copies data from the container's rootfs into the volume for non-binds.
// This is only called when the container is created.
func (daemon *Daemon) populateVolumes(c *container.Container) error {
	for _, mnt := range c.MountPoints {
		if !mnt.CopyData || mnt.Volume == nil {
			continue
		}

		logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name)
		if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil {
			return err
		}
	}
	return nil
}
Example #25
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(c *container.Container) error {
	// Attach to stdout and stderr
	if c.Config.OpenStdin {
		c.NewInputPipes()
	} else {
		c.NewNopInputPipe()
	}

	daemon.containers.Add(c.ID, c)
	daemon.idIndex.Add(c.ID)

	return nil
}
Example #26
// populateVolumes copies data from the container's rootfs into the volume for non-binds.
// This is only called when the container is created.
func (daemon *Daemon) populateVolumes(c *container.Container) error {
	for _, mnt := range c.MountPoints {
		// skip binds and volumes referenced by other containers (i.e., volumes-from)
		if mnt.Driver == "" || mnt.Volume == nil || len(daemon.volumes.Refs(mnt.Volume)) > 1 {
			continue
		}

		logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name)
		if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil {
			return err
		}
	}
	return nil
}
Example #27
func (daemon *Daemon) ensureName(container *container.Container) error {
	if container.Name == "" {
		name, err := daemon.generateNewName(container.ID)
		if err != nil {
			return err
		}
		container.Name = name

		if err := container.ToDiskLocking(); err != nil {
			logrus.Errorf("Error saving container name to disk: %v", err)
		}
	}
	return nil
}
Example #28
// Cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together.  It also unmounts the container's root filesystem.
func (daemon *Daemon) Cleanup(container *container.Container) {
	daemon.releaseNetwork(container)

	container.UnmountIpcMounts(detachMounted)

	daemon.conditionalUnmountOnCleanup(container)

	for _, eConfig := range container.ExecCommands.Commands() {
		daemon.unregisterExecCommand(container, eConfig)
	}

	if err := container.UnmountVolumes(false); err != nil {
		logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
	}
}
Example #29
func killProcessDirectly(container *container.Container) error {
	if _, err := container.WaitStop(10 * time.Second); err != nil {
		// Ensure that we don't kill ourselves
		if pid := container.GetPID(); pid != 0 {
			logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
			if err := syscall.Kill(pid, 9); err != nil {
				if err != syscall.ESRCH {
					return err
				}
				logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
			}
		}
	}
	return nil
}
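Treating ESRCH as success is the key detail: if the process is already gone, the kill achieved its goal. A Unix-only sketch of the same check, with an illustrative PID:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// A PID that almost certainly has no live process behind it.
	pid := 999999

	if err := syscall.Kill(pid, syscall.SIGKILL); err != nil {
		if err == syscall.ESRCH {
			// No such process: it already exited, which is the outcome
			// the kill was after, so this is not treated as a failure.
			fmt.Printf("pid %d already gone (ESRCH)\n", pid)
			return
		}
		fmt.Printf("kill failed: %v\n", err)
		return
	}
	fmt.Printf("killed pid %d\n", pid)
}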
Example #30
// containerUnpause resumes the container execution after the container is paused.
func (daemon *Daemon) containerUnpause(container *container.Container) error {
	container.Lock()
	defer container.Unlock()

	// We cannot unpause the container which is not paused
	if !container.Paused {
		return fmt.Errorf("Container %s is not paused", container.ID)
	}

	if err := daemon.containerd.Resume(container.ID); err != nil {
		return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
	}

	return nil
}