Example #1
// Kill forcefully terminates a container.
func (daemon *Daemon) Kill(container *container.Container) error {
	if !container.IsRunning() {
		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
	}

	// 1. Send SIGKILL
	if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil {
		// While normally we might "return err" here we're not going to
		// because if we can't stop the container by this point then
		// it's probably because it's already stopped. Meaning, it stopped
		// between the IsRunning() call above and now.
		// Also, since the err return will be exec driver specific we can't
		// look for any particular (common) error that would indicate
		// that the process is already dead vs something else going wrong.
		// So, instead we'll give it up to 2 more seconds to complete and if
		// by that time the container is still running, then the error
		// we got is probably valid and so we return it to the caller.

		if container.IsRunning() {
			container.WaitStop(2 * time.Second)
			if container.IsRunning() {
				return err
			}
		}
	}

	// 2. Wait for the process to die; as a last resort, try to kill the process directly
	if err := killProcessDirectly(container); err != nil {
		return err
	}

	container.WaitStop(-1 * time.Second)
	return nil
}
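Kill (above) and containerStop (further down) both go through killPossiblyDeadProcess, which is not included in these examples. A minimal sketch of such a wrapper, assuming a lower-level killWithSignal helper and treating ESRCH ("no such process") as the container having already exited, might look like this:

// killPossiblyDeadProcess is a hypothetical sketch of the signalling helper
// used above: it forwards the signal and tolerates the race in which the
// container exits between the IsRunning() check and the actual kill.
func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
	// killWithSignal is assumed to exist and to deliver sig to the container's init process.
	err := daemon.killWithSignal(container, sig)
	if err == syscall.ESRCH {
		// The process is already gone; the sketch treats that as success.
		logrus.Debugf("Cannot kill container %s with signal %d: no such process", container.ID, sig)
		return nil
	}
	return err
}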
Example #2
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
	if !c.IsRunning() {
		return nil, errNotRunning{c.ID}
	}
	stats, err := daemon.containerd.Stats(c.ID)
	if err != nil {
		return nil, err
	}
	s := &types.StatsJSON{}
	cgs := stats.CgroupStats
	if cgs != nil {
		s.BlkioStats = types.BlkioStats{
			IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive),
			IoServicedRecursive:     copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive),
			IoQueuedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive),
			IoServiceTimeRecursive:  copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive),
			IoWaitTimeRecursive:     copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive),
			IoMergedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive),
			IoTimeRecursive:         copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive),
			SectorsRecursive:        copyBlkioEntry(cgs.BlkioStats.SectorsRecursive),
		}
		cpu := cgs.CpuStats
		s.CPUStats = types.CPUStats{
			CPUUsage: types.CPUUsage{
				TotalUsage:        cpu.CpuUsage.TotalUsage,
				PercpuUsage:       cpu.CpuUsage.PercpuUsage,
				UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode,
				UsageInUsermode:   cpu.CpuUsage.UsageInUsermode,
			},
			ThrottlingData: types.ThrottlingData{
				Periods:          cpu.ThrottlingData.Periods,
				ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods,
				ThrottledTime:    cpu.ThrottlingData.ThrottledTime,
			},
		}
		mem := cgs.MemoryStats.Usage
		s.MemoryStats = types.MemoryStats{
			Usage:    mem.Usage,
			MaxUsage: mem.MaxUsage,
			Stats:    cgs.MemoryStats.Stats,
			Failcnt:  mem.Failcnt,
			Limit:    mem.Limit,
		}
		// if the container does not set a memory limit, report the machine memory as the limit
		if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 {
			s.MemoryStats.Limit = daemon.statsCollector.machineMemory
		}
		if cgs.PidsStats != nil {
			s.PidsStats = types.PidsStats{
				Current: cgs.PidsStats.Current,
			}
		}
	}
	s.Read, err = ptypes.Timestamp(stats.Timestamp)
	if err != nil {
		return nil, err
	}
	return s, nil
}
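The blkio conversion above relies on copyBlkioEntry, which is not shown in this example. A minimal sketch, assuming the libcontainerd entries expose Major/Minor/Op/Value fields mirroring the API type (the parameter type name is an assumption):

// copyBlkioEntry is a sketch of the helper used above: it copies each
// libcontainerd blkio stat entry into the equivalent engine API type.
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
	out := make([]types.BlkioStatEntry, len(entries))
	for i, e := range entries {
		out[i] = types.BlkioStatEntry{
			Major: e.Major,
			Minor: e.Minor,
			Op:    e.Op,
			Value: e.Value,
		}
	}
	return out
}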
Example #3
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
	if !c.IsRunning() {
		return nil, errNotRunning{c.ID}
	}

	// Obtain the stats from HCS via libcontainerd
	stats, err := daemon.containerd.Stats(c.ID)
	if err != nil {
		return nil, err
	}

	// Start with an empty structure
	s := &types.StatsJSON{}

	// Populate the CPU/processor statistics
	s.CPUStats = types.CPUStats{
		CPUUsage: types.CPUUsage{
			TotalUsage:        stats.Processor.TotalRuntime100ns,
			UsageInKernelmode: stats.Processor.RuntimeKernel100ns,
			UsageInUsermode:   stats.Processor.RuntimeUser100ns,
		},
	}

	// Populate the memory statistics
	s.MemoryStats = types.MemoryStats{
		Commit:            stats.Memory.UsageCommitBytes,
		CommitPeak:        stats.Memory.UsageCommitPeakBytes,
		PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes,
	}

	// Populate the storage statistics
	s.StorageStats = types.StorageStats{
		ReadCountNormalized:  stats.Storage.ReadCountNormalized,
		ReadSizeBytes:        stats.Storage.ReadSizeBytes,
		WriteCountNormalized: stats.Storage.WriteCountNormalized,
		WriteSizeBytes:       stats.Storage.WriteSizeBytes,
	}

	// Populate the network statistics
	s.Networks = make(map[string]types.NetworkStats)

	for _, nstats := range stats.Network {
		s.Networks[nstats.EndpointId] = types.NetworkStats{
			RxBytes:   nstats.BytesReceived,
			RxPackets: nstats.PacketsReceived,
			RxDropped: nstats.DroppedPacketsIncoming,
			TxBytes:   nstats.BytesSent,
			TxPackets: nstats.PacketsSent,
			TxDropped: nstats.DroppedPacketsOutgoing,
		}
	}

	// Set the timestamp
	s.Stats.Read = stats.Timestamp
	s.Stats.NumProcs = platform.NumProcs()

	return s, nil
}
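The Windows processor counters above are expressed in 100ns units, so a client turning them into a CPU percentage has to normalize against elapsed wall-clock time and the processor count. A rough sketch working from two consecutive samples, using only the Read, NumProcs and CPUStats fields populated above (the function name is illustrative):

// cpuPercentWindows sketches how a consumer might derive a CPU percentage
// from two consecutive StatsJSON samples produced by the function above.
func cpuPercentWindows(prev, cur *types.StatsJSON) float64 {
	// Maximum number of 100ns intervals that could have elapsed across all processors.
	possible := uint64(cur.Read.Sub(prev.Read).Nanoseconds()) / 100
	possible *= uint64(cur.NumProcs)
	if possible == 0 {
		return 0.0
	}
	// 100ns intervals actually consumed by the container in that window.
	used := cur.CPUStats.CPUUsage.TotalUsage - prev.CPUStats.CPUUsage.TotalUsage
	return float64(used) / float64(possible) * 100.0
}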
Example #4
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(container *container.Container) error {
	// Attach to stdout and stderr
	if container.Config.OpenStdin {
		container.NewInputPipes()
	} else {
		container.NewNopInputPipe()
	}

	daemon.containers.Add(container.ID, container)
	daemon.idIndex.Add(container.ID)

	if container.IsRunning() {
		logrus.Debugf("killing old running container %s", container.ID)
		// Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit
		container.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
		// use the current driver and ensure that the container is dead x.x
		cmd := &execdriver.Command{
			CommonCommand: execdriver.CommonCommand{
				ID: container.ID,
			},
		}
		daemon.execDriver.Terminate(cmd)

		container.UnmountIpcMounts(mount.Unmount)

		daemon.Unmount(container)
		if err := container.ToDiskLocking(); err != nil {
			logrus.Errorf("Error saving stopped state to disk: %v", err)
		}
	}

	return nil
}
Example #5
// containerRestart attempts to gracefully stop and then start the
// container. When stopping, wait for the given duration in seconds to
// gracefully stop, before forcefully terminating the container. If
// given a negative duration, wait forever for a graceful stop.
func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error {
	// Hold an extra mount reference for the duration of the restart so the
	// container's filesystem is not needlessly unmounted when it stops and
	// then remounted when it starts again.
	if err := daemon.Mount(container); err == nil {
		defer daemon.Unmount(container)
	}

	if container.IsRunning() {
		// set AutoRemove flag to false before stop so the container won't be
		// removed during restart process
		autoRemove := container.HostConfig.AutoRemove

		container.HostConfig.AutoRemove = false
		err := daemon.containerStop(container, seconds)
		// restore AutoRemove irrespective of whether the stop worked or not
		container.HostConfig.AutoRemove = autoRemove
		// containerStop writes HostConfig to disk, so the restored AutoRemove
		// value must be persisted to disk as well
		if toDiskErr := container.ToDiskLocking(); toDiskErr != nil {
			logrus.Errorf("Write container to disk error: %v", toDiskErr)
		}

		if err != nil {
			return err
		}
	}

	if err := daemon.containerStart(container, "", true); err != nil {
		return err
	}

	daemon.LogContainerEvent(container, "restart")
	return nil
}
Example #6
// containerStop halts a container by sending a stop signal, waiting for the given
// duration in seconds, and then sending SIGKILL and waiting for the
// process to exit. If a negative duration is given, Stop will wait
// for the initial signal forever. If the container is not running, Stop returns
// immediately.
func (daemon *Daemon) containerStop(container *container.Container, seconds int) error {
	if !container.IsRunning() {
		return nil
	}

	stopSignal := container.StopSignal()
	// 1. Send a stop signal
	if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil {
		logrus.Infof("Failed to send signal %d to the process, force killing", stopSignal)
		if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
			return err
		}
	}

	// 2. Wait for the process to exit on its own
	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
		logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal)
		// 3. If it doesn't, then send SIGKILL
		if err := daemon.Kill(container); err != nil {
			container.WaitStop(-1 * time.Second)
			logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
		}
	}

	daemon.LogContainerEvent(container, "stop")
	return nil
}
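container.StopSignal(), used above, is not part of these examples. It resolves the signal configured for the container (for instance via the Dockerfile STOPSIGNAL instruction) and falls back to SIGTERM. A hedged sketch of that method on the container type, assuming the pkg/signal parsing helpers:

// StopSignal is a sketch of how the container's stop signal could be resolved:
// parse the configured signal name and fall back to SIGTERM when unset or invalid.
func (container *Container) StopSignal() int {
	var stopSignal syscall.Signal
	if container.Config.StopSignal != "" {
		// signal.ParseSignal is assumed to map names like "SIGUSR1" to syscall signals.
		stopSignal, _ = signal.ParseSignal(container.Config.StopSignal)
	}
	if int(stopSignal) == 0 {
		stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal)
	}
	return int(stopSignal)
}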
Example #7
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			return derr.ErrorCodeRmRunning
		}
		if err := daemon.Kill(container); err != nil {
			return derr.ErrorCodeRmFailed.WithArgs(err)
		}
	}

	// Stop collecting stats for the container, regardless of whether
	// collection is currently in progress.
	daemon.statsCollector.stopCollection(container)

	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()

	// Save container state to disk so that if an error happens before the
	// container metadata file is removed from disk, a daemon restart will not
	// bring a dead container back to life.
	if err := container.ToDiskLocking(); err != nil {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}

	// If force removal is required, delete container from various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
				logrus.Debugf("Unable to remove container from link graph: %s", err)
			}
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			daemon.LogContainerEvent(container, "destroy")
		}
	}()

	if err = os.RemoveAll(container.Root); err != nil {
		return derr.ErrorCodeRmFS.WithArgs(container.ID, err)
	}

	metadata, err := daemon.layerStore.DeleteMount(container.ID)
	layer.LogReleaseMetadata(metadata)
	if err != nil && err != layer.ErrMountDoesNotExist {
		return derr.ErrorCodeRmDriverFS.WithArgs(daemon.driver, container.ID, err)
	}

	if err = daemon.execDriver.Clean(container.ID); err != nil {
		return derr.ErrorCodeRmExecDriver.WithArgs(container.ID, err)
	}

	return nil
}
Example #8
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		if !forceRemove {
			err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID)
			return errors.NewRequestConflictError(err)
		}
		if err := daemon.Kill(container); err != nil {
			return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
		}
	}

	// Stop collecting stats for the container, regardless of whether
	// collection is currently in progress.
	daemon.statsCollector.stopCollection(container)

	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}

	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()

	// Save container state to disk so that if an error happens before the
	// container metadata file is removed from disk, a daemon restart will not
	// bring a dead container back to life.
	if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}

	// If force removal is required, delete container from various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			daemon.nameIndex.Delete(container.ID)
			daemon.linkIndex.delete(container)
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			daemon.LogContainerEvent(container, "destroy")
		}
	}()

	if err = os.RemoveAll(container.Root); err != nil {
		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
	}

	// When container creation fails and `RWLayer` has not been created yet, we
	// do not call `ReleaseRWLayer`
	if container.RWLayer != nil {
		metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
		layer.LogReleaseMetadata(metadata)
		if err != nil && err != layer.ErrMountDoesNotExist {
			return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err)
		}
	}

	return nil
}
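cleanupContainer is only the back half of removal; a caller such as ContainerRm would first resolve the container and decide whether its volumes should also be removed. A rough, hypothetical sketch of that call site (the function name and flags here are illustrative, not the daemon's actual API):

// removeContainer is an illustrative caller sketch for cleanupContainer.
func (daemon *Daemon) removeContainer(name string, force, removeVolumes bool) error {
	container, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}
	if err := daemon.cleanupContainer(container, force); err != nil {
		return err
	}
	if removeVolumes {
		// Volume and mount-point removal would happen here; omitted in this sketch.
	}
	return nil
}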
Example #9
func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) {
	if container.LogDriver != nil && container.IsRunning() {
		return container.LogDriver, nil
	}
	cfg := container.GetLogConfig(daemon.defaultLogConfig)
	if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
		return nil, err
	}
	return container.StartLogger(cfg)
}
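A typical consumer of getLogger is the log-reading path: it asks for the container's logger and then checks whether that driver supports reading back. A minimal sketch of that pattern, assuming the logger.LogReader interface and ErrReadLogsNotSupported error exposed by the logging drivers (the function name is illustrative):

// readContainerLogs sketches how the logger returned above might be consumed
// when serving a logs request.
func (daemon *Daemon) readContainerLogs(container *container.Container) (logger.LogReader, error) {
	l, err := daemon.getLogger(container)
	if err != nil {
		return nil, err
	}
	// Only drivers implementing logger.LogReader (e.g. json-file, journald) can be read back.
	reader, ok := l.(logger.LogReader)
	if !ok {
		return nil, logger.ErrReadLogsNotSupported
	}
	return reader, nil
}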
Example #10
// Register makes a container object usable by the daemon as <container.ID>
func (daemon *Daemon) Register(container *container.Container) error {
	if daemon.Exists(container.ID) {
		return fmt.Errorf("Container is already loaded")
	}
	if err := validateID(container.ID); err != nil {
		return err
	}
	if err := daemon.ensureName(container); err != nil {
		return err
	}

	// Attach to stdout and stderr
	if container.Config.OpenStdin {
		container.NewInputPipes()
	} else {
		container.NewNopInputPipe()
	}
	daemon.containers.Add(container.ID, container)

	// don't update the suffix array while we're starting up;
	// updating it for every container would waste time
	daemon.idIndex.Add(container.ID)

	if container.IsRunning() {
		logrus.Debugf("killing old running container %s", container.ID)
		// Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit
		container.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
		// use the current driver and ensure that the container is dead x.x
		cmd := &execdriver.Command{
			CommonCommand: execdriver.CommonCommand{
				ID: container.ID,
			},
		}
		daemon.execDriver.Terminate(cmd)

		container.UnmountIpcMounts(mount.Unmount)

		daemon.Unmount(container)
		if err := container.ToDiskLocking(); err != nil {
			logrus.Errorf("Error saving stopped state to disk: %v", err)
		}
	}

	if err := daemon.prepareMountPoints(container); err != nil {
		return err
	}

	return nil
}
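validateID and ensureName, called at the top of Register, are small guards that are not reproduced in these examples. validateID only needs to reject empty IDs; a minimal sketch is below (ensureName's logic is omitted since it depends on the name registrar):

// validateID is a sketch of the ID guard used by Register: an empty ID is the
// only thing it needs to reject before the container is indexed.
func validateID(id string) error {
	if id == "" {
		return fmt.Errorf("Invalid empty id")
	}
	return nil
}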
Example #11
// containerStop halts a container by sending a stop signal, waiting for the given
// duration in seconds, and then sending SIGKILL and waiting for the
// process to exit. If a negative duration is given, Stop will wait
// for the initial signal forever. If the container is not running, Stop returns
// immediately.
func (daemon *Daemon) containerStop(container *container.Container, seconds int) error {
	if !container.IsRunning() {
		return nil
	}

	daemon.stopHealthchecks(container)

	stopSignal := container.StopSignal()
	// 1. Send a stop signal
	if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil {
		// While normally we might "return err" here we're not going to
		// because if we can't stop the container by this point then
		// it's probably because it's already stopped. Meaning, it stopped
		// between the IsRunning() call above and now.
		// Also, since the err return will be environment specific we can't
		// look for any particular (common) error that would indicate
		// that the process is already dead vs something else going wrong.
		// So, instead we'll give it up to 2 more seconds to complete and if
		// by that time the container is still running, then the error
		// we got is probably valid and so we force kill it.
		if _, err := container.WaitStop(2 * time.Second); err != nil {
			logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal)
			if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
				return err
			}
		}
	}

	// 2. Wait for the process to exit on its own
	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
		logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal)
		// 3. If it doesn't, then send SIGKILL
		if err := daemon.Kill(container); err != nil {
			container.WaitStop(-1 * time.Second)
			logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
		}
	}

	daemon.LogContainerEvent(container, "stop")
	return nil
}
Example #12
func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) {
	if container.LogDriver != nil && container.IsRunning() {
		return container.LogDriver, nil
	}
	return container.StartLogger(container.HostConfig.LogConfig)
}