// ConnectToNetwork connects a container to a network func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { if endpointConfig == nil { endpointConfig = &networktypes.EndpointSettings{} } if !container.Running { if container.RemovalInProgress || container.Dead { return errRemovalContainer(container.ID) } n, err := daemon.FindNetwork(idOrName) if err == nil && n != nil { if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil { return err } } else { container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{ EndpointSettings: endpointConfig, } } } else if !daemon.isNetworkHotPluggable() { return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") } else { if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { return err } } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } return nil }
// Register makes a container object usable by the daemon as <container.ID> func (daemon *Daemon) Register(container *container.Container) error { // Attach to stdout and stderr if container.Config.OpenStdin { container.NewInputPipes() } else { container.NewNopInputPipe() } daemon.containers.Add(container.ID, container) daemon.idIndex.Add(container.ID) if container.IsRunning() { logrus.Debugf("killing old running container %s", container.ID) // Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit container.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 137}) // use the current driver and ensure that the container is dead x.x cmd := &execdriver.Command{ CommonCommand: execdriver.CommonCommand{ ID: container.ID, }, } daemon.execDriver.Terminate(cmd) container.UnmountIpcMounts(mount.Unmount) daemon.Unmount(container) if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving stopped state to disk: %v", err) } } return nil }
// DisconnectFromNetwork disconnects container from network n. func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { return runconfig.ErrConflictHostNetwork } if !container.Running { if container.RemovalInProgress || container.Dead { return errRemovalContainer(container.ID) } if _, ok := container.NetworkSettings.Networks[n.Name()]; ok { delete(container.NetworkSettings.Networks, n.Name()) } else { return fmt.Errorf("container %s is not connected to the network %s", container.ID, n.Name()) } } else { if err := disconnectFromNetwork(container, n, false); err != nil { return err } } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } attributes := map[string]string{ "container": container.ID, } daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) return nil }
// containerRestart attempts to gracefully stop and then start the // container. When stopping, wait for the given duration in seconds to // gracefully stop, before forcefully terminating the container. If // given a negative duration, wait forever for a graceful stop. func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { // Avoid unnecessarily unmounting and then directly mounting // the container when the container stops and then starts // again if err := daemon.Mount(container); err == nil { defer daemon.Unmount(container) } if container.IsRunning() { // set AutoRemove flag to false before stop so the container won't be // removed during restart process autoRemove := container.HostConfig.AutoRemove container.HostConfig.AutoRemove = false err := daemon.containerStop(container, seconds) // restore AutoRemove irrespective of whether the stop worked or not container.HostConfig.AutoRemove = autoRemove // containerStop will write HostConfig to disk, we shall restore AutoRemove // in disk too if toDiskErr := container.ToDiskLocking(); toDiskErr != nil { logrus.Errorf("Write container to disk error: %v", toDiskErr) } if err != nil { return err } } if err := daemon.containerStart(container, "", true); err != nil { return err } daemon.LogContainerEvent(container, "restart") return nil }
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
//
// The named return err is read by the deferred cleanup below: the container
// is purged from the daemon's indexes only when removal succeeded or
// forceRemove is set.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		// A running container may only be removed with forceRemove, in which
		// case it is killed first.
		if !forceRemove {
			return derr.ErrorCodeRmRunning
		}
		if err := daemon.Kill(container); err != nil {
			return derr.ErrorCodeRmFailed.WithArgs(err)
		}
	}
	// stop collection of stats for the container regardless
	// if stats are currently getting collected.
	daemon.statsCollector.stopCollection(container)
	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}
	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()
	// Save container state to disk. So that if error happens before
	// container meta file got removed from disk, then a restart of
	// docker should not make a dead container alive.
	// Note: this err shadows the named return on purpose — a failed save is
	// only logged.
	if err := container.ToDiskLocking(); err != nil {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}
	// If force removal is required, delete container from various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
				logrus.Debugf("Unable to remove container from link graph: %s", err)
			}
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			daemon.LogContainerEvent(container, "destroy")
		}
	}()
	// Remove on-disk metadata, the mounted layer, and exec-driver state, in
	// that order.
	if err = os.RemoveAll(container.Root); err != nil {
		return derr.ErrorCodeRmFS.WithArgs(container.ID, err)
	}
	metadata, err := daemon.layerStore.DeleteMount(container.ID)
	layer.LogReleaseMetadata(metadata)
	if err != nil && err != layer.ErrMountDoesNotExist {
		return derr.ErrorCodeRmDriverFS.WithArgs(daemon.driver, container.ID, err)
	}
	if err = daemon.execDriver.Clean(container.ID); err != nil {
		return derr.ErrorCodeRmExecDriver.WithArgs(container.ID, err)
	}
	return nil
}
// cleanupContainer unregisters a container from the daemon, stops stats
// collection and cleanly removes contents and metadata from the filesystem.
//
// The named return err is inspected by the deferred index cleanup below: the
// container is dropped from the daemon's indexes only when removal succeeded
// or forceRemove is set.
func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
	if container.IsRunning() {
		// A running container may only be removed with forceRemove, in which
		// case it is killed first.
		if !forceRemove {
			err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID)
			return errors.NewRequestConflictError(err)
		}
		if err := daemon.Kill(container); err != nil {
			return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err)
		}
	}
	// stop collection of stats for the container regardless
	// if stats are currently getting collected.
	daemon.statsCollector.stopCollection(container)
	if err = daemon.containerStop(container, 3); err != nil {
		return err
	}
	// Mark container dead. We don't want anybody to be restarting it.
	container.SetDead()
	// Save container state to disk. So that if error happens before
	// container meta file got removed from disk, then a restart of
	// docker should not make a dead container alive.
	// A missing metadata file is fine here — the container is being removed.
	if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) {
		logrus.Errorf("Error saving dying container to disk: %v", err)
	}
	// If force removal is required, delete container from various
	// indexes even if removal failed.
	defer func() {
		if err == nil || forceRemove {
			daemon.nameIndex.Delete(container.ID)
			daemon.linkIndex.delete(container)
			selinuxFreeLxcContexts(container.ProcessLabel)
			daemon.idIndex.Delete(container.ID)
			daemon.containers.Delete(container.ID)
			daemon.LogContainerEvent(container, "destroy")
		}
	}()
	if err = os.RemoveAll(container.Root); err != nil {
		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
	}
	// When container creation fails and `RWLayer` has not been created yet, we
	// do not call `ReleaseRWLayer`
	if container.RWLayer != nil {
		metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer)
		layer.LogReleaseMetadata(metadata)
		if err != nil && err != layer.ErrMountDoesNotExist {
			return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err)
		}
	}
	return nil
}
// ConnectToNetwork connects a container to a network func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { if !container.Running { return derr.ErrorCodeNotRunning.WithArgs(container.ID) } if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { return err } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } return nil }
func (daemon *Daemon) ensureName(container *container.Container) error { if container.Name == "" { name, err := daemon.generateNewName(container.ID) if err != nil { return err } container.Name = name if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving container name to disk: %v", err) } } return nil }
// Register makes a container object usable by the daemon as <container.ID> func (daemon *Daemon) Register(container *container.Container) error { if daemon.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } if err := validateID(container.ID); err != nil { return err } if err := daemon.ensureName(container); err != nil { return err } // Attach to stdout and stderr if container.Config.OpenStdin { container.NewInputPipes() } else { container.NewNopInputPipe() } daemon.containers.Add(container.ID, container) // don't update the Suffixarray if we're starting up // we'll waste time if we update it for every container daemon.idIndex.Add(container.ID) if container.IsRunning() { logrus.Debugf("killing old running container %s", container.ID) // Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit container.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 137}) // use the current driver and ensure that the container is dead x.x cmd := &execdriver.Command{ CommonCommand: execdriver.CommonCommand{ ID: container.ID, }, } daemon.execDriver.Terminate(cmd) container.UnmountIpcMounts(mount.Unmount) daemon.Unmount(container) if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving stopped state to disk: %v", err) } } if err := daemon.prepareMountPoints(container); err != nil { return err } return nil }
// DisconnectFromNetwork disconnects container from network n. func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network) error { if !container.Running { return derr.ErrorCodeNotRunning.WithArgs(container.ID) } if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { return runconfig.ErrConflictHostNetwork } if err := disconnectFromNetwork(container, n); err != nil { return err } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } return nil }
// ConnectToNetwork connects a container to a network func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { if !container.Running { if container.RemovalInProgress || container.Dead { return derr.ErrorCodeRemovalContainer.WithArgs(container.ID) } if _, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, true); err != nil { return err } } else { if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { return err } } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } return nil }
func backportMountSpec(container *container.Container) error { for target, m := range container.MountPoints { if m.Spec.Type != "" { // if type is set on even one mount, no need to migrate return nil } if m.Name != "" { m.Type = mounttypes.TypeVolume m.Spec.Type = mounttypes.TypeVolume // make sure this is not an anyonmous volume before setting the spec source if _, exists := container.Config.Volumes[target]; !exists { m.Spec.Source = m.Name } if container.HostConfig.VolumeDriver != "" { m.Spec.VolumeOptions = &mounttypes.VolumeOptions{ DriverConfig: &mounttypes.Driver{Name: container.HostConfig.VolumeDriver}, } } if strings.Contains(m.Mode, "nocopy") { if m.Spec.VolumeOptions == nil { m.Spec.VolumeOptions = &mounttypes.VolumeOptions{} } m.Spec.VolumeOptions.NoCopy = true } } else { m.Type = mounttypes.TypeBind m.Spec.Type = mounttypes.TypeBind m.Spec.Source = m.Source if m.Propagation != "" { m.Spec.BindOptions = &mounttypes.BindOptions{ Propagation: m.Propagation, } } } m.Spec.Target = m.Destination if !m.RW { m.Spec.ReadOnly = true } } return container.ToDiskLocking() }
// disconnectFromNetwork finds this container's endpoint on network n, detaches
// the sandbox from it, deletes the endpoint, and drops the network from the
// container's stored settings.
func disconnectFromNetwork(container *container.Container, n libnetwork.Network) error {
	// NOTE(review): state is persisted *before* the endpoint is removed, so the
	// successful disconnect performed below is not itself written to disk here;
	// callers in this file call ToDiskLocking again after this returns. Confirm
	// this early save is intentional.
	if err := container.ToDiskLocking(); err != nil {
		return fmt.Errorf("Error saving container to disk: %v", err)
	}
	var (
		ep   libnetwork.Endpoint
		sbox libnetwork.Sandbox
	)
	// Walk the network's endpoints looking for the one whose sandbox belongs
	// to this container; returning true stops the walk.
	s := func(current libnetwork.Endpoint) bool {
		epInfo := current.Info()
		if epInfo == nil {
			return false
		}
		if sb := epInfo.Sandbox(); sb != nil {
			if sb.ContainerID() == container.ID {
				ep = current
				sbox = sb
				return true
			}
		}
		return false
	}
	n.WalkEndpoints(s)
	if ep == nil {
		return fmt.Errorf("container %s is not connected to the network", container.ID)
	}
	// Detach the sandbox from the endpoint, then delete the endpoint itself.
	if err := ep.Leave(sbox); err != nil {
		return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
	}
	if err := ep.Delete(); err != nil {
		return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
	}
	delete(container.NetworkSettings.Networks, n.Name())
	return nil
}
func (daemon *Daemon) registerName(container *container.Container) error { if daemon.Exists(container.ID) { return fmt.Errorf("Container is already loaded") } if err := validateID(container.ID); err != nil { return err } if container.Name == "" { name, err := daemon.generateNewName(container.ID) if err != nil { return err } container.Name = name if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving container name to disk: %v", err) } } return daemon.nameIndex.Reserve(container.Name, container.ID) }
// DisconnectFromNetwork disconnects container from network n. func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { n, err := daemon.FindNetwork(networkName) if !container.Running || (err != nil && force) { if container.RemovalInProgress || container.Dead { return errRemovalContainer(container.ID) } // In case networkName is resolved we will use n.Name() // this will cover the case where network id is passed. if n != nil { networkName = n.Name() } if _, ok := container.NetworkSettings.Networks[networkName]; !ok { return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName) } delete(container.NetworkSettings.Networks, networkName) } else if err == nil && !daemon.isNetworkHotPluggable() { return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") } else if err == nil { if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { return runconfig.ErrConflictHostNetwork } if err := daemon.disconnectFromNetwork(container, n, false); err != nil { return err } } else { return err } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } if n != nil { attributes := map[string]string{ "container": container.ID, } daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) } return nil }
// DisconnectFromNetwork disconnects container from network n. func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network) error { if !container.Running { return derr.ErrorCodeNotRunning.WithArgs(container.ID) } if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { return runconfig.ErrConflictHostNetwork } if err := disconnectFromNetwork(container, n); err != nil { return err } if err := container.ToDiskLocking(); err != nil { return fmt.Errorf("Error saving container to disk: %v", err) } attributes := map[string]string{ "container": container.ID, } daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) return nil }
// Create creates a new container from the given configuration with a given name. func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) { var ( container *container.Container img *image.Image imgID image.ID err error ) if params.Config.Image != "" { img, err = daemon.GetImage(params.Config.Image) if err != nil { return nil, err } imgID = img.ID() } if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { return nil, err } if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil { return nil, err } defer func() { if retErr != nil { if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil { logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err) } } }() if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { return nil, err } // Set RWLayer for container after mount labels have been set if err := daemon.setRWLayer(container); err != nil { return nil, err } if err := daemon.Register(container); err != nil { return nil, err } rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { return nil, err } if err := daemon.setHostConfig(container, params.HostConfig); err != nil { return nil, err } defer func() { if retErr != nil { if err := daemon.removeMountPoints(container, true); err != nil { logrus.Error(err) } } }() if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { return nil, err } var endpointsConfigs map[string]*networktypes.EndpointSettings if params.NetworkingConfig != nil { endpointsConfigs = params.NetworkingConfig.EndpointsConfig } if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil { return nil, err } if err := container.ToDiskLocking(); 
err != nil { logrus.Errorf("Error saving new container to disk: %v", err) return nil, err } daemon.LogContainerEvent(container, "create") return container, nil }
// Create creates a new container from the given configuration with a given name.
func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) {
	var (
		container *container.Container
		img       *image.Image
		imgID     image.ID
		err       error
	)

	if params.Config.Image != "" {
		// Resolve the image.
		img, err = daemon.GetImage(params.Config.Image)
		if err != nil {
			return nil, err
		}
		// Record its ID.
		imgID = img.ID()
	}

	// Merge and validate the configuration.
	if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil {
		return nil, err
	}

	// Create a new container object; see newContainer in daemon/daemon.go.
	// It returns a structure of this shape:
	/*
		type Container struct {
			// common, platform-independent fields
			CommonContainer

			// platform-specific fields, meaningful only on unix-like systems
			AppArmorProfile string
			HostnamePath    string
			HostsPath       string
			ShmPath         string
			ResolvConfPath  string
			SeccompProfile  string
			NoNewPrivileges bool
		}
	*/
	// This only creates the in-memory container object — neither the
	// filesystem nor namespaces exist yet. During creation at most the
	// filesystem is produced; namespaces only take effect once the container
	// actually runs.
	if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil {
		return nil, err
	}
	// If creation fails at any later step, try to remove the container.
	defer func() {
		if retErr != nil {
			if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil {
				logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err)
			}
		}
	}()

	// Configure the container's security settings (AppArmor, SELinux, ...).
	if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil {
		return nil, err
	}

	// Set RWLayer for container after mount labels have been set.
	// This resolves layer information (layer IDs etc.) for both the image
	// layers and the container layer; see setRWLayer in this file.
	if err := daemon.setRWLayer(container); err != nil {
		return nil, err
	}

	// Register the container with the daemon:
	// daemon.containers.Add(c.ID, c)
	if err := daemon.Register(container); err != nil {
		return nil, err
	}

	rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
	if err != nil {
		return nil, err
	}
	// Set ownership/permissions of the container's root (metadata) directory
	// under /var/lib/docker/containers.
	if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil {
		return nil, err
	}

	// setHostConfig does two main things:
	//   - mounts volumes;
	//   - sets up links between containers.
	if err := daemon.setHostConfig(container, params.HostConfig); err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil {
			if err := daemon.removeMountPoints(container, true); err != nil {
				logrus.Error(err)
			}
		}
	}()

	// Platform-specific step, implemented in daemon/create_unix.go:
	/*
		func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
			if err := daemon.Mount(container); err != nil {
				return err
			}
			defer daemon.Unmount(container)

			rootUID, rootGID := daemon.GetRemappedUIDGID()
			if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil {
				return err
			}

			for spec := range config.Volumes {
				name := stringid.GenerateNonCryptoID()
				destination := filepath.Clean(spec)

				// Skip volumes for which we already have something mounted on that
				// destination because of a --volume-from.
				if container.IsDestinationMounted(destination) {
					continue
				}
				path, err := container.GetResourcePath(destination)
				if err != nil {
					return err
				}

				stat, err := os.Stat(path)
				if err == nil && !stat.IsDir() {
					return fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
				}

				v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil)
				if err != nil {
					return err
				}

				if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil {
					return err
				}

				container.AddMountPointWithVolume(destination, v, true)
			}
			return daemon.populateVolumes(container)
		}
	*/
	// Apply platform-specific settings: mainly mounting directories/files and
	// attaching volumes.
	if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil {
		return nil, err
	}

	// Network endpoint configuration.
	var endpointsConfigs map[string]*networktypes.EndpointSettings
	if params.NetworkingConfig != nil {
		endpointsConfigs = params.NetworkingConfig.EndpointsConfig
	}
	// Update the network settings (daemon/container_operations.go, built on
	// libnetwork). This only records the network mode in
	// container.NetworkSettings.Networks; no network is actually created here.
	if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil {
		return nil, err
	}

	// Persist the container configuration to disk.
	// TODO(review): clarify how this differs from the other ToDisk variant.
	if err := container.ToDiskLocking(); err != nil {
		logrus.Errorf("Error saving new container to disk: %v", err)
		return nil, err
	}
	// Emit the container "create" event.
	daemon.LogContainerEvent(container, "create")
	return container, nil
}
// Create creates a new container from the given configuration with a given name. func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *container.Container, retErr error) { var ( container *container.Container img *image.Image imgID image.ID err error ) if params.Config.Image != "" { img, err = daemon.GetImage(params.Config.Image) if err != nil { return nil, err } imgID = img.ID() } if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { return nil, err } if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil { return nil, err } defer func() { if retErr != nil { if err := daemon.ContainerRm(container.ID, &ContainerRmConfig{ForceRemove: true}); err != nil { logrus.Errorf("Clean up Error! Cannot destroy container %s: %v", container.ID, err) } } }() if err := daemon.Register(container); err != nil { return nil, err } rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { return nil, err } if err := daemon.setHostConfig(container, params.HostConfig); err != nil { return nil, err } defer func() { if retErr != nil { if err := daemon.removeMountPoints(container, true); err != nil { logrus.Error(err) } } }() if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig, img); err != nil { return nil, err } if err := container.ToDiskLocking(); err != nil { logrus.Errorf("Error saving new container to disk: %v", err) return nil, err } daemon.LogContainerEvent(container, "create") return container, nil }