func (m *UpdateServiceSnapshotByContainer) Process() error {
	_, imageName := m.info.GetName()
	cached, err := utils.IsImageCached(imageName)
	if err != nil {
		fmt.Println(err)
		return err
	}

	if !cached {
		//TODO: remove this image to save server disk
		err := utils.PullImage(imageName)
		if err != nil {
			return err
		}
	}

	var config container.Config
	config.Image = imageName
	config.Cmd = []string{snapshotProcess, m.info.CallbackID, m.info.Host, m.info.DataProto}

	var hostConfig container.HostConfig
	hostConfig.Binds = append(hostConfig.Binds, fmt.Sprintf("%s:%s", m.info.DataURL, snapshotMountDir))
	containerName := "scan-" + m.info.CallbackID

	err = utils.StartContainer(config, hostConfig, containerName)
	return err
}
func TestUtilsContainer(t *testing.T) {
	//TODO: dockyard dev team should provide small testing containers.
	imageName := "google/nodejs"
	containerName := ""

	cached, err := utils.IsImageCached(imageName)
	if err == utils.ErrorsNoDockerClient {
		fmt.Println("Please start a docker daemon to continue the container operation test")
		return
	}
	assert.Nil(t, err, "Fail to load Image")
	if !cached {
		err := utils.PullImage(imageName)
		assert.Nil(t, err, "Fail to pull image")
	}

	tmpFile, err := ioutil.TempFile("/tmp", "dockyard-test-container-oper")
	assert.Nil(t, err, "System err, fail to create temp file")
	defer os.Remove(tmpFile.Name())

	var config container.Config
	config.Image = imageName
	config.Cmd = []string{"touch", tmpFile.Name()}

	var hostConfig container.HostConfig
	hostConfig.Binds = append(hostConfig.Binds, "/tmp:/tmp")

	err = utils.StartContainer(config, hostConfig, containerName)
	assert.Nil(t, err, "Fail to start container")
	//TODO: stop, remove the container process
	assert.Equal(t, true, utils.IsFileExist(tmpFile.Name()), "Fail to touch file by using StartContainer")
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
	if adjustCPUShares && hostConfig.CPUShares > 0 {
		// Handle unsupported CPUShares
		if hostConfig.CPUShares < linuxMinCPUShares {
			logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
			hostConfig.CPUShares = linuxMinCPUShares
		} else if hostConfig.CPUShares > linuxMaxCPUShares {
			logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
			hostConfig.CPUShares = linuxMaxCPUShares
		}
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
		// By default, MemorySwap is set to twice the size of Memory.
		hostConfig.MemorySwap = hostConfig.Memory * 2
	}
	if hostConfig.ShmSize == 0 {
		hostConfig.ShmSize = container.DefaultSHMSize
	}
	var err error
	if hostConfig.SecurityOpt == nil {
		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
		if err != nil {
			return err
		}
	}
	if hostConfig.MemorySwappiness == nil {
		defaultSwappiness := int64(-1)
		hostConfig.MemorySwappiness = &defaultSwappiness
	}
	return nil
}
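// The defaulting rules above are easy to check in isolation. Below is a
// minimal, dependency-free sketch (not taken from any of the projects quoted
// in this section) that reproduces the Memory/MemorySwap and MemorySwappiness
// defaulting on a stand-in struct instead of the real containertypes.HostConfig.
package main

import "fmt"

// hostConfigStub mimics only the fields the defaulting logic touches.
type hostConfigStub struct {
	Memory           int64  // bytes
	MemorySwap       int64  // bytes; 0 means "not set", -1 means "unlimited"
	MemorySwappiness *int64 // nil means "not set"
}

func applyDefaults(hc *hostConfigStub) {
	if hc.Memory > 0 && hc.MemorySwap == 0 {
		// Same rule as adaptContainerSettings: swap defaults to twice the memory limit.
		hc.MemorySwap = hc.Memory * 2
	}
	if hc.MemorySwappiness == nil {
		defaultSwappiness := int64(-1)
		hc.MemorySwappiness = &defaultSwappiness
	}
}

func main() {
	hc := &hostConfigStub{Memory: 512 * 1024 * 1024}
	applyDefaults(hc)
	fmt.Println(hc.MemorySwap, *hc.MemorySwappiness) // 1073741824 -1
}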
func setSandboxResources(hc *dockercontainer.HostConfig) {
	hc.Resources = dockercontainer.Resources{
		MemorySwap: -1, // Always disable memory swap.
		CPUShares:  defaultSandboxCPUshares,
		// Use docker's default cpu quota/period.
	}
	hc.OomScoreAdj = defaultSandboxOOMScore
}
// modifyHostNetworkOptionForSandbox applies NetworkMode/UTSMode to sandbox's dockercontainer.HostConfig.
func modifyHostNetworkOptionForSandbox(hostNetwork bool, hc *dockercontainer.HostConfig) {
	if hostNetwork {
		hc.NetworkMode = namespaceModeHost
	} else {
		// Assume kubelet uses either the cni or the kubenet plugin.
		// TODO: support docker networking.
		hc.NetworkMode = "none"
	}
}
func setSandboxResources(hc *dockercontainer.HostConfig) {
	hc.Resources = dockercontainer.Resources{
		MemorySwap: -1, // Always disable memory swap.
		CPUShares:  defaultSandboxCPUshares,
		// Use docker's default cpu quota/period.
	}
	// TODO: Get rid of the dependency on kubelet internal package.
	hc.OomScoreAdj = qos.PodInfraOOMAdj
}
// These two functions are OS specific (for now at least)
func updateHostConfig(hc *dockercontainer.HostConfig, opts *kubecontainer.RunContainerOptions) {
	// There is no /etc/resolv.conf in Windows; DNS and DNSSearch options have to be passed to the Docker runtime instead.
	hc.DNS = opts.DNS
	hc.DNSSearch = opts.DNSSearch

	// MemorySwap == -1 is not currently supported in Docker 1.14 on Windows
	// https://github.com/docker/docker/blob/master/daemon/daemon_windows.go#L175
	hc.Resources.MemorySwap = 0
}
// modifyCommonNamespaceOptions applies common namespace options for sandbox and container.
func modifyCommonNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig) {
	if nsOpts != nil {
		if nsOpts.GetHostPid() {
			hostConfig.PidMode = namespaceModeHost
		}
		if nsOpts.GetHostIpc() {
			hostConfig.IpcMode = namespaceModeHost
		}
	}
}
func setSandboxResources(_ *runtimeApi.PodSandboxResources, hc *dockercontainer.HostConfig) {
	// Ignore the resource requests and limits for now and just use the docker
	// defaults.
	// TODO: apply resource limits based on the configuration.
	hc.Resources = dockercontainer.Resources{
		MemorySwap: -1, // Always disable memory swap.
		CPUShares:  defaultSandboxCPUshares,
		// Use docker's default cpu quota/period.
	}
	hc.OomScoreAdj = defaultSandboxOOMScore
}
// modifyHostNetworkOptionForContainer applies NetworkMode/UTSMode to container's dockercontainer.HostConfig.
func modifyHostNetworkOptionForContainer(hostNetwork bool, sandboxID string, hc *dockercontainer.HostConfig) {
	sandboxNSMode := fmt.Sprintf("container:%v", sandboxID)
	hc.NetworkMode = dockercontainer.NetworkMode(sandboxNSMode)
	hc.IpcMode = dockercontainer.IpcMode(sandboxNSMode)
	hc.UTSMode = ""
	hc.PidMode = ""

	if hostNetwork {
		hc.UTSMode = namespaceModeHost
	}
}
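// Several snippets in this section (modifyHostNetworkOptionForContainer above,
// addIpc and addNetNs below) rely on docker's "container:<id>" mode string,
// which joins the new container into an existing container's namespace. The
// stand-alone sketch below (not taken from any of the projects quoted here;
// the sandbox ID is made up) just shows how that mode string is built and how
// the target ID is recovered from it, roughly the check that docker's
// NetworkMode.IsContainer()/ConnectedContainer() helpers perform.
package main

import (
	"fmt"
	"strings"
)

func main() {
	sandboxID := "0123456789ab" // hypothetical sandbox/container ID
	nsMode := fmt.Sprintf("container:%v", sandboxID)

	// A mode is "container-scoped" when it carries the "container:" prefix;
	// everything after the first colon names the container to share with.
	if strings.HasPrefix(nsMode, "container:") {
		target := strings.SplitN(nsMode, ":", 2)[1]
		fmt.Println("sharing namespaces with container", target)
	}
}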
func (l *LocalCluster) createRoach(node *testNode, vols *Container, cmd ...string) {
	l.panicOnStop()

	hostConfig := container.HostConfig{
		PublishAllPorts: true,
		NetworkMode:     container.NetworkMode(l.networkID),
	}

	if vols != nil {
		hostConfig.VolumesFrom = append(hostConfig.VolumesFrom, vols.id)
	}

	var hostname string
	if node.index >= 0 {
		hostname = fmt.Sprintf("roach%d", node.index)
	}
	var entrypoint []string
	if *cockroachImage == builderImageFull {
		entrypoint = append(entrypoint, "/"+filepath.Base(*cockroachBinary))
	} else if *cockroachEntry != "" {
		entrypoint = append(entrypoint, *cockroachEntry)
	}
	var err error
	node.Container, err = createContainer(
		l,
		container.Config{
			Hostname: hostname,
			Image:    *cockroachImage,
			ExposedPorts: map[nat.Port]struct{}{
				defaultTCP: {},
			},
			Entrypoint: entrypoint,
			// TODO(pmattis): Figure out why the Go DNS resolver is misbehaving.
			Env: []string{"GODEBUG=netdns=cgo"},
			Cmd: cmd,
			Labels: map[string]string{
				// Allow for `docker ps --filter label=Hostname=roach0` or `--filter label=Roach`.
				"Hostname": hostname,
				"Roach":    "",
			},
		},
		hostConfig,
		&network.NetworkingConfig{
			EndpointsConfig: map[string]*network.EndpointSettings{
				l.networkID: {
					Aliases: []string{hostname},
				},
			},
		},
		node.nodeStr,
	)
	maybePanic(err)
}
func (l *LocalCluster) createRoach(i int, dns, vols *Container, cmd ...string) *Container {
	l.panicOnStop()

	hostConfig := container.HostConfig{
		PublishAllPorts: true,
	}

	if dns != nil {
		ci, err := dns.Inspect()
		maybePanic(err)
		hostConfig.DNS = append(hostConfig.DNS, ci.NetworkSettings.IPAddress)
	}
	if vols != nil {
		hostConfig.VolumesFrom = append(hostConfig.VolumesFrom, vols.id)
	}

	var hostname string
	if i >= 0 {
		hostname = fmt.Sprintf("roach%d", i)
	}
	var entrypoint []string
	if *cockroachImage == builderImage {
		entrypoint = append(entrypoint, "/"+filepath.Base(*cockroachBinary))
	} else if *cockroachEntry != "" {
		entrypoint = append(entrypoint, *cockroachEntry)
	}
	c, err := createContainer(
		l,
		container.Config{
			Hostname:   hostname,
			Domainname: domain,
			Image:      *cockroachImage,
			ExposedPorts: map[nat.Port]struct{}{
				cockroachTCP: {},
				pgTCP:        {},
			},
			Entrypoint: strslice.New(entrypoint...),
			Cmd:        strslice.New(cmd...),
			Labels: map[string]string{
				// Allow for `docker ps --filter label=Hostname=roach0` or `--filter label=Roach`.
				"Hostname": hostname,
				"Roach":    "",
			},
		},
		hostConfig,
		nodeStr(i),
	)
	maybePanic(err)
	return c
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
	if hostConfig == nil {
		return nil
	}

	if hostConfig.CPUShares < 0 {
		logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, windowsMinCPUShares)
		hostConfig.CPUShares = windowsMinCPUShares
	} else if hostConfig.CPUShares > windowsMaxCPUShares {
		logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, windowsMaxCPUShares)
		hostConfig.CPUShares = windowsMaxCPUShares
	}

	return nil
}
// createContainer creates a new container using the specified
// options. Per the docker API, the created container is not running
// and must be started explicitly. Note that the passed-in hostConfig
// will be augmented with the necessary settings to use the network
// defined by l.createNetwork().
func createContainer(l *LocalCluster, containerConfig container.Config, hostConfig container.HostConfig, containerName string) (*Container, error) {
	hostConfig.NetworkMode = container.NetworkMode(l.networkID)
	// Disable DNS search under the host machine's domain. This can
	// catch upstream wildcard DNS matching and result in odd behavior.
	hostConfig.DNSSearch = []string{"."}
	resp, err := l.client.ContainerCreate(&containerConfig, &hostConfig, nil, containerName)
	if err != nil {
		return nil, err
	}
	return &Container{
		id:      resp.ID,
		name:    containerName,
		cluster: l,
	}, nil
}
// OneShot runs a container, expecting it to successfully run to completion
// and die, after which it is removed. Not goroutine safe: only one OneShot
// can be running at once.
// Adds the same binds as the cluster containers (certs, binary, etc).
func (l *LocalCluster) OneShot(
	ref string,
	ipo types.ImagePullOptions,
	containerConfig container.Config,
	hostConfig container.HostConfig,
	name string,
) error {
	if err := pullImage(l, ref, ipo); err != nil {
		return err
	}
	hostConfig.VolumesFrom = []string{l.vols.id}
	container, err := createContainer(l, containerConfig, hostConfig, name)
	if err != nil {
		return err
	}
	l.oneshot = container
	defer func() {
		if err := l.oneshot.Remove(); err != nil {
			log.Errorf("ContainerRemove: %s", err)
		}
		l.oneshot = nil
	}()

	if err := l.oneshot.Start(); err != nil {
		return err
	}
	return l.oneshot.Wait()
}
func (l *LocalCluster) createRoach(node *testNode, vols *Container, env []string, cmd ...string) {
	l.panicOnStop()

	hostConfig := container.HostConfig{
		PublishAllPorts: true,
		NetworkMode:     container.NetworkMode(l.networkID),
		Privileged:      l.privileged,
	}

	if vols != nil {
		hostConfig.VolumesFrom = append(hostConfig.VolumesFrom, vols.id)
	}

	var hostname string
	if node.index >= 0 {
		hostname = fmt.Sprintf("roach%d", node.index)
	}
	var entrypoint []string
	if *cockroachImage == builderImageFull {
		entrypoint = append(entrypoint, "/"+filepath.Base(*cockroachBinary))
	} else if *cockroachEntry != "" {
		entrypoint = append(entrypoint, *cockroachEntry)
	}
	var err error
	node.Container, err = createContainer(
		l,
		container.Config{
			Hostname: hostname,
			Image:    *cockroachImage,
			ExposedPorts: map[nat.Port]struct{}{
				DefaultTCP:  {},
				defaultHTTP: {},
			},
			Entrypoint: entrypoint,
			Env:        env,
			Cmd:        cmd,
			Labels: map[string]string{
				// Allow for `docker ps --filter label=Hostname=roach0` or `--filter label=Roach`.
				"Hostname": hostname,
				"Roach":    "",
			},
		},
		hostConfig,
		node.nodeStr,
	)
	maybePanic(err)
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
	warnings := []string{}
	sysInfo := sysinfo.New(true)

	warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config)
	if err != nil {
		return warnings, err
	}

	w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update)
	if err != nil {
		return warnings, err
	}
	warnings = append(warnings, w...)

	if hostConfig.ShmSize < 0 {
		return warnings, fmt.Errorf("SHM size can not be less than 0")
	}

	if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
		return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj)
	}

	// ip-forwarding does not affect container with '--net=host' (or '--net=none')
	if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) {
		warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
		logrus.Warn("IPv4 forwarding is disabled. Networking will not work")
	}

	// check for various conflicting options with user namespaces
	if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
		if hostConfig.Privileged {
			return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces")
		}
		if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
			return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled")
		}
		if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
			return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled")
		}
		if hostConfig.ReadonlyRootfs {
			return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled")
		}
	}
	if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
		// CgroupParent for systemd cgroup should be named as "xxx.slice"
		if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
			return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
		}
	}
	if hostConfig.Runtime == "" {
		hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
	}
	if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil {
		return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
	}

	return warnings, nil
}
// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure
// to default if it is not populated. This ensures backwards compatibility after
// the validation of the network mode was moved from the docker CLI to the
// docker daemon.
func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig {
	if hc != nil {
		if hc.NetworkMode == container.NetworkMode("") {
			hc.NetworkMode = container.NetworkMode("default")
		}
	}
	return hc
}
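// A small usage sketch of the behaviour above. It is stand-alone and
// re-implements the check on local stand-in types rather than importing the
// real runconfig/container packages, so the type names here are illustrative
// only: a blank NetworkMode becomes "default", an explicit mode is left alone.
package main

import "fmt"

type NetworkMode string

type HostConfig struct {
	NetworkMode NetworkMode
}

// setDefaultNetModeIfBlank mirrors the logic of SetDefaultNetModeIfBlank above.
func setDefaultNetModeIfBlank(hc *HostConfig) *HostConfig {
	if hc != nil && hc.NetworkMode == NetworkMode("") {
		hc.NetworkMode = NetworkMode("default")
	}
	return hc
}

func main() {
	blank := &HostConfig{}
	explicit := &HostConfig{NetworkMode: "host"}
	fmt.Println(setDefaultNetModeIfBlank(blank).NetworkMode)    // "default"
	fmt.Println(setDefaultNetModeIfBlank(explicit).NetworkMode) // "host" (left untouched)
}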
// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeApi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string) error {
	// Apply Cgroup options.
	// TODO: Check if this works with per-pod cgroups.
	hc.CgroupParent = lc.GetCgroupParent()

	// Apply security context.
	applySandboxSecurityContext(lc, createConfig.Config, hc)

	return nil
}
// ModifyHostConfig is called before the Docker runContainer call. The
// security context provider can make changes to the HostConfig, affecting
// security options, whether the container is privileged, volume binds, etc.
func (p SimpleSecurityContextProvider) ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *dockercontainer.HostConfig, supplementalGids []int64) {
	// Apply supplemental groups
	if container.Name != leaky.PodInfraContainerName {
		// TODO: We skip application of supplemental groups to the
		// infra container to work around a runc issue which
		// requires containers to have the '/etc/group'. For
		// more information see:
		// https://github.com/opencontainers/runc/pull/313
		// This can be removed once the fix makes it into the
		// required version of docker.
		if pod.Spec.SecurityContext != nil {
			for _, group := range pod.Spec.SecurityContext.SupplementalGroups {
				hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(group)))
			}
			if pod.Spec.SecurityContext.FSGroup != nil {
				hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(*pod.Spec.SecurityContext.FSGroup)))
			}
		}

		for _, group := range supplementalGids {
			hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(group)))
		}
	}

	// Apply effective security context for container
	effectiveSC := DetermineEffectiveSecurityContext(pod, container)
	if effectiveSC == nil {
		return
	}

	if effectiveSC.Privileged != nil {
		hostConfig.Privileged = *effectiveSC.Privileged
	}

	if effectiveSC.Capabilities != nil {
		add, drop := MakeCapabilities(effectiveSC.Capabilities.Add, effectiveSC.Capabilities.Drop)
		hostConfig.CapAdd = add
		hostConfig.CapDrop = drop
	}

	if effectiveSC.SELinuxOptions != nil {
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelUser, effectiveSC.SELinuxOptions.User)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelRole, effectiveSC.SELinuxOptions.Role)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelType, effectiveSC.SELinuxOptions.Type)
		hostConfig.SecurityOpt = modifySecurityOption(hostConfig.SecurityOpt, dockerLabelLevel, effectiveSC.SELinuxOptions.Level)
	}
}
func (s *Service) populateAdditionalHostConfig(hostConfig *containertypes.HostConfig) error {
	links, err := s.getLinks()
	if err != nil {
		return err
	}

	for _, link := range s.DependentServices() {
		if !s.project.ServiceConfigs.Has(link.Target) {
			continue
		}

		service, err := s.project.CreateService(link.Target)
		if err != nil {
			return err
		}

		containers, err := service.Containers(context.Background())
		if err != nil {
			return err
		}

		if link.Type == project.RelTypeIpcNamespace {
			hostConfig, err = addIpc(hostConfig, service, containers, s.serviceConfig.Ipc)
		} else if link.Type == project.RelTypeNetNamespace {
			hostConfig, err = addNetNs(hostConfig, service, containers, s.serviceConfig.NetworkMode)
		}

		if err != nil {
			return err
		}
	}

	hostConfig.Links = []string{}
	for k, v := range links {
		hostConfig.Links = append(hostConfig.Links, strings.Join([]string{v, k}, ":"))
	}
	for _, v := range s.serviceConfig.ExternalLinks {
		hostConfig.Links = append(hostConfig.Links, v)
	}

	return nil
}
// modifyHostNetworkOptionForSandbox applies NetworkMode/UTSMode to sandbox's dockercontainer.HostConfig.
func modifyHostNetworkOptionForSandbox(hostNetwork bool, networkPlugin network.NetworkPlugin, hc *dockercontainer.HostConfig) {
	if hostNetwork {
		hc.NetworkMode = namespaceModeHost
		return
	}

	if networkPlugin == nil {
		hc.NetworkMode = "default"
		return
	}

	switch networkPlugin.Name() {
	case "cni":
		fallthrough
	case "kubenet":
		hc.NetworkMode = "none"
	default:
		hc.NetworkMode = "default"
	}
}
func (c *Container) populateAdditionalHostConfig(hostConfig *container.HostConfig) error {
	links := map[string]string{}

	for _, link := range c.service.DependentServices() {
		if !c.service.context.Project.Configs.Has(link.Target) {
			continue
		}

		service, err := c.service.context.Project.CreateService(link.Target)
		if err != nil {
			return err
		}

		containers, err := service.Containers()
		if err != nil {
			return err
		}

		if link.Type == project.RelTypeLink {
			c.addLinks(links, service, link, containers)
		} else if link.Type == project.RelTypeIpcNamespace {
			hostConfig, err = c.addIpc(hostConfig, service, containers)
		} else if link.Type == project.RelTypeNetNamespace {
			hostConfig, err = c.addNetNs(hostConfig, service, containers)
		}

		if err != nil {
			return err
		}
	}

	hostConfig.Links = []string{}
	for k, v := range links {
		hostConfig.Links = append(hostConfig.Links, strings.Join([]string{v, k}, ":"))
	}
	for _, v := range c.service.Config().ExternalLinks {
		hostConfig.Links = append(hostConfig.Links, v)
	}

	return nil
}
// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string) error {
	// Apply Cgroup options.
	cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.GetCgroupParent())
	if err != nil {
		return err
	}
	hc.CgroupParent = cgroupParent

	// Apply security context.
	applySandboxSecurityContext(lc, createConfig.Config, hc, ds.networkPlugin)

	return nil
}
// modifyHostConfig applies security context config to dockercontainer.HostConfig.
func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig *dockercontainer.HostConfig) {
	if sc == nil {
		return
	}

	// Apply supplemental groups.
	for _, group := range sc.SupplementalGroups {
		hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.FormatInt(group, 10))
	}

	// Apply security context for the container.
	if sc.Privileged != nil {
		hostConfig.Privileged = sc.GetPrivileged()
	}
	if sc.ReadonlyRootfs != nil {
		hostConfig.ReadonlyRootfs = sc.GetReadonlyRootfs()
	}
	if sc.Capabilities != nil {
		hostConfig.CapAdd = sc.GetCapabilities().GetAddCapabilities()
		hostConfig.CapDrop = sc.GetCapabilities().GetDropCapabilities()
	}
	if sc.SelinuxOptions != nil {
		hostConfig.SecurityOpt = securitycontext.ModifySecurityOptions(
			hostConfig.SecurityOpt,
			&v1.SELinuxOptions{
				User:  sc.SelinuxOptions.GetUser(),
				Role:  sc.SelinuxOptions.GetRole(),
				Type:  sc.SelinuxOptions.GetType(),
				Level: sc.SelinuxOptions.GetLevel(),
			},
		)
	}
}
func addIpc(config *containertypes.HostConfig, service project.Service, containers []project.Container, ipc string) (*containertypes.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for IPC %v", ipc)
	}

	id, err := containers[0].ID()
	if err != nil {
		return nil, err
	}

	config.IpcMode = containertypes.IpcMode("container:" + id)
	return config, nil
}
func (c *Container) addNetNs(config *container.HostConfig, service project.Service, containers []project.Container) (*container.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for networks ns %v", c.service.Config().Net)
	}

	id, err := containers[0].ID()
	if err != nil {
		return nil, err
	}

	config.NetworkMode = container.NetworkMode("container:" + id)
	return config, nil
}
func addNetNs(config *containertypes.HostConfig, service project.Service, containers []project.Container, networkMode string) (*containertypes.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for networks ns %v", networkMode)
	}

	id, err := containers[0].ID()
	if err != nil {
		return nil, err
	}

	config.NetworkMode = containertypes.NetworkMode("container:" + id)
	return config, nil
}
// modifyNamespaceOptions applies namespace options to dockercontainer.HostConfig.
func modifyNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, sandboxID string, hostConfig *dockercontainer.HostConfig) {
	hostNetwork := false
	if nsOpts != nil {
		if nsOpts.HostNetwork != nil {
			hostNetwork = nsOpts.GetHostNetwork()
		}
		if nsOpts.GetHostPid() {
			hostConfig.PidMode = namespaceModeHost
		}
		if nsOpts.GetHostIpc() {
			hostConfig.IpcMode = namespaceModeHost
		}
	}

	// Set for sandbox if sandboxID is not provided.
	if sandboxID == "" {
		modifyHostNetworkOptionForSandbox(hostNetwork, hostConfig)
	} else {
		// Set for container if sandboxID is provided.
		modifyHostNetworkOptionForContainer(hostNetwork, sandboxID, hostConfig)
	}
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
	if hostConfig == nil || hostConfig.Links == nil {
		return nil
	}

	for _, l := range hostConfig.Links {
		name, alias, err := runconfigopts.ParseLink(l)
		if err != nil {
			return err
		}
		child, err := daemon.GetContainer(name)
		if err != nil {
			// An error from daemon.GetContainer() means this name could not be found
			return fmt.Errorf("Could not get container for %s", name)
		}
		for child.HostConfig.NetworkMode.IsContainer() {
			parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
			child, err = daemon.GetContainer(parts[1])
			if err != nil {
				return fmt.Errorf("Could not get container for %s", parts[1])
			}
		}
		if child.HostConfig.NetworkMode.IsHost() {
			return runconfig.ErrConflictHostNetworkAndLinks
		}
		if err := daemon.registerLink(container, child, alias); err != nil {
			return err
		}
	}

	// After we load all the links into the daemon
	// set them to nil on the hostconfig
	hostConfig.Links = nil
	if err := container.WriteHostConfig(); err != nil {
		return err
	}

	return nil
}
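// registerLinks relies on runconfigopts.ParseLink to split a legacy link spec
// into a container name and an alias. The stand-alone sketch below is not the
// real docker implementation; it is a simplified stand-in that shows the
// expected shape of that parsing: "name:alias" yields both parts, while a
// bare "name" falls back to using the name as its own alias.
package main

import (
	"fmt"
	"strings"
)

// parseLink is a simplified stand-in for runconfigopts.ParseLink.
func parseLink(val string) (name string, alias string, err error) {
	if val == "" {
		return "", "", fmt.Errorf("empty string specified for links")
	}
	parts := strings.SplitN(val, ":", 2)
	if len(parts) == 1 {
		// No alias given: the alias defaults to the container name.
		return parts[0], parts[0], nil
	}
	return parts[0], parts[1], nil
}

func main() {
	name, alias, _ := parseLink("db:database")
	fmt.Println(name, alias) // db database

	name, alias, _ = parseLink("cache")
	fmt.Println(name, alias) // cache cache
}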