// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
	if adjustCPUShares && hostConfig.CPUShares > 0 {
		// Handle unsupported CPUShares
		if hostConfig.CPUShares < linuxMinCPUShares {
			logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
			hostConfig.CPUShares = linuxMinCPUShares
		} else if hostConfig.CPUShares > linuxMaxCPUShares {
			logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
			hostConfig.CPUShares = linuxMaxCPUShares
		}
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
		// By default, MemorySwap is set to twice the size of Memory.
		hostConfig.MemorySwap = hostConfig.Memory * 2
	}
	if hostConfig.ShmSize == 0 {
		hostConfig.ShmSize = container.DefaultSHMSize
	}
	var err error
	if hostConfig.SecurityOpt == nil {
		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
		if err != nil {
			return err
		}
	}
	if hostConfig.MemorySwappiness == nil {
		defaultSwappiness := int64(-1)
		hostConfig.MemorySwappiness = &defaultSwappiness
	}
	return nil
}
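// The following is a minimal standalone sketch (not the daemon method itself)
// that mirrors the Linux defaulting rules above: clamp CPUShares, default
// MemorySwap to twice Memory, and default ShmSize. The constants are assumed
// stand-ins for the unexported linuxMinCPUShares/linuxMaxCPUShares values and
// container.DefaultSHMSize; the struct is a simplified stand-in for HostConfig.
package main

import "fmt"

const (
	minCPUShares   = 2                // assumed stand-in for linuxMinCPUShares
	maxCPUShares   = 262144           // assumed stand-in for linuxMaxCPUShares
	defaultSHMSize = 64 * 1024 * 1024 // assumed stand-in for container.DefaultSHMSize
)

type hostResources struct {
	CPUShares  int64
	Memory     int64
	MemorySwap int64
	ShmSize    int64
}

func adapt(r *hostResources) {
	if r.CPUShares > 0 && r.CPUShares < minCPUShares {
		r.CPUShares = minCPUShares
	} else if r.CPUShares > maxCPUShares {
		r.CPUShares = maxCPUShares
	}
	if r.Memory > 0 && r.MemorySwap == 0 {
		r.MemorySwap = r.Memory * 2 // swap defaults to twice the memory limit
	}
	if r.ShmSize == 0 {
		r.ShmSize = defaultSHMSize
	}
}

func main() {
	r := hostResources{CPUShares: 1, Memory: 512 * 1024 * 1024}
	adapt(&r)
	fmt.Println(r.CPUShares, r.MemorySwap, r.ShmSize) // 2 1073741824 67108864
}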
// createRoach creates the docker container for a testNode. It may be called in
// parallel to start many nodes at once, and thus should remain threadsafe.
func (l *LocalCluster) createRoach(
	ctx context.Context, node *testNode, vols *Container, env []string, cmd ...string,
) {
	l.panicOnStop()

	hostConfig := container.HostConfig{
		PublishAllPorts: true,
		NetworkMode:     container.NetworkMode(l.networkID),
		Privileged:      l.privileged,
	}

	if vols != nil {
		hostConfig.VolumesFrom = append(hostConfig.VolumesFrom, vols.id)
	}

	var hostname string
	if node.index >= 0 {
		hostname = fmt.Sprintf("roach-%s-%d", l.clusterID, node.index)
	}
	log.Infof(ctx, "creating docker container with name: %s", hostname)
	var entrypoint []string
	if *cockroachImage == builderImageFull {
		entrypoint = append(entrypoint, CockroachBinaryInContainer)
	} else if *cockroachEntry != "" {
		entrypoint = append(entrypoint, *cockroachEntry)
	}
	var err error
	node.Container, err = createContainer(
		ctx,
		l,
		container.Config{
			Hostname: hostname,
			Image:    *cockroachImage,
			ExposedPorts: map[nat.Port]struct{}{
				DefaultTCP:  {},
				defaultHTTP: {},
			},
			Entrypoint: entrypoint,
			Env:        env,
			Cmd:        cmd,
			Labels: map[string]string{
				// Allow for `docker ps --filter label=Hostname=roach-<id>-0` or
				// `--filter label=Roach`.
				"Hostname":              hostname,
				"Roach":                 "",
				"Acceptance-cluster-id": l.clusterID,
			},
		},
		hostConfig,
		node.nodeStr,
	)
	maybePanic(err)
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
	if hostConfig == nil {
		return nil
	}

	if hostConfig.CPUShares < 0 {
		logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, windowsMinCPUShares)
		hostConfig.CPUShares = windowsMinCPUShares
	} else if hostConfig.CPUShares > windowsMaxCPUShares {
		logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, windowsMaxCPUShares)
		hostConfig.CPUShares = windowsMaxCPUShares
	}

	return nil
}
// ContainerCreate creates a new container based on the given configuration.
// It can be associated with a name, but it's not mandatory.
func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
	var response container.ContainerCreateCreatedBody

	if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
		return response, err
	}

	// When using API 1.24 and under, the client is responsible for removing the container
	if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
		hostConfig.AutoRemove = false
	}

	query := url.Values{}
	if containerName != "" {
		query.Set("name", containerName)
	}

	body := configWrapper{
		Config:           config,
		HostConfig:       hostConfig,
		NetworkingConfig: networkingConfig,
	}

	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
	if err != nil {
		if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
			return response, imageNotFoundError{config.Image}
		}
		return response, err
	}

	err = json.NewDecoder(serverResp.body).Decode(&response)
	ensureReaderClosed(serverResp)
	return response, err
}
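// Hypothetical caller sketch (not part of the client package): it shows the
// create-then-start flow against this API. It assumes the standard
// client.NewEnvClient constructor, that the "alpine" image is already present
// locally, and that "demo-container" is an illustrative name only.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Create a stopped container from an existing image; HostConfig and
	// NetworkingConfig may be nil, in which case daemon defaults apply.
	resp, err := cli.ContainerCreate(ctx, &container.Config{
		Image: "alpine",
		Cmd:   []string{"echo", "hello"},
	}, nil, nil, "demo-container")
	if err != nil {
		log.Fatal(err)
	}

	// The created container is not running; start it explicitly.
	if err := cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("created container", resp.ID)
}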
// OneShot runs a container, expecting it to successfully run to completion
// and die, after which it is removed. Not goroutine safe: only one OneShot
// can be running at once.
// Adds the same binds as the cluster containers (certs, binary, etc).
func (l *LocalCluster) OneShot(
	ctx context.Context,
	ref string,
	ipo types.ImagePullOptions,
	containerConfig container.Config,
	hostConfig container.HostConfig,
	name string,
) error {
	if err := pullImage(ctx, l, ref, ipo); err != nil {
		return err
	}
	hostConfig.VolumesFrom = []string{l.vols.id}
	container, err := createContainer(ctx, l, containerConfig, hostConfig, name)
	if err != nil {
		return err
	}
	l.oneshot = container
	defer func() {
		if err := l.oneshot.Remove(ctx); err != nil {
			log.Errorf(ctx, "ContainerRemove: %s", err)
		}
		l.oneshot = nil
	}()

	if err := l.oneshot.Start(ctx); err != nil {
		return err
	}
	if err := l.oneshot.Wait(ctx); err != nil {
		return err
	}
	return nil
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
	warnings := []string{}
	sysInfo := sysinfo.New(true)

	warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config)
	if err != nil {
		return warnings, err
	}

	w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update)

	// w may contain warnings regardless of whether err is nil.
	warnings = append(warnings, w...)

	if err != nil {
		return warnings, err
	}

	if hostConfig.ShmSize < 0 {
		return warnings, fmt.Errorf("SHM size can not be less than 0")
	}

	if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
		return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj)
	}

	// ip-forwarding does not affect container with '--net=host' (or '--net=none')
	if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) {
		warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
		logrus.Warn("IPv4 forwarding is disabled. Networking will not work")
	}

	// check for various conflicting options with user namespaces
	if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
		if hostConfig.Privileged {
			return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces")
		}
		if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
			return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled")
		}
		if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
			return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled")
		}
	}

	if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
		// CgroupParent for systemd cgroup should be named as "xxx.slice"
		if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
			return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
		}
	}

	if hostConfig.Runtime == "" {
		hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
	}

	if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil {
		return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
	}

	return warnings, nil
}
// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure
// to default if it is not populated. This ensures backwards compatibility after
// the validation of the network mode was moved from the docker CLI to the
// docker daemon.
func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig {
	if hc != nil {
		if hc.NetworkMode == container.NetworkMode("") {
			hc.NetworkMode = container.NetworkMode("default")
		}
	}
	return hc
}
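// Illustrative sketch (not part of the original sources): an unset NetworkMode
// is normalized to "default", while an explicit mode such as "host" is left
// untouched. The import paths are assumed to match this code base
// (github.com/docker/docker/runconfig and api/types/container).
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/runconfig"
)

func main() {
	blank := runconfig.SetDefaultNetModeIfBlank(&container.HostConfig{})
	fmt.Println(blank.NetworkMode) // "default"

	host := runconfig.SetDefaultNetModeIfBlank(&container.HostConfig{NetworkMode: "host"})
	fmt.Println(host.NetworkMode) // "host"
}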
func (s *Service) populateAdditionalHostConfig(hostConfig *containertypes.HostConfig) error {
	links, err := s.getLinks()
	if err != nil {
		return err
	}

	for _, link := range s.DependentServices() {
		if !s.project.ServiceConfigs.Has(link.Target) {
			continue
		}

		service, err := s.project.CreateService(link.Target)
		if err != nil {
			return err
		}

		containers, err := service.Containers(context.Background())
		if err != nil {
			return err
		}

		if link.Type == project.RelTypeIpcNamespace {
			hostConfig, err = addIpc(hostConfig, service, containers, s.serviceConfig.Ipc)
		} else if link.Type == project.RelTypeNetNamespace {
			hostConfig, err = addNetNs(hostConfig, service, containers, s.serviceConfig.NetworkMode)
		}

		if err != nil {
			return err
		}
	}

	hostConfig.Links = []string{}
	for k, v := range links {
		hostConfig.Links = append(hostConfig.Links, strings.Join([]string{v, k}, ":"))
	}
	for _, v := range s.serviceConfig.ExternalLinks {
		hostConfig.Links = append(hostConfig.Links, v)
	}

	return nil
}
// createContainer creates a new container using the specified
// options. Per the docker API, the created container is not running
// and must be started explicitly. Note that the passed-in hostConfig
// will be augmented with the necessary settings to use the network
// defined by l.createNetwork().
func createContainer(
	ctx context.Context,
	l *LocalCluster,
	containerConfig container.Config,
	hostConfig container.HostConfig,
	containerName string,
) (*Container, error) {
	hostConfig.NetworkMode = container.NetworkMode(l.networkID)
	// Disable DNS search under the host machine's domain. This can
	// catch upstream wildcard DNS matching and result in odd behavior.
	hostConfig.DNSSearch = []string{"."}
	resp, err := l.client.ContainerCreate(ctx, &containerConfig, &hostConfig, nil, containerName)
	if err != nil {
		return nil, err
	}
	return &Container{
		id:      resp.ID,
		name:    containerName,
		cluster: l,
	}, nil
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
	if hostConfig == nil {
		return nil
	}

	numCPU := int64(sysinfo.NumCPU())
	if hostConfig.CPUCount < 0 {
		logrus.Warnf("Changing requested CPUCount of %d to minimum allowed of %d", hostConfig.CPUCount, windowsMinCPUCount)
		hostConfig.CPUCount = windowsMinCPUCount
	} else if hostConfig.CPUCount > numCPU {
		logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", hostConfig.CPUCount, numCPU)
		hostConfig.CPUCount = numCPU
	}

	if hostConfig.CPUShares < 0 {
		logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, windowsMinCPUShares)
		hostConfig.CPUShares = windowsMinCPUShares
	} else if hostConfig.CPUShares > windowsMaxCPUShares {
		logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, windowsMaxCPUShares)
		hostConfig.CPUShares = windowsMaxCPUShares
	}

	if hostConfig.CPUPercent < 0 {
		logrus.Warnf("Changing requested CPUPercent of %d to minimum allowed of %d", hostConfig.CPUPercent, windowsMinCPUPercent)
		hostConfig.CPUPercent = windowsMinCPUPercent
	} else if hostConfig.CPUPercent > windowsMaxCPUPercent {
		logrus.Warnf("Changing requested CPUPercent of %d to maximum allowed of %d", hostConfig.CPUPercent, windowsMaxCPUPercent)
		hostConfig.CPUPercent = windowsMaxCPUPercent
	}

	return nil
}
func addIpc(config *containertypes.HostConfig, service project.Service, containers []project.Container, ipc string) (*containertypes.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for IPC %v", ipc)
	}

	id, err := containers[0].ID()
	if err != nil {
		return nil, err
	}

	config.IpcMode = containertypes.IpcMode("container:" + id)
	return config, nil
}
func addNetNs(config *containertypes.HostConfig, service project.Service, containers []project.Container, networkMode string) (*containertypes.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for networks ns %v", networkMode)
	}

	id, err := containers[0].ID()
	if err != nil {
		return nil, err
	}

	config.NetworkMode = containertypes.NetworkMode("container:" + id)
	return config, nil
}
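// Illustrative sketch (not from libcompose itself): both helpers above rewrite
// the HostConfig so the new container joins an existing container's IPC or
// network namespace, i.e. the mode string takes the form "container:<id>".
// The container ID below is a made-up placeholder.
package main

import (
	"fmt"

	containertypes "github.com/docker/docker/api/types/container"
)

func main() {
	id := "4e2f06cbbdb7" // hypothetical container ID
	hc := &containertypes.HostConfig{}
	hc.IpcMode = containertypes.IpcMode("container:" + id)
	hc.NetworkMode = containertypes.NetworkMode("container:" + id)

	// Roughly equivalent to `docker run --ipc=container:<id> --net=container:<id>`.
	fmt.Println(hc.IpcMode.IsContainer(), hc.NetworkMode.IsContainer()) // true true
}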
// mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig.
func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
	ulimits := c.Ulimits
	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]struct{})
	for _, ul := range ulimits {
		ulIdx[ul.Name] = struct{}{}
	}
	for name, ul := range daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}
	c.Ulimits = ulimits
}
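// Standalone sketch of the same merge rule (an illustration extracted from the
// daemon method above, not the daemon code itself): container-level ulimits win,
// and daemon defaults are appended only for names the container did not set.
// It uses the go-units Ulimit type directly rather than a real Daemon/configStore.
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func mergeUlimits(containerUlimits []*units.Ulimit, daemonDefaults map[string]*units.Ulimit) []*units.Ulimit {
	seen := make(map[string]struct{})
	for _, ul := range containerUlimits {
		seen[ul.Name] = struct{}{}
	}
	merged := containerUlimits
	for name, ul := range daemonDefaults {
		if _, exists := seen[name]; !exists {
			merged = append(merged, ul)
		}
	}
	return merged
}

func main() {
	containerUlimits := []*units.Ulimit{{Name: "nofile", Soft: 1024, Hard: 2048}}
	daemonDefaults := map[string]*units.Ulimit{
		"nofile": {Name: "nofile", Soft: 65536, Hard: 65536}, // overridden by the container value
		"nproc":  {Name: "nproc", Soft: 4096, Hard: 8192},    // filled in from daemon defaults
	}
	for _, ul := range mergeUlimits(containerUlimits, daemonDefaults) {
		fmt.Printf("%s=%d:%d\n", ul.Name, ul.Soft, ul.Hard)
	}
}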
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
	if hostConfig.CPUShares < 0 {
		logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares)
		hostConfig.CPUShares = solarisMinCPUShares
	} else if hostConfig.CPUShares > solarisMaxCPUShares {
		logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares)
		hostConfig.CPUShares = solarisMaxCPUShares
	}

	if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
		// By default, MemorySwap is set to twice the size of Memory.
		hostConfig.MemorySwap = hostConfig.Memory * 2
	}

	if hostConfig.ShmSize == 0 {
		hostConfig.ShmSize = container.DefaultSHMSize
	}

	if hostConfig.OomKillDisable == nil {
		defaultOomKillDisable := false
		hostConfig.OomKillDisable = &defaultOomKillDisable
	}

	return nil
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
	if hostConfig == nil || hostConfig.Links == nil {
		return nil
	}

	for _, l := range hostConfig.Links {
		name, alias, err := runconfigopts.ParseLink(l)
		if err != nil {
			return err
		}
		child, err := daemon.GetContainer(name)
		if err != nil {
			// An error from daemon.GetContainer() means this name could not be found
			return fmt.Errorf("Could not get container for %s", name)
		}
		for child.HostConfig.NetworkMode.IsContainer() {
			parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
			child, err = daemon.GetContainer(parts[1])
			if err != nil {
				return fmt.Errorf("Could not get container for %s", parts[1])
			}
		}
		if child.HostConfig.NetworkMode.IsHost() {
			return runconfig.ErrConflictHostNetworkAndLinks
		}
		if err := daemon.registerLink(container, child, alias); err != nil {
			return err
		}
	}

	// After we load all the links into the daemon
	// set them to nil on the hostconfig
	hostConfig.Links = nil
	if err := container.WriteHostConfig(); err != nil {
		return err
	}

	return nil
}
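// Illustrative sketch (not from the daemon sources): shows the link format that
// registerLinks consumes via runconfigopts.ParseLink. A bare name becomes its
// own alias, while "name:alias" splits into the two parts. It assumes the
// import path github.com/docker/docker/runconfig/opts matches this code base.
package main

import (
	"fmt"
	"log"

	runconfigopts "github.com/docker/docker/runconfig/opts"
)

func main() {
	for _, l := range []string{"db", "db:database"} {
		name, alias, err := runconfigopts.ParseLink(l)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%q -> name=%q alias=%q\n", l, name, alias)
	}
	// Expected: "db" -> name="db" alias="db"
	//           "db:database" -> name="db" alias="database"
}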
func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error {
	// Do not lock while creating volumes since this could be calling out to external plugins
	// Don't want to block other actions, like `docker ps` because we're waiting on an external plugin
	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
		return err
	}

	container.Lock()
	defer container.Unlock()

	// Register any links from the host config before starting the container
	if err := daemon.registerLinks(container, hostConfig); err != nil {
		return err
	}

	// make sure links is not nil
	// this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links
	if hostConfig.Links == nil {
		hostConfig.Links = []string{}
	}

	container.HostConfig = hostConfig
	return container.ToDisk()
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
	warnings := []string{}
	sysInfo := sysinfo.New(true)
	// NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and
	// therefore we will not do that for Docker containers either.
	if hostConfig.Memory > 0 && !sysInfo.MemoryLimit {
		warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
		hostConfig.Memory = 0
		hostConfig.MemorySwap = -1
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit {
		warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.")
		logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.")
		hostConfig.MemorySwap = -1
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
		return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.")
	}
	// Solaris NOTE: We allow and encourage setting the swap without setting the memory limit.

	if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness {
		warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
		logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
		hostConfig.MemorySwappiness = nil
	}
	if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation {
		warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.")
		logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.")
		hostConfig.MemoryReservation = 0
	}
	if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation {
		return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.")
	}
	if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory {
		warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
		logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
		hostConfig.KernelMemory = 0
	}
	if hostConfig.CPUShares != 0 && !sysInfo.CPUShares {
		warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.")
		logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.")
		hostConfig.CPUShares = 0
	}
	if hostConfig.CPUShares < 0 {
		warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.")
		logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.")
		hostConfig.CPUShares = 0
	}
	if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() {
		warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.")
		logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.")
		hostConfig.CPUShares = 0
	}

	// Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to.

	if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod {
		warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
		logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.")
		if hostConfig.CPUQuota > 0 {
			warnings = append(warnings, "Quota will be applied on default period, not period specified.")
			logrus.Warnf("Quota will be applied on default period, not period specified.")
		}
		hostConfig.CPUPeriod = 0
	}
	if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota {
		warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
		logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.")
		hostConfig.CPUQuota = 0
	}
	if hostConfig.CPUQuota < 0 {
		warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.")
		logrus.Warnf("Invalid CPUQuota value. Must be positive. Discarding.")
		hostConfig.CPUQuota = 0
	}
	if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset {
		warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.")
		logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.")
		hostConfig.CpusetCpus = ""
		hostConfig.CpusetMems = ""
	}
	cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus)
	if err != nil {
		return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus)
	}
	if !cpusAvailable {
		return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus)
	}
	memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems)
	if err != nil {
		return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems)
	}
	if !memsAvailable {
		return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems)
	}
	if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight {
		warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.")
		logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.")
		hostConfig.BlkioWeight = 0
	}
	if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable {
		*hostConfig.OomKillDisable = false
		// Don't warn; this is the default setting but only applicable to Linux
	}
	if sysInfo.IPv4ForwardingDisabled {
		warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
	}

	// Solaris NOTE: We do not allow setting Linux-specific options, so check and warn for all of them.

	if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil {
		warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris. Discarding capabilities lists.")
		logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris. Discarding capabilities lists.")
		hostConfig.CapAdd = nil
		hostConfig.CapDrop = nil
	}
	if hostConfig.GroupAdd != nil {
		warnings = append(warnings, "Additional groups unsupported on Solaris. Discarding groups lists.")
		logrus.Warnf("Additional groups unsupported on Solaris. Discarding groups lists.")
		hostConfig.GroupAdd = nil
	}
	if hostConfig.IpcMode != "" {
		warnings = append(warnings, "IPC namespace assignment unsupported on Solaris. Discarding IPC setting.")
		logrus.Warnf("IPC namespace assignment unsupported on Solaris. Discarding IPC setting.")
		hostConfig.IpcMode = ""
	}
	if hostConfig.PidMode != "" {
		warnings = append(warnings, "PID namespace setting unsupported on Solaris. Running container in host PID namespace.")
		logrus.Warnf("PID namespace setting unsupported on Solaris. Running container in host PID namespace.")
		hostConfig.PidMode = ""
	}
	if hostConfig.Privileged {
		warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.")
		logrus.Warnf("Privileged mode unsupported on Solaris. Discarding privileged mode setting.")
		hostConfig.Privileged = false
	}
	if hostConfig.UTSMode != "" {
		warnings = append(warnings, "UTS namespace assignment unsupported on Solaris. Discarding UTS setting.")
		logrus.Warnf("UTS namespace assignment unsupported on Solaris. Discarding UTS setting.")
		hostConfig.UTSMode = ""
	}
	if hostConfig.CgroupParent != "" {
		warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.")
		logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.")
		hostConfig.CgroupParent = ""
	}
	if hostConfig.Ulimits != nil {
		warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.")
		logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.")
		hostConfig.Ulimits = nil
	}

	return warnings, nil
}
// createContainerPlatformSpecificSettings performs platform specific container create functionality
func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
	// Make sure the host config has the default daemon isolation if not specified by caller.
	if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) {
		hostConfig.Isolation = daemon.defaultIsolation
	}

	for spec := range config.Volumes {
		mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver)
		if err != nil {
			return fmt.Errorf("Unrecognised volume spec: %v", err)
		}

		// If the mountpoint doesn't have a name, generate one.
		if len(mp.Name) == 0 {
			mp.Name = stringid.GenerateNonCryptoID()
		}

		// Skip volumes for which we already have something mounted on that
		// destination because of a --volume-from.
		if container.IsDestinationMounted(mp.Destination) {
			continue
		}

		volumeDriver := hostConfig.VolumeDriver

		// Create the volume in the volume driver. If it doesn't exist,
		// a new one will be created.
		v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil)
		if err != nil {
			return err
		}

		// FIXME Windows: This code block is present in the Linux version and
		// allows the contents to be copied to the container FS prior to it
		// being started. However, the function utilizes the FollowSymLinkInScope
		// path which does not cope with Windows volume-style file paths. There
		// is a separate effort to resolve this (@swernli), so this processing
		// is deferred for now. A case where this would be useful is when
		// a dockerfile includes a VOLUME statement, but something is created
		// in that directory during the dockerfile processing. What this means
		// on Windows for TP5 is that in that scenario, the contents will not be
		// copied, but that's (somewhat) OK as HCS will bomb out soon after
		// as it doesn't support mapped directories which have contents in the
		// destination path anyway.
		//
		// Example for repro later:
		//   FROM windowsservercore
		//   RUN mkdir c:\myvol
		//   RUN copy c:\windows\system32\ntdll.dll c:\myvol
		//   VOLUME "c:\myvol"
		//
		// Then
		//   docker build -t vol .
		//   docker run -it --rm vol cmd <-- This is where HCS will error out.
		//
		// // never attempt to copy existing content in a container FS to a shared volume
		// if v.DriverName() == volume.DefaultDriverName {
		//	if err := container.CopyImagePathContent(v, mp.Destination); err != nil {
		//		return err
		//	}
		// }

		// Add it to container.MountPoints
		container.AddMountPointWithVolume(mp.Destination, v, mp.RW)
	}
	return nil
}