// NewDriver returns a new windows driver, called from NewDriver of execdriver.
func NewDriver(root string, options []string) (*Driver, error) {
	for _, option := range options {
		key, val, err := parsers.ParseKeyValueOpt(option)
		if err != nil {
			return nil, err
		}
		key = strings.ToLower(key)
		switch key {
		case "dummy":
			switch val {
			case "1":
				dummyMode = true
				logrus.Warn("Using dummy mode in Windows exec driver. This is for development use only!")
			}
		case "forcekill":
			switch val {
			case "1":
				forceKill = true
				logrus.Warn("Using force kill mode in Windows exec driver. This is for testing purposes only.")
			}
		case "isolation":
			if !container.IsolationLevel(val).IsValid() {
				return nil, fmt.Errorf("Unrecognised exec driver option 'isolation':'%s'", val)
			}
			if container.IsolationLevel(val).IsHyperV() {
				DefaultIsolation = "hyperv"
			}
			logrus.Infof("Windows default isolation level: '%s'", val)
		default:
			return nil, fmt.Errorf("Unrecognised exec driver option %s\n", key)
		}
	}

	return &Driver{
		root:             root,
		activeContainers: make(map[string]*activeContainer),
	}, nil
}
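// The snippet below is a hedged usage sketch, not part of the driver: it shows how a
// caller holding daemon "--exec-opt" style key=value pairs might construct the driver.
// It assumes it lives in the same package; the root path and option values are
// illustrative only.
func ExampleNewDriver() {
	execOptions := []string{"isolation=hyperv", "forcekill=1"}
	driver, err := NewDriver(`C:\ProgramData\docker\execroot`, execOptions)
	if err != nil {
		// an unknown key or an invalid isolation level is rejected here
		panic(err)
	}
	_ = driver // handed to the daemon's execdriver machinery in real use
}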
// callDecodeContainerConfigIsolation is a utility function to call
// DecodeContainerConfig for validating isolation levels
func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *container.HostConfig, error) {
	var (
		b   []byte
		err error
	)
	w := ContainerConfigWrapper{
		Config: &container.Config{},
		HostConfig: &container.HostConfig{
			NetworkMode: "none",
			Isolation:   container.IsolationLevel(isolation)},
	}
	if b, err = json.Marshal(w); err != nil {
		return nil, nil, fmt.Errorf("Error on marshal %s", err.Error())
	}
	return DecodeContainerConfig(bytes.NewReader(b))
}
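// A minimal test sketch (assumed to sit in a _test.go file of the same package, with
// "testing" imported) of how the helper above is typically exercised: a recognised
// isolation level should decode cleanly, an unknown one should not.
func TestDecodeIsolationLevel(t *testing.T) {
	if _, _, err := callDecodeContainerConfigIsolation("default"); err != nil {
		t.Fatalf("expected 'default' isolation to be accepted: %v", err)
	}
	if _, _, err := callDecodeContainerConfigIsolation("not-a-level"); err == nil {
		t.Fatal("expected an unrecognised isolation level to be rejected")
	}
}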
func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	var (
		authConfigs        = map[string]types.AuthConfig{}
		authConfigsEncoded = r.Header.Get("X-Registry-Config")
		buildConfig        = &dockerfile.Config{}
		notVerboseBuffer   = bytes.NewBuffer(nil)
	)

	if authConfigsEncoded != "" {
		authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
		if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting
			// to be empty.
		}
	}

	w.Header().Set("Content-Type", "application/json")

	version := httputils.VersionFromContext(ctx)
	output := ioutils.NewWriteFlusher(w)
	defer output.Close()
	sf := streamformatter.NewJSONStreamFormatter()
	errf := func(err error) error {
		if !buildConfig.Verbose && notVerboseBuffer.Len() > 0 {
			output.Write(notVerboseBuffer.Bytes())
		}
		// Do not write the error in the http output if it's still empty.
		// This prevents from writing a 200(OK) when there is an internal error.
		if !output.Flushed() {
			return err
		}
		_, err = w.Write(sf.FormatError(errors.New(utils.GetErrorMessage(err))))
		if err != nil {
			logrus.Warnf("could not write error response: %v", err)
		}
		return nil
	}

	if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") {
		buildConfig.Remove = true
	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
		buildConfig.Remove = true
	} else {
		buildConfig.Remove = httputils.BoolValue(r, "rm")
	}
	if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") {
		buildConfig.Pull = true
	}

	repoAndTags, err := sanitizeRepoAndTags(r.Form["t"])
	if err != nil {
		return errf(err)
	}

	buildConfig.DockerfileName = r.FormValue("dockerfile")
	buildConfig.Verbose = !httputils.BoolValue(r, "q")
	buildConfig.UseCache = !httputils.BoolValue(r, "nocache")
	buildConfig.ForceRemove = httputils.BoolValue(r, "forcerm")
	buildConfig.MemorySwap = httputils.Int64ValueOrZero(r, "memswap")
	buildConfig.Memory = httputils.Int64ValueOrZero(r, "memory")
	buildConfig.CPUShares = httputils.Int64ValueOrZero(r, "cpushares")
	buildConfig.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod")
	buildConfig.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota")
	buildConfig.CPUSetCpus = r.FormValue("cpusetcpus")
	buildConfig.CPUSetMems = r.FormValue("cpusetmems")
	buildConfig.CgroupParent = r.FormValue("cgroupparent")

	if r.Form.Get("shmsize") != "" {
		shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
		if err != nil {
			return errf(err)
		}
		buildConfig.ShmSize = &shmSize
	}

	if i := container.IsolationLevel(r.FormValue("isolation")); i != "" {
		if !container.IsolationLevel.IsValid(i) {
			return errf(fmt.Errorf("Unsupported isolation: %q", i))
		}
		buildConfig.Isolation = i
	}

	var buildUlimits = []*ulimit.Ulimit{}
	ulimitsJSON := r.FormValue("ulimits")
	if ulimitsJSON != "" {
		if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil {
			return errf(err)
		}
		buildConfig.Ulimits = buildUlimits
	}

	var buildArgs = map[string]string{}
	buildArgsJSON := r.FormValue("buildargs")
	if buildArgsJSON != "" {
		if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil {
			return errf(err)
		}
		buildConfig.BuildArgs = buildArgs
	}

	remoteURL := r.FormValue("remote")

	// Currently, only used if context is from a remote url.
	// Look at code in DetectContextFromRemoteURL for more information.
	createProgressReader := func(in io.ReadCloser) io.ReadCloser {
		progressOutput := sf.NewProgressOutput(output, true)
		if !buildConfig.Verbose {
			progressOutput = sf.NewProgressOutput(notVerboseBuffer, true)
		}
		return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL)
	}

	var (
		context        builder.ModifiableContext
		dockerfileName string
	)
	context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader)
	if err != nil {
		return errf(err)
	}
	defer func() {
		if err := context.Close(); err != nil {
			logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
		}
	}()

	uidMaps, gidMaps := br.backend.GetUIDGIDMaps()
	defaultArchiver := &archive.Archiver{
		Untar:   chrootarchive.Untar,
		UIDMaps: uidMaps,
		GIDMaps: gidMaps,
	}
	docker := &daemonbuilder.Docker{
		Daemon:      br.backend,
		OutOld:      output,
		AuthConfigs: authConfigs,
		Archiver:    defaultArchiver,
	}
	if !buildConfig.Verbose {
		docker.OutOld = notVerboseBuffer
	}

	b, err := dockerfile.NewBuilder(buildConfig, docker, builder.DockerIgnoreContext{ModifiableContext: context}, nil)
	if err != nil {
		return errf(err)
	}
	b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
	b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf}
	if !buildConfig.Verbose {
		b.Stdout = &streamformatter.StdoutFormatter{Writer: notVerboseBuffer, StreamFormatter: sf}
		b.Stderr = &streamformatter.StderrFormatter{Writer: notVerboseBuffer, StreamFormatter: sf}
	}

	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		finished := make(chan struct{})
		defer close(finished)
		go func() {
			select {
			case <-finished:
			case <-closeNotifier.CloseNotify():
				logrus.Infof("Client disconnected, cancelling job: build")
				b.Cancel()
			}
		}()
	}

	if len(dockerfileName) > 0 {
		b.DockerfileName = dockerfileName
	}

	imgID, err := b.Build()
	if err != nil {
		return errf(err)
	}

	for _, rt := range repoAndTags {
		if err := br.backend.TagImage(rt, imgID); err != nil {
			return errf(err)
		}
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if !buildConfig.Verbose {
		stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
		fmt.Fprintf(stdout, "%s\n", string(imgID))
	}

	return nil
}
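// For orientation, the following hedged sketch shows the kind of request this handler
// serves: a POST of the tarred build context with the options postBuild reads
// ("t", "dockerfile", "isolation", "buildargs", ...) passed as query parameters.
// It assumes the usual net/http, net/url and io imports; the daemon address, tag and
// build args are placeholders only.
func sendBuildRequest(daemonAddr string, buildContext io.Reader) (*http.Response, error) {
	q := url.Values{}
	q.Set("t", "example:latest")
	q.Set("dockerfile", "Dockerfile")
	q.Set("buildargs", `{"HTTP_PROXY":"http://proxy.example.com"}`)

	req, err := http.NewRequest("POST", daemonAddr+"/build?"+q.Encode(), buildContext)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-tar")
	return http.DefaultClient.Do(req)
}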
func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
	query := url.Values{
		"t": options.Tags,
	}
	if options.SuppressOutput {
		query.Set("q", "1")
	}
	if options.RemoteContext != "" {
		query.Set("remote", options.RemoteContext)
	}
	if options.NoCache {
		query.Set("nocache", "1")
	}
	if options.Remove {
		query.Set("rm", "1")
	} else {
		query.Set("rm", "0")
	}
	if options.ForceRemove {
		query.Set("forcerm", "1")
	}
	if options.PullParent {
		query.Set("pull", "1")
	}

	if !container.IsolationLevel.IsDefault(container.IsolationLevel(options.Isolation)) {
		query.Set("isolation", options.Isolation)
	}

	query.Set("cpusetcpus", options.CPUSetCPUs)
	query.Set("cpusetmems", options.CPUSetMems)
	query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
	query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
	query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
	query.Set("memory", strconv.FormatInt(options.Memory, 10))
	query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
	query.Set("cgroupparent", options.CgroupParent)

	if options.ShmSize != "" {
		parsedShmSize, err := units.RAMInBytes(options.ShmSize)
		if err != nil {
			return query, err
		}
		query.Set("shmsize", strconv.FormatInt(parsedShmSize, 10))
	}

	query.Set("dockerfile", options.Dockerfile)

	ulimitsJSON, err := json.Marshal(options.Ulimits)
	if err != nil {
		return query, err
	}
	query.Set("ulimits", string(ulimitsJSON))

	buildArgs := convertKVStringsToMap(options.BuildArgs)
	buildArgsJSON, err := json.Marshal(buildArgs)
	if err != nil {
		return query, err
	}
	query.Set("buildargs", string(buildArgsJSON))

	return query, nil
}
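// A hedged sketch (same package assumed) of what the conversion above yields for a
// typical set of options; the field values are illustrative only.
func exampleBuildQuery() (string, error) {
	opts := types.ImageBuildOptions{
		Tags:       []string{"example:latest"},
		NoCache:    true,
		Remove:     true,
		PullParent: true,
		Memory:     512 * 1024 * 1024,
		Dockerfile: "Dockerfile",
	}
	q, err := imageBuildOptionsToQuery(opts)
	if err != nil {
		return "", err
	}
	// Encodes to something like:
	// "dockerfile=Dockerfile&memory=536870912&nocache=1&pull=1&rm=1&t=example%3Alatest&..."
	return q.Encode(), nil
}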
// Parse parses the specified args for the specified command and generates a Config,
// a HostConfig and returns them with the specified command.
// If the specified args are not valid, it will return an error.
func Parse(cmd *flag.FlagSet, args []string) (*container.Config, *container.HostConfig, *flag.FlagSet, error) {
	var (
		// FIXME: use utils.ListOpts for attach and volumes?
		flAttach            = opts.NewListOpts(opts.ValidateAttach)
		flVolumes           = opts.NewListOpts(nil)
		flTmpfs             = opts.NewListOpts(nil)
		flBlkioWeightDevice = NewWeightdeviceOpt(ValidateWeightDevice)
		flDeviceReadBps     = NewThrottledeviceOpt(ValidateThrottleBpsDevice)
		flDeviceWriteBps    = NewThrottledeviceOpt(ValidateThrottleBpsDevice)
		flLinks             = opts.NewListOpts(ValidateLink)
		flDeviceReadIOps    = NewThrottledeviceOpt(ValidateThrottleIOpsDevice)
		flDeviceWriteIOps   = NewThrottledeviceOpt(ValidateThrottleIOpsDevice)
		flEnv               = opts.NewListOpts(opts.ValidateEnv)
		flLabels            = opts.NewListOpts(opts.ValidateEnv)
		flDevices           = opts.NewListOpts(ValidateDevice)

		flUlimits = NewUlimitOpt(nil)

		flPublish     = opts.NewListOpts(nil)
		flExpose      = opts.NewListOpts(nil)
		flDNS         = opts.NewListOpts(opts.ValidateIPAddress)
		flDNSSearch   = opts.NewListOpts(opts.ValidateDNSSearch)
		flDNSOptions  = opts.NewListOpts(nil)
		flExtraHosts  = opts.NewListOpts(opts.ValidateExtraHost)
		flVolumesFrom = opts.NewListOpts(nil)
		flEnvFile     = opts.NewListOpts(nil)
		flCapAdd      = opts.NewListOpts(nil)
		flCapDrop     = opts.NewListOpts(nil)
		flGroupAdd    = opts.NewListOpts(nil)
		flSecurityOpt = opts.NewListOpts(nil)
		flLabelsFile  = opts.NewListOpts(nil)
		flLoggingOpts = opts.NewListOpts(nil)

		flPrivileged        = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to this container")
		flPidMode           = cmd.String([]string{"-pid"}, "", "PID namespace to use")
		flUTSMode           = cmd.String([]string{"-uts"}, "", "UTS namespace to use")
		flPublishAll        = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to random ports")
		flStdin             = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached")
		flTty               = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY")
		flOomKillDisable    = cmd.Bool([]string{"-oom-kill-disable"}, false, "Disable OOM Killer")
		flOomScoreAdj       = cmd.Int([]string{"-oom-score-adj"}, 0, "Tune host's OOM preferences (-1000 to 1000)")
		flContainerIDFile   = cmd.String([]string{"-cidfile"}, "", "Write the container ID to the file")
		flEntrypoint        = cmd.String([]string{"-entrypoint"}, "", "Overwrite the default ENTRYPOINT of the image")
		flHostname          = cmd.String([]string{"h", "-hostname"}, "", "Container host name")
		flMemoryString      = cmd.String([]string{"m", "-memory"}, "", "Memory limit")
		flMemoryReservation = cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit")
		flMemorySwap        = cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
		flKernelMemory      = cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit")
		flUser              = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: <name|uid>[:<group|gid>])")
		flWorkingDir        = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
		flCPUShares         = cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
		flCPUPeriod         = cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period")
		flCPUQuota          = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota")
		flCpusetCpus        = cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
		flCpusetMems        = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
		flBlkioWeight       = cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000")
		flSwappiness        = cmd.Int64([]string{"-memory-swappiness"}, -1, "Tune container memory swappiness (0 to 100)")
		flNetMode           = cmd.String([]string{"-net"}, "default", "Connect a container to a network")
		flMacAddress        = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)")
		flIpcMode           = cmd.String([]string{"-ipc"}, "", "IPC namespace to use")
		flRestartPolicy     = cmd.String([]string{"-restart"}, "no", "Restart policy to apply when a container exits")
		flReadonlyRootfs    = cmd.Bool([]string{"-read-only"}, false, "Mount the container's root filesystem as read only")
		flLoggingDriver     = cmd.String([]string{"-log-driver"}, "", "Logging driver for container")
		flCgroupParent      = cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
		flVolumeDriver      = cmd.String([]string{"-volume-driver"}, "", "Optional volume driver for the container")
		flStopSignal        = cmd.String([]string{"-stop-signal"}, signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal))
		flIsolation         = cmd.String([]string{"-isolation"}, "", "Container isolation level")
		flShmSize           = cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB")
	)

	cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to STDIN, STDOUT or STDERR")
	cmd.Var(&flBlkioWeightDevice, []string{"-blkio-weight-device"}, "Block IO weight (relative device weight)")
	cmd.Var(&flDeviceReadBps, []string{"-device-read-bps"}, "Limit read rate (bytes per second) from a device")
	cmd.Var(&flDeviceWriteBps, []string{"-device-write-bps"}, "Limit write rate (bytes per second) to a device")
	cmd.Var(&flDeviceReadIOps, []string{"-device-read-iops"}, "Limit read rate (IO per second) from a device")
	cmd.Var(&flDeviceWriteIOps, []string{"-device-write-iops"}, "Limit write rate (IO per second) to a device")
	cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume")
	cmd.Var(&flTmpfs, []string{"-tmpfs"}, "Mount a tmpfs directory")
	cmd.Var(&flLinks, []string{"-link"}, "Add link to another container")
	cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container")
	cmd.Var(&flLabels, []string{"l", "-label"}, "Set meta data on a container")
	cmd.Var(&flLabelsFile, []string{"-label-file"}, "Read in a line delimited file of labels")
	cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables")
	cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a file of environment variables")
	cmd.Var(&flPublish, []string{"p", "-publish"}, "Publish a container's port(s) to the host")
	cmd.Var(&flExpose, []string{"-expose"}, "Expose a port or a range of ports")
	cmd.Var(&flDNS, []string{"-dns"}, "Set custom DNS servers")
	cmd.Var(&flDNSSearch, []string{"-dns-search"}, "Set custom DNS search domains")
	cmd.Var(&flDNSOptions, []string{"-dns-opt"}, "Set DNS options")
	cmd.Var(&flExtraHosts, []string{"-add-host"}, "Add a custom host-to-IP mapping (host:ip)")
	cmd.Var(&flVolumesFrom, []string{"-volumes-from"}, "Mount volumes from the specified container(s)")
	cmd.Var(&flCapAdd, []string{"-cap-add"}, "Add Linux capabilities")
	cmd.Var(&flCapDrop, []string{"-cap-drop"}, "Drop Linux capabilities")
	cmd.Var(&flGroupAdd, []string{"-group-add"}, "Add additional groups to join")
	cmd.Var(&flSecurityOpt, []string{"-security-opt"}, "Security Options")
	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")
	cmd.Var(&flLoggingOpts, []string{"-log-opt"}, "Log driver options")

	cmd.Require(flag.Min, 1)

	if err := cmd.ParseFlags(args, true); err != nil {
		return nil, nil, cmd, err
	}

	var (
		attachStdin  = flAttach.Get("stdin")
		attachStdout = flAttach.Get("stdout")
		attachStderr = flAttach.Get("stderr")
	)

	// Validate the input mac address
	if *flMacAddress != "" {
		if _, err := opts.ValidateMACAddress(*flMacAddress); err != nil {
			return nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress)
		}
	}

	if *flStdin {
		attachStdin = true
	}
	// If -a is not set attach to the output stdio
	if flAttach.Len() == 0 {
		attachStdout = true
		attachStderr = true
	}

	var err error

	var flMemory int64
	if *flMemoryString != "" {
		flMemory, err = units.RAMInBytes(*flMemoryString)
		if err != nil {
			return nil, nil, cmd, err
		}
	}

	var MemoryReservation int64
	if *flMemoryReservation != "" {
		MemoryReservation, err = units.RAMInBytes(*flMemoryReservation)
		if err != nil {
			return nil, nil, cmd, err
		}
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			memorySwap, err = units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return nil, nil, cmd, err
			}
		}
	}

	var KernelMemory int64
	if *flKernelMemory != "" {
		KernelMemory, err = units.RAMInBytes(*flKernelMemory)
		if err != nil {
			return nil, nil, cmd, err
		}
	}

	swappiness := *flSwappiness
	if swappiness != -1 && (swappiness < 0 || swappiness > 100) {
		return nil, nil, cmd, fmt.Errorf("Invalid value: %d. Valid memory swappiness range is 0-100", swappiness)
	}

	var shmSize int64
	if *flShmSize != "" {
		shmSize, err = units.RAMInBytes(*flShmSize)
		if err != nil {
			return nil, nil, cmd, err
		}
	}

	var binds []string
	// add any bind targets to the list of container volumes
	for bind := range flVolumes.GetMap() {
		if arr := volumeSplitN(bind, 2); len(arr) > 1 {
			// after creating the bind mount we want to delete it from the flVolumes values because
			// we do not want bind mounts being committed to image configs
			binds = append(binds, bind)
			flVolumes.Delete(bind)
		}
	}

	// Can't evaluate options passed into --tmpfs until we actually mount
	tmpfs := make(map[string]string)
	for _, t := range flTmpfs.GetAll() {
		if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
			if _, _, err := mount.ParseTmpfsOptions(arr[1]); err != nil {
				return nil, nil, cmd, err
			}
			tmpfs[arr[0]] = arr[1]
		} else {
			tmpfs[arr[0]] = ""
		}
	}

	var (
		parsedArgs = cmd.Args()
		runCmd     *strslice.StrSlice
		entrypoint *strslice.StrSlice
		image      = cmd.Arg(0)
	)
	if len(parsedArgs) > 1 {
		runCmd = strslice.New(parsedArgs[1:]...)
	}
	if *flEntrypoint != "" {
		entrypoint = strslice.New(*flEntrypoint)
	}

	var (
		domainname string
		hostname   = *flHostname
		parts      = strings.SplitN(hostname, ".", 2)
	)
	if len(parts) > 1 {
		hostname = parts[0]
		domainname = parts[1]
	}

	ports, portBindings, err := nat.ParsePortSpecs(flPublish.GetAll())
	if err != nil {
		return nil, nil, cmd, err
	}

	// Merge in exposed ports to the map of published ports
	for _, e := range flExpose.GetAll() {
		if strings.Contains(e, ":") {
			return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e)
		}
		//support two formats for expose, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
		proto, port := nat.SplitProtoPort(e)
		//parse the start and end port and create a sequence of ports to expose
		//if expose a port, the start and end port are the same
		start, end, err := nat.ParsePortRange(port)
		if err != nil {
			return nil, nil, cmd, fmt.Errorf("Invalid range format for --expose: %s, error: %s", e, err)
		}
		for i := start; i <= end; i++ {
			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
			if err != nil {
				return nil, nil, cmd, err
			}
			if _, exists := ports[p]; !exists {
				ports[p] = struct{}{}
			}
		}
	}

	// parse device mappings
	deviceMappings := []container.DeviceMapping{}
	for _, device := range flDevices.GetAll() {
		deviceMapping, err := ParseDevice(device)
		if err != nil {
			return nil, nil, cmd, err
		}
		deviceMappings = append(deviceMappings, deviceMapping)
	}

	// collect all the environment variables for the container
	envVariables, err := readKVStrings(flEnvFile.GetAll(), flEnv.GetAll())
	if err != nil {
		return nil, nil, cmd, err
	}

	// collect all the labels for the container
	labels, err := readKVStrings(flLabelsFile.GetAll(), flLabels.GetAll())
	if err != nil {
		return nil, nil, cmd, err
	}

	ipcMode := container.IpcMode(*flIpcMode)
	if !ipcMode.Valid() {
		return nil, nil, cmd, fmt.Errorf("--ipc: invalid IPC mode")
	}

	pidMode := container.PidMode(*flPidMode)
	if !pidMode.Valid() {
		return nil, nil, cmd, fmt.Errorf("--pid: invalid PID mode")
	}

	utsMode := container.UTSMode(*flUTSMode)
	if !utsMode.Valid() {
		return nil, nil, cmd, fmt.Errorf("--uts: invalid UTS mode")
	}

	restartPolicy, err := ParseRestartPolicy(*flRestartPolicy)
	if err != nil {
		return nil, nil, cmd, err
	}

	loggingOpts, err := parseLoggingOpts(*flLoggingDriver, flLoggingOpts.GetAll())
	if err != nil {
		return nil, nil, cmd, err
	}

	resources := container.Resources{
		CgroupParent:         *flCgroupParent,
		Memory:               flMemory,
		MemoryReservation:    MemoryReservation,
		MemorySwap:           memorySwap,
		MemorySwappiness:     flSwappiness,
		KernelMemory:         KernelMemory,
		OomKillDisable:       *flOomKillDisable,
		CPUShares:            *flCPUShares,
		CPUPeriod:            *flCPUPeriod,
		CpusetCpus:           *flCpusetCpus,
		CpusetMems:           *flCpusetMems,
		CPUQuota:             *flCPUQuota,
		BlkioWeight:          *flBlkioWeight,
		BlkioWeightDevice:    flBlkioWeightDevice.GetList(),
		BlkioDeviceReadBps:   flDeviceReadBps.GetList(),
		BlkioDeviceWriteBps:  flDeviceWriteBps.GetList(),
		BlkioDeviceReadIOps:  flDeviceReadIOps.GetList(),
		BlkioDeviceWriteIOps: flDeviceWriteIOps.GetList(),
		Ulimits:              flUlimits.GetList(),
		Devices:              deviceMappings,
	}

	config := &container.Config{
		Hostname:     hostname,
		Domainname:   domainname,
		ExposedPorts: ports,
		User:         *flUser,
		Tty:          *flTty,
		// TODO: deprecated, it comes from -n, --networking
		// it's still needed internally to set the network to disabled
		// if e.g. bridge is none in daemon opts, and in inspect
		NetworkDisabled: false,
		OpenStdin:       *flStdin,
		AttachStdin:     attachStdin,
		AttachStdout:    attachStdout,
		AttachStderr:    attachStderr,
		Env:             envVariables,
		Cmd:             runCmd,
		Image:           image,
		Volumes:         flVolumes.GetMap(),
		MacAddress:      *flMacAddress,
		Entrypoint:      entrypoint,
		WorkingDir:      *flWorkingDir,
		Labels:          ConvertKVStringsToMap(labels),
		StopSignal:      *flStopSignal,
	}

	hostConfig := &container.HostConfig{
		Binds:           binds,
		ContainerIDFile: *flContainerIDFile,
		OomScoreAdj:     *flOomScoreAdj,
		Privileged:      *flPrivileged,
		PortBindings:    portBindings,
		Links:           flLinks.GetAll(),
		PublishAllPorts: *flPublishAll,
		// Make sure the dns fields are never nil.
		// New containers don't ever have those fields nil,
		// but pre created containers can still have those nil values.
		// See https://github.com/docker/docker/pull/17779
		// for a more detailed explanation on why we don't want that.
		DNS:            flDNS.GetAllOrEmpty(),
		DNSSearch:      flDNSSearch.GetAllOrEmpty(),
		DNSOptions:     flDNSOptions.GetAllOrEmpty(),
		ExtraHosts:     flExtraHosts.GetAll(),
		VolumesFrom:    flVolumesFrom.GetAll(),
		NetworkMode:    container.NetworkMode(*flNetMode),
		IpcMode:        ipcMode,
		PidMode:        pidMode,
		UTSMode:        utsMode,
		CapAdd:         strslice.New(flCapAdd.GetAll()...),
		CapDrop:        strslice.New(flCapDrop.GetAll()...),
		GroupAdd:       flGroupAdd.GetAll(),
		RestartPolicy:  restartPolicy,
		SecurityOpt:    flSecurityOpt.GetAll(),
		ReadonlyRootfs: *flReadonlyRootfs,
		LogConfig:      container.LogConfig{Type: *flLoggingDriver, Config: loggingOpts},
		VolumeDriver:   *flVolumeDriver,
		Isolation:      container.IsolationLevel(*flIsolation),
		ShmSize:        shmSize,
		Resources:      resources,
		Tmpfs:          tmpfs,
	}

	// When allocating stdin in attached mode, close stdin at client disconnect
	if config.OpenStdin && config.AttachStdin {
		config.StdinOnce = true
	}

	return config, hostConfig, cmd, nil
}
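// A hedged sketch of driving Parse directly (same package assumed, with the daemon's
// mflag package aliased as "flag" as in the file above). The command line mirrors
// "docker run -i -t --memory 512m busybox top"; the values are illustrative only.
func exampleParseRunFlags() error {
	cmd := flag.NewFlagSet("run", flag.ContinueOnError)
	config, hostConfig, _, err := Parse(cmd, []string{
		"-i", "-t", "--memory", "512m", "busybox", "top",
	})
	if err != nil {
		return err
	}
	_ = config.Tty                  // true, from -t
	_ = hostConfig.Resources.Memory // 536870912, parsed from "512m"
	return nil
}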
func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
	version := httputils.VersionFromContext(ctx)
	options := &types.ImageBuildOptions{}
	if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") {
		options.Remove = true
	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
		options.Remove = true
	} else {
		options.Remove = httputils.BoolValue(r, "rm")
	}
	if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") {
		options.PullParent = true
	}

	options.Dockerfile = r.FormValue("dockerfile")
	options.SuppressOutput = httputils.BoolValue(r, "q")
	options.NoCache = httputils.BoolValue(r, "nocache")
	options.ForceRemove = httputils.BoolValue(r, "forcerm")
	options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap")
	options.Memory = httputils.Int64ValueOrZero(r, "memory")
	options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares")
	options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod")
	options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota")
	options.CPUSetCPUs = r.FormValue("cpusetcpus")
	options.CPUSetMems = r.FormValue("cpusetmems")
	options.CgroupParent = r.FormValue("cgroupparent")

	if r.Form.Get("shmsize") != "" {
		shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
		if err != nil {
			return nil, err
		}
		options.ShmSize = shmSize
	}

	if i := container.IsolationLevel(r.FormValue("isolation")); i != "" {
		if !container.IsolationLevel.IsValid(i) {
			return nil, fmt.Errorf("Unsupported isolation: %q", i)
		}
		options.IsolationLevel = i
	}

	var buildUlimits = []*units.Ulimit{}
	ulimitsJSON := r.FormValue("ulimits")
	if ulimitsJSON != "" {
		if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil {
			return nil, err
		}
		options.Ulimits = buildUlimits
	}

	var buildArgs = map[string]string{}
	buildArgsJSON := r.FormValue("buildargs")
	if buildArgsJSON != "" {
		if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil {
			return nil, err
		}
		options.BuildArgs = buildArgs
	}

	return options, nil
}
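// A hedged sketch (same package assumed) of feeding the parser above a request like the
// one the build router receives. API-version handling through ctx is left to the caller,
// so the rm/forcerm defaults for API >= 1.12 are not exercised here; the form values are
// illustrative only.
func exampleNewImageBuildOptions(ctx context.Context) (*types.ImageBuildOptions, error) {
	form := url.Values{}
	form.Set("dockerfile", "Dockerfile")
	form.Set("nocache", "1")
	form.Set("cpusetcpus", "0-1")
	form.Set("buildargs", `{"HTTP_PROXY":"http://proxy.example.com"}`)

	r, err := http.NewRequest("POST", "/build?"+form.Encode(), nil)
	if err != nil {
		return nil, err
	}
	if err := r.ParseForm(); err != nil {
		return nil, err
	}
	return newImageBuildOptions(ctx, r)
}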
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true)
	flTags := opts.NewListOpts(validateTag)
	cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the build output and print image ID on success")
	noCache := cmd.Bool([]string{"-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
	flShmSize := cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB")
	flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
	flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
	flBuildArg := opts.NewListOpts(opts.ValidateEnv)
	cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables")
	isolation := cmd.String([]string{"-isolation"}, "", "Container isolation level")

	ulimits := make(map[string]*units.Ulimit)
	flUlimits := runconfigopts.NewUlimitOpt(&ulimits)
	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")

	cmd.Require(flag.Exact, 1)

	// For trusted pull on "FROM <image>" instruction.
	addTrustedFlags(cmd, true)

	cmd.ParseFlags(args, true)

	var (
		context  io.ReadCloser
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil

	specifiedContext := cmd.Arg(0)

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
		progBuff      io.Writer
		buildBuff     io.Writer
	)

	progBuff = cli.out
	buildBuff = cli.out
	if *suppressOutput {
		progBuff = bytes.NewBuffer(nil)
		buildBuff = bytes.NewBuffer(nil)
	}

	switch {
	case specifiedContext == "-":
		tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
	case urlutil.IsGitURL(specifiedContext) && hasGit:
		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
	case urlutil.IsURL(specifiedContext):
		tempDir, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName)
	default:
		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
	}

	if err != nil {
		if *suppressOutput && urlutil.IsURL(specifiedContext) {
			fmt.Fprintln(cli.err, progBuff)
		}
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	// And canonicalize dockerfile name to a platform-independent one
	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
	if err != nil {
		return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
	}

	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	var excludes []string
	if err == nil {
		excludes, err = dockerignore.ReadAll(f)
		if err != nil {
			return err
		}
	}

	if err := validateContextDirectory(contextDir, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile. Ignore errors here, as they will have been
	// caught by validateContextDirectory above.
	var includes = []string{"."}
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", relDockerfile)
	}

	context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	})
	if err != nil {
		return err
	}

	var resolvedTags []*resolvedTag
	if isTrusted() {
		// Resolve the FROM lines in the Dockerfile to trusted digest references
		// using Notary. On a successful build, we must tag the resolved digests
		// to the original name specified in the Dockerfile.
		var newDockerfile *trustedDockerfile
		newDockerfile, resolvedTags, err = rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
		if err != nil {
			return fmt.Errorf("unable to process Dockerfile: %v", err)
		}
		defer newDockerfile.Close()

		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
		// Dockerfile which uses trusted pulls.
		context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)
	}

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)

	var body io.Reader = progress.NewProgressReader(context, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if *flShmSize != "" {
		shmSize, err = units.RAMInBytes(*flShmSize)
		if err != nil {
			return err
		}
	}

	var remoteContext string
	if isRemote {
		remoteContext = cmd.Arg(0)
	}

	options := types.ImageBuildOptions{
		Context:        body,
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           flTags.GetAll(),
		SuppressOutput: *suppressOutput,
		RemoteContext:  remoteContext,
		NoCache:        *noCache,
		Remove:         *rm,
		ForceRemove:    *forceRm,
		PullParent:     *pull,
		IsolationLevel: container.IsolationLevel(*isolation),
		CPUSetCPUs:     *flCPUSetCpus,
		CPUSetMems:     *flCPUSetMems,
		CPUShares:      *flCPUShares,
		CPUQuota:       *flCPUQuota,
		CPUPeriod:      *flCPUPeriod,
		CgroupParent:   *flCgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        flUlimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()),
		AuthConfigs:    cli.configFile.AuthConfigs,
	}

	response, err := cli.client.ImageBuild(options)
	if err != nil {
		return err
	}

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if *suppressOutput {
				fmt.Fprintf(cli.err, "%s%s", progBuff, buildBuff)
			}
			return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
	}

	// Windows: show error message about modified file permissions.
	if response.OSType == "windows" {
		fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if *suppressOutput {
		fmt.Fprintf(cli.out, "%s", buildBuff)
	}

	if isTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}
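// A hedged sketch of invoking the command above programmatically, roughly as the CLI
// dispatcher does after splitting arguments; the tag and context path are placeholders.
func exampleCmdBuild(cli *DockerCli) error {
	return cli.CmdBuild("--pull", "--no-cache", "-t", "example:latest", ".")
}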