Example #1
func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) {
	var service swarm.ServiceSpec

	service = swarm.ServiceSpec{
		Annotations: swarm.Annotations{
			Name:   opts.name,
			Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()),
		},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image:           opts.image,
				Args:            opts.args,
				Env:             opts.env.GetAll(),
				Labels:          runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()),
				Dir:             opts.workdir,
				User:            opts.user,
				Groups:          opts.groups,
				Mounts:          opts.mounts.Value(),
				StopGracePeriod: opts.stopGrace.Value(),
			},
			Networks:      convertNetworks(opts.networks),
			Resources:     opts.resources.ToResourceRequirements(),
			RestartPolicy: opts.restartPolicy.ToRestartPolicy(),
			Placement: &swarm.Placement{
				Constraints: opts.constraints,
			},
			LogDriver: opts.logDriver.toLogDriver(),
		},
		Networks: convertNetworks(opts.networks),
		Mode:     swarm.ServiceMode{},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:     opts.update.parallelism,
			Delay:           opts.update.delay,
			Monitor:         opts.update.monitor,
			FailureAction:   opts.update.onFailure,
			MaxFailureRatio: opts.update.maxFailureRatio,
		},
		EndpointSpec: opts.endpoint.ToEndpointSpec(),
	}

	switch opts.mode {
	case "global":
		if opts.replicas.Value() != nil {
			return service, fmt.Errorf("replicas can only be used with replicated mode")
		}

		service.Mode.Global = &swarm.GlobalService{}
	case "replicated":
		service.Mode.Replicated = &swarm.ReplicatedService{
			Replicas: opts.replicas.Value(),
		}
	default:
		return service, fmt.Errorf("Unknown mode: %s", opts.mode)
	}
	return service, nil
}
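Every example on this page funnels "KEY=value" flag strings through runconfigopts.ConvertKVStringsToMap before handing them to the Docker API. As a point of reference, here is a minimal stand-alone sketch of what that helper is assumed to do (split each entry on the first '=', map bare keys to the empty string); the real implementation lives in Docker's runconfig/opts package, and this re-implementation is for illustration only.

package main

import (
	"fmt"
	"strings"
)

// convertKVStringsToMap is a sketch of the conversion used throughout these
// examples: turn ["KEY=value", "FLAG"] into {"KEY": "value", "FLAG": ""}.
func convertKVStringsToMap(values []string) map[string]string {
	result := make(map[string]string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = "" // bare key, e.g. "--label experimental"
		} else {
			result[kv[0]] = kv[1]
		}
	}
	return result
}

func main() {
	labels := []string{"env=prod", "team=infra", "experimental"}
	fmt.Println(convertKVStringsToMap(labels))
	// map[env:prod experimental: team:infra]
}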
Example #2
func runSecretCreate(dockerCli *command.DockerCli, options createOptions) error {
	client := dockerCli.Client()
	ctx := context.Background()

	secretData, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		return fmt.Errorf("Error reading content from STDIN: %v", err)
	}

	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{
			Name:   options.name,
			Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
		},
		Data: secretData,
	}

	r, err := client.SecretCreate(ctx, spec)
	if err != nil {
		return err
	}

	fmt.Fprintln(dockerCli.Out(), r.ID)
	return nil
}
Example #3
func resolveEnvironment(serviceConfig *types.ServiceConfig, serviceDict types.Dict, workingDir string) error {
	environment := make(map[string]string)

	if envFileVal, ok := serviceDict["env_file"]; ok {
		envFiles := loadStringOrListOfStrings(envFileVal)

		var envVars []string

		for _, file := range envFiles {
			filePath := path.Join(workingDir, file)
			fileVars, err := opts.ParseEnvFile(filePath)
			if err != nil {
				return err
			}
			envVars = append(envVars, fileVars...)
		}

		for k, v := range opts.ConvertKVStringsToMap(envVars) {
			environment[k] = v
		}
	}

	for k, v := range serviceConfig.Environment {
		environment[k] = v
	}

	serviceConfig.Environment = environment

	return nil
}
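The merge order in resolveEnvironment matters: variables loaded from env_file entries come first and are then overwritten by anything set directly in the service's environment section. A small stand-alone sketch of that precedence, using plain maps and made-up values rather than the compose types:

package main

import "fmt"

func main() {
	// Values loaded from env_file entries (sample data).
	fromEnvFiles := map[string]string{"LOG_LEVEL": "info", "PORT": "8080"}
	// Values set directly under the service's environment key (sample data).
	fromService := map[string]string{"LOG_LEVEL": "debug"}

	environment := make(map[string]string)
	for k, v := range fromEnvFiles {
		environment[k] = v
	}
	for k, v := range fromService {
		environment[k] = v // service-level values win
	}

	fmt.Println(environment) // map[LOG_LEVEL:debug PORT:8080]
}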
Example #4
// CmdNetworkCreate creates a new network with a given name
//
// Usage: docker network create [OPTIONS] <NETWORK-NAME>
func (cli *DockerCli) CmdNetworkCreate(args ...string) error {
	cmd := Cli.Subcmd("network create", []string{"NETWORK-NAME"}, "Creates a new network with a name specified by the user", false)
	flDriver := cmd.String([]string{"d", "-driver"}, "bridge", "Driver to manage the Network")
	flOpts := opts.NewMapOpts(nil, nil)

	flIpamDriver := cmd.String([]string{"-ipam-driver"}, "default", "IP Address Management Driver")
	flIpamSubnet := opts.NewListOpts(nil)
	flIpamIPRange := opts.NewListOpts(nil)
	flIpamGateway := opts.NewListOpts(nil)
	flIpamAux := opts.NewMapOpts(nil, nil)
	flIpamOpt := opts.NewMapOpts(nil, nil)
	flLabels := opts.NewListOpts(nil)

	cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment")
	cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range")
	cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet")
	cmd.Var(flIpamAux, []string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver")
	cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options")
	cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options")
	cmd.Var(&flLabels, []string{"-label"}, "set metadata on a network")

	flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network")
	flIPv6 := cmd.Bool([]string{"-ipv6"}, false, "enable IPv6 networking")

	cmd.Require(flag.Exact, 1)
	err := cmd.ParseFlags(args, true)
	if err != nil {
		return err
	}

	// Set the default driver to "" if the user didn't set the value.
	// That way we can know whether it was user input or not.
	driver := *flDriver
	if !cmd.IsSet("-driver") && !cmd.IsSet("d") {
		driver = ""
	}

	ipamCfg, err := consolidateIpam(flIpamSubnet.GetAll(), flIpamIPRange.GetAll(), flIpamGateway.GetAll(), flIpamAux.GetAll())
	if err != nil {
		return err
	}

	// Construct network create request body
	nc := types.NetworkCreate{
		Driver:         driver,
		IPAM:           network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()},
		Options:        flOpts.GetAll(),
		CheckDuplicate: true,
		Internal:       *flInternal,
		EnableIPv6:     *flIPv6,
		Labels:         runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()),
	}

	resp, err := cli.client.NetworkCreate(context.Background(), cmd.Arg(0), nc)
	if err != nil {
		return err
	}
	fmt.Fprintf(cli.out, "%s\n", resp.ID)
	return nil
}
Example #5
func runSecretCreate(dockerCli *command.DockerCli, options createOptions) error {
	client := dockerCli.Client()
	ctx := context.Background()

	var in io.Reader = dockerCli.In()
	if options.file != "-" {
		file, err := system.OpenSequential(options.file)
		if err != nil {
			return err
		}
		in = file
		defer file.Close()
	}

	secretData, err := ioutil.ReadAll(in)
	if err != nil {
		return fmt.Errorf("Error reading content from %q: %v", options.file, err)
	}

	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{
			Name:   options.name,
			Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
		},
		Data: secretData,
	}

	r, err := client.SecretCreate(ctx, spec)
	if err != nil {
		return err
	}

	fmt.Fprintln(dockerCli.Out(), r.ID)
	return nil
}
Example #6
func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) {
	accepted := map[string]bool{
		"name":       true,
		"id":         true,
		"label":      true,
		"role":       true,
		"membership": true,
	}
	if err := filter.Validate(accepted); err != nil {
		return nil, err
	}
	f := &swarmapi.ListNodesRequest_Filters{
		Names:      filter.Get("name"),
		IDPrefixes: filter.Get("id"),
		Labels:     runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
	}

	for _, r := range filter.Get("role") {
		if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok {
			f.Roles = append(f.Roles, swarmapi.NodeRole(role))
		} else if r != "" {
			return nil, fmt.Errorf("Invalid role filter: '%s'", r)
		}
	}

	for _, a := range filter.Get("membership") {
		if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok {
			f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership))
		} else if a != "" {
			return nil, fmt.Errorf("Invalid membership filter: '%s'", a)
		}
	}

	return f, nil
}
Example #7
func runCreate(dockerCli *client.DockerCli, opts createOptions) error {
	client := dockerCli.Client()

	ipamCfg, err := consolidateIpam(opts.ipamSubnet, opts.ipamIPRange, opts.ipamGateway, opts.ipamAux.GetAll())
	if err != nil {
		return err
	}

	// Construct network create request body
	nc := types.NetworkCreate{
		Driver:  opts.driver,
		Options: opts.driverOpts.GetAll(),
		IPAM: network.IPAM{
			Driver:  opts.ipamDriver,
			Config:  ipamCfg,
			Options: opts.ipamOpt.GetAll(),
		},
		CheckDuplicate: true,
		Internal:       opts.internal,
		EnableIPv6:     opts.ipv6,
		Labels:         runconfigopts.ConvertKVStringsToMap(opts.labels),
	}

	resp, err := client.NetworkCreate(context.Background(), opts.name, nc)
	if err != nil {
		return err
	}
	fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID)
	return nil
}
Example #8
func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) {
	loggingOptsMap := runconfigopts.ConvertKVStringsToMap(loggingOpts)
	if loggingDriver == "none" && len(loggingOpts) > 0 {
		return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver)
	}
	return loggingOptsMap, nil
}
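A quick sketch of how this validation behaves: options are rejected outright when the driver is "none", otherwise each "key=value" option ends up in the returned map. The parseLoggingOpts below is a self-contained re-implementation for illustration only, inlining the ConvertKVStringsToMap step; the sample values are made up.

package main

import (
	"fmt"
	"strings"
)

// Sketch of the check above: reject log options for the "none" driver,
// otherwise return them as a key/value map.
func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) {
	if loggingDriver == "none" && len(loggingOpts) > 0 {
		return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver)
	}
	opts := make(map[string]string, len(loggingOpts))
	for _, o := range loggingOpts {
		kv := strings.SplitN(o, "=", 2)
		if len(kv) == 2 {
			opts[kv[0]] = kv[1]
		} else {
			opts[kv[0]] = ""
		}
	}
	return opts, nil
}

func main() {
	if _, err := parseLoggingOpts("none", []string{"max-size=10m"}); err != nil {
		fmt.Println("rejected:", err)
	}
	m, _ := parseLoggingOpts("json-file", []string{"max-size=10m", "max-file=3"})
	fmt.Println(m) // map[max-file:3 max-size:10m]
}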
Example #9
// CmdVolumeCreate creates a new volume.
//
// Usage: docker volume create [OPTIONS]
func (cli *DockerCli) CmdVolumeCreate(args ...string) error {
	cmd := Cli.Subcmd("volume create", nil, "Create a volume", true)
	flDriver := cmd.String([]string{"d", "-driver"}, "local", "Specify volume driver name")
	flName := cmd.String([]string{"-name"}, "", "Specify volume name")

	flDriverOpts := opts.NewMapOpts(nil, nil)
	cmd.Var(flDriverOpts, []string{"o", "-opt"}, "Set driver specific options")

	flLabels := opts.NewListOpts(nil)
	cmd.Var(&flLabels, []string{"-label"}, "Set metadata for a volume")

	cmd.Require(flag.Exact, 0)
	cmd.ParseFlags(args, true)

	volReq := types.VolumeCreateRequest{
		Driver:     *flDriver,
		DriverOpts: flDriverOpts.GetAll(),
		Name:       *flName,
		Labels:     runconfigopts.ConvertKVStringsToMap(flLabels.GetAll()),
	}

	vol, err := cli.client.VolumeCreate(context.Background(), volReq)
	if err != nil {
		return err
	}

	fmt.Fprintf(cli.out, "%s\n", vol.Name)
	return nil
}
Example #10
func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) {
	var spec swarm.NodeSpec

	spec.Annotations.Name = opts.annotations.name
	spec.Annotations.Labels = runconfigopts.ConvertKVStringsToMap(opts.annotations.labels.GetAll())

	switch swarm.NodeRole(strings.ToLower(opts.role)) {
	case swarm.NodeRoleWorker:
		spec.Role = swarm.NodeRoleWorker
	case swarm.NodeRoleManager:
		spec.Role = swarm.NodeRoleManager
	case "":
	default:
		return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role)
	}

	switch swarm.NodeAvailability(strings.ToLower(opts.availability)) {
	case swarm.NodeAvailabilityActive:
		spec.Availability = swarm.NodeAvailabilityActive
	case swarm.NodeAvailabilityPause:
		spec.Availability = swarm.NodeAvailabilityPause
	case swarm.NodeAvailabilityDrain:
		spec.Availability = swarm.NodeAvailabilityDrain
	case "":
	default:
		return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability)
	}

	return spec, nil
}
Example #11
func newListTasksFilters(filter filters.Args) (*swarmapi.ListTasksRequest_Filters, error) {
	accepted := map[string]bool{
		"name":          true,
		"id":            true,
		"label":         true,
		"service":       true,
		"node":          true,
		"desired_state": true,
	}
	if err := filter.Validate(accepted); err != nil {
		return nil, err
	}
	f := &swarmapi.ListTasksRequest_Filters{
		Names:      filter.Get("name"),
		IDPrefixes: filter.Get("id"),
		Labels:     runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
		ServiceIDs: filter.Get("service"),
		NodeIDs:    filter.Get("node"),
	}

	for _, s := range filter.Get("desired_state") {
		if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok {
			f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state))
		} else if s != "" {
			return nil, fmt.Errorf("Invalid desired_state filter: '%s'", s)
		}
	}

	return f, nil
}
Example #12
func (ldo *logDriverOptions) toLogDriver() *swarm.Driver {
	if ldo.name == "" {
		return nil
	}

	// set the log driver only if specified.
	return &swarm.Driver{
		Name:    ldo.name,
		Options: runconfigopts.ConvertKVStringsToMap(ldo.opts.GetAll()),
	}
}
Example #13
func updateLabels(flags *pflag.FlagSet, field *map[string]string) {
	if !flags.Changed(flagLabel) {
		return
	}

	values := flags.Lookup(flagLabel).Value.(*opts.ListOpts).GetAll()

	localLabels := map[string]string{}
	for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
		localLabels[key] = value
	}
	*field = localLabels
}
Example #14
func mergeLabels(flags *pflag.FlagSet, field *map[string]string) {
	if !flags.Changed("label") {
		return
	}

	if *field == nil {
		*field = make(map[string]string)
	}

	values := flags.Lookup("label").Value.(*opts.ListOpts).GetAll()
	for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
		(*field)[key] = value
	}
}
Example #15
func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) {
	accepted := map[string]bool{
		"name":  true,
		"id":    true,
		"label": true,
	}
	if err := filter.Validate(accepted); err != nil {
		return nil, err
	}
	return &swarmapi.ListServicesRequest_Filters{
		Names:      filter.Get("name"),
		IDPrefixes: filter.Get("id"),
		Labels:     runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
	}, nil
}
Example #16
func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) {
	return func(node *swarm.Node) {
		mergeString := func(flag string, field *string) {
			if flags.Changed(flag) {
				*field, _ = flags.GetString(flag)
			}
		}

		mergeRole := func(flag string, field *swarm.NodeRole) {
			if flags.Changed(flag) {
				str, _ := flags.GetString(flag)
				*field = swarm.NodeRole(str)
			}
		}

		mergeMembership := func(flag string, field *swarm.NodeMembership) {
			if flags.Changed(flag) {
				str, _ := flags.GetString(flag)
				*field = swarm.NodeMembership(str)
			}
		}

		mergeAvailability := func(flag string, field *swarm.NodeAvailability) {
			if flags.Changed(flag) {
				str, _ := flags.GetString(flag)
				*field = swarm.NodeAvailability(str)
			}
		}

		mergeLabels := func(flag string, field *map[string]string) {
			if flags.Changed(flag) {
				values, _ := flags.GetStringSlice(flag)
				for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
					(*field)[key] = value
				}
			}
		}

		spec := &node.Spec
		mergeString("name", &spec.Name)
		// TODO: setting labels is not working
		mergeLabels("label", &spec.Labels)
		mergeRole("role", &spec.Role)
		mergeMembership("membership", &spec.Membership)
		mergeAvailability("availability", &spec.Availability)
	}
}
Example #17
func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error {
	return func(node *swarm.Node) error {
		spec := &node.Spec

		if flags.Changed(flagRole) {
			str, err := flags.GetString(flagRole)
			if err != nil {
				return err
			}
			spec.Role = swarm.NodeRole(str)
		}
		if flags.Changed(flagMembership) {
			str, err := flags.GetString(flagMembership)
			if err != nil {
				return err
			}
			spec.Membership = swarm.NodeMembership(str)
		}
		if flags.Changed(flagAvailability) {
			str, err := flags.GetString(flagAvailability)
			if err != nil {
				return err
			}
			spec.Availability = swarm.NodeAvailability(str)
		}
		if spec.Annotations.Labels == nil {
			spec.Annotations.Labels = make(map[string]string)
		}
		if flags.Changed(flagLabelAdd) {
			labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll()
			for k, v := range runconfigopts.ConvertKVStringsToMap(labels) {
				spec.Annotations.Labels[k] = v
			}
		}
		if flags.Changed(flagLabelRemove) {
			keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll()
			for _, k := range keys {
				// if a key doesn't exist, fail the command explicitly
				if _, exists := spec.Annotations.Labels[k]; !exists {
					return fmt.Errorf("key %s doesn't exist in node's labels", k)
				}
				delete(spec.Annotations.Labels, k)
			}
		}
		return nil
	}
}
Example #18
func runCreate(dockerCli *client.DockerCli, opts createOptions) error {
	client := dockerCli.Client()

	volReq := types.VolumeCreateRequest{
		Driver:     opts.driver,
		DriverOpts: opts.driverOpts.GetAll(),
		Name:       opts.name,
		Labels:     runconfigopts.ConvertKVStringsToMap(opts.labels),
	}

	vol, err := client.VolumeCreate(context.Background(), volReq)
	if err != nil {
		return err
	}

	fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name)
	return nil
}
Example #19
func updateLabels(flags *pflag.FlagSet, field *map[string]string) {
	if flags.Changed(flagLabelAdd) {
		if *field == nil {
			*field = map[string]string{}
		}

		values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll()
		for key, value := range runconfigopts.ConvertKVStringsToMap(values) {
			(*field)[key] = value
		}
	}

	if *field != nil && flags.Changed(flagLabelRemove) {
		toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll()
		for _, label := range toRemove {
			delete(*field, label)
		}
	}
}
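The net effect of this helper is that --label-add entries are merged in first and --label-rm keys are deleted afterwards, so a removal wins over an addition of the same key within a single update. A plain-map sketch of that ordering, with hypothetical flag values and none of the pflag plumbing:

package main

import "fmt"

func main() {
	// Existing labels on the object being updated (sample data).
	labels := map[string]string{"env": "prod", "team": "infra"}

	// Hypothetical flags: --label-add env=staging --label-add tier=web --label-rm env
	added := map[string]string{"env": "staging", "tier": "web"}
	removed := []string{"env"}

	// Mirror updateLabels: apply additions first, removals afterwards.
	for k, v := range added {
		labels[k] = v
	}
	for _, k := range removed {
		delete(labels, k)
	}

	fmt.Println(labels) // map[team:infra tier:web]
}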
Example #20
// updateLogDriver updates the log driver only if the log driver flag is set.
// All options will be replaced with those provided on the command line.
func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error {
	if !flags.Changed(flagLogDriver) {
		return nil
	}

	name, err := flags.GetString(flagLogDriver)
	if err != nil {
		return err
	}

	if name == "" {
		return nil
	}

	taskTemplate.LogDriver = &swarm.Driver{
		Name:    name,
		Options: runconfigopts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()),
	}

	return nil
}
Example #21
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true)
	flTags := opts.NewListOpts(validateTag)
	cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the build output and print image ID on success")
	noCache := cmd.Bool([]string{"-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
	flShmSize := cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB")
	flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
	flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
	flBuildArg := opts.NewListOpts(runconfigopts.ValidateEnv)
	cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables")
	isolation := cmd.String([]string{"-isolation"}, "", "Container isolation level")

	ulimits := make(map[string]*units.Ulimit)
	flUlimits := runconfigopts.NewUlimitOpt(&ulimits)
	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")

	cmd.Require(flag.Exact, 1)

	// For trusted pull on "FROM <image>" instruction.
	addTrustedFlags(cmd, true)

	cmd.ParseFlags(args, true)

	var (
		context  io.ReadCloser
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil

	specifiedContext := cmd.Arg(0)

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
		progBuff      io.Writer
		buildBuff     io.Writer
	)

	progBuff = cli.out
	buildBuff = cli.out
	if *suppressOutput {
		progBuff = bytes.NewBuffer(nil)
		buildBuff = bytes.NewBuffer(nil)
	}

	switch {
	case specifiedContext == "-":
		context, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
	case urlutil.IsGitURL(specifiedContext) && hasGit:
		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
	case urlutil.IsURL(specifiedContext):
		context, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName)
	default:
		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
	}

	if err != nil {
		if *suppressOutput && urlutil.IsURL(specifiedContext) {
			fmt.Fprintln(cli.err, progBuff)
		}
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	if context == nil {
		// And canonicalize dockerfile name to a platform-independent one
		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
		if err != nil {
			return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
		}

		f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
		if err != nil && !os.IsNotExist(err) {
			return err
		}

		var excludes []string
		if err == nil {
			excludes, err = dockerignore.ReadAll(f)
			if err != nil {
				return err
			}
		}

		if err := validateContextDirectory(contextDir, excludes); err != nil {
			return fmt.Errorf("Error checking context: '%s'.", err)
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile. Ignore errors here, as they will have been
		// caught by validateContextDirectory above.
		var includes = []string{"."}
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", relDockerfile)
		}

		context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		})
		if err != nil {
			return err
		}
	}

	var resolvedTags []*resolvedTag
	if isTrusted() {
		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
		// Dockerfile which uses trusted pulls.
		context = replaceDockerfileTarWrapper(context, relDockerfile, cli.trustedReference, &resolvedTags)
	}

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)

	var body io.Reader = progress.NewProgressReader(context, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if *flShmSize != "" {
		shmSize, err = units.RAMInBytes(*flShmSize)
		if err != nil {
			return err
		}
	}

	var remoteContext string
	if isRemote {
		remoteContext = cmd.Arg(0)
	}

	options := types.ImageBuildOptions{
		Context:        body,
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           flTags.GetAll(),
		SuppressOutput: *suppressOutput,
		RemoteContext:  remoteContext,
		NoCache:        *noCache,
		Remove:         *rm,
		ForceRemove:    *forceRm,
		PullParent:     *pull,
		IsolationLevel: container.IsolationLevel(*isolation),
		CPUSetCPUs:     *flCPUSetCpus,
		CPUSetMems:     *flCPUSetMems,
		CPUShares:      *flCPUShares,
		CPUQuota:       *flCPUQuota,
		CPUPeriod:      *flCPUPeriod,
		CgroupParent:   *flCgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        flUlimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()),
		AuthConfigs:    cli.configFile.AuthConfigs,
	}

	response, err := cli.client.ImageBuild(options)
	if err != nil {
		return err
	}

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if *suppressOutput {
				fmt.Fprintf(cli.err, "%s%s", progBuff, buildBuff)
			}
			return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
	}

	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" {
		fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if *suppressOutput {
		fmt.Fprintf(cli.out, "%s", buildBuff)
	}

	if isTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #22
// parse parses the args for the specified command and generates a Config,
// a HostConfig and returns them with the specified command.
// If the specified args are not valid, it will return an error.
func parse(flags *pflag.FlagSet, copts *containerOptions) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
	var (
		attachStdin  = copts.attach.Get("stdin")
		attachStdout = copts.attach.Get("stdout")
		attachStderr = copts.attach.Get("stderr")
	)

	// Validate the input mac address
	if copts.macAddress != "" {
		if _, err := opts.ValidateMACAddress(copts.macAddress); err != nil {
			return nil, nil, nil, fmt.Errorf("%s is not a valid mac address", copts.macAddress)
		}
	}
	if copts.stdin {
		attachStdin = true
	}
	// If -a is not set, attach to stdout and stderr
	if copts.attach.Len() == 0 {
		attachStdout = true
		attachStderr = true
	}

	var err error

	var memory int64
	if copts.memoryString != "" {
		memory, err = units.RAMInBytes(copts.memoryString)
		if err != nil {
			return nil, nil, nil, err
		}
	}

	var memoryReservation int64
	if copts.memoryReservation != "" {
		memoryReservation, err = units.RAMInBytes(copts.memoryReservation)
		if err != nil {
			return nil, nil, nil, err
		}
	}

	var memorySwap int64
	if copts.memorySwap != "" {
		if copts.memorySwap == "-1" {
			memorySwap = -1
		} else {
			memorySwap, err = units.RAMInBytes(copts.memorySwap)
			if err != nil {
				return nil, nil, nil, err
			}
		}
	}

	var kernelMemory int64
	if copts.kernelMemory != "" {
		kernelMemory, err = units.RAMInBytes(copts.kernelMemory)
		if err != nil {
			return nil, nil, nil, err
		}
	}

	swappiness := copts.swappiness
	if swappiness != -1 && (swappiness < 0 || swappiness > 100) {
		return nil, nil, nil, fmt.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness)
	}

	var shmSize int64
	if copts.shmSize != "" {
		shmSize, err = units.RAMInBytes(copts.shmSize)
		if err != nil {
			return nil, nil, nil, err
		}
	}

	// TODO FIXME units.RAMInBytes should have a uint64 version
	var maxIOBandwidth int64
	if copts.ioMaxBandwidth != "" {
		maxIOBandwidth, err = units.RAMInBytes(copts.ioMaxBandwidth)
		if err != nil {
			return nil, nil, nil, err
		}
		if maxIOBandwidth < 0 {
			return nil, nil, nil, fmt.Errorf("invalid value: %s. Maximum IO Bandwidth must be positive", copts.ioMaxBandwidth)
		}
	}

	var binds []string
	volumes := copts.volumes.GetMap()
	// add any bind targets to the list of container volumes
	for bind := range copts.volumes.GetMap() {
		if arr := volumeSplitN(bind, 2); len(arr) > 1 {
			// after creating the bind mount we want to delete it from the copts.volumes values because
			// we do not want bind mounts being committed to image configs
			binds = append(binds, bind)
			// We should delete from the map (`volumes`) here, as deleting from copts.volumes will not work if
			// there are duplicates entries.
			delete(volumes, bind)
		}
	}

	// Can't evaluate options passed into --tmpfs until we actually mount
	tmpfs := make(map[string]string)
	for _, t := range copts.tmpfs.GetAll() {
		if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
			tmpfs[arr[0]] = arr[1]
		} else {
			tmpfs[arr[0]] = ""
		}
	}

	var (
		runCmd     strslice.StrSlice
		entrypoint strslice.StrSlice
	)

	if len(copts.Args) > 0 {
		runCmd = strslice.StrSlice(copts.Args)
	}

	if copts.entrypoint != "" {
		entrypoint = strslice.StrSlice{copts.entrypoint}
	} else if flags.Changed("entrypoint") {
		// if `--entrypoint=` is parsed then Entrypoint is reset
		entrypoint = []string{""}
	}

	ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll())
	if err != nil {
		return nil, nil, nil, err
	}

	// Merge in exposed ports to the map of published ports
	for _, e := range copts.expose.GetAll() {
		if strings.Contains(e, ":") {
			return nil, nil, nil, fmt.Errorf("invalid port format for --expose: %s", e)
		}
		//support two formats for expose, original format <portnum>/[<proto>] or <startport-endport>/[<proto>]
		proto, port := nat.SplitProtoPort(e)
		//parse the start and end port and create a sequence of ports to expose
		//if expose a port, the start and end port are the same
		start, end, err := nat.ParsePortRange(port)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err)
		}
		for i := start; i <= end; i++ {
			p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
			if err != nil {
				return nil, nil, nil, err
			}
			if _, exists := ports[p]; !exists {
				ports[p] = struct{}{}
			}
		}
	}

	// parse device mappings
	deviceMappings := []container.DeviceMapping{}
	for _, device := range copts.devices.GetAll() {
		deviceMapping, err := parseDevice(device)
		if err != nil {
			return nil, nil, nil, err
		}
		deviceMappings = append(deviceMappings, deviceMapping)
	}

	// collect all the environment variables for the container
	envVariables, err := runconfigopts.ReadKVStrings(copts.envFile.GetAll(), copts.env.GetAll())
	if err != nil {
		return nil, nil, nil, err
	}

	// collect all the labels for the container
	labels, err := runconfigopts.ReadKVStrings(copts.labelsFile.GetAll(), copts.labels.GetAll())
	if err != nil {
		return nil, nil, nil, err
	}

	ipcMode := container.IpcMode(copts.ipcMode)
	if !ipcMode.Valid() {
		return nil, nil, nil, fmt.Errorf("--ipc: invalid IPC mode")
	}

	pidMode := container.PidMode(copts.pidMode)
	if !pidMode.Valid() {
		return nil, nil, nil, fmt.Errorf("--pid: invalid PID mode")
	}

	utsMode := container.UTSMode(copts.utsMode)
	if !utsMode.Valid() {
		return nil, nil, nil, fmt.Errorf("--uts: invalid UTS mode")
	}

	usernsMode := container.UsernsMode(copts.usernsMode)
	if !usernsMode.Valid() {
		return nil, nil, nil, fmt.Errorf("--userns: invalid USER mode")
	}

	restartPolicy, err := runconfigopts.ParseRestartPolicy(copts.restartPolicy)
	if err != nil {
		return nil, nil, nil, err
	}

	loggingOpts, err := parseLoggingOpts(copts.loggingDriver, copts.loggingOpts.GetAll())
	if err != nil {
		return nil, nil, nil, err
	}

	securityOpts, err := parseSecurityOpts(copts.securityOpt.GetAll())
	if err != nil {
		return nil, nil, nil, err
	}

	storageOpts, err := parseStorageOpts(copts.storageOpt.GetAll())
	if err != nil {
		return nil, nil, nil, err
	}

	// Healthcheck
	var healthConfig *container.HealthConfig
	haveHealthSettings := copts.healthCmd != "" ||
		copts.healthInterval != 0 ||
		copts.healthTimeout != 0 ||
		copts.healthRetries != 0
	if copts.noHealthcheck {
		if haveHealthSettings {
			return nil, nil, nil, fmt.Errorf("--no-healthcheck conflicts with --health-* options")
		}
		test := strslice.StrSlice{"NONE"}
		healthConfig = &container.HealthConfig{Test: test}
	} else if haveHealthSettings {
		var probe strslice.StrSlice
		if copts.healthCmd != "" {
			args := []string{"CMD-SHELL", copts.healthCmd}
			probe = strslice.StrSlice(args)
		}
		if copts.healthInterval < 0 {
			return nil, nil, nil, fmt.Errorf("--health-interval cannot be negative")
		}
		if copts.healthTimeout < 0 {
			return nil, nil, nil, fmt.Errorf("--health-timeout cannot be negative")
		}

		healthConfig = &container.HealthConfig{
			Test:     probe,
			Interval: copts.healthInterval,
			Timeout:  copts.healthTimeout,
			Retries:  copts.healthRetries,
		}
	}

	resources := container.Resources{
		CgroupParent:         copts.cgroupParent,
		Memory:               memory,
		MemoryReservation:    memoryReservation,
		MemorySwap:           memorySwap,
		MemorySwappiness:     &copts.swappiness,
		KernelMemory:         kernelMemory,
		OomKillDisable:       &copts.oomKillDisable,
		NanoCPUs:             copts.cpus.Value(),
		CPUCount:             copts.cpuCount,
		CPUPercent:           copts.cpuPercent,
		CPUShares:            copts.cpuShares,
		CPUPeriod:            copts.cpuPeriod,
		CpusetCpus:           copts.cpusetCpus,
		CpusetMems:           copts.cpusetMems,
		CPUQuota:             copts.cpuQuota,
		CPURealtimePeriod:    copts.cpuRealtimePeriod,
		CPURealtimeRuntime:   copts.cpuRealtimeRuntime,
		PidsLimit:            copts.pidsLimit,
		BlkioWeight:          copts.blkioWeight,
		BlkioWeightDevice:    copts.blkioWeightDevice.GetList(),
		BlkioDeviceReadBps:   copts.deviceReadBps.GetList(),
		BlkioDeviceWriteBps:  copts.deviceWriteBps.GetList(),
		BlkioDeviceReadIOps:  copts.deviceReadIOps.GetList(),
		BlkioDeviceWriteIOps: copts.deviceWriteIOps.GetList(),
		IOMaximumIOps:        copts.ioMaxIOps,
		IOMaximumBandwidth:   uint64(maxIOBandwidth),
		Ulimits:              copts.ulimits.GetList(),
		Devices:              deviceMappings,
	}

	config := &container.Config{
		Hostname:     copts.hostname,
		ExposedPorts: ports,
		User:         copts.user,
		Tty:          copts.tty,
		// TODO: deprecated, it comes from -n, --networking
		// it's still needed internally to set the network to disabled
		// if e.g. bridge is none in daemon opts, and in inspect
		NetworkDisabled: false,
		OpenStdin:       copts.stdin,
		AttachStdin:     attachStdin,
		AttachStdout:    attachStdout,
		AttachStderr:    attachStderr,
		Env:             envVariables,
		Cmd:             runCmd,
		Image:           copts.Image,
		Volumes:         volumes,
		MacAddress:      copts.macAddress,
		Entrypoint:      entrypoint,
		WorkingDir:      copts.workingDir,
		Labels:          runconfigopts.ConvertKVStringsToMap(labels),
		Healthcheck:     healthConfig,
	}
	if flags.Changed("stop-signal") {
		config.StopSignal = copts.stopSignal
	}
	if flags.Changed("stop-timeout") {
		config.StopTimeout = &copts.stopTimeout
	}

	hostConfig := &container.HostConfig{
		Binds:           binds,
		ContainerIDFile: copts.containerIDFile,
		OomScoreAdj:     copts.oomScoreAdj,
		AutoRemove:      copts.autoRemove,
		Privileged:      copts.privileged,
		PortBindings:    portBindings,
		Links:           copts.links.GetAll(),
		PublishAllPorts: copts.publishAll,
		// Make sure the dns fields are never nil.
		// New containers don't ever have those fields nil,
		// but pre created containers can still have those nil values.
		// See https://github.com/docker/docker/pull/17779
		// for a more detailed explanation on why we don't want that.
		DNS:            copts.dns.GetAllOrEmpty(),
		DNSSearch:      copts.dnsSearch.GetAllOrEmpty(),
		DNSOptions:     copts.dnsOptions.GetAllOrEmpty(),
		ExtraHosts:     copts.extraHosts.GetAll(),
		VolumesFrom:    copts.volumesFrom.GetAll(),
		NetworkMode:    container.NetworkMode(copts.netMode),
		IpcMode:        ipcMode,
		PidMode:        pidMode,
		UTSMode:        utsMode,
		UsernsMode:     usernsMode,
		CapAdd:         strslice.StrSlice(copts.capAdd.GetAll()),
		CapDrop:        strslice.StrSlice(copts.capDrop.GetAll()),
		GroupAdd:       copts.groupAdd.GetAll(),
		RestartPolicy:  restartPolicy,
		SecurityOpt:    securityOpts,
		StorageOpt:     storageOpts,
		ReadonlyRootfs: copts.readonlyRootfs,
		LogConfig:      container.LogConfig{Type: copts.loggingDriver, Config: loggingOpts},
		VolumeDriver:   copts.volumeDriver,
		Isolation:      container.Isolation(copts.isolation),
		ShmSize:        shmSize,
		Resources:      resources,
		Tmpfs:          tmpfs,
		Sysctls:        copts.sysctls.GetAll(),
		Runtime:        copts.runtime,
	}

	// only set this value if the user provided the flag, else it should default to nil
	if flags.Changed("init") {
		hostConfig.Init = &copts.init
	}

	// When allocating stdin in attached mode, close stdin at client disconnect
	if config.OpenStdin && config.AttachStdin {
		config.StdinOnce = true
	}

	networkingConfig := &networktypes.NetworkingConfig{
		EndpointsConfig: make(map[string]*networktypes.EndpointSettings),
	}

	if copts.ipv4Address != "" || copts.ipv6Address != "" || copts.linkLocalIPs.Len() > 0 {
		epConfig := &networktypes.EndpointSettings{}
		networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig

		epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{
			IPv4Address: copts.ipv4Address,
			IPv6Address: copts.ipv6Address,
		}

		if copts.linkLocalIPs.Len() > 0 {
			epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len())
			copy(epConfig.IPAMConfig.LinkLocalIPs, copts.linkLocalIPs.GetAll())
		}
	}

	if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 {
		epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)]
		if epConfig == nil {
			epConfig = &networktypes.EndpointSettings{}
		}
		epConfig.Links = make([]string, len(hostConfig.Links))
		copy(epConfig.Links, hostConfig.Links)
		networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig
	}

	if copts.aliases.Len() > 0 {
		epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)]
		if epConfig == nil {
			epConfig = &networktypes.EndpointSettings{}
		}
		epConfig.Aliases = make([]string, copts.aliases.Len())
		copy(epConfig.Aliases, copts.aliases.GetAll())
		networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig
	}

	return config, hostConfig, networkingConfig, nil
}
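One detail worth isolating from parse(): each --tmpfs value is split on the first ':' into a mount path and its mount options, and a value without options maps to the empty string. A stand-alone sketch on sample flag values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sample --tmpfs flag values (hypothetical).
	flagValues := []string{"/run:rw,noexec,nosuid,size=65536k", "/tmp"}

	// Mirror the loop in parse(): split on the first ':' into path and options.
	tmpfs := make(map[string]string)
	for _, t := range flagValues {
		if arr := strings.SplitN(t, ":", 2); len(arr) > 1 {
			tmpfs[arr[0]] = arr[1]
		} else {
			tmpfs[arr[0]] = ""
		}
	}

	fmt.Println(tmpfs) // map[/run:rw,noexec,nosuid,size=65536k /tmp:]
}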
Example #23
func buildCommand(c *cli.Context) {

	var (
		rockerfile *build.Rockerfile
		err        error
	)

	// We don't want info level for 'print' mode
	// So log only errors unless 'debug' is on
	if c.Bool("print") && log.StandardLogger().Level != log.DebugLevel {
		log.StandardLogger().Level = log.ErrorLevel
	}

	vars, err := template.VarsFromFileMulti(c.StringSlice("vars"))
	if err != nil {
		log.Fatal(err)
	}

	cliVars, err := template.VarsFromStrings(c.StringSlice("var"))
	if err != nil {
		log.Fatal(err)
	}

	vars = vars.Merge(cliVars)

	if c.Bool("demand-artifacts") {
		vars["DemandArtifacts"] = true
	}

	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	configFilename := c.String("file")
	contextDir := wd

	if configFilename == "-" {

		rockerfile, err = build.NewRockerfile(filepath.Base(wd), os.Stdin, vars, template.Funs{})
		if err != nil {
			log.Fatal(err)
		}

	} else {

		if !filepath.IsAbs(configFilename) {
			configFilename = filepath.Join(wd, configFilename)
		}

		rockerfile, err = build.NewRockerfileFromFile(configFilename, vars, template.Funs{})
		if err != nil {
			log.Fatal(err)
		}

		// Initialize context dir
		contextDir = filepath.Dir(configFilename)
	}

	args := c.Args()
	if len(args) > 0 {
		contextDir = args[0]
		if !filepath.IsAbs(contextDir) {
			contextDir = filepath.Join(wd, args[0])
		}
	} else if contextDir != wd {
		log.Warningf("Implicit context directory used: %s. You can override context directory using the last argument.", contextDir)
	}

	dir, err := os.Stat(contextDir)
	if err != nil {
		log.Errorf("Problem with opening directory %s, error: %s", contextDir, err)
		os.Exit(2)
	}
	if !dir.IsDir() {
		log.Errorf("Context directory %s is not a directory.", contextDir)
		os.Exit(2)

	}
	log.Debugf("Context directory: %s", contextDir)

	if c.Bool("print") {
		fmt.Print(rockerfile.Content)
		os.Exit(0)
	}

	dockerignore := []string{}

	dockerignoreFilename := filepath.Join(contextDir, ".dockerignore")
	if _, err := os.Stat(dockerignoreFilename); err == nil {
		if dockerignore, err = build.ReadDockerignoreFile(dockerignoreFilename); err != nil {
			log.Fatal(err)
		}
	}

	var config *dockerclient.Config
	config = dockerclient.NewConfigFromCli(c)

	dockerClient, err := dockerclient.NewFromConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	cacheDir, err := util.MakeAbsolute(c.String("cache-dir"))
	if err != nil {
		log.Fatal(err)
	}

	var cache build.Cache
	if !c.Bool("no-cache") {
		cache = build.NewCacheFS(cacheDir)
	}

	var (
		stdoutContainerFormatter log.Formatter = &log.JSONFormatter{}
		stderrContainerFormatter log.Formatter = &log.JSONFormatter{}
	)
	if !c.GlobalBool("json") {
		stdoutContainerFormatter = build.NewMonochromeContainerFormatter()
		stderrContainerFormatter = build.NewColoredContainerFormatter()
	}

	options := build.DockerClientOptions{
		Client:                   dockerClient,
		Auth:                     initAuth(c),
		Log:                      log.StandardLogger(),
		S3storage:                s3.New(dockerClient, cacheDir),
		StdoutContainerFormatter: stdoutContainerFormatter,
		StderrContainerFormatter: stderrContainerFormatter,
		PushRetryCount:           c.Int("push-retry"),
		Host:                     config.Host,
		LogExactSizes:            c.GlobalBool("json"),
	}
	client := build.NewDockerClient(options)

	builder := build.New(client, rockerfile, cache, build.Config{
		InStream:      os.Stdin,
		OutStream:     os.Stdout,
		ContextDir:    contextDir,
		Dockerignore:  dockerignore,
		ArtifactsPath: c.String("artifacts-path"),
		Pull:          c.Bool("pull"),
		NoGarbage:     c.Bool("no-garbage"),
		Attach:        c.Bool("attach"),
		Verbose:       c.GlobalBool("verbose"),
		ID:            c.String("id"),
		NoCache:       c.Bool("no-cache"),
		ReloadCache:   c.Bool("reload-cache"),
		Push:          c.Bool("push"),
		CacheDir:      cacheDir,
		LogJSON:       c.GlobalBool("json"),
		BuildArgs:     runconfigopts.ConvertKVStringsToMap(c.StringSlice("build-arg")),
	})

	plan, err := build.NewPlan(rockerfile.Commands(), true)
	if err != nil {
		log.Fatal(err)
	}

	// Check the docker connection before we actually run
	if err := dockerclient.Ping(dockerClient, 5000); err != nil {
		log.Fatal(err)
	}

	if err := builder.Run(plan); err != nil {
		log.Fatal(err)
	}

	fields := log.Fields{}
	if c.GlobalBool("json") {
		fields["size"] = builder.VirtualSize
		fields["delta"] = builder.ProducedSize
	}

	size := fmt.Sprintf("final size %s (+%s from the base image)",
		units.HumanSize(float64(builder.VirtualSize)),
		units.HumanSize(float64(builder.ProducedSize)),
	)

	log.WithFields(fields).Infof("Successfully built %.12s | %s", builder.GetImageID(), size)
}
Example #24
// Execute runs the command
func (c *CommandRun) Execute(b *Build) (s State, err error) {
	s = b.state

	if s.ImageID == "" && !s.NoBaseImage {
		return s, fmt.Errorf("Please provide a source image with `FROM` prior to run")
	}

	cmd := handleJSONArgs(c.cfg.args, c.cfg.attrs)

	if !c.cfg.attrs["json"] {
		cmd = append([]string{"/bin/sh", "-c"}, cmd...)
	}

	buildEnv := []string{}
	configEnv := runconfigopts.ConvertKVStringsToMap(s.Config.Env)
	for key, val := range s.NoCache.BuildArgs {
		if !b.allowedBuildArgs[key] {
			// skip build-args that are not in allowed list, meaning they have
			// not been defined by an "ARG" Dockerfile command yet.
			// This is an error condition but only if there is no "ARG" in the entire
			// Dockerfile, so we'll generate any necessary errors after we parsed
			// the entire file (see 'leftoverArgs' processing in evaluator.go )
			continue
		}
		if _, ok := configEnv[key]; !ok {
			buildEnv = append(buildEnv, fmt.Sprintf("%s=%s", key, val))
		}
	}

	// derive the command to use for probeCache() and to commit in this container.
	// Note that we only do this if there are any build-time env vars.  Also, we
	// use the special argument "|#" at the start of the args array. This will
	// avoid conflicts with any RUN command since commands can not
	// start with | (vertical bar). The "#" (number of build envs) is there to
	// help ensure proper cache matches. We don't want a RUN command
	// that starts with "foo=abc" to be considered part of a build-time env var.
	saveCmd := cmd
	if len(buildEnv) > 0 {
		sort.Strings(buildEnv)
		tmpEnv := append([]string{fmt.Sprintf("|%d", len(buildEnv))}, buildEnv...)
		saveCmd = append(tmpEnv, saveCmd...)
	}

	s.Commit("RUN %q", saveCmd)

	// Check cache
	s, hit, err := b.probeCache(s)
	if err != nil {
		return s, err
	}
	if hit {
		return s, nil
	}

	// We run this command in the container using CMD
	origCmd := s.Config.Cmd
	origEntrypoint := s.Config.Entrypoint
	origEnv := s.Config.Env
	s.Config.Cmd = cmd
	s.Config.Entrypoint = []string{}
	s.Config.Env = append(s.Config.Env, buildEnv...)

	if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil {
		return s, err
	}

	if err = b.client.RunContainer(s.NoCache.ContainerID, false); err != nil {
		b.client.RemoveContainer(s.NoCache.ContainerID)
		return s, err
	}

	// Restore command after commit
	s.Config.Cmd = origCmd
	s.Config.Entrypoint = origEntrypoint
	s.Config.Env = origEnv

	return s, nil
}
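The cache-key construction described in the comments is easiest to see on concrete values: when build-time variables are present, the committed command is prefixed with "|<count>" followed by the sorted variables, so the same RUN line built with different build args yields a different cache key. A stand-alone sketch using sample values:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// The command that will actually run in the container (sample).
	cmd := []string{"/bin/sh", "-c", "make install"}

	// Build-time env vars not already present in the image config (sample).
	buildEnv := []string{"VERSION=1.2.3", "CHANNEL=stable"}

	// Mirror the saveCmd construction above: "|<count>" + sorted build env + cmd.
	saveCmd := cmd
	if len(buildEnv) > 0 {
		sort.Strings(buildEnv)
		tmpEnv := append([]string{fmt.Sprintf("|%d", len(buildEnv))}, buildEnv...)
		saveCmd = append(tmpEnv, saveCmd...)
	}

	fmt.Println(saveCmd) // [|2 CHANNEL=stable VERSION=1.2.3 /bin/sh -c make install]
}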
Example #25
func runBuild(dockerCli *command.DockerCli, options buildOptions) error {

	var (
		buildCtx io.ReadCloser
		err      error
	)

	specifiedContext := options.context

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
		progBuff      io.Writer
		buildBuff     io.Writer
	)

	progBuff = dockerCli.Out()
	buildBuff = dockerCli.Out()
	if options.quiet {
		progBuff = bytes.NewBuffer(nil)
		buildBuff = bytes.NewBuffer(nil)
	}

	switch {
	case specifiedContext == "-":
		buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName)
	case urlutil.IsGitURL(specifiedContext):
		tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName)
	case urlutil.IsURL(specifiedContext):
		buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
	default:
		contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
	}

	if err != nil {
		if options.quiet && urlutil.IsURL(specifiedContext) {
			fmt.Fprintln(dockerCli.Err(), progBuff)
		}
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	if buildCtx == nil {
		// And canonicalize dockerfile name to a platform-independent one
		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
		if err != nil {
			return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
		}

		f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
		if err != nil && !os.IsNotExist(err) {
			return err
		}
		defer f.Close()

		var excludes []string
		if err == nil {
			excludes, err = dockerignore.ReadAll(f)
			if err != nil {
				return err
			}
		}

		if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil {
			return fmt.Errorf("Error checking context: '%s'.", err)
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile. Ignore errors here, as they will have been
		// caught by validateContextDirectory above.
		var includes = []string{"."}
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", relDockerfile)
		}

		buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		})
		if err != nil {
			return err
		}
	}

	ctx := context.Background()

	var resolvedTags []*resolvedTag
	if command.IsTrusted() {
		translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
			return TrustedReference(ctx, dockerCli, ref)
		}
		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
		// Dockerfile which uses trusted pulls.
		buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
	}

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)
	if !dockerCli.Out().IsTerminal() {
		progressOutput = &lastProgressOutput{output: progressOutput}
	}

	var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if options.memory != "" {
		parsedMemory, err := units.RAMInBytes(options.memory)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if options.memorySwap != "" {
		if options.memorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(options.memorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if options.shmSize != "" {
		shmSize, err = units.RAMInBytes(options.shmSize)
		if err != nil {
			return err
		}
	}

	authConfig, _ := dockerCli.CredentialsStore().GetAll()
	buildOptions := types.ImageBuildOptions{
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           options.tags.GetAll(),
		SuppressOutput: options.quiet,
		NoCache:        options.noCache,
		Remove:         options.rm,
		ForceRemove:    options.forceRm,
		PullParent:     options.pull,
		Isolation:      container.Isolation(options.isolation),
		CPUSetCPUs:     options.cpuSetCpus,
		CPUSetMems:     options.cpuSetMems,
		CPUShares:      options.cpuShares,
		CPUQuota:       options.cpuQuota,
		CPUPeriod:      options.cpuPeriod,
		CgroupParent:   options.cgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        options.ulimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()),
		AuthConfigs:    authConfig,
		Labels:         runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
		CacheFrom:      options.cacheFrom,
	}

	response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
	if err != nil {
		if options.quiet {
			fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
		}
		return err
	}
	defer response.Body.Close()

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if options.quiet {
				fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
			}
			return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
	}

	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
		fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if options.quiet {
		fmt.Fprintf(dockerCli.Out(), "%s", buildBuff)
	}

	if command.IsTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #26
// RUN some command yo
//
// run a command and commit the image. Args are automatically prepended with
// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under
// Windows, in the event there is only one argument. The difference in processing:
//
// RUN echo hi          # sh -c echo hi       (Linux)
// RUN echo hi          # cmd /S /C echo hi   (Windows)
// RUN [ "echo", "hi" ] # echo hi
//
func run(b *Builder, args []string, attributes map[string]bool, original string) error {
	if b.image == "" && !b.noBaseImage {
		return fmt.Errorf("Please provide a source image with `from` prior to run")
	}

	if err := b.flags.Parse(); err != nil {
		return err
	}

	args = handleJSONArgs(args, attributes)

	if !attributes["json"] {
		args = append(getShell(b.runConfig), args...)
	}
	config := &container.Config{
		Cmd:   strslice.StrSlice(args),
		Image: b.image,
	}

	// stash the cmd
	cmd := b.runConfig.Cmd
	if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 {
		b.runConfig.Cmd = config.Cmd
	}

	// stash the config environment
	env := b.runConfig.Env

	defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)
	defer func(env []string) { b.runConfig.Env = env }(env)

	// derive the net build-time environment for this run. We let config
	// environment override the build time environment.
	// This means that we take the b.buildArgs list of env vars and remove
	// any of those variables that are defined as part of the container. In other
	// words, anything in b.Config.Env. What's left is the list of build-time env
	// vars that we need to add to each RUN command - note the list could be empty.
	//
	// We don't persist the build time environment with container's config
	// environment, but just sort and prepend it to the command string at time
	// of commit.
	// This helps with tracing back the image's actual environment at the time
	// of RUN, without leaking it to the final image. It also aids cache
	// lookup for same image built with same build time environment.
	cmdBuildEnv := []string{}
	configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env)
	for key, val := range b.options.BuildArgs {
		if !b.isBuildArgAllowed(key) {
			// skip build-args that are not in allowed list, meaning they have
			// not been defined by an "ARG" Dockerfile command yet.
			// This is an error condition but only if there is no "ARG" in the entire
			// Dockerfile, so we'll generate any necessary errors after we parsed
			// the entire file (see 'leftoverArgs' processing in evaluator.go )
			continue
		}
		if _, ok := configEnv[key]; !ok {
			cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val))
		}
	}

	// derive the command to use for probeCache() and to commit in this container.
	// Note that we only do this if there are any build-time env vars.  Also, we
	// use the special argument "|#" at the start of the args array. This will
	// avoid conflicts with any RUN command since commands can not
	// start with | (vertical bar). The "#" (number of build envs) is there to
	// help ensure proper cache matches. We don't want a RUN command
	// that starts with "foo=abc" to be considered part of a build-time env var.
	saveCmd := config.Cmd
	if len(cmdBuildEnv) > 0 {
		sort.Strings(cmdBuildEnv)
		tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...)
		saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...))
	}

	b.runConfig.Cmd = saveCmd
	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil
	}

	// set Cmd manually, this is special case only for Dockerfiles
	b.runConfig.Cmd = config.Cmd
	// set build-time environment for 'run'.
	b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...)
	// set config as already being escaped, this prevents double escaping on windows
	b.runConfig.ArgsEscaped = true

	logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd)

	cID, err := b.create()
	if err != nil {
		return err
	}

	if err := b.run(cID); err != nil {
		return err
	}

	// revert to original config environment and set the command string to
	// have the build-time env vars in it (if any) so that future cache look-ups
	// properly match it.
	b.runConfig.Env = env
	b.runConfig.Cmd = saveCmd
	return b.commit(cID, cmd, "run")
}
Example #27
func (b *Builder) processImageFrom(img builder.Image) error {
	if img != nil {
		b.image = img.ImageID()

		if img.RunConfig() != nil {
			b.runConfig = img.RunConfig()
		}
	}

	// Check to see if we have a default PATH, note that windows won't
	// have one as its set by HCS
	if system.DefaultPathEnv != "" {
		// Convert the slice of strings that represent the current list
		// of env vars into a map so we can see if PATH is already set.
		// If its not set then go ahead and give it our default value
		configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env)
		if _, ok := configEnv["PATH"]; !ok {
			b.runConfig.Env = append(b.runConfig.Env,
				"PATH="+system.DefaultPathEnv)
		}
	}

	if img == nil {
		// Typically this means they used "FROM scratch"
		return nil
	}

	// Process ONBUILD triggers if they exist
	if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 {
		word := "trigger"
		if nTriggers > 1 {
			word = "triggers"
		}
		fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word)
	}

	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
	onBuildTriggers := b.runConfig.OnBuild
	b.runConfig.OnBuild = []string{}

	// parse the ONBUILD triggers by invoking the parser
	for _, step := range onBuildTriggers {
		ast, err := parser.Parse(strings.NewReader(step))
		if err != nil {
			return err
		}

		for i, n := range ast.Children {
			switch strings.ToUpper(n.Value) {
			case "ONBUILD":
				return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
			case "MAINTAINER", "FROM":
				return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
			}

			if err := b.dispatch(i, n); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #28
func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) {
	var service swarm.ServiceSpec

	envVariables, err := runconfigopts.ReadKVStrings(opts.envFile.GetAll(), opts.env.GetAll())
	if err != nil {
		return service, err
	}

	currentEnv := make([]string, 0, len(envVariables))
	for _, env := range envVariables { // need to process each var, in order
		k := strings.SplitN(env, "=", 2)[0]
		for i, current := range currentEnv { // remove duplicates
			if current == env {
				continue // no update required, may hide this behind flag to preserve order of envVariables
			}
			if strings.HasPrefix(current, k+"=") {
				currentEnv = append(currentEnv[:i], currentEnv[i+1:]...)
			}
		}
		currentEnv = append(currentEnv, env)
	}

	service = swarm.ServiceSpec{
		Annotations: swarm.Annotations{
			Name:   opts.name,
			Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()),
		},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image:    opts.image,
				Args:     opts.args,
				Env:      currentEnv,
				Hostname: opts.hostname,
				Labels:   runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()),
				Dir:      opts.workdir,
				User:     opts.user,
				Groups:   opts.groups.GetAll(),
				TTY:      opts.tty,
				Mounts:   opts.mounts.Value(),
				DNSConfig: &swarm.DNSConfig{
					Nameservers: opts.dns.GetAll(),
					Search:      opts.dnsSearch.GetAll(),
					Options:     opts.dnsOption.GetAll(),
				},
				Hosts:           convertExtraHostsToSwarmHosts(opts.hosts.GetAll()),
				StopGracePeriod: opts.stopGrace.Value(),
				Secrets:         nil,
			},
			Networks:      convertNetworks(opts.networks.GetAll()),
			Resources:     opts.resources.ToResourceRequirements(),
			RestartPolicy: opts.restartPolicy.ToRestartPolicy(),
			Placement: &swarm.Placement{
				Constraints: opts.constraints.GetAll(),
			},
			LogDriver: opts.logDriver.toLogDriver(),
		},
		Networks: convertNetworks(opts.networks.GetAll()),
		Mode:     swarm.ServiceMode{},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:     opts.update.parallelism,
			Delay:           opts.update.delay,
			Monitor:         opts.update.monitor,
			FailureAction:   opts.update.onFailure,
			MaxFailureRatio: opts.update.maxFailureRatio.Value(),
		},
		EndpointSpec: opts.endpoint.ToEndpointSpec(),
	}

	healthConfig, err := opts.healthcheck.toHealthConfig()
	if err != nil {
		return service, err
	}
	service.TaskTemplate.ContainerSpec.Healthcheck = healthConfig

	switch opts.mode {
	case "global":
		if opts.replicas.Value() != nil {
			return service, fmt.Errorf("replicas can only be used with replicated mode")
		}

		service.Mode.Global = &swarm.GlobalService{}
	case "replicated":
		service.Mode.Replicated = &swarm.ReplicatedService{
			Replicas: opts.replicas.Value(),
		}
	default:
		return service, fmt.Errorf("Unknown mode: %s", opts.mode)
	}
	return service, nil
}
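The de-duplication loop at the top of this newer ToService keeps only the last occurrence of each KEY from the combined env-file/--env input, with the surviving entry ending up at the position of its last occurrence. Isolated and run on sample data, the same loop behaves like this:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Combined env-file + --env values, in the order they were given (sample data).
	envVariables := []string{"A=1", "B=2", "A=3", "C"}

	// Same loop as in ToService: later values for a key replace earlier ones.
	currentEnv := make([]string, 0, len(envVariables))
	for _, env := range envVariables {
		k := strings.SplitN(env, "=", 2)[0]
		for i, current := range currentEnv {
			if current == env {
				continue
			}
			if strings.HasPrefix(current, k+"=") {
				currentEnv = append(currentEnv[:i], currentEnv[i+1:]...)
			}
		}
		currentEnv = append(currentEnv, env)
	}

	fmt.Println(currentEnv) // [B=2 A=3 C]
}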