Example #1
File: image.go Project: m1911/hyper
// sanitizeRepoAndTags parses the raw "t" parameter received from the client
// to a slice of repoAndTag.
// It also validates each repoName and tag.
func sanitizeRepoAndTags(names []string) ([]repoAndTag, error) {
	var (
		repoAndTags []repoAndTag
		// This map is used for deduplicating the "-t" parameter.
		uniqNames = make(map[string]struct{})
	)
	for _, repo := range names {
		name, tag := parsers.ParseRepositoryTag(repo)
		if name == "" {
			continue
		}

		if err := registry.ValidateRepositoryName(name); err != nil {
			return nil, err
		}

		nameWithTag := name
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return nil, err
			}
			nameWithTag += ":" + tag
		} else {
			nameWithTag += ":" + tags.DefaultTag
		}
		if _, exists := uniqNames[nameWithTag]; !exists {
			uniqNames[nameWithTag] = struct{}{}
			repoAndTags = append(repoAndTags, repoAndTag{repo: name, tag: tag})
		}
	}
	return repoAndTags, nil
}
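
Note: below is a tiny, self-contained sketch of the deduplication step alone, mirroring the uniqNames map above; repository/tag parsing and validation are left out, and the helper name dedupe is made up for this illustration:

package main

import "fmt"

// dedupe keeps the first occurrence of each fully qualified repo:tag name,
// mirroring the uniqNames map used by sanitizeRepoAndTags.
func dedupe(names []string) []string {
	seen := make(map[string]struct{})
	out := make([]string, 0, len(names))
	for _, n := range names {
		if _, exists := seen[n]; exists {
			continue
		}
		seen[n] = struct{}{}
		out = append(out, n)
	}
	return out
}

func main() {
	fmt.Println(dedupe([]string{"busybox:latest", "busybox:latest", "busybox:1.0"}))
	// Output: [busybox:latest busybox:1.0]
}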
Example #2
File: pull.go Project: m1911/hyper
func (cli Docker) SendCmdPull(image string, imagePullConfig *graph.ImagePullConfig) ([]byte, int, error) {
	// We need to create a container via an image object. If the image
	// is not stored locally, we need to pull it from the Docker Hub.

	// Get a Repository name and tag name from the argument, but be careful
	// with the Repository name with a port number.  For example:
	//      localdomain:5000/samba/hipache:latest
	repository, tag := parsers.ParseRepositoryTag(image)
	if err := registry.ValidateRepositoryName(repository); err != nil {
		return nil, -1, err
	}
	if len(tag) > 0 {
		if err := tags.ValidateTagName(tag); err != nil {
			return nil, -1, err
		}
	} else {
		tag = tags.DefaultTag
	}

	glog.V(3).Infof("The Repository is %s, and the tag is %s", repository, tag)
	glog.V(3).Info("pull the image from the repository!")
	err := cli.daemon.Repositories().Pull(repository, tag, imagePullConfig)
	if err != nil {
		return nil, -1, err
	}
	return nil, 200, nil
}
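
Note: the comment above warns about repository names that carry a registry port, e.g. localdomain:5000/samba/hipache:latest. The stand-alone sketch below approximates how such a name can be split into repository and tag; it is an assumption-based stand-in for parsers.ParseRepositoryTag, which additionally handles '@'-separated digests:

package main

import (
	"fmt"
	"strings"
)

// splitRepoTag splits "repo[:tag]" on the last colon, but treats the colon as
// part of the repository when the trailing piece contains a "/", so a registry
// port such as localdomain:5000 is not mistaken for a tag.
func splitRepoTag(name string) (repo, tag string) {
	i := strings.LastIndex(name, ":")
	if i < 0 || strings.Contains(name[i+1:], "/") {
		return name, ""
	}
	return name[:i], name[i+1:]
}

func main() {
	for _, n := range []string{
		"busybox",
		"busybox:1.27",
		"localdomain:5000/samba/hipache",
		"localdomain:5000/samba/hipache:latest",
	} {
		repo, tag := splitRepoTag(n)
		fmt.Printf("%-40s repo=%q tag=%q\n", n, repo, tag)
	}
}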
Example #3
func (store *TagStore) SetLoad(repoName, tag, imageName string, force bool, out io.Writer) error {
	img, err := store.LookupImage(imageName)
	store.Lock()
	defer store.Unlock()
	if err != nil {
		return err
	}
	if tag == "" {
		tag = tags.DEFAULTTAG
	}
	if err := validateRepoName(repoName); err != nil {
		return err
	}
	if err := tags.ValidateTagName(tag); err != nil {
		if _, formatError := err.(tags.ErrTagInvalidFormat); !formatError {
			return err
		}
		if _, dErr := digest.ParseDigest(tag); dErr != nil {
			// Still return the tag validation error.
			// It's more likely to be a user generated issue.
			return err
		}
	}
	if err := store.reload(); err != nil {
		return err
	}
	var repo Repository
	repoName = registry.NormalizeLocalName(repoName)
	if r, exists := store.Repositories[repoName]; exists {
		repo = r
		if old, exists := store.Repositories[repoName][tag]; exists {

			if !force {
				return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", tag, old)
			}

			if old != img.ID && out != nil {

				fmt.Fprintf(out, "The image %s:%s already exists, renaming the old one with ID %s to empty string\n", repoName, tag, old[:12])

			}
		}
	} else {
		repo = make(map[string]string)
		store.Repositories[repoName] = repo
	}
	repo[tag] = img.ID
	return store.save()
}
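
Note: SetLoad only falls back to the digest check when the validation error is specifically a tag-format error, which it detects with a type assertion. The sketch below reproduces that control flow with stand-in error types and a fake digest parser; none of the identifiers are the real Docker ones:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// errTagInvalidFormat plays the role of tags.ErrTagInvalidFormat.
type errTagInvalidFormat struct{ tag string }

func (e errTagInvalidFormat) Error() string { return "invalid tag format: " + e.tag }

// validateTagName uses a stand-in rule: a colon makes the tag malformed.
func validateTagName(tag string) error {
	if strings.Contains(tag, ":") {
		return errTagInvalidFormat{tag}
	}
	return nil
}

// parseDigest is a stand-in for digest.ParseDigest.
func parseDigest(s string) error {
	if !strings.HasPrefix(s, "sha256:") {
		return errors.New("not a digest")
	}
	return nil
}

func checkTagOrDigest(tag string) error {
	if err := validateTagName(tag); err != nil {
		if _, isFormatErr := err.(errTagInvalidFormat); !isFormatErr {
			return err // some other validation problem
		}
		if dErr := parseDigest(tag); dErr != nil {
			// Still return the tag validation error; it is the more
			// likely user mistake.
			return err
		}
	}
	return nil // a valid tag, or a value accepted as a digest
}

func main() {
	fmt.Println(checkTagOrDigest("latest"))          // <nil>
	fmt.Println(checkTagOrDigest("sha256:deadbeef")) // <nil> (accepted as digest)
	fmt.Println(checkTagOrDigest("bad:tag"))         // invalid tag format: bad:tag
}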
Example #4
// validateTag checks if the given image name can be resolved.
func validateTag(rawRepo string) (string, error) {
	repository, tag := parsers.ParseRepositoryTag(rawRepo)
	if err := registry.ValidateRepositoryName(repository); err != nil {
		return "", err
	}

	if len(tag) == 0 {
		return rawRepo, nil
	}

	if err := tags.ValidateTagName(tag); err != nil {
		return "", err
	}

	return rawRepo, nil
}
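
Note: to the best of my knowledge, the tag grammar enforced by tags.ValidateTagName in this Docker generation was one word character followed by up to 127 characters from [A-Za-z0-9_.-]; the sketch below assumes that pattern and is not the real implementation:

package main

import (
	"fmt"
	"regexp"
)

// validTag approximates the pattern assumed for tags.ValidateTagName.
var validTag = regexp.MustCompile(`^[\w][\w.-]{0,127}$`)

func validateTagName(tag string) error {
	if !validTag.MatchString(tag) {
		return fmt.Errorf("illegal tag name (%s)", tag)
	}
	return nil
}

func main() {
	for _, tag := range []string{"latest", "v1.2.3", "-leading-dash", "has space"} {
		fmt.Printf("%-14s %v\n", tag, validateTagName(tag))
	}
}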
Example #5
// setLoad stores the image in the tag store.
// If the imageName is already present in the repo, the '-f' flag must be used to replace the existing image.
func (store *TagStore) setLoad(repoName, tag, imageName string, force bool, out io.Writer) error {
	img, err := store.LookupImage(imageName)
	store.Lock()
	defer store.Unlock()
	if err != nil {
		return err
	}
	if tag == "" {
		tag = tags.DefaultTag
	}
	if err := validateRepoName(repoName); err != nil {
		return err
	}
	if err := tags.ValidateTagName(tag); err != nil {
		return err
	}
	if err := store.reload(); err != nil {
		return err
	}
	var repo Repository
	repoName = registry.NormalizeLocalName(repoName)
	if r, exists := store.Repositories[repoName]; exists {
		repo = r
		if old, exists := store.Repositories[repoName][tag]; exists {

			if !force {
				return fmt.Errorf("Conflict: Tag %s:%s is already set to image %s, if you want to replace it, please use -f option", repoName, tag, old[:12])
			}

			if old != img.ID && out != nil {

				fmt.Fprintf(out, "The image %s:%s already exists, renaming the old one with ID %s to empty string\n", repoName, tag, old[:12])

			}
		}
	} else {
		repo = make(map[string]string)
		store.Repositories[repoName] = repo
	}
	repo[tag] = img.ID
	return store.save()
}
Example #6
// Build is the main interface of the package; it assembles the builder
// struct and calls builder.Run() to do the real build work.
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)
	sf := streamformatter.NewJSONStreamFormatter()

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
		}
		defer f.Body.Close()
		ct := f.Header.Get("Content-Type")
		clen := int(f.ContentLength)
		contentType, bodyReader, err := inspectResponse(ct, f.Body, clen)

		defer bodyReader.Close()

		if err != nil {
			return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
		}
		if contentType == httputils.MimeTypes.TextPlain {
			dockerFile, err := ioutil.ReadAll(bodyReader)
			if err != nil {
				return err
			}

			// When we're downloading just a Dockerfile put it in
			// the default name - don't allow the client to move/specify it
			buildConfig.DockerfileName = api.DefaultDockerfileName

			c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
			if err != nil {
				return err
			}
			context = c
		} else {
			// Pass through - this is a pre-packaged context, presumably
			// with a Dockerfile with the right name inside it.
			prCfg := progressreader.Config{
				In:        bodyReader,
				Out:       buildConfig.Stdout,
				Formatter: sf,
				Size:      clen,
				NewLines:  true,
				ID:        "Downloading context",
				Action:    buildConfig.RemoteURL,
			}
			context = progressreader.New(prCfg)
		}
	}

	defer context.Close()

	builder := &builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfigs:     buildConfig.AuthConfigs,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CPUShares,
		cpuPeriod:       buildConfig.CPUPeriod,
		cpuQuota:        buildConfig.CPUQuota,
		cpuSetCpus:      buildConfig.CPUSetCpus,
		cpuSetMems:      buildConfig.CPUSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
		id:              stringid.GenerateRandomID(),
	}

	defer func() {
		builder.Daemon.Graph().Release(builder.id, builder.activeImages...)
	}()

	id, err := builder.Run(context)
	if err != nil {
		return err
	}
	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
Example #7
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, "Build a new image from the source code at PATH", true)
	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
	flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
	flBuildArg := opts.NewListOpts(opts.ValidateEnv)
	cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables")

	ulimits := make(map[string]*ulimit.Ulimit)
	flUlimits := opts.NewUlimitOpt(&ulimits)
	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")

	cmd.Require(flag.Exact, 1)

	// For trusted pull on "FROM <image>" instruction.
	addTrustedFlags(cmd, true)

	cmd.ParseFlags(args, true)

	var (
		context  io.ReadCloser
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil

	specifiedContext := cmd.Arg(0)

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
	)

	switch {
	case specifiedContext == "-":
		tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
	case urlutil.IsGitURL(specifiedContext) && hasGit:
		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
	case urlutil.IsURL(specifiedContext):
		tempDir, relDockerfile, err = getContextFromURL(cli.out, specifiedContext, *dockerfileName)
	default:
		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
	}

	if err != nil {
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	// Resolve the FROM lines in the Dockerfile to trusted digest references
	// using Notary. On a successful build, we must tag the resolved digests
	// to the original name specified in the Dockerfile.
	newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
	if err != nil {
		return fmt.Errorf("unable to process Dockerfile: %v", err)
	}
	defer newDockerfile.Close()

	// And canonicalize dockerfile name to a platform-independent one
	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
	if err != nil {
		return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
	}

	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	var excludes []string
	if err == nil {
		excludes, err = utils.ReadDockerIgnore(f)
		if err != nil {
			return err
		}
	}

	if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed.  The daemon will remove them for us, if needed, after it
	// parses the Dockerfile. Ignore errors here, as they will have been
	// caught by ValidateContextDirectory above.
	var includes = []string{"."}
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", relDockerfile)
	}

	context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	})
	if err != nil {
		return err
	}

	// Wrap the tar archive to replace the Dockerfile entry with the rewritten
	// Dockerfile which uses trusted pulls.
	context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)

	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	sf := streamformatter.NewStreamFormatter()
	var body io.Reader = progressreader.New(progressreader.Config{
		In:        context,
		Out:       cli.out,
		Formatter: sf,
		NewLines:  true,
		ID:        "",
		Action:    "Sending build context to Docker daemon",
	})

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}
	// Send the build context
	v := &url.Values{}

	// Check if the given image name can be resolved
	if *tag != "" {
		repository, tag := parsers.ParseRepositoryTag(*tag)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	v.Set("t", *tag)

	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}

	if *forceRm {
		v.Set("forcerm", "1")
	}

	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCPUQuota, 10))
	v.Set("cpuperiod", strconv.FormatInt(*flCPUPeriod, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)

	v.Set("dockerfile", relDockerfile)

	ulimitsVar := flUlimits.GetList()
	ulimitsJSON, err := json.Marshal(ulimitsVar)
	if err != nil {
		return err
	}
	v.Set("ulimits", string(ulimitsJSON))

	// collect all the build-time environment variables for the container
	buildArgs := runconfig.ConvertKVStringsToMap(flBuildArg.GetAll())
	buildArgsJSON, err := json.Marshal(buildArgs)
	if err != nil {
		return err
	}
	v.Set("buildargs", string(buildArgsJSON))

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
	headers.Set("Content-Type", "application/tar")

	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}

	serverResp, err := cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)

	// Windows: show error message about modified file permissions.
	if runtime.GOOS == "windows" {
		h, err := httputils.ParseServerHeader(serverResp.header.Get("Server"))
		if err == nil {
			if h.OS != "windows" {
				fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
			}
		}
	}

	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}

	if err != nil {
		return err
	}

	// Since the build was successful, now we must tag any of the resolved
	// images from the above Dockerfile rewrite.
	for _, resolved := range resolvedTags {
		if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
			return err
		}
	}

	return nil
}
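
Note: the .dockerignore handling above deliberately keeps the Dockerfile and .dockerignore in the upload even when an exclude pattern would drop them. The sketch below shows that decision in isolation, using filepath.Match over a pattern list as a much simpler stand-in for fileutils.Matches (the real helper also understands "!" negation, which this one does not):

package main

import (
	"fmt"
	"path/filepath"
)

// matchesAny reports whether name matches any of the exclude patterns.
func matchesAny(name string, patterns []string) bool {
	for _, p := range patterns {
		if ok, err := filepath.Match(p, name); err == nil && ok {
			return true
		}
	}
	return false
}

func main() {
	excludes := []string{"*.md", "*ignore", "Dockerfile"}
	includes := []string{"."}
	// If the excludes would drop .dockerignore or the Dockerfile, list them
	// explicitly so the daemon still receives both files.
	if matchesAny(".dockerignore", excludes) || matchesAny("Dockerfile", excludes) {
		includes = append(includes, ".dockerignore", "Dockerfile")
	}
	fmt.Println(includes) // [. .dockerignore Dockerfile]
}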
Example #8
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCpuPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
	flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")

	cmd.Require(flag.Exact, 1)
	cmd.ParseFlags(args, true)

	var (
		context  archive.Archive
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil
	if cmd.Arg(0) == "-" {
		// As a special case, 'docker build -' will build from either an empty context with the
		// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
		buf := bufio.NewReader(cli.in)
		magic, err := buf.Peek(tarHeaderSize)
		if err != nil && err != io.EOF {
			return fmt.Errorf("failed to peek context header from STDIN: %v", err)
		}
		if !archive.IsArchive(magic) {
			dockerfile, err := ioutil.ReadAll(buf)
			if err != nil {
				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
			}

			// -f option has no meaning when we're reading it from stdin,
			// so just use our default Dockerfile name
			*dockerfileName = api.DefaultDockerfileName
			context, err = archive.Generate(*dockerfileName, string(dockerfile))
		} else {
			context = ioutil.NopCloser(buf)
		}
	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
		isRemote = true
	} else {
		root := cmd.Arg(0)
		if urlutil.IsGitURL(root) {
			root, err = utils.GitClone(root)
			if err != nil {
				return err
			}
			defer os.RemoveAll(root)
		}
		if _, err := os.Stat(root); err != nil {
			return err
		}

		absRoot, err := filepath.Abs(root)
		if err != nil {
			return err
		}

		filename := *dockerfileName // path to Dockerfile

		if *dockerfileName == "" {
			// No -f/--file was specified so use the default
			*dockerfileName = api.DefaultDockerfileName
			filename = filepath.Join(absRoot, *dockerfileName)

			// Just to be nice ;-) look for 'dockerfile' too but only
			// use it if we found it, otherwise ignore this check
			if _, err = os.Lstat(filename); os.IsNotExist(err) {
				tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName))
				if _, err = os.Lstat(tmpFN); err == nil {
					*dockerfileName = strings.ToLower(*dockerfileName)
					filename = tmpFN
				}
			}
		}

		origDockerfile := *dockerfileName // used for error msg
		if filename, err = filepath.Abs(filename); err != nil {
			return err
		}

		// Verify that 'filename' is within the build context
		filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
		if err != nil {
			return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
		}

		// Now reset the dockerfileName to be relative to the build context
		*dockerfileName, err = filepath.Rel(absRoot, filename)
		if err != nil {
			return err
		}
		// And canonicalize dockerfile name to a platform-independent one
		*dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName)
		if err != nil {
			return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", *dockerfileName, err)
		}

		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
		}
		var includes = []string{"."}

		excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore"))
		if err != nil {
			return err
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed.  The daemon will remove them for us, if needed, after it
		// parses the Dockerfile.
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(*dockerfileName, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", *dockerfileName)
		}

		if err := utils.ValidateContextDirectory(root, excludes); err != nil {
			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
		}
		options := &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		}
		context, err = archive.TarWithOptions(root, options)
		if err != nil {
			return err
		}
	}

	// windows: show error message about modified file permissions
	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
	if runtime.GOOS == "windows" {
		fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter()
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       cli.out,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}
	// Send the build context
	v := &url.Values{}

	// Check if the given image name can be resolved
	if *tag != "" {
		repository, tag := parsers.ParseRepositoryTag(*tag)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	v.Set("t", *tag)

	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}

	if *forceRm {
		v.Set("forcerm", "1")
	}

	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
	v.Set("cpuperiod", strconv.FormatInt(*flCpuPeriod, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)

	v.Set("dockerfile", *dockerfileName)

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))

	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}
	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}
	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}
	return err
}
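
Note: both CmdBuild variants convert the --memory and --memory-swap strings into byte counts before sending them as query parameters, with "-1" meaning "unlimited swap". The sketch below uses ramInBytes as a simplified stand-in for units.RAMInBytes; it only understands integers with an optional b/k/m/g suffix:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// ramInBytes is a simplified stand-in for units.RAMInBytes: an integer with an
// optional b/k/m/g suffix, interpreted as binary multiples.
func ramInBytes(s string) (int64, error) {
	mult := map[string]int64{"b": 1, "k": 1 << 10, "m": 1 << 20, "g": 1 << 30}
	s = strings.ToLower(strings.TrimSpace(s))
	factor := int64(1)
	if len(s) > 0 {
		if f, ok := mult[s[len(s)-1:]]; ok {
			factor, s = f, s[:len(s)-1]
		}
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, err
	}
	return n * factor, nil
}

func main() {
	for _, flag := range []string{"512m", "1g", "-1"} {
		if flag == "-1" {
			fmt.Println(flag, "=> swap disabled")
			continue
		}
		n, err := ramInBytes(flag)
		fmt.Println(flag, "=>", n, err)
	}
}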
Example #9
// hyper build [OPTIONS] PATH
func (cli *HyperClient) HyperCmdBuild(args ...string) error {
	var opts struct {
		ImageName      string `long:"tag" short:"t" default:"" value-name:"\"\"" default-mask:"-" description:"Repository name (and optionally a tag) to be applied to the resulting image in case of success"`
		DockerfileName string `long:"file" short:"f" default:"" value-name:"\"\"" default-mask:"-" description:"Customized docker file"`
	}

	var parser = gflag.NewParser(&opts, gflag.Default)
	parser.Usage = "build [OPTIONS] PATH\n\nBuild a new image from the source code at PATH"
	args, err := parser.ParseArgs(args)
	if err != nil {
		if strings.Contains(err.Error(), "Usage") {
			return nil
		}
		return err
	}

	if len(args) == 0 {
		return fmt.Errorf("%s: \"build\" requires a minimum of 1 argument, See 'hyper build --help'.", os.Args[0])
	}
	var (
		filename = ""
		context  archive.Archive
		name     = ""
	)
	root := args[0]
	if _, err := os.Stat(root); err != nil {
		return err
	}

	absRoot, err := filepath.Abs(root)
	if err != nil {
		return err
	}

	filename = opts.DockerfileName // path to Dockerfile

	if opts.DockerfileName == "" {
		// No -f/--file was specified so use the default
		opts.DockerfileName = api.DefaultDockerfileName
		filename = filepath.Join(absRoot, opts.DockerfileName)

		// Just to be nice ;-) look for 'dockerfile' too but only
		// use it if we found it, otherwise ignore this check
		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			tmpFN := path.Join(absRoot, strings.ToLower(opts.DockerfileName))
			if _, err = os.Lstat(tmpFN); err == nil {
				opts.DockerfileName = strings.ToLower(opts.DockerfileName)
				filename = tmpFN
			}
		}
	}

	origDockerfile := opts.DockerfileName // used for error msg
	if filename, err = filepath.Abs(filename); err != nil {
		return err
	}

	// Verify that 'filename' is within the build context
	filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
	if err != nil {
		return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
	}

	// Now reset the dockerfileName to be relative to the build context
	opts.DockerfileName, err = filepath.Rel(absRoot, filename)
	if err != nil {
		return err
	}
	// And canonicalize dockerfile name to a platform-independent one
	opts.DockerfileName, err = archive.CanonicalTarNameForPath(opts.DockerfileName)
	if err != nil {
		return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", opts.DockerfileName, err)
	}

	if _, err = os.Lstat(filename); os.IsNotExist(err) {
		return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
	}
	var includes = []string{"."}

	f, err := os.Open(filepath.Join(root, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	defer f.Close()

	var excludes []string
	if err == nil {
		excludes, err = utils.ReadDockerIgnore(f)
		if err != nil {
			return err
		}
	}

	if err := utils.ValidateContextDirectory(root, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed.  The daemon will remove them for us, if needed, after it
	// parses the Dockerfile.
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(opts.DockerfileName, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", opts.DockerfileName)
	}

	if err := utils.ValidateContextDirectory(root, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}
	options := &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	}
	context, err = archive.TarWithOptions(root, options)
	if err != nil {
		return err
	}
	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter()
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       os.Stdout,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}

	if opts.ImageName == "" {
		// set an image name
		name = rand.RandStr(10, "alphanum")
	} else {
		name = opts.ImageName
		repository, tag := parsers.ParseRepositoryTag(name)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}
	v := url.Values{}
	v.Set("name", name)
	headers := http.Header(make(map[string][]string))
	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}
	err = cli.stream("POST", "/image/build?"+v.Encode(), body, cli.out, headers)
	if err != nil {
		return err
	}
	return nil
}
Example #10
func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	var (
		authConfigs        = map[string]cliconfig.AuthConfig{}
		authConfigsEncoded = r.Header.Get("X-Registry-Config")
		buildConfig        = &dockerfile.Config{}
	)

	if authConfigsEncoded != "" {
		authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded))
		if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil {
			// For a pull it is not an error if no auth was given;
			// to stay compatible with the existing API, the auth
			// configs default to empty.
		}
	}

	w.Header().Set("Content-Type", "application/json")

	version := httputils.VersionFromContext(ctx)
	output := ioutils.NewWriteFlusher(w)
	sf := streamformatter.NewJSONStreamFormatter()
	errf := func(err error) error {
		// Do not write the error in the HTTP output if it's still empty.
		// This prevents writing a 200 (OK) when there is an internal error.
		if !output.Flushed() {
			return err
		}
		_, err = w.Write(sf.FormatError(errors.New(utils.GetErrorMessage(err))))
		if err != nil {
			logrus.Warnf("could not write error response: %v", err)
		}
		return nil
	}

	if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") {
		buildConfig.Remove = true
	} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
		buildConfig.Remove = true
	} else {
		buildConfig.Remove = httputils.BoolValue(r, "rm")
	}
	if httputils.BoolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") {
		buildConfig.Pull = true
	}

	repoName, tag := parsers.ParseRepositoryTag(r.FormValue("t"))
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return errf(err)
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return errf(err)
			}
		}
	}

	buildConfig.DockerfileName = r.FormValue("dockerfile")
	buildConfig.Verbose = !httputils.BoolValue(r, "q")
	buildConfig.UseCache = !httputils.BoolValue(r, "nocache")
	buildConfig.ForceRemove = httputils.BoolValue(r, "forcerm")
	buildConfig.MemorySwap = httputils.Int64ValueOrZero(r, "memswap")
	buildConfig.Memory = httputils.Int64ValueOrZero(r, "memory")
	buildConfig.CPUShares = httputils.Int64ValueOrZero(r, "cpushares")
	buildConfig.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod")
	buildConfig.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota")
	buildConfig.CPUSetCpus = r.FormValue("cpusetcpus")
	buildConfig.CPUSetMems = r.FormValue("cpusetmems")
	buildConfig.CgroupParent = r.FormValue("cgroupparent")

	var buildUlimits = []*ulimit.Ulimit{}
	ulimitsJSON := r.FormValue("ulimits")
	if ulimitsJSON != "" {
		if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil {
			return errf(err)
		}
		buildConfig.Ulimits = buildUlimits
	}

	var buildArgs = map[string]string{}
	buildArgsJSON := r.FormValue("buildargs")
	if buildArgsJSON != "" {
		if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil {
			return errf(err)
		}
		buildConfig.BuildArgs = buildArgs
	}

	remoteURL := r.FormValue("remote")

	// Currently, only used if context is from a remote url.
	// The field `In` is set by DetectContextFromRemoteURL.
	// Look at code in DetectContextFromRemoteURL for more information.
	pReader := &progressreader.Config{
		// TODO: make progressreader streamformatter-agnostic
		Out:       output,
		Formatter: sf,
		Size:      r.ContentLength,
		NewLines:  true,
		ID:        "Downloading context",
		Action:    remoteURL,
	}

	var (
		context        builder.ModifiableContext
		dockerfileName string
		err            error
	)
	context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, pReader)
	if err != nil {
		return errf(err)
	}
	defer func() {
		if err := context.Close(); err != nil {
			logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
		}
	}()

	uidMaps, gidMaps := s.daemon.GetUIDGIDMaps()
	defaultArchiver := &archive.Archiver{
		Untar:   chrootarchive.Untar,
		UIDMaps: uidMaps,
		GIDMaps: gidMaps,
	}
	docker := daemonbuilder.Docker{s.daemon, output, authConfigs, defaultArchiver}

	b, err := dockerfile.NewBuilder(buildConfig, docker, builder.DockerIgnoreContext{context}, nil)
	if err != nil {
		return errf(err)
	}
	b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
	b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf}

	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		finished := make(chan struct{})
		defer close(finished)
		go func() {
			select {
			case <-finished:
			case <-closeNotifier.CloseNotify():
				logrus.Infof("Client disconnected, cancelling job: build")
				b.Cancel()
			}
		}()
	}

	if len(dockerfileName) > 0 {
		b.DockerfileName = dockerfileName
	}

	imgID, err := b.Build()
	if err != nil {
		return errf(err)
	}

	if repoName != "" {
		if err := s.daemon.TagImage(repoName, tag, string(imgID), true); err != nil {
			return errf(err)
		}
	}

	return nil
}
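
Note: the handler reads per-registry credentials from the X-Registry-Config header as URL-safe base64-encoded JSON and deliberately ignores decode failures. Below is a minimal stand-alone sketch of that decoding step; the authConfig struct here is trimmed down and is not the real cliconfig.AuthConfig:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// authConfig is a reduced stand-in for cliconfig.AuthConfig.
type authConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

// decodeAuthConfigs mirrors the lenient header handling above: a decode
// failure simply leaves the map empty.
func decodeAuthConfigs(header string) map[string]authConfig {
	configs := map[string]authConfig{}
	if header == "" {
		return configs
	}
	dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(header)))
	_ = dec.Decode(&configs)
	return configs
}

func main() {
	raw, _ := json.Marshal(map[string]authConfig{
		"https://index.docker.io/v1/": {Username: "user", Password: "secret"},
	})
	header := base64.URLEncoding.EncodeToString(raw)
	fmt.Println(decodeAuthConfigs(header))
}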
Example #11
func Build(d *daemon.Daemon, buildConfig *Config) error {
	var (
		repoName string
		tag      string
		context  io.ReadCloser
	)

	repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName)
	if repoName != "" {
		if err := registry.ValidateRepositoryName(repoName); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	if buildConfig.RemoteURL == "" {
		context = ioutil.NopCloser(buildConfig.Context)
	} else if urlutil.IsGitURL(buildConfig.RemoteURL) {
		root, err := utils.GitClone(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if urlutil.IsURL(buildConfig.RemoteURL) {
		f, err := httputils.Download(buildConfig.RemoteURL)
		if err != nil {
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return err
		}

		// When we're downloading just a Dockerfile put it in
		// the default name - don't allow the client to move/specify it
		buildConfig.DockerfileName = api.DefaultDockerfileName

		c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile))
		if err != nil {
			return err
		}
		context = c
	}
	defer context.Close()

	sf := streamformatter.NewJSONStreamFormatter()

	builder := &Builder{
		Daemon: d,
		OutStream: &streamformatter.StdoutFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		ErrStream: &streamformatter.StderrFormater{
			Writer:          buildConfig.Stdout,
			StreamFormatter: sf,
		},
		Verbose:         !buildConfig.SuppressOutput,
		UtilizeCache:    !buildConfig.NoCache,
		Remove:          buildConfig.Remove,
		ForceRemove:     buildConfig.ForceRemove,
		Pull:            buildConfig.Pull,
		OutOld:          buildConfig.Stdout,
		StreamFormatter: sf,
		AuthConfig:      buildConfig.AuthConfig,
		ConfigFile:      buildConfig.ConfigFile,
		dockerfileName:  buildConfig.DockerfileName,
		cpuShares:       buildConfig.CpuShares,
		cpuPeriod:       buildConfig.CpuPeriod,
		cpuQuota:        buildConfig.CpuQuota,
		cpuSetCpus:      buildConfig.CpuSetCpus,
		cpuSetMems:      buildConfig.CpuSetMems,
		cgroupParent:    buildConfig.CgroupParent,
		memory:          buildConfig.Memory,
		memorySwap:      buildConfig.MemorySwap,
		cancelled:       buildConfig.WaitCancelled(),
	}

	id, err := builder.Run(context)
	if err != nil {
		return err
	}

	if repoName != "" {
		return d.Repositories().Tag(repoName, tag, id, true)
	}
	return nil
}
Example #12
File: build.go Project: m1911/hyper
func (cli Docker) SendImageBuild(name string, size int, ctx io.ReadCloser) ([]byte, int, error) {
	var (
		authConfigs   = map[string]cliconfig.AuthConfig{}
		buildConfig   = &dockerfile.Config{}
		buildArgs     = map[string]string{}
		buildUlimits  = []*ulimit.Ulimit{}
		isolation     = "" // r.FormValue("isolation")
		ulimitsJSON   = "" // r.FormValue("ulimits")
		buildArgsJSON = "" //  r.FormValue("buildargs")
		remoteURL     = "" // r.FormValue("remote")
	)
	buildConfig.Remove = true
	buildConfig.Pull = true
	output := ioutils.NewWriteFlusher(os.Stdout)
	defer output.Close()
	sf := streamformatter.NewJSONStreamFormatter()
	errf := func(err error) error {
		// Do not write the error in the HTTP output if it's still empty.
		// This prevents writing a 200 (OK) when there is an internal error.
		if !output.Flushed() {
			return err
		}
		return nil
	}
	buildConfig.DockerfileName = "" // r.FormValue("dockerfile")
	buildConfig.Verbose = true      // !httputils.BoolValue(r, "q")
	buildConfig.UseCache = true     // !httputils.BoolValue(r, "nocache")
	buildConfig.ForceRemove = true  // httputils.BoolValue(r, "forcerm")
	buildConfig.MemorySwap = 0      // httputils.Int64ValueOrZero(r, "memswap")
	buildConfig.Memory = 0          // httputils.Int64ValueOrZero(r, "memory")
	buildConfig.ShmSize = 0         // httputils.Int64ValueOrZero(r, "shmsize")
	buildConfig.CPUShares = 0       // httputils.Int64ValueOrZero(r, "cpushares")
	buildConfig.CPUPeriod = 0       // httputils.Int64ValueOrZero(r, "cpuperiod")
	buildConfig.CPUQuota = 0        // httputils.Int64ValueOrZero(r, "cpuquota")
	buildConfig.CPUSetCpus = ""     // r.FormValue("cpusetcpus")
	buildConfig.CPUSetMems = ""     // r.FormValue("cpusetmems")
	buildConfig.CgroupParent = ""   // r.FormValue("cgroupparent")

	if i := runconfig.IsolationLevel(isolation); i != "" {
		if !runconfig.IsolationLevel.IsValid(i) {
			return nil, -1, errf(fmt.Errorf("Unsupported isolation: %q", i))
		}
		buildConfig.Isolation = i
	}

	if ulimitsJSON != "" {
		if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil {
			return nil, -1, errf(err)
		}
		buildConfig.Ulimits = buildUlimits
	}

	if buildArgsJSON != "" {
		if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil {
			return nil, -1, errf(err)
		}
		buildConfig.BuildArgs = buildArgs
	}

	uidMaps, gidMaps := cli.daemon.GetUIDGIDMaps()
	defaultArchiver := &archive.Archiver{
		Untar:   chrootarchive.Untar,
		UIDMaps: uidMaps,
		GIDMaps: gidMaps,
	}
	docker := &daemonbuilder.Docker{
		Daemon:      cli.daemon,
		OutOld:      output,
		AuthConfigs: authConfigs,
		Archiver:    defaultArchiver,
	}

	// Currently, only used if context is from a remote url.
	// The field `In` is set by DetectContextFromRemoteURL.
	// Look at code in DetectContextFromRemoteURL for more information.
	pReader := &progressreader.Config{
		// TODO: make progressreader streamformatter-agnostic
		Out:       output,
		Formatter: sf,
		Size:      int64(size),
		NewLines:  true,
		ID:        "Downloading context",
		Action:    remoteURL,
	}
	context, dockerfileName, err := daemonbuilder.DetectContextFromRemoteURL(ctx, remoteURL, pReader)
	if err != nil {
		return nil, -1, errf(err)
	}
	defer func() {
		if err := context.Close(); err != nil {
			logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
		}
	}()
	buildConfig.DockerfileName = dockerfileName
	b, err := dockerfile.NewBuilder(cli.daemon, buildConfig, docker, builder.DockerIgnoreContext{ModifiableContext: context}, nil)
	if err != nil {
		return nil, -1, errf(err)
	}

	b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
	b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf}

	imgID, err := b.Build()
	if err != nil {
		return nil, -1, errf(err)
	}

	repo, tag := parsers.ParseRepositoryTag(name)
	if err := registry.ValidateRepositoryName(repo); err != nil {
		return nil, -1, errf(err)
	}
	if len(tag) > 0 {
		if err := tags.ValidateTagName(tag); err != nil {
			return nil, -1, errf(err)
		}
	} else {
		tag = tags.DefaultTag
	}

	if err := cli.daemon.TagImage(repo, tag, string(imgID), true); err != nil {
		return nil, -1, errf(err)
	}
	return nil, 0, nil
}