Example #1
func (b *DockerBuilder) BuildImage(build *Build, stream *streams.Output) error {
	repoPath := fmt.Sprintf("%s/%s", b.cachePath, build.RepositoryName)
	repo, err := findOrClone(repoPath, build.CloneURL)
	if err != nil {
		return err
	}

	remote, err := repo.LookupRemote("origin")
	if err != nil {
		return err
	}

	refSpecs, err := remote.FetchRefspecs()
	handleError(err)

	err = remote.Fetch(refSpecs, nil, "")
	if err != nil {
		return err
	}

	oid, err := git.NewOid(build.CommitID)
	if err != nil {
		return err
	}

	commit, err := repo.LookupCommit(oid)
	handleError(err)
	tree, err := commit.Tree()
	handleError(err)
	err = repo.CheckoutTree(tree, &git.CheckoutOpts{Strategy: git.CheckoutForce})
	handleError(err)
	err = repo.SetHeadDetached(oid, nil, "")
	handleError(err)

	build.ImageTag = build.CommitID[:7]
	options := &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: []string{".git"},
		IncludeFiles:    []string{"."},
	}
	context, err := archive.TarWithOptions(repoPath, options)
	if err != nil {
		return err
	}

	name := fmt.Sprintf("%s/%s:%s", b.registryURL, build.RepositoryName, build.ImageTag)
	err = b.dockerClient.BuildImage(docker.BuildImageOptions{
		Dockerfile:   "Dockerfile",
		Name:         name,
		OutputStream: stream,
		InputStream:  context,
	})
	if err != nil {
		return err
	}

	err = b.dockerClient.TagImage(name, docker.TagImageOptions{
		Repo:  fmt.Sprintf("%s/%s", b.registryURL, build.RepositoryName),
		Tag:   "latest",
		Force: true,
	})

	return err
}
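Every example on this page centers on the same call, archive.TarWithOptions from github.com/docker/docker/pkg/archive, which tars a source directory according to a TarOptions struct and returns an io.ReadCloser over the archive stream. As a reference point, here is a minimal, self-contained sketch of that pattern on its own; the source directory, exclude pattern, and output file name are placeholders, not values taken from the example above.

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Tar the current directory, uncompressed, skipping the .git directory.
	rc, err := archive.TarWithOptions(".", &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: []string{".git"},
		IncludeFiles:    []string{"."},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// The returned io.ReadCloser can be copied anywhere: a file, an HTTP
	// request body, or a Docker build call as in the examples on this page.
	out, err := os.Create("context.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}
}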
Example #2
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
	// AUFS doesn't need the parent layer to produce a diff.
	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: []string{".wh..wh.*"},
	})
}
Example #3
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (a *Driver) Diff(id, parent string) (archive.Archive, error) {
	// AUFS doesn't need the parent layer to produce a diff.
	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
	})
}
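Examples #2 and #3 build the same AUFS diff archive; #2 hard-codes the whiteout metadata pattern ".wh..wh.*", while #3 derives it from the named constants in the archive package and re-includes the opaque-directory marker. A small sketch of how those two exclude lists relate; the constant values quoted in the comments are taken from github.com/docker/docker/pkg/archive and should be treated as an assumption if your vendored copy differs.

package whiteouts

import "github.com/docker/docker/pkg/archive"

// aufsExcludes mirrors the exclude list used in Example #3:
//   archive.WhiteoutMetaPrefix is ".wh..wh." (AUFS bookkeeping entries),
//   archive.WhiteoutOpaqueDir  is ".wh..wh..opq" (opaque-directory marker).
// The leading "!" re-includes the opaque marker so it survives the tar.
func aufsExcludes() []string {
	return []string{
		archive.WhiteoutMetaPrefix + "*",
		"!" + archive.WhiteoutOpaqueDir,
	}
}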
Example #4
func (v *Volume) Export(resource, name string) (io.ReadCloser, error) {
	if v.IsBindMount && filepath.Base(resource) == name {
		name = ""
	}

	basePath, err := v.getResourcePath(resource)
	if err != nil {
		return nil, err
	}
	stat, err := os.Stat(basePath)
	if err != nil {
		return nil, err
	}
	var filter []string
	if !stat.IsDir() {
		d, f := path.Split(basePath)
		basePath = d
		filter = []string{f}
	} else {
		filter = []string{path.Base(basePath)}
		basePath = path.Dir(basePath)
	}
	return archive.TarWithOptions(basePath, &archive.TarOptions{
		Compression: archive.Uncompressed,
		Name:        name,
		Includes:    filter,
	})
}
Example #5
func (container *Container) Copy(resource string) (io.ReadCloser, error) {
	container.Lock()
	defer container.Unlock()
	var err error
	if err := container.Mount(); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			// unmount any volumes
			container.UnmountVolumes(true)
			// unmount the container's rootfs
			container.Unmount()
		}
	}()
	mounts, err := container.setupMounts()
	if err != nil {
		return nil, err
	}
	for _, m := range mounts {
		dest, err := container.GetResourcePath(m.Destination)
		if err != nil {
			return nil, err
		}
		if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil {
			return nil, err
		}
	}
	basePath, err := container.GetResourcePath(resource)
	if err != nil {
		return nil, err
	}
	stat, err := os.Stat(basePath)
	if err != nil {
		return nil, err
	}
	var filter []string
	if !stat.IsDir() {
		d, f := filepath.Split(basePath)
		basePath = d
		filter = []string{f}
	} else {
		filter = []string{filepath.Base(basePath)}
		basePath = filepath.Dir(basePath)
	}
	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
		Compression:  archive.Uncompressed,
		IncludeFiles: filter,
	})
	if err != nil {
		return nil, err
	}
	return ioutils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			container.UnmountVolumes(true)
			container.Unmount()
			return err
		}),
		nil
}
Example #6
func (container *Container) Copy(resource string) (io.ReadCloser, error) {
	if err := container.Mount(); err != nil {
		return nil, err
	}

	basePath, err := container.getResourcePath(resource)
	if err != nil {
		container.Unmount()
		return nil, err
	}

	// Check if this is actually in a volume
	for _, mnt := range container.VolumeMounts() {
		if len(mnt.MountToPath) > 0 && strings.HasPrefix(resource, mnt.MountToPath[1:]) {
			return mnt.Export(resource)
		}
	}

	// Check if this is a special one (resolv.conf, hostname, ..)
	if resource == "etc/resolv.conf" {
		basePath = container.ResolvConfPath
	}
	if resource == "etc/hostname" {
		basePath = container.HostnamePath
	}
	if resource == "etc/hosts" {
		basePath = container.HostsPath
	}

	stat, err := os.Stat(basePath)
	if err != nil {
		container.Unmount()
		return nil, err
	}
	var filter []string
	if !stat.IsDir() {
		d, f := path.Split(basePath)
		basePath = d
		filter = []string{f}
	} else {
		filter = []string{path.Base(basePath)}
		basePath = path.Dir(basePath)
	}

	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
		Compression:  archive.Uncompressed,
		IncludeFiles: filter,
	})
	if err != nil {
		container.Unmount()
		return nil, err
	}
	return ioutils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			container.Unmount()
			return err
		}),
		nil
}
Example #7
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (a *Driver) Diff(id, parent string, options *archive.ChangeOptions) (archive.Archive, error) {
	// AUFS doesn't need the parent layer to produce a diff.
	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
		Compression: archive.Uncompressed,
		Includes:    options.Includes,
		Excludes:    options.Excludes,
	})
}
Example #8
func (container *Container) Copy(resource string) (io.ReadCloser, error) {
	container.Lock()
	defer container.Unlock()
	var err error
	if err := container.Mount(); err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			container.Unmount()
		}
	}()

	if err = container.mountVolumes(); err != nil {
		container.unmountVolumes()
		return nil, err
	}
	defer func() {
		if err != nil {
			container.unmountVolumes()
		}
	}()

	basePath, err := container.GetResourcePath(resource)
	if err != nil {
		return nil, err
	}

	stat, err := os.Stat(basePath)
	if err != nil {
		return nil, err
	}
	var filter []string
	if !stat.IsDir() {
		d, f := path.Split(basePath)
		basePath = d
		filter = []string{f}
	} else {
		filter = []string{path.Base(basePath)}
		basePath = path.Dir(basePath)
	}

	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
		Compression:  archive.Uncompressed,
		IncludeFiles: filter,
	})
	if err != nil {
		return nil, err
	}

	return ioutils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			container.unmountVolumes()
			container.Unmount()
			return err
		}),
		nil
}
Example #9
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (d *Driver) Diff(id, parent string) (archive.Archive, error) {
	diffPath := d.getDiffPath(id)
	logrus.Debugf("Tar with options on %s", diffPath)
	return archive.TarWithOptions(diffPath, &archive.TarOptions{
		Compression:    archive.Uncompressed,
		UIDMaps:        d.uidMaps,
		GIDMaps:        d.gidMaps,
		WhiteoutFormat: archive.OverlayWhiteoutFormat,
	})
}
Example #10
func (c *aufs) TarStream(id, parent string) (io.ReadCloser, error) {
	path, _, err := c.Mount(id)
	if err != nil {
		return nil, err
	}

	return archive.TarWithOptions(path, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
	})
}
Example #11
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
	if !a.isParent(id, parent) {
		return a.naiveDiff.Diff(id, parent)
	}

	// AUFS doesn't need the parent layer to produce a diff.
	return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir},
		UIDMaps:         a.uidMaps,
		GIDMaps:         a.gidMaps,
	})
}
Example #12
func do_export(ctx *context, ctrl *ext.Controller) bool {
	r := ctrl.Request
	if r.MethodToLower() != "get" {
		ctrl.JsonError(http.StatusMethodNotAllowed, errors.New("method not allowed"))
		return true
	}

	directory := path.Join(ctx.app.Options.Root, r.TrimSuffixURI(".export"))
	var tarOpt archive.TarOptions
	tarOpt.ExcludePatterns = append(tarOpt.ExcludePatterns,
		".tmp", ".h2object", "h2object.pid", "h2object.conf")
	tarOpt.Compression = archive.Gzip
	rd, err := archive.TarWithOptions(directory, &tarOpt)
	if err != nil {
		ctrl.JsonError(http.StatusInternalServerError, err)
		return true
	}
	defer rd.Close()

	_, fname := path.Split(r.TrimSuffixURI(".export"))
	if fname == "" {
		fname = "h2object"
	}
	fname = fname + ".tar.gz"
	fn := path.Join(ctx.app.Options.TempRoot, fname)

	fd, err := os.Create(fn)
	if err != nil {
		ctrl.JsonError(http.StatusInternalServerError, err)
		return true
	}
	if _, err := io.Copy(fd, rd); err != nil {
		ctrl.JsonError(http.StatusInternalServerError, err)
		return true
	}
	fd.Close()

	fout, err := os.Open(fn)
	if err != nil {
		ctrl.JsonError(http.StatusInternalServerError, err)
		return true
	}
	fstat, err := fout.Stat()
	if err != nil {
		ctrl.JsonError(http.StatusInternalServerError, err)
		return true
	}

	ctrl.Binary(fname, fout, fstat.Size())
	return true
}
Example #13
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
	if !d.isParent(id, parent) {
		return d.naiveDiff.Diff(id, parent)
	}

	diffPath := d.getDiffPath(id)
	logrus.Debugf("Tar with options on %s", diffPath)
	return archive.TarWithOptions(diffPath, &archive.TarOptions{
		Compression:    archive.Uncompressed,
		UIDMaps:        d.uidMaps,
		GIDMaps:        d.gidMaps,
		WhiteoutFormat: archive.OverlayWhiteoutFormat,
	})
}
Example #14
func (c *tarArchiver) Compress(dirPath string) (io.ReadCloser, error) {
	excludePatterns, err := parseExcludePatternsFiles(dirPath, c.opts.ExcludePatternsFiles)
	if err != nil {
		return nil, err
	}
	return archive.TarWithOptions(
		dirPath,
		&archive.TarOptions{
			IncludeFiles:    []string{"."},
			ExcludePatterns: excludePatterns,
			Compression:     archive.Uncompressed,
			NoLchown:        true,
		},
	)
}
Example #15
func createTarStream(srcPath string) (io.ReadCloser, error) {
	excludes, err := parseDockerignore(srcPath)
	if err != nil {
		return nil, err
	}

	if err := validateContextDirectory(srcPath, excludes); err != nil {
		return nil, err
	}
	tarOpts := &archive.TarOptions{
		Excludes:    excludes,
		Compression: archive.Uncompressed,
		NoLchown:    true,
	}
	return archive.TarWithOptions(srcPath, tarOpts)
}
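Example #15 relies on a parseDockerignore helper that is not shown. Below is a plausible minimal version, assuming the standard .dockerignore format and the dockerignore.ReadAll helper that later examples on this page use; the function body is an illustrative sketch, not the project's actual code.

package tarstream

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/builder/dockerignore"
)

// parseDockerignore (hypothetical sketch) reads srcPath/.dockerignore, if it
// exists, and returns its patterns; a missing file simply means no exclusions.
func parseDockerignore(srcPath string) ([]string, error) {
	f, err := os.Open(filepath.Join(srcPath, ".dockerignore"))
	if os.IsNotExist(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return dockerignore.ReadAll(f)
}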
Example #16
File: util.go Project: sr/protoeasy
func tar(dirPath string, includeFiles []string) (retVal []byte, retErr error) {
	readCloser, err := archive.TarWithOptions(
		dirPath,
		&archive.TarOptions{
			IncludeFiles: includeFiles,
			Compression:  archive.Uncompressed,
			NoLchown:     true,
		},
	)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err := readCloser.Close(); err != nil && retErr == nil {
			retErr = err
		}
	}()
	return ioutil.ReadAll(readCloser)
}
Example #17
File: tar.go Project: CNDonny/scope
func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
	excludes, err := parseDockerignore(srcPath)
	if err != nil {
		return nil, err
	}

	includes := []string{"."}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile.
	//
	// https://github.com/docker/docker/issues/8330
	//
	forceIncludeFiles := []string{".dockerignore", dockerfilePath}

	for _, includeFile := range forceIncludeFiles {
		if includeFile == "" {
			continue
		}
		keepThem, err := fileutils.Matches(includeFile, excludes)
		if err != nil {
			return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
		}
		if keepThem {
			includes = append(includes, includeFile)
		}
	}

	if err := validateContextDirectory(srcPath, excludes); err != nil {
		return nil, err
	}
	tarOpts := &archive.TarOptions{
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
		Compression:     archive.Uncompressed,
		NoLchown:        true,
	}
	return archive.TarWithOptions(srcPath, tarOpts)
}
Example #18
// getArchive returns the tarfile io.ReadCloser. It is a direct copy of the
// logic found in the official docker client.
// See <https://github.com/docker/docker/blob/78f2b8d8/api/client/build.go#L126-L172>.
func getArchive(contextDir, relDockerfile string) (io.ReadCloser, error) {
	var err error

	// And canonicalize dockerfile name to a platform-independent one
	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
	if err != nil {
		return nil, fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
	}

	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}

	var excludes []string
	if err == nil {
		excludes, err = dockerignore.ReadAll(f)
		if err != nil {
			return nil, err
		}
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile. Ignore errors here, as they will have been
	// caught by validateContextDirectory above.
	var includes = []string{"."}
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", relDockerfile)
	}

	return archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	})
}
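The getArchive helper in Example #18 produces exactly the context stream the Docker API expects. The sketch below feeds that stream to the daemon with the official Go client; getArchive is assumed to live in the same package as this snippet, and the image tag and client setup are placeholder assumptions rather than part of the original example.

package buildctx

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// buildFromContext (illustrative sketch) streams the context produced by
// getArchive from Example #18 to the Docker daemon and copies the build
// output to stdout.
func buildFromContext(contextDir, relDockerfile string) error {
	cli, err := client.NewEnvClient() // configured from DOCKER_HOST etc.
	if err != nil {
		return err
	}

	buildCtx, err := getArchive(contextDir, relDockerfile)
	if err != nil {
		return err
	}
	defer buildCtx.Close()

	resp, err := cli.ImageBuild(context.Background(), buildCtx, types.ImageBuildOptions{
		Dockerfile: relDockerfile,
		Tags:       []string{"example/app:latest"}, // placeholder tag
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// The daemon replies with a stream of JSON messages; pass it through as-is.
	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}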
Example #19
func runCreate(dockerCli *command.DockerCli, options pluginCreateOptions) error {
	var (
		createCtx io.ReadCloser
		err       error
	)

	if err := validateTag(options.repoName); err != nil {
		return err
	}

	absContextDir, err := validateContextDir(options.context)
	if err != nil {
		return err
	}

	if err := validateConfig(options.context); err != nil {
		return err
	}

	compression := archive.Uncompressed
	if options.compress {
		logrus.Debugf("compression enabled")
		compression = archive.Gzip
	}

	createCtx, err = archive.TarWithOptions(absContextDir, &archive.TarOptions{
		Compression: compression,
	})

	if err != nil {
		return err
	}

	ctx := context.Background()

	createOptions := types.PluginCreateOptions{RepoName: options.repoName}
	if err = dockerCli.Client().PluginCreate(ctx, createCtx, createOptions); err != nil {
		return err
	}
	fmt.Fprintln(dockerCli.Out(), options.repoName)
	return nil
}
Example #20
func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) {
	if err := daemon.Mount(container); err != nil {
		return nil, err
	}

	uidMaps, gidMaps := daemon.GetUIDGIDMaps()
	archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{
		Compression: archive.Uncompressed,
		UIDMaps:     uidMaps,
		GIDMaps:     gidMaps,
	})
	if err != nil {
		daemon.Unmount(container)
		return nil, err
	}
	arch := ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		daemon.Unmount(container)
		return err
	})
	daemon.LogContainerEvent(container, "export")
	return arch, err
}
Example #21
func (container *Container) export() (archive.Archive, error) {
	if err := container.Mount(); err != nil {
		return nil, err
	}

	uidMaps, gidMaps := container.daemon.GetUIDGIDMaps()
	archive, err := archive.TarWithOptions(container.basefs, &archive.TarOptions{
		Compression: archive.Uncompressed,
		UIDMaps:     uidMaps,
		GIDMaps:     gidMaps,
	})
	if err != nil {
		container.Unmount()
		return nil, err
	}
	arch := ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		container.Unmount()
		return err
	})
	container.logEvent("export")
	return arch, err
}
Example #22
// CreateTar create a build context tar for the specified project and service name.
func CreateTar(p *project.Project, name string) (io.ReadCloser, error) {
	// This code was ripped off from docker/api/client/build.go

	serviceConfig := p.Configs[name]
	root := serviceConfig.Build
	dockerfileName := filepath.Join(root, serviceConfig.Dockerfile)

	absRoot, err := filepath.Abs(root)
	if err != nil {
		return nil, err
	}

	filename := dockerfileName

	if dockerfileName == "" {
		// No -f/--file was specified so use the default
		dockerfileName = DefaultDockerfileName
		filename = filepath.Join(absRoot, dockerfileName)

		// Just to be nice ;-) look for 'dockerfile' too but only
		// use it if we found it, otherwise ignore this check
		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			tmpFN := path.Join(absRoot, strings.ToLower(dockerfileName))
			if _, err = os.Lstat(tmpFN); err == nil {
				dockerfileName = strings.ToLower(dockerfileName)
				filename = tmpFN
			}
		}
	}

	origDockerfile := dockerfileName // used for error msg
	if filename, err = filepath.Abs(filename); err != nil {
		return nil, err
	}

	// Now reset the dockerfileName to be relative to the build context
	dockerfileName, err = filepath.Rel(absRoot, filename)
	if err != nil {
		return nil, err
	}

	// And canonicalize dockerfile name to a platform-independent one
	dockerfileName, err = archive.CanonicalTarNameForPath(dockerfileName)
	if err != nil {
		return nil, fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", dockerfileName, err)
	}

	if _, err = os.Lstat(filename); os.IsNotExist(err) {
		return nil, fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
	}
	var includes = []string{"."}
	var excludes []string

	dockerIgnorePath := path.Join(root, ".dockerignore")
	dockerIgnore, err := os.Open(dockerIgnorePath)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		logrus.Warnf("Error while reading .dockerignore (%s) : %s", dockerIgnorePath, err.Error())
		excludes = make([]string, 0)
	} else {
		excludes, err = utils.ReadDockerIgnore(dockerIgnore)
		if err != nil {
			return nil, err
		}
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile.
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(dockerfileName, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", dockerfileName)
	}

	if err := utils.ValidateContextDirectory(root, excludes); err != nil {
		return nil, fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
	}

	options := &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	}

	return archive.TarWithOptions(root, options)
}
Example #23
func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
	container.Lock()

	defer func() {
		if err != nil {
			// Wait to unlock the container until the archive is fully read
			// (see the ReadCloseWrapper func below) or if there is an error
			// before that occurs.
			container.Unlock()
		}
	}()

	if err := container.Mount(); err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			// unmount any volumes
			container.unmountVolumes(true)
			// unmount the container's rootfs
			container.Unmount()
		}
	}()

	if err := container.mountVolumes(); err != nil {
		return nil, err
	}

	basePath, err := container.GetResourcePath(resource)
	if err != nil {
		return nil, err
	}
	stat, err := os.Stat(basePath)
	if err != nil {
		return nil, err
	}
	var filter []string
	if !stat.IsDir() {
		d, f := filepath.Split(basePath)
		basePath = d
		filter = []string{f}
	} else {
		filter = []string{filepath.Base(basePath)}
		basePath = filepath.Dir(basePath)
	}
	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
		Compression:  archive.Uncompressed,
		IncludeFiles: filter,
	})
	if err != nil {
		return nil, err
	}

	reader := ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		container.unmountVolumes(true)
		container.Unmount()
		container.Unlock()
		return err
	})
	container.logEvent("copy")
	return reader, nil
}
Example #24
func createTarball(base string) ([]byte, error) {
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}

	sym, err := filepath.EvalSymlinks(base)
	if err != nil {
		return nil, err
	}

	err = os.Chdir(sym)
	if err != nil {
		return nil, err
	}

	var includes = []string{"."}
	var excludes []string

	dockerIgnorePath := path.Join(sym, ".dockerignore")
	dockerIgnore, err := os.Open(dockerIgnorePath)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		// There is no .dockerignore file
		excludes = make([]string, 0)
	} else {
		excludes, err = dockerignore.ReadAll(dockerIgnore)
		if err != nil {
			return nil, err
		}
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile.
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches("Dockerfile", excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", "Dockerfile")
	}

	// if err := builder.ValidateContextDirectory(contextDirectory, excludes); err != nil {
	// 	return nil, fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
	// }

	options := &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	}

	out, err := archive.TarWithOptions(sym, options)
	if err != nil {
		return nil, err
	}

	bytes, err := ioutil.ReadAll(out)
	if err != nil {
		return nil, err
	}

	err = os.Chdir(cwd)
	if err != nil {
		return nil, err
	}

	return bytes, nil
}
Example #25
// TarFromSource creates a stream buffer with the tarballed content of the user source
func TarFromSource(ctx CommandContext, source string, gateway string) (*io.ReadCloser, error) {
	var tarOutputStream io.ReadCloser

	// source is a server address + path (scp-like uri)
	if strings.Contains(source, ":") {
		logrus.Debugf("Creating a tarball remotely and streaming it using SSH")
		serverParts := strings.Split(source, ":")
		if len(serverParts) != 2 {
			return nil, fmt.Errorf("invalid source uri, see 'scw cp -h' for usage")
		}

		serverID, err := ctx.API.GetServerID(serverParts[0])
		if err != nil {
			return nil, err
		}

		server, err := ctx.API.GetServer(serverID)
		if err != nil {
			return nil, err
		}

		dir, base := utils.PathToTARPathparts(serverParts[1])
		logrus.Debugf("Equivalent to 'scp root@%s:%s/%s ...'", server.PublicAddress.IP, dir, base)

		// remoteCommand is executed on the remote server
		// it streams a tarball raw content
		remoteCommand := []string{"tar"}
		remoteCommand = append(remoteCommand, "-C", dir)
		if ctx.Getenv("DEBUG") == "1" {
			remoteCommand = append(remoteCommand, "-v")
		}
		remoteCommand = append(remoteCommand, "-cf", "-")
		remoteCommand = append(remoteCommand, base)

		// Resolve gateway
		if gateway == "" {
			gateway = ctx.Getenv("SCW_GATEWAY")
		}

		if gateway == serverID || gateway == serverParts[0] {
			gateway = ""
		} else {
			gateway, err = api.ResolveGateway(ctx.API, gateway)
			if err != nil {
				return nil, fmt.Errorf("cannot resolve Gateway '%s': %v", gateway, err)
			}
		}

		// execCmd contains the ssh connection + the remoteCommand
		sshCommand := utils.NewSSHExecCmd(server.PublicAddress.IP, server.PrivateIP, false, remoteCommand, gateway)
		logrus.Debugf("Executing: %s", sshCommand)
		spawnSrc := exec.Command("ssh", sshCommand.Slice()[1:]...)

		tarOutputStream, err = spawnSrc.StdoutPipe()
		if err != nil {
			return nil, err
		}

		tarErrorStream, err := spawnSrc.StderrPipe()
		if err != nil {
			return nil, err
		}
		defer tarErrorStream.Close()
		io.Copy(ctx.Stderr, tarErrorStream)

		err = spawnSrc.Start()
		if err != nil {
			return nil, err
		}
		defer spawnSrc.Wait()

		return &tarOutputStream, nil
	}

	// source is stdin
	if source == "-" {
		logrus.Debugf("Streaming tarball from stdin")
		// FIXME: should be ctx.Stdin
		tarOutputStream = os.Stdin
		return &tarOutputStream, nil
	}

	// source is a path on localhost
	logrus.Debugf("Taring local path %s", source)
	path, err := filepath.Abs(source)
	if err != nil {
		return nil, err
	}
	path, err = filepath.EvalSymlinks(path)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Real local path is %s", path)

	dir, base := utils.PathToTARPathparts(path)

	tarOutputStream, err = archive.TarWithOptions(dir, &archive.TarOptions{
		Compression:  archive.Uncompressed,
		IncludeFiles: []string{base},
	})
	if err != nil {
		return nil, err
	}
	return &tarOutputStream, nil
}
Example #26
File: build.go Project: pbx0/docker
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true)
	tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")

	cmd.Require(flag.Exact, 1)
	cmd.ParseFlags(args, true)

	var (
		context  archive.Archive
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil
	if cmd.Arg(0) == "-" {
		// As a special case, 'docker build -' will build from either an empty context with the
		// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
		buf := bufio.NewReader(cli.in)
		magic, err := buf.Peek(tarHeaderSize)
		if err != nil && err != io.EOF {
			return fmt.Errorf("failed to peek context header from STDIN: %v", err)
		}
		if !archive.IsArchive(magic) {
			dockerfile, err := ioutil.ReadAll(buf)
			if err != nil {
				return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
			}

			// -f option has no meaning when we're reading it from stdin,
			// so just use our default Dockerfile name
			*dockerfileName = api.DefaultDockerfileName
			context, err = archive.Generate(*dockerfileName, string(dockerfile))
		} else {
			context = ioutil.NopCloser(buf)
		}
	} else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) {
		isRemote = true
	} else {
		root := cmd.Arg(0)
		if urlutil.IsGitURL(root) {
			root, err = utils.GitClone(root)
			if err != nil {
				return err
			}
			defer os.RemoveAll(root)
		}
		if _, err := os.Stat(root); err != nil {
			return err
		}

		absRoot, err := filepath.Abs(root)
		if err != nil {
			return err
		}

		filename := *dockerfileName // path to Dockerfile

		if *dockerfileName == "" {
			// No -f/--file was specified so use the default
			*dockerfileName = api.DefaultDockerfileName
			filename = filepath.Join(absRoot, *dockerfileName)

			// Just to be nice ;-) look for 'dockerfile' too but only
			// use it if we found it, otherwise ignore this check
			if _, err = os.Lstat(filename); os.IsNotExist(err) {
				tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName))
				if _, err = os.Lstat(tmpFN); err == nil {
					*dockerfileName = strings.ToLower(*dockerfileName)
					filename = tmpFN
				}
			}
		}

		origDockerfile := *dockerfileName // used for error msg
		if filename, err = filepath.Abs(filename); err != nil {
			return err
		}

		// Verify that 'filename' is within the build context
		filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
		if err != nil {
			return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
		}

		// Now reset the dockerfileName to be relative to the build context
		*dockerfileName, err = filepath.Rel(absRoot, filename)
		if err != nil {
			return err
		}
		// And canonicalize dockerfile name to a platform-independent one
		*dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName)
		if err != nil {
			return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", *dockerfileName, err)
		}

		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
		}
		var includes = []string{"."}

		excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore"))
		if err != nil {
			return err
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile.
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(*dockerfileName, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", *dockerfileName)
		}

		if err := utils.ValidateContextDirectory(root, excludes); err != nil {
			return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
		}
		options := &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		}
		context, err = archive.TarWithOptions(root, options)
		if err != nil {
			return err
		}
	}

	// windows: show error message about modified file permissions
	// FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build.
	if runtime.GOOS == "windows" {
		logrus.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter(false)
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       cli.out,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}
	// Send the build context
	v := &url.Values{}

	//Check if the given image name can be resolved
	if *tag != "" {
		repository, tag := parsers.ParseRepositoryTag(*tag)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := graph.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}

	v.Set("t", *tag)

	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}

	if *forceRm {
		v.Set("forcerm", "1")
	}

	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)

	v.Set("dockerfile", *dockerfileName)

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))

	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}
	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}
	err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)
	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}
	return err
}
Example #27
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true)
	flTags := opts.NewListOpts(validateTag)
	cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
	noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap")
	flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
	flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
	flBuildArg := opts.NewListOpts(opts.ValidateEnv)
	cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables")

	ulimits := make(map[string]*ulimit.Ulimit)
	flUlimits := opts.NewUlimitOpt(&ulimits)
	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")

	cmd.Require(flag.Exact, 1)

	// For trusted pull on "FROM <image>" instruction.
	addTrustedFlags(cmd, true)

	cmd.ParseFlags(args, true)

	var (
		context  io.ReadCloser
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil

	specifiedContext := cmd.Arg(0)

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
	)

	switch {
	case specifiedContext == "-":
		tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
	case urlutil.IsGitURL(specifiedContext) && hasGit:
		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
	case urlutil.IsURL(specifiedContext):
		tempDir, relDockerfile, err = getContextFromURL(cli.out, specifiedContext, *dockerfileName)
	default:
		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
	}

	if err != nil {
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	// Resolve the FROM lines in the Dockerfile to trusted digest references
	// using Notary. On a successful build, we must tag the resolved digests
	// to the original name specified in the Dockerfile.
	newDockerfile, resolvedTags, err := rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
	if err != nil {
		return fmt.Errorf("unable to process Dockerfile: %v", err)
	}
	defer newDockerfile.Close()

	// And canonicalize dockerfile name to a platform-independent one
	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
	if err != nil {
		return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
	}

	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	var excludes []string
	if err == nil {
		excludes, err = utils.ReadDockerIgnore(f)
		if err != nil {
			return err
		}
	}

	if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile. Ignore errors here, as they will have been
	// caught by ValidateContextDirectory above.
	var includes = []string{"."}
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", relDockerfile)
	}

	context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	})
	if err != nil {
		return err
	}

	// Wrap the tar archive to replace the Dockerfile entry with the rewritten
	// Dockerfile which uses trusted pulls.
	context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)

	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	sf := streamformatter.NewStreamFormatter()
	var body io.Reader = progressreader.New(progressreader.Config{
		In:        context,
		Out:       cli.out,
		Formatter: sf,
		NewLines:  true,
		ID:        "",
		Action:    "Sending build context to Docker daemon",
	})

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	// Send the build context
	v := url.Values{
		"t": flTags.GetAll(),
	}
	if *suppressOutput {
		v.Set("q", "1")
	}
	if isRemote {
		v.Set("remote", cmd.Arg(0))
	}
	if *noCache {
		v.Set("nocache", "1")
	}
	if *rm {
		v.Set("rm", "1")
	} else {
		v.Set("rm", "0")
	}

	if *forceRm {
		v.Set("forcerm", "1")
	}

	if *pull {
		v.Set("pull", "1")
	}

	v.Set("cpusetcpus", *flCPUSetCpus)
	v.Set("cpusetmems", *flCPUSetMems)
	v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10))
	v.Set("cpuquota", strconv.FormatInt(*flCPUQuota, 10))
	v.Set("cpuperiod", strconv.FormatInt(*flCPUPeriod, 10))
	v.Set("memory", strconv.FormatInt(memory, 10))
	v.Set("memswap", strconv.FormatInt(memorySwap, 10))
	v.Set("cgroupparent", *flCgroupParent)

	v.Set("dockerfile", relDockerfile)

	ulimitsVar := flUlimits.GetList()
	ulimitsJSON, err := json.Marshal(ulimitsVar)
	if err != nil {
		return err
	}
	v.Set("ulimits", string(ulimitsJSON))

	// collect all the build-time environment variables for the container
	buildArgs := runconfig.ConvertKVStringsToMap(flBuildArg.GetAll())
	buildArgsJSON, err := json.Marshal(buildArgs)
	if err != nil {
		return err
	}
	v.Set("buildargs", string(buildArgsJSON))

	headers := http.Header(make(map[string][]string))
	buf, err := json.Marshal(cli.configFile.AuthConfigs)
	if err != nil {
		return err
	}
	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
	headers.Set("Content-Type", "application/tar")

	sopts := &streamOpts{
		rawTerminal: true,
		in:          body,
		out:         cli.out,
		headers:     headers,
	}

	serverResp, err := cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), sopts)

	// Windows: show error message about modified file permissions.
	if runtime.GOOS == "windows" {
		h, err := httputils.ParseServerHeader(serverResp.header.Get("Server"))
		if err == nil {
			if h.OS != "windows" {
				fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
			}
		}
	}

	if jerr, ok := err.(*jsonmessage.JSONError); ok {
		// If no error code is set, default to 1
		if jerr.Code == 0 {
			jerr.Code = 1
		}
		return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
	}

	if err != nil {
		return err
	}

	// Since the build was successful, now we must tag any of the resolved
	// images from the above Dockerfile rewrite.
	for _, resolved := range resolvedTags {
		if err := cli.tagTrusted(resolved.repoInfo, resolved.digestRef, resolved.tagRef); err != nil {
			return err
		}
	}

	return nil
}
Example #28
func runBuild(dockerCli *command.DockerCli, options buildOptions) error {

	var (
		buildCtx io.ReadCloser
		err      error
	)

	specifiedContext := options.context

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
		progBuff      io.Writer
		buildBuff     io.Writer
	)

	progBuff = dockerCli.Out()
	buildBuff = dockerCli.Out()
	if options.quiet {
		progBuff = bytes.NewBuffer(nil)
		buildBuff = bytes.NewBuffer(nil)
	}

	switch {
	case specifiedContext == "-":
		buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName)
	case urlutil.IsGitURL(specifiedContext):
		tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName)
	case urlutil.IsURL(specifiedContext):
		buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
	default:
		contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
	}

	if err != nil {
		if options.quiet && urlutil.IsURL(specifiedContext) {
			fmt.Fprintln(dockerCli.Err(), progBuff)
		}
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	if buildCtx == nil {
		// And canonicalize dockerfile name to a platform-independent one
		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
		if err != nil {
			return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
		}

		f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
		if err != nil && !os.IsNotExist(err) {
			return err
		}
		defer f.Close()

		var excludes []string
		if err == nil {
			excludes, err = dockerignore.ReadAll(f)
			if err != nil {
				return err
			}
		}

		if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil {
			return fmt.Errorf("Error checking context: '%s'.", err)
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile. Ignore errors here, as they will have been
		// caught by validateContextDirectory above.
		var includes = []string{"."}
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", relDockerfile)
		}

		buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		})
		if err != nil {
			return err
		}
	}

	ctx := context.Background()

	var resolvedTags []*resolvedTag
	if command.IsTrusted() {
		translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
			return TrustedReference(ctx, dockerCli, ref)
		}
		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
		// Dockerfile which uses trusted pulls.
		buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
	}

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)
	if !dockerCli.Out().IsTerminal() {
		progressOutput = &lastProgressOutput{output: progressOutput}
	}

	var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if options.memory != "" {
		parsedMemory, err := units.RAMInBytes(options.memory)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
	if options.memorySwap != "" {
		if options.memorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(options.memorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if options.shmSize != "" {
		shmSize, err = units.RAMInBytes(options.shmSize)
		if err != nil {
			return err
		}
	}

	authConfig, _ := dockerCli.CredentialsStore().GetAll()
	buildOptions := types.ImageBuildOptions{
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           options.tags.GetAll(),
		SuppressOutput: options.quiet,
		NoCache:        options.noCache,
		Remove:         options.rm,
		ForceRemove:    options.forceRm,
		PullParent:     options.pull,
		Isolation:      container.Isolation(options.isolation),
		CPUSetCPUs:     options.cpuSetCpus,
		CPUSetMems:     options.cpuSetMems,
		CPUShares:      options.cpuShares,
		CPUQuota:       options.cpuQuota,
		CPUPeriod:      options.cpuPeriod,
		CgroupParent:   options.cgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        options.ulimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()),
		AuthConfigs:    authConfig,
		Labels:         runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
		CacheFrom:      options.cacheFrom,
	}

	response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
	if err != nil {
		if options.quiet {
			fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
		}
		return err
	}
	defer response.Body.Close()

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if options.quiet {
				fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
			}
			return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
	}

	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
		fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if options.quiet {
		fmt.Fprintf(dockerCli.Out(), "%s", buildBuff)
	}

	if command.IsTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #29
// CmdBuild builds a new image from the source code at a given path.
//
// If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
//
// Usage: docker build [OPTIONS] PATH | URL | -
func (cli *DockerCli) CmdBuild(args ...string) error {
	cmd := Cli.Subcmd("build", []string{"PATH | URL | -"}, Cli.DockerCommands["build"].Description, true)
	flTags := opts.NewListOpts(validateTag)
	cmd.Var(&flTags, []string{"t", "-tag"}, "Name and optionally a tag in the 'name:tag' format")
	suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the build output and print image ID on success")
	noCache := cmd.Bool([]string{"-no-cache"}, false, "Do not use cache when building the image")
	rm := cmd.Bool([]string{"-rm"}, true, "Remove intermediate containers after a successful build")
	forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers")
	pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image")
	dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
	flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit")
	flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
	flShmSize := cmd.String([]string{"-shm-size"}, "", "Size of /dev/shm, default value is 64MB")
	flCPUShares := cmd.Int64([]string{"#c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
	flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
	flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)")
	flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)")
	flCgroupParent := cmd.String([]string{"-cgroup-parent"}, "", "Optional parent cgroup for the container")
	flBuildArg := opts.NewListOpts(runconfigopts.ValidateEnv)
	cmd.Var(&flBuildArg, []string{"-build-arg"}, "Set build-time variables")
	isolation := cmd.String([]string{"-isolation"}, "", "Container isolation level")

	ulimits := make(map[string]*units.Ulimit)
	flUlimits := runconfigopts.NewUlimitOpt(&ulimits)
	cmd.Var(flUlimits, []string{"-ulimit"}, "Ulimit options")

	cmd.Require(flag.Exact, 1)

	// For trusted pull on "FROM <image>" instruction.
	addTrustedFlags(cmd, true)

	cmd.ParseFlags(args, true)

	var (
		context  io.ReadCloser
		isRemote bool
		err      error
	)

	_, err = exec.LookPath("git")
	hasGit := err == nil

	specifiedContext := cmd.Arg(0)

	var (
		contextDir    string
		tempDir       string
		relDockerfile string
		progBuff      io.Writer
		buildBuff     io.Writer
	)

	progBuff = cli.out
	buildBuff = cli.out
	if *suppressOutput {
		progBuff = bytes.NewBuffer(nil)
		buildBuff = bytes.NewBuffer(nil)
	}

	switch {
	case specifiedContext == "-":
		context, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
	case urlutil.IsGitURL(specifiedContext) && hasGit:
		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
	case urlutil.IsURL(specifiedContext):
		context, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName)
	default:
		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
	}

	if err != nil {
		if *suppressOutput && urlutil.IsURL(specifiedContext) {
			fmt.Fprintln(cli.err, progBuff)
		}
		return fmt.Errorf("unable to prepare context: %s", err)
	}

	if tempDir != "" {
		defer os.RemoveAll(tempDir)
		contextDir = tempDir
	}

	if context == nil {
		// And canonicalize dockerfile name to a platform-independent one
		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
		if err != nil {
			return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
		}

		f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
		if err != nil && !os.IsNotExist(err) {
			return err
		}

		var excludes []string
		if err == nil {
			excludes, err = dockerignore.ReadAll(f)
			if err != nil {
				return err
			}
		}

		if err := validateContextDirectory(contextDir, excludes); err != nil {
			return fmt.Errorf("Error checking context: '%s'.", err)
		}

		// If .dockerignore mentions .dockerignore or the Dockerfile
		// then make sure we send both files over to the daemon
		// because Dockerfile is, obviously, needed no matter what, and
		// .dockerignore is needed to know if either one needs to be
		// removed. The daemon will remove them for us, if needed, after it
		// parses the Dockerfile. Ignore errors here, as they will have been
		// caught by validateContextDirectory above.
		var includes = []string{"."}
		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
		keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
		if keepThem1 || keepThem2 {
			includes = append(includes, ".dockerignore", relDockerfile)
		}

		context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
			Compression:     archive.Uncompressed,
			ExcludePatterns: excludes,
			IncludeFiles:    includes,
		})
		if err != nil {
			return err
		}
	}

	var resolvedTags []*resolvedTag
	if isTrusted() {
		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
		// Dockerfile which uses trusted pulls.
		context = replaceDockerfileTarWrapper(context, relDockerfile, cli.trustedReference, &resolvedTags)
	}

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)

	var body io.Reader = progress.NewProgressReader(context, progressOutput, 0, "", "Sending build context to Docker daemon")

	var memory int64
	if *flMemoryString != "" {
		parsedMemory, err := units.RAMInBytes(*flMemoryString)
		if err != nil {
			return err
		}
		memory = parsedMemory
	}

	var memorySwap int64
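	// A swap value of -1 tells the daemon not to enforce any swap limit.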
	if *flMemorySwap != "" {
		if *flMemorySwap == "-1" {
			memorySwap = -1
		} else {
			parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap)
			if err != nil {
				return err
			}
			memorySwap = parsedMemorySwap
		}
	}

	var shmSize int64
	if *flShmSize != "" {
		shmSize, err = units.RAMInBytes(*flShmSize)
		if err != nil {
			return err
		}
	}

	var remoteContext string
	if isRemote {
		remoteContext = cmd.Arg(0)
	}

	options := types.ImageBuildOptions{
		Context:        body,
		Memory:         memory,
		MemorySwap:     memorySwap,
		Tags:           flTags.GetAll(),
		SuppressOutput: *suppressOutput,
		RemoteContext:  remoteContext,
		NoCache:        *noCache,
		Remove:         *rm,
		ForceRemove:    *forceRm,
		PullParent:     *pull,
		IsolationLevel: container.IsolationLevel(*isolation),
		CPUSetCPUs:     *flCPUSetCpus,
		CPUSetMems:     *flCPUSetMems,
		CPUShares:      *flCPUShares,
		CPUQuota:       *flCPUQuota,
		CPUPeriod:      *flCPUPeriod,
		CgroupParent:   *flCgroupParent,
		Dockerfile:     relDockerfile,
		ShmSize:        shmSize,
		Ulimits:        flUlimits.GetList(),
		BuildArgs:      runconfigopts.ConvertKVStringsToMap(flBuildArg.GetAll()),
		AuthConfigs:    cli.configFile.AuthConfigs,
	}

	response, err := cli.client.ImageBuild(options)
	if err != nil {
		return err
	}

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, cli.outFd, cli.isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			if *suppressOutput {
				fmt.Fprintf(cli.err, "%s%s", progBuff, buildBuff)
			}
			return Cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
		return err
	}

	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" {
		fmt.Fprintln(cli.err, `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`)
	}

	// Everything worked so if -q was provided the output from the daemon
	// should be just the image ID and we'll print that to stdout.
	if *suppressOutput {
		fmt.Fprintf(cli.out, "%s", buildBuff)
	}

	if isTrusted() {
		// Since the build was successful, now we must tag any of the resolved
		// images from the above Dockerfile rewrite.
		for _, resolved := range resolvedTags {
			if err := cli.tagTrusted(resolved.digestRef, resolved.tagRef); err != nil {
				return err
			}
		}
	}

	return nil
}
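A minimal, standalone sketch of the context-tarring pattern shared by the build clients in these examples: read .dockerignore when present, then stream the directory with archive.TarWithOptions. The tarBuildContext helper and its main driver are illustrative only, and the import paths assume the docker/docker packages these snippets already reference.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/builder/dockerignore"
	"github.com/docker/docker/pkg/archive"
)

// tarBuildContext mirrors the pattern above: load exclusion patterns from
// .dockerignore if the file exists, then tar the directory uncompressed.
func tarBuildContext(contextDir string) (io.ReadCloser, error) {
	var excludes []string

	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
	switch {
	case err == nil:
		defer f.Close()
		if excludes, err = dockerignore.ReadAll(f); err != nil {
			return nil, err
		}
	case !os.IsNotExist(err):
		return nil, err
	}

	return archive.TarWithOptions(contextDir, &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    []string{"."},
	})
}

func main() {
	context, err := tarBuildContext(".")
	if err != nil {
		panic(err)
	}
	defer context.Close()

	// Count the bytes that would be uploaded to the daemon as build context.
	n, _ := io.Copy(ioutil.Discard, context)
	fmt.Printf("build context is %d bytes\n", n)
}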
Exemplo n.º 30
0
// hyper build [OPTIONS] PATH
func (cli *HyperClient) HyperCmdBuild(args ...string) error {
	var opts struct {
		ImageName      string `long:"tag" short:"t" default:"" value-name:"\"\"" default-mask:"-" description:"Repository name (and optionally a tag) to be applied to the resulting image in case of success"`
		DockerfileName string `long:"file" short:"f" default:"" value-name:"\"\"" default-mask:"-" description:"Customized Dockerfile"`
	}

	var parser = gflag.NewParser(&opts, gflag.Default)
	parser.Usage = "build [OPTIONS] PATH\n\nBuild a new image from the source code at PATH"
	args, err := parser.ParseArgs(args)
	if err != nil {
		if strings.Contains(err.Error(), "Usage") {
			return nil
		}
		return err
	}

	if len(args) == 0 {
		return fmt.Errorf("%s: \"build\" requires a minimum of 1 argument, See 'hyper build --help'.", os.Args[0])
	}
	var (
		filename = ""
		context  archive.Archive
		name     = ""
	)
	root := args[0]
	if _, err := os.Stat(root); err != nil {
		return err
	}

	absRoot, err := filepath.Abs(root)
	if err != nil {
		return err
	}

	filename = opts.DockerfileName // path to Dockerfile

	if opts.DockerfileName == "" {
		// No -f/--file was specified so use the default
		opts.DockerfileName = api.DefaultDockerfileName
		filename = filepath.Join(absRoot, opts.DockerfileName)

		// Just to be nice ;-) look for 'dockerfile' too but only
		// use it if we found it, otherwise ignore this check
		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			tmpFN := path.Join(absRoot, strings.ToLower(opts.DockerfileName))
			if _, err = os.Lstat(tmpFN); err == nil {
				opts.DockerfileName = strings.ToLower(opts.DockerfileName)
				filename = tmpFN
			}
		}
	}

	origDockerfile := opts.DockerfileName // used for error msg
	if filename, err = filepath.Abs(filename); err != nil {
		return err
	}

	// Verify that 'filename' is within the build context
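	// (e.g. a Dockerfile that is a symlink pointing outside absRoot, such as
	// to /etc/passwd, is rejected here rather than silently followed)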
	filename, err = symlink.FollowSymlinkInScope(filename, absRoot)
	if err != nil {
		return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root)
	}

	// Now reset the dockerfileName to be relative to the build context
	opts.DockerfileName, err = filepath.Rel(absRoot, filename)
	if err != nil {
		return err
	}
	// And canonicalize dockerfile name to a platform-independent one
	opts.DockerfileName, err = archive.CanonicalTarNameForPath(opts.DockerfileName)
	if err != nil {
		return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", opts.DockerfileName, err)
	}
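	// (e.g. on Windows a relative path like "foo\Dockerfile" becomes
	// "foo/Dockerfile", matching the forward-slash paths stored in the tar)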

	if _, err = os.Lstat(filename); os.IsNotExist(err) {
		return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
	}
	var includes = []string{"."}

	f, err := os.Open(filepath.Join(root, ".dockerignore"))
	if err != nil && !os.IsNotExist(err) {
		return err
	}

	var excludes []string
	if err == nil {
		// Only defer the close once the file is known to have been opened;
		// calling Close on a nil *os.File would just report ErrInvalid.
		defer f.Close()
		excludes, err = utils.ReadDockerIgnore(f)
		if err != nil {
			return err
		}
	}

	if err := utils.ValidateContextDirectory(root, excludes); err != nil {
		return fmt.Errorf("Error checking context: '%s'.", err)
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile.
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(opts.DockerfileName, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", opts.DockerfileName)
	}

	options := &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	}
	context, err = archive.TarWithOptions(root, options)
	if err != nil {
		return err
	}
	var body io.Reader
	// Setup an upload progress bar
	// FIXME: ProgressReader shouldn't be this annoying to use
	if context != nil {
		sf := streamformatter.NewStreamFormatter()
		body = progressreader.New(progressreader.Config{
			In:        context,
			Out:       os.Stdout,
			Formatter: sf,
			NewLines:  true,
			ID:        "",
			Action:    "Sending build context to Docker daemon",
		})
	}

	if opts.ImageName == "" {
		// set an image name
		name = rand.RandStr(10, "alphanum")
	} else {
		name = opts.ImageName
		repository, tag := parsers.ParseRepositoryTag(name)
		if err := registry.ValidateRepositoryName(repository); err != nil {
			return err
		}
		if len(tag) > 0 {
			if err := tags.ValidateTagName(tag); err != nil {
				return err
			}
		}
	}
	v := url.Values{}
	v.Set("name", name)
	headers := http.Header(make(map[string][]string))
	if context != nil {
		headers.Set("Content-Type", "application/tar")
	}
	err = cli.stream("POST", "/image/build?"+v.Encode(), body, cli.out, headers)
	if err != nil {
		return err
	}
	return nil
}
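The cli.stream call above is a hyper-internal helper. As a rough sketch of what that upload amounts to, the request can be reproduced with the standard library alone; the postBuildContext name, the placeholder daemon address, and the use of http.DefaultClient are assumptions for illustration, not the real client plumbing.

package main

import (
	"io"
	"net/http"
	"net/url"
	"os"
)

// postBuildContext POSTs a tar build context the way HyperCmdBuild's headers
// suggest: Content-Type application/tar, image name as a query parameter,
// with the daemon's streamed response copied straight to stdout.
func postBuildContext(endpoint, name string, context io.Reader) error {
	v := url.Values{}
	v.Set("name", name)

	req, err := http.NewRequest("POST", endpoint+"/image/build?"+v.Encode(), context)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/tar")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}

func main() {
	// Assume a context tarball already produced by archive.TarWithOptions
	// and written to disk; the daemon address below is a placeholder.
	f, err := os.Open("context.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := postBuildContext("http://127.0.0.1:12345", "myimage", f); err != nil {
		panic(err)
	}
}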