func (e *ClientExecutor) Archive(src, dst string, allowDecompression, allowDownload bool) (io.Reader, io.Closer, error) { var closer closers var base string var infos []CopyInfo var err error if isURL(src) { if !allowDownload { return nil, nil, fmt.Errorf("source can't be a URL") } infos, base, err = DownloadURL(src, dst) if len(base) > 0 { closer = append(closer, func() error { return os.RemoveAll(base) }) } } else { base = e.Directory infos, err = CalcCopyInfo(src, base, allowDecompression, true) } if err != nil { closer.Close() return nil, nil, err } options := archiveOptionsFor(infos, dst, e.Excludes) glog.V(4).Infof("Tar of directory %s %#v", base, options) rc, err := archive.TarWithOptions(base, options) closer = append(closer, rc.Close) return rc, closer, err }
func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { excludes, err := parseDockerignore(srcPath) if err != nil { return nil, err } includes := []string{"."} // If .dockerignore mentions .dockerignore or the Dockerfile // then make sure we send both files over to the daemon // because Dockerfile is, obviously, needed no matter what, and // .dockerignore is needed to know if either one needs to be // removed. The deamon will remove them for us, if needed, after it // parses the Dockerfile. // // https://github.com/docker/docker/issues/8330 // forceIncludeFiles := []string{".dockerignore", dockerfilePath} for _, includeFile := range forceIncludeFiles { if includeFile == "" { continue } keepThem, err := fileutils.Matches(includeFile, excludes) if err != nil { return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err) } if keepThem { includes = append(includes, includeFile) } } if err := validateContextDirectory(srcPath, excludes); err != nil { return nil, err } tarOpts := &archive.TarOptions{ ExcludePatterns: excludes, IncludeFiles: includes, Compression: archive.Uncompressed, NoLchown: true, } return archive.TarWithOptions(srcPath, tarOpts) }
// conformanceTester runs a single conformance test case against a live Docker
// daemon. The context comes from a git clone, a copied Dockerfile, or an
// existing context directory. When deep is true each Dockerfile step is built
// both by Docker and by the direct builder and the resulting images are
// compared layer by layer; in either mode the final squashed direct build is
// compared against the last Docker-built image. All images created are
// force-removed at the end.
func conformanceTester(t *testing.T, c *docker.Client, test conformanceTest, i int, deep bool) {
	dockerfile := test.Dockerfile
	if len(dockerfile) == 0 {
		dockerfile = "Dockerfile"
	}
	tmpDir, err := ioutil.TempDir("", "dockerbuild-conformance-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	dir := tmpDir
	contextDir := filepath.Join(dir, test.ContextDir)
	dockerfilePath := filepath.Join(dir, dockerfile)

	// clone repo or copy the Dockerfile
	var input string
	switch {
	case len(test.Git) > 0:
		// Source is a git repository: clone it into the temp dir and apply
		// any test-supplied fixup hook.
		input = test.Git
		cmd := exec.Command("git", "clone", test.Git, dir)
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Errorf("unable to clone %q: %v\n%s", test.Git, err, out)
			return
		}
		if test.PostClone != nil {
			if err := test.PostClone(dir); err != nil {
				t.Errorf("unable to fixup clone: %v", err)
				return
			}
		}
	case len(test.Dockerfile) > 0:
		// Source is a standalone Dockerfile: copy it into the temp dir
		// under the canonical name "Dockerfile".
		input = dockerfile
		dockerfilePath = filepath.Join(dir, "Dockerfile")
		if _, err := fileutils.CopyFile(filepath.Join("", dockerfile), dockerfilePath); err != nil {
			t.Fatal(err)
		}
		dockerfile = "Dockerfile"
	default:
		// Source is an existing context directory used in place.
		input = filepath.Join(test.ContextDir, dockerfile)
		dockerfilePath = input
		contextDir = test.ContextDir
		dir = test.ContextDir
	}

	// read the dockerfile
	data, err := ioutil.ReadFile(dockerfilePath)
	if err != nil {
		t.Errorf("%d: unable to read Dockerfile %q: %v", i, input, err)
		return
	}
	node, err := parser.Parse(bytes.NewBuffer(data))
	if err != nil {
		t.Errorf("%d: can't parse Dockerfile %q: %v", i, input, err)
		return
	}
	from, err := NewBuilder().From(node)
	if err != nil {
		t.Errorf("%d: can't get base FROM %q: %v", i, input, err)
		return
	}

	nameFormat := "conformance-dockerbuild-%d-%s-%d"
	// toDelete accumulates every image tag created so the final loop can
	// remove them all.
	var toDelete []string
	steps := node.Children
	lastImage := from

	// ignoreSmallFileChange suppresses diffs where two tar entries differ
	// only slightly in size (within 10 bytes), which is typically a
	// timestamp-value change rather than a real content difference.
	ignoreSmallFileChange := func(a, b *tar.Header) bool {
		if a == nil || b == nil {
			return false
		}
		diff := a.Size - b.Size
		if differOnlyByFileSize(a, b, 10) {
			t.Logf("WARNING: %s differs only in size by %d bytes, probably a timestamp value change", a.Name, diff)
			return true
		}
		return false
	}

	if deep {
		// execute each step on both Docker build and the direct builder, comparing as we
		// go
		fail := false
		for j := range steps {
			// Build a one-step Dockerfile that starts FROM the image the
			// previous Docker step produced, so both builders advance in
			// lockstep.
			testFile := dockerfileWithFrom(lastImage, steps[j:j+1])

			nameDirect := fmt.Sprintf(nameFormat, i, "direct", j)
			nameDocker := fmt.Sprintf(nameFormat, i, "docker", j)

			// run docker build
			if err := ioutil.WriteFile(dockerfilePath, []byte(testFile), 0600); err != nil {
				t.Errorf("%d: unable to update Dockerfile %q: %v", i, dockerfilePath, err)
				break
			}
			in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}})
			if err != nil {
				t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
				break
			}
			out := &bytes.Buffer{}
			if err := c.BuildImage(docker.BuildImageOptions{
				Name:                nameDocker,
				Dockerfile:          dockerfile,
				RmTmpContainer:      true,
				ForceRmTmpContainer: true,
				InputStream:         in,
				OutputStream:        out,
			}); err != nil {
				in.Close()
				t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, test.Git, err, out)
				break
			}
			toDelete = append(toDelete, nameDocker)

			// run direct build
			e := NewClientExecutor(c)
			out = &bytes.Buffer{}
			e.Out, e.ErrOut = out, out
			e.Directory = contextDir
			e.Tag = nameDirect
			if err := e.Build(bytes.NewBufferString(testFile), nil); err != nil {
				t.Errorf("%d: failed to build step %d in dockerfile %q: %s\n%s", i, j, dockerfilePath, steps[j].Original, out)
				break
			}
			toDelete = append(toDelete, nameDirect)

			// only compare filesystem on layers that change the filesystem
			mutation := steps[j].Value == command.Add || steps[j].Value == command.Copy || steps[j].Value == command.Run

			// metadata must be strictly equal
			if !equivalentImages(
				t, c, nameDocker, nameDirect, mutation,
				metadataEqual,
				append(ignoreFuncs{ignoreSmallFileChange}, test.Ignore...)...,
			) {
				t.Errorf("%d: layered Docker build was not equivalent to direct layer image metadata %s", i, input)
				fail = true
			}

			// Chain the next step off the Docker-built image so drift in the
			// direct builder can't compound across steps.
			lastImage = nameDocker
		}
		if fail {
			t.Fatalf("%d: Conformance test failed for %s", i, input)
		}
	} else {
		// Shallow mode: run a single full Docker build of the whole
		// Dockerfile to produce the reference image.
		exclude, _ := ParseDockerignore(dir)
		//exclude = append(filtered, ".dockerignore")
		in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}, ExcludePatterns: exclude})
		if err != nil {
			t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
			return
		}
		out := &bytes.Buffer{}
		nameDocker := fmt.Sprintf(nameFormat, i, "docker", 0)
		if err := c.BuildImage(docker.BuildImageOptions{
			Name:                nameDocker,
			Dockerfile:          dockerfile,
			RmTmpContainer:      true,
			ForceRmTmpContainer: true,
			InputStream:         in,
			OutputStream:        out,
		}); err != nil {
			in.Close()
			t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, test.Git, err, out)
			return
		}
		lastImage = nameDocker
		toDelete = append(toDelete, nameDocker)
	}

	// if we ran more than one step, compare the squashed output with the docker build output
	if len(steps) > 1 || !deep {
		nameDirect := fmt.Sprintf(nameFormat, i, "direct", len(steps)-1)
		e := NewClientExecutor(c)
		out := &bytes.Buffer{}
		e.Out, e.ErrOut = out, out
		e.Directory = contextDir
		e.Tag = nameDirect
		if err := e.Build(bytes.NewBuffer(data), nil); err != nil {
			t.Errorf("%d: failed to build complete image in %q: %v\n%s", i, input, err, out)
		} else {
			if !equivalentImages(
				t, c, lastImage, nameDirect, true,
				// metadata should be loosely equivalent, but because we squash and because of limitations
				// in docker commit, there are some differences
				metadataLayerEquivalent,
				append(ignoreFuncs{
					ignoreSmallFileChange,
					// the direct dockerfile contains all steps, the layered image is synthetic from our previous
					// test and so only contains the last layer
					ignoreDockerfileSize(dockerfile),
				}, test.Ignore...)...,
			) {
				t.Errorf("%d: full Docker build was not equivalent to squashed image metadata %s", i, input)
			}
		}
	}

	// Best-effort cleanup of every image this test created.
	for _, s := range toDelete {
		c.RemoveImageExtended(s, docker.RemoveImageOptions{Force: true})
	}
}