// build takes a Dockerfile and builds an image.
func build(c cookoo.Context, path, tag string, client *docli.Client) error {
	dfile := filepath.Join(path, "Dockerfile")

	// Stat the file
	info, err := os.Stat(dfile)
	if err != nil {
		return fmt.Errorf("Dockerfile stat: %s", err)
	}
	file, err := os.Open(dfile)
	if err != nil {
		return fmt.Errorf("Dockerfile open: %s", err)
	}
	defer file.Close()

	// Wrap the Dockerfile in an in-memory tar archive, since the Docker API
	// expects the build context as a tar stream.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{
		Name:    "Dockerfile",
		Size:    info.Size(),
		ModTime: info.ModTime(),
	}); err != nil {
		return fmt.Errorf("Dockerfile tar header: %s", err)
	}
	if _, err := io.Copy(tw, file); err != nil {
		return fmt.Errorf("Dockerfile tar copy: %s", err)
	}
	if err := tw.Close(); err != nil {
		return fmt.Errorf("Dockerfile tar: %s", err)
	}

	options := docli.BuildImageOptions{
		Name:         tag,
		InputStream:  &buf,
		OutputStream: os.Stdout,
	}

	return client.BuildImage(options)
}
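// exampleBuild is a hedged usage sketch, not part of the original source. It assumes
// docli aliases the go-dockerclient package (whose NewClient takes a daemon endpoint)
// and that the caller already holds a cookoo.Context; the path and tag are placeholders.
func exampleBuild(c cookoo.Context) error {
	client, err := docli.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		return err
	}
	// Builds ./app/Dockerfile into an image tagged example:latest; the daemon's
	// build output streams to stdout as configured inside build above.
	return build(c, "./app", "example:latest", client)
}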
// buildImage builds an image named name from an in-memory tar archive that serves
// as the build context, streaming the daemon's build output to stderr.
func buildImage(client *docker.Client, name string, archive []byte) error {
	if err := client.BuildImage(docker.BuildImageOptions{
		Name:         name,
		OutputStream: os.Stderr,
		InputStream:  bytes.NewBuffer(archive),
	}); err != nil {
		fmt.Printf("could not build image: %v\n", err)
		return err
	}
	return nil
}
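// exampleTarContext is a hedged sketch (not from the original source) of how the
// archive argument above might be produced: it tars a single in-memory Dockerfile
// so the daemon receives a complete build context. Uses only the standard library
// packages archive/tar, bytes, and time; the Dockerfile contents are illustrative.
func exampleTarContext() ([]byte, error) {
	dockerfile := []byte("FROM busybox\nCMD [\"echo\", \"hello\"]\n")
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{
		Name:    "Dockerfile",
		Size:    int64(len(dockerfile)),
		Mode:    0644,
		ModTime: time.Now(),
	}); err != nil {
		return nil, err
	}
	if _, err := tw.Write(dockerfile); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}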
// BuildAndPushImageOfSizeWithDocker tries to build an image of wanted size and number of layers. It instructs
// Docker daemon directly. Built image is stored as an image stream tag <name>:<tag>. If shouldSucceed is
// false, a push is expected to fail with a denied error. Note the size is only approximate. Resulting image
// size will be different depending on used compression algorithm and metadata overhead.
func BuildAndPushImageOfSizeWithDocker(
	oc *exutil.CLI,
	dClient *dockerclient.Client,
	name, tag string,
	size uint64,
	numberOfLayers int,
	outSink io.Writer,
	shouldSucceed bool,
) (imageDigest string, err error) {
	registryURL, err := GetDockerRegistryURL(oc)
	if err != nil {
		return "", err
	}
	tempDir, err := ioutil.TempDir("", "name-build")
	if err != nil {
		return "", err
	}

	dataSize := calculateRoughDataSize(oc.Stdout(), size, numberOfLayers)

	// Generate a Dockerfile that copies one random blob per layer.
	lines := make([]string, numberOfLayers+1)
	lines[0] = "FROM scratch"
	for i := 1; i <= numberOfLayers; i++ {
		blobName := fmt.Sprintf("data%d", i)
		if err := createRandomBlob(path.Join(tempDir, blobName), dataSize); err != nil {
			return "", err
		}
		lines[i] = fmt.Sprintf("COPY %s /%s", blobName, blobName)
	}
	if err := ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte(strings.Join(lines, "\n")+"\n"), 0644); err != nil {
		return "", err
	}

	imageName := fmt.Sprintf("%s/%s/%s", registryURL, oc.Namespace(), name)
	taggedName := fmt.Sprintf("%s:%s", imageName, tag)

	err = dClient.BuildImage(dockerclient.BuildImageOptions{
		Name:                taggedName,
		RmTmpContainer:      true,
		ForceRmTmpContainer: true,
		ContextDir:          tempDir,
		OutputStream:        outSink,
	})
	if err != nil {
		return "", fmt.Errorf("failed to build %q image: %v", taggedName, err)
	}

	image, err := dClient.InspectImage(taggedName)
	if err != nil {
		return "", err
	}
	defer dClient.RemoveImageExtended(image.ID, dockerclient.RemoveImageOptions{Force: true})

	digest := ""
	if len(image.RepoDigests) == 1 {
		digest = image.RepoDigests[0]
	}

	out, err := oc.Run("whoami").Args("-t").Output()
	if err != nil {
		return "", err
	}
	token := strings.TrimSpace(out)

	var buf bytes.Buffer
	err = dClient.PushImage(dockerclient.PushImageOptions{
		Name:         imageName,
		Tag:          tag,
		Registry:     registryURL,
		OutputStream: &buf,
	}, dockerclient.AuthConfiguration{
		Username:      "******",
		Password:      token,
		Email:         "*****@*****.**",
		ServerAddress: registryURL,
	})
	out = buf.String()
	outSink.Write([]byte(out))

	if shouldSucceed {
		if err != nil {
			return "", fmt.Errorf("Got unexpected push error: %v", err)
		}
		if len(digest) == 0 {
			outSink.Write([]byte("matching digest string\n"))
			match := rePushedImageDigest.FindStringSubmatch(out)
			if len(match) < 2 {
				return "", fmt.Errorf("Failed to parse digest")
			}
			digest = match[1]
		}
		return digest, nil
	}

	if err == nil {
		return "", fmt.Errorf("Push unexpectedly succeeded")
	}
	if !reExpectedDeniedError.MatchString(err.Error()) {
		return "", fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), err.Error())
	}

	return "", nil
}
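// exampleBuildAndPush is a hedged usage sketch, not from the original source. The oc
// and dClient values are assumed to come from whatever helpers the test suite already
// uses; the image name, tag, 16 MiB size, and 2-layer count are arbitrary placeholders.
func exampleBuildAndPush(oc *exutil.CLI, dClient *dockerclient.Client) error {
	// Expect the push to succeed and capture the resulting image digest.
	digest, err := BuildAndPushImageOfSizeWithDocker(oc, dClient, "sample", "latest", 16*1024*1024, 2, os.Stdout, true)
	if err != nil {
		return err
	}
	fmt.Printf("pushed image digest: %s\n", digest)
	return nil
}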
func conformanceTester(t *testing.T, c *docker.Client, test conformanceTest, i int, deep bool) {
	dockerfile := test.Dockerfile
	if len(dockerfile) == 0 {
		dockerfile = "Dockerfile"
	}
	tmpDir, err := ioutil.TempDir("", "dockerbuild-conformance-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	dir := tmpDir
	contextDir := filepath.Join(dir, test.ContextDir)
	dockerfilePath := filepath.Join(dir, dockerfile)

	// clone repo or copy the Dockerfile
	var input string
	switch {
	case len(test.Git) > 0:
		input = test.Git
		cmd := exec.Command("git", "clone", test.Git, dir)
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Errorf("unable to clone %q: %v\n%s", test.Git, err, out)
			return
		}
		if test.PostClone != nil {
			if err := test.PostClone(dir); err != nil {
				t.Errorf("unable to fixup clone: %v", err)
				return
			}
		}
	case len(test.Dockerfile) > 0:
		input = dockerfile
		dockerfilePath = filepath.Join(dir, "Dockerfile")
		if _, err := fileutils.CopyFile(filepath.Join("", dockerfile), dockerfilePath); err != nil {
			t.Fatal(err)
		}
		dockerfile = "Dockerfile"
	default:
		input = filepath.Join(test.ContextDir, dockerfile)
		dockerfilePath = input
		contextDir = test.ContextDir
		dir = test.ContextDir
	}

	// read the dockerfile
	data, err := ioutil.ReadFile(dockerfilePath)
	if err != nil {
		t.Errorf("%d: unable to read Dockerfile %q: %v", i, input, err)
		return
	}
	node, err := parser.Parse(bytes.NewBuffer(data))
	if err != nil {
		t.Errorf("%d: can't parse Dockerfile %q: %v", i, input, err)
		return
	}
	from, err := NewBuilder().From(node)
	if err != nil {
		t.Errorf("%d: can't get base FROM %q: %v", i, input, err)
		return
	}
	nameFormat := "conformance-dockerbuild-%d-%s-%d"

	var toDelete []string

	steps := node.Children
	lastImage := from

	ignoreSmallFileChange := func(a, b *tar.Header) bool {
		if a == nil || b == nil {
			return false
		}
		diff := a.Size - b.Size
		if differOnlyByFileSize(a, b, 10) {
			t.Logf("WARNING: %s differs only in size by %d bytes, probably a timestamp value change", a.Name, diff)
			return true
		}
		return false
	}

	if deep {
		// execute each step on both Docker build and the direct builder, comparing as we go
		fail := false
		for j := range steps {
			testFile := dockerfileWithFrom(lastImage, steps[j:j+1])

			nameDirect := fmt.Sprintf(nameFormat, i, "direct", j)
			nameDocker := fmt.Sprintf(nameFormat, i, "docker", j)

			// run docker build
			if err := ioutil.WriteFile(dockerfilePath, []byte(testFile), 0600); err != nil {
				t.Errorf("%d: unable to update Dockerfile %q: %v", i, dockerfilePath, err)
				break
			}
			in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}})
			if err != nil {
				t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
				break
			}
			out := &bytes.Buffer{}
			if err := c.BuildImage(docker.BuildImageOptions{
				Name:                nameDocker,
				Dockerfile:          dockerfile,
				RmTmpContainer:      true,
				ForceRmTmpContainer: true,
				InputStream:         in,
				OutputStream:        out,
			}); err != nil {
				in.Close()
				t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, test.Git, err, out)
				break
			}
			toDelete = append(toDelete, nameDocker)

			// run direct build
			e := NewClientExecutor(c)
			out = &bytes.Buffer{}
			e.Out, e.ErrOut = out, out
			e.Directory = contextDir
			e.Tag = nameDirect
			if err := e.Build(bytes.NewBufferString(testFile), nil); err != nil {
				t.Errorf("%d: failed to build step %d in dockerfile %q: %s\n%s", i, j, dockerfilePath, steps[j].Original, out)
				break
			}
			toDelete = append(toDelete, nameDirect)

			// only compare filesystem on layers that change the filesystem
			mutation := steps[j].Value == command.Add || steps[j].Value == command.Copy || steps[j].Value == command.Run
			// metadata must be strictly equal
			if !equivalentImages(
				t, c, nameDocker, nameDirect, mutation,
				metadataEqual,
				append(ignoreFuncs{ignoreSmallFileChange}, test.Ignore...)...,
			) {
				t.Errorf("%d: layered Docker build was not equivalent to direct layer image metadata %s", i, input)
				fail = true
			}

			lastImage = nameDocker
		}
		if fail {
			t.Fatalf("%d: Conformance test failed for %s", i, input)
		}
	} else {
		exclude, _ := ParseDockerignore(dir)
		//exclude = append(filtered, ".dockerignore")
		in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}, ExcludePatterns: exclude})
		if err != nil {
			t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
			return
		}
		out := &bytes.Buffer{}
		nameDocker := fmt.Sprintf(nameFormat, i, "docker", 0)
		if err := c.BuildImage(docker.BuildImageOptions{
			Name:                nameDocker,
			Dockerfile:          dockerfile,
			RmTmpContainer:      true,
			ForceRmTmpContainer: true,
			InputStream:         in,
			OutputStream:        out,
		}); err != nil {
			in.Close()
			t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, test.Git, err, out)
			return
		}
		lastImage = nameDocker
		toDelete = append(toDelete, nameDocker)
	}

	// if we ran more than one step, compare the squashed output with the docker build output
	if len(steps) > 1 || !deep {
		nameDirect := fmt.Sprintf(nameFormat, i, "direct", len(steps)-1)
		e := NewClientExecutor(c)
		out := &bytes.Buffer{}
		e.Out, e.ErrOut = out, out
		e.Directory = contextDir
		e.Tag = nameDirect
		if err := e.Build(bytes.NewBuffer(data), nil); err != nil {
			t.Errorf("%d: failed to build complete image in %q: %v\n%s", i, input, err, out)
		} else {
			if !equivalentImages(
				t, c, lastImage, nameDirect, true,
				// metadata should be loosely equivalent, but because we squash and because of limitations
				// in docker commit, there are some differences
				metadataLayerEquivalent,
				append(ignoreFuncs{
					ignoreSmallFileChange,
					// the direct dockerfile contains all steps, the layered image is synthetic from our previous
					// test and so only contains the last layer
					ignoreDockerfileSize(dockerfile),
				}, test.Ignore...)...,
			) {
				t.Errorf("%d: full Docker build was not equivalent to squashed image metadata %s", i, input)
			}
		}
	}

	for _, s := range toDelete {
		c.RemoveImageExtended(s, docker.RemoveImageOptions{Force: true})
	}
}
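// TestConformanceExample is a hedged sketch (not from the original source) of how
// conformanceTester might be driven from a table-driven test. The conformanceTest
// fields used here (ContextDir, Dockerfile) appear in the function above; the test
// fixture paths are placeholders, and the client comes from the docker environment.
func TestConformanceExample(t *testing.T) {
	c, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatal(err)
	}
	tests := []conformanceTest{
		{ContextDir: "testdata/simple"},
		{Dockerfile: "testdata/Dockerfile.env"},
	}
	for i, test := range tests {
		// deep=false compares only the final squashed image rather than every layer.
		conformanceTester(t, c, test, i, false)
	}
}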