// BuildAndPushImageOfSizeWithBuilder tries to build an image of wanted size and number of layers. Built image // is stored as an image stream tag <name>:<tag>. If shouldSucceed is false, a build is expected to fail with // a denied error. Note the size is only approximate. Resulting image size will be different depending on used // compression algorithm and metadata overhead. func BuildAndPushImageOfSizeWithBuilder( oc *exutil.CLI, dClient *dockerclient.Client, namespace, name, tag string, size uint64, numberOfLayers int, shouldSucceed bool, ) error { istName := name if tag != "" { istName += ":" + tag } bc, err := oc.REST().BuildConfigs(namespace).Get(name) if err == nil { if bc.Spec.CommonSpec.Output.To.Kind != "ImageStreamTag" { return fmt.Errorf("Unexpected kind of buildspec's output (%s != %s)", bc.Spec.CommonSpec.Output.To.Kind, "ImageStreamTag") } bc.Spec.CommonSpec.Output.To.Name = istName if _, err = oc.REST().BuildConfigs(namespace).Update(bc); err != nil { return err } } else { err = oc.Run("new-build").Args("--binary", "--name", name, "--to", istName).Execute() if err != nil { return err } } tempDir, err := ioutil.TempDir("", "name-build") if err != nil { return err } dataSize := calculateRoughDataSize(oc.Stdout(), size, numberOfLayers) lines := make([]string, numberOfLayers+1) lines[0] = "FROM scratch" for i := 1; i <= numberOfLayers; i++ { blobName := fmt.Sprintf("data%d", i) if err := createRandomBlob(path.Join(tempDir, blobName), dataSize); err != nil { return err } lines[i] = fmt.Sprintf("COPY %s /%s", blobName, blobName) } if err := ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte(strings.Join(lines, "\n")+"\n"), 0644); err != nil { return err } out, err := oc.Run("start-build").Args(name, "--from-dir", tempDir, "--wait").Output() fmt.Fprintf(g.GinkgoWriter, "\nstart-build output:\n%s\n", out) buildLog, logsErr := oc.Run("logs").Args("bc/" + name).Output() if match := reSuccessfulBuild.FindStringSubmatch(buildLog); len(match) > 1 { defer 
dClient.RemoveImageExtended(match[1], dockerclient.RemoveImageOptions{Force: true}) } if shouldSucceed && err != nil { return fmt.Errorf("Got unexpected build error: %v", err) } if !shouldSucceed { if err == nil { return fmt.Errorf("Build unexpectedly succeeded") } if logsErr != nil { return fmt.Errorf("Failed to show log of build config %s: %v", name, err) } if !reExpectedDeniedError.MatchString(buildLog) { return fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), buildLog) } } return nil }
func syncImages(client *dockerclient.Client, imageRoot string) error { logrus.Debugf("Syncing images from %s", imageRoot) f, err := os.Open(filepath.Join(imageRoot, "images.json")) if err != nil { return fmt.Errorf("error opening image json file: %v", err) } defer f.Close() var m tagMap if err := json.NewDecoder(f).Decode(&m); err != nil { return fmt.Errorf("error decoding images json: %v", err) } allTags := map[string]struct{}{} neededImages := map[string]struct{}{} for imageID, tags := range m { neededImages[imageID] = struct{}{} for _, t := range tags { allTags[t] = struct{}{} } } images, err := client.ListImages(dockerclient.ListImagesOptions{}) if err != nil { return fmt.Errorf("error listing images: %v", err) } for _, img := range images { expectedTags, ok := m[img.ID] if ok { delete(neededImages, img.ID) repoTags := filterRepoTags(img.RepoTags) logrus.Debugf("Tags for %s: %#v", img.ID, repoTags) // Sync tags for image ID removedTags, addedTags := listDiff(repoTags, expectedTags) for _, t := range addedTags { if err := tagImage(client, img.ID, t); err != nil { return err } } for _, t := range removedTags { // Check if this image tag conflicts with an expected // tag, in which case force tag will update if _, ok := allTags[t]; !ok { logrus.Debugf("Removing tag %s", t) if err := client.RemoveImage(t); err != nil { return fmt.Errorf("error removing tag %s: %v", t, err) } } } } else { removeOptions := dockerclient.RemoveImageOptions{ Force: true, } if err := client.RemoveImageExtended(img.ID, removeOptions); err != nil { return fmt.Errorf("error moving image %s: %v", img.ID, err) } } } for imageID := range neededImages { tags, ok := m[imageID] if !ok { return fmt.Errorf("missing image %s in tag map", imageID) } _, err := client.InspectImage(imageID) if err != nil { tf, err := os.Open(filepath.Join(imageRoot, imageID+".tar")) if err != nil { return fmt.Errorf("error opening image tar %s: %v", imageID, err) } defer tf.Close() loadOptions := 
dockerclient.LoadImageOptions{ InputStream: tf, } if err := client.LoadImage(loadOptions); err != nil { return fmt.Errorf("error loading image %s: %v", imageID, err) } } for _, t := range tags { if err := tagImage(client, imageID, t); err != nil { return err } } } return nil }
// BuildAndPushImageOfSizeWithDocker tries to build an image of wanted size and number of layers. It instructs // Docker daemon directly. Built image is stored as an image stream tag <name>:<tag>. If shouldSucceed is // false, a push is expected to fail with a denied error. Note the size is only approximate. Resulting image // size will be different depending on used compression algorithm and metadata overhead. func BuildAndPushImageOfSizeWithDocker( oc *exutil.CLI, dClient *dockerclient.Client, name, tag string, size uint64, numberOfLayers int, outSink io.Writer, shouldSucceed bool, ) (imageDigest string, err error) { registryURL, err := GetDockerRegistryURL(oc) if err != nil { return "", err } tempDir, err := ioutil.TempDir("", "name-build") if err != nil { return "", err } dataSize := calculateRoughDataSize(oc.Stdout(), size, numberOfLayers) lines := make([]string, numberOfLayers+1) lines[0] = "FROM scratch" for i := 1; i <= numberOfLayers; i++ { blobName := fmt.Sprintf("data%d", i) if err := createRandomBlob(path.Join(tempDir, blobName), dataSize); err != nil { return "", err } lines[i] = fmt.Sprintf("COPY %s /%s", blobName, blobName) } if err := ioutil.WriteFile(path.Join(tempDir, "Dockerfile"), []byte(strings.Join(lines, "\n")+"\n"), 0644); err != nil { return "", err } imageName := fmt.Sprintf("%s/%s/%s", registryURL, oc.Namespace(), name) taggedName := fmt.Sprintf("%s:%s", imageName, tag) err = dClient.BuildImage(dockerclient.BuildImageOptions{ Name: taggedName, RmTmpContainer: true, ForceRmTmpContainer: true, ContextDir: tempDir, OutputStream: outSink, }) if err != nil { return "", fmt.Errorf("failed to build %q image: %v", taggedName, err) } image, err := dClient.InspectImage(taggedName) if err != nil { return "", err } defer dClient.RemoveImageExtended(image.ID, dockerclient.RemoveImageOptions{Force: true}) digest := "" if len(image.RepoDigests) == 1 { digest = image.RepoDigests[0] } out, err := oc.Run("whoami").Args("-t").Output() if err != nil { return 
"", err } token := strings.TrimSpace(out) var buf bytes.Buffer err = dClient.PushImage(dockerclient.PushImageOptions{ Name: imageName, Tag: tag, Registry: registryURL, OutputStream: &buf, }, dockerclient.AuthConfiguration{ Username: "******", Password: token, Email: "*****@*****.**", ServerAddress: registryURL, }) out = buf.String() outSink.Write([]byte(out)) if shouldSucceed { if err != nil { return "", fmt.Errorf("Got unexpected push error: %v", err) } if len(digest) == 0 { outSink.Write([]byte("matching digest string\n")) match := rePushedImageDigest.FindStringSubmatch(out) if len(match) < 2 { return "", fmt.Errorf("Failed to parse digest") } digest = match[1] } return digest, nil } if err == nil { return "", fmt.Errorf("Push unexpectedly succeeded") } if !reExpectedDeniedError.MatchString(err.Error()) { return "", fmt.Errorf("Failed to match expected %q in: %q", reExpectedDeniedError.String(), err.Error()) } return "", nil }
// conformanceTester runs one conformance test case: it builds the test's
// Dockerfile with the Docker daemon and with the direct (client-side) builder,
// then asserts the resulting images are equivalent. With deep=true every
// Dockerfile step is built and compared individually; otherwise only the final
// images are compared. i is the test's index, used in image names and messages.
func conformanceTester(t *testing.T, c *docker.Client, test conformanceTest, i int, deep bool) {
	dockerfile := test.Dockerfile
	if len(dockerfile) == 0 {
		dockerfile = "Dockerfile"
	}
	tmpDir, err := ioutil.TempDir("", "dockerbuild-conformance-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)
	dir := tmpDir
	contextDir := filepath.Join(dir, test.ContextDir)
	dockerfilePath := filepath.Join(dir, dockerfile)

	// clone repo or copy the Dockerfile; `input` is a human-readable label for
	// the test's source, used in failure messages.
	var input string
	switch {
	case len(test.Git) > 0:
		// Source is a git repository: clone into the temp dir.
		input = test.Git
		cmd := exec.Command("git", "clone", test.Git, dir)
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Errorf("unable to clone %q: %v\n%s", test.Git, err, out)
			return
		}
		if test.PostClone != nil {
			if err := test.PostClone(dir); err != nil {
				t.Errorf("unable to fixup clone: %v", err)
				return
			}
		}
	case len(test.Dockerfile) > 0:
		// Source is a standalone Dockerfile: copy it into the temp dir.
		input = dockerfile
		dockerfilePath = filepath.Join(dir, "Dockerfile")
		if _, err := fileutils.CopyFile(filepath.Join("", dockerfile), dockerfilePath); err != nil {
			t.Fatal(err)
		}
		dockerfile = "Dockerfile"
	default:
		// Source is an existing local context directory; build in place.
		input = filepath.Join(test.ContextDir, dockerfile)
		dockerfilePath = input
		contextDir = test.ContextDir
		dir = test.ContextDir
	}

	// read the dockerfile and resolve its base image (FROM).
	data, err := ioutil.ReadFile(dockerfilePath)
	if err != nil {
		t.Errorf("%d: unable to read Dockerfile %q: %v", i, input, err)
		return
	}
	node, err := parser.Parse(bytes.NewBuffer(data))
	if err != nil {
		t.Errorf("%d: can't parse Dockerfile %q: %v", i, input, err)
		return
	}
	from, err := NewBuilder().From(node)
	if err != nil {
		t.Errorf("%d: can't get base FROM %q: %v", i, input, err)
		return
	}
	nameFormat := "conformance-dockerbuild-%d-%s-%d"
	var toDelete []string
	steps := node.Children
	lastImage := from
	// ignoreSmallFileChange tolerates tiny size-only diffs between tar entries
	// (e.g. an embedded timestamp string changing length).
	ignoreSmallFileChange := func(a, b *tar.Header) bool {
		if a == nil || b == nil {
			return false
		}
		diff := a.Size - b.Size
		if differOnlyByFileSize(a, b, 10) {
			t.Logf("WARNING: %s differs only in size by %d bytes, probably a timestamp value change", a.Name, diff)
			return true
		}
		return false
	}

	if deep {
		// execute each step on both Docker build and the direct builder, comparing as we
		// go
		fail := false
		for j := range steps {
			// Build a one-step Dockerfile: FROM <previous image> + step j.
			testFile := dockerfileWithFrom(lastImage, steps[j:j+1])
			nameDirect := fmt.Sprintf(nameFormat, i, "direct", j)
			nameDocker := fmt.Sprintf(nameFormat, i, "docker", j)

			// run docker build
			if err := ioutil.WriteFile(dockerfilePath, []byte(testFile), 0600); err != nil {
				t.Errorf("%d: unable to update Dockerfile %q: %v", i, dockerfilePath, err)
				break
			}
			in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}})
			if err != nil {
				t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
				break
			}
			out := &bytes.Buffer{}
			if err := c.BuildImage(docker.BuildImageOptions{
				Name:                nameDocker,
				Dockerfile:          dockerfile,
				RmTmpContainer:      true,
				ForceRmTmpContainer: true,
				InputStream:         in,
				OutputStream:        out,
			}); err != nil {
				in.Close()
				t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, test.Git, err, out)
				break
			}
			toDelete = append(toDelete, nameDocker)

			// run direct build
			e := NewClientExecutor(c)
			out = &bytes.Buffer{}
			e.Out, e.ErrOut = out, out
			e.Directory = contextDir
			e.Tag = nameDirect
			if err := e.Build(bytes.NewBufferString(testFile), nil); err != nil {
				// NOTE(review): `err` is checked but not included in this
				// message — only the step text and build output are printed.
				t.Errorf("%d: failed to build step %d in dockerfile %q: %s\n%s", i, j, dockerfilePath, steps[j].Original, out)
				break
			}
			toDelete = append(toDelete, nameDirect)

			// only compare filesystem on layers that change the filesystem
			mutation := steps[j].Value == command.Add || steps[j].Value == command.Copy || steps[j].Value == command.Run
			// metadata must be strictly equal
			if !equivalentImages(
				t, c, nameDocker, nameDirect, mutation, metadataEqual,
				append(ignoreFuncs{ignoreSmallFileChange}, test.Ignore...)...,
			) {
				t.Errorf("%d: layered Docker build was not equivalent to direct layer image metadata %s", i, input)
				fail = true
			}

			// Chain the next step on top of the Docker-built layer.
			lastImage = nameDocker
		}
		if fail {
			t.Fatalf("%d: Conformance test failed for %s", i, input)
		}
	} else {
		// Shallow mode: single Docker build of the whole Dockerfile, honoring
		// .dockerignore. The ParseDockerignore error is deliberately ignored
		// (a missing file yields no exclusions).
		exclude, _ := ParseDockerignore(dir)
		//exclude = append(filtered, ".dockerignore")
		in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}, ExcludePatterns: exclude})
		if err != nil {
			t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
			return
		}
		out := &bytes.Buffer{}
		nameDocker := fmt.Sprintf(nameFormat, i, "docker", 0)
		if err := c.BuildImage(docker.BuildImageOptions{
			Name:                nameDocker,
			Dockerfile:          dockerfile,
			RmTmpContainer:      true,
			ForceRmTmpContainer: true,
			InputStream:         in,
			OutputStream:        out,
		}); err != nil {
			in.Close()
			t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, test.Git, err, out)
			return
		}
		lastImage = nameDocker
		toDelete = append(toDelete, nameDocker)
	}

	// if we ran more than one step, compare the squashed output with the docker build output
	if len(steps) > 1 || !deep {
		nameDirect := fmt.Sprintf(nameFormat, i, "direct", len(steps)-1)
		e := NewClientExecutor(c)
		out := &bytes.Buffer{}
		e.Out, e.ErrOut = out, out
		e.Directory = contextDir
		e.Tag = nameDirect
		if err := e.Build(bytes.NewBuffer(data), nil); err != nil {
			t.Errorf("%d: failed to build complete image in %q: %v\n%s", i, input, err, out)
		} else {
			if !equivalentImages(
				t, c, lastImage, nameDirect, true,
				// metadata should be loosely equivalent, but because we squash and because of limitations
				// in docker commit, there are some differences
				metadataLayerEquivalent,
				append(ignoreFuncs{
					ignoreSmallFileChange,
					// the direct dockerfile contains all steps, the layered image is synthetic from our previous
					// test and so only contains the last layer
					ignoreDockerfileSize(dockerfile),
				}, test.Ignore...)...,
			) {
				t.Errorf("%d: full Docker build was not equivalent to squashed image metadata %s", i, input)
			}
		}
	}

	// Best-effort cleanup of every image this test created.
	for _, s := range toDelete {
		c.RemoveImageExtended(s, docker.RemoveImageOptions{Force: true})
	}
}