func writeReadTar(t *testing.T, tmpDir string, tarStream io.ReadCloser) string {
	data, err := ioutil.ReadAll(tarStream)
	if err != nil {
		t.Fatal(err)
	}
	defer tarStream.Close()

	tarSum, err := tarsum.NewTarSum(bytes.NewReader(data), true, tarsum.Version1)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
		t.Fatal(err)
	}
	t.Logf("tarsum: %s", tarSum.Sum(nil))

	if err := ioutil.WriteFile(tmpDir+"/archive.tar", data, 0644); err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir + "/archive.tar")

	cmd := exec.Command("tar", "-tf", tmpDir+"/archive.tar")
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}
	return string(out)
}
func (b *builder) readContext(context io.Reader) (err error) {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return
	}

	// Make sure we clean-up upon error. In the happy case the caller
	// is expected to manage the clean-up
	defer func() {
		if err != nil {
			if e := os.RemoveAll(tmpdirPath); e != nil {
				logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
			}
		}
	}()

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
		return
	}

	if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return
	}

	b.contextPath = tmpdirPath
	return
}
// MakeTarSumContext returns a build Context from a tar stream.
//
// It extracts the tar stream to a temporary folder that is deleted as soon as
// the Context is closed.
// As the extraction happens, a tarsum is calculated for every file, and the set of
// all those sums then becomes the source of truth for all operations on this Context.
//
// Closing tarStream has to be done by the caller.
func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) {
	root, err := ioutils.TempDir("", "docker-builder")
	if err != nil {
		return nil, err
	}

	tsc := &tarSumContext{root: root}

	// Make sure we clean-up upon error. In the happy case the caller
	// is expected to manage the clean-up
	defer func() {
		if err != nil {
			tsc.Close()
		}
	}()

	decompressedStream, err := archive.DecompressStream(tarStream)
	if err != nil {
		return nil, err
	}

	sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
	if err != nil {
		return nil, err
	}

	if err := chrootarchive.Untar(sum, root, nil); err != nil {
		return nil, err
	}

	tsc.sums = sum.GetSums()

	return tsc, nil
}
// StoreImage stores file system layer data for the given image to the
// image's registered storage driver. Image metadata is stored in a file
// at the specified root directory. This function also computes the TarSum
// of `layerData` (currently using tarsum.dev).
func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error {
	// Store the layer
	var (
		size        int64
		err         error
		driver      = img.graph.Driver()
		layerTarSum tarsum.TarSum
	)

	// If layerData is not nil, unpack it into the new layer
	if layerData != nil {
		// If the image doesn't have a checksum, we should add it. The layer
		// checksums are verified when they are pulled from a remote, but when
		// a container is committed it should be added here.
		if img.Checksum == "" {
			layerDataDecompressed, err := archive.DecompressStream(layerData)
			if err != nil {
				return err
			}
			defer layerDataDecompressed.Close()

			if layerTarSum, err = tarsum.NewTarSum(layerDataDecompressed, true, tarsum.VersionDev); err != nil {
				return err
			}

			if size, err = driver.ApplyDiff(img.ID, img.Parent, layerTarSum); err != nil {
				return err
			}

			img.Checksum = layerTarSum.Sum(nil)
		} else if size, err = driver.ApplyDiff(img.ID, img.Parent, layerData); err != nil {
			return err
		}
	}

	img.Size = size
	if err := img.SaveSize(root); err != nil {
		return err
	}

	f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
	if err != nil {
		return err
	}
	defer f.Close()

	return json.NewEncoder(f).Encode(img)
}
// StoreImage stores file system layer data for the given image to the
// image's registered storage driver. Image metadata is stored in a file
// at the specified root directory. This function also computes the TarSum
// of `layerData` (currently using tarsum.dev).
func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error {
	// Store the layer
	var (
		size        int64
		err         error
		driver      = img.graph.Driver()
		layerTarSum tarsum.TarSum
	)

	// If layerData is not nil, unpack it into the new layer
	if layerData != nil {
		layerDataDecompressed, err := archive.DecompressStream(layerData)
		if err != nil {
			return err
		}
		defer layerDataDecompressed.Close()

		if layerTarSum, err = tarsum.NewTarSum(layerDataDecompressed, true, tarsum.VersionDev); err != nil {
			return err
		}

		if size, err = driver.ApplyDiff(img.ID, img.Parent, layerTarSum); err != nil {
			return err
		}

		checksum := layerTarSum.Sum(nil)
		if img.Checksum != "" && img.Checksum != checksum {
			log.Warnf("image layer checksum mismatch: computed %q, expected %q", checksum, img.Checksum)
		}
		img.Checksum = checksum
	}

	img.Size = size
	if err := img.SaveSize(root); err != nil {
		return err
	}

	f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
	if err != nil {
		return err
	}
	defer f.Close()

	return json.NewEncoder(f).Encode(img)
}
// NewDigestVerifier returns a verifier that compares the written bytes
// against a passed in digest.
func NewDigestVerifier(d Digest) (Verifier, error) {
	if err := d.Validate(); err != nil {
		return nil, err
	}

	alg := d.Algorithm()
	switch alg {
	case "sha256", "sha384", "sha512":
		return hashVerifier{
			hash:   alg.Hash(),
			digest: d,
		}, nil
	default:
		// Assume we have a tarsum.
		version, err := tarsum.GetVersionFromTarsum(string(d))
		if err != nil {
			return nil, err
		}

		pr, pw := io.Pipe()

		// TODO(stevvooe): We may actually want to ban the earlier versions of
		// tarsum. That decision may not be the place of the verifier.

		ts, err := tarsum.NewTarSum(pr, true, version)
		if err != nil {
			return nil, err
		}

		// TODO(sday): Ick! A goroutine per digest verification? We'll have to
		// get the tarsum library to export an io.Writer variant.
		go func() {
			if _, err := io.Copy(ioutil.Discard, ts); err != nil {
				pr.CloseWithError(err)
			} else {
				pr.Close()
			}
		}()

		return &tarsumVerifier{
			digest: d,
			ts:     ts,
			pr:     pr,
			pw:     pw,
		}, nil
	}
}
// FromTarArchive produces a tarsum digest from reader rd.
func FromTarArchive(rd io.Reader) (Digest, error) {
	ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1)
	if err != nil {
		return "", err
	}

	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		return "", err
	}

	d, err := ParseDigest(ts.Sum(nil))
	if err != nil {
		return "", err
	}

	return d, nil
}
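// The snippets above and below all rely on the same pattern: wrap a tar
// stream in a tarsum reader, drain the reader, then ask for the sum.
// What follows is a minimal standalone sketch of that pattern; the archive
// path and the program itself are hypothetical and only illustrate how
// tarsum.NewTarSum, io.Copy, and Sum fit together (the import path assumes
// Docker's pkg/tarsum package).
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	// Hypothetical path to an existing tar archive.
	f, err := os.Open("/tmp/example.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// tarsum hashes the archive as it is read, so the stream must be fully
	// consumed before Sum is meaningful.
	ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		log.Fatal(err)
	}

	fmt.Println(ts.Sum(nil))
}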
func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
	u := registry + "images/" + imgID + "/layer"

	logrus.Debugf("[registry] Calling PUT %s", u)

	tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0)
	if err != nil {
		return "", "", err
	}
	h := sha256.New()
	h.Write(jsonRaw)
	h.Write([]byte{'\n'})
	checksumLayer := io.TeeReader(tarsumLayer, h)

	req, err := http.NewRequest("PUT", u, checksumLayer)
	if err != nil {
		return "", "", err
	}
	req.Header.Add("Content-Type", "application/octet-stream")
	req.ContentLength = -1
	req.TransferEncoding = []string{"chunked"}
	res, err := r.client.Do(req)
	if err != nil {
		return "", "", fmt.Errorf("Failed to upload layer: %v", err)
	}
	if rc, ok := layer.(io.Closer); ok {
		if err := rc.Close(); err != nil {
			return "", "", err
		}
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
		}
		return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res)
	}

	checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil))
	return tarsumLayer.Sum(jsonRaw), checksumPayload, nil
}
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		return err
	}

	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}
// SumTarLayerVersioned computes the tarsum of tarReader using version v.
// If out is not nil, then the tar input is written there instead of being
// discarded; any bytes read from json are passed to Sum.
func SumTarLayerVersioned(tarReader io.Reader, json io.Reader, out io.Writer, v tarsum.Version) (string, error) {
	var writer io.Writer = ioutil.Discard
	if out != nil {
		writer = out
	}

	ts, err := tarsum.NewTarSum(tarReader, false, v)
	if err != nil {
		return "", err
	}

	_, err = io.Copy(writer, ts)
	if err != nil {
		return "", err
	}

	var buf []byte
	if json != nil {
		if buf, err = ioutil.ReadAll(json); err != nil {
			return "", err
		}
	}

	return ts.Sum(buf), nil
}
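// A minimal usage sketch for SumTarLayerVersioned above, assuming a tar
// archive is available on disk; the file path and this example function are
// hypothetical, and the fmt, log, os, and tarsum imports are assumed to be
// present in the surrounding package.
func ExampleSumTarLayerVersioned() {
	f, err := os.Open("/tmp/layer.tar") // hypothetical archive
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// No accompanying JSON and no tee writer: the tar stream is simply
	// drained while the checksum is computed.
	sum, err := SumTarLayerVersioned(f, nil, nil, tarsum.Version1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum)
}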
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !filepath.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, "/")
		destPath = filepath.Join("/", b.Config.WorkingDir, destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += "/"
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
	progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
	// Download and dump result to tmp file
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)
	// ignoring error because the file was already opened successfully
	tmpFileSt, err := tmpFile.Stat()
	if err != nil {
		return
	}
	tmpFile.Close()

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	// Calc the checksum, even if we're using the cache
	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
	if err != nil {
		return
	}
	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
	if err != nil {
		return
	}
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return
	}
	hash := tarSum.Sum(nil)
	r.Close()
	return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
}
func (s *TagStore) newManifest(localName, remoteName, tag string) ([]byte, error) {
	manifest := &registry.ManifestData{
		Name:          remoteName,
		Tag:           tag,
		SchemaVersion: 1,
	}
	localRepo, err := s.Get(localName)
	if err != nil {
		return nil, err
	}
	if localRepo == nil {
		return nil, fmt.Errorf("Repo does not exist: %s", localName)
	}

	// Get the top-most layer id which the tag points to
	layerId, exists := localRepo[tag]
	if !exists {
		return nil, fmt.Errorf("Tag does not exist for %s: %s", localName, tag)
	}
	layersSeen := make(map[string]bool)

	layer, err := s.graph.Get(layerId)
	if err != nil {
		return nil, err
	}
	manifest.Architecture = layer.Architecture
	manifest.FSLayers = make([]*registry.FSLayer, 0, 4)
	manifest.History = make([]*registry.ManifestHistory, 0, 4)
	var metadata runconfig.Config
	if layer.Config != nil {
		metadata = *layer.Config
	}

	for ; layer != nil; layer, err = layer.GetParent() {
		if err != nil {
			return nil, err
		}

		if layersSeen[layer.ID] {
			break
		}
		if layer.Config != nil && metadata.Image != layer.ID {
			err = runconfig.Merge(&metadata, layer.Config)
			if err != nil {
				return nil, err
			}
		}

		checksum, err := layer.GetCheckSum(s.graph.ImageRoot(layer.ID))
		if err != nil {
			return nil, fmt.Errorf("Error getting image checksum: %s", err)
		}
		if tarsum.VersionLabelForChecksum(checksum) != tarsum.Version1.String() {
			archive, err := layer.TarLayer()
			if err != nil {
				return nil, err
			}

			tarSum, err := tarsum.NewTarSum(archive, true, tarsum.Version1)
			if err != nil {
				return nil, err
			}
			if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
				return nil, err
			}

			checksum = tarSum.Sum(nil)

			// Save checksum value
			if err := layer.SaveCheckSum(s.graph.ImageRoot(layer.ID), checksum); err != nil {
				return nil, err
			}
		}

		jsonData, err := layer.RawJson()
		if err != nil {
			return nil, fmt.Errorf("Cannot retrieve the path for {%s}: %s", layer.ID, err)
		}

		manifest.FSLayers = append(manifest.FSLayers, &registry.FSLayer{BlobSum: checksum})

		layersSeen[layer.ID] = true

		manifest.History = append(manifest.History, &registry.ManifestHistory{V1Compatibility: string(jsonData)})
	}

	manifestBytes, err := json.MarshalIndent(manifest, "", " ")
	if err != nil {
		return nil, err
	}

	return manifestBytes, nil
}
func calcCopyInfo(b *Builder, cmdName string, ci *copyInfo, allowRemote bool, allowDecompression bool) error {
	var (
		remoteHash string
		isRemote   bool
	)

	saveOrig := ci.origPath
	isRemote = utils.IsURL(ci.origPath)

	if isRemote && !allowRemote {
		return fmt.Errorf("Source can't be an URL for %s", cmdName)
	} else if isRemote {
		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Remove the mtime of the newly created tmp file
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(saveOrig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}
	}

	if err := b.checkPathForAddition(ci.origPath); err != nil {
		return err
	}

	// Hash path and check the cache
	if b.UtilizeCache {
		var (
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			ci.hashPath = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, ci.origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			var subfiles []string
			for _, fileInfo := range sums {
				absFile := path.Join(b.contextPath, fileInfo.Name())
				absOrigPath := path.Join(b.contextPath, ci.origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, fileInfo.Sum())
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			ci.hashPath = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			if ci.origPath[0] == '/' && len(ci.origPath) > 1 {
				ci.origPath = ci.origPath[1:]
			}
			ci.origPath = strings.TrimPrefix(ci.origPath, "./")
			// This will match on the first file in sums of the archive
			if fis := sums.GetFile(ci.origPath); fis != nil {
				ci.hashPath = "file:" + fis.Sum()
			}
		}
	}

	if !allowDecompression || isRemote {
		ci.decompress = false
	}
	return nil
}
// CreateRandomTarFile creates a random tarfile, returning it as an
// io.ReadSeeker along with its tarsum. An error is returned if there is a
// problem generating valid content.
func CreateRandomTarFile() (rs io.ReadSeeker, tarSum string, err error) {
	nFiles := mrand.Intn(10) + 10
	target := &bytes.Buffer{}
	wr := tar.NewWriter(target)

	// Perturb this on each iteration of the loop below.
	header := &tar.Header{
		Mode:       0644,
		ModTime:    time.Now(),
		Typeflag:   tar.TypeReg,
		Uname:      "randocalrissian",
		Gname:      "cloudcity",
		AccessTime: time.Now(),
		ChangeTime: time.Now(),
	}

	for fileNumber := 0; fileNumber < nFiles; fileNumber++ {
		fileSize := mrand.Int63n(1<<20) + 1<<20

		header.Name = fmt.Sprint(fileNumber)
		header.Size = fileSize

		if err := wr.WriteHeader(header); err != nil {
			return nil, "", err
		}

		randomData := make([]byte, fileSize)

		// Fill up the buffer with some random data.
		n, err := rand.Read(randomData)

		if n != len(randomData) {
			return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData))
		}

		if err != nil {
			return nil, "", err
		}

		nn, err := io.Copy(wr, bytes.NewReader(randomData))
		if nn != fileSize {
			return nil, "", fmt.Errorf("short copy writing random file to tar")
		}

		if err != nil {
			return nil, "", err
		}

		if err := wr.Flush(); err != nil {
			return nil, "", err
		}
	}

	if err := wr.Close(); err != nil {
		return nil, "", err
	}

	reader := bytes.NewReader(target.Bytes())

	// A tar builder that supports tarsum inline calculation would be awesome
	// here.
	ts, err := tarsum.NewTarSum(reader, true, tarsum.Version1)
	if err != nil {
		return nil, "", err
	}

	nn, err := io.Copy(ioutil.Discard, ts)
	if nn != int64(len(target.Bytes())) {
		return nil, "", fmt.Errorf("short copy when getting tarsum of random layer: %v != %v", nn, len(target.Bytes()))
	}

	if err != nil {
		return nil, "", err
	}

	return bytes.NewReader(target.Bytes()), ts.Sum(nil), nil
}
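// A short sketch of how CreateRandomTarFile above might be exercised in a
// test; the test name is hypothetical and the testing, io, ioutil, and
// tarsum imports are assumed to be available in the package.
func TestRandomTarFileSumIsStable(t *testing.T) {
	rs, expected, err := CreateRandomTarFile()
	if err != nil {
		t.Fatal(err)
	}

	// Recompute the tarsum from the returned reader and make sure it
	// matches the sum that CreateRandomTarFile reported.
	ts, err := tarsum.NewTarSum(rs, true, tarsum.Version1)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		t.Fatal(err)
	}
	if got := ts.Sum(nil); got != expected {
		t.Fatalf("tarsum mismatch: got %q, expected %q", got, expected)
	}
}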
func copyFiles(b *Build, args []string, cmdName string) (s State, err error) {
	s = b.state

	if len(args) < 2 {
		return s, fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	var (
		tarSum   tarsum.TarSum
		src      = args[0 : len(args)-1]
		dest     = filepath.FromSlash(args[len(args)-1]) // last one is always the dest
		u        *upload
		excludes = s.NoCache.Dockerignore
	)

	// If destination is not a directory (no trailing slash)
	hasTrailingSlash := strings.HasSuffix(dest, string(os.PathSeparator))
	if !hasTrailingSlash && len(src) > 1 {
		return s, fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	if !filepath.IsAbs(dest) {
		dest = filepath.Join(s.Config.WorkingDir, dest)
		// Add the trailing slash back if we had it before
		if hasTrailingSlash {
			dest += string(os.PathSeparator)
		}
	}

	if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes, b.urlFetcher); err != nil {
		return s, err
	}

	// skip COPY if no files matched
	if len(u.files) == 0 {
		log.Infof("| No files matched")
		return s, nil
	}

	log.Infof("| Calculating tarsum for %d files (%s total)", len(u.files), units.HumanSize(float64(u.size)))

	if tarSum, err = tarsum.NewTarSum(u.tar, true, tarsum.Version1); err != nil {
		return s, err
	}
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return s, err
	}
	u.tar.Close()

	// TODO: useful commit comment?
	message := fmt.Sprintf("%s %s to %s", cmdName, tarSum.Sum(nil), dest)
	s.Commit(message)

	// Check cache
	s, hit, err := b.probeCache(s)
	if err != nil {
		return s, err
	}
	if hit {
		return s, nil
	}

	origCmd := s.Config.Cmd
	s.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + message}

	if s.NoCache.ContainerID, err = b.client.CreateContainer(s); err != nil {
		return s, err
	}

	s.Config.Cmd = origCmd

	// We need to make a new tar stream, because the previous one has been
	// read by the tarsum; maybe, optimize this in future
	if u, err = makeTarStream(b.cfg.ContextDir, dest, cmdName, src, excludes, b.urlFetcher); err != nil {
		return s, err
	}

	// Copy to "/" because we made the prefix inside the tar archive
	// Do that because we are not able to reliably create directories inside the container
	if err = b.client.UploadToContainer(s.NoCache.ContainerID, u.tar, "/"); err != nil {
		return s, err
	}

	return s, nil
}
func main() {
	var jobs []job

	flag.Usage = usage
	flag.Parse()

	if showVersion {
		version.PrintVersion()
		return
	}

	var fail bool // if we fail on one item, foul the exit code
	if flag.NArg() > 0 {
		for _, path := range flag.Args() {
			fp, err := os.Open(path)
			if err != nil {
				log.Printf("%s: %v", path, err)
				fail = true
				continue
			}
			defer fp.Close()

			jobs = append(jobs, job{name: path, reader: fp})
		}
	} else {
		// just read stdin
		jobs = append(jobs, job{name: "-", reader: os.Stdin})
	}

	digestFn := algorithm.FromReader

	if !algorithm.Available() {
		// we cannot digest if the algorithm is not available. An exception
		// is made for tarsum.
		if !strings.HasPrefix(algorithm.String(), "tarsum") {
			unsupported()
		}

		var version tarsum.Version
		if algorithm == "tarsum" {
			// small hack: if we just have tarsum, use latest
			version = tarsum.Version1
		} else {
			var err error
			version, err = tarsum.GetVersionFromTarsum(algorithm.String())
			if err != nil {
				unsupported()
			}
		}

		digestFn = func(rd io.Reader) (digest.Digest, error) {
			ts, err := tarsum.NewTarSum(rd, true, version)
			if err != nil {
				return "", err
			}

			if _, err := io.Copy(ioutil.Discard, ts); err != nil {
				return "", err
			}

			return digest.Digest(ts.Sum(nil)), nil
		}
	}

	for _, job := range jobs {
		dgst, err := digestFn(job.reader)
		if err != nil {
			log.Printf("%s: %v", job.name, err)
			fail = true
			continue
		}

		fmt.Printf("%v\t%s\n", dgst, job.name)
	}

	if fail {
		os.Exit(1)
	}
}
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) != 2 {
		return fmt.Errorf("Invalid %s format", cmdName)
	}

	orig := args[0]
	dest := args[1]

	cmd := b.Config.Cmd
	b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
	defer func(cmd []string) { b.Config.Cmd = cmd }(cmd)
	b.Config.Image = b.image

	var (
		origPath   = orig
		destPath   = dest
		remoteHash string
		isRemote   bool
		decompress = true
	)

	isRemote = utils.IsURL(orig)
	if isRemote && !allowRemote {
		return fmt.Errorf("Source can't be an URL for %s", cmdName)
	} else if utils.IsURL(orig) {
		// Initiate the download
		resp, err := utils.Download(orig)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}
		defer os.RemoveAll(tmpDirName)

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Remove the mtime of the newly created tmp file
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(dest, "/") {
			u, err := url.Parse(orig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			destPath = dest + filename
		}
	}

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}

	// Hash path and check the cache
	if b.UtilizeCache {
		var (
			hash string
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			hash = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			var subfiles []string
			for file, sum := range sums {
				absFile := path.Join(b.contextPath, file)
				absOrigPath := path.Join(b.contextPath, origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, sum)
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			if origPath[0] == '/' && len(origPath) > 1 {
				origPath = origPath[1:]
			}
			origPath = strings.TrimPrefix(origPath, "./")
			if h, ok := sums[origPath]; ok {
				hash = "file:" + h
			}
		}
		b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		// If we do not have a hash, never use the cache
		if hit && hash != "" {
			return nil
		}
	}

	// Create the container
	container, _, err := b.Daemon.Create(b.Config, "")
	if err != nil {
		return err
	}
	b.TmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	if !allowDecompression || isRemote {
		decompress = false
	}
	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
		return err
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
		return err
	}
	return nil
}
func (s *TagStore) newManifest(localName, remoteName, tag string) ([]byte, error) {
	manifest := &registry.ManifestData{
		Name:          remoteName,
		Tag:           tag,
		SchemaVersion: 1,
	}
	localRepo, err := s.Get(localName)
	if err != nil {
		return nil, err
	}
	if localRepo == nil {
		return nil, fmt.Errorf("Repo does not exist: %s", localName)
	}

	// Get the top-most layer id which the tag points to
	layerId, exists := localRepo[tag]
	if !exists {
		return nil, fmt.Errorf("Tag does not exist for %s: %s", localName, tag)
	}
	layersSeen := make(map[string]bool)

	layer, err := s.graph.Get(layerId)
	if err != nil {
		return nil, err
	}
	if layer.Config == nil {
		return nil, errors.New("Missing layer configuration")
	}
	manifest.Architecture = layer.Architecture
	manifest.FSLayers = make([]*registry.FSLayer, 0, 4)
	manifest.History = make([]*registry.ManifestHistory, 0, 4)
	var metadata runconfig.Config
	metadata = *layer.Config

	for ; layer != nil; layer, err = layer.GetParent() {
		if err != nil {
			return nil, err
		}

		if layersSeen[layer.ID] {
			break
		}
		if layer.Config != nil && metadata.Image != layer.ID {
			err = runconfig.Merge(&metadata, layer.Config)
			if err != nil {
				return nil, err
			}
		}

		archive, err := layer.TarLayer()
		if err != nil {
			return nil, err
		}

		tarSum, err := tarsum.NewTarSum(archive, true, tarsum.Version1)
		if err != nil {
			return nil, err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return nil, err
		}

		tarId := tarSum.Sum(nil)

		manifest.FSLayers = append(manifest.FSLayers, &registry.FSLayer{BlobSum: tarId})

		layersSeen[layer.ID] = true

		jsonData, err := ioutil.ReadFile(path.Join(s.graph.Root, layer.ID, "json"))
		if err != nil {
			return nil, fmt.Errorf("Cannot retrieve the path for {%s}: %s", layer.ID, err)
		}
		manifest.History = append(manifest.History, &registry.ManifestHistory{V1Compatibility: string(jsonData)})
	}

	manifestBytes, err := json.MarshalIndent(manifest, "", " ")
	if err != nil {
		return nil, err
	}

	return manifestBytes, nil
}
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {
	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// In the remote/URL case, download it and gen its hashcode
	if utils.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Remove the mtime of the newly created tmp file
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, only if we're using the cache
		if b.UtilizeCache {
			r, err := archive.Tar(tmpFileName, archive.Uncompressed)
			if err != nil {
				return err
			}
			tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
			if err != nil {
				return err
			}
			if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
				return err
			}
			ci.hash = tarSum.Sum(nil)
			r.Close()
		}

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// If not using cache don't need to do anything else.
	// If we are using a cache then calc the hash for the src file/dir
	if !b.UtilizeCache {
		return nil
	}

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		if strings.HasPrefix(absFile, absOrigPath) || absFile == absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}