Example #1
// GetTar generates a tar stream from the contents of db, and streams
// it to `dst`.
func (db *DB) GetTar(dst io.Writer) error {
	tw := tar.NewWriter(dst)
	defer tw.Close()
	// Walk the data tree
	return db.Walk(DataTree, func(name string, obj git.Object) error {
		fmt.Fprintf(os.Stderr, "Generating tar entry for '%s'...\n", name)
		metaBlob, err := db.Get(metaPath(name))
		if err != nil {
			return err
		}
		tr := tar.NewReader(bytes.NewReader([]byte(metaBlob)))
		hdr, err := tr.Next()
		if err != nil {
			return err
		}
		// Write the reconstituted tar header+content
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if blob, isBlob := obj.(*git.Blob); isBlob {
			fmt.Fprintf(os.Stderr, "--> writing %d bytes for blob %s\n", hdr.Size, hdr.Name)
			if _, err := tw.Write(blob.Contents()[:hdr.Size]); err != nil {
				return err
			}
		}
		return nil
	})
}
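GetTar assumes that the blob returned by `db.Get(metaPath(name))` is the entry's tar header serialized in tar wire format, which is why it can re-parse it with `tar.NewReader`. Below is a minimal sketch of how such a blob might be produced when an entry is stored; the helper name is hypothetical and how the bytes end up under `metaPath(name)` depends on the DB write API, which this example does not show.

// Sketch only: serialize hdr the way GetTar expects to read it back.
// Storing the result under metaPath(name) is left to the (unshown) DB write API.
func encodeTarHeader(hdr *tar.Header) ([]byte, error) {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(hdr); err != nil {
		return nil, err
	}
	// Close flushes the header block and writes the end-of-archive footer.
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}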
Example #2
// headerReader serializes hdr in tar wire format and returns a reader over the
// resulting bytes: the header block(s) written by WriteHeader, followed by the
// end-of-archive footer written by Close.
func headerReader(hdr *tar.Header) (io.Reader, error) {
	var buf bytes.Buffer
	w := tar.NewWriter(&buf)
	if err := w.WriteHeader(hdr); err != nil {
		return nil, err
	}
	// Close explicitly so its error is not silently dropped.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return &buf, nil
}
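A small round-trip sketch, assuming the same package as headerReader above: the bytes it produces can be parsed straight back with tar.NewReader, which is handy in tests.

// Round-trip sketch (hypothetical helper): re-parse the serialized header.
func roundTripHeader(hdr *tar.Header) (*tar.Header, error) {
	r, err := headerReader(hdr)
	if err != nil {
		return nil, err
	}
	return tar.NewReader(r).Next()
}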
Example #3
// TarFilter creates an archive from the directory at `srcPath`, only including files whose
// relative paths are listed in `options.Includes`. If `options.Includes` is nil, all files
// are included.
func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	tw := tar.NewWriter(compressWriter)

	go func() {
		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		if options.Includes == nil {
			options.Includes = []string{"."}
		}

		for _, include := range options.Includes {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil {
					return nil
				}

				if err := addTarFile(filePath, relFilePath, tw); err != nil {
					utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
				}
				return nil
			})
		}

		// Make sure to check the error on Close.
		if err := tw.Close(); err != nil {
			utils.Debugf("Can't close tar writer: %s\n", err)
		}
		if err := compressWriter.Close(); err != nil {
			utils.Debugf("Can't close compress writer: %s\n", err)
		}
		if err := pipeWriter.Close(); err != nil {
			utils.Debugf("Can't close pipe writer: %s\n", err)
		}
	}()

	return pipeReader, nil
}
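A consumer sketch with a hypothetical helper name and output path: TarFilter streams through an io.Pipe from a goroutine, so the returned ReadCloser should be drained and closed by the caller; the writing goroutine blocks until the pipe is read (or the read end is closed).

// Sketch: stream the filtered archive to a file on disk.
func tarToFile(srcPath, outPath string, options *TarOptions) error {
	archive, err := TarFilter(srcPath, options)
	if err != nil {
		return err
	}
	defer archive.Close()

	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	// Draining the reader is what drives the writing goroutine forward.
	_, err = io.Copy(out, archive)
	return err
}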
Example #4
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// If `dst` ends with a trailing slash '/', the final destination path
// will be `dst/base(src)`.
func CopyFileWithTar(src, dst string) (err error) {
	utils.Debugf("CopyFileWithTar(%s, %s)", src, dst)
	srcSt, err := os.Stat(src)
	if err != nil {
		return err
	}
	if srcSt.IsDir() {
		return fmt.Errorf("Can't copy a directory")
	}
	// Clean up the trailing /
	if dst[len(dst)-1] == '/' {
		dst = path.Join(dst, filepath.Base(src))
	}
	// Create the holding directory if necessary
	if err := os.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
		return err
	}

	r, w := io.Pipe()
	errC := utils.Go(func() error {
		defer w.Close()

		srcF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer srcF.Close()

		tw := tar.NewWriter(w)
		hdr, err := tar.FileInfoHeader(srcSt, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.Base(dst)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err := io.Copy(tw, srcF); err != nil {
			return err
		}
		return tw.Close()
	})
	defer func() {
		if er := <-errC; er != nil {
			err = er
		}
	}()
	return Untar(r, filepath.Dir(dst), nil)
}
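A brief usage sketch with hypothetical paths, showing the trailing-slash behavior described in the comment above.

// Sketch: with a trailing slash the file keeps its base name under the
// destination directory, here /tmp/backup/passwd.
func copyIntoDir() error {
	return CopyFileWithTar("/etc/passwd", "/tmp/backup/")
}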
Example #5
func fakeTar() (io.Reader, error) {
	content := []byte("Hello world!\n")
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
		hdr := new(tar.Header)
		hdr.Size = int64(len(content))
		hdr.Name = name
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write(content); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}
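A test-style sketch (hypothetical helper) that reads the archive produced by fakeTar back with tar.NewReader and collects the entry names.

// Sketch: iterate the fake archive and list its entries.
func listFakeTar() ([]string, error) {
	archive, err := fakeTar()
	if err != nil {
		return nil, err
	}
	tr := tar.NewReader(archive)
	var names []string
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return names, nil
		}
		if err != nil {
			return nil, err
		}
		names = append(names, hdr.Name)
	}
}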
Example #6
// make a tar:
// * num is the number of files the tar should have
// * size is the bytes per file
// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros)
// * realFile will write to a TempFile, instead of an in memory buffer
func sizedTar(opts sizedOptions) io.Reader {
	var (
		fh  io.ReadWriter
		err error
	)
	if opts.realFile {
		fh, err = ioutil.TempFile("", "tarsum")
		if err != nil {
			return nil
		}
	} else {
		fh = bytes.NewBuffer([]byte{})
	}
	tarW := tar.NewWriter(fh)
	for i := int64(0); i < opts.num; i++ {
		err := tarW.WriteHeader(&tar.Header{
			Name: fmt.Sprintf("/testdata%d", i),
			Mode: 0755,
			Uid:  0,
			Gid:  0,
			Size: opts.size,
		})
		if err != nil {
			return nil
		}
		var rBuf []byte
		if opts.isRand {
			rBuf = make([]byte, 8)
			_, err = rand.Read(rBuf)
			if err != nil {
				return nil
			}
		} else {
			rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
		}

		for j := int64(0); j < opts.size/int64(8); j++ {
			if _, err := tarW.Write(rBuf); err != nil {
				return nil
			}
		}
	}
	// Close writes the end-of-archive footer so the tar is well-formed.
	if err := tarW.Close(); err != nil {
		return nil
	}
	// When writing to a temp file, rewind it so the caller reads the archive
	// from the start rather than hitting EOF immediately.
	if f, ok := fh.(*os.File); ok {
		if _, err := f.Seek(0, 0); err != nil {
			return nil
		}
	}
	return fh
}
Example #7
func ExportChanges(dir string, changes []Change) (Archive, error) {
	reader, writer := io.Pipe()
	tw := tar.NewWriter(writer)

	go func() {
		twBuf := bufio.NewWriterSize(nil, twBufSize)
		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := tw.WriteHeader(hdr); err != nil {
					utils.Debugf("Can't write whiteout header: %s\n", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := addTarFile(path, change.Path[1:], tw, twBuf); err != nil {
					utils.Debugf("Can't add file %s to tar: %s\n", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := tw.Close(); err != nil {
			utils.Debugf("Can't close layer: %s\n", err)
		}
		writer.Close()
	}()
	return reader, nil
}
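ExportChanges encodes deletions as empty entries whose base name carries a ".wh." prefix. The sketch below shows how a consumer of such a layer stream might separate whiteouts from regular entries; it is a hypothetical helper for illustration, not the package's own code for applying layers.

// Sketch: split a layer stream into deleted paths (whiteouts) and
// added/modified paths.
func classifyLayerEntries(layer io.Reader) (deleted, updated []string, err error) {
	tr := tar.NewReader(layer)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return deleted, updated, nil
		}
		if err != nil {
			return nil, nil, err
		}
		base := filepath.Base(hdr.Name)
		if strings.HasPrefix(base, ".wh.") {
			// The whiteout marks deletion of the path without the prefix.
			deleted = append(deleted, filepath.Join(filepath.Dir(hdr.Name), strings.TrimPrefix(base, ".wh.")))
		} else {
			updated = append(updated, hdr.Name)
		}
	}
}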
Example #8
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * ./foo.txt with content "hello world"
//  * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (Archive, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return ioutil.NopCloser(buf), nil
}
Example #9
// TarWithOptions creates an archive from the directory at `srcPath`, walking the relative
// paths listed in `options.Includes` (defaulting to the whole directory) and skipping any
// relative path that matches a pattern in `options.Excludes`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	tw := tar.NewWriter(compressWriter)

	go func() {
		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		if options.Includes == nil {
			options.Includes = []string{"."}
		}

		twBuf := bufio.NewWriterSize(nil, twBufSize)

		for _, include := range options.Includes {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					utils.Debugf("Tar: Can't stat file %s to tar: %s\n", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil {
					return nil
				}

				for _, exclude := range options.Excludes {
					matched, err := filepath.Match(exclude, relFilePath)
					if err != nil {
						utils.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
						return err
					}
					if matched {
						utils.Debugf("Skipping excluded path: %s", relFilePath)
						if f.IsDir() {
							return filepath.SkipDir
						}
						return nil
					}
				}

				if err := addTarFile(filePath, relFilePath, tw, twBuf); err != nil {
					utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
				}
				return nil
			})
		}

		// Make sure to check the error on Close.
		if err := tw.Close(); err != nil {
			utils.Debugf("Can't close tar writer: %s\n", err)
		}
		if err := compressWriter.Close(); err != nil {
			utils.Debugf("Can't close compress writer: %s\n", err)
		}
		if err := pipeWriter.Close(); err != nil {
			utils.Debugf("Can't close pipe writer: %s\n", err)
		}
	}()

	return pipeReader, nil
}
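A usage sketch with hypothetical paths and patterns; `Compression` is left at its zero value, which is assumed here to mean an uncompressed stream.

// Sketch: archive /srv/app but skip any relative path matching "*.log".
func tarAppWithoutLogs() (io.ReadCloser, error) {
	return TarWithOptions("/srv/app", &TarOptions{
		Includes: []string{"."},
		Excludes: []string{"*.log"},
	})
}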
Example #10
func (ts *TarSum) Read(buf []byte) (int, error) {
	if ts.gz == nil {
		ts.bufTar = bytes.NewBuffer([]byte{})
		ts.bufGz = bytes.NewBuffer([]byte{})
		ts.tarR = tar.NewReader(ts.Reader)
		ts.tarW = tar.NewWriter(ts.bufTar)
		if !ts.DisableCompression {
			ts.gz = gzip.NewWriter(ts.bufGz)
		} else {
			ts.gz = &nopCloseFlusher{Writer: ts.bufGz}
		}
		ts.h = sha256.New()
		ts.h.Reset()
		ts.first = true
		ts.sums = make(map[string]string)
	}

	if ts.finished {
		return ts.bufGz.Read(buf)
	}
	buf2 := make([]byte, len(buf), cap(buf))

	n, err := ts.tarR.Read(buf2)
	if err != nil {
		if err == io.EOF {
			if _, err := ts.h.Write(buf2[:n]); err != nil {
				return 0, err
			}
			if !ts.first {
				ts.sums[ts.currentFile] = hex.EncodeToString(ts.h.Sum(nil))
				ts.h.Reset()
			} else {
				ts.first = false
			}

			currentHeader, err := ts.tarR.Next()
			if err != nil {
				if err == io.EOF {
					if err := ts.gz.Close(); err != nil {
						return 0, err
					}
					ts.finished = true
					return n, nil
				}
				return n, err
			}
			ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/")
			if err := ts.encodeHeader(currentHeader); err != nil {
				return 0, err
			}
			if err := ts.tarW.WriteHeader(currentHeader); err != nil {
				return 0, err
			}
			if _, err := ts.tarW.Write(buf2[:n]); err != nil {
				return 0, err
			}
			ts.tarW.Flush()
			if _, err := io.Copy(ts.gz, ts.bufTar); err != nil {
				return 0, err
			}
			ts.gz.Flush()

			return ts.bufGz.Read(buf)
		}
		return n, err
	}

	// Filling the hash buffer
	if _, err = ts.h.Write(buf2[:n]); err != nil {
		return 0, err
	}

	// Filling the tar writer
	if _, err = ts.tarW.Write(buf2[:n]); err != nil {
		return 0, err
	}
	ts.tarW.Flush()

	// Filling the gz writer
	if _, err = io.Copy(ts.gz, ts.bufTar); err != nil {
		return 0, err
	}
	ts.gz.Flush()

	return ts.bufGz.Read(buf)
}
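A sketch tying this back to the fakeTar helper from Example #5: wrapping a tar stream in a TarSum and draining it drives the per-entry checksumming that Read performs as a side effect, while the (optionally gzipped) stream is re-emitted to the caller. Only the Reader and DisableCompression fields referenced above are assumed; the rest of the TarSum API is not shown here.

// Sketch: run a tar stream through TarSum by reading it to completion.
func checksumFakeTar() error {
	archive, err := fakeTar()
	if err != nil {
		return err
	}
	ts := &TarSum{Reader: archive, DisableCompression: true}
	// Draining ts fills its internal per-file checksum map as entries go by.
	_, err = io.Copy(ioutil.Discard, ts)
	return err
}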