Example #1
// scanChunks walks every chunk of the file, recording each data chunk's
// schema path and size, and collecting the schema and data blob refs
// that will be packed.
func (pk *packer) scanChunks() error {
	schemaSeen := map[blob.Ref]bool{}
	return pk.fr.ForeachChunk(func(schemaPath []blob.Ref, p schema.BytesPart) error {
		if !p.BlobRef.Valid() {
			return errors.New("sparse files are not packed")
		}
		if p.Offset != 0 {
			// TODO: maybe care about this later, if we ever start making
			// these sorts of files.
			return errors.New("file uses complicated schema. not packing.")
		}
		pk.schemaParent[p.BlobRef] = append([]blob.Ref(nil), schemaPath...) // clone it
		pk.dataSize[p.BlobRef] = uint32(p.Size)
		for _, schemaRef := range schemaPath {
			if schemaSeen[schemaRef] {
				continue
			}
			schemaSeen[schemaRef] = true
			pk.schemaRefs = append(pk.schemaRefs, schemaRef)
			b, err := blob.FromFetcher(pk.s, schemaRef)
			if err != nil {
				return err
			}
			pk.schemaBlob[schemaRef] = b
		}
		pk.dataRefs = append(pk.dataRefs, p.BlobRef)
		return nil
	})
}
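
For orientation, here is a hedged sketch of the packer fields that scanChunks populates. The field names come from the method above; the types are inferred from how each field is used, and the real struct in the surrounding package has additional members.

type packer struct {
	s  blob.Fetcher       // storage the schema blobs are fetched from (used via blob.FromFetcher)
	fr *schema.FileReader // reader over the file being scanned (assumed type; it provides ForeachChunk)

	schemaParent map[blob.Ref][]blob.Ref // data chunk -> path of schema blobs above it
	dataSize     map[blob.Ref]uint32     // data chunk -> its size in bytes
	schemaRefs   []blob.Ref              // distinct schema blobs, in discovery order
	schemaBlob   map[blob.Ref]*blob.Blob // schema blob ref -> its fetched contents
	dataRefs     []blob.Ref              // data chunks, in file order
}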
Example #2
// getBlob fetches the blob with ref br, delegating to the root
// FileReader and serving repeated requests for the same chunk from
// its one-entry lastBlob cache.
func (fr *FileReader) getBlob(br blob.Ref) (*blob.Blob, error) {
	if root := fr.rootReader(); root != fr {
		return root.getBlob(br)
	}
	fr.blobmu.Lock()
	last := fr.lastBlob
	fr.blobmu.Unlock()
	if last != nil && last.Ref() == br {
		return last, nil
	}
	b, err := blob.FromFetcher(fr.fetcher, br)
	if err != nil {
		return nil, err
	}

	fr.blobmu.Lock()
	fr.lastBlob = b
	fr.blobmu.Unlock()
	return b, nil
}
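
The lastBlob field is a deliberately tiny cache: sequential reads of a file tend to hit the same chunk many times in a row, so remembering only the most recent blob avoids refetching without unbounded memory. A minimal standalone sketch of the same pattern, with all names hypothetical:

import "sync"

// lastResult is a one-entry cache guarded by a mutex, mirroring the
// lastBlob pattern above. All names here are hypothetical.
type lastResult struct {
	mu  sync.Mutex
	key string
	val []byte
	ok  bool
}

func (c *lastResult) get(key string, fetch func(string) ([]byte, error)) ([]byte, error) {
	c.mu.Lock()
	if c.ok && c.key == key {
		v := c.val
		c.mu.Unlock()
		return v, nil // cache hit: no fetch needed
	}
	c.mu.Unlock()

	v, err := fetch(key) // fetch outside the lock, as getBlob does
	if err != nil {
		return nil, err
	}

	c.mu.Lock()
	c.key, c.val, c.ok = key, v, true
	c.mu.Unlock()
	return v, nil
}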
Example #3
// readerForOffset returns a ReadCloser that reads some number of bytes and then EOF
// from the provided offset. The EOF marks the end of the chunk at that offset, not
// necessarily the end of the whole file. The caller must close the ReadCloser when
// done reading.
func (fr *FileReader) readerForOffset(off int64) (io.ReadCloser, error) {
	if debug {
		log.Printf("(%p) readerForOffset %d + %d = %d", fr, fr.rootOff, off, fr.rootOff+off)
	}
	if off < 0 {
		panic("negative offset")
	}
	if off >= fr.size {
		return eofReader, nil
	}
	offRemain := off
	var skipped int64
	parts := fr.ss.Parts
	for len(parts) > 0 && parts[0].Size <= uint64(offRemain) {
		offRemain -= int64(parts[0].Size)
		skipped += int64(parts[0].Size)
		parts = parts[1:]
	}
	if len(parts) == 0 {
		return eofReader, nil
	}
	p0 := parts[0]
	var rsc types.ReadSeekCloser
	var err error
	switch {
	case p0.BlobRef.Valid() && p0.BytesRef.Valid():
		return nil, fmt.Errorf("part illegally contained both a blobRef and bytesRef")
	case !p0.BlobRef.Valid() && !p0.BytesRef.Valid():
		return &nZeros{int(p0.Size - uint64(offRemain))}, nil
	case p0.BlobRef.Valid():
		b, err := blob.FromFetcher(fr.fetcher, p0.BlobRef)
		if err != nil {
			return nil, err
		}
		rsc = b.Open()
	case p0.BytesRef.Valid():
		var ss *superset
		ss, err = fr.getSuperset(p0.BytesRef)
		if err != nil {
			return nil, err
		}
		rsc, err = ss.NewFileReader(fr.fetcher)
		if err == nil {
			subFR := rsc.(*FileReader)
			subFR.parent = fr.rootReader()
			subFR.rootOff = fr.rootOff + skipped
		}
	}
	if err != nil {
		return nil, err
	}
	offRemain += int64(p0.Offset)
	if offRemain > 0 {
		newPos, err := rsc.Seek(offRemain, io.SeekStart)
		if err != nil {
			return nil, err
		}
		if newPos != offRemain {
			panic("Seek didn't work")
		}
	}
	return struct {
		io.Reader
		io.Closer
	}{
		io.LimitReader(rsc, int64(p0.Size)),
		rsc,
	}, nil
}
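
Because each returned reader yields at most one chunk before EOF, a caller that wants a byte range spanning chunk boundaries has to loop, asking for a fresh reader at the advanced offset. A hedged sketch of such a helper inside the same package (readFullAt is hypothetical, not the package's real read path; it assumes the io import already present above):

// readFullAt is a hypothetical helper showing how readerForOffset is
// meant to be driven: each reader EOFs at a chunk boundary, so we keep
// requesting readers at the advanced offset until the buffer is full
// or the file ends.
func readFullAt(fr *FileReader, p []byte, off int64) (int, error) {
	n := 0
	for n < len(p) {
		rc, err := fr.readerForOffset(off + int64(n))
		if err != nil {
			return n, err
		}
		m, rerr := io.ReadFull(rc, p[n:])
		rc.Close()
		n += m
		if rerr == io.EOF || rerr == io.ErrUnexpectedEOF {
			if m == 0 {
				return n, io.EOF // offset at or past end of file
			}
			continue // hit a chunk boundary; get a reader for the next chunk
		}
		if rerr != nil {
			return n, rerr
		}
	}
	return n, nil
}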