Example #1
// fetch returns the blob br as a ReadCloser. A length of -1 means the
// entire file (a plain Fetch); otherwise offset and length select a
// byte range of it (a SubFetch).
func (ds *DiskStorage) fetch(br blob.Ref, offset, length int64) (rc io.ReadCloser, size uint32, err error) {
	fileName := ds.blobPath(br)
	stat, err := os.Stat(fileName)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, 0, os.ErrNotExist
		}
		return nil, 0, err
	}
	size = types.U32(stat.Size())
	file, err := os.Open(fileName)
	if err != nil {
		if os.IsNotExist(err) {
			err = os.ErrNotExist
		}
		return nil, 0, err
	}
	// normal Fetch:
	if length < 0 {
		return file, size, nil
	}
	// SubFetch:
	if offset < 0 || offset > stat.Size() {
		file.Close()
		if offset < 0 {
			return nil, 0, blob.ErrNegativeSubFetch
		}
		return nil, 0, blob.ErrOutOfRangeOffsetSubFetch
	}
	return struct {
		io.Reader
		io.Closer
	}{
		io.NewSectionReader(file, offset, length),
		file,
	}, 0 /* unused */, nil
}
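Every example on this page converts an int64 size to uint32 through types.U32. That helper isn't shown here; the sketch below illustrates what such a checked conversion presumably does (the package name, message text, and panic behavior are assumptions, not a copy of camlistore.org/pkg/types):

package types

import (
	"fmt"
	"math"
)

// U32 converts n to a uint32, panicking if n is negative or does not
// fit in 32 bits. Sketch only; the real types.U32 may differ.
func U32(n int64) uint32 {
	if n < 0 || n > math.MaxUint32 {
		panic(fmt.Sprintf("bad size %d", n))
	}
	return uint32(n)
}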
Example #2
func (ds *DiskStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	if len(blobs) == 0 {
		return nil
	}

	// statSend stats a single blob and, if it exists as a regular file,
	// sends its SizedRef to dest. Missing blobs are skipped silently;
	// any other stat error is returned.
	statSend := func(ref blob.Ref) error {
		fi, err := os.Stat(ds.blobPath(ref))
		switch {
		case err == nil && fi.Mode().IsRegular():
			dest <- blob.SizedRef{Ref: ref, Size: types.U32(fi.Size())}
			return nil
		case err != nil && !os.IsNotExist(err):
			return err
		}
		return nil
	}

	if len(blobs) == 1 {
		return statSend(blobs[0])
	}

	// Stat the blobs in parallel, bounded by statGate, collecting the
	// first error (if any) via the Group.
	var wg syncutil.Group
	for _, ref := range blobs {
		ref := ref // per-iteration copy for the closure below
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()
			return statSend(ref)
		})
	}
	return wg.Err()
}
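In Example #2, statGate and syncutil.Group bound the number of concurrent os.Stat calls and collect the first error. The self-contained sketch below reproduces that pattern with only a buffered channel and sync primitives; statAll, gate, and maxParallel are illustrative names, not the syncutil API:

package main

import (
	"fmt"
	"os"
	"sync"
)

// statAll stats each path with at most maxParallel concurrent os.Stat
// calls and returns the first non-NotExist error, mirroring the
// statGate/Group pattern above.
func statAll(paths []string, maxParallel int) error {
	gate := make(chan struct{}, maxParallel) // counting semaphore
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		firstErr error
	)
	for _, p := range paths {
		p := p
		gate <- struct{}{} // acquire a slot, like statGate.Start()
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-gate }() // release the slot, like statGate.Done()
			if _, err := os.Stat(p); err != nil && !os.IsNotExist(err) {
				mu.Lock()
				if firstErr == nil {
					firstErr = err
				}
				mu.Unlock()
			}
		}()
	}
	wg.Wait()
	return firstErr
}

func main() {
	fmt.Println(statAll([]string{"/etc/hosts", "/no/such/file"}, 4))
}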
Example #3
func (fi *FakeIndex) AddMeta(br blob.Ref, camliType string, size int64) {
	fi.lk.Lock()
	defer fi.lk.Unlock()
	fi.meta[br] = camtypes.BlobMeta{
		Ref:       br,
		Size:      types.U32(size),
		CamliType: camliType,
	}
}
Example #4
func (s *storage) Fetch(plainBR blob.Ref) (file io.ReadCloser, size uint32, err error) {
	meta, err := s.fetchMeta(plainBR)
	if err != nil {
		return nil, 0, err
	}
	encData, _, err := s.blobs.Fetch(meta.EncBlobRef)
	if err != nil {
		log.Printf("encrypt: plaintext %s's encrypted %v blob not found", plainBR, meta.EncBlobRef)
		return nil, 0, err
	}
	defer encData.Close()

	// Quick sanity check that the blob begins with the same IV we
	// have in our metadata.
	blobIV := make([]byte, len(meta.IV))
	_, err = io.ReadFull(encData, blobIV)
	if err != nil {
		return nil, 0, fmt.Errorf("Error reading off IV header from blob: %v", err)
	}
	if !bytes.Equal(blobIV, meta.IV) {
		return nil, 0, fmt.Errorf("Blob and meta IV don't match")
	}

	// Slurp the whole blob into memory to validate its plaintext
	// checksum (no tampered bits) before returning it. Clients
	// should be the party doing this in the general case, but
	// we'll be extra paranoid and always do it here, at the cost
	// of sometimes having it be done twice.
	var plain bytes.Buffer
	plainHash := plainBR.Hash()
	plainSize, err := io.Copy(io.MultiWriter(&plain, plainHash), cipher.StreamReader{
		S: cipher.NewCTR(s.block, meta.IV),
		R: encData,
	})
	if err != nil {
		return nil, 0, err
	}
	size = types.U32(plainSize)
	if !plainBR.HashMatches(plainHash) {
		return nil, 0, blobserver.ErrCorruptBlob
	}
	return struct {
		*bytes.Reader
		io.Closer
	}{
		bytes.NewReader(plain.Bytes()),
		types.NopCloser,
	}, size, nil
}
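Example #4 relies on the standard crypto/cipher StreamReader: the blob is laid out as an IV header followed by an AES-CTR-encrypted payload, and decryption is just reading through a CTR stream keyed on that IV. The sketch below shows the same layout end to end; the key size, blob format, and variable names are assumptions for illustration, not the encrypt package's actual scheme:

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io"
)

func main() {
	key := make([]byte, 32) // AES-256 key, random for this demo
	iv := make([]byte, aes.BlockSize)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	if _, err := rand.Read(iv); err != nil {
		panic(err)
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// Encrypt: write the IV header, then the CTR-encrypted payload,
	// roughly the layout Example #4 reads back.
	plaintext := []byte("hello, blob")
	var enc bytes.Buffer
	enc.Write(iv)
	w := cipher.StreamWriter{S: cipher.NewCTR(block, iv), W: &enc}
	w.Write(plaintext)

	// Decrypt: read the IV off the front, then stream-decrypt the rest
	// through a cipher.StreamReader, as Example #4 does.
	r := bytes.NewReader(enc.Bytes())
	gotIV := make([]byte, aes.BlockSize)
	if _, err := io.ReadFull(r, gotIV); err != nil {
		panic(err)
	}
	dec, err := io.ReadAll(cipher.StreamReader{S: cipher.NewCTR(block, gotIV), R: r})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", dec) // hello, blob
}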
Example #5
func (ds *DiskStorage) Fetch(br blob.Ref) (io.ReadCloser, uint32, error) {
	fileName := ds.blobPath(br)
	stat, err := os.Stat(fileName)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, 0, os.ErrNotExist
		}
		return nil, 0, err
	}
	size := types.U32(stat.Size())
	file, err := os.Open(fileName)
	if err != nil {
		if os.IsNotExist(err) {
			err = os.ErrNotExist
		}
		return nil, 0, err
	}
	return file, size, nil
}
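Callers of a Fetch like Example #5's own the returned ReadCloser and must close it. A hypothetical helper showing that usage (readBlob is not part of the package; it assumes the same blob, io, and fmt imports as the examples and Go 1.16+ for io.ReadAll):

// readBlob fetches ref from ds, reads it fully, and checks the length
// against the size Fetch reported. Illustrative sketch only.
func readBlob(ds *DiskStorage, ref blob.Ref) ([]byte, error) {
	rc, size, err := ds.Fetch(ref)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	data, err := io.ReadAll(rc)
	if err != nil {
		return nil, err
	}
	if uint32(len(data)) != size {
		return nil, fmt.Errorf("blob %v: read %d bytes, expected %d", ref, len(data), size)
	}
	return data, nil
}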