Example #1
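// ReceiveBlob indexes an incoming blob: the bytes are teed into the
// blob's hash and a BlobSniffer, the hash is verified, the sniffed
// content is parsed, and the resulting index rows are committed as a
// single batch mutation.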
func (ix *Index) ReceiveBlob(blobRef *blobref.BlobRef, source io.Reader) (retsb blobref.SizedBlobRef, err error) {
	sniffer := new(BlobSniffer)
	hash := blobRef.Hash()
	var written int64
	written, err = io.Copy(io.MultiWriter(hash, sniffer), source)
	if err != nil {
		return
	}

	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}
	sniffer.Parse()

	bm := ix.s.BeginBatch()

	err = ix.populateMutation(blobRef, sniffer, bm)
	if err != nil {
		return
	}

	err = ix.s.CommitBatch(bm)
	if err != nil {
		return
	}

	mimeType := sniffer.MimeType()
	log.Printf("indexer: received %s; type=%v; truncated=%v", blobRef, mimeType, sniffer.IsTruncated())

	return blobref.SizedBlobRef{BlobRef: blobRef, Size: written}, nil
}
Example #2
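// FetchStreaming returns the decrypted plaintext for plainBR. It looks
// up the blob's metadata, fetches the corresponding encrypted blob,
// checks that the blob's IV header matches the metadata, decrypts the
// remainder with a CTR stream, and verifies the plaintext hash before
// returning an in-memory reader.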
func (s *storage) FetchStreaming(plainBR *blobref.BlobRef) (file io.ReadCloser, size int64, err error) {
	meta, err := s.fetchMeta(plainBR)
	if err != nil {
		return nil, 0, err
	}
	encData, _, err := s.blobs.FetchStreaming(meta.EncBlobRef)
	if err != nil {
		log.Printf("encrypt: plaintext %s's encrypted %v blob not found", plainBR, meta.EncBlobRef)
		return nil, 0, err
	}
	defer encData.Close()

	// Quick sanity check that the blob begins with the same IV we
	// have in our metadata.
	blobIV := make([]byte, len(meta.IV))
	_, err = io.ReadFull(encData, blobIV)
	if err != nil {
		return nil, 0, fmt.Errorf("Error reading off IV header from blob: %v", err)
	}
	if !bytes.Equal(blobIV, meta.IV) {
		return nil, 0, fmt.Errorf("Blob and meta IV don't match")
	}

	// Slurp the whole blob into memory to validate its plaintext
	// checksum (no tampered bits) before returning it. Clients
	// should be the party doing this in the general case, but
	// we'll be extra paranoid and always do it here, at the cost
	// of sometimes having it be done twice.
	var plain bytes.Buffer
	plainHash := plainBR.Hash()
	plainSize, err := io.Copy(io.MultiWriter(&plain, plainHash), cipher.StreamReader{
		S: cipher.NewCTR(s.block, meta.IV),
		R: encData,
	})
	if err != nil {
		return nil, 0, err
	}
	if !plainBR.HashMatches(plainHash) {
		return nil, 0, blobserver.ErrCorruptBlob
	}
	return struct {
		*bytes.Reader
		io.Closer
	}{
		bytes.NewReader(plain.Bytes()),
		dummyCloser,
	}, plainSize, nil
}
Example #3
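// ReceiveBlob stores a blob in the in-memory Fetcher: it reads the
// entire source through a TeeReader into the blob's hash, verifies the
// hash, and keeps the contents as a string.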
func (tf *Fetcher) ReceiveBlob(br *blobref.BlobRef, source io.Reader) (blobref.SizedBlobRef, error) {
	sb := blobref.SizedBlobRef{}
	h := br.Hash()
	if h == nil {
		return sb, fmt.Errorf("Unsupported blobref hash for %s", br)
	}
	all, err := ioutil.ReadAll(io.TeeReader(source, h))
	if err != nil {
		return sb, err
	}
	if !br.HashMatches(h) {
		return sb, fmt.Errorf("Hash mismatch receiving blob %s", br)
	}
	blob := &Blob{Contents: string(all)}
	tf.AddBlob(blob)
	return blobref.SizedBlobRef{BlobRef: br, Size: int64(len(all))}, nil
}
Example #4
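// ReceiveBlob uploads a blob to S3. The bytes are teed into the blob's
// hash and a temporary slurper; once the hash is verified, the slurped
// bytes are sent with PutObject. The deferred Cleanup runs on every
// exit path.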
func (sto *s3Storage) ReceiveBlob(blob *blobref.BlobRef, source io.Reader) (outsb blobref.SizedBlobRef, outerr error) {
	zero := outsb
	slurper := newAmazonSlurper(blob)
	defer slurper.Cleanup()

	hash := blob.Hash()
	size, err := io.Copy(io.MultiWriter(hash, slurper), source)
	if err != nil {
		return zero, err
	}
	if !blob.HashMatches(hash) {
		return zero, blobserver.ErrCorruptBlob
	}
	err = sto.s3Client.PutObject(blob.String(), sto.bucket, slurper.md5, size, slurper)
	if err != nil {
		return zero, err
	}
	return blobref.SizedBlobRef{BlobRef: blob, Size: size}, nil
}
Example #5
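// ReceiveBlob buffers and hash-verifies the incoming blob. The actual
// indexing is still unimplemented; only the sized ref is returned.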
func (x *appengineIndex) ReceiveBlob(br *blobref.BlobRef, in io.Reader) (sb blobref.SizedBlobRef, err os.Error) {
	if x.ctx == nil {
		err = errNoContext
		return
	}
	var b bytes.Buffer
	hash := br.Hash()
	written, err := io.Copy(io.MultiWriter(hash, &b), in)
	if err != nil {
		return
	}
	if !br.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}

	// TODO(bradfitz): implement

	return blobref.SizedBlobRef{BlobRef: br, Size: written}, nil
}
Example #6
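// ReceiveBlob encrypts and stores a plaintext blob: it prepends a
// random IV to the CTR-encrypted bytes, writes the result as a new
// blob, records a meta blob mapping the plaintext ref to the encrypted
// ref, and updates the local index.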
func (s *storage) ReceiveBlob(plainBR *blobref.BlobRef, source io.Reader) (sb blobref.SizedBlobRef, err error) {
	iv := s.randIV()
	stream := cipher.NewCTR(s.block, iv)

	hash := plainBR.Hash()
	var buf bytes.Buffer
	// TODO: compress before encrypting?
	buf.Write(iv) // TODO: write more structured header w/ version & IV length? or does that weaken it?
	sw := cipher.StreamWriter{S: stream, W: &buf}
	plainSize, err := io.Copy(io.MultiWriter(sw, hash), source)
	if err != nil {
		return sb, err
	}
	if !plainBR.HashMatches(hash) {
		return sb, blobserver.ErrCorruptBlob
	}

	encBR := blobref.SHA1FromBytes(buf.Bytes())
	_, err = s.blobs.ReceiveBlob(encBR, bytes.NewReader(buf.Bytes()))
	if err != nil {
		log.Printf("encrypt: error writing encrypted blob %v (plaintext %v): %v", encBR, plainBR, err)
		return sb, errors.New("encrypt: error writing encrypted blob")
	}

	meta := encodeMetaValue(plainSize, iv, encBR, buf.Len())
	metaBlob := s.makeSingleMetaBlob(plainBR, meta)
	_, err = s.meta.ReceiveBlob(blobref.SHA1FromBytes(metaBlob), bytes.NewReader(metaBlob))
	if err != nil {
		log.Printf("encrypt: error writing encrypted meta for plaintext %v (encrypted blob %v): %v", plainBR, encBR, err)
		return sb, errors.New("encrypt: error writing encrypted meta")
	}

	err = s.index.Set(plainBR.String(), meta)
	if err != nil {
		return sb, fmt.Errorf("encrypt: error updating index for encrypted %v (plaintext %v): %v", err)
	}

	return blobref.SizedBlobRef{BlobRef: plainBR, Size: plainSize}, nil
}
Example #7
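// ReceiveBlob buffers and hash-verifies the blob, then uploads it to
// Google Storage, retrying PutObject once if the client reports the
// failure as retryable.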
func (gs *Storage) ReceiveBlob(blob *blobref.BlobRef, source io.Reader) (blobref.SizedBlobRef, error) {
	buf := &bytes.Buffer{}
	hash := blob.Hash()
	size, err := io.Copy(io.MultiWriter(hash, buf), source)
	if err != nil {
		return blobref.SizedBlobRef{}, err
	}
	if !blob.HashMatches(hash) {
		return blobref.SizedBlobRef{}, blobserver.ErrCorruptBlob
	}

	for tries, shouldRetry := 0, true; tries < 2 && shouldRetry; tries++ {
		shouldRetry, err = gs.client.PutObject(
			&googlestorage.Object{Bucket: gs.bucket, Key: blob.String()},
			ioutil.NopCloser(bytes.NewReader(buf.Bytes())))
	}
	if err != nil {
		return blobref.SizedBlobRef{}, err
	}

	return blobref.SizedBlobRef{BlobRef: blob, Size: size}, nil
}
Example #8
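// ReceiveBlob is identical to Example #1, except the per-blob log line
// is disabled pending a decision on log levels.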
func (ix *Index) ReceiveBlob(blobRef *blobref.BlobRef, source io.Reader) (retsb blobref.SizedBlobRef, err error) {
	sniffer := new(BlobSniffer)
	hash := blobRef.Hash()
	var written int64
	written, err = io.Copy(io.MultiWriter(hash, sniffer), source)
	if err != nil {
		return
	}

	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}
	sniffer.Parse()

	bm := ix.s.BeginBatch()

	err = ix.populateMutation(blobRef, sniffer, bm)
	if err != nil {
		return
	}

	err = ix.s.CommitBatch(bm)
	if err != nil {
		return
	}

	// TODO(bradfitz): log levels? These are generally noisy
	// (especially in tests, like search/handler_test), but I
	// could see it being useful in production. For now, disabled:
	//
	// mimeType := sniffer.MimeType()
	// log.Printf("indexer: received %s; type=%v; truncated=%v", blobRef, mimeType, sniffer.IsTruncated())

	return blobref.SizedBlobRef{BlobRef: blobRef, Size: written}, nil
}
Example #9
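// ReceiveBlob writes a blob to local disk: it spools the bytes into a
// temp file while hashing, syncs and renames the file into place once
// the hash is verified, sanity-checks the result, mirrors the blob
// into any mirror partitions (holding directory locks so concurrent
// enumeration can't unlink the directories), and notifies the blob
// hubs on success.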
func (ds *DiskStorage) ReceiveBlob(blobRef *blobref.BlobRef, source io.Reader) (blobGot blobref.SizedBlobRef, err error) {
	pname := ds.partition
	if pname != "" {
		err = fmt.Errorf("refusing upload directly to queue partition %q", pname)
		return
	}
	hashedDirectory := ds.blobDirectory(pname, blobRef)
	err = os.MkdirAll(hashedDirectory, 0700)
	if err != nil {
		return
	}

	tempFile, err := ioutil.TempFile(hashedDirectory, BlobFileBaseName(blobRef)+".tmp")
	if err != nil {
		return
	}

	success := false // set true later
	defer func() {
		if !success {
			log.Println("Removing temp file: ", tempFile.Name())
			os.Remove(tempFile.Name())
		}
	}()

	hash := blobRef.Hash()
	written, err := io.Copy(io.MultiWriter(hash, tempFile), source)
	if err != nil {
		return
	}
	if err = tempFile.Sync(); err != nil {
		return
	}
	if err = tempFile.Close(); err != nil {
		return
	}

	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}

	fileName := ds.blobPath("", blobRef)
	if err = os.Rename(tempFile.Name(), fileName); err != nil {
		return
	}

	stat, err := os.Lstat(fileName)
	if err != nil {
		return
	}
	if stat.IsDir() || stat.Size() != written {
		err = errors.New("written size didn't match")
		return
	}

	for _, mirror := range ds.mirrorPartitions {
		pname := mirror.partition
		if pname == "" {
			panic("expected partition name")
		}
		partitionDir := ds.blobDirectory(pname, blobRef)

		// Prevent the directory from being unlinked by the
		// enumerate code, which cleans up empty directories.
		defer keepDirectoryLock(partitionDir).Unlock()
		defer keepDirectoryLock(filepath.Dir(partitionDir)).Unlock()
		defer keepDirectoryLock(filepath.Dir(filepath.Dir(partitionDir))).Unlock()

		if err = os.MkdirAll(partitionDir, 0700); err != nil {
			return blobref.SizedBlobRef{}, fmt.Errorf("localdisk.receive: MkdirAll(%q) after lock on it: %v", partitionDir, err)
		}
		partitionFileName := ds.blobPath(pname, blobRef)
		pfi, err := os.Stat(partitionFileName)
		if err == nil && !pfi.IsDir() {
			log.Printf("Skipped dup on partition %q", pname)
		} else {
			if err = linkOrCopy(fileName, partitionFileName); err != nil && !linkAlreadyExists(err) {
				log.Printf("got link or copy error %T %#v", err, err)
				return blobref.SizedBlobRef{}, err
			}
			log.Printf("Mirrored blob %s to partition %q", blobRef, pname)
		}
	}

	blobGot = blobref.SizedBlobRef{BlobRef: blobRef, Size: stat.Size()}
	success = true

	hub := ds.GetBlobHub()
	hub.NotifyBlobReceived(blobRef)
	for _, mirror := range ds.mirrorPartitions {
		mirror.GetBlobHub().NotifyBlobReceived(blobRef)
	}
	return
}
Example #10
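// ReceiveBlob stores a blob on App Engine: the verified bytes go to
// the blobstore, and a datastore transaction records the blob row
// (adding the namespace if the row already exists) plus a membership
// row. If the transaction ultimately fails after the blobstore write,
// the orphaned blob is deleted.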
func (sto *appengineStorage) ReceiveBlob(br *blobref.BlobRef, in io.Reader) (sb blobref.SizedBlobRef, err os.Error) {
	if sto.ctx == nil {
		err = errNoContext
		return
	}

	var b bytes.Buffer
	hash := br.Hash()
	written, err := io.Copy(io.MultiWriter(hash, &b), in)
	if err != nil {
		return
	}
	if !br.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}

	// bkey is non-empty once we've uploaded the blob.
	var bkey appengine.BlobKey

	// uploadBlob uploads the blob, unless it's already been done.
	uploadBlob := func(ctx appengine.Context) os.Error {
		if len(bkey) > 0 {
			return nil // already done in previous transaction attempt
		}
		bw, err := blobstore.Create(ctx, "application/octet-stream")
		if err != nil {
			return err
		}
		_, err = io.Copy(bw, &b)
		if err != nil {
			// TODO(bradfitz): try to clean up; close it, see if we can find the key, delete it.
			ctx.Errorf("blobstore Copy error: %v", err)
			return err
		}
		err = bw.Close()
		if err != nil {
			// TODO(bradfitz): try to clean up; see if we can find the key, delete it.
			ctx.Errorf("blobstore Close error: %v", err)
			return err
		}
		k, err := bw.Key()
		if err == nil {
			bkey = k
		}
		return err
	}

	tryFunc := func(tc appengine.Context) os.Error {
		row, err := fetchEnt(tc, br)
		switch err {
		case datastore.ErrNoSuchEntity:
			if err := uploadBlob(sto.ctx); err != nil {
				tc.Errorf("uploadBlob failed: %v", err)
				return err
			}
			row = &blobEnt{
				Size:       []byte(fmt.Sprintf("%d", written)),
				BlobKey:    []byte(string(bkey)),
				Namespaces: []byte(sto.namespace),
			}
			_, err = datastore.Put(tc, entKey(tc, br), row)
			if err != nil {
				return err
			}
		case nil:
			if row.inNamespace(sto.namespace) {
				// Nothing to do
				return nil
			}
			row.Namespaces = []byte(string(row.Namespaces) + "|" + sto.namespace)
			_, err = datastore.Put(tc, entKey(tc, br), row)
			if err != nil {
				return err
			}
		default:
			return err
		}

		// Add membership row
		_, err = datastore.Put(tc, sto.memKey(tc, br), &memEnt{
			Size: []byte(fmt.Sprintf("%d", written)),
		})
		return err
	}
	err = datastore.RunInTransaction(sto.ctx, tryFunc, crossGroupTransaction)
	if err != nil {
		if len(bkey) > 0 {
			// If we just created this blob but we
			// ultimately failed, try our best to delete
			// it so it's not orphaned.
			blobstore.Delete(sto.ctx, bkey)
		}
		return
	}
	return blobref.SizedBlobRef{BlobRef: br, Size: written}, nil
}