// receive wraps dst.ReceiveBlob, optionally verifying that the bytes
// read from src match br's digest, and notifies the blob hub on success.
func receive(dst BlobReceiver, br blob.Ref, src io.Reader, checkHash bool) (sb blob.SizedRef, err error) {
	if checkHash {
		src = &checkHashReader{br.Hash(), br, src}
	}
	sb, err = dst.ReceiveBlob(br, src)
	if err != nil {
		return
	}
	GetHub(dst).NotifyBlobReceived(sb.Ref)
	return
}
// testSizedBlob reads all of r, failing the test if the byte count
// doesn't match size or the content digest doesn't match b1.
func testSizedBlob(t *testing.T, r io.Reader, b1 blob.Ref, size int64) {
	h := b1.Hash()
	n, err := io.Copy(h, r)
	if err != nil {
		t.Fatalf("error reading from %s: %v", r, err)
	}
	if n != size {
		t.Fatalf("read %d bytes from %s, metadata said %d!", n, r, size)
	}
	b2 := blob.RefFromHash(h)
	if b2 != b1 {
		t.Fatalf("content mismatch (expected %s, got %s)", b1, b2)
	}
}
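// A hypothetical usage sketch (testFetchBlob and its arguments are
// assumptions, not part of this section): fetch a blob from a storage
// with the same Fetch signature as storage.Fetch below, then verify
// both its reported size and its digest via testSizedBlob.
func testFetchBlob(t *testing.T, sto interface {
	Fetch(blob.Ref) (io.ReadCloser, uint32, error)
}, br blob.Ref, wantSize int64) {
	rc, size, err := sto.Fetch(br)
	if err != nil {
		t.Fatalf("Fetch(%v): %v", br, err)
	}
	defer rc.Close()
	if int64(size) != wantSize {
		t.Fatalf("Fetch(%v) reported size %d; want %d", br, size, wantSize)
	}
	testSizedBlob(t, rc, br, wantSize)
}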
// Fetch returns the decrypted plaintext for plainBR, verifying both the
// stored IV and the plaintext digest before handing bytes to the caller.
func (s *storage) Fetch(plainBR blob.Ref) (file io.ReadCloser, size uint32, err error) {
	meta, err := s.fetchMeta(plainBR)
	if err != nil {
		return nil, 0, err
	}
	encData, _, err := s.blobs.Fetch(meta.EncBlobRef)
	if err != nil {
		log.Printf("encrypt: plaintext %s's encrypted %v blob not found", plainBR, meta.EncBlobRef)
		return
	}
	defer encData.Close()

	// Quick sanity check that the blob begins with the same IV we
	// have in our metadata.
	blobIV := make([]byte, len(meta.IV))
	_, err = io.ReadFull(encData, blobIV)
	if err != nil {
		return nil, 0, fmt.Errorf("Error reading off IV header from blob: %v", err)
	}
	if !bytes.Equal(blobIV, meta.IV) {
		return nil, 0, fmt.Errorf("Blob and meta IV don't match")
	}

	// Slurp the whole blob into memory to validate its plaintext
	// checksum (no tampered bits) before returning it. Clients
	// should be the party doing this in the general case, but
	// we'll be extra paranoid and always do it here, at the cost
	// of sometimes having it be done twice.
	var plain bytes.Buffer
	plainHash := plainBR.Hash()
	plainSize, err := io.Copy(io.MultiWriter(&plain, plainHash), cipher.StreamReader{
		S: cipher.NewCTR(s.block, meta.IV),
		R: encData,
	})
	if err != nil {
		return nil, 0, err
	}
	size = types.U32(plainSize)
	if !plainBR.HashMatches(plainHash) {
		return nil, 0, blobserver.ErrCorruptBlob
	}
	return struct {
		*bytes.Reader
		io.Closer
	}{
		bytes.NewReader(plain.Bytes()),
		types.NopCloser,
	}, size, nil
}
// This variant of receive additionally caps reads at MaxBlobSize,
// surfaces a hash mismatch as ErrCorruptBlob, and propagates the hub
// notification error.
func receive(dst BlobReceiver, br blob.Ref, src io.Reader, checkHash bool) (sb blob.SizedRef, err error) {
	src = io.LimitReader(src, MaxBlobSize)
	if checkHash {
		src = &checkHashReader{br.Hash(), br, src, false}
	}
	sb, err = dst.ReceiveBlob(br, src)
	if err != nil {
		if checkHash && src.(*checkHashReader).corrupt {
			err = ErrCorruptBlob
		}
		return
	}
	err = GetHub(dst).NotifyBlobReceived(sb)
	return
}
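// checkHashReader isn't defined in this section. Below is a minimal
// sketch consistent with the composite literals in both versions of
// receive above (the field names and the exact error text are
// assumptions): it hashes bytes as they pass through and, at EOF,
// flags the blob as corrupt if the digest doesn't match the declared
// ref. The first receive constructs it without the corrupt field.
type checkHashReader struct {
	h       hash.Hash
	br      blob.Ref
	r       io.Reader
	corrupt bool
}

func (c *checkHashReader) Read(p []byte) (n int, err error) {
	n, err = c.r.Read(p)
	c.h.Write(p[:n])
	if err == io.EOF && !c.br.HashMatches(c.h) {
		c.corrupt = true
		err = errors.New("blobserver: received blob doesn't match its declared digest")
	}
	return
}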
// NewSigner returns a Signer given an armored public key's blobref,
// its armored content, and its associated private key entity.
// The privateKeySource must be either an *openpgp.Entity or a string filename to a secret key.
func NewSigner(pubKeyRef blob.Ref, armoredPubKey io.Reader, privateKeySource interface{}) (*Signer, error) {
	hash := pubKeyRef.Hash()
	keyId, armoredPubKeyString, err := jsonsign.ParseArmoredPublicKey(io.TeeReader(armoredPubKey, hash))
	if err != nil {
		return nil, err
	}
	if !pubKeyRef.HashMatches(hash) {
		return nil, fmt.Errorf("pubkey ref of %v doesn't match provided armored public key", pubKeyRef)
	}
	var privateKey *openpgp.Entity
	switch v := privateKeySource.(type) {
	case *openpgp.Entity:
		privateKey = v
	case string:
		privateKey, err = jsonsign.EntityFromSecring(keyId, v)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("invalid privateKeySource type %T", v)
	}
	if privateKey == nil {
		return nil, errors.New("nil privateKey")
	}
	return &Signer{
		keyId:      keyId,
		pubref:     pubKeyRef,
		privEntity: privateKey,
		baseSigReq: jsonsign.SignRequest{
			ServerMode: true, // shouldn't matter, since we're supplying the rest of the fields
			Fetcher: memoryBlobFetcher{
				pubKeyRef: func() (uint32, io.ReadCloser) {
					return uint32(len(armoredPubKeyString)), ioutil.NopCloser(strings.NewReader(armoredPubKeyString))
				},
			},
			EntityFetcher: entityFetcherFunc(func(wantKeyId string) (*openpgp.Entity, error) {
				if privateKey.PrivateKey.KeyIdString() != wantKeyId &&
					privateKey.PrivateKey.KeyIdShortString() != wantKeyId {
					return nil, fmt.Errorf("jsonsign code unexpectedly requested keyId %q; only have %q", wantKeyId, keyId)
				}
				return privateKey, nil
			}),
		},
	}, nil
}
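// Neither memoryBlobFetcher nor entityFetcherFunc is shown in this
// section. A minimal sketch consistent with how NewSigner uses them
// (an assumption, not the canonical definitions): a map-backed blob
// fetcher keyed by blob.Ref, and a func adapter for fetching OpenPGP
// entities by key ID.
type memoryBlobFetcher map[blob.Ref]func() (size uint32, rc io.ReadCloser)

func (m memoryBlobFetcher) Fetch(br blob.Ref) (file io.ReadCloser, size uint32, err error) {
	fn, ok := m[br]
	if !ok {
		return nil, 0, os.ErrNotExist
	}
	size, file = fn()
	return file, size, nil
}

type entityFetcherFunc func(keyId string) (*openpgp.Entity, error)

func (f entityFetcherFunc) FetchEntity(keyId string) (*openpgp.Entity, error) {
	return f(keyId)
}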
func (tf *Fetcher) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) { sb := blob.SizedRef{} h := br.Hash() if h == nil { return sb, fmt.Errorf("Unsupported blobref hash for %s", br) } all, err := ioutil.ReadAll(io.TeeReader(source, h)) if err != nil { return sb, err } if !br.HashMatches(h) { return sb, fmt.Errorf("Hash mismatch receiving blob %s", br) } b := &Blob{Contents: string(all)} tf.AddBlob(b) return blob.SizedRef{br, int64(len(all))}, nil }
func (sto *s3Storage) ReceiveBlob(b blob.Ref, source io.Reader) (outsb blob.SizedRef, outerr error) {
	zero := outsb
	slurper := newAmazonSlurper(b)
	defer slurper.Cleanup()

	hash := b.Hash()
	size, err := io.Copy(io.MultiWriter(hash, slurper), source)
	if err != nil {
		return zero, err
	}
	if !b.HashMatches(hash) {
		return zero, blobserver.ErrCorruptBlob
	}
	err = sto.s3Client.PutObject(b.String(), sto.bucket, slurper.md5, size, slurper)
	if err != nil {
		return zero, err
	}
	return blob.SizedRef{Ref: b, Size: size}, nil
}
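// newAmazonSlurper and its return type aren't shown here. A rough,
// in-memory-only sketch of what ReceiveBlob above relies on (an
// assumption; the real version plausibly spills large blobs to a temp
// file, which is why Cleanup exists): it buffers the blob while
// computing its MD5, so the S3 PUT can both declare the MD5 and replay
// the buffered bytes as the request body.
type amazonSlurper struct {
	buf bytes.Buffer
	md5 hash.Hash
}

func newAmazonSlurper(br blob.Ref) *amazonSlurper {
	return &amazonSlurper{md5: md5.New()}
}

func (s *amazonSlurper) Write(p []byte) (int, error) {
	s.md5.Write(p) // hash never errors
	return s.buf.Write(p)
}

func (s *amazonSlurper) Read(p []byte) (int, error) { return s.buf.Read(p) }

func (s *amazonSlurper) Cleanup() {} // would remove the temp spill file, if any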
func (tf *Fetcher) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) { sb := blob.SizedRef{} h := br.Hash() if h == nil { return sb, fmt.Errorf("Unsupported blobref hash for %s", br) } all, err := ioutil.ReadAll(io.TeeReader(source, h)) if err != nil { return sb, err } if !br.HashMatches(h) { // This is a somewhat redundant check, since // blobserver.Receive now does it. But for testing code, // it's worth the cost. return sb, fmt.Errorf("Hash mismatch receiving blob %s", br) } b := &Blob{Contents: string(all)} tf.AddBlob(b) return blob.SizedRef{br, int64(len(all))}, nil }
// ReceiveBlob encrypts the plaintext blob, stores the IV-prefixed
// ciphertext in s.blobs, writes a meta blob mapping the plaintext ref
// to the ciphertext ref, and updates the index.
func (s *storage) ReceiveBlob(plainBR blob.Ref, source io.Reader) (sb blob.SizedRef, err error) {
	iv := s.randIV()
	stream := cipher.NewCTR(s.block, iv)
	hash := plainBR.Hash()
	var buf bytes.Buffer // TODO: compress before encrypting?
	buf.Write(iv)        // TODO: write more structured header w/ version & IV length? or does that weaken it?
	sw := cipher.StreamWriter{S: stream, W: &buf}
	plainSize, err := io.Copy(io.MultiWriter(sw, hash), source)
	if err != nil {
		return sb, err
	}
	if !plainBR.HashMatches(hash) {
		return sb, blobserver.ErrCorruptBlob
	}

	encBR := blob.SHA1FromBytes(buf.Bytes())
	_, err = blobserver.Receive(s.blobs, encBR, bytes.NewReader(buf.Bytes()))
	if err != nil {
		log.Printf("encrypt: error writing encrypted blob %v (plaintext %v): %v", encBR, plainBR, err)
		return sb, errors.New("encrypt: error writing encrypted blob")
	}

	meta := encodeMetaValue(uint32(plainSize), iv, encBR, buf.Len())
	metaBlob := s.makeSingleMetaBlob(plainBR, meta)
	_, err = blobserver.ReceiveNoHash(s.meta, blob.SHA1FromBytes(metaBlob), bytes.NewReader(metaBlob))
	if err != nil {
		log.Printf("encrypt: error writing encrypted meta for plaintext %v (encrypted blob %v): %v", plainBR, encBR, err)
		return sb, errors.New("encrypt: error writing encrypted meta")
	}

	err = s.index.Set(plainBR.String(), meta)
	if err != nil {
		return sb, fmt.Errorf("encrypt: error updating index for encrypted %v (plaintext %v): %v", encBR, plainBR, err)
	}

	return blob.SizedRef{plainBR, uint32(plainSize)}, nil
}
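// encodeMetaValue isn't shown in this section. A sketch of a plausible
// layout (the exact encoding is an assumption): plaintext size, IV,
// encrypted blobref, and encrypted size, "/"-joined so the fetchMeta
// used by Fetch above can parse it back out.
func encodeMetaValue(plainSize uint32, iv []byte, encBR blob.Ref, encSize int) string {
	return fmt.Sprintf("%d/%x/%v/%d", plainSize, iv, encBR, encSize)
}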
func (gs *Storage) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) {
	buf := &bytes.Buffer{}
	hash := br.Hash()
	size, err := io.Copy(io.MultiWriter(hash, buf), source)
	if err != nil {
		return blob.SizedRef{}, err
	}
	if !br.HashMatches(hash) {
		return blob.SizedRef{}, blobserver.ErrCorruptBlob
	}

	for tries, shouldRetry := 0, true; tries < 2 && shouldRetry; tries++ {
		shouldRetry, err = gs.client.PutObject(
			&googlestorage.Object{Bucket: gs.bucket, Key: br.String()},
			ioutil.NopCloser(bytes.NewReader(buf.Bytes())))
	}
	if err != nil {
		return blob.SizedRef{}, err
	}
	return blob.SizedRef{Ref: br, Size: size}, nil
}
func (s *Storage) ReceiveBlob(br blob.Ref, source io.Reader) (blob.SizedRef, error) {
	sb := blob.SizedRef{}
	h := br.Hash()
	if h == nil {
		return sb, fmt.Errorf("Unsupported blobref hash for %s", br)
	}
	all, err := ioutil.ReadAll(io.TeeReader(source, h))
	if err != nil {
		return sb, err
	}
	if !br.HashMatches(h) {
		// This is a somewhat redundant check, since
		// blobserver.Receive now does it. But for testing code,
		// it's worth the cost.
		return sb, fmt.Errorf("Hash mismatch receiving blob %s", br)
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.m == nil {
		s.m = make(map[blob.Ref][]byte)
	}
	_, had := s.m[br]
	if !had {
		s.m[br] = all
		if s.lru != nil {
			s.lru.Add(br.String(), nil)
		}
		s.size += int64(len(all))
		// Evict oldest blobs until we're back under the size
		// cap, if one is set.
		for s.maxSize != 0 && s.size > s.maxSize {
			if key, _ := s.lru.RemoveOldest(); key != "" {
				s.removeBlobLocked(blob.MustParse(key))
			} else {
				break // shouldn't happen
			}
		}
	}
	return blob.SizedRef{br, uint32(len(all))}, nil
}
func (ix *Index) ReceiveBlob(blobRef blob.Ref, source io.Reader) (retsb blob.SizedRef, err error) {
	sniffer := NewBlobSniffer(blobRef)
	hash := blobRef.Hash()
	var written int64
	written, err = io.Copy(io.MultiWriter(hash, sniffer), source)
	if err != nil {
		return
	}
	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}
	sniffer.Parse()

	bm := ix.s.BeginBatch()
	err = ix.populateMutation(blobRef, sniffer, bm)
	if err != nil {
		return
	}
	err = ix.s.CommitBatch(bm)
	if err != nil {
		return
	}

	// TODO(bradfitz): log levels? These are generally noisy
	// (especially in tests, like search/handler_test), but I
	// could see it being useful in production. For now, disabled:
	//
	// mimeType := sniffer.MIMEType()
	// log.Printf("indexer: received %s; type=%v; truncated=%v", blobRef, mimeType, sniffer.IsTruncated())

	return blob.SizedRef{blobRef, written}, nil
}
func (ds *DiskStorage) ReceiveBlob(blobRef blob.Ref, source io.Reader) (blobGot blob.SizedRef, err error) {
	pname := ds.partition
	if pname != "" {
		err = fmt.Errorf("refusing upload directly to queue partition %q", pname)
		return
	}
	hashedDirectory := ds.blobDirectory(pname, blobRef)
	err = os.MkdirAll(hashedDirectory, 0700)
	if err != nil {
		return
	}

	tempFile, err := ioutil.TempFile(hashedDirectory, blobFileBaseName(blobRef)+".tmp")
	if err != nil {
		return
	}

	success := false // set true later
	defer func() {
		if !success {
			log.Println("Removing temp file: ", tempFile.Name())
			os.Remove(tempFile.Name())
		}
	}()

	hash := blobRef.Hash()
	written, err := io.Copy(io.MultiWriter(hash, tempFile), source)
	if err != nil {
		return
	}
	if err = tempFile.Sync(); err != nil {
		return
	}
	if err = tempFile.Close(); err != nil {
		return
	}
	if !blobRef.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}

	stat, err := os.Lstat(tempFile.Name())
	if err != nil {
		return
	}
	if stat.Size() != written {
		err = fmt.Errorf("temp file %q size %d didn't match written size %d", tempFile.Name(), stat.Size(), written)
		return
	}

	fileName := ds.blobPath("", blobRef)
	if err = os.Rename(tempFile.Name(), fileName); err != nil {
		return
	}

	stat, err = os.Lstat(fileName)
	if err != nil {
		return
	}
	if stat.Size() != written {
		err = errors.New("Written size didn't match.")
		return
	}

	for _, mirror := range ds.mirrorPartitions {
		pname := mirror.partition
		if pname == "" {
			panic("expected partition name")
		}
		partitionDir := ds.blobDirectory(pname, blobRef)

		// Prevent the directory from being unlinked by
		// enumerate code, which cleans up.
		defer keepDirectoryLock(partitionDir).Unlock()
		defer keepDirectoryLock(filepath.Dir(partitionDir)).Unlock()
		defer keepDirectoryLock(filepath.Dir(filepath.Dir(partitionDir))).Unlock()

		if err = os.MkdirAll(partitionDir, 0700); err != nil {
			return blob.SizedRef{}, fmt.Errorf("localdisk.receive: MkdirAll(%q) after lock on it: %v", partitionDir, err)
		}
		partitionFileName := ds.blobPath(pname, blobRef)
		pfi, err := os.Stat(partitionFileName)
		if err == nil && !pfi.IsDir() {
			log.Printf("Skipped dup on partition %q", pname)
		} else {
			if err = linkOrCopy(fileName, partitionFileName); err != nil && !linkAlreadyExists(err) {
				log.Fatalf("got link or copy error %T %#v", err, err)
				return blob.SizedRef{}, err
			}
			log.Printf("Mirrored blob %s to partition %q", blobRef, pname)
		}
	}

	blobGot = blob.SizedRef{Ref: blobRef, Size: stat.Size()}
	success = true

	hub := ds.GetBlobHub()
	hub.NotifyBlobReceived(blobRef)
	for _, mirror := range ds.mirrorPartitions {
		mirror.GetBlobHub().NotifyBlobReceived(blobRef)
	}
	return
}
func (sto *appengineStorage) ReceiveBlob(br blob.Ref, in io.Reader) (sb blob.SizedRef, err error) {
	ctx := sto.ctx
	if ctx == nil {
		loan := ctxPool.Get()
		defer loan.Return()
		ctx = loan
	}

	var b bytes.Buffer
	hash := br.Hash()
	written, err := io.Copy(io.MultiWriter(hash, &b), in)
	if err != nil {
		return
	}
	if !br.HashMatches(hash) {
		err = blobserver.ErrCorruptBlob
		return
	}

	// bkey is non-empty once we've uploaded the blob.
	var bkey appengine.BlobKey

	// uploadBlob uploads the blob, unless it's already been done.
	uploadBlob := func(ctx appengine.Context) error {
		if len(bkey) > 0 {
			return nil // already done in previous transaction attempt
		}
		bw, err := blobstore.Create(ctx, "application/octet-stream")
		if err != nil {
			return err
		}
		_, err = io.Copy(bw, &b)
		if err != nil {
			// TODO(bradfitz): try to clean up; close it, see if we can find the key, delete it.
			ctx.Errorf("blobstore Copy error: %v", err)
			return err
		}
		err = bw.Close()
		if err != nil {
			// TODO(bradfitz): try to clean up; see if we can find the key, delete it.
			ctx.Errorf("blobstore Close error: %v", err)
			return err
		}
		k, err := bw.Key()
		if err == nil {
			bkey = k
		}
		return err
	}

	tryFunc := func(tc appengine.Context) error {
		row, err := fetchEnt(tc, br)
		switch err {
		case datastore.ErrNoSuchEntity:
			if err := uploadBlob(tc); err != nil {
				tc.Errorf("uploadBlob failed: %v", err)
				return err
			}
			row = &blobEnt{
				Size:       written,
				BlobKey:    bkey,
				Namespaces: sto.namespace,
			}
			_, err = datastore.Put(tc, entKey(tc, br), row)
			if err != nil {
				return err
			}
		case nil:
			if row.inNamespace(sto.namespace) {
				// Nothing to do
				return nil
			}
			row.Namespaces = row.Namespaces + "|" + sto.namespace
			_, err = datastore.Put(tc, entKey(tc, br), row)
			if err != nil {
				return err
			}
		default:
			return err
		}

		// Add membership row
		_, err = datastore.Put(tc, sto.memKey(tc, br), &memEnt{
			Size: written,
		})
		return err
	}

	err = datastore.RunInTransaction(ctx, tryFunc, crossGroupTransaction)
	if err != nil {
		if len(bkey) > 0 {
			// If we just created this blob but we
			// ultimately failed, try our best to delete
			// it so it's not orphaned.
			blobstore.Delete(ctx, bkey)
		}
		return
	}
	return blob.SizedRef{br, written}, nil
}