func (ph *PublishHandler) signUpload(jsonSign *signhandler.Handler, name string, bb *schema.Builder) (blob.Ref, error) {
	signed, err := jsonSign.Sign(bb)
	if err != nil {
		return blob.Ref{}, fmt.Errorf("error signing %s: %v", name, err)
	}
	uh := client.NewUploadHandleFromString(signed)
	_, err = ph.Storage.ReceiveBlob(uh.BlobRef, uh.Contents)
	if err != nil {
		return blob.Ref{}, fmt.Errorf("error uploading %s: %v", name, err)
	}
	return uh.BlobRef, nil
}
func (ph *PublishHandler) signUpload(jsonSign *JSONSignHandler, name string, m map[string]interface{}) (*blobref.BlobRef, error) {
	signed, err := jsonSign.SignMap(m)
	if err != nil {
		return nil, fmt.Errorf("error signing %s: %v", name, err)
	}
	uh := client.NewUploadHandleFromString(signed)
	_, err = ph.Storage.ReceiveBlob(uh.BlobRef, uh.Contents)
	if err != nil {
		return nil, fmt.Errorf("error uploading %s: %v", name, err)
	}
	return uh.BlobRef, nil
}
// fileMapFromDuplicate queries the server's search interface for an
// existing file with an entire contents of sum (a blobref string).
// If the server has it, it's validated, and then fileMap (which must
// already be partially populated) has its "parts" field populated,
// and then fileMap is uploaded (if necessary) and its blobref is
// returned. If there's any problem, or a dup doesn't exist, ok is
// false.
func (up *Uploader) fileMapFromDuplicate(bs blobserver.StatReceiver, fileMap schema.Map, sum string) (fileSchema *blobref.BlobRef, ok bool) {
	_, err := up.Client.SearchRoot()
	if err != nil {
		return
	}
	dupFileRef, err := up.Client.SearchExistingFileSchema(blobref.MustParse(sum))
	if err != nil {
		log.Printf("Warning: error searching for already-uploaded copy of %s: %v", sum, err)
		return nil, false
	}
	if dupFileRef == nil {
		return nil, false
	}
	if *flagVerbose {
		log.Printf("Found dup of contents %s in file schema %s", sum, dupFileRef)
	}
	dupMap, err := up.Client.FetchMap(dupFileRef)
	if err != nil {
		log.Printf("Warning: error fetching %v: %v", dupFileRef, err)
		return nil, false
	}
	parts, ok := dupMap["parts"].([]interface{})
	if !ok {
		return nil, false
	}
	fileMap["parts"] = parts // safe, since dupMap never escapes, so sharing parts is okay

	// Hack: convert all the parts' float64 to int64, so they encode as e.g. "1000035"
	// and not "1.000035e+06". Perhaps we should work in *schema.SuperSets here, and not
	// JSON maps.
	// TODO(bradfitz): clean up?
	for _, p := range parts {
		pm := p.(map[string]interface{})
		pm["size"] = int64(pm["size"].(float64))
	}

	json, err := fileMap.JSON()
	if err != nil {
		return nil, false
	}
	uh := client.NewUploadHandleFromString(json)
	if uh.BlobRef.Equal(dupFileRef) {
		// Unchanged (same filename, modtime, JSON serialization, etc)
		return dupFileRef, true
	}
	pr, err := up.uploadHandle(uh)
	if err != nil {
		log.Printf("Warning: error uploading file map after finding server dup of %v: %v", sum, err)
		return nil, false
	}
	return pr.BlobRef, true
}
func (up *Uploader) uploadString(s string) (*client.PutResult, error) {
	uh := client.NewUploadHandleFromString(s)
	if c := up.haveCache; c != nil && c.BlobExists(uh.BlobRef) {
		cachelog.Printf("HaveCache HIT for %s / %d", uh.BlobRef, uh.Size)
		return &client.PutResult{BlobRef: uh.BlobRef, Size: uh.Size, Skipped: true}, nil
	}
	pr, err := up.Upload(uh)
	if err == nil && up.haveCache != nil {
		up.haveCache.NoteBlobExists(uh.BlobRef)
	}
	if pr == nil && err == nil {
		log.Fatalf("Got nil/nil in uploadString while uploading %s", s)
	}
	return pr, err
}
// fileMapFromDuplicate queries the server's search interface for an
// existing file with an entire contents of sum (a blobref string).
// If the server has it, it's validated, and then fileMap (which must
// already be partially populated) has its "parts" field populated,
// and then fileMap is uploaded (if necessary) and a PutResult with
// its blobref is returned. If there's any problem, or a dup doesn't
// exist, ok is false.
// If required, Vivify is also done here.
func (up *Uploader) fileMapFromDuplicate(bs blobserver.StatReceiver, fileMap *schema.Builder, sum string) (pr *client.PutResult, ok bool) {
	if noDupSearch {
		return
	}
	_, err := up.Client.SearchRoot()
	if err != nil {
		return
	}
	dupFileRef, err := up.Client.SearchExistingFileSchema(blob.MustParse(sum))
	if err != nil {
		log.Printf("Warning: error searching for already-uploaded copy of %s: %v", sum, err)
		return nil, false
	}
	if !dupFileRef.Valid() {
		return nil, false
	}
	if *cmdmain.FlagVerbose {
		log.Printf("Found dup of contents %s in file schema %s", sum, dupFileRef)
	}
	dupMap, err := up.Client.FetchSchemaBlob(dupFileRef)
	if err != nil {
		log.Printf("Warning: error fetching %v: %v", dupFileRef, err)
		return nil, false
	}
	fileMap.PopulateParts(dupMap.PartsSize(), dupMap.ByteParts())

	json, err := fileMap.JSON()
	if err != nil {
		return nil, false
	}
	uh := client.NewUploadHandleFromString(json)
	if up.fileOpts.wantVivify() {
		uh.Vivify = true
	}
	if !uh.Vivify && uh.BlobRef == dupFileRef {
		// Unchanged (same filename, modtime, JSON serialization, etc)
		return &client.PutResult{BlobRef: dupFileRef, Size: uint32(len(json)), Skipped: true}, true
	}
	pr, err = up.Upload(uh)
	if err != nil {
		log.Printf("Warning: error uploading file map after finding server dup of %v: %v", sum, err)
		return nil, false
	}
	return pr, true
}
func (up *Uploader) uploadString(s string) (*client.PutResult, error) {
	return up.Upload(client.NewUploadHandleFromString(s))
}
func (up *Uploader) uploadString(s string) (*client.PutResult, error) {
	uh := client.NewUploadHandleFromString(s)
	return up.uploadHandle(uh)
}
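// Illustrative sketch (not part of the original source): a direct use of the
// public client calls that uploadString wraps, i.e. building an UploadHandle
// from a string and handing it to the client's Upload method. This is a
// minimal sketch assuming a configured client; client.NewOrFail and the exact
// PutResult fields printed here are assumptions, not taken from the code above.
func exampleUploadString() {
	c := client.NewOrFail() // assumed constructor; loads the user's client config
	uh := client.NewUploadHandleFromString("hello, blobserver")
	pr, err := c.Upload(uh)
	if err != nil {
		log.Fatalf("upload failed: %v", err)
	}
	log.Printf("uploaded %s (%d bytes, skipped=%v)", pr.BlobRef, pr.Size, pr.Skipped)
}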