func (c *permanodeCmd) RunCommand(up *Uploader, args []string) error { if len(args) > 0 { return errors.New("Permanode command doesn't take any additional arguments") } var ( permaNode *client.PutResult err error ) permaNode, err = up.UploadNewPermanode() if handleResult("permanode", permaNode, err) != nil { return err } if c.name != "" { put, err := up.UploadAndSignMap(schema.NewSetAttributeClaim(permaNode.BlobRef, "title", c.name)) handleResult("claim-permanode-title", put, err) } if c.tag != "" { tags := strings.Split(c.tag, ",") m := schema.NewSetAttributeClaim(permaNode.BlobRef, "tag", tags[0]) for _, tag := range tags { m = schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag) put, err := up.UploadAndSignMap(m) handleResult("claim-permanode-tag", put, err) } } return nil }
func (c *attrCmd) RunCommand(args []string) error { if len(args) != 3 { return errors.New("Attr takes 3 args: <permanode> <attr> <value>") } permanode, attr, value := args[0], args[1], args[2] var err error pn := blobref.Parse(permanode) if pn == nil { return fmt.Errorf("Error parsing blobref %q", permanode) } bb := schema.NewSetAttributeClaim(pn, attr, value) if c.add { if c.del { return errors.New("Add and del options are exclusive") } bb = schema.NewAddAttributeClaim(pn, attr, value) } else { // TODO: del, which can make <value> be optional if c.del { return errors.New("del not yet implemented") } } put, err := getUploader().UploadAndSignBlob(bb) handleResult(bb.Type(), put, err) return nil }
// SetAttrValues sets multi-valued attribute. func (o *Object) SetAttrValues(key string, attrs []string) error { exists := asSet(o.Attrs(key)) actual := asSet(attrs) o.mu.Lock() defer o.mu.Unlock() // add new values for v := range actual { if exists[v] { delete(exists, v) continue } _, err := o.h.upload(schema.NewAddAttributeClaim(o.pn, key, v)) if err != nil { return err } } // delete unneeded values for v := range exists { _, err := o.h.upload(schema.NewDelAttributeClaim(o.pn, key, v)) if err != nil { return err } } if o.attr == nil { o.attr = make(map[string][]string) } o.attr[key] = attrs return nil }
// uploadFilePermanode creates and uploads the planned permanode (with sum as a
// fixed key) associated with the file blobref fileRef.
// It also sets the optional tags for this permanode.
func (up *Uploader) uploadFilePermanode(sum string, fileRef blob.Ref, claimTime time.Time) error {
	// Use a fixed time value for signing; not using modtime
	// so two identical files don't have different modtimes?
	// TODO(bradfitz): consider this more?
	permaNodeSigTime := time.Unix(0, 0)
	permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
	if err != nil {
		return fmt.Errorf("Error uploading planned permanode: %v", err)
	}
	handleResult("node-permanode", permaNode, nil)
	// Claim the file as the permanode's content, dating both the claim
	// JSON and its signature at claimTime.
	contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", fileRef.String())
	contentAttr.SetClaimDate(claimTime)
	signer, err := up.Signer()
	if err != nil {
		return err
	}
	signed, err := contentAttr.SignAt(signer, claimTime)
	if err != nil {
		return fmt.Errorf("Failed to sign content claim: %v", err)
	}
	put, err := up.uploadString(signed)
	if err != nil {
		return fmt.Errorf("Error uploading permanode's attribute: %v", err)
	}
	handleResult("node-permanode-contentattr", put, nil)
	// Upload one signed add-attribute "tag" claim per tag, concurrently.
	// Each goroutine sends exactly one value (its error or nil) on errch.
	if tags := up.fileOpts.tags(); len(tags) > 0 {
		errch := make(chan error)
		for _, tag := range tags {
			go func(tag string) {
				m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
				m.SetClaimDate(claimTime)
				signed, err := m.SignAt(signer, claimTime)
				if err != nil {
					errch <- fmt.Errorf("Failed to sign tag claim: %v", err)
					return
				}
				put, err := up.uploadString(signed)
				if err != nil {
					errch <- fmt.Errorf("Error uploading permanode's tag attribute %v: %v", tag, err)
					return
				}
				handleResult("node-permanode-tag", put, nil)
				errch <- nil
			}(tag)
		}
		// Drain one result per goroutine, keeping only the first error.
		// err is nil at this point (checked after uploadString above).
		for range tags {
			if e := <-errch; e != nil && err == nil {
				err = e
			}
		}
		if err != nil {
			return err
		}
	}
	return nil
}
func (c *permanodeCmd) RunCommand(args []string) error { if len(args) > 0 { return errors.New("Permanode command doesn't take any additional arguments") } var ( permaNode *client.PutResult err error up = getUploader() ) if (c.key != "") != (c.sigTime != "") { return errors.New("Both --key and --sigtime must be used to produce deterministic permanodes.") } if c.key == "" { // Normal case, with a random permanode. permaNode, err = up.UploadNewPermanode() } else { const format = "2006-01-02 15:04:05" sigTime, err := time.Parse(format, c.sigTime) if err != nil { return fmt.Errorf("Error parsing time %q; expecting time of form %q", c.sigTime, format) } permaNode, err = up.UploadPlannedPermanode(c.key, sigTime) } if handleResult("permanode", permaNode, err) != nil { return err } if c.title != "" { put, err := up.UploadAndSignBlob(schema.NewSetAttributeClaim(permaNode.BlobRef, "title", c.title)) handleResult("claim-permanode-title", put, err) } if c.tag != "" { tags := strings.Split(c.tag, ",") m := schema.NewSetAttributeClaim(permaNode.BlobRef, "tag", tags[0]) for _, tag := range tags { m = schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag) put, err := up.UploadAndSignBlob(m) handleResult("claim-permanode-tag", put, err) } } return nil }
func storePhoto(p photo) (string, error) { srcFile := localPathOf(p) f, err := os.Open(srcFile) if err != nil { return "", err } defer f.Close() fileRef, err := schema.WriteFileFromReader(camliClient, p.Id+"."+p.Extension, f) res, err := camliClient.UploadNewPermanode() if err != nil { return "", err } perma := res.BlobRef p.Description = cleanHTML(p.Description) claims := []*schema.Builder{} claims = append(claims, schema.NewSetAttributeClaim(perma, "camliContent", fileRef.String())) claims = append(claims, schema.NewSetAttributeClaim(perma, "title", mkTitle(p.Description))) claims = append(claims, schema.NewSetAttributeClaim(perma, "description", p.Description)) for _, t := range p.Tags { claims = append(claims, schema.NewAddAttributeClaim(perma, "tag", t)) } if p.Cat == "Public" { claims = append(claims, schema.NewSetAttributeClaim(perma, "camliAccess", "public")) } grp := syncutil.Group{} for _, claimBuilder := range claims { claim := claimBuilder.Blob() grp.Go(func() error { _, err := camliClient.UploadAndSignBlob(claim) return err }) } return perma.String(), grp.Err() }
func (id *IndexDeps) AddAttribute(permaNode *blobref.BlobRef, attr, value string) *blobref.BlobRef { m := schema.NewAddAttributeClaim(permaNode, attr, value) m.SetClaimDate(id.advanceTime()) return id.uploadAndSign(m) }
// RunCommand uploads the files/directories in args. Depending on flags it
// can also: vivify uploads (--vivify), create a permanode pointing at the
// single uploaded argument (--permanode) with optional title/tag claims,
// create per-file permanodes (--filenodes), report disk usage (--du),
// read paths from stdin (--argsfrominput), or record memory stats.
func (c *fileCmd) RunCommand(args []string) error {
	// Flag-combination validation: vivify excludes everything else, and
	// title/tag only make sense when some permanode is being created.
	if c.vivify {
		if c.makePermanode || c.filePermanodes || c.tag != "" || c.title != "" {
			return cmdmain.UsageError("--vivify excludes any other option")
		}
	}
	if c.title != "" && !c.makePermanode {
		return cmdmain.UsageError("Can't set title without using --permanode")
	}
	if c.tag != "" && !c.makePermanode && !c.filePermanodes {
		return cmdmain.UsageError("Can't set tag without using --permanode or --filenodes")
	}
	if c.histo != "" && !c.memstats {
		return cmdmain.UsageError("Can't use histo without memstats")
	}
	if c.deleteAfterUpload && !c.filePermanodes {
		return cmdmain.UsageError("Can't set use --delete_after_upload without --filenodes")
	}
	if c.filePermanodes && c.contentsOnly {
		return cmdmain.UsageError("--contents_only and --filenodes are exclusive. Use --permanode instead.")
	}
	// TODO(mpl): do it for other modes too. Or even better, do it once for all modes.
	if *cmdmain.FlagVerbose {
		log.SetOutput(cmdmain.Stderr)
	} else {
		log.SetOutput(ioutil.Discard)
	}
	up := getUploader()
	if c.memstats {
		sr := new(statspkg.Receiver)
		up.altStatReceiver = sr
		defer func() { DumpStats(sr, c.histo) }()
	}
	c.initCaches(up)
	if c.makePermanode || c.filePermanodes {
		// Permanode creation requires a configured signing key.
		testSigBlobRef := up.Client.SignerPublicKeyBlobref()
		if !testSigBlobRef.Valid() {
			return cmdmain.UsageError("A GPG key is needed to create permanodes; configure one or use vivify mode.")
		}
	}
	up.fileOpts = &fileOptions{
		permanode:    c.filePermanodes,
		tag:          c.tag,
		vivify:       c.vivify,
		exifTime:     c.exifTime,
		capCtime:     c.capCtime,
		contentsOnly: c.contentsOnly,
	}
	var (
		permaNode *client.PutResult
		lastPut   *client.PutResult
		err       error
	)
	if c.makePermanode {
		if len(args) != 1 {
			return fmt.Errorf("The --permanode flag can only be used with exactly one file or directory argument")
		}
		permaNode, err = up.UploadNewPermanode()
		if err != nil {
			return fmt.Errorf("Uploading permanode: %v", err)
		}
	}
	if c.diskUsage {
		// --du mode: walk the single directory argument and report sizes
		// without uploading; returns before the upload loop below.
		if len(args) != 1 {
			return fmt.Errorf("The --du flag can only be used with exactly one directory argument")
		}
		dir := args[0]
		fi, err := up.stat(dir)
		if err != nil {
			return err
		}
		if !fi.IsDir() {
			return fmt.Errorf("%q is not a directory.", dir)
		}
		t := up.NewTreeUpload(dir)
		t.DiskUsageMode = true
		t.Start()
		pr, err := t.Wait()
		if err != nil {
			return err
		}
		handleResult("tree-upload", pr, err)
		return nil
	}
	if c.argsFromInput {
		// Stream paths from stdin until EOF; this loop only exits the
		// process (os.Exit) or aborts (log.Fatal), never returns.
		if len(args) > 0 {
			return errors.New("args not supported with -argsfrominput")
		}
		tu := up.NewRootlessTreeUpload()
		tu.Start()
		br := bufio.NewReader(os.Stdin)
		for {
			path, err := br.ReadString('\n')
			if path = strings.TrimSpace(path); path != "" {
				tu.Enqueue(path)
			}
			if err == io.EOF {
				android.PreExit()
				os.Exit(0)
			}
			if err != nil {
				log.Fatal(err)
			}
		}
	}
	if len(args) == 0 {
		return cmdmain.UsageError("No files or directories given.")
	}
	if up.statCache != nil {
		defer up.statCache.Close()
	}
	for _, filename := range args {
		fi, err := os.Stat(filename)
		if err != nil {
			return err
		}
		// Skip ignored files or base directories. Failing to skip the
		// latter results in a panic.
		if up.Client.IsIgnoredFile(filename) {
			log.Printf("Client configured to ignore %s; skipping.", filename)
			continue
		}
		if fi.IsDir() {
			if up.fileOpts.wantVivify() {
				vlog.Printf("Directories not supported in vivify mode; skipping %v\n", filename)
				continue
			}
			t := up.NewTreeUpload(filename)
			t.Start()
			lastPut, err = t.Wait()
		} else {
			lastPut, err = up.UploadFile(filename)
			if err == nil && c.deleteAfterUpload {
				// Best-effort delete: failures are logged, not fatal.
				if err := os.Remove(filename); err != nil {
					log.Printf("Error deleting %v: %v", filename, err)
				} else {
					log.Printf("Deleted %v", filename)
				}
			}
		}
		if handleResult("file", lastPut, err) != nil {
			return err
		}
	}
	// lastPut can be nil if everything was skipped above, hence the
	// second condition.
	if permaNode != nil && lastPut != nil {
		put, err := up.UploadAndSignBlob(schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", lastPut.BlobRef.String()))
		if handleResult("claim-permanode-content", put, err) != nil {
			return err
		}
		if c.title != "" {
			put, err := up.UploadAndSignBlob(schema.NewSetAttributeClaim(permaNode.BlobRef, "title", c.title))
			handleResult("claim-permanode-title", put, err)
		}
		if c.tag != "" {
			tags := strings.Split(c.tag, ",")
			for _, tag := range tags {
				m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
				put, err := up.UploadAndSignBlob(m)
				handleResult("claim-permanode-tag", put, err)
			}
		}
		handleResult("permanode", permaNode, nil)
	}
	return nil
}
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) { // TODO(mpl): maybe break this func into more maintainable pieces? filebb := schema.NewCommonFileMap(n.fullPath, n.fi) filebb.SetType("file") file, err := up.open(n.fullPath) if err != nil { return nil, err } defer file.Close() if up.fileOpts.exifTime { ra, ok := file.(io.ReaderAt) if !ok { return nil, errors.New("Error asserting local file to io.ReaderAt") } modtime, err := schema.FileTime(ra) if err != nil { log.Printf("warning: getting time from EXIF failed for %v: %v", n.fullPath, err) } else { filebb.SetModTime(modtime) } } var ( size = n.fi.Size() fileContents io.Reader = io.LimitReader(file, size) br *blobref.BlobRef // of file schemaref sum string // sha1 hashsum of the file to upload pr *client.PutResult // of the final "file" schema blob ) const dupCheckThreshold = 256 << 10 if size > dupCheckThreshold { sumRef, err := up.wholeFileDigest(n.fullPath) if err == nil { sum = sumRef.String() ok := false pr, ok = up.fileMapFromDuplicate(up.statReceiver(n), filebb, sum) if ok { br = pr.BlobRef noteFileUploaded(n.fullPath, !pr.Skipped) if up.fileOpts.wantVivify() { // we can return early in that case, because the other options // are disallowed in the vivify case. return pr, nil } } } } if up.fileOpts.wantVivify() { // If vivify wasn't already done in fileMapFromDuplicate. err := schema.WriteFileChunks(up.statReceiver(n), filebb, fileContents) if err != nil { return nil, err } json, err := filebb.JSON() if err != nil { return nil, err } br = blobref.SHA1FromString(json) h := &client.UploadHandle{ BlobRef: br, Size: int64(len(json)), Contents: strings.NewReader(json), Vivify: true, } pr, err = up.Upload(h) if err != nil { return nil, err } noteFileUploaded(n.fullPath, true) return pr, nil } if br == nil { // br still nil means fileMapFromDuplicate did not find the file on the server, // and the file has not just been uploaded subsequently to a vivify request. 
// So we do the full file + file schema upload here. if sum == "" && up.fileOpts.wantFilePermanode() { fileContents = &trackDigestReader{r: fileContents} } br, err = schema.WriteFileMap(up.statReceiver(n), filebb, fileContents) if err != nil { return nil, err } } // TODO(mpl): test that none of these claims get uploaded if they've already been done if up.fileOpts.wantFilePermanode() { if td, ok := fileContents.(*trackDigestReader); ok { sum = td.Sum() } // Use a fixed time value for signing; not using modtime // so two identical files don't have different modtimes? // TODO(bradfitz): consider this more? permaNodeSigTime := time.Unix(0, 0) permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime) if err != nil { return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err) } handleResult("node-permanode", permaNode, nil) // claimTime is both the time of the "claimDate" in the // JSON claim, as well as the date in the OpenPGP // header. // TODO(bradfitz): this is a little clumsy to do by hand. // There should probably be a method on *Uploader to do this // from an unsigned schema map. Maybe ditch the schema.Claimer // type and just have the Uploader override the claimDate. 
claimTime, ok := filebb.ModTime() if !ok { return nil, fmt.Errorf("couldn't get modtime back for file %v", n.fullPath) } contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", br.String()) contentAttr.SetClaimDate(claimTime) signed, err := up.SignBlob(contentAttr, claimTime) if err != nil { return nil, fmt.Errorf("Failed to sign content claim for node %v: %v", n, err) } put, err := up.uploadString(signed) if err != nil { return nil, fmt.Errorf("Error uploading permanode's attribute for node %v: %v", n, err) } handleResult("node-permanode-contentattr", put, nil) if tags := up.fileOpts.tags(); len(tags) > 0 { errch := make(chan error) for _, tag := range tags { go func(tag string) { m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag) m.SetClaimDate(claimTime) signed, err := up.SignBlob(m, claimTime) if err != nil { errch <- fmt.Errorf("Failed to sign tag claim for node %v: %v", n, err) return } put, err := up.uploadString(signed) if err != nil { errch <- fmt.Errorf("Error uploading permanode's tag attribute %v for node %v: %v", tag, n, err) return } handleResult("node-permanode-tag", put, nil) errch <- nil }(tag) } for _ = range tags { if e := <-errch; e != nil && err == nil { err = e } } if err != nil { return nil, err } } } // TODO(bradfitz): faking a PutResult here to return // is kinda gross. should instead make a // blobserver.Storage wrapper type (wrapping // statReceiver) that can track some of this? or make // schemaWriteFileMap return it? json, _ := filebb.JSON() pr = &client.PutResult{BlobRef: br, Size: int64(len(json)), Skipped: false} return pr, nil }
func (c *fileCmd) RunCommand(args []string) error { if c.vivify { if c.makePermanode || c.filePermanodes || c.tag != "" || c.name != "" { return cmdmain.UsageError("--vivify excludes any other option") } } if c.name != "" && !c.makePermanode { return cmdmain.UsageError("Can't set name without using --permanode") } if c.tag != "" && !c.makePermanode && !c.filePermanodes { return cmdmain.UsageError("Can't set tag without using --permanode or --filenodes") } if c.histo != "" && !c.memstats { return cmdmain.UsageError("Can't use histo without memstats") } up := getUploader() if c.memstats { sr := new(statsStatReceiver) up.altStatReceiver = sr defer func() { sr.DumpStats(c.histo) }() } c.initCaches(up) if c.makePermanode || c.filePermanodes { testSigBlobRef := up.Client.SignerPublicKeyBlobref() if testSigBlobRef == nil { return cmdmain.UsageError("A GPG key is needed to create permanodes; configure one or use vivify mode.") } } up.fileOpts = &fileOptions{ permanode: c.filePermanodes, tag: c.tag, vivify: c.vivify, exifTime: c.exifTime, } var ( permaNode *client.PutResult lastPut *client.PutResult err error ) if c.makePermanode { if len(args) != 1 { return fmt.Errorf("The --permanode flag can only be used with exactly one file or directory argument") } permaNode, err = up.UploadNewPermanode() if err != nil { return fmt.Errorf("Uploading permanode: %v", err) } } if c.diskUsage { if len(args) != 1 { return fmt.Errorf("The --du flag can only be used with exactly one directory argument") } dir := args[0] fi, err := up.stat(dir) if err != nil { return err } if !fi.IsDir() { return fmt.Errorf("%q is not a directory.", dir) } t := up.NewTreeUpload(dir) t.DiskUsageMode = true t.Start() pr, err := t.Wait() if err != nil { return err } handleResult("tree-upload", pr, err) return nil } if c.argsFromInput { if len(args) > 0 { return errors.New("args not supported with -argsfrominput") } tu := up.NewRootlessTreeUpload() tu.Start() br := bufio.NewReader(os.Stdin) for { path, err := 
br.ReadString('\n') if path = strings.TrimSpace(path); path != "" { tu.Enqueue(path) } if err == io.EOF { os.Exit(0) } if err != nil { log.Fatal(err) } } } if len(args) == 0 { return cmdmain.UsageError("No files or directories given.") } for _, filename := range args { fi, err := os.Stat(filename) if err != nil { return err } if fi.IsDir() { if up.fileOpts.wantVivify() { vlog.Printf("Directories not supported in vivify mode; skipping %v\n", filename) continue } t := up.NewTreeUpload(filename) t.Start() lastPut, err = t.Wait() } else { lastPut, err = up.UploadFile(filename) } if handleResult("file", lastPut, err) != nil { return err } } if permaNode != nil { put, err := up.UploadAndSignBlob(schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", lastPut.BlobRef.String())) if handleResult("claim-permanode-content", put, err) != nil { return err } if c.name != "" { put, err := up.UploadAndSignBlob(schema.NewSetAttributeClaim(permaNode.BlobRef, "name", c.name)) handleResult("claim-permanode-name", put, err) } if c.tag != "" { tags := strings.Split(c.tag, ",") m := schema.NewSetAttributeClaim(permaNode.BlobRef, "tag", tags[0]) for _, tag := range tags { m = schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag) put, err := up.UploadAndSignBlob(m) handleResult("claim-permanode-tag", put, err) } } handleResult("permanode", permaNode, nil) } return nil }
func (c *fileCmd) RunCommand(up *Uploader, args []string) error { if len(args) == 0 { return UsageError("No files or directories given.") } if c.name != "" && !c.makePermanode { return UsageError("Can't set name without using --permanode") } if c.tag != "" && !c.makePermanode { return UsageError("Can't set tag without using --permanode") } if c.histo != "" && !c.memstats { return UsageError("Can't use histo without memstats") } if c.memstats { sr := new(statsStatReceiver) if c.histo != "" { num := 100 sr.histo = histo.NewHisto(num) } up.altStatReceiver = sr defer func() { sr.DumpStats(c.histo) }() } if c.statcache { cache := NewFlatStatCache() up.statCache = cache } if c.havecache { cache := NewFlatHaveCache() up.haveCache = cache } var ( permaNode *client.PutResult lastPut *client.PutResult err error ) if c.makePermanode { if len(args) != 1 { return fmt.Errorf("The --permanode flag can only be used with exactly one file or directory argument") } permaNode, err = up.UploadNewPermanode() if err != nil { return fmt.Errorf("Uploading permanode: %v", err) } } if c.diskUsage { if len(args) != 1 { return fmt.Errorf("The --du flag can only be used with exactly one directory argument") } dir := args[0] fi, err := up.stat(dir) if err != nil { return err } if !fi.IsDir() { return fmt.Errorf("%q is not a directory.", dir) } t := up.NewTreeUpload(dir) t.DiskUsageMode = true t.Start() pr, err := t.Wait() if err != nil { return err } handleResult("tree-upload", pr, err) return nil } if c.rollSplits { up.rollSplits = true } for _, filename := range args { if fi, err := os.Stat(filename); err == nil && fi.IsDir() { t := up.NewTreeUpload(filename) t.Start() lastPut, err = t.Wait() } else { lastPut, err = up.UploadFile(filename) } if handleResult("file", lastPut, err) != nil { return err } } if permaNode != nil { put, err := up.UploadAndSignMap(schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", lastPut.BlobRef.String())) if handleResult("claim-permanode-content", put, 
err) != nil { return err } if c.name != "" { put, err := up.UploadAndSignMap(schema.NewSetAttributeClaim(permaNode.BlobRef, "name", c.name)) handleResult("claim-permanode-name", put, err) } if c.tag != "" { tags := strings.Split(c.tag, ",") m := schema.NewSetAttributeClaim(permaNode.BlobRef, "tag", tags[0]) for _, tag := range tags { m = schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag) put, err := up.UploadAndSignMap(m) handleResult("claim-permanode-tag", put, err) } } handleResult("permanode", permaNode, nil) } return nil }
// uploadNodeRegularFile uploads the contents and schema blob(s) for the
// regular file described by n, returning the PutResult of the "file" schema
// blob. Depending on up.fileOpts it may vivify the upload, skip content
// upload when the server already has an identical file, and/or create a
// planned permanode (plus content and tag claims) for the file.
func (up *Uploader) uploadNodeRegularFile(n *node) (*client.PutResult, error) {
	m := schema.NewCommonFileMap(n.fullPath, n.fi)
	m["camliType"] = "file"
	file, err := up.open(n.fullPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	size := n.fi.Size()
	var fileContents io.Reader = io.LimitReader(file, size)
	if up.fileOpts.wantVivify() {
		// Vivify mode: upload the chunks and the (unsigned) file schema
		// blob with the Vivify flag set; no dup check or permanode here.
		err := schema.WriteFileChunks(up.statReceiver(), m, fileContents)
		if err != nil {
			return nil, err
		}
		json, err := m.JSON()
		if err != nil {
			return nil, err
		}
		bref := blobref.SHA1FromString(json)
		h := &client.UploadHandle{
			BlobRef:  bref,
			Size:     int64(len(json)),
			Contents: strings.NewReader(json),
			Vivify:   true,
		}
		return up.Upload(h)
	}
	// NOTE(review): this local shadows the blobref package for the rest of
	// the function — consider renaming it (later versions call it "br").
	var (
		blobref *blobref.BlobRef // of file schemaref
		sum     string           // "sha1-xxxxx"
	)
	const dupCheckThreshold = 256 << 10
	if size > dupCheckThreshold {
		// For larger files, first check whether the server already has the
		// whole file, to avoid re-uploading its contents.
		sumRef, err := up.wholeFileDigest(n.fullPath)
		if err == nil {
			sum = sumRef.String()
			if ref, ok := up.fileMapFromDuplicate(up.statReceiver(), m, sum); ok {
				blobref = ref
			}
		}
	}
	if blobref == nil {
		// No duplicate found: do the full file + file schema upload,
		// computing the digest on the fly if a permanode will need it.
		if sum == "" && up.fileOpts.wantFilePermanode() {
			fileContents = &trackDigestReader{r: fileContents}
		}
		blobref, err = schema.WriteFileMap(up.statReceiver(), m, fileContents)
		if err != nil {
			return nil, err
		}
	}
	// TODO(mpl): test that none of these claims get uploaded if they've already been done
	if up.fileOpts.wantFilePermanode() {
		if td, ok := fileContents.(*trackDigestReader); ok {
			sum = td.Sum()
		}
		// Use a fixed time value for signing; not using modtime
		// so two identical files don't have different modtimes?
		// TODO(bradfitz): consider this more?
		permaNodeSigTime := time.Unix(0, 0)
		permaNode, err := up.UploadPlannedPermanode(sum, permaNodeSigTime)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode for node %v: %v", n, err)
		}
		handleResult("node-permanode", permaNode, nil)
		// claimTime is both the time of the "claimDate" in the
		// JSON claim, as well as the date in the OpenPGP
		// header.
		// TODO(bradfitz): this is a little clumsy to do by hand.
		// There should probably be a method on *Uploader to do this
		// from an unsigned schema map. Maybe ditch the schema.Claimer
		// type and just have the Uploader override the claimDate.
		claimTime := n.fi.ModTime()
		contentAttr := schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", blobref.String())
		contentAttr.SetClaimDate(claimTime)
		signed, err := up.SignMap(contentAttr, claimTime)
		if err != nil {
			return nil, fmt.Errorf("Failed to sign content claim for node %v: %v", n, err)
		}
		put, err := up.uploadString(signed)
		if err != nil {
			return nil, fmt.Errorf("Error uploading permanode's attribute for node %v: %v", n, err)
		}
		handleResult("node-permanode-contentattr", put, nil)
		if tags := up.fileOpts.tags(); len(tags) > 0 {
			// TODO(mpl): do these claims concurrently, not in series
			for _, tag := range tags {
				m := schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag)
				m.SetClaimDate(claimTime)
				// TODO(mpl): verify that SetClaimDate does modify the GPG signature date of the claim
				signed, err := up.SignMap(m, claimTime)
				if err != nil {
					return nil, fmt.Errorf("Failed to sign tag claim for node %v: %v", n, err)
				}
				put, err := up.uploadString(signed)
				if err != nil {
					return nil, fmt.Errorf("Error uploading permanode's tag attribute %v for node %v: %v", tag, n, err)
				}
				handleResult("node-permanode-tag", put, nil)
			}
		}
	}
	// TODO(bradfitz): faking a PutResult here to return
	// is kinda gross. should instead make a
	// blobserver.Storage wrapper type (wrapping
	// statReceiver) that can track some of this? or make
	// schemaWriteFileMap return it?
	json, _ := m.JSON()
	pr := &client.PutResult{BlobRef: blobref, Size: int64(len(json)), Skipped: false}
	return pr, nil
}
func (c *fileCmd) RunCommand(up *Uploader, args []string) error { if len(args) == 0 { return UsageError("No files or directories given.") } if c.vivify { if c.makePermanode || c.filePermanodes || c.tag != "" || c.name != "" { return UsageError("--vivify excludes any other option") } } if c.name != "" && !c.makePermanode { return UsageError("Can't set name without using --permanode") } if c.tag != "" && !c.makePermanode && !c.filePermanodes { return UsageError("Can't set tag without using --permanode or --filenodes") } if c.histo != "" && !c.memstats { return UsageError("Can't use histo without memstats") } if c.memstats { sr := new(statsStatReceiver) up.altStatReceiver = sr defer func() { sr.DumpStats(c.histo) }() } if c.statcache || c.havecache { gen, err := up.StorageGeneration() if err != nil { log.Printf("WARNING: not using local caches; failed to retrieve server's storage generation: %v", err) } else { if c.statcache { cache := NewFlatStatCache(gen) up.statCache = cache } if c.havecache { cache := NewFlatHaveCache(gen) up.haveCache = cache up.Client.SetHaveCache(cache) } } } if c.makePermanode || c.filePermanodes { testSigBlobRef := up.Client.SignerPublicKeyBlobref() if testSigBlobRef == nil { return UsageError("A gpg key is needed to create permanodes; configure one or use vivify mode.") } } up.fileOpts = &fileOptions{ permanode: c.filePermanodes, tag: c.tag, vivify: c.vivify, exifTime: c.exifTime, } var ( permaNode *client.PutResult lastPut *client.PutResult err error ) if c.makePermanode { if len(args) != 1 { return fmt.Errorf("The --permanode flag can only be used with exactly one file or directory argument") } permaNode, err = up.UploadNewPermanode() if err != nil { return fmt.Errorf("Uploading permanode: %v", err) } } if c.diskUsage { if len(args) != 1 { return fmt.Errorf("The --du flag can only be used with exactly one directory argument") } dir := args[0] fi, err := up.stat(dir) if err != nil { return err } if !fi.IsDir() { return fmt.Errorf("%q is not 
a directory.", dir) } t := up.NewTreeUpload(dir) t.DiskUsageMode = true t.Start() pr, err := t.Wait() if err != nil { return err } handleResult("tree-upload", pr, err) return nil } for _, filename := range args { fi, err := os.Stat(filename) if err != nil { return err } if fi.IsDir() { if up.fileOpts.wantVivify() { vlog.Printf("Directories not supported in vivify mode; skipping %v\n", filename) continue } t := up.NewTreeUpload(filename) t.Start() lastPut, err = t.Wait() } else { lastPut, err = up.UploadFile(filename) } if handleResult("file", lastPut, err) != nil { return err } } if permaNode != nil { put, err := up.UploadAndSignMap(schema.NewSetAttributeClaim(permaNode.BlobRef, "camliContent", lastPut.BlobRef.String())) if handleResult("claim-permanode-content", put, err) != nil { return err } if c.name != "" { put, err := up.UploadAndSignMap(schema.NewSetAttributeClaim(permaNode.BlobRef, "name", c.name)) handleResult("claim-permanode-name", put, err) } if c.tag != "" { tags := strings.Split(c.tag, ",") m := schema.NewSetAttributeClaim(permaNode.BlobRef, "tag", tags[0]) for _, tag := range tags { m = schema.NewAddAttributeClaim(permaNode.BlobRef, "tag", tag) put, err := up.UploadAndSignMap(m) handleResult("claim-permanode-tag", put, err) } } handleResult("permanode", permaNode, nil) } return nil }