func TestHubFiring(t *testing.T) { hub := &SimpleBlobHub{} ch := make(chan *blobref.BlobRef) bch := make(chan *blobref.BlobRef) blob := blobref.Parse("sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") blobsame := blobref.Parse("sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") hub.NotifyBlobReceived(blob) // no-op hub.RegisterListener(ch) hub.RegisterBlobListener(blob, bch) hub.NotifyBlobReceived(blobsame) tmr1 := time.NewTimer(1e9) select { case <-tmr1.C: t.Fatal("timer expired on receiving from ch") case got := <-ch: if !blob.Equal(got) { t.Fatalf("got wrong blob") } } select { case <-tmr1.C: t.Fatal("timer expired on receiving from bch") case got := <-bch: if !blob.Equal(got) { t.Fatalf("got wrong blob") } } tmr1.Stop() }
// StaticSet returns the whole of the static set members of that directory func (dr *DirReader) StaticSet() ([]*blobref.BlobRef, error) { if dr.staticSet != nil { return dr.staticSet, nil } staticSetBlobref := blobref.Parse(dr.ss.Entries) if staticSetBlobref == nil { return nil, fmt.Errorf("schema/filereader: Invalid blobref\n") } rsc, _, err := dr.fetcher.Fetch(staticSetBlobref) if err != nil { return nil, fmt.Errorf("schema/filereader: fetching schema blob %s: %v", staticSetBlobref, err) } ss, err := ParseSuperset(rsc) if err != nil { return nil, fmt.Errorf("schema/filereader: decoding schema blob %s: %v", staticSetBlobref, err) } if ss.Type != "static-set" { return nil, fmt.Errorf("schema/filereader: expected \"static-set\" schema blob for %s, got %q", staticSetBlobref, ss.Type) } for _, s := range ss.Members { member := blobref.Parse(s) if member == nil { return nil, fmt.Errorf("schema/filereader: invalid (static-set member) blobref\n") } dr.staticSet = append(dr.staticSet, member) } return dr.staticSet, nil }
// PathsOfSignerTarget returns the still-active claim paths signed by signer
// that point at target. For each (base blobref, suffix) pair, only the path
// from the most recent claim is kept; if that newest claim is inactive, the
// pair is dropped entirely.
func (x *Index) PathsOfSignerTarget(signer, target *blobref.BlobRef) (paths []*search.Path, err error) {
	paths = []*search.Path{}
	keyId, err := x.keyId(signer)
	if err != nil {
		// An unknown signer has no paths; that is not an error.
		if err == ErrNotFound {
			err = nil
		}
		return
	}
	// mostRecent holds the winning path per (base + "/" + suffix) key;
	// maxClaimDates remembers the newest claim date seen for that key.
	mostRecent := make(map[string]*search.Path)
	maxClaimDates := make(map[string]string)
	it := x.queryPrefix(keyPathBackward, keyId, target)
	defer closeIterator(it, &err)
	for it.Next() {
		// Key (after the table-name segment): ... | ... | claimRef.
		// Value: claimDate | baseRef | active("Y"/other) | urlencoded suffix.
		keyPart := strings.Split(it.Key(), "|")[1:]
		valPart := strings.Split(it.Value(), "|")
		if len(keyPart) < 3 || len(valPart) < 4 {
			continue // malformed row; skip
		}
		claimRef := blobref.Parse(keyPart[2])
		baseRef := blobref.Parse(valPart[1])
		if claimRef == nil || baseRef == nil {
			continue
		}
		claimDate := valPart[0]
		active := valPart[2]
		suffix := urld(valPart[3])
		key := baseRef.String() + "/" + suffix
		// Plain string comparison picks the newest claim; presumably
		// claim dates are stored in a lexicographically sortable
		// format (e.g. RFC3339) — TODO confirm.
		if claimDate > maxClaimDates[key] {
			maxClaimDates[key] = claimDate
			if active == "Y" {
				mostRecent[key] = &search.Path{
					Claim:     claimRef,
					ClaimDate: claimDate,
					Base:      baseRef,
					Suffix:    suffix,
					Target:    target,
				}
			} else {
				// Newest claim deletes the path: forget older ones.
				delete(mostRecent, key)
			}
		}
	}
	for _, v := range mostRecent {
		paths = append(paths, v)
	}
	return paths, nil
}
// fromHTTP panics with an httputil value on failure func (r *WithAttrRequest) fromHTTP(req *http.Request) { r.Signer = blobref.Parse(req.FormValue("signer")) r.Value = req.FormValue("value") fuzzy := req.FormValue("fuzzy") // exact match if empty fuzzyMatch := false if fuzzy != "" { lowered := strings.ToLower(fuzzy) if lowered == "true" || lowered == "t" { fuzzyMatch = true } } r.Attr = req.FormValue("attr") // all attributes if empty if r.Attr == "" { // and force fuzzy in that case. fuzzyMatch = true } r.Fuzzy = fuzzyMatch r.ThumbnailSize = thumbnailSize(req) max := req.FormValue("max") if max != "" { maxR, err := strconv.Atoi(max) if err != nil { panic(httputil.InvalidParameterError("max")) } r.N = maxR } r.N = r.n() }
// MustGetBlobRef returns a non-nil BlobRef from req, as given by param. // If it doesn't, it panics with a value understood by Recover or RecoverJSON. func MustGetBlobRef(req *http.Request, param string) *blobref.BlobRef { br := blobref.Parse(MustGet(req, param)) if br == nil { panic(InvalidParameterError(param)) } return br }
func newHandlerFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) { indexPrefix := conf.RequiredString("index") // TODO: add optional help tips here? ownerBlobStr := conf.RequiredString("owner") devBlockStartupPrefix := conf.OptionalString("devBlockStartupOn", "") if err := conf.Validate(); err != nil { return nil, err } if devBlockStartupPrefix != "" { _, err := ld.GetHandler(devBlockStartupPrefix) if err != nil { return nil, fmt.Errorf("search handler references bogus devBlockStartupOn handler %s: %v", devBlockStartupPrefix, err) } } indexHandler, err := ld.GetHandler(indexPrefix) if err != nil { return nil, fmt.Errorf("search config references unknown handler %q", indexPrefix) } indexer, ok := indexHandler.(Index) if !ok { return nil, fmt.Errorf("search config references invalid indexer %q (actually a %T)", indexPrefix, indexHandler) } ownerBlobRef := blobref.Parse(ownerBlobStr) if ownerBlobRef == nil { return nil, fmt.Errorf("search 'owner' has malformed blobref %q; expecting e.g. sha1-xxxxxxxxxxxx", ownerBlobStr) } return &Handler{ index: indexer, owner: ownerBlobRef, }, nil }
// Given a described blob, optionally follows a camliContent and // returns the file's schema blobref and its fileinfo (if found). func (pr *publishRequest) fileSchemaRefFromBlob(des *search.DescribedBlob) (fileref *blobref.BlobRef, fileinfo *search.FileInfo, ok bool) { if des == nil { http.NotFound(pr.rw, pr.req) return } if des.Permanode != nil { // TODO: get "forceMime" attr out of the permanode? or // fileName content-disposition? if cref := des.Permanode.Attr.Get("camliContent"); cref != "" { cbr := blobref.Parse(cref) if cbr == nil { http.Error(pr.rw, "bogus camliContent", 500) return } des = des.PeerBlob(cbr) if des == nil { http.Error(pr.rw, "camliContent not a peer in describe", 500) return } } } if des.CamliType == "file" { return des.BlobRef, des.File, true } http.Error(pr.rw, "failed to find fileSchemaRefFromBlob", 404) return }
// GetOwnerClaims returns all indexed claims about permaNode signed by owner.
func (x *Index) GetOwnerClaims(permaNode, owner *blobref.BlobRef) (cl search.ClaimList, err error) {
	keyId, err := x.keyId(owner)
	if err == ErrNotFound {
		// Unknown owner: no claims, and not an error.
		err = nil
		return
	}
	if err != nil {
		return nil, err
	}
	// Rows look like:
	//   claim|<permanode>|<keyId>|<RFC3339 date>|<claimRef> => type|attr|value
	// with the value fields URL-encoded.
	prefix := pipes("claim", permaNode, keyId, "")
	it := x.queryPrefixString(prefix)
	defer closeIterator(it, &err)
	for it.Next() {
		keyPart := strings.Split(it.Key(), "|")
		valPart := strings.Split(it.Value(), "|")
		if len(keyPart) < 5 || len(valPart) < 3 {
			continue // malformed row; skip
		}
		claimRef := blobref.Parse(keyPart[4])
		if claimRef == nil {
			continue
		}
		// NOTE(review): a malformed date silently becomes the zero
		// time here; presumably dates were validated at index time —
		// confirm.
		date, _ := time.Parse(time.RFC3339, keyPart[3])
		cl = append(cl, &search.Claim{
			BlobRef:   claimRef,
			Signer:    owner,
			Permanode: permaNode,
			Date:      date,
			Type:      urld(valPart[0]),
			Attr:      urld(valPart[1]),
			Value:     urld(valPart[2]),
		})
	}
	return
}
func (ui *UIHandler) serveDownload(rw http.ResponseWriter, req *http.Request) { if ui.root.Storage == nil { http.Error(rw, "No BlobRoot configured", 500) return } suffix := req.Header.Get("X-PrefixHandler-PathSuffix") m := downloadPattern.FindStringSubmatch(suffix) if m == nil { httputil.ErrorRouting(rw, req) return } fbr := blobref.Parse(m[1]) if fbr == nil { http.Error(rw, "Invalid blobref", 400) return } dh := &DownloadHandler{ Fetcher: ui.root.Storage, Cache: ui.Cache, } dh.ServeHTTP(rw, req, fbr) }
// isDeleted returns whether br (a blobref or a claim) should be considered deleted.
func (x *Index) isDeleted(br *blobref.BlobRef) bool {
	var err error
	it := x.queryPrefix(keyDeleted, br)
	defer closeIterator(it, &err)
	for it.Next() {
		// parts are ["deleted", br.String(), blobref-of-delete-claim].
		// see keyDeleted in keys.go
		parts := strings.SplitN(it.Key(), "|", 3)
		if len(parts) != 3 {
			continue
		}
		delClaimRef := blobref.Parse(parts[2])
		if delClaimRef == nil {
			// Unparseable claim ref means index corruption: fail loudly.
			panic(fmt.Errorf("invalid deleted claim for %v", parts[1]))
		}
		// The recursive call on the blobref of the delete claim
		// checks that the claim itself was not deleted, in which case
		// br is not considered deleted anymore.
		// TODO(mpl): Each delete and undo delete adds a level of
		// recursion so this could recurse far. is there a way to
		// go faster in a worst case scenario?
		return !x.isDeleted(delClaimRef)
	}
	// No delete claim rows found: not deleted.
	return false
}
func (vr *VerifyRequest) ParsePayloadMap() bool { vr.PayloadMap = make(map[string]interface{}) pm := vr.PayloadMap if err := json.Unmarshal(vr.bpj, &pm); err != nil { return vr.fail("parse error; payload JSON is invalid") } if _, hasVersion := pm["camliVersion"]; !hasVersion { return vr.fail("missing 'camliVersion' in the JSON payload") } signer, hasSigner := pm["camliSigner"] if !hasSigner { return vr.fail("missing 'camliSigner' in the JSON payload") } if _, ok := signer.(string); !ok { return vr.fail("invalid 'camliSigner' in the JSON payload") } vr.CamliSigner = blobref.Parse(signer.(string)) if vr.CamliSigner == nil { return vr.fail("malformed 'camliSigner' blobref in the JSON payload") } return true }
func (c *attrCmd) RunCommand(args []string) error { if len(args) != 3 { return errors.New("Attr takes 3 args: <permanode> <attr> <value>") } permanode, attr, value := args[0], args[1], args[2] var err error pn := blobref.Parse(permanode) if pn == nil { return fmt.Errorf("Error parsing blobref %q", permanode) } bb := schema.NewSetAttributeClaim(pn, attr, value) if c.add { if c.del { return errors.New("Add and del options are exclusive") } bb = schema.NewAddAttributeClaim(pn, attr, value) } else { // TODO: del, which can make <value> be optional if c.del { return errors.New("del not yet implemented") } } put, err := getUploader().UploadAndSignBlob(bb) handleResult(bb.Type(), put, err) return nil }
// TODO(rh): tame copy/paste code from cammount
func main() {
	client.AddFlags()
	flag.Parse()
	cacheDir, err := ioutil.TempDir("", "camlicache")
	if err != nil {
		log.Fatalf("Error creating temp cache directory: %v", err)
	}
	// NOTE(review): log.Fatal* exits without running deferred calls, so
	// the cache dir leaks on the fatal paths below — consider cleanup.
	defer os.RemoveAll(cacheDir)
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	if flag.NArg() != 1 {
		log.Fatal("usage: camwebdav <blobref>")
	}
	br := blobref.Parse(flag.Arg(0))
	if br == nil {
		log.Fatalf("%s was not a valid blobref.", flag.Arg(0))
	}
	// Local variable shadows the imported client package from here on.
	client := client.NewOrFail()
	fetcher := cacher.NewCachingFetcher(diskcache, client)

	// f is package-level state, presumably read by the webdav handler —
	// confirm against its definition.
	f = fs.NewCamliFileSystem(fetcher, br)
	http.HandleFunc("/", webdav)
	err = http.ListenAndServe(*davaddr, nil)
	if err != nil {
		log.Fatalf("Error starting WebDAV server: %v", err)
	}
}
func (ui *UIHandler) serveFileTree(rw http.ResponseWriter, req *http.Request) { if ui.root.Storage == nil { http.Error(rw, "No BlobRoot configured", 500) return } suffix := req.Header.Get("X-PrefixHandler-PathSuffix") m := treePattern.FindStringSubmatch(suffix) if m == nil { httputil.ErrorRouting(rw, req) return } blobref := blobref.Parse(m[1]) if blobref == nil { http.Error(rw, "Invalid blobref", 400) return } fth := &FileTreeHandler{ Fetcher: ui.root.Storage, file: blobref, } fth.ServeHTTP(rw, req) }
func main() { // Scans the arg list and sets up flags debug := flag.Bool("debug", false, "print debugging messages.") client.AddFlags() flag.Parse() errorf := func(msg string, args ...interface{}) { fmt.Fprintf(os.Stderr, msg, args...) os.Exit(2) } if n := flag.NArg(); n < 1 || n > 2 { errorf("usage: cammount <mountpoint> [<root-blobref>]\n") } mountPoint := flag.Arg(0) client := client.NewOrFail() // automatic from flags cacheDir, err := ioutil.TempDir("", "camlicache") if err != nil { errorf("Error creating temp cache directory: %v\n", err) } defer os.RemoveAll(cacheDir) diskcache, err := localdisk.New(cacheDir) if err != nil { errorf("Error setting up local disk cache: %v", err) } fetcher := cacher.NewCachingFetcher(diskcache, client) var camfs *fs.CamliFileSystem if flag.NArg() == 2 { root := blobref.Parse(flag.Arg(1)) if root == nil { errorf("Error parsing root blobref: %q\n", root) } var err error camfs, err = fs.NewRootedCamliFileSystem(fetcher, root) if err != nil { errorf("Error creating root with %v: %v", root, err) } } else { camfs = fs.NewCamliFileSystem(fetcher) log.Printf("starting with fs %#v", camfs) } if *debug { // TODO: set fs's logger } conn, err := fuse.Mount(mountPoint) if err != nil { log.Fatalf("Mount: %v", err) } err = conn.Serve(camfs) if err != nil { log.Fatalf("Serve: %v", err) } log.Printf("fuse process ending.") }
func (sh *Handler) serveFiles(rw http.ResponseWriter, req *http.Request) { ret := jsonMap() defer httputil.ReturnJSON(rw, ret) br := blobref.Parse(req.FormValue("wholedigest")) if br == nil { ret["error"] = "Missing or invalid 'wholedigest' param" ret["errorType"] = "input" return } files, err := sh.index.ExistingFileSchemas(br) if err != nil { ret["error"] = err.Error() ret["errorType"] = "server" return } strList := []string{} for _, br := range files { strList = append(strList, br.String()) } ret["files"] = strList return }
// EnumerateBlobs streams, in key order, this namespace's blobs whose refs
// sort after the "after" cursor, bounded by limit, then closes dest.
// NOTE(review): the wait parameter is ignored here — confirm intended.
func (sto *appengineStorage) EnumerateBlobs(dest chan<- blobref.SizedBlobRef, after string, limit int, wait time.Duration) error {
	defer close(dest)
	ctx := sto.ctx
	if ctx == nil {
		// No request-bound context: borrow one from the pool for the
		// duration of this enumeration.
		loan := ctxPool.Get()
		defer loan.Return()
		ctx = loan
	}
	// Rows are keyed "<namespace>|<blobref>"; scan the exclusive range
	// (prefix+after, namespace+"~"). '~' sorts after '|', bounding the
	// namespace.
	prefix := sto.namespace + "|"
	keyBegin := datastore.NewKey(ctx, memKind, prefix+after, 0, nil)
	keyEnd := datastore.NewKey(ctx, memKind, sto.namespace+"~", 0, nil)
	q := datastore.NewQuery(memKind).Limit(int(limit)).Filter("__key__>", keyBegin).Filter("__key__<", keyEnd)
	it := q.Run(ctx)
	var row memEnt
	for {
		key, err := it.Next(&row)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return err
		}
		// Strip the namespace prefix to recover the bare blobref string.
		dest <- blobref.SizedBlobRef{blobref.Parse(key.StringID()[len(prefix):]), row.Size}
	}
	return nil
}
func (n *root) Lookup(name string, intr fuse.Intr) (fuse.Node, fuse.Error) { switch name { case ".quitquitquit": log.Fatalf("Shutting down due to root .quitquitquit lookup.") case "WELCOME.txt": return staticFileNode("Welcome to CamlistoreFS.\n\nFor now you can only cd into a sha1-xxxx directory, if you know the blobref of a directory or a file.\n"), nil case "recent": return n.getRecentDir(), nil case "tag", "date": return notImplementDirNode{}, nil case "roots": return n.getRootsDir(), nil case "sha1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx": return notImplementDirNode{}, nil case ".camli_fs_stats": return statsDir{}, nil case "mach_kernel", ".hidden", "._.": // Just quiet some log noise on OS X. return nil, fuse.ENOENT } br := blobref.Parse(name) log.Printf("Root lookup of %q = %v", name, br) if br != nil { return &node{fs: n.fs, blobref: br}, nil } return nil, fuse.ENOENT }
// EnumerateBlobs streams the plaintext blobrefs and sizes recorded in the
// encrypt index, starting just after the "after" cursor, up to limit, then
// closes dest. Index corruption is treated as fatal (panic).
func (s *storage) EnumerateBlobs(dest chan<- blobref.SizedBlobRef, after string, limit int, wait time.Duration) error {
	if wait != 0 {
		panic("TODO: support wait in EnumerateBlobs")
	}
	defer close(dest)
	iter := s.index.Find(after)
	n := 0
	for iter.Next() {
		// The iterator includes the cursor row itself; skip it so the
		// enumeration is strictly "after".
		if iter.Key() == after {
			continue
		}
		br := blobref.Parse(iter.Key())
		if br == nil {
			// Keys are expected to be blobrefs; anything else means
			// the index is corrupt.
			panic("Bogus encrypt index key: " + iter.Key())
		}
		plainSize, ok := parseMetaValuePlainSize(iter.Value())
		if !ok {
			panic("Bogus encrypt index value: " + iter.Value())
		}
		dest <- blobref.SizedBlobRef{br, plainSize}
		n++
		if limit != 0 && n >= limit {
			break
		}
	}
	return iter.Close()
}
// EnumerateBlobs streams this namespace's blobs (refs sorting after the
// "after" cursor, up to limit) on dest, then closes it.
// Pre-Go1 code (note the os.Error return). waitSeconds is ignored.
func (sto *appengineStorage) EnumerateBlobs(dest chan<- blobref.SizedBlobRef, after string, limit uint, waitSeconds int) os.Error {
	defer close(dest)
	if sto.ctx == nil {
		return errNoContext
	}
	// Rows are keyed "<namespace>|<blobref>"; scan the exclusive range
	// (prefix+after, namespace+"~").
	prefix := sto.namespace + "|"
	keyBegin := datastore.NewKey(sto.ctx, memKind, prefix+after, 0, nil)
	keyEnd := datastore.NewKey(sto.ctx, memKind, sto.namespace+"~", 0, nil)
	q := datastore.NewQuery(memKind).Limit(int(limit)).Filter("__key__>", keyBegin).Filter("__key__<", keyEnd)
	it := q.Run(sto.ctx)
	var row memEnt
	for {
		key, err := it.Next(&row)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return err
		}
		size, err := row.size()
		if err != nil {
			return err
		}
		// Strip the namespace prefix to recover the bare blobref string.
		dest <- blobref.SizedBlobRef{blobref.Parse(key.StringID()[len(prefix):]), size}
	}
	return nil
}
func NewFromShareRoot(shareBlobURL string) (c *Client, target *blobref.BlobRef, err error) { var root string if m := shareURLRx.FindStringSubmatch(shareBlobURL); m == nil { return nil, nil, fmt.Errorf("Unkown URL base; doesn't contain /camli/") } else { c = New(m[1]) c.discoOnce.Do(func() { /* nothing */ }) c.prefixOnce.Do(func() { /* nothing */ }) c.prefixv = m[1] c.authMode = auth.None{} c.via = make(map[string]string) root = m[2] } res, err := http.Get(shareBlobURL) if err != nil { return nil, nil, fmt.Errorf("Error fetching %s: %v", shareBlobURL, err) } defer res.Body.Close() blob, err := schema.BlobFromReader(blobref.Parse(root), res.Body) if err != nil { return nil, nil, fmt.Errorf("Error parsing JSON from %s: %v", shareBlobURL, err) } if blob.ShareAuthType() != "haveref" { return nil, nil, fmt.Errorf("Unknown share authType of %q", blob.ShareAuthType()) } target = blob.ShareTarget() if target == nil { return nil, nil, fmt.Errorf("No target.") } c.via[target.String()] = root return c, target, nil }
// main fetches each blobref argument, either streaming to stdout (-o -)
// or smart-fetching into the output directory.
func main() {
	client.AddFlags()
	flag.Parse()

	if len(*flagVia) > 0 {
		// Build the chain of "via" blobrefs used for share-based fetches.
		vs := strings.Split(*flagVia, ",")
		viaRefs = make([]*blobref.BlobRef, len(vs))
		for i, sbr := range vs {
			viaRefs[i] = blobref.Parse(sbr)
			if viaRefs[i] == nil {
				log.Fatalf("Invalid -via blobref: %q", sbr)
			}
			if *flagVerbose {
				log.Printf("via: %s", sbr)
			}
		}
	}

	cl := client.NewOrFail()
	for n := 0; n < flag.NArg(); n++ {
		arg := flag.Arg(n)
		br := blobref.Parse(arg)
		if br == nil {
			log.Fatalf("Failed to parse argument %q as a blobref.", arg)
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exists.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			// NOTE(review): this branch returns after the first
			// argument, so only one blob is ever written to stdout —
			// confirm that's intended when multiple args are given.
			rc, err := fetch(cl, br)
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
			return
		}
		if err := smartFetch(cl, *flagOutput, br); err != nil {
			log.Fatal(err)
		}
	}
}
func (h *shareHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { blobRef := blobref.Parse(httputil.PathSuffix(req)) if blobRef == nil { http.Error(rw, "Malformed share URL.", 400) return } handleGetViaSharing(rw, req, blobRef, h.fetcher) }
// ReadDir lists the most recent permanodes (via a search query) as dirents
// named after their camliContent file/dir names, caching the described
// blobs and mod times on n for later lookups.
func (n *recentDir) ReadDir(intr fuse.Intr) ([]fuse.Dirent, fuse.Error) {
	log.Printf("fs.recent: ReadDir / searching")
	n.mu.Lock()
	defer n.mu.Unlock()

	// Reset the per-directory caches; they are rebuilt from this query.
	n.ents = make(map[string]*search.DescribedBlob)
	n.modTime = make(map[string]time.Time)

	req := &search.RecentRequest{N: 100}
	res, err := n.fs.client.GetRecentPermanodes(req)
	if err != nil {
		log.Printf("fs.recent: GetRecentPermanodes error in ReadDir: %v", err)
		return nil, fuse.EIO
	}

	var ents []fuse.Dirent
	for _, ri := range res.Recent {
		modTime := ri.ModTime.Time()
		meta := res.Meta.Get(ri.BlobRef)
		if meta == nil || meta.Permanode == nil {
			continue
		}
		// Only permanodes whose camliContent resolves to a described
		// file or directory are listed.
		cc := blobref.Parse(meta.Permanode.Attr.Get("camliContent"))
		if cc == nil {
			continue
		}
		ccMeta := res.Meta.Get(cc)
		if ccMeta == nil {
			continue
		}
		var name string
		switch {
		case ccMeta.File != nil:
			name = ccMeta.File.FileName
			// Prefer the file's own timestamp when it has one.
			if mt := ccMeta.File.Time; !mt.IsZero() {
				modTime = mt.Time()
			}
		case ccMeta.Dir != nil:
			name = ccMeta.Dir.FileName
		default:
			continue
		}
		if name == "" || n.ents[name] != nil {
			// Nameless or duplicate entry: fall back to the content
			// blobref (keeping the extension); skip true duplicates.
			name = ccMeta.BlobRef.String() + path.Ext(name)
			if n.ents[name] != nil {
				continue
			}
		}
		n.ents[name] = ccMeta
		n.modTime[name] = modTime
		log.Printf("fs.recent: name %q = %v (at %v -> %v)", name, ccMeta.BlobRef, ri.ModTime.Time(), modTime)
		ents = append(ents, fuse.Dirent{
			Name: name,
		})
	}
	log.Printf("fs.recent returning %d entries", len(ents))
	return ents, nil
}
func (dh *DownloadHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file *blobref.BlobRef) { if req.Method != "GET" && req.Method != "HEAD" { http.Error(rw, "Invalid download method", 400) return } if req.Header.Get("If-Modified-Since") != "" { // Immutable, so any copy's a good copy. rw.WriteHeader(http.StatusNotModified) return } fr, err := schema.NewFileReader(dh.storageSeekFetcher(), file) if err != nil { http.Error(rw, "Can't serve file: "+err.Error(), 500) return } defer fr.Close() schema := fr.FileSchema() h := rw.Header() h.Set("Content-Length", fmt.Sprintf("%d", schema.SumPartsSize())) h.Set("Expires", time.Now().Add(oneYear).Format(http.TimeFormat)) mimeType := magic.MIMETypeFromReaderAt(fr) if dh.ForceMime != "" { mimeType = dh.ForceMime } if mimeType == "" { mimeType = "application/octet-stream" } h.Set("Content-Type", mimeType) if mimeType == "application/octet-stream" { // Chrome seems to silently do nothing on // application/octet-stream unless this is set. // Maybe it's confused by lack of URL it recognizes // along with lack of mime type? rw.Header().Set("Content-Disposition", "attachment; filename=file-"+file.String()+".dat") } if req.Method == "HEAD" && req.FormValue("verifycontents") != "" { vbr := blobref.Parse(req.FormValue("verifycontents")) if vbr == nil { return } hash := vbr.Hash() if hash == nil { return } io.Copy(hash, fr) // ignore errors, caught later if vbr.HashMatches(hash) { rw.Header().Set("X-Camli-Contents", vbr.String()) } return } http.ServeContent(rw, req, "", time.Now(), fr) }
func (b *DescribedBlob) ContentRef() (br *blobref.BlobRef, ok bool) { if b != nil && b.Permanode != nil { if cref := b.Permanode.Attr.Get("camliContent"); cref != "" { br = blobref.Parse(cref) return br, br != nil } } return }
func handleRemove(conn http.ResponseWriter, req *http.Request, storage blobserver.Storage) { if w, ok := storage.(blobserver.ContextWrapper); ok { storage = w.WrapContext(req) } if req.Method != "POST" { log.Fatalf("Invalid method; handlers misconfigured") } configer, ok := storage.(blobserver.Configer) if !ok { conn.WriteHeader(http.StatusForbidden) fmt.Fprintf(conn, "Remove handler's blobserver.Storage isn't a blobserver.Configer; can't remove") return } if !configer.Config().IsQueue { conn.WriteHeader(http.StatusForbidden) fmt.Fprintf(conn, "Can only remove blobs from a queue.\n") return } n := 0 toRemove := make([]*blobref.BlobRef, 0) toRemoveStr := make([]string, 0) for { n++ if n > maxRemovesPerRequest { httputil.BadRequestError(conn, fmt.Sprintf("Too many removes in this request; max is %d", maxRemovesPerRequest)) return } key := fmt.Sprintf("blob%v", n) value := req.FormValue(key) if value == "" { break } ref := blobref.Parse(value) if ref == nil { httputil.BadRequestError(conn, "Bogus blobref for key "+key) return } toRemove = append(toRemove, ref) toRemoveStr = append(toRemoveStr, ref.String()) } err := storage.RemoveBlobs(toRemove) if err != nil { conn.WriteHeader(http.StatusInternalServerError) log.Printf("Server error during remove: %v", err) fmt.Fprintf(conn, "Server error") return } reply := make(map[string]interface{}, 0) reply["removed"] = toRemoveStr httputil.ReturnJSON(conn, reply) }
// parent returns the base path and the blobRef of pr.subject's parent. // It returns an error if pr.subject or pr.subjectBasePath were not set // properly (with findSubject), or if the parent was not found. func (pr *publishRequest) parent() (parentPath string, parentBlobRef *blobref.BlobRef, err error) { if pr.subject == nil { return "", nil, errors.New("subject not set") } if pr.subjectBasePath == "" { return "", nil, errors.New("subjectBasePath not set") } hops := publishedPath(pr.subjectBasePath).splitHops() if len(hops) == 0 { return "", nil, errors.New("No subresource digest in subjectBasePath") } subjectDigest := hops[len(hops)-1] if subjectDigest != pr.subject.DigestPrefix(digestLen) { return "", nil, errors.New("subject digest not in subjectBasePath") } parentPath = strings.TrimSuffix(pr.subjectBasePath, "/"+digestPrefix+subjectDigest) if len(hops) == 1 { // the parent is the suffix, not one of the subresource hops for br, _ := range pr.inSubjectChain { if br != pr.subject.String() { parentBlobRef = blobref.Parse(br) break } } } else { // nested collection(s) parentDigest := hops[len(hops)-2] for br, _ := range pr.inSubjectChain { bref := blobref.Parse(br) if bref == nil { return "", nil, fmt.Errorf("Could not parse %q as blobRef", br) } if bref.DigestPrefix(10) == parentDigest { parentBlobRef = bref break } } } if parentBlobRef == nil { return "", nil, fmt.Errorf("No parent found for %v", pr.subjectBasePath) } return parentPath, parentBlobRef, nil }
func (x *Index) PathsLookup(signer, base *blobref.BlobRef, suffix string) (paths []*search.Path, err error) { paths = []*search.Path{} keyId, err := x.keyId(signer) if err != nil { if err == ErrNotFound { err = nil } return } it := x.queryPrefix(keyPathForward, keyId, base, suffix) defer closeIterator(it, &err) for it.Next() { keyPart := strings.Split(it.Key(), "|")[1:] valPart := strings.Split(it.Value(), "|") if len(keyPart) < 5 || len(valPart) < 2 { continue } claimRef := blobref.Parse(keyPart[4]) baseRef := blobref.Parse(keyPart[1]) if claimRef == nil || baseRef == nil { continue } claimDate := unreverseTimeString(keyPart[3]) suffix := urld(keyPart[2]) target := blobref.Parse(valPart[1]) // TODO(bradfitz): investigate what's up with deleted // forward path claims here. Needs docs with the // interface too, and tests. active := valPart[0] _ = active path := &search.Path{ Claim: claimRef, ClaimDate: claimDate, Base: baseRef, Suffix: suffix, Target: target, } paths = append(paths, path) } return }
func TestHubRegistration(t *testing.T) { hub := &SimpleBlobHub{} ch := make(chan *blobref.BlobRef) ch2 := make(chan *blobref.BlobRef) b1 := blobref.Parse("sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") b2 := blobref.Parse("sha1-62cdb7020ff920e5aa642c3d4066950dd1f01f4d") Expect(t, hub.listeners == nil, "hub.listeners is nil before RegisterListener") hub.RegisterListener(ch) ExpectInt(t, 1, len(hub.listeners), "len(hub.listeners) after RegisterListener") hub.RegisterListener(ch2) ExpectInt(t, 2, len(hub.listeners), "len(hub.listeners) after ch2 RegisterListener") hub.UnregisterListener(ch) ExpectInt(t, 1, len(hub.listeners), "len(hub.listeners) after UnregisterListener") hub.UnregisterListener(ch2) ExpectInt(t, 0, len(hub.listeners), "len(hub.listeners) after UnregisterListener") Expect(t, hub.blobListeners == nil, "hub.blobListeners is nil before RegisterBlobListener") hub.RegisterBlobListener(b1, ch) Expect(t, hub.blobListeners != nil, "hub.blobListeners is not nil before RegisterBlobListener") Expect(t, hub.blobListeners[b1.String()] != nil, "b1 in hub.blobListeners map") ExpectInt(t, 1, len(hub.blobListeners[b1.String()]), "hub.blobListeners[b1] size") ExpectInt(t, 1, len(hub.blobListeners), "hub.blobListeners size") hub.RegisterBlobListener(b2, ch) ExpectInt(t, 1, len(hub.blobListeners[b2.String()]), "hub.blobListeners[b1] size") ExpectInt(t, 2, len(hub.blobListeners), "hub.blobListeners size") hub.UnregisterBlobListener(b2, ch) Expect(t, hub.blobListeners[b2.String()] == nil, "b2 not in hub.blobListeners") ExpectInt(t, 1, len(hub.blobListeners), "hub.blobListeners size") hub.UnregisterBlobListener(b1, ch) Expect(t, hub.blobListeners[b1.String()] == nil, "b1 not in hub.blobListeners") ExpectInt(t, 0, len(hub.blobListeners), "hub.blobListeners size") }