// kvDeleted parses a "deleted" index row into the delete claim it records.
func kvDeleted(k string) (c camtypes.Claim, ok bool) {
	// TODO(bradfitz): garbage
	keyPart := strings.Split(k, "|")
	if len(keyPart) != 4 {
		return
	}
	if keyPart[0] != "deleted" {
		return
	}
	target, ok := blob.Parse(keyPart[1])
	if !ok {
		return
	}
	claimRef, ok := blob.Parse(keyPart[3])
	if !ok {
		return
	}
	date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[2]))
	if err != nil {
		// ok is still true here from the last blob.Parse, so a bare
		// return would report success with a zero date; fail explicitly.
		return camtypes.Claim{}, false
	}
	return camtypes.Claim{
		BlobRef: claimRef,
		Target:  target,
		Date:    date,
		Type:    string(schema.DeleteClaim),
	}, true
}

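// For reference, a sketch of the index row kvDeleted consumes, inferred from
// the parser above (the refs are made up, and the date field is really stored
// in the reversed-time encoding that unreverseTimeString undoes):
//
//	deleted|<target blobref>|<reversed RFC3339 date>|<delete-claim blobref>
//
// The other kv* parsers below (kvEdgeBackward, kvSignerAttrValue) consume
// pipe-separated rows of the same general shape.
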
func (sto *s3Storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) (err error) {
	defer close(dest)
	if faultEnumerate.FailErr(&err) {
		return
	}
	startAt := after
	if _, ok := blob.Parse(after); ok {
		startAt = nextStr(after)
	}
	objs, err := sto.s3Client.ListBucket(sto.bucket, startAt, limit)
	if err != nil {
		log.Printf("s3 ListBucket: %v", err)
		return err
	}
	for _, obj := range objs {
		if obj.Key == after {
			continue
		}
		br, ok := blob.Parse(obj.Key)
		if !ok {
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: uint32(obj.Size)}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}

func (sto *s3Storage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) (err error) {
	defer close(dest)
	if faultEnumerate.FailErr(&err) {
		return
	}
	startAt := after
	if _, ok := blob.Parse(after); ok {
		startAt = nextStr(after)
	}
	objs, err := sto.s3Client.ListBucket(sto.bucket, sto.dirPrefix+startAt, limit)
	if err != nil {
		log.Printf("s3 ListBucket: %v", err)
		return err
	}
	for _, obj := range objs {
		dir, file := path.Split(obj.Key)
		if dir != sto.dirPrefix {
			continue
		}
		if file == after {
			continue
		}
		br, ok := blob.Parse(file)
		if !ok {
			// TODO(mpl): I've noticed that on GCS we error out for this case. Do the same here?
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: uint32(obj.Size)}:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}

func kvEdgeBackward(k, v string) (edge *camtypes.Edge, ok bool) {
	// TODO(bradfitz): garbage
	keyPart := strings.Split(k, "|")
	valPart := strings.Split(v, "|")
	if len(keyPart) != 4 || len(valPart) != 2 {
		// TODO(mpl): use glog
		log.Printf("bogus keyEdgeBackward index entry: %q = %q", k, v)
		return
	}
	if keyPart[0] != "edgeback" {
		return
	}
	parentRef, ok := blob.Parse(keyPart[2])
	if !ok {
		log.Printf("bogus parent in keyEdgeBackward index entry: %q", keyPart[2])
		return
	}
	blobRef, ok := blob.Parse(keyPart[3])
	if !ok {
		log.Printf("bogus blobref in keyEdgeBackward index entry: %q", keyPart[3])
		return
	}
	return &camtypes.Edge{
		From:      parentRef,
		FromType:  valPart[0],
		FromTitle: valPart[1],
		BlobRef:   blobRef,
	}, true
}

func kvSignerAttrValue(k, v string) (c camtypes.Claim, ok bool) {
	// TODO(bradfitz): garbage
	keyPart := strings.Split(k, "|")
	valPart := strings.Split(v, "|")
	if len(keyPart) != 6 || len(valPart) != 1 {
		// TODO(mpl): use glog
		log.Printf("bogus keySignerAttrValue index entry: %q = %q", k, v)
		return
	}
	if keyPart[0] != "signerattrvalue" {
		return
	}
	date, err := time.Parse(time.RFC3339, unreverseTimeString(keyPart[4]))
	if err != nil {
		log.Printf("bogus time in keySignerAttrValue index entry: %q", keyPart[4])
		return
	}
	claimRef, ok := blob.Parse(keyPart[5])
	if !ok {
		log.Printf("bogus claim in keySignerAttrValue index entry: %q", keyPart[5])
		return
	}
	permaNode, ok := blob.Parse(valPart[0])
	if !ok {
		log.Printf("bogus permanode in keySignerAttrValue index entry: %q", valPart[0])
		return
	}
	return camtypes.Claim{
		BlobRef:   claimRef,
		Permanode: permaNode,
		Date:      date,
		Attr:      urld(keyPart[2]),
		Value:     urld(keyPart[3]),
	}, true
}

func (x *Index) PathsOfSignerTarget(signer, target blob.Ref) (paths []*search.Path, err error) {
	paths = []*search.Path{}
	keyId, err := x.keyId(signer)
	if err != nil {
		if err == ErrNotFound {
			err = nil
		}
		return
	}
	mostRecent := make(map[string]*search.Path)
	maxClaimDates := make(map[string]string)
	it := x.queryPrefix(keyPathBackward, keyId, target)
	defer closeIterator(it, &err)
	for it.Next() {
		keyPart := strings.Split(it.Key(), "|")[1:]
		valPart := strings.Split(it.Value(), "|")
		if len(keyPart) < 3 || len(valPart) < 4 {
			continue
		}
		claimRef, ok := blob.Parse(keyPart[2])
		if !ok {
			continue
		}
		baseRef, ok := blob.Parse(valPart[1])
		if !ok {
			continue
		}
		claimDate := valPart[0]
		active := valPart[2]
		suffix := urld(valPart[3])
		key := baseRef.String() + "/" + suffix
		if claimDate > maxClaimDates[key] {
			maxClaimDates[key] = claimDate
			if active == "Y" {
				mostRecent[key] = &search.Path{
					Claim:     claimRef,
					ClaimDate: claimDate,
					Base:      baseRef,
					Suffix:    suffix,
					Target:    target,
				}
			} else {
				delete(mostRecent, key)
			}
		}
	}
	for _, v := range mostRecent {
		paths = append(paths, v)
	}
	return paths, nil
}

func (x *Index) PathsLookup(signer, base blob.Ref, suffix string) (paths []*search.Path, err error) {
	paths = []*search.Path{}
	keyId, err := x.keyId(signer)
	if err != nil {
		if err == ErrNotFound {
			err = nil
		}
		return
	}
	it := x.queryPrefix(keyPathForward, keyId, base, suffix)
	defer closeIterator(it, &err)
	for it.Next() {
		keyPart := strings.Split(it.Key(), "|")[1:]
		valPart := strings.Split(it.Value(), "|")
		if len(keyPart) < 5 || len(valPart) < 2 {
			continue
		}
		claimRef, ok := blob.Parse(keyPart[4])
		if !ok {
			continue
		}
		baseRef, ok := blob.Parse(keyPart[1])
		if !ok {
			continue
		}
		claimDate := unreverseTimeString(keyPart[3])
		suffix := urld(keyPart[2])
		target, ok := blob.Parse(valPart[1])
		if !ok {
			continue
		}
		// TODO(bradfitz): investigate what's up with deleted
		// forward path claims here. Needs docs with the
		// interface too, and tests.
		active := valPart[0]
		_ = active
		path := &search.Path{
			Claim:     claimRef,
			ClaimDate: claimDate,
			Base:      baseRef,
			Suffix:    suffix,
			Target:    target,
		}
		paths = append(paths, path)
	}
	return
}

func (sh *Handler) serveFiles(rw http.ResponseWriter, req *http.Request) {
	ret := jsonMap()
	defer httputil.ReturnJSON(rw, ret)
	br, ok := blob.Parse(req.FormValue("wholedigest"))
	if !ok {
		ret["error"] = "Missing or invalid 'wholedigest' param"
		ret["errorType"] = "input"
		return
	}
	files, err := sh.index.ExistingFileSchemas(br)
	if err != nil {
		ret["error"] = err.Error()
		ret["errorType"] = "server"
		return
	}
	strList := []string{}
	for _, br := range files {
		strList = append(strList, br.String())
	}
	ret["files"] = strList
}

func (c *shareCmd) RunCommand(args []string) error {
	unsigned := schema.NewShareRef(schema.ShareHaveRef, c.transitive)
	if c.search != "" {
		if len(args) != 0 {
			return cmdmain.UsageError("when using the -search flag, share takes zero arguments")
		}
		var q search.SearchQuery
		if err := json.Unmarshal([]byte(c.search), &q); err != nil {
			return cmdmain.UsageError(fmt.Sprintf("invalid search: %s", err))
		}
		unsigned.SetShareSearch(&q)
	} else {
		if len(args) != 1 {
			return cmdmain.UsageError("share takes exactly one argument, a blobref")
		}
		target, ok := blob.Parse(args[0])
		if !ok {
			return cmdmain.UsageError("invalid blobref")
		}
		unsigned.SetShareTarget(target)
	}
	if c.duration != 0 {
		unsigned.SetShareExpiration(time.Now().Add(c.duration))
	}
	pr, err := getUploader().UploadAndSignBlob(unsigned)
	handleResult("share", pr, err)
	return nil
}

// MustGetBlobRef returns a non-nil BlobRef from req, as given by param.
// If it cannot, it panics with a value understood by Recover or RecoverJSON.
func MustGetBlobRef(req *http.Request, param string) blob.Ref {
	br, ok := blob.Parse(MustGet(req, param))
	if !ok {
		panic(InvalidParameterError(param))
	}
	return br
}

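// Usage sketch (the handler name is illustrative; the doc comment above says
// the panicked value is understood by Recover or RecoverJSON, so callers are
// expected to run under one of those recover wrappers):
//
//	func serveThumbnail(rw http.ResponseWriter, req *http.Request) {
//		br := httputil.MustGetBlobRef(req, "blobref") // panics if missing or invalid
//		// ... fetch and serve br ...
//	}
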
func (s *storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	iter := s.index.Find(after, "")
	n := 0
	for iter.Next() {
		if iter.Key() == after {
			continue
		}
		br, ok := blob.Parse(iter.Key())
		if !ok {
			panic("Bogus encrypt index key: " + iter.Key())
		}
		plainSize, ok := parseMetaValuePlainSize(iter.Value())
		if !ok {
			panic("Bogus encrypt index value: " + iter.Value())
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: plainSize}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
		n++
		if limit != 0 && n >= limit {
			break
		}
	}
	return iter.Close()
}

// Given a described blob, optionally follows a camliContent and
// returns the file's schema blobref and its fileinfo (if found).
func (pr *publishRequest) fileSchemaRefFromBlob(des *search.DescribedBlob) (fileref blob.Ref, fileinfo *search.FileInfo, ok bool) {
	if des == nil {
		http.NotFound(pr.rw, pr.req)
		return
	}
	if des.Permanode != nil {
		// TODO: get "forceMime" attr out of the permanode? or
		// fileName content-disposition?
		if cref := des.Permanode.Attr.Get("camliContent"); cref != "" {
			cbr, ok2 := blob.Parse(cref)
			if !ok2 {
				http.Error(pr.rw, "bogus camliContent", 500)
				return
			}
			des = des.PeerBlob(cbr)
			if des == nil {
				http.Error(pr.rw, "camliContent not a peer in describe", 500)
				return
			}
		}
	}
	if des.CamliType == "file" {
		return des.BlobRef, des.File, true
	}
	http.Error(pr.rw, "failed to find fileSchemaRefFromBlob", 404)
	return
}

func (h *shareHandler) serveHTTP(rw http.ResponseWriter, req *http.Request) error {
	var err error
	pathSuffix := httputil.PathSuffix(req)
	if len(pathSuffix) == 0 {
		// This happens during testing because we don't go through PrefixHandler.
		pathSuffix = strings.TrimLeft(req.URL.Path, "/")
	}
	pathParts := strings.SplitN(pathSuffix, "/", 2)
	blobRef, ok := blob.Parse(pathParts[0])
	if !ok {
		err = &shareError{
			code:     invalidURL,
			response: badRequest,
			message:  fmt.Sprintf("Malformed share pathSuffix: %s", pathSuffix),
		}
	} else {
		err = handleGetViaSharing(rw, req, blobRef, h.fetcher)
	}
	if se, ok := err.(*shareError); ok {
		switch se.response {
		case badRequest:
			httputil.BadRequestError(rw, err.Error())
		case unauthorizedRequest:
			log.Print(err)
			auth.SendUnauthorized(rw, req)
		}
	}
	return err
}

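// Request shape handled above (the "/share/" prefix is illustrative; the
// handler only sees the path suffix). Only the first path segment is parsed
// as a blobref here:
//
//	GET /share/sha1-0123456789abcdef0123456789abcdef01234567
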
func newHandlerFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, error) {
	indexPrefix := conf.RequiredString("index") // TODO: add optional help tips here?
	ownerBlobStr := conf.RequiredString("owner")
	devBlockStartupPrefix := conf.OptionalString("devBlockStartupOn", "")
	if err := conf.Validate(); err != nil {
		return nil, err
	}
	if devBlockStartupPrefix != "" {
		_, err := ld.GetHandler(devBlockStartupPrefix)
		if err != nil {
			return nil, fmt.Errorf("search handler references bogus devBlockStartupOn handler %s: %v", devBlockStartupPrefix, err)
		}
	}
	indexHandler, err := ld.GetHandler(indexPrefix)
	if err != nil {
		return nil, fmt.Errorf("search config references unknown handler %q", indexPrefix)
	}
	indexer, ok := indexHandler.(Index)
	if !ok {
		return nil, fmt.Errorf("search config references invalid indexer %q (actually a %T)", indexPrefix, indexHandler)
	}
	ownerBlobRef, ok := blob.Parse(ownerBlobStr)
	if !ok {
		return nil, fmt.Errorf("search 'owner' has malformed blobref %q; expecting e.g. sha1-xxxxxxxxxxxx", ownerBlobStr)
	}
	return &Handler{
		index: indexer,
		owner: ownerBlobRef,
	}, nil
}

func (sh *Handler) serveFiles(rw http.ResponseWriter, req *http.Request) {
	var ret camtypes.FileSearchResponse
	defer httputil.ReturnJSON(rw, &ret)
	br, ok := blob.Parse(req.FormValue("wholedigest"))
	if !ok {
		ret.Error = "Missing or invalid 'wholedigest' param"
		ret.ErrorType = "input"
		return
	}
	files, err := sh.index.ExistingFileSchemas(br)
	if err != nil {
		ret.Error = err.Error()
		ret.ErrorType = "server"
		return
	}
	// The UI code expects an empty JSON array, not null, when there are no files.
	if files == nil {
		files = []blob.Ref{}
	}
	ret.Files = files
}

func (c *desCmd) RunCommand(args []string) error {
	if len(args) == 0 {
		return cmdmain.UsageError("requires blobref")
	}
	var blobs []blob.Ref
	for _, arg := range args {
		br, ok := blob.Parse(arg)
		if !ok {
			return cmdmain.UsageError(fmt.Sprintf("invalid blobref %q", arg))
		}
		blobs = append(blobs, br)
	}
	var at time.Time // TODO: implement. from "2 days ago" "-2d", "-2h", "2013-02-05", etc
	cl := newClient(c.server)
	res, err := cl.Describe(&search.DescribeRequest{
		BlobRefs: blobs,
		Depth:    c.depth,
		At:       types.Time3339(at),
	})
	if err != nil {
		return err
	}
	resj, err := json.MarshalIndent(res, "", " ")
	if err != nil {
		return err
	}
	resj = append(resj, '\n')
	_, err = os.Stdout.Write(resj)
	return err
}

func (OAuth1) CallbackRequestAccount(r *http.Request) (blob.Ref, error) {
	acctRef, ok := blob.Parse(r.FormValue("acct"))
	if !ok {
		return blob.Ref{}, errors.New("missing 'acct=' blobref param")
	}
	return acctRef, nil
}

func (n *root) Lookup(ctx context.Context, name string) (fs.Node, error) {
	log.Printf("root.Lookup(%s)", name)
	switch name {
	case ".quitquitquit":
		log.Fatalf("Shutting down due to root .quitquitquit lookup.")
	case "WELCOME.txt":
		return staticFileNode("Welcome to CamlistoreFS.\n\nFor now you can only cd into a sha1-xxxx directory, if you know the blobref of a directory or a file.\n"), nil
	case "recent":
		return n.getRecentDir(), nil
	case "tag", "date":
		return notImplementDirNode{}, nil
	case "at":
		return n.getAtDir(), nil
	case "roots":
		return n.getRootsDir(), nil
	case "sha1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx":
		return notImplementDirNode{}, nil
	case ".camli_fs_stats":
		return statsDir{}, nil
	case "mach_kernel", ".hidden", "._.":
		// Just quiet some log noise on OS X.
		return nil, fuse.ENOENT
	}
	if br, ok := blob.Parse(name); ok {
		log.Printf("Root lookup of blobref. %q => %v", name, br)
		return &node{fs: n.fs, blobref: br}, nil
	}
	log.Printf("Bogus root lookup of %q", name)
	return nil, fuse.ENOENT
}

func (vr *VerifyRequest) ParsePayloadMap() bool {
	vr.PayloadMap = make(map[string]interface{})
	pm := vr.PayloadMap
	if err := json.Unmarshal(vr.bpj, &pm); err != nil {
		return vr.fail("parse error; payload JSON is invalid")
	}
	if _, hasVersion := pm["camliVersion"]; !hasVersion {
		return vr.fail("missing 'camliVersion' in the JSON payload")
	}
	signer, hasSigner := pm["camliSigner"]
	if !hasSigner {
		return vr.fail("missing 'camliSigner' in the JSON payload")
	}
	if _, ok := signer.(string); !ok {
		return vr.fail("invalid 'camliSigner' in the JSON payload")
	}
	var ok bool
	vr.CamliSigner, ok = blob.Parse(signer.(string))
	if !ok {
		return vr.fail("malformed 'camliSigner' blobref in the JSON payload")
	}
	return true
}

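// A minimal payload that passes every check above (a sketch; the camliSigner
// value is a placeholder blobref, not a real public-key blob):
//
//	{
//		"camliVersion": 1,
//		"camliSigner": "sha1-0123456789abcdef0123456789abcdef01234567"
//	}
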
func (ui *UIHandler) serveFileTree(rw http.ResponseWriter, req *http.Request) {
	if ui.root.Storage == nil {
		http.Error(rw, "No BlobRoot configured", 500)
		return
	}
	suffix := httputil.PathSuffix(req)
	m := treePattern.FindStringSubmatch(suffix)
	if m == nil {
		httputil.ErrorRouting(rw, req)
		return
	}
	blobref, ok := blob.Parse(m[1])
	if !ok {
		http.Error(rw, "Invalid blobref", 400)
		return
	}
	fth := &FileTreeHandler{
		Fetcher: ui.root.Storage,
		file:    blobref,
	}
	fth.ServeHTTP(rw, req)
}

func (ui *UIHandler) serveDownload(rw http.ResponseWriter, req *http.Request) {
	if ui.root.Storage == nil {
		http.Error(rw, "No BlobRoot configured", 500)
		return
	}
	suffix := httputil.PathSuffix(req)
	m := downloadPattern.FindStringSubmatch(suffix)
	if m == nil {
		httputil.ErrorRouting(rw, req)
		return
	}
	fbr, ok := blob.Parse(m[1])
	if !ok {
		http.Error(rw, "Invalid blobref", 400)
		return
	}
	dh := &DownloadHandler{
		Fetcher: ui.root.Storage,
		Cache:   ui.Cache,
	}
	dh.ServeHTTP(rw, req, fbr)
}

func (ix *Index) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) (err error) {
	defer close(dest)
	it := ix.s.Find("have:"+after, "have~")
	defer func() {
		closeErr := it.Close()
		if err == nil {
			err = closeErr
		}
	}()
	n := 0
	for n < limit && it.Next() {
		k := it.Key()
		// Find's start is inclusive but "after" is exclusive, so skip the
		// starting row itself. The comparison must include the "have:"
		// prefix; comparing k against the bare blobref would skip everything.
		if k <= "have:"+after {
			continue
		}
		if !strings.HasPrefix(k, "have:") {
			break
		}
		n++
		br, ok := blob.Parse(k[len("have:"):])
		size, err := strconv.ParseUint(it.Value(), 10, 32)
		if ok && err == nil {
			select {
			case dest <- blob.SizedRef{Ref: br, Size: int64(size)}:
			case <-ctx.Done():
				return context.ErrCanceled
			}
		}
	}
	return nil
}

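// Layout of the index rows scanned above, inferred from the parsing code
// (the concrete ref and size are made up):
//
//	key:   "have:" + blobref
//	value: decimal blob size
//
//	"have:sha1-deadbeefdeadbeefdeadbeefdeadbeefdeadbeef" => "4096"
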
// ServeHTTP serves:
//   http://host/importer/
//   http://host/importer/twitter/
//   http://host/importer/twitter/callback
//   http://host/importer/twitter/sha1-abcabcabcabcabc (single account)
func (h *Host) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	suffix := httputil.PathSuffix(r)
	seg := strings.Split(suffix, "/")
	if suffix == "" || len(seg) == 0 {
		h.serveImportersRoot(w, r)
		return
	}
	impName := seg[0]
	imp, ok := h.imp[impName]
	if !ok {
		http.NotFound(w, r)
		return
	}
	if len(seg) == 1 || seg[1] == "" {
		h.serveImporter(w, r, imp)
		return
	}
	if seg[1] == "callback" {
		h.serveImporterAcctCallback(w, r, imp)
		return
	}
	acctRef, ok := blob.Parse(seg[1])
	if !ok {
		http.NotFound(w, r)
		return
	}
	h.serveImporterAccount(w, r, imp, acctRef)
}

// UpdateShareChain reads the schema of b from r, and instructs the client that
// all blob refs found in this schema should use b as a preceding chain link, in
// all subsequent shared blobs fetches. If the client was not created with
// NewFromShareRoot, ErrNotSharing is returned.
func (c *Client) UpdateShareChain(b blob.Ref, r io.Reader) error {
	c.viaMu.Lock()
	defer c.viaMu.Unlock()
	if c.via == nil {
		// Not in sharing mode, so return immediately.
		return ErrNotSharing
	}
	// Slurp 1 MB to find references to other blobrefs for the via path.
	var buf bytes.Buffer
	const maxSlurp = 1 << 20
	if _, err := io.Copy(&buf, io.LimitReader(r, maxSlurp)); err != nil {
		return err
	}
	// If it looks like a JSON schema blob (starts with '{'):
	if schema.LikelySchemaBlob(buf.Bytes()) {
		for _, blobstr := range blobsRx.FindAllString(buf.String(), -1) {
			br, ok := blob.Parse(blobstr)
			if !ok {
				log.Printf("Invalid blob ref %q noticed in schema of %v", blobstr, b)
				continue
			}
			c.via[br] = b
		}
	}
	return nil
}

func (c *PermanodeConstraint) permanodeMatchesAttrVal(s *search, val string) (bool, error) {
	if c.Value != "" && c.Value != val {
		return false, nil
	}
	if c.ValueMatches != nil && !c.ValueMatches.stringMatches(val) {
		return false, nil
	}
	if c.ValueMatchesInt != nil {
		if i, err := strconv.ParseInt(val, 10, 64); err != nil || !c.ValueMatchesInt.intMatches(i) {
			return false, nil
		}
	}
	if c.ValueMatchesFloat != nil {
		if f, err := strconv.ParseFloat(val, 64); err != nil || !c.ValueMatchesFloat.floatMatches(f) {
			return false, nil
		}
	}
	if subc := c.ValueInSet; subc != nil {
		br, ok := blob.Parse(val) // TODO: use corpus's parse, or keep this as blob.Ref in corpus attr
		if !ok {
			return false, nil
		}
		meta, err := s.blobMeta(br)
		if err == os.ErrNotExist {
			return false, nil
		}
		if err != nil {
			return false, err
		}
		return subc.matcher()(s, br, meta)
	}
	return true, nil
}

// zero value of at means current
func (c *Corpus) PermanodeLatLong(pn blob.Ref, at time.Time) (lat, long float64, ok bool) {
	nodeType := c.PermanodeAttrValue(pn, "camliNodeType", at, blob.Ref{})
	if nodeType == "" {
		return
	}
	// TODO: make these pluggable, e.g. registered from an importer or something?
	// How will that work when they're out-of-process?
	if nodeType == "foursquare.com:checkin" {
		venuePn, hasVenue := blob.Parse(c.PermanodeAttrValue(pn, "foursquareVenuePermanode", at, blob.Ref{}))
		if !hasVenue {
			return
		}
		return c.PermanodeLatLong(venuePn, at)
	}
	if nodeType == "foursquare.com:venue" || nodeType == "twitter.com:tweet" {
		var err error
		lat, err = strconv.ParseFloat(c.PermanodeAttrValue(pn, "latitude", at, blob.Ref{}), 64)
		if err != nil {
			return
		}
		long, err = strconv.ParseFloat(c.PermanodeAttrValue(pn, "longitude", at, blob.Ref{}), 64)
		if err != nil {
			return
		}
		return lat, long, true
	}
	return
}

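// Attribute chain resolved above (a sketch; the attribute names come from the
// code, the values are made up): a checkin permanode points at a venue
// permanode, which carries the actual coordinates.
//
//	checkin: camliNodeType=foursquare.com:checkin
//	         foursquareVenuePermanode=sha1-vvvv...
//	venue:   camliNodeType=foursquare.com:venue
//	         latitude=40.74, longitude=-73.98
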
func parseMetaValue(v string) (mv *metaValue, err error) {
	f := strings.Split(v, "/")
	if len(f) != 4 {
		return nil, errors.New("wrong number of fields")
	}
	mv = &metaValue{}
	plainSize, err := strconv.ParseUint(f[0], 10, 32)
	if err != nil {
		return nil, fmt.Errorf("bad plaintext size in meta %q", v)
	}
	mv.PlainSize = uint32(plainSize)
	mv.IV, err = hex.DecodeString(f[1])
	if err != nil {
		return nil, fmt.Errorf("bad iv in meta %q", v)
	}
	var ok bool
	mv.EncBlobRef, ok = blob.Parse(f[2])
	if !ok {
		return nil, fmt.Errorf("bad blobref in meta %q", v)
	}
	encSize, err := strconv.ParseUint(f[3], 10, 32)
	if err != nil {
		return nil, fmt.Errorf("bad encrypted size in meta %q", v)
	}
	mv.EncSize = uint32(encSize)
	return mv, nil
}

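// Value layout consumed above, inferred from the parser (the example value is
// constructed, not taken from a real encrypt index):
//
//	<plaintext size>/<hex IV>/<encrypted blobref>/<encrypted size>
//
//	mv, err := parseMetaValue("1024/0123456789abcdef0123456789abcdef/sha1-deadbeefdeadbeefdeadbeefdeadbeefdeadbeef/1056")
//	// mv.PlainSize == 1024, len(mv.IV) == 16, mv.EncSize == 1056
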
func (s *storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) (err error) {
	defer close(dest)
	t := s.index.Find(after, "")
	defer func() {
		closeErr := t.Close()
		if err == nil {
			err = closeErr
		}
	}()
	for i := 0; i < limit && t.Next(); {
		key := t.Key()
		if key <= after {
			// EnumerateBlobs' semantics are '>', but sorted.KeyValue.Find is '>='.
			continue
		}
		br, ok := blob.Parse(key)
		if !ok {
			return fmt.Errorf("diskpacked: couldn't parse index key %q", key)
		}
		m, ok := parseBlobMeta(t.Value())
		if !ok {
			return fmt.Errorf("diskpacked: couldn't parse index value %q: %q", key, t.Value())
		}
		select {
		case dest <- m.SizedRef(br):
		case <-ctx.Done():
			return context.ErrCanceled
		}
		i++
	}
	return nil
}

func (c *Corpus) mergeClaimRow(k, v []byte) error {
	// TODO: update kvClaim to take []byte instead of string
	cl, ok := kvClaim(string(k), string(v), c.blobParse)
	if !ok || !cl.Permanode.Valid() {
		return fmt.Errorf("bogus claim row: %q -> %q", k, v)
	}
	cl.Type = c.str(cl.Type)
	cl.Attr = c.str(cl.Attr)
	cl.Value = c.str(cl.Value) // less likely to intern, but some (tags) do

	pn := c.br(cl.Permanode)
	pm, ok := c.permanodes[pn]
	if !ok {
		pm = new(PermanodeMeta)
		c.permanodes[pn] = pm
	}
	pm.Claims = append(pm.Claims, &cl)
	if !c.building {
		// Unless we're still starting up (at which point we sort at
		// the end instead), keep claims sorted and attrs in sync.
		pm.fixupLastClaim()
	}
	if vbr, ok := blob.Parse(cl.Value); ok {
		c.claimBack[vbr] = append(c.claimBack[vbr], &cl)
	}
	return nil
}

func (m *mongoStorage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	var b blobDoc
	var qry bson.M
	if after != "" {
		qry = bson.M{"key": bson.M{"$gt": after}}
	}
	iter := m.c.Find(qry).Limit(limit).Select(bson.M{"key": 1, "size": 1}).Sort("key").Iter()
	for iter.Next(&b) {
		br, ok := blob.Parse(b.Key)
		if !ok {
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: uint32(b.Size)}:
		case <-ctx.Done():
			// Close the iterator, but ignore the error value since we are already cancelling.
			if err := iter.Close(); err != nil {
				log.Printf("Error closing iterator after enumerating: %v", err)
			}
			return ctx.Err()
		}
	}
	if err := iter.Close(); err != nil {
		return err
	}
	return nil
}