// StaticSet returns all the static set members of the directory.
func (dr *DirReader) StaticSet() ([]*blobref.BlobRef, os.Error) {
	if dr.staticSet != nil {
		return dr.staticSet, nil
	}
	staticSetBlobref := blobref.Parse(dr.ss.Entries)
	if staticSetBlobref == nil {
		return nil, fmt.Errorf("schema/filereader: invalid blobref")
	}
	rsc, _, err := dr.fetcher.Fetch(staticSetBlobref)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: fetching schema blob %s: %v", staticSetBlobref, err)
	}
	ss := new(Superset)
	if err = json.NewDecoder(rsc).Decode(ss); err != nil {
		return nil, fmt.Errorf("schema/filereader: decoding schema blob %s: %v", staticSetBlobref, err)
	}
	if ss.Type != "static-set" {
		return nil, fmt.Errorf("schema/filereader: expected \"static-set\" schema blob for %s, got %q", staticSetBlobref, ss.Type)
	}
	for _, s := range ss.Members {
		member := blobref.Parse(s)
		if member == nil {
			return nil, fmt.Errorf("schema/filereader: invalid (static-set member) blobref")
		}
		dr.staticSet = append(dr.staticSet, member)
	}
	return dr.staticSet, nil
}
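// Usage sketch (hypothetical, for illustration only): walking a directory's
// members given its schema blobref. This assumes a NewDirReader constructor
// taking a fetcher and the directory blobref; error handling is abbreviated.
//
//	dr, err := schema.NewDirReader(fetcher, dirBlobRef)
//	if err != nil {
//		log.Fatalf("dir reader: %v", err)
//	}
//	members, err := dr.StaticSet()
//	if err != nil {
//		log.Fatalf("static set: %v", err)
//	}
//	for _, br := range members {
//		log.Printf("member: %s", br)
//	}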
func (mi *Indexer) PathsLookup(signer, base *blobref.BlobRef, suffix string) (paths []*search.Path, err os.Error) {
	keyId, err := mi.keyIdOfSigner(signer)
	if err != nil {
		return
	}
	rs, err := mi.db.Query("SELECT claimref, claimdate, targetref FROM path "+
		"WHERE keyid=? AND baseref=? AND suffix=?",
		keyId, base.String(), suffix)
	if err != nil {
		return
	}
	defer rs.Close()

	var claimref, claimdate, targetref string
	for rs.Next() {
		if err = rs.Scan(&claimref, &claimdate, &targetref); err != nil {
			return
		}
		t, err := time.Parse(time.RFC3339, trimRFC3339Subseconds(claimdate))
		if err != nil {
			log.Printf("Skipping bogus path row with bad time: %q", claimref)
			continue
		}
		_ = t // TODO: use this?
		paths = append(paths, &search.Path{
			Claim:     blobref.Parse(claimref),
			ClaimDate: claimdate,
			Base:      base,
			Target:    blobref.Parse(targetref),
			Suffix:    suffix,
		})
	}
	return
}
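// Note (an assumption from its name and use above): trimRFC3339Subseconds
// strips a fractional-seconds component so the plain time.RFC3339 layout
// accepts the stored value, e.g.:
//
//	"2011-07-21T05:04:03.123Z" -> "2011-07-21T05:04:03Z"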
func TestHubFiring(t *testing.T) {
	hub := &SimpleBlobHub{}
	ch := make(chan *blobref.BlobRef)
	bch := make(chan *blobref.BlobRef)
	blob := blobref.Parse("sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
	blobsame := blobref.Parse("sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")

	hub.NotifyBlobReceived(blob) // no-op

	hub.RegisterListener(ch)
	hub.RegisterBlobListener(blob, bch)
	hub.NotifyBlobReceived(blobsame)

	tmr1 := time.NewTimer(1e9)
	select {
	case <-tmr1.C:
		t.Fatal("timer expired on receiving from ch")
	case got := <-ch:
		if !blob.Equals(got) {
			t.Fatalf("got wrong blob")
		}
	}

	select {
	case <-tmr1.C:
		t.Fatal("timer expired on receiving from bch")
	case got := <-bch:
		if !blob.Equals(got) {
			t.Fatalf("got wrong blob")
		}
	}
	tmr1.Stop()
}
func (fs *CamliFileSystem) OpenDir(name string) (stream chan fuse.DirEntry, code fuse.Status) {
	defer func() {
		log.Printf("cammount: OpenDir(%q) = %v", name, code)
	}()
	dirss, status := fs.getSchemaBlobByNameAndType(name, "directory")
	if status != fuse.OK {
		return nil, status
	}

	if dirss.Entries == "" {
		// TODO: can this be empty for an empty directory?
		// Clarify in the spec one way or another. Probably best
		// to make it required, to remove special cases.
		log.Printf("Expected %s to have 'entries'", dirss.BlobRef)
		return nil, fuse.ENOTDIR
	}

	entriesBlob := blobref.Parse(dirss.Entries)
	if entriesBlob == nil {
		log.Printf("Blob %s had invalid blobref %q for its 'entries'", dirss.BlobRef, dirss.Entries)
		return nil, fuse.ENOTDIR
	}

	entss, status := fs.getSchemaBlobByBlobRefAndType(entriesBlob, "static-set")
	if status != fuse.OK {
		return nil, status
	}

	retch := make(chan fuse.DirEntry, 20)
	wg := new(sync.WaitGroup)
	for _, m := range entss.Members {
		wg.Add(1)
		go func(memberBlobstr string) {
			defer wg.Done()
			memberBlob := blobref.Parse(memberBlobstr)
			if memberBlob == nil {
				log.Printf("invalid blobref of %q in static set %s", memberBlobstr, entss)
				return
			}
			childss, err := fs.fetchSchemaSuperset(memberBlob)
			if err != nil {
				log.Printf("Error fetching %s: %v", memberBlobstr, err)
				return
			}
			fileName := childss.FileNameString()
			if fileName == "" {
				log.Printf("Blob %s had no filename", childss.BlobRef)
				return
			}
			retch <- fuse.DirEntry{Name: fileName, Mode: childss.UnixMode()}
		}(m)
	}
	go func() {
		wg.Wait()
		close(retch)
	}()
	return retch, fuse.OK
}
func main() {
	flag.Parse()

	client := client.NewOrFail()
	if *flagCheck {
		// TODO: simply do HEAD requests checking whether the blobs exist.
		return
	}

	var w io.Writer = os.Stdout

	for n := 0; n < flag.NArg(); n++ {
		arg := flag.Arg(n)
		br := blobref.Parse(arg)
		if br == nil {
			log.Fatalf("Failed to parse argument %q as a blobref.", arg)
		}
		if *flagVerbose {
			log.Printf("Need to fetch %s", br.String())
		}
		var (
			r   io.ReadCloser
			err os.Error
		)

		if len(*flagVia) > 0 {
			vs := strings.Split(*flagVia, ",")
			abr := make([]*blobref.BlobRef, len(vs))
			for i, sbr := range vs {
				abr[i] = blobref.Parse(sbr)
				if abr[i] == nil {
					log.Fatalf("Invalid -via blobref: %q", sbr)
				}
				if *flagVerbose {
					log.Printf("via: %s", sbr)
				}
			}
			r, _, err = client.FetchVia(br, abr)
		} else {
			r, _, err = client.FetchStreaming(br)
		}
		if err != nil {
			log.Fatalf("Failed to fetch %q: %s", br, err)
		}
		_, err = io.Copy(w, r)
		r.Close() // close each blob's reader now; a defer here would pile up until main exits
		if err != nil {
			log.Fatalf("Failed transferring %q: %s", br, err)
		}
	}
}
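// Example invocations (a sketch from the flags above; the binary name is
// illustrative):
//
//	camget sha1-xxx...                       # fetch a blob, write to stdout
//	camget -via=sha1-aa...,sha1-bb... sha1-xxx...   # fetch via share blobrefs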
func (vr *VerifyRequest) ParsePayloadMap() bool {
	vr.PayloadMap = make(map[string]interface{})
	pm := vr.PayloadMap

	if err := json.Unmarshal(vr.bpj, &pm); err != nil {
		return vr.fail("parse error; payload JSON is invalid")
	}

	if _, hasVersion := pm["camliVersion"]; !hasVersion {
		return vr.fail("Missing 'camliVersion' in the JSON payload")
	}

	signer, hasSigner := pm["camliSigner"]
	if !hasSigner {
		return vr.fail("Missing 'camliSigner' in the JSON payload")
	}

	if _, ok := signer.(string); !ok {
		return vr.fail("Invalid 'camliSigner' in the JSON payload")
	}

	vr.CamliSigner = blobref.Parse(signer.(string))
	if vr.CamliSigner == nil {
		return vr.fail("Malformed 'camliSigner' blobref in the JSON payload")
	}
	return true
}
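// A minimal payload that passes these checks looks like the following; the
// signer value is illustrative:
//
//	{
//	  "camliVersion": 1,
//	  "camliSigner": "sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"
//	}
//
// ParsePayloadMap only validates presence and shape; the actual signature
// check is a separate step in the VerifyRequest flow.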
func (sh *Handler) serveFiles(rw http.ResponseWriter, req *http.Request) {
	ret := jsonMap()
	defer httputil.ReturnJson(rw, ret)

	br := blobref.Parse(req.FormValue("wholedigest"))
	if br == nil {
		ret["error"] = "Missing or invalid 'wholedigest' param"
		ret["errorType"] = "input"
		return
	}

	files, err := sh.index.ExistingFileSchemas(br)
	if err != nil {
		ret["error"] = err.String()
		ret["errorType"] = "server"
		return
	}

	strList := []string{}
	for _, br := range files {
		strList = append(strList, br.String())
	}
	ret["files"] = strList
}
func (ui *UIHandler) serveDownload(rw http.ResponseWriter, req *http.Request) {
	if ui.Storage == nil {
		http.Error(rw, "No BlobRoot configured", 500)
		return
	}

	suffix := req.Header.Get("X-PrefixHandler-PathSuffix")
	m := downloadPattern.FindStringSubmatch(suffix)
	if m == nil {
		httputil.ErrorRouting(rw, req)
		return
	}

	fbr := blobref.Parse(m[1])
	if fbr == nil {
		http.Error(rw, "Invalid blobref", 400)
		return
	}

	dh := &DownloadHandler{
		Fetcher: ui.Storage,
		Cache:   ui.Cache,
	}
	dh.ServeHTTP(rw, req, fbr)
}
func newHandlerFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Handler, os.Error) {
	indexPrefix := conf.RequiredString("index") // TODO: add optional help tips here?
	ownerBlobStr := conf.RequiredString("owner")
	if err := conf.Validate(); err != nil {
		return nil, err
	}

	indexHandler, err := ld.GetHandler(indexPrefix)
	if err != nil {
		return nil, fmt.Errorf("search config references unknown handler %q", indexPrefix)
	}
	indexer, ok := indexHandler.(Index)
	if !ok {
		return nil, fmt.Errorf("search config references invalid indexer %q (actually a %T)", indexPrefix, indexHandler)
	}

	ownerBlobRef := blobref.Parse(ownerBlobStr)
	if ownerBlobRef == nil {
		return nil, fmt.Errorf("search 'owner' has malformed blobref %q; expecting e.g. sha1-xxxxxxxxxxxx",
			ownerBlobStr)
	}
	return &Handler{
		index: indexer,
		owner: ownerBlobRef,
	}, nil
}
// TODO(rh): tame copy/paste code from cammount
func main() {
	flag.Parse()

	cacheDir, err := ioutil.TempDir("", "camlicache")
	if err != nil {
		log.Fatalf("Error creating temp cache directory: %v", err)
	}
	defer os.RemoveAll(cacheDir)
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	if flag.NArg() != 1 {
		log.Fatal("usage: camwebdav <blobref>")
	}
	br := blobref.Parse(flag.Arg(0))
	if br == nil {
		log.Fatalf("%s was not a valid blobref.", flag.Arg(0))
	}
	client := client.NewOrFail()
	fetcher := cacher.NewCachingFetcher(diskcache, client)

	f = fs.NewCamliFileSystem(fetcher, br)
	http.HandleFunc("/", webdav)
	err = http.ListenAndServe(*davaddr, nil)
	if err != nil {
		log.Fatalf("Error starting WebDAV server: %v", err)
	}
}
func (mi *Indexer) GetOwnerClaims(permanode, owner *blobref.BlobRef) (claims search.ClaimList, err os.Error) {
	claims = make(search.ClaimList, 0)

	// TODO: ignore rows where unverified = 'N'
	rs, err := mi.db.Query("SELECT blobref, date, claim, attr, value FROM claims WHERE permanode = ? AND signer = ?",
		permanode.String(), owner.String())
	if err != nil {
		return
	}
	defer rs.Close()

	var row claimsRow
	for rs.Next() {
		err = rs.Scan(&row.blobref, &row.date, &row.claim, &row.attr, &row.value)
		if err != nil {
			return
		}
		t, err := time.Parse(time.RFC3339, trimRFC3339Subseconds(row.date))
		if err != nil {
			log.Printf("Skipping; error parsing time %q: %v", row.date, err)
			continue
		}
		claims = append(claims, &search.Claim{
			BlobRef:   blobref.Parse(row.blobref),
			Signer:    owner,
			Permanode: permanode,
			Type:      row.claim,
			Date:      t,
			Attr:      row.attr,
			Value:     row.value,
		})
	}
	return
}
// fileSchemaRefFromBlob, given a described blob, optionally follows a
// camliContent and returns the file's schema blobref and its fileinfo
// (if found).
func (pr *publishRequest) fileSchemaRefFromBlob(des *search.DescribedBlob) (fileref *blobref.BlobRef, fileinfo *search.FileInfo, ok bool) {
	if des == nil {
		http.NotFound(pr.rw, pr.req)
		return
	}
	if des.Permanode != nil {
		// TODO: get "forceMime" attr out of the permanode? Or
		// fileName content-disposition?
		if cref := des.Permanode.Attr.Get("camliContent"); cref != "" {
			cbr := blobref.Parse(cref)
			if cbr == nil {
				http.Error(pr.rw, "bogus camliContent", 500)
				return
			}
			des = des.PeerBlob(cbr)
			if des == nil {
				http.Error(pr.rw, "camliContent not a peer in describe", 500)
				return
			}
		}
	}
	if des.CamliType == "file" {
		return des.BlobRef, des.File, true
	}
	http.Error(pr.rw, "failed to find fileSchemaRefFromBlob", 404)
	return
}
func (mi *Indexer) ExistingFileSchemas(bytesRef *blobref.BlobRef) (files []*blobref.BlobRef, err os.Error) {
	client, err := mi.getConnection()
	if err != nil {
		return
	}
	defer func() {
		if err == nil {
			mi.releaseConnection(client)
		} else {
			client.Close()
		}
	}()

	err = client.Query(fmt.Sprintf("SELECT fileschemaref FROM files WHERE bytesref=%q", bytesRef.String()))
	if err != nil {
		return
	}

	result, err := client.StoreResult()
	if err != nil {
		return
	}
	defer client.FreeResult()

	for {
		row := result.FetchRow()
		if row == nil {
			break
		}
		// Skip malformed rows rather than appending a nil blobref.
		if br := blobref.Parse(row[0].(string)); br != nil {
			files = append(files, br)
		}
	}
	return
}
func (c *attrCmd) RunCommand(up *Uploader, args []string) os.Error {
	if len(args) != 3 {
		return os.NewError("Attr takes 3 args: <permanode> <attr> <value>")
	}
	permanode, attr, value := args[0], args[1], args[2]

	var err os.Error
	pn := blobref.Parse(permanode)
	if pn == nil {
		return fmt.Errorf("Error parsing blobref %q", permanode)
	}
	m := schema.NewSetAttributeClaim(pn, attr, value)
	if c.add {
		if c.del {
			return os.NewError("Add and del options are exclusive")
		}
		m = schema.NewAddAttributeClaim(pn, attr, value)
	} else {
		// TODO: del, which can make <value> be optional
		if c.del {
			return os.NewError("del not yet implemented")
		}
	}
	put, err := up.UploadAndSignMap(m)
	handleResult(m["claimType"].(string), put, err)
	return nil
}
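// Example invocations (a sketch; the command name and flag spellings are
// illustrative, inferred from the c.add/c.del fields above):
//
//	camput attr <permanode-blobref> camliContent sha1-xxx...
//	camput attr -add <permanode-blobref> tag vacation
//
// Without the add option the claim replaces the attribute (set-attribute
// claim); with it, a value is appended (add-attribute claim).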
func (ui *UIHandler) serveFileTree(rw http.ResponseWriter, req *http.Request) {
	if ui.Storage == nil {
		http.Error(rw, "No BlobRoot configured", 500)
		return
	}

	suffix := req.Header.Get("X-PrefixHandler-PathSuffix")
	m := treePattern.FindStringSubmatch(suffix)
	if m == nil {
		httputil.ErrorRouting(rw, req)
		return
	}

	file := blobref.Parse(m[1]) // named "file", not "blobref", to avoid shadowing the package
	if file == nil {
		http.Error(rw, "Invalid blobref", 400)
		return
	}

	fth := &FileTreeHandler{
		Fetcher: ui.Storage,
		file:    file,
	}
	fth.ServeHTTP(rw, req)
}
func TestPaths(t *testing.T) {
	br := blobref.Parse("digalg-abc")
	ds := &DiskStorage{root: "/tmp/dir"}

	if e, g := "/tmp/dir/digalg/abc/___", ds.blobDirectory("", br); e != g {
		t.Errorf("short blobref dir; expected path %q; got %q", e, g)
	}
	if e, g := "/tmp/dir/digalg/abc/___/digalg-abc.dat", ds.blobPath("", br); e != g {
		t.Errorf("short blobref path; expected path %q; got %q", e, g)
	}

	br = blobref.Parse("sha1-c22b5f9178342609428d6f51b2c5af4c0bde6a42")
	if e, g := "/tmp/dir/partition/foo/sha1/c22/b5f", ds.blobDirectory("foo", br); e != g {
		t.Errorf("amazon queue dir; expected path %q; got %q", e, g)
	}
}
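// The on-disk layout this test pins down (inferred from the expected paths):
//
//	<root>[/partition/<name>]/<hashname>/<first-3-hex>/<next-3-hex>/<ref>.dat
//
// with digests too short to fill both directory levels padded out with
// underscores, as in the "digalg-abc" case above.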
func (dh *DownloadHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file *blobref.BlobRef) {
	if req.Method != "GET" && req.Method != "HEAD" {
		http.Error(rw, "Invalid download method", 400)
		return
	}

	fetchSeeker, err := dh.storageSeekFetcher()
	if err != nil {
		http.Error(rw, err.String(), 500)
		return
	}

	fr, err := schema.NewFileReader(fetchSeeker, file)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.String(), 500)
		return
	}
	defer fr.Close()

	fileSchema := fr.FileSchema() // not named "schema", to avoid shadowing the package
	rw.Header().Set("Content-Length", fmt.Sprintf("%d", fileSchema.SumPartsSize()))

	// TODO: fr.FileSchema() and guess a mime type? For now:
	mimeType := "application/octet-stream"
	if dh.ForceMime != "" {
		mimeType = dh.ForceMime
	}
	rw.Header().Set("Content-Type", mimeType)

	if req.Method == "HEAD" {
		vbr := blobref.Parse(req.FormValue("verifycontents"))
		if vbr == nil {
			return
		}
		hash := vbr.Hash()
		if hash == nil {
			return
		}
		io.Copy(hash, fr) // ignore errors, caught later
		if vbr.HashMatches(hash) {
			rw.Header().Set("X-Camli-Contents", vbr.String())
		}
		return
	}

	n, err := io.Copy(rw, fr)
	log.Printf("For %q request of %s: copied %d, %v", req.Method, req.URL.RawPath, n, err)
	if err != nil {
		log.Printf("error serving download of file schema %s: %v", file, err)
		return
	}
	if size := fileSchema.SumPartsSize(); n != int64(size) {
		log.Printf("error serving download of file schema %s: sent %d, expected size of %d",
			file, n, size)
	}
}
func (b *DescribedBlob) ContentRef() (br *blobref.BlobRef, ok bool) {
	if b != nil && b.Permanode != nil {
		if cref := b.Permanode.Attr.Get("camliContent"); cref != "" {
			br = blobref.Parse(cref)
			return br, br != nil
		}
	}
	return
}
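// Callers use the comma-ok form to distinguish "no camliContent attribute"
// from a malformed one, e.g. (illustrative only; "des" is a hypothetical
// *DescribedBlob):
//
//	if br, ok := des.ContentRef(); ok {
//		log.Printf("content of %s is %s", des.BlobRef, br)
//	}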
func handleRemove(conn http.ResponseWriter, req *http.Request, storage blobserver.Storage) {
	if w, ok := storage.(blobserver.ContextWrapper); ok {
		storage = w.WrapContext(req)
	}
	if req.Method != "POST" {
		log.Fatalf("Invalid method; handlers misconfigured")
	}

	configer, ok := storage.(blobserver.Configer)
	if !ok {
		conn.WriteHeader(http.StatusForbidden)
		fmt.Fprintf(conn, "Remove handler's blobserver.Storage isn't a blobserver.Configer; can't remove")
		return
	}
	if !configer.Config().IsQueue {
		conn.WriteHeader(http.StatusForbidden)
		fmt.Fprintf(conn, "Can only remove blobs from a queue.\n")
		return
	}

	n := 0
	toRemove := make([]*blobref.BlobRef, 0)
	toRemoveStr := make([]string, 0)
	for {
		n++
		if n > maxRemovesPerRequest {
			httputil.BadRequestError(conn,
				fmt.Sprintf("Too many removes in this request; max is %d", maxRemovesPerRequest))
			return
		}
		key := fmt.Sprintf("blob%v", n)
		value := req.FormValue(key)
		if value == "" {
			break
		}
		ref := blobref.Parse(value)
		if ref == nil {
			httputil.BadRequestError(conn, "Bogus blobref for key "+key)
			return
		}
		toRemove = append(toRemove, ref)
		toRemoveStr = append(toRemoveStr, ref.String())
	}

	err := storage.RemoveBlobs(toRemove)
	if err != nil {
		conn.WriteHeader(http.StatusInternalServerError)
		log.Printf("Server error during remove: %v", err)
		fmt.Fprintf(conn, "Server error")
		return
	}

	reply := make(map[string]interface{}, 0)
	reply["removed"] = toRemoveStr
	httputil.ReturnJson(conn, reply)
}
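// Wire format, as implied by the loop above: clients POST numbered form keys
// starting at blob1, e.g.
//
//	blob1=sha1-aaa...&blob2=sha1-bbb...
//
// and the JSON reply lists what was removed:
//
//	{"removed": ["sha1-aaa...", "sha1-bbb..."]}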
func (mi *Indexer) populateClaim(client *mysql.Client, blobRef *blobref.BlobRef, camli *schema.Superset, sniffer *blobSniffer) (err os.Error) {
	pnBlobref := blobref.Parse(camli.Permanode)
	if pnBlobref == nil {
		// Skip bogus claim with malformed permanode.
		return
	}

	verifiedKeyId := ""
	// Use a distinct variable for the sniffer error so the execSQL failure
	// below assigns the named return err rather than a shadowed copy.
	rawJson, serr := sniffer.Body()
	if serr == nil {
		vr := jsonsign.NewVerificationRequest(rawJson, mi.KeyFetcher)
		if vr.Verify() {
			verifiedKeyId = vr.SignerKeyId
			log.Printf("mysqlindex: verified claim %s from %s", blobRef, verifiedKeyId)

			if err = execSQL(client,
				"INSERT IGNORE INTO signerkeyid (blobref, keyid) "+
					"VALUES (?, ?)",
				vr.CamliSigner.String(), verifiedKeyId); err != nil {
				return
			}
		} else {
			log.Printf("mysqlindex: verification failure on claim %s: %v", blobRef, vr.Err)
		}
	}

	if err = execSQL(client,
		"INSERT IGNORE INTO claims (blobref, signer, verifiedkeyid, date, unverified, claim, permanode, attr, value) "+
			"VALUES (?, ?, ?, ?, 'Y', ?, ?, ?, ?)",
		blobRef.String(), camli.Signer, verifiedKeyId, camli.ClaimDate,
		camli.ClaimType, camli.Permanode, camli.Attribute, camli.Value); err != nil {
		return
	}

	if verifiedKeyId != "" {
		// TODO: limit this to only certain attributes (for now, just "camliRoot") once
		// the search handler is working and the UI permits setting camliRoot.
		if err = execSQL(client,
			"INSERT IGNORE INTO signerattrvalue (keyid, attr, value, claimdate, blobref, permanode) "+
				"VALUES (?, ?, ?, ?, ?, ?)",
			verifiedKeyId, camli.Attribute, camli.Value,
			camli.ClaimDate, blobRef.String(), camli.Permanode); err != nil {
			return
		}
	}

	// And update the lastmod on the permanode row.
	if err = execSQL(client,
		"INSERT IGNORE INTO permanodes (blobref) VALUES (?)",
		pnBlobref.String()); err != nil {
		return
	}
	if err = execSQL(client,
		"UPDATE permanodes SET lastmod=? WHERE blobref=? AND ? > lastmod",
		camli.ClaimDate, pnBlobref.String(), camli.ClaimDate); err != nil {
		return
	}
	return nil
}
func (mi *Indexer) GetRecentPermanodes(dest chan *search.Result, owner []*blobref.BlobRef, limit int) os.Error {
	defer close(dest)
	if len(owner) == 0 {
		return nil
	}
	if len(owner) > 1 {
		panic("TODO: remove support for more than one owner. push it to caller")
	}

	rs, err := mi.db.Query("SELECT blobref, signer, lastmod FROM permanodes WHERE signer = ? AND lastmod <> '' "+
		"ORDER BY lastmod DESC LIMIT ?",
		owner[0].String(), limit)
	if err != nil {
		return err
	}
	defer rs.Close()

	var blobstr, signerstr, modstr string
	for rs.Next() {
		if err := rs.Scan(&blobstr, &signerstr, &modstr); err != nil {
			return err
		}
		br := blobref.Parse(blobstr)
		if br == nil {
			continue
		}
		signer := blobref.Parse(signerstr)
		if signer == nil {
			continue
		}
		modstr = trimRFC3339Subseconds(modstr)
		t, err := time.Parse(time.RFC3339, modstr)
		if err != nil {
			log.Printf("Skipping; error parsing time %q: %v", modstr, err)
			continue
		}
		dest <- &search.Result{
			BlobRef:     br,
			Signer:      signer,
			LastModTime: t.Seconds(),
		}
	}
	return nil
}
func (mi *Indexer) GetOwnerClaims(permanode, owner *blobref.BlobRef) (claims search.ClaimList, reterr os.Error) {
	claims = make(search.ClaimList, 0)
	client, err := mi.getConnection()
	if err != nil {
		reterr = err
		return
	}
	defer mi.releaseConnection(client)

	// TODO: ignore rows where unverified = 'N'
	stmt, err := client.Prepare("SELECT blobref, date, claim, attr, value FROM claims WHERE permanode = ? AND signer = ?")
	if err != nil {
		reterr = err
		return
	}
	err = stmt.BindParams(permanode.String(), owner.String())
	if err != nil {
		reterr = err
		return
	}
	err = stmt.Execute()
	if err != nil {
		reterr = err
		return
	}

	var row claimsRow
	stmt.BindResult(&row.blobref, &row.date, &row.claim, &row.attr, &row.value)
	defer stmt.Close()
	for {
		done, err := stmt.Fetch()
		if err != nil {
			reterr = err
			return
		}
		if done {
			break
		}
		t, err := time.Parse(time.RFC3339, trimRFC3339Subseconds(row.date))
		if err != nil {
			log.Printf("Skipping; error parsing time %q: %v", row.date, err)
			continue
		}
		claims = append(claims, &search.Claim{
			BlobRef:   blobref.Parse(row.blobref),
			Signer:    owner,
			Permanode: permanode,
			Type:      row.claim,
			Date:      t,
			Attr:      row.attr,
			Value:     row.value,
		})
	}
	return
}
func main() {
	flag.Parse()

	if sumSet(flagFile, flagBlob, flagPermanode, flagInit, flagShare) != 1 {
		// TODO: say which ones are conflicting
		usage("Conflicting mode options.")
	}
	client := client.NewOrFail()
	if !*flagVerbose {
		client.SetLogger(nil)
	}
	uploader := &Uploader{client}

	switch {
	case *flagInit:
		doInit()
		return
	case *flagPermanode:
		if flag.NArg() > 0 {
			log.Exitf("--permanode doesn't take any additional arguments")
		}
		pr, err := uploader.UploadNewPermanode()
		handleResult("permanode", pr, err)
	case *flagFile || *flagBlob:
		for n := 0; n < flag.NArg(); n++ {
			if *flagBlob {
				pr, err := uploader.UploadFileBlob(flag.Arg(n))
				handleResult("blob", pr, err)
			} else {
				pr, err := uploader.UploadFile(flag.Arg(n))
				handleResult("file", pr, err)
			}
		}
	case *flagShare:
		if flag.NArg() != 1 {
			log.Exitf("--share only supports one blobref")
		}
		br := blobref.Parse(flag.Arg(0))
		if br == nil {
			log.Exitf("BlobRef is invalid: %q", flag.Arg(0))
		}
		pr, err := uploader.UploadShare(br, *flagTransitive)
		handleResult("share", pr, err)
	}

	if *flagVerbose {
		stats := uploader.Stats()
		log.Printf("Client stats: %s", stats.String())
	}
	if wereErrors {
		os.Exit(2)
	}
}
func TestHubRegistration(t *testing.T) {
	hub := &SimpleBlobHub{}
	ch := make(chan *blobref.BlobRef)
	ch2 := make(chan *blobref.BlobRef)
	b1 := blobref.Parse("sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
	b2 := blobref.Parse("sha1-62cdb7020ff920e5aa642c3d4066950dd1f01f4d")

	Expect(t, hub.listeners == nil, "hub.listeners is nil before RegisterListener")

	hub.RegisterListener(ch)
	ExpectInt(t, 1, len(hub.listeners), "len(hub.listeners) after RegisterListener")

	hub.RegisterListener(ch2)
	ExpectInt(t, 2, len(hub.listeners), "len(hub.listeners) after ch2 RegisterListener")

	hub.UnregisterListener(ch)
	ExpectInt(t, 1, len(hub.listeners), "len(hub.listeners) after UnregisterListener")

	hub.UnregisterListener(ch2)
	ExpectInt(t, 0, len(hub.listeners), "len(hub.listeners) after UnregisterListener")

	Expect(t, hub.blobListeners == nil, "hub.blobListeners is nil before RegisterBlobListener")

	hub.RegisterBlobListener(b1, ch)
	Expect(t, hub.blobListeners != nil, "hub.blobListeners is not nil after RegisterBlobListener")
	Expect(t, hub.blobListeners[b1.String()] != nil, "b1 in hub.blobListeners map")
	ExpectInt(t, 1, len(hub.blobListeners[b1.String()]), "hub.blobListeners[b1] size")
	ExpectInt(t, 1, len(hub.blobListeners), "hub.blobListeners size")

	hub.RegisterBlobListener(b2, ch)
	ExpectInt(t, 1, len(hub.blobListeners[b2.String()]), "hub.blobListeners[b2] size")
	ExpectInt(t, 2, len(hub.blobListeners), "hub.blobListeners size")

	hub.UnregisterBlobListener(b2, ch)
	Expect(t, hub.blobListeners[b2.String()] == nil, "b2 not in hub.blobListeners")
	ExpectInt(t, 1, len(hub.blobListeners), "hub.blobListeners size")

	hub.UnregisterBlobListener(b1, ch)
	Expect(t, hub.blobListeners[b1.String()] == nil, "b1 not in hub.blobListeners")
	ExpectInt(t, 0, len(hub.blobListeners), "hub.blobListeners size")
}
func (c *shareCmd) RunCommand(up *Uploader, args []string) os.Error {
	if len(args) != 1 {
		return UsageError("share takes exactly one argument, a blobref")
	}
	br := blobref.Parse(args[0]) // parse the subcommand's own argument, not flag.Arg(0)
	if br == nil {
		return UsageError("invalid blobref")
	}
	pr, err := up.UploadShare(br, c.transitive)
	handleResult("share", pr, err)
	return nil
}
func sendTestBlobs(ch chan blobref.SizedBlobRef, list string) {
	defer close(ch)
	if list == "" {
		return
	}
	for _, b := range strings.Split(list, ",", -1) {
		br := blobref.Parse(b)
		if br == nil {
			panic("Invalid blobref: " + b)
		}
		ch <- blobref.SizedBlobRef{BlobRef: br, Size: 123}
	}
}
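// Usage sketch (hypothetical test code): the helper closes the channel when
// done, so callers can simply range over it.
//
//	ch := make(chan blobref.SizedBlobRef)
//	go sendTestBlobs(ch, "sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
//	for sb := range ch {
//		t.Logf("got %s (%d bytes)", sb.BlobRef, sb.Size)
//	}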
func (b *DescribedBlob) Members() []*DescribedBlob {
	if b == nil {
		return nil
	}
	m := make([]*DescribedBlob, 0)
	if b.Permanode != nil {
		for _, bstr := range b.Permanode.Attr["camliMember"] {
			if br := blobref.Parse(bstr); br != nil {
				m = append(m, b.PeerBlob(br))
			}
		}
	}
	return m
}
func (mi *Indexer) Stat(dest chan<- blobref.SizedBlobRef, blobs []*blobref.BlobRef, waitSeconds int) os.Error {
	error := func(err os.Error) os.Error {
		log.Printf("mysqlindexer: stat error: %v", err)
		return err
	}

	// MySQL connection stuff.
	client, err := mi.getConnection()
	if err != nil {
		return error(err)
	}
	defer mi.releaseConnection(client)

	quotedBlobRefs := []string{}
	for _, br := range blobs {
		quotedBlobRefs = append(quotedBlobRefs, fmt.Sprintf("%q", br.String()))
	}
	sql := "SELECT blobref, size FROM blobs WHERE blobref IN (" +
		strings.Join(quotedBlobRefs, ", ") + ")"
	log.Printf("Running: [%s]", sql)

	stmt, err := client.Prepare(sql)
	if err != nil {
		return error(err)
	}
	err = stmt.Execute()
	if err != nil {
		return error(err)
	}

	var row blobRow
	stmt.BindResult(&row.blobref, &row.size)
	for {
		done, err := stmt.Fetch()
		if err != nil {
			return error(err)
		}
		if done {
			break
		}
		br := blobref.Parse(row.blobref)
		if br == nil {
			continue
		}
		dest <- blobref.SizedBlobRef{
			BlobRef: br,
			Size:    row.size,
		}
	}
	return nil
}
func (sh *Handler) serveDescribe(rw http.ResponseWriter, req *http.Request) {
	ret := jsonMap()
	defer httputil.ReturnJson(rw, ret)

	br := blobref.Parse(req.FormValue("blobref"))
	if br == nil {
		ret["error"] = "Missing or invalid 'blobref' param"
		ret["errorType"] = "input"
		return
	}

	dr := sh.NewDescribeRequest()
	dr.Describe(br, 4)
	dr.PopulateJSON(ret)
}
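// Request shape (a sketch; the exact URL prefix depends on where the search
// handler is mounted):
//
//	GET <search-prefix>/describe?blobref=sha1-xxx...
//
// The reply describes the blob and, to the depth of 4 used above, the blobs
// it references; failures come back as {"error": ..., "errorType": "input"}
// or "server", per the fields set here.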
func (mi *Indexer) ExistingFileSchemas(wholeDigest *blobref.BlobRef) (files []*blobref.BlobRef, err os.Error) {
	rs, err := mi.db.Query("SELECT schemaref FROM bytesfiles WHERE wholedigest=?", wholeDigest.String())
	if err != nil {
		return
	}
	defer rs.Close()

	ref := ""
	for rs.Next() {
		if err := rs.Scan(&ref); err != nil {
			return nil, err
		}
		// Skip malformed rows rather than appending a nil blobref.
		if br := blobref.Parse(ref); br != nil {
			files = append(files, br)
		}
	}
	return
}