func (sto *s3Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) (err error) {
	if faultStat.FailErr(&err) {
		return
	}
	// TODO: use sto.cache
	var wg syncutil.Group
	for _, br := range blobs {
		br := br
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()
			size, err := sto.s3Client.Stat(sto.dirPrefix+br.String(), sto.bucket)
			if err == nil {
				dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
				return nil
			}
			if err == os.ErrNotExist {
				return nil
			}
			return fmt.Errorf("error statting %v: %v", br, err)
		})
	}
	return wg.Err()
}

func (s *Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	// TODO: use cache
	var grp syncutil.Group
	gate := syncutil.NewGate(20) // arbitrary cap
	for i := range blobs {
		br := blobs[i]
		gate.Start()
		grp.Go(func() error {
			defer gate.Done()
			fi, err := s.b.GetFileInfoByName(s.dirPrefix + br.String())
			if err == b2.FileNotFoundError {
				return nil
			}
			if err != nil {
				return err
			}
			if br.HashName() == "sha1" && fi.ContentSHA1 != br.Digest() {
				return errors.New("b2: remote ContentSHA1 mismatch")
			}
			size := fi.ContentLength
			if size > constants.MaxBlobSize {
				return fmt.Errorf("blob %s stat size too large (%d)", br, size)
			}
			dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
			return nil
		})
	}
	return grp.Err()
}

func (ds *DiskStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	if len(blobs) == 0 {
		return nil
	}
	statSend := func(ref blob.Ref) error {
		fi, err := os.Stat(ds.blobPath(ref))
		switch {
		case err == nil && fi.Mode().IsRegular():
			dest <- blob.SizedRef{Ref: ref, Size: u32(fi.Size())}
			return nil
		case err != nil && !os.IsNotExist(err):
			return err
		}
		return nil
	}
	if len(blobs) == 1 {
		return statSend(blobs[0])
	}
	var wg syncutil.Group
	for _, ref := range blobs {
		ref := ref
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()
			return statSend(ref)
		})
	}
	return wg.Err()
}

// getInstalledTLS returns the TLS certificate and key stored on Google Cloud Storage for the
// instance defined in d.Conf.
//
// If either part of the TLS keypair doesn't exist, the error is os.ErrNotExist.
func (d *Deployer) getInstalledTLS() (certPEM, keyPEM []byte, err error) {
	ctx := context.Background()
	stoClient, err := cloudstorage.NewClient(ctx, cloud.WithBaseHTTP(d.Client))
	if err != nil {
		return nil, nil, fmt.Errorf("error creating Cloud Storage client to fetch TLS cert & key from new instance: %v", err)
	}
	getFile := func(name string) ([]byte, error) {
		sr, err := stoClient.Bucket(d.Conf.bucketBase()).Object(path.Join(configDir, name)).NewReader(ctx)
		if err == cloudstorage.ErrObjectNotExist {
			return nil, os.ErrNotExist
		}
		if err != nil {
			return nil, err
		}
		defer sr.Close()
		return ioutil.ReadAll(sr)
	}
	var grp syncutil.Group
	grp.Go(func() (err error) {
		certPEM, err = getFile(certFilename())
		return
	})
	grp.Go(func() (err error) {
		keyPEM, err = getFile(keyFilename())
		return
	})
	err = grp.Err()
	return
}

func (s *Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	// TODO: use cache
	// TODO(mpl): use context from caller, once one is available (issue 733)
	ctx := context.TODO()
	var grp syncutil.Group
	gate := syncutil.NewGate(20) // arbitrary cap
	for i := range blobs {
		br := blobs[i]
		gate.Start()
		grp.Go(func() error {
			defer gate.Done()
			attrs, err := s.client.Bucket(s.bucket).Object(s.dirPrefix + br.String()).Attrs(ctx)
			if err == storage.ErrObjectNotExist {
				return nil
			}
			if err != nil {
				return err
			}
			size := attrs.Size
			if size > constants.MaxBlobSize {
				return fmt.Errorf("blob %s stat size too large (%d)", br, size)
			}
			dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
			return nil
		})
	}
	return grp.Err()
}

// ctx will be canceled on failure
func (c *Collector) markItem(ctx context.Context, it Item, isRoot bool) error {
	if !isRoot {
		marked, err := c.Marker.IsMarked(it)
		if err != nil {
			return err
		}
		if marked {
			return nil
		}
	}
	if err := c.Marker.Mark(it); err != nil {
		return err
	}
	// FIXME(tgulacsi): is it a problem that we cannot cancel the parent?
	ctx, cancel := context.WithCancel(ctx)
	ch := make(chan Item, buffered)
	var grp syncutil.Group
	grp.Go(func() error {
		return c.ItemEnumerator.EnumerateItem(ctx, it, ch)
	})
	grp.Go(func() error {
		for it := range ch {
			if err := c.markItem(ctx, it, false); err != nil {
				return err
			}
		}
		return nil
	})
	if err := grp.Err(); err != nil {
		cancel()
		return err
	}
	return nil
}

func (s *Storage) RemoveBlobs(blobs []blob.Ref) error {
	if s.cache != nil {
		s.cache.RemoveBlobs(blobs)
	}
	gate := syncutil.NewGate(50) // arbitrary
	var grp syncutil.Group
	for i := range blobs {
		gate.Start()
		br := blobs[i]
		grp.Go(func() error {
			defer gate.Done()
			fi, err := s.b.GetFileInfoByName(s.dirPrefix + br.String())
			if err != nil {
				return err
			}
			if fi == nil {
				return nil
			}
			if br.HashName() == "sha1" && fi.ContentSHA1 != br.Digest() {
				return errors.New("b2: remote ContentSHA1 mismatch")
			}
			return s.cl.DeleteFile(fi.ID, fi.Name)
		})
	}
	return grp.Err()
}

func (h *memHub) NotifyBlobReceived(sb blob.SizedRef) error {
	h.mu.RLock()
	defer h.mu.RUnlock()

	br := sb.Ref

	// Synchronous hooks. If error, prevents notifying other
	// subscribers.
	var grp syncutil.Group
	for i := range h.hooks {
		hook := h.hooks[i]
		grp.Go(func() error { return hook(sb) })
	}
	if err := grp.Err(); err != nil {
		return err
	}

	// Global listeners
	for ch := range h.listeners {
		ch := ch
		go func() { ch <- br }()
	}

	// Blob-specific listeners
	for ch := range h.blobListeners[br] {
		ch := ch
		go func() { ch <- br }()
	}
	return nil
}

func (s *Storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	// TODO: use cache
	var grp syncutil.Group
	gate := syncutil.NewGate(20) // arbitrary cap
	for i := range blobs {
		br := blobs[i]
		gate.Start()
		grp.Go(func() error {
			defer gate.Done()
			size, exists, err := s.client.StatObject(
				&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()})
			if err != nil {
				return err
			}
			if !exists {
				return nil
			}
			if size > constants.MaxBlobSize {
				return fmt.Errorf("blob %s stat size too large (%d)", br, size)
			}
			dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
			return nil
		})
	}
	return grp.Err()
}

func (up *Uploader) Close() error {
	var grp syncutil.Group
	if up.haveCache != nil {
		grp.Go(up.haveCache.Close)
	}
	grp.Go(up.Client.Close)
	return grp.Err()
}

func projections(c context.Context, u User, days int) (int64, int64, error) {
	var projected, earned int64
	g := syncutil.Group{}
	g.Go(func() error {
		q := datastore.NewQuery("Task").
			Filter("Disabled = ", false).
			Filter("Assignee = ", u.Email)
		for t := q.Run(c); ; {
			var x Task
			_, err := t.Next(&x)
			if err == datastore.Done {
				return nil
			} else if err != nil {
				return err
			}
			log.Debugf(c, "Item worth %v every %v", x.Value, x.Period)
			projected += int64(float64(x.Value) * (float64(days) / float64(x.Period)))
		}
	})
	g.Go(func() error {
		q := datastore.NewQuery("LoggedTask").
			Filter("User = ", u.Email).
			Filter("Completed >=", time.Now().Add(-24*time.Hour*time.Duration(days)))
		for t := q.Run(c); ; {
			var x LoggedTask
			_, err := t.Next(&x)
			if err == datastore.Done {
				return nil
			} else if err != nil {
				return err
			}
			log.Debugf(c, "Logged task worth %v", x.Amount)
			earned += int64(x.Amount)
		}
	})
	g.Wait()
	return projected, earned, g.Err()
}

// projectHasInstance checks for all the possible zones if there's already an instance for the project.
// It returns the name of the zone at the first instance it finds, if any.
func (d *Deployer) projectHasInstance() (zone string, err error) {
	s, err := compute.New(d.Client)
	if err != nil {
		return "", err
	}
	// TODO(mpl): make use of the handler's cached zones.
	zl, err := compute.NewZonesService(s).List(d.Conf.Project).Do()
	if err != nil {
		return "", fmt.Errorf("could not get a list of zones: %v", err)
	}
	computeService, _ := compute.New(d.Client)
	var zoneOnce sync.Once
	var grp syncutil.Group
	errc := make(chan error, 1)
	zonec := make(chan string, 1)
	timeout := time.NewTimer(30 * time.Second)
	defer timeout.Stop()
	for _, z := range zl.Items {
		z := z
		grp.Go(func() error {
			list, err := computeService.Instances.List(d.Conf.Project, z.Name).Do()
			if err != nil {
				return fmt.Errorf("could not list existing instances: %v", err)
			}
			if len(list.Items) > 0 {
				zoneOnce.Do(func() {
					zonec <- z.Name
				})
			}
			return nil
		})
	}
	go func() {
		errc <- grp.Err()
	}()
	// We block until either an instance was found in a zone, or all the instance
	// listing is done. Or we timed-out.
	select {
	case err = <-errc:
		return "", err
	case zone = <-zonec:
		// We voluntarily ignore any listing error if we found at least one instance
		// because that's what we primarily want to report about.
		return zone, nil
	case <-timeout.C:
		return "", errors.New("timed out")
	}
}

func (s *Storage) RemoveBlobs(blobs []blob.Ref) error {
	if s.cache != nil {
		s.cache.RemoveBlobs(blobs)
	}
	gate := syncutil.NewGate(50) // arbitrary
	var grp syncutil.Group
	for i := range blobs {
		gate.Start()
		br := blobs[i]
		grp.Go(func() error {
			defer gate.Done()
			return s.client.DeleteObject(&googlestorage.Object{Bucket: s.bucket, Key: s.dirPrefix + br.String()})
		})
	}
	return grp.Err()
}

func (sto *s3Storage) RemoveBlobs(blobs []blob.Ref) error {
	if sto.cache != nil {
		sto.cache.RemoveBlobs(blobs)
	}
	var wg syncutil.Group
	for _, blob := range blobs {
		blob := blob
		removeGate.Start()
		wg.Go(func() error {
			defer removeGate.Done()
			return sto.s3Client.Delete(sto.bucket, sto.dirPrefix+blob.String())
		})
	}
	return wg.Err()
}

// SetAttrs2 sets multiple attributes and returns whether there were
// any changes. The provided keyval should be an even number of
// alternating key/value pairs to set.
func (o *Object) SetAttrs2(keyval ...string) (changes bool, err error) {
	if len(keyval)%2 == 1 {
		panic("importer.SetAttrs: odd argument count")
	}
	g := syncutil.Group{}
	for i := 0; i < len(keyval); i += 2 {
		key, val := keyval[i], keyval[i+1]
		if val != o.Attr(key) {
			changes = true
			g.Go(func() error {
				return o.SetAttr(key, val)
			})
		}
	}
	return changes, g.Err()
}

func (m *mongoStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	var wg syncutil.Group
	for _, b := range blobs {
		b := b
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()
			var doc blobDoc
			if err := m.c.Find(bson.M{"key": b.String()}).Select(bson.M{"size": 1}).One(&doc); err != nil {
				return fmt.Errorf("error statting %v: %v", b, err)
			}
			dest <- blob.SizedRef{Ref: b, Size: doc.Size}
			return nil
		})
	}
	return wg.Err()
}

func (m *mongoStorage) RemoveBlobs(blobs []blob.Ref) error {
	var wg syncutil.Group
	for _, blob := range blobs {
		blob := blob
		removeGate.Start()
		wg.Go(func() error {
			defer removeGate.Done()
			err := m.c.Remove(bson.M{"key": blob.String()})
			if err == mgo.ErrNotFound {
				return nil
			}
			return err
		})
	}
	return wg.Err()
}

func (n *rootsDir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
	if n.isRO() {
		return nil, fuse.EPERM
	}

	name := req.Name

	// Create a Permanode for the root.
	pr, err := n.fs.client.UploadNewPermanode()
	if err != nil {
		log.Printf("rootsDir.Create(%q): %v", name, err)
		return nil, fuse.EIO
	}

	var grp syncutil.Group
	// Add a camliRoot attribute to the root permanode.
	grp.Go(func() (err error) {
		claim := schema.NewSetAttributeClaim(pr.BlobRef, "camliRoot", name)
		_, err = n.fs.client.UploadAndSignBlob(claim)
		return
	})
	// Set the title of the root permanode to the root name.
	grp.Go(func() (err error) {
		claim := schema.NewSetAttributeClaim(pr.BlobRef, "title", name)
		_, err = n.fs.client.UploadAndSignBlob(claim)
		return
	})
	if err := grp.Err(); err != nil {
		log.Printf("rootsDir.Create(%q): %v", name, err)
		return nil, fuse.EIO
	}

	nod := &mutDir{
		fs:        n.fs,
		permanode: pr.BlobRef,
		name:      name,
		xattrs:    map[string][]byte{},
	}
	n.mu.Lock()
	n.m[name] = pr.BlobRef
	n.mu.Unlock()

	return nod, nil
}

func (s *storage) RemoveBlobs(blobs []blob.Ref) error {
	// Plan:
	//  -- delete from small (if it's there)
	//  -- if in big, update the meta index to note that it's there, but deleted.
	//  -- fetch big's zip file (constructed from a ReaderAt that is all dummy zeros +
	//     the zip's TOC only, relying on big being a SubFetcher, and keeping info in
	//     the meta about the offset of the TOC+total size of each big's zip)
	//  -- iterate over the zip's blobs (at some point). If all are marked deleted, actually RemoveBlob
	//     on big to delete the full zip and then delete all the meta rows.
	var (
		mu       sync.Mutex
		unpacked []blob.Ref
		packed   []blob.Ref
		large    = map[blob.Ref]bool{} // the large blobs that packed are in
	)
	var grp syncutil.Group
	delGate := syncutil.NewGate(removeLookups)
	for _, br := range blobs {
		br := br
		delGate.Start()
		grp.Go(func() error {
			defer delGate.Done()
			m, err := s.getMetaRow(br)
			if err != nil {
				return err
			}
			mu.Lock()
			defer mu.Unlock()
			if m.isPacked() {
				packed = append(packed, br)
				large[m.largeRef] = true
			} else {
				unpacked = append(unpacked, br)
			}
			return nil
		})
	}
	if err := grp.Err(); err != nil {
		return err
	}
	if len(unpacked) > 0 {
		grp.Go(func() error {
			return s.small.RemoveBlobs(unpacked)
		})
	}
	if len(packed) > 0 {
		grp.Go(func() error {
			bm := s.meta.BeginBatch()
			now := time.Now()
			for zipRef := range large {
				bm.Set("d:"+zipRef.String(), fmt.Sprint(now.Unix()))
			}
			for _, br := range packed {
				bm.Delete("b:" + br.String())
			}
			return s.meta.CommitBatch(bm)
		})
	}
	return grp.Err()
}

func (s *storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) (err error) {
	var wg syncutil.Group
	for _, br := range blobs {
		br := br
		statGate.Start()
		wg.Go(func() error {
			defer statGate.Done()
			m, err := s.meta(br)
			if err == nil {
				dest <- m.SizedRef(br)
				return nil
			}
			if err == os.ErrNotExist {
				return nil
			}
			return err
		})
	}
	return wg.Err()
}

func (s *storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
	if len(blobs) == 0 {
		return nil
	}
	var (
		grp        syncutil.Group
		trySmallMu sync.Mutex
		trySmall   []blob.Ref
	)
	statGate := syncutil.NewGate(50) // arbitrary
	for _, br := range blobs {
		br := br
		statGate.Start()
		grp.Go(func() error {
			defer statGate.Done()
			m, err := s.getMetaRow(br)
			if err != nil {
				return err
			}
			if m.exists {
				dest <- blob.SizedRef{Ref: br, Size: m.size}
			} else {
				trySmallMu.Lock()
				trySmall = append(trySmall, br)
				trySmallMu.Unlock()
			}
			return nil
		})
	}
	if err := grp.Err(); err != nil {
		return err
	}
	if len(trySmall) == 0 {
		return nil
	}
	return s.small.StatBlobs(dest, trySmall)
}

func (s *Storage) RemoveBlobs(blobs []blob.Ref) error {
	if s.cache != nil {
		s.cache.RemoveBlobs(blobs)
	}
	// TODO(mpl): use context from caller, once one is available (issue 733)
	ctx := context.TODO()
	gate := syncutil.NewGate(50) // arbitrary
	var grp syncutil.Group
	for i := range blobs {
		gate.Start()
		br := blobs[i]
		grp.Go(func() error {
			defer gate.Done()
			err := s.client.Bucket(s.bucket).Object(s.dirPrefix + br.String()).Delete(ctx)
			if err == storage.ErrObjectNotExist {
				return nil
			}
			return err
		})
	}
	return grp.Err()
}

// RemoveBlobs removes the blobs from the index and pads their data with zero bytes.
func (s *storage) RemoveBlobs(blobs []blob.Ref) error {
	batch := s.index.BeginBatch()
	var wg syncutil.Group
	for _, br := range blobs {
		br := br
		removeGate.Start()
		batch.Delete(br.String())
		wg.Go(func() error {
			defer removeGate.Done()
			if err := s.delete(br); err != nil && err != os.ErrNotExist {
				return err
			}
			return nil
		})
	}
	err1 := wg.Err()
	err2 := s.index.CommitBatch(batch)
	if err1 != nil {
		return err1
	}
	return err2
}

func (s *storage) zipPartsInUse(br blob.Ref) ([]blob.Ref, error) {
	var (
		mu    sync.Mutex
		inUse []blob.Ref
	)
	var grp syncutil.Group
	gate := syncutil.NewGate(20) // arbitrary constant
	err := s.foreachZipBlob(br, func(bap BlobAndPos) error {
		gate.Start()
		grp.Go(func() error {
			defer gate.Done()
			mr, err := s.getMetaRow(bap.Ref)
			if err != nil {
				return err
			}
			if mr.isPacked() {
				mu.Lock()
				inUse = append(inUse, mr.largeRef)
				mu.Unlock()
			}
			return nil
		})
		return nil
	})
	if os.IsNotExist(err) {
		// An already-deleted blob from large isn't considered
		// to be in-use.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	if err := grp.Err(); err != nil {
		return nil, err
	}
	return inUse, nil
}

func (r *run) importTweetsFromZip(userID string, zr *zip.Reader) error {
	log.Printf("Processing zip file with %d files", len(zr.File))

	tweetsNode, err := r.getTopLevelNode("tweets")
	if err != nil {
		return err
	}

	var (
		gate = syncutil.NewGate(tweetsAtOnce)
		grp  syncutil.Group
	)
	total := 0
	for _, zf := range zr.File {
		if !(strings.HasPrefix(zf.Name, "data/js/tweets/2") && strings.HasSuffix(zf.Name, ".js")) {
			continue
		}
		tweets, err := tweetsFromZipFile(zf)
		if err != nil {
			return fmt.Errorf("error reading tweets from %s: %v", zf.Name, err)
		}

		for i := range tweets {
			total++
			tweet := tweets[i]
			gate.Start()
			grp.Go(func() error {
				defer gate.Done()
				_, err := r.importTweet(tweetsNode, tweet, false)
				return err
			})
		}
	}
	err = grp.Err()
	log.Printf("zip import of tweets: %d total, err = %v", total, err)
	return err
}

func adminListUsers(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	q := datastore.NewQuery("User").Order("Name")
	results := []User{}
	fillKeyQuery(c, q, &results)

	g := syncutil.Group{}
	for i := range results {
		i := i
		g.Go(func() error {
			var err error
			results[i].Projected, results[i].Earned, err = projections(c, results[i], 90)
			return err
		})
	}
	if err := g.Err(); err != nil {
		log.Errorf(c, "Error getting projections: %v", err)
	}
	mustEncode(c, w, results)
}

func (c *Corpus) scanFromStorage(s sorted.KeyValue) error {
	c.building = true

	var ms0 *runtime.MemStats
	if logCorpusStats {
		ms0 = memstats()
		log.Printf("Slurping corpus to memory from index...")
		log.Printf("Slurping corpus to memory from index... (1/%d: meta rows)", len(slurpPrefixes))
	}

	scanmu := new(sync.Mutex)

	// We do the "meta" rows first, before the prefixes below, because it
	// populates the blobs map (used for blobref interning) and the camBlobs
	// map (used for hinting the size of other maps)
	if err := c.scanPrefix(scanmu, s, "meta:"); err != nil {
		return err
	}
	c.files = make(map[blob.Ref]camtypes.FileInfo, len(c.camBlobs["file"]))
	c.permanodes = make(map[blob.Ref]*PermanodeMeta, len(c.camBlobs["permanode"]))
	cpu0 := osutil.CPUUsage()

	var grp syncutil.Group
	for i, prefix := range slurpPrefixes[1:] {
		if logCorpusStats {
			log.Printf("Slurping corpus to memory from index... (%d/%d: prefix %q)", i+2, len(slurpPrefixes),
				prefix[:len(prefix)-1])
		}
		prefix := prefix
		grp.Go(func() error {
			return c.scanPrefix(scanmu, s, prefix)
		})
	}
	if err := grp.Err(); err != nil {
		return err
	}

	// Post-load optimizations and restoration of invariants.
	for _, pm := range c.permanodes {
		// Restore invariants violated during building:
		pm.restoreInvariants()

		// And intern some stuff.
		for _, cl := range pm.Claims {
			cl.BlobRef = c.br(cl.BlobRef)
			cl.Signer = c.br(cl.Signer)
			cl.Permanode = c.br(cl.Permanode)
			cl.Target = c.br(cl.Target)
		}
	}
	c.brOfStr = nil // drop this now.
	c.building = false
	// log.V(1).Printf("interned blob.Ref = %d", c.brInterns)
	if err := c.initDeletes(s); err != nil {
		return fmt.Errorf("Could not populate the corpus deletes: %v", err)
	}

	if logCorpusStats {
		cpu := osutil.CPUUsage() - cpu0
		ms1 := memstats()
		memUsed := ms1.Alloc - ms0.Alloc
		if ms1.Alloc < ms0.Alloc {
			memUsed = 0
		}
		log.Printf("Corpus stats: %.3f MiB mem: %d blobs (%.3f GiB) (%d schema (%d permanode, %d file (%d image), ...)",
			float64(memUsed)/(1<<20),
			len(c.blobs),
			float64(c.sumBlobBytes)/(1<<30),
			c.numSchemaBlobs(),
			len(c.permanodes),
			len(c.files),
			len(c.imageInfo))
		log.Printf("Corpus scanning CPU usage: %v", cpu)
	}

	return nil
}

// Collect performs a garbage collection.
func (c *Collector) Collect(ctx context.Context) (err error) {
	if c.World == nil {
		return errors.New("no World")
	}
	if c.Marker == nil {
		return errors.New("no Marker")
	}
	if c.Roots == nil {
		return errors.New("no Roots")
	}
	if c.Sweeper == nil {
		return errors.New("no Sweeper")
	}
	if c.ItemEnumerator == nil {
		return errors.New("no ItemEnumerator")
	}
	if c.Deleter == nil {
		return errors.New("no Deleter")
	}
	if err := c.World.Stop(); err != nil {
		return err
	}
	defer func() {
		startErr := c.World.Start()
		if err == nil {
			err = startErr
		}
	}()

	// Mark.
	roots := make(chan Item, buffered)
	markCtx, cancelMark := context.WithCancel(ctx)
	var marker syncutil.Group
	marker.Go(func() error {
		defer cancelMark()
		for it := range roots {
			if err := c.markItem(markCtx, it, true); err != nil {
				return err
			}
		}
		return nil
	})
	marker.Go(func() error {
		return c.Roots.Enumerate(markCtx, roots)
	})
	if err := marker.Err(); err != nil {
		return fmt.Errorf("Mark failure: %v", err)
	}

	// Sweep.
	all := make(chan Item, buffered)
	sweepCtx, cancelSweep := context.WithCancel(ctx)
	var sweeper syncutil.Group
	sweeper.Go(func() error {
		return c.Sweeper.Enumerate(sweepCtx, all)
	})
	sweeper.Go(func() error {
		// Cancel the sweep enumeration once we stop consuming.
		defer cancelSweep()
		for it := range all {
			ok, err := c.Marker.IsMarked(it)
			if err != nil {
				return err
			}
			if !ok {
				if err := c.Deleter.Delete(it); err != nil {
					return err
				}
			}
		}
		return nil
	})
	if err := sweeper.Err(); err != nil {
		return fmt.Errorf("Sweep failure: %v", err)
	}
	return nil
}

// setFirewall adds the firewall rules needed for ports 80 & 443 to the default network.
func (d *Deployer) setFirewall(ctx context.Context, computeService *compute.Service) error {
	defaultNet, err := computeService.Networks.Get(d.Conf.Project, "default").Do()
	if err != nil {
		return fmt.Errorf("error getting default network: %v", err)
	}
	needRules := map[string]compute.Firewall{
		"default-allow-http": compute.Firewall{
			Name:         "default-allow-http",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"http-server"},
			Allowed:      []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"80"}}},
			Network:      defaultNet.SelfLink,
		},
		"default-allow-https": compute.Firewall{
			Name:         "default-allow-https",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"https-server"},
			Allowed:      []*compute.FirewallAllowed{{IPProtocol: "tcp", Ports: []string{"443"}}},
			Network:      defaultNet.SelfLink,
		},
	}
	rules, err := computeService.Firewalls.List(d.Conf.Project).Do()
	if err != nil {
		return fmt.Errorf("error listing rules: %v", err)
	}
	for _, it := range rules.Items {
		delete(needRules, it.Name)
	}
	if len(needRules) == 0 {
		return nil
	}
	if Verbose {
		d.Printf("Need to create rules: %v", needRules)
	}
	var wg syncutil.Group
	for name, rule := range needRules {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		name, rule := name, rule
		wg.Go(func() error {
			if Verbose {
				d.Printf("Creating rule %s", name)
			}
			r, err := computeService.Firewalls.Insert(d.Conf.Project, &rule).Do()
			if err != nil {
				return fmt.Errorf("error creating rule %s: %v", name, err)
			}
			if Verbose {
				d.Printf("Created rule %s: %+v", name, r)
			}
			return nil
		})
	}
	return wg.Err()
}

// requires n.mu is held
func (n *rootsDir) condRefresh(ctx context.Context) error {
	if n.lastQuery.After(time.Now().Add(-refreshTime)) {
		return nil
	}
	log.Printf("fs.roots: querying")

	var rootRes, impRes *search.WithAttrResponse
	var grp syncutil.Group
	grp.Go(func() (err error) {
		rootRes, err = n.fs.client.GetPermanodesWithAttr(&search.WithAttrRequest{N: 100, Attr: "camliRoot"})
		return
	})
	grp.Go(func() (err error) {
		impRes, err = n.fs.client.GetPermanodesWithAttr(&search.WithAttrRequest{N: 100, Attr: "camliImportRoot"})
		return
	})
	if err := grp.Err(); err != nil {
		log.Printf("fs.roots: error refreshing permanodes: %v", err)
		return fuse.EIO
	}

	n.m = make(map[string]blob.Ref)
	if n.children == nil {
		n.children = make(map[string]fs.Node)
	}

	dr := &search.DescribeRequest{
		Depth: 1,
	}
	for _, wi := range rootRes.WithAttr {
		dr.BlobRefs = append(dr.BlobRefs, wi.Permanode)
	}
	for _, wi := range impRes.WithAttr {
		dr.BlobRefs = append(dr.BlobRefs, wi.Permanode)
	}
	if len(dr.BlobRefs) == 0 {
		return nil
	}
	dres, err := n.fs.client.Describe(ctx, dr)
	if err != nil {
		log.Printf("Describe failure: %v", err)
		return fuse.EIO
	}

	// Roots
	currentRoots := map[string]bool{}
	for _, wi := range rootRes.WithAttr {
		pn := wi.Permanode
		db := dres.Meta[pn.String()]
		if db != nil && db.Permanode != nil {
			name := db.Permanode.Attr.Get("camliRoot")
			if name != "" {
				currentRoots[name] = true
				n.m[name] = pn
			}
		}
	}

	// Remove any children objects we have mapped that are no
	// longer relevant.
	for name := range n.children {
		if !currentRoots[name] {
			delete(n.children, name)
		}
	}

	// Importers (mapped as roots for now)
	for _, wi := range impRes.WithAttr {
		pn := wi.Permanode
		db := dres.Meta[pn.String()]
		if db != nil && db.Permanode != nil {
			name := db.Permanode.Attr.Get("camliImportRoot")
			if name != "" {
				name = strings.Replace(name, ":", "-", -1)
				name = strings.Replace(name, "/", "-", -1)
				n.m["importer-"+name] = pn
			}
		}
	}

	n.lastQuery = time.Now()
	return nil
}
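
// The snippets above all share one idiom: a syncutil.Gate bounds how many
// goroutines run at once, and a syncutil.Group collects the first error.
// Below is a minimal, self-contained sketch of that pattern, not taken from
// any of the functions above. It assumes the go4.org/syncutil import path;
// the checkOne helper and the limit of 20 are made up for illustration.
package main

import (
	"fmt"

	"go4.org/syncutil"
)

// checkOne is a hypothetical per-item operation that may fail.
func checkOne(item string) error {
	if item == "" {
		return fmt.Errorf("empty item")
	}
	return nil
}

// checkAll runs checkOne over all items with bounded concurrency and
// returns the first error encountered, if any.
func checkAll(items []string) error {
	gate := syncutil.NewGate(20) // at most 20 checks in flight
	var grp syncutil.Group
	for _, it := range items {
		it := it     // capture the loop variable for the closure
		gate.Start() // blocks while the gate is full
		grp.Go(func() error {
			defer gate.Done()
			return checkOne(it)
		})
	}
	return grp.Err() // waits for all goroutines, returns the first error
}

func main() {
	if err := checkAll([]string{"a", "b", "c"}); err != nil {
		fmt.Println("error:", err)
	}
}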