func (sto *appengineStorage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	loan := ctxPool.Get()
	defer loan.Return()
	actx := loan

	prefix := sto.namespace + "|"
	keyBegin := datastore.NewKey(actx, memKind, prefix+after, 0, nil)
	keyEnd := datastore.NewKey(actx, memKind, sto.namespace+"~", 0, nil)

	q := datastore.NewQuery(memKind).Limit(int(limit)).Filter("__key__>", keyBegin).Filter("__key__<", keyEnd)
	it := q.Run(actx)
	var row memEnt
	for {
		key, err := it.Next(&row)
		if err == datastore.Done {
			break
		}
		if err != nil {
			return err
		}
		select {
		case dest <- blob.SizedRef{blob.ParseOrZero(key.StringID()[len(prefix):]), row.Size}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
func (ns *nsto) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	done := ctx.Done()

	it := ns.inventory.Find(after, "")
	first := true
	for limit > 0 && it.Next() {
		if first {
			first = false
			if after != "" && it.Key() == after {
				continue
			}
		}
		br, ok := blob.ParseBytes(it.KeyBytes())
		size, err := strutil.ParseUintBytes(it.ValueBytes(), 10, 32)
		if !ok || err != nil {
			log.Printf("Bogus namespace key %q / value %q", it.Key(), it.Value())
			continue
		}
		select {
		case dest <- blob.SizedRef{br, uint32(size)}:
		case <-done:
			return context.ErrCanceled
		}
		limit--
	}
	if err := it.Close(); err != nil {
		return err
	}
	return nil
}
// EnumeratePermanodesLastModifiedLocked sends all permanodes, sorted by most recently modified first, to ch,
// or until ctx is done.
//
// The Corpus must already be locked with RLock.
func (c *Corpus) EnumeratePermanodesLastModifiedLocked(ctx *context.Context, ch chan<- camtypes.BlobMeta) error {
	defer close(ch)
	// TODO: keep these sorted in memory
	pns := make([]pnAndTime, 0, len(c.permanodes))
	for pn := range c.permanodes {
		if modt, ok := c.PermanodeModtimeLocked(pn); ok {
			pns = append(pns, pnAndTime{pn, modt})
		}
	}
	sort.Sort(sort.Reverse(byPermanodeModtime(pns)))
	for _, cand := range pns {
		bm := c.blobs[cand.pn]
		if bm == nil {
			continue
		}
		select {
		case ch <- *bm:
			continue
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
// ctx will be canceled on failure
func (c *Collector) markItem(ctx *context.Context, it Item, isRoot bool) error {
	if !isRoot {
		marked, err := c.Marker.IsMarked(it)
		if err != nil {
			return err
		}
		if marked {
			return nil
		}
	}
	if err := c.Marker.Mark(it); err != nil {
		return err
	}

	ch := make(chan Item, buffered)
	var grp syncutil.Group
	grp.Go(func() error {
		return c.ItemEnumerator.EnumerateItem(ctx, it, ch)
	})
	grp.Go(func() error {
		for it := range ch {
			if err := c.markItem(ctx, it, false); err != nil {
				return err
			}
		}
		return nil
	})
	if err := grp.Err(); err != nil {
		ctx.Cancel()
		return err
	}
	return nil
}
func (sto *s3Storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) (err error) {
	defer close(dest)
	if faultEnumerate.FailErr(&err) {
		return
	}
	startAt := after
	if _, ok := blob.Parse(after); ok {
		startAt = nextStr(after)
	}
	objs, err := sto.s3Client.ListBucket(sto.bucket, startAt, limit)
	if err != nil {
		log.Printf("s3 ListBucket: %v", err)
		return err
	}
	for _, obj := range objs {
		if obj.Key == after {
			continue
		}
		br, ok := blob.Parse(obj.Key)
		if !ok {
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: uint32(obj.Size)}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
func (s *storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) (err error) {
	defer close(dest)
	t := s.index.Find(after, "")
	defer func() {
		closeErr := t.Close()
		if err == nil {
			err = closeErr
		}
	}()
	for i := 0; i < limit && t.Next(); {
		key := t.Key()
		if key <= after {
			// EnumerateBlobs' semantics are '>', but sorted.KeyValue.Find is '>='.
			continue
		}
		br, ok := blob.Parse(key)
		if !ok {
			return fmt.Errorf("diskpacked: couldn't parse index key %q", key)
		}
		m, ok := parseBlobMeta(t.Value())
		if !ok {
			return fmt.Errorf("diskpacked: couldn't parse index value %q: %q", key, t.Value())
		}
		select {
		case dest <- m.SizedRef(br):
		case <-ctx.Done():
			return context.ErrCanceled
		}
		i++
	}
	return nil
}
func (ix *Index) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) (err error) {
	defer close(dest)
	it := ix.s.Find("have:"+after, "have~")
	defer func() {
		closeErr := it.Close()
		if err == nil {
			err = closeErr
		}
	}()
	n := 0
	for n < limit && it.Next() {
		k := it.Key()
		if k <= after {
			continue
		}
		if !strings.HasPrefix(k, "have:") {
			break
		}
		n++
		br, ok := blob.Parse(k[len("have:"):])
		size, err := strconv.ParseUint(it.Value(), 10, 32)
		if ok && err == nil {
			select {
			case dest <- blob.SizedRef{br, uint32(size)}:
			case <-ctx.Done():
				return context.ErrCanceled
			}
		}
	}
	return nil
}
func (s *Storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	s.mu.RLock()
	defer s.mu.RUnlock()

	// TODO(bradfitz): care about keeping this sorted like we used
	// to? I think it was more expensive than it was worth before,
	// since maintaining it was more costly than how often it was
	// used. But perhaps it'd make sense to maintain it lazily:
	// construct it on EnumerateBlobs but invalidate it everywhere
	// else. Probably doesn't matter much.
	sorted := make([]blob.Ref, 0, len(s.m))
	for br := range s.m {
		sorted = append(sorted, br)
	}
	sort.Sort(blob.ByRef(sorted))

	n := 0
	for _, br := range sorted {
		if after != "" && br.String() <= after {
			continue
		}
		select {
		case dest <- blob.SizedRef{br, uint32(len(s.m[br]))}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
		n++
		if limit > 0 && n == limit {
			break
		}
	}
	return nil
}
func (m *mongoStorage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	var b blobDoc
	var qry bson.M
	if after != "" {
		qry = bson.M{"key": bson.M{"$gt": after}}
	}
	iter := m.c.Find(qry).Limit(limit).Select(bson.M{"key": 1, "size": 1}).Sort("key").Iter()
	for iter.Next(&b) {
		br, ok := blob.Parse(b.Key)
		if !ok {
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: uint32(b.Size)}:
		case <-ctx.Done():
			// Close the iterator but ignore the error value since we are already cancelling.
			if err := iter.Close(); err != nil {
				log.Printf("Error closing iterator after enumerating: %v", err)
			}
			return context.ErrCanceled
		}
	}
	if err := iter.Close(); err != nil {
		return err
	}
	return nil
}
// Lookup returns rectangles for the given address. Currently the only
// implementation is the Google geocoding service.
func Lookup(ctx *context.Context, address string) ([]Rect, error) {
	mu.RLock()
	rects, ok := cache[address]
	mu.RUnlock()
	if ok {
		return rects, nil
	}

	rectsi, err := sf.Do(address, func() (interface{}, error) {
		// TODO: static data files from OpenStreetMap, Wikipedia, etc?
		urlStr := "https://maps.googleapis.com/maps/api/geocode/json?address=" + url.QueryEscape(address) + "&sensor=false"
		res, err := ctx.HTTPClient().Get(urlStr)
		if err != nil {
			return nil, err
		}
		defer httputil.CloseBody(res.Body)
		rects, err := decodeGoogleResponse(res.Body)
		log.Printf("Google geocode lookup (%q) = %#v, %v", address, rects, err)
		if err == nil {
			mu.Lock()
			cache[address] = rects
			mu.Unlock()
		}
		return rects, err
	})
	if err != nil {
		return nil, err
	}
	return rectsi.([]Rect), nil
}
func (s *Storage) StreamBlobs(ctx *context.Context, dest chan<- blobserver.BlobAndToken, contToken string) error {
	// for this impl, contToken is >= blobref.String()
	defer close(dest)
	s.mu.RLock()
	defer s.mu.RUnlock()

	sorted := make([]blob.Ref, 0, len(s.m))
	for br := range s.m {
		sorted = append(sorted, br)
	}
	sort.Sort(blob.ByRef(sorted))

	for _, br := range sorted {
		if br.String() < contToken {
			continue
		}
		select {
		case <-ctx.Done():
			return context.ErrCanceled
		case dest <- blobserver.BlobAndToken{
			Blob: blob.NewBlob(br, uint32(len(s.m[br])), func() types.ReadSeekCloser {
				return blob.NewLazyReadSeekCloser(s, br)
			}),
			Token: br.String(),
		}:
		}
	}
	return nil
}
func (s *storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	iter := s.index.Find(after, "")
	n := 0
	for iter.Next() {
		if iter.Key() == after {
			continue
		}
		br, ok := blob.Parse(iter.Key())
		if !ok {
			panic("Bogus encrypt index key: " + iter.Key())
		}
		plainSize, ok := parseMetaValuePlainSize(iter.Value())
		if !ok {
			panic("Bogus encrypt index value: " + iter.Value())
		}
		select {
		case dest <- blob.SizedRef{br, plainSize}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
		n++
		if limit != 0 && n >= limit {
			break
		}
	}
	return iter.Close()
}
// setFirewall adds the firewall rules needed for ports 80 & 443 to the default network.
func (d *Deployer) setFirewall(ctx *context.Context, computeService *compute.Service) error {
	defaultNet, err := computeService.Networks.Get(d.Conf.Project, "default").Do()
	if err != nil {
		return fmt.Errorf("error getting default network: %v", err)
	}
	needRules := map[string]compute.Firewall{
		"default-allow-http": compute.Firewall{
			Name:         "default-allow-http",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"http-server"},
			Allowed:      []*compute.FirewallAllowed{{"tcp", []string{"80"}}},
			Network:      defaultNet.SelfLink,
		},
		"default-allow-https": compute.Firewall{
			Name:         "default-allow-https",
			SourceRanges: []string{"0.0.0.0/0"},
			SourceTags:   []string{"https-server"},
			Allowed:      []*compute.FirewallAllowed{{"tcp", []string{"443"}}},
			Network:      defaultNet.SelfLink,
		},
	}
	rules, err := computeService.Firewalls.List(d.Conf.Project).Do()
	if err != nil {
		return fmt.Errorf("error listing rules: %v", err)
	}
	for _, it := range rules.Items {
		delete(needRules, it.Name)
	}
	if len(needRules) == 0 {
		return nil
	}
	if Verbose {
		log.Printf("Need to create rules: %v", needRules)
	}
	var wg syncutil.Group
	for name, rule := range needRules {
		if ctx.IsCanceled() {
			return context.ErrCanceled
		}
		name, rule := name, rule
		wg.Go(func() error {
			if Verbose {
				log.Printf("Creating rule %s", name)
			}
			r, err := computeService.Firewalls.Insert(d.Conf.Project, &rule).Do()
			if err != nil {
				return fmt.Errorf("error creating rule %s: %v", name, err)
			}
			if Verbose {
				log.Printf("Created rule %s: %+v", name, r)
			}
			return nil
		})
	}
	return wg.Err()
}
// contToken is of forms:
//    ""             : start from beginning of zip files
//    "sha1-xxxxx:n" : start at == (sha1-xxxx, file n), else next zip
func (st largeBlobStreamer) StreamBlobs(ctx *context.Context, dest chan<- blobserver.BlobAndToken, contToken string) (err error) {
	defer close(dest)
	s := st.sto
	large := s.large

	var after string // for enumerateAll
	var skipFiles int
	var firstRef blob.Ref // first we care about

	if contToken != "" {
		f := strings.SplitN(contToken, ":", 2)
		if len(f) != 2 {
			return errContToken
		}
		firstRef, _ = blob.Parse(f[0])
		skipFiles, err = strconv.Atoi(f[1])
		if !firstRef.Valid() || err != nil {
			return errContToken
		}
		// EnumerateAllFrom takes a cursor that's greater, but
		// we want to start _at_ firstRef. So start
		// enumerating right before our target.
		after = firstRef.StringMinusOne()
	}
	return blobserver.EnumerateAllFrom(ctx, large, after, func(sb blob.SizedRef) error {
		if firstRef.Valid() {
			if sb.Ref.Less(firstRef) {
				// Skip.
				return nil
			}
			if firstRef.Less(sb.Ref) {
				skipFiles = 0 // reset it.
			}
		}
		fileN := 0
		return s.foreachZipBlob(sb.Ref, func(bap BlobAndPos) error {
			if skipFiles > 0 {
				skipFiles--
				fileN++
				return nil
			}
			select {
			case dest <- blobserver.BlobAndToken{
				Blob: blob.NewBlob(bap.Ref, bap.Size, func() types.ReadSeekCloser {
					return blob.NewLazyReadSeekCloser(s, bap.Ref)
				}),
				Token: fmt.Sprintf("%s:%d", sb.Ref, fileN),
			}:
				fileN++
				return nil
			case <-ctx.Done():
				return context.ErrCanceled
			}
		})
	})
}
// doDest is false for source and true for dest.
func (sh *SyncHandler) startValidatePrefix(ctx *context.Context, pfx string, doDest bool) (<-chan blob.SizedRef, <-chan error) {
	var e blobserver.BlobEnumerator
	if doDest {
		e = sh.to
	} else {
		e = sh.from
	}
	c := make(chan blob.SizedRef, 64)
	errc := make(chan error, 1)
	go func() {
		defer close(c)
		var last string // last blobref seen; to double check storage's enumeration works correctly.
		err := blobserver.EnumerateAllFrom(ctx, e, pfx, func(sb blob.SizedRef) error {
			// Just double-check that the storage target is returning sorted results correctly.
			brStr := sb.Ref.String()
			if brStr < pfx {
				log.Fatalf("Storage target %T enumerate not behaving: %q < requested prefix %q", e, brStr, pfx)
			}
			if last != "" && last >= brStr {
				log.Fatalf("Storage target %T enumerate not behaving: previous %q >= current %q", e, last, brStr)
			}
			last = brStr

			// TODO: could add a more efficient method on blob.Ref to do this,
			// that doesn't involve calling String().
			if !strings.HasPrefix(brStr, pfx) {
				return errNotPrefix
			}
			select {
			case c <- sb:
				sh.mu.Lock()
				if doDest {
					sh.vdestCount++
					sh.vdestBytes += int64(sb.Size)
				} else {
					sh.vsrcCount++
					sh.vsrcBytes += int64(sb.Size)
				}
				sh.mu.Unlock()
				return nil
			case <-ctx.Done():
				return context.ErrCanceled
			}
		})
		if err == errNotPrefix {
			err = nil
		}
		if err != nil {
			// Send a zero value to shut down ListMissingDestinationBlobs.
			c <- blob.SizedRef{}
		}
		errc <- err
	}()
	return c, errc
}
func (s testEnum) Enumerate(ctx *context.Context, dest chan<- Item) error {
	defer close(dest)
	for _, v := range s {
		select {
		case dest <- v:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
// EnumerateBlobMetaLocked sends all known blobs to ch, or until the context is canceled.
//
// The Corpus must already be locked with RLock.
func (c *Corpus) EnumerateBlobMetaLocked(ctx *context.Context, ch chan<- camtypes.BlobMeta) error {
	defer close(ch)
	for _, bm := range c.blobs {
		select {
		case ch <- *bm:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
func (m testItemEnum) EnumerateItem(ctx *context.Context, it Item, dest chan<- Item) error {
	defer close(dest)
	for _, v := range m[it.(string)] {
		select {
		case dest <- v:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
// setBuckets defines the buckets needed by the instance and creates them.
func (d *Deployer) setBuckets(storageService *storage.Service, ctx *context.Context) error {
	projBucket := d.Conf.Project + "-camlistore"
	needBucket := map[string]bool{
		projBucket: true,
	}
	buckets, err := storageService.Buckets.List(d.Conf.Project).Do()
	if err != nil {
		return fmt.Errorf("error listing buckets: %v", err)
	}
	for _, it := range buckets.Items {
		delete(needBucket, it.Name)
	}
	if len(needBucket) > 0 {
		if Verbose {
			log.Printf("Need to create buckets: %v", needBucket)
		}
		var waitBucket sync.WaitGroup
		var bucketMu sync.Mutex // guards bucketErr, which the goroutines below may set concurrently
		var bucketErr error
		for name := range needBucket {
			if ctx.IsCanceled() {
				return context.ErrCanceled
			}
			name := name
			waitBucket.Add(1)
			go func() {
				defer waitBucket.Done()
				if Verbose {
					log.Printf("Creating bucket %s", name)
				}
				b, err := storageService.Buckets.Insert(d.Conf.Project, &storage.Bucket{
					Id:   name,
					Name: name,
				}).Do()
				if err != nil {
					bucketMu.Lock()
					if bucketErr == nil {
						bucketErr = fmt.Errorf("error creating bucket %s: %v", name, err)
					}
					bucketMu.Unlock()
					return
				}
				if Verbose {
					log.Printf("Created bucket %s: %+v", name, b)
				}
			}()
		}
		waitBucket.Wait()
		if bucketErr != nil {
			return bucketErr
		}
	}

	d.Conf.configDir = path.Join(projBucket, configDir)
	d.Conf.blobDir = path.Join(projBucket, "blobs")
	return nil
}
func (im *imp) doGet(ctx *context.Context, url string, form url.Values) (*http.Response, error) {
	creds := im.creds()
	if creds == nil {
		return nil, errors.New("No OAuth credentials. Not logged in?")
	}
	res, err := oauthClient.Get(ctx.HTTPClient(), creds, url, form)
	if err != nil {
		return nil, fmt.Errorf("Error fetching %s: %v", url, err)
	}
	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Get request on %s failed with: %s", url, res.Status)
	}
	return res, nil
}
func (ds *DiskStorage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	if limit == 0 {
		log.Printf("Warning: localdisk.EnumerateBlobs called with a limit of 0")
	}
	limitMutable := limit
	return ds.readBlobs(readBlobRequest{
		done:    ctx.Done(),
		ch:      dest,
		dirRoot: ds.root,
		after:   after,
		remain:  &limitMutable,
	})
}
func doGet(ctx *context.Context, url string, form url.Values) (*http.Response, error) {
	requestURL := url + "?" + form.Encode()
	req, err := http.NewRequest("GET", requestURL, nil)
	if err != nil {
		return nil, err
	}
	res, err := ctx.HTTPClient().Do(req)
	if err != nil {
		log.Printf("Error fetching %s: %v", url, err)
		return nil, err
	}
	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Get request on %s failed with: %s", requestURL, res.Status)
	}
	return res, nil
}
func doGet(ctx *context.Context, url string) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	res, err := ctx.HTTPClient().Do(req)
	if err != nil {
		log.Printf("Error fetching %s: %v", url, err)
		return nil, err
	}
	defer httputil.CloseBody(res.Body)
	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Get request on %s failed with: %s", url, res.Status)
	}
	return ioutil.ReadAll(io.LimitReader(res.Body, 8<<20))
}
// EnumerateCamliBlobsLocked sends just camlistore meta blobs to ch.
//
// The Corpus must already be locked with RLock.
//
// If camType is empty, all camlistore blobs are sent, otherwise it specifies
// the camliType to send.
// ch is closed at the end. The err will either be nil or context.ErrCanceled.
func (c *Corpus) EnumerateCamliBlobsLocked(ctx *context.Context, camType string, ch chan<- camtypes.BlobMeta) error {
	defer close(ch)
	for t, m := range c.camBlobs {
		if camType != "" && camType != t {
			continue
		}
		for _, bm := range m {
			select {
			case ch <- *bm:
			case <-ctx.Done():
				return context.ErrCanceled
			}
		}
	}
	return nil
}
// EnumerateBlobMeta sends all metadata about all known blobs to ch and then closes ch.
func (x *Index) EnumerateBlobMeta(ctx *context.Context, ch chan<- camtypes.BlobMeta) (err error) {
	if x.corpus != nil {
		x.corpus.RLock()
		defer x.corpus.RUnlock()
		return x.corpus.EnumerateBlobMetaLocked(ctx, ch)
	}
	defer close(ch)
	return enumerateBlobMeta(x.s, func(bm camtypes.BlobMeta) error {
		select {
		case ch <- bm:
		case <-ctx.Done():
			return context.ErrCanceled
		}
		return nil
	})
}
func enumerateAllBlobs(ctx *context.Context, s blobserver.Storage, destc chan<- blob.SizedRef) error {
	// Use *client.Client's support for enumerating all blobs if
	// possible, since it could probably do a better job knowing
	// HTTP boundaries and such.
	if c, ok := s.(*client.Client); ok {
		return c.SimpleEnumerateBlobs(ctx, destc)
	}

	defer close(destc)
	return blobserver.EnumerateAll(ctx, s, func(sb blob.SizedRef) error {
		select {
		case destc <- sb:
		case <-ctx.Done():
			return context.ErrCanceled
		}
		return nil
	})
}
func (im *imp) importTweets(ctx *context.Context) error {
	maxId := ""
	continueRequests := true
	for continueRequests {
		if ctx.IsCanceled() {
			log.Printf("Twitter importer: interrupted")
			return context.ErrCanceled
		}

		var resp []*tweetItem
		if err := im.doAPI(&resp, "statuses/user_timeline.json", "count", strconv.Itoa(tweetRequestLimit), "max_id", maxId); err != nil {
			return err
		}

		tweetsNode, err := im.getTopLevelNode("tweets", "Tweets")
		if err != nil {
			return err
		}

		itemcount := len(resp)
		log.Printf("Twitter importer: Importing %d tweets", itemcount)
		if itemcount < tweetRequestLimit {
			continueRequests = false
		} else {
			lastTweet := resp[len(resp)-1]
			maxId = lastTweet.Id
		}

		for _, tweet := range resp {
			if ctx.IsCanceled() {
				log.Printf("Twitter importer: interrupted")
				return context.ErrCanceled
			}
			err = im.importTweet(tweetsNode, tweet)
			if err != nil {
				log.Printf("Twitter importer: error importing tweet %s %v", tweet.Id, err)
				continue
			}
		}
	}
	return nil
}
func (gs *Storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	objs, err := gs.client.EnumerateObjects(gs.bucket, after, limit)
	if err != nil {
		log.Printf("gstorage EnumerateObjects: %v", err)
		return err
	}
	for _, obj := range objs {
		br, ok := blob.Parse(obj.Key)
		if !ok {
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: uint32(obj.Size)}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
func (sto *s3Storage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	objs, err := sto.s3Client.ListBucket(sto.bucket, after, limit)
	if err != nil {
		log.Printf("s3 ListBucket: %v", err)
		return err
	}
	for _, obj := range objs {
		br, ok := blob.Parse(obj.Key)
		if !ok {
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: obj.Size}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
func (sto *swiftStorage) EnumerateBlobs(ctx *context.Context, dest chan<- blob.SizedRef, after string, limit int) error {
	defer close(dest)
	objs, err := sto.client.ObjectsAll(sto.container, nil)
	if err != nil {
		log.Printf("swift ObjectsAll: %v", err)
		return err
	}
	for _, obj := range objs {
		br, ok := blob.Parse(obj.Name)
		if !ok {
			continue
		}
		select {
		case dest <- blob.SizedRef{Ref: br, Size: uint32(obj.Bytes)}:
		case <-ctx.Done():
			return context.ErrCanceled
		}
	}
	return nil
}
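// A minimal consumption sketch, not code from the repository: every
// EnumerateBlobs implementation above closes dest when it returns, so a caller
// can range over the channel and then collect the error from the enumerating
// goroutine. The helper name printBlobs, the channel buffer size, and the limit
// are illustrative assumptions; blobserver.BlobEnumerator is the interface
// already used by startValidatePrefix above.
func printBlobs(ctx *context.Context, e blobserver.BlobEnumerator) error {
	ch := make(chan blob.SizedRef, 16)
	errc := make(chan error, 1)
	go func() {
		// The enumerator closes ch when it returns, whether or not it fails.
		errc <- e.EnumerateBlobs(ctx, ch, "" /* after */, 1000 /* limit */)
	}()
	for sb := range ch {
		log.Printf("%v (%d bytes)", sb.Ref, sb.Size)
	}
	return <-errc
}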