func (c *Client) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref, wait time.Duration) error {
	var needStat []blob.Ref
	for _, br := range blobs {
		if !br.Valid() {
			panic("invalid blob")
		}
		if size, ok := c.haveCache.StatBlobCache(br); ok {
			dest <- blob.SizedRef{Ref: br, Size: size}
		} else {
			if needStat == nil {
				needStat = make([]blob.Ref, 0, len(blobs))
			}
			needStat = append(needStat, br)
		}
	}
	if len(needStat) == 0 {
		return nil
	}
	if wait > 0 {
		// No batching on wait requests.
		return c.doStat(dest, needStat, wait, true)
	}

	// Here begins all the batching logic. In a SPDY world, this
	// will all be somewhat useless, so consider detecting SPDY on
	// the underlying connection and just always calling doStat
	// instead. The one thing the code below does do is cut up
	// >1000 stats into smaller batches. But with SPDY we could
	// even just do lots of little 1-at-a-time stats.

	var errcs []chan error // one per blob to stat

	c.pendStatMu.Lock()
	{
		if c.pendStat == nil {
			c.pendStat = make(map[string][]statReq)
		}
		for _, blob := range needStat {
			errc := make(chan error, 1)
			errcs = append(errcs, errc)
			brStr := blob.String()
			c.pendStat[brStr] = append(c.pendStat[brStr], statReq{blob, dest, errc})
		}
	}
	c.pendStatMu.Unlock()

	// Kick off at least one worker. It may do nothing and lose
	// the race, but somebody will handle our requests in
	// pendStat.
	go c.doSomeStats()

	for _, errc := range errcs {
		if err := <-errc; err != nil {
			return err
		}
	}
	return nil
}
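// For context, a minimal sketch of the statReq type stored in the
// pendStat batching map above. The field set is inferred from the
// statReq{blob, dest, errc} literal; the field names are assumptions,
// not necessarily the real declaration:
type statReq struct {
	br   blob.Ref             // blob to stat
	dest chan<- blob.SizedRef // where a worker sends a successful result
	errc chan<- error         // receives the stat outcome (nil on success)
}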
func (sto *driveStorage) RemoveBlobs(blobs []blob.Ref) error {
	var reterr error
	for _, blob := range blobs {
		if err := sto.service.Trash(blob.String()); err != nil {
			reterr = err // remember the last error, but keep going
		}
	}
	return reterr
}
func (sto *s3Storage) RemoveBlobs(blobs []blob.Ref) error {
	// TODO: do these in parallel
	var reterr error
	for _, blob := range blobs {
		if err := sto.s3Client.Delete(sto.bucket, blob.String()); err != nil {
			reterr = err
		}
	}
	return reterr
}
func (sto *s3Storage) RemoveBlobs(blobs []blob.Ref) error {
	var wg syncutil.Group
	for _, blob := range blobs {
		blob := blob // capture the loop variable for the closure below
		removeGate.Start()
		wg.Go(func() error {
			defer removeGate.Done()
			return sto.s3Client.Delete(sto.bucket, blob.String())
		})
	}
	return wg.Err()
}
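// removeGate above is assumed to be a package-level limiter shared by
// these RemoveBlobs implementations, bounding how many removals run at
// once. A minimal sketch of how it might be declared with Camlistore's
// syncutil package (the limit of 20 is an assumption):
var removeGate = syncutil.NewGate(20) // at most 20 removals in flight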
func (m *mongoStorage) RemoveBlobs(blobs []blob.Ref) error {
	var wg syncutil.Group
	for _, blob := range blobs {
		blob := blob // capture the loop variable for the closure below
		removeGate.Start()
		wg.Go(func() error {
			defer removeGate.Done()
			err := m.c.Remove(bson.M{"key": blob.String()})
			if err == mgo.ErrNotFound {
				// Removing an already-missing blob is not an error.
				return nil
			}
			return err
		})
	}
	return wg.Err()
}
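// The two parallel RemoveBlobs above lean on two small syncutil
// primitives: a Gate that caps concurrency and a Group that runs
// functions in goroutines and collects their errors. A self-contained
// sketch of their semantics as used here (assumes `import "sync"`;
// signatures inferred from usage, not a reproduction of the package):
type Gate struct{ ch chan struct{} }

func NewGate(max int) *Gate { return &Gate{ch: make(chan struct{}, max)} }
func (g *Gate) Start()      { g.ch <- struct{}{} } // blocks while max are in flight
func (g *Gate) Done()       { <-g.ch }             // frees a slot

type Group struct {
	wg   sync.WaitGroup
	mu   sync.Mutex
	errs []error
}

// Go runs fn in its own goroutine and records any non-nil error.
func (g *Group) Go(fn func() error) {
	g.wg.Add(1)
	go func() {
		defer g.wg.Done()
		if err := fn(); err != nil {
			g.mu.Lock()
			g.errs = append(g.errs, err)
			g.mu.Unlock()
		}
	}()
}

// Err waits for all functions to finish and returns one of the
// recorded errors, or nil if every function succeeded.
func (g *Group) Err() error {
	g.wg.Wait()
	if len(g.errs) > 0 {
		return g.errs[0]
	}
	return nil
}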
func (h *Host) startPeriodicImporters() {
	res, err := h.search.Query(&search.SearchQuery{
		Expression: "attr:camliNodeType:importerAccount",
		Describe: &search.DescribeRequest{
			Depth: 1,
		},
	})
	if err != nil {
		log.Printf("periodic importer search fail: %v", err)
		return
	}
	if res.Describe == nil {
		log.Printf("No describe response in search result")
		return
	}
	for _, resBlob := range res.Blobs {
		blob := resBlob.Blob
		desBlob, ok := res.Describe.Meta[blob.String()]
		if !ok || desBlob.Permanode == nil {
			continue
		}
		attrs := desBlob.Permanode.Attr
		if attrs.Get(attrNodeType) != nodeTypeImporterAccount {
			panic("Search result returned non-importerAccount")
		}
		impType := attrs.Get("importerType")
		imp, ok := h.imp[impType]
		if !ok {
			continue
		}
		ia, err := imp.account(blob)
		if err != nil {
			log.Printf("Can't load importer account %v for regular importing: %v", blob, err)
			continue
		}
		go ia.maybeStart()
	}
}