func sendFileBlobs(c *blobserv.Client, part *multipart.Part) (respMeta map[string]interface{}) {
	meta := blob.NewMeta()

	// Always build the response metadata, even if one of the util.Check
	// calls below panics; recover turns that panic back into an "error" field.
	defer func() {
		respMeta = map[string]interface{}{}
		respMeta["name"] = meta.Name
		respMeta["size"] = meta.Size
		if r := recover(); r != nil {
			respMeta["error"] = r.(error).Error()
		}
	}()

	obj := blob.NewObject()
	meta.RcasObjectRef = obj.Ref()
	meta.Name = part.FileName()

	data, err := ioutil.ReadAll(part)
	util.Check(err)
	meta.Size = int64(len(data))

	// Split the file into fixed-size chunks and record their refs in the metadata.
	blobs := blob.SplitRaw(data, blob.DefaultChunkSize)
	meta.ContentRefs = blob.RefsFor(blobs)

	// Upload the metadata and object blobs along with the content chunks.
	m, err := blob.Marshal(meta)
	util.Check(err)
	blobs = append(blobs, m, obj)

	for _, b := range blobs {
		err = c.PutBlob(b)
		util.Check(err)
	}

	return respMeta
}
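sendFileBlobs reports failures through the deferred recover rather than by returning an error, which only works if util.Check panics on a non-nil error. The util package is not shown in this excerpt, so the following is a minimal sketch, assuming Check follows the usual check-and-panic idiom; the real implementation may differ.

// Package util: hypothetical sketch of the helper assumed by sendFileBlobs.
package util

// Check panics with err when it is non-nil, so a deferred recover
// (as in sendFileBlobs) can convert the failure into a response field.
func Check(err error) {
	if err != nil {
		panic(err)
	}
}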
func getMatches() []string {
	q := query.New()
	ft := q.NewFilter(filtFn)
	q.SetRoots(ft)
	q.Open()
	defer q.Close()

	batchN := 1000
	timeout := time.After(10 * time.Second)

	// Page backward through the blob stream in batches until we have enough
	// results, run out of blobs, or hit the timeout.
	for skip, done := 0, false; !done; skip += batchN {
		blobs, err := cl.BlobsBackward(time.Now(), batchN, skip)
		if len(blobs) > 0 {
			q.Process(blobs...)
		}
		// A single batch can push the result count past *max, so test with >=.
		if *max > 0 && len(q.Results) >= *max {
			break
		}
		if err != nil {
			break
		}
		select {
		case <-timeout:
			done = true
		default:
		}
	}

	// Truncate only when we actually collected more than *max results,
	// so the slice expression cannot go out of range.
	if *max > 0 && len(q.Results) > *max {
		q.Results = q.Results[:*max]
	}
	return blob.RefsFor(q.Results)
}
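getMatches also leans on package-level state that this excerpt does not define: the client cl, the result cap *max, and the predicate filtFn handed to q.NewFilter. The declarations below are a hypothetical sketch of how they might look; the import paths, the flag name, and the filter's parameter type are assumptions for illustration, not taken from the source.

// Hypothetical surrounding declarations for getMatches.
package main

import (
	"flag"

	"example.com/rcas/blob"     // placeholder import path, not from the source
	"example.com/rcas/blobserv" // placeholder import path, not from the source
)

var (
	// cl is the blob server client whose BlobsBackward method is paged above.
	cl *blobserv.Client

	// max caps the number of query results; 0 means no limit.
	max = flag.Int("max", 0, "maximum number of results (0 = no limit)")
)

// filtFn is the predicate passed to q.NewFilter; its parameter type is a guess.
func filtFn(m *blob.Meta) bool {
	// Example: keep only blobs whose metadata names a file.
	return m.Name != ""
}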