// Empirically:
//  open for read:   req.Flags == 0
//  open for append: req.Flags == 1
//  open for write:  req.Flags == 1
//  open for read/write (+<) == 2 (bitmask? of?)
//
// open flags are O_WRONLY (1), O_RDONLY (0), or O_RDWR (2). and also
// bitmasks of O_SYMLINK (0x200000) maybe. (from
// fuse_filehandle_xlate_to_oflags in macosx/kext/fuse_file.h)
func (n *mutFile) Open(req *fuse.OpenRequest, res *fuse.OpenResponse, intr fuse.Intr) (fuse.Handle, fuse.Error) {
	mutFileOpen.Incr()
	log.Printf("mutFile.Open: %v: content: %v dir=%v flags=%v mode=%v", n.permanode, n.content, req.Dir, req.Flags, req.Mode)
	r, err := schema.NewFileReader(n.fs.fetcher, n.content)
	if err != nil {
		mutFileOpenError.Incr()
		log.Printf("mutFile.Open: %v", err)
		return nil, fuse.EIO
	}

	// Turn off the OpenDirectIO bit (on by default in rsc fuse server.go),
	// else append operations don't work for some reason.
	res.Flags &= ^fuse.OpenDirectIO

	// Read-only.
	if req.Flags == 0 {
		mutFileOpenRO.Incr()
		log.Printf("mutFile.Open returning read-only file")
		n := &node{
			fs:      n.fs,
			blobref: n.content,
		}
		return &nodeReader{n: n, fr: r}, nil
	}

	mutFileOpenRW.Incr()
	log.Printf("mutFile.Open returning read-write filehandle")
	defer r.Close()
	return n.newHandle(r)
}
// Empirically:
//  open for read:   req.Flags == 0
//  open for append: req.Flags == 1
//  open for write:  req.Flags == 1
//  open for read/write (+<) == 2 (bitmask? of?)
//
// open flags are O_WRONLY (1), O_RDONLY (0), or O_RDWR (2). and also
// bitmasks of O_SYMLINK (0x200000) maybe. (from
// fuse_filehandle_xlate_to_oflags in macosx/kext/fuse_file.h)
func (n *roFile) Open(ctx context.Context, req *fuse.OpenRequest, res *fuse.OpenResponse) (fs.Handle, error) {
	roFileOpen.Incr()

	if isWriteFlags(req.Flags) {
		return nil, fuse.EPERM
	}

	log.Printf("roFile.Open: %v: content: %v dir=%v flags=%v", n.permanode, n.content, req.Dir, req.Flags)
	r, err := schema.NewFileReader(n.fs.fetcher, n.content)
	if err != nil {
		roFileOpenError.Incr()
		log.Printf("roFile.Open: %v", err)
		return nil, fuse.EIO
	}

	// Turn off the OpenDirectIO bit (on by default in rsc fuse server.go),
	// else append operations don't work for some reason.
	res.Flags &= ^fuse.OpenDirectIO

	// Read-only.
	nod := &node{
		fs:      n.fs,
		blobref: n.content,
	}
	return &nodeReader{n: nod, fr: r}, nil
}
// Empirically:
//  open for read:   req.Flags == 0
//  open for append: req.Flags == 1
//  open for write:  req.Flags == 1
//  open for read/write (+<) == 2 (bitmask? of?)
//
// open flags are O_WRONLY (1), O_RDONLY (0), or O_RDWR (2). and also
// bitmasks of O_SYMLINK (0x200000) maybe. (from
// fuse_filehandle_xlate_to_oflags in macosx/kext/fuse_file.h)
func (n *mutFile) Open(req *fuse.OpenRequest, res *fuse.OpenResponse, intr fs.Intr) (fs.Handle, fuse.Error) {
	mutFileOpen.Incr()
	log.Printf("mutFile.Open: %v: content: %v dir=%v flags=%v", n.permanode, n.content, req.Dir, req.Flags)
	r, err := schema.NewFileReader(n.fs.fetcher, n.content)
	if err != nil {
		mutFileOpenError.Incr()
		log.Printf("mutFile.Open: %v", err)
		return nil, fuse.EIO
	}

	// Read-only.
	if !isWriteFlags(req.Flags) {
		mutFileOpenRO.Incr()
		log.Printf("mutFile.Open returning read-only file")
		n := &node{
			fs:      n.fs,
			blobref: n.content,
		}
		return &nodeReader{n: n, fr: r}, nil
	}

	mutFileOpenRW.Incr()
	log.Printf("mutFile.Open returning read-write filehandle")
	defer r.Close()
	return n.newHandle(r)
}
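// The two Open variants above gate read/write behavior on isWriteFlags,
// which is not included in this excerpt. Below is a minimal sketch of such
// a helper, assuming only the empirical flag values noted in the comments
// (O_RDONLY=0, O_WRONLY=1, O_RDWR=2 in the low bits); the real helper in
// the tree may also mask bits such as O_APPEND or O_CREATE.
func isWriteFlags(flags fuse.OpenFlags) bool {
	const accessModeMask = 3 // assumed equivalent of O_ACCMODE
	mode := uint32(flags) & accessModeMask
	return mode == 1 || mode == 2 // O_WRONLY or O_RDWR
}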
func (dh *DownloadHandler) fileInfo(req *http.Request, file blob.Ref) (fi fileInfo, packed bool, err error) {
	// Fast path for blobpacked.
	fi, ok := fileInfoPacked(dh.Search, dh.Fetcher, req, file)
	if debugPack {
		log.Printf("download.go: fileInfoPacked: ok=%v, %+v", ok, fi)
	}
	if ok {
		return fi, true, nil
	}

	fr, err := schema.NewFileReader(dh.blobSource(), file)
	if err != nil {
		return
	}
	mime := dh.ForceMIME
	if mime == "" {
		mime = magic.MIMETypeFromReaderAt(fr)
	}
	if mime == "" {
		mime = "application/octet-stream"
	}
	return fileInfo{
		mime:  mime,
		name:  fr.FileName(),
		size:  fr.Size(),
		rs:    fr,
		close: fr.Close,
	}, false, nil
}
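// fileInfo itself is not defined in this excerpt. The shape below is
// inferred from the composite literal above and is only an illustration,
// not the actual declaration.
type fileInfo struct {
	mime  string        // Content-Type to serve
	name  string        // file name from the file schema
	size  int64         // total file size in bytes
	rs    io.ReadSeeker // file contents, usable with http.ServeContent
	close func() error  // releases the underlying reader
}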
func (ih *ImageHandler) scaleImage(fileRef blob.Ref) (*formatAndImage, error) {
	fr, err := schema.NewFileReader(ih.storageSeekFetcher(), fileRef)
	if err != nil {
		return nil, err
	}
	defer fr.Close()

	var buf bytes.Buffer
	scaleImageGateSlurp.Start()
	n, err := io.Copy(&buf, fr)
	scaleImageGateSlurp.Done()
	imageBytesFetchedVar.Add(n)
	if err != nil {
		return nil, fmt.Errorf("image resize: error reading image %s: %v", fileRef, err)
	}

	scaleImageGateResize.Start()
	defer scaleImageGateResize.Done()

	i, imConfig, err := images.Decode(bytes.NewReader(buf.Bytes()),
		&images.DecodeOpts{MaxWidth: ih.MaxWidth, MaxHeight: ih.MaxHeight})
	if err != nil {
		return nil, err
	}
	b := i.Bounds()
	format := imConfig.Format

	useBytesUnchanged := !imConfig.Modified &&
		format != "cr2" // always recompress CR2 files

	isSquare := b.Dx() == b.Dy()
	if ih.Square && !isSquare {
		useBytesUnchanged = false
		i = squareImage(i)
		b = i.Bounds()
	}

	if !useBytesUnchanged {
		// Encode as a new image
		buf.Reset()
		switch format {
		case "cr2":
			// Recompress CR2 files as JPEG
			format = "jpeg"
			fallthrough
		default:
			err = jpeg.Encode(&buf, i, &jpeg.Options{
				Quality: 90,
			})
		case "png":
			err = png.Encode(&buf, i)
		}
		if err != nil {
			return nil, err
		}
	}

	return &formatAndImage{format: format, image: buf.Bytes()}, nil
}
func (dh *DownloadHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file blob.Ref) {
	if req.Method != "GET" && req.Method != "HEAD" {
		http.Error(rw, "Invalid download method", 400)
		return
	}
	if req.Header.Get("If-Modified-Since") != "" {
		// Immutable, so any copy's a good copy.
		rw.WriteHeader(http.StatusNotModified)
		return
	}

	fr, err := schema.NewFileReader(dh.blobSource(), file)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.Error(), 500)
		return
	}
	defer fr.Close()

	schema := fr.FileSchema()
	h := rw.Header()
	h.Set("Content-Length", fmt.Sprintf("%d", schema.SumPartsSize()))
	h.Set("Expires", time.Now().Add(oneYear).Format(http.TimeFormat))

	mimeType := magic.MIMETypeFromReaderAt(fr)
	if dh.ForceMime != "" {
		mimeType = dh.ForceMime
	}
	if mimeType == "" {
		mimeType = "application/octet-stream"
	}
	h.Set("Content-Type", mimeType)

	if mimeType == "application/octet-stream" {
		// Chrome seems to silently do nothing on
		// application/octet-stream unless this is set.
		// Maybe it's confused by lack of URL it recognizes
		// along with lack of mime type?
		rw.Header().Set("Content-Disposition", "attachment; filename=file-"+file.String()+".dat")
	}

	if req.Method == "HEAD" && req.FormValue("verifycontents") != "" {
		vbr, ok := blob.Parse(req.FormValue("verifycontents"))
		if !ok {
			return
		}
		hash := vbr.Hash()
		if hash == nil {
			return
		}
		io.Copy(hash, fr) // ignore errors, caught later
		if vbr.HashMatches(hash) {
			rw.Header().Set("X-Camli-Contents", vbr.String())
		}
		return
	}

	http.ServeContent(rw, req, "", time.Now(), fr)
}
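// A sketch of how a client might exercise the verifycontents branch above:
// issue a HEAD request naming the expected whole-file hash, then look for
// the X-Camli-Contents header echo. The URL layout here is an assumption
// for illustration; adapt it to wherever the DownloadHandler is mounted.
func verifyContents(base, fileRef, contentsRef string) (bool, error) {
	resp, err := http.Head(base + "/" + fileRef + "?verifycontents=" + contentsRef)
	if err != nil {
		return false, err
	}
	resp.Body.Close()
	return resp.Header.Get("X-Camli-Contents") == contentsRef, nil
}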
func (ih *ImageHandler) cached(br *blobref.BlobRef) (fr *schema.FileReader, err error) {
	fetchSeeker := blobref.SeekerFromStreamingFetcher(ih.Cache)
	fr, err = schema.NewFileReader(fetchSeeker, br)
	if err != nil {
		return nil, err
	}
	if imageDebug {
		log.Printf("Image Cache: hit: %v\n", br)
	}
	return fr, nil
}
// cached returns a FileReader for the given file schema blobref.
// The FileReader should be closed when done reading.
func (ih *ImageHandler) cached(fileRef blob.Ref) (*schema.FileReader, error) {
	fetchSeeker := blob.SeekerFromStreamingFetcher(ih.Cache)
	fr, err := schema.NewFileReader(fetchSeeker, fileRef)
	if err != nil {
		return nil, err
	}
	if imageDebug {
		log.Printf("Image Cache: hit: %v\n", fileRef)
	}
	return fr, nil
}
func (ih *ImageHandler) scaleImage(buf *bytes.Buffer, file blob.Ref) (format string, err error) {
	fr, err := schema.NewFileReader(ih.storageSeekFetcher(), file)
	if err != nil {
		return format, err
	}
	defer fr.Close()

	_, err = io.Copy(buf, fr)
	if err != nil {
		return format, fmt.Errorf("image resize: error reading image %s: %v", file, err)
	}

	i, imConfig, err := images.Decode(bytes.NewReader(buf.Bytes()),
		&images.DecodeOpts{MaxWidth: ih.MaxWidth, MaxHeight: ih.MaxHeight})
	if err != nil {
		return format, err
	}
	b := i.Bounds()
	format = imConfig.Format

	useBytesUnchanged := !imConfig.Modified &&
		format != "cr2" // always recompress CR2 files

	isSquare := b.Dx() == b.Dy()
	if ih.Square && !isSquare {
		useBytesUnchanged = false
		i = squareImage(i)
		b = i.Bounds()
	}

	if !useBytesUnchanged {
		// Encode as a new image
		buf.Reset()
		switch format {
		case "cr2":
			// Recompress CR2 files as JPEG
			format = "jpeg"
			fallthrough
		default:
			err = jpeg.Encode(buf, i, nil)
		case "png":
			err = png.Encode(buf, i)
		}
		if err != nil {
			return format, err
		}
	}
	return format, nil
}
func (s *storage) packFile(fileRef blob.Ref) (err error) {
	s.Logf("Packing file %s ...", fileRef)
	defer func() {
		if err == nil {
			s.Logf("Packed file %s", fileRef)
		} else {
			s.Logf("Error packing file %s: %v", fileRef, err)
		}
	}()

	fr, err := schema.NewFileReader(s, fileRef)
	if err != nil {
		return err
	}
	return newPacker(s, fileRef, fr).pack()
}
// serveRef gets the file at ref from fetcher and serves its contents.
// It is used by Service as a one time handler to serve to the thumbnail
// child process on localhost.
func serveRef(rw http.ResponseWriter, req *http.Request, ref blob.Ref, fetcher blob.Fetcher) {
	if !httputil.IsGet(req) {
		http.Error(rw, "Invalid download method.", 400)
		return
	}
	if !httputil.IsLocalhost(req) {
		http.Error(rw, "Forbidden.", 403)
		return
	}

	parts := strings.Split(req.URL.Path, "/")
	if len(parts) < 2 {
		http.Error(rw, "Malformed GET URL.", 400)
		return
	}
	blobRef, ok := blob.Parse(parts[1])
	if !ok {
		http.Error(rw, "Malformed GET URL.", 400)
		return
	}

	// This handler only serves the ref it was created for.
	if blobRef != ref {
		log.Printf("videothumbnail: access to %v forbidden; wrong blobref for handler", blobRef)
		http.Error(rw, "Forbidden.", 403)
		return
	}

	rw.Header().Set("Content-Type", "application/octet-stream")
	fr, err := schema.NewFileReader(fetcher, ref)
	if err != nil {
		httputil.ServeError(rw, req, err)
		return
	}
	defer fr.Close()
	http.ServeContent(rw, req, "", time.Now(), fr)
}
func (ih *ImageHandler) newFileReader(fileRef blob.Ref) (io.ReadCloser, error) {
	fi, ok := fileInfoPacked(ih.Search, ih.Fetcher, nil, fileRef)
	if debugPack {
		log.Printf("pkg/server/image.go: fileInfoPacked: ok=%v, %+v", ok, fi)
	}
	if ok {
		// This would be less gross if fileInfoPacked just
		// returned an io.ReadCloser, but then the download
		// handler would need more invasive changes for
		// ServeContent. So tolerate this for now.
		return struct {
			io.Reader
			io.Closer
		}{
			fi.rs,
			types.CloseFunc(fi.close),
		}, nil
	}
	// Default path, not going through blobpacked's fast path:
	return schema.NewFileReader(ih.Fetcher, fileRef)
}
func TestStorage(t *testing.T) {
	store, ref := storageAndBlobRef(t)

	fr, err := schema.NewFileReader(store, ref)
	if err != nil {
		t.Fatal(err)
	}
	inFile, err := os.Open(testFilepath)
	if err != nil {
		t.Fatal(err)
	}
	defer inFile.Close()
	data, err := ioutil.ReadAll(inFile)
	if err != nil {
		t.Fatal(err)
	}
	bd, err := ioutil.ReadAll(fr)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(bd, data) {
		t.Error("contents read through FileReader differ from the original file")
	}
}
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blob.SizedRef) error {
	sf, ok := blobReceiver.(blob.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blob.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.Ref)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.Ref.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.Ref.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.Ref.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return errors.New("Discovery: publicKeyBlobRef not found or not a string")
	}

	// The file schema must have a modtime to vivify, as the modtime is used for all three of:
	// 1) the permanode's signature
	// 2) the camliContent attribute claim's "claimDate"
	// 3) the signature time of 2)
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}

	permanodeBB := schema.NewHashPlannedPermanode(h)
	permanodeBB.SetSigner(blob.MustParse(publicKeyBlobRef))
	permanodeBB.SetClaimDate(claimDate)
	permanodeSigned, err := sigHelper.Sign(permanodeBB)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", permanodeSigned, err)
	}
	permanodeRef := blob.SHA1FromString(permanodeSigned)
	_, err = blobserver.ReceiveNoHash(blobReceiver, permanodeRef, strings.NewReader(permanodeSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v, %v: %v", permanodeRef, permanodeSigned, err)
	}

	contentClaimBB := schema.NewSetAttributeClaim(permanodeRef, "camliContent", fileblob.Ref.String())
	contentClaimBB.SetSigner(blob.MustParse(publicKeyBlobRef))
	contentClaimBB.SetClaimDate(claimDate)
	contentClaimSigned, err := sigHelper.Sign(contentClaimBB)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	contentClaimRef := blob.SHA1FromString(contentClaimSigned)
	_, err = blobserver.ReceiveNoHash(blobReceiver, contentClaimRef, strings.NewReader(contentClaimSigned))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v, %v: %v", contentClaimRef, contentClaimSigned, err)
	}
	return nil
}
func main() {
	client.AddFlags()
	flag.Parse()

	if *flagVersion {
		fmt.Fprintf(os.Stderr, "camget version: %s\n", buildinfo.Version())
		return
	}

	if *flagGraph && flag.NArg() != 1 {
		log.Fatalf("The --graph option requires exactly one parameter.")
	}

	var cl *client.Client
	var items []*blobref.BlobRef

	if *flagShared != "" {
		if client.ExplicitServer() != "" {
			log.Fatal("Can't use --shared with an explicit blobserver; blobserver is implicit from the --shared URL.")
		}
		if flag.NArg() != 0 {
			log.Fatal("No arguments permitted when using --shared")
		}
		cl1, target, err := client.NewFromShareRoot(*flagShared, client.OptionInsecure(*flagInsecureTLS))
		if err != nil {
			log.Fatal(err)
		}
		cl = cl1
		items = append(items, target)
	} else {
		cl = client.NewOrFail()
		for n := 0; n < flag.NArg(); n++ {
			arg := flag.Arg(n)
			br := blobref.Parse(arg)
			if br == nil {
				log.Fatalf("Failed to parse argument %q as a blobref.", arg)
			}
			items = append(items, br)
		}
	}

	cl.InsecureTLS = *flagInsecureTLS
	tr := cl.TransportForConfig(&client.TransportConfig{
		Verbose: *flagHTTP,
	})
	httpStats, _ := tr.(*httputil.StatsTransport)
	cl.SetHTTPClient(&http.Client{Transport: tr})

	diskCacheFetcher, err := cacher.NewDiskCache(cl)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	defer diskCacheFetcher.Clean()
	if *flagVerbose {
		log.Printf("Using temp blob cache directory %s", diskCacheFetcher.Root)
	}

	for _, br := range items {
		if *flagGraph {
			printGraph(diskCacheFetcher, br)
			return
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exists.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			var rc io.ReadCloser
			var err error
			if *flagContents {
				rc, err = schema.NewFileReader(diskCacheFetcher, br)
				if err == nil {
					rc.(*schema.FileReader).LoadAllChunks()
				}
			} else {
				rc, err = fetch(diskCacheFetcher, br)
			}
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
		} else {
			if err := smartFetch(diskCacheFetcher, *flagOutput, br); err != nil {
				log.Fatal(err)
			}
		}
	}

	if *flagVerbose {
		log.Printf("HTTP requests: %d\n", httpStats.Requests())
	}
}
// smartFetch the things that blobs point to, not just blobs.
func smartFetch(src blobref.StreamingFetcher, targ string, br *blobref.BlobRef) error {
	rc, err := fetch(src, br)
	if err != nil {
		return err
	}
	defer rc.Close()

	sniffer := index.NewBlobSniffer(br)
	_, err = io.CopyN(sniffer, rc, sniffSize)
	if err != nil && err != io.EOF {
		return err
	}

	sniffer.Parse()
	blob, ok := sniffer.SchemaBlob()
	if !ok {
		if *flagVerbose {
			log.Printf("Fetching opaque data %v into %q", br, targ)
		}
		// opaque data - put it in a file
		f, err := os.Create(targ)
		if err != nil {
			return fmt.Errorf("opaque: %v", err)
		}
		defer f.Close()
		body, _ := sniffer.Body()
		r := io.MultiReader(bytes.NewReader(body), rc)
		_, err = io.Copy(f, r)
		return err
	}

	switch blob.Type() {
	case "directory":
		dir := filepath.Join(targ, blob.FileName())
		if *flagVerbose {
			log.Printf("Fetching directory %v into %s", br, dir)
		}
		if err := os.MkdirAll(dir, blob.FileMode()); err != nil {
			return err
		}
		if err := setFileMeta(dir, blob); err != nil {
			log.Print(err)
		}
		entries := blob.DirectoryEntries()
		if entries == nil {
			return fmt.Errorf("bad entries blobref in dir %v", blob.BlobRef())
		}
		return smartFetch(src, dir, entries)
	case "static-set":
		if *flagVerbose {
			log.Printf("Fetching directory entries %v into %s", br, targ)
		}
		// directory entries
		const numWorkers = 10
		type work struct {
			br   *blobref.BlobRef
			errc chan<- error
		}
		members := blob.StaticSetMembers()
		workc := make(chan work, len(members))
		defer close(workc)
		for i := 0; i < numWorkers; i++ {
			go func() {
				for wi := range workc {
					wi.errc <- smartFetch(src, targ, wi.br)
				}
			}()
		}
		var errcs []<-chan error
		for _, mref := range members {
			errc := make(chan error, 1)
			errcs = append(errcs, errc)
			workc <- work{mref, errc}
		}
		for _, errc := range errcs {
			if err := <-errc; err != nil {
				return err
			}
		}
		return nil
	case "file":
		seekFetcher := blobref.SeekerFromStreamingFetcher(src)
		fr, err := schema.NewFileReader(seekFetcher, br)
		if err != nil {
			return fmt.Errorf("NewFileReader: %v", err)
		}
		fr.LoadAllChunks()
		defer fr.Close()

		name := filepath.Join(targ, blob.FileName())

		// Compare against the schema's size; comparing fi.Size() to itself
		// (as an earlier revision did) was always true.
		if fi, err := os.Stat(name); err == nil && fi.Size() == fr.Size() {
			if *flagVerbose {
				log.Printf("Skipping %s; already exists.", name)
			}
			return nil
		}

		if *flagVerbose {
			log.Printf("Writing %s to %s ...", br, name)
		}

		f, err := os.Create(name)
		if err != nil {
			return fmt.Errorf("file type: %v", err)
		}
		defer f.Close()
		if _, err := io.Copy(f, fr); err != nil {
			return fmt.Errorf("Copying %s to %s: %v", br, name, err)
		}
		if err := setFileMeta(name, blob); err != nil {
			log.Print(err)
		}
		return nil
	default:
		return errors.New("unknown blob type: " + blob.Type())
	}
	panic("unreachable")
}
// vivify verifies that all the chunks for the file described by fileblob are on the blobserver.
// It makes a planned permanode, signs it, and uploads it. It finally makes a camliContent claim
// on that permanode for fileblob, signs it, and uploads it to the blobserver.
func vivify(blobReceiver blobserver.BlobReceiveConfiger, fileblob blobref.SizedBlobRef) error {
	sf, ok := blobReceiver.(blobref.StreamingFetcher)
	if !ok {
		return fmt.Errorf("BlobReceiver is not a StreamingFetcher")
	}
	fetcher := blobref.SeekerFromStreamingFetcher(sf)
	fr, err := schema.NewFileReader(fetcher, fileblob.BlobRef)
	if err != nil {
		return fmt.Errorf("Filereader error for blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	defer fr.Close()

	h := sha1.New()
	n, err := io.Copy(h, fr)
	if err != nil {
		return fmt.Errorf("Could not read all file of blobref %v: %v", fileblob.BlobRef.String(), err)
	}
	if n != fr.Size() {
		return fmt.Errorf("Could not read all file of blobref %v. Wanted %v, got %v", fileblob.BlobRef.String(), fr.Size(), n)
	}

	config := blobReceiver.Config()
	if config == nil {
		return errors.New("blobReceiver has no config")
	}
	hf := config.HandlerFinder
	if hf == nil {
		return errors.New("blobReceiver config has no HandlerFinder")
	}
	JSONSignRoot, sh, err := hf.FindHandlerByType("jsonsign")
	// TODO(mpl): second check should not be necessary, and yet it happens. Figure it out.
	if err != nil || sh == nil {
		return errors.New("jsonsign handler not found")
	}
	sigHelper, ok := sh.(*signhandler.Handler)
	if !ok {
		return errors.New("handler is not a JSON signhandler")
	}
	discoMap := sigHelper.DiscoveryMap(JSONSignRoot)
	publicKeyBlobRef, ok := discoMap["publicKeyBlobRef"].(string)
	if !ok {
		return errors.New("Discovery: publicKeyBlobRef not found or not a string")
	}

	unsigned := schema.NewHashPlannedPermanode(h)
	unsigned["camliSigner"] = publicKeyBlobRef
	signed, err := sigHelper.SignMap(unsigned)
	if err != nil {
		return fmt.Errorf("Signing permanode %v: %v", signed, err)
	}
	signedPerm := blobref.SHA1FromString(signed)
	_, err = blobReceiver.ReceiveBlob(signedPerm, strings.NewReader(signed))
	if err != nil {
		return fmt.Errorf("While uploading signed permanode %v: %v", signed, err)
	}

	contentAttr := schema.NewSetAttributeClaim(signedPerm, "camliContent", fileblob.BlobRef.String())
	claimDate, err := time.Parse(time.RFC3339, fr.FileSchema().UnixMtime)
	if err != nil {
		return fmt.Errorf("While parsing modtime for file %v: %v", fr.FileSchema().FileName, err)
	}
	contentAttr.SetClaimDate(claimDate)
	contentAttr["camliSigner"] = publicKeyBlobRef
	signed, err = sigHelper.SignMap(contentAttr)
	if err != nil {
		return fmt.Errorf("Signing camliContent claim: %v", err)
	}
	signedClaim := blobref.SHA1FromString(signed)
	_, err = blobReceiver.ReceiveBlob(signedClaim, strings.NewReader(signed))
	if err != nil {
		return fmt.Errorf("While uploading signed camliContent claim %v: %v", signed, err)
	}
	return nil
}
func (ih *ImageHandler) scaleImage(fileRef blob.Ref) (*formatAndImage, error) {
	fr, err := schema.NewFileReader(ih.Fetcher, fileRef)
	if err != nil {
		return nil, err
	}
	defer fr.Close()

	sr := types.NewStatsReader(imageBytesFetchedVar, fr)
	sr, conf, err := imageConfigFromReader(sr)
	if err != nil {
		return nil, err
	}

	// TODO(wathiede): build a size table keyed by conf.ColorModel for
	// common color models for a more exact size estimate.

	// This value is an estimate of the memory required to decode an image.
	// PNGs range from 1-64 bits per pixel (not all of which are supported by
	// the Go standard parser). JPEGs encoded in YCbCr 4:4:4 are 3 bytes/pixel.
	// For all other JPEGs this is an overestimate. For GIFs it is 3x larger
	// than needed. How accurate this estimate is depends on the mix of
	// images being resized concurrently.
	ramSize := int64(conf.Width) * int64(conf.Height) * 3

	if err = ih.resizeSem.Acquire(ramSize); err != nil {
		return nil, err
	}
	defer ih.resizeSem.Release(ramSize)

	i, imConfig, err := images.Decode(sr, &images.DecodeOpts{
		MaxWidth:  ih.MaxWidth,
		MaxHeight: ih.MaxHeight,
	})
	if err != nil {
		return nil, err
	}
	b := i.Bounds()
	format := imConfig.Format

	isSquare := b.Dx() == b.Dy()
	if ih.Square && !isSquare {
		i = squareImage(i)
		b = i.Bounds()
	}

	// Encode as a new image
	var buf bytes.Buffer
	switch format {
	case "cr2":
		// Recompress CR2 files as JPEG
		format = "jpeg"
		fallthrough
	default:
		err = jpeg.Encode(&buf, i, &jpeg.Options{
			Quality: 90,
		})
	case "png":
		err = png.Encode(&buf, i)
	}
	if err != nil {
		return nil, err
	}

	return &formatAndImage{format: format, image: buf.Bytes()}, nil
}
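// To make the memory gate above concrete: with the assumed 3 bytes per
// pixel, a 4000x3000 photo reserves 4000 * 3000 * 3 = 36,000,000 bytes
// (roughly 36 MB) from resizeSem before decoding starts. The helper name
// below is made up for illustration; it just restates the arithmetic.
func estimateDecodeRAM(width, height int) int64 {
	return int64(width) * int64(height) * 3
}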
func (dh *DownloadHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, file *blobref.BlobRef) {
	if req.Method != "GET" && req.Method != "HEAD" {
		http.Error(rw, "Invalid download method", 400)
		return
	}

	fr, err := schema.NewFileReader(dh.storageSeekFetcher(), file)
	if err != nil {
		http.Error(rw, "Can't serve file: "+err.Error(), 500)
		return
	}
	defer fr.Close()

	schema := fr.FileSchema()
	rw.Header().Set("Content-Length", fmt.Sprintf("%d", schema.SumPartsSize()))

	mimeType, reader := magic.MimeTypeFromReader(fr)
	if dh.ForceMime != "" {
		mimeType = dh.ForceMime
	}
	if mimeType == "" {
		mimeType = "application/octet-stream"
	}
	rw.Header().Set("Content-Type", mimeType)
	if mimeType == "application/octet-stream" {
		// Chrome seems to silently do nothing on
		// application/octet-stream unless this is set.
		// Maybe it's confused by lack of URL it recognizes
		// along with lack of mime type?
		rw.Header().Set("Content-Disposition", "attachment; filename=file-"+file.String()+".dat")
	}

	if req.Method == "HEAD" {
		vbr := blobref.Parse(req.FormValue("verifycontents"))
		if vbr == nil {
			return
		}
		hash := vbr.Hash()
		if hash == nil {
			return
		}
		io.Copy(hash, reader) // ignore errors, caught later
		if vbr.HashMatches(hash) {
			rw.Header().Set("X-Camli-Contents", vbr.String())
		}
		return
	}

	n, err := io.Copy(rw, reader)
	log.Printf("For %q request of %s: copied %d, %v", req.Method, req.URL.Path, n, err)
	if err != nil {
		log.Printf("error serving download of file schema %s: %v", file, err)
		return
	}
	if size := schema.SumPartsSize(); n != int64(size) {
		log.Printf("error serving download of file schema %s: sent %d, expected size of %d", file, n, size)
		return
	}
}
// smartFetch the things that blobs point to, not just blobs.
func smartFetch(cl *client.Client, targ string, br *blobref.BlobRef) error {
	if *flagVerbose {
		log.Printf("Fetching %v into %q", br, targ)
	}
	rc, err := fetch(cl, br)
	if err != nil {
		return err
	}
	defer rc.Close()

	sniffer := new(index.BlobSniffer)
	_, err = io.CopyN(sniffer, rc, sniffSize)
	if err != nil && err != io.EOF {
		return err
	}

	sniffer.Parse()
	sc, ok := sniffer.Superset()
	if !ok {
		// opaque data - put it in a file
		f, err := os.Create(targ)
		if err != nil {
			return fmt.Errorf("opaque: %v", err)
		}
		defer f.Close()
		body, _ := sniffer.Body()
		r := io.MultiReader(bytes.NewBuffer(body), rc)
		_, err = io.Copy(f, r)
		return err
	}

	sc.BlobRef = br

	switch sc.Type {
	case "directory":
		dir := filepath.Join(targ, sc.FileName)
		if err := os.MkdirAll(dir, sc.FileMode()); err != nil {
			return err
		}
		if err := setFileMeta(dir, sc); err != nil {
			log.Print(err)
		}
		entries := blobref.Parse(sc.Entries)
		if entries == nil {
			return fmt.Errorf("bad entries blobref: %v", sc.Entries)
		}
		return smartFetch(cl, dir, entries)
	case "static-set":
		// directory entries
		for _, m := range sc.Members {
			dref := blobref.Parse(m)
			if dref == nil {
				return fmt.Errorf("bad member blobref: %v", m)
			}
			if err := smartFetch(cl, targ, dref); err != nil {
				return err
			}
		}
		return nil
	case "file":
		name := filepath.Join(targ, sc.FileName)
		f, err := os.Create(name)
		if err != nil {
			return fmt.Errorf("file type: %v", err)
		}
		defer f.Close()
		seekFetcher := blobref.SeekerFromStreamingFetcher(cl)
		fr, err := schema.NewFileReader(seekFetcher, br)
		if err != nil {
			return fmt.Errorf("NewFileReader: %v", err)
		}
		defer fr.Close()
		// Copy the contents into the created file; without this
		// the destination would be left empty.
		if _, err := io.Copy(f, fr); err != nil {
			return fmt.Errorf("Copying %s to %s: %v", br, name, err)
		}
		if err := setFileMeta(name, sc); err != nil {
			log.Print(err)
		}
		return nil
	default:
		return errors.New("unknown blob type: " + sc.Type)
	}
	panic("unreachable")
}
func main() {
	client.AddFlags()
	flag.Parse()

	if *cmdmain.FlagHelp {
		flag.PrintDefaults()
	}
	if *flagVersion {
		fmt.Fprintf(os.Stderr, "camget version: %s\n", buildinfo.Version())
		return
	}
	if *cmdmain.FlagLegal {
		cmdmain.PrintLicenses()
		return
	}

	if *flagGraph && flag.NArg() != 1 {
		log.Fatalf("The --graph option requires exactly one parameter.")
	}

	var cl *client.Client
	var items []blob.Ref

	optTransportConfig := client.OptionTransportConfig(&client.TransportConfig{
		Verbose: *flagHTTP,
	})

	if *flagShared != "" {
		if client.ExplicitServer() != "" {
			log.Fatal("Can't use --shared with an explicit blobserver; blobserver is implicit from the --shared URL.")
		}
		if flag.NArg() != 0 {
			log.Fatal("No arguments permitted when using --shared")
		}
		cl1, target, err := client.NewFromShareRoot(*flagShared,
			client.OptionInsecure(*flagInsecureTLS),
			client.OptionTrustedCert(*flagTrustedCert),
			optTransportConfig,
		)
		if err != nil {
			log.Fatal(err)
		}
		cl = cl1
		items = append(items, target)
	} else {
		if *flagTrustedCert != "" {
			log.Fatal("Can't use --cert without --shared.")
		}
		cl = client.NewOrFail(client.OptionInsecure(*flagInsecureTLS), optTransportConfig)
		for n := 0; n < flag.NArg(); n++ {
			arg := flag.Arg(n)
			br, ok := blob.Parse(arg)
			if !ok {
				log.Fatalf("Failed to parse argument %q as a blobref.", arg)
			}
			items = append(items, br)
		}
	}
	httpStats := cl.HTTPStats()

	diskCacheFetcher, err := cacher.NewDiskCache(cl)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	defer diskCacheFetcher.Clean()
	if *flagVerbose {
		log.Printf("Using temp blob cache directory %s", diskCacheFetcher.Root)
	}
	if *flagShared != "" {
		diskCacheFetcher.SetCacheHitHook(func(br blob.Ref, rc io.ReadCloser) (io.ReadCloser, error) {
			var buf bytes.Buffer
			if err := cl.UpdateShareChain(br, io.TeeReader(rc, &buf)); err != nil {
				rc.Close()
				return nil, err
			}
			return struct {
				io.Reader
				io.Closer
			}{io.MultiReader(&buf, rc), rc}, nil
		})
	}

	for _, br := range items {
		if *flagGraph {
			printGraph(diskCacheFetcher, br)
			return
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exists.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			var rc io.ReadCloser
			var err error
			if *flagContents {
				rc, err = schema.NewFileReader(diskCacheFetcher, br)
				if err == nil {
					rc.(*schema.FileReader).LoadAllChunks()
				}
			} else {
				rc, err = fetch(diskCacheFetcher, br)
			}
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
		} else {
			if err := smartFetch(diskCacheFetcher, *flagOutput, br); err != nil {
				log.Fatal(err)
			}
		}
	}

	if *flagVerbose {
		log.Printf("HTTP requests: %d\n", httpStats.Requests())
		h1, h2 := httpStats.ProtoVersions()
		log.Printf(" responses: %d (h1), %d (h2)\n", h1, h2)
	}
}
// smartFetch the things that blobs point to, not just blobs.
func smartFetch(src blob.Fetcher, targ string, br blob.Ref) error {
	rc, err := fetch(src, br)
	if err != nil {
		return err
	}
	rcc := types.NewOnceCloser(rc)
	defer rcc.Close()

	sniffer := index.NewBlobSniffer(br)
	_, err = io.CopyN(sniffer, rc, sniffSize)
	if err != nil && err != io.EOF {
		return err
	}

	sniffer.Parse()
	b, ok := sniffer.SchemaBlob()
	if !ok {
		if *flagVerbose {
			log.Printf("Fetching opaque data %v into %q", br, targ)
		}
		// opaque data - put it in a file
		f, err := os.Create(targ)
		if err != nil {
			return fmt.Errorf("opaque: %v", err)
		}
		defer f.Close()
		body, _ := sniffer.Body()
		r := io.MultiReader(bytes.NewReader(body), rc)
		_, err = io.Copy(f, r)
		return err
	}
	rcc.Close()

	switch b.Type() {
	case "directory":
		dir := filepath.Join(targ, b.FileName())
		if *flagVerbose {
			log.Printf("Fetching directory %v into %s", br, dir)
		}
		if err := os.MkdirAll(dir, b.FileMode()); err != nil {
			return err
		}
		if err := setFileMeta(dir, b); err != nil {
			log.Print(err)
		}
		entries, ok := b.DirectoryEntries()
		if !ok {
			return fmt.Errorf("bad entries blobref in dir %v", b.BlobRef())
		}
		return smartFetch(src, dir, entries)
	case "static-set":
		if *flagVerbose {
			log.Printf("Fetching directory entries %v into %s", br, targ)
		}
		// directory entries
		const numWorkers = 10
		type work struct {
			br   blob.Ref
			errc chan<- error
		}
		members := b.StaticSetMembers()
		workc := make(chan work, len(members))
		defer close(workc)
		for i := 0; i < numWorkers; i++ {
			go func() {
				for wi := range workc {
					wi.errc <- smartFetch(src, targ, wi.br)
				}
			}()
		}
		var errcs []<-chan error
		for _, mref := range members {
			errc := make(chan error, 1)
			errcs = append(errcs, errc)
			workc <- work{mref, errc}
		}
		for _, errc := range errcs {
			if err := <-errc; err != nil {
				return err
			}
		}
		return nil
	case "file":
		fr, err := schema.NewFileReader(src, br)
		if err != nil {
			return fmt.Errorf("NewFileReader: %v", err)
		}
		fr.LoadAllChunks()
		defer fr.Close()

		name := filepath.Join(targ, b.FileName())

		if fi, err := os.Stat(name); err == nil && fi.Size() == fr.Size() {
			if *flagVerbose {
				log.Printf("Skipping %s; already exists.", name)
			}
			return nil
		}

		if *flagVerbose {
			log.Printf("Writing %s to %s ...", br, name)
		}

		f, err := os.Create(name)
		if err != nil {
			return fmt.Errorf("file type: %v", err)
		}
		defer f.Close()
		if _, err := io.Copy(f, fr); err != nil {
			return fmt.Errorf("Copying %s to %s: %v", br, name, err)
		}
		if err := setFileMeta(name, b); err != nil {
			log.Print(err)
		}
		return nil
	case "symlink":
		if *flagSkipIrregular {
			return nil
		}
		sf, ok := b.AsStaticFile()
		if !ok {
			return errors.New("blob is not a static file")
		}
		sl, ok := sf.AsStaticSymlink()
		if !ok {
			return errors.New("blob is not a symlink")
		}
		name := filepath.Join(targ, sl.FileName())
		if _, err := os.Lstat(name); err == nil {
			if *flagVerbose {
				log.Printf("Skipping creating symbolic link %s: A file with that name exists", name)
			}
			return nil
		}
		target := sl.SymlinkTargetString()
		if target == "" {
			return errors.New("symlink without target")
		}

		// On Windows, os.Symlink isn't yet implemented as of Go 1.3.
		// See https://code.google.com/p/go/issues/detail?id=5750
		err := os.Symlink(target, name)
		// We won't call setFileMeta for a symlink because:
		// the permissions of a symlink do not matter and Go's
		// os.Chtimes always dereferences (does not act on the
		// symlink but its target).
		return err
	case "fifo":
		if *flagSkipIrregular {
			return nil
		}
		name := filepath.Join(targ, b.FileName())

		sf, ok := b.AsStaticFile()
		if !ok {
			return errors.New("blob is not a static file")
		}
		_, ok = sf.AsStaticFIFO()
		if !ok {
			return errors.New("blob is not a static FIFO")
		}
		if _, err := os.Lstat(name); err == nil {
			log.Printf("Skipping FIFO %s: A file with that name already exists", name)
			return nil
		}

		err = osutil.Mkfifo(name, 0600)
		if err == osutil.ErrNotSupported {
			log.Printf("Skipping FIFO %s: Unsupported filetype", name)
			return nil
		}
		if err != nil {
			return fmt.Errorf("%s: osutil.Mkfifo(): %v", name, err)
		}

		if err := setFileMeta(name, b); err != nil {
			log.Print(err)
		}
		return nil
	case "socket":
		if *flagSkipIrregular {
			return nil
		}
		name := filepath.Join(targ, b.FileName())

		sf, ok := b.AsStaticFile()
		if !ok {
			return errors.New("blob is not a static file")
		}
		_, ok = sf.AsStaticSocket()
		if !ok {
			return errors.New("blob is not a static socket")
		}
		if _, err := os.Lstat(name); err == nil {
			log.Printf("Skipping socket %s: A file with that name already exists", name)
			return nil
		}

		err = osutil.Mksocket(name)
		if err == osutil.ErrNotSupported {
			log.Printf("Skipping socket %s: Unsupported filetype", name)
			return nil
		}
		if err != nil {
			return fmt.Errorf("%s: %v", name, err)
		}

		if err := setFileMeta(name, b); err != nil {
			log.Print(err)
		}
		return nil
	default:
		return errors.New("unknown blob type: " + b.Type())
	}
	panic("unreachable")
}
func main() {
	client.AddFlags()
	flag.Parse()

	if len(*flagVia) > 0 {
		vs := strings.Split(*flagVia, ",")
		viaRefs = make([]*blobref.BlobRef, len(vs))
		for i, sbr := range vs {
			viaRefs[i] = blobref.Parse(sbr)
			if viaRefs[i] == nil {
				log.Fatalf("Invalid -via blobref: %q", sbr)
			}
			if *flagVerbose {
				log.Printf("via: %s", sbr)
			}
		}
	}

	if *flagGraph && flag.NArg() != 1 {
		log.Fatalf("The --graph option requires exactly one parameter.")
	}

	cl := client.NewOrFail()

	for n := 0; n < flag.NArg(); n++ {
		arg := flag.Arg(n)
		br := blobref.Parse(arg)
		if br == nil {
			log.Fatalf("Failed to parse argument %q as a blobref.", arg)
		}
		if *flagGraph {
			printGraph(cl, br)
			return
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exists.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			var rc io.ReadCloser
			var err error
			if *flagContents {
				seekFetcher := blobref.SeekerFromStreamingFetcher(cl)
				rc, err = schema.NewFileReader(seekFetcher, br)
			} else {
				rc, err = fetch(cl, br)
			}
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
			return
		}
		if err := smartFetch(cl, *flagOutput, br); err != nil {
			log.Fatal(err)
		}
	}
}
// ServeHTTP streams a zip archive of all the files "under"
// zh.root: that is, all the files pointed to by file permanodes
// that are direct members of zh.root, or reached recursively through
// directory permanodes and permanode members.
// To build the full path of a file in a collection, it uses
// the collection title if present, its blobRef otherwise, as
// a directory name.
func (zh *zipHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	// TODO: use http.ServeContent, so Range requests work and downloads can be resumed.
	// Will require calculating the zip length once first (ideally as cheaply as possible,
	// with dummy counting writer and dummy all-zero-byte-files of a fixed size),
	// and then making a dummy ReadSeeker for ServeContent that can seek to the end,
	// and then seek back to the beginning, but then seeks forward make it remember
	// to skip that many bytes from the archive/zip writer when answering Reads.
	if !httputil.IsGet(req) {
		http.Error(rw, "Invalid method", http.StatusMethodNotAllowed)
		return
	}
	bf, err := zh.blobList("/", zh.root)
	if err != nil {
		log.Printf("Could not serve zip for %v: %v", zh.root, err)
		http.Error(rw, "Server error", http.StatusInternalServerError)
		return
	}
	blobFiles := renameDuplicates(bf)

	// TODO(mpl): streaming directly won't work on appengine if the size goes
	// over 32 MB. Deal with that.
	h := rw.Header()
	h.Set("Content-Type", "application/zip")
	filename := zh.filename
	if filename == "" {
		filename = "download.zip"
	}
	h.Set("Content-Disposition", mime.FormatMediaType("attachment", map[string]string{"filename": filename}))
	zw := zip.NewWriter(rw)
	etag := sha1.New()
	for _, file := range blobFiles {
		etag.Write([]byte(file.blobRef.String()))
	}
	h.Set("Etag", fmt.Sprintf(`"%x"`, etag.Sum(nil)))

	for _, file := range blobFiles {
		fr, err := schema.NewFileReader(zh.storageSeekFetcher(), file.blobRef)
		if err != nil {
			log.Printf("Can not add %v in zip, not a file: %v", file.blobRef, err)
			http.Error(rw, "Server error", http.StatusInternalServerError)
			return
		}
		f, err := zw.CreateHeader(
			&zip.FileHeader{
				Name:   file.path,
				Method: zip.Store,
			})
		if err != nil {
			log.Printf("Could not create %q in zip: %v", file.path, err)
			http.Error(rw, "Server error", http.StatusInternalServerError)
			return
		}
		_, err = io.Copy(f, fr)
		fr.Close()
		if err != nil {
			log.Printf("Could not zip %q: %v", file.path, err)
			return
		}
	}
	err = zw.Close()
	if err != nil {
		log.Printf("Could not close zipwriter: %v", err)
		return
	}
}
func (im *imp) Run(ctx *importer.RunContext) error {
	clientId, secret, err := ctx.Credentials()
	if err != nil {
		return fmt.Errorf("no API credentials: %v", err)
	}
	acctNode := ctx.AccountNode()
	accessToken := acctNode.Attr(importer.AcctAttrAccessToken)
	accessSecret := acctNode.Attr(importer.AcctAttrAccessTokenSecret)
	if accessToken == "" || accessSecret == "" {
		return errors.New("access credentials not found")
	}
	r := &run{
		RunContext:  ctx,
		im:          im,
		incremental: !forceFullImport && acctNode.Attr(importer.AcctAttrCompletedVersion) == runCompleteVersion,

		oauthClient: &oauth.Client{
			TemporaryCredentialRequestURI: temporaryCredentialRequestURL,
			ResourceOwnerAuthorizationURI: resourceOwnerAuthorizationURL,
			TokenRequestURI:               tokenRequestURL,
			Credentials: oauth.Credentials{
				Token:  clientId,
				Secret: secret,
			},
		},
		accessCreds: &oauth.Credentials{
			Token:  accessToken,
			Secret: accessSecret,
		},
	}

	userID := acctNode.Attr(importer.AcctAttrUserID)
	if userID == "" {
		return errors.New("UserID hasn't been set by account setup.")
	}

	skipAPITweets, _ := strconv.ParseBool(os.Getenv("CAMLI_TWITTER_SKIP_API_IMPORT"))
	if !skipAPITweets {
		if err := r.importTweets(userID); err != nil {
			return err
		}
	}

	zipRef := acctNode.Attr(acctAttrTweetZip)
	zipDoneVal := zipRef + ":" + runCompleteVersion
	if zipRef != "" && !(r.incremental && acctNode.Attr(acctAttrZipDoneVersion) == zipDoneVal) {
		zipbr, ok := blob.Parse(zipRef)
		if !ok {
			return fmt.Errorf("invalid zip file blobref %q", zipRef)
		}
		fr, err := schema.NewFileReader(r.Host.BlobSource(), zipbr)
		if err != nil {
			return fmt.Errorf("error opening zip %v: %v", zipbr, err)
		}
		defer fr.Close()
		zr, err := zip.NewReader(fr, fr.Size())
		if err != nil {
			return fmt.Errorf("Error opening twitter zip file %v: %v", zipRef, err)
		}
		if err := r.importTweetsFromZip(userID, zr); err != nil {
			return err
		}
		if err := acctNode.SetAttrs(acctAttrZipDoneVersion, zipDoneVal); err != nil {
			return err
		}
	}

	r.mu.Lock()
	anyErr := r.anyErr
	r.mu.Unlock()

	if !anyErr {
		if err := acctNode.SetAttrs(importer.AcctAttrCompletedVersion, runCompleteVersion); err != nil {
			return err
		}
	}

	return nil
}
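// zip.NewReader needs an io.ReaderAt plus the archive size, and the importer
// above hands it the *schema.FileReader directly. That only compiles because
// FileReader implements ReadAt; this compile-time assertion documents the
// dependency.
var _ io.ReaderAt = (*schema.FileReader)(nil)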
// smartFetch the things that blobs point to, not just blobs.
func smartFetch(src blob.Fetcher, targ string, br blob.Ref) error {
	rc, err := fetch(src, br)
	if err != nil {
		return err
	}
	defer rc.Close()

	sniffer := index.NewBlobSniffer(br)
	_, err = io.CopyN(sniffer, rc, sniffSize)
	if err != nil && err != io.EOF {
		return err
	}

	sniffer.Parse()
	b, ok := sniffer.SchemaBlob()
	if !ok {
		if *flagVerbose {
			log.Printf("Fetching opaque data %v into %q", br, targ)
		}
		// opaque data - put it in a file
		f, err := os.Create(targ)
		if err != nil {
			return fmt.Errorf("opaque: %v", err)
		}
		defer f.Close()
		body, _ := sniffer.Body()
		r := io.MultiReader(bytes.NewReader(body), rc)
		_, err = io.Copy(f, r)
		return err
	}

	switch b.Type() {
	case "directory":
		dir := filepath.Join(targ, b.FileName())
		if *flagVerbose {
			log.Printf("Fetching directory %v into %s", br, dir)
		}
		if err := os.MkdirAll(dir, b.FileMode()); err != nil {
			return err
		}
		if err := setFileMeta(dir, b); err != nil {
			log.Print(err)
		}
		entries, ok := b.DirectoryEntries()
		if !ok {
			return fmt.Errorf("bad entries blobref in dir %v", b.BlobRef())
		}
		return smartFetch(src, dir, entries)
	case "static-set":
		if *flagVerbose {
			log.Printf("Fetching directory entries %v into %s", br, targ)
		}
		// directory entries
		const numWorkers = 10
		type work struct {
			br   blob.Ref
			errc chan<- error
		}
		members := b.StaticSetMembers()
		workc := make(chan work, len(members))
		defer close(workc)
		for i := 0; i < numWorkers; i++ {
			go func() {
				for wi := range workc {
					wi.errc <- smartFetch(src, targ, wi.br)
				}
			}()
		}
		var errcs []<-chan error
		for _, mref := range members {
			errc := make(chan error, 1)
			errcs = append(errcs, errc)
			workc <- work{mref, errc}
		}
		for _, errc := range errcs {
			if err := <-errc; err != nil {
				return err
			}
		}
		return nil
	case "file":
		fr, err := schema.NewFileReader(src, br)
		if err != nil {
			return fmt.Errorf("NewFileReader: %v", err)
		}
		fr.LoadAllChunks()
		defer fr.Close()

		name := filepath.Join(targ, b.FileName())

		// Compare against the schema's size; comparing fi.Size() to itself
		// (as an earlier revision did) was always true.
		if fi, err := os.Stat(name); err == nil && fi.Size() == fr.Size() {
			if *flagVerbose {
				log.Printf("Skipping %s; already exists.", name)
			}
			return nil
		}

		if *flagVerbose {
			log.Printf("Writing %s to %s ...", br, name)
		}

		f, err := os.Create(name)
		if err != nil {
			return fmt.Errorf("file type: %v", err)
		}
		defer f.Close()
		if _, err := io.Copy(f, fr); err != nil {
			return fmt.Errorf("Copying %s to %s: %v", br, name, err)
		}
		if err := setFileMeta(name, b); err != nil {
			log.Print(err)
		}
		return nil
	case "symlink":
		sf, ok := b.AsStaticFile()
		if !ok {
			return errors.New("blob is not a static file")
		}
		sl, ok := sf.AsStaticSymlink()
		if !ok {
			return errors.New("blob is not a symlink")
		}
		name := filepath.Join(targ, sl.FileName())
		if _, err := os.Lstat(name); err == nil {
			if *flagVerbose {
				log.Printf("Skipping creating symbolic link %s: A file with that name exists", name)
			}
			return nil
		}
		target := sl.SymlinkTargetString()
		if target == "" {
			return errors.New("symlink without target")
		}

		// TODO (marete): The Go docs promise that everything
		// in pkg os should work the same everywhere. Not true
		// for os.Symlink() at the moment. See what to do for
		// windows here.
		err := os.Symlink(target, name)
		// We won't call setFileMeta for a symlink because:
		// the permissions of a symlink do not matter and Go's
		// os.Chtimes always dereferences (does not act on the
		// symlink but its target).
		return err
	default:
		return errors.New("unknown blob type: " + b.Type())
	}
	panic("unreachable")
}
func main() {
	client.AddFlags()
	flag.Parse()

	if *flagGraph && flag.NArg() != 1 {
		log.Fatalf("The --graph option requires exactly one parameter.")
	}

	var cl *client.Client
	var items []*blobref.BlobRef

	if *flagShared != "" {
		if client.ExplicitServer() != "" {
			log.Fatal("Can't use --shared with an explicit blobserver; blobserver is implicit from the --shared URL.")
		}
		if flag.NArg() != 0 {
			log.Fatal("No arguments permitted when using --shared")
		}
		cl1, target, err := client.NewFromShareRoot(*flagShared)
		if err != nil {
			log.Fatal(err)
		}
		cl = cl1
		items = append(items, target)
	} else {
		cl = client.NewOrFail()
		for n := 0; n < flag.NArg(); n++ {
			arg := flag.Arg(n)
			br := blobref.Parse(arg)
			if br == nil {
				log.Fatalf("Failed to parse argument %q as a blobref.", arg)
			}
			items = append(items, br)
		}
	}

	httpStats := &httputil.StatsTransport{
		VerboseLog: *flagHTTP,
	}
	if *flagHTTP {
		httpStats.Transport = &http.Transport{
			Dial: func(net_, addr string) (net.Conn, error) {
				log.Printf("Dialing %s", addr)
				return net.Dial(net_, addr)
			},
		}
	}
	cl.SetHTTPClient(&http.Client{Transport: httpStats})

	// Put a local disk cache in front of the HTTP client.
	// TODO: this could be better about proactively cleaning things.
	// Fetching 2 TB shouldn't write 2 TB to /tmp before it's done.
	// Maybe the cache needs an LRU/size cap.
	cacheDir, err := ioutil.TempDir("", "camlicache")
	if err != nil {
		log.Fatalf("Error creating temp cache directory: %v\n", err)
	}
	defer os.RemoveAll(cacheDir)
	diskcache, err := localdisk.New(cacheDir)
	if err != nil {
		log.Fatalf("Error setting up local disk cache: %v", err)
	}
	if *flagVerbose {
		log.Printf("Using temp blob cache directory %s", cacheDir)
	}
	fetcher := cacher.NewCachingFetcher(diskcache, cl)

	for _, br := range items {
		if *flagGraph {
			printGraph(fetcher, br)
			return
		}
		if *flagCheck {
			// TODO: do HEAD requests checking if the blobs exists.
			log.Fatal("not implemented")
			return
		}
		if *flagOutput == "-" {
			var rc io.ReadCloser
			var err error
			if *flagContents {
				seekFetcher := blobref.SeekerFromStreamingFetcher(fetcher)
				rc, err = schema.NewFileReader(seekFetcher, br)
				if err == nil {
					rc.(*schema.FileReader).LoadAllChunks()
				}
			} else {
				rc, err = fetch(fetcher, br)
			}
			if err != nil {
				log.Fatal(err)
			}
			defer rc.Close()
			if _, err := io.Copy(os.Stdout, rc); err != nil {
				log.Fatalf("Failed reading %q: %v", br, err)
			}
		} else {
			if err := smartFetch(fetcher, *flagOutput, br); err != nil {
				log.Fatal(err)
			}
		}
	}

	if *flagVerbose {
		log.Printf("HTTP requests: %d\n", httpStats.Requests())
	}
}
func (ih *ImageHandler) scaleImage(buf *bytes.Buffer, file *blobref.BlobRef) (format string, err error) {
	mw, mh := ih.MaxWidth, ih.MaxHeight

	fr, err := schema.NewFileReader(ih.storageSeekFetcher(), file)
	if err != nil {
		return format, err
	}
	defer fr.Close()

	_, err = io.Copy(buf, fr)
	if err != nil {
		return format, fmt.Errorf("image resize: error reading image %s: %v", file, err)
	}
	i, format, err := images.Decode(bytes.NewReader(buf.Bytes()), nil)
	if err != nil {
		return format, err
	}
	b := i.Bounds()

	// TODO(mpl): sort the useBytesUnchanged story out,
	// so that a rotation/flip is not being ignored
	// when there was no rescaling required.
	useBytesUnchanged := true

	isSquare := b.Dx() == b.Dy()
	if ih.Square && !isSquare {
		useBytesUnchanged = false
		i = squareImage(i)
		b = i.Bounds()
	}

	// only do downscaling, otherwise just serve the original image
	if mw < b.Dx() || mh < b.Dy() {
		useBytesUnchanged = false

		const huge = 2400
		// If it's gigantic, it's more efficient to downsample first
		// and then resize; resizing will smooth out the roughness.
		// (trusting the moustachio guys on that one).
		if b.Dx() > huge || b.Dy() > huge {
			w, h := mw*2, mh*2
			if b.Dx() > b.Dy() {
				w = b.Dx() * h / b.Dy()
			} else {
				h = b.Dy() * w / b.Dx()
			}
			i = resize.Resample(i, i.Bounds(), w, h)
			b = i.Bounds()
		}
		// conserve proportions. use the smallest of the two as the decisive one.
		if mw > mh {
			mw = b.Dx() * mh / b.Dy()
		} else {
			mh = b.Dy() * mw / b.Dx()
		}
	}

	if !useBytesUnchanged {
		i = resize.Resize(i, b, mw, mh)
		// Encode as a new image
		buf.Reset()
		switch format {
		case "jpeg":
			err = jpeg.Encode(buf, i, nil)
		default:
			err = png.Encode(buf, i)
		}
		if err != nil {
			return format, err
		}
	}
	return format, nil
}
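// The proportion-conserving step above can be read in isolation: the
// smaller of the two constraints governs, and the other dimension is
// recomputed from the source aspect ratio. The helper name below is made
// up for illustration; it mirrors the arithmetic in scaleImage.
func scaledDims(srcW, srcH, maxW, maxH int) (w, h int) {
	w, h = maxW, maxH
	if w > h {
		w = srcW * h / srcH // e.g. a 4000x3000 source into 800x600 yields 800x600
	} else {
		h = srcH * w / srcW
	}
	return w, h
}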