func (sto *swiftStorage) StatBlobs(dest chan<- blob.SizedInfoRef, blobs []blob.Ref) error { var wg syncutil.Group for _, br := range blobs { br := sto.createPathRef(br) statGate.Start() wg.Go(func() error { defer statGate.Done() ref, cont := sto.refContainer(br) log.Println("REF:", ref, cont) info, _, err := sto.conn.Object(cont, ref) log.Println("Stat:", info, err, ref, br.Path) if err == nil { dest <- blob.SizedInfoRef{ Ref: br, Size: uint32(info.Bytes), MD5: info.Hash, } return nil } if err == swift.ObjectNotFound { return nil } return fmt.Errorf("error statting %v: %v", br, err) }) } return wg.Err() }
// ReceiveBlob copies the blob from source into a temporary slurper
// (which accumulates an MD5 of the payload while copying), then uploads
// it to swift under the name/container derived from b. If the first
// upload fails because the target container does not exist, the
// container is created and the upload is retried exactly once.
// Returns the SizedRef of the stored blob, with its hash set from the
// slurper's MD5.
func (sto *swiftStorage) ReceiveBlob(b blob.Ref, source io.Reader) (sr blob.SizedRef, err error) {
	slurper := newSwiftSlurper(b)
	defer slurper.Cleanup()
	size, err := io.Copy(slurper, source)
	if err != nil {
		return sr, err
	}
	// Hex MD5 of the payload, handed to swift so it can verify the upload.
	hash := hex.EncodeToString(slurper.md5.Sum(nil))
	name, cont := sto.refContainer(b)
	retries := 1
retry:
	_, err = sto.conn.ObjectPut(cont, name, slurper, false, hash, "", nil)
	if err != nil {
		// assume both of these mean container not found in this context. Create the container first
		if retries > 0 && (err == swift.ObjectNotFound || err == swift.ContainerNotFound) {
			retries--
			if err = sto.createContainer(cont); err != nil {
				return sr, err
			}
			// Rewind so the retried ObjectPut re-reads the full payload.
			// NOTE(review): the Seek error is ignored — confirm the
			// slurper's Seek cannot fail for offset 0.
			slurper.Seek(0, 0)
			goto retry
		}
		return sr, err
	}
	ref := sto.createPathRef(b)
	ref.SetHash(slurper.md5)
	log.Println("Create: ", ref)
	return blob.SizedRef{Ref: ref, Size: uint32(size)}, nil
}
func main() { flag.Parse() log.Println("start uploader") if flag.NArg() == 0 { log.Fatal("Expected file argument missing") } c, err := client.New(*serverAddr) if err != nil { log.Fatal(err) } res, _ := c.MultiUploader(flag.Args()) var enc client.ResourceEncoder switch *outputType { case "json": enc = client.NewJSONEncoder(os.Stdout) default: os.Exit(0) } err = enc.Encode(res) if err != nil { log.Fatal(err) } }
func main() { flag.Usage = usage flag.Parse() log.Println("start policy service …") if *version { fmt.Fprintln(os.Stdout, Version) return } if *help { flag.Usage() os.Exit(1) } runtime.GOMAXPROCS(runtime.NumCPU()) if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } err := policy.ListenAndServe(*laddr) if err != nil { log.Errorln(err) } monitoring.MeasuringPointsPrintAll() }
func main() { flag.Usage = usage flag.Parse() log.Println("start uploader") if *help { flag.Usage() os.Exit(1) } ctx, err := newCtx(*serverAddr, flag.Args()) if err != nil { log.Fatal(err) } ctx.search() err = ctx.upload() if err != nil { log.Fatal(err) } ctx.replace() ctx.WriteTo(os.Stdout) }
func main() { flag.Usage = usage flag.Parse() log.Println("start blobserver service …") if *version { fmt.Fprintln(os.Stdout, Version) return } if *help { flag.Usage() os.Exit(1) } conf, err := config.ReadFile(*configFilename) if err != nil { log.Fatal(err) } if conf.Listen == "" && *laddr == "" { log.Fatal("Listen address required") } else if conf.Listen == "" { conf.Listen = *laddr } runtime.GOMAXPROCS(runtime.NumCPU()) if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } storage, err := blobserver.CreateStorage(conf) if err != nil { log.Fatalf("error instantiating storage for type %s: %v", conf.StorageType(), err) } log.Printf("Using `%s` storage", conf.StorageType()) err = server.ListenAndServe(*laddr, storage) if err != nil { log.Errorln(err) } monitoring.MeasuringPointsPrintAll() }
func imageHandle(w http.ResponseWriter, r *http.Request, f ImageFilter) { start := time.Now() m := mux.Vars(r) log.Println(m["fileinfo"]) fi, err := f.SizeParser(m["fileinfo"]) if err != nil { writeError(w, err.Error(), 400) return } log.Println(fi) data, err := imageBackend.ReadFile(fi.filepath) if err != nil { writeError(w, err.Error(), 400) return } mimeType := http.DetectContentType(data) if err := validContentType(mimeType); err != nil { writeError(w, err.Error(), 400) return } thumb, err := f.Filter(data, fi) if err != nil { writeError(w, err.Error(), 400) return } newMimeType := http.DetectContentType(thumb) w.Header().Set("Content-Type", newMimeType) w.Header().Set("Content-Length", strconv.Itoa(len(thumb))) w.Write(thumb) log.Printf("Image Handle OK %v", time.Since(start)) }
func (a *authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err := a.loadSession(r); err != nil { log.Println("handler error: ", err) if a.mustAuth { log.Println("Status Unauthorized") w.WriteHeader(http.StatusUnauthorized) return } } a.handler.ServeHTTP(w, r) //rec := httptest.NewRecorder() //a.handler.ServeHTTP(rec, r) //for k, v := range rec.Header() { // w.Header()[k] = v //} //w.Header().Set("X-Authenticated-By", "kogama") //w.WriteHeader(rec.Code) //rec.Body.WriteTo(w) }
func (sto *swiftStorage) Fetch(br blob.Ref) (file io.ReadCloser, size uint32, err error) { ref, cont := sto.refContainer(br) log.Println("Fetch: ", ref, cont) f, h, err := sto.conn.ObjectOpen(cont, ref, true, nil) if err != nil { return } n, err := getInt64FromHeader(h, "Content-Length") if err != nil { return } return f, uint32(n), err }
func main() { flag.Parse() log.Println("start blobperf") queue := make(chan int) for i := 0; i < *workers; i++ { go uploader(queue) } lastPrint = time.Now() for { queue <- 1 } }
func (sto *swiftStorage) RemoveBlobs(blobs []blob.Ref) error { var wg syncutil.Group for _, br := range blobs { br := br removeGate.Start() wg.Go(func() error { defer removeGate.Done() ref, cont := sto.refContainer(br) log.Println("Remove: ", cont, ref) return sto.conn.ObjectDelete(cont, ref) }) } return wg.Err() }
func main() { flag.Usage = usage flag.Parse() if *version { fmt.Fprintln(os.Stdout, Version) return } if *help { flag.Usage() os.Exit(1) } if *laddr == "" { fmt.Fprintln(os.Stderr, "listen address required") os.Exit(1) } runtime.GOMAXPROCS(runtime.NumCPU()) if *cpuprofile != "" { f, err := os.Create(*cpuprofile) if err != nil { log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } var imgBackend backend.ImageBackend if *fsBaseDir != "" { imgBackend = backend.Dir(*fsBaseDir) } else if *awsAccessKeyId != "" && *awsSecretAccessKey != "" && *awsRegion != "" && *awsBucket != "" { imgBackend = backend.NewS3(*awsAccessKeyId, *awsSecretAccessKey, *awsRegion, *awsBucket) } else { log.Errorln("Expected either aws-* or fs-* arguments") os.Exit(1) } err := server.ListenAndServe(*laddr, imgBackend) if err != nil { log.Println(err) } }
func currentRegion(r *http.Request) *Region { cookie, err := r.Cookie("region") if err != nil { log.Println(err) } else { codename := cookie.Value for k, v := range config.Regions { if codename == k { b := *v return &b } } } for _, v := range config.Regions { b := *v return &b } panic("no regions") }
func (c *Client) multiUpload(results Resources, toUpload []string) error { req, err := c.multiMultipartRequest("/blob/upload/", toUpload) if err != nil { return err } res, err := http.DefaultClient.Do(req) if err != nil { return err } if res.StatusCode != 201 { return fmt.Errorf("Unexpected status code %d", res.StatusCode) } ur := new(protocol.UploadResponse) parseResponse(res, ur) if len(ur.Received) != len(toUpload) { return fmt.Errorf("Expected %d received got %d", len(toUpload), len(ur.Received)) } for _, rec := range ur.Received { log.Println("got:", rec.Path) cur := results.findByPath(rec.Path) if cur == nil { return fmt.Errorf("upload error %s", rec.Path) } cur.URL = c.CDNBaseURL + rec.Path } return nil }
// handleMultiPartUpload reads every part of a multipart/form-data POST,
// stores each part as a blob via blobReceiver, and returns a
// *protocol.UploadResponse listing each received blob's ref, size and
// (when the receiver exposes a hash) hex MD5. Any failure is returned
// as an httpError with an appropriate status code.
func handleMultiPartUpload(req *http.Request, blobReceiver blobserver.Storage) (interface{}, error) {
	res := new(protocol.UploadResponse)
	receivedBlobs := make([]blob.SizedRef, 0, 4)
	multipart, err := req.MultipartReader()
	if err != nil {
		return nil, newHTTPError(fmt.Sprintf("Expected multipart/form-data POST request; %v", err), http.StatusBadRequest)
	}
	// "use-filename" switches the blob ref to be derived from the
	// uploaded file's name instead of a generated ref.
	useFilename := false
	req.ParseForm()
	if req.FormValue("use-filename") != "" {
		useFilename = true
	}
	for {
		mimePart, err := multipart.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, newHTTPError(fmt.Sprintf("Error reading multipart section: %v", err), http.StatusBadRequest)
		}
		contentDisposition, _, err := mime.ParseMediaType(mimePart.Header.Get("Content-Disposition"))
		if err != nil {
			return nil, newHTTPError("Invalid Content-Disposition", http.StatusBadRequest)
		}
		if contentDisposition != "form-data" {
			return nil, newHTTPError(fmt.Sprintf("Expected Content-Disposition of \"form-data\"; got %q", contentDisposition), http.StatusBadRequest)
		}
		var ref blob.Ref
		// Read at most MaxBlobSize+1 bytes: reaching tooBig proves the
		// part exceeded the allowed blob size.
		var tooBig int64 = blobserver.MaxBlobSize + 1
		var readBytes int64
		filename := mimePart.FileName()
		log.Println("filename:", filename)
		if useFilename {
			log.Println("Use filename")
			ref = blob.NewRefFilename(filename)
		} else {
			ref = blob.NewRef(filename)
		}
		blobGot, err := blobReceiver.ReceiveBlob(ref, &readerutil.CountingReader{
			Reader: io.LimitReader(mimePart, tooBig),
			N:      &readBytes,
		})
		if readBytes == tooBig {
			// Oversized part: deliberately override whatever
			// ReceiveBlob returned with a size-limit error.
			err = fmt.Errorf("blob over the limit of %d bytes", blobserver.MaxBlobSize)
		}
		if err != nil {
			var errmsg string
			// Include blob details in the error only at info verbosity
			// or higher.
			if log.Severity >= log.LevelInfo {
				errmsg = fmt.Sprintf("Error receiving blob (read bytes: %d) %v: %v\n", readBytes, ref, err)
			} else {
				errmsg = fmt.Sprintf("Error receiving blob: %v\n", err)
			}
			return nil, newHTTPError(errmsg, http.StatusInternalServerError)
		}
		log.Printf("Received blob %v\n", blobGot)
		receivedBlobs = append(receivedBlobs, blobGot)
	}
	// Build the response from everything successfully received.
	for _, got := range receivedBlobs {
		rv := protocol.RefInfo{
			Ref:  got.Ref,
			Size: uint32(got.Size),
		}
		if h := got.Hash(); h != nil {
			rv.MD5 = hex.EncodeToString(h.Sum(nil))
		}
		res.Received = append(res.Received, rv)
	}
	return res, nil
}
func handleConfig(w http.ResponseWriter, req *http.Request, storage blobserver.StorageConfiger) { res := new(protocol.ConfigResponse) res.Data = storage.Config() log.Println("config:", res) httputil.ReturnJSON(w, res) }
// MeasureHandler adds measuring to http requests func MeasureHandler(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { log.Println("MeasureHandler is deprecated") h.ServeHTTP(w, r) }) }