func (c *Client) PutObject(key, bucket string, md5 hash.Hash, size int64, body io.Reader) error { req := newReq(c.keyURL(bucket, key)) req.Method = "PUT" req.ContentLength = size if md5 != nil { b64 := new(bytes.Buffer) encoder := base64.NewEncoder(base64.StdEncoding, b64) encoder.Write(md5.Sum(nil)) encoder.Close() req.Header.Set("Content-MD5", b64.String()) } c.Auth.SignRequest(req) req.Body = ioutil.NopCloser(body) res, err := c.transport().RoundTrip(req) if res != nil && res.Body != nil { defer httputil.CloseBody(res.Body) } if err != nil { return err } if res.StatusCode != http.StatusOK { // res.Write(os.Stderr) return fmt.Errorf("Got response code %d from s3", res.StatusCode) } return nil }
func (c *Client) PutObject(name, bucket string, md5 hash.Hash, size int64, body io.Reader) error { req := newReq("https://" + bucket + "." + c.hostname() + "/" + name) req.Method = "PUT" req.ContentLength = size if md5 != nil { b64 := new(bytes.Buffer) encoder := base64.NewEncoder(base64.StdEncoding, b64) encoder.Write(md5.Sum(nil)) encoder.Close() req.Header.Set("Content-MD5", b64.String()) } c.Auth.SignRequest(req) req.Body = ioutil.NopCloser(body) res, err := c.httpClient().Do(req) if res != nil && res.Body != nil { defer httputil.CloseBody(res.Body) } if err != nil { return err } if res.StatusCode != 200 { res.Write(os.Stderr) return fmt.Errorf("Got response code %d from s3", res.StatusCode) } return nil }
// Lookup returns rectangles for the given address. Currently the only // implementation is the Google geocoding service. func Lookup(ctx *context.Context, address string) ([]Rect, error) { mu.RLock() rects, ok := cache[address] mu.RUnlock() if ok { return rects, nil } rectsi, err := sf.Do(address, func() (interface{}, error) { // TODO: static data files from OpenStreetMap, Wikipedia, etc? urlStr := "https://maps.googleapis.com/maps/api/geocode/json?address=" + url.QueryEscape(address) + "&sensor=false" res, err := ctx.HTTPClient().Get(urlStr) if err != nil { return nil, err } defer httputil.CloseBody(res.Body) rects, err := decodeGoogleResponse(res.Body) log.Printf("Google geocode lookup (%q) = %#v, %v", address, rects, err) if err == nil { mu.Lock() cache[address] = rects mu.Unlock() } return rects, err }) if err != nil { return nil, err } return rectsi.([]Rect), nil }
func (c *Client) Buckets() ([]*Bucket, error) { req := newReq("https://" + c.hostname() + "/") c.Auth.SignRequest(req) res, err := c.transport().RoundTrip(req) if err != nil { return nil, err } defer httputil.CloseBody(res.Body) if res.StatusCode != http.StatusOK { return nil, fmt.Errorf("s3: Unexpected status code %d fetching bucket list", res.StatusCode) } return parseListAllMyBuckets(res.Body) }
func doGet(ctx *context.Context, url string) ([]byte, error) { req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, err } res, err := ctx.HTTPClient().Do(req) if err != nil { log.Printf("Error fetching %s: %v", url, err) return nil, err } defer httputil.CloseBody(res.Body) if res.StatusCode != http.StatusOK { return nil, fmt.Errorf("Get request on %s failed with: %s", url, res.Status) } return ioutil.ReadAll(io.LimitReader(res.Body, 8<<20)) }
// ListBucket returns 0 to maxKeys (inclusive) items from the provided // bucket. Keys before startAt will be skipped. (This is the S3 // 'marker' value). If the length of the returned items is equal to // maxKeys, there is no indication whether or not the returned list is // truncated. func (c *Client) ListBucket(bucket string, startAt string, maxKeys int) (items []*Item, err error) { if maxKeys < 0 { return nil, errors.New("invalid negative maxKeys") } marker := startAt for len(items) < maxKeys { fetchN := maxKeys - len(items) if fetchN > maxList { fetchN = maxList } var bres listBucketResults url_ := fmt.Sprintf("https://%s.%s/?marker=%s&max-keys=%d", bucket, c.hostname(), url.QueryEscape(marker), fetchN) req := newReq(url_) c.Auth.SignRequest(req) res, err := c.httpClient().Do(req) if err != nil { return nil, err } err = xml.NewDecoder(res.Body).Decode(&bres) httputil.CloseBody(res.Body) if err != nil { return nil, err } for _, it := range bres.Contents { if it.Key == marker && it.Key != startAt { // Skip first dup on pages 2 and higher. continue } if it.Key < startAt { return nil, fmt.Errorf("Unexpected response from Amazon: item key %q but wanted greater than %q", it.Key, startAt) } items = append(items, it) marker = it.Key } if !bres.IsTruncated { break } } return items, nil }
// EnumerateObjects lists the objects in a bucket. // If after is non-empty, listing will begin with lexically greater object names. // If limit is non-zero, the length of the list will be limited to that number. func (gsa *Client) EnumerateObjects(bucket, after string, limit int) ([]SizedObject, error) { // Build url, with query params var params []string if after != "" { params = append(params, "marker="+url.QueryEscape(after)) } if limit > 0 { params = append(params, fmt.Sprintf("max-keys=%v", limit)) } query := "" if len(params) > 0 { query = "?" + strings.Join(params, "&") } resp, err := gsa.simpleRequest("GET", gsAccessURL+"/"+bucket+"/"+query) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("Bad enumerate response code: %v", resp.Status) } var xres struct { Contents []SizedObject } defer httputil.CloseBody(resp.Body) if err = xml.NewDecoder(resp.Body).Decode(&xres); err != nil { return nil, err } // Fill in the Bucket on all the SizedObjects for _, o := range xres.Contents { o.Bucket = bucket } return xres.Contents, nil }
// ListBucket returns 0 to maxKeys (inclusive) items from the provided // bucket. The items will have keys greater than the provided after, which // may be empty. (Note: this is not greater than or equal to, like the S3 // API's 'marker' parameter). If the length of the returned items is equal // to maxKeys, there is no indication whether or not the returned list is // truncated. func (c *Client) ListBucket(bucket string, after string, maxKeys int) (items []*Item, err error) { if maxKeys < 0 { return nil, errors.New("invalid negative maxKeys") } const s3APIMaxFetch = 1000 for len(items) < maxKeys { fetchN := maxKeys - len(items) if fetchN > s3APIMaxFetch { fetchN = s3APIMaxFetch } var bres listBucketResults url_ := fmt.Sprintf("http://%s.%s/?marker=%s&max-keys=%d", bucket, c.hostname(), url.QueryEscape(marker(after)), fetchN) req := newReq(url_) c.Auth.SignRequest(req) res, err := c.httpClient().Do(req) if err != nil { return nil, err } err = xml.NewDecoder(res.Body).Decode(&bres) httputil.CloseBody(res.Body) if err != nil { return nil, err } for _, it := range bres.Contents { if it.Key <= after { return nil, fmt.Errorf("Unexpected response from Amazon: item key %q but wanted greater than %q", it.Key, after) } items = append(items, it) after = it.Key } if !bres.IsTruncated { break } } return items, nil }
// ListBucket returns 0 to maxKeys (inclusive) items from the provided
// bucket. Keys before startAt will be skipped. (This is the S3
// 'marker' value). If the length of the returned items is equal to
// maxKeys, there is no indication whether or not the returned list is
// truncated.
func (c *Client) ListBucket(bucket string, startAt string, maxKeys int) (items []*Item, err error) {
	if maxKeys < 0 {
		return nil, errors.New("invalid negative maxKeys")
	}
	marker := startAt
	for len(items) < maxKeys {
		// Request no more keys than we still need, capped at the
		// per-request maximum.
		fetchN := maxKeys - len(items)
		if fetchN > maxList {
			fetchN = maxList
		}
		var bres listBucketResults
		url_ := fmt.Sprintf("%s?marker=%s&max-keys=%d", c.bucketURL(bucket), url.QueryEscape(marker), fetchN)
		// Retry the enumerate up to maxTries times, since Amazon likes
		// to close https connections a lot, and Go sucks at dealing
		// with it: https://code.google.com/p/go/issues/detail?id=3514
		const maxTries = 5
		for try := 1; try <= maxTries; try++ {
			// Linear backoff: 0ms, 100ms, 200ms, ...
			time.Sleep(time.Duration(try-1) * 100 * time.Millisecond)
			req := newReq(url_)
			c.Auth.SignRequest(req)
			// Note: res and err here shadow the function's named result
			// err within this loop.
			res, err := c.transport().RoundTrip(req)
			if err != nil {
				// Transport-level failure: retry unless out of tries.
				if try < maxTries {
					continue
				}
				return nil, err
			}
			if res.StatusCode != http.StatusOK {
				if res.StatusCode < 500 {
					// 4xx errors are not retried: surface them as an
					// *Error with up to 1 MiB of the response body
					// attached for diagnostics.
					body, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
					aerr := &Error{
						Op:     "ListBucket",
						Code:   res.StatusCode,
						Body:   body,
						Header: res.Header,
					}
					aerr.parseXML()
					res.Body.Close()
					return nil, aerr
				}
				// NOTE(review): a 5xx reaches this point with err still
				// nil, so the retry loop breaks below with an empty
				// bres, and the listing silently ends with whatever was
				// gathered so far — confirm this is intended rather
				// than a retry.
			} else {
				// Reset bres on each attempt so a partial decode from a
				// failed try doesn't leak into this one.
				bres = listBucketResults{}
				var logbuf bytes.Buffer
				err = xml.NewDecoder(io.TeeReader(res.Body, &logbuf)).Decode(&bres)
				if err != nil {
					log.Printf("Error parsing s3 XML response: %v for %q", err, logbuf.Bytes())
				} else if bres.MaxKeys != fetchN || bres.Name != bucket || bres.Marker != marker {
					// Sanity check: the server must echo back the exact
					// request parameters we sent.
					err = fmt.Errorf("Unexpected parse from server: %#v from: %s", bres, logbuf.Bytes())
					log.Print(err)
				}
			}
			httputil.CloseBody(res.Body)
			if err != nil {
				// NOTE(review): this path uses maxTries-1 while the
				// transport-error path above uses maxTries, so decode
				// failures get one fewer retry — confirm intentional.
				if try < maxTries-1 {
					continue
				}
				log.Print(err)
				return nil, err
			}
			break
		}
		for _, it := range bres.Contents {
			if it.Key == marker && it.Key != startAt {
				// Skip first dup on pages 2 and higher.
				continue
			}
			if it.Key < startAt {
				return nil, fmt.Errorf("Unexpected response from Amazon: item key %q but wanted greater than %q", it.Key, startAt)
			}
			items = append(items, it)
			marker = it.Key
		}
		if !bres.IsTruncated {
			// log.Printf("Not truncated. so breaking. items = %d; len Contents = %d, url = %s", len(items), len(bres.Contents), url_)
			break
		}
	}
	return items, nil
}