func (r *ZipScannerImpl) Reader() (io.Reader, error) {
	switch r.fh.Method {
	case zip.Deflate:
		if r.Debug {
			fmt.Println("inflating...")
		}
		r.fr = flate.NewReader(r.reader)

	case zip.Store:
		if r.Debug {
			fmt.Println("reading...")
		}
		if r.fh.UncompressedSize > 0 {
			r.fr = io.LimitReader(r.reader, int64(r.fh.UncompressedSize))
		} else if r.fh.UncompressedSize == 0 && (r.fh.Flags&hasDataDescriptor) == 0 {
			// zero-byte file or directory?
			r.fr = io.LimitReader(r.reader, 0)
		} else {
			return r.readError(NoUncompressedSize)
		}

	default:
		return r.readError(UnsupportedCompression)
	}

	r.err = nil
	return r.fr, r.err
}
func (s *BufferTest) TestMBuf(t *C) {
	pool := NewBufferPool(1000*1024*1024, 200*1024*1024)
	h := pool.NewPoolHandle()

	n := uint64(2 * BUF_SIZE)
	mb := MBuf{}.Init(h, n)
	t.Assert(len(mb.buffers), Equals, 2)

	r := io.LimitReader(&SeqReader{}, int64(n))
	for {
		nread, err := mb.WriteFrom(r)
		t.Assert(err, IsNil)
		if nread == 0 {
			break
		}
	}
	t.Assert(mb.wbuf, Equals, 1)
	t.Assert(mb.wp, Equals, BUF_SIZE)

	diff, err := CompareReader(mb, io.LimitReader(&SeqReader{}, int64(n)))
	t.Assert(err, IsNil)
	t.Assert(diff, Equals, -1)
	t.Assert(mb.rbuf, Equals, 1)
	t.Assert(mb.rp, Equals, BUF_SIZE)

	t.Assert(h.inUseBuffers, Equals, int64(2))
	mb.Free()
	t.Assert(h.inUseBuffers, Equals, int64(0))
}
func (cOff chunkOffTs) readPreChunk(r io.ReadSeeker) (*preChunk, error) {
	pc := preChunk{ts: cOff.ts}
	if _, err := r.Seek(cOff.offset, 0); err != nil {
		return nil, err
	}
	lr := io.LimitReader(r, cOff.size)

	var length uint32
	if err := binary.Read(lr, binary.BigEndian, &length); err != nil {
		return nil, err
	}
	lr = io.LimitReader(lr, int64(length))

	compType, err := kagus.ReadByte(lr)
	if err != nil {
		return nil, err
	}
	pc.compression = compType

	buf := new(bytes.Buffer)
	if _, err := io.Copy(buf, lr); err != nil {
		return nil, err
	}
	pc.data = buf.Bytes()
	// err is necessarily nil at this point; return it explicitly as nil.
	return &pc, nil
}
func (s *BufferTest) TestBufferWrite(t *C) {
	h := NewBufferPool(1000 * 1024 * 1024)

	n := uint64(2 * BUF_SIZE)
	mb := MBuf{}.Init(h, n, true)
	t.Assert(len(mb.buffers), Equals, 2)

	nwritten, err := io.Copy(mb, io.LimitReader(&SeqReader{}, int64(n)))
	t.Assert(nwritten, Equals, int64(n))
	t.Assert(err, IsNil)

	diff, err := CompareReader(mb, io.LimitReader(&SeqReader{}, int64(n)))
	t.Assert(err, IsNil)
	t.Assert(diff, Equals, -1)

	cur, err := mb.Seek(0, 1)
	t.Assert(err, IsNil)
	t.Assert(cur, Equals, int64(n))

	cur, err = mb.Seek(0, 2)
	t.Assert(err, IsNil)
	t.Assert(cur, Equals, int64(n))

	cur, err = mb.Seek(0, 0)
	t.Assert(err, IsNil)
	t.Assert(cur, Equals, int64(0))
	t.Assert(mb.rbuf, Equals, 0)
	t.Assert(mb.rp, Equals, 0)

	diff, err = CompareReader(mb, io.LimitReader(&SeqReader{}, int64(n)))
	t.Assert(err, IsNil)
	t.Assert(diff, Equals, -1)
}
// LimitReader returns a Reader that reads from r but stops with EOF after n
// bytes. The underlying implementation is a *io.LimitedReader unless n is
// greater than the maximum value representable as an int64, in which case it
// chains *io.LimitedReader values with io.MultiReader.
func LimitReader(r io.Reader, n uint64) io.Reader {
	if n > uint64(math.MaxInt64) {
		// Split the limit so each piece fits in an int64. The remainder can
		// itself exceed math.MaxInt64 (when n == math.MaxUint64), so recurse
		// rather than converting n-math.MaxInt64 to int64 and overflowing.
		return io.MultiReader(
			io.LimitReader(r, math.MaxInt64),
			LimitReader(r, n-uint64(math.MaxInt64)),
		)
	}
	return io.LimitReader(r, int64(n))
}
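// A minimal usage sketch of the LimitReader wrapper above (the example name
// and inputs are illustrative, not from the original source): for small n it
// behaves exactly like io.LimitReader, just with a uint64 limit.
func ExampleLimitReader() {
	src := strings.NewReader("hello world")
	b, _ := ioutil.ReadAll(LimitReader(src, 5))
	fmt.Printf("%s\n", b)
	// Output: hello
}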
// archiveFileVisitor is called for each file in an archive. It may set
// tempFile and signature.
func archiveFileVisitor(dir string, tempFile *string, signature *[]byte, archivePath string, filedata io.Reader) error {
	var err error
	filename := path.Base(archivePath)
	archiveDir := path.Dir(archivePath)
	l.Debugf("considering file %s", archivePath)

	switch filename {
	case "syncthing", "syncthing.exe":
		archiveDirs := strings.Split(archiveDir, "/")
		if len(archiveDirs) > 1 {
			// Don't consider "syncthing" files found too deeply, as they may be
			// other things.
			return nil
		}
		l.Debugf("found upgrade binary %s", archivePath)
		*tempFile, err = writeBinary(dir, io.LimitReader(filedata, maxBinarySize))
		if err != nil {
			return err
		}

	case "release.sig":
		l.Debugf("found signature %s", archivePath)
		*signature, err = ioutil.ReadAll(io.LimitReader(filedata, maxSignatureSize))
		if err != nil {
			return err
		}
	}

	return nil
}
func (c *CompressionSnappyDecoder) readHeader() (int, error) {
	header := make([]byte, 4)
	// Use io.ReadFull: a bare Read may legally return fewer than 3 bytes.
	if _, err := io.ReadFull(c.source, header[:3]); err != nil {
		return 0, err
	}
	headerVal := binary.LittleEndian.Uint32(header)
	c.isOriginal = headerVal%2 == 1
	c.chunkLength = int(headerVal / 2)
	if !c.isOriginal {
		// ORC does not use snappy's framing as implemented in the
		// github.com/golang/snappy Reader implementation. As a result
		// we have to read and decompress the entire chunk.
		// TODO: find reader implementation with optional framing.
		r := io.LimitReader(c.source, int64(c.chunkLength))
		src, err := ioutil.ReadAll(r)
		if err != nil {
			return 0, err
		}
		decodedBytes, err := snappy.Decode(nil, src)
		if err != nil {
			return 0, err
		}
		c.decoded = bytes.NewReader(decodedBytes)
	} else {
		c.decoded = io.LimitReader(c.source, int64(c.chunkLength))
	}
	return 0, nil
}
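// A worked example of the chunk header decoded above, with an illustrative
// value (not taken from a real ORC stream): the 3-byte little-endian header
// packs the chunk length in the high bits and an "uncompressed" flag in bit 0.
func main() {
	header := []byte{0x19, 0x00, 0x00, 0x00} // only the first 3 bytes come off the wire
	headerVal := binary.LittleEndian.Uint32(header)
	fmt.Println(headerVal%2 == 1) // true: chunk is stored uncompressed
	fmt.Println(headerVal / 2)    // 12: chunk length in bytes
}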
func (c *Connection) readReply() (*replyMsg, error) {
	sizeBits, err := ioutil.ReadAll(io.LimitReader(c.conn, 4))
	if err != nil {
		return nil, err
	}
	if len(sizeBits) < 4 {
		// ReadAll returns a nil error on EOF, so a short read must be
		// detected explicitly before decoding the length prefix.
		return nil, io.ErrUnexpectedEOF
	}
	size := binary.LittleEndian.Uint32(sizeBits)
	rest, err := ioutil.ReadAll(io.LimitReader(c.conn, int64(size)-4))
	if err != nil {
		return nil, err
	}
	reply := parseReply(rest)
	return reply, nil
}
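// readReply above (and its near-twin further down in this collection) frames
// messages as a 4-byte little-endian length prefix that counts itself,
// followed by the body. A hedged generic sketch of that pattern (readFrame is
// a hypothetical name, not part of either original package):
func readFrame(conn io.Reader) ([]byte, error) {
	var size uint32
	if err := binary.Read(conn, binary.LittleEndian, &size); err != nil {
		return nil, err
	}
	if size < 4 {
		return nil, fmt.Errorf("frame size %d is smaller than its own header", size)
	}
	body, err := ioutil.ReadAll(io.LimitReader(conn, int64(size)-4))
	if err != nil {
		return nil, err
	}
	if uint32(len(body)) != size-4 {
		return nil, io.ErrUnexpectedEOF
	}
	return body, nil
}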
// Extract form fields and file data from an HTTP POST Policy
func extractPostPolicyFormValues(reader *multipart.Reader) (filePart io.Reader, fileName string, formValues map[string]string, err error) {
	// HTML form values
	formValues = make(map[string]string)
	fileName = ""
	for err == nil {
		var part *multipart.Part
		part, err = reader.NextPart()
		if part != nil {
			canonicalFormName := http.CanonicalHeaderKey(part.FormName())
			if canonicalFormName != "File" {
				var buffer []byte
				limitReader := io.LimitReader(part, maxFormFieldSize+1)
				buffer, err = ioutil.ReadAll(limitReader)
				if err != nil {
					return nil, "", nil, err
				}
				if int64(len(buffer)) > maxFormFieldSize {
					return nil, "", nil, errSizeUnexpected
				}
				formValues[canonicalFormName] = string(buffer)
			} else {
				filePart = io.LimitReader(part, maxObjectSize)
				fileName = part.FileName()
				// As described in the S3 spec, we expect the file to be the last form field
				break
			}
		}
	}
	return filePart, fileName, formValues, nil
}
// readReply reads one reply message from the database.
func (self *Connection) readReply() (*opReply, error) {
	sizeBits, err := ioutil.ReadAll(io.LimitReader(self.conn, 4))
	if err != nil {
		return nil, err
	}
	if len(sizeBits) < 4 {
		return nil, io.ErrUnexpectedEOF
	}
	size := pack.Uint32(sizeBits)
	rest, err := ioutil.ReadAll(io.LimitReader(self.conn, int64(size)-4))
	if err != nil {
		return nil, err
	}
	reply := parseReply(rest)
	return reply, nil
}
func TestVolumeCreateBadDispersionValues(t *testing.T) {
	tmpfile := tests.Tempfile()
	defer os.Remove(tmpfile)

	// Create the app
	app := NewTestApp(tmpfile)
	defer app.Close()
	router := mux.NewRouter()
	app.SetRoutes(router)

	// Setup the server
	ts := httptest.NewServer(router)
	defer ts.Close()

	// VolumeCreate JSON request
	request := []byte(`{
		"size": 100,
		"durability": {
			"type": "disperse",
			"disperse": {
				"data": 8,
				"redundancy": 1
			}
		}
	}`)

	// Send request
	r, err := http.Post(ts.URL+"/volumes", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusBadRequest)
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
	tests.Assert(t, err == nil)
	r.Body.Close()
	tests.Assert(t, strings.Contains(string(body), "Invalid dispersion combination"))

	// VolumeCreate JSON request
	request = []byte(`{
		"size": 100,
		"durability": {
			"type": "disperse",
			"disperse": {
				"data": 4,
				"redundancy": 3
			}
		}
	}`)

	// Send request
	r, err = http.Post(ts.URL+"/volumes", "application/json", bytes.NewBuffer(request))
	tests.Assert(t, err == nil)
	tests.Assert(t, r.StatusCode == http.StatusBadRequest)
	body, err = ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
	tests.Assert(t, err == nil)
	r.Body.Close()
	tests.Assert(t, strings.Contains(string(body), "Invalid dispersion combination"))
}
func (decoder *packetDecoder) Decode(msg interface{}) (err error) {
	decoder.buffer.Reset()
	if _, err = decoder.buffer.ReadFrom(io.LimitReader(decoder.reader, int64(decoder.n))); err != nil {
		return err
	}
	n := decoder.decodeHead(decoder.buffer.Next(decoder.n))
	if _, err = decoder.buffer.ReadFrom(io.LimitReader(decoder.reader, int64(n))); err != nil {
		return err
	}
	return decoder.base.Decode(msg)
}
func main() {
	// START OMIT
	var r io.Reader = ByteReader('A')
	r = io.LimitReader(r, 1e6)
	r = LogReader{r}
	io.Copy(ioutil.Discard, r)
	// STOP OMIT

	// One-line equivalent of the slide content above; note that main cannot
	// return a value, so io.Copy is called as a plain statement.
	io.Copy(ioutil.Discard, LogReader{io.LimitReader(ByteReader('A'), 1e6)})
}
func TestPrependedBlocks(t *testing.T) {
	const BLOCKSIZE = 100
	const BLOCK_COUNT = 20

	checksum := NewFileChecksumGenerator(BLOCKSIZE)

	file1 := io.LimitReader(
		readers.NewNonRepeatingSequence(0),
		BLOCKSIZE*BLOCK_COUNT,
	)
	file2 := io.LimitReader(
		io.MultiReader(
			readers.OneReader(BLOCKSIZE), // Off by one block
			readers.NewNonRepeatingSequence(0),
		),
		BLOCKSIZE*BLOCK_COUNT,
	)

	output1 := bytes.NewBuffer(nil)
	chksum1, _ := checksum.GenerateChecksums(file1, output1)

	output2 := bytes.NewBuffer(nil)
	chksum2, _ := checksum.GenerateChecksums(file2, output2)

	if bytes.Equal(chksum1, chksum2) {
		t.Fatal("Checksums should be different")
	}

	weakSize, strongSize := checksum.GetChecksumSizes()
	sums1, _ := chunks.LoadChecksumsFromReader(output1, weakSize, strongSize)
	sums2, _ := chunks.LoadChecksumsFromReader(output2, weakSize, strongSize)

	if len(sums1) != len(sums2) {
		t.Fatalf("Checksum lengths differ %v vs %v", len(sums1), len(sums2))
	}
	if sums1[0].Match(sums2[0]) {
		t.Error("Chunk sums1[0] should differ from sums2[0]")
	}
	for i := range sums2 {
		if i == 0 {
			continue
		}
		if !sums1[i-1].Match(sums2[i]) {
			t.Errorf("Chunk sums1[%v] should equal sums2[%v]", i-1, i)
		}
	}
}
func passThru(w io.Writer, req *http.Request) error {
	var body bytes.Buffer
	_, err := io.Copy(&body, io.LimitReader(req.Body, maxSnippetSize+1))
	req.Body.Close()
	if err != nil {
		return fmt.Errorf("error reading body: %v", err)
	}
	if body.Len() > maxSnippetSize {
		return fmt.Errorf("snippet is too large")
	}

	snip := &Snippet{Body: body.Bytes()}
	id := snip.Id()
	key := []byte(id)

	var output bytes.Buffer
	if err = db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucketCache)
		data := b.Get(key)
		if data == nil || *flagDisableCache {
			client := http.Client{}
			r, err := client.Post(*flagCompileURL, req.Header.Get("Content-Type"), &body)
			if err != nil {
				return err
			}
			defer r.Body.Close()
			data, err = ioutil.ReadAll(io.LimitReader(r.Body, maxSnippetSize+1))
			if err != nil {
				return err
			}
			if len(data) > maxSnippetSize {
				return fmt.Errorf("output is too large")
			}
			if err = b.Put(key, data); err != nil {
				return err
			}
		}
		output.Write(data)
		return nil
	}); err != nil {
		return err
	}
	if _, err := io.Copy(w, &output); err != nil {
		return err
	}
	return nil
}
func TestCallbacksWork(t *testing.T) {
	fake := new(FakeStream)
	var sent int64
	var recv int64

	sentCB := func(n int64, proto protocol.ID, p peer.ID) {
		sent += n
	}
	recvCB := func(n int64, proto protocol.ID, p peer.ID) {
		recv += n
	}

	ms := newMeteredStream(fake, protocol.ID("TEST"), peer.ID("PEER"), recvCB, sentCB)

	toWrite := int64(100000)
	toRead := int64(100000)

	fake.ReadBuf = io.LimitReader(randbo.New(), toRead)
	writeData := io.LimitReader(randbo.New(), toWrite)

	n, err := io.Copy(ms, writeData)
	if err != nil {
		t.Fatal(err)
	}
	if n != toWrite {
		t.Fatal("incorrect write amount")
	}
	if toWrite != sent {
		t.Fatal("incorrectly reported writes", toWrite, sent)
	}

	n, err = io.Copy(ioutil.Discard, ms)
	if err != nil {
		t.Fatal(err)
	}
	if n != toRead {
		t.Fatal("incorrect read amount")
	}
	if toRead != recv {
		t.Fatal("incorrectly reported reads")
	}
}
func AddStudent(w http.ResponseWriter, r *http.Request) {
	var student Student

	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
	if err != nil {
		panic(err)
	}
	if err := r.Body.Close(); err != nil {
		panic(err)
	}
	if err := json.Unmarshal(body, &student); err != nil {
		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		w.WriteHeader(422) // unprocessable entity
		if err := json.NewEncoder(w).Encode(err); err != nil {
			panic(err)
		}
		// Return after the error response so the handler doesn't fall through.
		return
	}

	// add to db
	//t := RepoCreateTodo(student)
	//w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	//w.WriteHeader(http.StatusCreated)
	//if err := json.NewEncoder(w).Encode(t); err != nil {
	//	panic(err)
	//}
}
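// Note: ioutil.ReadAll(io.LimitReader(r.Body, 1048576)) silently truncates any
// body larger than 1 MiB. A hedged alternative sketch using the standard
// library's http.MaxBytesReader, which turns overflow into an explicit error
// (the handler name and response text are illustrative):
func addStudentCapped(w http.ResponseWriter, r *http.Request) {
	body, err := ioutil.ReadAll(http.MaxBytesReader(w, r.Body, 1048576))
	if err != nil {
		http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
		return
	}
	_ = body // decode as in AddStudent above
}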
// matchesCurrent returns whether or not an existing git hook is able to be
// written to or upgraded. A git hook matches those conditions if and only if
// its contents match the current contents, or any past "upgrade-able" contents
// of this hook.
func (h *Hook) matchesCurrent() (bool, error) {
	file, err := os.Open(h.Path())
	if err != nil {
		return false, err
	}

	by, err := ioutil.ReadAll(io.LimitReader(file, 1024))
	file.Close()
	if err != nil {
		return false, err
	}

	contents := strings.TrimSpace(string(by))
	if contents == h.Contents || len(contents) == 0 {
		return true, nil
	}

	for _, u := range h.Upgradeables {
		if u == contents {
			return true, nil
		}
	}

	return false, fmt.Errorf("Hook already exists: %s\n\n%s\n", string(h.Type), contents)
}
// readPayload reads the HTTP response in chunks, making the read buffer available
// to MeekConn.Read() calls after each chunk; the intention is to allow bytes to
// flow back to the reader as soon as possible instead of buffering the entire payload.
func (meek *MeekConn) readPayload(receivedPayload io.ReadCloser) (totalSize int64, err error) {
	defer receivedPayload.Close()
	totalSize = 0
	for {
		reader := io.LimitReader(receivedPayload, READ_PAYLOAD_CHUNK_LENGTH)
		// Block until there is capacity in the receive buffer
		var receiveBuffer *bytes.Buffer
		select {
		case receiveBuffer = <-meek.emptyReceiveBuffer:
		case receiveBuffer = <-meek.partialReceiveBuffer:
		case <-meek.broadcastClosed:
			return 0, nil
		}
		// Note: receiveBuffer size may exceed FULL_RECEIVE_BUFFER_LENGTH by up to the size
		// of one received payload. The FULL_RECEIVE_BUFFER_LENGTH value is just a threshold.
		n, err := receiveBuffer.ReadFrom(reader)
		meek.replaceReceiveBuffer(receiveBuffer)
		if err != nil {
			return 0, ContextError(err)
		}
		totalSize += n
		if n == 0 {
			break
		}
	}
	return totalSize, nil
}
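// A stripped-down sketch of the chunking pattern readPayload uses, without the
// MeekConn buffer handoff (the function name, chunk size parameter, and
// consume callback are illustrative): each io.LimitReader pass hands at most
// one chunk to the consumer before looping, so data flows out incrementally.
func readInChunks(src io.Reader, chunkSize int64, consume func([]byte)) (int64, error) {
	var total int64
	for {
		var chunk bytes.Buffer
		n, err := chunk.ReadFrom(io.LimitReader(src, chunkSize))
		if err != nil {
			return total, err
		}
		if n == 0 {
			return total, nil // source exhausted
		}
		total += n
		consume(chunk.Bytes())
	}
}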
func UninstallHooks() error {
	if !InRepo() {
		return newInvalidRepoError(nil)
	}

	prePushHookPath := filepath.Join(LocalGitDir, "hooks", "pre-push")
	file, err := os.Open(prePushHookPath)
	if err != nil {
		// hook doesn't exist, our work here is done
		return nil
	}

	by, err := ioutil.ReadAll(io.LimitReader(file, 1024))
	file.Close()
	if err != nil {
		return err
	}

	contents := strings.TrimSpace(string(by))
	if contents == prePushHook || prePushUpgrades[contents] {
		return os.RemoveAll(prePushHookPath)
	}

	return nil
}
func (r *RoomSetResource) Post(request *http.Request) (int, interface{}, http.Header) {
	var (
		roomObject RoomObject
		success    bool
	)

	body, err := ioutil.ReadAll(io.LimitReader(request.Body, READ_BUFFER_SIZE))
	if err != nil {
		return http.StatusInternalServerError, InternalServerError(err), nil
	}
	if err = request.Body.Close(); err != nil {
		return http.StatusInternalServerError, InternalServerError(err), nil
	}
	if err := json.Unmarshal(body, &roomObject); err != nil {
		return gogoapi.HTTP_UNPROCESSABLE,
			gogoapi.JSONError{gogoapi.HTTP_UNPROCESSABLE, "Unprocessable entity."},
			nil
	}

	err = r.postStatement.QueryRow(
		roomObject.Name,
		roomObject.Description,
	).Scan(&success, &roomObject.RoomId)
	if err != nil {
		return http.StatusInternalServerError, InternalServerError(err), nil
	}

	if !success {
		status := http.StatusConflict
		return status, gogoapi.JSONError{status, "Resource already exists."}, nil
	}
	return http.StatusCreated, roomObject, nil
}
// reads a bulk reply (i.e. $5\r\nhello)
func readBulk(reader *bufio.Reader, head string) ([]byte, error) {
	var err error
	var data []byte

	if head == "" {
		head, err = reader.ReadString('\n')
		if err != nil {
			return nil, err
		}
	}

	switch head[0] {
	case ':':
		data = []byte(strings.TrimSpace(head[1:]))

	case '$':
		// Declare size separately so err is assigned, not shadowed; with
		// `size, err := ...` the errors from ReadAll and ReadString below
		// would be lost when the outer err is returned.
		var size int
		size, err = strconv.Atoi(strings.TrimSpace(head[1:]))
		if err != nil {
			return nil, err
		}
		if size == -1 {
			return nil, doesNotExist
		}
		lr := io.LimitReader(reader, int64(size))
		data, err = ioutil.ReadAll(lr)
		if err == nil {
			// read end of line
			_, err = reader.ReadString('\n')
		}

	default:
		return nil, RedisError("Expecting Prefix '$' or ':'")
	}

	return data, err
}
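// A hedged usage sketch of readBulk over a canned RESP reply (the input bytes
// are illustrative): "$5\r\n" announces a 5-byte bulk string, and the
// io.LimitReader inside readBulk stops the body read at exactly that length.
func main() {
	reader := bufio.NewReader(strings.NewReader("$5\r\nhello\r\n"))
	data, err := readBulk(reader, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", data) // hello
}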
func (self *ProtobufClient) readResponses() {
	message := make([]byte, 0, MAX_RESPONSE_SIZE)
	buff := bytes.NewBuffer(message)
	for !self.stopped {
		buff.Reset()
		conn := self.getConnection()
		if conn == nil {
			time.Sleep(200 * time.Millisecond)
			continue
		}

		var messageSizeU uint32
		err := binary.Read(conn, binary.LittleEndian, &messageSizeU)
		if err != nil {
			log.Error("Error while reading message size: %v", err)
			time.Sleep(200 * time.Millisecond)
			continue
		}

		messageSize := int64(messageSizeU)
		messageReader := io.LimitReader(conn, messageSize)
		if _, err = io.Copy(buff, messageReader); err != nil {
			log.Error("Error while reading message: %v", err)
			time.Sleep(200 * time.Millisecond)
			continue
		}

		response, err := protocol.DecodeResponse(buff)
		if err != nil {
			log.Error("error unmarshaling response: %s", err)
			time.Sleep(200 * time.Millisecond)
		} else {
			self.sendResponse(response)
		}
	}
}
func probe(path string) modem.Modem {
	if path != "" {
		b, err := ioutil.ReadFile(path)
		if err != nil {
			glog.Errorf("Failed to read %q: %v", path, err)
			return nil
		}
		if isSB6121(b) {
			m, err := NewFakeData(path)
			if err != nil {
				glog.Errorf("Failed to create fake SB6121: %v", err)
				return nil
			}
			return m
		}
		return nil
	}

	rc, err := get()
	if err != nil {
		glog.Errorf("Failed to get status page: %v", err)
		return nil
	}
	defer rc.Close()

	b, err := ioutil.ReadAll(io.LimitReader(rc, 1<<20))
	if err != nil {
		glog.Errorf("Failed to read status page: %v", err)
		return nil
	}
	if isSB6121(b) {
		return New()
	}
	return nil
}
func TestDecompInlen(t *testing.T) {
	data := bytes.Repeat([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 1000)
	cmp := Compress1X(data)

	for i := 1; i < 16; i++ {
		for j := -16; j < 16; j++ {
			_, err := Decompress1X(io.LimitReader(bytes.NewReader(cmp), int64(len(cmp)-i)), len(cmp)+j, 0)
			if err != io.EOF {
				t.Error("EOF expected for truncated input, found:", err)
			}
		}
	}

	for j := -16; j < 16; j++ {
		data2, err := Decompress1X(bytes.NewReader(cmp), len(cmp)+j, 0)
		if j < 0 && err != io.EOF {
			t.Error("EOF expected for truncated input, found:", err)
		}
		if j >= 0 {
			if err != nil {
				t.Error("error for normal decompression:", err, j)
			} else if !reflect.DeepEqual(data, data2) {
				t.Error("data doesn't match after decompression")
			}
		}
	}
}
func TestMultiReaderSourceError(t *testing.T) {
	t.Parallel()

	// This test fails if it doesn't complete quickly.
	timer := time.AfterFunc(2*time.Second, func() {
		t.Fatalf("Test seems to have hung.")
	})
	defer timer.Stop()

	randomSrc := randomDataMaker{rand.NewSource(1028890720402726901)}
	tordr := iotest.TimeoutReader(&randomSrc)
	lr := io.LimitReader(tordr, expSize)
	r1, _ := newMultiReaderTimeout(lr, 10*time.Millisecond)
	b1 := &bytes.Buffer{}

	rs := make(chan copyRes, 2)
	go bgCopy(b1, r1, rs)

	res1 := <-rs
	if res1.e != Timeout {
		// t.Errorf already marks the test as failed; no t.Fail() needed.
		t.Errorf("Expected a timeout, got %v", res1.e)
	}
}
func TestMultiReader(t *testing.T) {
	t.Parallel()

	randomSrc := randomDataMaker{rand.NewSource(1028890720402726901)}
	lr := io.LimitReader(&randomSrc, expSize)
	r1, r2 := newMultiReader(lr)
	b1 := &bytes.Buffer{}
	b2 := &bytes.Buffer{}

	rs := make(chan copyRes, 2)
	go bgCopy(b1, r1, rs)
	go bgCopy(b2, r2, rs)

	res1 := <-rs
	res2 := <-rs
	if res1.e != nil || res2.e != nil {
		t.Logf("Error copying data: %v/%v", res1.e, res2.e)
	}
	if res1.s != res2.s || res1.s != expSize {
		t.Fatalf("Read %v/%v bytes, expected %v", res1.s, res2.s, expSize)
	}
	if !reflect.DeepEqual(b1, b2) {
		t.Fatalf("Didn't read the same data from the two things")
	}
}
func ServerCreate(w http.ResponseWriter, r *http.Request) {
	if authentication.IsAllowed(w, r) {
		var server pulp.Server
		body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576))
		if err != nil {
			logger.Log("could not read POST body, Error: "+err.Error(), logger.ERROR)
		}
		if err := r.Body.Close(); err != nil {
			logger.Log("could not close POST body, Error: "+err.Error(), logger.ERROR)
		}
		if err := json.Unmarshal(body, &server); err != nil {
			w.Header().Set("Content-Type", "application/json; charset=UTF-8")
			w.WriteHeader(422) // unprocessable entity
			if err := json.NewEncoder(w).Encode(err); err != nil {
				logger.Log("could not json/encode error, Error: "+err.Error(), logger.ERROR)
			}
			// Return after the error response so the handler doesn't fall through.
			return
		}

		session, collection := db.InitServerCollection()
		defer session.Close()

		server.Added = time.Now()
		err = collection.Insert(server)
		if err != nil {
			logger.Log("could not insert server to DB, Error: "+err.Error(), logger.ERROR)
		}

		w.Header().Set("Content-Type", "application/json; charset=UTF-8")
		w.WriteHeader(http.StatusCreated)
		if err := json.NewEncoder(w).Encode(server); err != nil {
			panic(err)
		}
	}
}
func (x *XdrStream) ReadOne(in interface{}) error {
	var nbytes uint32
	err := binary.Read(x.rdr, binary.BigEndian, &nbytes)
	if err != nil {
		x.rdr.Close()
		if err == io.ErrUnexpectedEOF {
			return io.EOF
		}
		return err
	}
	// Mask off the high bit, which is the XDR record-marking continuation flag.
	nbytes &= 0x7fffffff

	x.buf.Reset()
	if nbytes == 0 {
		x.rdr.Close()
		return io.EOF
	}
	x.buf.Grow(int(nbytes))
	read, err := x.buf.ReadFrom(io.LimitReader(x.rdr, int64(nbytes)))
	if read != int64(nbytes) {
		x.rdr.Close()
		return errors.New("Read wrong number of bytes from XDR")
	}
	if err != nil {
		x.rdr.Close()
		return err
	}

	readi, err := xdr.Unmarshal(&x.buf, in)
	if int64(readi) != int64(nbytes) {
		return fmt.Errorf("Unmarshalled %d bytes from XDR, expected %d", readi, nbytes)
	}
	return err
}
// GetSized downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize".
func (s HTTPStore) GetSized(name string, size int64) ([]byte, error) {
	url, err := s.buildMetaURL(name)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("GET", url.String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := s.roundTrip.RoundTrip(req)
	if err != nil {
		return nil, NetworkError{Wrapped: err}
	}
	defer resp.Body.Close()
	if err := translateStatusToError(resp, name); err != nil {
		logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
		return nil, err
	}
	if size == NoSizeLimit {
		size = notary.MaxDownloadSize
	}
	if resp.ContentLength > size {
		return nil, ErrMaliciousServer{}
	}
	logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
	b := io.LimitReader(resp.Body, size)
	body, err := ioutil.ReadAll(b)
	if err != nil {
		return nil, err
	}
	return body, nil
}
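// The shape of GetSized's defense is reusable: reject a response whose
// advertised length exceeds the cap, then bound the actual read as well in
// case Content-Length lies. A minimal sketch under those assumptions
// (readCapped is a hypothetical helper, not part of the notary API):
func readCapped(body io.Reader, contentLength, maxSize int64) ([]byte, error) {
	if contentLength > maxSize {
		return nil, fmt.Errorf("advertised length %d exceeds cap %d", contentLength, maxSize)
	}
	return ioutil.ReadAll(io.LimitReader(body, maxSize))
}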