// appends file to tar func appendFile(tw *tar.Writer, tfh io.Seeker, fn string) (pos1 uint64, pos2 uint64, err error) { logger.Tracef("adding %s (%s) to %s", tfh, fn, tw) hdr, e := FileTarHeader(fn) if e != nil { err = e return } sfh, e := os.Open(fn) if e != nil { err = e return } defer sfh.Close() p, e := tfh.Seek(0, 1) if e != nil { err = e return } pos1 = uint64(p) if err = WriteTar(tw, hdr, sfh); err != nil { return } _ = tw.Flush() if p, err = tfh.Seek(0, 1); err != nil { return } pos2 = uint64(p) return }
func appendLink(tw *tar.Writer, tfh io.Seeker, fn string) (pos1 uint64, pos2 uint64, err error) { if !fileIsSymlink(fn) { return appendFile(tw, tfh, fn) } logger.Tracef("adding link %s (%s) to %s", tfh, fn, tw) hdr, e := FileTarHeader(fn) hdr.Size = 0 hdr.Typeflag = tar.TypeSymlink hdr.Linkname = BaseName(FindLinkOrigin(fn, false)) // logger.Printf("fn=%s hdr=%+v tm=%s", fn, hdr, hdr.Typeflag) if e != nil { err = e return } p, e := tfh.Seek(0, 1) if e != nil { err = e return } pos1 = uint64(p) if err = WriteTar(tw, hdr, nil); err != nil { return } _ = tw.Flush() if p, err = tfh.Seek(0, 1); err != nil { return } pos2 = uint64(p) return }
func rewind(s io.Seeker, err error) error { _, err1 := s.Seek(0, os.SEEK_SET) if err == nil { err = err1 } return err }
func seek(rs io.Seeker, offset int64, whence int) { if _, err := rs.Seek(offset, whence); err != nil { if err != io.EOF { log.Fatal(err) } log.Println(err) } }
func getSize(r io.Seeker) (int64, error) { var size int64 var err error size, err = r.Seek(0, os.SEEK_END) if err != nil { return 0, err } _, err = r.Seek(0, os.SEEK_SET) if err != nil { return size, err } return size, nil }
// Determine the size of a Seeker by seeking to the end. This function will // attempt to bring the file pointer back to the original location. func SeekerSize(sk io.Seeker) (pos int64, err error) { var curPos int64 if curPos, err = sk.Seek(0, SeekCur); err != nil { return } if pos, err = sk.Seek(0, SeekEnd); err != nil { return } if _, err = sk.Seek(curPos, SeekSet); err != nil { return } return }
// Size returns the total size in bytes of the provided io.Seeker. The original // position is preserved. func Size(r io.Seeker) (n int64, err error) { // Record original position. orig, err := r.Seek(0, os.SEEK_CUR) if err != nil { return 0, err } // Seek end position. end, err := r.Seek(0, os.SEEK_END) if err != nil { return 0, err } // Reset original position. _, err = r.Seek(orig, os.SEEK_SET) if err != nil { return 0, err } return end, nil }
// Helper function to get size of io.Seeker func getSeekerSize(s io.Seeker) (int64, error) { // Save the original position. originalPos, err := s.Seek(0, 1) if err != nil { return 0, err } // Seek to the end of the file. endPos, err := s.Seek(0, 2) if err != nil { return 0, err } // Restore original position _, err = s.Seek(originalPos, 0) if err != nil { return 0, err } // All good! return endPos, nil }
func seekEnd(s io.Seeker) (int64, error) { return s.Seek(0, os.SEEK_END) }
func StreamConversion(destination_name string, source io.Reader, writer io.Writer, seeker io.Seeker) { seconds := ConvertSrtStream(destination_name, source, writer) seeker.Seek(0, 0) WriteFileLine(writer, seconds, destination_name) }
func tell(r io.Seeker) uint32 { pos, _ := r.Seek(0, 1) return uint32(pos) }
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
//
// The request body is re-seekable (and hence the request retryable)
// only when metadata.contentBody implements io.Seeker. Error-response
// bodies are buffered into memory so they survive the retry loop.
func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
	var isRetryable bool     // Indicates if request can be retried.
	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
	if metadata.contentBody != nil {
		// Check if body is seekable then it is retryable.
		bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
	}

	// Create a done channel to control 'ListObjects' go routine.
	doneCh := make(chan struct{}, 1)

	// Indicate to our routine to exit cleanly upon return.
	defer close(doneCh)

	// Blank indentifier is kept here on purpose since 'range' without
	// blank identifiers is only supported since go1.4
	// https://golang.org/doc/go1.4#forrange.
	for _ = range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter, doneCh) {
		// Retry executes the following function body if request has an
		// error until maxRetries have been exhausted, retry attempts are
		// performed after waiting for a given period of time in a
		// binomial fashion.
		if isRetryable {
			// Seek back to beginning for each attempt.
			if _, err = bodySeeker.Seek(0, 0); err != nil {
				// If seek failed, no need to retry.
				return nil, err
			}
		}

		// Instantiate a new request.
		var req *http.Request
		req, err = c.newRequest(method, metadata)
		if err != nil {
			errResponse := ToErrorResponse(err)
			if isS3CodeRetryable(errResponse.Code) {
				continue // Retry.
			}
			return nil, err
		}

		// Initiate the request.
		res, err = c.do(req)
		if err != nil {
			// For supported network errors verify.
			if isNetErrorRetryable(err) {
				continue // Retry.
			}
			// For other errors, return here no need to retry.
			return nil, err
		}

		// For any known successful http status, return quickly.
		for _, httpStatus := range successStatus {
			if httpStatus == res.StatusCode {
				return res, nil
			}
		}

		// Read the body to be saved later.
		// NOTE(review): ':=' shadows the named return err for the rest
		// of this loop iteration; the final 'return res, err' below the
		// loop sees the outer err (nil here, since c.do succeeded).
		errBodyBytes, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return nil, err
		}
		// Save the body.
		errBodySeeker := bytes.NewReader(errBodyBytes)
		res.Body = ioutil.NopCloser(errBodySeeker)

		// For errors verify if its retryable otherwise fail quickly.
		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))

		// Bucket region if set in error response, we can retry the
		// request with the new region.
		if errResponse.Region != "" {
			c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
			continue // Retry.
		}

		// Verify if error response code is retryable.
		if isS3CodeRetryable(errResponse.Code) {
			continue // Retry.
		}

		// Verify if http status code is retryable.
		if isHTTPStatusRetryable(res.StatusCode) {
			continue // Retry.
		}

		// Save the body back again.
		errBodySeeker.Seek(0, 0) // Seek back to starting point.
		res.Body = ioutil.NopCloser(errBodySeeker)

		// For all other cases break out of the retry loop.
		break
	}
	return res, err
}
// Len returns the length of a Seeker. // If s has a Len, Size, or Stat method, one of those will be used. Otherwise, // Seek will be used to determine the length, before restoring the cursor to its // previous position. func Len(s io.Seeker) (int64, error) { switch s := s.(type) { case sizer: return s.Size(), nil case lener: return int64(s.Len()), nil case stater: info, err := s.Stat() if err != nil { return 0, err } return info.Size(), nil } i, err := s.Seek(0, os.SEEK_CUR) if err != nil { return 0, err } j, err := s.Seek(0, os.SEEK_END) if err != nil { return j, err } _, err = s.Seek(i, os.SEEK_SET) return j, err }
// Write writes an HTTP/1.1 request -- header and body -- in wire format.
// This method consults the following fields of req:
//	URL
//	Method (defaults to "GET")
//	UserAgent (defaults to defaultUserAgent)
//	Referer
//	Header
//	Body
//
// If Body is present, and is a Seeker, then "Content-length" is forced as a
// header, else if Body is present "Transfer-Encoding: chunked" is forced as a header.
//
// NOTE(review): this is pre-Go1 code (os.Error, io.Copyn, os.EOF,
// strconv.Itoa64) — keep the old identifiers when editing.
func (req *Request) Write(w io.Writer) os.Error {
	uri := urlEscape(req.URL.Path, false)
	if req.URL.RawQuery != "" {
		uri += "?" + req.URL.RawQuery
	}

	// Request line plus the headers derived from struct fields.
	fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), uri)
	fmt.Fprintf(w, "Host: %s\r\n", req.URL.Host)
	fmt.Fprintf(w, "User-Agent: %s\r\n", valueOrDefault(req.UserAgent, defaultUserAgent))
	if req.Referer != "" {
		fmt.Fprintf(w, "Referer: %s\r\n", req.Referer)
	}

	useContentLength := false
	contentLength := int64(0)
	if req.Body != nil {
		var seeker io.Seeker
		seeker, useContentLength = req.Body.(io.Seeker)
		if useContentLength {
			// Seek to the end of the stream and back to
			// discover the content length.
			currentPos, err := seeker.Seek(0, 1)
			if err != nil {
				return err
			}
			endPos, err := seeker.Seek(0, 2)
			if err != nil {
				return err
			}
			_, err = seeker.Seek(currentPos, 0)
			if err != nil {
				return err
			}
			// Length is measured from the CURRENT position, not the
			// start: only the unread remainder of Body is sent.
			contentLength = endPos - currentPos
			// Force Content-length
			req.Header["Content-length"] = strconv.Itoa64(contentLength)
		} else {
			// Force chunked encoding
			req.Header["Transfer-Encoding"] = "chunked"
		}
	}

	// TODO: split long values? (If so, should share code with Conn.Write)
	// TODO: if Header includes values for Host, User-Agent, or Referer, this
	// may conflict with the User-Agent or Referer headers we add manually.
	// One solution would be to remove the Host, UserAgent, and Referer fields
	// from Request, and introduce Request methods along the lines of
	// Response.{GetHeader,AddHeader} and string constants for "Host",
	// "User-Agent" and "Referer".
	for k, v := range req.Header {
		// Host, User-Agent, and Referer were sent from structure fields
		// above; ignore them if they also appear in req.Header.
		if k == "Host" || k == "User-Agent" || k == "Referer" {
			continue
		}
		io.WriteString(w, k+": "+v+"\r\n")
	}

	io.WriteString(w, "\r\n")

	if req.Body != nil {
		if useContentLength {
			// Seekable body: stream exactly contentLength bytes.
			_, ec := io.Copyn(w, req.Body, contentLength)
			if ec != nil {
				return ec
			}
		} else {
			// Non-seekable body: emit HTTP/1.1 chunked encoding,
			// one "<hex size>\r\n<data>\r\n" frame per read.
			buf := make([]byte, chunkSize)
		Loop:
			for {
				var nr, nw int
				var er, ew os.Error
				if nr, er = req.Body.Read(buf); nr > 0 {
					if er == nil || er == os.EOF {
						fmt.Fprintf(w, "%x\r\n", nr)
						nw, ew = w.Write(buf[0:nr])
						fmt.Fprint(w, "\r\n")
					}
				}
				switch {
				case er != nil:
					if er == os.EOF {
						break Loop
					}
					return er
				case ew != nil:
					return ew
				case nw < nr:
					return io.ErrShortWrite
				}
			}
			// last-chunk CRLF
			fmt.Fprint(w, "0\r\n\r\n")
		}
	}

	return nil
}
func seekCur(s io.Seeker) (int64, error) { return s.Seek(0, os.SEEK_CUR) }
// TODO: handle Seek() errors func (f *File) calcLength(stream io.Seeker) { offset, _ := stream.Seek(0, 1) // remember current position length, _ := stream.Seek(0, 2) _, _ = stream.Seek(offset, 0) f.length = length }
func seekToOffset(s io.Seeker, offset int) error { _, err := s.Seek(int64(offset), os.SEEK_CUR) return err }
func seekToIndex(s io.Seeker, index int) error { _, err := s.Seek(int64(index*blockSize), os.SEEK_SET) return err }