// NewStreamReader returns a reader, which replays all the data generated by sw.
//
// The returned reader may be passed to Response.SetBodyStream.
//
// Close must be called on the returned reader after all the required data
// has been read. Otherwise goroutine leak may occur.
//
// See also Response.SetBodyStreamWriter.
func NewStreamReader(sw StreamWriter) io.ReadCloser {
	pc := fasthttputil.NewPipeConns()
	pw := pc.Conn1()
	pr := pc.Conn2()

	var bw *bufio.Writer
	v := streamWriterBufPool.Get()
	if v == nil {
		bw = bufio.NewWriter(pw)
	} else {
		bw = v.(*bufio.Writer)
		bw.Reset(pw)
	}

	go func() {
		defer func() {
			if r := recover(); r != nil {
				defaultLogger.Printf("panic in StreamWriter: %s\nStack trace:\n%s", r, debug.Stack())
			}
		}()

		sw(bw)
		bw.Flush()
		pw.Close()
		streamWriterBufPool.Put(bw)
	}()

	return pr
}
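// A minimal usage sketch for NewStreamReader based on the doc comment above;
// the payload, the helper name and the io.ReadAll call are illustrative, not
// part of the original package.
func exampleStreamReaderUsage() {
	r := NewStreamReader(func(w *bufio.Writer) {
		fmt.Fprint(w, "hello from the stream writer")
	})
	body, err := io.ReadAll(r) // drains everything written by the StreamWriter
	if err != nil {
		log.Printf("read error: %v", err)
	}
	_ = body
	// Close is required once all data has been read, otherwise the writer
	// goroutine leaks (see the doc comment above).
	r.Close()
}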
func packFile(
	writer *tar.Writer,
	buffer *bufio.Writer,
	filePath string,
	relativePath string,
	info os.FileInfo) error {
	link := ""
	var err error
	if info.Mode()&os.ModeSymlink != 0 {
		link, err = os.Readlink(filePath)
		if err != nil {
			return err
		}
	}

	header, err := tar.FileInfoHeader(info, link)
	if err != nil {
		return err
	}

	// Build up the name of the file in the tar archive.
	header.Name = relativePath
	if runtime.GOOS == "windows" {
		if strings.Contains(header.Name, "/") {
			return fmt.Errorf("Forward slash in path when running on windows")
		}
		header.Name = strings.Replace(header.Name, string(os.PathSeparator), "/", -1)
	}
	if info.IsDir() && !strings.HasSuffix(header.Name, "/") {
		header.Name += "/"
	}

	err = writer.WriteHeader(header)
	if err != nil {
		return err
	}

	if header.Typeflag == tar.TypeReg {
		// Regular file. Write its contents.
		file, err := os.Open(filePath)
		if err != nil {
			return err
		}

		buffer.Reset(writer)
		_, err = io.Copy(buffer, file)
		if err != nil {
			file.Close()
			return err
		}
		err = file.Close()
		if err != nil {
			return err
		}
		err = buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}
func addDockerfileToTar(srcPath string, tarWriter *tar.Writer, tmpWriter *bufio.Writer) error {
	fi, err := os.Lstat(srcPath)
	if err != nil {
		log.Errorf("Can't get file info: %s, error: %s", srcPath, err)
		return err
	}

	dockerfileToUpload := fileToUpload
	if fi.Mode().IsDir() {
		dockerfileToUpload = folderToUpload
	}

	tmpl, err := template.New("Dockerfile").Parse(dockerfileToUpload)
	if err != nil {
		return err
	}

	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, struct {
		Src string
	}{
		Src: filepath.Base(srcPath),
	}); err != nil {
		log.Errorf("Can't execute template to upload: %s", err)
		return err
	}

	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		log.Errorf("Can't get file info header: %s, error: %s", srcPath, err)
		return err
	}
	hdr.Name = DOCKERFILE
	hdr.Mode = 0100644 // Regular file + rw-r--r--
	hdr.Size = int64(buf.Len())
	hdr.ModTime = time.Now()
	hdr.Typeflag = tar.TypeReg
	hdr.Linkname = ""

	if err := tarWriter.WriteHeader(hdr); err != nil {
		log.Errorf("Can't write tar header: %s", err)
		return err
	}

	tmpWriter.Reset(tarWriter)
	defer tmpWriter.Reset(nil)
	if _, err := io.Copy(tmpWriter, &buf); err != nil {
		log.Errorf("Can't write Dockerfile to tar: %s", err)
		return err
	}
	if err := tmpWriter.Flush(); err != nil {
		log.Errorf("Can't flush Dockerfile to tar: %s", err)
		return err
	}

	return nil
}
func (wpool *WriterPool) Put(writer *bufio.Writer) error {
	writer.Reset(nil)
	select {
	case wpool.buf <- writer:
	default:
		return errors.New("poolfull")
	}
	return nil
}
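// Hypothetical Get counterpart to the channel-backed WriterPool.Put above;
// the buf field is inferred from Put, and the method name and fallback
// allocation are assumptions, not shown in the original code.
func (wpool *WriterPool) Get(w io.Writer) *bufio.Writer {
	select {
	case bw := <-wpool.buf:
		bw.Reset(w) // rebind the pooled writer to the new destination
		return bw
	default:
		return bufio.NewWriter(w)
	}
}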
func main() {
	var rp sync.Pool
	var wp sync.Pool
	for i := 0; i < 1024; i++ {
		rb := new([]byte)
		*rb = make([]byte, 2048)
		rp.Put(rb)
	}

	var counter uint32
	log.Fatal(nsk.ListenAndServe(":8000", func(conn *net.TCPConn) {
		var s string
		var c uint32
		var rb *[]byte
		var w *bufio.Writer

		if v := wp.Get(); v != nil {
			w = v.(*bufio.Writer)
			w.Reset(conn)
		} else {
			w = bufio.NewWriter(conn)
		}
		if v := rp.Get(); v != nil {
			rb = v.(*[]byte)
		} else {
			rb = new([]byte)
			*rb = make([]byte, 2048)
		}

		n, err := conn.Read(*rb)
		if err != nil || n <= 0 {
			goto E
		}

		c = atomic.AddUint32(&counter, 1)
		s = strconv.FormatUint(uint64(c), 10)

		w.WriteString("HTTP/1.1 200 OK\r\n")
		w.WriteString("Connection: close\r\n")
		w.WriteString(fmt.Sprintf("Content-Length: %d\r\n\r\n", len(s)))
		w.WriteString(s)
		w.Flush()

	E:
		conn.Close()
		rp.Put(rb)
		wp.Put(w)
	}))
}
// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
// writes it to a backup stream, and also saves any files that will be mutated
// by the import layer process to a backup location.
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
	var bcdBackup *os.File
	var bcdBackupWriter *winio.BackupFileWriter

	if backupPath, ok := mutatedFiles[hdr.Name]; ok {
		bcdBackup, err = os.Create(filepath.Join(root, backupPath))
		if err != nil {
			return nil, err
		}
		defer func() {
			cerr := bcdBackup.Close()
			if err == nil {
				err = cerr
			}
		}()

		bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
		defer func() {
			cerr := bcdBackupWriter.Close()
			if err == nil {
				err = cerr
			}
		}()

		buf.Reset(io.MultiWriter(w, bcdBackupWriter))
	} else {
		buf.Reset(w)
	}

	defer func() {
		ferr := buf.Flush()
		if err == nil {
			err = ferr
		}
	}()

	return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
}
// NewStreamReader returns a reader, which replays all the data generated by sw.
//
// The returned reader may be passed to Response.SetBodyStream.
//
// Close must be called on the returned reader after all the required data
// has been read. Otherwise goroutine leak may occur.
//
// See also Response.SetBodyStreamWriter.
func NewStreamReader(sw StreamWriter) io.ReadCloser {
	pc := fasthttputil.NewPipeConns()
	pw := pc.Conn1()
	pr := pc.Conn2()

	var bw *bufio.Writer
	v := streamWriterBufPool.Get()
	if v == nil {
		bw = bufio.NewWriter(pw)
	} else {
		bw = v.(*bufio.Writer)
		bw.Reset(pw)
	}

	go func() {
		sw(bw)
		bw.Flush()
		pw.Close()
		streamWriterBufPool.Put(bw)
	}()

	return pr
}
func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	if pool := bufioWriterPool(bw.Available()); pool != nil {
		pool.Put(bw)
	}
}
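// A sketch of the matching acquire side for the size-keyed pools used by
// putBufioWriter above, assuming bufioWriterPool(size) returns a *sync.Pool
// for that buffer size (or nil for uncommon sizes); the function name and
// fallback are assumptions.
func newBufioWriterSize(w io.Writer, size int) *bufio.Writer {
	if pool := bufioWriterPool(size); pool != nil {
		if v := pool.Get(); v != nil {
			bw := v.(*bufio.Writer)
			bw.Reset(w)
			return bw
		}
	}
	return bufio.NewWriterSize(w, size)
}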
func (bcl *BufferedConnListener) putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	bcl.bufioWriterPool.Put(bw)
}
func PutBufioWriter(pool *sync.Pool, bw *bufio.Writer) {
	bw.Reset(nil)
	pool.Put(bw)
}
func releaseWriter(s *Server, w *bufio.Writer) {
	w.Reset(nil)
	s.writerPool.Put(w)
}
func putBufioWriter(w *bufio.Writer) {
	w.Reset(nil)
	bufioWriterPool.Put(w)
}
func putBufioWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	bufioWriterPool.Put(bw)
}
func releaseWriter(ctx *RequestCtx, w *bufio.Writer) {
	w.Reset(nil)
	ctx.s.writerPool.Put(w)
}
// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}
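// Hypothetical Get counterpart for BufioWriterPool.Put above; the pool field
// is inferred from Put, and the method name and fallback allocation are
// assumptions, not shown in the original code.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
	if v := bufPool.pool.Get(); v != nil {
		b := v.(*bufio.Writer)
		b.Reset(w)
		return b
	}
	return bufio.NewWriter(w)
}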
func addTarFile(path, name string, tw *tar.Writer, twBuf *bufio.Writer) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}

	if fi.IsDir() && !strings.HasSuffix(name, "/") {
		name = name + "/"
	}

	hdr.Name = name

	stat, ok := fi.Sys().(*syscall.Stat_t)
	if ok {
		// Currently go does not fill in the major/minors
		if stat.Mode&syscall.S_IFBLK == syscall.S_IFBLK ||
			stat.Mode&syscall.S_IFCHR == syscall.S_IFCHR {
			hdr.Devmajor = int64(major(uint64(stat.Rdev)))
			hdr.Devminor = int64(minor(uint64(stat.Rdev)))
		}
	}

	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}

	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		twBuf.Reset(tw)
		_, err = io.Copy(twBuf, file)
		file.Close()
		if err != nil {
			return err
		}
		err = twBuf.Flush()
		if err != nil {
			return err
		}
		twBuf.Reset(nil)
	}

	return nil
}
func (c *HostClient) releaseWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	c.writerPool.Put(bw)
}