// encode compresses bytes from r and writes them to pw,
// closing pw with the error returned by encode1.
func encode(r io.Reader, pw *io.PipeWriter) {
	br, ok := r.(io.ByteReader)
	if !ok {
		br = bufio.NewReader(r)
	}
	pw.CloseWithError(encode1(pw, br))
}
// decode decompresses bytes from r and writes them to pw.
// read specifies how to decode bytes into codes.
// litWidth is the width in bits of literal codes.
func decode(r io.Reader, read func(*decoder) (uint16, os.Error), litWidth int, pw *io.PipeWriter) {
	br, ok := r.(io.ByteReader)
	if !ok {
		br = bufio.NewReader(r)
	}
	pw.CloseWithError(decode1(pw, br, read, uint(litWidth)))
}
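// A wiring sketch (assumed, not part of the original package): pipe-based codecs
// like the two helpers above are typically exposed through a constructor that
// creates a pipe, runs the worker in a goroutine, and hands the read end back to
// the caller, so the error passed to CloseWithError becomes the caller's Read
// error. The name newDecodeReader is illustrative only.
func newDecodeReader(r io.Reader, read func(*decoder) (uint16, os.Error), litWidth int) io.ReadCloser {
	pr, pw := io.Pipe()
	// decode closes pw (with an error or cleanly) when it finishes.
	go decode(r, read, litWidth, pw)
	return pr
}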
// HandleRead serves the named file over the pipe writer, closing it with an
// error if the file cannot be found.
func HandleRead(filename string, w *io.PipeWriter) {
	fmt.Printf("Filename : %v \n", []byte(filename))
	// The filename arrives with a trailing byte, which is stripped before lookup.
	d, err := localConf.fs.Get("tftp/" + filename[0:len(filename)-1])
	fmt.Println(d, err)
	if err != nil {
		w.CloseWithError(fmt.Errorf("file does not exist: %s", filename))
		return
	}
	defer d.Close()

	// copy all the data into a buffer
	data, err := ioutil.ReadAll(d)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Copy Error : %v\n", err)
	}
	buf := bytes.NewBuffer(data)
	c, e := io.Copy(w, buf)
	if e != nil {
		fmt.Fprintf(os.Stderr, "Can't send %s: %v\n", filename, e)
	} else {
		fmt.Fprintf(os.Stderr, "Sent %s (%d bytes)\n", filename, c)
	}
	w.Close()
}
func newMultipartReader(parts []typeReader) *multipartReader {
	mp := &multipartReader{pipeOpen: true}
	var pw *io.PipeWriter
	mp.pr, pw = io.Pipe()
	mpw := multipart.NewWriter(pw)
	mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
	go func() {
		for _, part := range parts {
			w, err := mpw.CreatePart(typeHeader(part.typ))
			if err != nil {
				mpw.Close()
				pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
				return
			}
			_, err = io.Copy(w, part.Reader)
			if err != nil {
				mpw.Close()
				pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
				return
			}
		}
		mpw.Close()
		pw.Close()
	}()
	return mp
}
func encode(out *io.PipeWriter, in io.ReadCloser, enc string, level int) {
	var (
		e   encoder
		err error
	)
	defer func() {
		if e != nil {
			e.Close()
		}
		if err == nil {
			err = io.EOF
		}
		out.CloseWithError(err)
		in.Close()
	}()

	if level == flate.BestSpeed {
		pool := encoderPool(enc)
		pe := pool.Get()
		if pe != nil {
			e = pe.(encoder)
			defer pool.Put(pe)
		}
	}
	if e == nil {
		e, err = newEncoder(enc, level)
		if err != nil {
			return
		}
	}
	e.Reset(out)

	b := make([]byte, bufferSize)
	for {
		n, rerr := in.Read(b)
		if n > 0 {
			_, err = e.Write(b[:n])
			if err != nil {
				break
			}
			err = e.Flush()
			if err != nil {
				break
			}
		}
		if rerr != nil {
			err = rerr
			break
		}
	}
}
// checkRequestEnd parses an HTTP request from c while teeing the raw bytes into w,
// then drains the body and closes w with whatever error was encountered.
func checkRequestEnd(w *io.PipeWriter, c io.Reader) {
	req, err := http.ReadRequest(bufio.NewReaderSize(io.TeeReader(c, w), h2FrameSize))
	if err != nil {
		w.CloseWithError(err)
		return
	}
	defer req.Body.Close()
	_, err = io.Copy(ioutil.Discard, req.Body)
	w.CloseWithError(err)
}
func (s *GardenServer) streamInput(decoder *json.Decoder, in *io.PipeWriter, process garden.Process, connCloseCh chan struct{}) {
	for {
		var payload transport.ProcessPayload
		err := decoder.Decode(&payload)
		if err != nil {
			close(connCloseCh)
			in.CloseWithError(errors.New("Connection closed"))
			return
		}

		switch {
		case payload.TTY != nil:
			process.SetTTY(*payload.TTY)

		case payload.Source != nil:
			if payload.Data == nil {
				in.Close()
				return
			}
			_, err := in.Write([]byte(*payload.Data))
			if err != nil {
				return
			}

		case payload.Signal != nil:
			s.logger.Info("stream-input-process-signal", lager.Data{"payload": payload})

			switch *payload.Signal {
			case garden.SignalKill:
				err = process.Signal(garden.SignalKill)
				if err != nil {
					s.logger.Error("stream-input-process-signal-kill-failed", err, lager.Data{"payload": payload})
				}
			case garden.SignalTerminate:
				err = process.Signal(garden.SignalTerminate)
				if err != nil {
					s.logger.Error("stream-input-process-signal-terminate-failed", err, lager.Data{"payload": payload})
				}
			default:
				s.logger.Error("stream-input-unknown-process-payload-signal", nil, lager.Data{"payload": payload})
				in.Close()
				return
			}

		default:
			s.logger.Error("stream-input-unknown-process-payload", nil, lager.Data{"payload": payload})
			in.Close()
			return
		}
	}
}
func (donut API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string, fullObjectWriter *io.PipeWriter) {
	for _, part := range parts.Part {
		recvMD5 := part.ETag
		object, ok := donut.multiPartObjects[uploadID].Get(part.PartNumber)
		if !ok {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidPart{})))
			return
		}
		calcMD5Bytes := md5.Sum(object)
		// the per-part md5sum in the complete multipart request header is hex encoded
		recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
		if err != nil {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidDigest{Md5: recvMD5})))
			return
		}
		if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(BadDigest{})))
			return
		}
		if _, err := io.Copy(fullObjectWriter, bytes.NewReader(object)); err != nil {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(err)))
			return
		}
		object = nil
	}
	fullObjectWriter.Close()
}
func (c *compressed) reset() {
	var w *io.PipeWriter
	c.r, w = io.Pipe()
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		compressor := isolated.GetCompressor(w)
		_, err := io.Copy(compressor, c.src)
		if err2 := compressor.Close(); err == nil {
			err = err2
		}
		w.CloseWithError(err)
	}()
}
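// A generic, self-contained variant of the pattern above (assumed, not from the
// original source), using compress/gzip in place of isolated.GetCompressor: the
// goroutine compresses src into the write end of a pipe, folds the compressor's
// Close error into the copy error, and reports the result via CloseWithError.
// Assumes imports "compress/gzip", "io" and "sync"; the type name is illustrative.
type compressedStream struct {
	r   *io.PipeReader
	src io.Reader
	wg  sync.WaitGroup
}

func (c *compressedStream) reset() {
	var w *io.PipeWriter
	c.r, w = io.Pipe()
	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		gz := gzip.NewWriter(w)
		_, err := io.Copy(gz, c.src)
		if err2 := gz.Close(); err == nil {
			err = err2
		}
		// the reader of c.r sees err, or io.EOF if err is nil
		w.CloseWithError(err)
	}()
}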
func PipeWrite(pipeWriter *io.PipeWriter) {
	var (
		i   = 0
		err error
		n   int
	)
	data := []byte("Go语言学习园地")
	for _, err = pipeWriter.Write(data); err == nil; n, err = pipeWriter.Write(data) {
		i++
		if i == 3 {
			pipeWriter.CloseWithError(errors.New("输出3次后结束"))
		}
	}
	fmt.Println("close 后输出的字节数:", n, " error:", err)
}
// joinFilesInGoRoutine concatenates the given files into writer,
// closing it with the first error encountered.
func joinFilesInGoRoutine(fileInfos []os.FileInfo, writer *io.PipeWriter) {
	for _, fileInfo := range fileInfos {
		file, err := os.Open(fileInfo.Name())
		if err != nil {
			writer.CloseWithError(err)
			return
		}
		_, err = io.Copy(writer, file)
		file.Close()
		if err != nil {
			writer.CloseWithError(err)
			return
		}
	}
	writer.Close()
}
func readDat(filename string, c chan io.Reader) {
	f, err := os.Open("testdata/webkit/" + filename)
	if err != nil {
		c <- pipeErr(err)
		return
	}
	defer f.Close()

	// Loop through the lines of the file. Each line beginning with "#" denotes
	// a new section, which is returned as a separate io.Reader.
	r := bufio.NewReader(f)
	var pw *io.PipeWriter
	for {
		line, err := r.ReadSlice('\n')
		if err != nil {
			if pw != nil {
				pw.CloseWithError(err)
				pw = nil
			} else {
				c <- pipeErr(err)
			}
			return
		}
		if len(line) == 0 {
			continue
		}
		if line[0] == '#' {
			if pw != nil {
				pw.Close()
			}
			var pr *io.PipeReader
			pr, pw = io.Pipe()
			c <- pr
			continue
		}
		if line[0] != '|' {
			// Strip the trailing '\n'.
			line = line[:len(line)-1]
		}
		if pw != nil {
			if _, err := pw.Write(line); err != nil {
				pw.CloseWithError(err)
				pw = nil
			}
		}
	}
}
func PipeWrite(pipeWriter *io.PipeWriter) {
	var (
		i   = 0
		err error
		n   int
	)
	data := []byte("Go语言学习园地")
	// Write to the pipe in a loop. On the third write we call CloseWithError to
	// close the write end of the pipe; the next call to Write then returns an
	// error, so the loop exits.
	for _, err = pipeWriter.Write(data); err == nil; n, err = pipeWriter.Write(data) {
		i++
		if i == 3 {
			// With the pipe's plain Close method (rather than CloseWithError),
			// the reader would see io.EOF instead of this error.
			pipeWriter.CloseWithError(errors.New("输出3次后结束"))
		}
	}
	fmt.Println("bytes written after close:", n, " error:", err)
}
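// A minimal wiring sketch (not from the original source) showing how PipeWrite
// above might be driven end to end: the reader side drains the *io.PipeReader
// until Read returns the error that was passed to CloseWithError. It assumes
// the PipeWrite function above is in the same file, plus imports "fmt" and "io";
// the names PipeRead, buf and done are illustrative.
func PipeRead(pipeReader *io.PipeReader) {
	buf := make([]byte, 128)
	for {
		n, err := pipeReader.Read(buf)
		if err != nil {
			// err is the error handed to CloseWithError on the writer side.
			fmt.Println("read error:", err)
			return
		}
		fmt.Println("read", n, "bytes:", string(buf[:n]))
	}
}

func main() {
	pr, pw := io.Pipe()
	done := make(chan struct{})
	go func() {
		PipeRead(pr)
		close(done)
	}()
	PipeWrite(pw)
	<-done
}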
// secReadLoop copies data from r into pw,
// doing a nacl open decryption on the data in the process, using shared as the key.
func secReadLoop(r io.Reader, pw *io.PipeWriter, shared *[32]byte) {
	var failed bool
	// check logs an error, stops the loop and
	// closes the pipe with err to signal the reader that we failed
	var check = func(err error) {
		if err != nil {
			log.Println("secReadLoop err:", err)
			if err2 := pw.CloseWithError(err); err2 != nil {
				log.Println("CloseWithError failed", err2)
			}
			failed = true
		}
	}
	for !failed { // until an error occurs
		// read the next ciphered message from the passed reader
		msg := make([]byte, 32*1024)
		n, err := io.ReadAtLeast(r, msg, 25)
		// the closed-conn check could be nicer, but there is no way to access the
		// abstracted TCPConn cleanly with the pipes involved
		if err != nil && (err == io.EOF || strings.Contains(err.Error(), "use of closed network connection")) {
			checkFatal(pw.Close())
			return
		}
		check(err)

		// slice off the unused rest of the buffer
		msg = msg[:n]

		// copy the nonce from the message
		var nonce [24]byte
		copy(nonce[:], msg[:24])

		// cut off the nonce
		msg = msg[24:]

		// decrypt the message
		clearMsg, ok := box.OpenAfterPrecomputation([]byte{}, msg, &nonce, shared)
		if !ok {
			check(errors.New("open failed"))
		}

		// copy the decrypted message to our pipe
		_, err = io.Copy(pw, bytes.NewReader(clearMsg))
		check(err)
	}
}
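// A hypothetical counterpart sketch (not in the original source): a write loop
// that seals each chunk with box.SealAfterPrecomputation and prepends the
// 24-byte nonce, one message per network write, which is the wire format the
// read loop above appears to assume. The function name and chunk size are
// assumptions; it needs "crypto/rand", "io" and "golang.org/x/crypto/nacl/box".
func secWriteLoop(w io.Writer, pr *io.PipeReader, shared *[32]byte) {
	buf := make([]byte, 16*1024)
	for {
		n, err := pr.Read(buf)
		if n > 0 {
			// pick a fresh random nonce for every message
			var nonce [24]byte
			if _, rerr := rand.Read(nonce[:]); rerr != nil {
				pr.CloseWithError(rerr)
				return
			}
			// seal the chunk and send nonce||ciphertext
			sealed := box.SealAfterPrecomputation(nonce[:], buf[:n], &nonce, shared)
			if _, werr := w.Write(sealed); werr != nil {
				pr.CloseWithError(werr)
				return
			}
		}
		if err != nil {
			if err == io.EOF {
				// the write side of the pipe finished cleanly
				pr.Close()
			} else {
				pr.CloseWithError(err)
			}
			return
		}
	}
}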
func sqlplusWrite(pw *io.PipeWriter) {
	var err error
	defer func() {
		pw.CloseWithError(err)
	}()
	err = sqlplusFormatOutput(pw)
	if err != nil {
		return
	}
	for _, p := range sqlplusParsers {
		_, err = io.WriteString(pw, p.query)
		if err != nil {
			return
		}
	}
	err = sqlplusExit(pw)
}
func sendSnapFile(snapPath string, snapFile *os.File, pw *io.PipeWriter, mw *multipart.Writer, action *actionData) {
	defer snapFile.Close()

	if action.SnapOptions == nil {
		action.SnapOptions = &SnapOptions{}
	}
	errs := []error{
		mw.WriteField("action", action.Action),
		mw.WriteField("name", action.Name),
		mw.WriteField("snap-path", action.SnapPath),
		mw.WriteField("channel", action.Channel),
		mw.WriteField("devmode", strconv.FormatBool(action.DevMode)),
	}
	for _, err := range errs {
		if err != nil {
			pw.CloseWithError(err)
			return
		}
	}

	fw, err := mw.CreateFormFile("snap", filepath.Base(snapPath))
	if err != nil {
		pw.CloseWithError(err)
		return
	}

	_, err = io.Copy(fw, snapFile)
	if err != nil {
		pw.CloseWithError(err)
		return
	}

	mw.Close()
	pw.Close()
}
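// A hedged caller-side sketch (assumed, not from the original source): one
// plausible way sendSnapFile could be wired up, streaming the multipart body
// through a pipe instead of buffering it in memory. The function name, URL and
// actionData value are placeholders; it uses only net/http, mime/multipart and io.
func postSnap(client *http.Client, snapPath string, snapFile *os.File, action *actionData) (*http.Response, error) {
	pr, pw := io.Pipe()
	mw := multipart.NewWriter(pw)
	req, err := http.NewRequest("POST", "http://localhost/v2/snaps", pr)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())
	// The goroutine writes the form fields and the snap file into pw; the HTTP
	// client reads them from pr as the request body is sent.
	go sendSnapFile(snapPath, snapFile, pw, mw, action)
	return client.Do(req)
}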
// readEncodedData - reads the (possibly erasure-encoded) object data into writer
// and verifies the stored md5sum before closing it.
func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
	readers, err := b.getDiskReaders(objectName, "data")
	if err != nil {
		writer.CloseWithError(iodine.New(err, nil))
		return
	}
	for _, reader := range readers {
		defer reader.Close()
	}
	expectedMd5sum, err := hex.DecodeString(objMetadata.MD5Sum)
	if err != nil {
		writer.CloseWithError(iodine.New(err, nil))
		return
	}
	hasher := md5.New()
	mwriter := io.MultiWriter(writer, hasher)
	switch len(readers) == 1 {
	case false:
		if objMetadata.ErasureTechnique == "" {
			writer.CloseWithError(iodine.New(MissingErasureTechnique{}, nil))
			return
		}
		encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks, objMetadata.ErasureTechnique)
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
		totalLeft := objMetadata.Size
		for i := 0; i < objMetadata.ChunkCount; i++ {
			decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
			if err != nil {
				writer.CloseWithError(iodine.New(err, nil))
				return
			}
			_, err = io.Copy(mwriter, bytes.NewBuffer(decodedData))
			if err != nil {
				writer.CloseWithError(iodine.New(err, nil))
				return
			}
			totalLeft = totalLeft - int64(objMetadata.BlockSize)
		}
	case true:
		_, err := io.Copy(writer, readers[0])
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
	}
	// check if the decoded data's md5sum matches
	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
		writer.CloseWithError(iodine.New(ChecksumMismatch{}, nil))
		return
	}
	writer.Close()
}
// readObjectData - reads object data from the underlying disk readers,
// reassembling erasure-coded chunks when needed, and verifies the stored
// checksums before closing the writer.
func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
	readers, err := b.getObjectReaders(objectName, "data")
	if err != nil {
		writer.CloseWithError(probe.NewWrappedError(err))
		return
	}
	for _, reader := range readers {
		defer reader.Close()
	}
	var expected512Sum, expectedMd5sum []byte
	{
		var err error
		expectedMd5sum, err = hex.DecodeString(objMetadata.MD5Sum)
		if err != nil {
			writer.CloseWithError(probe.NewWrappedError(probe.NewError(err)))
			return
		}
		expected512Sum, err = hex.DecodeString(objMetadata.SHA512Sum)
		if err != nil {
			writer.CloseWithError(probe.NewWrappedError(probe.NewError(err)))
			return
		}
	}
	hasher := md5.New()
	sum512hasher := sha256.New()
	mwriter := io.MultiWriter(writer, hasher, sum512hasher)
	switch len(readers) > 1 {
	case true:
		if objMetadata.ErasureTechnique == "" {
			writer.CloseWithError(probe.NewWrappedError(probe.NewError(MissingErasureTechnique{})))
			return
		}
		encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks, objMetadata.ErasureTechnique)
		if err != nil {
			writer.CloseWithError(probe.NewWrappedError(err))
			return
		}
		totalLeft := objMetadata.Size
		for i := 0; i < objMetadata.ChunkCount; i++ {
			decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
			if err != nil {
				writer.CloseWithError(probe.NewWrappedError(err))
				return
			}
			if _, err := io.Copy(mwriter, bytes.NewReader(decodedData)); err != nil {
				writer.CloseWithError(probe.NewWrappedError(probe.NewError(err)))
				return
			}
			totalLeft = totalLeft - int64(objMetadata.BlockSize)
		}
	case false:
		_, err := io.Copy(writer, readers[0])
		if err != nil {
			writer.CloseWithError(probe.NewWrappedError(probe.NewError(err)))
			return
		}
	}
	// check if the decoded data's md5sum matches
	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
		writer.CloseWithError(probe.NewWrappedError(probe.NewError(ChecksumMismatch{})))
		return
	}
	if !bytes.Equal(expected512Sum, sum512hasher.Sum(nil)) {
		writer.CloseWithError(probe.NewWrappedError(probe.NewError(ChecksumMismatch{})))
		return
	}
	writer.Close()
}
// BuildHTTP creates a new http request based on the data from the params
func (r *request) BuildHTTP(producer httpkit.Producer, registry strfmt.Registry) (*http.Request, error) {
	// build the data
	if err := r.writer.WriteToRequest(r, registry); err != nil {
		return nil, err
	}

	// create http request
	path := r.pathPattern
	for k, v := range r.pathParams {
		path = strings.Replace(path, "{"+k+"}", v, -1)
	}

	var body io.ReadCloser
	var pr *io.PipeReader
	var pw *io.PipeWriter
	buf := bytes.NewBuffer(nil)
	body = ioutil.NopCloser(buf)
	if r.fileFields != nil {
		pr, pw = io.Pipe()
		body = pr
	}
	req, err := http.NewRequest(r.method, path, body)
	if err != nil {
		return nil, err
	}
	req.URL.RawQuery = r.query.Encode()
	req.Header = r.header

	// check if this is a form type request
	if len(r.formFields) > 0 || len(r.fileFields) > 0 {
		// check if this is multipart
		if len(r.fileFields) > 0 {
			mp := multipart.NewWriter(pw)
			req.Header.Set(httpkit.HeaderContentType, mp.FormDataContentType())
			go func() {
				defer func() {
					mp.Close()
					pw.Close()
				}()

				for fn, v := range r.formFields {
					if len(v) > 0 {
						if err := mp.WriteField(fn, v[0]); err != nil {
							pw.CloseWithError(err)
							log.Fatal(err)
						}
					}
				}

				for fn, f := range r.fileFields {
					wrtr, err := mp.CreateFormFile(fn, filepath.Base(f.Name()))
					if err != nil {
						pw.CloseWithError(err)
						log.Fatal(err)
					}
					defer func() {
						for _, ff := range r.fileFields {
							ff.Close()
						}
					}()
					if _, err := io.Copy(wrtr, f); err != nil {
						pw.CloseWithError(err)
						log.Fatal(err)
					}
				}
			}()
			return req, nil
		}

		// write the form values as the body
		buf.WriteString(r.formFields.Encode())
		return req, nil
	}

	// if there is a payload, use the producer to write the payload
	if r.payload != nil {
		if err := producer.Produce(buf, r.payload); err != nil {
			return nil, err
		}
	}

	return req, nil
}
// BuildHTTP creates a new http request based on the data from the params
func (r *request) BuildHTTP(mediaType string, producers map[string]httpkit.Producer, registry strfmt.Registry) (*http.Request, error) {
	// build the data
	if err := r.writer.WriteToRequest(r, registry); err != nil {
		return nil, err
	}

	// create http request
	path := r.pathPattern
	for k, v := range r.pathParams {
		path = strings.Replace(path, "{"+k+"}", v, -1)
	}

	var body io.ReadCloser
	var pr *io.PipeReader
	var pw *io.PipeWriter
	buf := bytes.NewBuffer(nil)
	body = ioutil.NopCloser(buf)
	if r.fileFields != nil {
		pr, pw = io.Pipe()
		body = pr
	}
	req, err := http.NewRequest(r.method, path, body)
	if err != nil {
		return nil, err
	}
	req.URL.RawQuery = r.query.Encode()
	req.Header = r.header

	// check if this is a form type request
	if len(r.formFields) > 0 || len(r.fileFields) > 0 {
		// check if this is multipart
		if len(r.fileFields) > 0 {
			mp := multipart.NewWriter(pw)
			req.Header.Set(httpkit.HeaderContentType, mp.FormDataContentType())
			go func() {
				defer func() {
					mp.Close()
					pw.Close()
				}()

				for fn, v := range r.formFields {
					if len(v) > 0 {
						if err := mp.WriteField(fn, v[0]); err != nil {
							pw.CloseWithError(err)
							log.Fatal(err)
						}
					}
				}

				for fn, f := range r.fileFields {
					wrtr, err := mp.CreateFormFile(fn, filepath.Base(f.Name()))
					if err != nil {
						pw.CloseWithError(err)
						log.Fatal(err)
					}
					defer func() {
						for _, ff := range r.fileFields {
							ff.Close()
						}
					}()
					if _, err := io.Copy(wrtr, f); err != nil {
						pw.CloseWithError(err)
						log.Fatal(err)
					}
				}
			}()
			return req, nil
		}

		req.Header.Set(httpkit.HeaderContentType, mediaType)
		// write the form values as the body
		buf.WriteString(r.formFields.Encode())
		return req, nil
	}

	// if there is a payload, use the producer to write the payload, and then
	// set the header to the content-type appropriate for the payload produced
	if r.payload != nil {
		// TODO: infer most appropriate content type based on the producer used,
		// and the `consumers` section of the spec/operation
		req.Header.Set(httpkit.HeaderContentType, mediaType)
		if rdr, ok := r.payload.(io.ReadCloser); ok {
			req.Body = rdr
			return req, nil
		}
		if rdr, ok := r.payload.(io.Reader); ok {
			req.Body = ioutil.NopCloser(rdr)
			return req, nil
		}

		// set the content length of the request or else a chunked transfer is
		// declared, and this corrupts outgoing JSON payloads. the content's
		// length must be set prior to the body being written per the spec at
		// https://golang.org/pkg/net/http
		//
		//     If Body is present, Content-Length is <= 0 and TransferEncoding
		//     hasn't been set to "identity", Write adds
		//     "Transfer-Encoding: chunked" to the header. Body is closed
		//     after it is sent.
		//
		// to that end a temporary buffer, b, is created to produce the payload
		// body, and then its size is used to set the request's content length
		var b bytes.Buffer
		producer := producers[mediaType]
		if err := producer.Produce(&b, r.payload); err != nil {
			return nil, err
		}
		req.ContentLength = int64(b.Len())
		if _, err := buf.Write(b.Bytes()); err != nil {
			return nil, err
		}
	}

	return req, nil
}
// readEncodedData - reads the erasure-encoded object data described by
// donutObjectMetadata into writer and verifies the stored md5sum before closing it.
func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutObjectMetadata map[string]string) {
	expectedMd5sum, err := hex.DecodeString(donutObjectMetadata["sys.md5"])
	if err != nil {
		writer.CloseWithError(iodine.New(err, nil))
		return
	}
	readers, err := b.getDiskReaders(objectName, "data")
	if err != nil {
		writer.CloseWithError(iodine.New(err, nil))
		return
	}
	hasher := md5.New()
	mwriter := io.MultiWriter(writer, hasher)
	switch len(readers) == 1 {
	case false:
		totalChunks, totalLeft, blockSize, k, m, err := b.donutMetadata2Values(donutObjectMetadata)
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
		technique, ok := donutObjectMetadata["sys.erasureTechnique"]
		if !ok {
			writer.CloseWithError(iodine.New(MissingErasureTechnique{}, nil))
			return
		}
		encoder, err := NewEncoder(uint8(k), uint8(m), technique)
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
		for i := 0; i < totalChunks; i++ {
			decodedData, err := b.decodeEncodedData(totalLeft, blockSize, readers, encoder, writer)
			if err != nil {
				writer.CloseWithError(iodine.New(err, nil))
				return
			}
			_, err = io.Copy(mwriter, bytes.NewBuffer(decodedData))
			if err != nil {
				writer.CloseWithError(iodine.New(err, nil))
				return
			}
			totalLeft = totalLeft - int64(blockSize)
		}
	case true:
		_, err := io.Copy(writer, readers[0])
		if err != nil {
			writer.CloseWithError(iodine.New(err, nil))
			return
		}
	}
	// check if the decoded data's md5sum matches
	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
		writer.CloseWithError(iodine.New(ChecksumMismatch{}, nil))
		return
	}
	writer.Close()
}