func teardownPeer(t *testing.T, c *Client, in *io.PipeReader, out *io.PipeWriter) {
	// in.Close()
	out.Close()
	c.shares.halt()
	os.RemoveAll(c.DownloadRoot)
}
func HandleRead(filename string, w *io.PipeWriter) {
	fmt.Printf("Filename : %v \n", []byte(filename))
	// Strip the trailing byte from the requested name before the lookup.
	d, err := localConf.fs.Get("tftp/" + filename[0:len(filename)-1])
	if err != nil {
		w.CloseWithError(fmt.Errorf("file does not exist: %s", filename))
		return
	}
	defer d.Close()
	// Copy all the data into a buffer, then stream it through the pipe.
	data, err := ioutil.ReadAll(d)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Copy Error : %v\n", err)
	}
	buf := bytes.NewBuffer(data)
	n, e := io.Copy(w, buf)
	if e != nil {
		fmt.Fprintf(os.Stderr, "Can't send %s: %v\n", filename, e)
	} else {
		fmt.Fprintf(os.Stderr, "Sent %s (%d bytes)\n", filename, n)
	}
	w.Close()
}
// We overload the Wait() method to enable subprocess termination if a
// timeout has been exceeded.
func (mc *ManagedCmd) Wait() (err error) {
	go func() {
		mc.done <- mc.Cmd.Wait()
	}()
	if mc.timeout_duration != 0 {
		select {
		case <-mc.Stopchan:
			err = fmt.Errorf("CommandChain was stopped with error: [%s]", mc.kill())
		case <-time.After(mc.timeout_duration):
			err = fmt.Errorf("CommandChain timedout with error: [%s]", mc.kill())
		case err = <-mc.done:
		}
	} else {
		select {
		case <-mc.Stopchan:
			err = fmt.Errorf("CommandChain was stopped with error: [%s]", mc.kill())
		case err = <-mc.done:
		}
	}
	if writer, ok := mc.Stdout.(*io.PipeWriter); ok {
		writer.Close()
	}
	if writer, ok := mc.Stderr.(*io.PipeWriter); ok {
		writer.Close()
	}
	return err
}
// decode decompresses bytes from r and writes them to pw.
// read specifies how to decode bytes into codes.
// litWidth is the width in bits of literal codes.
func decode(r io.Reader, read func(*decoder) (uint16, os.Error), litWidth int, pw *io.PipeWriter) {
	br, ok := r.(io.ByteReader)
	if !ok {
		br = bufio.NewReader(r)
	}
	pw.CloseWithError(decode1(pw, br, read, uint(litWidth)))
}
func (donut API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string, fullObjectWriter *io.PipeWriter) {
	for _, part := range parts.Part {
		recvMD5 := part.ETag
		object, ok := donut.multiPartObjects[uploadID].Get(part.PartNumber)
		if !ok {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidPart{})))
			return
		}
		calcMD5Bytes := md5.Sum(object)
		// complete multi part request header md5sum per part is hex encoded
		recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\""))
		if err != nil {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidDigest{Md5: recvMD5})))
			return
		}
		if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(BadDigest{})))
			return
		}
		if _, err := io.Copy(fullObjectWriter, bytes.NewReader(object)); err != nil {
			fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(err)))
			return
		}
		object = nil
	}
	fullObjectWriter.Close()
}
func encode(r io.Reader, pw *io.PipeWriter) {
	br, ok := r.(io.ByteReader)
	if !ok {
		br = bufio.NewReader(r)
	}
	pw.CloseWithError(encode1(pw, br))
}
func StreamWriteMultipartForm(params map[string]string, fileField, path, boundary string, pw *io.PipeWriter, buf *bytes.Buffer) {
	defer pw.Close()
	mpw := multipart.NewWriter(pw)
	mpw.SetBoundary(boundary)
	if fileField != "" && path != "" {
		fw, err := mpw.CreateFormFile(fileField, filepath.Base(path))
		if err != nil {
			log.Fatal(err)
			return
		}
		if buf != nil {
			_, err = io.Copy(fw, buf)
			if err != nil {
				log.Fatal(err)
				return
			}
		}
	}
	for key, val := range params {
		_ = mpw.WriteField(key, val)
	}
	err := mpw.Close()
	if err != nil {
		log.Fatal(err)
		return
	}
}
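// The following is a minimal caller sketch (not part of the original code). It
// assumes StreamWriteMultipartForm fills the write end of an io.Pipe while an
// HTTP POST streams the body from the read end. The URL, field names, boundary,
// and payload below are illustrative placeholders only.
func uploadViaPipe() error {
	pr, pw := io.Pipe()
	const boundary = "xStreamFormBoundary1234567890"
	payload := bytes.NewBufferString("file contents to upload")

	// Producer: writes the multipart form into pw; the callee closes pw when done.
	go StreamWriteMultipartForm(map[string]string{"name": "demo"}, "file", "/tmp/demo.bin", boundary, pw, payload)

	// Consumer: the HTTP client reads the request body from pr as it is produced.
	resp, err := http.Post("https://example.com/upload", "multipart/form-data; boundary="+boundary, pr)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}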
// Streams upload directly from file -> mime/multipart -> pipe -> http-request
func streamingUploadFile(params map[string]string, paramName, path string, w *io.PipeWriter, file *os.File) {
	defer file.Close()
	defer w.Close()
	writer := multipart.NewWriter(w)
	part, err := writer.CreateFormFile(paramName, filepath.Base(path))
	if err != nil {
		log.Fatal(err)
		return
	}
	_, err = io.Copy(part, file)
	if err != nil {
		log.Fatal(err)
		return
	}
	for key, val := range params {
		_ = writer.WriteField(key, val)
	}
	err = writer.Close()
	if err != nil {
		log.Fatal(err)
		return
	}
}
// Encode the file and request parameters in a multipart body.
// File contents are streamed into the request using an io.Pipe in a separate goroutine.
func streamUploadBody(client *FlickrClient, photo io.Reader, body *io.PipeWriter, fileName string, boundary string) {
	// multipart writer to fill the body
	defer body.Close()
	writer := multipart.NewWriter(body)
	writer.SetBoundary(boundary)
	// create the "photo" field
	part, err := writer.CreateFormFile("photo", filepath.Base(fileName))
	if err != nil {
		log.Fatal(err)
		return
	}
	// fill the photo field
	_, err = io.Copy(part, photo)
	if err != nil {
		log.Fatal(err)
		return
	}
	// dump other params
	for key, val := range client.Args {
		_ = writer.WriteField(key, val[0])
	}
	// close the form writer
	err = writer.Close()
	if err != nil {
		log.Fatal(err)
		return
	}
}
// startReading starts a goroutine receiving the lines out of the reader
// in the background and passing them to a created string channel. This
// will be used in the assertions.
func startReading(c *gc.C, tailer *tailer.Tailer, reader *io.PipeReader, writer *io.PipeWriter) chan string {
	linec := make(chan string)
	// Start goroutine for reading.
	go func() {
		defer close(linec)
		reader := bufio.NewReader(reader)
		for {
			line, err := reader.ReadString('\n')
			switch err {
			case nil:
				linec <- line
			case io.EOF:
				return
			default:
				c.Fail()
			}
		}
	}()
	// Close writer when tailer is stopped or has an error. Components
	// using the tailer can do it the same way.
	go func() {
		tailer.Wait()
		writer.Close()
	}()
	return linec
}
func encode(out *io.PipeWriter, in io.ReadCloser, enc string, level int) {
	var (
		e   encoder
		err error
	)
	defer func() {
		if e != nil {
			e.Close()
		}
		if err == nil {
			err = io.EOF
		}
		out.CloseWithError(err)
		in.Close()
	}()
	if level == flate.BestSpeed {
		pool := encoderPool(enc)
		pe := pool.Get()
		if pe != nil {
			e = pe.(encoder)
			defer pool.Put(pe)
		}
	}
	if e == nil {
		e, err = newEncoder(enc, level)
		if err != nil {
			return
		}
	}
	e.Reset(out)
	b := make([]byte, bufferSize)
	for {
		n, rerr := in.Read(b)
		if n > 0 {
			_, err = e.Write(b[:n])
			if err != nil {
				break
			}
			err = e.Flush()
			if err != nil {
				break
			}
		}
		if rerr != nil {
			err = rerr
			break
		}
	}
}
func (s *GardenServer) streamProcess(logger lager.Logger, conn net.Conn, process garden.Process, stdinPipe *io.PipeWriter, connCloseCh chan struct{}) {
	statusCh := make(chan int, 1)
	errCh := make(chan error, 1)

	go func() {
		status, err := process.Wait()
		if err != nil {
			logger.Error("wait-failed", err, lager.Data{
				"id": process.ID(),
			})
			errCh <- err
		} else {
			logger.Info("exited", lager.Data{
				"status": status,
				"id":     process.ID(),
			})
			statusCh <- status
		}
	}()

	for {
		select {
		case status := <-statusCh:
			transport.WriteMessage(conn, &transport.ProcessPayload{
				ProcessID:  process.ID(),
				ExitStatus: &status,
			})
			stdinPipe.Close()
			return

		case err := <-errCh:
			e := err.Error()
			transport.WriteMessage(conn, &transport.ProcessPayload{
				ProcessID: process.ID(),
				Error:     &e,
			})
			stdinPipe.Close()
			return

		case <-s.stopping:
			logger.Debug("detaching", lager.Data{
				"id": process.ID(),
			})
			return

		case <-connCloseCh:
			return
		}
	}
}
func PipeWriter(data map[string]interface{}) {
	// A usable *io.PipeWriter must come from io.Pipe; a zero-value
	// io.PipeWriter{} panics on Write. Drain the read end in a goroutine
	// so the Write below does not block.
	r, w := io.Pipe()
	defer w.Close()
	go io.Copy(os.Stdout, r)
	w.Write([]byte("pipeWriter"))
	// err := tpl.Execute(w, data)
	// if err != nil {
	// 	fmt.Println(err)
	// }
}
func checkRequestEnd(w *io.PipeWriter, c io.Reader) {
	req, err := http.ReadRequest(bufio.NewReaderSize(io.TeeReader(c, w), h2FrameSize))
	if err != nil {
		w.CloseWithError(err)
		return
	}
	defer req.Body.Close()
	_, err = io.Copy(ioutil.Discard, req.Body)
	w.CloseWithError(err)
}
func (s *GardenServer) streamInput(decoder *json.Decoder, in *io.PipeWriter, process garden.Process, connCloseCh chan struct{}) {
	for {
		var payload transport.ProcessPayload
		err := decoder.Decode(&payload)
		if err != nil {
			close(connCloseCh)
			in.CloseWithError(errors.New("Connection closed"))
			return
		}

		switch {
		case payload.TTY != nil:
			process.SetTTY(*payload.TTY)

		case payload.Source != nil:
			if payload.Data == nil {
				in.Close()
				return
			} else {
				_, err := in.Write([]byte(*payload.Data))
				if err != nil {
					return
				}
			}

		case payload.Signal != nil:
			s.logger.Info("stream-input-process-signal", lager.Data{"payload": payload})

			switch *payload.Signal {
			case garden.SignalKill:
				err = process.Signal(garden.SignalKill)
				if err != nil {
					s.logger.Error("stream-input-process-signal-kill-failed", err, lager.Data{"payload": payload})
				}
			case garden.SignalTerminate:
				err = process.Signal(garden.SignalTerminate)
				if err != nil {
					s.logger.Error("stream-input-process-signal-terminate-failed", err, lager.Data{"payload": payload})
				}
			default:
				s.logger.Error("stream-input-unknown-process-payload-signal", nil, lager.Data{"payload": payload})
				in.Close()
				return
			}

		default:
			s.logger.Error("stream-input-unknown-process-payload", nil, lager.Data{"payload": payload})
			in.Close()
			return
		}
	}
}
func (c *compressed) reset() {
	var w *io.PipeWriter
	c.r, w = io.Pipe()

	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		compressor := isolated.GetCompressor(w)
		_, err := io.Copy(compressor, c.src)
		if err2 := compressor.Close(); err == nil {
			err = err2
		}
		w.CloseWithError(err)
	}()
}
func sendSnapFile(snapPath string, snapFile *os.File, pw *io.PipeWriter, mw *multipart.Writer, action *actionData) {
	defer snapFile.Close()

	if action.SnapOptions == nil {
		action.SnapOptions = &SnapOptions{}
	}
	errs := []error{
		mw.WriteField("action", action.Action),
		mw.WriteField("name", action.Name),
		mw.WriteField("snap-path", action.SnapPath),
		mw.WriteField("channel", action.Channel),
		mw.WriteField("devmode", strconv.FormatBool(action.DevMode)),
	}
	for _, err := range errs {
		if err != nil {
			pw.CloseWithError(err)
			return
		}
	}

	fw, err := mw.CreateFormFile("snap", filepath.Base(snapPath))
	if err != nil {
		pw.CloseWithError(err)
		return
	}

	_, err = io.Copy(fw, snapFile)
	if err != nil {
		pw.CloseWithError(err)
		return
	}

	mw.Close()
	pw.Close()
}
// Streams upload directly from file -> mime/multipart -> pipe -> http-request
func manageStream(dst *io.PipeWriter, src io.Reader) error {
	// Close the stream when the job is done
	defer dst.Close()

	// Copy
	n, err := io.Copy(dst, src)
	fmt.Println(n)
	return err
}
func cleanup(p1, p2 *io.PipeWriter, stdinCh, stderrCh chan error) error {
	p1.Close()
	p2.Close()
	if stdinCh != nil {
		if err := <-stdinCh; err != nil {
			return err
		}
	}
	if stderrCh != nil {
		if err := <-stderrCh; err != nil {
			return err
		}
	}
	return nil
}
func (s *Sequence) done(p1, p2 *io.PipeWriter, stdinCh, stderrCh chan error) error {
	p1.Close()
	p2.Close()
	defer s.reset()
	if stdinCh != nil {
		if err := <-stdinCh; err != nil {
			return err
		}
	}
	if stderrCh != nil {
		if err := <-stderrCh; err != nil {
			return err
		}
	}
	return nil
}
func logHandler(name string, debug bool, actionFunc func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		if debug {
			buf := new(bytes.Buffer)
			io.Copy(buf, r.Body)
			log.Debugf("Dispatching %s with %v", name, strings.TrimSpace(buf.String()))
			var writer *io.PipeWriter
			r.Body, writer = io.Pipe()
			go func() {
				io.Copy(writer, buf)
				writer.Close()
			}()
		}
		actionFunc(w, r)
	}
}
func newMultipartReader(parts []typeReader) *multipartReader {
	mp := &multipartReader{pipeOpen: true}
	var pw *io.PipeWriter
	mp.pr, pw = io.Pipe()
	mpw := multipart.NewWriter(pw)
	mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
	go func() {
		for _, part := range parts {
			w, err := mpw.CreatePart(typeHeader(part.typ))
			if err != nil {
				mpw.Close()
				pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
				return
			}
			_, err = io.Copy(w, part.Reader)
			if err != nil {
				mpw.Close()
				pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
				return
			}
		}

		mpw.Close()
		pw.Close()
	}()
	return mp
}
// secReadLoop copies data from r into pw, doing a NaCl box open (decryption)
// on the data in the process, using shared as the key.
func secReadLoop(r io.Reader, pw *io.PipeWriter, shared *[32]byte) {
	var failed bool
	// check logs an error, stops the loop and
	// closes the pipe with err to signal the reader we failed
	var check = func(err error) {
		if err != nil {
			log.Println("secReadLoop err:", err)
			if err2 := pw.CloseWithError(err); err2 != nil {
				log.Println("CloseWithError failed", err2)
			}
			failed = true
		}
	}
	for !failed { // until an error occurs
		// read the next ciphered message from the passed reader
		msg := make([]byte, 32*1024)
		n, err := io.ReadAtLeast(r, msg, 25)
		// the closed-conn check could be nicer, but there is no way to access
		// the abstracted TCPConn cleanly with the pipes involved
		if err != nil && (err == io.EOF || strings.Contains(err.Error(), "use of closed network connection")) {
			checkFatal(pw.Close())
			return
		}
		check(err)
		if failed {
			// the pipe is already closed with the error; stop before slicing
			// a buffer that may hold fewer than 24 bytes
			return
		}
		// trim the buffer down to the bytes actually read
		msg = msg[:n]
		// copy the nonce from the message
		var nonce [24]byte
		copy(nonce[:], msg[:24])
		// cut off the nonce
		msg = msg[24:]
		// decrypt the message
		clearMsg, ok := box.OpenAfterPrecomputation([]byte{}, msg, &nonce, shared)
		if !ok {
			check(errors.New("open failed"))
		}
		// copy the decrypted message to our pipe
		_, err = io.Copy(pw, bytes.NewReader(clearMsg))
		check(err)
	}
}
// HandleRequest reads an Omaha request from body, builds the corresponding
// response, and writes it as XML to bodyWriter.
func HandleRequest(api *api.API, body io.Reader, bodyWriter *io.PipeWriter, ip string) error {
	defer func() {
		_ = bodyWriter.Close()
	}()

	omahaReq, err := readOmahaRequest(body)
	if err != nil {
		logger.Warn("HandleRequest problem with readOmahaRequest", "error", err.Error())
		return ErrMalformedRequest
	}

	omahaResp, err := buildOmahaResponse(api, omahaReq, ip)
	if err != nil {
		logger.Warn("HandleRequest problem with buildOmahaResponse", "error", err.Error())
		return ErrMalformedResponse
	}

	return writeXMLResponse(bodyWriter, omahaResp)
}
func readDat(filename string, c chan io.Reader) {
	f, err := os.Open("testdata/webkit/" + filename)
	if err != nil {
		c <- pipeErr(err)
		return
	}
	defer f.Close()

	// Loop through the lines of the file. Each line beginning with "#" denotes
	// a new section, which is returned as a separate io.Reader.
	r := bufio.NewReader(f)
	var pw *io.PipeWriter
	for {
		line, err := r.ReadSlice('\n')
		if err != nil {
			if pw != nil {
				pw.CloseWithError(err)
				pw = nil
			} else {
				c <- pipeErr(err)
			}
			return
		}
		if len(line) == 0 {
			continue
		}
		if line[0] == '#' {
			if pw != nil {
				pw.Close()
			}
			var pr *io.PipeReader
			pr, pw = io.Pipe()
			c <- pr
			continue
		}
		if line[0] != '|' {
			// Strip the trailing '\n'.
			line = line[:len(line)-1]
		}
		if pw != nil {
			if _, err := pw.Write(line); err != nil {
				pw.CloseWithError(err)
				pw = nil
			}
		}
	}
}
func sqlplusWrite(pw *io.PipeWriter) {
	var err error
	defer func() {
		pw.CloseWithError(err)
	}()
	err = sqlplusFormatOutput(pw)
	if err != nil {
		return
	}
	for _, p := range sqlplusParsers {
		_, err = io.WriteString(pw, p.query)
		if err != nil {
			return
		}
	}
	err = sqlplusExit(pw)
}
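// The following is a hypothetical caller sketch (not part of the original code).
// It assumes sqlplusWrite feeds the generated SQL*Plus script into a subprocess's
// stdin via the read end of the pipe; the sqlplus flags and connect string are
// illustrative placeholders.
func runSqlplusQueries() ([]byte, error) {
	pr, pw := io.Pipe()
	// Producer: writes the script and closes pw (with any error) when done.
	go sqlplusWrite(pw)

	// Consumer: the subprocess reads the script from the pipe as it is written.
	cmd := exec.Command("sqlplus", "-S", "user/password@db")
	cmd.Stdin = pr
	return cmd.Output()
}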
// Streams upload directly from file -> mime/multipart -> pipe -> http-request
func streamingUploadFile(id, field, path, store string, w *io.PipeWriter, file io.Reader) {
	// defer file.Close()
	defer w.Close()
	writer := multipart.NewWriter(w)
	part, err := writer.CreateFormFile("file", filepath.Base(path))
	if err != nil {
		log.Fatal("err", "err", err)
		return
	}
	_, err = io.Copy(part, file)
	if err != nil {
		log.Fatal("err", "err", err)
		return
	}
	err = writer.Close()
	if err != nil {
		log.Fatal("err", "err", err)
		return
	}
}
func (c *combinedReader) readTo(r io.Reader, w *io.PipeWriter) {
	p := make([]byte, 1e5)
	for {
		n, err := r.Read(p)
		if n > 0 {
			c.wlk.Lock()
			w.Write(p[:n])
			c.wlk.Unlock()
		}
		if err != nil {
			c.wlk.Lock()
			defer c.wlk.Unlock()
			c.closed++
			if c.closed == 2 {
				w.Close()
			}
			return
		}
	}
}
// We overload the Wait() method to enable subprocess termination if a
// timeout has been exceeded.
func (mc *ManagedCmd) Wait() (err error) {
	go func() {
		mc.done <- mc.Cmd.Wait()
	}()
	done := false
	if mc.timeout_duration != 0 {
		for !done {
			select {
			case <-mc.Stopchan:
				err = fmt.Errorf("ManagedCmd was stopped with error: [%s]", mc.kill())
				done = true
			case <-time.After(mc.timeout_duration):
				mc.Stopchan <- true
				err = fmt.Errorf("ManagedCmd timedout")
			case err = <-mc.done:
				done = true
			}
		}
	} else {
		select {
		case <-mc.Stopchan:
			err = fmt.Errorf("ManagedCmd was stopped with error: [%s]", mc.kill())
		case err = <-mc.done:
		}
	}
	if writer, ok := mc.Stdout.(*io.PipeWriter); ok {
		writer.Close()
	}
	if writer, ok := mc.Stderr.(*io.PipeWriter); ok {
		writer.Close()
	}
	return err
}
// streamingUpload streams a file via a pipe through a multipart.Writer.
// Generally one should use newStreamingUpload instead of calling this directly.
func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.Writer, w *io.PipeWriter) {
	defer GinkgoRecover()
	defer file.Close()
	defer w.Close()

	// Set up the form file
	fileWriter, err := postBodyWriter.CreateFormFile("file", fileName)
	if err != nil {
		Failf("Unable to write file at %s to buffer. Error: %s", fileName, err)
	}

	// Copy kubectl binary into the file writer
	if _, err := io.Copy(fileWriter, file); err != nil {
		Failf("Unable to copy file at %s into the file writer. Error: %s", fileName, err)
	}

	// Nothing more should be written to this instance of the postBodyWriter
	if err := postBodyWriter.Close(); err != nil {
		Failf("Unable to close the writer for file upload. Error: %s", err)
	}
}