func collectToOutputChannel(name string, in io.ReadCloser, ch chan<- commandOutput) { go func() { buf, err := ioutil.ReadAll(in) ch <- commandOutput{name, buf, err} in.Close() }() }
// CopyBundleExcept copies all the blobs in the bundle src, except for those in // the list, into the current place in the bundle writer. func (bw *BundleWriter) CopyBundleExcept(src int, except []BlobID) error { r, err := OpenBundle(bw.store, sugar(bw.item.ID, src)) if err != nil { return err } defer r.Close() var badnames = make([]string, 1+len(except)) badnames[0] = "item-info.json" for i, id := range except { badnames[i+1] = fmt.Sprintf("blob/%d", id) } for _, fname := range r.Files() { if contains(badnames, fname) { continue } var rc io.ReadCloser rc, err = r.Open(fname) if err != nil { return err } // TODO(dbrower): check for errors blob := bw.item.blobByID(extractBlobID(fname)) err = bw.WriteBlob(blob, rc) rc.Close() if err != nil { return err } } return nil }
// readPayload reads the HTTP response in chunks, making the read buffer available
// to MeekConn.Read() calls after each chunk; the intention is to allow bytes to
// flow back to the reader as soon as possible instead of buffering the entire payload.
//
// Returns the total number of payload bytes consumed, or (0, nil) if the
// connection is broadcast-closed mid-transfer.
func (meek *MeekConn) readPayload(receivedPayload io.ReadCloser) (totalSize int64, err error) {
	defer receivedPayload.Close()
	totalSize = 0
	for {
		// Cap each pass so a buffer is handed back to readers at least every
		// READ_PAYLOAD_CHUNK_LENGTH bytes.
		reader := io.LimitReader(receivedPayload, READ_PAYLOAD_CHUNK_LENGTH)
		// Block until there is capacity in the receive buffer
		var receiveBuffer *bytes.Buffer
		select {
		case receiveBuffer = <-meek.emptyReceiveBuffer:
		case receiveBuffer = <-meek.partialReceiveBuffer:
		case <-meek.broadcastClosed:
			return 0, nil
		}
		// Note: receiveBuffer size may exceed FULL_RECEIVE_BUFFER_LENGTH by up to the size
		// of one received payload. The FULL_RECEIVE_BUFFER_LENGTH value is just a threshold.
		n, err := receiveBuffer.ReadFrom(reader)
		// Hand the buffer back before acting on the read result, so readers
		// can consume the bytes even if we are about to return an error.
		meek.replaceReceiveBuffer(receiveBuffer)
		if err != nil {
			return 0, ContextError(err)
		}
		totalSize += n
		// A zero-byte pass means the LimitReader hit EOF: payload finished.
		if n == 0 {
			break
		}
	}
	return totalSize, nil
}
func (omxHandler) Display(ctx *context.T, mimetype string, r io.ReadCloser) (func(), error) { defer r.Close() tmp, err := ioutil.TempFile("", "") if err != nil { return nil, err } if _, err := io.Copy(tmp, r); err != nil { os.Remove(tmp.Name()) return nil, err } tmp.Close() args := []string{ "-b", tmp.Name(), } vlog.Infof("Running: omxplayer %s", strings.Join(args, " ")) cmd := exec.Command("omxplayer", args...) cmd.Stdin = r cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Start(); err != nil { return nil, err } return func() { if err := cmd.Process.Kill(); err != nil { vlog.Errorf("Could not kill omx: %v", err) } cmd.Wait() os.Remove(tmp.Name()) }, nil }
// loadWav reads a valid wave file into a header and a bunch audio data into bytes. // Invalid files return a nil header and an empty data slice. // FUTURE: Handle the info block. func (l *loader) loadWav(file io.ReadCloser) (wh *WavHdr, bytes []byte, err error) { wh = &WavHdr{} if err = binary.Read(file, binary.LittleEndian, wh); err != nil { return nil, []byte{}, fmt.Errorf("Invalid .wav audio file: %s", err) } // check that it really is a WAVE file. riff, wave := string(wh.RiffId[:]), string(wh.WaveId[:]) if riff != "RIFF" || wave != "WAVE" { return nil, []byte{}, fmt.Errorf("Invalid .wav audio file") } // read the audio data. bytesRead := uint32(0) data := []byte{} inbuff := make([]byte, wh.DataSize) for bytesRead < wh.DataSize { inbytes, readErr := file.Read(inbuff) if readErr != nil { return nil, []byte{}, fmt.Errorf("Corrupt .wav audio file") } data = append(data, inbuff...) bytesRead += uint32(inbytes) } if bytesRead != wh.DataSize { return nil, []byte{}, fmt.Errorf("Invalid .wav audio file %d %d", bytesRead, wh.DataSize) } return wh, data, nil }
// followFile outputs the contents of the file to stdout relative to the end of // the file. func (l *LogsCommand) followFile(client *api.Client, alloc *api.Allocation, follow bool, task, logType, origin string, offset int64) (io.ReadCloser, error) { cancel := make(chan struct{}) frames, err := client.AllocFS().Logs(alloc, follow, task, logType, origin, offset, cancel, nil) if err != nil { return nil, err } signalCh := make(chan os.Signal, 1) signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) // Create a reader var r io.ReadCloser frameReader := api.NewFrameReader(frames, cancel) frameReader.SetUnblockTime(500 * time.Millisecond) r = frameReader go func() { <-signalCh // End the streaming r.Close() }() return r, nil }
// doRequest builds and executes the HTTP request for this archive-insert
// call. alt selects the response encoding. When media is attached the URL is
// rewritten onto the /upload/ endpoint and uploadType is set to "multipart"
// or "resumable" accordingly.
func (c *ArchiveInsertCall) doRequest(alt string) (*http.Response, error) {
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	urls := googleapi.ResolveRelative(c.s.BasePath, "{groupId}/archive")
	if c.media_ != nil || c.resumableBuffer_ != nil {
		// Media uploads go through the dedicated upload host path.
		urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
		protocol := "multipart"
		if c.resumableBuffer_ != nil {
			protocol = "resumable"
		}
		c.urlParams_.Set("uploadType", protocol)
	}
	urls += "?" + c.urlParams_.Encode()
	// Empty JSON body by default; replaced below when media is combined in.
	body = new(bytes.Buffer)
	ctype := "application/json"
	if c.media_ != nil {
		var combined io.ReadCloser
		combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_)
		defer combined.Close()
		body = combined
	}
	req, _ := http.NewRequest("POST", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"groupId": c.groupId,
	})
	if c.resumableBuffer_ != nil {
		req.Header.Set("X-Upload-Content-Type", c.mediaType_)
	}
	req.Header.Set("Content-Type", ctype)
	req.Header.Set("User-Agent", c.s.userAgent())
	if c.ctx_ != nil {
		// Honor the caller-supplied context for cancellation/deadlines.
		return ctxhttp.Do(c.ctx_, c.s.client, req)
	}
	return c.s.client.Do(req)
}
// startPty allocates pseudo-terminals for the container's stdout/stderr (and
// stdin when Config.OpenStdin is set), wires the slave ends into the command,
// starts the command, and pumps bytes between the master ends and the
// container's stream broadcasters in background goroutines.
func (container *Container) startPty() error {
	stdoutMaster, stdoutSlave, err := pty.Open()
	if err != nil {
		return err
	}
	container.ptyStdoutMaster = stdoutMaster
	container.cmd.Stdout = stdoutSlave

	stderrMaster, stderrSlave, err := pty.Open()
	if err != nil {
		return err
	}
	container.ptyStderrMaster = stderrMaster
	container.cmd.Stderr = stderrSlave

	// Copy the PTYs to our broadcasters
	go func() {
		defer container.stdout.CloseWriters()
		Debugf("[startPty] Begin of stdout pipe")
		io.Copy(container.stdout, stdoutMaster)
		Debugf("[startPty] End of stdout pipe")
	}()
	go func() {
		defer container.stderr.CloseWriters()
		Debugf("[startPty] Begin of stderr pipe")
		io.Copy(container.stderr, stderrMaster)
		Debugf("[startPty] End of stderr pipe")
	}()

	// stdin
	var stdinSlave io.ReadCloser
	if container.Config.OpenStdin {
		var stdinMaster io.WriteCloser
		stdinMaster, stdinSlave, err = pty.Open()
		if err != nil {
			return err
		}
		container.ptyStdinMaster = stdinMaster
		container.cmd.Stdin = stdinSlave
		// FIXME: The following appears to be broken.
		// "cannot set terminal process group (-1): Inappropriate ioctl for device"
		// container.cmd.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}
		go func() {
			defer container.stdin.Close()
			Debugf("[startPty] Begin of stdin pipe")
			io.Copy(stdinMaster, container.stdin)
			Debugf("[startPty] End of stdin pipe")
		}()
	}
	if err := container.cmd.Start(); err != nil {
		return err
	}
	// Close the parent's copies of the slave ends now that the child has
	// inherited them.
	stdoutSlave.Close()
	stderrSlave.Close()
	if stdinSlave != nil {
		stdinSlave.Close()
	}
	return nil
}
// UntarOne writes the contents up a single file to dst func UntarOne(name string, dst io.Writer, src io.ReadCloser) error { // ungzipped, err := gzip.NewReader(src) // if err != nil { // return err // } tarball := tar.NewReader(src) defer src.Close() // defer tarball.Close() for { hdr, err := tarball.Next() if err == io.EOF { // finished the tar break } if err != nil { return err } if hdr.Name != name { continue } // We found the file we care about _, err = io.Copy(dst, tarball) break } return nil }
// DownloadDiffs downloads a filename by differences into destfile func DownloadDiffs(destfile string, diffs *Diffs) (downloaded int64, hash string, err error) { file, err := os.OpenFile(destfile, os.O_CREATE|os.O_WRONLY, 0750) // For write access if err != nil { return } defer file.Close() h := NewHasher() var source io.ReadCloser sink := io.MultiWriter(file, h) localHnd := &LocalHashNDump{"."} remoteHnd := &RemoteHashNDump{diffs.Server} done := int64(0) for _, diff := range diffs.Diffs { if diff.Different { source, _, err = remoteHnd.Dump(diffs.Filename, diff.Offset, diff.Size) } else { source, _, err = localHnd.Dump(diffs.Alike, diff.Offset, diff.Size) } if err != nil { return downloaded, "", err } n, err := io.CopyN(sink, source, diff.Size) source.Close() if err != nil { return downloaded, "", err } if n != diff.Size { return downloaded, "", fmt.Errorf("Expected to copy %v but copied %v instead!", diff.Size, n) } downloaded += n done += n } return downloaded, fmt.Sprintf("%x", h.Sum(nil)), nil }
// readJSONStream decodes a stream of JSON values from stream in a background
// goroutine, delivering each decodingResult on the returned channel. A signal
// on stopChan closes the underlying stream (unblocking the decoder) and ends
// the loop; the result channel is closed when the decode goroutine exits.
func (client *DockerClient) readJSONStream(stream io.ReadCloser, decode func(*json.Decoder) decodingResult, stopChan <-chan struct{}) <-chan decodingResult {
	resultChan := make(chan decodingResult)
	go func() {
		decoder := json.NewDecoder(stream)
		stopped := make(chan struct{})
		go func() {
			<-stopChan
			// Closing the stream forces a blocked Decode to return.
			stream.Close()
			stopped <- struct{}{}
		}()
		defer close(resultChan)
		for {
			decodeResult := decode(decoder)
			select {
			case <-stopped:
				return
			default:
				resultChan <- decodeResult
				if decodeResult.err != nil {
					stream.Close()
					return
				}
			}
		}
	}()
	// NOTE(review): if the loop exits through the decode-error branch, the
	// inner goroutine's send on `stopped` has no receiver, so that goroutine
	// blocks forever once stopChan fires — confirm whether this leak matters.
	return resultChan
}
// LoadSchema fetches and parses the schema at uri. The uri may carry an
// explicit "proto://" prefix; otherwise "http" is assumed. With localCopy
// set, the schema is downloaded under PkgGen.BaseCodePath (only if not
// already cached) and parsed from disk; otherwise it is streamed directly
// from the remote URL.
func LoadSchema(uri string, localCopy bool) (sd *Schema, err error) {
	var protocol, localPath string
	var rc io.ReadCloser
	// Split an explicit protocol prefix off uri, defaulting to http.
	if pos := strings.Index(uri, protSep); pos < 0 {
		protocol = "http" + protSep
	} else {
		protocol = uri[:pos+len(protSep)]
		uri = uri[pos+len(protSep):]
	}
	if localCopy {
		// Download only when there is no cached copy on disk yet.
		if localPath = filepath.Join(PkgGen.BaseCodePath, uri); !ufs.FileExists(localPath) {
			if err = ufs.EnsureDirExists(filepath.Dir(localPath)); err == nil {
				err = unet.DownloadFile(protocol+uri, localPath)
			}
		}
		if err == nil {
			// Record where the local copy lives; note loadSchemaFile may
			// return a non-nil sd together with a non-nil err.
			if sd, err = loadSchemaFile(localPath, uri); sd != nil {
				sd.loadLocalPath = localPath
			}
		}
	} else if rc, err = unet.OpenRemoteFile(protocol + uri); err == nil {
		defer rc.Close()
		sd, err = loadSchema(rc, uri, "")
	}
	return
}
func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { type diffPathDriver interface { DiffPath(string) (string, func() error, error) } diffDriver, ok := ls.driver.(diffPathDriver) if !ok { diffDriver = &naiveDiffPathDriver{ls.driver} } defer metadata.Close() // get our relative path to the container fsPath, releasePath, err := diffDriver.DiffPath(graphID) if err != nil { return err } defer releasePath() metaUnpacker := storage.NewJSONUnpacker(metadata) upackerCounter := &unpackSizeCounter{metaUnpacker, size} fileGetter := storage.NewPathFileGetter(fsPath) logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath) return asm.WriteOutputTarStream(fileGetter, upackerCounter, w) }
// Decode reads one possibly-zlib-compressed packet from reader. A VarInt
// length prefix of 0 marks an uncompressed payload that is delegated straight
// to the inner codec; any positive length routes through a zlib reader.
func (this *PacketCodecZlib) Decode(reader io.Reader) (packet Packet, err error) {
	rawBytes := reader.(Byteser).Bytes() // FIXME assuming the caller is a Byteser is a bad idea
	length, err := ReadVarInt(reader)
	if err != nil {
		return
	}
	if length < 0 {
		err = errors.New(fmt.Sprintf("Decode, Compressed length is below zero: %d", length))
		return
	}
	if length == 0 {
		// Zero length means the payload was sent uncompressed.
		packet, err = this.codec.Decode(reader)
	} else {
		zlibBytes := reader.(Byteser).Bytes() // FIXME assuming the caller is a Byteser is a bad idea
		var zlibReader io.ReadCloser
		zlibReader, err = NewZlibToggleReaderBuffer(rawBytes, zlibBytes)
		if err != nil {
			return
		}
		packet, err = this.codec.Decode(zlibReader)
		if err != nil {
			return
		}
		// NOTE(review): zlibReader is not closed on the early error return
		// just above — confirm whether Close releases resources that leak.
		zlibReader.Close()
	}
	return
}
func (fs *GDriveFileSystem) Put(p string, bytes io.ReadCloser) webdav.StatusCode { defer bytes.Close() parent := path.Dir(p) base := path.Base(p) parentId := fs.getFileId(parent, true) if parentId == "" { log.Errorf("ERROR: Parent not found") return webdav.StatusCode(http.StatusConflict) // 409 } parentRef := &drive.ParentReference{ Id: parentId, IsRoot: "parent" == "/", } f := &drive.File{ Title: base, Parents: []*drive.ParentReference{parentRef}, } _, err := fs.client.Files.Insert(f).Media(bytes).Do() if err != nil { log.Errorf("can't put: %v", err) return webdav.StatusCode(500) } fs.invalidatePath(p) fs.invalidatePath(parent) return webdav.StatusCode(201) }
// readAndClose reads and closes the given ReadCloser. // // Trying to read from a nil simply returns nil, no error. func readAndClose(stream io.ReadCloser) ([]byte, error) { if stream == nil { return nil, nil } defer stream.Close() return ioutil.ReadAll(stream) }
func runImport(c *cli.Context) { var input io.ReadCloser var err error input = os.Stdin cfg, err := config.LoadConfig() if err != nil { log.Fatal(err) } inputFile := c.String("input") if inputFile != "" { input, err = os.Open(inputFile) if err != nil { log.Fatal(err) } defer input.Close() } bytes, err := ioutil.ReadAll(input) if err != nil { log.Fatal(err) } cfg, err = cfg.Import(bytes) if err != nil { log.Fatal(err) } if err := cfg.Save(); err != nil { log.Fatal(err) } }
func (cmd *cmdRestore) Main() { input, target := args.input, args.target if len(target) == 0 { log.Panic("invalid argument: target") } if len(input) == 0 { input = "/dev/stdin" } log.Infof("restore from '%s' to '%s'\n", input, target) var readin io.ReadCloser var nsize int64 if input != "/dev/stdin" { readin, nsize = openReadFile(input) defer readin.Close() } else { readin, nsize = os.Stdin, 0 } reader := bufio.NewReaderSize(ioutils.NewCountReader(readin, &cmd.nread), ReaderBufferSize) cmd.RestoreRDBFile(reader, target, nsize) if !args.extra { return } if nsize != 0 && nsize == cmd.nread.Get() { return } cmd.RestoreCommand(reader, target, nsize) }
func readBody(b io.ReadCloser, ctype string, encoding string) (body string, err error) { defer b.Close() var r io.Reader if encoding == gzipHeader { gr, err := gzip.NewReader(b) if err != nil { return "", err } r = gr defer gr.Close() } else if encoding == "" { r = b } else { return "", fmt.Errorf("Unknown %s: %s", encHeader, encoding) } // TODO(iantw): If we find a need, allow character set conversions... // Unlikely to be an issue for now. // if ctype != "" { // r, err = charset.NewReader(r, ctype) // // if err != nil { // return "", err // } // } bytes, err := ioutil.ReadAll(r) return string(bytes), err }
func openArmoredPublicKeyFile(reader io.ReadCloser) (*packet.PublicKeyPacket, os.Error) { defer reader.Close() var lr = io.LimitReader(reader, publicKeyMaxSize) data, err := ioutil.ReadAll(lr) if err != nil { return nil, os.NewError(fmt.Sprintf("Error reading public key file: %v", err)) } if len(data) == publicKeyMaxSize { return nil, os.NewError(fmt.Sprintf("Public key blob is too large")) } block, _ := armor.Decode(data) if block == nil { return nil, os.NewError("Couldn't find PGP block in public key file") } if block.Type != "PGP PUBLIC KEY BLOCK" { return nil, os.NewError("Invalid public key blob.") } buf := bytes.NewBuffer(block.Bytes) p, err := packet.ReadPacket(buf) if err != nil { return nil, os.NewError(fmt.Sprintf("Invalid public key blob: %v", err)) } pk, ok := p.(packet.PublicKeyPacket) if !ok { return nil, os.NewError(fmt.Sprintf("Invalid public key blob; not a public key packet")) } return &pk, nil }
func handleFile(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) return } var key *cafs.SKey if k, err := cafs.ParseKey(r.URL.Path[6:]); err != nil { http.NotFound(w, r) log.Printf("Error parsing key from URL %v: %v", r.URL, err) return } else { key = k } var reader io.ReadCloser if f, err := client.GetActivityManager().GetStorage().Get(key); err != nil { http.NotFound(w, r) log.Printf("Error retrieving key %v: %v", key, err) return } else { reader = f.Open() f.Dispose() } defer func() { if err := reader.Close(); err != nil { log.Printf("Error closing file: %v", err) } }() if _, err := io.Copy(w, reader); err != nil { log.Printf("Error sending file contents to client: %v", err) } }
//Restore - method to execute restore func (s *MysqlPlugin) Restore() (err error) { lo.G.Debug("Starting restore of mysql-tile") var reader io.ReadCloser var persistanceBackuper cfbackup.PersistanceBackup var mysqlUserName, mysqlPassword string var sshConfigs []command.SshConfig if sshConfigs, err = s.getSSHConfig(); err == nil { //take first node to execute restore on sshConfig := sshConfigs[0] if mysqlUserName, mysqlPassword, err = s.getMysqlCredentials(); err == nil { if persistanceBackuper, err = s.GetPersistanceBackup(mysqlUserName, mysqlPassword, sshConfig); err == nil { if reader, err = s.PivotalCF.NewArchiveReader(outputFileName); err == nil { defer reader.Close() if err = persistanceBackuper.Import(reader); err == nil { err = s.GetPrivilegeFlusher(sshConfig, mysqlPassword) } } } } } lo.G.Debug("Finished restore of mysql-tile", err) return }
func (eogHandler) Display(ctx *context.T, mimetype string, r io.ReadCloser) (func(), error) { // eog cannot read from a pipe, so we have to write the file to // the filesystem before displaying it. defer r.Close() tmp, err := ioutil.TempFile("", "") if err != nil { return nil, err } if _, err := io.Copy(tmp, r); err != nil { os.Remove(tmp.Name()) return nil, err } tmp.Close() cmd := exec.Command("eog", "--display", ":0", "-f", tmp.Name()) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr stop := func() { if err := cmd.Process.Kill(); err != nil { vlog.Errorf("Could not kill eog: %v", err) } cmd.Wait() os.Remove(tmp.Name()) } if err := cmd.Start(); err != nil { return stop, err } return stop, nil }
func (pbp *PBP) Read(rc io.ReadCloser) error { binary.Read(rc, binary.LittleEndian, &pbp.cookie) if pbp.cookie == 0x464C457f { fmt.Printf("File is an elf, converting to empty PBP") bytes, _ := ioutil.ReadAll(rc) pbp.data[6] = append([]byte{0x7f, 0x45, 0x4c, 0x46}[:], bytes...) pbp.cookie = 0x50425000 pbp.version = 0x00010000 return nil } if pbp.cookie != 0x50425000 { return errors.New("bad cookie") } binary.Read(rc, binary.LittleEndian, &pbp.version) for i := 0; i < 8; i++ { binary.Read(rc, binary.LittleEndian, &pbp.offsets[i]) } for i := 0; i < 7; i++ { pbp.data[i] = make([]byte, pbp.offsets[i+1]-pbp.offsets[i]) if len(pbp.data[i]) > 0 { _, err := rc.Read(pbp.data[i]) if err != nil { return err } } } var err error pbp.data[7], err = ioutil.ReadAll(rc) return err }
// Gethtml3 fetches url (advertising gzip support), prints the response
// headers, and prints the — decompressed, when gzip-encoded — body.
// NOTE(review): the original "not follow redirect" comment does not match the
// code: a default http.Client follows redirects — confirm intent.
func Gethtml3(url string) {
	client := new(http.Client)
	// BUG FIX: the request URL was hard-coded to www.baidu.com, silently
	// ignoring the url parameter.
	request, err := http.NewRequest("GET", url, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	request.Header.Add("Accept-Encoding", "gzip")
	response, err := client.Do(request)
	if err != nil {
		// BUG FIX: this error used to be discarded, so a failed request
		// panicked on the nil response.Body below.
		fmt.Println(err)
		return
	}
	defer response.Body.Close()
	for k, v := range response.Header {
		fmt.Println(k)
		fmt.Println(v)
	}
	// Check that the server actually sent compressed data
	var reader io.ReadCloser
	switch response.Header.Get("Content-Encoding") {
	case "gzip":
		fmt.Println("XXXXXXXXXX gzip")
		var gzErr error
		reader, gzErr = gzip.NewReader(response.Body)
		if gzErr != nil {
			// BUG FIX: a corrupt gzip stream previously left reader nil.
			fmt.Println(gzErr)
			return
		}
		defer reader.Close()
	default:
		reader = response.Body
	}
	var s string
	if b, err := ioutil.ReadAll(reader); err == nil {
		s = string(b)
	}
	println(s)
}
// serveRequest dispatches one FastCGI request to the child's HTTP handler,
// then finalizes the response, unregisters the request, and tears the
// connection down when keep-alive was not requested.
func (c *child) serveRequest(req *request, body io.ReadCloser) {
	r := newResponse(c, req)
	httpReq, err := cgi.RequestFromMap(req.params) // build the http.Request from the FastCGI params
	if err != nil {
		// there was an error reading the request
		r.WriteHeader(http.StatusInternalServerError)
		c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
	} else {
		httpReq.Body = body
		c.handler.ServeHTTP(r, httpReq)
	}
	r.Close()
	c.mu.Lock()
	delete(c.requests, req.reqId)
	c.mu.Unlock()
	c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete)

	// Consume the entire body, so the host isn't still writing to
	// us when we close the socket below in the !keepConn case,
	// otherwise we'd send a RST. (golang.org/issue/4183)
	// TODO(bradfitz): also bound this copy in time. Or send
	// some sort of abort request to the host, so the host
	// can properly cut off the client sending all the data.
	// For now just bound it a little and
	io.CopyN(ioutil.Discard, body, 100<<20)
	body.Close()

	if !req.keepConn {
		c.conn.Close()
	}
}
func (mfsm *MyFsm) Restore(inp io.ReadCloser) error { defer inp.Close() fmt.Printf("Restore......................\n") mfsm.mutex.Lock() defer mfsm.mutex.Unlock() var buffer bytes.Buffer readdata := make([]byte, 1024) for { n, err := inp.Read(readdata) if err != nil { panic(err) } if n < 1024 { if n > 0 { lastbytes := make([]byte, n) copy(readdata, lastbytes) buffer.Write(lastbytes) } break } else { buffer.Write(readdata) } } dec := gob.NewDecoder(&buffer) err := dec.Decode(&mfsm.data) errorOnExit(err) return nil }
// ReadAt implements io.ReaderAt: it fills p with bytes starting at offset by
// stitching together the per-chunk readers that back the file.
func (fr *FileReader) ReadAt(p []byte, offset int64) (n int, err error) {
	if offset < 0 {
		return 0, errors.New("schema/filereader: negative offset")
	}
	if offset >= fr.Size() {
		return 0, io.EOF
	}
	want := len(p)
	for len(p) > 0 && err == nil {
		// Each pass gets a fresh reader positioned at the current offset.
		var rc io.ReadCloser
		rc, err = fr.readerForOffset(offset)
		if err != nil {
			return
		}
		var n1 int64 // never bigger than an int
		n1, err = io.CopyN(&sliceWriter{p}, rc, int64(len(p)))
		rc.Close()
		if err == io.EOF {
			// EOF from one chunk just means "advance to the next offset".
			err = nil
		}
		if n1 == 0 {
			break
		}
		p = p[n1:]
		offset += int64(n1)
		n += int(n1)
	}
	// io.ReaderAt contract: a partial fill without a concrete error is
	// reported as io.ErrUnexpectedEOF.
	if n < want && err == nil {
		err = io.ErrUnexpectedEOF
	}
	return n, err
}
// Checks for input and output flags:
// - If output is set:
//   - Create a file as output location with ".tmp" suffix added
//   - When process is finished:
//     - If there are no errors, move temporary file to the output location
//     - Else, remove temporary file
// - Else, use stdout
//
// - If input is set, use processLoc func
// - Else, pass process func Stdin
//
// - After processing - if err exists, write to Stderr
func main() {
	var (
		err    error
		input  io.ReadCloser
		output io.WriteCloser

		i, o, tmp string = getFlagLocs()
	)
	if input, err = getInput(i); err != nil {
		stderr("", err)
	}
	// NOTE(review): if getInput/getOutput fail and stderr() does not
	// terminate the process, input/output stay nil and the Close() calls
	// below would panic on a nil interface — confirm stderr exits on error.
	if output, err = getOutput(o, tmp); err != nil {
		stderr("", err)
	}
	// Only run the pipeline when both endpoints were set up cleanly.
	if err == nil {
		err = process(input, output)
	}
	input.Close()
	output.Close()
	// Promote or discard the temporary output based on err, then report.
	handleOutput(err, o, tmp)
	reportErrors(err)
}
func decodeBidRequest(requestBody io.ReadCloser) *auction.BidRequest { decoder := json.NewDecoder(requestBody) var bidRequest auction.BidRequest decoder.Decode(&bidRequest) requestBody.Close() return &bidRequest }