// read ignores the cache, and returns an io.ReadCloser representing the // decompressed data for the given key, streamed from the disk. Clients should // acquire a read lock on the Diskv and check the cache themselves before // calling read. func (d *Diskv) read(key string) (io.ReadCloser, error) { filename := d.completeFilename(key) fi, err := os.Stat(filename) if err != nil { return nil, err } if fi.IsDir() { return nil, os.ErrNotExist } f, err := os.Open(filename) if err != nil { return nil, err } r := newSiphon(f, d, key) var rc = io.ReadCloser(ioutil.NopCloser(r)) if d.Compression != nil { rc, err = d.Compression.Reader(r) if err != nil { return nil, err } } return rc, nil }
// read ignores the cache, and returns an io.ReadCloser representing the // decompressed data for the given key, streamed from the disk. Clients should // acquire a read lock on the Diskv and check the cache themselves before // calling read. func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) { filename := d.completeFilename(key) fi, err := os.Stat(filename) if err != nil { return nil, err } if fi.IsDir() { return nil, os.ErrNotExist } f, err := os.Open(filename) if err != nil { return nil, err } var r io.Reader if d.CacheSizeMax > 0 { r = newSiphon(f, d, key) } else { r = &closingReader{f} } var rc = io.ReadCloser(ioutil.NopCloser(r)) if d.Compression != nil { rc, err = d.Compression.Reader(r) if err != nil { return nil, err } } return rc, nil }
// RsyncSend sets up the sending half of an rsync, to recursively send the
// directory pointed to by path over the websocket.
//
// readWrapper, when non-nil, wraps the rsync data socket (e.g. for progress
// tracking) before it is mirrored onto the websocket. The returned error is
// the rsync process's exit status (nil on success).
func RsyncSend(path string, conn *websocket.Conn, readWrapper func(io.ReadCloser) io.ReadCloser) error {
	cmd, dataSocket, stderr, err := rsyncSendSetup(path)
	// The socket may be non-nil even when setup failed partway, so close
	// it unconditionally before checking err.
	if dataSocket != nil {
		defer dataSocket.Close()
	}
	if err != nil {
		return err
	}

	readPipe := io.ReadCloser(dataSocket)
	if readWrapper != nil {
		readPipe = readWrapper(dataSocket)
	}

	readDone, writeDone := shared.WebsocketMirror(conn, dataSocket, readPipe)

	// Drain stderr before waiting so we can report rsync's own diagnostics
	// alongside a failed exit status.
	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		shared.LogDebugf("problem reading rsync stderr %s", err)
	}

	err = cmd.Wait()
	if err != nil {
		shared.LogDebugf("problem with rsync send of %s: %s: %s", path, err, string(output))
	}

	// Wait for both mirror directions to finish before returning, so the
	// websocket is fully flushed.
	<-readDone
	<-writeDone

	return err
}
func (r *progressReader) Read(p []byte) (n int, err error) { read, err := io.ReadCloser(r.reader).Read(p) r.readProgress += read updateEvery := 4096 if r.readTotal > 0 { // Only update progress for every 1% read if increment := int(0.01 * float64(r.readTotal)); increment > updateEvery { updateEvery = increment } } if r.readProgress-r.lastUpdate > updateEvery || err != nil { if r.readTotal > 0 { fmt.Fprintf(r.output, r.template, r.readProgress, r.readTotal, fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100)) } else { fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a") } r.lastUpdate = r.readProgress } // Send newline when complete if err != nil { r.output.Write(r.sf.FormatStatus("")) } return read, err }
// create a ReadCloser that reads from r and closes c. func makeReadCloser(r io.Reader, c io.Closer) io.ReadCloser { rc := struct { io.Reader io.Closer }{r, c} return io.ReadCloser(rc) }
// stubReader returns a fixture reader over the bundled notification JSON.
// It panics if the fixture is missing, which indicates a broken checkout.
func stubReader() io.ReadCloser {
	f, err := os.Open("_support/notification.json")
	if err != nil {
		// should never happen
		panic(err)
	}
	return f
}
// Load the JSON config file func Load(configFile string, p Parser) { var err error var input = io.ReadCloser(os.Stdin) if input, err = os.Open(configFile); err != nil { log.Fatalln(err) } // Read the config file jsonBytes, err := ioutil.ReadAll(input) input.Close() if err != nil { log.Fatalln(err) } // Parse the config if err := p.ParseJSON(jsonBytes); err != nil { log.Fatalln("Could not parse %q: %v", configFile, err) } }
// send streams a zfs snapshot over the websocket. When zfsParent is non-empty
// an incremental send (-i) relative to that parent snapshot is performed.
// readWrapper, when non-nil, wraps the zfs stdout stream (e.g. for progress
// reporting) before it is written to the websocket.
func (s *zfsMigrationSourceDriver) send(conn *websocket.Conn, zfsName string, zfsParent string, readWrapper func(io.ReadCloser) io.ReadCloser) error {
	// For snapshots the container name is "container/snapshot"; only the
	// container part names the dataset.
	fields := strings.SplitN(s.container.Name(), shared.SnapshotDelimiter, 2)
	args := []string{"send", fmt.Sprintf("%s/containers/%s@%s", s.zfs.zfsPool, fields[0], zfsName)}
	if zfsParent != "" {
		args = append(args, "-i", fmt.Sprintf("%s/containers/%s@%s", s.zfs.zfsPool, s.container.Name(), zfsParent))
	}

	cmd := exec.Command("zfs", args...)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}

	readPipe := io.ReadCloser(stdout)
	if readWrapper != nil {
		readPipe = readWrapper(stdout)
	}

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}

	if err := cmd.Start(); err != nil {
		return err
	}

	// Block until the whole stream has been pushed to the websocket.
	<-shared.WebsocketSendStream(conn, readPipe, 4*1024*1024)

	// Capture stderr before Wait so diagnostics survive process exit.
	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		shared.LogError("problem reading zfs send stderr", log.Ctx{"err": err})
	}

	err = cmd.Wait()
	if err != nil {
		shared.LogError("problem with zfs send", log.Ctx{"output": string(output)})
	}

	return err
}
func (r *progressReader) Read(p []byte) (n int, err error) { read, err := io.ReadCloser(r.reader).Read(p) r.readProgress += read // Only update progress for every 1% read updateEvery := int(0.01 * float64(r.readTotal)) if r.readProgress-r.lastUpdate > updateEvery || r.readProgress == r.readTotal { fmt.Fprintf(r.output, "%d/%d (%.0f%%)\r", r.readProgress, r.readTotal, float64(r.readProgress)/float64(r.readTotal)*100) r.lastUpdate = r.readProgress } // Send newline when complete if err == io.EOF { fmt.Fprintf(r.output, "\n") } return read, err }
func (r *progressReader) Read(p []byte) (n int, err error) { read, err := io.ReadCloser(r.reader).Read(p) r.progress.Current += read updateEvery := 1024 * 512 //512kB if r.progress.Total > 0 { // Update progress for every 1% read if 1% < 512kB if increment := int(0.01 * float64(r.progress.Total)); increment < updateEvery { updateEvery = increment } } if r.progress.Current-r.lastUpdate > updateEvery || err != nil { r.output.Write(r.sf.FormatProgress(r.ID, r.action, &r.progress)) r.lastUpdate = r.progress.Current } // Send newline when complete if r.newLine && err != nil { r.output.Write(r.sf.FormatStatus("", "")) } return read, err }
// Read forwards the read to the wrapped reader, accumulates the byte count,
// and periodically renders progress to r.output via r.template.
func (r *progressReader) Read(p []byte) (n int, err error) {
	read, err := io.ReadCloser(r.reader).Read(p)
	r.readProgress += read

	updateEvery := 1024 * 512 //512kB
	if r.readTotal > 0 {
		// Update progress for every 1% read if 1% < 512kB
		if increment := int(0.01 * float64(r.readTotal)); increment < updateEvery {
			updateEvery = increment
		}
	}
	if r.readProgress-r.lastUpdate > updateEvery || err != nil {
		if r.readTotal > 0 {
			// Known total: human-readable sizes plus a percentage.
			fmt.Fprintf(r.output, r.template, HumanSize(int64(r.readProgress)), HumanSize(int64(r.readTotal)), fmt.Sprintf("%.0f%%", float64(r.readProgress)/float64(r.readTotal)*100))
		} else {
			// NOTE(review): this branch passes a raw int where the
			// branch above passes HumanSize strings — r.template is
			// presumably %v-based; confirm against its definition.
			fmt.Fprintf(r.output, r.template, r.readProgress, "?", "n/a")
		}
		r.lastUpdate = r.readProgress
	}
	return read, err
}
//UntarFile extracts archived file. func UntarFile(file *os.File, destinationDir string) error { isGzipped, err := IsGzip(file) if err != nil { return err } fileReader := io.ReadCloser(file) if isGzipped { fileReader, err = gzip.NewReader(file) if err != nil { return err } defer fileReader.Close() } err = UntarStream(fileReader, destinationDir) if err != nil { return err } return nil }
// send streams a btrfs subvolume over the websocket. When btrfsParent is
// non-empty an incremental send (-p) relative to that parent subvolume is
// performed. readWrapper, when non-nil, wraps the btrfs stdout stream (e.g.
// for progress reporting) before it is written to the websocket.
func (s *btrfsMigrationSourceDriver) send(conn *websocket.Conn, btrfsPath string, btrfsParent string, readWrapper func(io.ReadCloser) io.ReadCloser) error {
	args := []string{"send", btrfsPath}
	if btrfsParent != "" {
		args = append(args, "-p", btrfsParent)
	}

	cmd := exec.Command("btrfs", args...)

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}

	readPipe := io.ReadCloser(stdout)
	if readWrapper != nil {
		readPipe = readWrapper(stdout)
	}

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}

	if err := cmd.Start(); err != nil {
		return err
	}

	// Block until the whole stream has been pushed to the websocket.
	<-shared.WebsocketSendStream(conn, readPipe, 4*1024*1024)

	// Capture stderr before Wait so diagnostics survive process exit.
	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		shared.LogError("problem reading btrfs send stderr", log.Ctx{"err": err})
	}

	err = cmd.Wait()
	if err != nil {
		shared.LogError("problem with btrfs send", log.Ctx{"output": string(output)})
	}

	return err
}
//Return the json content of files func (c *Config) getConfigJSON(configFolder string, configFile string) ([]byte, error) { var err error var input = io.ReadCloser(os.Stdin) configPath, err := filepath.Abs(configFolder + "/" + configFile + ".json") if err != nil { log.Fatalln("Could not parse %q: %v", configPath, err) } if input, err = os.Open(configPath); err != nil { log.Fatalln(err) } // Read the config file jsonBytes, err := ioutil.ReadAll(input) input.Close() if err != nil { log.Fatalln(err) } return jsonBytes, err }
// main drives the goversioninfo tool: it reads a versioninfo.json config
// (file argument, default "versioninfo.json", or stdin when "-"), applies
// command-line flag overrides, and writes a Windows resource .syso file.
func main() {
	// Flag definitions; each string flag overrides the matching field of
	// the JSON config when non-empty.
	flagExample := flag.Bool("example", false, "just dump out an example versioninfo.json to stdout")
	flagOut := flag.String("o", "resource.syso", "output file name")
	flagIcon := flag.String("icon", "", "icon file name")
	flagComment := flag.String("comment", "", "StringFileInfo.Comments")
	flagCompany := flag.String("company", "", "StringFileInfo.CompanyName")
	flagDescription := flag.String("description", "", "StringFileInfo.FileDescription")
	flagFileVersion := flag.String("file-version", "", "StringFileInfo.FileVersion")
	flagInternalName := flag.String("internal-name", "", "StringFileInfo.InternalName")
	flagCopyright := flag.String("copyright", "", "StringFileInfo.LegalCopyright")
	flagTrademark := flag.String("trademark", "", "StringFileInfo.LegalTrademarks")
	flagOriginalName := flag.String("original-name", "", "StringFileInfo.OriginalFilename")
	flagPrivateBuild := flag.String("private-build", "", "StringFileInfo.PrivateBuild")
	flagProductName := flag.String("product-name", "", "StringFileInfo.ProductName")
	flagProductVersion := flag.String("product-version", "", "StringFileInfo.ProductVersion")
	flagSpecialBuild := flag.String("special-build", "", "StringFileInfo.SpecialBuild")
	flagTranslation := flag.Int("translation", 0, "translation ID")
	flagCharset := flag.Int("charset", 0, "charset ID")
	// Version numbers use -1 as the "not set" sentinel so 0 is a valid
	// explicit value.
	flagVerMajor := flag.Int("ver-major", -1, "FileVersion.Major")
	flagVerMinor := flag.Int("ver-minor", -1, "FileVersion.Minor")
	flagVerPatch := flag.Int("ver-patch", -1, "FileVersion.Patch")
	flagVerBuild := flag.Int("ver-build", -1, "FileVersion.Build")

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [flags] <versioninfo.json>\n\nPossible flags:\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()

	// -example short-circuits everything and dumps a sample config.
	if *flagExample {
		io.WriteString(os.Stdout, example)
		return
	}

	configFile := flag.Arg(0)
	if configFile == "" {
		configFile = "versioninfo.json"
	}

	// "-" means read the config from stdin.
	var err error
	var input = io.ReadCloser(os.Stdin)
	if configFile != "-" {
		if input, err = os.Open(configFile); err != nil {
			log.Printf("Cannot open %q: %v", configFile, err)
			os.Exit(1)
		}
	}

	// Read the config file
	jsonBytes, err := ioutil.ReadAll(input)
	input.Close()
	if err != nil {
		log.Printf("Error reading %q: %v", configFile, err)
		os.Exit(1)
	}

	// Create a new container
	vi := &goversioninfo.VersionInfo{}

	// Parse the config
	if err := vi.ParseJSON(jsonBytes); err != nil {
		log.Printf("Could not parse the .json file: %v", err)
		os.Exit(2)
	}

	// Override from flags
	if *flagIcon != "" {
		vi.IconPath = *flagIcon
	}
	if *flagComment != "" {
		vi.StringFileInfo.Comments = *flagComment
	}
	if *flagCompany != "" {
		vi.StringFileInfo.CompanyName = *flagCompany
	}
	if *flagDescription != "" {
		vi.StringFileInfo.FileDescription = *flagDescription
	}
	if *flagFileVersion != "" {
		vi.StringFileInfo.FileVersion = *flagFileVersion
	}
	if *flagInternalName != "" {
		vi.StringFileInfo.InternalName = *flagInternalName
	}
	if *flagCopyright != "" {
		vi.StringFileInfo.LegalCopyright = *flagCopyright
	}
	if *flagTrademark != "" {
		vi.StringFileInfo.LegalTrademarks = *flagTrademark
	}
	if *flagOriginalName != "" {
		vi.StringFileInfo.OriginalFilename = *flagOriginalName
	}
	if *flagPrivateBuild != "" {
		vi.StringFileInfo.PrivateBuild = *flagPrivateBuild
	}
	if *flagProductName != "" {
		vi.StringFileInfo.ProductName = *flagProductName
	}
	if *flagProductVersion != "" {
		vi.StringFileInfo.ProductVersion = *flagProductVersion
	}
	if *flagSpecialBuild != "" {
		vi.StringFileInfo.SpecialBuild = *flagSpecialBuild
	}
	if *flagTranslation > 0 {
		vi.VarFileInfo.Translation.LangID = goversioninfo.LangID(*flagTranslation)
	}
	if *flagCharset > 0 {
		vi.VarFileInfo.Translation.CharsetID = goversioninfo.CharsetID(*flagCharset)
	}
	if *flagVerMajor >= 0 {
		vi.FixedFileInfo.FileVersion.Major = *flagVerMajor
	}
	if *flagVerMinor >= 0 {
		vi.FixedFileInfo.FileVersion.Minor = *flagVerMinor
	}
	if *flagVerPatch >= 0 {
		vi.FixedFileInfo.FileVersion.Patch = *flagVerPatch
	}
	if *flagVerBuild >= 0 {
		vi.FixedFileInfo.FileVersion.Build = *flagVerBuild
	}

	// Fill the structures with config data
	vi.Build()

	// Write the data to a buffer
	vi.Walk()

	// Create the file
	if err := vi.WriteSyso(*flagOut); err != nil {
		log.Printf("Error writing syso: %v", err)
		os.Exit(3)
	}
}
//ClientProxySSHProtocol builds on top of the base proxy
//
// For every NetworkOpen event it looks up the SSH session for the remote
// address, opens a matching channel on the client connection, proxies
// channel requests in both directions, and mirrors the data streams through
// the session's Incoming/Outgoing taps. cmk, when non-nil, may substitute a
// custom reader for the client-side channel.
func ClientProxySSHProtocol(s *SSHProtocol, cmk ChannelMaker) (base *SSHProxyProtocol) {
	base = BaseProxySSHProtocol(s)
	base.NetworkOpen.Subscribe(func(b interface{}, sub *flux.Sub) {
		nc, ok := b.(*ChannelNetwork)
		if !ok {
			return
		}
		log.Println("Network Open received network packet, prepare.....")
		si, err := base.Sessions().GetSession(nc.Conn.RemoteAddr())
		if err != nil {
			log.Printf("Unable to find session client for (%+v)", nc.Conn.RemoteAddr())
			return
		}
		session, ok := si.(SSHSession)
		if !ok {
			return
		}
		log.Printf("Session retrieved: (%+v) (%+v)", nc.Conn.RemoteAddr(), session.User())
		// defer session.Connection().Close()
		pid := uuid.New()
		session.UseType(pid)
		client := session.Connection()
		log.Printf("Session connection gained: %s, OpenChannel for %s", client.RemoteAddr(), nc.MasterNewChan.ChannelType())
		// Mirror the master's channel type and payload onto the client side.
		rcChannel, rcReq, err := client.OpenChannel(nc.MasterNewChan.ChannelType(), nc.MasterNewChan.ExtraData())
		if err != nil {
			log.Printf("Error creating ClientChannel for %+v %+v", nc.MasterNewChan.ChannelType(), err)
			return
		}
		log.Println("Success Creating Client proxy channel:", err)
		// replyMaker forwards an ssh.Request to dest and relays the reply
		// back to the original requester when one is expected.
		replyMaker := func(rq *ssh.Request, dest ssh.Channel) {
			do, err := dest.SendRequest(rq.Type, rq.WantReply, rq.Payload)
			if err != nil {
				log.Printf("Request proxy failed on: (%s) (%+v) with error (%+v)", nc.Conn.RemoteAddr(), rq.Type, err)
			}
			if rq.WantReply {
				rq.Reply(do, nil)
			}
		}
		// Request-forwarding loop: pumps requests both ways until either
		// side closes or an exit-status request ends the session.
		go func() {
		clientloop:
			for {
				select {
				case <-nc.ChanCloser:
					break clientloop
				case <-nc.MaseterCloser:
					break clientloop
				case mrq, ok := <-nc.MasterReqChannel:
					if !ok {
						break clientloop
					}
					replyMaker(mrq, rcChannel)
					switch mrq.Type {
					case "exit-status":
						break clientloop
					}
				case rq, ok := <-rcReq:
					if !ok {
						break clientloop
					}
					replyMaker(rq, nc.MasterChan)
					switch rq.Type {
					case "exit-status":
						break clientloop
					}
				default:
					//logit
				}
			}
			log.Println("Closing Client and Master Channels for:", session.Addr())
			rcChannel.Close()
			nc.MasterChan.Close()
		}()
		log.Println("Creating channel readers and connection")
		//handle closing and state management of copying op
		copyCloser := new(sync.Once)
		copyState := make(chan struct{})
		loopCloser := make(chan struct{})
		log.Println("Creating channel and sync.Closer")
		// copyCloseFn runs exactly once (via copyCloser) when either copy
		// direction finishes, signalling teardown through copyState.
		copyCloseFn := func() {
			log.Println("Closing copying channel and client Operation operation for:", session.Addr())
			close(copyState)
			close(loopCloser)
		}
		log.Println("Setting up Writers")
		wrapMaster := io.ReadCloser(nc.MasterChan)
		wrapSlave := io.ReadCloser(rcChannel)
		if cmk != nil {
			rw, err := cmk(nc, session, rcChannel)
			if err != nil {
				log.Println("Error creating custom reader for channel", err)
			} else {
				wrapSlave = rw
			}
		}
		log.Printf("Connecting Sessions for (%s) At (%s) Packet Snifers", session.User(), session.Addr())
		// Each direction is teed into the session's monitoring stream.
		outwriter := io.ReadWriteCloser(session.Outgoing())
		inwriter := io.ReadWriteCloser(session.Incoming())
		mwriter := io.MultiWriter(nc.MasterChan, outwriter)
		swriter := io.MultiWriter(rcChannel, inwriter)
		go func() {
			// io.Copy(rcChannel, wrapMaster)
			// io.Copy(session.Incoming(), wrapMaster)
			defer copyCloser.Do(copyCloseFn)
			io.Copy(swriter, wrapMaster)
		}()
		go func() {
			// io.Copy(nc.MasterChan, wrapSlave)
			// io.Copy(session.Outgoing(), wrapSlave)
			defer copyCloser.Do(copyCloseFn)
			io.Copy(mwriter, wrapSlave)
		}()
		// Teardown goroutine: waits for the copy loops to finish, then
		// closes every channel and the session, and emits NetworkClose.
		go func() {
			defer base.NetworkClose.Emit(nc)
			<-copyState
			log.Println("Closing Incoming and Outgoing monitory Channels!")
			log.Println("Closing all Channels!")
			wrapMaster.Close()
			wrapSlave.Close()
			log.Println("closing session connection")
			session.Connection().Close()
			session.Close()
		}()
		return
	})
	return
}
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/

package promhlp

import (
	"io"
	"io/ioutil"
	"net/http"
)

// Compile-time assertion that *CountingReadCloser satisfies io.ReadCloser.
var _ = io.ReadCloser((*CountingReadCloser)(nil))

// CountingReadCloser wraps an io.ReadCloser with a Size counter.
// NOTE(review): no Read override is visible in this chunk, so Size is
// presumably maintained by a method defined elsewhere in the file — confirm.
type CountingReadCloser struct {
	io.ReadCloser
	Size int64
}

// NewCountingReader wraps r in a *CountingReadCloser. If r is already an
// io.ReadCloser it is embedded directly; otherwise it is paired with a
// no-op Closer (ioutil.NopCloser(nil) is used purely for its nil-returning
// Close method).
func NewCountingReader(r io.Reader) *CountingReadCloser {
	if rc, ok := r.(io.ReadCloser); ok {
		return &CountingReadCloser{ReadCloser: rc}
	}
	return &CountingReadCloser{ReadCloser: struct {
		io.Reader
		io.Closer
	}{r, ioutil.NopCloser(nil)}}
}
//Reader returns the reader for the conn func (p *StreamConn) Reader() io.ReadCloser { return io.ReadCloser(p.src) }
// Close closes the wrapped reader, releasing its underlying resource.
func (r *progressReader) Close() error {
	// r.reader already satisfies io.ReadCloser; delegate directly.
	return r.reader.Close()
}
// proxyChannel bridges a master SSH channel to a freshly opened channel on
// client, forwarding channel requests in both directions and teeing the data
// streams into c's In/Out taps. Teardown is triggered once (sync.Once) when
// either copy direction ends, by a stop signal, or by killer.
func proxyChannel(c *ConnInsight, mcha ssh.Channel, mreq <-chan *ssh.Request, master ssh.NewChannel, client *SSHClient, killer <-chan struct{}) error {
	do := new(sync.Once)
	// Open the client-side twin of the master channel, mirroring its type
	// and extra payload.
	cochan, coreq, err := client.OpenChannel(master.ChannelType(), master.ExtraData())
	checkError(err, fmt.Sprintf("Creating Client Channel for %s", client.RemoteAddr().String()))
	if err != nil {
		return err
	}

	stop := make(chan struct{})
	endClose := func() { close(stop) }

	// Request-forwarding loop; exits on stop/killer, on a closed request
	// channel, or after relaying an exit-status request.
	flux.GoDefer("proxyChannelCopy", func() {
		defer cochan.Close()
		defer mcha.Close()
		func() {
		ploop:
			for {
				select {
				case <-stop:
					break ploop
				case <-killer:
					break ploop
				case slx, ok := <-coreq:
					if !ok {
						return
					}
					Reply(slx, mcha, c)
					switch slx.Type {
					case "exit-status":
						break ploop
					}
				case mlx, ok := <-mreq:
					if !ok {
						return
					}
					Reply(mlx, cochan, c)
					switch mlx.Type {
					case "exit-status":
						break ploop
					}
				}
			}
		}()
	})

	mastercloser := io.ReadCloser(mcha)
	slavecloser := io.ReadCloser(cochan)

	// Each direction is duplicated into the insight taps for sniffing.
	wrapmaster := io.MultiWriter(mcha, c.Out())
	wrapsl := io.MultiWriter(cochan, c.In())

	flux.GoDefer("CopyToSlave", func() {
		defer do.Do(endClose)
		io.Copy(wrapsl, mastercloser)
	})

	flux.GoDefer("CopyToMaster", func() {
		defer do.Do(endClose)
		io.Copy(wrapmaster, slavecloser)
	})

	// Teardown: wait for the stop signal, then close both channels and the
	// client connection, logging any close errors.
	flux.GoDefer("CopyCloser", func() {
		defer c.Close()
		<-stop
		mx := mastercloser.Close()
		checkError(mx, "Master Writer Closer")
		sx := slavecloser.Close()
		checkError(sx, "Slave Writer Closer")
		ex := client.Close()
		checkError(ex, "Client Writer Closer")
	})

	return nil
}