// startCommand starts up a command and creates a stdin pipe and a buffered
// stdout & stderr pipes, wrapped in a wrappedCmd. The stdout buffer will be of stdoutBufSize
// bytes.
func startCommand(command string, args ...string) (*wrappedCmd, error) {
	cmd := exec.Command(command, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}

	tracerx.Printf("run_command: %s %s", command, strings.Join(args, " "))
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	return &wrappedCmd{
		stdin,
		bufio.NewReaderSize(stdout, stdoutBufSize),
		bufio.NewReaderSize(stderr, stdoutBufSize),
		cmd,
	}, nil
}
func (c *RedisCmd) parseRESP() (interface{}, error) {
	// reader := bytes.NewReader(c.data)
	// if bufio.NewReaderSize's default size is larger than the reader,
	// the extra reset costs performance
	// br := bufio.NewReaderSize(reader, reader.Len())
	if c.r == nil {
		c.r = bytes.NewReader(c.data)
		c.br = bufio.NewReaderSize(c.r, c.r.Len())
	} else {
		c.r.Reset(c.data)
		// Reset does not grow the buffer, so Buffered() does not report the size of the
		// remaining data, only what is still unread inside the buffer; part of the data
		// may still be in c.r
		// c.br.Reset(c.r)
		c.br = bufio.NewReaderSize(c.r, c.r.Len())
	}
	if resp, neededDataLen, err := parseRESP(c.br); err != nil {
		if err == ErrUnexpectedRESPEOF || err == ErrBufferFullRESP {
			// data not fully received yet
		}
		c.neededDataLen = neededDataLen
		return nil, err
	} else {
		c.neededDataLen = 0
		return resp, nil
	}
}
// *****************************************************************************
// CREATION OF INDEX FILE
// *****************************************************************************
func (g *YIndex) CreateIndexFiles() (int, error) {
	count := 0

	// -------------------------------------------------------------------------
	// OPEN AN INPUT FILE
	// -------------------------------------------------------------------------
	fi, err := os.Open(g.InputFileName)
	if err != nil {
		return count, errors.New(
			fmt.Sprintf("Cannot open file <%s>.", g.InputFileName),
		)
	}
	bfi := bufio.NewReaderSize(fi, 1*1024*1024)
	defer fi.Close()

	// -------------------------------------------------------------------------
	// CREATE MAIN INDEXING FILE
	// -------------------------------------------------------------------------
	fo1, err := os.Create(g.InputFileName + ".yidx1")
	if err != nil {
		return count, errors.New(
			fmt.Sprintf("Cannot create main indexing file <%s>.", g.InputFileName+".yidx1"),
		)
	}
	bfo1 := bufio.NewWriter(fo1)

	count, err = g.createIndexFile1(bfi, bfo1)
	if err != nil {
		return count, err
	}
	fo1.Close()

	// -------------------------------------------------------------------------
	// CREATE DOUBLE INDEXING FILE (THIS FILE WILL BE LOADED INTO MEMORY)
	// -------------------------------------------------------------------------
	fi1, err := os.Open(g.InputFileName + ".yidx1")
	if err != nil {
		return count, errors.New(
			fmt.Sprintf("Cannot read main indexing file <%s>.", g.InputFileName+".yidx1"),
		)
	}
	bfi1 := bufio.NewReaderSize(fi1, 1*1024*1024)
	defer fi1.Close()

	fo2, err := os.Create(g.InputFileName + ".yidx2")
	if err != nil {
		return count, errors.New(
			fmt.Sprintf("Cannot create double indexing file <%s>.", g.InputFileName+".yidx2"),
		)
	}
	bfo2 := bufio.NewWriter(fo2)

	count, err = g.createIndexFile2(bfi1, bfo2)
	if err != nil {
		return count, err
	}
	fo2.Close()

	return count, nil
}
func main() {
	Log.SetHandler(log15.StderrHandler)
	flagConnect := flag.String("connect", os.Getenv("BRUNO_ID"), "database connection string")
	flagCharset := flag.String("charset", term.GetTTYEncodingName(), "input charset of the csv")
	flagTruncate := flag.Bool("truncate", false, "truncate table?")
	flagSep := flag.String("sep", ";", "csv field separator")
	flag.Parse()

	var enc encoding.Encoding
	if *flagCharset != "" {
		enc = text.GetEncoding(*flagCharset)
		if enc == nil {
			Log.Error("unknown charset " + *flagCharset)
			os.Exit(1)
		}
	}

	db, err := sql.Open("ora", *flagConnect)
	if err != nil {
		Log.Crit("connect to db", "dsn", *flagConnect, "error", err)
		os.Exit(1)
	}
	defer db.Close()

	fh, err := os.Open(flag.Arg(0))
	if err != nil {
		Log.Crit("open csv", "file", flag.Arg(0), "error", err)
		os.Exit(1)
	}
	defer fh.Close()

	r := io.Reader(fh)
	if enc != nil {
		Log.Debug("NewReader", "encoding", enc)
		r = text.NewReader(bufio.NewReaderSize(r, 1<<20), enc)
	}

	if *flagTruncate {
		if _, err = db.Exec("TRUNCATE TABLE " + flag.Arg(1)); err != nil {
			Log.Error("TRUNCATE", "table", flag.Arg(1), "error", err)
			os.Exit(1)
		}
	}

	if os.Getenv("GOMAXPROCS") == "" {
		Log.Info("Setting GOMAXPROCS", "numCPU", runtime.NumCPU())
		runtime.GOMAXPROCS(runtime.NumCPU())
	}

	cr := csv.NewReader(bufio.NewReaderSize(r, 16<<20))
	cr.Comma = ([]rune(*flagSep))[0]
	cr.TrimLeadingSpace = true
	cr.LazyQuotes = true
	if err := load(db, flag.Arg(1), cr); err != nil {
		Log.Error("load", "error", err)
		os.Exit(2)
	}
}
// handleConnection wraps the in and out connection endpoints for transfer between them.
func handleConnection(clientConn net.Conn, serverConn net.Conn, txHandlers []func([]byte), rxHandlers []func([]byte)) {
	clientReader := bufio.NewReaderSize(clientConn, 8192)
	clientWriter := bufio.NewWriterSize(clientConn, 8192)
	serverReader := bufio.NewReaderSize(serverConn, 8192)
	serverWriter := bufio.NewWriterSize(serverConn, 8192)
	go pipeReaderToWriter(clientReader, serverWriter, txHandlers)
	go pipeReaderToWriter(serverReader, clientWriter, rxHandlers)
}
func TestPeek(t *testing.T) {
	p := make([]byte, 10)
	// string is 16 (minReadBufferSize) long.
	buf := bufio.NewReaderSize(strings.NewReader("abcdefghijklmnop"), minReadBufferSize)
	if s, err := buf.Peek(1); string(s) != "a" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "a", string(s), err)
	}
	if s, err := buf.Peek(4); string(s) != "abcd" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "abcd", string(s), err)
	}
	if _, err := buf.Peek(32); err != bufio.ErrBufferFull {
		t.Fatalf("want ErrBufFull got %v", err)
	}
	if _, err := buf.Read(p[0:3]); string(p[0:3]) != "abc" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "abc", string(p[0:3]), err)
	}
	if s, err := buf.Peek(1); string(s) != "d" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "d", string(s), err)
	}
	if s, err := buf.Peek(2); string(s) != "de" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "de", string(s), err)
	}
	if _, err := buf.Read(p[0:3]); string(p[0:3]) != "def" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "def", string(p[0:3]), err)
	}
	if s, err := buf.Peek(4); string(s) != "ghij" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "ghij", string(s), err)
	}
	if _, err := buf.Read(p[0:]); string(p[0:]) != "ghijklmnop" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "ghijklmnop", string(p[0:minReadBufferSize]), err)
	}
	if s, err := buf.Peek(0); string(s) != "" || err != nil {
		t.Fatalf("want %q got %q, err=%v", "", string(s), err)
	}
	if _, err := buf.Peek(1); err != io.EOF {
		t.Fatalf("want EOF got %v", err)
	}

	// Test for issue 3022, not exposing a reader's error on a successful Peek.
	buf = bufio.NewReaderSize(dataAndEOFReader("abcd"), 32)
	if s, err := buf.Peek(2); string(s) != "ab" || err != nil {
		t.Errorf(`Peek(2) on "abcd", EOF = %q, %v; want "ab", nil`, string(s), err)
	}
	if s, err := buf.Peek(4); string(s) != "abcd" || err != nil {
		t.Errorf(`Peek(4) on "abcd", EOF = %q, %v; want "abcd", nil`, string(s), err)
	}
	if n, err := buf.Read(p[0:5]); string(p[0:n]) != "abcd" || err != nil {
		t.Fatalf("Read after peek = %q, %v; want abcd, EOF", p[0:n], err)
	}
	if n, err := buf.Read(p[0:1]); string(p[0:n]) != "" || err != io.EOF {
		t.Fatalf(`second Read after peek = %q, %v; want "", EOF`, p[0:n], err)
	}
}
func (cmd *cmdSync) SendPSyncCmd(master, passwd string) (pipe.Reader, int64) {
	c := openNetConn(master, passwd)
	br := bufio.NewReaderSize(c, ReaderBufferSize)
	bw := bufio.NewWriterSize(c, WriterBufferSize)

	runid, offset, wait := sendPSyncFullsync(br, bw)
	log.Infof("psync runid = %s offset = %d, fullsync", runid, offset)

	var nsize int64
	for nsize == 0 {
		select {
		case nsize = <-wait:
			if nsize == 0 {
				log.Info("+")
			}
		case <-time.After(time.Second):
			log.Info("-")
		}
	}

	piper, pipew := pipe.NewSize(ReaderBufferSize)

	go func() {
		defer pipew.Close()
		p := make([]byte, 8192)
		for rdbsize := int(nsize); rdbsize != 0; {
			rdbsize -= iocopy(br, pipew, p, rdbsize)
		}
		for {
			n, err := cmd.PSyncPipeCopy(c, br, bw, offset, pipew)
			if err != nil {
				log.PanicErrorf(err, "psync runid = %s, offset = %d, pipe is broken", runid, offset)
			}
			offset += n
			for {
				time.Sleep(time.Second)
				c = openNetConnSoft(master, passwd)
				if c != nil {
					log.Infof("psync reopen connection, offset = %d", offset)
					break
				} else {
					log.Infof("psync reopen connection, failed")
				}
			}
			authPassword(c, passwd)
			br = bufio.NewReaderSize(c, ReaderBufferSize)
			bw = bufio.NewWriterSize(c, WriterBufferSize)
			sendPSyncContinue(br, bw, runid, offset)
		}
	}()
	return piper, nsize
}
func TestNewReaderSizeIdempotent(t *testing.T) {
	const BufSize = 1000
	b := bufio.NewReaderSize(bytes.NewBufferString("hello world"), BufSize)
	// Does it recognize itself?
	b1 := bufio.NewReaderSize(b, BufSize)
	if b1 != b {
		t.Error("bufio.NewReaderSize did not detect underlying Reader")
	}
	// Does it wrap if existing buffer is too small?
	b2 := bufio.NewReaderSize(b, 2*BufSize)
	if b2 == b {
		t.Error("bufio.NewReaderSize did not enlarge buffer")
	}
}
// Return a buffered reader from an io.Reader.
// If f == "-", then it will attempt to read from os.Stdin.
// If the file is gzipped, it will be read as such.
func Buf(r io.Reader) *Reader {
	b := bufio.NewReaderSize(r, getSize())
	var rdr *gzip.Reader
	if is, err := IsGzip(b); err != nil && err != io.EOF {
		log.Fatal(err)
	} else if is {
		rdr, err = gzip.NewReader(b)
		if err != nil {
			log.Fatal(err)
		}
		b = bufio.NewReaderSize(rdr, getSize())
	}
	return &Reader{b, r, rdr}
}
// this method returns an array of responses that the client has to handle.
// Considering adding an error to the reply type, but I don't know if we really need it.
// Responses can contain nils!
func (client *Client) executeAnyArrayCommand(command []byte) ([]*Reply, error) {
	_, err := client.runCommand(command)
	if err != nil {
		return nil, err
	}
	reader := bufio.NewReaderSize(client.connection, bufSize)
	result, err := readResponse(reader)
	if err != nil {
		return nil, err
	}
	if res, ok := result.Response.([]interface{}); ok {
		result_array := make([]*Reply, len(res))
		for i, elem := range res {
			origin, _ := elem.(*Reply)
			result_array[i] = origin
		}
		return result_array, nil
	} else {
		return nil, errors.New("Internal reddan error, unexpected response type")
	}
}
func (client *Client) executeStringArrayCommand(command []byte) ([]string, error) {
	_, err := client.runCommand(command)
	if err != nil {
		return nil, err
	}
	reader := bufio.NewReaderSize(client.connection, bufSize)
	result, err := readResponse(reader)
	if err != nil {
		return nil, err
	}
	if res, ok := result.Response.([]interface{}); ok {
		result_array := make([]string, len(res))
		// TODO requires work on errors and refactor
		for i, elem := range res {
			origin, _ := elem.(*Reply) // change _ to ok and refactor Array responses
			el := origin.Response.([]byte)
			result_array[i] = string(el)
		}
		return result_array, nil
	} else {
		return nil, errors.New("Internal reddan error, unexpected response type")
	}
}
func (s *ProxyServer) handleClient(w http.ResponseWriter, r *http.Request) error {
	ip, _, _ := net.SplitHostPort(r.RemoteAddr)
	cs := &Session{ip: ip, enc: json.NewEncoder(w)}
	defer r.Body.Close()
	connbuff := bufio.NewReaderSize(r.Body, MaxReqSize)

	for {
		data, isPrefix, err := connbuff.ReadLine()
		if isPrefix {
			log.Printf("Socket flood detected")
			return errors.New("Socket flood")
		} else if err == io.EOF {
			break
		}

		if len(data) > 1 {
			var req JSONRpcReq
			err = json.Unmarshal(data, &req)
			if err != nil {
				log.Printf("Malformed request: %v", err)
				return err
			}
			cs.handleMessage(s, r, &req)
		}
	}
	return nil
}
func (s *Server) newConn(rwc net.Conn) (c *conn, err error) {
	c = new(conn)
	c.server = s
	c.conn = rwc
	c.rwc = bufio.NewReadWriter(bufio.NewReaderSize(rwc, 1048576), bufio.NewWriter(rwc))
	return c, nil
}
// Creates and opens a new connection to server per ConnectionSpec.
// The new connection is wrapped by a new connHdl with its bufio.Reader
// delegating to the net.Conn's reader.
//
func newConnHdl(spec *ConnectionSpec) (hdl *connHdl, err os.Error) {
	here := "newConnHdl"

	if hdl = new(connHdl); hdl == nil {
		return nil, withNewError(fmt.Sprintf("%s(): failed to allocate connHdl", here))
	}
	addr := fmt.Sprintf("%s:%d", spec.host, spec.port)
	raddr, e := net.ResolveTCPAddr(addr)
	if e != nil {
		return nil, withNewError(fmt.Sprintf("%s(): failed to resolve remote address %s", here, addr))
	}

	conn, e := net.DialTCP(TCP, nil, raddr)
	switch {
	case e != nil:
		err = withOsError(fmt.Sprintf("%s(): could not open connection", here), e)
	case conn == nil:
		err = withNewError(fmt.Sprintf("%s(): net.Dial returned nil, nil (?)", here))
	default:
		configureConn(conn, spec)
		hdl.spec = spec
		hdl.conn = conn
		bufsize := 4096
		hdl.reader, e = bufio.NewReaderSize(conn, bufsize)
		if e != nil {
			err = withNewError(fmt.Sprintf("%s(): bufio.NewReaderSize (%d) error", here, bufsize))
		} else {
			if debug() {
				log.Stdout("[Go-Redis] Opened SynchConnection connection to ", addr)
			}
		}
	}
	return hdl, err
}
func sbclBin(path string) string {
	u := uname_m() + "-" + uname()
	condPrintf(1, "open %s\n", path)
	in, err := os.Open(path)
	if err != nil {
		panic(err)
	}
	defer in.Close()
	r := bufio.NewReaderSize(in, 4096)
	doc, err := html.Parse(r)
	if err != nil {
		panic(err)
	}
	var f func(n *html.Node) string
	f = func(n *html.Node) string {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, attr := range n.Attr {
				if k, v := attr.Key, attr.Val; k == "href" && (v[len(v)-3:] == "bz2" || v[len(v)-3:] == "msi") && strings.Index(v, u) != -1 {
					return strings.Split(v, "-")[1]
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			if result := f(c); result != "" {
				return result
			}
		}
		return ""
	}
	return f(doc)
}
func doForEachLine(do stringOperation, filename string) {
	// open the file
	file, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := file.Close(); err != nil {
			panic(err)
		}
	}()

	reader := bufio.NewReaderSize(file, 4*1024)
	var endof bool = false
	for !endof {
		line, isPrefix, err := reader.ReadLine()
		if err != nil && err != io.EOF {
			panic(err)
		}
		if err == io.EOF {
			endof = true
		} else {
			// do stuff to line
			do(string(line))
		}
		if isPrefix {
			fmt.Println("buffer not large enough")
			return
		}
	}
}
func (codecType *BufioCodecType) NewDecoder(r io.Reader) mudoo.Decoder {
	return &bufioDecoder{
		Base: codecType.Base.NewDecoder(
			bufio.NewReaderSize(r, codecType.ReadBufferSize),
		),
	}
}
// returns an input channel with a raw key, value without collating keys
func RawInternalInputProtocol(input io.Reader) <-chan KeyValue {
	out := make(chan KeyValue, 100)
	go func() {
		var line []byte
		var lineErr error
		r := bufio.NewReaderSize(input, 1024*1024*2)
		var lastKey []byte
		for {
			if lineErr == io.EOF {
				break
			}
			line, lineErr = r.ReadBytes('\n')
			if len(line) <= 1 {
				continue
			}

			chunks := bytes.SplitN(line, []byte("\t"), 2)
			if len(chunks) != 2 {
				Counter("RawInternalInputProtocol", "invalid line - no tab", 1)
				log.Printf("invalid line. no tab - %s", line)
				lastKey = lastKey[:0]
				continue
			}
			out <- KeyValue{chunks[0], chunks[1]}
		}
		close(out)
	}()
	return out
}
func TestReader(t *testing.T) {
	var texts [31]string
	str := ""
	all := ""
	for i := 0; i < len(texts)-1; i++ {
		texts[i] = str + "\n"
		all += texts[i]
		str += string(i%26 + 'a')
	}
	texts[len(texts)-1] = all

	for h := 0; h < len(texts); h++ {
		text := texts[h]
		for i := 0; i < len(readMakers); i++ {
			for j := 0; j < len(bufreaders); j++ {
				for k := 0; k < len(bufsizes); k++ {
					readmaker := readMakers[i]
					bufreader := bufreaders[j]
					bufsize := bufsizes[k]
					read := readmaker.fn(bytes.NewBufferString(text))
					buf := bufio.NewReaderSize(read, bufsize)
					s := bufreader.fn(buf)
					if s != text {
						t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
							readmaker.name, bufreader.name, bufsize, text, s)
					}
				}
			}
		}
	}
}
// Creates and opens a new connection to server per ConnectionSpec.
// The new connection is wrapped by a new connHdl with its bufio.Reader
// delegating to the net.Conn's reader.
//
func newConnHdl(spec *ConnectionSpec) (hdl *connHdl, err Error) {
	here := "newConnHdl"

	if hdl = new(connHdl); hdl == nil {
		return nil, NewError(SYSTEM_ERR, fmt.Sprintf("%s(): failed to allocate connHdl", here))
	}
	addr := fmt.Sprintf("%s:%d", spec.host, spec.port)
	raddr, e := net.ResolveTCPAddr(addr)
	if e != nil {
		msg := fmt.Sprintf("%s(): failed to resolve remote address %s", here, addr)
		return nil, NewErrorWithCause(SYSTEM_ERR, msg, e)
	}

	conn, e := net.DialTCP(TCP, nil, raddr)
	switch {
	case e != nil:
		err = NewErrorWithCause(SYSTEM_ERR, fmt.Sprintf("%s(): could not open connection", here), e)
	case conn == nil:
		err = NewError(SYSTEM_ERR, fmt.Sprintf("%s(): net.Dial returned nil, nil (?)", here))
	default:
		configureConn(conn, spec)
		hdl.spec = spec
		hdl.conn = conn
		bufsize := 4096
		hdl.reader, e = bufio.NewReaderSize(conn, bufsize)
		if e != nil {
			msg := fmt.Sprintf("%s(): bufio.NewReaderSize (%d) error", here, bufsize)
			err = NewErrorWithCause(SYSTEM_ERR, msg, e)
		} else {
			if err = hdl.onConnect(); err == nil && debug() {
				log.Stdout("[Go-Redis] Opened SynchConnection connection to ", addr)
			}
		}
	}
	return hdl, err
}
func NewClientV2(conn net.Conn) *ClientV2 {
	var identifier string
	if conn != nil {
		identifier, _, _ = net.SplitHostPort(conn.RemoteAddr().String())
	}

	return &ClientV2{
		Conn: conn,

		// ReadyStateChan has a buffer of 1 to guarantee that in the event
		// there is a race the state update is not lost
		ReadyStateChan: make(chan int, 1),
		ExitChan:       make(chan int),
		ConnectTime:    time.Now(),

		ShortIdentifier: identifier,
		LongIdentifier:  identifier,

		Reader: bufio.NewReaderSize(conn, 16*1024),
		Writer: bufio.NewWriterSize(conn, 16*1024),

		State: nsq.StateInit,

		SubEventChan: make(chan *Channel, 1),

		// heartbeats are client configurable but default to 30s
		HeartbeatInterval:   nsqd.options.clientTimeout / 2,
		HeartbeatUpdateChan: make(chan time.Duration, 1),
	}
}
func readFile(fileName string) {
	f, _ := os.Open(fileName)
	defer f.Close()
	r := bufio.NewReaderSize(f, 2*1024)
	line, isPrefix, err := r.ReadLine()
	i := 1
	for err == nil && !isPrefix {
		s := string(line)
		if i == 1 { // Find Iam
			t := strings.Split(s, ":")
			Iam = Node{t[0], t[1], false, false, false}
		} else {
			k := strings.Split(s, ":")
			if k[0] == "initiator" { // Find if initiator
				iniFlag = true
				leader = k[2]
				fmt.Println("I've just set the Leader: ", k[2])
			} else { // Find neighbors
				neighbors = append(neighbors, Node{k[0], k[1], false, false, false})
			}
		}
		i++
		line, isPrefix, err = r.ReadLine()
	}
}
// returns a channel of simplejson.Json objects. This channel will be closed
// when the input stream closes. Errors will be logged
func JsonInputProtocol(input io.Reader) <-chan *simplejson.Json {
	out := make(chan *simplejson.Json, 100)
	go func() {
		var line []byte
		var lineErr error
		r := bufio.NewReaderSize(input, 1024*1024*2)
		for {
			if lineErr == io.EOF {
				break
			}
			line, lineErr = r.ReadBytes('\n')
			if len(line) <= 1 {
				continue
			}
			data, err := simplejson.NewJson(line)
			if err != nil {
				Counter("JsonInputProtocol", "invalid line", 1)
				log.Printf("%s - failed parsing %s", err, line)
			} else {
				out <- data
			}
		}
		close(out)
	}()
	return out
}
// NewStream returns a new buffered I/O channel. channel is an underlying
// I/O channel that implements io.ReadWriteCloser.
func NewStream(channel io.ReadWriteCloser) *Stream {
	r := bufio.NewReaderSize(channel, 0xFFFF)
	return &Stream{
		channel: channel,
		reader:  r,
	}
}
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
	mu := make(chan bool, 1)
	mu <- true

	if readBufferSize == 0 {
		readBufferSize = defaultReadBufferSize
	}
	if readBufferSize < maxControlFramePayloadSize {
		readBufferSize = maxControlFramePayloadSize
	}
	if writeBufferSize == 0 {
		writeBufferSize = defaultWriteBufferSize
	}

	c := &Conn{
		isServer:       isServer,
		br:             bufio.NewReaderSize(conn, readBufferSize),
		conn:           conn,
		mu:             mu,
		readFinal:      true,
		writeBuf:       make([]byte, writeBufferSize+maxFrameHeaderSize),
		writeFrameType: noFrame,
		writePos:       maxFrameHeaderSize,
	}
	c.SetPingHandler(nil)
	c.SetPongHandler(nil)
	return c
}
func testReadLine(t *testing.T, input []byte) {
	//for stride := 1; stride < len(input); stride++ {
	for stride := 1; stride < 2; stride++ {
		done := 0
		reader := testReader{input, stride}
		l := bufio.NewReaderSize(&reader, len(input)+1)
		for {
			line, isPrefix, err := l.ReadLine()
			if len(line) > 0 && err != nil {
				t.Errorf("ReadLine returned both data and error: %s", err)
			}
			if isPrefix {
				t.Errorf("ReadLine returned prefix")
			}
			if err != nil {
				if err != io.EOF {
					t.Fatalf("Got unknown error: %s", err)
				}
				break
			}
			if want := testOutput[done : done+len(line)]; !bytes.Equal(want, line) {
				t.Errorf("Bad line at stride %d: want: %x got: %x", stride, want, line)
			}
			done += len(line)
		}
		if done != len(testOutput) {
			t.Errorf("ReadLine didn't return everything: got: %d, want: %d (stride: %d)", done, len(testOutput), stride)
		}
	}
}
// read
func (self *Tunnel) PumpOut() (err error) {
	defer close(self.outputCh)

	var header struct {
		Linkid uint16
		Sz     uint8
	}

	rd := bufio.NewReaderSize(self.conn, 4096)
	for {
		err = binary.Read(rd, binary.LittleEndian, &header)
		if err != nil {
			Error("read tunnel failed:%s", err.Error())
			return
		}

		var data []byte
		// if header.Sz == 0, it's ok too
		data = make([]byte, header.Sz)
		c := 0
		for c < int(header.Sz) {
			var n int
			n, err = rd.Read(data[c:])
			if err != nil {
				Error("read tunnel failed:%s", err.Error())
				return
			}
			c += n
		}
		self.outputCh <- TunnelPayload{header.Linkid, data}
	}
	return
}
func TestLineTooLong(t *testing.T) {
	data := make([]byte, 0)
	for i := 0; i < minReadBufferSize*5/2; i++ {
		data = append(data, '0'+byte(i%10))
	}
	buf := bytes.NewBuffer(data)
	l := bufio.NewReaderSize(buf, minReadBufferSize)
	line, isPrefix, err := l.ReadLine()
	if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil {
		t.Errorf("bad result for first line: got %q want %q %v", line, data[:minReadBufferSize], err)
	}
	data = data[len(line):]
	line, isPrefix, err = l.ReadLine()
	if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil {
		t.Errorf("bad result for second line: got %q want %q %v", line, data[:minReadBufferSize], err)
	}
	data = data[len(line):]
	line, isPrefix, err = l.ReadLine()
	if isPrefix || !bytes.Equal(line, data[:minReadBufferSize/2]) || err != nil {
		t.Errorf("bad result for third line: got %q want %q %v", line, data[:minReadBufferSize/2], err)
	}
	line, isPrefix, err = l.ReadLine()
	if isPrefix || err == nil {
		t.Errorf("expected no more lines: %x %s", line, err)
	}
}
func ParseFile(fname string) chan *TsPkt {
	pkts := make(chan *TsPkt)
	go func() {
		f, err := os.Open(fname)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		reader := bufio.NewReaderSize(f, readBufSize)

		var count int64
		for {
			buf := make([]byte, TSPacketSize)
			n, err := io.ReadFull(reader, buf)
			if n == 0 {
				if err != io.EOF {
					panic(err)
				}
				break
			}
			pkt := ParseTsPkt(buf)
			if pkt.SyncByte != 0x47 {
				panic("Sync byte error")
			}
			pkt.Pos = count
			pkts <- pkt
			count += 1
		}
		close(pkts)
	}()
	return pkts
}
func TestReadEmptyBuffer(t *testing.T) {
	l := bufio.NewReaderSize(new(bytes.Buffer), minReadBufferSize)
	line, isPrefix, err := l.ReadLine()
	if err != io.EOF {
		t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err)
	}
}