func scanBlock(scanner *bufio.Scanner) (*pfs.BlockRef, []byte, error) {
	var buffer bytes.Buffer
	var bytesWritten int
	hash := newHash()
	for scanner.Scan() {
		// they take out the newline, put it back
		bytes := append(scanner.Bytes(), '\n')
		buffer.Write(bytes)
		hash.Write(bytes)
		bytesWritten += len(bytes)
		if bytesWritten > blockSize {
			break
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, nil, err
	}
	return &pfs.BlockRef{
		Block: getBlock(hash),
		Range: &pfs.ByteRange{
			Lower: 0,
			Upper: uint64(buffer.Len()),
		},
	}, buffer.Bytes(), nil
}
func ReadBlock(sc *bufio.Scanner) (*Block, error) {
	// Ignore empty lines
	for {
		if len(sc.Bytes()) != 0 {
			break
		}
		if ok := sc.Scan(); !ok && sc.Err() != nil {
			return nil, fmt.Errorf("error occurred while reading file: %v", sc.Err())
		} else if !ok && sc.Err() == nil {
			// EOF
			return nil, nil
		}
	}

	// Read the index
	indexStr := sc.Text()
	index, err := strconv.Atoi(indexStr)
	if err != nil {
		return nil, fmt.Errorf("could not parse block index: %v", err)
	}

	// Read the timestamp line
	if ok := sc.Scan(); !ok {
		return nil, fmt.Errorf("could not read timestamp: %v", sc.Err())
	}

	// Parse the timestamps
	from, to, posInfo, err := ParseTimestamp(sc.Text())
	if err != nil {
		return nil, fmt.Errorf("could not parse timestamp: %v", err)
	}

	// Read the actual subtitle text
	text := []*string{}
	for {
		if ok := sc.Scan(); !ok && sc.Err() != nil {
			return nil, fmt.Errorf("could not read text: %v", sc.Err())
		} else if !ok && sc.Err() == nil {
			// EOF
			break
		}
		if len(sc.Bytes()) == 0 {
			break
		}
		str := sc.Text()
		text = append(text, &str)
	}

	return &Block{
		Index:   index,
		From:    *from,
		To:      *to,
		PosInfo: posInfo,
		Text:    text,
	}, nil
}
func parseBody(scanner *bufio.Scanner, contentLength int64) []byte {
	bytes := make([]byte, contentLength)
	// Switch to byte-at-a-time scanning and read exactly contentLength bytes.
	scanner.Split(bufio.ScanBytes)
	var i int64
	for i = 0; i < contentLength; i++ {
		scanner.Scan()
		bytes[i] = scanner.Bytes()[0]
	}
	return bytes
}
func (s *localBlockAPIServer) putOneBlock(scanner *bufio.Scanner) (result *pfs.BlockRef, retErr error) {
	hash := newHash()
	tmp, err := ioutil.TempFile(s.tmpDir(), "block")
	if err != nil {
		return nil, err
	}
	defer func() {
		if err := tmp.Close(); err != nil && retErr == nil {
			retErr = err
			return
		}
		if result == nil {
			return
		}
		// check if it's a new block
		if _, err := os.Stat(s.blockPath(result.Block)); !os.IsNotExist(err) {
			// already have this block, remove tmp
			if err := os.Remove(tmp.Name()); err != nil && retErr == nil {
				retErr = err
				return
			}
			return
		}
		// it's a new block, rename it accordingly
		if err := os.Rename(tmp.Name(), s.blockPath(result.Block)); err != nil && retErr == nil {
			retErr = err
			return
		}
	}()
	var bytesWritten int
	for scanner.Scan() {
		// they take out the newline, put it back
		bytes := append(scanner.Bytes(), '\n')
		if _, err := hash.Write(bytes); err != nil {
			return nil, err
		}
		if _, err := tmp.Write(bytes); err != nil {
			return nil, err
		}
		bytesWritten += len(bytes)
		if bytesWritten > blockSize {
			break
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return &pfs.BlockRef{
		Block: getBlock(hash),
		Range: &pfs.ByteRange{
			Lower: 0,
			Upper: uint64(bytesWritten),
		},
	}, nil
}
func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) {
	if scanner.Scan() {
		raw := scanner.Bytes()
		pos += bytes.Count(raw, []byte{'\n'})
		return raw, strings.TrimSpace(string(raw)), pos, nil
	}
	if err := scanner.Err(); err != nil {
		return nil, "", pos, &Error{pos, err.Error()}
	}
	return nil, "", pos, nil
}
func scanPara(r *bufio.Scanner) ([][]byte, bool) {
	var para [][]byte
	for r.Scan() {
		line := r.Bytes()
		if len(bytes.TrimSpace(line)) == 0 {
			return para, true
		}
		para = append(para, append([]byte(nil), line...))
	}
	return para, false
}
func (w *Worker) handle(scanner *bufio.Scanner) error {
	for scanner.Scan() {
		p, err := PacketFromBytes(scanner.Bytes())
		if err != nil {
			return err
		}
		if err := w.handlePacket(p); err != nil {
			return err
		}
	}
	return scanner.Err()
}
func assertNextChunk(t *testing.T, r *bufio.Scanner, expected string) {
	if !r.Scan() {
		t.Fatalf("Expected chunk, but ran out early: %v", r.Err())
	}
	if r.Err() != nil {
		t.Fatalf("Error reading chunk: %q", r.Err())
	}
	data := r.Bytes()
	if string(data) != expected {
		t.Errorf("chunk reader read %q; want %q", data, expected)
	}
}
func eatLines(scan *bufio.Scanner, prefix string, n int) error {
	bprefix := []byte(prefix)
	for i := 0; i < n; i++ {
		if !scan.Scan() {
			return io.ErrUnexpectedEOF
		}
		if !bytes.HasPrefix(scan.Bytes(), bprefix) {
			return fmt.Errorf("line %q does not have expected prefix %q", scan.Bytes(), bprefix)
		}
	}
	return nil
}
func fillArr(s *bufio.Scanner) (l []loc) {
	var err error
	for s.Scan() {
		if bytes.Count(s.Bytes(), []byte{','}) != 1 {
			if len(l) == 0 {
				ln, err := strconv.ParseInt(string(bytes.TrimSpace(s.Bytes())), 10, 0)
				check(err)
				l = make([]loc, 0, ln+10)
			}
			continue
		}
		t1 := make([]byte, len(s.Bytes()))
		copy(t1, s.Bytes())
		tmploc := loc{t1, 0, 0}
		tmp := bytes.SplitN(bytes.Trim(tmploc.pts, "() "), []byte{','}, 3)
		tmploc.x, err = strconv.ParseFloat(string(bytes.TrimSpace(tmp[0])), 64)
		check(err)
		tmploc.y, err = strconv.ParseFloat(string(bytes.TrimSpace(tmp[1])), 64)
		check(err)
		l = append(l, tmploc)
	}
	if s.Err() != nil {
		log.Fatal(s.Err())
	}
	sort.Sort(locA(l))
	return
}
func dovecot(db *HTTPDB, decoder *bufio.Scanner, encoder func([]byte)) error {
	for decoder.Scan() {
		data := decoder.Bytes()
		if data[0] == 'H' {
			continue
		}
		if data[0] != 'L' {
			encoder([]byte{'F'})
			continue
		}
		msg := bytes.SplitN(data[1:], []byte{'/'}, 3)
		res, err := db.Request(&Query{
			Verb: string(msg[1]),
			Object: map[string]string{
				"context": string(msg[0]),
				"object":  string(msg[2]),
			},
		})
		if err != nil {
			encoder([]byte{'F'})
			panic(err)
		}
		switch res.Status {
		case "OK":
			data, err := json.Marshal(res.Data)
			if err != nil {
				encoder([]byte{'F'})
				panic(fmt.Errorf("strange Resp %+v", res))
			}
			encoder(append([]byte{'O'}, data...))
		case "KO":
			encoder([]byte{'N'})
		default:
			encoder([]byte{'F'})
			panic(fmt.Errorf("strange Resp %+v", res))
		}
	}
	return decoder.Err()
}
func (c *Client) handle(first Packet, scanner *bufio.Scanner) error {
	if err := c.handlePacket(first); err != nil {
		return err
	}
	for scanner.Scan() {
		p, err := PacketFromBytes(scanner.Bytes())
		if err != nil {
			return err
		}
		if err := c.handlePacket(p); err != nil {
			return err
		}
	}
	return scanner.Err()
}
func (conf *Config) doLoad(scanner *bufio.Scanner) (err error) {
	var read bool
	for {
		read = scanner.Scan()
		if !read {
			break
		}
		re := configLine.FindSubmatch(scanner.Bytes())
		if re != nil {
			conf.values[string(re[1])] = re[2]
		}
	}
	return
}
func getChapterHeader(scanner *bufio.Scanner) ([]byte, error) {
	buf := new(bytes.Buffer)
	for scanner.Scan() {
		l := scanner.Bytes()
		buf.Write(l)
		buf.WriteByte('\n')
		if reBody.Match(l) {
			break
		}
	}
	if e := scanner.Err(); e != nil {
		return nil, e
	}
	return removeUtf8Bom(buf.Bytes()), nil
}
func scancodeblock(n int, scanner *bufio.Scanner) (int, Lines, error) {
	var lines Lines
	for scanner.Scan() {
		n++
		line, err := ParseLine(n, scanner.Bytes())
		if err != nil {
			return n, nil, err
		}
		if line.Type == LineTypeCodeBlock {
			// we're done
			return n, lines, nil
		}
		lines = append(lines, line)
	}
	// shouldn't reach the end
	return n, lines, errMissingEndCodeblock
}
func prepareTasks() (*[]string, error) {
	var tasks []string
	var scanner *bufio.Scanner
	var err error
	if *urlFile == "-" {
		scanner = bufio.NewScanner(os.Stdin)
	} else {
		file, err := os.Open(*urlFile)
		if err != nil {
			return nil, err
		}
		defer file.Close()
		scanner = bufio.NewScanner(file)
	}
	if *isJSON {
		var jsonInput []byte
		scanner.Split(bufio.ScanBytes)
		for scanner.Scan() {
			for _, b := range scanner.Bytes() {
				jsonInput = append(jsonInput, b)
			}
		}
		if err := scanner.Err(); err != nil {
			return nil, err
		}
		if err = json.Unmarshal(jsonInput, &tasks); err != nil {
			return nil, err
		}
	} else {
		for scanner.Scan() {
			tasks = append(tasks, scanner.Text())
		}
		if err := scanner.Err(); err != nil {
			return nil, err
		}
	}
	return &tasks, err
}
func (this *EpubMaker) splitChapter(header []byte, scanner *bufio.Scanner) error {
	maxDepth := this.cfg.GetInt("/book/depth", 1)
	if maxDepth < 1 || maxDepth > this.book.MaxDepth() {
		this.writeLog("invalid 'depth' value, reset to '1'.")
		maxDepth = 1
	}

	re := reHeader
	if d := strings.ToLower(this.cfg.GetString("/book/separator", "header")); d != "header" {
		if d == "comment" {
			re = reComment
		} else {
			this.writeLog("invalid 'separator' value, use 'header' as default.")
		}
	}

	depth, title, buf := 1, "", new(bytes.Buffer)
	for scanner.Scan() {
		l := scanner.Bytes()
		if nd, nt := checkNewChapter(re, l); nd > 0 && nd <= maxDepth {
			if buf.Len() > 0 {
				buf.WriteString(" </body>\n</html>")
				if e := this.book.AddChapter(title, buf.Bytes(), depth); e != nil {
					return e
				}
				buf.Reset()
			}
			depth, title = nd, nt
			buf.Write(header)
		}
		buf.Write(l)
		buf.WriteByte('\n')
	}
	if e := scanner.Err(); e != nil {
		return e
	}

	if buf.Len() > 0 {
		return this.book.AddChapter(title, buf.Bytes(), depth)
	}
	return nil
}
func (p *Parser) scan(scanner *bufio.Scanner) error {
	for scanner.Scan() {
		// Get next line
		lineRaw := scanner.Bytes()
		lineTrimmed := bytes.TrimSpace(lineRaw)
		p.currentLine++

		// Check for blank lines and comments
		if len(lineTrimmed) < 1 || lineTrimmed[0] == '#' {
			continue
		}

		if lineTrimmed[0] == '@' {
			incFilename := string(lineTrimmed[1:])
			// Save current state
			curLine := p.currentLine
			curFile := p.currentFile
			if err := p.parseIncludeFile(incFilename); err != nil {
				return err
			}
			// Restore state
			p.currentLine = curLine
			p.currentFile = curFile
			continue
		}

		if p.runningMode == modeCommand {
			if err := p.parseCommandLine(lineRaw); err != nil {
				return err
			}
		} else if p.runningMode == modeDevices {
			if err := p.parseDeviceLine(lineRaw); err != nil {
				return err
			}
		} else {
			if err := p.parseLine(lineRaw); err != nil {
				return err
			}
		}
	}
	return nil
}
func (cache *AuthorizedServicesCache) LoadAuthorizedServices(s *bufio.Scanner) (err error) {
	var read bool
	count := 1
	for {
		read = s.Scan()
		if !read {
			break
		} else if len(s.Bytes()) == 0 {
			// Skip empty lines
			continue
		}
		_, _ = NewAuthorizedServicesSpec(s.Bytes())
		count = count + 1
	}
	return
}
func postfix(db *HTTPDB, decoder *bufio.Scanner, encoder func([]byte)) error {
	for decoder.Scan() {
		msg := bytes.SplitN(decoder.Bytes(), []byte{' '}, 2)
		res, err := db.Request(&Query{
			Verb:   string(msg[0]),
			Object: string(msg[1]),
		})
		if err != nil {
			encoder([]byte("TIMEOUT error in backend"))
			panic(err)
		}
		switch res.Status {
		case "OK":
			switch data := res.Data.(type) {
			case string:
				encoder([]byte("OK " + data))
			default:
				encoder([]byte("TIMEOUT error in backend"))
				panic(fmt.Errorf("strange Resp %+v", res))
			}
		case "KO":
			encoder([]byte("NOTFOUND "))
		default:
			encoder([]byte("TIMEOUT error in backend"))
			panic(fmt.Errorf("strange Resp %+v", res))
		}
	}
	return decoder.Err()
}
// create a new MsgDB and KeyDB.
func (ce *CtrlEngine) dbCreate(
	w, statusfp io.Writer,
	homedir string,
	c *cli.Context,
) error {
	msgdbname := filepath.Join(c.GlobalString("homedir"), "msgs")

	// read passphrase
	fmt.Fprintf(statusfp, "read passphrase from fd %d (not echoed)\n",
		ce.fileTable.PassphraseFD)
	log.Infof("read passphrase from fd %d (not echoed)", ce.fileTable.PassphraseFD)
	var (
		scanner     *bufio.Scanner
		passphrase  []byte
		passphrase2 []byte
		err         error
	)
	// use closures so the final slice values are wiped, not the nil values
	// captured at defer time
	defer func() { bzero.Bytes(passphrase) }()
	defer func() { bzero.Bytes(passphrase2) }()
	isTerminal := terminal.IsTerminal(int(ce.fileTable.PassphraseFD))
	if isTerminal {
		passphrase, err = terminal.ReadPassword(int(ce.fileTable.PassphraseFD))
		if err != nil {
			return log.Error(err)
		}
	} else {
		scanner = bufio.NewScanner(ce.fileTable.PassphraseFP)
		if scanner.Scan() {
			// copy the bytes: the scanner's buffer is reused by the next Scan
			passphrase = append([]byte(nil), scanner.Bytes()...)
		} else if err := scanner.Err(); err != nil {
			return log.Error(err)
		}
	}
	log.Info("done")

	// read passphrase again
	fmt.Fprintf(statusfp, "read passphrase from fd %d again (not echoed)\n",
		ce.fileTable.PassphraseFD)
	log.Infof("read passphrase from fd %d again (not echoed)",
		ce.fileTable.PassphraseFD)
	if isTerminal {
		passphrase2, err = terminal.ReadPassword(int(ce.fileTable.PassphraseFD))
		if err != nil {
			return log.Error(err)
		}
	} else {
		if scanner.Scan() {
			passphrase2 = append([]byte(nil), scanner.Bytes()...)
		} else if err := scanner.Err(); err != nil {
			return log.Error(err)
		}
	}
	log.Info("done")

	// compare passphrases
	if !bytes.Equal(passphrase, passphrase2) {
		return log.Error(ErrPassphrasesDiffer)
	}

	// create msgDB
	log.Infof("create msgDB '%s'", msgdbname)
	if err := msgdb.Create(msgdbname, passphrase, c.Int("iterations")); err != nil {
		return err
	}

	// open msgDB
	msgDB, err := msgdb.Open(msgdbname, passphrase)
	if err != nil {
		return err
	}
	defer msgDB.Close()

	// configure to make sure mutecrypt has a config file
	err = ce.upkeepFetchconf(msgDB, homedir, false, nil, statusfp)
	if err != nil {
		return err
	}

	// create keyDB
	log.Info("create keyDB")
	if err := createKeyDB(c, w, ce.fileTable.OutputFD, passphrase); err != nil {
		return err
	}

	// status
	fmt.Fprintf(statusfp, "database files created\n")
	log.Info("database files created")

	// determine private walletKey
	walletKey := c.String("walletkey")
	if walletKey == "" {
		// generate wallet key
		_, privateKey, err := ed25519.GenerateKey(cipher.RandReader)
		if err != nil {
			return err
		}
		walletKey = base64.Encode(privateKey[:])
	}

	// store wallet key
	if err := msgDB.AddValue(msgdb.WalletKey, walletKey); err != nil {
		return err
	}

	// print wallet key
	if err := printWalletKey(w, walletKey); err != nil {
		return err
	}
	return nil
}
func (cache *ServiceCache) DoScan(s *bufio.Scanner) (err error) {
	cache.clearNoLock()
	// var entries int = 0

	// Scan through buf by lines according to this basic ABNF
	// (SLOP* SEP CLASSRECORD NL CERT NL SIG NL NL)*
	var classRecordsRaw, certRaw, sigRaw []byte
	for {
		var didScan bool
		for {
			didScan = s.Scan()
			if bytes.Equal(s.Bytes(), sep) || !didScan {
				break
			}
		}
		if !didScan {
			break
		}
		s.Scan() // consume the separator
		if len(s.Bytes()) == 0 {
			// err = errors.New("unexpected newline after separator")
			break
		}
		classRecordsRaw = make([]byte, len(s.Bytes()))
		copy(classRecordsRaw, s.Bytes())

		s.Scan() // consume the classRecords
		if len(s.Bytes()) != 0 {
			err = errors.New("expected newline after class records")
			return
		}

		var certBuffer bytes.Buffer
		for s.Scan() {
			// Error.Printf("%s", s.Bytes())
			if len(s.Bytes()) == 0 {
				break
			}
			certBuffer.Write(s.Bytes())
			certBuffer.Write(newline)
		}
		certRaw = certBuffer.Bytes()[0 : len(certBuffer.Bytes())-1]

		var sigBuffer bytes.Buffer
		for s.Scan() {
			// Error.Printf("%s", s.Bytes())
			if len(s.Bytes()) == 0 {
				break
			} else if bytes.Equal(s.Bytes(), sep) {
				break
			}
			sigBuffer.Write(s.Bytes())
			sigBuffer.Write(newline)
		}
		sigRaw = sigBuffer.Bytes()[0 : len(sigBuffer.Bytes())-1]
		// Error.Printf("`%s`", sigRaw)

		// Use those extracted values to make an instance
		serviceProxy, err := NewServiceProxy(classRecordsRaw, certRaw, sigRaw)
		if err != nil {
			return fmt.Errorf("NewServiceProxy: %s", err)
		}

		// Validating is a very expensive operation in the benchmarks
		if cache.verifyRecords {
			err = serviceProxy.Validate()
			if err != nil {
				err = cache.removeNoLock(serviceProxy)
				if err != nil {
					Error.Printf("could not remove service proxy (benign on first pass, otherwise it means the service has gone to a bad state): `%s`", err)
				}
				continue
			}
		}
		cache.storeNoLock(serviceProxy)
	}
	// fmt.Println("entries:", entries)
	return
}
// rekey MsgDB and KeyDB.
func (ce *CtrlEngine) dbRekey(statusfp io.Writer, c *cli.Context) error {
	msgdbname := filepath.Join(c.GlobalString("homedir"), "msgs")

	// read old passphrase
	fmt.Fprintf(statusfp, "read old passphrase from fd %d (not echoed)\n",
		ce.fileTable.PassphraseFD)
	log.Infof("read old passphrase from fd %d (not echoed)", ce.fileTable.PassphraseFD)
	var (
		scanner        *bufio.Scanner
		oldPassphrase  []byte
		newPassphrase  []byte
		newPassphrase2 []byte
		err            error
	)
	// use closures so the final slice values are wiped, not the nil values
	// captured at defer time
	defer func() { bzero.Bytes(oldPassphrase) }()
	defer func() { bzero.Bytes(newPassphrase) }()
	defer func() { bzero.Bytes(newPassphrase2) }()
	isTerminal := terminal.IsTerminal(int(ce.fileTable.PassphraseFD))
	if isTerminal {
		oldPassphrase, err = terminal.ReadPassword(int(ce.fileTable.PassphraseFD))
		if err != nil {
			return log.Error(err)
		}
	} else {
		scanner = bufio.NewScanner(ce.fileTable.PassphraseFP)
		if scanner.Scan() {
			// copy the bytes: the scanner's buffer is reused by the next Scan
			oldPassphrase = append([]byte(nil), scanner.Bytes()...)
		} else if err := scanner.Err(); err != nil {
			return log.Error(err)
		}
	}
	log.Info("done")

	// read new passphrase
	fmt.Fprintf(statusfp, "read new passphrase from fd %d (not echoed)\n",
		ce.fileTable.PassphraseFD)
	log.Infof("read new passphrase from fd %d (not echoed)", ce.fileTable.PassphraseFD)
	if isTerminal {
		newPassphrase, err = terminal.ReadPassword(int(ce.fileTable.PassphraseFD))
		if err != nil {
			return log.Error(err)
		}
	} else {
		if scanner.Scan() {
			newPassphrase = append([]byte(nil), scanner.Bytes()...)
		} else if err := scanner.Err(); err != nil {
			return log.Error(err)
		}
	}
	log.Info("done")

	// read new passphrase again
	fmt.Fprintf(statusfp, "read new passphrase from fd %d again (not echoed)\n",
		ce.fileTable.PassphraseFD)
	log.Infof("read new passphrase from fd %d again (not echoed)",
		ce.fileTable.PassphraseFD)
	if isTerminal {
		newPassphrase2, err = terminal.ReadPassword(int(ce.fileTable.PassphraseFD))
		if err != nil {
			return log.Error(err)
		}
	} else {
		if scanner.Scan() {
			newPassphrase2 = append([]byte(nil), scanner.Bytes()...)
		} else if err := scanner.Err(); err != nil {
			return log.Error(err)
		}
	}
	log.Info("done")

	// compare new passphrases
	if !bytes.Equal(newPassphrase, newPassphrase2) {
		return log.Error(ErrPassphrasesDiffer)
	}

	// rekey msgDB
	log.Infof("rekey msgDB '%s'", msgdbname)
	if err := msgdb.Rekey(msgdbname, oldPassphrase, newPassphrase,
		c.Int("iterations")); err != nil {
		return err
	}

	// rekey keyDB
	log.Info("rekey keyDB")
	if err := rekeyKeyDB(c, oldPassphrase, newPassphrase); err != nil {
		return err
	}
	return nil
}
// scanFile scans each file, line by line, gathering statistics using a scanner.
func (wc *wcstat) scanFile(scanner *bufio.Scanner) {
	for scanner.Scan() {
		wc.getStats(scanner.Bytes())
	}
}
func (cache *ServiceCache) LoadAnnounceCache(s *bufio.Scanner) (err error) {
	// Scan through buf by lines according to this basic ABNF
	// (SLOP* SEP CLASSRECORD NL CERT NL SIG NL NL)*
	var classRecordsRaw, certRaw, sigRaw []byte
	for {
		var didScan bool
		for {
			didScan = s.Scan()
			// Trace.Printf("slop: %s", s.Bytes())
			if bytes.Equal(s.Bytes(), sep) || !didScan {
				break
			}
		}
		if !didScan {
			// Trace.Printf("break out")
			break
		}
		s.Scan() // consume the separator
		if len(s.Bytes()) == 0 {
			err = errors.New("unexpected newline after separator")
			return
		}
		classRecordsRaw = make([]byte, len(s.Bytes()))
		copy(classRecordsRaw, s.Bytes())
		// Trace.Printf("classRecordsRaw: %s", s.Bytes())

		s.Scan() // consume the classRecords
		if len(s.Bytes()) != 0 {
			err = errors.New("expected newline after class records")
			return
		}

		var certBuffer bytes.Buffer
		for s.Scan() {
			// Trace.Printf("certBuf: %s", s.Bytes())
			if len(s.Bytes()) == 0 {
				break
			}
			certBuffer.Write(s.Bytes())
			certBuffer.Write(newline)
		}
		certRaw = certBuffer.Bytes()[0 : len(certBuffer.Bytes())-1]

		var sigBuffer bytes.Buffer
		for s.Scan() {
			// Trace.Printf("sigBuf: %s", s.Bytes())
			if len(s.Bytes()) == 0 {
				break
			}
			sigBuffer.Write(s.Bytes())
			sigBuffer.Write(newline)
		}
		sigRaw = sigBuffer.Bytes()[0 : len(sigBuffer.Bytes())-1]

		// Use those extracted values to make an instance
		serviceProxy, err := NewServiceProxy(classRecordsRaw, certRaw, sigRaw)
		if err != nil {
			return err
		}
		err = serviceProxy.Validate()
		if err != nil {
			Error.Printf("could not validate service proxy `%s`. Skipping.", err)
			continue
		}
		cache.Store(serviceProxy)
	}
	return
}
/*
ProcessChanges takes `git status -z` output and returns all status items.

(Note: in our case, we actually use `git status -bz` and remove the branch
header when we process it earlier, but the results are binary identical.)

This is a complicated process because the format is weird. Each entry has a
variable number of columns (2-3), but the separator for 1-2 is a space (and the
content of columns can contain spaces too!), and the separator for 2-3 is a NUL
character (ASCII 0), *if* there is a third column. But here's where it gets
wacky: NUL is also the entry terminator (rather than a LF like in normal
porcelain mode).

Thankfully(?), column 1, which contains the status codes, is a fixed length of
two bytes, and in theory the status codes contain enough secrets for us to
determine whether we should expect 2 or 3 columns (current hypothesis is we
only get the third column, which is PATH2, when there is a "rename" operation).
Sooo... we can just read those two bytes and use that to determine how many
NULs to scan until we have consumed a full entry.

We put up with this because it means no shell escaping, which should mean
better cross-platform support. Better hope some Windows people end up using it
someday!
*/
func ProcessChanges(s *bufio.Scanner, root string) (results []*StatusItem) {
	// Before we process any changes, get the current working directory.
	// We're going to need it to calculate absolute and relative filepaths for
	// every change, so we get it once now and pass it along.
	// If for some reason this fails (?), fall back to the git worktree root.
	wd, err := os.Getwd()
	if err != nil {
		wd = root
	}

	for s.Scan() {
		chunk := s.Bytes()

		// ...if chunk represents a rename or copy op, we need to append another
		// chunk to get the full change item, with the NUL manually reinserted
		// because the scanner will scan past it.
		if (chunk[0] == 'R' || chunk[0] == 'C') && s.Scan() {
			chunk = append(chunk, '\x00')
			chunk = append(chunk, s.Bytes()...)
		}

		results = append(results, processChange(chunk, wd, root)...)
	}

	return
}
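// The sketch below is not part of the original source; it is a minimal example of
// how the scanner fed to ProcessChanges might be constructed. Because `git status -z`
// entries are NUL-terminated rather than newline-terminated, the default
// bufio.ScanLines split cannot be used. The scanNullTerminated helper, the
// statusItems wrapper, and the plain `-z` invocation (the doc comment above notes
// the real code uses `-bz` and strips the branch header first) are assumptions for
// illustration. Assumes the standard bufio, bytes, and os/exec packages.

// scanNullTerminated is a bufio.SplitFunc that yields NUL-terminated entries with
// the trailing NUL removed.
func scanNullTerminated(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, '\x00'); i >= 0 {
		// Consume the NUL, return the entry without it.
		return i + 1, data[:i], nil
	}
	if atEOF {
		// Final entry without a terminating NUL.
		return len(data), data, nil
	}
	// Request more data.
	return 0, nil, nil
}

// statusItems (hypothetical) runs `git status -z` in root and feeds its output
// to ProcessChanges.
func statusItems(root string) ([]*StatusItem, error) {
	cmd := exec.Command("git", "status", "-z")
	cmd.Dir = root
	out, err := cmd.Output()
	if err != nil {
		return nil, err
	}
	s := bufio.NewScanner(bytes.NewReader(out))
	s.Split(scanNullTerminated)
	return ProcessChanges(s, root), nil
}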