// Read a single sequence and return it or an error. // TODO: Does not read multi-line fastq. func (self *Reader) Read() (s seq.Sequence, err error) { var ( buff, line, label []byte isPrefix bool seqBuff []alphabet.QLetter t seqio.SequenceAppender ) inQual := false for { if buff, isPrefix, err = self.r.ReadLine(); err == nil { if isPrefix { line = append(line, buff...) continue } else { line = buff } line = bytes.TrimSpace(line) if len(line) == 0 { continue } switch { case !inQual && line[0] == '@': t = self.readHeader(line) label, line = line, nil case !inQual && line[0] == '+': if len(label) == 0 { return nil, bio.NewError("fastq: no header line parsed before +line in fastq format", 0) } if len(line) > 1 && bytes.Compare(label[1:], line[1:]) != 0 { return nil, bio.NewError("fastq: quality header does not match sequence header", 0) } inQual = true case !inQual: line = bytes.Join(bytes.Fields(line), nil) seqBuff = make([]alphabet.QLetter, len(line)) for i := range line { seqBuff[i].L = alphabet.Letter(line[i]) } case inQual: line = bytes.Join(bytes.Fields(line), nil) if len(line) != len(seqBuff) { return nil, bio.NewError("fastq: sequence/quality length mismatch", 0) } for i := range line { seqBuff[i].Q = alphabet.DecodeToQphred(line[i], self.enc) } t.AppendQLetters(seqBuff...) return t, nil } } else { return } } panic("cannot reach") }
// ram reports free/total memory (values as they appear in /proc/meminfo,
// i.e. kB counts) formatted as "free/total". If the file cannot be
// opened, the error text is returned instead.
func ram() string {
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		return fmt.Sprint(err)
	}
	defer f.Close()
	bufReader := bufio.NewReader(f)
	// Zero length with spare capacity. The original make([]byte, 100)
	// prefixed the first assembled line with 100 NUL bytes, which ended
	// up glued to its first field.
	b := make([]byte, 0, 100)
	var free, total string
	// NOTE(review): a persistent non-EOF read error would loop forever
	// here; /proc reads are assumed to either succeed or hit EOF.
	for line, isPrefix, err := bufReader.ReadLine(); err != io.EOF; line, isPrefix, err = bufReader.ReadLine() {
		b = append(b, line...)
		if !isPrefix {
			// A full logical line is assembled; pick out the value column.
			switch {
			case bytes.Contains(b, []byte("MemFree")):
				s := bytes.Fields(b)
				free = string(s[1])
			case bytes.Contains(b, []byte("MemTotal")):
				s := bytes.Fields(b)
				total = string(s[1])
			}
			b = b[:0] // reuse the buffer for the next line
		}
	}
	return fmt.Sprintf("%s/%s", free, total)
}
func ram() interface{} { f, err := os.Open("/proc/meminfo") if err != nil { return "Unsupported" } defer f.Close() bufReader := bufio.NewReader(f) b := make([]byte, 0, 100) var free, total int for line, isPrefix, err := bufReader.ReadLine(); err != io.EOF; line, isPrefix, err = bufReader.ReadLine() { if err != nil { log.Fatal("bufReader.ReadLine: ", err) } b = append(b, line...) if !isPrefix { switch { case bytes.Contains(b, []byte("MemFree")): free = toInt(bytes.Fields(b)[1]) case bytes.Contains(b, []byte("MemTotal")): total = toInt(bytes.Fields(b)[1]) } b = b[:0] } } return Ram{free, total} }
func readDataSetFile(filename string) (d *DataSet) { d = new(DataSet) data, err := ioutil.ReadFile(filename) if err != nil { fmt.Println("Error opening file: ", filename) return } lines := bytes.Split(data, []byte{'\n'}) names := bytes.Fields(lines[0]) for i := 0; i < len(names)-1; i++ { d.var_names = append(d.var_names, string(names[i])) } d.out_name = string(names[len(names)-1]) for i := 1; i < len(lines); i++ { val_strs := bytes.Fields(lines[i]) if len(val_strs) < len(d.var_names)+1 { break } input := make([]float64, len(d.var_names)) for p := 0; p < len(d.var_names); p++ { var val float64 fmt.Sscanf(string(val_strs[p]), "%f", &val) input[p] = val } d.input = append(d.input, input) var out float64 fmt.Sscanf(string(val_strs[len(val_strs)-1]), "%f", &out) d.output = append(d.output, out) } return }
func New(r io.Reader) (*Grammar, error) { buf := bufio.NewReader(r) g := &Grammar{data: make(map[string][][]string)} start, err := buf.Peek(80) if err != nil { return nil, err } g.start = string(bytes.Fields(start)[0]) line, err := buf.ReadSlice('.') for ; err == nil; line, err = buf.ReadSlice('.') { splat := bytes.Fields(line) // First field is left side, last is ".". stringified := make([]string, len(splat)-2) for i, word := range splat[1 : len(splat)-1] { stringified[i] = string(word) } key := string(splat[0]) g.data[key] = append(g.data[key], stringified) } if err != io.EOF { return g, err } for _, c := range line { // leftovers if !unicode.IsSpace(rune(c)) { // i.e. there's something after the last '.': return g, io.ErrUnexpectedEOF } } return g, nil }
func parseCommit(r io.Reader, resultSize string, name SHA) (Commit, error) { var commit = Commit{_type: "commit", size: resultSize} scnr := bufio.NewScanner(r) scnr.Split(ScanLinesNoTrim) var commitMessageLines [][]byte for scnr.Scan() { line := scnr.Bytes() trimmedLine := bytes.TrimRight(line, "\r\n") if commitMessageLines == nil && len(bytes.Fields(trimmedLine)) == 0 { // Everything after the first empty line is the commit message commitMessageLines = [][]byte{} continue } if commitMessageLines != nil { // We have already seen an empty line commitMessageLines = append(commitMessageLines, line) continue } parts := bytes.Fields(trimmedLine) key := parts[0] switch keyType(key) { case treeKey: commit.Tree = string(parts[1]) case parentKey: commit.Parents = append(commit.Parents, SHA(string(parts[1]))) case authorKey: authorline := string(bytes.Join(parts[1:], []byte(" "))) author, date, err := parseAuthorString(authorline) if err != nil { return commit, err } commit.Author = author commit.AuthorDate = date case committerKey: committerline := string(bytes.Join(parts[1:], []byte(" "))) committer, date, err := parseCommitterString(committerline) if err != nil { return commit, err } commit.Committer = committer commit.CommitterDate = date default: err := fmt.Errorf("encountered unknown field in commit: %s", key) return commit, err } } commit.Name = name commit.Message = bytes.Join(commitMessageLines, []byte("\n")) return commit, nil }
// Read a single sequence and return it or an error. // TODO: Does not read interleaved fastq. func (self *Reader) Read() (sequence *seq.Seq, err error) { if self.r == nil { self.r = bufio.NewReader(self.f) } var line, label, seqBody, qualBody []byte sequence = &seq.Seq{} inQual := false READ: for { if line, err = self.r.ReadBytes('\n'); err == nil { if len(line) > 0 && line[len(line)-1] == '\r' { line = line[:len(line)-1] } line = bytes.TrimSpace(line) if len(line) == 0 { continue } switch { case !inQual && line[0] == '@': label = line[1:] case !inQual && line[0] == '+': if len(label) == 0 { return nil, errors.New("No ID line parsed at +line in fastq format") } if len(line) > 1 && bytes.Compare(label, line[1:]) != 0 { return nil, errors.New("Quality ID does not match sequence ID") } inQual = true case !inQual: line = bytes.Join(bytes.Fields(line), nil) seqBody = append(seqBody, line...) case inQual: line = bytes.Join(bytes.Fields(line), nil) qualBody = append(qualBody, line...) if len(qualBody) >= len(seqBody) { break READ } } } else { return } } if len(seqBody) != len(qualBody) { return nil, errors.New("Quality length does not match sequence length") } sequence = seq.New(label, seqBody, qualBody) return }
// discoverGatewayUsingRoute parses `route -n` output looking for the
// default (0.0.0.0) route and returns its gateway address.
func discoverGatewayUsingRoute() (ip net.IP, err error) {
	routeCmd := exec.Command("route", "-n")
	stdOut, err := routeCmd.StdoutPipe()
	if err != nil {
		return
	}
	if err = routeCmd.Start(); err != nil {
		return
	}
	output, err := ioutil.ReadAll(stdOut)
	if err != nil {
		return
	}

	// Linux route out format is always like this:
	// Kernel IP routing table
	// Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
	// 0.0.0.0         192.168.1.1     0.0.0.0         UG    0      0        0 eth0
	outputLines := bytes.Split(output, []byte("\n"))
	for _, line := range outputLines {
		if bytes.Contains(line, []byte("0.0.0.0")) {
			ipFields := bytes.Fields(line)
			// Guard before indexing the gateway column; a malformed line
			// previously panicked with index out of range.
			if len(ipFields) < 2 {
				continue
			}
			ip = net.ParseIP(string(ipFields[1]))
			break
		}
	}
	err = routeCmd.Wait()
	return
}
// discoverGatewayUsingIp parses `ip route show` output looking for the
// default route and returns the address after "via".
func discoverGatewayUsingIp() (ip net.IP, err error) {
	routeCmd := exec.Command("ip", "route", "show")
	stdOut, err := routeCmd.StdoutPipe()
	if err != nil {
		return
	}
	if err = routeCmd.Start(); err != nil {
		return
	}
	output, err := ioutil.ReadAll(stdOut)
	if err != nil {
		return
	}

	// Linux 'ip route show' format looks like this:
	// default via 192.168.178.1 dev wlp3s0  metric 303
	// 192.168.178.0/24 dev wlp3s0 proto kernel scope link src 192.168.178.76  metric 303
	outputLines := bytes.Split(output, []byte("\n"))
	for _, line := range outputLines {
		if bytes.Contains(line, []byte("default")) {
			ipFields := bytes.Fields(line)
			// Guard before indexing: "default dev eth0" (no "via") has
			// fewer than three fields and previously panicked.
			if len(ipFields) < 3 {
				continue
			}
			ip = net.ParseIP(string(ipFields[2]))
			break
		}
	}
	err = routeCmd.Wait()
	return
}
// loadGoodTable can be used to parse string heap that // headers and values are arranged in right order func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) { entries := map[string]interface{}{} fields := bytes.Fields(table) var value int64 var err error // iterate over two values each time // first value is header, second is value for i := 0; i < len(fields); i = i + 2 { // counter is zero if bytes.Equal(fields[i+1], zeroByte) { if !dumpZeros { continue } else { entries[string(fields[i])] = int64(0) continue } } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(fields[i+1]), 10, 64) if err == nil { entries[string(fields[i])] = value } } return entries, nil }
// ValidatePacket validates a carbon message. func ValidatePacket(buf []byte) error { fields := bytes.Fields(buf) if len(fields) != 3 { return errors.New("packet must consist of 3 fields") } version := GetVersionB(fields[0]) err := InitialValidationB(fields[0], version) if err != nil { return err } _, err = strconv.ParseFloat(string(fields[1]), 32) if err != nil { return errors.New("value field is not a float or int") } _, err = strconv.ParseUint(string(fields[2]), 10, 0) if err != nil { return errors.New("timestamp field is not a unix timestamp") } return nil }
// Scan starts the parser in a goroutine and tokenizes its output into
// statements delimited by ';', '{' or '}'. Each statement has its
// whitespace collapsed to single spaces and is sent on s.stmts together
// with the brace-nesting depth at which it appeared.
func (s *Statements) Scan() {
	go s.Parser.Parse()
	scanner := bufio.NewScanner(s.Parser)
	// depth is captured and mutated by the split function below: it
	// tracks '{'/'}' nesting as tokens are produced.
	depth := 0
	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		if i := bytes.IndexAny(data, ";{}"); i >= 0 {
			if string(data[i]) == "{" {
				depth++
			} else if string(data[i]) == "}" {
				depth--
			}
			// Emit everything before the delimiter; consume the delimiter.
			return i + 1, data[0:i], nil
		} else if atEOF {
			// NOTE(review): trailing input with no delimiter is consumed
			// but discarded (nil token) — confirm this is intentional.
			return len(data), nil, nil
		}
		// Need more data before a delimiter can be found.
		return 0, nil, nil
	})
	for scanner.Scan() {
		// Collapse all runs of whitespace to single spaces.
		stmt := string(bytes.Join(bytes.Fields(scanner.Bytes()), []byte{' '}))
		s.stmts <- &stmtMsg{stmt, depth}
	}
}
func (f Format) TheHour(n int, t time.Time) (b []byte) { b = make([]byte, 0, 64) for _, f := range bytes.Fields(f) { switch f[0] { case '#': b = append(b, Numbers[n]...) case 'h': b = append(b, []byte(`THE HOUR`)...) case 'm': m := t.Minute() b = append(b, Numbers[m]...) if len(f) == 2 && f[1] == '_' { b = append(b, Sminute...) if m != 1 { b = append(b, 'S') } } case 's': s := t.Second() b = append(b, Numbers[s]...) if len(f) == 2 && f[1] == '_' { b = append(b, Ssecond...) if s != 1 { b = append(b, 'S') } } default: b = append(b, f...) } b = append(b, ' ') } return b[:len(b)-1] }
func TestCache(t *testing.T) { // Test data was generated from the python code f, err := os.Open("testdata/domains.txt") if err != nil { t.Fatal(err) } scanner := bufio.NewScanner(f) cache := New(200) for scanner.Scan() { fields := bytes.Fields(scanner.Bytes()) key := string(fields[0]) wantHit := fields[1][0] == 'h' var hit bool v := cache.Get(key) if v == nil { cache.Set(key, key) } else { hit = true if v.(string) != key { t.Errorf("cache returned bad data: got %+v , want %+v\n", v, key) } } if hit != wantHit { t.Errorf("cache hit mismatch: got %v, want %v\n", hit, wantHit) } } }
// DiscoverGateway parses Darwin `route -n get 0.0.0.0` output and
// returns the address on the "gateway:" line.
func DiscoverGateway() (ip net.IP, err error) {
	routeCmd := exec.Command("route", "-n", "get", "0.0.0.0")
	stdOut, err := routeCmd.StdoutPipe()
	if err != nil {
		return
	}
	if err = routeCmd.Start(); err != nil {
		return
	}
	output, err := ioutil.ReadAll(stdOut)
	if err != nil {
		return
	}

	// Darwin route out format is always like this:
	//    route to: default
	// destination: default
	//        mask: default
	//     gateway: 192.168.1.1
	outputLines := bytes.Split(output, []byte("\n"))
	for _, line := range outputLines {
		if bytes.Contains(line, []byte("gateway:")) {
			gatewayFields := bytes.Fields(line)
			// Guard before indexing the value column; a bare "gateway:"
			// line previously panicked with index out of range.
			if len(gatewayFields) < 2 {
				continue
			}
			ip = net.ParseIP(string(gatewayFields[1]))
			break
		}
	}
	err = routeCmd.Wait()
	return
}
func (self *Reader) metaSequence(moltype, id string) (sequence *seq.Seq, err error) { var line, body []byte for { if line, err = self.r.ReadBytes('\n'); err == nil { if len(line) > 0 && line[len(line)-1] == '\r' { line = line[:len(line)-1] } if len(line) == 0 { continue } if len(line) < 2 || !bytes.HasPrefix(line, []byte("##")) { return nil, bio.NewError("Corrupt metasequence", 0, line) } line = bytes.TrimSpace(line[2:]) if string(line) == "end-"+moltype { break } else { line = bytes.Join(bytes.Fields(line), nil) body = append(body, line...) } } else { return nil, err } } sequence = seq.New(id, body, nil) sequence.Moltype = bio.ParseMoltype(moltype) return }
// main demonstrates the bytes package splitting and joining helpers on a
// small corpus of language names, logging each result.
func main() {
	corpus := []byte("golang haskell ruby python")

	words := bytes.Fields(corpus)
	log.Printf("Fields split %q on whitespace into %q", corpus, words)

	vowelsAndSpace := "aeiouy "
	chunks := bytes.FieldsFunc(corpus, func(r rune) bool {
		return strings.ContainsRune(vowelsAndSpace, r)
	})
	log.Printf("FieldsFunc split %q on vowels and space into %q", corpus, chunks)

	space := []byte{' '}

	bySpace := bytes.Split(corpus, space)
	log.Printf("Split split %q on a single space into %q", corpus, bySpace)

	numberOfSubslices := 2 // Not number of splits
	firstTwo := bytes.SplitN(corpus, space, numberOfSubslices)
	log.Printf("SplitN split %q on a single space into %d subslices: %q", corpus, numberOfSubslices, firstTwo)

	withSep := bytes.SplitAfter(corpus, space)
	log.Printf("SplitAfter split %q AFTER a single space (keeping the space) into %q", corpus, withSep)

	withSepN := bytes.SplitAfterN(corpus, space, numberOfSubslices)
	log.Printf("SplitAfterN split %q AFTER a single space (keeping the space) into %d subslices: %q", corpus, numberOfSubslices, withSepN)

	rejoined := bytes.Join(words, space)
	log.Printf("Languages are back togeher again! %q == %q? %v", rejoined, corpus, bytes.Equal(rejoined, corpus))
}
// parseIndexLine parses one line of a WordNet-style index file into an
// indexInfo. On any conversion failure it reports to stderr and exits
// the process.
// NOTE(review): this is pre-Go1 code (os.Error, strconv.Atoi64); it will
// not compile on a modern toolchain.
func parseIndexLine(l []byte) *indexInfo {
	// fmt.Fprintf (os.Stderr, "Processing line:\n%s\n", l)
	newIndexInfo := indexInfo{}
	var err os.Error
	fields := bytes.Fields(l)
	// The pointer count determines where the trailing columns begin.
	ptr_cnt, err := strconv.Atoi(string(fields[PTR_CNT]))
	if err != nil {
		fmt.Fprintf(os.Stderr, "I had a problem trying to convert '%s' to int\n", fields[PTR_CNT])
		os.Exit(1)
	}
	newIndexInfo.lemma = fields[LEMMA]
	// newIndexInfo.pos, err = strconv.Atoui64(string(fields[POS]))
	// POS must be a single-letter syntactic category code.
	if len(fields[POS]) > 1 {
		fmt.Fprintf(os.Stderr, "POS has to be 1 letter code ('n', 'v', 'a' or 'r') and I have %s\n", fields[POS])
		os.Exit(1)
	}
	newIndexInfo.pos = fields[POS][0]
	// tagsense count sits after the ptr_cnt pointer-symbol columns.
	newIndexInfo.tagsense_cnt, err = strconv.Atoi(string(fields[TAGSENSE_CNT+ptr_cnt]))
	if err != nil {
		fmt.Fprintf(os.Stderr, "I had a problem trying to convert %s to int32\n", fields[TAGSENSE_CNT+ptr_cnt])
		os.Exit(1)
	}
	// All remaining columns are synset offsets.
	offsets_strs := fields[(SYNSET_OFFSET + ptr_cnt):]
	offsets := make([]int64, len(offsets_strs))
	for i, offset := range offsets_strs {
		offsets[i], err = strconv.Atoi64(string(offset))
		if err != nil {
			fmt.Fprintf(os.Stderr, "I had a problem trying to convert the offset %s to int63\n", offset)
			os.Exit(1) // log.Fatal?
		}
	}
	newIndexInfo.offsets = offsets
	return &newIndexInfo
}
func main() { ex := make(map[string]entropy.Exact) scanner := bufio.NewScanner(os.Stdin) var epoch int for scanner.Scan() { fields := bytes.Fields(scanner.Bytes()) e, _ := strconv.Atoi(string(fields[0])) if e != epoch { for k, v := range ex { fmt.Println(epoch, k, v.Entropy()) } ex = make(map[string]entropy.Exact) epoch = e } m, ok := ex[string(fields[2])] if !ok { m = entropy.NewExact() ex[string(fields[2])] = m } m.Push(fields[1], 1) } for k, v := range ex { fmt.Println(epoch, k, v.Entropy()) } }
func (v *ValueParser) Parse(buf []byte) ([]telegraf.Metric, error) { // separate out any fields in the buffer, ignore anything but the last. values := bytes.Fields(buf) if len(values) < 1 { return []telegraf.Metric{}, nil } valueStr := string(values[len(values)-1]) var value interface{} var err error switch v.DataType { case "", "int", "integer": value, err = strconv.Atoi(valueStr) case "float", "long": value, err = strconv.ParseFloat(valueStr, 64) case "str", "string": value = valueStr case "bool", "boolean": value, err = strconv.ParseBool(valueStr) } if err != nil { return nil, err } fields := map[string]interface{}{"value": value} metric, err := telegraf.NewMetric(v.MetricName, v.DefaultTags, fields, time.Now().UTC()) if err != nil { return nil, err } return []telegraf.Metric{metric}, nil }
// exec `ps` to get all process states func (p *Processes) gatherFromPS(fields map[string]interface{}) error { out, err := p.execPS() if err != nil { return err } for i, status := range bytes.Fields(out) { if i == 0 && string(status) == "STAT" { // This is a header, skip it continue } switch status[0] { case 'W': fields["wait"] = fields["wait"].(int64) + int64(1) case 'U', 'D', 'L': // Also known as uninterruptible sleep or disk sleep fields["blocked"] = fields["blocked"].(int64) + int64(1) case 'Z': fields["zombies"] = fields["zombies"].(int64) + int64(1) case 'T': fields["stopped"] = fields["stopped"].(int64) + int64(1) case 'R': fields["running"] = fields["running"].(int64) + int64(1) case 'S': fields["sleeping"] = fields["sleeping"].(int64) + int64(1) case 'I': fields["idle"] = fields["idle"].(int64) + int64(1) default: log.Printf("processes: Unknown state [ %s ] from ps", string(status[0])) } fields["total"] = fields["total"].(int64) + int64(1) } return nil }
// parseIndexLine parses one line of a WordNet-style index file into an
// indexInfo, returning an error instead of exiting on bad input.
// NOTE(review): strconv.Atoi64 is pre-Go1 API; this will not compile on
// a modern toolchain.
func parseIndexLine(l []byte) (*indexInfo, error) {
	// fmt.Fprintf (os.Stderr, "Processing line:\n%s\n", l)
	newIndexInfo := indexInfo{}
	var err error
	fields := bytes.Fields(l)
	newIndexInfo.lemma = fields[LEMMA]
	// newIndexInfo.pos, err = strconv.Atoui64(string(fields[POS]))
	// POS must be a single-letter syntactic category code.
	if len(fields[POS]) > 1 {
		return nil, ERR_MSG(SYNTACTIC_CATEGORY_TOO_LONG)
	}
	newIndexInfo.pos = fields[POS][0]
	// The pointer count determines where the trailing columns begin.
	ptr_cnt, err := strconv.Atoi(string(fields[PTR_CNT]))
	if err != nil {
		return nil, err
	}
	ptr_symbols := fields[SYMBOL : SYMBOL+ptr_cnt]
	newIndexInfo.ptr_symbols = ptr_symbols
	newIndexInfo.tagsense_cnt, err = strconv.Atoi(string(fields[TAGSENSE_CNT+ptr_cnt]))
	if err != nil {
		return nil, err
	}
	// Remaining columns are synset offsets.
	// NOTE(review): this variant starts at SYNSET_OFFSET+ptr_cnt-1,
	// one column earlier than the sibling parser — confirm against the
	// index file layout.
	offsets_strs := fields[(SYNSET_OFFSET + ptr_cnt - 1):]
	offsets := make([]int64, len(offsets_strs))
	for i, offset := range offsets_strs {
		offsets[i], err = strconv.Atoi64(string(offset))
		if err != nil {
			return nil, err
		}
	}
	newIndexInfo.offsets = offsets
	return &newIndexInfo, nil
}
func procUsage(pcpu *float64, rss, vss *int64) error { contents, err := ioutil.ReadFile(procStatFile) if err != nil { return err } fields := bytes.Fields(contents) *rss = (parseInt64(fields[rssPos])) << 12 *vss = parseInt64(fields[vssPos]) startTime := parseInt64(fields[startPos]) utime := parseInt64(fields[utimePos]) stime := parseInt64(fields[stimePos]) totalTime := utime + stime var sysinfo syscall.Sysinfo_t if err := syscall.Sysinfo(&sysinfo); err != nil { return err } seconds := int64(sysinfo.Uptime) - (startTime / ticks) if seconds > 0 { ipcpu := (totalTime * 1000 / ticks) / seconds *pcpu = float64(ipcpu) / 10.0 } return nil }
// takes in a byte array representing an edge list and loads the graph func (g *Graph) LoadEdgeList(edgelist []byte) { log.Println(g.Names) fields := bytes.Fields(edgelist) // create name map from string to index log.Println(g.Names) names := make(map[string]int) for i, n := range g.Names { names[n] = i } // read fields in groups of three: from, to, edgeweight for i := 0; i < len(fields)-2; i += 3 { from := string(fields[i]) to := string(fields[i+1]) weight, err := strconv.ParseFloat(string(fields[i+2]), 64) if err != nil { log.Println(err) continue } fi, ok := names[from] if !ok { log.Println("from not ok:", from) continue } ti, ok := names[to] if !ok { log.Println("to not ok:", to) continue } g.Weights[fi][ti] = weight } }
// See word.WrapAsEqualer func WrapAsEqualer(sb []byte, sorted bool) []ls_core.Equaler { sbf := bytes.Fields(sb) if sorted { sort.Sort(sortBoB(sbf)) // weed out doublettes su, prev := make([][]byte, 0, len(sbf)), []byte{} for _, v := range sbf { if bytes.Equal(v, prev) { continue } su = append(su, v) prev = v } sbf = su } ret := make([]ls_core.Equaler, 0, len(sbf)) for _, v := range sbf { cnv := ls_core.Equaler(Token(v)) ret = append(ret, cnv) } return ret }
// compareT demonstrates the bytes package comparison and search helpers
// on a couple of sample slices and prints the results.
func compareT() {
	fmt.Println(bytes.Compare([]byte("你好"), []byte("你hao")))
	sl := []byte("s '! ' !")
	sli := []byte("'")
	fmt.Println(string(sl), sli,
		bytes.Contains(sl, sli),
		bytes.Count(sl, sli),
		bytes.Fields(sl),
		bytes.Fields(sli),
		bytes.Index(sl, sli),
		bytes.Join([][]byte{sl, sli}, []byte("_______________-----")), // like PHP implode
		string(bytes.Replace(sl, []byte("!"), []byte("?"), -1)),
		bytes.Split(sl, []byte("!")),      // like PHP explode
		bytes.SplitAfter(sl, []byte("!")), // the first two slices keep the '!'
	)
}
// DiscoverGateway parses Windows `route print 0.0.0.0` output and
// returns the gateway column of the active 0.0.0.0 route.
func DiscoverGateway() (ip net.IP, err error) {
	routeCmd := exec.Command("route", "print", "0.0.0.0")
	stdOut, err := routeCmd.StdoutPipe()
	if err != nil {
		return
	}
	if err = routeCmd.Start(); err != nil {
		return
	}
	output, err := ioutil.ReadAll(stdOut)
	if err != nil {
		return
	}

	// Windows route output format is always like this:
	// ===========================================================================
	// Active Routes:
	// Network Destination        Netmask          Gateway       Interface  Metric
	//           0.0.0.0          0.0.0.0      192.168.1.1    192.168.1.100     20
	// ===========================================================================
	// I'm trying to pick the active route,
	// then jump 2 lines and pick the third IP
	// Not using regex because output is quite standard from Windows XP to 8 (NEEDS TESTING)
	outputLines := bytes.Split(output, []byte("\n"))
	for idx, line := range outputLines {
		if bytes.Contains(line, []byte("Active Routes:")) {
			// Guard against truncated output before jumping two lines and
			// indexing the gateway column; both previously could panic.
			if idx+2 >= len(outputLines) {
				break
			}
			ipFields := bytes.Fields(outputLines[idx+2])
			if len(ipFields) < 3 {
				break
			}
			ip = net.ParseIP(string(ipFields[2]))
			break
		}
	}
	err = routeCmd.Wait()
	return
}
func main() { //Read the input file file, err := ioutil.ReadFile("unixdict.txt") if err != nil { fmt.Println(err) return } //Convert every character to lowercase, as the program is case-insensitive. file = bytes.ToLower(file) //Anagrams are stored in a map by there sorted string anagramMap := make(map[string][]string) for _, word := range bytes.Fields(file) { wordAsByteSlice := make(byteSlice, len(word)) copy(wordAsByteSlice, word) sort.Sort(wordAsByteSlice) sortedString := string(wordAsByteSlice) wordString := string(word) anagram := append(anagramMap[sortedString], wordString) anagramMap[sortedString] = anagram } for _, anagram := range anagramMap { if len(anagram) > 1 { fmt.Printf("%s\n", anagram) } } }
// serveConn is the main loop serving one connection to the proxy.
// It reads newline-terminated commands, dispatches each via s.demux,
// writes any resulting protocol error back to the client, and flushes
// the output after every command. The loop ends when the client read
// fails (EOF is treated as a normal disconnect).
func serveConn(conn net.Conn, ctx netcontext.Context) {
	defer conn.Close()
	// Use a larger buffer than default to help reading larger memcache
	// values, which can go up to 10^6 bytes (1Mb).
	s := &streams{ctx, bufio.NewReaderSize(conn, 65535), &lineWriter{*bufio.NewWriter(conn)}}
	for {
		line, err := s.in.ReadBytes('\n')
		if err != nil {
			if err != io.EOF {
				log.Printf("ERROR: Client read error: %v", err)
			}
			return
		}
		// Note, to avoid unnecessary allocation that might
		// cause GC pauses we avoid converting to strings
		// except when necessary.
		if args := bytes.Fields(bytes.TrimSpace(line)); len(args) == 0 {
			badCommandf("bogus empty line").writeTo(s)
		} else {
			// First token is the command name; the rest are its arguments.
			if err := s.demux(string(args[0]), args[1:]...); err != nil {
				err, ok := err.(memcacheError)
				if !ok {
					// We don't actually expect this to happen, but let's be paranoid.
					err = serverError{fmt.Errorf("internal problem: %v", err)}
				}
				err.writeTo(s)
			}
		}
		// Flush once per command so the client sees the full response.
		s.out.Flush()
	}
}
func parseMovie(movie []byte, m *imdb.Movie) bool { // We start backwards and greedily consume the following attributes: // (YYYY) - The year the movie was released. // Everything after (errm, before) this is the title. // (TV) - Made for TV // (V) - Made for video // (VG) - A video game! Skip it. var field []byte fields := bytes.Fields(movie) for i := len(fields) - 1; i >= 0; i-- { field = fields[i] switch { // Try the common case first. case hasEntryYear(field): err := parseEntryYear(field[1:len(field)-1], &m.Year, &m.Sequence) if err != nil { pef("Could not convert '%s' to year: %s", field, err) return false } m.Title = unicode(bytes.Join(fields[0:i], []byte{' '})) return true case bytes.Equal(field, attrVg): return false case bytes.Equal(field, attrTv): m.Tv = true case bytes.Equal(field, attrVid): m.Video = true } } pef("Could not find title in '%s'.", movie) return false }