func parseLine(line []byte) logKv { m := bytes.LastIndex(line, startKey) if m < 1 { return nil } n := bytes.LastIndex(line, stopKey) if n < m { return nil } kv := make(logKv) sub := line[m+1 : n] items := bytes.Split(sub, kvSpace) for _, item := range items { x := bytes.Index(item, eqSign) if x < 1 { continue } key := string(item[:x]) val := string(item[x+1:]) kv[key] = val } return kv }
// handle the socket request func (this *Server) HandleRequest(conn net.Conn, d []byte) { // example: {"usr":"","pwd":"123"}>>Member.Login // defer func(){ // if e := recover();e!= nil{ // Println(e) // debug.PrintStack() // } // }() if len(d) < len(CmdOperateBytes)+len(cmdDot) { conn.Write(invalidBytes) return } i := bytes.LastIndex(d, CmdOperateBytes) di := bytes.LastIndex(d, cmdDot) if i != -1 && di != -1 { rd, err := this.handle(d[i+len(CmdOperateBytes):di], d[di+len(cmdDot):], d[:i]) if err != nil { Println("[Server][ ERROR]: " + err.Error()) conn.Write([]byte(err.Error())) } else { Println(fmt.Sprintf("[Server][Output]:%s", string(rd))) conn.Write(rd) } } else { conn.Write(invalidBytes) } }
func ExampleLastIndex() { fmt.Println(bytes.Index([]byte("go gopher"), []byte("go"))) fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("go"))) fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("rodent"))) // Output: // 0 // 3 // -1 }
// ParseOneReq parses incoming data and builds an HttpRequest object.
// It returns: the parsed request (when one was found), the data that was
// not consumed, and a success flag (false means a malformed request was
// received).
func ParseOneReq(data []byte) (req *HttpRequest, unworked_data []byte, success bool) {
	unworked_data = data
	success = true
	endl := []byte(endline_str)                           // End-of-line marker
	req_identifier := []byte(http_version + endline_str)  // Request marker (HTTP version + end-of-line characters)
	// Look for the header.
	p_req_id := bytes.Index(unworked_data, req_identifier)
	p_endl := -1
	if p_req_id >= 0 {
		p_endl = bytes.LastIndex(unworked_data[:p_req_id], endl)
	} else {
		p_endl = bytes.LastIndex(unworked_data, endl)
	}
	if p_endl >= 0 {
		// A line break occurs before the request marker - malformed input:
		// skip past it and report failure.
		unworked_data = unworked_data[p_endl+len(endline_str):]
		success = false
		return
	} else if p_req_id >= 0 {
		// Request marker found.
		p_req_id += len(req_identifier)
		params := strings.Split(string(unworked_data[:p_req_id]), " ")
		unworked_data = unworked_data[p_req_id:]
		if len(params) != 3 {
			// Invalid header.
			success = false
			return
		} // if len(params) != 3
		req = &HttpRequest{Type: params[0], Host: params[1]}
	} else {
		// Neither a request marker nor a line break was found; wait for
		// more data.
		unworked_data = data
		return
	}
	// Build the parameters and the body of the request.
	var rr *req_resp
	rr, unworked_data, success = parse_one_req_resp(unworked_data)
	if rr != nil {
		req.req_resp = *rr
	} else {
		req = nil
		if success {
			// Incomplete but not malformed: hand back all original data.
			unworked_data = data
		}
	}
	return
} // func ParseOneReq(data []byte) (req *HttpRequest, unworked_data []byte, success bool)
func shortText(t []byte) []byte { if t == nil { return nil } // Cut signature. i := bytes.LastIndex(t, sigDash) j := bytes.LastIndex(t, quote) if i > j && bytes.Count(t[i+1:], nl) <= 10 { t = t[:i+1] } // Cut trailing quoted text. for { rest, last := lastLine(t) trim := bytes.TrimSpace(last) if len(rest) < len(t) && (len(trim) == 0 || trim[0] == '>') { t = rest continue } break } // Cut 'On foo.*wrote:' line. rest, last := lastLine(t) if onwrote.Match(last) { t = rest } // Cut trailing blank lines. for { rest, last := lastLine(t) trim := bytes.TrimSpace(last) if len(rest) < len(t) && len(trim) == 0 { t = rest continue } break } // Cut signature again. i = bytes.LastIndex(t, sigDash) j = bytes.LastIndex(t, quote) if i > j && bytes.Count(t[i+1:], nl) <= 10 { t = t[:i+1] } return t }
// TODO:
// OffsetLine returns the byte offset of the start of the line that is ln
// lines away from the line containing offset start in f.b. ln == 0 yields
// the start of the current line, negative ln walks backwards, positive ln
// walks forwards; walking forward past the last newline returns the end
// of the buffer.
func (f *File) OffsetLine(ln, start int) (offset int, e error) {
	if start < 0 || start > len(f.b) {
		return 0, memfile.OutOfBounds
	}
	if ln == 0 {
		// Start of the current line: one past the previous '\n'.
		// LastIndex returns -1 when there is none, giving offset 0.
		i := bytes.LastIndex(f.b[:start], []byte("\n"))
		return i + 1, nil
	}
	if ln < 0 {
		// Scan backwards. The first newline seen bounds the current line
		// (counter value 0, the ln == 0 case handled above), so ln == -1
		// stops at the newline that starts the previous line, and so on.
		i := 0
		return bytes.LastIndexFunc(f.b[:start], func(r rune) bool {
			if r == '\n' {
				if i == ln {
					return true
				}
				i--
			}
			return false
		}) + 1, nil
	}
	// Scan forwards for the ln-th newline after start.
	i := 0
	va := bytes.IndexFunc(f.b[start:], func(r rune) bool {
		if r == '\n' {
			i++
			if i == ln {
				return true
			}
		}
		return false
	})
	if va != -1 {
		return va + start + 1, nil
	}
	// Fewer than ln newlines remain: report the end of the buffer.
	return len(f.b), nil
}
// need access to the original query contents in order to print it out properly, // unfortunately. func formatPgErr(contents *[]byte, pgerr *pq.Error) string { pos, _ := strconv.Atoi(pgerr.Position) lineNo := bytes.Count((*contents)[:pos], []byte("\n")) + 1 columnNo := pos - bytes.LastIndex((*contents)[:pos], []byte("\n")) - 1 return fmt.Sprint("PGERROR: line ", lineNo, " pos ", columnNo, ": ", pgerr.Message, ". ", pgerr.Detail) }
// FileGetLastLine reads the last line from a file. // In case of a network file, the whole file is read. // In case of a local file, the last 64kb are read, // so if the last line is longer than 64kb it is not returned completely. // The first optional timeout is used for network files only. func FileGetLastLine(filenameOrURL string, timeout ...time.Duration) (line string, err error) { if strings.Index(filenameOrURL, "file://") == 0 { return FileGetLastLine(filenameOrURL[len("file://"):]) } var data []byte if strings.Contains(filenameOrURL, "://") { data, err = FileGetBytes(filenameOrURL, timeout...) if err != nil { return "", err } } else { file, err := os.Open(filenameOrURL) if err != nil { return "", err } defer file.Close() info, err := file.Stat() if err != nil { return "", err } if start := info.Size() - 64*1024; start > 0 { file.Seek(start, os.SEEK_SET) } data, err = ioutil.ReadAll(file) if err != nil { return "", err } } pos := bytes.LastIndex(data, []byte{'\n'}) return string(data[pos+1:]), nil }
// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
// a Reader structure or an error.
func FromBytes(buffer []byte) (*Reader, error) {
	// The metadata section sits at the end of the file, introduced by a
	// fixed marker sequence; search from the end for its last occurrence.
	metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
	if metadataStart == -1 {
		return nil, fmt.Errorf("error opening database file: invalid MaxMind DB file")
	}
	metadataStart += len(metadataStartMarker)
	metadataDecoder := decoder{buffer[metadataStart:]}

	var metadata Metadata
	// decode expects a pointer wrapped in a reflect.Value.
	rvMetdata := reflect.ValueOf(&metadata)
	_, err := metadataDecoder.decode(0, rvMetdata)
	if err != nil {
		return nil, err
	}

	// Search-tree size in bytes: NodeCount * RecordSize / 4, per the
	// MaxMind DB format. The data section lies between the search tree
	// (plus separator) and the metadata marker.
	searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
	decoder := decoder{
		buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
	}

	reader := &Reader{
		buffer:    buffer,
		decoder:   decoder,
		Metadata:  metadata,
		ipv4Start: 0,
	}

	// Pre-compute the starting node for IPv4 lookups.
	reader.ipv4Start, err = reader.startNode()
	return reader, err
}
// New returns a new Article by decoding it from the given io.Reader.
func New(r io.Reader) (*Article, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	article := &Article{}
	var text, props []byte
	// Find last empty line: everything after it is treated as the
	// property block, everything before it as the article text.
	sep := bytes.LastIndex(data, propertySeparator)
	if sep >= 0 {
		text = data[:sep]
		props = data[sep:]
	} else {
		text = data
	}
	// Each property match is expected to yield (key, value) submatches.
	for _, v := range propertyRe.FindAllSubmatch(props, -1) {
		key := string(v[1])
		value := string(v[2])
		// Quoted values are unquoted so escape sequences are interpreted.
		if value != "" && value[0] == '"' && value[len(value)-1] == '"' {
			val, err := strconv.Unquote(value)
			if err != nil {
				return nil, fmt.Errorf("error unquoting %q: %s", value, err)
			}
			value = val
		}
		if err := article.add(key, value, false); err != nil {
			return nil, fmt.Errorf("error setting key %q with value %q: %s", key, string(value), err)
		}
	}
	// Keep whatever remains of the property block after the recognized
	// properties are stripped out.
	text = append(text, propertyRe.ReplaceAll(props, nil)...)
	article.Text = bytes.TrimSpace(text)
	return article, nil
}
// Valid returns true if token is a valid, unexpired token returned by Generate.
func validAtTime(token, key, userID string, now time.Time) bool {
	// Decode the token.
	data, err := base64.URLEncoding.DecodeString(token)
	if err != nil {
		return false
	}

	// Extract the issue time of the token: everything after the last ':'
	// is the issue time in nanoseconds since the Unix epoch.
	sep := bytes.LastIndex(data, []byte{':'})
	if sep < 0 {
		return false
	}
	nanos, err := strconv.ParseInt(string(data[sep+1:]), 10, 64)
	if err != nil {
		return false
	}
	issueTime := time.Unix(0, nanos)

	// Check that the token is not expired.
	if now.Sub(issueTime) >= timeout {
		return false
	}

	// Check that the token is not from the future.
	// Allow 1 minute grace period in case the token is being verified on a
	// machine whose clock is behind the machine that issued the token.
	if issueTime.After(now.Add(1 * time.Minute)) {
		return false
	}

	// Check that the token matches the expected value.
	// NOTE(review): this is a non-constant-time string comparison of a
	// MAC-like value; consider crypto/subtle.ConstantTimeCompare to avoid
	// leaking timing information — confirm threat model before changing.
	expected := generateAtTime(key, userID, issueTime)
	return token == expected
}
func (l *lexer) lexOptionValueFunc(section, name string) lexStep { return func() (lexStep, error) { var partial bytes.Buffer for { line, err := l.toEOL() if err != nil { return nil, err } // lack of continuation means this value has been exhausted idx := bytes.LastIndex(line, []byte{'\\'}) if idx == -1 { partial.Write(line) break } partial.Write(line[0:idx]) partial.WriteRune(' ') } val := strings.TrimSpace(partial.String()) l.optchan <- &UnitOption{Section: section, Name: name, Value: val} return l.lexNextSectionOrOptionFunc(section), nil } }
func Unmarshall(ns []byte) ([]byte, error) { if ns == nil { return nil, errors.New("ns is nil") } colonPos := bytes.Index(ns, []byte(":")) if colonPos == -1 { return nil, ErrNoColon } nsLenBytes := ns[:colonPos] nsLen, err := strconv.Atoi(string(nsLenBytes)) if err != nil { return nil, ErrNsLenBytesConv } commaPos := bytes.LastIndex(ns, []byte(",")) if commaPos == -1 { return nil, ErrNoComma } if commaPos-1 == colonPos { return nil, errors.New("org is nil") } if nsLen == commaPos-colonPos-1 { return ns[colonPos+1 : commaPos], nil } else { return nil, ErrNsLenNotEqaulOrgLen } }
// Main parse loop: reads the binlog stream line by line, assembling
// multi-line statements until the binlog delimiter is seen, and dispatches
// positional metadata and event data. All errors (including EOF) abort the
// loop by panicking with a binlogServerError.
func (blp *Bls) parseBinlogEvents(sendReply proto.SendBinlogResponse, binlogReader io.Reader) { // read over the stream and buffer up the transactions
	var err error
	var line []byte
	bigLine := make([]byte, 0, BINLOG_BLOCK_SIZE)
	lineReader := bufio.NewReaderSize(binlogReader, BINLOG_BLOCK_SIZE)
	// readAhead is true while a statement spans multiple lines and we are
	// still waiting for the terminating delimiter.
	readAhead := false
	var event *blsEventBuffer
	var delimIndex int
	for {
		// Reset the scratch buffers but keep their capacity.
		line = line[:0]
		bigLine = bigLine[:0]
		line, err = blp.readBlsLine(lineReader, bigLine)
		if err != nil {
			if err == io.EOF {
				// end of stream
				blp.globalState.blsStats.parseStats.Add("EOFErrors."+blp.keyrangeTag, 1)
				panic(newBinlogServerError(fmt.Sprintf("EOF")))
			}
			panic(newBinlogServerError(fmt.Sprintf("ReadLine err: , %v", err)))
		}
		if len(line) == 0 {
			continue
		}
		if line[0] == '#' {
			// parse positional data (comment lines carry binlog positions)
			line = bytes.TrimSpace(line)
			blp.currentLine = string(line)
			blp.parsePositionData(line)
		} else {
			// parse event data
			if readAhead {
				// Continuation of a statement started on a previous line.
				event.LogLine = append(event.LogLine, line...)
			} else {
				event = newBlsEventBuffer(blp.currentPosition, line)
			}
			delimIndex = bytes.LastIndex(event.LogLine, BINLOG_DELIMITER)
			if delimIndex != -1 {
				// Statement complete: strip the delimiter.
				event.LogLine = event.LogLine[:delimIndex]
				readAhead = false
			} else {
				// Delimiter not seen yet: keep accumulating lines.
				readAhead = true
				continue
			}
			event.LogLine = bytes.TrimSpace(event.LogLine)
			// First keyword of the statement, lowercased, for dispatch.
			event.firstKw = string(bytes.ToLower(bytes.SplitN(event.LogLine, SPACE, 2)[0]))
			blp.currentLine = string(event.LogLine)
			// processes statements only for the dbname that it is subscribed to.
			blp.parseDbChange(event)
			blp.parseEventData(sendReply, event)
		}
	}
}
func (this *simpleSigner) bytesSplit(value []byte) ([]byte, []byte) { idx := bytes.LastIndex(value, this.options.Separator) if idx == -1 { return value, nil } return value[:idx], value[idx+len(this.options.Separator):] }
// Decode parses a serialized assertion. // // The expected serialisation format looks like: // // HEADER ("\n\n" BODY?)? "\n\n" SIGNATURE // // where: // // HEADER is a set of header lines separated by "\n" // BODY can be arbitrary, // SIGNATURE is the signature // // A header line looks like: // // NAME ": " VALUE // // The following headers are mandatory: // // type // authority-id (the signer id) // // The following headers expect integer values and if omitted // otherwise are assumed to be 0: // // revision (a positive int) // body-length (expected to be equal to the length of BODY) // func Decode(serializedAssertion []byte) (Assertion, error) { // copy to get an independent backstorage that can't be mutated later assertionSnapshot := make([]byte, len(serializedAssertion)) copy(assertionSnapshot, serializedAssertion) contentSignatureSplit := bytes.LastIndex(assertionSnapshot, nlnl) if contentSignatureSplit == -1 { return nil, fmt.Errorf("assertion content/signature separator not found") } content := assertionSnapshot[:contentSignatureSplit] signature := assertionSnapshot[contentSignatureSplit+2:] headersBodySplit := bytes.Index(content, nlnl) var body, head []byte if headersBodySplit == -1 { head = content } else { body = content[headersBodySplit+2:] if len(body) == 0 { body = nil } head = content[:headersBodySplit] } headers, err := parseHeaders(head) if err != nil { return nil, fmt.Errorf("parsing assertion headers: %v", err) } if len(signature) == 0 { return nil, fmt.Errorf("empty assertion signature") } return buildAssertion(headers, body, content, signature) }
// grep returns the 1-based line numbers of all lines in b matched by re.
// NOTE(review): re.Match(b, true, true) appears to return an index at or
// near the end of the matching line (end := i+1 is then clamped and
// checked for '\n'), and a negative value when there is no match —
// confirm against this Regexp type's Match contract.
func grep(re *Regexp, b []byte) []int {
	var m []int
	lineno := 1
	for {
		i := re.Match(b, true, true)
		if i < 0 {
			break
		}
		// Start of the matched line: one past the previous newline
		// (-1 + 1 == 0 when the match is on the first line of b).
		start := bytes.LastIndex(b[:i], nl) + 1
		end := i + 1
		if end > len(b) {
			end = len(b)
		}
		// Advance the running line count by the lines skipped before
		// this match within the current chunk.
		lineno += bytes.Count(b[:start], nl)
		m = append(m, lineno)
		if start < end && b[end-1] == '\n' {
			// The consumed chunk ends with a newline: the next match
			// starts on the following line.
			lineno++
		}
		b = b[end:]
		if len(b) == 0 {
			break
		}
	}
	return m
}
func snip(in []byte) []byte { raw := in // snip header if i := bytes.Index(raw, snips[0]); i > -1 { if j := bytes.IndexByte(raw[i:], '\n'); j > -1 { raw = raw[i+j+1:] } } // snip footer if i := bytes.LastIndex(raw, snips[1]); i > -1 { if j := bytes.LastIndex(raw[:i], []byte{'\n'}); j > -1 { raw = raw[:j] } } return raw }
// buildDMLEvent extracts the stream comment appended to a DML statement
// and converts it into a proto.StreamEvent carrying the table name, the
// primary-key column names, and the primary-key values of every affected
// row. Statements without a stream comment are passed through as an "ERR"
// category event rather than failing the stream.
func (evs *EventStreamer) buildDMLEvent(sql []byte, insertid int64) (dmlEvent *proto.StreamEvent, newinsertid int64, err error) {
	commentIndex := bytes.LastIndex(sql, STREAM_COMMENT_START)
	if commentIndex == -1 {
		// No stream comment: surface the raw SQL as an ERR event.
		return &proto.StreamEvent{Category: "ERR", Sql: string(sql)}, insertid, nil
	}
	streamComment := string(sql[commentIndex+len(STREAM_COMMENT_START):])
	eventNode, err := parseStreamComment(streamComment)
	if err != nil {
		return nil, insertid, err
	}
	dmlEvent = new(proto.StreamEvent)
	dmlEvent.Category = "DML"
	dmlEvent.TableName = eventNode.Table
	dmlEvent.PKColNames = eventNode.Columns
	dmlEvent.PKValues = make([][]interface{}, 0, len(eventNode.Tuples))
	for _, tuple := range eventNode.Tuples {
		// Every tuple must supply a value for each PK column.
		if len(tuple) != len(eventNode.Columns) {
			return nil, insertid, fmt.Errorf("length mismatch in values")
		}
		var rowPk []interface{}
		// encodePKValues threads insertid through, presumably advancing it
		// for auto-increment columns — confirm against its definition.
		rowPk, insertid, err = encodePKValues(tuple, insertid)
		if err != nil {
			return nil, insertid, err
		}
		dmlEvent.PKValues = append(dmlEvent.PKValues, rowPk)
	}
	return dmlEvent, insertid, nil
}
// Parse a *json.SyntaxError into a pretty error message func (c *Config) parseSyntaxError(js []byte, err error) error { json_err, ok := err.(*json.SyntaxError) if !ok { return err } start := bytes.LastIndex(js[:json_err.Offset], []byte("\n")) + 1 end := bytes.Index(js[start:], []byte("\n")) if end >= 0 { end += start } else { end = len(js) } line, pos := bytes.Count(js[:start], []byte("\n")), int(json_err.Offset)-start-1 var posStr string if pos > 0 { posStr = strings.Repeat(" ", pos) } else { posStr = "" } return fmt.Errorf("%s on line %d\n%s\n%s^", err, line, js[start:end], posStr) }
func loadConfig() *Config { var config *Config file, err := ioutil.ReadFile("mipples.json") if err != nil { panic(err) } if err := json.Unmarshal(file, &config); err != nil { syntaxErr, ok := err.(*json.SyntaxError) if !ok { log.Fatalf("Cannot read config: %s", err) } // We have a syntax error. Extract out the line number and friends. // https://groups.google.com/forum/#!topic/golang-nuts/fizimmXtVfc newline := []byte{'\x0a'} // Calculate the start/end position of the line where the error is start := bytes.LastIndex(file[:syntaxErr.Offset], newline) + 1 end := len(file) if idx := bytes.Index(file[start:], newline); idx >= 0 { end = start + idx } // Count the line number we're on plus the offset in the line line := bytes.Count(file[:start], newline) + 1 pos := int(syntaxErr.Offset) - start - 1 log.Fatalf("Cannot read config. Error in line %d, char %d: %s\n%s", line, pos, syntaxErr, file[start:end]) } return config }
// Write buffers data and emits it line by line through w.Writer, passing
// each complete line through w.color before printing. Bytes after the
// final newline are kept in w.remainder until a later Write completes the
// line. Writing to a closed writer returns an error.
func (w *FormattingWriter) Write(data []byte) (int, error) {
	if w.isClosed {
		return 0, fmt.Errorf("Attempt to write to a closed FormattingWriter")
	}
	lastEOL := bytes.LastIndex(data, []byte("\n"))
	if lastEOL == -1 {
		// No complete line yet: stash everything for later.
		return w.remainder.Write(data)
	}
	// After flushing the complete lines below, reset the buffer so it
	// holds only the trailing partial line. Done in a defer so it runs on
	// every exit path, including the error returns.
	defer func() {
		w.remainder.Reset()
		w.remainder.Write(data[lastEOL+1:])
	}()
	// Append the complete lines to whatever partial line was buffered.
	_, err := w.remainder.Write(data[0 : lastEOL+1])
	if err != nil {
		return 0, err
	}
	// Emit each buffered line, colorized.
	scanner := bufio.NewScanner(w.remainder)
	for scanner.Scan() {
		_, err := fmt.Fprintln(w.Writer, w.color(scanner.Text()))
		if err != nil {
			return len(data), err
		}
	}
	return len(data), scanner.Err()
}
func (r *Repository) execAndParseCols(subcmd string) ([][2]string, error) { cmd := exec.Command("hg", "-v", "--debug", subcmd) cmd.Dir = r.Dir out, err := cmd.CombinedOutput() if err != nil { return nil, fmt.Errorf("exec `hg -v --debug %s` failed: %s. Output was:\n\n%s", subcmd, err, out) } out = bytes.TrimSuffix(out, []byte("\n")) // remove trailing newline lines := bytes.Split(out, []byte("\n")) sort.Sort(byteSlices(lines)) // sort for consistency refs := make([][2]string, len(lines)) for i, line := range lines { line = bytes.TrimSuffix(line, []byte(" (inactive)")) // format: "NAME SEQUENCE:ID" (arbitrary amount of whitespace between NAME and SEQUENCE) if len(line) <= 41 { return nil, fmt.Errorf("unexpectedly short (<=41 bytes) line in `hg -v --debug %s` output", subcmd) } id := line[len(line)-40:] // find where the SEQUENCE begins seqIdx := bytes.LastIndex(line, []byte(" ")) if seqIdx == -1 { return nil, fmt.Errorf("unexpectedly no whitespace in line in `hg -v --debug %s` output", subcmd) } name := bytes.TrimRight(line[:seqIdx], " ") refs[i] = [2]string{string(id), string(name)} } return refs, nil }
// KeyEnumerate scans keys in order; commonly used for data export and
// proximity search.
// The keys passed to fn are user-facing keys, not the internal raw_key
// representation.
func (l *LevelRedis) KeyEnumerate(seek []byte, direction IterDirection, fn func(i int, key, keytype, value []byte, quit *bool)) {
	var iter *gorocks.Iterator
	if l.snap != nil {
		// A snapshot is active: use the stored read options.
		iter = l.db.NewIterator(l.ro)
	} else {
		// One-off read with the block cache disabled.
		ro := gorocks.NewReadOptions()
		ro.SetFillCache(false)
		defer ro.Close()
		iter = l.db.NewIterator(ro)
	}
	defer iter.Close()
	// Scan from the seek position within the key namespace up to the
	// maximum possible byte.
	minkey := joinStringBytes(KEY_PREFIX, SEP_LEFT, string(seek))
	maxkey := []byte{MAXBYTE}
	prefix := joinStringBytes(KEY_PREFIX, SEP_LEFT)
	l.Enumerate(iter, minkey, maxkey, direction, func(i int, key, value []byte, quit *bool) {
		if !bytes.HasPrefix(key, prefix) {
			// Left the key namespace: stop the enumeration.
			*quit = true
			return
		}
		// Raw key layout appears to be
		// KEY_PREFIX SEP_LEFT <userkey> SEP_RIGHT <keytype> — the user
		// key sits between the first SEP_LEFT and the last SEP_RIGHT.
		left := bytes.Index(key, []byte(SEP_LEFT))
		right := bytes.LastIndex(key, []byte(SEP_RIGHT))
		if left == -1 || right == -1 {
			return // just skip malformed keys
		}
		fn(i, key[left+1:right], key[right+1:], value, quit)
	})
}
// synopsis extracts the first sentence from s. All runs of whitespace are
// replaced by a single space.
func synopsis(s string) string {
	// Only consider the first paragraph.
	parts := strings.SplitN(s, "\n\n", 2)
	s = parts[0]
	var buf []byte
	// State of the previously emitted byte: a whitespace byte following a
	// period ends the sentence.
	const (
		other = iota
		period
		space
	)
	last := space
Loop:
	for i := 0; i < len(s); i++ {
		b := s[i]
		switch b {
		case ' ', '\t', '\r', '\n':
			switch last {
			case period:
				// End of the first sentence.
				break Loop
			case other:
				// Collapse a run of whitespace into a single space.
				buf = append(buf, ' ')
				last = space
			}
		case '.':
			last = period
			buf = append(buf, b)
		default:
			last = other
			buf = append(buf, b)
		}
	}
	// Ensure that synopsis fits an App Engine datastore text property.
	const m = 400
	if len(buf) > m {
		buf = buf[:m]
		// Truncate at the last word boundary before appending the ellipsis.
		if i := bytes.LastIndex(buf, []byte{' '}); i >= 0 {
			buf = buf[:i]
		}
		buf = append(buf, " ..."...)
	}
	s = string(buf)
	// Reject synopses starting with punctuation or symbols.
	r, n := utf8.DecodeRuneInString(s)
	if n < 0 || unicode.IsPunct(r) || unicode.IsSymbol(r) {
		// ignore Markdown headings, editor settings, Go build constraints, and * in poorly formatted block comments.
		s = ""
	} else {
		for _, prefix := range badSynopsisPrefixes {
			if strings.HasPrefix(s, prefix) {
				s = ""
				break
			}
		}
	}
	return s
}
// isMultipart returns true if message contains from multiple tcp packets func (t *TCPMessage) checkIfComplete() { if t.seqMissing || t.headerPacket == -1 { return } if t.methodType == httpMethodNotFound { return } // Responses can be emitted only if we found request if !t.IsIncoming && t.AssocMessage == nil { return } // If one GET, OPTIONS, or HEAD request if t.methodType == httpMethodWithoutBody { t.complete = true } else { switch t.bodyType { case httpBodyEmpty: t.complete = true case httpBodyContentLength: if t.contentLength == 0 || t.contentLength == t.BodySize() { t.complete = true } case httpBodyChunked: lastPacket := t.packets[len(t.packets)-1] if bytes.LastIndex(lastPacket.Data, bChunkEnd) != -1 { t.complete = true } } } }
// GetSnippets collects up to nSnippets matching lines of data, each with
// its 1-based line number.
// NOTE(review): re.Match(b, true, true) is assumed to return a negative
// value when there is no match and otherwise an index within/at the end
// of the matching line — confirm against this regexp package's contract
// (the `re *regexp.Regexp` here is not the standard library type).
func GetSnippets(data []byte, re *regexp.Regexp, nSnippets int) []*Snippet {
	var (
		s          = []*Snippet{}
		chunkStart = 0
		lineNo     = 1
	)
	for chunkStart < len(data) {
		if len(s) >= nSnippets {
			break
		}
		mIdx := re.Match(data[chunkStart:], true, true) + chunkStart
		if mIdx < chunkStart {
			// Negative match index: no further matches.
			break
		}
		// Expand the match backwards to the start of its line…
		lineStart := bytes.LastIndex(data[chunkStart:mIdx], []byte("\n")) + 1 + chunkStart
		// …and clamp the end to the buffer.
		lineEnd := mIdx
		if lineEnd > len(data) {
			lineEnd = len(data)
		}
		// Advance the running line count by the lines skipped in this chunk.
		lineNo += countNL(data[chunkStart:lineStart])
		line := string(data[lineStart:lineEnd])
		snip := &Snippet{
			Content:    line,
			LineNumber: int32(lineNo),
		}
		s = append(s, snip)
		chunkStart = lineEnd
	}
	return s
}
// NewVerificationRequest prepares a VerifyRequest for the signed JSON
// blob sjson. It locates the camliSig signature separator and derives the
// payload ("bp"), the payload re-closed as standalone JSON ("bpj"), and
// the signature object ("bs") views used during verification. Panics when
// fetcher is nil; a missing separator is reported via vr.Err.
func NewVerificationRequest(sjson string, fetcher blob.Fetcher) (vr *VerifyRequest) {
	if fetcher == nil {
		panic("NewVerificationRequest fetcher is nil")
	}
	vr = new(VerifyRequest)
	vr.ba = []byte(sjson)
	vr.fetcher = fetcher

	sigIndex := bytes.LastIndex(vr.ba, []byte(sigSeparator))
	if sigIndex == -1 {
		vr.Err = errors.New("jsonsign: no 13-byte camliSig separator found in sjson")
		return
	}

	// "Bytes Payload"
	vr.bp = vr.ba[:sigIndex]

	// "Bytes Payload JSON". Note we re-use the memory (the ",")
	// from BA in BPJ, so we can't re-use that "," byte for
	// the opening "{" in "BS".
	vr.bpj = vr.ba[:sigIndex+1]
	// Overwrite the trailing "," in place so bpj is a complete JSON object.
	vr.bpj[sigIndex] = '}'
	// The signature part gets a fresh opening brace (see note above).
	vr.bs = []byte("{" + sjson[sigIndex+1:])
	return
}
func certificateBytes(req *http.Request) []byte { if req.TLS != nil && len(req.TLS.PeerCertificates) > 0 { return req.TLS.PeerCertificates[0].Raw } if hdr := req.Header.Get("X-SSL-Cert"); hdr != "" { bs := []byte(hdr) // The certificate is in PEM format but with spaces for newlines. We // need to reinstate the newlines for the PEM decoder. But we need to // leave the spaces in the BEGIN and END lines - the first and last // space - alone. firstSpace := bytes.Index(bs, []byte(" ")) lastSpace := bytes.LastIndex(bs, []byte(" ")) for i := firstSpace + 1; i < lastSpace; i++ { if bs[i] == ' ' { bs[i] = '\n' } } block, _ := pem.Decode(bs) if block == nil { // Decoding failed return nil } return block.Bytes } return nil }
// splitRight splits b at the last occurrence of sep, returning the parts
// before and after the separator. When sep is absent, the whole input is
// returned as a single element.
func splitRight(b []byte, sep []byte) [][]byte {
	ind := bytes.LastIndex(b, sep)
	if ind < 0 {
		return [][]byte{b}
	}
	// Skip the full separator, not just one byte, so multi-byte
	// separators are removed entirely (previously the tail kept sep[1:]).
	return [][]byte{b[:ind], b[ind+len(sep):]}
}