func isVagrant() (bool, error) {
	p, err := execCmd("lsmod")
	if err != nil {
		return false, errors.New("IsVagrant error: " + err.Error())
	}
	for {
		// A single line of lsmod output is in the format:
		//
		//   module 249035 2 modules
		//
		// If at least one VirtualBox module is loaded into the kernel,
		// it means it's a vagrant box.
		i := bytes.IndexByte(p, ' ')
		if i == -1 {
			break
		}
		module := string(p[:i])
		for _, vboxModule := range vboxModules {
			if module == vboxModule {
				return true, nil
			}
		}
		if i = bytes.IndexByte(p, '\n'); i == -1 {
			break
		}
		p = p[i+1:]
	}
	return false, nil
}
// ReadMIMEHeader reads a MIME-style header from r.
// The header is a sequence of possibly continued Key: Value lines
// ending in a blank line.
// The returned map m maps CanonicalMIMEHeaderKey(key) to a
// sequence of values in the same order encountered in the input.
//
// For example, consider this input:
//
//	My-Key: Value 1
//	Long-Key: Even
//	       Longer Value
//	My-Key: Value 2
//
// Given that input, ReadMIMEHeader returns the map:
//
//	map[string][]string{
//		"My-Key": {"Value 1", "Value 2"},
//		"Long-Key": {"Even Longer Value"},
//	}
func (r *Reader) ReadMIMEHeader() (MIMEHeader, os.Error) {
	m := make(MIMEHeader)
	for {
		kv, err := r.readContinuedLineSlice()
		if len(kv) == 0 {
			return m, err
		}

		// Key ends at first colon; must not have spaces.
		i := bytes.IndexByte(kv, ':')
		if i < 0 || bytes.IndexByte(kv[0:i], ' ') >= 0 {
			return m, ProtocolError("malformed MIME header line: " + string(kv))
		}
		key := CanonicalMIMEHeaderKey(string(kv[0:i]))

		// Skip initial spaces in value.
		i++ // skip colon
		for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') {
			i++
		}
		value := string(kv[i:])

		m[key] = append(m[key], value)

		if err != nil {
			return m, err
		}
	}
	panic("unreachable")
}
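// Hedged usage sketch (assumed example, not part of the snippet above): the
// same ReadMIMEHeader behavior is available from the standard library's
// net/textproto package; this shows the documented folding of continued
// lines and accumulation of repeated keys. Assumes imports of "bufio",
// "net/textproto", and "strings".
func exampleReadMIMEHeader() (textproto.MIMEHeader, error) {
	input := "My-Key: Value 1\r\n" +
		"Long-Key: Even\r\n" +
		"       Longer Value\r\n" +
		"My-Key: Value 2\r\n" +
		"\r\n"
	r := textproto.NewReader(bufio.NewReader(strings.NewReader(input)))
	// Continued lines are folded into one value; repeated keys collect
	// their values in input order, e.g. h["My-Key"] == []string{"Value 1", "Value 2"}.
	return r.ReadMIMEHeader()
}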
// ReadSlice reads until the first occurrence of delim in the input,
// returning a slice pointing at the bytes in the buffer.
// The bytes stop being valid at the next read call.
// If ReadSlice encounters an error before finding a delimiter,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
// Because the data returned from ReadSlice will be overwritten
// by the next I/O operation, most clients should use
// ReadBytes or ReadString instead.
// ReadSlice returns err != nil if and only if line does not end in delim.
func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
	// Look in buffer.
	if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 {
		line1 := b.buf[b.r : b.r+i+1]
		b.r += i + 1
		return line1, nil
	}

	// Read more into buffer, until buffer fills or we find delim.
	for {
		if b.err != nil {
			line := b.buf[b.r:b.w]
			b.r = b.w
			return line, b.readErr()
		}

		n := b.Buffered()
		b.fill()

		// Search new part of buffer.
		if i := bytes.IndexByte(b.buf[n:b.w], delim); i >= 0 {
			line := b.buf[0 : n+i+1]
			b.r = n + i + 1
			return line, nil
		}

		// Buffer is full?
		if b.Buffered() >= len(b.buf) {
			b.r = b.w
			return b.buf, ErrBufferFull
		}
	}
	panic("not reached")
}
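// Hedged usage sketch (assumed example, not from the snippet above): calling
// bufio.Reader.ReadSlice from the standard library and copying each result,
// since the returned slice is only valid until the next read. Assumes
// imports of "bufio", "io", and "strings".
func exampleReadSlice() ([]string, error) {
	r := bufio.NewReader(strings.NewReader("first\nsecond\nthird"))
	var lines []string
	for {
		line, err := r.ReadSlice('\n')
		if len(line) > 0 {
			// Copy before the next ReadSlice call overwrites the buffer.
			lines = append(lines, string(line))
		}
		if err != nil {
			if err == io.EOF {
				return lines, nil
			}
			return lines, err // e.g. bufio.ErrBufferFull on overly long lines
		}
	}
}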
func parse(remains, b []byte) []byte {
	if len(remains) > 0 {
		n := bytes.IndexByte(b, '\n')
		if n == -1 {
			return concat(remains, b)
		}
		line(concat(remains, b[:n]))
		b = b[n+1:]
	}
	for {
		n := bytes.IndexByte(b, '\n')
		if n == -1 {
			return b
		}
		line(b[:n])
		if n+1 == len(b) {
			return nil
		}
		// Advance just past the newline.
		b = b[n+1:]
	}
}
func readRawHeaders(dst, buf []byte) ([]byte, int, error) {
	n := bytes.IndexByte(buf, '\n')
	if n < 0 {
		return nil, 0, errNeedMore
	}
	if (n == 1 && buf[0] == '\r') || n == 0 {
		// empty headers
		return dst, n + 1, nil
	}

	n++
	b := buf
	m := n
	for {
		b = b[m:]
		m = bytes.IndexByte(b, '\n')
		if m < 0 {
			return nil, 0, errNeedMore
		}
		m++
		n += m
		if (m == 2 && b[0] == '\r') || m == 1 {
			dst = append(dst, buf[:n]...)
			return dst, n, nil
		}
	}
}
// parseJavaSamples parses the samples from a java profile and
// populates the Samples in a profile. Returns the remainder of the
// buffer after the samples.
func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
	nextNewLine := bytes.IndexByte(b, byte('\n'))
	locs := make(map[uint64]*Location)
	for nextNewLine != -1 {
		line := string(bytes.TrimSpace(b[0:nextNewLine]))
		if line != "" {
			sample := javaSampleRx.FindStringSubmatch(line)
			if sample == nil {
				// Not a valid sample, exit.
				return b, locs, nil
			}

			// Java profiles have data/fields inverted compared to other
			// profile types.
			value1, value2, addrs := sample[2], sample[1], sample[3]

			var sloc []*Location
			for _, addr := range parseHexAddresses(addrs) {
				loc := locs[addr]
				if locs[addr] == nil {
					loc = &Location{
						Address: addr,
					}
					p.Location = append(p.Location, loc)
					locs[addr] = loc
				}
				sloc = append(sloc, loc)
			}
			s := &Sample{
				Value:    make([]int64, 2),
				Location: sloc,
			}

			var err error
			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
			}
			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
			}

			switch pType {
			case "heap":
				const javaHeapzSamplingRate = 524288 // 512K
				s.NumLabel = map[string][]int64{"bytes": []int64{s.Value[1] / s.Value[0]}}
				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
			case "contention":
				if period := p.Period; period != 0 {
					s.Value[0] = s.Value[0] * p.Period
					s.Value[1] = s.Value[1] * p.Period
				}
			}
			p.Sample = append(p.Sample, s)
		}

		// Grab next line.
		b = b[nextNewLine+1:]
		nextNewLine = bytes.IndexByte(b, byte('\n'))
	}
	return b, locs, nil
}
// Write is used to implement io.Writer
func (s *SyslogWrapper) Write(p []byte) (int, error) {
	// Skip syslog if the log level doesn't apply
	if !s.filt.Check(p) {
		return 0, nil
	}

	// Extract log level
	var level string
	afterLevel := p
	x := bytes.IndexByte(p, '[')
	if x >= 0 {
		y := bytes.IndexByte(p[x:], ']')
		if y >= 0 {
			level = string(p[x+1 : x+y])
			afterLevel = p[x+y+2:]
		}
	}

	// Each log level will be handled by a specific syslog priority
	priority, ok := levelPriority[level]
	if !ok {
		priority = gsyslog.LOG_NOTICE
	}

	// Attempt the write
	err := s.l.WriteLevel(priority, afterLevel)
	return len(p), err
}
func (i *ImageIndex) FetchPage(data []byte, deep int) {
	// openTag: <a
	openTag := []byte{0x3c, 0x61}
	openPos := 0
	closePos := 0
	// hrefTag: href
	hrefTag := []byte{0x68, 0x72, 0x65, 0x66}
	hrefPos := 0
	// quote: " (0x22)
	quoteOpenPos := 0
	quoteClosePos := 0

	found := bytes.Index(data[openPos:], openTag)
	var tmpSlice []byte
	var url string

	for found = bytes.Index(data[openPos:], openTag); found != -1; found = bytes.Index(data[openPos:], openTag) {
		openPos = openPos + found + 3
		closePos = bytes.IndexByte(data[openPos:], 0x3e)
		tmpSlice = data[openPos : openPos+closePos]
		hrefPos = bytes.Index(tmpSlice, hrefTag)
		if hrefPos != -1 {
			quoteOpenPos = bytes.IndexByte(tmpSlice[hrefPos+5:], 0x22)
			if quoteOpenPos != -1 {
				quoteClosePos = bytes.IndexByte(tmpSlice[hrefPos+5+quoteOpenPos+1:], 0x22)
				if quoteClosePos != -1 {
					url, _ = FullURL(i.rootURL, string(tmpSlice[hrefPos+5+quoteOpenPos+1:hrefPos+5+quoteOpenPos+quoteClosePos+1]))
					i.pageList.PushBack(pageInfo{url, deep})
				}
			}
		}
	}
}
func mySplitter(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}

	j := bytes.IndexByte(data[0:], '\n')
	k := bytes.IndexByte(data[0:], '\r')

	// Possible (j, k) cases, e.g.: (-1, 3); (2, -1); (2, 3).
	if j >= 0 && k >= 0 {
		value := min(j, k)
		return value + 1, data[0:value], nil
	} else {
		value := max(j, k)
		if value >= 0 {
			return value + 1, data[0:value], nil
		}
	}

	if atEOF {
		return len(data), data, nil
	}

	// Request more data.
	return 0, nil, nil
}
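// Hedged usage sketch (assumed example, not from the snippet above): a
// bufio.SplitFunc such as mySplitter is installed on a bufio.Scanner via
// Scanner.Split before the first Scan call. Assumes imports of "bufio",
// "fmt", and "strings"; the min/max builtins used above require Go 1.21+
// (or equivalent local helpers).
func exampleScanWithSplitter() {
	sc := bufio.NewScanner(strings.NewReader("one\r\ntwo\nthree"))
	sc.Split(mySplitter) // use the custom splitter instead of bufio.ScanLines
	for sc.Scan() {
		fmt.Println(sc.Text())
	}
	if err := sc.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}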
// Parse initializes URI from the given host and uri.
//
// It is safe modifying host and uri buffers after the Parse call.
func (x *URI) Parse(host, uri []byte) {
	x.Reset()

	scheme, host, uri := splitHostUri(host, uri)
	x.Scheme = append(x.Scheme, scheme...)
	lowercaseBytes(x.Scheme)
	x.Host = append(x.Host, host...)
	lowercaseBytes(x.Host)

	b := uri
	n := bytes.IndexByte(b, '?')
	if n < 0 {
		x.PathOriginal = append(x.PathOriginal, b...)
		x.Path = normalizePath(x.Path, b)
		return
	}
	x.PathOriginal = append(x.PathOriginal, b[:n]...)
	x.Path = normalizePath(x.Path, x.PathOriginal)
	b = b[n+1:]

	n = bytes.IndexByte(b, '#')
	if n >= 0 {
		x.Hash = append(x.Hash, b[n+1:]...)
		b = b[:n]
	}

	x.QueryString = append(x.QueryString, b...)
}
func parseRequestLine(bs []byte, isCRLF bool) (lineLen int, method, requestUri, proto, protoVer string, err error) {
	newline := bytes.IndexByte(bs, '\n')
	if newline < 0 {
		err = errors.New("missing newline")
		return
	}

	var line []byte
	if isCRLF {
		line = bs[0 : newline-1]
	} else {
		line = bs[0:newline]
	}

	s1 := bytes.IndexByte(line, ' ')
	s2 := bytes.IndexByte(line[s1+1:], ' ')
	if s1 < 0 || s2 < 0 {
		err = errors.New("deformed parts")
		return
	}
	s2 += s1 + 1

	p := line[s2+1:]
	ps := bytes.IndexByte(p, '/')
	if ps < 0 {
		err = errors.New("deformed proto")
		return
	}

	return newline, string(line[:s1]), string(line[s1+1 : s2]), string(p[:ps]), string(p[ps+1:]), nil
}
func lmemfind(s1 []byte, s2 []byte) int {
	//fmt.Printf("Begin lmemfind('%s', '%s')\n", s1, s2)
	l1, l2 := len(s1), len(s2)
	if l2 == 0 {
		return 0
	} else if l2 > l1 {
		return -1
	} else {
		init := bytes.IndexByte(s1, s2[0])
		end := init + l2
		for end <= l1 && init != -1 {
			//fmt.Printf("l1: %d, l2: %d, init: %d, end: %d, slice: %s\n", l1, l2, init, end, s1[init:end])
			init++ // 1st char is already checked by IndexByte
			if bytes.Equal(s1[init-1:end], s2) {
				return init - 1
			} else {
				// find the next 'init' and try again
				next := bytes.IndexByte(s1[init:], s2[0])
				if next == -1 {
					return -1
				} else {
					init = init + next
					end = init + l2
				}
			}
		}
	}
	return -1
}
func (c *ClientConn) readHandshakeResponse() error {
	data, err := c.readPacket()
	if err != nil {
		return err
	}

	pos := 0

	//capability
	c.capability = binary.LittleEndian.Uint32(data[:4])
	pos += 4

	//skip max packet size
	pos += 4

	//charset, skip, if you want to use another charset, use set names
	//c.collation = CollationId(data[pos])
	pos++

	//skip reserved 23[00]
	pos += 23

	//user name
	c.user = string(data[pos : pos+bytes.IndexByte(data[pos:], 0)])
	pos += len(c.user) + 1

	//auth length and auth
	authLen := int(data[pos])
	pos++
	auth := data[pos : pos+authLen]

	checkAuth := mysql.CalcPassword(c.salt, []byte(c.proxy.cfg.Password))
	if c.user != c.proxy.cfg.User || !bytes.Equal(auth, checkAuth) {
		golog.Error("ClientConn", "readHandshakeResponse", "error", 0,
			"auth", auth,
			"checkAuth", checkAuth,
			"client_user", c.user,
			"config_set_user", c.proxy.cfg.User,
			"password", c.proxy.cfg.Password)
		return mysql.NewDefaultError(mysql.ER_ACCESS_DENIED_ERROR, c.user, c.c.RemoteAddr().String(), "Yes")
	}

	pos += authLen

	var db string
	if c.capability&mysql.CLIENT_CONNECT_WITH_DB > 0 {
		if len(data[pos:]) == 0 {
			return nil
		}

		db = string(data[pos : pos+bytes.IndexByte(data[pos:], 0)])
		pos += len(db) + 1
	}
	c.db = db

	return nil
}
func (w *Window) statusTag(status xmpp.Status, statusMsg string) {
	data, err := w.ReadAll("tag")
	if err != nil {
		log.Printf("read tag: %v", err)
		return
	}
	//log.Printf("tag1: %s\n", data)
	i := bytes.IndexByte(data, '|')
	if i >= 0 {
		data = data[i+1:]
	} else {
		data = nil
	}
	//log.Printf("tag2: %s\n", data)
	j := bytes.IndexByte(data, '|')
	if j >= 0 {
		data = data[j+1:]
	}
	//log.Printf("tag3: %s\n", data)
	msg := ""
	if statusMsg != "" {
		msg = " " + statusMsg
	}
	w.Ctl("cleartag\n")
	w.Write("tag", []byte(" "+short(status)+msg+" |"+string(data)))
}
func bspReadEntities(b []byte) (ents []Entity, err error) {
	ents = make([]Entity, 0, 64)
	ent := make(Entity)
	inBlock := 0
	for i := 0; i < len(b); {
		c := b[i]
		i++
		if c == '{' {
			inBlock++
		} else if c == '}' {
			if inBlock == 1 {
				ents = append(ents, ent)
				ent = make(Entity)
			}
			inBlock--
		} else if c == '"' && inBlock == 1 {
			keyIndex := bytes.IndexByte(b[i:], '"')
			if keyIndex < 0 {
				err = fmt.Errorf("key not closed with doublequote")
				break
			}
			key := stringFrom(b[i : i+keyIndex])
			i += keyIndex + 1
			for i < len(b) {
				c = b[i]
				i++
				if c == ' ' || c == '\t' {
					continue
				} else if c == '"' {
					valueIndex := bytes.IndexByte(b[i:], '"')
					if valueIndex < 0 {
						err = fmt.Errorf("key not closed with doublequote")
						break
					}
					if valueIndex == 0 {
						ent[key] = ""
					} else {
						ent[key] = stringFrom(b[i : i+valueIndex])
					}
					i += valueIndex + 1
					break
				} else {
					err = fmt.Errorf("bsp: unexpected char %q at pos %d", c, i)
				}
			}
		} else if c != ' ' && c != '\t' && c != '\r' && c != '\n' && c != 0 {
			err = fmt.Errorf("bsp: unexpected char %q at pos %d", c, i)
			return
		}
	}
	return
}
func substPatternBytes(pat, repl, str []byte) (pre, subst, post []byte) {
	i := bytes.IndexByte(pat, '%')
	if i < 0 {
		if bytes.Equal(str, pat) {
			return repl, nil, nil
		}
		return str, nil, nil
	}

	in := str
	trimed := str
	if i > 0 {
		trimed = bytes.TrimPrefix(in, pat[:i])
		if bytes.Equal(trimed, in) {
			return str, nil, nil
		}
	}
	in = trimed
	if i < len(pat)-1 {
		trimed = bytes.TrimSuffix(in, pat[i+1:])
		if bytes.Equal(trimed, in) {
			return str, nil, nil
		}
	}

	i = bytes.IndexByte(repl, '%')
	if i < 0 {
		return repl, nil, nil
	}

	return repl[:i], trimed, repl[i+1:]
}
func (x *URI) parse(host, uri []byte, h *RequestHeader) {
	x.Reset()
	x.h = h

	scheme, host, uri := splitHostURI(host, uri)
	x.scheme = append(x.scheme, scheme...)
	lowercaseBytes(x.scheme)
	x.host = append(x.host, host...)
	lowercaseBytes(x.host)

	b := uri
	n := bytes.IndexByte(b, '?')
	if n < 0 {
		x.pathOriginal = append(x.pathOriginal, b...)
		x.path = normalizePath(x.path, b)
		return
	}
	x.pathOriginal = append(x.pathOriginal, b[:n]...)
	x.path = normalizePath(x.path, x.pathOriginal)
	b = b[n+1:]

	n = bytes.IndexByte(b, '#')
	if n >= 0 {
		x.hash = append(x.hash, b[n+1:]...)
		b = b[:n]
	}

	x.queryString = append(x.queryString, b...)
}
// Helper to get a signature from the commit line, which looks like these:
//     author Patrick Gundlach <*****@*****.**> 1378823654 +0200
//     author Patrick Gundlach <*****@*****.**> Thu, 07 Apr 2005 22:13:13 +0200
// but without the "author " prefix (this method should be used for both
// the author and the committer).
//
// FIXME: include timezone for timestamp!
func newSignatureFromCommitline(line []byte) (_ *Signature, err error) {
	sig := new(Signature)
	emailStart := bytes.IndexByte(line, '<')
	sig.Name = string(line[:emailStart-1])
	emailEnd := bytes.IndexByte(line, '>')
	sig.Email = string(line[emailStart+1 : emailEnd])

	// Check the date format: a leading digit means a Unix timestamp.
	firstChar := line[emailEnd+2]
	if firstChar >= '0' && firstChar <= '9' {
		timestop := bytes.IndexByte(line[emailEnd+2:], ' ')
		timestring := string(line[emailEnd+2 : emailEnd+2+timestop])
		seconds, err := strconv.ParseInt(timestring, 10, 64)
		if err != nil {
			return nil, err
		}
		sig.When = time.Unix(seconds, 0)
	} else {
		sig.When, err = time.Parse("Mon Jan _2 15:04:05 2006 -0700", string(line[emailEnd+2:]))
		if err != nil {
			return nil, err
		}
	}
	return sig, nil
}
func main() {
	v := map[string]*url.URL{}
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		b := scanner.Bytes()
		if bytes.IndexByte(b, '#') == 0 {
			continue
		}
		p := bytes.IndexByte(b, '\t')
		if p <= 0 || len(b) <= p {
			continue
		}
		u, err := url.Parse(string(b[p+1:]))
		if err != nil {
			log.Println(err)
			continue
		}
		v[string(b[1:p])] = u
	}
	if scanner.Err() != nil {
		log.Fatal(scanner.Err())
	}
	err := tpl.Execute(os.Stdout, v)
	if err != nil {
		log.Fatal(err)
	}
}
func (r *Rarity) UnmarshalJSON(p []byte) error {
	max := len(p)
	if max > 32 {
		max = 32
	}
	switch p[0] {
	case '[':
		d := p[1:]
		end := bytes.IndexByte(d, ']')
		if end < 3 || d[0] != '"' || d[end-1] != '"' {
			break
		}
		// TODO: missing whitespace cleanup. Ok in intended dataset
		*r = strings.Split(string(d[1:end-1]), `","`)
		return nil
	case '"':
		d := p[1:]
		end := bytes.IndexByte(d, '"')
		if end < 0 {
			break
		}
		*r = []string{string(d[:end])}
		return nil
	}
	return fmt.Errorf("invalid rarity entry: %q...", p[:max])
}
// ![>|<|=]{$styleOpt}($classOpt)$imgSrc($altOptional)!:$urlOptional
// TODO: should return nil for alt instead of empty slice if not found?
func parseImg(l []byte) (rest, url, imgSrc, alt []byte, attrs *AttributesOpt) {
	//fmt.Printf("l: '%s'\n", string(l))
	if len(l) < 3 {
		return nil, nil, nil, nil, nil
	}
	if l[0] != '!' {
		return nil, nil, nil, nil, nil
	}
	l = l[1:]
	l, attrs = parseAttributesOpt(l, true)
	endIdx := bytes.IndexByte(l, '!')
	if endIdx == -1 {
		return nil, nil, nil, nil, nil
	}
	imgSrc = l[:endIdx]
	l = l[endIdx+1:]
	endIdx = bytes.IndexByte(imgSrc, '(')
	if endIdx != -1 {
		alt = imgSrc[endIdx+1:]
		imgSrc = imgSrc[:endIdx]
		endIdx = bytes.IndexByte(alt, ')')
		if endIdx == -1 {
			return nil, nil, nil, nil, nil
		}
		alt = alt[:endIdx]
	}
	if len(l) > 0 && l[0] == ':' {
		l, url = extractUrlOrRefName(l[1:])
	}
	return l, url, imgSrc, alt, attrs
}
func main() {
	var n = 5
	err := g5t.Setup("messages", "./translation", "de", g5t.GettextParser)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	s := G("father of %d children")
	s2 := GN("father of one kid", "father of %d children", n)
	fmt.Printf(s2, n)
	fmt.Println(" ")
	c1 := "Vater von %"
	c2 := " Kindern"
	if !(len(s) > len(c1)+len(c2) && s[0:len(c1)] == c1 && s[len(s)-len(c2):] == c2) {
		fmt.Println(s)
		fmt.Fprintf(os.Stderr, "String not translated.\n")
		os.Exit(1)
	}
	if bytes.IndexByte([]byte(s), '<') != -1 || bytes.IndexByte([]byte(s), '>') != -1 {
		fmt.Fprintf(os.Stderr, "Translation contains <...> markers.\n")
		os.Exit(1)
	}
	buf := []byte(fmt.Sprintf(s, n))
	strc := []byte("Vater von 5 Kindern")
	if bytes.Compare(buf, strc) != 0 {
		fmt.Fprintf(os.Stderr, "printf of translation wrong.\n")
		os.Exit(1)
	}
}
func (e *jsonEncDriver) encodeFloat(f float64, numbits int) {
	x := strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits)
	e.w.writeb(x)
	if bytes.IndexByte(x, 'E') == -1 && bytes.IndexByte(x, '.') == -1 {
		e.w.writen2('.', '0')
	}
}
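// Hedged illustration (assumed example, not part of the codec above): with
// the 'G' format, strconv emits either a plain decimal or an 'E' exponent,
// so a value such as 4 renders as "4" and gets ".0" appended, presumably so
// it still reads back as a float. Assumes imports of "fmt" and "strconv".
func exampleFloatFormatting() {
	fmt.Println(strconv.FormatFloat(4, 'G', -1, 64))    // "4"     -> would get ".0" appended
	fmt.Println(strconv.FormatFloat(2.5, 'G', -1, 64))  // "2.5"   -> already has '.'
	fmt.Println(strconv.FormatFloat(1e21, 'G', -1, 64)) // "1E+21" -> already has 'E'
}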
func newUser(data []byte) (*User, error) {
	var (
		user User
		pos  int
	)

	if pos = bytes.IndexByte(data, '<'); pos == -1 {
		return nil, ErrUnknownFormat
	}
	user.Name = string(data[:pos-1])
	data = data[pos+1:]

	if pos = bytes.IndexByte(data, '>'); pos == -1 {
		return nil, ErrUnknownFormat
	}
	user.Email = string(data[:pos])
	data = data[pos+2:]

	if pos = bytes.IndexByte(data, ' '); pos == -1 {
		return nil, ErrUnknownFormat
	}
	sec, err := strconv.ParseInt(string(data[:pos]), 10, 64)
	if err != nil {
		return nil, err
	}

	t, err := time.Parse("-0700", string(data[pos+1:]))
	if err != nil {
		return nil, err
	}

	user.Date = time.Unix(sec, 0).In(t.Location())
	return &user, nil
}
func (f *SourceMapFilter) Write(p []byte) (n int, err error) {
	var n2 int
	for {
		i := bytes.IndexByte(p, '\b')
		w := p
		if i != -1 {
			w = p[:i]
		}

		n2, err = f.Writer.Write(w)
		n += n2
		for {
			i := bytes.IndexByte(w, '\n')
			if i == -1 {
				f.column += len(w)
				break
			}
			f.line++
			f.column = 0
			w = w[i+1:]
		}

		if err != nil || i == -1 {
			return
		}
		if f.MappingCallback != nil {
			f.MappingCallback(f.line+1, f.column, f.fileSet.Position(token.Pos(binary.BigEndian.Uint32(p[i+1:i+5]))))
		}
		p = p[i+5:]
		n += 5
	}
}
func resolveLine(line []byte) {
	// defer func() {
	// 	if err := recover(); err != nil {
	// 		fmt.Println(err)
	// 	}
	// }()
	n := 0
	ip := [4]string{}
	for i := 0; i < 3; i++ {
		n = bytes.IndexByte(line, '.')
		if n < 1 {
			return
		}
		ip[i] = string(line[:n])
		line = line[n+1:]
	}
	// for ip3
	n = bytes.IndexByte(line, ' ')
	ip[3] = string(line[:n])

	ip0, ip123, err := resolveIp4(&ip)
	if err == nil {
		total_IPs++
		bufData(int(ip0), ip123)
	}
}
func (w ReplacingWriter) Write(bs []byte) (int, error) {
	var n, written int
	var err error
	newlineIdx := bytes.IndexByte(bs, w.From)
	for newlineIdx >= 0 {
		n, err = w.Writer.Write(bs[:newlineIdx])
		written += n
		if err != nil {
			break
		}
		if len(w.To) > 0 {
			n, err := w.Writer.Write(w.To)
			if n == len(w.To) {
				written++
			}
			if err != nil {
				break
			}
		}
		bs = bs[newlineIdx+1:]
		newlineIdx = bytes.IndexByte(bs, w.From)
	}
	n, err = w.Writer.Write(bs)
	written += n
	return written, err
}
// parses:
//    "<size_u32> <big-blobref> <big-offset>"
func parseMetaRow(v []byte) (m meta, err error) {
	row := v
	sp := bytes.IndexByte(v, ' ')
	if sp < 1 || sp == len(v)-1 {
		return meta{}, fmt.Errorf("invalid metarow %q", v)
	}
	m.exists = true
	size, err := strutil.ParseUintBytes(v[:sp], 10, 32)
	if err != nil {
		return meta{}, fmt.Errorf("invalid metarow size %q", v)
	}
	m.size = uint32(size)
	v = v[sp+1:]

	// remains: "<big-blobref> <big-offset>"
	if bytes.Count(v, singleSpace) != 1 {
		return meta{}, fmt.Errorf("invalid metarow %q: wrong number of spaces", row)
	}
	sp = bytes.IndexByte(v, ' ')
	largeRef, ok := blob.ParseBytes(v[:sp])
	if !ok {
		return meta{}, fmt.Errorf("invalid metarow %q: bad blobref %q", row, v[:sp])
	}
	m.largeRef = largeRef
	off, err := strutil.ParseUintBytes(v[sp+1:], 10, 32)
	if err != nil {
		return meta{}, fmt.Errorf("invalid metarow %q: bad offset: %v", row, err)
	}
	m.largeOff = uint32(off)
	return m, nil
}
// chopStack trims a stack trace so that the function which panics or calls
// Report is first.
func chopStack(s []byte, isPanic bool) string {
	var f []byte
	if isPanic {
		f = []byte("panic(")
	} else {
		f = []byte("cloud.google.com/go/errors.(*Client).Report")
	}

	lfFirst := bytes.IndexByte(s, '\n')
	if lfFirst == -1 {
		return string(s)
	}
	stack := s[lfFirst:]
	panicLine := bytes.Index(stack, f)
	if panicLine == -1 {
		return string(s)
	}
	stack = stack[panicLine+1:]
	for i := 0; i < 2; i++ {
		nextLine := bytes.IndexByte(stack, '\n')
		if nextLine == -1 {
			return string(s)
		}
		stack = stack[nextLine+1:]
	}
	return string(s[:lfFirst+1]) + string(stack)
}
// parseHostTag parses and strips the host tag from the provided event.
func parseHostTag(event *statse.Event) (string, error) {
	var tagsArray [128]byte
	tags := append(tagsArray[:0], event.Tags...)

	i := bytes.Index(tags, []byte("host="))
	if i == -1 || (i > 0 && tags[i-1] != ' ') {
		return "", errors.New("missing host tag")
	}
	tag := tags[i:]
	if i := bytes.IndexByte(tag, ' '); i > 0 {
		tag = tag[:i]
	}
	host := string(tag[bytes.IndexByte(tag, '=')+1:])
	if host == "" {
		return "", errors.New("host tag has empty value")
	}

	tagLen := len("host=") + len(host)
	if i == 0 {
		// host is the first tag.
		copy(tags[i:], tags[i+tagLen:])
		tags = tags[:len(tags)-tagLen]
	} else {
		// host is not the first tag, strip the leading space too.
		copy(tags[i-1:], tags[i+tagLen:])
		tags = tags[:len(tags)-tagLen-1]
	}
	event.Tags = string(tags)
	return host, nil
}