// Read reads and decodes quoted-printable data from the underlying reader.
func (r *Reader) Read(p []byte) (int, error) {
	// Deviations from RFC 2045:
	// 1. in addition to "=\r\n", "=\n" is also treated as soft line break.
	// 2. it will pass through a '\r' or '\n' not preceded by '=', consistent
	//    with other broken QP encoders & decoders.
	var n int
	var err error
	for len(p) > 0 {
		if len(r.line) == 0 {
			if err = r.Fn(); err != nil {
				return n, err
			}
			r.line, r.rerr = r.br.ReadSlice('\n')
			r.gerr.addUnrecover(r.rerr)

			// Does the line end in CRLF instead of just LF?
			hasLF := bytes.HasSuffix(r.line, lf)
			hasCR := bytes.HasSuffix(r.line, crlf)
			wholeLine := r.line
			r.line = bytes.TrimRightFunc(wholeLine, isQPDiscardWhitespace)
			if bytes.HasSuffix(r.line, softSuffix) {
				rightStripped := wholeLine[len(r.line):]
				r.line = r.line[:len(r.line)-1]
				if !bytes.HasPrefix(rightStripped, lf) && !bytes.HasPrefix(rightStripped, crlf) {
					r.rerr = fmt.Errorf("quotedprintable: invalid bytes after =: %q", rightStripped)
					r.gerr.add(r.rerr)
				}
			} else if hasLF {
				if hasCR {
					r.line = append(r.line, '\r', '\n')
				} else {
					r.line = append(r.line, '\n')
				}
			}
			continue
		}
		b := r.line[0]

		switch {
		case b == '=':
			b, err = readHexByte(r.line[1:])
			if err != nil {
				b = '='
				r.gerr.add(err)
				break // this modification allows bad email to be parsed too
				//return n, err
			}
			r.line = r.line[2:] // 2 of the 3; other 1 is done below
		case b == '\t' || b == '\r' || b == '\n':
		case b < ' ' || b > '~':
			//return n, fmt.Errorf("quotedprintable: invalid unescaped byte 0x%02x in body", b)
			r.gerr.add(fmt.Errorf("quotedprintable: invalid unescaped byte 0x%02x in body", b))
		}
		p[0] = b
		p = p[1:]
		r.line = r.line[1:]
		n++
	}
	return n, r.Fn()
}
// setKeys sets n random keys and values across each machine in a
// cluster and returns these values to later be checked with checkKeys.
// If some values fail to be set because a machine is down, an error is
// NOT returned. An error is returned only if no keys could be set.
func setKeys(cluster platform.Cluster, n int) (map[string]string, error) {
	var written = map[string]string{}
	for _, m := range cluster.Machines() {
		for i := 0; i < n; i++ {
			// random key and value; collisions may overwrite previous
			// sets, which is fine
			key := strconv.Itoa(rand.Int())[0:3]
			value := strconv.Itoa(rand.Int())[0:3]

			cmd := cluster.NewCommand("curl", "-w", "%{http_code}", "-s",
				fmt.Sprintf("http://%v:2379/v2/keys/%v", m.IP(), key),
				"-XPUT", "-d", "value="+value)
			b, err := cmd.Output()
			if err != nil {
				continue
			}

			// check for 201 or 200 resp header
			if !bytes.HasSuffix(b, []byte("200")) && !bytes.HasSuffix(b, []byte("201")) {
				continue
			}

			written[key] = value
		}
	}
	if len(written) == 0 {
		return nil, fmt.Errorf("failed to write any keys")
	}

	plog.Infof("wrote %v keys", len(written))
	return written, nil
}
// isDate detects if we see one of the following formats:
// August 12, 2014
// Aug 10, 2014 1:02 PM EDT
// Sunday August 10 2014
// Sunday, August 10, 2014 2:36 PM EDT
// Monday, August 11, 2014 9:18:59 AM
// Sat., Feb. 7, 2015 04:35 PM
// Tue., Apr. 21, 2015 4:17 p.m.
func isDate(line []byte) bool {
	// Trim bullets, spaces, periods, and non-breaking spaces
	line = bytes.Trim(line, "• .\u00a0")

	// check if it starts with a day or month
	dateStart := false
	for _, day := range daysOfWeek {
		if bytes.HasPrefix(line, day) {
			dateStart = true
			break
		}
	}
	if !dateStart {
		for _, day := range daysOfWeekShort {
			if bytes.HasPrefix(line, day) {
				dateStart = true
				break
			}
		}
	}
	if !dateStart {
		for _, month := range months {
			if bytes.HasPrefix(line, month) {
				dateStart = true
				break
			}
		}
	}
	if !dateStart {
		return false
	}

	// check if it ends with a timezone/daytime/year
	dateEnd := false
	for _, ap := range amPM {
		if bytes.HasSuffix(line, ap) {
			dateEnd = true
			break
		}
	}
	if !dateEnd {
		// newshound started in 2012. adjust if you want older data
		for i := 2012; i <= time.Now().Year(); i++ {
			if bytes.HasSuffix(line, []byte(strconv.Itoa(i))) {
				dateEnd = true
				break
			}
		}
	}
	if !dateEnd {
		for _, zone := range timezones {
			if bytes.HasSuffix(line, zone) {
				dateEnd = true
				break
			}
		}
	}

	return dateEnd
}
// codeLines takes a source file and returns the lines that
// span the byte range specified by start and end.
// It discards lines that end in "OMIT" or in "OMIT -->".
func codeLines(src []byte, start, end int) (lines []byte) {
	startLine := 1
	for i, b := range src {
		if i == start {
			break
		}
		if b == '\n' {
			startLine++
		}
	}
	s := bufio.NewScanner(bytes.NewReader(src[start:end]))
	for n := startLine; s.Scan(); n++ {
		l := s.Bytes()
		if bytes.HasSuffix(l, []byte("OMIT")) {
			continue
		}
		if bytes.HasSuffix(l, []byte("OMIT -->")) {
			continue
		}
		lines = append(lines, l...)
		lines = append(lines, '\n')
	}
	// TODO(miek): trim leading and trailing blank lines
	return
}
// equivalent does a linewise comparison of got and want.
// For each line:
//	got exactly equals want, OR
//	want ends in " //substr" and is a substring of got, OR
//	want ends in " //slashes" and runtime.GOOS == "windows" and got equals
//	want with its slashes swapped for backslashes.
// Otherwise equivalent returns false.
func equivalent(got, want []byte) bool {
	var (
		gotLines  = bytes.Split(got, newline)
		wantLines = bytes.Split(want, newline)
		substr    = []byte(" //substr")
		slashes   = []byte(" //slashes")
		slash     = []byte{'/'}
		gg, ww    []byte
	)
	if len(gotLines) != len(wantLines) {
		return false
	}
	for i := range gotLines {
		gg, ww = gotLines[i], wantLines[i]
		if bytes.HasSuffix(ww, slashes) {
			ww = bytes.Replace(ww[:len(ww)-len(slashes)], slash, []byte{filepath.Separator}, -1)
		}
		if !(bytes.Equal(gg, ww) || bytes.HasSuffix(ww, substr) && bytes.Contains(gg, ww[:len(ww)-len(substr)])) {
			return false
		}
	}
	return true
}
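// A hedged sketch of the " //substr" matching rule above on fabricated lines;
// the paths and messages are made up for illustration and assume imports of
// "bytes" and "fmt".
func ExampleSubstrConvention() {
	substr := []byte(" //substr")
	got := []byte("open /tmp/build123/main.go: no such file or directory")
	want := []byte("no such file or directory //substr")
	ok := bytes.HasSuffix(want, substr) && bytes.Contains(got, want[:len(want)-len(substr)])
	fmt.Println(ok)
	// Output:
	// true
}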
// Blocks of ``` need to have blank lines on both sides or they don't look
// right in HTML.
func checkPreformatted(filePath string, fileBytes []byte) ([]byte, error) {
	f := splitByPreformatted(fileBytes)
	f = append(fileBlocks{{false, []byte{}}}, f...)
	f = append(f, fileBlock{false, []byte{}})

	output := []byte(nil)
	for i := 1; i < len(f)-1; i++ {
		prev := &f[i-1]
		block := &f[i]
		next := &f[i+1]
		if !block.preformatted {
			continue
		}
		neededSuffix := []byte("\n\n")
		for !bytes.HasSuffix(prev.data, neededSuffix) {
			prev.data = append(prev.data, '\n')
		}
		for !bytes.HasSuffix(block.data, neededSuffix) {
			block.data = append(block.data, '\n')
			if bytes.HasPrefix(next.data, []byte("\n")) {
				// don't change the number of newlines unless needed.
				next.data = next.data[1:]
				if len(next.data) == 0 {
					f = append(f[:i+1], f[i+2:]...)
				}
			}
		}
	}
	for _, block := range f {
		output = append(output, block.data...)
	}
	return output, nil
}
// SetKeys sets n random keys and values across each machine in a
// cluster and returns these values to later be checked with checkKeys.
// If some values fail to be set because a machine is down, an error is
// NOT returned. An error is returned only if no keys could be set.
func SetKeys(cluster platform.Cluster, n int) (map[string]string, error) {
	var written = map[string]string{}
	for _, m := range cluster.Machines() {
		for i := 0; i < n; i++ {
			// random key and value; collisions may overwrite previous
			// sets, which is fine
			key := strconv.Itoa(rand.Int())[0:3]
			value := strconv.Itoa(rand.Int())[0:3]

			b, err := m.SSH(fmt.Sprintf("curl -s -w %%{http_code} -s http://127.0.0.1:2379/v2/keys/%v -XPUT -d value=%v", key, value))
			if err != nil {
				return nil, err
			}

			// check for 201 or 200 resp header
			if !bytes.HasSuffix(b, []byte("200")) && !bytes.HasSuffix(b, []byte("201")) {
				continue
			}

			written[key] = value
		}
	}
	if len(written) == 0 {
		return nil, fmt.Errorf("failed to write any keys")
	}

	plog.Infof("wrote %v keys", len(written))
	return written, nil
}
func (m *fakeDDB) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) {
	m.assert.NotNil(input.Item[refAttr], "%s should have been present", refAttr)
	m.assert.NotNil(input.Item[refAttr].B, "key should have been a blob: %+v", input.Item[refAttr])
	key := input.Item[refAttr].B

	if bytes.HasSuffix(key, dynamoVersionKey) {
		m.assert.NotNil(input.Item[numAttr], "%s should have been present", numAttr)
		m.assert.NotNil(input.Item[numAttr].S, "vers should have been a string: %+v", input.Item[numAttr])
		m.version = aws.StringValue(input.Item[numAttr].S)
		return &dynamodb.PutItemOutput{}, nil
	}

	m.assert.NotNil(input.Item[chunkAttr], "%s should have been present", chunkAttr)
	m.assert.NotNil(input.Item[chunkAttr].B, "value should have been a blob: %+v", input.Item[chunkAttr])
	value := input.Item[chunkAttr].B

	mustNotExist := *(input.ConditionExpression) == valueNotExistsExpression
	current, present := m.data[string(key)]

	if mustNotExist && present {
		return nil, mockAWSError("ConditionalCheckFailedException")
	} else if !mustNotExist && !bytes.Equal(current.chunk, input.ExpressionAttributeValues[":prev"].B) {
		return nil, mockAWSError("ConditionalCheckFailedException")
	}

	m.put(key, value, noneValue)
	if !bytes.HasSuffix(key, dynamoRootKey) {
		m.numPuts++
	}
	return &dynamodb.PutItemOutput{}, nil
}
func parseSessionFromBytes(b []byte) *session {
	var s session
	b = normalizeCRLF(b)
	if bytes.HasSuffix(b, newline) {
		b = b[:len(b)-1]
	}
	lines := bytes.Split(b, newline)
	lines = removeSessionComment(lines)
	for _, line := range lines {
		if bytes.HasSuffix(line, []byte{'\r'}) {
			// convert CRLF to LF
			line = line[:len(line)-1]
		}
		line = append(line, '\n')
		if bytes.HasPrefix(line, prompt) {
			s.input = append(s.input, line[len(prompt):]...)
		}
		s.fullSession = append(s.fullSession, line...)
	}
	return &s
}
func five_a(body []byte) []byte {
	if bytes.HasSuffix(body, []byte("e")) && Measure(body[:len(body)-1]) > 1 {
		return body[:len(body)-1]
	} else if bytes.HasSuffix(body, []byte("e")) && Measure(body[:len(body)-1]) == 1 && !star_o(body[:len(body)-1]) {
		return body[:len(body)-1]
	}
	return body
}
func periodCheck(line []byte) []byte {
	if len(line) > 0 && !bytes.HasSuffix(bytes.TrimSpace(line), period) && !bytes.HasSuffix(bytes.TrimSpace(line), comma) {
		line = append(line, periodWithSpace...)
	}
	return line
}
// Read reads and decodes quoted-printable data from the underlying reader.
func (r *Reader) Read(p []byte) (n int, err error) {
	// Deviations from RFC 2045:
	// 1. in addition to "=\r\n", "=\n" is also treated as soft line break.
	// 2. it will pass through a '\r' or '\n' not preceded by '=', consistent
	//    with other broken QP encoders & decoders.
	// 3. it accepts soft line-break (=) at end of message (issue 15486); i.e.
	//    the final byte read from the underlying reader is allowed to be '=',
	//    and it will be silently ignored.
	for len(p) > 0 {
		if len(r.line) == 0 {
			if r.rerr != nil {
				return n, r.rerr
			}
			r.line, r.rerr = r.br.ReadSlice('\n')

			// Does the line end in CRLF instead of just LF?
			hasLF := bytes.HasSuffix(r.line, lf)
			hasCR := bytes.HasSuffix(r.line, crlf)
			wholeLine := r.line
			r.line = bytes.TrimRightFunc(wholeLine, isQPDiscardWhitespace)
			if bytes.HasSuffix(r.line, softSuffix) {
				rightStripped := wholeLine[len(r.line):]
				r.line = r.line[:len(r.line)-1]
				if !bytes.HasPrefix(rightStripped, lf) && !bytes.HasPrefix(rightStripped, crlf) &&
					!(len(rightStripped) == 0 && len(r.line) > 0 && r.rerr == io.EOF) {
					r.rerr = fmt.Errorf("quotedprintable: invalid bytes after =: %q", rightStripped)
				}
			} else if hasLF {
				if hasCR {
					r.line = append(r.line, '\r', '\n')
				} else {
					r.line = append(r.line, '\n')
				}
			}
			continue
		}
		b := r.line[0]

		switch {
		case b == '=':
			b, err = readHexByte(r.line[1:])
			if err != nil {
				return n, err
			}
			r.line = r.line[2:] // 2 of the 3; other 1 is done below
		case b == '\t' || b == '\r' || b == '\n':
			break
		case b < ' ' || b > '~':
			return n, fmt.Errorf("quotedprintable: invalid unescaped byte 0x%02x in body", b)
		}
		p[0] = b
		p = p[1:]
		r.line = r.line[1:]
		n++
	}
	return n, nil
}
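// A minimal usage sketch, separate from the readers above: decoding a short
// quoted-printable body with the standard library's mime/quotedprintable
// package, whose Read method the snippet above comes from. The sample input is
// made up; assumes imports of "fmt", "io", "mime/quotedprintable", and "strings".
func ExampleQuotedPrintableDecode() {
	r := quotedprintable.NewReader(strings.NewReader("Caf=C3=A9=\r\n au lait"))
	out, err := io.ReadAll(r)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%s\n", out) // the =C3=A9 escapes decode to "é"; "=\r\n" is a soft break
	// Output:
	// Café au lait
}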
func one_a(body []byte) []byte {
	if bytes.HasSuffix(body, []byte("sses")) || bytes.HasSuffix(body, []byte("ies")) {
		return body[:len(body)-2]
	} else if bytes.HasSuffix(body, []byte("ss")) {
		return body
	} else if bytes.HasSuffix(body, []byte("s")) {
		return body[:len(body)-1]
	}
	return body
}
func (q *qpReader) Read(p []byte) (n int, err error) {
	for len(p) > 0 {
		if len(q.line) == 0 {
			if q.rerr != nil {
				return n, q.rerr
			}
			q.skipWhite = true
			q.line, q.rerr = q.br.ReadSlice('\n')

			// Does the line end in CRLF instead of just LF?
			hasLF := bytes.HasSuffix(q.line, lf)
			hasCR := bytes.HasSuffix(q.line, crlf)
			wholeLine := q.line
			q.line = bytes.TrimRightFunc(wholeLine, isQPDiscardWhitespace)
			if bytes.HasSuffix(q.line, softSuffix) {
				rightStripped := wholeLine[len(q.line):]
				q.line = q.line[:len(q.line)-1]
				if !bytes.HasPrefix(rightStripped, lf) && !bytes.HasPrefix(rightStripped, crlf) {
					q.rerr = fmt.Errorf("multipart: invalid bytes after =: %q", rightStripped)
				}
			} else if hasLF {
				if hasCR {
					q.line = append(q.line, '\r', '\n')
				} else {
					q.line = append(q.line, '\n')
				}
			}
			continue
		}
		b := q.line[0]
		if q.skipWhite && isQPSkipWhiteByte(b) {
			q.line = q.line[1:]
			continue
		}
		q.skipWhite = false

		switch {
		case b == '=':
			b, err = q.readHexByte(q.line[1:])
			if err != nil {
				return n, err
			}
			q.line = q.line[2:] // 2 of the 3; other 1 is done below
		case b == '\t' || b == '\r' || b == '\n':
			break
		case b < ' ' || b > '~':
			return n, fmt.Errorf("multipart: invalid unescaped byte 0x%02x in quoted-printable body", b)
		}
		p[0] = b
		p = p[1:]
		q.line = q.line[1:]
		n++
	}
	return n, nil
}
func ExampleHasSuffix() {
	fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("go")))
	fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("O")))
	fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("Ami")))
	fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("")))
	// Output:
	// true
	// false
	// false
	// true
}
// IsContainerized returns true if we are running inside a container.
func IsContainerized() (bool, error) {
	b, err := ioutil.ReadFile(proc1Cgroup)
	if err != nil {
		return false, err
	}

	for _, line := range bytes.Split(b, []byte{'\n'}) {
		if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) {
			return true, nil
		}
	}
	return false, nil
}
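// A hedged illustration of the cgroup heuristic above. The sample
// /proc/1/cgroup contents are fabricated: on a host, every line ends in "/" or
// "init.scope", while inside a container at least one line carries a longer
// path. Assumes imports of "bytes" and "fmt".
func ExampleCgroupHeuristic() {
	samples := [][]byte{
		[]byte("12:devices:/\n11:cpuset:/\n1:name=systemd:/init.scope\n"),
		[]byte("12:devices:/docker/0123456789ab\n"),
	}
	for _, sample := range samples {
		containerized := false
		for _, line := range bytes.Split(sample, []byte{'\n'}) {
			if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) {
				containerized = true
			}
		}
		fmt.Println(containerized)
	}
	// Output:
	// false
	// true
}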
func (suite *LevelDBStoreTestSuite) TestReservedKeys() {
	// Apparently, the following:
	//	s := []byte("")
	//	s = append(s, 1, 2, 3)
	//	f := append(s, 10, 20, 30)
	//	g := append(s, 4, 5, 6)
	//
	// Results in both f and g being [1, 2, 3, 4, 5, 6]
	// This was happening to us here, so ldb.chunkPrefix was "/chunk/" and
	// ldb.rootKey was "/chun" instead of "/root"
	ldb := suite.factory.CreateStore("").(*LevelDBStore)
	suite.True(bytes.HasSuffix(ldb.rootKey, []byte(rootKeyConst)))
	suite.True(bytes.HasSuffix(ldb.chunkPrefix, []byte(chunkPrefixConst)))
}
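// A standalone sketch of the append aliasing the comment above describes.
// Whether the two appends share one backing array depends on the capacity the
// runtime allocates, so the result is typical rather than guaranteed; this is
// presumably why the store now keeps independent copies of its key prefixes.
// Assumes an import of "fmt".
func ExampleAppendAliasing() {
	s := []byte{}
	s = append(s, 1, 2, 3) // the runtime usually allocates spare capacity here
	f := append(s, 10, 20, 30)
	g := append(s, 4, 5, 6) // may reuse the same backing array, overwriting what f appended
	fmt.Println(f)
	fmt.Println(g)
	// Typically prints [1 2 3 4 5 6] twice.
}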
// deleteOldAddrIndex deletes the entire addrindex stored within the DB for a
// 2-byte addrIndexKeyPrefix. It also resets the cached in-memory metadata about
// the addr index.
func (db *LevelDb) deleteOldAddrIndex() error {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	batch := db.lBatch()
	defer batch.Reset()

	// Delete the entire index along with any metadata about it.
	iter := db.lDb.NewIterator(bytesPrefix([]byte("a-")), db.ro)
	numInBatch := 0
	for iter.Next() {
		key := iter.Key()

		// With a 24-bit index key prefix, 1 in every 2^24 keys is a collision.
		// We check the length to make sure we only delete address index keys.
		// We also check the last two bytes to make sure the suffix doesn't
		// match other types of index that are 34 bytes long.
		if len(key) == 34 && !bytes.HasSuffix(key, recordSuffixTx) && !bytes.HasSuffix(key, recordSuffixSpentTx) {
			batch.Delete(key)
			numInBatch++
		}

		// Delete in chunks to potentially avoid very large batches.
		if numInBatch >= batchDeleteThreshold {
			if err := db.lDb.Write(batch, db.wo); err != nil {
				iter.Release()
				return err
			}
			batch.Reset()
			numInBatch = 0
		}
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		return err
	}

	batch.Delete(addrIndexMetaDataKey)
	batch.Delete(addrIndexVersionKey)

	if err := db.lDb.Write(batch, db.wo); err != nil {
		return err
	}

	db.lastAddrIndexBlkIdx = -1
	db.lastAddrIndexBlkSha = wire.ShaHash{}
	return nil
}
// read one line from input and strip off terminating LF or terminating CR-LF
func (r *Reader) readLine() (line []byte, err error) {
	line, err = r.reader.ReadBytes(newline)
	if err != nil {
		return
	}
	switch {
	case bytes.HasSuffix(line, crlfSlice):
		line = line[0 : len(line)-len(crlfSlice)]
	case bytes.HasSuffix(line, newlineSlice):
		line = line[0 : len(line)-len(newlineSlice)]
	}
	return
}
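// A standalone sketch of the LF / CR-LF stripping readLine performs above,
// with literal byte slices in place of the package's crlfSlice/newlineSlice
// variables; the sample lines are made up. Assumes imports of "bytes" and "fmt".
func ExampleStripLineEnding() {
	for _, line := range [][]byte{[]byte("hello\r\n"), []byte("world\n")} {
		switch {
		case bytes.HasSuffix(line, []byte("\r\n")):
			line = line[:len(line)-2]
		case bytes.HasSuffix(line, []byte("\n")):
			line = line[:len(line)-1]
		}
		fmt.Printf("%q\n", line)
	}
	// Output:
	// "hello"
	// "world"
}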
func (dm *dynoLoadMsg) HandleLogfmt(key, val []byte) error {
	switch {
	case bytes.Equal(key, keySource):
		dm.Source = string(val)
	case bytes.Equal(key, keyDyno):
		dm.Dyno = string(val)
	case bytes.HasSuffix(key, keyLoadAvg1Min):
		dm.LoadAvg1Min, _ = strconv.ParseFloat(string(val), 64)
	case bytes.HasSuffix(key, keyLoadAvg5Min):
		dm.LoadAvg5Min, _ = strconv.ParseFloat(string(val), 64)
	case bytes.HasSuffix(key, keyLoadAvg15Min):
		dm.LoadAvg15Min, _ = strconv.ParseFloat(string(val), 64)
	}
	return nil
}
func (db *Instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) {
	t := db.newReadOnlyTransaction()
	defer t.close()

	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, prefix)[:keyPrefixLen+keyFolderLen+keyDeviceLen+len(prefix)]), nil)
	defer dbi.Release()

	slashedPrefix := prefix
	if !bytes.HasSuffix(prefix, []byte{'/'}) {
		slashedPrefix = append(slashedPrefix, '/')
	}

	for dbi.Next() {
		name := db.deviceKeyName(dbi.Key())
		if len(prefix) > 0 && !bytes.Equal(name, prefix) && !bytes.HasPrefix(name, slashedPrefix) {
			return
		}

		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		f, err := unmarshalTrunc(append([]byte{}, dbi.Value()...), truncate)
		if err != nil {
			panic(err)
		}
		if cont := fn(f); !cont {
			return
		}
	}
}
// parseGetResponse reads a GET response from r and calls cb for each
// read and allocated Item
func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
	for {
		line, err := r.ReadSlice('\n')
		if err != nil {
			return err
		}
		if bytes.Equal(line, resultEnd) {
			return nil
		}
		it := new(Item)
		size, err := scanGetResponseLine(line, it)
		if err != nil {
			return err
		}
		it.Value, err = ioutil.ReadAll(io.LimitReader(r, int64(size)+2))
		if err != nil {
			return err
		}
		if !bytes.HasSuffix(it.Value, crlf) {
			return fmt.Errorf("memcache: corrupt get result read")
		}
		it.Value = it.Value[:size]
		cb(it)
	}
	panic("unreached")
}
// parseGetResponse reads a GET response from r and calls cb for each
// read and allocated Item
func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
	for {
		line, err := r.ReadSlice('\n')
		if err != nil {
			return err
		}
		if bytes.Equal(line, resultEnd) {
			return nil
		}
		it := new(Item)
		var size int
		n, err := fmt.Sscanf(string(line), "VALUE %s %d %d %d\r\n",
			&it.Key, &it.Flags, &size, &it.casid)
		if err != nil {
			return err
		}
		if n != 4 {
			return fmt.Errorf("memcache: unexpected line in get response: %q", string(line))
		}
		it.Value, err = ioutil.ReadAll(io.LimitReader(r, int64(size)+2))
		if err != nil {
			return err
		}
		if !bytes.HasSuffix(it.Value, crlf) {
			return fmt.Errorf("memcache: corrupt get result read")
		}
		it.Value = it.Value[:size]
		cb(it)
	}
	panic("unreached")
}
func (p *Parser) getRange(i int) (first, last rune) {
	b := p.getField(i)
	if k := bytes.Index(b, []byte("..")); k != -1 {
		return p.parseRune(b[:k]), p.parseRune(b[k+2:])
	}
	x := p.parseRune(b)

	// Special case for UnicodeData that was retained for backwards compatibility.
	if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) {
		if p.parsedRange {
			return p.rangeStart, p.rangeEnd
		}
		mf := reRange.FindStringSubmatch(p.scanner.Text())
		if mf == nil || !p.scanner.Scan() {
			p.setError(errIncorrectLegacyRange)
			return x, x
		}
		// Using Bytes would be more efficient here, but Text is a lot easier
		// and this is not a frequent case.
		ml := reRange.FindStringSubmatch(p.scanner.Text())
		if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] {
			p.setError(errIncorrectLegacyRange)
			return x, x
		}
		p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])])
		p.parsedRange = true
		return p.rangeStart, p.rangeEnd
	}
	return x, x
}
func localRangeKeyPrint(key roachpb.Key) string {
	var buf bytes.Buffer

	for _, s := range rangeSuffixDict {
		if s.atEnd {
			if bytes.HasSuffix(key, s.suffix) {
				key = key[:len(key)-len(s.suffix)]
				fmt.Fprintf(&buf, "%s/%s", decodeKeyPrint(key), s.name)
				return buf.String()
			}
		} else {
			begin := bytes.Index(key, s.suffix)
			if begin > 0 {
				addrKey := key[:begin]
				txnID, err := uuid.FromBytes(key[(begin + len(s.suffix)):])
				if err != nil {
					return fmt.Sprintf("/%q/err:%v", key, err)
				}
				fmt.Fprintf(&buf, "%s/%s/addrKey:/id:%q", decodeKeyPrint(addrKey), s.name, txnID)
				return buf.String()
			}
		}
	}

	fmt.Fprintf(&buf, "%s", decodeKeyPrint(key))
	return buf.String()
}
func one_c(body []byte) []byte {
	if bytes.HasSuffix(body, []byte("y")) && hasVowel(body[:len(body)-1]) {
		body[len(body)-1] = 'i'
		return body
	}
	return body
}
// Image defines how images should be processed to produce corresponding HTML elements.
func (r *Renderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
	prefix := strings.Replace(r.urlPrefix, "/src/", "/raw/", 1)
	if len(link) > 0 {
		if isLink(link) {
			// External link with .svg suffix usually means CI status.
			// TODO: define a keyword to allow non-svg images to render as external links.
			if bytes.HasSuffix(link, svgSuffix) || bytes.Contains(link, svgSuffixWithMark) {
				r.Renderer.Image(out, link, title, alt)
				return
			}
		} else {
			if link[0] != '/' {
				prefix += "/"
			}
			link = bytes.Replace([]byte(prefix+string(link)), spaceBytes, spaceEncodedBytes, -1)
			fmt.Println(333, string(link))
		}
	}

	out.WriteString(`<a href="`)
	out.Write(link)
	out.WriteString(`">`)
	r.Renderer.Image(out, link, title, alt)
	out.WriteString("</a>")
}
func (c *codec) Decode(b []byte) (Message, error) {
	// [message type] ':' [message id ('+')] ':' [message endpoint] (':' [message data])
	parts := bytes.SplitN(b, []byte(":"), 4)
	if len(parts) < 3 {
		return nil, errors.New("invalid packet")
	}
	mtype, err := strconv.Atoi(string(parts[0]))
	if err != nil {
		return nil, err
	}
	idPart := parts[1]
	ack := bytes.HasSuffix(idPart, []byte("+"))
	if ack {
		idPart = idPart[:len(idPart)-1]
	}
	pid, err := strconv.Atoi(string(idPart))
	if err != nil {
		return nil, err
	}
	endpoint := string(parts[2])
	var r io.Reader
	if len(parts) == 4 {
		r = bytes.NewBuffer(parts[3])
	}
	return newMessage(mtype, pid, ack, endpoint, r), nil
}
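// A hedged walk-through of the framing Decode expects above. The sample packet
// "3:1+::hello" is made up for illustration, and the fields are simply printed
// rather than wrapped in the package's Message type. Assumes imports of
// "bytes" and "fmt".
func ExamplePacketFraming() {
	parts := bytes.SplitN([]byte("3:1+::hello"), []byte(":"), 4)
	idPart := parts[1]
	ack := bytes.HasSuffix(idPart, []byte("+")) // trailing '+' marks an ack request
	if ack {
		idPart = idPart[:len(idPart)-1]
	}
	fmt.Printf("type=%s id=%s ack=%v endpoint=%q data=%q\n", parts[0], idPart, ack, parts[2], parts[3])
	// Output:
	// type=3 id=1 ack=true endpoint="" data="hello"
}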
func (options *Html) TocHeader(text []byte, level int) {
	for level > options.currentLevel {
		switch {
		case bytes.HasSuffix(options.toc.Bytes(), []byte("</li>\n")):
			// this sublist can nest underneath a header
			size := options.toc.Len()
			options.toc.Truncate(size - len("</li>\n"))

		case options.currentLevel > 0:
			options.toc.WriteString("<li>")
		}
		if options.toc.Len() > 0 {
			options.toc.WriteByte('\n')
		}
		options.toc.WriteString("<ul>\n")
		options.currentLevel++
	}

	for level < options.currentLevel {
		options.toc.WriteString("</ul>")
		if options.currentLevel > 1 {
			options.toc.WriteString("</li>\n")
		}
		options.currentLevel--
	}

	options.toc.WriteString("<li><a href=\"#toc_")
	options.toc.WriteString(strconv.Itoa(options.headerCount))
	options.toc.WriteString("\">")
	options.headerCount++

	options.toc.Write(text)

	options.toc.WriteString("</a></li>\n")
}
func one_b(body []byte) []byte {
	if bytes.HasSuffix(body, []byte("eed")) {
		if Measure(body[:len(body)-3]) > 0 {
			return body[:len(body)-1]
		}
	} else if bytes.HasSuffix(body, []byte("ed")) {
		if hasVowel(body[:len(body)-2]) {
			return one_b_a(body[:len(body)-2])
		}
	} else if bytes.HasSuffix(body, []byte("ing")) {
		if hasVowel(body[:len(body)-3]) {
			return one_b_a(body[:len(body)-3])
		}
	}
	return body
}