func TestAttachmentOnly(t *testing.T) {
	var aTests = []struct {
		filename       string
		attachmentsLen int
		inlinesLen     int
	}{
		{filename: "attachment-only.raw", attachmentsLen: 1, inlinesLen: 0},
		{filename: "attachment-only-inline.raw", attachmentsLen: 0, inlinesLen: 1},
	}

	for _, a := range aTests {
		// Mail with disposition attachment
		msg := readMessage(a.filename)
		m, err := ParseMIMEBody(msg)
		if err != nil {
			t.Fatal("Failed to parse MIME:", err)
		}

		if len(m.Attachments) != a.attachmentsLen {
			t.Fatal("len(Attachments) got:", len(m.Attachments), "want:", a.attachmentsLen)
		}
		if a.attachmentsLen > 0 &&
			!bytes.HasPrefix(m.Attachments[0].Content(), []byte{0x89, 'P', 'N', 'G'}) {
			t.Error("Content should be PNG image")
		}

		if len(m.Inlines) != a.inlinesLen {
			t.Fatal("len(Inlines) got:", len(m.Inlines), "want:", a.inlinesLen)
		}
		if a.inlinesLen > 0 &&
			!bytes.HasPrefix(m.Inlines[0].Content(), []byte{0x89, 'P', 'N', 'G'}) {
			t.Error("Content should be PNG image")
		}
	}
}

func (t *tScreen) parseFunctionKey(buf *bytes.Buffer) (bool, bool) {
	b := buf.Bytes()
	partial := false
	for e, k := range t.keycodes {
		esc := []byte(e)
		if (len(esc) == 1) && (esc[0] == '\x1b') {
			continue
		}
		if bytes.HasPrefix(b, esc) {
			// matched
			var r rune
			if len(esc) == 1 {
				r = rune(b[0])
			}
			mod := k.mod
			if t.escaped {
				mod |= ModAlt
				t.escaped = false
			}
			ev := NewEventKey(k.key, r, mod)
			t.PostEvent(ev)
			for i := 0; i < len(esc); i++ {
				buf.ReadByte()
			}
			return true, true
		}
		if bytes.HasPrefix(esc, b) {
			partial = true
		}
	}
	return partial, false
}

// rangeAddressing updates or deletes the range addressing metadata
// for the range specified by desc. The action to take is specified by
// the supplied metaAction function.
//
// The rules for meta1 and meta2 records are as follows:
//
// 1. If desc.StartKey or desc.EndKey is meta1:
//    - ERROR
// 2. If desc.EndKey is meta2:
//    - meta1(desc.EndKey)
// 3. If desc.EndKey is a normal user key:
//    - meta2(desc.EndKey)
//    3a. If desc.StartKey is KeyMin or meta2:
//        - meta1(KeyMax)
func rangeAddressing(b *client.Batch, desc *roachpb.RangeDescriptor, action metaAction) error {
	// 1. Handle the illegal case of the start or end key being meta1.
	if bytes.HasPrefix(desc.EndKey, keys.Meta1Prefix) ||
		bytes.HasPrefix(desc.StartKey, keys.Meta1Prefix) {
		return errors.Errorf("meta1 addressing records cannot be split: %+v", desc)
	}
	// Note that both cases 2 and 3 are handled by keys.RangeMetaKey.
	//
	// 2. The range ends with a meta2 prefix. This means the range is full
	//    of meta2 records. We must update the relevant meta1 entry pointing
	//    to the end of this range.
	//
	// 3. The range ends with a normal user key, so we must update the
	//    relevant meta2 entry pointing to the end of this range.
	action(b, keys.RangeMetaKey(desc.EndKey), desc)

	if !bytes.HasPrefix(desc.EndKey, keys.Meta2Prefix) {
		// 3a. The range starts with KeyMin or a meta2 addressing record;
		//     update the meta1 entry for KeyMax.
		if bytes.Equal(desc.StartKey, roachpb.RKeyMin) ||
			bytes.HasPrefix(desc.StartKey, keys.Meta2Prefix) {
			action(b, keys.Meta1KeyMax, desc)
		}
	}
	return nil
}

func TestAttachmentOnly(t *testing.T) {
	var aTests = []struct {
		filename       string
		attachmentsLen int
		inlinesLen     int
	}{
		{filename: "attachment-only.raw", attachmentsLen: 1, inlinesLen: 0},
		{filename: "attachment-only-inline.raw", attachmentsLen: 0, inlinesLen: 1},
	}

	for _, a := range aTests {
		// Mail with disposition attachment
		msg := readMessage(a.filename)
		m, err := ParseMIMEBody(msg)
		assert.Equal(t, err, nil)
		assert.Equal(t, a.attachmentsLen, len(m.Attachments))
		assert.Equal(t, a.inlinesLen, len(m.Inlines))

		if a.attachmentsLen > 0 {
			assert.True(t,
				bytes.HasPrefix(m.Attachments[0].Content(), []byte{0x89, 'P', 'N', 'G'}),
				"Content should be PNG image")
		}
		if a.inlinesLen > 0 {
			assert.True(t,
				bytes.HasPrefix(m.Inlines[0].Content(), []byte{0x89, 'P', 'N', 'G'}),
				"Content should be PNG image")
		}
	}
}

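// Hypothetical sketch (not part of the tests above) of the magic-number check
// they rely on: a PNG file starts with the 8-byte signature
// 0x89 'P' 'N' 'G' '\r' '\n' 0x1a '\n', and matching its first four bytes with
// bytes.HasPrefix is enough to distinguish it here. Assumes "bytes" and "fmt"
// are imported.
func demoPNGSniff() {
	payload := append([]byte{0x89, 'P', 'N', 'G', '\r', '\n', 0x1a, '\n'}, []byte("...image data...")...)
	if bytes.HasPrefix(payload, []byte{0x89, 'P', 'N', 'G'}) {
		fmt.Println("looks like a PNG")
	}
}
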
// WalkPrefix is used to walk the tree under a prefix
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
	search := prefix
	for {
		// Check for key exhaustion
		if len(search) == 0 {
			recursiveWalk(n, fn)
			return
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else if bytes.HasPrefix(n.prefix, search) {
			// Child may be under our search prefix
			recursiveWalk(n, fn)
			return
		} else {
			break
		}
	}
}

func (rc *ResponseCache) decodeResponseCacheKey(encKey engine.MVCCKey) ([]byte, error) {
	key, _, isValue, err := engine.MVCCDecodeKey(encKey)
	if err != nil {
		return nil, err
	}
	if isValue {
		return nil, util.Errorf("key %s is not a raw MVCC value", encKey)
	}
	if !bytes.HasPrefix(key, keys.LocalRangeIDPrefix) {
		return nil, util.Errorf("key %s does not have %s prefix", key, keys.LocalRangeIDPrefix)
	}
	// Cut the prefix and the Range ID.
	b := key[len(keys.LocalRangeIDPrefix):]
	b, _, err = encoding.DecodeUvarint(b)
	if err != nil {
		return nil, err
	}
	if !bytes.HasPrefix(b, keys.LocalResponseCacheSuffix) {
		return nil, util.Errorf("key %s does not contain the response cache suffix %s",
			key, keys.LocalResponseCacheSuffix)
	}
	// Cut the response cache suffix.
	b = b[len(keys.LocalResponseCacheSuffix):]
	// Decode the family.
	b, fm, err := encoding.DecodeBytes(b, nil)
	if err != nil {
		return nil, err
	}
	if len(b) > 0 {
		return nil, util.Errorf("key %s has leftover bytes after decode: %s; indicates corrupt key",
			encKey, b)
	}
	return fm, nil
}

func IgnoredStatement(line []byte) bool {
	if bytes.HasPrefix(line, COMMENT) ||
		bytes.HasPrefix(line, SET_SESSION_VAR) ||
		bytes.HasPrefix(line, DELIMITER) ||
		bytes.HasPrefix(line, BINLOG) ||
		bytes.HasPrefix(line, BINLOG_DB_CHANGE) {
		return true
	}
	return false
}

// UnmarshalFile loads a file and parses a Plist from the loaded data.
// If the file is a binary plist, the plutil system command is used to convert
// it to XML text.
func UnmarshalFile(filename string) (*Plist, error) {
	xmlFile, err := os.Open(filename)
	if err != nil {
		return nil, fmt.Errorf("plist: error opening plist: %s", err)
	}
	defer xmlFile.Close()
	xmlData, err := ioutil.ReadAll(xmlFile)
	if err != nil {
		return nil, fmt.Errorf("plist: error reading plist file: %s", err)
	}

	if !bytes.HasPrefix(xmlData, []byte("<?xml ")) {
		debug("non-text XML -- assuming binary")
		xmlData, err = exec.Command("plutil", "-convert", "xml1", "-o", "-", filename).Output()
		if err != nil || !bytes.HasPrefix(xmlData, []byte("<?xml ")) {
			return nil, fmt.Errorf("plist: invalid plist file " + filename)
		}
	}

	var plist Plist
	err = Unmarshal(xmlData, &plist)
	if err != nil {
		return nil, err
	}
	return &plist, err
}

func toRunes(text []byte) ([]rune, error) {
	var runes []rune
	if bytes.HasPrefix(text, []byte{0, 0, 0xFE, 0xFF}) {
		return nil, fmt.Errorf("mof: unsupported encoding: UTF-32, big-endian")
	} else if bytes.HasPrefix(text, []byte{0xFF, 0xFE, 0, 0}) {
		return nil, fmt.Errorf("mof: unsupported encoding: UTF-32, little-endian")
	} else if bytes.HasPrefix(text, []byte{0xFE, 0xFF}) {
		return nil, fmt.Errorf("mof: unsupported encoding: UTF-16, big-endian")
	} else if bytes.HasPrefix(text, []byte{0xFF, 0xFE}) {
		// UTF-16, little-endian
		for len(text) > 0 {
			u := binary.LittleEndian.Uint16(text)
			runes = append(runes, rune(u))
			text = text[2:]
		}
		return runes, nil
	}

	// Assume UTF-8.
	for len(text) > 0 {
		r, s := utf8.DecodeRune(text)
		if r == utf8.RuneError {
			return nil, fmt.Errorf("mof: unrecognized encoding")
		}
		runes = append(runes, r)
		text = text[s:]
	}
	return runes, nil
}

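// Hypothetical sketch of the BOM-sniffing order used by toRunes above: the
// UTF-32 marks must be tested before the UTF-16 ones, because the UTF-32
// little-endian BOM (FF FE 00 00) itself starts with the UTF-16 little-endian
// BOM (FF FE). Assumes "bytes" and "fmt" are imported.
func demoBOMSniff(text []byte) {
	switch {
	case bytes.HasPrefix(text, []byte{0, 0, 0xFE, 0xFF}):
		fmt.Println("UTF-32, big-endian")
	case bytes.HasPrefix(text, []byte{0xFF, 0xFE, 0, 0}):
		fmt.Println("UTF-32, little-endian")
	case bytes.HasPrefix(text, []byte{0xFE, 0xFF}):
		fmt.Println("UTF-16, big-endian")
	case bytes.HasPrefix(text, []byte{0xFF, 0xFE}):
		fmt.Println("UTF-16, little-endian")
	default:
		fmt.Println("no BOM; assuming UTF-8")
	}
}
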
func (r *Reader) Read() ([]byte, error) {
	buf := []byte{}
	var isErr bool
	for {
		line, err := r.ReadBytes('\n')
		if err != nil {
			return nil, err
		}
		if bytes.HasPrefix(line, []byte("event: error")) {
			isErr = true
		}
		if bytes.HasPrefix(line, []byte("data: ")) {
			data := bytes.TrimSuffix(bytes.TrimPrefix(line, []byte("data: ")), []byte("\n"))
			buf = append(buf, data...)
		}
		// peek ahead one byte to see if we have a double newline (terminator)
		if peek, err := r.Peek(1); err == nil && string(peek) == "\n" {
			break
		}
	}
	if isErr {
		return nil, Error(string(buf))
	}
	return buf, nil
}

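// Hypothetical sketch (not the library's API) of the line handling in Read
// above: server-sent-event style payloads carry content on "data: " lines and
// terminate an event with a blank line. Assumes "bufio", "bytes", "fmt", and
// "strings" are imported.
func demoSSELines() {
	br := bufio.NewReader(strings.NewReader("event: message\ndata: hello\ndata: world\n\n"))
	var buf []byte
	for {
		line, err := br.ReadBytes('\n')
		if err != nil {
			break
		}
		if bytes.HasPrefix(line, []byte("data: ")) {
			buf = append(buf, bytes.TrimSuffix(bytes.TrimPrefix(line, []byte("data: ")), []byte("\n"))...)
		}
		// A lone "\n" up next means the event is complete.
		if peek, err := br.Peek(1); err == nil && peek[0] == '\n' {
			break
		}
	}
	fmt.Printf("%s\n", buf) // prints "helloworld"
}
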
// UnmarshalPublic decodes a byte slice containing an OpenSSH public key
// into a public key. It supports RSA, ECDSA, and DSA keys.
func UnmarshalPublic(raw []byte) (key *SSHPublicKey, err error) {
	kb64 := pubkeyRegexp.ReplaceAll(raw, []byte("$1"))
	kb := make([]byte, base64.StdEncoding.DecodedLen(len(raw)))
	i, err := base64.StdEncoding.Decode(kb, kb64)
	if err != nil {
		return
	}
	kb = kb[:i]

	key = new(SSHPublicKey)

	if commentRegexp.Match(raw) {
		key.Comment = string(commentRegexp.ReplaceAll(raw, []byte("$3")))
		key.Comment = strings.TrimSpace(key.Comment)
	}

	switch {
	case bytes.HasPrefix(raw, []byte("ssh-rsa")):
		key.Type = KEY_RSA
		key.Key, err = parseRSAPublicKey(kb)
	case bytes.HasPrefix(raw, []byte("ecdsa")):
		key.Type = KEY_ECDSA
		key.Key, err = parseECDSAPublicKey(kb)
	case bytes.HasPrefix(raw, []byte("ssh-dss")):
		key.Type = KEY_DSA
		key.Key, err = parseDSAPublicKey(kb)
	default:
		key.Type = KEY_UNSUPPORTED
		err = ErrUnsupportedPublicKey
	}
	return
}

// Given a source file, extract keywords and values into the given map.
// The map must be filled with the keywords to look for.
// The keywords in the data must be on the form "keyword: value",
// and can be within single-line HTML comments (<!-- ... -->).
// Returns the data for the lines that do not contain any of the keywords.
func extractKeywords(data []byte, special map[string]string) []byte {
	bnl := []byte("\n")
	// Find and separate the lines starting with one of the keywords in the special map
	_, regular := filterIntoGroups(bytes.Split(data, bnl), func(byteline []byte) bool {
		// Check if the current line has one of the special keywords
		for keyword := range special {
			// Check for lines starting with the keyword and a ":"
			if bytes.HasPrefix(byteline, []byte(keyword+":")) {
				// Set (possibly overwrite) the value in the map, if the keyword is found.
				// Trim the surrounding whitespace and skip the letters of the keyword itself.
				special[keyword] = strings.TrimSpace(string(byteline)[len(keyword)+1:])
				return true
			}
			// Check for lines that start with "<!--", end with "-->" and contain the keyword and a ":"
			if bytes.HasPrefix(byteline, []byte("<!--")) && bytes.HasSuffix(byteline, []byte("-->")) {
				// Strip away the comment markers
				stripped := strings.TrimSpace(string(byteline[5 : len(byteline)-3]))
				// Check if one of the relevant keywords is present
				if strings.HasPrefix(stripped, keyword+":") {
					// Set (possibly overwrite) the value in the map, if the keyword is found.
					// Trim the surrounding whitespace and skip the letters of the keyword itself.
					special[keyword] = strings.TrimSpace(stripped[len(keyword)+1:])
					return true
				}
			}
		}
		// Not special
		return false
	})
	// Use the regular lines as the new data (remove the special lines)
	return bytes.Join(regular, bnl)
}

func TestKeyEncoding(t *testing.T) {
	tests := []*spb.Entry{
		entry(vname("sig", "corpus", "root", "path", "language"), "", nil, "fact", "value"),
		entry(vname("sig", "corpus", "root", "path", "language"), "someEdge",
			vname("anotherVName", "", "", "", ""), "/", ""),
		entry(vname(entryKeyPrefix, "~!@#$%^&*()_+`-={}|:;\"'?/>.<,", "", "", ""), "", nil, "/", ""),
	}

	for _, test := range tests {
		key, err := EncodeKey(test.Source, test.FactName, test.EdgeKind, test.Target)
		fatalOnErr(t, "Error encoding key: %v", err)

		if !bytes.HasPrefix(key, entryKeyPrefixBytes) {
			t.Fatalf("Key missing entry prefix: %q", string(key))
		}

		prefix, err := KeyPrefix(test.Source, test.EdgeKind)
		fatalOnErr(t, "Error creating key prefix: %v", err)
		if !bytes.HasPrefix(key, prefix) {
			t.Fatalf("Key missing KeyPrefix: %q %q", string(key), string(prefix))
		}

		entry, err := Entry(key, test.FactValue)
		fatalOnErr(t, "Error creating Entry from key: %v", err)
		if !proto.Equal(entry, test) {
			t.Errorf("Expected Entry: {%+v}; Got: {%+v}", test, entry)
		}
	}
}

func getRulesInfo(host string, timeout int) ([]byte, error) {
	conn, err := net.DialTimeout("udp", host, time.Duration(timeout)*time.Second)
	if err != nil {
		logger.LogSteamError(ErrHostConnection(err.Error()))
		return nil, ErrHostConnection(err.Error())
	}

	conn.SetDeadline(time.Now().Add(time.Duration(timeout-1) * time.Second))
	defer conn.Close()

	_, err = conn.Write(rulesChallengeReq)
	if err != nil {
		logger.LogSteamError(ErrDataTransmit(err.Error()))
		return nil, ErrDataTransmit(err.Error())
	}

	challengeNumResp := make([]byte, maxPacketSize)
	_, err = conn.Read(challengeNumResp)
	if err != nil {
		logger.LogSteamError(ErrDataTransmit(err.Error()))
		return nil, ErrDataTransmit(err.Error())
	}
	if !bytes.HasPrefix(challengeNumResp, expectedRulesRespHeader) {
		logger.LogSteamError(ErrChallengeResponse)
		return nil, ErrChallengeResponse
	}

	challengeNum := bytes.TrimLeft(challengeNumResp, headerStr)
	challengeNum = challengeNum[1:5]
	request := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x56}
	request = append(request, challengeNum...)

	_, err = conn.Write(request)
	if err != nil {
		logger.LogSteamError(ErrDataTransmit(err.Error()))
		return nil, ErrDataTransmit(err.Error())
	}

	var buf [maxPacketSize]byte
	numread, err := conn.Read(buf[:maxPacketSize])
	if err != nil {
		logger.LogSteamError(ErrDataTransmit(err.Error()))
		return nil, ErrDataTransmit(err.Error())
	}

	var rulesInfo []byte
	if bytes.HasPrefix(buf[:maxPacketSize], multiPacketRespHeader) {
		// handle multi-packet response
		first := buf[:maxPacketSize]
		first = first[:numread]
		rulesInfo, err = handleMultiPacketResponse(conn, first)
		if err != nil {
			logger.LogSteamError(ErrDataTransmit(err.Error()))
			return nil, ErrDataTransmit(err.Error())
		}
	} else {
		rulesInfo = make([]byte, numread)
		copy(rulesInfo, buf[:numread])
	}
	return rulesInfo, nil
}

func parseTag(source []byte) (tag interface{}) {
	defer func() {
		if e := recover(); e != nil {
			v := e.(*err.OpenErr)
			v.Text += "err parse tpl: " + string(source)
			err.Panic(v)
		}
	}()

	list := parser.SplitWord(source, 32)
	switch list[0] {
	// basic tags
	case "!":
	case "inc":
		tag = parseTagInclude(list[1:])
	case "i18n":
		tag = parseTagi18n(list[1:])
	case "for":
		tag = parseTagFor(list[1:])
	default:
		// a context variable,
		// or a function (extended tags)
		slice := []byte(list[0])
		if bytes.HasPrefix(slice, []byte(".")) {
			tag = tTagVar(list[0][1:])
		} else if bytes.HasPrefix(slice, []byte("@.")) {
			tag = tTagVarHtmlEsc(list[0][2:])
		} else {
			tag = parseTagFunc(list)
		}
	}
	return
}

// DecodeAbortCacheKey decodes the provided abort cache entry,
// returning the transaction ID.
func DecodeAbortCacheKey(key roachpb.Key, dest []byte) (*uuid.UUID, error) {
	// TODO(tschottdorf): redundant check.
	if !bytes.HasPrefix(key, LocalRangeIDPrefix) {
		return nil, util.Errorf("key %s does not have %s prefix", key, LocalRangeIDPrefix)
	}
	// Cut the prefix, the Range ID, and the infix specifier.
	b := key[len(LocalRangeIDPrefix):]
	b, _, err := encoding.DecodeUvarintAscending(b)
	if err != nil {
		return nil, err
	}
	b = b[1:]
	if !bytes.HasPrefix(b, LocalAbortCacheSuffix) {
		return nil, util.Errorf("key %s does not contain the abort cache suffix %s",
			key, LocalAbortCacheSuffix)
	}
	// Cut the abort cache suffix.
	b = b[len(LocalAbortCacheSuffix):]
	// Decode the id.
	b, idBytes, err := encoding.DecodeBytesAscending(b, dest)
	if err != nil {
		return nil, err
	}
	if len(b) > 0 {
		return nil, util.Errorf("key %q has leftover bytes after decode: %s; indicates corrupt key",
			key, b)
	}
	txnID, err := uuid.FromBytes(idBytes)
	return txnID, err
}

func isRelativeLink(link []byte) (yes bool) {
	// an anchor link begins with '#'
	if link[0] == '#' {
		return true
	}

	// a link beginning with '/' but not '//'; the latter may be a protocol-relative link
	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
		return true
	}

	// only the root '/'
	if len(link) == 1 && link[0] == '/' {
		return true
	}

	// current directory: begins with "./"
	if bytes.HasPrefix(link, []byte("./")) {
		return true
	}

	// parent directory: begins with "../"
	if bytes.HasPrefix(link, []byte("../")) {
		return true
	}

	return false
}

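// Hypothetical usage sketch for isRelativeLink above (assumes it is in scope
// in the same package and that "fmt" is imported): fragments, root-relative
// paths, and "./"/"../" paths count as relative, while absolute and
// protocol-relative URLs do not.
func demoIsRelativeLink() {
	for _, link := range []string{
		"#section", "/docs/", "./a.png", "../a.png",
		"//cdn.example.com/a.js", "https://example.com/",
	} {
		fmt.Printf("%-24s relative=%v\n", link, isRelativeLink([]byte(link)))
	}
}
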
// IdentifyRedHatRelease tries to identify derivatives of Red Hat Linux.
// It supports the following distributions:
//
//   - CentOS
//   - Red Hat Enterprise Linux
//
// If it fails to identify the platform, or the platform is not a derivative
// of Red Hat Linux, IdentifyRedHatRelease returns an ErrNotIdentified.
func IdentifyRedHatRelease() (*Info, error) {
	file, err := os.Open("/etc/redhat-release")
	if err != nil {
		if os.IsNotExist(err) {
			return nil, ErrNotIdentified
		}
		return nil, err
	}
	b, err := ioutil.ReadAll(file)
	if err != nil {
		return nil, err
	}

	var platform Name
	lb := bytes.ToLower(b)
	switch {
	case bytes.HasPrefix(lb, []byte("centos")):
		platform = PlatformCentOS
	case bytes.HasPrefix(lb, []byte("red hat enterprise")):
		platform = PlatformRHEL
	}

	version := ""
	if matches := regexp.MustCompile(`release (\d[\d.]*)`).FindSubmatch(b); len(matches) > 1 {
		version = string(matches[1])
	}

	return &Info{
		Platform: platform,
		Family:   FamilyRHEL,
		Version:  version,
	}, nil
}

func (rc *ResponseCache) decodeResponseCacheKey(encKey proto.EncodedKey) (proto.ClientCmdID, error) {
	ret := proto.ClientCmdID{}
	key, _, isValue := engine.MVCCDecodeKey(encKey)
	if isValue {
		return ret, util.Errorf("key %s is not a raw MVCC value", encKey)
	}
	if !bytes.HasPrefix(key, keys.LocalRangeIDPrefix) {
		return ret, util.Errorf("key %s does not have %s prefix", key, keys.LocalRangeIDPrefix)
	}
	// Cut the prefix and the Raft ID.
	b := key[len(keys.LocalRangeIDPrefix):]
	b, _ = encoding.DecodeUvarint(b)
	if !bytes.HasPrefix(b, keys.LocalResponseCacheSuffix) {
		return ret, util.Errorf("key %s does not contain the response cache suffix %s",
			key, keys.LocalResponseCacheSuffix)
	}
	// Cut the response cache suffix.
	b = b[len(keys.LocalResponseCacheSuffix):]
	// Now, decode the command ID.
	b, wt := encoding.DecodeUvarint(b)
	b, rd := encoding.DecodeUint64(b)
	if len(b) > 0 {
		return ret, util.Errorf("key %s has leftover bytes after decode: %s; indicates corrupt key",
			encKey, b)
	}
	ret.WallTime = int64(wt)
	ret.Random = int64(rd)
	return ret, nil
}

// Expected format: shallow <hash>
func decodeShallow(d *Decoder) decoderStateFn {
	if bytes.HasPrefix(d.line, deepen) {
		return decodeDeepen
	}

	if len(d.line) == 0 {
		return nil
	}

	if !bytes.HasPrefix(d.line, shallow) {
		d.error("unexpected payload while expecting a shallow: %q", d.line)
		return nil
	}
	d.line = bytes.TrimPrefix(d.line, shallow)

	hash, ok := d.readHash()
	if !ok {
		return nil
	}
	d.data.Shallows = append(d.data.Shallows, hash)

	if ok := d.nextLine(); !ok {
		return nil
	}

	return decodeShallow
}

// Read reads and decodes quoted-printable data from the underlying reader.
func (r *Reader) Read(p []byte) (int, error) {
	// Deviations from RFC 2045:
	// 1. in addition to "=\r\n", "=\n" is also treated as a soft line break.
	// 2. it will pass through a '\r' or '\n' not preceded by '=', consistent
	//    with other broken QP encoders & decoders.
	var n int
	var err error
	for len(p) > 0 {
		if len(r.line) == 0 {
			if err = r.Fn(); err != nil {
				return n, err
			}
			r.line, r.rerr = r.br.ReadSlice('\n')
			r.gerr.addUnrecover(r.rerr)

			// Does the line end in CRLF instead of just LF?
			hasLF := bytes.HasSuffix(r.line, lf)
			hasCR := bytes.HasSuffix(r.line, crlf)
			wholeLine := r.line
			r.line = bytes.TrimRightFunc(wholeLine, isQPDiscardWhitespace)
			if bytes.HasSuffix(r.line, softSuffix) {
				rightStripped := wholeLine[len(r.line):]
				r.line = r.line[:len(r.line)-1]
				if !bytes.HasPrefix(rightStripped, lf) && !bytes.HasPrefix(rightStripped, crlf) {
					r.rerr = fmt.Errorf("quotedprintable: invalid bytes after =: %q", rightStripped)
					r.gerr.add(r.rerr)
				}
			} else if hasLF {
				if hasCR {
					r.line = append(r.line, '\r', '\n')
				} else {
					r.line = append(r.line, '\n')
				}
			}
			continue
		}
		b := r.line[0]

		switch {
		case b == '=':
			b, err = readHexByte(r.line[1:])
			if err != nil {
				b = '='
				r.gerr.add(err)
				break // this modification allows bad email to be parsed too
				//return n, err
			}
			r.line = r.line[2:] // 2 of the 3; the other 1 is done below
		case b == '\t' || b == '\r' || b == '\n':
		case b < ' ' || b > '~':
			//return n, fmt.Errorf("quotedprintable: invalid unescaped byte 0x%02x in body", b)
			r.gerr.add(fmt.Errorf("quotedprintable: invalid unescaped byte 0x%02x in body", b))
		}
		p[0] = b
		p = p[1:]
		r.line = r.line[1:]
		n++
	}
	return n, r.Fn()
}

// readEvent reads a single binlog event. It can be a single comment line,
// or multiple lines terminated by the delimiter.
func (bls *BinlogStreamer) readEvent(bufReader *bufio.Reader) (event []byte, err error) {
	for {
		fragment, err := bufReader.ReadSlice('\n')
		event = append(event, fragment...)
		if err == bufio.ErrBufferFull {
			continue
		}
		if err != nil {
			if err == io.EOF {
				return event, err
			}
			return event, fmt.Errorf("read error: %v", err)
		}
		// Use a different var than event, because we have to keep
		// the trailing \n if we continue reading.
		trimmed := bytes.TrimSpace(event)
		if bytes.HasPrefix(trimmed, HASH_COMMENT) ||
			bytes.HasPrefix(trimmed, SLASH_COMMENT) ||
			bytes.HasPrefix(trimmed, DELIM_STMT) {
			return trimmed, nil
		}
		if bytes.HasSuffix(trimmed, bls.delim) {
			return bytes.TrimSpace(trimmed[:len(trimmed)-len(bls.delim)]), nil
		}
	}
}

// isDate detects if we see one of the following formats:
//	August 12, 2014
//	Aug 10, 2014 1:02 PM EDT
//	Sunday August 10 2014
//	Sunday, August 10, 2014 2:36 PM EDT
//	Monday, August 11, 2014 9:18:59 AM
//	Sat., Feb. 7, 2015 04:35 PM
//	Tue., Apr. 21, 2015 4:17 p.m.
func isDate(line []byte) bool {
	// Trim dots 'n periods
	line = bytes.Trim(line, "• .\u00a0")

	// check if it starts with a day or month
	dateStart := false
	for _, day := range daysOfWeek {
		if bytes.HasPrefix(line, day) {
			dateStart = true
			break
		}
	}
	if !dateStart {
		for _, day := range daysOfWeekShort {
			if bytes.HasPrefix(line, day) {
				dateStart = true
				break
			}
		}
	}
	if !dateStart {
		for _, month := range months {
			if bytes.HasPrefix(line, month) {
				dateStart = true
				break
			}
		}
	}
	if !dateStart {
		return false
	}

	// check if it ends with a timezone/daytime/year
	dateEnd := false
	for _, ap := range amPM {
		if bytes.HasSuffix(line, ap) {
			dateEnd = true
			break
		}
	}
	if !dateEnd {
		// newshound started in 2012. adjust if you want older data
		for i := 2012; i <= time.Now().Year(); i++ {
			if bytes.HasSuffix(line, []byte(strconv.Itoa(i))) {
				dateEnd = true
				break
			}
		}
	}
	if !dateEnd {
		for _, zone := range timezones {
			if bytes.HasSuffix(line, zone) {
				dateEnd = true
				break
			}
		}
	}
	return dateEnd
}

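// Hypothetical usage sketch for isDate above (not part of the original
// package). It assumes the package-level tables the function consults
// (daysOfWeek, daysOfWeekShort, months, amPM, timezones) are populated with
// entries such as []byte("Sunday"), []byte("August"), []byte("PM"), and
// []byte("EDT"), and that "fmt" is imported.
func demoIsDate() {
	fmt.Println(isDate([]byte("Sunday, August 10, 2014 2:36 PM EDT"))) // true, given tables as above
	fmt.Println(isDate([]byte("Breaking news: markets rally")))        // false: no leading day or month
}
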
// stripComments strips comments, returning the number of comments stripped.
func stripComments(wikitext []byte) (out []byte, n int) {
	var i int
	out = make([]byte, 0, len(wikitext))
top:
	for i = 0; i < len(wikitext); i++ {
		if bytes.HasPrefix(wikitext[i:], commentLeft) {
			goto strip
		}
	}
	out = append(out, wikitext...) // add what's left
	return

strip:
	n++
	out = append(out, wikitext[:i]...)
	wikitext = wikitext[i+4:] // skip <!--
	for i = 0; i < len(wikitext); i++ {
		if bytes.HasPrefix(wikitext[i:], commentRight) {
			goto endstrip
		}
	}
	return // unclosed comment; automatically close it

endstrip:
	wikitext = wikitext[i+3:] // skip -->
	goto top

	panic("unreachable") // please the compiler
}

// Addr returns the address for the key, used to lookup the range containing
// the key. In the normal case, this is simply the key's value. However, for
// local keys, such as transaction records and range-spanning binary tree node
// pointers, the address is the inner encoded key, with the local key prefix
// and the suffix and optional detail removed. This address unwrapping is
// performed repeatedly in the case of doubly-local keys. In this way, local
// keys address to the same range as non-local keys, but are stored separately
// so that they don't collide with user-space or global system keys.
//
// However, not all local keys are addressable in the global map. Only range
// local keys incorporating a range key (start key or transaction key) are
// addressable (e.g. range metadata and txn records). Range local keys
// incorporating the Range ID are not (e.g. abort cache entries and range
// stats).
func Addr(k roachpb.Key) (roachpb.RKey, error) {
	if !IsLocal(k) {
		return roachpb.RKey(k), nil
	}

	for {
		if bytes.HasPrefix(k, localStorePrefix) {
			return nil, errors.Errorf("store-local key %q is not addressable", k)
		}
		if bytes.HasPrefix(k, LocalRangeIDPrefix) {
			return nil, errors.Errorf("local range ID key %q is not addressable", k)
		}
		if !bytes.HasPrefix(k, LocalRangePrefix) {
			return nil, errors.Errorf("local key %q malformed; should contain prefix %q",
				k, LocalRangePrefix)
		}
		k = k[len(LocalRangePrefix):]
		var err error
		// Decode the encoded key, throw away the suffix and detail.
		if _, k, err = encoding.DecodeBytesAscending(k, nil); err != nil {
			return nil, err
		}
		if !bytes.HasPrefix(k, localPrefix) {
			break
		}
	}
	return roachpb.RKey(k), nil
}

// InitialValidationB is like InitialValidation but for byte array inputs.
func InitialValidationB(metric_id []byte, version metricVersion) error {
	if version == Legacy {
		if bytes.Contains(metric_id, doubleDot) {
			return fmt.Errorf("metric '%s' has an empty node", metric_id)
		}
		return ValidateSensibleCharsB(metric_id)
	}

	if version == M20 {
		if bytes.Contains(metric_id, m20Is) {
			return fmt.Errorf("metric '%s' has both = and _is_", metric_id)
		}
		if !bytes.HasPrefix(metric_id, m20UnitPre) && !bytes.Contains(metric_id, m20UnitMid) {
			return fmt.Errorf("metric '%s' has no unit tag", metric_id)
		}
		if !bytes.HasPrefix(metric_id, m20TTPre) && !bytes.Contains(metric_id, m20TTMid) {
			return fmt.Errorf("metric '%s' has no target_type tag", metric_id)
		}
	} else { // version == M20NoEquals
		if bytes.Contains(metric_id, m20NEIS) {
			return fmt.Errorf("metric '%s' has both = and _is_", metric_id)
		}
		if !bytes.HasPrefix(metric_id, m20NEUnitPre) && !bytes.Contains(metric_id, m20NEUnitMid) {
			return fmt.Errorf("metric '%s' has no unit tag", metric_id)
		}
		if !bytes.HasPrefix(metric_id, m20NETTPre) && !bytes.Contains(metric_id, m20NETTMid) {
			return fmt.Errorf("metric '%s' has no target_type tag", metric_id)
		}
	}

	if bytes.Count(metric_id, dot) < 2 {
		return fmt.Errorf("metric '%s': must have at least one tag_k/tag_v pair beyond unit and target_type", metric_id)
	}
	return nil
}

// skipSpaceOrComment returns data with any leading spaces or comments removed.
func skipSpaceOrComment(data []byte) []byte {
	for len(data) > 0 {
		switch data[0] {
		case ' ', '\t', '\r', '\n':
			data = data[1:]
			continue
		case '/':
			if bytes.HasPrefix(data, slashSlash) {
				i := bytes.Index(data, newline)
				if i < 0 {
					return nil
				}
				data = data[i+1:]
				continue
			}
			if bytes.HasPrefix(data, slashStar) {
				data = data[2:]
				i := bytes.Index(data, starSlash)
				if i < 0 {
					return nil
				}
				data = data[i+2:]
				continue
			}
		}
		break
	}
	return data
}

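// Hypothetical usage sketch for skipSpaceOrComment above. The package-level
// byte slices it consults are presumably defined as below (an assumption
// inferred from how they are used); they are re-declared here only to make the
// sketch self-contained. Assumes "bytes" and "fmt" are imported.
var (
	slashSlash = []byte("//")
	slashStar  = []byte("/*")
	starSlash  = []byte("*/")
	newline    = []byte("\n")
)

func demoSkipSpaceOrComment() {
	src := []byte("  // a line comment\n/* a block comment */ package main")
	fmt.Printf("%s\n", skipSpaceOrComment(src)) // prints "package main"
}
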
func (options *xml) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte, subfigure bool) {
	// Use title as caption if we have it and wrap everything in a figure;
	// check the extension of the local include to set the type of the thing.
	if options.para {
		// close it
		out.WriteString("</t>")
		defer out.WriteString("<t>")
	}
	// if subfigure, no <figure>

	s := options.inlineAttr().String()
	if bytes.HasPrefix(link, []byte("http://")) || bytes.HasPrefix(link, []byte("https://")) {
		// link to external entity
		out.WriteString("<artwork" + s)
		out.WriteString(" alt=\"")
		out.Write(alt)
		out.WriteString("\"")
		out.WriteString(" src=\"")
		out.Write(link)
		out.WriteString("\"/>")
	} else {
		// local file, xi:include it
		out.WriteString("<artwork" + s)
		out.WriteString(" alt=\"")
		out.Write(alt)
		out.WriteString("\">")
		out.WriteString("<xi:include href=\"")
		out.Write(link)
		out.WriteString("\"/>\n")
		out.WriteString("</artwork>\n")
	}
}

func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) {
	if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) && kind != LINK_TYPE_EMAIL {
		// mark it but don't link it if it is not a safe link: no smartypants
		out.WriteString("<tt>")
		options.attrEscape(out, link)
		out.WriteString("</tt>")
		return
	}

	out.WriteString("<a href=\"")
	if kind == LINK_TYPE_EMAIL {
		out.WriteString("mailto:")
	}
	options.attrEscape(out, link)
	out.WriteString("\">")

	// Pretty print: if we get an email address as
	// an actual URI, e.g. `mailto:foo@bar.com`, we don't
	// want to print the `mailto:` prefix
	switch {
	case bytes.HasPrefix(link, []byte("mailto://")):
		options.attrEscape(out, link[len("mailto://"):])
	case bytes.HasPrefix(link, []byte("mailto:")):
		options.attrEscape(out, link[len("mailto:"):])
	default:
		options.attrEscape(out, link)
	}

	out.WriteString("</a>")
}

func (options *html) ListItem(out *bytes.Buffer, text []byte, flags int) {
	if flags&_LIST_ITEM_CONTAINS_BLOCK != 0 || flags&_LIST_ITEM_BEGINNING_OF_LIST != 0 {
		doubleSpace(out)
	}
	if flags&_LIST_TYPE_DEFINITION != 0 && flags&_LIST_TYPE_TERM == 0 {
		out.WriteString("<dd>")
		out.Write(text)
		out.WriteString("</dd>\n")
		return
	}
	if flags&_LIST_TYPE_TERM != 0 {
		out.WriteString("<dt>")
		out.Write(text)
		out.WriteString("</dt>")
		return
	}

	// task lists
	switch {
	case bytes.HasPrefix(text, []byte("[ ] ")):
		text = append([]byte(`<input type="checkbox" disabled="">`), text[3:]...)
	case bytes.HasPrefix(text, []byte("[x] ")) || bytes.HasPrefix(text, []byte("[X] ")):
		text = append([]byte(`<input type="checkbox" checked="" disabled="">`), text[3:]...)
	}

	out.WriteString("<li>")
	out.Write(text)
	out.WriteString("</li>\n")
}