func forwardMsg(msg protocol.Message, node nodes.Nodes, readonly bool) (msgAck protocol.Message) {
	var (
		clusterConnPool connection.Pool
		isMaster        bool
	)
	if readonly {
		clusterConnPool, isMaster = node.GetRandom()
	} else {
		clusterConnPool = node.GetMaster()
		isMaster = true
	}
	msgAck = forwardMsgToPool(msg, clusterConnPool, isMaster, false)
	if msgAck.GetProtocolType() == protocol.ErrorsStringsType {
		var msgAckBytesValueSplit [][]byte = bytes.Fields(msgAck.GetBytesValue())
		if len(msgAckBytesValueSplit) == 3 {
			switch {
			case bytes.EqualFold(msgAckBytesValueSplit[0], MOVED):
				msgAck = forwardMsgToPool(msg, cluster.GetClusterParameter().GetNodePool(string(msgAckBytesValueSplit[2])), true, false)
			case bytes.EqualFold(msgAckBytesValueSplit[0], ASK):
				msgAck = forwardMsgToPool(msg, cluster.GetClusterParameter().GetNodePool(string(msgAckBytesValueSplit[2])), true, true)
			}
		}
	}
	return
}
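The redirect handling above relies on the shape of Redis Cluster error replies such as "MOVED 3999 127.0.0.1:6381" and "ASK 3999 127.0.0.1:6381": bytes.Fields yields three fields and the third is the address of the node to retry against. A small illustrative sketch (the reply text is a sample, not taken from this codebase; assumes the usual bytes and fmt imports):

// demoRedirectReply shows how a cluster redirect error splits into fields.
func demoRedirectReply() {
	reply := []byte("MOVED 3999 127.0.0.1:6381")
	fields := bytes.Fields(reply)
	fmt.Println(len(fields))       // 3
	fmt.Println(string(fields[0])) // MOVED
	fmt.Println(string(fields[2])) // 127.0.0.1:6381 (node that owns the slot)
}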
func (a loginAuth) Next(fromServer []byte, more bool) ([]byte, error) {
	if more {
		if bytes.EqualFold([]byte("username:"), fromServer) {
			return []byte(a.username), nil
		}
		if bytes.EqualFold([]byte("password:"), fromServer) {
			return []byte(a.password), nil
		}
	}
	return nil, nil
}
func findDownloadURL(relTo url.URL, r io.Reader) (string, error) {
	z := html.NewTokenizer(r)
Outer:
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			if z.Err() == io.EOF {
				break
			}
			Log.Error("parse page", "error", z.Err())
			return "", z.Err()
		}
		if tt != html.StartTagToken {
			continue
		}
		tn, hasAttr := z.TagName()
		if !(hasAttr && bytes.EqualFold(tn, []byte("input"))) {
			continue
		}
		var onClick string
		for {
			key, val, more := z.TagAttr()
			if bytes.EqualFold(key, []byte("name")) && !bytes.Equal(val, []byte("letolt")) {
				continue Outer
			}
			if bytes.EqualFold(key, []byte("onclick")) {
				onClick = string(val)
				if i := strings.IndexAny(onClick, `'"`); i >= 0 {
					sep := onClick[i]
					onClick = onClick[i+1:]
					if i = strings.LastIndex(onClick, string(sep)); i >= 0 {
						onClick = onClick[:i]
					}
				}
				nxt, err := url.Parse(onClick)
				if err != nil {
					return onClick, err
				}
				relTo := &relTo
				return relTo.ResolveReference(nxt).String(), nil
			}
			if !more {
				break
			}
		}
	}
	return "", errors.New("no download URL found")
}
// needFullReSync reports whether a full resync is required; if a partial
// resync is possible, it returns false together with the requested sync offset.
func (h *Handler) needFullReSync(c *conn, args [][]byte) (bool, int64) {
	masterRunID := args[0]
	if !bytes.EqualFold(masterRunID, h.runID) {
		if !bytes.Equal(masterRunID, []byte{'?'}) {
			log.Infof("Partial resynchronization not accepted, runid mismatch, server is %s, but client is %s", h.runID, masterRunID)
		} else {
			log.Infof("Full resync requested by slave.")
		}
		return true, 0
	}

	syncOffset, err := strconv.ParseInt(string(args[1]), 10, 64)
	if err != nil {
		log.Errorf("PSYNC parse sync offset err, try full resync - %s", err)
		return true, 0
	}

	r := &h.repl
	h.repl.RLock()
	defer h.repl.RUnlock()

	if r.backlogBuf == nil || syncOffset < r.backlogOffset ||
		syncOffset > (r.backlogOffset+int64(r.backlogBuf.Len())) {
		log.Infof("unable to partial resync with the slave for lack of backlog, slave offset %d", syncOffset)
		if syncOffset > r.masterOffset {
			log.Infof("slave tried to PSYNC with an offset %d larger than master offset %d", syncOffset, r.masterOffset)
		}
		return true, 0
	}
	return false, syncOffset
}
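The backlog check above boils down to a window test: a partial resync is only possible while the requested offset still falls inside [backlogOffset, backlogOffset+backlogLen]. A minimal standalone sketch of that condition (the helper and its parameters are illustrative, not the handler's actual fields):

// withinBacklog mirrors the window test above: the slave's offset must not
// have fallen behind the start of the backlog and must not be ahead of the
// data the master has buffered.
func withinBacklog(syncOffset, backlogOffset, backlogLen int64) bool {
	return syncOffset >= backlogOffset && syncOffset <= backlogOffset+backlogLen
}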
func (f *backRefNodeFiber) Resume() (output, error) {
	if f.cnt == 0 {
		f.cnt++
		var b []byte
		if f.node.Index > 0 {
			if r, ok := f.I.sub.i[f.node.Index]; ok {
				b = r.b
			}
		} else if len(f.node.Name) > 0 {
			if r, ok := f.I.sub.n[f.node.Name]; ok {
				b = r.b
			}
		}
		l := len(b)
		if l > len(f.I.b) {
			l = len(f.I.b)
		}
		if f.node.Flags&syntax.FoldCase != 0 && bytes.EqualFold(b, f.I.b[:l]) {
			return output{offset: l}, nil
		} else if bytes.Equal(b, f.I.b[:l]) {
			return output{offset: l}, nil
		}
	}
	return output{}, errDeadFiber
}
func ParseFeed(c appengine.Context, contentType, origUrl, fetchUrl string, body []byte) (*Feed, []*Story, error) {
	cr := defaultCharsetReader
	if len(body) < len(xml.Header) || !bytes.EqualFold(body[:len(xml.Header)], []byte(xml.Header)) {
		enc, err := encodingReader(body, contentType)
		if err != nil {
			return nil, nil, err
		}
		if enc != encoding.Nop {
			cr = nilCharsetReader
			body, err = ioutil.ReadAll(transform.NewReader(bytes.NewReader(body), enc.NewDecoder()))
			if err != nil {
				return nil, nil, err
			}
		}
	}

	var feed *Feed
	var stories []*Story
	var atomerr, rsserr, rdferr error

	feed, stories, atomerr = parseAtom(c, body, cr)
	if feed == nil {
		feed, stories, rsserr = parseRSS(c, body, cr)
	}
	if feed == nil {
		feed, stories, rdferr = parseRDF(c, body, cr)
	}
	if feed == nil {
		c.Warningf("atom parse error: %s", atomerr.Error())
		c.Warningf("xml parse error: %s", rsserr.Error())
		c.Warningf("rdf parse error: %s", rdferr.Error())
		return nil, nil, fmt.Errorf("Could not parse feed data")
	}

	feed.Url = origUrl
	return parseFix(c, feed, stories, fetchUrl)
}
func equalFold(a, b []byte) {
	if bytes.EqualFold(a, b) {
		log.Printf("%s and %s are equal", a, b)
	} else {
		log.Printf("%s and %s are NOT equal", a, b)
	}
}
func (lm *LocationMuxer) longestMatchingPath(path string) *types.Location {
	pathAsPrefix := patricia.Prefix(path)
	var matchSoFar patricia.Prefix
	var matchedItem *types.Location
	err := lm.locationTrie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
		if len(prefix) > len(pathAsPrefix) {
			return patricia.SkipSubtree
		} else if len(prefix) > len(matchSoFar) && bytes.EqualFold(prefix, pathAsPrefix[:len(prefix)]) {
			exactMatch := len(prefix) == len(pathAsPrefix)
			matchedLocation := item.(*types.Location)
			if isLocationType(matchedLocation, exact) && !exactMatch {
				return nil
			}
			matchedItem = matchedLocation
			matchSoFar = prefix
			if exactMatch {
				return errExactMatch // not an error, just so the search is canceled
			}
		}
		return nil
	})
	if err != nil && err != errExactMatch {
		panic(err) // an impossible error
	}
	return matchedItem
}
func TestFoldAgainstUnicode(t *testing.T) {
	const bufSize = 5
	buf1 := make([]byte, 0, bufSize)
	buf2 := make([]byte, 0, bufSize)
	var runes []rune
	for i := 0x20; i <= 0x7f; i++ {
		runes = append(runes, rune(i))
	}
	runes = append(runes, kelvin, smallLongEss)

	funcs := []struct {
		name   string
		fold   func(s, t []byte) bool
		letter bool // must be ASCII letter
		simple bool // must be simple ASCII letter (not 'S' or 'K')
	}{
		{
			name: "equalFoldRight",
			fold: equalFoldRight,
		},
		{
			name:   "asciiEqualFold",
			fold:   asciiEqualFold,
			simple: true,
		},
		{
			name:   "simpleLetterEqualFold",
			fold:   simpleLetterEqualFold,
			simple: true,
			letter: true,
		},
	}

	for _, ff := range funcs {
		for _, r := range runes {
			if r >= utf8.RuneSelf {
				continue
			}
			if ff.letter && !isASCIILetter(byte(r)) {
				continue
			}
			if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') {
				continue
			}
			for _, r2 := range runes {
				buf1 := append(buf1[:0], 'x')
				buf2 := append(buf2[:0], 'x')
				buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)]
				buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)]
				buf1 = append(buf1, 'x')
				buf2 = append(buf2, 'x')
				want := bytes.EqualFold(buf1, buf2)
				if got := ff.fold(buf1, buf2); got != want {
					t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want)
				}
			}
		}
	}
}
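The extra runes appended above (kelvin and smallLongEss, presumably U+212A KELVIN SIGN and U+017F LATIN SMALL LETTER LONG S) are exactly why 's', 'S', 'k' and 'K' are excluded for the simple folders: under the simple Unicode case-folding that bytes.EqualFold uses, those two runes fold together with the ASCII letters. A small sketch (assumes the usual bytes and fmt imports):

// demoSpecialFolds: the Kelvin sign and the long s are case-fold-equal to
// ASCII 'k' and 's', so ASCII-only folders must treat those letters specially.
func demoSpecialFolds() {
	fmt.Println(bytes.EqualFold([]byte("K"), []byte("\u212a"))) // true
	fmt.Println(bytes.EqualFold([]byte("s"), []byte("\u017f"))) // true
}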
// compare two clients: name and network connection
func (c *ClientChat) Equal(cl *ClientChat) bool {
	if bytes.EqualFold([]byte(c.Name), []byte(cl.Name)) {
		if c.Con == cl.Con {
			return true
		}
	}
	return false
}
func getBase(ctx context.Context, page string, client *http.Client) (string, error) {
	resp, err := client.Get(page)
	if err != nil {
		Log.Error("get start page", "url", page, "error", err)
		return "", err
	}
	defer resp.Body.Close()
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	default:
	}
	z := html.NewTokenizer(resp.Body)
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			if z.Err() == io.EOF {
				break
			}
			Log.Error("parse page", "error", z.Err())
			return "", z.Err()
		}
		if tt != html.StartTagToken {
			continue
		}
		tn, hasAttr := z.TagName()
		if !(hasAttr && bytes.EqualFold(tn, []byte("iframe"))) {
			continue
		}
		for {
			key, val, more := z.TagAttr()
			if bytes.EqualFold(key, []byte("src")) {
				return string(val), nil
			}
			if !more {
				break
			}
		}
	}
	return "", errgo.Notef(errgo.New("no iframe found"), "page="+page)
}
func (lr *LDIFReader) ReadLDIFEntry() (LDIFRecord, *Error) {
	if lr.NoMoreEntries {
		return nil, nil
	}
	ldiflines, err := lr.readLDIFEntryIntoSlice()
	if err != nil {
		return nil, err
	}
	if ldiflines == nil {
		return nil, nil
	}
	if len(ldiflines[0]) >= 7 && bytes.EqualFold(ldiflines[0][:7], []byte("version")) {
		lr.Version = string(versionRegex.Find(ldiflines[0]))
		return lr.ReadLDIFEntry()
	}
	if len(ldiflines[0]) >= 7 && bytes.EqualFold(ldiflines[0][:7], []byte("charset")) {
		lr.Charset = string(charsetRegex.Find(ldiflines[0]))
		return lr.ReadLDIFEntry()
	}
	return sliceToLDIFRecord(ldiflines)
}
// clientsender reads from stdin and sends the input over the network connection.
func clientsender(cn net.Conn) {
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Print("you> ")
		input, _ := reader.ReadBytes('\n')
		if bytes.EqualFold(input, []byte("/quit\n")) {
			cn.Write([]byte("/quit"))
			running = false
			break
		}
		Log("clientsender(): send: ", string(input[0:len(input)-1]))
		cn.Write(input[0 : len(input)-1])
	}
}
func (f *literalNodeFiber) Resume() (output, error) {
	if f.cnt == 0 {
		f.cnt++
		l := len(f.node.L)
		if l > len(f.I.b) {
			l = len(f.I.b)
		}
		if f.node.Flags&syntax.FoldCase != 0 && bytes.EqualFold(f.node.L, f.I.b[:l]) {
			return output{offset: l}, nil
		} else if bytes.Equal(f.node.L, f.I.b[:l]) {
			return output{offset: l}, nil
		}
	}
	return output{}, errDeadFiber
}
func processEventConnection(c io.ReadWriteCloser, decodeEvent bool, handler ipcEventHandler) {
	// Test the connection type by reading the magic header.
	magicBuf := make([]byte, 4)
	magic, err := readMagicHeader(c, magicBuf)
	if err != nil {
		if err != io.EOF {
			glog.Errorf("Failed to read magic header for error:%v from %v", err, c)
		}
	} else {
		if bytes.Equal(magic, MAGIC_EVENT_HEADER) {
			processSSFEventConnection(c, decodeEvent, handler)
		} else if bytes.EqualFold(magic, MAGIC_OTSC_HEADER) {
			ots.ProcessTroubleShooting(c)
		} else {
			glog.Errorf("Invalid magic header:%s", string(magic))
			c.Close()
		}
	}
}
// UnmarshalFromConfig stores the data in config in the value pointed to by v.
// v must be a pointer to a struct.
//
// UnmarshalFromConfig matches incoming keys to either the struct field name
// or its tag, preferring an exact match but also accepting
// a case-insensitive match.
// Only exported fields can be populated. The tag value "-" is used to skip
// a field.
//
// If the type indicated in the struct field does not match the type in the
// config, the field is skipped, e.g. when the field type is int but the
// config data contains a non-numerical string value.
func UnmarshalFromConfig(c *Config, v interface{}) error {
	// Check that the type v we will populate is a struct
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr || rv.Elem().Kind() != reflect.Struct {
		return errors.New("cfg: interface must be a pointer to struct")
	}
	// Dereference the pointer if it is one
	rv = rv.Elem()

	// Loop through all fields of the struct
	for i := 0; i < rv.NumField(); i++ {
		fv := rv.Field(i)        // Save the Value of the field
		sf := rv.Type().Field(i) // Save the StructField of the field

		// Check if the field should be skipped
		if sf.PkgPath != "" { // unexported
			continue
		}
		tag := sf.Tag.Get(tagKey)
		if tag == "-" {
			continue
		}

		// Loop through all keys and match them against the field,
		// setting the value if it matches.
		for key := range c.values {
			// Check that the tag, or the name case-insensitively, matches;
			// if not, go on to the next key.
			if key != tag && !bytes.EqualFold([]byte(key), []byte(sf.Name)) {
				continue
			}
			err := setValue(&fv, c, key)
			if err != nil {
				return fmt.Errorf("cfg: error setting field value: %s", err)
			}
		}
	}
	return nil
}
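A minimal, standalone sketch of the key-matching rule described in the doc comment above (the helper and the sample keys are hypothetical, not part of the cfg package; assumes the usual bytes and fmt imports): a config key selects a field when it equals the tag exactly or matches the field name case-insensitively.

// matchesField is a hypothetical illustration of UnmarshalFromConfig's
// key-matching rule: exact tag match, or case-insensitive field-name match.
func matchesField(key, tag, fieldName string) bool {
	return key == tag || bytes.EqualFold([]byte(key), []byte(fieldName))
}

func demoMatchesField() {
	fmt.Println(matchesField("listen_addr", "listen_addr", "ListenAddr")) // true: exact tag match
	fmt.Println(matchesField("listenaddr", "listen_addr", "ListenAddr"))  // true: case-insensitive name match
	fmt.Println(matchesField("timeout", "listen_addr", "ListenAddr"))     // false: no match
}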
// clientreceiver is the goroutine spun up by clientHandling to manage incoming
// client traffic and terminate upon receipt of the /quit command from the client.
// It prints the client's string data and then forwards that data
// to the handlingINOUT routine via a channel notifier.
func clientreceiver(client *ClientChat) {
	buf := make([]byte, 2048)
	Log("clientreceiver(): start for: ", client.Name)
	for client.Read(buf) {
		if bytes.EqualFold(buf, []byte("/quit")) {
			client.Close()
			break
		}
		Log("clientreceiver(): received from ", client.Name, " (", string(buf), ")")
		send := client.Name + "> " + string(buf)
		client.OUT <- send
		for i := 0; i < 2048; i++ {
			buf[i] = 0x00
		}
	}
	client.OUT <- client.Name + " has left chat"
	Log("clientreceiver(): stop for: ", client.Name)
}
// read, [match, seek] till empty. repeat. validate
func (s *subset) findParts() (err error) {
	if s.current == nil {
		s.current = &part{offset: []int64{s.offsets[0], s.offsets[1]}, n: len(s.parts)}
	}
	s.Read()
	if s.read[1] > 0 {
		if bytes.EqualFold(s.bufA(), s.bufB()) {
			s.current.length += int64(s.read[0])
			s.offsets[0] += int64(s.read[0])
			s.offsets[1] += int64(s.read[1])
		} else {
			s.overlap()
			s.seak()
		}
		return s.findParts()
	}
	if s.current.length > 0 {
		s.parts = append(s.parts, s.current)
	}
	return
}
func (self *Decoder) find_map_key(generic_map map[interface{}]interface{}, keyname string) (res interface{}, ok bool) {
	if len(keyname) == 0 || keyname == "-" {
		return nil, false
	}
	for key, value := range generic_map {
		switch key.(type) {
		case string:
			if key.(string) == keyname {
				return value, true
			}
		case []byte:
			if bytes.Equal(key.([]byte), []byte(keyname)) {
				return value, true
			}
		default:
			if fmt.Sprintf("%v", key) == keyname {
				return value, true
			}
		}
	}
	for key, value := range generic_map {
		switch key.(type) {
		case string:
			if strings.EqualFold(key.(string), keyname) {
				return value, true
			}
		case []byte:
			if bytes.EqualFold(key.([]byte), []byte(keyname)) {
				return value, true
			}
		default:
			if strings.EqualFold(fmt.Sprintf("%v", key), keyname) {
				return value, true
			}
		}
	}
	return nil, false
}
// indexTagEnd finds the index of a special tag end in a case insensitive way, or returns -1
func indexTagEnd(s []byte, tag []byte) int {
	res := 0
	plen := len(specialTagEndPrefix)
	for len(s) > 0 {
		// Try to find the tag end prefix first
		i := bytes.Index(s, specialTagEndPrefix)
		if i == -1 {
			return i
		}
		s = s[i+plen:]
		// Try to match the actual tag if there is still space for it
		if len(tag) <= len(s) && bytes.EqualFold(tag, s[:len(tag)]) {
			s = s[len(tag):]
			// Check the tag is followed by a proper separator
			if len(s) > 0 && bytes.IndexByte(tagEndSeparators, s[0]) != -1 {
				return res + i
			}
			res += len(tag)
		}
		res += i + plen
	}
	return -1
}
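A hedged usage sketch for indexTagEnd: it assumes the package-level values it references are defined along the lines of specialTagEndPrefix = []byte("</") and tagEndSeparators = []byte("> \t\n\f/") (this mirrors how html/template defines them). With that, the call below finds a closing script tag regardless of case; the source string is illustrative.

// demoIndexTagEnd prints the index where the case-insensitive "</script"
// tag end begins in src, or -1 if there is none (assumes fmt is imported).
func demoIndexTagEnd() {
	src := []byte(`var s = "hi";</SCRIPT><p>rest`)
	fmt.Println(indexTagEnd(src, []byte("script")))
}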
/*
Parameters:
	a, b  the byte slices to compare
Return value:
	bool  true if a and b are equal under Unicode case-folding
Description:
	EqualFold interprets a and b as UTF-8 encoded strings and reports whether
	they are equal under simple Unicode case-folding, a more general form of
	case-insensitive comparison.
*/
func main() {
	fmt.Println(bytes.EqualFold([]byte("abc"), []byte("abc")))
	fmt.Println(bytes.EqualFold([]byte("abc"), []byte("abd")))
	fmt.Println(bytes.EqualFold([]byte("abc"), []byte("aBc")))
}
func ExampleEqualFold() {
	fmt.Println(bytes.EqualFold([]byte("Go"), []byte("go")))
	// Output: true
}
func Compare(a, b []byte) (ok bool) {
	return bytes.EqualFold(a, b)
}
func (t *Token) ValueEqual(val []byte) bool {
	return bytes.EqualFold(t.Value, val)
}
func HasPrefixFold(input []byte, prefix string) bool {
	return len(input) >= len(prefix) && bytes.EqualFold(input[:len(prefix)], []byte(prefix))
}
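A brief usage sketch of HasPrefixFold, showing the case-insensitive prefix check on header-like input; the sample strings are illustrative and the usual bytes and fmt imports are assumed.

func ExampleHasPrefixFold() {
	fmt.Println(HasPrefixFold([]byte("Content-Type: text/html"), "content-type"))
	fmt.Println(HasPrefixFold([]byte("X-Request-ID: 42"), "content-type"))
	// Output:
	// true
	// false
}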
func sliceToLDIFRecord(lines [][]byte) (LDIFRecord, *Error) {
	var dn string
	var dataLineStart int // index of the first data line, after dn/controls/changetype
	controls := make([]Control, 0)
	recordtype := EntryRecord
LINES:
	for i, line := range lines {
		attrName, value, _, err := findAttrAndValue(line)
		if err != nil {
			return nil, err
		}
		switch {
		case i == 0 && bytes.EqualFold(attrName, []byte("dn")):
			dn = string(value)
			continue LINES
		case i == 0 && !bytes.EqualFold(attrName, []byte("dn")):
			return nil, NewError(ErrorLDIFRead, errors.New("'dn:' not at the start of line in LDIF record"))
		case bytes.EqualFold(attrName, []byte("changetype")):
			switch strings.ToLower(string(value)) { // check the record type, if one.
			case "add":
				recordtype = AddRecord
			case "modify":
				recordtype = ModifyRecord
			case "moddn":
				recordtype = ModDnRecord
			case "modrdn":
				recordtype = ModRdnRecord
			case "delete":
				recordtype = DeleteRecord
			}
			continue LINES
		case bytes.EqualFold(attrName, []byte("control")):
			// TODO handle controls
			continue LINES
		}
		dataLineStart = i
		break
	}

	// TODO - add the missing record types
	unsupportedError := NewError(ErrorLDIFRead, errors.New("Unsupported LDIF record type"))

	switch recordtype {
	case AddRecord:
		addEntry, err := ldifLinesToEntryRecord(dn, lines[dataLineStart:])
		if err != nil {
			return nil, err
		}
		addRequest := AddRequest{Entry: addEntry, Controls: controls}
		if LDIFDebug {
			log.Printf("dn: %s, changetype: %d, datastart: %d\n", dn, AddRecord, dataLineStart)
		}
		return &addRequest, nil
	case ModifyRecord:
		if LDIFDebug {
			log.Printf("dn: %s, changetype: %d, datastart: %d\n", dn, ModifyRecord, dataLineStart)
		}
		modRequest, err := ldifLinesToModifyRecord(dn, lines[dataLineStart:])
		if err != nil {
			return nil, err
		}
		modRequest.Controls = controls
		return modRequest, nil
	case ModDnRecord:
		if LDIFDebug {
			log.Printf("dn: %s, changetype: %d, datastart: %d\n", dn, ModDnRecord, dataLineStart)
		}
		return nil, unsupportedError
	case ModRdnRecord:
		if LDIFDebug {
			log.Printf("dn: %s, changetype: %d, datastart: %d\n", dn, ModRdnRecord, dataLineStart)
		}
		return nil, unsupportedError
	case DeleteRecord:
		if LDIFDebug {
			log.Printf("dn: %s, changetype: %d, datastart: %d\n", dn, DeleteRecord, dataLineStart)
		}
		deleteRequest := NewDeleteRequest(dn)
		for _, control := range controls {
			deleteRequest.AddControl(control)
		}
		return deleteRequest, nil
	case EntryRecord:
		if LDIFDebug {
			log.Printf("dn: %s, changetype: %d, datastart: %d\n", dn, EntryRecord, dataLineStart)
		}
		return ldifLinesToEntryRecord(dn, lines[dataLineStart:])
	}
	return nil, NewError(ErrorLDIFRead, errors.New("Unknown LDIF record type"))
}
// Equal reports whether this UUID is equal to the passed UUID.
func (this UUID) Equal(another UUID) bool {
	return bytes.EqualFold(this, another)
}
func getFormParams(ctx context.Context, base string, client *http.Client) (formParams, error) {
	params := formParams{URL: base}
	resp, err := client.Get(base)
	if err != nil {
		return params, err
	}
	defer resp.Body.Close()
	select {
	case <-ctx.Done():
		return params, ctx.Err()
	default:
	}
	params.fields = make(map[string][]string, 4)
	z := html.NewTokenizer(resp.Body)
	state := 0
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			if z.Err() == io.EOF {
				break
			}
			Log.Error("parse page", "error", z.Err())
			return params, z.Err()
		}
		if tt != html.StartTagToken {
			continue
		}
		tn, hasAttr := z.TagName()
		if state == 0 {
			if !bytes.EqualFold(tn, []byte("form")) {
				continue
			}
			state++
			continue
		}
		if !(hasAttr && bytes.EqualFold(tn, []byte("input"))) {
			continue
		}
		var name, value, typ string
		for {
			key, val, more := z.TagAttr()
			switch {
			case bytes.EqualFold(key, []byte("name")):
				name = string(val)
			case bytes.EqualFold(key, []byte("type")):
				typ = string(bytes.ToLower(val))
			case bytes.EqualFold(key, []byte("value")):
				value = string(val)
			}
			if !more {
				break
			}
		}
		if typ == "file" {
			params.fileName = name
		} else {
			params.fields[name] = append(params.fields[name], value)
		}
	}
	return params, nil
}
// parseAtom returns the next atom, number, or NIL. The syntax rules are relaxed
// to treat sequences such as "BODY[...]<...>" as a single atom. Numbers are
// converted to uint32, NIL is converted to nil, everything else becomes a
// string. Flags (e.g. "\Seen") are converted to title case, other strings are
// left in their original form.
func (raw *rawResponse) parseAtom(astring bool) (f Field, err error) {
	n, flag := 0, false
	for end := len(raw.tail); n < end; n++ {
		if c := raw.tail[n]; c >= char || atomSpecials[c] {
			switch c {
			case '\\':
				if n == 0 {
					flag = true
					astring = false
					continue // ABNF: flag (e.g. `\Seen`)
				}
			case '*':
				if n == 1 && flag {
					n++ // ABNF: flag-perm (`\*`), end of atom
				}
			case '[':
				if n == 4 && bytes.EqualFold(raw.tail[:4], []byte("BODY")) {
					pos := raw.pos()
					raw.tail = raw.tail[n+1:] // Temporary shift for parseFields

					// TODO: Literals between '[' and ']' are handled correctly,
					// but only the octet count will make it into the returned
					// atom. Would any server actually send a literal here, and
					// is it a problem to discard it since the client already
					// knows what was requested?
					if _, err = raw.parseFields(']'); err != nil {
						return
					}
					n = raw.pos() - pos - 1
					raw.tail = raw.line[pos:] // Undo temporary shift
					end = len(raw.tail)
					astring = false
				}
				continue // ABNF: fetch-att ("BODY[...]<...>"), atom, or astring
			case ']':
				if astring {
					continue // ABNF: ASTRING-CHAR
				}
			}
			break // raw.tail[n] is a delimiter or an unexpected byte
		}
	}

	// Atom must have at least one character, two if it starts with a backslash
	if n < 2 && (n == 0 || flag) {
		err = raw.unexpected(0)
		return
	}

	// Take whatever was found, let parseFields report delimiter errors
	atom := raw.tail[:n]
	if norm := normalize(atom); flag {
		f = norm
	} else if norm != "NIL" {
		if c := norm[0]; '0' <= c && c <= '9' {
			if ui, err := strconv.ParseUint(norm, 10, 32); err == nil {
				f = uint32(ui)
			}
		}
		if f == nil {
			if raw.Label == "" {
				raw.Label = norm
			}
			f = string(atom)
		}
	}
	raw.tail = raw.tail[n:]
	return
}
func handleXSort(c *client, tp string) error {
	args := c.args
	if len(args) == 0 {
		return ErrCmdParams
	}

	key := args[0]

	desc := false
	alpha := false
	offset := 0
	size := 0

	var storeKey []byte
	var sortBy []byte
	var sortGet [][]byte
	var err error

	for i := 1; i < len(args); {
		if bytes.EqualFold(args[i], ascArg) {
			desc = false
		} else if bytes.EqualFold(args[i], descArg) {
			desc = true
		} else if bytes.EqualFold(args[i], alphaArg) {
			alpha = true
		} else if bytes.EqualFold(args[i], limitArg) && i+2 < len(args) {
			if offset, err = strconv.Atoi(string(args[i+1])); err != nil {
				return err
			}
			if size, err = strconv.Atoi(string(args[i+2])); err != nil {
				return err
			}
			i = i + 2
		} else if bytes.EqualFold(args[i], storeArg) && i+1 < len(args) {
			storeKey = args[i+1]
			i++
		} else if bytes.EqualFold(args[i], byArg) && i+1 < len(args) {
			sortBy = args[i+1]
			i++
		} else if bytes.EqualFold(args[i], getArg) && i+1 < len(args) {
			sortGet = append(sortGet, args[i+1])
			i++
		} else {
			return ErrCmdParams
		}
		i++
	}

	ay, err := xsort(c, tp, key, offset, size, alpha, desc, sortBy, sortGet)
	if err != nil {
		return err
	}

	if storeKey == nil {
		c.resp.writeSliceArray(ay)
	} else {
		// not threadsafe now, need lock???
		if _, err = c.db.LClear(storeKey); err != nil {
			return err
		}
		if n, err := c.db.RPush(storeKey, ay...); err != nil {
			return err
		} else {
			c.resp.writeInteger(n)
		}
	}

	return nil
}