func main() {
	var Stdin = os.NewFile(uintptr(syscall.Stdin), "/dev/stdin")
	var s scanner.Scanner
	s.Init(Stdin)
	s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanChars |
		scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanComments
	tok := s.Scan()
	for tok != scanner.EOF {
		switch tok {
		case scanner.Ident:
			fmt.Println("Ident")
		case scanner.Int:
			fmt.Println("Int")
		case scanner.Float:
			fmt.Println("Float")
		case scanner.Char:
			fmt.Println("Char")
		case scanner.String:
			fmt.Println("String")
		case scanner.RawString:
			fmt.Println("RawString")
		case scanner.Comment:
			fmt.Println("Comment")
		}
		tok = s.Scan()
	}
}
// ParseKeySet parses a string of comma separated keys
// made of dot separated sections into a KeySet.
func ParseKeySet(s string) (KeySet, error) {
	sc := new(scanner.Scanner).Init(strings.NewReader(s))
	sc.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanInts
	var k Key
	var ks KeySet
	for {
		// scan section
		switch sc.Scan() {
		case scanner.String:
			u, _ := strconv.Unquote(sc.TokenText())
			if len(u) == 0 {
				return nil, fmt.Errorf("part of key missing in %q", s)
			}
			k = append(k, u)
		case scanner.Ident, scanner.Int:
			k = append(k, sc.TokenText())
		default:
			return nil, fmt.Errorf("bad formatting in %q", s)
		}
		// scan separator
		switch sc.Scan() {
		case '.':
			continue
		case ',':
			ks = append(ks, k)
			k = nil
		case scanner.EOF:
			return append(ks, k), nil
		default:
			return nil, fmt.Errorf("invalid separator in %q", s)
		}
	}
}
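// A minimal usage sketch for ParseKeySet above, assuming definitions along
// the lines of `type Key []string` and `type KeySet []Key`, which the
// appends in ParseKeySet suggest (the real definitions are not shown here).
func exampleParseKeySet() {
	ks, err := ParseKeySet(`a.b."c d",e.1`)
	fmt.Println(ks, err) // expected: [[a b c d] [e 1]] <nil>
}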
// parseKey reads a private key from r. It returns a map[string]string,
// with the key-value pairs, or an error when the file is not correct.
func parseKey(r io.Reader, file string) (map[string]string, error) {
	var s scanner.Scanner
	m := make(map[string]string)
	c := make(chan lex)
	k := ""
	s.Init(r)
	s.Mode = 0
	s.Whitespace = 0
	// Start the lexer
	go klexer(s, c)
	for l := range c {
		// It should alternate
		switch l.value {
		case _KEY:
			k = l.token
		case _VALUE:
			if k == "" {
				return nil, &ParseError{file, "No key seen", l}
			}
			//println("Setting", strings.ToLower(k), "to", l.token, "b")
			m[strings.ToLower(k)] = l.token
			k = ""
		}
	}
	return m, nil
}
func format(str, dbname string, tm time.Time) string {
	buf := bytes.Buffer{}
	var s scanner.Scanner
	s.Init(strings.NewReader(str))
	s.Mode = 0
	s.Whitespace = 0
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		if tok != '%' {
			buf.WriteRune(tok)
			continue
		}
		switch s := s.Scan(); s {
		case '%':
			buf.WriteRune('%')
		case 'n':
			buf.WriteString(dbname)
		case 'Y', 'y':
			buf.WriteString(strconv.Itoa(tm.Year()))
		case 'm':
			buf.WriteString(strconv.Itoa(int(tm.Month())))
		case 'd':
			buf.WriteString(strconv.Itoa(tm.Day()))
		case 'H':
			buf.WriteString(twodig(strconv.Itoa(tm.Hour())))
		case 'M':
			buf.WriteString(twodig(strconv.Itoa(tm.Minute())))
		case 'S':
			buf.WriteString(twodig(strconv.Itoa(tm.Second())))
		}
	}
	return buf.String()
}
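// twodig is used above but not defined in the snippet; judging from its use
// on the hour/minute/second fields it most likely zero-pads a digit string
// to two characters. A plausible sketch:
func twodig(s string) string {
	if len(s) == 1 {
		return "0" + s
	}
	return s
}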
func split(expr string) (keys []string, err error) {
	var msgs []string
	var s scanner.Scanner
	s.Init(strings.NewReader(expr))
	s.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanStrings
	s.Error = func(s *scanner.Scanner, msg string) {
		msgs = append(msgs, fmt.Sprintf("%s %s", s.Pos(), msg))
	}
	key := ""
	keys = []string{}
	for err == nil {
		t := s.Peek()
		// fmt.Printf(">>> %s: %s %s\n", s.Pos(), scanner.TokenString(t), s.TokenText())
		switch t {
		case '[':
			key, err = scanBracketedKey(&s)
		case '.':
			s.Scan()
			continue
		case scanner.EOF:
			goto end
		default:
			key, err = scanKey(&s)
		}
		if len(msgs) > 0 {
			err = errors.New(strings.Join(msgs, "\n"))
		}
		if err == nil {
			keys = append(keys, key)
		}
	}
end:
	return
}
// extractFloat32 parses the value string parameter and returns the Number
// value embedded within the string. It returns the zero Number if it doesn't
// find any Number value in the value string.
// Example: "some4.56more" would return 4.56.
func extractFloat32(value string) Number {
	var sc scanner.Scanner
	var tok rune
	var valFloat64 float64
	var valFloat32 Number
	var err error
	var isFound bool
	if len(value) > 0 {
		sc.Init(strings.NewReader(value))
		sc.Mode = scanner.ScanFloats
		for tok != scanner.EOF {
			tok = sc.Scan()
			// fmt.Println("At position", sc.Pos(), ":", sc.TokenText())
			valFloat64, err = strconv.ParseFloat(sc.TokenText(), 64)
			if err == nil {
				isFound = true
				break
			}
		}
	}
	if isFound {
		valFloat32 = Number(valFloat64)
	}
	return valFloat32
}
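// A quick demonstration of the doc comment's own example. Number comes from
// the snippet above (presumably a float32-like type).
func exampleExtractFloat32() {
	fmt.Println(extractFloat32("some4.56more")) // expected: 4.56
}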
// gofmtFlags looks for a comment of the form
//
//	//gofmt flags
//
// within the first maxLines lines of the given file,
// and returns the flags string, if any. Otherwise it
// returns the empty string.
func gofmtFlags(filename string, maxLines int) string {
	f, err := os.Open(filename)
	if err != nil {
		return "" // ignore errors - they will be found later
	}
	defer f.Close()

	// initialize scanner
	var s scanner.Scanner
	s.Init(f)
	s.Error = func(*scanner.Scanner, string) {}          // ignore errors
	s.Mode = scanner.GoTokens &^ scanner.SkipComments // want comments

	// look for //gofmt comment
	for s.Line <= maxLines {
		switch s.Scan() {
		case scanner.Comment:
			const prefix = "//gofmt "
			if t := s.TokenText(); strings.HasPrefix(t, prefix) {
				return strings.TrimSpace(t[len(prefix):])
			}
		case scanner.EOF:
			return ""
		}
	}

	return ""
}
func NewScanner(src []byte) *scanner.Scanner {
	var s scanner.Scanner
	s.Init(bytes.NewReader(src))
	s.Error = func(_ *scanner.Scanner, _ string) {}
	s.Whitespace = 0
	s.Mode = s.Mode ^ scanner.SkipComments
	return &s
}
// NewScannerReader takes a reader src and creates a Scanner.
func NewScannerReader(src io.Reader) *scanner.Scanner {
	var s scanner.Scanner
	s.Init(src)
	s.Error = func(_ *scanner.Scanner, _ string) {}
	s.Whitespace = 0
	s.Mode = s.Mode ^ scanner.SkipComments
	return &s
}
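// A minimal sketch of how a caller might drive the scanners built above; the
// input string is only for illustration. Because SkipComments is toggled off
// and Whitespace is zeroed, comment tokens and individual whitespace runes
// come through as well.
func exampleNewScannerReader() {
	s := NewScannerReader(strings.NewReader("x := 1 // note"))
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Printf("%s: %q\n", scanner.TokenString(tok), s.TokenText())
	}
}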
// New returns a new lexer.
func New(r io.Reader) *Lex {
	var s scanner.Scanner
	s.Init(r)
	// don't scan chars or raw strings here; the lexer implements those itself
	s.Mode &^= scanner.ScanChars | scanner.ScanRawStrings
	return &Lex{
		Scanner: &s,
	}
}
func NewParser(r io.Reader, c ParserClient) *Parser {
	s := new(scanner.Scanner).Init(r)
	s.Mode = (scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats |
		scanner.SkipComments | scanner.ScanStrings)
	return &Parser{
		s: s,
		c: c,
	}
}
// Parse parses a SPARQL query from the reader
func (sparql *SPARQLUpdate) Parse(src io.Reader) error {
	b, _ := ioutil.ReadAll(src)
	s := new(scanner.Scanner).Init(bytes.NewReader(b))
	s.Mode = scanner.ScanIdents | scanner.ScanStrings
	start := 0
	level := 0
	verb := ""
	tok := s.Scan()
	for tok != scanner.EOF {
		switch tok {
		case -2: // scanner.Ident
			if level == 0 {
				if len(verb) > 0 {
					verb += " "
				}
				verb += s.TokenText()
			}
		case 123: // {
			if level == 0 {
				start = s.Position.Offset
			}
			level++
		case 125: // }
			level--
			if level == 0 {
				query := SPARQLUpdateQuery{
					body:  string(b[start+1 : s.Position.Offset]),
					graph: NewGraph(sparql.baseURI),
					verb:  verb,
				}
				query.graph.Parse(strings.NewReader(query.body), "text/turtle")
				sparql.queries = append(sparql.queries, query)
			}
		case 59: // ;
			if level == 0 {
				verb = ""
			}
		}
		tok = s.Scan()
	}
	return nil
}
// Text satisfies the Extractor interface.
//
// It is a specialized routine for correcting Golang source
// files. Currently only checks comments, not identifiers, for
// spelling.
//
// Other items:
//   - check strings, but need to ignore
//     * import "statements" blocks
//     * import ( "blocks" )
//   - skip first comment (line 0) if build comment
func (p *GolangText) Text(raw []byte) []byte {
	out := bytes.Buffer{}
	s := scanner.Scanner{}
	s.Init(bytes.NewReader(raw))
	s.Error = func(s *scanner.Scanner, msg string) {}
	s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanChars |
		scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanComments
	for {
		switch s.Scan() {
		case scanner.Comment:
			out.WriteString(s.TokenText())
			out.WriteByte('\n')
		case scanner.EOF:
			return out.Bytes()
		}
	}
}
func ScanText(file string) {
	text, err := os.Open(file)
	if err != nil {
		log.Fatal(err)
	}
	defer text.Close()

	var s scanner.Scanner
	s.Init(text)
	s.Mode = scanner.ScanIdents
	tok := s.Scan()
	for tok != scanner.EOF {
		// do something with tok
		doSomething(s.TokenText())
		tok = s.Scan()
	}
	doSomething(nonword)
}
func (theParser *CommandParser) golangTokenizer(line string) []*PreToken {
	var theScanner scanner.Scanner
	result := []*PreToken{}
	theScanner.Init(strings.NewReader(line))
	theScanner.Mode = scanner.ScanFloats | scanner.ScanIdents |
		scanner.ScanInts | scanner.ScanStrings
	tok := theScanner.Scan()
	for tok != scanner.EOF && tok != COMMENTCHAR {
		s := theScanner.TokenText()
		theToken := &PreToken{
			Type:     tok,
			Text:     s,
			Position: theScanner.Position,
		}
		result = append(result, theToken)
		tok = theScanner.Scan()
	}
	return result
}
func parseCode(code string) string {
	highlighted, linenumbers, out := "", "", ""
	b := bytes.NewBufferString(code)
	var scan scanner.Scanner
	scan.Init(b)
	scan.Mode = scanner.ScanIdents | scanner.ScanRawStrings | scanner.ScanChars |
		scanner.ScanFloats | scanner.ScanStrings | scanner.ScanComments
	scan.Whitespace = 0
	tok := scan.Scan()
	for tok != scanner.EOF {
		escaped := html.EscapeString(scan.TokenText())
		switch scanner.TokenString(tok) {
		case "Ident":
			if isKeyword(scan.TokenText()) {
				out += "<span class=\"ident\">" + escaped + "</span>"
			} else {
				out += html.EscapeString(scan.TokenText())
			}
		case "String":
			out += "<span class=\"string\">" + escaped + "</span>"
		case "Char":
			out += "<span class=\"char\">" + escaped + "</span>"
		case "RawString":
			out += "<span class=\"rawstring\">" + escaped + "</span>"
		case "Int":
			out += "<span class=\"int\">" + escaped + "</span>"
		case "Comment":
			out += "<span class=\"comment\">" + escaped + "</span>"
		default:
			out += html.EscapeString(scan.TokenText())
		}
		tok = scan.Scan()
	}
	out = strings.Replace(out, "\t", " ", -1)
	lines := strings.Split(out, "\n")
	for k, v := range lines[0 : len(lines)-1] {
		highlighted += fmt.Sprintf("\n%s<br/>", v)
		linenumbers += fmt.Sprintf("%v<br/>", k+1)
	}
	gutter := "<div class=\"gutter\">" + linenumbers + "</div>"
	parsedcode := "<div class=\"code\">" + highlighted + "</div>"
	return "<div class=\"shaban-syntax\">" + gutter + parsedcode + "</div>"
}
func funcFromFormula(form string) (string, error) {
	f, _, err := extractFormula(form)
	if err != nil {
		return "", err
	}
	var s scanner.Scanner
	s.Init(strings.NewReader(f))
	s.Error = func(s *scanner.Scanner, msg string) {
		err = fmt.Errorf("error parsing plural formula %s: %s", s.Pos(), msg)
	}
	s.Mode = scanner.ScanIdents | scanner.ScanInts
	s.Whitespace = 0
	tok := s.Scan()
	var code []string
	var buf bytes.Buffer
	for tok != scanner.EOF && err == nil {
		switch tok {
		case scanner.Ident, scanner.Int:
			buf.WriteString(s.TokenText())
		case '?':
			code = append(code, fmt.Sprintf("if %s {\n", buf.String()))
			buf.Reset()
		case ':':
			code = append(code, fmt.Sprintf("return %s\n}\n", buf.String()))
			buf.Reset()
		default:
			buf.WriteRune(tok)
		}
		tok = s.Scan()
	}
	if err != nil {
		return "", err
	}
	if len(code) == 0 && buf.Len() > 0 && buf.String() != "0" {
		code = append(code, fmt.Sprintf("if %s {\nreturn 1\n}\nreturn 0\n", buf.String()))
		buf.Reset()
	}
	if buf.Len() > 0 {
		code = append(code, fmt.Sprintf("\nreturn %s\n", buf.String()))
	}
	return strings.Join(code, ""), nil
}
func newScanner(r io.Reader) *scmScanner {
	var s scanner.Scanner
	s.Init(r)
	s.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanFloats
	s.IsIdentRune = func(ch rune, i int) bool {
		if i == 0 && unicode.IsDigit(ch) {
			return false
		}
		return ch != '\'' && ch != '"' && ch != '(' && ch != ')' &&
			ch != ';' && !unicode.IsSpace(ch) && unicode.IsPrint(ch)
	}
	return &scmScanner{Scanner: s}
}
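// A small sketch of what the custom IsIdentRune above buys: almost any
// printable run counts as an identifier, so Scheme-style names such as
// list->vector scan as a single Ident token. scmScanner comes from the
// snippet above.
func exampleSchemeIdents() {
	s := newScanner(strings.NewReader("(define list->vector 1.5)"))
	for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
		fmt.Println(scanner.TokenString(tok), s.TokenText())
	}
	// '(' and ')' come back as single runes; 1.5 scans as a Float.
}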
// ReplaceGo is a specialized routine for correcting Golang source
// files. Currently only checks comments, not identifiers, for
// spelling.
//
// Other items:
//   - check strings, but need to ignore
//     * import "statements" blocks
//     * import ( "blocks" )
//   - skip first comment (line 0) if build comment
func ReplaceGo(input string, debug bool) string {
	var s scanner.Scanner
	s.Init(strings.NewReader(input))
	s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanChars |
		scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanComments
	lastPos := 0
	output := ""
	for {
		switch s.Scan() {
		case scanner.Comment:
			origComment := s.TokenText()
			var newComment string
			if debug {
				newComment = ReplaceDebug(origComment)
			} else {
				newComment = Replace(origComment)
			}
			if origComment != newComment {
				// s.Pos().Offset is the end of the current token;
				// subtract len(origComment) to get the start of the token
				offset := s.Pos().Offset
				output = output + input[lastPos:offset-len(origComment)] + newComment
				lastPos = offset
			}
		case scanner.EOF:
			// no changes, no copies
			if lastPos == 0 {
				return input
			}
			if lastPos >= len(input) {
				return output
			}
			return output + input[lastPos:]
		}
	}
}
// parseCommand reads command name and arguments from the given input.
func parseCommand(data string) (string, []string) {
	var scan scanner.Scanner
	var list []string
	scan.Init(bytes.NewBufferString(data))
	// the mode must be set after Init, which resets it to scanner.GoTokens
	scan.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.SkipComments |
		scanner.ScanRawStrings | scanner.ScanChars
	tok := scan.Scan()
	for tok != scanner.EOF {
		if scan.ErrorCount > 0 {
			break
		}
		list = append(list, scan.TokenText())
		tok = scan.Scan()
	}
	if len(list) == 0 {
		return "", nil
	}
	return strings.ToLower(list[0]), list[1:]
}
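// A hypothetical call showing the shape of parseCommand's result: the first
// token is lower-cased as the command name, the remaining token texts become
// the arguments.
func exampleParseCommand() {
	cmd, args := parseCommand("Send foo 42")
	fmt.Println(cmd, args) // expected: send [foo 42]
}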
func NewTokenizer(name string, r io.Reader, file *os.File) *Tokenizer {
	var s scanner.Scanner
	s.Init(r)
	// Newline is like a semicolon; other space characters are fine.
	s.Whitespace = 1<<'\t' | 1<<'\r' | 1<<' '
	// Don't skip comments: we need to count newlines.
	s.Mode = scanner.ScanChars | scanner.ScanFloats | scanner.ScanIdents |
		scanner.ScanInts | scanner.ScanStrings | scanner.ScanComments
	s.Position.Filename = name
	s.IsIdentRune = isIdentRune
	if file != nil {
		linkCtxt.LineHist.Push(histLine, name)
	}
	return &Tokenizer{
		s:        &s,
		line:     1,
		fileName: name,
		file:     file,
	}
}
func parsePo(r io.Reader, filename string) (*Po, error) {
	comment := false
	s := new(scanner.Scanner)
	var err error
	s.Init(r)
	s.Filename = filename
	s.Error = func(s *scanner.Scanner, msg string) {
		if !comment {
			err = fmt.Errorf("error parsing %s: %s", s.Pos(), msg)
		}
	}
	s.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanInts
	tok := s.Scan()
	po := &Po{Attrs: make(map[string]string)}
	var trans *Translation
	for tok != scanner.EOF && err == nil {
		if tok == '#' {
			// Skip until EOL
			comment = true
			s.Whitespace = whitespace
			for tok != '\n' && tok != scanner.EOF {
				tok = s.Scan()
			}
			s.Whitespace = scanner.GoWhitespace
			comment = false
			tok = s.Scan()
			continue
		}
		if tok != scanner.Ident {
			err = unexpected(s, tok)
			break
		}
		text := s.TokenText()
		switch text {
		case "msgctxt":
			if trans != nil {
				if len(trans.Translations) == 0 {
					err = unexpected(s, tok)
					break
				}
				po.addTranslation(trans)
			}
			trans = &Translation{Context: readString(s, &tok, &err)}
		case "msgid":
			if trans != nil {
				if len(trans.Translations) > 0 || trans.Singular != "" {
					po.addTranslation(trans)
				} else if trans.Context != "" {
					trans.Singular = readString(s, &tok, &err)
					break
				}
			}
			trans = &Translation{Singular: readString(s, &tok, &err)}
		case "msgid_plural":
			if trans == nil || trans.Plural != "" {
				err = unexpected(s, tok)
				break
			}
			trans.Plural = readString(s, &tok, &err)
		case "msgstr":
			str := readString(s, &tok, &err)
			if tok == '[' {
				tok = s.Scan()
				if tok != scanner.Int {
					err = unexpected(s, tok)
					break
				}
				ii, _ := strconv.Atoi(s.TokenText())
				if ii != len(trans.Translations) {
					err = unexpected(s, tok)
					break
				}
				if tok = s.Scan(); tok != ']' {
					err = unexpected(s, tok)
					break
				}
				str = readString(s, &tok, &err)
			}
			trans.Translations = append(trans.Translations, str)
		default:
			err = unexpected(s, tok)
		}
	}
	if trans != nil {
		po.addTranslation(trans)
	}
	if err != nil {
		return nil, err
	}
	for _, v := range po.Messages {
		if v.Context == "" && v.Singular == "" {
			if len(v.Translations) > 0 {
				meta := v.Translations[0]
				for _, line := range strings.Split(meta, "\n") {
					colon := strings.Index(line, ":")
					if colon > 0 {
						key := strings.TrimSpace(line[:colon])
						value := strings.TrimSpace(line[colon+1:])
						po.Attrs[key] = value
					}
				}
			}
			break
		}
	}
	return po, nil
}
func parseZone(r io.Reader, f string, t chan Token, include int) {
	defer func() {
		if include == 0 {
			close(t)
		}
	}()
	var s scanner.Scanner
	c := make(chan lex)
	s.Init(r)
	s.Mode = 0
	s.Whitespace = 0
	// Start the lexer
	go zlexer(s, c)
	// 5 possible beginnings of a line, _ is a space
	// 1. _OWNER _ _RRTYPE                     -> class/ttl omitted
	// 2. _OWNER _ _STRING _ _RRTYPE           -> class omitted
	// 3. _OWNER _ _STRING _ _CLASS _ _RRTYPE  -> ttl/class
	// 4. _OWNER _ _CLASS _ _RRTYPE            -> ttl omitted
	// 5. _OWNER _ _CLASS _ _STRING _ _RRTYPE  -> class/ttl (reversed)
	// After detecting these, we know the _RRTYPE so we can jump to functions
	// handling the rdata for each of these types.
	st := _EXPECT_OWNER_DIR
	var h RR_Header
	var ok bool
	var defttl uint32 = DefaultTtl
	var origin string = "."
	for l := range c {
		if _DEBUG {
			fmt.Printf("[%v]\n", l)
		}
		// Lexer spotted an error already
		if l.err != "" {
			t <- Token{Error: &ParseError{f, l.err, l}}
			return
		}
		switch st {
		case _EXPECT_OWNER_DIR:
			// We can also expect a directive, like $TTL or $ORIGIN
			h.Ttl = defttl
			h.Class = ClassINET
			switch l.value {
			case _NEWLINE: // Empty line
				st = _EXPECT_OWNER_DIR
			case _OWNER:
				h.Name = l.token
				if _, ok := IsDomainName(l.token); !ok {
					t <- Token{Error: &ParseError{f, "bad owner name", l}}
					return
				}
				if !IsFqdn(h.Name) {
					h.Name += origin
				}
				st = _EXPECT_OWNER_BL
			case _DIRTTL:
				st = _EXPECT_DIRTTL_BL
			case _DIRORIGIN:
				st = _EXPECT_DIRORIGIN_BL
			case _DIRINCLUDE:
				st = _EXPECT_DIRINCLUDE_BL
			default:
				t <- Token{Error: &ParseError{f, "Error at the start", l}}
				return
			}
		case _EXPECT_DIRINCLUDE_BL:
			if l.value != _BLANK {
				t <- Token{Error: &ParseError{f, "No blank after $INCLUDE-directive", l}}
				return
			}
			st = _EXPECT_DIRINCLUDE
		case _EXPECT_DIRINCLUDE:
			if l.value != _STRING {
				t <- Token{Error: &ParseError{f, "Expecting $INCLUDE value, not this...", l}}
				return
			}
			// Start with the new file
			r1, e1 := os.Open(l.token)
			if e1 != nil {
				t <- Token{Error: &ParseError{f, "Failed to open `" + l.token + "'", l}}
				return
			}
			if include+1 > 7 {
				t <- Token{Error: &ParseError{f, "Too deeply nested $INCLUDE", l}}
				return
			}
			parseZone(r1, l.token, t, include+1)
			st = _EXPECT_OWNER_DIR
		case _EXPECT_DIRTTL_BL:
			if l.value != _BLANK {
				t <- Token{Error: &ParseError{f, "No blank after $TTL-directive", l}}
				return
			}
			st = _EXPECT_DIRTTL
		case _EXPECT_DIRTTL:
			if l.value != _STRING {
				t <- Token{Error: &ParseError{f, "Expecting $TTL value, not this...", l}}
				return
			}
			if ttl, ok := stringToTtl(l, f, t); !ok {
				return
			} else {
				defttl = ttl
			}
			st = _EXPECT_OWNER_DIR
		case _EXPECT_DIRORIGIN_BL:
			if l.value != _BLANK {
				t <- Token{Error: &ParseError{f, "No blank after $ORIGIN-directive", l}}
				return
			}
			st = _EXPECT_DIRORIGIN
		case _EXPECT_DIRORIGIN:
			if l.value != _STRING {
				t <- Token{Error: &ParseError{f, "Expecting $ORIGIN value, not this...", l}}
				return
			}
			if !IsFqdn(l.token) {
				origin = l.token + origin // Append old origin if the new one isn't a fqdn
			} else {
				origin = l.token
			}
		case _EXPECT_OWNER_BL:
			if l.value != _BLANK {
				t <- Token{Error: &ParseError{f, "No blank after owner", l}}
				return
			}
			st = _EXPECT_ANY
		case _EXPECT_ANY:
			switch l.value {
			case _RRTYPE:
				h.Rrtype, _ = Str_rr[strings.ToUpper(l.token)]
				st = _EXPECT_RDATA
			case _CLASS:
				h.Class, ok = Str_class[strings.ToUpper(l.token)]
				if !ok {
					t <- Token{Error: &ParseError{f, "Unknown class", l}}
					return
				}
				st = _EXPECT_ANY_NOCLASS_BL
			case _STRING: // TTL is this case
				if ttl, ok := stringToTtl(l, f, t); !ok {
					return
				} else {
					h.Ttl = ttl
				}
				st = _EXPECT_ANY_NOTTL_BL
			default:
				t <- Token{Error: &ParseError{f, "Expecting RR type, TTL or class, not this...", l}}
				return
			}
		case _EXPECT_ANY_NOCLASS_BL:
			if l.value != _BLANK {
				t <- Token{Error: &ParseError{f, "No blank before NOCLASS", l}}
				return
			}
			st = _EXPECT_ANY_NOCLASS
		case _EXPECT_ANY_NOTTL_BL:
			if l.value != _BLANK {
				t <- Token{Error: &ParseError{f, "No blank before NOTTL", l}}
				return
			}
			st = _EXPECT_ANY_NOTTL
		case _EXPECT_ANY_NOTTL:
			switch l.value {
			case _CLASS:
				h.Class, ok = Str_class[strings.ToUpper(l.token)]
				if !ok {
					t <- Token{Error: &ParseError{f, "Unknown class", l}}
					return
				}
				st = _EXPECT_RRTYPE_BL
			case _RRTYPE:
				h.Rrtype, _ = Str_rr[strings.ToUpper(l.token)]
				st = _EXPECT_RDATA
			}
		case _EXPECT_ANY_NOCLASS:
			switch l.value {
			case _STRING: // TTL
				if ttl, ok := stringToTtl(l, f, t); !ok {
					return
				} else {
					h.Ttl = ttl
				}
				st = _EXPECT_RRTYPE_BL
			case _RRTYPE:
				h.Rrtype, _ = Str_rr[strings.ToUpper(l.token)]
				st = _EXPECT_RDATA
			default:
				t <- Token{Error: &ParseError{f, "Expecting RR type or TTL, not this...", l}}
				return
			}
		case _EXPECT_RRTYPE_BL:
			if l.value != _BLANK {
				t <- Token{Error: &ParseError{f, "No blank after", l}}
				return
			}
			st = _EXPECT_RRTYPE
		case _EXPECT_RRTYPE:
			if l.value != _RRTYPE {
				t <- Token{Error: &ParseError{f, "Unknown RR type", l}}
				return
			}
			h.Rrtype, _ = Str_rr[strings.ToUpper(l.token)]
			st = _EXPECT_RDATA
		case _EXPECT_RDATA:
			// I could save my token here...? l
			r, e := setRR(h, c, origin, f)
			if e != nil {
				// If e.lex is nil then we have encountered an unknown RR type;
				// in that case we substitute our current lex token
				if e.lex.token == "" && e.lex.value == 0 {
					e.lex = l // Uh, dirty
				}
				t <- Token{Error: e}
				return
			}
			t <- Token{RR: r}
			st = _EXPECT_OWNER_DIR
		}
	}
}
func (m *Server) handleControlConnection(conn net.Conn) {
	reader := bufio.NewReader(conn)
	writer := bufio.NewWriter(conn)
	defer func() {
		writer.Flush()
		log.Printf("Closing control connection for %s", conn.RemoteAddr())
		conn.Close()
	}()
	writer.WriteString("Welcome to PushServer console.\n")
	for {
		writer.WriteString("> ")
		writer.Flush()
		cmd, err := reader.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				log.Printf("Error reading from control connection: %s", err.Error())
			}
			break
		}
		var s scanner.Scanner
		s.Init(strings.NewReader(cmd))
		s.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanStrings
		tok := s.Scan()
		if tok == scanner.Ident {
			switch s.TokenText() {
			case "l":
				result := make(chan []string)
				writer.WriteString("Listing clients:\n")
				m.masterChan <- &Event{LIST_ALL, writer, result}
				clients := <-result
				for _, clientId := range clients {
					writer.WriteString(" " + clientId + "\n")
				}
			case "s":
				tok = s.Scan()
				if tok != scanner.Ident {
					writer.WriteString("s <client id> <\"string to send\">\n")
					break
				}
				clientId := s.TokenText()
				tok = s.Scan()
				if tok != scanner.String {
					writer.WriteString("s <client id> <\"string to send\">\n")
					break
				}
				if m.SendText(clientId, s.TokenText()) {
					writer.WriteString("Message sent.\n")
				} else {
					writer.WriteString("Can't find " + clientId + "\n")
				}
			case "b":
				tok := s.Scan()
				if tok != scanner.String {
					writer.WriteString("b <\"string to broadcast\">\n")
					break
				}
				m.Broadcast(s.TokenText())
				writer.WriteString("Message broadcasted.\n")
			case "q":
				return
			default:
				writer.WriteString("Unrecognized command " + cmd + "\n")
			}
		} else {
			writer.WriteString("<cmd> [args]\n")
		}
		writer.Flush()
	}
}
func parseMarkdown(str string) (string, []*Fact, error) {
	var s scanner.Scanner
	var tok rune

	// result fields
	var facts []*Fact
	var header string

	// current parsing state
	var buf bytes.Buffer
	var fact *Fact
	inHeader := false
	inNote := false
	indentCount := 0
	whitespaceCount := 0

	debug := func(format string, a ...interface{}) {
		if debugEnabled {
			info := "(indent=%v header=%v note=%v)"
			a = append(a, indentCount, boolToString(inHeader), boolToString(inNote))
			fmt.Printf("scanner: "+format+"\t\t"+info+"\n", a...)
		}
	}
	debugPlain := func(format string, a ...interface{}) {
		if debugEnabled {
			fmt.Printf("scanner: "+format+"\n", a...)
		}
	}

	s.Init(strings.NewReader(str))
	// restrict the mode (no string scanning) to avoid problems when we run
	// into apostrophes
	s.Mode = scanner.ScanFloats | scanner.ScanIdents | scanner.ScanInts
	s.Whitespace = 1 << '\r'

	for tok != scanner.EOF {
		tok = s.Scan()
		switch s.TokenText() {
		case " ":
			debug("[SPACE]")
			if inHeader || inNote {
				buf.WriteString(s.TokenText())
			} else {
				whitespaceCount += 1
				if whitespaceCount == 4 {
					indentCount += 1
					whitespaceCount = 0
				}
			}
		// reset everything and handle a completed string
		case "\n":
			debug("\\n")
			// the buffered string likely has leading and possibly trailing
			// whitespace; trim it
			trimmed := strings.TrimSpace(buf.String())
			if inNote && trimmed != "" {
				if indentCount == 0 {
					debugPlain("FACT (front): %v\n", trimmed)
					fact = &Fact{Front: trimmed}
					facts = append(facts, fact)
				} else {
					if fact != nil {
						debugPlain("FACT (back): %v\n", trimmed)
						fact.Back = trimmed
					}
				}
			}
			if inHeader {
				debugPlain("HEADER: %v\n", trimmed)
				header = trimmed
			}
			buf.Reset()
			inHeader = false
			inNote = false
			indentCount = 0
			whitespaceCount = 0
		case "#":
			debug("#")
			if !inHeader && !inNote {
				inHeader = true
			} else {
				buf.WriteString(s.TokenText())
			}
		case "*":
			debug("*")
			if !inHeader && !inNote {
				inNote = true
			} else {
				buf.WriteString(s.TokenText())
			}
		default:
			debug("%s", s.TokenText())
			if inHeader || inNote {
				buf.WriteString(s.TokenText())
			}
			// any non-whitespace character resets whitespace count to zero
			whitespaceCount = 0
		}
	}

	return header, facts, nil
}
func ParseMoves(s *scanner.Scanner, g *Game) error {
	//fmt.Println("starting moves parse")
	s.Mode = scanner.ScanIdents | scanner.ScanChars | scanner.ScanInts | scanner.ScanStrings
	run := s.Peek()
	board := NewBoard()
	var err error
	if len(g.Tags["FEN"]) > 0 {
		board, err = NewBoardFEN(g.Tags["FEN"])
		if err != nil {
			return err
		}
	}
	num := ""
	white := ""
	black := ""
	for run != scanner.EOF {
		switch run {
		case '(': // skip variations
			for run != ')' && run != scanner.EOF {
				run = s.Next()
			}
		case '{': // skip comments
			for run != '}' && run != scanner.EOF {
				run = s.Next()
			}
		case '#', '.', '+', '!', '?', '\n', '\r':
			run = s.Next()
			run = s.Peek()
		default:
			s.Scan()
			if s.TokenText() == "{" {
				run = '{'
				continue
			}
			if num == "" {
				num = s.TokenText()
				for s.Peek() == '-' {
					s.Scan()
					num += s.TokenText()
					s.Scan()
					num += s.TokenText()
				}
				for s.Peek() == '/' {
					s.Scan()
					num += s.TokenText()
					s.Scan()
					num += s.TokenText()
					s.Scan()
					num += s.TokenText()
					s.Scan()
					num += s.TokenText()
					s.Scan()
					num += s.TokenText()
					s.Scan()
					num += s.TokenText()
				}
				if isEnd(num) {
					return nil
				}
			} else if white == "" {
				white = s.TokenText()
				for s.Peek() == '-' {
					s.Scan()
					white += s.TokenText()
					s.Scan()
					white += s.TokenText()
				}
				for s.Peek() == '/' {
					s.Scan()
					white += s.TokenText()
					s.Scan()
					white += s.TokenText()
					s.Scan()
					white += s.TokenText()
					s.Scan()
					white += s.TokenText()
					s.Scan()
					white += s.TokenText()
					s.Scan()
					white += s.TokenText()
				}
				if isEnd(white) {
					return nil
				}
				if s.Peek() == '=' {
					s.Scan()
					white += s.TokenText()
					s.Scan()
					white += s.TokenText()
				}
				move, err := board.MoveFromAlgebraic(white, White)
				if err != nil {
					fmt.Println(board)
					return err
				}
				g.Moves = append(g.Moves, move)
				board.MakeMove(move)
			} else if black == "" {
				black = s.TokenText()
				for s.Peek() == '-' {
					s.Scan()
					black += s.TokenText()
					s.Scan()
					black += s.TokenText()
				}
				for s.Peek() == '/' {
					s.Scan()
					black += s.TokenText()
					s.Scan()
					black += s.TokenText()
					s.Scan()
					black += s.TokenText()
					s.Scan()
					black += s.TokenText()
					s.Scan()
					black += s.TokenText()
					s.Scan()
					black += s.TokenText()
				}
				if isEnd(black) {
					return nil
				}
				if s.Peek() == '=' {
					s.Scan()
					black += s.TokenText()
					s.Scan()
					black += s.TokenText()
				}
				move, err := board.MoveFromAlgebraic(black, Black)
				if err != nil {
					fmt.Println(board)
					return err
				}
				g.Moves = append(g.Moves, move)
				board.MakeMove(move)
				num = ""
				white = ""
				black = ""
			}
			run = s.Peek()
		}
	}
	return nil
}