// run runs the command argv, feeding in stdin on standard input.
// It returns the output to standard output and standard error.
// ok indicates whether the command exited successfully.
func run(stdin []byte, argv []string) (stdout, stderr []byte, ok bool) {
	cmd, err := exec.LookPath(argv[0]);
	if err != nil {
		fatal("exec %s: %s", argv[0], err);
	}
	r0, w0, err := os.Pipe();
	if err != nil {
		fatal("%s", err);
	}
	r1, w1, err := os.Pipe();
	if err != nil {
		fatal("%s", err);
	}
	r2, w2, err := os.Pipe();
	if err != nil {
		fatal("%s", err);
	}
	pid, err := os.ForkExec(cmd, argv, os.Environ(), "", []*os.File{r0, w1, w2});
	if err != nil {
		fatal("%s", err);
	}
	r0.Close();
	w1.Close();
	w2.Close();
	c := make(chan bool);
	go func() {
		w0.Write(stdin);
		w0.Close();
		c <- true;
	}();
	var xstdout []byte;	// TODO(rsc): delete after 6g can take address of out parameter
	go func() {
		xstdout, _ = io.ReadAll(r1);
		r1.Close();
		c <- true;
	}();
	stderr, _ = io.ReadAll(r2);
	r2.Close();
	<-c;
	<-c;
	stdout = xstdout;

	w, err := os.Wait(pid, 0);
	if err != nil {
		fatal("%s", err);
	}
	ok = w.Exited() && w.ExitStatus() == 0;
	return;
}
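A minimal usage sketch, not from the original source: runExample and the /bin/sh invocation are illustrative only, and assume run and fatal are defined as above.

// runExample pipes a small shell script into /bin/sh via run and
// forwards whatever the child produced to this process's stdout/stderr.
func runExample() {
	stdout, stderr, ok := run(strings.Bytes("echo hello\n"), []string{"/bin/sh"});
	os.Stdout.Write(stdout);
	if !ok {
		os.Stderr.Write(stderr);
	}
}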
// getResponseBody grabs the string out of the response body and closes the
// response's stream.
func getResponseBody(response *http.Response) string {
	var b []byte
	b, _ = io.ReadAll(response.Body)
	response.Body.Close()
	body := string(b)
	return body
}
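A hedged usage sketch: fetchBody is hypothetical and assumes the era's http.Get, which returned the response, the final URL, and an error.

// fetchBody retrieves url and returns its body as a string, or "" on error.
func fetchBody(url string) string {
	r, _, err := http.Get(url)
	if err != nil {
		return ""
	}
	return getResponseBody(r)
}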
func TWITTER_REPLIES(c *http.Conn, req *http.Request) {
	log.Stderrf(">REPLIES:");
	s := session_service.GetSession(c, req);
	for k, v := range s.Data {
		log.Stderrf("session kv:%s:%s", k, v);
	}
	auth_token, atx := s.Data["oauth_token"];
	if atx {
		log.Stderrf("TOKEN FOUND!");
		auth_token_secret := s.Data["oauth_token_secret"];
		r, finalUrl, err := twitter_client.MakeRequest(
			"http://twitter.com/statuses/mentions.json",
			map[string]string{"oauth_token": auth_token},
			auth_token_secret,
			false);	//{"since_id":s.last_reply_id})
		if err != nil {
			log.Stderrf(":REPLIES:err:%s", err);
		} else {
			log.Stderrf(":REPLIES:r:%s:finalUrl:%s", r, finalUrl);
			b, _ := io.ReadAll(r.Body);
			print("REPLIES!");
			str := bytes.NewBuffer(b).String();
			println(str);
			j, ok, errtok := json.StringToJson(str);
			log.Stderrf("REPLIES:j:%s:ok:%s:errtok:%s", j, ok, errtok);
			c.Write(strings.Bytes(j.String()));
		}
	} else {
		log.Stderrf("NO TOKEN FOUND!");
		http.Redirect(c, "/login/twitter?returnto=/twitter/replies", http.StatusFound);	// should be 303 instead of 302?
	}
}
// ParseForm parses the request body as a form for POST requests, or the raw
// query for GET requests. It is idempotent.
func (r *Request) ParseForm() (err os.Error) {
	if r.Form != nil {
		return
	}

	var query string
	switch r.Method {
	case "GET":
		query = r.Url.RawQuery
	case "POST":
		if r.Body == nil {
			return os.ErrorString("missing form body")
		}
		ct, _ := r.Header["Content-Type"]
		switch strings.Split(ct, ";", 2)[0] {
		case "text/plain", "application/x-www-form-urlencoded", "":
			var b []byte
			if b, err = io.ReadAll(r.Body); err != nil {
				return
			}
			query = string(b)
		// TODO(dsymonds): Handle multipart/form-data
		default:
			return &badStringError{"unknown Content-Type", ct}
		}
	}
	r.Form, err = parseForm(query)
	return
}
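A minimal sketch of how a handler might call ParseForm: echoForm and the field name "q" are made up for illustration, and Form is assumed to map field names to slices of values.

// echoForm parses the form and echoes the "q" field back to the client.
func echoForm(c *http.Conn, req *http.Request) {
	if err := req.ParseForm(); err != nil {
		fmt.Fprintf(c, "bad form: %s\n", err)
		return
	}
	if vals, ok := req.Form["q"]; ok && len(vals) > 0 {
		fmt.Fprintf(c, "q=%s\n", vals[0])
	}
}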
func parse_response(r *http.Response) map[string]string {
	b, _ := io.ReadAll(r.Body);
	print("RESULT:");
	s := bytes.NewBuffer(b).String();
	println(s);
	vals := strings.Split(s, "&", 0);
	kvmap := make(map[string]string, len(vals));
	for i := range vals {
		kv := strings.Split(vals[i], "=", 2);
		if len(kv) < 2 {
			// Skip malformed pairs so a bad response (e.g. a 503 error page)
			// does not crash the server.
			continue;
		}
		kvmap[kv[0]] = kv[1];
	}
	// TODO: close r.Body ?
	return kvmap;
}
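A hedged sketch of how parse_response might be used against an OAuth-style token endpoint; requestTokenExample and the URL are placeholders (a real request would need to be signed), and http.Get is assumed to return the response, the final URL, and an error, as elsewhere in this code.

func requestTokenExample() (token, secret string) {
	r, _, err := http.Get("http://twitter.com/oauth/request_token");
	if err != nil {
		return "", "";
	}
	kv := parse_response(r);
	return kv["oauth_token"], kv["oauth_token_secret"];
}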
func TestClient(t *testing.T) {
	// TODO: add a proper test suite. Current test merely verifies that
	// we can retrieve the Google robots.txt file.
	r, _, err := Get("http://www.google.com/robots.txt")
	var b []byte
	if err == nil {
		b, err = io.ReadAll(r.Body)
		r.Body.Close()
	}
	if err != nil {
		t.Error(err)
	} else if s := string(b); !strings.HasPrefix(s, "User-agent:") {
		t.Errorf("Incorrect page body (did not begin with User-agent): %q", s)
	}
}
func TestRunEcho(t *testing.T) {
	cmd, err := Run("/bin/echo", []string{"echo", "hello", "world"}, nil, DevNull, Pipe, DevNull)
	if err != nil {
		t.Fatalf("opencmd /bin/echo: %v", err)
	}
	buf, err := io.ReadAll(cmd.Stdout)
	if err != nil {
		t.Fatalf("reading from /bin/echo: %v", err)
	}
	if string(buf) != "hello world\n" {
		t.Fatalf("reading from /bin/echo: got %q", buf)
	}
	if err = cmd.Close(); err != nil {
		t.Fatalf("closing /bin/echo: %v", err)
	}
}
func testToFromWithLevel(t *testing.T, level int, input []byte, name string) os.Error {
	buffer := bytes.NewBuffer([]byte{})
	w := NewDeflater(buffer, level)
	w.Write(input)
	w.Close()
	inflater := NewInflater(buffer)
	decompressed, err := io.ReadAll(inflater)
	if err != nil {
		t.Errorf("reading inflater: %s", err)
		return err
	}
	inflater.Close()
	if bytes.Compare(input, decompressed) != 0 {
		t.Errorf("decompress(compress(data)) != data: level=%d input=%s", level, name)
	}
	return nil
}
func TestRunCat(t *testing.T) {
	cmd, err := Run("/bin/cat", []string{"cat"}, nil, Pipe, Pipe, DevNull)
	if err != nil {
		t.Fatalf("opencmd /bin/cat: %v", err)
	}
	io.WriteString(cmd.Stdin, "hello, world\n")
	cmd.Stdin.Close()
	buf, err := io.ReadAll(cmd.Stdout)
	if err != nil {
		t.Fatalf("reading from /bin/cat: %v", err)
	}
	if string(buf) != "hello, world\n" {
		t.Fatalf("reading from /bin/cat: got %q", buf)
	}
	if err = cmd.Close(); err != nil {
		t.Fatalf("closing /bin/cat: %v", err)
	}
}
func main() {
	url := "http://twitter.com/statuses/public_timeline.json"

	// Build the request by hand and write it over a raw TCP connection.
	var req http.Request
	req.URL, _ = http.ParseURL(url)
	addr := req.URL.Host
	addr += ":http"
	conn, _ := net.Dial("tcp", "", addr)
	_ = req.Write(conn)

	// Read the response off the same connection, limiting the body
	// to Content-Length bytes when the header is present.
	reader := bufio.NewReader(conn)
	resp, _ := http.ReadResponse(reader)
	r := io.Reader(reader)
	if v := resp.GetHeader("Content-Length"); v != "" {
		n, _ := strconv.Atoi64(v)
		r = io.LimitReader(r, n)
	}
	resp.Body = readClose{r, conn}

	b, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(b))
}
func main() {
	in = bufio.NewReader(os.Stdin)
	three := strings.Bytes(">THREE ")
	for {
		line, err := in.ReadSlice('\n')
		if err != nil {
			fmt.Fprintln(os.Stderr, "ReadLine err:", err)
			os.Exit(2)
		}
		if line[0] == '>' && bytes.Equal(line[0:len(three)], three) {
			break
		}
	}
	data, err := io.ReadAll(in)
	if err != nil {
		fmt.Fprintln(os.Stderr, "ReadAll err:", err)
		os.Exit(2)
	}
	// delete the newlines and convert to upper case
	j := 0
	for i := 0; i < len(data); i++ {
		if data[i] != '\n' {
			data[j] = data[i] &^ ' '	// upper case
			j++
		}
	}
	str := string(data[0:j])

	print(count(str, 1))
	fmt.Print("\n")
	print(count(str, 2))
	fmt.Print("\n")

	interests := []string{"GGT", "GGTA", "GGTATT", "GGTATTTTAATT", "GGTATTTTAATTTATAGT"}
	for _, s := range interests {
		fmt.Printf("%d %s\n", countOne(str, s), s)
	}
}
func main() {
	flag.Usage = usage
	flag.Parse()

	args := flag.Args()
	var data []byte
	var err os.Error
	switch len(args) {
	case 0:
		data, err = io.ReadAll(os.Stdin)
	case 1:
		data, err = io.ReadFile(args[0])
	default:
		usage()
	}
	chk(err)

	pset, err := patch.Parse(data)
	chk(err)

	// Change to hg root directory, because
	// patch paths are relative to root.
	root, err := hgRoot()
	chk(err)
	chk(os.Chdir(root))

	// Make sure there are no pending changes on the server.
	if *checkSync && hgIncoming() {
		fmt.Fprintf(os.Stderr, "incoming changes waiting; run hg sync first\n")
		os.Exit(2)
	}

	// Make sure we won't be editing files with local pending changes.
	dirtylist, err := hgModified()
	chk(err)
	dirty := make(map[string]int)
	for _, f := range dirtylist {
		dirty[f] = 1
	}
	conflict := make(map[string]int)
	for _, f := range pset.File {
		if f.Verb == patch.Delete || f.Verb == patch.Rename {
			if _, ok := dirty[f.Src]; ok {
				conflict[f.Src] = 1
			}
		}
		if f.Verb != patch.Delete {
			if _, ok := dirty[f.Dst]; ok {
				conflict[f.Dst] = 1
			}
		}
	}
	if len(conflict) > 0 {
		fmt.Fprintf(os.Stderr, "cannot apply patch to locally modified files:\n")
		for name := range conflict {
			fmt.Fprintf(os.Stderr, "\t%s\n", name)
		}
		os.Exit(2)
	}

	// Apply changes in memory.
	op, err := pset.Apply(io.ReadFile)
	chk(err)

	// Write changes to disk copy: order of commands matters.
	// Accumulate undo log as we go, in case there is an error.
	// Also accumulate list of modified files to print at end.
	changed := make(map[string]int)

	// Copy, Rename create the destination file, so they
	// must happen before we write the data out.
	// A single patch may have a Copy and a Rename
	// with the same source, so we have to run all the
	// Copy in one pass, then all the Rename.
	for i := range op {
		o := &op[i]
		if o.Verb == patch.Copy {
			makeParent(o.Dst)
			chk(hgCopy(o.Dst, o.Src))
			undoRevert(o.Dst)
			changed[o.Dst] = 1
		}
	}
	for i := range op {
		o := &op[i]
		if o.Verb == patch.Rename {
			makeParent(o.Dst)
			chk(hgRename(o.Dst, o.Src))
			undoRevert(o.Dst)
			undoRevert(o.Src)
			changed[o.Src] = 1
			changed[o.Dst] = 1
		}
	}

	// Run Delete before writing to files in case one of the
	// deleted paths is becoming a directory.
	for i := range op {
		o := &op[i]
		if o.Verb == patch.Delete {
			chk(hgRemove(o.Src))
			undoRevert(o.Src)
			changed[o.Src] = 1
		}
	}

	// Write files.
	for i := range op {
		o := &op[i]
		if o.Verb == patch.Delete {
			continue
		}
		if o.Verb == patch.Add {
			makeParent(o.Dst)
			changed[o.Dst] = 1
		}
		if o.Data != nil {
			chk(io.WriteFile(o.Dst, o.Data, 0644))
			if o.Verb == patch.Add {
				undoRm(o.Dst)
			} else {
				undoRevert(o.Dst)
			}
			changed[o.Dst] = 1
		}
		if o.Mode != 0 {
			chk(os.Chmod(o.Dst, o.Mode&0755))
			undoRevert(o.Dst)
			changed[o.Dst] = 1
		}
	}

	// hg add looks at the destination file, so it must happen
	// after we write the data out.
	for i := range op {
		o := &op[i]
		if o.Verb == patch.Add {
			chk(hgAdd(o.Dst))
			undoRevert(o.Dst)
			changed[o.Dst] = 1
		}
	}

	// Finished editing files. Write the list of changed files to stdout.
	list := make([]string, len(changed))
	i := 0
	for f := range changed {
		list[i] = f
		i++
	}
	sort.SortStrings(list)
	for _, f := range list {
		fmt.Printf("%s\n", f)
	}
}
func Tokenize(input io.Reader) string {
	bytedata, err := io.ReadAll(input);
	if err != nil {
		//panic
	}
	data := string(bytedata);

	// Step through each character. As soon as a regexp is recognized, turn on
	// a flag. Keep stepping until no regexps are recognized, then take a step
	// back and tokenize that lexeme.
	matches := false;
	tokenized := "";	// where all the final tokens get concatenated together
	lexeme := "";
	lines := strings.Split(data, "\n", 0);
	//	for _, line := range lines {
	//		fmt.Printf("%s\n", line);
	//	}
	var indentDefines = map[int]int{
		0: 0,
	};
	biggest := 0;
	for _, line := range lines {
		// do something about indentation here
		spaces := numLeadingSpaces(line);
		var indent string;
		indent, indentDefines, biggest = calcIndent(spaces, indentDefines, biggest);
		tokenized = fmt.Sprintf("%s%s", tokenized, indent);
		//fmt.Printf("\n\n%s\n\n", tokenized);
		scan := new(Scanner);
		scan.data = line;
		for {
			trail, still := scan.Step();
			if !still {
				// at the last character; do something with it or forget it forever :)
				tokenized = cat(tokenized, fmt.Sprintf("%s ", tokenizeLexeme(lexeme)));
				//fmt.Printf("adding %s\n", fmt.Sprintf("%s ", tokenizeLexeme(lexeme)));
				lexeme = "";
				break
			}
			lexeme = cat(lexeme, trail);
			// TODO: this is a crude solution: make it better?
			if lexeme == " " {
				lexeme = "";
				continue
			}
			//fmt.Printf("lexeme='%s'\n", lexeme);
			if doesMatch(lexeme) {
				// it matches a regexp
				matches = true;
				//fmt.Printf("%s match...\n", lexeme);
			} else {
				// no match
				if matches == true {
					//fmt.Printf("woop! lost it. Take a step back...\n");
					matches = false;
					scan.StepBack();
					lexeme = trim(lexeme, 1);
					tokenized = cat(tokenized, fmt.Sprintf("%s ", tokenizeLexeme(lexeme)));
					//fmt.Printf("adding %s\n", fmt.Sprintf("%s ", tokenizeLexeme(lexeme)));
					lexeme = "";
				}
			}
		}
		tokenized = fmt.Sprintf("%s\n", tokenized);
		//break
	}
	return tokenized;
}
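A small, hypothetical driver for Tokenize: tokenizeExample and the two-line input are invented here, and it simply prints whatever token stream Tokenize produces for an in-memory buffer.

func tokenizeExample() {
	src := bytes.NewBuffer(strings.Bytes("x = 1\nprint x\n"));
	fmt.Print(Tokenize(src));
}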
// Tests that compressing and then decompressing the given file at the given
// compression level yields equivalent bytes to the original file.
func testFileLevel(t *testing.T, fn string, level int) {
	// Read the file, as golden output.
	golden, err := os.Open(fn, os.O_RDONLY, 0444)
	if err != nil {
		t.Errorf("%s (level=%d): %v", fn, level, err)
		return
	}
	defer golden.Close()

	// Read the file again, and push it through a pipe that compresses
	// at the write end, and decompresses at the read end.
	raw, err := os.Open(fn, os.O_RDONLY, 0444)
	if err != nil {
		t.Errorf("%s (level=%d): %v", fn, level, err)
		return
	}
	piper, pipew := io.Pipe()
	defer piper.Close()
	go func() {
		defer raw.Close()
		defer pipew.Close()
		zlibw, err := NewDeflaterLevel(pipew, level)
		if err != nil {
			t.Errorf("%s (level=%d): %v", fn, level, err)
			return
		}
		defer zlibw.Close()
		var b [1024]byte
		for {
			n, err0 := raw.Read(&b)
			if err0 != nil && err0 != os.EOF {
				t.Errorf("%s (level=%d): %v", fn, level, err0)
				return
			}
			_, err1 := zlibw.Write(b[0:n])
			if err1 == os.EPIPE {
				// Fail, but do not report the error, as some other
				// (presumably reportable) error broke the pipe.
				return
			}
			if err1 != nil {
				t.Errorf("%s (level=%d): %v", fn, level, err1)
				return
			}
			if err0 == os.EOF {
				break
			}
		}
	}()
	zlibr, err := NewInflater(piper)
	if err != nil {
		t.Errorf("%s (level=%d): %v", fn, level, err)
		return
	}
	defer zlibr.Close()

	// Compare the two.
	b0, err0 := io.ReadAll(golden)
	b1, err1 := io.ReadAll(zlibr)
	if err0 != nil {
		t.Errorf("%s (level=%d): %v", fn, level, err0)
		return
	}
	if err1 != nil {
		t.Errorf("%s (level=%d): %v", fn, level, err1)
		return
	}
	if len(b0) != len(b1) {
		t.Errorf("%s (level=%d): length mismatch %d versus %d", fn, level, len(b0), len(b1))
		return
	}
	for i := 0; i < len(b0); i++ {
		if b0[i] != b1[i] {
			t.Errorf("%s (level=%d): mismatch at %d, 0x%02x versus 0x%02x\n", fn, level, i, b0[i], b1[i])
			return
		}
	}
}
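A hedged driver sketch: TestFiles and the file paths below are assumptions, not part of the original test, and simply loop testFileLevel over a few inputs at every compression level.

func TestFiles(t *testing.T) {
	// Paths are placeholders; substitute whatever test data is available.
	files := []string{"testdata/e.txt", "testdata/pi.txt"}
	for _, fn := range files {
		for level := 0; level <= 9; level++ {
			testFileLevel(t, fn, level)
		}
	}
}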