func send(req *http.Request) (resp *http.Response, err os.Error) {
	addr := req.URL.Host
	if !hasPort(addr) {
		addr += ":http"
	}
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	err = req.Write(conn)
	if err != nil {
		conn.Close()
		return nil, err
	}
	reader := bufio.NewReader(conn)
	resp, err = http.ReadResponse(reader, req.Method)
	if err != nil {
		conn.Close()
		return nil, err
	}
	r := io.Reader(reader)
	if n := resp.ContentLength; n != -1 {
		r = io.LimitReader(r, n)
	}
	resp.Body = readClose{r, conn}
	return
}

// Accept starts a new SMTP session using io.ReadWriteCloser
func Accept(remoteAddress string, conn io.ReadWriteCloser, storage storage.Storage, messageChan chan *data.Message, hostname string, monkey monkey.ChaosMonkey) {
	defer conn.Close()

	proto := smtp.NewProtocol()
	proto.Hostname = hostname

	var link *linkio.Link
	reader := io.Reader(conn)
	writer := io.Writer(conn)

	if monkey != nil {
		linkSpeed := monkey.LinkSpeed()
		if linkSpeed != nil {
			link = linkio.NewLink(*linkSpeed * linkio.BytePerSecond)
			reader = link.NewLinkReader(io.Reader(conn))
			writer = link.NewLinkWriter(io.Writer(conn))
		}
	}

	session := &Session{conn, proto, storage, messageChan, remoteAddress, false, "", link, reader, writer, monkey}
	proto.LogHandler = session.logf
	proto.MessageReceivedHandler = session.acceptMessage
	proto.ValidateSenderHandler = session.validateSender
	proto.ValidateRecipientHandler = session.validateRecipient
	proto.ValidateAuthenticationHandler = session.validateAuthentication
	proto.GetAuthenticationMechanismsHandler = func() []string { return []string{"PLAIN"} }

	session.logf("Starting session")
	session.Write(proto.Start())
	for session.Read() == true {
		if monkey != nil && monkey.Disconnect != nil && monkey.Disconnect() {
			session.conn.Close()
			break
		}
	}

	session.logf("Session ended")
}

func send(req *http.Request) (resp *http.Response, err error) {
	addr := req.URL.Host
	if !hasPort(addr) {
		addr += ":http"
	}
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	err = req.Write(conn)
	if err != nil {
		conn.Close()
		return nil, err
	}
	reader := bufio.NewReader(conn)
	resp, err = http.ReadResponse(reader, req)
	if err != nil {
		conn.Close()
		return nil, err
	}
	r := io.Reader(reader)
	if v := resp.Header["Content-Length"]; v != nil {
		n, err := strconv.Atoi(v[0])
		if err != nil {
			return nil, &badStringError{"invalid Content-Length", v[0]}
		}
		v := int64(n)
		r = io.LimitReader(r, v)
	}
	resp.Body = readClose{r, conn}
	return
}

func main() {
	stdout := os.Stdout
	os.Stdout = os.Stderr

	executable, err := exec.LookPath("dd")
	if err != nil {
		fmt.Printf("ddp: failed to find dd: %s\n", err)
		os.Exit(1)
	}

	// Create pipe attached to a reader:
	output, input, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// Setup process with _the_ three file descriptors:
	files := []*os.File{
		os.Stdin,
		stdout,
		input,
	}

	process, err := os.StartProcess(executable, os.Args, &os.ProcAttr{
		Files: files,
	})
	if err != nil {
		fmt.Printf("ddp: failed to start dd: %s\n", err)
		os.Exit(1)
	}

	Trap(process)

	target := GuessTargetSize(os.Args)
	bar := pb.New64(target)
	bar.SetUnits(pb.U_BYTES)
	bar.ShowSpeed = true
	bar.Output = os.Stderr

	started := false
	OutputScanner(io.Reader(output), os.Stderr, func(bytes int64) {
		if !started {
			started = true
			bar.Start()
		}
		bar.Set64(bytes)
	})

	Interrupter(process, pb.DEFAULT_REFRESH_RATE)

	state, err := process.Wait()
	if err != nil {
		panic(err)
	}
	if started && state.Success() {
		bar.Finish()
	}
	output.Close()
	if !state.Success() {
		os.Exit(1)
	}
}

func (d *Decrypter) InitReader(fileReader io.Reader) (io.Reader, error) {
	iv := make([]byte, aes.BlockSize)
	n, err := fileReader.Read(iv)
	if err != nil {
		return io.Reader(d), err
	} else if n != aes.BlockSize {
		return io.Reader(d), fmt.Errorf("Encrypted file is too small")
	}
	decStream, err := getDecryptStream(d.passphrase, iv)
	if err != nil {
		return io.Reader(d), err
	}
	d.streamReader = &cipher.StreamReader{S: decStream, R: fileReader}
	return io.Reader(d), nil
}

func getCounter(name string, node int64) uint64 {
	var counter uint64
	var offset int64
	fd, err := os.Open(name, os.O_RDONLY, 0666)
	if err == nil {
		defer fd.Close()
		// node 1 is the first node
		offset = (node - 1) * 8
		_, seekerr := fd.Seek(offset, 0)
		if seekerr == nil {
			ior := io.Reader(fd)
			if err := binary.Read(ior, binary.LittleEndian, &counter); err != nil {
				fmt.Printf("binary.Read failed: %s\n", err)
			} else {
				fd.Close()
				return counter
			}
		} else {
			fmt.Printf("Seek error for node %d to offset %d\n", node, offset)
			fd.Close()
		}
	} else {
		cantOpenMessageCounterFile(name)
	}
	return 0
}

func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tarReader := tar.NewReader(in)
		tarWriter := tar.NewWriter(pw)
		defer in.Close()
		hasRootFS := false
		for {
			hdr, err := tarReader.Next()
			if err == io.EOF {
				if !hasRootFS {
					pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
					return
				}
				// Signals end of archive.
				tarWriter.Close()
				pw.Close()
				return
			}
			if err != nil {
				pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
				return
			}
			content := io.Reader(tarReader)
			name := path.Clean(hdr.Name)
			if path.IsAbs(name) {
				name = name[1:]
			}
			if name == configFileName {
				dt, err := ioutil.ReadAll(content)
				if err != nil {
					pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
					return
				}
				*config = dt
			}
			if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
				hdr.Name = path.Clean(path.Join(parts[1:]...))
				if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
					hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
				}
				if err := tarWriter.WriteHeader(hdr); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
					return
				}
				if _, err := pools.Copy(tarWriter, content); err != nil {
					pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
					return
				}
				hasRootFS = true
			} else {
				io.Copy(ioutil.Discard, content)
			}
		}
	}()
	return pr
}

func getQueue(url string, dumpFlag bool) ([]Queue, error) {
	client := NewClient(url)
	res, err := client.get("queue/api/json")
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if dumpFlag == true {
		buf := new(bytes.Buffer)
		buf.ReadFrom(res.Body)
		fmt.Println(buf.String())
		return nil, nil
	}

	var r struct {
		QueueItems []Queue `json:"items"`
	}
	err = json.NewDecoder(io.Reader(res.Body)).Decode(&r)
	if err != nil {
		return nil, err
	}
	return r.QueueItems, nil
}

func (s *Server) acceptUDP() {
	// Looping for new messages
	for {
		buf := make([]byte, MAX_MSG_SIZE)
		n, adr, err := s.udpsock.ReadFrom(buf)
		if s.comm.running {
			if err != nil {
				log.Error("Error while reading UDP (read %d) from %s: %s\n", n, adr, err)
			} else {
				abcon := net.Conn(s.udpsock)
				connection := NewConnection(s.comm.pool, P_UDP, D_Inbound, abcon)
				read := io.Reader(bytes.NewBuffer(buf))
				msg := s.comm.NewMessage()
				msg.connection = connection
				err := msg.readMessage(read)
				if err != nil {
					log.Error("Couldn't handle message received from UDP because of errors: %s %s\n", msg, err)
				} else {
					go s.comm.handleMessage(msg)
				}
			}
		} else {
			log.Info("Dropping connection because communications have been paused")
		}
	}
}

func (t taxTree) loadNames(fname string, dict map[int]int) error {
	namesf, eopen := os.OpenFile(fname, os.O_RDONLY, 0644)
	defer namesf.Close()
	if eopen != nil {
		fmt.Fprintf(os.Stderr, "file doesn't exist %s\n", fname)
		return eopen
	}
	b := bufio.NewReader(io.Reader(namesf))
	for {
		line, _, err := b.ReadLine()
		if err == io.EOF {
			return nil
		}
		if pos := bytes.Index(line, []byte("scientific name")); pos == -1 {
			continue
		}
		gi, name, e := parsename([]byte(line)[0 : len(line)-2]) // HINT: ends in "\t|"
		if e != nil {
			return e
		}
		t[dict[gi]].Name = make([]byte, len(name))
		copy(t[dict[gi]].Name, name)
	}
	return nil
}

// Run the cat command with the given arguments.
//
// The name of the stream to send data to is required. Any other arguments are
// filenames that should be sent line-by-line into Kinesis. If no files are
// passed, data is sent from Stdin.
func runCat(args []string) {
	if len(args) < 1 {
		log.Fatalln("error: no stream name given")
	}

	stream := args[0]
	inputFiles := args[1:]

	reader := io.Reader(os.Stdin)
	if len(inputFiles) > 0 {
		reader = openFiles(inputFiles)
	}

	scanner := bufio.NewScanner(reader)

	p := producer.New(stream)
	p.Debug = envBool(VERBOSE)

	for scanner.Scan() {
		line := scanner.Text()
		if len(line) > 0 {
			fatalOnErr(p.PutString(line))
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatalln("error:", err)
	}

	fatalOnErr(p.Flush())
}

//////GENOME TRAINING/////////////////////////////////////////////////////////////////

//Trains a genome according to the contents of a directory
func (genome *Genome) TrainOnDir(dir string) (err os.Error) {
	//open the directory
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}
	fmt.Println("analyzing", len(files), "files")
	for fileNum, fileDesc := range files {
		if fileDesc.IsRegular() {
			file, err := os.Open(strings.Join([]string{dir, fileDesc.Name}, "/"), os.O_RDONLY, 0666)
			if err != nil {
				fmt.Println("oops!!", err)
			}
			fReader := io.Reader(file)
			data, err := ioutil.ReadAll(fReader)
			genome.trainer.Train(data)
			fmt.Println("on file", fileNum)
			file.Close()
		}
	}
	genes := genome.trainer.GetGenes()
	for _, gene := range genes {
		genome.addGene(Gene(gene))
	}
	return
}

func main() {
	flag.Parse()

	if flag.NArg() != 1 {
		fmt.Fprintln(os.Stderr, "Usage: distance vectors.bin")
		os.Exit(1)
	}

	f, err := os.Open(flag.Arg(0))
	defer f.Close()
	if err != nil {
		log.Fatal(err)
	}

	embeds, err := go2vec.ReadWord2VecBinary(bufio.NewReader(io.Reader(f)), true)
	if err != nil {
		log.Fatal(err)
	}

	scanner := bufio.NewScanner(os.Stdin)
	scanner.Split(bufio.ScanWords)
	for scanner.Scan() {
		token := scanner.Text()
		results, err := embeds.Similarity(token, 10)
		if err != nil {
			// Use Fprintln so the error actually goes to stderr rather than
			// printing the stderr handle itself to stdout.
			fmt.Fprintln(os.Stderr, err.Error())
			os.Exit(1)
		}
		for _, wordSimilarity := range results {
			fmt.Println(wordSimilarity.Word, wordSimilarity.Similarity)
		}
	}
}

func addTagsToFile(file *os.File, tags []string) (success bool) {
	file.Seek(0, 0) // rewind to start with
	r, err := ioutil.ReadAll(io.Reader(file))
	if err != nil {
		log.Printf("ERROR: %s\n", err)
		return
	}
	loc := bytes.Index(r, gitScissorMarker)
	if loc < 0 {
		if _, err := file.Seek(0, 2); err != nil { // seek to end of file
			log.Printf("ERROR: %s\n", err)
			return
		}
	} else {
		if _, err := file.Seek(int64(loc), 0); err != nil { // seek to scissor line then truncate the file here
			log.Printf("ERROR: %s\n", err)
			return
		}
		if err := file.Truncate(int64(loc + 1)); err != nil {
			log.Printf("ERROR: %s\n", err)
			return
		}
	}
	fmt.Fprintln(file, "") // blank line first to ensure it's not stuck to the summary
	for _, tag := range tags {
		fmt.Fprintln(file, tag)
	}
	return true
}

func linkParser(page_chan chan string) <-chan string {
	link_chan := make(chan string)
	go func() {
		for page := range page_chan {
			//page := <-page_chan
			page_bytes := bytes.NewBufferString(page)
			d := html.NewTokenizer(io.Reader(page_bytes))
			for {
				tokenType := d.Next()
				if tokenType == html.ErrorToken {
					fmt.Println("\nFinished to parse page")
					break
				}
				token := d.Token()
				switch tokenType {
				case html.StartTagToken:
					if strings.EqualFold(token.Data, "A") {
						for _, a := range token.Attr {
							if strings.EqualFold(a.Key, "HREF") {
								link_chan <- a.Val
							}
						}
					}
				}
			}
		}
		close(link_chan)
	}()
	return link_chan
}

func send(req *http.Request) (resp *http.Response, err os.Error) {
	addr := req.URL.Host
	if !hasPort(addr) {
		addr += ":http"
	}
	conn, err := net.Dial("tcp", "", addr)
	if err != nil {
		return nil, err
	}
	err = req.Write(conn)
	if err != nil {
		conn.Close()
		return nil, err
	}
	reader := bufio.NewReader(conn)
	resp, err = http.ReadResponse(reader, "GET")
	if err != nil {
		conn.Close()
		return nil, err
	}
	r := io.Reader(reader)
	if v := resp.GetHeader("Content-Length"); v != "" {
		n, err := strconv.Atoi64(v)
		if err != nil {
			// return nil, &badStringError{"invalid Content-Length", v}
		}
		r = io.LimitReader(r, n)
	}
	resp.Body = readClose{r, conn}
	return
}

func getJobs(targetUrl string, dumpFlag bool) ([]Job, error) {
	client := NewClient(targetUrl)
	res, err := client.get("api/json?depth=1")
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		// err is nil at this point, so report the unexpected status explicitly.
		return nil, fmt.Errorf("unexpected status code %d", res.StatusCode)
	}

	if dumpFlag == true {
		buf := new(bytes.Buffer)
		buf.ReadFrom(res.Body)
		fmt.Println(buf.String())
		return nil, nil
	}

	var r struct {
		Jobs []Job `json:"jobs"`
	}
	err = json.NewDecoder(io.Reader(res.Body)).Decode(&r)
	if err != nil {
		return nil, err
	}
	return r.Jobs, nil
}

func StoreGet(w http.ResponseWriter, req *http.Request) {
	split := strings.Split(req.URL.Path, "/")
	if len(split) == 4 {
		folder := split[2]
		filename := split[3]
		file, err := os.Open(repo + "/" + folder + "/" + filename)
		if err == nil {
			defer file.Close()
			io.Copy(w, io.Reader(file))
		} else {
			NotFoundHandler(w, req)
		}
	} else {
		NotFoundHandler(w, req)
	}
}

func getJobInfo(url string, jobName string, dumpFlag bool) ([]JobBuildInfo, error) {
	client := NewClient(url)
	res, err := client.get(fmt.Sprintf("job/%s/api/json?depth=1", jobName))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if dumpFlag == true {
		buf := new(bytes.Buffer)
		buf.ReadFrom(res.Body)
		fmt.Println(buf.String())
		return nil, nil
	}

	var r struct {
		JobInfo []JobBuildInfo `json:"builds"`
	}
	err = json.NewDecoder(io.Reader(res.Body)).Decode(&r)
	if err != nil {
		return nil, err
	}
	return r.JobInfo, nil
}

func (st *SimpleTasks) PostTask(c context.Context, reqUrl string, params string) error {
	if strings.HasPrefix(reqUrl, "/done") {
		st.done <- reqUrl
		return nil
	}
	print("posting task ", reqUrl, "\n")
	body := io.Reader(nil)
	if params != "" {
		body = strings.NewReader(url.Values{"json": []string{params}}.Encode())
	}
	req, _ := http.NewRequest("POST", reqUrl, body)
	if params != "" {
		req.Header["Content-Type"] = []string{"application/x-www-form-urlencoded"}
	}
	st.group.Add(1)
	go func() {
		defer st.group.Done()
		print("running task ", reqUrl, "\n")
		w := httptest.NewRecorder()
		st.handler.ServeHTTP(w, req)
		if w.Code != 200 {
			// w.Code is an int, so format it with %d.
			fmt.Printf("Got bad response code %d for url %s\n", w.Code, reqUrl)
		}
		print("done running task ", reqUrl, "\n")
	}()
	return nil
}

func main() {
	var s sample
	r := io.Reader(os.Stdin)
	w := io.Writer(os.Stdout)
	records := make(map[string]uint64, 16384)
	backtraces := make(map[string][]uint64, 1024)
	/* ignore the documentation, it's wrong, first word must be zero.
	 * the perl code that figures word length depends on it.
	 */
	hdr := hdr{0, 3, 0, 10000, 0}
	trailer := trailer{0, 1, 0}
	start := uint64(0)
	end := start
	nsamples := end
	for binary.Read(r, binary.LittleEndian, &s) == nil {
		numpcs := int(s.Wordcount)
		bt := make([]uint64, numpcs)
		binary.Read(r, binary.LittleEndian, &bt)
		//fmt.Printf("%v\n", bt)
		record := ""
		/* Fix the symbols. pprof was unhappy about the 0xfffffff.
		 * N.B. The fact that we have to mess with the bt values
		 * is the reason we did not write a stringer for bt.
		 */
		for i := range bt {
			bt[i] = bt[i] & ((uint64(1) << 32) - 1)
			record = record + fmt.Sprintf("0x%x ", bt[i])
		}
		records[record]++
		backtraces[record] = bt
		//fmt.Printf("%v %d %d %x %v record %v\n", s, s.Wordcount, s.Coreid, s.Ns, bt, record)
		/* how sad, once we go to ticks this gets ugly. */
		if start == 0 {
			start = s.Ns
		}
		end = s.Ns
		nsamples++
	}
	/* we'll need to fix this once we go to ticks. */
	hdr.Period = (end - start) / nsamples
	hdr.Count = uint64(0) // !@$@!#$!@#$len(records))
	//fmt.Printf("start %v end %v nsamples %d period %d\n", start, end, nsamples, hdr.Period)
	binary.Write(w, binary.LittleEndian, &hdr)
	out := make([]uint64, 2)
	/* note that the backtrace length varies. But we're good with that. */
	for key, v := range records {
		bt := backtraces[key]
		out[0] = v
		out[1] = uint64(len(bt))
		dump := append(out, bt...)
		//fmt.Printf("dump %v\n", dump)
		binary.Write(w, binary.LittleEndian, &dump)
	}
	binary.Write(w, binary.LittleEndian, &trailer)
}

func serveConn(clientConn net.Conn, remoteaddr string, connNumber int) {
	defer clientConn.Close()

	clientAddr := clientConn.RemoteAddr().String()
	logPrefix := fmt.Sprintf("[%d]", connNumber)
	logger := log.New(os.Stderr, logPrefix+" ", log.Ldate|log.Ltime|log.Lmicroseconds)
	logger.Printf("Client connected from %v", clientAddr)
	logger.Printf("Creating relay to server %v", remoteaddr)

	serverConn, err := net.Dial("tcp", remoteaddr)
	if err != nil {
		logger.Printf("Error connecting to server: %v", err)
		return
	}
	defer serverConn.Close()
	logger.Print("Connected to server")

	// clientReader reads data sent from the client.
	clientReader := io.Reader(clientConn)

	if *recordBase != "" {
		recordFilename := fmt.Sprintf("%s-%d.record", *recordBase, connNumber)
		recordOutput, err := os.Create(recordFilename)
		if err != nil {
			logger.Printf(
				"Failed to open file %q to record connection: %v",
				recordFilename, err)
		} else {
			recorder := record.NewReaderRecorder(recordOutput, clientConn)
			defer recorder.Close()
			clientReader = recorder
		}
	}

	clientParser := new(MessageParser)
	serverParser := new(MessageParser)

	// Set up for parsing messages from server to client
	scLogger := log.New(os.Stderr, logPrefix+"(S->C) ", log.Ldate|log.Ltime|log.Lmicroseconds)
	serverToClientReportChan := spliceParser(
		func(reader io.Reader) { serverParser.ScParse(reader, scLogger) },
		clientConn, serverConn)

	// Set up for parsing messages from client to server
	csLogger := log.New(os.Stderr, logPrefix+"(C->S) ", log.Ldate|log.Ltime|log.Lmicroseconds)
	clientToServerReportChan := spliceParser(
		func(reader io.Reader) { clientParser.CsParse(reader, csLogger) },
		serverConn, clientReader)

	// Wait for the both relay/splices to stop, then we let the connections
	// close via deferred calls
	report := <-serverToClientReportChan
	logger.Printf("Server->client relay after %d bytes with error: %v", report.written, report.err)
	report = <-clientToServerReportChan
	logger.Printf("Client->server relay after %d bytes with error: %v", report.written, report.err)

	logger.Print("Client disconnected")
}

func TestClone(t *testing.T) {
	var c1 Config
	v := reflect.ValueOf(&c1).Elem()

	rnd := rand.New(rand.NewSource(time.Now().Unix()))
	typ := v.Type()
	for i := 0; i < typ.NumField(); i++ {
		f := v.Field(i)
		if !f.CanSet() {
			// unexported field; not cloned.
			continue
		}

		// testing/quick can't handle functions or interfaces.
		fn := typ.Field(i).Name
		switch fn {
		case "Rand":
			f.Set(reflect.ValueOf(io.Reader(os.Stdin)))
			continue
		case "Time", "GetCertificate", "GetConfigForClient", "VerifyPeerCertificate", "GetClientCertificate":
			// DeepEqual can't compare functions.
			continue
		case "Certificates":
			f.Set(reflect.ValueOf([]Certificate{
				{Certificate: [][]byte{{'b'}}},
			}))
			continue
		case "NameToCertificate":
			f.Set(reflect.ValueOf(map[string]*Certificate{"a": nil}))
			continue
		case "RootCAs", "ClientCAs":
			f.Set(reflect.ValueOf(x509.NewCertPool()))
			continue
		case "ClientSessionCache":
			f.Set(reflect.ValueOf(NewLRUClientSessionCache(10)))
			continue
		case "KeyLogWriter":
			f.Set(reflect.ValueOf(io.Writer(os.Stdout)))
			continue
		}

		q, ok := quick.Value(f.Type(), rnd)
		if !ok {
			t.Fatalf("quick.Value failed on field %s", fn)
		}
		f.Set(q)
	}

	c2 := c1.Clone()
	// DeepEqual also compares unexported fields, thus c2 needs to have run
	// serverInit in order to be DeepEqual to c1. Cloning it and discarding
	// the result is sufficient.
	c2.Clone()

	if !reflect.DeepEqual(&c1, c2) {
		t.Errorf("clone failed to copy a field")
	}
}

func main() {
	flag.Parse()
	if flag.NArg() == 0 {
		fmt.Println("No input file to process")
		os.Exit(1)
	}
	file_name := flag.Arg(0)

	vm := gelo.NewVM(extensions.Stdio)
	defer vm.Destroy()

	vm.RegisterBundle(gelo.Core)
	vm.RegisterBundles(commands.All)

	if !*no_prelude {
		prelude, err := os.Open("prelude.gel")
		defer prelude.Close()
		check("Could not open prelude.gel", err)
		_, err = vm.Run(prelude, nil)
		check("Could not load prelude", err)
	}

	file, err := os.Open(file_name)
	defer file.Close()
	check("Could not open: "+file_name, err)

	reader := io.Reader(file)
	if *lit || file_name[len(file_name)-3:] == "lit" {
		reader = NewLiterateReader(reader)
		t := make([]byte, 64)
		for {
			n, err := reader.Read(t)
			fmt.Println(string(t), "n:", n, "err", err == nil)
		}
	}

	tracer := extensions.Stderr
	if *logit {
		out, err := os.Create(flag.Arg(0) + ".log")
		defer out.Close()
		check("Could not create log file", err)
		logger := extensions.Logger(out, log.Ldate|log.Ltime)
		tracer = extensions.Tee(tracer, logger)
	}
	gelo.SetTracer(tracer)
	if *trace || *logit {
		gelo.TraceOn(gelo.All_traces)
	}

	ret, err := vm.Run(reader, flag.Args()[1:])
	check("===PROGRAM=ERROR===", err)
	vm.API.Trace("The ultimate result of the program was", ret)
}

// Some assertions around filehandle's applicability
func TestTypes(t *testing.T) {
	_ = os.FileInfo(&FileHandle{})
	_ = io.Closer(&FileHandle{})
	_ = io.Reader(&FileHandle{})
	_ = io.ReaderAt(&FileHandle{})
	_ = io.WriterTo(&FileHandle{})
	_ = io.Seeker(&FileHandle{})
}

func TestReadUserInput(t *testing.T) {
	input := "this is a test\r\n"
	is := InputStub{value: input}
	result := UserInput(io.Reader(is))
	if result != input[0:len(input)-2] {
		t.Errorf("UserInput failed. Output: %v", result)
	}
}

func (self *Salt) do(urls string) (*http.Response, error) {
	u, err := url.Parse(urls)
	self.Request.URL = u

	if self.Query == nil {
		self.Query = url.Values{}
	}
	for k, v := range self.Request.URL.Query() {
		self.Query[k] = v
	}
	self.Request.URL.RawQuery = self.Query.Encode()

	//post data
	if self.Data != nil {
		self.Request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		// [fix] I have a hunch that this part will cause some weird bug
		body := io.Reader(strings.NewReader(self.Data.Encode()))
		rc, ok := body.(io.ReadCloser)
		if !ok && body != nil {
			rc = ioutil.NopCloser(body)
		}
		self.Request.Body = rc
		// [fix] if data exist, then POST method must be used
		self.Request.Method = "Post"
	}

	// user requested to override Host field in request header
	// this action should be honored, user are expected to handle the error
	host := self.Request.Header.Get("Host")
	if !strings.EqualFold("", host) {
		self.Request.Host = host
		fmt.Println(self.Request.Host)
	}

	//send the request now
	tic := time.Now()
	ret, err := self.client.Do(self.Request)
	if err != nil {
		self.Response = nil
		return nil, err
	}
	toc := time.Now()
	self.ResponseTime = toc.Sub(tic)

	self.Raw, err = ioutil.ReadAll(ret.Body)
	defer ret.Body.Close()
	if err != nil {
		self.Response = nil
		return nil, err
	}
	self.Text = string(self.Raw)

	self.Request = ret.Request
	self.Data = nil
	self.Query = nil
	self.Response = ret
	return ret, nil
}

func TestVersionTooShort(t *testing.T) {
	d := decoder{}
	r := io.Reader(strings.NewReader("too short"))
	d.version(&r)
	if d.err == nil {
		t.Fatalf("Too short version failed")
	}
}

func TestTempoTooShort(t *testing.T) {
	d := decoder{}
	r := io.Reader(strings.NewReader("xx"))
	d.tempo(&r)
	if d.err == nil {
		t.Fatalf("Too short tempo failed")
	}
}

// List the contents below the given location.
func List(ustr string) (ListResult, error) {
	result := ListResult{}

	inputUrl, err := url.Parse(ustr)
	if err != nil {
		return result, err
	}
	inputUrl.Path = "/.cbfs/list" + inputUrl.Path
	for strings.HasSuffix(inputUrl.Path, "/") {
		inputUrl.Path = inputUrl.Path[:len(inputUrl.Path)-1]
	}
	if inputUrl.Path == "/.cbfs/list" {
		inputUrl.Path = "/.cbfs/list/"
	}
	inputUrl.RawQuery = "includeMeta=true"

	req, err := http.NewRequest("GET", inputUrl.String(), nil)
	if err != nil {
		return result, err
	}
	req.Header.Set("Accept-Encoding", "gzip")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return result, err
	}
	defer res.Body.Close()

	switch res.StatusCode {
	case 404:
		return result, fourOhFour
	case 200:
		// ok
	default:
		return result, fmt.Errorf("Error in request to %v: %v", inputUrl, res.Status)
	}

	r := io.Reader(res.Body)
	if res.Header.Get("Content-Encoding") == "gzip" {
		gzr, err := gzip.NewReader(res.Body)
		if err != nil {
			return result, err
		}
		r = gzr
	}

	d := json.NewDecoder(r)
	err = d.Decode(&result)
	if err != nil {
		return result, err
	}
	return result, nil
}