func handleConn(conn net.Conn) { defer conn.Close() log.Println("connected from:", conn.RemoteAddr()) dst, err := dialer.Dial("tcp", *dstAddr) if err != nil { log.Println(err) return } defer dst.Close() wg := &sync.WaitGroup{} wg.Add(2) go func(wg *sync.WaitGroup) { buf := bufferPool.Get() _, err = io.CopyBuffer(conn, dst, buf.([]byte)) if err != nil { log.Println(err) } bufferPool.Put(buf) wg.Done() }(wg) go func(wg *sync.WaitGroup) { buf := bufferPool.Get() _, err = io.CopyBuffer(dst, conn, buf.([]byte)) if err != nil { log.Println(err) } bufferPool.Put(buf) wg.Done() }(wg) wg.Wait() }
func forward(lconn net.Conn, raddr string) { if err := exec.Command("wakeonlan", "18:66:DA:17:A2:95").Run(); err != nil { glog.Warningf("exec wakeonlan error: %+v", err) } glog.Infof("try connect to %+v for %+v forwarding", raddr, lconn.RemoteAddr()) rconn, err := net.Dial("tcp", raddr) if err != nil { glog.Errorf("net.Dial(%#v) error: %v", raddr, err) return } glog.Infof("forward %+v to %+v", lconn.RemoteAddr(), rconn.RemoteAddr()) go func() { buf := bufpool.Get().([]byte) defer bufpool.Put(buf) defer rconn.Close() defer lconn.Close() io.CopyBuffer(rconn, lconn, buf) }() go func() { buf := bufpool.Get().([]byte) defer bufpool.Put(buf) defer lconn.Close() defer rconn.Close() io.CopyBuffer(lconn, rconn, buf) }() }
func (p *Proxy) proxy(w http.ResponseWriter, r *http.Request) { if p.daemon.VM == nil { w.WriteHeader(http.StatusServiceUnavailable) w.Write([]byte("The virtual machine has not been started")) return } addr, err := p.daemon.VM.Address() if err != nil { w.WriteHeader(http.StatusServiceUnavailable) w.Write([]byte("Unable to locate the virtual machine")) return } backend, err := net.DialTCP("tcp", nil, addr) if err != nil { w.WriteHeader(http.StatusServiceUnavailable) w.Write([]byte("Unable to connect to the virtual machine")) return } defer backend.Close() r.URL.Scheme = "http" r.URL.Host = fmt.Sprintf("%s:%d", addr.IP.String(), addr.Port) hijacker, ok := w.(http.Hijacker) if !ok { w.WriteHeader(http.StatusServiceUnavailable) w.Write([]byte("Unable to create hijacker")) return } conn, _, err := hijacker.Hijack() if err != nil { w.WriteHeader(http.StatusServiceUnavailable) w.Write([]byte("Unable to hijack connection")) return } r.Write(backend) finished := make(chan error, 1) go func(backend *net.TCPConn, conn net.Conn, finished chan error) { buf := make([]byte, 8092) _, err := io.CopyBuffer(backend, conn, buf) backend.CloseWrite() finished <- err }(backend, conn, finished) go func(backend *net.TCPConn, conn net.Conn, finished chan error) { buf := make([]byte, 8092) _, err := io.CopyBuffer(conn, backend, buf) conn.Close() finished <- err }(backend, conn, finished) <-finished <-finished }
func TestBigFile(t *testing.T) { src := testutil.CreateDummyBuf(147611) dst := &bytes.Buffer{} srcEnc := &bytes.Buffer{} wEnc, err := encrypt.NewWriter(srcEnc, TestKey) if err != nil { t.Errorf("Cannot create write-encryption layer: %v", err) return } if err := wEnc.Close(); err != nil { t.Errorf("Cannot close write-encryption layer: %v", err) return } wDec, err := encrypt.NewReader(bytes.NewReader(srcEnc.Bytes()), TestKey) if err != nil { t.Errorf("Cannot create read-encryption layer: %v", err) return } // Act a bit like the fuse layer: lay := NewLayer(wDec) lay.Truncate(0) bufSize := 128 * 1024 if _, err := io.CopyBuffer(lay, bytes.NewReader(src), make([]byte, bufSize)); err != nil { t.Errorf("Could not encrypt data") return } lay.Truncate(int64(len(src))) if _, err := lay.Seek(0, os.SEEK_SET); err != nil { t.Errorf("Seeking to 0 in big file failed: %v", err) return } n, err := io.CopyBuffer(dst, lay, make([]byte, bufSize)) if err != nil { t.Errorf("Could not copy big file data over overlay: %v", err) return } if n != int64(len(src)) { t.Errorf("Did not fully copy big file: got %d, should be %d bytes", n, len(src)) return } if !bytes.Equal(dst.Bytes(), src) { t.Errorf("Source and destination buffers differ.") return } }
func testDeterministic(i int, t *testing.T) { t.Parallel() // Test so much we cross a good number of block boundaries. var length = maxStoreBlockSize*30 + 500 if testing.Short() { length /= 10 } // Create a random, but compressible stream. rng := rand.New(rand.NewSource(1)) t1 := make([]byte, length) for i := range t1 { t1[i] = byte(rng.Int63() & 7) } // Do our first encode. var b1 bytes.Buffer br := bytes.NewBuffer(t1) w, err := NewWriter(&b1, i) if err != nil { t.Fatal(err) } // Use a very small prime sized buffer. cbuf := make([]byte, 787) _, err = io.CopyBuffer(w, struct{ io.Reader }{br}, cbuf) if err != nil { t.Fatal(err) } w.Close() // We choose a different buffer size, // bigger than a maximum block, and also a prime. var b2 bytes.Buffer cbuf = make([]byte, 81761) br2 := bytes.NewBuffer(t1) w2, err := NewWriter(&b2, i) if err != nil { t.Fatal(err) } _, err = io.CopyBuffer(w2, struct{ io.Reader }{br2}, cbuf) if err != nil { t.Fatal(err) } w2.Close() b1b := b1.Bytes() b2b := b2.Bytes() if !bytes.Equal(b1b, b2b) { t.Errorf("level %d did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b)) } }
// In performance critical applications, Reset can be used to discard the // current compressor or decompressor state and reinitialize them quickly // by taking advantage of previously allocated memory. func Example_reset() { proverbs := []string{ "Don't communicate by sharing memory, share memory by communicating.\n", "Concurrency is not parallelism.\n", "The bigger the interface, the weaker the abstraction.\n", "Documentation is for users.\n", } var r strings.Reader var b bytes.Buffer buf := make([]byte, 32<<10) zw, err := flate.NewWriter(nil, flate.DefaultCompression) if err != nil { log.Fatal(err) } zr := flate.NewReader(nil) for _, s := range proverbs { r.Reset(s) b.Reset() // Reset the compressor and encode from some input stream. zw.Reset(&b) if _, err := io.CopyBuffer(zw, &r, buf); err != nil { log.Fatal(err) } if err := zw.Close(); err != nil { log.Fatal(err) } // Reset the decompressor and decode to some output stream. if err := zr.(flate.Resetter).Reset(&b, nil); err != nil { log.Fatal(err) } if _, err := io.CopyBuffer(os.Stdout, zr, buf); err != nil { log.Fatal(err) } if err := zr.Close(); err != nil { log.Fatal(err) } } // Output: // Don't communicate by sharing memory, share memory by communicating. // Concurrency is not parallelism. // The bigger the interface, the weaker the abstraction. // Documentation is for users. }
func (z *unzipper) unzip(f *zip.File) error { if f.FileInfo().IsDir() { return nil } fName := filepath.Join(z.dst, f.Name) dir, _ := filepath.Split(fName) if err := os.MkdirAll(dir, perm); err != nil && os.IsNotExist(err) { return err } r, err := f.Open() if err != nil { return err } defer r.Close() w, err := os.Create(filepath.Join(z.dst, f.Name)) if err != nil { return err } defer w.Close() if _, err := io.CopyBuffer(w, r, z.buffer); err != nil { w.Close() return err } if err := r.Close(); err != nil { return err } return w.Close() }
// zipper.walk gets called for each file in given directory tree func (z *zipper) walk(path string, info os.FileInfo, err error) error { if err != nil { return err } if !info.Mode().IsRegular() || info.Size() == 0 { return nil } file, err := os.Open(path) if err != nil { return err } defer file.Close() fileName := strings.TrimPrefix(path, z.src+string(filepath.Separator)) w, err := z.writer.Create(fileName) if err != nil { return err } _, err = io.CopyBuffer(w, file, z.buffer) return err }
// hashCopyBuffer is identical to hashCopyN except that it stages // through the provided buffer (if one is required) rather than // allocating a temporary one. If buf is nil, one is allocated for 5MiB. func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) { // MD5 and SHA256 hasher. var hashMD5, hashSHA256 hash.Hash // MD5 and SHA256 hasher. hashMD5 = md5.New() hashWriter := io.MultiWriter(writer, hashMD5) if c.signature.isV4() { hashSHA256 = sha256.New() hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256) } // Allocate buf if not initialized. if buf == nil { buf = make([]byte, optimalReadBufferSize) } // Using io.CopyBuffer to copy in large buffers, default buffer // for io.Copy of 32KiB is too small. size, err = io.CopyBuffer(hashWriter, reader, buf) if err != nil { return nil, nil, 0, err } // Finalize md5 sum and sha256 sum. md5Sum = hashMD5.Sum(nil) if c.signature.isV4() { sha256Sum = hashSHA256.Sum(nil) } return md5Sum, sha256Sum, size, err }
// Verify returns nil or an error describing the mismatch between the block // list and actual reader contents func Verify(r io.Reader, blocksize int, blocks []protocol.BlockInfo) error { hf := sha256.New() // A 32k buffer is used for copying into the hash function. buf := make([]byte, 32<<10) for i, block := range blocks { lr := &io.LimitedReader{R: r, N: int64(blocksize)} _, err := io.CopyBuffer(hf, lr, buf) if err != nil { return err } hash := hf.Sum(nil) hf.Reset() if !bytes.Equal(hash, block.Hash) { return fmt.Errorf("hash mismatch %x != %x for block %d", hash, block.Hash, i) } } // We should have reached the end now bs := make([]byte, 1) n, err := r.Read(bs) if n != 0 || err != io.EOF { return fmt.Errorf("file continues past end of blocks") } return nil }
func handleArticle(c *v1Conn, line string, hooks EventHooks) (err error) { msgid := MessageID(line[8:]) if msgid.Valid() && c.storage.HasArticle(msgid.String()) == nil { // valid id and we have it var r io.ReadCloser var buff [1024]byte r, err = c.storage.OpenArticle(msgid.String()) if err == nil { err = c.printfLine("%s %s", RPL_Article, msgid) for err == nil { _, err = io.CopyBuffer(c.C.W, r, buff[:]) } if err == io.EOF { err = nil } if err == nil { err = c.printfLine(".") } r.Close() return } } // invalid id or we don't have it err = c.printfLine("%s %s", RPL_NoArticleMsgID, msgid) return }
func (t *tarmonster) walk(path string, info os.FileInfo, err error) error { if err != nil { return err } if !info.Mode().IsRegular() || info.Size() == 0 { return nil } file, err := os.Open(path) if err != nil { return err } defer file.Close() // Get tar.Header fih, err := tar.FileInfoHeader(info, "") if err != nil { return err } fih.Name = strings.TrimPrefix(path, t.src+string(filepath.Separator)) // Begin a new file if err := t.writer.WriteHeader(fih); err != nil { return err } // Write the file if _, err := io.CopyBuffer(t.writer, file, t.buffer); err != nil { return err } return err }
// AppendFile - append a byte array at path, if file doesn't exist at // path this call explicitly creates it. func (s *posix) AppendFile(volume, path string, buf []byte) (err error) { defer func() { if err == syscall.EIO { atomic.AddInt32(&s.ioErrCount, 1) } }() if s.ioErrCount > maxAllowedIOError { return errFaultyDisk } // Create file if not found w, err := s.createFile(volume, path) if err != nil { return err } // Close upon return. defer w.Close() bufp := s.pool.Get().(*[]byte) // Reuse buffer. defer s.pool.Put(bufp) // Return io.Copy _, err = io.CopyBuffer(w, bytes.NewReader(buf), *bufp) return err }
func copyZeroAlloc(w io.Writer, r io.Reader) (int64, error) { vbuf := copyBufPool.Get() buf := vbuf.([]byte) n, err := io.CopyBuffer(w, r, buf) copyBufPool.Put(vbuf) return n, err }
// get ftp-file and parse to lines func (v *FTPConn) FTPFile(filename *ftp.Entry) (good bool, errOut error) { rd, err := v.ServerConn.Retr(filename.Name) if err != nil { errOut = err CLog.PrintLog(true, "Error getting the file: ", filename.Name, ". ", errOut) return false, errOut } reader := bufio.NewReader(rd) err = nil fiDesc, err := os.Create(filename.Name) if err != nil { errOut = err CLog.PrintLog(true, "Error creating the file: ", filename.Name, ". ", errOut) rd.Close() return false, errOut } err = nil buf := make([]byte, bufferSize) writer := bufio.NewWriter(fiDesc) writeBytes, err := io.CopyBuffer(writer, reader, buf) //writeBytes, err := io.Copy(writer, reader) if err != nil || uint64(writeBytes) != filename.Size { errOut = err CLog.PrintLog(true, "Error writing the file: ", filename.Name, ". ", errOut) rd.Close() return false, errOut } writer.Flush() rd.Close() return true, errOut }
func doCopy(source string, dest string, createReader createReaderFunc, createWriter createWriterFunc) (err error) { sf, err := os.Open(source) if err != nil { return err } defer sf.Close() reader := createReader(sf) df, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { return err } var writer io.Writer defer func() { if writer != nil { if flusher, ok := writer.(Flusher); ok { flusher.Flush() } } df.Close() }() writer = createWriter(df) buffer := make([]byte, ioBufferSize) _, err = io.CopyBuffer(writer, reader, buffer) if err != nil && err != io.EOF { return err } err = df.Sync() if err != nil { return err } return nil }
func writeBodyFixedSize(w *bufio.Writer, r io.Reader, size int) error { vbuf := copyBufPool.Get() if vbuf == nil { vbuf = make([]byte, 4096) } buf := vbuf.([]byte) vlr := limitReaderPool.Get() if vlr == nil { vlr = &io.LimitedReader{} } lr := vlr.(*io.LimitedReader) lr.R = r lr.N = int64(size) n, err := io.CopyBuffer(w, lr, buf) limitReaderPool.Put(vlr) copyBufPool.Put(vbuf) if n != int64(size) && err == nil { err = fmt.Errorf("read %d bytes from BodyStream instead of %d bytes", n, size) } return err }
func checkForCorrectFile(t *testing.T, path string, data []byte) bool { // Try to read it over fuse: helloBuffer := &bytes.Buffer{} fd, err := os.Open(path) if err != nil { t.Errorf("Unable to open simple file over fuse: %v", err) return false } defer func() { if err := fd.Close(); err != nil { t.Errorf("Unable to close simple file over fuse: %v", err) } }() n, err := io.CopyBuffer(helloBuffer, fd, make([]byte, 128*1024)) if err != nil { t.Errorf("Unable to read full simple file over fuse: %v", err) return false } if n != int64(len(data)) { t.Errorf("Data differs over fuse: got %d, should be %d bytes", n, len(data)) return false } if !bytes.Equal(helloBuffer.Bytes(), data) { t.Errorf("Data from simple file does not match source. Len: %d", len(data)) t.Errorf("\tExpected: %v", data) t.Errorf("\tGot: %v", helloBuffer.Bytes()) return false } return true }
// XServerProxy creates a TCP proxy on port 6000 to a the Unix // socket that XQuartz is listening on. // // NOTE: this function does not start/install the XQuartz service func XServerProxy(port int) { if runtime.GOOS != "darwin" { log.Debug("Not running an OSX environment, skip run X Server Proxy") return } l, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) if err != nil { log.Fatal(err) } defer l.Close() // Send all traffic back to unix $DISPLAY socket on a running XQuartz server addr, err := net.ResolveUnixAddr("unix", os.Getenv("DISPLAY")) if err != nil { log.Error("Error: ", err.Error()) } log.Info("X Service Proxy available on all network interfaces on port %d", port) if host, err := utils.DockerVMHost(); err == nil { log.Info("Parity has detected your Docker environment and recommends running 'export DISPLAY=%s:0' in your container to forward the X display", host) } for { xServerClient, err := net.DialUnix("unix", nil, addr) if err != nil { log.Error("Error: ", err.Error()) } defer xServerClient.Close() conn, err := l.Accept() log.Debug("X Service Proxy connected to client on: %s (remote: %s)", conn.LocalAddr(), conn.RemoteAddr()) if err != nil { log.Fatal(err) } go func(c net.Conn, s *net.UnixConn) { buf := make([]byte, 8092) io.CopyBuffer(s, c, buf) s.CloseWrite() }(conn, xServerClient) go func(c net.Conn, s *net.UnixConn) { buf := make([]byte, 8092) io.CopyBuffer(c, s, buf) c.Close() }(conn, xServerClient) } }
// UnpackModpack downloads mods and unpacks file contents. func UnpackModpack(fname string) { z, ze := zip.OpenReader(fname) util.Must(ze) defer util.MustClose(z) info := new(modpackmanifest) var zf *zip.File ov := "overrides/" buf := make([]byte, 32*1024) for _, zf = range z.File { if zf.Name == "manifest.json" { ozf, oze := zf.Open() util.Must(oze) util.Must(json.NewDecoder(ozf).Decode(info)) util.MustClose(ozf) break } } if info != nil { ov = info.Overrides + "/" for i, xf := range info.Files { pid := strconv.FormatInt(int64(xf.PID), 10) pkg := GetPackage(pid) if pkg == nil { fmt.Printf("Package with ID %s is missing!\n", pid) continue } fmt.Printf("%d / %d ", i, len(info.Files)) pkg.DownloadFileWithID(strconv.FormatInt(int64(xf.FID), 10), buf) } } lov := len(ov) for _, zf = range z.File { if len(zf.Name) < lov { continue } n := zf.Name[:lov] if n == ov { n = zf.Name[lov:] if n == "" { continue } if zf.FileInfo().IsDir() { util.Must(util.MkDirIfNotExist(n)) } else { xf, xe := zf.Open() util.Must(xe) fmt.Printf("Unpacking %#v...\n", n) pr := util.NewReadProgress(xf, zf.UncompressedSize64) f, fe := os.Create(n) util.Must(fe) _, ce := io.CopyBuffer(f, pr, buf) util.Must(ce) util.MustClose(f) util.MustClose(xf) pr.Close() } } } }
func (r *stringReader) Discard(n int) (int, error) { r.limRd = io.LimitedReader{R: r, N: int64(n)} n64, err := io.CopyBuffer(ioutil.Discard, &r.limRd, r.scratch[:]) if err == nil && n64 < int64(n) { err = io.EOF } return int(n64), err }
// Decrypt is a utility function which decrypts the data from source with key
// and writes the resulting decrypted data to dest. (The original comment
// incorrectly said "encrypted data".) It returns the number of bytes
// written to dest and the first error encountered, if any.
func Decrypt(key []byte, source io.Reader, dest io.Writer) (int64, error) {
	// Wrap source in a decrypting reader keyed with key.
	layer, err := NewReader(source, key)
	if err != nil {
		return 0, err
	}
	// Stream through a dedicated buffer instead of io.Copy's default 32KiB one.
	return io.CopyBuffer(dest, layer, make([]byte, defaultDecBufferSize))
}
func throughput(b *testing.B, totalBytes int64, dynamicRecordSizingDisabled bool) { ln := newLocalListener(b) defer ln.Close() N := b.N // Less than 64KB because Windows appears to use a TCP rwin < 64KB. // See Issue #15899. const bufsize = 32 << 10 go func() { buf := make([]byte, bufsize) for i := 0; i < N; i++ { sconn, err := ln.Accept() if err != nil { // panic rather than synchronize to avoid benchmark overhead // (cannot call b.Fatal in goroutine) panic(fmt.Errorf("accept: %v", err)) } serverConfig := testConfig.Clone() serverConfig.CipherSuites = nil // the defaults may prefer faster ciphers serverConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled srv := Server(sconn, serverConfig) if err := srv.Handshake(); err != nil { panic(fmt.Errorf("handshake: %v", err)) } if _, err := io.CopyBuffer(srv, srv, buf); err != nil { panic(fmt.Errorf("copy buffer: %v", err)) } } }() b.SetBytes(totalBytes) clientConfig := testConfig.Clone() clientConfig.CipherSuites = nil // the defaults may prefer faster ciphers clientConfig.DynamicRecordSizingDisabled = dynamicRecordSizingDisabled buf := make([]byte, bufsize) chunks := int(math.Ceil(float64(totalBytes) / float64(len(buf)))) for i := 0; i < N; i++ { conn, err := Dial("tcp", ln.Addr().String(), clientConfig) if err != nil { b.Fatal(err) } for j := 0; j < chunks; j++ { _, err := conn.Write(buf) if err != nil { b.Fatal(err) } _, err = io.ReadFull(conn, buf) if err != nil { b.Fatal(err) } } conn.Close() } }
func ExampleCopyBuffer() { r1 := strings.NewReader("first reader\n") r2 := strings.NewReader("second reader\n") buf := make([]byte, 8) // buf is used here... if _, err := io.CopyBuffer(os.Stdout, r1, buf); err != nil { log.Fatal(err) } // ... reused here also. No need to allocate an extra buffer. if _, err := io.CopyBuffer(os.Stdout, r2, buf); err != nil { log.Fatal(err) } // Output: // first reader // second reader }
// readMetaData reads meta data according to RFC section 9.2. func (br *Reader) readMetaData() { rd := io.LimitReader(&br.rd, int64(br.blkLen)) br.metaBuf = extendUint8s(br.metaBuf, 4096) // Lazy allocate if cnt, err := io.CopyBuffer(br.metaWr, rd, br.metaBuf); err != nil { panic(err) } else if cnt < int64(br.blkLen) { panic(io.ErrUnexpectedEOF) } br.readBlockHeader() }
func BenchmarkReadWriter(b *testing.B) { buf := newWriter(nil) data, _ := ioutil.ReadAll(io.LimitReader(rand.Reader, 32*1024)) temp := make([]byte, 32*1024) for i := 0; i < b.N; i++ { buf.Write(data) io.CopyBuffer(ioutil.Discard, buf, temp) } b.ReportAllocs() }
// Test if errors from the underlying writer is passed upwards. func TestWriteError(t *testing.T) { t.Parallel() buf := new(bytes.Buffer) n := 65536 if !testing.Short() { n *= 4 } for i := 0; i < n; i++ { fmt.Fprintf(buf, "asdasfasf%d%dfghfgujyut%dyutyu\n", i, i, i) } in := buf.Bytes() // We create our own buffer to control number of writes. copyBuffer := make([]byte, 128) for l := 0; l < 10; l++ { for fail := 1; fail <= 256; fail *= 2 { // Fail after 'fail' writes ew := &errorWriter{N: fail} w, err := NewWriter(ew, l) if err != nil { t.Fatalf("NewWriter: level %d: %v", l, err) } n, err := io.CopyBuffer(w, struct{ io.Reader }{bytes.NewBuffer(in)}, copyBuffer) if err == nil { t.Fatalf("Level %d: Expected an error, writer was %#v", l, ew) } n2, err := w.Write([]byte{1, 2, 2, 3, 4, 5}) if n2 != 0 { t.Fatal("Level", l, "Expected 0 length write, got", n) } if err == nil { t.Fatal("Level", l, "Expected an error") } err = w.Flush() if err == nil { t.Fatal("Level", l, "Expected an error on flush") } err = w.Close() if err == nil { t.Fatal("Level", l, "Expected an error on close") } w.Reset(ioutil.Discard) n2, err = w.Write([]byte{1, 2, 3, 4, 5, 6}) if err != nil { t.Fatal("Level", l, "Got unexpected error after reset:", err) } if n2 == 0 { t.Fatal("Level", l, "Got 0 length write, expected > 0") } if testing.Short() { return } } } }
// downloadBillToWriter downloads the reconciliation (billing) statement
// to an io.Writer. It POSTs the XML-encoded request to the WeChat Pay
// downloadbill endpoint and streams the response to writer, unless the
// response is recognizable as an XML error payload, in which case that
// error is returned instead.
func downloadBillToWriter(writer io.Writer, req map[string]string, httpClient *http.Client) (written int64, err error) { if httpClient == nil { httpClient = http.DefaultClient } buf := make([]byte, 32*1024) /* same size as the default inside io.copyBuffer; note reqBuf below aliases this buffer's storage and buf is reused for reading the response only after the request has been fully sent */ reqBuf := bytes.NewBuffer(buf[:0]) if err = util.EncodeXMLFromMap(reqBuf, req, "xml"); err != nil { return } httpResp, err := httpClient.Post("https://api.mch.weixin.qq.com/pay/downloadbill", "text/xml; charset=utf-8", reqBuf) if err != nil { return } defer httpResp.Body.Close() if httpResp.StatusCode != http.StatusOK { err = fmt.Errorf("http.Status: %s", httpResp.Status) return } n, err := io.ReadFull(httpResp.Body, buf) switch { case err == nil: /* n == len(buf): body is larger than one buffer, so treat it as the statement itself rather than an XML error message */ written, err = bytes.NewReader(buf).WriteTo(writer); if err != nil { return }; var n2 int64; n2, err = io.CopyBuffer(writer, httpResp.Body, buf); written += n2; return case err == io.ErrUnexpectedEOF: /* short body: may be an XML error document */ readBytes := buf[:n]; if index := bytes.Index(readBytes, downloadBillErrorRootNodeStartElement); index != -1 { if bytes.Contains(readBytes[index+len(downloadBillErrorRootNodeStartElement):], downloadBillErrorReturnCodeNodeStartElement) { /* looks like an error payload, try to parse it as XML */ var result core.Error; if err = xml.Unmarshal(readBytes, &result); err == nil { err = &result; return }; /* if err != nil fall through to the default action: write the bytes out */ } }; return bytes.NewReader(readBytes).WriteTo(writer) case err == io.EOF: /* empty body */ err = nil; return default: /* any other read error */ return } }
func walk(path string, info os.FileInfo, inErr error) (err error) { if inErr != nil { return inErr } if !info.Mode().IsRegular() { return } shouldRemove := false file, err := os.Open(path) if err != nil { return } defer func() { file.Close() if shouldRemove { err = os.Remove(path) } }() crc32Hash.Reset() md5Hash.Reset() sha1Hash.Reset() md5TeeReader := io.TeeReader(file, md5Hash) sha1TeeReader := io.TeeReader(md5TeeReader, sha1Hash) io.CopyBuffer(crc32Hash, sha1TeeReader, copyBuffer) var key FileSum key.size = info.Size() key.crc32Sum = crc32Hash.Sum32() copy(key.md5Sum[:], md5Hash.Sum(nil)) copy(key.sha1Sum[:], sha1Hash.Sum(nil)) fmt.Printf("path: %s\r\nsize: %d bytes\r\ncrc32: %x\r\nmd5: %s\r\nsha1: %s\r\n\r\n", path, key.size, key.crc32Sum, hex.EncodeToString(key.md5Sum[:]), hex.EncodeToString(key.sha1Sum[:]), ) if pathx, ok := FileSumSet[key]; ok { shouldRemove = true fmt.Printf("%s 与 %s 重复, 将被移除\r\n", path, pathx) } else { FileSumSet[key] = path } return }
// deliver mail to this maildir
//
// Deliver writes body into the maildir using the classic tmp/ -> new/
// sequence: pick a filename not already present under tmp/, write the
// full message there, then symlink it into new/ so readers only ever
// observe complete messages.
// NOTE(review): this chdirs into the maildir for the duration and back
// in a defer — the working directory is process-global state; confirm
// no concurrent deliveries depend on it.
func (d MailDir) Deliver(body io.Reader) (err error) { var oldwd string; oldwd, err = os.Getwd(); if err == nil { /* no error getting working directory, let's begin; when done chdir to previous directory */ defer func() { err := os.Chdir(oldwd); if err != nil { glog.Fatal("chdir failed", err) } }(); /* chdir to maildir */ err = os.Chdir(d.String()); if err == nil { fname := d.File(); /* probe for a tmp filename not already in use, sleeping between attempts */ for { _, err = os.Stat(d.Temp(fname)); if os.IsNotExist(err) { break }; time.Sleep(time.Second * 2); fname = d.File() }; /* set err to nil (it currently holds the expected not-exist error) */ err = nil; var f *os.File; /* create tmp file */ f, err = os.Create(d.Temp(fname)); if err == nil { /* success creation */ err = f.Close() }; /* try writing file */ if err == nil { f, err = os.OpenFile(d.Temp(fname), os.O_CREATE|os.O_WRONLY, 0600); if err == nil { /* write body (nil buffer lets io.CopyBuffer allocate its own) */ _, err = io.CopyBuffer(f, body, nil); f.Close(); if err == nil { /* now symlink into new/ */ err = os.Symlink(filepath.Join("tmp", fname), filepath.Join("new", fname)); /* if err is nil it's delivered */ } } } } }; return }