func main() { _, err := OutFunc() if err != nil { log.Printf("error is : %v", err) } fmt.Printf("End of main") data := []int{0, 1, 2, 3, 4} s1 := data[:2] fmt.Printf("data ptr is %p, s1 ptr is %p", &data, &s1) s2 := append(s1, 100, 200) fmt.Println("\n", data) fmt.Println(s1) fmt.Println(s2) fmt.Printf("data ptr is %p, s1 ptr is %p, s2 ptr is %p \n", &data, &s1, &s2) var pool *sync.Pool pool = new(sync.Pool) pool.New = func() interface{} { return 1 } //pool.Put(1) i := pool.Get() if i == nil { fmt.Println("pool.Get is non-block function. nil could be returned. not cool") } else { fmt.Println("number is ", i) } }
func Gzip(level int) gin.HandlerFunc { var gzPool sync.Pool gzPool.New = func() interface{} { gz, err := gzip.NewWriterLevel(ioutil.Discard, level) if err != nil { panic(err) } return gz } return func(c *gin.Context) { if !shouldCompress(c.Request) { return } gz := gzPool.Get().(*gzip.Writer) defer gzPool.Put(gz) gz.Reset(c.Writer) c.Header("Content-Encoding", "gzip") c.Header("Vary", "Accept-Encoding") c.Writer = &gzipWriter{c.Writer, gz} defer func() { c.Header("Content-Length", "0") gz.Close() }() c.Next() } }
func (p person) dine(wg *sync.WaitGroup, foodPool *sync.Pool) { for { if !foodRemains() { break } potentialDish := foodPool.Get() if potentialDish == nil { fmt.Printf("%s recieved a nil dish\n", p.Name) break } someDish, ok := potentialDish.(dish) if !ok { fmt.Printf("%s was unable to turn a potential dish into a real dish\n", p.Name) continue } p.consume(&someDish) if someDish.Bites <= 0 { fmt.Printf("%s finished %s\n", p.Name, someDish.Name) } else { foodPool.Put(someDish) } } wg.Done() }
func NewByteArraySize(pool *sync.Pool, size int) []byte { if v := pool.Get(); v != nil { ba := v.([]byte) return ba } return make([]byte, size) }
func (s *Server) ReadSocket(packetPool *sync.Pool) { // each goroutine gets its own socket // if the sockets support SO_REUSEPORT, then this will cause the // kernel to distribute datagrams across them, for better read // performance s.logger.WithField("address", s.UDPAddr).Info("UDP server listening") serverConn, err := NewSocket(s.UDPAddr, s.RcvbufBytes) if err != nil { // if any goroutine fails to create the socket, we can't really // recover, so we just blow up // this probably indicates a systemic issue, eg lack of // SO_REUSEPORT support s.logger.WithError(err).Fatal("Error listening for UDP") } for { buf := packetPool.Get().([]byte) n, _, err := serverConn.ReadFrom(buf) if err != nil { s.logger.WithError(err).Error("Error reading from UDP") continue } s.HandlePacket(buf[:n]) // the Metric struct created by HandlePacket has no byte slices in it, // only strings // therefore there are no outstanding references to this byte slice, we // can return it to the pool packetPool.Put(buf) } }
// Example use of a pool to manage a free list of numbers.
// The // Output block below is checked by `go test`, so the two Printf
// calls must produce exactly those lines.
func ExampleNumberPool_1() {
	// Create a Context with 128-bit decimal settings.
	ctx := dec.NewContext(dec.InitDecimal128, 0)

	// New() function for the pool to create new numbers sized for ctx.
	newFunc := func() interface{} { return dec.NewNumber(ctx.Digits()) }

	// create a pool. Either dec.Pool or sync.Pool will do
	syncPool := sync.Pool{New: newFunc}

	// We can use Get().(*dec.Number) to get new or reusable numbers
	number := syncPool.Get().(*dec.Number)
	fmt.Printf("from sync.Pool: %s\n", number.Zero())

	// We're done with it, put it back in the pool
	syncPool.Put(number)

	// Or, wrap it with a NumberPool so that Get() returns *Number instead
	// of interface{}. NumberPool also helps keeping track of the context.
	pool := &dec.NumberPool{&syncPool, ctx}
	// and benefit: no need to type-cast
	number = pool.Get()
	// Idiomatic pattern: defer Put() of the *Number right after Get().
	defer pool.Put(number)
	fmt.Printf("from sync.Pool: %s\n", number.FromString("1243", pool.Context))

	// Output:
	// from sync.Pool: 0
	// from sync.Pool: 1243
}
// Middleware encodes the response using Gzip encoding and sets all the appropriate // headers. If the Content-Type is not set, it will be set by calling // http.DetectContentType on the data being written. func Middleware(level int) goa.Middleware { gzipPool := sync.Pool{ New: func() interface{} { gz, err := gzip.NewWriterLevel(ioutil.Discard, level) if err != nil { panic(err) } return gz }, } return func(h goa.Handler) goa.Handler { return func(ctx *goa.Context) (err error) { r := ctx.Request() // Skip compression if the client doesn't accept gzip encoding, is // requesting a WebSocket or the data is already compressed. if !strings.Contains(r.Header.Get(headerAcceptEncoding), encodingGzip) || len(r.Header.Get(headerSecWebSocketKey)) > 0 || ctx.Header().Get(headerContentEncoding) == encodingGzip { return h(ctx) } // Set the appropriate gzip headers. ctx.Header().Set(headerContentEncoding, encodingGzip) ctx.Header().Set(headerVary, headerAcceptEncoding) // Retrieve gzip writer from the pool. Reset it to use the ResponseWriter. // This allows us to re-use an already allocated buffer rather than // allocating a new buffer for every request. gz := gzipPool.Get().(*gzip.Writer) // Get the original http.ResponseWriter w := ctx.SetResponseWriter(nil) // Reset our gzip writer to use the http.ResponseWriter gz.Reset(w) // Wrap the original http.ResponseWriter with our gzipResponseWriter grw := gzipResponseWriter{ ResponseWriter: w, gzw: gz, } // Set the new http.ResponseWriter ctx.SetResponseWriter(grw) // Call the next handler supplying the gzipResponseWriter instead of // the original. err = h(ctx) if err != nil { return } // Delete the content length after we know we have been written to. grw.Header().Del(headerContentLength) gz.Close() gzipPool.Put(gz) return } } }
// TODO func NewBufioReaderSize(pool *sync.Pool, r io.Reader, size int) *bufio.Reader { if v := pool.Get(); v != nil { br := v.(*bufio.Reader) br.Reset(r) return br } return bufio.NewReaderSize(r, size) }
func NewBufioWriterSize(pool *sync.Pool, w io.Writer, size int) *bufio.Writer { if v := pool.Get(); v != nil { bw := v.(*bufio.Writer) bw.Reset(w) return bw } return bufio.NewWriterSize(w, size) }
// BenchmarkAllocSyncPool measures the round-trip cost of Put followed by
// Get on a sync.Pool holding a small byte slice.
func BenchmarkAllocSyncPool(b *testing.B) {
	var p sync.Pool
	s := make([]byte, 10)
	for n := 0; n < b.N; n++ {
		p.Put(s)
		s = p.Get().([]byte)
	}
}
// BenchmarkRuntimeCallersSyncPool measures runtime.Callers when the pc
// buffer is recycled through a sync.Pool instead of reallocated.
func BenchmarkRuntimeCallersSyncPool(b *testing.B) {
	pool := sync.Pool{New: func() interface{} { return make([]uintptr, 32) }}
	for i := 0; i < b.N; i++ {
		pcs := pool.Get().([]uintptr)
		runtime.Callers(0, pcs[:])
		// FIX: removed the original `pcs = pcs[0:]`, which reslices the
		// full slice onto itself — a no-op that changed nothing before Put.
		pool.Put(pcs)
	}
}
func newWriterLevel(pool *sync.Pool, w io.Writer, level int) *gzip.Writer { if v := pool.Get(); v != nil { zw := v.(*gzip.Writer) zw.Reset(w) return zw } zw, _ := gzip.NewWriterLevel(w, level) return zw }
func main() { var pool sync.Pool var a = 1 pool.Put(a) pool.Put(new(User)) fmt.Println(pool.Get()) runtime.GC() fmt.Println(pool.Get()) fmt.Println(pool.Get()) }
// main runs a minimal TCP server on :8000 that answers each connection
// with an HTTP response whose body is a global connection counter,
// recycling read buffers and bufio writers through two sync.Pools.
func main() {
	var rp sync.Pool // pool of *[]byte read buffers
	var wp sync.Pool // pool of *bufio.Writer
	// Pre-seed the read-buffer pool with 1024 buffers of 2048 bytes each.
	for i := 0; i < 1024; i++ {
		rb := new([]byte)
		*rb = make([]byte, 2048)
		rp.Put(rb)
	}
	var counter uint32
	log.Fatal(nsk.ListenAndServe(":8000", func(conn *net.TCPConn) {
		// All locals are declared up front because the `goto E` below is
		// not allowed to jump over new variable declarations.
		var s string
		var c uint32
		var rb *[]byte
		var w *bufio.Writer
		// Reuse a pooled writer if possible, otherwise allocate one.
		if v := wp.Get(); v != nil {
			w = v.(*bufio.Writer)
			w.Reset(conn)
		} else {
			w = bufio.NewWriter(conn)
		}
		// Same for the read buffer.
		if v := rp.Get(); v != nil {
			rb = v.(*[]byte)
		} else {
			rb = new([]byte)
			*rb = make([]byte, 2048)
		}
		n, err := conn.Read(*rb)
		if err != nil || n <= 0 {
			goto E // skip the response but still close and recycle below
		}
		c = atomic.AddUint32(&counter, 1)
		s = strconv.FormatUint(uint64(c), 10)
		w.WriteString("HTTP/1.1 200 OK\r\n")
		w.WriteString("Connection: close\r\n")
		w.WriteString(fmt.Sprintf("Content-Length: %d\r\n\r\n", len(s)))
		w.WriteString(s)
		w.Flush()
	E: // common exit: close the connection, return both resources to their pools
		conn.Close()
		rp.Put(rb)
		wp.Put(w)
	}))
}
// benchPool benchmarks parallel Get/Put round trips on a pool whose New
// allocates an int slice with capacity i.
func benchPool(i int, b *testing.B) {
	pool := sync.Pool{New: func() interface{} {
		return make([]int, 0, i)
	}}
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// Reslice to zero length before putting back, as a caller would.
			scratch := pool.Get().([]int)[:0]
			pool.Put(scratch)
		}
	})
}
// BenchmarkAllocSyncPool round-trips a small byte slice through a pool,
// falling back to a fresh allocation whenever the type assertion fails.
func BenchmarkAllocSyncPool(b *testing.B) {
	var p sync.Pool
	s := make([]byte, 10)
	for i := 0; i < b.N; i++ {
		p.Put(s)
		got, ok := p.Get().([]byte)
		if !ok {
			got = make([]byte, 10)
		}
		s = got
	}
}
// main demonstrates how sync.Pool interacts with the garbage collector
// and with its New field, using a counter so each New call is visible.
func main() {
	// Disable GC, and make sure it is restored before main returns.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var count int32
	newFunc := func() interface{} {
		return atomic.AddInt32(&count, 1)
	}
	pool := sync.Pool{New: newFunc}

	// Role of the New field: Get on an empty pool falls back to New.
	v1 := pool.Get()
	fmt.Printf("v1: %v\n", v1)

	// Storing and retrieving temporary objects in the pool.
	pool.Put(newFunc())
	pool.Put(newFunc())
	pool.Put(newFunc())
	v2 := pool.Get()
	fmt.Printf("v2: %v\n", v2)

	// Effect of garbage collection on the pool: a GC cycle clears it.
	debug.SetGCPercent(100)
	runtime.GC()
	v3 := pool.Get()
	fmt.Printf("v3: %v\n", v3)
	// With New unset and the pool empty, Get returns nil.
	pool.New = nil
	v4 := pool.Get()
	fmt.Printf("v4: %v\n", v4)
}
func callServer(clientPool *sync.Pool, s rpcx.ClientSelector) { client := clientPool.Get().(*rpcx.Client) args := &Args{7, 8} var reply Reply err := client.Call("Arith.Mul", args, &reply) if err != nil { fmt.Printf("error for Arith: %d*%d, %v \n", args.A, args.B, err) } else { fmt.Printf("Arith: %d*%d=%d, client: %p \n", args.A, args.B, reply.C, client) } clientPool.Put(client) }
func (t *Transport) fromtxpool(async bool, pool *sync.Pool) (arg *txproto) { if async { arg = pool.Get().(*txproto) arg.flush, arg.async = false, false arg.n, arg.err, arg.respch = 0, nil, nil if arg.packet == nil { arg.packet = make([]byte, t.buffersize) } } else { arg = pool.Get().(*txproto) arg.packet, arg.flush, arg.async = nil, false, false arg.n, arg.err, arg.respch = 0, nil, nil } return arg }
func main() { var pool sync.Pool pool.New = func() interface{} { return person{name: "name", timestamp: time.Now()} } var p, q, r person p = pool.Get().(person) fmt.Println(p.timestamp.String()) pool.Put(p) r = pool.Get().(person) q = pool.Get().(person) fmt.Println(r.timestamp.String()) fmt.Println(q.timestamp.String()) }
// main demonstrates a sync.Pool used as a free list of *int: allocate,
// pool, drop references, reacquire, and finally force a collection so the
// pool's New function must run.
func main() {
	// Task 1: a pool of ints. Objects are allocated individually and can
	// be freed as a group by the garbage collector.
	p := sync.Pool{New: func() interface{} {
		fmt.Println("pool empty")
		return new(int)
	}}

	// Task 2: allocate some ints and show they are usable.
	i := new(int)
	j := new(int)
	*i = 1
	*j = 2
	fmt.Println(*i + *j) // prints 3

	// Put the allocated ints into pool p. Another pool could be created
	// and assigned to a different variable; you pick a pool simply by
	// using the appropriate variable, p here.
	p.Put(i)
	p.Put(j)
	// Dropping our references lets the pooled objects be garbage
	// collected — that is, freed as a group.
	i = nil
	j = nil

	// p.Get may return the objects pooled above as long as they have not
	// been collected yet; otherwise it allocates new ones via New.
	i = p.Get().(*int)
	j = p.Get().(*int)
	*i = 4
	*j = 5
	fmt.Println(*i + *j) // prints 9

	// Once more, this time forcing a garbage collection first.
	p.Put(i)
	p.Put(j)
	i = nil
	j = nil
	runtime.GC()
	i = p.Get().(*int)
	j = p.Get().(*int)
	*i = 7
	*j = 8
	fmt.Println(*i + *j) // prints 15
}
func main() { runtime.GOMAXPROCS(runtime.NumCPU()) pool := sync.Pool{ New: func() interface{} { data := new(Data) data.tag = "new" data.buffer = make([]int, 10) return data }, } for i := 0; i < 10; i++ { go func() { data := pool.Get().(*Data) for index := range data.buffer { data.buffer[index] = rand.Intn(100) } fmt.Println(data) data.tag = "used" pool.Put(data) }() } for i := 0; i < 10; i++ { go func() { data := pool.Get().(*Data) n := 0 for index := range data.buffer { data.buffer[index] = n n += 2 } fmt.Println(data) data.tag = "used" pool.Put(data) }() } fmt.Scanln() }
func init() { // Private data var pool sync.Pool const bufSize = 32768 bufPool.Get = func() []byte { b, ok := pool.Get().([]byte) if !ok || len(b) < bufSize { b = make([]byte, bufSize) } return b } bufPool.Free = func(b []byte) { if len(b) >= bufSize { pool.Put(b) } } }
func acquirePipelineWork(pool *sync.Pool, timeout time.Duration) *pipelineWork { v := pool.Get() if v == nil { v = &pipelineWork{ done: make(chan struct{}, 1), } } w := v.(*pipelineWork) if timeout > 0 { if w.t == nil { w.t = time.NewTimer(timeout) } else { w.t.Reset(timeout) } w.deadline = time.Now().Add(timeout) } else { w.deadline = zeroTime } return w }
// main exercises a sync.Pool of *Data from twenty goroutines: ten fill
// the buffer with random values, ten with even numbers.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU()) // use all CPUs
	pool := sync.Pool{ // allocate the pool
		New: func() interface{} { // called when Get finds the pool empty
			data := new(Data)             // allocate new memory
			data.tag = "new"              // set the tag
			data.buffer = make([]int, 10) // allocate the slice storage
			return data                   // return the allocated object
		},
	}
	for i := 0; i < 10; i++ {
		go func() { // spawn 10 goroutines
			data := pool.Get().(*Data) // take a *Data from the pool
			for index := range data.buffer {
				data.buffer[index] = rand.Intn(100) // store random values in the slice
			}
			fmt.Println(data) // print the contents of data
			data.tag = "used" // tag the object as used
			pool.Put(data)    // return the object to the pool
		}()
	}
	for i := 0; i < 10; i++ {
		go func() { // spawn 10 goroutines
			data := pool.Get().(*Data) // take a *Data from the pool
			n := 0
			for index := range data.buffer {
				data.buffer[index] = n // store even numbers in the slice
				n += 2
			}
			fmt.Println(data) // print the contents of data
			data.tag = "used" // tag the object as used
			pool.Put(data)    // return the object to the pool
		}()
	}
	fmt.Scanln()
}
// Handle provides on-the-fly gzip encoding for other handlers. // // Usage: // // func DL1Handler(w http.ResponseWriter, req *http.Request) { // fmt.Fprintln(w, "foobar") // } // // func DL2Handler(w http.ResponseWriter, req *http.Request) { // fmt.Fprintln(w, "zzz") // } // // // func main() { // http.HandleFunc("/download1", DL1Handler) // http.HandleFunc("/download2", DL2Handler) // http.ListenAndServe(":8080", Gzip(http.DefaultServeMux)) // } func Gzip(h http.Handler) http.HandlerFunc { var pool sync.Pool pool.New = func() interface{} { return gzip.NewWriter(ioutil.Discard) } return func(w http.ResponseWriter, r *http.Request) { // Do nothing on a HEAD request if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") || r.Method == "HEAD" || w.Header().Get("Content-Encoding") == "gzip" { // Skip compression if already compressed h.ServeHTTP(w, r) return } w.Header().Set("Content-Encoding", "gzip") gz := pool.Get().(*gzip.Writer) defer pool.Put(gz) gz.Reset(w) h.ServeHTTP(IOResponseWriter{Writer: gz, ResponseWriter: WrapWriter(w)}, r) gz.Close() } }
func BenchmarkJsonDecoderWithPool(b *testing.B) { f, err := os.Open(httpTestFilename) decoder := &HttpJSONDecoder{} if err != nil { b.Fatal(err) } defer f.Close() r := bufio.NewReader(f) jsonDoc, isPrefix, err := r.ReadLine() if err != nil || isPrefix { b.Fatal(errors.New("Couldn't properly read ammo sample from data file")) } var a Ammo pool := sync.Pool{ New: func() interface{} { return &Http{} }, } for n := 0; n < b.N; n++ { h := pool.Get().(*Http) a, _ = decoder.Decode(jsonDoc, h) pool.Put(h) } _ = a }
// GzipLevel returns a middleware which compresses HTTP response using gzip compression // scheme using the level specified func GzipLevel(level int) lars.HandlerFunc { // test gzip level, then don't have to each time one is created // in the pool if _, err := gzip.NewWriterLevel(ioutil.Discard, level); err != nil { panic(err) } var pool = sync.Pool{ New: func() interface{} { z, _ := gzip.NewWriterLevel(ioutil.Discard, level) return z }, } return func(c lars.Context) { c.Response().Header().Add(lars.Vary, lars.AcceptEncoding) if strings.Contains(c.Request().Header.Get(lars.AcceptEncoding), lars.Gzip) { w := pool.Get().(*gzip.Writer) w.Reset(c.Response().Writer()) defer func() { w.Close() pool.Put(w) }() gw := gzipWriter{Writer: w, ResponseWriter: c.Response().Writer()} c.Response().Header().Set(lars.ContentEncoding, lars.Gzip) c.Response().SetWriter(gw) } c.Next() } }
// test1 demonstrates sync.Pool buffer reuse: pooled *bytes.Buffer values
// survive Put/Get round trips but would be discarded by a GC cycle.
// NOTE: the final for-loop never exits, so this function never returns.
func test1() {
	var bp sync.Pool
	var looks sync.RWMutex
	bp.New = func() interface{} {
		return &bytes.Buffer{}
	}
	buf := bp.Get().(*bytes.Buffer)
	buf.WriteString("test")
	fmt.Println(buf.String())
	bp.Put(buf) // record it back into the cache

	limit := make(chan struct{}, 10) // cap concurrent consumers at 10
	go func() {
		buf := bp.Get().(*bytes.Buffer)
		buf.WriteString("tttt")
		looks.Lock()
		bp.Put(buf)
		looks.Unlock()
	}()
	time.Sleep(1 * time.Second)
	// FIX: the original carried a trailing `return`, a runtime.GC() call
	// and a final Get/print after this loop. The loop has no break, so
	// none of that could ever execute; the unreachable code is removed.
	for {
		limit <- struct{}{}
		go func() {
			// a value taken with Get must be Put back, or it is lost
			buf := bp.Get().(*bytes.Buffer)
			if buf.Len() != 0 {
				fmt.Println(buf.String())
				looks.Lock()
				bp.Put(buf)
				looks.Unlock()
				//runtime.GC()
			}
			<-limit
		}()
	}
}
n := c.pn.Next() if n == nil { // Done! bufPool.Put(c.buf) return nil } if proc, ok := c.procs[n.inode]; ok { n.Proc = proc } return n } // cbConnections sets Connections() var cbConnections = func(processes bool) (ConnIter, error) { // buffer for contents of /proc/<pid>/net/tcp buf := bufPool.Get().(*bytes.Buffer) buf.Reset() var procs map[uint64]Proc if processes { var err error if procs, err = walkProcPid(buf); err != nil { return nil, err } } if buf.Len() == 0 { readFile(procRoot+"/net/tcp", buf) readFile(procRoot+"/net/tcp6", buf) }