func main() { _, err := OutFunc() if err != nil { log.Printf("error is : %v", err) } fmt.Printf("End of main") data := []int{0, 1, 2, 3, 4} s1 := data[:2] fmt.Printf("data ptr is %p, s1 ptr is %p", &data, &s1) s2 := append(s1, 100, 200) fmt.Println("\n", data) fmt.Println(s1) fmt.Println(s2) fmt.Printf("data ptr is %p, s1 ptr is %p, s2 ptr is %p \n", &data, &s1, &s2) var pool *sync.Pool pool = new(sync.Pool) pool.New = func() interface{} { return 1 } //pool.Put(1) i := pool.Get() if i == nil { fmt.Println("pool.Get is non-block function. nil could be returned. not cool") } else { fmt.Println("number is ", i) } }
// ReadSocket creates a UDP socket and runs a blocking read loop forever,
// recycling packet buffers through packetPool so steady-state reads do not
// allocate. It never returns; a socket-creation failure exits the process
// via the logger's Fatal.
func (s *Server) ReadSocket(packetPool *sync.Pool) {
	// each goroutine gets its own socket
	// if the sockets support SO_REUSEPORT, then this will cause the
	// kernel to distribute datagrams across them, for better read
	// performance
	s.logger.WithField("address", s.UDPAddr).Info("UDP server listening")
	serverConn, err := NewSocket(s.UDPAddr, s.RcvbufBytes)
	if err != nil {
		// if any goroutine fails to create the socket, we can't really
		// recover, so we just blow up
		// this probably indicates a systemic issue, eg lack of
		// SO_REUSEPORT support
		s.logger.WithError(err).Fatal("Error listening for UDP")
	}
	for {
		// Pool entries are assumed to be []byte sized for the largest
		// expected datagram — TODO confirm at the Put/seed sites.
		buf := packetPool.Get().([]byte)
		n, _, err := serverConn.ReadFrom(buf)
		if err != nil {
			// NOTE(review): on a read error the buffer is not returned to
			// the pool, so it is simply dropped to the GC.
			s.logger.WithError(err).Error("Error reading from UDP")
			continue
		}
		s.HandlePacket(buf[:n])
		// the Metric struct created by HandlePacket has no byte slices in it,
		// only strings
		// therefore there are no outstanding references to this byte slice, we
		// can return it to the pool
		packetPool.Put(buf)
	}
}
// Example use of a pool to manage a free list of numbers.
// The pool's New function is invoked by Get whenever the free list is
// empty; the trailing Output comment is checked by "go test".
func ExampleNumberPool_1() {
	// Create a Context
	ctx := dec.NewContext(dec.InitDecimal128, 0)

	// New() function for the pool to create new numbers
	newFunc := func() interface{} { return dec.NewNumber(ctx.Digits()) }

	// create a pool. Either dec.Pool or sync.Pool will do
	syncPool := sync.Pool{New: newFunc}

	// We can use Get().(*dec.Number) to get new or reusable numbers
	number := syncPool.Get().(*dec.Number)
	fmt.Printf("from sync.Pool: %s\n", number.Zero())

	// We're done with it, put it back in the pool
	syncPool.Put(number)

	// Or, wrap it with a NumberPool so that Get() returns *Number instead of interface{}.
	// NumberPool also helps keeping track of the context.
	pool := &dec.NumberPool{&syncPool, ctx}

	// and benefit: no need to type-cast
	number = pool.Get()

	// Introducing the idiomatic code: defer Put() the *Number right after Get()
	defer pool.Put(number)

	fmt.Printf("from sync.Pool: %s\n", number.FromString("1243", pool.Context))

	// Output:
	// from sync.Pool: 0
	// from sync.Pool: 1243
}
func (p person) dine(wg *sync.WaitGroup, foodPool *sync.Pool) { for { if !foodRemains() { break } potentialDish := foodPool.Get() if potentialDish == nil { fmt.Printf("%s recieved a nil dish\n", p.Name) break } someDish, ok := potentialDish.(dish) if !ok { fmt.Printf("%s was unable to turn a potential dish into a real dish\n", p.Name) continue } p.consume(&someDish) if someDish.Bites <= 0 { fmt.Printf("%s finished %s\n", p.Name, someDish.Name) } else { foodPool.Put(someDish) } } wg.Done() }
func NewByteArraySize(pool *sync.Pool, size int) []byte { if v := pool.Get(); v != nil { ba := v.([]byte) return ba } return make([]byte, size) }
// Middleware encodes the response using Gzip encoding and sets all the appropriate
// headers. If the Content-Type is not set, it will be set by calling
// http.DetectContentType on the data being written.
func Middleware(level int) goa.Middleware {
	// One writer pool per middleware instance; gzip writers are costly to
	// allocate, so they are reused across requests.
	gzipPool := sync.Pool{
		New: func() interface{} {
			// An invalid level is a programming error; fail fast at first use.
			gz, err := gzip.NewWriterLevel(ioutil.Discard, level)
			if err != nil {
				panic(err)
			}
			return gz
		},
	}
	return func(h goa.Handler) goa.Handler {
		return func(ctx *goa.Context) (err error) {
			r := ctx.Request()
			// Skip compression if the client doesn't accept gzip encoding, is
			// requesting a WebSocket or the data is already compressed.
			if !strings.Contains(r.Header.Get(headerAcceptEncoding), encodingGzip) ||
				len(r.Header.Get(headerSecWebSocketKey)) > 0 ||
				ctx.Header().Get(headerContentEncoding) == encodingGzip {
				return h(ctx)
			}

			// Set the appropriate gzip headers.
			ctx.Header().Set(headerContentEncoding, encodingGzip)
			ctx.Header().Set(headerVary, headerAcceptEncoding)

			// Retrieve gzip writer from the pool. Reset it to use the ResponseWriter.
			// This allows us to re-use an already allocated buffer rather than
			// allocating a new buffer for every request.
			gz := gzipPool.Get().(*gzip.Writer)

			// Get the original http.ResponseWriter
			w := ctx.SetResponseWriter(nil)

			// Reset our gzip writer to use the http.ResponseWriter
			gz.Reset(w)

			// Wrap the original http.ResponseWriter with our gzipResponseWriter
			grw := gzipResponseWriter{
				ResponseWriter: w,
				gzw:            gz,
			}

			// Set the new http.ResponseWriter
			ctx.SetResponseWriter(grw)

			// Call the next handler supplying the gzipResponseWriter instead of
			// the original.
			err = h(ctx)
			if err != nil {
				// NOTE(review): on error the writer is neither closed nor
				// returned to the pool, so it is abandoned to the GC —
				// confirm this is intentional.
				return
			}

			// Delete the content length after we know we have been written to.
			grw.Header().Del(headerContentLength)

			// Close flushes the gzip trailer; only then is the writer safe
			// to recycle.
			gz.Close()
			gzipPool.Put(gz)
			return
		}
	}
}
// BenchmarkAllocSyncPool measures the round-trip cost of cycling a single
// byte slice through a sync.Pool (one Put followed by one Get per op).
func BenchmarkAllocSyncPool(b *testing.B) {
	var pool sync.Pool
	buf := make([]byte, 10)
	for n := 0; n < b.N; n++ {
		pool.Put(buf)
		buf = pool.Get().([]byte)
	}
}
func newBufferPool() *bufferPool { var p sync.Pool p.New = func() interface{} { return make([]byte, 20480) } return &bufferPool{p} }
func NewByteSlicePool(size int) *ByteSlicePool { p := new(sync.Pool) p.New = func() interface{} { return make([]byte, size) } return &ByteSlicePool{p} }
// TODO func NewBufioReaderSize(pool *sync.Pool, r io.Reader, size int) *bufio.Reader { if v := pool.Get(); v != nil { br := v.(*bufio.Reader) br.Reset(r) return br } return bufio.NewReaderSize(r, size) }
func NewBufioWriterSize(pool *sync.Pool, w io.Writer, size int) *bufio.Writer { if v := pool.Get(); v != nil { bw := v.(*bufio.Writer) bw.Reset(w) return bw } return bufio.NewWriterSize(w, size) }
// BenchmarkRuntimeCallersSyncPool measures runtime.Callers when the
// destination PC slice is recycled through a sync.Pool.
//
// Cleanup: the original `pcs = pcs[0:]` was a no-op reslice and the
// `pcs[:]` argument was redundant for a slice; both are removed.
func BenchmarkRuntimeCallersSyncPool(b *testing.B) {
	pool := sync.Pool{New: func() interface{} { return make([]uintptr, 32) }}
	for i := 0; i < b.N; i++ {
		pcs := pool.Get().([]uintptr)
		// Only the slice's storage is being reused; its contents need not
		// be cleared before Put.
		runtime.Callers(0, pcs)
		pool.Put(pcs)
	}
}
func newWriterLevel(pool *sync.Pool, w io.Writer, level int) *gzip.Writer { if v := pool.Get(); v != nil { zw := v.(*gzip.Writer) zw.Reset(w) return zw } zw, _ := gzip.NewWriterLevel(w, level) return zw }
func NewMPool(sz int) *MPool { p := &MPool{sz: sz} pool := new(sync.Pool) pool.New = func() interface{} { buf := make([]byte, p.sz) atomic.AddInt32(&p.alloced, 1) return buf } p.pool = pool return p }
func releasePipelineWork(pool *sync.Pool, w *pipelineWork) { if w.t != nil { w.t.Stop() } w.reqCopy.Reset() w.respCopy.Reset() w.req = nil w.resp = nil w.err = nil pool.Put(w) }
// benchPool measures Get/Put churn on a pool of int slices with capacity
// i, under parallel load.
func benchPool(i int, b *testing.B) {
	pool := sync.Pool{
		New: func() interface{} { return make([]int, 0, i) },
	}
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			scratch := pool.Get().([]int)
			// Truncated to zero length so the capacity is reused.
			pool.Put(scratch[:0])
		}
	})
}
// BenchmarkAllocSyncPool cycles one byte slice through a sync.Pool,
// tolerating a drained pool by allocating a replacement slice.
func BenchmarkAllocSyncPool(b *testing.B) {
	var pool sync.Pool
	buf := make([]byte, 10)
	for n := 0; n < b.N; n++ {
		pool.Put(buf)
		got, ok := pool.Get().([]byte)
		if !ok {
			// Pool came back empty (or with a foreign type): start fresh.
			got = make([]byte, 10)
		}
		buf = got
	}
}
func callServer(clientPool *sync.Pool, s rpcx.ClientSelector) { client := clientPool.Get().(*rpcx.Client) args := &Args{7, 8} var reply Reply err := client.Call("Arith.Mul", args, &reply) if err != nil { fmt.Printf("error for Arith: %d*%d, %v \n", args.A, args.B, err) } else { fmt.Printf("Arith: %d*%d=%d, client: %p \n", args.A, args.B, reply.C, client) } clientPool.Put(client) }
func (t *Transport) fromtxpool(async bool, pool *sync.Pool) (arg *txproto) { if async { arg = pool.Get().(*txproto) arg.flush, arg.async = false, false arg.n, arg.err, arg.respch = 0, nil, nil if arg.packet == nil { arg.packet = make([]byte, t.buffersize) } } else { arg = pool.Get().(*txproto) arg.packet, arg.flush, arg.async = nil, false, false arg.n, arg.err, arg.respch = 0, nil, nil } return arg }
func main() { var pool sync.Pool var a = 1 pool.Put(a) pool.Put(new(User)) fmt.Println(pool.Get()) runtime.GC() fmt.Println(pool.Get()) fmt.Println(pool.Get()) }
func (s *Server) CreateRegistrator(username, password string) *Registrator { pool := sync.Pool{New: func() interface{} { return new(string) }} for i := 1000; i < 9999; i++ { pool.Put(strconv.Itoa(i)) } registrator := &Registrator{ Login: username, Password: password, MsgChan: make(chan interface{}), Server: s, Queue: make(map[string]*QueueMember), GroupManager: &GroupManager{Requests: make(map[string]interface{})}, UniqueCodes: &pool, } return registrator }
func main() { rand.Seed(time.Now().UTC().UnixNano()) var foodPool sync.Pool for _, d := range dishes { foodPool.Put(d) } fmt.Println("Bon appétit!") var wg sync.WaitGroup for _, p := range people { wg.Add(1) go p.dine(&wg, &foodPool) } wg.Wait() fmt.Println("That was delicious!") }
func init() { // Private data var pool sync.Pool const bufSize = 32768 bufPool.Get = func() []byte { b, ok := pool.Get().([]byte) if !ok || len(b) < bufSize { b = make([]byte, bufSize) } return b } bufPool.Free = func(b []byte) { if len(b) >= bufSize { pool.Put(b) } } }
func acquirePipelineWork(pool *sync.Pool, timeout time.Duration) *pipelineWork { v := pool.Get() if v == nil { v = &pipelineWork{ done: make(chan struct{}, 1), } } w := v.(*pipelineWork) if timeout > 0 { if w.t == nil { w.t = time.NewTimer(timeout) } else { w.t.Reset(timeout) } w.deadline = time.Now().Add(timeout) } else { w.deadline = zeroTime } return w }
// Gzip returns a gin middleware that compresses responses at the given
// gzip level for requests that pass shouldCompress. Writers are recycled
// through a sync.Pool to avoid a per-request allocation.
func Gzip(level int) gin.HandlerFunc {
	var gzPool sync.Pool
	gzPool.New = func() interface{} {
		// An invalid level is a programming error; panic at first use.
		gz, err := gzip.NewWriterLevel(ioutil.Discard, level)
		if err != nil {
			panic(err)
		}
		return gz
	}
	return func(c *gin.Context) {
		if !shouldCompress(c.Request) {
			return
		}
		gz := gzPool.Get().(*gzip.Writer)
		// NOTE(review): the writer still references c.Writer when it is
		// returned to the pool; a Reset(ioutil.Discard) before Put would
		// release that reference — confirm intent.
		defer gzPool.Put(gz)
		gz.Reset(c.Writer)
		c.Header("Content-Encoding", "gzip")
		c.Header("Vary", "Accept-Encoding")
		c.Writer = &gzipWriter{c.Writer, gz}
		defer func() {
			// Runs after the handler chain: Content-Length is forced to
			// "0" and Close flushes the gzip trailer.
			// NOTE(review): presumably gin recomputes/ignores this header
			// on a streamed response — verify against the gin version used.
			c.Header("Content-Length", "0")
			gz.Close()
		}()
		c.Next()
	}
}
func main() {
	// Task 1: Define a pool (of ints). Just as the task says, a sync.Pool
	// allocates individually and can free as a group. New runs whenever
	// Get finds the pool empty.
	p := sync.Pool{New: func() interface{} {
		fmt.Println("pool empty")
		return new(int)
	}}

	// Task 2: Allocate some ints.
	i := new(int)
	j := new(int)
	// Show that they're usable.
	*i = 1
	*j = 2
	fmt.Println(*i + *j) // prints 3

	// Task 2 continued: Put allocated ints in pool p.
	// Task explanation: Variable p has a pool as its value. Another pool
	// could be created and assigned to a different variable. You choose
	// a pool simply by using the appropriate variable, p here.
	p.Put(i)
	p.Put(j)

	// Drop references to i and j. This allows them to be garbage collected;
	// that is, freed as a group.
	i = nil
	j = nil

	// Get ints for i and j again, this time from the pool. p.Get may reuse
	// an object allocated above as long as objects haven't been garbage
	// collected yet; otherwise p.Get will allocate a new object.
	i = p.Get().(*int)
	j = p.Get().(*int)
	*i = 4
	*j = 5
	fmt.Println(*i + *j) // prints 9

	// One more test, this time forcing a garbage collection, which
	// empties the pool and so triggers New on the next Get.
	p.Put(i)
	p.Put(j)
	i = nil
	j = nil
	runtime.GC()
	i = p.Get().(*int)
	j = p.Get().(*int)
	*i = 7
	*j = 8
	fmt.Println(*i + *j) // prints 15
}
// main races twenty goroutines over a shared pool of *Data scratch
// objects: ten fill the buffer with random values, ten with even numbers.
// NOTE(review): main never waits on the goroutines — it blocks on Scanln,
// so completion depends on user input arriving after the work finishes.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	pool := sync.Pool{
		New: func() interface{} {
			// Fresh objects are tagged "new"; recycled ones carry "used".
			data := new(Data)
			data.tag = "new"
			data.buffer = make([]int, 10)
			return data
		},
	}

	for i := 0; i < 10; i++ {
		go func() {
			data := pool.Get().(*Data)
			for index := range data.buffer {
				data.buffer[index] = rand.Intn(100)
			}
			fmt.Println(data)
			data.tag = "used"
			pool.Put(data)
		}()
	}

	for i := 0; i < 10; i++ {
		go func() {
			data := pool.Get().(*Data)
			n := 0
			for index := range data.buffer {
				data.buffer[index] = n
				n += 2
			}
			fmt.Println(data)
			data.tag = "used"
			pool.Put(data)
		}()
	}

	// Blocks until the user presses enter; the goroutines run meanwhile.
	fmt.Scanln()
}
func main() { var pool sync.Pool pool.New = func() interface{} { return person{name: "name", timestamp: time.Now()} } var p, q, r person p = pool.Get().(person) fmt.Println(p.timestamp.String()) pool.Put(p) r = pool.Get().(person) q = pool.Get().(person) fmt.Println(r.timestamp.String()) fmt.Println(q.timestamp.String()) }
func BenchmarkJsonDecoderWithPool(b *testing.B) { f, err := os.Open(httpTestFilename) decoder := &HttpJSONDecoder{} if err != nil { b.Fatal(err) } defer f.Close() r := bufio.NewReader(f) jsonDoc, isPrefix, err := r.ReadLine() if err != nil || isPrefix { b.Fatal(errors.New("Couldn't properly read ammo sample from data file")) } var a Ammo pool := sync.Pool{ New: func() interface{} { return &Http{} }, } for n := 0; n < b.N; n++ { h := pool.Get().(*Http) a, _ = decoder.Decode(jsonDoc, h) pool.Put(h) } _ = a }
// GzipLevel returns a middleware which compresses HTTP response using gzip compression
// scheme using the level specified
func GzipLevel(level int) lars.HandlerFunc {
	// test gzip level, then don't have to each time one is created
	// in the pool
	if _, err := gzip.NewWriterLevel(ioutil.Discard, level); err != nil {
		panic(err)
	}

	// Writers are pooled so each request reuses an allocated compressor.
	var pool = sync.Pool{
		New: func() interface{} {
			// level was validated above, so the error can be ignored.
			z, _ := gzip.NewWriterLevel(ioutil.Discard, level)
			return z
		},
	}

	return func(c lars.Context) {
		// Advertise that the response varies with the client's encoding.
		c.Response().Header().Add(lars.Vary, lars.AcceptEncoding)

		if strings.Contains(c.Request().Header.Get(lars.AcceptEncoding), lars.Gzip) {
			w := pool.Get().(*gzip.Writer)
			w.Reset(c.Response().Writer())
			defer func() {
				// Close flushes the gzip trailer after downstream handlers
				// have run; only then is the writer safe to recycle.
				w.Close()
				pool.Put(w)
			}()
			gw := gzipWriter{Writer: w, ResponseWriter: c.Response().Writer()}
			c.Response().Header().Set(lars.ContentEncoding, lars.Gzip)
			c.Response().SetWriter(gw)
		}

		c.Next()
	}
}