func (p person) dine(wg *sync.WaitGroup, foodPool *sync.Pool) {
	for {
		if !foodRemains() {
			break
		}

		potentialDish := foodPool.Get()
		if potentialDish == nil {
			fmt.Printf("%s recieved a nil dish\n", p.Name)
			break
		}
		someDish, ok := potentialDish.(dish)
		if !ok {
			fmt.Printf("%s was unable to turn a potential dish into a real dish\n", p.Name)
			continue
		}
		p.consume(&someDish)
		if someDish.Bites <= 0 {
			fmt.Printf("%s finished %s\n", p.Name, someDish.Name)
		} else {
			foodPool.Put(someDish)
		}
	}
	wg.Done()
}
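A note on this pattern: sync.Pool stores interface{} values, so putting a dish value copies and boxes it on every Put. A minimal sketch of the pointer-based idiom that avoids the per-Put copy (the reset step is an assumption, not part of the example above):

var dishPool = sync.Pool{
	New: func() interface{} { return new(dish) },
}

func borrowDish() *dish { return dishPool.Get().(*dish) }

func releaseDish(d *dish) {
	*d = dish{} // reset state before pooling so stale data never leaks out
	dishPool.Put(d)
}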
Example #2
func Gzip(level int) gin.HandlerFunc {
	var gzPool sync.Pool
	gzPool.New = func() interface{} {
		gz, err := gzip.NewWriterLevel(ioutil.Discard, level)
		if err != nil {
			panic(err)
		}
		return gz
	}
	return func(c *gin.Context) {
		if !shouldCompress(c.Request) {
			return
		}

		gz := gzPool.Get().(*gzip.Writer)
		defer gzPool.Put(gz)
		gz.Reset(c.Writer)

		c.Header("Content-Encoding", "gzip")
		c.Header("Vary", "Accept-Encoding")
		c.Writer = &gzipWriter{c.Writer, gz}
		defer func() {
			c.Header("Content-Length", "0")
			gz.Close()
		}()
		c.Next()
	}
}
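The two defers above rely on LIFO ordering: gz.Close() is registered later, so it runs before the earlier-registered gzPool.Put(gz), and the writer is always closed before it goes back into the pool. A minimal demonstration of that ordering:

package main

import "fmt"

func main() {
	defer fmt.Println("runs second (like the earlier-registered Put)")
	defer fmt.Println("runs first (like the later-registered Close)")
}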
Example #3
func main() {
	// Disable GC, and make sure it is re-enabled before main returns.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	var count int32
	newFunc := func() interface{} {
		return atomic.AddInt32(&count, 1)
	}
	pool := sync.Pool{New: newFunc}

	// Demonstrate the effect of the New field.
	v1 := pool.Get()
	fmt.Printf("v1: %v\n", v1)

	// Putting values into and getting them back from the pool.
	pool.Put(newFunc())
	pool.Put(newFunc())
	pool.Put(newFunc())
	v2 := pool.Get()
	fmt.Printf("v2: %v\n", v2)

	// The effect of garbage collection on the pool.
	debug.SetGCPercent(100)
	runtime.GC()
	v3 := pool.Get()
	fmt.Printf("v3: %v\n", v3)
	pool.New = nil
	v4 := pool.Get()
	fmt.Printf("v4: %v\n", v4)
}
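The last two prints depend on two documented behaviors: a garbage collection may drop the pool's cached values (since Go 1.13 they survive one collection in a victim cache and are freed by the next), and Get on a pool that has nothing cached and a nil New returns nil. A minimal check of the nil-New case:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var p sync.Pool      // no New function set
	fmt.Println(p.Get()) // <nil>: nothing cached and no New to call
}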
Example #4
func (s *Server) ReadSocket(packetPool *sync.Pool) {
	// each goroutine gets its own socket
	// if the sockets support SO_REUSEPORT, then this will cause the
	// kernel to distribute datagrams across them, for better read
	// performance
	s.logger.WithField("address", s.UDPAddr).Info("UDP server listening")
	serverConn, err := NewSocket(s.UDPAddr, s.RcvbufBytes)
	if err != nil {
		// if any goroutine fails to create the socket, we can't really
		// recover, so we just blow up
		// this probably indicates a systemic issue, eg lack of
		// SO_REUSEPORT support
		s.logger.WithError(err).Fatal("Error listening for UDP")
	}

	for {
		buf := packetPool.Get().([]byte)
		n, _, err := serverConn.ReadFrom(buf)
		if err != nil {
			s.logger.WithError(err).Error("Error reading from UDP")
			continue
		}
		s.HandlePacket(buf[:n])
		// the Metric struct created by HandlePacket has no byte slices in it,
		// only strings
		// therefore there are no outstanding references to this byte slice, we
		// can return it to the pool
		packetPool.Put(buf)
	}
}
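ReadSocket is written to be run once per reader goroutine, with all readers sharing one pool of packet buffers. A minimal sketch of a call site (the buffer size and numReaders are assumptions; the source doesn't show how the pool is built):

packetPool := &sync.Pool{
	New: func() interface{} { return make([]byte, 65536) }, // assumed maximum datagram size
}
for i := 0; i < numReaders; i++ {
	go s.ReadSocket(packetPool)
}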
Example #5
// Example use of a pool to manage a free list of numbers
func ExampleNumberPool_1() {
	// Create a Context
	ctx := dec.NewContext(dec.InitDecimal128, 0)

	// New() function for the pool to create new numbers
	newFunc := func() interface{} { return dec.NewNumber(ctx.Digits()) }

	// Create a pool; either dec.Pool or sync.Pool will do.
	syncPool := sync.Pool{New: newFunc}

	// We can use Get().(*dec.Number) to get new or reusable numbers
	number := syncPool.Get().(*dec.Number)
	fmt.Printf("from sync.Pool: %s\n", number.Zero())
	// We're done with it, put it back in the pool
	syncPool.Put(number)

	// Or wrap it with a NumberPool so that Get() returns *Number instead of interface{}.
	// NumberPool also helps keep track of the context.
	pool := &dec.NumberPool{&syncPool, ctx}
	// An added benefit: no type assertion needed.
	number = pool.Get()
	// Idiomatic usage: defer Put() on the *Number right after Get().
	defer pool.Put(number)
	fmt.Printf("from sync.Pool: %s\n", number.FromString("1243", pool.Context))

	// Output:
	// from sync.Pool: 0
	// from sync.Pool: 1243
}
// Middleware encodes the response using Gzip encoding and sets all the appropriate
// headers. If the Content-Type is not set, it will be set by calling
// http.DetectContentType on the data being written.
func Middleware(level int) goa.Middleware {
	gzipPool := sync.Pool{
		New: func() interface{} {
			gz, err := gzip.NewWriterLevel(ioutil.Discard, level)
			if err != nil {
				panic(err)
			}
			return gz
		},
	}
	return func(h goa.Handler) goa.Handler {
		return func(ctx *goa.Context) (err error) {
			r := ctx.Request()
			// Skip compression if the client doesn't accept gzip encoding, is
			// requesting a WebSocket or the data is already compressed.
			if !strings.Contains(r.Header.Get(headerAcceptEncoding), encodingGzip) ||
				len(r.Header.Get(headerSecWebSocketKey)) > 0 ||
				ctx.Header().Get(headerContentEncoding) == encodingGzip {
				return h(ctx)
			}

			// Set the appropriate gzip headers.
			ctx.Header().Set(headerContentEncoding, encodingGzip)
			ctx.Header().Set(headerVary, headerAcceptEncoding)

			// Retrieve gzip writer from the pool. Reset it to use the ResponseWriter.
			// This allows us to re-use an already allocated buffer rather than
			// allocating a new buffer for every request.
			gz := gzipPool.Get().(*gzip.Writer)

			// Get the original http.ResponseWriter
			w := ctx.SetResponseWriter(nil)
			// Reset our gzip writer to use the http.ResponseWriter
			gz.Reset(w)

			// Wrap the original http.ResponseWriter with our gzipResponseWriter
			grw := gzipResponseWriter{
				ResponseWriter: w,
				gzw:            gz,
			}

			// Set the new http.ResponseWriter
			ctx.SetResponseWriter(grw)

			// Call the next handler supplying the gzipResponseWriter instead of
			// the original.
			err = h(ctx)
			if err != nil {
				return
			}

			// Delete the content length after we know we have been written to.
			grw.Header().Del(headerContentLength)
			gz.Close()
			gzipPool.Put(gz)
			return
		}
	}
}
Example #7
func BenchmarkAllocSyncPool(b *testing.B) {
	var p sync.Pool
	s := make([]byte, 10)
	for i := 0; i < b.N; i++ {
		p.Put(s)
		s = p.Get().([]byte)
	}
}
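Note that p.Put(s) converts the slice header to an interface{}, which itself allocates on every iteration; that cost is part of what this benchmark measures. A variant using a pointer to the slice avoids the per-Put allocation (a sketch for comparison, not from the source):

func BenchmarkAllocSyncPoolPtr(b *testing.B) {
	var p sync.Pool
	s := new([]byte)
	*s = make([]byte, 10)
	for i := 0; i < b.N; i++ {
		p.Put(s) // a pointer fits in the interface word, so boxing it does not allocate
		s = p.Get().(*[]byte)
	}
}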
Example #8
func BenchmarkRuntimeCallersSyncPool(b *testing.B) {
	pool := sync.Pool{New: func() interface{} { return make([]uintptr, 32) }}
	for i := 0; i < b.N; i++ {
		pcs := pool.Get().([]uintptr)
		runtime.Callers(0, pcs[:])
		pcs = pcs[0:] // no-op re-slice; the full backing array is returned to the pool
		pool.Put(pcs)
	}
}
Example #9
func releasePipelineWork(pool *sync.Pool, w *pipelineWork) {
	if w.t != nil {
		w.t.Stop()
	}
	w.reqCopy.Reset()
	w.respCopy.Reset()
	w.req = nil
	w.resp = nil
	w.err = nil
	pool.Put(w)
}
Example #10
func main() {
	var pool sync.Pool

	var a = 1
	pool.Put(a)
	pool.Put(new(User))
	fmt.Println(pool.Get())
	runtime.GC()

	fmt.Println(pool.Get())
	fmt.Println(pool.Get())
}
Example #11
func benchPool(i int, b *testing.B) {
	pool := sync.Pool{New: func() interface{} {
		return make([]int, 0, i)
	}}

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			s := pool.Get().([]int)[:0]
			pool.Put(s)
		}
	})
}
Example #12
func BenchmarkAllocSyncPool(b *testing.B) {
	var p sync.Pool
	s := make([]byte, 10)
	for i := 0; i < b.N; i++ {
		p.Put(s)
		si := p.Get()
		var ok bool
		s, ok = si.([]byte)
		if !ok {
			s = make([]byte, 10)
		}
	}
}
Example #13
func callServer(clientPool *sync.Pool, s rpcx.ClientSelector) {
	client := clientPool.Get().(*rpcx.Client)

	args := &Args{7, 8}
	var reply Reply
	err := client.Call("Arith.Mul", args, &reply)
	if err != nil {
		fmt.Printf("error for Arith: %d*%d, %v \n", args.A, args.B, err)
	} else {
		fmt.Printf("Arith: %d*%d=%d, client: %p \n", args.A, args.B, reply.C, client)
	}

	clientPool.Put(client)
}
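One judgment call worth noting (an observation, not from the source): on a Call error the client still goes back into the pool, so a broken connection can be recycled. A hedged variant that only pools clients after a successful call:

err := client.Call("Arith.Mul", args, &reply)
if err != nil {
	fmt.Printf("error for Arith: %d*%d, %v \n", args.A, args.B, err)
	return // drop the client rather than pooling a possibly broken connection
}
fmt.Printf("Arith: %d*%d=%d, client: %p \n", args.A, args.B, reply.C, client)
clientPool.Put(client)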
Example #14
func main() {
	var pool sync.Pool
	pool.New = func() interface{} {
		return person{name: "name", timestamp: time.Now()}
	}
	var p, q, r person

	p = pool.Get().(person)
	fmt.Println(p.timestamp.String())
	pool.Put(p)
	r = pool.Get().(person)
	q = pool.Get().(person)
	fmt.Println(r.timestamp.String())
	fmt.Println(q.timestamp.String())

}
Example #15
func (s *Server) CreateRegistrator(username, password string) *Registrator {
	pool := sync.Pool{New: func() interface{} { return new(string) }}
	for i := 1000; i < 9999; i++ {
		pool.Put(strconv.Itoa(i))
	}
	registrator := &Registrator{
		Login:        username,
		Password:     password,
		MsgChan:      make(chan interface{}),
		Server:       s,
		Queue:        make(map[string]*QueueMember),
		GroupManager: &GroupManager{Requests: make(map[string]interface{})},
		UniqueCodes:  &pool,
	}

	return registrator
}
func main() {
	rand.Seed(time.Now().UTC().UnixNano())

	var foodPool sync.Pool

	for _, d := range dishes {
		foodPool.Put(d)
	}

	fmt.Println("Bon appétit!")
	var wg sync.WaitGroup
	for _, p := range people {
		wg.Add(1)
		go p.dine(&wg, &foodPool)
	}
	wg.Wait()
	fmt.Println("That was delicious!")
}
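This main pairs with the dine method from the first example. A sketch of the supporting declarations the pair assumes (shapes inferred from usage; none of this appears in the source):

type dish struct {
	Name  string
	Bites int
}

type person struct{ Name string }

var (
	dishes    = []dish{{Name: "soup", Bites: 3}, {Name: "bread", Bites: 2}}
	people    = []person{{Name: "Alice"}, {Name: "Bob"}}
	remaining = int32(len(dishes))
)

// foodRemains and consume are assumed helpers; sync/atomic keeps the count
// safe across the diner goroutines.
func foodRemains() bool { return atomic.LoadInt32(&remaining) > 0 }

func (p person) consume(d *dish) {
	d.Bites--
	if d.Bites <= 0 {
		atomic.AddInt32(&remaining, -1)
	}
}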
func main() {
	// Task 1:  Define a pool (of ints).  Just as the task says, a sync.Pool
	// allocates individually and can free as a group.
	p := sync.Pool{New: func() interface{} {
		fmt.Println("pool empty")
		return new(int)
	}}
	// Task 2: Allocate some ints.
	i := new(int)
	j := new(int)
	// Show that they're usable.
	*i = 1
	*j = 2
	fmt.Println(*i + *j) // prints 3
	// Task 2 continued:  Put allocated ints in pool p.
	// Task explanation:  Variable p has a pool as its value.  Another pool
	// could be created and assigned to a different variable.  You choose
	// a pool simply by using the appropriate variable, p here.
	p.Put(i)
	p.Put(j)
	// Drop references to i and j.  This allows them to be garbage collected;
	// that is, freed as a group.
	i = nil
	j = nil
	// Get ints for i and j again, this time from the pool.  p.Get may reuse
	// an object allocated above as long as objects haven't been garbage
	// collected yet; otherwise p.Get will allocate a new object.
	i = p.Get().(*int)
	j = p.Get().(*int)
	*i = 4
	*j = 5
	fmt.Println(*i + *j) // prints 9
	// One more test, this time forcing a garbage collection.  (Since Go 1.13
	// a pool's contents survive one collection in a victim cache, so a single
	// runtime.GC() may not force reallocation on newer toolchains.)
	p.Put(i)
	p.Put(j)
	i = nil
	j = nil
	runtime.GC()
	i = p.Get().(*int)
	j = p.Get().(*int)
	*i = 7
	*j = 8
	fmt.Println(*i + *j) // prints 15
}
Example #18
func init() {
	// Private data
	var pool sync.Pool
	const bufSize = 32768

	bufPool.Get = func() []byte {
		b, ok := pool.Get().([]byte)
		if !ok || len(b) < bufSize {
			b = make([]byte, bufSize)
		}
		return b
	}

	bufPool.Free = func(b []byte) {
		if len(b) >= bufSize {
			pool.Put(b)
		}
	}
}
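For context, a sketch of the package-level bufPool variable this init function populates (the struct shape is an assumption inferred from the fields assigned above):

var bufPool struct {
	Get  func() []byte
	Free func(b []byte)
}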
Example #19
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	pool := sync.Pool{
		New: func() interface{} {
			data := new(Data)
			data.tag = "new"
			data.buffer = make([]int, 10)
			return data
		},
	}

	for i := 0; i < 10; i++ {
		go func() {
			data := pool.Get().(*Data)

			for index := range data.buffer {
				data.buffer[index] = rand.Intn(100)
			}
			fmt.Println(data)
			data.tag = "used"
			pool.Put(data)
		}()
	}

	for i := 0; i < 10; i++ {
		go func() {
			data := pool.Get().(*Data)

			n := 0
			for index := range data.buffer {
				data.buffer[index] = n
				n += 2
			}
			fmt.Println(data)
			data.tag = "used"
			pool.Put(data)
		}()
	}

	fmt.Scanln()

}
Example #20
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU()) // use all CPUs

	pool := sync.Pool{ // allocate the pool
		New: func() interface{} { // function called when Get finds the pool empty
			data := new(Data)             // allocate new memory
			data.tag = "new"              // set the tag
			data.buffer = make([]int, 10) // allocate the slice storage
			return data                   // return the allocated object
		},
	}

	for i := 0; i < 10; i++ {
		go func() { // spawn 10 goroutines
			data := pool.Get().(*Data) // take a *Data from the pool
			for index := range data.buffer {
				data.buffer[index] = rand.Intn(100) // fill the slice with random values
			}
			fmt.Println(data) // print the contents of data
			data.tag = "used" // mark the object as used
			pool.Put(data)    // return the object to the pool
		}()
	}

	for i := 0; i < 10; i++ {
		go func() { // spawn 10 goroutines
			data := pool.Get().(*Data) // take a *Data from the pool
			n := 0
			for index := range data.buffer {
				data.buffer[index] = n // fill the slice with even numbers
				n += 2
			}
			fmt.Println(data) // print the contents of data
			data.tag = "used" // mark the object as used
			pool.Put(data)    // return the object to the pool
		}()
	}

	fmt.Scanln()
}
Example #21
// Handle provides on-the-fly gzip encoding for other handlers.
//
// Usage:
//
//	func DL1Handler(w http.ResponseWriter, req *http.Request) {
//		fmt.Fprintln(w, "foobar")
//	}
//
//	func DL2Handler(w http.ResponseWriter, req *http.Request) {
//		fmt.Fprintln(w, "zzz")
//	}
//
//
//	func main() {
//		http.HandleFunc("/download1", DL1Handler)
//		http.HandleFunc("/download2", DL2Handler)
//		http.ListenAndServe(":8080", Gzip(http.DefaultServeMux))
//	}
func Gzip(h http.Handler) http.HandlerFunc {
	var pool sync.Pool
	pool.New = func() interface{} {
		return gzip.NewWriter(ioutil.Discard)
	}
	return func(w http.ResponseWriter, r *http.Request) {
		// Skip compression if the client doesn't accept gzip encoding, on a
		// HEAD request, or if the response is already compressed.
		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") || r.Method == "HEAD" ||
			w.Header().Get("Content-Encoding") == "gzip" {

			h.ServeHTTP(w, r)
			return
		}
		w.Header().Set("Content-Encoding", "gzip")
		gz := pool.Get().(*gzip.Writer)
		defer pool.Put(gz)
		gz.Reset(w)

		h.ServeHTTP(IOResponseWriter{Writer: gz, ResponseWriter: WrapWriter(w)}, r)
		gz.Close()
	}
}
Example #22
func BenchmarkJsonDecoderWithPool(b *testing.B) {
	f, err := os.Open(httpTestFilename)
	decoder := &HttpJSONDecoder{}
	if err != nil {
		b.Fatal(err)
	}
	defer f.Close()
	r := bufio.NewReader(f)
	jsonDoc, isPrefix, err := r.ReadLine()
	if err != nil || isPrefix {
		b.Fatal(errors.New("Couldn't properly read ammo sample from data file"))
	}
	var a Ammo
	pool := sync.Pool{
		New: func() interface{} { return &Http{} },
	}
	for n := 0; n < b.N; n++ {
		h := pool.Get().(*Http)
		a, _ = decoder.Decode(jsonDoc, h)
		pool.Put(h)
	}
	_ = a
}
Example #23
// GzipLevel returns a middleware which compresses HTTP response using gzip compression
// scheme using the level specified
func GzipLevel(level int) lars.HandlerFunc {

	// Validate the gzip level once up front, so it doesn't have to be
	// checked each time a writer is created in the pool.

	if _, err := gzip.NewWriterLevel(ioutil.Discard, level); err != nil {
		panic(err)
	}

	var pool = sync.Pool{
		New: func() interface{} {
			z, _ := gzip.NewWriterLevel(ioutil.Discard, level)
			return z
		},
	}

	return func(c lars.Context) {
		c.Response().Header().Add(lars.Vary, lars.AcceptEncoding)

		if strings.Contains(c.Request().Header.Get(lars.AcceptEncoding), lars.Gzip) {

			w := pool.Get().(*gzip.Writer)
			w.Reset(c.Response().Writer())

			defer func() {
				w.Close()
				pool.Put(w)
			}()

			gw := gzipWriter{Writer: w, ResponseWriter: c.Response().Writer()}
			c.Response().Header().Set(lars.ContentEncoding, lars.Gzip)
			c.Response().SetWriter(gw)
		}

		c.Next()
	}
}
Example #24
// WrapLevel behaves like GzipHandler but allows a custom GZIP
// compression level. Invalid compression level inputs are reset to default.
func WrapLevel(h http.Handler, level int) http.Handler {
	if level < gzip.DefaultCompression || level > gzip.BestCompression {
		level = gzip.DefaultCompression
	}
	pool := new(sync.Pool)

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Add(vary, acceptEncoding)

		if !strings.Contains(r.Header.Get(acceptEncoding), scheme) {
			h.ServeHTTP(w, r)
			return
		}

		// Bytes written during ServeHTTP are redirected to this gzip writer
		// before being written to the underlying response.
		zw := newWriterLevel(pool, w, level)
		w.Header().Set(contentEncoding, scheme)
		h.ServeHTTP(gzipResponseWriter{zw, w}, r)

		zw.Close()
		pool.Put(zw)
	})
}
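The unexported newWriterLevel helper isn't shown; a sketch of what it presumably does, given how WrapLevel uses the pool (an assumption, not the actual implementation):

func newWriterLevel(pool *sync.Pool, w io.Writer, level int) *gzip.Writer {
	if zw, ok := pool.Get().(*gzip.Writer); ok {
		zw.Reset(w) // reuse a pooled writer against the new response writer
		return zw
	}
	zw, _ := gzip.NewWriterLevel(w, level) // level was already validated by WrapLevel
	return zw
}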
Example #25
func main() {
	var rp sync.Pool
	var wp sync.Pool

	for i := 0; i < 1024; i++ {
		rb := new([]byte)
		*rb = make([]byte, 2048)
		rp.Put(rb)
	}

	var counter uint32
	log.Fatal(nsk.ListenAndServe(":8000", func(conn *net.TCPConn) {

		var s string
		var c uint32
		var rb *[]byte
		var w *bufio.Writer

		if v := wp.Get(); v != nil {
			w = v.(*bufio.Writer)
			w.Reset(conn)
		} else {
			w = bufio.NewWriter(conn)
		}

		if v := rp.Get(); v != nil {
			rb = v.(*[]byte)
		} else {
			rb = new([]byte)
			*rb = make([]byte, 2048)
		}

		n, err := conn.Read(*rb)
		if err != nil || n <= 0 {
			goto E
		}

		c = atomic.AddUint32(&counter, 1)
		s = strconv.FormatUint(uint64(c), 10)

		w.WriteString("HTTP/1.1 200 OK\r\n")
		w.WriteString("Connection: close\r\n")
		w.WriteString(fmt.Sprintf("Content-Length: %d\r\n\r\n", len(s)))
		w.WriteString(s)
		w.Flush()

	E:
		conn.Close()
		rp.Put(rb)
		wp.Put(w)
	}))

}
Example #26
func test1() {
	// A Pool is an in-memory cache; its contents may be cleared on GC.

	var bp sync.Pool
	var looks sync.RWMutex
	bp.New = func() interface{} {
		return &bytes.Buffer{}
	}

	buf := bp.Get().(*bytes.Buffer)
	buf.WriteString("test")
	fmt.Println(buf.String())
	bp.Put(buf) // return it to the cache

	limit := make(chan struct{}, 10)

	go func() {
		buf := bp.Get().(*bytes.Buffer)
		buf.WriteString("tttt")
		looks.Lock()
		bp.Put(buf)
		looks.Unlock()
	}()
	time.Sleep(1 * time.Second)
	for {
		limit <- struct{}{}
		go func() {
			// a value taken from the pool must be Put back, or it will be dropped
			buf := bp.Get().(*bytes.Buffer)
			if buf.Len() != 0 {
				fmt.Println(buf.String())
				looks.Lock()
				bp.Put(buf)
				looks.Unlock()
				//runtime.GC()
			}
			<-limit
		}()
	}

	// NOTE: the loop above never exits, so the code below is unreachable;
	// it would illustrate that a manual GC clears the pool.
	return
	runtime.GC() // trigger GC manually

	buf = bp.Get().(*bytes.Buffer) // the cache was cleared, so this buffer is empty
	fmt.Println(buf.String())
}
Example #27
func (f encFnInfo) kStruct(rv reflect.Value) {
	fti := f.ti
	e := f.e
	tisfi := fti.sfip
	toMap := !(fti.toArray || e.h.StructToArray)
	newlen := len(fti.sfi)
	// Use sync.Pool to reduce allocating slices unnecessarily.
	// The cost of the occasional locking is less than the cost of
	// allocating a new slice every time.

	var fkvs []encStructFieldKV
	var pool *sync.Pool
	var poolv interface{}
	idxpool := newlen / 8
	if encStructPoolLen != 4 {
		panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed
	}
	if idxpool < encStructPoolLen {
		pool = &encStructPool[idxpool]
		poolv = pool.Get()
		switch vv := poolv.(type) {
		case *[8]encStructFieldKV:
			fkvs = vv[:newlen]
		case *[16]encStructFieldKV:
			fkvs = vv[:newlen]
		case *[32]encStructFieldKV:
			fkvs = vv[:newlen]
		case *[64]encStructFieldKV:
			fkvs = vv[:newlen]
		}
	}
	if fkvs == nil {
		fkvs = make([]encStructFieldKV, newlen)
	}
	// if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
	if toMap {
		tisfi = fti.sfi
	}
	newlen = 0
	var kv encStructFieldKV
	for _, si := range tisfi {
		kv.v = si.field(rv, false)
		// if si.i != -1 {
		// 	rvals[newlen] = rv.Field(int(si.i))
		// } else {
		// 	rvals[newlen] = rv.FieldByIndex(si.is)
		// }
		if toMap {
			if si.omitEmpty && isEmptyValue(kv.v) {
				continue
			}
			kv.k = si.encName
		} else {
			// use the zero value.
			// if a reference or struct, set to nil (so you do not output too much)
			if si.omitEmpty && isEmptyValue(kv.v) {
				switch kv.v.Kind() {
				case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array,
					reflect.Map, reflect.Slice:
					kv.v = reflect.Value{} //encode as nil
				}
			}
		}
		fkvs[newlen] = kv
		newlen++
	}

	// debugf(">>>> kStruct: newlen: %v", newlen)
	// sep := !e.be
	ee := f.ee // don't dereference every time

	if toMap {
		ee.EncodeMapStart(newlen)
		// asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
		asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0
		for j := 0; j < newlen; j++ {
			kv = fkvs[j]
			if asSymbols {
				ee.EncodeSymbol(kv.k)
			} else {
				ee.EncodeString(c_UTF8, kv.k)
			}
			e.encodeValue(kv.v, encFn{})
		}
	} else {
		ee.EncodeArrayStart(newlen)
		for j := 0; j < newlen; j++ {
			kv = fkvs[j]
			e.encodeValue(kv.v, encFn{})
		}
	}
	ee.EncodeEnd()

	// do not use defer. Instead, use explicit pool return at end of function.
	// defer has a cost we are trying to avoid.
	// If there is a panic and these slices are not returned, it is ok.
	if pool != nil {
		pool.Put(poolv)
	}
}
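For context, a sketch of the encStructPool array this function indexes (an assumption consistent with the type switch above):

const encStructPoolLen = 4

var encStructPool [encStructPoolLen]sync.Pool

func init() {
	encStructPool[0].New = func() interface{} { return new([8]encStructFieldKV) }
	encStructPool[1].New = func() interface{} { return new([16]encStructFieldKV) }
	encStructPool[2].New = func() interface{} { return new([32]encStructFieldKV) }
	encStructPool[3].New = func() interface{} { return new([64]encStructFieldKV) }
}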
Example #28
func startHTTPServer(hostport string, msgchs []chan<- msg, quit <-chan struct{}, wg *sync.WaitGroup) {
	var bufPool sync.Pool
	var msgPool sync.Pool
	msgPool.New = func() interface{} {
		return &msg{nil, make(chan error, 1)}
	}

	var rndPool sync.Pool
	rndPool.New = func() interface{} {
		return rand.New(rand.NewSource(time.Now().UnixNano()))
	}

	handler := func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()

		cl := int(r.ContentLength)
		if cl <= 0 {
			http.Error(w, http.StatusText(http.StatusLengthRequired), http.StatusLengthRequired)
			return
		}

		buf, ok := bufPool.Get().([]byte)
		if ok && cap(buf) < cl+16 { // +16 in case a worker needs room to encode the object
			bufPool.Put(buf)
			buf = make([]byte, cl, cl+16)
		} else if ok {
			buf = buf[:cl]
		} else {
			buf = make([]byte, cl, cl+16)
		}

		defer bufPool.Put(buf)

		if _, err := io.ReadFull(r.Body, buf); err != nil {
			log.Println("HTTP server: failed to read body", err)
			http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
			return
		}

		msg := msgPool.Get().(*msg)
		defer msgPool.Put(msg)
		msg.buf = buf

		rnd := rndPool.Get().(*rand.Rand)
		defer rndPool.Put(rnd)

		msgchs[rnd.Intn(len(msgchs))] <- *msg
		if err := <-msg.err; err != nil {
			log.Println("HTTP server: failed to process message", err)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	addr, err := net.ResolveTCPAddr("tcp4", hostport)
	if err != nil {
		log.Fatal("failed to resolve addr: ", err)
	}

	conn, err := net.ListenTCP("tcp4", addr)
	if err != nil {
		log.Fatal("failed to lister UDP socket: ", err)
	}

	http.HandleFunc("/", handler)
	srv := &http.Server{Addr: hostport}
	go func() {
		log.Println("starting HTTP server", hostport)

		wg.Add(1)
		defer wg.Done()

		srv.Serve(newStoppableListener(conn, quit))
	}()
}
Example #29
// putChunkedFile puts in to container/objectPath, storing the chunks in
// chunksContainer/chunksPath.  It returns the number of bytes uploaded
// and an error.
func (s *Snapshot) putChunkedFile(in io.Reader, container, objectPath string, chunksContainer, chunksPath string, mimeType string) (int64, error) {
	// Pool of buffers for upload
	bufPool := sync.Pool{
		New: func() interface{} {
			return make([]byte, s.Manager.ChunkSize)
		},
	}

	const inFlight = 2
	type upload struct {
		chunkPath string
		buf       []byte
		n         int
	}
	uploads := make(chan upload, inFlight)
	errs := make(chan error, inFlight)

	// Read chunks from the file
	size := int64(0)
	finished := false
	go func() {
		for chunk := 1; !finished; chunk++ {
			buf := bufPool.Get().([]byte)
			n, err := io.ReadFull(in, buf)
			size += int64(n)
			if err == io.EOF {
				break
			} else if err == io.ErrUnexpectedEOF {
				finished = true
			} else if err != io.ErrUnexpectedEOF && err != nil {
				errs <- fmt.Errorf("error reading %v", err)
				break
			}
			uploads <- upload{
				chunkPath: fmt.Sprintf("%s/%04d", chunksPath, chunk),
				buf:       buf,
				n:         n,
			}
		}
		close(uploads)
	}()

	// Upload chunks as they come in
	go func() {
		for upload := range uploads {
			// FIXME retry
			log.Printf("Uploading chunk %q", upload.chunkPath)
			err := s.Manager.Swift.ObjectPutBytes(container, upload.chunkPath, upload.buf[:upload.n], mimeType)
			if err != nil {
				errs <- fmt.Errorf("failed to upload chunk %q: %v", upload.chunkPath, err)
			}
			bufPool.Put(upload.buf)
		}
		close(errs)
	}()

	// Collect errors
	for err := range errs {
		finished = true
		return size, err
	}

	// Put the manifest if all was successful
	log.Printf("Uploading manifest %q", objectPath)
	contents := strings.NewReader("")
	headers := swift.Headers{
		"X-Object-Manifest": chunksContainer + "/" + chunksPath,
	}
	_, err := s.Manager.Swift.ObjectPut(container, objectPath, contents, true, "", "application/octet-stream", headers)
	return size, err
}
Example #30
// GenJobs generates and returns a slice of Jobs with timings
// derived from exponentially distributed random variables.
func GenJobs(sysLoad float64, simTime int, clusterSize int, outPath string, stats chan string,
	precision chan float64, threadCap *models.Semaphore, jobPool *sync.Pool) {

	threadCap.Lock()
	var totalComputation = 0
	var numJobs = round(
		(sysLoad * float64(simTime) * float64(clusterSize)) / realExecAvg)
	var numSubmitters = round(math.Sqrt(float64(numJobs)))
	submitterEstAvgs := make([]float64, numSubmitters)
	for i := 0; i < numSubmitters; i++ {
		for {
			submitterEstAvgs[i] = rand.ExpFloat64() * estExecAvg
			if submitterEstAvgs[i] > estExecAvg/15.0 && submitterEstAvgs[i] < estExecAvg*15.0 {
				break
			}
		}
	}

	var burstSize int
	var burstAvg = sysLoad * float64(clusterSize) * burstMult

	var interval int
	var intervalLambda = float64(simTime) / (float64(numJobs) / burstAvg)

	var jobs []models.Job
	var arrival = 0
	var estExec, realExec, deadline, submitter int

	for i := 0; i < numJobs; {
		submitter = rand.Intn(numSubmitters)
		if i != 0 {
			interval = round(rand.ExpFloat64() * float64(intervalLambda))
			arrival += interval
		}
		burstSize = round(rand.ExpFloat64() * burstAvg)
		for j := 0; j < burstSize; j++ {
			realExec = round(rand.ExpFloat64() * realExecAvg)
			for { // estExec must always be > realExec
				estExec = round(rand.ExpFloat64() * submitterEstAvgs[submitter])
				if estExec > realExec {
					break
				}
			}
			for { // deadline must always be > estExec (n.b. this implicitly makes it > realExec)
				deadline = round(rand.ExpFloat64() * deadlineAvg)
				if deadline > estExec {
					break
				}
			}

			var newJob *models.Job
			poolFetch := jobPool.Get()
			if poolFetch == nil {
				newJob = &models.Job{}
			} else {
				newJob = poolFetch.(*models.Job)
			}
			newJob.ID = i
			newJob.SubmitterID = submitter
			newJob.Arrival = arrival
			newJob.EstExec = estExec
			newJob.RealExec = realExec
			newJob.Deadline = deadline
			jobs = append(jobs, *newJob) // append copies the struct into the slice
			jobPool.Put(newJob)          // safe: the slice holds its own copy, not the pooled pointer

			totalComputation += realExec
			i++
		}
	}
	WriteJobsToCSV(jobs, outPath)
	var trueLoad = (float64(totalComputation) / (float64(simTime) * float64(clusterSize)))
	precision <- (trueLoad / sysLoad)
	var statsMsg = "Generated jobs for:\n" +
		"    Cluster Size: %v\n" +
		"    System Load: %v\n" +
		"    Time Span: %v\n" +
		"    Actual computation load: %v\n"
	statsMsg = fmt.Sprintf(statsMsg, clusterSize, sysLoad, simTime, trueLoad)
	stats <- statsMsg
	threadCap.Free()
}