// deriveKey fills out the Key field. func (sk *SecretKey) deriveKey(password *[]byte) error { key, err := scrypt.Key(*password, sk.Parameters.Salt[:], sk.Parameters.N, sk.Parameters.R, sk.Parameters.P, len(sk.Key)) if err != nil { return err } copy(sk.Key[:], key) zero.Bytes(key) // I'm not a fan of forced garbage collections, but scrypt allocates a // ton of memory and calling it back to back without a GC cycle in // between means you end up needing twice the amount of memory. For // example, if your scrypt parameters are such that you require 1GB and // you call it twice in a row, without this you end up allocating 2GB // since the first GB probably hasn't been released yet. debug.FreeOSMemory() // I'm not a fan of forced garbage collections, but scrypt allocates a // ton of memory and calling it back to back without a GC cycle in // between means you end up needing twice the amount of memory. For // example, if your scrypt parameters are such that you require 1GB and // you call it twice in a row, without this you end up allocating 2GB // since the first GB probably hasn't been released yet. debug.FreeOSMemory() return nil }
// FreeMemory starts a background goroutine that periodically returns unused
// memory to the OS, sleeping durationMinutes between passes. The goroutine
// runs for the life of the process; there is no way to stop it.
func FreeMemory(durationMinutes int) {
	go func() {
		for {
			// The original called FreeOSMemory twice back to back;
			// the second call is redundant since the first already
			// forces a GC and releases what it can.
			debug.FreeOSMemory()
			time.Sleep(time.Duration(durationMinutes) * time.Minute)
		}
	}()
}
func write(wr writeRequest, id string) { cmds, idArr, bufTypeArr := createCommands(wr, id) qr := qReport{ Cmd: "Queued", //Type: bufTypeArr, Ids: idArr, D: cmds, QCnt: wr.p.itemsInBuffer, Port: wr.p.portConf.Name, } json, _ := json.Marshal(qr) h.broadcastSys <- json // now send off the commands to the appropriate channel for index, cmdToSendToChannel := range cmds { //cmdIdCtr++ //cmdId := "fakeid-" + strconv.Itoa(cmdIdCtr) cmdId := idArr[index] if bufTypeArr[index] == "Buf" { log.Println("Send was normal send, so sending to wr.p.sendBuffered") wr.p.sendBuffered <- Cmd{cmdToSendToChannel, cmdId, false, false} } else { log.Println("Send was sendnobuf, so sending to wr.p.sendNoBuf") wr.p.sendNoBuf <- Cmd{cmdToSendToChannel, cmdId, true, false} } } // garbage collect if *gcType == "max" { debug.FreeOSMemory() } }
// fillseq benchmarks sequential inserts into an lldb BTree backed by a
// simple file filer: it creates N 16-byte keys (big-endian counter in the
// first 4 bytes), inserts them in order with the shared value100 payload,
// syncs, and prints throughput plus runtime and buffer-cache statistics.
// The database file is removed on exit.
func fillseq() {
	dbname := os.Args[0] + ".db"
	// O_EXCL: refuse to clobber an existing database file.
	f, err := os.OpenFile(dbname, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		f.Close()
		os.Remove(f.Name())
	}()

	filer := lldb.NewSimpleFileFiler(f)
	a, err := lldb.NewAllocator(filer, &lldb.Options{})
	if err != nil {
		log.Println(err)
		return
	}
	a.Compress = true

	b, _, err := lldb.CreateBTree(a, nil)
	if err != nil {
		log.Println(err)
		return
	}

	// Pre-build all keys so key generation stays out of the timed loop.
	var keys [N][16]byte
	for i := range keys {
		binary.BigEndian.PutUint32(keys[i][:], uint32(i))
	}

	// Start from a clean heap so earlier allocations don't skew MemStats.
	debug.FreeOSMemory()
	t0 := time.Now()
	for _, key := range keys {
		if err = b.Set(key[:], value100); err != nil {
			log.Println(err)
			return
		}
	}
	// Flush to disk before stopping the clock so the sync cost is counted.
	if err := filer.Sync(); err != nil {
		log.Println(err)
		return
	}

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	d := time.Since(t0)
	fi, err := f.Stat()
	if err != nil {
		log.Println(err)
		return
	}

	secs := float64(d/time.Nanosecond) / float64(time.Second)
	sz := fi.Size()
	fmt.Printf("fillseq :%19v/op;%7.1f MB/s (%g secs, %d bytes)\n", d/N, float64(sz)/secs/1e6, secs, sz)
	nn, bytes := bufs.GCache.Stats()
	fmt.Printf("%d %d\n", nn, bytes)
	fmt.Printf("%+v\n", ms)
}
// TestPartGC builds a part with a content object, keeps exactly one live
// reference (allParts2), nils out every other local reference, and forces a
// garbage collection. Using allParts2 afterwards must still be safe — the
// point of the test is that collection of the dropped references must not
// invalidate the surviving one ("should not fail under valgrind").
// The statement order here is deliberate; do not reorder.
func (s *MultipartTestSuite) TestPartGC() {
	part := NewPart()
	wrapper := NewDataWrapper()
	part.SetContentObject(wrapper)
	s.part.AddPart(part)

	allParts := s.part.GetPart(0)
	allParts2 := s.part.GetPart(0)
	_, ok := allParts.(Part)
	_, ok = allParts2.(Part)

	// Drop every reference except allParts2 before forcing collection.
	allParts = nil
	part = nil
	wrapper = nil
	debug.FreeOSMemory()

	// Try to access allParts2.
	// This should not fail under valgrind
	typeString := "multipart/mixed"
	boundary := "--foo++bar==foo--"
	contentType := NewContentTypeFromString(typeString)
	contentType.SetParameter("boundary", boundary)
	allParts2.SetContentType(contentType)

	assert.True(s.T(), ok)
}
func benchmarkPrev(b *testing.B, n int) { t := TreeNew(cmp) for i := 0; i < n; i++ { t.Set(i, 0) } debug.FreeOSMemory() b.ResetTimer() for i := 0; i < b.N; i++ { en, err := t.SeekLast() if err != nil { b.Fatal(err) } m := 0 for { if _, _, err = en.Prev(); err != nil { break } m++ } if m != n { b.Fatal(m) } } }
// // for test only, run GC for profile GC in huge map func (eng *Engine) gcRunner(interval int) { tk := time.NewTicker(time.Duration(interval) * time.Second) defer tk.Stop() var pre, cnt int64 var iopre, iocnt int64 var rwpre, rwcnt int64 var norepeatgc bool for { // cnt = atomic.LoadInt64(eng.aliveCount) iocnt = atomic.LoadInt64(eng.newCount) rwcnt = atomic.LoadInt64(eng.rwCount) if cnt > 0 { if eng.conf.gctest && (pre != cnt || iopre != iocnt || rwpre != rwcnt) { fmt.Printf("GC with %d connections.\n", cnt) runtime.GC() //fmt.Printf("GC done.\n") pre = cnt } norepeatgc = false } else if norepeatgc == false { // even if eng.conf.gctest == false, still call FreeOSMemory when connections == 0 norepeatgc = true fmt.Printf("FreeOSMemory with %d connections.\n", cnt) // free memory debug.FreeOSMemory() //fmt.Printf("FreeOSMemory done.\n") pre = cnt } <-tk.C } }
func benchmarkNext(b *testing.B, n int) { t := TreeNew(cmp) for i := int64(0); i < int64(n); i++ { t.Set(i, struct{}{}) } debug.FreeOSMemory() b.ResetTimer() for i := 0; i < b.N; i++ { en, err := t.SeekFirst() if err != nil { b.Fatal(err) } m := 0 for { if _, _, err = en.Next(); err != nil { break } m++ } if m != n { b.Fatal(m) } } b.StopTimer() t.Close() }
func monitor() { c := time.Tick(1 * time.Second) mem := new(runtime.MemStats) origPct := debug.SetGCPercent(100) debug.SetGCPercent(origPct) for _ = range c { runtime.ReadMemStats(mem) mu.Lock() defer mu.Unlock() if tSize < 0 { continue } // Occupancy fraction: 70%. Don't GC before hitting this. softLimit := float64(tSize) * 0.7 pct := softLimit / float64(mem.Alloc) * 100 fmt.Printf("gctune: pct: %0.5f, target: %d, softLimit: %0.2f, Alloc: %d, Sys: %d\n", pct, tSize, softLimit, mem.Alloc, mem.Sys) if pct < 50 { // If this is too low, GC frequency increases too much. pct = 50 } debug.SetGCPercent(int(pct)) if mem.Sys > uint64(tSize*70/100) { fmt.Println("freeing") debug.FreeOSMemory() } } }
func receiveDecrypt(conn net.Conn) (Message, error) { // Our work is: // (receive) -> [de-GOB] -> [DECRYPT] -> [de-GOB] -> msg // Receive data and de-serialize to get the encrypted message encMsg := new([]byte) receive := gob.NewDecoder(conn) if err := receive.Decode(encMsg); err != nil { return Message{}, err } // Create decrypter and pass it the encrypted message r := bytes.NewReader(*encMsg) decrypter, err := saltsecret.NewReader(r, conf.Key, saltsecret.DECRYPT, false) if err != nil { return Message{}, err } // Read unencrypted serialized message and de-serialize it msg := new(Message) dec := gob.NewDecoder(decrypter) if err = dec.Decode(msg); err != nil { return Message{}, err } debug.FreeOSMemory() return *msg, nil }
func encryptDispatch(conn net.Conn, m Message) error { // We want to sent encrypted data. // In order to encrypt, we need to first serialize the message. // In order to sent/receive hassle free, we need to serialize the encrypted message // So: msg -> [GOB] -> [ENCRYPT] -> [GOB] -> (dispatch) // Create encrypter var encMsg bytes.Buffer encrypter, err := saltsecret.NewWriter(&encMsg, conf.Key, saltsecret.ENCRYPT, true) if err != nil { return err } // Serialize message enc := gob.NewEncoder(encrypter) if err = enc.Encode(m); err != nil { return err } // Flush encrypter to actuall encrypt the message if err = encrypter.Flush(); err != nil { return err } // Serialize encrypted message and dispatch it dispatch := gob.NewEncoder(conn) if err = dispatch.Encode(encMsg.Bytes()); err != nil { return err } debug.FreeOSMemory() return nil }
func (cmc *FileMonitor) cmpStat(f_old map[string]os.FileInfo, f_new map[string]os.FileInfo) { var fo FileOp for i, v := range f_old { if in, ok := f_new[i]; ok { if !v.ModTime().Equal(in.ModTime()) { // update f_old[i] = in delete(f_new, i) fo.fname = i fo.op = OP_MOD cmc.f <- fo } delete(f_new, i) // no changed } else { delete(f_old, i) fo.fname = i fo.op = OP_DEL cmc.f <- fo // delete } } for i, v := range f_new { // add f_old[i] = v delete(f_new, i) fo.fname = i fo.op = OP_ADD cmc.f <- fo } debug.FreeOSMemory() }
// removeConnection removes toRemove from chat.connections, preserving
// order. The copy/zero/truncate sequence (instead of the one-line append
// idiom, kept commented out) and the shrink-on-excess-capacity reallocation
// are deliberate: they were added to rule out a memory leak by clearing the
// vacated slot and releasing unused backing-array capacity. Debug counters
// are printed on every call.
func (chat *Chat) removeConnection(toRemove *Connection) {
	for i, el := range chat.connections {
		if *toRemove == el {
			//chat.connections = append(chat.connections[:i], chat.connections[i+1:]...)
			// THE FOLLOWING IS UNNECESSARILY COMPLEX AND ONLY TO RULE OUT A MemLeak
			// Shift the tail left, zero the now-duplicate last slot so
			// it holds no references, then truncate.
			copy(chat.connections[i:], chat.connections[i+1:])
			chat.connections[len(chat.connections)-1] = Connection{} //nil // or the zero value of T
			chat.connections = chat.connections[:len(chat.connections)-1]

			// THE FOLLOWING IS UNNECESSARILY COMPLEX AND ONLY TO RULE OUT A MemLeak
			// Reallocate once the slice uses less than half its capacity,
			// then push the freed memory back to the OS.
			if cap(chat.connections) > 2*len(chat.connections) {
				fmt.Println("SHRINK")
				shrink := make([]Connection, len(chat.connections))
				copy(shrink, chat.connections)
				chat.connections = shrink
				debug.FreeOSMemory()
			}
			break
		}
	}
	// DEBUG::
	fmt.Println("Num GoRoutines", runtime.NumGoroutine())
	fmt.Println("Len(connections)", len(chat.connections))
	fmt.Println("Cap(connections)", cap(chat.connections))
	GoRuntimeStats()
}
func TestDispositionIsAttachment(t *testing.T) { loop := 1 for i := 0; i < loop; i++ { // Case 1: not an attachment contentString := "" cd := NewContentDispositionFromString(contentString) assert.False(t, cd.IsAttachment()) // Case 2: is an attachment contentString = "attachment" cd = NewContentDispositionFromString(contentString) assert.True(t, cd.IsAttachment()) // Case 3: is an inline attachment contentString = "inline" cd = NewContentDispositionFromString(contentString) assert.True(t, cd.IsAttachment()) // Case 4: anything is an attachment for now contentString = "bogus" cd = NewContentDispositionFromString(contentString) assert.True(t, cd.IsAttachment()) } debug.FreeOSMemory() }
// Observed collision counts for a filter sized for 1 billion devices
// (approximate figures):
//   1.8G*1 = 1.8G memory -> ~236*1000 collisions (cityhash)
//   1.8G*2 = 3.6G memory -> 236 collisions (cityhash+md5)
//   1.8G*3 = 5.4G memory -> 0 collisions (cityhash+md5+fnv.New64())
//
// generateGarbage stress-tests the bloom filter with one billion generated
// keys, counting false positives in `bad` and panicking if a just-inserted
// key cannot be found again.
func generateGarbage() {
	var capSize uint = 1000000000
	//
	// fvv := []string{}
	// fvv = append(fvv,"hello")
	filter := NewBloomWrap(capSize)
	// Sanity check: an added key must be reported present.
	v := []byte("Love")
	b := filter.Add(v).Check(v)
	log.Println("check @v:", b)
	bad := 0
	for i := 0; i < 1000000000; i++ {
		if i%1000000 == 0 {
			// Log progress and return memory to the OS every 1M keys.
			log.Println(i)
			debug.FreeOSMemory()
		}
		d := []byte(fmt.Sprint("data", i))
		// d has never been added, so a hit here is a false positive.
		if filter.Check(d) == true {
			bad++
			// panic(fmt.Sprint("should not exist @d:",string(d)))
		}
		// After adding, the key must always be found.
		if flag := filter.Add(d).Check(d); flag == false {
			panic(d)
		}
	}
	log.Println("====>>That is all @bad:", bad)
}
// RebalanceGC rebalances the tree and runs the garbage collector. func (st *SimpleTree) RebalanceGC() *SimpleTree { st.Rebalance() runtime.GC() debug.FreeOSMemory() return st }
func bash(bash, content string) (out string, err error) { var buf bytes.Buffer cmd := exec.Command("/bin/sh", "-c", bash) cmd.Stdin = strings.NewReader(content) cmd.Stderr = &buf cmd.Stdout = &buf err = cmd.Run() if err != nil { printStackAndError(err) cmd.Process.Release() buf.Reset() return } out = buf.String() // Clean up resource cmd.Process.Kill() buf.Reset() debug.FreeOSMemory() return }
// DeleteGC sets all the pointers of the tree to nil and runs the garbage // collector. func (st *SimpleTree) DeleteGC() *SimpleTree { st.Delete() runtime.GC() debug.FreeOSMemory() return nil }
// main forces a garbage collection, writes a heap dump to ./heapdump, and
// prints basic runtime and memory statistics.
func main() {
	var m runtime.MemStats

	/* If a single dump is not enough, add a periodic call to FreeOSMemory
	(no need to call runtime.GC() first — debug.FreeOSMemory() does it).
	Something like http://play.golang.org/p/mP7_sMpX4F:

		go periodicFree(1 * time.Minute)

		func periodicFree(d time.Duration) {
			for range time.Tick(d) {
				debug.FreeOSMemory()
			}
		}

	Every call to FreeOSMemory takes some time (not much) and can partly
	run in parallel if GOMAXPROCS>1 since Go 1.3. */
	debug.FreeOSMemory()

	/* The dump can be rendered to a dot file (graphical heap view) or
	converted to hprof format:

		$ go get github.com/randall77/hprof/dumptodot
		$ dumptodot heapdump mybinary > heap.dot

	then open heap.dot with Graphviz. */
	f, err := os.Create("heapdump")
	if err != nil {
		panic(err)
	}
	debug.WriteHeapDump(f.Fd())
	// Close the dump file; the original leaked the descriptor for the
	// remainder of the process.
	f.Close()

	fmt.Println(runtime.GOOS)
	fmt.Println(runtime.NumCPU())
	fmt.Println(runtime.NumGoroutine())
	fmt.Println(runtime.GOARCH)

	runtime.ReadMemStats(&m)
	fmt.Println(m.TotalAlloc)
	fmt.Println(m.Alloc)
	fmt.Println(m.Sys)
}
// StartGC loops forever, releasing unused memory to the OS every ten
// seconds and logging the current goroutine count. It never returns; run
// it in its own goroutine.
func StartGC() {
	for {
		time.Sleep(10 * time.Second)
		// FreeOSMemory already triggers a garbage collection, so the
		// explicit runtime.GC() the original called first was redundant.
		debug.FreeOSMemory()
		log.Println("Current Routines", runtime.NumGoroutine())
	}
}
// startGcLoop spawns a goroutine that releases unused memory to the OS on
// a fixed cadence, forever.
// NOTE(review): period is already a time.Duration yet is multiplied by
// time.Second again; this only makes sense if callers pass a bare count
// like startGcLoop(5) — confirm against the call sites.
func startGcLoop(period time.Duration) {
	go func(every time.Duration) {
		for {
			debug.FreeOSMemory()
			time.Sleep(every * time.Second)
		}
	}(period)
}
func (memory *memoryDriver) expiredPart(a ...interface{}) { key := a[0].(string) // loop through all buckets for _, storedBucket := range memory.storedBuckets { delete(storedBucket.partMetadata, key) } debug.FreeOSMemory() }
// hello is an HTTP handler that allocates two large values via bigBytes,
// logs their type/address/size, sleeps three seconds, replies
// "Hello world!", and then releases unused memory to the OS.
// NOTE(review): unsafe.Sizeof(a) reports the size of the variable's value
// (e.g. 24 bytes for a slice header), NOT the number of bytes allocated by
// bigBytes — confirm that's what the log line is meant to show.
func hello(w http.ResponseWriter, r *http.Request) {
	a := bigBytes()
	fmt.Printf("memory type:%T address: %p size: %d\n", a, &a, unsafe.Sizeof(a))
	b := bigBytes()
	fmt.Printf("memory type:%T address: %p size: %d\n", b, &b, unsafe.Sizeof(b))
	// Hold both allocations alive briefly before responding.
	time.Sleep(3000 * time.Millisecond)
	io.WriteString(w, "Hello world!")
	debug.FreeOSMemory()
}
// startGcLoop launches a background loop that logs and then releases
// unused memory to the OS on a fixed cadence, forever.
// NOTE(review): period is already a time.Duration yet is multiplied by
// time.Second again; this only makes sense if callers pass a bare count
// like startGcLoop(5) — confirm against the call sites.
func startGcLoop(period time.Duration) {
	go func(every time.Duration) {
		for {
			log.Printf("gc\n")
			debug.FreeOSMemory()
			time.Sleep(every * time.Second)
		}
	}(period)
}
func TestNewContentDispositionFromString(t *testing.T) { loop := 1 for i := 0; i < loop; i++ { contentString := "hola!" cd := NewContentDispositionFromString(contentString) assert.Equal(t, cd.Disposition(), contentString) } debug.FreeOSMemory() }
func main() { DoSomeThing() for { println("idle") //runtime.GC() debug.FreeOSMemory() time.Sleep(2 * time.Second) } }
// Periodic release of unused memory back to the OS. // Long running network daemons, especially that see numerous TCP connections // open and close end up consuming a lot of memory which doesn't seem to be // freed in an efficient manner. func periodMemoryRelease(interval int) { ticker := time.NewTicker(time.Duration(interval) * time.Second) go func() { for _ = range ticker.C { Logger.Println("FreeOSMemory()") debug.FreeOSMemory() } }() }
// evictedPart - call back function called by caching module during individual cache evictions
// NOTE(review): this loop re-Sets every bucket back into storedBuckets
// without visibly modifying it; presumably Set refreshes the cache entry
// (TTL/registration) after an eviction — confirm against the caching
// module before changing.
func (donut API) evictedPart(a ...interface{}) {
	// loop through all buckets
	buckets := donut.storedBuckets.GetAll()
	for bucketName, bucket := range buckets {
		b := bucket.(storedBucket)
		donut.storedBuckets.Set(bucketName, b)
	}
	debug.FreeOSMemory()
}
// CreateObjectPart - create a part in a multipart session
// Serializes the underlying createObjectPart call with donut.lock and
// returns the part's etag. Memory is pushed back to the OS after each part
// since uploads can allocate heavily.
// NOTE(review): err.Trace() is invoked even when err is nil; presumably
// (*probe.Error).Trace tolerates a nil receiver — confirm, otherwise the
// success path panics.
func (donut API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (string, *probe.Error) {
	donut.lock.Lock()
	etag, err := donut.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data, signature)
	donut.lock.Unlock()
	// possible free
	debug.FreeOSMemory()
	return etag, err.Trace()
}
func memoryRelease(interval int) { ticker := time.NewTicker(time.Duration(interval) * time.Second) go func() { for _ = range ticker.C { debug("FreeOSMemory()") d.FreeOSMemory() } }() }