func TestProtectFreeNestPtr(t *testing.T) {
	g_t = t
	tlist := New(TestNestData{}, 50000)
	createData(tlist, func(e *Element, i int) {
		v := e.Value().(*TestNestData)
		v.a = i
		v.b = TestDataPtr{a: i, b: &TestData{a: int64(i + 1)}}
		runtime.SetFinalizer(v.b.b, on_gc)
		e.Commit()
	})
	// fmt.Println(tlist.Front().DumpPicks())
	runtime.GC()
	for e := tlist.Front(); e != nil; e = e.Next() {
		v := e.Value().(*TestNestData)
		runtime.SetFinalizer(v.b.b, nil)
		if !reflect.ValueOf(v.b.b).IsValid() {
			t.Error("FreeNestPtr: data is freed")
		}
	}
}
func BenchmarkFinalizer(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	var wg sync.WaitGroup
	wg.Add(procs)
	for p := 0; p < procs; p++ {
		go func() {
			var data [CallsPerSched]*int
			for i := 0; i < CallsPerSched; i++ {
				data[i] = new(int)
			}
			for atomic.AddInt32(&N, -1) >= 0 {
				runtime.Gosched()
				for i := 0; i < CallsPerSched; i++ {
					runtime.SetFinalizer(data[i], fin)
				}
				for i := 0; i < CallsPerSched; i++ {
					runtime.SetFinalizer(data[i], nil)
				}
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
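// fin is the finalizer installed by BenchmarkFinalizer above. A no-op
// suffices, since the benchmark measures SetFinalizer itself; the name comes
// from the benchmark, while the signature is an assumption inferred from the
// element type of data[i] (*int).
func fin(v *int) {}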
func allocObject(cobj *C.git_object) Object {
	switch ObjectType(C.git_object_type(cobj)) {
	case ObjectCommit:
		commit := &Commit{
			gitObject: gitObject{cobj},
			cast_ptr:  (*C.git_commit)(cobj),
		}
		runtime.SetFinalizer(commit, (*Commit).Free)
		return commit

	case ObjectTree:
		tree := &Tree{
			gitObject: gitObject{cobj},
			cast_ptr:  (*C.git_tree)(cobj),
		}
		runtime.SetFinalizer(tree, (*Tree).Free)
		return tree

	case ObjectBlob:
		blob := &Blob{
			gitObject: gitObject{cobj},
			cast_ptr:  (*C.git_blob)(cobj),
		}
		runtime.SetFinalizer(blob, (*Blob).Free)
		return blob
	}
	return nil
}
// newAsyncWaiter creates an asyncWaiterImpl and starts its worker goroutine.
func newAsyncWaiter() *asyncWaiterImpl {
	result, h0, h1 := system.GetCore().CreateMessagePipe(nil)
	if result != system.MOJO_RESULT_OK {
		panic(fmt.Sprintf("can't create message pipe %v", result))
	}
	waitChan := make(chan waitRequest, 10)
	cancelChan := make(chan AsyncWaitId, 10)
	isNotified := new(int32)
	worker := &asyncWaiterWorker{
		[]system.Handle{h1},
		[]system.MojoHandleSignals{system.MOJO_HANDLE_SIGNAL_READABLE},
		[]AsyncWaitId{0},
		[]chan<- WaitResponse{make(chan WaitResponse)},
		isNotified,
		waitChan,
		cancelChan,
		0,
	}
	runtime.SetFinalizer(worker, finalizeWorker)
	go worker.runLoop()
	waiter := &asyncWaiterImpl{
		wakingHandle:     h0,
		isWorkerNotified: isNotified,
		waitChan:         waitChan,
		cancelChan:       cancelChan,
	}
	runtime.SetFinalizer(waiter, finalizeAsyncWaiter)
	return waiter
}
func CreateContextFromTypeUnsafe(properties *C.cl_context_properties, device_type C.cl_device_type,
	pfn_notify CL_ctx_notify, user_data unsafe.Pointer) (*Context, error) {
	var err C.cl_int
	var clContext C.cl_context
	if pfn_notify != nil {
		// Pack the user callback and its data so the C bridge can find them.
		c_user_data := make([]unsafe.Pointer, 2)
		c_user_data[0] = user_data
		c_user_data[1] = unsafe.Pointer(&pfn_notify)
		ctx_notify[c_user_data[1]] = pfn_notify
		clContext = C.CLCreateContextFromType(properties, device_type, unsafe.Pointer(&c_user_data), &err)
	} else {
		clContext = C.clCreateContextFromType(properties, device_type, nil, nil, &err)
	}
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clContext == nil {
		return nil, ErrUnknown
	}
	contextTmp := &Context{clContext: clContext, devices: nil}
	cDevices, errD := contextTmp.GetDevices()
	if errD != nil {
		// Device lookup failed: still hand back the context with a finalizer
		// attached, but propagate the actual error (err is CL_SUCCESS here,
		// so returning toError(err) would silently report success).
		runtime.SetFinalizer(contextTmp, releaseContext)
		return contextTmp, errD
	}
	context := &Context{clContext: clContext, devices: cDevices}
	runtime.SetFinalizer(context, releaseContext)
	return context, nil
}
//export remoteCreateCallback
func remoteCreateCallback(cremote unsafe.Pointer, crepo unsafe.Pointer, cname, curl *C.char, payload unsafe.Pointer) C.int {
	name := C.GoString(cname)
	url := C.GoString(curl)
	repo := newRepositoryFromC((*C.git_repository)(crepo))
	// We don't own this repository, so make sure we don't try to free it.
	runtime.SetFinalizer(repo, nil)

	if opts, ok := pointerHandles.Get(payload).(CloneOptions); ok {
		remote, err := opts.RemoteCreateCallback(repo, name, url)
		if remote != nil {
			// Clear the finalizer, as the calling C function will free the
			// remote itself. Guarded: SetFinalizer panics on a nil object.
			runtime.SetFinalizer(remote, nil)
		}
		if err == ErrOk && remote != nil {
			cptr := (**C.git_remote)(cremote)
			*cptr = remote.ptr
		} else if err == ErrOk && remote == nil {
			panic("no remote created by callback")
		}
		return C.int(err)
	}
	panic("invalid remote create callback")
}
// Pipe creates a synchronous in-memory pipe.
// It can be used to connect code expecting an io.Reader
// with code expecting an io.Writer.
// Reads on one end are matched with writes on the other,
// copying data directly between the two; there is no internal buffering.
func Pipe() (*PipeReader, *PipeWriter) {
	p := &pipe{
		r1:     make(chan []byte),
		r2:     make(chan pipeResult),
		w1:     make(chan []byte),
		w2:     make(chan pipeResult),
		rclose: make(chan os.Error),
		wclose: make(chan os.Error),
		done:   make(chan int),
	}
	go p.run()

	// NOTE: Cannot use composite literal here:
	//	pipeHalf{c1: p.cr1, c2: p.cr2, cclose: p.crclose, cdone: p.cdone}
	// because this implicitly copies the pipeHalf, which copies the inner mutex.

	r := new(PipeReader)
	r.c1 = p.r1
	r.c2 = p.r2
	r.cclose = p.rclose
	r.done = p.done
	runtime.SetFinalizer(r, (*PipeReader).finalizer)

	w := new(PipeWriter)
	w.c1 = p.w1
	w.c2 = p.w2
	w.cclose = p.wclose
	w.done = p.done
	runtime.SetFinalizer(w, (*PipeWriter).finalizer)

	return r, w
}
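// Usage sketch for the Pipe above (hedged: the Read/Write/Close methods on
// PipeReader/PipeWriter are assumed from the doc comment's io.Reader and
// io.Writer claim, not shown in this excerpt). Because there is no internal
// buffering, each Write blocks until a matching Read consumes the bytes, so
// the writer must run in its own goroutine.
func pipeUsageExample() {
	r, w := Pipe()
	go func() {
		w.Write([]byte("hello")) // blocks until the Read below arrives
		w.Close()
	}()
	buf := make([]byte, 8)
	n, _ := r.Read(buf)
	println("read:", string(buf[:n]))
}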
func TestWriteHeapDumpFinalizers(t *testing.T) {
	if runtime.GOOS == "nacl" {
		t.Skip("WriteHeapDump is not available on NaCl.")
	}
	f, err := ioutil.TempFile("", "heapdumptest")
	if err != nil {
		t.Fatalf("TempFile failed: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// bug 9172: WriteHeapDump couldn't handle more than one finalizer
	println("allocating objects")
	x := &Obj{}
	runtime.SetFinalizer(x, objfin)
	y := &Obj{}
	runtime.SetFinalizer(y, objfin)

	// Trigger collection of x and y, queueing of their finalizers.
	println("starting gc")
	runtime.GC()

	// Make sure WriteHeapDump doesn't fail with multiple queued finalizers.
	println("starting dump")
	WriteHeapDump(f.Fd())
	println("done dump")
}
func (d *_qt_drv) SetAutoGC(b bool) {
	d.gc = b
	if d.gc {
		runtime.SetFinalizer(d, (*_qt_drv).Delete)
	} else {
		runtime.SetFinalizer(d, nil)
	}
}
func Allocate() {
	p := new(P)
	*p = new(P)
	**p = new(P)
	runtime.SetFinalizer(p, Print)
	runtime.SetFinalizer(*p, Print)
	runtime.SetFinalizer(**p, Print)
}
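// Assumed declarations, not shown in the snippet: for Allocate to compile, P
// must be a self-referential pointer type, so that p (*P), *p (P), and **p
// (P) are all pointer-typed and assignable to Print's parameter, which is
// what runtime.SetFinalizer requires of a finalizer's argument.
type P *P

// Print is the assumed finalizer; it just reports the address being finalized.
func Print(p P) {
	println("finalized", p)
}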
func (n *node) run() {
	var (
		mate    = time.After(n.delay)
		suiters = make([]evo.Genome, len(n.peers))
		done    = make(chan evo.Genome)
		nextval = n.val
	)

	runtime.SetFinalizer(n.val, nil)
	runtime.SetFinalizer(n.val, func(val evo.Genome) {
		val.Close()
	})

	for {
		select {
		case n.delay = <-n.delayc:

		case n.valc <- n.val:

		case nextval = <-n.valc:

		case <-mate:
			go func(oldval evo.Genome) {
				var ok bool
				for i := range n.peers {
					suiters[i], ok = <-n.peers[i].valc
					if !ok {
						return
					}
				}
				newval := oldval.Evolve(suiters...)
				done <- newval
			}(n.val)

		case val := <-done:
			if nextval == n.val {
				nextval = val
			} else if val != n.val && val != nextval {
				val.Close()
			} else {
				n.val = nextval
				runtime.SetFinalizer(n.val, nil)
				runtime.SetFinalizer(n.val, func(val evo.Genome) {
					val.Close()
				})
			}
			mate = time.After(n.delay)

		case ch := <-n.closec:
			ch <- struct{}{}
			return
		}
	}
}
func main() {
	runtime.GOMAXPROCS(4)
	for i := 0; i < N; i++ {
		b := &B{i}
		a := &A{b, i}
		runtime.SetFinalizer(b, finalB)
		runtime.SetFinalizer(a, finalA)
	}
	for i := 0; i < N; i++ {
		runtime.GC()
		runtime.Gosched()
	}
	if nfinal < N*9/10 {
		// panic takes a single value, so report the counts separately.
		println("not enough finalizing:", nfinal, "/", N)
		panic("not enough finalizing")
	}
}
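// Assumed package-level declarations for the stress test above; the snippet
// omits them, so these bodies are sketches rather than the original test's
// exact code. The counter is incremented atomically because finalizers run
// on a dedicated goroutine concurrently with main; the plain read in main is
// best-effort after the GC/Gosched rounds.
const N = 250

type B struct{ n int }

type A struct {
	b *B
	n int
}

var nfinal int32

func finalA(a *A) { atomic.AddInt32(&nfinal, 1) }

func finalB(b *B) { atomic.AddInt32(&nfinal, 1) }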
// Release releases any resources associated with the Process.
func (p *Process) Release() Error {
	// NOOP for unix.
	p.Pid = -1
	// no need for a finalizer anymore
	runtime.SetFinalizer(p, nil)
	return nil
}
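// Sketch of the matching registration that Release undoes (assumed: modeled
// on the historical os package constructor, which is not part of this
// excerpt). Arming the finalizer in the constructor guarantees cleanup for a
// Process dropped without an explicit Release or Wait; Release then disarms
// it once the caller has cleaned up explicitly.
func newProcess(pid int) *Process {
	p := &Process{Pid: pid}
	runtime.SetFinalizer(p, (*Process).Release)
	return p
}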
// Prepare query string. Return a new statement.
func (c *SQLiteConn) Prepare(query string) (driver.Stmt, error) {
	pquery := C.CString(query)
	defer C.free(unsafe.Pointer(pquery))
	var s *C.sqlite3_stmt
	var tail *C.char
	rv := C.sqlite3_prepare_v2(c.db, pquery, -1, &s, &tail)
	if rv != C.SQLITE_OK {
		return nil, c.lastError()
	}
	var t string
	if tail != nil && *tail != '\000' {
		t = strings.TrimSpace(C.GoString(tail))
	}
	nv := int(C.sqlite3_bind_parameter_count(s))
	var nn []string
	for i := 0; i < nv; i++ {
		pn := C.GoString(C.sqlite3_bind_parameter_name(s, C.int(i+1)))
		// Collect ordinal parameters of the form $1, $2, ... (48..57 are
		// the ASCII codes for '0'..'9').
		if len(pn) > 1 && pn[0] == '$' && 48 <= pn[1] && pn[1] <= 57 {
			nn = append(nn, C.GoString(C.sqlite3_bind_parameter_name(s, C.int(i+1))))
		}
	}
	ss := &SQLiteStmt{c: c, s: s, nv: nv, nn: nn, t: t}
	runtime.SetFinalizer(ss, (*SQLiteStmt).Close)
	return ss, nil
}
func (s *driverStmt) Close() error {
	if s != nil && s.res != nil {
		C.PQclear(s.res)
		runtime.SetFinalizer(s, nil)
	}
	return nil
}
// NewCaretakerd creates a new Caretakerd instance from the given config
func NewCaretakerd(conf Config, syncGroup *usync.Group) (*Caretakerd, error) {
	err := conf.Validate()
	if err != nil {
		return nil, err
	}
	log, err := logger.NewLogger(conf.Logger, "caretakerd", syncGroup)
	if err != nil {
		return nil, errors.New("Could not create logger for caretakerd.").CausedBy(err)
	}
	ks, err := keyStore.NewKeyStore(bool(conf.RPC.Enabled), conf.KeyStore)
	if err != nil {
		return nil, err
	}
	ctl, err := control.NewControl(conf.Control, ks)
	if err != nil {
		return nil, err
	}
	services, err := service.NewServices(conf.Services, syncGroup, ks)
	if err != nil {
		return nil, err
	}
	result := Caretakerd{
		open:          true,
		config:        conf,
		logger:        log,
		control:       ctl,
		keyStore:      ks,
		services:      services,
		lock:          new(sync.Mutex),
		syncGroup:     syncGroup,
		signalChannel: nil,
	}
	runtime.SetFinalizer(&result, finalize)
	return &result, nil
}
// newTestEnvironment creates a new Environment suitable for use in tests.
//
// This function should only be used in testing.
func newTestEnvironment() *runtime.Environment {
	storage, err := runtime.NewTemporaryStorage(os.TempDir())
	nilOrPanic(err, "Failed to create temporary storage at: ", os.TempDir())

	folder, err := storage.NewFolder()
	nilOrPanic(err, "Failed to create temporary storage folder")

	// Set a finalizer so that we always get the temporary folder removed.
	// This should really only be used in tests; otherwise it would be better
	// to call Remove() manually.
	rt.SetFinalizer(folder, func(f runtime.TemporaryFolder) {
		f.Remove()
	})

	logger, err := runtime.CreateLogger(os.Getenv("LOGGING_LEVEL"))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error creating logger. %s", err)
		os.Exit(1)
	}

	return &runtime.Environment{
		GarbageCollector: &gc.GarbageCollector{},
		TemporaryStorage: folder,
		Log:              logger,
	}
}
func Clone(url string, path string, options *CloneOptions) (*Repository, error) {
	repo := new(Repository)

	curl := C.CString(url)
	defer C.free(unsafe.Pointer(curl))
	cpath := C.CString(path)
	defer C.free(unsafe.Pointer(cpath))

	var copts C.git_clone_options
	populateCloneOptions(&copts, options)
	defer freeCheckoutOpts(&copts.checkout_opts)

	// Guarded against nil options; populateCloneOptions tolerates nil, but
	// dereferencing options here would panic.
	if options != nil && len(options.CheckoutBranch) != 0 {
		copts.checkout_branch = C.CString(options.CheckoutBranch)
		defer C.free(unsafe.Pointer(copts.checkout_branch))
	}

	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ret := C.git_clone(&repo.ptr, curl, cpath, &copts)
	if ret < 0 {
		return nil, MakeGitError(ret)
	}

	runtime.SetFinalizer(repo, (*Repository).Free)
	return repo, nil
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
	reader, writer := io.Pipe()

	var printFunc func(args ...interface{})
	switch level {
	case DebugLevel:
		printFunc = logger.Debug
	case InfoLevel:
		printFunc = logger.Info
	case WarnLevel:
		printFunc = logger.Warn
	case ErrorLevel:
		printFunc = logger.Error
	case FatalLevel:
		printFunc = logger.Fatal
	case PanicLevel:
		printFunc = logger.Panic
	default:
		printFunc = logger.Print
	}

	go logger.writerScanner(reader, printFunc)
	runtime.SetFinalizer(writer, writerFinalizer)

	return writer
}
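// Usage sketch for WriterLevel: route another library's log output through
// the leveled logger, here using the standard library "log" package as the
// producer (an assumption; any io.Writer consumer works). writerScanner reads
// the pipe line by line and emits each line via the selected print function;
// the finalizer above only closes the pipe if the caller forgets to.
func writerLevelExample(logger *Logger) {
	w := logger.WriterLevel(WarnLevel)
	defer w.Close()
	log.SetOutput(w)
	log.Println("this line is emitted at warn level")
}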
// OpenFile returns a new filesystem-backed storage implementation with the
// given path. This also holds a file lock, so any subsequent attempt to open
// the same path will fail.
//
// The storage must be closed after use, by calling the Close method.
func OpenFile(path string) (Storage, error) {
	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, err
	}

	flock, err := newFileLock(filepath.Join(path, "LOCK"))
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			flock.release()
		}
	}()

	rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old"))
	logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}

	fs := &fileStorage{path: path, flock: flock, logw: logw}
	runtime.SetFinalizer(fs, (*fileStorage).Close)
	return fs, nil
}
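// Usage sketch for OpenFile (an assumed caller, not part of the library):
// Close releases the LOCK file, so the SetFinalizer registration above is
// only a safety net for storages dropped without an explicit Close.
func openFileExample() {
	stor, err := OpenFile("testdb")
	if err != nil {
		panic(err)
	}
	defer stor.Close()
	// ... use stor as the backing store for a database ...
}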
func (c *OCI8Conn) Prepare(query string) (driver.Stmt, error) {
	pquery := C.CString(query)
	defer C.free(unsafe.Pointer(pquery))

	var s, bp, defp unsafe.Pointer
	if rv := C.WrapOCIHandleAlloc(
		c.env,
		C.OCI_HTYPE_STMT,
		(C.size_t)(unsafe.Sizeof(bp)*2)); rv.rv != C.OCI_SUCCESS {
		return nil, ociGetError(c.err)
	} else {
		s = rv.ptr
		bp = rv.extra
		defp = unsafe.Pointer(uintptr(rv.extra) + unsafe.Sizeof(unsafe.Pointer(nil)))
	}

	if rv := C.OCIStmtPrepare(
		(*C.OCIStmt)(s),
		(*C.OCIError)(c.err),
		(*C.OraText)(unsafe.Pointer(pquery)),
		C.ub4(C.strlen(pquery)),
		C.ub4(C.OCI_NTV_SYNTAX),
		C.ub4(C.OCI_DEFAULT)); rv != C.OCI_SUCCESS {
		return nil, ociGetError(c.err)
	}

	ss := &OCI8Stmt{c: c, s: s, bp: (**C.OCIBind)(bp), defp: (**C.OCIDefine)(defp)}
	runtime.SetFinalizer(ss, (*OCI8Stmt).Close)
	return ss, nil
}
// Close closes the rows iterator.
func (rows *rows) Close() error {
	// Verify that rows has not already been closed.
	if rows.isClosed {
		return nil
	}

	// Close the cursor.
	var err error
	ret := odbc.SQLCloseCursor(rows.handle)
	if isError(ret) {
		err = errorStatement(rows.handle, rows.sqlStmt)
	}

	// Clear the finalizer.
	runtime.SetFinalizer(rows, nil)

	// Mark the rows as closed.
	rows.isClosed = true

	// Return any error.
	return err
}
func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
	var islice *util.Range
	if slice != nil {
		islice = &util.Range{}
		if slice.Start != nil {
			islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
		}
		if slice.Limit != nil {
			islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
		}
	}
	rawIter := db.newRawIterator(islice, ro)
	iter := &dbIter{
		db:     db,
		icmp:   db.s.icmp,
		iter:   rawIter,
		seq:    seq,
		strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
		key:    make([]byte, 0),
		value:  make([]byte, 0),
	}
	atomic.AddInt32(&db.aliveIters, 1)
	runtime.SetFinalizer(iter, (*dbIter).Release)
	return iter
}
func newVSphere(cfg VSphereConfig) (*VSphere, error) {
	if cfg.Disk.SCSIControllerType == "" {
		cfg.Disk.SCSIControllerType = LSILogicSASControllerType
	} else if !checkControllerSupported(cfg.Disk.SCSIControllerType) {
		glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType)
		return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'")
	}
	if cfg.Global.WorkingDir != "" {
		cfg.Global.WorkingDir = path.Clean(cfg.Global.WorkingDir) + "/"
	}
	if cfg.Global.RoundTripperCount == 0 {
		cfg.Global.RoundTripperCount = RoundTripperDefaultCount
	}

	c, err := newClient(&cfg, context.TODO())
	if err != nil {
		return nil, err
	}

	id, cluster, err := readInstance(c, &cfg)
	if err != nil {
		return nil, err
	}

	vs := VSphere{
		client:          c,
		cfg:             &cfg,
		localInstanceID: id,
		clusterName:     cluster,
	}
	runtime.SetFinalizer(&vs, logout)
	return &vs, nil
}
func (c *driverConn) Prepare(query string) (driver.Stmt, error) {
	// Generate a unique statement name.
	stmtname := strconv.Itoa(c.stmtNum)
	cstmtname := C.CString(stmtname)
	c.stmtNum++
	defer C.free(unsafe.Pointer(cstmtname))
	stmtstr := C.CString(query)
	defer C.free(unsafe.Pointer(stmtstr))

	res := C.PQprepare(c.db, cstmtname, stmtstr, 0, nil)
	err := resultError(res)
	if err != nil {
		C.PQclear(res)
		return nil, err
	}

	stmtinfo := C.PQdescribePrepared(c.db, cstmtname)
	err = resultError(stmtinfo)
	if err != nil {
		C.PQclear(stmtinfo)
		return nil, err
	}
	defer C.PQclear(stmtinfo)

	nparams := int(C.PQnparams(stmtinfo))
	statement := &driverStmt{stmtname, c.db, res, nparams}
	runtime.SetFinalizer(statement, (*driverStmt).Close)
	return statement, nil
}
// GetGroupTarget is a wrapper around cairo_get_group_target().
func (v *Context) GetGroupTarget() *Surface {
	c := C.cairo_get_group_target(v.native())
	s := wrapSurface(c)
	s.reference()
	runtime.SetFinalizer(s, (*Surface).destroy)
	return s
}
func newResult(res *C.PGresult) *driverRows {
	ncols := int(C.PQnfields(res))
	nrows := int(C.PQntuples(res))
	result := &driverRows{res: res, nrows: nrows, currRow: -1, ncols: ncols, cols: nil}
	runtime.SetFinalizer(result, (*driverRows).Close)
	return result
}
// CreateSimilar is a wrapper around cairo_surface_create_similar().
func (v *Surface) CreateSimilar(content Content, width, height int) *Surface {
	c := C.cairo_surface_create_similar(v.native(),
		C.cairo_content_t(content), C.int(width), C.int(height))
	s := wrapSurface(c)
	runtime.SetFinalizer(s, (*Surface).destroy)
	return s
}
func NewJSRuntime() (*JsVm, error) {
	vm := otto.New()
	jsvm := NewJsVm(vm)
	RegisterModules(jsvm)

	path := defaultPath
	fileC, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	/*
		fileHash := md5.Sum(fileC)
		if len(lastMd5) == 0 || lastMd5 != fileHash {
			lastMd5 = fileHash
			fmt.Println("compiling")
			compiled, err = vm.Compile(path, nil)
			if err != nil {
				return nil, err
			}
		}
		_, err = vm.Run(compiled)
	*/
	_, err = jsvm.Run(string(fileC))
	if err == nil {
		UsedRuntimes++
		fmt.Println("used runtimes " + strconv.Itoa(UsedRuntimes))
		runtime.SetFinalizer(vm, finalizer)
	}
	return jsvm, err
}
// CreateForRectangle is a wrapper around cairo_surface_create_for_rectangle().
func (v *Surface) CreateForRectangle(x, y, width, height float64) *Surface {
	c := C.cairo_surface_create_for_rectangle(v.native(),
		C.double(x), C.double(y), C.double(width), C.double(height))
	s := wrapSurface(c)
	runtime.SetFinalizer(s, (*Surface).destroy)
	return s
}