// write particle information func PrintParticle(idx int, f *os.File) { if f == nil { f = os.Stdout } c_fd := C.int(f.Fd()) _ = f.Sync() c_mode := C.CString("a") defer C.free(unsafe.Pointer(c_mode)) c_f := C.fdopen(c_fd, c_mode) C.fflush(c_f) C.hepevt_print_particle(C.int(idx+1), c_f) C.fflush(c_f) _ = f.Sync() }
func (s *Serializer) end() { C.raptor_serializer_serialize_end(s.serializer) if s.fh != nil { C.fflush(s.fh) } s.running = false }
func logLinuxStats() { if !log.V(1) { return } // We don't know which fields in struct mallinfo are most relevant to us yet, // so log it all for now. // // A major caveat is that mallinfo() returns stats only for the main arena. // glibc uses multiple allocation arenas to increase malloc performance for // multithreaded processes, so mallinfo may not report on significant parts // of the heap. mi := C.mallinfo() log.Infof("mallinfo stats: ordblks=%s, smblks=%s, hblks=%s, hblkhd=%s, usmblks=%s, fsmblks=%s, "+ "uordblks=%s, fordblks=%s, keepcost=%s", humanize.IBytes(uint64(mi.ordblks)), humanize.IBytes(uint64(mi.smblks)), humanize.IBytes(uint64(mi.hblks)), humanize.IBytes(uint64(mi.hblkhd)), humanize.IBytes(uint64(mi.usmblks)), humanize.IBytes(uint64(mi.fsmblks)), humanize.IBytes(uint64(mi.uordblks)), humanize.IBytes(uint64(mi.fordblks)), humanize.IBytes(uint64(mi.fsmblks))) // malloc_info() emits a *lot* of XML, partly because it generates stats for // all arenas, unlike mallinfo(). // // TODO(cdo): extract the useful bits and record to time-series DB. if !log.V(2) { return } // Create a memstream and make malloc_info() output to it. var buf *C.char var bufSize C.size_t memstream := C.open_memstream(&buf, &bufSize) if memstream == nil { log.Warning("couldn't open memstream") return } defer func() { C.fclose(memstream) C.free(unsafe.Pointer(buf)) }() if rc := C.malloc_info(0, memstream); rc != 0 { log.Warningf("malloc_info returned %d", rc) return } if rc := C.fflush(memstream); rc != 0 { log.Warningf("fflush returned %d", rc) return } log.Infof("malloc_info: %s", C.GoString(buf)) }
// check for problems with HEPEVT common block func CheckHepevtConsistency(f *os.File) bool { if f == nil { f = os.Stdout } c_fd := C.int(f.Fd()) _ = f.Sync() c_mode := C.CString("a") defer C.free(unsafe.Pointer(c_mode)) c_f := C.fdopen(c_fd, c_mode) C.fflush(c_f) o := C.hepevt_check_hepevt_consistency(c_f) C.fflush(c_f) _ = f.Sync() if o != C.int(0) { return true } return false }
// cFileWriterWrapper copies writeFn's data into w. writeFn takes a *C.FILE, and
// whatever writeFn writes to that *C.FILE, cFileWriterWrapper will then
// copy to w. This wrapper allows the Go API to write to io.Writers anything
// PicoSAT writes to a *C.FILE. writeFn need not close the *C.FILE.
//
// Mechanism: an OS pipe is created; the write end is wrapped as a *C.FILE
// handed to writeFn, while a goroutine drains the read end into w.
func cFileWriterWrapper(w io.Writer, writeFn func(*C.FILE) error) (err error) {
	rp, wp, err := os.Pipe()
	if err != nil {
		return err
	}
	// To avoid double closing wp, close it explicitly at each error branch.
	// Closing rp here is a data race with the io.Copy(w, rp) call in the
	// goroutine below, but only if there is an error that causes the outer
	// function to return early. But then io.Copy will just return an error,
	// which we (reasonably) ignore.
	defer func() {
		if e := rp.Close(); e != nil {
			// Don't hide prior errors.
			err = e
		}
	}()
	cfile, err := cfdopen(wp, "a") // wp.Close() below closes cfile.
	if err != nil {
		wp.Close()
		return err
	}
	// We have to read from the pipe in a separate goroutine because the write
	// end of the pipe will block if the pipe gets full.
	// errChan is buffered (capacity 1) so the goroutine can complete and
	// exit even if this function returns early without receiving from it.
	errChan := make(chan error, 1)
	go func() {
		_, e := io.Copy(w, rp)
		errChan <- e
	}()
	if err = writeFn(cfile); err != nil {
		wp.Close()
		return err
	}
	// We have to close wp or rp won't know it's hit the end of the data.
	// Without flushing cfile, the data might get stuck in the C buffer.
	// Note: err here is deliberately shadowed — the two-value cgo form of
	// C.fflush yields the C errno as the second value, and that shadowed
	// err is what this branch returns.
	if ok, err := C.fflush(cfile); ok != 0 {
		wp.Close()
		return err
	}
	if err = wp.Close(); err != nil {
		return err
	}
	// Wait for the copying goroutine to finish and surface its error.
	return <-errChan
}
// Flush pushes any data buffered in the underlying C stream out via
// fflush.
func (f *File) Flush() {
	stream := (*C.FILE)(f)
	C.fflush(stream)
}