func (d *deadlockDetector) Watch(name string, mut sync.Locker) {
	d.lockers[name] = mut
	go func() {
		for {
			time.Sleep(d.timeout / 4)
			ok := make(chan bool, 2)

			go func() {
				mut.Lock()
				mut.Unlock()
				ok <- true
			}()

			go func() {
				time.Sleep(d.timeout)
				ok <- false
			}()

			if r := <-ok; !r {
				msg := fmt.Sprintf("deadlock detected at %s", name)
				for otherName, otherMut := range d.lockers {
					if otherHolder, ok := otherMut.(Holder); ok {
						holder, goid := otherHolder.Holder()
						msg += fmt.Sprintf("\n %s = current holder: %s at routine %d", otherName, holder, goid)
					}
				}
				panic(msg)
			}
		}
	}()
}
func (d *deadlockDetector) Watch(name string, mut sync.Locker) {
	d.lockers[name] = mut
	go func() {
		for {
			time.Sleep(d.timeout / 4)
			ok := make(chan bool, 2)

			go func() {
				mut.Lock()
				_ = 1 // empty critical section
				mut.Unlock()
				ok <- true
			}()

			go func() {
				time.Sleep(d.timeout)
				ok <- false
			}()

			if r := <-ok; !r {
				msg := fmt.Sprintf("deadlock detected at %s", name)
				for otherName, otherMut := range d.lockers {
					if otherHolder, ok := otherMut.(Holdable); ok {
						msg += "\n===" + otherName + "===\n" + otherHolder.Holders()
					}
				}
				panic(msg)
			}
		}
	}()
}
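// A minimal usage sketch for Watch above. The struct literal and the
// 30-second timeout are assumptions for illustration; the surrounding
// package presumably provides its own constructor.
func watchExample() {
	var mut sync.Mutex
	d := &deadlockDetector{
		lockers: make(map[string]sync.Locker), // assumed field type
		timeout: 30 * time.Second,             // assumed value
	}
	d.Watch("main mutex", &mut) // panics with holder info if mut stays held past the timeout
}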
// Running benchmarks in parallel leads to multiple chrome instances coming up
// at the same time. When there are crashes, chrome processes stick around,
// which can severely impact the machine's performance. To stop this from
// happening, zombie chrome processes are periodically killed.
func ChromeProcessesCleaner(locker sync.Locker, chromeCleanerTimer time.Duration) {
	for range time.Tick(chromeCleanerTimer) {
		glog.Info("The chromeProcessesCleaner goroutine has started")
		glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
		locker.Lock()
		util.LogErr(ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, PKILL_TIMEOUT, nil, nil))
		locker.Unlock()
	}
}
// if this value is a Locker, lock it and add it to the locks slice
func (w *walker) lock(v reflect.Value) {
	if !w.useLocks {
		return
	}

	if !v.IsValid() || !v.CanInterface() {
		return
	}

	type rlocker interface {
		RLocker() sync.Locker
	}

	var locker sync.Locker

	// We can't call Interface() on a value directly, since that requires
	// a copy. This is OK, since the pointer to a value which is a sync.Locker
	// is also a sync.Locker.
	if v.Kind() == reflect.Ptr {
		switch l := v.Interface().(type) {
		case rlocker:
			// don't lock a mutex directly
			if _, ok := l.(*sync.RWMutex); !ok {
				locker = l.RLocker()
			}
		case sync.Locker:
			locker = l
		}
	} else if v.CanAddr() {
		switch l := v.Addr().Interface().(type) {
		case rlocker:
			// don't lock a mutex directly
			if _, ok := l.(*sync.RWMutex); !ok {
				locker = l.RLocker()
			}
		case sync.Locker:
			locker = l
		}
	}

	// still no callable locker
	if locker == nil {
		return
	}

	// don't lock a mutex directly
	switch locker.(type) {
	case *sync.Mutex, *sync.RWMutex:
		return
	}

	locker.Lock()
	w.locks[w.depth] = locker
}
// sleepWhile polls cond under the lock and sleeps in one-millisecond
// increments until cond reports false.
func sleepWhile(l sync.Locker, cond func() bool) {
	for {
		l.Lock()
		val := cond()
		l.Unlock()
		if !val {
			break
		}
		time.Sleep(time.Millisecond)
	}
}
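// A minimal sketch of how sleepWhile might be used: block until a counter
// guarded by mu drops to zero. The variables here are illustrative only.
var (
	mu      sync.Mutex
	pending int
)

func waitForDrain() {
	sleepWhile(&mu, func() bool { return pending > 0 })
}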
// WaitEvents releases l, blocks until one of evs fires, then reacquires l.
func WaitEvents(l sync.Locker, evs ...*Event) {
	cases := make([]reflect.SelectCase, 0, len(evs))
	for _, ev := range evs {
		cases = append(cases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(ev.C()),
		})
	}
	l.Unlock()
	reflect.Select(cases)
	l.Lock()
}
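// A usage sketch for WaitEvents, assuming Event values from the same
// package. The lock must be held on entry; it is released while waiting
// and held again when WaitEvents returns.
func awaitEither(mu sync.Locker, a, b *Event) {
	mu.Lock()
	WaitEvents(mu, a, b) // wakes when either event's channel fires
	mu.Unlock()
}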
// if this value is a Locker, lock it and add it to the locks slice
func (w *walker) lock(v reflect.Value) {
	if !w.useLocks {
		return
	}

	if !v.IsValid() || !v.CanInterface() {
		return
	}

	type rlocker interface {
		RLocker() sync.Locker
	}

	var locker sync.Locker

	// first check if we can get a locker from the value
	switch l := v.Interface().(type) {
	case rlocker:
		// don't lock a mutex directly
		if _, ok := l.(*sync.RWMutex); !ok {
			locker = l.RLocker()
		}
	case sync.Locker:
		locker = l
	}

	// the value itself isn't a locker, so check the method on a pointer too
	if locker == nil && v.CanAddr() {
		switch l := v.Addr().Interface().(type) {
		case rlocker:
			// don't lock a mutex directly
			if _, ok := l.(*sync.RWMutex); !ok {
				locker = l.RLocker()
			}
		case sync.Locker:
			locker = l
		}
	}

	// still no callable locker
	if locker == nil {
		return
	}

	// don't lock a mutex directly
	switch locker.(type) {
	case *sync.Mutex, *sync.RWMutex:
		return
	}

	locker.Lock()
	w.locks[w.depth] = locker
}
// run kernel on inputs, produce outputs
func (b *Block) process() Interrupt {
	b.Monitor <- MonitorMessage{
		BI_KERNEL,
		nil,
	}

	if b.state.Processed {
		return nil
	}

	// block until connected to source if necessary
	if b.sourceType != NONE && b.routing.Source == nil {
		select {
		case f := <-b.routing.InterruptChan:
			return f
		}
	}

	// we should only be able to get here if
	// - we don't need a shared state
	// - we have an external shared state and it has been attached

	// if we have a store, lock it
	var store sync.Locker
	var ok bool
	if store, ok = b.routing.Source.(sync.Locker); ok {
		store.Lock()
	}

	// run the kernel
	interrupt := b.kernel(b.state.inputValues,
		b.state.outputValues,
		b.state.internalValues,
		b.routing.Source,
		b.routing.InterruptChan)

	// unlock the store if necessary
	if store != nil {
		store.Unlock()
	}

	// if an interrupt was received, return it
	if interrupt != nil {
		return interrupt
	}

	b.state.Processed = true

	return nil
}
func New(obj interface{}, path string, codec Codec, locker sync.Locker) (*File, error) {
	// check object
	if reflect.TypeOf(obj).Kind() != reflect.Ptr {
		return nil, makeErr(nil, "object must be a pointer")
	}

	// init
	file := &File{
		obj:    obj,
		path:   path,
		codec:  codec,
		locker: locker,
		cbs:    make(chan func()),
	}

	// try lock
	done := make(chan struct{})
	go func() {
		locker.Lock()
		close(done)
	}()
	select {
	case <-time.NewTimer(time.Second * 1).C:
		return nil, makeErr(nil, "lock fail")
	case <-done:
	}

	// try load from file
	dbFile, err := os.Open(path)
	if err == nil {
		defer dbFile.Close()
		err = codec.Decode(dbFile, obj)
		if err != nil {
			return nil, makeErr(err, "decode error")
		}
	}

	// loop
	go func() {
		for {
			cb, ok := <-file.cbs
			if !ok {
				return
			}
			cb()
		}
	}()

	return file, nil
}
func RunHostBenchmark(
	ctx *common.Context,
	inputManager *common.InputManager,
	sandbox Sandbox,
	ioLock sync.Locker,
) (BenchmarkResults, error) {
	ioLock.Lock()
	defer ioLock.Unlock()

	ctx.Log.Info("Running benchmark")

	benchmarkResults := make(BenchmarkResults)
	for idx, benchmarkCase := range cases {
		input, err := inputManager.Add(
			benchmarkCase.hash,
			NewRunnerTarInputFactory(
				&ctx.Config,
				benchmarkCase.hash,
				&benchmarkCase,
			),
		)
		if err != nil {
			return nil, err
		}
		defer input.Release(input)

		run := common.Run{
			AttemptID: uint64(idx),
			Source:    benchmarkCase.source,
			Language:  benchmarkCase.language,
			InputHash: benchmarkCase.hash,
			MaxScore:  1.0,
			Debug:     false,
		}
		results, err := Grade(ctx, nil, &run, input, sandbox)
		if err != nil {
			return nil, err
		}

		benchmarkResults[benchmarkCase.name] = BenchmarkResult{
			Time:     results.Time,
			WallTime: results.WallTime,
			Memory:   results.Memory,
		}
	}

	return benchmarkResults, nil
}
func TestRLocker(t *testing.T) {
	var wl RWMutex
	var rl sync.Locker
	wlocked := make(chan bool, 1)
	rlocked := make(chan bool, 1)
	rl = wl.RLocker()
	n := 10
	go func() {
		for i := 0; i < n; i++ {
			rl.Lock()
			rl.Lock()
			rlocked <- true
			wl.Lock()
			wlocked <- true
		}
	}()
	for i := 0; i < n; i++ {
		<-rlocked
		rl.Unlock()
		select {
		case <-wlocked:
			t.Fatal("RLocker() didn't read-lock it")
		default:
		}
		rl.Unlock()
		<-wlocked
		select {
		case <-rlocked:
			t.Fatal("RLocker() didn't respect the write lock")
		default:
		}
		wl.Unlock()
	}
}
// Open returns a LockedHandle if the open was permitted, holding either the
// read or the write lock, depending on the opening mode.
func (f *LockedFile) Open(user string, mode qp.OpenMode) (ReadWriteAtCloser, error) {
	of, err := f.File.Open(user, mode)
	if err != nil {
		return of, err
	}

	write := mode&3 == qp.OWRITE || mode&3 == qp.ORDWR

	var l sync.Locker
	if write {
		l = &f.OpenLock
	} else {
		l = f.OpenLock.RLocker()
	}

	l.Lock()
	return &LockedHandle{
		ReadWriteAtCloser: of,
		Locker:            l,
	}, nil
}
// heartbeatStart periodically renews the lease on job until done fires,
// holding l around each heartbeat call.
func heartbeatStart(job *Job, done chan bool, heartbeat int, l sync.Locker) {
	tick := time.NewTicker(time.Duration(heartbeat) * time.Second)
	for {
		select {
		case <-done:
			tick.Stop()
			return
		case <-tick.C:
			l.Lock()
			success, err := job.HeartbeatWithNoData()
			l.Unlock()
			if err != nil {
				log.Printf("failed HeartbeatWithNoData jid:%v, queue:%v, success:%v, error:%v",
					job.Jid, job.Queue, success, err)
			} else {
				log.Printf("warning, slow, HeartbeatWithNoData jid:%v, queue:%v, success:%v",
					job.Jid, job.Queue, success)
			}
		}
	}
}
func (l *LockedOrca) GetE(req common.GetRequest) error {
	// Lock for each read key, complete the read, and then move on.
	// The last key sent through should have a noop at the end to complete the
	// whole interaction between the client and this server.
	var ret error
	var lock sync.Locker

	// guarantee that an operation that failed with a panic will unlock its lock
	defer func() {
		if r := recover(); r != nil {
			if lock != nil {
				lock.Unlock()
			}
			panic(r)
		}
	}()

	for idx, key := range req.Keys {
		// Acquire read lock (true == read)
		lock = l.getlock(key, true)
		lock.Lock()

		// The last request will have these set to complete the interaction
		noopOpaque := uint32(0)
		noopEnd := false
		if idx == len(req.Keys)-1 {
			noopOpaque = req.NoopOpaque
			noopEnd = req.NoopEnd
		}

		subreq := common.GetRequest{
			Keys:       [][]byte{key},
			Opaques:    []uint32{req.Opaques[idx]},
			Quiet:      []bool{req.Quiet[idx]},
			NoopOpaque: noopOpaque,
			NoopEnd:    noopEnd,
		}

		// Make the actual request
		ret = l.wrapped.GetE(subreq)

		// release read lock
		lock.Unlock()

		// Bail out early if there was an error (misses are not errors in this sense)
		// This will probably end up breaking the connection anyway, so no worries
		// about leaving the gets half-done.
		if ret != nil {
			break
		}
	}

	return ret
}
// deadlockDetect panics if mut cannot be acquired within timeout. It probes
// the lock every timeout/4 by acquiring and immediately releasing it from a
// fresh goroutine.
func deadlockDetect(mut sync.Locker, timeout time.Duration) {
	go func() {
		for {
			time.Sleep(timeout / 4)
			ok := make(chan bool, 2)

			go func() {
				mut.Lock()
				mut.Unlock()
				ok <- true
			}()

			go func() {
				time.Sleep(timeout)
				ok <- false
			}()

			if r := <-ok; !r {
				panic("deadlock detected")
			}
		}
	}()
}
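// A minimal usage sketch for deadlockDetect; the mutex and the 30-second
// timeout are illustrative.
var watchedMu sync.Mutex

func init() {
	deadlockDetect(&watchedMu, 30*time.Second)
}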
// PreloadInputs reads all files in path, runs them through the specified
// filter, and tries to add them into the InputManager. PreloadInputs acquires
// the ioLock just before doing I/O in order to guarantee that the system will
// not be doing expensive I/O operations in the middle of a
// performance-sensitive operation (like running contestants' code).
func (mgr *InputManager) PreloadInputs(
	rootdir string,
	factory CachedInputFactory,
	ioLock sync.Locker,
) error {
	// Since all the filenames in the cache directory are (or contain) the
	// hash, it is useful to introduce 256 intermediate directories with the
	// first two nibbles of the hash, so that no single cache directory entry
	// grows too large and becomes inefficient.
	for i := 0; i < 256; i++ {
		dirname := path.Join(rootdir, fmt.Sprintf("%02x", i))
		contents, err := ioutil.ReadDir(dirname)
		if err != nil {
			continue
		}
		for _, info := range contents {
			hash, ok := factory.GetInputHash(dirname, info)
			if !ok {
				continue
			}

			// Make sure no other I/O is being made while we pre-fetch this input.
			ioLock.Lock()
			input, err := mgr.Add(hash, factory)
			if err != nil {
				os.RemoveAll(path.Join(dirname, info.Name()))
				mgr.ctx.Log.Error("Cached input corrupted", "hash", hash)
			} else {
				input.Release(input)
			}
			ioLock.Unlock()
		}
	}
	mgr.ctx.Log.Info("Finished preloading cached inputs", "cache_size", mgr.Size())
	return nil
}
func TestTryMutexLocker(t *testing.T) {
	var mu syncx.TryMutex
	var l sync.Locker = &mu
	l.Lock()

	ch := make(chan struct{})
	go func() {
		l.Lock()
		ch <- struct{}{}
	}()

	runtime.Gosched()
	if mu.TryLock() {
		t.Fatal("mu should be locked")
	}

	l.Unlock()
	<-ch

	l.Unlock()
	if !mu.TryLock() {
		t.Fatal("mu should be unlocked")
	}
}
// withLock runs while holding lk.
func withLock(lk sync.Locker, fn func()) {
	lk.Lock()
	defer lk.Unlock() // in case fn panics
	fn()
}
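// A minimal usage sketch for withLock; counter and mu are illustrative.
func incrementLocked(mu *sync.Mutex, counter *int) {
	withLock(mu, func() {
		*counter++ // runs with mu held; the deferred Unlock runs even if this panics
	})
}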
// RunProc runs event handling loop on component ports.
// It returns true on success or panics with error message and returns false on error.
func RunProc(c interface{}) bool {
	// Check if passed interface is a valid pointer to struct
	name := reflect.TypeOf(c)
	v := reflect.ValueOf(c)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		panic("Argument of flow.Run() is not a valid pointer")
		return false
	}
	vp := v
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		panic("Argument of flow.Run() is not a valid pointer to structure. Got type: " + vp.Type().Name())
		return false
	}
	t := v.Type()

	// Get internal state lock if available
	hasLock := false
	var locker sync.Locker
	if lockField := v.FieldByName("StateLock"); lockField.IsValid() && lockField.Elem().IsValid() {
		locker, hasLock = lockField.Interface().(sync.Locker)
	}

	// Call user init function if exists
	if initable, ok := c.(Initializable); ok {
		initable.Init()
	}

	// A group to wait for all inputs to be closed
	inputsClose := new(sync.WaitGroup)
	// A group to wait for all recv handlers to finish
	handlersDone := new(sync.WaitGroup)

	// Get the embedded flow.Component
	vCom := v.FieldByName("Component")
	isComponent := vCom.IsValid() && vCom.Type().Name() == "Component"
	if !isComponent {
		panic("Argument of flow.Run() is not a flow.Component")
	}

	// Get the component mode
	componentMode := DefaultComponentMode
	var poolSize uint8 = 0
	if vComMode := vCom.FieldByName("Mode"); vComMode.IsValid() {
		componentMode = int(vComMode.Int())
	}
	if vComPoolSize := vCom.FieldByName("PoolSize"); vComPoolSize.IsValid() {
		poolSize = uint8(vComPoolSize.Uint())
	}

	// Create a slice of select cases and port handlers
	cases := make([]reflect.SelectCase, 0, t.NumField())
	handlers := make([]portHandler, 0, t.NumField())

	// Make and listen on termination channel
	vCom.FieldByName("Term").Set(reflect.MakeChan(vCom.FieldByName("Term").Type(), 0))
	cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: vCom.FieldByName("Term")})
	handlers = append(handlers, portHandler{})

	// Detect active components
	looper, isLooper := c.(Looper)

	// Iterate over struct fields and bind handlers
	inputCount := 0
	for i := 0; i < t.NumField(); i++ {
		fv := v.Field(i)
		ff := t.Field(i)
		ft := fv.Type()
		// Detect control channels
		if fv.IsValid() && fv.Kind() == reflect.Chan && !fv.IsNil() && (ft.ChanDir()&reflect.RecvDir) != 0 {
			// Bind handlers for an input channel
			cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: fv})
			h := portHandler{onRecv: vp.MethodByName("On" + ff.Name), onClose: vp.MethodByName("On" + ff.Name + "Close")}
			handlers = append(handlers, h)
			if h.onClose.IsValid() || h.onRecv.IsValid() {
				// Add the input to the wait group
				inputsClose.Add(1)
				inputCount++
			}
		}
	}

	if inputCount == 0 && !isLooper {
		panic(fmt.Sprintf("Components with no input ports are not supported: %s", name))
	}

	// Prepare handler closures
	recvHandler := func(onRecv, value reflect.Value) {
		if hasLock {
			locker.Lock()
		}
		valArr := [1]reflect.Value{value}
		onRecv.Call(valArr[:])
		if hasLock {
			locker.Unlock()
		}
		handlersDone.Done()
	}
	closeHandler := func(onClose reflect.Value) {
		if onClose.IsValid() {
			// Lock the state and call OnClose handler
			if hasLock {
				locker.Lock()
			}
			onClose.Call([]reflect.Value{})
			if hasLock {
				locker.Unlock()
			}
		}
		inputsClose.Done()
	}
	terminate := func() {
		if !vCom.FieldByName("IsRunning").Bool() {
			return
		}
		vCom.FieldByName("IsRunning").SetBool(false)
		for i := 0; i < inputCount; i++ {
			inputsClose.Done()
		}
	}

	// closePorts closes all output channels of a process.
	closePorts := func() {
		// Iterate over struct fields
		for i := 0; i < t.NumField(); i++ {
			fv := v.Field(i)
			ft := fv.Type()
			vNet := vCom.FieldByName("Net")
			// Detect and close send-only channels
			if fv.IsValid() {
				if fv.Kind() == reflect.Chan && (ft.ChanDir()&reflect.SendDir) != 0 && (ft.ChanDir()&reflect.RecvDir) == 0 {
					if vNet.IsValid() && !vNet.IsNil() {
						if vNet.Interface().(*Graph).DecSendChanRefCount(fv) {
							fv.Close()
						}
					} else {
						fv.Close()
					}
				} else if fv.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Chan {
					ll := fv.Len()
					if vNet.IsValid() && !vNet.IsNil() {
						for i := 0; i < ll; i += 1 {
							if vNet.Interface().(*Graph).DecSendChanRefCount(fv.Index(i)) {
								fv.Index(i).Close()
							}
						}
					} else {
						for i := 0; i < ll; i += 1 {
							fv.Index(i).Close()
						}
					}
				}
			}
		}
	}

	// shutdown represents a standard process shutdown procedure.
	shutdown := func() {
		if s, ok := c.(Shutdowner); ok {
			// Custom shutdown behavior
			s.Shutdown()
		} else {
			// Call user finish function if exists
			if finable, ok := c.(Finalizable); ok {
				finable.Finish()
			}
			// Close all output ports if the process is still running
			if vCom.FieldByName("IsRunning").Bool() {
				closePorts()
			}
		}
	}

	// This accommodates the looper behaviour specifically.
	// Because a looper does not rely on having a declared input handler,
	// there is no blocking for inputsClosed. This opens a race condition
	// for handlersDone.
	handlersEst := make(chan bool, 1)

	// Run the port handlers depending on component mode
	if componentMode == ComponentModePool && poolSize > 0 {
		// Pool mode, prefork limited goroutine pool for all inputs
		var poolIndex uint8
		poolWait := new(sync.WaitGroup)
		once := new(sync.Once)
		for poolIndex = 0; poolIndex < poolSize; poolIndex++ {
			poolWait.Add(1)
			go func() {
				// TODO add pool of Loopers support
				for {
					chosen, recv, recvOK := reflect.Select(cases)
					if !recvOK {
						poolWait.Done()
						if chosen == 0 {
							// Term signal
							terminate()
						} else {
							// Port has been closed
							once.Do(func() {
								// Wait for other workers
								poolWait.Wait()
								// Close output down
								closeHandler(handlers[chosen].onClose)
							})
						}
						return
					}
					if handlers[chosen].onRecv.IsValid() {
						handlersDone.Add(1)
						recvHandler(handlers[chosen].onRecv, recv)
					}
				}
			}()
		}
		handlersEst <- true
	} else {
		go func() {
			if isLooper {
				defer func() {
					terminate()
					handlersDone.Done()
				}()
				handlersDone.Add(1)
				handlersEst <- true
				looper.Loop()
				return
			}
			handlersEst <- true
			for {
				chosen, recv, recvOK := reflect.Select(cases)
				if !recvOK {
					if chosen == 0 {
						// Term signal
						terminate()
					} else {
						// Port has been closed
						closeHandler(handlers[chosen].onClose)
					}
					return
				}
				if handlers[chosen].onRecv.IsValid() {
					handlersDone.Add(1)
					if componentMode == ComponentModeAsync ||
						componentMode == ComponentModeUndefined && DefaultComponentMode == ComponentModeAsync {
						// Async mode
						go recvHandler(handlers[chosen].onRecv, recv)
					} else {
						// Sync mode
						recvHandler(handlers[chosen].onRecv, recv)
					}
				}
			}
		}()
	}

	// Indicate the process as running
	<-handlersEst
	vCom.FieldByName("IsRunning").SetBool(true)

	go func() {
		// Wait for all inputs to be closed
		inputsClose.Wait()
		// Wait all inport handlers to finish their job
		handlersDone.Wait()

		// Call shutdown handler (user or default)
		shutdown()

		// Get the embedded flow.Component and check if it belongs to a network
		if vNet := vCom.FieldByName("Net"); vNet.IsValid() && !vNet.IsNil() {
			if vNetCtr, hasNet := vNet.Interface().(netController); hasNet {
				// Remove the instance from the network's WaitGroup
				vNetCtr.getWait().Done()
			}
		}
	}()

	return true
}
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
	emap := extensionMaps[pv.Type().Elem()]
	e := pv.Interface().(Message)

	var m map[int32]Extension
	var mu sync.Locker
	if em, ok := e.(extensionsBytes); ok {
		eb := em.GetExtensions()
		var err error
		m, err = BytesToExtensionsMap(*eb)
		if err != nil {
			return err
		}
		mu = notLocker{}
	} else if _, ok := e.(extendableProto); ok {
		ep, _ := extendable(e)
		m, mu = ep.extensionsRead()
		if m == nil {
			return nil
		}
	}

	// Order the extensions by ID.
	// This isn't strictly necessary, but it will give us
	// canonical output, which will also make testing easier.
	mu.Lock()
	ids := make([]int32, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Sort(int32Slice(ids))
	mu.Unlock()

	for _, extNum := range ids {
		ext := m[extNum]
		var desc *ExtensionDesc
		if emap != nil {
			desc = emap[extNum]
		}
		if desc == nil {
			// Unknown extension.
			if err := writeUnknownStruct(w, ext.enc); err != nil {
				return err
			}
			continue
		}

		pb, err := GetExtension(e, desc)
		if err != nil {
			return fmt.Errorf("failed getting extension: %v", err)
		}

		// Repeated extensions will appear as a slice.
		if !desc.repeated() {
			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
				return err
			}
		} else {
			v := reflect.ValueOf(pb)
			for i := 0; i < v.Len(); i++ {
				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// Performs the actual xdr decode via some C helper functions and libganglia.
func xdrDecode(lock sync.Locker, buf []byte) (msg Message, nbytes int, err error) {
	var xdr *C.XDR
	var cbuf *C.char

	lock.Lock()
	defer lock.Unlock()
	xdr = (*C.XDR)(C.malloc(C.XDR_size))
	defer C.free(unsafe.Pointer(xdr))

	buflen := len(buf)
	if buflen > GANGLIA_MAX_MESSAGE_LEN {
		buflen = GANGLIA_MAX_MESSAGE_LEN
	} else if buflen == 0 {
		panic("empty buffer")
	}
	cbuf = (*C.char)(C.calloc(1, C.size_t(GANGLIA_MAX_MESSAGE_LEN)))
	if cbuf == nil {
		panic("out of memory calling C.calloc")
	}
	defer C.free(unsafe.Pointer(cbuf))
	if buflen > 0 {
		C.memcpy(unsafe.Pointer(cbuf), unsafe.Pointer(&buf[0]), C.size_t(buflen))
	}

	C.xdrmem_create(xdr, cbuf, C.u_int(GANGLIA_MAX_MESSAGE_LEN), C.XDR_DECODE)
	defer C.helper_destroy_xdr(xdr)

	if cbuf != nil {
		// perform the actual decode
		var fmsg *C.Ganglia_metadata_msg
		var vmsg *C.Ganglia_value_msg
		var mf *C.Ganglia_msg_formats

		fmsg = (*C.Ganglia_metadata_msg)(C.malloc(C.Ganglia_metadata_msg_size))
		if fmsg == nil {
			panic("out of memory allocating for decoding ganglia xdr msg")
		}
		vmsg = (*C.Ganglia_value_msg)(C.malloc(C.Ganglia_metadata_val_size))
		if vmsg == nil {
			panic("out of memory allocating for decoding ganglia xdr value")
		}
		defer C.free(unsafe.Pointer(fmsg))
		defer C.free(unsafe.Pointer(vmsg))

		mf = (*C.Ganglia_msg_formats)(C.calloc(1, C.size_t(unsafe.Sizeof(*mf))))
		if mf == nil {
			panic("out of memory allocating for ganglia msg formats")
		}
		defer C.free(unsafe.Pointer(mf))

		if !xdrBool(C.helper_init_xdr(xdr, mf)) {
			err = XDRDecodeFailure
			return
		}
		defer C.helper_uninit_xdr(xdr, mf)

		nbytes = int(C.helper_perform_xdr(xdr, fmsg, vmsg, mf))
		if nbytes > 0 {
			var info *MetricInfo
			var metric_id *C.Ganglia_metric_id

			id := MsgFormat(*mf)
			// log.Printf("XDR bytes=%v id=%v", nbytes, id)
			switch id {
			case GMETADATA_REQUEST:
				greq := C.Ganglia_metadata_msg_u_grequest(fmsg)
				msg = &MetadataReq{
					gangliaMsg: gangliaMsg{formatIdentifier: id},
					MetricIdentifier: &MetricIdentifier{
						Host:   C.GoString(greq.metric_id.host),
						Name:   C.GoString(greq.metric_id.name),
						Spoof:  xdrBool(greq.metric_id.spoof),
						Exists: true,
					},
				}
				C.helper_free_xdr(xdr, mf, unsafe.Pointer(fmsg))
			case GMETADATA_FULL:
				gfull := C.Ganglia_metadata_msg_u_gfull(fmsg)
				var extra_metadata_keys []KeyValueMetadata
				if int(gfull.metric.metadata.metadata_len) > 0 {
					exLen := int(gfull.metric.metadata.metadata_len)
					extra_metadata := &extraMetadata{
						values:  make([]string, exLen),
						mapping: make(map[string][]byte),
					}
					hdr := &reflect.SliceHeader{
						Data: uintptr(unsafe.Pointer(gfull.metric.metadata.metadata_val)),
						Len:  exLen,
						Cap:  exLen,
					}
					extra := *(*[]C.Ganglia_extra_data)(unsafe.Pointer(hdr))
					for i, val := range extra {
						key := C.GoString(val.name)
						extra_metadata.values[i] = C.GoString(val.data)
						extra_metadata.mapping[key] = []byte(extra_metadata.values[i])
						extra_metadata_keys = append(extra_metadata_keys,
							&extraMetadataKey{key: key, data: extra_metadata})
					}
				}
				mid := &MetricIdentifier{
					Host:   C.GoString(gfull.metric_id.host),
					Name:   C.GoString(gfull.metric_id.name),
					Spoof:  xdrBool(gfull.metric_id.spoof),
					Exists: true,
				}
				msg = &MetadataDef{
					gangliaMsg:       gangliaMsg{formatIdentifier: id},
					MetricIdentifier: mid,
					metric: Metadata{
						Type:      C.GoString(gfull.metric._type),
						Name:      C.GoString(gfull.metric.name),
						Units:     C.GoString(gfull.metric.units),
						Tmax:      uint(gfull.metric.tmax),
						Dmax:      uint(gfull.metric.dmax),
						Slope:     Slope(gfull.metric.slope),
						metric_id: mid,
						extra:     extra_metadata_keys,
					},
				}
				// log.Printf("DEBUG: metadata name=%v/%v type=%v",
				//	mid.Name, msg.MetricId().Name, msg.GetMetadata().Type)
				C.helper_free_xdr(xdr, mf, unsafe.Pointer(fmsg))
			case GMETRIC_STRING:
				gstr := C.Ganglia_value_msg_u_gstr(vmsg)
				metric_id = &gstr.metric_id
				info = &MetricInfo{
					Value:  C.GoString(gstr.str),
					Format: C.GoString(gstr.fmt),
				}
			case GMETRIC_USHORT:
				gus := C.Ganglia_value_msg_u_gu_short(vmsg)
				metric_id = &gus.metric_id
				f := C.GoString(gus.fmt)
				info = &MetricInfo{
					Value:  uint16(gus.us),
					Format: f,
				}
			case GMETRIC_SHORT:
				gss := C.Ganglia_value_msg_u_gs_short(vmsg)
				metric_id = &gss.metric_id
				f := C.GoString(gss.fmt)
				info = &MetricInfo{
					Value:  int16(gss.ss),
					Format: f,
				}
			case GMETRIC_UINT:
				gint := C.Ganglia_value_msg_u_gu_int(vmsg)
				metric_id = &gint.metric_id
				f := C.GoString(gint.fmt)
				info = &MetricInfo{
					Value:  uint32(gint.ui),
					Format: f,
				}
			case GMETRIC_INT:
				gint := C.Ganglia_value_msg_u_gs_int(vmsg)
				metric_id = &gint.metric_id
				f := C.GoString(gint.fmt)
				info = &MetricInfo{
					Value:  int32(gint.si),
					Format: f,
				}
				fallthrough
			case GMETRIC_FLOAT:
				gflt := C.Ganglia_value_msg_u_gf(vmsg)
				metric_id = &gflt.metric_id
				info = &MetricInfo{
					Value:  float32(gflt.f),
					Format: C.GoString(gflt.fmt),
				}
			case GMETRIC_DOUBLE:
				gdbl := C.Ganglia_value_msg_u_gd(vmsg)
				metric_id = &gdbl.metric_id
				info = &MetricInfo{
					Value:  float64(gdbl.d),
					Format: C.GoString(gdbl.fmt),
				}
			default:
				log.Printf("XDR value decode failure, unsupported metric %v", id)
				C.helper_free_xdr(xdr, mf, unsafe.Pointer(vmsg))
			}

			if err == nil && info != nil {
				if metric_id != nil {
					info.Spoof = xdrBool(metric_id.spoof)
					if metric_id.host != nil {
						info.Host = []byte(C.GoString(metric_id.host))
					}
					if metric_id.name != nil {
						info.Name = []byte(C.GoString(metric_id.name))
					}
				}
				msg, err = NewMetric(id, *info)
				C.helper_free_xdr(xdr, mf, unsafe.Pointer(vmsg))
			}
		}
	}

	// log.Printf("xdr bytes consumed: %v", nbytes)
	if err == nil && msg != nil && !msg.HasMetadata() {
		md, err := MetadataServer.Lookup(msg)
		if err == nil {
			if md == nil {
				panic("bad metadata from metadata server")
			}
			msg.(*gmetric).metadata = md
			// log.Printf("SET MD for msg %v to %v", msg.(*gmetric).Name, msg.GetMetadata().Type)
		}
	}
	return
}
// LockedChan returns the event's channel, reading it while holding lock.
func (me *Event) LockedChan(lock sync.Locker) <-chan struct{} {
	lock.Lock()
	ch := me.C()
	lock.Unlock()
	return ch
}
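// A usage sketch: snapshot the event channel under the lock, then wait
// without holding it. The names mu and stop are illustrative.
func waitStop(mu sync.Locker, stop *Event) {
	<-stop.LockedChan(mu)
}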
func withLock(l sync.Locker, fn func()) {
	l.Lock()
	fn()
	l.Unlock()
}
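// Unlike the defer-based withLock earlier in this section, the variant above
// leaves l locked if fn panics. A panic-safe sketch (hypothetical name):
func withLockSafe(l sync.Locker, fn func()) {
	l.Lock()
	defer l.Unlock() // released even when fn panics
	fn()
}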
// LockDo runs fn while holding lock.
func LockDo(lock sync.Locker, fn func()) {
	lock.Lock()
	fn()
	lock.Unlock()
}
// LockedMutate wraps Mutate in yet another layer consisting of a
// l.Lock/l.Unlock pair. All other limitations apply as in Mutate, e.g. no
// panics are allowed to happen - otherwise no guarantees can be made about
// Unlock matching the Lock.
func LockedMutate(a Accessor, l sync.Locker, f func() error) (err error) {
	l.Lock()
	defer l.Unlock()

	return Mutate(a, f)
}
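// A usage sketch for LockedMutate, assuming an Accessor value from the
// surrounding package; the names here are illustrative only.
func mutateUnderLock(acc Accessor, mu *sync.Mutex) error {
	return LockedMutate(acc, mu, func() error {
		// mutate state reachable through acc; must not panic (see doc comment)
		return nil
	})
}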
// withLock runs while holding lk.
func withLock(lk sync.Locker, fn func()) {
	lk.Lock()
	fn()
	lk.Unlock()
}
func With(mu sync.Locker, f func()) {
	mu.Lock()
	defer mu.Unlock()
	f()
}
// RunProc runs event handling loop on component ports.
// It returns true on success or panics with error message and returns false on error.
func RunProc(c interface{}) bool {
	// Check if passed interface is a valid pointer to struct
	v := reflect.ValueOf(c)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		panic("Argument of flow.Run() is not a valid pointer")
		return false
	}
	vp := v
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		panic("Argument of flow.Run() is not a valid pointer to structure. Got type: " + vp.Type().Name())
		return false
	}
	t := v.Type()

	// Get internal state lock if available
	hasLock := false
	var locker sync.Locker
	if lockField := v.FieldByName("StateLock"); lockField.IsValid() && lockField.Elem().IsValid() {
		locker, hasLock = lockField.Interface().(sync.Locker)
	}

	// Call user init function if exists
	if initable, ok := c.(Initializable); ok {
		initable.Init()
	}

	// A group to wait for all inputs to be closed
	inputsClose := new(sync.WaitGroup)
	// A group to wait for all recv handlers to finish
	handlersDone := new(sync.WaitGroup)

	emptyArr := [0]reflect.Value{}
	empty := emptyArr[:]

	// Get the embedded flow.Component
	vCom := v.FieldByName("Component")
	isComponent := vCom.IsValid() && vCom.Type().Name() == "Component"

	// Get the component mode
	componentMode := DefaultComponentMode
	if isComponent {
		if vComMode := vCom.FieldByName("Mode"); vComMode.IsValid() {
			componentMode = int(vComMode.Int())
		}
	}

	// Bind channel event handlers
	// Iterate over struct fields
	for i := 0; i < t.NumField(); i++ {
		fv := v.Field(i)
		ff := t.Field(i)
		ft := fv.Type()
		// Detect control channels
		if fv.IsValid() && fv.Kind() == reflect.Chan && !fv.IsNil() && (ft.ChanDir()&reflect.RecvDir) != 0 {
			// Bind handlers for an input channel
			onClose := vp.MethodByName("On" + ff.Name + "Close")
			hasClose := onClose.IsValid()
			onRecv := vp.MethodByName("On" + ff.Name)
			hasRecv := onRecv.IsValid()
			if hasClose || hasRecv {
				// Add the input to the wait group
				inputsClose.Add(1)
				// Listen on an input channel
				go func() {
					for {
						val, ok := fv.Recv()
						if !ok {
							// The channel closed
							if hasClose {
								// Lock the state and call OnClose handler
								if hasLock {
									locker.Lock()
								}
								onClose.Call(empty)
								if hasLock {
									locker.Unlock()
								}
							}
							inputsClose.Done()
							return
						}
						if hasRecv {
							// Call the receive handler for this channel
							handlersDone.Add(1)
							if componentMode == ComponentModeAsync {
								go func() {
									if hasLock {
										locker.Lock()
									}
									valArr := [1]reflect.Value{val}
									onRecv.Call(valArr[:])
									if hasLock {
										locker.Unlock()
									}
									handlersDone.Done()
								}()
							} else {
								valArr := [1]reflect.Value{val}
								onRecv.Call(valArr[:])
								handlersDone.Done()
							}
						}
					}
				}()
			}
		}
	}

	go func() {
		// Wait for all inputs to be closed
		inputsClose.Wait()
		// Wait all inport handlers to finish their job
		handlersDone.Wait()

		// Call shutdown handler (user or default)
		shutdownProc(c)

		// Get the embedded flow.Component and check if it belongs to a network
		if isComponent {
			if vNet := vCom.FieldByName("Net"); vNet.IsValid() && !vNet.IsNil() {
				if vNetCtr, hasNet := vNet.Interface().(netController); hasNet {
					// Remove the instance from the network's WaitGroup
					vNetCtr.getWait().Done()
				}
			}
		}
	}()

	return true
}
// RunProc runs event handling loop on component ports.
// It returns true on success or panics with error message and returns false on error.
func RunProc(c interface{}) bool {
	// Check if passed interface is a valid pointer to struct
	v := reflect.ValueOf(c)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		panic("Argument of flow.Run() is not a valid pointer")
		return false
	}
	vp := v
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		panic("Argument of flow.Run() is not a valid pointer to structure. Got type: " + vp.Type().Name())
		return false
	}
	t := v.Type()

	// Get internal state lock if available
	hasLock := false
	var locker sync.Locker
	if lockField := v.FieldByName("StateLock"); lockField.IsValid() && lockField.Elem().IsValid() {
		locker, hasLock = lockField.Interface().(sync.Locker)
	}

	// Call user init function if exists
	if initable, ok := c.(Initializable); ok {
		initable.Init()
	}

	// A group to wait for all inputs to be closed
	inputsClose := new(sync.WaitGroup)
	// A group to wait for all recv handlers to finish
	handlersDone := new(sync.WaitGroup)

	// Get the embedded flow.Component
	vCom := v.FieldByName("Component")
	isComponent := vCom.IsValid() && vCom.Type().Name() == "Component"
	if !isComponent {
		panic("Argument of flow.Run() is not a flow.Component")
	}

	// Get the component mode
	componentMode := DefaultComponentMode
	var poolSize uint8 = 0
	if isComponent {
		if vComMode := vCom.FieldByName("Mode"); vComMode.IsValid() {
			componentMode = int(vComMode.Int())
		}
		if vComPoolSize := vCom.FieldByName("PoolSize"); vComPoolSize.IsValid() {
			poolSize = uint8(vComPoolSize.Uint())
		}
	}

	// Create a slice of select cases and port handlers
	cases := make([]reflect.SelectCase, 0, t.NumField())
	handlers := make([]portHandler, 0, t.NumField())

	// Iterate over struct fields and bind handlers
	for i := 0; i < t.NumField(); i++ {
		fv := v.Field(i)
		ff := t.Field(i)
		ft := fv.Type()
		// Detect control channels
		if fv.IsValid() && fv.Kind() == reflect.Chan && !fv.IsNil() && (ft.ChanDir()&reflect.RecvDir) != 0 {
			// Bind handlers for an input channel
			cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: fv})
			h := portHandler{onRecv: vp.MethodByName("On" + ff.Name), onClose: vp.MethodByName("On" + ff.Name + "Close")}
			handlers = append(handlers, h)
			if h.onClose.IsValid() || h.onRecv.IsValid() {
				// Add the input to the wait group
				inputsClose.Add(1)
			}
		}
	}

	// Prepare handler closures
	recvHandler := func(onRecv, value reflect.Value) {
		if hasLock {
			locker.Lock()
		}
		valArr := [1]reflect.Value{value}
		onRecv.Call(valArr[:])
		if hasLock {
			locker.Unlock()
		}
		handlersDone.Done()
	}
	closeHandler := func(onClose reflect.Value) {
		if onClose.IsValid() {
			// Lock the state and call OnClose handler
			if hasLock {
				locker.Lock()
			}
			onClose.Call([]reflect.Value{})
			if hasLock {
				locker.Unlock()
			}
		}
		inputsClose.Done()
	}

	// Run the port handlers depending on component mode
	if componentMode == ComponentModePool && poolSize > 0 {
		// Pool mode, prefork limited goroutine pool for all inputs
		var poolIndex uint8
		poolWait := new(sync.WaitGroup)
		once := new(sync.Once)
		for poolIndex = 0; poolIndex < poolSize; poolIndex++ {
			poolWait.Add(1)
			go func() {
				for {
					chosen, recv, recvOK := reflect.Select(cases)
					if !recvOK {
						// Port has been closed
						poolWait.Done()
						once.Do(func() {
							// Wait for other workers
							poolWait.Wait()
							// Close output down
							closeHandler(handlers[chosen].onClose)
						})
						return
					}
					if handlers[chosen].onRecv.IsValid() {
						handlersDone.Add(1)
						recvHandler(handlers[chosen].onRecv, recv)
					}
				}
			}()
		}
	} else {
		go func() {
			for {
				chosen, recv, recvOK := reflect.Select(cases)
				if !recvOK {
					// Port has been closed
					closeHandler(handlers[chosen].onClose)
					return
				}
				if handlers[chosen].onRecv.IsValid() {
					handlersDone.Add(1)
					if componentMode == ComponentModeAsync ||
						componentMode == ComponentModeUndefined && DefaultComponentMode == ComponentModeAsync {
						// Async mode
						go recvHandler(handlers[chosen].onRecv, recv)
					} else {
						// Sync mode
						recvHandler(handlers[chosen].onRecv, recv)
					}
				}
			}
		}()
	}

	go func() {
		// Wait for all inputs to be closed
		inputsClose.Wait()
		// Wait all inport handlers to finish their job
		handlersDone.Wait()

		// Call shutdown handler (user or default)
		shutdownProc(c)

		// Get the embedded flow.Component and check if it belongs to a network
		if isComponent {
			if vNet := vCom.FieldByName("Net"); vNet.IsValid() && !vNet.IsNil() {
				if vNetCtr, hasNet := vNet.Interface().(netController); hasNet {
					// Remove the instance from the network's WaitGroup
					vNetCtr.getWait().Done()
				}
			}
		}
	}()

	return true
}