Example #1
func TestRLocker(t *testing.T) {
	var wl RWMutex
	var rl sync.Locker
	wlocked := make(chan bool, 1)
	rlocked := make(chan bool, 1)
	rl = wl.RLocker()
	n := 10
	go func() {
		for i := 0; i < n; i++ {
			rl.Lock()
			rl.Lock()
			rlocked <- true
			wl.Lock()
			wlocked <- true
		}
	}()
	for i := 0; i < n; i++ {
		<-rlocked
		rl.Unlock()
		select {
		case <-wlocked:
			t.Fatal("RLocker() didn't read-lock it")
		default:
		}
		rl.Unlock()
		<-wlocked
		select {
		case <-rlocked:
			t.Fatal("RLocker() didn't respect the write lock")
		default:
		}
		wl.Unlock()
	}
}
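The two consecutive rl.Lock() calls in the goroutine above are deliberate: the read half may be held by several readers at once, which is part of what the test verifies. As a minimal sketch of everyday use (standard library only, nothing from the test): RLocker adapts the read half of an RWMutex to the sync.Locker interface, which is useful for APIs such as sync.NewCond that accept any Locker.

package main

import "sync"

func main() {
	var mu sync.RWMutex
	rl := mu.RLocker() // Lock/Unlock on rl forward to mu.RLock/mu.RUnlock

	cond := sync.NewCond(rl) // Cond accepts any sync.Locker, including the read half
	_ = cond

	rl.Lock()   // same as mu.RLock()
	rl.Unlock() // same as mu.RUnlock()
}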
Example #2
File: util.go Project: nrm21/syncthing
func (d *deadlockDetector) Watch(name string, mut sync.Locker) {
	d.lockers[name] = mut
	go func() {
		for {
			time.Sleep(d.timeout / 4)
			ok := make(chan bool, 2)

			go func() {
				mut.Lock()
				_ = 1 // empty critical section
				mut.Unlock()
				ok <- true
			}()

			go func() {
				time.Sleep(d.timeout)
				ok <- false
			}()

			if r := <-ok; !r {
				msg := fmt.Sprintf("deadlock detected at %s", name)
				for otherName, otherMut := range d.lockers {
					if otherHolder, ok := otherMut.(Holdable); ok {
						msg += "\n===" + otherName + "===\n" + otherHolder.Holders()
					}
				}
				panic(msg)
			}
		}
	}()
}
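Watch touches only two fields of its receiver. A minimal surrounding type consistent with the method above (the field names are inferred from the code; the constructor is an assumption, not necessarily syncthing's actual API):

type deadlockDetector struct {
	timeout time.Duration
	lockers map[string]sync.Locker
}

func newDeadlockDetector(timeout time.Duration) *deadlockDetector {
	return &deadlockDetector{
		timeout: timeout,
		lockers: make(map[string]sync.Locker),
	}
}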
Example #3
File: util.go Project: brgmnn/syncthing
func (d *deadlockDetector) Watch(name string, mut sync.Locker) {
	d.lockers[name] = mut
	go func() {
		for {
			time.Sleep(d.timeout / 4)
			ok := make(chan bool, 2)

			go func() {
				mut.Lock()
				mut.Unlock()
				ok <- true
			}()

			go func() {
				time.Sleep(d.timeout)
				ok <- false
			}()

			if r := <-ok; !r {
				msg := fmt.Sprintf("deadlock detected at %s", name)
				for otherName, otherMut := range d.lockers {
					if otherHolder, ok := otherMut.(Holder); ok {
						holder, goid := otherHolder.Holder()
						msg += fmt.Sprintf("\n %s = current holder: %s at routine %d", otherName, holder, goid)
					}
				}
				panic(msg)
			}
		}
	}()
}
Example #4
// Running benchmarks in parallel brings up multiple Chrome instances at the
// same time; when they crash, Chrome processes stick around and can severely
// impact the machine's performance. To stop this from happening, zombie
// Chrome processes are periodically killed.
func ChromeProcessesCleaner(locker sync.Locker, chromeCleanerTimer time.Duration) {
	for range time.Tick(chromeCleanerTimer) {
		glog.Info("The chromeProcessesCleaner goroutine has started")
		glog.Info("Waiting for all existing tasks to complete before killing zombie chrome processes")
		locker.Lock()
		util.LogErr(ExecuteCmd("pkill", []string{"-9", "chrome"}, []string{}, PKILL_TIMEOUT, nil, nil))
		locker.Unlock()
	}
}
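The Locker is what serializes cleanup with task execution: every task runner holds the same lock while it works, so the pkill above can never fire in the middle of a task. A hypothetical wiring sketch (all names are illustrative):

func startCleaner() {
	var taskMutex sync.Mutex
	go ChromeProcessesCleaner(&taskMutex, 30*time.Minute)

	// Each task holds the lock for its whole duration.
	taskMutex.Lock()
	// ... run one benchmark task ...
	taskMutex.Unlock()
}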
Example #5
File: locked.go Project: Netflix/rend
func (l *LockedOrca) GetE(req common.GetRequest) error {
	// Lock for each read key, complete the read, and then move on.
	// The last key sent through should have a noop at the end to complete the
	// whole interaction between the client and this server.
	var ret error
	var lock sync.Locker

	// guarantee that an operation that failed with a panic will unlock its lock
	defer func() {
		if r := recover(); r != nil {
			if lock != nil {
				lock.Unlock()
			}

			panic(r)
		}
	}()

	for idx, key := range req.Keys {
		// Acquire read lock (true == read)
		lock = l.getlock(key, true)
		lock.Lock()

		// The last request will have these set to complete the interaction
		noopOpaque := uint32(0)
		noopEnd := false
		if idx == len(req.Keys)-1 {
			noopOpaque = req.NoopOpaque
			noopEnd = req.NoopEnd
		}

		subreq := common.GetRequest{
			Keys:       [][]byte{key},
			Opaques:    []uint32{req.Opaques[idx]},
			Quiet:      []bool{req.Quiet[idx]},
			NoopOpaque: noopOpaque,
			NoopEnd:    noopEnd,
		}

		// Make the actual request
		ret = l.wrapped.GetE(subreq)

		// release read lock
		lock.Unlock()

		// Bail out early if there was an error (misses are not errors in this sense)
		// This will probably end up breaking the connection anyway, so no worries
		// about leaving the gets half-done.
		if ret != nil {
			break
		}
	}

	return ret
}
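The defer/recover block at the top of GetE is a reusable "unlock on panic, then re-panic" guard. Extracted into a standalone helper, the same pattern looks like this (a sketch, not part of the Rend codebase):

func withPanicUnlock(l sync.Locker, f func()) {
	l.Lock()
	defer func() {
		if r := recover(); r != nil {
			l.Unlock() // don't leave the lock held on the panicking path
			panic(r)   // preserve the original panic
		}
	}()
	f()
	l.Unlock()
}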
Example #6
func sleepWhile(l sync.Locker, cond func() bool) {
	for {
		l.Lock()
		val := cond()
		l.Unlock()
		if !val {
			break
		}
		time.Sleep(time.Millisecond)
	}
}
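A hypothetical call site: poll a shared flag, under the lock, until another goroutine clears it (the sleep duration and names are illustrative):

func waitUntilIdle() {
	var mu sync.Mutex
	busy := true

	go func() {
		time.Sleep(50 * time.Millisecond) // simulated work
		mu.Lock()
		busy = false
		mu.Unlock()
	}()

	sleepWhile(&mu, func() bool { return busy })
}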
Example #7
func WaitEvents(l sync.Locker, evs ...*Event) {
	cases := make([]reflect.SelectCase, 0, len(evs))
	for _, ev := range evs {
		cases = append(cases, reflect.SelectCase{
			Dir:  reflect.SelectRecv,
			Chan: reflect.ValueOf(ev.C()),
		})
	}
	l.Unlock()
	reflect.Select(cases)
	l.Lock()
}
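Note the inverted order: the caller is expected to hold l on entry; WaitEvents releases it while blocked and re-acquires it before returning, mirroring sync.Cond.Wait. A hypothetical call site (the Event value and the done predicate are assumptions):

func waitForDone(l sync.Locker, ev *Event, done func() bool) {
	l.Lock()
	for !done() {
		WaitEvents(l, ev) // drops l while blocked, re-locks before returning
	}
	l.Unlock()
}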
Example #8
File: block.go Project: nytlabs/st-core
// run kernel on inputs, produce outputs
func (b *Block) process() Interrupt {

	b.Monitor <- MonitorMessage{
		BI_KERNEL,
		nil,
	}

	if b.state.Processed {
		return nil
	}

	// block until connected to source if necessary

	if b.sourceType != NONE && b.routing.Source == nil {
		select {
		case f := <-b.routing.InterruptChan:
			return f
		}
	}

	// we should only be able to get here if
	// - we don't need a shared state, or
	// - we have an external shared state and it has been attached

	// if we have a store, lock it
	var store sync.Locker
	var ok bool
	if store, ok = b.routing.Source.(sync.Locker); ok {
		store.Lock()
	}

	// run the kernel
	interrupt := b.kernel(b.state.inputValues,
		b.state.outputValues,
		b.state.internalValues,
		b.routing.Source,
		b.routing.InterruptChan)

	// unlock the store if necessary
	if store != nil {
		store.Unlock()
	}

	// if an interrupt was received, return it
	if interrupt != nil {
		return interrupt
	}

	b.state.Processed = true
	return nil
}
Example #9
File: benchmark.go Project: lhchavez/quark
func RunHostBenchmark(
	ctx *common.Context,
	inputManager *common.InputManager,
	sandbox Sandbox,
	ioLock sync.Locker,
) (BenchmarkResults, error) {
	ioLock.Lock()
	defer ioLock.Unlock()

	ctx.Log.Info("Running benchmark")

	benchmarkResults := make(BenchmarkResults)
	for idx, benchmarkCase := range cases {
		input, err := inputManager.Add(
			benchmarkCase.hash,
			NewRunnerTarInputFactory(
				&ctx.Config,
				benchmarkCase.hash,
				&benchmarkCase,
			),
		)
		if err != nil {
			return nil, err
		}
		defer input.Release(input)

		run := common.Run{
			AttemptID: uint64(idx),
			Source:    benchmarkCase.source,
			Language:  benchmarkCase.language,
			InputHash: benchmarkCase.hash,
			MaxScore:  1.0,
			Debug:     false,
		}
		results, err := Grade(ctx, nil, &run, input, sandbox)
		if err != nil {
			return nil, err
		}

		benchmarkResults[benchmarkCase.name] = BenchmarkResult{
			Time:     results.Time,
			WallTime: results.WallTime,
			Memory:   results.Memory,
		}
	}

	return benchmarkResults, nil
}
Example #10
File: worker.go Project: ngaut/goqless
func heartbeatStart(job *Job, done chan bool, heartbeat int, l sync.Locker) {
	tick := time.NewTicker(time.Duration(heartbeat) * time.Second)
	for {
		select {
		case <-done:
			tick.Stop()
			return
		case <-tick.C:
			l.Lock()
			success, err := job.HeartbeatWithNoData()
			l.Unlock()
			if err != nil {
				log.Printf("failed HeartbeatWithNoData jid:%v, queue:%v, success:%v, error:%v",
					job.Jid, job.Queue, success, err)
			} else {
				log.Printf("warning, slow, HeartbeatWithNoData jid:%v, queue:%v, success:%v",
					job.Jid, job.Queue, success)
			}
		}
	}
}
Example #11
func TestTryMutexLocker(t *testing.T) {
	var mu syncx.TryMutex
	var l sync.Locker = &mu

	l.Lock()
	ch := make(chan struct{})
	go func() {
		l.Lock()
		ch <- struct{}{}
	}()
	runtime.Gosched()

	if mu.TryLock() {
		t.Fatal("mu should be locked")
	}
	l.Unlock()
	<-ch
	l.Unlock()
	if !mu.TryLock() {
		t.Fatal("mu should be unlocked")
	}
}
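Since Go 1.18 the standard sync.Mutex provides TryLock directly, so the same shape works without a third-party syncx package. A standard-library-only sketch:

func tryLockSketch() {
	var mu sync.Mutex // Go 1.18+
	mu.Lock()
	if mu.TryLock() {
		panic("unreachable: mu is already held")
	}
	mu.Unlock()
	if !mu.TryLock() {
		panic("unreachable: mu is free")
	}
	mu.Unlock()
}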
Example #12
func deadlockDetect(mut sync.Locker, timeout time.Duration) {
	go func() {
		for {
			time.Sleep(timeout / 4)
			ok := make(chan bool, 2)

			go func() {
				mut.Lock()
				mut.Unlock()
				ok <- true
			}()

			go func() {
				time.Sleep(timeout)
				ok <- false
			}()

			if r := <-ok; !r {
				panic("deadlock detected")
			}
		}
	}()
}
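Hypothetical usage: register a mutex with the detector at startup, so that any critical section holding it for longer than the timeout crashes loudly instead of hanging silently (the 30-second budget is an assumption):

var stateMu sync.Mutex

func init() {
	deadlockDetect(&stateMu, 30*time.Second)
}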
Example #13
// PreloadInputs reads all files in path, runs them through the specified
// filter, and tries to add them into the InputManager. PreloadInputs acquires
// the ioLock just before doing I/O in order to guarantee that the system will
// not be doing expensive I/O operations in the middle of a
// performance-sensitive operation (like running contestants' code).
func (mgr *InputManager) PreloadInputs(
	rootdir string,
	factory CachedInputFactory,
	ioLock sync.Locker,
) error {
	// Since all the filenames in the cache directory are (or contain) the
	// hash, it is useful to introduce 256 intermediate directories, keyed by
	// the first two nibbles of the hash, to keep any single directory from
	// growing too large and becoming inefficient.
	for i := 0; i < 256; i++ {
		dirname := path.Join(rootdir, fmt.Sprintf("%02x", i))
		contents, err := ioutil.ReadDir(dirname)
		if err != nil {
			continue
		}
		for _, info := range contents {
			hash, ok := factory.GetInputHash(dirname, info)
			if !ok {
				continue
			}

			// Make sure no other I/O is being made while we pre-fetch this input.
			ioLock.Lock()
			input, err := mgr.Add(hash, factory)
			if err != nil {
				os.RemoveAll(path.Join(dirname, info.Name()))
				mgr.ctx.Log.Error("Cached input corrupted", "hash", hash)
			} else {
				input.Release(input)
			}
			ioLock.Unlock()
		}
	}
	mgr.ctx.Log.Info("Finished preloading cached inputs",
		"cache_size", mgr.Size())
	return nil
}
Example #14
func With(mu sync.Locker, f func()) {
	mu.Lock()
	defer mu.Unlock()
	f()
}
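Usage sketch: the critical section reads as a single call, and the unlock cannot be forgotten on an early return or panic because With defers it:

var (
	mu      sync.Mutex
	counter int
)

func increment() {
	With(&mu, func() { counter++ })
}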
Example #15
// RunProc runs the event handling loop on component ports.
// It returns true on success, or panics with an error message if the
// argument is not a valid component.
func RunProc(c interface{}) bool {
	// Check if passed interface is a valid pointer to struct
	name := reflect.TypeOf(c)
	v := reflect.ValueOf(c)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		panic("Argument of flow.Run() is not a valid pointer")
		return false
	}
	vp := v
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		panic("Argument of flow.Run() is not a valid pointer to structure. Got type: " + vp.Type().Name())
		return false
	}
	t := v.Type()

	// Get internal state lock if available
	hasLock := false
	var locker sync.Locker
	if lockField := v.FieldByName("StateLock"); lockField.IsValid() && lockField.Elem().IsValid() {
		locker, hasLock = lockField.Interface().(sync.Locker)
	}

	// Call user init function if exists
	if initable, ok := c.(Initializable); ok {
		initable.Init()
	}

	// A group to wait for all inputs to be closed
	inputsClose := new(sync.WaitGroup)
	// A group to wait for all recv handlers to finish
	handlersDone := new(sync.WaitGroup)

	// Get the embedded flow.Component
	vCom := v.FieldByName("Component")
	isComponent := vCom.IsValid() && vCom.Type().Name() == "Component"

	if !isComponent {
		panic("Argument of flow.Run() is not a flow.Component")
	}

	// Get the component mode
	componentMode := DefaultComponentMode
	var poolSize uint8 = 0
	if vComMode := vCom.FieldByName("Mode"); vComMode.IsValid() {
		componentMode = int(vComMode.Int())
	}
	if vComPoolSize := vCom.FieldByName("PoolSize"); vComPoolSize.IsValid() {
		poolSize = uint8(vComPoolSize.Uint())
	}

	// Create a slice of select cases and port handlers
	cases := make([]reflect.SelectCase, 0, t.NumField())
	handlers := make([]portHandler, 0, t.NumField())

	// Make and listen on termination channel
	vCom.FieldByName("Term").Set(reflect.MakeChan(vCom.FieldByName("Term").Type(), 0))
	cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: vCom.FieldByName("Term")})
	handlers = append(handlers, portHandler{})

	// Detect active components
	looper, isLooper := c.(Looper)

	// Iterate over struct fields and bind handlers
	inputCount := 0

	for i := 0; i < t.NumField(); i++ {
		fv := v.Field(i)
		ff := t.Field(i)
		ft := fv.Type()

		// Detect control channels
		if fv.IsValid() && fv.Kind() == reflect.Chan && !fv.IsNil() && (ft.ChanDir()&reflect.RecvDir) != 0 {
			// Bind handlers for an input channel
			cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: fv})
			h := portHandler{onRecv: vp.MethodByName("On" + ff.Name), onClose: vp.MethodByName("On" + ff.Name + "Close")}
			handlers = append(handlers, h)
			if h.onClose.IsValid() || h.onRecv.IsValid() {
				// Add the input to the wait group
				inputsClose.Add(1)
				inputCount++
			}
		}
	}

	if inputCount == 0 && !isLooper {
		panic(fmt.Sprintf("Components with no input ports are not supported:%s", name))
	}

	// Prepare handler closures
	recvHandler := func(onRecv, value reflect.Value) {
		if hasLock {
			locker.Lock()
		}
		valArr := [1]reflect.Value{value}
		onRecv.Call(valArr[:])
		if hasLock {
			locker.Unlock()
		}
		handlersDone.Done()
	}
	closeHandler := func(onClose reflect.Value) {
		if onClose.IsValid() {
			// Lock the state and call OnClose handler
			if hasLock {
				locker.Lock()
			}
			onClose.Call([]reflect.Value{})
			if hasLock {
				locker.Unlock()
			}
		}
		inputsClose.Done()
	}
	terminate := func() {
		if !vCom.FieldByName("IsRunning").Bool() {
			return
		}
		vCom.FieldByName("IsRunning").SetBool(false)
		for i := 0; i < inputCount; i++ {
			inputsClose.Done()
		}
	}
	// closePorts closes all output channels of a process.
	closePorts := func() {
		// Iterate over struct fields
		for i := 0; i < t.NumField(); i++ {
			fv := v.Field(i)
			ft := fv.Type()
			vNet := vCom.FieldByName("Net")
			// Detect and close send-only channels
			if fv.IsValid() {
				if fv.Kind() == reflect.Chan && (ft.ChanDir()&reflect.SendDir) != 0 && (ft.ChanDir()&reflect.RecvDir) == 0 {
					if vNet.IsValid() && !vNet.IsNil() {
						if vNet.Interface().(*Graph).DecSendChanRefCount(fv) {
							fv.Close()
						}
					} else {
						fv.Close()
					}
				} else if fv.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Chan {
					ll := fv.Len()
					if vNet.IsValid() && !vNet.IsNil() {
						for i := 0; i < ll; i += 1 {
							if vNet.Interface().(*Graph).DecSendChanRefCount(fv.Index(i)) {
								fv.Index(i).Close()
							}
						}
					} else {
						for i := 0; i < ll; i += 1 {
							fv.Index(i).Close()
						}
					}
				}
			}
		}
	}
	// shutdown represents a standard process shutdown procedure.
	shutdown := func() {
		if s, ok := c.(Shutdowner); ok {
			// Custom shutdown behavior
			s.Shutdown()
		} else {
			// Call user finish function if exists
			if finable, ok := c.(Finalizable); ok {
				finable.Finish()
			}
			// Close all output ports if the process is still running
			if vCom.FieldByName("IsRunning").Bool() {
				closePorts()
			}
		}
	}

	// This accommodates the looper behaviour specifically: because a looper
	// does not rely on having a declared input handler, there is no blocking
	// on inputsClose, which opens a race condition for handlersDone.
	handlersEst := make(chan bool, 1)

	// Run the port handlers depending on component mode
	if componentMode == ComponentModePool && poolSize > 0 {
		// Pool mode, prefork limited goroutine pool for all inputs
		var poolIndex uint8
		poolWait := new(sync.WaitGroup)
		once := new(sync.Once)
		for poolIndex = 0; poolIndex < poolSize; poolIndex++ {
			poolWait.Add(1)
			go func() {
				// TODO add pool of Loopers support
				for {
					chosen, recv, recvOK := reflect.Select(cases)
					if !recvOK {
						poolWait.Done()
						if chosen == 0 {
							// Term signal
							terminate()
						} else {
							// Port has been closed
							once.Do(func() {
								// Wait for other workers
								poolWait.Wait()
								// Close output down
								closeHandler(handlers[chosen].onClose)
							})
						}
						return
					}
					if handlers[chosen].onRecv.IsValid() {
						handlersDone.Add(1)
						recvHandler(handlers[chosen].onRecv, recv)
					}
				}
			}()
		}
		handlersEst <- true
	} else {
		go func() {
			if isLooper {
				defer func() {
					terminate()
					handlersDone.Done()
				}()
				handlersDone.Add(1)
				handlersEst <- true
				looper.Loop()
				return
			}
			handlersEst <- true
			for {
				chosen, recv, recvOK := reflect.Select(cases)
				if !recvOK {
					if chosen == 0 {
						// Term signal
						terminate()
					} else {
						// Port has been closed
						closeHandler(handlers[chosen].onClose)
					}
					return
				}
				if handlers[chosen].onRecv.IsValid() {
					handlersDone.Add(1)
					if componentMode == ComponentModeAsync || componentMode == ComponentModeUndefined && DefaultComponentMode == ComponentModeAsync {
						// Async mode
						go recvHandler(handlers[chosen].onRecv, recv)
					} else {
						// Sync mode
						recvHandler(handlers[chosen].onRecv, recv)
					}
				}
			}
		}()
	}

	// Indicate the process as running
	<-handlersEst
	vCom.FieldByName("IsRunning").SetBool(true)

	go func() {
		// Wait for all inputs to be closed
		inputsClose.Wait()
		// Wait all inport handlers to finish their job
		handlersDone.Wait()

		// Call shutdown handler (user or default)
		shutdown()

		// Get the embedded flow.Component and check if it belongs to a network
		if vNet := vCom.FieldByName("Net"); vNet.IsValid() && !vNet.IsNil() {
			if vNetCtr, hasNet := vNet.Interface().(netController); hasNet {
				// Remove the instance from the network's WaitGroup
				vNetCtr.getWait().Done()
			}
		}
	}()
	return true
}
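A minimal component that this loop could drive, inferred from the naming convention visible in the code ("On" plus the input field's name for receive handlers, plus "Close" for close handlers); the details of the flow package are assumptions based on this file alone:

type Echo struct {
	Component         // embedded; RunProc requires it
	In  <-chan string // input port: OnIn below is found via reflection
	Out chan<- string // output port: closed automatically on shutdown
}

// OnIn is called for every value received on In.
func (e *Echo) OnIn(s string) {
	e.Out <- s
}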
Example #16
File: sql.go Project: rjeczalik/go
// withLock runs while holding lk.
func withLock(lk sync.Locker, fn func()) {
	lk.Lock()
	defer lk.Unlock() // in case fn panics
	fn()
}
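The defer is the difference between this variant and the non-deferred ones in Examples #20 to #22 below: if fn panics, this version still releases lk, while the others would leave it locked forever. A quick demonstration sketch:

func demo() {
	var mu sync.Mutex
	func() {
		defer func() { _ = recover() }() // contain the panic for the demo
		withLock(&mu, func() { panic("boom") })
	}()
	mu.Lock() // succeeds only because withLock deferred its Unlock
	mu.Unlock()
}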
Example #17
File: text.go Project: spxtr/contrib
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
	emap := extensionMaps[pv.Type().Elem()]
	e := pv.Interface().(Message)

	var m map[int32]Extension
	var mu sync.Locker
	if em, ok := e.(extensionsBytes); ok {
		eb := em.GetExtensions()
		var err error
		m, err = BytesToExtensionsMap(*eb)
		if err != nil {
			return err
		}
		mu = notLocker{}
	} else if _, ok := e.(extendableProto); ok {
		ep, _ := extendable(e)
		m, mu = ep.extensionsRead()
		if m == nil {
			return nil
		}
	}

	// Order the extensions by ID.
	// This isn't strictly necessary, but it will give us
	// canonical output, which will also make testing easier.

	mu.Lock()
	ids := make([]int32, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Sort(int32Slice(ids))
	mu.Unlock()

	for _, extNum := range ids {
		ext := m[extNum]
		var desc *ExtensionDesc
		if emap != nil {
			desc = emap[extNum]
		}
		if desc == nil {
			// Unknown extension.
			if err := writeUnknownStruct(w, ext.enc); err != nil {
				return err
			}
			continue
		}

		pb, err := GetExtension(e, desc)
		if err != nil {
			return fmt.Errorf("failed getting extension: %v", err)
		}

		// Repeated extensions will appear as a slice.
		if !desc.repeated() {
			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
				return err
			}
		} else {
			v := reflect.ValueOf(pb)
			for i := 0; i < v.Len(); i++ {
				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
Example #18
File: event.go Project: skycoin/skycoin
func (me *Event) LockedChan(lock sync.Locker) <-chan struct{} {
	lock.Lock()
	ch := me.C()
	lock.Unlock()
	return ch
}
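Hypothetical usage: fetch the event's channel while holding the lock, then select on it without the lock (the timeout is illustrative):

func waitOrTimeout(ev *Event, lock sync.Locker) bool {
	select {
	case <-ev.LockedChan(lock):
		return true
	case <-time.After(time.Second):
		return false
	}
}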
Example #19
File: storage.go Project: matomesc/rkt
// LockedMutate wraps Mutate in yet another layer consisting of a
// l.Lock/l.Unlock pair. All other limitations apply as in Mutate, e.g. no
// panics are allowed to happen - otherwise no guarantees can be made about
// Unlock matching the Lock.
func LockedMutate(a Accessor, l sync.Locker, f func() error) (err error) {
	l.Lock()
	defer l.Unlock()

	return Mutate(a, f)
}
Example #20
File: sql.go Project: arnold8/go
// withLock runs while holding lk.
func withLock(lk sync.Locker, fn func()) {
	lk.Lock()
	fn()
	lk.Unlock()
}
Example #21
File: utils.go Project: Comdex/taobao
func withLock(l sync.Locker, fn func()) {
	l.Lock()
	fn()
	l.Unlock()
}
Example #22
File: sync.go Project: VicentLiu/gohper
// LockDo do function in lock
func LockDo(lock sync.Locker, fn func()) {
	lock.Lock()
	fn()
	lock.Unlock()
}
Example #23
// Performs the actual xdr decode via some C helper functions and libganglia.
func xdrDecode(lock sync.Locker, buf []byte) (msg Message, nbytes int, err error) {
	var xdr *C.XDR
	var cbuf *C.char

	lock.Lock()
	defer lock.Unlock()

	xdr = (*C.XDR)(C.malloc(C.XDR_size))
	defer C.free(unsafe.Pointer(xdr))
	buflen := len(buf)
	if buflen > GANGLIA_MAX_MESSAGE_LEN {
		buflen = GANGLIA_MAX_MESSAGE_LEN
	} else if buflen == 0 {
		panic("empty buffer")
	}

	cbuf = (*C.char)(C.calloc(1, C.size_t(GANGLIA_MAX_MESSAGE_LEN)))
	if cbuf == nil {
		panic("out of memory calling C.calloc")
	}
	defer C.free(unsafe.Pointer(cbuf))
	if buflen > 0 {
		C.memcpy(unsafe.Pointer(cbuf), unsafe.Pointer(&buf[0]), C.size_t(buflen))
	}

	C.xdrmem_create(xdr, cbuf, C.u_int(GANGLIA_MAX_MESSAGE_LEN), C.XDR_DECODE)
	defer C.helper_destroy_xdr(xdr)

	if cbuf != nil {
		// perform the actual decode
		var fmsg *C.Ganglia_metadata_msg
		var vmsg *C.Ganglia_value_msg
		var mf *C.Ganglia_msg_formats

		fmsg = (*C.Ganglia_metadata_msg)(C.malloc(C.Ganglia_metadata_msg_size))
		if fmsg == nil {
			panic("out of memory allocating for decoding ganglia xdr msg")
		}
		vmsg = (*C.Ganglia_value_msg)(C.malloc(C.Ganglia_metadata_val_size))
		if vmsg == nil {
			panic("out of memory allocating for decoding ganglia xdr value")
		}
		defer C.free(unsafe.Pointer(fmsg))
		defer C.free(unsafe.Pointer(vmsg))

		mf = (*C.Ganglia_msg_formats)(C.calloc(1, C.size_t(unsafe.Sizeof(*mf))))
		if mf == nil {
			panic("out of memory allocating for ganglia msg formats")
		}
		defer C.free(unsafe.Pointer(mf))
		if !xdrBool(C.helper_init_xdr(xdr, mf)) {
			err = XDRDecodeFailure
			return
		}
		defer C.helper_uninit_xdr(xdr, mf)
		nbytes = int(C.helper_perform_xdr(xdr, fmsg, vmsg, mf))
		if nbytes > 0 {
			var info *MetricInfo
			var metric_id *C.Ganglia_metric_id

			id := MsgFormat(*mf)
			// log.Printf("XDR bytes=%v id=%v", nbytes,id)
			switch id {
			case GMETADATA_REQUEST:
				greq := C.Ganglia_metadata_msg_u_grequest(fmsg)
				msg = &MetadataReq{
					gangliaMsg: gangliaMsg{formatIdentifier: id},
					MetricIdentifier: &MetricIdentifier{
						Host:   C.GoString(greq.metric_id.host),
						Name:   C.GoString(greq.metric_id.name),
						Spoof:  xdrBool(greq.metric_id.spoof),
						Exists: true,
					},
				}
				C.helper_free_xdr(xdr, mf, unsafe.Pointer(fmsg))
			case GMETADATA_FULL:
				gfull := C.Ganglia_metadata_msg_u_gfull(fmsg)
				var extra_metadata_keys []KeyValueMetadata
				if int(gfull.metric.metadata.metadata_len) > 0 {
					exLen := int(gfull.metric.metadata.metadata_len)
					extra_metadata := &extraMetadata{
						values:  make([]string, exLen),
						mapping: make(map[string][]byte),
					}
					hdr := &reflect.SliceHeader{Data: uintptr(unsafe.Pointer(gfull.metric.metadata.metadata_val)),
						Len: exLen,
						Cap: exLen}
					extra := *(*[]C.Ganglia_extra_data)(unsafe.Pointer(hdr))
					for i, val := range extra {
						key := C.GoString(val.name)
						extra_metadata.values[i] = C.GoString(val.data)
						extra_metadata.mapping[key] = []byte(extra_metadata.values[i])
						extra_metadata_keys = append(extra_metadata_keys, &extraMetadataKey{
							key:  key,
							data: extra_metadata})
					}
				}
				mid := &MetricIdentifier{
					Host:   C.GoString(gfull.metric_id.host),
					Name:   C.GoString(gfull.metric_id.name),
					Spoof:  xdrBool(gfull.metric_id.spoof),
					Exists: true,
				}
				msg = &MetadataDef{
					gangliaMsg:       gangliaMsg{formatIdentifier: id},
					MetricIdentifier: mid,
					metric: Metadata{
						Type:      C.GoString(gfull.metric._type),
						Name:      C.GoString(gfull.metric.name),
						Units:     C.GoString(gfull.metric.units),
						Tmax:      uint(gfull.metric.tmax),
						Dmax:      uint(gfull.metric.dmax),
						Slope:     Slope(gfull.metric.slope),
						metric_id: mid,
						extra:     extra_metadata_keys,
					},
				}
				//log.Printf("DEBUG: metadata name=%v/%v type=%v",mid.Name,msg.MetricId().Name,
				//            msg.GetMetadata().Type)
				C.helper_free_xdr(xdr, mf, unsafe.Pointer(fmsg))
			case GMETRIC_STRING:
				gstr := C.Ganglia_value_msg_u_gstr(vmsg)
				metric_id = &gstr.metric_id
				info = &MetricInfo{
					Value:  C.GoString(gstr.str),
					Format: C.GoString(gstr.fmt),
				}
			case GMETRIC_USHORT:
				gus := C.Ganglia_value_msg_u_gu_short(vmsg)
				metric_id = &gus.metric_id
				f := C.GoString(gus.fmt)
				info = &MetricInfo{
					Value:  uint16(gus.us),
					Format: f,
				}
			case GMETRIC_SHORT:
				gss := C.Ganglia_value_msg_u_gs_short(vmsg)
				metric_id = &gss.metric_id
				f := C.GoString(gss.fmt)
				info = &MetricInfo{
					Value:  int16(gss.ss),
					Format: f,
				}
			case GMETRIC_UINT:
				gint := C.Ganglia_value_msg_u_gu_int(vmsg)
				metric_id = &gint.metric_id
				f := C.GoString(gint.fmt)
				info = &MetricInfo{
					Value:  uint32(gint.ui),
					Format: f,
				}
			case GMETRIC_INT:
				gint := C.Ganglia_value_msg_u_gs_int(vmsg)
				metric_id = &gint.metric_id
				f := C.GoString(gint.fmt)
				info = &MetricInfo{
					Value:  int32(gint.si),
					Format: f,
				}
			case GMETRIC_FLOAT:
				gflt := C.Ganglia_value_msg_u_gf(vmsg)
				metric_id = &gflt.metric_id
				info = &MetricInfo{
					Value:  float32(gflt.f),
					Format: C.GoString(gflt.fmt),
				}
			case GMETRIC_DOUBLE:
				gdbl := C.Ganglia_value_msg_u_gd(vmsg)
				metric_id = &gdbl.metric_id
				info = &MetricInfo{
					Value:  float64(gdbl.d),
					Format: C.GoString(gdbl.fmt),
				}
			default:
				log.Printf("XDR value decode failure, unsupported metric %v", id)
				C.helper_free_xdr(xdr, mf, unsafe.Pointer(vmsg))
			}
			if err == nil && info != nil {
				if metric_id != nil {
					info.Spoof = xdrBool(metric_id.spoof)
					if metric_id.host != nil {
						info.Host = []byte(C.GoString(metric_id.host))
					}
					if metric_id.name != nil {
						info.Name = []byte(C.GoString(metric_id.name))
					}
				}
				msg, err = NewMetric(id, *info)
				C.helper_free_xdr(xdr, mf, unsafe.Pointer(vmsg))
			}
		}
	}
	// log.Printf("xdr bytes consumed: %v",nbytes)
	if err == nil && msg != nil && !msg.HasMetadata() {
		md, err := MetadataServer.Lookup(msg)
		if err == nil {
			if md == nil {
				panic("bad metadata from metadata server")
			}
			msg.(*gmetric).metadata = md
			//log.Printf("SET MD for msg %v to %v",msg.(*gmetric).Name,msg.GetMetadata().Type)
		}
	}
	return
}
Example #24
// RunProc runs the event handling loop on component ports.
// It returns true on success, or panics with an error message if the
// argument is not a valid component.
func RunProc(c interface{}) bool {
	// Check if passed interface is a valid pointer to struct
	v := reflect.ValueOf(c)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		panic("Argument of flow.Run() is not a valid pointer")
		return false
	}
	vp := v
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		panic("Argument of flow.Run() is not a valid pointer to structure. Got type: " + vp.Type().Name())
		return false
	}
	t := v.Type()

	// Get internal state lock if available
	hasLock := false
	var locker sync.Locker
	if lockField := v.FieldByName("StateLock"); lockField.IsValid() && lockField.Elem().IsValid() {
		locker, hasLock = lockField.Interface().(sync.Locker)
	}

	// Call user init function if exists
	if initable, ok := c.(Initializable); ok {
		initable.Init()
	}

	// A group to wait for all inputs to be closed
	inputsClose := new(sync.WaitGroup)
	// A group to wait for all recv handlers to finish
	handlersDone := new(sync.WaitGroup)

	emptyArr := [0]reflect.Value{}
	empty := emptyArr[:]

	// Get the embedded flow.Component
	vCom := v.FieldByName("Component")
	isComponent := vCom.IsValid() && vCom.Type().Name() == "Component"

	// Get the component mode
	componentMode := DefaultComponentMode
	if isComponent {
		if vComMode := vCom.FieldByName("Mode"); vComMode.IsValid() {
			componentMode = int(vComMode.Int())
		}
	}

	// Bind channel event handlers
	// Iterate over struct fields
	for i := 0; i < t.NumField(); i++ {
		fv := v.Field(i)
		ff := t.Field(i)
		ft := fv.Type()
		// Detect control channels
		if fv.IsValid() && fv.Kind() == reflect.Chan && !fv.IsNil() && (ft.ChanDir()&reflect.RecvDir) != 0 {
			// Bind handlers for an input channel
			onClose := vp.MethodByName("On" + ff.Name + "Close")
			hasClose := onClose.IsValid()
			onRecv := vp.MethodByName("On" + ff.Name)
			hasRecv := onRecv.IsValid()
			if hasClose || hasRecv {
				// Add the input to the wait group
				inputsClose.Add(1)
				// Listen on an input channel
				go func() {
					for {
						val, ok := fv.Recv()
						if !ok {
							// The channel closed
							if hasClose {
								// Lock the state and call OnClose handler
								if hasLock {
									locker.Lock()
								}
								onClose.Call(empty)
								if hasLock {
									locker.Unlock()
								}
							}
							inputsClose.Done()
							return
						}
						if hasRecv {
							// Call the receival handler for this channel
							handlersDone.Add(1)
							if componentMode == ComponentModeAsync {
								go func() {
									if hasLock {
										locker.Lock()
									}
									valArr := [1]reflect.Value{val}
									onRecv.Call(valArr[:])
									if hasLock {
										locker.Unlock()
									}
									handlersDone.Done()
								}()
							} else {
								valArr := [1]reflect.Value{val}
								onRecv.Call(valArr[:])
								handlersDone.Done()
							}
						}
					}
				}()
			}
		}
	}
	go func() {
		// Wait for all inputs to be closed
		inputsClose.Wait()
		// Wait all inport handlers to finish their job
		handlersDone.Wait()

		// Call shutdown handler (user or default)
		shutdownProc(c)

		// Get the embedded flow.Component and check if it belongs to a network
		if isComponent {
			if vNet := vCom.FieldByName("Net"); vNet.IsValid() && !vNet.IsNil() {
				if vNetCtr, hasNet := vNet.Interface().(netController); hasNet {
					// Remove the instance from the network's WaitGroup
					vNetCtr.getWait().Done()
				}
			}
		}
	}()
	return true
}
Example #25
File: component.go Project: ngaut/goflow
// RunProc runs the event handling loop on component ports.
// It returns true on success, or panics with an error message if the
// argument is not a valid component.
func RunProc(c interface{}) bool {
	// Check if passed interface is a valid pointer to struct
	v := reflect.ValueOf(c)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		panic("Argument of flow.Run() is not a valid pointer")
		return false
	}
	vp := v
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		panic("Argument of flow.Run() is not a valid pointer to structure. Got type: " + vp.Type().Name())
		return false
	}
	t := v.Type()

	// Get internal state lock if available
	hasLock := false
	var locker sync.Locker
	if lockField := v.FieldByName("StateLock"); lockField.IsValid() && lockField.Elem().IsValid() {
		locker, hasLock = lockField.Interface().(sync.Locker)
	}

	// Call user init function if exists
	if initable, ok := c.(Initializable); ok {
		initable.Init()
	}

	// A group to wait for all inputs to be closed
	inputsClose := new(sync.WaitGroup)
	// A group to wait for all recv handlers to finish
	handlersDone := new(sync.WaitGroup)

	// Get the embedded flow.Component
	vCom := v.FieldByName("Component")
	isComponent := vCom.IsValid() && vCom.Type().Name() == "Component"

	if !isComponent {
		panic("Argument of flow.Run() is not a flow.Component")
	}

	// Get the component mode
	componentMode := DefaultComponentMode
	var poolSize uint8 = 0
	if isComponent {
		if vComMode := vCom.FieldByName("Mode"); vComMode.IsValid() {
			componentMode = int(vComMode.Int())
		}
		if vComPoolSize := vCom.FieldByName("PoolSize"); vComPoolSize.IsValid() {
			poolSize = uint8(vComPoolSize.Uint())
		}
	}

	// Create a slice of select cases and port handlers
	cases := make([]reflect.SelectCase, 0, t.NumField())
	handlers := make([]portHandler, 0, t.NumField())

	// Iterate over struct fields and bind handlers
	for i := 0; i < t.NumField(); i++ {
		fv := v.Field(i)
		ff := t.Field(i)
		ft := fv.Type()
		// Detect control channels
		if fv.IsValid() && fv.Kind() == reflect.Chan && !fv.IsNil() && (ft.ChanDir()&reflect.RecvDir) != 0 {
			// Bind handlers for an input channel
			cases = append(cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: fv})
			h := portHandler{onRecv: vp.MethodByName("On" + ff.Name), onClose: vp.MethodByName("On" + ff.Name + "Close")}
			handlers = append(handlers, h)
			if h.onClose.IsValid() || h.onRecv.IsValid() {
				// Add the input to the wait group
				inputsClose.Add(1)
			}
		}
	}

	// Prepare handler closures
	recvHandler := func(onRecv, value reflect.Value) {
		if hasLock {
			locker.Lock()
		}
		valArr := [1]reflect.Value{value}
		onRecv.Call(valArr[:])
		if hasLock {
			locker.Unlock()
		}
		handlersDone.Done()
	}
	closeHandler := func(onClose reflect.Value) {
		if onClose.IsValid() {
			// Lock the state and call OnClose handler
			if hasLock {
				locker.Lock()
			}
			onClose.Call([]reflect.Value{})
			if hasLock {
				locker.Unlock()
			}
		}
		inputsClose.Done()
	}

	// Run the port handlers depending on component mode
	if componentMode == ComponentModePool && poolSize > 0 {
		// Pool mode, prefork limited goroutine pool for all inputs
		var poolIndex uint8
		poolWait := new(sync.WaitGroup)
		once := new(sync.Once)
		for poolIndex = 0; poolIndex < poolSize; poolIndex++ {
			poolWait.Add(1)
			go func() {
				for {
					chosen, recv, recvOK := reflect.Select(cases)
					if !recvOK {
						// Port has been closed
						poolWait.Done()
						once.Do(func() {
							// Wait for other workers
							poolWait.Wait()
							// Close output down
							closeHandler(handlers[chosen].onClose)
						})
						return
					}
					if handlers[chosen].onRecv.IsValid() {
						handlersDone.Add(1)
						recvHandler(handlers[chosen].onRecv, recv)
					}
				}
			}()
		}
	} else {
		go func() {
			for {
				chosen, recv, recvOK := reflect.Select(cases)
				if !recvOK {
					// Port has been closed
					closeHandler(handlers[chosen].onClose)
					return
				}
				if handlers[chosen].onRecv.IsValid() {
					handlersDone.Add(1)
					if componentMode == ComponentModeAsync || componentMode == ComponentModeUndefined && DefaultComponentMode == ComponentModeAsync {
						// Async mode
						go recvHandler(handlers[chosen].onRecv, recv)
					} else {
						// Sync mode
						recvHandler(handlers[chosen].onRecv, recv)
					}
				}
			}
		}()
	}

	go func() {
		// Wait for all inputs to be closed
		inputsClose.Wait()
		// Wait all inport handlers to finish their job
		handlersDone.Wait()

		// Call shutdown handler (user or default)
		shutdownProc(c)

		// Get the embedded flow.Component and check if it belongs to a network
		if isComponent {
			if vNet := vCom.FieldByName("Net"); vNet.IsValid() && !vNet.IsNil() {
				if vNetCtr, hasNet := vNet.Interface().(netController); hasNet {
					// Remove the instance from the network's WaitGroup
					vNetCtr.getWait().Done()
				}
			}
		}
	}()
	return true
}