Example 1
// discoveryCycle repeatedly discovers the local network until the worker
// container signals quit.
//
// The first discovery runs immediately (interval starts at zero). After a
// successful discovery the interval becomes discoveryRefreshInterval, or
// discoveryEmptyInterval when no local IP was found. On error the interval
// is left unchanged so the previous pace is retried.
func (h *Heartbeat) discoveryCycle(w core.WorkerContainer) {
	interval := time.Duration(0)
	for {
		select {
		case <-w.QC():
			// Re-notify the container so sibling workers also quit.
			w.Quit()
			return
		case <-time.After(interval):
			core.Info.Println("start to discovery network every", interval)

			if err := h.discovery(); err != nil {
				core.Warn.Println("heartbeat discovery failed, err is", err)
				continue
			}

			// No ip discovered yet: retry sooner than the refresh pace.
			if len(h.ips) <= 0 {
				interval = discoveryEmptyInterval
				continue
			}

			core.Trace.Println("local ip is", h.ips, "exported", h.exportIp)
			interval = discoveryRefreshInterval
		}
	}
}
Example 2
// a goroutine cycle that is absolutely safe: it never lets a panic or an
// error escape and terminate the process.
func ExampleWorkerContainer_safe() {
	var wc core.WorkerContainer
	wc.GFork("myservice", func(wc core.WorkerContainer) {
		// Swallow any panic from the worker body so this goroutine can
		// never crash the whole process.
		defer func() {
			if r := recover(); r != nil {
				// log the r and ignore.
				return
			}
		}()

		for {
			select {
			case <-wc.QC():
				// A quit signal ends the loop; re-notify the container
				// so every other worker it holds also quits.
				wc.Quit()
				return
			case <-time.After(3 * time.Second):
				// select other channel, do something cycle to get error.
				if err := error(nil); err != nil {
					// recoverable error, log it only and continue or return.
					continue
				}
			}
		}
	})
}
Example 3
// Run drives the server main loop. It moves the server from StateReady to
// StateRunning, then blocks dispatching OS signals, container quit requests
// and (optionally) periodic GC ticks, until the worker container quits.
func (s *Server) Run() (err error) {
	// State transition under the lock; Run is only legal from StateReady.
	func() {
		s.lock.Lock()
		defer s.lock.Unlock()

		if s.closed != StateReady {
			panic("server invalid state.")
		}
		s.closed = StateRunning
	}()

	// when terminated, notify the chan. Non-blocking send: drop the
	// notification when nobody is waiting on s.closing.
	defer func() {
		select {
		case s.closing <- true:
		default:
		}
	}()

	core.Info.Println("server cycle running")

	// run server, apply settings.
	s.applyMultipleProcesses(core.Conf.Workers)

	var wc core.WorkerContainer = s
	for {
		// A nil channel blocks forever in select, which disables the GC
		// case when no GC interval is configured.
		var gcc <-chan time.Time
		if core.Conf.Go.GcInterval > 0 {
			gcc = time.After(time.Second * time.Duration(core.Conf.Go.GcInterval))
		}

		select {
		case signal := <-s.sigs:
			core.Trace.Println("got signal", signal)
			switch signal {
			case os.Interrupt, syscall.SIGTERM:
				// SIGINT, SIGTERM
				wc.Quit()
			}
		case <-wc.QC():
			// Re-notify the container so every other worker quits too.
			wc.Quit()

			// wait for all goroutines quit.
			s.wg.Wait()
			core.Warn.Println("server cycle ok")
			return
		case <-gcc:
			runtime.GC()
			core.Info.Println("go runtime gc every", core.Conf.Go.GcInterval, "seconds")
		}
	}
}
Example 4
// beatCycle periodically sends a heartbeat to the configured url until the
// worker container requests a quit.
func (h *Heartbeat) beatCycle(w core.WorkerContainer) {
	for {
		// Re-read the heartbeat config each round so runtime changes to
		// interval/enabled/url are picked up.
		conf := &core.Conf.Heartbeat

		select {
		case <-time.After(time.Millisecond * time.Duration(1000*conf.Interval)):
			// When disabled, keep ticking but do nothing.
			if !conf.Enabled {
				continue
			}

			core.Info.Println("start to heartbeat every", conf.Interval)

			err := h.beat()
			if err == nil {
				core.Info.Println("heartbeat to", conf.Url, "every", conf.Interval)
				continue
			}
			core.Warn.Println("heartbeat to", conf.Url, "every", conf.Interval, "failed, err is", err)
		case <-w.QC():
			// Re-notify the container so sibling workers also quit.
			w.Quit()
			return
		}
	}
}
Example 5
// a goroutine cycle that notifies the container to quit when it hits a
// non-recoverable error.
func ExampleWorkerContainer_fatal() {
	var wc core.WorkerContainer
	wc.GFork("myservice", func(wc core.WorkerContainer) {
		for {
			select {
			case <-wc.QC():
				// A quit signal ends the loop; re-notify the container
				// so every other worker it holds also quits.
				wc.Quit()
				return
			case <-time.After(3 * time.Second):
				// select other channel, do something cycle to get error.
				if err := error(nil); err != nil {
					// when got none-recoverable error, notify container to quit.
					wc.Quit()
					return
				}
			}
		}
	})
}
Example 6
// Initialize prepares the server for Run: it applies runtime settings,
// forks the background workers (reload, heartbeat discovery/main), opens
// the rtmp agent, and finally moves the server into the ready state.
func (s *Server) Initialize() (err error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Initialization is only legal once, from the initial state.
	if s.closed != StateInit {
		panic("server invalid state.")
	}

	// about the runtime.
	if err = s.initializeRuntime(); err != nil {
		return
	}

	// use worker container to fork.
	var wc core.WorkerContainer = s

	// Background goroutines: config reload plus the two heartbeat cycles.
	wc.GFork("reload", core.Conf.ReloadCycle)
	wc.GFork("htbt(discovery)", s.htbt.discoveryCycle)
	wc.GFork("htbt(main)", s.htbt.beatCycle)

	// open rtmp agent.
	if err = s.rtmp.Open(); err != nil {
		core.Error.Println("open rtmp agent failed. err is", err)
		return
	}

	// Describe the log sink; the file path is only shown when logging
	// to a file.
	c := core.Conf
	logDesc := fmt.Sprintf("%v(%v)", c.Log.Tank, c.Log.Level)
	if c.LogToFile() {
		logDesc = fmt.Sprintf("%v(%v/%v)", c.Log.Tank, c.Log.Level, c.Log.File)
	}
	core.Trace.Println(fmt.Sprintf("init server ok, conf=%v, log=%v, workers=%v/%v, gc=%v/%v%%, daemon=%v",
		c.Conf(), logDesc, c.Workers, runtime.NumCPU(), c.Go.GcInterval, c.Go.GcPercent, c.Daemon))

	// set to ready, requires cleanup.
	s.closed = StateReady

	return
}