Example #1
File: client.go Project: badoo/thunder
func NewClient(address string, p Protocol, c ClientCodec, connect_timeout, request_timeout time.Duration) *Client {
	ips, err := dns.LookupHostPort(address)
	if err != nil {
		log.Errorf("dns.LookupHostPort() faield: %v", err)
		// FIXME(antoxa): just reusing existing ips here, which actually sucks
		//                this only works because cli.Call() uses net.Dial() which resolves the name again
	}

	canUseClient := func(client *Client) bool {
		// readLoop() might be modifying this conn
		// but we don't really need to lock for the ips comparison, since ips are never modified for an existing client
		client.lk.Lock()
		defer client.lk.Unlock()

		// TODO(antoxa): can just use one ip for client and recheck not full equality
		//               but only if new ips contain old ip
		if !util.StrSliceEqual(client.ips, ips) {
			return false
		}

		if client.closed {
			return false
		}

		return true
	}

	const max_tries = 3 // arbitrary limit, i know

	for done_tries := 0; done_tries < max_tries; done_tries++ {
		client := Pcm.GetClient(address)
		if client == nil {
			break
		}

		if !canUseClient(client) {
			client.closeNoReuse()
			continue
		}

		log.Debugf("reused existing client %p for %s (after %d tries)", client, address, done_tries)
		return client
	}

	log.Debugf("creating new cli for %s", address)
	return &Client{
		address:         address,
		ips:             ips,
		Proto:           p,
		Codec:           c,
		connect_timeout: connect_timeout,
		request_timeout: request_timeout,
	}
}
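
util.StrSliceEqual is not shown in this listing; a plausible order-sensitive implementation, consistent with how canUseClient() uses it (an assumption, not the project's actual code), would be:

func strSliceEqual(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != b[i] {
			return false
		}
	}
	return true
}
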
Example #2
func (d *DispatcherData) processAllJobs(jobs []*TimetableEntry) {
	newJobs := make([]*TimetableEntry, 0)
	toProcessFinished := make([]*FinishEvent, 0)

	for _, row := range jobs {
		if el, ok := d.waitingMap[row.id]; ok {

			if row.finish_count >= el.finish_count && row.added_to_queue_ts.Valid && !row.finished_ts.Valid {
				d.removeFromWaiting(el)
				row.reportedDup = el.reportedDup
				d.addToAdded(row)

				log.Warnf("external waiting to added promotion OLD:%+v---NEW:%+v", el, row)
			} else if row.finish_count > el.finish_count && !row.added_to_queue_ts.Valid {
				d.removeFromWaiting(el)
				d.addToWaiting(row)
				log.Warnf("external finish_count promotion OLD:%+v---NEW:%+v", el, row)
			}
			continue
		}

		if el, ok := d.addedMap[row.id]; ok {
			// An external job notification can update the timetable by incrementing its finish count.
			// We do not really care about any modifications made by the script except for the finished_successfully field
			if row.finish_count > el.finish_count {
				log.Debugf("External finish for tt row %+v", row)
				toProcessFinished = append(toProcessFinished, &FinishEvent{
					timetable_id: row.id, success: row.finished_successfully != 0,
					havePrevFinishCount: true, prevFinishCount: row.finish_count, errorCh: make(chan error, 1)})
			}
		} else if d.deletedIds[row.id] == 0 {
			log.Debugf("External timetable row %+v", row)
			newJobs = append(newJobs, row)
		}
	}

	if len(newJobs) > 0 {
		d.acceptNewJobs(newJobs)
	}

	for _, el := range toProcessFinished {
		log.Debugf("processFinished: %+v", el)
		d.processFinished(el)
	}

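	// deletedIds is used as a reference-counted tombstone set: each pass over the
	// timetable decrements every entry, and an id is forgotten once no more
	// external rows referencing it are expected.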
	for ttId, refCount := range d.deletedIds {
		if refCount--; refCount <= 0 {
			delete(d.deletedIds, ttId)
		} else {
			d.deletedIds[ttId] = refCount
		}
	}
}
Example #3
File: client.go Project: badoo/thunder
func (client *Client) establishConnection(network string) error {
	client.lk.Lock()
	defer client.lk.Unlock()

	// conn might've been closed in between Call*() invocations
	if client.closed {
		client.closed = false
		client.conn = nil
	}

	// already established, can reuse
	if client.conn != nil {
		return nil
	}

	// we're creating new connection here; (closed == false && conn == nil)

	// TODO(antoxa): rewrite this to use ips, that we've resolved earlier
	conn, err := net.DialTimeout(network, client.address, client.connect_timeout)
	if err != nil {
		client.closed = true // connection attempt has failed -> do not reuse
		return fmt.Errorf("connect error: %v", err)
	}

	log.Debugf("connected %s -> %s", conn.LocalAddr(), conn.RemoteAddr())

	client.conn = conn
	client.respch = make(chan response, 1) // needs to be buffered, to avoid stalling readLoop() on Write failure
	client.numExpectedResponses = 0        // this is needed to reset counter in case last Write has failed

	go client.readLoop()

	return nil
}
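
The size-1 buffer on respch is what keeps a failed Write from stalling readLoop(): the reader can always hand off one response even when the caller has already bailed out. A minimal self-contained sketch of the property being relied on (stand-in names, not the project's code):

package main

import "fmt"

func main() {
	respch := make(chan string, 1)

	// with a buffer of 1, the sending side (readLoop in the example above)
	// never blocks on its single send, even if nobody is receiving anymore:
	respch <- "response"

	fmt.Println(<-respch)
}
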
Example #4
File: restart.go Project: badoo/thunder
func restartRunChild(childData *RestartChildData) (*RestartContext, error) {
	if currentRestart != nil {
		panic("can't call this function when restart is already in progress")
	}

	child_data_json, err := json.Marshal(childData)
	if err != nil {
		return nil, fmt.Errorf("can't json encode child data: %v", err)
	}

	os.Setenv(RESTART_ENV_VAR, string(child_data_json))
	log.Debugf("%s = %s", RESTART_ENV_VAR, child_data_json)

	// can start child now
	child, err := forkExec(childData.files)
	if err != nil {
		return nil, fmt.Errorf("forkExec error: %v", err)
	}

	log.Debugf("started child: %d", child.Pid)

	os.Setenv(RESTART_ENV_VAR, "") // reset env after child starts, just in case

	// save state
	rctx := &RestartContext{
		Child:         child,
		ChildWaitC:    make(chan RestartProcStatus, 1), // needs to be buffered (in case we drop restart state before goroutine has the chance to write)
		ChildTimeoutC: time.After(5 * time.Second),     // FIXME: make this configurable?
	}

	// start child wait goroutine; this goroutine never dies if the child starts up successfully
	//   but it doesn't matter, since this process will exit soon in that case
	go func(rctx *RestartContext) {
		status, err := rctx.Child.Wait()
		rctx.ChildWaitC <- RestartProcStatus{status, err}
	}(rctx)

	return rctx, nil
}
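
restartRunChild() is the write side of a round trip over RESTART_ENV_VAR; ParseRestartDataFromEnv() in Example #7 below is the read side. A minimal self-contained sketch of the same pattern (the struct and names here are stand-ins, not the project's types):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type childData struct {
	PPid int `json:"ppid"`
}

func main() {
	const envVar = "EXAMPLE_RESTART_DATA" // stand-in for RESTART_ENV_VAR

	// parent: serialize state into the environment before starting the child
	raw, _ := json.Marshal(childData{PPid: os.Getpid()})
	os.Setenv(envVar, string(raw))

	// child: read it back and validate
	var parsed childData
	if err := json.Unmarshal([]byte(os.Getenv(envVar)), &parsed); err != nil {
		fmt.Println("can't parse restart data:", err)
		return
	}
	fmt.Println("parent pid:", parsed.PPid)
}
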
Example #5
func (d *DispatcherData) checkZero(src string) {
	if len(d.addedMap) == 0 && d.waitingList.Len() == 0 {
		log.Debugf("No rows left in class=%s, location=%s (%s)", d.className, d.location, src)
		trigger(d.zeroTTCh, "zerott")

		if d.killRequest != nil {
			log.Printf("Killed all jobs in class=%s, location=%s, waiting on continue channel", d.className, d.location)
			d.killRequest.ResCh <- nil
			d.killRequest = nil
			log.Printf("Can continue dispatching in class=%s, location=%s", d.className, d.location)
		}
	}
}
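
trigger() itself is not shown in this listing; judging by its call sites here and in doCycle (Example #9), it is most likely a non-blocking, coalescing notify. A sketch of the usual helper (the channel type and body are assumptions):

func trigger(ch chan bool, reason string) {
	select {
	case ch <- true:
	default:
		// a notification is already pending; coalescing is fine here
	}
}
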
Example #6
File: jobgen.go Project: badoo/thunder
func loadFullState(funcs ...*LoadStateFunc) (err error) {
	for _, funEntry := range funcs {
		startTs := time.Now().UnixNano()
		err = funEntry.fun()

		if err != nil {
			log.Errorf("Could not load %s: %s", funEntry.name, err.Error())
			return err
		}

		log.Debugf("Selected from %s for %.5f sec", funEntry.name, float64(time.Now().UnixNano()-startTs)/1e9)
	}

	return nil
}
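
The elapsed-time arithmetic above (UnixNano deltas divided by 1e9) can be written more directly with time.Since; a minimal equivalent sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	startTs := time.Now()
	time.Sleep(50 * time.Millisecond) // stand-in for funEntry.fun()
	fmt.Printf("Selected for %.5f sec\n", time.Since(startTs).Seconds())
}
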
Example #7
File: restart.go Project: badoo/thunder
func ParseRestartDataFromEnv() (*RestartChildData, error) {
	rcd := &RestartChildData{}
	rcd_env := os.Getenv(RESTART_ENV_VAR)

	if rcd_env == "" {
		return nil, nil // ok, but no data is set
	}

	log.Debugf("ParseRestartDataFromEnv; %s: %s", RESTART_ENV_VAR, rcd_env)
	if err := json.Unmarshal([]byte(rcd_env), rcd); err != nil {
		return nil, err
	}

	if rcd.PPid <= 0 {
		return nil, fmt.Errorf("restart_data.Ppid <= 0 (%d)", rcd.PPid)
	}

	if rcd.GpbrpcSockets == nil {
		return nil, fmt.Errorf("restart_data.GpbrpcSockets == nil")
	}

	return rcd, nil
}
Example #8
File: logfile.go Project: badoo/thunder
func initSyslogHook() error {
	if syslogHookInited {
		return nil
	}

	if DaemonConfig().SyslogIdentity != nil {
		syslogIdentity := parseSyslogIdentity(DaemonConfig().GetSyslogIdentity())
		log.Debugf("syslog_identity: <%s>", syslogIdentity)

		syslogAddr := fmt.Sprintf("%s:%d", DaemonConfig().GetSyslogIp(), DaemonConfig().GetSyslogPort())

		network := func() string {

			if DaemonConfig().GetSyslogIp() != "" && DaemonConfig().GetSyslogPort() != 0 {
				return "udp"
			}

			return ""
		}()

		hook, err := syslog_hook.NewSyslogHookNoFrozen(network, syslogAddr, syslog.LOG_LOCAL6, syslogIdentity)
		if err != nil {
			return err
		}

		log.AddHook(hook)

		if DaemonConfig().GetSyslogSendBufferSize() != 0 {
			log.Warn("unsupported config option syslog_send_buffer_size, ignored")
		}
	}

	syslogHookInited = true

	return nil
}
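
The network value above is computed with an immediately-invoked closure, an idiom this codebase uses a lot for one-shot decisions. A minimal self-contained sketch of the same selection (the fallback semantics are an assumption based on the stdlib log/syslog convention, where an empty network means "dial the local syslog socket"):

package main

import "fmt"

func main() {
	haveRemoteAddr := true // stand-in for the syslog ip/port checks above

	network := func() string {
		if haveRemoteAddr {
			return "udp"
		}
		return "" // empty network: use the local syslog socket
	}()

	fmt.Println("syslog network:", network)
}
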
Example #9
File: jobgen.go Project: badoo/thunder
func doCycle() bool {
	var (
		jiRows         map[string]map[string]*JobInfoEntry
		scripts        map[string]*ScriptEntry
		flags          map[string]*FlagEntry
		scriptsRusage  map[string]*ScriptRusageEntry
		classLocTTRows map[string]map[string][]*TimetableEntry
	)

	unifiedStartTs := time.Now().UnixNano()

	startTs := time.Now().UnixNano()
	err := loadFullState(
		&LoadStateFunc{name: "Scripts", fun: func() (err error) { scripts, err = getGroupedScriptsForPlatform(); return }},
		&LoadStateFunc{name: "JobInfo", fun: func() (err error) { jiRows, err = getGroupedJobInfo(); return }},
		&LoadStateFunc{name: "Flags", fun: func() (err error) { flags, err = getFlags(); return }},
		&LoadStateFunc{name: "ScriptsRusage", fun: func() (err error) { scriptsRusage, err = getScriptRusageStats(); return }},
		&LoadStateFunc{name: "ScriptTimetable", fun: func() (err error) { classLocTTRows, err = selectTimetable(); return }})

	if err != nil {
		log.Errorf("Failed to select state in doCycle: %s", err.Error())
		return false
	}

	log.Debugf("Loaded for %.5f sec", float64(time.Now().UnixNano()-startTs)/1e9)

	startTs = time.Now().UnixNano()
	err = loadSettingsFromRows(jiRows, scripts)
	if err != nil {
		log.Errorf("Could not load settings from rows: %s", err.Error())
		return false
	}

	func() {
		allSettingsMutex.Lock()
		defer allSettingsMutex.Unlock()

		for _, row := range scripts {
			row.settings = allSettings[row.settings_id]
		}
	}()

	scriptsMap.Lock()
	scriptsMap.v = scripts
	scriptsMap.Unlock()

	log.Debugf("  Selected %d rows from flags", len(flags))
	log.Debugf("  Selected %d rows from scripts rusage", len(scriptsRusage))
	log.Debugf("Load settings for %.5f sec", float64(time.Now().UnixNano()-startTs)/1e9)

	startTs = time.Now().UnixNano()

	// We should not try to generate jobs for scripts that are not present in the Script table,
	// but we should not forget settings (e.g. last generation_id) for those scripts
	for class_name := range jiRows {
		if _, ok := scripts[class_name]; !ok {
			delete(jiRows, class_name)
		}
	}

	log.Debugf("Selected all for %.5f sec", float64(time.Now().UnixNano()-unifiedStartTs)/1e9)

	startTs = time.Now().UnixNano()
	updateLoadEstimates()

	log.Debugf("Load estimates updated for %.5f sec", float64(time.Now().UnixNano()-startTs)/1e9)
	func() {
		rusageInfo.Lock()
		defer rusageInfo.Unlock()
		log.Debugf("Group hosts: %+v", rusageInfo.groupHosts)
	}()

	startTs = time.Now().UnixNano()

	failedLocationsMutex.Lock()
	failedLocations = make(map[string]bool)
	failedLocationsMutex.Unlock()

	success := true

	if len(scripts) > 0 {
		throttle.setIntervalCh <- time.Second / time.Duration(len(scripts))
	}

	trigger(throttle.c, "throttle, start of cycle")

	for className, script := range scripts {
		<-throttle.c

		tx := new(db.LazyTrx)
		err := tx.Begin()
		if err != nil {
			log.Errorf("Could not start transaction in job generate: %s", err.Error())
			success = false
			continue
		}

		have := make(map[string]bool)
		locTtRows := classLocTTRows[className]
		if locTtRows != nil {
			for rawLoc, v := range locTtRows {
				loc, err := getLocationIdx(script.settings.location_type, rawLoc)
				if err != nil {
					log.Warningf("Broken settings for class %s: %s", className, err.Error())
					loc = rawLoc
				}
				if len(v) > 0 {
					have[loc] = true
				}
			}
		}

		add_to_timetable, err := generateJobs(tx, className, script.settings, jiRows[className], have, flags[className])

		if err != nil {
			log.Errorf("Could generate jobs for class %s: %s", className, err.Error())
			tx.Rollback()
			success = false
			continue
		}

		err = tx.Commit()
		if err != nil {
			log.Errorf("Could not commit generate jobs for class %s: %s", className, err.Error())
			success = false
			continue
		}

		per_location := make(map[string][]*TimetableEntry)

		for _, row := range add_to_timetable {
			allSettingsMutex.Lock()
			row.settings = allSettings[row.settings_id]
			allSettingsMutex.Unlock()

			if row.settings == nil {
				log.Warningf("Internal inconsistency error: Invalid settings for generated row: %+v", row)
				continue
			}

			key := DEFAULT_LOCATION_IDX
			if row.settings.location_type == LOCATION_TYPE_EACH {
				key = row.location
			}

			if _, ok := per_location[key]; !ok {
				per_location[key] = make([]*TimetableEntry, 0)
			}

			per_location[key] = append(per_location[key], row)
		}

		for location, rows := range per_location {
			notifyAboutNewTTRows(className, location, rows, true)
		}
	}

	notifyForFullTTSelect(classLocTTRows, true)

	log.Debugf("Processed %d classes for %.5f sec", len(scripts), float64(time.Now().UnixNano()-startTs)/1e9)
	log.Debugf("Total %.5f sec", float64(time.Now().UnixNano()-unifiedStartTs)/1e9)

	return success
}
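
throttle's implementation is not part of this listing; a plausible sketch consistent with the setIntervalCh / c usage in doCycle (names, channel types, and structure are all assumptions) is a reconfigurable ticker:

package main

import "time"

type throttleT struct {
	c             chan bool          // doCycle blocks on <-throttle.c before each class
	setIntervalCh chan time.Duration // doCycle retunes this to time.Second / len(scripts)
}

func newThrottle(interval time.Duration) *throttleT {
	t := &throttleT{
		c:             make(chan bool, 1),
		setIntervalCh: make(chan time.Duration),
	}
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case interval = <-t.setIntervalCh:
				ticker.Reset(interval)
			case <-ticker.C:
				select {
				case t.c <- true: // wake one waiter
				default: // a tick is already pending; drop this one
				}
			}
		}
	}()
	return t
}

func main() {
	th := newThrottle(100 * time.Millisecond)
	<-th.c                                    // wait for a tick
	th.setIntervalCh <- 10 * time.Millisecond // retune, like doCycle does
	<-th.c
}
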
Example #10
func (d *DispatcherData) printTrace() {
	trace := make([]byte, 8192)
	n := runtime.Stack(trace, false)
	log.Debugf("class:%s location:%s\nStack trace: %s\n", d.className, d.location, trace[0:n])
}
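
The fixed 8 KiB buffer truncates deep traces, and the false argument limits the dump to the calling goroutine. When a full dump of every goroutine is wanted, the usual idiom grows the buffer until everything fits; a sketch of that idiom (not part of printTrace):

package main

import (
	"fmt"
	"runtime"
)

func fullStack() []byte {
	buf := make([]byte, 8192)
	for {
		n := runtime.Stack(buf, true) // true = include all goroutines
		if n < len(buf) {
			return buf[:n] // it fit
		}
		buf = make([]byte, 2*len(buf)) // truncated, double and retry
	}
}

func main() {
	fmt.Printf("%s\n", fullStack())
}
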
Example #11
File: server.go Project: badoo/thunder
func (server *Server) serveConnection(conn net.Conn) {
	log.Debugf("accepted connection: %s -> %s", conn.LocalAddr(), conn.RemoteAddr())
	defer func() {
		atomic.AddUint64(&server.Stats.ConnCur, ^uint64(0)) // decrements the value, ref: http://golang.org/pkg/sync/atomic/#AddUint64
		log.Debugf("closing connection: %s -> %s", conn.LocalAddr(), conn.RemoteAddr())

		if server.onDisconnect != nil {
			server.onDisconnect(RequestT{
				Server: server,
				Conn:   conn,
			})
		}

		conn.Close()
	}()

	atomic.AddUint64(&server.Stats.ConnCur, 1)
	atomic.AddUint64(&server.Stats.ConnTotal, 1)

	for {
		// need these as var here, since goto might jump over their definition below
		var (
			result      ResultT
			request     RequestT
			startTime   time.Time
			pinbaReq    pinba.Request
			requestTime time.Duration
		)

		msgid, msg, bytes_read, status, err := server.Codec.ReadRequest(server.Proto, conn)
		if err != nil {
			if status == ConnEOF {
				// connection was closed gracefully from client side
				break
			}
			if status == ConnOK {
				// misunderstanding on protobuf level
				result = server.Proto.ErrorGeneric(err)

				// FIXME(antoxa): remove this goto, it's annoying to code around
				goto write_response
			}
			// IO error or similar
			log.Infof("aborting connection: %v", err)
			break
		}

		// FIXME(antoxa): stats will not be incremented when ReadRequest returns err != nil and status == ConnOK
		atomic.AddUint64(&server.Stats.BytesRead, uint64(bytes_read))
		atomic.AddUint64(&server.Stats.Requests, 1)

		request = RequestT{
			Server:    server,
			Conn:      conn,
			RequestId: atomic.AddUint64(&globalRequestId, 1),
			MessageId: msgid,
			Message:   msg,
			PinbaReq:  &pinbaReq,
		}

		if msgid < MaxMsgID {
			atomic.AddUint64(&server.Stats.RequestsIdStat[msgid], 1)
		}

		startTime = time.Now()

		result = server.Proto.Dispatch(request, server.Handler)

		requestTime = time.Since(startTime)

		// log slow request if needed
		if server.slowRequestTime > 0 && server.slowRequestTime <= requestTime {
			if msg != nil {
				requestInfo := func() string {
					reqName := MapRequestIdToName(server.Proto, msgid)
					body, err := json.Marshal(msg)
					if err != nil {
						return fmt.Sprintf("%s %v", reqName, err)
					}
					return fmt.Sprintf("%s %s", reqName, body)
				}()

				log.Warnf("slow request (%d ms): %s; addr: %s <- %s", requestTime/time.Millisecond, requestInfo, conn.LocalAddr(), conn.RemoteAddr())
			}
		}

		server.sendToPinba(&pinbaReq, server.pinbaReqNames[msgid], requestTime)

	write_response:

		if result.Action == ACTION_RESULT {
			writeln, err := server.Codec.WriteResponse(server.Proto, conn, result.Message)
			if err != nil {
				// write error: can't recover
				log.Infof("unrecoverable error while writing: %v", err)
				break
			}
			atomic.AddUint64(&server.Stats.BytesWritten, uint64(writeln))
		}

		if result.Action == ACTION_CLOSE {
			break
		}
	}
}
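
The ^uint64(0) in the deferred ConnCur update is the standard way to decrement an unsigned counter with sync/atomic: adding 2^64-1 wraps around to subtracting one. A minimal self-contained sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var connCur uint64

	atomic.AddUint64(&connCur, 1)          // connection accepted
	atomic.AddUint64(&connCur, ^uint64(0)) // connection closed: +(2^64-1) == -1 (mod 2^64)

	fmt.Println(atomic.LoadUint64(&connCur)) // 0
}
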
Example #12
File: service.go Project: badoo/thunder
func Initialize(default_config_path string, service_conf Config) {
	flag.StringVar(&flags.ConfFile, "c", default_config_path, "path to config file")
	flag.StringVar(&flags.LogFile, "l", "", "path to log file, special value '-' means 'stdout'")
	flag.StringVar(&flags.PidFile, "p", "", "path to pid file. if empty, pidfile will not be created")
	flag.BoolVar(&flags.Testconf, "t", false, "test configuration and exit")
	flag.BoolVar(&flags.Version, "v", false, "print version")
	flag.BoolVar(&flags.FullVersion, "V", false, "print full version info")
	flag.BoolVar(&flags.Debug, "debug", false, "force DEBUG log level")
	flag.Parse()

	if flags.Version {
		fmt.Printf("%s\n", VersionInfo.GetVersion())
		os.Exit(0)
	}

	if flags.FullVersion {
		data, _ := json.MarshalIndent(VersionInfo, "", "  ")
		fmt.Printf("%s\n", data)
		os.Exit(0)
	}

	var err error

	config = service_conf                    // save a pointer to service's config (NOT a copy, mon!)
	commandLine = strings.Join(os.Args, " ") // XXX(antoxa): couldn't think of a better way
	hostname = getHostname()                 // get hostname early

	// moved here from init(): just importing a package should not publish expvars
	initExpvars()

	// current executable full path (symlinks and shit sometimes complicate things)
	binaryPath = func() string {
		path, err := osutil.GetCurrentBinary()
		if err != nil {
			// log as notice, non-critical error (only stats affected)
			log.Infof("couldn't get current binary (using argv[0] = %q): %v", os.Args[0], err)
			return os.Args[0]
		}
		return path
	}()

	// config path
	confPath := func() string {
		if flags.ConfFile != "" {
			return flags.ConfFile
		}
		return default_config_path
	}()

	// resolve absolute config path, convenient for stats
	configPath = func(path string) string {
		var err error
		if path, err = filepath.Abs(path); err != nil {
			return path
		}
		if path, err = filepath.EvalSymlinks(path); err != nil {
			return path
		}
		return path
	}(confPath)

	// parse config and construct final config merged with command line flags
	// use path as supplied to us in args (i.e. unresolved), just to avoid 'too smart, outsmarted yourself' gotchas
	err = ParseConfigFromFile(confPath, service_conf)
	if err != nil {
		err_message := func(err error) string {
			switch real_err := err.(type) {
			case nil:
				return "syntax is ok"
			case *json.SyntaxError:
				return fmt.Sprintf("%v at offset %d", real_err, real_err.Offset)
			case *os.PathError:
				return fmt.Sprintf("%v", real_err)
			default:
				return fmt.Sprintf("(%T) %v", real_err, real_err)
			}
		}(err)

		stderrLogger.Fatalf("Error in config: %s", err_message)

	} else {
		if flags.Testconf {
			fmt.Printf("testconf %s: syntax is ok\n", configPath)
		}
	}

	mergeCommandlineFlagsToConfig(flags, config)

	daemonConfig := config.GetDaemonConfig()

	// antoxa: need the fancy wrapper function to have testconf behave properly
	// FIXME: testconf should check more stuff (below) and graceful restart also
	initPidfileLogfile := func() error {

		// FIXME(antoxa): this testconf thingy is everywhere! must... resist... full rewrite
		if flags.Testconf {
			err = PidfileTest(daemonConfig.GetPidFile())
		} else {
			pidfile, err = PidfileOpen(daemonConfig.GetPidFile())
		}

		if err != nil {
			return fmt.Errorf("can't open pidfile: %s", err)
		}

		// FIXME: this is shit ugly
		//  need better integration between logger and daemon-config
		//  or 1-to-1 mapping
		//  or better log package :)
		log_level := daemonConfig.GetLogLevel()
		if log_level == 0 {
			return fmt.Errorf("unknown log_level, supported: %v", badoo_config.ServiceConfigDaemonConfigTLogLevels_name)
		}
		err = reopenLogfile(daemonConfig.GetLogFile(), log.Level(log_level))
		if err != nil {
			return fmt.Errorf("can't open logfile: %s", err)
		}

		return nil
	}

	err = initPidfileLogfile()
	if err != nil {
		if flags.Testconf {
			stderrLogger.Errorf("%v", err)
			fmt.Printf("testconf failed\n")
		} else {
			stderrLogger.Errorf("%v", err) // always pidfile/logfile errors to stderr
		}
		os.Exit(1)
	} else {
		if flags.Testconf {
			fmt.Printf("testconf successful\n")
			os.Exit(0)
		}
	}

	// log some version info like libangel does
	versionString := func() string {
		vi := &VersionInfo
		version := func() string {
			if vi.GetAutoBuildTag() != "" {
				return fmt.Sprintf("%s-%s", vi.GetVersion(), vi.GetAutoBuildTag())
			} else {
				return vi.GetVersion()
			}
		}()
		return fmt.Sprintf("%s version %s, git %s, built %s on %s",
			vi.GetVcsBasename(), version, vi.GetVcsShortHash(), vi.GetBuildDate(), vi.GetBuildHost())
	}()
	log.Infof("%s", versionString)

	// max cpus, 0 = all of them
	numCPU := func() int {
		maxCpus := int(daemonConfig.GetMaxCpus())
		if maxCpus <= 0 || maxCpus > runtime.NumCPU() {
			maxCpus = runtime.NumCPU()
		}
		return maxCpus
	}()
	runtime.GOMAXPROCS(numCPU)

	// gc percent, <0 - disables GC
	if daemonConfig.GcPercent != nil {
		debug.SetGCPercent(int(daemonConfig.GetGcPercent()))
	}

	// process pinba configuration and related stuff
	pinbaSender, err = func() (*PinbaSender, error) { // assigns a global
		if daemonConfig.GetPinbaAddress() == "" {
			return nil, nil // user doesn't want pinba configured
		}

		pi, err := PinbaInfoFromConfig(config)
		if err != nil {
			return nil, err
		}

		return NewPinbaSender(pi), nil
	}()

	if err != nil {
		log.Fatalf("pinba config error: %v", err)
	}

	// graceful restart handling
	//  see restart.go and signals.go for more details
	restartData, err = ParseRestartDataFromEnv()
	if err != nil {
		log.Fatalf("can't parse restart data: %v", err)
	}
	if restartData != nil {
		log.Debugf("[CHILD] this is a restart, parent: %d, me: %d", restartData.PPid, os.Getpid())
	}

	// start http pprof server (possibly - inherit fd from parent if this is a restart)
	err = func() (err error) {
		HttpServer, err = newHttpServer(config, restartData) // assigning a global here
		if err != nil {
			return err
		}

		if HttpServer != nil { // nil here means it has not been configured
			go HttpServer.Serve()
		}

		return nil
	}()
	if err != nil {
		log.Fatalf("can't start http_pprof server: %v", err)
	}
}
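
The err_message closure above special-cases *json.SyntaxError so config errors point at a byte offset. A minimal self-contained sketch of that error-inspection pattern:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var cfg map[string]interface{}

	err := json.Unmarshal([]byte(`{"daemon": }`), &cfg)
	if synErr, ok := err.(*json.SyntaxError); ok {
		fmt.Printf("%v at offset %d\n", synErr, synErr.Offset)
	}
}
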
Example #13
File: service.go Project: badoo/thunder
// Call this when you want to start your servers and stuff
func EventLoop(ports []Port) {
	defer log.Debug("exiting")

	initPhaseDuration = time.Since(startupTime)

	daemonConfig := config.GetDaemonConfig()

	// service-stats ports
	ports = append(ports, GpbPort("service-stats-gpb", stats_ctx, badoo_service.Gpbrpc))
	ports = append(ports, JsonPort("service-stats-gpb/json", stats_ctx, badoo_service.Gpbrpc))

	// build map of ports and do some sanity checks
	ph := make(map[string]*Port)
	for i := 0; i < len(ports); i++ {
		p := &ports[i]
		ph[p.Name] = p

		// json and gpb ports should have the same context
		//  so try and warn user about passing plain values in (as it makes a copy)
		if reflect.ValueOf(p.Handler).Kind() != reflect.Ptr {
			log.Infof("port[%d].Handler should be a pointer (you want gpbs and json to use the same context, right?) (now: %T)", i, p.Handler)
		}
	}

	getRestartSocket := func(rcd *RestartChildData, portName, portAddr string) (*RestartSocket, *os.File) {
		if rcd == nil {
			return nil, nil
		}

		restartSocket, exists := rcd.GpbrpcSockets[portName]
		if !exists {
			return nil, nil
		}

		restartFile := os.NewFile(restartSocket.Fd, "")

		if restartSocket.Address != portAddr {
			return nil, restartFile
		}

		return &restartSocket, restartFile
	}

	// start 'em all
	for _, lcf := range daemonConfig.GetListen() {
		portName, portAddr := lcf.GetProto(), lcf.GetAddress()
		port := ph[portName]

		if port == nil {
			log.Warnf("ignoring unknown port: %s at %s", portName, portAddr)
			continue
		}

		if port.IsStarted {
			log.Warnf("ignoring double startup for port: %s at %s", portName, portAddr)
		}

		listener, err := func() (listener net.Listener, err error) { // it's important that this should be a function, see defer inside
			restartSocket, restartFile := getRestartSocket(restartData, portName, portAddr)

			// this whole fd/file affair is very inconvenient to deal with,
			//  since when getRestartSocket() returns an fd, the caller can't close it yet, as it may still be used by FileListener
			defer restartFile.Close()

			if restartSocket == nil {
				listener, err = net.Listen("tcp", portAddr)
				if err != nil {
					log.Errorf("listen failed for server %s at %s: %s", portName, portAddr, err)
					return
				}
				log.Infof("port %s bound to address %s", portName, listener.Addr())

			} else {

				listener, err = net.FileListener(restartFile) // this dup()-s
				if err != nil {
					log.Errorf("failed to grab parent fd %d for %s at %s: %s", restartSocket.Fd, portName, portAddr, err)
					return
				}

				log.Infof("port %s bound to address %s (parent fd: %d)", portName, listener.Addr(), restartSocket.Fd)
			}
			return
		}()

		if err != nil {
			os.Exit(1)
		}

		// enable pinba only for ports that explicitly request it
		ps := func() gpbrpc.PinbaSender {
			if !lcf.GetPinbaEnabled() {
				return nil // explicit nil here
			}

			if pinbaSender == nil {
				log.Warnf("pinba is not configured, but pinba_enabled IS set for port %s: %s", portName, portAddr)
				return nil // explicit nil here
			}

			log.Infof("pinba configured for port %s:%s -> %s", portName, portAddr, pinbaSender.Address)
			return pinbaSender
		}()

		// slow request log time
		slowRequestTime := time.Duration(daemonConfig.GetSlowRequestMs()) * time.Millisecond

		srv := &Server{
			Name:    lcf.GetProto(),
			Address: lcf.GetAddress(),
			Server:  gpbrpc.NewServer(listener, port.Proto, port.Codec, port.Handler, ps, slowRequestTime),
		}
		go srv.Server.Serve()

		port.IsStarted = true
		StartedServers[port.Name] = srv // save it for laterz
	}

	// kill parent if this is a child of graceful restart
	if restartData != nil {
		syscall.Kill(restartData.PPid, syscall.SIGQUIT)
	}

	log.Infof("entering event loop")

	exitMethod := wait_for_signals()

	if exitMethod == EXIT_GRACEFULLY { // wait for established connections to close and then die

		// FIXME: should stop servers from accepting new connections here!

		const ATTEMPTS_PER_SEC = 2
		maxAttempts := daemonConfig.GetParentWaitTimeout() * ATTEMPTS_PER_SEC

		for i := uint32(0); i < maxAttempts; i++ {
			for _, srv := range StartedServers {
				currConn := atomic.LoadUint64(&srv.Server.Stats.ConnCur)
				if currConn > 0 {
					log.Debugf("%s still has %d connections", srv.Name, currConn)
					time.Sleep(time.Second / ATTEMPTS_PER_SEC)
				}
			}
		}
	} else {
		// do nothing for EXIT_IMMEDIATELY
	}

	// doing cleanups here
	// XXX: can this be moved to defer at the start of this function?
	if pidfile != nil {
		pidfile.CloseAndRemove()
	}
}
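
The restart-socket machinery above relies on the standard Unix listener handoff: the parent dup()-s its listening socket into the child (forkExec from Example #4 is not shown here), and the child rebuilds a listener from the inherited descriptor with net.FileListener, which dup()-s again, as noted in the code. A sketch of both sides under those assumptions (fd numbering follows os/exec's ExtraFiles convention):

package main

import (
	"net"
	"os"
	"os/exec"
)

// parent side: hand the listening socket to a child process
func startChildWithListener(ln *net.TCPListener, binary string) (*exec.Cmd, error) {
	file, err := ln.File() // dup()-s the listening socket
	if err != nil {
		return nil, err
	}
	cmd := exec.Command(binary)
	cmd.ExtraFiles = []*os.File{file} // ExtraFiles[0] becomes fd 3 in the child
	return cmd, cmd.Start()
}

// child side: rebuild the listener from the inherited descriptor
func inheritListener() (net.Listener, error) {
	f := os.NewFile(3, "inherited-listener")
	defer f.Close() // net.FileListener dup()-s, so this copy can be closed
	return net.FileListener(f)
}

func main() {}
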