// NewDbMap opens the database configured in globalCfg ("dsn" and "db"
// entries) and returns a gorp.DbMap with the jobs/tasks tables registered
// and created if missing. Any setup failure is fatal.
func NewDbMap() *gorp.DbMap {
	// Read errors are ignored — presumably ReadString yields the supplied
	// default on error; confirm against the cfg package.
	dsn, _ := globalCfg.ReadString("dsn", "root:root@/tyrant")
	dbType, _ := globalCfg.ReadString("db", "mysql")
	if dbType != "mysql" && dbType != "sqlite3" {
		log.Fatal("db must be mysql or sqlite3")
	}
	db, err := sql.Open(dbType, dsn)
	if err != nil {
		log.Fatal(err)
	}
	// Pick the gorp dialect matching the configured driver.
	var dbmap *gorp.DbMap
	if dbType == "mysql" {
		dbmap = &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{}}
	} else {
		dbmap = &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}}
	}
	// "jobs" table: auto-increment primary key "Id", unique job name.
	tbl := dbmap.AddTableWithName(Job{}, "jobs").SetKeys(true, "Id")
	tbl.ColMap("name").SetMaxSize(512).SetUnique(true)
	tbl.ColMap("executor").SetMaxSize(4096)
	tbl.ColMap("executor_flags").SetMaxSize(4096)
	tbl.ColMap("uris").SetMaxSize(2048)
	// "tasks" table: auto-increment primary key "Id", unique task id column.
	tbl = dbmap.AddTableWithName(Task{}, "tasks").SetKeys(true, "Id")
	tbl.ColMap("id").SetMaxSize(128).SetUnique(true)
	err = dbmap.CreateTablesIfNotExists()
	if err != nil {
		log.Fatal(err)
	}
	return dbmap
}
// main is the codis-proxy entry point: it parses docopt command-line
// arguments, configures logging and CPU usage, starts the debug HTTP
// endpoint, loads the router configuration, and runs the proxy server.
func main() {
	fmt.Print(banner)
	log.SetLevelByString("info")

	args, err := docopt.Parse(usage, nil, true, "codis proxy v0.1", true)
	if err != nil {
		// NOTE(review): a parse failure is only logged and args may be nil;
		// the nil-map lookups below then all yield nil so defaults apply —
		// confirm this lenient behavior is intended (log.Fatal seems safer).
		log.Error(err)
	}

	// set config file
	if args["-c"] != nil {
		configFile = args["-c"].(string)
	}

	// set output log file
	if args["-L"] != nil {
		log.SetOutputByName(args["-L"].(string))
	}

	// set log level
	if args["--log-level"] != nil {
		log.SetLevelByString(args["--log-level"].(string))
	}

	// set cpu
	if args["--cpu"] != nil {
		cpus, err = strconv.Atoi(args["--cpu"].(string))
		if err != nil {
			log.Fatal(err)
		}
	}

	// set addr
	if args["--addr"] != nil {
		addr = args["--addr"].(string)
	}

	// set http addr
	if args["--http-addr"] != nil {
		httpAddr = args["--http-addr"].(string)
	}

	wd, _ := os.Getwd()
	log.Info("wd:", wd)
	// NOTE(review): this writes crash dumps to "<wd>.dump" — a sibling of
	// the working directory, not a file inside it. Confirm intended.
	log.CrashLog(wd + ".dump")

	router.CheckUlimit(1024)
	runtime.GOMAXPROCS(cpus)

	http.HandleFunc("/setloglevel", handleSetLogLevel)
	// Debug/admin HTTP endpoint; errors from ListenAndServe are discarded.
	go http.ListenAndServe(httpAddr, nil)
	log.Info("running on ", addr)

	conf, err := router.LoadConf(configFile)
	if err != nil {
		log.Fatal(err)
	}
	s := router.NewServer(addr, httpAddr, conf)
	s.Run()
	log.Warning("exit")
}
// waitOnline blocks until the coordinator marks this proxy ONLINE, polling
// its ProxyInfo every 3 seconds. A MARK_OFFLINE state triggers
// handleMarkOffline; once online it registers a watch on the proxy's own
// node so later state changes arrive on s.evtbus.
//
// NOTE(review): s.mu is held for the entire polling loop, including the
// 3-second sleeps — anything else contending for the lock blocks until this
// proxy is online. Confirm this is intended.
func (s *Server) waitOnline() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for {
		pi, err := s.top.GetProxyInfo(s.pi.Id)
		if err != nil {
			log.Fatal(errors.ErrorStack(err))
		}
		if pi.State == models.PROXY_STATE_MARK_OFFLINE {
			s.handleMarkOffline()
		}
		if pi.State == models.PROXY_STATE_ONLINE {
			s.pi.State = pi.State
			println("good, we are on line", s.pi.Id)
			log.Info("we are online", s.pi.Id)
			// Watch our own proxy node for subsequent state changes.
			_, err := s.top.WatchNode(path.Join(models.GetProxyPath(s.top.ProductName), s.pi.Id), s.evtbus)
			if err != nil {
				log.Fatal(errors.ErrorStack(err))
			}
			return
		}
		println("wait to be online ", s.pi.Id)
		log.Warning(s.pi.Id, "wait to be online")
		time.Sleep(3 * time.Second)
	}
}
func (p *Proxy) Run() { tcpAddr, err := net.ResolveTCPAddr("tcp", p.addr) if err != nil { log.Fatal(err) } listener, err := net.ListenTCP("tcp", tcpAddr) if err != nil { log.Fatal(err) } else { log.Infof("proxy listens on %s", p.addr) } defer listener.Close() go p.dispatcher.Run() for { conn, err := listener.AcceptTCP() if err != nil { log.Error(err) continue } log.Infof("accept client: %s", conn.RemoteAddr()) go p.handleConnection(conn) } }
func LoadCodisEnv(cfg *cfg.Cfg) Env { if cfg == nil { log.Fatal("config is nil") } productName, err := cfg.ReadString("product", "test") if err != nil { log.Fatal(err) } zkAddr, err := cfg.ReadString("zk", "localhost:2181") if err != nil { log.Fatal(err) } hostname, _ := os.Hostname() dashboardAddr, err := cfg.ReadString("dashboard_addr", hostname+":18087") if err != nil { log.Fatal(err) } provider, err := cfg.ReadString("coordinator", "zookeeper") if err != nil { log.Fatal(err) } return &CodisEnv{ zkAddr: zkAddr, dashboardAddr: dashboardAddr, productName: productName, provider: provider, } }
func Fatal(msg interface{}) { switch msg.(type) { case string: log.Fatal(msg) case error: log.Fatal(errors.ErrorStack(msg.(error))) } }
func (pc *ProxyConfig) apply() { log.SetLevelByString(pc.logLevel) if pc.logFile != "" { err := log.SetOutputByName(pc.logFile) if err != nil { log.Fatalf("ProxyConfig SetOutputByName %s failed %s ", pc.logFile, err.Error()) } log.SetRotateByDay() } if pc.name == "" { log.Fatal("ProxyConfig name must not empty") } if pc.port == 0 { log.Fatal("ProxyConfig port must not 0") } if pc.cpu > runtime.NumCPU() { log.Warningf("ProxyConfig cpu %d exceed %d, adjust to %d ", pc.cpu, runtime.NumCPU(), runtime.NumCPU()) pc.cpu = runtime.NumCPU() } if pc.maxConn > 10000 { log.Warningf("ProxyConfig maxconn %d exceed 10000, adjust to 10000", pc.maxConn) pc.maxConn = 10000 } runtime.GOMAXPROCS(pc.cpu) if pc.poolSize <= 0 || pc.poolSize > 30 { log.Warning("ProxyConfig poolSize %d , adjust to 10 ", pc.poolSize) pc.poolSize = 10 } if pc.cpuFile != "" { f, err := os.Create(pc.cpuFile) if err != nil { log.Fatal(err) } log.Warning("Archer start CPUProfile ", pc.cpuFile) pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } if pc.memFile != "" { f, err := os.Create(pc.memFile) if err == nil { log.Warning("Archer start HeapProfile ", pc.memFile) pprof.WriteHeapProfile(f) } } go func() { log.Warning(http.ListenAndServe(":6061", nil)) }() }
// NewServer constructs and fully initializes the proxy Server: it connects
// to the topology, fills in the proxy info (addresses derived from the local
// host name plus the supplied ports), publishes stats vars, registers the
// proxy and blocks until it is online, watches the action path, fills the
// slot table, and starts the topology event handler goroutine.
// Any failure is fatal.
func NewServer(addr string, debugVarAddr string, conf *Conf) *Server {
	log.Infof("%+v", conf)
	s := &Server{
		evtbus:            make(chan interface{}, 100),
		top:               topo.NewTopo(conf.productName, conf.zkAddr, conf.f),
		net_timeout:       conf.net_timeout,
		counter:           stats.NewCounters("router"),
		lastActionSeq:     -1,
		startAt:           time.Now(),
		addr:              addr,
		concurrentLimiter: tokenlimiter.NewTokenLimiter(100),
		moper:             NewMultiOperator(addr),
		pools:             cachepool.NewCachePool(),
	}
	s.broker = conf.broker
	// NOTE(review): slot_num looks like a package-level variable — writing
	// it here makes NewServer unsafe to call twice with different configs.
	slot_num = conf.slot_num

	s.mu.Lock()
	s.pi.Id = conf.proxyId
	s.pi.State = models.PROXY_STATE_OFFLINE
	hname, err := os.Hostname()
	if err != nil {
		log.Fatal("get host name failed", err)
	}
	// Advertise host:port using the local hostname plus the port part of
	// the supplied addresses (assumes both contain a ":" — TODO confirm).
	s.pi.Addr = hname + ":" + strings.Split(addr, ":")[1]
	s.pi.DebugVarAddr = hname + ":" + strings.Split(debugVarAddr, ":")[1]
	log.Infof("proxy_info:%+v", s.pi)
	s.mu.Unlock()
	//todo:fill more field

	// Expose event-queue depth and start time as stats variables.
	stats.Publish("evtbus", stats.StringFunc(func() string {
		return strconv.Itoa(len(s.evtbus))
	}))
	stats.Publish("startAt", stats.StringFunc(func() string {
		return s.startAt.String()
	}))

	s.RegisterAndWait()

	_, err = s.top.WatchChildren(models.GetWatchActionPath(conf.productName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	s.FillSlots()

	//start event handler
	go s.handleTopoEvent()

	log.Info("proxy start ok")

	return s
}
func Fatal(msg interface{}) { // cleanup releaseDashboardNode() switch msg.(type) { case string: log.Fatal(msg) case error: log.Fatal(errors.ErrorStack(msg.(error))) } }
// You must call Put after Get. func (cp *CachePool) Get(timeout time.Duration) *memcache.Connection { pool := cp.getPool() if pool == nil { log.Fatal("cache pool is not open") } r, err := pool.Get() if err != nil { log.Fatal(err) } return r.(*memcache.Connection) }
// processAction handles one topology event. Proxy-path events are delegated
// to handleProxyCommand (orders such as suicide). Otherwise it re-registers
// the children watch, collects the action sequence numbers, and replays
// every action newer than lastActionSeq that this proxy has not yet
// responded to, responding where checkAndDoTopoChange says one is needed.
func (s *Server) processAction(e interface{}) {
	if strings.Index(GetEventPath(e), models.GetProxyPath(s.top.ProductName)) == 0 {
		//proxy event, should be order for me to suicide
		s.handleProxyCommand()
		return
	}

	//re-watch
	nodes, err := s.top.WatchChildren(models.GetWatchActionPath(s.top.ProductName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	// Nothing new, or not a children-changed event: nothing to replay.
	if len(seqs) == 0 || !s.top.IsChildrenChangedEvent(e) {
		return
	}

	//get last pos
	// Find the first sequence newer than what we last processed.
	// (Assumes seqs is sorted ascending — TODO confirm in ExtraSeqList.)
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			break
		}
	}
	if index < 0 {
		return
	}

	actions := seqs[index:]
	for _, seq := range actions {
		// Skip actions this proxy has already responded to.
		exist, err := s.top.Exist(path.Join(s.top.GetActionResponsePath(seq), s.pi.Id))
		if err != nil {
			log.Fatal(errors.ErrorStack(err))
		}
		if exist {
			continue
		}
		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}

	s.lastActionSeq = seqs[len(seqs)-1]
}
func (c *Conn) dispatch(data []byte) error { cmd := data[0] data = data[1:] log.Debug(c.connectionId, cmd, hack.String(data)) c.lastCmd = hack.String(data) token := c.server.GetToken() c.server.GetRWlock().RLock() defer func() { c.server.GetRWlock().RUnlock() c.server.ReleaseToken(token) }() c.server.IncCounter(mysql.MYSQL_COMMAND(cmd).String()) switch mysql.MYSQL_COMMAND(cmd) { case mysql.COM_QUIT: c.Close() return nil case mysql.COM_QUERY: return c.handleQuery(hack.String(data)) case mysql.COM_PING: return c.writeOkFlush(nil) case mysql.COM_INIT_DB: log.Debug(cmd, hack.String(data)) if err := c.useDB(hack.String(data)); err != nil { return errors.Trace(err) } return c.writeOkFlush(nil) case mysql.COM_FIELD_LIST: return c.handleFieldList(data) case mysql.COM_STMT_PREPARE: // not support server side prepare yet case mysql.COM_STMT_EXECUTE: log.Fatal("not support", data) case mysql.COM_STMT_CLOSE: return c.handleStmtClose(data) case mysql.COM_STMT_SEND_LONG_DATA: log.Fatal("not support", data) case mysql.COM_STMT_RESET: log.Fatal("not support", data) default: msg := fmt.Sprintf("command %d not supported now", cmd) return mysql.NewError(mysql.ER_UNKNOWN_ERROR, msg) } return nil }
func Fatal(msg interface{}) { // cleanup releaseDashboardNode() if globalMigrateManager != nil { globalMigrateManager.removeNode() } switch msg.(type) { case string: log.Fatal(msg) case error: log.Fatal(errors.ErrorStack(msg.(error))) } }
func Fatal(msg interface{}) { unRegisterConfigNode() // try unlock force if zkLock != nil { zkLock.Unlock() } switch msg.(type) { case string: log.Fatal(msg) case error: log.Fatal(errors.ErrorStack(msg.(error))) } }
// LoadConf reads the proxy configuration from configFile. product, zk,
// proxy_id and broker are required (missing values are fatal); slot_num and
// net_timeout fall back to 16 and 5 respectively.
//
// NOTE(review): despite the error return, every failure path calls
// log.Fatal, so the returned error is always nil — confirm whether callers
// expect real errors back.
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}

	// Read errors are ignored; emptiness after applying the default is what
	// is treated as "missing".
	srvConf.productName, _ = conf.ReadString("product", "test")
	if len(srvConf.productName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}

	srvConf.zkAddr, _ = conf.ReadString("zk", "")
	if len(srvConf.zkAddr) == 0 {
		log.Fatalf("invalid config: need zk entry is missing in %s", configFile)
	}

	srvConf.proxyId, _ = conf.ReadString("proxy_id", "")
	if len(srvConf.proxyId) == 0 {
		log.Fatalf("invalid config: need proxy_id entry is missing in %s", configFile)
	}

	srvConf.broker, _ = conf.ReadString("broker", "ledisdb")
	if len(srvConf.broker) == 0 {
		log.Fatalf("invalid config: need broker entry is missing in %s", configFile)
	}

	srvConf.slot_num, _ = conf.ReadInt("slot_num", 16)
	srvConf.net_timeout, _ = conf.ReadInt("net_timeout", 5)

	return srvConf, nil
}
// checkAndDoTopoChange loads the action stored at seq and, when this proxy
// is among its receivers, applies the corresponding topology change (slot
// migration/change, server-group change, or multi-slot change). It returns
// true when the caller should post an action response, false when the
// action does not address this proxy. Fetch failures and unknown action
// types are fatal.
func (s *Server) checkAndDoTopoChange(seq int) (needResponse bool) {
	act, err := s.top.GetActionWithSeq(int64(seq))
	if err != nil {
		log.Fatal(errors.ErrorStack(err), "action seq", seq)
	}

	if !StringsContain(act.Receivers, s.pi.Id) {
		//no need to response
		return false
	}

	switch act.Type {
	case models.ACTION_TYPE_SLOT_MIGRATE, models.ACTION_TYPE_SLOT_CHANGED, models.ACTION_TYPE_SLOT_PREMIGRATE:
		// Slot-level change: refresh the affected slot from the topology.
		slot := &models.Slot{}
		s.getActionObject(seq, slot)
		s.fillSlot(slot.Id, true)
	case models.ACTION_TYPE_SERVER_GROUP_CHANGED:
		serverGroup := &models.ServerGroup{}
		s.getActionObject(seq, serverGroup)
		s.OnGroupChange(serverGroup.Id)
	case models.ACTION_TYPE_SERVER_GROUP_REMOVE:
		//do not care
	case models.ACTION_TYPE_MULTI_SLOT_CHANGED:
		param := &models.SlotMultiSetParam{}
		s.getActionObject(seq, param)
		s.OnSlotRangeChange(param)
	default:
		log.Fatalf("unknown action %+v", act)
	}

	return true
}
func (self *Server) Start(addr string) { ln, err := net.Listen("tcp", addr) if err != nil { log.Fatal(err) } go self.EvtLoop() log.Debug("listening on", addr) go registerWebHandler(self) //load background jobs from storage err = self.store.Init() if err != nil { log.Error(err) self.store = nil } else { self.getAllJobs() } for { conn, err := ln.Accept() if err != nil { // handle error continue } session := &session{} go session.handleConnection(self, conn) } }
func main() { println("executor started") log.SetHighlighting(false) f, err := os.Create("executor.log") if err != nil { println(err.Error()) } log.SetOutput(f) log.Warning("executor start...") pwd, err := os.Getwd() if err != nil { log.Fatal(err) } se := &ShellExecutor{pwd: pwd, finish: make(chan string), process: make(map[string]*contex)} driver := mesos.ExecutorDriver{ Executor: &mesos.Executor{ Registered: se.OnRegister, KillTask: se.OnKillTask, LaunchTask: se.OnLaunchTask, Shutdown: se.OnShutdown, Error: se.OnError, Disconnected: se.OnDisconnected, }, } go se.EventLoop() driver.Init() defer driver.Destroy() driver.Run() }
func (top *Topology) InitZkConn() { var err error top.zkConn, err = top.fact(top.zkAddr) if err != nil { log.Fatal(err) } }
// NewSchemaInfo builds the SchemaInfo for dbName: a 128 MiB LRU query
// cache, a row-cache pool (3s idle/query timeouts), and a MySQL connection
// pool capped at 100 idle connections. Schema overrides are registered
// after the cache pool is opened, then wired up via override().
// A failed MySQL connection is currently fatal.
func NewSchemaInfo(rowCacheConf RowCacheConfig, dbAddr string, user, pwd, dbName string, overrides []SchemaOverride) *SchemaInfo {
	si := &SchemaInfo{
		queries:   cache.NewLRUCache(128 * 1024 * 1024),
		tables:    make(map[string]*TableInfo),
		cachePool: NewCachePool(dbName, rowCacheConf, 3*time.Second, 3*time.Second),
	}
	var err error
	si.connPool, err = mysql.Open(dbAddr, user, pwd, dbName)
	if err != nil {
		//todo: return error
		log.Fatal(err)
	}
	si.overrides = overrides
	si.connPool.SetMaxIdleConnNum(100)
	log.Infof("%+v", si.overrides)
	si.cachePool.Open()
	// Register each overridden table so its entry exists before override()
	// applies the override rules.
	for _, or := range si.overrides {
		si.CreateOrUpdateTable(or.Name)
	}
	si.override()
	return si
}
func main() { pwd, err := os.Getwd() if err != nil { log.Fatal(err) } se := &ShellExecutor{pwd: pwd, finish: make(chan string), process: make(map[string]*exec.Cmd)} driver := mesos.ExecutorDriver{ Executor: &mesos.Executor{ Registered: se.OnRegister, KillTask: se.OnKillTask, LaunchTask: se.OnLaunchTask, Shutdown: se.OnShutdown, Error: se.OnError, Disconnected: se.OnDisconnected, }, } go se.EventLoop() driver.Init() defer driver.Destroy() driver.Run() }
// startMemcache launches the external memcache subprocess configured in
// rowCacheConfig and waits until it answers on cp.port, retrying a connect
// every 100ms for up to 50 attempts (~5s). Failure to start, connect, or
// round-trip a test Set is fatal.
func (cp *CachePool) startMemcache() {
	// A "/" in the port indicates a unix-domain socket path — presumably;
	// remove any stale socket file before starting. TODO confirm.
	if strings.Contains(cp.port, "/") {
		_ = os.Remove(cp.port)
	}
	commandLine := cp.rowCacheConfig.GetSubprocessFlags()
	cp.cmd = exec.Command(commandLine[0], commandLine[1:]...)
	if err := cp.cmd.Start(); err != nil {
		log.Fatalf("can't start memcache: %v", err)
	}
	attempts := 0
	for {
		time.Sleep(100 * time.Millisecond)
		c, err := memcache.Connect(cp.port, 30*time.Millisecond)
		if err != nil {
			attempts++
			if attempts >= 50 {
				// Give up: kill the subprocess and reap it asynchronously.
				cp.cmd.Process.Kill()
				// Avoid zombies
				go cp.cmd.Wait()
				// FIXME(sougou): Throw proper error if we can recover
				log.Fatal("Can't connect to memcache")
			}
			continue
		}
		// Sanity round-trip before declaring the cache healthy.
		if _, err = c.Set("health", 0, 0, []byte("ok")); err != nil {
			log.Fatalf("can't communicate with memcache: %v", err)
		}
		c.Close()
		break
	}
}
func InitConfig(path string) { globalCfg = cfg.NewCfg(path) err := globalCfg.Load() if err != nil { log.Fatal(err) } }
// LoadConf reads the proxy configuration from configFile. product, zk and
// proxy_id are required (missing values are fatal); net_timeout, proto and
// coordinator fall back to 5, "tcp" and "zookeeper".
//
// NOTE(review): despite the error return, every failure path calls
// log.Fatal, so the returned error is always nil — confirm whether callers
// expect real errors back.
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}

	// Read errors are ignored; emptiness after applying the default is what
	// is treated as "missing".
	srvConf.productName, _ = conf.ReadString("product", "test")
	if len(srvConf.productName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}

	srvConf.zkAddr, _ = conf.ReadString("zk", "")
	if len(srvConf.zkAddr) == 0 {
		log.Fatalf("invalid config: need zk entry is missing in %s", configFile)
	}
	srvConf.zkAddr = strings.TrimSpace(srvConf.zkAddr)

	srvConf.proxyId, _ = conf.ReadString("proxy_id", "")
	if len(srvConf.proxyId) == 0 {
		log.Fatalf("invalid config: need proxy_id entry is missing in %s", configFile)
	}

	srvConf.netTimeout, _ = conf.ReadInt("net_timeout", 5)
	srvConf.proto, _ = conf.ReadString("proto", "tcp")
	srvConf.provider, _ = conf.ReadString("coordinator", "zookeeper")

	log.Infof("%+v", srvConf)

	return srvConf, nil
}
// main is the codis-config entry point: it installs a SIGINT/SIGTERM
// handler that releases the dashboard node before exiting, parses docopt
// arguments, loads configuration (from -c or the default location),
// initializes the global environment, applies logging flags, and dispatches
// to the requested sub-command.
func main() {
	log.SetLevelByString("info")

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	signal.Notify(c, syscall.SIGTERM)
	go func() {
		<-c
		// Release our ephemeral dashboard registration before dying.
		if createdDashboardNode {
			releaseDashboardNode()
		}
		Fatal("ctrl-c or SIGTERM found, exit")
	}()

	args, err := docopt.Parse(usage, nil, true, "codis config v0.1", true)
	if err != nil {
		// NOTE(review): a parse failure is only logged; args may be nil
		// below, in which case the nil-map lookups return nil and defaults
		// apply — confirm this lenient behavior is intended.
		log.Error(err)
	}

	// set config file
	var configFile string
	var config *cfg.Cfg
	if args["-c"] != nil {
		configFile = args["-c"].(string)
		config, err = utils.InitConfigFromFile(configFile)
		if err != nil {
			log.Warning("load config file error")
			Fatal(err)
		}
	} else {
		config, err = utils.InitConfig()
		if err != nil {
			log.Warning("load config file error")
			Fatal(err)
		}
	}

	// load global vars
	globalEnv = env.LoadCodisEnv(config)

	// set output log file
	if args["-L"] != nil {
		log.SetOutputByName(args["-L"].(string))
	}

	// set log level
	if args["--log-level"] != nil {
		log.SetLevelByString(args["--log-level"].(string))
	}

	cmd := args["<command>"].(string)
	cmdArgs := args["<args>"].([]string)

	// Debug endpoint; ListenAndServe errors are discarded.
	go http.ListenAndServe(":10086", nil)
	err = runCommand(cmd, cmdArgs)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
func (s *Server) getActionObject(seq int, target interface{}) { act := &models.Action{Target: target} log.Infof("%+v", act) err := s.top.GetActionWithSeqObject(int64(seq), act) if err != nil { log.Fatal(errors.ErrorStack(err)) } }
func tryRunAsLeader() { config := cfg.NewCfg(*conf) err := config.Load() if err != nil { log.Fatal(err) } zkaddr, err := config.ReadString("zk", "localhost:2181/tyrant") log.Debug(zkaddr) zkConn, _, err := zk.Connect(strings.Split(zkaddr, ","), 3*time.Second) if err != nil { log.Fatal(err) } leader := zkhelper.CreateElection(*zkConn, tyrant_zk_path) task := &LeaderTask{} leader.RunTask(task) }
func (s *Server) RegisterAndWait() { _, err := s.top.CreateProxyInfo(&s.pi) if err != nil { log.Fatal(errors.ErrorStack(err)) } s.waitOnline() }
func main() { autoflags.Define(&config) flag.Parse() log.SetLevelByString(config.LogLevel) // to avoid pprof being optimized by gofmt log.Debug(pprof.Handler("profile")) if len(config.LogFile) != 0 { log.SetOutputByName(config.LogFile) log.SetRotateByDay() } if config.LogEveryN <= 0 { proxy.LogEveryN = 1 } else { proxy.LogEveryN = config.LogEveryN } log.Infof("%#v", config) sigChan := make(chan os.Signal) signal.Notify(sigChan, os.Interrupt, os.Kill) log.Infof("pid %d", os.Getpid()) if len(config.DebugAddr) != 0 { http.HandleFunc("/setloglevel", handleSetLogLevel) go func() { log.Fatal(http.ListenAndServe(config.DebugAddr, nil)) }() log.Infof("debug service listens on %s", config.DebugAddr) } // shuffle startup nodes startupNodes := strings.Split(config.StartupNodes, ",") indexes := rand.Perm(len(startupNodes)) for i, startupNode := range startupNodes { startupNodes[i] = startupNodes[indexes[i]] startupNodes[indexes[i]] = startupNode } connPool := proxy.NewConnPool(config.BackendIdleConnections, config.ConnectTimeout, config.ReadPrefer != proxy.READ_PREFER_MASTER) dispatcher := proxy.NewDispatcher(startupNodes, config.SlotsReloadInterval, connPool, config.ReadPrefer) if err := dispatcher.InitSlotTable(); err != nil { log.Fatal(err) } proxy := proxy.NewProxy(config.Addr, dispatcher, connPool) go proxy.Run() sig := <-sigChan log.Infof("terminated by %#v", sig) proxy.Exit() }
func GetExecutorPath() string { filedirectory := filepath.Dir(os.Args[0]) execPath, err := filepath.Abs(filedirectory) if err != nil { log.Fatal(err) } return execPath }