func cmdProxy(argv []string) (err error) {
	usage := `usage:
	cconfig proxy list
	cconfig proxy offline <proxy_name>
	cconfig proxy online <proxy_name>
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	zkLock.Lock(fmt.Sprintf("proxy, %+v", argv))
	defer func() {
		err := zkLock.Unlock()
		if err != nil {
			log.Error(err)
		}
	}()

	if args["list"].(bool) {
		return runProxyList()
	}

	proxyName := args["<proxy_name>"].(string)
	if args["online"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_ONLINE)
	}
	if args["offline"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_MARK_OFFLINE)
	}
	return nil
}
func (s *MemcacheStats) updateItemsStats() {
	if s.items == nil {
		return
	}
	s.readStats("items", func(sKey, sValue string) {
		ival, err := strconv.ParseInt(sValue, 10, 64)
		if err != nil {
			log.Error(err)
			internalErrors.Add("MemcacheStats", 1)
			return
		}
		subkey, slabid, err := parseItemKey(sKey)
		if err != nil {
			log.Error(err)
			internalErrors.Add("MemcacheStats", 1)
			return
		}
		m, ok := s.items[subkey]
		if !ok {
			log.Errorf("Unknown memcache items stats %v %v: %v", subkey, slabid, ival)
			internalErrors.Add("MemcacheStats", 1)
			return
		}
		m[slabid] = ival
	})
}
func (cp *ConnPool) postConnect(conn net.Conn, err error) (net.Conn, error) {
	if err != nil || !cp.sendReadOnly {
		return conn, err
	}
	defer func() {
		if err != nil {
			conn.Close()
			conn = nil
		}
	}()
	if _, err = conn.Write(REDIS_CMD_READ_ONLY); err != nil {
		log.Error("write READONLY failed", conn.RemoteAddr().String(), err)
		return conn, err
	}
	var data *resp.Data
	reader := bufio.NewReader(conn)
	data, err = resp.ReadData(reader)
	if err != nil {
		log.Error("read READONLY resp failed", conn.RemoteAddr().String(), err)
		return conn, err
	}
	if data.T == resp.T_Error {
		log.Error("READONLY resp is not OK", conn.RemoteAddr().String())
		err = errors.New("post connect error: READONLY resp is not OK")
	}
	return conn, err
}
func cmdServer(argv []string) (err error) {
	usage := `usage:
	cconfig server list
	cconfig server add <group_id> <redis_addr> <role>
	cconfig server remove <group_id> <redis_addr>
	cconfig server promote <group_id> <redis_addr>
	cconfig server add-group <group_id>
	cconfig server remove-group <group_id>
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	zkLock.Lock(fmt.Sprintf("server, %+v", argv))
	defer func() {
		err := zkLock.Unlock()
		if err != nil {
			log.Error(err)
		}
	}()

	if args["list"].(bool) {
		return runListServerGroup()
	}

	groupId, err := strconv.Atoi(args["<group_id>"].(string))
	if err != nil {
		log.Warning(err)
		return err
	}

	if args["remove-group"].(bool) {
		return runRemoveServerGroup(groupId)
	}
	if args["add-group"].(bool) {
		return runAddServerGroup(groupId)
	}

	serverAddr := args["<redis_addr>"].(string)
	if args["add"].(bool) {
		role := args["<role>"].(string)
		return runAddServerToGroup(groupId, serverAddr, role)
	}
	if args["remove"].(bool) {
		return runRemoveServerFromGroup(groupId, serverAddr)
	}
	if args["promote"].(bool) {
		return runPromoteServerToMaster(groupId, serverAddr)
	}
	return nil
}
func (self *SQLite3Storage) Init() error {
	var err error
	self.db, err = sql.Open("sqlite3", self.Source)
	if err != nil {
		log.Error(err)
		return err
	}
	if _, err = self.db.Exec(createTableSQL); err != nil {
		log.Error(err)
		return err
	}
	return self.db.Ping()
}
func (s *Server) resetSchemaInfo() error {
	for _, c := range s.clients {
		if len(c.txConns) > 0 {
			return errors.Errorf("transaction exists")
		}
	}

	cfg, err := config.ParseConfigFile(s.configFile)
	if err != nil {
		log.Error(err)
		return err
	}

	s.cleanup()
	s.autoSchamas = make(map[string]*tabletserver.SchemaInfo)

	for _, n := range s.shards {
		n.Close()
	}
	s.shards = nil
	s.schemas = nil

	log.Warningf("%#v", cfg)
	log.SetLevelByString(cfg.LogLevel)

	s.cfg = cfg
	return s.loadSchemaInfo()
}
func main() {
	fmt.Print(banner)
	log.SetLevelByString("info")

	args, err := docopt.Parse(usage, nil, true, "codis proxy v0.1", true)
	if err != nil {
		log.Error(err)
	}

	// set config file
	if args["-c"] != nil {
		configFile = args["-c"].(string)
	}

	// set output log file
	if args["-L"] != nil {
		log.SetOutputByName(args["-L"].(string))
	}

	// set log level
	if args["--log-level"] != nil {
		log.SetLevelByString(args["--log-level"].(string))
	}

	// set cpu
	if args["--cpu"] != nil {
		cpus, err = strconv.Atoi(args["--cpu"].(string))
		if err != nil {
			log.Fatal(err)
		}
	}

	// set addr
	if args["--addr"] != nil {
		addr = args["--addr"].(string)
	}

	// set http addr
	if args["--http-addr"] != nil {
		httpAddr = args["--http-addr"].(string)
	}

	wd, _ := os.Getwd()
	log.Info("wd:", wd)
	log.CrashLog(wd + ".dump")

	router.CheckUlimit(1024)
	runtime.GOMAXPROCS(cpus)

	http.HandleFunc("/setloglevel", handleSetLogLevel)
	go http.ListenAndServe(httpAddr, nil)

	log.Info("running on ", addr)
	conf, err := router.LoadConf(configFile)
	if err != nil {
		log.Fatal(err)
	}
	s := router.NewServer(addr, httpAddr, conf)
	s.Run()
	log.Warning("exit")
}
func cmdProxy(argv []string) (err error) {
	usage := `usage:
	codis-config proxy list
	codis-config proxy offline <proxy_name>
	codis-config proxy online <proxy_name>
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	if args["list"].(bool) {
		return runProxyList()
	}

	proxyName := args["<proxy_name>"].(string)
	if args["online"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_ONLINE)
	}
	if args["offline"].(bool) {
		return runSetProxyStatus(proxyName, models.PROXY_STATE_MARK_OFFLINE)
	}
	return nil
}
func cmdDashboard(argv []string) (err error) {
	usage := `usage: codis-config dashboard [--addr=<address>] [--http-log=<log_file>]

options:
	--addr		listen ip:port, e.g. localhost:12345, :8086, [default: :8086]
	--http-log	http request log [default: request.log]
`
	args, err := docopt.Parse(usage, argv, true, "", false)
	if err != nil {
		log.Error(err)
		return err
	}
	log.Debug(args)

	logFileName := "request.log"
	if args["--http-log"] != nil {
		logFileName = args["--http-log"].(string)
	}

	addr := ":8086"
	if args["--addr"] != nil {
		addr = args["--addr"].(string)
	}

	runDashboard(addr, logFileName)
	return nil
}
func (self *ResMan) addReadyTask(id string) (string, error) {
	if self.ready.Exist(id) {
		return "", fmt.Errorf("%s already exist: %+v", id, self.ready.Get(id))
	}

	job, err := scheduler.GetJobById(id)
	if err != nil {
		return "", err
	}

	persistentTask := &scheduler.Task{TaskId: self.genTaskId(), Status: scheduler.STATUS_READY,
		StartTs: time.Now().Unix(), JobName: job.Name}
	log.Warningf("%+v", persistentTask)
	err = persistentTask.Save()
	if err != nil {
		log.Error(err)
		return "", err
	}

	job.LastTaskId = persistentTask.TaskId
	job.Save()

	t := &Task{Tid: persistentTask.TaskId, job: job, state: taskReady}
	self.ready.Add(t.Tid, t)
	log.Debugf("ready task %+v, total count:%d", t, self.ready.Length())

	return persistentTask.TaskId, nil
}
func (self *RedisQ) GetJobs() ([]*Job, error) {
	strs, err := self.client.Keys(JobPrefix + "*").Result()
	if err != nil {
		return nil, err
	}
	if len(strs) == 0 { // no jobs on redis
		return nil, nil
	}

	vals, err := self.client.MGet(strs...).Result()
	if err != nil {
		return nil, err
	}

	jobs := make([]*Job, len(strs))
	for i, s := range vals {
		err := json.Unmarshal([]byte(s.(string)), &jobs[i])
		if err != nil {
			log.Error(s)
			return nil, err
		}
	}
	return jobs, nil
}
func WaitForReceiverWithTimeout(zkConn zkhelper.Conn, productName string, actionZkPath string, proxies []ProxyInfo, timeoutInMs int) error {
	if len(proxies) == 0 {
		return nil
	}

	times := 0
	proxyIds := make(map[string]struct{})
	var offlineProxyIds []string
	for _, p := range proxies {
		proxyIds[p.Id] = struct{}{}
	}
	checkTimes := timeoutInMs / 500
	// check every 500ms
	for times < checkTimes {
		if times >= 6 && (times*500)%1000 == 0 {
			log.Warning("abnormal waiting time for receivers", actionZkPath, offlineProxyIds)
		}
		// get confirm ids
		nodes, _, err := zkConn.Children(actionZkPath)
		if err != nil {
			return errors.Trace(err)
		}
		confirmIds := make(map[string]struct{})
		for _, node := range nodes {
			id := path.Base(node)
			confirmIds[id] = struct{}{}
		}
		if len(confirmIds) != 0 {
			match := true
			// check whether all proxies have responded
			var notMatchList []string
			for id := range proxyIds {
				// a proxy id missing from confirm ids means it has not responded yet
				if _, ok := confirmIds[id]; !ok {
					match = false
					notMatchList = append(notMatchList, id)
				}
			}
			if match {
				return nil
			}
			offlineProxyIds = notMatchList
		}
		times += 1
		time.Sleep(500 * time.Millisecond)
	}

	if len(offlineProxyIds) > 0 {
		log.Error("proxies didn't respond: ", offlineProxyIds)
	}
	// set offline proxies
	for _, id := range offlineProxyIds {
		log.Errorf("mark proxy %s to PROXY_STATE_MARK_OFFLINE", id)
		if err := SetProxyStatus(zkConn, productName, id, PROXY_STATE_MARK_OFFLINE); err != nil {
			return errors.Trace(err)
		}
	}
	return errors.Trace(ErrReceiverTimeout)
}
//todo: test select a == b && c == d
//      select c == d && a == b
func generateSelectSql(ti *tabletserver.TableInfo, plan *planbuilder.ExecPlan) (string, error) {
	if len(ti.PKColumns) != len(plan.PKValues) {
		log.Error("PKColumns and PKValues not match")
		return "", errors.Errorf("PKColumns and PKValues not match, %+v, %+v", ti.PKColumns, plan.PKValues)
	}

	pks := make([]schema.TableColumn, 0, len(ti.PKColumns))
	for i := range ti.PKColumns {
		pks = append(pks, ti.Columns[ti.PKColumns[i]])
	}

	buf := &bytes.Buffer{}
	buf.WriteString(fmt.Sprintf("select * from %s where ", ti.Name))
	for i, pk := range pks {
		buf.WriteString(pk.Name)
		buf.WriteString("=")
		plan.PKValues[i].(sqltypes.Value).EncodeSql(buf)
		if i < len(pks)-1 {
			buf.WriteString(" and ")
		}
	}
	buf.WriteString(";")

	return buf.String(), nil
}
func (s *Server) responseAction(seq int64) {
	log.Info("send response", seq)
	err := s.top.DoResponse(int(seq), &s.pi)
	if err != nil {
		log.Error(errors.ErrorStack(err))
	}
}
func (self *Server) Start(addr string) {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatal(err)
	}

	go self.EvtLoop()
	log.Debug("listening on", addr)
	go registerWebHandler(self)

	// load background jobs from storage
	err = self.store.Init()
	if err != nil {
		log.Error(err)
		self.store = nil
	} else {
		self.getAllJobs()
	}

	for {
		conn, err := ln.Accept()
		if err != nil { // handle error
			continue
		}
		session := &session{}
		go session.handleConnection(self, conn)
	}
}
func (p *Proxy) Run() {
	tcpAddr, err := net.ResolveTCPAddr("tcp", p.addr)
	if err != nil {
		log.Fatal(err)
	}

	listener, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		log.Fatal(err)
	} else {
		log.Infof("proxy listens on %s", p.addr)
	}
	defer listener.Close()

	go p.dispatcher.Run()

	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			log.Error(err)
			continue
		}
		log.Infof("accept client: %s", conn.RemoteAddr())
		go p.handleConnection(conn)
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()

	if len(*configFile) == 0 {
		log.Error("must use a config file")
		return
	}

	cfg, err := config.ParseConfigFile(*configFile)
	if err != nil {
		log.Error(err.Error())
		return
	}

	log.SetLevelByString(cfg.LogLevel)
	log.CrashLog("./cm-proxy.dump")

	var svr *proxy.Server
	svr, err = proxy.NewServer(*configFile)
	if err != nil {
		log.Error(err.Error())
		return
	}

	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		svr.Close()
		os.Exit(0)
	}()

	go svr.Run()

	http.HandleFunc("/api/reload", svr.HandleReload)
	//todo: take the listen address from configuration
	http.ListenAndServe(":8888", nil)
}
func main() {
	log.SetLevelByString("info")

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	signal.Notify(c, syscall.SIGTERM)
	go func() {
		<-c
		if createdDashboardNode {
			releaseDashboardNode()
		}
		Fatal("ctrl-c or SIGTERM found, exit")
	}()

	args, err := docopt.Parse(usage, nil, true, "codis config v0.1", true)
	if err != nil {
		log.Error(err)
	}

	// set config file
	var configFile string
	var config *cfg.Cfg
	if args["-c"] != nil {
		configFile = args["-c"].(string)
		config, err = utils.InitConfigFromFile(configFile)
		if err != nil {
			log.Warning("load config file error")
			Fatal(err)
		}
	} else {
		config, err = utils.InitConfig()
		if err != nil {
			log.Warning("load config file error")
			Fatal(err)
		}
	}

	// load global vars
	globalEnv = env.LoadCodisEnv(config)

	// set output log file
	if args["-L"] != nil {
		log.SetOutputByName(args["-L"].(string))
	}

	// set log level
	if args["--log-level"] != nil {
		log.SetLevelByString(args["--log-level"].(string))
	}

	cmd := args["<command>"].(string)
	cmdArgs := args["<args>"].([]string)

	go http.ListenAndServe(":10086", nil)

	err = runCommand(cmd, cmdArgs)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
func (s *Server) createTaskRunners() {
	for _, slot := range s.slots {
		if err := s.createTaskRunner(slot); err != nil {
			log.Error(err)
			return
		}
	}
}
// AddJob implements the Storage AddJob method.
func (self *MYSQLStorage) AddJob(j *Job) error {
	_, err := self.db.Exec(saveJobSQL, j.Handle, j.Id, j.Priority, j.CreateAt.UTC(), j.FuncName, j.Data)
	if err != nil {
		log.Error(err)
		return err
	}
	return nil
}
func (m *MigrateManager) createNode() error {
	zkhelper.CreateRecursive(m.zkConn, fmt.Sprintf("/zk/codis/db_%s/migrate_tasks", m.productName), "", 0, zkhelper.DefaultDirACLs())
	_, err := m.zkConn.Create(getManagerPath(m.productName), []byte(""), zk.FlagEphemeral, zkhelper.DefaultFileACLs())
	if err != nil {
		log.Error("dashboard already exists! err: ", err)
	}
	return nil
}
func (m *MigrateManager) loop() error {
	for {
		m.lck.RLock()
		ele := m.pendingTasks.Front()
		m.lck.RUnlock()
		if ele == nil {
			time.Sleep(500 * time.Millisecond)
			continue
		}

		// get pending task, and run
		m.lck.Lock()
		m.pendingTasks.Remove(ele)
		m.lck.Unlock()

		t := ele.Value.(*MigrateTask)
		t.zkConn = m.zkConn
		t.productName = m.productName
		m.runningTask = t

		if m.preCheck != nil {
			log.Info("start migration pre-check")
			if ok, err := m.preCheck(t); !ok {
				if err != nil {
					log.Error(err)
				}
				log.Error("migration pre-check error", t)
				continue
			}
		}
		log.Info("migration pre-check done")

		// do migrate
		err := t.run()
		if err != nil {
			log.Error(err)
		}

		// reset running task
		m.lck.Lock()
		m.runningTask = nil
		m.lck.Unlock()
	}
}
func (self *MYSQLStorage) Init() error {
	var err error
	self.db, err = sql.Open("mysql", self.Source)
	if err != nil {
		log.Error(err)
		return err
	}
	return self.db.Ping()
}
// redirect sends the request to the backend again, this time to the new server
// indicated by the redis cluster redirection response.
func (s *Session) redirect(server string, plRsp *PipelineResponse, ask bool) {
	var conn net.Conn
	var err error

	plRsp.err = nil

	conn, err = s.connPool.GetConn(server)
	if err != nil {
		log.Error(err)
		plRsp.err = err
		return
	}
	defer func() {
		if err != nil {
			log.Error(err)
			conn.(*pool.PoolConn).MarkUnusable()
		}
		conn.Close()
	}()

	reader := bufio.NewReader(conn)
	if ask {
		if _, err = conn.Write(ASK_CMD_BYTES); err != nil {
			plRsp.err = err
			return
		}
	}
	if _, err = conn.Write(plRsp.ctx.cmd.Format()); err != nil {
		plRsp.err = err
		return
	}
	if ask {
		if _, err = resp.ReadData(reader); err != nil {
			plRsp.err = err
			return
		}
	}
	obj := resp.NewObject()
	if err = resp.ReadDataBytes(reader, obj); err != nil {
		plRsp.err = err
	} else {
		plRsp.rsp = obj
	}
}
// DoneJob implements the Storage DoneJob method.
func (self *MYSQLStorage) DoneJob(j *Job) error {
	log.Debug("DoneJob:", j.Handle)
	_, err := self.db.Exec(delJobSQL, j.Handle)
	if err != nil {
		log.Error(err, j.Handle)
		return err
	}
	return nil
}
func (self *ResMan) handleMesosStatusUpdate(t *cmdMesosStatusUpdate) {
	status := t.status
	defer func() { t.wait <- struct{}{} }()

	taskId := status.TaskId.GetValue()
	log.Debugf("Received task %+v status: %+v", taskId, status)

	currentTask := self.running.Get(taskId)
	if currentTask == nil {
		task, err := scheduler.GetTaskByTaskId(taskId)
		if err != nil {
			return
		}
		job, err := scheduler.GetJobByName(task.JobName)
		if err != nil {
			return
		}
		currentTask = &Task{Tid: task.TaskId, job: job, SlaveId: status.SlaveId.GetValue(), state: taskRuning}
		self.running.Add(currentTask.Tid, currentTask) // add this orphan task to the running queue
	}

	pwd := string(status.Data)
	if len(pwd) > 0 && len(currentTask.Pwd) == 0 {
		currentTask.Pwd = pwd
	}

	currentTask.LastUpdate = time.Now()

	switch *status.State {
	case mesos.TaskState_TASK_FINISHED:
		currentTask.job.LastSuccessTs = time.Now().Unix()
		self.removeRunningTask(taskId)
	case mesos.TaskState_TASK_FAILED, mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
		currentTask.job.LastErrTs = time.Now().Unix()
		self.removeRunningTask(taskId)
	case mesos.TaskState_TASK_STAGING:
		//todo: update something
	case mesos.TaskState_TASK_STARTING:
		//todo: update something
	case mesos.TaskState_TASK_RUNNING:
		//todo: update something
	default:
		log.Fatalf("should never happen %+v", status.State)
	}

	persistentTask, err := scheduler.GetTaskByTaskId(taskId)
	if err != nil {
		log.Error(err)
	}

	self.saveTaskStatus(persistentTask, status, currentTask)
}
func (top *Topology) Close(proxyName string) {
	// delete fence znode
	pi, err := models.GetProxyInfo(top.zkConn, top.ProductName, proxyName)
	if err != nil {
		log.Errorf("killing fence error, proxy %s does not exist", proxyName)
	} else {
		zkhelper.DeleteRecursive(top.zkConn, path.Join(models.GetProxyFencePath(top.ProductName), pi.Addr), -1)
	}
	// delete ephemeral znode
	zkhelper.DeleteRecursive(top.zkConn, path.Join(models.GetProxyPath(top.ProductName), proxyName), -1)
	top.zkConn.Close()
}
func JobExists(id string) bool {
	j, err := GetJobById(id)
	if err != nil {
		log.Error(err, id)
		return false
	}
	return j.Id != 0
}
func (s *Server) handleMigrateState(slotIndex int, key []byte) error {
	shd := s.slots[slotIndex]
	if shd.slotInfo.State.Status != models.SLOT_STATUS_MIGRATE {
		return nil
	}

	if shd.migrateFrom == nil {
		log.Fatalf("migrateFrom not exist %+v", shd)
	}

	if shd.dst.Master() == shd.migrateFrom.Master() {
		log.Fatalf("the same migrate src and dst, %+v", shd)
	}

	redisConn, err := s.pools.GetConn(shd.migrateFrom.Master())
	if err != nil {
		return errors.Trace(err)
	}
	defer s.pools.ReleaseConn(redisConn)

	redisReader := redisConn.(*redispool.PooledConn).BufioReader()

	err = WriteMigrateKeyCmd(redisConn.(*redispool.PooledConn), shd.dst.Master(), 30*1000, key)
	if err != nil {
		redisConn.Close()
		log.Warningf("migrate key %s error, from %s to %s",
			string(key), shd.migrateFrom.Master(), shd.dst.Master())
		return errors.Trace(err)
	}

	// handle migrate result
	resp, err := parser.Parse(redisReader)
	if err != nil {
		redisConn.Close()
		return errors.Trace(err)
	}

	result, err := resp.Bytes()
	log.Debug("migrate", string(key), "from", shd.migrateFrom.Master(), "to", shd.dst.Master(), string(result))

	if resp.Type == parser.ErrorResp {
		redisConn.Close()
		log.Error(string(key), string(resp.Raw), "migrateFrom", shd.migrateFrom.Master())
		return errors.New(string(resp.Raw))
	}

	s.counter.Add("Migrate", 1)
	return nil
}
// GetJobs implements the Storage GetJobs method.
func (self *MYSQLStorage) GetJobs() ([]*Job, error) {
	rows, err := self.db.Query(getJobsSQL)
	if err != nil {
		log.Error(err)
		return nil, err
	}
	defer rows.Close()

	jobs := make([]*Job, 0)
	for rows.Next() {
		j := &Job{}
		if err := rows.Scan(&j.Handle, &j.Id, &j.Priority, &j.CreateAt, &j.FuncName, &j.Data); err != nil {
			log.Errorf("rows.Scan() failed (%v)", err)
			return nil, err
		}
		jobs = append(jobs, j)
	}
	return jobs, nil
}