func handleCrashedServer(s *models.Server) error {
	switch s.Type {
	case models.SERVER_TYPE_MASTER:
		// get slave and do promote
		slave, err := getSlave(s)
		if err != nil {
			log.Warning(errors.ErrorStack(err))
			return err
		}

		log.Infof("try promote %+v", slave)
		err = callHttp(nil, genUrl(*apiServer, "/api/server_group/", slave.GroupId, "/promote"), "POST", slave)
		if err != nil {
			log.Errorf("do promote %v failed %v", slave, errors.ErrorStack(err))
			return err
		}
		refreshSlave(s) // refresh
	case models.SERVER_TYPE_SLAVE:
		log.Errorf("slave is down: %+v", s)
	case models.SERVER_TYPE_OFFLINE:
		// no need to handle it
	default:
		log.Fatalf("unknown type %+v", s)
	}
	return nil
}
func TestNewAction(t *testing.T) {
	fakeZkConn := zkhelper.NewConn()
	err := NewAction(fakeZkConn, productName, ACTION_TYPE_SLOT_CHANGED, nil, "desc", false)
	if err != nil {
		t.Error(errors.ErrorStack(err))
	}

	prefix := GetWatchActionPath(productName)
	if exist, _, err := fakeZkConn.Exists(prefix); !exist {
		t.Error(errors.ErrorStack(err))
	}

	// the action node should exist under the watch path
	d, _, err := fakeZkConn.Get(prefix + "/0000000001")
	if err != nil {
		t.Error(errors.ErrorStack(err))
	}

	// get the action data from the response node
	d, _, err = fakeZkConn.Get(GetActionResponsePath(productName) + "/0000000001")
	if err != nil {
		t.Error(errors.ErrorStack(err))
	}

	var action Action
	json.Unmarshal(d, &action)
	if action.Desc != "desc" || action.Type != ACTION_TYPE_SLOT_CHANGED {
		t.Error("create action error")
	}
}
func main() {
	flag.Parse()

	conf, err := config.NewConfigFromFile(*configFile)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	if err = initLogger(conf); err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	server, err := service.NewServer(conf)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	err = metrics.Init(conf)
	if err != nil {
		log.Fatalf("init metrics err: %v", err)
	}

	err = server.Start()
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, os.Interrupt, os.Kill)

	log.Debug("Process start")
	<-c
	server.Stop()
	log.Debug("Process stop")
}
// Run reads client queries and writes query results to the client in a loop.
// If a panic occurs during query handling, it is recovered and the panic error is logged.
// This function returns, and the connection is closed, if there is an IO error or a panic.
func (cc *clientConn) Run() {
	defer func() {
		r := recover()
		if r != nil {
			const size = 4096
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Errorf("lastCmd %s, %v, %s", cc.lastCmd, r, buf)
		}
		cc.Close()
	}()

	for {
		cc.alloc.Reset()
		data, err := cc.readPacket()
		if err != nil {
			if terror.ErrorNotEqual(err, io.EOF) {
				log.Error(errors.ErrorStack(err))
			}
			return
		}

		if err := cc.dispatch(data); err != nil {
			if terror.ErrorEqual(err, io.EOF) {
				return
			}
			log.Warnf("dispatch error %s, %s", errors.ErrorStack(err), cc)
			log.Warnf("cmd: %s", string(data[1:]))
			cc.writeError(err)
		}

		cc.pkt.sequence = 0
	}
}
func (s *Server) waitOnline() {
	s.mu.Lock()
	defer s.mu.Unlock()

	for {
		pi, err := s.top.GetProxyInfo(s.pi.Id)
		if err != nil {
			log.Fatal(errors.ErrorStack(err))
		}

		if pi.State == models.PROXY_STATE_MARK_OFFLINE {
			s.handleMarkOffline()
		}

		if pi.State == models.PROXY_STATE_ONLINE {
			s.pi.State = pi.State
			println("good, we are on line", s.pi.Id)
			log.Info("we are online", s.pi.Id)
			_, err := s.top.WatchNode(path.Join(models.GetProxyPath(s.top.ProductName), s.pi.Id), s.evtbus)
			if err != nil {
				log.Fatal(errors.ErrorStack(err))
			}
			return
		}

		println("wait to be online ", s.pi.Id)
		log.Warning(s.pi.Id, "wait to be online")
		time.Sleep(3 * time.Second)
	}
}
func (s *Server) groupUpdate(group string, queue string, write string, read string, url string, ips string) string {
	config, err := s.queue.GetSingleGroup(group, queue)
	if err != nil {
		log.Debugf("GetSingleGroup err:%s", errors.ErrorStack(err))
		return `{"action":"update","result":false}`
	}

	if write != "" {
		w, err := strconv.ParseBool(write)
		if err == nil {
			config.Write = w
		}
	}
	if read != "" {
		r, err := strconv.ParseBool(read)
		if err == nil {
			config.Read = r
		}
	}
	if url != "" {
		config.Url = url
	}
	if ips != "" {
		config.Ips = strings.Split(ips, ",")
	}

	err = s.queue.UpdateGroup(group, queue, config.Write, config.Read, config.Url, config.Ips)
	if err != nil {
		log.Debugf("groupUpdate failed: %s", errors.ErrorStack(err))
		return `{"action":"update","result":false}`
	}
	return `{"action":"update","result":true}`
}
func (s *Scan) nextBatch() int {
	startKey := s.nextStartKey
	if startKey == nil {
		startKey = s.StartRow
	}

	// Notice: ignore error here.
	// TODO: add error check, now only add a log.
	rs, err := s.getData(startKey, 0)
	if err != nil {
		log.Errorf("scan next batch failed - [startKey=%q], %v", startKey, errors.ErrorStack(err))
	}

	// The current region returned 0 rows, try switching to the next region.
	if len(rs) == 0 && len(s.nextStartKey) > 0 {
		// TODO: add error check, now only add a log.
		rs, err = s.getData(s.nextStartKey, 0)
		if err != nil {
			log.Errorf("scan next batch failed - [startKey=%q], %v", s.nextStartKey, errors.ErrorStack(err))
		}
	}

	s.cache = rs
	return len(s.cache)
}
func (s *Server) onConn(c net.Conn) {
	conn, err := s.newConn(c)
	if err != nil {
		log.Errorf("newConn error %s", errors.ErrorStack(err))
		return
	}

	if err := conn.handshake(); err != nil {
		log.Errorf("handshake error %s", errors.ErrorStack(err))
		c.Close()
		return
	}

	conn.ctx, err = s.driver.OpenCtx(conn.capability, uint8(conn.collation), conn.dbname)
	if err != nil {
		log.Errorf("open ctx error %s", errors.ErrorStack(err))
		c.Close()
		return
	}

	defer func() {
		log.Infof("close %s", conn)
	}()

	s.rwlock.Lock()
	s.clients[conn.connectionID] = conn
	s.rwlock.Unlock()

	conn.Run()
}
func (s *session) WritingLoop() {
	s.lastUnsentResponseSeq = 1
	for {
		select {
		case resp, ok := <-s.backQ:
			if !ok {
				s.Close()
				s.closeSignal.Done()
				return
			}

			flush, err := s.handleResponse(resp)
			if err != nil {
				log.Warning(s.RemoteAddr(), resp.ctx, errors.ErrorStack(err))
				s.Close() // notify reader to exit
				continue
			}

			if flush && len(s.backQ) == 0 {
				err := s.w.Flush()
				if err != nil {
					s.Close() // notify reader to exit
					log.Warning(s.RemoteAddr(), resp.ctx, errors.ErrorStack(err))
					continue
				}
			}
		}
	}
}
func (s *ApiServer) WebhookHandler(w http.ResponseWriter, r *http.Request) {
	// TODO: post a tyrant job and start the task
	err := r.ParseForm()
	if err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}

	vars := mux.Vars(r)
	log.Debug(vars)

	repo := r.Form.Get("repo")
	log.Debug(r.Form, "repo", repo)

	h := &task.JobHelper{Server: s.Server, ExecutorUrls: s.ExecutorUrls}
	job := h.BuildRepoJob(repo)
	if err := h.CreateJob(job); err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}

	log.Debugf("%+v", job)
	if err := h.RunJob(job); err != nil {
		http.Error(w, errors.ErrorStack(err), http.StatusInternalServerError)
		return
	}
}
func main() {
	tidb.RegisterLocalStore("boltdb", boltdb.Driver{})
	tidb.RegisterStore("tikv", tikv.Driver{})
	metric.RunMetric(3 * time.Second)
	printer.PrintTiDBInfo()
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	if *lease < 0 {
		log.Fatalf("invalid lease seconds %d", *lease)
	}

	tidb.SetSchemaLease(time.Duration(*lease) * time.Second)

	cfg := &server.Config{
		Addr:       fmt.Sprintf(":%s", *port),
		LogLevel:   *logLevel,
		StatusAddr: fmt.Sprintf(":%s", *statusPort),
		Socket:     *socket,
	}

	log.SetLevelByString(cfg.LogLevel)

	store, err := tidb.NewStore(fmt.Sprintf("%s://%s", *store, *storePath))
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	// Create a session to load the information schema.
	se, err := tidb.CreateSession(store)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
	se.Close()

	var driver server.IDriver
	driver = server.NewTiDBDriver(store)
	var svr *server.Server
	svr, err = server.NewServer(cfg, driver)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		svr.Close()
		os.Exit(0)
	}()

	log.Error(svr.Run())
}
func (s *Server) monitorHandler(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	monitorType := r.FormValue("type")
	queue := r.FormValue("queue")
	group := r.FormValue("group")

	end := time.Now().Unix()
	start := end - 5*60 // default to the last 5 minutes
	interval := int64(1)

	startTime := r.FormValue("start")
	if startTime != "" {
		start, _ = strconv.ParseInt(startTime, 10, 0)
	}
	endTime := r.FormValue("end")
	if endTime != "" {
		end, _ = strconv.ParseInt(endTime, 10, 0)
	}
	intervalTime := r.FormValue("interval")
	if intervalTime != "" {
		interval, _ = strconv.ParseInt(intervalTime, 10, 0)
	}

	var result string
	switch monitorType {
	case "send":
		m, err := s.queue.GetSendMetrics(queue, group, start, end, interval)
		if err != nil {
			log.Debugf("GetSendMetrics err: %s", errors.ErrorStack(err))
			return
		}
		sm, err := json.Marshal(m)
		if err != nil {
			log.Debugf("GetSendMetrics Marshal err: %s", err)
			return
		}
		result = string(sm)
	case "receive":
		m, err := s.queue.GetReceiveMetrics(queue, group, start, end, interval)
		if err != nil {
			log.Debugf("GetReceiveMetrics err: %s", errors.ErrorStack(err))
			return
		}
		rm, err := json.Marshal(m)
		if err != nil {
			log.Debugf("GetReceiveMetrics Marshal err: %s", err)
			return
		}
		result = string(rm)
	default:
		result = "error, param type=" + monitorType + " not support!"
	}
	fmt.Fprint(w, result)
}
// runDDLJob runs a DDL job.
func (d *ddl) runDDLJob(t *meta.Meta, job *model.Job) {
	log.Infof("[ddl] run DDL job %s", job)
	if job.IsFinished() {
		return
	}

	if job.State != model.JobRollback {
		job.State = model.JobRunning
	}

	var err error
	switch job.Type {
	case model.ActionCreateSchema:
		err = d.onCreateSchema(t, job)
	case model.ActionDropSchema:
		err = d.onDropSchema(t, job)
	case model.ActionCreateTable:
		err = d.onCreateTable(t, job)
	case model.ActionDropTable:
		err = d.onDropTable(t, job)
	case model.ActionAddColumn:
		err = d.onAddColumn(t, job)
	case model.ActionDropColumn:
		err = d.onDropColumn(t, job)
	case model.ActionModifyColumn:
		err = d.onModifyColumn(t, job)
	case model.ActionAddIndex:
		err = d.onCreateIndex(t, job)
	case model.ActionDropIndex:
		err = d.onDropIndex(t, job)
	case model.ActionAddForeignKey:
		err = d.onCreateForeignKey(t, job)
	case model.ActionDropForeignKey:
		err = d.onDropForeignKey(t, job)
	case model.ActionTruncateTable:
		err = d.onTruncateTable(t, job)
	default:
		// Invalid job, cancel it.
		job.State = model.JobCancelled
		err = errInvalidDDLJob.Gen("invalid ddl job %v", job)
	}

	// Save errors in job, so that others can know errors happened.
	if err != nil {
		// If job is not cancelled, we should log this error.
		if job.State != model.JobCancelled {
			log.Errorf("[ddl] run ddl job err %v", errors.ErrorStack(err))
		} else {
			log.Infof("[ddl] the job is normal to cancel because %v", errors.ErrorStack(err))
		}

		job.Error = toTError(err)
		job.ErrorCount++
	}
}
func (s *Server) processAction(e interface{}) {
	if strings.Index(GetEventPath(e), models.GetProxyPath(s.top.ProductName)) == 0 {
		// proxy event, should be an order for me to suicide
		s.handleProxyCommand()
		return
	}

	// re-watch
	nodes, err := s.top.WatchChildren(models.GetWatchActionPath(s.top.ProductName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	if len(seqs) == 0 || !s.top.IsChildrenChangedEvent(e) {
		return
	}

	// get last pos
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			break
		}
	}

	if index < 0 {
		return
	}

	actions := seqs[index:]
	for _, seq := range actions {
		exist, err := s.top.Exist(path.Join(s.top.GetActionResponsePath(seq), s.pi.Id))
		if err != nil {
			log.Fatal(errors.ErrorStack(err))
		}
		if exist {
			continue
		}
		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}

	s.lastActionSeq = seqs[len(seqs)-1]
}
func (s *Server) RegisterAndWait() {
	_, err := s.top.CreateProxyInfo(&s.pi)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	_, err = s.top.CreateProxyFenceNode(&s.pi)
	if err != nil {
		log.Warning(errors.ErrorStack(err))
	}

	s.registerSignal()
	s.waitOnline()
}
func main() {
	flag.Parse()

	b := replication.NewBinlogSyncer(101, *flavor)

	if err := b.RegisterSlave(*host, uint16(*port), *user, *password); err != nil {
		fmt.Printf("Register slave error: %v \n", errors.ErrorStack(err))
		return
	}

	b.SetRawMode(*rawMode)

	if *semiSync {
		if err := b.EnableSemiSync(); err != nil {
			fmt.Printf("Enable semi sync replication mode err: %v\n", errors.ErrorStack(err))
			return
		}
	}

	pos := mysql.Position{*file, uint32(*pos)}
	if len(*backupPath) > 0 {
		// backup requires raw mode
		b.SetRawMode(true)

		err := b.StartBackup(*backupPath, pos, 0)
		if err != nil {
			fmt.Printf("Start backup error: %v\n", errors.ErrorStack(err))
			return
		}
	} else {
		s, err := b.StartSync(pos)
		if err != nil {
			fmt.Printf("Start sync error: %v\n", errors.ErrorStack(err))
			return
		}

		for {
			e, err := s.GetEvent()
			if err != nil {
				fmt.Printf("Get event error: %v\n", errors.ErrorStack(err))
				return
			}

			e.Dump(os.Stdout)
		}
	}
}
func (s *Server) checkAndDoTopoChange(seq int) (needResponse bool) {
	act, err := s.top.GetActionWithSeq(int64(seq))
	if err != nil {
		log.Fatal(errors.ErrorStack(err), "action seq", seq)
	}

	if !StringsContain(act.Receivers, s.pi.Id) {
		// no need to respond
		return false
	}

	switch act.Type {
	case models.ACTION_TYPE_SLOT_MIGRATE, models.ACTION_TYPE_SLOT_CHANGED,
		models.ACTION_TYPE_SLOT_PREMIGRATE:
		slot := &models.Slot{}
		s.getActionObject(seq, slot)
		s.fillSlot(slot.Id, true)
	case models.ACTION_TYPE_SERVER_GROUP_CHANGED:
		serverGroup := &models.ServerGroup{}
		s.getActionObject(seq, serverGroup)
		s.OnGroupChange(serverGroup.Id)
	case models.ACTION_TYPE_SERVER_GROUP_REMOVE:
		// do not care
	case models.ACTION_TYPE_MULTI_SLOT_CHANGED:
		param := &models.SlotMultiSetParam{}
		s.getActionObject(seq, param)
		s.OnSlotRangeChange(param)
	default:
		log.Fatalf("unknown action %+v", act)
	}
	return true
}
// LoggerMiddleware logs the error to stderr.
func LoggerMiddleware(ctx context.Context, w http.ResponseWriter, r *http.Request, next NextMiddlewareFn) error {
	if err := next(ctx); err != nil {
		log.Printf("HANDLER ERROR\n%s\n", errors.ErrorStack(err))
	}
	return nil
}
func (s *Server) responseAction(seq int64) {
	log.Info("send response", seq)
	err := s.top.DoResponse(int(seq), &s.pi)
	if err != nil {
		log.Error(errors.ErrorStack(err))
	}
}
func TestParserBulk(t *testing.T) {
	sample := "*2\r\n$4\r\nLLEN\r\n$6\r\nmylist\r\n"
	buf := bytes.NewBuffer([]byte(sample))
	r := bufio.NewReader(buf)
	resp, err := Parse(r)
	if err != nil {
		t.Error(errors.ErrorStack(err))
	}
	if resp == nil {
		t.Fatal("unknown error: nil resp")
	}

	b, err := resp.Bytes()
	if err != nil {
		t.Error(err)
	}
	if len(b) != len(sample) {
		t.Error("to bytes error", string(b), "................", sample)
	}

	op, keys, err := resp.GetOpKeys()
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(op, []byte("LLEN")) {
		t.Errorf("get op error, got %s, expect LLEN", string(op))
	}
	if !bytes.Equal(keys[0], []byte("mylist")) {
		t.Error("get key error")
	}
}
// onWorker handles async online schema changes. It tries to become the owner first,
// then waits on or pulls from the job queue to handle schema change jobs.
func (d *ddl) onWorker() {
	defer d.wait.Done()

	// We use 4 * lease time to check the owner's timeout, so here we update the owner's
	// status every 2 * lease time. If lease is 0, we use a default of 10s.
	checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)

	ticker := time.NewTicker(checkTime)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			log.Debugf("wait %s to check DDL status again", checkTime)
		case <-d.jobCh:
		case <-d.quitCh:
			return
		}

		err := d.handleJobQueue()
		if err != nil {
			log.Errorf("handle job err %v", errors.ErrorStack(err))
		}
	}
}
func main() {
	flag.Parse()

	if err := run(); err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
func TestKeys(t *testing.T) {
	table := []string{
		"*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",
	}

	for _, s := range table {
		buf := bytes.NewBuffer([]byte(s))
		r := bufio.NewReader(buf)
		resp, err := Parse(r)
		if err != nil {
			t.Error(errors.ErrorStack(err))
		}

		b, err := resp.Bytes()
		if err != nil {
			t.Error(err)
		}
		if s != string(b) {
			t.Fatalf("not match, expect %s, got %s", s, string(b))
		}

		_, keys, err := resp.GetOpKeys()
		if err != nil {
			t.Error(err)
		}
		if len(keys) != 1 || string(keys[0]) != "bar" {
			t.Error("Keys failed", keys)
		}
	}
}
func (t *tenet) OpenService() (TenetService, error) {
	ds, err := t.Driver.Service()
	if err != nil {
		// TODO(waigani) these logs are quick hacks. Work out the error paths and log them all at the root.
		log.Print(err.Error())
		return nil, errors.Trace(err)
	}

	cfg := &api.Config{}
	for k, v := range t.options {
		cfg.Options = append(cfg.Options, &api.Option{
			Name:  k,
			Value: fmt.Sprintf("%v", v),
		})
	}

	s := &tenetService{
		Service:      ds,
		cfg:          cfg,
		editFilename: t.Driver.EditFilename,
		editIssue:    t.Driver.EditIssue,
		mutex:        &sync.Mutex{},
	}
	if err := s.start(); err != nil {
		log.Println("got err opening service")
		// TODO(waigani) add retry logic here. 1. Keep retrying until the service
		// is up. 2. Keep retrying until the service is connected.
		log.Printf("err: %#v", errors.ErrorStack(err))
		return nil, errors.Trace(err)
	}
	log.Print("opened service, no issue")
	return s, nil
}
func (ex *defaultExecuter) completeTask(id string, task func(string) error, onFailure func(string, string)) {
	defer func() {
		if r := recover(); r != nil {
			log.WithField("task", id).
				Errorln("Task failed", r)
			debug.PrintStack()
			go onFailure(id, "The error message is below. Please check logs for more details."+"\n\n"+"panic occurred")
			ex.cMap.setStatus(id, FAILURE)
		}
	}()

	// Run the task.
	if err := task(id); err != nil {
		log.WithFields(log.Fields{
			"task":  id,
			"error": errors.ErrorStack(err),
		}).Error("Task failed")
		go onFailure(id, "The error message is below. Please check logs for more details."+"\n\n"+errors.ErrorStack(err))
		ex.cMap.setStatus(id, FAILURE)
		return
	}

	log.WithField("task", id).
		Info("Task succeeded")
	ex.cMap.setStatus(id, SUCCESS)
}
func TestMulOpKeys(t *testing.T) {
	table := []string{
		"*7\r\n$4\r\nmset\r\n$4\r\nkey1\r\n$6\r\nvalue1\r\n$4\r\nkey2\r\n$6\r\nvalue2\r\n$4\r\nkey3\r\n$0\r\n\r\n",
	}

	for _, s := range table {
		buf := bytes.NewBuffer([]byte(s))
		r := bufio.NewReader(buf)
		resp, err := Parse(r)
		if err != nil {
			t.Error(errors.ErrorStack(err))
		}

		b, err := resp.Bytes()
		if err != nil {
			t.Error(err)
		}
		if s != string(b) {
			t.Fatalf("not match, expect %s, got %s", s, string(b))
		}

		_, keys, err := resp.GetOpKeys()
		if err != nil {
			t.Error(err)
		}
		if len(keys) != 6 || string(keys[5]) != "" {
			t.Error("Keys failed", string(keys[5]))
		}
	}
}
func (c *MigrateCommand) Run(ctx *cmd.Context) (err error) {
	defer func() {
		if err != nil {
			fmt.Fprint(ctx.Stdout, "error stack:\n"+errors.ErrorStack(err))
		}
	}()

	loggo.GetLogger("juju").SetLogLevel(loggo.DEBUG)

	conf, err := agent.ReadConfig(agent.ConfigPath(c.dataDir, c.machineTag))
	if err != nil {
		return err
	}

	info, ok := conf.MongoInfo()
	if !ok {
		return errors.Errorf("no state info available")
	}

	st, err := state.Open(conf.Model(), info, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	if err != nil {
		return err
	}
	defer st.Close()

	if c.operation == "export" {
		return c.exportModel(ctx, st)
	}
	return c.importModel(ctx, st)
}
// LoggerMiddleware logs the error to Cloud Logging.
func LoggerMiddleware(ctx context.Context, w http.ResponseWriter, r *http.Request, next NextMiddlewareFn) error {
	if err := next(ctx); err != nil {
		log.Errorf(ctx, "%s", errors.ErrorStack(err))
	}
	return nil
}
// batchRawPut is a blind-put benchmark.
func batchRawPut(value []byte) {
	cli, err := tikv.NewRawKVClient(strings.Split(*pdAddr, ","))
	if err != nil {
		log.Fatal(err)
	}

	wg := sync.WaitGroup{}
	base := *dataCnt / *workerCnt
	wg.Add(*workerCnt)
	for i := 0; i < *workerCnt; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < base; j++ {
				k := base*i + j
				key := fmt.Sprintf("key_%d", k)
				// use a worker-local err to avoid a data race on the outer err
				if err := cli.Put([]byte(key), value); err != nil {
					log.Fatal(errors.ErrorStack(err))
				}
			}
		}(i)
	}
	wg.Wait()
}
// apiAddServerToGroup adds a redis server to an existing server group.
func apiAddServerToGroup(server models.Server, param martini.Params) (int, string) {
	groupId, _ := strconv.Atoi(param["id"])

	lock := utils.GetZkLock(safeZkConn, globalEnv.ProductName())
	lock.Lock(fmt.Sprintf("add server to group, %+v", server))
	defer func() {
		err := lock.Unlock()
		if err != nil {
			log.Warning(err)
		}
	}()

	// check that the group exists first
	serverGroup := models.NewServerGroup(globalEnv.ProductName(), groupId)
	exists, err := serverGroup.Exists(safeZkConn)
	if err != nil {
		log.Warning(err)
		return 500, err.Error()
	}

	// create a new group if it does not exist
	if !exists {
		if err := serverGroup.Create(safeZkConn); err != nil {
			return 500, err.Error()
		}
	}
	if err := serverGroup.AddServer(safeZkConn, &server); err != nil {
		log.Warning(errors.ErrorStack(err))
		return 500, err.Error()
	}
	return jsonRetSucc()
}