// TableFromMeta creates a Table instance from model.TableInfo.
func TableFromMeta(alloc autoid.Allocator, tblInfo *model.TableInfo) table.Table {
	if tblInfo.State == model.StateNone {
		log.Fatalf("table %s can't be in none state", tblInfo.Name)
	}

	columns := make([]*column.Col, 0, len(tblInfo.Columns))
	for _, colInfo := range tblInfo.Columns {
		if colInfo.State == model.StateNone {
			log.Fatalf("column %s can't be in none state", colInfo.Name)
		}
		col := &column.Col{ColumnInfo: *colInfo}
		columns = append(columns, col)
	}

	t := NewTable(tblInfo.ID, tblInfo.Name.O, columns, alloc)

	for _, idxInfo := range tblInfo.Indices {
		if idxInfo.State == model.StateNone {
			log.Fatalf("index %s can't be in none state", idxInfo.Name)
		}
		idx := &column.IndexedCol{
			IndexInfo: *idxInfo,
			X:         kv.NewKVIndex(t.indexPrefix, idxInfo.Name.L, idxInfo.Unique),
		}
		t.AddIndex(idx)
	}

	t.state = tblInfo.State
	return t
}
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}

	srvConf.ProductName, _ = conf.ReadString("product", "test")
	if len(srvConf.ProductName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}

	srvConf.CoordinatorAddr, _ = conf.ReadString("coordinator_addr", "")
	if len(srvConf.CoordinatorAddr) == 0 {
		log.Fatalf("invalid config: coordinator_addr entry is missing in %s", configFile)
	}
	srvConf.CoordinatorAddr = strings.TrimSpace(srvConf.CoordinatorAddr)

	srvConf.Coordinator, _ = conf.ReadString("coordinator", "zookeeper")
	srvConf.StoreAuth, _ = conf.ReadString("store_auth", "")

	// The configs below should be set from command-line flags; we will remove this code later.
	srvConf.NetTimeout, _ = conf.ReadInt("net_timeout", 5)
	srvConf.Proto, _ = conf.ReadString("proto", "tcp")
	srvConf.Addr, _ = conf.ReadString("addr", "")
	srvConf.HTTPAddr, _ = conf.ReadString("http_addr", "")
	srvConf.ProxyID, _ = conf.ReadString("proxy_id", "")
	srvConf.PidFile, _ = conf.ReadString("pidfile", "")
	srvConf.ProxyAuth, _ = conf.ReadString("proxy_auth", "")

	return srvConf, nil
}
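// A hypothetical config file matching the keys read above; the ini-like
// format is assumed from utils.InitConfigFromFile and isn't confirmed here.
//
//	product = test
//	coordinator_addr = 127.0.0.1:2181
//	coordinator = zookeeper
//	net_timeout = 5
//	proto = tcp
//	addr = 0.0.0.0:19000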
func encodeRawBytes(w *BufWriter, refs ...interface{}) {
	for _, i := range refs {
		var err error
		switch x := i.(type) {
		case byte:
			err = w.WriteByte(x)
		case ObjectCode:
			err = w.WriteByte(byte(x))
		case *uint32:
			err = w.WriteUvarint(uint64(*x))
		case *uint64:
			err = w.WriteUvarint(*x)
		case *int64:
			err = w.WriteVarint(*x)
		case *float64:
			err = w.WriteFloat64(*x)
		case *[]byte:
			err = w.WriteVarbytes(*x)
		case *scoreInt:
			err = w.WriteUint64(uint64(*x))
		default:
			log.Fatalf("unsupported type in row value: %+v", x)
		}
		if err != nil {
			log.Fatalf("encode raw bytes failed - %s", err)
		}
	}
}
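// A minimal usage sketch for encodeRawBytes: pointer arguments select the
// varint/float/bytes encoders, while plain byte and ObjectCode values are
// written directly. Constructing the BufWriter is omitted here because its
// constructor isn't shown in this section.
func exampleEncodeRawBytes(w *BufWriter, code ObjectCode) {
	var ttl uint64 = 1000
	var score int64 = -42
	member := []byte("m")
	encodeRawBytes(w, code, &ttl, &score, &member)
}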
func (s *Server) handleMigrateState(slotIndex int, keys ...[]byte) error {
	shd := s.slots[slotIndex]
	if shd.slotInfo.State.Status != models.SLOT_STATUS_MIGRATE {
		return nil
	}

	if shd.migrateFrom == nil {
		log.Fatalf("migrateFrom not exist %+v", shd)
	}
	if shd.dst.Master() == shd.migrateFrom.Master() {
		log.Fatalf("the same migrate src and dst, %+v", shd)
	}

	redisConn, err := s.pools.GetConn(shd.migrateFrom.Master())
	if err != nil {
		return errors.Trace(err)
	}
	defer s.pools.PutConn(redisConn)

	err = writeMigrateKeyCmd(redisConn, shd.dst.Master(), MigrateKeyTimeoutMs, keys...)
	if err != nil {
		redisConn.Close()
		log.Errorf("migrate key %s error, from %s to %s, err:%v",
			string(keys[0]), shd.migrateFrom.Master(), shd.dst.Master(), err)
		return errors.Trace(err)
	}

	redisReader := redisConn.BufioReader()

	// handle migrate result
	for i := 0; i < len(keys); i++ {
		resp, err := parser.Parse(redisReader)
		if err != nil {
			log.Errorf("migrate key %s error, from %s to %s, err:%v",
				string(keys[i]), shd.migrateFrom.Master(), shd.dst.Master(), err)
			redisConn.Close()
			return errors.Trace(err)
		}

		// The error is deliberately ignored: result is only used for debug logging.
		result, _ := resp.Bytes()
		log.Debug("migrate", string(keys[i]), "from", shd.migrateFrom.Master(),
			"to", shd.dst.Master(), string(result))

		if resp.Type == parser.ErrorResp {
			redisConn.Close()
			log.Error(string(keys[i]), string(resp.Raw), "migrateFrom", shd.migrateFrom.Master())
			return errors.New(string(resp.Raw))
		}
	}

	s.counter.Add("Migrate", int64(len(keys)))
	return nil
}
func createPidFile(name string) {
	os.MkdirAll(path.Dir(name), 0755)
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatalf("create pid file %s err - %s", name, err)
	}
	defer f.Close()

	if _, err = f.WriteString(fmt.Sprintf("%d", os.Getpid())); err != nil {
		log.Fatalf("write pid into pid file %s err - %s", name, err)
	}
}
func (s *Server) handleTopoEvent() {
	for {
		select {
		case r := <-s.reqCh:
			if s.slots[r.slotIdx].slotInfo.State.Status == models.SLOT_STATUS_PRE_MIGRATE {
				s.bufferedReq.PushBack(r)
				continue
			}
			for e := s.bufferedReq.Front(); e != nil; {
				next := e.Next()
				if s.dispatch(e.Value.(*PipelineRequest)) {
					s.bufferedReq.Remove(e)
				}
				e = next
			}
			if !s.dispatch(r) {
				log.Fatalf("should never happen, %+v, %+v", r, s.slots[r.slotIdx].slotInfo)
			}
		case e := <-s.evtbus:
			switch evt := e.(type) {
			case *killEvent:
				s.handleMarkOffline()
				evt.done <- nil
			default:
				if s.top.IsSessionExpiredEvent(e) {
					log.Fatalf("session expired: %+v", e)
				}
				evtPath := GetEventPath(e)
				log.Infof("got event %s, %v, lastActionSeq %d", s.pi.ID, e, s.lastActionSeq)
				if strings.HasPrefix(evtPath, models.GetActionResponsePath(s.conf.ProductName)) {
					seq, err := strconv.Atoi(path.Base(evtPath))
					if err != nil {
						log.Warning(err)
					} else if seq < s.lastActionSeq {
						log.Infof("ignore, lastActionSeq %d, seq %d", s.lastActionSeq, seq)
						continue
					}
				}
				s.processAction(e)
			}
		}
	}
}
func (do *Domain) loadSchemaInLoop(lease time.Duration) {
	ticker := time.NewTicker(lease)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			err := do.Reload()
			// We may close the store in tests while this loop is still
			// running, so we can't panic on ErrDBClosed; just return.
			if terror.ErrorEqual(err, localstore.ErrDBClosed) {
				return
			} else if err != nil {
				log.Fatalf("[ddl] reload schema err %v", errors.ErrorStack(err))
			}
		case newLease := <-do.leaseCh:
			if lease == newLease {
				// Nothing to do.
				continue
			}

			lease = newLease
			// Reset the ticker too.
			ticker.Stop()
			ticker = time.NewTicker(lease)
		}
	}
}
func (e *RowsEvent) Decode(data []byte) error {
	pos := 0
	e.TableID = FixedLengthInt(data[0:e.tableIDSize])
	pos += e.tableIDSize

	e.Flags = binary.LittleEndian.Uint16(data[pos:])
	pos += 2

	if e.Version == 2 {
		dataLen := binary.LittleEndian.Uint16(data[pos:])
		pos += 2
		e.ExtraData = data[pos : pos+int(dataLen-2)]
		pos += int(dataLen - 2)
	}

	var n int
	e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
	pos += n

	bitCount := bitmapByteSize(int(e.ColumnCount))
	e.ColumnBitmap1 = data[pos : pos+bitCount]
	pos += bitCount

	if e.needBitmap2 {
		e.ColumnBitmap2 = data[pos : pos+bitCount]
		pos += bitCount
	}

	var ok bool
	e.Table, ok = e.tables[e.TableID]
	if !ok {
		return errors.Errorf("invalid table id %d, no corresponding table map event", e.TableID)
	}

	var err error

	// ... repeat rows until event-end
	defer func() {
		if r := recover(); r != nil {
			log.Fatalf("parse rows event panic %v, data %q, parsed rows %#v, table map %#v\n%s",
				r, data, e, e.Table, Pstack())
		}
	}()

	for pos < len(data) {
		if n, err = e.decodeRows(data[pos:], e.Table, e.ColumnBitmap1); err != nil {
			return errors.Trace(err)
		}
		pos += n

		if e.needBitmap2 {
			if n, err = e.decodeRows(data[pos:], e.Table, e.ColumnBitmap2); err != nil {
				return errors.Trace(err)
			}
			pos += n
		}
	}

	return nil
}
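// For reference, a sketch of the bitmap sizing used above: one bit per
// column, rounded up to whole bytes, which is the usual MySQL binlog
// convention. The real bitmapByteSize helper may differ in signature.
func bitmapByteSizeSketch(columnCount int) int {
	return (columnCount + 7) / 8
}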
func isBootstrapped(store kv.Storage) bool {
	// Check the in-memory cache first.
	_, ok := storeBootstrapped[store.UUID()]
	if ok {
		return true
	}

	// Check the kv store.
	err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		var err error
		t := meta.NewMeta(txn)
		ok, err = t.IsBootstrapped()
		return errors.Trace(err)
	})
	if err != nil {
		log.Fatalf("check bootstrapped err %v", err)
	}

	if ok {
		// The in-memory flag was not set, but another server has already
		// finished bootstrap, so cache the result.
		storeBootstrapped[store.UUID()] = true
	}

	return ok
}
func MustDecodeFromBytes(p []byte) Resp {
	resp, err := DecodeFromBytes(p)
	if err != nil {
		log.Fatalf("decode redis resp from bytes failed - %s", err)
	}
	return resp
}
func MustDecode(r *bufio.Reader) Resp {
	resp, err := Decode(r)
	if err != nil {
		log.Fatalf("decode redis resp failed - %s", err)
	}
	return resp
}
func MustParse(s string) int64 {
	v, err := Parse(s)
	if err != nil {
		log.Fatalf("parse bytesize failed - %s", err)
	}
	return v
}
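// A minimal usage sketch for the Must* convention: callers that treat a
// malformed value as a fatal startup error skip explicit error handling.
// The "512mb" literal assumes the underlying Parse accepts human-readable
// size suffixes with binary (1024-based) units.
func exampleMustParse() {
	maxBuf := MustParse("512mb")
	fmt.Println(maxBuf) // 536870912 under the 1024-based assumption
}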
func main() {
	flag.Parse()

	if *lease < 0 {
		log.Fatalf("invalid lease seconds %d", *lease)
	}
	tidb.SetSchemaLease(time.Duration(*lease) * time.Second)

	log.SetLevelByString(*logLevel)

	store, err := tidb.NewStore(fmt.Sprintf("%s://%s", *store, *storePath))
	if err != nil {
		log.Fatal(err)
	}

	var driver relay.IDriver = relay.NewTiDBDriver(store)
	replayer, err := relay.NewReplayer(driver, *relayPath, *check)
	if err != nil {
		log.Fatal(err)
	}

	replayer.OnRecordRead = func(rec *relay.Record) {
		fmt.Printf("%s\n", rec)
	}

	err = replayer.Run()
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
func (do *Domain) mustReload() {
	// If reload fails, terminate the whole program to guarantee data safety.
	err := do.Reload()
	if err != nil {
		log.Fatalf("[ddl] reload schema err %v", errors.ErrorStack(err))
	}
}
func MustHandlerTable(o interface{}) map[string]HandlerFunc {
	t, err := NewHandlerTable(o)
	if err != nil {
		log.Fatalf("create redis handler map failed - %s", err)
	}
	return t
}
func getStoreBootstrapVersion(store kv.Storage) int64 {
	// Check the in-memory cache first.
	_, ok := storeBootstrapped[store.UUID()]
	if ok {
		return currentBootstrapVersion
	}

	var ver int64
	// Check the kv store.
	err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
		var err error
		t := meta.NewMeta(txn)
		ver, err = t.GetBootstrapVersion()
		return errors.Trace(err)
	})
	if err != nil {
		log.Fatalf("check bootstrapped err %v", err)
	}

	if ver > notBootstrapped {
		// The in-memory flag was not set, but another server has already
		// finished bootstrap, so cache the result.
		storeBootstrapped[store.UUID()] = true
	}

	return ver
}
func MustEncodeToBytes(r Resp) []byte {
	b, err := EncodeToBytes(r)
	if err != nil {
		log.Fatalf("encode redis resp to bytes failed - %s", err)
	}
	return b
}
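// A minimal round-trip sketch for the Must* codec helpers above, using the
// standard RESP wire form of the simple string OK ("+OK\r\n"). Any decode
// or encode failure terminates the process via log.Fatalf.
func exampleRespRoundTrip() {
	resp := MustDecodeFromBytes([]byte("+OK\r\n"))
	b := MustEncodeToBytes(resp)
	fmt.Printf("%q\n", b) // "+OK\r\n"
}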
func (do *Domain) loadSchemaInLoop(lease time.Duration) {
	if lease <= 0 {
		lease = defaultLoadTime
	}
	ticker := time.NewTicker(lease)
	defer ticker.Stop()

	reloadTimeout := getReloadTimeout(lease)
	reloadErrCh := make(chan error, 1)

	for {
		select {
		case <-ticker.C:
			go func() {
				reloadErrCh <- do.reload()
			}()
			select {
			case err := <-reloadErrCh:
				// We may close the store in tests while this loop is still
				// running, so we can't panic on ErrDBClosed; just return.
				if terror.ErrorEqual(err, localstore.ErrDBClosed) {
					return
				} else if err != nil {
					log.Fatalf("reload schema err %v", errors.ErrorStack(err))
				}
			case <-time.After(reloadTimeout):
				log.Fatalf("reload schema timeout: %v", reloadTimeout)
			}
		case newLease := <-do.leaseCh:
			if newLease <= 0 {
				newLease = defaultLoadTime
			}
			if lease == newLease {
				// Nothing to do.
				continue
			}

			lease = newLease
			reloadTimeout = getReloadTimeout(lease)
			// Reset the ticker too.
			ticker.Stop()
			ticker = time.NewTicker(lease)
		}
	}
}
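// getReloadTimeout isn't shown in this section. Purely as an assumption for
// illustration, a plausible sketch derives the timeout from the lease with a
// floor so short leases don't make every reload time out; the real helper
// may use entirely different constants.
func getReloadTimeoutSketch(lease time.Duration) time.Duration {
	timeout := lease / 2
	if timeout < time.Second {
		timeout = time.Second
	}
	return timeout
}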
func main() {
	tidb.RegisterLocalStore("boltdb", boltdb.Driver{})
	tidb.RegisterStore("tikv", tikv.Driver{})
	metric.RunMetric(3 * time.Second)
	printer.PrintTiDBInfo()
	runtime.GOMAXPROCS(runtime.NumCPU())

	flag.Parse()

	if *lease < 0 {
		log.Fatalf("invalid lease seconds %d", *lease)
	}
	tidb.SetSchemaLease(time.Duration(*lease) * time.Second)

	cfg := &server.Config{
		Addr:       fmt.Sprintf(":%s", *port),
		LogLevel:   *logLevel,
		StatusAddr: fmt.Sprintf(":%s", *statusPort),
		Socket:     *socket,
	}

	log.SetLevelByString(cfg.LogLevel)
	store, err := tidb.NewStore(fmt.Sprintf("%s://%s", *store, *storePath))
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	// Create a session to load the information schema.
	se, err := tidb.CreateSession(store)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
	se.Close()

	var driver server.IDriver = server.NewTiDBDriver(store)
	svr, err := server.NewServer(cfg, driver)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}

	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		svr.Close()
		os.Exit(0)
	}()

	log.Error(svr.Run())
}
func register(name string, f CommandFunc) {
	funcName := strings.ToLower(name)
	if _, ok := globalCommands[funcName]; ok {
		log.Fatalf("%s has been registered", name)
	}
	globalCommands[funcName] = f
}
func genProcID() string {
	u, err := uuid.NewV4()
	if err != nil {
		log.Fatalf("gen uuid err: %v", err)
	}
	return strings.ToLower(hex.EncodeToString(u[0:16]))
}
func register(name string, f CommandFunc, flag CommandFlag) {
	funcName := strings.ToLower(name)
	if _, ok := globalCommands[funcName]; ok {
		log.Fatalf("%s has been registered", name)
	}
	globalCommands[funcName] = &command{name, f, flag}
}
func (do *Domain) mustReload() {
	// If reload fails, terminate the whole program to guarantee data safety.
	// TODO: retry a few times on reload error.
	err := do.reload()
	if err != nil {
		log.Fatalf("reload schema err %v", err)
	}
}
func setIntFromOpt(dest *int, d map[string]interface{}, key string) {
	if s, ok := d[key].(string); ok && len(s) != 0 {
		if n, err := strconv.Atoi(s); err != nil {
			log.Fatalf("parse %s failed - %s", key, err)
		} else {
			*dest = n
		}
	}
}
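// A minimal usage sketch for setIntFromOpt, assuming the option map comes
// from a docopt-style parser that delivers values as strings. The option
// name below is hypothetical.
func exampleSetIntFromOpt() {
	netTimeout := 5 // default
	args := map[string]interface{}{"--net_timeout": "10"}
	setIntFromOpt(&netTimeout, args, "--net_timeout")
	// netTimeout is now 10; a malformed value would log.Fatalf instead.
}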
// parseLease parses the lease argument string.
func parseLease() time.Duration {
	dur, err := time.ParseDuration(*lease)
	if err != nil {
		dur, err = time.ParseDuration(*lease + "s")
	}
	if err != nil || dur < 0 {
		log.Fatalf("invalid lease duration %s", *lease)
	}
	return dur
}
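// A quick illustration of the fallback above: time.ParseDuration rejects a
// bare number, so parseLease retries with an "s" suffix, treating plain
// numbers as seconds. "1s" and "1" therefore both yield one second.
func exampleLeaseFallback() {
	if _, err := time.ParseDuration("1"); err != nil {
		d, _ := time.ParseDuration("1" + "s")
		fmt.Println(d) // 1s
	}
}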
func setIntArgFromOpt(dest *int, args map[string]interface{}, key string) {
	if s, ok := args[key].(string); ok && len(s) != 0 {
		n, err := strconv.Atoi(s)
		if err != nil {
			// log.Fatalf never returns, so no explicit return is needed.
			log.Fatalf("parse int arg err %v", err)
		}
		*dest = n
	}
}
func CheckUlimit(min int) {
	ulimitN, err := exec.Command("/bin/sh", "-c", "ulimit -n").Output()
	if err != nil {
		log.Warning("get ulimit failed", err)
	}

	n, err := strconv.Atoi(strings.TrimSpace(string(ulimitN)))
	if err != nil || n < min {
		log.Fatalf("ulimit too small: %d, should be at least %d", n, min)
	}
}
func finishBootstrap(store kv.Storage) {
	storeBootstrapped[store.UUID()] = true

	err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error {
		t := meta.NewMeta(txn)
		err := t.FinishBootstrap()
		return errors.Trace(err)
	})
	if err != nil {
		log.Fatalf("finish bootstrap err %v", err)
	}
}
func NewGroup(groupInfo models.ServerGroup) *Group {
	g := &Group{
		redisServers: make(map[string]*models.Server),
	}

	for _, server := range groupInfo.Servers {
		if server.Type == models.SERVER_TYPE_MASTER {
			if len(g.master) > 0 {
				log.Fatalf("two masters are not allowed: %+v", groupInfo)
			}
			g.master = server.Addr
		}
		g.redisServers[server.Addr] = server
	}

	if len(g.master) == 0 {
		log.Fatalf("master not found: %+v", groupInfo)
	}

	return g
}
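// A minimal usage sketch, assuming models.Server carries the Type and Addr
// fields used above and that a SERVER_TYPE_SLAVE constant exists alongside
// SERVER_TYPE_MASTER. Exactly one master is required, or NewGroup aborts.
func exampleNewGroup() *Group {
	return NewGroup(models.ServerGroup{
		Servers: []*models.Server{
			{Type: models.SERVER_TYPE_MASTER, Addr: "127.0.0.1:6379"},
			{Type: models.SERVER_TYPE_SLAVE, Addr: "127.0.0.1:6380"},
		},
	})
}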
func (s *dbStore) unLockKeys(txn *dbTxn) error {
	for k := range txn.lockedKeys {
		if tid, ok := s.keysLocked[k]; !ok || tid != txn.tid {
			debug.PrintStack()
			log.Fatalf("should never happen: %v, %v", tid, txn.tid)
		}
		delete(s.keysLocked, k)
		s.recentUpdates.Set([]byte(k), txn.version, true)
	}

	return nil
}