func (b BlockContainer) getSuitableBlock(nodeID uint64, compressedSize int) *Block {
	if nodeID < MinNodeId {
		logger.Fatal("invalid nodeID: smaller than MinNodeId")
	}
	if compressedSize == 0 {
		logger.Fatal("compressedSize must be non-zero")
	}
	// Prefer the block that already holds this node.
	block := b.getBlockByNodeID(nodeID)
	// if block != nil && !block.availableNewSize(uint32(compressedSize)) {
	// 	b.evitAndReallocBlock(block)
	// }
	if block == nil {
		// Fall back to the first block with enough free space.
		for i := 0; i < MaxBlockCount; i++ {
			bl := b.blocks[i]
			if bl.available(uint32(compressedSize)) {
				block = bl
				break
			}
		}
	}
	if block == nil {
		logger.Fatal("no available block found")
	}
	return block
}
// batchRawPut benchmarks blind puts: every worker writes its own key range
// through the raw KV client, with no reads and no transactions.
func batchRawPut(value []byte) {
	cli, err := tikv.NewRawKVClient(strings.Split(*pdAddr, ","))
	if err != nil {
		log.Fatal(err)
	}
	wg := sync.WaitGroup{}
	base := *dataCnt / *workerCnt
	wg.Add(*workerCnt)
	for i := 0; i < *workerCnt; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < base; j++ {
				k := base*i + j
				key := fmt.Sprintf("key_%d", k)
				// Use a goroutine-local err: sharing the outer err across
				// goroutines is a data race.
				if err := cli.Put([]byte(key), value); err != nil {
					log.Fatal(errors.ErrorStack(err))
				}
			}
		}(i)
	}
	wg.Wait()
}
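// The bench above reads package-level flags that are not part of this
// snippet. A minimal sketch of how they might be declared; the flag names
// and defaults here are assumptions, not the original definitions:
var (
	pdAddr    = flag.String("pd", "127.0.0.1:2379", "comma-separated PD addresses (assumed)")
	dataCnt   = flag.Int("N", 1000000, "total number of keys to write (assumed)")
	workerCnt = flag.Int("C", 100, "number of concurrent workers (assumed)")
)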
func main() {
	flag.Parse()
	if *lease < 0 {
		log.Fatalf("invalid lease seconds %d", *lease)
	}
	tidb.SetSchemaLease(time.Duration(*lease) * time.Second)
	log.SetLevelByString(*logLevel)
	store, err := tidb.NewStore(fmt.Sprintf("%s://%s", *store, *storePath))
	if err != nil {
		log.Fatal(err)
	}
	var driver relay.IDriver = relay.NewTiDBDriver(store)
	replayer, err := relay.NewReplayer(driver, *relayPath, *check)
	if err != nil {
		log.Fatal(err)
	}
	replayer.OnRecordRead = func(rec *relay.Record) {
		fmt.Printf("%s\n", rec)
	}
	if err = replayer.Run(); err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
}
// Execute DML statements in bootstrap stage.
// All the statements run in a single transaction.
func doDMLWorks(s Session) {
	mustExecute(s, "BEGIN")
	// Insert a default user with empty password.
	mustExecute(s, `INSERT INTO mysql.user VALUES ("%", "root", "", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y", "Y")`)
	// Init global system variables table.
	values := make([]string, 0, len(variable.SysVars))
	for k, v := range variable.SysVars {
		value := fmt.Sprintf(`("%s", "%s")`, strings.ToLower(k), v.Value)
		values = append(values, value)
	}
	sql := fmt.Sprintf("INSERT INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, strings.Join(values, ", "))
	mustExecute(s, sql)
	sql = fmt.Sprintf(`INSERT INTO %s.%s VALUES("%s", "%s", "Bootstrap flag. Do not delete.") ON DUPLICATE KEY UPDATE VARIABLE_VALUE="%s"`,
		mysql.SystemDB, mysql.TiDBTable, bootstrappedVar, bootstrappedVarTrue, bootstrappedVarTrue)
	mustExecute(s, sql)
	_, err := s.Execute("COMMIT")
	if err != nil {
		time.Sleep(1 * time.Second)
		// Check if TiDB is already bootstrapped.
		b, err1 := checkBootstrapped(s)
		if err1 != nil {
			log.Fatal(err1)
		}
		if b {
			return
		}
		log.Fatal(err)
	}
}
func main() {
	cus, err := nsq.NewConsumer("log", "logxx", nsq.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	th := &TailHandler{SouceMap: getSourceMap()}
	// Refresh the source map every hour.
	go func() {
		t := time.NewTicker(time.Hour)
		for range t.C {
			th.SouceMap = getSourceMap()
		}
	}()
	cus.AddHandler(th)
	if err := cus.ConnectToNSQD(*nsqhost); err != nil {
		log.Fatal(err)
	}
	<-cus.StopChan
}
func (n *Node) get(key string) (message *Message, err error) {
	index := n.getSuitablePivotIndex(key)
	if index >= int(n.pivotCount) {
		logger.Fatal("unknown error: pivot index out of range")
	}
	pivot := n.pivots[index]
	if pivot == nil {
		logger.Fatal("unknown error: pivot is nil; minimum pivot count is 1, but current count is 0")
	}
	message = pivot.get(key)
	if message != nil {
		return
	}
	if n.isLeaf {
		return
	}
	// Not found in this node and not a leaf: descend into the child.
	if pivot.hasChild() {
		childNode := n.tree.nodes[pivot.childNodeID]
		if childNode == nil {
			// Return early instead of dereferencing a nil child.
			logger.Debug("get node error: child node not loaded")
			return
		}
		message, err = childNode.get(key)
	}
	return
}
func main() {
	pub, err := nsq.NewProducer(*tHost, nsq.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	t, err := tail.TailFile(*tFile, tail.Config{Follow: true,
		//Location: &tail.SeekInfo{Offset: 0, Whence: os.SEEK_END}})
		Location: &tail.SeekInfo{Offset: 0, Whence: os.SEEK_CUR}})
	if err != nil {
		log.Fatal(err)
	}
	// Each tag in tTag ("|"-separated) maps by position to a category prefix.
	ks := []string{"pv", "click", "other"}
	for line := range t.Lines {
		if *tTag == "" {
			continue
		}
		for k, tag := range strings.Split(*tTag, "|") {
			if strings.Contains(line.Text, tag) {
				if *tDebug == "1" {
					fmt.Println(line.Text)
				}
				pub.Publish("log", []byte(ks[k]+"\t"+line.Text))
			}
		}
	}
}
func (h *Header) flush() {
	file := h.tree.file
	// Write the serialized header at offset 0.
	buffer := &bytes.Buffer{}
	serializeHeader(buffer, h)
	if _, err := file.WriteAt(buffer.Bytes(), 0); err != nil {
		logger.Fatal(err)
	}
	// h.serializeContainerToDisk()
	// Serialize the block container, then write its checksum and length
	// right after the header, followed by the container payload itself.
	buffer = &bytes.Buffer{}
	h.container.writeTo(buffer)
	crcValue := checksum(buffer) // uint32
	crcBuffer := &bytes.Buffer{}
	binary.Write(crcBuffer, binary.LittleEndian, crcValue)
	binary.Write(crcBuffer, binary.LittleEndian, uint32(buffer.Len()))
	if _, err := file.WriteAt(crcBuffer.Bytes(), HeaderSize); err != nil {
		logger.Fatal(err)
	}
	offset := HeaderSize + int64(Uint32Size)*2
	if _, err := file.WriteAt(buffer.Bytes(), offset); err != nil {
		logger.Fatal(err)
	}
	if err := file.Sync(); err != nil {
		logger.Fatal(err)
	}
}
func deserializeBlockContainer(reader *bytes.Reader, blockContainer *BlockContainer) {
	var nodesOffset uint64
	if err := binary.Read(reader, binary.LittleEndian, &nodesOffset); err != nil {
		logger.Fatal(err)
	}
	blockContainer.nodesOffset = nodesOffset
	logger.Debug("Deserialize nodesOffset =", nodesOffset)
	var blockCount uint32
	if err := binary.Read(reader, binary.LittleEndian, &blockCount); err != nil {
		logger.Fatal(err)
	}
	blockContainer.blockCount = blockCount
	var blockSize uint64
	if err := binary.Read(reader, binary.LittleEndian, &blockSize); err != nil {
		logger.Fatal(err)
	}
	blockContainer.blockSize = blockSize
	var blockUsedCount uint32
	if err := binary.Read(reader, binary.LittleEndian, &blockUsedCount); err != nil {
		logger.Fatal(err)
	}
	blockContainer.blockUsedCount = blockUsedCount
}
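// The flush path above calls h.container.writeTo, whose definition is not in
// this snippet. A minimal sketch, assuming it simply mirrors
// deserializeBlockContainer (same field order, same little-endian encoding);
// this is an assumption, not the original code:
func (b *BlockContainer) writeTo(buffer *bytes.Buffer) {
	binary.Write(buffer, binary.LittleEndian, b.nodesOffset)    // uint64
	binary.Write(buffer, binary.LittleEndian, b.blockCount)     // uint32
	binary.Write(buffer, binary.LittleEndian, b.blockSize)      // uint64
	binary.Write(buffer, binary.LittleEndian, b.blockUsedCount) // uint32
}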
func main() {
	tidb.RegisterLocalStore("boltdb", boltdb.Driver{})
	tidb.RegisterStore("tikv", tikv.Driver{})
	metric.RunMetric(3 * time.Second)
	printer.PrintTiDBInfo()
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()
	if *lease < 0 {
		log.Fatalf("invalid lease seconds %d", *lease)
	}
	tidb.SetSchemaLease(time.Duration(*lease) * time.Second)
	cfg := &server.Config{
		Addr:       fmt.Sprintf(":%s", *port),
		LogLevel:   *logLevel,
		StatusAddr: fmt.Sprintf(":%s", *statusPort),
		Socket:     *socket,
	}
	log.SetLevelByString(cfg.LogLevel)
	store, err := tidb.NewStore(fmt.Sprintf("%s://%s", *store, *storePath))
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
	// Create a session to load information schema.
	se, err := tidb.CreateSession(store)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
	se.Close()
	var driver server.IDriver = server.NewTiDBDriver(store)
	svr, err := server.NewServer(cfg, driver)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		svr.Close()
		os.Exit(0)
	}()
	log.Error(svr.Run())
}
func (s *Server) processAction(e interface{}) {
	if strings.HasPrefix(GetEventPath(e), models.GetProxyPath(s.top.ProductName)) {
		// Proxy event: an order for this proxy itself to exit.
		s.handleProxyCommand()
		return
	}
	// Re-watch.
	nodes, err := s.top.WatchChildren(models.GetWatchActionPath(s.top.ProductName), s.evtbus)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
	seqs, err := models.ExtraSeqList(nodes)
	if err != nil {
		log.Fatal(errors.ErrorStack(err))
	}
	if len(seqs) == 0 || !s.top.IsChildrenChangedEvent(e) {
		return
	}
	// Find the first action sequence we have not handled yet.
	index := -1
	for i, seq := range seqs {
		if s.lastActionSeq < seq {
			index = i
			break
		}
	}
	if index < 0 {
		return
	}
	actions := seqs[index:]
	for _, seq := range actions {
		exist, err := s.top.Exist(path.Join(s.top.GetActionResponsePath(seq), s.pi.ID))
		if err != nil {
			log.Fatal(errors.ErrorStack(err))
		}
		if exist {
			continue
		}
		if s.checkAndDoTopoChange(seq) {
			s.responseAction(int64(seq))
		}
	}
	s.lastActionSeq = seqs[len(seqs)-1]
}
func prepareBenchSession() Session {
	store, err := NewStore("memory://bench")
	if err != nil {
		log.Fatal(err)
	}
	log.SetLevel(log.LOG_LEVEL_ERROR)
	se, err := CreateSession(store)
	if err != nil {
		log.Fatal(err)
	}
	mustExecute(se, "use test")
	return se
}
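// A minimal sketch of how prepareBenchSession might be driven from a standard
// Go benchmark (requires the testing package); the table name and statements
// here are illustrative assumptions, not taken from the original code:
func BenchmarkBasic(b *testing.B) {
	se := prepareBenchSession()
	mustExecute(se, "create table if not exists t (c int)")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mustExecute(se, "insert t values (1)")
	}
}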
func (p Pivot) valid() bool {
	if p.messageContainer == nil {
		logger.Fatal("pivot messageContainer is nil")
		return false
	}
	if p.node == nil {
		logger.Fatal("pivot node is nil")
		return false
	}
	return true
}
func readResult(rs ast.RecordSet, count int) {
	for count > 0 {
		x, err := rs.Next()
		if err != nil {
			log.Fatal(err)
		}
		if x == nil {
			log.Fatalf("expected %d more rows, got nil", count)
		}
		count--
	}
	rs.Close()
}
func fatal(msg interface{}) {
	// Clean up the global connection before exiting.
	if globalConn != nil {
		globalConn.Close()
	}
	switch msg := msg.(type) {
	case string:
		log.Fatal(msg)
	case error:
		log.Fatal(errors.ErrorStack(msg))
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()
	if len(*configFile) == 0 {
		log.Fatal("must use a config file")
	}
	cfg, err := config.LoadConfig(*configFile)
	if err != nil {
		log.Fatal(err.Error())
	}
	if err = cluster.Init(cfg); err != nil {
		log.Fatal(err.Error())
	}
	mysql.SetLogger(log.Logger())
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	var svr *proxy.Server
	svr, err = proxy.NewServer(cfg)
	if err != nil {
		log.Fatal(err.Error())
	}
	go func() {
		http.ListenAndServe(":11888", nil)
	}()
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		svr.Close()
	}()
	svr.Serve()
}
func main() {
	metric.RunMetric(3 * time.Second)
	printer.PrintTiDBInfo()
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()
	if *lease <= 0 {
		log.Fatalf("invalid lease seconds %d", *lease)
	}
	tidb.SetSchemaLease(time.Duration(*lease) * time.Second)
	cfg := &server.Config{
		Addr:     fmt.Sprintf(":%s", *port),
		LogLevel: *logLevel,
	}
	log.SetLevelByString(cfg.LogLevel)
	store, err := tidb.NewStore(fmt.Sprintf("%s://%s", *store, *storePath))
	if err != nil {
		log.Fatal(err)
	}
	var driver server.IDriver = server.NewTiDBDriver(store)
	svr, err := server.NewServer(cfg, driver)
	if err != nil {
		log.Fatal(err)
	}
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		svr.Close()
		os.Exit(0)
	}()
	log.Error(svr.Run())
}
// mustExecute runs a statement and aborts the process on any error.
func mustExecute(s Session, sql string) {
	_, err := s.Execute(sql)
	if err != nil {
		debug.PrintStack()
		log.Fatal(err)
	}
}
func (top *Topology) InitCoordConn() {
	var err error
	top.coordConn, err = top.fact(top.coordAddr)
	if err != nil {
		log.Fatal(err)
	}
}
func (s *Server) startStatusHTTP() {
	once.Do(func() {
		go func() {
			http.HandleFunc("/status", func(w http.ResponseWriter, req *http.Request) {
				w.Header().Set("Content-Type", "application/json")
				// Use a distinct name so the server receiver s is not shadowed.
				st := status{
					Connections: s.ConnectionCount(),
					Version:     mysql.ServerVersion,
					GitHash:     printer.TiDBGitHash,
				}
				js, err := json.Marshal(st)
				if err != nil {
					w.WriteHeader(http.StatusInternalServerError)
					log.Error("Encode json error", err)
				} else {
					w.Write(js)
				}
			})
			// HTTP path for prometheus.
			http.Handle("/metrics", prometheus.Handler())
			addr := s.cfg.StatusAddr
			if len(addr) == 0 {
				addr = defaultStatusAddr
			}
			log.Infof("Listening on %v for status and metrics report.", addr)
			err := http.ListenAndServe(addr, nil)
			if err != nil {
				log.Fatal(err)
			}
		}()
	})
}
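// The status struct marshalled above is not part of this snippet. A minimal
// sketch consistent with the fields used there; the JSON tags are
// assumptions, not the original definitions:
type status struct {
	Connections int    `json:"connections"`
	Version     string `json:"version"`
	GitHash     string `json:"git_hash"`
}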
func main() {
	flag.Parse()
	oracle, err := server.NewTimestampOracle(*addr)
	if err != nil {
		log.Fatal(err)
	}
	go http.ListenAndServe(":5555", nil)
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		oracle.Close()
		os.Exit(0)
	}()
	oracle.Run()
}
func main() {
	flag.Parse()
	log.SetLevelByString(*logLevel)
	cfg := &server.Config{
		Addr:         *addr,
		ZKAddr:       *zk,
		RootPath:     *rootPath,
		SaveInterval: *interval,
	}
	oracle, err := server.NewTimestampOracle(cfg)
	if err != nil {
		log.Fatal(err)
	}
	go http.ListenAndServe(":5555", nil)
	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		sig := <-sc
		log.Infof("Got signal [%d] to exit.", sig)
		oracle.Close()
		os.Exit(0)
	}()
	oracle.Run()
}
func main() {
	tso := &TimestampOracle{
		ticker: time.NewTicker(10 * time.Millisecond),
	}
	current := &atomicObject{
		physical: time.Now(),
	}
	tso.ts.Store(current)
	go tso.updateTicker()
	go http.ListenAndServe(":5555", nil)
	ln, err := net.Listen("tcp", ":1234")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Warning(err)
			continue
		}
		s := &session{
			r:    bufio.NewReaderSize(conn, 8192),
			w:    bufio.NewWriterSize(conn, 8192),
			conn: conn,
		}
		go tso.handleConnection(s)
	}
}
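// atomicObject is referenced above but not defined in this snippet. A minimal
// sketch of the timestamp value stored in tso.ts, following the usual
// physical/logical TSO split; the physical field is implied by the code
// above, while the logical field is an assumption:
type atomicObject struct {
	physical time.Time // coarse wall-clock component, advanced by the ticker
	logical  int64     // per-tick counter handed out to clients (assumed)
}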
func getTestZkHosts() []string {
	// Validate the flag itself: strings.Split never returns an empty slice,
	// so a length check on the result can never fail.
	if len(*zk) == 0 {
		log.Fatal("invalid zk address list")
	}
	return strings.Split(*zk, ",")
}
func init() {
	flag.Parse()
	if *tFile == "" {
		log.Fatal("missing required file argument")
	}
}
func newTxn(c hbase.HBaseClient, cfg TxnConfig) Txn {
	txn, err := NewTxnWithConf(c, cfg, globalOracle)
	if err != nil {
		log.Fatal(err)
	}
	return txn
}
// batchRW benchmarks conflict-free transactional writes: each worker owns a
// disjoint key range, so transactions never contend with each other.
func batchRW(value []byte) {
	wg := sync.WaitGroup{}
	base := *dataCnt / *workerCnt
	wg.Add(*workerCnt)
	for i := 0; i < *workerCnt; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < base; j++ {
				txnCounter.WithLabelValues("txn").Inc()
				start := time.Now()
				k := base*i + j
				txn, err := store.Begin()
				if err != nil {
					log.Fatal(err)
				}
				key := fmt.Sprintf("key_%d", k)
				if err = txn.Set([]byte(key), value); err != nil {
					log.Fatal(err)
				}
				err = txn.Commit()
				if err != nil {
					txnRolledbackCounter.WithLabelValues("txn").Inc()
					txn.Rollback()
				}
				txnDurations.WithLabelValues("txn").Observe(time.Since(start).Seconds())
			}
		}(i)
	}
	wg.Wait()
}
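// The metric variables used above are not defined in this snippet. A
// plausible sketch using the Prometheus Go client; the metric names, label,
// and help texts are assumptions, not the original definitions:
var (
	txnCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "txn_total", Help: "Counter of started transactions."},
		[]string{"type"})
	txnRolledbackCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "txn_rollback_total", Help: "Counter of rolled-back transactions."},
		[]string{"type"})
	txnDurations = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "txn_duration_seconds", Help: "Transaction latency distribution."},
		[]string{"type"})
)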
func initConfig() {
	var err error
	appini, err = GetConfObj(GetConfigPath())
	if err != nil {
		log.Fatal(err)
	}
}
func LoadConf(configFile string) (*Conf, error) {
	srvConf := &Conf{}
	conf, err := utils.InitConfigFromFile(configFile)
	if err != nil {
		log.Fatal(err)
	}
	srvConf.ProductName, _ = conf.ReadString("product", "test")
	if len(srvConf.ProductName) == 0 {
		log.Fatalf("invalid config: product entry is missing in %s", configFile)
	}
	srvConf.CoordinatorAddr, _ = conf.ReadString("coordinator_addr", "")
	if len(srvConf.CoordinatorAddr) == 0 {
		log.Fatalf("invalid config: coordinator_addr entry is missing in %s", configFile)
	}
	srvConf.CoordinatorAddr = strings.TrimSpace(srvConf.CoordinatorAddr)
	srvConf.Coordinator, _ = conf.ReadString("coordinator", "zookeeper")
	srvConf.StoreAuth, _ = conf.ReadString("store_auth", "")
	// The configs below should be set from command-line flags. We will remove this code later.
	srvConf.NetTimeout, _ = conf.ReadInt("net_timeout", 5)
	srvConf.Proto, _ = conf.ReadString("proto", "tcp")
	srvConf.Addr, _ = conf.ReadString("addr", "")
	srvConf.HTTPAddr, _ = conf.ReadString("http_addr", "")
	srvConf.ProxyID, _ = conf.ReadString("proxy_id", "")
	srvConf.PidFile, _ = conf.ReadString("pidfile", "")
	srvConf.ProxyAuth, _ = conf.ReadString("proxy_auth", "")
	return srvConf, nil
}
// initPutTags initializes the delivery tags: for every Redis key matching
// tagkey, it records which currently delivering adverts carry that tag.
func (this *ZjPut) initPutTags(tagkey string, prefix1 string, prefix2 string) {
	rdb, err := lib.GetRedisObj()
	if err != nil {
		log.Fatal(err)
	}
	rdb.SelectDb("0")
	for _, key := range rdb.Keys(tagkey) {
		rkey := strings.TrimPrefix(key, strings.TrimSuffix(tagkey, "*")+"_")
		if lib.IsMongo(rkey) {
			rkey = prefix2 + rkey
		} else {
			rkey = prefix1 + rkey
		}
		if _, ok := this.putTags[rkey]; !ok {
			this.putTags[rkey] = make(map[string]int)
		}
		for _, aid := range rdb.SMembers(key) {
			if _, ok := this.putAdverts[aid]; ok {
				this.putTags[rkey][aid] = 1
			}
		}
	}
	// Special tag.
	this.putTags["url_FFF"] = map[string]int{"99999": 1}
}