// Open opens or creates a storage with specific format for a local engine Driver.
func (d Driver) Open(schema string) (kv.Storage, error) {
	mc.mu.Lock()
	defer mc.mu.Unlock()

	if store, ok := mc.cache[schema]; ok {
		// TODO: check the cache store has the same engine with this Driver.
		log.Info("cache store", schema)
		return store, nil
	}

	db, err := d.Driver.Open(schema)
	if err != nil {
		return nil, errors.Trace(err)
	}

	log.Info("New store", schema)
	s := &dbStore{
		txns:       make(map[uint64]*dbTxn),
		keysLocked: make(map[string]uint64),
		uuid:       uuid.NewV4().String(),
		path:       schema,
		db:         db,
		compactor:  newLocalCompactor(localCompactDefaultPolicy, db),
	}
	mc.cache[schema] = s
	s.compactor.Start()
	return s, nil
}
func (s *HBasePutTestSuit) TestGetPut(c *C) {
	log.Info(codec.EncodeKey(170))

	p := NewPut([]byte("1_\xff\xff"))
	p2 := NewPut([]byte("1_\xff\xfe"))
	p3 := NewPut([]byte("1_\xff\xee"))

	p.AddValue([]byte("cf"), []byte("q"), []byte("!"))
	p2.AddValue([]byte("cf"), []byte("q"), []byte("!"))
	p3.AddValue([]byte("cf"), []byte("q"), []byte("!"))

	cli, err := NewClient(getTestZkHosts(), "/hbase")
	c.Assert(err, Equals, nil)

	cli.Put("t2", p)
	cli.Put("t2", p2)
	cli.Put("t2", p3)

	scan := NewScan([]byte("t2"), 100, cli)
	scan.StartRow = []byte("1_")
	for {
		r := scan.Next()
		if r == nil {
			break
		}
		log.Info(r.SortedColumns[0].Row)
	}

	cli.Delete("t2", NewDelete([]byte("1_\xff\xff")))
	cli.Delete("t2", NewDelete([]byte("1_\xff\xfe")))
	cli.Delete("t2", NewDelete([]byte("1_\xff\xee")))
}
func (s *testModelSuite) TestNewAction(c *C) {
	log.Info("[TestNewAction][start]")
	fakeCoordConn := zkhelper.NewConn()
	err := NewAction(fakeCoordConn, productName, ACTION_TYPE_SLOT_CHANGED, nil, "desc", false)
	c.Assert(err, IsNil)

	prefix := GetWatchActionPath(productName)
	exist, _, err := fakeCoordConn.Exists(prefix)
	c.Assert(exist, Equals, true)
	c.Assert(err, IsNil)

	// test if response node exists
	d, _, err := fakeCoordConn.Get(prefix + "/0000000001")
	c.Assert(err, IsNil)

	// test get action data
	d, _, err = fakeCoordConn.Get(GetActionResponsePath(productName) + "/0000000001")
	c.Assert(err, IsNil)

	var action Action
	json.Unmarshal(d, &action)
	c.Assert(action.Desc, Equals, "desc")
	c.Assert(action.Type, Equals, ACTION_TYPE_SLOT_CHANGED)

	fakeCoordConn.Close()
	log.Info("[TestNewAction][end]")
}
func BulkInsertData(reader *bufio.Reader, ktype string) {
	bk := es.Bulk()
	num := 0
	for {
		line, err := reader.ReadString('\n')
		if err == io.EOF {
			break
		}
		info := strings.Split(line, "\t#\t")
		log.Info(len(info))
		if len(info) != 2 {
			continue
		}
		ad, ua := getAdUa(info[0], "\t")
		keyword := getKeyWord(info[1], "\t")
		lonlat := GetLonLat(ad)
		if lonlat == "" {
			continue
		}
		num++
		id := encrypt.DefaultMd5.Encode("1456185600" + ad + ua)
		pinfo := map[string]interface{}{
			"ad":  ad,
			"ua":  ua,
			ktype: keyword,
			"geo": lonlat,
		}
		bk.Add(elastic.NewBulkUpdateRequest().Index("map_trace").Type("map").Doc(pinfo).Id(id).DocAsUpsert(true))
		bk.Add(elastic.NewBulkUpdateRequest().Index("map_trace_search").Type("map").Doc(pinfo).Id(id).DocAsUpsert(true))
		if num%10000 == 0 {
			log.Error(bk.Do())
		}
	}
	log.Info(bk.Do())
}
func (gc *localstoreCompactor) checkExpiredKeysWorker() {
	gc.workerWaitGroup.Add(1)
	defer gc.workerWaitGroup.Done()

	for {
		select {
		case <-gc.stopCh:
			log.Info("GC stopped")
			return
		case <-gc.ticker.C:
			gc.mu.Lock()
			m := gc.recentKeys
			if len(m) == 0 {
				gc.mu.Unlock()
				continue
			}
			gc.recentKeys = make(map[string]struct{})
			gc.mu.Unlock()
			log.Info("GC trigger")
			for k := range m {
				err := gc.Compact(nil, []byte(k))
				if err != nil {
					log.Error(err)
				}
			}
		}
	}
}
func (c *Cluster) DB(isread bool) (*mysql.DB, error) {
	if isread {
		log.Info("return a master conn")
		return c.Master()
	}
	log.Info("return a slave conn")
	return c.Slave()
}
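// Usage sketch (not from the original source): given a *Cluster that has
// already been configured elsewhere, callers choose a connection via the
// isread flag. Note that with the branch above, isread == true currently
// returns the master connection. The example function name is hypothetical.
func exampleClusterDB(cluster *Cluster) error {
	db, err := cluster.DB(false) // false takes the c.Slave() branch
	if err != nil {
		return err
	}
	_ = db // placeholder; a real caller would run queries on db here
	return nil
}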
func (e *etcdImpl) Create(wholekey string, value []byte, flags int32, aclv []zk.ACL) (keyCreated string, err error) {
	seq := (flags & zk.FlagSequence) != 0
	tmp := (flags & zk.FlagEphemeral) != 0
	ttl := uint64(MAX_TTL)
	if tmp {
		ttl = 5
	}

	var resp *etcd.Response
	conn, err := e.pool.Get()
	if err != nil {
		return "", err
	}
	defer e.pool.Put(conn)
	c := conn.(*PooledEtcdClient).c

	fn := c.Create
	log.Info("create", wholekey)
	if seq {
		wholekey = path.Dir(wholekey)
		fn = c.CreateInOrder
	} else {
		for _, v := range aclv {
			if v.Perms == PERM_DIRECTORY {
				log.Info("etcdImpl:create directory", wholekey)
				fn = nil
				resp, err = c.CreateDir(wholekey, uint64(ttl))
				if err != nil {
					return "", convertToZkError(err)
				}
			}
		}
	}

	if fn == nil {
		if tmp {
			e.keepAlive(wholekey, ttl)
		}
		return resp.Node.Key, nil
	}

	resp, err = fn(wholekey, string(value), uint64(ttl))
	if err != nil {
		return "", convertToZkError(err)
	}
	if tmp {
		e.keepAlive(resp.Node.Key, ttl)
	}
	return resp.Node.Key, nil
}
// Push pushes metrics in the background.
func Push(cfg *MetricConfig) {
	if cfg.PushInterval.Duration == zeroDuration || len(cfg.PushAddress) == 0 {
		log.Info("disable Prometheus push client")
		return
	}

	log.Info("start Prometheus push client")
	interval := cfg.PushInterval.Duration
	go prometheusPushClient(cfg.PushJob, cfg.PushAddress, interval)
}
func NewCookiePut() *CookiePut {
	var zj = &CookiePut{}
	zj.kf = dbfactory.NewKVFile(fmt.Sprintf("./%s.txt", convert.ToString(time.Now().Unix())))
	zj.putTags = make(map[string]map[string]int)
	zj.Timestamp = timestamp.GetHourTimestamp(-1)
	zj.initPutAdverts()
	zj.initPutTags("TAGS_5*", "cookie_", "")
	log.Info(zj.putAdverts)
	log.Info(zj.putTags)
	return zj
}
// experimental simple auto rebalance :)
func Rebalance(coordConn zkhelper.Conn, delay int) error {
	targetQuota, err := getQuotaMap(coordConn)
	if err != nil {
		return errors.Trace(err)
	}
	livingNodes, err := getLivingNodeInfos(coordConn)
	if err != nil {
		return errors.Trace(err)
	}

	log.Info("start rebalance")
	for _, node := range livingNodes {
		for len(node.CurSlots) > targetQuota[node.GroupId] {
			for _, dest := range livingNodes {
				if dest.GroupId != node.GroupId && len(dest.CurSlots) < targetQuota[dest.GroupId] {
					slot := node.CurSlots[len(node.CurSlots)-1]
					// create a migration task
					t := NewMigrateTask(MigrateTaskInfo{
						Delay:      delay,
						FromSlot:   slot,
						ToSlot:     slot,
						NewGroupId: dest.GroupId,
						Status:     MIGRATE_TASK_MIGRATING,
						CreateAt:   strconv.FormatInt(time.Now().Unix(), 10),
					})
					u, err := uuid.NewV4()
					if err != nil {
						return errors.Trace(err)
					}
					t.Id = u.String()

					if ok, err := preMigrateCheck(t); ok {
						// do migrate
						err := t.run()
						if err != nil {
							log.Warning(err)
							return errors.Trace(err)
						}
					} else {
						log.Warning(err)
						return errors.Trace(err)
					}
					node.CurSlots = node.CurSlots[0 : len(node.CurSlots)-1]
					dest.CurSlots = append(dest.CurSlots, slot)
				}
			}
		}
	}
	log.Info("rebalance finish")
	return nil
}
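// Usage sketch (not from the original source): Rebalance only needs a live
// coordinator connection and a per-slot delay; the example below assumes a
// connection such as the zkhelper.NewConn() fake used in the tests in this
// file, and the wrapper function name is hypothetical.
func exampleRebalance(coordConn zkhelper.Conn) error {
	// a delay of 0 migrates slots back-to-back without pausing between them
	return Rebalance(coordConn, 0)
}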
// Fix https://github.com/pingcap/go-themis/issues/19
func (s *TransactionTestSuit) TestPrewriteSecondaryMissingRows(c *C) {
	conf := defaultTxnConf
	hook := createChoosePrimaryRowHook("A")
	hook.beforePrewriteSecondary = func(txn *themisTxn, ctx interface{}) (bool, interface{}, error) {
		go func() {
			hook2 := createChoosePrimaryRowHook("B")
			hook2.onSecondaryOccursLock = func(txn *themisTxn, ctx interface{}) (bool, interface{}, error) {
				log.Info("tx2 occurs secondary lock", ctx)
				return true, nil, nil
			}
			tx2 := newTxn(s.cli, conf)
			tx2.(*themisTxn).setHook(hook2)
			tx2.Put(themisTestTableName, hbase.NewPut([]byte("A")).AddValue(cf, q, []byte("A")))
			tx2.Put(themisTestTableName, hbase.NewPut([]byte("B")).AddValue(cf, q, []byte("B")))
			tx2.Put(themisTestTableName, hbase.NewPut([]byte("C")).AddValue(cf, q, []byte("C")))
			tx2.Commit()
		}()
		time.Sleep(500 * time.Millisecond)
		return true, nil, nil
	}
	hook.onSecondaryOccursLock = func(txn *themisTxn, ctx interface{}) (bool, interface{}, error) {
		log.Info("tx1", ctx)
		return true, nil, nil
	}
	hook.onPrewriteRow = func(txn *themisTxn, ctx interface{}) (bool, interface{}, error) {
		containPrimary := ctx.([]interface{})[1].(bool)
		if !containPrimary {
			rm := ctx.([]interface{})[0].(*rowMutation)
			log.Info(string(rm.row))
		}
		return true, nil, nil
	}

	tx1 := newTxn(s.cli, conf)
	tx1.(*themisTxn).setHook(hook)
	tx1.Put(themisTestTableName, hbase.NewPut([]byte("A")).AddValue(cf, q, []byte("A")))
	tx1.Put(themisTestTableName, hbase.NewPut([]byte("B")).AddValue(cf, q, []byte("B")))
	tx1.Put(themisTestTableName, hbase.NewPut([]byte("C")).AddValue(cf, q, []byte("C")))
	err := tx1.Commit()
	c.Assert(err, IsNil)

	tx3 := newTxn(s.cli, conf)
	rs, err := tx3.Get(themisTestTableName, hbase.NewGet([]byte("C")).AddColumn(cf, q))
	c.Assert(rs, NotNil)
	c.Assert(err, IsNil)
	tx3.Commit()
}
func (s *testModelSuite) TestAddSlaveToEmptyGroup(c *C) {
	log.Info("[TestAddSlaveToEmptyGroup][start]")
	fakeCoordConn := zkhelper.NewConn()

	g := NewServerGroup(productName, 1)
	g.Create(fakeCoordConn)

	s1 := NewServer(SERVER_TYPE_SLAVE, s.s1.addr)
	err := g.AddServer(fakeCoordConn, s1, auth)
	c.Assert(err, IsNil)
	c.Assert(g.Servers[0].Type, Equals, SERVER_TYPE_MASTER)

	fakeCoordConn.Close()
	log.Info("[TestAddSlaveToEmptyGroup][end]")
}
func (s *testModelSuite) TestServerGroup(c *C) {
	log.Info("[TestServerGroup][start]")
	fakeCoordConn := zkhelper.NewConn()

	g := NewServerGroup(productName, 1)
	g.Create(fakeCoordConn)

	// test create new group
	groups, err := ServerGroups(fakeCoordConn, productName)
	c.Assert(err, IsNil)
	c.Assert(len(groups), Not(Equals), 0)

	ok, err := g.Exists(fakeCoordConn)
	c.Assert(err, IsNil)
	c.Assert(ok, Equals, true)

	gg, err := GetGroup(fakeCoordConn, productName, 1)
	c.Assert(err, IsNil)
	c.Assert(gg, NotNil)
	c.Assert(gg.Id, Equals, g.Id)

	s1 := NewServer(SERVER_TYPE_MASTER, s.s1.addr)
	s2 := NewServer(SERVER_TYPE_MASTER, s.s2.addr)

	err = g.AddServer(fakeCoordConn, s1, auth)
	c.Assert(err, IsNil)

	servers, err := g.GetServers(fakeCoordConn)
	c.Assert(err, IsNil)
	c.Assert(len(servers), Equals, 1)

	g.AddServer(fakeCoordConn, s2, auth)
	c.Assert(len(g.Servers), Equals, 1)

	s2.Type = SERVER_TYPE_SLAVE
	g.AddServer(fakeCoordConn, s2, auth)
	c.Assert(len(g.Servers), Equals, 2)

	err = g.Promote(fakeCoordConn, s2.Addr, auth)
	c.Assert(err, IsNil)

	m, err := g.Master(fakeCoordConn)
	c.Assert(err, IsNil)
	c.Assert(m.Addr, Equals, s2.Addr)

	fakeCoordConn.Close()
	log.Info("[TestServerGroup][stop]")
}
func (s *ParallelTestSuit) TestParallelHbaseCall(c *C) {
	runtime.GOMAXPROCS(runtime.NumCPU() / 2)
	cli, err := createHBaseClient()
	c.Assert(err, Equals, nil)

	err = createNewTableAndDropOldTable(cli, themisTestTableName, "cf", nil)
	c.Assert(err, Equals, nil)

	wg := sync.WaitGroup{}
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			tx := newTxn(cli, defaultTxnConf)
			p := hbase.NewPut(getTestRowKey(c))
			p.AddValue(cf, q, []byte(strconv.Itoa(i)))
			tx.Put(themisTestTableName, p)
			tx.Commit()
		}(i)
	}
	wg.Wait()

	g := hbase.NewGet(getTestRowKey(c)).AddColumn(cf, q)
	rs, err := cli.Get(themisTestTableName, g)
	if err != nil {
		log.Fatal(err)
	}
	log.Info(string(rs.SortedColumns[0].Value))
}
func (s *Server) responseAction(seq int64) {
	log.Info("send response", seq)
	err := s.top.DoResponse(int(seq), &s.pi)
	if err != nil {
		log.Error(errors.ErrorStack(err))
	}
}
// initialize the cox-to-area mapping
func (this *JsPut) initArea() {
	this.areamap = make(map[string]string)
	f, err := os.Open(lib.GetConfVal("jiangsu::areapath"))
	if err != nil {
		log.Error(err)
		return
	}
	defer f.Close()

	bi := bufio.NewReader(f)
	for {
		line, err := bi.ReadString('\n')
		if err == io.EOF {
			break
		}
		// e.g. 0006f21d119b032d59acc3c2b90f10624eeaebe8,511
		info := strings.Split(line, ",")
		if len(info) != 2 {
			continue
		}
		this.areamap[info[0]] = strings.TrimSpace(info[1])
	}
	log.Info("Jiangsu area count", len(this.areamap))
}
// save the delivery trace to the Dianxin (China Telecom) FTP
func (this *JsPut) saveTraceToDianxin() {
	var (
		ftp     = lib.GetConfVal("jiangsu::ftp_path")
		ppath   = lib.GetConfVal("jiangsu::put_path")
		rk      = "account.10046.sha1." + time.Now().Add(-time.Hour).Format("200601021504")
		fname   = ppath + "/" + rk
		adcount = 0
	)
	f, err := os.Create(fname)
	if err != nil {
		log.Error("failed to create file", err)
		return
	}
	defer f.Close()

	this.kf.AdSet(func(ad string) {
		if v, ok := this.areamap[ad]; ok {
			f.WriteString(ad + "," + v + "\n")
			adcount++
		}
	})

	cmd := exec.Command(ftp, rk)
	str, err := cmd.Output()
	log.Info(string(str), err)

	// ad-count statistics, e.g. dx_stats, js_1461016800, 11111
	lib.StatisticsData("dx_stats", fmt.Sprintf("js_%s", timestamp.GetHourTimestamp(-1)), convert.ToString(adcount), "")
}
func NewServer(cfg *config.Conf) (*Server, error) {
	s := new(Server)

	s.cfg = cfg
	var err error
	s.fingerprints = make(map[string]*LimitReqNode)
	// s.users = make(map[string]*User)
	// s.qpsOnServer = &LimitReqNode{}
	s.mu = &sync.Mutex{}
	s.restart = false

	port := s.cfg.GetConfig().Global.Port

	// get listenfd from file when restart
	if os.Getenv("_GRACEFUL_RESTART") == "true" {
		log.Info("graceful restart with previous listenfd")
		// get the listenfd
		file := os.NewFile(3, "")
		s.listener, err = net.FileListener(file)
		if err != nil {
			log.Warn("get listener err")
		}
	} else {
		s.listener, err = net.Listen("tcp4", fmt.Sprintf(":%d", port))
	}
	if err != nil {
		return nil, err
	}
	log.Infof("Dbatman Listen(tcp4) at [%d]", port)
	return s, nil
}
func (sg *ServerGroup) RemoveServer(coordConn zkhelper.Conn, addr string) error {
	coordPath := fmt.Sprintf("/zk/reborn/db_%s/servers/group_%d/%s", sg.ProductName, sg.Id, addr)
	data, _, err := coordConn.Get(coordPath)
	if err != nil {
		return errors.Trace(err)
	}
	var s Server
	err = json.Unmarshal(data, &s)
	if err != nil {
		return errors.Trace(err)
	}
	log.Info(s)
	if s.Type == SERVER_TYPE_MASTER {
		return errors.New("cannot remove master, use promote first")
	}

	err = coordConn.Delete(coordPath, -1)
	if err != nil {
		return errors.Trace(err)
	}

	// update server list
	for i := 0; i < len(sg.Servers); i++ {
		if sg.Servers[i].Addr == s.Addr {
			sg.Servers = append(sg.Servers[:i], sg.Servers[i+1:]...)
			break
		}
	}

	// removing a slave does not need proxy confirmation
	err = NewAction(coordConn, sg.ProductName, ACTION_TYPE_SERVER_GROUP_CHANGED, sg, "", false)
	return errors.Trace(err)
}
func (s *HBaseDelTestSuit) TestWithClient(c *C) {
	// create new
	p := NewPut([]byte("test"))
	p.AddValue([]byte("cf"), []byte("q"), []byte("val"))
	s.cli.Put("t2", p)

	// check it
	g := NewGet([]byte("test"))
	g.AddStringFamily("cf")
	r, err := s.cli.Get("t2", g)
	c.Assert(err, Equals, nil)
	c.Assert(string(r.Columns["cf:q"].Value), Equals, "val")
	log.Info(string(r.Columns["cf:q"].Value))

	// delete it
	d := NewDelete([]byte("test"))
	d.AddColumn([]byte("cf"), []byte("q"))
	b, err := s.cli.Delete("t2", d)
	c.Assert(err, Equals, nil)
	c.Assert(b, Equals, true)

	// check it
	r, err = s.cli.Get("t2", g)
	c.Assert(err, Equals, nil)
	c.Assert(r == nil, Equals, true)
}
// fetch the domain-recovery (visitor) data
func (this *ZjPut) VisitorData(out chan interface{}, in chan int8) {
	var datacount = 0
	defer func() {
		// statistics, e.g. zhejiang_put, other_1461016800, 11111
		lib.StatisticsData("dsource_stats", "zj_"+this.Timestamp+"_visitor", convert.ToString(datacount), "")
	}()

	m, err := lib.GetMongoObj()
	if err != nil {
		log.Error(err)
		in <- 1
		return
	}
	defer m.Close()

	qconf := mongodb.MongodbQueryConf{}
	qconf.Db = "data_source"
	qconf.Table = "zhejiang_visitor"
	qconf.Query = mongodb.MM{}

	m.Query(qconf, func(info map[string]interface{}) {
		ad := convert.ToString(info["ad"])
		ua := encrypt.DefaultMd5.Encode(encrypt.DefaultBase64.Decode(convert.ToString(info["ua"])))
		aids := convert.ToString(info["aids"])
		datacount++
		out <- fmt.Sprintf("%s\t%s\t%s", ad, ua, aids)
	})
	log.Info("visitor data ok")
	in <- 1
}
func runStmt(ctx context.Context, s ast.Statement, args ...interface{}) (ast.RecordSet, error) {
	var err error
	var rs ast.RecordSet
	// Before every execution, we must clear the affected rows.
	variable.GetSessionVars(ctx).SetAffectedRows(0)
	if s.IsDDL() {
		err = ctx.CommitTxn()
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	rs, err = s.Exec(ctx)
	// All the history should be added here.
	se := ctx.(*session)
	se.history.add(0, s)
	// MySQL DDL should be auto-committed.
	ac, err1 := autocommit.ShouldAutocommit(ctx)
	if err1 != nil {
		return nil, errors.Trace(err1)
	}
	if s.IsDDL() || ac {
		if err != nil {
			log.Info("RollbackTxn for ddl/autocommit error.")
			ctx.RollbackTxn()
		} else {
			err = ctx.CommitTxn()
		}
	}
	return rs, errors.Trace(err)
}
func sscript(buf string) []byte {
	cmd := exec.Command("php", "a.php", encrypt.DefaultBase64.Encode(buf))
	var out bytes.Buffer
	cmd.Stdout = &out
	err := cmd.Start()
	if err != nil {
		log.Info(err)
		return nil
	}
	err = cmd.Wait()
	if err != nil {
		log.Info(err)
		return nil
	}
	return out.Bytes()
}
func ForceRemoveDeadFence(coordConn zkhelper.Conn, productName string) error {
	proxies, err := ProxyList(coordConn, productName, func(p *ProxyInfo) bool {
		return p.State == PROXY_STATE_ONLINE
	})
	if err != nil {
		return errors.Trace(err)
	}

	fenceProxies, err := GetFenceProxyMap(coordConn, productName)
	if err != nil {
		return errors.Trace(err)
	}

	// remove online proxies' fences
	for _, proxy := range proxies {
		delete(fenceProxies, proxy.Addr)
	}

	// delete dead fences in zookeeper
	path := GetProxyFencePath(productName)
	for remainFence := range fenceProxies {
		fencePath := filepath.Join(path, remainFence)
		log.Info("removing fence: ", fencePath)
		if err := zkhelper.DeleteRecursive(coordConn, fencePath, -1); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
func (s *ScanTestSuit) SetUpSuite(c *C) {
	var (
		ok  bool
		err error
	)
	s.cli, err = NewClient(getTestZkHosts(), "/hbase")
	c.Assert(err, Equals, nil)

	log.Info("create table")
	table := NewTableNameWithDefaultNS("scan_test")
	s.cli.DisableTable(table)
	s.cli.DropTable(table)

	tblDesc := NewTableDesciptor(table)
	cf := newColumnFamilyDescriptor("cf", 3)
	tblDesc.AddColumnDesc(cf)
	err = s.cli.CreateTable(tblDesc, nil)
	c.Assert(err, IsNil)

	for i := 1; i <= 10000; i++ {
		p := NewPut([]byte(strconv.Itoa(i)))
		p.AddValue([]byte("cf"), []byte("q"), []byte(strconv.Itoa(i)))
		ok, err = s.cli.Put("scan_test", p)
		c.Assert(ok, IsTrue)
		c.Assert(err, IsNil)
	}
}
func (c *Cluster) HeartBeat() (*CrashDb, error) {
	if c.masterNode == nil {
		log.Info("master node does not exist")
		err := fmt.Errorf("config is nil")
		return nil, err
	}
	ret := &CrashDb{crashNum: 0, masterNode: nil}

	// get all the cluster cfg
	masterDb := c.masterNode
	slaveDbs := c.slaveNodes

	// HeartBeatPing or Ping uses a single conn or a user conn from the conn pools
	err := masterDb.HeartBeatPing()
	if err != nil {
		// log.Warn("db ping error ,", err.Error())
		ret.crashNum++
		ret.masterNode = masterDb
	}

	for _, slavedb := range slaveDbs {
		// check the alive status of the db; if it has already been cut down, there is no need to detect it again
		if slavedb.GetDbAliveStatus() {
			err := slavedb.HeartBeatPing()
			if err != nil {
				ret.crashNum++
				// log.Warn("db ping error ,", err.Error())
				ret.slaveNode = append(ret.slaveNode, slavedb)
			}
		}
	}
	return ret, nil
}
func (th *TailHandler) Click(info []string) {
	var exstr = th.Script([]byte(strings.Join(info, "\t")))
	var exmap map[string]string
	if err := json.Unmarshal(exstr, &exmap); err != nil {
		return
	}
	if len(exmap) == 0 {
		return
	}
	log.Info(exmap)

	var ml MLog
	p, c := th.getIp(info)
	ml.Province = p
	ml.City = c
	ml.AdwId = convert.ToInt(th.getmap(exmap, "pd"))
	ml.AdId = convert.ToInt(th.getmap(exmap, "hd"))
	ml.UA = th.getmap(exmap, "ua")
	ml.AD = th.getmap(exmap, "ad")
	ml.Click = 1
	ml.Clock = convert.ToInt(time.Now().Format("15"))
	ml.Date = convert.ToInt(timestamp.GetDayTimestamp(0))
	ml.Url = strings.TrimSpace(th.get_url(th.getmap(exmap, "ltu"), th.getmap(exmap, "lftu")))
	ml.Domain = th.get_domain(ml.Url)
	ml.Cookie = th.get_cookie(info[9])
	ml.CusId = encrypt.DefaultMd5.Encode(ml.AD + ml.UA + ml.Cookie)
	ml.CusId2 = ml.CusId
	ml.Money, ml.Fmoney = th.click_money(ml.AdwId, ml.AdId)
	if _, ok := th.SouceMap[th.getmap(exmap, "pd")]; ok {
		ml.Source = 1
	}
	th.SaveData(ml)
}
func (s *MutationCacheTestSuit) TestMutationCache(c *C) {
	cache := newColumnMutationCache()
	row := []byte("r1")
	col := &hbase.Column{[]byte("f1"), []byte("q1")}
	cache.addMutation([]byte("tbl"), row, col, hbase.TypePut, []byte("test"), false)
	cache.addMutation([]byte("tbl"), row, col, hbase.TypeDeleteColumn, []byte("test"), false)
	cache.addMutation([]byte("tbl"), row, col, hbase.TypePut, []byte("test"), false)

	cc := &hbase.ColumnCoordinate{
		Table: []byte("tbl"),
		Row:   []byte("r1"),
		Column: hbase.Column{
			Family: []byte("f1"),
			Qual:   []byte("q1"),
		},
	}
	mutation := cache.getMutation(cc)
	if mutation == nil || mutation.typ != hbase.TypePut || bytes.Compare(mutation.value, []byte("test")) != 0 {
		c.Error("cache error")
	} else {
		log.Info(mutation)
	}

	p := hbase.NewPut([]byte("row"))
	p.AddStringValue("cf", "q", "v")
	p.AddStringValue("cf", "q1", "v")
	p.AddStringValue("cf", "q2", "v")
	p.AddStringValue("cf", "q3", "v")
	entries := getEntriesFromPut(p)
	c.Assert(len(entries), Equals, 4)
}
func (s *Server) Serve() error {
	log.Debug("this is dbatman v4")
	s.running = true
	var sessionId int64 = 0
	for s.running {
		select {
		case sessionChan <- sessionId:
			// do nothing
		default:
			// warning!
			log.Warnf("TASK_CHANNEL is full!")
		}
		conn, err := s.Accept()
		if err != nil {
			log.Warning("accept error %s", err.Error())
			continue
		}
		// allocate a sessionId for the session
		go s.onConn(conn)
		sessionId += 1
	}

	if s.restart == true {
		log.Debug("Begin to restart graceful")
		listenerFile, err := s.listener.(*net.TCPListener).File()
		if err != nil {
			log.Fatal("Fail to get socket file descriptor:", err)
		}
		listenerFd := listenerFile.Fd()

		os.Setenv("_GRACEFUL_RESTART", "true")
		execSpec := &syscall.ProcAttr{
			Env:   os.Environ(),
			Files: []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd(), listenerFd},
		}
		fork, err := syscall.ForkExec(os.Args[0], os.Args, execSpec)
		if err != nil {
			return fmt.Errorf("failed to forkexec: %v", err)
		}
		log.Infof("start new process success, pid %d.", fork)
	}

	timeout := time.NewTimer(time.Minute)
	wait := make(chan struct{})
	go func() {
		s.wg.Wait()
		wait <- struct{}{}
	}()

	select {
	case <-timeout.C:
		log.Error("server: wait timeout when closing the service")
		return nil
	case <-wait:
		log.Info("server: all goroutines have been done")
		return nil
	}
}
// pushMetric pushes metrics in the background.
func pushMetric(addr string, interval time.Duration) {
	if interval == zeroDuration || len(addr) == 0 {
		log.Info("disable Prometheus push client")
		return
	}
	log.Infof("start Prometheus push client with server addr %s and interval %d", addr, interval)
	go prometheusPushClient(addr, interval)
}