func TestUmgmt(t *testing.T) {
	go serve()
	time.Sleep(1 * time.Second)

	client, err := Dial("/tmp/test-sock")
	if err != nil {
		t.Fatalf("can't connect %v", err)
	}
	request := new(Request)
	reply := new(Reply)

	callErr := client.Call("UmgmtService.Ping", request, reply)
	if callErr != nil {
		t.Fatalf("callErr: %v", callErr)
	}
	relog.Info("reply: %v", reply.Message)

	callErr = client.Call("UmgmtService.CloseListeners", reply, reply)
	if callErr != nil {
		t.Fatalf("callErr: %v", callErr)
	}
	relog.Info("reply: %v", reply.Message)

	time.Sleep(5 * time.Second)

	callErr = client.Call("UmgmtService.GracefulShutdown", reply, reply)
	if callErr != nil {
		t.Fatalf("callErr: %v", callErr)
	}
	relog.Info("reply: %v", reply.Message)
}

func Init(logPrefix string) {
	if logPrefix != "" {
		logPrefix += " "
	}
	logPrefix += fmt.Sprintf("[%v]", os.Getpid())
	f, err := logfile.Open(*logfileName, *logFrequency, *logMaxSize, *logMaxFiles)
	if err != nil {
		panic(fmt.Sprintf("unable to open logfile %s: %v", *logfileName, err))
	}
	logger := relog.New(f, logPrefix+" ",
		log.Ldate|log.Lmicroseconds|log.Lshortfile,
		relog.LogNameToLogLevel(*logLevel))
	relog.SetLogger(logger)
	if *gomaxprocs != 0 {
		runtime.GOMAXPROCS(*gomaxprocs)
		relog.Info("set GOMAXPROCS = %v", *gomaxprocs)
	}
	fdLimit := &syscall.Rlimit{Cur: *maxOpenFds, Max: *maxOpenFds}
	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, fdLimit); err != nil {
		relog.Fatal("can't Setrlimit %#v: err %v", *fdLimit, err)
	} else {
		relog.Info("set max-open-fds = %v", *maxOpenFds)
	}
}

func GcHandler(response http.ResponseWriter, request *http.Request) {
	go func() {
		// NOTE(msolomon) runtime.GC blocks the caller until the collection
		// completes, so run it in a goroutine to return the response immediately.
		relog.Info("start forced garbage collection")
		runtime.GC()
		relog.Info("finished forced garbage collection")
	}()
	data := "forced gc\n"
	response.Write([]byte(data))
}

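// A minimal sketch of wiring GcHandler into the default HTTP mux; the
// "/debug/gc" path is an assumption for illustration, not necessarily the
// path the servers actually register.
func exampleRegisterGcHandler() {
	http.HandleFunc("/debug/gc", GcHandler)
}
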
func process_action(ac chan action, sv *server) {
	var a action
	var timer *time.Timer
	ch := make(chan bool, 2)
	defer func() {
		if timer != nil {
			timer.Stop()
		}
		sv.lock.Lock()
		delete(sv.kt, a.key)
		close(ac)
		sv.lock.Unlock()
		close(ch)
	}()
	for {
		select {
		case a = <-ac:
			// A new action for this key resets the expiry timer.
			if timer != nil {
				timer.Stop()
			}
			timer = time.AfterFunc(a.exptime, func() {
				sv.s.db.Delete([]byte(a.key), sv.s.wo)
				ch <- true
			})
		case <-ch:
			relog.Info("delete succeeded")
			return
		}
	}
}

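// A hedged sketch of how an expiring key might be handed to process_action,
// assuming the action struct carries the key and exptime fields used above;
// the real scheduling path lives in the command handlers.
func exampleScheduleExpiry(sv *server, key string, ttl time.Duration) {
	sv.lock.Lock()
	ac, ok := sv.kt[key]
	if !ok {
		ac = make(chan action)
		sv.kt[key] = ac
		go process_action(ac, sv)
	}
	sv.lock.Unlock()
	// Each send resets the key's expiry timer.
	ac <- action{key: key, exptime: ttl}
}
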
func (self *SchemaInfo) Reload() {
	conn, err := self.ConnFactory()
	if err != nil {
		relog.Error("Could not get connection for reload: %v", err)
		return
	}
	defer conn.Close()
	query_for_schema_reload := fmt.Sprintf(
		"show table status where unix_timestamp(create_time) > %v",
		self.LastReload.Unix())
	self.LastReload = time.Now()
	tables, err := conn.ExecuteFetch([]byte(query_for_schema_reload), 10000)
	if err != nil {
		relog.Error("Could not get table list for reload: %v", err)
		return
	}
	if len(tables.Rows) == 0 {
		return
	}
	for _, row := range tables.Rows {
		tableName := row[0].(string)
		relog.Info("Reloading: %s", tableName)
		tableInfo := self.get(tableName)
		if tableInfo != nil {
			self.Put(tableInfo)
			self.AlterTable(tableName, 0)
		} else {
			self.CreateTable(tableName, 0)
		}
	}
}

func (server *UmgmtServer) Serve() error {
	relog.Info("started umgmt server: %v", server.listener.Addr())
	for !server.quit {
		conn, err := server.listener.Accept()
		if err != nil {
			if checkError(err, syscall.EINVAL) {
				// Accept fails with EINVAL once the listener is closed;
				// during a requested shutdown that is the expected path.
				if server.quit {
					return nil
				}
				return err
			}
			// syscall.EMFILE, syscall.ENFILE could happen here if you run
			// out of file descriptors.
			relog.Error("accept error %v", err)
			continue
		}
		server.Lock()
		server.connMap[conn] = true
		server.Unlock()
		rpc.ServeConn(conn)
		server.Lock()
		delete(server.connMap, conn)
		server.Unlock()
	}
	return nil
}

func Run_server(laddr string) error {
	listen_sock, err := net.Listen("tcp", laddr)
	if err != nil {
		panic(err)
	}
	defer listen_sock.Close()
	sv := new(server)
	store, err := newLeveldb()
	if err != nil {
		panic(err)
	}
	sv.s = store
	sv.kt = make(map[string]chan action)
	sv.lock = new(sync.RWMutex)
	defer store.db.Close()
	for {
		conn, err := listen_sock.Accept()
		if err != nil {
			relog.Warning("%s", err)
			continue
		}
		c := newConn(conn, sv)
		relog.Info("accept succeeded, client ip is %s", c.remoteAddr)
		go c.serve()
	}
}

func (self *SqlQuery) allowQueries(ConnFactory CreateConnectionFunc, cachingInfo map[string]uint64) {
	self.mu.Lock()
	defer self.mu.Unlock()
	// Record INIT_FAILED up front; it is only overwritten with OPEN once
	// every pool has opened successfully.
	atomic.StoreInt32(&self.state, INIT_FAILED)
	start := time.Now().UnixNano()
	self.schemaInfo.Open(ConnFactory, cachingInfo)
	relog.Info("Time taken to load the schema: %v ms", (time.Now().UnixNano()-start)/1e6)
	self.connPool.Open(ConnFactory)
	self.reservedPool.Open(ConnFactory)
	self.txPool.Open(ConnFactory)
	self.activeTxPool.Open()
	self.activePool.Open(ConnFactory)
	self.sessionId = Rand()
	relog.Info("Session id: %d", self.sessionId)
	atomic.StoreInt32(&self.state, OPEN)
}

func serve() {
	AddShutdownCallback(ShutdownCallback(func() error {
		relog.Error("testserver GracefulShutdown callback")
		return nil
	}))
	err := ListenAndServe("/tmp/test-sock")
	if err != nil {
		relog.Fatal("listen err:%v", err)
	}
	relog.Info("test server finished")
}

func (self *ActivePool) kill(connid int64) {
	self.Remove(connid)
	killStats.Add("Queries", 1)
	relog.Info("killing query %d", connid)
	killConn := self.connPool.Get()
	defer killConn.Recycle()
	sql := []byte(fmt.Sprintf("kill %d", connid))
	if _, err := killConn.ExecuteFetch(sql, 10000); err != nil {
		relog.Error("Could not kill query %d: %v", connid, err)
	}
}

func (self *ActiveTxPool) TransactionKiller() {
	for self.ticks.Next() {
		for _, v := range self.pool.GetTimedout(time.Duration(self.Timeout())) {
			conn := v.(*TxConnection)
			relog.Info("killing transaction %d", conn.transactionId)
			killStats.Add("Transactions", 1)
			conn.Close()
			conn.discard()
		}
	}
}

func unmarshalFile(name string, val interface{}) {
	if name != "" {
		data, err := ioutil.ReadFile(name)
		if err != nil {
			relog.Fatal("could not read %v: %v", name, err)
		}
		if err = json.Unmarshal(data, val); err != nil {
			relog.Fatal("could not unmarshal %v: %v", name, err)
		}
	}
	data, _ := json.MarshalIndent(val, "", " ")
	relog.Info("config: %s\n", data)
}

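// A minimal usage sketch, assuming a hypothetical exampleConfig struct and
// file name; the real config types are defined elsewhere in this package.
type exampleConfig struct {
	Port     int `json:"port"`
	PoolSize int `json:"pool_size"`
}

func exampleLoadConfig() {
	var cfg exampleConfig
	// Parses the file if the name is non-empty, then logs the effective
	// config (including untouched defaults) as indented JSON.
	unmarshalFile("example.json", &cfg)
}
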
func (self *SqlQuery) disallowQueries() {
	// Set this before obtaining the lock so new incoming requests
	// can be served "unavailable" immediately.
	atomic.StoreInt32(&self.state, SHUTTING_DOWN)
	relog.Info("Stopping query service: %d", self.sessionId)
	self.activeTxPool.WaitForEmpty()
	self.mu.Lock()
	defer self.mu.Unlock()
	atomic.StoreInt32(&self.state, CLOSED)
	self.activePool.Close()
	self.schemaInfo.Close()
	self.activeTxPool.Close()
	self.txPool.Close()
	self.reservedPool.Close()
	self.connPool.Close()
	self.sessionId = 0
}

func (service *UmgmtService) Ping(request *Request, reply *Reply) error {
	relog.Info("ping")
	reply.Message = "pong"
	return nil
}

func readRequest(b *bufio.Reader) (req *request, err error) {
	tp := textproto.NewReader(b)
	req = new(request)
	var s string
	if s, err = tp.ReadLine(); err != nil {
		return nil, err
	}
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()
	var f []string
	if f = strings.Split(s, " "); len(f) < 2 {
		return req, &badStringError{"malformed agent request", s}
	}
	param_count := len(f)
	req.method = f[0]
	relog.Info("request method is [%s]", req.method)
	switch req.method {
	case "set", "add", "replace":
		if param_count < 4 || param_count > 5 {
			return req, &badStringError{"invalid request param count", strconv.Itoa(param_count)}
		}
		req.key = make([]string, 1)
		req.key[0] = f[1]
		req.exptime, err = time.ParseDuration(f[2])
		if err != nil {
			return req, &badStringError{"expire time is invalid", f[2]}
		}
		req.value_len, err = strconv.Atoi(f[3])
		if err != nil {
			return req, &badStringError{"data size is invalid", f[3]}
		}
		if param_count == 5 {
			if err = req.set_noreply(f[4]); err != nil {
				return req, err
			}
		}
		if req.value_len > max_value_size {
			return req, &badStringError{"invalid data size", strconv.Itoa(req.value_len)}
		}
		req.value = make([][]byte, 1)
		req.value[0], err = ioutil.ReadAll(io.LimitReader(b, int64(req.value_len)))
		if err != nil {
			return nil, err
		}
	case "get":
		if param_count > (2 + max_key_count) {
			return req, &badStringError{"invalid request param count", strconv.Itoa(param_count)}
		}
		req.key = f[1:]
	case "delete":
		if param_count > 3 {
			return req, &badStringError{"invalid request param count", strconv.Itoa(param_count)}
		}
		req.key = make([]string, 1)
		req.key[0] = f[1]
		if param_count == 3 {
			if err = req.set_noreply(f[2]); err != nil {
				return req, err
			}
		}
	case "touch":
		// param_count must be exactly 3: "touch <key> <exptime>".
		if param_count != 3 {
			return req, &badStringError{"invalid request param count", strconv.Itoa(param_count)}
		}
		req.key = make([]string, 1)
		req.key[0] = f[1]
		req.exptime, err = time.ParseDuration(f[2])
		if err != nil {
			return req, &badStringError{"expire time is invalid", f[2]}
		}
	case "mset":
		l := len(f) - 1
		if l%2 != 0 {
			return req, &badStringError{"invalid request param count", strconv.Itoa(param_count)}
		}
		req.key = make([]string, l/2)
		req.value = make([][]byte, l/2)
		for i, j := 0, 0; j < l; i, j = i+1, j+2 {
			req.key[i] = f[j+1]
			req.value[i] = []byte(f[j+2])
		}
	}
	return req, nil
}

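// A hedged sketch of the wire format readRequest parses, assuming
// time.ParseDuration syntax for expiry times (e.g. "30s"). A hypothetical
// driver, not part of the server:
func exampleReadRequest() {
	r := bufio.NewReader(strings.NewReader("set mykey 30s 5\nhello"))
	req, err := readRequest(r)
	if err != nil {
		relog.Error("parse failed: %v", err)
		return
	}
	relog.Info("parsed method=%s key=%v value=%q", req.method, req.key, req.value[0])
}
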
func SigTermHandler(signal os.Signal) {
	relog.Info("SigTermHandler")
	defaultService.closeListeners()
	time.Sleep(lameDuckPeriod)
	defaultService.gracefulShutdown()
}

func main() {
	memProfileRate := flag.Int("mem-profile-rate", 512*1024, "profile every n bytes allocated")
	maxOpenFds := flag.Uint64("max-open-fds", 32768, "max open file descriptors")
	configFile := flag.String("config", "", "config file name")
	dbConfigFile := flag.String("dbconfig", "", "db config file name")
	lameDuckPeriod := flag.Float64("lame-duck-period", DefaultLameDuckPeriod, "how long to give in-flight transactions to finish")
	rebindDelay := flag.Float64("rebind-delay", DefaultRebindDelay, "artificial delay before rebinding a hijacked listener")
	logfileName := flag.String("logfile", "/dev/stderr", "base log file name")
	logFrequency := flag.Int64("logfile.frequency", 0, "rotation frequency in seconds")
	logMaxSize := flag.Int64("logfile.maxsize", 0, "max file size in bytes")
	logMaxFiles := flag.Int64("logfile.maxfiles", 0, "max number of log files")
	queryLog := flag.String("querylog", "", "for testing: log all queries to this file")
	flag.Parse()

	exportBinaryVersion()
	runtime.MemProfileRate = *memProfileRate

	f, err := logfile.Open(*logfileName, *logFrequency, *logMaxSize, *logMaxFiles)
	if err != nil {
		panic(fmt.Sprintf("unable to open logfile %s: %v", *logfileName, err))
	}
	logger := relog.New(f, "vtocc ", log.Ldate|log.Lmicroseconds|log.Lshortfile, relog.DEBUG)
	relog.SetLogger(logger)
	if *queryLog != "" {
		if f, err = os.OpenFile(*queryLog, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644); err == nil {
			ts.QueryLogger = relog.New(f, "", log.Ldate|log.Lmicroseconds, relog.DEBUG)
		}
	}
	unmarshalFile(*configFile, &config)
	unmarshalFile(*dbConfigFile, &dbconfig)
	// work-around for jsonism: JSON numbers decode as float64
	if v, ok := dbconfig["port"].(float64); ok {
		dbconfig["port"] = int(v)
	}

	fdLimit := &syscall.Rlimit{Cur: *maxOpenFds, Max: *maxOpenFds}
	if err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, fdLimit); err != nil {
		relog.Fatal("can't Setrlimit %#v: err %v", *fdLimit, err)
	} else {
		relog.Info("set max-open-fds = %v", *maxOpenFds)
	}

	snitch.RegisterCommand("reload_schema", "Rescan the schema for new tables", ReloadHandler)
	snitch.Register()

	qm := &OccManager{config, dbconfig}
	rpc.Register(qm)

	ts.StartQueryService(
		config.PoolSize,
		config.TransactionCap,
		config.TransactionTimeout,
		config.MaxResultSize,
		config.QueryCacheSize,
		config.SchemaReloadTime,
		config.QueryTimeout,
		config.IdleTimeout,
	)
	ts.AllowQueries(ts.GenericConnectionCreator(dbconfig), nil)

	rpc.HandleHTTP()
	jsonrpc.ServeHTTP()
	jsonrpc.ServeRPC()
	bsonrpc.ServeHTTP()
	bsonrpc.ServeRPC()
	relog.Info("started vtocc %v", config.Port)

	// we delegate our startup to the micromanagement server so these actions
	// will occur after we have obtained our socket.
	usefulLameDuckPeriod := float64(config.QueryTimeout + 1)
	if usefulLameDuckPeriod > *lameDuckPeriod {
		*lameDuckPeriod = usefulLameDuckPeriod
		relog.Info("readjusted -lame-duck-period to %f", *lameDuckPeriod)
	}
	umgmt.SetLameDuckPeriod(float32(*lameDuckPeriod))
	umgmt.SetRebindDelay(float32(*rebindDelay))
	umgmt.AddStartupCallback(func() {
		umgmt.StartHttpServer(fmt.Sprintf(":%v", config.Port))
	})
	umgmt.AddStartupCallback(func() {
		sighandler.SetSignalHandler(syscall.SIGTERM, umgmt.SigTermHandler)
	})
	umgmt.AddCloseCallback(func() {
		ts.DisallowQueries()
	})
	umgmt.AddShutdownCallback(func() error {
		HandleGracefulShutdown()
		return nil
	})

	umgmtSocket := fmt.Sprintf(config.UmgmtSocket, config.Port)
	if umgmtErr := umgmt.ListenAndServe(umgmtSocket); umgmtErr != nil {
		relog.Error("umgmt.ListenAndServe err: %v", umgmtErr)
	}
	relog.Info("done")
}

func HandleGracefulShutdown() {
	relog.Info("HandleGracefulShutdown")
}

func ListenAndServe(addr string) error {
	rpc.Register(&defaultService)
	DefaultServer = new(UmgmtServer)
	DefaultServer.connMap = make(map[net.Conn]bool)
	defer DefaultServer.Close()

	var umgmtClient *Client
	for i := 2; i > 0; i-- {
		l, e := net.Listen("unix", addr)
		if e != nil {
			if checkError(e, syscall.EADDRINUSE) {
				// The address is taken: ask the incumbent server to drop
				// its listeners so we can take over.
				var clientErr error
				umgmtClient, clientErr = Dial(addr)
				if clientErr == nil {
					closeErr := umgmtClient.CloseListeners()
					if closeErr != nil {
						relog.Error("closeErr:%v", closeErr)
					}
					// wait for rpc to finish
					if rebindDelay > 0.0 {
						relog.Info("delaying rebind: %vs", rebindDelay)
						time.Sleep(rebindDelay)
					}
					continue
				} else if checkError(clientErr, syscall.ECONNREFUSED) {
					// Nothing is serving the socket; it is stale, so
					// remove it and retry the bind.
					if unlinkErr := syscall.Unlink(addr); unlinkErr != nil {
						relog.Error("can't unlink %v err:%v", addr, unlinkErr)
					}
				} else {
					return e
				}
			} else {
				return e
			}
		} else {
			DefaultServer.listener = l
			break
		}
	}
	if DefaultServer.listener == nil {
		panic("unable to rebind umgmt socket")
	}
	// Register the umgmt server itself for dropping - this seems like
	// the common case. I can't see when you *wouldn't* want to drop yourself.
	defaultService.addListener(DefaultServer)
	defaultService.addShutdownCallback(func() error {
		return DefaultServer.handleGracefulShutdown()
	})
	// Fire off the startup callbacks. If these bind ports, they should
	// call AddListener.
	for e := defaultService.startupCallbacks.Front(); e != nil; e = e.Next() {
		if startupCallback, ok := e.Value.(StartupCallback); ok {
			startupCallback()
		} else {
			relog.Error("bad callback %T %v", e.Value, e.Value)
		}
	}
	if umgmtClient != nil {
		go func() {
			// Give the previous server its lame-duck period before asking
			// it to shut down completely.
			time.Sleep(lameDuckPeriod)
			umgmtClient.GracefulShutdown()
			umgmtClient.Close()
		}()
	}
	return DefaultServer.Serve()
}

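// A hedged usage sketch of the rebind flow above: register a startup
// callback that binds the application's ports, then hand control to
// ListenAndServe. The socket path here is illustrative only.
func exampleServe() {
	AddStartupCallback(func() {
		// Bind application listeners here and register them with
		// AddListener so CloseListeners can drop them.
	})
	if err := ListenAndServe("/tmp/example-umgmt-sock"); err != nil {
		relog.Fatal("umgmt serve failed: %v", err)
	}
}
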
func (self *ActiveTxPool) Open() {
	relog.Info("Starting transaction id: %d", self.lastId)
	go self.TransactionKiller()
}