func main() {
    var err error
    // Parse cmd-line arguments
    flag.Parse()
    log.Info("web ver: \"%s\" start", ver.Version)
    if err = InitConfig(); err != nil {
        panic(err)
    }
    // Set max procs
    runtime.GOMAXPROCS(Conf.MaxProc)
    // init log
    log.LoadConfiguration(Conf.Log)
    defer log.Close()
    // init zookeeper
    zkConn, err := InitZK()
    if err != nil {
        if zkConn != nil {
            zkConn.Close()
        }
        panic(err)
    }
    // start pprof http
    perf.Init(Conf.PprofBind)
    // start http listen.
    StartHTTP()
    // process init
    if err = process.Init(Conf.User, Conf.Dir, Conf.PidFile); err != nil {
        panic(err)
    }
    // init signals, then block waiting for signals
    signalCH := InitSignal()
    HandleSignal(signalCH)
    log.Info("web stop")
}
// RegisterTemp creates an ephemeral sequential node and watches it; if the
// node is dropped, a SIGQUIT is sent to the current process.
func RegisterTemp(conn *zk.Conn, fpath string, data []byte) error {
    tpath, err := conn.Create(path.Join(fpath)+"/", data, zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll))
    if err != nil {
        log.Error("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, string(data), err)
        return err
    }
    log.Debug("create a zookeeper node:%s", tpath)
    // watch self
    go func() {
        for {
            log.Info("zk path: \"%s\" set a watch", tpath)
            exist, _, watch, err := conn.ExistsW(tpath)
            if err != nil {
                log.Error("zk.ExistsW(\"%s\") error(%v)", tpath, err)
                log.Warn("zk path: \"%s\" set watch failed, kill itself", tpath)
                killSelf()
                return
            }
            if !exist {
                log.Warn("zk path: \"%s\" not exist, kill itself", tpath)
                killSelf()
                return
            }
            event := <-watch
            log.Info("zk path: \"%s\" receive an event %v", tpath, event)
        }
    }()
    return nil
}
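// A minimal usage sketch, not part of the original source: how a caller might
// obtain a *zk.Conn and register itself with RegisterTemp. The server address,
// parent node path, payload, and session timeout below are illustrative
// assumptions, and zk.Connect assumes the samuel/go-zookeeper client used above.
func registerTempExample() error {
    conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 30*time.Second)
    if err != nil {
        return err
    }
    // RegisterTemp creates an ephemeral sequential child under the parent node;
    // if the session drops and the node disappears, the watch goroutine kills
    // the current process so a supervisor can restart it.
    return RegisterTemp(conn, "/comet/nodes", []byte("192.168.1.10:8080"))
}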
func (self *DataSummer) PrintResultLog() {
    costime := (Resultdata.GetEndTime() - Resultdata.GetBeginTime()) / 1000 / 1000 / 1000
    if costime == 0 {
        costime = 1
    }
    log.Info("---------------------------------------------------")
    log.Info("|* %s:%s ", Resultdata.Data_type, Resultdata.Cmd_type)
    RequestCount := Resultdata.GetSendQuantity()
    log.Info("|* total: %d, Cost_sec: %d, Qps: %d", RequestCount, costime, int64(RequestCount)/costime)
    RequestSuccCount := Resultdata.GetSuccQuantity()
    log.Info("|* succ : %d, Cost_sec: %d, Qps: %d", RequestSuccCount, costime, int64(RequestSuccCount)/costime)
    RequestFailCount := Resultdata.GetFailQuantity()
    log.Info("|* fail : %d, Cost_sec: %d, Qps: %d", RequestFailCount, costime, int64(RequestFailCount)/costime)
    RequestErrorCount := Resultdata.GetErrorQuantity()
    log.Info("|* error: %d, Cost_sec: %d, Qps: %d", RequestErrorCount, costime, int64(RequestErrorCount)/costime)
    if self.total_ret != 0 {
        log.Info("|* data check %s:%s ", self.Datatype, self.Cmd)
        log.Info("|* total check num %d ", self.total_ret)
        log.Info("|* succ check num %d ", self.succ_ret)
        log.Info("|* err check num %d ", self.err_ret)
    }
    //func (self *DayReport) AddData(data, cmd string, total, succ, fail, err int32) {
    Dayreport.AddData(self.Datatype, self.Cmd, RequestCount, RequestCount-int32(self.err_ret), RequestFailCount, int32(self.err_ret))
}
func (self *ClusterConfiguration) RecoverFromWAL() error {
    writeBuffer := NewWriteBuffer("local", self.shardStore, self.wal, self.LocalServer.Id, self.config.LocalStoreWriteBufferSize)
    self.writeBuffers = append(self.writeBuffers, writeBuffer)
    self.shardStore.SetWriteBuffer(writeBuffer)
    var waitForAll sync.WaitGroup
    for _, _server := range self.servers {
        server := _server
        waitForAll.Add(1)
        if server.RaftName == self.LocalRaftName {
            self.LocalServer = server
            go func(serverId uint32) {
                log.Info("Recovering local server")
                self.recover(serverId, self.shardStore)
                log.Info("Recovered local server")
                waitForAll.Done()
            }(server.Id)
        } else {
            go func(serverId uint32) {
                if server.connection == nil {
                    server.connection = self.connectionCreator(server.ProtobufConnectionString)
                    server.Connect()
                }
                log.Info("Recovering remote server %d", serverId)
                self.recover(serverId, server)
                log.Info("Recovered remote server %d", serverId)
                waitForAll.Done()
            }(server.Id)
        }
    }
    log.Info("Waiting for servers to recover")
    waitForAll.Wait()
    return nil
}
func (s *RaftServer) Serve(l net.Listener) error {
    s.listener = l
    log.Info("Initializing Raft HTTP server")
    // Initialize and start HTTP server.
    s.httpServer = &http.Server{
        Handler: s.router,
    }
    s.router.HandleFunc("/join", s.joinHandler).Methods("POST")
    s.router.HandleFunc("/process_command/{command_type}", s.processCommandHandler).Methods("POST")
    log.Info("Raft Server Listening at %s", s.config.RaftListenString())
    go func() {
        err := s.httpServer.Serve(l)
        if !strings.Contains(err.Error(), "closed network") {
            panic(err)
        }
    }()
    started := make(chan error)
    go func() {
        started <- s.startRaft()
    }()
    err := <-started
    // time.Sleep(3 * time.Second)
    return err
}
func (f FtpBase) PutFile(srcFile string, dstFile string) (err error) {
    // fmt.Printf("PutFile: srcFile[%s] dstFile[%s] \n", srcFile, dstFile)
    if !f.isFileExistLocal(srcFile) {
        log.Info("isFileExistLocal: srcFile[%s] not exist at local", srcFile)
        return os.ErrNotExist
    }
    // create the directory on the server
    if dstFile[len(dstFile)-1] == '/' {
        dstFile = dstFile[:len(dstFile)-1]
    }
    i := len(dstFile) - 1
    for ; dstFile[i] != '/'; i-- {
    }
    dstPath := dstFile[:i]
    err = f.CreateFolderServer(dstPath)
    if err != nil {
        log.Info("CreateFolderServer[%s] failed[%s]", dstPath, err)
        return err
    }
    b, err := ioutil.ReadFile(srcFile)
    if err != nil {
        log.Info("read srcFile[%s] failed[%s]", srcFile, err)
        return err
    }
    // create the file on the server
    err = f.conn.Stor(dstFile, bytes.NewBufferString(string(b)))
    if err != nil {
        log.Info("stor srcName[%s] dstName[%s] failed[%s]", srcFile, dstFile, err)
    }
    return err
}
func (self *DayDataItem) Log() {
    log.Info("|$ %s", self.name)
    log.Info("|$ total : %d", self.msg_send)
    log.Info("|$ succ : %d", self.msg_succ)
    log.Info("|$ fail : %d", self.msg_failed)
    log.Info("|$ error : %d", self.msg_error)
}
func (f FtpBase) GetFile(srcFile string, dstFile string) (err error) {
    // fmt.Printf("GetFile: srcFile[%s] dstFile[%s] \n", srcFile, dstFile)
    // create folder
    i := len(dstFile) - 1
    for ; i >= 0 && dstFile[i] != '/'; i-- {
    }
    f.CreateFolderLocal(dstFile[:i])
    // create file
    r, err := f.conn.Retr(srcFile)
    if err != nil {
        log.Info("GetFile: f.conn.Retr[%s] error[%s]", srcFile, err)
        return err
    }
    defer r.Close()
    buf, err := ioutil.ReadAll(r)
    if err == nil {
        err = ioutil.WriteFile(dstFile, buf, 0766) //os.ModeAppend
        if err != nil {
            log.Info("ioutil.WriteFile: dstFile[%s] err[%s]", dstFile, err)
        }
    }
    return err
}
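// Aside (an observation, not code from the source): PutFile and GetFile both
// scan backwards by hand to find the parent directory of the destination path.
// For slash-separated paths, the standard library's path.Dir yields the same
// parent and also copes with paths that contain no '/' at all.
func parentDirExample() {
    fmt.Println(path.Dir("backup/2024/dump.sql")) // "backup/2024"
    fmt.Println(path.Dir("dump.sql"))             // "."
}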
func (self *ClusterConfiguration) AddPotentialServer(server *ClusterServer) {
    self.serversLock.Lock()
    defer self.serversLock.Unlock()
    self.servers = append(self.servers, server)
    self.lastServerIdUsed++
    server.Id = self.lastServerIdUsed
    log.Info("Added server to cluster config: %d, %s, %s", server.Id, server.RaftConnectionString, server.ProtobufConnectionString)
    log.Info("Checking whether this is the local server local: %s, new: %s", self.config.ProtobufConnectionString(), server.ProtobufConnectionString)

    if server.RaftName == self.LocalRaftName && self.addedLocalServer {
        panic("how did we add the same server twice ?")
    }

    // if this is the local server unblock WaitForLocalServerLoaded()
    // and set the local connection string and id
    if server.RaftName == self.LocalRaftName {
        log.Info("Added the local server")
        self.LocalServer = server
        self.addedLocalServerWait <- true
        self.addedLocalServer = true
        return
    }

    // if this isn't the local server, connect to it
    log.Info("Connecting to ProtobufServer: %s from %s", server.ProtobufConnectionString, self.config.ProtobufConnectionString())
    if server.connection == nil {
        server.connection = self.connectionCreator(server.ProtobufConnectionString)
        server.Connect()
    }
    writeBuffer := NewWriteBuffer(fmt.Sprintf("%d", server.GetId()), server, self.wal, server.Id, self.config.PerServerWriteBufferSize)
    self.writeBuffers = append(self.writeBuffers, writeBuffer)
    server.SetWriteBuffer(writeBuffer)
    server.StartHeartbeat()
    return
}
// StartHTTP starts the HTTP listeners.
func StartHTTP() {
    // external
    httpServeMux := http.NewServeMux()
    // 2
    httpServeMux.HandleFunc("/2/server/get", GetServer2)
    // 1.0
    httpServeMux.HandleFunc("/1/server/get", GetServer)
    httpServeMux.HandleFunc("/1/msg/get", GetOfflineMsg)
    httpServeMux.HandleFunc("/1/time/get", GetTime)
    // old
    httpServeMux.HandleFunc("/server/get", GetServer0)
    httpServeMux.HandleFunc("/msg/get", GetOfflineMsg0)
    httpServeMux.HandleFunc("/time/get", GetTime0)
    // internal
    httpAdminServeMux := http.NewServeMux()
    // 1.0
    httpAdminServeMux.HandleFunc("/1/admin/push/private", PushPrivate)
    httpAdminServeMux.HandleFunc("/1/admin/push/mprivate", PushMultiPrivate)
    httpAdminServeMux.HandleFunc("/1/admin/msg/del", DelPrivate)
    // old
    httpAdminServeMux.HandleFunc("/admin/push", PushPrivate)
    httpAdminServeMux.HandleFunc("/admin/msg/clean", DelPrivate)
    for _, bind := range Conf.HttpBind {
        log.Info("start http listen addr:\"%s\"", bind)
        go httpListen(httpServeMux, bind)
    }
    for _, bind := range Conf.AdminBind {
        log.Info("start admin http listen addr:\"%s\"", bind)
        go httpListen(httpAdminServeMux, bind)
    }
}
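// httpListen is referenced above but not shown here. A minimal sketch of what
// such a helper might look like, assuming it simply binds the mux to the
// address and serves until the process exits; the real helper may set
// timeouts or tune the listener differently.
func httpListenSketch(mux *http.ServeMux, bind string) {
    server := &http.Server{Handler: mux}
    ln, err := net.Listen("tcp", bind)
    if err != nil {
        log.Error("net.Listen(\"tcp\", \"%s\") error(%v)", bind, err)
        panic(err)
    }
    if err := server.Serve(ln); err != nil {
        log.Error("server.Serve(\"%s\") error(%v)", bind, err)
        panic(err)
    }
}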
func (self *WAL) openLog(logFileName string) (*log, *index, error) {
    logger.Info("Opening log file %s", logFileName)
    logFile, err := os.OpenFile(logFileName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0644)
    if err != nil {
        return nil, nil, err
    }
    log, err := newLog(logFile, self.config)
    if err != nil {
        return nil, nil, err
    }
    self.logFiles = append(self.logFiles, log)
    suffix := strings.TrimPrefix(path.Base(logFileName), "log.")
    indexFileName := path.Join(self.config.WalDir, "index."+suffix)
    logger.Info("Opening index file %s", indexFileName)
    index, err := newIndex(indexFileName)
    if err != nil {
        logger.Error("Cannot open index file %s", err)
        log.close()
        return nil, nil, err
    }
    self.logIndex = append(self.logIndex, index)
    return log, index, nil
}
// Get gets a user channel from the ChannelList.
func (l *ChannelList) Get(key string, newOne bool) (Channel, error) {
    // validate
    if err := l.validate(key); err != nil {
        return nil, err
    }
    // get a channel bucket
    b := l.Bucket(key)
    b.Lock()
    if c, ok := b.Data[key]; !ok {
        if !Conf.Auth && newOne {
            c = NewSeqChannel()
            b.Data[key] = c
            b.Unlock()
            ChStat.IncrCreate()
            log.Info("user_key:\"%s\" create a new channel", key)
            return c, nil
        } else {
            b.Unlock()
            log.Warn("user_key:\"%s\" channel not exists", key)
            return nil, ErrChannelNotExist
        }
    } else {
        b.Unlock()
        ChStat.IncrAccess()
        log.Info("user_key:\"%s\" refresh channel bucket expire time", key)
        return c, nil
    }
}
// Migrate migrates the portion of connections that don't belong to this comet.
func (l *ChannelList) Migrate(nw map[string]int) (err error) {
    migrate := false
    // check new/updated nodes
    for k, v := range nw {
        weight, ok := nodeWeightMap[k]
        // not found or weight changed
        if !ok || weight != v {
            migrate = true
            break
        }
    }
    // check deleted nodes
    if !migrate {
        for k := range nodeWeightMap {
            // node deleted
            if _, ok := nw[k]; !ok {
                migrate = true
                break
            }
        }
    }
    if !migrate {
        return
    }
    // init ketama
    ring := ketama.NewRing(ketama.Base)
    for node, weight := range nw {
        ring.AddNode(node, weight)
    }
    ring.Bake()
    // atomic update
    nodeWeightMap = nw
    CometRing = ring
    // get all the channel locks
    channels := []Channel{}
    for i, c := range l.Channels {
        c.Lock()
        for k, v := range c.Data {
            hn := ring.Hash(k)
            if hn != Conf.ZookeeperCometNode {
                channels = append(channels, v)
                delete(c.Data, k)
                log.Debug("migrate delete channel key \"%s\"", k)
            }
        }
        c.Unlock()
        log.Debug("migrate channel bucket:%d finished", i)
    }
    // close all the migrated channels
    log.Info("close all the migrate channels")
    for _, channel := range channels {
        if err := channel.Close(); err != nil {
            log.Error("channel.Close() error(%v)", err)
            continue
        }
    }
    log.Info("close all the migrate channels finished")
    return
}
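// A hedged sketch of the ownership check Migrate performs, using only the
// ketama calls seen above (NewRing, AddNode, Bake, Hash). The node names and
// weights are illustrative; a key stays on this comet only when the rebuilt
// ring hashes it back to the local node name.
func ownsKeyExample(localNode string, key string) bool {
    ring := ketama.NewRing(ketama.Base)
    ring.AddNode("comet-1", 1)
    ring.AddNode("comet-2", 2)
    ring.Bake()
    return ring.Hash(key) == localNode
}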
/**
 * Shutdown command: stop the server.
 */
func (self *Nexus) Shutdown() {
    // close all connections
    for _, conn := range self.connMap {
        log.Info("close:" + conn.RemoteAddr().String())
        conn.Close()
    }
    log.Info("Shutdown")
}
func (self *DayReport) PrintLog() {
    for _, v := range self.datalist {
        log.Info("----------daily report--------------")
        v.Log()
        v.Clear()
        log.Info("----------over--------------")
    }
}
func main() {
    parseCmds()
    utils.InitTranslations()
    utils.LoadConfig(flagConfigFile)

    if flagRunCmds {
        utils.ConfigureCmdLineLog()
    }

    pwd, _ := os.Getwd()
    l4g.Info(utils.T("mattermost.current_version"), model.CurrentVersion, model.BuildNumber, model.BuildDate, model.BuildHash)
    l4g.Info(utils.T("mattermost.entreprise_enabled"), model.BuildEnterpriseReady)
    l4g.Info(utils.T("mattermost.working_dir"), pwd)
    l4g.Info(utils.T("mattermost.config_file"), utils.FindConfigFile(flagConfigFile))

    api.NewServer()
    api.InitApi()
    web.InitWeb()

    if model.BuildEnterpriseReady == "true" {
        api.LoadLicense()
    }

    if !utils.IsLicensed && len(utils.Cfg.SqlSettings.DataSourceReplicas) > 1 {
        l4g.Critical(utils.T("store.sql.read_replicas_not_licensed.critical"))
        time.Sleep(time.Second)
        panic(fmt.Sprintf(utils.T("store.sql.read_replicas_not_licensed.critical")))
    }

    if flagRunCmds {
        runCmds()
    } else {
        api.StartServer()

        // If we allow testing then listen for manual testing URL hits
        if utils.Cfg.ServiceSettings.EnableTesting {
            manualtesting.InitManualTesting()
        }

        setDiagnosticId()
        runSecurityAndDiagnosticsJobAndForget()

        if einterfaces.GetComplianceInterface() != nil {
            einterfaces.GetComplianceInterface().StartComplianceDailyJob()
        }

        // wait for kill signal before attempting to gracefully shutdown
        // the running service
        c := make(chan os.Signal)
        signal.Notify(c, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
        <-c

        api.StopServer()
    }
}
func StopServer() { l4g.Info("Stopping Server...") manners.Close() Srv.Store.Close() hub.Stop() l4g.Info("Server stopped") }
func StopServer() { l4g.Info(utils.T("api.server.stop_server.stopping.info")) Srv.GracefulServer.Stop(TIME_TO_WAIT_FOR_CONNECTIONS_TO_CLOSE_ON_SERVER_SHUTDOWN) Srv.Store.Close() HubStop() l4g.Info(utils.T("api.server.stop_server.stopped.info")) }
func shouldPerformUpgrade(sqlStore *SqlStore, currentSchemaVersion string, expectedSchemaVersion string) bool {
    if sqlStore.SchemaVersion == currentSchemaVersion {
        l4g.Info(utils.T("store.sql.schema_out_of_date.warn"), currentSchemaVersion)
        l4g.Info(utils.T("store.sql.schema_upgrade_attempt.warn"), expectedSchemaVersion)
        return true
    }
    return false
}
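// A minimal sketch (an assumption, not taken from the source) of how a
// versioned upgrade step might use shouldPerformUpgrade together with the
// UpgradeDatabaseToVersionXX chain shown below: apply the migration only when
// the stored schema matches the version being upgraded from, then record the
// new version. saveSchemaVersion is a hypothetical helper here.
func upgradeDatabaseToVersionExample(sqlStore *SqlStore) {
    if shouldPerformUpgrade(sqlStore, "3.6.0", "3.7.0") {
        // ... apply the column/index changes for 3.7.0 here ...
        saveSchemaVersion(sqlStore, "3.7.0") // hypothetical helper
    }
}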
func StopServer() {
    l4g.Info(utils.T("api.server.stop_server.stopping.info"))
    manners.Close()
    Srv.Store.Close()
    hub.Stop()
    l4g.Info(utils.T("api.server.stop_server.stopped.info"))
}
// Creates a new server.
func NewRaftServer(config *configuration.Configuration, clusterConfig *cluster.ClusterConfiguration) *RaftServer {
    if !registeredCommands {
        registeredCommands = true
        for _, command := range internalRaftCommands {
            raft.RegisterCommand(command)
        }
    }

    s := &RaftServer{
        path:          config.RaftDir,
        clusterConfig: clusterConfig,
        notLeader:     make(chan bool, 1),
        router:        mux.NewRouter(),
        config:        config,
    }

    // Read existing name or generate a new one.
    if b, err := ioutil.ReadFile(filepath.Join(s.path, "name")); err == nil {
        s.name = string(b)
    } else {
        var i uint64
        if _, err := os.Stat("/dev/urandom"); err == nil {
            log.Info("Using /dev/urandom to initialize the raft server name")
            f, err := os.Open("/dev/urandom")
            if err != nil {
                panic(err)
            }
            defer f.Close()
            readBytes := 0
            b := make([]byte, RAFT_NAME_SIZE)
            for readBytes < RAFT_NAME_SIZE {
                n, err := f.Read(b[readBytes:])
                if err != nil {
                    panic(err)
                }
                readBytes += n
            }
            err = binary.Read(bytes.NewBuffer(b), binary.BigEndian, &i)
            if err != nil {
                panic(err)
            }
        } else {
            log.Info("Using rand package to generate raft server name")
            rand.Seed(time.Now().UnixNano())
            i = uint64(rand.Int())
        }
        s.name = fmt.Sprintf("%08x", i)
        log.Info("Setting raft name to %s", s.name)
        if err = ioutil.WriteFile(filepath.Join(s.path, "name"), []byte(s.name), 0644); err != nil {
            panic(err)
        }
    }
    return s
}
func (self *HttpServer) Close() {
    if self.conn != nil {
        log.Info("Closing http server")
        self.conn.Close()
        log.Info("Waiting for all requests to finish before killing the process")
        select {
        case <-time.After(time.Second * 5):
            log.Error("There seems to be a hanging request. Closing anyway")
        case <-self.shutdown:
        }
    }
}
func NewServer(enableProfiler bool) {
    l4g.Info(utils.T("api.server.new_server.init.info"))

    Srv = &Server{}
    Srv.Store = store.NewSqlStore()

    Srv.Router = mux.NewRouter()

    if enableProfiler {
        AttachProfiler(Srv.Router)
        l4g.Info("Enabled HTTP Profiler")
    }

    Srv.Router.NotFoundHandler = http.HandlerFunc(Handle404)
}
func (self *WAL) processClose(shouldBookmark bool) error {
    logger.Info("Closing WAL")
    for idx, logFile := range self.logFiles {
        logFile.syncFile()
        logFile.close()
        self.logIndex[idx].syncFile()
        self.logIndex[idx].close()
    }
    if shouldBookmark {
        self.bookmark()
    }
    logger.Info("Closed WAL")
    return nil
}
func (s *RaftServer) raftEventHandler(e raft.Event) {
    if e.Value() == "leader" {
        log.Info("(raft:%s) Selected as leader. Starting leader loop.", s.raftServer.Name())
        config := s.clusterConfig.GetLocalConfiguration()
        retentionSweepPeriod := config.StorageRetentionSweepPeriod.Duration
        retentionSweepTimer := time.NewTicker(retentionSweepPeriod)
        go s.raftLeaderLoop(time.NewTicker(1*time.Second), retentionSweepTimer)
    }

    if e.PrevValue() == "leader" {
        log.Info("(raft:%s) Demoted from leader. Ending leader loop.", s.raftServer.Name())
        s.notLeader <- true
    }
}
func (ss SqlStore) Close() {
    l4g.Info(utils.T("store.sql.closing.info"))
    ss.master.Db.Close()
    for _, replica := range ss.replicas {
        replica.Db.Close()
    }
}
func (self *SortedSetGenerator) checkZrange(conn redis.Conn, req_num int32, range_data int32, mytime *time.Timer) {
    var pos int32 = 0
    for ; pos < req_num; pos++ {
        select {
        case <-mytime.C:
            return
        default:
            value := self.data_list[pos%ZRANGE_ALL]
            reporter.Resultdata.AddSendQuantity()
            reply, err := redis.Values(conn.Do("ZRANGE", value.key, 0, range_data))
            //log.Info("redis operating: zrange %d 0 %d", value.key, range_data)
            if err != nil {
                log.Info("redis operating: zrange %d 0 %d error %v", value.key, range_data, err)
                reporter.Resultdata.AddFailQuantity()
            } else {
                //log.Info(" zrange %d 0 %d ret:%v", value.key, range_data, reply)
                /*type IntArrayChecker struct {
                    myuid   []int64
                    youruid []interface{}
                }*/
                datacheck := &reporter.IntArrayChecker{Myuid: value.sortedlist, Youruid: reply}
                reporter.Datasummer.AddChecker(datacheck)
            }
        }
    }
}
func (self *SortedSetGenerator) checkZrevrange(conn redis.Conn, req_num int32, mytime *time.Timer) {
    var pos int32 = 0
    for _, value := range self.data_list {
        select {
        case <-mytime.C:
            return
        default:
            if pos >= req_num {
                // a plain break here would only exit the select, not the loop
                return
            }
            reporter.Resultdata.AddSendQuantity()
            reply, err := redis.Values(conn.Do("zrevrange", value.key, 0, ZRANGE_100))
            //log.Info("redis operating: zrevrange %d 0 %d", value.key, ZRANGE_100)
            if err != nil {
                log.Info("redis operating: zrevrange %d 0 %d error %v", value.key, ZRANGE_100, err)
                reporter.Resultdata.AddFailQuantity()
            } else {
                if len(reply) > 0 {
                    reporter.Resultdata.AddSuccQuantity()
                } else {
                    reporter.Resultdata.AddErrorQuantity()
                }
            }
            pos++
        }
    }
}
func (self *CoordinatorMonitor) EndQuery(q *RunningQuery) {
    took := time.Now().Sub(q.startTime)
    if took > SLOW_QUERY_THRESHOLD {
        log.Info("Slow Query [took %fs]: remote_addr: %s, db: %s, u: %s, q: %s", took.Seconds(), q.remoteAddr, q.databaseName, q.userName, q.queryString)
    }
    self.runningQueries.Remove(q)
}
func UpgradeDatabase(sqlStore *SqlStore) {
    UpgradeDatabaseToVersion31(sqlStore)
    UpgradeDatabaseToVersion32(sqlStore)
    UpgradeDatabaseToVersion33(sqlStore)
    UpgradeDatabaseToVersion34(sqlStore)
    UpgradeDatabaseToVersion35(sqlStore)
    UpgradeDatabaseToVersion36(sqlStore)
    UpgradeDatabaseToVersion37(sqlStore)

    // If the SchemaVersion is empty then this is the first time it has run,
    // so let's set it to the current version.
    if sqlStore.SchemaVersion == "" {
        if result := <-sqlStore.system.Save(&model.System{Name: "Version", Value: model.CurrentVersion}); result.Err != nil {
            l4g.Critical(result.Err.Error())
            time.Sleep(time.Second)
            os.Exit(EXIT_VERSION_SAVE_MISSING)
        }

        sqlStore.SchemaVersion = model.CurrentVersion
        l4g.Info(utils.T("store.sql.schema_set.info"), model.CurrentVersion)
    }

    // If we're not on the current version then it's too old to be upgraded
    if sqlStore.SchemaVersion != model.CurrentVersion {
        l4g.Critical(utils.T("store.sql.schema_version.critical"), sqlStore.SchemaVersion)
        time.Sleep(time.Second)
        os.Exit(EXIT_TOO_OLD)
    }
}