// auth for goim handshake with client, use rsa & aes.
func (server *Server) auth(rr io.Reader, wr io.Writer, fr Flusher, dbm cipher.BlockMode, p *Proto) (subKey string, heartbeat time.Duration, err error) {
	log.Debug("get auth request protocol")
	if err = server.readRequest(rr, p); err != nil {
		return
	}
	if p.Operation != OP_AUTH {
		log.Warn("auth operation not valid: %d", p.Operation)
		err = ErrOperation
		return
	}
	if p.Body, err = server.cryptor.Decrypt(dbm, p.Body); err != nil {
		log.Error("auth decrypt client proto error(%v)", err)
		return
	}
	if subKey, heartbeat, err = server.operator.Connect(p); err != nil {
		log.Error("operator.Connect error(%v)", err)
		return
	}
	log.Debug("send auth response protocol")
	p.Body = nil
	p.Operation = OP_AUTH_REPLY
	if err = server.sendResponse(wr, fr, p); err != nil {
		log.Error("[%s] server.SendResponse() error(%v)", subKey, err)
	}
	return
}
/*
 * GameStateFinishing
 * All bets are over. Now collect the words made by users, compute the winner, and award them the pot amount.
 * BREAKS ON: computing winner finished
 */
func (g *Game) GameStateFinishing() {
	defer func() {
		recover()
	}()
	for {
		select {
		case <-g.stateFinishing:
			log.Debug("STATE: finishing")
			winnerIds := make([]string, 0)
			winners := g.computeWinner()
			if len(winners) > 0 {
				winnerShare := g.PotAmount / len(winners)
				for _, p := range winners {
					if p != nil {
						p.Cash += winnerShare
						winnerIds = append(winnerIds, strconv.Itoa(p.Id))
					}
				}
				log.Debug("Winners: %v", winnerIds)
			} else {
				log.Debug("No winner. House wins the pot.")
			}
			g.sendUpdate(ACT_GAME_OVER, strings.Join(winnerIds, ","), 0, nil)
			time.Sleep(time.Millisecond * DUR_WAIT_GAME_OVER)
			log.Debug("---- end of game ----")
			g.State = GS_WAITING
			g.stateWaiting <- 1
		}
	}
}
func (self *HttpServer) createDbUser(w libhttp.ResponseWriter, r *libhttp.Request) {
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		w.WriteHeader(libhttp.StatusInternalServerError)
		w.Write([]byte(err.Error()))
		return
	}
	newUser := &NewUser{}
	err = json.Unmarshal(body, newUser)
	if err != nil {
		w.WriteHeader(libhttp.StatusBadRequest)
		w.Write([]byte(err.Error()))
		return
	}
	db := r.URL.Query().Get(":db")
	self.tryAsDbUserAndClusterAdmin(w, r, func(u User) (int, interface{}) {
		username := newUser.Name
		if err := self.userManager.CreateDbUser(u, db, username, newUser.Password); err != nil {
			log.Error("Cannot create user: %s", err)
			return errorToStatusCode(err), err.Error()
		}
		log.Debug("Created user %s", username)
		if newUser.IsAdmin {
			err = self.userManager.SetDbAdmin(u, db, newUser.Name, true)
			if err != nil {
				return libhttp.StatusInternalServerError, err.Error()
			}
		}
		log.Debug("Successfully created user %s", username)
		return libhttp.StatusOK, nil
	})
}
func (self *WAL) processAppendEntry(e *appendEntry) {
	nextRequestNumber := self.state.getNextRequestNumber()
	e.request.RequestNumber = proto.Uint32(nextRequestNumber)
	if len(self.logFiles) == 0 {
		if _, err := self.createNewLog(nextRequestNumber); err != nil {
			e.confirmation <- &confirmation{0, err}
			return
		}
		self.state.FirstSuffix = nextRequestNumber
	}
	lastLogFile := self.logFiles[len(self.logFiles)-1]
	self.assignSequenceNumbers(e.shardId, e.request)
	logger.Debug("appending request %d", e.request.GetRequestNumber())
	err := lastLogFile.appendRequest(e.request, e.shardId)
	if err != nil {
		e.confirmation <- &confirmation{0, err}
		return
	}
	self.state.CurrentFileOffset = self.logFiles[len(self.logFiles)-1].offset()
	self.requestsSinceLastIndex++
	self.requestsSinceLastBookmark++
	self.requestsSinceLastFlush++
	self.requestsSinceRotation++
	logger.Debug("requestsSinceRotation: %d", self.requestsSinceRotation)
	if rotated, err := self.rotateTheLogFile(nextRequestNumber); err != nil || rotated {
		e.confirmation <- &confirmation{e.request.GetRequestNumber(), err}
		return
	}
	self.conditionalBookmarkAndIndex()
	e.confirmation <- &confirmation{e.request.GetRequestNumber(), nil}
}
func (self *Passthrough) Yield(seriesIncoming *protocol.Series) (bool, error) {
	log.Debug("PassthroughEngine YieldSeries %d", len(seriesIncoming.Points))
	self.limiter.calculateLimitAndSlicePoints(seriesIncoming)
	if len(seriesIncoming.Points) == 0 {
		return false, nil
	}
	if self.series == nil {
		self.series = seriesIncoming
	} else if self.series.GetName() != seriesIncoming.GetName() {
		log.Debug("Yielding to %s: %s", self.next.Name(), self.series)
		ok, err := self.next.Yield(self.series)
		if !ok || err != nil {
			return ok, err
		}
		self.series = seriesIncoming
	} else if len(self.series.Points) > self.maxPointsInResponse {
		log.Debug("Yielding to %s: %s", self.next.Name(), self.series)
		ok, err := self.next.Yield(self.series)
		if !ok || err != nil {
			return ok, err
		}
		self.series = seriesIncoming
	} else {
		self.series = common.MergeSeries(self.series, seriesIncoming)
	}
	return !self.limiter.hitLimit(seriesIncoming.GetName()), nil
}
func (c *DefaultServerCodec) ReadRequestHeader(rd *bufio.Reader, proto *Proto) (err error) {
	if err = binary.Read(rd, binary.BigEndian, &proto.PackLen); err != nil {
		log.Error("packLen: binary.Read() error(%v)", err)
		return
	}
	log.Debug("packLen: %d", proto.PackLen)
	if proto.PackLen > maxPackLen {
		return ErrProtoPackLen
	}
	if err = binary.Read(rd, binary.BigEndian, &proto.HeaderLen); err != nil {
		log.Error("headerLen: binary.Read() error(%v)", err)
		return
	}
	log.Debug("headerLen: %d", proto.HeaderLen)
	if proto.HeaderLen != rawHeaderLen {
		return ErrProtoHeaderLen
	}
	if err = binary.Read(rd, binary.BigEndian, &proto.Ver); err != nil {
		log.Error("protoVer: binary.Read() error(%v)", err)
		return
	}
	log.Debug("protoVer: %d", proto.Ver)
	if err = binary.Read(rd, binary.BigEndian, &proto.Operation); err != nil {
		log.Error("Operation: binary.Read() error(%v)", err)
		return
	}
	log.Debug("operation: %d", proto.Operation)
	if err = binary.Read(rd, binary.BigEndian, &proto.SeqId); err != nil {
		log.Error("seqId: binary.Read() error(%v)", err)
		return
	}
	log.Debug("seqId: %d", proto.SeqId)
	return
}
func ReadProto(rd *bufio.Reader, proto *Proto) (err error) {
	// read
	if err = binary.Read(rd, binary.BigEndian, &proto.PackLen); err != nil {
		return
	}
	log.Debug("packLen: %d", proto.PackLen)
	if err = binary.Read(rd, binary.BigEndian, &proto.HeaderLen); err != nil {
		return
	}
	log.Debug("headerLen: %d", proto.HeaderLen)
	if err = binary.Read(rd, binary.BigEndian, &proto.Ver); err != nil {
		return
	}
	log.Debug("ver: %d", proto.Ver)
	if err = binary.Read(rd, binary.BigEndian, &proto.Operation); err != nil {
		return
	}
	log.Debug("operation: %d", proto.Operation)
	if err = binary.Read(rd, binary.BigEndian, &proto.SeqId); err != nil {
		return
	}
	log.Debug("seqId: %d", proto.SeqId)
	// any error from ReadBody is returned as-is
	err = ReadBody(rd, proto)
	return
}
func (t *Timer) del(td *TimerData) {
	var (
		i    = td.index
		last = len(t.timers) - 1
	)
	if i < 0 || i > last || t.timers[i] != td {
		// already removed, usually by expire
		if Debug {
			log.Debug("timer del i: %d, last: %d, %p", i, last, td)
		}
		return
	}
	if i != last {
		t.swap(i, last)
		t.down(i, last)
		t.up(i)
	}
	// the removed item is the last node
	t.timers[last].index = -1 // for safety
	t.timers = t.timers[:last]
	if Debug {
		log.Debug("timer: remove item key: %s, expire: %s, index: %d", td.Key, td.ExpireString(), td.index)
	}
	return
}
// Initializes a new Droplet application object
func (a *Application) Initialize() error {
	log.Debug("Initializing Droplet Application")
	log.Debug("Configuring Application")
	err := a.Configure()
	if err != nil {
		log.Error("Error configuring application: %s", err)
		panic(err)
	}
	log.Debug("Configuring Server Instance")
	err = a.ConfigureServer()
	if err != nil {
		log.Error("Error configuring server: %s", err)
		panic(err)
	}
	log.Debug("Configuring Router")
	err = a.ConfigureRouter()
	if err != nil {
		log.Error("Error configuring router: %s", err)
		panic(err)
	}
	a.Handler = func(w http.ResponseWriter, r *http.Request) {
		a.Router.ServeHTTP(w, r)
	}
	return nil
}
// In the case where this server is running and another one in the
// cluster stops responding, at some point this server will have to
// just write requests to disk. When the downed server comes back up,
// it's this server's responsibility to send out any writes that were
// queued up. If the yield function returns nil then the request is
// committed.
func (self *WAL) RecoverServerFromRequestNumber(requestNumber uint32, shardIds []uint32, yield func(request *protocol.Request, shardId uint32) error) error {
	// don't replay if we don't have any log files yet
	if len(self.logFiles) == 0 {
		return nil
	}
	firstIndex := 0
	firstOffset := int64(-1)
	// find the log file from which replay will start if the request
	// number is in range, otherwise replay from all log files
	if !self.isInRange(requestNumber) {
		return nil
	}
	for idx, logIndex := range self.logIndex {
		logger.Debug("Trying to find request %d in %s", requestNumber, self.logFiles[idx].file.Name())
		if firstOffset = logIndex.requestOffset(requestNumber); firstOffset != -1 {
			logger.Debug("Found request %d in %s at offset %d", requestNumber, self.logFiles[idx].file.Name(), firstOffset)
			firstIndex = idx
			break
		}
	}
	// the request must be at the end of the current log file
	if firstOffset == -1 {
		firstIndex = len(self.logIndex) - 1
		firstOffset = self.logIndex[firstIndex].requestOrLastOffset(requestNumber)
	}
outer:
	for idx := firstIndex; idx < len(self.logFiles); idx++ {
		logFile := self.logFiles[idx]
		if idx > firstIndex {
			firstOffset = -1
		}
		logger.Info("Replaying from %s:%d", logFile.file.Name(), firstOffset)
		count := 0
		ch, stopChan := logFile.dupAndReplayFromOffset(shardIds, firstOffset, requestNumber)
		for {
			x := <-ch
			if x == nil {
				logger.Info("%s yielded %d requests", logFile.file.Name(), count)
				continue outer
			}
			if x.err != nil {
				return x.err
			}
			logger.Debug("Yielding request %d", x.request.GetRequestNumber())
			if err := yield(x.request, x.shardId); err != nil {
				logger.Debug("Stopping replay due to error: %s", err)
				stopChan <- struct{}{}
				return err
			}
			count++
		}
		close(stopChan)
	}
	return nil
}
func (ctx *CommunicationContext) writeLoop() {
	ctx.communicating.Add(1)
	defer ctx.communicating.Done()
	log4go.Debug("write loop started")
	for ctx.isOpen {
		tckt, ok := <-ctx.Output
		if ok {
			log4go.Debug("found new output in output channel")
			err := ctx.sender.Send(tckt.msg)
			if err != nil {
				log4go.Warn("error while sending to device: %v", err.Error())
				tckt.error <- err
			} else {
				tckt.isSend = true
				tckt.send <- tckt.msg
			}
		} else {
			log4go.Warn("output channel closed")
		}
	}
	log4go.Debug("write loop finished")
}
func (self *Shard) Query(querySpec *parser.QuerySpec, processor engine.Processor) error {
	self.closeLock.RLock()
	defer self.closeLock.RUnlock()
	if self.closed {
		return fmt.Errorf("Shard is closed")
	}
	if querySpec.IsListSeriesQuery() {
		return fmt.Errorf("List series queries should never come to the shard")
	} else if querySpec.IsDeleteFromSeriesQuery() {
		return self.executeDeleteQuery(querySpec, processor)
	}
	if !self.hasReadAccess(querySpec) {
		return errors.New("User does not have access to one or more of the series requested.")
	}
	switch t := querySpec.SelectQuery().FromClause.Type; t {
	case parser.FromClauseArray:
		log.Debug("Shard %s: running a regular query", self.db.Path())
		return self.executeArrayQuery(querySpec, processor)
	case parser.FromClauseMerge, parser.FromClauseInnerJoin:
		log.Debug("Shard %s: running a merge query", self.db.Path())
		return self.executeMergeQuery(querySpec, processor, t)
	default:
		panic(fmt.Errorf("Unknown from clause type %s", t))
	}
}
func printTimer(timer *Timer) {
	log.Debug("----------timers: %d ----------", len(timer.timers))
	for i := 0; i < len(timer.timers); i++ {
		log.Debug("timer: %s, %s, index: %d", timer.timers[i].Key, timer.timers[i].ExpireString(), timer.timers[i].index)
	}
	log.Debug("--------------------")
}
// auth for goim handshake with client, use rsa & aes.
func (server *Server) auth(rd *bufio.Reader, wr *bufio.Writer, dbm cipher.BlockMode, proto *Proto) (subKey string, heartbeat time.Duration, bucket *Bucket, channel *Channel, err error) {
	log.Debug("get auth request protocol")
	if err = server.readRequest(rd, proto); err != nil {
		return
	}
	if proto.Operation != OP_AUTH {
		log.Warn("auth operation not valid: %d", proto.Operation)
		err = ErrOperation
		return
	}
	if proto.Body, err = server.cryptor.Decrypt(dbm, proto.Body); err != nil {
		log.Error("auth decrypt client proto error(%v)", err)
		return
	}
	if subKey, heartbeat, err = server.operator.Connect(proto); err != nil {
		log.Error("operator.Connect error(%v)", err)
		return
	}
	// TODO how to reuse channel
	// register key->channel
	bucket = server.Bucket(subKey)
	channel = NewChannel(Conf.CliProto, Conf.SvrProto)
	bucket.Put(subKey, channel)
	log.Debug("send auth response protocol")
	proto.Body = nil
	proto.Operation = OP_AUTH_REPLY
	if err = server.sendResponse(wr, proto); err != nil {
		log.Error("[%s] server.SendResponse() error(%v)", subKey, err)
	}
	return
}
func (self *FoodPriceService) getDistrictFoodPrice(district string) (string, error) {
	foodPriceMsg, hitCache := districtFoodPriceMsgCache[district]
	if hitCache {
		l4g.Debug("Hit districtFoodPriceMsgCache, district: %s", district)
		return foodPriceMsg, nil
	}
	var districtFoodPrices []*DistrictFoodPrice
	entities, err := self.dbHelper.GetLatestDistrictFoodPriceEntity(district, int64(overTime))
	if err != nil {
		return "", err
	}
	if len(entities) > 0 {
		districtFoodPrices = self.convertDistrictEntityToFoodPrice(entities)
	}
	if len(districtFoodPrices) == 0 {
		var e error
		l4g.Debug("get %s food price from web", district)
		districtFoodPrices, e = FetchDistrictFoodPrice(district)
		if e != nil {
			return "", e
		}
	}
	if len(districtFoodPrices) == 0 {
		return "无记录", nil // "no records"
	}
	msg := self.formatDistrictFoodPrice(districtFoodPrices, district)
	url, _ := GetDistrictFoodPriceUrl(district)
	msg = msg + fmt.Sprintf("\n详细信息请点击:%s", url) // "for details, click: <url>"
	districtFoodPriceMsgCache[district] = msg
	return msg, nil
}
// Configure the Application
func (a *Application) Configure() error {
	log.Debug("Reading logging config file")
	log.LoadConfiguration("./loggers.xml")
	log.Debug("Reading application config file")
	file, err := ioutil.ReadFile("./config.json")
	if err != nil {
		log.Error("Error opening config file: %s", err)
		panic(err)
	}
	log.Info("Config Loaded:\n\n" + string(file))
	var config Config
	err = json.Unmarshal(file, &config)
	if err != nil {
		log.Error("Error parsing config file: %s", err)
		panic(err)
	}
	debug, ok := config["debug"].(bool)
	if !ok {
		debug = false
	}
	a.Config = config
	a.Debug = debug
	return nil
}
// handleTcpConn handles a long-lived tcp connection.
func handleTcpConn(conn net.Conn, readerChan chan *bufio.Reader) {
	addr := conn.RemoteAddr().String()
	log.Debug("<%s> handleTcpConn routine start", addr)
	reader := newBufioReader(readerChan, conn)
	if args, err := parseCmd(reader); err == nil {
		// return buffer bufio.Reader
		putBufioReader(readerChan, reader)
		switch args[0] {
		case CmdSubscribe:
			subscribeTcpHandle(conn, args[1:])
		default:
			conn.Write(ParamErrorReply)
			log.Warn("<%s> unknown cmd \"%s\"", addr, args[0])
		}
	} else {
		// return buffer bufio.Reader
		putBufioReader(readerChan, reader)
		log.Error("<%s> parseCmd() error(%v)", addr, err)
	}
	// close the connection
	if err := conn.Close(); err != nil {
		log.Error("<%s> conn.Close() error(%v)", addr, err)
	}
	log.Debug("<%s> handleTcpConn routine stop", addr)
}
func InitWeb() {
	l4g.Debug("Initializing web routes")

	staticDir := utils.FindDir("web/static")
	l4g.Debug("Using static directory at %v", staticDir)
	api.Srv.Router.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(staticDir))))

	api.Srv.Router.Handle("/", api.AppHandler(root)).Methods("GET")
	api.Srv.Router.Handle("/login", api.AppHandler(login)).Methods("GET")
	api.Srv.Router.Handle("/signup_team_confirm/", api.AppHandler(signupTeamConfirm)).Methods("GET")
	api.Srv.Router.Handle("/signup_team_complete/", api.AppHandler(signupTeamComplete)).Methods("GET")
	api.Srv.Router.Handle("/signup_user_complete/", api.AppHandler(signupUserComplete)).Methods("GET")
	api.Srv.Router.Handle("/logout", api.AppHandler(logout)).Methods("GET")
	api.Srv.Router.Handle("/verify", api.AppHandler(verifyEmail)).Methods("GET")
	api.Srv.Router.Handle("/find_team", api.AppHandler(findTeam)).Methods("GET")
	api.Srv.Router.Handle("/reset_password", api.AppHandler(resetPassword)).Methods("GET")

	csr := api.Srv.Router.PathPrefix("/channels").Subrouter()
	csr.Handle("/{name:[A-Za-z0-9-]+(__)?[A-Za-z0-9-]+}", api.UserRequired(getChannel)).Methods("GET")

	watchAndParseTemplates()
}
func (c *DefaultServerCodec) ReadRequestBody(rd *bufio.Reader, proto *Proto) (err error) {
	var (
		n       = int(0)
		t       = int(0)
		bodyLen = int(proto.PackLen - int32(proto.HeaderLen))
	)
	log.Debug("read body len: %d", bodyLen)
	if bodyLen > 0 {
		proto.Body = make([]byte, bodyLen)
		// no read deadline here: ReadRequestHeader is always called first, so the body is already incoming
		for {
			if t, err = rd.Read(proto.Body[n:]); err != nil {
				log.Error("body: buf.Read() error(%v)", err)
				return
			}
			if n += t; n == bodyLen {
				log.Debug("body: rd.Read() fill ok")
				break
			} else if n < bodyLen {
				log.Debug("body: rd.Read() need %d bytes", bodyLen-n)
			} else {
				log.Error("body: readbytes %d > %d", n, bodyLen)
			}
		}
	} else {
		proto.Body = nil
	}
	return
}
func (e *Editor) RunCommand(name string, args Args) {
	// TODO?
	var (
		wnd *Window
		v   *View
	)
	if wnd = e.ActiveWindow(); wnd != nil {
		v = wnd.ActiveView()
	}
	// TODO: what's the command precedence?
	if c := e.cmdhandler.TextCommands[name]; c != nil {
		if err := e.CommandHandler().RunTextCommand(v, name, args); err != nil {
			log4go.Debug("Couldn't run textcommand: %s", err)
		}
	} else if c := e.cmdhandler.WindowCommands[name]; c != nil {
		if err := e.CommandHandler().RunWindowCommand(wnd, name, args); err != nil {
			log4go.Debug("Couldn't run windowcommand: %s", err)
		}
	} else if c := e.cmdhandler.ApplicationCommands[name]; c != nil {
		if err := e.CommandHandler().RunApplicationCommand(name, args); err != nil {
			log4go.Debug("Couldn't run applicationcommand: %s", err)
		}
	} else {
		log4go.Debug("Couldn't find command to run")
	}
}
func (s *RaftServer) joinHandler(w http.ResponseWriter, req *http.Request) {
	// if this is the leader, process the command
	if s.raftServer.State() == raft.Leader {
		command := &InfluxJoinCommand{}
		if err := json.NewDecoder(req.Body).Decode(&command); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		log.Debug("ON RAFT LEADER - JOIN: %v", command)
		// during the test suite the join command will sometimes time out... just retry a few times
		if _, err := s.raftServer.Do(command); err != nil {
			log.Error("Can't process %v: %s", command, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	leader, ok := s.leaderConnectString()
	log.Debug("Non-leader redirecting to: (%v, %v)", leader, ok)
	if ok {
		log.Debug("redirecting to leader to join...")
		http.Redirect(w, req, leader+"/join", http.StatusTemporaryRedirect)
	} else {
		http.Error(w, errors.New("Couldn't find leader of the cluster to join").Error(), http.StatusInternalServerError)
	}
}
// Process responses from the given channel. Returns true if
// processing should stop for other channels. False otherwise.
func (p *MergeChannelProcessor) processChannel(channel <-chan *protocol.Response) bool {
	for response := range channel {
		log4go.Debug("%s received %s", p, response)

		switch rt := response.GetType(); rt {
		// all these types end the stream
		case protocol.Response_HEARTBEAT, protocol.Response_END_STREAM:
			p.e <- nil
			return false

		case protocol.Response_ERROR:
			err := common.NewQueryError(common.InvalidArgument, response.GetErrorMessage())
			p.e <- err
			return false

		case protocol.Response_QUERY:
			for _, s := range response.MultiSeries {
				log4go.Debug("Yielding to %s: %s", p.next.Name(), s)
				_, err := p.next.Yield(s)
				if err != nil {
					p.e <- err
					return true
				}
			}

		default:
			panic(fmt.Errorf("Unknown response type: %s", rt))
		}
	}
	panic(errors.New("Reached end of method"))
}
func (self *ShardData) QueryResponseBufferSize(querySpec *parser.QuerySpec, batchPointSize int) int {
	groupByTime := querySpec.GetGroupByInterval()
	if groupByTime == nil {
		// If the group by time is nil, we shouldn't have to use a buffer since the shards should be queried sequentially.
		// However, set this to something high just to be safe.
		log.Debug("BUFFER SIZE: 1000")
		return 1000
	}
	tickCount := int(self.shardNanoseconds / uint64(*groupByTime))
	if tickCount < 10 {
		tickCount = 100
	} else if tickCount > 1000 {
		// cap this because each response should have up to this number of points in it.
		tickCount = tickCount / batchPointSize
		// but make sure it's at least 1k
		if tickCount < 1000 {
			tickCount = 1000
		}
	}
	columnCount := querySpec.GetGroupByColumnCount()
	if columnCount > 1 {
		// we don't really know the cardinality for any column up front. This is just a multiplier so we'll see how this goes.
		// each response can have many points, so having a buffer of the ticks * 100 should be safe, but we'll see.
		tickCount = tickCount * 100
	}
	log.Debug("BUFFER SIZE: %d", tickCount)
	return tickCount
}
func (server *Server) serveTCP(conn *net.TCPConn, rrp, wrp *sync.Pool, rr *bufio.Reader, wr *bufio.Writer, tr *Timer) {
	var (
		b   *Bucket
		p   *Proto
		hb  time.Duration // heartbeat
		key string
		err error
		trd *TimerData
		ch  = NewChannel(Conf.CliProto, Conf.SvrProto)
		pb  = make([]byte, maxPackIntBuf)
	)
	// auth
	if trd, err = tr.Add(Conf.HandshakeTimeout, conn); err != nil {
		log.Error("handshake: timer.Add() error(%v)", err)
		goto failed
	}
	key, hb, err = server.authTCP(rr, wr, pb, ch)
	tr.Del(trd)
	if err != nil {
		log.Error("server.authTCP() error(%v)", err)
		goto failed
	}
	// register key->channel
	b = server.Bucket(key)
	b.Put(key, ch)
	// handshake ok, start dispatch goroutine
	go server.dispatchTCP(conn, wrp, wr, ch, hb, tr)
	for {
		// fetch a proto from channel free list
		if p, err = ch.CliProto.Set(); err != nil {
			log.Error("%s fetch client proto error(%v)", key, err)
			goto failed
		}
		// parse request protocol
		if err = server.readTCPRequest(rr, pb, p); err != nil {
			log.Error("%s read client request error(%v)", key, err)
			goto failed
		}
		// send to writer
		ch.CliProto.SetAdv()
		ch.Signal()
	}
failed:
	// dialog finish
	// may call twice
	if err = conn.Close(); err != nil {
		log.Error("reader: conn.Close() error(%v)", err)
	}
	PutBufioReader(rrp, rr)
	if b != nil {
		b.Del(key)
		log.Debug("wake up dispatch goroutine")
		ch.Finish()
	}
	if err = server.operator.Disconnect(key); err != nil {
		log.Error("%s operator do disconnect error(%v)", key, err)
	}
	log.Debug("%s serverconn goroutine exit", key)
	return
}
/*
 * GameStateFetchingWords
 * All bets are over. Now ask players for their words.
 * BREAKS ON: computing winner finished
 */
func (g *Game) GameStateFetchingWords() {
	defer func() {
		recover()
	}()
	for {
		select {
		case <-g.stateFetchingWords:
			log.Debug("Game %d STATE: Fetching words from active players.", g.Id)
			logStr := "Waiting for words from: "
			g.awaitingPlayersWord = make(map[int]int)
			for _, p := range g.Players {
				if p != nil {
					if p.BetAction != BET_FOLD && p.BetAction != BET_LEFT && p.BetAction != BET_WAITING {
						g.awaitingPlayersWord[p.Id] = 1
						logStr += "Player " + strconv.Itoa(p.Id) + ", "
					}
				}
			}
			log.Debug(logStr)
			g.sendUpdate(ACT_REQUEST_WORD, strconv.Itoa(-1), 0, nil)
		}
	}
}
// We call this function only if we have a Select query (not continuous) or Delete query
func (self *Coordinator) runQuerySpec(querySpec *parser.QuerySpec, p engine.Processor) error {
	shards, processor, err := self.getShardsAndProcessor(querySpec, p)
	if err != nil {
		return err
	}
	if len(shards) == 0 {
		return fmt.Errorf("Couldn't look up columns")
	}
	shardConcurrentLimit := self.config.ConcurrentShardQueryLimit
	if self.shouldQuerySequentially(shards, querySpec) {
		log.Debug("Querying shards sequentially")
		shardConcurrentLimit = 1
	}
	log.Debug("Shard concurrent limit: %d", shardConcurrentLimit)

	mcp := NewMergeChannelProcessor(processor, shardConcurrentLimit)
	go mcp.ProcessChannels()

	if err := self.queryShards(querySpec, shards, mcp); err != nil {
		log.Error("Error while querying shards: %s", err)
		mcp.Close()
		return err
	}
	if err := mcp.Close(); err != nil {
		log.Error("Error while querying shards: %s", err)
		return err
	}
	return processor.Close()
}
/*
 * GameStateDealing
 * Deals cards to the players and community
 * BREAKS ON: cards have been dealt
 */
func (g *Game) GameStateDealing() {
	defer func() {
		recover()
	}()
	for {
		select {
		case <-g.stateDealing:
			log.Debug("Game %d STATE: dealing", g.Id)
			log.Debug("Dealing cards to %d players and community.", len(g.Players))
			setIdx := rand.Intn(len(g.lobby.handSets)) // renamed from "rand" to avoid shadowing the rand package
			set := g.lobby.handSets[setIdx]
			g.CommunityCards = set.community
			for i, p := range g.Players {
				p.Hand = set.players[i]
			}
			g.sendUpdate(ACT_START, strconv.Itoa(-1), 0, nil) // broadcast GAME_START
			time.Sleep(time.Millisecond * DUR_START_COOLDOWN)
			g.State = GS_BETTING
			g.stateBetting <- 1
		}
	}
}
func (self *FoodPriceService) getCityFoodPrice(city string) (string, error) {
	foodPriceMsg, hitCache := cityFoodPriceMsgCache[city]
	if hitCache {
		l4g.Debug("Hit cityFoodPriceMsgCache, city: %s", city)
		return foodPriceMsg, nil
	}
	var cityFoodPrices []*CityFoodPrice
	entities, err := self.dbHelper.GetLatestCityFoodPriceEntity(city, int64(overTime))
	if err != nil {
		return "", err
	}
	if len(entities) > 0 {
		cityFoodPrices = self.convertCityEntityToFoodPrice(entities)
	}
	if len(cityFoodPrices) == 0 {
		var e error
		l4g.Debug("get %s food price from web", city)
		cityFoodPrices, e = FetchCityFoodPrice(city)
		if e != nil {
			return "", e
		}
	}
	if len(cityFoodPrices) == 0 {
		return "无记录", nil // "no records"
	}
	msg := self.formatCityFoodPrice(cityFoodPrices, city)
	url, _ := GetCityFoodPriceUrl(city)
	msg = msg + fmt.Sprintf("\n详细信息请点击:%s", url) // "for details, click: <url>"
	cityFoodPriceMsgCache[city] = msg
	return msg, nil
}
func (self *WAL) processCommitEntry(e *commitEntry) {
	logger.Debug("committing %d for server %d", e.requestNumber, e.serverId)
	self.state.commitRequestNumber(e.serverId, e.requestNumber)
	idx := self.firstLogFile()
	if idx == 0 {
		e.confirmation <- &confirmation{0, nil}
		return
	}
	var unusedLogFiles []*log
	var unusedLogIndex []*index
	logger.Debug("Removing some unneeded log files: %d", idx)
	unusedLogFiles, self.logFiles = self.logFiles[:idx], self.logFiles[idx:]
	unusedLogIndex, self.logIndex = self.logIndex[:idx], self.logIndex[idx:]
	for logIdx, logFile := range unusedLogFiles {
		logger.Info("Deleting %s", logFile.file.Name())
		logFile.close()
		logFile.delete()
		logIndex := unusedLogIndex[logIdx]
		logIndex.close()
		logIndex.delete()
	}
	self.state.FirstSuffix = self.logFiles[0].suffix()
	e.confirmation <- &confirmation{0, nil}
}
func (self *WAL) recover() error {
	for idx, logFile := range self.logFiles {
		self.requestsSinceLastIndex = 0
		self.requestsSinceRotation = self.logIndex[idx].getLength()

		lastOffset := self.logIndex[idx].getLastOffset()
		logger.Debug("Getting file size for %s[%d]", logFile.file.Name(), logFile.file.Fd())
		stat, err := logFile.file.Stat()
		if err != nil {
			return err
		}
		logger.Info("Checking %s, last: %d, size: %d", logFile.file.Name(), lastOffset, stat.Size())
		replay, _ := logFile.dupAndReplayFromOffset(nil, lastOffset, 0)
		firstOffset := int64(-1)
		for {
			replayRequest := <-replay
			if replayRequest == nil {
				break
			}
			self.state.LargestRequestNumber = replayRequest.requestNumber
			if err := replayRequest.err; err != nil {
				return err
			}
			var points []*protocol.Point
			if s := replayRequest.request.Series; s != nil {
				points = s.Points
			}
			for _, point := range points {
				sequenceNumber := (point.GetSequenceNumber() - uint64(self.serverId)) / HOST_ID_OFFSET
				self.state.recover(replayRequest.shardId, sequenceNumber)
			}
			if firstOffset == -1 {
				firstOffset = replayRequest.startOffset
			}
			self.requestsSinceLastIndex++
			self.requestsSinceRotation++
			logger.Debug("recovery requestsSinceLastIndex: %d, requestNumber: %d", self.requestsSinceLastIndex, replayRequest.request.GetRequestNumber())
			logger.Debug("largestrequestnumber: %d\n", self.state.LargestRequestNumber)
			if self.requestsSinceLastIndex < self.config.WalIndexAfterRequests {
				continue
			}
			self.logIndex[idx].addEntry(
				replayRequest.requestNumber-uint32(self.requestsSinceLastIndex),
				uint32(replayRequest.requestNumber),
				firstOffset,
				replayRequest.endOffset,
			)
		}
	}
	logger.Debug("Finished wal recovery")
	return nil
}