func (self *FoodPriceService) getFoodPrice(username string, args []string) (string, error) {
	if args == nil || len(args) == 0 {
		return "", errors.New("缺少参数!")
	}
	cityOrDistrict := args[0]
	cityCode := self.getCityCode(cityOrDistrict)
	if len(cityCode) > 0 {
		foodPriceMessage, err := self.getCityFoodPrice(cityCode)
		if err != nil {
			l4g.Error("Get city food price failed: %v", err)
			return "", errors.New("获取数据失败!")
		}
		return foodPriceMessage, nil
	}
	districtCode := self.getDistrictCode(cityOrDistrict)
	if len(districtCode) > 0 {
		foodPriceMessage, err := self.getDistrictFoodPrice(districtCode)
		if err != nil {
			l4g.Error("Get district food price failed: %v", err)
			return "", errors.New("获取数据失败!")
		}
		return foodPriceMessage, nil
	}
	return "", errors.New("不支持该城市或地区的菜价查询!")
}
// LoadConfig will try to search around for the corresponding config file.
// It will search /tmp/fileName, then attempt ./config/fileName,
// then ../config/fileName, and last it will look at fileName.
func LoadConfig(fileName string) {
	fileName = findConfigFile(fileName)
	l4g.Info("Loading config file at " + fileName)

	file, err := os.Open(fileName)
	if err != nil {
		panic("Error opening config file=" + fileName + ", err=" + err.Error())
	}

	decoder := json.NewDecoder(file)
	config := Config{}
	err = decoder.Decode(&config)
	if err != nil {
		panic("Error decoding configuration " + err.Error())
	}

	// Check for a valid feedback email; if it is invalid, fall back to feedback@localhost
	if _, err := mail.ParseAddress(config.EmailSettings.FeedbackEmail); err != nil {
		config.EmailSettings.FeedbackEmail = "feedback@localhost"
		l4g.Error("Misconfigured feedback email setting: %s", config.EmailSettings.FeedbackEmail)
	}

	configureLog(config.LogSettings)

	Cfg = &config
	SanitizeOptions = getSanitizeOptions()

	// Validate our mail settings
	if err := CheckMailSettings(); err != nil {
		l4g.Error("Email settings are not valid err=%v", err)
	}
}
func (self *ProtobufServer) handleConnection(conn net.Conn) {
	log.Info("ProtobufServer: client connected: %s", conn.RemoteAddr().String())

	message := make([]byte, 0, MAX_REQUEST_SIZE)
	buff := bytes.NewBuffer(message)
	var messageSizeU uint32
	for {
		err := binary.Read(conn, binary.LittleEndian, &messageSizeU)
		if err != nil {
			log.Error("Error reading from connection (%s): %s", conn.RemoteAddr().String(), err)
			self.connectionMapLock.Lock()
			delete(self.connectionMap, conn)
			self.connectionMapLock.Unlock()
			conn.Close()
			return
		}

		messageSize := int64(messageSizeU)
		if messageSize > MAX_REQUEST_SIZE {
			err = self.handleRequestTooLarge(conn, messageSize, buff)
		} else {
			err = self.handleRequest(conn, messageSize, buff)
		}

		if err != nil {
			log.Error("Error, closing connection: %s", err)
			self.connectionMapLock.Lock()
			delete(self.connectionMap, conn)
			self.connectionMapLock.Unlock()
			conn.Close()
			return
		}
		buff.Reset()
	}
}
// ListenAndServe subscribes to all connected queues on the queue server,
// connects to the channel, and serves the received messages to their handlers.
func (self *QueueServer) ListenAndServe() {
	factor, err := amqp.Factory()
	if err != nil {
		log.Error("Failed to get the queue instance: %s", err)
	}

	pubsub, err := factor.Get(self.ListenAddress)
	if err != nil {
		log.Error("Failed to get the queue instance: %s", err)
	}

	//res := &global.Message{}
	msgChan, _ := pubsub.Sub()
	for msg := range msgChan {
		log.Info(" [x] %q", msg)
		queue1, _ := config.GetString("name")
		if self.ListenAddress == queue1 {
			coordinator.Handler(msg)
		}
	}

	log.Info("Handling message %v", msgChan)
	self.chann = msgChan
}
// acceptTCP accepts connections on the listener and serves requests
// for each incoming connection. acceptTCP blocks; the caller typically
// invokes it in a go statement.
func acceptTCP(server *Server, lis *net.TCPListener) {
	var (
		conn *net.TCPConn
		err  error
		r    int
	)
	for {
		if conn, err = lis.AcceptTCP(); err != nil {
			// if the listener was closed then return
			log.Error("listener.Accept(\"%s\") error(%v)", lis.Addr().String(), err)
			return
		}
		if err = conn.SetKeepAlive(Conf.TCPKeepalive); err != nil {
			log.Error("conn.SetKeepAlive() error(%v)", err)
			return
		}
		// the read buffer uses the configured receive buffer size, the write
		// buffer uses the configured send buffer size
		if err = conn.SetReadBuffer(Conf.TCPRcvbuf); err != nil {
			log.Error("conn.SetReadBuffer() error(%v)", err)
			return
		}
		if err = conn.SetWriteBuffer(Conf.TCPSndbuf); err != nil {
			log.Error("conn.SetWriteBuffer() error(%v)", err)
			return
		}
		go serveTCP(server, conn, r)
		if r++; r == maxInt {
			r = 0
		}
	}
}
func CreateUser(c *Context, team *model.Team, user *model.User) *model.User {
	if !utils.Cfg.TeamSettings.EnableUserCreation {
		c.Err = model.NewAppError("CreateUser", "User creation has been disabled. Please ask your systems administrator for details.", "")
		return nil
	}

	channelRole := ""
	if team.Email == user.Email {
		user.Roles = model.ROLE_TEAM_ADMIN
		channelRole = model.CHANNEL_ROLE_ADMIN

		// Below is a special case where the first user in the entire
		// system is granted the system_admin role instead of admin
		if result := <-Srv.Store.User().GetTotalUsersCount(); result.Err != nil {
			c.Err = result.Err
			return nil
		} else {
			count := result.Data.(int64)
			if count <= 0 {
				user.Roles = model.ROLE_SYSTEM_ADMIN
			}
		}
	} else {
		user.Roles = ""
	}

	user.MakeNonNil()

	if result := <-Srv.Store.User().Save(user); result.Err != nil {
		c.Err = result.Err
		l4g.Error("Couldn't save the user err=%v", result.Err)
		return nil
	} else {
		ruser := result.Data.(*model.User)

		// Soft error if there is an issue joining the default channels
		if err := JoinDefaultChannels(ruser, channelRole); err != nil {
			l4g.Error("Encountered an issue joining default channels user_id=%s, team_id=%s, err=%v", ruser.Id, ruser.TeamId, err)
		}

		addDirectChannelsAndForget(ruser)

		if user.EmailVerified {
			if cresult := <-Srv.Store.User().VerifyEmail(ruser.Id); cresult.Err != nil {
				l4g.Error("Failed to set email verified err=%v", cresult.Err)
			}
		}

		ruser.Sanitize(map[string]bool{})

		// This message goes to every channel, so the channelId is irrelevant
		message := model.NewMessage(team.Id, "", ruser.Id, model.ACTION_NEW_USER)
		PublishAndForget(message)

		return ruser
	}
}
func PostUpdateChannelHeaderMessageAndForget(c *Context, channelId string, oldChannelHeader, newChannelHeader string) {
	go func() {
		uc := Srv.Store.User().Get(c.Session.UserId)

		if uresult := <-uc; uresult.Err != nil {
			l4g.Error("Failed to retrieve user while trying to save update channel header message %v", uresult.Err)
			return
		} else {
			user := uresult.Data.(*model.User)

			var message string
			if oldChannelHeader == "" {
				message = fmt.Sprintf("%s updated the channel header to: %s", user.Username, newChannelHeader)
			} else if newChannelHeader == "" {
				message = fmt.Sprintf("%s removed the channel header (was: %s)", user.Username, oldChannelHeader)
			} else {
				message = fmt.Sprintf("%s updated the channel header from: %s to: %s", user.Username, oldChannelHeader, newChannelHeader)
			}

			post := &model.Post{
				ChannelId: channelId,
				Message:   message,
				Type:      model.POST_HEADER_CHANGE,
			}
			if _, err := CreatePost(c, post, false); err != nil {
				l4g.Error("Failed to post update channel header message %v", err)
			}
		}
	}()
}
func InitWebsocket() (err error) {
	var (
		listener     *net.TCPListener
		addr         *net.TCPAddr
		httpServeMux = http.NewServeMux()
	)
	httpServeMux.Handle("/sub", websocket.Handler(serveWebsocket))
	for _, bind := range Conf.WebsocketBind {
		if addr, err = net.ResolveTCPAddr("tcp4", bind); err != nil {
			log.Error("net.ResolveTCPAddr(\"tcp4\", \"%s\") error(%v)", bind, err)
			return
		}
		if listener, err = net.ListenTCP("tcp4", addr); err != nil {
			log.Error("net.ListenTCP(\"tcp4\", \"%s\") error(%v)", bind, err)
			return
		}
		server := &http.Server{Handler: httpServeMux}
		log.Debug("start websocket listen: \"%s\"", bind)
		// pass bind and listener into the goroutine so each serve loop uses
		// its own listener instead of the shared loop variables
		go func(bind string, listener *net.TCPListener) {
			if err := server.Serve(listener); err != nil {
				log.Error("server.Serve(\"%s\") error(%v)", bind, err)
				panic(err)
			}
		}(bind, listener)
	}
	return
}
// RegisterTemp creates an ephemeral node and watches it; if the node is
// dropped, it sends a SIGQUIT to the current process.
func RegisterTemp(conn *zk.Conn, fpath string, data []byte) error {
	tpath, err := conn.Create(path.Join(fpath)+"/", data, zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll))
	if err != nil {
		log.Error("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, string(data), err)
		return err
	}
	log.Debug("create a zookeeper node:%s", tpath)
	// watch self
	go func() {
		for {
			log.Info("zk path: \"%s\" set a watch", tpath)
			exist, _, watch, err := conn.ExistsW(tpath)
			if err != nil {
				log.Error("zk.ExistsW(\"%s\") error(%v)", tpath, err)
				log.Warn("zk path: \"%s\" set watch failed, kill itself", tpath)
				killSelf()
				return
			}
			if !exist {
				log.Warn("zk path: \"%s\" not exist, kill itself", tpath)
				killSelf()
				return
			}
			event := <-watch
			log.Info("zk path: \"%s\" receive a event %v", tpath, event)
		}
	}()
	return nil
}
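// killSelf is referenced above but not defined in this excerpt. A minimal
// sketch of what it could look like, assuming it simply delivers SIGQUIT to
// the current process as the doc comment describes (the real implementation
// may differ; assumes the standard os and syscall imports):
func killSelf() {
	// send SIGQUIT to our own pid so the process shuts itself down
	if err := syscall.Kill(os.Getpid(), syscall.SIGQUIT); err != nil {
		log.Error("syscall.Kill(%d, SIGQUIT) error(%v)", os.Getpid(), err)
	}
}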
// NewMySQLStorage initializes the MySQL pool and the consistent hashing ring.
func NewMySQLStorage() *MySQLStorage {
	var (
		err error
		w   int
		nw  []string
		db  *sql.DB
	)
	dbPool := make(map[string]*sql.DB)
	ring := ketama.NewRing(ketamaBase)
	for n, source := range Conf.MySQLSource {
		nw = strings.Split(n, ":")
		if len(nw) != 2 {
			err = errors.New("node config error, it's nodeN:W")
			log.Error("strings.Split(\"%s\", :) failed (%v)", n, err)
			panic(err)
		}
		w, err = strconv.Atoi(nw[1])
		if err != nil {
			log.Error("strconv.Atoi(\"%s\") failed (%v)", nw[1], err)
			panic(err)
		}
		db, err = sql.Open("mysql", source)
		if err != nil {
			log.Error("sql.Open(\"mysql\", %s) failed (%v)", source, err)
			panic(err)
		}
		dbPool[nw[0]] = db
		ring.AddNode(nw[0], w)
	}
	ring.Bake()
	s := &MySQLStorage{pool: dbPool, ring: ring}
	go s.clean()
	return s
}
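// For illustration: the Conf.MySQLSource keys above are expected to be of the
// form "nodeName:weight", and the values are MySQL DSNs passed straight to
// sql.Open. A hypothetical entry (node names and DSNs are illustrative only,
// not taken from this codebase) might look like:
//
//	Conf.MySQLSource = map[string]string{
//		"node1:1": "user:password@tcp(192.168.1.10:3306)/push?charset=utf8",
//		"node2:2": "user:password@tcp(192.168.1.11:3306)/push?charset=utf8",
//	}
//
// so node2 would carry roughly twice the weight of node1 on the ketama ring.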
// handleTcpConn handles a long-lived tcp connection.
func handleTcpConn(conn net.Conn, readerChan chan *bufio.Reader) {
	addr := conn.RemoteAddr().String()
	log.Debug("<%s> handleTcpConn routine start", addr)
	reader := newBufioReader(readerChan, conn)
	if args, err := parseCmd(reader); err == nil {
		// return the bufio.Reader to the pool
		putBufioReader(readerChan, reader)
		switch args[0] {
		case CmdSubscribe:
			subscribeTcpHandle(conn, args[1:])
		default:
			conn.Write(ParamErrorReply)
			log.Warn("<%s> unknown cmd \"%s\"", addr, args[0])
		}
	} else {
		// return the bufio.Reader to the pool
		putBufioReader(readerChan, reader)
		log.Error("<%s> parseCmd() error(%v)", addr, err)
	}
	// close the connection
	if err := conn.Close(); err != nil {
		log.Error("<%s> conn.Close() error(%v)", addr, err)
	}
	log.Debug("<%s> handleTcpConn routine stop", addr)
}
// GetPrivate implements the Storage GetPrivate method.
func (s *MySQLStorage) GetPrivate(key string, mid int64) ([]*myrpc.Message, error) {
	db := s.getConn(key)
	if db == nil {
		return nil, ErrNoMySQLConn
	}
	now := time.Now().Unix()
	rows, err := db.Query(getPrivateMsgSQL, key, mid)
	if err != nil {
		log.Error("db.Query(\"%s\",\"%s\",%d,now) failed (%v)", getPrivateMsgSQL, key, mid, err)
		return nil, err
	}
	defer rows.Close()
	msgs := []*myrpc.Message{}
	for rows.Next() {
		expire := int64(0)
		cmid := int64(0)
		msg := []byte{}
		if err := rows.Scan(&cmid, &expire, &msg); err != nil {
			log.Error("rows.Scan() failed (%v)", err)
			return nil, err
		}
		if now > expire {
			log.Warn("user_key: \"%s\" mid: %d expired", key, cmid)
			continue
		}
		msgs = append(msgs, &myrpc.Message{MsgId: cmid, GroupId: myrpc.PrivateGroupId, Msg: json.RawMessage(msg)})
	}
	return msgs, nil
}
func (self *FoodPriceService) unsubFoodPrice(username string, args []string) (string, error) {
	if args == nil || len(args) == 0 {
		return "", errors.New("缺少参数!")
	}
	cityOrDistrict := self.getCityOrDistrictCode(args[0])
	if len(cityOrDistrict) == 0 {
		return "", errors.New("不支持订阅该城市或地区的菜价!")
	}
	userSubEntity, err := self.dbHelper.GetUserSub(username, cityOrDistrict)
	if err != nil {
		l4g.Error("GetUserSub error in unsubscribe food price: username: %s, error: %v", username, err)
		return "", errors.New("退订失败!")
	}
	if userSubEntity != nil {
		userSubEntity.SubStatus = 0
		updateError := self.dbHelper.UpdateUserSub(userSubEntity)
		if updateError != nil {
			l4g.Error("UpdateUserSub error in unsubscribe food price: username: %s, error: %v", username, updateError)
			return "", errors.New("退订失败!")
		}
	} else {
		return "未订阅过该城市或地区的菜价信息!", nil
	}
	return "退订成功!", nil
}
func (self *FoodPriceService) subFoodPrice(username string, args []string) (string, error) {
	if args == nil || len(args) == 0 {
		return "", errors.New("缺少参数!")
	}
	cityOrDistrict := self.getCityOrDistrictCode(args[0])
	if len(cityOrDistrict) == 0 {
		return "", errors.New("不支持订阅该城市或地区的菜价!")
	}
	userSubEntity, err := self.dbHelper.GetUserSub(username, cityOrDistrict)
	if err != nil {
		l4g.Error("GetUserSub error in subscribe food price: username: %s, error: %v", username, err)
		return "", errors.New("订阅失败!")
	}
	if userSubEntity == nil {
		userSubEntity = &UserSubEntity{}
		userSubEntity.Username = username
		userSubEntity.CityOrDistrict = cityOrDistrict
		userSubEntity.SubStatus = 1
		addError := self.dbHelper.AddUserSub(userSubEntity)
		if addError != nil {
			l4g.Error("AddUserSub error in subscribe food price: username: %s, error: %v", username, addError)
			return "", errors.New("订阅失败!")
		}
	} else {
		userSubEntity.SubStatus = 1
		updateError := self.dbHelper.UpdateUserSub(userSubEntity)
		if updateError != nil {
			l4g.Error("UpdateUserSub error in subscribe food price: username: %s, error: %v", username, updateError)
			return "", errors.New("订阅失败!")
		}
	}
	return "订阅成功!", nil
}
// Push pushes a message to a specified sub key; it must be goroutine safe.
func Push(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Method Not Allowed", 405)
		return
	}
	body := ""
	res := map[string]interface{}{"ret": OK}
	defer retPWrite(w, r, res, &body, time.Now())
	// param
	bodyBytes, err := ioutil.ReadAll(r.Body)
	if err != nil {
		res["ret"] = InternalErr
		log.Error("ioutil.ReadAll() failed (%v)", err)
		return
	}
	params := r.URL.Query()
	key := params.Get("key")
	log.Debug("push key: \"%s\"", key)
	bucket := DefaultServer.Bucket(key)
	if channel := bucket.Get(key); channel != nil {
		// message padding is left to the caller
		if err = channel.PushMsg(1, OP_SEND_SMS_REPLY, bodyBytes); err != nil {
			res["ret"] = InternalErr
			log.Error("channel.PushMsg() error(%v)", err)
			return
		}
	}
	res["ret"] = OK
	return
}
func (c *DefaultServerCodec) ReadRequestHeader(rd *bufio.Reader, proto *Proto) (err error) {
	if err = binary.Read(rd, binary.BigEndian, &proto.PackLen); err != nil {
		log.Error("packLen: binary.Read() error(%v)", err)
		return
	}
	log.Debug("packLen: %d", proto.PackLen)
	if proto.PackLen > maxPackLen {
		return ErrProtoPackLen
	}
	if err = binary.Read(rd, binary.BigEndian, &proto.HeaderLen); err != nil {
		log.Error("headerLen: binary.Read() error(%v)", err)
		return
	}
	log.Debug("headerLen: %d", proto.HeaderLen)
	if proto.HeaderLen != rawHeaderLen {
		return ErrProtoHeaderLen
	}
	if err = binary.Read(rd, binary.BigEndian, &proto.Ver); err != nil {
		log.Error("protoVer: binary.Read() error(%v)", err)
		return
	}
	log.Debug("protoVer: %d", proto.Ver)
	if err = binary.Read(rd, binary.BigEndian, &proto.Operation); err != nil {
		log.Error("Operation: binary.Read() error(%v)", err)
		return
	}
	log.Debug("operation: %d", proto.Operation)
	if err = binary.Read(rd, binary.BigEndian, &proto.SeqId); err != nil {
		log.Error("seqId: binary.Read() error(%v)", err)
		return
	}
	log.Debug("seqId: %d", proto.SeqId)
	return
}
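// For illustration, a minimal client-side sketch of framing a request that the
// codec above could read: big-endian packLen, headerLen, ver, operation and
// seqId, followed by the body. The field widths (int32/int16) and the value of
// rawHeaderLen are assumptions, not taken from this file; the real Proto
// definition may differ. Assumes the standard io and encoding/binary imports.
func writeRequestFrame(w io.Writer, ver int16, op, seq int32, body []byte) error {
	// assumed: packLen(4) + headerLen(2) + ver(2) + operation(4) + seqId(4) = 16
	const assumedRawHeaderLen int16 = 16
	packLen := int32(assumedRawHeaderLen) + int32(len(body))
	// write the fixed header fields big-endian, in the same order the codec reads them
	for _, v := range []interface{}{packLen, assumedRawHeaderLen, ver, op, seq} {
		if err := binary.Write(w, binary.BigEndian, v); err != nil {
			return err
		}
	}
	// the body follows the header
	_, err := w.Write(body)
	return err
}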
/*
 * SaveAsFile - save the content of a web page to a file
 *
 * PARAMS:
 *   - targetUrl : the target URL
 *   - outputDir : the output directory
 *
 * RETURNS:
 *   - true, if succeed
 *   - false, if failed
 */
func SaveAsFile(targetUrl string, outputDir string) bool {
	res, err := http.Get(targetUrl)
	if err != nil {
		return false
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		l4g.Error("bad status code: %d, url:%s", res.StatusCode, targetUrl)
		return false
	}
	content, err := ioutil.ReadAll(res.Body)
	if err != nil {
		l4g.Error("read url content %s, err:%s", targetUrl, err)
		return false
	}
	filename := path.Join(outputDir, url.QueryEscape(targetUrl))
	f, err := os.Create(filename)
	if err != nil {
		l4g.Error("create file %s, err:%s", filename, err)
		return false
	}
	defer f.Close()
	if _, err := f.Write(content); err != nil {
		l4g.Error("write file %s, err:%s", filename, err)
		return false
	}
	return true
}
func (c *DefaultServerCodec) ReadRequestBody(rd *bufio.Reader, proto *Proto) (err error) {
	var (
		n       = int(0)
		t       = int(0)
		bodyLen = int(proto.PackLen - int32(proto.HeaderLen))
	)
	log.Debug("read body len: %d", bodyLen)
	if bodyLen > 0 {
		proto.Body = make([]byte, bodyLen)
		// no deadline here: ReadRequestHeader is always called before
		// ReadRequestBody for an incoming call
		for {
			if t, err = rd.Read(proto.Body[n:]); err != nil {
				log.Error("body: buf.Read() error(%v)", err)
				return
			}
			if n += t; n == bodyLen {
				log.Debug("body: rd.Read() fill ok")
				break
			} else if n < bodyLen {
				log.Debug("body: rd.Read() need %d bytes", bodyLen-n)
			} else {
				log.Error("body: readbytes %d > %d", n, bodyLen)
			}
		}
	} else {
		proto.Body = nil
	}
	return
}
// checkRole checks the current redis role.
func checkRole(addr string, role string) bool {
	if addr == "" {
		return false
	}
	// call the ROLE command on the target instance
	conn, err := redis.DialTimeout("tcp", addr, sentinelTimeout, sentinelTimeout, sentinelTimeout)
	if err != nil {
		log.Error("redis.DialTimeout(\"tcp\", \"%s\", 500ms...) error(%v)", addr, err)
		return false
	}
	defer conn.Close()
	replies, err := redis.Values(conn.Do("ROLE"))
	if err != nil {
		log.Error("conn.Do(\"ROLE\") error(%v)", err)
		return false
	}
	if len(replies) < 1 {
		return false
	}
	curRole, err := redis.String(replies[0], nil)
	if err != nil {
		log.Error("redis.String(replies[0], nil) error(%v)", err)
		return false
	}
	log.Info("redis: \"%s\" role: \"%s\"", addr, curRole)
	return curRole == role
}
// watchCometNode watches a named node for leader selection during failover.
func watchCometNode(conn *zk.Conn, node, fpath string, retry, ping time.Duration, ch chan *CometNodeEvent) {
	fpath = path.Join(fpath, node)
	for {
		nodes, watch, err := myzk.GetNodesW(conn, fpath)
		if err == myzk.ErrNodeNotExist {
			log.Warn("zk don't have node \"%s\"", fpath)
			break
		} else if err == myzk.ErrNoChild {
			log.Warn("zk don't have any children in \"%s\", retry in %d second", fpath, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else if err != nil {
			log.Error("zk path: \"%s\" getNodes error(%v), retry in %d second", fpath, err, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		}
		// leader selection
		sort.Strings(nodes)
		if info, err := registerCometNode(conn, nodes[0], fpath, retry, ping, true); err != nil {
			log.Error("zk path: \"%s\" registerCometNode error(%v)", fpath, err)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else {
			// update node info
			ch <- &CometNodeEvent{Event: eventNodeUpdate, Key: node, Value: info}
		}
		// blocking receive event
		event := <-watch
		log.Info("zk path: \"%s\" receive a event: (%v)", fpath, event)
	}
	// WARN: if there is no persistent node and the comet rpc is not configured,
	// this path is never watched again until it is recreated.
	log.Warn("zk path: \"%s\" never watch again till recreate", fpath)
}
func (self *PiAssistant) Init(configPath string) error {
	fileData, readErr := ioutil.ReadFile(configPath)
	if readErr != nil {
		l4g.Error("Read config file error: %v", readErr)
		return readErr
	}
	l4g.Info("Loading config file: %s", configPath)
	var piAssiConf PiAssistantConfig
	unmarshalErr := json.Unmarshal(fileData, &piAssiConf)
	if unmarshalErr != nil {
		l4g.Error("Config file format error: %v", unmarshalErr)
		return unmarshalErr
	}
	self.piAssiConf = piAssiConf
	self.piai = piai.NewPiAi(self.piAssiConf.PiAiConf.SessionTimeout)
	serviceInitErr := self.initServices()
	if serviceInitErr != nil {
		l4g.Error("Service init failed: %v", serviceInitErr)
		return serviceInitErr
	}
	l4g.Info("Initialized services successfully!")
	return nil
}
func produce(conn *amqp.Connection, channel *amqp.Channel, val *interface{}) {
	if val == nil {
		log.Warn("the redis json is nil")
		return
	}
	body, err := json.Marshal(val)
	if err != nil || body == nil {
		log.Error("redis event to json error: %s , oplog is : %s ", err, string(body))
	} else {
		routingKey := "redis.event"
		log.Info("routing key is : %s ", routingKey)
		err = channel.Publish(
			EXCHANGE_KEY, // exchange
			routingKey,   // routing key
			false,        // mandatory
			false,        // immediate
			amqp.Publishing{
				ContentType: "text/plain",
				Body:        body,
			})
		if err != nil {
			log.Error("publish message err : %s ", err)
		}
		//TODO recreate channel ?
	}
}
// authTCP performs the goim handshake with the client, using rsa & aes.
func (server *Server) authTCP(rr *bufio.Reader, wr *bufio.Writer, ch *Channel) (subKey string, heartbeat time.Duration, err error) {
	var p *Proto
	// WARN: do not advance the client proto ring here; after auth the proto
	// is simply discarded.
	if p, err = ch.CliProto.Set(); err != nil {
		return
	}
	if err = server.readTCPRequest(rr, p); err != nil {
		return
	}
	if p.Operation != define.OP_AUTH {
		log.Warn("auth operation not valid: %d", p.Operation)
		err = ErrOperation
		return
	}
	if subKey, ch.RoomId, heartbeat, err = server.operator.Connect(p); err != nil {
		log.Error("operator.Connect error(%v)", err)
		return
	}
	p.Body = nil
	p.Operation = define.OP_AUTH_REPLY
	if err = server.writeTCPResponse(wr, p); err != nil {
		log.Error("[%s] server.sendTCPResponse() error(%v)", subKey, err)
	}
	return
}
func InitZK() (*zk.Conn, error) {
	conn, err := myzk.Connect(Conf.ZookeeperAddr, Conf.ZookeeperTimeout)
	if err != nil {
		log.Error("myzk.Connect() error(%v)", err)
		return nil, err
	}
	fpath := path.Join(Conf.ZookeeperCometPath, Conf.ZookeeperCometNode)
	if err = myzk.Create(conn, fpath); err != nil {
		log.Error("myzk.Create(conn,\"%s\",\"\") error(%v)", fpath, err)
		return conn, err
	}
	// the comet tcp, websocket and rpc bind addresses are stored in zk
	nodeInfo := &rpc.CometNodeInfo{}
	nodeInfo.RpcAddr = Conf.RPCBind
	nodeInfo.TcpAddr = Conf.TCPBind
	nodeInfo.WsAddr = Conf.WebsocketBind
	nodeInfo.Weight = Conf.ZookeeperCometWeight
	data, err := json.Marshal(nodeInfo)
	if err != nil {
		log.Error("json.Marshal() error(%v)", err)
		return conn, err
	}
	log.Debug("myzk node:\"%s\" register data: \"%s\"", fpath, string(data))
	if err = myzk.RegisterTemp(conn, fpath, data); err != nil {
		log.Error("myzk.RegisterTemp() error(%v)", err)
		return conn, err
	}
	// watch and update
	//go watchCometRoot(conn, Conf.ZookeeperCometPath, Conf.KetamaBase)
	rpc.InitMessage(conn, Conf.ZookeeperMessagePath, Conf.RPCRetry, Conf.RPCPing)
	return conn, nil
}
func (self *ProtobufClient) readResponses() {
	message := make([]byte, 0, MAX_RESPONSE_SIZE)
	buff := bytes.NewBuffer(message)
	for !self.stopped {
		buff.Reset()
		conn := self.getConnection()
		if conn == nil {
			time.Sleep(200 * time.Millisecond)
			continue
		}
		var messageSizeU uint32
		var err error
		err = binary.Read(conn, binary.LittleEndian, &messageSizeU)
		if err != nil {
			log.Error("Error while reading message size: %v", err)
			time.Sleep(200 * time.Millisecond)
			continue
		}
		messageSize := int64(messageSizeU)
		messageReader := io.LimitReader(conn, messageSize)
		_, err = io.Copy(buff, messageReader)
		if err != nil {
			log.Error("Error while reading message: %v", err)
			time.Sleep(200 * time.Millisecond)
			continue
		}
		response, err := protocol.DecodeResponse(buff)
		if err != nil {
			log.Error("error unmarshaling response: %s", err)
			time.Sleep(200 * time.Millisecond)
		} else {
			self.sendResponse(response)
		}
	}
}
func (self *ProtobufRequestHandler) WriteResponse(conn net.Conn, response *protocol.Response) error {
	data, err := response.Encode()
	if err != nil {
		log.Error("error encoding response: %s", err)
		return err
	}
	if len(data) >= MAX_RESPONSE_SIZE {
		// the encoded response is too large: split the points in half and
		// write each half as its own response
		pointCount := len(response.Series.Points)
		firstHalfPoints := response.Series.Points[:pointCount/2]
		secondHalfPoints := response.Series.Points[pointCount/2:]
		response.Series.Points = firstHalfPoints
		err := self.WriteResponse(conn, response)
		if err != nil {
			return err
		}
		response.Series.Points = secondHalfPoints
		return self.WriteResponse(conn, response)
	}

	buff := bytes.NewBuffer(make([]byte, 0, len(data)+8))
	binary.Write(buff, binary.LittleEndian, uint32(len(data)))
	_, err = conn.Write(append(buff.Bytes(), data...))
	if err != nil {
		log.Error("error writing response: %s", err)
		return err
	}
	return nil
}
func watchAndParseTemplates() {
	templatesDir := utils.FindDir("web/templates")
	l4g.Debug("Parsing templates at %v", templatesDir)
	var err error
	if Templates, err = template.ParseGlob(templatesDir + "*.html"); err != nil {
		l4g.Error("Failed to parse templates %v", err)
	}

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		l4g.Error("Failed to create directory watcher %v", err)
	}

	go func() {
		for {
			select {
			case event := <-watcher.Events:
				if event.Op&fsnotify.Write == fsnotify.Write {
					l4g.Info("Re-parsing templates because of modified file %v", event.Name)
					if Templates, err = template.ParseGlob(templatesDir + "*.html"); err != nil {
						l4g.Error("Failed to parse templates %v", err)
					}
				}
			case err := <-watcher.Errors:
				l4g.Error("Failed in directory watcher %v", err)
			}
		}
	}()

	err = watcher.Add(templatesDir)
	if err != nil {
		l4g.Error("Failed to add directory to watcher %v", err)
	}
}
func (self *ProtobufRequestHandler) HandleRequest(request *protocol.Request, conn net.Conn) error {
	if *request.Type == protocol.Request_WRITE {
		shard := self.clusterConfig.GetLocalShardById(*request.ShardId)
		log.Debug("HANDLE: ", shard)
		err := shard.WriteLocalOnly(request)
		if err != nil {
			log.Error("ProtobufRequestHandler: error writing local shard: ", err)
			return err
		}
		response := &protocol.Response{RequestId: request.Id, Type: &self.writeOk}
		return self.WriteResponse(conn, response)
	} else if *request.Type == protocol.Request_DROP_DATABASE {
		go self.handleDropDatabase(request, conn)
		return nil
	} else if *request.Type == protocol.Request_QUERY {
		go self.handleQuery(request, conn)
	} else if *request.Type == protocol.Request_HEARTBEAT {
		response := &protocol.Response{RequestId: request.Id, Type: &heartbeatResponse}
		return self.WriteResponse(conn, response)
	} else {
		log.Error("unknown request type: %v", request)
		return errors.New("Unknown request type")
	}
	return nil
}
func (self *CoordinatorImpl) InterpolateValuesAndCommit(query string, db string, series *protocol.Series, targetName string, assignSequenceNumbers bool) error {
	defer common.RecoverFunc(db, query, nil)

	targetName = strings.Replace(targetName, ":series_name", *series.Name, -1)
	type sequenceKey struct {
		seriesName string
		timestamp  int64
	}
	sequenceMap := make(map[sequenceKey]int)
	r, _ := regexp.Compile(`\[.*?\]`)

	if r.MatchString(targetName) {
		serieses := map[string]*protocol.Series{}
		for _, point := range series.Points {
			targetNameWithValues := r.ReplaceAllStringFunc(targetName, func(match string) string {
				fieldName := match[1 : len(match)-1]
				fieldIndex := series.GetFieldIndex(fieldName)
				return point.GetFieldValueAsString(fieldIndex)
			})

			if assignSequenceNumbers {
				key := sequenceKey{targetNameWithValues, *point.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				point.SequenceNumber = &sequenceNumber
			}

			newSeries := serieses[targetNameWithValues]
			if newSeries == nil {
				newSeries = &protocol.Series{Name: &targetNameWithValues, Fields: series.Fields, Points: []*protocol.Point{point}}
				serieses[targetNameWithValues] = newSeries
				continue
			}
			newSeries.Points = append(newSeries.Points, point)
		}
		seriesSlice := make([]*protocol.Series, 0, len(serieses))
		for _, s := range serieses {
			seriesSlice = append(seriesSlice, s)
		}
		if e := self.CommitSeriesData(db, seriesSlice, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	} else {
		newSeries := &protocol.Series{Name: &targetName, Fields: series.Fields, Points: series.Points}

		if assignSequenceNumbers {
			for _, point := range newSeries.Points {
				sequenceMap[sequenceKey{targetName, *point.Timestamp}] += 1
				sequenceNumber := uint64(sequenceMap[sequenceKey{targetName, *point.Timestamp}])
				point.SequenceNumber = &sequenceNumber
			}
		}

		if e := self.CommitSeriesData(db, []*protocol.Series{newSeries}, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	}

	return nil
}
func waitForSignals(stoppable Stoppable, filename string, stopped <-chan bool) {
	// signal.Notify requires a buffered channel so a signal is not dropped
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)
outer:
	for {
		sig := <-ch
		log.Info("Received signal: %s", sig.String())
		switch sig {
		case syscall.SIGINT, syscall.SIGTERM:
			runtime.SetCPUProfileRate(0)
			f, err := os.OpenFile(fmt.Sprintf("%s.mem", filename), os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0600)
			if err != nil {
				log.Error("Cannot open memory profile: %s", err)
				break outer
			}
			if err := pprof.WriteHeapProfile(f); err != nil {
				log.Error("Cannot write memory profile: %s", err)
			}
			f.Close()
			stopCHeapProfiler()
			// stopCCpuProfiler()
			stoppable.Stop()
			break outer
			// make sure everything stopped before exiting
		}
	}
	// wait for all logging messages to be printed
	<-stopped
	time.Sleep(5 * time.Second)
	os.Exit(0)
}