func InitZK() (*zk.Conn, error) { conn, err := myzk.Connect(Conf.ZookeeperAddr, Conf.ZookeeperTimeout) if err != nil { log.Error("myzk.Connect() error(%v)", err) return nil, err } fpath := path.Join(Conf.ZookeeperCometPath, Conf.ZookeeperCometNode) if err = myzk.Create(conn, fpath); err != nil { log.Error("myzk.Create(conn,\"%s\",\"\") error(%v)", fpath, err) return conn, err } // comet tcp, websocket and rpc bind address store in the zk nodeInfo := &rpc.CometNodeInfo{} nodeInfo.RpcAddr = Conf.RPCBind nodeInfo.TcpAddr = Conf.TCPBind nodeInfo.WsAddr = Conf.WebsocketBind nodeInfo.Weight = Conf.ZookeeperCometWeight data, err := json.Marshal(nodeInfo) if err != nil { log.Error("json.Marshal() error(%v)", err) return conn, err } log.Debug("myzk node:\"%s\" registe data: \"%s\"", fpath, string(data)) if err = myzk.RegisterTemp(conn, fpath, data); err != nil { log.Error("myzk.RegisterTemp() error(%v)", err) return conn, err } // watch and update rpc.InitMessage(conn, Conf.ZookeeperMessagePath, Conf.RPCRetry, Conf.RPCPing) return conn, nil }
func generateThumbnailImage(img image.Image, thumbnailPath string, width int, height int) { thumbWidth := float64(utils.Cfg.FileSettings.ThumbnailWidth) thumbHeight := float64(utils.Cfg.FileSettings.ThumbnailHeight) imgWidth := float64(width) imgHeight := float64(height) var thumbnail image.Image if imgHeight < thumbHeight && imgWidth < thumbWidth { thumbnail = img } else if imgHeight/imgWidth < thumbHeight/thumbWidth { thumbnail = imaging.Resize(img, 0, utils.Cfg.FileSettings.ThumbnailHeight, imaging.Lanczos) } else { thumbnail = imaging.Resize(img, utils.Cfg.FileSettings.ThumbnailWidth, 0, imaging.Lanczos) } buf := new(bytes.Buffer) if err := jpeg.Encode(buf, thumbnail, &jpeg.Options{Quality: 90}); err != nil { l4g.Error(utils.T("api.file.handle_images_forget.encode_jpeg.error"), thumbnailPath, err) return } if err := WriteFile(buf.Bytes(), thumbnailPath); err != nil { l4g.Error(utils.T("api.file.handle_images_forget.upload_thumb.error"), thumbnailPath, err) return } }
// parseCmd parse the tcp request command. func parseCmd(rd *bufio.Reader) ([]string, error) { // get argument number argNum, err := parseCmdSize(rd, '*') if err != nil { log.Error("tcp:cmd format error when find '*' (%v)", err) return nil, err } if argNum < minCmdNum || argNum > maxCmdNum { log.Error("tcp:cmd argument number length error") return nil, ErrProtocol } args := make([]string, 0, argNum) for i := 0; i < argNum; i++ { // get argument length cmdLen, err := parseCmdSize(rd, '$') if err != nil { log.Error("tcp:parseCmdSize(rd, '$') error(%v)", err) return nil, err } // get argument data d, err := parseCmdData(rd, cmdLen) if err != nil { log.Error("tcp:parseCmdData() error(%v)", err) return nil, err } // append args args = append(args, string(d)) } return args, nil }
/** * Json的序列化实现 */ func (self JsonProbe) Serialize(src *VictoriestMsg) ([]byte, error) { var v []byte var err error // vMsg := &VictoriestMsg{MsgType: msgType, MsgContext: src} v, err = json.Marshal(src) if err != nil { log.Error("when Encoding:", err.Error()) return nil, err } // 序列化后的bytes长度 var length int32 = int32(len(v)) pkg := new(bytes.Buffer) // 写入长度信息 err = binary.Write(pkg, binary.LittleEndian, length) if err != nil { log.Error("when Write length:", err.Error()) return nil, err } // 写入序列化后的对象 err = binary.Write(pkg, binary.LittleEndian, v) if err != nil { log.Error("when Serialize:", err.Error()) return nil, err } return pkg.Bytes(), nil }
func (s *Server) ListenAndServe() { var err error s.getAuth() s.typesdb, err = collectd.TypesDBFile(s.typesdbpath) if err != nil { log.Error("CollectdServer: TypesDB: ", err) return } addr, err := net.ResolveUDPAddr("udp4", s.listenAddress) if err != nil { log.Error("CollectdServer: ResolveUDPAddr: ", err) return } if s.listenAddress != "" { s.conn, err = net.ListenUDP("udp", addr) if err != nil { log.Error("CollectdServer: Listen: ", err) return } } defer s.conn.Close() s.HandleSocket(s.conn) }
// GetPrivate implements the Storage GetPrivate method. func (s *MySQLStorage) GetPrivate(key string, mid int64) ([]*myrpc.Message, error) { db := s.getConn(key) if db == nil { return nil, ErrNoMySQLConn } now := time.Now().Unix() rows, err := db.Query(getPrivateMsgSQL, key, mid) if err != nil { log.Error("db.Query(\"%s\",\"%s\",%d,now) failed (%v)", getPrivateMsgSQL, key, mid, err) return nil, err } msgs := []*myrpc.Message{} for rows.Next() { expire := int64(0) cmid := int64(0) msg := []byte{} if err := rows.Scan(&cmid, &expire, &msg); err != nil { log.Error("rows.Scan() failed (%v)", err) return nil, err } if now > expire { log.Warn("user_key: \"%s\" mid: %d expired", key, cmid) continue } msgs = append(msgs, &myrpc.Message{MsgId: cmid, GroupId: myrpc.PrivateGroupId, Msg: json.RawMessage(msg)}) } return msgs, nil }
func watchAndParseTemplates() { templatesDir := utils.FindDir("web/templates") l4g.Debug("Parsing templates at %v", templatesDir) var err error if Templates, err = template.ParseGlob(templatesDir + "*.html"); err != nil { l4g.Error("Failed to parse templates %v", err) } watcher, err := fsnotify.NewWatcher() if err != nil { l4g.Error("Failed to create directory watcher %v", err) } go func() { for { select { case event := <-watcher.Events: if event.Op&fsnotify.Write == fsnotify.Write { l4g.Info("Re-parsing templates because of modified file %v", event.Name) if Templates, err = template.ParseGlob(templatesDir + "*.html"); err != nil { l4g.Error("Failed to parse templates %v", err) } } case err := <-watcher.Errors: l4g.Error("Failed in directory watcher %v", err) } } }() err = watcher.Add(templatesDir) if err != nil { l4g.Error("Failed to add directory to watcher %v", err) } }
// DelMulti implements the Storage DelMulti method. func (s *RedisStorage) clean() { for { info := <-s.delCH conn := s.getConn(info.Key) if conn == nil { log.Warn("get redis connection nil") continue } for _, mid := range info.MIds { if err := conn.Send("ZREMRANGEBYSCORE", info.Key, mid, mid); err != nil { log.Error("conn.Send(\"ZREMRANGEBYSCORE\", \"%s\", %d, %d) error(%v)", info.Key, mid, mid, err) conn.Close() continue } } if err := conn.Flush(); err != nil { log.Error("conn.Flush() error(%v)", err) conn.Close() continue } for _, _ = range info.MIds { _, err := conn.Receive() if err != nil { log.Error("conn.Receive() error(%v)", err) conn.Close() continue } } conn.Close() } }
// NewRedis initialize the redis pool and consistency hash ring. func NewRedisStorage() *RedisStorage { redisPool := map[string]*redis.Pool{} ring := ketama.NewRing(ketamaBase) reg := regexp.MustCompile("(.+)@(.+)#(.+)|(.+)@(.+)") for n, addr := range Conf.RedisSource { nw := strings.Split(n, ":") if len(nw) != 2 { err := errors.New("node config error, it's nodeN:W") log.Error("strings.Split(\"%s\", :) failed (%v)", n, err) panic(err) } w, err := strconv.Atoi(nw[1]) if err != nil { log.Error("strconv.Atoi(\"%s\") failed (%v)", nw[1], err) panic(err) } // get protocol and addr pw := reg.FindStringSubmatch(addr) if len(pw) < 6 { log.Error("strings.regexp(\"%s\", \"%s\") failed (%v)", addr, pw) panic(fmt.Sprintf("config redis.source node:\"%s\" format error", addr)) } tmpProto := "" tmpAddr := "" if pw[1] != "" { tmpProto = pw[1] } else { tmpProto = pw[4] } if pw[2] != "" { tmpAddr = pw[2] } else { tmpAddr = pw[5] } // WARN: closures use redisPool[nw[0]] = &redis.Pool{ MaxIdle: Conf.RedisMaxIdle, MaxActive: Conf.RedisMaxActive, IdleTimeout: Conf.RedisIdleTimeout, Dial: func() (redis.Conn, error) { conn, err := redis.Dial(tmpProto, tmpAddr) if err != nil { log.Error("redis.Dial(\"%s\", \"%s\") error(%v)", tmpProto, tmpAddr, err) return nil, err } if pw[3] != "" { conn.Do("AUTH", pw[3]) } return conn, err }, } // add node to ketama hash ring.AddNode(nw[0], w) } ring.Bake() s := &RedisStorage{pool: redisPool, ring: ring, delCH: make(chan *RedisDelMessage, 10240)} go s.clean() return s }
func (self *ProtobufServer) handleConnection(conn net.Conn) { log.Info("ProtobufServer: client connected: %s", conn.RemoteAddr().String()) message := make([]byte, 0, MAX_REQUEST_SIZE) buff := bytes.NewBuffer(message) var messageSizeU uint32 for { err := binary.Read(conn, binary.LittleEndian, &messageSizeU) if err != nil { log.Error("Error reading from connection (%s): %s", conn.RemoteAddr().String(), err) self.connectionMapLock.Lock() delete(self.connectionMap, conn) self.connectionMapLock.Unlock() conn.Close() return } messageSize := int64(messageSizeU) if messageSize > MAX_REQUEST_SIZE { err = self.handleRequestTooLarge(conn, messageSize) } else { err = self.handleRequest(conn, messageSize, buff) } if err != nil { log.Error("Error, closing connection: %s", err) self.connectionMapLock.Lock() delete(self.connectionMap, conn) self.connectionMapLock.Unlock() conn.Close() return } buff.Reset() } }
// HandleWrite start a goroutine get msg from chan, then send to the conn. func (c *Connection) HandleWrite(key string) { go func() { var ( n int err error ) log.Debug("user_key: \"%s\" HandleWrite goroutine start", key) for { msg, ok := <-c.Buf if !ok { log.Debug("user_key: \"%s\" HandleWrite goroutine stop", key) return } if c.Proto == WebsocketProto { // raw n, err = c.Conn.Write(msg) } else if c.Proto == TCPProto { // redis protocol msg = []byte(fmt.Sprintf("$%d\r\n%s\r\n", len(msg), string(msg))) n, err = c.Conn.Write(msg) } else { log.Error("unknown connection protocol: %d", c.Proto) panic(ErrConnProto) } // update stat if err != nil { log.Error("user_key: \"%s\" conn.Write() error(%v)", key, err) MsgStat.IncrFailed(1) } else { log.Debug("user_key: \"%s\" write \r\n========%s(%d)========", key, string(msg), n) MsgStat.IncrSucceed(1) } } }() }
func doLoadChannel(c *api.Context, w http.ResponseWriter, r *http.Request, team *model.Team, channel *model.Channel, postid string) { userChan := api.Srv.Store.User().Get(c.Session.UserId) prefChan := api.Srv.Store.Preference().GetAll(c.Session.UserId) var user *model.User if ur := <-userChan; ur.Err != nil { c.Err = ur.Err c.RemoveSessionCookie(w, r) l4g.Error(utils.T("web.do_load_channel.error"), c.Session.UserId) return } else { user = ur.Data.(*model.User) } var preferences model.Preferences if result := <-prefChan; result.Err != nil { l4g.Error("Error in getting preferences for id=%v", c.Session.UserId) } else { preferences = result.Data.(model.Preferences) } page := NewHtmlTemplatePage("channel", "", c.Locale) page.Props["Title"] = channel.DisplayName + " - " + team.DisplayName + " " + page.ClientCfg["SiteName"] page.Props["TeamDisplayName"] = team.DisplayName page.Props["ChannelName"] = channel.Name page.Props["ChannelId"] = channel.Id page.Props["PostId"] = postid page.Team = team page.User = user page.Channel = channel page.Preferences = &preferences page.Render(c, w) }
func watchAndParseTemplates() { templatesDir := utils.FindDir("web/templates") l4g.Debug(utils.T("web.parsing_templates.debug"), templatesDir) var err error if Templates, err = template.ParseGlob(templatesDir + "*.html"); err != nil { l4g.Error(utils.T("web.parsing_templates.error"), err) } watcher, err := fsnotify.NewWatcher() if err != nil { l4g.Error(utils.T("web.create_dir.error"), err) } go func() { for { select { case event := <-watcher.Events: if event.Op&fsnotify.Write == fsnotify.Write { l4g.Info(utils.T("web.reparse_templates.info"), event.Name) if Templates, err = template.ParseGlob(templatesDir + "*.html"); err != nil { l4g.Error(utils.T("web.parsing_templates.error"), err) } } case err := <-watcher.Errors: l4g.Error(utils.T("web.dir_fail.error"), err) } } }() err = watcher.Add(templatesDir) if err != nil { l4g.Error(utils.T("web.watcher_fail.error"), err) } }
func PostUpdateChannelHeaderMessageAndForget(c *Context, channelId string, oldChannelHeader, newChannelHeader string) { go func() { uc := Srv.Store.User().Get(c.Session.UserId) if uresult := <-uc; uresult.Err != nil { l4g.Error("Failed to retrieve user while trying to save update channel header message %v", uresult.Err) return } else { user := uresult.Data.(*model.User) var message string if oldChannelHeader == "" { message = fmt.Sprintf("%s updated the channel header to: %s", user.Username, newChannelHeader) } else if newChannelHeader == "" { message = fmt.Sprintf("%s removed the channel header (was: %s)", user.Username, oldChannelHeader) } else { message = fmt.Sprintf("%s updated the channel header from: %s to: %s", user.Username, oldChannelHeader, newChannelHeader) } post := &model.Post{ ChannelId: channelId, Message: message, Type: model.POST_HEADER_CHANGE, } if _, err := CreatePost(c, post, false); err != nil { l4g.Error("Failed to post join/leave message %v", err) } } }() }
/** * param : src - 需要序列化的参数 * msgType- 需要序列化的对象标识 * return : []byte - 序列化后的byte数组 * error - 错误信息, 如果成功则为nil */ func (self ProtobufProbe) Serialize(src *protocol.MobileSuiteModel) ([]byte, error) { var v []byte var err error v, err = proto.Marshal(src) if err != nil { log.Error("when encoding:", err.Error()) return nil, err } var length int32 = int32(len(v)) pkg := new(bytes.Buffer) err = binary.Write(pkg, binary.LittleEndian, length) if err != nil { log.Error("when write length:", err.Error()) return nil, err } err = binary.Write(pkg, binary.LittleEndian, v) if err != nil { log.Error("when Serialize:", err.Error()) return nil, err } return pkg.Bytes(), nil }
// We call this function only if we have a Select query (not continuous) or Delete query func (self *Coordinator) runQuerySpec(querySpec *parser.QuerySpec, p engine.Processor) error { self.expandRegex(querySpec) shards, processor, err := self.getShardsAndProcessor(querySpec, p) if err != nil { return err } if len(shards) == 0 { return processor.Close() } shardConcurrentLimit := self.config.ConcurrentShardQueryLimit if self.shouldQuerySequentially(shards, querySpec) { log.Debug("Querying shards sequentially") shardConcurrentLimit = 1 } log.Debug("Shard concurrent limit: %d", shardConcurrentLimit) mcp := NewMergeChannelProcessor(processor, shardConcurrentLimit) go mcp.ProcessChannels() if err := self.queryShards(querySpec, shards, mcp); err != nil { log.Error("Error while querying shards: %s", err) mcp.Close() return err } if err := mcp.Close(); err != nil { log.Error("Error while querying shards: %s", err) return err } return processor.Close() }
func (self ProtobufProbe) DeserializeByReader(reader *bufio.Reader) (*protocol.MobileSuiteModel, int32, error) { lengthByte, _ := reader.Peek(4) lengthBuff := bytes.NewBuffer(lengthByte) var length int32 err := binary.Read(lengthBuff, binary.LittleEndian, &length) if err != nil { log.Error("when deserializeByReader:", err.Error()) return nil, -1, err } if int32(reader.Buffered()) < length+4 { log.Error("int32(reader.Buffered()) < length + 4") return nil, -1, err } pack := make([]byte, int(4+length)) _, err = reader.Read(pack) if err != nil { log.Error("when deserializeByReader:", err.Error()) return nil, -1, err } var dst protocol.MobileSuiteModel var msgType int32 msgType, err = self.Deserialize(pack, &dst) log.Debug(length, msgType, dst) return &dst, msgType, nil }
// RegisterTmp create a ephemeral node, and watch it, if node droped then send a SIGQUIT to self. func RegisterTemp(conn *zk.Conn, fpath string, data []byte) error { tpath, err := conn.Create(path.Join(fpath)+"/", data, zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll)) if err != nil { log.Error("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, string(data), err) return err } log.Debug("create a zookeeper node:%s", tpath) // watch self go func() { for { log.Info("zk path: \"%s\" set a watch", tpath) exist, _, watch, err := conn.ExistsW(tpath) if err != nil { log.Error("zk.ExistsW(\"%s\") error(%v)", tpath, err) log.Warn("zk path: \"%s\" set watch failed, kill itself", tpath) killSelf() return } if !exist { log.Warn("zk path: \"%s\" not exist, kill itself", tpath) killSelf() return } event := <-watch log.Info("zk path: \"%s\" receive a event %v", tpath, event) } }() return nil }
// NewMySQLStorage initialize mysql pool and consistency hash ring. func NewMySQLStorage() *MySQLStorage { dbPool := make(map[string]*sql.DB) ring := ketama.NewRing(ketamaBase) for n, source := range Conf.MySQLSource { nw := strings.Split(n, mysqlSourceSpliter) if len(nw) != 2 { err := errors.New("node config error, it's nodeN:W") log.Error("strings.Split(\"%s\", :) failed (%v)", n, err) panic(err) } w, err := strconv.Atoi(nw[1]) if err != nil { log.Error("strconv.Atoi(\"%s\") failed (%v)", nw[1], err) panic(err) } db, err := sql.Open("mysql", source) if err != nil { log.Error("sql.Open(\"mysql\", %s) failed (%v)", source, err) panic(err) } dbPool[nw[0]] = db ring.AddNode(nw[0], w) } ring.Bake() s := &MySQLStorage{pool: dbPool, ring: ring} go s.clean() return s }
// gob的序列化方法实现 func (self GobProbe) Serialize(src *VictoriestMsg) ([]byte, error) { // 序列化 // vMsg := &VictoriestMsg{MsgType: msgType, MsgContext: src} buf := new(bytes.Buffer) enc := gob.NewEncoder(buf) err := enc.Encode(src) if err != nil { log.Error("when GobProbe.Encoding:", err.Error()) return nil, err } v := buf.Bytes() // 序列化后的byte长度 var length int32 = int32(len(v)) // 将长度信息写入byte数组 pkg := new(bytes.Buffer) err = binary.Write(pkg, binary.LittleEndian, length) if err != nil { log.Error("when GobProbe.Encoding:", err.Error()) return nil, err } // 写入序列化后的对象 err = binary.Write(pkg, binary.LittleEndian, v) if err != nil { log.Error("when GobProbe.Encoding:", err.Error()) return nil, err } return pkg.Bytes(), nil }
func handlePostEventsAndForget(c *Context, post *model.Post, triggerWebhooks bool) { go func() { tchan := Srv.Store.Team().Get(c.Session.TeamId) cchan := Srv.Store.Channel().Get(post.ChannelId) uchan := Srv.Store.User().Get(post.UserId) pchan := Srv.Store.User().GetProfiles(c.Session.TeamId) mchan := Srv.Store.Channel().GetMembers(post.ChannelId) var team *model.Team if result := <-tchan; result.Err != nil { l4g.Error(utils.T("api.post.handle_post_events_and_forget.team.error"), c.Session.TeamId, result.Err) return } else { team = result.Data.(*model.Team) } var channel *model.Channel if result := <-cchan; result.Err != nil { l4g.Error(utils.T("api.post.handle_post_events_and_forget.channel.error"), post.ChannelId, result.Err) return } else { channel = result.Data.(*model.Channel) } var profiles map[string]*model.User if result := <-pchan; result.Err != nil { l4g.Error(utils.T("api.post.handle_post_events_and_forget.profiles.error"), c.Session.TeamId, result.Err) return } else { profiles = result.Data.(map[string]*model.User) } var members []model.ChannelMember if result := <-mchan; result.Err != nil { l4g.Error(utils.T("api.post.handle_post_events_and_forget.members.error"), post.ChannelId, result.Err) return } else { members = result.Data.([]model.ChannelMember) } go sendNotifications(c, post, team, channel, profiles, members) go checkForOutOfChannelMentions(c, post, channel, profiles, members) var user *model.User if result := <-uchan; result.Err != nil { l4g.Error(utils.T("api.post.handle_post_events_and_forget.user.error"), post.UserId, result.Err) return } else { user = result.Data.(*model.User) } if triggerWebhooks { handleWebhookEventsAndForget(c, post, team, channel, user) } if channel.Type == model.CHANNEL_DIRECT { go makeDirectChannelVisible(c.Session.TeamId, post.ChannelId) } }() }
// NextIds get snowflake ids. func (id *IdWorker) NextIds(num int) ([]int64, error) { if num > maxNextIdsNum || num < 0 { log.Error("NextIds num can't be greater than %d or less than 0", maxNextIdsNum) return nil, errors.New(fmt.Sprintf("NextIds num: %d error", num)) } ids := make([]int64, num) id.mutex.Lock() defer id.mutex.Unlock() for i := 0; i < num; i++ { timestamp := timeGen() if timestamp < id.lastTimestamp { log.Error("clock is moving backwards. Rejecting requests until %d.", id.lastTimestamp) return nil, errors.New(fmt.Sprintf("Clock moved backwards. Refusing to generate id for %d milliseconds", id.lastTimestamp-timestamp)) } if id.lastTimestamp == timestamp { id.sequence = (id.sequence + 1) & sequenceMask if id.sequence == 0 { timestamp = tilNextMillis(id.lastTimestamp) } } else { id.sequence = 0 } id.lastTimestamp = timestamp ids[i] = ((timestamp - id.twepoch) << timestampLeftShift) | (id.datacenterId << datacenterIdShift) | (id.workerId << workerIdShift) | id.sequence } return ids, nil }
// watchCometNode watches one named comet node directory for leader selection
// when failing over: it lists the children, elects the first (sorted) child
// as leader, registers it, publishes the updated node info on ch, then
// blocks on the zk watch until the children change and repeats.
func watchCometNode(conn *zk.Conn, node, fpath string, retry, ping time.Duration, ch chan *CometNodeEvent) {
	fpath = path.Join(fpath, node)
	for {
		nodes, watch, err := myzk.GetNodesW(conn, fpath)
		if err == myzk.ErrNodeNotExist {
			// Parent node is gone entirely: stop watching for good.
			log.Warn("zk don't have node \"%s\"", fpath)
			break
		} else if err == myzk.ErrNoChild {
			// No children yet: poll again after a delay.
			log.Warn("zk don't have any children in \"%s\", retry in %d second", fpath, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else if err != nil {
			// Transient zookeeper error: retry after a delay.
			log.Error("zk path: \"%s\" getNodes error(%v), retry in %d second", fpath, err, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		}
		// leader selection: the lexicographically smallest child wins.
		sort.Strings(nodes)
		if info, err := registerCometNode(conn, nodes[0], fpath, retry, ping, true); err != nil {
			log.Error("zk path: \"%s\" registerCometNode error(%v)", fpath, err)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else {
			// update node info
			ch <- &CometNodeEvent{Event: eventNodeUpdate, Key: node, Value: info}
		}
		// blocking receive event: wake when the children of fpath change.
		event := <-watch
		log.Info("zk path: \"%s\" receive a event: (%v)", fpath, event)
	}
	// WARN, if no persistence node and comet rpc not config
	log.Warn("zk path: \"%s\" never watch again till recreate", fpath)
}
func cmdResetMfa() { if flagCmdResetMfa { if len(flagEmail) == 0 && len(flagUsername) == 0 { fmt.Fprintln(os.Stderr, "flag needs an argument: -email OR -username") flag.Usage() os.Exit(1) } var user *model.User if len(flagEmail) > 0 { if result := <-api.Srv.Store.User().GetByEmail(flagEmail); result.Err != nil { l4g.Error("%v", result.Err) flushLogAndExit(1) } else { user = result.Data.(*model.User) } } else { if result := <-api.Srv.Store.User().GetByUsername(flagUsername); result.Err != nil { l4g.Error("%v", result.Err) flushLogAndExit(1) } else { user = result.Data.(*model.User) } } if err := api.DeactivateMfa(user.Id); err != nil { l4g.Error("%v", err) flushLogAndExit(1) } os.Exit(0) } }
func (s *Server) HandleSocket(socket *net.UDPConn) { // From https://collectd.org/wiki/index.php/Binary_protocol // 1024 bytes (payload only, not including UDP / IP headers) // In versions 4.0 through 4.7, the receive buffer has a fixed size // of 1024 bytes. When longer packets are received, the trailing data // is simply ignored. Since version 4.8, the buffer size can be // configured. Version 5.0 will increase the default buffer size to // 1452 bytes (the maximum payload size when using UDP/IPv6 over // Ethernet). buffer := make([]byte, 1452) for { n, _, err := socket.ReadFromUDP(buffer) if err != nil || n == 0 { log.Error("Collectd ReadFromUDP error: %s", err) continue } packets, err := collectd.Packets(buffer[:n], s.typesdb) if err != nil { log.Error("Collectd parse error: %s", err) continue } for _, packet := range *packets { series := packetToSeries(&packet) err = s.coordinator.WriteSeriesData(s.user, s.database, series) if err != nil { log.Error("Collectd cannot write data: %s", err) continue } } } }
func cmdUploadLicense() { if flagCmdUploadLicense { if model.BuildEnterpriseReady != "true" { fmt.Fprintln(os.Stderr, "build must be enterprise ready") os.Exit(1) } if len(flagLicenseFile) == 0 { fmt.Fprintln(os.Stderr, "flag needs an argument: -team_name") flag.Usage() os.Exit(1) } var fileBytes []byte var err error if fileBytes, err = ioutil.ReadFile(flagLicenseFile); err != nil { l4g.Error("%v", err) flushLogAndExit(1) } if _, err := api.SaveLicense(fileBytes); err != nil { l4g.Error("%v", err) flushLogAndExit(1) } else { flushLogAndExit(0) } flushLogAndExit(0) } }
// hanleTCPConn handle a long live tcp connection. func handleTCPConn(conn net.Conn, rc chan *bufio.Reader) { addr := conn.RemoteAddr().String() log.Debug("<%s> handleTcpConn routine start", addr) rd := newBufioReader(rc, conn) if args, err := parseCmd(rd); err == nil { // return buffer bufio.Reader putBufioReader(rc, rd) switch args[0] { case "sub": SubscribeTCPHandle(conn, args[1:]) break default: conn.Write(ParamReply) log.Warn("<%s> unknown cmd \"%s\"", addr, args[0]) break } } else { // return buffer bufio.Reader putBufioReader(rc, rd) log.Error("<%s> parseCmd() error(%v)", addr, err) } // close the connection if err := conn.Close(); err != nil { log.Error("<%s> conn.Close() error(%v)", addr, err) } log.Debug("<%s> handleTcpConn routine stop", addr) return }
func cmdActivateUser() { if flagCmdActivateUser { if len(flagEmail) == 0 { fmt.Fprintln(os.Stderr, "flag needs an argument: -email") flag.Usage() os.Exit(1) } var user *model.User if result := <-api.Srv.Store.User().GetByEmail(flagEmail); result.Err != nil { l4g.Error("%v", result.Err) flushLogAndExit(1) } else { user = result.Data.(*model.User) } if user.IsLDAPUser() { l4g.Error("%v", utils.T("api.user.update_active.no_deactivate_ldap.app_error")) } if _, err := api.UpdateActive(user, !flagUserSetInactive); err != nil { l4g.Error("%v", err) } os.Exit(0) } }
func PostUpdateChannelHeaderMessage(c *Context, channelId string, oldChannelHeader, newChannelHeader string) { uc := Srv.Store.User().Get(c.Session.UserId) if uresult := <-uc; uresult.Err != nil { l4g.Error(utils.T("api.channel.post_update_channel_header_message_and_forget.retrieve_user.error"), uresult.Err) return } else { user := uresult.Data.(*model.User) var message string if oldChannelHeader == "" { message = fmt.Sprintf(utils.T("api.channel.post_update_channel_header_message_and_forget.updated_to"), user.Username, newChannelHeader) } else if newChannelHeader == "" { message = fmt.Sprintf(utils.T("api.channel.post_update_channel_header_message_and_forget.removed"), user.Username, oldChannelHeader) } else { message = fmt.Sprintf(utils.T("api.channel.post_update_channel_header_message_and_forget.updated_from"), user.Username, oldChannelHeader, newChannelHeader) } post := &model.Post{ ChannelId: channelId, Message: message, Type: model.POST_HEADER_CHANGE, UserId: c.Session.UserId, Props: model.StringInterface{ "old_header": oldChannelHeader, "new_header": newChannelHeader, }, } if _, err := CreatePost(c, post, false); err != nil { l4g.Error(utils.T("api.channel.post_update_channel_header_message_and_forget.join_leave.error"), err) } } }
func (self *ProtobufRequestHandler) WriteResponse(conn net.Conn, response *protocol.Response) error { if response.Size() >= MAX_RESPONSE_SIZE { f, s := splitResponse(response) err := self.WriteResponse(conn, f) if err != nil { return err } return self.WriteResponse(conn, s) } data, err := response.Encode() if err != nil { log.Error("error encoding response: %s", err) return err } buff := bytes.NewBuffer(make([]byte, 0, len(data)+8)) binary.Write(buff, binary.LittleEndian, uint32(len(data))) _, err = conn.Write(append(buff.Bytes(), data...)) if err != nil { log.Error("error writing response: %s", err) return err } return nil }