// watchCometNode watches the children of fpath/node and performs leader
// selection on every change (failover): the lexicographically smallest child
// is registered as the current comet node and an update event is pushed to ch.
// The loop blocks on the zookeeper watch between rounds and exits permanently
// once the watched path no longer exists.
func watchCometNode(conn *zk.Conn, node, fpath string, retry, ping time.Duration, ch chan *CometNodeEvent) {
	fpath = path.Join(fpath, node)
	for {
		nodes, watch, err := myzk.GetNodesW(conn, fpath)
		if err == myzk.ErrNodeNotExist {
			// Path is gone: stop watching for good (see warning below).
			log.Warn("zk don't have node \"%s\"", fpath)
			break
		} else if err == myzk.ErrNoChild {
			// No children yet: retry after a delay.
			log.Warn("zk don't have any children in \"%s\", retry in %d second", fpath, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else if err != nil {
			// Transient zookeeper error: retry after a delay.
			log.Error("zk path: \"%s\" getNodes error(%v), retry in %d second", fpath, err, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		}
		// leader selection: sort children and pick the smallest as leader
		sort.Strings(nodes)
		if info, err := registerCometNode(conn, nodes[0], fpath, retry, ping, true); err != nil {
			log.Error("zk path: \"%s\" registerCometNode error(%v)", fpath, err)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else {
			// update node info
			ch <- &CometNodeEvent{Event: eventNodeUpdate, Key: node, Value: info}
		}
		// blocking receive event: wake up only when the child list changes
		event := <-watch
		log.Info("zk path: \"%s\" receive a event: (%v)", fpath, event)
	}
	// WARN, if no persistence node and comet rpc not config
	log.Warn("zk path: \"%s\" never watch again till recreate", fpath)
}
func PermanentDeleteTeam(c *Context, team *model.Team) *model.AppError { l4g.Warn(utils.T("api.team.permanent_delete_team.attempting.warn"), team.Name, team.Id) c.Path = "/teams/permanent_delete" c.LogAuditWithUserId("", fmt.Sprintf("attempt teamId=%v", team.Id)) team.DeleteAt = model.GetMillis() if result := <-Srv.Store.Team().Update(team); result.Err != nil { return result.Err } if result := <-Srv.Store.User().GetForExport(team.Id); result.Err != nil { return result.Err } else { users := result.Data.([]*model.User) for _, user := range users { PermanentDeleteUser(c, user) } } if result := <-Srv.Store.Channel().PermanentDeleteByTeam(team.Id); result.Err != nil { return result.Err } if result := <-Srv.Store.Team().PermanentDelete(team.Id); result.Err != nil { return result.Err } l4g.Warn(utils.T("api.team.permanent_delete_team.deleted.warn"), team.Name, team.Id) c.LogAuditWithUserId("", fmt.Sprintf("success teamId=%v", team.Id)) return nil }
func SlackUploadFile(sPost SlackPost, uploads map[string]*zip.File, teamId string, channelId string, userId string) (*model.FileInfo, bool) { if sPost.File != nil { if file, ok := uploads[sPost.File.Id]; ok == true { openFile, err := file.Open() if err != nil { l4g.Warn(utils.T("api.slackimport.slack_add_posts.upload_file_open_failed.warn", map[string]interface{}{"FileId": sPost.File.Id, "Error": err.Error()})) return nil, false } defer openFile.Close() uploadedFile, err := ImportFile(openFile, teamId, channelId, userId, filepath.Base(file.Name)) if err != nil { l4g.Warn(utils.T("api.slackimport.slack_add_posts.upload_file_upload_failed.warn", map[string]interface{}{"FileId": sPost.File.Id, "Error": err.Error()})) return nil, false } return uploadedFile, true } else { l4g.Warn(utils.T("api.slackimport.slack_add_posts.upload_file_not_found.warn", map[string]interface{}{"FileId": sPost.File.Id})) return nil, false } } else { l4g.Warn(utils.T("api.slackimport.slack_add_posts.upload_file_not_in_json.warn")) return nil, false } }
// RegisterTemp creates an ephemeral+sequence node under fpath carrying data,
// then watches it from a background goroutine. If the node drops or the watch
// cannot be re-established, the process kills itself (per the original author,
// killSelf sends SIGQUIT) so a supervisor can restart it.
// (Comment previously named the function "RegisterTmp"; fixed to match.)
func RegisterTemp(conn *zk.Conn, fpath string, data []byte) error {
	// Trailing "/" plus zk.FlagSequence makes zookeeper append a sequence
	// number; tpath is the full generated path.
	tpath, err := conn.Create(path.Join(fpath)+"/", data, zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll))
	if err != nil {
		log.Error("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, string(data), err)
		return err
	}
	log.Debug("create a zookeeper node:%s", tpath)
	// watch self: loop re-arms the existence watch after every event
	go func() {
		for {
			log.Info("zk path: \"%s\" set a watch", tpath)
			exist, _, watch, err := conn.ExistsW(tpath)
			if err != nil {
				log.Error("zk.ExistsW(\"%s\") error(%v)", tpath, err)
				log.Warn("zk path: \"%s\" set watch failed, kill itself", tpath)
				killSelf()
				return
			}
			if !exist {
				// Ephemeral node vanished (session lost): terminate.
				log.Warn("zk path: \"%s\" not exist, kill itself", tpath)
				killSelf()
				return
			}
			event := <-watch
			log.Info("zk path: \"%s\" receive a event %v", tpath, event)
		}
	}()
	return nil
}
func PermanentDeleteTeam(c *Context, team *model.Team) *model.AppError { l4g.Warn(utils.T("api.team.permanent_delete_team.attempting.warn"), team.Name, team.Id) c.Path = "/teams/permanent_delete" c.LogAuditWithUserId("", fmt.Sprintf("attempt teamId=%v", team.Id)) team.DeleteAt = model.GetMillis() if result := <-Srv.Store.Team().Update(team); result.Err != nil { return result.Err } if result := <-Srv.Store.Channel().PermanentDeleteByTeam(team.Id); result.Err != nil { return result.Err } if result := <-Srv.Store.Team().RemoveAllMembersByTeam(team.Id); result.Err != nil { return result.Err } if result := <-Srv.Store.Team().PermanentDelete(team.Id); result.Err != nil { return result.Err } l4g.Warn(utils.T("api.team.permanent_delete_team.deleted.warn"), team.Name, team.Id) c.LogAuditWithUserId("", fmt.Sprintf("success teamId=%v", team.Id)) return nil }
func shouldPerformUpgrade(sqlStore *SqlStore, currentSchemaVersion string, expectedSchemaVersion string) bool { if sqlStore.SchemaVersion == currentSchemaVersion { l4g.Warn(utils.T("store.sql.schema_out_of_date.warn"), currentSchemaVersion) l4g.Warn(utils.T("store.sql.schema_upgrade_attempt.warn"), expectedSchemaVersion) return true } return false }
// sendBatchedEmailNotification renders the given batched notifications into a
// single digest email and sends it to the user identified by userId. On any
// store or mail error it logs a warning and gives up (no retry).
func sendBatchedEmailNotification(userId string, notifications []*batchedNotification) {
	// Kick off both store reads before blocking on either.
	uchan := Srv.Store.User().Get(userId)
	pchan := Srv.Store.Preference().Get(userId, model.PREFERENCE_CATEGORY_DISPLAY_SETTINGS, model.PREFERENCE_NAME_DISPLAY_NAME_FORMAT)
	var user *model.User
	// NOTE(review): these two warnings pass the raw key instead of utils.T(...)
	// like the final one does — looks inconsistent; confirm intent.
	if result := <-uchan; result.Err != nil {
		l4g.Warn("api.email_batching.send_batched_email_notification.user.app_error")
		return
	} else {
		user = result.Data.(*model.User)
	}
	translateFunc := utils.GetUserTranslations(user.Locale)
	var displayNameFormat string
	if result := <-pchan; result.Err != nil && result.Err.DetailedError != sql.ErrNoRows.Error() {
		l4g.Warn("api.email_batching.send_batched_email_notification.preferences.app_error")
		return
	} else if result.Err != nil {
		// no display name format saved, so fall back to default
		displayNameFormat = model.PREFERENCE_DEFAULT_DISPLAY_NAME_FORMAT
	} else {
		displayNameFormat = result.Data.(model.Preference).Value
	}
	// Render each post with a fresh template; contents accumulates the HTML.
	var contents string
	for _, notification := range notifications {
		template := utils.NewHTMLTemplate("post_batched_post", user.Locale)
		contents += renderBatchedPost(template, notification.post, notification.teamName, displayNameFormat, translateFunc)
	}
	// Subject is dated from the oldest (first) post's CreateAt (millis).
	tm := time.Unix(notifications[0].post.CreateAt/1000, 0)
	subject := translateFunc("api.email_batching.send_batched_email_notification.subject", len(notifications), map[string]interface{}{
		"SiteName": utils.Cfg.TeamSettings.SiteName,
		"Year":     tm.Year(),
		"Month":    translateFunc(tm.Month().String()),
		"Day":      tm.Day(),
	})
	body := utils.NewHTMLTemplate("post_batched_body", user.Locale)
	body.Props["SiteURL"] = *utils.Cfg.ServiceSettings.SiteURL
	// template here is the html/template package, not the loop variable above.
	body.Props["Posts"] = template.HTML(contents)
	body.Props["BodyText"] = translateFunc("api.email_batching.send_batched_email_notification.body_text", len(notifications))
	if err := utils.SendMail(user.Email, subject, body.Render()); err != nil {
		// NOTE(review): key says "email_batchings" (plural) unlike every other
		// key in this function — possible typo in the i18n key; verify.
		l4g.Warn(utils.T("api.email_batchings.send_batched_email_notification.send.app_error"), user.Email, err)
	}
}
func recycleDatabaseConnection(c *Context, w http.ResponseWriter, r *http.Request) { oldStore := Srv.Store l4g.Warn(utils.T("api.admin.recycle_db_start.warn")) Srv.Store = store.NewSqlStore() time.Sleep(20 * time.Second) oldStore.Close() l4g.Warn(utils.T("api.admin.recycle_db_end.warn")) w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") ReturnStatusOK(w) }
// getInfoForFilename reconstructs a FileInfo for a legacy post attachment
// stored under the old Filename scheme, reading the file back to detect its
// metadata. Returns nil when the filename is malformed or the file is missing.
func getInfoForFilename(post *model.Post, teamId string, filename string) *model.FileInfo {
	// Find the path from the Filename of the form /{channelId}/{userId}/{uid}/{nameWithExtension}
	split := strings.SplitN(filename, "/", 5)
	if len(split) < 5 {
		l4g.Error(utils.T("api.file.migrate_filenames_to_file_infos.unexpected_filename.error"), post.Id, filename)
		return nil
	}
	channelId := split[1]
	userId := split[2]
	oldId := split[3]
	// Best effort: on a malformed escape sequence name becomes "".
	name, _ := url.QueryUnescape(split[4])
	// Mismatches against the post are only warned about — migration proceeds
	// using the components parsed from the filename itself.
	if split[0] != "" || split[1] != post.ChannelId || split[2] != post.UserId || strings.Contains(split[4], "/") {
		l4g.Warn(utils.T("api.file.migrate_filenames_to_file_infos.mismatched_filename.warn"), post.Id, post.ChannelId, post.UserId, filename)
	}
	pathPrefix := fmt.Sprintf("teams/%s/channels/%s/users/%s/%s/", teamId, channelId, userId, oldId)
	path := pathPrefix + name
	// Open the file and populate the fields of the FileInfo
	var info *model.FileInfo
	if data, err := ReadFile(path); err != nil {
		l4g.Error(utils.T("api.file.migrate_filenames_to_file_infos.file_not_found.error"), post.Id, filename, path, err)
		return nil
	} else {
		var err *model.AppError
		info, err = model.GetInfoForBytes(name, data)
		if err != nil {
			// NOTE(review): info is dereferenced below even on this error —
			// assumes GetInfoForBytes always returns a usable info; confirm.
			l4g.Warn(utils.T("api.file.migrate_filenames_to_file_infos.info.app_error"), post.Id, filename, err)
		}
	}
	// Generate a new ID because with the old system, you could very rarely get multiple posts referencing the same file
	info.Id = model.NewId()
	info.CreatorId = post.UserId
	info.PostId = post.Id
	info.CreateAt = post.CreateAt
	info.UpdateAt = post.UpdateAt
	info.Path = path
	if info.IsImage() {
		// Derive the legacy preview/thumbnail paths from the base name.
		nameWithoutExtension := name[:strings.LastIndex(name, ".")]
		info.PreviewPath = pathPrefix + nameWithoutExtension + "_preview.jpg"
		info.ThumbnailPath = pathPrefix + nameWithoutExtension + "_thumb.jpg"
	}
	return info
}
func sendReactionEvent(event string, channelId string, reaction *model.Reaction, postHadReactions bool) { // send out that a reaction has been added/removed go func() { message := model.NewWebSocketEvent(event, "", channelId, "", nil) message.Add("reaction", reaction.ToJson()) app.Publish(message) }() // send out that a post was updated if post.HasReactions has changed go func() { var post *model.Post if result := <-app.Srv.Store.Post().Get(reaction.PostId); result.Err != nil { l4g.Warn(utils.T("api.reaction.send_reaction_event.post.app_error")) return } else { post = result.Data.(*model.PostList).Posts[reaction.PostId] } if post.HasReactions != postHadReactions { message := model.NewWebSocketEvent(model.WEBSOCKET_EVENT_POST_EDITED, "", channelId, "", nil) message.Add("post", post.ToJson()) app.Publish(message) } }() }
// DelMulti implements the Storage DelMulti method. func (s *RedisStorage) clean() { for { info := <-s.delCH conn := s.getConn(info.Key) if conn == nil { log.Warn("get redis connection nil") continue } for _, mid := range info.MIds { if err := conn.Send("ZREMRANGEBYSCORE", info.Key, mid, mid); err != nil { log.Error("conn.Send(\"ZREMRANGEBYSCORE\", \"%s\", %d, %d) error(%v)", info.Key, mid, mid, err) conn.Close() continue } } if err := conn.Flush(); err != nil { log.Error("conn.Flush() error(%v)", err) conn.Close() continue } for _, _ = range info.MIds { _, err := conn.Receive() if err != nil { log.Error("conn.Receive() error(%v)", err) conn.Close() continue } } conn.Close() } }
// hanleTCPConn handle a long live tcp connection. func handleTCPConn(conn net.Conn, rc chan *bufio.Reader) { addr := conn.RemoteAddr().String() log.Debug("<%s> handleTcpConn routine start", addr) rd := newBufioReader(rc, conn) if args, err := parseCmd(rd); err == nil { // return buffer bufio.Reader putBufioReader(rc, rd) switch args[0] { case "sub": SubscribeTCPHandle(conn, args[1:]) break default: conn.Write(ParamReply) log.Warn("<%s> unknown cmd \"%s\"", addr, args[0]) break } } else { // return buffer bufio.Reader putBufioReader(rc, rd) log.Error("<%s> parseCmd() error(%v)", addr, err) } // close the connection if err := conn.Close(); err != nil { log.Error("<%s> conn.Close() error(%v)", addr, err) } log.Debug("<%s> handleTcpConn routine stop", addr) return }
// Get returns the user channel for key from the ChannelList, creating a new
// one when newOne is set and authentication is disabled (Conf.Auth false).
// Returns ErrChannelNotExist when the channel is absent and cannot be created.
func (l *ChannelList) Get(key string, newOne bool) (Channel, error) {
	// validate the key before touching any bucket
	if err := l.validate(key); err != nil {
		return nil, err
	}
	// get a channel bucket (hash of key selects the bucket)
	b := l.Bucket(key)
	b.Lock()
	// Unlock is done manually on each branch (before logging/stat updates)
	// rather than deferred, keeping the critical section minimal.
	if c, ok := b.Data[key]; !ok {
		if !Conf.Auth && newOne {
			c = NewSeqChannel()
			b.Data[key] = c
			b.Unlock()
			ChStat.IncrCreate()
			log.Info("user_key:\"%s\" create a new channel", key)
			return c, nil
		} else {
			b.Unlock()
			// NOTE(review): "channle" is a typo in this runtime log string;
			// left byte-identical since operators may grep for it.
			log.Warn("user_key:\"%s\" channle not exists", key)
			return nil, ErrChannelNotExist
		}
	} else {
		b.Unlock()
		ChStat.IncrAccess()
		log.Info("user_key:\"%s\" refresh channel bucket expire time", key)
		return c, nil
	}
}
// GetPrivate implements the Storage GetPrivate method. func (s *MySQLStorage) GetPrivate(key string, mid int64) ([]*myrpc.Message, error) { db := s.getConn(key) if db == nil { return nil, ErrNoMySQLConn } now := time.Now().Unix() rows, err := db.Query(getPrivateMsgSQL, key, mid) if err != nil { log.Error("db.Query(\"%s\",\"%s\",%d,now) failed (%v)", getPrivateMsgSQL, key, mid, err) return nil, err } msgs := []*myrpc.Message{} for rows.Next() { expire := int64(0) cmid := int64(0) msg := []byte{} if err := rows.Scan(&cmid, &expire, &msg); err != nil { log.Error("rows.Scan() failed (%v)", err) return nil, err } if now > expire { log.Warn("user_key: \"%s\" mid: %d expired", key, cmid) continue } msgs = append(msgs, &myrpc.Message{MsgId: cmid, GroupId: myrpc.PrivateGroupId, Msg: json.RawMessage(msg)}) } return msgs, nil }
func SlackConvertUserMentions(users []SlackUser, posts map[string][]SlackPost) map[string][]SlackPost { var regexes = make(map[string]*regexp.Regexp, len(users)) for _, user := range users { r, err := regexp.Compile("<@" + user.Id + `(\|` + user.Username + ")?>") if err != nil { l4g.Warn(utils.T("api.slackimport.slack_convert_user_mentions.compile_regexp_failed.warn"), user.Id, user.Username) continue } regexes["@"+user.Username] = r } // Special cases. regexes["@here"], _ = regexp.Compile(`<!here\|@here>`) regexes["@channel"], _ = regexp.Compile("<!channel>") regexes["@all"], _ = regexp.Compile("<!everyone>") for channelName, channelPosts := range posts { for postIdx, post := range channelPosts { for mention, r := range regexes { post.Text = r.ReplaceAllString(post.Text, mention) posts[channelName][postIdx] = post } } } return posts }
func getMessageForNotification(post *model.Post, translateFunc i18n.TranslateFunc) string { if len(strings.TrimSpace(post.Message)) != 0 || len(post.FileIds) == 0 { return post.Message } // extract the filenames from their paths and determine what type of files are attached var infos []*model.FileInfo if result := <-Srv.Store.FileInfo().GetForPost(post.Id); result.Err != nil { l4g.Warn(utils.T("api.post.get_message_for_notification.get_files.error"), post.Id, result.Err) } else { infos = result.Data.([]*model.FileInfo) } filenames := make([]string, len(infos)) onlyImages := true for i, info := range infos { if escaped, err := url.QueryUnescape(filepath.Base(info.Name)); err != nil { // this should never error since filepath was escaped using url.QueryEscape filenames[i] = escaped } else { filenames[i] = info.Name } onlyImages = onlyImages && info.IsImage() } props := map[string]interface{}{"Filenames": strings.Join(filenames, ", ")} if onlyImages { return translateFunc("api.post.get_message_for_notification.images_sent", len(filenames), props) } else { return translateFunc("api.post.get_message_for_notification.files_sent", len(filenames), props) } }
// heartbeat runs the periodic liveness probe loop for this cluster server:
// send a heartbeat request, wait for the response, and mark the server up
// (resetting backoff) on success or delegate to handleHeartbeatError on
// failure. heartbeatStarted is cleared whenever the loop exits.
func (self *ClusterServer) heartbeat() {
	defer func() {
		self.heartbeatStarted = false
	}()

	for {
		// this chan is buffered and in the loop on purpose. This is so
		// that if reading a heartbeat times out, and the heartbeat then comes through
		// later, it will be dumped into this chan and not block the protobuf client reader.
		responseChan := make(chan *protocol.Response, 1)
		heartbeatRequest := &protocol.Request{
			Type:     &HEARTBEAT_TYPE,
			Database: protocol.String(""),
		}
		self.MakeRequest(heartbeatRequest, responseChan)
		err := self.getHeartbeatResponse(responseChan)
		if err != nil {
			// Error path handles backoff/marking down; retry immediately after.
			self.handleHeartbeatError(err)
			continue
		}

		// Log only on the down -> up transition (isUp is about to be set).
		if !self.isUp {
			log.Warn("Server marked as up. Heartbeat succeeded")
		}
		// otherwise, reset the backoff and mark the server as up
		self.isUp = true
		self.Backoff = self.MinBackoff
		time.Sleep(self.HeartbeatInterval)
	}
}
// GetPrivate implements the Storage GetPrivate method: it fetches all private
// messages for key with score (message id) strictly greater than mid from a
// Redis sorted set, drops expired or undecodable entries, and asynchronously
// queues those dropped ids for deletion via s.delCH.
func (s *RedisStorage) GetPrivate(key string, mid int64) ([]*myrpc.Message, error) {
	conn := s.getConn(key)
	if conn == nil {
		return nil, RedisNoConnErr
	}
	defer conn.Close()
	// "(%d" makes the lower bound exclusive; WITHSCORES interleaves
	// member/score pairs in the reply.
	values, err := redis.Values(conn.Do("ZRANGEBYSCORE", key, fmt.Sprintf("(%d", mid), "+inf", "WITHSCORES"))
	if err != nil {
		log.Error("conn.Do(\"ZRANGEBYSCORE\", \"%s\", \"%d\", \"+inf\", \"WITHSCORES\") error(%v)", key, mid, err)
		return nil, err
	}
	msgs := make([]*myrpc.Message, 0, len(values))
	delMsgs := []int64{}
	now := time.Now().Unix()
	// Consume the flat reply two items at a time: payload bytes, then score.
	for len(values) > 0 {
		cmid := int64(0)
		b := []byte{}
		values, err = redis.Scan(values, &b, &cmid)
		if err != nil {
			log.Error("redis.Scan() error(%v)", err)
			return nil, err
		}
		rm := &RedisPrivateMessage{}
		if err = json.Unmarshal(b, rm); err != nil {
			// Corrupt payload: schedule its id for deletion and move on.
			log.Error("json.Unmarshal(\"%s\", rm) error(%v)", string(b), err)
			delMsgs = append(delMsgs, cmid)
			continue
		}
		// check expire
		if rm.Expire < now {
			log.Warn("user_key: \"%s\" msg: %d expired", key, cmid)
			delMsgs = append(delMsgs, cmid)
			continue
		}
		m := &myrpc.Message{MsgId: cmid, Msg: rm.Msg, GroupId: myrpc.PrivateGroupId}
		msgs = append(msgs, m)
	}
	// delete unmarshal failed and expired message: non-blocking send so a
	// full cleanup channel never stalls the read path.
	if len(delMsgs) > 0 {
		select {
		case s.delCH <- &RedisDelMessage{Key: key, MIds: delMsgs}:
		default:
			log.Warn("user_key: \"%s\" send del messages failed, channel full", key)
		}
	}
	return msgs, nil
}
// Auth auth a token is valid func (t *Token) Auth(ticket string) error { if e, ok := t.token[ticket]; !ok { log.Warn("token \"%s\" not exist", ticket) return ErrTokenNotExist } else { td, _ := e.Value.(*TokenData) if time.Now().After(td.Expire) { t.clean() log.Warn("token \"%s\" expired", ticket) return ErrTokenExpired } td.Expire = time.Now().Add(Conf.TokenExpire) t.lru.MoveToBack(e) } t.clean() return nil }
// check scans the write-ahead log from the start and truncates it at the
// first sign of corruption: a zero-size header, a request that runs past the
// end of the file, or a request body that fails to decode. Scanning is done
// on a dup'd file handle; truncation is applied to self.file.
// NOTE(review): presumably the dup shares the inode so truncating self.file
// is equivalent — confirm dupLogFile semantics.
func (self *log) check() error {
	file, err := self.dupLogFile()
	if err != nil {
		return err
	}
	info, err := file.Stat()
	if err != nil {
		return err
	}
	size := info.Size()
	// Rewind; offset tracks the end of the last fully-validated request.
	offset, err := file.Seek(0, os.SEEK_SET)
	if err != nil {
		return err
	}
	for {
		n, hdr, err := self.getNextHeader(file)
		if err != nil {
			return err
		}
		if n == 0 || hdr.length == 0 {
			logger.Warn("%s was truncated to %d since the file has a zero size request", self.file.Name(), offset)
			return self.file.Truncate(offset)
		}
		if offset+int64(n)+int64(hdr.length) > size {
			// file is incomplete, truncate
			logger.Warn("%s was truncated to %d since the file ends prematurely", self.file.Name(), offset)
			return self.file.Truncate(offset)
		}
		// NOTE(review): a single Read may return fewer than hdr.length bytes;
		// a short read here would surface as a decode failure below — confirm.
		bytes := make([]byte, hdr.length)
		_, err = file.Read(bytes)
		if err != nil {
			return err
		}
		// this request is invalid truncate file
		req := &protocol.Request{}
		err = req.Decode(bytes)
		if err != nil {
			logger.Warn("%s was truncated to %d since the end of the file contains invalid data", self.file.Name(), offset)
			// truncate file and return
			return self.file.Truncate(offset)
		}
		// Request validated; advance past header + body.
		offset += int64(n) + int64(hdr.length)
	}
}
// Write different message to client by different protocol func (c *Connection) Write(key string, msg []byte) { select { case c.Buf <- msg: default: c.Conn.Close() log.Warn("user_key: \"%s\" discard message: \"%s\" and close connection", key, string(msg)) } }
// putBufioReader pub back a Reader to chan, if chan full discard it. func putBufioReader(c chan *bufio.Reader, r *bufio.Reader) { r.Reset(nil) select { case c <- r: default: log.Warn("tcp bufioReader cache full") } }
func LoadLicense() { file, err := os.Open(LicenseLocation()) if err != nil { l4g.Warn("Unable to open/find license file") return } defer file.Close() buf := bytes.NewBuffer(nil) io.Copy(buf, file) if success, licenseStr := ValidateLicense(buf.Bytes()); success { license := model.LicenseFromJson(strings.NewReader(licenseStr)) SetLicense(license) } l4g.Warn("No valid enterprise license found") }
func LoadLicense(licenseBytes []byte) { if success, licenseStr := ValidateLicense(licenseBytes); success { license := model.LicenseFromJson(strings.NewReader(licenseStr)) SetLicense(license) return } l4g.Warn(T("utils.license.load_license.invalid.warn")) }
func DeletePostFiles(post *model.Post) { if len(post.FileIds) != 0 { return } if result := <-app.Srv.Store.FileInfo().DeleteForPost(post.Id); result.Err != nil { l4g.Warn(utils.T("api.post.delete_post_files.app_error.warn"), post.Id, result.Err) } }
func (self *ClusterConfiguration) GetServerById(id *uint32) *ClusterServer { for _, server := range self.servers { if server.Id == *id { return server } } log.Warn("Couldn't find server with id %d. Cluster servers: %#v", *id, self.servers) return nil }
func (s *RedisStorage) getConnByNode(node string) redis.Conn { p, ok := s.pool[node] if !ok { log.Warn("no node: \"%s\" in redis pool", node) return nil } return p.Get() }
func (self *RaftServer) DropShard(id uint32, serverIds []uint32) error { if self.clusterConfig.GetShard(id) == nil { log.Warn("Attempted to drop shard that doesn't exist: ", id) return nil } command := NewDropShardCommand(id, serverIds) _, err := self.doOrProxyCommand(command) return err }
func (c *InfluxForceLeaveCommand) Apply(server raft.Server) (interface{}, error) { clusterConfig := server.Context().(*cluster.ClusterConfiguration) s := clusterConfig.GetServerById(&c.Id) if s == nil { return nil, nil } if err := server.RemovePeer(s.RaftName); err != nil { log.Warn("Cannot remove peer: %s", err) } if err := clusterConfig.RemoveServer(s); err != nil { log.Warn("Cannot remove peer from cluster config: %s", err) } server.FlushCommitIndex() return nil, nil }
func SlackAddPosts(channel *model.Channel, posts []SlackPost, users map[string]*model.User) { for _, sPost := range posts { switch { case sPost.Type == "message" && (sPost.SubType == "" || sPost.SubType == "file_share"): if sPost.User == "" { l4g.Debug(utils.T("api.slackimport.slack_add_posts.without_user.debug")) continue } else if users[sPost.User] == nil { l4g.Debug(utils.T("api.slackimport.slack_add_posts.user_no_exists.debug"), sPost.User) continue } newPost := model.Post{ UserId: users[sPost.User].Id, ChannelId: channel.Id, Message: sPost.Text, CreateAt: SlackConvertTimeStamp(sPost.TimeStamp), } ImportPost(&newPost) case sPost.Type == "message" && sPost.SubType == "file_comment": if sPost.Comment["user"] == "" { l4g.Debug(utils.T("api.slackimport.slack_add_posts.msg_no_usr.debug")) continue } else if users[sPost.Comment["user"]] == nil { l4g.Debug(utils.T("api.slackimport.slack_add_posts.user_no_exists.debug"), sPost.User) continue } newPost := model.Post{ UserId: users[sPost.Comment["user"]].Id, ChannelId: channel.Id, Message: sPost.Comment["comment"], CreateAt: SlackConvertTimeStamp(sPost.TimeStamp), } ImportPost(&newPost) case sPost.Type == "message" && sPost.SubType == "bot_message": continue case sPost.Type == "message" && (sPost.SubType == "channel_join" || sPost.SubType == "channel_leave"): if sPost.User == "" { l4g.Debug(utils.T("api.slackimport.slack_add_posts.msg_no_usr.debug")) continue } else if users[sPost.User] == nil { l4g.Debug(utils.T("api.slackimport.slack_add_posts.user_no_exists.debug"), sPost.User) continue } newPost := model.Post{ UserId: users[sPost.User].Id, ChannelId: channel.Id, Message: sPost.Text, CreateAt: SlackConvertTimeStamp(sPost.TimeStamp), Type: model.POST_JOIN_LEAVE, } ImportPost(&newPost) default: l4g.Warn(utils.T("api.slackimport.slack_add_posts.unsupported.warn"), sPost.Type, sPost.SubType) } } }