// handleTCPConn handles a long-lived TCP connection.
func handleTCPConn(conn net.Conn, rc chan *bufio.Reader) {
	addr := conn.RemoteAddr().String()
	log.Debug("<%s> handleTcpConn routine start", addr)
	rd := newBufioReader(rc, conn)
	if args, err := parseCmd(rd); err == nil {
		// return the bufio.Reader to the pool
		putBufioReader(rc, rd)
		switch args[0] {
		case "sub":
			SubscribeTCPHandle(conn, args[1:])
		default:
			conn.Write(ParamReply)
			log.Warn("<%s> unknown cmd \"%s\"", addr, args[0])
		}
	} else {
		// return the bufio.Reader to the pool
		putBufioReader(rc, rd)
		log.Error("<%s> parseCmd() error(%v)", addr, err)
	}
	// close the connection
	if err := conn.Close(); err != nil {
		log.Error("<%s> conn.Close() error(%v)", addr, err)
	}
	log.Debug("<%s> handleTcpConn routine stop", addr)
}
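// NOTE: newBufioReader and putBufioReader are referenced above but not shown
// in this section. A minimal sketch of the channel-backed free list they
// suggest (signatures and pooling policy are assumptions, not the original
// implementation):
func newBufioReader(c chan *bufio.Reader, r io.Reader) *bufio.Reader {
	select {
	case rd := <-c:
		rd.Reset(r) // reuse a pooled reader for this connection
		return rd
	default:
		return bufio.NewReader(r) // pool empty: allocate a fresh reader
	}
}

func putBufioReader(c chan *bufio.Reader, rd *bufio.Reader) {
	rd.Reset(nil) // drop the connection reference before pooling
	select {
	case c <- rd: // return the reader to the pool
	default: // pool full: let the GC reclaim it
	}
}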
func (self *ShardData) QueryResponseBufferSize(querySpec *parser.QuerySpec, batchPointSize int) int {
	groupByTime := querySpec.GetGroupByInterval()
	if groupByTime == nil {
		// If the group by time is nil, we shouldn't have to use a buffer
		// since the shards should be queried sequentially. However, set
		// this to something high just to be safe.
		log.Debug("BUFFER SIZE: 1000")
		return 1000
	}
	tickCount := int(self.shardNanoseconds / uint64(*groupByTime))
	if tickCount < 10 {
		tickCount = 100
	} else if tickCount > 1000 {
		// cap this because each response should have up to this number of points in it
		tickCount = tickCount / batchPointSize
		// but make sure it's at least 1k
		if tickCount < 1000 {
			tickCount = 1000
		}
	}
	columnCount := querySpec.GetGroupByColumnCount()
	if columnCount > 1 {
		// We don't really know the cardinality of any column up front, so
		// this is just a multiplier; we'll see how it goes. Each response
		// can have many points, so a buffer of ticks * 100 should be safe.
		tickCount = tickCount * 100
	}
	log.Debug("BUFFER SIZE: %d", tickCount)
	return tickCount
}
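// For intuition, a worked example of the sizing arithmetic above (numbers
// are hypothetical): a 7-day shard queried with "group by time(1m)" and
// batchPointSize = 100 gives
//
//	tickCount = (7 * 24 * 3600 * 1e9) / (60 * 1e9) = 10080   // > 1000
//	tickCount = 10080 / 100                        = 100     // scaled by batch size
//	tickCount = 1000                                         // raised to the 1k floor
//
// so the buffer ends up at the 1000 minimum unless the shard spans many more
// group-by intervals than it holds batches.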
func (this *HostProcessor) monitorSubProcs() {
	defer func() {
		if err := recover(); err != nil {
			l4g.Error(err)
			this.monitorSubProcs()
		}
	}()
	for {
		time.Sleep(2 * time.Second)
		l4g.Debug("monitor......")
		for port, counter := range ports {
			if port == "" {
				continue
			}
			if counter > 2 {
				l4g.Debug("restart port:" + port)
				err := KillProcessByPort(port)
				if err != nil {
					this.log(JoinString("hostprocessor->monitorsubprocs->killprocessbyport(port:", port, ")"), err)
				}
				this.runSubProc(port)
				ports[port] = 0
			} else {
				_, err := GetHttp(JoinString("http://127.0.0.1:", port, "/heartbeat/"))
				if err != nil {
					ports[port] = ports[port] + 1
					this.log(JoinString("hostprocessor->monitorsubprocs->get heartbeat(port:", port, ")"), err)
				} else {
					ports[port] = 0
				}
			}
		}
	}
}
func (self *Shard) Query(querySpec *parser.QuerySpec, processor engine.Processor) error {
	self.closeLock.RLock()
	defer self.closeLock.RUnlock()
	if self.closed {
		return fmt.Errorf("Shard is closed")
	}

	if querySpec.IsListSeriesQuery() {
		return fmt.Errorf("List series queries should never come to the shard")
	} else if querySpec.IsDeleteFromSeriesQuery() {
		return self.executeDeleteQuery(querySpec, processor)
	}

	if !self.hasReadAccess(querySpec) {
		return errors.New("User does not have access to one or more of the series requested.")
	}

	switch t := querySpec.SelectQuery().FromClause.Type; t {
	case parser.FromClauseArray:
		log.Debug("Shard %s: running a regular query", self.db.Path())
		return self.executeArrayQuery(querySpec, processor)
	case parser.FromClauseMerge, parser.FromClauseInnerJoin:
		log.Debug("Shard %s: running a merge query", self.db.Path())
		return self.executeMergeQuery(querySpec, processor, t)
	default:
		panic(fmt.Errorf("Unknown from clause type %s", t))
	}
}
func (c *WebConn) ReadPump() {
	defer func() {
		HubUnregister(c)
		c.WebSocket.Close()
	}()
	c.WebSocket.SetReadLimit(model.SOCKET_MAX_MESSAGE_SIZE_KB)
	c.WebSocket.SetReadDeadline(time.Now().Add(PONG_WAIT))
	c.WebSocket.SetPongHandler(func(string) error {
		c.WebSocket.SetReadDeadline(time.Now().Add(PONG_WAIT))
		if c.IsAuthenticated() {
			go SetStatusAwayIfNeeded(c.UserId, false)
		}
		return nil
	})

	for {
		var req model.WebSocketRequest
		if err := c.WebSocket.ReadJSON(&req); err != nil {
			// browsers will appear as CloseNoStatusReceived
			if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) {
				l4g.Debug(fmt.Sprintf("websocket.read: client side closed socket userId=%v", c.UserId))
			} else {
				l4g.Debug(fmt.Sprintf("websocket.read: closing websocket for userId=%v error=%v", c.UserId, err.Error()))
			}
			return
		}
		Srv.WebSocketRouter.ServeWebSocket(c, &req)
	}
}
func (s *RaftServer) joinHandler(w http.ResponseWriter, req *http.Request) {
	// if this is the leader, process the command
	if s.raftServer.State() == raft.Leader {
		command := &InfluxJoinCommand{}
		if err := json.NewDecoder(req.Body).Decode(&command); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		log.Debug("ON RAFT LEADER - JOIN: %v", command)
		// during the test suite the join command will sometimes time out; just retry a few times
		if _, err := s.raftServer.Do(command); err != nil {
			log.Error("Can't process %v: %s", command, err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}

	leader, ok := s.leaderConnectString()
	log.Debug("Non-leader redirecting to: (%v, %v)", leader, ok)
	if ok {
		log.Debug("redirecting to leader to join...")
		http.Redirect(w, req, leader+"/join", http.StatusTemporaryRedirect)
	} else {
		http.Error(w, errors.New("Couldn't find leader of the cluster to join").Error(), http.StatusInternalServerError)
	}
}
// We call this function only if we have a Select query (not continuous) or a Delete query.
func (self *Coordinator) runQuerySpec(querySpec *parser.QuerySpec, p engine.Processor) error {
	self.expandRegex(querySpec)
	shards, processor, err := self.getShardsAndProcessor(querySpec, p)
	if err != nil {
		return err
	}

	if len(shards) == 0 {
		return processor.Close()
	}

	shardConcurrentLimit := self.config.ConcurrentShardQueryLimit
	if self.shouldQuerySequentially(shards, querySpec) {
		log.Debug("Querying shards sequentially")
		shardConcurrentLimit = 1
	}
	log.Debug("Shard concurrent limit: %d", shardConcurrentLimit)

	mcp := NewMergeChannelProcessor(processor, shardConcurrentLimit)
	go mcp.ProcessChannels()

	if err := self.queryShards(querySpec, shards, mcp); err != nil {
		log.Error("Error while querying shards: %s", err)
		mcp.Close()
		return err
	}

	if err := mcp.Close(); err != nil {
		log.Error("Error while querying shards: %s", err)
		return err
	}

	return processor.Close()
}
func (self *WAL) processCommitEntry(e *commitEntry) {
	logger.Debug("committing %d for server %d", e.requestNumber, e.serverId)
	self.state.commitRequestNumber(e.serverId, e.requestNumber)
	idx := self.firstLogFile()
	if idx == 0 {
		e.confirmation <- &confirmation{0, nil}
		return
	}

	var unusedLogFiles []*log
	var unusedLogIndex []*index

	logger.Debug("Removing some unneeded log files: %d", idx)
	unusedLogFiles, self.logFiles = self.logFiles[:idx], self.logFiles[idx:]
	unusedLogIndex, self.logIndex = self.logIndex[:idx], self.logIndex[idx:]
	for logIdx, logFile := range unusedLogFiles {
		logger.Info("Deleting %s", logFile.file.Name())
		logFile.close()
		logFile.delete()
		logIndex := unusedLogIndex[logIdx]
		logIndex.close()
		logIndex.delete()
	}
	self.state.FirstSuffix = self.logFiles[0].suffix()
	e.confirmation <- &confirmation{0, nil}
}
/******************************************************************************
 * Overview:    ZmqReq request
 * Function:    Req
 * Returns:
 * Parameters:  name    type    range    description
 *
 ******************************************************************************/
func (this *ZmqReq) Req(servicepath string) (error, *dzhyun.SDSResponse) {
	stream, err0 := common.WrapRequest(DEFAULT_Version, servicepath, true)
	if err0 != nil {
		return err0, nil
	}
	if _, err1 := this.mreqSocket.SendBytes(stream, zmq4.DONTWAIT); err1 != nil {
		return err1, nil
	}
	stream2, err2 := this.mreqSocket.RecvBytes(0)
	if err2 != nil {
		log4.Debug("zmqreq %s", err2.Error())
		return err2, nil
	}
	if len(stream2) == 0 {
		log4.Debug("zmqreq timeout")
		return errors.New("req timeout"), nil
	}
	frm, err3 := common.UnwrapBaseProto(stream2)
	if err3 != nil {
		return err3, nil
	}
	mid := common.UnOffset(*frm.GetBody().Mid, 4)
	if mid[4-1] == 200 {
		return errors.New("failed protocol"), nil
	}
	res, err4 := common.UnwrapResponse(frm.GetBody().Mdata)
	if err4 != nil {
		return err4, nil
	}
	return nil, res
}
func (cli *ZKClient) Connect() error {
	conn, session, err := zk.ConnectWithDialer(cli.Servers, cli.SessionTimeout, cli.Dialer)
	if err != nil {
		return err
	}
	cli.connLock.Lock()
	cli.conn = conn
	cli.connLock.Unlock()

	sched := make(chan struct{})
	go func() {
		// signal the caller that the event loop has started
		close(sched)
		for e := range session {
			if e.State == zk.StateDisconnected {
				log.Error("session disconnected, event:%s", e)
			} else if e.State == zk.StateHasSession {
				log.Debug("session built, event:%s", e)
				cli.fnLock.RLock()
				fn := cli.fnOnSessionBuild
				cli.fnLock.RUnlock()
				if fn != nil {
					fn(cli)
				}
			} else {
				log.Debug("session recv event:%s", e)
			}
		}
		log.Info("session channel closed")
	}()
	<-sched
	return nil
}
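// Hedged usage sketch for Connect: register a session-rebuild hook before
// connecting so application state (e.g. ephemeral nodes) can be restored when
// ZooKeeper re-establishes the session. Direct access to fnOnSessionBuild and
// fnLock assumes same-package code; a real caller would likely go through a
// setter.
func connectWithHook(cli *ZKClient) error {
	cli.fnLock.Lock()
	cli.fnOnSessionBuild = func(c *ZKClient) {
		log.Info("session re-established; re-create ephemeral nodes here")
	}
	cli.fnLock.Unlock()
	return cli.Connect()
}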
// Process responses from the given channel. Returns true if
// processing should stop for other channels, false otherwise.
func (p *MergeChannelProcessor) processChannel(channel <-chan *protocol.Response) bool {
	for response := range channel {
		log4go.Debug("%s received %s", p, response)

		switch rt := response.GetType(); rt {
		// all these types end the stream
		case protocol.Response_HEARTBEAT, protocol.Response_END_STREAM:
			p.e <- nil
			return false

		case protocol.Response_ERROR:
			err := common.NewQueryError(common.InvalidArgument, response.GetErrorMessage())
			p.e <- err
			return false

		case protocol.Response_QUERY:
			for _, s := range response.MultiSeries {
				log4go.Debug("Yielding to %s: %s", p.next.Name(), s)
				_, err := p.next.Yield(s)
				if err != nil {
					p.e <- err
					return true
				}
			}

		default:
			panic(fmt.Errorf("Unknown response type: %s", rt))
		}
	}
	panic(errors.New("Reached end of method"))
}
// HandleWrite starts a goroutine that reads messages from the channel and
// sends them to the connection.
func (c *Connection) HandleWrite(key string) {
	go func() {
		var (
			n   int
			err error
		)
		log.Debug("user_key: \"%s\" HandleWrite goroutine start", key)
		for {
			msg, ok := <-c.Buf
			if !ok {
				log.Debug("user_key: \"%s\" HandleWrite goroutine stop", key)
				return
			}
			if c.Proto == WebsocketProto {
				// raw
				n, err = c.Conn.Write(msg)
			} else if c.Proto == TCPProto {
				// redis protocol
				msg = []byte(fmt.Sprintf("$%d\r\n%s\r\n", len(msg), string(msg)))
				n, err = c.Conn.Write(msg)
			} else {
				log.Error("unknown connection protocol: %d", c.Proto)
				panic(ErrConnProto)
			}
			// update stats
			if err != nil {
				log.Error("user_key: \"%s\" conn.Write() error(%v)", key, err)
				MsgStat.IncrFailed(1)
			} else {
				log.Debug("user_key: \"%s\" write \r\n========%s(%d)========", key, string(msg), n)
				MsgStat.IncrSucceed(1)
			}
		}
	}()
}
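// Framing note for the TCPProto branch above: messages are wrapped as
// Redis-style bulk strings before hitting the wire, so a 5-byte payload
// "hello" is written as:
//
//	$5\r\nhello\r\n
//
// The "$<len>" prefix lets the client read the exact payload size before
// consuming the body, which keeps parsing trivial on the receiving end.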
func (c *WebConn) WritePump() {
	ticker := time.NewTicker(PING_PERIOD)
	authTicker := time.NewTicker(AUTH_TIMEOUT)

	defer func() {
		ticker.Stop()
		authTicker.Stop()
		c.WebSocket.Close()
	}()

	for {
		select {
		case msg, ok := <-c.Send:
			if !ok {
				c.WebSocket.SetWriteDeadline(time.Now().Add(WRITE_WAIT))
				c.WebSocket.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}

			c.WebSocket.SetWriteDeadline(time.Now().Add(WRITE_WAIT))
			if err := c.WebSocket.WriteMessage(websocket.TextMessage, msg.GetPreComputeJson()); err != nil {
				// browsers will appear as CloseNoStatusReceived
				if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) {
					l4g.Debug(fmt.Sprintf("websocket.send: client side closed socket userId=%v", c.UserId))
				} else {
					l4g.Debug(fmt.Sprintf("websocket.send: closing websocket for userId=%v, error=%v", c.UserId, err.Error()))
				}
				return
			}

			if msg.EventType() == model.WEBSOCKET_EVENT_POSTED {
				if einterfaces.GetMetricsInterface() != nil {
					einterfaces.GetMetricsInterface().IncrementPostBroadcast()
				}
			}

		case <-ticker.C:
			c.WebSocket.SetWriteDeadline(time.Now().Add(WRITE_WAIT))
			if err := c.WebSocket.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
				// browsers will appear as CloseNoStatusReceived
				if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) {
					l4g.Debug(fmt.Sprintf("websocket.ticker: client side closed socket userId=%v", c.UserId))
				} else {
					l4g.Debug(fmt.Sprintf("websocket.ticker: closing websocket for userId=%v error=%v", c.UserId, err.Error()))
				}
				return
			}

		case <-authTicker.C:
			if c.SessionToken == "" {
				l4g.Debug(fmt.Sprintf("websocket.authTicker: did not authenticate ip=%v", c.WebSocket.RemoteAddr()))
				return
			}
			authTicker.Stop()
		}
	}
}
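// ReadPump and WritePump are typically started as a pair once the HTTP
// connection is upgraded: the writer goroutine owns the ticker and all
// writes, while the reader blocks the handler. A hedged sketch of that
// wiring (the upgrader settings and the NewWebConn constructor are
// assumptions for illustration):
func connectWebSocket(w http.ResponseWriter, r *http.Request) {
	upgrader := websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024}
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		l4g.Error("websocket.connect: upgrade failed: %v", err)
		return
	}
	wc := NewWebConn(ws) // hypothetical constructor for *WebConn
	go wc.WritePump()    // writer goroutine: messages, pings, auth timeout
	wc.ReadPump()        // blocks until the connection drops, then unregisters
}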
// Migrate moves the portion of connections that no longer belong to this comet.
func (l *ChannelList) Migrate(nw map[string]int) (err error) {
	migrate := false
	// check new/updated nodes
	for k, v := range nw {
		weight, ok := nodeWeightMap[k]
		// not found or weight changed
		if !ok || weight != v {
			migrate = true
			break
		}
	}
	// check deleted nodes
	if !migrate {
		for k := range nodeWeightMap {
			// node deleted
			if _, ok := nw[k]; !ok {
				migrate = true
				break
			}
		}
	}
	if !migrate {
		return
	}
	// init the ketama ring
	ring := ketama.NewRing(ketama.Base)
	for node, weight := range nw {
		ring.AddNode(node, weight)
	}
	ring.Bake()
	// atomic update
	nodeWeightMap = nw
	CometRing = ring
	// take each channel bucket lock and collect the channels to migrate
	channels := []Channel{}
	for i, c := range l.Channels {
		c.Lock()
		for k, v := range c.Data {
			hn := ring.Hash(k)
			if hn != Conf.ZookeeperCometNode {
				channels = append(channels, v)
				delete(c.Data, k)
				log.Debug("migrate delete channel key \"%s\"", k)
			}
		}
		c.Unlock()
		log.Debug("migrate channel bucket:%d finished", i)
	}
	// close all the migrated channels
	log.Info("close all the migrate channels")
	for _, channel := range channels {
		if err := channel.Close(); err != nil {
			log.Error("channel.Close() error(%v)", err)
			continue
		}
	}
	log.Info("close all the migrate channels finished")
	return
}
func (self *Coordinator) AuthenticateDbUser(db, username, password string) (common.User, error) {
	log.Debug("(raft:%s) Authenticating password for %s:%s", self.raftServer.raftServer.Name(), db, username)
	user, err := self.clusterConfiguration.AuthenticateDbUser(db, username, password)
	if user != nil {
		log.Debug("(raft:%s) User %s authenticated successfully", self.raftServer.raftServer.Name(), username)
	}
	return user, err
}
func (self *ShardData) getProcessor(querySpec *parser.QuerySpec, processor engine.Processor) (engine.Processor, error) {
	switch qt := querySpec.Query().Type(); qt {
	case parser.Delete, parser.DropSeries:
		return NilProcessor{}, nil
	case parser.Select:
		// continue
	default:
		panic(fmt.Errorf("Unexpected query type: %s", qt))
	}

	if querySpec.IsSinglePointQuery() {
		return engine.NewPassthroughEngine(processor, 1), nil
	}

	query := querySpec.SelectQuery()
	var err error

	// we should aggregate at the shard level
	if self.ShouldAggregateLocally(querySpec) {
		log.Debug("creating a query engine")
		processor, err = engine.NewQueryEngine(processor, query, nil)
		if err != nil {
			return nil, err
		}
		goto addFilter
	}

	// We shouldn't limit the queries if they have aggregates and aren't
	// aggregated locally, otherwise the aggregation, which happens in the
	// coordinator, will get partial data and be incorrect.
	if query.HasAggregates() {
		log.Debug("creating a passthrough engine")
		processor = engine.NewPassthroughEngine(processor, 1000)
		goto addFilter
	}

	// This is an optimization so we don't send more data than we should
	// over the wire. The coordinator has its own passthrough which does
	// the final limit.
	if l := query.Limit; l > 0 {
		log.Debug("creating a passthrough engine with limit")
		processor = engine.NewPassthroughEngineWithLimit(processor, 1000, query.Limit)
	}

addFilter:
	if query := querySpec.SelectQuery(); query != nil && query.GetFromClause().Type != parser.FromClauseInnerJoin {
		// Joins do their own filtering since we need to get all points
		// before filtering. Some where expressions are difficult to
		// compute before the points are joined together, e.g. a where
		// clause with left.column = 'something' or right.column =
		// 'something_else': we can't filter the individual series
		// separately. The filtering happens in merge.go:55.
		processor = engine.NewFilteringEngine(query, processor)
	}
	return processor, nil
}
func (self *WAL) recover() error {
	for idx, logFile := range self.logFiles {
		self.requestsSinceLastIndex = 0
		self.requestsSinceRotation = self.logIndex[idx].getLength()

		lastOffset := self.logIndex[idx].getLastOffset()
		logger.Debug("Getting file size for %s[%d]", logFile.file.Name(), logFile.file.Fd())
		stat, err := logFile.file.Stat()
		if err != nil {
			return err
		}
		logger.Info("Checking %s, last: %d, size: %d", logFile.file.Name(), lastOffset, stat.Size())
		replay, _ := logFile.dupAndReplayFromOffset(nil, lastOffset, 0)
		firstOffset := int64(-1)
		for {
			replayRequest := <-replay
			if replayRequest == nil {
				break
			}

			self.state.LargestRequestNumber = replayRequest.requestNumber
			if err := replayRequest.err; err != nil {
				return err
			}

			for _, s := range replayRequest.request.MultiSeries {
				for _, point := range s.Points {
					sequenceNumber := (point.GetSequenceNumber() - uint64(self.serverId)) / HOST_ID_OFFSET
					self.state.recover(replayRequest.shardId, sequenceNumber)
				}
			}

			if firstOffset == -1 {
				firstOffset = replayRequest.startOffset
			}

			self.requestsSinceLastIndex++
			self.requestsSinceRotation++
			logger.Debug("recovery requestsSinceLastIndex: %d, requestNumber: %d", self.requestsSinceLastIndex, replayRequest.request.GetRequestNumber())
			logger.Debug("largestRequestNumber: %d", self.state.LargestRequestNumber)

			if self.requestsSinceLastIndex < self.config.WalIndexAfterRequests {
				continue
			}

			self.logIndex[idx].addEntry(
				replayRequest.requestNumber-uint32(self.requestsSinceLastIndex),
				uint32(replayRequest.requestNumber),
				firstOffset,
				replayRequest.endOffset,
			)
		}
	}
	logger.Debug("Finished WAL recovery")
	return nil
}
func (self *ShardData) HandleDestructiveQuery(querySpec *parser.QuerySpec, request *p.Request, response chan<- *p.Response, runLocalOnly bool) {
	if !self.IsLocal && runLocalOnly {
		panic("WTF islocal is false and runLocalOnly is true")
	}

	responseChannels := []<-chan *p.Response{}
	serverIds := []uint32{}

	if self.IsLocal {
		err := self.deleteDataLocally(querySpec)
		if err != nil {
			msg := err.Error()
			log.Error(msg)
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: &msg,
			}
			return
		}
	}

	log.Debug("request %s, runLocalOnly: %v", request.GetDescription(), runLocalOnly)
	if !runLocalOnly {
		responses, ids, _ := self.forwardRequest(request)
		serverIds = append(serverIds, ids...)
		responseChannels = append(responseChannels, responses...)
	}

	var errorResponse *p.Response
	for idx, channel := range responseChannels {
		serverId := serverIds[idx]
		log.Debug("Waiting for response to %s from %d", request.GetDescription(), serverId)
		for {
			res := <-channel
			log.Debug("Received %s response from %d for %s", res.GetType(), serverId, request.GetDescription())
			if res.GetType() == p.Response_END_STREAM {
				break
			}

			// Don't send the access-denied response until the end so the
			// readers don't close out before the other responses. See
			// https://github.com/Wikia/influxdb/issues/316 for more info.
			if res.GetType() != p.Response_ERROR {
				response <- res
			} else if errorResponse == nil {
				errorResponse = res
			}
		}
	}

	if errorResponse != nil {
		response <- errorResponse
		return
	}
	response <- &p.Response{Type: p.Response_END_STREAM.Enum()}
}
func tcpListen(bind string) {
	addr, err := net.ResolveTCPAddr("tcp", bind)
	if err != nil {
		log.Error("net.ResolveTCPAddr(\"tcp\", \"%s\") error(%v)", bind, err)
		panic(err)
	}
	l, err := net.ListenTCP("tcp", addr)
	if err != nil {
		log.Error("net.ListenTCP(\"tcp\", \"%s\") error(%v)", bind, err)
		panic(err)
	}
	// free the listener resource
	defer func() {
		log.Info("tcp addr: \"%s\" close", bind)
		if err := l.Close(); err != nil {
			log.Error("listener.Close() error(%v)", err)
		}
	}()
	// init the reader buffer cache
	rb := newtcpBufCache()
	for {
		log.Debug("start accept")
		conn, err := l.AcceptTCP()
		if err != nil {
			log.Error("listener.AcceptTCP() error(%v)", err)
			continue
		}
		if err = conn.SetKeepAlive(Conf.TCPKeepalive); err != nil {
			log.Error("conn.SetKeepAlive() error(%v)", err)
			conn.Close()
			continue
		}
		if err = conn.SetReadBuffer(Conf.RcvbufSize); err != nil {
			log.Error("conn.SetReadBuffer(%d) error(%v)", Conf.RcvbufSize, err)
			conn.Close()
			continue
		}
		if err = conn.SetWriteBuffer(Conf.SndbufSize); err != nil {
			log.Error("conn.SetWriteBuffer(%d) error(%v)", Conf.SndbufSize, err)
			conn.Close()
			continue
		}
		// the first packet must be sent by the client within the specified seconds
		if err = conn.SetReadDeadline(time.Now().Add(fitstPacketTimedoutSec)); err != nil {
			log.Error("conn.SetReadDeadline() error(%v)", err)
			conn.Close()
			continue
		}
		rc := rb.Get()
		// one connection, one routine
		go handleTCPConn(conn, rc)
		log.Debug("accept finished")
	}
}
func (self *Shard) executeQueryForSeries(querySpec *parser.QuerySpec, name string, columns []string, processor engine.Processor) error {
	if querySpec.IsSinglePointQuery() {
		log.Debug("Running single query for series %s", name)
		return self.executeSinglePointQuery(querySpec, name, columns, processor)
	}

	var pi *PointIterator
	var err error
	columns, pi, err = self.getPointIteratorForSeries(querySpec, name, columns)
	if err != nil {
		return err
	}
	defer pi.Close()

	query := querySpec.SelectQuery()
	aliases := query.GetTableAliases(name)

	seriesOutgoing := &protocol.Series{Name: protocol.String(name), Fields: columns, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
	for pi.Valid() {
		p := pi.Point()
		seriesOutgoing.Points = append(seriesOutgoing.Points, p)
		if len(seriesOutgoing.Points) >= self.pointBatchSize {
			ok, err := yieldToProcessor(seriesOutgoing, processor, aliases)
			if !ok || err != nil {
				log.Debug("Stopping processing.")
				if err != nil {
					log.Error("Error while processing data: %v", err)
					return err
				}
				return nil
			}
			seriesOutgoing = &protocol.Series{Name: protocol.String(name), Fields: columns, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
		}
		pi.Next()
	}

	if err := pi.Error(); err != nil {
		return err
	}

	// yield the remaining data
	if ok, err := yieldToProcessor(seriesOutgoing, processor, aliases); !ok || err != nil {
		log.Debug("Stopping processing remaining points...")
		if err != nil {
			log.Error("Error while processing data: %v", err)
			return err
		}
	}

	log.Debug("Finished running query %s", query.GetQueryString())
	return nil
}
// validate checks that the key belongs to this comet node.
func (l *ChannelList) validate(key string) error {
	if len(nodeWeightMap) == 0 {
		log.Debug("no node found")
		return ErrChannelKey
	}
	node := CometRing.Hash(key)
	log.Debug("match node:%s hash node:%s", Conf.ZookeeperCometNode, node)
	if Conf.ZookeeperCometNode != node {
		log.Warn("user_key:\"%s\" node:%s not match this node:%s", key, node, Conf.ZookeeperCometNode)
		return ErrChannelKey
	}
	return nil
}
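// Migrate and validate hinge on the same ownership test: a key belongs to
// this comet only if the ketama ring hashes it back to the local node name.
// A small illustration using the same ring API as Migrate (node names and
// weights are hypothetical; a real caller would use the shared CometRing):
func ownsKey(key string) bool {
	ring := ketama.NewRing(ketama.Base)
	ring.AddNode("comet-1", 1) // hypothetical node and weight
	ring.AddNode("comet-2", 2) // higher weight claims more of the keyspace
	ring.Bake()
	// the key must migrate whenever this stops matching the local node
	return ring.Hash(key) == Conf.ZookeeperCometNode
}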
func completeOAuth(c *api.Context, w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	service := params["service"]

	code := r.URL.Query().Get("code")
	state := r.URL.Query().Get("state")

	uri := c.GetSiteURL() + "/signup/" + service + "/complete" // Remove /signup after a few releases (~1.8)

	body, team, props, err := api.AuthorizeOAuthUser(service, code, state, uri)
	if err != nil {
		c.Err = err
		return
	}

	action := props["action"]
	switch action {
	case model.OAUTH_ACTION_SIGNUP:
		api.CreateOAuthUser(c, w, r, service, body, team)
		if c.Err == nil {
			root(c, w, r)
		}
	case model.OAUTH_ACTION_LOGIN:
		l4g.Debug(fmt.Sprintf("CODE === %v", code))
		l4g.Debug(fmt.Sprintf("BODY === %v", body))
		api.LoginByOAuth(c, w, r, service, body, team)
		if c.Err == nil {
			root(c, w, r)
		}
	case model.OAUTH_ACTION_EMAIL_TO_SSO:
		api.CompleteSwitchWithOAuth(c, w, r, service, body, team, props["email"])
		if c.Err == nil {
			http.Redirect(w, r, api.GetProtocol(r)+"://"+r.Host+"/"+team.Name+"/login?extra=signin_change", http.StatusTemporaryRedirect)
		}
	case model.OAUTH_ACTION_SSO_TO_EMAIL:
		api.LoginByOAuth(c, w, r, service, body, team)
		if c.Err == nil {
			http.Redirect(w, r, api.GetProtocol(r)+"://"+r.Host+"/"+team.Name+"/claim?email="+url.QueryEscape(props["email"]), http.StatusTemporaryRedirect)
		}
	default:
		api.LoginByOAuth(c, w, r, service, body, team)
		if c.Err == nil {
			root(c, w, r)
		}
	}
}
// GetPrivate is the RPC interface for fetching a user's private messages.
func (r *MessageRPC) GetPrivate(m *myrpc.MessageGetPrivateArgs, rw *myrpc.MessageGetResp) error {
	if m == nil || m.Key == "" || m.MsgId < 0 {
		return myrpc.ErrParam
	}
	log.Debug("messageRPC.GetPrivate key:\"%s\" mid:\"%d\"", m.Key, m.MsgId)
	msgs, err := UseStorage.GetPrivate(m.Key, m.MsgId)
	if err != nil {
		log.Error("UseStorage.GetPrivate(\"%s\", %d) error(%v)", m.Key, m.MsgId, err)
		return err
	}
	rw.Msgs = msgs
	log.Debug("UseStorage.GetPrivate(\"%s\", %d) ok", m.Key, m.MsgId)
	return nil
}
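// GetPrivate follows Go's net/rpc convention (args struct, reply pointer,
// error return), so a client-side call would look roughly like this (the
// connected *rpc.Client and the field values are hypothetical):
func fetchPrivate(client *rpc.Client) (*myrpc.MessageGetResp, error) {
	args := &myrpc.MessageGetPrivateArgs{Key: "user_key_123", MsgId: 0} // hypothetical values
	resp := &myrpc.MessageGetResp{}
	if err := client.Call("MessageRPC.GetPrivate", args, resp); err != nil {
		return nil, err
	}
	return resp, nil
}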
/******************************************************************************
 * Overview:    Event initialization function
 * Function:    Init
 * Returns:     error
 * Parameters:  name             type      range    description
 *              sdscntstring     []string           req IP:port list
 *              sdsServicepath   string             sds path
 *
 ******************************************************************************/
func (this *Event) Init(sdscntstring []string, sdsServicepath string) error {
	this.msdsServicepath = sdsServicepath
	this.mrun = true
	this.mcache.Init(&this.mzmq)
	if err := this.mzmq.Init(&this.mcache, this); err != nil {
		return errors.New("init mzmq err:" + err.Error())
	}
	i := 0
	for ; i < len(sdscntstring); i++ {
		if len(sdscntstring[i]) == 0 {
			continue
		}
		zmqReq := &ZmqReq{}
		if err := zmqReq.Init(sdscntstring[i], &this.mzmq); err != nil {
			zmqReq.Close()
			return errors.New("init MzmqReq err:" + err.Error())
		}
		err1, res := zmqReq.Req(sdsServicepath)
		zmqReq.Close()
		if err1 != nil || res == nil {
			log4.Debug("req sds from(%s) err:%v! trying the next sdsAddr", sdscntstring[i], err1)
			continue
		}
		this.mcache.UpdatePoints(res)
		break
	}
	if i >= len(sdscntstring) {
		return errors.New("all sds req failed...")
	}
	if this.mcache.EmptySocketGroup() {
		return errors.New("no sds Socket created...")
	}
	log4.Debug("event init with sds num=%d", len(this.mcache.mSocketGroup))
	// time.Sleep(time.Second * DEFAULT_TIMEWAIT)
	this.mcache.WaitSocketsState()
	if qs := this.mcache.GetSocket(); qs != nil {
		this.mnowSocket = qs
		this.mcache.UpdateSdsTopic(this.msdsServicepath, true)
		if err := this.mnowSocket.Register(this.msdsServicepath); err != nil {
			return errors.New("zmqSub register error:" + err.Error())
		}
	} else {
		return errors.New("no sds Socket found in OK state...")
	}
	return nil
}
func InitWeb() {
	l4g.Debug("Initializing web routes")

	mainrouter := api.Srv.Router

	staticDir := utils.FindDir("web/static")
	l4g.Debug("Using static directory at %v", staticDir)
	mainrouter.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(staticDir))))

	mainrouter.Handle("/", api.AppHandlerIndependent(root)).Methods("GET")
	mainrouter.Handle("/oauth/authorize", api.UserRequired(authorizeOAuth)).Methods("GET")
	mainrouter.Handle("/oauth/access_token", api.ApiAppHandler(getAccessToken)).Methods("POST")

	mainrouter.Handle("/signup_team_complete/", api.AppHandlerIndependent(signupTeamComplete)).Methods("GET")
	mainrouter.Handle("/signup_user_complete/", api.AppHandlerIndependent(signupUserComplete)).Methods("GET")
	mainrouter.Handle("/signup_team_confirm/", api.AppHandlerIndependent(signupTeamConfirm)).Methods("GET")
	mainrouter.Handle("/verify_email", api.AppHandlerIndependent(verifyEmail)).Methods("GET")
	mainrouter.Handle("/find_team", api.AppHandlerIndependent(findTeam)).Methods("GET")
	mainrouter.Handle("/signup_team", api.AppHandlerIndependent(signup)).Methods("GET")
	mainrouter.Handle("/login/{service:[A-Za-z]+}/complete", api.AppHandlerIndependent(completeOAuth)).Methods("GET")  // Remove after a few releases (~1.8)
	mainrouter.Handle("/signup/{service:[A-Za-z]+}/complete", api.AppHandlerIndependent(completeOAuth)).Methods("GET") // Remove after a few releases (~1.8)
	mainrouter.Handle("/{service:[A-Za-z]+}/complete", api.AppHandlerIndependent(completeOAuth)).Methods("GET")

	mainrouter.Handle("/admin_console", api.UserRequired(adminConsole)).Methods("GET")
	mainrouter.Handle("/admin_console/", api.UserRequired(adminConsole)).Methods("GET")
	mainrouter.Handle("/admin_console/{tab:[A-Za-z0-9-_]+}", api.UserRequired(adminConsole)).Methods("GET")
	mainrouter.Handle("/admin_console/{tab:[A-Za-z0-9-_]+}/{team:[A-Za-z0-9-]*}", api.UserRequired(adminConsole)).Methods("GET")

	mainrouter.Handle("/hooks/{id:[A-Za-z0-9]+}", api.ApiAppHandler(incomingWebhook)).Methods("POST")
	mainrouter.Handle("/docs/{doc:[A-Za-z0-9]+}", api.AppHandlerIndependent(docs)).Methods("GET")

	// ----------------------------------------------------------------------------------------------
	// *ANYTHING* team specific should go below this line
	// ----------------------------------------------------------------------------------------------

	mainrouter.Handle("/{team:[A-Za-z0-9-]+(__)?[A-Za-z0-9-]+}", api.AppHandler(login)).Methods("GET")
	mainrouter.Handle("/{team:[A-Za-z0-9-]+(__)?[A-Za-z0-9-]+}/", api.AppHandler(login)).Methods("GET")
	mainrouter.Handle("/{team:[A-Za-z0-9-]+(__)?[A-Za-z0-9-]+}/login", api.AppHandler(login)).Methods("GET")
	mainrouter.Handle("/{team:[A-Za-z0-9-]+(__)?[A-Za-z0-9-]+}/logout", api.AppHandler(logout)).Methods("GET")
	mainrouter.Handle("/{team:[A-Za-z0-9-]+(__)?[A-Za-z0-9-]+}/reset_password", api.AppHandler(resetPassword)).Methods("GET")
	mainrouter.Handle("/{team:[A-Za-z0-9-]+(__)?[A-Za-z0-9-]+}/claim", api.AppHandler(claimAccount)).Methods("GET")
	mainrouter.Handle("/{team}/pl/{postid}", api.AppHandler(postPermalink)).Methods("GET")         // Bug in gorilla.mux prevents us from using regex here.
	mainrouter.Handle("/{team}/login/{service}", api.AppHandler(loginWithOAuth)).Methods("GET")    // Bug in gorilla.mux prevents us from using regex here.
	mainrouter.Handle("/{team}/channels/{channelname}", api.AppHandler(getChannel)).Methods("GET") // Bug in gorilla.mux prevents us from using regex here.
	mainrouter.Handle("/{team}/signup/{service}", api.AppHandler(signupWithOAuth)).Methods("GET")  // Bug in gorilla.mux prevents us from using regex here.

	watchAndParseTemplates()
}
func InitTeam() {
	l4g.Debug(utils.T("api.team.init.debug"))

	BaseRoutes.Teams.Handle("/create", ApiAppHandler(createTeam)).Methods("POST")
	BaseRoutes.Teams.Handle("/create_from_signup", ApiAppHandler(createTeamFromSignup)).Methods("POST")
	BaseRoutes.Teams.Handle("/signup", ApiAppHandler(signupTeam)).Methods("POST")
	BaseRoutes.Teams.Handle("/all", ApiAppHandler(getAll)).Methods("GET")
	BaseRoutes.Teams.Handle("/all_team_listings", ApiUserRequired(GetAllTeamListings)).Methods("GET")
	BaseRoutes.Teams.Handle("/get_invite_info", ApiAppHandler(getInviteInfo)).Methods("POST")
	BaseRoutes.Teams.Handle("/find_team_by_name", ApiAppHandler(findTeamByName)).Methods("POST")

	BaseRoutes.NeedTeam.Handle("/me", ApiUserRequired(getMyTeam)).Methods("GET")
	BaseRoutes.NeedTeam.Handle("/stats", ApiUserRequired(getTeamStats)).Methods("GET")
	BaseRoutes.NeedTeam.Handle("/members/{offset:[0-9]+}/{limit:[0-9]+}", ApiUserRequired(getTeamMembers)).Methods("GET")
	BaseRoutes.NeedTeam.Handle("/members/ids", ApiUserRequired(getTeamMembersByIds)).Methods("POST")
	BaseRoutes.NeedTeam.Handle("/members/{user_id:[A-Za-z0-9]+}", ApiUserRequired(getTeamMember)).Methods("GET")
	BaseRoutes.NeedTeam.Handle("/update", ApiUserRequired(updateTeam)).Methods("POST")
	BaseRoutes.NeedTeam.Handle("/update_member_roles", ApiUserRequired(updateMemberRoles)).Methods("POST")
	BaseRoutes.NeedTeam.Handle("/invite_members", ApiUserRequired(inviteMembers)).Methods("POST")
	BaseRoutes.NeedTeam.Handle("/add_user_to_team", ApiUserRequired(addUserToTeam)).Methods("POST")
	BaseRoutes.NeedTeam.Handle("/remove_user_from_team", ApiUserRequired(removeUserFromTeam)).Methods("POST")

	// These should be moved to the global admin console
	BaseRoutes.NeedTeam.Handle("/import_team", ApiUserRequired(importTeam)).Methods("POST")
	BaseRoutes.Teams.Handle("/add_user_to_team_from_invite", ApiUserRequired(addUserToTeamFromInvite)).Methods("POST")
}
func ImportPost(post *model.Post) {
	post.Hashtags, _ = model.ParseHashtags(post.Message)

	if result := <-Srv.Store.Post().Save(post); result.Err != nil {
		l4g.Debug(utils.T("api.import.import_post.saving.debug"), post.UserId, post.Message)
	}
}
func InitChannel() {
	l4g.Debug(utils.T("api.channel.init.debug"))

	BaseRoutes.Channels.Handle("/", ApiUserRequired(getChannels)).Methods("GET")
	BaseRoutes.Channels.Handle("/more", ApiUserRequired(getMoreChannels)).Methods("GET")
	BaseRoutes.Channels.Handle("/counts", ApiUserRequired(getChannelCounts)).Methods("GET")
	BaseRoutes.Channels.Handle("/members", ApiUserRequired(getMyChannelMembers)).Methods("GET")
	BaseRoutes.Channels.Handle("/create", ApiUserRequired(createChannel)).Methods("POST")
	BaseRoutes.Channels.Handle("/create_direct", ApiUserRequired(createDirectChannel)).Methods("POST")
	BaseRoutes.Channels.Handle("/update", ApiUserRequired(updateChannel)).Methods("POST")
	BaseRoutes.Channels.Handle("/update_header", ApiUserRequired(updateChannelHeader)).Methods("POST")
	BaseRoutes.Channels.Handle("/update_purpose", ApiUserRequired(updateChannelPurpose)).Methods("POST")
	BaseRoutes.Channels.Handle("/update_notify_props", ApiUserRequired(updateNotifyProps)).Methods("POST")

	BaseRoutes.NeedChannelName.Handle("/join", ApiUserRequired(join)).Methods("POST")

	BaseRoutes.NeedChannel.Handle("/", ApiUserRequired(getChannel)).Methods("GET")
	BaseRoutes.NeedChannel.Handle("/stats", ApiUserRequired(getChannelStats)).Methods("GET")
	BaseRoutes.NeedChannel.Handle("/members/{user_id:[A-Za-z0-9]+}", ApiUserRequired(getChannelMember)).Methods("GET")
	BaseRoutes.NeedChannel.Handle("/join", ApiUserRequired(join)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/leave", ApiUserRequired(leave)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/delete", ApiUserRequired(deleteChannel)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/add", ApiUserRequired(addMember)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/remove", ApiUserRequired(removeMember)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/update_last_viewed_at", ApiUserRequired(updateLastViewedAt)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/set_last_viewed_at", ApiUserRequired(setLastViewedAt)).Methods("POST")
}
func InitAdmin() {
	l4g.Debug(utils.T("api.admin.init.debug"))

	BaseRoutes.Admin.Handle("/logs", ApiAdminSystemRequired(getLogs)).Methods("GET")
	BaseRoutes.Admin.Handle("/audits", ApiAdminSystemRequired(getAllAudits)).Methods("GET")
	BaseRoutes.Admin.Handle("/config", ApiAdminSystemRequired(getConfig)).Methods("GET")
	BaseRoutes.Admin.Handle("/save_config", ApiAdminSystemRequired(saveConfig)).Methods("POST")
	BaseRoutes.Admin.Handle("/reload_config", ApiAdminSystemRequired(reloadConfig)).Methods("GET")
	BaseRoutes.Admin.Handle("/invalidate_all_caches", ApiAdminSystemRequired(invalidateAllCaches)).Methods("GET")
	BaseRoutes.Admin.Handle("/test_email", ApiAdminSystemRequired(testEmail)).Methods("POST")
	BaseRoutes.Admin.Handle("/recycle_db_conn", ApiAdminSystemRequired(recycleDatabaseConnection)).Methods("GET")
	BaseRoutes.Admin.Handle("/analytics/{id:[A-Za-z0-9]+}/{name:[A-Za-z0-9_]+}", ApiAdminSystemRequired(getAnalytics)).Methods("GET")
	BaseRoutes.Admin.Handle("/analytics/{name:[A-Za-z0-9_]+}", ApiAdminSystemRequired(getAnalytics)).Methods("GET")
	BaseRoutes.Admin.Handle("/save_compliance_report", ApiAdminSystemRequired(saveComplianceReport)).Methods("POST")
	BaseRoutes.Admin.Handle("/compliance_reports", ApiAdminSystemRequired(getComplianceReports)).Methods("GET")
	BaseRoutes.Admin.Handle("/download_compliance_report/{id:[A-Za-z0-9]+}", ApiAdminSystemRequiredTrustRequester(downloadComplianceReport)).Methods("GET")
	BaseRoutes.Admin.Handle("/upload_brand_image", ApiAdminSystemRequired(uploadBrandImage)).Methods("POST")
	BaseRoutes.Admin.Handle("/get_brand_image", ApiAppHandlerTrustRequester(getBrandImage)).Methods("GET")
	BaseRoutes.Admin.Handle("/reset_mfa", ApiAdminSystemRequired(adminResetMfa)).Methods("POST")
	BaseRoutes.Admin.Handle("/reset_password", ApiAdminSystemRequired(adminResetPassword)).Methods("POST")
	BaseRoutes.Admin.Handle("/ldap_sync_now", ApiAdminSystemRequired(ldapSyncNow)).Methods("POST")
	BaseRoutes.Admin.Handle("/ldap_test", ApiAdminSystemRequired(ldapTest)).Methods("POST")
	BaseRoutes.Admin.Handle("/saml_metadata", ApiAppHandler(samlMetadata)).Methods("GET")
	BaseRoutes.Admin.Handle("/add_certificate", ApiAdminSystemRequired(addCertificate)).Methods("POST")
	BaseRoutes.Admin.Handle("/remove_certificate", ApiAdminSystemRequired(removeCertificate)).Methods("POST")
	BaseRoutes.Admin.Handle("/saml_cert_status", ApiAdminSystemRequired(samlCertificateStatus)).Methods("GET")
	BaseRoutes.Admin.Handle("/cluster_status", ApiAdminSystemRequired(getClusterStatus)).Methods("GET")
	BaseRoutes.Admin.Handle("/recently_active_users/{team_id:[A-Za-z0-9]+}", ApiUserRequired(getRecentlyActiveUsers)).Methods("GET")
}
func InitChannel() {
	l4g.Debug(utils.T("api.channel.init.debug"))

	BaseRoutes.Channels.Handle("/", ApiUserRequiredActivity(getChannels, false)).Methods("GET")
	BaseRoutes.Channels.Handle("/more", ApiUserRequired(getMoreChannels)).Methods("GET")
	BaseRoutes.Channels.Handle("/counts", ApiUserRequiredActivity(getChannelCounts, false)).Methods("GET")
	BaseRoutes.Channels.Handle("/create", ApiUserRequired(createChannel)).Methods("POST")
	BaseRoutes.Channels.Handle("/create_direct", ApiUserRequired(createDirectChannel)).Methods("POST")
	BaseRoutes.Channels.Handle("/update", ApiUserRequired(updateChannel)).Methods("POST")
	BaseRoutes.Channels.Handle("/update_header", ApiUserRequired(updateChannelHeader)).Methods("POST")
	BaseRoutes.Channels.Handle("/update_purpose", ApiUserRequired(updateChannelPurpose)).Methods("POST")
	BaseRoutes.Channels.Handle("/update_notify_props", ApiUserRequired(updateNotifyProps)).Methods("POST")

	BaseRoutes.NeedChannelName.Handle("/join", ApiUserRequired(join)).Methods("POST")

	BaseRoutes.NeedChannel.Handle("/", ApiUserRequiredActivity(getChannel, false)).Methods("GET")
	BaseRoutes.NeedChannel.Handle("/extra_info", ApiUserRequired(getChannelExtraInfo)).Methods("GET")
	BaseRoutes.NeedChannel.Handle("/extra_info/{member_limit:-?[0-9]+}", ApiUserRequired(getChannelExtraInfo)).Methods("GET")
	BaseRoutes.NeedChannel.Handle("/join", ApiUserRequired(join)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/leave", ApiUserRequired(leave)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/delete", ApiUserRequired(deleteChannel)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/add", ApiUserRequired(addMember)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/remove", ApiUserRequired(removeMember)).Methods("POST")
	BaseRoutes.NeedChannel.Handle("/update_last_viewed_at", ApiUserRequired(updateLastViewedAt)).Methods("POST")
}