func PermanentDeleteTeam(c *Context, team *model.Team) *model.AppError {
	l4g.Warn("Attempting to permanently delete team %v id=%v", team.Name, team.Id)
	c.Path = "/teams/permanent_delete"
	c.LogAuditWithUserId("", fmt.Sprintf("attempt teamId=%v", team.Id))

	team.DeleteAt = model.GetMillis()
	if result := <-Srv.Store.Team().Update(team); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.User().GetForExport(team.Id); result.Err != nil {
		return result.Err
	} else {
		users := result.Data.([]*model.User)
		for _, user := range users {
			PermanentDeleteUser(c, user)
		}
	}

	if result := <-Srv.Store.Channel().PermanentDeleteByTeam(team.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.Team().PermanentDelete(team.Id); result.Err != nil {
		return result.Err
	}

	l4g.Warn("Permanently deleted team %v id=%v", team.Name, team.Id)
	c.LogAuditWithUserId("", fmt.Sprintf("success teamId=%v", team.Id))

	return nil
}
// RegisterTemp creates an ephemeral node and watches it; if the node is
// dropped, it sends a SIGQUIT to the current process.
func RegisterTemp(conn *zk.Conn, fpath string, data []byte) error {
	tpath, err := conn.Create(path.Join(fpath)+"/", data, zk.FlagEphemeral|zk.FlagSequence, zk.WorldACL(zk.PermAll))
	if err != nil {
		log.Error("conn.Create(\"%s\", \"%s\", zk.FlagEphemeral|zk.FlagSequence) error(%v)", fpath, string(data), err)
		return err
	}
	log.Debug("create a zookeeper node:%s", tpath)
	// watch self
	go func() {
		for {
			log.Info("zk path: \"%s\" set a watch", tpath)
			exist, _, watch, err := conn.ExistsW(tpath)
			if err != nil {
				log.Error("zk.ExistsW(\"%s\") error(%v)", tpath, err)
				log.Warn("zk path: \"%s\" set watch failed, kill itself", tpath)
				killSelf()
				return
			}
			if !exist {
				log.Warn("zk path: \"%s\" not exist, kill itself", tpath)
				killSelf()
				return
			}
			event := <-watch
			log.Info("zk path: \"%s\" receive a event %v", tpath, event)
		}
	}()
	return nil
}
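// killSelf is referenced above but not defined in this listing. A minimal
// sketch consistent with the doc comment (send SIGQUIT to the current
// process); the real implementation may differ, and the os/syscall imports
// are assumed.
func killSelf() {
	if err := syscall.Kill(os.Getpid(), syscall.SIGQUIT); err != nil {
		log.Error("syscall.Kill(%d, SIGQUIT) error(%v)", os.Getpid(), err)
	}
}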
func (ctx *CommunicationContext) writeLoop() {
	ctx.communicating.Add(1)
	defer ctx.communicating.Done()
	log4go.Debug("write loop started")
	for ctx.isOpen {
		tckt, ok := <-ctx.Output
		if !ok {
			log4go.Warn("output channel closed")
			// leave the loop instead of spinning on a closed channel
			break
		}
		log4go.Debug("found new output in output channel")
		err := ctx.sender.Send(tckt.msg)
		if err != nil {
			log4go.Warn("error while sending to device: %v", err.Error())
			tckt.error <- err
		} else {
			tckt.isSend = true
			tckt.send <- tckt.msg
		}
	}
	log4go.Debug("write loop finished")
}
// watchCometNode watches a named comet node for leader election on failover.
func watchCometNode(conn *zk.Conn, node, fpath string, retry, ping time.Duration, ch chan *CometNodeEvent) {
	fpath = path.Join(fpath, node)
	for {
		nodes, watch, err := myzk.GetNodesW(conn, fpath)
		if err == myzk.ErrNodeNotExist {
			log.Warn("zk don't have node \"%s\"", fpath)
			break
		} else if err == myzk.ErrNoChild {
			log.Warn("zk don't have any children in \"%s\", retry in %d second", fpath, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else if err != nil {
			log.Error("zk path: \"%s\" getNodes error(%v), retry in %d second", fpath, err, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		}
		// leader election: the node with the smallest name wins
		sort.Strings(nodes)
		if info, err := registerCometNode(conn, nodes[0], fpath, retry, ping, true); err != nil {
			log.Error("zk path: \"%s\" registerCometNode error(%v)", fpath, err)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else {
			// update node info
			ch <- &CometNodeEvent{Event: eventNodeUpdate, Key: node, Value: info}
		}
		// block until the watch fires
		event := <-watch
		log.Info("zk path: \"%s\" receive a event: (%v)", fpath, event)
	}
	// WARN: if there is no persistent node and comet rpc is not configured,
	// this path is never watched again until it is recreated.
	log.Warn("zk path: \"%s\" never watch again till recreate", fpath)
}
// ScanPlugins scans path for plugins that contain files with the given suffix.
func ScanPlugins(path string, suffix string) []*Plugin {
	var plugins []*Plugin

	f, err := os.Open(path)
	if err != nil {
		log4go.Warn(err)
		return nil
	}
	defer f.Close()
	dirs, err := f.Readdirnames(-1)
	if err != nil {
		log4go.Warn(err)
		return nil
	}
	for _, dir := range dirs {
		dir2 := pt.Join(path, dir)
		f2, err := os.Open(dir2)
		if err != nil {
			log4go.Warn(err)
			continue
		}
		fi, err := f2.Readdir(-1)
		// close immediately rather than defer inside the loop, which would
		// hold every directory handle open until the function returns
		f2.Close()
		if err != nil {
			continue
		}
		for _, f := range fi {
			fn := f.Name()
			if strings.HasSuffix(fn, suffix) {
				plugins = append(plugins, NewPlugin(dir2, suffix))
				break
			}
		}
	}
	return plugins
}
func (p *Provider) buildCachePath() {
	go func() {
		fpath := "/rpc/" + p.Namespace + "/" + p.Version
		for {
			nodes, watch, err := zk.GetNodesW(p.ZkConn, fpath)
			if err == zk.ErrNodeNotExist {
				log.Warn("zk don't have node \"%s\", retry in %d second", fpath, waitNodeDelay)
				time.Sleep(waitNodeDelaySecond)
				continue
			} else if err == zk.ErrNoChild {
				log.Warn("zk don't have any children in \"%s\", retry in %d second", fpath, waitNodeDelay)
				time.Sleep(waitNodeDelaySecond)
				continue
			} else if err != nil {
				log.Error("getNodes error(%v), retry in %d second", err, waitNodeDelay)
				time.Sleep(waitNodeDelaySecond)
				continue
			}
			p.lock.Lock()
			p.addes = nodes
			p.lock.Unlock()
			event := <-watch
			log.Info("zk path: \"%s\" receive a event %v", fpath, event)
		}
	}()
}
func (device *AntUsbDevice) Reset() {
	log4go.Debug("resetting usb hardware")
SEND_RESET:
	// Hard reset device first
	resetBuffer := []byte{
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00}
	_, err := device.Write(resetBuffer)
	for err != nil {
		log4go.Warn("error while writing reset bytes to usb device: %v", err.Error())
		_, err = device.Write(resetBuffer)
	}
	buffer := make([]byte, 16)
	// Read hard reset reply; on failure, start over with another hard reset.
	_, err = device.Read(buffer)
	if err != nil {
		log4go.Warn("error while reading reset bytes reply from usb device: %v", err.Error())
		goto SEND_RESET
	}
}
// GetConnectionStatus fetches the status of the IGD.
//
// Errors are indicated by the channel closing before a ConnectionStatus is
// returned. Listeners should therefore check at the very least for nil, better
// still for channel closure.
//
// NOTA BENE: the channel is closed after a ConnectionStatus has been sent on
// it, in order not to leak resources.
func (self *IGD) GetConnectionStatus() (ret chan *ConnectionStatus) {
	// We initialise the channel
	ret = make(chan *ConnectionStatus)
	// We do the work in a separate goroutine; the closure has access to the
	// channel we just instantiated so we can manipulate it.
	go func() {
		x, ok := self.soapRequest("GetStatusInfo", statusRequestStringReader(self.upnptype))
		if ok && strings.EqualFold(x.Body.Status.NewConnectionStatus, "Connected") {
			y, ok := self.soapRequest("GetExternalIPAddress", externalIPRequestStringReader(self.upnptype))
			if ok {
				ipString := y.Body.IP.NewExternalIPAddress
				ip := net.ParseIP(ipString)
				if ip != nil {
					// fall through to close(ret) after sending, so the channel
					// is also closed in the success path, as documented
					ret <- &ConnectionStatus{true, ip}
				} else {
					l4g.Warn("Failed to parse IP string %v", ipString)
				}
			} else {
				l4g.Warn("Failed to get IP address after establishing the connection was ok")
			}
		} else if ok && strings.EqualFold(x.Body.Status.NewConnectionStatus, "Disconnected") {
			ret <- &ConnectionStatus{false, nil}
		}
		close(ret)
	}()
	// We immediately return the channel to the caller
	return
}
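// Usage sketch for GetConnectionStatus (hypothetical caller): receive with the
// comma-ok form, as the doc comment prescribes, so a closed channel (error) is
// distinguishable from a delivered status. The Connected/IP field names are
// assumptions read off the positional constructor above.
func exampleConnectionStatus(igd *IGD) {
	status, ok := <-igd.GetConnectionStatus()
	if !ok || status == nil {
		// channel closed before a status arrived: treat as an error
		l4g.Warn("could not determine IGD connection status")
		return
	}
	if status.Connected {
		l4g.Info("IGD connected, external IP: %v", status.IP)
	}
}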
func scanpath(path string, m *py.Module) {
	sys, err := py.Import("sys")
	if err != nil {
		log4go.Debug(err)
	} else {
		defer sys.Decref()
	}

	// This should probably be done by the Editor as it needs to scan through for themes, keybinding, settings etc
	if f, err := os.Open(path); err != nil {
		log4go.Warn(err)
	} else {
		defer f.Close()
		if dirs, err := f.Readdirnames(-1); err != nil {
			log4go.Warn(err)
		} else {
			for _, dir := range dirs {
				if dir != "Vintageous" && dir != "Default" && dir != "plugins" {
					// TODO obviously
					continue
				}
				dir2 := path + dir
				if f2, err := os.Open(dir2); err != nil {
					log4go.Warn(err)
				} else {
					defer f2.Close()
					if fi, err := f2.Readdir(-1); err != nil {
						log4go.Warn(err)
					} else {
						for _, f := range fi {
							if fn := f.Name(); strings.HasSuffix(fn, ".py") {
								//m.Incref()
								if s, err := py.NewUnicode(dir + "." + fn[:len(fn)-3]); err != nil {
									log4go.Error(err)
								} else {
									if r, err := m.Base().CallMethodObjArgs("reload_plugin", s); err != nil {
										log4go.Error(err)
									} else if r != nil {
										r.Decref()
									}
									// if i, err := sys.Base().CallMethodObjArgs("getrefcount", s); err != nil {
									// 	log4go.Error(err)
									// } else {
									// 	log4go.Debug("m refs: %d", i.(*py.Long).Int64())
									// 	i.Decref()
									// }
								}
							}
						}
					}
				}
			}
		}
	}
}
func (node MenuNode) Execute(ivrChannel *IVRChannel) (string, error) {
	if ivrChannel.ChannelState == IVRChannel_State_Hangup {
		return "", errors.New("channel state is invalid : hangup")
	}
	ivrChannel.ActiveNode = node.NodeName
	// Drain any pending dtmf from the channel.
	for len(ivrChannel.Dtmf) > 0 {
		<-ivrChannel.Dtmf
	}
	executePrompt(node.Prompts.Prompt, ivrChannel)
	/*
		if len(node.Prompts.Prompt) > 0 {
			for _, promptName := range node.Prompts.Prompt {
				// Find prompt from ivrPromptMap by promptName
				if prompt, ok := ivrPromptMap[promptName]; ok {
					executePrompt(prompt, ivrChannel)
					<-ivrChannel.PlaybackDone
				} else {
					l4g.Warn("Prompt not found for promptName=%s", promptName)
				}
			}
		}
	*/
	ivrChannel.Esocket.StartDTMF()
	defer ivrChannel.Esocket.StopDTMF()
	// Wait for dtmf input.
	timeout := eventsocket.CheckTimeout(node.Timeout)
	select {
	case <-timeout:
		l4g.Warn("Timeout, no dtmf.")
		ivrChannel.NoInputTimes = ivrChannel.NoInputTimes + 1
		return node.NoInput, nil
	case dtmf := <-ivrChannel.Dtmf:
		for _, choice := range node.Choices.Choice {
			if dtmf == choice.DTMF {
				return choice.NextNode, nil
			}
		}
		l4g.Warn("No match for dtmf=%s", dtmf)
		ivrChannel.NoMatchTimes = ivrChannel.NoMatchTimes + 1
		return node.NoMatch, nil
	case <-ivrChannel.ChannelHangup:
		l4g.Trace("Channel hangup.")
		return "", errors.New("channel hangup")
	}
}
// watchMessageRoot watches the message root path.
func watchMessageRoot(conn *zk.Conn, fpath string, ch chan *MessageNodeEvent) error {
	for {
		nodes, watch, err := myzk.GetNodesW(conn, fpath)
		if err == myzk.ErrNodeNotExist {
			log.Warn("zk don't have node \"%s\", retry in %d second", fpath, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		} else if err == myzk.ErrNoChild {
			log.Warn("zk don't have any children in \"%s\", retry in %d second", fpath, waitNodeDelay)
			// all children died, kick all the nodes
			for _, client := range MessageRPC.Clients {
				log.Debug("node: \"%s\" send del node event", client.Addr)
				ch <- &MessageNodeEvent{Event: eventNodeDel, Key: &WeightRpc{Addr: client.Addr, Weight: client.Weight}}
			}
			time.Sleep(waitNodeDelaySecond)
			continue
		} else if err != nil {
			log.Error("getNodes error(%v), retry in %d second", err, waitNodeDelay)
			time.Sleep(waitNodeDelaySecond)
			continue
		}
		nodesMap := map[string]bool{}
		// handle newly added nodes
		for _, node := range nodes {
			data, _, err := conn.Get(path.Join(fpath, node))
			if err != nil {
				log.Error("zk.Get(\"%s\") error(%v)", path.Join(fpath, node), err)
				continue
			}
			// parse message node info
			nodeInfo := &MessageNodeInfo{}
			if err := json.Unmarshal(data, nodeInfo); err != nil {
				log.Error("json.Unmarshal(\"%s\", nodeInfo) error(%v)", string(data), err)
				continue
			}
			for _, addr := range nodeInfo.Rpc {
				// if not present in the old map, trigger an add event
				if _, ok := MessageRPC.Clients[addr]; !ok {
					ch <- &MessageNodeEvent{Event: eventNodeAdd, Key: &WeightRpc{Addr: addr, Weight: nodeInfo.Weight}}
				}
				nodesMap[addr] = true
			}
		}
		// handle deleted nodes
		for _, client := range MessageRPC.Clients {
			if _, ok := nodesMap[client.Addr]; !ok {
				ch <- &MessageNodeEvent{Event: eventNodeDel, Key: client}
			}
		}
		// block until a node-changed event arrives
		event := <-watch
		log.Info("zk path: \"%s\" receive a event %v", fpath, event)
	}
}
func query(projectName string, statement string, params []interface{}, callback func(rows *sql.Rows) interface{}) ([]interface{}, error) {
	databaseId, err := getDatabaseId(projectName)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	projectDbFile := fmt.Sprintf("./db/%d.db", databaseId)
	if _, err := os.Stat(projectDbFile); err != nil {
		if os.IsNotExist(err) {
			logger.Warn("create project table.")
			CreateProjectTables(projectName, databaseId)
			logger.Warn("created project table.")
		}
	}
	db, err := sql.Open("sqlite3", projectDbFile)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	defer db.Close()
	logger.Debug("sql: %s", statement)
	stmt, err := db.Prepare(statement)
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	defer stmt.Close()
	// execute `rows, err := stmt.Query(arg1, arg2, ...)` via reflection
	values := []reflect.Value{}
	for _, p := range params {
		logger.Debug("[%s]", p)
		values = append(values, reflect.ValueOf(p))
	}
	returnValues := reflect.ValueOf(stmt).MethodByName("Query").Call(values)
	rows := returnValues[0].Interface().(*sql.Rows)
	if !returnValues[1].IsNil() {
		err = returnValues[1].Interface().(error)
	}
	if err != nil {
		logger.Error(err)
		return nil, err
	}
	// a single deferred Close suffices; the second explicit Close was redundant
	defer rows.Close()
	results := []interface{}{}
	for rows.Next() {
		results = append(results, callback(rows))
	}
	if err := rows.Err(); err != nil {
		logger.Error(err)
		return nil, err
	}
	return results, nil
}
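// The reflection above is not strictly needed: database/sql's Stmt.Query is
// variadic, so a []interface{} can be expanded directly. A minimal equivalent
// of the reflective call (hypothetical helper, same assumptions as query):
func queryDirect(stmt *sql.Stmt, params []interface{}) (*sql.Rows, error) {
	// stmt.Query(params...) passes each element as one query argument,
	// exactly like the reflect.Call above.
	return stmt.Query(params...)
}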
// drainresults loops, printing the address of nodes it has found.
func drainresults(n *dht.DHT) {
	fmt.Println("=========================== DHT")
	l4g.Warn("Note that there are many bad nodes that reply to anything you ask.")
	l4g.Warn("Peers found:")
	for r := range n.PeersRequestResults {
		for _, peers := range r {
			for _, x := range peers {
				l4g.Warn("%v", dht.DecodePeerAddress(x))
			}
		}
	}
}
func (p *Pattern) Cache(data string, pos int) (pat *Pattern, ret MatchObject) {
	if p.cachedData == data {
		if p.cachedMatch == nil {
			return nil, nil
		}
		if p.cachedMatch[0] >= pos && p.cachedPat.cachedMatch != nil {
			p.hits++
			return p.cachedPat, p.cachedMatch
		}
	} else {
		p.cachedPatterns = nil
	}
	if p.cachedPatterns == nil {
		p.cachedPatterns = make([]*Pattern, len(p.Patterns))
		for i := range p.cachedPatterns {
			p.cachedPatterns[i] = &p.Patterns[i]
		}
	}
	p.misses++

	if p.Match.re != nil {
		pat, ret = p, p.Match.Find(data, pos)
	} else if p.Begin.re != nil {
		pat, ret = p, p.Begin.Find(data, pos)
	} else if p.Include != "" {
		if z := p.Include[0]; z == '#' {
			key := p.Include[1:]
			if p2, ok := p.owner.Repository[key]; ok {
				pat, ret = p2.Cache(data, pos)
			} else {
				log4go.Fine("Not found in repository: %s", p.Include)
			}
		} else if z == '$' {
			// TODO(q): Implement tmLanguage $ include directives
			log4go.Warn("Unhandled include directive: %s", p.Include)
		} else if l, err := Provider.GetLanguage(p.Include); err != nil {
			if !failed[p.Include] {
				log4go.Warn("Include directive %s failed: %s", p.Include, err)
			}
			failed[p.Include] = true
		} else {
			return l.RootPattern.Cache(data, pos)
		}
	} else {
		pat, ret = p.FirstMatch(data, pos)
	}
	p.cachedData = data
	p.cachedMatch = ret
	p.cachedPat = pat
	return
}
func PermanentDeleteUser(c *Context, user *model.User) *model.AppError {
	l4g.Warn("Attempting to permanently delete account %v id=%v", user.Email, user.Id)
	c.Path = "/users/permanent_delete"
	c.LogAuditWithUserId(user.Id, fmt.Sprintf("attempt userId=%v", user.Id))
	c.LogAuditWithUserId("", fmt.Sprintf("attempt userId=%v", user.Id))
	if user.IsInRole(model.ROLE_SYSTEM_ADMIN) {
		l4g.Warn("You are deleting %v that is a system administrator. You may need to set another account as the system administrator using the command line tools.", user.Email)
	}

	UpdateActive(c, user, false)

	if result := <-Srv.Store.Session().PermanentDeleteSessionsByUser(user.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.OAuth().PermanentDeleteAuthDataByUser(user.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.Webhook().PermanentDeleteIncomingByUser(user.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.Webhook().PermanentDeleteOutgoingByUser(user.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.Preference().PermanentDeleteByUser(user.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.Channel().PermanentDeleteMembersByUser(user.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.Post().PermanentDeleteByUser(user.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.User().PermanentDelete(user.Id); result.Err != nil {
		return result.Err
	}

	if result := <-Srv.Store.Audit().PermanentDeleteByUser(user.Id); result.Err != nil {
		return result.Err
	}

	l4g.Warn("Permanently deleted account %v id=%v", user.Email, user.Id)
	c.LogAuditWithUserId("", fmt.Sprintf("success userId=%v", user.Id))

	return nil
}
// drainresults loops, printing the address of nodes it has found.
func drainresults(n *dht.DHT) {
	fmt.Println("=========================== DHT")
	for r := range n.PeersRequestResults {
		for ih, peers := range r {
			l4g.Warn("Found peer(s) for infohash %x:", ih)
			for _, x := range peers {
				l4g.Warn("==========================> %v", dht.DecodePeerAddress(x))
				l4g.Warn("Note that there are many bad nodes that reply to anything you ask, so don't get too excited.")
				l4g.Warn("==========================")
			}
		}
	}
}
func (r *routingTable) cleanup() (needPing []*remoteNode) {
	needPing = make([]*remoteNode, 0, 10)
	t0 := time.Now()
	// Needs some serious optimization.
	for addr, n := range r.addresses {
		if addr != n.address.String() {
			l4g.Warn("cleanup: node address mismatches: %v != %v. Deleting node", addr, n.address.String())
			r.kill(n)
			continue
		}
		if addr == "" {
			l4g.Warn("cleanup: found empty address for node %x. Deleting node", n.id)
			r.kill(n)
			continue
		}
		if n.reachable {
			if len(n.pendingQueries) == 0 {
				goto PING
			}
			// Tolerate 2 cleanup cycles.
			if time.Since(n.lastResponseTime) > cleanupPeriod*2+(time.Minute) {
				l4g.Trace("DHT: Old node seen %v ago. Deleting", time.Since(n.lastResponseTime))
				r.kill(n)
				continue
			}
			if time.Since(n.lastResponseTime).Nanoseconds() < cleanupPeriod.Nanoseconds()/2 {
				// Seen recently. Don't need to ping.
				continue
			}
		} else {
			// Not reachable.
			if len(n.pendingQueries) > 2 {
				// Didn't reply to 2 consecutive queries.
				l4g.Trace("DHT: Node never replied to ping. Deleting. %v", n.address)
				r.kill(n)
				continue
			}
		}
	PING:
		needPing = append(needPing, n)
	}
	duration := time.Since(t0)
	// If this pauses the server for too long I may have to segment the cleanup.
	// 2000 nodes: it takes ~12ms
	// 4000 nodes: ~24ms.
	l4g.Info("DHT: Routing table cleanup took %v", duration)
	return needPing
}
// zkData creates the zookeeper path (ignoring "node exists" errors) and sets
// the node data.
func zkData(conn *zk.Conn, redisMaster string) error {
	node := path.Join(conf.ZKPath, conf.Node)
	tpath := ""
	for _, str := range strings.Split(conf.ZKPath, "/")[1:] {
		tpath = path.Join(tpath, "/", str)
		log.Info("create zookeeper path: \"%s\"", tpath)
		_, err := conn.Create(tpath, []byte(""), 0, zk.WorldACL(zk.PermAll))
		if err != nil {
			if err == zk.ErrNodeExists {
				log.Warn("zk.create(\"%s\") exists", tpath)
			} else {
				log.Error("zk.create(\"%s\") error(%v)", tpath, err)
				return err
			}
		}
	}
	if _, err := conn.Create(node, []byte{}, 0, zk.WorldACL(zk.PermAll)); err != nil {
		if err == zk.ErrNodeExists {
			oData, stat, err := conn.Get(node)
			if err != nil {
				log.Error("zk.Get(\"%s\") error(%v)", node, err)
				return err
			}
			ni := unmarshal(oData)
			// bail out when unmarshal failed or the node has no servers;
			// checking ni != nil here would invert the guard and risk a nil
			// dereference on ni.Servers
			if ni == nil || len(ni.Servers) == 0 {
				log.Warn("node has no data")
				return nil
			}
			data := marshal(ni, redisMaster)
			if len(data) == 0 {
				log.Warn("marshal error")
				return nil
			}
			if bytes.Equal(oData, data) {
				log.Warn("zk data same, no change")
				return nil
			}
			if _, err = conn.Set(node, data, stat.Version); err != nil {
				log.Error("zk.Set(\"%s\", data, 0) error(%v)", node, err)
				return err
			}
			log.Info("zk update data: \"%s\"", node)
		} else {
			log.Error("zk.create(\"%s\") error(%v)", node, err)
			return err
		}
	}
	return nil
}
func (self *ClusterServer) heartbeat() {
	defer func() {
		self.heartbeatStarted = false
	}()

	heartbeatRequest := &protocol.Request{
		Type:     &HEARTBEAT_TYPE,
		Database: protocol.String(""),
	}

	for {
		// This chan is buffered and created inside the loop on purpose, so
		// that if reading a heartbeat times out and the response arrives
		// later, it is dumped into this chan and does not block the protobuf
		// client reader.
		responseChan := make(chan *protocol.Response, 1)
		heartbeatRequest.Id = nil
		self.MakeRequest(heartbeatRequest, responseChan)
		err := self.getHeartbeatResponse(responseChan)
		if err != nil {
			self.handleHeartbeatError(err)
			continue
		}

		if !self.isUp {
			log.Warn("Server marked as up. Heartbeat succeeded")
		}
		// otherwise, reset the backoff and mark the server as up
		self.isUp = true
		self.Backoff = self.MinBackoff

		<-time.After(self.HeartbeatInterval)
	}
}
// Join an existing cluster
func (rs *RaftServer) Join(peers []string) (e error) {
	command := &raft.DefaultJoinCommand{
		Name:             rs.Server.Name(),
		ConnectionString: rs.httpAddr,
	}

	var b bytes.Buffer
	if err := json.NewEncoder(&b).Encode(command); err != nil {
		return err
	}
	for _, peer := range peers {
		log4go.Info("%s is joining %s", rs.Server.Name(), peer)
		if peer == rs.httpAddr {
			continue
		}
		target := fmt.Sprintf("%s/raft_server/join", peer)
		// use a fresh reader per attempt: posting drains the reader, so
		// reusing &b directly would send an empty body to every peer after
		// the first
		_, err := postAndError(target, "application/json", bytes.NewReader(b.Bytes()))
		if err != nil {
			log4go.Warn(err.Error())
			e = err
			continue
		}
		return nil
	}
	return e
}
func produce(conn *amqp.Connection, channel *amqp.Channel, val *interface{}) {
	if val == nil {
		log.Warn("the redis json is nil")
		return
	}
	body, err := json.Marshal(val)
	if err != nil || body == nil {
		log.Error("redis event to json error: %s , oplog is : %s ", err, string(body))
	} else {
		routingKey := "redis.event"
		log.Info("routing key is : %s ", routingKey)
		err = channel.Publish(
			EXCHANGE_KEY, // exchange
			routingKey,   // routing key
			false,        // mandatory
			false,        // immediate
			amqp.Publishing{
				ContentType: "text/plain",
				Body:        body,
			})
		if err != nil {
			log.Error("publish message err : %s ", err)
		}
		//TODO recreate channel ?
	}
}
// Get returns a user channel from the ChannelList.
func (l *ChannelList) Get(key string, newOne bool) (Channel, error) {
	// validate
	if err := l.validate(key); err != nil {
		return nil, err
	}
	// get a channel bucket
	b := l.Bucket(key)
	b.Lock()
	if c, ok := b.Data[key]; !ok {
		if !Conf.Auth && newOne {
			c = NewSeqChannel()
			b.Data[key] = c
			b.Unlock()
			ChStat.IncrCreate()
			log.Info("user_key:\"%s\" create a new channel", key)
			return c, nil
		} else {
			b.Unlock()
			log.Warn("user_key:\"%s\" channel not exists", key)
			return nil, ErrChannelNotExist
		}
	} else {
		b.Unlock()
		ChStat.IncrAccess()
		log.Info("user_key:\"%s\" refresh channel bucket expire time", key)
		return c, nil
	}
}
// GetPrivate implements the Storage GetPrivate method.
func (s *MySQLStorage) GetPrivate(key string, mid int64) ([]*myrpc.Message, error) {
	db := s.getConn(key)
	if db == nil {
		return nil, ErrNoMySQLConn
	}
	now := time.Now().Unix()
	rows, err := db.Query(getPrivateMsgSQL, key, mid)
	if err != nil {
		log.Error("db.Query(\"%s\", \"%s\", %d) failed (%v)", getPrivateMsgSQL, key, mid, err)
		return nil, err
	}
	// ensure the connection is released back to the pool
	defer rows.Close()
	msgs := []*myrpc.Message{}
	for rows.Next() {
		expire := int64(0)
		cmid := int64(0)
		msg := []byte{}
		if err := rows.Scan(&cmid, &expire, &msg); err != nil {
			log.Error("rows.Scan() failed (%v)", err)
			return nil, err
		}
		if now > expire {
			log.Warn("user_key: \"%s\" mid: %d expired", key, cmid)
			continue
		}
		msgs = append(msgs, &myrpc.Message{MsgId: cmid, GroupId: myrpc.PrivateGroupId, Msg: json.RawMessage(msg)})
	}
	// surface any error encountered during iteration
	if err := rows.Err(); err != nil {
		log.Error("rows.Err() failed (%v)", err)
		return nil, err
	}
	return msgs, nil
}
func (g *Go) complete_pkg(pkg string, cmp *content.CompletionResult) error {
	if g.imports == nil {
		g.imports = make(map[string]*types.Package)
	}
	if p, err := types.GcImport(g.imports, pkg); err != nil {
		return err
	} else {
		nn := p.Scope()
		for i := 0; i < nn.NumEntries(); i++ {
			t := nn.At(i)
			var flags content.Flags
			if n := t.Name(); n[0] != strings.ToUpper(n)[0] {
				flags = content.FLAG_ACC_PROTECTED
			} else {
				flags = content.FLAG_ACC_PUBLIC
			}
			switch t.(type) {
			case *types.Func:
				var m content.Method
				m.Flags |= flags
				m.Name.Relative = t.Name()
				sig := t.Type().Underlying().(*types.Signature)
				if sig.Recv() != nil {
					continue
				}
				par := sig.Params()
				for j := 0; j < par.Len(); j++ {
					m.Parameters = append(m.Parameters, g.pkg_var(par.At(j)))
				}
				ret := sig.Results()
				for j := 0; j < ret.Len(); j++ {
					m.Returns = append(m.Returns, g.pkg_var(ret.At(j)))
				}
				cmp.Methods = append(cmp.Methods, m)
			case *types.TypeName:
				var t2 content.Type
				t2.Flags |= flags
				t2.Name.Relative = t.Name()
				switch t.Type().Underlying().(type) {
				case *types.Interface:
					t2.Flags |= content.FLAG_TYPE_INTERFACE
				case *types.Struct:
					t2.Flags |= content.FLAG_TYPE_STRUCT
				}
				cmp.Types = append(cmp.Types, t2)
			case *types.Const, *types.Var:
				var f content.Field
				f.Name.Relative = t.Name()
				f.Type = g.pkg_type(t.Type())
				cmp.Fields = append(cmp.Fields, f)
			default:
				log4go.Warn("Unimplemented type in package completion: at: %+v, %v, %v", t, reflect.TypeOf(t), reflect.TypeOf(t.Type().Underlying()))
			}
		}
	}
	return nil
}
func (c *Cache) loaderthread() {
	for req := range c.load {
		loaded := false
		func() {
			c.mutex.Lock()
			defer c.mutex.Unlock()
			for i := range c.entries {
				if c.entries[i].name != req.name {
					continue
				}
				loaded = true
				if req.reload {
					if err := c.reload(&c.entries[i]); err != nil {
						log4go.Warn("Error reloading assembly: %v", err)
					}
				}
				break
			}
		}()
		if loaded {
			continue
		}
		exts := []string{".dll", ".exe"}
		_, err := c.Load(req.name)
		if err == nil {
			continue
		}
		for _, ext := range exts {
			if _, err := c.Load(req.name + ext); err == nil {
				break
			}
		}
	}
}
func (k *KeyPress) UnmarshalJSON(d []byte) error {
	combo := strings.Split(string(d[1:len(d)-1]), "+")
	for _, c := range combo {
		lower := strings.ToLower(c)
		switch lower {
		case "super":
			k.Super = true
		case "ctrl":
			k.Ctrl = true
		case "alt":
			k.Alt = true
		case "shift":
			k.Shift = true
		default:
			if v, ok := keylut[lower]; ok {
				k.Key = v
			} else {
				r := []Key(c)
				if len(r) != 1 {
					log4go.Warn("Unknown key value with %d bytes: %s", len(c), c)
					return nil
				}
				// use the decoded rune rather than the first byte, so
				// multi-byte characters map to the right Key
				k.Key = r[0]
				k.fix()
			}
		}
	}
	return nil
}
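// Hypothetical usage sketch for KeyPress.UnmarshalJSON: a combo literal such
// as "ctrl+shift+p" sets the modifier flags and the final key. The function
// name and sample literal are illustrative; encoding/json is assumed imported.
func exampleDecodeKeyPress() {
	var kp KeyPress
	if err := json.Unmarshal([]byte(`"ctrl+shift+p"`), &kp); err != nil {
		log4go.Error(err)
	}
	// kp.Ctrl and kp.Shift are now true; kp.Key holds the final rune
	// (possibly normalized by fix()).
}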
// authTCP performs the goim handshake with the client, using rsa & aes.
func (server *Server) authTCP(rr *bufio.Reader, wr *bufio.Writer, ch *Channel) (subKey string, heartbeat time.Duration, err error) {
	var p *Proto
	// WARN: don't advance the cli proto; after auth, simply discard it.
	if p, err = ch.CliProto.Set(); err != nil {
		return
	}
	if err = server.readTCPRequest(rr, p); err != nil {
		return
	}
	if p.Operation != define.OP_AUTH {
		log.Warn("auth operation not valid: %d", p.Operation)
		err = ErrOperation
		return
	}
	if subKey, ch.RoomId, heartbeat, err = server.operator.Connect(p); err != nil {
		log.Error("operator.Connect error(%v)", err)
		return
	}
	p.Body = nil
	p.Operation = define.OP_AUTH_REPLY
	if err = server.writeTCPResponse(wr, p); err != nil {
		log.Error("[%s] server.sendTCPResponse() error(%v)", subKey, err)
	}
	return
}
// handleTcpConn handles a long-lived tcp connection.
func handleTcpConn(conn net.Conn, readerChan chan *bufio.Reader) {
	addr := conn.RemoteAddr().String()
	log.Debug("<%s> handleTcpConn routine start", addr)
	reader := newBufioReader(readerChan, conn)
	if args, err := parseCmd(reader); err == nil {
		// return the bufio.Reader to the pool
		putBufioReader(readerChan, reader)
		switch args[0] {
		case CmdSubscribe:
			subscribeTcpHandle(conn, args[1:])
		default:
			conn.Write(ParamErrorReply)
			log.Warn("<%s> unknown cmd \"%s\"", addr, args[0])
		}
	} else {
		// return the bufio.Reader to the pool
		putBufioReader(readerChan, reader)
		log.Error("<%s> parseCmd() error(%v)", addr, err)
	}
	// close the connection
	if err := conn.Close(); err != nil {
		log.Error("<%s> conn.Close() error(%v)", addr, err)
	}
	log.Debug("<%s> handleTcpConn routine stop", addr)
}
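// newBufioReader and putBufioReader are not shown in this listing; they
// evidently implement a channel-backed free list of bufio.Readers. A plausible
// sketch under that assumption (the real implementations may differ; the
// bufio and io imports are assumed):
func newBufioReader(pool chan *bufio.Reader, r io.Reader) *bufio.Reader {
	select {
	case br := <-pool:
		// reuse a pooled reader, pointing it at the new source
		br.Reset(r)
		return br
	default:
		return bufio.NewReader(r)
	}
}

func putBufioReader(pool chan *bufio.Reader, br *bufio.Reader) {
	br.Reset(nil) // drop the connection reference before pooling
	select {
	case pool <- br:
	default:
		// pool is full; let the reader be garbage collected
	}
}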
func (ss *StoreServer) uploadHandler(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	vars := mux.Vars(r)
	fileIDStr := vars["fileID"]
	volID, needleID, cookie, err := newFileID(fileIDStr)
	if err != nil {
		helper.WriteJson(w, result{Error: err.Error()}, http.StatusInternalServerError)
		return
	}
	if ss.volumeMap[volID] == nil {
		helper.WriteJson(w, result{Error: fmt.Sprintf("no volume %d", volID)}, http.StatusInternalServerError)
		return
	}
	data, name, err := parseUpload(r)
	if err != nil {
		helper.WriteJson(w, result{Error: err.Error()}, http.StatusInternalServerError)
		return
	}
	n := storage.NewNeedle(cookie, needleID, data, name)
	if err = ss.volumeMap[volID].AppendNeedle(n); err != nil {
		helper.WriteJson(w, result{Error: err.Error()}, http.StatusInternalServerError)
		return
	}
	fi, _ := ss.volumeMap[volID].StoreFile.Stat()
	vi := volumeInfo{
		ID:   volID,
		Size: fi.Size(),
	}
	viBytes, _ := json.Marshal(vi)
	for i := range ss.conf.Directories {
		// send volume information to directory server
		var b bytes.Buffer
		b.Write(viBytes)
		_, err := postAndError("http://"+ss.conf.Directories[i]+"/vol/info", "application/json", &b)
		if err == nil {
			break
		} else {
			log4go.Warn("send volumeInfo to directory get err: %s", err.Error())
		}
	}
	for _, localVolIDIP := range ss.localVolIDIPs {
		if localVolIDIP.ID == volID {
			for _, ip := range localVolIDIP.IP {
				if ip != ss.Addr {
					if err = replicateUpload(fmt.Sprintf("http://%s/replicate/%s", ip, fileIDStr), string(name), data); err != nil {
						helper.WriteJson(w, result{Error: err.Error()}, http.StatusInternalServerError)
						return
					}
				}
			}
			break
		}
	}
	res := result{
		Name: string(name),
		Size: len(data),
	}
	helper.WriteJson(w, res, http.StatusOK)
}
func getDatabaseId(projectName string) (int, error) {
	manageDbFile := "./db/1.db"
	if _, err := os.Stat(manageDbFile); err != nil {
		if os.IsNotExist(err) {
			logger.Warn("create top table.")
			CreateTopTables()
		}
	}
	if projectName == "manage" {
		return 1, nil
	}
	db, err := sql.Open("sqlite3", manageDbFile)
	if err != nil {
		logger.Error(err)
		return -1, err
	}
	defer db.Close()
	stmt, err := db.Prepare("select id from project_info where name = ?")
	if err != nil {
		logger.Error(err)
		return -1, err
	}
	defer stmt.Close()
	var id int
	err = stmt.QueryRow(projectName).Scan(&id)
	if err != nil {
		logger.Error(err)
		return -1, err
	}
	return id, nil
}