func (n *Naive) readSearchModeInfo() *searchModeInfo {
	path := n.dir + "/" + searchModeInfoPath
	file, err := os.Open(path)
	if err != nil {
		panic(log.Criticalf("failed to open search mode info: %s", err))
	}
	fi, serr := file.Stat()
	if serr != nil {
		panic(log.Criticalf("failed to stat: %s", serr))
	}
	buf := make([]byte, fi.Size())
	_, rerr := file.Read(buf)
	if rerr != nil {
		panic(log.Criticalf("failed to read: %s", rerr))
	}
	byteBuf := bytes.NewBuffer(buf)
	dec := gob.NewDecoder(byteBuf)
	var ret searchModeInfo
	derr := dec.Decode(&ret)
	if derr != nil {
		panic(log.Criticalf("decode error: %s", derr))
	}
	log.Debugf("number of collected traces: %d", ret.NrCollectedTraces)
	return &ret
}
func (n *Naive) RecordNewTrace(newTrace *SingleTrace) {
	var traceBuf bytes.Buffer
	enc := gob.NewEncoder(&traceBuf)
	eerr := enc.Encode(&newTrace)
	if eerr != nil {
		panic(log.Criticalf("encoding trace failed: %s", eerr))
	}
	tracePath := fmt.Sprintf("%s/history", n.nextWorkingDir)
	log.Debugf("new trace path: %s", tracePath)
	traceFile, oerr := os.Create(tracePath)
	if oerr != nil {
		panic(log.Criticalf("failed to create a file for new trace: %s", oerr))
	}
	_, werr := traceFile.Write(traceBuf.Bytes())
	if werr != nil {
		panic(log.Criticalf("writing new trace to file failed: %s", werr))
	}
	actionTraceDir := path.Join(n.nextWorkingDir, "actions")
	if err := os.Mkdir(actionTraceDir, 0777); err != nil {
		panic(log.Criticalf("%s", err))
	}
	for i, act := range newTrace.ActionSequence {
		recordAction(i, act, actionTraceDir)
	}
}
func NewHttpNotifier(app *ApplicationContext) (*HttpNotifier, error) {
	// Compile the templates
	templatePost, err := template.ParseFiles(app.Config.Httpnotifier.TemplatePost)
	if err != nil {
		log.Criticalf("Cannot parse HTTP notifier POST template: %v", err)
		os.Exit(1)
	}
	templateDelete, err := template.ParseFiles(app.Config.Httpnotifier.TemplateDelete)
	if err != nil {
		log.Criticalf("Cannot parse HTTP notifier DELETE template: %v", err)
		os.Exit(1)
	}

	// Parse the extra parameters for the templates
	extras := make(map[string]string)
	for _, extra := range app.Config.Httpnotifier.Extras {
		parts := strings.Split(extra, "=")
		extras[parts[0]] = parts[1]
	}

	return &HttpNotifier{
		app:            app,
		templatePost:   templatePost,
		templateDelete: templateDelete,
		extras:         extras,
		quitChan:       make(chan struct{}),
		groupIds:       make(map[string]map[string]string),
		resultsChannel: make(chan *ConsumerGroupStatus),
	}, nil
}
func writeSitemaps(outdir string, c crawler.Crawler) error {
	// Build sitemap and write to output file
	xmlout := fmt.Sprintf("%s/%s-sitemap.xml", outdir, c.Target().Host)
	xmlSitemap, err := sitemap.BuildXMLSitemap(c.AllPages())
	if err != nil {
		log.Criticalf("Failed to generate sitemap to %s", xmlout)
		os.Exit(1)
	}
	if err := ioutil.WriteFile(xmlout, xmlSitemap, 0644); err != nil {
		log.Criticalf("Failed to write sitemap to %s", xmlout)
		os.Exit(1)
	}
	log.Infof("Wrote XML sitemap to %s", xmlout)

	// Build JSON site description
	siteout := fmt.Sprintf("%s/%s-sitemap.json", outdir, c.Target().Host)
	b, err := sitemap.BuildJSONSiteStructure(c.Target(), c.AllPages())
	if err != nil {
		log.Criticalf("Failed to generate JSON site structure for %s", siteout)
		os.Exit(1)
	}
	if err := ioutil.WriteFile(siteout, b, 0644); err != nil {
		log.Criticalf("Failed to write sitemap to %s", siteout)
		os.Exit(1)
	}
	log.Infof("Wrote JSON sitemap to %s", siteout)
	return nil
}
func (fr *TrecFileReader) read_to_chan(count int) (i int) {
	// Catch and log panics
	defer func() {
		if x := recover(); x != nil {
			log.Criticalf("Error in document %d of %s: %v", fr.docCounter, fr.filename, x)
			log.Flush()
		}
	}()
	for i = 0; i < count || count == -1; i++ {
		log.Debugf("Reading document %d from %s", i, fr.filename)
		doc, err := fr.read_next_doc()
		switch err {
		case io.EOF:
			log.Debugf("Got EOF for file %s", fr.filename)
			close(fr.documents)
			return i
		case nil:
			log.Debugf("Successfully read document %s", doc.Identifier())
			fr.documents <- doc
		default:
			log.Criticalf("Unexpected error reading document: %v", err)
			panic(err)
		}
	}
	log.Infof("Returning")
	return i
}
// Flush the message-buffer:
func (handler *OnDiskHandler) FlushBufferToS3() error {
	log.Debugf("Messages processed (since the beginning): %d", handler.allTimeMessages)

	// Read the messages from disk:
	fileData, err := ioutil.ReadFile(*messageBufferFileName)
	if err != nil {
		log.Criticalf("Unable to read buffer-file! (%v) %v", *messageBufferFileName, err)
		os.Exit(2)
	}

	// Store them on S3:
	err = StoreMessages(fileData)
	if err != nil {
		log.Criticalf("Unable to store messages! %v", err)
		os.Exit(2)
	}

	// Reset the handler:
	handler.deDuper = make(map[string]int)
	handler.timeLastFlushedToS3 = int(time.Now().Unix())
	handler.messagesBuffered = 0
	os.Remove(*messageBufferFileName)

	return nil
}
/// Start the rpcServer and listen on the RPC server port. Since Start invokes
/// blocking calls internally, it should be run from a go statement.
func (ms *RpcServer) Start() {
	go func() {
		seelog.Info("RpcServer start...")
		hostAndPort := fmt.Sprintf("%v:%v", ms.host, ms.port)
		servAddr, err := net.ResolveTCPAddr("tcp", hostAndPort)
		if err != nil {
			seelog.Criticalf("RpcServer failed to start with err<%v>", err.Error())
			os.Exit(1)
		}
		listener, err := net.ListenTCP("tcp4", servAddr)
		if err != nil {
			seelog.Criticalf("RpcServer failed to start with err<%v>", err.Error())
			os.Exit(1)
		}
		seelog.Debugf("Rpc Server listening: <%v>", servAddr.String())
		defer listener.Close()
		for {
			conn, err := listener.Accept()
			seelog.Debug("Rpc Server accept new connection")
			if err != nil {
				seelog.Critical(err.Error())
				os.Exit(1)
			}
			go ms.rpcServer.ServeCodec(jsonrpc.NewServerCodec(conn))
		}
	}()
}
func loadNotifiers(app *ApplicationContext) error {
	// Set up the Emailer, if configured
	if len(app.Config.Email) > 0 {
		log.Info("Configuring Email notifier")
		emailer, err := NewEmailer(app)
		if err != nil {
			log.Criticalf("Cannot configure email notifier: %v", err)
			return err
		}
		app.Emailer = emailer
	}

	// Set up the HTTP Notifier, if configured
	if app.Config.Httpnotifier.Url != "" {
		log.Info("Configuring HTTP notifier")
		httpnotifier, err := NewHttpNotifier(app)
		if err != nil {
			log.Criticalf("Cannot configure HTTP notifier: %v", err)
			return err
		}
		app.HttpNotifier = httpnotifier
	}

	return nil
}
func initPool() {
	configs, err := goconfig.ReadConfigFile(configFileName)
	if err != nil {
		logger.Criticalf("Can not read nsq configs from %s. Error: %s", configFileName, err)
		panic(err)
	}
	options, err := configs.GetOptions(nsqdConfigSection)
	if err != nil {
		logger.Criticalf("Can not read nsqd config in %s. Error: %s", configFileName, err)
		panic(err)
	}

	addrs := make([]string, 0, len(options))
	for _, option := range options {
		value, err := configs.GetString(nsqdConfigSection, option)
		if err != nil {
			logger.Errorf("Get error when reading section %s option %s in %s. Error: %s", nsqdConfigSection, option, configFileName, err)
			continue
		}
		addrs = append(addrs, value)
	}
	if len(addrs) <= 0 {
		logger.Criticalf("Read 0 configs for nsqd address in %s.", configFileName)
		panic("Read 0 configs for nsqd address in config file " + configFileName)
	}

	pool = make(map[string]*gonsq.Producer)
	lostConns = make([]string, 0)
	for _, addr := range addrs {
		config := gonsq.NewConfig()
		producer, err := gonsq.NewProducer(addr, config)
		if err != nil {
			logger.Errorf("Can not create nsq producer for address: %s. Error: %s", addr, err)
			continue
		}
		err = producer.Ping()
		if err != nil {
			logger.Errorf("Can not connect to address %s. Error: %s", addr, err)
			lostConns = append(lostConns, addr)
		}
		pool[addr] = producer
	}
	go autoReconnect()
}
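// The section read by initPool above is an INI-style fragment in which every option's
// value is an nsqd address. The section name and option keys below are assumptions for
// illustration only (the real section name comes from the nsqdConfigSection constant):
//
//   [nsqd]
//   node1 = 10.0.0.1:4150
//   node2 = 10.0.0.2:4150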
func NewHttpNotifier(app *ApplicationContext) (*HttpNotifier, error) {
	// Helper functions for templates
	fmap := template.FuncMap{
		"jsonencoder":     templateJsonEncoder,
		"topicsbystatus":  classifyTopicsByStatus,
		"partitioncounts": templateCountPartitions,
		"add":             templateAdd,
		"minus":           templateMinus,
		"multiply":        templateMultiply,
		"divide":          templateDivide,
		"maxlag":          maxLagHelper,
	}

	// Compile the templates
	templatePost, err := template.New("post").Funcs(fmap).ParseFiles(app.Config.Httpnotifier.TemplatePost)
	if err != nil {
		log.Criticalf("Cannot parse HTTP notifier POST template: %v", err)
		os.Exit(1)
	}
	templatePost = templatePost.Templates()[0]

	templateDelete, err := template.New("delete").Funcs(fmap).ParseFiles(app.Config.Httpnotifier.TemplateDelete)
	if err != nil {
		log.Criticalf("Cannot parse HTTP notifier DELETE template: %v", err)
		os.Exit(1)
	}
	templateDelete = templateDelete.Templates()[0]

	// Parse the extra parameters for the templates
	extras := make(map[string]string)
	for _, extra := range app.Config.Httpnotifier.Extras {
		parts := strings.Split(extra, "=")
		extras[parts[0]] = parts[1]
	}

	return &HttpNotifier{
		app:            app,
		templatePost:   templatePost,
		templateDelete: templateDelete,
		extras:         extras,
		quitChan:       make(chan struct{}),
		groupIds:       make(map[string]map[string]Event),
		groupList:      make(map[string]map[string]bool),
		groupLock:      sync.RWMutex{},
		resultsChannel: make(chan *ConsumerGroupStatus),
		httpClient: &http.Client{
			Timeout: time.Duration(app.Config.Httpnotifier.Timeout) * time.Second,
			Transport: &http.Transport{
				Dial: (&net.Dialer{
					KeepAlive: time.Duration(app.Config.Httpnotifier.Keepalive) * time.Second,
				}).Dial,
				Proxy: http.ProxyFromEnvironment,
			},
		},
	}, nil
}
// Register an event class so that it can be serialized/deserialized
//
// name is a REST JSON class name
func RegisterSignalClass(name string, value interface{}) {
	log.Debugf("Registering a signal class \"%s\"", name)
	_, isEvent := value.(Event)
	_, isAction := value.(Action)
	if !(isEvent || isAction) {
		panic(log.Criticalf("%s is neither an Event nor an Action", value))
	}
	if _, registered := knownSignalClasses[name]; registered {
		panic(log.Criticalf("%s has already been registered", value))
	}
	t := reflect.TypeOf(value)
	knownSignalClasses[name] = t
	gob.Register(value)
}
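// A minimal usage sketch: a concrete type implementing Event (or Action) is registered
// under its REST JSON class name, typically from an init function. The class name and
// type below are hypothetical, not part of the original code:
//
//   func init() {
//       RegisterSignalClass("PacketFaultAction", &PacketFaultAction{})
//   }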
// Store messages to S3:
func StoreMessages(fileData []byte) error {
	// Something to compress the fileData into:
	var fileDataBytes bytes.Buffer
	gzFileData := gzip.NewWriter(&fileDataBytes)
	gzFileData.Write(fileData)
	gzFileData.Close()
	log.Infof("Storing %d bytes...", len(fileDataBytes.Bytes()))

	// Authenticate with AWS:
	awsAuth, err := aws.GetAuth("", "", "", time.Now())
	if err != nil {
		log.Criticalf("Unable to authenticate to AWS! (%s) ...\n", err)
		os.Exit(2)
	} else {
		log.Debugf("Authenticated to AWS")
	}

	// Make a new S3 connection:
	log.Debugf("Connecting to AWS...")
	s3Connection := s3.New(awsAuth, aws.Regions[*awsRegion])

	// Make a bucket object:
	s3Bucket := s3Connection.Bucket(*s3Bucket)

	// Prepare arguments for the call to store messages on S3:
	contType := "text/plain"
	perm := s3.BucketOwnerFull
	options := &s3.Options{
		SSE:  false,
		Meta: nil,
	}

	// Build the filename we'll use for S3:
	fileName := fmt.Sprintf("%v.gz", FileName())

	// Upload the data:
	err = s3Bucket.Put(fileName, fileDataBytes.Bytes(), contType, perm, *options)
	if err != nil {
		log.Criticalf("Failed to put file (%v) on S3 (%v)", fileName, err)
		os.Exit(2)
	} else {
		log.Infof("Stored file (%v) on s3", fileName)
	}

	return nil
}
func NewHttpNotifier(app *ApplicationContext) (*HttpNotifier, error) {
	// Helper functions for templates
	fmap := template.FuncMap{
		"jsonencoder":    templateJsonEncoder,
		"topicsbystatus": classifyTopicsByStatus,
	}

	// Compile the templates
	templatePost, err := template.New("post").Funcs(fmap).ParseFiles(app.Config.Httpnotifier.TemplatePost)
	if err != nil {
		log.Criticalf("Cannot parse HTTP notifier POST template: %v", err)
		os.Exit(1)
	}
	templatePost = templatePost.Templates()[0]

	templateDelete, err := template.New("delete").Funcs(fmap).ParseFiles(app.Config.Httpnotifier.TemplateDelete)
	if err != nil {
		log.Criticalf("Cannot parse HTTP notifier DELETE template: %v", err)
		os.Exit(1)
	}
	templateDelete = templateDelete.Templates()[0]

	// Parse the extra parameters for the templates
	extras := make(map[string]string)
	for _, extra := range app.Config.Httpnotifier.Extras {
		parts := strings.Split(extra, "=")
		extras[parts[0]] = parts[1]
	}

	return &HttpNotifier{
		app:            app,
		templatePost:   templatePost,
		templateDelete: templateDelete,
		extras:         extras,
		quitChan:       make(chan struct{}),
		groupIds:       make(map[string]map[string]Event),
		resultsChannel: make(chan *ConsumerGroupStatus),
		httpClient: &http.Client{
			Transport: &http.Transport{
				Dial: (&net.Dialer{
					Timeout:   time.Duration(app.Config.Httpnotifier.Timeout) * time.Second,
					KeepAlive: time.Duration(app.Config.Httpnotifier.Keepalive) * time.Second,
				}).Dial,
				TLSHandshakeTimeout: time.Duration(app.Config.Httpnotifier.Timeout) * time.Second,
			},
		},
	}, nil
}
// Flush the message-buffer:
func (handler *AbandonedChannelHandler) FlushBufferToS3() error {
	log.Debugf("Messages processed (since the beginning): %d", handler.allTimeMessages)

	// A byte array to submit to S3:
	var fileData []byte

	// Turn the message bodies into a []byte:
	for _, message := range handler.messageBuffer {
		fileData = append(fileData, message.Body...)
	}

	// Store them on S3:
	err := StoreMessages(fileData)
	if err != nil {
		log.Criticalf("Unable to store messages! %v", err)
		os.Exit(2)
	}

	// Reset the handler:
	handler.deDuper = make(map[string]int)
	handler.messageBuffer = make([]*nsq.Message, 0)
	handler.timeLastFlushedToS3 = int(time.Now().Unix())

	return nil
}
func (self *discovery) tick(die bool) {
	failCount := 0
	ticker := time.NewTicker(tryDiscoveryInterval)
	for {
		select {
		case <-ticker.C:
			if !self.isMultiRegistered || !self.hb.healthy() {
				failCount++
				log.Infof("[Server] Service has not received heartbeats within %v and is now disconnected", lostContactInterval)
				if failCount >= maxDisconnects && die {
					log.Criticalf("[Service] Max disconnects (%d) reached, bye bye cruel world", maxDisconnects)
					cleanupLogs()
					os.Exit(1)
				}
				self.connected = false
				if err := self.connect(); err == nil {
					// Successful connection = back to zero
					failCount = 0
				}
			}
		}
	}
}
func (s *server) start(trans transport.Transport) (*tomb.Tomb, error) {
	s.workerTombM.Lock()
	if s.workerTomb != nil {
		s.workerTombM.Unlock()
		return nil, ErrAlreadyRunning
	}
	tm := new(tomb.Tomb)
	s.workerTomb = tm
	s.workerTombM.Unlock()

	stop := func() {
		trans.StopListening(s.Name())
		s.workerTombM.Lock()
		s.workerTomb = nil
		s.workerTombM.Unlock()
	}

	var inbound chan tmsg.Request
	connect := func() error {
		select {
		case <-trans.Ready():
			inbound = make(chan tmsg.Request, 500)
			return trans.Listen(s.Name(), inbound)
		case <-time.After(connectTimeout):
			log.Warnf("[Mercury:Server] Timed out after %s waiting for transport readiness", connectTimeout.String())
			return ttrans.ErrTimeout
		}
	}

	// Block here purposefully (deliberately not in the goroutine below, because we want to report a connection error
	// to the caller)
	if err := connect(); err != nil {
		stop()
		return nil, err
	}

	tm.Go(func() error {
		defer stop()
		for {
			select {
			case req, ok := <-inbound:
				if !ok {
					// Received because the channel closed; try to reconnect
					log.Warn("[Mercury:Server] Inbound channel closed; trying to reconnect…")
					if err := connect(); err != nil {
						log.Criticalf("[Mercury:Server] Could not reconnect after channel close: %s", err)
						return err
					}
				} else {
					go s.handle(trans, req)
				}
			case <-tm.Dying():
				return tomb.ErrDying
			}
		}
	})
	return tm, nil
}
// checkMissingAndDeprecated checks all zero-valued fields for tags of the form
// missing:STRING and acts based on that string. Current options are: fatal,
// warn. Fatal will result in an error being returned, warn will result in a
// warning that the field is missing being logged.
func (cfg *Config) checkMissingAndDepreciated() error {
	cfgElem := reflect.ValueOf(cfg).Elem()
	cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()

	fatalFields := []string{}
	for i := 0; i < cfgElem.NumField(); i++ {
		cfgField := cfgElem.Field(i)
		if utils.ZeroOrNil(cfgField.Interface()) {
			missingTag := cfgStructField.Field(i).Tag.Get("missing")
			if len(missingTag) == 0 {
				continue
			}
			switch missingTag {
			case "warn":
				seelog.Warnf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
			case "fatal":
				seelog.Criticalf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
				fatalFields = append(fatalFields, cfgStructField.Field(i).Name)
			default:
				seelog.Warnf("Unexpected `missing` tag value, tag %v", missingTag)
			}
		} else {
			// present
			deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated")
			if len(deprecatedTag) == 0 {
				continue
			}
			seelog.Warnf("Use of deprecated configuration key, key: %v message: %v", cfgStructField.Field(i).Name, deprecatedTag)
		}
	}
	if len(fatalFields) > 0 {
		return errors.New("Missing required fields: " + strings.Join(fatalFields, ", "))
	}
	return nil
}
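// A minimal sketch of how the `missing` and `deprecated` tags handled above might be
// declared on a config struct; the field names here are hypothetical, not the actual Config.
type exampleConfig struct {
	Cluster        string `missing:"fatal"`                   // zero value causes an error to be returned
	ReservedMemory uint16 `missing:"warn"`                    // zero value only logs a warning
	LegacyEndpoint string `deprecated:"use Endpoint instead"` // non-zero value logs a deprecation notice
}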
func StartNotifiers(app *ApplicationContext) {
	nc := app.NotifyCenter

	// Do not proceed until we get the Zookeeper lock
	err := app.NotifierLock.Lock()
	if err != nil {
		log.Criticalf("Cannot get ZK nc lock: %v", err)
		os.Exit(1)
	}
	log.Info("Acquired Zookeeper notify lock")

	// Get a group list to start with (this will start the ncs)
	nc.refreshConsumerGroups()

	// Set a ticker to refresh the group list periodically
	nc.refreshTicker = time.NewTicker(time.Duration(nc.app.Config.Lagcheck.ZKGroupRefresh) * time.Second)

	// Main loop to handle refreshes and evaluation responses
OUTERLOOP:
	for {
		select {
		case <-nc.quitChan:
			break OUTERLOOP
		case <-nc.refreshTicker.C:
			nc.refreshConsumerGroups()
		case result := <-nc.resultsChannel:
			go nc.handleEvaluationResponse(result)
		}
	}
}
func StartTcpServer(host, port string, service ConnectorService) {
	if service == nil {
		connService = &MqService{}
	} else {
		connService = service
	}
	connService.InitService()

	addr, err := net.ResolveTCPAddr("tcp", ":9000")
	if err != nil {
		logger.Errorf("Can not resolve tcp address for server. host: %s. port: %s. Error: %s", host, port, err)
		panic(err)
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		logger.Criticalf("Can not start tcp server on address: %s:%d. Error: %s", addr.IP, addr.Port, err)
		panic(err)
	}
	defer listener.Close()

	for {
		conn, err := listener.AcceptTCP()
		if err != nil {
			logger.Errorf("Create tcp connection error. Err: %s", err)
			continue
		}
		handleConnection(conn)
	}
}
func NewLogs() {
	logger, err := log.LoggerFromConfigAsBytes([]byte(default_template))
	if err != nil {
		log.Criticalf("log config err: %v", err)
	}
	log.ReplaceLogger(logger)
}
func init() {
	var err error
	db, err = sql.Open("mysql", "root:root@tcp(120.26.212.134:3306)/polyge?charset=utf8&parseTime=true")
	if err != nil {
		seelog.Criticalf("open mysql %v", err)
	}
}
/// Listen on the server port and accept new connections. For each incoming connection,
/// the registered connection event (callback) is invoked first, and then the listener
/// goes back to waiting for new connections.
func (tc *TcpConnector) Start() {
	tcpAddr, err := net.ResolveTCPAddr("tcp", tc.host+":"+tc.port)
	if err != nil {
		log.Fatal(err.Error())
	}
	// context.CheckError(err)
	listener, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		log.Fatal(err.Error())
	}

	go func(ln *net.TCPListener) {
		defer ln.Close()
		for {
			conn, err := ln.AcceptTCP()
			// context.CheckError(err)
			if err != nil {
				seelog.Criticalf("AcceptTcp on host<%v> port<%v> error<%v>", tc.host, tc.port, err.Error())
				os.Exit(0)
			}
			tcpSocket := NewTcpSocket(curID, conn)
			go tc.HandleNewConnection(tcpSocket)
		} // end for
	}(listener)
} // end Start()
// Handle takes an inbound Request, unmarshals it, dispatches it to the handler, and serialises the result as a
// Response. Note that the response may be nil.
func (e Endpoint) Handle(req mercury.Request) (rsp mercury.Response, err error) {
	// Unmarshal the request body (unless there already is one)
	if req.Body() == nil && e.Request != nil {
		if um := e.unmarshaler(req); um != nil {
			if werr := terrors.Wrap(um.UnmarshalPayload(req), nil); werr != nil {
				log.Warnf("[Mercury:Server] Cannot unmarshal request payload: %v", werr)
				terr := werr.(*terrors.Error)
				terr.Code = terrors.ErrBadRequest
				rsp, err = nil, terr
				return
			}
		}
	}

	defer func() {
		if v := recover(); v != nil {
			traceVerbose := make([]byte, 1024)
			runtime.Stack(traceVerbose, true)
			log.Criticalf("[Mercury:Server] Recovered from handler panic for request %s:\n%v\n%s", req.Id(), v, string(traceVerbose))
			rsp, err = nil, terrors.InternalService("panic", fmt.Sprintf("Panic in handler %s:\n%s", req.Endpoint(), string(traceVerbose)), nil)
		}
	}()

	rsp, err = e.Handler(req)
	return
}
func initConsumer() {
	configs, err := goconfig.ReadConfigFile(configFileName)
	if err != nil {
		logger.Criticalf("Can not read nsqlookup config from %s. Error: %s", configFileName, err)
		panic(err)
	}
	options, err := configs.GetOptions(nsqlookupSection)
	if err != nil {
		logger.Criticalf("Can not find configs for nsqlookup in %s. Error: %s", configFileName, err)
		panic(err)
	}
}
func NewConnectionManager() *ConnectionManager {
	manager := ConnectionManager{}

	// Initialize our random number generator (used for naming new connections)
	// A different seed will be used on each startup, for no good reason
	manager.rand = rand.New(rand.NewSource(time.Now().UnixNano()))

	manager.qm = newQueueManager()
	manager.udpClients = make(map[string]*Client)

	// Open TCP socket
	tcpAddr, tcpAddrErr := net.ResolveTCPAddr("tcp", fmt.Sprintf(":%d", Configuration.Port))
	if tcpAddrErr != nil {
		panic("Invalid port configured")
	}
	tcpListener, tcpErr := net.ListenTCP("tcp", tcpAddr)
	manager.tcpLn = tcpListener
	if tcpErr != nil {
		log.Criticalf("Error whilst opening TCP socket: %s", tcpErr.Error())
		panic(tcpErr.Error())
	}

	// Listen on UDP socket
	udpAddr, _ := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", Configuration.Port))
	udpConn, _ := net.ListenUDP("udp", udpAddr)
	manager.udpConn = udpConn

	return &manager
}
func (c *client) listen(ch chan bool) {
	c.Lock()
	defer c.Unlock()

	// check if we started listening while locked
	if c.listening {
		ch <- true
		return
	}

	if deliveries, err := raven.Consume(c.instanceID); err != nil {
		log.Criticalf("[Client] Failed to consume: %v", err)
		c.listening = false
		ch <- false
	} else {
		log.Debugf("[Client] Listening on %s", c.instanceID)
		c.listening = true
		ch <- true
		c.Unlock()
		for d := range deliveries {
			log.Tracef("[Client] Inbound message %s on %s", d.CorrelationId, c.instanceID)
			go c.getResponse(d)
		}
		c.Lock()
		log.Errorf("[Client] Stopping listening due to channel closing")
		c.listening = false
	}
}
// handleSingleMessage processes a single payload message. It adds tasks in the message to the task engine.
// An error is returned if the message was not handled correctly. The error is being used only for testing
// today. In the future, it could be used for doing more interesting things.
func (payloadHandler *payloadRequestHandler) handleSingleMessage(payload *ecsacs.PayloadMessage) error {
	if aws.StringValue(payload.MessageId) == "" {
		seelog.Criticalf("Received a payload with no message id, payload: %v", payload)
		return fmt.Errorf("Received a payload with no message id")
	}
	seelog.Debugf("Received payload message, message id: %s", aws.StringValue(payload.MessageId))
	credentialsAcks, allTasksHandled := payloadHandler.addPayloadTasks(payload)

	// save the state of tasks we know about after passing them to the task engine
	err := payloadHandler.saver.Save()
	if err != nil {
		seelog.Errorf("Error saving state for payload message! err: %v, messageId: %s", err, *payload.MessageId)
		// Don't ack; maybe we can save it in the future.
		return fmt.Errorf("Error saving state for payload message, with messageId: %s", *payload.MessageId)
	}
	if !allTasksHandled {
		return fmt.Errorf("All tasks not handled")
	}

	go func() {
		// Throw the ack in async; it doesn't really matter all that much and this is blocking handling more tasks.
		for _, credentialsAck := range credentialsAcks {
			payloadHandler.refreshHandler.ackMessage(credentialsAck)
		}
		payloadHandler.ackRequest <- *payload.MessageId
	}()

	return nil
}
/// Register a service with the RPC server; wraps rpc.Register.
func (ms *RpcServer) RegisteService(r interface{}) {
	err := ms.rpcServer.Register(r)
	if err != nil {
		seelog.Criticalf("Failed to register RPC service, %v", err.Error())
		os.Exit(1)
	}
}
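// A minimal usage sketch, assuming a hypothetical service type: net/rpc only exposes
// exported methods of the form Method(args *Args, reply *Reply) error, so a service
// passed to RegisteService might look like this. ArithService and MultiplyArgs are
// illustrative names, not part of the original code.
type MultiplyArgs struct {
	A, B int
}

type ArithService struct{}

// Multiply matches the net/rpc method signature: exported, two arguments, second a pointer, returns error.
func (s *ArithService) Multiply(args *MultiplyArgs, reply *int) error {
	*reply = args.A * args.B
	return nil
}

// Registration would then be: ms.RegisteService(new(ArithService))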
func LoadSession(name, maxAge string) {
	maxAgeInt, err := strconv.Atoi(maxAge)
	if err != nil {
		log.Criticalf("load session maxAge[%s] type error: %s", maxAge, err)
		os.Exit(2)
	}
	CacheInit(name, maxAgeInt)
}
func publishMsg(topic string, msg []byte) error {
	producer, err := nsq.GetProducer()
	if err != nil {
		logger.Criticalf("Can not get available producer from nsqd pool. Error: %s", err)
		return err
	}
	err = producer.Publish(topic, msg)
	if err != nil {
		logger.Criticalf("Publish topic %s error. Producer: %s. Error: %s", topic, producer.String(), err)
		return err
	}
	return nil
}