func Run(nsqLookupdHTTPAddrs []string, st store.Store) {
	cfg := nsq.NewConfig()
	cfg.UserAgent = fmt.Sprintf("phosphor go-nsq/%s", nsq.VERSION)
	cfg.MaxInFlight = maxInFlight

	consumer, err := nsq.NewConsumer(topic, channel, cfg)
	if err != nil {
		log.Critical(err)
		os.Exit(1)
	}

	consumer.AddHandler(&IngestionHandler{
		store: st,
	})

	err = consumer.ConnectToNSQLookupds(nsqLookupdHTTPAddrs)
	if err != nil {
		log.Critical(err)
		os.Exit(1)
	}

	// Block until exit
	<-consumer.StopChan
}
func readFile(filename string) int {
	file, err := os.Open(filename)
	if err != nil {
		log.Critical(err)
		return 0
	}
	defer file.Close()

	// Read into a fixed-size buffer; the 1MB size here is an assumption.
	buf := make([]byte, 1<<20)
	count, err := file.Read(buf)
	if err != nil {
		log.Critical(err)
	}
	return count
}
func main() {
	cluster := gocql.NewCluster("127.0.0.1")
	cluster.Keyspace = "system"

	session, err := cluster.CreateSession()
	if err != nil {
		log.Critical(err)
	}
	defer session.Close()

	traceSession, err := cluster.CreateSession()
	if err != nil {
		log.Critical(err)
	}
	defer traceSession.Close()

	// Create a new logger instance that we adjust the stack depth for to get
	// more meaningful frames
	logger, err := log.LoggerFromConfigAsString(`
<seelog>
	<outputs>
		<console formatid="fmt"/>
	</outputs>
	<formats>
		<format id="fmt" format="%Date(Jan 02 2006 03:04:05.000) [%LEVEL] %File:%Line - %Msg%n"/>
	</formats>
</seelog>
`)
	if err != nil {
		log.Critical(err)
	}
	logger.SetAdditionalStackDepth(2)

	writer := NewTraceWriter(traceSession, logger)
	tracer := gocql.NewTraceWriter(traceSession, writer)
	session.SetTrace(tracer)

	var count int
	iter := session.Query(`select count(*) from schema_keyspaces`).Iter()
	iter.Scan(&count)
	if err = iter.Close(); err != nil {
		log.Critical(err)
	}

	log.Infof("This instance has %d keyspaces", count)
}
func doRun(opts *Options) {
	defer cleanupLogs()

	// check we have some endpoints
	if reg.size() == 0 {
		log.Critical("There are no endpoints for this service")
		os.Exit(3)
	}

	// start listening for incoming messages
	deliveries, err := raven.Consume(InstanceID)
	if err != nil {
		log.Criticalf("[Server] Failed to consume: %v", err)
		os.Exit(5)
	}

	if opts.SelfBind {
		// binding should come after you've started consuming
		if err := raven.BindService(Name, InstanceID); err != nil {
			log.Criticalf("[Server] Failed to bind itself: %v", err)
			os.Exit(7)
		}
	}

	// announce ourselves to the discovery service
	dsc = newDiscovery(opts)
	go dsc.connect()

	// run some post connect handlers
	for _, f := range postConnHdlrs {
		go f()
	}

	// register stats collector
	go registerStats()

	// listen for SIGQUIT
	go signalCatcher()

	// consume messages
	for d := range deliveries {
		req := NewRequestFromDelivery(d)
		go HandleRequest(req)
	}

	log.Critical("[Server] Stopping due to channel closing")
	os.Exit(6)
}
func runFsInspector(args []string) int {
	if err := fsFlagset.Parse(args); err != nil {
		log.Critical(err)
		return 1
	}

	if _fsFlags.OriginalDir == "" {
		log.Critical("original-dir is not set")
		return 1
	}

	if _fsFlags.Mountpoint == "" {
		log.Critical("mount-point is not set")
		return 1
	}

	if _fsFlags.AutopilotConfig != "" && _fsFlags.OrchestratorURL != ocutil.LocalOrchestratorURL {
		log.Critical("non-default orchestrator url set for autopilot orchestration mode")
		return 1
	}

	if logutil.Debug {
		// log level: 0..2
		hookfs.SetLogLevel(1)
	} else {
		hookfs.SetLogLevel(0)
	}

	if _fsFlags.AutopilotConfig != "" {
		cfg, err := config.NewFromFile(_fsFlags.AutopilotConfig)
		if err != nil {
			panic(log.Critical(err))
		}
		autopilotOrchestrator, err := ocutil.NewAutopilotOrchestrator(cfg)
		if err != nil {
			panic(log.Critical(err))
		}
		log.Info("Starting autopilot-mode orchestrator")
		go autopilotOrchestrator.Start()
	}

	hook := &inspector.FilesystemInspector{
		OrchestratorURL: _fsFlags.OrchestratorURL,
		EntityID:        _fsFlags.EntityID,
	}

	fs, err := hookfs.NewHookFs(_fsFlags.OriginalDir, _fsFlags.Mountpoint, hook)
	if err != nil {
		panic(log.Critical(err))
	}
	log.Infof("Serving %s", fs)
	log.Infof("Please run `fusermount -u %s` after using this, manually", _fsFlags.Mountpoint)
	if err = fs.Serve(); err != nil {
		panic(log.Critical(err))
	}

	// NOTREACHED
	return 0
}
func (emailer *EmailNotifier) Notify(msg Message) error {
	if emailer.auth == nil {
		switch emailer.AuthType {
		case "plain":
			emailer.auth = smtp.PlainAuth("", emailer.Username, emailer.Password, emailer.Server)
		case "crammd5":
			emailer.auth = smtp.CRAMMD5Auth(emailer.Username, emailer.Password)
		}
	}

	if emailer.template == nil {
		tmpl, err := template.ParseFiles(emailer.TemplateFile)
		if err != nil {
			log.Criticalf("Cannot parse email template: %v", err)
			return err
		}
		emailer.template = tmpl
	}

	if emailer.groupMsgs == nil {
		emailer.groupMsgs = make(map[string]Message)
	}

	for _, group := range emailer.Groups {
		clusterGroup := fmt.Sprintf("%s,%s", msg.Cluster, msg.Group)
		if clusterGroup == group {
			emailer.groupMsgs[clusterGroup] = msg
		}
	}

	if len(emailer.Groups) == len(emailer.groupMsgs) {
		return emailer.sendConsumerGroupStatusNotify()
	}
	return nil
}
func testFuncException() {
	fmt.Println("testFuncException")
	testConfig := `
<seelog type="sync" minlevel="info">
	<exceptions>
		<exception funcpattern="*main.test*Except*" minlevel="error"/>
	</exceptions>
	<outputs>
		<console/>
	</outputs>
</seelog>`
	logger, _ := log.LoggerFromConfigAsBytes([]byte(testConfig))
	log.ReplaceLogger(logger)

	log.Trace("NOT Printed")
	log.Debug("NOT Printed")
	log.Info("NOT Printed")
	log.Warn("NOT Printed")
	log.Error("Printed")
	log.Critical("Printed")

	log.Current.Trace("NOT Printed")
	log.Current.Debug("NOT Printed")
	log.Current.Info("NOT Printed")
	log.Current.Warn("NOT Printed")
	log.Current.Error("Printed")
	log.Current.Critical("Printed")
}
func main() {
	// context
	ctx = context.Background()

	loadConfig()

	// db
	hoge.BuildInstances()

	// redis
	redisPool := newPool()
	ctx = context.WithValue(ctx, "redis", redisPool)

	router := gin.Default()
	router.Use(Custom())

	// make route
	router.POST("/test", controller.Test)

	err := router.Run(":9999")
	// when the route does not exist
	if err != nil {
		log.Critical(err)
	}
}
func (d *Dumb) QueueNextEvent(event signal.Event) {
	item, err := queue.NewBasicTBQueueItem(event, d.Interval, d.Interval)
	if err != nil {
		panic(log.Critical(err))
	}
	d.queue.Enqueue(item)
}
// Start starts the RpcServer and listens on the RPC server port. Since Start
// calls blocking methods internally, it should be invoked in a go statement.
func (ms *RpcServer) Start() {
	go func() {
		seelog.Info("RpcServer start...")
		hostAndPort := fmt.Sprintf("%v:%v", ms.host, ms.port)
		servAddr, err := net.ResolveTCPAddr("tcp", hostAndPort)
		if err != nil {
			seelog.Criticalf("RpcServer failed to start with err<%v>", err.Error())
			os.Exit(1)
		}
		listener, err := net.ListenTCP("tcp4", servAddr)
		if err != nil {
			seelog.Criticalf("RpcServer failed to start with err<%v>", err.Error())
			os.Exit(1)
		}
		seelog.Debugf("Rpc Server listening: <%v>", servAddr.String())
		defer listener.Close()

		for {
			conn, err := listener.Accept()
			seelog.Debug("Rpc Server accept new connection")
			if err != nil {
				seelog.Critical(err.Error())
				os.Exit(1)
			}
			go ms.rpcServer.ServeCodec(jsonrpc.NewServerCodec(conn))
		}
	}()
}
func Default() *truetype.Font {
	if defaultFont == nil {
		fontBytes, err := ioutil.ReadFile("test.ttf")
		if err != nil {
			log.Critical(err)
			return nil
		}
		defaultFont, err = freetype.ParseFont(fontBytes)
		if err != nil {
			log.Critical(err)
			return nil
		}
	}
	return defaultFont
}
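// Usage sketch for Default (illustrative, not part of the original source):
// callers must handle the nil return that signals a missing or unparsable
// test.ttf in the working directory.
func exampleDefaultFont() {
	if Default() == nil {
		log.Critical("default font unavailable")
		return
	}
	log.Info("default font loaded")
}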
// Critical outputs a critical-level message.
func Critical(msg ...interface{}) {
	if !isValid {
		return
	}
	mutex.Lock()
	defer mutex.Unlock()
	seelog.Critical(msg...)
}
func loadImage(filename string) (image.Image, error) {
	f, err := os.Open(filename)
	if err != nil {
		log.Critical(err)
		return nil, err
	}
	defer f.Close()

	img, _, err := image.Decode(bufio.NewReader(f))
	if err != nil {
		log.Critical(err)
		return nil, err
	}
	return img, nil
}
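// Usage sketch for loadImage (illustrative; the file name is hypothetical).
// Note that image.Decode only recognizes formats whose decoders have been
// registered, e.g. via a blank import such as _ "image/png".
func exampleLoadImage() {
	img, err := loadImage("assets/background.png")
	if err != nil {
		return
	}
	log.Infof("loaded %dx%d image", img.Bounds().Dx(), img.Bounds().Dy())
}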
// Critical outputs a log message at the critical level.
// Calling this function flushes the log immediately.
//
// param: msg - the message(s) to output; multiple values are joined and output together.
func Critical(msg ...interface{}) {
	if !valid {
		return
	}
	locker.Lock(lockTimeout)
	defer locker.Unlock()
	seelog.Critical(msg...)
}
func StartEarthquakeRoutines(c *docker.Container, cfg config.Config) error {
	log.Debugf("Starting Orchestrator")
	go func() {
		oerr := StartOrchestrator(cfg)
		if oerr != nil {
			panic(log.Critical(oerr))
		}
	}()

	if cfg.GetBool("containerParam.enableEthernetInspector") {
		nfqNum := cfg.GetInt("containerParam.ethernetNFQNumber")
		if nfqNum <= 0 {
			return fmt.Errorf("strange containerParam.ethernetNFQNumber: %d", nfqNum)
		}
		log.Debugf("Configuring NFQUEUE %d for container %s", nfqNum, c.ID)
		err := SetupNFQUEUE(c, nfqNum, false, false)
		if err != nil {
			return err
		}
		log.Debugf("Starting Ethernet Inspector")
		go func() {
			ierr := StartEthernetInspector(c, nfqNum)
			if ierr != nil {
				panic(log.Critical(ierr))
			}
		}()
	}

	if cfg.GetBool("containerParam.enableProcInspector") {
		watchInterval := cfg.GetDuration("containerParam.procWatchInterval")
		if watchInterval <= 0 {
			return fmt.Errorf("strange containerParam.procWatchInterval: %s", watchInterval)
		}
		log.Debugf("Starting Process Inspector")
		go func() {
			ierr := StartProcInspector(c, watchInterval)
			if ierr != nil {
				panic(log.Critical(ierr))
			}
		}()
	}
	return nil
}
func runEtherInspector(args []string) int {
	if err := etherFlagset.Parse(args); err != nil {
		log.Critical(err)
		return 1
	}

	useHookSwitch := _etherFlags.NFQNumber < 0

	if useHookSwitch && _etherFlags.HookSwitchZMQAddr == "" {
		log.Critical("hookswitch is invalid")
		return 1
	}
	if !useHookSwitch && _etherFlags.NFQNumber > 0xFFFF {
		log.Critical("nfq-number is invalid")
		return 1
	}

	if _etherFlags.AutopilotConfig != "" && _etherFlags.OrchestratorURL != ocutil.LocalOrchestratorURL {
		log.Critical("non-default orchestrator url set for autopilot orchestration mode")
		return 1
	}

	if _etherFlags.AutopilotConfig != "" {
		cfg, err := config.NewFromFile(_etherFlags.AutopilotConfig)
		if err != nil {
			panic(log.Critical(err))
		}
		autopilotOrchestrator, err := ocutil.NewAutopilotOrchestrator(cfg)
		if err != nil {
			panic(log.Critical(err))
		}
		log.Info("Starting autopilot-mode orchestrator")
		go autopilotOrchestrator.Start()
	}

	var etherInspector inspector.EthernetInspector
	if useHookSwitch {
		etherInspector = &inspector.HookSwitchInspector{
			OrchestratorURL:   _etherFlags.OrchestratorURL,
			EntityID:          _etherFlags.EntityID,
			HookSwitchZMQAddr: _etherFlags.HookSwitchZMQAddr,
			EnableTCPWatcher:  true,
		}
	} else {
		etherInspector = &inspector.NFQInspector{
			OrchestratorURL:  _etherFlags.OrchestratorURL,
			EntityID:         _etherFlags.EntityID,
			NFQNumber:        uint16(_etherFlags.NFQNumber),
			EnableTCPWatcher: true,
		}
	}

	if err := etherInspector.Start(); err != nil {
		panic(log.Critical(err))
	}

	// NOTREACHED
	return 0
}
func checkConfigAndDie() {
	if len(config.AsteriskAddr) == 0 {
		log.Critical("Asterisk address is null or empty. Please check the configuration file.\n")
		log.Flush()
		os.Exit(1)
	}
	if len(config.AsteriskUser) == 0 || len(config.AsteriskPassword) == 0 {
		log.Critical("Asterisk credentials missing. Please check the configuration file.\n")
		log.Flush()
		os.Exit(1)
	}
	if config.TestCallActive && config.TestCallSchedule < 30 {
		log.Criticalf("Asterisk testing interval is too short: %d. Minimum value is 30 seconds. Please check the configuration file.\n", config.TestCallSchedule)
		log.Flush()
		os.Exit(1)
	}
}
// Register an endpoint with the registry
func Register(eps ...*Endpoint) (err error) {
	if !initialised {
		log.Critical("Server must be initialised before you can register an endpoint")
		cleanupLogs()
		os.Exit(2)
	}

	for _, ep := range eps {
		if err = reg.add(ep); err != nil {
			log.Criticalf("Error registering endpoint, %v: %v", ep.Name, err)
			log.Flush()
			os.Exit(2)
		}
		log.Infof("[Server] Registered endpoint: %s", ep.Name)
	}
	return nil
}
func (d *Dumb) dequeueEventRoutine() {
	for {
		qItem := <-d.queueDeqCh
		event := qItem.Value().(signal.Event)
		action, err := event.DefaultAction()
		if err != nil {
			panic(log.Critical(err))
		}
		d.nextActionChan <- action
	}
}
func Run(args []string) int {
	dockerOpt, removeOnExit, eqCfg, err := prepare(args)
	if err != nil {
		// do not panic here
		fmt.Fprintf(os.Stderr, "%s\n", err)
		return 1
	}

	client, err := container.NewDockerClient()
	if err != nil {
		panic(log.Critical(err))
	}

	containerExitStatusChan := make(chan error)
	c, err := container.Boot(client, dockerOpt, containerExitStatusChan)
	if err == docker.ErrNoSuchImage {
		log.Critical(err)
		// TODO: pull the image automatically
		log.Infof("You need to run `docker pull %s`", dockerOpt.Config.Image)
		return 1
	} else if err != nil {
		panic(log.Critical(err))
	}
	if removeOnExit {
		defer container.Remove(client, c)
	}

	err = core.StartEarthquakeRoutines(c, eqCfg)
	if err != nil {
		panic(log.Critical(err))
	}

	err = <-containerExitStatusChan
	if err != nil {
		// do not panic here
		log.Error(err)
	}
	log.Debugf("Exiting..")

	// TODO: propagate err
	return 0
}
// dequeue event, determine corresponding action, and put the action to nextActionChan
func (r *Random) dequeueEventRoutine() {
	for {
		qItem := <-r.queueDeqCh
		event := qItem.Value().(signal.Event)
		action, err := r.makeActionForEvent(event)
		log.Debugf("RANDOM: Determined action %#v for event %#v", action, event)
		if err != nil {
			panic(log.Critical(err))
		}
		r.nextActionChan <- action
	}
}
func Init(host string, name string) *DB {
	var err error
	db := new(DB)
	db.session, err = mgo.Dial(host)
	if err != nil {
		log.Critical(err)
		os.Exit(1)
	}
	db.name = name
	db.initIndexes()
	return db
}
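// Usage sketch for Init (illustrative; the host and database name below are
// assumptions, not values from the original source).
func exampleInit() {
	db := Init("localhost:27017", "phosphor")
	defer db.session.Close()
	log.Infof("connected to database %s", db.name)
}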
func (p *Broadcast) start() {
	//p.terminal = newTerminal()
	go func(p *Broadcast) {
		defer func() {
			if e := recover(); e != nil {
				log.Critical(e)
			}
			log.Info("Broadcast " + p.path + " stopped")
		}()
		log.Info("Broadcast " + p.path + " started")
		for {
			select {
			case amsg := <-p.producer.audiochan:
				for _, s := range p.consumers {
					err := s.SendAudio(amsg.Clone())
					if err != nil {
						notifyError(s, err)
					}
				}
			case vmsg := <-p.producer.videochan:
				for _, s := range p.consumers {
					err := s.SendVideo(vmsg.Clone())
					if err != nil {
						notifyError(s, err)
					}
				}
			case obj := <-p.control:
				if c, ok := obj.(*RtmpNetStream); ok {
					if c.closed {
						delete(p.consumers, c.conn.remoteAddr)
						log.Debugf("Broadcast %v consumers %v", p.path, len(p.consumers))
					} else {
						p.consumers[c.conn.remoteAddr] = c
						log.Debugf("Broadcast %v consumers %v", p.path, len(p.consumers))
					}
				} else if v, ok := obj.(string); ok && "stop" == v {
					for k, ss := range p.consumers {
						delete(p.consumers, k)
						ss.Close()
					}
					return
				}
			case <-time.After(time.Second * 90):
				log.Warn("Broadcast " + p.path + " Video | Audio Buffer Empty, Timeout 90s")
				p.stop()
				p.producer.Close()
				return
			}
		}
	}(p)
}
func main() {
	flag.Parse()
	logger, err := log.LoggerFromConfigAsFile("seelog.xml")
	if err != nil {
		log.Critical("error parsing config file: ", err)
		return
	}
	log.ReplaceLogger(logger)

	count = readFile(*bidResFile)

	http.HandleFunc(*path, DspServe)
	if err := http.ListenAndServe(":"+*port, nil); err != nil {
		log.Critical(err)
	}
}
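// A minimal seelog.xml that LoggerFromConfigAsFile above would accept; this is
// an illustrative sketch, not the project's actual configuration file:
//
//	<seelog minlevel="info">
//	    <outputs>
//	        <console/>
//	    </outputs>
//	</seelog>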
func sender(data io.Writer, send chan Encoder, response chan Packet) {
	for p := range send {
		gotResponse := make(chan struct{})
		go waitForResponse(gotResponse, response)

		_, err := data.Write(p.Encode())
		if err != nil {
			log.Critical(err)
		}

		// Don't send the next packet until we have a response to the last one
		<-gotResponse
	}
}
func checkErr(i int, err error) {
	if err != nil {
		switch i {
		case 1:
			log.Critical(err)
		case 2:
			log.Warn(err)
		default:
			log.Info(err)
		}
	}
	log.Flush()
}
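// Usage sketch for checkErr (illustrative; the errors below are fabricated and
// the standard errors package is assumed to be imported). Severity 1 logs at
// critical, 2 at warn, anything else at info; a nil error only triggers the flush.
func exampleCheckErr() {
	checkErr(1, errors.New("cannot open database"))
	checkErr(2, errors.New("cache miss"))
	checkErr(3, nil)
}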
func main() {
	kingpin.CommandLine.Help = "Docker container EC2 metadata service."
	kingpin.Parse()

	defer log.Flush()
	configureLogging(*verboseOpt)

	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		panic(err)
	}

	containerService := NewContainerService(dockerClient(), *defaultRole, auth)

	// Proxy non-credentials requests to primary metadata service
	http.HandleFunc("/", logHandler(func(w http.ResponseWriter, r *http.Request) {
		match := credsRegex.FindStringSubmatch(r.URL.Path)
		if match != nil {
			handleCredentials(match[1], match[2], containerService, w, r)
			return
		}

		proxyReq, err := http.NewRequest(r.Method, fmt.Sprintf("%s%s", baseUrl, r.URL.Path), r.Body)
		if err != nil {
			log.Error("Error creating proxy http request: ", err)
			http.Error(w, "An unexpected error occurred communicating with Amazon", http.StatusInternalServerError)
			return
		}

		copyHeaders(proxyReq.Header, r.Header)
		resp, err := instanceServiceClient.RoundTrip(proxyReq)
		if err != nil {
			log.Error("Error forwarding request to EC2 metadata service: ", err)
			http.Error(w, "An unexpected error occurred communicating with Amazon", http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()

		copyHeaders(w.Header(), resp.Header)
		w.WriteHeader(resp.StatusCode)
		if _, err := io.Copy(w, resp.Body); err != nil {
			log.Warn("Error copying response content from EC2 metadata service: ", err)
		}
	}))

	log.Critical(http.ListenAndServe(*serverAddr, nil))
}
func Serial(send chan Encoder, recv chan Packet) {
	c := &serial.Config{Name: "/dev/ttyUSB0", Baud: 57600}
	s, err := serial.OpenPort(c)
	if err != nil {
		log.Critical(err)
		return
	}

	response := make(chan Packet, 100)

	go readPackets(s, func(data []byte) {
		reciever(data, recv, response)
	})
	go sender(s, send, response)
}
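// Wiring sketch for Serial (illustrative; the buffer sizes and the handling of
// received packets below are assumptions, not part of the original source).
func exampleSerial() {
	send := make(chan Encoder, 10)
	recv := make(chan Packet, 10)
	Serial(send, recv)
	for p := range recv {
		log.Debugf("received packet: %#v", p)
	}
}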
func (ms *Mouse) CreatePointer(fb *fbdev.Framebuffer, imp chan int64) *MousePointer {
	f, err := os.Open("data/cursor/arrow.png")
	if err != nil {
		log.Critical(err)
		return nil
	}
	defer f.Close()

	img, _, err := image.Decode(bufio.NewReader(f))
	if err != nil {
		log.Critical(err)
		return nil
	}

	mp := &MousePointer{
		fb: fb,
	}

	w := img.Bounds().Max.X
	h := img.Bounds().Max.Y

	mp.Element = base.Element{
		X:          int(fb.Vinfo.Xres / 2),
		Y:          int(fb.Vinfo.Yres / 2),
		Width:      w,
		Height:     h,
		Buffer:     make([]byte, w*h*4),
		InvMsgPipe: imp,
	}

	ms.RegisterMousePointer(mp.mouse)

	gfx.DrawSrc(&mp.Element, img, 0, 0, w, h)

	return mp
}
func (r *Random) QueueNextEvent(event signal.Event) {
	minInterval := r.MinInterval
	maxInterval := r.MaxInterval
	_, prioritized := r.PrioritizedEntities[event.EntityID()]
	if prioritized {
		// FIXME: magic coefficient for prioritizing (decrease intervals)
		minInterval = time.Duration(float64(minInterval) * 0.8)
		maxInterval = time.Duration(float64(maxInterval) * 0.8)
	}
	item, err := queue.NewBasicTBQueueItem(event, minInterval, maxInterval)
	if err != nil {
		panic(log.Critical(err))
	}
	r.queue.Enqueue(item)
}