func main() { // options var bind, backend, logTo string var buffer uint var daemon bool flag.StringVar(&bind, "bind", ":8002", "locate ip and port") flag.StringVar(&backend, "backend", "127.0.0.1:8003", "backend server ip and port") flag.StringVar(&logTo, "logTo", "stdout", "stdout or syslog") flag.UintVar(&buffer, "buffer", 4096, "buffer size") flag.BoolVar(&daemon, "daemon", false, "run as daemon process") flag.Parse() log.SetOutput(os.Stdout) if logTo == "syslog" { w, err := syslog.New(syslog.LOG_INFO, "portproxy") if err != nil { log.Fatal(err) } log.SetOutput(w) } if daemon == true { godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } p := New(bind, backend, uint32(buffer)) log.Println("portproxy started.") go p.Start() waitSignal() }
func Daemonize(logFilePath, pidFilePath string) { logFile, err := os.OpenFile(logFilePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) if err != nil { fmt.Fprintln(os.Stderr, "Could not open local log file: %v", err) os.Exit(1) } stdout, stderr, err := godaemon.MakeDaemon(&godaemon.DaemonAttr{CaptureOutput: true}) if err != nil { fmt.Fprintln(os.Stderr, "Could not Daemonize: %v", err) os.Exit(1) } pidFile, err := os.Create(pidFilePath) if err == nil { defer pidFile.Close() _, err = fmt.Fprintln(pidFile, os.Getpid()) } if err != nil { fmt.Fprintln(os.Stderr, "Could not write PID file: %v", err) os.Exit(1) } go func() { io.Copy(logFile, stdout) }() go func() { io.Copy(logFile, stderr) }() }
func main() { conf := parseConf() godaemon.MakeDaemon(&godaemon.DaemonAttr{}) initLog(fmt.Sprintf("%s/vstack.log", conf.LogDir)) go registerSignal() go pingLoop(conf.Dest) log.Printf("INFO: Starting process loop ...") start(conf) }
// Init() initializes a context from a configuration file into an // existing context struct func Init(path string) (ctx Context, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("Init() -> %v", e) } ctx.Channels.Log <- mig.Log{Desc: "leaving Init()"}.Debug() }() err = gcfg.ReadFileInto(&ctx, path) if err != nil { panic(err) } // daemonize unless logging is set to stdout if ctx.Logging.Mode != "stdout" { godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } ctx, err = initChannels(ctx) if err != nil { panic(err) } ctx.Logging, err = mig.InitLogger(ctx.Logging) if err != nil { panic(err) } ctx, err = initDirectories(ctx) if err != nil { panic(err) } ctx, err = initDB(ctx) if err != nil { panic(err) } ctx, err = initBroker(ctx) if err != nil { panic(err) } return }
/* {{{ func (mux *Mux) Run() * Run ogo application. */ func (mux *Mux) Run() { defer func() { if err := recover(); err != nil { WriteMsg("App crashed with error:", err) for i := 1; ; i++ { _, file, line, ok := runtime.Caller(i) if !ok { break } WriteMsg(file, line) } //panic要输出到console fmt.Println("App crashed with error:", err) } }() if env.Daemonize { godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } //check&write pidfile, added by odin dir := filepath.Dir(env.PidFile) if _, err := os.Stat(dir); err != nil { if os.IsNotExist(err) { //mkdir if err := os.Mkdir(dir, 0755); err != nil { panic(err) } } } if l, err := lockfile.New(env.PidFile); err == nil { if le := l.TryLock(); le != nil { panic(le) } } else { panic(err) } Warn("Starting Ogo(http mode) on: %s", env.Port) // in goji appengine mode (tags --appengine) goji.Serve() }
func main() { HTTPClient = http.Client{} kingpin.Parse() log.Print("Starting hostsplitter") if *logFileLoc != "stdout" { logFile, err := os.OpenFile(*logFileLoc, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0640) defer logFile.Close() if err != nil { log.Fatalf("error opening file: %v", err) } log.Print("Using ", *logFileLoc, " for logging") log.SetOutput(logFile) } if *daemonize { log.Print("Daemonizing... Bye Bye") godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } LoadConfig() go SignalHandler() log.Fatal(http.ListenAndServe(*bindAddr, &httputil.ReverseProxy{ Director: func(r *http.Request) { HTTPLogger(r) if i, ok := routedHostnames[string(r.Host)]; ok { r.Header.Set("Hostsplitter-Secret", Sites[i].Secret) r.Header.Set("Host", r.Host) r.URL.Scheme = "http" r.URL.Host = Sites[i].GetBackend() r.RequestURI = "" } else { log.Print("%q is not routed", r.Host) } }, })) }
func Daemonize(logFilePath, pidFilePath string) { if os.Getenv("__DAEMON_CWD") == "" { cwd, err := os.Getwd() if err != nil { fmt.Fprintln(os.Stderr, "Cannot determine working directory: %v", err) os.Exit(1) } os.Setenv("__DAEMON_CWD", cwd) } logFile, err := os.OpenFile(logFilePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) if err != nil { fmt.Fprintln(os.Stderr, "Could not open local log file: %v", err) os.Exit(1) } stdout, stderr, err := godaemon.MakeDaemon(&godaemon.DaemonAttr{CaptureOutput: true}) if err != nil { fmt.Fprintln(os.Stderr, "Could not Daemonize: %v", err) os.Exit(1) } go func() { io.Copy(logFile, stdout) }() go func() { io.Copy(logFile, stderr) }() lock, err := lockfile.New(pidFilePath) err = lock.TryLock() if err != nil { fmt.Println("Cannot lock \"%v\": %v", lock, err) os.Exit(1) } }
func main() { godaemon.MakeDaemon(&godaemon.DaemonAttr{}) os.Exit(0) }
// makeDaemon forks the current process into the background using
// godaemon's default attributes. The returned stdout/stderr readers
// and error are intentionally discarded (best-effort daemonization).
func makeDaemon() {
	attr := godaemon.DaemonAttr{}
	godaemon.MakeDaemon(&attr)
}
func main() { flag.Usage = func() { fmt.Fprintf(os.Stderr, usage, os.Args[0]) os.Exit(2) } flag.Parse() if *printVersion { fmt.Println(version) os.Exit(0) } appLogger, err := setLogger(os.Getenv("AMIQUERY_APP_LOGFILE")) if err != nil { log.Fatalln("Unable to set application logging output:", err) } log.SetOutput(appLogger) httpLogger, err := setLogger(os.Getenv("AMIQUERY_HTTP_LOGFILE")) if err != nil { log.Fatalln("Unable to set HTTP logging output:", err) } cfg, err := NewConfig() if err != nil { log.Fatal(err) } log.Println("Loaded configuration") cacheMgr, err := amicache.NewManager(cfg.Manager, amicache.TTL(cfg.CacheTTL), amicache.Regions(cfg.Regions...), amicache.OwnerIDs(cfg.OwnerIDs...), amicache.AssumeRole(cfg.RoleARN), amicache.HTTPClient(&http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, }), ) if err != nil { log.Fatal(err) } if *daemonize { log.Println("Daemonizing process...") godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } cacheMgr.Start() defer cacheMgr.Stop() // Create a context and add the cache manager ctx := context.Background() ctx = context.WithValue(ctx, api.CacheManagerKey, cacheMgr) // Version 1 of the REST API v1 := &api.ContextAdapter{ Context: ctx, Handler: api.ContextHandlerFunc(v1.Handler), } router := mux.NewRouter() // Add the version 1 handler and set it as the default (Accept: */*) router.Handle("/amis", handlers.CombinedLoggingHandler(httpLogger, v1)). HeadersRegexp("Accept", `(application/vnd\.ami-query-v1\+json|\*/\*)`). Methods("GET") http.Handle("/", router) if err := http.ListenAndServe(cfg.ListenAddr, nil); err != nil { log.Fatal(err) } }
// MakeDaemon detaches the current process using default daemon
// attributes and returns the child's captured stdout and stderr
// readers together with any error reported by godaemon.
func MakeDaemon() (io.Reader, io.Reader, error) {
	attr := godaemon.DaemonAttr{}
	return godaemon.MakeDaemon(&attr)
}
/* {{{ func (mux *Mux) Run() * Run ogo application. */ func (mux *Mux) Run() { defer func() { if err := recover(); err != nil { WriteMsg("App crashed with error:", err) for i := 1; ; i++ { _, file, line, ok := runtime.Caller(i) if !ok { break } WriteMsg(file, line) } //panic要输出到console fmt.Println("App crashed with error:", err) } }() if env.Daemonize { godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } //check&write pidfile, added by odin dir := filepath.Dir(env.PidFile) if _, err := os.Stat(dir); err != nil { if os.IsNotExist(err) { //mkdir if err := os.Mkdir(dir, 0755); err != nil { panic(err) } } } if l, err := lockfile.New(env.PidFile); err == nil { if le := l.TryLock(); le != nil { panic(le) } } else { panic(err) } var mainErr error Debug("will run worker: %v", env.Worker) if worker, ok := DMux.Workers[env.Worker]; ok { vw := reflect.New(worker.WorkerType) execWorker, ok := vw.Interface().(WorkerInterface) if !ok { panic("worker is not WorkerInterface") } //Init execWorker.Init(DMux, env.Worker) //Main mainErr = execWorker.Main() } else { mainErr = fmt.Errorf("not found worker: %s", env.Worker) } if mainErr != nil { panic(mainErr) } //睡一段时间再结束 time.Sleep(1000 * time.Microsecond) }
// Init prepare the AMQP connections to the broker and launches the // goroutines that will process commands received by the MIG Scheduler func Init(foreground bool) (ctx Context, err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("initAgent() -> %v", e) } ctx.Channels.Log <- mig.Log{Desc: "leaving initAgent()"}.Debug() }() // find out current working dir and build the bin path // it's important to do that before we daemonize, other the cwd will be / cdir, err := os.Getwd() if err != nil { panic(err) } ctx.Agent.BinPath = cdir + "/" + os.Args[0] // daemonize, and force logging to stdout if !foreground && LOGGINGCONF.Mode != "stdout" { LOGGINGCONF.Mode = "stdout" godaemon.MakeDaemon(&godaemon.DaemonAttr{}) } // store heartbeat frequency ctx.Sleeper = HEARTBEATFREQ if err != nil { panic(err) } // create the go channels ctx, err = initChannels(ctx) if err != nil { panic(err) } // initiate logging configuration ctx.Logging, err = mig.InitLogger(LOGGINGCONF) if err != nil { panic(err) } // Goroutine that handles events, such as logs and panics, // and decides what to do with them go func() { for event := range ctx.Channels.Log { stop, err := mig.ProcessLog(ctx.Logging, event) if err != nil { fmt.Println("Unable to process logs") } // if ProcessLog says we should stop now, feed the Terminate chan if stop { ctx.Channels.Terminate <- fmt.Errorf(event.Desc) } } }() // retrieve information on agent environment ctx, err = initAgentEnv(ctx) if err != nil { panic(err) } // connect to the message broker ctx, err = initMQ(ctx) if err != nil { panic(err) } return }
func main() { var logfile *os.File var err error // check root if os.Geteuid() != 0 { fmt.Println("please run as root!") os.Exit(1) } daemonize := true if godaemon.Stage() == godaemon.StageParent { parseArgs(&daemonize) if daemonize { logfile, err = os.OpenFile(LOG_FILE, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { fmt.Printf("[ERROR] error opening log file: %v", err) os.Exit(1) } err = syscall.Flock(int(logfile.Fd()), syscall.LOCK_EX) if err != nil { fmt.Printf("[ERROR] error acquiring lock to log file: %v", err) os.Exit(1) } } } if daemonize { _, _, err = godaemon.MakeDaemon(&godaemon.DaemonAttr{Files: []**os.File{&logfile}}) if err != nil { fmt.Printf("[ERROR] error daemonizing: %v", err) os.Exit(1) } defer logfile.Close() log.SetOutput(logfile) parseArgs(&daemonize) } log.SetFlags(log.LstdFlags) log.Println("#################### BEGIN OF LOG ##########################") // getting machine name id, err := os.Hostname() if err != nil { log.Fatalln("[ERROR] unable to get system id: ", err) } machine_name = strings.TrimSpace(string(id)) log.Println("[INFO] machine name: ", machine_name) // influxdb clients args := flag.Args() clients = make([]*influxdb.Client, len(args)) index := 0 for _, arg := range args { fields := strings.Split(arg, ":") if len(fields) < 2 || len(fields) == 3 || len(fields) > 4 { log.Printf("[WARN] unable to parse %s!\n", arg) continue } host, err := url.Parse(fmt.Sprintf("http://%s:%s", fields[0], fields[1])) if err != nil { log.Printf("[WARN] unable to parse %s!\n", arg) continue } var conf influxdb.Config if len(fields) == 4 { conf = influxdb.Config{ URL: *host, Username: fields[2], Password: fields[3], } } else { conf = influxdb.Config{URL: *host} } client, err := influxdb.NewClient(conf) if err != nil { log.Printf("[WARN] unable to create influxdb client for %s!\n", arg) continue } clients[index] = client index += 1 log.Println("[INFO] adding influxdb client: ", arg) } if index == 0 { log.Fatalln("[ERROR] no client is parsable, 
exiting!") } sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) log.Println("[INFO] adding signal handler for SIGTERM") docker, err := dockerclient.NewClientFromEnv() if err != nil { log.Fatalln("[ERROR] unable to create docker client, exiting!") } dokchan := make(chan *dockerclient.APIEvents, 100) waitchan := make(chan bool, 1) err = docker.AddEventListener(dokchan) if err != nil { log.Fatalln("[ERROR] unable to add docker event listener, exiting!") } go dockerListener(docker, dokchan, waitchan) // wait for stop signal and then cleanup _ = <-sigs log.Println("[INFO] beginning cleanup!") docker.RemoveEventListener(dokchan) dokchan <- dockerclient.EOFEvent <-waitchan }