func (g *game) captureSignals() {
	// Buffer the channel: signal.Notify does not block on send, so an
	// unbuffered channel can silently drop a signal.
	g.sigs = make(chan os.Signal, 1)
	g.pauseLoop = make(chan struct{})
	signal.Notify(g.sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGTSTP)
	go func() {
		for {
			s := <-g.sigs
			g.Lock()
			if !g.pausedLoop {
				g.pauseLoop <- struct{}{}
			}
			g.cleanup()
			if s.String() != syscall.SIGTSTP.String() {
				os.Stdout.WriteString("\n")
				os.Exit(0)
			}
			// SIGTSTP: pause input, restore the default handler and re-raise
			// the signal so the shell actually suspends the process, then
			// redraw everything once the process is resumed.
			g.pauseInput <- struct{}{}
			signal.Reset(syscall.SIGTSTP)
			syscall.Kill(os.Getpid(), syscall.SIGTSTP)
			signal.Notify(g.sigs, syscall.SIGINT, syscall.SIGTERM, syscall.SIGTSTP)
			g.setTTY()
			g.printGround()
			g.printAllSnakes()
			g.printAllFood()
			g.moveTo(position{g.h - 1, g.w - 1})
			g.Unlock()
			g.pauseInput <- struct{}{}
		}
	}()
}
// Serve listens for and handles API calls. It is a blocking function.
func (srv *Server) Serve() error {
	// Stop the server if a kill signal is caught. The channel is buffered so
	// a signal delivered before the goroutine is receiving is not dropped.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt, os.Kill)
	defer signal.Reset(os.Interrupt, os.Kill)
	go func() {
		<-sigChan
		fmt.Println("\rCaught stop signal, quitting...")
		srv.listener.Close()
	}()

	// The server will run until an error is encountered or the listener is
	// closed, via either the Close method or the signal handling above.
	// Closing the listener will result in the benign error handled below.
	err := srv.apiServer.Serve(srv.listener)
	if err != nil && !strings.HasSuffix(err.Error(), "use of closed network connection") {
		return err
	}

	// safely close each module
	if srv.cs != nil {
		srv.cs.Close()
	}
	if srv.gateway != nil {
		srv.gateway.Close()
	}
	if srv.wallet != nil {
		srv.wallet.Lock()
	}
	return nil
}
// Folder takes an initial screenshot into dest, estimates disk usage from it,
// and then keeps taking a screenshot every interv seconds until an interrupt
// or kill signal is received.
func Folder(dest string, interv int) {
	sigs := make(chan os.Signal, 2)
	signal.Notify(sigs, os.Interrupt, os.Kill)
	tick := time.Tick(time.Duration(interv) * time.Second)

	callScrot(dest, time.Now())
	pic1, _ := ioutil.ReadDir(dest)
	var size1 float64
	for _, f := range pic1 {
		if n := f.Size(); n > 4096 {
			size1 = float64(n) / float64(interv)
			break
		}
	}
	estimate(size1)

	numpic := int64(len(pic1))
	fmt.Printf("picture #%010d taken\n", numpic)
	numpic++

scrotloop:
	for {
		select {
		case <-sigs:
			break scrotloop
		case t := <-tick:
			callScrot(dest, t)
			fmt.Printf("picture #%010d taken\n", numpic)
			numpic++
		}
	}
	signal.Reset(os.Interrupt, os.Kill)
	fmt.Println("")
}
func (t *tether) stopReaper() {
	defer trace.End(trace.Begin("Shutting down child reaping"))

	// stop child reaping
	log.Info("Shutting down reaper")
	signal.Reset(syscall.SIGCHLD)
	close(t.incoming)
}
// Notify delivers s to path via notifyNosig, ignoring SIGPIPE for the
// duration of the call so a broken pipe cannot terminate the process.
func Notify(path, s string) (int, error) {
	notifyLock.Lock()
	signal.Ignore(syscall.SIGPIPE)
	defer func() {
		signal.Reset(syscall.SIGPIPE)
		notifyLock.Unlock()
	}()
	return notifyNosig(path, s)
}
func (this *CommandServer) waitForSignal() {
	for {
		select {
		case <-this.signalChan:
			signal.Reset(syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
			this.Stop()
			return
		}
	}
}
func TestForeground(t *testing.T) {
	signal.Ignore(syscall.SIGTTIN, syscall.SIGTTOU)

	tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
	if err != nil {
		t.Skipf("Can't test Foreground. Couldn't open /dev/tty: %s", err)
	}

	fpgrp := 0

	errno := syscall.Ioctl(tty.Fd(), syscall.TIOCGPGRP, uintptr(unsafe.Pointer(&fpgrp)))
	if errno != 0 {
		t.Fatalf("TIOCGPGRP failed with error code: %s", errno)
	}

	if fpgrp == 0 {
		t.Fatalf("Foreground process group is zero")
	}

	ppid, ppgrp := parent()

	cmd := create(t)

	cmd.proc.SysProcAttr = &syscall.SysProcAttr{
		Ctty:       int(tty.Fd()),
		Foreground: true,
	}
	cmd.Start()

	cpid, cpgrp := cmd.Info()

	if cpid == ppid {
		t.Fatalf("Parent and child have the same process ID")
	}

	if cpgrp == ppgrp {
		t.Fatalf("Parent and child are in the same process group")
	}

	if cpid != cpgrp {
		t.Fatalf("Child's process group is not the child's process ID")
	}

	cmd.Stop()

	// Restore the terminal's foreground process group and default signal handling.
	errno = syscall.Ioctl(tty.Fd(), syscall.TIOCSPGRP, uintptr(unsafe.Pointer(&fpgrp)))
	if errno != 0 {
		t.Fatalf("TIOCSPGRP failed with error code: %s", errno)
	}

	signal.Reset()
}
// runOnChan gives control of modchan to the caller, triggering the daemon
// world for each modification received on it.
func (mr *ModRunner) runOnChan(modchan chan *moddwatch.Mod, readyCallback func()) error {
	dworld, err := NewDaemonWorld(mr.Config, mr.Log)
	if err != nil {
		return err
	}
	defer dworld.Shutdown(os.Kill)

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill)
	defer signal.Reset(os.Interrupt, os.Kill)
	go func() {
		dworld.Shutdown(<-c)
		os.Exit(0)
	}()

	watchpaths := mr.Config.WatchPatterns()
	if mr.ConfReload {
		watchpaths = append(watchpaths, filepath.Dir(mr.ConfPath))
	}

	// FIXME: This takes a long time. We could start it in parallel with the
	// first process run in a goroutine
	watcher, err := moddwatch.Watch(watchpaths, lullTime, modchan)
	if err != nil {
		return fmt.Errorf("Error watching: %s", err)
	}
	defer watcher.Stop()

	mr.trigger(nil, dworld)
	go readyCallback()
	for mod := range modchan {
		if mod == nil {
			break
		}
		if mr.ConfReload && mod.Has(mr.ConfPath) {
			mr.Log.Notice("Reloading config %s", mr.ConfPath)
			err := mr.ReadConfig()
			if err != nil {
				mr.Log.Warn("%s", err)
				continue
			} else {
				return nil
			}
		}
		mr.Log.SayAs("debug", "Delta: \n%s", mod.String())
		mr.trigger(mod, dworld)
	}
	return nil
}
/* Read non-empty password from terminal */
func GetPass(prompt string) (pass string, err error) {
	var resp []byte

	fd, err := getTerminalFd()
	if err != nil {
		return
	}

	/* Store current terminal state in case the call gets interrupted by signal */
	oldState, err := terminal.GetState(fd)
	if err != nil {
		err = errors.Errorf("failed to get terminal state: %s\n", err)
		return
	}

	/*
	 * Install signal handler
	 * Unlike the ReadLine function, using a raw terminal state here does not help.
	 * If the prompt gets killed by a signal, the terminal state is not restored.
	 * Hence restore it in a signal handler
	 */
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGINT)
	go func() {
		<-c
		terminal.Restore(fd, oldState)
		fmt.Fprintln(os.Stderr, "aborting")
		os.Exit(1)
	}()

	for i := 0; len(resp) == 0; i++ {
		if i > 0 {
			fmt.Printf("\rInvalid response - try again")
			time.Sleep(500 * time.Millisecond)
		}
		/* Clear line - see https://en.wikipedia.org/wiki/ANSI_escape_code */
		fmt.Printf("\r\033[2K%s: ", prompt)

		/* This function internally takes care of restoring terminal state. */
		resp, err = terminal.ReadPassword(fd)
		if err != nil {
			return
		}
		resp = bytes.TrimSpace(resp)
	}

	/* Restore signal handling */
	signal.Stop(c)
	signal.Reset(syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGINT)
	return string(resp), nil
}
func (t *tether) stopReaper() {
	defer trace.End(trace.Begin("Shutting down child reaping"))

	// Ordering is important: otherwise one goroutine may be closing the
	// channel while the other goroutine is still trying to write to it.
	log.Debugf("Removing the signal notifier")
	signal.Reset(syscall.SIGCHLD)

	// Just closing the incoming channel is not going to stop the iteration,
	// so we use the context cancellation to signal it.
	t.cancel()

	log.Debugf("Closing the reapers signal channel")
	close(t.incoming)
}
func deregisterOnShutdown(client Client, app env.App, ctx context.Context, cancel context.CancelFunc, exit func(int)) {
	sigChan := make(chan os.Signal, 1)
	signal.Reset(syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM)
	go func() {
		select {
		case <-sigChan:
			cancel()
			client.Deregister(app)
		case <-ctx.Done():
			client.Deregister(app)
		}
		exit(1)
	}()
}
func reloadConfig(config *Config) *Config {
	signalLock.Lock()
	defer signalLock.Unlock()
	log.Printf("Reloading configuration.\n")
	newConfig, err := loadConfig()
	if err != nil {
		log.Printf("Could not reload config: %v\n", err)
		return nil
	}
	// stop advertising the existing services so that we can
	// make sure we update them if ports, etc. change.
	stopPolling(config)
	forAllServices(config, func(service *ServiceConfig) {
		log.Printf("Deregistering service: %s\n", service.Name)
		service.Deregister()
	})
	signal.Reset()
	handleSignals(newConfig)
	handlePolling(newConfig)

	return newConfig // return for debuggability
}
// CatchSignal invokes f and exits when sig is received. The returned function
// cancels the handler and restores the default behaviour for sig.
func CatchSignal(sig os.Signal, f func()) func() {
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, sig)
	go func() {
		for {
			select {
			case _, ok := <-signalChan:
				if ok {
					f()
					os.Exit(1)
				} else {
					return
				}
			}
		}
	}()
	return func() {
		signal.Reset(sig)
		signal.Stop(signalChan)
	}
}
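A minimal usage sketch for the helper above; the caller function, the cleanup body, and the choice of SIGTERM are illustrative assumptions, not part of the original snippet.

// Hypothetical caller of CatchSignal: run cleanup when SIGTERM arrives,
// and disarm the handler once the protected work has finished normally.
func exampleCatchSignalUsage() {
	stop := CatchSignal(syscall.SIGTERM, func() {
		fmt.Println("SIGTERM received, cleaning up")
	})
	defer stop()

	// ... work that should trigger cleanup if the process is terminated ...
}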
func Main(start func(args []string, errorSink ErrorSink) Daemon) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	sigs := make(chan os.Signal, 2)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)

	errorSink := NewErrorSink()
	d := start(os.Args, errorSink)

	exitCode := 0
	var exitSignal os.Signal
	select {
	case err := <-errorSink:
		if err != flag.ErrHelp {
			fmt.Fprintln(os.Stderr, err)
			exitCode = 1
		}
	case exitSignal = <-sigs:
	}

	if d != nil {
		d.Stop()
	}

	if sig, ok := exitSignal.(syscall.Signal); ok {
		// Now we have cleaned up, re-kill the process with
		// the signal in order to produce a signal exit
		// status:
		signal.Reset(sig)
		syscall.Kill(syscall.Getpid(), sig)
	} else if exitSignal != nil {
		fmt.Fprintln(os.Stderr, "Exiting with signal ", exitSignal)
	}

	os.Exit(exitCode)
}
// ResetSIGIO stops catching SIGIO signals.
//export ResetSIGIO
func ResetSIGIO() {
	signal.Reset(syscall.SIGIO)
}
func main() { fmt.Println("MNK Agent v2") if gomoku { m = 19 n = 19 k = 5 } var err error board, err = NewMNKBoard(m, n, k) if err != nil { fmt.Println(err) os.Exit(1) } rand.Seed(time.Now().UTC().UnixNano()) readKnowledgeOK := rlKnowledge.loadFromFile(rlModelFile) if rlModelStatusMode { if !readKnowledgeOK { return } fmt.Println("Reinforcement learning model report") fmt.Printf("Iterations: %d\n", rlKnowledge.Iterations) fmt.Printf("Learned states: %d\n", len(rlKnowledge.Values)) var max float64 = 0 var min float64 = 0 for _, v := range rlKnowledge.Values { if v > max { max = v } else if v < min { min = v } } fmt.Printf("Maximum value: %f\n", max) fmt.Printf("Minimum value: %f\n", min) return } if rlTrainingMode > 0 { // Register SIGINT handler sigint = make(chan os.Signal, 1) signal.Notify(sigint, os.Interrupt) go func(c <-chan os.Signal) { <-c flags["terminate"] = true signal.Reset(os.Interrupt) }(sigint) defer close(sigint) // Start training loop log := train(rlTrainingMode) printStats(log, true) return } fmt.Printf("? > How many rounds shall we play? ") _, err = fmt.Scanln(&rounds) if err != nil { fmt.Println("\n[error] Shit happened!") panic(err) } fmt.Println("Great! Have fun.") log := play(rounds) printStats(log, false) }
func main() {
	InitMagick()

	/* here we ensure that go's signal handlers don't interfere. We have to
	   shut down graphicsmagick correctly or crash */
	signal_chan := make(chan os.Signal, 1)
	// Blow away go's handlers
	signal.Reset(syscall.SIGTERM, syscall.SIGINT)
	signal.Notify(signal_chan, syscall.SIGTERM, syscall.SIGINT)
	go func() {
		<-signal_chan
		// clean up graphicsmagick's memory / event loops
		CloseMagick()
		os.Exit(1)
	}()

	r := mux.NewRouter()
	readSettings()

	var factory *ImageFactory
	if Conf.S3 {
		factory = NewS3ImageFactory(Conf.BucketName)
	} else {
		factory = NewDiskImageFactory()
	}

	imageCollections, err := ParseImageCollections(Conf.CollectionsPath)
	if err != nil {
		log.Fatal(err)
		return
	}

	log.Print("Found collections: ")
	for k := range imageCollections {
		log.Print(k)
	}

	handler := Handler{
		Confs:            Conf,
		imageCollections: imageCollections,
		ImageFactory:     factory,
	}

	capHandler := CapHandler{
		Handler:   handler,
		dimension: C.CAP,
	}
	r.Handle("/img/{collection}/cap{dimension}/{name}", capHandler)
	r.Handle("/img/{collection}/cap{dimension}/blur{blur}/{name}", capHandler)
	r.Handle("/img/{collection}/cap/{dimension}/{name}", capHandler)
	r.Handle("/img/{collection}/cap/{dimension}/blur{blur}/{name}", capHandler)

	widthHandler := CapHandler{
		Handler:   handler,
		dimension: C.WIDTH,
	}
	r.Handle("/img/{collection}/width{dimension}/{name}", widthHandler)
	r.Handle("/img/{collection}/width{dimension}/blur{blur}/{name}", widthHandler)
	r.Handle("/img/{collection}/width/{dimension}/{name}", widthHandler)
	r.Handle("/img/{collection}/width/{dimension}/blur{blur}/{name}", widthHandler)

	heightHandler := CapHandler{
		Handler:   handler,
		dimension: C.HEIGHT,
	}
	r.Handle("/img/{collection}/height{dimension}/{name}", heightHandler)
	r.Handle("/img/{collection}/height{dimension}/blur{blur}/{name}", heightHandler)
	r.Handle("/img/{collection}/height/{dimension}/{name}", heightHandler)
	r.Handle("/img/{collection}/height/{dimension}/blur{blur}/{name}", heightHandler)

	resizeHandler := ResizeHandler{
		Handler: handler,
	}
	r.Handle("/img/{collection}/{width}x{height}/{name}", resizeHandler)
	r.Handle("/img/{collection}/{width}x{height}/blur{blur}/{name}", resizeHandler)

	originalHandler := OriginalHandler{
		Handler: handler,
	}
	r.Handle("/img/{collection}/original/{name}", originalHandler)

	http.Handle("/", r)
	log.Print("Starting imageservice")

	if port, err := strconv.Atoi(Conf.Port); err == nil {
		adminzEndpoints = adminz.New()
		adminzEndpoints.KillfilePaths(adminz.Killfiles(port))
		adminzEndpoints.Start()
		log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", Conf.Port), nil))
	} else {
		log.Fatalf("Unable to parse port %s %s", Conf.Port, err)
	}
}
func resetSignalSIGWINCH() {
	signal.Reset(syscall.SIGWINCH)
}
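For context, a sketch of the setup side that a reset like the one above would undo; the function name, callback, and stop channel are assumptions for illustration, not taken from the source.

// Hypothetical counterpart to resetSignalSIGWINCH: invoke onResize for each
// terminal resize notification until stop is closed.
func watchSIGWINCH(onResize func(), stop <-chan struct{}) {
	winch := make(chan os.Signal, 1)
	signal.Notify(winch, syscall.SIGWINCH)
	go func() {
		defer signal.Stop(winch)
		for {
			select {
			case <-winch:
				onResize()
			case <-stop:
				return
			}
		}
	}()
}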
// runOnChan gives control of modchan to the caller, restarting daemons and
// running preps for each modification received on it.
func runOnChan(modchan chan *watch.Mod, readyCallback func(), log termlog.TermLog, cnf *conf.Config, watchconf string, notifiers []notify.Notifier) (*conf.Config, error) {
	err := PrepOnly(log, cnf, notifiers)
	if err != nil {
		return nil, err
	}

	dworld, err := NewDaemonWorld(cnf, log)
	if err != nil {
		return nil, err
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill)
	defer signal.Reset(os.Interrupt, os.Kill)
	defer dworld.Shutdown(os.Kill)
	go func() {
		dworld.Shutdown(<-c)
		os.Exit(0)
	}()
	dworld.Start()

	watchpaths := cnf.WatchPatterns()
	if watchconf != "" {
		watchpaths = append(watchpaths, filepath.Dir(watchconf))
	}

	// FIXME: This takes a long time. We could start it in parallel with the
	// first process run in a goroutine
	watcher, err := watch.Watch(watchpaths, lullTime, modchan)
	if err != nil {
		return nil, fmt.Errorf("Error watching: %s", err)
	}
	defer watcher.Stop()

	go readyCallback()
	for mod := range modchan {
		if mod == nil {
			break
		}

		if watchconf != "" && mod.Has(watchconf) {
			ret, err := ioutil.ReadFile(watchconf)
			if err != nil {
				log.Warn("Reloading config - error reading %s: %s", watchconf, err)
				continue
			}
			newcnf, err := conf.Parse(watchconf, string(ret))
			if err != nil {
				log.Warn("Reloading config - error reading %s: %s", watchconf, err)
				continue
			}
			log.Notice("Reloading config %s", watchconf)
			return newcnf, nil
		}

		log.SayAs("debug", "Delta: \n%s", mod.String())
		for i, b := range cnf.Blocks {
			lmod, err := mod.Filter(b.Include, b.Exclude)
			if err != nil {
				log.Shout("Error filtering events: %s", err)
				continue
			}
			if lmod.Empty() {
				continue
			}
			err = RunPreps(b, cnf.GetVariables(), lmod, log, notifiers)
			if err != nil {
				if _, ok := err.(ProcError); ok {
					continue
				} else {
					return nil, err
				}
			}
			dworld.DaemonPens[i].Restart()
		}
	}
	return nil, nil
}
func main() {
	app := cli.NewApp()
	app.Name = "nats-repl"
	app.Usage = "REPL for NATS"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "server, s",
			EnvVar: "NATS_DEFAULT_URL",
			Usage:  "NATS `URL` to connect to",
		},
	}

	app.Action = func(ctx *cli.Context) error {
		nc, err := nats.Connect(ctx.String("server"))
		if err != nil {
			logError(fmt.Sprintf("%v", err))
			os.Exit(1)
		}
		logInfo(fmt.Sprintf("connected to %s", ctx.String("server")))

		rl, err := readline.NewEx(&readline.Config{
			Prompt:          colorize("1;37", "nats> "),
			HistoryFile:     "/tmp/nats-repl-history.tmp",
			InterruptPrompt: "^C",
			EOFPrompt:       "exit",
		})
		if err != nil {
			panic(err)
		}
		defer rl.Close()

	repl:
		for {
			line, err := rl.Readline()
			switch err {
			case readline.ErrInterrupt:
				if len(line) == 0 {
					break repl
				} else {
					continue repl
				}
			case io.EOF:
				break repl
			}

			args := strings.Fields(line)
			switch {
			case len(args) == 0:
				continue repl
			case args[0] == "pub":
				var subject string
				if len(args) >= 2 {
					subject = args[1]
				} else {
					logError("subject is required")
					continue repl
				}
				var data string
				if len(args) >= 3 {
					data = strings.Join(args[2:], " ")
				} else {
					data = ""
				}
				nc.Publish(subject, []byte(data))
			case args[0] == "sub":
				sigch := make(chan os.Signal, 1)
				signal.Notify(sigch, os.Interrupt)

				var subject string
				if len(args) >= 2 {
					subject = args[1]
				} else {
					logError("subject is required")
					continue repl
				}

				subch := make(chan *nats.Msg, 64)
				sub, _ := nc.ChanSubscribe(subject, subch)

			sub:
				for {
					select {
					case msg := <-subch:
						fmt.Printf("[%s] %s\n", msg.Subject, string(msg.Data))
					case <-sigch:
						fmt.Println()
						break sub
					}
				}

				// Stop signal delivery and the subscription before closing
				// their channels, so nothing can send on a closed channel.
				signal.Reset(os.Interrupt)
				sub.Unsubscribe()
				close(sigch)
				close(subch)
			case args[0] == "req":
				var subject string
				if len(args) >= 2 {
					subject = args[1]
				} else {
					logError("subject is required")
					continue repl
				}
				var data string
				if len(args) >= 3 {
					data = strings.Join(args[2:], " ")
				} else {
					data = ""
				}
				msg, err := nc.Request(subject, []byte(data), 5000*time.Millisecond)
				if err != nil {
					logError(fmt.Sprintf("%v", err))
					break
				}
				fmt.Println(string(msg.Data))
			case args[0] == "help":
				logInfo("COMMANDS:")
				logInfo("pub <subject> [data]")
				logInfo("sub <subject>")
				logInfo("req <subject> [data]")
			case args[0] == "exit":
				break repl
			default:
				logError(fmt.Sprintf("unknown command: %s", args[0]))
			}
		}
		return nil
	}

	app.Run(os.Args)
}
func main() {
	flag.Parse()
	log.SetLevel(log.InfoLevel)

	router := mux.NewRouter().StrictSlash(true)
	renderer := render.New(render.Options{
		Extensions:    []string{".html"},
		IsDevelopment: true,
		Directory:     *TEMPLATE_PATH,
		Funcs: []template.FuncMap{
			template.FuncMap{
				"HasPrefix": strings.HasPrefix,
				"HumanizeDate": func(date time.Time) string {
					return date.Format("Mon Jan 2 15:04:05 -0700 MST 2006")
				},
				"Url": func(name string, params ...string) (string, error) {
					route := router.Get(name)
					if route == nil {
						return "", fmt.Errorf("route named %s not found", name)
					}
					url, err := route.URL(params...)
					if err != nil {
						return "", err
					}
					return url.Path, nil
				},
			},
		},
	})

	vmtpl, err := text_template.ParseFiles(*VM_TEMPLATE)
	if err != nil {
		log.WithError(err).WithField("filename", *VM_TEMPLATE).Fatal("failed to parse machine template")
	}
	machines, err := dal.NewLibvirtMachinerep("qemu:///system", vmtpl)
	if err != nil {
		log.WithError(err).Fatal("failed to initialize libvirt-kvm machines")
	}
	imagerep := dal.NewLocalfsImagerep(*IMAGES_PATH)

	metadb, err := bolt.Open(*METADB_PATH, 0600, nil)
	if err != nil {
		log.WithError(err).Fatal("failed to open metadata db")
	}
	planrep := dal.NewBoltPlanrep(metadb)
	ippool := dal.NewBoltIPPool(metadb)

	ctx := &vmango.Context{
		Render:   renderer,
		Router:   router,
		Machines: machines,
		Logger:   log.New(),
		Meta:     metadb,
		Images:   imagerep,
		IPPool:   ippool,
		Plans:    planrep,
	}

	router.Handle("/", vmango.NewHandler(ctx, handlers.Index)).Name("index")
	router.Handle("/machines/", vmango.NewHandler(ctx, handlers.MachineList)).Name("machine-list")
	router.Handle("/machines/add/", vmango.NewHandler(ctx, handlers.MachineAddForm)).Name("machine-add")
	router.Handle("/machines/{name:[^/]+}/", vmango.NewHandler(ctx, handlers.MachineDetail)).Name("machine-detail")
	router.Handle("/images/", vmango.NewHandler(ctx, handlers.ImageList)).Name("image-list")
	router.Handle("/ipaddress/", vmango.NewHandler(ctx, handlers.IPList)).Name("ip-list")
	router.Handle("/plans/", vmango.NewHandler(ctx, handlers.PlanList)).Name("plan-list")
	router.HandleFunc("/static{name:.*}", handlers.MakeStaticHandler(*STATIC_PATH)).Name("static")

	n := negroni.New()
	n.Use(negronilogrus.NewMiddleware())
	n.Use(negroni.NewRecovery())
	n.UseHandler(router)

	metaserv := cloudmeta.New()

	s := make(chan os.Signal, 1)
	signal.Notify(s, os.Interrupt)
	signal.Notify(s, syscall.SIGTERM)
	go func() {
		sig := <-s
		fmt.Println(sig, "received")
		signal.Reset(os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
		close(s)
		if err := metaserv.CleanupIPTables(*META_ADDR); err != nil {
			log.WithError(err).Warn("cannot remove metaserver iptables rules")
			os.Exit(1)
		}
		os.Exit(0)
	}()

	go func() {
		log.WithField("address", *META_ADDR).Info("starting cloud metadata server")
		log.Fatal(metaserv.ListenAndServe(*META_ADDR))
	}()

	log.WithField("address", *LISTEN_ADDR).Info("starting server")
	log.Fatal(http.ListenAndServe(*LISTEN_ADDR, n))
}