func main() {
	// Parse flags from os.Args with go-flags. flags.Parse(&opts) fills `opts`
	// and returns any remaining positional arguments in `args`.
	args, err := flags.Parse(&opts)
	if err != nil {
		os.Exit(0)
	}

	if opts.Verbose {
		fmt.Printf("Verbosity: %v\n", opts.Verbose)
		fmt.Printf("Encode: %v\n", opts.Encode)
		fmt.Printf("Decode: %v\n", opts.Decode)
		fmt.Printf("Neighbor: %v\n", opts.Neighbor)
		fmt.Printf("Latitude: %v\n", opts.Latitude)
		fmt.Printf("Longitude: %v\n", opts.Longitude)
		fmt.Printf("Precision: %v\n", opts.Precision)
		fmt.Printf("GeoHash: '%v'\n", opts.GeoHash)
		fmt.Printf("Remaining args: [%s]\n", strings.Join(args, " "))
	}

	// Shorthand to make the code below readable.
	location := ggeohash.MakePreciseLocationLowerBound()
	location.Latitude = opts.Latitude
	location.Longitude = opts.Longitude
	precision := opts.Precision
	geo := opts.GeoHash

	if opts.Encode {
		output := ggeohash.Encode(location, precision)
		fmt.Printf("latitude = %2.10f, longitude = %3.10f, precision = %d, geohash = %s\n",
			location.Latitude, location.Longitude, precision, output)
	}

	if opts.Decode {
		output := ggeohash.Decode(geo)
		fmt.Printf("geohash = %s, latitude = %2.10f, longitude = %3.10f, latitude.err = %2.10f, longitude.err = %3.10f\n",
			geo, output.Location.Latitude, output.Location.Longitude, output.Error.Latitude, output.Error.Longitude)
	}

	if opts.Neighbor {
		directions := [2]ggeohash.CardialDirections{ggeohash.None, ggeohash.None}
		if opts.North {
			directions[0] = ggeohash.North
		}
		if opts.South {
			directions[0] = ggeohash.South
		}
		if opts.East {
			directions[1] = ggeohash.East
		}
		if opts.West {
			directions[1] = ggeohash.West
		}
		output := ggeohash.Neighbor(geo, directions)
		fmt.Printf("geohash = %s, directions[0] = %d, directions[1] = %d, neighbor = %s\n",
			geo, directions[0], directions[1], string(output))
	}
}
// initFlags parses the given flags.
// When the user asks for help (-h or --help), the application exits with status 0.
// When an unexpected flag is given, the application exits with status 1.
func initFlags() {
	args, err := goflags.Parse(&flags)
	if err != nil {
		// Assert the error to be a flags.Error.
		flagError := err.(*goflags.Error)
		if flagError.Type == goflags.ErrHelp {
			// User asked for help on flags; the program can exit successfully.
			os.Exit(0)
		}
		if flagError.Type == goflags.ErrUnknownFlag {
			fmt.Println("Use --help to view all available options.")
			os.Exit(1)
		}
		fmt.Printf("Error parsing flags: %s\n", err)
		os.Exit(1)
	}

	// Check for unexpected arguments.
	// When an unexpected argument is given, the application exits with status 1.
	if len(args) > 0 {
		fmt.Printf("Unknown argument '%s'.\n", args[0])
		os.Exit(1)
	}

	if flags.DisableAlphaAuth {
		log.Printf("DEPRECATED flag --disable-alpha-auth: alpha auth has been stripped from the source code, so --disable-alpha-auth no longer has any effect. It will be removed in a future release.")
	}

	if flags.HTTPFiles != "./http-files/" {
		log.Printf("DEPRECATED flag --http-files: loading of the http files is now done with go.rice. This flag will be removed in a future release.")
	}
}
// Setup reads static config from file and runtime options from the command line.
// It also preserves the static config for later comparison with the runtime
// config, to prevent rewriting it when no changes are made.
func (opts *Options) Setup() ([]string, *Config) {
	err := flag.IniParse("config.ini", opts)
	if err != nil {
		switch err.(type) {
		case *os.PathError:
			lWarn("config.ini not found, using defaults")
		default:
			lFatal(err)
		}
	}
	inisets := *opts.Config // copy the value instead of the reference, or we will get no results later

	args, err := flag.Parse(opts)
	checkFlagError(err) // scream if something goes wrong, provide help if something goes meh

	for _, arg := range os.Args {
		if strings.Contains(arg, "--score") {
			opts.ScoreF = true
		}
		if strings.Contains(arg, "--faves") {
			opts.FavesF = true
		}
	}
	return args, &inisets
}
// initFlags parses the given flags.
// When the user asks for help (-h or --help), the application exits with status 0.
// When an unexpected flag is given, the application exits with status 1.
func initFlags() {
	args, err := goflags.Parse(&flags)
	if err != nil {
		// Assert the error to be a flags.Error.
		flagError := err.(*goflags.Error)
		if flagError.Type == goflags.ErrHelp {
			// User asked for help on flags; the program can exit successfully.
			os.Exit(0)
		}
		if flagError.Type == goflags.ErrUnknownFlag {
			fmt.Println("Use --help to view all available options.")
			os.Exit(1)
		}
		fmt.Printf("Error parsing flags: %s\n", err)
		os.Exit(1)
	}

	// Check for unexpected arguments.
	// When an unexpected argument is given, the application exits with status 1.
	if len(args) > 0 {
		fmt.Printf("Unknown argument '%s'.\n", args[0])
		os.Exit(1)
	}
}
func main() {
	// Set log options.
	log.SetOutput(os.Stderr)
	log.SetLevel(log.WarnLevel)

	// Options.
	var opts struct {
		Verbose     bool   `short:"v" long:"verbose" description:"Verbose"`
		Version     bool   `long:"version" description:"Version"`
		BindAddr    string `short:"b" long:"bind-addr" description:"Bind to address" default:"0.0.0.0"`
		Port        int    `short:"p" long:"port" description:"Port" default:"5050"`
		StaticDir   string `short:"s" long:"static-dir" description:"Static content" default:"static"`
		TemplateDir string `short:"t" long:"template-dir" description:"Templates" default:"templates"`
	}

	// Parse options.
	if _, err := flags.Parse(&opts); err != nil {
		ferr := err.(*flags.Error)
		if ferr.Type == flags.ErrHelp {
			os.Exit(0)
		} else {
			log.Fatal(err.Error())
		}
	}

	// Print version.
	if opts.Version {
		fmt.Printf("peekaboo %s\n", Version)
		os.Exit(0)
	}

	// Set verbose.
	if opts.Verbose {
		log.SetLevel(log.InfoLevel)
	}

	// Check root.
	if runtime.GOOS != "darwin" && os.Getuid() != 0 {
		log.Fatal("This application requires root privileges to run.")
	}

	info, err := hwinfo.Get()
	if err != nil {
		log.Fatal(err.Error())
	}

	log.Infof("Using static dir: %s", opts.StaticDir)
	log.Infof("Using template dir: %s", opts.TemplateDir)

	m := macaron.Classic()
	m.Use(macaron.Static(opts.StaticDir))
	m.Use(macaron.Renderer(macaron.RenderOptions{
		Directory:  opts.TemplateDir,
		IndentJSON: true,
	}))

	routes(m, info)
	m.Run(opts.BindAddr, opts.Port)
}
func main() {
	flags.Parse(&opts)

	if opts.Spanish {
		fmt.Printf("Hola %s!\n", opts.Name)
	} else {
		fmt.Printf("Hello %s!\n", opts.Name)
	}
}
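// A minimal sketch (an assumption, not taken from the original source) of the
// package-level options struct the example above relies on; go-flags reads the
// struct tags to build the -n/--name and -s/--spanish flags.
var opts struct {
	Name    string `short:"n" long:"name" description:"Name to greet" default:"world"`
	Spanish bool   `short:"s" long:"spanish" description:"Greet in Spanish"`
}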
func readOptions() *options {
	o := options{}
	_, err := flags.Parse(&o)
	if err != nil {
		panic(err)
	}
	return &o
}
func readOptions() *options {
	o := options{}
	_, err := flags.Parse(&o)
	if err != nil {
		os.Exit(1)
	}
	return &o
}
func main() {
	var opts Options
	_, err := flags.Parse(&opts)
	if err != nil {
		os.Exit(1)
	}

	if opts.Debug {
		log.SetLevel(log.DebugLevel)
	}

	if opts.LogFile != "" {
		logFp, err := os.OpenFile(opts.LogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
		checkError(fmt.Sprintf("error opening %s", opts.LogFile), err)
		defer logFp.Close()

		// ensure panic output goes to the log file
		syscall.Dup2(int(logFp.Fd()), 1)
		syscall.Dup2(int(logFp.Fd()), 2)

		// log as JSON
		log.SetFormatter(&log.JSONFormatter{})

		// send output to the file
		log.SetOutput(logFp)
	}

	log.Debug("hi there! (tickertape tickertape)")
	log.Infof("version: %s", version)

	vaultClient, err := helpers.NewVaultClient(opts.VaultAddr, opts.VaultToken)
	checkError("creating Vault client", err)

	consulClient, err := consulapi.NewClient(consulapi.DefaultConfig())
	checkError("creating Consul client", err)

	router := mux.NewRouter()
	registrar := instance.NewRegistrar(vaultClient)

	v1 := v1.NewCentralBooking(
		registrar,
		consulClient.Catalog(),
		vaultClient.GetEndpoint(),
	)
	v1.InstallHandlers(router.PathPrefix("/v1").Subrouter())

	httpServer := &http.Server{
		Addr:    fmt.Sprintf(":%d", opts.HttpPort),
		Handler: Log(router),
	}
	checkError("launching HTTP server", httpServer.ListenAndServe())
}
func ParseArgs(settings interface{}) []string {
	remaining, err := flags.Parse(settings)
	if err != nil {
		switch err.(*flags.Error).Type {
		case flags.ErrHelp:
			os.Exit(0)
		default:
			log.Error(err.Error())
			os.Exit(1)
		}
	}
	return remaining
}
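// Hypothetical caller of ParseArgs above (the struct and field names are
// illustrative assumptions, not part of the original source): flag values are
// filled into the struct and any leftover positional arguments are returned.
type exampleSettings struct {
	Verbose bool `short:"v" long:"verbose" description:"Enable verbose output"`
}

func exampleUsage() {
	var settings exampleSettings
	files := ParseArgs(&settings) // e.g. `mytool -v a.txt b.txt` leaves files == ["a.txt", "b.txt"]
	for _, f := range files {
		fmt.Println(f)
	}
}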
// initOptions initializes the command line options.
func initOptions() {
	_, err := flags.Parse(&options)
	utils.ExitOnError(err, "error parsing options")
	// TODO: Add support for env variables

	if options.Version {
		printHeader()
		os.Exit(0)
	}

	// Remove all flags to make sure that Goji doesn't read them.
	os.Args = os.Args[:1]
}
// Parse returns a configuration from either a configuration file or flags.
func Parse() (*Config, error) {
	opts := &opts{}
	if _, err := goflags.Parse(opts); err != nil {
		return nil, err
	}

	if opts.Quiet {
		log.SetLevel(-1)
	} else {
		log.SetLevel(len(opts.Verbose))
	}

	if opts.ConfigPath == "" {
		return parseArgs(opts)
	}
	return parseFile(opts.ConfigPath)
}
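// A sketch of the opts type that Parse assumes (field names and tags are
// illustrative assumptions, not taken from the original source). Verbose is a
// slice so that repeating -v raises the log level, which is why Parse calls
// len(opts.Verbose) above.
type opts struct {
	ConfigPath string `short:"c" long:"config" description:"Path to a configuration file"`
	Verbose    []bool `short:"v" long:"verbose" description:"Increase log verbosity (repeatable)"`
	Quiet      bool   `short:"q" long:"quiet" description:"Silence log output"`
}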
func main() {
	flags.Parse(&opts)

	opts.DataPath, _ = filepath.Abs(opts.DataPath)
	fmt.Println("DATA_PATH:", opts.DataPath)
	fmt.Println("PORT:", opts.Port)

	for i, hookUrl := range opts.WebHookUrls {
		if len(hookUrl) >= 4 && hookUrl[:4] != "http" {
			opts.WebHookUrls[i] = "http://" + hookUrl
		}
	}
	fmt.Printf("HOOKS: %+v\n", opts.WebHookUrls)

	http.HandleFunc("/", handler)
	http.ListenAndServe(":"+strconv.Itoa(opts.Port), nil)
}
func main() {
	// Set log options.
	log.SetOutput(os.Stderr)
	log.SetLevel(log.WarnLevel)

	// Options.
	var opts struct {
		Verbose bool    `short:"v" long:"verbose" description:"Verbose"`
		Version bool    `long:"version" description:"Version"`
		File    *string `short:"f" long:"file" description:"Input file, data serialization format used is based on the file extension"`
	}

	// Parse options.
	if _, err := flags.Parse(&opts); err != nil {
		ferr := err.(*flags.Error)
		if ferr.Type == flags.ErrHelp {
			os.Exit(0)
		} else {
			log.Fatal(err.Error())
		}
	}

	// Print version.
	if opts.Version {
		fmt.Printf("tf %s\n", Version)
		os.Exit(0)
	}

	// Set verbose.
	if opts.Verbose {
		log.SetLevel(log.InfoLevel)
	}

	// Load the input file if one was specified.
	if opts.File != nil {
		// Load file.
		d, err := input.LoadFile(*opts.File)
		if err != nil {
			log.Fatal(err.Error())
		}

		// Print as YAML.
		s, err := yaml.Marshal(&d)
		if err != nil {
			log.Fatal(err.Error())
		}
		fmt.Println(string(s))
	}
}
func main() {
	var opts Opts
	_, err := flags.Parse(&opts)
	if err != nil {
		os.Exit(1)
	}

	if logFp := common.InitLogging(opts.Debug, opts.LogFile); logFp != nil {
		defer logFp.Close()
	}

	log.Debug("hi there! (tickertape tickertape)")
	log.Infof("version: %s", version)

	cfgBytes, err := ioutil.ReadFile(opts.Config)
	common.CheckError("reading config file", err)

	var schedule []Schedule
	common.CheckError("parsing config file", yaml.Unmarshal(cfgBytes, &schedule))
	log.Errorf("%+v", schedule)

	common.ConfigureWorkers(opts.RedisHost, opts.RedisPort, opts.RedisDb)

	c := cron.New()
	for _, e := range schedule {
		// capture the loop variable by value so each cron closure sees its own entry
		func(entry Schedule) {
			c.AddFunc(entry.Spec, func() {
				jid, err := workers.Enqueue(entry.Queue, entry.Class, entry.Args)
				common.CheckError("scheduling something", err)
				log.WithField("something", entry.Spec).Infof("submitted job %s", jid)
			})
		}(e)
	}
	c.Start()

	select {}
}
// parseArgs parses the command-line arguments and validates them.
func parseArgs() *Options {
	opts := &Options{}
	_, err := goflags.Parse(opts)
	if err != nil {
		os.Exit(1)
	}

	if opts.RedisDb < 0 || opts.RedisDb > 15 {
		log.Fatal("redis db out of range")
	}

	opts.verbosity = 0
	if opts.Verbose {
		opts.verbosity++
	}

	return opts
}
// initFlags parses the given flags.
// When the user asks for help (-h or --help), the application exits with status 0.
// When an unexpected flag is given, the application exits with status 1.
func initFlags() {
	args, err := goflags.Parse(&flags)
	if err != nil {
		// Assert the error to be a flags.Error.
		flagError := err.(*goflags.Error)
		if flagError.Type == goflags.ErrHelp {
			fmt.Println("npserver-daemon wraps npserver with daemon functionality.")
			// User asked for help on flags; the program can exit successfully.
			os.Exit(0)
		}
		if flagError.Type == goflags.ErrUnknownFlag {
			fmt.Println("Use --help to view all available options.")
			os.Exit(1)
		}
		fmt.Printf("Error parsing flags: %s\n", err)
		os.Exit(1)
	}
	extraArgs = args
}
// parseArgs parses the command-line arguments and validates them.
func parseArgs() *Options {
	opts := &Options{}
	_, err := goflags.Parse(opts)
	if err != nil {
		os.Exit(1)
	}

	if opts.ProxyAddr == "" {
		opts.ProxyAddr = opts.ListenAddr
	}

	switch {
	case opts.Rate < 1:
		opts.Rate = 2
	case opts.Rate > 10:
		opts.Rate = 10
	}

	return opts
}
func main() {
	var opts Options
	_, err := flags.Parse(&opts)
	if err != nil {
		os.Exit(1)
	}

	if logFp := common.InitLogging(opts.Debug, opts.LogFile); logFp != nil {
		defer logFp.Close()
	}

	log.Debug("hi there! (tickertape tickertape)")
	log.Infof("version: %s", version)

	common.ConfigureWorkers(opts.RedisHost, opts.RedisPort, opts.RedisDb)
	workers.Process(opts.Queue, shell.Shell, opts.Concurrency)
	workers.Run()
}
func main() {
	var opts Options
	_, err := flags.Parse(&opts)
	if err != nil {
		os.Exit(1)
	}

	docker, err := dockerapi.NewClient(getopt("DOCKER_HOST", "unix:///var/run/docker.sock"))
	assert(err)

	hosts := NewHosts(docker, opts.File.Filename, opts.DomainName)

	// Set up to handle events early, so we don't miss anything while doing the
	// initial population.
	events := make(chan *dockerapi.APIEvents)
	assert(docker.AddEventListener(events))

	containers, err := docker.ListContainers(dockerapi.ListContainersOptions{})
	assert(err)

	for _, listing := range containers {
		go hosts.Add(listing.ID)
	}

	log.Println("docker-hosts: Listening for Docker events...")
	for msg := range events {
		switch msg.Status {
		case "start":
			go hosts.Add(msg.ID)
		case "die":
			go hosts.Remove(msg.ID)
		}
	}

	log.Fatal("docker-hosts: docker event loop closed") // TODO: reconnect?
}
func main() {
	flags.Parse(&opts)

	opts.DataPath, _ = filepath.Abs(opts.DataPath)
	fmt.Println("DATA_PATH:", opts.DataPath)
	fmt.Println("PORT:", opts.Port)

	for i, hookUrl := range opts.WebHookUrls {
		if len(hookUrl) >= 4 && hookUrl[:4] != "http" {
			opts.WebHookUrls[i] = "http://" + hookUrl
		}
	}
	fmt.Printf("HOOKS: %+v\n", opts.WebHookUrls)

	deviceMux := http.NewServeMux()
	deviceMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		p := r.URL.Path
		r.URL.Path = "/devices" + p
		handler(w, r)
	})
	go http.ListenAndServe(":82", deviceMux)

	http.HandleFunc("/", handler)
	http.ListenAndServe(":"+strconv.Itoa(opts.Port), nil)
}
func main() {
	var settings Settings
	_, err := flags.Parse(&settings)
	if err != nil {
		switch err.(*flags.Error).Type {
		case flags.ErrHelp:
			os.Exit(0)
		default:
			log.Error(err.Error())
			os.Exit(1)
		}
	}

	broker_endpoint := "inproc://fancy-req"

	server_cert, err := czmq.NewCertFromFile(settings.ServerCertificatePath)
	if err != nil {
		log.Error(err.Error())
		os.Exit(1)
	}

	agent, _ := zap.NewZapAgent()
	// TODO(sissel): Gate access to prevent untrusted connections.
	go agent.Run(zap.NewRestrictedAccess())
	defer agent.Destroy()

	b, err := mdp.NewBroker(broker_endpoint)
	if err != nil {
		log.Fatalf("NewBroker(%s) failed: %s", broker_endpoint, err)
	}
	b.CurveCertificate = server_cert

	b.Bind("inproc://fancy-dealer")
	port, err := b.Bind("tcp://*:*")
	if err != nil {
		log.Fatalf("Bind(tcp://*:*) failed: %s", err)
	}
	log.WithFields(log.Fields{"address": fmt.Sprintf("tcp://*:%d", port)}).Info("Broker available")

	go RunHTTP(":8111", "/zws/1.0")

	b.Run()
}
func main() { println("Starting monlite...") // Configuration var opts options _, err := flags.Parse(&opts) if err != nil { log.Fatal("can't parse the command line options:", err) } cfg, err := ini.Load(opts.Conf) if err != nil { log.Fatal("Error reading configuratio file:", opts.Conf) } // Log stuff println("Log...") name := appname pid := os.Getpid() pidstr := strconv.FormatInt(int64(pid), 10) name = name + " (" + pidstr + ")" fnull, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660) if err != nil { log.Fatal("open log file failed:", err) } defer fnull.Close() stderrBack := log.NewWriter(fnull).F(log.DefFormatter) if opts.Level != "" && opts.Level != "nolog" { level, err := log.ParseLevel(opts.Level) if err != nil { log.Fatal("Invalid log level.") } stderrBack = log.Filter( log.NewWriter(os.Stderr).F(log.DefFormatter), log.Op(log.Ge, "level", level), ) } logfile := cfg.Section("log").Key("file").MustString("/var/log/monlite.log") if opts.Log != "" { logfile = opts.Log } loglevel := cfg.Section("log").Key("level").MustString("debug") level, err := log.ParseLevel(loglevel) if err != nil { log.Fatal("Invalid log level.") } var fileBack log.LogBackend if logfile != "" { f, err := os.OpenFile(logfile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660) if err != nil { log.Fatalf("open log file %v failed: %v", logfile, err) } defer f.Close() fileBack = log.Filter( log.NewWriter(f).F(log.DefFormatter), log.Op(log.Ge, "level", level), ) } log.Log = log.New(stderrBack, true).Domain(name).InfoLevel() if fileBack != nil { log.Log = log.New( log.NewMulti( stderrBack, log.DefFormatter, fileBack, log.DefFormatter, ), true, ).Domain(name).InfoLevel() } log.Println("Log Ok!") dns.SetLookupHostFunction(net.LookupHost) log.Println("Configuration...") cfgMail := cfg.Section("mail") smtpTimeout, err := cfgMail.Key("timeout").Int() if err != nil { log.Fatal("invalid smtp timeout") } server := cfgMail.Key("smtp").String() s := strings.Split(server, ":") if len(s) > 0 { server = s[0] } auth := smtp.PlainAuth( "", cfgMail.Key("account").String(), cfgMail.Key("password").String(), server, ) hostname := "your system" if hn, err := os.Hostname(); err == nil { hostname = hn } mons := make([]*monlite.Monitor, 0) for _, sec := range cfg.Sections() { if !strings.HasPrefix(sec.Name(), "service.") { continue } name := strings.TrimPrefix(sec.Name(), "service.") log.DebugLevel().Printf("Adding monitor %v for url: %v", name, sec.Key("url").String()) to, err := sec.Key("timeout").Int() if err != nil { log.Fatalf("invalid value in timeout for %v", name) } p, err := sec.Key("periode").Int() if err != nil { log.Fatalf("invalid value in timeout for %v", name) } sleep, err := sec.Key("sleep").Int() if err != nil { log.Fatalf("invalid value in sleep for %v", name) } fails, err := sec.Key("fails").Int() if err != nil { log.Fatalf("invalid value in fails for %v", name) } mons = append(mons, &monlite.Monitor{ Name: name, Url: sec.Key("url").String(), Timeout: time.Duration(to) * time.Second, Periode: time.Duration(p) * time.Second, Sleep: time.Duration(sleep) * time.Second, Fails: fails, OnFail: func(m *monlite.Monitor) error { body := "Mime-Version: 1.0\n" body += "Content-Type: text/plain; charset=utf-8\n" body += "From:" + cfgMail.Key("from").String() + "\n" body += "To:" + cfgMail.Key("to").String() + "\n" body += "Subject: [" + hostname + "] " + "Monitor fail for " + m.Name + "\n" body += "Hi! 
This is " + hostname + ".\n\n" body += "Monitor fail for " + m.Name + " " + m.Url + "\n\n" body += "Tank you, our lazy boy.\n" body += time.Now().Format(time.RFC1123Z) err := mysmtp.SendMail( cfgMail.Key("smtp").String(), auth, cfgMail.Key("from").String(), []string{cfgMail.Key("to").String()}, cfgMail.Key("helo").String(), []byte(body), time.Duration(smtpTimeout)*time.Second, false, ) if err != nil { return e.Forward(err) } return nil }, OnUnFail: func(m *monlite.Monitor) error { body := "Mime-Version: 1.0\n" body += "Content-Type: text/plain; charset=utf-8\n" body += "From:" + cfgMail.Key("from").String() + "\n" body += "To:" + cfgMail.Key("to").String() + "\n" body += "Subject: [" + hostname + "] " + "Monitor ok for " + m.Name + "\n" body += "Hi! This is " + hostname + ".\n\n" body += "Monitor ok for " + m.Name + " " + m.Url + "\n\n" body += "Tank you, our lazy boy.\n" body += time.Now().Format(time.RFC1123Z) err := mysmtp.SendMail( cfgMail.Key("smtp").String(), auth, cfgMail.Key("from").String(), []string{cfgMail.Key("to").String()}, cfgMail.Key("helo").String(), []byte(body), time.Duration(smtpTimeout)*time.Second, false, ) if err != nil { return e.Forward(err) } return nil }, }) } log.Println("Starting monitors...") for _, m := range mons { err := m.Start() if err != nil { log.Fatalf("Failed to start monitor for %v. Error: %v", m.Name, err) } } log.Println("Monitors ok!") sig := make(chan os.Signal) signal.Notify(sig, syscall.SIGINT, syscall.SIGKILL, syscall.SIGTERM) <-sig log.Println("Stop monitors...") for _, m := range mons { log.DebugLevel().Printf("Stop monitor %v", m.Name) err := m.Stop() if err != nil { log.Errorf("Failed to start monitor for %v. Error: %v", m.Name, err) } } log.Println("End.") }
func main() {
	var gmx int
	if gmxEnv := os.Getenv("GOMAXPROCS"); gmxEnv != "" {
		gmx, _ = strconv.Atoi(gmxEnv)
	} else {
		gmx = runtime.NumCPU()
	}
	runtime.GOMAXPROCS(gmx)

	// command line flags
	var opts struct {
		HMACKey             string        `short:"k" long:"key" description:"HMAC key"`
		AddHeaders          []string      `short:"H" long:"header" description:"Extra header to return for each response. This option can be used multiple times to add multiple headers"`
		Stats               bool          `long:"stats" description:"Enable Stats"`
		NoLogTS             bool          `long:"no-log-ts" description:"Do not add a timestamp to logging"`
		AllowList           string        `long:"allow-list" description:"Text file of hostname allow regexes (one per line)"`
		MaxSize             int64         `long:"max-size" default:"5120" description:"Max response image size (KB)"`
		ReqTimeout          time.Duration `long:"timeout" default:"4s" description:"Upstream request timeout"`
		MaxRedirects        int           `long:"max-redirects" default:"3" description:"Maximum number of redirects to follow"`
		DisableKeepAlivesFE bool          `long:"no-fk" description:"Disable frontend http keep-alive support"`
		DisableKeepAlivesBE bool          `long:"no-bk" description:"Disable backend http keep-alive support"`
		BindAddress         string        `long:"listen" default:"0.0.0.0:8080" description:"Address:Port to bind to for HTTP"`
		BindAddressSSL      string        `long:"ssl-listen" description:"Address:Port to bind to for HTTPS/SSL/TLS"`
		SSLKey              string        `long:"ssl-key" description:"ssl private key (key.pem) path"`
		SSLCert             string        `long:"ssl-cert" description:"ssl cert (cert.pem) path"`
		Verbose             bool          `short:"v" long:"verbose" description:"Show verbose (debug) log level output"`
		Version             []bool        `short:"V" long:"version" description:"Print version and exit; specify twice to show license information"`
	}

	// parse said flags
	_, err := flags.Parse(&opts)
	if err != nil {
		if e, ok := err.(*flags.Error); ok {
			if e.Type == flags.ErrHelp {
				os.Exit(0)
			}
		}
		os.Exit(1)
	}

	if len(opts.Version) > 0 {
		fmt.Printf("%s %s (%s,%s-%s)\n", ServerName, ServerVersion, runtime.Version(), runtime.Compiler, runtime.GOARCH)
		if len(opts.Version) > 1 {
			fmt.Printf("\n%s\n", strings.TrimSpace(licenseText))
		}
		os.Exit(0)
	}

	// start out with a very bare logger that only prints
	// the message (no special format or log elements)
	mlog.SetFlags(0)

	config := camo.Config{}
	if hmacKey := os.Getenv("GOCAMO_HMAC"); hmacKey != "" {
		config.HMACKey = []byte(hmacKey)
	}

	// flags override env var
	if opts.HMACKey != "" {
		config.HMACKey = []byte(opts.HMACKey)
	}

	if len(config.HMACKey) == 0 {
		mlog.Fatal("HMAC key required")
	}

	if opts.BindAddress == "" && opts.BindAddressSSL == "" {
		mlog.Fatal("One of listen or ssl-listen required")
	}

	if opts.BindAddressSSL != "" && opts.SSLKey == "" {
		mlog.Fatal("ssl-key is required when specifying ssl-listen")
	}
	if opts.BindAddressSSL != "" && opts.SSLCert == "" {
		mlog.Fatal("ssl-cert is required when specifying ssl-listen")
	}

	// set keepalive options
	config.DisableKeepAlivesBE = opts.DisableKeepAlivesBE
	config.DisableKeepAlivesFE = opts.DisableKeepAlivesFE

	if opts.AllowList != "" {
		b, err := ioutil.ReadFile(opts.AllowList)
		if err != nil {
			mlog.Fatal("Could not read allow-list", err)
		}
		config.AllowList = strings.Split(string(b), "\n")
	}

	AddHeaders := map[string]string{
		"X-Content-Type-Options":  "nosniff",
		"X-XSS-Protection":        "1; mode=block",
		"Content-Security-Policy": "default-src 'none'",
	}

	for _, v := range opts.AddHeaders {
		s := strings.SplitN(v, ":", 2)
		if len(s) != 2 {
			mlog.Printf("ignoring bad header: '%s'", v)
			continue
		}

		s0 := strings.TrimSpace(s[0])
		s1 := strings.TrimSpace(s[1])

		if len(s0) == 0 || len(s1) == 0 {
			mlog.Printf("ignoring bad header: '%s'", v)
			continue
		}
		AddHeaders[s[0]] = s[1]
	}

	// now configure a standard logger
	mlog.SetFlags(mlog.Lstd)
	if opts.NoLogTS {
		mlog.SetFlags(mlog.Flags() ^ mlog.Ltimestamp)
	}

	if opts.Verbose {
		mlog.SetFlags(mlog.Flags() | mlog.Ldebug)
		mlog.Debug("debug logging enabled")
	}

	// convert from KB to Bytes
	config.MaxSize = opts.MaxSize * 1024
	config.RequestTimeout = opts.ReqTimeout
	config.MaxRedirects = opts.MaxRedirects
	config.ServerName = ServerName

	proxy, err := camo.New(config)
	if err != nil {
		mlog.Fatal("Error creating camo", err)
	}

	dumbrouter := &router.DumbRouter{
		ServerName:  config.ServerName,
		AddHeaders:  AddHeaders,
		CamoHandler: proxy,
	}

	if opts.Stats {
		ps := &stats.ProxyStats{}
		proxy.SetMetricsCollector(ps)
		mlog.Printf("Enabling stats at /status")
		dumbrouter.StatsHandler = stats.Handler(ps)
	}

	http.Handle("/", dumbrouter)

	if opts.BindAddress != "" {
		mlog.Printf("Starting server on: %s", opts.BindAddress)
		go func() {
			srv := &http.Server{
				Addr:        opts.BindAddress,
				ReadTimeout: 30 * time.Second}
			mlog.Fatal(srv.ListenAndServe())
		}()
	}
	if opts.BindAddressSSL != "" {
		mlog.Printf("Starting TLS server on: %s", opts.BindAddressSSL)
		go func() {
			srv := &http.Server{
				Addr:        opts.BindAddressSSL,
				ReadTimeout: 30 * time.Second}
			mlog.Fatal(srv.ListenAndServeTLS(opts.SSLCert, opts.SSLKey))
		}()
	}

	// just block. listen and serve will exit the program if they fail/return,
	// so we just need to block to prevent main from exiting.
	select {}
}
func main() {
	// command line flags
	var opts struct {
		HostPort  string        `long:"host" default:"127.0.0.1:8125" description:"host:port of statsd server"`
		Prefix    string        `long:"prefix" default:"test-client" description:"Statsd prefix"`
		StatType  string        `long:"type" default:"count" description:"stat type to send. Can be timing, count, gauge"`
		StatValue int64         `long:"value" default:"1" description:"Value to send"`
		Name      string        `short:"n" long:"name" default:"counter" description:"stat name"`
		Rate      float32       `short:"r" long:"rate" default:"1.0" description:"sample rate"`
		Volume    int           `short:"c" long:"count" default:"1000" description:"Number of stats to send. Volume."`
		Noop      bool          `long:"noop" default:"false" description:"Use noop client"`
		Duration  time.Duration `short:"d" long:"duration" default:"10s" description:"How long to spread the volume across. Each second of duration volume/seconds events will be sent."`
	}

	// parse said flags
	_, err := flags.Parse(&opts)
	if err != nil {
		if e, ok := err.(*flags.Error); ok {
			if e.Type == flags.ErrHelp {
				os.Exit(0)
			}
		}
		fmt.Printf("Error: %+v\n", err)
		os.Exit(1)
	}

	var client statsd.Statter
	if !opts.Noop {
		client, err = statsd.New(opts.HostPort, opts.Prefix)
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()
	} else {
		client, err = statsd.NewNoop(opts.HostPort, opts.Prefix)
	}

	var stat func(stat string, value int64, rate float32) error
	switch opts.StatType {
	case "count":
		stat = func(stat string, value int64, rate float32) error {
			return client.Inc(stat, value, rate)
		}
	case "gauge":
		stat = func(stat string, value int64, rate float32) error {
			return client.Gauge(stat, value, rate)
		}
	case "timing":
		stat = func(stat string, value int64, rate float32) error {
			return client.Timing(stat, value, rate)
		}
	default:
		log.Fatal("Unsupported stat type")
	}

	pertick := opts.Volume / int(opts.Duration.Seconds()) / 10

	// add some extra time, because the first tick takes a while
	ender := time.After(opts.Duration + 100*time.Millisecond)
	c := time.Tick(time.Second / 10)
	count := 0
	for {
		select {
		case <-c:
			for x := 0; x < pertick; x++ {
				err := stat(opts.Name, opts.StatValue, opts.Rate)
				if err != nil {
					log.Printf("Got Error: %+v", err)
					break
				}
				count += 1
			}
		case <-ender:
			log.Printf("%d events called", count)
			os.Exit(0)
			return
		}
	}
}
func main() {
	// Set log options.
	log.SetOutput(os.Stderr)
	log.SetLevel(log.WarnLevel)

	// Options.
	var opts struct {
		Verbose      bool    `short:"v" long:"verbose" description:"Verbose"`
		Version      bool    `long:"version" description:"Version"`
		BindAddr     string  `short:"b" long:"bind-addr" description:"Bind to address" default:"0.0.0.0"`
		Port         int     `short:"p" long:"port" description:"Port" default:"5050"`
		StaticDir    string  `short:"s" long:"static-dir" description:"Static content" default:"static"`
		TemplateDir  string  `short:"t" long:"template-dir" description:"Templates" default:"templates"`
		KafkaEnabled bool    `short:"K" long:"kafka" description:"Enable Kafka message bus"`
		KafkaTopic   string  `long:"kafka-topic" description:"Kafka topic" default:"peekaboo"`
		KafkaPeers   *string `long:"kafka-peers" description:"Comma-delimited list of Kafka brokers"`
		KafkaCert    *string `long:"kafka-cert" description:"Certificate file for client authentication"`
		KafkaKey     *string `long:"kafka-key" description:"Key file for client authentication"`
		KafkaCA      *string `long:"kafka-ca" description:"CA file for TLS client authentication"`
		KafkaVerify  bool    `long:"kafka-verify" description:"Verify SSL certificate"`
	}

	// Parse options.
	if _, err := flags.Parse(&opts); err != nil {
		ferr := err.(*flags.Error)
		if ferr.Type == flags.ErrHelp {
			os.Exit(0)
		} else {
			log.Fatal(err.Error())
		}
	}

	// Print version.
	if opts.Version {
		fmt.Printf("peekaboo %s\n", Version)
		os.Exit(0)
	}

	// Set verbose.
	if opts.Verbose {
		log.SetLevel(log.InfoLevel)
	}

	// Check root.
	if runtime.GOOS != "darwin" && os.Getuid() != 0 {
		log.Fatal("This application requires root privileges to run.")
	}

	// Get hardware info.
	info := hwinfo.NewHWInfo()
	if err := info.GetTTL(); err != nil {
		log.Fatal(err.Error())
	}

	// Produce startup event to the Kafka bus.
	if opts.KafkaEnabled {
		log.Infof("Produce message to Kafka bus with topic: %s", opts.KafkaTopic)
		if opts.KafkaPeers == nil {
			log.Fatal("You need to specify Kafka Peers")
		}

		user, err := user.Current()
		if err != nil {
			log.Fatal(err.Error())
		}

		event := &Event{
			Name:      "Peekaboo startup",
			EventType: STARTED,
			Created:   time.Now().Format("20060102T150405ZB"),
			CreatedBy: CreatedBy{
				User:    user.Username,
				Service: "peekaboo",
				Host:    info.Hostname,
			},
			Descr: "Peekaboo startup event",
			Data:  info,
		}

		producer := newProducer(strings.Split(*opts.KafkaPeers, ","), opts.KafkaCert,
			opts.KafkaKey, opts.KafkaCA, opts.KafkaVerify)
		partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: opts.KafkaTopic,
			Value: event,
		})
		if err != nil {
			log.Fatal(err.Error())
		}
		log.Infof("Kafka partition: %v, offset: %v", partition, offset)
	}

	log.Infof("Using static dir: %s", opts.StaticDir)
	log.Infof("Using template dir: %s", opts.TemplateDir)

	m := macaron.Classic()
	m.Use(macaron.Static(opts.StaticDir))
	m.Use(macaron.Renderer(macaron.RenderOptions{
		Directory:  opts.TemplateDir,
		IndentJSON: true,
	}))

	routes(m, info)
	m.Run(opts.BindAddr, opts.Port)
}
func main() {
	_, err := flags.Parse(&opts)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}

	exe, cargs, dir := opts.Program, opts.Args, opts.Dir
	if dir != "" {
		err := os.Chdir(dir)
		if err != nil {
			panic(err.Error())
		}
	}

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}

	done := make(chan bool)

	// Process events.
	go func() {
		for {
			select {
			case ev := <-watcher.Event:
				if ev.IsModify() && filepath.Ext(ev.Name) == ".go" {
					time.Sleep(time.Second)
					// Drain any events that queued up while we slept, keeping the last one.
					for cont := true; cont; {
						select {
						case nev := <-watcher.Event:
							ev = nev
						default:
							cont = false
						}
					}
					log.Println("event:", ev)
					log.Printf("%s", exe)
					cmd := exec.Command(exe, strings.Split(cargs, " ")...)
					cmd.Stdout = os.Stdout
					cmd.Stderr = os.Stderr
					if err := cmd.Run(); err != nil {
						log.Printf("%s", err.Error())
					}
				}
			case err := <-watcher.Error:
				panic(err.Error())
			}
		}
	}()

	cwd, _ := os.Getwd()
	err = watcher.Watch(cwd)
	if err != nil {
		panic(err.Error())
	}

	<-done /* ... do stuff ... */

	watcher.Close()
}
func main() { runtime.GOMAXPROCS(runtime.NumCPU()) c := &Config{} migrator := Migrator{} migrator.Config = c // parse args _, err := goflags.Parse(c) if err != nil { log.Error(err) return } setInitLogging(c.LogLevel) if len(c.SourceEs) == 0 && len(c.DumpInputFile) == 0 { log.Error("no input, type --help for more details") return } if len(c.TargetEs) == 0 && len(c.DumpOutFile) == 0 { log.Error("no output, type --help for more details") return } if c.SourceEs == c.TargetEs && c.SourceIndexNames == c.TargetIndexName { log.Error("migration output is the same as the output") return } // enough of a buffer to hold all the search results across all workers migrator.DocChan = make(chan map[string]interface{}, c.DocBufferCount*c.Workers*10) var srcESVersion *ClusterVersion // create a progressbar and start a docCount var outputBar *pb.ProgressBar var fetchBar = pb.New(1).Prefix("Scroll") wg := sync.WaitGroup{} //dealing with input if len(c.SourceEs) > 0 { //dealing with basic auth if len(c.SourceEsAuthStr) > 0 && strings.Contains(c.SourceEsAuthStr, ":") { authArray := strings.Split(c.SourceEsAuthStr, ":") auth := Auth{User: authArray[0], Pass: authArray[1]} migrator.SourceAuth = &auth } //get source es version srcESVersion, errs := migrator.ClusterVersion(c.SourceEs, migrator.SourceAuth, migrator.Config.SourceProxy) if errs != nil { return } if strings.HasPrefix(srcESVersion.Version.Number, "5.") { log.Debug("source es is V5,", srcESVersion.Version.Number) api := new(ESAPIV5) api.Host = c.SourceEs api.Auth = migrator.SourceAuth api.HttpProxy = migrator.Config.SourceProxy migrator.SourceESAPI = api } else { log.Debug("source es is not V5,", srcESVersion.Version.Number) api := new(ESAPIV0) api.Host = c.SourceEs api.Auth = migrator.SourceAuth api.HttpProxy = migrator.Config.SourceProxy migrator.SourceESAPI = api } if c.ScrollSliceSize < 1 { c.ScrollSliceSize = 1 } fetchBar.ShowBar = false totalSize := 0 finishedSlice := 0 for slice := 0; slice < c.ScrollSliceSize; slice++ { scroll, err := migrator.SourceESAPI.NewScroll(c.SourceIndexNames, c.ScrollTime, c.DocBufferCount, c.Query, slice, c.ScrollSliceSize) if err != nil { log.Error(err) return } totalSize += scroll.Hits.Total if scroll != nil && scroll.Hits.Docs != nil { if scroll.Hits.Total == 0 { log.Error("can't find documents from source.") return } go func() { wg.Add(1) //process input // start scroll scroll.ProcessScrollResult(&migrator, fetchBar) // loop scrolling until done for scroll.Next(&migrator, fetchBar) == false { } fetchBar.Finish() // finished, close doc chan and wait for goroutines to be done wg.Done() finishedSlice++ //clean up final results if finishedSlice == c.ScrollSliceSize { log.Debug("closing doc chan") close(migrator.DocChan) } }() } } if totalSize > 0 { fetchBar.Total = int64(totalSize) fetchBar.ShowBar = true outputBar = pb.New(totalSize).Prefix("Output ") } } else if len(c.DumpInputFile) > 0 { //read file stream wg.Add(1) f, err := os.Open(c.DumpInputFile) if err != nil { log.Error(err) return } //get file lines lineCount := 0 defer f.Close() r := bufio.NewReader(f) for { _, err := r.ReadString('\n') if io.EOF == err || nil != err { break } lineCount += 1 } log.Trace("file line,", lineCount) fetchBar := pb.New(lineCount).Prefix("Read") outputBar = pb.New(lineCount).Prefix("Output ") f.Close() go migrator.NewFileReadWorker(fetchBar, &wg) } // start pool pool, err := pb.StartPool(fetchBar, outputBar) if err != nil { panic(err) } //dealing with output if len(c.TargetEs) > 0 { if len(c.TargetEsAuthStr) > 0 && 
strings.Contains(c.TargetEsAuthStr, ":") { authArray := strings.Split(c.TargetEsAuthStr, ":") auth := Auth{User: authArray[0], Pass: authArray[1]} migrator.TargetAuth = &auth } //get target es version descESVersion, errs := migrator.ClusterVersion(c.TargetEs, migrator.TargetAuth, migrator.Config.TargetProxy) if errs != nil { return } if strings.HasPrefix(descESVersion.Version.Number, "5.") { log.Debug("target es is V5,", descESVersion.Version.Number) api := new(ESAPIV5) api.Host = c.TargetEs api.Auth = migrator.TargetAuth api.HttpProxy = migrator.Config.TargetProxy migrator.TargetESAPI = api } else { log.Debug("target es is not V5,", descESVersion.Version.Number) api := new(ESAPIV0) api.Host = c.TargetEs api.Auth = migrator.TargetAuth api.HttpProxy = migrator.Config.TargetProxy migrator.TargetESAPI = api } log.Debug("start process with mappings") if srcESVersion != nil && c.CopyIndexMappings && descESVersion.Version.Number[0] != srcESVersion.Version.Number[0] { log.Error(srcESVersion.Version, "=>", descESVersion.Version, ",cross-big-version mapping migration not avaiable, please update mapping manually :(") return } // wait for cluster state to be okay before moving timer := time.NewTimer(time.Second * 3) for { if len(c.SourceEs) > 0 { if status, ready := migrator.ClusterReady(migrator.SourceESAPI); !ready { log.Infof("%s at %s is %s, delaying migration ", status.Name, c.SourceEs, status.Status) <-timer.C continue } } if len(c.TargetEs) > 0 { if status, ready := migrator.ClusterReady(migrator.TargetESAPI); !ready { log.Infof("%s at %s is %s, delaying migration ", status.Name, c.TargetEs, status.Status) <-timer.C continue } } timer.Stop() break } if len(c.SourceEs) > 0 { // get all indexes from source indexNames, indexCount, sourceIndexMappings, err := migrator.SourceESAPI.GetIndexMappings(c.CopyAllIndexes, c.SourceIndexNames) if err != nil { log.Error(err) return } sourceIndexRefreshSettings := map[string]interface{}{} log.Debugf("indexCount: %d", indexCount) if indexCount > 0 { //override indexnames to be copy c.SourceIndexNames = indexNames // copy index settings if user asked if c.CopyIndexSettings || c.ShardsCount > 0 { log.Info("start settings/mappings migration..") //get source index settings var sourceIndexSettings *Indexes sourceIndexSettings, err := migrator.SourceESAPI.GetIndexSettings(c.SourceIndexNames) log.Debug("source index settings:", sourceIndexSettings) if err != nil { log.Error(err) return } //get target index settings targetIndexSettings, err := migrator.TargetESAPI.GetIndexSettings(c.TargetIndexName) if err != nil { //ignore target es settings error log.Debug(err) } log.Debug("target IndexSettings", targetIndexSettings) //if there is only one index and we specify the dest indexname if c.SourceIndexNames != c.TargetIndexName && (len(c.TargetIndexName) > 0) && indexCount == 1 { log.Debugf("only one index,so we can rewrite indexname, src:%v, dest:%v ,indexCount:%d", c.SourceIndexNames, c.TargetIndexName, indexCount) (*sourceIndexSettings)[c.TargetIndexName] = (*sourceIndexSettings)[c.SourceIndexNames] delete(*sourceIndexSettings, c.SourceIndexNames) log.Debug(sourceIndexSettings) } // dealing with indices settings for name, idx := range *sourceIndexSettings { log.Debug("dealing with index,name:", name, ",settings:", idx) tempIndexSettings := getEmptyIndexSettings() targetIndexExist := false //if target index settings is exist and we don't copy settings, we use target settings if targetIndexSettings != nil { //if target es have this index and we dont copy index settings if 
val, ok := (*targetIndexSettings)[name]; ok { targetIndexExist = true tempIndexSettings = val.(map[string]interface{}) } if c.RecreateIndex { migrator.TargetESAPI.DeleteIndex(name) targetIndexExist = false } } //copy index settings if c.CopyIndexSettings { tempIndexSettings = ((*sourceIndexSettings)[name]).(map[string]interface{}) } //check map elements if _, ok := tempIndexSettings["settings"]; !ok { tempIndexSettings["settings"] = map[string]interface{}{} } if _, ok := tempIndexSettings["settings"].(map[string]interface{})["index"]; !ok { tempIndexSettings["settings"].(map[string]interface{})["index"] = map[string]interface{}{} } sourceIndexRefreshSettings[name] = ((*sourceIndexSettings)[name].(map[string]interface{}))["settings"].(map[string]interface{})["index"].(map[string]interface{})["refresh_interval"] //set refresh_interval tempIndexSettings["settings"].(map[string]interface{})["index"].(map[string]interface{})["refresh_interval"] = -1 tempIndexSettings["settings"].(map[string]interface{})["index"].(map[string]interface{})["number_of_replicas"] = 0 //clean up settings delete(tempIndexSettings["settings"].(map[string]interface{})["index"].(map[string]interface{}), "number_of_shards") //copy indexsettings and mappings if targetIndexExist { log.Debug("update index with settings,", name, tempIndexSettings) //override shard settings if c.ShardsCount > 0 { tempIndexSettings["settings"].(map[string]interface{})["index"].(map[string]interface{})["number_of_shards"] = c.ShardsCount } err := migrator.TargetESAPI.UpdateIndexSettings(name, tempIndexSettings) if err != nil { log.Error(err) } } else { //override shard settings if c.ShardsCount > 0 { tempIndexSettings["settings"].(map[string]interface{})["index"].(map[string]interface{})["number_of_shards"] = c.ShardsCount } log.Debug("create index with settings,", name, tempIndexSettings) err := migrator.TargetESAPI.CreateIndex(name, tempIndexSettings) if err != nil { log.Error(err) } } } if c.CopyIndexMappings { //if there is only one index and we specify the dest indexname if c.SourceIndexNames != c.TargetIndexName && (len(c.TargetIndexName) > 0) && indexCount == 1 { log.Debugf("only one index,so we can rewrite indexname, src:%v, dest:%v ,indexCount:%d", c.SourceIndexNames, c.TargetIndexName, indexCount) (*sourceIndexMappings)[c.TargetIndexName] = (*sourceIndexMappings)[c.SourceIndexNames] delete(*sourceIndexMappings, c.SourceIndexNames) log.Debug(sourceIndexMappings) } for name, mapping := range *sourceIndexMappings { err := migrator.TargetESAPI.UpdateIndexMapping(name, mapping.(map[string]interface{})["mappings"].(map[string]interface{})) if err != nil { log.Error(err) } } } log.Info("settings/mappings migration finished.") } } else { log.Error("index not exists,", c.SourceIndexNames) return } defer migrator.recoveryIndexSettings(sourceIndexRefreshSettings) } else if len(c.DumpInputFile) > 0 { //check shard settings //TODO support shard config } } log.Info("start data migration..") //start es bulk thread if len(c.TargetEs) > 0 { log.Debug("start es bulk workers") outputBar.Prefix("Bulk") var docCount int wg.Add(c.Workers) for i := 0; i < c.Workers; i++ { go migrator.NewBulkWorker(&docCount, outputBar, &wg) } } else if len(c.DumpOutFile) > 0 { // start file write outputBar.Prefix("Write") wg.Add(1) go migrator.NewFileDumpWorker(outputBar, &wg) } wg.Wait() outputBar.Finish() // close pool pool.Stop() log.Info("data migration finished.") }
func main() {
	args, err := flags.Parse(&opts)
	if err != nil {
		os.Exit(1)
	}

	if opts.Version {
		log.Println(version)
		os.Exit(0)
	}

	if len(args) == 0 {
		log.Fatalln("WSDL file is required to start the party")
	}

	if opts.OutputFile == args[0] {
		log.Fatalln("Output file cannot be the same as the WSDL file")
	}

	gowsdl, err := gen.NewGoWsdl(args[0], opts.Package, opts.IgnoreTls)
	if err != nil {
		log.Fatalln(err)
	}

	gocode, err := gowsdl.Start()
	if err != nil {
		log.Fatalln(err)
	}

	pkg := "./" + opts.Package
	err = os.Mkdir(pkg, 0744)
	if perr, ok := err.(*os.PathError); ok && os.IsExist(perr.Err) {
		log.Printf("Package directory %s already exists, skipping creation\n", pkg)
	} else if err != nil {
		log.Fatalln(err)
	}

	fd, err := os.Create(pkg + "/" + opts.OutputFile)
	if err != nil {
		log.Fatalln(err)
	}
	defer fd.Close()

	data := new(bytes.Buffer)
	data.Write(gocode["header"])
	data.Write(gocode["types"])
	data.Write(gocode["operations"])

	source, err := format.Source(data.Bytes())
	if err != nil {
		fd.Write(data.Bytes())
		log.Fatalln(err)
	}

	fd.Write(source)
	log.Println("Done 💩")
}