func main() {
    var (
        defaultProbes = fmt.Sprintf("localhost:%d", xfer.ProbePort)
        probes        = flag.String("probes", defaultProbes, "list of probe endpoints, comma separated")
        batch         = flag.Duration("batch", 1*time.Second, "batch interval")
        window        = flag.Duration("window", 15*time.Second, "window")
        listen        = flag.String("http.address", ":"+strconv.Itoa(xfer.AppPort), "webserver listen address")
    )
    flag.Parse()

    xfer.MaxBackoff = 10 * time.Second
    c := xfer.NewCollector(*batch, "id")
    for _, addr := range strings.Split(*probes, ",") {
        c.Add(addr)
    }
    defer c.Stop()

    lifo := NewReportLIFO(c, *window)
    defer lifo.Stop()

    http.Handle("/svg", handleSVG(lifo))
    http.Handle("/txt", handleTXT(lifo))
    http.Handle("/", http.HandlerFunc(handleHTML))

    irq := interrupt()
    go func() {
        log.Printf("listening on %s", *listen)
        log.Print(http.ListenAndServe(*listen, nil))
        irq <- syscall.SIGINT
    }()
    <-irq
    log.Printf("shutting down")
}
func decodeRefArg(name, typeName string) (interface{}, error) {
    switch strings.ToLower(typeName) {
    case "*bool":
        newValue := flag.Bool(name, app.DefaultBoolValue, name)
        return newValue, nil
    case "bool":
        newValue := flag.Bool(name, app.DefaultBoolValue, name)
        return *newValue, nil
    case "*string":
        newValue := flag.String(name, app.DefaultStringValue, name)
        return *newValue, nil
    case "string":
        newValue := flag.String(name, app.DefaultStringValue, name)
        return *newValue, nil
    case "*time.duration":
        newValue := flag.Duration(name, app.DefaultDurationValue, name)
        return *newValue, nil
    case "time.duration":
        newValue := flag.Duration(name, app.DefaultDurationValue, name)
        return *newValue, nil
    case "*float64":
        newValue := flag.Float64(name, app.DefaultFloat64Value, name)
        return *newValue, nil
    case "float64":
        newValue := flag.Float64(name, app.DefaultFloat64Value, name)
        return *newValue, nil
    case "*int":
        newValue := flag.Int(name, app.DefaultIntValue, name)
        return *newValue, nil
    case "int":
        newValue := flag.Int(name, app.DefaultIntValue, name)
        return *newValue, nil
    case "*int64":
        newValue := flag.Int64(name, app.DefaultInt64Value, name)
        return *newValue, nil
    case "int64":
        newValue := flag.Int64(name, app.DefaultInt64Value, name)
        return *newValue, nil
    case "*uint":
        newValue := flag.Uint(name, app.DefaultUIntValue, name)
        return *newValue, nil
    case "uint":
        newValue := flag.Uint(name, app.DefaultUIntValue, name)
        return *newValue, nil
    case "*uint64":
        newValue := flag.Uint64(name, app.DefaultUInt64Value, name)
        return *newValue, nil
    case "uint64":
        newValue := flag.Uint64(name, app.DefaultUInt64Value, name)
        return *newValue, nil
    }
    return nil, fmt.Errorf("unknown type %s for argument %s", typeName, name)
}
func main() {
    pg := flag.Bool("pg", false, "Check PostgreSQL TLS, incompatible with -hostfile")
    timeout := flag.Duration("timeout", 5*time.Second, "Timeout after sending heartbeat")
    hostFile := flag.String("hostfile", "", "Path to a newline separated file with hosts or ips")
    workers := flag.Int("workers", runtime.NumCPU()*10, "Number of workers to scan hosts with, only used with hostfile flag")
    retryDelay := flag.Duration("retry", 10*time.Second, "Seconds to wait before retesting a host after an unfavorable response")
    refreshDelay := flag.Duration("refresh", 10*time.Minute, "Seconds to wait before rechecking secure hosts")
    listen := flag.String("listen", "localhost:5000", "Address to serve HTTP dashboard from")
    flag.Usage = func() {
        fmt.Fprintf(os.Stderr, "Usage: %s [options] host[:443]\n", os.Args[0])
        fmt.Fprintf(os.Stderr, "Options:\n")
        flag.PrintDefaults()
    }
    flag.Parse()

    if *hostFile != "" {
        checkMultiHosts(*hostFile, *timeout, *retryDelay, *refreshDelay, *workers, *listen)
    } else {
        if flag.NArg() != 1 {
            flag.Usage()
            os.Exit(2)
        }
        checkSingleHost(flag.Arg(0), *timeout, *pg)
    }
}
func init() {
    // Command line variables
    mqttServer = flag.String("s", ":1883", "IP and Port of the MQTT Broker. e.g. 127.0.0.1:1883. Default: :1883")
    stationID = flag.String("u", "", "WU PWS station id")
    password = flag.String("p", "", "WU PWS station password")
    software = flag.String("f", "gowupws", "Name of the software updating the PWS. Default: gowupws")
    calculateDewpoint = flag.Bool("d", false, "Provide calculated dewpoint, if not provided as a parameter. Default: False")
    configPath = flag.String("c", "", "Provide path to config file")
    sensorExpire = flag.Duration("l", 5*time.Minute, "Sensor/Device life, minutes")
    checkCache = flag.Duration("e", 1*time.Minute, "Check sensor/device every, minutes")
    stationReportPeriod = flag.Duration("r", 2*time.Minute, "Station report period, minutes")
    flag.Parse()

    if *stationID == "" {
        log.Fatal("A Weather Underground station ID has to be provided")
    }
    if *password == "" {
        log.Fatal("A Weather Underground password has to be provided")
    }
    if *configPath == "" {
        log.Fatal("A config file has to be provided")
    }

    config = readConfigFile(*configPath)
    addressParameter = mapAddressToParameter(&config)
    done = make(chan struct{})
}
func main() {
    flag.Usage = func() {
        fmt.Fprintf(os.Stderr, "usage: %s [flags] <destination>\n", os.Args[0])
        flag.PrintDefaults()
    }
    interval := flag.Duration("interval", time.Second*1, "ping packet retransmission interval")
    timeout := flag.Duration("timeout", time.Second*5, "ping timeout until failure")
    flag.Parse()

    if flag.NArg() != 1 {
        flag.Usage()
        os.Exit(1)
    }

    server := flag.Arg(0)
    host, port, err := net.SplitHostPort(server)
    if err != nil {
        host = server
        port = strconv.Itoa(gumble.DefaultPort)
    }

    resp, err := gumble.Ping(net.JoinHostPort(host, port), *interval, *timeout)
    if err != nil {
        fmt.Fprintf(os.Stderr, "%s: %s\n", os.Args[0], err)
        os.Exit(1)
    }
    major, minor, patch := resp.Version.SemanticVersion()

    fmt.Printf("Address: %s\n", resp.Address)
    fmt.Printf("Ping: %s\n", resp.Ping)
    fmt.Printf("Version: %d.%d.%d\n", major, minor, patch)
    fmt.Printf("Connected Users: %d\n", resp.ConnectedUsers)
    fmt.Printf("Maximum Users: %d\n", resp.MaximumUsers)
    fmt.Printf("Maximum Bitrate: %d\n", resp.MaximumBitrate)
}
func configure() {
    configF := flag.String("config", "", "JSON configuration file to load. Overrides flags")
    minUptime := flag.Duration("minuptime", 0, "will not exit 0 before uptime >= <minuptime> (deprecated)")
    locks := flag.String("lock", "/tmp/lockfiles/", "where to look for lockfiles (deprecated)")
    lockDur := flag.Duration("duration", time.Minute*10, "duration for which lock files are considered valid (deprecated)")
    logDir := flag.String("logs", "/var/log/dozy", "Where to place log files (deprecated)")
    _ = flag.Duration("sleep", 0, "duration to sleep at the end of script before exit 0 (not used anymore)")
    flag.Parse()

    if len(*configF) > 0 {
        Info.Println("loading settings from file:", *configF)
        configFile, err := os.Open(*configF)
        if err != nil {
            panic(err)
        }
        parser := json.NewDecoder(configFile)
        if err = parser.Decode(&settings); err != nil {
            panic(err)
        }
    } else {
        settings.MinUptime = *minUptime
        settings.Locks = *locks
        settings.LockAge = *lockDur
        settings.Logs = *logDir
    }
}
func main() {
    var wg sync.WaitGroup
    results := make(chan FetchResult)

    rand.Seed(time.Now().UnixNano())

    input := flag.String("input", "", "read feed URLs from this file or URL")
    poll := flag.Duration("poll", time.Hour, "how often to poll feeds")
    listPoll := flag.Duration("listpoll", 15*time.Minute, "how often to check feed list for new feeds")
    output := flag.String("output", "river.js", "write output to this file")
    quickstart := flag.Bool("quickstart", false, "when true, don't space out initial feed reads")
    flag.Parse()

    if *input == "" {
        fmt.Println("no -input given, exiting...")
        return
    }

    go buildRiver(results, *output)

    var feeds []string
    if startsWith(*input, "http://", "https://") {
        feeds = loadRemoteFeedList(*input)
    } else {
        feeds = loadLocalFeedList(*input)
    }
    go pollFeedList(*input, *poll, *listPoll, results)

    logger.Printf("Loading %d feeds from %q and writing to %q", len(feeds), *input, *output)

    for _, url := range feeds {
        wg.Add(1)
        if _, found := activeFeeds[url]; found {
            logger.Printf("%q already added, skipping", url)
            continue
        }

        var delayDuration time.Duration
        if *quickstart {
            delayDuration = time.Duration(0)
            logger.Printf("%q will update every %v", url, *poll)
        } else {
            delayDuration = time.Minute * time.Duration(rand.Intn(60))
            logger.Printf("%q will first update in %v and then every %v", url, delayDuration, *poll)
        }

        t := new(time.Ticker)
        fetcher := &FeedFetcher{
            Poll:   *poll,
            Ticker: t,
            Delay:  time.After(delayDuration),
            URL:    url,
        }
        activeFeeds[url] = fetcher
        go fetcher.Run(results)
    }
    wg.Wait()
}
func SetupConfigFlags() {
    fArchive = flag.Bool("archive", false, "Pass flag to execute archival")
    fArchiveAge = flag.Duration("archive-age", time.Hour*24*15, "Archive only indices older than this age (default is 15 days, written 360h)")
    fDelete = flag.Bool("delete", false, "Pass flag to execute deletion")
    fDeleteAge = flag.Duration("delete-age", time.Hour*24*30, "Delete only indices older than this age (default is 30 days, written 720h)")
    fElasticSearchHost = flag.String("es-host", "", "Elastic Search host to delete indices from (required)")
    fIndicePrefix = flag.String("prefix", "logstash-", "Prefix behind ElasticSearch indices to delete or archive")
}
func main() {
    var (
        flListen         = flag.String("listen", "localhost:8901", "server listen address")
        flMode           = flag.String("mode", "http", "server mode (http or fcgi)")
        flProtocol       = flag.String("protocol", "tcp", "listener protocol, only valid for fcgi mode")
        flConfig         = flag.String("config", "", "path to configuration file")
        flSsl            = flag.Bool("ssl", false, "enable ssl, requires certificate and key files")
        flCertFile       = flag.String("cert", "", "ssl certificate")
        flKeyFile        = flag.String("ssl-key", "", "ssl private key")
        flReadTimeout    = flag.Duration("read-timeout", 10*time.Second, "read timeout")
        flWriteTimeout   = flag.Duration("write-timeout", 10*time.Second, "write timeout")
        flMaxHeaderBytes = flag.Int("buffer", 1<<20, "maximum header bytes")
        flBase           = flag.String("base", "", "path to base directory")
    )
    flag.Parse()

    handler, err := handlerFactory(*flConfig, *flBase)
    if err != nil {
        log.Fatalf("An error occurred while initializing the application: %s", err)
    }

    switch *flMode {
    case "http":
        s := &http.Server{
            Addr:           *flListen,
            Handler:        handler,
            ReadTimeout:    *flReadTimeout,
            WriteTimeout:   *flWriteTimeout,
            MaxHeaderBytes: *flMaxHeaderBytes,
        }
        if *flSsl {
            if *flCertFile == "" || *flKeyFile == "" {
                log.Fatalf("Requires SSL certificate and key files for SSL mode")
            }
            err = s.ListenAndServeTLS(*flCertFile, *flKeyFile)
        } else {
            err = s.ListenAndServe()
        }
        if err != nil {
            log.Fatal(err)
        }
    case "fcgi":
        listener, err := net.Listen(*flProtocol, *flListen)
        if err != nil {
            log.Fatal(err)
        }
        err = fcgi.Serve(listener, handler)
        if err != nil {
            log.Fatal(err)
        }
    default:
        log.Fatalf("Invalid server mode: %v (Valid modes are 'http' and 'fcgi')", *flMode)
    }
}
func main() {
    var (
        version       = flag.Bool("version", false, "Print version information.")
        listenAddress = flag.String("web.listen-address", ":9109", "Address to listen on for web interface and telemetry.")
        metricsPath   = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
        Labels        = flag.String("labels", "", "List of labels (comma separated).")
        LabelValues   = flag.String("values", "", "List of label values (comma separated)")
        Timeout       = flag.Duration("timeout", 5*time.Second, "Timeout for trying to get to json URI.")
        interval      = flag.Duration("interval", 1*time.Minute, "Refresh interval for json scraping.")
        namespace     = flag.String("namespace", "json", "Namespace for metrics exported from Json.")
        debug         = flag.Bool("debug", false, "Print debug information")
        lowercase     = flag.Bool("lowercase", true, "Lowercase metric names")
        jmx           = flag.Bool("jmx", false, "Enable jmx mode when parsing - name attribute will turn into path")
        unsecured     = flag.Bool("unsecured", false, "Accept untrusted https certificate (used for private certificates)")
        blacklist     = flag.String("blacklist", "", "Blacklist regex expression of metric names.")
        whitelist     = flag.String("whitelist", "", "Whitelist regex expression of metric names.")
        valuelabel    = flag.String("valuelabel", "", "Create labels from values using metric-name regex, format: <label1>:<regex1>[/<label2>:<regex2>[/...]].")
        pathlabel     = flag.String("pathlabel", "", "Create labels from path segments with regex match, format: <label1>:<regex1>[/<label2>:<regex2>[/...]].")
    )
    flag.Parse()

    log.Println("json_exporter", Version)
    if *version {
        return
    }

    urls := flag.Args()
    if len(urls) < 1 {
        log.Fatal("Got no URLs, please use the following syntax to add URLs: json_exporter [options] <URL1>[ <URL2>[ ..<URLn>]]")
    } else {
        log.Println("Got the following URL list", urls)
    }

    // Import static labels.
    labels := []string{}
    labelValues := []string{}
    if len(*Labels) > 0 && len(*LabelValues) > 0 {
        labels = strings.Split(*Labels, ",")
        labelValues = strings.Split(*LabelValues, ",")
        if len(labels) != len(labelValues) {
            log.Fatal("Number of labels does not match number of label values")
        }
    }

    exporter := JSONExporter(urls, *Timeout, *namespace, labels, labelValues, *debug, *unsecured, *blacklist, *whitelist, *interval, *pathlabel, *valuelabel, *jmx, *lowercase)
    prometheus.MustRegister(exporter)

    log.Println("Starting Server:", *listenAddress)
    http.Handle(*metricsPath, prometheus.Handler())
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte(`<html>
<head><title>JSON Exporter</title></head>
<body>
<h1>JSON Exporter</h1>
<p><a href='` + *metricsPath + `'>Metrics</a></p>
</body>
</html>`))
    })
    log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
func Main() error {
    messageTimeout := flag.Duration("message_timeout", 2*time.Minute, "timeout for one message to be proxied")
    clientIdleTimeout := flag.Duration("client_idle_timeout", 60*time.Minute, "idle timeout for client connections")
    getLastErrorTimeout := flag.Duration("get_last_error_timeout", time.Minute, "timeout for getLastError pinning")
    maxConnections := flag.Uint("max_connections", 100, "maximum number of connections per mongo")
    portStart := flag.Int("port_start", 6000, "start of port range")
    portEnd := flag.Int("port_end", 6010, "end of port range")
    addrs := flag.String("addrs", "localhost:27017", "comma separated list of mongo addresses")
    flag.Parse()

    replicaSet := dvara.ReplicaSet{
        Addrs:               *addrs,
        PortStart:           *portStart,
        PortEnd:             *portEnd,
        MessageTimeout:      *messageTimeout,
        ClientIdleTimeout:   *clientIdleTimeout,
        GetLastErrorTimeout: *getLastErrorTimeout,
        MaxConnections:      *maxConnections,
    }

    var statsClient stats.HookClient
    var log stdLogger
    var graph inject.Graph
    err := graph.Provide(
        &inject.Object{Value: &log},
        &inject.Object{Value: &replicaSet},
        &inject.Object{Value: &statsClient},
    )
    if err != nil {
        return err
    }
    if err := graph.Populate(); err != nil {
        return err
    }
    objects := graph.Objects()

    // Temporarily set up the metrics against a test registry.
    gregistry := gangliamr.NewTestRegistry()
    for _, o := range objects {
        if rmO, ok := o.Value.(registerMetrics); ok {
            rmO.RegisterMetrics(gregistry)
        }
    }

    if err := startstop.Start(objects, &log); err != nil {
        return err
    }
    defer startstop.Stop(objects, &log)

    ch := make(chan os.Signal, 2)
    signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)
    <-ch
    signal.Stop(ch)
    return nil
}
func Main() error {
    messageTimeout := flag.Duration("message_timeout", 2*time.Minute, "timeout for one message to be proxied")
    clientIdleTimeout := flag.Duration("client_idle_timeout", 60*time.Minute, "idle timeout for client connections")
    serverIdleTimeout := flag.Duration("server_idle_timeout", 1*time.Hour, "idle timeout for server connections")
    serverClosePoolSize := flag.Uint("server_close_pool_size", 100, "number of goroutines that will handle closing server connections")
    getLastErrorTimeout := flag.Duration("get_last_error_timeout", time.Minute, "timeout for getLastError pinning")
    maxPerClientConnections := flag.Uint("max_per_client_connections", 100, "maximum number of connections per client")
    maxConnections := flag.Uint("max_connections", 100, "maximum number of connections per mongo")
    portStart := flag.Int("port_start", 6000, "start of port range")
    portEnd := flag.Int("port_end", 6010, "end of port range")
    addrs := flag.String("addrs", "localhost:27017", "comma separated list of mongo addresses")
    flag.Parse()

    replicaSet := dvara.ReplicaSet{
        Addrs:                   *addrs,
        PortStart:               *portStart,
        PortEnd:                 *portEnd,
        MessageTimeout:          *messageTimeout,
        ClientIdleTimeout:       *clientIdleTimeout,
        ServerIdleTimeout:       *serverIdleTimeout,
        ServerClosePoolSize:     *serverClosePoolSize,
        GetLastErrorTimeout:     *getLastErrorTimeout,
        MaxConnections:          *maxConnections,
        MaxPerClientConnections: *maxPerClientConnections,
    }

    var statsClient stats.HookClient
    var log stdLogger
    var graph inject.Graph
    err := graph.Provide(
        &inject.Object{Value: &log},
        &inject.Object{Value: &replicaSet},
        &inject.Object{Value: &statsClient},
    )
    if err != nil {
        return err
    }
    if err := graph.Populate(); err != nil {
        return err
    }
    objects := graph.Objects()

    if err := startstop.Start(objects, &log); err != nil {
        return err
    }
    defer startstop.Stop(objects, &log)

    ch := make(chan os.Signal, 2)
    signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)
    <-ch
    signal.Stop(ch)
    return nil
}
func main() {
    address := flag.String("address", "0.0.0.0", "IP address to listen on")
    ports := flag.String("ports", "", "Comma-delimited list of host=container ports to listen on")
    docker := flag.String("docker", "unix:///var/run/docker.sock", "URL of the Docker host")
    tag := flag.String("tag", "", "Tag of docker images to watch")
    statusURL := flag.String("status_url", "", "Optional HTTP status URL of docker container, e.g. :80/status")
    statusTimeout := flag.Duration("status_timeout", 10*time.Second, "Time to wait for a new container to respond to a status query")
    gracePeriod := flag.Duration("grace_period", 10*time.Second, "Time to wait before killing an old container")
    flag.Parse()

    if *ports == "" {
        fmt.Fprintf(os.Stderr, "Must specify one or more port mappings.\n")
        flag.Usage()
        os.Exit(1)
    }
    if *tag == "" {
        fmt.Fprintf(os.Stderr, "Must specify a docker tag.\n")
        flag.Usage()
        os.Exit(1)
    }

    // Create the proxy server and begin listening in the background.
    server, err := NewProxyServer(*address, *ports)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Could not listen on %s: %s\n", *address, err.Error())
        os.Exit(1)
    }
    server.Start()
    defer server.Stop()

    dc, err := NewDockerClient(*docker, *tag, server)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Could not connect to docker on %s: %s\n", *docker, err.Error())
        os.Exit(1)
    }
    if *statusURL != "" {
        dc.SetStatusInfo(*statusURL, *statusTimeout)
    }
    dc.SetGracePeriod(*gracePeriod)

    // Try and proxy to anything currently running.
    if err := dc.DetectExistingContainers(); err != nil {
        fmt.Fprintf(os.Stderr, "Could not query containers: %s\n", err.Error())
        os.Exit(1)
    }

    fmt.Fprintf(os.Stdout, "Listening...\n")
    dc.Listen()
}
func main() {
    var (
        listen  = flag.String("listen", ":7800", "Server listen address.")
        name    = flag.String("test.name", "unknown", "Name of the test to run.")
        path    = flag.String("test.path", "/", "Path to hit on the targets")
        rate    = flag.Uint64("test.rate", defaultRate, "Number of requests to send during test duration.")
        timeout = flag.Duration("test.timeout", defaultTimeout, "Time until a request is discarded")
        ts      = targets{}
    )
    flag.Var(&ts, "test.target", `Target to hit by the test with the following format: -test.target="NAME:address/url"`)
    flag.Parse()

    if *listen == "" || len(ts) == 0 {
        flag.Usage()
        os.Exit(1)
    }

    var (
        test     = newTest(*name, *path, *rate, defaultInterval, *timeout, ts)
        registry = newRegistry(prometheus.Labels{"test": test.name})
        resultc  = make(chan result)
    )

    test.run(resultc)
    go registry.collect(resultc)

    http.Handle("/metrics", prometheus.Handler())

    log.Printf("Starting server on %s", *listen)
    log.Fatal(http.ListenAndServe(*listen, nil))
}
func main() {
    uuid := flag.String("uuid", "1BEAC099-BEAC-BEAC-BEAC-BEAC09BEAC09", "iBeacon UUID")
    major := flag.Int("major", 0, "iBeacon major value (uint16)")
    minor := flag.Int("minor", 0, "iBeacon minor value (uint16)")
    power := flag.Int("power", -57, "iBeacon measured power (int8)")
    d := flag.Duration("duration", 1*time.Minute, "advertising duration")
    verbose := flag.Bool("verbose", false, "dump all events")
    flag.Parse()

    ble := goble.New()
    ble.SetVerbose(*verbose)
    ble.Init()

    var utsname xpc.Utsname
    xpc.Uname(&utsname)
    log.Println(utsname.Release)

    time.Sleep(1 * time.Second)

    log.Println("Start Advertising", *uuid, *major, *minor, *power)
    ble.StartAdvertisingIBeacon(xpc.MustUUID(*uuid), uint16(*major), uint16(*minor), int8(*power))

    time.Sleep(*d)

    log.Println("Stop Advertising")
    ble.StopAdvertising()
}
func main() {
    var (
        publish         = flag.String("publish", fmt.Sprintf("localhost:%d", xfer.AppPort), "publish target")
        publishInterval = flag.Duration("publish.interval", 1*time.Second, "publish (output) interval")
    )
    flag.Parse()

    if len(flag.Args()) != 1 {
        log.Fatal("usage: fixprobe [--args] report.json")
    }

    f, err := os.Open(flag.Arg(0))
    if err != nil {
        log.Fatal(err)
    }
    var fixedReport report.Report
    if err := json.NewDecoder(f).Decode(&fixedReport); err != nil {
        log.Fatal(err)
    }
    f.Close()

    publisher, err := xfer.NewHTTPPublisher(*publish, "fixprobe", "fixprobe")
    if err != nil {
        log.Fatal(err)
    }

    for range time.Tick(*publishInterval) {
        publisher.Publish(fixedReport)
    }
}
// appMain runs the app.
func appMain() {
    var (
        window    = flag.Duration("window", 15*time.Second, "window")
        listen    = flag.String("http.address", ":"+strconv.Itoa(xfer.AppPort), "webserver listen address")
        logPrefix = flag.String("log.prefix", "<app>", "prefix for each log line")
    )
    flag.Parse()

    if !strings.HasSuffix(*logPrefix, " ") {
        *logPrefix += " "
    }
    log.SetPrefix(*logPrefix)

    defer log.Print("app exiting")

    rand.Seed(time.Now().UnixNano())
    app.UniqueID = strconv.FormatInt(rand.Int63(), 16)
    app.Version = version
    log.Printf("app starting, version %s, ID %s", app.Version, app.UniqueID)

    handler := router(app.NewCollector(*window))
    go func() {
        log.Printf("listening on %s", *listen)
        log.Print(http.ListenAndServe(*listen, handler))
    }()

    common.SignalHandlerLoop()
}
func main() {
    var (
        version         = flag.Bool("version", false, "print version number and exit")
        publishInterval = flag.Duration("publish.interval", 1*time.Second, "publish (output) interval")
        listen          = flag.String("listen", ":"+strconv.Itoa(xfer.ProbePort), "listen address")
        hostCount       = flag.Int("hostcount", 10, "Number of demo hosts to generate")
    )
    flag.Parse()

    if len(flag.Args()) != 0 {
        flag.Usage()
        os.Exit(1)
    }

    // -version flag:
    if *version {
        fmt.Printf("unstable\n")
        return
    }

    publisher, err := xfer.NewTCPPublisher(*listen)
    if err != nil {
        log.Fatal(err)
    }
    defer publisher.Close()

    go func() {
        for {
            publisher.Publish(DemoReport(*hostCount))
            time.Sleep(*publishInterval)
        }
    }()

    log.Printf("%s", <-interrupt())
    log.Printf("Shutting down...")
}
func main() {
    var (
        window       = flag.Duration("window", 15*time.Second, "window")
        listen       = flag.String("http.address", ":"+strconv.Itoa(xfer.AppPort), "webserver listen address")
        printVersion = flag.Bool("version", false, "print version number and exit")
    )
    flag.Parse()

    if *printVersion {
        fmt.Println(version)
        return
    }

    rand.Seed(time.Now().UnixNano())
    id := strconv.FormatInt(rand.Int63(), 16)
    log.Printf("app starting, version %s, ID %s", version, id)

    c := xfer.NewCollector(*window)
    http.Handle("/", Router(c))

    irq := interrupt()
    go func() {
        log.Printf("listening on %s", *listen)
        log.Print(http.ListenAndServe(*listen, nil))
        irq <- syscall.SIGINT
    }()
    <-irq
    log.Printf("shutting down")
}
func main() {
    hndl := log15.CallerFileHandler(log15.StderrHandler)
    Log.SetHandler(hndl)

    flagVerbose := flag.Bool("v", false, "verbose logging")
    flagURL := flag.String("url", "http://nav.gov.hu/nav/adatbazisok/adatbleker/afaalanyok/afaalanyok_csoportos", "starting URL")
    flagBatchSize := flag.Int("batch.size", nav.DefaultBatchSize, "batch size")
    flagTimeout := flag.Duration("timeout", 5*time.Minute, "timeout duration")
    flag.Parse()

    if !*flagVerbose {
        hndl = log15.LvlFilterHandler(log15.LvlInfo, log15.StderrHandler)
    }
    Log.SetHandler(hndl)
    nav.Log.SetHandler(hndl)

    var wg sync.WaitGroup
    results := make(chan []nav.Result, runtime.NumCPU())
    wg.Add(1)
    go func() {
        defer wg.Done()
        for result := range results {
            for _, res := range result {
                txt := fmt.Sprintf("%q", res.Owner)
                if !res.Valid {
                    txt = "INVALID"
                }
                fmt.Fprintf(os.Stdout, "%s;%s\n", res.TaxNo, txt)
            }
        }
    }()

    ep := &nav.Endpoint{URL: *flagURL, BatchSize: *flagBatchSize}
    logger := Log
    var err error
    ctx := context.Background()
    if *flagTimeout > 0 {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, *flagTimeout)
        defer cancel()
    }
    if flag.NArg() > 0 {
        logger = logger.New("name", "Get", "args", flag.Args())
        logger.Info("call")
        var result []nav.Result
        logger.Info("Start")
        result, err = ep.Get(ctx, flag.Args())
        if result != nil {
            results <- result
        }
        close(results)
    } else {
        logger = logger.New("name", "GetFromReader")
        logger.Info("Start")
        err = ep.GetFromReader(ctx, results, os.Stdin)
    }
    wg.Wait()
    if err != nil {
        logger.Error("get", "error", err)
        os.Exit(2)
    }
}
func main() {
    defaultDuration, _ := time.ParseDuration("1000ms")
    outputFile := flag.String("o", "out.txt", "the output file")
    nbThreads := flag.Int("t", 5, "the number of threads")
    duration := flag.Duration("d", defaultDuration, "the duration")
    flag.Parse()

    hashers := make([]*hash.Hash, *nbThreads)
    bv := []byte("chboing")

    loop := func(idx int) {
        (*hashers[idx]).Write(bv)
    }
    init := func(idx int) {
        t := sha256.New()
        hashers[idx] = &t
    }

    fmt.Printf("SHA256 bench: CPU Multi(%d) - duration %v\n", *nbThreads, *duration)
    nb := launchWorkers(*nbThreads, *duration, init, loop)
    fmt.Printf("Total computed: %d \n", nb)
    writeLines([]string{fmt.Sprintf("%d", nb)}, *outputFile)
}
func main() {
    var (
        publish         = flag.String("publish", fmt.Sprintf("localhost:%d", xfer.AppPort), "publish target")
        publishInterval = flag.Duration("publish.interval", 1*time.Second, "publish (output) interval")
        hostCount       = flag.Int("hostcount", 10, "Number of demo hosts to generate")
    )
    flag.Parse()

    client, err := appclient.NewAppClient(appclient.ProbeConfig{
        Token:    "demoprobe",
        ProbeID:  "demoprobe",
        Insecure: false,
    }, *publish, *publish, nil)
    if err != nil {
        log.Fatal(err)
    }
    rp := appclient.NewReportPublisher(client)

    rand.Seed(time.Now().UnixNano())
    for range time.Tick(*publishInterval) {
        if err := rp.Publish(demoReport(*hostCount)); err != nil {
            log.Print(err)
        }
    }
}
func main() {
    var (
        window       = flag.Duration("window", 15*time.Second, "window")
        listen       = flag.String("http.address", ":"+strconv.Itoa(xfer.AppPort), "webserver listen address")
        logPrefix    = flag.String("log.prefix", "<app>", "prefix for each log line")
        printVersion = flag.Bool("version", false, "print version number and exit")
    )
    flag.Parse()

    if *printVersion {
        fmt.Println(version)
        return
    }

    if !strings.HasSuffix(*logPrefix, " ") {
        *logPrefix += " "
    }
    log.SetPrefix(*logPrefix)

    defer log.Print("app exiting")

    rand.Seed(time.Now().UnixNano())
    uniqueID = strconv.FormatInt(rand.Int63(), 16)
    log.Printf("app starting, version %s, ID %s", version, uniqueID)

    c := NewCollector(*window)
    http.Handle("/", Router(c))
    go func() {
        log.Printf("listening on %s", *listen)
        log.Print(http.ListenAndServe(*listen, nil))
    }()

    log.Printf("%s", <-interrupt())
}
func main() {
    keyname := flag.String("keyname", "hosts", "Etcd keyname under which to record containers' hostnames/IP")
    ttl := flag.Uint64("ttl", 172800, "Time to live of the host entry")
    dockerAPIPort := flag.String("port", "4243", "Docker API Port")
    // The default is an explicit duration; a bare literal 10 would mean 10 nanoseconds.
    interval := flag.Duration("interval", 10*time.Second, "Docker API to Etcd sync interval")
    //etcdHost := flag.String("etcd_host", "127.0.0.1", "Etcd host")
    //etcdPort := flag.String("etcd_port", "4001", "Etcd port")
    concurrency := flag.Int("concurrency", 1, "Number of worker threads")
    flag.Parse()

    //etcdCluster := []string{"http://" + *etcdHost + ":" + *etcdPort}
    etcdClient := etcd.NewClient()
    //etcdClient.SetCluster(etcdCluster)

    dockerClient, err := docker.NewClient("http://127.0.0.1:" + *dockerAPIPort)
    if err != nil {
        log.Fatal(err)
    }

    c := make(chan string, *concurrency)
    for i := 0; i < *concurrency; i++ {
        go inspectAndSet(c, etcdClient, dockerClient, keyname, ttl)
    }
    loop(c, dockerClient, interval)
}
func main() {
    period := flag.Duration("period", 1*time.Second, "duration to sleep")
    flag.Parse()

    fmt.Printf("Sleeping for %v...", *period)
    time.Sleep(*period)
    fmt.Println("Done")
}
func main() {
    ignore := stringslice([]string{})

    port := flag.Int("p", 3000, "port number to listen on")
    dir := flag.String("d", ".", "directory to serve")
    timeout := flag.Duration("t", time.Second*5, "timeout for PHP requests")
    flag.Var(&ignore, "i", "ignore errors matching this string (multiple allowed)")
    flag.Usage = func() {
        fmt.Fprintf(os.Stderr, "Usage: %s [options] -- [php options]\n", os.Args[0])
        flag.PrintDefaults()
    }
    flag.Parse()

    phpHandler, err := greyhound.NewPhpHandler(*dir, *timeout, flag.Args(), ignore)
    if err != nil {
        log.Fatalln(err)
    }
    defer phpHandler.Close()

    fallbackHandler := greyhound.NewFallbackHandler(*dir, ".php", phpHandler)
    http.Handle("/", fallbackHandler)

    fmt.Printf("Listening on :%d\n", *port)
    log.Fatalln(http.ListenAndServe(fmt.Sprintf(":%d", *port), nil))
}
func main() {
    var (
        listenAddress = flag.String("web.listen-address", ":9108", "Address to listen on for web interface and telemetry.")
        metricsPath   = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
        esURI         = flag.String("es.uri", "http://localhost:9200", "HTTP API address of an Elasticsearch node.")
        esTimeout     = flag.Duration("es.timeout", 5*time.Second, "Timeout for trying to get stats from Elasticsearch.")
        esAllNodes    = flag.Bool("es.all", false, "Export stats for all nodes in the cluster.")
    )
    flag.Parse()

    exporter := NewExporter(*esURI, *esTimeout, *esAllNodes)
    prometheus.MustRegister(exporter)

    log.Println("Starting Server:", *listenAddress)
    http.Handle(*metricsPath, prometheus.Handler())
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte(`<html>
<head><title>Elasticsearch Exporter</title></head>
<body>
<h1>Elasticsearch Exporter</h1>
<p><a href='` + *metricsPath + `'>Metrics</a></p>
</body>
</html>`))
    })
    log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
func main() {
    var (
        device  = flag.String("device", "en0", "device to listen to")
        timeout = flag.Duration("timeout", pcap.BlockForever, "time intervals to parse captured packets")
    )
    flag.Parse()

    const (
        MTULen          = 1500
        promiscuousMode = true
    )

    handle, err := pcap.OpenLive(*device, MTULen, promiscuousMode, *timeout)
    // handle, err := pcap.OpenOffline("./dump_one.pcap")
    if err != nil {
        log.Fatalln(err)
    }

    packetSource := gopacket.NewPacketSource(handle, handle.LinkType())

    go countBytes(goingChan)
    go func() {
        for packet := range packetSource.Packets() {
            go handlePacket(packet, goingChan)
        }
    }()

    http.HandleFunc("/count", showClientMap)
    http.Handle("/", http.FileServer(http.Dir("./")))
    if err := http.ListenAndServe(":8080", nil); err != nil {
        log.Fatalln(err)
    }
}
func main() {
    var (
        publish         = flag.String("publish", fmt.Sprintf("localhost:%d", xfer.AppPort), "publish target")
        publishInterval = flag.Duration("publish.interval", 1*time.Second, "publish (output) interval")
    )
    flag.Parse()

    if len(flag.Args()) != 1 {
        log.Fatal("usage: fixprobe [--args] report.json")
    }

    f, err := os.Open(flag.Arg(0))
    if err != nil {
        log.Fatal(err)
    }
    var fixedReport report.Report
    if err := json.NewDecoder(f).Decode(&fixedReport); err != nil {
        log.Fatal(err)
    }
    f.Close()

    client, err := appclient.NewAppClient(appclient.ProbeConfig{
        Token:    "fixprobe",
        ProbeID:  "fixprobe",
        Insecure: false,
    }, *publish, *publish, nil)
    if err != nil {
        log.Fatal(err)
    }

    rp := appclient.NewReportPublisher(client)
    for range time.Tick(*publishInterval) {
        rp.Publish(fixedReport)
    }
}
func LoadConfig() Config {
    var (
        mongo_user     = flag.String("mongo_user", "", "MongoDB User")
        mongo_pass     = flag.String("mongo_pass", "", "MongoDB Password")
        statsd_host    = flag.String("statsd_host", "localhost", "StatsD Host")
        statsd_port    = flag.Int("statsd_port", 8125, "StatsD Port")
        statsd_env     = flag.String("statsd_env", "dev", "StatsD metric environment prefix")
        statsd_cluster = flag.String("statsd_cluster", "0", "StatsD metric cluster prefix")
        interval       = flag.Duration("interval", 5*time.Second, "Polling interval")
    )
    flag.Var(&mongo_addresses, "mongo_address", "List of mongo addresses in host:port format")
    iniflags.Parse()

    if len(mongo_addresses) == 0 {
        mongo_addresses = append(mongo_addresses, "localhost:27017")
    }

    cfg := Config{
        Interval: *interval,
        Mongo: Mongo{
            Addresses: mongo_addresses,
            User:      *mongo_user,
            Pass:      *mongo_pass,
        },
        Statsd: Statsd{
            Host:    *statsd_host,
            Port:    *statsd_port,
            Env:     *statsd_env,
            Cluster: *statsd_cluster,
        },
    }
    return cfg
}