func main() {
	// Parse input flags
	var socket *string = flag.String("socket", "", "Unix domain socket to connect to")
	var num_clients *int = flag.Int("num_clients", 100, "Number of clients to launch")
	var concurrency *int = flag.Int("concurrency", 10, "Number of clients running concurrently")
	var num_cpu *int = flag.Int("cpu_use", 2, "Number of CPUs to use")
	flag.Parse()

	// Set the number of CPUs to use
	runtime.GOMAXPROCS(*num_cpu)

	// Create a channel to communicate with the processes.
	// This channel will imitate a semaphore.
	p := 0
	sem := make(chan bool, *concurrency)

	// Place the initial values in the semaphore
	for i := 0; i < *concurrency; i++ {
		sem <- true
	}

	// Run the test clients
	for <-sem; p < *num_clients; _, p = <-sem, p+1 {
		go createTestClient(*socket, "unix", sem)
	}

	// Wait for the remaining processes to finish
	for j := 0; j < *concurrency-1; _, j = <-sem, j+1 {
	}

	fmt.Println(p, "processes finished")
}
func main() {
	var statsdHost = flag.String("statsd_host", "127.0.0.1", "Statsd host")
	var statsdPort = flag.Int("statsd_port", 8125, "Statsd port")
	var nodename = flag.String("nodename", "riak", "Riak node name")
	var riakHost = flag.String("riak_host", "127.0.0.1", "Riak host")
	var riakHttpPort = flag.Int("riak_http_port", 8098, "Riak HTTP port")
	flag.Parse()

	// First ping the node to make sure it works
	err := pingRiak(*riakHost, *riakHttpPort)
	if err != nil {
		log.Fatalf("Error: %v", err)
	}

	statsd := fmt.Sprintf("%s:%d", *statsdHost, *statsdPort)
	addr, err := net.ResolveUDPAddr("udp", statsd)
	if err != nil {
		log.Fatalf("Couldn't resolve UDP addr: %v", err)
	}

	conn, err := net.DialUDP("udp", nil, addr)
	if err != nil {
		log.Fatalf("Couldn't connect to statsd at %s", statsd)
	}

	// Every 60s, hit the stats endpoint and then send the results to statsd
	interval := time.NewTicker(time.Second * 60)
	for range interval.C {
		go getAndSendRiakMetrics(conn, *nodename, *riakHost, *riakHttpPort)
	}
}
func main() {
	endpoint := flag.String("endpoint", "http://127.0.0.1:4001", "etcd HTTP endpoint")
	rWrites := flag.Int("write-requests", 50000, "number of writes")
	cWrites := flag.Int("concurrent-writes", 500, "number of concurrent writes")
	watches := flag.Int("watches", 500, "number of watches")
	flag.Parse()

	for i := 0; i < *watches; i++ {
		key := strconv.Itoa(i)
		go watch(*endpoint, key)
	}

	wChan := make(chan int, *cWrites)
	for i := 0; i < *cWrites; i++ {
		go write(*endpoint, (*rWrites / *cWrites), wChan)
	}

	for i := 0; i < *cWrites; i++ {
		<-wChan
		log.Printf("Completed %d writes", (*rWrites / *cWrites))
	}
}
func main() {
	numCPU := runtime.NumCPU()
	runtime.GOMAXPROCS(numCPU)

	obliteration := flag.Int("concurrency", 10, "threads and connections to use for load generation")
	host := flag.String("zk", "master.mesos:2181", "host:port for zk")
	size := flag.Int("size", 1024, "bytes per key written")
	ratio := flag.Float64("ratio", 0.2, "0 to 1 ratio of reads to writes. 0 is all writes, 1 is all reads.")
	flag.Parse()

	value := gen(*size)

	conns := []*zk.Conn{}
	for i := 0; i < *obliteration; i++ {
		cli, _, err := zk.Connect([]string{*host}, 5*time.Second)
		if err != nil {
			fmt.Printf("error connecting to zk: %v\n", err)
			os.Exit(1)
		}
		conns = append(conns, cli)
	}

	doRpc := func() {
		cli := conns[rand.Intn(len(conns))]
		bench(cli, value, *ratio)
	}

	loghisto.PrintBenchmark("benchmark1234", uint(*obliteration), doRpc)
}
func main() {
	uuid := flag.String("uuid", "1BEAC099-BEAC-BEAC-BEAC-BEAC09BEAC09", "iBeacon UUID")
	major := flag.Int("major", 0, "iBeacon major value (uint16)")
	minor := flag.Int("minor", 0, "iBeacon minor value (uint16)")
	power := flag.Int("power", -57, "iBeacon measured power (int8)")
	d := flag.Duration("duration", 1*time.Minute, "advertising duration")
	verbose := flag.Bool("verbose", false, "dump all events")
	flag.Parse()

	ble := goble.New()
	ble.SetVerbose(*verbose)
	ble.Init()

	var utsname xpc.Utsname
	xpc.Uname(&utsname)
	log.Println(utsname.Release)

	time.Sleep(1 * time.Second)

	log.Println("Start Advertising", *uuid, *major, *minor, *power)
	ble.StartAdvertisingIBeacon(xpc.MustUUID(*uuid), uint16(*major), uint16(*minor), int8(*power))

	time.Sleep(*d)

	log.Println("Stop Advertising")
	ble.StopAdvertising()
}
func main() {
	var data = flag.String("data", "", "The data directory where WOF data lives, required")
	var cache_size = flag.Int("cache_size", 1024, "The number of WOF records with large geometries to cache")
	var cache_trigger = flag.Int("cache_trigger", 2000, "The minimum number of coordinates in a WOF record that will trigger caching")
	var lat = flag.Float64("latitude", 0.0, "")
	var lon = flag.Float64("longitude", 0.0, "")
	var loglevel = flag.String("loglevel", "info", "...")
	flag.Parse()

	logger := log.NewWOFLogger("[wof-breaches] ")
	logger.AddLogger(os.Stdout, *loglevel)

	idx, _ := rtree.NewIndex(*data, *cache_size, *cache_trigger, logger)

	for _, path := range flag.Args() {
		logger.Info("indexing %s", path)
		idx.IndexGeoJSONFile(path)
	}

	results := idx.GetIntersectsByLatLon(*lat, *lon)
	inflated := idx.InflateSpatialResults(results)

	for _, r := range inflated {
		logger.Info("%v", r)
	}
}
func main() {
	addr := flag.String("addr", "", "host:port of slime-proxy to check")
	uuidFlag := flag.String("uuid", "", "uuid of metadata store to verify")
	warnUnconnected := flag.Int("warn", 0, "WARN if number of undead unconnected stores exceeds this number")
	critUnconnected := flag.Int("crit", 0, "CRIT if number of undead unconnected stores exceeds this number")
	flag.Parse()

	if *addr == "" || *uuidFlag == "" || *warnUnconnected == 0 || *critUnconnected == 0 {
		dieUnknown("Incorrect invocation, all arguments are required.\n" +
			"Run with -h for usage.")
	}

	wantID, err := uuid.Parse(*uuidFlag)
	if err != nil {
		dieUnknown("Couldn't parse uuid given: %v", err)
	}

	checkUUID(*addr, wantID)
	checkConnectivity(*addr, *warnUnconnected, *critUnconnected)
	os.Exit(0)
}
func main() {
	create := flag.Bool("create", false, "create shares from a secret")
	minimum := flag.Int("minimum", 3, "minimum shares required to recreate secret")
	shares := flag.Int("shares", 4, "total shares to create (shares >= minimum)")
	secret := flag.String("secret", "Hello, World!", "secret to share")
	combine := flag.Bool("combine", false, "combines shares into a secret")
	raw := flag.String("secrets", "", "comma separated list of shared secrets")
	flag.Parse()

	if *create == *combine {
		flag.Usage()
	} else if *create {
		if *minimum > *shares {
			flag.Usage()
		} else {
			values := sssa.Create(*minimum, *shares, *secret)
			for i := range values {
				fmt.Println(values[i])
			}
		}
	} else {
		if *raw == "" {
			flag.Usage()
		} else {
			secrets := strings.Split(*raw, ",")
			value := sssa.Combine(secrets)
			fmt.Println("Secret: ", value)
		}
	}
}
func main() {
	port := flag.Int("port", 8125, "Port to bind to")
	flushPeriod := flag.Int("flush", 5, "Seconds between flushes")
	flag.Parse()

	log.Printf("Statsd binding to %v.\n", *port)
	startServer(int64(*port), *flushPeriod)
}
func main() {
	// initialize rand seed
	rand.Seed(time.Now().UnixNano())

	// parse arguments
	period := flag.Int("p", 25, "Period: length (lines) of wave")
	amp := flag.Int("a", 25, "Amplitude: width (chars) of wave")
	num := flag.Int("n", 2, "Number of waves")
	freq := flag.Int("f", 40, "Frequency of waves (Hz)")
	colorize := flag.String("color", "lgreen", "Output color (default for none)")
	character := flag.String("char", "*", "Output character")
	flag.Parse()

	// map color names to color codes
	colors := map[string]string{
		"red": "31", "green": "32", "yellow": "33", "blue": "34",
		"magenta": "35", "cyan": "36", "lred": "91", "lgreen": "92",
		"lcyan": "96", "white": "97", "default": "39",
	}

	// color themes
	color_themes := map[string][]string{
		"christmas": []string{"lred", "lgreen", "white"},
	}

	sigs := make([]chan string, *num)

	// start up the waves
	for i := range sigs {
		color := colors["default"]
		if _, ok := colors[*colorize]; ok {
			// a default color
			color = colors[*colorize]
		} else if _, ok := color_themes[*colorize]; ok {
			// iterate through theme's colors
			color = colors[color_themes[*colorize][i%len(color_themes[*colorize])]]
		}
		sigs[i] = make(chan string)
		go Wave(float64(*period), float64(*amp), 0, sigs[i], color, *character)
	}

	// sync up the waves at some frequency in Hz
	go func(frequency int, sigs []chan string) {
		for {
			line := make([]string, len(sigs))
			for i, s := range sigs {
				line[i] = <-s
			}
			// join up the parts and put a newline at the end
			fmt.Printf("%s\n", strings.Join(line, ""))
			time.Sleep(time.Second / time.Duration(frequency))
		}
	}(*freq, sigs)

	// press any key + enter to exit
	var exit string
	fmt.Scan(&exit)
}
func main() {
	host := flag.String("host", "api.tsinghua.io", "Host of the server")
	port := flag.Int("port", 443, "Port of the server")
	certFile := flag.String("cert", "", "Certificate file.")
	keyFile := flag.String("key", "", "Key file.")
	windowMin := flag.Int64("window", 15, "Window size of the rate limit (in minutes).")
	rate := flag.Int("rate", 900, "Max requests per window per IP.")
	flag.Parse()

	api := api.New(
		handlers.CompressHandler,
		util.HeadersHandler,
		util.NewLimiter(*windowMin*60, *rate).Handler(),
	)
	api.AddResource("/semester", resource.Semester)
	api.AddResource("/users/me", resource.Profile)
	api.AddResource("/users/me/attended", resource.Attended)
	api.AddResource("/courses/{id}/announcements", resource.CourseAnnouncements)
	api.AddResource("/courses/{id}/files", resource.CourseFiles)
	api.AddResource("/courses/{id}/assignments", resource.CourseAssignments)
	api.AddResource("/courses/{id}/materials", resource.CourseMaterials)

	addr := *host + ":" + strconv.Itoa(*port)
	glog.Infof("windowMin = %d, rate = %d", *windowMin, *rate)
	glog.Infof("Starting server on %s", addr)

	err := http.ListenAndServeTLS(addr, *certFile, *keyFile, api)
	glog.Fatalf("Shutting down: %s", err)
}
func main() {
	// Set up flags.
	wFlag := flag.Int("w", 640, "width of the image in pixels")
	hFlag := flag.Int("h", 480, "height of the image in pixels")
	xMinFlag := flag.Float64("x-min", -2.0, "minimum x value to plot")
	xMaxFlag := flag.Float64("x-max", 2.0, "maximum x value to plot")
	bailoutFlag := flag.Int("bailout", 100, "maximum iteration bailout")

	// Handle the flags.
	flag.Parse()

	// Main logic.
	acc := newAccumulator(*wFlag, *hFlag, *xMinFlag, *xMaxFlag)
	const numSamples = 1e8
	for acc.getCount() < numSamples {
		log.Print(acc.getCount())
		accumulateSeqence(*bailoutFlag, acc)
	}
	img := acc.toImage()

	f, err := os.Create("out.png")
	if err != nil {
		log.Fatal(err)
	}
	if err := png.Encode(f, img); err != nil {
		log.Fatal(err)
	}
}
func main() {
	pr := flag.Float64("pr", 0.9, "non empty response probability")
	portPtr := flag.Int("p", 7040, "port to start http server")
	markupFilePath := flag.String("mf", "markup.html", "path to markup file")
	respdelay := flag.Int("d", 0, "response delay to imitate network latency")
	showVersion := flag.Bool("version", false, "print version string")
	flag.Parse()

	if *showVersion {
		fmt.Println(utils.Version("mock-bidder"))
		return
	}

	markup_bytes, err := ioutil.ReadFile(*markupFilePath)
	if nil != err {
		log.Fatal(err.Error())
	}
	markup := string(markup_bytes)

	h := http_handlers.RequestsHandler{Delay: *respdelay, Probability: float32(*pr), Markup: markup}
	http.HandleFunc("/auctions", func(w http.ResponseWriter, r *http.Request) {
		h.HandleResponse(w, r)
	})

	log.Println("INFO Starting server on 0.0.0.0:" + strconv.Itoa(*portPtr))
	http.ListenAndServe(":"+strconv.Itoa(*portPtr), nil)
}
// CreateConfig creates the server configuration based on application command line arguments
func CreateConfig() *ServerConfig {
	var port = flag.Int("p", DEFAULT_PORT, "HTTP service port")
	var initCapacity = flag.Int("size", INIT_BASKET_CAPACITY, "Initial basket size (capacity)")
	var maxCapacity = flag.Int("maxsize", MAX_BASKET_CAPACITY, "Maximum allowed basket size (max capacity)")
	var pageSize = flag.Int("page", DEFAULT_PAGE_SIZE, "Default page size")
	var masterToken = flag.String("token", "", "Master token, random token is generated if not provided")
	var dbType = flag.String("db", DEFAULT_DB_TYPE, fmt.Sprintf(
		"Baskets storage type: %s - in-memory, %s - Bolt DB", DB_TYPE_MEM, DB_TYPE_BOLT))
	var dbFile = flag.String("file", "./baskets.db", "Database location, only applicable for file databases")
	flag.Parse()

	var token = *masterToken
	if len(token) == 0 {
		token, _ = GenerateToken()
		log.Printf("[info] generated master token: %s", token)
	}

	return &ServerConfig{
		ServerPort:   *port,
		InitCapacity: *initCapacity,
		MaxCapacity:  *maxCapacity,
		PageSize:     *pageSize,
		MasterToken:  token,
		DbType:       *dbType,
		DbFile:       *dbFile}
}
// Init handles parsing command line flags
func Init() {
	// Flags
	SearchFlag = flag.String("s", "", "Search for TypeIDs")
	InfoFlag = flag.String("i", "", "Get info with a TypeID, typeName or mDisplayName")
	VerboseInfo = flag.Bool("vi", false, "Prints all attributes when used with -i")
	LicenseFlag = flag.Bool("l", false, "Prints license information")
	VersionFlag = flag.Bool("v", false, "Prints the SDETool version")
	SlowFlag = flag.Bool("slow", false, "Forces the use of unoptimized functions")
	TimeExecution = flag.Bool("time", false, "Times the execution of functions that may take a decent amount of time")
	Clean = flag.Bool("clean", false, "Cleans all database and cache files")
	DumpTypes = flag.Bool("dump", false, "Dumps all types to a file for use with the category package")
	ApplyModule = flag.String("m", "", "Used with -i to apply a module to a dropsuit")
	GetMarketData = flag.Bool("market", false, "Gets market data on item, used with -i. Sorry CCP if I'm pounding your APIs ;P")
	Debug = flag.Bool("debug", false, "Debug? Debug!")
	ForcePanic = flag.Bool("fp", false, "Forces a panic, for debug use")
	Quiet = flag.Bool("quiet", false, "Used with flags like uninstall when you don't want it to produce output, ask for input or block in any way")
	Uninstall = flag.Bool("uninstall", false, "Uninstalls SDETool if installed via makefile or manually in your PATH variable")
	NoColor = flag.Bool("nocolor", false, "Used to disable color. Useful for >'ing and |'ing")
	NoSkills = flag.Bool("ns", false, "Used to prevent SDETool from applying skill bonuses")

	// Damage and mod counts
	Damage = flag.String("d", "", "Get damage calculations, takes a TypeID")
	SDEVersion = flag.String("sv", "1.8", "Version of the SDE to use, in the form of '1.7' or '1.8'")
	ComplexModCount = flag.Int("c", 0, "Amount of complex damage mods, used with -d")
	EnhancedModCount = flag.Int("e", 0, "Amount of enhanced damage mods, used with -d")
	BasicModCount = flag.Int("b", 0, "Amount of basic damage mods, used with -d")
	Prof = flag.Int("p", 0, "Prof level, used with -d")

	// Server related
	RunServer = flag.Bool("server", false, "Runs a server for hosting the web version of SDETool")
	Port = flag.Int("port", 80, "Port used for -server")

	flag.Parse()
}
func main() {
	concurrent := flag.Int("C", 10, "concurrent connections")
	flag.IntVar(&conf.messages, "M", 100, "messages per connection")
	connections := flag.Int("N", 10000, "total number of connections")
	flag.Parse()

	conf.addr = flag.Arg(0)
	if conf.addr == "" {
		log.Fatal("Error: supply the websocket URL as a non-flag argument")
	}

	c := make(chan int)
	var wg sync.WaitGroup

	// if anybody encounters an error, set this to 1
	var ret int32

	for i := 0; i < *concurrent; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			err := worker(c)
			if err != nil {
				// Don't kill the entire program, just the worker
				log.Println("Worker", id, "died:", err)
				atomic.StoreInt32(&ret, 1)
			}
		}(i + 1)
	}

	go func() {
		for i := 0; i < *connections; i++ {
			c <- i
		}
		close(c)
	}()

	wg.Wait()
	os.Exit(int(atomic.LoadInt32(&ret)))
}
func main() {
	endpoint := flag.String("endpoint", "http://localhost:9102/stats", "Stats http endpoint")
	frequency := flag.Int("frequency", 1, "Sampling frequency in seconds")
	duration := flag.Int("seconds", 30, "Collection duration")
	file := flag.String("file", "out.stats", "Output file")
	flag.Parse()

	fd, err := os.OpenFile(*file, os.O_WRONLY|os.O_CREATE, 0777)
	if err != nil {
		panic(err)
	}

	count := *duration / *frequency
	fd.Write([]byte(fmt.Sprintf("Count:%d Frequency:%d\n", count, *frequency)))

	for i := 0; i < count; i++ {
		resp, err := http.Get(*endpoint)
		if err != nil {
			panic(err)
		}
		body, _ := ioutil.ReadAll(resp.Body)
		fd.Write([]byte(fmt.Sprintf("Len:%d\n", len(body)+1)))
		fd.Write(body)
		fd.Write([]byte("\n"))
		fmt.Print(".")
		time.Sleep(time.Second * time.Duration(*frequency))
	}

	fd.Close()
}
func main() {
	threads := flag.Int("threads", 0, "number of parallel threads in a run. If 0, use CPU count.")
	sleepTime := flag.Float64("sleep", 4.0, "amount of sleep between runs.")
	runs := flag.Int("runs", 10, "number of runs.")
	flag.Parse()

	if *threads == 0 {
		*threads = fuse.CountCpus()
		runtime.GOMAXPROCS(*threads)
	}

	filename := flag.Args()[0]
	f, err := os.Open(filename)
	if err != nil {
		log.Panicf("Open: %v", err)
	}

	reader := bufio.NewReader(f)
	files := make([]string, 0)
	for {
		l, _, err := reader.ReadLine()
		if err != nil {
			break
		}
		files = append(files, string(l))
	}

	results := fuse.RunBulkStat(*runs, *threads, *sleepTime, files)
	fuse.AnalyzeBenchmarkRuns(results)
}
func main() {
	port := flag.Int("port", 8080, "port")
	backends := flag.Int("workers", 3, "number of workers")
	strategy := flag.String("strategy", "majority", "balancing strategy ['one', 'two', 'majority', 'all']")
	flag.Parse()

	cfg := profile.Config{
		CPUProfile:  true,
		MemProfile:  true,
		ProfilePath: ".",
	}
	p := profile.Start(&cfg)
	defer p.Stop()

	balancer := newBalancer(backends, strategy)

	a := gin.Default()
	a.GET("/", func(c *gin.Context) {
		timeouted := make(chan bool)
		result := processFirstResponse(timeouted, balancer)
		select {
		case data := <-result:
			c.JSON(200, data)
		case <-time.After(globalTimeout):
			c.JSON(500, nil)
			timeouted <- true
		}
	})
	a.Run(fmt.Sprintf(":%d", *port))
}
// Main function
func main() {
	tasktypePtr := flag.String("type", "insert", "Task type [insert, query]")
	startPtr := flag.Int("start", 1, "Starting task number (integer)")
	countPtr := flag.Int("count", 1, "Number of tasks (integer)")
	debugPtr := flag.Bool("debug", false, "Debug mode")
	logfilePtr := flag.String("logfile", "/home/scada/mysqltest.log", "Log file location")
	hostPtr := flag.String("host", "127.0.0.1", "Host address")
	batchPtr := flag.Int("batch", 500, "Number of records inserted per batch")
	flag.Parse()

	tasktype := *tasktypePtr
	start := *startPtr
	count := *countPtr
	debug = *debugPtr
	host := *hostPtr
	batch := *batchPtr

	if tasktype != "insert" && tasktype != "query" {
		fmt.Println("Invalid value for [type]; it must be one of [insert, query]")
		return
	}

	logfile, err := os.OpenFile(*logfilePtr, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0)
	if err != nil {
		fmt.Printf("Error opening log file [%s]\n", *logfilePtr)
		return
	}
	defer logfile.Close()
	log.SetOutput(logfile)

	multiTask(count, start, tasktype, host, batch)
}
// SetConfigOverrides will check the *CLI variables for any values
// and override the values in the given config if they are set.
// If LogCLI is set to "dev", the given `Log` pointer will be set to an
// empty string.
func SetConfigOverrides(c *Config) {
	// HTTPAccessLogCLI is a pointer to the value of the '-http-access-log' command line flag. It is meant to
	// declare an access log location for HTTP services.
	HTTPAccessLogCLI := flag.String("http-access-log", "", "HTTP access log location")

	// RPCAccessLogCLI is a pointer to the value of the '-rpc-access-log' command line flag. It is meant to
	// declare an access log location for RPC services.
	RPCAccessLogCLI := flag.String("rpc-access-log", "", "RPC access log location")

	// HTTPPortCLI is a pointer to the value for the '-http' flag. It is meant to declare the port
	// number to serve HTTP services.
	HTTPPortCLI := flag.Int("http", 0, "Port to run an HTTP server on")

	// RPCPortCLI is a pointer to the value for the '-rpc' flag. It is meant to declare the port
	// number to serve RPC services.
	RPCPortCLI := flag.Int("rpc", 0, "Port to run an RPC server on")

	config.SetLogOverride(&c.Log)

	if *HTTPAccessLogCLI != "" {
		c.HTTPAccessLog = HTTPAccessLogCLI
	}

	if *RPCAccessLogCLI != "" {
		c.RPCAccessLog = RPCAccessLogCLI
	}

	if *HTTPPortCLI > 0 {
		c.HTTPPort = *HTTPPortCLI
	}

	if *RPCPortCLI > 0 {
		c.RPCPort = *RPCPortCLI
	}
}
func main() {
	sortFuncs := flag.String("sorts", "", "Sorting functions to compare, comma separated")
	arrayCount := flag.Int("array", 1, "Number of arrays to sort, default 1")
	elementCount := flag.Int("element", 1000, "Number of random entries for each array, default 1000")
	timeout := flag.Int("timeout", 60, "Maximum time to run in seconds, default 60s")
	flag.Parse()

	funcs := strings.Split(*sortFuncs, ",")
	for i, f := range funcs {
		funcs[i] = strings.TrimSpace(f)
	}

	var sorts []func(sortable.Interface)
	for _, f := range funcs {
		s, err := getSortFunc(f)
		if err != nil {
			panic(fmt.Sprintf("Unrecognised sort function: %s\n", f))
		}
		sorts = append(sorts, s)
	}

	sortArray := generateSortArray(*arrayCount, *elementCount)
	startsorts(sorts, sortArray, *timeout)
}
func main() {
	targetpath := flag.String("target", "", "Path to target image")
	dir := flag.String("dir", "", "Directory with tile pictures")
	outpath := flag.String("outpath", "output.jpg", "Output path for the result image")

	// Note: tile pictures must all be the same size, i.e. tilewidth x tileheight
	tilewidth := flag.Int("tilewidth", 0, "Width of tile image")
	tileheight := flag.Int("tileheight", 0, "Height of tile image")
	flag.Parse()

	if *targetpath == "" {
		log.Fatal("Target picture is not selected")
	}
	if *dir == "" {
		log.Fatal("Directory containing tile pictures is not selected")
	}

	// First, read and decode the image which will be split into tile-sized regions
	targetimg, err := PrepareTargetImage(*targetpath)
	if err != nil {
		log.Fatal(err)
	}

	// Read the directory which contains the tile images for the mosaic
	photos := ReadFromDir(*dir)

	// Associate each region (tilewidth x tileheight) with a tile image
	grid, msg := GetNearestPicturesToRegion(targetimg, photos, *tilewidth, *tileheight)
	if msg != "" {
		log.Fatal(msg)
	}

	bounds := targetimg.Bounds()

	// Construct the grid and write it to the output image
	ConstructFullImageFromTiles(grid, *outpath, bounds.Max.X, bounds.Max.Y, *tilewidth, *tileheight)
}
func init() {
	// All the defaults mentioned here refer to etcd_load.cfg values.
	fhelp = flag.Bool("help", false, "shows how to use flags")
	fhost = flag.String("h", "null", "etcd instance address. "+
		"Default=127.0.0.1 from config file")
	fport = flag.String("p", "null", "port on which etcd is running. Default=4001")
	foperation = flag.String("o", "null", "operation - create/delete/get/update. Default=create")
	fkeycount = flag.Int("k", -1, "number of keys involved in operation, "+
		"useful for create. Default=100")
	foperation_count = flag.Int("oc", -1, "number of operations to be performed. "+
		"Default=100")
	flog_file = flag.String("log", "null", "logfile name. Default=log")
	fremote_flag = flag.Bool("remote", false, "Must be set true if etcd "+
		"instance is remote. Default=false")
	fmem_flag = flag.Bool("mem", false, "When true, memory info is shown. "+
		"Default=false")
	fsecure = flag.Bool("secure", false, "When true, new tls client created "+
		"using certificate,key files. Default=false")
	fcfg_file = flag.String("c", "null", "Input the cfg file. Required")
	fcapath = flag.String("capath", "null", "Certificate path. "+
		"Default=/etc/openshift/master")
	fcacert = flag.String("ca", "ca.crt", "The ca filename. Default=ca.crt")
	fclient_cert = flag.String("cert", "null", "The client "+
		"certificate. Default=master.etcd-client.crt")
	fclient_key = flag.String("cakey", "null", "The client "+
		"key file. Default=master.etcd-client.key")
}
func main() {
	a := flag.Int("a", 1000, "") // beep frequency (Hz)
	b := flag.Int("b", 2000, "") // beep duration in milliseconds (two seconds)
	flag.Parse()

	k := syscall.MustLoadDLL("kernel32.dll")
	k.MustFindProc("Beep").Call(uintptr(*a), uintptr(*b))
}
func main() {
	runtime.GOMAXPROCS(8)

	startMode := flag.Int("startMode", 1, "0 for mock, 1 for production")
	bindAddr := flag.String("bindAddr", ":17070", "-bindAddr=:17070")
	certPath := flag.String("certPath", "./cert.pem", "-certPath=xxxxxx/cert.pem or -certPath=http://")
	keyPath := flag.String("keyPath", "./key.pem", "-keyPath=xxxxxx/key.pem or -keyPath=http://")
	runMode := flag.Int("runMode", 0, "-runMode=1 (online), 0 (sandbox)")
	storeCap := flag.Int("storeCap", 1000, "-storeCap=100000 // length of the resend chain")
	logxml := flag.String("log", "log.xml", "-log=log.xml // log config file")
	pprofPort := flag.String("pprof", ":9090", "pprof=:9090 // port")
	flag.Parse()

	go func() {
		if len(*pprofPort) > 0 {
			addr, _ := net.ResolveTCPAddr("tcp4", *bindAddr)
			log.Error(http.ListenAndServe(addr.IP.String()+*pprofPort, nil))
		}
	}()

	// Load the log4go configuration
	log.LoadConfiguration(*logxml)

	// Set the startup options
	option := server.NewOption(*startMode, *bindAddr, *certPath, *keyPath, *runMode, *storeCap)
	apnsserver := server.NewApnsHttpServer(option)

	// Shut the server down when killed
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Kill)
	<-ch

	apnsserver.Shutdown()
	log.Info("APNS SERVER IS STOPPED!")
}
func main() {
	halfProcs := runtime.GOMAXPROCS(0) / 2
	if halfProcs < 1 {
		halfProcs = 1
	}

	queryWorkers := flag.Int("queryWorkers", halfProcs, "Number of query tree walkers.")
	docWorkers := flag.Int("docWorkers", halfProcs, "Number of document mapreduce workers.")
	addr := flag.String("addr", ":3133", "Address to bind to")
	flag.Parse()

	// Update the query handler deadline to the query timeout
	found := false
	for i := range routingTable {
		matches := routingTable[i].Path.FindAllStringSubmatch("/x/_query", 1)
		if len(matches) > 0 {
			routingTable[i].Deadline = *queryTimeout
			found = true
			break
		}
	}
	if !found {
		log.Fatalf("Programming error: Could not find query handler")
	}

	processorInput = make(chan *processIn, *docBacklog)
	for i := 0; i < *docWorkers; i++ {
		go docProcessor(processorInput)
	}

	if *cacheAddr == "" {
		cacheInput = processorInput
		// Note: cacheInputSet will be nil here; there should be no caching
	} else {
		cacheInput = make(chan *processIn, *cacheBacklog)
		cacheInputSet = make(chan *processOut, *cacheBacklog)
		for i := 0; i < *cacheWorkers; i++ {
			go cacheProcessor(cacheInput, cacheInputSet)
		}
	}

	queryInput = make(chan *queryIn, *queryBacklog)
	for i := 0; i < *queryWorkers; i++ {
		go queryExecutor(queryInput)
	}

	if *pprofFile != "" {
		go startProfiler()
	}

	s := &http.Server{
		Addr:        *addr,
		Handler:     http.HandlerFunc(handler),
		ReadTimeout: 5 * time.Second,
	}
	log.Printf("Listening to web requests on %s", *addr)
	log.Fatal(s.ListenAndServe())
}
// TODO: No error handling. Should probably do something about that.
func main() {
	fmt.Println("***********************")
	LogInit(os.Stdout, os.Stdout, os.Stdout, os.Stderr)

	numResults := flag.Int("numResults", 50, "Number of results to return")
	startPage := flag.Int("startPage", 0, "Results page to start with")
	flag.Parse()

	fmt.Println("numResults: ", *numResults)
	fmt.Println("startPage: ", *startPage)

	// Print show details to the console
	results := SearchDeadShows(*numResults, *startPage)
	for _, doc := range results {
		fmt.Println(doc.Identifier)
		showURL := "http://archive.org/details/" + doc.Identifier + "?output=json"
		Trace.Println(showURL)

		r, _ := http.Get(showURL)
		defer r.Body.Close()
		showJSON, _ := ioutil.ReadAll(r.Body)

		var showResponse DeadShow
		json.Unmarshal(showJSON, &showResponse)

		b, _ := json.MarshalIndent(showResponse.Details, "", " ")
		println(string(b))

		fmt.Println("***********************")
		fmt.Scanln()
	}
}
func Run() {
	var r *int = flag.Int("r", 0, "read timeout")
	var w *int = flag.Int("w", 0, "write timeout")

	port := CFG.Int("port")
	if port == 0 {
		port = 80
	}
	host := CFG.Str("host")
	if host == "" {
		host = "127.0.0.1"
	}

	address := fmt.Sprintf("%s:%d", host, port)
	LOGGER.Log("WebGO running", address)

	server := http.Server{
		Addr:         address,
		ReadTimeout:  time.Duration(*r) * time.Second,
		WriteTimeout: time.Duration(*w) * time.Second,
		Handler:      &app,
	}
	//server.SetKeepAlivesEnabled(false)

	err := server.ListenAndServe()
	if err != nil {
		LOGGER.Fatal(err)
	}
}
func prepare() {
	var rootfsPath = flag.String("rootfsPath", "", "rootfs path to chroot into")
	var uid = flag.Int("uid", 0, "uid to create directories as")
	var gid = flag.Int("gid", 0, "gid to create directories as")
	var perm = flag.Int("perm", 0755, "Mode to create the directory with")
	var recreate = flag.Bool("recreate", false, "whether to delete the directory before (re-)creating it")
	flag.Parse()

	runtime.LockOSThread()

	if err := syscall.Chroot(*rootfsPath); err != nil {
		panic(err)
	}
	if err := os.Chdir("/"); err != nil {
		panic(err)
	}

	for _, path := range flag.Args() {
		path, err := filepath.Abs(path)
		if err != nil {
			panic(err)
		}
		if *recreate {
			rmdir(path)
		}
		mkdir(path, *uid, *gid, os.FileMode(*perm))
	}
}