func main() {
	laddr := flag.String("listen", ":8001", "listen address")
	baddr := flag.String("backend", "127.0.0.1:1234", "backend address")
	secret := flag.String("secret", "the answer to life, the universe and everything", "tunnel secret")
	tunnels := flag.Uint("tunnels", 1, "low level tunnel count, 0 to work as server")
	flag.Int64Var(&tunnel.Timeout, "timeout", 10, "tunnel read/write timeout")
	flag.UintVar(&tunnel.LogLevel, "log", 1, "log level")
	flag.Usage = usage
	flag.Parse()

	app := &tunnel.App{
		Listen:  *laddr,
		Backend: *baddr,
		Secret:  *secret,
		Tunnels: *tunnels,
	}
	err := app.Start()
	if err != nil {
		fmt.Fprintf(os.Stderr, "start failed: %s\n", err.Error())
		return
	}
	go handleSignal(app)
	app.Wait()
}
// decodeRefArg registers a flag named after the argument and returns either
// the flag pointer (for "*T" type names) or the dereferenced default (for "T").
func decodeRefArg(name, typeName string) (interface{}, error) {
	switch strings.ToLower(typeName) {
	case "*bool":
		newValue := flag.Bool(name, app.DefaultBoolValue, name)
		return newValue, nil
	case "bool":
		newValue := flag.Bool(name, app.DefaultBoolValue, name)
		return *newValue, nil
	case "*string":
		newValue := flag.String(name, app.DefaultStringValue, name)
		return newValue, nil
	case "string":
		newValue := flag.String(name, app.DefaultStringValue, name)
		return *newValue, nil
	case "*time.duration":
		newValue := flag.Duration(name, app.DefaultDurationValue, name)
		return newValue, nil
	case "time.duration":
		newValue := flag.Duration(name, app.DefaultDurationValue, name)
		return *newValue, nil
	case "*float64":
		newValue := flag.Float64(name, app.DefaultFloat64Value, name)
		return newValue, nil
	case "float64":
		newValue := flag.Float64(name, app.DefaultFloat64Value, name)
		return *newValue, nil
	case "*int":
		newValue := flag.Int(name, app.DefaultIntValue, name)
		return newValue, nil
	case "int":
		newValue := flag.Int(name, app.DefaultIntValue, name)
		return *newValue, nil
	case "*int64":
		newValue := flag.Int64(name, app.DefaultInt64Value, name)
		return newValue, nil
	case "int64":
		newValue := flag.Int64(name, app.DefaultInt64Value, name)
		return *newValue, nil
	case "*uint":
		newValue := flag.Uint(name, app.DefaultUIntValue, name)
		return newValue, nil
	case "uint":
		newValue := flag.Uint(name, app.DefaultUIntValue, name)
		return *newValue, nil
	case "*uint64":
		newValue := flag.Uint64(name, app.DefaultUInt64Value, name)
		return newValue, nil
	case "uint64":
		newValue := flag.Uint64(name, app.DefaultUInt64Value, name)
		return *newValue, nil
	}
	return nil, fmt.Errorf("unknown type %s for argument %s", typeName, name)
}
func handleFlags() (*CliOptions, bool) {
	flag.Usage = Usage

	_ = flag.String("serveraddr", "127.0.0.1", "Server address to listen") // ignored
	serverPort := flag.Uint("port", 9000, "Port to listen")
	portLayerAddr := flag.String("port-layer-addr", "127.0.0.1", "Port layer server address")
	portLayerPort := flag.Uint("port-layer-port", 9001, "Port Layer server port")
	debug := flag.Bool("debug", false, "Enable debuglevel logging")
	flag.Parse()

	// load the vch config
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		log.Fatalf("Unable to load configuration from guestinfo: %s", err)
	}
	extraconfig.Decode(src, &vchConfig)

	if *debug || vchConfig.Diagnostics.DebugLevel > 0 {
		log.SetLevel(log.DebugLevel)
	}

	cli := &CliOptions{
		serverPort:    *serverPort,
		portLayerAddr: fmt.Sprintf("%s:%d", *portLayerAddr, *portLayerPort),
		proto:         "tcp",
	}

	return cli, true
}
func handleFlags() (*CliOptions, bool) {
	flag.Usage = Usage

	enableTLS := flag.Bool("TLS", false, "Use TLS; implied by --tlsverify")
	verifyTLS := flag.Bool("tlsverify", false, "Use TLS and verify the remote")
	cafile := flag.String("tls-ca-certificate", "", "Trust certs signed only by this CA")
	certfile := flag.String("tls-certificate", "", "Path to TLS certificate file")
	keyfile := flag.String("tls-key", "", "Path to TLS key file")
	serverAddr := flag.String("serveraddr", "127.0.0.1", "Server address to listen")
	serverPort := flag.Uint("port", 9000, "Port to listen")
	portLayerAddr := flag.String("port-layer-addr", "127.0.0.1", "Port layer server address")
	portLayerPort := flag.Uint("port-layer-port", 9001, "Port Layer server port")
	debug := flag.Bool("debug", false, "Enable debuglevel logging")
	flag.Parse()

	if *enableTLS && (len(*certfile) == 0 || len(*keyfile) == 0) {
		fmt.Fprintf(os.Stderr, "TLS requested, but tls-certificate and tls-key were not both specified\n")
		return nil, false
	}
	if *verifyTLS {
		*enableTLS = true
		if len(*certfile) == 0 || len(*keyfile) == 0 || len(*cafile) == 0 {
			fmt.Fprintf(os.Stderr, "tlsverify requested, but tls-ca-certificate, tls-certificate and tls-key were not all specified\n")
			return nil, false
		}
	}

	cli := &CliOptions{
		enableTLS:     *enableTLS,
		verifyTLS:     *verifyTLS,
		cafile:        *cafile,
		certfile:      *certfile,
		keyfile:       *keyfile,
		serverAddr:    *serverAddr,
		serverPort:    *serverPort,
		fullserver:    fmt.Sprintf("%s:%d", *serverAddr, *serverPort),
		portLayerAddr: fmt.Sprintf("%s:%d", *portLayerAddr, *portLayerPort),
		proto:         "tcp",
	}

	// load the vch config
	src, err := extraconfig.GuestInfoSource()
	if err != nil {
		log.Errorf("Unable to load configuration from guestinfo")
	}
	extraconfig.Decode(src, &vchConfig)

	if *debug || vchConfig.Diagnostics.DebugLevel > 0 {
		log.SetLevel(log.DebugLevel)
	}

	return cli, true
}
func main() { // Define and parse flags. id := flag.Uint("id", 0, "Set the client node ID to connect to.") rate = flag.Uint("rate", 0, "Sets the maximum messages per second.") flag.Parse() // Validate flags. if *id == 0 || *id > 0xFFFF { log.Fatal("Invalid node ID specified.") } // Load configuration from config file. loadConfig() fmt.Printf("Loaded configuration, ID to connect to is %d.\n", *id) conn, err := connect.TestDial(uint16(*id)) if err != nil { log.Fatal(err) } var msg cliproto_up.Authenticate msg.Username = new(string) msg.Password = new(string) msg.SessionId = new(uint64) *msg.Username = "******" *msg.Password = "******" conn.SendProto(2, &msg) for { respMsg, ok := <-conn.Received if !ok { log.Fatal("connection error") } switch *respMsg.MsgType { case 2: conn.Close() log.Fatal("auth failed") case 3: log.Print("authenticated, follow msgsink...") var followMsg cliproto_up.FollowUsername followMsg.Username = new(string) *followMsg.Username = "******" conn.SendProto(3, &followMsg) case 4: log.Fatal("follow failed") case 6: handleData(respMsg.Content) case 7: log.Print("following msgsink, sending msgs...") go sendMessages(conn) } } }
func main() {
	dryRun := flag.Bool("dryRun", true, "Whether to do a dry run.")
	sleep := flag.Duration("sleep", 60*time.Second, "How long to sleep between batches.")
	batchSize := flag.Uint("batchSize", 1000, "Number of certificates to process between sleeps.")
	numBatches := flag.Uint("numBatches", 999999, "Stop processing after N batches.")
	type config struct {
		NotAfterBackFiller struct {
			cmd.DBConfig
		}
		Statsd cmd.StatsdConfig
		Syslog cmd.SyslogConfig
	}
	configFile := flag.String("config", "", "File containing a JSON config.")

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s\n\n", usageIntro)
		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	if *configFile == "" {
		flag.Usage()
		os.Exit(1)
	}

	configData, err := ioutil.ReadFile(*configFile)
	cmd.FailOnError(err, fmt.Sprintf("Reading %q", *configFile))
	var cfg config
	err = json.Unmarshal(configData, &cfg)
	cmd.FailOnError(err, "Unmarshaling config")

	stats, log := cmd.StatsAndLogging(cfg.Statsd, cfg.Syslog)
	defer log.AuditPanic()

	dbURL, err := cfg.NotAfterBackFiller.DBConfig.URL()
	cmd.FailOnError(err, "Couldn't load DB URL")
	dbMap, err := sa.NewDbMap(dbURL, 10)
	cmd.FailOnError(err, "Could not connect to database")
	go sa.ReportDbConnCount(dbMap, metrics.NewStatsdScope(stats, "NotAfterBackfiller"))

	b := backfiller{
		dbMap:      dbMap,
		log:        log,
		clk:        cmd.Clock(),
		dryRun:     *dryRun,
		batchSize:  *batchSize,
		numBatches: *numBatches,
		sleep:      *sleep,
	}

	err = b.processForever()
	cmd.FailOnError(err, "Could not process certificate batches")
}
func Main() error { messageTimeout := flag.Duration("message_timeout", 2*time.Minute, "timeout for one message to be proxied") clientIdleTimeout := flag.Duration("client_idle_timeout", 60*time.Minute, "idle timeout for client connections") serverIdleTimeout := flag.Duration("server_idle_timeout", 1*time.Hour, "idle timeout for server connections") serverClosePoolSize := flag.Uint("server_close_pool_size", 100, "number of goroutines that will handle closing server connections") getLastErrorTimeout := flag.Duration("get_last_error_timeout", time.Minute, "timeout for getLastError pinning") maxPerClientConnections := flag.Uint("max_per_client_connections", 100, "maximum number of connections per client") maxConnections := flag.Uint("max_connections", 100, "maximum number of connections per mongo") portStart := flag.Int("port_start", 6000, "start of port range") portEnd := flag.Int("port_end", 6010, "end of port range") addrs := flag.String("addrs", "localhost:27017", "comma separated list of mongo addresses") flag.Parse() replicaSet := dvara.ReplicaSet{ Addrs: *addrs, PortStart: *portStart, PortEnd: *portEnd, MessageTimeout: *messageTimeout, ClientIdleTimeout: *clientIdleTimeout, ServerIdleTimeout: *serverIdleTimeout, ServerClosePoolSize: *serverClosePoolSize, GetLastErrorTimeout: *getLastErrorTimeout, MaxConnections: *maxConnections, MaxPerClientConnections: *maxPerClientConnections, } var statsClient stats.HookClient var log stdLogger var graph inject.Graph err := graph.Provide( &inject.Object{Value: &log}, &inject.Object{Value: &replicaSet}, &inject.Object{Value: &statsClient}, ) if err != nil { return err } if err := graph.Populate(); err != nil { return err } objects := graph.Objects() if err := startstop.Start(objects, &log); err != nil { return err } defer startstop.Stop(objects, &log) ch := make(chan os.Signal, 2) signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT) <-ch signal.Stop(ch) return nil }
func main() {
	defer trace("gperfect")()

	perPtr := flag.Uint64("number", 0, "Perfect number to find")
	coreCountPtr := flag.Uint("numCores", 8, "Number of cores to use when calculating random perfect numbers")
	numRandPtr := flag.Uint("numRandom", 100, "Number of random perfect numbers to look for. Omit for continuous calculations")
	flag.Parse()

	if *perPtr > 0 {
		calcPerfect(*perPtr, *coreCountPtr)
	} else {
		calcRandPerfects(coreCountPtr, numRandPtr)
	}
}
func main() {
	var isWrite bool

	bus := flag.Uint("bus", 0, "bus number, [0 : 255]")
	dev := flag.Uint("dev", 0, "device number, [0 : 63]")
	function := flag.Uint("function", 0, "function number, [0 : 7]")
	offset := flag.Uint("offset", 0, "offset, [0 : 255] and 4 byte aligned")
	flag.Usage = usage
	flag.Parse()

	// Check parameters
	if *bus >= PciBusLimit || *dev >= PciDevLimit || *function >= PciFuncLimit ||
		*offset >= PciRegLimit || *offset&0x03 != 0 {
		usage()
	}

	switch flag.NArg() {
	case 0:
		isWrite = false
	case 1:
		isWrite = true
	default:
		usage()
	}

	busVal := uint32(*bus)
	devVal := uint32(*dev)
	funcVal := uint32(*function)
	offsetVal := uint32(*offset)

	ioLevel(3)
	defer ioLevel(0)

	if isWrite {
		value, err := strconv.ParseUint(flag.Arg(0), 0, 32)
		if err != nil {
			log.Fatal(err)
		}
		pciWriteConfReg(busVal, devVal, funcVal, offsetVal, uint32(value))
	}

	// Read the register back (in both read and write modes) and print it.
	data := pciReadConfReg(busVal, devVal, funcVal, offsetVal)
	fmt.Printf("[%02X:%02X.%X-%02X] = %08x\n", busVal, devVal, funcVal, offsetVal, data)
}
func main() { // Define and parse flags. id := flag.Uint("id", 0, "Set the client node ID to connect to.") print = flag.Uint("print", 10000, "Sets number of messages to print "+ "after.") flag.Parse() // Validate flags. if *id == 0 || *id > 0xFFFF { log.Fatal("Invalid node ID specified.") } // Load configuration from config file. loadConfig() fmt.Printf("Loaded configuration, ID to connect to is %d.\n", *id) conn, err := connect.TestDial(uint16(*id)) if err != nil { log.Fatal(err) } var msg cliproto_up.Authenticate msg.Username = new(string) msg.Password = new(string) msg.SessionId = new(uint64) *msg.Username = "******" *msg.Password = "******" conn.SendProto(2, &msg) for { respMsg, ok := <-conn.Received if !ok { break log.Fatal("connection error") } switch *respMsg.MsgType { case 2: conn.Close() log.Fatal("auth failed") case 3: log.Print("authenticated") case 10: handleMsg(respMsg.Content) } } }
func main() {
	var (
		brokerName    = flag.String("broker", brokers[0], brokerList())
		brokerPort    = flag.String("broker-port", defaultBrokerPort, "host machine broker port")
		dockerHost    = flag.String("docker-host", defaultHost, "host machine (or VM) running Docker")
		brokerdHost   = flag.String("host", defaultDaemonHost, "machine running broker daemon")
		peerHosts     = flag.String("peer-hosts", defaultDaemonHost, "comma-separated list of machines to run peers")
		producers     = flag.Uint("producers", defaultNumProducers, "number of producers per host")
		consumers     = flag.Uint("consumers", defaultNumConsumers, "number of consumers per host")
		numMessages   = flag.Uint("num-messages", defaultNumMessages, "number of messages to send from each producer")
		messageSize   = flag.Uint64("message-size", defaultMessageSize, "size of each message in bytes")
		startupSleep  = flag.Uint("startup-sleep", defaultStartupSleep, "seconds to wait after broker start before benchmarking")
		daemonTimeout = flag.Uint("daemon-timeout", defaultDaemonTimeout, "seconds to wait for daemon before timing out")
		dockerExtras  = flag.String("docker-extra", "", "extra args to pass to `docker run'")
	)
	flag.Parse()

	peers := strings.Split(*peerHosts, ",")

	client, err := broker.NewClient(&broker.Benchmark{
		BrokerdHost:   *brokerdHost,
		BrokerName:    *brokerName,
		BrokerHost:    *dockerHost,
		BrokerPort:    *brokerPort,
		PeerHosts:     peers,
		NumMessages:   *numMessages,
		MessageSize:   *messageSize,
		Publishers:    *producers,
		Subscribers:   *consumers,
		StartupSleep:  *startupSleep,
		DaemonTimeout: *daemonTimeout,
		DockerExtras:  *dockerExtras,
	})
	if err != nil {
		fmt.Println("Failed to connect to flotilla:", err)
		os.Exit(1)
	}

	start := time.Now()
	results, err := runBenchmark(client)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	elapsed := time.Since(start)

	printSummary(client.Benchmark, elapsed)
	printResults(results)
}
func main() {
	// Parse arguments
	verbose := flag.Bool("v", false, "Print verbose output to stdout")
	startColumn := flag.Uint("b", 0, "First column from the left of the table to unmerge")
	endColumn := flag.Uint("e", 0, "Last column from the left of the table to unmerge")
	inputDelimiter := flag.String("id", `\s*:\s*`, "Field input delimiter (Golang regexp)")
	outputDelimiter := flag.String("od", "\t:\t", "Field output delimiter")
	flag.Parse()

	// Open stdin
	ibuf := bufio.NewReader(os.Stdin)

	err := worker(ibuf, *startColumn, *endColumn, *inputDelimiter, *outputDelimiter, *verbose, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
}
func main() {
	var size = flag.Uint("size", 0, "New size of the disk in GB")
	// Allow the same ID types as in disk_remove.go
	var reMajMin = regexp.MustCompile(`^\d+:\d+$`)
	var reMin = regexp.MustCompile(`^\d+$`)
	var id string

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s [options] <Server-Name> <Disk-ID>\n", path.Base(os.Args[0]))
		flag.PrintDefaults()
	}
	flag.Parse()

	if flag.NArg() != 2 || *size == 0 {
		flag.Usage()
		os.Exit(1)
	} else if reMajMin.MatchString(flag.Arg(1)) {
		id = flag.Arg(1)
	} else if reMin.MatchString(flag.Arg(1)) {
		id = fmt.Sprintf("0:%s", flag.Arg(1))
	} else {
		exit.Errorf("invalid disk ID %q", flag.Arg(1))
	}

	client, err := clcv2.NewCLIClient()
	if err != nil {
		exit.Fatal(err.Error())
	}

	server, err := client.GetServer(flag.Arg(0))
	if err != nil {
		exit.Fatalf("failed to list details of server %q: %s", flag.Arg(0), err)
	}

	disks := make([]clcv2.ServerAdditionalDisk, len(server.Details.Disks))
	for i := range server.Details.Disks {
		disks[i] = clcv2.ServerAdditionalDisk{
			Id:     server.Details.Disks[i].Id,
			SizeGB: server.Details.Disks[i].SizeGB,
		}
		if disks[i].Id == id {
			// The API does not allow reducing the size of an existing disk.
			if uint32(*size) <= disks[i].SizeGB {
				fmt.Printf("Disk %s size is already at %d GB.\n", id, disks[i].SizeGB)
				os.Exit(0)
			}
			fmt.Printf("Changing disk %s size from %d to %d GB ...\n", id, disks[i].SizeGB, *size)
			disks[i].SizeGB = uint32(*size)
		}
	}

	reqID, err := client.ServerSetDisks(flag.Arg(0), disks)
	if err != nil {
		exit.Fatalf("failed to update the disk configuration on %q: %s", flag.Arg(0), err)
	}

	log.Printf("Status Id for resizing the disk on %s: %s", flag.Arg(0), reqID)
	client.PollStatus(reqID, 10*time.Second)
}
func main() {
	var shops golib.StringSlice
	parallelOrders := flag.Uint("orders", 20, "Number of open orders running at the same time")
	bank := flag.String("bank", "localhost:9001", "Bank endpoint")
	timeout := flag.Duration("timeout", 0, "Timeout for automatically stopping load generation")
	flag.Var(&shops, "shop", "Shop endpoint(s)")
	flag.Parse()

	if len(shops) == 0 {
		log.Fatalln("Specify at least one -shop")
	}
	golib.ConfigureOpenFilesLimit()

	orders := OrderPool{
		ParallelOrders: *parallelOrders,
		Bank:           *bank,
		Shops:          shops,
		User:           "******",
	}
	orders.Start()
	onInterrupt(orders.Terminate)

	if *timeout > 0 {
		services.L.Warnf("Terminating automatically after %v", *timeout)
		time.AfterFunc(*timeout, func() {
			services.L.Warnf("Timer of %v expired. Terminating...", *timeout)
			orders.Terminate()
		})
	}

	orders.Wait()
	orders.PrintStats()
}
func main() {
	var listenAPI = flag.String("http", "0.0.0.0:9381", "http port to listen on")
	var listenSMTP = flag.String("smtp", "0.0.0.0:9380", "smtp port to listen on")
	var closeFirst = flag.Uint("closeFirst", 0, "close first n connections after MAIL for reconnection tests")
	flag.Parse()

	l, err := net.Listen("tcp", *listenSMTP)
	if err != nil {
		// Fatalf, not Fatalln: the message contains a format verb.
		log.Fatalf("Couldn't bind %q for SMTP: %s", *listenSMTP, err)
	}
	defer l.Close()

	srv := mailSrv{
		closeFirst: *closeFirst,
	}
	srv.setupHTTP(http.DefaultServeMux)
	go func() {
		err := http.ListenAndServe(*listenAPI, http.DefaultServeMux)
		if err != nil {
			log.Fatalln("Couldn't start HTTP server", err)
		}
	}()

	err = srv.serveSMTP(l)
	if err != nil {
		log.Fatalln(err, "Failed to accept connection")
	}
}
func main() {
	launchTimeout := flag.Uint(
		"launch-timeout",
		240,
		"Seconds to retry launching an etcd instance for before giving up. "+
			"This should be long enough for a port occupied by a killed process "+
			"to be vacated.",
	)
	flag.Parse()

	log.Infoln("Starting etcd Executor")

	dconfig := executor.DriverConfig{
		Executor: etcdexecutor.New(
			time.Duration(*launchTimeout) * time.Second,
		),
	}
	driver, err := executor.NewMesosExecutorDriver(dconfig)
	if err != nil {
		log.Infoln("Unable to create an ExecutorDriver ", err.Error())
	}

	_, err = driver.Start()
	if err != nil {
		log.Infoln("Got error:", err)
		return
	}
	log.Infoln("Executor process has started and is running.")
	driver.Join()
}
func main() {
	oBuckets := flag.Bool("b", false, "show FLT buckets analysis")
	oDump := flag.Bool("d", false, "send a cdbmake formatted dump to stdout even if -f and -l are empty")
	oFirst := flag.String("f", "", "first key to dump (if non empty)")
	oLast := flag.String("l", "", "last key to dump (if non empty)")
	oMax := flag.Uint("max", 10, "errors reported limit")
	oStat := flag.Bool("s", false, "show DB stats")
	oVerbose := flag.Bool("v", false, "verbose mode")
	flag.Parse()

	if flag.NArg() != 1 {
		fmt.Fprintln(os.Stderr, "Need exactly one file argument")
		os.Exit(1)
	}

	if *oStat {
		*oVerbose = true
	}

	r := rep
	if !*oVerbose {
		r = null
	}

	if err := main0(flag.Arg(0), int(*oMax), r, *oStat, *oFirst, *oLast, *oDump, *oBuckets); err != nil {
		fmt.Fprintf(os.Stderr, "kvaudit: %v\n", err)
		os.Exit(1)
	}
}
func main() {
	laddr := flag.String("listen", ":8001", "listen address")
	baddr := flag.String("backend", "127.0.0.1:1234", "backend address")
	secret := flag.String("secret", "the answer to life, the universe and everything", "tunnel secret")
	tunnels := flag.Uint("tunnels", 0, "low level tunnel count, 0 to work as server")
	flag.Int64Var(&tunnel.Timeout, "timeout", 3, "tunnel read/write timeout")
	flag.UintVar(&tunnel.LogLevel, "log", 1, "log level")
	flag.Usage = usage
	flag.Parse()

	var app tunnel.Service
	var err error

	if *tunnels == 0 {
		app, err = tunnel.NewServer(*laddr, *baddr, *secret)
	} else {
		app, err = tunnel.NewClient(*laddr, *baddr, *secret, *tunnels)
	}
	if err != nil {
		fmt.Fprintf(os.Stderr, "create service failed: %s\n", err.Error())
		return
	}

	if err = app.Start(); err != nil {
		fmt.Fprintf(os.Stderr, "start failed: %s\n", err.Error())
		return
	}
	handleSignal(app)
}
func init() {
	atomSizeUsage := "atom size (1, 2, or 3)"
	atomSize := flag.Uint("atomsize", 1, atomSizeUsage)

	carrierUsage := "path to message carrier"
	carrier := flag.String("carrier", "", carrierUsage)

	inputUsage := "path to input; can be - for standard in"
	input := flag.String("input", "-", inputUsage)

	boxUsage := "use size-checking encapsulation format"
	box := flag.Bool("box", false, boxUsage)

	offsetUsage := "read/write offset"
	offset := flag.Int64("offset", 0, offsetUsage)

	flag.Parse()

	if *atomSize < 1 || *atomSize > 3 {
		log.Fatalf("atom size must be 1, 2, or 3")
	}

	// Zero is allowed, so the check (and message) is for negative offsets only.
	if *offset < 0 {
		log.Fatalf("offset must be non-negative")
	}

	state = new(cmd.State)
	state.Ctx = steg.NewCtx(uint8(*atomSize))
	state.Carrier, state.CarrierSize = getCarrier(*carrier)
	state.Input, state.InputSize = getInput(*input)
	state.Box = *box
	state.Offset = *offset
}
func main() {
	outputPtr := flag.String("o", "video.mp4", "output file")
	numThreadsPtr := flag.Uint("n", 4, "number of threads")
	quietPtr := flag.Bool("q", false, "quiet output; only errors displayed")

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage of multidown: multidown [-o outputfile] [-n numthreads] url\n")
		flag.PrintDefaults()
	}
	flag.Parse()

	if flag.NArg() < 1 {
		fmt.Fprintln(os.Stderr, "Need to specify a url to download")
		os.Exit(1)
	} else if flag.NArg() != 1 {
		fmt.Fprintln(os.Stderr, "Only one url can be downloaded at a time")
		os.Exit(1)
	}
	if *numThreadsPtr == 0 {
		fmt.Fprintln(os.Stderr, "Running with zero threads means nothing will download")
		os.Exit(1)
	}

	download(flag.Arg(0), *outputPtr, *numThreadsPtr, *quietPtr)
}
func main() { verbose := flag.Bool( "v", false, "Display detailed VM state at each clock cycle.", ) memSize := flag.Uint( "m", 0xffff, "Memory size in 16-bit words.", ) flag.Parse() if len(flag.Args()) != 1 { die(errors.New("No input file.")) } file, err := os.Open(flag.Args()[0]) if err != nil { die(err) } defer file.Close() stdin := bufio.NewReader(os.Stdin) stdout := bufio.NewWriter(os.Stdout) v := vm.New(uint16(*memSize), 5) err = v.LoadBinary(file) if err != nil { die(err) } bytesIn := make(chan byte) go readBytes(stdin, bytesIn) for { if *verbose { fmt.Println(v.Debug()) } buf := make([]byte, 2) if v.Buses[3] == 0 { select { case c := (<-bytesIn): buf[0] = c v.Buses[2] = binary.LittleEndian.Uint16(buf) v.Buses[3] = 1 default: } } if v.Buses[1] != 0 { binary.LittleEndian.PutUint16(buf, v.Buses[0]) stdout.WriteByte(buf[0]) stdout.Flush() v.Buses[1] = 0 } if v.Buses[4] != 0 { break } v.Clock() runtime.Gosched() } }
func main() {
	numUsers := flag.Uint("users", 5, "Number of simulated people")
	ordersPerUser := flag.Uint("orders", 5, "Maximum number of simultaneous orders per person. 0 for no limitation.")
	bank := flag.String("bank", "localhost:9001", "Bank endpoint")
	timeout := flag.Duration("timeout", 0, "Timeout for automatically stopping load generation")
	dynamicUsers := flag.Bool("dynamic", false, "Enable changing # of active users with arrow keys. CTRL-C breaks console")
	var shops golib.StringSlice
	flag.Var(&shops, "shop", "Shop endpoint(s)")
	flag.Parse()

	if len(shops) == 0 {
		log.Fatalln("Specify at least one -shop")
	}
	golib.ConfigureOpenFilesLimit()

	pool = NewPool(*bank, shops)
	pool.OrdersPerPerson = *ordersPerUser
	pool.Start(int(*numUsers))

	if *dynamicUsers {
		fixKeyboard = true
		go readKeyboard(func(b byte) {
			switch b {
			case 65: // Up
				pool.StartOne()
			case 66: // Down
				pool.PauseOne()
			case 67: // Right
				pool.Start(10)
			case 68: // Left
				pool.Pause(10)
			case 10: // Enter
				terminate()
			}
		})
	}
	onInterrupt(terminate)

	if *timeout > 0 {
		services.L.Warnf("Terminating automatically after %v", *timeout)
		time.AfterFunc(*timeout, func() {
			services.L.Warnf("Timer of %v expired. Terminating...", *timeout)
			terminate()
		})
	}

	pool.Wait()
	pool.PrintStats()
}
func main() {
	var redis *radix.Pool
	var indices []string
	var titles map[string]string
	var bitmaps map[string][]uint64

	articles := flag.String("articles", "", "Directory to load *.txt files from")
	limit := flag.Uint("limit", 0, "Max number of articles to read")
	cutoff := flag.Uint("cutoff", 10, "Number of articles a word must appear in to be included in the index")
	memlim := flag.Uint("memory", 0, "Target index final memory consumption in MB")
	flag.Parse()

	var bindAddr string
	if *articles != "" {
		indices, titles, bitmaps = loadArticles(*articles, *limit, *cutoff, *memlim)
		bindAddr = ":4088"
	} else {
		var err error
		redis, err = radix.New("tcp", "localhost:6379", 10)
		if err != nil {
			log.Fatal(err)
		}
		bindAddr = ":4080"
	}

	http.HandleFunc("/content", func(w http.ResponseWriter, r *http.Request) {
		q := r.FormValue("q")
		if q == "" {
			fmt.Fprint(w, "Usage: /content?q=ask+me")
			return
		}
		words := strings.Split(q, " ")
		if redis != nil {
			askRedis(w, words, redis)
		} else {
			searchMemory(w, words, indices, titles, bitmaps)
		}
	})

	s := &http.Server{
		Addr:         bindAddr,
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	log.Print("ready\n")
	log.Fatal(s.ListenAndServe())
}
func init() {
	deliveryProperties.ContentType = flag.String("content-type", "", "Content-type, else derived from file extension.")
	deliveryProperties.ContentEncoding = flag.String("content-encoding", "UTF-8", "Mime content-encoding.")
	deliveryProperties.DeliveryMode = flag.Uint("delivery-mode", 2, "Delivery mode (1 for non-persistent, 2 for persistent).")
	deliveryProperties.Priority = flag.Uint("priority", 0, "queue implementation use - 0 to 9")
	deliveryProperties.ReplyTo = flag.String("replyto", "", "application use - address to reply to (ex: rpc)")
	deliveryProperties.Expiration = flag.String("expiration", "", "implementation use - message expiration spec")
	deliveryProperties.Timestamp = flag.Int64("timestamp", time.Now().Unix(), "unix timestamp of message")
	deliveryProperties.Type = flag.String("type", "", "application use - message type name")
	deliveryProperties.UserId = flag.String("userid", "", "application use - creating user - should be authenticated user")
	deliveryProperties.AppId = flag.String("appid", "", "application use - creating application id")

	flag.Var(&deliveryProperties.CorrelationIdGenerator, "correlationid", "'series' for incrementing ids, 'uuid' for UUIDs, static value otherwise")
	flag.Var(&deliveryProperties.MessageIdGenerator, "messageid", "'series' for incrementing ids, 'uuid' for UUIDs, static value otherwise")

	flag.BoolVar(&versionFlag, "version", false, "Print version and exit")
	flag.BoolVar(&revFlag, "rev", false, "Print git revision and exit")
}
func main() {
	var (
		flagBind         = flag.String("bind", "", "Bind to these address:port pairs")
		flagDebug        = flag.Bool("debug", false, "Enable debug logging")
		flagIdleTimeout  = flag.Duration("idle-timeout", 60*time.Second, "Disconnect clients without any activity within this time")
		flagReadTimeout  = flag.Duration("read-timeout", 10*time.Second, "Maximum time to receive a single message")
		flagWriteTimeout = flag.Duration("write-timeout", 10*time.Second, "Maximum time to send a single message")
		flagMaxMessage   = flag.Uint("max-message", 16<<10, "Maximum message length accepted by server. Clients trying to send more will be disconnected")
		flagReadBuffer   = flag.Uint("read-buffer", 0, "Read buffer size for sockets")
	)
	flag.Parse()

	// Set number of parallel threads to number of CPUs.
	runtime.GOMAXPROCS(runtime.NumCPU())

	dlock.Debug = *flagDebug

	server := NewServer(*flagBind, *flagIdleTimeout)
	server.ConfigDebug = *flagDebug
	server.ConfigMaxMessage = *flagMaxMessage
	server.ConfigReadBuffer = *flagReadBuffer
	server.ConfigReadTimeout = *flagReadTimeout
	server.ConfigWriteTimeout = *flagWriteTimeout

	if *flagDebug {
		log.SetFlags(log.Flags() | log.Lmicroseconds)
	}

	listenCount := server.Start()
	if listenCount == 0 {
		os.Exit(1)
	}

	sigIntChan := make(chan os.Signal, 1)
	signal.Notify(sigIntChan, syscall.SIGINT)
	go func() {
		<-sigIntChan
		if server.ConfigDebug {
			log.Printf("main: goroutines=%d", runtime.NumGoroutine())
		}
		server.Close()
	}()

	server.Wait()
}
// Initialisation parses the command line and returns the requested line count.
func Initialisation() uint {
	nbLigne := flag.Uint("nbLigne", 100, "number of lines to generate")
	flag.Parse()
	return *nbLigne
}
func main() {
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	must(err)
	distBase := filepath.Join(dir, "..")

	port := flag.Uint("port", 8080, "port to listen on")
	templateDir := flag.String("templateDir", filepath.Join(distBase, "web", "templates"), "path to html templates")
	storeDir := flag.String("storeDir", filepath.Join(distBase, "db"), "directory for saving persistent data")
	buildsDir := flag.String("buildsDir", filepath.Join(distBase, "builds"), "directory for saving build output")
	assetsDir := flag.String("assetsDir", filepath.Join(distBase, "web", "assets"), "path to static web assets")
	gooseCmd := flag.String("gooseCmd", filepath.Join(distBase, "bin", "goose"), `path to "goose" database migration tool`)
	debugMode := flag.Bool("debugMode", false, "do not parse templates up front. Only for development use")
	flag.Parse()

	bootMsg := `
 _    _                 _ _                            _____ _____ 
| |  | |               | | |                          /  __ \_   _|
| |  | | ___   ___   __| | |__   ___  _   _ ___  ___ ______| /  \/ | |  
| |/\| |/ _ \ / _ \ / _` + "`" + ` | '_ \ / _ \| | | / __|/ _ \______| | |   | |  
\  /\  / (_) | (_) | (_| | | | | (_) | |_| \__ \  __/       | \__/\_| |_ 
 \/  \/ \___/ \___/ \__,_|_| |_|\___/ \__,_|___/\___|        \____/\___/ 
`
	fmt.Println(bootMsg)

	if *debugMode {
		log.Println("Starting in debug mode")
	}

	dbDir := filepath.Join(*storeDir, "sqlite")
	must(os.MkdirAll(dbDir, 0755))
	migrateCmd := exec.Command(*gooseCmd, "up")
	migrateCmd.Dir = filepath.Join(*storeDir, "..")
	must(migrateCmd.Run())

	jobRepo, err := db.NewJobRepository(filepath.Join(dbDir, "store.db"))
	must(err)

	// Only Interrupt is handled, as it is available on all major platforms
	// and is the most common way of stopping Woodhouse-CI.
	exitChan := make(chan os.Signal)
	signal.Notify(exitChan, os.Interrupt)
	go func(c <-chan os.Signal) {
		log.Printf("Caught signal %s. Closing database connections. Goodbye!\n", <-c)
		must(jobRepo.Close())
		os.Exit(0)
	}(exitChan)

	handler := web.New(&jobs.Service{
		JobRepository:   jobRepo,
		Runner:          runner.NewDockerRunner(vcs.GitCloner{}),
		BuildRepository: builds.NewRepository(*buildsDir),
	}, *templateDir, !*debugMode)

	server := negroni.New(negroni.NewRecovery(), negroni.NewLogger(), negroni.NewStatic(http.Dir(*assetsDir)))
	server.UseHandler(handler)
	server.Run(fmt.Sprintf("0.0.0.0:%d", *port))
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	initialWrite := flag.Uint("initial_write", 1000000, "number of rows to write before reading")
	writeThreads := flag.Uint("writer_threads", 10, "number of writer threads")
	numIter := flag.Uint("num_iter", 100, "number of iterators to run")
	path := flag.String("db_path", "", "db path")
	dbType := flag.String("db", "bolt", "bolt or rocks")
	flag.Parse()

	db := loadDB(*path, *dbType)
	defer db.Close()

	// go rocksiterbench.RecordDiskUsage(*path)

	writer := rocksiterbench.NewDBWriter(db, *initialWrite, *writeThreads)
	writer.WriteAll()

	scanner := rocksiterbench.NewRangeScanner(db, *numIter)

	// bg writer
	go rocksiterbench.NewDBWriter(db, 1000000, *writeThreads).WriteAll()

	scanner.ScanAll()
}
func Main() error { messageTimeout := flag.Duration("message_timeout", 2*time.Minute, "timeout for one message to be proxied") clientIdleTimeout := flag.Duration("client_idle_timeout", 60*time.Minute, "idle timeout for client connections") getLastErrorTimeout := flag.Duration("get_last_error_timeout", time.Minute, "timeout for getLastError pinning") maxConnections := flag.Uint("max_connections", 100, "maximum number of connections per mongo") portStart := flag.Int("port_start", 6000, "start of port range") portEnd := flag.Int("port_end", 6010, "end of port range") addrs := flag.String("addrs", "localhost:27017", "comma separated list of mongo addresses") flag.Parse() replicaSet := dvara.ReplicaSet{ Addrs: *addrs, PortStart: *portStart, PortEnd: *portEnd, MessageTimeout: *messageTimeout, ClientIdleTimeout: *clientIdleTimeout, GetLastErrorTimeout: *getLastErrorTimeout, MaxConnections: *maxConnections, } var statsClient stats.HookClient var log stdLogger var graph inject.Graph err := graph.Provide( &inject.Object{Value: &log}, &inject.Object{Value: &replicaSet}, &inject.Object{Value: &statsClient}, ) if err != nil { return err } if err := graph.Populate(); err != nil { return err } objects := graph.Objects() // Temporarily setup the metrics against a test registry. gregistry := gangliamr.NewTestRegistry() for _, o := range objects { if rmO, ok := o.Value.(registerMetrics); ok { rmO.RegisterMetrics(gregistry) } } if err := startstop.Start(objects, &log); err != nil { return err } defer startstop.Stop(objects, &log) ch := make(chan os.Signal, 2) signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT) <-ch signal.Stop(ch) return nil }
func main() {
	configFile := flag.String("config", "/etc/hekad.toml", "Config file")
	maxprocs := flag.Int("maxprocs", 1, "Go runtime MAXPROCS value")
	poolSize := flag.Int("poolsize", 100, "Pipeline pool size")
	decoderPoolSize := flag.Int("decoder_poolsize", 4, "Decoder pool size")
	chanSize := flag.Int("plugin_chansize", 50, "Plugin input channel buffer size")
	cpuProfName := flag.String("cpuprof", "", "Go CPU profiler output file")
	memProfName := flag.String("memprof", "", "Go memory profiler output file")
	version := flag.Bool("version", false, "Output version and exit")
	maxMsgLoops := flag.Uint("max_message_loops", 4, "Maximum number of times a message can pass thru the system")
	flag.Parse()

	if *version {
		fmt.Println(VERSION)
		os.Exit(0)
	}

	runtime.GOMAXPROCS(*maxprocs)

	if *cpuProfName != "" {
		profFile, err := os.Create(*cpuProfName)
		if err != nil {
			log.Fatalln(err)
		}
		pprof.StartCPUProfile(profFile)
		defer pprof.StopCPUProfile()
	}

	if *memProfName != "" {
		defer func() {
			profFile, err := os.Create(*memProfName)
			if err != nil {
				log.Fatalln(err)
			}
			pprof.WriteHeapProfile(profFile)
			profFile.Close()
		}()
	}

	// Set up and load the pipeline configuration and start the daemon.
	globals := pipeline.DefaultGlobals()
	globals.PoolSize = *poolSize
	globals.DecoderPoolSize = *decoderPoolSize
	globals.PluginChanSize = *chanSize
	globals.MaxMsgLoops = *maxMsgLoops
	if globals.MaxMsgLoops == 0 {
		globals.MaxMsgLoops = 1
	}
	pipeconf := pipeline.NewPipelineConfig(globals)

	err := pipeconf.LoadFromConfigFile(*configFile)
	if err != nil {
		log.Fatal("Error reading config: ", err)
	}

	pipeline.Run(pipeconf)
}