func main() { envflag.Parse() app := cli.NewApp() app.Name = "drone" app.Version = version.Version app.Usage = "command line utility" app.Flags = []cli.Flag{ cli.StringFlag{ Name: "t, token", Usage: "server auth token", EnvVar: "DRONE_TOKEN", }, cli.StringFlag{ Name: "s, server", Usage: "server location", EnvVar: "DRONE_SERVER", }, } app.Commands = []cli.Command{ agent.AgentCmd, agentsCmd, buildCmd, deployCmd, execCmd, infoCmd, secretCmd, serverCmd, signCmd, repoCmd, userCmd, } app.Run(os.Args) }
func main() { envflag.Parse() if *debug { logrus.SetLevel(logrus.DebugLevel) } else { logrus.SetLevel(logrus.WarnLevel) } handler := router.Load( ginrus.Ginrus(logrus.StandardLogger(), time.RFC3339, true), middleware.Version, middleware.Store(), middleware.Remote(), middleware.Cache(), ) if *cert != "" { logrus.Fatal( http.ListenAndServeTLS(*addr, *cert, *key, handler), ) } else { logrus.Fatal( http.ListenAndServe(*addr, handler), ) } }
func main() { envflag.Parse() app := cli.NewApp() app.Name = "drone" app.Version = version.Version app.Usage = "command line utility" app.Flags = []cli.Flag{ cli.StringFlag{ Name: "t, token", Value: "", Usage: "server auth token", EnvVar: "DRONE_TOKEN", }, cli.StringFlag{ Name: "s, server", Value: "", Usage: "server location", EnvVar: "DRONE_SERVER", }, } app.Commands = []cli.Command{ agent.AgentCmd, DaemonCmd, SignCmd, SecretCmd, } app.Run(os.Args) }
func SetupRedis() *redis.Pool { maxConnections := envflag.Int("REDIS_MAX_CONNECTIONS", 400, "Maximum ammount of concurrent Redis connections") redisURL := envflag.String("REDIS_URL", "redis://localhost:6379", "Redis database url") envflag.Parse() pool, err := redisurl.NewPoolWithURL(*redisURL, 3, *maxConnections, "240s") if err != nil { panic(err) } return pool }
func setupCluster(t *testing.T) *Cluster { var ( hostport = envflag.String("SHEEPDOG_HOSTPORT", "", "Host:port for Sheepdog server") ) envflag.Parse() c, err := NewCluster(*hostport) if err != nil { t.Fatalf("unable to create cluster: %s", err) } return c }
func main() { runtime.GOMAXPROCS(runtime.NumCPU()) port := envflag.String("PORT", "6380", "Port in which to serve Philote websocket connections") envflag.Parse() log.Printf("[Main] Initializing Philotic Network\n") log.Printf("[Main] Version: %v\n", VERSION) log.Printf("[Main] Port: %v\n", *port) log.Printf("[Main] Cores: %v\n", runtime.NumCPU()) done := make(chan bool) RunServer(done, *port) }
func main() { pagerkey := envflag.String("PAGERDUTY_KEY", "", "Pager API string") consulKV := envflag.String("CONSUL_KV_ADDR", "127.0.0.1:8500", "Address to consul to use for KV.") consulWatch := envflag.String("CONSUL_WATCH_ADDR", "127.0.0.1:8500", "Address to consul to use for watch.") bootstrap := envflag.Bool("NOTIF_BOOTSTRAP", false, "Starts the daemon in bootstrap mode. This prevents it from emitting any notifications and should be used to pre-populate the KV store with the current state of the world.") slackWebhook := envflag.String("SLACK_WEBHOOK_URL", "", "The webhook URL for slack.") slackUsername := envflag.String("SLACK_USERNAME", "notif", "The username for the slack webhook to be posted as.") slackChannel := envflag.String("SLACK_CHANNEL", "", "The channel for the slack webhook to be post to.") slackIcon := envflag.String("SLACK_ICON", "", "The icon to use when posting the slack webhook.") envflag.Parse() logrus.SetLevel(logrus.DebugLevel) logrus.SetFormatter(&logrus.JSONFormatter{}) drain := make(chan *consulapi.HealthCheck) var pager notif.Notifier if *bootstrap { pager = ¬if.NoopNotifier{} } else if *pagerkey != "" { pager = notif.NewPager(*pagerkey, nil) } else if *slackWebhook != "" { pager = notif.NewSlackNotifier(*slackUsername, *slackWebhook, *slackIcon, *slackChannel) } else { pager = ¬if.PrintNotifier{} } w, err := notif.NewWatcher(*consulWatch, "checks", drain) if err != nil { logrus.WithFields(logrus.Fields{"err": err}).Error("faild to build watcher") } go w.Run() config := *consulapi.DefaultConfig() config.Address = *consulKV cc, err := consulapi.NewClient(&config) if err != nil { panic(err) } p := notif.NewProcessor(drain, pager, cc) p.Run() }
func main() { envflag.Parse() if *clusterAddr != "" { } lis, err := net.Listen("tcp", port) if err != nil { log.Fatalf("failed to listen: %v", err) } logger := log.New(os.Stdout, "server: ", log.Ldate|log.Ltime) s := grpc.NewServer() server, err := server.NewServer(*clusterAddr, logger) if err != nil { logger.Fatalf("unable to create server: %v\n", err) } set.RegisterAddOnlyServer(s, server) go s.Serve(lis) }
func main() { var ( // This points to the client access token for your account on wit.ai, that you // will possibly have stored within an environment variable. accessToken = envflag.String("ACCESS_TOKEN", "", "WIT client access token") // The recording device you will use for voice queries. // Usually, you can leave it be default. device = flag.String("device", witai.DefaultDevice, "device name for recording input") ) envflag.Parse() flag.Parse() // Create a new wit-ai context that will be used for queries. ctx, err := witai.NewContext(*device, *accessToken, witai.Error) if err != nil { log.Fatalln("cannot create new wit-ai context:", err) } // Always make sure to close the context once you are done. defer ctx.Close() log.Println("Say something nice now: ...") done := make(chan struct{}) // Query the wit.ai voice service asyncly. if err := ctx.VoiceQueryAutoAsync(func(s string) { r, err := witai.NewResult(s) if err != nil || !r.IsValid() { return } log.Printf("Result: %q\n", r.Outcomes[0].Text) // We can exit now that we have the result. close(done) }); err != nil { log.Fatalln("cannot query wit-ai:", err) } // Wait exiting the process until the async result returns. <-done }
func main() { flag.Parse() envflag.Parse() if !*debug { // disbale gin debug mode gin.SetMode(gin.ReleaseMode) } err := repo.LoadRepoStorage() if err != nil { log.Fatalf("repo storage error: %s", err) } log.Printf("using repo storage path: %s", repo.RepoStorage) ctxStore, err := datastore.Load() if err != nil { log.Fatalf("failed to load datastore: %s", err) } ctxRemote := remote.Load() state := checker.NewState(stateTTL) chck := checker.Checker{ Remote: ctxRemote, Store: ctxStore, State: state, } go chck.Run() // setup the server and start listening handler := router.Load( context.SetStore(ctxStore), context.SetRemote(ctxRemote), context.SetState(state), ) log.Fatal(http.ListenAndServe(*addr, handler)) }
// main is a smoke test for a sheepdog cluster: it creates a VDI, writes
// payloadCount payloads over one connection, reads them back (optionally
// through a separate read-only connection), and logs any failures.
func main() {
	var (
		hostport     = envflag.String("SHEEPDOG_HOSTPORT", "", "host:port pair for sheepdog cluster")
		readHostPort = envflag.String("SHEEPDOG_READ_HOSTPORT", "", "host:port pair for only issuing reads to sheepdog cluster")
		readDelay    = envflag.Duration("SHEEPDOG_READ_SLEEP", 0, "time to sleep between each read of test")
		payloadCount = envflag.Int("SHEEPDOG_TEST_PAYLOAD_COUNT", 10, "payload count to issue reads and writes to sheepdog")
		vdiSize      = envflag.Int("SHEEPDOG_VDI_SIZE", 1<<22, "create vdi of given size")
		vdiName      = envflag.String("SHEEPDOG_VDI_NAME", "testvdi", "name of vdi to test read/writes across")
	)
	envflag.Parse()

	// Primary (write) connection to the cluster.
	c, err := picard.NewCluster(*hostport)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("Created connection to sheepdog successfully")
	defer func() {
		if err := c.Disconnect(); err != nil {
			log.Fatalln(err)
		}
		log.Printf("Successfully disconnected!")
	}()

	// Create and open the test VDI. The deferred cleanup closes and deletes
	// it, and — by LIFO defer order — runs before the disconnect above.
	vdi, err := c.CreateOpenVDI(*vdiName, uint64(*vdiSize))
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("Created and opened VDI successfully")
	defer func() {
		vdi.Close()
		if err := c.DeleteVDI(vdi.Name()); err != nil {
			log.Fatalln(err)
		}
		log.Printf("Successfully deleted vdi: %q", vdi.Name())
	}()

	// By default reads use the same handle as writes. When a dedicated read
	// address is configured, open a second connection and a separate handle
	// to the same VDI for reads only.
	rvdi := vdi
	if *readHostPort != "" {
		rc, err := picard.NewCluster(*readHostPort)
		if err != nil {
			log.Fatalln(err)
		}
		log.Printf("Created read connection to sheepdog successfully")
		defer func() {
			if err := rc.Disconnect(); err != nil {
				log.Fatalln(err)
			}
			log.Printf("Successfully disconnected for read connection!")
		}()
		rvdi, err = rc.OpenVDI(*vdiName)
		if err != nil {
			log.Fatalln(err)
		}
		log.Printf("Opened VDI successfully for reads")
		defer rvdi.Close()
	}

	// The writer goroutine hands each written payload to the reader via
	// vdiChan; reads run on the main goroutine. readFromVDI presumably
	// verifies payloads against what was written — confirm in its definition.
	vdiChan := make(chan *vdiData)
	go func() {
		if count, err := writeToVDI(vdi, vdiChan, *payloadCount); err != nil {
			log.Printf("Error while writing at %d: %s", count, err)
		}
	}()
	if count, failed, err := readFromVDI(rvdi, vdiChan, *readDelay); err != nil {
		log.Printf("Error occurred during reads:\n\tTotal: %d, Failures: %d\n\tError: %q", count, failed, err)
	}
}