func setupLogging(cfg *config) { capnslog.SetGlobalLogLevel(capnslog.INFO) if cfg.Debug { capnslog.SetGlobalLogLevel(capnslog.DEBUG) } if cfg.LogPkgLevels != "" { repoLog := capnslog.MustRepoLogger("github.com/coreos/etcd") settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels) if err != nil { plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error()) return } repoLog.SetLogLevel(settings) } // capnslog initially SetFormatter(NewDefaultFormatter(os.Stderr)) // where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1 // specify 'stdout' or 'stderr' to skip journald logging even when running under systemd switch cfg.logOutput { case "stdout": capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug)) case "stderr": capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug)) case "default": default: plog.Panicf(`unknown log-output %q (only supports "default", "stdout", "stderr")`, cfg.logOutput) } }
func init() { capabilities.SetForTests(capabilities.Capabilities{ AllowPrivileged: true, }) flag.Set("v", "5") if len(os.Getenv("OS_TEST_VERBOSE_ETCD")) > 0 { capnslog.SetGlobalLogLevel(capnslog.DEBUG) capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr)) } else { capnslog.SetGlobalLogLevel(capnslog.INFO) capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr)) } }
func main() { flag.Parse() capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr)) client := startClient() client.MustDetectRemote() c := NewController(client) c.RefreshColumns() g := gocui.NewGui() if err := g.Init(); err != nil { log.Fatal(err) } g.SetLayout(c.layout) if err := initKeybindings(g, c); err != nil { log.Fatal(err) } err := g.MainLoop() if err != nil && err != gocui.Quit { g.Close() log.Fatal(err) } g.Close() }
// main demonstrates capnslog: per-repo log level configuration from the
// command line, package loggers at every level, and capture of the
// standard library "log" package output.
func main() {
	rl := capnslog.MustRepoLogger("github.com/coreos/pkg/capnslog/cmd")
	capnslog.SetFormatter(capnslog.NewStringFormatter(os.Stderr))

	// We can parse the log level configs from the command line
	flag.Parse()
	if flag.NArg() > 1 {
		// NOTE(review): this reads flag.Arg(1) (the SECOND positional
		// argument) guarded by NArg() > 1 — confirm the first argument
		// isn't the intended one (flag.Arg(0) with NArg() > 0).
		cfg, err := rl.ParseLogLevelConfig(flag.Arg(1))
		if err != nil {
			log.Fatal(err)
		}
		rl.SetLogLevel(cfg)
		log.Infof("Setting output to %s", flag.Arg(1))
	}

	// Send some messages at different levels to the different packages
	dlog.Infof("Hello Dolly")
	dlog.Warningf("Well hello, Dolly")
	log.Errorf("It's so nice to have you back where you belong")
	dlog.Debugf("You're looking swell, Dolly")
	dlog.Tracef("I can tell, Dolly")

	// We also have control over the built-in "log" package.
	capnslog.SetGlobalLogLevel(logLevel)
	oldlog.Println("You're still glowin', you're still crowin', you're still lookin' strong")

	// Fatalf logs and then terminates the process.
	log.Fatalf("Dolly'll never go away again")
}
func main() { // Parse command-line arguments flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError) flagConfigPath := flag.String("config", "/etc/clair/config.yaml", "Load configuration from the specified file.") flagCPUProfilePath := flag.String("cpu-profile", "", "Write a CPU profile to the specified file before exiting.") flagLogLevel := flag.String("log-level", "info", "Define the logging level.") flag.Parse() // Load configuration config, err := config.Load(*flagConfigPath) if err != nil { log.Fatalf("failed to load configuration: %s", err) } // Initialize logging system logLevel, err := capnslog.ParseLevel(strings.ToUpper(*flagLogLevel)) capnslog.SetGlobalLogLevel(logLevel) capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false)) // Enable CPU Profiling if specified if *flagCPUProfilePath != "" { defer stopCPUProfiling(startCPUProfiling(*flagCPUProfilePath)) } clair.Boot(config) }
func init() { capabilities.SetForTests(capabilities.Capabilities{ AllowPrivileged: true, }) flag.Set("v", "5") capnslog.SetGlobalLogLevel(capnslog.DEBUG) capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr)) }
func ClairServiceInit() error { // Load database setting if setting.ClairDBPath != "" { clairConf.DBPath = setting.ClairDBPath } else { clairConf.DBPath = DefaultClairDBPath } clairConf.KeepDB = setting.ClairKeepDB clairConf.LogLevel = setting.ClairLogLevel clairConf.Duration = setting.ClairUpdateDuration clairConf.VulnPriority = setting.ClairVulnPriority // Set database if err := database.Open("bolt", clairConf.DBPath); err != nil { logrus.Debug(err) return err } // Set logLevel of clair lib logLevel, err := capnslog.ParseLevel(strings.ToUpper(clairConf.LogLevel)) if err != nil { logLevel, _ = capnslog.ParseLevel(strings.ToUpper(DefaultClairLogLevel)) } capnslog.SetGlobalLogLevel(logLevel) capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false)) // Set minumum priority parameter. if types.Priority(clairConf.VulnPriority).IsValid() { logrus.Debugf("Vuln priority is invalid :%v.", clairConf.VulnPriority) clairConf.VulnPriority = DefaultClairVulnPriority } // Set 'duration' and Update the CVE database if clairConf.Duration == "" { logrus.Debugf("No duration set, so only update at the beginning.") go updater.Update() clairStopper = nil } else { st := utils.NewStopper() st.Begin() d, err := time.ParseDuration(clairConf.Duration) if err != nil { logrus.Warnf("Wrong duration format, use the default duration: %v.", DefaultClairUpdateDuration) clairConf.Duration = DefaultClairUpdateDuration d, err = time.ParseDuration(clairConf.Duration) if err != nil { logrus.Debugf("Cannot pare du %v", err) } } go updater.Run(d, st) clairStopper = st st.Begin() } return nil }
// main is a capnslog demo: prefers journald output when available and
// emits one message at every log level.
func main() {
	capnslog.SetGlobalLogLevel(capnslog.DEBUG)
	capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr))
	// Prefer journald when a journald formatter can be constructed;
	// otherwise keep the glog formatter installed above.
	f, err := capnslog.NewJournaldFormatter()
	if err == nil {
		capnslog.SetFormatter(f)
	}
	plog.Tracef("trace")
	plog.Debugf("debug")
	plog.Noticef("notice")
	plog.Infof("info")
	plog.Warningf("warn")
	plog.Errorf("error")
	// NOTE(review): Fatalf terminates the process, so the Panicf below is
	// unreachable — presumably intentional for this demo.
	plog.Fatalf("fatal")
	plog.Panicf("panic")
}
func init() { log.SetFormatter(log.NewPrettyFormatter(os.Stdout, true)) apnsLogger = log.NewPackageLogger("apns-microservice", "apns") serverLogger = log.NewPackageLogger("apns-microservice", "http") log.SetGlobalLogLevel(log.INFO) apns.SetLogger(apnsLogger) server.SetLogger(serverLogger) }
// main is a plog demo: enables TRACE-level pretty output and logs the
// same message at every level.
func main() {
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false))
	capnslog.SetGlobalLogLevel(capnslog.TRACE)
	plog.Debug("aaa")
	plog.Trace("aaa")
	plog.Info("aaa")
	plog.Notice("aaa")
	plog.Warning("aaa")
	plog.Error("aaa")
	// NOTE(review): Fatal terminates the process, so the Panic below is
	// unreachable — presumably intentional for this demo.
	plog.Fatal("aaa")
	plog.Panic("aaa")
}
func InitializeConfig() { rl := capnslog.MustRepoLogger("github.com/ecnahc515/core") capnslog.SetFormatter(capnslog.NewStringFormatter(os.Stderr)) capnslog.SetGlobalLogLevel(capnslog.INFO) if logLevel != "" { llc, err := rl.ParseLogLevelConfig(logLevel) if err != nil { plog.Fatal(err) } rl.SetLogLevel(llc) plog.Printf("Setting log level to %s", logLevel) } // TODO move to fetch/run specifically? if coreCfg.ImageDirectory == coreos.DefaultImageDirectory { coreCfg.ImageDirectory = os.ExpandEnv(coreCfg.ImageDirectory) err := CreateDirIfNotExist(coreCfg.ImageDirectory) if err != nil { plog.Errorf("Unable to create default image directory, err: %s", err) } } }
// Run is the main execution entrypoint to run mgmt. It configures
// logging, resolves hostname and working prefix, starts the converger
// and embedded etcd, runs the graph main loop in a goroutine, wires up
// config watching and remote execution, then blocks until an exit is
// signalled and tears everything down, returning any accumulated errors.
func (obj *Main) Run() error {
	var start = time.Now().UnixNano()

	var flags int
	if obj.DEBUG || true { // TODO: remove || true
		flags = log.LstdFlags | log.Lshortfile
	}
	flags = (flags - log.Ldate) // remove the date for now
	log.SetFlags(flags)

	// un-hijack from capnslog...
	log.SetOutput(os.Stderr)
	if obj.VERBOSE {
		capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stderr, "(etcd) ", flags))
	} else {
		capnslog.SetFormatter(capnslog.NewNilFormatter())
	}

	log.Printf("This is: %s, version: %s", obj.Program, obj.Version)
	log.Printf("Main: Start: %v", start)

	hostname, err := os.Hostname() // a sensible default
	// allow passing in the hostname, instead of using the system setting
	if h := obj.Hostname; h != nil && *h != "" { // override by cli
		hostname = *h
	} else if err != nil {
		return errwrap.Wrapf(err, "Can't get default hostname!")
	}
	if hostname == "" { // safety check
		return fmt.Errorf("Hostname cannot be empty!")
	}

	var prefix = fmt.Sprintf("/var/lib/%s/", obj.Program) // default prefix
	if p := obj.Prefix; p != nil {
		prefix = *p
	}
	// make sure the working directory prefix exists
	// (note: || short-circuits, so MkdirAll isn't attempted when TmpPrefix is set)
	if obj.TmpPrefix || os.MkdirAll(prefix, 0770) != nil {
		if obj.TmpPrefix || obj.AllowTmpPrefix {
			var err error
			if prefix, err = ioutil.TempDir("", obj.Program+"-"+hostname+"-"); err != nil {
				return fmt.Errorf("Main: Error: Can't create temporary prefix!")
			}
			log.Println("Main: Warning: Working prefix directory is temporary!")
		} else {
			return fmt.Errorf("Main: Error: Can't create prefix!")
		}
	}
	log.Printf("Main: Working prefix is: %s", prefix)

	var wg sync.WaitGroup
	var G, oldGraph *pgraph.Graph

	// exit after `max-runtime` seconds for no reason at all...
	if i := obj.MaxRuntime; i > 0 {
		go func() {
			time.Sleep(time.Duration(i) * time.Second)
			obj.Exit(nil)
		}()
	}

	// setup converger
	converger := converger.NewConverger(
		obj.ConvergedTimeout,
		nil, // stateFn gets added in by EmbdEtcd
	)
	go converger.Loop(true) // main loop for converger, true to start paused

	// embedded etcd
	if len(obj.seeds) == 0 {
		log.Printf("Main: Seeds: No seeds specified!")
	} else {
		log.Printf("Main: Seeds(%d): %v", len(obj.seeds), obj.seeds)
	}
	EmbdEtcd := etcd.NewEmbdEtcd(
		hostname,
		obj.seeds,
		obj.clientURLs,
		obj.serverURLs,
		obj.NoServer,
		obj.idealClusterSize,
		prefix,
		converger,
	)
	if EmbdEtcd == nil {
		// TODO: verify EmbdEtcd is not nil below...
		obj.Exit(fmt.Errorf("Main: Etcd: Creation failed!"))
	} else if err := EmbdEtcd.Startup(); err != nil { // startup (returns when etcd main loop is running)
		obj.Exit(fmt.Errorf("Main: Etcd: Startup failed: %v", err))
	}
	convergerStateFn := func(b bool) error {
		// exit if we are using the converged timeout and we are the
		// root node. otherwise, if we are a child node in a remote
		// execution hierarchy, we should only notify our converged
		// state and wait for the parent to trigger the exit.
		if t := obj.ConvergedTimeout; obj.Depth == 0 && t >= 0 {
			if b {
				log.Printf("Converged for %d seconds, exiting!", t)
				obj.Exit(nil) // trigger an exit!
			}
			return nil
		}
		// send our individual state into etcd for others to see
		return etcd.EtcdSetHostnameConverged(EmbdEtcd, hostname, b) // TODO: what should happen on error?
	}
	if EmbdEtcd != nil {
		converger.SetStateFn(convergerStateFn)
	}

	var gapiChan chan error // stream events are nil errors
	if obj.GAPI != nil {
		data := gapi.Data{
			Hostname: hostname,
			EmbdEtcd: EmbdEtcd,
			Noop:     obj.Noop,
			NoWatch:  obj.NoWatch,
		}
		if err := obj.GAPI.Init(data); err != nil {
			obj.Exit(fmt.Errorf("Main: GAPI: Init failed: %v", err))
		} else if !obj.NoWatch {
			gapiChan = obj.GAPI.SwitchStream() // stream of graph switch events!
		}
	}

	exitchan := make(chan struct{}) // exit on close

	// main graph loop: waits for etcd events, GAPI stream events, or the
	// one-shot start kick; rebuilds and (re)starts the resource graph.
	go func() {
		startChan := make(chan struct{}) // start signal
		go func() { startChan <- struct{}{} }()
		log.Println("Etcd: Starting...")
		etcdChan := etcd.EtcdWatch(EmbdEtcd)
		first := true // first loop or not
		for {
			log.Println("Main: Waiting...")
			select {
			case <-startChan: // kick the loop once at start
				// pass
			case b := <-etcdChan:
				if !b { // ignore the message
					continue
				}
				// everything else passes through to cause a compile!
			case err, ok := <-gapiChan:
				if !ok { // channel closed
					if obj.DEBUG {
						log.Printf("Main: GAPI exited")
					}
					gapiChan = nil // disable it
					continue
				}
				if err != nil {
					obj.Exit(err) // trigger exit
					continue
					//return // TODO: return or wait for exitchan?
				}
				if obj.NoWatch { // extra safety for bad GAPI's
					log.Printf("Main: GAPI stream should be quiet with NoWatch!") // fix the GAPI!
					continue // no stream events should be sent
				}
			case <-exitchan:
				return
			}

			if obj.GAPI == nil {
				log.Printf("Config: GAPI is empty!")
				continue
			}

			// we need the vertices to be paused to work on them, so
			// run graph vertex LOCK...
			if !first { // TODO: we can flatten this check out I think
				converger.Pause() // FIXME: add sync wait?
				G.Pause()         // sync
				//G.UnGroup() // FIXME: implement me if needed!
			}

			// make the graph from yaml, lib, puppet->yaml, or dsl!
			newGraph, err := obj.GAPI.Graph() // generate graph!
			if err != nil {
				log.Printf("Config: Error creating new graph: %v", err)
				// unpause!
				if !first {
					G.Start(&wg, first) // sync
					converger.Start()   // after G.Start()
				}
				continue
			}

			// apply the global noop parameter if requested
			if obj.Noop {
				for _, m := range newGraph.GraphMetas() {
					m.Noop = obj.Noop
				}
			}

			// FIXME: make sure we "UnGroup()" any semi-destructive
			// changes to the resources so our efficient GraphSync
			// will be able to re-use and cmp to the old graph.
			newFullGraph, err := newGraph.GraphSync(oldGraph)
			if err != nil {
				log.Printf("Config: Error running graph sync: %v", err)
				// unpause!
				if !first {
					G.Start(&wg, first) // sync
					converger.Start()   // after G.Start()
				}
				continue
			}
			oldGraph = newFullGraph // save old graph
			G = oldGraph.Copy()     // copy to active graph

			G.AutoEdges() // add autoedges; modifies the graph
			G.AutoGroup() // run autogroup; modifies the graph
			// TODO: do we want to do a transitive reduction?

			log.Printf("Graph: %v", G) // show graph
			if obj.GraphvizFilter != "" {
				if err := G.ExecGraphviz(obj.GraphvizFilter, obj.Graphviz); err != nil {
					log.Printf("Graphviz: %v", err)
				} else {
					log.Printf("Graphviz: Successfully generated graph!")
				}
			}
			G.AssociateData(converger)
			// G.Start(...) needs to be synchronous or wait,
			// because if half of the nodes are started and
			// some are not ready yet and the EtcdWatch
			// loops, we'll cause G.Pause(...) before we
			// even got going, thus causing nil pointer errors
			G.Start(&wg, first) // sync
			converger.Start()   // after G.Start()
			first = false
		}
	}()

	configWatcher := recwatch.NewConfigWatcher()
	events := configWatcher.Events()
	if !obj.NoWatch {
		configWatcher.Add(obj.Remotes...) // add all the files...
	} else {
		events = nil // signal that no-watch is true
	}
	go func() {
		select {
		case err := <-configWatcher.Error():
			obj.Exit(err) // trigger an exit!
		case <-exitchan:
			return
		}
	}()

	// initialize the add watcher, which calls the f callback on map changes
	convergerCb := func(f func(map[string]bool) error) (func(), error) {
		return etcd.EtcdAddHostnameConvergedWatcher(EmbdEtcd, f)
	}

	// build remotes struct for remote ssh
	remotes := remote.NewRemotes(
		EmbdEtcd.LocalhostClientURLs().StringSlice(),
		[]string{etcd.DefaultClientURL},
		obj.Noop,
		obj.Remotes, // list of files
		events,      // watch for file changes
		obj.CConns,
		obj.AllowInteractive,
		obj.SSHPrivIDRsa,
		!obj.NoCaching,
		obj.Depth,
		prefix,
		converger,
		convergerCb,
		obj.Program,
	)

	// TODO: is there any benefit to running the remotes above in the loop?
	// wait for etcd to be running before we remote in, which we do above!
	go remotes.Run()

	if obj.GAPI == nil {
		converger.Start() // better start this for empty graphs
	}
	log.Println("Main: Running...")

	reterr := <-obj.exit // wait for exit signal

	log.Println("Destroy...")

	if obj.GAPI != nil {
		if err := obj.GAPI.Close(); err != nil {
			err = errwrap.Wrapf(err, "GAPI closed poorly!")
			reterr = multierr.Append(reterr, err) // list of errors
		}
	}

	configWatcher.Close()                  // stop sending file changes to remotes
	if err := remotes.Exit(); err != nil { // tell all the remote connections to shutdown; waits!
		err = errwrap.Wrapf(err, "Remote exited poorly!")
		reterr = multierr.Append(reterr, err) // list of errors
	}

	G.Exit() // tell all the children to exit

	// tell inner main loop to exit
	close(exitchan)

	// cleanup etcd main loop last so it can process everything first
	if err := EmbdEtcd.Destroy(); err != nil { // shutdown and cleanup etcd
		err = errwrap.Wrapf(err, "Etcd exited poorly!")
		reterr = multierr.Append(reterr, err) // list of errors
	}

	if obj.DEBUG {
		log.Printf("Main: Graph: %v", G)
	}

	wg.Wait() // wait for primary go routines to exit

	// TODO: wait for each vertex to exit...

	log.Println("Goodbye!")
	return reterr
}
func main() { rand.Seed(time.Now().UTC().UnixNano()) var err error st := utils.NewStopper() // Parse command-line arguments kingpin.Parse() if *cfgDbType != "memstore" && *cfgDbPath == "" { kingpin.Errorf("required flag --db-path not provided, try --help") os.Exit(1) } if *cfgNotifierType == "http" && *cfgNotifierHTTPURL == "" { kingpin.Errorf("required flag --notifier-http-url not provided, try --help") os.Exit(1) } // Initialize error/logging system logLevel, err := capnslog.ParseLevel(strings.ToUpper(*cfgLogLevel)) capnslog.SetGlobalLogLevel(logLevel) capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false)) // Enable CPU Profiling if specified if *cfgCPUProfilePath != "" { f, err := os.Create(*cfgCPUProfilePath) if err != nil { log.Fatalf("failed to create profile file: %s", err) } defer f.Close() pprof.StartCPUProfile(f) log.Info("started profiling") defer func() { pprof.StopCPUProfile() log.Info("stopped profiling") }() } // Open database err = database.Open(*cfgDbType, *cfgDbPath) if err != nil { log.Fatal(err) } defer database.Close() // Start notifier var notifierService notifier.Notifier switch *cfgNotifierType { case "http": notifierService, err = notifier.NewHTTPNotifier(*cfgNotifierHTTPURL) if err != nil { log.Fatalf("could not initialize HTTP notifier: %s", err) } } if notifierService != nil { st.Begin() go notifierService.Run(st) } // Start Main API and Health API st.Begin() go api.RunMain(&api.Config{ Port: *cfgAPIPort, TimeOut: *cfgAPITimeout, CertFile: *cfgAPICertFile, KeyFile: *cfgAPIKeyFile, CAFile: *cfgAPICAFile, }, st) st.Begin() go api.RunHealth(*cfgAPIPort+1, st) // Start updater st.Begin() go updater.Run(*cfgUpdateInterval, st) // This blocks the main goroutine which is required to keep all the other goroutines running interrupts := make(chan os.Signal, 1) signal.Notify(interrupts, os.Interrupt) <-interrupts log.Info("Received interruption, gracefully stopping ...") st.Stop() }
// main is the bootcfg entrypoint: parses flags (overridable via BOOTCFG_*
// environment variables), validates paths and TLS material, configures
// logging and optional GPG signing, then serves the optional gRPC API and
// the HTTP API.
func main() {
	flags := struct {
		address     string
		rpcAddress  string
		dataPath    string
		assetsPath  string
		logLevel    string
		certFile    string
		keyFile     string
		caFile      string
		keyRingPath string
		version     bool
		help        bool
	}{}
	flag.StringVar(&flags.address, "address", "127.0.0.1:8080", "HTTP listen address")
	flag.StringVar(&flags.rpcAddress, "rpc-address", "", "RPC listen address")
	flag.StringVar(&flags.dataPath, "data-path", "/var/lib/bootcfg", "Path to data directory")
	flag.StringVar(&flags.assetsPath, "assets-path", "/var/lib/bootcfg/assets", "Path to static assets")
	// Log levels https://godoc.org/github.com/coreos/pkg/capnslog#LogLevel
	flag.StringVar(&flags.logLevel, "log-level", "info", "Set the logging level")
	// gRPC Server TLS
	flag.StringVar(&flags.certFile, "cert-file", "/etc/bootcfg/server.crt", "Path to the server TLS certificate file")
	flag.StringVar(&flags.keyFile, "key-file", "/etc/bootcfg/server.key", "Path to the server TLS key file")
	// TLS Client Authentication
	flag.StringVar(&flags.caFile, "ca-file", "/etc/bootcfg/ca.crt", "Path to the CA verify and authenticate client certificates")
	// Signing
	flag.StringVar(&flags.keyRingPath, "key-ring-path", "", "Path to a private keyring file")
	// subcommands
	flag.BoolVar(&flags.version, "version", false, "print version and exit")
	flag.BoolVar(&flags.help, "help", false, "print usage and exit")

	// parse command-line and environment variable arguments
	flag.Parse()
	if err := flagutil.SetFlagsFromEnv(flag.CommandLine, "BOOTCFG"); err != nil {
		log.Fatal(err.Error())
	}
	// restrict OpenPGP passphrase to pass via environment variable only
	passphrase := os.Getenv("BOOTCFG_PASSPHRASE")

	if flags.version {
		fmt.Println(version.Version)
		return
	}
	if flags.help {
		flag.Usage()
		return
	}

	// validate arguments
	if url, err := url.Parse(flags.address); err != nil || url.String() == "" {
		log.Fatal("A valid HTTP listen address is required")
	}
	if finfo, err := os.Stat(flags.dataPath); err != nil || !finfo.IsDir() {
		log.Fatal("A valid -data-path is required")
	}
	if flags.assetsPath != "" {
		if finfo, err := os.Stat(flags.assetsPath); err != nil || !finfo.IsDir() {
			log.Fatalf("Provide a valid -assets-path or '' to disable asset serving: %s", flags.assetsPath)
		}
	}
	// TLS material is only required when the gRPC endpoint is enabled
	if flags.rpcAddress != "" {
		if _, err := os.Stat(flags.certFile); err != nil {
			log.Fatalf("Provide a valid TLS server certificate with -cert-file: %v", err)
		}
		if _, err := os.Stat(flags.keyFile); err != nil {
			log.Fatalf("Provide a valid TLS server key with -key-file: %v", err)
		}
		if _, err := os.Stat(flags.caFile); err != nil {
			log.Fatalf("Provide a valid TLS certificate authority for authorizing client certificates: %v", err)
		}
	}

	// logging setup
	lvl, err := capnslog.ParseLevel(strings.ToUpper(flags.logLevel))
	if err != nil {
		log.Fatalf("invalid log-level: %v", err)
	}
	capnslog.SetGlobalLogLevel(lvl)
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false))

	// (optional) signing
	var signer, armoredSigner sign.Signer
	if flags.keyRingPath != "" {
		entity, err := sign.LoadGPGEntity(flags.keyRingPath, passphrase)
		if err != nil {
			log.Fatal(err)
		}
		signer = sign.NewGPGSigner(entity)
		armoredSigner = sign.NewArmoredGPGSigner(entity)
	}

	// storage
	store := storage.NewFileStore(&storage.Config{
		Root: flags.dataPath,
	})

	// core logic
	server := server.NewServer(&server.Config{
		Store: store,
	})

	// gRPC Server (feature disabled by default)
	if flags.rpcAddress != "" {
		log.Infof("starting bootcfg gRPC server on %s", flags.rpcAddress)
		log.Infof("Using TLS server certificate: %s", flags.certFile)
		log.Infof("Using TLS server key: %s", flags.keyFile)
		log.Infof("Using CA certificate: %s to authenticate client certificates", flags.caFile)
		lis, err := net.Listen("tcp", flags.rpcAddress)
		if err != nil {
			log.Fatalf("failed to start listening: %v", err)
		}
		tlsinfo := tlsutil.TLSInfo{
			CertFile: flags.certFile,
			KeyFile:  flags.keyFile,
			CAFile:   flags.caFile,
		}
		tlscfg, err := tlsinfo.ServerConfig()
		if err != nil {
			log.Fatalf("Invalid TLS credentials: %v", err)
		}
		grpcServer := rpc.NewServer(server, tlscfg)
		go grpcServer.Serve(lis)
		defer grpcServer.Stop()
	}

	// HTTP Server
	config := &web.Config{
		Store:         store,
		AssetsPath:    flags.assetsPath,
		Signer:        signer,
		ArmoredSigner: armoredSigner,
	}
	httpServer := web.NewServer(config)
	log.Infof("starting bootcfg HTTP server on %s", flags.address)
	// ListenAndServe blocks; it only returns on listen/serve failure
	err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
	if err != nil {
		log.Fatalf("failed to start listening: %v", err)
	}
}
// main is the (older) bootcfg entrypoint: parses flags (overridable via
// BOOTCFG_* environment variables), validates paths, configures logging,
// bootstraps the file store from a config file, and serves the HTTP API.
func main() {
	flags := struct {
		address    string
		configPath string
		dataPath   string
		assetsPath string
		logLevel   string
		version    bool
		help       bool
	}{}
	flag.StringVar(&flags.address, "address", "127.0.0.1:8080", "HTTP listen address")
	flag.StringVar(&flags.configPath, "config", "./data/config.yaml", "Path to config file")
	flag.StringVar(&flags.dataPath, "data-path", "./data", "Path to data directory")
	flag.StringVar(&flags.assetsPath, "assets-path", "./assets", "Path to static assets")
	// available log levels https://godoc.org/github.com/coreos/pkg/capnslog#LogLevel
	flag.StringVar(&flags.logLevel, "log-level", "info", "Set the logging level")
	// subcommands
	flag.BoolVar(&flags.version, "version", false, "print version and exit")
	flag.BoolVar(&flags.help, "help", false, "print usage and exit")

	// parse command-line and environment variable arguments
	flag.Parse()
	if err := flagutil.SetFlagsFromEnv(flag.CommandLine, "BOOTCFG"); err != nil {
		log.Fatal(err.Error())
	}

	if flags.version {
		fmt.Println(version)
		return
	}
	if flags.help {
		flag.Usage()
		return
	}

	// validate arguments
	if url, err := url.Parse(flags.address); err != nil || url.String() == "" {
		log.Fatal("A valid HTTP listen address is required")
	}
	if finfo, err := os.Stat(flags.configPath); err != nil || finfo.IsDir() {
		log.Fatal("A path to a config file is required")
	}
	if finfo, err := os.Stat(flags.dataPath); err != nil || !finfo.IsDir() {
		log.Fatal("A path to a data directory is required")
	}
	if finfo, err := os.Stat(flags.assetsPath); err != nil || !finfo.IsDir() {
		log.Fatal("A path to an assets directory is required")
	}

	// logging setup
	lvl, err := capnslog.ParseLevel(strings.ToUpper(flags.logLevel))
	if err != nil {
		log.Fatalf("Invalid log-level: %v", err.Error())
	}
	capnslog.SetGlobalLogLevel(lvl)
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false))

	// storage
	store := api.NewFileStore(http.Dir(flags.dataPath))

	// load bootstrap config
	cfg, err := config.LoadConfig(flags.configPath)
	if err != nil {
		log.Fatal(err)
	}
	store.BootstrapGroups(cfg.Groups)

	// API server
	// NOTE(review): this local `config` shadows the config package used
	// just above; the package is not referenced again, so it's harmless.
	config := &api.Config{
		Store:      store,
		AssetsPath: flags.assetsPath,
	}
	server := api.NewServer(config)
	log.Infof("starting bootcfg API Server on %s", flags.address)
	// ListenAndServe blocks; it only returns on listen/serve failure
	err = http.ListenAndServe(flags.address, server.HTTPHandler())
	if err != nil {
		log.Fatalf("failed to start listening: %s", err)
	}
}
// main is the mgmt CLI entrypoint: configures logging, sanity-checks the
// build-time program/version values, and declares the urfave/cli "run"
// command with all of its flags before dispatching to the run action.
func main() {
	var flags int
	if global.DEBUG || true { // TODO: remove || true
		flags = log.LstdFlags | log.Lshortfile
	}
	flags = (flags - log.Ldate) // remove the date for now
	log.SetFlags(flags)

	// un-hijack from capnslog...
	log.SetOutput(os.Stderr)
	if global.VERBOSE {
		capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stderr, "(etcd) ", flags))
	} else {
		capnslog.SetFormatter(capnslog.NewNilFormatter())
	}

	// test for sanity
	if program == "" || version == "" {
		log.Fatal("Program was not compiled correctly. Please see Makefile.")
	}
	app := cli.NewApp()
	app.Name = program
	app.Usage = "next generation config management"
	app.Version = version
	//app.Action = ... // without a default action, help runs
	app.Commands = []cli.Command{
		{
			Name:    "run",
			Aliases: []string{"r"},
			Usage:   "run",
			Action:  run,
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:   "file, f",
					Value:  "",
					Usage:  "graph definition to run",
					EnvVar: "MGMT_FILE",
				},
				cli.BoolFlag{
					Name:  "no-watch",
					Usage: "do not update graph on watched graph definition file changes",
				},
				cli.StringFlag{
					Name:  "code, c",
					Value: "",
					Usage: "code definition to run",
				},
				cli.StringFlag{
					Name:  "graphviz, g",
					Value: "",
					Usage: "output file for graphviz data",
				},
				cli.StringFlag{
					Name:  "graphviz-filter, gf",
					Value: "dot", // directed graph default
					Usage: "graphviz filter to use",
				},
				// useful for testing multiple instances on same machine
				cli.StringFlag{
					Name:  "hostname",
					Value: "",
					Usage: "hostname to use",
				},
				// if empty, it will startup a new server
				cli.StringSliceFlag{
					Name:   "seeds, s",
					Value:  &cli.StringSlice{}, // empty slice
					Usage:  "default etc client endpoint",
					EnvVar: "MGMT_SEEDS",
				},
				// port 2379 and 4001 are common
				cli.StringSliceFlag{
					Name:   "client-urls",
					Value:  &cli.StringSlice{},
					Usage:  "list of URLs to listen on for client traffic",
					EnvVar: "MGMT_CLIENT_URLS",
				},
				// port 2380 and 7001 are common
				cli.StringSliceFlag{
					Name:   "server-urls, peer-urls",
					Value:  &cli.StringSlice{},
					Usage:  "list of URLs to listen on for server (peer) traffic",
					EnvVar: "MGMT_SERVER_URLS",
				},
				cli.BoolFlag{
					Name:  "no-server",
					Usage: "do not let other servers peer with me",
				},
				cli.IntFlag{
					Name:   "ideal-cluster-size",
					Value:  etcd.DefaultIdealClusterSize,
					Usage:  "ideal number of server peers in cluster, only read by initial server",
					EnvVar: "MGMT_IDEAL_CLUSTER_SIZE",
				},
				cli.IntFlag{
					Name:   "converged-timeout, t",
					Value:  -1,
					Usage:  "exit after approximately this many seconds in a converged state",
					EnvVar: "MGMT_CONVERGED_TIMEOUT",
				},
				cli.IntFlag{
					Name:   "max-runtime",
					Value:  0,
					Usage:  "exit after a maximum of approximately this many seconds",
					EnvVar: "MGMT_MAX_RUNTIME",
				},
				cli.BoolFlag{
					Name:  "noop",
					Usage: "globally force all resources into no-op mode",
				},
				cli.StringFlag{
					Name:  "puppet, p",
					Value: "",
					Usage: "load graph from puppet, optionally takes a manifest or path to manifest file",
				},
				cli.StringFlag{
					Name:  "puppet-conf",
					Value: "",
					Usage: "supply the path to an alternate puppet.conf file to use",
				},
				cli.StringSliceFlag{
					Name:  "remote",
					Value: &cli.StringSlice{},
					Usage: "list of remote graph definitions to run",
				},
				cli.BoolFlag{
					Name:  "allow-interactive",
					Usage: "allow interactive prompting, such as for remote passwords",
				},
				cli.StringFlag{
					Name:   "ssh-priv-id-rsa",
					Value:  "~/.ssh/id_rsa",
					Usage:  "default path to ssh key file, set empty to never touch",
					EnvVar: "MGMT_SSH_PRIV_ID_RSA",
				},
				cli.IntFlag{
					Name:   "cconns",
					Value:  0,
					Usage:  "number of maximum concurrent remote ssh connections to run, 0 for unlimited",
					EnvVar: "MGMT_CCONNS",
				},
				cli.BoolFlag{
					Name:  "no-caching",
					Usage: "don't allow remote caching of remote execution binary",
				},
				cli.IntFlag{
					Name:   "depth",
					Hidden: true, // internal use only
					Value:  0,
					Usage:  "specify depth in remote hierarchy",
				},
				cli.StringFlag{
					Name:   "prefix",
					Usage:  "specify a path to the working prefix directory",
					EnvVar: "MGMT_PREFIX",
				},
				cli.BoolFlag{
					Name:  "tmp-prefix",
					Usage: "request a pseudo-random, temporary prefix to be used",
				},
				cli.BoolFlag{
					Name:  "allow-tmp-prefix",
					Usage: "allow creation of a new temporary prefix if main prefix is unavailable",
				},
			},
		},
	}
	app.EnableBashCompletion = true
	// NOTE(review): app.Run's error return is discarded here.
	app.Run(os.Args)
}