func main() {
	flag.Parse()
	domain, err := tao.LoadDomain(*configPath, []byte(*domainPass))
	if err != nil {
		glog.Exitf("Couldn't load the config path %s: %s\n", *configPath, err)
		return
	}
	sock, err := net.Listen(*network, *addr)
	if err != nil {
		glog.Exit("Couldn't bind socket to address:", err)
		return
	}
	fmt.Println("tcca: accepting connections")
	for {
		conn, err := sock.Accept()
		if err != nil {
			glog.Exitf("Couldn't accept a connection on %s: %s", *addr, err)
			return
		}
		go tao.HandleCARequest(conn, domain.Keys.SigningKey, domain.Guard)
	}
}
func ServeOnce(c *server.Config, cf string, hd *httpdown.HTTP) (*server.AuthServer, httpdown.Server) {
	glog.Infof("Config from %s (%d users, %d ACL entries)", cf, len(c.Users), len(c.ACL))
	as, err := server.NewAuthServer(c)
	if err != nil {
		glog.Exitf("Failed to create auth server: %s", err)
	}
	hs := &http.Server{
		Addr:    c.Server.ListenAddress,
		Handler: as,
		TLSConfig: &tls.Config{
			NextProtos:   []string{"http/1.1"},
			Certificates: make([]tls.Certificate, 1),
		},
	}
	glog.Infof("Cert file: %s", c.Server.CertFile)
	glog.Infof("Key file : %s", c.Server.KeyFile)
	hs.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(c.Server.CertFile, c.Server.KeyFile)
	if err != nil {
		glog.Exitf("Failed to load certificate and key: %s", err)
	}
	s, err := hd.ListenAndServe(hs)
	if err != nil {
		glog.Exitf("Failed to set up listener: %s", err)
	}
	glog.Infof("Serving")
	return as, s
}
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [FLAG]... [PROFILE_DIR]...\n", os.Args[0])
		fmt.Fprintf(os.Stderr, "Load the AppArmor profiles specified in the PROFILE_DIR directories.\n")
		flag.PrintDefaults()
	}
	flag.Parse()
	dirs = flag.Args()
	if len(dirs) == 0 {
		glog.Errorf("Must specify at least one directory.")
		flag.Usage()
		os.Exit(1)
	}

	// Check that the required parser binary is found.
	if _, err := exec.LookPath(parser); err != nil {
		glog.Exitf("Required binary %s not found in PATH", parser)
	}

	// Check that loaded profiles can be read.
	if _, err := getLoadedProfiles(); err != nil {
		glog.Exitf("Unable to access apparmor profiles: %v", err)
	}

	if *poll < 0 {
		runOnce()
	} else {
		pollForever()
	}
}
func ServeOnce(c *config.Config, cf string, hd *httpdown.HTTP) (*server.AuthServer, httpdown.Server) {
	glog.Infof("Config from %s (%d users, %d ACL static entries)", cf, len(c.Users), len(c.ACL))
	as, ms, err := server.NewAuthServer(c)
	if err != nil {
		glog.Exitf("Failed to create auth server: %s", err)
	}

	var tlsConfig *tls.Config
	if c.Server.CertFile != "" || c.Server.KeyFile != "" {
		// Check for partial configuration.
		if c.Server.CertFile == "" || c.Server.KeyFile == "" {
			glog.Exitf("Failed to load certificate and key: both were not provided")
		}
		tlsConfig = &tls.Config{
			MinVersion:               tls.VersionTLS10,
			PreferServerCipherSuites: true,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_RSA_WITH_AES_256_CBC_SHA,
			},
			NextProtos:   []string{"http/1.1"},
			Certificates: make([]tls.Certificate, 1),
		}
		glog.Infof("Cert file: %s", c.Server.CertFile)
		glog.Infof("Key file : %s", c.Server.KeyFile)
		tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(c.Server.CertFile, c.Server.KeyFile)
		if err != nil {
			glog.Exitf("Failed to load certificate and key: %s", err)
		}
	} else {
		glog.Warning("Running without TLS")
	}

	hs := &http.Server{
		Addr:      c.Server.ListenAddress,
		Handler:   as,
		TLSConfig: tlsConfig,
	}
	s, err := hd.ListenAndServe(hs)
	if err != nil {
		glog.Exitf("Failed to set up listener: %s", err)
	}
	ms.RunManagerServer()
	glog.Infof("Serving")
	return as, s
}
func main() { flag.Parse() log.Infof("Simulating %v clients.", *count) for i := 0; i < *count; i++ { id := uuid.New() log.Infof("client %v with id %v", i, id) client, err := doorman.NewWithID(*addr, id, doorman.DialOpts(grpc.WithInsecure())) if err != nil { log.Exit(err) } defer client.Close() res, err := client.Resource(*resource, *initialCapacity) if err != nil { log.Exit(err) } go manipulateCapacity(res, *initialCapacity, id) conn, err := grpc.Dial(*target, grpc.WithInsecure()) if err != nil { log.Exitf("did not connect: %v", err) } defer conn.Close() c := pb.NewGreeterClient(conn) rl := ratelimiter.NewQPS(res) for i := 0; i < *workers; i++ { go func() { ctx := context.Background() for { if err := rl.Wait(ctx); err != nil { log.Exitf("rl.Wait: %v", err) } ctx, cancel := context.WithTimeout(ctx, 30*time.Second) if _, err := c.SayHello(ctx, &pb.HelloRequest{Name: *resource}); err != nil { log.Error(err) } cancel() } }() } } http.Handle("/metrics", prometheus.Handler()) http.ListenAndServe(fmt.Sprintf(":%v", *port), nil) }
func (d *deadlockDetector) run() {
	for {
		ch := make(chan bool, 1)
		go func() {
			d.lock.Lock()
			d.lock.Unlock()
			ch <- true
		}()
		select {
		case <-time.After(d.maxLockPeriod):
			go func() {
				defer func() {
					// Let's just be extra sure we die, even if Exitf panics
					glog.Errorf("Failed to Exitf for %s, dying anyway", d.name)
					os.Exit(2)
				}()
				glog.Exitf("Deadlock on %s, exiting", d.name)
			}()
		case <-ch:
			glog.V(6).Infof("%s is not deadlocked", d.name)
		}
		time.Sleep(d.maxLockPeriod / 2)
	}
}
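// A minimal usage sketch for the detector above. The struct definition is not
// shown in this collection, so the field names here are inferred from the
// method body and should be treated as assumptions.
func watchMutex(name string, mu *sync.Mutex) {
	d := &deadlockDetector{
		name:          name,        // label used in log messages (assumed field)
		lock:          mu,          // mutex to probe (assumed field)
		maxLockPeriod: time.Minute, // how long a blocked Lock counts as a deadlock (assumed field)
	}
	// Probe the mutex in the background; the process exits if Lock blocks
	// for longer than maxLockPeriod.
	go d.run()
}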
func TestE2eNode(t *testing.T) {
	if *runServicesMode {
		// If run-services-mode is specified, only run services in current process.
		services.RunE2EServices()
		return
	}
	if *systemValidateMode {
		// If system-validate-mode is specified, only run system validation in current process.
		if err := system.Validate(); err != nil {
			glog.Exitf("system validation failed: %v", err)
		}
		return
	}
	// If run-services-mode is not specified, run test.
	rand.Seed(time.Now().UTC().UnixNano())
	RegisterFailHandler(Fail)
	reporters := []Reporter{}
	reportDir := framework.TestContext.ReportDir
	if reportDir != "" {
		// Create the directory if it doesn't already exist.
		if err := os.MkdirAll(reportDir, 0755); err != nil {
			glog.Errorf("Failed creating report directory: %v", err)
		} else {
			// Configure a junit reporter to write to the directory.
			junitFile := fmt.Sprintf("junit_%s%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode)
			junitPath := path.Join(reportDir, junitFile)
			reporters = append(reporters, more_reporters.NewJUnitReporter(junitPath))
		}
	}
	RunSpecsWithDefaultAndCustomReporters(t, "E2eNode Suite", reporters)
}
// The main function sets up the connection to the storage backend for
// aggregated events (e.g. MongoDB) and fires up an HTTPS server which acts as
// an endpoint for docker notifications.
func main() {
	flag.Parse()
	rand.Seed(time.Now().UnixNano())
	glog.CopyStandardLogTo("INFO")

	// Create our application context
	ctx, _ := NewAppContext()

	// Load config file given by first argument
	configFilePath := flag.Arg(0)
	if configFilePath == "" {
		glog.Exit("Config file not specified")
	}
	c, err := LoadConfig(configFilePath)
	if err != nil {
		glog.Exit(err)
	}
	ctx.Config = c

	// Connect to MongoDB
	session, err := createMongoDbSession(c)
	if err != nil {
		glog.Exit(err)
	}
	defer session.Close()
	ctx.Session = session

	// Wait for errors on inserts and updates and for flushing changes to disk
	session.SetSafe(&mgo.Safe{FSync: true})

	collection := ctx.Session.DB(ctx.Config.DialInfo.DialInfo.Database).C(ctx.Config.Collection)

	// The repository structure shall have a unique key on the repository's
	// name field.
	index := mgo.Index{
		Key:        []string{"repositoryname"},
		Unique:     true,
		DropDups:   true,
		Background: true,
		Sparse:     true,
	}
	if err = collection.EnsureIndex(index); err != nil {
		glog.Exit("It looks like your mongo database is inconsistent. " +
			"Make sure you have no duplicate entries for repository names.")
	}

	// Set up the HTTP endpoint.
	var httpConnectionString = ctx.Config.GetEndpointConnectionString()
	glog.Infof("About to listen on \"%s%s\".", httpConnectionString, ctx.Config.Server.Route)

	mux := http.NewServeMux()
	appHandler := &appHandler{ctx: ctx}
	mux.Handle(ctx.Config.Server.Route, appHandler)
	err = http.ListenAndServeTLS(httpConnectionString, ctx.Config.Server.Ssl.Cert, ctx.Config.Server.Ssl.CertKey, mux)
	if err != nil {
		glog.Exit(err)
	}

	glog.Info("Exiting.")
}
func TestE2eNode(t *testing.T) {
	if *runServicesMode {
		// If run-services-mode is specified, only run services in current process.
		services.RunE2EServices()
		return
	}
	if *runKubeletMode {
		// If run-kubelet-mode is specified, only start kubelet.
		services.RunKubelet()
		return
	}
	if *systemValidateMode {
		// If system-validate-mode is specified, only run system validation in current process.
		if framework.TestContext.NodeConformance {
			// Chroot to /rootfs so that system validation can check the system
			// as it appears in the root filesystem.
			// TODO(random-liu): Consider chrooting the whole test process to make
			// writing tests easier.
			if err := syscall.Chroot(rootfs); err != nil {
				glog.Exitf("chroot %q failed: %v", rootfs, err)
			}
		}
		if err := system.ValidateDefault(); err != nil {
			glog.Exitf("system validation failed: %v", err)
		}
		return
	}
	// If run-services-mode is not specified, run test.
	rand.Seed(time.Now().UTC().UnixNano())
	RegisterFailHandler(Fail)
	reporters := []Reporter{}
	reportDir := framework.TestContext.ReportDir
	if reportDir != "" {
		// Create the directory if it doesn't already exist.
		if err := os.MkdirAll(reportDir, 0755); err != nil {
			glog.Errorf("Failed creating report directory: %v", err)
		} else {
			// Configure a junit reporter to write to the directory.
			junitFile := fmt.Sprintf("junit_%s%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode)
			junitPath := path.Join(reportDir, junitFile)
			reporters = append(reporters, morereporters.NewJUnitReporter(junitPath))
		}
	}
	RunSpecsWithDefaultAndCustomReporters(t, "E2eNode Suite", reporters)
}
func init() { cmd := &cobra.Command{ Use: "cluster", Short: "Create cluster", Long: `Creates a k8s cluster.`, Run: func(cmd *cobra.Command, args []string) { err := createCluster.Run() if err != nil { glog.Exitf("%v", err) } }, } createCmd.AddCommand(cmd) executableLocation, err := exec.LookPath(os.Args[0]) if err != nil { glog.Fatalf("Cannot determine location of kops tool: %q. Please report this problem!", os.Args[0]) } modelsBaseDirDefault := path.Join(path.Dir(executableLocation), "models") cmd.Flags().BoolVar(&createCluster.DryRun, "dryrun", false, "Don't create cloud resources; just show what would be done") cmd.Flags().StringVar(&createCluster.Target, "target", "direct", "Target - direct, terraform") //configFile := cmd.Flags().StringVar(&createCluster., "conf", "", "Configuration file to load") cmd.Flags().StringVar(&createCluster.ModelsBaseDir, "modeldir", modelsBaseDirDefault, "Source directory where models are stored") cmd.Flags().StringVar(&createCluster.Models, "model", "config,proto,cloudup", "Models to apply (separate multiple models with commas)") cmd.Flags().StringVar(&createCluster.NodeModel, "nodemodel", "nodeup", "Model to use for node configuration") //defaultStateStore := os.Getenv("KOPS_STATE_STORE") //cmd.Flags().StringVar(&createCluster.StateStore, "state", defaultStateStore, "Location to use to store configuration state") cmd.Flags().StringVar(&createCluster.Cloud, "cloud", "", "Cloud provider to use - gce, aws") cmd.Flags().StringVar(&createCluster.Zones, "zones", "", "Zones in which to run the cluster") cmd.Flags().StringVar(&createCluster.MasterZones, "master-zones", "", "Zones in which to run masters (must be an odd number)") cmd.Flags().StringVar(&createCluster.Project, "project", "", "Project to use (must be set on GCE)") //cmd.Flags().StringVar(&createCluster.Name, "name", "", "Name for cluster") cmd.Flags().StringVar(&createCluster.KubernetesVersion, "kubernetes-version", "", "Version of kubernetes to run (defaults to latest)") cmd.Flags().StringVar(&createCluster.SSHPublicKey, "ssh-public-key", "~/.ssh/id_rsa.pub", "SSH public key to use") cmd.Flags().StringVar(&createCluster.NodeSize, "node-size", "", "Set instance size for nodes") cmd.Flags().StringVar(&createCluster.MasterSize, "master-size", "", "Set instance size for masters") cmd.Flags().StringVar(&createCluster.VPCID, "vpc", "", "Set to use a shared VPC") cmd.Flags().StringVar(&createCluster.NetworkCIDR, "network-cidr", "", "Set to override the default network CIDR") cmd.Flags().IntVar(&createCluster.NodeCount, "node-count", 0, "Set the number of nodes") cmd.Flags().StringVar(&createCluster.Image, "image", "", "Image to use") cmd.Flags().StringVar(&createCluster.DNSZone, "dns-zone", "", "DNS hosted zone to use (defaults to last two components of cluster name)") cmd.Flags().StringVar(&createCluster.OutDir, "out", "", "Path to write any local output") }
// parseInt parses an integer number from a string. It logs a fatal
// error if the string cannot be converted to a valid integer.
func parseInt(s string) int {
	i, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		log.Exitf("Cannot convert %v to int: %v", s, err)
	}
	return int(i)
}
// parseFloat parses a floating point number from a string. It logs a fatal
// error if the string cannot be converted to a valid floating point number.
func parseFloat(s string) float64 {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		log.Exitf("Cannot convert %v to float64: %v", s, err)
	}
	return f
}
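// A minimal sketch of the two helpers above in use (the values are
// hypothetical): malformed input terminates the process via log.Exitf rather
// than returning an error, so call sites need no error handling of their own.
func exampleParse() (int, float64) {
	return parseInt("42"), parseFloat("0.75")
}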
// parseIP resolves the given address via a DNS lookup and returns the first
// IP address found. It logs a fatal error if resolution fails or yields no
// addresses.
func parseIP(address string) net.IP {
	addr, err := net.LookupIP(address)
	if err != nil {
		log.Exit(err)
	}
	if len(addr) < 1 {
		log.Exitf("failed to parse IP from address '%v'", address)
	}
	return addr[0]
}
// cfgOpt returns the configuration option from the specified section. If the
// option does not exist, an empty string is returned.
func cfgOpt(cfg *conf.ConfigFile, section, option string) string {
	if !cfg.HasOption(section, option) {
		return ""
	}
	s, err := cfg.GetString(section, option)
	if err != nil {
		log.Exitf("Failed to get %s for %s: %v", option, section, err)
	}
	return s
}
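// Hypothetical call site for cfgOpt: because a missing option comes back as
// "", callers can layer their own defaults on top. The section, option, and
// fallback values below are illustrative only.
func listenAddr(cfg *conf.ConfigFile) string {
	if a := cfgOpt(cfg, "server", "listen"); a != "" {
		return a
	}
	return ":8080" // fallback used when the option is absent (assumed default)
}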
func main() { flag.Parse() rand.Seed(time.Now().UnixNano()) glog.CopyStandardLogTo("INFO") cf := flag.Arg(0) if cf == "" { glog.Exitf("Config file not specified") } c, err := server.LoadConfig(cf) if err != nil { glog.Exitf("Failed to load config: %s", err) } rs := RestartableServer{ configFile: cf, hd: &httpdown.HTTP{}, } rs.Serve(c) }
func init() {
	cmd := addonsGetCmd.cobraCommand
	addonsCmd.cobraCommand.AddCommand(cmd)

	cmd.Run = func(cmd *cobra.Command, args []string) {
		err := addonsGetCmd.Run()
		if err != nil {
			glog.Exitf("%v", err)
		}
	}
}
func main() {
	flag.Parse()
	if *progs == "" {
		glog.Exitf("No mtail program directory specified; use -progs")
	}
	if *logs == "" && *logFds == "" {
		glog.Exitf("No logs specified to tail; use -logs or -logfds")
	}
	var logPathnames []string
	for _, pathname := range strings.Split(*logs, ",") {
		if pathname != "" {
			logPathnames = append(logPathnames, pathname)
		}
	}
	var logDescriptors []int
	for _, fdStr := range strings.Split(*logFds, ",") {
		fdNum, err := strconv.Atoi(fdStr)
		if err == nil {
			logDescriptors = append(logDescriptors, fdNum)
		}
	}
	if len(logPathnames) == 0 && len(logDescriptors) == 0 {
		glog.Exit("No logs to tail.")
	}
	o := mtail.Options{
		Progs:                *progs,
		LogPaths:             logPathnames,
		LogFds:               logDescriptors,
		Port:                 *port,
		OneShot:              *oneShot,
		OneShotMetrics:       *oneShotMetrics,
		CompileOnly:          *compileOnly,
		DumpBytecode:         *dumpBytecode,
		SyslogUseCurrentYear: *syslogUseCurrentYear,
	}
	m, err := mtail.New(o)
	if err != nil {
		glog.Fatalf("couldn't start: %s", err)
	}
	m.Run()
}
func (realExiter) Exitf(format string, args ...interface{}) {
	func() {
		defer func() {
			// Let's just be extra sure we die, even if Exitf panics
			if r := recover(); r != nil {
				glog.Errorf(format, args...)
				os.Exit(2)
			}
		}()
		glog.Exitf(format, args...)
	}()
}
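// A plausible companion to realExiter (assumed, not shown above): the value
// receiver suggests it satisfies a small interface so that code paths ending
// in Exitf can be exercised in tests with a fake that records the call
// instead of killing the process. The names below are illustrative.
type exiter interface {
	Exitf(format string, args ...interface{})
}

type fakeExiter struct{ lastFormat string }

func (f *fakeExiter) Exitf(format string, args ...interface{}) {
	f.lastFormat = format // record the call instead of terminating the process
}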
func main() { flagModel := "model" flag.StringVar(&flagModel, "model", flagModel, "directory to use as model for desired configuration") var flagConf string flag.StringVar(&flagConf, "conf", "node.yaml", "configuration location") var flagAssetDir string flag.StringVar(&flagAssetDir, "assets", "/var/cache/nodeup", "the location for the local asset cache") var flagRootFS string flag.StringVar(&flagRootFS, "rootfs", "/", "the location of the machine root (for running in a container)") dryrun := false flag.BoolVar(&dryrun, "dryrun", false, "Don't create cloud resources; just show what would be done") target := "direct" flag.StringVar(&target, "target", target, "Target - direct, cloudinit") if dryrun { target = "dryrun" } flag.Set("logtostderr", "true") flag.Parse() if flagConf == "" { glog.Exitf("--conf is required") } cmd := &nodeup.NodeUpCommand{ ConfigLocation: flagConf, ModelDir: flagModel, Target: target, AssetDir: flagAssetDir, FSRoot: flagRootFS, } err := cmd.Run(os.Stdout) if err != nil { glog.Exitf("error running nodeup: %v", err) os.Exit(1) } fmt.Printf("success") }
func main() {
	flag.Parse()

	if *server == "" || *resource == "" {
		log.Exit("both --server and --resource must be specified")
	}
	if *clientID == "" {
		log.Exit("--client_id must be set")
	}

	var opts []grpc.DialOption
	if len(*caFile) != 0 {
		var creds credentials.TransportAuthenticator
		var err error
		creds, err = credentials.NewClientTLSFromFile(*caFile, "")
		if err != nil {
			log.Exitf("Failed to create TLS credentials %v", err)
		}
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	client, err := doorman.NewWithID(*server, *clientID, doorman.DialOpts(opts...))
	if err != nil {
		log.Exitf("could not create client: %v", err)
	}
	defer client.Close()

	resource, err := client.Resource(*resource, *wants)
	if err != nil {
		log.Exitf("could not acquire resource: %v", err)
	}
	fmt.Println(<-resource.Capacity())
}
func main() { flag.Parse() domain, err := tao.LoadDomain(*configPath, []byte(*domainPass)) if err != nil { glog.Exitf("Couldn't load the config path %s: %s\n", *configPath, err) return } // Set up temporary keys for the connection, since the only thing that // matters to the remote client is that they receive a correctly-signed new // attestation from the policy key. keys, err := tao.NewTemporaryKeys(tao.Signing) if err != nil { glog.Exit("Couldn't set up temporary keys for the connection:", err) return } keys.Cert, err = keys.SigningKey.CreateSelfSignedX509(&pkix.Name{ Organization: []string{"Google Tao Demo"}}) if err != nil { glog.Exit("Couldn't set up a self-signed cert:", err) return } sock, err := net.Listen(*network, *addr) if err != nil { glog.Exit("Couldn't bind socket to address:", err) return } fmt.Println("tcca: accepting connections") for { conn, err := sock.Accept() if err != nil { glog.Exitf("Couldn't accept a connection on %s: %s", *addr, err) return } go tao.HandleCARequest(conn, domain.Keys.SigningKey, domain.Guard) } }
func ServeOnce(c *server.Config, cf string, hd *httpdown.HTTP) (*server.AuthServer, httpdown.Server) { glog.Infof("Config from %s (%d users, %d ACL entries)", cf, len(c.Users), len(c.ACL)) as, err := server.NewAuthServer(c) if err != nil { glog.Exitf("Failed to create auth server: %s", err) } var tlsConfig *tls.Config if c.Server.CertFile != "" || c.Server.KeyFile != "" { // Check for partial configuration. if c.Server.CertFile == "" || c.Server.KeyFile == "" { glog.Exitf("Failed to load certificate and key: both were not provided") } tlsConfig = &tls.Config{ NextProtos: []string{"http/1.1"}, Certificates: make([]tls.Certificate, 1), } glog.Infof("Cert file: %s", c.Server.CertFile) glog.Infof("Key file : %s", c.Server.KeyFile) tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(c.Server.CertFile, c.Server.KeyFile) if err != nil { glog.Exitf("Failed to load certificate and key: %s", err) } } else { glog.Warning("Running without TLS") } hs := &http.Server{ Addr: c.Server.ListenAddress, Handler: as, TLSConfig: tlsConfig, } s, err := hd.ListenAndServe(hs) if err != nil { glog.Exitf("Failed to set up listener: %s", err) } glog.Infof("Serving") return as, s }
func main() {
	flag.Parse()
	lis, err := net.Listen("tcp", fmt.Sprintf(":%v", *port))
	if err != nil {
		log.Exitf("failed to listen: %v", err)
	}
	http.Handle("/metrics", prometheus.Handler())
	go http.ListenAndServe(fmt.Sprintf(":%v", *debugPort), nil)
	s := grpc.NewServer()
	pb.RegisterGreeterServer(s, &server{})
	s.Serve(lis)
}
func main() {
	flag.Parse()

	g = gocui.NewGui()
	if err := g.Init(); err != nil {
		log.Exitf("gocui init error: %v", err)
	}
	defer g.Close()

	g.SelBgColor = gocui.ColorGreen
	g.SelFgColor = gocui.ColorBlack
	g.ShowCursor = true
	g.SetLayout(layout)

	g.MainLoop()
}
func createHost(api libmachine.API, config MachineConfig) (*host.Host, error) {
	var driver interface{}

	if config.ShouldCacheMinikubeISO() {
		if err := config.CacheMinikubeISOFromURL(); err != nil {
			return nil, errors.Wrap(err, "Error attempting to cache minikube iso from url")
		}
	}

	switch config.VMDriver {
	case "virtualbox":
		driver = createVirtualboxHost(config)
	case "vmwarefusion":
		driver = createVMwareFusionHost(config)
	case "kvm":
		driver = createKVMHost(config)
	case "xhyve":
		driver = createXhyveHost(config)
	case "hyperv":
		driver = createHypervHost(config)
	default:
		glog.Exitf("Unsupported driver: %s\n", config.VMDriver)
	}

	data, err := json.Marshal(driver)
	if err != nil {
		return nil, errors.Wrap(err, "Error marshalling json")
	}

	h, err := api.NewHost(config.VMDriver, data)
	if err != nil {
		return nil, errors.Wrap(err, "Error creating new host")
	}

	h.HostOptions.AuthOptions.CertDir = constants.Minipath
	h.HostOptions.AuthOptions.StorePath = constants.Minipath
	h.HostOptions.EngineOptions = engineOptions(config)

	if err := api.Create(h); err != nil {
		// Wait for all the logs to reach the client
		time.Sleep(2 * time.Second)
		return nil, errors.Wrap(err, "Error creating host")
	}

	if err := api.Save(h); err != nil {
		return nil, errors.Wrap(err, "Error attempting to save")
	}
	return h, nil
}
// No polling: run once and exit.
func runOnce() {
	if success, newProfiles := loadNewProfiles(); !success {
		if len(newProfiles) > 0 {
			glog.Exitf("Not all profiles were successfully loaded. Loaded: %v", newProfiles)
		} else {
			glog.Exit("Error loading profiles.")
		}
	} else {
		if len(newProfiles) > 0 {
			glog.Infof("Successfully loaded profiles: %v", newProfiles)
		} else {
			glog.Warning("No new profiles found.")
		}
	}
}
func init() {
	cmd := rollingupdateCluster.cobraCommand
	rollingUpdateCommand.cobraCommand.AddCommand(cmd)

	cmd.Flags().BoolVar(&rollingupdateCluster.Yes, "yes", false, "Rollingupdate without confirmation")
	cmd.Flags().StringVar(&rollingupdateCluster.Region, "region", "", "region")

	cmd.Run = func(cmd *cobra.Command, args []string) {
		err := rollingupdateCluster.Run()
		if err != nil {
			glog.Exitf("%v", err)
		}
	}
}
func RegisterCreateCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "cluster",
		Short: "create cluster",
		Run: func(cmd *cobra.Command, args []string) {
			err := CreateCluster(args)
			if err != nil {
				glog.Exitf("%s\n", err.Error())
			}
		},
	}
	cmd.Flags().IntVarP(&times, "times", "n", 1, "times to echo")
	return cmd
}
func init() { cmd := &cobra.Command{ Use: "get", Short: "Get secrets", Long: `Get secrets.`, Run: func(cmd *cobra.Command, args []string) { err := getSecretsCommand.Run() if err != nil { glog.Exitf("%v", err) } }, } secretsCmd.AddCommand(cmd) }
func (ha HashAlgorithm) NewHasher() hash.Hash {
	switch ha {
	case HashAlgorithmMD5:
		return md5.New()
	case HashAlgorithmSHA1:
		return sha1.New()
	case HashAlgorithmSHA256:
		return sha256.New()
	}
	glog.Exitf("Unknown hash algorithm: %v", ha)
	return nil
}
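// A minimal usage sketch for NewHasher (the helper name is illustrative):
// hash a byte slice with whichever algorithm was configured. An unknown
// algorithm terminates the process inside NewHasher.
func digest(ha HashAlgorithm, data []byte) []byte {
	h := ha.NewHasher()
	h.Write(data) // hash.Hash writes never return a non-nil error
	return h.Sum(nil)
}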