Example 1
func main() {
	flag.Usage = func() {
		fmt.Println("Usage: quilt " +
			"[-log-level=<level> | -l=<level>] [-H=<listen_address>] " +
			"[log-file=<log_output_file>] " +
			"[daemon | inspect <stitch> | run <stitch> | minion | " +
			"stop <namespace> | get <import_path> | " +
			"machines | containers | ssh <machine> | " +
			"exec <container> <command> | " +
			"logs <container>]")
		fmt.Println("\nWhen provided a stitch, quilt takes responsibility\n" +
			"for deploying it as specified.  Alternatively, quilt may be\n" +
			"instructed to stop all deployments in a given namespace,\n" +
			"or the default namespace if none is provided.\n")
		flag.PrintDefaults()
		fmt.Println("        Valid logger levels are:\n" +
			"            debug, info, warn, error, fatal or panic.")
	}

	var logOut = flag.String("log-file", "", "log output file (will be overwritten)")
	var logLevel = flag.String("log-level", "info", "level to set logger to")
	flag.StringVar(logLevel, "l", "info", "level to set logger to")
	flag.Parse()

	level, err := parseLogLevel(*logLevel)
	if err != nil {
		fmt.Println(err)
		usage()
	}
	log.SetLevel(level)
	log.SetFormatter(util.Formatter{})

	if *logOut != "" {
		file, err := os.Create(*logOut)
		if err != nil {
			fmt.Printf("Failed to create file %s\n", *logOut)
			os.Exit(1)
		}
		defer file.Close()
		log.SetOutput(file)
	}

	// gRPC spews a lot of useless log messages, so we tell it to eat its logs
	// unless we are in debug mode.
	grpclog.SetLogger(l_mod.New(ioutil.Discard, "", 0))
	if level == log.DebugLevel {
		grpclog.SetLogger(log.StandardLogger())
	}

	if len(flag.Args()) == 0 {
		usage()
	}

	subcommand := flag.Arg(0)
	if quiltctl.HasSubcommand(subcommand) {
		quiltctl.Run(subcommand, flag.Args()[1:])
	} else {
		usage()
	}
}
Example 2
// getClient returns a connection to the Suite containerd
func (cs *ContainerdSuite) getClient(socket string) error {
	// Parse proto://address form addresses.
	bindParts := strings.SplitN(socket, "://", 2)
	if len(bindParts) != 2 {
		return fmt.Errorf("bad bind address format %s, expected proto://address", socket)
	}

	// reset the logger for grpc to log to dev/null so that it does not mess with our stdio
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	dialOpts := []grpc.DialOption{grpc.WithInsecure()}
	dialOpts = append(dialOpts,
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout(bindParts[0], bindParts[1], timeout)
		}),
		grpc.WithBlock(),
		grpc.WithTimeout(5*time.Second),
	)
	conn, err := grpc.Dial(socket, dialOpts...)
	if err != nil {
		return err
	}
	healthClient := grpc_health_v1.NewHealthClient(conn)
	if _, err := healthClient.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{}); err != nil {
		return err
	}
	cs.grpcClient = types.NewAPIClient(conn)

	return nil
}
Example 3
// RedirectGrpclog will redirect grpclog to lion.
func RedirectGrpclog() {
	lion.AddGlobalHook(
		func(l lion.Logger) {
			grpclog.SetLogger(NewLogger(l))
		},
	)
}
Example 4
func init() {
	// use go's standard logger by default like grpc
	logger.mu.Lock()
	logger.l = log.New(os.Stderr, "", log.LstdFlags)
	grpclog.SetLogger(&logger)
	logger.mu.Unlock()
}
Example 5
// StatsAndLogging constructs a Statter and an AuditLogger based on its config
// parameters, and returns them both. Crashes if any setup fails.
// Also sets the constructed AuditLogger as the default logger, and configures
// the cfssl, mysql, and grpc packages to use our logger.
// This must be called before any gRPC code is called, because gRPC's SetLogger
// doesn't use any locking.
func StatsAndLogging(statConf StatsdConfig, logConf SyslogConfig) (metrics.Statter, blog.Logger) {
	stats, err := metrics.NewStatter(statConf.Server, statConf.Prefix)
	FailOnError(err, "Couldn't connect to statsd")

	tag := path.Base(os.Args[0])
	syslogger, err := syslog.Dial(
		"",
		"",
		syslog.LOG_INFO, // default, not actually used
		tag)
	FailOnError(err, "Could not connect to Syslog")
	syslogLevel := int(syslog.LOG_INFO)
	if logConf.SyslogLevel != 0 {
		syslogLevel = logConf.SyslogLevel
	}
	logger, err := blog.New(syslogger, logConf.StdoutLevel, syslogLevel)
	FailOnError(err, "Could not connect to Syslog")

	_ = blog.Set(logger)
	cfsslLog.SetLogger(cfsslLogger{logger})
	_ = mysql.SetLogger(mysqlLogger{logger})
	grpclog.SetLogger(grpcLogger{logger})

	return stats, logger
}
Example 6
func newRootCmd(out io.Writer) *cobra.Command {
	cmd := &cobra.Command{
		Use:          "helm",
		Short:        "The Helm package manager for Kubernetes.",
		Long:         globalUsage,
		SilenceUsage: true,
		PersistentPostRun: func(cmd *cobra.Command, args []string) {
			teardown()
		},
	}
	p := cmd.PersistentFlags()
	p.StringVar(&helmHome, "home", defaultHelmHome(), "location of your Helm config. Overrides $HELM_HOME")
	p.StringVar(&tillerHost, "host", defaultHelmHost(), "address of tiller. Overrides $HELM_HOST")
	p.StringVar(&kubeContext, "kube-context", "", "name of the kubeconfig context to use")
	p.BoolVar(&flagDebug, "debug", false, "enable verbose output")
	p.StringVar(&tillerNamespace, "tiller-namespace", defaultTillerNamespace(), "namespace of tiller")

	// Tell gRPC not to log to console.
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))

	rup := newRepoUpdateCmd(out)
	rup.Deprecated = "use 'helm repo update'\n"

	cmd.AddCommand(
		newCreateCmd(out),
		newDeleteCmd(nil, out),
		newDependencyCmd(out),
		newFetchCmd(out),
		newGetCmd(nil, out),
		newHomeCmd(out),
		newHistoryCmd(nil, out),
		newInitCmd(out),
		newInspectCmd(nil, out),
		newInstallCmd(nil, out),
		newLintCmd(out),
		newListCmd(nil, out),
		newPackageCmd(nil, out),
		newRepoCmd(out),
		newRollbackCmd(nil, out),
		newSearchCmd(out),
		newServeCmd(out),
		newStatusCmd(nil, out),
		newUpgradeCmd(nil, out),
		newVerifyCmd(out),
		newVersionCmd(nil, out),
		newCompletionCmd(out, cmd),

		// Hidden documentation generator command: 'helm docs'
		newDocsCmd(out, cmd),

		// Deprecated
		rup,
	)

	// Find and add plugins
	loadPlugins(cmd, helmpath.Home(homePath()), out)

	return cmd
}
Example 7
func init() {
	var l = &log.Logger{
		Out:       os.Stdout,
		Formatter: &log.TextFormatter{FullTimestamp: true},
		Hooks:     make(log.LevelHooks),
		Level:     log.GetLevel(),
	}
	grpclog.SetLogger(l)
}
Example 8
func TestMain(m *testing.M) {
	tc = cautils.NewTestCA(nil)

	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	logrus.SetOutput(ioutil.Discard)

	res := m.Run()
	tc.Stop()
	os.Exit(res)
}
Example 9
// New creates a fresh instance of libcontainerd remote.
func New(stateDir string, options ...RemoteOption) (_ Remote, err error) {
	defer func() {
		if err != nil {
			err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specificed the correct address. Got error: %v", err)
		}
	}()
	r := &remote{
		stateDir:    stateDir,
		daemonPid:   -1,
		eventTsPath: filepath.Join(stateDir, eventTimestampFilename),
		pastEvents:  make(map[string]*containerd.Event),
	}
	for _, option := range options {
		if err := option.Apply(r); err != nil {
			return nil, err
		}
	}

	if err := sysinfo.MkdirAll(stateDir, 0700); err != nil {
		return nil, err
	}

	if r.rpcAddr == "" {
		r.rpcAddr = filepath.Join(stateDir, containerdSockFilename)
	}

	if r.startDaemon {
		if err := r.runContainerdDaemon(); err != nil {
			return nil, err
		}
	}

	// don't output the grpc reconnect logging
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	dialOpts := append([]grpc.DialOption{grpc.WithInsecure()},
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}),
	)
	conn, err := grpc.Dial(r.rpcAddr, dialOpts...)
	if err != nil {
		return nil, fmt.Errorf("error connecting to containerd: %v", err)
	}

	r.rpcConn = conn
	r.apiClient = containerd.NewAPIClient(conn)

	go r.handleConnectionChange()

	if err := r.startEventsMonitor(); err != nil {
		return nil, err
	}

	return r, nil
}
Example 10
func main() {
	/* XXX: gRPC spews a lot of useless log messages, so we tell it to eat its logs.
	 * Once we have more sophisticated logging support, we should enable the log
	 * messages when in debug mode. */
	grpclog.SetLogger(l_mod.New(ioutil.Discard, "", 0))

	log.SetFormatter(util.Formatter{})

	flag.Usage = func() {
		fmt.Println("Usage: quilt [inspect <stitch> | run <stitch>" +
			" | stop <namespace> | get <import_path>]" +
			" [-log-level=<level> | -l=<level>]")
		fmt.Println("\nWhen provided a stitch, quilt takes responsibility\n" +
			"for deploying it as specified.  Alternatively, quilt may be\n" +
			"instructed to stop all deployments in a given namespace,\n" +
			"or the default namespace if none is provided.\n")
		flag.PrintDefaults()
		fmt.Println("        Valid logger levels are:\n" +
			"            debug, info, warn, error, fatal or panic.")
	}

	var logLevel = flag.String("log-level", "info", "level to set logger to")
	flag.StringVar(logLevel, "l", "info", "level to set logger to")
	flag.Parse()

	level, err := parseLogLevel(*logLevel)
	if err != nil {
		fmt.Println(err)
		usage()
	}
	log.SetLevel(level)

	conn := db.New()
	if len(flag.Args()) != 2 {
		usage()
	}

	switch flag.Arg(0) {
	case "run":
		go configLoop(conn, flag.Arg(1))
	case "stop":
		stop(conn, flag.Arg(1))
	case "get":
		getSpec(flag.Arg(1))
	case "inspect":
		inspect.Main(flag.Args())
		return
	default:
		usage()
	}

	cluster.Run(conn)
}
Example 11
func newClient(cfg *Config) (*Client, error) {
	if cfg == nil {
		cfg = &Config{RetryDialer: dialEndpointList}
	}
	var creds *credentials.TransportAuthenticator
	if cfg.TLS != nil {
		c := credentials.NewTLS(cfg.TLS)
		creds = &c
	}
	// use a temporary skeleton client to bootstrap first connection
	ctx, cancel := context.WithCancel(context.TODO())
	conn, err := cfg.RetryDialer(&Client{cfg: *cfg, creds: creds, ctx: ctx})
	if err != nil {
		return nil, err
	}
	client := &Client{
		conn:   conn,
		cfg:    *cfg,
		creds:  creds,
		ctx:    ctx,
		cancel: cancel,
	}
	client.Cluster = NewCluster(client)
	client.KV = NewKV(client)
	client.Lease = NewLease(client)
	client.Watcher = NewWatcher(client)
	client.Auth = NewAuth(client)
	client.Maintenance = &maintenance{c: client}
	if cfg.Logger == nil {
		client.logger = log.New(ioutil.Discard, "", 0)
		// disable client side grpc by default
		grpclog.SetLogger(log.New(ioutil.Discard, "", 0))
	} else {
		client.logger = cfg.Logger
		grpclog.SetLogger(cfg.Logger)
	}

	return client, nil
}
Example 12
func init() {
	// disable client side logs by default
	logger.mu.Lock()
	logger.l = log.New(ioutil.Discard, "", 0)

	// logger has to override the grpclog at initialization so that
	// any changes to the grpclog go through logger with locking
	// instead of through SetLogger
	//
	// now updates only happen through settableLogger.set
	grpclog.SetLogger(&logger)
	logger.mu.Unlock()
}
Example 13
func TestMain(m *testing.M) {
	tc = cautils.NewTestCA(nil)

	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	logrus.SetOutput(ioutil.Discard)

	// Set a smaller segment size so we don't incur cost preallocating
	// space on old filesystems like HFS+.
	wal.SegmentSizeBytes = 64 * 1024

	res := m.Run()
	tc.Stop()
	os.Exit(res)
}
Example 14
// NewGrpcTransport creates a new transport
// FIXME: add TLS and configurable timeout
func NewGrpcTransport(handler TransportCallback, listenURL string) (Transport, error) {
	// init a grpc transport object
	trans := GrpcTransport{}
	trans.clients = make(map[string]paxospb.PaxosClient)
	trans.handler = handler
	trans.listenURL = listenURL

	// start the grpc server
	trans.Start()

	// stop those annoying grpc Logs
	grpclog.SetLogger(&glogger{})

	return &trans, nil
}
Example 15
// TODO: parse flags and pass opts
func getClient(ctx *cli.Context) types.APIClient {
	// reset the logger for grpc to log to dev/null so that it does not mess with our stdio
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(ctx.GlobalDuration("conn-timeout"))}
	dialOpts = append(dialOpts,
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		},
		))
	conn, err := grpc.Dial(ctx.GlobalString("address"), dialOpts...)
	if err != nil {
		fatal(err.Error(), 1)
	}
	return types.NewAPIClient(conn)
}
Example 16
File: main.go Project: hyperhq/runv
func getClient(address string) types.APIClient {
	// reset the logger for grpc to log to dev/null so that it does not mess with our stdio
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(5 * time.Second)}
	dialOpts = append(dialOpts,
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		},
		))
	conn, err := grpc.Dial(address, dialOpts...)
	if err != nil {
		fmt.Printf("grpc.Dial error: %v", err)
		os.Exit(-1)
	}
	return types.NewAPIClient(conn)
}
Example 17
func (g *GrpcEndpoint) Start() {
	lis, err := net.Listen("tcp", g.hostport)
	if err != nil {
		logging.Fatalf("Cannot start server on port %v. Error %v", g.hostport, err)
		panic(fmt.Sprintf("Cannot start server on port %v. Error %v", g.hostport, err))
	}

	grpclog.SetLogger(logging.CurrentLogger())
	g.grpcServer = grpc.NewServer()
	// Each service should be registered
	pb.RegisterQuotaServiceServer(g.grpcServer, g)
	go g.grpcServer.Serve(lis)
	g.currentStatus = lifecycle.Started
	logging.Printf("Starting server on %v", g.hostport)
	logging.Printf("Server status: %v", g.currentStatus)
}
Example 18
// getClient returns a connection to the Suite containerd
func (cs *ContainerdSuite) getClient(socket string) error {
	// reset the logger for grpc to log to dev/null so that it does not mess with our stdio
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	dialOpts := []grpc.DialOption{grpc.WithInsecure()}
	dialOpts = append(dialOpts,
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		},
		))
	conn, err := grpc.Dial(socket, dialOpts...)
	if err != nil {
		return err
	}
	cs.grpcClient = types.NewAPIClient(conn)

	return nil
}
Example 19
// Main Raft test method; we execute the
// tests sequentially to avoid any issues
// with bootstrap and raft cluster lifecycle.
func TestRaft(t *testing.T) {
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))

	testBootstrap(t)
	testLeader(t)
	testLeaderDown(t)
	testFollowerDown(t)
	testLogReplication(t)
	testLogReplicationWithoutLeader(t)
	testQuorumFailure(t)
	testLeaderLeave(t)
	testFollowerLeave(t)
	testPauseFollower(t)

	// TODO
	testSnapshot(t)
	testRecoverSnapshot(t)
}
Example 20
func newCliClient(address string) *cliClient {
	client := &cliClient{}

	dialer := func(addr string, timeout time.Duration) (net.Conn, error) {
		return net.DialTimeout("unix", addr, timeout)
	}

	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))

	conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithDialer(dialer), grpc.WithTimeout(1*time.Second))
	if err != nil {
		log.Fatalf("did not connect: %v\n", err)
	}

	client.conn = conn
	client.srv = pb.NewInstanceMetaServiceClient(conn)

	return client
}
Example 21
func (cache *rpcCache) connect(config *core.Configuration) {
	// Change grpc to log using our implementation
	grpclog.SetLogger(&grpcLogMabob{})
	log.Info("Connecting to RPC cache at %s", config.Cache.RpcUrl)
	opts := []grpc.DialOption{grpc.WithTimeout(cache.timeout)}
	if config.Cache.RpcPublicKey != "" || config.Cache.RpcCACert != "" || config.Cache.RpcSecure {
		auth, err := loadAuth(config.Cache.RpcCACert, config.Cache.RpcPublicKey, config.Cache.RpcPrivateKey)
		if err != nil {
			log.Warning("Failed to load RPC cache auth keys: %s", err)
			return
		}
		opts = append(opts, auth)
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	connection, err := grpc.Dial(config.Cache.RpcUrl, opts...)
	if err != nil {
		cache.Connecting = false
		log.Warning("Failed to connect to RPC cache: %s", err)
		return
	}
	// Note that we have to actually send it a message here to validate the connection;
	// Dial() only seems to return errors for superficial failures like syntactically invalid addresses,
	// it will return essentially immediately even if the server doesn't exist.
	healthclient := healthpb.NewHealthClient(connection)
	ctx, cancel := context.WithTimeout(context.Background(), cache.timeout)
	defer cancel()
	resp, err := healthclient.Check(ctx, &healthpb.HealthCheckRequest{Service: "plz-rpc-cache"})
	if err != nil {
		cache.Connecting = false
		log.Warning("Failed to contact RPC cache: %s", err)
	} else if resp.Status != healthpb.HealthCheckResponse_SERVING {
		cache.Connecting = false
		log.Warning("RPC cache says it is not serving (%d)", resp.Status)
	} else {
		cache.connection = connection
		cache.client = pb.NewRpcCacheClient(connection)
		cache.Connected = true
		cache.Connecting = false
		log.Info("RPC cache connected after %0.2fs", time.Since(cache.startTime).Seconds())
	}
}
Example 22
// TODO: parse flags and pass opts
func getClient(ctx *cli.Context) types.APIClient {
	// Parse proto://address form addresses.
	bindSpec := ctx.GlobalString("address")
	bindParts := strings.SplitN(bindSpec, "://", 2)
	if len(bindParts) != 2 {
		fatal(fmt.Sprintf("bad bind address format %s, expected proto://address", bindSpec), 1)
	}

	// reset the logger for grpc to log to dev/null so that it does not mess with our stdio
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(ctx.GlobalDuration("conn-timeout"))}
	dialOpts = append(dialOpts,
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout(bindParts[0], bindParts[1], timeout)
		},
		))
	conn, err := grpc.Dial(bindSpec, dialOpts...)
	if err != nil {
		fatal(err.Error(), 1)
	}
	return types.NewAPIClient(conn)
}
Example 23
func getGRPCConnection(context *cli.Context) (*grpc.ClientConn, error) {
	if grpcConn != nil {
		return grpcConn, nil
	}

	bindSocket := context.GlobalString("socket")
	// reset the logger for grpc to log to dev/null so that it does not mess with our stdio
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(100 * time.Second)}
	dialOpts = append(dialOpts,
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", bindSocket, timeout)
		},
		))

	conn, err := grpc.Dial(fmt.Sprintf("unix://%s", bindSocket), dialOpts...)
	if err != nil {
		return nil, err
	}

	grpcConn = conn
	return grpcConn, nil
}
Example 24
func ServeGrpc(cfg *server.ServerConfig, srv *server.Server, grpcUrl string, certFile, keyFile string, tf repo.TransactionFactory) {
	var opts []grpc.ServerOption
	if certFile != "" && keyFile != "" {
		creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
		if err != nil {
			log.Fatalf("grpc.go: Failed to generate credentials %v", err)
		}
		opts = []grpc.ServerOption{grpc.Creds(creds)}
	}
	s := grpc.NewServer(opts...)

	rpcSrv := &grpcServer{
		server: srv,
		idp: &connector.LocalIdentityProvider{
			UserRepo:         srv.UserRepo,
			PasswordInfoRepo: srv.PasswordInfoRepo,
		},
		begin: tf,
	}

	for _, c := range srv.Connectors {
		if cc, ok := c.(*connector.LocalConnector); ok {
			rpcSrv.localConnectorID = cc.ID()
			break
		}
	}

	grpclog.SetLogger(golog.New(os.Stdout, "", 0))
	pb.RegisterDexServiceServer(s, rpcSrv)

	lis, err := net.Listen("tcp", grpcUrl)
	if err != nil {
		log.Fatalf("grpc.go: failed to listen: %v", err)
	}
	log.Infof("grpc: Grpc server starting on %s", grpcUrl)
	s.Serve(lis)
}
Example 25
func init() {
	grpclog.SetLogger(plog)
}
Example 26
func init() {
	grpclog.SetLogger(log.New(ioutil.Discard, "", 0))
}
Example 27
func init() {
	grpclog.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd/etcdserver", "v3rpc/grpc"))
}
Example 28
func init() {
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	logrus.SetOutput(ioutil.Discard)

	tc = cautils.NewTestCA(nil, cautils.AcceptancePolicy(true, true, ""))
}
Example 29
func (c *ServerCommand) Run(args []string) int {
	var dev, verifyOnly, devHA bool
	var configPath []string
	var logLevel, devRootTokenID, devListenAddress string
	flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
	flags.BoolVar(&dev, "dev", false, "")
	flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
	flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
	flags.StringVar(&logLevel, "log-level", "info", "")
	flags.BoolVar(&verifyOnly, "verify-only", false, "")
	flags.BoolVar(&devHA, "dev-ha", false, "")
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Create a logger. We wrap it in a gated writer so that it doesn't
	// start logging too early.
	logGate := &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
	var level int
	switch logLevel {
	case "trace":
		level = log.LevelTrace
	case "debug":
		level = log.LevelDebug
	case "info":
		level = log.LevelInfo
	case "notice":
		level = log.LevelNotice
	case "warn":
		level = log.LevelWarn
	case "err":
		level = log.LevelError
	default:
		c.Ui.Output(fmt.Sprintf("Unknown log level %s", logLevel))
		return 1
	}

	logFormat := os.Getenv("VAULT_LOG_FORMAT")
	if logFormat == "" {
		logFormat = os.Getenv("LOGXI_FORMAT")
	}
	switch strings.ToLower(logFormat) {
	case "vault", "vault_json", "vault-json", "vaultjson", "json", "":
		c.logger = logformat.NewVaultLoggerWithWriter(logGate, level)
	default:
		c.logger = log.NewLogger(logGate, "vault")
		c.logger.SetLevel(level)
	}
	grpclog.SetLogger(&grpclogFaker{
		logger: c.logger,
	})

	if os.Getenv("VAULT_DEV_ROOT_TOKEN_ID") != "" && devRootTokenID == "" {
		devRootTokenID = os.Getenv("VAULT_DEV_ROOT_TOKEN_ID")
	}

	if os.Getenv("VAULT_DEV_LISTEN_ADDRESS") != "" && devListenAddress == "" {
		devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS")
	}

	if devHA {
		dev = true
	}

	// Validation
	if !dev {
		switch {
		case len(configPath) == 0:
			c.Ui.Output("At least one config path must be specified with -config")
			flags.Usage()
			return 1
		case devRootTokenID != "":
			c.Ui.Output("Root token ID can only be specified with -dev")
			flags.Usage()
			return 1
		}
	}

	// Load the configuration
	var config *server.Config
	if dev {
		config = server.DevConfig(devHA)
		if devListenAddress != "" {
			config.Listeners[0].Config["address"] = devListenAddress
		}
	}
	for _, path := range configPath {
		current, err := server.LoadConfig(path, c.logger)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error loading configuration from %s: %s", path, err))
			return 1
		}

		if config == nil {
			config = current
		} else {
			config = config.Merge(current)
		}
	}

	// Ensure at least one config was found.
	if config == nil {
		c.Ui.Output("No configuration files found.")
		return 1
	}

	// Ensure that a backend is provided
	if config.Backend == nil {
		c.Ui.Output("A physical backend must be specified")
		return 1
	}

	// If mlockall(2) isn't supported, show a warning.  We disable this
	// in dev because it is quite scary to see when first using Vault.
	if !dev && !mlock.Supported() {
		c.Ui.Output("==> WARNING: mlock not supported on this system!\n")
		c.Ui.Output("  An `mlockall(2)`-like syscall to prevent memory from being")
		c.Ui.Output("  swapped to disk is not supported on this system. Running")
		c.Ui.Output("  Vault on an mlockall(2) enabled system is much more secure.\n")
	}

	if err := c.setupTelemetry(config); err != nil {
		c.Ui.Output(fmt.Sprintf("Error initializing telemetry: %s", err))
		return 1
	}

	// Initialize the backend
	backend, err := physical.NewBackend(
		config.Backend.Type, c.logger, config.Backend.Config)
	if err != nil {
		c.Ui.Output(fmt.Sprintf(
			"Error initializing backend of type %s: %s",
			config.Backend.Type, err))
		return 1
	}

	infoKeys := make([]string, 0, 10)
	info := make(map[string]string)

	var seal vault.Seal = &vault.DefaultSeal{}

	// Ensure that the seal finalizer is called, even if using verify-only
	defer func() {
		if seal != nil {
			err = seal.Finalize()
			if err != nil {
				c.Ui.Error(fmt.Sprintf("Error finalizing seals: %v", err))
			}
		}
	}()

	if seal == nil {
		c.Ui.Error(fmt.Sprintf("Could not create seal"))
		return 1
	}

	coreConfig := &vault.CoreConfig{
		Physical:           backend,
		RedirectAddr:       config.Backend.RedirectAddr,
		HAPhysical:         nil,
		Seal:               seal,
		AuditBackends:      c.AuditBackends,
		CredentialBackends: c.CredentialBackends,
		LogicalBackends:    c.LogicalBackends,
		Logger:             c.logger,
		DisableCache:       config.DisableCache,
		DisableMlock:       config.DisableMlock,
		MaxLeaseTTL:        config.MaxLeaseTTL,
		DefaultLeaseTTL:    config.DefaultLeaseTTL,
		ClusterName:        config.ClusterName,
		CacheSize:          config.CacheSize,
	}

	var disableClustering bool

	// Initialize the separate HA physical backend, if it exists
	var ok bool
	if config.HABackend != nil {
		habackend, err := physical.NewBackend(
			config.HABackend.Type, c.logger, config.HABackend.Config)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing backend of type %s: %s",
				config.HABackend.Type, err))
			return 1
		}

		if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
			c.Ui.Output("Specified HA backend does not support HA")
			return 1
		}

		if !coreConfig.HAPhysical.HAEnabled() {
			c.Ui.Output("Specified HA backend has HA support disabled; please consult documentation")
			return 1
		}

		coreConfig.RedirectAddr = config.HABackend.RedirectAddr
		disableClustering = config.HABackend.DisableClustering
		if !disableClustering {
			coreConfig.ClusterAddr = config.HABackend.ClusterAddr
		}
	} else {
		if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
			coreConfig.RedirectAddr = config.Backend.RedirectAddr
			disableClustering = config.Backend.DisableClustering
			if !disableClustering {
				coreConfig.ClusterAddr = config.Backend.ClusterAddr
			}
		}
	}

	if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
		coreConfig.RedirectAddr = envRA
	} else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
		coreConfig.RedirectAddr = envAA
	}

	// Attempt to detect the redirect address, if possible
	var detect physical.RedirectDetect
	if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
		detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
	} else {
		detect, ok = coreConfig.Physical.(physical.RedirectDetect)
	}
	if ok && coreConfig.RedirectAddr == "" {
		redirect, err := c.detectRedirect(detect, config)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error detecting redirect address: %s", err))
		} else if redirect == "" {
			c.Ui.Output("Failed to detect redirect address.")
		} else {
			coreConfig.RedirectAddr = redirect
		}
	}

	// After the redirect bits are sorted out, if no cluster address was
	// explicitly given, derive one from the redirect addr
	if disableClustering {
		coreConfig.ClusterAddr = ""
	} else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
		coreConfig.ClusterAddr = envCA
	} else if coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "" {
		u, err := url.ParseRequestURI(coreConfig.RedirectAddr)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error parsing redirect address %s: %v", coreConfig.RedirectAddr, err))
			return 1
		}
		host, port, err := net.SplitHostPort(u.Host)
		nPort, nPortErr := strconv.Atoi(port)
		if err != nil {
			// assume it's due to there not being a port specified, in which case
			// use 443
			host = u.Host
			nPort = 443
		}
		if nPortErr != nil {
			c.Ui.Output(fmt.Sprintf("Cannot parse %s as a numeric port: %v", port, nPortErr))
			return 1
		}
		u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
		// Will always be TLS-secured
		u.Scheme = "https"
		coreConfig.ClusterAddr = u.String()
	}
	if coreConfig.ClusterAddr != "" {
		// Force https as we'll always be TLS-secured
		u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.RedirectAddr, err))
			return 1
		}
		u.Scheme = "https"
		coreConfig.ClusterAddr = u.String()
	}

	// Initialize the core
	core, newCoreError := vault.NewCore(coreConfig)
	if newCoreError != nil {
		if !errwrap.ContainsType(newCoreError, new(vault.NonFatalError)) {
			c.Ui.Output(fmt.Sprintf("Error initializing core: %s", newCoreError))
			return 1
		}
	}

	// Copy the reload funcs pointers back
	c.reloadFuncs = coreConfig.ReloadFuncs
	c.reloadFuncsLock = coreConfig.ReloadFuncsLock

	// Compile server information for output later
	info["backend"] = config.Backend.Type
	info["log level"] = logLevel
	info["mlock"] = fmt.Sprintf(
		"supported: %v, enabled: %v",
		mlock.Supported(), !config.DisableMlock && mlock.Supported())
	infoKeys = append(infoKeys, "log level", "mlock", "backend")

	if config.HABackend != nil {
		info["HA backend"] = config.HABackend.Type
		info["redirect address"] = coreConfig.RedirectAddr
		infoKeys = append(infoKeys, "HA backend", "redirect address")
		if coreConfig.ClusterAddr != "" {
			info["cluster address"] = coreConfig.ClusterAddr
			infoKeys = append(infoKeys, "cluster address")
		}
	} else {
		// If the backend supports HA, then note it
		if coreConfig.HAPhysical != nil {
			if coreConfig.HAPhysical.HAEnabled() {
				info["backend"] += " (HA available)"
				info["redirect address"] = coreConfig.RedirectAddr
				infoKeys = append(infoKeys, "redirect address")
				if coreConfig.ClusterAddr != "" {
					info["cluster address"] = coreConfig.ClusterAddr
					infoKeys = append(infoKeys, "cluster address")
				}
			} else {
				info["backend"] += " (HA disabled)"
			}
		}
	}

	clusterAddrs := []*net.TCPAddr{}

	// Initialize the listeners
	c.reloadFuncsLock.Lock()
	lns := make([]net.Listener, 0, len(config.Listeners))
	for i, lnConfig := range config.Listeners {
		if lnConfig.Type == "atlas" {
			if config.ClusterName == "" {
				c.Ui.Output("cluster_name is not set in the config and is a required value")
				return 1
			}

			lnConfig.Config["cluster_name"] = config.ClusterName
		}

		ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, logGate)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing listener of type %s: %s",
				lnConfig.Type, err))
			return 1
		}

		lns = append(lns, ln)

		if reloadFunc != nil {
			relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type]
			relSlice = append(relSlice, reloadFunc)
			(*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice
		}

		if !disableClustering && lnConfig.Type == "tcp" {
			var addr string
			var ok bool
			if addr, ok = lnConfig.Config["cluster_address"]; ok {
				tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
				if err != nil {
					c.Ui.Output(fmt.Sprintf(
						"Error resolving cluster_address: %s",
						err))
					return 1
				}
				clusterAddrs = append(clusterAddrs, tcpAddr)
			} else {
				tcpAddr, ok := ln.Addr().(*net.TCPAddr)
				if !ok {
					c.Ui.Output("Failed to parse tcp listener")
					return 1
				}
				clusterAddrs = append(clusterAddrs, &net.TCPAddr{
					IP:   tcpAddr.IP,
					Port: tcpAddr.Port + 1,
				})
			}
			props["cluster address"] = addr
		}

		// Store the listener props for output later
		key := fmt.Sprintf("listener %d", i+1)
		propsList := make([]string, 0, len(props))
		for k, v := range props {
			propsList = append(propsList, fmt.Sprintf(
				"%s: %q", k, v))
		}
		sort.Strings(propsList)
		infoKeys = append(infoKeys, key)
		info[key] = fmt.Sprintf(
			"%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))

	}
	c.reloadFuncsLock.Unlock()
	if !disableClustering {
		if c.logger.IsTrace() {
			c.logger.Trace("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs)
		}
	}

	// Make sure we close all listeners from this point on
	listenerCloseFunc := func() {
		for _, ln := range lns {
			ln.Close()
		}
	}

	defer c.cleanupGuard.Do(listenerCloseFunc)

	infoKeys = append(infoKeys, "version")
	verInfo := version.GetVersion()
	info["version"] = verInfo.FullVersionNumber(false)
	if verInfo.Revision != "" {
		info["version sha"] = strings.Trim(verInfo.Revision, "'")
		infoKeys = append(infoKeys, "version sha")
	}
	infoKeys = append(infoKeys, "cgo")
	info["cgo"] = "disabled"
	if version.CgoEnabled {
		info["cgo"] = "enabled"
	}

	// Server configuration output
	padding := 24
	sort.Strings(infoKeys)
	c.Ui.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.Ui.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.Ui.Output("")

	if verifyOnly {
		return 0
	}

	// Perform service discovery registrations and initialization of
	// HTTP server after the verifyOnly check.

	// Instantiate the wait group
	c.WaitGroup = &sync.WaitGroup{}

	// If the backend supports service discovery, run service discovery
	if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
		sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery)
		if ok {
			activeFunc := func() bool {
				if isLeader, _, err := core.Leader(); err == nil {
					return isLeader
				}
				return false
			}

			sealedFunc := func() bool {
				if sealed, err := core.Sealed(); err == nil {
					return sealed
				}
				return true
			}

			if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, sealedFunc); err != nil {
				c.Ui.Output(fmt.Sprintf("Error initializing service discovery: %v", err))
				return 1
			}
		}
	}

	handler := vaulthttp.Handler(core)

	// This needs to happen before we first unseal, so before we trigger dev
	// mode if it's set
	core.SetClusterListenerAddrs(clusterAddrs)
	core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, c.logger))

	// If we're in dev mode, then initialize the core
	if dev {
		init, err := c.enableDev(core, devRootTokenID)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing dev mode: %s", err))
			return 1
		}

		export := "export"
		quote := "'"
		if runtime.GOOS == "windows" {
			export = "set"
			quote = ""
		}

		c.Ui.Output(fmt.Sprintf(
			"==> WARNING: Dev mode is enabled!\n\n"+
				"In this mode, Vault is completely in-memory and unsealed.\n"+
				"Vault is configured to only have a single unseal key. The root\n"+
				"token has already been authenticated with the CLI, so you can\n"+
				"immediately begin using the Vault CLI.\n\n"+
				"The only step you need to take is to set the following\n"+
				"environment variables:\n\n"+
				"    "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"]+quote+"\n\n"+
				"The unseal key and root token are reproduced below in case you\n"+
				"want to seal/unseal the Vault or play with authentication.\n\n"+
				"Unseal Key: %s\nRoot Token: %s\n",
			base64.StdEncoding.EncodeToString(init.SecretShares[0]),
			init.RootToken,
		))
	}

	// Initialize the HTTP server
	server := &http.Server{}
	server.Handler = handler
	for _, ln := range lns {
		go server.Serve(ln)
	}

	if newCoreError != nil {
		c.Ui.Output("==> Warning:\n\nNon-fatal error during initialization; check the logs for more information.")
		c.Ui.Output("")
	}

	// Output the header that the server has started
	c.Ui.Output("==> Vault server started! Log data will stream in below:\n")

	// Release the log gate.
	logGate.Flush()

	// Wait for shutdown
	shutdownTriggered := false

	for !shutdownTriggered {
		select {
		case <-c.ShutdownCh:
			c.Ui.Output("==> Vault shutdown triggered")

			// Stop the listeners so that we don't process further client requests.
			c.cleanupGuard.Do(listenerCloseFunc)

			// Shutdown will wait until after Vault is sealed, which means the
			// request forwarding listeners will also be closed (and also
			// waited for).
			if err := core.Shutdown(); err != nil {
				c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err))
			}

			shutdownTriggered = true

		case <-c.SighupCh:
			c.Ui.Output("==> Vault reload triggered")
			if err := c.Reload(configPath); err != nil {
				c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
			}
		}
	}

	// Wait for dependent goroutines to complete
	c.WaitGroup.Wait()
	return 0
}
Example 30
func init() {
	grpclog.SetLogger(&glogger{})
}
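Several of the examples above (e.g. nos. 5, 14, 21 and 30) pass a small adapter type such as grpcLogger, glogger or grpcLogMabob whose definition is not shown. As a rough sketch, assuming only the legacy grpclog.Logger interface (the six Fatal*/Print* methods) has to be satisfied, such an adapter could look like the following; the wrapped standard-library logger is a placeholder for whatever logging backend a project actually uses:

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

// glogger adapts an arbitrary logger to the legacy grpclog.Logger
// interface. Here it simply wraps a standard-library *log.Logger as a
// placeholder; the examples above wrap project-specific loggers instead.
type glogger struct {
	l *log.Logger
}

func (g *glogger) Fatal(args ...interface{})                 { g.l.Fatal(args...) }
func (g *glogger) Fatalf(format string, args ...interface{}) { g.l.Fatalf(format, args...) }
func (g *glogger) Fatalln(args ...interface{})               { g.l.Fatalln(args...) }
func (g *glogger) Print(args ...interface{})                 { g.l.Print(args...) }
func (g *glogger) Printf(format string, args ...interface{}) { g.l.Printf(format, args...) }
func (g *glogger) Println(args ...interface{})               { g.l.Println(args...) }

func main() {
	grpclog.SetLogger(&glogger{l: log.New(os.Stderr, "grpc: ", log.LstdFlags)})
}

Since *log.Logger already provides these methods, many of the examples skip the adapter entirely and pass log.New(ioutil.Discard, "", 0) directly to silence gRPC's output.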