Example #1
func (suite *ServerSuite) TestWildcard() {

	writer := log.NewConcurrentWriter(os.Stdout)
	logger := log.NewLogger(writer, "sshh_test")

	r := router.New(logger, nil, nil)
	r.Register("/echo", &EchoHandler{log.New("echo")})
	r.Register("/bad", &BadHandler{})

	// Set up a mock channel whose type is unknown to the router and whose
	// Accept call fails, then stub the calls the dispatcher should make.
	acceptErr := errors.New("accept error")
	ch := &sshmocks.MockNewChannel{
		TypeName:  "*",
		AcceptErr: acceptErr,
	}
	ch.On("ChannelType").Return("*")
	ch.On("Reject", ssh.UnknownChannelType, "*").Return(errors.New("unknown reason 1000"))

	conn := &sshmocks.MockConn{}
	conn.On("Close").Return(nil)
	serverConn := ssh.ServerConn{
		Conn: conn,
	}
	// Create dispatcher
	dispatcher := &UrlDispatcher{Logger: log.NullLog, Router: r}
	dispatcher.Dispatch(context.Background(), &serverConn, ch)

	// assert that the expectations were met
	ch.AssertCalled(suite.T(), "ChannelType")
	ch.AssertCalled(suite.T(), "Reject", ssh.UnknownChannelType, "*")
	conn.AssertCalled(suite.T(), "Close")
}
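
The sshmocks.MockNewChannel type used above is not shown in this listing. Below is a minimal sketch of how such a mock might be declared with testify/mock; the field names mirror the test, while the mock.Mock embedding and the method bodies are assumptions.

type MockNewChannel struct {
	mock.Mock // testify recorder backing On/AssertCalled
	TypeName  string
	AcceptErr error
}

// ChannelType records the call and returns the stubbed channel type.
func (m *MockNewChannel) ChannelType() string {
	return m.Called().String(0)
}

// Reject records the rejection and returns whatever error was stubbed with On.
func (m *MockNewChannel) Reject(reason ssh.RejectionReason, message string) error {
	return m.Called(reason, message).Error(0)
}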
Example #2
// InitializeConfig reads the configuration file and sets up the application settings via Viper.
func InitializeConfig(writer io.Writer) error {
	logger := log.NewLogger(writer, "config")

	if err := InitializeMainConfig(logger); err != nil {
		logger.Warn("Failed to initialize kappa command line flags")
		return err
	}

	if err := InitializeServerConfig(logger); err != nil {
		logger.Warn("Failed to initialize server command line flags")
		return err
	}

	if err := InitializeCertAuthConfig(logger); err != nil {
		logger.Warn("Failed to initialize init-ca command line flags")
		return err
	}

	if err := InitializeNewCertConfig(logger); err != nil {
		logger.Warn("Failed to initialize new-cert command line flags")
		return err
	}

	return nil
}
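
A minimal call site for InitializeConfig, mirroring the cobra commands later in this listing (the enclosing Run function is assumed):

// Create logger
writer := log.NewConcurrentWriter(os.Stdout)
logger := log.NewLogger(writer, "kappa")

// Bail out early if any command-line flags fail to initialize.
if err := InitializeConfig(writer); err != nil {
	logger.Warn("Could not initialize configuration", "err", err.Error())
	return
}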
Example #3
func (suite *ServerSuite) createConfig() Config {

	// Create logger
	writer := log.NewConcurrentWriter(os.Stdout)
	// writer := log.NewConcurrentWriter(ioutil.Discard)
	logger := log.NewLogger(writer, "sshh")
	// logger := log.DefaultLog

	// Get signer
	signer, err := ssh.ParsePrivateKey([]byte(serverKey))
	if err != nil {
		suite.Fail("Private key could not be parsed", err.Error())
	}

	// Create config
	cfg := Config{
		Deadline: time.Second,
		Handlers: map[string]SSHHandler{
			"echo": &EchoHandler{log.New("echo")},
			"bad":  &BadHandler{},
		},
		Logger:            logger,
		Bind:              ":9022",
		PrivateKey:        signer,
		PasswordCallback:  passwordCallback,
		PublicKeyCallback: publicKeyCallback,
	}
	return cfg
}
Example #4
func TestConfig(t *testing.T) {
	var authLogCalled bool
	var authLogCallback = func(conn ssh.ConnMetadata, method string, err error) {
		authLogCalled = true
	}

	// Create logger
	writer := log.NewConcurrentWriter(ioutil.Discard)
	logger := log.NewLogger(writer, "sshh")

	// Get signer
	signer, err := ssh.ParsePrivateKey([]byte(serverKey))
	if err != nil {
		t.Fatalf("Private key could not be parsed", err.Error())
	}

	r := router.New(logger, nil, nil)
	r.Register("/echo", &EchoHandler{log.New("echo")})

	cfg := Config{
		Deadline: time.Second,
		Dispatcher: &UrlDispatcher{
			Router: r,
			Logger: logger,
		},
		// Handlers: map[string]SSHHandler{
		// 	"echo": &EchoHandler{log.New("echo")},
		// },
		Logger:            logger,
		Bind:              ":9022",
		PrivateKey:        signer,
		AuthLogCallback:   authLogCallback,
		PasswordCallback:  passwordCallback,
		PublicKeyCallback: publicKeyCallback,
	}

	// Assertions
	assert.Equal(t, time.Second, cfg.Deadline, "Deadline should be 1s")
	assert.Equal(t, ":9022", cfg.Bind, "Bind should be :9022")

	// Create SSH config
	c := cfg.SSHConfig()
	assert.NotNil(t, c, "SSH config should not be nil")
	assert.Equal(t, passwordCallback, c.PasswordCallback, "PasswordCallback should use the one we passed in")
	assert.Equal(t, publicKeyCallback, c.PublicKeyCallback, "PublicKeyCallback should use the one we passed in")
	assert.Equal(t, authLogCallback, c.AuthLogCallback, "AuthLogCallback should use the one we passed in")

	// // Test Handlers
	// h, ok := cfg.Handler("echo")
	// assert.True(t, ok, "Echo handler should be registered")
	// assert.NotNil(t, h, "Echo handler should not be nil")

	// h, ok = cfg.Handler("shell")
	// assert.False(t, ok, "Shell handler should not be registered")
	// assert.Nil(t, h, "Shell handler should be nil")
}
Example #5
// New creates an implementation of the Yamuxer interface using the given
// context and config.
func New(context context.Context, c *Config) Yamuxer {
	return &yamuxer{
		grim:       grim.ReaperWithContext(context),
		listener:   c.Listener,
		dispatcher: c.Dispatcher,
		deadline:   c.Deadline,
		tlsConfig:  c.TLSConfig,
		logger:     log.NewLogger(c.LogOutput, "yamuxer"),
		logOutput:  c.LogOutput,
	}
}
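
A construction sketch, assuming only the Config fields the constructor above reads (Listener, Dispatcher, Deadline, TLSConfig, LogOutput); the dispatcher, TLS config, and the :9000 bind address are placeholders supplied by the caller.

func newMuxer(ctx context.Context, d yamuxer.Dispatcher, tlsConf *tls.Config) (yamuxer.Yamuxer, error) {
	// Bind a TCP listener for the muxer to accept on.
	addr, err := net.ResolveTCPAddr("tcp", ":9000")
	if err != nil {
		return nil, err
	}
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return nil, err
	}
	return yamuxer.New(ctx, &yamuxer.Config{
		Listener:   listener,
		Dispatcher: d,
		Deadline:   30 * time.Second,
		TLSConfig:  tlsConf,
		LogOutput:  os.Stderr,
	}), nil
}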
Example #6
func (suite *ServerSuite) createConfig() Config {

	// Create logger
	writer := log.NewConcurrentWriter(os.Stdout)
	// writer := log.NewConcurrentWriter(ioutil.Discard)
	logger := log.NewLogger(writer, "sshh")
	// logger := log.DefaultLog

	// Get signer
	signer, err := ssh.ParsePrivateKey([]byte(serverKey))
	if err != nil {
		suite.Fail("Private key could not be parsed", err.Error())
	}

	r := router.New(logger, nil, nil)
	r.Register("/echo", &EchoHandler{log.New("echo")})
	r.Register("/bad", &BadHandler{})

	// Create config
	cfg := Config{
		Context:  context.Background(),
		Deadline: time.Second,
		Dispatcher: &UrlDispatcher{
			Router: r,
			Logger: logger,
		},
		// Handlers: map[string]SSHHandler{
		// 	"echo": &EchoHandler{log.New("echo")},
		// 	"bad":  &BadHandler{},
		// },
		Logger:            logger,
		Bind:              ":9022",
		PrivateKey:        signer,
		PasswordCallback:  passwordCallback,
		PublicKeyCallback: publicKeyCallback,
	}
	return cfg
}
Example #7
func (c *ServerCommand) Run(args []string) int {
	var dev, verifyOnly, devHA bool
	var configPath []string
	var logLevel, devRootTokenID, devListenAddress string
	flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
	flags.BoolVar(&dev, "dev", false, "")
	flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
	flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
	flags.StringVar(&logLevel, "log-level", "info", "")
	flags.BoolVar(&verifyOnly, "verify-only", false, "")
	flags.BoolVar(&devHA, "dev-ha", false, "")
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Create a logger. We wrap it in a gated writer so that it doesn't
	// start logging too early.
	logGate := &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
	var level int
	switch logLevel {
	case "trace":
		level = log.LevelTrace
	case "debug":
		level = log.LevelDebug
	case "info":
		level = log.LevelInfo
	case "notice":
		level = log.LevelNotice
	case "warn":
		level = log.LevelWarn
	case "err":
		level = log.LevelError
	default:
		c.Ui.Output(fmt.Sprintf("Unknown log level %s", logLevel))
		return 1
	}

	logFormat := os.Getenv("VAULT_LOG_FORMAT")
	if logFormat == "" {
		logFormat = os.Getenv("LOGXI_FORMAT")
	}
	switch strings.ToLower(logFormat) {
	case "vault", "vault_json", "vault-json", "vaultjson", "json", "":
		c.logger = logformat.NewVaultLoggerWithWriter(logGate, level)
	default:
		c.logger = log.NewLogger(logGate, "vault")
		c.logger.SetLevel(level)
	}
	grpclog.SetLogger(&grpclogFaker{
		logger: c.logger,
	})

	if os.Getenv("VAULT_DEV_ROOT_TOKEN_ID") != "" && devRootTokenID == "" {
		devRootTokenID = os.Getenv("VAULT_DEV_ROOT_TOKEN_ID")
	}

	if os.Getenv("VAULT_DEV_LISTEN_ADDRESS") != "" && devListenAddress == "" {
		devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS")
	}

	if devHA {
		dev = true
	}

	// Validation
	if !dev {
		switch {
		case len(configPath) == 0:
			c.Ui.Output("At least one config path must be specified with -config")
			flags.Usage()
			return 1
		case devRootTokenID != "":
			c.Ui.Output("Root token ID can only be specified with -dev")
			flags.Usage()
			return 1
		}
	}

	// Load the configuration
	var config *server.Config
	if dev {
		config = server.DevConfig(devHA)
		if devListenAddress != "" {
			config.Listeners[0].Config["address"] = devListenAddress
		}
	}
	for _, path := range configPath {
		current, err := server.LoadConfig(path, c.logger)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error loading configuration from %s: %s", path, err))
			return 1
		}

		if config == nil {
			config = current
		} else {
			config = config.Merge(current)
		}
	}

	// Ensure at least one config was found.
	if config == nil {
		c.Ui.Output("No configuration files found.")
		return 1
	}

	// Ensure that a backend is provided
	if config.Backend == nil {
		c.Ui.Output("A physical backend must be specified")
		return 1
	}

	// If mlockall(2) isn't supported, show a warning.  We disable this
	// in dev because it is quite scary to see when first using Vault.
	if !dev && !mlock.Supported() {
		c.Ui.Output("==> WARNING: mlock not supported on this system!\n")
		c.Ui.Output("  An `mlockall(2)`-like syscall to prevent memory from being")
		c.Ui.Output("  swapped to disk is not supported on this system. Running")
		c.Ui.Output("  Vault on an mlockall(2) enabled system is much more secure.\n")
	}

	if err := c.setupTelemetry(config); err != nil {
		c.Ui.Output(fmt.Sprintf("Error initializing telemetry: %s", err))
		return 1
	}

	// Initialize the backend
	backend, err := physical.NewBackend(
		config.Backend.Type, c.logger, config.Backend.Config)
	if err != nil {
		c.Ui.Output(fmt.Sprintf(
			"Error initializing backend of type %s: %s",
			config.Backend.Type, err))
		return 1
	}

	infoKeys := make([]string, 0, 10)
	info := make(map[string]string)

	var seal vault.Seal = &vault.DefaultSeal{}

	// Ensure that the seal finalizer is called, even if using verify-only
	defer func() {
		if seal != nil {
			err = seal.Finalize()
			if err != nil {
				c.Ui.Error(fmt.Sprintf("Error finalizing seals: %v", err))
			}
		}
	}()

	if seal == nil {
		c.Ui.Error("Could not create seal")
		return 1
	}

	coreConfig := &vault.CoreConfig{
		Physical:           backend,
		RedirectAddr:       config.Backend.RedirectAddr,
		HAPhysical:         nil,
		Seal:               seal,
		AuditBackends:      c.AuditBackends,
		CredentialBackends: c.CredentialBackends,
		LogicalBackends:    c.LogicalBackends,
		Logger:             c.logger,
		DisableCache:       config.DisableCache,
		DisableMlock:       config.DisableMlock,
		MaxLeaseTTL:        config.MaxLeaseTTL,
		DefaultLeaseTTL:    config.DefaultLeaseTTL,
		ClusterName:        config.ClusterName,
		CacheSize:          config.CacheSize,
	}

	var disableClustering bool

	// Initialize the separate HA physical backend, if it exists
	var ok bool
	if config.HABackend != nil {
		habackend, err := physical.NewBackend(
			config.HABackend.Type, c.logger, config.HABackend.Config)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing backend of type %s: %s",
				config.HABackend.Type, err))
			return 1
		}

		if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
			c.Ui.Output("Specified HA backend does not support HA")
			return 1
		}

		if !coreConfig.HAPhysical.HAEnabled() {
			c.Ui.Output("Specified HA backend has HA support disabled; please consult documentation")
			return 1
		}

		coreConfig.RedirectAddr = config.HABackend.RedirectAddr
		disableClustering = config.HABackend.DisableClustering
		if !disableClustering {
			coreConfig.ClusterAddr = config.HABackend.ClusterAddr
		}
	} else {
		if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
			coreConfig.RedirectAddr = config.Backend.RedirectAddr
			disableClustering = config.Backend.DisableClustering
			if !disableClustering {
				coreConfig.ClusterAddr = config.Backend.ClusterAddr
			}
		}
	}

	if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
		coreConfig.RedirectAddr = envRA
	} else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
		coreConfig.RedirectAddr = envAA
	}

	// Attempt to detect the redirect address, if possible
	var detect physical.RedirectDetect
	if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
		detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
	} else {
		detect, ok = coreConfig.Physical.(physical.RedirectDetect)
	}
	if ok && coreConfig.RedirectAddr == "" {
		redirect, err := c.detectRedirect(detect, config)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error detecting redirect address: %s", err))
		} else if redirect == "" {
			c.Ui.Output("Failed to detect redirect address.")
		} else {
			coreConfig.RedirectAddr = redirect
		}
	}

	// After the redirect bits are sorted out, if no cluster address was
	// explicitly given, derive one from the redirect addr
	if disableClustering {
		coreConfig.ClusterAddr = ""
	} else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
		coreConfig.ClusterAddr = envCA
	} else if coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "" {
		u, err := url.ParseRequestURI(coreConfig.RedirectAddr)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error parsing redirect address %s: %v", coreConfig.RedirectAddr, err))
			return 1
		}
		host, port, err := net.SplitHostPort(u.Host)
		if err != nil {
			// assume the error is due to there being no port specified,
			// in which case default to 443
			host = u.Host
			port = "443"
		}
		nPort, err := strconv.Atoi(port)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Cannot parse %s as a numeric port: %v", port, err))
			return 1
		}
		u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
		// Will always be TLS-secured
		u.Scheme = "https"
		coreConfig.ClusterAddr = u.String()
	}
	if coreConfig.ClusterAddr != "" {
		// Force https as we'll always be TLS-secured
		u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.RedirectAddr, err))
			return 1
		}
		u.Scheme = "https"
		coreConfig.ClusterAddr = u.String()
	}

	// Initialize the core
	core, newCoreError := vault.NewCore(coreConfig)
	if newCoreError != nil {
		if !errwrap.ContainsType(newCoreError, new(vault.NonFatalError)) {
			c.Ui.Output(fmt.Sprintf("Error initializing core: %s", newCoreError))
			return 1
		}
	}

	// Copy the reload funcs pointers back
	c.reloadFuncs = coreConfig.ReloadFuncs
	c.reloadFuncsLock = coreConfig.ReloadFuncsLock

	// Compile server information for output later
	info["backend"] = config.Backend.Type
	info["log level"] = logLevel
	info["mlock"] = fmt.Sprintf(
		"supported: %v, enabled: %v",
		mlock.Supported(), !config.DisableMlock && mlock.Supported())
	infoKeys = append(infoKeys, "log level", "mlock", "backend")

	if config.HABackend != nil {
		info["HA backend"] = config.HABackend.Type
		info["redirect address"] = coreConfig.RedirectAddr
		infoKeys = append(infoKeys, "HA backend", "redirect address")
		if coreConfig.ClusterAddr != "" {
			info["cluster address"] = coreConfig.ClusterAddr
			infoKeys = append(infoKeys, "cluster address")
		}
	} else {
		// If the backend supports HA, then note it
		if coreConfig.HAPhysical != nil {
			if coreConfig.HAPhysical.HAEnabled() {
				info["backend"] += " (HA available)"
				info["redirect address"] = coreConfig.RedirectAddr
				infoKeys = append(infoKeys, "redirect address")
				if coreConfig.ClusterAddr != "" {
					info["cluster address"] = coreConfig.ClusterAddr
					infoKeys = append(infoKeys, "cluster address")
				}
			} else {
				info["backend"] += " (HA disabled)"
			}
		}
	}

	clusterAddrs := []*net.TCPAddr{}

	// Initialize the listeners
	c.reloadFuncsLock.Lock()
	lns := make([]net.Listener, 0, len(config.Listeners))
	for i, lnConfig := range config.Listeners {
		if lnConfig.Type == "atlas" {
			if config.ClusterName == "" {
				c.Ui.Output("cluster_name is not set in the config and is a required value")
				return 1
			}

			lnConfig.Config["cluster_name"] = config.ClusterName
		}

		ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, logGate)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing listener of type %s: %s",
				lnConfig.Type, err))
			return 1
		}

		lns = append(lns, ln)

		if reloadFunc != nil {
			relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type]
			relSlice = append(relSlice, reloadFunc)
			(*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice
		}

		if !disableClustering && lnConfig.Type == "tcp" {
			var addr string
			var ok bool
			if addr, ok = lnConfig.Config["cluster_address"]; ok {
				tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
				if err != nil {
					c.Ui.Output(fmt.Sprintf(
						"Error resolving cluster_address: %s",
						err))
					return 1
				}
				clusterAddrs = append(clusterAddrs, tcpAddr)
			} else {
				tcpAddr, ok := ln.Addr().(*net.TCPAddr)
				if !ok {
					c.Ui.Output("Failed to parse tcp listener")
					return 1
				}
				clusterAddr := &net.TCPAddr{
					IP:   tcpAddr.IP,
					Port: tcpAddr.Port + 1,
				}
				clusterAddrs = append(clusterAddrs, clusterAddr)
				addr = clusterAddr.String()
			}
			props["cluster address"] = addr
		}

		// Store the listener props for output later
		key := fmt.Sprintf("listener %d", i+1)
		propsList := make([]string, 0, len(props))
		for k, v := range props {
			propsList = append(propsList, fmt.Sprintf(
				"%s: %q", k, v))
		}
		sort.Strings(propsList)
		infoKeys = append(infoKeys, key)
		info[key] = fmt.Sprintf(
			"%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))

	}
	c.reloadFuncsLock.Unlock()
	if !disableClustering {
		if c.logger.IsTrace() {
			c.logger.Trace("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs)
		}
	}

	// Make sure we close all listeners from this point on
	listenerCloseFunc := func() {
		for _, ln := range lns {
			ln.Close()
		}
	}

	defer c.cleanupGuard.Do(listenerCloseFunc)

	infoKeys = append(infoKeys, "version")
	verInfo := version.GetVersion()
	info["version"] = verInfo.FullVersionNumber(false)
	if verInfo.Revision != "" {
		info["version sha"] = strings.Trim(verInfo.Revision, "'")
		infoKeys = append(infoKeys, "version sha")
	}
	infoKeys = append(infoKeys, "cgo")
	info["cgo"] = "disabled"
	if version.CgoEnabled {
		info["cgo"] = "enabled"
	}

	// Server configuration output
	padding := 24
	sort.Strings(infoKeys)
	c.Ui.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.Ui.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.Ui.Output("")

	if verifyOnly {
		return 0
	}

	// Perform service discovery registrations and initialization of
	// HTTP server after the verifyOnly check.

	// Instantiate the wait group
	c.WaitGroup = &sync.WaitGroup{}

	// If the backend supports service discovery, run service discovery
	if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
		sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery)
		if ok {
			activeFunc := func() bool {
				if isLeader, _, err := core.Leader(); err == nil {
					return isLeader
				}
				return false
			}

			sealedFunc := func() bool {
				if sealed, err := core.Sealed(); err == nil {
					return sealed
				}
				return true
			}

			if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, sealedFunc); err != nil {
				c.Ui.Output(fmt.Sprintf("Error initializing service discovery: %v", err))
				return 1
			}
		}
	}

	handler := vaulthttp.Handler(core)

	// This needs to happen before we first unseal, so before we trigger dev
	// mode if it's set
	core.SetClusterListenerAddrs(clusterAddrs)
	core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, c.logger))

	// If we're in dev mode, then initialize the core
	if dev {
		init, err := c.enableDev(core, devRootTokenID)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing dev mode: %s", err))
			return 1
		}

		export := "export"
		quote := "'"
		if runtime.GOOS == "windows" {
			export = "set"
			quote = ""
		}

		c.Ui.Output(fmt.Sprintf(
			"==> WARNING: Dev mode is enabled!\n\n"+
				"In this mode, Vault is completely in-memory and unsealed.\n"+
				"Vault is configured to only have a single unseal key. The root\n"+
				"token has already been authenticated with the CLI, so you can\n"+
				"immediately begin using the Vault CLI.\n\n"+
				"The only step you need to take is to set the following\n"+
				"environment variables:\n\n"+
				"    "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"]+quote+"\n\n"+
				"The unseal key and root token are reproduced below in case you\n"+
				"want to seal/unseal the Vault or play with authentication.\n\n"+
				"Unseal Key: %s\nRoot Token: %s\n",
			base64.StdEncoding.EncodeToString(init.SecretShares[0]),
			init.RootToken,
		))
	}

	// Initialize the HTTP server
	server := &http.Server{}
	server.Handler = handler
	for _, ln := range lns {
		go server.Serve(ln)
	}

	if newCoreError != nil {
		c.Ui.Output("==> Warning:\n\nNon-fatal error during initialization; check the logs for more information.")
		c.Ui.Output("")
	}

	// Output the header that the server has started
	c.Ui.Output("==> Vault server started! Log data will stream in below:\n")

	// Release the log gate.
	logGate.Flush()

	// Wait for shutdown
	shutdownTriggered := false

	for !shutdownTriggered {
		select {
		case <-c.ShutdownCh:
			c.Ui.Output("==> Vault shutdown triggered")

			// Stop the listeners so that we don't process further client requests.
			c.cleanupGuard.Do(listenerCloseFunc)

			// Shutdown will wait until after Vault is sealed, which means the
			// request forwarding listeners will also be closed (and also
			// waited for).
			if err := core.Shutdown(); err != nil {
				c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err))
			}

			shutdownTriggered = true

		case <-c.SighupCh:
			c.Ui.Output("==> Vault reload triggered")
			if err := c.Reload(configPath); err != nil {
				c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
			}
		}
	}

	// Wait for dependent goroutines to complete
	c.WaitGroup.Wait()
	return 0
}
Example #8
	"github.com/spf13/viper"
	"github.com/subsilent/kappa/auth"
	"github.com/subsilent/kappa/datamodel"
	"github.com/subsilent/kappa/ssh"
)

// ServerCmd is the kappa root command.
var ServerCmd = &cobra.Command{
	Use:   "server",
	Short: "server starts the database server",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {

		// Create logger
		writer := log.NewConcurrentWriter(os.Stdout)
		logger := log.NewLogger(writer, "kappa")

		err := InitializeConfig(writer)
		if err != nil {
			return
		}

		// Create data directory
		if err := os.MkdirAll(viper.GetString("DataPath"), 0755); err != nil {
			logger.Warn("Could not create data directory", "err", err.Error())
			return
		}

		// Connect to database
		cwd, err := os.Getwd()
		if err != nil {
Example #9
	"github.com/blacklabeldata/kappa/server"
	log "github.com/mgutz/logxi/v1"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// ServerCmd is the kappa root command.
var ServerCmd = &cobra.Command{
	Use:   "server",
	Short: "server starts the database server",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {

		// Create logger
		writer := log.NewConcurrentWriter(os.Stderr)
		logger := log.NewLogger(writer, "kappa")

		// Initialize config
		err := InitializeConfig(writer)
		if err != nil {
			return
		}

		// Create server config
		cfg := server.DatabaseConfig{
			LogOutput:             writer,
			NodeName:              viper.GetString("NodeName"),
			ClusterName:           viper.GetString("ClusterName"),
			ExistingNodes:         strings.Split(viper.GetString("ClusterNodes"), ","),
			Bootstrap:             viper.GetBool("Bootstrap"),
			BootstrapExpect:       viper.GetInt("BootstrapExpect"),
Example #10
	"github.com/blacklabeldata/kappa/auth"
	log "github.com/mgutz/logxi/v1"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// InitCACmd is the kappa root command.
var InitCACmd = &cobra.Command{
	Use:   "init-ca",
	Short: "init-ca creates a new certificate authority",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {

		// Create logger
		writer := log.NewConcurrentWriter(os.Stdout)
		logger := log.NewLogger(writer, "init-ca")

		err := InitializeConfig(writer)
		if err != nil {
			return
		}

		// Setup directory structure
		if err := auth.CreatePkiDirectories(logger, "."); err != nil {
			return
		}

		// Create file paths
		pki := path.Join(".", "pki")
		crtFile := path.Join(pki, "ca.crt")
		privFile := path.Join(pki, "private", "ca.key")
Example #11
// NewVaultLoggerWithWriter creates a new logger with the specified level and
// writer and a Vault formatter
func NewVaultLoggerWithWriter(w io.Writer, level int) log.Logger {
	logger := log.NewLogger(w, "vault")
	return setLevelFormatter(logger, level, createVaultFormatter())
}
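
Example usage (assumed), pairing the constructor with the logxi level constants seen in the server command above:

// Create a Vault-formatted logger at debug level and emit a key-value entry.
logger := logformat.NewVaultLoggerWithWriter(os.Stderr, log.LevelDebug)
logger.Debug("backend initialized", "type", "inmem")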
Example #12
func New(c *Config) (cer Cerebrum, err error) {

	// Create logger
	if c.LogOutput == nil {
		c.LogOutput = log.NewConcurrentWriter(os.Stderr)
	}
	logger := log.NewLogger(c.LogOutput, "kappa")

	// Create data directory
	if err = os.MkdirAll(c.DataPath, 0755); err != nil {
		logger.Warn("Could not create data directory", "err", err)
		return
	}

	// Setup reconciler
	serfEventCh := make(chan serf.Event, 256)
	reconcilerCh := make(chan serf.Member, 32)

	ctx, cancel := context.WithCancel(context.Background())
	cereb := &cerebrum{
		config:      c,
		logger:      logger,
		dialer:      NewDialer(NewPool(c.LogOutput, 5*time.Minute, c.TLSConfig)),
		serfEventCh: serfEventCh,
		reconcileCh: reconcilerCh,
		grim:        grim.ReaperWithContext(ctx),
		context:     ctx,
		cancel:      cancel,
	}

	// Create raft server
	err = cereb.setupRaft()
	if err != nil {
		err = logger.Error("Failed to start raft", "error", err)
		return nil, err
	}

	isLeader := func() bool { return cereb.raft.State() == raft.Leader }
	reconciler := &Reconciler{reconcilerCh, isLeader}
	cereb.serfer = serfer.NewSerfer(serfEventCh, serfer.SerfEventHandler{
		Logger:              log.NewLogger(c.LogOutput, CerebrumEventPrefix),
		ServicePrefix:       CerebrumEventPrefix,
		ReconcileOnJoin:     true,
		ReconcileOnLeave:    true,
		ReconcileOnFail:     true,
		ReconcileOnUpdate:   true,
		ReconcileOnReap:     true,
		NodeJoined:          c.NodeJoined,
		NodeUpdated:         c.NodeUpdated,
		NodeLeft:            c.NodeLeft,
		NodeFailed:          c.NodeFailed,
		NodeReaped:          c.NodeReaped,
		UserEvent:           c.UserEvent,
		UnknownEventHandler: c.UnknownEventHandler,
		Reconciler:          reconciler,
		IsLeader:            isLeader,
		IsLeaderEvent: func(name string) bool {
			return name == CerebrumLeaderEvent
		},
		LeaderElectionHandler: cereb,
	})

	// Create serf server
	cereb.serf, err = cereb.setupSerf()
	if err != nil {
		err = logger.Error("Failed to start serf", "error", err)
		return nil, err
	}

	cer = cereb
	return cer, nil
}
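
A hypothetical caller is sketched below; only DataPath is exercised directly by the body of New shown above, and a real application would also need to populate the raft, serf, and TLS settings consumed by setupRaft and setupSerf.

cer, err := New(&Config{
	DataPath: "/var/lib/kappa", // created via os.MkdirAll inside New
})
if err != nil {
	os.Exit(1) // the cause was already logged through logxi
}
_ = cer // lifecycle methods depend on the Cerebrum interface, not shown here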
Example #13
// setupRaft is used to setup and initialize Raft
func (c *cerebrum) setupRaft() error {

	// If we are in bootstrap mode, enable a single node cluster
	if c.config.Bootstrap {
		c.config.RaftConfig.EnableSingleNode = true
	}

	// Create the base state path
	statePath := filepath.Join(c.config.DataPath, tmpStatePath)
	if err := os.RemoveAll(statePath); err != nil {
		return err
	}
	if err := os.MkdirAll(filepath.Dir(statePath), 0755); err != nil {
		return err
	}

	// Create the base raft path
	path := filepath.Join(c.config.DataPath, RaftStateDir)
	if err := os.MkdirAll(path, 0755); err != nil {
		return err
	}

	// Create the backend raft store for logs and stable storage
	store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
	if err != nil {
		return err
	}
	c.raftStore = store

	// Wrap the store in a LogCache to improve performance
	cacheStore, err := raft.NewLogCache(c.config.LogCacheSize, store)
	if err != nil {
		store.Close()
		return err
	}

	// Create the snapshot store
	snapshots, err := raft.NewFileSnapshotStore(path, c.config.SnapshotsRetained, c.config.LogOutput)
	if err != nil {
		store.Close()
		return err
	}

	// Try to bind
	addr, err := net.ResolveTCPAddr("tcp", c.config.RaftBindAddr)
	if err != nil {
		return err
	}

	// Start TCP listener
	listener, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}

	// Create connection layer and transport
	layer := NewRaftLayer(c.dialer, listener.Addr(), c.config.TLSConfig)
	c.raftTransport = raft.NewNetworkTransport(layer, 3, 10*time.Second, c.config.LogOutput)

	// Create TLS connection dispatcher
	dispatcher := yamuxer.NewDispatcher(log.NewLogger(c.config.LogOutput, "dispatcher"), nil)
	dispatcher.Register(connRaft, layer)
	dispatcher.Register(connForward, &ForwardingHandler{c.applier, log.NewLogger(c.config.LogOutput, "forwarder")})

	// Create TLS connection muxer
	c.muxer = yamuxer.New(c.context, &yamuxer.Config{
		Listener:   listener,
		TLSConfig:  c.config.TLSConfig,
		Deadline:   c.config.ConnectionDeadline,
		LogOutput:  c.config.LogOutput,
		Dispatcher: dispatcher,
	})

	// Setup the peer store
	c.raftPeers = raft.NewJSONPeers(path, c.raftTransport)

	// Ensure local host is always included if we are in bootstrap mode
	if c.config.Bootstrap {
		peers, err := c.raftPeers.Peers()
		if err != nil {
			store.Close()
			return err
		}
		if !raft.PeerContained(peers, c.raftTransport.LocalAddr()) {
			c.raftPeers.SetPeers(raft.AddUniquePeer(peers, c.raftTransport.LocalAddr()))
		}
	}

	// Make sure we set the LogOutput
	c.config.RaftConfig.LogOutput = c.config.LogOutput

	// Setup the Raft store
	c.raft, err = raft.NewRaft(c.config.RaftConfig, c.fsm, cacheStore, store,
		snapshots, c.raftPeers, c.raftTransport)
	if err != nil {
		store.Close()
		c.raftTransport.Close()
		return err
	}

	// Setup forwarding and applier
	c.forwarder = NewForwarder(c.raft, c.dialer, log.NewLogger(c.config.LogOutput, "forwarder"))
	c.applier = NewApplier(c.raft, c.forwarder, log.NewLogger(c.config.LogOutput, "applier"), c.config.EnqueueTimeout)

	// // Start monitoring leadership
	// c.t.Go(func() error {
	// 	c.monitorLeadership()
	// 	return nil
	// })
	return nil
}
Example #14
func main() {

	// Create logger
	writer := log.NewConcurrentWriter(os.Stdout)
	logger := log.NewLogger(writer, "sshh")

	// Get private key
	privateKey, err := ssh.ParsePrivateKey([]byte(privateKey))
	if err != nil {
		logger.Warn("Private key could not be parsed", "error", err.Error())
	}

	// Setup server config
	config := sshh.Config{
		Deadline: time.Second,
		Logger:   logger,
		Bind:     ":9022",
		Handlers: map[string]sshh.SSHHandler{
			"session": NewShellHandler(logger),
		},
		PrivateKey: privateKey,
		PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (perm *ssh.Permissions, err error) {
			if conn.User() == "admin" && string(password) == "password" {

				// Add username to permissions
				perm = &ssh.Permissions{
					Extensions: map[string]string{
						"username": conn.User(),
					},
				}
			} else {
				err = fmt.Errorf("Invalid username or password")
			}
			return
		},
		AuthLogCallback: func(conn ssh.ConnMetadata, method string, err error) {
			if err == nil {
				logger.Info("Successful login", "user", conn.User(), "method", method)
			}
		},
		// PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (perm *ssh.Permissions, err error) {
		// 	return nil, fmt.Errorf("Unauthorized")
		// },
	}

	// Create SSH server
	sshServer, err := sshh.NewSSHServer(&config)
	if err != nil {
		logger.Error("SSH Server could not be configured", "error", err.Error())
		return
	}

	// Start servers
	sshServer.Start()

	// Handle signals
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, os.Kill)

	// Wait for signal
	logger.Info("Ready to serve requests")

	// Block until signal is received
	<-sig

	// Stop listening for signals and close channel
	signal.Stop(sig)
	close(sig)

	// Shut down SSH server
	logger.Info("Shutting down servers.")
	sshServer.Stop()
}
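
A client-side smoke test for the server above (a hypothetical helper, not part of the original example), dialing with the hard-coded credentials and opening the "session" channel that NewShellHandler services:

func dialExample() error {
	cfg := &ssh.ClientConfig{
		User:            "admin",
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // acceptable only for a local test
	}
	client, err := ssh.Dial("tcp", "localhost:9022", cfg)
	if err != nil {
		return err
	}
	defer client.Close()

	// Open the "session" channel handled by NewShellHandler.
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	return session.Close()
}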
Example #15
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/blacklabeldata/kappa/auth"
)

// NewCertCmd is the kappa root command.
var NewCertCmd = &cobra.Command{
	Use:   "new-cert",
	Short: "new-cert creates a new certificate",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {

		// Create logger
		writer := log.NewConcurrentWriter(os.Stdout)
		logger := log.NewLogger(writer, "new-cert")

		err := InitializeConfig(writer)
		if err != nil {
			return
		}

		// Setup directory structure
		if err := auth.CreatePkiDirectories(logger, "."); err != nil {
			return
		}

		// Create file paths
		pki := path.Join(".", "pki")
		reqFile := path.Join(pki, "reqs", viper.GetString("Name")+".req")
		privFile := path.Join(pki, "private", viper.GetString("Name")+".key")
Example #16
	"github.com/blacklabeldata/xbinary"
	"golang.org/x/crypto/ssh"
	// "golang.org/x/crypto/ssh/terminal"
	"github.com/subsilent/crypto/ssh/terminal"
)

// ClientCmd is the CLI command
var ClientCmd = &cobra.Command{
	Use:   "client [ssh://username@host:port]",
	Short: "client starts a terminal with the given kappa server",
	Long:  ``,
	Run: func(cmd *cobra.Command, args []string) {

		// Create logger
		writer := log.NewConcurrentWriter(os.Stdout)
		logger := log.NewLogger(writer, "cli")

		err := InitializeClientConfig(logger)
		if err != nil {
			return
		}

		// Get SSH Key file
		keyFile := viper.GetString("ClientKey")
		// logger.Info("Reading private key", "file", sshKeyFile)

		// Read SSH Key
		keyBytes, err := ioutil.ReadFile(keyFile)
		if err != nil {
			fmt.Println("Private key could not be read:", err.Error())
			fmt.Println(cmd.Help())
Example #17
func NewServer(c *DatabaseConfig) (server *Server, err error) {

	// Create logger
	if c.LogOutput == nil {
		c.LogOutput = log.NewConcurrentWriter(os.Stdout)
	}
	logger := log.NewLogger(c.LogOutput, "kappa")

	// Create data directory
	if err = os.MkdirAll(c.DataPath, 0755); err != nil {
		logger.Warn("Could not create data directory", "err", err)
		// logger.Warn("Could not create data directory", "err", err.Error())
		return
	}

	// Connect to database
	cwd, err := os.Getwd()
	if err != nil {
		logger.Error("Could not get working directory", "error", err.Error())
		return
	}

	file := path.Join(cwd, c.DataPath, "meta.db")
	logger.Info("Connecting to database", "file", file)
	system, err := datamodel.NewSystem(file)
	if err != nil {
		logger.Error("Could not connect to database", "error", err.Error())
		return
	}

	// Get SSH Key file
	sshKeyFile := c.SSHPrivateKeyFile
	logger.Info("Reading private key", "file", sshKeyFile)

	privateKey, err := auth.ReadPrivateKey(logger, sshKeyFile)
	if err != nil {
		return
	}

	// Get admin certificate
	adminCertFile := c.AdminCertificateFile
	logger.Info("Reading admin public key", "file", adminCertFile)

	// Read admin certificate
	cert, err := ioutil.ReadFile(adminCertFile)
	if err != nil {
		logger.Error("admin certificate could not be read", "filename", c.AdminCertificateFile)
		return
	}

	// Add admin cert to key ring
	userStore, err := system.Users()
	if err != nil {
		logger.Error("could not get user store", "error", err.Error())
		return
	}

	// Create admin account
	admin, err := userStore.Create("admin")
	if err != nil {
		logger.Error("error creating admin account", "error", err.Error())
		return
	}

	// Add admin certificate
	keyRing := admin.KeyRing()
	fingerprint, err := keyRing.AddPublicKey(cert)
	if err != nil {
		logger.Error("admin certificate could not be added", "error", err.Error())
		return
	}
	logger.Info("Added admin certificate", "fingerprint", fingerprint)

	// Read root cert
	rootPem, err := ioutil.ReadFile(c.CACertificateFile)
	if err != nil {
		logger.Error("root certificate could not be read", "filename", c.CACertificateFile)
		return
	}

	// Create certificate pool
	roots := x509.NewCertPool()
	if ok := roots.AppendCertsFromPEM(rootPem); !ok {
		logger.Error("failed to parse root certificate")
		return
	}

	// Setup SSH Server
	sshLogger := log.NewLogger(c.LogOutput, "ssh")
	pubKeyCallback, err := PublicKeyCallback(system)
	if err != nil {
		logger.Error("failed to create PublicKeyCallback", err)
		return
	}

	// Setup server config
	config := sshh.Config{
		Deadline:          c.SSHConnectionDeadline,
		Logger:            sshLogger,
		Bind:              c.SSHBindAddress,
		PrivateKey:        privateKey,
		PublicKeyCallback: pubKeyCallback,
		AuthLogCallback: func(meta ssh.ConnMetadata, method string, err error) {
			if err == nil {
				sshLogger.Info("login success", "user", meta.User())
			} else if err != nil && method == "publickey" {
				sshLogger.Info("login failure", "user", meta.User(), "err", err.Error())
			}
		},
		Handlers: map[string]sshh.SSHHandler{
			"kappa-client": &EchoHandler{},
		},
	}

	// Create SSH server
	sshServer, err := sshh.NewSSHServer(&config)
	if err != nil {
		logger.Error("SSH Server could not be configured", "error", err.Error())
		return
	}

	// Setup Serf handlers
	mgr := NewNodeList()
	reconcilerCh := make(chan serf.Member, 32)
	serfEventCh := make(chan serf.Event, 256)
	userEventCh := make(chan serf.UserEvent, 256)
	serfer := serfer.NewSerfer(serfEventCh, serfer.SerfEventHandler{
		Logger:            log.NewLogger(c.LogOutput, "serf"),
		ServicePrefix:     "kappa",
		ReconcileOnJoin:   true,
		ReconcileOnLeave:  true,
		ReconcileOnFail:   true,
		ReconcileOnUpdate: true,
		ReconcileOnReap:   true,
		NodeJoined: &SerfNodeJoinHandler{
			mgr, log.NewLogger(c.LogOutput, "serf:node-join")},
		NodeUpdated: &SerfNodeUpdateHandler{
			mgr, log.NewLogger(c.LogOutput, "serf:node-update")},
		NodeLeft: &SerfNodeLeaveHandler{
			mgr, log.NewLogger(c.LogOutput, "serf:node-left")},
		NodeFailed: &SerfNodeLeaveHandler{
			mgr, log.NewLogger(c.LogOutput, "serf:node-fail")},
		NodeReaped: &SerfNodeLeaveHandler{
			mgr, log.NewLogger(c.LogOutput, "serf:node-reap")},
		UserEvent: &SerfUserEventHandler{
			log.NewLogger(c.LogOutput, "serf:user-events"), userEventCh},
		UnknownEventHandler: &SerfUserEventHandler{
			log.NewLogger(c.LogOutput, "serf:unknown-event"), userEventCh},
		Reconciler: &SerfReconciler{reconcilerCh},
		IsLeader: func() bool {

			// TODO: Replace with Raft IsLeader check
			return true
		},
	})

	// Create database server
	s := &Server{
		config:       c,
		logger:       logger,
		sshServer:    &sshServer,
		serfer:       serfer,
		localKappas:  make(map[string]*NodeDetails),
		serfEventCh:  serfEventCh,
		kappaEventCh: userEventCh,
		reconcileCh:  reconcilerCh,
	}

	// Create serf server
	s.serf, err = s.setupSerf()
	if err != nil {
		err = logger.Error("Failed to start serf", "error", err)
		return
	}

	return s, nil
}