func handleSysUnseal(core *vault.Core) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case "PUT":
		case "POST":
		default:
			respondError(w, http.StatusMethodNotAllowed, nil)
			return
		}

		// Parse the request
		var req UnsealRequest
		if err := parseRequest(r, &req); err != nil {
			respondError(w, http.StatusBadRequest, err)
			return
		}
		if !req.Reset && req.Key == "" {
			respondError(
				w, http.StatusBadRequest,
				errors.New("'key' must be specified in request body as JSON, or 'reset' set to true"))
			return
		}

		if req.Reset {
			sealed, err := core.Sealed()
			if err != nil {
				respondError(w, http.StatusInternalServerError, err)
				return
			}
			if !sealed {
				respondError(w, http.StatusBadRequest, errors.New("vault is unsealed"))
				return
			}
			core.ResetUnsealProcess()
		} else {
			// Decode the key, which is hex encoded
			key, err := hex.DecodeString(req.Key)
			if err != nil {
				respondError(
					w, http.StatusBadRequest,
					errors.New("'key' must be a valid hex-string"))
				return
			}

			// Attempt the unseal
			if _, err := core.Unseal(key); err != nil {
				// Ignore ErrInvalidKey because it's a user error that we
				// mask away. We just show them the seal status.
				if !errwrap.ContainsType(err, new(vault.ErrInvalidKey)) {
					respondError(w, http.StatusInternalServerError, err)
					return
				}
			}
		}

		// Return the seal status
		handleSysSealStatusRaw(core, w, r)
	})
}
// Determines the type of the error being returned and sets the HTTP
// status code appropriately
func respondErrorStatus(w http.ResponseWriter, err error) {
	status := http.StatusInternalServerError
	switch {
	// Keep adding more error types here to map them to the appropriate
	// status codes
	case err != nil && errwrap.ContainsType(err, new(vault.StatusBadRequest)):
		status = http.StatusBadRequest
	}
	respondError(w, status, err)
}
func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request) {
	// Parse the request
	var req InitRequest
	if err := parseRequest(r, &req); err != nil {
		respondError(w, http.StatusBadRequest, err)
		return
	}

	// Initialize
	barrierConfig := &vault.SealConfig{
		SecretShares:    req.SecretShares,
		SecretThreshold: req.SecretThreshold,
		StoredShares:    req.StoredShares,
		PGPKeys:         req.PGPKeys,
	}

	recoveryConfig := &vault.SealConfig{
		SecretShares:    req.RecoveryShares,
		SecretThreshold: req.RecoveryThreshold,
		PGPKeys:         req.RecoveryPGPKeys,
	}

	result, initErr := core.Initialize(barrierConfig, recoveryConfig)
	if initErr != nil {
		if !errwrap.ContainsType(initErr, new(vault.NonFatalError)) {
			respondError(w, http.StatusBadRequest, initErr)
			return
		} else {
			// Add a warnings field? The error will be logged in the vault log
			// already.
		}
	}

	// Encode the keys
	keys := make([]string, 0, len(result.SecretShares))
	for _, k := range result.SecretShares {
		keys = append(keys, hex.EncodeToString(k))
	}

	resp := &InitResponse{
		Keys:      keys,
		RootToken: result.RootToken,
	}

	if len(result.RecoveryShares) > 0 {
		resp.RecoveryKeys = make([]string, 0, len(result.RecoveryShares))
		for _, k := range result.RecoveryShares {
			resp.RecoveryKeys = append(resp.RecoveryKeys, hex.EncodeToString(k))
		}
	}

	core.UnsealWithStoredKeys()

	respondOk(w, resp)
}
func respondErrorCommon(w http.ResponseWriter, resp *logical.Response, err error) bool {
	// If there are no errors return
	if err == nil && (resp == nil || !resp.IsError()) {
		return false
	}

	// Start out with internal server error since in most of these cases there
	// won't be a response so this won't be overridden
	statusCode := http.StatusInternalServerError
	// If we actually have a response, start out with bad request
	if resp != nil {
		statusCode = http.StatusBadRequest
	}

	// Now, check the error itself; if it has a specific logical error, set the
	// appropriate code
	if err != nil {
		switch {
		case errwrap.ContainsType(err, new(vault.StatusBadRequest)):
			statusCode = http.StatusBadRequest
		case errwrap.Contains(err, logical.ErrPermissionDenied.Error()):
			statusCode = http.StatusForbidden
		case errwrap.Contains(err, logical.ErrUnsupportedOperation.Error()):
			statusCode = http.StatusMethodNotAllowed
		case errwrap.Contains(err, logical.ErrUnsupportedPath.Error()):
			statusCode = http.StatusNotFound
		case errwrap.Contains(err, logical.ErrInvalidRequest.Error()):
			statusCode = http.StatusBadRequest
		}
	}

	if resp != nil && resp.IsError() {
		err = fmt.Errorf("%s", resp.Data["error"].(string))
	}

	respondError(w, statusCode, err)
	return true
}
func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request) {
	// Parse the request
	var req InitRequest
	if err := parseRequest(r, &req); err != nil {
		respondError(w, http.StatusBadRequest, err)
		return
	}

	// Initialize
	barrierConfig := &vault.SealConfig{
		SecretShares:    req.SecretShares,
		SecretThreshold: req.SecretThreshold,
		StoredShares:    req.StoredShares,
		PGPKeys:         req.PGPKeys,
	}

	recoveryConfig := &vault.SealConfig{
		SecretShares:    req.RecoveryShares,
		SecretThreshold: req.RecoveryThreshold,
		PGPKeys:         req.RecoveryPGPKeys,
	}

	if core.SealAccess().StoredKeysSupported() {
		if barrierConfig.SecretShares != 1 {
			respondError(w, http.StatusBadRequest, fmt.Errorf("secret shares must be 1"))
			return
		}
		if barrierConfig.SecretThreshold != barrierConfig.SecretShares {
			respondError(w, http.StatusBadRequest, fmt.Errorf("secret threshold must be same as secret shares"))
			return
		}
		if barrierConfig.StoredShares != barrierConfig.SecretShares {
			respondError(w, http.StatusBadRequest, fmt.Errorf("stored shares must be same as secret shares"))
			return
		}
		if barrierConfig.PGPKeys != nil && len(barrierConfig.PGPKeys) > 0 {
			respondError(w, http.StatusBadRequest, fmt.Errorf("PGP keys not supported when storing shares"))
			return
		}
	} else {
		if barrierConfig.StoredShares > 0 {
			respondError(w, http.StatusBadRequest, fmt.Errorf("stored keys are not supported"))
			return
		}
	}

	initParams := &vault.InitParams{
		BarrierConfig:   barrierConfig,
		RecoveryConfig:  recoveryConfig,
		RootTokenPGPKey: req.RootTokenPGPKey,
	}

	result, initErr := core.Initialize(initParams)
	if initErr != nil {
		if !errwrap.ContainsType(initErr, new(vault.NonFatalError)) {
			respondError(w, http.StatusBadRequest, initErr)
			return
		} else {
			// Add a warnings field? The error will be logged in the vault log
			// already.
		}
	}

	// Encode the keys
	keys := make([]string, 0, len(result.SecretShares))
	keysB64 := make([]string, 0, len(result.SecretShares))
	for _, k := range result.SecretShares {
		keys = append(keys, hex.EncodeToString(k))
		keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k))
	}

	resp := &InitResponse{
		Keys:      keys,
		KeysB64:   keysB64,
		RootToken: result.RootToken,
	}

	if len(result.RecoveryShares) > 0 {
		resp.RecoveryKeys = make([]string, 0, len(result.RecoveryShares))
		resp.RecoveryKeysB64 = make([]string, 0, len(result.RecoveryShares))
		for _, k := range result.RecoveryShares {
			resp.RecoveryKeys = append(resp.RecoveryKeys, hex.EncodeToString(k))
			resp.RecoveryKeysB64 = append(resp.RecoveryKeysB64, base64.StdEncoding.EncodeToString(k))
		}
	}

	core.UnsealWithStoredKeys()

	respondOk(w, resp)
}
func (c *ServerCommand) Run(args []string) int {
	var dev, verifyOnly bool
	var configPath []string
	var logLevel, devRootTokenID, devListenAddress string
	flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
	flags.BoolVar(&dev, "dev", false, "")
	flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
	flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
	flags.StringVar(&logLevel, "log-level", "info", "")
	flags.BoolVar(&verifyOnly, "verify-only", false, "")
	flags.Usage = func() { c.Ui.Error(c.Help()) }
	flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
	if err := flags.Parse(args); err != nil {
		return 1
	}

	if os.Getenv("VAULT_DEV_ROOT_TOKEN_ID") != "" {
		devRootTokenID = os.Getenv("VAULT_DEV_ROOT_TOKEN_ID")
	}
	if os.Getenv("VAULT_DEV_LISTEN_ADDRESS") != "" {
		devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS")
	}

	// Validation
	if !dev {
		switch {
		case len(configPath) == 0:
			c.Ui.Error("At least one config path must be specified with -config")
			flags.Usage()
			return 1
		case devRootTokenID != "":
			c.Ui.Error("Root token ID can only be specified with -dev")
			flags.Usage()
			return 1
		case devListenAddress != "":
			c.Ui.Error("Development address can only be specified with -dev")
			flags.Usage()
			return 1
		}
	}

	// Load the configuration
	var config *server.Config
	if dev {
		config = server.DevConfig()
		if devListenAddress != "" {
			config.Listeners[0].Config["address"] = devListenAddress
		}
	}
	for _, path := range configPath {
		current, err := server.LoadConfig(path)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error loading configuration from %s: %s", path, err))
			return 1
		}

		if config == nil {
			config = current
		} else {
			config = config.Merge(current)
		}
	}

	// Ensure at least one config was found.
	if config == nil {
		c.Ui.Error("No configuration files found.")
		return 1
	}

	// Ensure that a backend is provided
	if config.Backend == nil {
		c.Ui.Error("A physical backend must be specified")
		return 1
	}

	// If mlockall(2) isn't supported, show a warning. We disable this
	// in dev because it is quite scary to see when first using Vault.
	if !dev && !mlock.Supported() {
		c.Ui.Output("==> WARNING: mlock not supported on this system!\n")
		c.Ui.Output(" An `mlockall(2)`-like syscall to prevent memory from being")
		c.Ui.Output(" swapped to disk is not supported on this system. Running")
		c.Ui.Output(" Vault on an mlockall(2) enabled system is much more secure.\n")
	}

	// Create a logger. We wrap it in a gated writer so that it doesn't
	// start logging too early.
	logGate := &gatedwriter.Writer{Writer: os.Stderr}
	logger := log.New(&logutils.LevelFilter{
		Levels: []logutils.LogLevel{
			"TRACE", "DEBUG", "INFO", "WARN", "ERR"},
		MinLevel: logutils.LogLevel(strings.ToUpper(logLevel)),
		Writer:   logGate,
	}, "", log.LstdFlags)

	if err := c.setupTelemetry(config); err != nil {
		c.Ui.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
		return 1
	}

	// Initialize the backend
	backend, err := physical.NewBackend(
		config.Backend.Type, config.Backend.Config)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error initializing backend of type %s: %s",
			config.Backend.Type, err))
		return 1
	}

	infoKeys := make([]string, 0, 10)
	info := make(map[string]string)

	var seal vault.Seal = &vault.DefaultSeal{}

	coreConfig := &vault.CoreConfig{
		Physical:           backend,
		AdvertiseAddr:      config.Backend.AdvertiseAddr,
		HAPhysical:         nil,
		Seal:               seal,
		AuditBackends:      c.AuditBackends,
		CredentialBackends: c.CredentialBackends,
		LogicalBackends:    c.LogicalBackends,
		Logger:             logger,
		DisableCache:       config.DisableCache,
		DisableMlock:       config.DisableMlock,
		MaxLeaseTTL:        config.MaxLeaseTTL,
		DefaultLeaseTTL:    config.DefaultLeaseTTL,
	}

	// Initialize the separate HA physical backend, if it exists
	var ok bool
	if config.HABackend != nil {
		habackend, err := physical.NewBackend(
			config.HABackend.Type, config.HABackend.Config)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error initializing backend of type %s: %s",
				config.HABackend.Type, err))
			return 1
		}

		if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
			c.Ui.Error("Specified HA backend does not support HA")
			return 1
		}
		coreConfig.AdvertiseAddr = config.HABackend.AdvertiseAddr
	} else {
		if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
			coreConfig.AdvertiseAddr = config.Backend.AdvertiseAddr
		}
	}

	if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
		coreConfig.AdvertiseAddr = envAA
	}

	// Attempt to detect the advertise address, if possible
	var detect physical.AdvertiseDetect
	if coreConfig.HAPhysical != nil {
		detect, ok = coreConfig.HAPhysical.(physical.AdvertiseDetect)
	} else {
		detect, ok = coreConfig.Physical.(physical.AdvertiseDetect)
	}
	if ok && coreConfig.AdvertiseAddr == "" {
		advertise, err := c.detectAdvertise(detect, config)
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error detecting advertise address: %s", err))
		} else if advertise == "" {
			c.Ui.Error("Failed to detect advertise address.")
		} else {
			coreConfig.AdvertiseAddr = advertise
		}
	}

	// Initialize the core
	core, newCoreError := vault.NewCore(coreConfig)
	if newCoreError != nil {
		if !errwrap.ContainsType(newCoreError, new(vault.NonFatalError)) {
			c.Ui.Error(fmt.Sprintf("Error initializing core: %s", newCoreError))
			return 1
		}
	}

	// If we're in dev mode, then initialize the core
	if dev {
		init, err := c.enableDev(core, devRootTokenID)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error initializing dev mode: %s", err))
			return 1
		}

		export := "export"
		quote := "'"
		if runtime.GOOS == "windows" {
			export = "set"
			quote = ""
		}

		c.Ui.Output(fmt.Sprintf(
			"==> WARNING: Dev mode is enabled!\n\n"+
				"In this mode, Vault is completely in-memory and unsealed.\n"+
				"Vault is configured to only have a single unseal key. The root\n"+
				"token has already been authenticated with the CLI, so you can\n"+
				"immediately begin using the Vault CLI.\n\n"+
				"The only step you need to take is to set the following\n"+
				"environment variables:\n\n"+
				" "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"]+quote+"\n\n"+
				"The unseal key and root token are reproduced below in case you\n"+
				"want to seal/unseal the Vault or play with authentication.\n\n"+
				"Unseal Key: %s\nRoot Token: %s\n",
			hex.EncodeToString(init.SecretShares[0]),
			init.RootToken,
		))
	}

	// Compile server information for output later
	info["backend"] = config.Backend.Type
	info["log level"] = logLevel
	info["mlock"] = fmt.Sprintf(
		"supported: %v, enabled: %v",
		mlock.Supported(), !config.DisableMlock)
	infoKeys = append(infoKeys, "log level", "mlock", "backend")

	if config.HABackend != nil {
		info["HA backend"] = config.HABackend.Type
		info["advertise address"] = coreConfig.AdvertiseAddr
		infoKeys = append(infoKeys, "HA backend", "advertise address")
	} else {
		// If the backend supports HA, then note it
		if coreConfig.HAPhysical != nil {
			info["backend"] += " (HA available)"
			info["advertise address"] = coreConfig.AdvertiseAddr
			infoKeys = append(infoKeys, "advertise address")
		}
	}

	// Initialize the listeners
	lns := make([]net.Listener, 0, len(config.Listeners))
	for i, lnConfig := range config.Listeners {
		ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config)
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error initializing listener of type %s: %s",
				lnConfig.Type, err))
			return 1
		}

		// Store the listener props for output later
		key := fmt.Sprintf("listener %d", i+1)
		propsList := make([]string, 0, len(props))
		for k, v := range props {
			propsList = append(propsList, fmt.Sprintf(
				"%s: %q", k, v))
		}
		sort.Strings(propsList)
		infoKeys = append(infoKeys, key)
		info[key] = fmt.Sprintf(
			"%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))

		lns = append(lns, ln)

		if reloadFunc != nil {
			relSlice := c.ReloadFuncs["listener|"+lnConfig.Type]
			relSlice = append(relSlice, reloadFunc)
			c.ReloadFuncs["listener|"+lnConfig.Type] = relSlice
		}
	}

	infoKeys = append(infoKeys, "version")
	info["version"] = version.GetVersion().String()

	// Server configuration output
	padding := 24
	sort.Strings(infoKeys)
	c.Ui.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.Ui.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.Ui.Output("")

	if verifyOnly {
		for _, listener := range lns {
			listener.Close()
		}
		return 0
	}

	// Initialize the HTTP server
	server := &http.Server{}
	server.Handler = vaulthttp.Handler(core)
	for _, ln := range lns {
		go server.Serve(ln)
	}

	if newCoreError != nil {
		c.Ui.Output("==> Warning:\n\nNon-fatal error during initialization; check the logs for more information.")
		c.Ui.Output("")
	}

	// Output the header that the server has started
	c.Ui.Output("==> Vault server started! Log data will stream in below:\n")

	// Release the log gate.
	logGate.Flush()

	// Wait for shutdown
	shutdownTriggered := false
	for !shutdownTriggered {
		select {
		case <-c.ShutdownCh:
			c.Ui.Output("==> Vault shutdown triggered")
			if err := core.Shutdown(); err != nil {
				c.Ui.Error(fmt.Sprintf("Error with core shutdown: %s", err))
			}
			shutdownTriggered = true
		case <-c.SighupCh:
			c.Ui.Output("==> Vault reload triggered")
			if err := c.Reload(configPath); err != nil {
				c.Ui.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
			}
		}
	}

	return 0
}
func (c *ServerCommand) Run(args []string) int {
	var dev, verifyOnly, devHA bool
	var configPath []string
	var logLevel, devRootTokenID, devListenAddress string
	flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
	flags.BoolVar(&dev, "dev", false, "")
	flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
	flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
	flags.StringVar(&logLevel, "log-level", "info", "")
	flags.BoolVar(&verifyOnly, "verify-only", false, "")
	flags.BoolVar(&devHA, "dev-ha", false, "")
	flags.Usage = func() { c.Ui.Output(c.Help()) }
	flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
	if err := flags.Parse(args); err != nil {
		return 1
	}

	// Create a logger. We wrap it in a gated writer so that it doesn't
	// start logging too early.
	logGate := &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
	var level int
	switch logLevel {
	case "trace":
		level = log.LevelTrace
	case "debug":
		level = log.LevelDebug
	case "info":
		level = log.LevelInfo
	case "notice":
		level = log.LevelNotice
	case "warn":
		level = log.LevelWarn
	case "err":
		level = log.LevelError
	default:
		c.Ui.Output(fmt.Sprintf("Unknown log level %s", logLevel))
		return 1
	}

	logFormat := os.Getenv("VAULT_LOG_FORMAT")
	if logFormat == "" {
		logFormat = os.Getenv("LOGXI_FORMAT")
	}
	switch strings.ToLower(logFormat) {
	case "vault", "vault_json", "vault-json", "vaultjson", "json", "":
		c.logger = logformat.NewVaultLoggerWithWriter(logGate, level)
	default:
		c.logger = log.NewLogger(logGate, "vault")
		c.logger.SetLevel(level)
	}
	grpclog.SetLogger(&grpclogFaker{
		logger: c.logger,
	})

	if os.Getenv("VAULT_DEV_ROOT_TOKEN_ID") != "" && devRootTokenID == "" {
		devRootTokenID = os.Getenv("VAULT_DEV_ROOT_TOKEN_ID")
	}
	if os.Getenv("VAULT_DEV_LISTEN_ADDRESS") != "" && devListenAddress == "" {
		devListenAddress = os.Getenv("VAULT_DEV_LISTEN_ADDRESS")
	}

	if devHA {
		dev = true
	}

	// Validation
	if !dev {
		switch {
		case len(configPath) == 0:
			c.Ui.Output("At least one config path must be specified with -config")
			flags.Usage()
			return 1
		case devRootTokenID != "":
			c.Ui.Output("Root token ID can only be specified with -dev")
			flags.Usage()
			return 1
		}
	}

	// Load the configuration
	var config *server.Config
	if dev {
		config = server.DevConfig(devHA)
		if devListenAddress != "" {
			config.Listeners[0].Config["address"] = devListenAddress
		}
	}
	for _, path := range configPath {
		current, err := server.LoadConfig(path, c.logger)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error loading configuration from %s: %s", path, err))
			return 1
		}

		if config == nil {
			config = current
		} else {
			config = config.Merge(current)
		}
	}

	// Ensure at least one config was found.
	if config == nil {
		c.Ui.Output("No configuration files found.")
		return 1
	}

	// Ensure that a backend is provided
	if config.Backend == nil {
		c.Ui.Output("A physical backend must be specified")
		return 1
	}

	// If mlockall(2) isn't supported, show a warning. We disable this
	// in dev because it is quite scary to see when first using Vault.
	if !dev && !mlock.Supported() {
		c.Ui.Output("==> WARNING: mlock not supported on this system!\n")
		c.Ui.Output(" An `mlockall(2)`-like syscall to prevent memory from being")
		c.Ui.Output(" swapped to disk is not supported on this system. Running")
		c.Ui.Output(" Vault on an mlockall(2) enabled system is much more secure.\n")
	}

	if err := c.setupTelemetry(config); err != nil {
		c.Ui.Output(fmt.Sprintf("Error initializing telemetry: %s", err))
		return 1
	}

	// Initialize the backend
	backend, err := physical.NewBackend(
		config.Backend.Type, c.logger, config.Backend.Config)
	if err != nil {
		c.Ui.Output(fmt.Sprintf(
			"Error initializing backend of type %s: %s",
			config.Backend.Type, err))
		return 1
	}

	infoKeys := make([]string, 0, 10)
	info := make(map[string]string)

	var seal vault.Seal = &vault.DefaultSeal{}

	// Ensure that the seal finalizer is called, even if using verify-only
	defer func() {
		if seal != nil {
			err = seal.Finalize()
			if err != nil {
				c.Ui.Error(fmt.Sprintf("Error finalizing seals: %v", err))
			}
		}
	}()

	if seal == nil {
		c.Ui.Error(fmt.Sprintf("Could not create seal"))
		return 1
	}

	coreConfig := &vault.CoreConfig{
		Physical:           backend,
		RedirectAddr:       config.Backend.RedirectAddr,
		HAPhysical:         nil,
		Seal:               seal,
		AuditBackends:      c.AuditBackends,
		CredentialBackends: c.CredentialBackends,
		LogicalBackends:    c.LogicalBackends,
		Logger:             c.logger,
		DisableCache:       config.DisableCache,
		DisableMlock:       config.DisableMlock,
		MaxLeaseTTL:        config.MaxLeaseTTL,
		DefaultLeaseTTL:    config.DefaultLeaseTTL,
		ClusterName:        config.ClusterName,
		CacheSize:          config.CacheSize,
	}

	var disableClustering bool

	// Initialize the separate HA physical backend, if it exists
	var ok bool
	if config.HABackend != nil {
		habackend, err := physical.NewBackend(
			config.HABackend.Type, c.logger, config.HABackend.Config)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing backend of type %s: %s",
				config.HABackend.Type, err))
			return 1
		}

		if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
			c.Ui.Output("Specified HA backend does not support HA")
			return 1
		}

		if !coreConfig.HAPhysical.HAEnabled() {
			c.Ui.Output("Specified HA backend has HA support disabled; please consult documentation")
			return 1
		}

		coreConfig.RedirectAddr = config.HABackend.RedirectAddr
		disableClustering = config.HABackend.DisableClustering
		if !disableClustering {
			coreConfig.ClusterAddr = config.HABackend.ClusterAddr
		}
	} else {
		if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
			coreConfig.RedirectAddr = config.Backend.RedirectAddr
			disableClustering = config.Backend.DisableClustering
			if !disableClustering {
				coreConfig.ClusterAddr = config.Backend.ClusterAddr
			}
		}
	}

	if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
		coreConfig.RedirectAddr = envRA
	} else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
		coreConfig.RedirectAddr = envAA
	}

	// Attempt to detect the redirect address, if possible
	var detect physical.RedirectDetect
	if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
		detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
	} else {
		detect, ok = coreConfig.Physical.(physical.RedirectDetect)
	}
	if ok && coreConfig.RedirectAddr == "" {
		redirect, err := c.detectRedirect(detect, config)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error detecting redirect address: %s", err))
		} else if redirect == "" {
			c.Ui.Output("Failed to detect redirect address.")
		} else {
			coreConfig.RedirectAddr = redirect
		}
	}

	// After the redirect bits are sorted out, if no cluster address was
	// explicitly given, derive one from the redirect addr
	if disableClustering {
		coreConfig.ClusterAddr = ""
	} else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
		coreConfig.ClusterAddr = envCA
	} else if coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "" {
		u, err := url.ParseRequestURI(coreConfig.RedirectAddr)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error parsing redirect address %s: %v", coreConfig.RedirectAddr, err))
			return 1
		}
		host, port, err := net.SplitHostPort(u.Host)
		if err != nil {
			// assume it's due to there not being a port specified, in which case
			// use 443
			host = u.Host
			port = "443"
		}
		nPort, nPortErr := strconv.Atoi(port)
		if nPortErr != nil {
			c.Ui.Output(fmt.Sprintf("Cannot parse %s as a numeric port: %v", port, nPortErr))
			return 1
		}
		u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
		// Will always be TLS-secured
		u.Scheme = "https"
		coreConfig.ClusterAddr = u.String()
	}
	if coreConfig.ClusterAddr != "" {
		// Force https as we'll always be TLS-secured
		u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
		if err != nil {
			c.Ui.Output(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.RedirectAddr, err))
			return 1
		}
		u.Scheme = "https"
		coreConfig.ClusterAddr = u.String()
	}

	// Initialize the core
	core, newCoreError := vault.NewCore(coreConfig)
	if newCoreError != nil {
		if !errwrap.ContainsType(newCoreError, new(vault.NonFatalError)) {
			c.Ui.Output(fmt.Sprintf("Error initializing core: %s", newCoreError))
			return 1
		}
	}

	// Copy the reload funcs pointers back
	c.reloadFuncs = coreConfig.ReloadFuncs
	c.reloadFuncsLock = coreConfig.ReloadFuncsLock

	// Compile server information for output later
	info["backend"] = config.Backend.Type
	info["log level"] = logLevel
	info["mlock"] = fmt.Sprintf(
		"supported: %v, enabled: %v",
		mlock.Supported(), !config.DisableMlock && mlock.Supported())
	infoKeys = append(infoKeys, "log level", "mlock", "backend")

	if config.HABackend != nil {
		info["HA backend"] = config.HABackend.Type
		info["redirect address"] = coreConfig.RedirectAddr
		infoKeys = append(infoKeys, "HA backend", "redirect address")
		if coreConfig.ClusterAddr != "" {
			info["cluster address"] = coreConfig.ClusterAddr
			infoKeys = append(infoKeys, "cluster address")
		}
	} else {
		// If the backend supports HA, then note it
		if coreConfig.HAPhysical != nil {
			if coreConfig.HAPhysical.HAEnabled() {
				info["backend"] += " (HA available)"
				info["redirect address"] = coreConfig.RedirectAddr
				infoKeys = append(infoKeys, "redirect address")
				if coreConfig.ClusterAddr != "" {
					info["cluster address"] = coreConfig.ClusterAddr
					infoKeys = append(infoKeys, "cluster address")
				}
			} else {
				info["backend"] += " (HA disabled)"
			}
		}
	}

	clusterAddrs := []*net.TCPAddr{}

	// Initialize the listeners
	c.reloadFuncsLock.Lock()
	lns := make([]net.Listener, 0, len(config.Listeners))
	for i, lnConfig := range config.Listeners {
		if lnConfig.Type == "atlas" {
			if config.ClusterName == "" {
				c.Ui.Output("cluster_name is not set in the config and is a required value")
				return 1
			}

			lnConfig.Config["cluster_name"] = config.ClusterName
		}

		ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, logGate)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing listener of type %s: %s",
				lnConfig.Type, err))
			return 1
		}

		lns = append(lns, ln)

		if reloadFunc != nil {
			relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type]
			relSlice = append(relSlice, reloadFunc)
			(*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice
		}

		if !disableClustering && lnConfig.Type == "tcp" {
			var addr string
			var ok bool
			if addr, ok = lnConfig.Config["cluster_address"]; ok {
				tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
				if err != nil {
					c.Ui.Output(fmt.Sprintf(
						"Error resolving cluster_address: %s", err))
					return 1
				}
				clusterAddrs = append(clusterAddrs, tcpAddr)
			} else {
				tcpAddr, ok := ln.Addr().(*net.TCPAddr)
				if !ok {
					c.Ui.Output("Failed to parse tcp listener")
					return 1
				}
				clusterAddrs = append(clusterAddrs, &net.TCPAddr{
					IP:   tcpAddr.IP,
					Port: tcpAddr.Port + 1,
				})
			}
			props["cluster address"] = addr
		}

		// Store the listener props for output later
		key := fmt.Sprintf("listener %d", i+1)
		propsList := make([]string, 0, len(props))
		for k, v := range props {
			propsList = append(propsList, fmt.Sprintf(
				"%s: %q", k, v))
		}
		sort.Strings(propsList)
		infoKeys = append(infoKeys, key)
		info[key] = fmt.Sprintf(
			"%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))
	}
	c.reloadFuncsLock.Unlock()
	if !disableClustering {
		if c.logger.IsTrace() {
			c.logger.Trace("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs)
		}
	}

	// Make sure we close all listeners from this point on
	listenerCloseFunc := func() {
		for _, ln := range lns {
			ln.Close()
		}
	}
	defer c.cleanupGuard.Do(listenerCloseFunc)

	infoKeys = append(infoKeys, "version")
	verInfo := version.GetVersion()
	info["version"] = verInfo.FullVersionNumber(false)
	if verInfo.Revision != "" {
		info["version sha"] = strings.Trim(verInfo.Revision, "'")
		infoKeys = append(infoKeys, "version sha")
	}
	infoKeys = append(infoKeys, "cgo")
	info["cgo"] = "disabled"
	if version.CgoEnabled {
		info["cgo"] = "enabled"
	}

	// Server configuration output
	padding := 24
	sort.Strings(infoKeys)
	c.Ui.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.Ui.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.Ui.Output("")

	if verifyOnly {
		return 0
	}

	// Perform service discovery registrations and initialization of
	// HTTP server after the verifyOnly check.

	// Instantiate the wait group
	c.WaitGroup = &sync.WaitGroup{}

	// If the backend supports service discovery, run service discovery
	if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
		sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery)
		if ok {
			activeFunc := func() bool {
				if isLeader, _, err := core.Leader(); err == nil {
					return isLeader
				}
				return false
			}

			sealedFunc := func() bool {
				if sealed, err := core.Sealed(); err == nil {
					return sealed
				}
				return true
			}

			if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, sealedFunc); err != nil {
				c.Ui.Output(fmt.Sprintf("Error initializing service discovery: %v", err))
				return 1
			}
		}
	}

	handler := vaulthttp.Handler(core)

	// This needs to happen before we first unseal, so before we trigger dev
	// mode if it's set
	core.SetClusterListenerAddrs(clusterAddrs)
	core.SetClusterSetupFuncs(vault.WrapHandlerForClustering(handler, c.logger))

	// If we're in dev mode, then initialize the core
	if dev {
		init, err := c.enableDev(core, devRootTokenID)
		if err != nil {
			c.Ui.Output(fmt.Sprintf(
				"Error initializing dev mode: %s", err))
			return 1
		}

		export := "export"
		quote := "'"
		if runtime.GOOS == "windows" {
			export = "set"
			quote = ""
		}

		c.Ui.Output(fmt.Sprintf(
			"==> WARNING: Dev mode is enabled!\n\n"+
				"In this mode, Vault is completely in-memory and unsealed.\n"+
				"Vault is configured to only have a single unseal key. The root\n"+
				"token has already been authenticated with the CLI, so you can\n"+
				"immediately begin using the Vault CLI.\n\n"+
				"The only step you need to take is to set the following\n"+
				"environment variables:\n\n"+
				" "+export+" VAULT_ADDR="+quote+"http://"+config.Listeners[0].Config["address"]+quote+"\n\n"+
				"The unseal key and root token are reproduced below in case you\n"+
				"want to seal/unseal the Vault or play with authentication.\n\n"+
				"Unseal Key: %s\nRoot Token: %s\n",
			base64.StdEncoding.EncodeToString(init.SecretShares[0]),
			init.RootToken,
		))
	}

	// Initialize the HTTP server
	server := &http.Server{}
	server.Handler = handler
	for _, ln := range lns {
		go server.Serve(ln)
	}

	if newCoreError != nil {
		c.Ui.Output("==> Warning:\n\nNon-fatal error during initialization; check the logs for more information.")
		c.Ui.Output("")
	}

	// Output the header that the server has started
	c.Ui.Output("==> Vault server started! Log data will stream in below:\n")

	// Release the log gate.
	logGate.Flush()

	// Wait for shutdown
	shutdownTriggered := false

	for !shutdownTriggered {
		select {
		case <-c.ShutdownCh:
			c.Ui.Output("==> Vault shutdown triggered")

			// Stop the listeners so that we don't process further client requests.
			c.cleanupGuard.Do(listenerCloseFunc)

			// Shutdown will wait until after Vault is sealed, which means the
			// request forwarding listeners will also be closed (and also
			// waited for).
			if err := core.Shutdown(); err != nil {
				c.Ui.Output(fmt.Sprintf("Error with core shutdown: %s", err))
			}

			shutdownTriggered = true
		case <-c.SighupCh:
			c.Ui.Output("==> Vault reload triggered")
			if err := c.Reload(configPath); err != nil {
				c.Ui.Output(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
			}
		}
	}

	// Wait for dependent goroutines to complete
	c.WaitGroup.Wait()

	return 0
}
func handleSysUnseal(core *vault.Core) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case "PUT":
		case "POST":
		default:
			respondError(w, http.StatusMethodNotAllowed, nil)
			return
		}

		// Parse the request
		var req UnsealRequest
		if err := parseRequest(r, w, &req); err != nil {
			respondError(w, http.StatusBadRequest, err)
			return
		}
		if !req.Reset && req.Key == "" {
			respondError(
				w, http.StatusBadRequest,
				errors.New("'key' must be specified in request body as JSON, or 'reset' set to true"))
			return
		}

		if req.Reset {
			sealed, err := core.Sealed()
			if err != nil {
				respondError(w, http.StatusInternalServerError, err)
				return
			}
			if !sealed {
				respondError(w, http.StatusBadRequest, errors.New("vault is unsealed"))
				return
			}
			core.ResetUnsealProcess()
		} else {
			// Decode the key, which is base64 or hex encoded
			min, max := core.BarrierKeyLength()
			key, err := hex.DecodeString(req.Key)
			// We check min and max here to ensure that a string that is base64
			// encoded but also valid hex will not be valid and we instead base64
			// decode it
			if err != nil || len(key) < min || len(key) > max {
				key, err = base64.StdEncoding.DecodeString(req.Key)
				if err != nil {
					respondError(
						w, http.StatusBadRequest,
						errors.New("'key' must be a valid hex or base64 string"))
					return
				}
			}

			// Attempt the unseal
			if _, err := core.Unseal(key); err != nil {
				switch {
				case errwrap.ContainsType(err, new(vault.ErrInvalidKey)):
				case errwrap.Contains(err, vault.ErrBarrierInvalidKey.Error()):
				case errwrap.Contains(err, vault.ErrBarrierNotInit.Error()):
				case errwrap.Contains(err, vault.ErrBarrierSealed.Error()):
				case errwrap.Contains(err, vault.ErrStandby.Error()):
				default:
					respondError(w, http.StatusInternalServerError, err)
					return
				}
				respondError(w, http.StatusBadRequest, err)
				return
			}
		}

		// Return the seal status
		handleSysSealStatusRaw(core, w, r)
	})
}
// Context returns a Terraform Context taking into account the context
// options used to initialize this meta configuration.
func (m *Meta) Context(copts contextOpts) (*terraform.Context, bool, error) {
	opts := m.contextOpts()

	// First try to just read the plan directly from the path given.
	f, err := os.Open(copts.Path)
	if err == nil {
		plan, err := terraform.ReadPlan(f)
		f.Close()
		if err == nil {
			// Setup our state, force it to use our plan's state
			stateOpts := m.StateOpts()
			if plan != nil {
				stateOpts.ForceState = plan.State
			}

			// Get the state
			result, err := State(stateOpts)
			if err != nil {
				return nil, false, fmt.Errorf("Error loading plan: %s", err)
			}

			// Set our state
			m.state = result.State

			// this is used for printing the saved location later
			if m.stateOutPath == "" {
				m.stateOutPath = result.StatePath
			}

			if len(m.variables) > 0 {
				return nil, false, fmt.Errorf(
					"You can't set variables with the '-var' or '-var-file' flag\n" +
						"when you're applying a plan file. The variables used when\n" +
						"the plan was created will be used. If you wish to use different\n" +
						"variable values, create a new plan file.")
			}

			ctx, err := plan.Context(opts)
			return ctx, true, err
		}
	}

	// Use the state path from the options, if one was given
	if copts.StatePath != "" {
		m.statePath = copts.StatePath
	}

	// Tell the context if we're in a destroy plan / apply
	opts.Destroy = copts.Destroy

	// Store the loaded state
	state, err := m.State()
	if err != nil {
		return nil, false, err
	}

	// Load the root module
	var mod *module.Tree
	if copts.Path != "" {
		mod, err = module.NewTreeModule("", copts.Path)

		// Check for the error where we have no config files but
		// allow that. If that happens, clear the error.
		if errwrap.ContainsType(err, new(config.ErrNoConfigsFound)) &&
			copts.PathEmptyOk {
			log.Printf(
				"[WARN] Empty configuration dir, ignoring: %s", copts.Path)
			err = nil
			mod = module.NewEmptyTree()
		}

		if err != nil {
			return nil, false, fmt.Errorf("Error loading config: %s", err)
		}
	} else {
		mod = module.NewEmptyTree()
	}

	err = mod.Load(m.moduleStorage(m.DataDir()), copts.GetMode)
	if err != nil {
		return nil, false, fmt.Errorf("Error downloading modules: %s", err)
	}

	// Validate the module right away
	if err := mod.Validate(); err != nil {
		return nil, false, err
	}

	opts.Module = mod
	opts.Parallelism = copts.Parallelism
	opts.State = state.State()
	ctx, err := terraform.NewContext(opts)
	return ctx, false, err
}