// InitializeCertAuthConfig sets up the command line options for creating a CA
func InitializeCertAuthConfig(logger log.Logger) error {
    viper.SetDefault("Bits", "4096")
    viper.SetDefault("Years", "10")
    viper.SetDefault("Organization", "kappa-ca")
    viper.SetDefault("Country", "USA")

    if initCmd.PersistentFlags().Lookup("bits").Changed {
        logger.Info("", "Bits", KeyBits)
        viper.Set("Bits", KeyBits)
    }
    if initCmd.PersistentFlags().Lookup("years").Changed {
        logger.Info("", "Years", Years)
        viper.Set("Years", Years)
    }
    if initCmd.PersistentFlags().Lookup("organization").Changed {
        logger.Info("", "Organization", Organization)
        viper.Set("Organization", Organization)
    }
    if initCmd.PersistentFlags().Lookup("country").Changed {
        logger.Info("", "Country", Country)
        viper.Set("Country", Country)
    }
    if initCmd.PersistentFlags().Lookup("hosts").Changed {
        logger.Info("", "Hosts", Hosts)
        viper.Set("Hosts", Hosts)
    }

    return nil
}

// CreateCertificateAuthority generates a new CA
func CreateCertificateAuthority(logger log.Logger, key *rsa.PrivateKey, years int, org, country, hostList string) ([]byte, error) {

    // Generate subject key id
    logger.Info("Generating SubjectKeyID")
    subjectKeyID, err := GenerateSubjectKeyID(key)
    if err != nil {
        return nil, err
    }

    // Create serial number
    logger.Info("Generating Serial Number")
    serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
    serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
    if err != nil {
        return nil, fmt.Errorf("failed to generate serial number: %s", err.Error())
    }

    // Create template
    logger.Info("Creating Certificate template")
    template := &x509.Certificate{
        IsCA:                  true,
        BasicConstraintsValid: true,
        SubjectKeyId:          subjectKeyID,
        SerialNumber:          serialNumber,
        Subject: pkix.Name{
            Country:      []string{country},
            Organization: []string{org},
        },
        PublicKeyAlgorithm: x509.RSA,
        SignatureAlgorithm: x509.SHA512WithRSA,

        // Backdate NotBefore by ten minutes to tolerate clock skew.
        NotBefore: time.Now().Add(-600 * time.Second).UTC(),
        NotAfter:  time.Now().AddDate(years, 0, 0).UTC(),

        // see http://golang.org/pkg/crypto/x509/#KeyUsage
        ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
        KeyUsage:    x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
    }

    // Associate hosts
    logger.Info("Adding Hosts to Certificate")
    hosts := strings.Split(hostList, ",")
    for _, h := range hosts {
        if ip := net.ParseIP(h); ip != nil {
            template.IPAddresses = append(template.IPAddresses, ip)
        } else {
            template.DNSNames = append(template.DNSNames, h)
        }
    }

    // Create cert
    logger.Info("Generating Certificate")
    cert, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
    if err != nil {
        return nil, err
    }

    return cert, nil
}

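// A minimal usage sketch for CreateCertificateAuthority, not part of the
// original source. The function name (exampleInitCA) and the output paths are
// illustrative; the pki/ layout mirrors what CreatePkiDirectories and
// CreateCertificate below expect.
func exampleInitCA(logger log.Logger) error {
    // Generate the CA's RSA key pair.
    key, err := rsa.GenerateKey(rand.Reader, 4096)
    if err != nil {
        return err
    }

    // Self-sign a CA valid for ten years.
    der, err := CreateCertificateAuthority(logger, key, 10, "kappa-ca", "USA", "localhost")
    if err != nil {
        return err
    }

    // Persist the key and certificate under pki/.
    SavePrivateKey(logger, key, path.Join("pki", "private", "ca.key"))
    out, err := os.Create(path.Join("pki", "ca.crt"))
    if err != nil {
        return err
    }
    defer out.Close()
    return pem.Encode(out, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
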
// SavePrivateKey saves a PrivateKey in the PEM format.
func SavePrivateKey(logger log.Logger, key *rsa.PrivateKey, filename string) {
    logger.Info("Saving Private Key")
    pemfile, err := os.Create(filename)
    if err != nil {
        logger.Warn("Could not create private key file", "err", err.Error())
        return
    }
    defer pemfile.Close()

    pemkey := &pem.Block{
        Type:  "RSA PRIVATE KEY",
        Bytes: x509.MarshalPKCS1PrivateKey(key)}
    if err := pem.Encode(pemfile, pemkey); err != nil {
        logger.Warn("Could not write private key", "err", err.Error())
    }
}

// SaveCertificateRequest saves a certificate request in the PEM format.
func SaveCertificateRequest(logger log.Logger, cert []byte, filename string) {
    logger.Info("Saving Certificate Request")
    pemfile, err := os.Create(filename)
    if err != nil {
        logger.Warn("Could not create certificate request file", "err", err.Error())
        return
    }
    defer pemfile.Close()

    pemkey := &pem.Block{
        Type:  "CERTIFICATE REQUEST",
        Bytes: cert}
    if err := pem.Encode(pemfile, pemkey); err != nil {
        logger.Warn("Could not write certificate request", "err", err.Error())
    }
}

// newGCSBackend constructs a Google Cloud Storage backend using a pre-existing
// bucket. Credentials can be provided to the backend, sourced
// from environment variables or a service account file.
func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) {
    bucketName := os.Getenv("GOOGLE_STORAGE_BUCKET")
    if bucketName == "" {
        bucketName = conf["bucket"]
        if bucketName == "" {
            return nil, fmt.Errorf("env var GOOGLE_STORAGE_BUCKET or configuration parameter 'bucket' must be set")
        }
    }

    // path to service account JSON file
    credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
    if credentialsFile == "" {
        credentialsFile = conf["credentials_file"]
        if credentialsFile == "" {
            return nil, fmt.Errorf("env var GOOGLE_APPLICATION_CREDENTIALS or configuration parameter 'credentials_file' must be set")
        }
    }

    client, err := storage.NewClient(
        context.Background(),
        option.WithServiceAccountFile(credentialsFile),
    )
    if err != nil {
        return nil, fmt.Errorf("error establishing storage client: '%v'", err)
    }

    // check client connectivity by getting bucket attributes
    _, err = client.Bucket(bucketName).Attrs(context.Background())
    if err != nil {
        return nil, fmt.Errorf("unable to access bucket '%s': '%v'", bucketName, err)
    }

    maxParStr, ok := conf["max_parallel"]
    var maxParInt int
    if ok {
        maxParInt, err = strconv.Atoi(maxParStr)
        if err != nil {
            return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
        }
        if logger.IsDebug() {
            logger.Debug("physical/gcs: max_parallel set", "max_parallel", maxParInt)
        }
    }

    g := GCSBackend{
        bucketName: bucketName,
        client:     client,
        permitPool: NewPermitPool(maxParInt),
        logger:     logger,
    }

    return &g, nil
}

// SavePublicKey saves a public key in the PEM format.
func SavePublicKey(logger log.Logger, key *rsa.PrivateKey, filename string) {
    logger.Info("Saving Public Key")
    pemfile, err := os.Create(filename)
    if err != nil {
        logger.Warn("Could not create public key file", "err", err.Error())
        return
    }
    defer pemfile.Close()

    // MarshalPKIXPublicKey expects a pointer to the RSA public key.
    bytes, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
    if err != nil {
        logger.Warn("Could not marshal public key", "err", err.Error())
        return
    }

    pemkey := &pem.Block{
        Type:  "RSA PUBLIC KEY",
        Bytes: bytes}
    if err := pem.Encode(pemfile, pemkey); err != nil {
        logger.Warn("Could not write public key", "err", err.Error())
    }
}

// newAzureBackend constructs an Azure backend using a pre-existing
// container. Credentials can be provided to the backend, sourced
// from the environment.
func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) {

    container := os.Getenv("AZURE_BLOB_CONTAINER")
    if container == "" {
        container = conf["container"]
        if container == "" {
            return nil, fmt.Errorf("'container' must be set")
        }
    }

    accountName := os.Getenv("AZURE_ACCOUNT_NAME")
    if accountName == "" {
        accountName = conf["accountName"]
        if accountName == "" {
            return nil, fmt.Errorf("'accountName' must be set")
        }
    }

    accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
    if accountKey == "" {
        accountKey = conf["accountKey"]
        if accountKey == "" {
            return nil, fmt.Errorf("'accountKey' must be set")
        }
    }

    client, err := storage.NewBasicClient(accountName, accountKey)
    if err != nil {
        return nil, fmt.Errorf("Failed to create Azure client: %v", err)
    }

    client.GetBlobService().CreateContainerIfNotExists(container, storage.ContainerAccessTypePrivate)

    maxParStr, ok := conf["max_parallel"]
    var maxParInt int
    if ok {
        maxParInt, err = strconv.Atoi(maxParStr)
        if err != nil {
            return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
        }
        if logger.IsDebug() {
            logger.Debug("azure: max_parallel set", "max_parallel", maxParInt)
        }
    }

    a := &AzureBackend{
        container:  container,
        client:     client.GetBlobService(),
        logger:     logger,
        permitPool: NewPermitPool(maxParInt),
    }
    return a, nil
}

// NewCache returns a physical cache of the given size.
// If no size is provided, the default size is used.
func NewCache(b Backend, size int, logger log.Logger) *Cache {
    if size <= 0 {
        size = DefaultCacheSize
    }

    if logger.IsTrace() {
        logger.Trace("physical/cache: creating LRU cache", "size", size)
    }

    cache, _ := lru.New2Q(size)
    c := &Cache{
        backend: b,
        lru:     cache,
    }
    return c
}

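// A brief sketch (not part of the original source) of fronting any Backend
// with the LRU cache above. It assumes a Backend value obtained from one of
// the constructors in this package; passing 0 for size selects DefaultCacheSize.
func exampleWrapWithCache(b Backend, logger log.Logger) *Cache {
    // The cache serves repeated reads without hitting the underlying backend.
    return NewCache(b, 0, logger)
}
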
// CreatePkiDirectories creates the directory structures for storing the public and private keys.
func CreatePkiDirectories(logger log.Logger, root string) error {
    pki := path.Join(root, "pki")

    // Create pki directory
    if err := os.MkdirAll(pki, os.ModeDir|0755); err != nil {
        logger.Warn("Could not create pki/ directory", "err", err.Error())
        return err
    }

    // Create public directory
    if err := os.MkdirAll(path.Join(pki, "public"), os.ModeDir|0755); err != nil {
        logger.Warn("Could not create pki/public/ directory", "err", err.Error())
        return err
    }

    // Create private directory
    if err := os.MkdirAll(path.Join(pki, "private"), os.ModeDir|0755); err != nil {
        logger.Warn("Could not create pki/private/ directory", "err", err.Error())
        return err
    }

    // Create reqs directory
    if err := os.MkdirAll(path.Join(pki, "reqs"), os.ModeDir|0755); err != nil {
        logger.Warn("Could not create pki/reqs/ directory", "err", err.Error())
        return err
    }

    return nil
}

// handleChannelRequests services requests on a kappa-client channel,
// acknowledging "skl" requests and rejecting all other request types.
func handleChannelRequests(logger log.Logger, channel ssh.Channel, requests <-chan *ssh.Request, system datamodel.System, user datamodel.User) {
    defer channel.Close()

    for req := range requests {
        if req.Type == "skl" {
            logger.Info("SKL request", "request", string(req.Payload))
            req.Reply(true, nil)
        } else {
            if req.WantReply {
                req.Reply(false, nil)
            }
        }
    }
}

// InitializeNewCertConfig sets up the command line options for creating a new certificate
func InitializeNewCertConfig(logger log.Logger) error {
    viper.SetDefault("Name", "localhost")
    viper.SetDefault("ForceOverwrite", "false")

    if newCertCmd.PersistentFlags().Lookup("name").Changed {
        logger.Info("", "Name", Name)
        viper.Set("Name", Name)
    }
    if newCertCmd.PersistentFlags().Lookup("overwrite").Changed {
        logger.Info("", "ForceOverwrite", ForceOverwrite)
        viper.Set("ForceOverwrite", ForceOverwrite)
    }

    return nil
}

// ReadPrivateKey reads a private key file
func ReadPrivateKey(logger log.Logger, keyFile string) (privateKey ssh.Signer, err error) {

    // Read SSH Key
    keyBytes, err := ioutil.ReadFile(keyFile)
    if err != nil {
        logger.Warn("Private key could not be read", "error", err.Error())
        return
    }

    // Get private key
    privateKey, err = ssh.ParsePrivateKey(keyBytes)
    if err != nil {
        logger.Warn("Private key could not be parsed", "error", err.Error())
    }
    return
}

// WrapHandlerForClustering takes in Vault's HTTP handler and returns a setup
// function that returns both the original handler and one wrapped with cluster
// methods
func WrapHandlerForClustering(handler http.Handler, logger log.Logger) func() (http.Handler, http.Handler) {
    return func() (http.Handler, http.Handler) {
        // This mux handles cluster functions (right now, only forwarded requests)
        mux := http.NewServeMux()
        mux.HandleFunc("/cluster/local/forwarded-request", func(w http.ResponseWriter, req *http.Request) {
            freq, err := forwarding.ParseForwardedHTTPRequest(req)
            if err != nil {
                if logger != nil {
                    logger.Error("http/forwarded-request-server: error parsing forwarded request", "error", err)
                }

                w.Header().Add("Content-Type", "application/json")

                // The response writer here is different from
                // the one set in Vault's HTTP handler.
                // Hence, set the Cache-Control explicitly.
                w.Header().Set("Cache-Control", "no-store")

                w.WriteHeader(http.StatusInternalServerError)

                type errorResponse struct {
                    Errors []string
                }
                resp := &errorResponse{
                    Errors: []string{
                        err.Error(),
                    },
                }

                enc := json.NewEncoder(w)
                enc.Encode(resp)
                return
            }

            // To avoid the risk of a forward loop in some pathological condition,
            // set the no-forward header
            freq.Header.Set(IntNoForwardingHeaderName, "true")

            handler.ServeHTTP(w, freq)
        })

        return handler, mux
    }
}

// acceptStreams accepts multiplexed yamux streams and forwards them to
// streamCh until the context is cancelled or the session is closed.
func acceptStreams(logger log.Logger, session *yamux.Session, streamCh chan net.Conn) grim.TaskFunc {
    return func(ctx context.Context) {
        defer close(streamCh)
        for {
            select {
            case <-ctx.Done():
                return
            default:
                stream, err := session.Accept()
                if err != nil {
                    if err != io.EOF {
                        logger.Error("multiplex conn accept failed", "err", err)
                    }
                    return
                }
                streamCh <- stream
            }
        }
    }
}

// CreateCertificateRequest generates a new certificate request
func CreateCertificateRequest(logger log.Logger, key *rsa.PrivateKey, name, org, country, hostList string) (*x509.CertificateRequest, []byte, error) {

    // Create template
    logger.Info("Creating Certificate Request template")
    template := &x509.CertificateRequest{
        Subject: pkix.Name{
            Country:            []string{country},
            Organization:       []string{org},
            OrganizationalUnit: []string{name},
        },
    }

    // Associate hosts
    logger.Info("Adding Hosts to Certificate Request")
    hosts := strings.Split(hostList, ",")
    for _, h := range hosts {
        if ip := net.ParseIP(h); ip != nil {
            template.IPAddresses = append(template.IPAddresses, ip)
        } else {
            template.DNSNames = append(template.DNSNames, h)
        }
    }

    // Create certificate request
    logger.Info("Generating Certificate Request")
    cert, err := x509.CreateCertificateRequest(rand.Reader, template, key)
    if err != nil {
        return nil, nil, err
    }

    return template, cert, nil
}

// Run starts the SSH server in a background goroutine; the server stops when
// a value is received on its done channel.
func (s *SSHServer) Run(logger log.Logger, closer chan<- bool) {
    logger.Info("Starting SSH server", "addr", viper.GetString("SSHListen"))
    s.done = make(chan bool)

    // Start server
    go func(l log.Logger, sock *net.TCPListener, config *ssh.ServerConfig, c <-chan bool, complete chan<- bool) {
        defer sock.Close()
        for {

            // Accepts will only block for 1s
            sock.SetDeadline(time.Now().Add(time.Second))

            select {

            // Stop server on channel receive
            case <-c:
                l.Info("Stopping SSH server")
                complete <- true
                return
            default:

                // Accept new connection
                tcpConn, err := sock.Accept()
                if err != nil {
                    if neterr, ok := err.(net.Error); ok && neterr.Timeout() {
                        // l.Debug("Connection timeout...")
                    } else {
                        l.Warn("Connection failed", "error", err)
                    }
                    continue
                }

                // Handle connection
                l.Debug("Successful SSH connection")
                go handleTCPConnection(l, tcpConn, config, s.system)
            }
        }
    }(logger, s.listener, s.sshConfig, s.done, closer)
}

// handleTCPConnection upgrades an accepted TCP connection to SSH and
// dispatches its channels to the appropriate handlers.
func handleTCPConnection(logger log.Logger, conn net.Conn, sshConfig *ssh.ServerConfig, system datamodel.System) {

    // Open SSH connection
    sshConn, channels, requests, err := ssh.NewServerConn(conn, sshConfig)
    if err != nil {
        logger.Warn("SSH handshake failed")
        return
    }

    // Get the authenticated user; lookup errors are ignored here because the
    // public key callback has already authenticated the connection.
    users, _ := system.Users()
    user, _ := users.Get(sshConn.Permissions.Extensions["username"])

    logger.Debug("Handshake successful")
    defer sshConn.Conn.Close()

    // Discard requests
    go ssh.DiscardRequests(requests)

    for ch := range channels {
        t := ch.ChannelType()
        if t != "session" && t != "kappa-client" {
            logger.Info("UnknownChannelType", "type", t)
            ch.Reject(ssh.UnknownChannelType, t)
            break
        }

        // Accept channel
        channel, requests, err := ch.Accept()
        if err != nil {
            logger.Warn("Error creating channel")
            continue
        }

        if t == "session" {
            go handleSessionRequests(logger, channel, requests, system, user)
        } else if t == "kappa-client" {
            go handleChannelRequests(logger, channel, requests, system, user)
        }
    }
}

// reject rejects a new channel whose URI contains an unsupported scheme,
// user, or host. It returns true if the channel was rejected.
func reject(chType string, uri *url.URL, ch ssh.NewChannel, logger log.Logger) bool {
    if uri.Scheme != "" {
        logger.Warn("URI schemes not supported", "type", chType)
        ch.Reject(SchemeNotSupported, "schemes are not supported in the channel URI")
        return true
    } else if uri.User != nil {
        logger.Warn("URI users not supported", "type", chType)
        ch.Reject(UserNotSupported, "users are not supported in the channel URI")
        return true
    } else if uri.Host != "" {
        logger.Warn("URI hosts not supported", "type", chType)
        ch.Reject(HostNotSupported, "hosts are not supported in the channel URI")
        return true
    }
    return false
}

// InitializeMainConfig sets up the config options for the kappa command
func InitializeMainConfig(logger log.Logger) error {

    // Search the configured path for a file named "config".
    viper.SetConfigName("config")
    viper.AddConfigPath(ConfigPath)

    // Read configuration file
    logger.Info("Reading configuration file")
    err := viper.ReadInConfig()
    if err != nil {
        logger.Warn("Unable to locate configuration file.")
    }

    if kappaCmd.PersistentFlags().Lookup("config").Changed {
        logger.Info("", "ConfigPath", ConfigPath)
        viper.Set("ConfigPath", ConfigPath)
    }

    return nil
}

// newSwiftBackend constructs a Swift backend using a pre-existing
// container. Credentials can be provided to the backend, sourced
// from the environment.
func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error) {

    username := os.Getenv("OS_USERNAME")
    if username == "" {
        username = conf["username"]
        if username == "" {
            return nil, fmt.Errorf("missing username")
        }
    }
    password := os.Getenv("OS_PASSWORD")
    if password == "" {
        password = conf["password"]
        if password == "" {
            return nil, fmt.Errorf("missing password")
        }
    }
    authUrl := os.Getenv("OS_AUTH_URL")
    if authUrl == "" {
        authUrl = conf["auth_url"]
        if authUrl == "" {
            return nil, fmt.Errorf("missing auth_url")
        }
    }
    container := os.Getenv("OS_CONTAINER")
    if container == "" {
        container = conf["container"]
        if container == "" {
            return nil, fmt.Errorf("missing container")
        }
    }
    tenant := os.Getenv("OS_TENANT_NAME")
    if tenant == "" {
        tenant = conf["tenant"]
    }

    c := swift.Connection{
        UserName:  username,
        ApiKey:    password,
        AuthUrl:   authUrl,
        Tenant:    tenant,
        Transport: cleanhttp.DefaultPooledTransport(),
    }

    err := c.Authenticate()
    if err != nil {
        return nil, err
    }

    _, _, err = c.Container(container)
    if err != nil {
        return nil, fmt.Errorf("Unable to access container '%s': %v", container, err)
    }

    maxParStr, ok := conf["max_parallel"]
    var maxParInt int
    if ok {
        maxParInt, err = strconv.Atoi(maxParStr)
        if err != nil {
            return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
        }
        if logger.IsDebug() {
            logger.Debug("swift: max_parallel set", "max_parallel", maxParInt)
        }
    }

    s := &SwiftBackend{
        client:     &c,
        container:  container,
        logger:     logger,
        permitPool: NewPermitPool(maxParInt),
    }
    return s, nil
}

// InitializeServerConfig sets up the config options for the database servers.
func InitializeServerConfig(logger log.Logger) error {

    // Load default settings
    logger.Info("Loading default server settings")

    // CACert sets the certificate authority
    viper.SetDefault("CACert", "ca.crt")
    viper.BindEnv("CACert", "KAPPA_CA_CERT")

    // AdminCert sets the admin certificate
    viper.SetDefault("AdminCert", "admin.crt")
    viper.BindEnv("AdminCert", "KAPPA_ADMIN_CERT")

    // SSHKey sets the private key for the SSH server
    viper.SetDefault("SSHKey", "ssh-identity.key")
    viper.BindEnv("SSHKey", "KAPPA_SSH_KEY")

    // TLSCert sets the certificate for HTTPS
    viper.SetDefault("TLSCert", "tls-identity.crt")
    viper.BindEnv("TLSCert", "KAPPA_TLS_CERT")

    // TLSKey sets the private key for HTTPS
    viper.SetDefault("TLSKey", "tls-identity.key")
    viper.BindEnv("TLSKey", "KAPPA_TLS_KEY")

    // DataPath sets the directory for data storage
    viper.SetDefault("DataPath", "./data")
    viper.BindEnv("DataPath", "KAPPA_DATA_PATH")

    // SSHListen sets the address to listen for SSH traffic
    viper.SetDefault("SSHListen", ":9022")
    viper.BindEnv("SSHListen", "KAPPA_SSH_LISTEN")

    // HTTPListen sets the address to listen for HTTP traffic
    viper.SetDefault("HTTPListen", ":19022")
    viper.BindEnv("HTTPListen", "KAPPA_HTTP_LISTEN")

    // Serf config

    // ClusterNodes is a list of existing cluster nodes
    viper.SetDefault("ClusterNodes", "")
    viper.BindEnv("ClusterNodes", "KAPPA_CLUSTER_NODES")

    // NodeName sets the server's name
    viper.SetDefault("NodeName", "kappa-server")
    viper.BindEnv("NodeName", "KAPPA_NODE_NAME")

    // ClusterName sets the cluster name of this node.
    viper.SetDefault("ClusterName", "kappa")
    viper.BindEnv("ClusterName", "KAPPA_CLUSTER_NAME")

    // Bootstrap sets whether to bootstrap this node.
    viper.SetDefault("Bootstrap", false)
    viper.BindEnv("Bootstrap", "KAPPA_BOOTSTRAP")

    // BootstrapExpect is an argument used by Serf.
    viper.SetDefault("BootstrapExpect", 0)

    // Memberlist config

    // GossipBindAddr sets the Addr for cluster gossip.
    viper.SetDefault("GossipBindAddr", "0.0.0.0")
    viper.BindEnv("GossipBindAddr", "KAPPA_GOSSIP_BIND_ADDR")

    // GossipBindPort sets the port for cluster gossip. The port is used for both UDP and TCP gossip.
    viper.SetDefault("GossipBindPort", 7946)
    viper.BindEnv("GossipBindPort", "KAPPA_GOSSIP_BIND_PORT")

    // GossipAdvertiseAddr sets what address to advertise to other
    // cluster members. Used for NAT traversal.
    viper.SetDefault("GossipAdvertiseAddr", "")
    viper.BindEnv("GossipAdvertiseAddr", "KAPPA_GOSSIP_ADVERTISE_ADDR")

    // GossipAdvertisePort sets the port for cluster gossip and can
    // be useful for NAT traversal.
    viper.SetDefault("GossipAdvertisePort", 7946)
    viper.BindEnv("GossipAdvertisePort", "KAPPA_GOSSIP_ADVERTISE_PORT")

    // Set viper flags
    if serverCmd.PersistentFlags().Lookup("ca-cert").Changed {
        logger.Info("", "CACert", CACert)
        viper.Set("CACert", CACert)
    }
    if serverCmd.PersistentFlags().Lookup("admin-cert").Changed {
        logger.Info("", "AdminCert", AdminCert)
        viper.Set("AdminCert", AdminCert)
    }
    if serverCmd.PersistentFlags().Lookup("ssh-key").Changed {
        logger.Info("", "SSHKey", SSHKey)
        viper.Set("SSHKey", SSHKey)
    }
    if serverCmd.PersistentFlags().Lookup("tls-cert").Changed {
        logger.Info("", "TLSCert", TLSCert)
        viper.Set("TLSCert", TLSCert)
    }
    if serverCmd.PersistentFlags().Lookup("tls-key").Changed {
        logger.Info("", "TLSKey", TLSKey)
        viper.Set("TLSKey", TLSKey)
    }
    if serverCmd.PersistentFlags().Lookup("ssh-listen").Changed {
        logger.Info("", "SSHListen", SSHListen)
        viper.Set("SSHListen", SSHListen)
    }
    if serverCmd.PersistentFlags().Lookup("http-listen").Changed {
        logger.Info("", "HTTPListen", HTTPListen)
        viper.Set("HTTPListen", HTTPListen)
    }
    if serverCmd.PersistentFlags().Lookup("data").Changed {
        logger.Info("", "DataPath", DataPath)
        viper.Set("DataPath", DataPath)
    }

    // Serf config
    if serverCmd.PersistentFlags().Lookup("nodes").Changed {
        logger.Info("", "ClusterNodes", ClusterNodes)
        viper.Set("ClusterNodes", ClusterNodes)
    }
    if serverCmd.PersistentFlags().Lookup("node-name").Changed {
        logger.Info("", "NodeName", NodeName)
        viper.Set("NodeName", NodeName)
    }
    if serverCmd.PersistentFlags().Lookup("cluster").Changed {
        logger.Info("", "ClusterName", ClusterName)
        viper.Set("ClusterName", ClusterName)
    }
    if serverCmd.PersistentFlags().Lookup("bootstrap").Changed {
        logger.Info("", "Bootstrap", Bootstrap)
        viper.Set("Bootstrap", Bootstrap)
    }
    if serverCmd.PersistentFlags().Lookup("bootstrap-expect").Changed {
        logger.Info("", "BootstrapExpect", BootstrapExpect)
        viper.Set("BootstrapExpect", BootstrapExpect)
    }

    // Memberlist Config
    if serverCmd.PersistentFlags().Lookup("gossip-bind-addr").Changed {
        logger.Info("", "GossipBindAddr", GossipBindAddr)
        viper.Set("GossipBindAddr", GossipBindAddr)
    }
    if serverCmd.PersistentFlags().Lookup("gossip-bind-port").Changed {
        logger.Info("", "GossipBindPort", GossipBindPort)
        viper.Set("GossipBindPort", GossipBindPort)
    }
    if serverCmd.PersistentFlags().Lookup("gossip-advert-addr").Changed {
        logger.Info("", "GossipAdvertiseAddr", GossipAdvertiseAddr)
        viper.Set("GossipAdvertiseAddr", GossipAdvertiseAddr)
    }
    if serverCmd.PersistentFlags().Lookup("gossip-advert-port").Changed {
        logger.Info("", "GossipAdvertisePort", GossipAdvertisePort)
        viper.Set("GossipAdvertisePort", GossipAdvertisePort)
    }

    return nil
}

// newConsulBackend constructs a Consul backend using the given API client
// and the prefix in the KV store.
func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error) {
    // Get the path in Consul
    path, ok := conf["path"]
    if !ok {
        path = "vault/"
    }
    if logger.IsDebug() {
        logger.Debug("physical/consul: config path set", "path", path)
    }

    // Ensure path is suffixed but not prefixed
    if !strings.HasSuffix(path, "/") {
        logger.Warn("physical/consul: appending trailing forward slash to path")
        path += "/"
    }
    if strings.HasPrefix(path, "/") {
        logger.Warn("physical/consul: trimming path of its forward slash")
        path = strings.TrimPrefix(path, "/")
    }

    // Allow admins to disable consul integration
    disableReg, ok := conf["disable_registration"]
    var disableRegistration bool
    if ok && disableReg != "" {
        b, err := strconv.ParseBool(disableReg)
        if err != nil {
            return nil, errwrap.Wrapf("failed parsing disable_registration parameter: {{err}}", err)
        }
        disableRegistration = b
    }
    if logger.IsDebug() {
        logger.Debug("physical/consul: config disable_registration set", "disable_registration", disableRegistration)
    }

    // Get the service name to advertise in Consul
    service, ok := conf["service"]
    if !ok {
        service = DefaultServiceName
    }
    if logger.IsDebug() {
        logger.Debug("physical/consul: config service set", "service", service)
    }

    // Get the additional tags to attach to the registered service name
    tags := conf["service_tags"]
    if logger.IsDebug() {
        logger.Debug("physical/consul: config service_tags set", "service_tags", tags)
    }

    checkTimeout := defaultCheckTimeout
    checkTimeoutStr, ok := conf["check_timeout"]
    if ok {
        d, err := time.ParseDuration(checkTimeoutStr)
        if err != nil {
            return nil, err
        }

        min, _ := lib.DurationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor)
        if min < checkMinBuffer {
            return nil, fmt.Errorf("Consul check_timeout must be greater than %v", min)
        }

        checkTimeout = d
        if logger.IsDebug() {
            logger.Debug("physical/consul: config check_timeout set", "check_timeout", d)
        }
    }

    // Configure the client
    consulConf := api.DefaultConfig()

    if addr, ok := conf["address"]; ok {
        consulConf.Address = addr
        if logger.IsDebug() {
            logger.Debug("physical/consul: config address set", "address", addr)
        }
    }
    if scheme, ok := conf["scheme"]; ok {
        consulConf.Scheme = scheme
        if logger.IsDebug() {
            logger.Debug("physical/consul: config scheme set", "scheme", scheme)
        }
    }
    if token, ok := conf["token"]; ok {
        consulConf.Token = token
        logger.Debug("physical/consul: config token set")
    }

    if consulConf.Scheme == "https" {
        tlsClientConfig, err := setupTLSConfig(conf)
        if err != nil {
            return nil, err
        }

        transport := cleanhttp.DefaultPooledTransport()
        transport.MaxIdleConnsPerHost = 4
        transport.TLSClientConfig = tlsClientConfig
        consulConf.HttpClient.Transport = transport
        logger.Debug("physical/consul: configured TLS")
    }

    client, err := api.NewClient(consulConf)
    if err != nil {
        return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
    }

    maxParStr, ok := conf["max_parallel"]
    var maxParInt int
    if ok {
        maxParInt, err = strconv.Atoi(maxParStr)
        if err != nil {
            return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
        }
        if logger.IsDebug() {
            logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt)
        }
    }

    // Setup the backend
    c := &ConsulBackend{
        path:                path,
        logger:              logger,
        client:              client,
        kv:                  client.KV(),
        permitPool:          NewPermitPool(maxParInt),
        serviceName:         service,
        serviceTags:         strutil.ParseDedupAndSortStrings(tags, ","),
        checkTimeout:        checkTimeout,
        disableRegistration: disableRegistration,
    }
    return c, nil
}

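// A minimal configuration sketch for the constructor above, not part of the
// original source. It assumes it is called from within the same package
// (newConsulBackend is unexported); the concrete values are illustrative, but
// the keys are exactly the ones the function reads from conf.
func exampleConsulConf(logger log.Logger) (Backend, error) {
    conf := map[string]string{
        "path":         "vault/",         // KV prefix; a trailing slash is appended if missing
        "address":      "127.0.0.1:8500", // Consul agent address
        "scheme":       "http",           // "https" triggers TLS setup via setupTLSConfig
        "service":      "vault",          // service name registered in Consul
        "max_parallel": "128",            // size of the permit pool
    }
    return newConsulBackend(conf, logger)
}
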
// CreateCertificate generates a new cert signed by the certificate authority
// stored under pki/.
func CreateCertificate(logger log.Logger, req *x509.CertificateRequest, key *rsa.PrivateKey, years int, hostList string) ([]byte, error) {

    // Read CA
    logger.Info("Reading Certificate Authority")
    pemBlock, err := ReadCertificate(path.Join(".", "pki", "ca.crt"), "CERTIFICATE")
    if err != nil {
        return nil, err
    }

    // Decrypt PEM
    logger.Info("Decoding Certificate Authority Public Key")
    authority, err := x509.ParseCertificate(pemBlock.Bytes)
    if err != nil {
        return nil, err
    }

    logger.Info("Reading Certificate Authority Private Key")
    pemBlock, err = ReadCertificate(path.Join(".", "pki", "private", "ca.key"), "RSA PRIVATE KEY")
    if err != nil {
        return nil, err
    }

    logger.Info("Parsing Certificate Authority Private Key")
    priv, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
    if err != nil {
        return nil, err
    }

    // Generate subject key id
    logger.Info("Generating SubjectKeyID")
    subjectKeyID, err := GenerateSubjectKeyID(key)
    if err != nil {
        return nil, err
    }

    // Create serial number
    logger.Info("Generating Serial Number")
    serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
    serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
    if err != nil {
        return nil, fmt.Errorf("failed to generate serial number: %s", err.Error())
    }

    // Create template
    logger.Info("Creating Certificate template")
    template := &x509.Certificate{
        IsCA:                  false,
        BasicConstraintsValid: false,
        SubjectKeyId:          subjectKeyID,
        SerialNumber:          serialNumber,
        Subject:               req.Subject,
        PublicKeyAlgorithm:    x509.RSA,
        SignatureAlgorithm:    x509.SHA512WithRSA,

        // Backdate NotBefore by ten minutes to tolerate clock skew.
        NotBefore: time.Now().Add(-600 * time.Second).UTC(),
        NotAfter:  time.Now().AddDate(years, 0, 0).UTC(),

        // see http://golang.org/pkg/crypto/x509/#KeyUsage
        ExtKeyUsage:        []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
        KeyUsage:           x509.KeyUsageDigitalSignature,
        UnknownExtKeyUsage: nil,

        // Subject Alternative Name
        DNSNames: nil,

        PermittedDNSDomainsCritical: false,
        PermittedDNSDomains:         nil,
    }

    // Associate hosts
    logger.Info("Adding Hosts to Certificate")
    hosts := strings.Split(hostList, ",")
    for _, h := range hosts {
        if ip := net.ParseIP(h); ip != nil {
            template.IPAddresses = append(template.IPAddresses, ip)
        } else {
            template.DNSNames = append(template.DNSNames, h)
        }
    }

    // Create cert
    logger.Info("Generating Certificate")
    cert, err := x509.CreateCertificate(rand.Reader, template, authority, &key.PublicKey, priv)
    if err != nil {
        return nil, err
    }

    return cert, nil
}

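// A minimal sketch (not part of the original source) tying the certificate
// helpers together: create a key and CSR, then sign the request with the CA
// under pki/. The function name (exampleIssueCert) and host list are
// illustrative; it assumes the pki/ tree and CA already exist (see
// CreatePkiDirectories and CreateCertificateAuthority above).
func exampleIssueCert(logger log.Logger) ([]byte, error) {
    key, err := rsa.GenerateKey(rand.Reader, 4096)
    if err != nil {
        return nil, err
    }

    // Build the certificate request for a single host.
    req, _, err := CreateCertificateRequest(logger, key, "localhost", "kappa-ca", "USA", "localhost")
    if err != nil {
        return nil, err
    }

    // Sign the request with the CA key stored under pki/private/ca.key.
    return CreateCertificate(logger, req, key, 10, "localhost")
}
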
// newDynamoDBBackend constructs a DynamoDB backend. If the
// configured DynamoDB table does not exist, it creates it.
func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, error) {
    table := os.Getenv("AWS_DYNAMODB_TABLE")
    if table == "" {
        table = conf["table"]
        if table == "" {
            table = DefaultDynamoDBTableName
        }
    }

    readCapacityString := os.Getenv("AWS_DYNAMODB_READ_CAPACITY")
    if readCapacityString == "" {
        readCapacityString = conf["read_capacity"]
        if readCapacityString == "" {
            readCapacityString = "0"
        }
    }
    readCapacity, err := strconv.Atoi(readCapacityString)
    if err != nil {
        return nil, fmt.Errorf("invalid read capacity: %s", readCapacityString)
    }
    if readCapacity == 0 {
        readCapacity = DefaultDynamoDBReadCapacity
    }

    writeCapacityString := os.Getenv("AWS_DYNAMODB_WRITE_CAPACITY")
    if writeCapacityString == "" {
        writeCapacityString = conf["write_capacity"]
        if writeCapacityString == "" {
            writeCapacityString = "0"
        }
    }
    writeCapacity, err := strconv.Atoi(writeCapacityString)
    if err != nil {
        return nil, fmt.Errorf("invalid write capacity: %s", writeCapacityString)
    }
    if writeCapacity == 0 {
        writeCapacity = DefaultDynamoDBWriteCapacity
    }

    accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
    if accessKey == "" {
        accessKey = conf["access_key"]
    }
    secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
    if secretKey == "" {
        secretKey = conf["secret_key"]
    }
    sessionToken := os.Getenv("AWS_SESSION_TOKEN")
    if sessionToken == "" {
        sessionToken = conf["session_token"]
    }

    endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT")
    if endpoint == "" {
        endpoint = conf["endpoint"]
    }
    region := os.Getenv("AWS_DEFAULT_REGION")
    if region == "" {
        region = conf["region"]
        if region == "" {
            region = DefaultDynamoDBRegion
        }
    }

    creds := credentials.NewChainCredentials([]credentials.Provider{
        &credentials.StaticProvider{Value: credentials.Value{
            AccessKeyID:     accessKey,
            SecretAccessKey: secretKey,
            SessionToken:    sessionToken,
        }},
        &credentials.EnvProvider{},
        &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
        &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
    })

    awsConf := aws.NewConfig().
        WithCredentials(creds).
        WithRegion(region).
        WithEndpoint(endpoint)
    client := dynamodb.New(session.New(awsConf))

    if err := ensureTableExists(client, table, readCapacity, writeCapacity); err != nil {
        return nil, err
    }

    haEnabled := os.Getenv("DYNAMODB_HA_ENABLED")
    if haEnabled == "" {
        haEnabled = conf["ha_enabled"]
    }
    haEnabledBool, _ := strconv.ParseBool(haEnabled)

    recoveryMode := os.Getenv("RECOVERY_MODE")
    if recoveryMode == "" {
        recoveryMode = conf["recovery_mode"]
    }
    recoveryModeBool, _ := strconv.ParseBool(recoveryMode)

    maxParStr, ok := conf["max_parallel"]
    var maxParInt int
    if ok {
        maxParInt, err = strconv.Atoi(maxParStr)
        if err != nil {
            return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
        }
        if logger.IsDebug() {
            logger.Debug("physical/dynamodb: max_parallel set", "max_parallel", maxParInt)
        }
    }

    return &DynamoDBBackend{
        table:      table,
        client:     client,
        permitPool: NewPermitPool(maxParInt),
        recovery:   recoveryModeBool,
        haEnabled:  haEnabledBool,
        logger:     logger,
    }, nil
}

// NewSSHServer creates a new SSH server, configuring public key authentication
// against the system's user store and binding the TCP listener.
func NewSSHServer(logger log.Logger, sys datamodel.System, privateKey ssh.Signer, roots *x509.CertPool) (server SSHServer, err error) {

    // Get user store
    users, err := sys.Users()
    if err != nil {
        return
    }

    // Create server config
    sshConfig := &ssh.ServerConfig{
        NoClientAuth: false,
        PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (perm *ssh.Permissions, err error) {

            // Get user if exists, otherwise return error
            user, err := users.Get(conn.User())
            if err != nil {
                return
            }

            // Check keyring for public key
            if keyring := user.KeyRing(); !keyring.Contains(key.Marshal()) {
                err = fmt.Errorf("invalid public key")
                return
            }

            // Add pubkey and username to permissions
            perm = &ssh.Permissions{
                Extensions: map[string]string{
                    "pubkey":   string(key.Marshal()),
                    "username": conn.User(),
                },
            }
            return
        },
        AuthLogCallback: func(conn ssh.ConnMetadata, method string, err error) {
            if err != nil {
                logger.Info("Login attempt", "user", conn.User(), "method", method, "error", err.Error())
            } else {
                logger.Info("Successful login", "user", conn.User(), "method", method)
            }
        },
    }
    sshConfig.AddHostKey(privateKey)

    // Get ssh bind addr
    bind := viper.GetString("SSHListen")
    if bind == "" {
        err = fmt.Errorf("Empty SSH bind address")
        return
    }

    // Open SSH socket
    logger.Info("Starting SSH server", "addr", bind)
    sshAddr, err := net.ResolveTCPAddr("tcp", bind)
    if err != nil {
        err = fmt.Errorf("Invalid tcp address")
        return
    }

    // Create listener
    listener, err := net.ListenTCP("tcp", sshAddr)
    if err != nil {
        return
    }

    server.logger = logger
    server.sshConfig = sshConfig
    server.listener = listener
    server.system = sys
    return
}

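// A minimal startup sketch (not part of the original source) wiring the SSH
// helpers together. It assumes a datamodel.System value is available from
// elsewhere in the program, that "SSHKey" and "SSHListen" are already set in
// viper, and passes nil for the CA cert pool purely for illustration.
func exampleStartSSH(logger log.Logger, sys datamodel.System) error {
    // Load the server's host key from disk.
    signer, err := ReadPrivateKey(logger, viper.GetString("SSHKey"))
    if err != nil {
        return err
    }

    // Build the server and bind the listener.
    server, err := NewSSHServer(logger, sys, signer, nil)
    if err != nil {
        return err
    }

    // Run accepts connections in a goroutine and reports shutdown on closer.
    closer := make(chan bool)
    server.Run(logger, closer)
    return nil
}
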
// InitializeServerConfig sets up the config options for the database servers.
func InitializeServerConfig(logger log.Logger) error {

    // Load default settings
    logger.Info("Loading default server settings")
    viper.SetDefault("CACert", "ca.crt")
    viper.SetDefault("AdminCert", "admin.crt")
    viper.SetDefault("SSHKey", "ssh-identity.key")
    viper.SetDefault("TLSCert", "tls-identity.crt")
    viper.SetDefault("TLSKey", "tls-identity.key")
    viper.SetDefault("DataPath", "./data")
    viper.SetDefault("SSHListen", ":9022")
    viper.SetDefault("HTTPListen", ":19022")

    if serverCmd.PersistentFlags().Lookup("ca-cert").Changed {
        logger.Info("", "CACert", CACert)
        viper.Set("CACert", CACert)
    }
    if serverCmd.PersistentFlags().Lookup("admin-cert").Changed {
        logger.Info("", "AdminCert", AdminCert)
        viper.Set("AdminCert", AdminCert)
    }
    if serverCmd.PersistentFlags().Lookup("ssh-key").Changed {
        logger.Info("", "SSHKey", SSHKey)
        viper.Set("SSHKey", SSHKey)
    }
    if serverCmd.PersistentFlags().Lookup("tls-cert").Changed {
        logger.Info("", "TLSCert", TLSCert)
        viper.Set("TLSCert", TLSCert)
    }
    if serverCmd.PersistentFlags().Lookup("tls-key").Changed {
        logger.Info("", "TLSKey", TLSKey)
        viper.Set("TLSKey", TLSKey)
    }
    if serverCmd.PersistentFlags().Lookup("ssh-listen").Changed {
        logger.Info("", "SSHListen", SSHListen)
        viper.Set("SSHListen", SSHListen)
    }
    if serverCmd.PersistentFlags().Lookup("http-listen").Changed {
        logger.Info("", "HTTPListen", HTTPListen)
        viper.Set("HTTPListen", HTTPListen)
    }
    if serverCmd.PersistentFlags().Lookup("data").Changed {
        logger.Info("", "DataPath", DataPath)
        viper.Set("DataPath", DataPath)
    }

    return nil
}

// startTerminal runs the interactive kappa REPL over an SSH channel,
// parsing and executing SKL statements until the client exits.
func startTerminal(logger log.Logger, channel ssh.Channel, system datamodel.System, user datamodel.User) {
    defer channel.Close()

    prompt := "kappa> "
    term := terminal.NewTerminal(channel, prompt)

    // // Try to make the terminal raw
    // oldState, err := terminal.MakeRaw(0)
    // if err != nil {
    //     logger.Warn("Error making terminal raw: ", err.Error())
    // }
    // defer terminal.Restore(0, oldState)

    // Write ascii text
    term.Write([]byte("\r\n"))
    for _, line := range ASCII {
        term.Write([]byte(line))
        term.Write([]byte("\r\n"))
    }

    // Write login message
    term.Write([]byte("\r\n\n"))
    GetMessage(channel, DefaultColorCodes)
    term.Write([]byte("\n"))

    // Create query executor
    executor := Executor{
        session: Session{
            namespace: "",
            user:      user,
        },
        terminal: NewTerminal(term, prompt),
        system:   system,
    }

    // Start REPL
    for {
        input, err := term.ReadLine()
        if err != nil {
            logger.Warn("Error reading line", "error", err.Error())
            break
        }

        // Process line
        line := strings.TrimSpace(input)
        if len(line) > 0 {

            // Log input and handle exit requests
            if line == "exit" || line == "quit" {
                logger.Info("Closing connection")
                break
            } else if line == "quote me" {
                term.Write([]byte("\r\n"))
                GetMessage(channel, DefaultColorCodes)
                term.Write([]byte("\r\n"))
                continue
            } else if strings.HasPrefix(line, "//") || strings.HasPrefix(line, "--") {
                channel.Write(DefaultColorCodes.LightGrey)
                channel.Write([]byte(line + "\r\n"))
                channel.Write(DefaultColorCodes.Reset)
                continue
            }

            // Parse statement
            stmt, err := skl.ParseStatement(line)

            // Return parse error in red
            if err != nil {
                logger.Warn("Bad Statement", "statement", line, "error", err)
                channel.Write(DefaultColorCodes.LightRed)
                channel.Write([]byte(err.Error()))
                channel.Write([]byte("\r\n"))
                channel.Write(DefaultColorCodes.Reset)
                continue
            }

            // Execute statement
            w := ResponseWriter{DefaultColorCodes, channel}
            executor.Execute(&w, stmt)
        }
    }
}
