// newGCSBackend constructs a Google Cloud Storage backend using a pre-existing
// bucket. Credentials can be provided to the backend, sourced
// from environment variables or a service account file.
func newGCSBackend(conf map[string]string, logger log.Logger) (Backend, error) {
	bucketName := os.Getenv("GOOGLE_STORAGE_BUCKET")
	if bucketName == "" {
		bucketName = conf["bucket"]
		if bucketName == "" {
			return nil, fmt.Errorf("env var GOOGLE_STORAGE_BUCKET or configuration parameter 'bucket' must be set")
		}
	}

	// path to service account JSON file
	credentialsFile := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
	if credentialsFile == "" {
		credentialsFile = conf["credentials_file"]
		if credentialsFile == "" {
			return nil, fmt.Errorf("env var GOOGLE_APPLICATION_CREDENTIALS or configuration parameter 'credentials_file' must be set")
		}
	}

	client, err := storage.NewClient(
		context.Background(),
		option.WithServiceAccountFile(credentialsFile),
	)
	if err != nil {
		return nil, fmt.Errorf("error establishing storage client: '%v'", err)
	}

	// check client connectivity by getting bucket attributes
	_, err = client.Bucket(bucketName).Attrs(context.Background())
	if err != nil {
		return nil, fmt.Errorf("unable to access bucket '%s': '%v'", bucketName, err)
	}

	maxParStr, ok := conf["max_parallel"]
	var maxParInt int
	if ok {
		maxParInt, err = strconv.Atoi(maxParStr)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
		}
		if logger.IsDebug() {
			logger.Debug("physical/gcs: max_parallel set", "max_parallel", maxParInt)
		}
	}

	g := GCSBackend{
		bucketName: bucketName,
		client:     client,
		permitPool: NewPermitPool(maxParInt),
		logger:     logger,
	}

	return &g, nil
}
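// Example usage (a minimal sketch, not part of the original backend code): the
// bucket name and credentials path below are hypothetical placeholders; the
// configuration keys mirror those read by newGCSBackend above.
func exampleGCSBackend(logger log.Logger) (Backend, error) {
	conf := map[string]string{
		"bucket":           "my-vault-data",             // hypothetical bucket name
		"credentials_file": "/etc/vault/gcs-creds.json", // hypothetical service account file
		"max_parallel":     "128",                       // optional; parsed with strconv.Atoi
	}
	return newGCSBackend(conf, logger)
}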
// newAzureBackend constructs an Azure backend which stores entries in the
// given blob storage container, creating the container if it does not exist.
// Credentials can be provided to the backend, sourced from the environment
// or from the configuration.
func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) {
	container := os.Getenv("AZURE_BLOB_CONTAINER")
	if container == "" {
		container = conf["container"]
		if container == "" {
			return nil, fmt.Errorf("'container' must be set")
		}
	}

	accountName := os.Getenv("AZURE_ACCOUNT_NAME")
	if accountName == "" {
		accountName = conf["accountName"]
		if accountName == "" {
			return nil, fmt.Errorf("'accountName' must be set")
		}
	}

	accountKey := os.Getenv("AZURE_ACCOUNT_KEY")
	if accountKey == "" {
		accountKey = conf["accountKey"]
		if accountKey == "" {
			return nil, fmt.Errorf("'accountKey' must be set")
		}
	}

	client, err := storage.NewBasicClient(accountName, accountKey)
	if err != nil {
		return nil, fmt.Errorf("failed to create Azure client: %v", err)
	}

	// Create the container if it does not already exist and fail if it cannot
	// be created
	_, err = client.GetBlobService().CreateContainerIfNotExists(container, storage.ContainerAccessTypePrivate)
	if err != nil {
		return nil, fmt.Errorf("failed to create Azure container '%s': %v", container, err)
	}

	maxParStr, ok := conf["max_parallel"]
	var maxParInt int
	if ok {
		maxParInt, err = strconv.Atoi(maxParStr)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
		}
		if logger.IsDebug() {
			logger.Debug("azure: max_parallel set", "max_parallel", maxParInt)
		}
	}

	a := &AzureBackend{
		container:  container,
		client:     client.GetBlobService(),
		logger:     logger,
		permitPool: NewPermitPool(maxParInt),
	}
	return a, nil
}
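// Example usage (a minimal sketch, not part of the original backend code): the
// account name, key and container below are hypothetical placeholders; the
// configuration keys mirror those read by newAzureBackend above.
func exampleAzureBackend(logger log.Logger) (Backend, error) {
	conf := map[string]string{
		"container":    "vault-data",       // hypothetical container name
		"accountName":  "mystorageaccount", // hypothetical storage account
		"accountKey":   "base64-key-here",  // hypothetical access key
		"max_parallel": "128",
	}
	return newAzureBackend(conf, logger)
}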
func handleTCPConnection(logger log.Logger, conn net.Conn, sshConfig *ssh.ServerConfig, system datamodel.System) {

	// Perform the SSH handshake and upgrade the TCP connection
	sshConn, channels, requests, err := ssh.NewServerConn(conn, sshConfig)
	if err != nil {
		logger.Warn("SSH handshake failed", "error", err)
		conn.Close()
		return
	}
	logger.Debug("Handshake successful")
	defer sshConn.Conn.Close()

	// Look up the authenticated user; abort the connection if the lookup fails
	users, err := system.Users()
	if err != nil {
		logger.Warn("Could not access user store", "error", err)
		return
	}
	user, err := users.Get(sshConn.Permissions.Extensions["username"])
	if err != nil {
		logger.Warn("Unknown user", "username", sshConn.Permissions.Extensions["username"], "error", err)
		return
	}

	// Discard out-of-band requests
	go ssh.DiscardRequests(requests)

	for ch := range channels {
		t := ch.ChannelType()

		// Reject unknown channel types but keep serving the connection
		if t != "session" && t != "kappa-client" {
			logger.Info("UnknownChannelType", "type", t)
			ch.Reject(ssh.UnknownChannelType, t)
			continue
		}

		// Accept channel
		channel, requests, err := ch.Accept()
		if err != nil {
			logger.Warn("Error creating channel", "error", err)
			continue
		}

		switch t {
		case "session":
			go handleSessionRequests(logger, channel, requests, system, user)
		case "kappa-client":
			go handleChannelRequests(logger, channel, requests, system, user)
		}
	}
}
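// A minimal accept-loop sketch showing how handleTCPConnection might be driven
// (not part of the original code): the listen address is a hypothetical
// placeholder, and sshConfig/system are assumed to be constructed elsewhere.
func exampleListenAndServe(logger log.Logger, sshConfig *ssh.ServerConfig, system datamodel.System) error {
	listener, err := net.Listen("tcp", "0.0.0.0:9022") // hypothetical listen address
	if err != nil {
		return err
	}
	defer listener.Close()

	for {
		conn, err := listener.Accept()
		if err != nil {
			return err
		}

		// Each connection is handled concurrently
		go handleTCPConnection(logger, conn, sshConfig, system)
	}
}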
// newSwiftBackend constructs a Swift backend using a pre-existing
// container. Credentials can be provided to the backend, sourced
// from the environment.
func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error) {
	username := os.Getenv("OS_USERNAME")
	if username == "" {
		username = conf["username"]
		if username == "" {
			return nil, fmt.Errorf("missing username")
		}
	}

	password := os.Getenv("OS_PASSWORD")
	if password == "" {
		password = conf["password"]
		if password == "" {
			return nil, fmt.Errorf("missing password")
		}
	}

	authUrl := os.Getenv("OS_AUTH_URL")
	if authUrl == "" {
		authUrl = conf["auth_url"]
		if authUrl == "" {
			return nil, fmt.Errorf("missing auth_url")
		}
	}

	container := os.Getenv("OS_CONTAINER")
	if container == "" {
		container = conf["container"]
		if container == "" {
			return nil, fmt.Errorf("missing container")
		}
	}

	tenant := os.Getenv("OS_TENANT_NAME")
	if tenant == "" {
		tenant = conf["tenant"]
	}

	c := swift.Connection{
		UserName:  username,
		ApiKey:    password,
		AuthUrl:   authUrl,
		Tenant:    tenant,
		Transport: cleanhttp.DefaultPooledTransport(),
	}

	err := c.Authenticate()
	if err != nil {
		return nil, err
	}

	_, _, err = c.Container(container)
	if err != nil {
		return nil, fmt.Errorf("Unable to access container '%s': %v", container, err)
	}

	maxParStr, ok := conf["max_parallel"]
	var maxParInt int
	if ok {
		maxParInt, err = strconv.Atoi(maxParStr)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
		}
		if logger.IsDebug() {
			logger.Debug("swift: max_parallel set", "max_parallel", maxParInt)
		}
	}

	s := &SwiftBackend{
		client:     &c,
		container:  container,
		logger:     logger,
		permitPool: NewPermitPool(maxParInt),
	}
	return s, nil
}
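// Example usage (a minimal sketch, not part of the original backend code): the
// OpenStack credentials and container below are hypothetical placeholders; the
// configuration keys mirror those read by newSwiftBackend above.
func exampleSwiftBackend(logger log.Logger) (Backend, error) {
	conf := map[string]string{
		"username":  "vault-user",                        // hypothetical OpenStack user
		"password":  "secret",                            // hypothetical password
		"auth_url":  "https://keystone.example.com/v2.0", // hypothetical Keystone endpoint
		"container": "vault-data",                        // hypothetical; must already exist
		"tenant":    "my-tenant",                         // optional tenant name
	}
	return newSwiftBackend(conf, logger)
}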
// newDynamoDBBackend constructs a DynamoDB backend. If the
// configured DynamoDB table does not exist, it creates it.
func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, error) {
	table := os.Getenv("AWS_DYNAMODB_TABLE")
	if table == "" {
		table = conf["table"]
		if table == "" {
			table = DefaultDynamoDBTableName
		}
	}

	readCapacityString := os.Getenv("AWS_DYNAMODB_READ_CAPACITY")
	if readCapacityString == "" {
		readCapacityString = conf["read_capacity"]
		if readCapacityString == "" {
			readCapacityString = "0"
		}
	}
	readCapacity, err := strconv.Atoi(readCapacityString)
	if err != nil {
		return nil, fmt.Errorf("invalid read capacity: %s", readCapacityString)
	}
	if readCapacity == 0 {
		readCapacity = DefaultDynamoDBReadCapacity
	}

	writeCapacityString := os.Getenv("AWS_DYNAMODB_WRITE_CAPACITY")
	if writeCapacityString == "" {
		writeCapacityString = conf["write_capacity"]
		if writeCapacityString == "" {
			writeCapacityString = "0"
		}
	}
	writeCapacity, err := strconv.Atoi(writeCapacityString)
	if err != nil {
		return nil, fmt.Errorf("invalid write capacity: %s", writeCapacityString)
	}
	if writeCapacity == 0 {
		writeCapacity = DefaultDynamoDBWriteCapacity
	}

	accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
	if accessKey == "" {
		accessKey = conf["access_key"]
	}
	secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
	if secretKey == "" {
		secretKey = conf["secret_key"]
	}
	sessionToken := os.Getenv("AWS_SESSION_TOKEN")
	if sessionToken == "" {
		sessionToken = conf["session_token"]
	}

	endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT")
	if endpoint == "" {
		endpoint = conf["endpoint"]
	}
	region := os.Getenv("AWS_DEFAULT_REGION")
	if region == "" {
		region = conf["region"]
		if region == "" {
			region = DefaultDynamoDBRegion
		}
	}

	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.StaticProvider{Value: credentials.Value{
			AccessKeyID:     accessKey,
			SecretAccessKey: secretKey,
			SessionToken:    sessionToken,
		}},
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
		&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
	})

	awsConf := aws.NewConfig().
		WithCredentials(creds).
		WithRegion(region).
		WithEndpoint(endpoint)
	client := dynamodb.New(session.New(awsConf))

	if err := ensureTableExists(client, table, readCapacity, writeCapacity); err != nil {
		return nil, err
	}

	haEnabled := os.Getenv("DYNAMODB_HA_ENABLED")
	if haEnabled == "" {
		haEnabled = conf["ha_enabled"]
	}
	haEnabledBool, _ := strconv.ParseBool(haEnabled)

	recoveryMode := os.Getenv("RECOVERY_MODE")
	if recoveryMode == "" {
		recoveryMode = conf["recovery_mode"]
	}
	recoveryModeBool, _ := strconv.ParseBool(recoveryMode)

	maxParStr, ok := conf["max_parallel"]
	var maxParInt int
	if ok {
		maxParInt, err = strconv.Atoi(maxParStr)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
		}
		if logger.IsDebug() {
			logger.Debug("physical/dynamodb: max_parallel set", "max_parallel", maxParInt)
		}
	}

	return &DynamoDBBackend{
		table:      table,
		client:     client,
		permitPool: NewPermitPool(maxParInt),
		recovery:   recoveryModeBool,
		haEnabled:  haEnabledBool,
		logger:     logger,
	}, nil
}
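// Example usage (a minimal sketch, not part of the original backend code): the
// table name and region below are hypothetical placeholders. When no explicit
// credentials are configured, the provider chain above falls back to the
// environment, shared credential files, and the EC2 instance role.
func exampleDynamoDBBackend(logger log.Logger) (Backend, error) {
	conf := map[string]string{
		"table":          "vault-data", // hypothetical table; created if missing
		"region":         "us-east-1",  // hypothetical region
		"read_capacity":  "5",
		"write_capacity": "5",
		"ha_enabled":     "true",
	}
	return newDynamoDBBackend(conf, logger)
}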
// newConsulBackend constructs a Consul backend from the given configuration,
// building an API client and storing entries under the configured prefix in
// the KV store.
func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error) {
	// Get the path in Consul
	path, ok := conf["path"]
	if !ok {
		path = "vault/"
	}
	if logger.IsDebug() {
		logger.Debug("physical/consul: config path set", "path", path)
	}

	// Ensure path is suffixed but not prefixed
	if !strings.HasSuffix(path, "/") {
		logger.Warn("physical/consul: appending trailing forward slash to path")
		path += "/"
	}
	if strings.HasPrefix(path, "/") {
		logger.Warn("physical/consul: trimming path of its forward slash")
		path = strings.TrimPrefix(path, "/")
	}

	// Allow admins to disable consul integration
	disableReg, ok := conf["disable_registration"]
	var disableRegistration bool
	if ok && disableReg != "" {
		b, err := strconv.ParseBool(disableReg)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing disable_registration parameter: {{err}}", err)
		}
		disableRegistration = b
	}
	if logger.IsDebug() {
		logger.Debug("physical/consul: config disable_registration set", "disable_registration", disableRegistration)
	}

	// Get the service name to advertise in Consul
	service, ok := conf["service"]
	if !ok {
		service = DefaultServiceName
	}
	if logger.IsDebug() {
		logger.Debug("physical/consul: config service set", "service", service)
	}

	// Get the additional tags to attach to the registered service name
	tags := conf["service_tags"]
	if logger.IsDebug() {
		logger.Debug("physical/consul: config service_tags set", "service_tags", tags)
	}

	checkTimeout := defaultCheckTimeout
	checkTimeoutStr, ok := conf["check_timeout"]
	if ok {
		d, err := time.ParseDuration(checkTimeoutStr)
		if err != nil {
			return nil, err
		}

		min, _ := lib.DurationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor)
		if min < checkMinBuffer {
			return nil, fmt.Errorf("Consul check_timeout must be greater than %v", min)
		}

		checkTimeout = d
		if logger.IsDebug() {
			logger.Debug("physical/consul: config check_timeout set", "check_timeout", d)
		}
	}

	// Configure the client
	consulConf := api.DefaultConfig()

	if addr, ok := conf["address"]; ok {
		consulConf.Address = addr
		if logger.IsDebug() {
			logger.Debug("physical/consul: config address set", "address", addr)
		}
	}
	if scheme, ok := conf["scheme"]; ok {
		consulConf.Scheme = scheme
		if logger.IsDebug() {
			logger.Debug("physical/consul: config scheme set", "scheme", scheme)
		}
	}
	if token, ok := conf["token"]; ok {
		consulConf.Token = token
		logger.Debug("physical/consul: config token set")
	}

	if consulConf.Scheme == "https" {
		tlsClientConfig, err := setupTLSConfig(conf)
		if err != nil {
			return nil, err
		}

		transport := cleanhttp.DefaultPooledTransport()
		transport.MaxIdleConnsPerHost = 4
		transport.TLSClientConfig = tlsClientConfig
		consulConf.HttpClient.Transport = transport
		logger.Debug("physical/consul: configured TLS")
	}

	client, err := api.NewClient(consulConf)
	if err != nil {
		return nil, errwrap.Wrapf("client setup failed: {{err}}", err)
	}

	maxParStr, ok := conf["max_parallel"]
	var maxParInt int
	if ok {
		maxParInt, err = strconv.Atoi(maxParStr)
		if err != nil {
			return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
		}
		if logger.IsDebug() {
			logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt)
		}
	}

	// Setup the backend
	c := &ConsulBackend{
		path:                path,
		logger:              logger,
		client:              client,
		kv:                  client.KV(),
		permitPool:          NewPermitPool(maxParInt),
		serviceName:         service,
		serviceTags:         strutil.ParseDedupAndSortStrings(tags, ","),
		checkTimeout:        checkTimeout,
		disableRegistration: disableRegistration,
	}
	return c, nil
}
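// Example usage (a minimal sketch, not part of the original backend code): the
// address, token, and service values below are hypothetical placeholders; the
// configuration keys mirror those read by newConsulBackend above.
func exampleConsulBackend(logger log.Logger) (Backend, error) {
	conf := map[string]string{
		"path":          "vault/",            // KV prefix; a trailing slash is appended if missing
		"address":       "127.0.0.1:8500",    // hypothetical Consul agent address
		"scheme":        "http",
		"token":         "example-acl-token", // hypothetical ACL token
		"service":       "vault",
		"service_tags":  "active,standby",
		"check_timeout": "5s",
		"max_parallel":  "128",
	}
	return newConsulBackend(conf, logger)
}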