func makeTemplate() *cf.Template {
	t := cf.NewTemplate()
	t.Description = "example production infrastructure"
	t.Parameters["DnsName"] = &cf.Parameter{
		Description: "The top level DNS name for the infrastructure",
		Type:        "String",
		Default:     "preview.example.io",
	}

	t.AddResource("ServerLoadBalancer", cf.ElasticLoadBalancingLoadBalancer{
		ConnectionDrainingPolicy: &cf.ElasticLoadBalancingConnectionDrainingPolicy{
			Enabled: cf.Bool(true),
			Timeout: cf.Integer(30),
		},
		CrossZone: cf.Bool(true),
		HealthCheck: &cf.ElasticLoadBalancingHealthCheck{
			HealthyThreshold:   cf.String("2"),
			Interval:           cf.String("60"),
			Target:             cf.String("HTTP:80/"),
			Timeout:            cf.String("5"),
			UnhealthyThreshold: cf.String("2"),
		},
		Listeners: &cf.ElasticLoadBalancingListenerList{
			cf.ElasticLoadBalancingListener{
				InstancePort:     cf.String("8000"),
				InstanceProtocol: cf.String("TCP"),
				LoadBalancerPort: cf.String("443"),
				Protocol:         cf.String("SSL"),
				SSLCertificateId: cf.Join("",
					cf.String("arn:aws:iam::"),
					cf.Ref("AWS::AccountId"),
					cf.String(":server-certificate/"),
					cf.Ref("DnsName")),
			},
		},
		Policies: &cf.ElasticLoadBalancingPolicyList{
			cf.ElasticLoadBalancingPolicy{
				PolicyName: cf.String("EnableProxyProtocol"),
				PolicyType: cf.String("ProxyProtocolPolicyType"),
				Attributes: []map[string]interface{}{
					{
						"Name":  "ProxyProtocol",
						"Value": "true",
					},
				},
				InstancePorts: []int{8000},
			},
		},
		Subnets: cf.StringList(
			cf.Ref("VpcSubnetA"),
			cf.Ref("VpcSubnetB"),
			cf.Ref("VpcSubnetC"),
		),
		SecurityGroups: cf.StringList(cf.Ref("LoadBalancerSecurityGroup")),
	})

	return t
}
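// The *cf.Template returned above serializes straight to CloudFormation JSON
// through encoding/json; nothing framework-specific is required. A minimal
// sketch of emitting it follows, assuming makeTemplate (and its "cf"
// go-cloudformation import) live in the same package as this main.
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// MarshalIndent produces the same template body CloudFormation accepts,
	// just pretty-printed for inspection.
	templateJSON, err := json.MarshalIndent(makeTemplate(), "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// The resulting document can be handed unchanged to CreateStack/UpdateStack
	// or the AWS CLI.
	fmt.Println(string(templateJSON))
}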
// Provision compiles, packages, and provisions (either via create or update) a Sparta application.
// The serviceName is the service's logical
// identity and is used to determine create vs update operations. The compilation options/flags are:
//
//	TAGS:        -tags lambdabinary
//	ENVIRONMENT: GOOS=linux GOARCH=amd64 GO15VENDOREXPERIMENT=1
//
// The compiled binary is packaged with a NodeJS proxy shim to manage AWS Lambda setup & invocation per
// http://docs.aws.amazon.com/lambda/latest/dg/authoring-function-in-nodejs.html
//
// The two files are ZIP'd, posted to S3 and used as an input to a dynamically generated CloudFormation
// template (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html)
// which creates or updates the service state.
//
// More information on golang 1.5's support for vendor'd resources is documented at
//
//	https://docs.google.com/document/d/1Bz5-UB7g2uPBdOx-rw5t9MxJwkfpx90cqG9AFL0JAYo/edit
//	https://medium.com/@freeformz/go-1-5-s-vendor-experiment-fd3e830f52c3#.voiicue1j
//
//	type Configuration struct {
//		Val   string
//		Proxy struct {
//			Address string
//			Port    string
//		}
//	}
func Provision(noop bool,
	serviceName string,
	serviceDescription string,
	lambdaAWSInfos []*LambdaAWSInfo,
	api *API,
	site *S3Site,
	s3Bucket string,
	templateWriter io.Writer,
	logger *logrus.Logger) error {

	startTime := time.Now()

	ctx := &workflowContext{
		noop:               noop,
		serviceName:        serviceName,
		serviceDescription: serviceDescription,
		lambdaAWSInfos:     lambdaAWSInfos,
		api:                api,
		s3SiteContext: &s3SiteContext{
			s3Site: site,
		},
		cfTemplate:     gocf.NewTemplate(),
		s3Bucket:       s3Bucket,
		awsSession:     awsSession(logger),
		templateWriter: templateWriter,
		logger:         logger,
	}
	ctx.cfTemplate.Description = serviceDescription

	if len(lambdaAWSInfos) <= 0 {
		return errors.New("No lambda functions provided to Sparta.Provision()")
	}

	// Start the workflow
	for step := verifyIAMRoles; step != nil; {
		next, err := step(ctx)
		if err != nil {
			ctx.rollback()
			return err
		}
		if next == nil {
			elapsed := time.Since(startTime)
			ctx.logger.WithFields(logrus.Fields{
				"Seconds": fmt.Sprintf("%.f", elapsed.Seconds()),
			}).Info("Elapsed time")
			break
		} else {
			step = next
		}
	}
	return nil
}
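// Provision's loop above is a small state machine: each workflow step returns
// the next step to execute, or nil when the pipeline is finished. A
// self-contained sketch of that pattern follows; the step names and wfContext
// type are hypothetical stand-ins, not Sparta's own identifiers.
package main

import "fmt"

// wfContext stands in for Sparta's workflowContext.
type wfContext struct{ log []string }

// wfStep returns the next step to run, or nil when the workflow is complete.
type wfStep func(ctx *wfContext) (wfStep, error)

func verifyStep(ctx *wfContext) (wfStep, error) {
	ctx.log = append(ctx.log, "verify")
	return buildStep, nil
}

func buildStep(ctx *wfContext) (wfStep, error) {
	ctx.log = append(ctx.log, "build")
	return nil, nil // a nil next step terminates the loop
}

func main() {
	ctx := &wfContext{}
	for step := verifyStep; step != nil; {
		next, err := step(ctx)
		if err != nil {
			// A real implementation would roll back provisioned resources here.
			fmt.Println("error:", err)
			return
		}
		step = next
	}
	fmt.Println(ctx.log) // [verify build]
}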
// Marshal this object into 1 or more CloudFormation resource definitions that are accumulated
// in the resources map
func (info *LambdaAWSInfo) export(serviceName string,
	S3Bucket string,
	S3Key string,
	buildID string,
	roleNameMap map[string]*gocf.StringExpr,
	template *gocf.Template,
	context map[string]interface{},
	logger *logrus.Logger) error {

	// If we have RoleName, then get the ARN, otherwise get the Ref
	var dependsOn []string
	if nil != info.DependsOn {
		dependsOn = append(dependsOn, info.DependsOn...)
	}

	iamRoleArnName := info.RoleName

	// If there is no user supplied role, that means that the associated
	// IAMRoleDefinition name has been created and this resource needs to
	// depend on that being created.
	if iamRoleArnName == "" && info.RoleDefinition != nil {
		iamRoleArnName = info.RoleDefinition.logicalName(serviceName, info.lambdaFunctionName())
		dependsOn = append(dependsOn, info.RoleDefinition.logicalName(serviceName, info.lambdaFunctionName()))
	}

	lambdaDescription := info.Options.Description
	if "" == lambdaDescription {
		lambdaDescription = fmt.Sprintf("%s: %s", serviceName, info.lambdaFunctionName())
	}

	// Create the primary resource
	lambdaResource := gocf.LambdaFunction{
		Code: &gocf.LambdaFunctionCode{
			S3Bucket: gocf.String(S3Bucket),
			S3Key:    gocf.String(S3Key),
		},
		Description: gocf.String(lambdaDescription),
		Handler:     gocf.String(fmt.Sprintf("index.%s", info.jsHandlerName())),
		MemorySize:  gocf.Integer(info.Options.MemorySize),
		Role:        roleNameMap[iamRoleArnName],
		Runtime:     gocf.String(NodeJSVersion),
		Timeout:     gocf.Integer(info.Options.Timeout),
		VpcConfig:   info.Options.VpcConfig,
	}
	if "" != info.Options.KmsKeyArn {
		lambdaResource.KmsKeyArn = gocf.String(info.Options.KmsKeyArn)
	}
	if nil != info.Options.Environment {
		lambdaResource.Environment = &gocf.LambdaFunctionEnvironment{
			Variables: info.Options.Environment,
		}
	}
	// Need to check if a functionName exists in the LambdaAWSInfo struct.
	// If an empty string is passed, the template will error with an invalid
	// function name.
	if "" != info.functionName {
		lambdaResource.FunctionName = gocf.String(info.functionName)
	}

	cfResource := template.AddResource(info.logicalName(), lambdaResource)
	cfResource.DependsOn = append(cfResource.DependsOn, dependsOn...)
	safeMetadataInsert(cfResource, "golangFunc", info.lambdaFunctionName())

	// Create the lambda Ref in case we need a permission or event mapping
	functionAttr := gocf.GetAtt(info.logicalName(), "Arn")

	// Permissions
	for _, eachPermission := range info.Permissions {
		_, err := eachPermission.export(serviceName,
			info.lambdaFunctionName(),
			info.logicalName(),
			template,
			S3Bucket,
			S3Key,
			logger)
		if nil != err {
			return err
		}
	}

	// Event Source Mappings
	for _, eachEventSourceMapping := range info.EventSourceMappings {
		mappingErr := eachEventSourceMapping.export(serviceName,
			functionAttr,
			S3Bucket,
			S3Key,
			template,
			logger)
		if nil != mappingErr {
			return mappingErr
		}
	}

	// CustomResource
	for _, eachCustomResource := range info.customResources {
		resourceErr := eachCustomResource.export(serviceName,
			functionAttr,
			S3Bucket,
			S3Key,
			roleNameMap,
			template,
			logger)
		if nil != resourceErr {
			return resourceErr
		}
	}

	// Decorator
	if nil != info.Decorator {
		logger.Debug("Decorator found for Lambda: ", info.lambdaFunctionName())

		// Create an empty template so that we can track whether things
		// are overwritten
		metadataMap := make(map[string]interface{}, 0)
		decoratorProxyTemplate := gocf.NewTemplate()
		err := info.Decorator(serviceName,
			info.logicalName(),
			lambdaResource,
			metadataMap,
			S3Bucket,
			S3Key,
			buildID,
			decoratorProxyTemplate,
			context,
			logger)
		if nil != err {
			return err
		}

		// This data is marshalled into a DiscoveryInfo struct s.t. it can be
		// unmarshalled via sparta.Discover. We're going to just stuff it into
		// its own same-named property.
		if len(metadataMap) != 0 {
			safeMetadataInsert(cfResource, info.logicalName(), metadataMap)
		}

		// Append the custom resources
		err = safeMergeTemplates(decoratorProxyTemplate, template, logger)
		if nil != err {
			return fmt.Errorf("Lambda (%s) decorator created conflicting resources", info.lambdaFunctionName())
		}
	}
	return nil
}
// Provision compiles, packages, and provisions (either via create or update) a Sparta application.
// The serviceName is the service's logical
// identity and is used to determine create vs update operations. The compilation options/flags are:
//
//	TAGS:        -tags lambdabinary
//	ENVIRONMENT: GOOS=linux GOARCH=amd64
//
// The compiled binary is packaged with a NodeJS proxy shim to manage AWS Lambda setup & invocation per
// http://docs.aws.amazon.com/lambda/latest/dg/authoring-function-in-nodejs.html
//
// The two files are ZIP'd, posted to S3 and used as an input to a dynamically generated CloudFormation
// template (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html)
// which creates or updates the service state.
//
// More information on golang 1.5's support for vendor'd resources is documented at
//
//	https://docs.google.com/document/d/1Bz5-UB7g2uPBdOx-rw5t9MxJwkfpx90cqG9AFL0JAYo/edit
//	https://medium.com/@freeformz/go-1-5-s-vendor-experiment-fd3e830f52c3#.voiicue1j
//
//	type Configuration struct {
//		Val   string
//		Proxy struct {
//			Address string
//			Port    string
//		}
//	}
func Provision(noop bool,
	serviceName string,
	serviceDescription string,
	lambdaAWSInfos []*LambdaAWSInfo,
	api *API,
	site *S3Site,
	s3Bucket string,
	buildID string,
	codePipelineTrigger string,
	buildTags string,
	linkerFlags string,
	templateWriter io.Writer,
	workflowHooks *WorkflowHooks,
	logger *logrus.Logger) error {

	err := validateSpartaPreconditions(lambdaAWSInfos, logger)
	if nil != err {
		return err
	}
	startTime := time.Now()

	ctx := &workflowContext{
		noop:               noop,
		serviceName:        serviceName,
		serviceDescription: serviceDescription,
		lambdaAWSInfos:     lambdaAWSInfos,
		api:                api,
		s3SiteContext: &s3SiteContext{
			s3Site: site,
		},
		cfTemplate:                gocf.NewTemplate(),
		s3Bucket:                  s3Bucket,
		s3BucketVersioningEnabled: false,
		buildID:                   buildID,
		codePipelineTrigger:       codePipelineTrigger,
		buildTags:                 buildTags,
		linkFlags:                 linkerFlags,
		buildTime:                 time.Now(),
		awsSession:                spartaAWS.NewSession(logger),
		templateWriter:            templateWriter,
		workflowHooks:             workflowHooks,
		workflowHooksContext:      make(map[string]interface{}, 0),
		logger:                    logger,
	}
	ctx.cfTemplate.Description = serviceDescription

	// Update the context iff it exists
	if nil != workflowHooks && nil != workflowHooks.Context {
		for eachKey, eachValue := range workflowHooks.Context {
			ctx.workflowHooksContext[eachKey] = eachValue
		}
	}

	ctx.logger.WithFields(logrus.Fields{
		"BuildID":             buildID,
		"NOOP":                noop,
		"Tags":                ctx.buildTags,
		"CodePipelineTrigger": ctx.codePipelineTrigger,
	}).Info("Provisioning service")

	if len(lambdaAWSInfos) <= 0 {
		return errors.New("No lambda functions provided to Sparta.Provision()")
	}

	// Start the workflow
	for step := verifyIAMRoles; step != nil; {
		next, err := step(ctx)
		if err != nil {
			// Workflow step failed - unwind anything provisioned so far
			ctx.rollback()
			ctx.logger.Error(err)
			return err
		}
		if next == nil {
			elapsed := time.Since(startTime)
			ctx.logger.WithFields(logrus.Fields{
				"Seconds": fmt.Sprintf("%.f", elapsed.Seconds()),
			}).Info("Elapsed time")
			break
		} else {
			step = next
		}
	}

	// When we're done, execute any finalizers
	if nil != ctx.finalizerFunctions {
		ctx.logger.WithFields(logrus.Fields{
			"FinalizerCount": len(ctx.finalizerFunctions),
		}).Debug("Invoking finalizer functions")
		for _, eachFinalizer := range ctx.finalizerFunctions {
			eachFinalizer(ctx.logger)
		}
	}
	return nil
}
func ensureCloudFormationStack() workflowStep {
	return func(ctx *workflowContext) (workflowStep, error) {
		// PreMarshall Hook
		if ctx.workflowHooks != nil {
			preMarshallErr := callWorkflowHook(ctx.workflowHooks.PreMarshall, ctx)
			if nil != preMarshallErr {
				return nil, preMarshallErr
			}
		}

		// Add the "Parameters" to the template...
		if nil != codePipelineEnvironments {
			ctx.cfTemplate.Parameters = make(map[string]*gocf.Parameter, 0)
			for _, eachEnvironment := range codePipelineEnvironments {
				for eachKey := range eachEnvironment {
					ctx.cfTemplate.Parameters[eachKey] = &gocf.Parameter{
						Type:    "String",
						Default: "",
					}
				}
			}
		}

		for _, eachEntry := range ctx.lambdaAWSInfos {
			annotateCodePipelineEnvironments(eachEntry, ctx.logger)

			err := eachEntry.export(ctx.serviceName,
				ctx.s3Bucket,
				ctx.s3CodeZipURL.keyName(),
				ctx.buildID,
				ctx.lambdaIAMRoleNameMap,
				ctx.cfTemplate,
				ctx.workflowHooksContext,
				ctx.logger)
			if nil != err {
				return nil, err
			}
		}

		// If there's an API gateway definition, include the resources that provision it. Since this export will likely
		// generate outputs that the s3 site needs, we'll use a temporary outputs accumulator, pass that to the S3Site
		// if it's defined, and then merge it with the normal output map.
		apiGatewayTemplate := gocf.NewTemplate()
		if nil != ctx.api {
			err := ctx.api.export(
				ctx.serviceName,
				ctx.awsSession,
				ctx.s3Bucket,
				ctx.s3CodeZipURL.keyName(),
				ctx.lambdaIAMRoleNameMap,
				apiGatewayTemplate,
				ctx.noop,
				ctx.logger)
			if nil == err {
				err = safeMergeTemplates(apiGatewayTemplate, ctx.cfTemplate, ctx.logger)
			}
			if nil != err {
				return nil, fmt.Errorf("Failed to export APIGateway template resources")
			}
		}

		// If there's a Site defined, include the resources that provision it
		if nil != ctx.s3SiteContext.s3Site {
			ctx.s3SiteContext.s3Site.export(ctx.serviceName,
				ctx.s3Bucket,
				ctx.s3CodeZipURL.keyName(),
				ctx.s3SiteContext.s3UploadURL.keyName(),
				apiGatewayTemplate.Outputs,
				ctx.lambdaIAMRoleNameMap,
				ctx.cfTemplate,
				ctx.logger)
		}

		// Service decorator?
		if nil != ctx.workflowHooks && nil != ctx.workflowHooks.ServiceDecorator {
			hookName := runtime.FuncForPC(reflect.ValueOf(ctx.workflowHooks.ServiceDecorator).Pointer()).Name()
			ctx.logger.WithFields(logrus.Fields{
				"WorkflowHook":        hookName,
				"WorkflowHookContext": ctx.workflowHooksContext,
			}).Info("Calling WorkflowHook")

			serviceTemplate := gocf.NewTemplate()
			decoratorError := ctx.workflowHooks.ServiceDecorator(
				ctx.workflowHooksContext,
				ctx.serviceName,
				serviceTemplate,
				ctx.s3Bucket,
				ctx.buildID,
				ctx.awsSession,
				ctx.noop,
				ctx.logger,
			)
			if nil != decoratorError {
				return nil, decoratorError
			}
			mergeErr := safeMergeTemplates(serviceTemplate, ctx.cfTemplate, ctx.logger)
			if nil != mergeErr {
				return nil, mergeErr
			}
		}

		ctx.cfTemplate = annotateDiscoveryInfo(ctx.cfTemplate, ctx.logger)

		// PostMarshall Hook
		if ctx.workflowHooks != nil {
			postMarshallErr := callWorkflowHook(ctx.workflowHooks.PostMarshall, ctx)
			if nil != postMarshallErr {
				return nil, postMarshallErr
			}
		}
		return applyCloudFormationOperation(ctx)
	}
}
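// safeMergeTemplates in the listing above copies resources and outputs from a
// temporary template into the main one, and the decorator error paths suggest
// it fails on logical-ID collisions. A simplified sketch of that idea follows;
// it is not Sparta's implementation, and the gocf import path is an assumption
// based on the alias used in these listings.
package main

import (
	"fmt"

	gocf "github.com/crewjam/go-cloudformation"
)

// mergeTemplates copies src's resources and outputs into dst, refusing to
// silently overwrite anything that already exists.
func mergeTemplates(src *gocf.Template, dst *gocf.Template) error {
	for name, resource := range src.Resources {
		if _, exists := dst.Resources[name]; exists {
			return fmt.Errorf("duplicate resource logical ID: %s", name)
		}
		dst.Resources[name] = resource
	}
	for name, output := range src.Outputs {
		if _, exists := dst.Outputs[name]; exists {
			return fmt.Errorf("duplicate output: %s", name)
		}
		dst.Outputs[name] = output
	}
	return nil
}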
func ensureCloudFormationStack() workflowStep {
	return func(ctx *workflowContext) (workflowStep, error) {
		for _, eachEntry := range ctx.lambdaAWSInfos {
			err := eachEntry.export(ctx.serviceName,
				ctx.s3Bucket,
				ctx.s3LambdaZipKey,
				ctx.lambdaIAMRoleNameMap,
				ctx.cfTemplate,
				ctx.logger)
			if nil != err {
				return nil, err
			}
		}

		// If there's an API gateway definition, include the resources that provision it. Since this export will likely
		// generate outputs that the s3 site needs, we'll use a temporary outputs accumulator, pass that to the S3Site
		// if it's defined, and then merge it with the normal output map.
		apiGatewayTemplate := gocf.NewTemplate()
		if nil != ctx.api {
			err := ctx.api.export(ctx.s3Bucket,
				ctx.s3LambdaZipKey,
				ctx.lambdaIAMRoleNameMap,
				apiGatewayTemplate,
				ctx.logger)
			if nil == err {
				err = safeMergeTemplates(apiGatewayTemplate, ctx.cfTemplate, ctx.logger)
			}
			if nil != err {
				return nil, fmt.Errorf("Failed to export APIGateway template resources")
			}
		}

		// If there's a Site defined, include the resources that provision it
		if nil != ctx.s3SiteContext.s3Site {
			ctx.s3SiteContext.s3Site.export(ctx.s3Bucket,
				ctx.s3LambdaZipKey,
				ctx.s3SiteContext.s3SiteLambdaZipKey,
				apiGatewayTemplate.Outputs,
				ctx.lambdaIAMRoleNameMap,
				ctx.cfTemplate,
				ctx.logger)
		}

		// Save the output
		ctx.cfTemplate.Outputs[OutputSpartaHomeKey] = &gocf.Output{
			Description: "Sparta Home",
			Value:       gocf.String("http://gosparta.io"),
		}
		ctx.cfTemplate.Outputs[OutputSpartaVersionKey] = &gocf.Output{
			Description: "Sparta Version",
			Value:       gocf.String(SpartaVersion),
		}

		// Next pass - exchange outputs between dependencies so that Lambda
		// functions can discover the resources they depend on.
		for _, eachResource := range ctx.cfTemplate.Resources {
			// Only apply this to lambda functions
			if eachResource.Properties.ResourceType() == "AWS::Lambda::Function" {
				// Update the metadata with a reference to the output of each
				// depended on item...
				for _, eachDependsKey := range eachResource.DependsOn {
					dependencyOutputs, _ := outputsForResource(ctx.cfTemplate, eachDependsKey, ctx.logger)
					if nil != dependencyOutputs && len(dependencyOutputs) != 0 {
						ctx.logger.WithFields(logrus.Fields{
							"Resource":  eachDependsKey,
							"DependsOn": eachResource.DependsOn,
							"Outputs":   dependencyOutputs,
						}).Debug("Resource metadata")
						safeMetadataInsert(eachResource, eachDependsKey, dependencyOutputs)
					}
				}
				// Also include standard AWS outputs at a resource level if a lambda
				// needs to self-discover other resources
				safeMetadataInsert(eachResource, TagStackRegion, gocf.Ref("AWS::Region"))
				safeMetadataInsert(eachResource, TagStackID, gocf.Ref("AWS::StackId"))
				safeMetadataInsert(eachResource, TagStackName, gocf.Ref("AWS::StackName"))
			}
		}

		// Generate a complete CloudFormation template
		cfTemplate, err := json.Marshal(ctx.cfTemplate)
		if err != nil {
			ctx.logger.Error("Failed to Marshal CloudFormation template: ", err.Error())
			return nil, err
		}

		// Upload the actual CloudFormation template to S3 to increase the template
		// size limit
		contentBody := string(cfTemplate)
		sanitizedServiceName := sanitizedName(ctx.serviceName)
		hash := sha1.New()
		hash.Write([]byte(contentBody))
		s3keyName := fmt.Sprintf("%s-%s-cf.json", sanitizedServiceName, hex.EncodeToString(hash.Sum(nil)))

		uploadInput := &s3manager.UploadInput{
			Bucket:      &ctx.s3Bucket,
			Key:         &s3keyName,
			ContentType: aws.String("application/json"),
			Body:        strings.NewReader(contentBody),
		}

		// Pretty-print the template for the debug log and the optional
		// templateWriter
		formatted, err := json.MarshalIndent(ctx.cfTemplate, "", " ")
		if nil != err {
			return nil, err
		}
		ctx.logger.WithFields(logrus.Fields{
			"Body": string(formatted),
		}).Debug("CloudFormation template body")
		if nil != ctx.templateWriter {
			io.WriteString(ctx.templateWriter, string(formatted))
		}

		if ctx.noop {
			ctx.logger.WithFields(logrus.Fields{
				"Bucket": ctx.s3Bucket,
				"Key":    s3keyName,
			}).Info("Bypassing template upload & creation due to -n/-noop command line argument")
		} else {
			ctx.logger.Info("Uploading CloudFormation template")
			uploader := s3manager.NewUploader(ctx.awsSession)
			templateUploadResult, err := uploader.Upload(uploadInput)
			if nil != err {
				return nil, err
			}
			// Cleanup if there's a problem
			ctx.registerRollback(createS3RollbackFunc(ctx.awsSession, ctx.s3Bucket, s3keyName, ctx.noop))

			// Be transparent
			ctx.logger.WithFields(logrus.Fields{
				"URL": templateUploadResult.Location,
			}).Info("Template uploaded")

			stack, err := convergeStackState(templateUploadResult.Location, ctx)
			if nil != err {
				return nil, err
			}
			ctx.logger.WithFields(logrus.Fields{
				"StackName":    *stack.StackName,
				"StackId":      *stack.StackId,
				"CreationTime": *stack.CreationTime,
			}).Info("Stack provisioned")
		}
		return nil, nil
	}
}
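// The uploaded template's S3 key above is content addressed: a SHA1 of the
// template body is appended to the sanitized service name, so re-provisioning
// an unchanged template reuses the same key. A standalone sketch of that
// naming scheme follows; the sanitizing rule here is an assumption, not
// Sparta's sanitizedName implementation.
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"strings"
)

// templateS3Key derives a deterministic S3 object key from the service name
// and the exact template body.
func templateS3Key(serviceName, templateBody string) string {
	// Keep alphanumerics, replace everything else (assumed sanitizing rule).
	sanitized := strings.Map(func(r rune) rune {
		if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') {
			return r
		}
		return '-'
	}, serviceName)

	hash := sha1.New()
	hash.Write([]byte(templateBody))
	return fmt.Sprintf("%s-%s-cf.json", sanitized, hex.EncodeToString(hash.Sum(nil)))
}

func main() {
	fmt.Println(templateS3Key("MyService", `{"Resources":{}}`))
}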
// Marshal this object into 1 or more CloudFormation resource definitions that are accumulated
// in the resources map
func (info *LambdaAWSInfo) export(serviceName string,
	S3Bucket string,
	S3Key string,
	roleNameMap map[string]*gocf.StringExpr,
	template *gocf.Template,
	logger *logrus.Logger) error {

	// If we have RoleName, then get the ARN, otherwise get the Ref
	var dependsOn []string
	if nil != info.DependsOn {
		dependsOn = append(dependsOn, info.DependsOn...)
	}

	iamRoleArnName := info.RoleName

	// If there is no user supplied role, that means that the associated
	// IAMRoleDefinition name has been created and this resource needs to
	// depend on that being created.
	if iamRoleArnName == "" && info.RoleDefinition != nil {
		iamRoleArnName = info.RoleDefinition.logicalName()
		dependsOn = append(dependsOn, info.RoleDefinition.logicalName())
	}

	lambdaDescription := info.Options.Description
	if "" == lambdaDescription {
		lambdaDescription = fmt.Sprintf("%s: %s", serviceName, info.lambdaFnName)
	}

	// Create the primary resource
	lambdaResource := gocf.LambdaFunction{
		Code: &gocf.LambdaFunctionCode{
			S3Bucket: gocf.String(S3Bucket),
			S3Key:    gocf.String(S3Key),
		},
		Description: gocf.String(lambdaDescription),
		Handler:     gocf.String(fmt.Sprintf("index.%s", info.jsHandlerName())),
		MemorySize:  gocf.Integer(info.Options.MemorySize),
		Role:        roleNameMap[iamRoleArnName],
		Runtime:     gocf.String("nodejs"),
		Timeout:     gocf.Integer(info.Options.Timeout),
	}
	cfResource := template.AddResource(info.logicalName(), lambdaResource)
	cfResource.DependsOn = append(cfResource.DependsOn, dependsOn...)
	safeMetadataInsert(cfResource, "golangFunc", info.lambdaFnName)

	// Create the lambda Ref in case we need a permission or event mapping
	functionAttr := gocf.GetAtt(info.logicalName(), "Arn")

	// Permissions
	for _, eachPermission := range info.Permissions {
		_, err := eachPermission.export(serviceName,
			info.logicalName(),
			template,
			S3Bucket,
			S3Key,
			logger)
		if nil != err {
			return err
		}
	}

	// Event Source Mappings
	hash := sha1.New()
	for _, eachEventSourceMapping := range info.EventSourceMappings {
		eventSourceMappingResource := gocf.LambdaEventSourceMapping{
			EventSourceArn:   gocf.String(eachEventSourceMapping.EventSourceArn),
			FunctionName:     functionAttr,
			StartingPosition: gocf.String(eachEventSourceMapping.StartingPosition),
			BatchSize:        gocf.Integer(eachEventSourceMapping.BatchSize),
			Enabled:          gocf.Bool(!eachEventSourceMapping.Disabled),
		}
		hash.Write([]byte(eachEventSourceMapping.EventSourceArn))
		binary.Write(hash, binary.LittleEndian, eachEventSourceMapping.BatchSize)
		hash.Write([]byte(eachEventSourceMapping.StartingPosition))
		resourceName := fmt.Sprintf("LambdaES%s", hex.EncodeToString(hash.Sum(nil)))
		template.AddResource(resourceName, eventSourceMappingResource)
	}

	// Decorator
	if nil != info.Decorator {
		logger.Debug("Decorator found for Lambda: ", info.lambdaFnName)

		// Create an empty template so that we can track whether things
		// are overwritten
		decoratorProxyTemplate := gocf.NewTemplate()
		err := info.Decorator(info.logicalName(), lambdaResource, decoratorProxyTemplate, logger)
		if nil != err {
			return err
		}

		// Append the custom resources
		err = safeMergeTemplates(decoratorProxyTemplate, template, logger)
		if nil != err {
			return fmt.Errorf("Lambda (%s) decorator created conflicting resources", info.lambdaFnName)
		}
	}
	return nil
}