func flattenLambdaVpcConfigResponse(s *lambda.VpcConfigResponse) []map[string]interface{} { settings := make(map[string]interface{}, 0) if s == nil { return nil } settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds)) settings["security_group_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SecurityGroupIds)) settings["vpc_id"] = *s.VpcId return []map[string]interface{}{settings} }
func dataSourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cfconn name := d.Get("name").(string) input := cloudformation.DescribeStacksInput{ StackName: aws.String(name), } out, err := conn.DescribeStacks(&input) if err != nil { return fmt.Errorf("Failed describing CloudFormation stack (%s): %s", name, err) } if l := len(out.Stacks); l != 1 { return fmt.Errorf("Expected 1 CloudFormation stack (%s), found %d", name, l) } stack := out.Stacks[0] d.SetId(*stack.StackId) d.Set("description", stack.Description) d.Set("disable_rollback", stack.DisableRollback) d.Set("timeout_in_minutes", stack.TimeoutInMinutes) if len(stack.NotificationARNs) > 0 { d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs))) } d.Set("parameters", flattenAllCloudFormationParameters(stack.Parameters)) d.Set("tags", flattenCloudFormationTags(stack.Tags)) d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs)) if len(stack.Capabilities) > 0 { d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities))) } tInput := cloudformation.GetTemplateInput{ StackName: aws.String(name), } tOut, err := conn.GetTemplate(&tInput) if err != nil { return err } template, err := normalizeJsonString(*tOut.TemplateBody) if err != nil { return errwrap.Wrapf("template body contains an invalid JSON: {{err}}", err) } d.Set("template_body", template) return nil }
func resourceDigitalOceanVolumeRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*godo.Client) volume, resp, err := client.Storage.GetVolume(d.Id()) if err != nil { // If the volume is somehow already destroyed, mark as // successfully gone if resp.StatusCode == 404 { d.SetId("") return nil } return fmt.Errorf("Error retrieving volume: %s", err) } d.Set("id", volume.ID) dids := make([]interface{}, 0, len(volume.DropletIDs)) for _, did := range volume.DropletIDs { dids = append(dids, did) } d.Set("droplet_ids", schema.NewSet( func(dropletID interface{}) int { return dropletID.(int) }, dids, )) return nil }
func TestBuildTriggerConfigs(t *testing.T) { input := []interface{}{ map[string]interface{}{ "trigger_events": schema.NewSet(schema.HashString, []interface{}{ "DeploymentFailure", }), "trigger_name": "foo-trigger", "trigger_target_arn": "arn:aws:sns:us-west-2:123456789012:foo-topic", }, } expected := []*codedeploy.TriggerConfig{ &codedeploy.TriggerConfig{ TriggerEvents: []*string{ aws.String("DeploymentFailure"), }, TriggerName: aws.String("foo-trigger"), TriggerTargetArn: aws.String("arn:aws:sns:us-west-2:123456789012:foo-topic"), }, } actual := buildTriggerConfigs(input) if !reflect.DeepEqual(actual, expected) { t.Fatalf("buildTriggerConfigs output is not correct.\nGot:\n%#v\nExpected:\n%#v\n", actual, expected) } }
func resourceAwsDirectoryServiceDirectoryRead(d *schema.ResourceData, meta interface{}) error { dsconn := meta.(*AWSClient).dsconn input := directoryservice.DescribeDirectoriesInput{ DirectoryIds: []*string{aws.String(d.Id())}, } out, err := dsconn.DescribeDirectories(&input) if err != nil { return err } dir := out.DirectoryDescriptions[0] log.Printf("[DEBUG] Received DS directory: %s", *dir) d.Set("access_url", *dir.AccessUrl) d.Set("alias", *dir.Alias) if dir.Description != nil { d.Set("description", *dir.Description) } d.Set("dns_ip_addresses", schema.NewSet(schema.HashString, flattenStringList(dir.DnsIpAddrs))) d.Set("name", *dir.Name) if dir.ShortName != nil { d.Set("short_name", *dir.ShortName) } d.Set("size", *dir.Size) d.Set("type", *dir.Type) d.Set("vpc_settings", flattenDSVpcSettings(dir.VpcSettings)) d.Set("enable_sso", *dir.SsoEnabled) return nil }
func flattenCustomErrorResponses(ers *cloudfront.CustomErrorResponses) *schema.Set { s := []interface{}{} for _, v := range ers.Items { s = append(s, flattenCustomErrorResponse(v)) } return schema.NewSet(customErrorResponseHash, s) }
func flattenLoggingConfig(lc *cloudfront.LoggingConfig) *schema.Set { m := make(map[string]interface{}) m["prefix"] = *lc.Prefix m["bucket"] = *lc.Bucket m["include_cookies"] = *lc.IncludeCookies return schema.NewSet(loggingConfigHash, []interface{}{m}) }
func flattenOrigins(ors *cloudfront.Origins) *schema.Set { s := []interface{}{} for _, v := range ors.Items { s = append(s, flattenOrigin(v)) } return schema.NewSet(originHash, s) }
func flattenCustomHeaders(chs *cloudfront.CustomHeaders) *schema.Set { s := []interface{}{} for _, v := range chs.Items { s = append(s, flattenOriginCustomHeader(v)) } return schema.NewSet(originCustomHeaderHash, s) }
func flattenCacheBehavior(cb *cloudfront.CacheBehavior) map[string]interface{} { m := make(map[string]interface{}) m["compress"] = *cb.Compress m["viewer_protocol_policy"] = *cb.ViewerProtocolPolicy m["target_origin_id"] = *cb.TargetOriginId m["forwarded_values"] = schema.NewSet(forwardedValuesHash, []interface{}{flattenForwardedValues(cb.ForwardedValues)}) m["min_ttl"] = int(*cb.MinTTL) if len(cb.TrustedSigners.Items) > 0 { m["trusted_signers"] = flattenTrustedSigners(cb.TrustedSigners) } if cb.MaxTTL != nil { m["max_ttl"] = int(*cb.MaxTTL) } if cb.SmoothStreaming != nil { m["smooth_streaming"] = *cb.SmoothStreaming } if cb.DefaultTTL != nil { m["default_ttl"] = int(*cb.DefaultTTL) } if cb.AllowedMethods != nil { m["allowed_methods"] = flattenAllowedMethods(cb.AllowedMethods) } if cb.AllowedMethods.CachedMethods != nil { m["cached_methods"] = flattenCachedMethods(cb.AllowedMethods.CachedMethods) } if cb.PathPattern != nil { m["path_pattern"] = *cb.PathPattern } return m }
func TestBuildAlarmConfig(t *testing.T) { input := []interface{}{ map[string]interface{}{ "alarms": schema.NewSet(schema.HashString, []interface{}{ "foo-alarm", }), "enabled": true, "ignore_poll_alarm_failure": false, }, } expected := &codedeploy.AlarmConfiguration{ Alarms: []*codedeploy.Alarm{ { Name: aws.String("foo-alarm"), }, }, Enabled: aws.Bool(true), IgnorePollAlarmFailure: aws.Bool(false), } actual := buildAlarmConfig(input) if !reflect.DeepEqual(actual, expected) { t.Fatalf("buildAlarmConfig output is not correct.\nGot:\n%#v\nExpected:\n%#v\n", actual, expected) } }
// forwardedValuesConf builds a fixture map for a CloudFront forwarded-values
// block, embedding cookie-preference and headers fixtures.
func forwardedValuesConf() map[string]interface{} {
	conf := make(map[string]interface{})
	conf["query_string"] = true
	conf["cookies"] = schema.NewSet(cookiePreferenceHash, []interface{}{cookiePreferenceConf()})
	conf["headers"] = headersConf()
	return conf
}
func flattenCacheBehaviors(cbs *cloudfront.CacheBehaviors) *schema.Set { s := []interface{}{} for _, v := range cbs.Items { s = append(s, flattenCacheBehavior(v)) } return schema.NewSet(cacheBehaviorHash, s) }
func resourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).efsconn resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{ MountTargetId: aws.String(d.Id()), }) if err != nil { return err } if len(resp.MountTargets) < 1 { return fmt.Errorf("EFS mount target %q not found", d.Id()) } mt := resp.MountTargets[0] log.Printf("[DEBUG] Found EFS mount target: %#v", mt) d.SetId(*mt.MountTargetId) d.Set("file_system_id", *mt.FileSystemId) d.Set("ip_address", *mt.IpAddress) d.Set("subnet_id", *mt.SubnetId) d.Set("network_interface_id", *mt.NetworkInterfaceId) sgResp, err := conn.DescribeMountTargetSecurityGroups(&efs.DescribeMountTargetSecurityGroupsInput{ MountTargetId: aws.String(d.Id()), }) if err != nil { return err } d.Set("security_groups", schema.NewSet(schema.HashString, flattenStringList(sgResp.SecurityGroups))) return nil }
//Convert slice of strings to schema.Set func makeStringSet(list *[]string) *schema.Set { ilist := make([]interface{}, len(*list)) for i, v := range *list { ilist[i] = v } return schema.NewSet(schema.HashString, ilist) }
func flattenDefaultCacheBehavior(dcb *cloudfront.DefaultCacheBehavior) *schema.Set { m := make(map[string]interface{}) var cb cloudfront.CacheBehavior simpleCopyStruct(dcb, &cb) m = flattenCacheBehavior(&cb) return schema.NewSet(defaultCacheBehaviorHash, []interface{}{m}) }
func flattenDSConnectSettings( customerDnsIps []*string, s *directoryservice.DirectoryConnectSettingsDescription) []map[string]interface{} { if s == nil { return nil } settings := make(map[string]interface{}, 0) settings["customer_dns_ips"] = schema.NewSet(schema.HashString, flattenStringList(customerDnsIps)) settings["connect_ips"] = schema.NewSet(schema.HashString, flattenStringList(s.ConnectIps)) settings["customer_username"] = *s.CustomerUserName settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds)) settings["vpc_id"] = *s.VpcId return []map[string]interface{}{settings} }
// resourceAzureSqlDatabaseServerFirewallRuleUpdate does all the necessary API
// calls to update the state of the SQL Database Server Firewall Rule on Azure.
// The rule is applied to every server listed in "database_server_names"; a
// server that disappeared in the meantime is skipped, and the attribute is
// rewritten to contain only the servers on which the rule still exists.
func resourceAzureSqlDatabaseServerFirewallRuleUpdate(d *schema.ResourceData, meta interface{}) error {
	sqlClient := meta.(*Client).sqlClient

	var found bool
	name := d.Get("name").(string)
	updateParams := sql.FirewallRuleUpdateParams{
		Name:           name,
		StartIPAddress: d.Get("start_ip").(string),
		EndIPAddress:   d.Get("end_ip").(string),
	}

	// for each of the Database Servers our rules concerns; issue the update:
	remaining := schema.NewSet(schema.HashString, nil)
	for _, srv := range d.Get("database_server_names").(*schema.Set).List() {
		serverName := srv.(string)
		log.Printf("[INFO] Issuing Azure Database Server Firewall Rule list for Database Server %q: %s.", name, serverName)
		rules, err := sqlClient.ListFirewallRules(serverName)
		if err != nil {
			// NOTE(review): detection relies on substring matching of the
			// API error text — brittle, but the client offers no typed error.
			if strings.Contains(err.Error(), "does not exist") {
				// it means that the database server this rule belonged to has
				// been deleted in the meantime.
				continue
			} else {
				return fmt.Errorf("Error getting Azure Firewall Rules for Database Server %q: %s", serverName, err)
			}
		}

		// look for our rule:
		for _, rule := range rules.FirewallRules {
			if rule.Name == name {
				// take note of the fact that this Database Server still has
				// this rule:
				found = true
				remaining.Add(serverName)

				// go ahead and update the rule:
				log.Printf("[INFO] Issuing update of Azure Database Server Firewall Rule %q in Server %q.", name, serverName)
				if err := sqlClient.UpdateFirewallRule(serverName, name, updateParams); err != nil {
					return fmt.Errorf("Error updating Azure Database Server Firewall Rule %q for Server %q: %s", name, serverName, err)
				}
				// rule names are unique per server, so stop scanning.
				break
			}
		}
	}

	// check to see if the rule is still exists on any of the servers:
	if !found {
		d.SetId("")
		return nil
	}

	// else; update the list with the remaining Servers:
	d.Set("database_server_names", remaining)
	return nil
}
// originWithS3Conf builds a fixture map for a CloudFront origin backed by an
// S3 origin config, embedding the S3 and custom-header fixtures.
func originWithS3Conf() map[string]interface{} {
	conf := make(map[string]interface{})
	conf["origin_id"] = "S3Origin"
	conf["domain_name"] = "s3.example.com"
	conf["origin_path"] = "/"
	conf["s3_origin_config"] = schema.NewSet(s3OriginConfigHash, []interface{}{s3OriginConf()})
	conf["custom_header"] = originCustomHeadersConf()
	return conf
}
func makeTestCloudStackEgressFirewallRuleHash(ports []interface{}) string { return strconv.Itoa(resourceCloudStackEgressFirewallRuleHash(map[string]interface{}{ "source_cidr": CLOUDSTACK_NETWORK_1_IPADDRESS + "/32", "protocol": "tcp", "ports": schema.NewSet(schema.HashString, ports), "icmp_type": 0, "icmp_code": 0, })) }
func flattenOrigin(or *cloudfront.Origin) map[string]interface{} { m := make(map[string]interface{}) m["origin_id"] = *or.Id m["domain_name"] = *or.DomainName if or.CustomHeaders != nil { m["custom_header"] = flattenCustomHeaders(or.CustomHeaders) } if or.CustomOriginConfig != nil { m["custom_origin_config"] = schema.NewSet(customOriginConfigHash, []interface{}{flattenCustomOriginConfig(or.CustomOriginConfig)}) } if or.OriginPath != nil { m["origin_path"] = *or.OriginPath } if or.S3OriginConfig != nil { m["s3_origin_config"] = schema.NewSet(s3OriginConfigHash, []interface{}{flattenS3OriginConfig(or.S3OriginConfig)}) } return m }
func flattenDSVpcSettings( s *directoryservice.DirectoryVpcSettingsDescription) []map[string]interface{} { settings := make(map[string]interface{}, 0) settings["subnet_ids"] = schema.NewSet(schema.HashString, flattenStringList(s.SubnetIds)) settings["vpc_id"] = *s.VpcId return []map[string]interface{}{settings} }
// resourceAwsEfsMountTargetRead refreshes Terraform state for an EFS mount
// target: its network attributes, attached security groups, and the derived
// regional DNS name. A mount target that no longer exists is dropped from
// state rather than treated as an error.
func resourceAwsEfsMountTargetRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).efsconn
	resp, err := conn.DescribeMountTargets(&efs.DescribeMountTargetsInput{
		MountTargetId: aws.String(d.Id()),
	})
	if err != nil {
		if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "MountTargetNotFound" {
			// The EFS mount target could not be found,
			// which would indicate that it might be
			// already deleted.
			log.Printf("[WARN] EFS mount target %q could not be found.", d.Id())
			d.SetId("")
			return nil
		}
		return fmt.Errorf("Error reading EFS mount target %s: %s", d.Id(), err)
	}

	// A successful call can still return an empty list; treat that as an error
	// rather than indexing into it.
	if hasEmptyMountTargets(resp) {
		return fmt.Errorf("EFS mount target %q could not be found.", d.Id())
	}

	mt := resp.MountTargets[0]

	log.Printf("[DEBUG] Found EFS mount target: %#v", mt)

	d.SetId(*mt.MountTargetId)
	// Pointer fields are passed straight to d.Set, which is nil-safe.
	d.Set("file_system_id", mt.FileSystemId)
	d.Set("ip_address", mt.IpAddress)
	d.Set("subnet_id", mt.SubnetId)
	d.Set("network_interface_id", mt.NetworkInterfaceId)

	sgResp, err := conn.DescribeMountTargetSecurityGroups(&efs.DescribeMountTargetSecurityGroupsInput{
		MountTargetId: aws.String(d.Id()),
	})
	if err != nil {
		return err
	}

	err = d.Set("security_groups", schema.NewSet(schema.HashString, flattenStringList(sgResp.SecurityGroups)))
	if err != nil {
		return err
	}

	// DNS name per http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html
	az, err := getAzFromSubnetId(*mt.SubnetId, meta.(*AWSClient).ec2conn)
	if err != nil {
		return fmt.Errorf("Failed getting Availability Zone from subnet ID (%s): %s", *mt.SubnetId, err)
	}

	region := meta.(*AWSClient).region
	err = d.Set("dns_name", resourceAwsEfsMountTargetDnsName(az, *mt.FileSystemId, region))
	if err != nil {
		return err
	}

	return nil
}
func flattenGzips(gzipsList []*gofastly.Gzip) []map[string]interface{} { var gl []map[string]interface{} for _, g := range gzipsList { // Convert Gzip to a map for saving to state. ng := map[string]interface{}{ "name": g.Name, "cache_condition": g.CacheCondition, } // Fastly API provides default values for Extensions or ContentTypes, in the // event that you do not specify them. To work around this, if they are // omitted we'll use an empty space as a sentinel value to indicate not to // include them, and filter on that if g.Extensions != "" && g.Extensions != " " { e := strings.Split(g.Extensions, " ") var et []interface{} for _, ev := range e { et = append(et, ev) } ng["extensions"] = schema.NewSet(schema.HashString, et) } if g.ContentTypes != "" && g.ContentTypes != " " { c := strings.Split(g.ContentTypes, " ") var ct []interface{} for _, cv := range c { ct = append(ct, cv) } ng["content_types"] = schema.NewSet(schema.HashString, ct) } // prune any empty values that come from the default string value in structs for k, v := range ng { if v == "" { delete(ng, k) } } gl = append(gl, ng) } return gl }
func TestCloudFrontStructure_falttenViewerCertificate_acm_certificate_arn(t *testing.T) { in := viewerCertificateConfSetACM() vc := expandViewerCertificate(in) out := flattenViewerCertificate(vc) diff := schema.NewSet(viewerCertificateHash, []interface{}{in}).Difference(out) if len(diff.List()) > 0 { t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) } }
func TestCloudFrontStructure_flattenRestrictions(t *testing.T) { in := geoRestrictionsConf() r := expandRestrictions(in) out := flattenRestrictions(r) diff := schema.NewSet(restrictionsHash, []interface{}{in}).Difference(out) if len(diff.List()) > 0 { t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) } }
func TestCloudFrontStructure_flattenLoggingConfig(t *testing.T) { in := loggingConfigConf() lc := expandLoggingConfig(in) out := flattenLoggingConfig(lc) diff := schema.NewSet(loggingConfigHash, []interface{}{in}).Difference(out) if len(diff.List()) > 0 { t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) } }
func TestCloudFrontStructure_flattenDefaultCacheBehavior(t *testing.T) { in := defaultCacheBehaviorConf() dcb := expandDefaultCacheBehavior(in) out := flattenDefaultCacheBehavior(dcb) diff := schema.NewSet(defaultCacheBehaviorHash, []interface{}{in}).Difference(out) if len(diff.List()) > 0 { t.Fatalf("Expected out to be %v, got %v, diff: %v", in, out, diff) } }
func flattenForwardedValues(fv *cloudfront.ForwardedValues) map[string]interface{} { m := make(map[string]interface{}) m["query_string"] = *fv.QueryString if fv.Cookies != nil { m["cookies"] = schema.NewSet(cookiePreferenceHash, []interface{}{flattenCookiePreference(fv.Cookies)}) } if fv.Headers != nil { m["headers"] = flattenHeaders(fv.Headers) } return m }
// triggerConfigsToMap converts a list of []*codedeploy.TriggerConfig into a []map[string]interface{} func triggerConfigsToMap(list []*codedeploy.TriggerConfig) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(list)) for _, tc := range list { item := make(map[string]interface{}) item["trigger_events"] = schema.NewSet(schema.HashString, flattenStringList(tc.TriggerEvents)) item["trigger_name"] = *tc.TriggerName item["trigger_target_arn"] = *tc.TriggerTargetArn result = append(result, item) } return result }