func Test_expandNetworkACLEntry(t *testing.T) {
	input := []interface{}{
		map[string]interface{}{
			"protocol":   "tcp",
			"from_port":  22,
			"to_port":    22,
			"cidr_block": "0.0.0.0/0",
			"action":     "deny",
			"rule_no":    1,
		},
		map[string]interface{}{
			"protocol":   "tcp",
			"from_port":  443,
			"to_port":    443,
			"cidr_block": "0.0.0.0/0",
			"action":     "deny",
			"rule_no":    2,
		},
	}
	expanded, _ := expandNetworkAclEntries(input, "egress")

	expected := []ec2.NetworkACLEntry{
		ec2.NetworkACLEntry{
			Protocol: aws.String("6"),
			PortRange: &ec2.PortRange{
				From: aws.Integer(22),
				To:   aws.Integer(22),
			},
			RuleAction: aws.String("deny"),
			RuleNumber: aws.Integer(1),
			CIDRBlock:  aws.String("0.0.0.0/0"),
			Egress:     aws.Boolean(true),
		},
		ec2.NetworkACLEntry{
			Protocol: aws.String("6"),
			PortRange: &ec2.PortRange{
				From: aws.Integer(443),
				To:   aws.Integer(443),
			},
			RuleAction: aws.String("deny"),
			RuleNumber: aws.Integer(2),
			CIDRBlock:  aws.String("0.0.0.0/0"),
			Egress:     aws.Boolean(true),
		},
	}

	if !reflect.DeepEqual(expanded, expected) {
		t.Fatalf(
			"Got:\n\n%#v\n\nExpected:\n\n%#v\n",
			expanded, expected)
	}
}
func resourceAwsInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	// SourceDestCheck can only be set on VPC instances
	if d.Get("subnet_id").(string) != "" {
		log.Printf("[INFO] Modifying instance %s", d.Id())
		err := ec2conn.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeRequest{
			InstanceID: aws.String(d.Id()),
			SourceDestCheck: &ec2.AttributeBooleanValue{
				Value: aws.Boolean(d.Get("source_dest_check").(bool)),
			},
		})
		if err != nil {
			return err
		}
	}

	// TODO(mitchellh): wait for the attributes we modified to
	// persist the change...

	if err := setTags(ec2conn, d); err != nil {
		return err
	} else {
		d.SetPartial("tags")
	}

	return nil
}
func resourceAwsNetworkInterfaceDetach(oa *schema.Set, meta interface{}, eniId string) error {
	// If there was an old attachment, remove it
	if oa != nil && len(oa.List()) > 0 {
		old_attachment := oa.List()[0].(map[string]interface{})
		detach_request := &ec2.DetachNetworkInterfaceRequest{
			AttachmentID: aws.String(old_attachment["attachment_id"].(string)),
			Force:        aws.Boolean(true),
		}
		ec2conn := meta.(*AWSClient).ec2conn
		detach_err := ec2conn.DetachNetworkInterface(detach_request)
		if detach_err != nil {
			return fmt.Errorf("Error detaching ENI: %s", detach_err)
		}

		log.Printf("[DEBUG] Waiting for ENI (%s) to become detached", eniId)
		stateConf := &resource.StateChangeConf{
			Pending: []string{"true"},
			Target:  "false",
			Refresh: networkInterfaceAttachmentRefreshFunc(ec2conn, eniId),
			Timeout: 10 * time.Minute,
		}
		if _, err := stateConf.WaitForState(); err != nil {
			return fmt.Errorf(
				"Error waiting for ENI (%s) to become detached: %s", eniId, err)
		}
	}

	return nil
}
func resourceAwsSubnetUpdate(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	d.Partial(true)

	if err := setTags(ec2conn, d); err != nil {
		return err
	} else {
		d.SetPartial("tags")
	}

	if d.HasChange("map_public_ip_on_launch") {
		modifyOpts := &ec2.ModifySubnetAttributeRequest{
			SubnetID: aws.String(d.Id()),
			MapPublicIPOnLaunch: &ec2.AttributeBooleanValue{
				Value: aws.Boolean(d.Get("map_public_ip_on_launch").(bool)),
			},
		}

		log.Printf("[DEBUG] Subnet modify attributes: %#v", modifyOpts)

		err := ec2conn.ModifySubnetAttribute(modifyOpts)
		if err != nil {
			return err
		} else {
			d.SetPartial("map_public_ip_on_launch")
		}
	}

	d.Partial(false)

	return resourceAwsSubnetRead(d, meta)
}
func expandNetworkAclEntries(configured []interface{}, entryType string) ([]ec2.NetworkACLEntry, error) {
	entries := make([]ec2.NetworkACLEntry, 0, len(configured))
	for _, eRaw := range configured {
		data := eRaw.(map[string]interface{})

		// Map the protocol name (e.g. "tcp") to its protocol number;
		// reject anything we don't know how to translate.
		protocol := data["protocol"].(string)
		_, ok := protocolIntegers()[protocol]
		if !ok {
			return nil, fmt.Errorf("Invalid Protocol %s for rule %#v", protocol, data)
		}
		p := extractProtocolInteger(data["protocol"].(string))

		e := ec2.NetworkACLEntry{
			Protocol: aws.String(strconv.Itoa(p)),
			PortRange: &ec2.PortRange{
				From: aws.Integer(data["from_port"].(int)),
				To:   aws.Integer(data["to_port"].(int)),
			},
			Egress:     aws.Boolean(entryType == "egress"),
			RuleAction: aws.String(data["action"].(string)),
			RuleNumber: aws.Integer(data["rule_no"].(int)),
			CIDRBlock:  aws.String(data["cidr_block"].(string)),
		}
		entries = append(entries, e)
	}

	return entries, nil
}
// autoscalingTagsFromMap returns the AutoScaling tags for the given map of data.
func autoscalingTagsFromMap(m map[string]interface{}, resourceID string) []autoscaling.Tag {
	result := make([]autoscaling.Tag, 0, len(m))
	for k, v := range m {
		attr := v.(map[string]interface{})
		result = append(result, autoscaling.Tag{
			Key:               aws.String(k),
			Value:             aws.String(attr["value"].(string)),
			PropagateAtLaunch: aws.Boolean(attr["propagate_at_launch"].(bool)),
			ResourceID:        aws.String(resourceID),
			ResourceType:      aws.String("auto-scaling-group"),
		})
	}

	return result
}
func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) error {
	autoscalingconn := meta.(*AWSClient).autoscalingconn

	// Read the autoscaling group first. If it doesn't exist, we're done.
	// We need the group in order to check if there are instances attached.
	// If so, we need to remove those first.
	g, err := getAwsAutoscalingGroup(d, meta)
	if err != nil {
		return err
	}
	if g == nil {
		return nil
	}
	if len(g.Instances) > 0 || *g.DesiredCapacity > 0 {
		if err := resourceAwsAutoscalingGroupDrain(d, meta); err != nil {
			return err
		}
	}

	log.Printf("[DEBUG] AutoScaling Group destroy: %v", d.Id())
	deleteopts := autoscaling.DeleteAutoScalingGroupType{
		AutoScalingGroupName: aws.String(d.Id()),
	}

	// You can force an autoscaling group to delete
	// even if it's in the process of scaling a resource.
	// Normally, you would set the min-size and max-size to 0,0
	// and then delete the group. This bypasses that and leaves
	// resources potentially dangling.
	if d.Get("force_delete").(bool) {
		deleteopts.ForceDelete = aws.Boolean(true)
	}

	if err := autoscalingconn.DeleteAutoScalingGroup(&deleteopts); err != nil {
		autoscalingerr, ok := err.(aws.APIError)
		if ok && autoscalingerr.Code == "InvalidGroup.NotFound" {
			return nil
		}
		return err
	}

	return resource.Retry(5*time.Minute, func() error {
		if g, _ = getAwsAutoscalingGroup(d, meta); g != nil {
			return fmt.Errorf("Auto Scaling Group still exists")
		}
		return nil
	})
}
func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	log.Printf("[DEBUG] DB Instance destroy: %v", d.Id())

	opts := rds.DeleteDBInstanceMessage{DBInstanceIdentifier: aws.String(d.Id())}

	finalSnapshot := d.Get("final_snapshot_identifier").(string)
	if finalSnapshot == "" {
		opts.SkipFinalSnapshot = aws.Boolean(true)
	} else {
		opts.FinalDBSnapshotIdentifier = aws.String(finalSnapshot)
	}

	log.Printf("[DEBUG] DB Instance destroy configuration: %v", opts)
	if _, err := conn.DeleteDBInstance(&opts); err != nil {
		return err
	}

	log.Println(
		"[INFO] Waiting for DB Instance to be destroyed")
	stateConf := &resource.StateChangeConf{
		Pending: []string{"creating", "backing-up",
			"modifying", "deleting", "available"},
		Target:     "",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second, // Wait 30 secs before starting
	}
	if _, err := stateConf.WaitForState(); err != nil {
		return err
	}

	return nil
}
func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface{}) error {
	autoscalingconn := meta.(*AWSClient).autoscalingconn
	ec2conn := meta.(*AWSClient).ec2conn

	createLaunchConfigurationOpts := autoscaling.CreateLaunchConfigurationType{
		LaunchConfigurationName: aws.String(d.Get("name").(string)),
		ImageID:                 aws.String(d.Get("image_id").(string)),
		InstanceType:            aws.String(d.Get("instance_type").(string)),
		EBSOptimized:            aws.Boolean(d.Get("ebs_optimized").(bool)),
	}

	if v, ok := d.GetOk("user_data"); ok {
		userData := base64.StdEncoding.EncodeToString([]byte(v.(string)))
		createLaunchConfigurationOpts.UserData = aws.String(userData)
	}

	if v, ok := d.GetOk("iam_instance_profile"); ok {
		createLaunchConfigurationOpts.IAMInstanceProfile = aws.String(v.(string))
	}

	if v, ok := d.GetOk("placement_tenancy"); ok {
		createLaunchConfigurationOpts.PlacementTenancy = aws.String(v.(string))
	}

	if v := d.Get("associate_public_ip_address"); v != nil {
		createLaunchConfigurationOpts.AssociatePublicIPAddress = aws.Boolean(v.(bool))
	} else {
		createLaunchConfigurationOpts.AssociatePublicIPAddress = aws.Boolean(false)
	}

	if v, ok := d.GetOk("key_name"); ok {
		createLaunchConfigurationOpts.KeyName = aws.String(v.(string))
	}
	if v, ok := d.GetOk("spot_price"); ok {
		createLaunchConfigurationOpts.SpotPrice = aws.String(v.(string))
	}

	if v, ok := d.GetOk("security_groups"); ok {
		createLaunchConfigurationOpts.SecurityGroups = expandStringList(
			v.(*schema.Set).List(),
		)
	}

	var blockDevices []autoscaling.BlockDeviceMapping

	if v, ok := d.GetOk("ebs_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &autoscaling.EBS{
				DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
			}
			if v, ok := bd["snapshot_id"].(string); ok && v != "" {
				ebs.SnapshotID = aws.String(v)
			}
			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Integer(v)
			}
			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}
			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.IOPS = aws.Integer(v)
			}
			blockDevices = append(blockDevices, autoscaling.BlockDeviceMapping{
				DeviceName: aws.String(bd["device_name"].(string)),
				EBS:        ebs,
			})
		}
	}

	if v, ok := d.GetOk("ephemeral_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			blockDevices = append(blockDevices, autoscaling.BlockDeviceMapping{
				DeviceName:  aws.String(bd["device_name"].(string)),
				VirtualName: aws.String(bd["virtual_name"].(string)),
			})
		}
	}

	if v, ok := d.GetOk("root_block_device"); ok {
		vL := v.(*schema.Set).List()
		if len(vL) > 1 {
			return fmt.Errorf("Cannot specify more than one root_block_device.")
		}
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &autoscaling.EBS{
				DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
			}
			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Integer(v)
			}
			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}
			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.IOPS = aws.Integer(v)
			}
			if dn, err := fetchRootDeviceName(d.Get("image_id").(string), ec2conn); err == nil {
				blockDevices = append(blockDevices, autoscaling.BlockDeviceMapping{
					DeviceName: dn,
					EBS:        ebs,
				})
			} else {
				return err
			}
		}
	}

	if len(blockDevices) > 0 {
		createLaunchConfigurationOpts.BlockDeviceMappings = blockDevices
	}

	if v, ok := d.GetOk("name"); ok {
		createLaunchConfigurationOpts.LaunchConfigurationName = aws.String(v.(string))
		d.SetId(d.Get("name").(string))
	} else {
		hash := sha1.Sum([]byte(fmt.Sprintf("%#v", createLaunchConfigurationOpts)))
		config_name := fmt.Sprintf("terraform-%s", base64.URLEncoding.EncodeToString(hash[:]))
		log.Printf("[DEBUG] Computed Launch config name: %s", config_name)
		createLaunchConfigurationOpts.LaunchConfigurationName = aws.String(config_name)
		d.SetId(config_name)
	}

	log.Printf("[DEBUG] autoscaling create launch configuration: %#v", createLaunchConfigurationOpts)
	err := autoscalingconn.CreateLaunchConfiguration(&createLaunchConfigurationOpts)
	if err != nil {
		return fmt.Errorf("Error creating launch configuration: %s", err)
	}

	log.Printf("[INFO] launch configuration ID: %s", d.Id())

	// We put a Retry here since sometimes eventual consistency bites
	// us and we need to retry a few times to get the LC to load properly
	return resource.Retry(30*time.Second, func() error {
		return resourceAwsLaunchConfigurationRead(d, meta)
	})
}
func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn

	d.Partial(true)

	req := &rds.ModifyDBInstanceMessage{
		ApplyImmediately:     aws.Boolean(d.Get("apply_immediately").(bool)),
		DBInstanceIdentifier: aws.String(d.Id()),
	}
	d.SetPartial("apply_immediately")

	if d.HasChange("allocated_storage") {
		d.SetPartial("allocated_storage")
		req.AllocatedStorage = aws.Integer(d.Get("allocated_storage").(int))
	}
	if d.HasChange("backup_retention_period") {
		d.SetPartial("backup_retention_period")
		req.BackupRetentionPeriod = aws.Integer(d.Get("backup_retention_period").(int))
	}
	if d.HasChange("instance_class") {
		d.SetPartial("instance_class")
		req.DBInstanceClass = aws.String(d.Get("instance_class").(string))
	}
	if d.HasChange("parameter_group_name") {
		d.SetPartial("parameter_group_name")
		req.DBParameterGroupName = aws.String(d.Get("parameter_group_name").(string))
	}
	if d.HasChange("engine_version") {
		d.SetPartial("engine_version")
		req.EngineVersion = aws.String(d.Get("engine_version").(string))
	}
	if d.HasChange("iops") {
		d.SetPartial("iops")
		req.IOPS = aws.Integer(d.Get("iops").(int))
	}
	if d.HasChange("backup_window") {
		d.SetPartial("backup_window")
		req.PreferredBackupWindow = aws.String(d.Get("backup_window").(string))
	}
	if d.HasChange("maintenance_window") {
		d.SetPartial("maintenance_window")
		req.PreferredMaintenanceWindow = aws.String(d.Get("maintenance_window").(string))
	}
	if d.HasChange("password") {
		d.SetPartial("password")
		req.MasterUserPassword = aws.String(d.Get("password").(string))
	}
	if d.HasChange("multi_az") {
		d.SetPartial("multi_az")
		req.MultiAZ = aws.Boolean(d.Get("multi_az").(bool))
	}
	if d.HasChange("storage_type") {
		d.SetPartial("storage_type")
		req.StorageType = aws.String(d.Get("storage_type").(string))
	}

	if d.HasChange("vpc_security_group_ids") {
		if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
			var s []string
			for _, v := range attr.List() {
				s = append(s, v.(string))
			}
			req.VPCSecurityGroupIDs = s
		}
	}

	if d.HasChange("security_group_names") {
		if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
			var s []string
			for _, v := range attr.List() {
				s = append(s, v.(string))
			}
			req.DBSecurityGroups = s
		}
	}

	log.Printf("[DEBUG] DB Instance Modification request: %#v", req)
	_, err := conn.ModifyDBInstance(req)
	if err != nil {
		return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err)
	}

	if arn, err := buildRDSARN(d, meta); err == nil {
		if err := setTagsRDS(conn, d, arn); err != nil {
			return err
		} else {
			d.SetPartial("tags")
		}
	}

	d.Partial(false)
	return resourceAwsDbInstanceRead(d, meta)
}
func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).rdsconn
	tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
	opts := rds.CreateDBInstanceMessage{
		AllocatedStorage:     aws.Integer(d.Get("allocated_storage").(int)),
		DBInstanceClass:      aws.String(d.Get("instance_class").(string)),
		DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
		DBName:               aws.String(d.Get("name").(string)),
		MasterUsername:       aws.String(d.Get("username").(string)),
		MasterUserPassword:   aws.String(d.Get("password").(string)),
		Engine:               aws.String(d.Get("engine").(string)),
		EngineVersion:        aws.String(d.Get("engine_version").(string)),
		StorageEncrypted:     aws.Boolean(d.Get("storage_encrypted").(bool)),
		Tags:                 tags,
	}

	if attr, ok := d.GetOk("storage_type"); ok {
		opts.StorageType = aws.String(attr.(string))
	}

	attr := d.Get("backup_retention_period")
	opts.BackupRetentionPeriod = aws.Integer(attr.(int))

	if attr, ok := d.GetOk("iops"); ok {
		opts.IOPS = aws.Integer(attr.(int))
	}

	if attr, ok := d.GetOk("port"); ok {
		opts.Port = aws.Integer(attr.(int))
	}

	if attr, ok := d.GetOk("multi_az"); ok {
		opts.MultiAZ = aws.Boolean(attr.(bool))
	}

	if attr, ok := d.GetOk("availability_zone"); ok {
		opts.AvailabilityZone = aws.String(attr.(string))
	}

	if attr, ok := d.GetOk("maintenance_window"); ok {
		opts.PreferredMaintenanceWindow = aws.String(attr.(string))
	}

	if attr, ok := d.GetOk("backup_window"); ok {
		opts.PreferredBackupWindow = aws.String(attr.(string))
	}

	if attr, ok := d.GetOk("publicly_accessible"); ok {
		opts.PubliclyAccessible = aws.Boolean(attr.(bool))
	}

	if attr, ok := d.GetOk("db_subnet_group_name"); ok {
		opts.DBSubnetGroupName = aws.String(attr.(string))
	}

	if attr, ok := d.GetOk("parameter_group_name"); ok {
		opts.DBParameterGroupName = aws.String(attr.(string))
	}

	if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
		var s []string
		for _, v := range attr.List() {
			s = append(s, v.(string))
		}
		opts.VPCSecurityGroupIDs = s
	}

	if attr := d.Get("security_group_names").(*schema.Set); attr.Len() > 0 {
		var s []string
		for _, v := range attr.List() {
			s = append(s, v.(string))
		}
		opts.DBSecurityGroups = s
	}

	log.Printf("[DEBUG] DB Instance create configuration: %#v", opts)
	_, err := conn.CreateDBInstance(&opts)
	if err != nil {
		return fmt.Errorf("Error creating DB Instance: %s", err)
	}

	d.SetId(d.Get("identifier").(string))

	log.Printf("[INFO] DB Instance ID: %s", d.Id())

	log.Println(
		"[INFO] Waiting for DB Instance to be available")

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"creating", "backing-up", "modifying"},
		Target:     "available",
		Refresh:    resourceAwsDbInstanceStateRefreshFunc(d, meta),
		Timeout:    40 * time.Minute,
		MinTimeout: 10 * time.Second,
		Delay:      30 * time.Second, // Wait 30 secs before starting
	}

	// Wait, catching any errors
	_, err = stateConf.WaitForState()
	if err != nil {
		return err
	}

	return resourceAwsDbInstanceRead(d, meta)
}
func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error {
	elbconn := meta.(*AWSClient).elbconn

	d.Partial(true)

	// If we currently have instances, or did have instances,
	// we want to figure out what to add and remove from the load
	// balancer
	if d.HasChange("instances") {
		o, n := d.GetChange("instances")
		os := o.(*schema.Set)
		ns := n.(*schema.Set)
		remove := expandInstanceString(os.Difference(ns).List())
		add := expandInstanceString(ns.Difference(os).List())

		if len(add) > 0 {
			registerInstancesOpts := elb.RegisterEndPointsInput{
				LoadBalancerName: aws.String(d.Id()),
				Instances:        add,
			}

			_, err := elbconn.RegisterInstancesWithLoadBalancer(&registerInstancesOpts)
			if err != nil {
				return fmt.Errorf("Failure registering instances: %s", err)
			}
		}
		if len(remove) > 0 {
			deRegisterInstancesOpts := elb.DeregisterEndPointsInput{
				LoadBalancerName: aws.String(d.Id()),
				Instances:        remove,
			}

			_, err := elbconn.DeregisterInstancesFromLoadBalancer(&deRegisterInstancesOpts)
			if err != nil {
				return fmt.Errorf("Failure deregistering instances: %s", err)
			}
		}

		d.SetPartial("instances")
	}

	log.Println("[INFO] outside modify attributes")
	if d.HasChange("cross_zone_load_balancing") {
		log.Println("[INFO] inside modify attributes")
		attrs := elb.ModifyLoadBalancerAttributesInput{
			LoadBalancerName: aws.String(d.Get("name").(string)),
			LoadBalancerAttributes: &elb.LoadBalancerAttributes{
				CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
					aws.Boolean(d.Get("cross_zone_load_balancing").(bool)),
				},
			},
		}
		_, err := elbconn.ModifyLoadBalancerAttributes(&attrs)
		if err != nil {
			return fmt.Errorf("Failure configuring cross zone balancing: %s", err)
		}
		d.SetPartial("cross_zone_load_balancing")
	}

	if d.HasChange("health_check") {
		vs := d.Get("health_check").(*schema.Set).List()
		if len(vs) > 0 {
			check := vs[0].(map[string]interface{})
			configureHealthCheckOpts := elb.ConfigureHealthCheckInput{
				LoadBalancerName: aws.String(d.Id()),
				HealthCheck: &elb.HealthCheck{
					HealthyThreshold:   aws.Integer(check["healthy_threshold"].(int)),
					UnhealthyThreshold: aws.Integer(check["unhealthy_threshold"].(int)),
					Interval:           aws.Integer(check["interval"].(int)),
					Target:             aws.String(check["target"].(string)),
					Timeout:            aws.Integer(check["timeout"].(int)),
				},
			}
			_, err := elbconn.ConfigureHealthCheck(&configureHealthCheckOpts)
			if err != nil {
				return fmt.Errorf("Failure configuring health check: %s", err)
			}
			d.SetPartial("health_check")
		}
	}

	if err := setTagsELB(elbconn, d); err != nil {
		return err
	} else {
		d.SetPartial("tags")
	}

	d.Partial(false)
	return resourceAwsElbRead(d, meta)
}
func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	ec2conn := meta.(*AWSClient).ec2conn

	// Figure out user data
	userData := ""
	if v := d.Get("user_data"); v != nil {
		userData = base64.StdEncoding.EncodeToString([]byte(v.(string)))
	}

	// check for non-default Subnet, and cast it to a String
	subnet, hasSubnet := d.GetOk("subnet_id")
	subnetID := subnet.(string)

	placement := &ec2.Placement{
		AvailabilityZone: aws.String(d.Get("availability_zone").(string)),
	}

	if hasSubnet {
		// Tenancy is only valid inside a VPC
		// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Placement.html
		if v := d.Get("tenancy").(string); v != "" {
			placement.Tenancy = aws.String(v)
		}
	}

	iam := &ec2.IAMInstanceProfileSpecification{
		Name: aws.String(d.Get("iam_instance_profile").(string)),
	}

	// Build the creation struct
	runOpts := &ec2.RunInstancesRequest{
		ImageID:            aws.String(d.Get("ami").(string)),
		Placement:          placement,
		InstanceType:       aws.String(d.Get("instance_type").(string)),
		MaxCount:           aws.Integer(1),
		MinCount:           aws.Integer(1),
		UserData:           aws.String(userData),
		EBSOptimized:       aws.Boolean(d.Get("ebs_optimized").(bool)),
		IAMInstanceProfile: iam,
	}

	associatePublicIPAddress := false
	if v := d.Get("associate_public_ip_address"); v != nil {
		associatePublicIPAddress = v.(bool)
	}

	var groups []string
	if v := d.Get("security_groups"); v != nil {
		// Security group names.
		// For a nondefault VPC, you must use security group IDs instead.
		// See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html
		for _, v := range v.(*schema.Set).List() {
			str := v.(string)
			groups = append(groups, str)
		}
	}

	if hasSubnet && associatePublicIPAddress {
		// If we have a non-default VPC / Subnet specified, we can flag
		// AssociatePublicIpAddress to get a Public IP assigned. By default these are not provided.
		// You cannot specify both SubnetId and the NetworkInterface.0.* parameters though, otherwise
		// you get: Network interfaces and an instance-level subnet ID may not be specified on the same request
		// You also need to attach Security Groups to the NetworkInterface instead of the instance,
		// to avoid: Network interfaces and an instance-level security groups may not be specified on
		// the same request
		ni := ec2.InstanceNetworkInterfaceSpecification{
			AssociatePublicIPAddress: aws.Boolean(associatePublicIPAddress),
			DeviceIndex:              aws.Integer(0),
			SubnetID:                 aws.String(subnetID),
		}

		if v, ok := d.GetOk("private_ip"); ok {
			ni.PrivateIPAddress = aws.String(v.(string))
		}

		if len(groups) > 0 {
			ni.Groups = groups
		}

		runOpts.NetworkInterfaces = []ec2.InstanceNetworkInterfaceSpecification{ni}
	} else {
		if subnetID != "" {
			runOpts.SubnetID = aws.String(subnetID)
		}

		if v, ok := d.GetOk("private_ip"); ok {
			runOpts.PrivateIPAddress = aws.String(v.(string))
		}

		if runOpts.SubnetID != nil && *runOpts.SubnetID != "" {
			runOpts.SecurityGroupIDs = groups
		} else {
			runOpts.SecurityGroups = groups
		}
	}

	if v, ok := d.GetOk("key_name"); ok {
		runOpts.KeyName = aws.String(v.(string))
	}

	blockDevices := make([]ec2.BlockDeviceMapping, 0)

	if v, ok := d.GetOk("ebs_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &ec2.EBSBlockDevice{
				DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
			}
			if v, ok := bd["snapshot_id"].(string); ok && v != "" {
				ebs.SnapshotID = aws.String(v)
			}
			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Integer(v)
			}
			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}
			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.IOPS = aws.Integer(v)
			}
			blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
				DeviceName: aws.String(bd["device_name"].(string)),
				EBS:        ebs,
			})
		}
	}

	if v, ok := d.GetOk("ephemeral_block_device"); ok {
		vL := v.(*schema.Set).List()
		for _, v := range vL {
			bd := v.(map[string]interface{})
			blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
				DeviceName:  aws.String(bd["device_name"].(string)),
				VirtualName: aws.String(bd["virtual_name"].(string)),
			})
		}
	}

	if v, ok := d.GetOk("root_block_device"); ok {
		vL := v.(*schema.Set).List()
		if len(vL) > 1 {
			return fmt.Errorf("Cannot specify more than one root_block_device.")
		}
		for _, v := range vL {
			bd := v.(map[string]interface{})
			ebs := &ec2.EBSBlockDevice{
				DeleteOnTermination: aws.Boolean(bd["delete_on_termination"].(bool)),
			}
			if v, ok := bd["volume_size"].(int); ok && v != 0 {
				ebs.VolumeSize = aws.Integer(v)
			}
			if v, ok := bd["volume_type"].(string); ok && v != "" {
				ebs.VolumeType = aws.String(v)
			}
			if v, ok := bd["iops"].(int); ok && v > 0 {
				ebs.IOPS = aws.Integer(v)
			}
			if dn, err := fetchRootDeviceName(d.Get("ami").(string), ec2conn); err == nil {
				blockDevices = append(blockDevices, ec2.BlockDeviceMapping{
					DeviceName: dn,
					EBS:        ebs,
				})
			} else {
				return err
			}
		}
	}

	if len(blockDevices) > 0 {
		runOpts.BlockDeviceMappings = blockDevices
	}

	// Create the instance
	log.Printf("[DEBUG] Run configuration: %#v", runOpts)
	runResp, err := ec2conn.RunInstances(runOpts)
	if err != nil {
		return fmt.Errorf("Error launching source instance: %s", err)
	}

	instance := &runResp.Instances[0]
	log.Printf("[INFO] Instance ID: %s", *instance.InstanceID)

	// Store the resulting ID so we can look this up later
	d.SetId(*instance.InstanceID)

	// Wait for the instance to become running so we can get some attributes
	// that aren't available until later.
	log.Printf(
		"[DEBUG] Waiting for instance (%s) to become running",
		*instance.InstanceID)

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     "running",
		Refresh:    InstanceStateRefreshFunc(ec2conn, *instance.InstanceID),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	instanceRaw, err := stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for instance (%s) to become ready: %s",
			*instance.InstanceID, err)
	}

	instance = instanceRaw.(*ec2.Instance)

	// Initialize the connection info
	if instance.PublicIPAddress != nil {
		d.SetConnInfo(map[string]string{
			"type": "ssh",
			"host": *instance.PublicIPAddress,
		})
	}

	// Set our attributes
	if err := resourceAwsInstanceRead(d, meta); err != nil {
		return err
	}

	// Update if we need to
	return resourceAwsInstanceUpdate(d, meta)
}