func resourceVultrServerRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*lib.Client) server, err := client.GetServer(d.Id()) if err != nil { // check if the server not longer exists. if err.Error() == "Invalid server." { d.SetId("") return nil } return fmt.Errorf("Error retrieving server: %s", err) } d.Set("name", server.Name) d.Set("region_id", server.RegionID) d.Set("plan_id", server.PlanID) d.Set("status", server.Status) d.Set("power_status", server.PowerStatus) d.Set("default_password", server.DefaultPassword) d.Set("ipv4_address", server.MainIP) d.Set("ipv6_address", server.MainIPV6) d.Set("ipv4_private_address", server.InternalIP) d.SetConnInfo(map[string]string{ "type": "ssh", "host": server.MainIP, "password": server.DefaultPassword, }) return nil }
func resourceScalewayServerRead(d *schema.ResourceData, m interface{}) error { scaleway := m.(*Client).scaleway server, err := scaleway.GetServer(d.Id()) if err != nil { if serr, ok := err.(api.ScalewayAPIError); ok { log.Printf("[DEBUG] Error reading server: %q\n", serr.APIMessage) if serr.StatusCode == 404 { d.SetId("") return nil } } return err } d.Set("private_ip", server.PrivateIP) d.Set("public_ip", server.PublicAddress.IP) d.Set("state", server.State) d.Set("state_detail", server.StateDetail) d.Set("tags", server.Tags) d.SetConnInfo(map[string]string{ "type": "ssh", "host": server.PublicAddress.IP, }) return nil }
func resourceServerRead(d *schema.ResourceData, m interface{}) error { scaleway := m.(*api.ScalewayAPI) server, err := scaleway.GetServer(d.Id()) if err != nil { // TODO: make sure it's ScalewayAPIError or it might crash serr := err.(api.ScalewayAPIError) // if the resource was destroyed, destroy the resource locally if serr.StatusCode == 404 { d.SetId("") return nil } return err } // S.t. it's compactible with terraform-ansible d.Set("ipv4_address_private", server.PrivateIP) d.Set("state", server.State) d.Set("state_detail", server.StateDetail) d.SetConnInfo(map[string]string{ "type": "ssh", "host": server.PublicAddress.IP, }) // TODO: set more fields return nil }
func resourceLxdContainerRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*LxdProvider).Client sshIP := "" gotIp := false cycles := 0 // wait for NIC to come up and get IP from DHCP for !gotIp && cycles < 15 { cycles += 1 ct, _ := client.ContainerState(d.Get("name").(string)) d.Set("status", ct.Status) for iface, net := range ct.Network { if iface != "lo" { for _, ip := range net.Addresses { if ip.Family == "inet" { d.Set("ip_address", ip.Address) d.Set("mac_address", net.Hwaddr) gotIp = true } } } } time.Sleep(1 * time.Second) } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", "host": sshIP, }) return nil }
func resourceVIXVMCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) vm := new(vix.VM) vm.Provider = config.Product vm.VerifySSL = config.VerifySSL if err := tf_to_vix(d, vm); err != nil { return err } id, err := vm.Create() if err != nil { return err } log.Printf("[DEBUG] Resource ID: %s\n", id) d.SetId(id) // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", "host": vm.IPAddress, }) return resourceVIXVMRead(d, meta) }
// resourceVMRead refreshes state for a VirtualBox machine: basic attributes,
// a human-readable memory size, optional user_data from the VM's extra data,
// and the network adapters. The SSH connection info is pointed at the first
// non-NAT adapter with an available IPv4 address.
func resourceVMRead(d *schema.ResourceData, meta interface{}) error {
	vm, err := vbox.GetMachine(d.Id())
	if err != nil {
		/* VM no longer exist */
		if err == vbox.ErrMachineNotExist {
			d.SetId("")
			return nil
		}
		return err
	}
	// if vm.State != vbox.Running {
	// 	setState(d, vm.State)
	// 	return nil
	// }
	setState(d, vm.State)
	d.Set("name", vm.Name)
	d.Set("cpus", vm.CPUs)
	// Render memory as a lowercase IEC string (e.g. "512mib") to match how
	// the schema stores it.
	bytes := uint64(vm.Memory) * humanize.MiByte
	repr := humanize.IBytes(bytes)
	d.Set("memory", strings.ToLower(repr))
	// user_data is stashed in the VM's extra data; absent/empty means unset.
	userData, err := vm.GetExtraData("user_data")
	if err != nil {
		return err
	}
	if userData != nil && *userData != "" {
		d.Set("user_data", *userData)
	}
	// Populate the network_adapter attributes from the live VM.
	err = net_vbox_to_tf(vm, d)
	if err != nil {
		return err
	}
	/* Set connection info to first non NAT IPv4 address */
	for i, nic := range vm.NICs {
		if nic.Network == vbox.NICNetNAT {
			continue
		}
		// Only use adapters whose address was reported as available by
		// net_vbox_to_tf above.
		availKey := fmt.Sprintf("network_adapter.%d.ipv4_address_available", i)
		if d.Get(availKey).(string) != "yes" {
			continue
		}
		ipv4Key := fmt.Sprintf("network_adapter.%d.ipv4_address", i)
		ipv4 := d.Get(ipv4Key).(string)
		if ipv4 == "" {
			continue
		}
		d.SetConnInfo(map[string]string{
			"type": "ssh",
			"host": ipv4,
		})
		break
	}
	return nil
}
func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*godo.Client) id, err := strconv.Atoi(d.Id()) if err != nil { return fmt.Errorf("invalid droplet id: %v", err) } // Retrieve the droplet properties for updating the state droplet, resp, err := client.Droplets.Get(id) if err != nil { // check if the droplet no longer exists. if resp.StatusCode == 404 { log.Printf("[WARN] DigitalOcean Droplet (%s) not found", d.Id()) d.SetId("") return nil } return fmt.Errorf("Error retrieving droplet: %s", err) } if droplet.Image.Slug != "" { d.Set("image", droplet.Image.Slug) } else { d.Set("image", droplet.Image.ID) } d.Set("name", droplet.Name) d.Set("region", droplet.Region.Slug) d.Set("size", droplet.Size.Slug) d.Set("status", droplet.Status) d.Set("locked", strconv.FormatBool(droplet.Locked)) if publicIPv6 := findIPv6AddrByType(droplet, "public"); publicIPv6 != "" { d.Set("ipv6", true) d.Set("ipv6_address", publicIPv6) d.Set("ipv6_address_private", findIPv6AddrByType(droplet, "private")) } d.Set("ipv4_address", findIPv4AddrByType(droplet, "public")) if privateIPv4 := findIPv4AddrByType(droplet, "private"); privateIPv4 != "" { d.Set("private_networking", true) d.Set("ipv4_address_private", privateIPv4) } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", "host": findIPv4AddrByType(droplet, "public"), }) return nil }
func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*packngo.Client) // Retrieve the device properties for updating the state device, _, err := client.Devices.Get(d.Id()) if err != nil { return fmt.Errorf("Error retrieving device: %s", err) } d.Set("name", device.Hostname) d.Set("plan", device.Plan.Slug) d.Set("facility", device.Facility.Code) d.Set("operating_system", device.OS.Slug) d.Set("state", device.State) d.Set("billing_cycle", device.BillingCycle) d.Set("locked", device.Locked) d.Set("created", device.Created) d.Set("updated", device.Updated) tags := make([]string, 0) for _, tag := range device.Tags { tags = append(tags, tag) } d.Set("tags", tags) provisionerAddress := "" networks := make([]map[string]interface{}, 0, 1) for _, ip := range device.Network { network := make(map[string]interface{}) network["address"] = ip.Address network["gateway"] = ip.Gateway network["family"] = ip.Family network["cidr"] = ip.Cidr network["public"] = ip.Public networks = append(networks, network) if ip.Family == 4 && ip.Public == true { provisionerAddress = ip.Address } } d.Set("network", networks) log.Printf("[DEBUG] Provisioner Address set to %v", provisionerAddress) if provisionerAddress != "" { d.SetConnInfo(map[string]string{ "type": "ssh", "host": provisionerAddress, }) } return nil }
func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) instance, err := config.clientCompute.Instances.Get( config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore d.SetId("") return nil } return fmt.Errorf("Error reading instance: %s", err) } d.Set("can_ip_forward", instance.CanIpForward) // Set the networks externalIP := "" for i, iface := range instance.NetworkInterfaces { prefix := fmt.Sprintf("network.%d", i) d.Set(prefix+".name", iface.Name) // Use the first external IP found for the default connection info. natIP := resourceInstanceNatIP(iface) if externalIP == "" && natIP != "" { externalIP = natIP } d.Set(prefix+".external_address", natIP) d.Set(prefix+".internal_address", iface.NetworkIP) } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", "host": externalIP, }) // Set the metadata fingerprint if there is one. if instance.Metadata != nil { d.Set("metadata_fingerprint", instance.Metadata.Fingerprint) } // Set the tags fingerprint if there is one. if instance.Tags != nil { d.Set("tags_fingerprint", instance.Tags.Fingerprint) } return nil }
func resourceServerCreate(d *schema.ResourceData, m interface{}) error { address := d.Get("address").(string) d.Set("increment", d.Get("increment").(string)) d.SetId(address) d.SetConnInfo(map[string]string{ "type": "ssh", "host": address, }) log.Printf("[DEBUG] :::::::::::::::::::::::::: ") log.Printf("%+v\n", d) log.Printf("[DEBUG] :::::::::::::::::::::::::: ") return nil }
func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*digitalocean.Client) // Retrieve the droplet properties for updating the state droplet, err := client.RetrieveDroplet(d.Id()) if err != nil { // check if the droplet no longer exists. if err.Error() == "Error retrieving droplet: API Error: 404 Not Found" { d.SetId("") return nil } return fmt.Errorf("Error retrieving droplet: %s", err) } if droplet.ImageSlug() != "" { d.Set("image", droplet.ImageSlug()) } else { d.Set("image", droplet.ImageId()) } d.Set("name", droplet.Name) d.Set("region", droplet.RegionSlug()) d.Set("size", droplet.SizeSlug) d.Set("status", droplet.Status) d.Set("locked", droplet.IsLocked()) if droplet.IPV6Address("public") != "" { d.Set("ipv6", true) d.Set("ipv6_address", droplet.IPV6Address("public")) d.Set("ipv6_address_private", droplet.IPV6Address("private")) } d.Set("ipv4_address", droplet.IPV4Address("public")) if droplet.NetworkingType() == "private" { d.Set("private_networking", true) d.Set("ipv4_address_private", droplet.IPV4Address("private")) } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", "host": droplet.IPV4Address("public"), }) return nil }
func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*packngo.Client) // Retrieve the device properties for updating the state dev, _, err := client.Devices.Get(d.Id()) if err != nil { // check if the device no longer exists. // TODO: This is all wrong for Packet. if strings.Contains(err.Error(), "404 Not Found") { d.SetId("") return nil } return fmt.Errorf("Error retrieving device: %s", err) } d.Set("os", dev.OS.Slug) d.Set("hostname", dev.Hostname) d.Set("facility", dev.Facility.Code) d.Set("plan", dev.Plan.Slug) d.Set("state", dev.State) d.Set("locked", dev.Locked) var publicIPv4 string for _, addr := range dev.Network { switch addr.Family { case 4: if addr.Public { publicIPv4 = addr.Address d.Set("ipv4_address", addr.Address) } else { d.Set("ipv4_address_private", addr.Address) } case 6: if addr.Public { d.Set("ipv6_address", addr.Address) } } } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", "host": publicIPv4, }) return nil }
func readInstance(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn resp, err := conn.DescribeInstances(&ec2.DescribeInstancesInput{ InstanceIds: []*string{aws.String(d.Get("spot_instance_id").(string))}, }) if err != nil { // If the instance was not found, return nil so that we can show // that the instance is gone. if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "InvalidInstanceID.NotFound" { return fmt.Errorf("no instance found") } // Some other error, report it return err } // If nothing was found, then return no state if len(resp.Reservations) == 0 { return fmt.Errorf("no instances found") } instance := resp.Reservations[0].Instances[0] // Set these fields for connection information if instance != nil { d.Set("public_dns", instance.PublicDnsName) d.Set("public_ip", instance.PublicIpAddress) d.Set("private_dns", instance.PrivateDnsName) d.Set("private_ip", instance.PrivateIpAddress) // set connection information if instance.PublicIpAddress != nil { d.SetConnInfo(map[string]string{ "type": "ssh", "host": *instance.PublicIpAddress, }) } else if instance.PrivateIpAddress != nil { d.SetConnInfo(map[string]string{ "type": "ssh", "host": *instance.PrivateIpAddress, }) } } return nil }
func resourceFromJson(d *schema.ResourceData, vmJson []byte) error { l := log.New(os.Stderr, "", 0) l.Printf("VM definition: %s", vmJson) vm := &bigvServer{} if err := json.Unmarshal(vmJson, vm); err != nil { return err } d.SetId(strconv.Itoa(vm.Id)) d.Set("name", vm.Name) d.Set("cores", vm.Cores) d.Set("memory", vm.Memory) d.Set("power_on", vm.Power) d.Set("reboot", vm.Reboot) d.Set("group_id", vm.GroupId) d.Set("zone", vm.Zone) // If we don't get discs back, this was probably an update request if len(vm.Discs) == 1 { d.Set("disk_size", vm.Discs[0].Size) } // Distribution is empty in create response, leave it with what we sent in if vm.Distribution != "" { d.Set("os", vm.Distribution) } // Not finding the ips is fine, because they're not sent back in the create request if len(vm.Nics) > 0 { // This is fairly^Wvery^Wacceptably hacky d.Set("ipv4", vm.Nics[0].Ips[0]) d.Set("ipv6", vm.Nics[0].Ips[1]) d.SetConnInfo(map[string]string{ "type": "ssh", "host": vm.Nics[0].Ips[0], "password": d.Get("root_password").(string), }) } return nil }
func resourceChefNodeCreate(d *schema.ResourceData, meta interface{}) error { client := meta.(*chefGo.Client) name := d.Get("name").(string) environment := d.Get("environment").(string) attributes, err := readAttributes(d.Get("attributes").(string)) if err != nil { return err } var run_list []string schema_run_list := d.Get("run_list").(interface{}) if err := mapstructure.Decode(schema_run_list, &run_list); err != nil { return err } node := chefGo.Node{ Name: name, Environment: environment, NormalAttributes: attributes, AutomaticAttributes: map[string]interface{}{}, DefaultAttributes: map[string]interface{}{}, OverrideAttributes: map[string]interface{}{}, ChefType: "node", JsonClass: "Chef::Node", RunList: run_list, } log.Printf("[DEBUG] node create configuration: %#v", node) _, err = client.Nodes.Post(node) if err != nil { return fmt.Errorf("Error creating chef node: %s", err) } d.SetId(node.Name) d.SetConnInfo(map[string]string{ "type": "ssh", "host": node.Name, }) return resourceChefNodeRead(d, meta) }
func resourceMachineRead(d *schema.ResourceData, config *Config) error { api, err := config.Cloud() if err != nil { return err } machine, err := api.GetMachine(d.Id()) if err != nil { return err } d.SetId(machine.Id) d.Set("name", machine.Name) d.Set("type", machine.Type) d.Set("state", machine.State) d.Set("dataset", machine.Dataset) d.Set("memory", machine.Memory) d.Set("disk", machine.Disk) d.Set("ips", machine.IPs) d.Set("tags", machine.Tags) d.Set("created", machine.Created) d.Set("updated", machine.Updated) d.Set("package", machine.Package) d.Set("image", machine.Image) d.Set("primaryip", machine.PrimaryIP) d.Set("networks", machine.Networks) // d.Set("firewall_enabled", machine.FirewallEnabled) // but that field doesn't exist... // computed attributes from metadata for schemaName, metadataKey := range resourceMachineMetadataKeys { d.Set(schemaName, machine.Metadata[metadataKey]) } // Initialize connection info to enable remote-exec d.SetConnInfo(map[string]string{ "type": "ssh", "host": machine.PrimaryIP, }) return nil }
func SetConnectionDetails(d *schema.ResourceData) { var preferredSSHAddress string if attr, ok := d.GetOk("public_hostname"); ok { preferredSSHAddress = attr.(string) } else if attr, ok := d.GetOk("ipv6_hostname"); ok { preferredSSHAddress = attr.(string) } else if attr, ok := d.GetOk("fqdn"); ok { preferredSSHAddress = attr.(string) } if preferredSSHAddress != "" { connection_details := map[string]string{ "type": "ssh", "host": preferredSSHAddress, } if attr, ok := d.GetOk("username"); ok { connection_details["user"] = attr.(string) } d.SetConnInfo(connection_details) } }
func resourceSoftLayerVirtualserverRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*Client).virtualGuestService id, err := strconv.Atoi(d.Id()) if err != nil { return fmt.Errorf("Not a valid ID, must be an integer: %s", err) } result, err := client.GetObject(id) if err != nil { return fmt.Errorf("Error retrieving virtual server: %s", err) } d.Set("name", result.Hostname) d.Set("domain", result.Domain) if result.Datacenter != nil { d.Set("region", result.Datacenter.Name) } d.Set("public_network_speed", result.NetworkComponents[0].MaxSpeed) d.Set("cpu", result.StartCpus) d.Set("ram", result.MaxMemory) d.Set("has_public_ip", result.PrimaryIpAddress != "") d.Set("ipv4_address", result.PrimaryIpAddress) d.Set("ipv4_address_private", result.PrimaryBackendIpAddress) connIpAddress := "" if result.PrimaryIpAddress != "" { connIpAddress = result.PrimaryIpAddress } else { connIpAddress = result.PrimaryBackendIpAddress } log.Printf("[INFO] Setting ConnInfo IP: %s", connIpAddress) d.SetConnInfo(map[string]string{ "type": "ssh", "host": connIpAddress, }) return nil }
func resourceLxdContainerRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*LxdProvider).Client name := d.Id() container, err := client.ContainerInfo(name) if err != nil { return err } log.Printf("[DEBUG] Retrieved container %s: %#v", name, container) ct, err := client.ContainerState(name) if err != nil { return err } d.Set("status", ct.Status) sshIP := "" for iface, net := range ct.Network { if iface != "lo" { for _, ip := range net.Addresses { if ip.Family == "inet" { d.Set("ip_address", ip.Address) sshIP = ip.Address d.Set("mac_address", net.Hwaddr) } } } } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", "host": sshIP, }) return nil }
func lxcIPAddressConfiguration(c *lxc.Container, d *schema.ResourceData) error { // Loop through all interfaces and see if one is marked as management managementNIC := "eth0" i := 0 networkInterfaces := d.Get("network_interface").([]interface{}) for _, n := range networkInterfaces { nic := n.(map[string]interface{}) if nic["management"] == true { managementNIC = fmt.Sprintf("%s%s", "eth", strconv.Itoa(i)) } i++ } // Get the IP addresses of the management NIC // For now, we'll just use the first returned IP. d.Set("address_v4", "") ipv4s, err := c.IPv4Address(managementNIC) if err == nil { if len(ipv4s) > 0 { d.Set("address_v4", ipv4s[0]) d.SetConnInfo(map[string]string{ "type": "ssh", "host": ipv4s[0], }) } } d.Set("address_v6", "") ipv6s, err := c.IPv6Address(managementNIC) if err == nil { if len(ipv6s) > 0 { d.Set("address_v6", ipv6s[0]) } } return nil }
func resourceIcfInstanceRead(d *schema.ResourceData, meta interface{}) error { c := meta.(*icf.Client) instance, err := c.GetInstance(d.Id()) if err != nil { errs := fmt.Sprintf("%v", err) if strings.Contains(errs, "404") || strings.Contains(errs, "400") { return nil } return fmt.Errorf("Error reading ICF instannce %s error %v", d.Id(), err) } //d.Set("public_ip", instance.PublicIp) d.Set("public_ip", instance.Nics[0].Ip) d.Set("private_ip", instance.PrivateIp) d.Set("enterprise_ip", instance.Nics[0].Ip) d.Set("name", instance.Name) d.SetConnInfo(map[string]string{ "type": "ssh", "host": instance.Nics[0].Ip, }) return nil }
// resourceComputeInstanceV2Read refreshes an OpenStack Nova server's state:
// networks and access addresses, metadata, security groups, flavor, image,
// and volume attachments. SSH connection info is seeded with the best
// access address, preferring IPv4 over IPv6.
func resourceComputeInstanceV2Read(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	computeClient, err := config.computeV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack compute client: %s", err)
	}

	server, err := servers.Get(computeClient, d.Id()).Extract()
	if err != nil {
		// CheckDeleted removes the resource from state on a 404.
		return CheckDeleted(d, err, "server")
	}

	log.Printf("[DEBUG] Retreived Server %s: %+v", d.Id(), server)

	d.Set("name", server.Name)

	// Get the instance network and address information
	networks, err := getInstanceNetworksAndAddresses(computeClient, d)
	if err != nil {
		return err
	}

	// Determine the best IPv4 and IPv6 addresses to access the instance with
	hostv4, hostv6 := getInstanceAccessAddresses(d, networks)

	d.Set("network", networks)
	d.Set("access_ip_v4", hostv4)
	d.Set("access_ip_v6", hostv6)

	// Determine the best IP address to use for SSH connectivity.
	// Prefer IPv4 over IPv6.
	preferredSSHAddress := ""
	if hostv4 != "" {
		preferredSSHAddress = hostv4
	} else if hostv6 != "" {
		preferredSSHAddress = hostv6
	}

	if preferredSSHAddress != "" {
		// Initialize the connection info
		d.SetConnInfo(map[string]string{
			"type": "ssh",
			"host": preferredSSHAddress,
		})
	}

	d.Set("metadata", server.Metadata)

	// Flatten the security groups to their names.
	secGrpNames := []string{}
	for _, sg := range server.SecurityGroups {
		secGrpNames = append(secGrpNames, sg["name"].(string))
	}
	d.Set("security_groups", secGrpNames)

	// The flavor comes back as a loosely-typed map; its "id" must be a string.
	flavorId, ok := server.Flavor["id"].(string)
	if !ok {
		return fmt.Errorf("Error setting OpenStack server's flavor: %v", server.Flavor)
	}
	d.Set("flavor_id", flavorId)

	// Resolve the flavor name from its ID.
	flavor, err := flavors.Get(computeClient, flavorId).Extract()
	if err != nil {
		return err
	}
	d.Set("flavor_name", flavor.Name)

	// Set the instance's image information appropriately
	if err := setImageInformation(computeClient, server, d); err != nil {
		return err
	}

	// volume attachments
	if err := getVolumeAttachments(computeClient, d); err != nil {
		return err
	}

	return nil
}
// resourceCloudStackInstanceCreate deploys a new CloudStack virtual machine
// from the configured service offering, zone, and template, then records
// its ID and connection info and delegates the rest to the read function.
func resourceCloudStackInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	cs := meta.(*cloudstack.CloudStackClient)

	// Retrieve the service_offering ID
	serviceofferingid, e := retrieveID(cs, "service_offering", d.Get("service_offering").(string))
	if e != nil {
		return e.Error()
	}

	// Retrieve the zone ID
	zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string))
	if e != nil {
		return e.Error()
	}

	// Retrieve the zone object
	zone, _, err := cs.Zone.GetZoneByID(zoneid)
	if err != nil {
		return err
	}

	// Retrieve the template ID
	templateid, e := retrieveTemplateID(cs, zone.Id, d.Get("template").(string))
	if e != nil {
		return e.Error()
	}

	// Create a new parameter struct
	p := cs.VirtualMachine.NewDeployVirtualMachineParams(serviceofferingid, templateid, zone.Id)

	// Set the name
	name, hasName := d.GetOk("name")
	if hasName {
		p.SetName(name.(string))
	}

	// Set the display name; fall back to the name when unset.
	if displayname, ok := d.GetOk("display_name"); ok {
		p.SetDisplayname(displayname.(string))
	} else if hasName {
		p.SetDisplayname(name.(string))
	}

	// Advanced zones require an explicit network.
	if zone.Networktype == "Advanced" {
		// Retrieve the network ID
		networkid, e := retrieveID(cs, "network", d.Get("network").(string))
		if e != nil {
			return e.Error()
		}
		// Set the default network ID
		p.SetNetworkids([]string{networkid})
	}

	// If there is a ipaddres supplied, add it to the parameter struct
	if ipaddres, ok := d.GetOk("ipaddress"); ok {
		p.SetIpaddress(ipaddres.(string))
	}

	// If there is a project supplied, we retrieve and set the project id
	if project, ok := d.GetOk("project"); ok {
		// Retrieve the project ID
		projectid, e := retrieveID(cs, "project", project.(string))
		if e != nil {
			return e.Error()
		}
		// Set the default project ID
		p.SetProjectid(projectid)
	}

	// If a keypair is supplied, add it to the parameter struct
	if keypair, ok := d.GetOk("keypair"); ok {
		p.SetKeypair(keypair.(string))
	}

	// If the user data contains any info, it needs to be base64 encoded and
	// added to the parameter struct
	if userData, ok := d.GetOk("user_data"); ok {
		ud := base64.StdEncoding.EncodeToString([]byte(userData.(string)))

		// deployVirtualMachine uses POST by default, so max userdata is 32K
		maxUD := 32768
		if cs.HTTPGETOnly {
			// deployVirtualMachine using GET instead, so max userdata is 2K
			maxUD = 2048
		}

		if len(ud) > maxUD {
			return fmt.Errorf(
				"The supplied user_data contains %d bytes after encoding, "+
					"this exeeds the limit of %d bytes", len(ud), maxUD)
		}
		p.SetUserdata(ud)
	}

	// Create the new instance
	r, err := cs.VirtualMachine.DeployVirtualMachine(p)
	if err != nil {
		return fmt.Errorf("Error creating the new instance %s: %s", name, err)
	}

	d.SetId(r.Id)

	// Set the connection info for any configured provisioners
	d.SetConnInfo(map[string]string{
		"host":     r.Nic[0].Ipaddress,
		"password": r.Password,
	})

	return resourceCloudStackInstanceRead(d, meta)
}
// resourceCloudStackInstanceCreate deploys a new CloudStack virtual machine.
// This variant supports the newer network_id/ip_address attributes (with
// deprecated network/ipaddress fallbacks), affinity groups, instance groups,
// and a custom root disk size.
func resourceCloudStackInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	cs := meta.(*cloudstack.CloudStackClient)

	// Retrieve the service_offering ID
	serviceofferingid, e := retrieveID(cs, "service_offering", d.Get("service_offering").(string))
	if e != nil {
		return e.Error()
	}

	// Retrieve the zone ID
	zoneid, e := retrieveID(cs, "zone", d.Get("zone").(string))
	if e != nil {
		return e.Error()
	}

	// Retrieve the zone object
	zone, _, err := cs.Zone.GetZoneByID(zoneid)
	if err != nil {
		return err
	}

	// Retrieve the template ID
	templateid, e := retrieveTemplateID(cs, zone.Id, d.Get("template").(string))
	if e != nil {
		return e.Error()
	}

	// Create a new parameter struct
	p := cs.VirtualMachine.NewDeployVirtualMachineParams(serviceofferingid, templateid, zone.Id)

	// Set the name
	name, hasName := d.GetOk("name")
	if hasName {
		p.SetName(name.(string))
	}

	// Set the display name; fall back to the name when unset.
	if displayname, ok := d.GetOk("display_name"); ok {
		p.SetDisplayname(displayname.(string))
	} else if hasName {
		p.SetDisplayname(name.(string))
	}

	if zone.Networktype == "Advanced" {
		// Prefer network_id; accept the deprecated network attribute.
		network, ok := d.GetOk("network_id")
		if !ok {
			network, ok = d.GetOk("network")
		}
		if !ok {
			return errors.New(
				"Either `network_id` or [deprecated] `network` must be provided when using a zone with network type `advanced`.")
		}

		// Retrieve the network ID
		networkid, e := retrieveID(
			cs,
			"network",
			network.(string),
			cloudstack.WithProject(d.Get("project").(string)),
		)
		if e != nil {
			return e.Error()
		}

		// Set the default network ID
		p.SetNetworkids([]string{networkid})
	}

	// If there is a ipaddres supplied, add it to the parameter struct
	// (ip_address preferred, deprecated ipaddress accepted).
	ipaddress, ok := d.GetOk("ip_address")
	if !ok {
		ipaddress, ok = d.GetOk("ipaddress")
	}
	if ok {
		p.SetIpaddress(ipaddress.(string))
	}

	// Attach any configured affinity groups.
	if ags := d.Get("affinity_group_ids").(*schema.Set); ags.Len() > 0 {
		var groups []string
		for _, group := range ags.List() {
			groups = append(groups, group.(string))
		}
		p.SetAffinitygroupids(groups)
	}

	// If there is a project supplied, we retrieve and set the project id
	if err := setProjectid(p, cs, d); err != nil {
		return err
	}

	// If a keypair is supplied, add it to the parameter struct
	if keypair, ok := d.GetOk("keypair"); ok {
		p.SetKeypair(keypair.(string))
	}

	// If the user data contains any info, it needs to be base64 encoded and
	// added to the parameter struct
	if userData, ok := d.GetOk("user_data"); ok {
		ud := base64.StdEncoding.EncodeToString([]byte(userData.(string)))

		// deployVirtualMachine uses POST by default, so max userdata is 32K
		maxUD := 32768
		if cs.HTTPGETOnly {
			// deployVirtualMachine using GET instead, so max userdata is 2K
			maxUD = 2048
		}

		if len(ud) > maxUD {
			return fmt.Errorf(
				"The supplied user_data contains %d bytes after encoding, "+
					"this exeeds the limit of %d bytes", len(ud), maxUD)
		}
		p.SetUserdata(ud)
	}

	// If there is a group supplied, add it to the parameter struct
	if group, ok := d.GetOk("group"); ok {
		p.SetGroup(group.(string))
	}

	// If there is a root_disk_size supplied, add it to the parameter struct
	if rootdisksize, ok := d.GetOk("root_disk_size"); ok {
		p.SetRootdisksize(int64(rootdisksize.(int)))
	}

	// Create the new instance
	r, err := cs.VirtualMachine.DeployVirtualMachine(p)
	if err != nil {
		return fmt.Errorf("Error creating the new instance %s: %s", name, err)
	}

	d.SetId(r.Id)

	// Set the connection info for any configured provisioners
	d.SetConnInfo(map[string]string{
		"host":     r.Nic[0].Ipaddress,
		"password": r.Password,
	})

	return resourceCloudStackInstanceRead(d, meta)
}
// resourceBigvVMCreate builds a VM creation request from the configuration,
// POSTs it to the BigV API (serialized through createPipeline — see TODO
// below), then waits for the VM to provision and, when powered on, to
// answer SSH. The generated root password is stored in state and in the
// connection info before the request is made.
func resourceBigvVMCreate(d *schema.ResourceData, meta interface{}) error {
	l := log.New(os.Stderr, "", 0)
	bigvClient := meta.(*client)

	vm := bigvVMCreate{
		VirtualMachine: bigvVm{
			Name:   d.Get("name").(string),
			Cores:  d.Get("cores").(int),
			Memory: d.Get("memory").(int),
			Power:  d.Get("power_on").(bool),
			Reboot: d.Get("reboot").(bool),
			Group:  d.Get("group").(string),
			Zone:   d.Get("zone").(string),
		},
		Discs: []bigvDisc{{
			Label:        "root",
			StorageGrade: "sata",
			Size:         d.Get("disc_size").(int),
		}},
		Image: bigvImage{
			Distribution:    d.Get("os").(string),
			RootPassword:    randomPassword(),
			SshPublicKey:    d.Get("ssh_public_key").(string),
			FirstBootScript: d.Get("firstboot_script").(string),
		},
	}

	// If no ipv* is set then let bigv allocate it itself
	// The json for ip must be nil
	if ip := d.Get("ipv4"); ip != nil && ip.(string) != "" {
		vm.Ips = &bigvIps{
			Ipv4: ip.(string),
		}
	}
	if ip := d.Get("ipv6"); ip != nil && ip.(string) != "" {
		if vm.Ips == nil {
			vm.Ips = &bigvIps{}
		}
		vm.Ips.Ipv6 = ip.(string)
	}

	// Make sure the root password gets stored in d
	d.Set("root_password", vm.Image.RootPassword)

	// Connection information
	connInfo := map[string]string{
		"type":     "ssh",
		"password": vm.Image.RootPassword,
	}
	if vm.Ips != nil {
		connInfo["host"] = vm.Ips.Ipv4
	}
	d.SetConnInfo(connInfo)

	if err := vm.VirtualMachine.computeCoresToMemory(); err != nil {
		return err
	}

	if vm.Image.SshPublicKey != "" && vm.Image.Distribution == "none" {
		return errors.New("Cannot deploy ssh public keys with an os of 'none'. Please use a provisioner instead")
	}

	body, err := json.Marshal(vm)
	if err != nil {
		return err
	}

	// VM create uses a bigger path
	url := fmt.Sprintf("%s/accounts/%s/groups/%s/vm_create",
		bigvUri,
		bigvClient.account,
		vm.VirtualMachine.Group, // this will be group name
	)

	l.Printf("Requesting VM create: %s", url)
	l.Printf("VM profile: %s", body)

	req, _ := http.NewRequest("POST", url, bytes.NewBuffer(body))

	// TODO - Early 2016, and we hope to remove this soonish
	// bigV deadlocks if you hit it with concurrent creates.
	// That might be an ip allocation issue, and specifying both ips might
	// fix it, but that's untested. For now waiting for them to confirm we
	// can lift this restriction.
	createPipeline.Lock()
	resp, err := bigvClient.do(req)
	createPipeline.Unlock()
	if err != nil {
		return err
	}
	// Always close the body when done
	defer resp.Body.Close()

	l.Printf("HTTP response Status: %s", resp.Status)

	if resp.StatusCode != http.StatusAccepted {
		body, _ := ioutil.ReadAll(resp.Body)
		return fmt.Errorf("Create VM status %d from bigv: %s", resp.StatusCode, body)
	}

	// Record the attributes we already know as persisted while the rest of
	// the resource is still being provisioned.
	d.Partial(true)
	for _, i := range []string{"name", "group_id", "group", "zone", "cores", "memory", "ipv4", "ipv6", "root_password"} {
		d.SetPartial(i)
	}

	for k, v := range resp.Header {
		l.Printf("%s: %s", k, v)
	}

	// wait for state also sets up the resource from the read state we get back
	if err := waitForBigvState(d, bigvClient, waitForProvisioned); err != nil {
		return err
	}

	l.Printf("Created BigV VM, Id: %s", d.Id())

	// If we expect it to be turned on, wait for it to powered
	if vm.VirtualMachine.Power == true {
		if err := waitForBigvState(d, bigvClient, waitForPowered); err != nil {
			return err
		}
		// This assumes all distributions will listen on public ssh
		if vm.Image.Distribution != "none" {
			if err := waitForVmSsh(d); err != nil {
				return err
			}
		}
	}

	d.Partial(false)

	return nil
}
// resourceComputeInstanceRead refreshes Terraform state from the live GCE
// instance: metadata, service accounts, networks / network interfaces,
// default SSH connection info, and metadata/tags fingerprints.
func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	instance, err := getInstance(config, d)
	if err != nil {
		return err
	}

	// Synch metadata
	md := instance.Metadata

	_md := MetadataFormatSchema(md)

	// startup-script is tracked separately as metadata_startup_script, so
	// strip it out of the generic metadata map to avoid a perpetual diff.
	if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
		d.Set("metadata_startup_script", script)
		delete(_md, "startup-script")
	}

	if err = d.Set("metadata", _md); err != nil {
		return fmt.Errorf("Error setting metadata: %s", err)
	}

	d.Set("can_ip_forward", instance.CanIpForward)

	// Set the service accounts
	serviceAccounts := make([]map[string]interface{}, 0, 1)
	for _, serviceAccount := range instance.ServiceAccounts {
		scopes := make([]interface{}, len(serviceAccount.Scopes))
		for i, scope := range serviceAccount.Scopes {
			scopes[i] = scope
		}
		serviceAccounts = append(serviceAccounts, map[string]interface{}{
			"email":  serviceAccount.Email,
			"scopes": schema.NewSet(stringScopeHashcode, scopes),
		})
	}
	d.Set("service_account", serviceAccounts)

	networksCount := d.Get("network.#").(int)
	networkInterfacesCount := d.Get("network_interface.#").(int)

	// The deprecated `network` block and `network_interface` are mutually
	// exclusive; exactly one of them must be configured.
	if networksCount > 0 && networkInterfacesCount > 0 {
		return fmt.Errorf("Error: cannot define both networks and network_interfaces.")
	}
	if networksCount == 0 && networkInterfacesCount == 0 {
		return fmt.Errorf("Error: Must define at least one network_interface.")
	}

	// Set the networks
	// Use the first external IP found for the default connection info.
	externalIP := ""
	internalIP := ""
	networks := make([]map[string]interface{}, 0, 1)
	if networksCount > 0 {
		// TODO: Remove this when realizing deprecation of .network
		for i, iface := range instance.NetworkInterfaces {
			var natIP string
			// Only a ONE_TO_ONE_NAT access config carries an external address.
			for _, config := range iface.AccessConfigs {
				if config.Type == "ONE_TO_ONE_NAT" {
					natIP = config.NatIP
					break
				}
			}

			if externalIP == "" && natIP != "" {
				externalIP = natIP
			}

			network := make(map[string]interface{})
			network["name"] = iface.Name
			network["external_address"] = natIP
			network["internal_address"] = iface.NetworkIP
			// `source` is config-only; carry the configured value forward.
			network["source"] = d.Get(fmt.Sprintf("network.%d.source", i))
			networks = append(networks, network)
		}
	}
	d.Set("network", networks)

	networkInterfaces := make([]map[string]interface{}, 0, 1)
	if networkInterfacesCount > 0 {
		for i, iface := range instance.NetworkInterfaces {
			// The first non-empty ip is left in natIP
			var natIP string
			accessConfigs := make(
				[]map[string]interface{}, 0, len(iface.AccessConfigs))
			for _, config := range iface.AccessConfigs {
				accessConfigs = append(accessConfigs, map[string]interface{}{
					"nat_ip": config.NatIP,
				})

				if natIP == "" {
					natIP = config.NatIP
				}
			}

			if externalIP == "" {
				externalIP = natIP
			}

			if internalIP == "" {
				internalIP = iface.NetworkIP
			}

			networkInterfaces = append(networkInterfaces, map[string]interface{}{
				"name":          iface.Name,
				"address":       iface.NetworkIP,
				// `network` is config-only; carry the configured value forward.
				"network":       d.Get(fmt.Sprintf("network_interface.%d.network", i)),
				"access_config": accessConfigs,
			})
		}
	}
	d.Set("network_interface", networkInterfaces)

	// Fall back on internal ip if there is no external ip. This makes sense in the situation where
	// terraform is being used on a cloud instance and can therefore access the instances it creates
	// via their internal ips.
	sshIP := externalIP
	if sshIP == "" {
		sshIP = internalIP
	}

	// Initialize the connection info
	d.SetConnInfo(map[string]string{
		"type": "ssh",
		"host": sshIP,
	})

	// Set the metadata fingerprint if there is one.
	if instance.Metadata != nil {
		d.Set("metadata_fingerprint", instance.Metadata.Fingerprint)
	}

	// Set the tags fingerprint if there is one.
	if instance.Tags != nil {
		d.Set("tags_fingerprint", instance.Tags.Fingerprint)
	}

	d.Set("self_link", instance.SelfLink)
	d.SetId(instance.Name)

	return nil
}
// resourceAwsInstanceCreate launches an EC2 instance from the built
// instance options, retries around IAM-profile propagation delays, waits
// for the instance to reach the running state, records connection info,
// and delegates the remaining attribute handling to Update.
func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	instanceOpts, err := buildAwsInstanceOpts(d, meta)
	if err != nil {
		return err
	}

	// Build the creation struct
	runOpts := &ec2.RunInstancesInput{
		BlockDeviceMappings:   instanceOpts.BlockDeviceMappings,
		DisableApiTermination: instanceOpts.DisableAPITermination,
		EbsOptimized:          instanceOpts.EBSOptimized,
		Monitoring:            instanceOpts.Monitoring,
		IamInstanceProfile:    instanceOpts.IAMInstanceProfile,
		ImageId:               instanceOpts.ImageID,
		InstanceInitiatedShutdownBehavior: instanceOpts.InstanceInitiatedShutdownBehavior,
		InstanceType:      instanceOpts.InstanceType,
		KeyName:           instanceOpts.KeyName,
		MaxCount:          aws.Int64(int64(1)),
		MinCount:          aws.Int64(int64(1)),
		NetworkInterfaces: instanceOpts.NetworkInterfaces,
		Placement:         instanceOpts.Placement,
		PrivateIpAddress:  instanceOpts.PrivateIPAddress,
		SecurityGroupIds:  instanceOpts.SecurityGroupIDs,
		SecurityGroups:    instanceOpts.SecurityGroups,
		SubnetId:          instanceOpts.SubnetID,
		UserData:          instanceOpts.UserData64,
	}

	// Create the instance
	log.Printf("[DEBUG] Run configuration: %s", runOpts)

	var runResp *ec2.Reservation
	// Retry a handful of times: the loop only continues on the specific
	// IAM-propagation error below; any other outcome breaks immediately.
	for i := 0; i < 5; i++ {
		runResp, err = conn.RunInstances(runOpts)
		if awsErr, ok := err.(awserr.Error); ok {
			// IAM profiles can take ~10 seconds to propagate in AWS:
			// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console
			if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "Invalid IAM Instance Profile") {
				log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...")
				time.Sleep(2 * time.Second)
				continue
			}

			// Warn if the AWS Error involves group ids, to help identify situation
			// where a user uses group ids in security_groups for the Default VPC.
			//   See https://github.com/hashicorp/terraform/issues/3798
			if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "groupId is invalid") {
				return fmt.Errorf("Error launching instance, possible mismatch of Security Group IDs and Names. See AWS Instance docs here: %s.\n\n\tAWS Error: %s", "https://terraform.io/docs/providers/aws/r/instance.html", awsErr.Message())
			}
		}
		break
	}

	if err != nil {
		return fmt.Errorf("Error launching source instance: %s", err)
	}
	if runResp == nil || len(runResp.Instances) == 0 {
		return fmt.Errorf("Error launching source instance: no instances returned in response")
	}

	instance := runResp.Instances[0]
	log.Printf("[INFO] Instance ID: %s", *instance.InstanceId)

	// Store the resulting ID so we can look this up later
	d.SetId(*instance.InstanceId)

	// Wait for the instance to become running so we can get some attributes
	// that aren't available until later.
	log.Printf(
		"[DEBUG] Waiting for instance (%s) to become running",
		*instance.InstanceId)

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"pending"},
		Target:     []string{"running"},
		Refresh:    InstanceStateRefreshFunc(conn, *instance.InstanceId),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	instanceRaw, err := stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for instance (%s) to become ready: %s",
			*instance.InstanceId, err)
	}

	// Re-read the instance so the addresses below reflect its running state.
	instance = instanceRaw.(*ec2.Instance)

	// Initialize the connection info; prefer the public IP, fall back to
	// the private one (e.g. instances in a VPC without a public address).
	if instance.PublicIpAddress != nil {
		d.SetConnInfo(map[string]string{
			"type": "ssh",
			"host": *instance.PublicIpAddress,
		})
	} else if instance.PrivateIpAddress != nil {
		d.SetConnInfo(map[string]string{
			"type": "ssh",
			"host": *instance.PrivateIpAddress,
		})
	}

	// Update if we need to
	return resourceAwsInstanceUpdate(d, meta)
}
func resourcePacketDeviceRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*packngo.Client) device, _, err := client.Devices.Get(d.Id()) if err != nil { err = friendlyError(err) // If the device somehow already destroyed, mark as succesfully gone. if isNotFound(err) { d.SetId("") return nil } return err } d.Set("name", device.Hostname) d.Set("plan", device.Plan.Slug) d.Set("facility", device.Facility.Code) d.Set("operating_system", device.OS.Slug) d.Set("state", device.State) d.Set("billing_cycle", device.BillingCycle) d.Set("locked", device.Locked) d.Set("created", device.Created) d.Set("updated", device.Updated) tags := make([]string, 0, len(device.Tags)) for _, tag := range device.Tags { tags = append(tags, tag) } d.Set("tags", tags) var ( host string networks = make([]map[string]interface{}, 0, 1) ) for _, ip := range device.Network { network := map[string]interface{}{ "address": ip.Address, "gateway": ip.Gateway, "family": ip.AddressFamily, "cidr": ip.Cidr, "public": ip.Public, } networks = append(networks, network) if ip.AddressFamily == 4 && ip.Public == true { host = ip.Address } } d.Set("network", networks) if host != "" { d.SetConnInfo(map[string]string{ "type": "ssh", "host": host, }) } return nil }
// resourceVSphereVirtualMachineRead refreshes Terraform state from the live
// vSphere VM: guest network interfaces, connection info (waits for an IP),
// the root datastore, and summary config (memory, cpu).
func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {
	log.Printf("[DEBUG] reading virtual machine: %#v", d)
	client := meta.(*govmomi.Client)
	dc, err := getDatacenter(client, d.Get("datacenter").(string))
	if err != nil {
		return err
	}
	finder := find.NewFinder(client.Client, true)
	finder = finder.SetDatacenter(dc)

	vm, err := finder.VirtualMachine(context.TODO(), d.Id())
	if err != nil {
		// VM not found: treat as deleted and drop it from state.
		d.SetId("")
		return nil
	}

	// Retrieve only the properties we need into a managed-object view.
	var mvm mo.VirtualMachine

	collector := property.DefaultCollector(client.Client)
	if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore"}, &mvm); err != nil {
		return err
	}

	log.Printf("[DEBUG] %#v", dc)
	log.Printf("[DEBUG] %#v", mvm.Summary.Config)
	log.Printf("[DEBUG] %#v", mvm.Guest.Net)

	networkInterfaces := make([]map[string]interface{}, 0)
	for _, v := range mvm.Guest.Net {
		// Negative DeviceConfigId means the NIC has no matching device config;
		// skip those entries.
		if v.DeviceConfigId >= 0 {
			log.Printf("[DEBUG] %#v", v.Network)
			networkInterface := make(map[string]interface{})
			networkInterface["label"] = v.Network
			for _, ip := range v.IpConfig.IpAddress {
				p := net.ParseIP(ip.IpAddress)
				// Classify each guest IP as v4 or v6; the last address of
				// each family seen for this NIC wins.
				if p.To4() != nil {
					log.Printf("[DEBUG] %#v", p.String())
					log.Printf("[DEBUG] %#v", ip.PrefixLength)
					networkInterface["ipv4_address"] = p.String()
					networkInterface["ipv4_prefix_length"] = ip.PrefixLength
				} else if p.To16() != nil {
					log.Printf("[DEBUG] %#v", p.String())
					log.Printf("[DEBUG] %#v", ip.PrefixLength)
					networkInterface["ipv6_address"] = p.String()
					networkInterface["ipv6_prefix_length"] = ip.PrefixLength
				}
				log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
			}
			log.Printf("[DEBUG] networkInterface: %#v", networkInterface)
			networkInterfaces = append(networkInterfaces, networkInterface)
		}
	}
	log.Printf("[DEBUG] networkInterfaces: %#v", networkInterfaces)
	err = d.Set("network_interface", networkInterfaces)
	if err != nil {
		// NOTE(review): the underlying err is dropped from this message —
		// consider including it for easier debugging.
		return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces)
	}

	// Block until the guest reports an IP; used for connection info below.
	ip, err := vm.WaitForIP(context.TODO())
	if err != nil {
		return err
	}
	log.Printf("[DEBUG] ip address: %v", ip)
	d.SetConnInfo(map[string]string{
		"type": "ssh",
		"host": ip,
	})

	// Determine the root datastore name. Only the first attached datastore
	// is examined (note the unconditional break at the end of the loop).
	var rootDatastore string
	for _, v := range mvm.Datastore {
		var md mo.Datastore
		if err := collector.RetrieveOne(context.TODO(), v, []string{"name", "parent"}, &md); err != nil {
			return err
		}
		if md.Parent.Type == "StoragePod" {
			// Datastore lives in a storage pod (datastore cluster); report
			// the pod's name instead of the member datastore's.
			var msp mo.StoragePod
			if err := collector.RetrieveOne(context.TODO(), *md.Parent, []string{"name"}, &msp); err != nil {
				return err
			}
			rootDatastore = msp.Name
			log.Printf("[DEBUG] %#v", msp.Name)
		} else {
			rootDatastore = md.Name
			log.Printf("[DEBUG] %#v", md.Name)
		}
		break
	}

	d.Set("datacenter", dc)
	d.Set("memory", mvm.Summary.Config.MemorySizeMB)
	d.Set("memory_reservation", mvm.Summary.Config.MemoryReservation)
	d.Set("cpu", mvm.Summary.Config.NumCpu)
	d.Set("datastore", rootDatastore)

	return nil
}
// resourceVSphereVirtualMachineCreate builds an internal virtualMachine
// description from the resource configuration (networks, disks, cdroms,
// windows options, custom config parameters), then either deploys from a
// template or creates a fresh VM, optionally waits for networking, and
// finally delegates state population to Read.
func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*govmomi.Client)

	vm := virtualMachine{
		name:     d.Get("name").(string),
		vcpu:     d.Get("vcpu").(int),
		memoryMb: int64(d.Get("memory").(int)),
		memoryAllocation: memoryAllocation{
			reservation: int64(d.Get("memory_reservation").(int)),
		},
	}

	if v, ok := d.GetOk("folder"); ok {
		vm.folder = v.(string)
	}

	if v, ok := d.GetOk("datacenter"); ok {
		vm.datacenter = v.(string)
	}

	if v, ok := d.GetOk("cluster"); ok {
		vm.cluster = v.(string)
	}

	if v, ok := d.GetOk("resource_pool"); ok {
		vm.resourcePool = v.(string)
	}

	if v, ok := d.GetOk("gateway"); ok {
		vm.gateway = v.(string)
	}

	if v, ok := d.GetOk("domain"); ok {
		vm.domain = v.(string)
	}

	if v, ok := d.GetOk("time_zone"); ok {
		vm.timeZone = v.(string)
	}

	if v, ok := d.GetOk("linked_clone"); ok {
		vm.linkedClone = v.(bool)
	}

	// DNS suffixes / servers fall back to package-level defaults when not set.
	if raw, ok := d.GetOk("dns_suffixes"); ok {
		for _, v := range raw.([]interface{}) {
			vm.dnsSuffixes = append(vm.dnsSuffixes, v.(string))
		}
	} else {
		vm.dnsSuffixes = DefaultDNSSuffixes
	}

	if raw, ok := d.GetOk("dns_servers"); ok {
		for _, v := range raw.([]interface{}) {
			vm.dnsServers = append(vm.dnsServers, v.(string))
		}
	} else {
		vm.dnsServers = DefaultDNSServers
	}

	// Arbitrary key/value extra-config parameters passed through to vSphere.
	if vL, ok := d.GetOk("custom_configuration_parameters"); ok {
		if custom_configs, ok := vL.(map[string]interface{}); ok {
			custom := make(map[string]types.AnyType)
			for k, v := range custom_configs {
				custom[k] = v
			}
			vm.customConfigurations = custom
			log.Printf("[DEBUG] custom_configuration_parameters init: %v", vm.customConfigurations)
		}
	}

	if vL, ok := d.GetOk("network_interface"); ok {
		networks := make([]networkInterface, len(vL.([]interface{})))
		for i, v := range vL.([]interface{}) {
			network := v.(map[string]interface{})
			networks[i].label = network["label"].(string)
			// `ip_address` is the legacy attribute; `ipv4_address` below
			// overrides it when both are present.
			if v, ok := network["ip_address"].(string); ok && v != "" {
				networks[i].ipv4Address = v
			}
			// Legacy `subnet_mask` is converted to a prefix length.
			if v, ok := network["subnet_mask"].(string); ok && v != "" {
				ip := net.ParseIP(v).To4()
				if ip != nil {
					mask := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3])
					pl, _ := mask.Size()
					networks[i].ipv4PrefixLength = pl
				} else {
					return fmt.Errorf("subnet_mask parameter is invalid.")
				}
			}
			if v, ok := network["ipv4_address"].(string); ok && v != "" {
				networks[i].ipv4Address = v
			}
			if v, ok := network["ipv4_prefix_length"].(int); ok && v != 0 {
				networks[i].ipv4PrefixLength = v
			}
		}
		vm.networkInterfaces = networks
		log.Printf("[DEBUG] network_interface init: %v", networks)
	}

	if vL, ok := d.GetOk("windows_opt_config"); ok {
		var winOpt windowsOptConfig
		custom_configs := (vL.([]interface{}))[0].(map[string]interface{})
		if v, ok := custom_configs["admin_password"].(string); ok && v != "" {
			winOpt.adminPassword = v
		}
		if v, ok := custom_configs["domain"].(string); ok && v != "" {
			winOpt.domain = v
		}
		if v, ok := custom_configs["domain_user"].(string); ok && v != "" {
			winOpt.domainUser = v
		}
		if v, ok := custom_configs["product_key"].(string); ok && v != "" {
			winOpt.productKey = v
		}
		if v, ok := custom_configs["domain_user_password"].(string); ok && v != "" {
			winOpt.domainUserPassword = v
		}
		vm.windowsOptionalConfig = winOpt
		log.Printf("[DEBUG] windows config init: %v", winOpt)
	}

	if vL, ok := d.GetOk("disk"); ok {
		disks := make([]hardDisk, len(vL.([]interface{})))
		for i, v := range vL.([]interface{}) {
			disk := v.(map[string]interface{})
			if i == 0 {
				// The first disk is special: it may name a template to clone
				// from, and it may carry the VM-level datastore.
				if v, ok := disk["template"].(string); ok && v != "" {
					vm.template = v
				} else {
					if v, ok := disk["size"].(int); ok && v != 0 {
						disks[i].size = int64(v)
					} else if v, ok := disk["vmdk"].(string); ok && v != "" {
						disks[i].vmdkPath = v
						if v, ok := disk["bootable"].(bool); ok {
							vm.bootableVmdk = v
						}
					} else {
						return fmt.Errorf("template, size, or vmdk argument is required")
					}
				}
				if v, ok := disk["datastore"].(string); ok && v != "" {
					vm.datastore = v
				}
			} else {
				// Additional disks: size or an existing vmdk path.
				if v, ok := disk["size"].(int); ok && v != 0 {
					disks[i].size = int64(v)
				} else if v, ok := disk["vmdk"].(string); ok && v != "" {
					disks[i].vmdkPath = v
				} else {
					return fmt.Errorf("size or vmdk argument is required")
				}
			}
			if v, ok := disk["iops"].(int); ok && v != 0 {
				disks[i].iops = int64(v)
			}
			if v, ok := disk["type"].(string); ok && v != "" {
				disks[i].initType = v
			}
		}
		vm.hardDisks = disks
		log.Printf("[DEBUG] disk init: %v", disks)
	}

	if vL, ok := d.GetOk("cdrom"); ok {
		cdroms := make([]cdrom, len(vL.([]interface{})))
		for i, v := range vL.([]interface{}) {
			c := v.(map[string]interface{})
			if v, ok := c["datastore"].(string); ok && v != "" {
				cdroms[i].datastore = v
			} else {
				return fmt.Errorf("Datastore argument must be specified when attaching a cdrom image.")
			}
			if v, ok := c["path"].(string); ok && v != "" {
				cdroms[i].path = v
			} else {
				return fmt.Errorf("Path argument must be specified when attaching a cdrom image.")
			}
		}
		vm.cdroms = cdroms
		log.Printf("[DEBUG] cdrom init: %v", cdroms)
	}

	// A non-empty template means clone-from-template; otherwise create fresh.
	if vm.template != "" {
		err := vm.deployVirtualMachine(client)
		if err != nil {
			return err
		}
	} else {
		err := vm.createVirtualMachine(client)
		if err != nil {
			return err
		}
	}

	// With no static IPv4 configured, optionally wait (after boot_delay
	// seconds) for the guest to report networking as active.
	if _, ok := d.GetOk("network_interface.0.ipv4_address"); !ok {
		if v, ok := d.GetOk("boot_delay"); ok {
			stateConf := &resource.StateChangeConf{
				Pending:    []string{"pending"},
				Target:     []string{"active"},
				Refresh:    waitForNetworkingActive(client, vm.datacenter, vm.Path()),
				Timeout:    600 * time.Second,
				Delay:      time.Duration(v.(int)) * time.Second,
				MinTimeout: 2 * time.Second,
			}

			_, err := stateConf.WaitForState()
			if err != nil {
				return err
			}
		}
	}

	if ip, ok := d.GetOk("network_interface.0.ipv4_address"); ok {
		d.SetConnInfo(map[string]string{
			"host": ip.(string),
		})
	} else {
		log.Printf("[DEBUG] Could not get IP address for %s", d.Id())
	}

	d.SetId(vm.Path())
	log.Printf("[INFO] Created virtual machine: %s", d.Id())

	return resourceVSphereVirtualMachineRead(d, meta)
}