func testAccCheckBlockStorageVolumeAttachV2Destroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)
	client, err := config.blockStorageV2Client(OS_REGION_NAME)
	if err != nil {
		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
	}

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "openstack_blockstorage_volume_attach_v2" {
			continue
		}

		volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(rs.Primary.ID)
		if err != nil {
			return err
		}

		volume, err := volumes.Get(client, volumeId).Extract()
		if err != nil {
			// A 404 means the volume is gone, so the attachment is gone too.
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				return nil
			}
			return err
		}

		for _, v := range volume.Attachments {
			if attachmentId == v.AttachmentID {
				return fmt.Errorf("Volume attachment still exists")
			}
		}
	}

	return nil
}
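// blockStorageVolumeAttachV2ParseId is referenced throughout this listing but
// not shown. A minimal sketch, assuming the two-part "volumeId/attachmentId"
// ID format built in resourceBlockStorageVolumeAttachV2Create below; the real
// helper may differ. Needs the "strings" import.
func blockStorageVolumeAttachV2ParseId(id string) (string, string, error) {
	parts := strings.Split(id, "/")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("Unable to parse attachment ID: %s", id)
	}

	return parts[0], parts[1], nil
}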
func resourceBlockStorageVolumeV2Read(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	blockStorageClient, err := config.blockStorageV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
	}

	v, err := volumes.Get(blockStorageClient, d.Id()).Extract()
	if err != nil {
		return CheckDeleted(d, err, "volume")
	}

	log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v)

	d.Set("size", v.Size)
	d.Set("description", v.Description)
	d.Set("availability_zone", v.AvailabilityZone)
	d.Set("name", v.Name)
	d.Set("snapshot_id", v.SnapshotID)
	d.Set("source_vol_id", v.SourceVolID)
	d.Set("volume_type", v.VolumeType)
	d.Set("metadata", v.Metadata)

	attachments := make([]map[string]interface{}, len(v.Attachments))
	for i, attachment := range v.Attachments {
		attachments[i] = make(map[string]interface{})
		attachments[i]["id"] = attachment.ID
		attachments[i]["instance_id"] = attachment.ServerID
		attachments[i]["device"] = attachment.Device
		log.Printf("[DEBUG] attachment: %v", attachment)
	}
	d.Set("attachment", attachments)

	return nil
}
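// CheckDeleted is referenced above and in the delete function below but not
// shown in this listing. A minimal sketch of what it needs to do here,
// assuming the usual Terraform pattern of clearing the ID on a 404 so the
// resource is dropped from state; the real helper may differ.
func CheckDeleted(d *schema.ResourceData, err error, msg string) error {
	if _, ok := err.(gophercloud.ErrDefault404); ok {
		d.SetId("")
		return nil
	}

	return fmt.Errorf("%s: %s", msg, err)
}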
func testAccCheckBlockStorageV2VolumeExists(n string, volume *volumes.Volume) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)
		blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME)
		if err != nil {
			return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
		}

		found, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract()
		if err != nil {
			return err
		}

		if found.ID != rs.Primary.ID {
			return fmt.Errorf("Volume not found")
		}

		*volume = *found

		return nil
	}
}
func TestGetWithExtensions(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()

	MockGetResponse(t)

	var s struct {
		volumes.Volume
		volumetenants.VolumeExt
	}
	err := volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(&s)
	th.AssertNoErr(t, err)
	th.AssertEquals(t, "304dc00909ac4d0da6c62d816bcb3459", s.TenantID)

	// ExtractInto requires a pointer; passing the struct by value must fail.
	err = volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(s)
	if err == nil {
		t.Errorf("Expected error when providing non-pointer struct")
	}
}
func TestGet(t *testing.T) {
	th.SetupHTTP()
	defer th.TeardownHTTP()

	MockGetResponse(t)

	v, err := volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract()
	th.AssertNoErr(t, err)

	th.AssertEquals(t, v.Name, "vol-001")
	th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22")
}
// VolumeV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// an OpenStack volume.
func VolumeV2StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		v, err := volumes.Get(client, volumeID).Extract()
		if err != nil {
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				return v, "deleted", nil
			}
			return nil, "", err
		}

		return v, v.Status, nil
	}
}
// VolumeV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch
// an OpenStack volume. This variant additionally surfaces the "error" status
// as a hard failure instead of letting the state change time out.
func VolumeV2StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		v, err := volumes.Get(client, volumeID).Extract()
		if err != nil {
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				return v, "deleted", nil
			}
			return nil, "", err
		}

		if v.Status == "error" {
			return v, v.Status, fmt.Errorf("There was an error creating the volume. " +
				"Please check with your cloud admin or check the Block Storage " +
				"API logs to see why this error occurred.")
		}

		return v, v.Status, nil
	}
}
func testAccCheckBlockStorageV2VolumeDoesNotExist(t *testing.T, n string, volume *volumes.Volume) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		config := testAccProvider.Meta().(*Config)
		blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME)
		if err != nil {
			return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
		}

		_, err = volumes.Get(blockStorageClient, volume.ID).Extract()
		if err != nil {
			// A 404 is the expected outcome: the volume no longer exists.
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				return nil
			}
			return err
		}

		return fmt.Errorf("Volume still exists")
	}
}
func TestVolumesCreateDestroy(t *testing.T) {
	client, err := clients.NewBlockStorageV2Client()
	if err != nil {
		t.Fatalf("Unable to create blockstorage client: %v", err)
	}

	volume, err := CreateVolume(t, client)
	if err != nil {
		t.Fatalf("Unable to create volume: %v", err)
	}
	defer DeleteVolume(t, client, volume)

	newVolume, err := volumes.Get(client, volume.ID).Extract()
	if err != nil {
		t.Errorf("Unable to retrieve volume: %v", err)
	}

	PrintVolume(t, newVolume)
}
func testAccCheckBlockStorageVolumeAttachV2Exists(t *testing.T, n string, va *volumes.Attachment) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)
		client, err := config.blockStorageV2Client(OS_REGION_NAME)
		if err != nil {
			return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
		}

		volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(rs.Primary.ID)
		if err != nil {
			return err
		}

		volume, err := volumes.Get(client, volumeId).Extract()
		if err != nil {
			return err
		}

		var found bool
		for _, v := range volume.Attachments {
			if attachmentId == v.AttachmentID {
				found = true
				*va = v
			}
		}

		if !found {
			return fmt.Errorf("Volume Attachment not found")
		}

		return nil
	}
}
func testAccCheckBlockStorageV2VolumeDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)
	blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME)
	if err != nil {
		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
	}

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "openstack_blockstorage_volume_v2" {
			continue
		}

		// If the Get succeeds, the volume was not destroyed.
		_, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract()
		if err == nil {
			return fmt.Errorf("Volume still exists")
		}
	}

	return nil
}
func TestVolumeActionsAttachCreateDestroy(t *testing.T) {
	blockClient, err := clients.NewBlockStorageV2Client()
	if err != nil {
		t.Fatalf("Unable to create a blockstorage client: %v", err)
	}

	computeClient, err := clients.NewComputeV2Client()
	if err != nil {
		t.Fatalf("Unable to create a compute client: %v", err)
	}

	choices, err := clients.AcceptanceTestChoicesFromEnv()
	if err != nil {
		t.Fatal(err)
	}

	server, err := compute.CreateServer(t, computeClient, choices)
	if err != nil {
		t.Fatalf("Unable to create server: %v", err)
	}
	defer compute.DeleteServer(t, computeClient, server)

	volume, err := blockstorage.CreateVolume(t, blockClient)
	if err != nil {
		t.Fatalf("Unable to create volume: %v", err)
	}
	defer blockstorage.DeleteVolume(t, blockClient, volume)

	err = CreateVolumeAttach(t, blockClient, volume, server)
	if err != nil {
		t.Fatalf("Unable to attach volume: %v", err)
	}

	newVolume, err := volumes.Get(blockClient, volume.ID).Extract()
	if err != nil {
		t.Fatalf("Unable to get updated volume information: %v", err)
	}

	DeleteVolumeAttach(t, blockClient, newVolume)
}
func resourceBlockStorageVolumeAttachV2Read(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	client, err := config.blockStorageV2Client(GetRegion(d))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
	}

	volumeId, attachmentId, err := blockStorageVolumeAttachV2ParseId(d.Id())
	if err != nil {
		return err
	}

	volume, err := volumes.Get(client, volumeId).Extract()
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Retrieved volume %s: %#v", d.Id(), volume)

	var attachment volumes.Attachment
	for _, v := range volume.Attachments {
		if attachmentId == v.AttachmentID {
			attachment = v
		}
	}

	log.Printf("[DEBUG] Retrieved volume attachment: %#v", attachment)

	d.Set("volume_id", volumeId)
	d.Set("attachment_id", attachmentId)
	d.Set("device", attachment.Device)
	d.Set("instance_id", attachment.ServerID)
	d.Set("host_name", attachment.HostName)
	d.Set("region", GetRegion(d))

	return nil
}
func resourceBlockStorageVolumeV2Delete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	blockStorageClient, err := config.blockStorageV2Client(GetRegion(d))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
	}

	v, err := volumes.Get(blockStorageClient, d.Id()).Extract()
	if err != nil {
		return CheckDeleted(d, err, "volume")
	}

	// Make sure this volume is detached from all instances before deleting.
	if len(v.Attachments) > 0 {
		log.Printf("[DEBUG] detaching volumes")
		computeClient, err := config.computeV2Client(GetRegion(d))
		if err != nil {
			return err
		}

		for _, volumeAttachment := range v.Attachments {
			log.Printf("[DEBUG] Attachment: %v", volumeAttachment)
			if err := volumeattach.Delete(computeClient, volumeAttachment.ServerID, volumeAttachment.ID).ExtractErr(); err != nil {
				return err
			}
		}

		stateConf := &resource.StateChangeConf{
			Pending:    []string{"in-use", "attaching", "detaching"},
			Target:     []string{"available"},
			Refresh:    VolumeV2StateRefreshFunc(blockStorageClient, d.Id()),
			Timeout:    10 * time.Minute,
			Delay:      10 * time.Second,
			MinTimeout: 3 * time.Second,
		}

		_, err = stateConf.WaitForState()
		if err != nil {
			return fmt.Errorf(
				"Error waiting for volume (%s) to become available: %s", d.Id(), err)
		}
	}

	// It's possible that this volume was used as a boot device and is currently
	// in a "deleting" state from when the instance was terminated.
	// If this is true, just move on. It'll eventually delete.
	if v.Status != "deleting" {
		if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil {
			return CheckDeleted(d, err, "volume")
		}
	}

	// Wait for the volume to delete before moving on.
	log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id())

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"deleting", "downloading", "available"},
		Target:     []string{"deleted"},
		Refresh:    VolumeV2StateRefreshFunc(blockStorageClient, d.Id()),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for volume (%s) to delete: %s", d.Id(), err)
	}

	d.SetId("")
	return nil
}
func resourceBlockStorageVolumeAttachV2Create(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	client, err := config.blockStorageV2Client(GetRegion(d))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
	}

	// Check if either instance_id or host_name was set.
	instanceId := d.Get("instance_id").(string)
	hostName := d.Get("host_name").(string)
	if instanceId == "" && hostName == "" {
		return fmt.Errorf("One of 'instance_id' or 'host_name' must be set.")
	}

	volumeId := d.Get("volume_id").(string)

	attachMode, err := blockStorageVolumeAttachV2AttachMode(d.Get("attach_mode").(string))
	if err != nil {
		return err
	}

	attachOpts := &volumeactions.AttachOpts{
		InstanceUUID: d.Get("instance_id").(string),
		HostName:     d.Get("host_name").(string),
		MountPoint:   d.Get("device").(string),
		Mode:         attachMode,
	}

	log.Printf("[DEBUG] Attachment Options: %#v", attachOpts)

	if err := volumeactions.Attach(client, volumeId, attachOpts).ExtractErr(); err != nil {
		return err
	}

	// Wait for the volume to become attached.
	log.Printf("[DEBUG] Waiting for volume (%s) to become in-use", volumeId)
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"available", "attaching"},
		Target:     []string{"in-use"},
		Refresh:    VolumeV2StateRefreshFunc(client, volumeId),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf("Error waiting for volume (%s) to become ready: %s", volumeId, err)
	}

	volume, err := volumes.Get(client, volumeId).Extract()
	if err != nil {
		return err
	}

	// Find the attachment that matches the instance or host this resource created.
	var attachmentId string
	for _, attachment := range volume.Attachments {
		if instanceId != "" && instanceId == attachment.ServerID {
			attachmentId = attachment.AttachmentID
		}

		if hostName != "" && hostName == attachment.HostName {
			attachmentId = attachment.AttachmentID
		}
	}

	if attachmentId == "" {
		return fmt.Errorf("Unable to determine attachment ID.")
	}

	// The ID must be a combination of the volume and attachment ID
	// in order to import attachments.
	id := fmt.Sprintf("%s/%s", volumeId, attachmentId)
	d.SetId(id)

	return resourceBlockStorageVolumeAttachV2Read(d, meta)
}
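// blockStorageVolumeAttachV2AttachMode is referenced above but not shown in
// this listing. A minimal sketch, assuming it maps the schema's "attach_mode"
// string onto gophercloud's volumeactions.AttachMode constants; the real
// helper may differ.
func blockStorageVolumeAttachV2AttachMode(v string) (volumeactions.AttachMode, error) {
	var attachMode volumeactions.AttachMode

	switch v {
	case "":
		// Leave unset and let the Block Storage API choose a default.
	case "ro":
		attachMode = volumeactions.ReadOnly
	case "rw":
		attachMode = volumeactions.ReadWrite
	default:
		return attachMode, fmt.Errorf("Invalid attach_mode specified: %s", v)
	}

	return attachMode, nil
}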