func resourceBlockStorageVolumeV2Read(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) blockStorageClient, err := config.blockStorageV2Client(d.Get("region").(string)) if err != nil { return fmt.Errorf("Error creating OpenStack block storage client: %s", err) } v, err := volumes.Get(blockStorageClient, d.Id()).Extract() if err != nil { return CheckDeleted(d, err, "volume") } log.Printf("[DEBUG] Retreived volume %s: %+v", d.Id(), v) d.Set("size", v.Size) d.Set("description", v.Description) d.Set("availability_zone", v.AvailabilityZone) d.Set("name", v.Name) d.Set("snapshot_id", v.SnapshotID) d.Set("source_vol_id", v.SourceVolID) d.Set("volume_type", v.VolumeType) d.Set("metadata", v.Metadata) attachments := make([]map[string]interface{}, len(v.Attachments)) for i, attachment := range v.Attachments { attachments[i] = make(map[string]interface{}) attachments[i]["id"] = attachment["id"] attachments[i]["instance_id"] = attachment["server_id"] attachments[i]["device"] = attachment["device"] log.Printf("[DEBUG] attachment: %v", attachment) } d.Set("attachment", attachments) return nil }
func testAccCheckBlockStorageV2VolumeExists(t *testing.T, n string, volume *volumes.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } if rs.Primary.ID == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME) if err != nil { return fmt.Errorf("Error creating OpenStack block storage client: %s", err) } found, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract() if err != nil { return err } if found.ID != rs.Primary.ID { return fmt.Errorf("Volume not found") } *volume = *found return nil } }
// Get cinder volume info by volumeID func (client *cinderClient) getVolume(volumeID string) (*volumes.Volume, error) { volume, err := volumes.Get(client.cinder, volumeID).Extract() if err != nil { return nil, err } return volume, nil }
// VolumeV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch // an OpenStack volume. func VolumeV2StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { return func() (interface{}, string, error) { v, err := volumes.Get(client, volumeID).Extract() if err != nil { errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) if !ok { return nil, "", err } if errCode.Actual == 404 { return v, "deleted", nil } return nil, "", err } return v, v.Status, nil } }
func testAccCheckBlockStorageV2VolumeDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME) if err != nil { return fmt.Errorf("Error creating OpenStack block storage client: %s", err) } for _, rs := range s.RootModule().Resources { if rs.Type != "openstack_blockstorage_volume_v2" { continue } _, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract() if err == nil { return fmt.Errorf("Volume still exists") } } return nil }
func testAccCheckBlockStorageV2VolumeDoesNotExist(t *testing.T, n string, volume *volumes.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) blockStorageClient, err := config.blockStorageV2Client(OS_REGION_NAME) if err != nil { return fmt.Errorf("Error creating OpenStack block storage client: %s", err) } _, err = volumes.Get(blockStorageClient, volume.ID).Extract() if err != nil { errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) if !ok { return err } if errCode.Actual == 404 { return nil } return err } return fmt.Errorf("Volume still exists") } }
// TestVolumes exercises the Block Storage v2 volume lifecycle against a live
// cloud: create, update, get, and list, with deletion deferred as cleanup.
func TestVolumes(t *testing.T) {
	client, err := newClient(t)
	th.AssertNoErr(t, err)

	// Create a 1 GB test volume.
	cv, err := volumes.Create(client, &volumes.CreateOpts{
		Size: 1,
		Name: "blockv2-volume",
	}).Extract()
	th.AssertNoErr(t, err)

	// Cleanup: wait until the volume settles in "available" (it may still be
	// transitioning from the update above) before issuing the delete. The
	// ordering inside this defer is deliberate — deleting a non-available
	// volume would fail.
	defer func() {
		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
		th.AssertNoErr(t, err)
		err = volumes.Delete(client, cv.ID).ExtractErr()
		th.AssertNoErr(t, err)
	}()

	// Rename the volume via Update.
	_, err = volumes.Update(client, cv.ID, &volumes.UpdateOpts{
		Name: "blockv2-updated-volume",
	}).Extract()
	th.AssertNoErr(t, err)

	// Get the volume back and confirm the rename took effect.
	v, err := volumes.Get(client, cv.ID).Extract()
	th.AssertNoErr(t, err)
	t.Logf("Got volume: %+v\n", v)

	if v.Name != "blockv2-updated-volume" {
		t.Errorf("Unable to update volume: Expected name: blockv2-updated-volume\nActual name: %s", v.Name)
	}

	// Listing filtered by the new name should return exactly one volume.
	err = volumes.List(client, &volumes.ListOpts{Name: "blockv2-updated-volume"}).EachPage(func(page pagination.Page) (bool, error) {
		vols, err := volumes.ExtractVolumes(page)
		th.CheckEquals(t, 1, len(vols))
		return true, err
	})
	th.AssertNoErr(t, err)
}
func resourceBlockStorageVolumeV2Delete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) blockStorageClient, err := config.blockStorageV2Client(d.Get("region").(string)) if err != nil { return fmt.Errorf("Error creating OpenStack block storage client: %s", err) } v, err := volumes.Get(blockStorageClient, d.Id()).Extract() if err != nil { return CheckDeleted(d, err, "volume") } // make sure this volume is detached from all instances before deleting if len(v.Attachments) > 0 { log.Printf("[DEBUG] detaching volumes") if computeClient, err := config.computeV2Client(d.Get("region").(string)); err != nil { return err } else { for _, volumeAttachment := range v.Attachments { log.Printf("[DEBUG] Attachment: %v", volumeAttachment) if err := volumeattach.Delete(computeClient, volumeAttachment["server_id"].(string), volumeAttachment["id"].(string)).ExtractErr(); err != nil { return err } } stateConf := &resource.StateChangeConf{ Pending: []string{"in-use", "attaching", "detaching"}, Target: []string{"available"}, Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), Timeout: 10 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, } _, err = stateConf.WaitForState() if err != nil { return fmt.Errorf( "Error waiting for volume (%s) to become available: %s", d.Id(), err) } } } // It's possible that this volume was used as a boot device and is currently // in a "deleting" state from when the instance was terminated. // If this is true, just move on. It'll eventually delete. if v.Status != "deleting" { if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil { return CheckDeleted(d, err, "volume") } } // Wait for the volume to delete before moving on. 
log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"deleting", "downloading", "available"}, Target: []string{"deleted"}, Refresh: VolumeV2StateRefreshFunc(blockStorageClient, d.Id()), Timeout: 10 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, } _, err = stateConf.WaitForState() if err != nil { return fmt.Errorf( "Error waiting for volume (%s) to delete: %s", d.Id(), err) } d.SetId("") return nil }