func TestVolumeActions(t *testing.T) {
	client, err := newClient(t)
	th.AssertNoErr(t, err)

	cv, err := volumes.Create(client, &volumes.CreateOpts{
		Size: 1,
		Name: "blockv2-volume",
	}).Extract()
	th.AssertNoErr(t, err)
	defer func() {
		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
		th.AssertNoErr(t, err)
		err = volumes.Delete(client, cv.ID).ExtractErr()
		th.AssertNoErr(t, err)
	}()

	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
	th.AssertNoErr(t, err)

	_, err = volumeactions.Attach(client, cv.ID, &volumeactions.AttachOpts{
		MountPoint:   "/mnt",
		Mode:         "rw",
		InstanceUUID: "50902f4f-a974-46a0-85e9-7efc5e22dfdd",
	}).Extract()
	th.AssertNoErr(t, err)

	err = volumes.WaitForStatus(client, cv.ID, "in-use", 60)
	th.AssertNoErr(t, err)

	_, err = volumeactions.Detach(client, cv.ID).Extract()
	th.AssertNoErr(t, err)
}
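// The acceptance tests in this section call a newClient helper that is not
// shown here. Below is a minimal sketch of what such a helper might look
// like, assuming the usual gophercloud pattern of reading credentials from
// the standard OS_* environment variables; the actual helper in the source
// tree may differ in details.
func newClient(t *testing.T) (*gophercloud.ServiceClient, error) {
	// Build auth options from OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, etc.
	ao, err := openstack.AuthOptionsFromEnv()
	th.AssertNoErr(t, err)

	// Authenticate against the identity service.
	provider, err := openstack.AuthenticatedClient(ao)
	th.AssertNoErr(t, err)

	// Return a Block Storage v2 service client scoped to the chosen region.
	return openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{
		Region: os.Getenv("OS_REGION_NAME"),
	})
}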
func resourceBlockStorageVolumeV2Create(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	blockStorageClient, err := config.blockStorageV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
	}

	createOpts := &volumes.CreateOpts{
		AvailabilityZone:   d.Get("availability_zone").(string),
		ConsistencyGroupID: d.Get("consistency_group_id").(string),
		Description:        d.Get("description").(string),
		ImageID:            d.Get("image_id").(string),
		Metadata:           resourceContainerMetadataV2(d),
		Name:               d.Get("name").(string),
		Size:               d.Get("size").(int),
		SnapshotID:         d.Get("snapshot_id").(string),
		SourceReplica:      d.Get("source_replica").(string),
		SourceVolID:        d.Get("source_vol_id").(string),
		VolumeType:         d.Get("volume_type").(string),
	}

	log.Printf("[DEBUG] Create Options: %#v", createOpts)
	v, err := volumes.Create(blockStorageClient, createOpts).Extract()
	if err != nil {
		return fmt.Errorf("Error creating OpenStack volume: %s", err)
	}
	log.Printf("[INFO] Volume ID: %s", v.ID)

	// Store the ID now
	d.SetId(v.ID)

	// Wait for the volume to become available.
	log.Printf("[DEBUG] Waiting for volume (%s) to become available", v.ID)

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"downloading", "creating"},
		Target:     []string{"available"},
		Refresh:    VolumeV2StateRefreshFunc(blockStorageClient, v.ID),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for volume (%s) to become ready: %s", v.ID, err)
	}

	return resourceBlockStorageVolumeV2Read(d, meta)
}
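// resourceBlockStorageVolumeV2Create polls the new volume through
// VolumeV2StateRefreshFunc, which is defined elsewhere in the provider. A
// minimal sketch is shown below, assuming the refresh function simply reads
// the volume and reports its current status to the state change loop; the
// real implementation may also translate error states.
func VolumeV2StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		v, err := volumes.Get(client, volumeID).Extract()
		if err != nil {
			return nil, "", err
		}
		// The status string ("creating", "downloading", "available", ...)
		// is compared against the Pending/Target lists in StateChangeConf.
		return v, v.Status, nil
	}
}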
func TestVolumeAttach(t *testing.T) {
	client, err := newClient(t)
	th.AssertNoErr(t, err)

	t.Logf("Creating volume")
	cv, err := volumes.Create(client, &volumes.CreateOpts{
		Size: 1,
		Name: "blockv2-volume",
	}).Extract()
	th.AssertNoErr(t, err)

	defer func() {
		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
		th.AssertNoErr(t, err)

		t.Logf("Deleting volume")
		err = volumes.Delete(client, cv.ID).ExtractErr()
		th.AssertNoErr(t, err)
	}()

	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
	th.AssertNoErr(t, err)

	instanceID := os.Getenv("OS_INSTANCE_ID")
	if instanceID == "" {
		t.Fatal("Environment variable OS_INSTANCE_ID is required")
	}

	t.Logf("Attaching volume")
	err = volumeactions.Attach(client, cv.ID, &volumeactions.AttachOpts{
		MountPoint:   "/mnt",
		Mode:         "rw",
		InstanceUUID: instanceID,
	}).ExtractErr()
	th.AssertNoErr(t, err)

	err = volumes.WaitForStatus(client, cv.ID, "in-use", 60)
	th.AssertNoErr(t, err)

	t.Logf("Detaching volume")
	err = volumeactions.Detach(client, cv.ID).ExtractErr()
	th.AssertNoErr(t, err)
}
func TestVolumeConns(t *testing.T) {
	client, err := newClient(t)
	th.AssertNoErr(t, err)

	t.Logf("Creating volume")
	cv, err := volumes.Create(client, &volumes.CreateOpts{
		Size: 1,
		Name: "blockv2-volume",
	}).Extract()
	th.AssertNoErr(t, err)

	defer func() {
		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
		th.AssertNoErr(t, err)

		t.Logf("Deleting volume")
		err = volumes.Delete(client, cv.ID).ExtractErr()
		th.AssertNoErr(t, err)
	}()

	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
	th.AssertNoErr(t, err)

	connOpts := &volumeactions.ConnectorOpts{
		IP:        "127.0.0.1",
		Host:      "stack",
		Initiator: "iqn.1994-05.com.redhat:17cf566367d2",
		Multipath: false,
		Platform:  "x86_64",
		OSType:    "linux2",
	}

	t.Logf("Initializing connection")
	_, err = volumeactions.InitializeConnection(client, cv.ID, connOpts).Extract()
	th.AssertNoErr(t, err)

	t.Logf("Terminating connection")
	err = volumeactions.TerminateConnection(client, cv.ID, connOpts).ExtractErr()
	th.AssertNoErr(t, err)
}
func TestVolumeReserve(t *testing.T) {
	client, err := newClient(t)
	th.AssertNoErr(t, err)

	t.Logf("Creating volume")
	cv, err := volumes.Create(client, &volumes.CreateOpts{
		Size: 1,
		Name: "blockv2-volume",
	}).Extract()
	th.AssertNoErr(t, err)

	defer func() {
		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
		th.AssertNoErr(t, err)

		t.Logf("Deleting volume")
		err = volumes.Delete(client, cv.ID).ExtractErr()
		th.AssertNoErr(t, err)
	}()

	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
	th.AssertNoErr(t, err)

	t.Logf("Reserving volume")
	err = volumeactions.Reserve(client, cv.ID).ExtractErr()
	th.AssertNoErr(t, err)

	err = volumes.WaitForStatus(client, cv.ID, "attaching", 60)
	th.AssertNoErr(t, err)

	t.Logf("Unreserving volume")
	err = volumeactions.Unreserve(client, cv.ID).ExtractErr()
	th.AssertNoErr(t, err)

	err = volumes.WaitForStatus(client, cv.ID, "available", 60)
	th.AssertNoErr(t, err)
}
func TestVolumes(t *testing.T) {
	client, err := newClient(t)
	th.AssertNoErr(t, err)

	cv, err := volumes.Create(client, &volumes.CreateOpts{
		Size: 1,
		Name: "blockv2-volume",
	}).Extract()
	th.AssertNoErr(t, err)
	defer func() {
		err = volumes.WaitForStatus(client, cv.ID, "available", 60)
		th.AssertNoErr(t, err)
		err = volumes.Delete(client, cv.ID).ExtractErr()
		th.AssertNoErr(t, err)
	}()

	_, err = volumes.Update(client, cv.ID, &volumes.UpdateOpts{
		Name: "blockv2-updated-volume",
	}).Extract()
	th.AssertNoErr(t, err)

	v, err := volumes.Get(client, cv.ID).Extract()
	th.AssertNoErr(t, err)
	t.Logf("Got volume: %+v\n", v)

	if v.Name != "blockv2-updated-volume" {
		t.Errorf("Unable to update volume: Expected name: blockv2-updated-volume\nActual name: %s", v.Name)
	}

	err = volumes.List(client, &volumes.ListOpts{Name: "blockv2-updated-volume"}).EachPage(func(page pagination.Page) (bool, error) {
		vols, err := volumes.ExtractVolumes(page)
		th.CheckEquals(t, 1, len(vols))
		return true, err
	})
	th.AssertNoErr(t, err)
}
func (d CinderDriver) Create(r volume.Request) volume.Response {
	// TODO(jdg): Right now we have a weird mix for some of our semantics. We
	// wanted to be able to dynamically create, but create can be called when a
	// volume already exists and is going to be used on another Docker node (ie
	// things like compose); we need to look at reworking things to NOT use
	// names to access Cinder volumes or some way to differentiate a create vs
	// a "use"
	log.Infof("Create volume %s on %s", r.Name, "Cinder")
	d.Mutex.Lock()
	defer d.Mutex.Unlock()

	vol, err := d.getByName(r.Name)
	if err != nil {
		log.Errorf("Error getting existing Volume by Name: (volume %s, error %s)", vol, err.Error())
		return volume.Response{Err: err.Error()}
	}
	// FIXME(jdg): Keep in mind, NotFound isn't the only error we can get here,
	// we can also receive a "Multiple matches" error if there are duplicate
	// names.

	opts := d.parseOpts(r)
	opts.Name = r.Name
	log.Debugf("Creating with options: %+v", opts)

	_, err = volumes.Create(d.Client, opts).Extract()
	if err != nil {
		log.Errorf("Failed to Create volume: %s\nEncountered error: %s", r.Name, err)
		return volume.Response{Err: err.Error()}
	}

	path := filepath.Join(d.Conf.MountPoint, r.Name)
	if err := os.Mkdir(path, os.ModeDir); err != nil {
		log.Errorf("Failed to create Mount directory: %v", err)
		return volume.Response{Err: err.Error()}
	}

	return volume.Response{}
}
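// Create above resolves existing volumes with d.getByName, which is not part
// of this section. The sketch below is an assumption about how such a lookup
// could work: it pages through the Cinder volume list filtered by name and
// returns the first match. The return type and not-found behavior of the
// driver's real helper may differ.
func (d CinderDriver) getByName(name string) (volumes.Volume, error) {
	var found volumes.Volume
	err := volumes.List(d.Client, &volumes.ListOpts{Name: name}).EachPage(
		func(page pagination.Page) (bool, error) {
			vols, err := volumes.ExtractVolumes(page)
			if err != nil {
				return false, err
			}
			for _, v := range vols {
				if v.Name == name {
					// Stop paging once a matching volume is found.
					found = v
					return false, nil
				}
			}
			return true, nil
		})
	return found, err
}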