// Attaches given cinder volume to the compute running kubelet func (rs *Rackspace) AttachDisk(instanceID string, diskName string) (string, error) { disk, err := rs.getVolume(diskName) if err != nil { return "", err } compute, err := rs.getComputeClient() if err != nil { return "", err } if len(disk.Attachments) > 0 { if instanceID == disk.Attachments[0]["server_id"] { glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, instanceID) return disk.ID, nil } errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"]) glog.Errorf(errMsg) return "", errors.New(errMsg) } _, err = volumeattach.Create(compute, instanceID, &osvolumeattach.CreateOpts{ VolumeID: disk.ID, }).Extract() if err != nil { glog.Errorf("Failed to attach %s volume to %s compute", diskName, instanceID) return "", err } glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, instanceID) return disk.ID, nil }
// createVolumeAttachment attaches volumeID to serverID via the compute API,
// logs the resulting attachment, and then — in the deferred function — waits
// for the volume to reach "in-use", deletes the attachment, and waits for the
// volume to return to "available" (each wait bounded at 60 seconds).
//
// NOTE(review): the defer fires when THIS helper returns, i.e. immediately
// after the t.Logf below — the caller never observes the volume in an
// attached state. If the intent was for the caller's test to use the
// attachment and clean up at its own end, the deferred cleanup should live
// in the caller; confirm against the calling tests.
func createVolumeAttachment(t *testing.T, computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverID string, volumeID string) {
	va, err := volumeattach.Create(computeClient, serverID, &osVolumeAttach.CreateOpts{
		VolumeID: volumeID,
	}).Extract()
	th.AssertNoErr(t, err)
	defer func() {
		// Wait for the attach to fully complete before tearing it down;
		// deleting while still "attaching" can fail on some clouds.
		err = osVolumes.WaitForStatus(blockClient, volumeID, "in-use", 60)
		th.AssertNoErr(t, err)
		err = volumeattach.Delete(computeClient, serverID, va.ID).ExtractErr()
		th.AssertNoErr(t, err)
		// Confirm the volume is reusable by subsequent tests.
		err = osVolumes.WaitForStatus(blockClient, volumeID, "available", 60)
		th.AssertNoErr(t, err)
	}()
	t.Logf("Attached volume to server: %+v", va)
}