// DetachDisk detaches the given cinder volume from the compute running kubelet.
func (rs *Rackspace) DetachDisk(instanceID string, partialDiskId string) error {
	disk, err := rs.getVolume(partialDiskId)
	if err != nil {
		return err
	}
	compute, err := rs.getComputeClient()
	if err != nil {
		return err
	}
	if len(disk.Attachments) > 1 {
		// Rackspace does not support "multiattach"; this is a sanity check.
		errmsg := fmt.Sprintf("Volume %s is attached to multiple instances, which is not supported by this provider.", disk.ID)
		return errors.New(errmsg)
	}
	if len(disk.Attachments) > 0 && instanceID == disk.Attachments[0]["server_id"] {
		// This is a blocking call and affects kubelet's performance directly.
		// We should consider kicking it out into a separate routine, if it is bad.
		err = volumeattach.Delete(compute, instanceID, disk.ID).ExtractErr()
		if err != nil {
			glog.Errorf("Failed to delete volume %s from compute %s: %v", disk.ID, instanceID, err)
			return err
		}
		glog.V(2).Infof("Successfully detached volume %s from compute %s", disk.ID, instanceID)
	} else {
		errMsg := fmt.Sprintf("Disk %s has no attachments or is not attached to compute %s", disk.Name, instanceID)
		glog.Errorf("%s", errMsg)
		return errors.New(errMsg)
	}
	return nil
}
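// A minimal sketch of how a caller might exercise DetachDisk. newRackspace
// and the IDs below are hypothetical and do not come from the code above;
// this only illustrates the contract: a non-nil error is returned both for
// multi-attached volumes and for volumes not attached to the given instance.
func detachDiskExample() error {
	rs, err := newRackspace() // hypothetical constructor, not shown above
	if err != nil {
		return err
	}
	// Partial volume IDs are resolved by rs.getVolume inside DetachDisk.
	return rs.DetachDisk("instance-uuid", "partial-volume-id")
}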
// createVolumeAttachment attaches the given volume to the given server and
// defers cleanup: when this helper returns, it waits for the attachment to
// reach "in-use", detaches the volume, and waits for it to become
// "available" again.
func createVolumeAttachment(t *testing.T, computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverID string, volumeID string) {
	va, err := volumeattach.Create(computeClient, serverID, &osVolumeAttach.CreateOpts{
		VolumeID: volumeID,
	}).Extract()
	th.AssertNoErr(t, err)
	defer func() {
		err = osVolumes.WaitForStatus(blockClient, volumeID, "in-use", 60)
		th.AssertNoErr(t, err)
		err = volumeattach.Delete(computeClient, serverID, va.ID).ExtractErr()
		th.AssertNoErr(t, err)
		err = osVolumes.WaitForStatus(blockClient, volumeID, "available", 60)
		th.AssertNoErr(t, err)
	}()
	t.Logf("Attached volume to server: %+v", va)
}
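// A minimal sketch of an acceptance test that might drive the helper above;
// newComputeClient, newBlockClient, createServer, and createVolume are
// hypothetical setup helpers, not part of the snippet above.
func TestVolumeAttachSketch(t *testing.T) {
	computeClient, err := newComputeClient()
	th.AssertNoErr(t, err)
	blockClient, err := newBlockClient()
	th.AssertNoErr(t, err)

	serverID := createServer(t, computeClient) // hypothetical helper
	volumeID := createVolume(t, blockClient)   // hypothetical helper

	// Attaches the volume; the deferred cleanup inside the helper detaches
	// it again before this call returns.
	createVolumeAttachment(t, computeClient, blockClient, serverID, volumeID)
}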