Example #1
// Detaches given cinder volume from the compute running kubelet
func (os *OpenStack) DetachDisk(instanceID string, partialDiskId string) error {
	disk, err := os.getVolume(partialDiskId)
	if err != nil {
		return err
	}
	cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil || cClient == nil {
		glog.Errorf("Unable to initialize nova client for region: %s", os.region)
		return err
	}
	if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
		// This is a blocking call and affects kubelet's performance directly.
		// We should consider moving it into a separate goroutine if it becomes a problem.
		err = volumeattach.Delete(cClient, instanceID, disk.ID).ExtractErr()
		if err != nil {
			glog.Errorf("Failed to delete volume %s from compute %s attached %v", disk.ID, instanceID, err)
			return err
		}
		glog.V(2).Infof("Successfully detached volume: %s from compute: %s", disk.ID, instanceID)
	} else {
		errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", disk.Name, instanceID)
		glog.Error(errMsg)
		return errors.New(errMsg)
	}
	return nil
}
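// detachVolumesFromInstance deletes each volume attachment on the given server
// and waits until the Block Storage API reports the volume as "available" again.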
func detachVolumesFromInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error {
	for _, v := range vols {
		va := v.(map[string]interface{})
		aId := va["id"].(string)

		if err := volumeattach.Delete(computeClient, serverId, aId).ExtractErr(); err != nil {
			return err
		}

		stateConf := &resource.StateChangeConf{
			Pending:    []string{"detaching", "in-use"},
			Target:     []string{"available"},
			Refresh:    VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)),
			Timeout:    30 * time.Minute,
			Delay:      5 * time.Second,
			MinTimeout: 2 * time.Second,
		}

		if _, err := stateConf.WaitForState(); err != nil {
			return err
		}
		log.Printf("[INFO] Detached volume %s from instance %s", va["volume_id"], serverId)
	}

	return nil
}
Example #3
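// DetachVolume looks up the volume's current attachment, deletes it from the
// instance, and optionally blocks until the volume has finished detaching.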
func (driver *Driver) DetachVolume(runAsync bool, volumeID, instanceID string) error {
	fields := eff(map[string]interface{}{
		"runAsync":   runAsync,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	})

	if volumeID == "" {
		return errors.WithFields(fields, "volumeId is required")
	}
	volume, err := driver.GetVolume(volumeID, "")
	if err != nil {
		return errors.WithFieldsE(fields, "error getting volume", err)
	}

	fields["instanceId"] = volume[0].Attachments[0].InstanceID
	resp := volumeattach.Delete(
		driver.Client, volume[0].Attachments[0].InstanceID, volumeID)
	if resp.Err != nil {
		return errors.WithFieldsE(fields, "error detaching volume", resp.Err)
	}

	if !runAsync {
		log.WithFields(fields).Debug("waiting for volume to detach")
		err = driver.waitVolumeDetach(volumeID)
		if err != nil {
			return errors.WithFieldsE(
				fields, "error waiting for volume to detach", err)
		}
	}

	log.WithFields(fields).Debug("volume detached")
	return nil
}
Example #4
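// DetachVolume removes the volume's attachment from its instance and, unless
// runAsync is set, waits for the detach operation to complete.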
func (driver *Driver) DetachVolume(runAsync bool, volumeID, instanceID string) error {
	if volumeID == "" {
		return ErrMissingVolumeID
	}
	volume, err := driver.GetVolume(volumeID, "")
	if err != nil {
		return err
	}

	resp := volumeattach.Delete(driver.Client, volume[0].Attachments[0].InstanceID, volumeID)
	if resp.Err != nil {
		return resp.Err
	}

	if !runAsync {
		log.Println("Waiting for volume detachment to complete")
		err = driver.waitVolumeDetach(volumeID)
		if err != nil {
			return err
		}
	}

	log.Println("Detached volume", volumeID)
	return nil
}
Example #5
// VolumeDetach detaches a volume.
func (d *driver) VolumeDetach(
	ctx types.Context,
	volumeID string,
	opts *types.VolumeDetachOpts) (*types.Volume, error) {
	fields := eff(map[string]interface{}{
		"moduleName": ctx,
		"volumeId":   volumeID,
	})

	if volumeID == "" {
		return nil, goof.WithFields(fields, "volumeId is required for VolumeDetach")
	}
	vols, err := d.getVolume(ctx, volumeID, "", types.VolAttReqTrue)
	if err != nil {
		return nil, err
	}

	resp := volumeattach.Delete(
		d.client, vols[0].Attachments[0].InstanceID.ID, volumeID)
	if resp.Err != nil {
		return nil, goof.WithFieldsE(fields, "error detaching volume", resp.Err)
	}
	ctx.WithFields(fields).Debug("waiting for volume to detach")
	volume, err := d.waitVolumeAttachStatus(ctx, volumeID, false)
	if err != nil {
		return nil, goof.WithFieldsE(
			fields, "error waiting for volume to detach", err)
	}
	ctx.WithFields(fields).Debug("volume detached")
	return volume, nil
}
Example #6
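// DetachVolume detaches the volume from its instance, using Cinder's
// force-detach action when force is set, and optionally waits for the
// detach to finish.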
func (d *driver) DetachVolume(
	runAsync bool, volumeID, instanceID string, force bool) error {

	fields := eff(map[string]interface{}{
		"runAsync":   runAsync,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	})

	if volumeID == "" {
		return goof.WithFields(fields, "volumeId is required")
	}
	volume, err := d.GetVolume(volumeID, "")
	if err != nil {
		return goof.WithFieldsE(fields, "error getting volume", err)
	}

	if len(volume) == 0 {
		return goof.WithFields(fields, "no volumes returned")
	}

	if len(volume[0].Attachments) == 0 {
		return nil
	}

	fields["instanceId"] = volume[0].Attachments[0].InstanceID
	if force {
		if resp := volumeactions.ForceDetach(d.clientBlockStoragev2, volumeID); resp.Err != nil {
			log.Info(fmt.Sprintf("%+v", resp.Err))
			return goof.WithFieldsE(fields, "error forcing detach volume", resp.Err)
		}
	} else {
		if resp := volumeattach.Delete(
			d.client, volume[0].Attachments[0].InstanceID, volumeID); resp.Err != nil {
			return goof.WithFieldsE(fields, "error detaching volume", resp.Err)
		}
	}

	if !runAsync {
		log.WithFields(fields).Debug("waiting for volume to detach")
		err = d.waitVolumeDetach(volumeID)
		if err != nil {
			return goof.WithFieldsE(
				fields, "error waiting for volume to detach", err)
		}
	}

	log.WithFields(fields).Debug("volume detached")
	return nil
}
Example #7
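// createVolumeAttachment attaches the volume to the server and, via the
// deferred cleanup, waits for it to become "in-use", detaches it, and waits
// for it to return to "available".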
func createVolumeAttachment(t *testing.T, computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, volumeId string) {
	va, err := volumeattach.Create(computeClient, serverId, &volumeattach.CreateOpts{
		VolumeID: volumeId,
	}).Extract()
	th.AssertNoErr(t, err)
	defer func() {
		err = volumes.WaitForStatus(blockClient, volumeId, "in-use", 60)
		th.AssertNoErr(t, err)
		err = volumeattach.Delete(computeClient, serverId, va.ID).ExtractErr()
		th.AssertNoErr(t, err)
		err = volumes.WaitForStatus(blockClient, volumeId, "available", 60)
		th.AssertNoErr(t, err)
	}()
}
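// resourceBlockStorageVolumeV1Delete detaches the volume from any instances
// it is attached to, deletes it, and waits for the deletion to complete.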
func resourceBlockStorageVolumeV1Delete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	blockStorageClient, err := config.blockStorageV1Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack block storage client: %s", err)
	}

	v, err := volumes.Get(blockStorageClient, d.Id()).Extract()
	if err != nil {
		return CheckDeleted(d, err, "volume")
	}

	// make sure this volume is detached from all instances before deleting
	if len(v.Attachments) > 0 {
		log.Printf("[DEBUG] detaching volumes")
		if computeClient, err := config.computeV2Client(d.Get("region").(string)); err != nil {
			return err
		} else {
			for _, volumeAttachment := range v.Attachments {
				log.Printf("[DEBUG] Attachment: %v", volumeAttachment)
				if err := volumeattach.Delete(computeClient, volumeAttachment["server_id"].(string), volumeAttachment["id"].(string)).ExtractErr(); err != nil {
					return err
				}
			}

			stateConf := &resource.StateChangeConf{
				Pending:    []string{"in-use", "attaching", "detaching"},
				Target:     []string{"available"},
				Refresh:    VolumeV1StateRefreshFunc(blockStorageClient, d.Id()),
				Timeout:    10 * time.Minute,
				Delay:      10 * time.Second,
				MinTimeout: 3 * time.Second,
			}

			_, err = stateConf.WaitForState()
			if err != nil {
				return fmt.Errorf(
					"Error waiting for volume (%s) to become available: %s",
					d.Id(), err)
			}
		}
	}

	// It's possible that this volume was used as a boot device and is currently
	// in a "deleting" state from when the instance was terminated.
	// If this is true, just move on. It'll eventually delete.
	if v.Status != "deleting" {
		if err := volumes.Delete(blockStorageClient, d.Id()).ExtractErr(); err != nil {
			return CheckDeleted(d, err, "volume")
		}
	}

	// Wait for the volume to delete before moving on.
	log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id())

	stateConf := &resource.StateChangeConf{
		Pending:    []string{"deleting", "downloading", "available"},
		Target:     []string{"deleted"},
		Refresh:    VolumeV1StateRefreshFunc(blockStorageClient, d.Id()),
		Timeout:    10 * time.Minute,
		Delay:      10 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	_, err = stateConf.WaitForState()
	if err != nil {
		return fmt.Errorf(
			"Error waiting for volume (%s) to delete: %s",
			d.Id(), err)
	}

	d.SetId("")
	return nil
}
Example #9
File: delegate.go  Project: 40a/bootkube
// Delete requests the deletion of a previously stored VolumeAttachment from the server.
func Delete(client *gophercloud.ServiceClient, serverID, aID string) os.DeleteResult {
	return os.Delete(client, serverID, aID)
}