func (d *driver) detachVolume(
	ctx types.Context, volumeID, volumeName string) error {

	iid := context.MustInstanceID(ctx)
	m, err := d.findMachineByInstanceID(ctx, iid)
	if err != nil {
		return err
	}
	if err := m.Refresh(); err != nil {
		return err
	}
	defer m.Release()

	media, err := d.vbox.GetMedium(volumeID, volumeName)
	if err != nil {
		return err
	}
	if len(media) == 0 {
		return goof.New("no volume returned")
	}
	if len(media) > 1 {
		return goof.New("too many volumes returned")
	}

	if err := media[0].DetachMachines(); err != nil {
		return err
	}

	return nil
}
func (d *driver) detachVolume(volumeID, volumeName string) error {
	d.m.Lock()
	defer d.m.Unlock()

	d.checkSession()

	medium, err := d.virtualbox.GetMedium(volumeID, volumeName)
	if err != nil {
		return err
	}
	if len(medium) == 0 {
		return goof.New("no volume returned")
	} else if len(medium) > 1 {
		return goof.New("too many volumes returned")
	}

	if err := d.machine.Refresh(); err != nil {
		return err
	}
	defer d.machine.Release()

	if err := medium[0].DetachMachines(); err != nil {
		return err
	}

	return nil
}
func (d *driver) prefixToMountUnmount(
	volumeName, volumeID string) (
	[]*core.Volume, []*core.VolumeAttachment, *core.Instance, error) {

	if volumeName == "" && volumeID == "" {
		return nil, nil, nil, goof.New("Missing volume name or ID")
	}

	var instance *core.Instance
	var err error
	if instance, err = d.getInstance(); err != nil {
		return nil, nil, nil, err
	}

	var vols []*core.Volume
	if vols, err = d.r.Storage.GetVolume(volumeID, volumeName); err != nil {
		return nil, nil, nil, err
	}

	switch {
	case len(vols) == 0:
		return nil, nil, nil, goof.New("No volumes returned by name")
	case len(vols) > 1:
		return nil, nil, nil, goof.New("Multiple volumes returned by name")
	}

	var volAttachments []*core.VolumeAttachment
	if volAttachments, err = d.r.Storage.GetVolumeAttach(
		vols[0].VolumeID, instance.InstanceID); err != nil {
		return nil, nil, nil, err
	}

	return vols, volAttachments, instance, nil
}
// NetworkName will return relevant information about how a volume can be
// discovered on an OS.
func (d *driver) NetworkName(volumeName, instanceID string) (string, error) {
	log.WithFields(log.Fields{
		"volumeName": volumeName,
		"instanceID": instanceID,
		"driverName": d.Name()}).Info("returning network name")

	volumes, err := d.r.Storage.GetVolume("", volumeName)
	if err != nil {
		return "", err
	}

	switch {
	case len(volumes) == 0:
		return "", goof.New("No volumes returned by name")
	case len(volumes) > 1:
		return "", goof.New("Multiple volumes returned by name")
	}

	volumeAttachment, err := d.r.Storage.GetVolumeAttach(
		volumes[0].VolumeID, instanceID)
	if err != nil {
		return "", err
	}
	if len(volumeAttachment) == 0 {
		return "", goof.New("Volume not attached")
	}

	// Re-fetch the volume so the returned network name reflects the
	// verified attachment.
	volumes, err = d.r.Storage.GetVolume("", volumeName)
	if err != nil {
		return "", err
	}

	return volumes[0].NetworkName, nil
}
// Attach will attach a volume to an instance.
func (d *driver) Attach(volumeName, instanceID string, force bool) (string, error) {
	log.WithFields(log.Fields{
		"volumeName": volumeName,
		"instanceID": instanceID,
		"driverName": d.Name()}).Info("attaching volume")

	volumes, err := d.r.Storage.GetVolume("", volumeName)
	if err != nil {
		return "", err
	}

	switch {
	case len(volumes) == 0:
		return "", goof.New("No volumes returned by name")
	case len(volumes) > 1:
		return "", goof.New("Multiple volumes returned by name")
	}

	_, err = d.r.Storage.AttachVolume(
		true, volumes[0].VolumeID, instanceID, force)
	if err != nil {
		return "", err
	}

	// Re-fetch the volume so the returned network name reflects the
	// new attachment.
	volumes, err = d.r.Storage.GetVolume("", volumeName)
	if err != nil {
		return "", err
	}

	return volumes[0].NetworkName, nil
}
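// A hypothetical caller sketch for Attach (names such as volDriver and
// instanceID are assumptions for illustration, not part of this package):
// the returned value is the volume's network name, which an OS driver can
// then use to discover the attached device.
//
//	networkName, err := volDriver.Attach("data01", instanceID, false)
//	if err != nil {
//		return err
//	}
//	log.Printf("volume discoverable at %s", networkName)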
func (d *driver) CreateVolume(
	runAsync bool,
	volumeName, volumeID, snapshotID, NUvolumeType string,
	NUIOPS, size int64,
	NUavailabilityZone string) (*core.Volume, error) {

	// volumeExists signals an existing volume by returning exists == true
	// together with an error describing the conflict, so err is returned
	// in both branches.
	exists, err := d.volumeExists(volumeName)
	if err != nil && !exists {
		return nil, err
	} else if exists {
		return nil, err
	}

	postVolRequest := &govmax.PostVolumesReq{
		PostVolumesRequestContent: &govmax.PostVolumesReqContent{
			AtType:             "http://schemas.emc.com/ecom/edaa/root/emc/Symm_StorageConfigurationService",
			ElementName:        d.prefixVolumeName(volumeName),
			ElementType:        "2",
			EMCNumberOfDevices: "1",
			// size is expressed in GB; the SMI-S request wants bytes.
			Size: strconv.Itoa(int(size * 1024 * 1024 * 1024)),
		},
	}

	queuedJob, _, err := d.client.PostVolumes(postVolRequest, d.arrayID)
	if err != nil {
		return nil, goof.WithError("error creating volume", err)
	}
	if len(queuedJob.Entries) == 0 {
		return nil, goof.New("no jobs returned")
	}

	if !runAsync {
		jobStatusResp, err := d.waitJob(
			queuedJob.Entries[0].Content.I_Parameters.I_Job.E0_InstanceID)
		if err != nil {
			return nil, err
		}
		if len(jobStatusResp.Entries) == 0 {
			return nil, goof.New("no volume returned")
		}

		// The new device ID follows the "Output: DeviceIDs=" marker in
		// the job description.
		fields := strings.Split(
			jobStatusResp.Entries[0].Content.I_Description,
			"Output: DeviceIDs=")
		if len(fields) < 2 {
			return nil, goof.New("new volumeID not found")
		}

		volume, err := d.GetVolume(fields[1], "")
		if err != nil {
			return nil, err
		}
		if len(volume) == 0 {
			return nil, goof.New("no new volume returned")
		}
		return volume[0], nil
	}

	return nil, nil
}
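// Note the asynchronous contract of CreateVolume above: when runAsync is
// true the method returns (nil, nil) as soon as the creation job is queued,
// so callers must tolerate a nil volume. A hypothetical caller sketch (the
// argument values are illustrative assumptions):
//
//	vol, err := d.CreateVolume(true, "data01", "", "", "", 0, 16, "")
//	if err != nil {
//		return err
//	}
//	if vol == nil {
//		log.Println("volume creation queued; look up the new device later")
//	}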
func (d *driver) waitAttach(volumeID string) (*core.BlockDevice, error) {
	volumes, err := d.GetVolume(volumeID, "")
	if err != nil {
		return nil, err
	}
	if len(volumes) == 0 {
		return nil, goof.New("no volumes returned")
	}

	timeout := make(chan bool, 1)
	go func() {
		time.Sleep(10 * time.Second)
		timeout <- true
	}()

	successCh := make(chan *core.BlockDevice, 1)
	errorCh := make(chan error, 1)

	go func(volumeID string) {
		log.Println("XtremIO: waiting for volume attach")
		for {
			if d.multipath() {
				_, _ = exec.Command(
					"/sbin/multipath", "-f",
					fmt.Sprintf("3%s", volumes[0].NetworkName)).Output()
				_, _ = exec.Command("/sbin/multipath").Output()
			}

			blockDevices, err := d.GetVolumeMapping()
			if err != nil {
				errorCh <- goof.Newf(
					"problem getting local block devices: %s", err)
				return
			}

			for _, blockDevice := range blockDevices {
				if blockDevice.VolumeID == volumeID {
					successCh <- blockDevice
					return
				}
			}

			time.Sleep(100 * time.Millisecond)
		}
	}(volumeID)

	select {
	case blockDevice := <-successCh:
		log.Println(fmt.Sprintf(
			"XtremIO: got attachedVolume %s at %s",
			blockDevice.VolumeID, blockDevice.DeviceName))
		return blockDevice, nil
	case err := <-errorCh:
		return nil, err
	case <-timeout:
		return nil, goof.New("timed out waiting for mount")
	}
}
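// The hand-rolled timeout goroutine above can also be expressed with the
// standard library's time.After, which delivers on a channel after the
// duration elapses. A minimal, illustrative sketch only (not part of the
// driver); the success and error channels are the same as above:
//
//	select {
//	case blockDevice := <-successCh:
//		return blockDevice, nil
//	case err := <-errorCh:
//		return nil, err
//	case <-time.After(10 * time.Second):
//		return nil, goof.New("timed out waiting for mount")
//	}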
// VolumeAttach attaches a volume.
func (d *driver) VolumeAttach(
	ctx types.Context,
	volumeID string,
	opts *types.VolumeAttachOpts) (*types.Volume, string, error) {

	if err := d.refreshSession(ctx); err != nil {
		return nil, "", err
	}

	if volumeID == "" {
		return nil, "", goof.New("missing volume id")
	}

	// review volume with attachments to any host
	volumes, err := d.getVolume(ctx, volumeID, "", types.VolAttReq)
	if err != nil {
		return nil, "", err
	}
	if len(volumes) == 0 {
		return nil, "", goof.New("no volume found")
	}
	if len(volumes[0].Attachments) > 0 && !opts.Force {
		return nil, "", goof.New("volume already attached to a host")
	}
	if opts.Force {
		if _, err := d.VolumeDetach(ctx, volumeID, nil); err != nil {
			return nil, "", err
		}
	}

	err = d.attachVolume(ctx, volumeID, "")
	if err != nil {
		return nil, "", goof.WithFieldsE(
			log.Fields{
				"provider": vbox.Name,
				"volumeID": volumeID},
			"error attaching volume",
			err,
		)
	}

	volumes, err = d.getVolume(ctx, volumeID, "", types.VolAttReqTrue)
	if err != nil {
		return nil, "", err
	}
	if len(volumes) == 0 {
		return nil, "", goof.New("no volume found after attach")
	}

	svid := strings.Split(volumes[0].ID, "-")

	return volumes[0], svid[0], nil
}
func (d *driver) AttachVolume(
	notused bool,
	volumeID, instanceID string,
	force bool) ([]*core.VolumeAttachment, error) {

	// sanity check the input
	if volumeID == "" {
		return nil, errors.ErrMissingVolumeID
	}
	if instanceID == "" {
		return nil, goof.New("Missing Instance ID")
	}

	// ensure the volume exists and is exported
	volumes, err := d.GetVolume(volumeID, "")
	if err != nil {
		return nil, err
	}
	if len(volumes) == 0 {
		return nil, errors.ErrNoVolumesReturned
	}
	if err := d.client.ExportVolume(volumeID); err != nil {
		return nil, goof.WithError("problem exporting volume", err)
	}

	// see if anyone is attached already
	clients, err := d.client.GetExportClients(volumeID)
	if err != nil {
		return nil, goof.WithError("problem getting export client", err)
	}

	// clear out any existing clients if necessary. if force is false and
	// we have existing clients, we need to exit.
	if len(clients) > 0 {
		if !force {
			return nil, goof.New("Volume already attached to another host")
		}

		// remove all clients
		err = d.client.ClearExportClients(volumeID)
		if err != nil {
			return nil, err
		}
	}

	err = d.client.SetExportClients(volumeID, parseInstanceId(instanceID))
	if err != nil {
		return nil, err
	}

	volumeAttachment, err := d.GetVolumeAttach(volumeID, instanceID)
	if err != nil {
		return nil, err
	}

	return volumeAttachment, nil
}
// Path returns the mounted path of the volume.
func (d *driver) Path(volumeName, volumeID string) (string, error) {
	log.WithFields(log.Fields{
		"volumeName": volumeName,
		"volumeID":   volumeID,
		"driverName": d.Name()}).Info("getting path to volume")

	if volumeName == "" && volumeID == "" {
		return "", goof.New("Missing volume name or ID")
	}

	instances, err := d.r.Storage.GetInstances()
	if err != nil {
		return "", err
	}
	switch {
	case len(instances) == 0:
		return "", goof.New("No instances")
	case len(instances) > 1:
		return "", goof.New(
			"Too many instances returned, limit the storagedrivers")
	}

	volumes, err := d.r.Storage.GetVolume(volumeID, volumeName)
	if err != nil {
		return "", err
	}
	switch {
	case len(volumes) == 0:
		return "", goof.New("No volumes returned by name")
	case len(volumes) > 1:
		return "", goof.New("Multiple volumes returned by name")
	}

	volumeAttachment, err := d.r.Storage.GetVolumeAttach(
		volumes[0].VolumeID, instances[0].InstanceID)
	if err != nil {
		return "", err
	}
	if len(volumeAttachment) == 0 {
		return "", nil
	}

	mounts, err := d.r.OS.GetMounts(volumeAttachment[0].DeviceName, "")
	if err != nil {
		return "", err
	}
	if len(mounts) == 0 {
		return "", nil
	}

	return d.volumeMountPath(mounts[0].Mountpoint), nil
}
// VolumeCreate creates a new volume.
func (d *driver) VolumeCreate(
	ctx types.Context,
	volumeName string,
	opts *types.VolumeCreateOpts) (*types.Volume, error) {

	d.Lock()
	defer d.Unlock()

	if err := d.refreshSession(ctx); err != nil {
		return nil, err
	}

	if opts.Size == nil {
		return nil, goof.New("missing volume size")
	}

	fields := map[string]interface{}{
		"provider":   vbox.Name,
		"volumeName": volumeName,
		"size":       *opts.Size,
	}

	size := *opts.Size * 1024 * 1024 * 1024

	vol, err := d.getVolume(ctx, "", volumeName, types.VolAttFalse)
	if err != nil {
		return nil, err
	}
	if vol != nil {
		return nil, goof.New("volume already exists")
	}

	med, err := d.createVolume(ctx, volumeName, size)
	if err != nil {
		return nil, goof.WithFieldsE(
			fields, "error creating new volume", err)
	}

	var iops int64
	if opts.IOPS != nil {
		iops = *opts.IOPS
	}

	newVol := &types.Volume{
		ID:   med.ID,
		Name: med.Name,
		Size: med.LogicalSize / 1024 / 1024 / 1024,
		IOPS: iops,
		Type: string(med.DeviceType),
	}

	return newVol, nil
}
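// To make the unit conversion above concrete: opts.Size is expressed in GB
// while VirtualBox media sizes are in bytes, so a request for 8 GB becomes
// 8 * 1024 * 1024 * 1024 = 8589934592 bytes, and the returned types.Volume
// converts back by dividing med.LogicalSize by 1024 three times.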
func getVolumeMountPath(name string) (string, error) {
	if name == "" {
		return "", goof.New("Missing volume name")
	}

	return fmt.Sprintf("%s/%s", mountDirectoryPath, name), nil
}
func (d *driver) Volumes(
	ctx types.Context,
	opts *types.VolumesOpts) ([]*types.Volume, error) {

	context.MustSession(ctx)

	iid, iidOK := context.InstanceID(ctx)
	if iidOK && iid.ID == "" {
		return nil, goof.New("missing instance ID")
	}

	volJSONPaths, err := d.getVolJSONs()
	if err != nil {
		return nil, err
	}

	volumes := []*types.Volume{}
	for _, volJSONPath := range volJSONPaths {
		v, err := readVolume(volJSONPath)
		if err != nil {
			return nil, err
		}
		if opts.Attachments > 0 {
			v.AttachmentState = 0
		}
		volumes = append(volumes, v)
	}

	return utils.SortVolumeByID(volumes), nil
}
func (d *driver) RemoveVolume(volumeID string) error {
	fields := eff(map[string]interface{}{
		"volumeID": volumeID,
	})

	deleteVolumeRequest := &govmax.DeleteVolReq{
		DeleteVolRequestContent: &govmax.DeleteVolReqContent{
			AtType: "http://schemas.emc.com/ecom/edaa/root/emc/Symm_StorageConfigurationService",
			DeleteVolRequestContentElement: &govmax.DeleteVolReqContentElement{
				AtType:                  "http://schemas.emc.com/ecom/edaa/root/emc/Symm_StorageVolume",
				DeviceID:                volumeID,
				CreationClassName:       "Symm_StorageVolume",
				SystemName:              "SYMMETRIX-+-" + d.arrayID,
				SystemCreationClassName: "Symm_StorageSystem",
			},
		},
	}

	queuedJob, err := d.client.PostDeleteVol(deleteVolumeRequest, d.arrayID)
	if err != nil {
		return goof.WithFieldsE(fields, "error deleting volume", err)
	}
	if len(queuedJob.Entries) == 0 {
		return goof.New("no jobs returned")
	}

	_, err = d.waitJob(
		queuedJob.Entries[0].Content.I_Parameters.I_Job.E0_InstanceID)
	if err != nil {
		return err
	}

	log.Println("Deleted Volume: " + volumeID)
	return nil
}
func (d *driver) CreateSnapshot(
	runAsync bool,
	snapshotName, volumeID, description string) ([]*core.Snapshot, error) {

	log.WithField("provider", providerName).Debug("CreateSnapshot")

	volumes, err := d.GetVolume(volumeID, "")
	if err != nil {
		return nil, err
	}
	if len(volumes) == 0 {
		return nil, goof.New("no volume returned by ID")
	}

	if err := d.createSnapshot(runAsync, snapshotName, volumes[0]); err != nil {
		return nil, err
	}

	snapshot, err := d.GetSnapshot("", snapshotName, "")
	if err != nil {
		return nil, err
	}

	log.WithFields(log.Fields{
		"runAsync":     runAsync,
		"snapshotName": snapshotName,
		"volumeId":     volumeID,
		"description":  description}).Debug("created snapshot")

	return snapshot, nil
}
func (d *driver) waitUntilOperationIsFinished(
	operation *compute.Operation) error {

	opName := operation.Name

OpLoop:
	for {
		time.Sleep(100 * time.Millisecond)

		op, err := d.client.ZoneOperations.Get(d.project, d.zone, opName).Do()
		if err != nil {
			return err
		}

		switch op.Status {
		case "PENDING", "RUNNING":
			continue
		case "DONE":
			if op.Error != nil {
				bytea, _ := op.Error.MarshalJSON()
				return goof.New(string(bytea))
			}
			break OpLoop
		default:
			log.WithField("provider", providerName).Fatalf(
				"Unknown status %q: %+v", op.Status, op)
			return nil
		}
	}

	return nil
}
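// A hypothetical usage sketch for waitUntilOperationIsFinished (the disk
// value and the Disks.Insert call are assumptions for illustration): GCE
// mutations return a compute.Operation that must be polled to completion
// before the result is usable.
//
//	op, err := d.client.Disks.Insert(d.project, d.zone, disk).Do()
//	if err != nil {
//		return err
//	}
//	return d.waitUntilOperationIsFinished(op)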
func (d *driver) getInstance() (*core.Instance, error) {
	instances, err := d.r.Storage.GetInstances()
	if err != nil {
		return nil, err
	}

	switch {
	case len(instances) == 0:
		return nil, goof.New("No instances")
	case len(instances) > 1:
		return nil, goof.New(
			"Too many instances returned, limit the storagedrivers")
	}

	return instances[0], nil
}
func (d *driver) findMachineByNameOrID(
	ctx types.Context, nameOrID string) (*vboxc.Machine, error) {

	ctx.WithField("nameOrID", nameOrID).Debug("finding local machine")

	m, err := d.vbox.FindMachine(nameOrID)
	if err != nil {
		return nil, err
	}
	if m == nil {
		return nil, goof.New("could not find machine")
	}

	if id, err := m.GetID(); err == nil {
		m.ID = id
	} else {
		return nil, err
	}

	if name, err := m.GetName(); err == nil {
		m.Name = name
	} else {
		return nil, err
	}

	return m, nil
}
// DetachVolume detaches a volume. When running in VMH mode, the SCSI device
// is first deleted from the host and the RDM is detached from the VM before
// the volume is removed from its storage group.
func (d *driver) DetachVolume(
	runAsync bool,
	volumeID string,
	blank string,
	notused bool) error {

	volumes, err := d.GetVolume(volumeID, "")
	if err != nil {
		return err
	}
	if len(volumes) == 0 {
		return goof.New("volume not found")
	}

	if d.vmh != nil {
		if err := d.deleteScsiDevice(volumes[0].NetworkName); err != nil {
			return goof.WithError("error deleting scsi device from host", err)
		}
		if err := d.vmh.DetachRDM(d.vmh.Vm, volumes[0].NetworkName); err != nil {
			return goof.WithError("error removing RDM from vm", err)
		}
	}

	if err := d.detachVolumeFromSG(runAsync, volumeID); err != nil {
		return goof.WithError("error detaching volume from storage group", err)
	}

	log.Println("Detached volume", volumeID)
	return nil
}
func (d *driver) getVolumeMountPath(volumeName string) (string, error) {
	if volumeName == "" {
		return "", goof.New("missing volume name")
	}

	return path.Join(d.mountDirPath(), volumeName), nil
}
func (c *client) NextDevice(
	ctx types.Context,
	opts types.Store) (string, error) {

	if c.isController() {
		return "", utils.NewUnsupportedForClientTypeError(
			c.clientType, "NextDevice")
	}

	if supported, _ := c.Supported(ctx, opts); !supported {
		return "", errExecutorNotSupported
	}

	ctx = context.RequireTX(ctx.Join(c.ctx))

	serviceName, ok := context.ServiceName(ctx)
	if !ok {
		return "", goof.New("missing service name")
	}

	si, err := c.getServiceInfo(serviceName)
	if err != nil {
		return "", err
	}
	driverName := si.Driver.Name

	out, err := c.runExecutor(ctx, driverName, types.LSXCmdNextDevice)
	if err != nil {
		return "", err
	}

	ctx.Debug("xli nextdevice success")
	return gotil.Trim(string(out)), nil
}
func (d *driver) CopySnapshot(
	runAsync bool,
	volumeID, snapshotID, snapshotName,
	destinationSnapshotName, destinationRegion string) (*core.Snapshot, error) {
	return nil, goof.New("This driver does not implement CopySnapshot")
}
func (d *driver) VolumeAttach(
	ctx types.Context,
	volumeID string,
	opts *types.VolumeAttachOpts) (*types.Volume, string, error) {

	if d.isController() {
		return nil, "", utils.NewUnsupportedForClientTypeError(
			d.clientType, "VolumeAttach")
	}

	ctx = d.requireCtx(ctx)

	serviceName, ok := context.ServiceName(ctx)
	if !ok {
		return nil, "", goof.New("missing service name")
	}

	nextDevice, err := d.NextDevice(ctx, utils.NewStore())
	if err != nil {
		return nil, "", err
	}
	var nextDevicePtr *string
	if nextDevice != "" {
		nextDevicePtr = &nextDevice
	}

	req := &types.VolumeAttachRequest{
		NextDeviceName: nextDevicePtr,
		Force:          opts.Force,
		Opts:           opts.Opts.Map(),
	}

	return d.client.VolumeAttach(ctx, serviceName, volumeID, req)
}
func (d *driver) getLunMaps(initiatorName, volumeID string) (xtio.Refs, error) {
	if initiatorName == "" {
		return nil, goof.New("Missing initiatorName")
	}

	initiatorGroup, err := d.client.GetInitiatorGroup("", initiatorName)
	if err != nil {
		return nil, err
	}

	lunMaps, err := d.client.GetLunMaps()
	if err != nil {
		return nil, err
	}

	var refs xtio.Refs
	for _, ref := range lunMaps {
		idents := strings.Split(ref.Name, "_")
		if len(idents) < 3 {
			continue
		} else if strconv.Itoa(initiatorGroup.Index) == idents[1] &&
			volumeID == idents[0] {
			refs = append(refs, ref)
		}
	}

	return refs, nil
}
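// To illustrate the name parsing above: judging from the split logic, the
// lun-map names are assumed to take the form
// "<volumeIndex>_<initiatorGroupIndex>_<suffix>", so a ref named "12_3_1"
// matches volumeID "12" and an initiator group whose Index is 3. Names with
// fewer than three "_"-delimited parts are skipped.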
func (r *configReg) Key(
	keyType types.ConfigKeyTypes,
	short string,
	defVal interface{},
	description string,
	keys ...interface{}) {

	lk := len(keys)
	if lk == 0 {
		panic(goof.New("keys is empty"))
	}

	rk := &configRegKey{
		keyType: keyType,
		short:   short,
		desc:    description,
		defVal:  defVal,
		keyName: toString(keys[0]),
	}

	if keyType == types.SecureString {
		secureKey(rk)
	}

	// If no explicit flag name was supplied, derive one by camelCasing the
	// dot-delimited key name.
	if lk < 2 {
		kp := strings.Split(rk.keyName, ".")
		for x, s := range kp {
			if x == 0 {
				var buff []byte
				b := bytes.NewBuffer(buff)
				for y, ch := range s {
					if y == 0 {
						b.WriteRune(unicode.ToLower(ch))
					} else {
						b.WriteRune(ch)
					}
				}
				kp[x] = b.String()
			} else {
				kp[x] = strings.Title(s)
			}
		}
		rk.flagName = strings.Join(kp, "")
	} else {
		rk.flagName = toString(keys[1])
	}

	// If no explicit environment variable name was supplied, derive one by
	// upper-casing the key name's parts and joining them with underscores.
	if lk < 3 {
		kp := strings.Split(rk.keyName, ".")
		for x, s := range kp {
			kp[x] = strings.ToUpper(s)
		}
		rk.envVarName = strings.Join(kp, "_")
	} else {
		rk.envVarName = toString(keys[2])
	}

	r.keys = append(r.keys, rk)
}
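// To illustrate the derivation above with a hypothetical key (shown for
// illustration only): when only the key name is supplied, both the flag
// name and the environment variable name are derived from it.
//
//	r.Key(types.String, "", "", "the volume path", "virtualbox.volumePath")
//	// keyName:    "virtualbox.volumePath"
//	// flagName:   "virtualboxVolumePath"  (first rune of the first part
//	//              lower-cased, subsequent parts Title-cased, joined)
//	// envVarName: "VIRTUALBOX_VOLUMEPATH" (parts upper-cased, joined by "_")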
// Create will create a new volume with the volumeName and opts.
func (d *driver) Create(
	ctx types.Context,
	volumeName string,
	opts *types.VolumeCreateOpts) (*types.Volume, error) {

	if volumeName == "" {
		return nil, goof.New("missing volume name or ID")
	}

	// Seed the new options with the driver's configured defaults.
	optsNew := &types.VolumeCreateOpts{}
	az := d.availabilityZone()
	optsNew.AvailabilityZone = &az
	i, _ := strconv.Atoi(d.size())
	size := int64(i)
	optsNew.Size = &size
	volumeType := d.volumeType()
	optsNew.Type = &volumeType
	io, _ := strconv.Atoi(d.iops())
	IOPS := int64(io)
	optsNew.IOPS = &IOPS

	// Values supplied in opts override the configured defaults.
	if opts.Opts.IsSet("availabilityZone") {
		az = opts.Opts.GetString("availabilityZone")
	}
	if opts.Opts.IsSet("size") {
		size = opts.Opts.GetInt64("size")
	}
	if opts.Opts.IsSet("volumeType") {
		volumeType = opts.Opts.GetString("volumeType")
	}
	if opts.Opts.IsSet("type") {
		volumeType = opts.Opts.GetString("type")
	}
	if opts.Opts.IsSet("iops") {
		IOPS = opts.Opts.GetInt64("iops")
	}
	optsNew.Opts = opts.Opts

	ctx.WithFields(log.Fields{
		"volumeName":       volumeName,
		"availabilityZone": az,
		"size":             size,
		"volumeType":       volumeType,
		"IOPS":             IOPS,
		"opts":             opts}).Info("creating volume")

	client := context.MustClient(ctx)
	vol, err := client.Storage().VolumeCreate(ctx, volumeName, optsNew)
	if err != nil {
		return nil, err
	}

	ctx.WithFields(log.Fields{
		"volumeName": volumeName,
		"vol":        vol}).Info("volume created")

	return vol, nil
}
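// An illustrative sketch of the precedence above (volDriver, ctx, and the
// store contents are assumptions, not part of this package): values set in
// opts.Opts override the driver's configured defaults, so this request asks
// for a 16 GB volume of type "gp2" regardless of the configured size and
// type.
//
//	store := utils.NewStore()
//	store.Set("size", int64(16))
//	store.Set("volumeType", "gp2")
//	vol, err := volDriver.Create(ctx, "data01",
//		&types.VolumeCreateOpts{Opts: store})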
func (d *driver) waitJob(instanceID string) (*govmax.GetJobStatusResp, error) {
	timeout := make(chan bool, 1)
	go func() {
		time.Sleep(jobTimeout * time.Second)
		timeout <- true
	}()

	successCh := make(chan *govmax.GetJobStatusResp, 1)
	errorCh := make(chan struct {
		err           error
		jobStatusResp *govmax.GetJobStatusResp
	}, 1)

	go func(instanceID string) {
		log.Println("waiting for job to complete")
		for {
			jobStatusResp, jobStatus, err := d.client.GetJobStatus(instanceID)
			if err != nil {
				errorCh <- struct {
					err           error
					jobStatusResp *govmax.GetJobStatusResp
				}{
					goof.WithError("error getting job status", err),
					nil,
				}
				// Stop polling once the error has been reported; the
				// select below returns as soon as errorCh receives.
				return
			}

			switch {
			case jobStatus == "TERMINATED" ||
				jobStatus == "KILLED" ||
				jobStatus == "EXCEPTION":
				errorCh <- struct {
					err           error
					jobStatusResp *govmax.GetJobStatusResp
				}{
					goof.Newf("problem with job: %s", jobStatus),
					jobStatusResp,
				}
				return
			case jobStatus == "COMPLETED":
				successCh <- jobStatusResp
				return
			}

			time.Sleep(100 * time.Millisecond)
		}
	}(instanceID)

	select {
	case jobStatusResp := <-successCh:
		return jobStatusResp, nil
	case jobStatusRespErr := <-errorCh:
		return jobStatusRespErr.jobStatusResp, jobStatusRespErr.err
	case <-timeout:
		return nil, goof.New("timed out waiting for job")
	}
}
// LocalDevices returns a map of the system's local devices.
func (d *driver) LocalDevices(
	ctx types.Context,
	opts types.Store) (*types.LocalDevices, error) {

	if ld, ok := context.LocalDevices(ctx); ok {
		return ld, nil
	}
	return nil, goof.New("missing local devices")
}
// Remove will remove a remote volume.
func (d *driver) Remove(volumeName string) error {
	log.WithFields(log.Fields{
		"volumeName": volumeName,
		"driverName": d.Name()}).Info("removing volume")

	if volumeName == "" {
		return goof.New("Missing volume name")
	}

	instances, err := d.r.Storage.GetInstances()
	if err != nil {
		return err
	}
	switch {
	case len(instances) == 0:
		return goof.New("No instances")
	case len(instances) > 1:
		return goof.New(
			"Too many instances returned, limit the storagedrivers")
	}

	volumes, err := d.r.Storage.GetVolume("", volumeName)
	if err != nil {
		return err
	}
	switch {
	case len(volumes) == 0:
		return goof.New("No volumes returned by name")
	case len(volumes) > 1:
		return goof.New("Multiple volumes returned by name")
	}

	// Unmount the volume before removing it.
	if err := d.Unmount("", volumes[0].VolumeID); err != nil {
		return err
	}

	return d.r.Storage.RemoveVolume(volumes[0].VolumeID)
}
func (d *driver) createGetInstance() error {
	var err error
	var instances []*core.Instance
	if instances, err = d.r.Storage.GetInstances(); err != nil {
		return err
	}

	switch {
	case len(instances) == 0:
		return goof.New("No instances")
	case len(instances) > 1:
		return goof.New(
			"Too many instances returned, limit the storagedrivers")
	}

	return nil
}