// DetachVolume detaches a volume from the host. When running in vmh mode the
// SCSI device and RDM are removed from the VM before the volume is detached
// from the storage group.
func (d *driver) DetachVolume(runAsync bool, volumeID string, blank string, notused bool) error {
	volumes, err := d.GetVolume(volumeID, "")
	if err != nil {
		return err
	}

	if len(volumes) == 0 {
		return goof.New("volume not found")
	}

	if d.vmh != nil {
		if err := d.deleteScsiDevice(volumes[0].NetworkName); err != nil {
			return goof.WithError("error deleting scsi device from host", err)
		}
		if err := d.vmh.DetachRDM(d.vmh.Vm, volumes[0].NetworkName); err != nil {
			return goof.WithError("error removing RDM from vm", err)
		}
	}

	if err := d.detachVolumeFromSG(runAsync, volumeID); err != nil {
		return goof.WithError("error detaching volume from storage group", err)
	}

	log.Println("Detached volume", volumeID)
	return nil
}
// InstanceID returns the instance ID object for the local host.
func (d *driver) InstanceID(
	ctx types.Context,
	opts types.Store) (*types.InstanceID, error) {

	// The instance name is read from XenStore and has the form "instance-<id>".
	cmd := exec.Command("xenstore-read", "name")
	cmd.Env = d.config.EnvVars()
	cmdOut, err := cmd.Output()
	if err != nil {
		return nil, goof.WithError("problem getting InstanceID", err)
	}

	instanceID := strings.Replace(string(cmdOut), "\n", "", -1)

	validInstanceID := regexp.MustCompile(`^instance-`)
	if !validInstanceID.MatchString(instanceID) {
		return nil, goof.New("InstanceID not valid")
	}
	instanceID = strings.Replace(instanceID, "instance-", "", 1)

	iid := &types.InstanceID{Driver: rackspace.Name}
	if err := iid.MarshalMetadata(instanceID); err != nil {
		return nil, err
	}

	return iid, nil
}
// VolumeDetach detaches a volume.
func (d *driver) VolumeDetach(
	ctx types.Context,
	volumeID string,
	opts *types.VolumeDetachOpts) (*types.Volume, error) {

	// review volume with attachments to any host
	ec2vols, err := d.getVolume(ctx, volumeID, "")
	if err != nil {
		return nil, goof.WithError("error getting volume", err)
	}
	volumes, convErr := d.toTypesVolume(
		ctx, ec2vols, types.VolAttReqTrue)
	if convErr != nil {
		return nil, goof.WithError("error converting to types.Volume", convErr)
	}

	// no volumes to detach
	if len(volumes) == 0 {
		return nil, errNoVolReturned
	}

	// volume has no attachments
	if len(volumes[0].Attachments) == 0 {
		return nil, errVolAlreadyDetached
	}

	dvInput := &awsec2.DetachVolumeInput{
		VolumeId: &volumeID,
		Force:    &opts.Force,
	}

	// Detach volume using EC2 API call
	if _, err = mustSession(ctx).DetachVolume(dvInput); err != nil {
		return nil, goof.WithFieldsE(
			log.Fields{
				"provider": d.Name(),
				"volumeID": volumeID},
			"error detaching volume", err)
	}

	if err = d.waitVolumeComplete(ctx, volumeID, waitVolumeDetach); err != nil {
		return nil, goof.WithError("error waiting for volume detach", err)
	}

	ctx.Info("detached volume", volumeID)

	// check if successful detach
	detachedVol, err := d.VolumeInspect(
		ctx, volumeID, &types.VolumeInspectOpts{
			Attachments: types.VolAttReqTrue,
			Opts:        opts.Opts,
		})
	if err != nil {
		return nil, goof.WithError("error getting volume", err)
	}

	return detachedVol, nil
}
func (d *driver) AttachVolume(
	notused bool,
	volumeID, instanceID string,
	force bool) ([]*core.VolumeAttachment, error) {

	// sanity check the input
	if volumeID == "" {
		return nil, errors.ErrMissingVolumeID
	}
	if instanceID == "" {
		return nil, goof.New("Missing Instance ID")
	}

	// ensure the volume exists and is exported
	volumes, err := d.GetVolume(volumeID, "")
	if err != nil {
		return nil, err
	}
	if len(volumes) == 0 {
		return nil, errors.ErrNoVolumesReturned
	}
	if err := d.client.ExportVolume(volumeID); err != nil {
		return nil, goof.WithError("problem exporting volume", err)
	}

	// see if anyone is attached already
	clients, err := d.client.GetExportClients(volumeID)
	if err != nil {
		return nil, goof.WithError("problem getting export client", err)
	}

	// clear out any existing clients if necessary. if force is false and
	// we have existing clients, we need to exit.
	if len(clients) > 0 {
		if !force {
			return nil, goof.New("Volume already attached to another host")
		}

		// remove all clients
		err = d.client.ClearExportClients(volumeID)
		if err != nil {
			return nil, err
		}
	}

	err = d.client.SetExportClients(volumeID, parseInstanceId(instanceID))
	if err != nil {
		return nil, err
	}

	volumeAttachment, err := d.GetVolumeAttach(volumeID, instanceID)
	if err != nil {
		return nil, err
	}

	return volumeAttachment, nil
}
func (c *client) dial(ctx types.Context) error {
	ctx.WithField("path", lsxMutex).Info("lsx lock file path")

	svcInfos, err := c.Services(ctx)
	if err != nil {
		return err
	}

	// controller clients do not have any additional dialer logic
	if c.isController() {
		return nil
	}

	store := utils.NewStore()
	c.ctx = c.ctx.WithValue(context.ServerKey, c.ServerName())

	if !c.config.GetBool(types.ConfigExecutorNoDownload) {
		ctx.Info("initializing executors cache")
		if _, err := c.Executors(ctx); err != nil {
			return err
		}
		if err := c.updateExecutor(ctx); err != nil {
			return err
		}
	}

	for service := range svcInfos {
		ctx := c.ctx.WithValue(context.ServiceKey, service)
		ctx.Info("initializing supported cache")
		supported, err := c.Supported(ctx, store)
		if err != nil {
			return goof.WithError("error initializing supported cache", err)
		}

		if !supported {
			ctx.Warn("executor not supported")
			continue
		}

		ctx.Info("initializing instance ID cache")
		if _, err := c.InstanceID(ctx, store); err != nil {
			if err == types.ErrNotImplemented {
				ctx.WithError(err).Warn("cannot get instance ID")
				continue
			}
			return goof.WithError("error initializing instance ID cache", err)
		}
	}

	return nil
}
// NextDevice returns the next available device.
func (d *driver) NextDevice(
	ctx types.Context,
	opts types.Store) (string, error) {
	// All possible device paths on Linux EC2 instances are /dev/xvd[f-p]
	letters := []string{
		"f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p"}

	// Find which letters are used for local devices
	localDeviceNames := make(map[string]bool)

	localDevices, err := d.LocalDevices(
		ctx, &types.LocalDevicesOpts{Opts: opts})
	if err != nil {
		return "", goof.WithError("error getting local devices", err)
	}
	localDeviceMapping := localDevices.DeviceMap

	for localDevice := range localDeviceMapping {
		re, _ := regexp.Compile(`^/dev/` +
			ebsUtils.NextDeviceInfo.Prefix +
			`(` + ebsUtils.NextDeviceInfo.Pattern + `)`)
		res := re.FindStringSubmatch(localDevice)
		if len(res) > 0 {
			localDeviceNames[res[1]] = true
		}
	}

	// Find which letters are used for ephemeral devices
	ephemeralDevices, err := d.getEphemeralDevices(ctx)
	if err != nil {
		return "", goof.WithError("error getting ephemeral devices", err)
	}

	for _, ephemeralDevice := range ephemeralDevices {
		re, _ := regexp.Compile(`^` +
			ebsUtils.NextDeviceInfo.Prefix +
			`(` + ebsUtils.NextDeviceInfo.Pattern + `)`)
		res := re.FindStringSubmatch(ephemeralDevice)
		if len(res) > 0 {
			localDeviceNames[res[1]] = true
		}
	}

	// Find next available letter for device path
	for _, letter := range letters {
		if localDeviceNames[letter] {
			continue
		}
		return fmt.Sprintf(
			"/dev/%s%s", ebsUtils.NextDeviceInfo.Prefix, letter), nil
	}
	return "", errNoAvaiDevice
}
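// The sketch below is not part of the driver; it is a minimal, standalone
// illustration of how the prefix/pattern regex in NextDevice is expected to
// pull the in-use letter out of a device path. The literal prefix "xvd" and
// pattern "[f-p]" stand in for ebsUtils.NextDeviceInfo and are assumptions
// made for this example only.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^/dev/xvd([f-p])`)
	for _, dev := range []string{"/dev/xvdf", "/dev/xvda", "/dev/xvdp"} {
		if m := re.FindStringSubmatch(dev); len(m) > 0 {
			fmt.Printf("%s -> letter %q is in use\n", dev, m[1])
		} else {
			fmt.Printf("%s -> not an attachable-letter device\n", dev)
		}
	}
}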
func (d *driver) VolumeAttach(
	ctx types.Context,
	volumeID string,
	opts *types.VolumeAttachOpts) (*types.Volume, string, error) {
	iid := context.MustInstanceID(ctx)

	mapVolumeSdcParam := &siotypes.MapVolumeSdcParam{
		SdcID:                 iid.ID,
		AllowMultipleMappings: "false",
		AllSdcs:               "",
	}

	vol, err := d.VolumeInspect(
		ctx, volumeID, &types.VolumeInspectOpts{
			Attachments: types.VolAttReqTrue,
		})
	if err != nil {
		return nil, "", goof.WithError("error getting volume", err)
	}

	if len(vol.Attachments) > 0 && !opts.Force {
		return nil, "", goof.New("volume already attached to a host")
	}

	if len(vol.Attachments) > 0 && opts.Force {
		if _, err := d.VolumeDetach(ctx, volumeID,
			&types.VolumeDetachOpts{Force: opts.Force}); err != nil {
			return nil, "", err
		}
	}

	targetVolume := sio.NewVolume(d.client)
	targetVolume.Volume = &siotypes.Volume{ID: vol.ID}

	err = targetVolume.MapVolumeSdc(mapVolumeSdcParam)
	if err != nil {
		return nil, "", goof.WithError("error mapping volume sdc", err)
	}

	attachedVol, err := d.VolumeInspect(
		ctx, volumeID, &types.VolumeInspectOpts{
			Attachments: types.VolAttReqTrue,
			Opts:        opts.Opts,
		})
	if err != nil {
		return nil, "", goof.WithError("error getting volume", err)
	}

	return attachedVol, attachedVol.ID, nil
}
// Used in VolumeAttach
func (d *driver) attachVolume(
	ctx types.Context,
	volumeID, volumeName, deviceName string) error {

	// sanity check # of volumes to attach
	vol, err := d.getVolume(ctx, volumeID, volumeName)
	if err != nil {
		return goof.WithError("error getting volume", err)
	}

	if len(vol) == 0 {
		return errNoVolReturned
	}
	if len(vol) > 1 {
		return errTooManyVolsReturned
	}

	// Attach volume via EC2 API call
	avInput := &awsec2.AttachVolumeInput{
		Device:     &deviceName,
		InstanceId: mustInstanceIDID(ctx),
		VolumeId:   &volumeID,
	}

	if _, err := mustSession(ctx).AttachVolume(avInput); err != nil {
		return err
	}
	return nil
}
// Retrieve device paths currently attached and/or mounted
func (d *driver) LocalDevices(
	ctx types.Context,
	opts *types.LocalDevicesOpts) (*types.LocalDevices, error) {

	f, err := os.Open(procPartitions)
	if err != nil {
		return nil, goof.WithError("error reading "+procPartitions, err)
	}
	defer f.Close()

	devMap := map[string]string{}

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) != 4 {
			continue
		}
		devName := fields[3]
		if !xvdRX.MatchString(devName) {
			continue
		}
		devPath := path.Join("/dev/", devName)
		devMap[devPath] = devPath
	}

	ld := &types.LocalDevices{Driver: d.Name()}
	if len(devMap) > 0 {
		ld.DeviceMap = devMap
	}
	return ld, nil
}
// Find ephemeral devices from metadata
func (d *driver) getEphemeralDevices(
	ctx types.Context) (deviceNames []string, err error) {
	buf, err := ebsUtils.BlockDevices(ctx)
	if err != nil {
		return nil, err
	}

	// Filter list of all block devices for ephemeral devices
	scanner := bufio.NewScanner(bytes.NewReader(buf))
	scanner.Split(bufio.ScanWords)

	for scanner.Scan() {
		word := scanner.Bytes()
		if !ephemDevRX.Match(word) {
			continue
		}
		name, err := ebsUtils.BlockDeviceName(ctx, string(word))
		if err != nil {
			return nil, goof.WithError(
				"ec2 block device mapping lookup failed", err)
		}
		// compensate for kernel volume mapping i.e. change "/dev/sda" to
		// "/dev/xvda"
		deviceNameStr := strings.Replace(
			string(name), "sd", ebsUtils.NextDeviceInfo.Prefix, 1)
		deviceNames = append(deviceNames, deviceNameStr)
	}

	return deviceNames, nil
}
func (d *driver) RemoveVolume(volumeID string) error {
	log.WithField("provider", providerName).Debugf("RemoveVolume :%s", volumeID)

	if _, err := d.client.Disks.Delete(d.project, d.zone, volumeID).Do(); err != nil {
		return goof.WithError("problem removing volume", err)
	}
	return nil
}
func (d *driver) LocalDevices(
	ctx types.Context,
	opts *types.LocalDevicesOpts) (*types.LocalDevices, error) {

	// Read from /proc/partitions
	localDevices := make(map[string]string)
	file := "/proc/partitions"
	contentBytes, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, goof.WithError(
			"Error reading /proc/partitions", err)
	}
	content := string(contentBytes)

	// Parse device names
	var deviceName string
	lines := strings.Split(content, "\n")
	for _, line := range lines[2:] {
		fields := strings.Fields(line)
		if len(fields) == 4 {
			deviceName = "/dev/" + fields[3]
			localDevices[deviceName] = deviceName
		}
	}

	return &types.LocalDevices{
		Driver:    rackspace.Name,
		DeviceMap: localDevices,
	}, nil
}
func (d *driver) AttachVolume(
	notused bool,
	volumeID, instanceID string,
	force bool) ([]*core.VolumeAttachment, error) {

	if volumeID == "" {
		return nil, errors.ErrMissingVolumeID
	}

	volumes, err := d.GetVolume(volumeID, "")
	if err != nil {
		return nil, err
	}

	if len(volumes) == 0 {
		return nil, errors.ErrNoVolumesReturned
	}

	if err := d.client.ExportVolume(volumeID); err != nil {
		return nil, goof.WithError("problem exporting volume", err)
	}

	volumeAttachment, err := d.GetVolumeAttach(volumeID, instanceID)
	if err != nil {
		return nil, err
	}

	return volumeAttachment, nil
}
func (d *driver) RemoveSnapshot(snapshotID string) error {
	log.WithField("provider", providerName).Debugf("RemoveSnapshot :%s", snapshotID)

	if _, err := d.client.Snapshots.Delete(d.project, snapshotID).Do(); err != nil {
		return goof.WithError("problem removing snapshot", err)
	}
	return nil
}
func (d *driver) GetVolume(volumeID, volumeName string) ([]*core.Volume, error) {
	localDeviceMap, err := d.getLocalWWNDeviceByID()
	if err != nil {
		return nil, goof.WithError("error getting local devices", err)
	}

	var volumesResp *govmax.GetVolumesResp
	if volumeID != "" {
		volumesResp, err = d.client.GetVolumeByID(d.sid(), volumeID)
	} else if volumeName != "" {
		volumesResp, err = d.client.GetVolumeByName(d.sid(), d.prefixVolumeName(volumeName))
	} else {
		volumesResp, err = d.client.GetVolumes(d.sid())
	}
	if err != nil {
		return nil, goof.WithError("problem getting volumes", err)
	}

	var volumesSD []*core.Volume
	for _, entry := range volumesResp.Entries {
		if d.isValidVolume(entry.Content.I_ElementName) {
			deviceName := localDeviceMap[entry.Content.I_EMCWWN]

			volumeSD := &core.Volume{
				Name:             d.unprefixVolumeName(entry.Content.I_ElementName),
				VolumeID:         entry.Content.I_DeviceID,
				NetworkName:      entry.Content.I_EMCWWN,
				Status:           strings.Join(entry.Content.I_StatusDescriptions, ","),
				VolumeType:       entry.Content.I_Caption,
				AvailabilityZone: d.arrayID,
				Size: strconv.Itoa(
					(entry.Content.I_BlockSize * entry.Content.I_NumberOfBlocks) / 1024 / 1024 / 1024),
			}

			if deviceName != "" {
				volumeSD.Attachments = append(volumeSD.Attachments,
					&core.VolumeAttachment{
						VolumeID:   entry.Content.I_DeviceID,
						InstanceID: d.instanceID,
						DeviceName: deviceName,
						Status:     strings.Join(entry.Content.I_StatusDescriptions, ","),
					})
			}

			volumesSD = append(volumesSD, volumeSD)
		}
	}

	return volumesSD, nil
}
func (d *driver) waitJob(instanceID string) (*govmax.GetJobStatusResp, error) {
	timeout := make(chan bool, 1)
	go func() {
		time.Sleep(jobTimeout * time.Second)
		timeout <- true
	}()

	successCh := make(chan *govmax.GetJobStatusResp, 1)
	errorCh := make(chan struct {
		err           error
		jobStatusResp *govmax.GetJobStatusResp
	}, 1)

	go func(instanceID string) {
		log.Println("waiting for job to complete")
		for {
			jobStatusResp, jobStatus, err := d.client.GetJobStatus(instanceID)
			if err != nil {
				errorCh <- struct {
					err           error
					jobStatusResp *govmax.GetJobStatusResp
				}{
					goof.WithError(
						"error getting job status", err),
					nil,
				}
				// stop polling once the error has been reported
				return
			}

			switch {
			case jobStatus == "TERMINATED" || jobStatus == "KILLED" ||
				jobStatus == "EXCEPTION":
				errorCh <- struct {
					err           error
					jobStatusResp *govmax.GetJobStatusResp
				}{
					goof.Newf(
						"problem with job: %s", jobStatus),
					jobStatusResp,
				}
				return
			case jobStatus == "COMPLETED":
				successCh <- jobStatusResp
				return
			}

			time.Sleep(100 * time.Millisecond)
		}
	}(instanceID)

	select {
	case jobStatusResp := <-successCh:
		return jobStatusResp, nil
	case jobStatusRespErr := <-errorCh:
		return jobStatusRespErr.jobStatusResp, jobStatusRespErr.err
	case <-timeout:
		return nil, goof.New("timed out waiting for job")
	}
}
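// A minimal, runnable sketch (not the driver's code) of the same timeout idea
// expressed with time.After directly in the select, which avoids the dedicated
// timeout goroutine and channel used in waitJob above. The channel payload and
// names here are illustrative assumptions only.
package main

import (
	"errors"
	"fmt"
	"time"
)

func waitWithTimeout(done <-chan string, timeout time.Duration) (string, error) {
	select {
	case status := <-done:
		return status, nil
	case <-time.After(timeout):
		return "", errors.New("timed out waiting for job")
	}
}

func main() {
	done := make(chan string, 1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		done <- "COMPLETED"
	}()
	fmt.Println(waitWithTimeout(done, time.Second))
}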
func (d *driver) CreateVolume(
	runAsync bool,
	volumeName, volumeID, snapshotID, NUvolumeType string,
	NUIOPS, size int64,
	NUavailabilityZone string) (*core.Volume, error) {

	exists, err := d.volumeExists(volumeName)
	if err != nil && !exists {
		return nil, err
	} else if exists {
		return nil, err
	}

	PostVolRequest := &govmax.PostVolumesReq{
		PostVolumesRequestContent: &govmax.PostVolumesReqContent{
			AtType:             "http://schemas.emc.com/ecom/edaa/root/emc/Symm_StorageConfigurationService",
			ElementName:        d.prefixVolumeName(volumeName),
			ElementType:        "2",
			EMCNumberOfDevices: "1",
			Size:               strconv.Itoa(int(size * 1024 * 1024 * 1024)),
		},
	}

	queuedJob, _, err := d.client.PostVolumes(PostVolRequest, d.arrayID)
	if err != nil {
		return nil, goof.WithError("error creating volume", err)
	}
	if len(queuedJob.Entries) == 0 {
		return nil, goof.New("no jobs returned")
	}

	if !runAsync {
		jobStatusResp, err := d.waitJob(queuedJob.Entries[0].Content.I_Parameters.I_Job.E0_InstanceID)
		if err != nil {
			return nil, err
		}
		if len(jobStatusResp.Entries) == 0 {
			return nil, goof.New("no volume returned")
		}

		fields := strings.Split(jobStatusResp.Entries[0].Content.I_Description, "Output: DeviceIDs=")
		if len(fields) < 2 {
			return nil, goof.New("new volumeID not found")
		}

		volume, err := d.GetVolume(fields[1], "")
		if err != nil {
			return nil, err
		}
		if len(volume) == 0 {
			return nil, goof.New("no new volume returned")
		}
		return volume[0], nil
	}

	return nil, nil
}
// Volumes returns all volumes or a filtered list of volumes.
func (d *driver) Volumes(
	ctx types.Context,
	opts *types.VolumesOpts) ([]*types.Volume, error) {

	// Get all volumes via EC2 API
	ec2vols, err := d.getVolume(ctx, "", "")
	if err != nil {
		return nil, goof.WithError("error getting volume", err)
	}
	if len(ec2vols) == 0 {
		return nil, errNoVolReturned
	}

	// Convert retrieved volumes to libStorage types.Volume
	vols, convErr := d.toTypesVolume(ctx, ec2vols, opts.Attachments)
	if convErr != nil {
		return nil, goof.WithError("error converting to types.Volume", convErr)
	}
	return vols, nil
}
func (d *driver) nfsMount(device, target string) error {
	command := exec.Command("mount", device, target)
	output, err := command.CombinedOutput()
	if err != nil {
		return goof.WithError(fmt.Sprintf("failed mounting: %s", output), err)
	}

	return nil
}
func getSdcLocalGUID() (sdcGUID string, err error) {
	out, err := exec.Command(sioBinPath, "--query_guid").Output()
	if err != nil {
		return "", goof.WithError("problem getting sdc guid", err)
	}

	sdcGUID = strings.Replace(string(out), "\n", "", -1)

	return sdcGUID, nil
}
// Retrieve current instance using EC2 API call
func (d *driver) getInstance(ctx types.Context) (awsec2.Instance, error) {
	diInput := &awsec2.DescribeInstancesInput{
		InstanceIds: []*string{mustInstanceIDID(ctx)},
	}
	resp, err := mustSession(ctx).DescribeInstances(diInput)
	if err != nil {
		return awsec2.Instance{}, goof.WithError(
			"error retrieving instance with EC2 API call", err)
	}
	return *resp.Reservations[0].Instances[0], nil
}
func (d *driver) Init(r *core.RexRay) error {
	d.r = r

	var err error
	if d.zone, err = getCurrentZone(); err != nil {
		return goof.WithError("error getting current zone", err)
	}
	if d.project, err = getCurrentProjectID(); err != nil {
		return goof.WithError("error getting current project ID", err)
	}

	serviceAccountJSON, err := ioutil.ReadFile(d.r.Config.GetString("gce.keyfile"))
	if err != nil {
		log.WithField("provider", providerName).Fatalf(
			"Could not read service account credentials file, %s => {%s}",
			d.r.Config.GetString("gce.keyfile"), err)
		return err
	}

	config, err := google.JWTConfigFromJSON(serviceAccountJSON,
		compute.ComputeScope,
	)
	if err != nil {
		return goof.WithFieldE("provider", providerName,
			"could not create JWT Config From JSON", err)
	}

	client, err := compute.New(config.Client(context.Background()))
	if err != nil {
		log.WithField("provider", providerName).Fatalf(
			"Could not create compute client => {%s}", err)
		return err
	}
	d.client = client

	instanceID, err := getCurrentInstanceID()
	if err != nil {
		log.WithField("provider", providerName).Fatalf(
			"Could not get current instance => {%s}", err)
		return err
	}
	d.currentInstanceID = instanceID

	log.WithField("provider", providerName).Info("storage driver initialized")
	return nil
}
func (d *driver) getInstanceID() (string, error) {
	cmd := exec.Command("xenstore-read", "name")
	cmd.Env = d.config.EnvVars()
	cmdOut, err := cmd.Output()
	if err != nil {
		return "", goof.WithError("problem getting instance ID", err)
	}

	instanceID := strings.Replace(string(cmdOut), "\n", "", -1)

	validInstanceID := regexp.MustCompile(`^instance-`)
	if !validInstanceID.MatchString(instanceID) {
		return "", goof.New("InstanceID not valid")
	}
	instanceID = strings.Replace(instanceID, "instance-", "", 1)

	return instanceID, nil
}
// VolumeInspect inspects a single volume.
func (d *driver) VolumeInspect(
	ctx types.Context,
	volumeID string,
	opts *types.VolumeInspectOpts) (*types.Volume, error) {

	// Get volume corresponding to volume ID via EC2 API
	ec2vols, err := d.getVolume(ctx, volumeID, "")
	if err != nil {
		return nil, goof.WithError("error getting volume", err)
	}
	if len(ec2vols) == 0 {
		return nil, errNoVolReturned
	}
	vols, convErr := d.toTypesVolume(ctx, ec2vols, opts.Attachments)
	if convErr != nil {
		return nil, goof.WithError("error converting to types.Volume", convErr)
	}

	// Because getVolume returns an array
	// and we only expect the 1st element to be a match, return 1st element
	return vols[0], nil
}
// Snapshots returns all snapshots or a filtered list of snapshots.
func (d *driver) Snapshots(
	ctx types.Context,
	opts types.Store) ([]*types.Snapshot, error) {

	allPages, err := snapshots.List(d.clientBlockStorage, nil).AllPages()
	if err != nil {
		return []*types.Snapshot{},
			goof.WithError("error listing volume snapshots", err)
	}
	allSnapshots, err := snapshots.ExtractSnapshots(allPages)
	if err != nil {
		return []*types.Snapshot{},
			goof.WithError("error listing volume snapshots", err)
	}

	var libstorageSnapshots []*types.Snapshot
	for _, snapshot := range allSnapshots {
		libstorageSnapshots = append(libstorageSnapshots, translateSnapshot(&snapshot))
	}

	return libstorageSnapshots, nil
}
func getIQN() (string, error) {
	data, err := ioutil.ReadFile("/etc/iscsi/initiatorname.iscsi")
	if err != nil {
		return "", goof.WithError("problem reading /etc/iscsi/initiatorname.iscsi", err)
	}

	result := string(data)
	lines := strings.Split(result, "\n")
	for _, line := range lines {
		split := strings.Split(line, "=")
		if split[0] == "InitiatorName" {
			return split[1], nil
		}
	}
	return "", goof.New("IQN not found")
}
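// A hedged, self-contained sketch (not the driver's code) of the InitiatorName
// parsing above; it additionally trims whitespace, skips "#" comment lines,
// and avoids indexing past the end of a line that contains no "=". The file
// contents passed to parseIQN are made up for the example.
package main

import (
	"errors"
	"fmt"
	"strings"
)

func parseIQN(contents string) (string, error) {
	for _, line := range strings.Split(contents, "\n") {
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		if key, value, ok := strings.Cut(line, "="); ok &&
			strings.TrimSpace(key) == "InitiatorName" {
			return strings.TrimSpace(value), nil
		}
	}
	return "", errors.New("IQN not found")
}

func main() {
	contents := "## DO NOT EDIT\nInitiatorName=iqn.1993-08.org.debian:01:abcdef\n"
	fmt.Println(parseIQN(contents))
}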
func (d *driver) VolumeDetach(
	ctx types.Context,
	volumeID string,
	opts *types.VolumeDetachOpts) (*types.Volume, error) {

	iid := context.MustInstanceID(ctx)

	volumes, err := d.getVolume(volumeID, "", 0)
	if err != nil {
		return nil, goof.WithError("error getting volume", err)
	}

	if len(volumes) == 0 {
		return nil, goof.New("no volumes returned")
	}

	targetVolume := sio.NewVolume(d.client)
	targetVolume.Volume = volumes[0]

	unmapVolumeSdcParam := &siotypes.UnmapVolumeSdcParam{
		SdcID:                "",
		IgnoreScsiInitiators: "true",
		AllSdcs:              "",
	}

	if opts.Force {
		unmapVolumeSdcParam.AllSdcs = "true"
	} else {
		unmapVolumeSdcParam.SdcID = iid.ID
	}

	if err := targetVolume.UnmapVolumeSdc(unmapVolumeSdcParam); err != nil {
		return nil, err
	}

	vol, err := d.VolumeInspect(ctx, volumeID, &types.VolumeInspectOpts{
		Attachments: types.VolAttReqTrue,
	})
	if err != nil {
		return nil, err
	}
	return vol, nil
}
// Wait for volume action to complete (creation, attachment, detachment)
func (d *driver) waitVolumeComplete(
	ctx types.Context, volumeID, action string) error {
	// no volume id inputted
	if volumeID == "" {
		return errMissingVolID
	}

	var (
		loop     = true
		attached = awsec2.VolumeAttachmentStateAttached
	)

	for loop {
		// update volume
		volumes, err := d.getVolume(ctx, volumeID, "")
		if err != nil {
			return goof.WithError("error getting volume", err)
		}

		// check retrieved volume
		switch action {
		case waitVolumeCreate:
			if *volumes[0].State == awsec2.VolumeStateAvailable {
				loop = false
			}
		case waitVolumeDetach:
			if len(volumes[0].Attachments) == 0 {
				loop = false
			}
		case waitVolumeAttach:
			if len(volumes[0].Attachments) == 1 &&
				*volumes[0].Attachments[0].State == attached {
				loop = false
			}
		}

		if loop {
			time.Sleep(1 * time.Second)
		}
	}

	return nil
}
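// waitVolumeComplete above polls until the desired state is reached with no
// upper bound on how long it will wait. The sketch below is an assumed,
// generic helper (not part of the driver) showing how such a poll loop can be
// bounded by a deadline so a volume that never converges cannot block the
// caller indefinitely.
package main

import (
	"errors"
	"fmt"
	"time"
)

func pollUntil(done func() (bool, error), interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		ok, err := done()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for desired state")
		}
		time.Sleep(interval)
	}
}

func main() {
	calls := 0
	err := pollUntil(func() (bool, error) {
		calls++
		return calls >= 3, nil // pretend the state converges on the third check
	}, 10*time.Millisecond, time.Second)
	fmt.Println(calls, err)
}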
func getLocalVolumeMap() (map[string]string, error) {
	mappedVolumesMap := make(map[string]*sdcMappedVolume)
	volumeMap := make(map[string]string)

	out, err := exec.Command(sioBinPath, "--query_vols").Output()
	if err != nil {
		return nil, goof.WithError("error querying volumes", err)
	}

	result := string(out)
	lines := strings.Split(result, "\n")

	for _, line := range lines {
		split := strings.Split(line, " ")
		if split[0] == "VOL-ID" {
			mappedVolume := &sdcMappedVolume{
				mdmID:    split[3],
				volumeID: split[1],
			}
			mappedVolume.mdmVolumeID = fmt.Sprintf(
				"%s-%s", mappedVolume.mdmID, mappedVolume.volumeID)
			mappedVolumesMap[mappedVolume.mdmVolumeID] = mappedVolume
		}
	}

	diskIDPath := "/dev/disk/by-id"
	files, _ := ioutil.ReadDir(diskIDPath)
	r, _ := regexp.Compile(`^emc-vol-\w*-\w*$`)
	for _, f := range files {
		matched := r.MatchString(f.Name())
		if matched {
			mdmVolumeID := strings.Replace(f.Name(), "emc-vol-", "", 1)
			devPath, _ := filepath.EvalSymlinks(
				fmt.Sprintf("%s/%s", diskIDPath, f.Name()))
			if _, ok := mappedVolumesMap[mdmVolumeID]; ok {
				volumeID := mappedVolumesMap[mdmVolumeID].volumeID
				volumeMap[volumeID] = devPath
			}
		}
	}

	return volumeMap, nil
}
// Fill in tags for volume or snapshot
func (d *driver) createTags(ctx types.Context, id, name string) (err error) {
	var (
		ctInput   *awsec2.CreateTagsInput
		inputName string
	)

	initCTInput := func() {
		if ctInput != nil {
			return
		}
		ctInput = &awsec2.CreateTagsInput{
			Resources: []*string{&id},
			Tags:      []*awsec2.Tag{},
		}
		// Append config tag to name
		inputName = d.getFullName(d.getPrintableName(name))
	}

	initCTInput()
	ctInput.Tags = append(
		ctInput.Tags,
		&awsec2.Tag{
			Key:   aws.String("Name"),
			Value: &inputName,
		})

	// TODO rexrayTag
	/*
		if d.ec2Tag != "" {
			initCTInput()
			ctInput.Tags = append(
				ctInput.Tags,
				&awsec2.Tag{
					Key:   aws.String(d.rexrayTag()),
					Value: &d.ec2Tag,
				})
		}
	*/

	_, err = mustSession(ctx).CreateTags(ctInput)
	if err != nil {
		return goof.WithError("error creating tags", err)
	}
	return nil
}