func (d *driver) RemoveVolume(volumeID string) error {
	fields := eff(map[string]interface{}{
		"volumeId": volumeID,
	})

	if volumeID == "" {
		return errors.WithFields(fields, "volumeId is required")
	}

	var err error
	var volumes []*types.Volume
	if volumes, err = d.getVolume(volumeID, ""); err != nil {
		return errors.WithFieldsE(fields, "error getting volume", err)
	}

	targetVolume := goscaleio.NewVolume(d.client)
	targetVolume.Volume = volumes[0]

	if err = targetVolume.RemoveVolume("ONLY_ME"); err != nil {
		return errors.WithFieldsE(fields, "error removing volume", err)
	}

	log.WithFields(fields).Debug("removed volume")

	return nil
}
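
// createVolumeHandleSnapshotID resolves the size of the volume behind
// snapshotID and stores it in size; it is a no-op when snapshotID is empty.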
func (d *driver) createVolumeHandleSnapshotID(
	size *int64,
	snapshotID string,
	fields map[string]interface{}) error {

	if snapshotID == "" {
		return nil
	}

	snapshots, err := d.GetSnapshot("", snapshotID, "")
	if err != nil {
		return errors.WithFieldsE(fields, "error getting snapshot", err)
	}

	if len(snapshots) == 0 {
		return errors.WithFields(fields, "snapshot array is empty")
	}

	volSize := snapshots[0].VolumeSize
	sizeInt, err := strconv.Atoi(volSize)
	if err != nil {
		f := errors.Fields{
			"volumeSize": volSize,
		}
		for k, v := range fields {
			f[k] = v
		}
		return errors.WithFieldsE(f, "error casting volume size", err)
	}
	*size = int64(sizeInt)

	return nil
}
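
// AttachVolume maps the ScaleIO volume identified by volumeID to the local
// SDC, waits for the device to appear, and returns the resulting attachments.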
func (d *driver) AttachVolume(
	runAsync bool,
	volumeID, instanceID string) ([]*core.VolumeAttachment, error) {

	fields := eff(map[string]interface{}{
		"runAsync":   runAsync,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	})

	if volumeID == "" {
		return nil, errors.WithFields(fields, "volumeId is required")
	}

	mapVolumeSdcParam := &types.MapVolumeSdcParam{
		SdcID:                 d.sdc.Sdc.ID,
		AllowMultipleMappings: "false",
		AllSdcs:               "",
	}

	volumes, err := d.getVolume(volumeID, "")
	if err != nil {
		return nil, errors.WithFieldsE(fields, "error getting volume", err)
	}

	if len(volumes) == 0 {
		return nil, errors.WithFields(fields, "no volumes returned")
	}

	targetVolume := goscaleio.NewVolume(d.client)
	targetVolume.Volume = volumes[0]

	err = targetVolume.MapVolumeSdc(mapVolumeSdcParam)
	if err != nil {
		return nil, errors.WithFieldsE(fields, "error mapping volume sdc", err)
	}

	_, err = waitMount(volumes[0].ID)
	if err != nil {
		fields["volumeId"] = volumes[0].ID
		return nil, errors.WithFieldsE(
			fields, "error waiting on volume to mount", err)
	}

	volumeAttachment, err := d.GetVolumeAttach(volumeID, instanceID)
	if err != nil {
		return nil, errors.WithFieldsE(
			fields, "error getting volume attachments", err)
	}

	log.WithFields(log.Fields{
		"provider":   providerName,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	}).Debug("attached volume to instance")

	return volumeAttachment, nil
}
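
// getVolume fetches a single Cinder volume by ID or, when volumeID is empty,
// lists all volumes and optionally filters them by volumeName.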
func (d *driver) getVolume(
	volumeID, volumeName string) (volumesRet []volumes.Volume, err error) {

	if volumeID != "" {
		volume, err := volumes.Get(d.clientBlockStorage, volumeID).Extract()
		if err != nil {
			return []volumes.Volume{},
				errors.WithFieldsE(eff(errors.Fields{
					"volumeId":   volumeID,
					"volumeName": volumeName}),
					"error getting volumes", err)
		}
		volumesRet = append(volumesRet, *volume)
	} else {
		listOpts := &volumes.ListOpts{
			//Name: volumeName,
		}

		allPages, err := volumes.List(d.clientBlockStorage, listOpts).AllPages()
		if err != nil {
			return []volumes.Volume{},
				errors.WithFieldsE(eff(errors.Fields{
					"volumeId":   volumeID,
					"volumeName": volumeName}),
					"error listing volumes", err)
		}
		volumesRet, err = volumes.ExtractVolumes(allPages)
		if err != nil {
			return []volumes.Volume{},
				errors.WithFieldsE(eff(errors.Fields{
					"volumeId":   volumeID,
					"volumeName": volumeName}),
					"error extracting volumes", err)
		}

		var volumesRetFiltered []volumes.Volume
		if volumeName != "" {
			var found bool
			for _, volume := range volumesRet {
				if volume.Name == volumeName {
					volumesRetFiltered = append(volumesRetFiltered, volume)
					found = true
					break
				}
			}
			if !found {
				return []volumes.Volume{}, nil
			}
			volumesRet = volumesRetFiltered
		}
	}

	return volumesRet, nil
}
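
// Init authenticates against the Rackspace identity endpoint and prepares the
// compute and block-storage clients used by the driver.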
func (d *driver) Init(r *core.RexRay) error {
	d.r = r
	fields := ef()
	var err error

	if d.instanceID, err = getInstanceID(d.r.Config); err != nil {
		return err
	}
	fields["instanceId"] = d.instanceID

	if d.region, err = getInstanceRegion(d.r.Config); err != nil {
		return err
	}
	fields["region"] = d.region
	d.region = strings.ToUpper(d.region)

	authOpts := getAuthOptions(d.r.Config)

	fields["identityEndpoint"] = d.r.Config.RackspaceAuthURL
	fields["userId"] = d.r.Config.RackspaceUserID
	fields["userName"] = d.r.Config.RackspaceUserName
	if d.r.Config.RackspacePassword == "" {
		fields["password"] = ""
	} else {
		fields["password"] = "******"
	}
	fields["tenantId"] = d.r.Config.RackspaceTenantID
	fields["tenantName"] = d.r.Config.RackspaceTenantName
	fields["domainId"] = d.r.Config.RackspaceDomainID
	fields["domainName"] = d.r.Config.RackspaceDomainName

	if d.provider, err = openstack.AuthenticatedClient(authOpts); err != nil {
		return errors.WithFieldsE(fields,
			"error getting authenticated client", err)
	}

	if d.client, err = openstack.NewComputeV2(d.provider,
		gophercloud.EndpointOpts{Region: d.region}); err != nil {
		return errors.WithFieldsE(fields, "error getting newComputeV2", err)
	}

	if d.clientBlockStorage, err = openstack.NewBlockStorageV1(d.provider,
		gophercloud.EndpointOpts{Region: d.region}); err != nil {
		return errors.WithFieldsE(fields, "error getting newBlockStorageV1", err)
	}

	log.WithField("provider", providerName).Info("storage driver initialized")

	return nil
}
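
// createVolumeHandleVolumeID seeds a create-volume request from an existing
// volume: it copies the source volume's size and availability zone and takes
// a temporary snapshot whose ID is returned through snapshotID.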
func (d *driver) createVolumeHandleVolumeID(
	availabilityZone, snapshotID, volumeID *string,
	size *int64,
	fields map[string]interface{}) ([]*core.Volume, error) {

	if *volumeID == "" {
		return nil, nil
	}

	var err error
	var volume []*core.Volume

	if volume, err = d.GetVolume(*volumeID, ""); err != nil {
		return nil, errors.WithFieldsE(fields, "error getting volumes", err)
	}

	if len(volume) == 0 {
		return nil, errors.WithFieldsE(fields, "", errors.ErrNoVolumesReturned)
	}

	volSize := volume[0].Size
	sizeInt, err := strconv.Atoi(volSize)
	if err != nil {
		f := errors.Fields{
			"volumeSize": volSize,
		}
		for k, v := range fields {
			f[k] = v
		}
		return nil, errors.WithFieldsE(f, "error casting volume size", err)
	}
	*size = int64(sizeInt)
	*volumeID = volume[0].VolumeID

	snapshot, err := d.CreateSnapshot(
		false, fmt.Sprintf("temp-%s", *volumeID), *volumeID, "")
	if err != nil {
		return nil, errors.WithFieldsE(fields, "error creating snapshot", err)
	}

	*snapshotID = snapshot[0].SnapshotID

	if *availabilityZone == "" {
		*availabilityZone = volume[0].AvailabilityZone
	}

	return volume, nil
}
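
// CreateSnapshot creates a Cinder snapshot of volumeID and, unless runAsync
// is true, waits for it to become available before returning it.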
func (d *driver) CreateSnapshot(
	runAsync bool,
	snapshotName, volumeID, description string) ([]*core.Snapshot, error) {

	fields := eff(map[string]interface{}{
		"runAsync":     runAsync,
		"snapshotName": snapshotName,
		"volumeId":     volumeID,
		"description":  description,
	})

	opts := snapshots.CreateOpts{
		Name:        snapshotName,
		VolumeID:    volumeID,
		Description: description,
		Force:       true,
	}

	resp, err := snapshots.Create(d.clientBlockStorage, opts).Extract()
	if err != nil {
		return nil, errors.WithFieldsE(fields, "error creating snapshot", err)
	}

	if !runAsync {
		log.Debug("waiting for snapshot creation to complete")
		err = snapshots.WaitForStatus(d.clientBlockStorage, resp.ID, "available", 120)
		if err != nil {
			return nil, errors.WithFieldsE(fields,
				"error waiting for snapshot creation to complete", err)
		}
	}

	snapshot, err := d.GetSnapshot("", resp.ID, "")
	if err != nil {
		return nil, err
	}

	log.WithFields(log.Fields{
		"runAsync":     runAsync,
		"snapshotName": snapshotName,
		"volumeId":     volumeID,
		"description":  description}).Debug("created snapshot")

	return snapshot, nil
}
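
// GetVolumeMapping returns the ScaleIO volumes currently mapped to the local
// SDC as core block devices.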
func (d *driver) GetVolumeMapping() ([]*core.BlockDevice, error) {
	blockDevices, err := d.getBlockDevices()
	if err != nil {
		return nil, errors.WithFieldsE(ef(),
			"error getting block devices", err)
	}

	var BlockDevices []*core.BlockDevice
	for _, blockDevice := range blockDevices {
		sdBlockDevice := &core.BlockDevice{
			ProviderName: providerName,
			InstanceID:   d.sdc.Sdc.ID,
			Region:       blockDevice.MdmID,
			DeviceName:   blockDevice.SdcDevice,
			VolumeID:     blockDevice.VolumeID,
			Status:       "",
		}
		BlockDevices = append(BlockDevices, sdBlockDevice)
	}

	log.WithFields(log.Fields{
		"provider":     providerName,
		"blockDevices": BlockDevices,
	}).Debug("got block device mappings")

	return BlockDevices, nil
}
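
// GetSnapshot returns snapshots matching the given volume ID, snapshot ID, or
// snapshot name, converted to the core snapshot type.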
func (d *driver) GetSnapshot(
	volumeID, snapshotID, snapshotName string) ([]*core.Snapshot, error) {

	snapshots, err := d.getSnapshot(volumeID, snapshotID, snapshotName)
	if err != nil {
		return nil, errors.WithFieldsE(eff(errors.Fields{
			"volumeId":     volumeID,
			"snapshotId":   snapshotID,
			"snapshotName": snapshotName}),
			"error getting snapshot", err)
	}

	var snapshotsInt []*core.Snapshot
	for _, snapshot := range snapshots {
		snapshotSD := &core.Snapshot{
			Name:        snapshot.Name,
			VolumeID:    snapshot.VolumeID,
			SnapshotID:  snapshot.ID,
			VolumeSize:  strconv.Itoa(snapshot.Size),
			StartTime:   snapshot.CreatedAt,
			Description: snapshot.Description,
			Status:      snapshot.Status,
		}
		snapshotsInt = append(snapshotsInt, snapshotSD)
	}

	return snapshotsInt, nil
}
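
// GetVolumeAttach returns the attachments of volumeID, optionally restricted
// to the given instanceID.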
func (d *driver) GetVolumeAttach(
	volumeID, instanceID string) ([]*core.VolumeAttachment, error) {

	fields := eff(map[string]interface{}{
		"volumeId":   volumeID,
		"instanceId": instanceID,
	})

	if volumeID == "" {
		return []*core.VolumeAttachment{},
			errors.WithFields(fields, "volumeId is required")
	}

	volume, err := d.GetVolume(volumeID, "")
	if err != nil {
		return []*core.VolumeAttachment{},
			errors.WithFieldsE(fields, "error getting volume attach", err)
	}

	if len(volume) == 0 {
		return []*core.VolumeAttachment{}, nil
	}

	if instanceID != "" {
		for _, volumeAttachment := range volume[0].Attachments {
			if volumeAttachment.InstanceID == instanceID {
				return volume[0].Attachments, nil
			}
		}
		return []*core.VolumeAttachment{}, nil
	}

	return volume[0].Attachments, nil
}
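
// GetVolumeMapping returns the volumes attached to the current instance as
// core block devices.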
func (d *driver) GetVolumeMapping() ([]*core.BlockDevice, error) {
	blockDevices, err := d.getBlockDevices(d.instanceID)
	if err != nil {
		return nil, errors.WithFieldsE(eff(errors.Fields{
			"instanceId": d.instanceID,
		}), "error getting block devices", err)
	}

	var BlockDevices []*core.BlockDevice
	for _, blockDevice := range blockDevices {
		sdBlockDevice := &core.BlockDevice{
			ProviderName: providerName,
			InstanceID:   d.instanceID,
			VolumeID:     blockDevice.VolumeID,
			DeviceName:   blockDevice.Device,
			Region:       d.region,
			Status:       "",
		}
		BlockDevices = append(BlockDevices, sdBlockDevice)
	}

	return BlockDevices, nil
}
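
// getBlockDevices returns the SDC-mapped volumes reported by the local
// ScaleIO SDC.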
func (d *driver) getBlockDevices() ([]*goscaleio.SdcMappedVolume, error) {
	volumeMaps, err := goscaleio.GetLocalVolumeMap()
	if err != nil {
		return []*goscaleio.SdcMappedVolume{},
			errors.WithFieldsE(ef(), "error getting local volume map", err)
	}

	return volumeMaps, nil
}
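
// getInstance fetches the compute server record for the driver's instance ID.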
func (d *driver) getInstance() (*servers.Server, error) {
	server, err := servers.Get(d.client, d.instanceID).Extract()
	if err != nil {
		return nil, errors.WithFieldsE(ef(),
			"error getting server instance", err)
	}

	return server, nil
}
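
// AttachVolume attaches volumeID to instanceID on the next available device
// name and, unless runAsync is true, waits for the attachment to complete.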
func (d *driver) AttachVolume(
	runAsync bool,
	volumeID, instanceID string) ([]*core.VolumeAttachment, error) {

	fields := eff(map[string]interface{}{
		"runAsync":   runAsync,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	})

	nextDeviceName, err := d.GetDeviceNextAvailable()
	if err != nil {
		return nil, errors.WithFieldsE(
			fields, "error getting next available device", err)
	}

	options := &volumeattach.CreateOpts{
		Device:   nextDeviceName,
		VolumeID: volumeID,
	}

	_, err = volumeattach.Create(d.client, instanceID, options).Extract()
	if err != nil {
		return nil, errors.WithFieldsE(
			fields, "error attaching volume", err)
	}

	if !runAsync {
		log.WithFields(fields).Debug("waiting for volume to attach")
		err = d.waitVolumeAttach(volumeID)
		if err != nil {
			return nil, errors.WithFieldsE(
				fields, "error waiting for volume to attach", err)
		}
	}

	volumeAttachment, err := d.GetVolumeAttach(volumeID, instanceID)
	if err != nil {
		return nil, err
	}

	log.WithFields(fields).Debug("volume attached")

	return volumeAttachment, nil
}
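
// DetachVolume unmaps the ScaleIO volume identified by volumeID from the
// local SDC.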
func (d *driver) DetachVolume(
	runAsync bool,
	volumeID string,
	blank string) error {

	fields := eff(map[string]interface{}{
		"runAsync": runAsync,
		"volumeId": volumeID,
		"blank":    blank,
	})

	if volumeID == "" {
		return errors.WithFields(fields, "volumeId is required")
	}

	volumes, err := d.getVolume(volumeID, "")
	if err != nil {
		return errors.WithFieldsE(fields, "error getting volume", err)
	}

	if len(volumes) == 0 {
		return errors.WithFields(fields, "no volumes returned")
	}

	targetVolume := goscaleio.NewVolume(d.client)
	targetVolume.Volume = volumes[0]

	unmapVolumeSdcParam := &types.UnmapVolumeSdcParam{
		SdcID:                d.sdc.Sdc.ID,
		IgnoreScsiInitiators: "true",
		AllSdcs:              "",
	}

	// need to detect if unmounted first
	err = targetVolume.UnmapVolumeSdc(unmapVolumeSdcParam)
	if err != nil {
		return errors.WithFieldsE(fields, "error unmapping volume sdc", err)
	}

	log.WithFields(log.Fields{
		"provider": providerName,
		"volumeId": volumeID}).Debug("detached volume")

	return nil
}
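
// getSnapshot fetches a single Cinder snapshot by ID or lists snapshots
// filtered by volume ID and snapshot name.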
func (d *driver) getSnapshot(
	volumeID, snapshotID, snapshotName string) (allSnapshots []snapshots.Snapshot, err error) {

	fields := eff(map[string]interface{}{
		"volumeId":     volumeID,
		"snapshotId":   snapshotID,
		"snapshotName": snapshotName,
	})

	if snapshotID != "" {
		snapshot, err := snapshots.Get(d.clientBlockStorage, snapshotID).Extract()
		if err != nil {
			return []snapshots.Snapshot{},
				errors.WithFieldsE(fields, "error getting snapshot", err)
		}
		allSnapshots = append(allSnapshots, *snapshot)
	} else {
		opts := snapshots.ListOpts{
			VolumeID: volumeID,
			Name:     snapshotName,
		}

		allPages, err := snapshots.List(d.clientBlockStorage, opts).AllPages()
		if err != nil {
			return []snapshots.Snapshot{},
				errors.WithFieldsE(fields, "error listing snapshot", err)
		}
		allSnapshots, err = snapshots.ExtractSnapshots(allPages)
		if err != nil {
			return []snapshots.Snapshot{},
				errors.WithFieldsE(fields, "error extracting snapshot", err)
		}
	}

	return allSnapshots, nil
}
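
// Init creates the XtremIO client and, unless remote management is enabled,
// resolves the local initiator by its IQN.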
func (d *driver) Init(r *core.RexRay) error {
	d.r = r
	d.volumesByNaa = map[string]xtio.Volume{}

	fields := eff(map[string]interface{}{
		"endpoint":         d.endpoint(),
		"userName":         d.userName(),
		"deviceMapper":     d.deviceMapper(),
		"multipath":        d.multipath(),
		"remoteManagement": d.remoteManagement(),
		"insecure":         d.insecure(),
	})

	if d.password() == "" {
		fields["password"] = ""
	} else {
		fields["password"] = "******"
	}

	if !isXtremIOAttached() && !d.remoteManagement() {
		return errors.WithFields(fields, "device not detected")
	}

	var err error

	if d.client, err = xtio.NewClientWithArgs(
		d.endpoint(), d.insecure(), d.userName(), d.password()); err != nil {
		return errors.WithFieldsE(fields, "error creating xtremio client", err)
	}

	if !d.remoteManagement() {
		var iqn string
		if iqn, err = getIQN(); err != nil {
			return err
		}
		if d.initiator, err = d.client.GetInitiator("", iqn); err != nil {
			return err
		}
	}

	log.WithField("provider", providerName).Info("storage driver initialized")

	return nil
}
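
// Init creates the XtremIO client from the raw configuration values and,
// unless remote management is enabled, resolves the local initiator by its IQN.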
func (d *xtremIODriver) Init(r *core.RexRay) error {
	d.r = r
	d.volumesByNaa = map[string]xtio.Volume{}

	fields := eff(map[string]interface{}{
		"endpoint":         r.Config.XtremIOEndpoint,
		"userName":         r.Config.XtremIOUserName,
		"deviceMapper":     r.Config.XtremIODeviceMapper,
		"multipath":        r.Config.XtremIOMultipath,
		"remoteManagement": r.Config.XtremIORemoteManagement,
		"insecure":         r.Config.XtremIOInsecure,
	})

	if r.Config.XtremIoPassword == "" {
		fields["password"] = ""
	} else {
		fields["password"] = "******"
	}

	if !isXtremIOAttached() && !d.r.Config.XtremIORemoteManagement {
		return errors.WithFields(fields, "device not detected")
	}

	var err error

	if d.client, err = xtio.NewClientWithArgs(
		r.Config.XtremIOEndpoint,
		r.Config.XtremIOInsecure,
		r.Config.XtremIOUserName,
		r.Config.XtremIoPassword); err != nil {
		return errors.WithFieldsE(fields, "error creating xtremio client", err)
	}

	if !d.r.Config.XtremIORemoteManagement {
		var iqn string
		if iqn, err = getIQN(); err != nil {
			return err
		}
		if d.initiator, err = d.client.GetInitiator("", iqn); err != nil {
			return err
		}
	}

	log.WithField("provider", providerName).Debug("storage driver initialized")

	return nil
}
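
// DetachVolume removes the attachment of volumeID from its instance and,
// unless runAsync is true, waits for the volume to detach.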
func (d *driver) DetachVolume(
	runAsync bool,
	volumeID, instanceID string) error {

	fields := eff(map[string]interface{}{
		"runAsync":   runAsync,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	})

	if volumeID == "" {
		return errors.WithFields(fields, "volumeId is required")
	}

	volume, err := d.GetVolume(volumeID, "")
	if err != nil {
		return errors.WithFieldsE(fields, "error getting volume", err)
	}

	if len(volume) == 0 || len(volume[0].Attachments) == 0 {
		return errors.WithFields(fields, "volume is not attached")
	}

	fields["instanceId"] = volume[0].Attachments[0].InstanceID
	resp := volumeattach.Delete(
		d.client, volume[0].Attachments[0].InstanceID, volumeID)
	if resp.Err != nil {
		return errors.WithFieldsE(fields, "error deleting volume", resp.Err)
	}

	if !runAsync {
		log.WithFields(fields).Debug("waiting for volume to detach")
		err = d.waitVolumeDetach(volumeID)
		if err != nil {
			return errors.WithFieldsE(
				fields, "error waiting for volume to detach", err)
		}
	}

	log.WithFields(fields).Debug("volume detached")

	return nil
}
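
// RemoveVolume deletes the Cinder volume identified by volumeID.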
func (d *driver) RemoveVolume(volumeID string) error {
	fields := eff(map[string]interface{}{
		"volumeId": volumeID,
	})

	if volumeID == "" {
		return errors.WithFields(fields, "volumeId is required")
	}

	res := volumes.Delete(d.clientBlockStorage, volumeID)
	if res.Err != nil {
		return errors.WithFieldsE(fields, "error removing volume", res.Err)
	}

	log.WithFields(fields).Debug("removed volume")

	return nil
}
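
// GetInstance returns the current compute instance as a core instance.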
func (d *driver) GetInstance() (*core.Instance, error) {
	server, err := d.getInstance()
	if err != nil {
		return nil, errors.WithFieldsE(ef(),
			"error getting driver instance", err)
	}

	instance := &core.Instance{
		ProviderName: providerName,
		InstanceID:   d.instanceID,
		Region:       d.region,
		Name:         server.Name,
	}

	return instance, nil
}
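
// getBlockDevices lists the volume attachments for the driver's instance.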
func (d *driver) getBlockDevices(
	instanceID string) ([]volumeattach.VolumeAttachment, error) {

	allPages, err := volumeattach.List(d.client, d.instanceID).AllPages()
	if err != nil {
		return []volumeattach.VolumeAttachment{},
			errors.WithFieldsE(eff(errors.Fields{
				"instanceId": instanceID}),
				"error listing volume attachments", err)
	}

	volumeAttachments, err := volumeattach.ExtractVolumeAttachments(allPages)
	if err != nil {
		return []volumeattach.VolumeAttachment{},
			errors.WithFieldsE(eff(errors.Fields{
				"instanceId": instanceID}),
				"error extracting volume attachments", err)
	}

	return volumeAttachments, nil
}
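
// getLocalDevices parses /proc/partitions and returns the local device names.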
func getLocalDevices() (deviceNames []string, err error) {
	file := "/proc/partitions"
	contentBytes, err := ioutil.ReadFile(file)
	if err != nil {
		return []string{}, errors.WithFieldsE(
			eff(errors.Fields{"file": file}),
			"error reading file", err)
	}

	content := string(contentBytes)

	lines := strings.Split(content, "\n")
	for _, line := range lines[2:] {
		fields := strings.Fields(line)
		if len(fields) == 4 {
			deviceNames = append(deviceNames, fields[3])
		}
	}

	return deviceNames, nil
}
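
// GetVolume returns volumes matching volumeID or volumeName, converted to the
// core volume type along with their attachments.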
func (d *driver) GetVolume(
	volumeID, volumeName string) ([]*core.Volume, error) {

	volumesRet, err := d.getVolume(volumeID, volumeName)
	if err != nil {
		return []*core.Volume{},
			errors.WithFieldsE(eff(errors.Fields{
				"volumeId":   volumeID,
				"volumeName": volumeName}),
				"error getting volume", err)
	}

	var volumesSD []*core.Volume
	for _, volume := range volumesRet {
		var attachmentsSD []*core.VolumeAttachment
		for _, attachment := range volume.Attachments {
			attachmentSD := &core.VolumeAttachment{
				VolumeID:   attachment["volume_id"].(string),
				InstanceID: attachment["server_id"].(string),
				DeviceName: attachment["device"].(string),
				Status:     "",
			}
			attachmentsSD = append(attachmentsSD, attachmentSD)
		}

		volumeSD := &core.Volume{
			Name:             volume.Name,
			VolumeID:         volume.ID,
			AvailabilityZone: volume.AvailabilityZone,
			Status:           volume.Status,
			VolumeType:       volume.VolumeType,
			IOPS:             0,
			Size:             strconv.Itoa(volume.Size),
			Attachments:      attachmentsSD,
		}
		volumesSD = append(volumesSD, volumeSD)
	}

	return volumesSD, nil
}
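
// waitVolumeAttach polls the volume until its status is "in-use".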
func (d *driver) waitVolumeAttach(volumeID string) error {
	fields := eff(map[string]interface{}{
		"volumeId": volumeID,
	})

	if volumeID == "" {
		return errors.WithFields(fields, "volumeId is required")
	}

	for {
		volume, err := d.GetVolume(volumeID, "")
		if err != nil {
			return errors.WithFieldsE(fields, "error getting volume", err)
		}
		if volume[0].Status == "in-use" {
			break
		}
		time.Sleep(1 * time.Second)
	}

	return nil
}
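
// Start validates the module's address, initializes its drivers, serves the
// Docker plugin HTTP endpoint over a UNIX socket or TCP address, and writes
// the plugin spec file to /etc/docker/plugins.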
func (m *mod) Start() error {
	proto, addr, parseAddrErr := util.ParseAddress(m.Address())
	if parseAddrErr != nil {
		return parseAddrErr
	}

	const validProtoPatt = "(?i)^unix|tcp$"
	isProtoValid, matchProtoErr := regexp.MatchString(validProtoPatt, proto)
	if matchProtoErr != nil {
		return errors.WithFieldsE(errors.Fields{
			"protocol":       proto,
			"validProtoPatt": validProtoPatt,
		}, "error matching protocol", matchProtoErr)
	}
	if !isProtoValid {
		return errors.WithField("protocol", proto, "invalid protocol")
	}

	if err := m.r.InitDrivers(); err != nil {
		return errors.WithFieldsE(errors.Fields{
			"m":   m,
			"m.r": m.r,
		}, "error initializing drivers", err)
	}

	if err := os.MkdirAll("/etc/docker/plugins", 0755); err != nil {
		return err
	}

	var specPath string
	var startFunc func() error

	mux := m.buildMux()

	if proto == "unix" {
		sockFile := addr
		sockFileDir := filepath.Dir(sockFile)
		mkSockFileDirErr := os.MkdirAll(sockFileDir, 0755)
		if mkSockFileDirErr != nil {
			return mkSockFileDirErr
		}

		_ = os.RemoveAll(sockFile)

		specPath = m.Address()

		startFunc = func() error {
			l, lErr := net.Listen("unix", sockFile)
			if lErr != nil {
				return lErr
			}
			defer l.Close()
			defer os.Remove(sockFile)

			return http.Serve(l, mux)
		}
	} else {
		specPath = addr

		startFunc = func() error {
			s := &http.Server{
				Addr:           addr,
				Handler:        mux,
				ReadTimeout:    10 * time.Second,
				WriteTimeout:   10 * time.Second,
				MaxHeaderBytes: 1 << 20,
			}
			return s.ListenAndServe()
		}
	}

	go func() {
		sErr := startFunc()
		if sErr != nil {
			panic(sErr)
		}
	}()

	writeSpecErr := ioutil.WriteFile(
		"/etc/docker/plugins/rexray.spec", []byte(specPath), 0644)
	if writeSpecErr != nil {
		return writeSpecErr
	}

	return nil
}
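
// Init authenticates against the ScaleIO gateway and resolves the system,
// protection domain, storage pool, and local SDC used by the driver.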
func (d *driver) Init(r *core.RexRay) error {
	d.r = r

	fields := eff(map[string]interface{}{
		"endpoint": d.r.Config.ScaleIOEndpoint,
		"insecure": d.r.Config.ScaleIOInsecure,
		"useCerts": d.r.Config.ScaleIOUseCerts,
	})

	var err error

	if d.client, err = goscaleio.NewClientWithArgs(
		d.r.Config.ScaleIOEndpoint,
		d.r.Config.ScaleIOInsecure,
		d.r.Config.ScaleIOUseCerts); err != nil {
		return errors.WithFieldsE(fields, "error constructing new client", err)
	}

	if _, err := d.client.Authenticate(
		&goscaleio.ConfigConnect{
			d.r.Config.ScaleIOEndpoint,
			d.r.Config.ScaleIOUserName,
			d.r.Config.ScaleIoPassword}); err != nil {
		fields["userName"] = d.r.Config.ScaleIOUserName
		if d.r.Config.ScaleIoPassword != "" {
			fields["password"] = "******"
		}
		return errors.WithFieldsE(fields, "error authenticating", err)
	}

	if d.system, err = d.client.FindSystem(
		d.r.Config.ScaleIOSystemID,
		d.r.Config.ScaleIOSystemName, ""); err != nil {
		fields["systemId"] = d.r.Config.ScaleIOSystemID
		fields["systemName"] = d.r.Config.ScaleIOSystemName
		return errors.WithFieldsE(fields, "error finding system", err)
	}

	var pd *types.ProtectionDomain
	if pd, err = d.system.FindProtectionDomain(
		d.r.Config.ScaleIOProtectionDomainID,
		d.r.Config.ScaleIOProtectionDomainName, ""); err != nil {
		fields["domainId"] = d.r.Config.ScaleIOProtectionDomainID
		fields["domainName"] = d.r.Config.ScaleIOProtectionDomainName
		return errors.WithFieldsE(fields,
			"error finding protection domain", err)
	}
	d.protectionDomain = goscaleio.NewProtectionDomain(d.client)
	d.protectionDomain.ProtectionDomain = pd

	var sp *types.StoragePool
	if sp, err = d.protectionDomain.FindStoragePool(
		d.r.Config.ScaleIOStoragePoolID,
		d.r.Config.ScaleIOStoragePoolName, ""); err != nil {
		fields["storagePoolId"] = d.r.Config.ScaleIOStoragePoolID
		fields["storagePoolName"] = d.r.Config.ScaleIOStoragePoolName
		return errors.WithFieldsE(fields, "error finding storage pool", err)
	}
	d.storagePool = goscaleio.NewStoragePool(d.client)
	d.storagePool.StoragePool = sp

	var sdcGUID string
	if sdcGUID, err = goscaleio.GetSdcLocalGUID(); err != nil {
		return errors.WithFieldsE(fields, "error getting sdc local guid", err)
	}

	if d.sdc, err = d.system.FindSdc(
		"SdcGuid", strings.ToUpper(sdcGUID)); err != nil {
		fields["sdcGuid"] = sdcGUID
		return errors.WithFieldsE(fields, "error finding sdc", err)
	}

	log.WithField("provider", providerName).Debug("storage driver initialized")

	return nil
}

// StartModule starts the module with the provided instance ID.
func StartModule(modInstID int32) error {
	modInstancesRwl.RLock()
	defer modInstancesRwl.RUnlock()

	lf := map[string]interface{}{"id": modInstID}

	mod, modExists := modInstances[modInstID]
	if !modExists {
		return errors.WithFields(lf, "unknown module instance")
	}

	lf["id"] = mod.ID
	lf["typeId"] = mod.Type.ID
	lf["typeName"] = mod.Type.Name
	lf["address"] = mod.Config.Address

	started := make(chan bool)
	timeout := make(chan bool)
	startError := make(chan error)

	go func() {
		defer func() {
			r := recover()
			if r == nil {
				return
			}
			m := "error starting module"
			errMsg := fmt.Sprintf(
				"Error starting module type %d, %d-%s at %s",
				mod.TypeID, mod.ID, mod.Name, mod.Config.Address)
			switch x := r.(type) {
			case string:
				lf["inner"] = x
				startError <- errors.WithFields(lf, m)
			case error:
				startError <- errors.WithFieldsE(lf, m, x)
			default:
				startError <- errors.WithFieldsE(lf, m, errors.New(errMsg))
			}
		}()

		sErr := mod.Inst.Start()
		if sErr != nil {
			startError <- sErr
		} else {
			started <- true
		}
	}()

	go func() {
		time.Sleep(3 * time.Second)
		timeout <- true
	}()

	select {
	case <-started:
		mod.IsStarted = true
		log.WithFields(lf).Info("started module")
	case <-timeout:
		log.WithFields(lf).Debug("timed out while monitoring module start")
	case sErr := <-startError:
		return sErr
	}

	return nil
}
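
// Init authenticates against the OpenStack identity endpoint and prepares the
// compute and block-storage clients used by the driver.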
func (d *driver) Init(r *core.RexRay) error {
	d.r = r
	fields := ef()
	var err error

	if d.instanceID, err = getInstanceID(d.r.Config); err != nil {
		return err
	}
	fields["instanceId"] = d.instanceID

	if d.regionName() == "" {
		if d.region, err = getInstanceRegion(d.r.Config); err != nil {
			return err
		}
	} else {
		d.region = d.regionName()
	}
	fields["region"] = d.region

	if d.availabilityZoneName() == "" {
		if d.availabilityZone, err = getInstanceAvailabilityZone(); err != nil {
			return err
		}
	} else {
		d.availabilityZone = d.availabilityZoneName()
	}
	fields["availabilityZone"] = d.availabilityZone

	authOpts := d.getAuthOptions()

	fields["identityEndpoint"] = d.authURL()
	fields["userId"] = d.userID()
	fields["userName"] = d.userName()
	if d.password() == "" {
		fields["password"] = ""
	} else {
		fields["password"] = "******"
	}
	fields["tenantId"] = d.tenantID()
	fields["tenantName"] = d.tenantName()
	fields["domainId"] = d.domainID()
	fields["domainName"] = d.domainName()

	if d.provider, err = openstack.AuthenticatedClient(authOpts); err != nil {
		return errors.WithFieldsE(fields,
			"error getting authenticated client", err)
	}

	if d.client, err = openstack.NewComputeV2(d.provider,
		gophercloud.EndpointOpts{Region: d.region}); err != nil {
		return errors.WithFieldsE(fields, "error getting newComputeV2", err)
	}

	if d.clientBlockStorage, err = openstack.NewBlockStorageV1(d.provider,
		gophercloud.EndpointOpts{Region: d.region}); err != nil {
		return errors.WithFieldsE(fields, "error getting newBlockStorageV1", err)
	}

	log.WithField("provider", providerName).Info("storage driver initialized")

	return nil
}