func getInstanceID(c gofig.Config) (string, error) { cmd := newCmd(c, "/usr/bin/xenstore-read", "name") cmdOut, err := cmd.Output() if err != nil { return "", goof.WithFields(eff(goof.Fields{ "cmd.Path": cmd.Path, "cmd.Args": cmd.Args, "cmd.Out": cmdOut, }), "error getting instance id") } instanceID := strings.Replace(string(cmdOut), "\n", "", -1) validInstanceID := regexp.MustCompile(`^instance-`) valid := validInstanceID.MatchString(instanceID) if !valid { return "", goof.WithFields(eff(goof.Fields{ "instanceId": instanceID}), "error matching instance id") } instanceID = strings.Replace(instanceID, "instance-", "", 1) return instanceID, nil }
// DetachVolume detaches the volume with the given ID from its current
// instance. When force is true a block-storage force-detach is issued;
// otherwise a normal detach request is sent to the compute service. When
// runAsync is false the call blocks until the volume reports as detached.
func (d *driver) DetachVolume(
	runAsync bool,
	volumeID, instanceID string, force bool) error {
	fields := eff(map[string]interface{}{
		"runAsync":   runAsync,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	})

	if volumeID == "" {
		return goof.WithFields(fields, "volumeId is required")
	}

	volume, err := d.GetVolume(volumeID, "")
	if err != nil {
		return goof.WithFieldsE(fields, "error getting volume", err)
	}
	if len(volume) == 0 {
		return goof.WithFields(fields, "no volumes returned")
	}

	// Already detached: nothing to do.
	if len(volume[0].Attachments) == 0 {
		return nil
	}

	// Log the instance the volume is actually attached to, which may
	// differ from the instanceID argument.
	fields["instanceId"] = volume[0].Attachments[0].InstanceID

	if force {
		if resp := volumeactions.ForceDetach(d.clientBlockStoragev2,
			volumeID); resp.Err != nil {
			log.Info(fmt.Sprintf("%+v", resp.Err))
			return goof.WithFieldsE(
				fields, "error forcing detach volume", resp.Err)
		}
	} else {
		if resp := volumeattach.Delete(
			d.client, volume[0].Attachments[0].InstanceID,
			volumeID); resp.Err != nil {
			return goof.WithFieldsE(
				fields, "error detaching volume", resp.Err)
		}
	}

	if !runAsync {
		log.WithFields(fields).Debug("waiting for volume to detach")
		err = d.waitVolumeDetach(volumeID)
		if err != nil {
			return goof.WithFieldsE(
				fields, "error waiting for volume to detach", err)
		}
	}

	log.WithFields(fields).Debug("volume detached")
	return nil
}
func (d *driver) GetVolumeAttach( volumeID, instanceID string) ([]*core.VolumeAttachment, error) { fields := eff(map[string]interface{}{ "volumeId": volumeID, "instanceId": instanceID, }) if volumeID == "" { return []*core.VolumeAttachment{}, goof.WithFields(fields, "volumeId is required") } volume, err := d.GetVolume(volumeID, "") if err != nil { return []*core.VolumeAttachment{}, goof.WithFieldsE(fields, "error getting volume", err) } if instanceID != "" { var attached bool for _, volumeAttachment := range volume[0].Attachments { if volumeAttachment.InstanceID == instanceID { return volume[0].Attachments, nil } } if !attached { return []*core.VolumeAttachment{}, nil } } return volume[0].Attachments, nil }
// waitVolumeAttachStatus polls the volume until its attachment state
// matches the desired condition: attachmentNeeded true waits for at least
// one attachment, false waits for zero attachments.
//
// NOTE(review): this loop has no timeout and does not observe ctx
// cancellation; it blocks indefinitely if the volume never reaches the
// desired state — confirm whether callers guard against that.
func (d *driver) waitVolumeAttachStatus(
	ctx types.Context,
	volumeID string,
	attachmentNeeded bool) (*types.Volume, error) {
	fields := eff(map[string]interface{}{
		"moduleName": ctx,
		"volumeId":   volumeID,
	})
	if volumeID == "" {
		return nil, goof.WithFields(fields, "volumeId is required")
	}
	for {
		volume, err := d.VolumeInspect(
			ctx, volumeID,
			&types.VolumeInspectOpts{Attachments: types.VolAttReqTrue})
		if err != nil {
			return nil, goof.WithFieldsE(
				fields, "error getting volume when waiting", err)
		}
		if attachmentNeeded {
			if len(volume.Attachments) > 0 {
				return volume, nil
			}
		} else {
			if len(volume.Attachments) == 0 {
				return volume, nil
			}
		}
		// Poll once per second.
		time.Sleep(1 * time.Second)
	}
}
func (d *driver) RemoveVolume(volumeID string) error { fields := eff(map[string]interface{}{ "volumeId": volumeID, }) if volumeID == "" { return goof.WithFields(fields, "volumeId is required") } var err error var volumes []*types.Volume if volumes, err = d.getVolume(volumeID, "", false); err != nil { return goof.WithFieldsE(fields, "error getting volume", err) } targetVolume := goscaleio.NewVolume(d.client) targetVolume.Volume = volumes[0] if err = targetVolume.RemoveVolume("ONLY_ME"); err != nil { return goof.WithFieldsE(fields, "error removing volume", err) } log.WithFields(fields).Debug("removed volume") return nil }
// // VolumeDetach detaches a volume. func (d *driver) VolumeDetach( ctx types.Context, volumeID string, opts *types.VolumeDetachOpts) (*types.Volume, error) { fields := eff(map[string]interface{}{ "moduleName": ctx, "volumeId": volumeID, }) if volumeID == "" { return nil, goof.WithFields(fields, "volumeId is required for VolumeDetach") } vols, err := d.getVolume(ctx, volumeID, "", types.VolAttReqTrue) if err != nil { return nil, err } resp := volumeattach.Delete( d.client, vols[0].Attachments[0].InstanceID.ID, volumeID) if resp.Err != nil { return nil, goof.WithFieldsE(fields, "error detaching volume", resp.Err) } ctx.WithFields(fields).Debug("waiting for volume to detach") volume, err := d.waitVolumeAttachStatus(ctx, volumeID, false) if err == nil { return volume, nil } log.WithFields(fields).Debug("volume detached") return nil, nil }
// // VolumeRemove removes a volume. func (d *driver) VolumeRemove( ctx types.Context, volumeID string, opts types.Store) error { fields := eff(map[string]interface{}{ "volumeId": volumeID, }) if volumeID == "" { return goof.WithFields(fields, "volumeId is required") } attached, err := d.volumeAttached(ctx, volumeID) if err != nil { return goof.WithFieldsE(fields, "error retrieving attachment status", err) } if attached { _, err := d.VolumeDetach(ctx, volumeID, &types.VolumeDetachOpts{}) if err != nil { return goof.WithFieldsE(fields, "error detaching before volume removal", err) } } res := volumes.Delete(d.clientBlockStorage, volumeID) if res.Err != nil { return goof.WithFieldsE(fields, "error removing volume", res.Err) } return nil }
// Init initializes the AWS storage driver: it reads the EC2 instance
// identity document, builds credentials from configuration, and creates
// the EC2 client.
func (d *driver) Init(r *core.RexRay) error {
	d.r = r

	var err error
	// The identity document supplies the instance's region when one is
	// not configured explicitly below.
	d.instanceDocument, err = getInstanceIdendityDocument()
	if err != nil {
		return goof.WithFields(ef(), "error getting instance id doc")
	}

	auth := aws.Auth{
		AccessKey: d.r.Config.GetString("aws.accessKey"),
		SecretKey: d.r.Config.GetString("aws.secretKey"),
	}
	// Prefer the configured region; fall back to the region reported by
	// the instance identity document.
	region := d.r.Config.GetString("aws.region")
	if region == "" {
		region = d.instanceDocument.Region
	}
	d.ec2Instance = ec2.New(
		auth,
		aws.Regions[region],
	)

	log.WithField("provider", providerName).Info("storage driver initialized")
	return nil
}
func (d *driver) createVolumeHandleSnapshotID( size *int64, snapshotID string, fields map[string]interface{}) error { if snapshotID == "" { return nil } snapshots, err := d.GetSnapshot("", snapshotID, "") if err != nil { return goof.WithFieldsE(fields, "error getting snapshot", err) } if len(snapshots) == 0 { return goof.WithFields(fields, "snapshot array is empty") } volSize := snapshots[0].VolumeSize sizeInt, err := strconv.Atoi(volSize) if err != nil { f := goof.Fields{ "volumeSize": volSize, } for k, v := range fields { f[k] = v } return goof.WithFieldsE(f, "error casting volume size", err) } *size = int64(sizeInt) return nil }
func (d *driver) DetachVolume( runAsync bool, volumeID string, blank string, force bool) error { fields := eff(map[string]interface{}{ "runAsync": runAsync, "volumeId": volumeID, "blank": blank, }) if volumeID == "" { return goof.WithFields(fields, "volumeId is required") } volumes, err := d.getVolume(volumeID, "", false) if err != nil { return goof.WithFieldsE(fields, "error getting volume", err) } if len(volumes) == 0 { return goof.WithFields(fields, "no volumes returned") } targetVolume := goscaleio.NewVolume(d.client) targetVolume.Volume = volumes[0] unmapVolumeSdcParam := &types.UnmapVolumeSdcParam{ SdcID: "", IgnoreScsiInitiators: "true", AllSdcs: "", } if force { unmapVolumeSdcParam.AllSdcs = "true" } else { unmapVolumeSdcParam.SdcID = d.sdc.Sdc.ID } _ = targetVolume.UnmapVolumeSdc(unmapVolumeSdcParam) log.WithFields(log.Fields{ "provider": providerName, "volumeId": volumeID}).Debug("detached volume") return nil }
// waitMount blocks until the ScaleIO SDC reports a local device mapping
// for the given volume, returning the mapped volume. It gives up after a
// 10-second timeout.
func waitMount(volumeID string) (*goscaleio.SdcMappedVolume, error) {
	// All channels are buffered (capacity 1) so the sending goroutines
	// can complete their send and exit even after the select below has
	// already chosen a different case.
	timeout := make(chan bool, 1)
	go func() {
		time.Sleep(10 * time.Second)
		timeout <- true
	}()

	successCh := make(chan *goscaleio.SdcMappedVolume, 1)
	errorCh := make(chan error, 1)

	go func(volumeID string) {
		log.WithField("provider", providerName).Debug(
			"waiting for volume mount")
		for {
			sdcMappedVolumes, err := goscaleio.GetLocalVolumeMap()
			if err != nil {
				errorCh <- goof.WithFieldE(
					"provider", providerName,
					"problem getting local volume mappings", err)
				return
			}

			sdcMappedVolume := &goscaleio.SdcMappedVolume{}
			var foundVolume bool
			// A mapping only counts once the SDC has assigned a device
			// path to it.
			for _, sdcMappedVolume = range sdcMappedVolumes {
				if sdcMappedVolume.VolumeID == volumeID &&
					sdcMappedVolume.SdcDevice != "" {
					foundVolume = true
					break
				}
			}

			if foundVolume {
				successCh <- sdcMappedVolume
				return
			}
			// Poll the local volume map at 100ms intervals.
			time.Sleep(100 * time.Millisecond)
		}
	}(volumeID)

	select {
	case sdcMappedVolume := <-successCh:
		log.WithFields(log.Fields{
			"provider": providerName,
			"volumeId": sdcMappedVolume.VolumeID,
			"volume":   sdcMappedVolume.SdcDevice,
		}).Debug("got sdcMappedVolume")
		return sdcMappedVolume, nil
	case err := <-errorCh:
		return &goscaleio.SdcMappedVolume{}, err
	case <-timeout:
		return &goscaleio.SdcMappedVolume{}, goof.WithFields(
			ef(), "timed out waiting for mount")
	}
}
func (d *driver) DetachVolume( runAsync bool, volumeID, instanceID string, force bool) error { fields := eff(map[string]interface{}{ "runAsync": runAsync, "volumeId": volumeID, "instanceId": instanceID, }) if volumeID == "" { return goof.WithFields(fields, "volumeId is required") } volumes, err := d.GetVolume(volumeID, "") if err != nil { return goof.WithFieldsE(fields, "error getting volume", err) } if len(volumes) == 0 { return goof.WithFields(fields, "no volumes returned") } if len(volumes[0].Attachments) == 0 { return nil } for _, attachment := range volumes[0].Attachments { operation, err := d.client.Instances.DetachDisk(d.project, d.zone, attachment.InstanceID, attachment.DeviceName).Do() if err != nil { return err } if !runAsync { err := d.waitUntilOperationIsFinished(operation) if err != nil { return err } } } return nil }
// Init initializes the XtremIO storage driver: it validates that an
// XtremIO device is attached (unless remote management is enabled),
// creates the API client, and resolves this host's initiator by IQN.
func (d *driver) Init(r *core.RexRay) error {
	d.r = r
	d.volumesByNaa = map[string]xtio.Volume{}

	fields := eff(map[string]interface{}{
		"endpoint":         d.endpoint(),
		"userName":         d.userName(),
		"deviceMapper":     d.deviceMapper(),
		"multipath":        d.multipath(),
		"remoteManagement": d.remoteManagement(),
		"insecure":         d.insecure(),
	})

	// Mask the password in log fields; only reveal whether one is set.
	if d.password() == "" {
		fields["password"] = ""
	} else {
		fields["password"] = "******"
	}

	if !isXtremIOAttached() && !d.remoteManagement() {
		return goof.WithFields(fields, "device not detected")
	}

	var err error
	if d.client, err = xtio.NewClientWithArgs(
		d.endpoint(),
		d.insecure(),
		d.userName(),
		d.password()); err != nil {
		return goof.WithFieldsE(fields, "error creating xtremio client", err)
	}

	// The initiator lookup requires local iSCSI access, so it is skipped
	// when managing the array remotely.
	if !d.remoteManagement() {
		var iqn string
		if iqn, err = getIQN(); err != nil {
			return goof.WithFieldsE(fields, "error getting IQN", err)
		}
		if d.initiator, err = d.client.GetInitiator("", iqn); err != nil {
			return goof.WithFieldsE(fields, "error getting initiator", err)
		}
	}

	log.WithField("provider", providerName).Info("storage driver initialized")
	return nil
}
func (d *driver) volumeAttached(ctx types.Context, volumeID string) (bool, error) { fields := eff(map[string]interface{}{ "moduleName": ctx, "volumeId": volumeID, }) if volumeID == "" { return true, goof.WithFields(fields, "volumeId is required") } volume, err := d.VolumeInspect( ctx, volumeID, &types.VolumeInspectOpts{ Attachments: types.VolAttReqTrue}) if err != nil { return true, goof.WithFieldsE(fields, "error getting volume when waiting", err) } if len(volume.Attachments) > 0 { return true, nil } if len(volume.Attachments) == 0 { return false, nil } return true, goof.WithFields(fields, "check volume attachement status failed is required") }
func (d *driver) createVolumeHandleVolumeID( availabilityZone, snapshotID, volumeID *string, size *int64, fields map[string]interface{}) ([]*core.Volume, error) { if *volumeID == "" { return nil, nil } var err error var volume []*core.Volume if volume, err = d.GetVolume(*volumeID, ""); err != nil { return nil, goof.WithFieldsE(fields, "error getting volumes", err) } if len(volume) == 0 { return nil, goof.WithFieldsE(fields, "", errors.ErrNoVolumesReturned) } volSize := volume[0].Size sizeInt, err := strconv.Atoi(volSize) if err != nil { f := goof.Fields{ "volumeSize": volSize, } for k, v := range fields { f[k] = v } return nil, goof.WithFieldsE(f, "error casting volume size", err) } *size = int64(sizeInt) *volumeID = volume[0].VolumeID snapshot, err := d.CreateSnapshot( false, fmt.Sprintf("temp-%s", *volumeID), *volumeID, "") if err != nil { return nil, goof.WithFields(fields, "error creating snapshot") } *snapshotID = snapshot[0].SnapshotID if *availabilityZone == "" { *availabilityZone = volume[0].AvailabilityZone } return volume, nil }
func (d *driver) RemoveVolume(volumeID string) error { fields := eff(map[string]interface{}{ "volumeId": volumeID, }) if volumeID == "" { return goof.WithFields(fields, "volumeId is required") } res := volumes.Delete(d.clientBlockStorage, volumeID) if res.Err != nil { return goof.WithFieldsE(fields, "error removing volume", res.Err) } log.WithFields(fields).Debug("removed volume") return nil }
// StartModule starts the module with the provided instance name. func StartModule(ctx apitypes.Context, config gofig.Config, name string) error { modInstancesRwl.RLock() defer modInstancesRwl.RUnlock() name = strings.ToLower(name) lf := map[string]interface{}{"name": name} mod, modExists := modInstances[name] if !modExists { return goof.WithFields(lf, "unknown module instance") } lf["typeName"] = mod.Type.Name lf["address"] = mod.Config.Address started := make(chan bool) timeout := make(chan bool) startError := make(chan error) go func() { sErr := mod.Inst.Start() if sErr != nil { startError <- sErr } else { started <- true } }() go func() { time.Sleep(startTimeout(config)) timeout <- true }() select { case <-started: mod.IsStarted = true ctx.WithFields(lf).Info("started module") case <-timeout: return goof.New("timed out while monitoring module start") case sErr := <-startError: return sErr } return nil }
func getInstanceID(c gofig.Config) (string, error) { cmd := newCmd(c, "/usr/sbin/dmidecode") cmdOut, err := cmd.Output() if err != nil { return "", goof.WithFields(eff(goof.Fields{ "cmd.Path": cmd.Path, "cmd.Args": cmd.Args, "cmd.Out": cmdOut, }), "error getting instance id") } rp := regexp.MustCompile("UUID:(.*)") uuid := strings.Replace(rp.FindString(string(cmdOut)), "UUID: ", "", -1) return strings.ToLower(uuid), nil }
func getInstanceRegion(cfg gofig.Config) (string, error) { cmd := newCmd( cfg, "/usr/bin/xenstore-read", "vm-data/provider_data/region") cmdOut, err := cmd.Output() if err != nil { return "", goof.WithFields(eff(goof.Fields{ "cmd.Path": cmd.Path, "cmd.Args": cmd.Args, "cmd.Out": cmdOut, }), "error getting instance region") } region := strings.Replace(string(cmdOut), "\n", "", -1) return region, nil }
// InitializeModule initializes a module.
//
// It allocates a new instance ID, invokes the module type's init
// function, and registers the resulting instance by that ID.
func InitializeModule(
	modTypeID int32,
	modConfig *Config) (*Instance, error) {
	modInstancesRwl.Lock()
	defer modInstancesRwl.Unlock()

	lf := log.Fields{
		"typeId":  modTypeID,
		"address": modConfig.Address,
	}

	mt, modTypeExists := modTypes[modTypeID]
	if !modTypeExists {
		return nil, goof.WithFields(lf, "unknown module type")
	}

	lf["typeName"] = mt.Name
	lf["ignoreFailOnInit"] = mt.IgnoreFailOnInit

	modInstID := atomic.AddInt32(&nextModInstanceID, 1)
	mod, initErr := mt.InitFunc(modInstID, modConfig)
	if initErr != nil {
		// Roll back the ID allocation on failure. This is only safe
		// because the instances write-lock is held for the whole
		// function, so no concurrent allocation can interleave.
		atomic.AddInt32(&nextModInstanceID, -1)
		return nil, initErr
	}

	modInst := &Instance{
		ID:          modInstID,
		Type:        mt,
		TypeID:      mt.ID,
		Inst:        mod,
		Name:        mod.Name(),
		Config:      modConfig,
		Description: mod.Description(),
	}
	modInstances[modInstID] = modInst

	lf["id"] = modInstID
	log.WithFields(lf).Info("initialized module instance")

	return modInst, nil
}
// VolumeCreateFromSnapshot creates a new volume from an existing
// snapshot.
//
// NOTE(review): the snapshotID argument is never referenced in this body;
// the call delegates to VolumeCreate, which does not receive the snapshot
// either. Confirm whether snapshot support is carried via opts.Opts or is
// simply unimplemented here.
func (d *driver) VolumeCreateFromSnapshot(
	ctx types.Context,
	snapshotID, volumeName string,
	opts *types.VolumeCreateOpts) (*types.Volume, error) {
	// notUsed bool,volumeName, volumeID, snapshotID, volumeType string,
	// IOPS, size int64, availabilityZone string) (*types.VolumeResp, error)

	if volumeName == "" {
		return nil, goof.New("no volume name specified")
	}

	// Volume names must be unique.
	volumes, err := d.getVolume("", volumeName, 0)
	if err != nil {
		return nil, err
	}
	if len(volumes) > 0 {
		return nil, goof.WithFields(eff(map[string]interface{}{
			"volumeName": volumeName}), "volume name already exists")
	}

	resp, err := d.VolumeCreate(ctx, volumeName, opts)
	if err != nil {
		return nil, err
	}

	// Re-inspect so the returned volume has its attachments populated.
	volumeInspectOpts := &types.VolumeInspectOpts{
		Attachments: types.VolAttReqTrue,
		Opts:        opts.Opts,
	}

	createdVolume, err := d.VolumeInspect(ctx, resp.ID, volumeInspectOpts)
	if err != nil {
		return nil, err
	}

	log.WithFields(log.Fields{
		"provider": "scaleIO",
		"volume":   createdVolume,
	}).Debug("created volume")
	return createdVolume, nil
}
// InitializeModule initializes a module.
//
// The module type is resolved from modConfig.Type (case-insensitive) and
// the resulting instance is registered under the module's own name.
func InitializeModule(
	ctx apitypes.Context,
	modConfig *Config) (*Instance, error) {
	modInstancesRwl.Lock()
	defer modInstancesRwl.Unlock()

	ctx.WithField("name", modConfig.Name).Debug(
		"initializing module instance")

	typeName := strings.ToLower(modConfig.Type)

	lf := log.Fields{
		"typeName": typeName,
		"address":  modConfig.Address,
	}

	mt, modTypeExists := modTypes[typeName]
	if !modTypeExists {
		return nil, goof.WithFields(lf, "unknown module type")
	}

	mod, initErr := mt.InitFunc(ctx, modConfig)
	if initErr != nil {
		return nil, initErr
	}

	modName := mod.Name()
	modInst := &Instance{
		Type:        mt,
		TypeName:    typeName,
		Inst:        mod,
		Name:        modName,
		Config:      modConfig,
		Description: mod.Description(),
	}
	// Instances are keyed by the module's reported name, not its type.
	modInstances[modName] = modInst

	lf["name"] = modName
	ctx.WithFields(lf).Info("initialized module instance")

	return modInst, nil
}
// Init initializes the Isilon storage driver: it verifies that the
// Isilon device is reachable and creates the Isilon API client.
func (d *driver) Init(r *core.RexRay) error {
	d.r = r
	fields := eff(map[string]interface{}{
		"endpoint":   d.endpoint(),
		"userName":   d.userName(),
		"group":      d.group(),
		"insecure":   d.insecure(),
		"volumePath": d.volumePath(),
		"dataSubnet": d.dataSubnet(),
	})

	// Mask the password in log fields; only reveal whether one is set.
	if d.password() == "" {
		fields["password"] = ""
	} else {
		fields["password"] = "******"
	}

	if !isIsilonAttached() {
		return goof.WithFields(fields, "device not detected")
	}

	var err error
	if d.client, err = isi.NewClientWithArgs(
		d.endpoint(),
		d.insecure(),
		d.userName(),
		d.group(),
		d.password(),
		d.volumePath()); err != nil {
		return goof.WithFieldsE(fields, "error creating isilon client", err)
	}

	log.WithField("provider", providerName).Info("storage driver initialized")

	return nil
}
func (d *driver) waitVolumeAttach(volumeID string) error { fields := eff(map[string]interface{}{ "volumeId": volumeID, }) if volumeID == "" { return goof.WithFields(fields, "volumeId is required") } for { volume, err := d.GetVolume(volumeID, "") if err != nil { return goof.WithFieldsE(fields, "error getting volume", err) } if volume[0].Status == "in-use" { break } time.Sleep(1 * time.Second) } return nil }
// VolumeCreate creates a new volume.
//
// The volume name must be unique; if a volume with the same name already
// exists an error is returned. On success the newly created volume is
// re-inspected (with attachments requested) and returned.
func (d *driver) VolumeCreate(ctx types.Context, volumeName string,
	opts *types.VolumeCreateOpts) (*types.Volume, error) {
	// Initialize for logging
	fields := map[string]interface{}{
		"driverName": d.Name(),
		"volumeName": volumeName,
		"opts":       opts,
	}

	log.WithFields(fields).Debug("creating volume")

	// Check if volume with same name exists
	ec2vols, err := d.getVolume(ctx, "", volumeName)
	if err != nil {
		return nil, goof.WithFieldsE(fields, "error getting volume", err)
	}
	volumes, convErr := d.toTypesVolume(ctx, ec2vols, 0)
	if convErr != nil {
		return nil, goof.WithFieldsE(
			fields, "error converting to types.Volume", convErr)
	}
	if len(volumes) > 0 {
		return nil, goof.WithFields(fields, "volume name already exists")
	}

	// Pass libStorage types.Volume to helper function which calls EC2 API
	vol, err := d.createVolume(ctx, volumeName, "", opts)
	if err != nil {
		return nil, goof.WithFieldsE(fields, "error creating volume", err)
	}

	// Return the volume created
	return d.VolumeInspect(ctx, *vol.VolumeId, &types.VolumeInspectOpts{
		Attachments: types.VolAttReqTrue,
	})
}
func (d *driver) DetachVolume( runAsync bool, volumeID, instanceID string, force bool) error { fields := eff(map[string]interface{}{ "runAsync": runAsync, "volumeId": volumeID, "instanceId": instanceID, }) if volumeID == "" { return goof.WithFields(fields, "volumeId is required") } volume, err := d.GetVolume(volumeID, "") if err != nil { return goof.WithFieldsE(fields, "error getting volume", err) } fields["instanceId"] = volume[0].Attachments[0].InstanceID resp := volumeattach.Delete( d.client, volume[0].Attachments[0].InstanceID, volumeID) if resp.Err != nil { return goof.WithFieldsE(fields, "error deleting volume", err) } if !runAsync { log.WithFields(fields).Debug("waiting for volume to detach") err = d.waitVolumeDetach(volumeID) if err != nil { return goof.WithFieldsE( fields, "error waiting for volume to detach", err) } } log.WithFields(fields).Debug("volume detached") return nil }
// AttachVolume maps the volume to this host's SDC and waits for the
// local device to appear. When force is true any existing mappings are
// removed first via a forced detach.
func (d *driver) AttachVolume(
	runAsync bool,
	volumeID, instanceID string,
	force bool) ([]*core.VolumeAttachment, error) {
	fields := eff(map[string]interface{}{
		"runAsync":   runAsync,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	})

	if volumeID == "" {
		return nil, goof.WithFields(fields, "volumeId is required")
	}

	if force {
		// Unmap the volume from all SDCs before re-mapping it here.
		if err := d.DetachVolume(false, volumeID, "", true); err != nil {
			return nil, err
		}
	}

	mapVolumeSdcParam := &types.MapVolumeSdcParam{
		SdcID: d.sdc.Sdc.ID,
		AllowMultipleMappings: "false",
		AllSdcs:               "",
	}

	volumes, err := d.getVolume(volumeID, "", false)
	if err != nil {
		return nil, goof.WithFieldsE(fields, "error getting volume", err)
	}
	if len(volumes) == 0 {
		return nil, goof.WithFields(fields, "no volumes returned")
	}

	targetVolume := goscaleio.NewVolume(d.client)
	targetVolume.Volume = volumes[0]

	err = targetVolume.MapVolumeSdc(mapVolumeSdcParam)
	if err != nil {
		return nil, goof.WithFieldsE(fields, "error mapping volume sdc", err)
	}

	// Block until the SDC exposes a local device for the volume.
	_, err = waitMount(volumes[0].ID)
	if err != nil {
		fields["volumeId"] = volumes[0].ID
		return nil, goof.WithFieldsE(
			fields, "error waiting on volume to mount", err)
	}

	volumeAttachment, err := d.GetVolumeAttach(volumeID, instanceID)
	if err != nil {
		return nil, goof.WithFieldsE(
			fields, "error getting volume attachments", err)
	}

	log.WithFields(log.Fields{
		"provider":   providerName,
		"volumeId":   volumeID,
		"instanceId": instanceID,
	}).Debug("attached volume to instance")

	return volumeAttachment, nil
}
func (d *driver) createVolume( ctx types.Context, volumeName string, volumeSourceID string, snapshotID string, opts *types.VolumeCreateOpts) (*types.Volume, error) { var ( volumeType string IOPS int64 size int64 availabilityZone string ) if opts.Type != nil { volumeType = *(opts.Type) } if opts.IOPS != nil { IOPS = *(opts.IOPS) } if opts.Size != nil { size = *(opts.Size) } if opts.AvailabilityZone != nil { availabilityZone = *(opts.AvailabilityZone) } //check some fields... createVolumeEnsureSize(&size) vsize := int(size) fields := map[string]interface{}{ "availabilityZone": availabilityZone, "iops": IOPS, "provider": d.Name(), "size": size, "snapshotId": snapshotID, "volumeName": volumeName, "volumeSourceID": volumeSourceID, "volumeType": volumeType, } options := &volumes.CreateOpts{ Name: volumeName, Size: vsize, SnapshotID: snapshotID, VolumeType: volumeType, //AvailabilityZone: availabilityZone, //Not in old Rackspace //SourceReplica: volumeSourceID, } resp, err := volumes.Create(d.clientBlockStorage, options).Extract() if err != nil { return nil, goof.WithFields(fields, "error creating volume") } fields["volumeId"] = resp.ID //for openstack must test before rackspace integration err = volumes.WaitForStatus(d.clientBlockStorage, resp.ID, "available", 120) if err != nil { return nil, goof.WithFieldsE(fields, "error waiting for volume creation to complete", err) } log.WithFields(fields).Debug("created volume") return translateVolume(resp, types.VolAttReqTrue), nil }
// StartModule starts the module with the provided instance name. func StartModule(name string) error { modInstancesRwl.RLock() defer modInstancesRwl.RUnlock() name = strings.ToLower(name) lf := map[string]interface{}{"name": name} mod, modExists := modInstances[name] if !modExists { return goof.WithFields(lf, "unknown module instance") } lf["typeName"] = mod.Type.Name lf["address"] = mod.Config.Address started := make(chan bool) timeout := make(chan bool) startError := make(chan error) go func() { defer func() { r := recover() m := "error starting module" errMsg := fmt.Sprintf( "Error starting module type=%s, instance=%s at %s", mod.TypeName, mod.Name, mod.Config.Address) if r == nil { startError <- goof.New(errMsg) return } switch x := r.(type) { case string: lf["inner"] = x startError <- goof.WithFields(lf, m) case error: startError <- goof.WithFieldsE(lf, m, x) default: startError <- goof.WithFields(lf, m) } }() sErr := mod.Inst.Start() if sErr != nil { startError <- sErr } else { started <- true } }() go func() { time.Sleep(3 * time.Second) timeout <- true }() select { case <-started: mod.IsStarted = true log.WithFields(lf).Info("started module") case <-timeout: log.WithFields(lf).Debug("timed out while monitoring module start") case sErr := <-startError: return sErr } return nil }
func (d *driver) CreateVolume( runAsync bool, volumeName string, volumeID string, snapshotID string, volumeType string, IOPS int64, size int64, availabilityZone string) (*core.Volume, error) { fields := map[string]interface{}{ "provider": providerName, "runAsync": runAsync, "volumeName": volumeName, "volumeId": volumeID, "snapshotId": snapshotID, "volumeType": volumeType, "iops": IOPS, "size": size, "availabilityZone": availabilityZone, } if volumeID != "" && runAsync { return nil, errors.ErrRunAsyncFromVolume } d.createVolumeEnsureAvailabilityZone(&availabilityZone) var err error if err = d.createVolumeHandleSnapshotID( &size, snapshotID, fields); err != nil { return nil, err } var volume []*core.Volume if volume, err = d.createVolumeHandleVolumeID( &availabilityZone, &snapshotID, &volumeID, &size, fields); err != nil { return nil, err } createVolumeEnsureSize(&size) options := &volumes.CreateOpts{ Name: volumeName, Size: int(size), SnapshotID: snapshotID, VolumeType: volumeType, Availability: availabilityZone, } resp, err := volumes.Create(d.clientBlockStorage, options).Extract() if err != nil { return nil, goof.WithFields(fields, "error creating volume") } if !runAsync { log.Debug("waiting for volume creation to complete") err = volumes.WaitForStatus(d.clientBlockStorage, resp.ID, "available", 120) if err != nil { return nil, goof.WithFields(fields, "error waiting for volume creation to complete") } if volumeID != "" { err := d.RemoveSnapshot(snapshotID) if err != nil { return nil, goof.WithFields(fields, "error removing snapshot") } } } fields["volumeId"] = resp.ID fields["volumeName"] = "" volume, err = d.GetVolume(resp.ID, "") if err != nil { return nil, goof.WithFields(fields, "error removing snapshot") } log.WithFields(fields).Debug("created volume") return volume[0], nil }