Example #1
func (p *pwxPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pwxManager, mounter mount.Interface) (volume.Builder, error) {
	var readOnly bool

	var px *api.PwxVolumeSource
	if spec.Volume != nil && spec.Volume.PwxDisk != nil {
		px = spec.Volume.PwxDisk
		readOnly = px.ReadOnly
	} else {
		px = spec.PersistentVolume.Spec.PwxDisk
		readOnly = spec.ReadOnly
	}

	volid := px.VolumeID
	fsType := px.FSType

	return &pwxBuilder{
		pwxDisk: &pwxDisk{
			podUID:   podUID,
			volName:  spec.Name(),
			VolumeID: volid,
			mounter:  mounter,
			manager:  manager,
			plugin:   p,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
}
Example #2
File: fc.go Project: jojimt/contrib
func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
	var fc *api.FCVolumeSource
	if spec.Volume != nil && spec.Volume.FC != nil {
		fc = spec.Volume.FC
		readOnly = fc.ReadOnly
	} else {
		fc = spec.PersistentVolume.Spec.FC
		readOnly = spec.ReadOnly
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}

	lun := strconv.Itoa(*fc.Lun)

	return &fcDiskBuilder{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
	}, nil
}
Example #3
func (asw *actualStateOfWorld) AddVolumeNode(
	volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error) {
	asw.Lock()
	defer asw.Unlock()

	attachableVolumePlugin, err := asw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
	if err != nil || attachableVolumePlugin == nil {
		return "", fmt.Errorf(
			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
			volumeSpec.Name(),
			err)
	}

	volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
		attachableVolumePlugin, volumeSpec)
	if err != nil {
		return "", fmt.Errorf(
			"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
			volumeSpec.Name(),
			err)
	}

	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
	if !volumeExists {
		volumeObj = attachedVolume{
			volumeName:      volumeName,
			spec:            volumeSpec,
			nodesAttachedTo: make(map[types.NodeName]nodeAttachedTo),
			devicePath:      devicePath,
		}
	} else {
		// If the volume object already exists, its information may be out of date.
		// Update its fields, except for the set of nodes the volume is attached to.
		volumeObj.devicePath = devicePath
		volumeObj.spec = volumeSpec
		glog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
			volumeName,
			nodeName,
			devicePath)
	}
	asw.attachedVolumes[volumeName] = volumeObj

	_, nodeExists := volumeObj.nodesAttachedTo[nodeName]
	if !nodeExists {
		// Create object if it doesn't exist.
		volumeObj.nodesAttachedTo[nodeName] = nodeAttachedTo{
			nodeName:              nodeName,
			mountedByNode:         true, // Assume mounted, until proven otherwise
			mountedByNodeSetCount: 0,
			detachRequestedTime:   time.Time{},
		}
	} else {
		glog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q",
			volumeName,
			nodeName)
	}

	asw.addVolumeToReportAsAttached(volumeName, nodeName)
	return volumeName, nil
}
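The attachedVolume and nodeAttachedTo entries that AddVolumeNode manipulates are internal to the actual-state-of-world cache and are not shown in this example. A minimal sketch of those structures, inferred only from the fields the function touches above (field types and comments are assumptions, not the upstream definitions):

// Sketch only; import paths for v1, volume, types and time are omitted.
type attachedVolume struct {
	volumeName      v1.UniqueVolumeName                // unique name derived from the plugin and spec
	spec            *volume.Spec                       // most recent spec seen for this volume
	nodesAttachedTo map[types.NodeName]nodeAttachedTo  // one entry per node the volume is attached to
	devicePath      string                             // device path reported by the attach operation
}

type nodeAttachedTo struct {
	nodeName              types.NodeName
	mountedByNode         bool      // assumed true until proven otherwise
	mountedByNodeSetCount uint      // how many times mountedByNode has been explicitly set
	detachRequestedTime   time.Time // zero until a detach has been requested
}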
Example #4
func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) {
	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
	// GCEPDs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	pdName := volumeSource.PDName
	partition := ""
	if volumeSource.Partition != 0 {
		partition = strconv.Itoa(int(volumeSource.Partition))
	}

	return &gcePersistentDiskMounter{
		gcePersistentDisk: &gcePersistentDisk{
			podUID:          podUID,
			volName:         spec.Name(),
			pdName:          pdName,
			partition:       partition,
			mounter:         mounter,
			manager:         manager,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
		},
		readOnly: readOnly}, nil
}
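The newer mounter examples (#4, #5, #13, #18, #19) delegate the direct-volume versus PersistentVolume branching to a per-plugin getVolumeSource helper that is not shown in this listing. A minimal sketch of what the GCE PD variant might look like, assuming it mirrors the explicit branching used by the older builder examples (#1, #9, #12) and that the api package of that era is in scope:

// Hypothetical reconstruction of the GCE PD plugin's getVolumeSource helper;
// the real function lives elsewhere in the plugin package and may differ.
func getVolumeSource(spec *volume.Spec) (*api.GCEPersistentDiskVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
		// Volume used directly in a pod: ReadOnly is set by the pod author.
		return spec.Volume.GCEPersistentDisk, spec.Volume.GCEPersistentDisk.ReadOnly, nil
	}
	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.GCEPersistentDisk != nil {
		// Volume used through a PersistentVolume: ReadOnly comes from the claim binding.
		return spec.PersistentVolume.Spec.GCEPersistentDisk, spec.ReadOnly, nil
	}
	return nil, false, fmt.Errorf("spec does not reference a GCE Persistent Disk volume type")
}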
Example #5
File: fc.go Project: CodeJuan/kubernetes
func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	fc, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}

	lun := strconv.Itoa(int(*fc.Lun))

	return &fcDiskMounter{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
	}, nil
}
Example #6
func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	return &configMapVolumeMounter{
		configMapVolume: &configMapVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}},
		source:          *spec.Volume.ConfigMap,
		pod:             *pod,
		opts:            &opts}, nil
}
Example #7
func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
	var iscsi *api.ISCSIVolumeSource
	if spec.Volume != nil && spec.Volume.ISCSI != nil {
		iscsi = spec.Volume.ISCSI
		readOnly = iscsi.ReadOnly
	} else {
		iscsi = spec.PersistentVolume.Spec.ISCSI
		readOnly = spec.ReadOnly
	}

	lun := strconv.Itoa(iscsi.Lun)
	portal := portalBuilder(iscsi.TargetPortal)

	iface := iscsi.ISCSIInterface

	return &iscsiDiskBuilder{
		iscsiDisk: &iscsiDisk{
			podUID:  podUID,
			volName: spec.Name(),
			portal:  portal,
			iqn:     iscsi.IQN,
			lun:     lun,
			iface:   iface,
			manager: manager,
			plugin:  plugin},
		fsType:   iscsi.FSType,
		readOnly: readOnly,
		mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
	}, nil
}
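The portalBuilder helper used above (portalMounter in the newer mounter variant, example #18) is defined in the plugin's utility file and is not shown here. A minimal sketch, assuming its only job is to append the default iSCSI port when the target portal carries no explicit port:

// Hypothetical sketch of portalBuilder / portalMounter: make sure the target
// portal address has a port, defaulting to the standard iSCSI port 3260.
func portalBuilder(portal string) string {
	if !strings.Contains(portal, ":") {
		portal = portal + ":3260"
	}
	return portal
}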
Example #8
File: cephfs.go Project: 40a/bootkube
func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
	cephvs := plugin.getVolumeSource(spec)
	id := cephvs.User
	if id == "" {
		id = "admin"
	}
	path := cephvs.Path
	if path == "" {
		path = "/"
	}
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	secret_file := cephvs.SecretFile
	if secret_file == "" {
		secret_file = "/etc/ceph/" + id + ".secret"
	}

	return &cephfsMounter{
		cephfs: &cephfs{
			podUID:      podUID,
			volName:     spec.Name(),
			mon:         cephvs.Monitors,
			path:        path,
			secret:      secret,
			id:          id,
			secret_file: secret_file,
			readonly:    cephvs.ReadOnly,
			mounter:     mounter,
			plugin:      plugin},
	}, nil
}
Example #9
func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
	// GCEPDs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool

	var gce *api.GCEPersistentDiskVolumeSource
	if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
		gce = spec.Volume.GCEPersistentDisk
		readOnly = gce.ReadOnly
	} else {
		gce = spec.PersistentVolume.Spec.GCEPersistentDisk
		readOnly = spec.ReadOnly
	}

	pdName := gce.PDName
	fsType := gce.FSType
	partition := ""
	if gce.Partition != 0 {
		partition = strconv.Itoa(gce.Partition)
	}

	return &gcePersistentDiskBuilder{
		gcePersistentDisk: &gcePersistentDisk{
			podUID:    podUID,
			volName:   spec.Name(),
			pdName:    pdName,
			partition: partition,
			mounter:   mounter,
			manager:   manager,
			plugin:    plugin,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}}, nil
}
Example #10
func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) {
	// azures used directly in a pod have a ReadOnly flag set by the pod author.
	// azures used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	azure, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	fsType := "ext4"
	if azure.FSType != nil {
		fsType = *azure.FSType
	}
	cachingMode := api.AzureDataDiskCachingNone
	if azure.CachingMode != nil {
		cachingMode = *azure.CachingMode
	}
	readOnly := false
	if azure.ReadOnly != nil {
		readOnly = *azure.ReadOnly
	}
	diskName := azure.DiskName
	diskUri := azure.DataDiskURI
	return &azureDiskMounter{
		azureDisk: &azureDisk{
			podUID:      podUID,
			volName:     spec.Name(),
			diskName:    diskName,
			diskUri:     diskUri,
			cachingMode: cachingMode,
			mounter:     mounter,
			plugin:      plugin,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
Example #11
File: rbd.go Project: thed00de/hypernetes
func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
	source, readOnly := plugin.getRBDVolumeSource(spec)
	pool := source.RBDPool
	if pool == "" {
		pool = "rbd"
	}
	id := source.RadosUser
	if id == "" {
		id = "admin"
	}
	keyring := source.Keyring
	if keyring == "" {
		keyring = "/etc/ceph/keyring"
	}

	return &rbdBuilder{
		rbd: &rbd{
			podUID:   podUID,
			volName:  spec.Name(),
			Image:    source.RBDImage,
			Pool:     pool,
			ReadOnly: readOnly,
			manager:  manager,
			mounter:  &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
			plugin:   plugin,
		},
		Mon:     source.CephMonitors,
		Id:      id,
		Keyring: keyring,
		Secret:  secret,
		fsType:  source.FSType,
	}, nil
}
Example #12
func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Builder, error) {
	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
	// EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
	var ebs *api.AWSElasticBlockStoreVolumeSource
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		ebs = spec.Volume.AWSElasticBlockStore
		readOnly = ebs.ReadOnly
	} else {
		ebs = spec.PersistentVolume.Spec.AWSElasticBlockStore
		readOnly = spec.ReadOnly
	}

	volumeID := ebs.VolumeID
	fsType := ebs.FSType
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(ebs.Partition)
	}

	return &awsElasticBlockStoreBuilder{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:   podUID,
			volName:  spec.Name(),
			volumeID: volumeID,
			manager:  manager,
			mounter:  mounter,
			plugin:   plugin,
		},
		fsType:      fsType,
		partition:   partition,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
Example #13
// newMounterInternal is the internal mounter routine to build the volume.
func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *v1.Pod, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface, secrets map[string]string) (volume.Mounter, error) {
	source, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	return &flexVolumeMounter{
		flexVolumeDisk: &flexVolumeDisk{
			podUID:       pod.UID,
			podNamespace: pod.Namespace,
			podName:      pod.Name,
			volName:      spec.Name(),
			driverName:   source.Driver,
			execPath:     plugin.getExecutable(),
			mounter:      mounter,
			plugin:       plugin,
			secrets:      secrets,
		},
		fsType:             source.FSType,
		readOnly:           source.ReadOnly,
		options:            source.Options,
		runner:             runner,
		manager:            manager,
		blockDeviceMounter: &mount.SafeFormatAndMount{Interface: mounter, Runner: runner},
	}, nil
}
Example #14
File: secret.go Project: ncantor/origin
func (plugin *secretPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
	return &secretVolumeBuilder{
		secretVolume: &secretVolume{spec.Name(), pod.UID, plugin, mounter},
		secretName:   spec.Volume.Secret.SecretName,
		pod:          *pod,
		opts:         &opts}, nil
}
Example #15
func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
	}
	path := spec.PersistentVolume.Spec.HostPath.Path
	return &hostPathDeleter{name: spec.Name(), path: path, host: host}, nil
}
Example #16
File: secret.go Project: jojimt/contrib
func (plugin *secretPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
	return &secretVolumeBuilder{
		secretVolume: &secretVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}},
		secretName:   spec.Volume.Secret.SecretName,
		pod:          *pod,
		opts:         &opts}, nil
}
Example #17
func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) {
	var cinder *api.CinderVolumeSource
	if spec.Volume != nil && spec.Volume.Cinder != nil {
		cinder = spec.Volume.Cinder
	} else {
		cinder = spec.PersistentVolume.Spec.Cinder
	}

	pdName := cinder.VolumeID
	fsType := cinder.FSType
	readOnly := cinder.ReadOnly

	return &cinderVolumeBuilder{
		cinderVolume: &cinderVolume{
			podUID:  podUID,
			volName: spec.Name(),
			pdName:  pdName,
			mounter: mounter,
			manager: manager,
			plugin:  plugin,
		},
		fsType:             fsType,
		readOnly:           readOnly,
		blockDeviceMounter: &cinderSafeFormatAndMount{mounter, exec.New()}}, nil
}
Example #18
func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	iscsi, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	lun := strconv.Itoa(int(iscsi.Lun))
	portal := portalMounter(iscsi.TargetPortal)

	iface := iscsi.ISCSIInterface

	return &iscsiDiskMounter{
		iscsiDisk: &iscsiDisk{
			podUID:  podUID,
			volName: spec.Name(),
			portal:  portal,
			iqn:     iscsi.IQN,
			lun:     lun,
			iface:   iface,
			manager: manager,
			plugin:  plugin},
		fsType:     iscsi.FSType,
		readOnly:   readOnly,
		mounter:    &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()},
		deviceUtil: ioutil.NewDeviceHandler(ioutil.NewIOHandler()),
	}, nil
}
Example #19
func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
	// EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	ebs, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}

	volumeID := aws.KubernetesVolumeID(ebs.VolumeID)
	fsType := ebs.FSType
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(int(ebs.Partition))
	}

	return &awsElasticBlockStoreMounter{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:          podUID,
			volName:         spec.Name(),
			volumeID:        volumeID,
			partition:       partition,
			manager:         manager,
			mounter:         mounter,
			plugin:          plugin,
			MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}
Example #20
func (plugin *downwardAPIPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _ := getVolumeSource(spec)
	if volumeSource == nil {
		return "", fmt.Errorf("Spec does not reference a DownwardAPI volume type")
	}

	// Return user defined volume name, since this is an ephemeral volume type
	return spec.Name(), nil
}
Example #21
File: configmap.go Project: ncdc/origin
func (plugin *configMapPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _ := getVolumeSource(spec)
	if volumeSource == nil {
		return "", fmt.Errorf("Spec does not reference a ConfigMap volume type")
	}

	return fmt.Sprintf(
		"%v/%v",
		spec.Name(),
		volumeSource.Name), nil
}
Example #22
File: nfs.go Project: neumino/kubernetes
func newRecycler(spec *volume.Spec, host volume.VolumeHost) (volume.Recycler, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.NFS == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.NFS is nil")
	}
	return &nfsRecycler{
		name:   spec.Name(),
		server: spec.PersistentVolume.Spec.NFS.Server,
		path:   spec.PersistentVolume.Spec.NFS.Path,
		host:   host,
	}, nil
}
Example #23
File: cinder.go Project: ncdc/origin
func (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.Cinder == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.Cinder is nil")
	}
	return &cinderVolumeDeleter{
		&cinderVolume{
			volName: spec.Name(),
			pdName:  spec.PersistentVolume.Spec.Cinder.VolumeID,
			manager: manager,
			plugin:  plugin,
		}}, nil
}
Example #24
func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AWSElasticBlockStore == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil")
	}
	return &awsElasticBlockStoreDeleter{
		awsElasticBlockStore: &awsElasticBlockStore{
			volName:  spec.Name(),
			volumeID: spec.PersistentVolume.Spec.AWSElasticBlockStore.VolumeID,
			manager:  manager,
			plugin:   plugin,
		}}, nil
}
Example #25
func (plugin *vsphereVolumePlugin) newDeleterInternal(spec *volume.Spec, manager vdManager) (volume.Deleter, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.VsphereVolume == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.VsphereVolume is nil")
	}
	return &vsphereVolumeDeleter{
		&vsphereVolume{
			volName: spec.Name(),
			volPath: spec.PersistentVolume.Spec.VsphereVolume.VolumePath,
			manager: manager,
			plugin:  plugin,
		}}, nil
}
Example #26
// newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod
// and volume options and then creates a Mounter.
// Returns a valid Mounter or an error.
func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
	}
	physicalMounter, err := plugin.NewMounter(spec, pod, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err)
	}
	glog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name())
	return physicalMounter, nil
}
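A hedged sketch of how a caller might use newVolumeMounterFromPlugins; the wrapper function, its name, and the empty VolumeOptions are illustrative assumptions, not kubelet code:

// mountPodVolume is a hypothetical helper showing only the call pattern.
func mountPodVolume(kl *Kubelet, spec *volume.Spec, pod *api.Pod) (volume.Mounter, error) {
	opts := volume.VolumeOptions{} // real callers populate this from the PV/claim; fields vary by release
	mounter, err := kl.newVolumeMounterFromPlugins(spec, pod, opts)
	if err != nil {
		return nil, err
	}
	// The caller is then expected to run the mounter's SetUp step; its exact
	// signature differs between releases, so it is not spelled out here.
	return mounter, nil
}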
Example #27
func newRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
	}
	return &hostPathRecycler{
		name:    spec.Name(),
		path:    spec.PersistentVolume.Spec.HostPath.Path,
		host:    host,
		config:  config,
		timeout: volume.CalculateTimeoutForVolume(config.RecyclerMinimumTimeout, config.RecyclerTimeoutIncrement, spec.PersistentVolume),
	}, nil
}
Example #28
func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, manager pdManager) (volume.Deleter, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.GCEPersistentDisk == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.GCEPersistentDisk is nil")
	}
	return &gcePersistentDiskDeleter{
		gcePersistentDisk: &gcePersistentDisk{
			volName: spec.Name(),
			pdName:  spec.PersistentVolume.Spec.GCEPersistentDisk.PDName,
			manager: manager,
			plugin:  plugin,
		}}, nil
}
Example #29
func (plugin *flockerPlugin) newDeleterInternal(spec *volume.Spec, manager volumeManager) (volume.Deleter, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.Flocker == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.Flocker is nil")
	}
	return &flockerVolumeDeleter{
		flockerVolume: &flockerVolume{
			volName:     spec.Name(),
			datasetName: spec.PersistentVolume.Spec.Flocker.DatasetName,
			datasetUUID: spec.PersistentVolume.Spec.Flocker.DatasetUUID,
			manager:     manager,
		}}, nil
}
Example #30
func (plugin *photonPersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, manager pdManager) (volume.Deleter, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.PhotonPersistentDisk == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.PhotonPersistentDisk is nil")
	}
	return &photonPersistentDiskDeleter{
		&photonPersistentDisk{
			volName: spec.Name(),
			pdID:    spec.PersistentVolume.Spec.PhotonPersistentDisk.PdID,
			manager: manager,
			plugin:  plugin,
		}}, nil
}