func (plugin *secretPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
	return &secretVolumeBuilder{
		secretVolume: &secretVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter()},
		secretName:   spec.Volume.Secret.SecretName,
		pod:          *pod,
		opts:         &opts}, nil
}
func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Builder, error) {
	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
	// EBSs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var readOnly bool
	var ebs *api.AWSElasticBlockStoreVolumeSource
	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
		ebs = spec.Volume.AWSElasticBlockStore
		readOnly = ebs.ReadOnly
	} else {
		ebs = spec.PersistentVolume.Spec.AWSElasticBlockStore
		readOnly = spec.ReadOnly
	}

	volumeID := ebs.VolumeID
	fsType := ebs.FSType
	partition := ""
	if ebs.Partition != 0 {
		partition = strconv.Itoa(ebs.Partition)
	}

	return &awsElasticBlockStoreBuilder{
		awsElasticBlockStore: &awsElasticBlockStore{
			podUID:   podUID,
			volName:  spec.Name(),
			volumeID: volumeID,
			manager:  manager,
			mounter:  mounter,
			plugin:   plugin,
		},
		fsType:      fsType,
		partition:   partition,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{plugin.host.GetMounter(), exec.New()}}, nil
}
func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
	// GCEPDs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var readOnly bool
	var gce *api.GCEPersistentDiskVolumeSource
	if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
		gce = spec.Volume.GCEPersistentDisk
		readOnly = gce.ReadOnly
	} else {
		gce = spec.PersistentVolume.Spec.GCEPersistentDisk
		readOnly = spec.ReadOnly
	}

	pdName := gce.PDName
	fsType := gce.FSType
	partition := ""
	if gce.Partition != 0 {
		partition = strconv.Itoa(gce.Partition)
	}

	return &gcePersistentDiskBuilder{
		gcePersistentDisk: &gcePersistentDisk{
			podUID:    podUID,
			volName:   spec.Name(),
			pdName:    pdName,
			partition: partition,
			mounter:   mounter,
			manager:   manager,
			plugin:    plugin,
		},
		fsType:      fsType,
		readOnly:    readOnly,
		diskMounter: &mount.SafeFormatAndMount{mounter, exec.New()}}, nil
}
func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// iscsi volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var readOnly bool
	var iscsi *api.ISCSIVolumeSource
	if spec.Volume != nil && spec.Volume.ISCSI != nil {
		iscsi = spec.Volume.ISCSI
		readOnly = iscsi.ReadOnly
	} else {
		iscsi = spec.PersistentVolume.Spec.ISCSI
		readOnly = spec.ReadOnly
	}

	lun := strconv.Itoa(iscsi.Lun)

	return &iscsiDiskBuilder{
		iscsiDisk: &iscsiDisk{
			podUID:  podUID,
			volName: spec.Name(),
			portal:  iscsi.TargetPortal,
			iqn:     iscsi.IQN,
			lun:     lun,
			manager: manager,
			mounter: mounter,
			plugin:  plugin},
		fsType:   iscsi.FSType,
		readOnly: readOnly,
	}, nil
}
func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) {
	// Cinder carries its ReadOnly flag on the volume source itself, whether the
	// volume is used directly in a pod or through a PersistentVolume.
	var cinder *api.CinderVolumeSource
	if spec.Volume != nil && spec.Volume.Cinder != nil {
		cinder = spec.Volume.Cinder
	} else {
		cinder = spec.PersistentVolume.Spec.Cinder
	}

	pdName := cinder.VolumeID
	fsType := cinder.FSType
	readOnly := cinder.ReadOnly

	return &cinderVolumeBuilder{
		cinderVolume: &cinderVolume{
			podUID:  podUID,
			volName: spec.Name(),
			pdName:  pdName,
			mounter: mounter,
			manager: manager,
			plugin:  plugin,
		},
		fsType:             fsType,
		readOnly:           readOnly,
		blockDeviceMounter: &cinderSafeFormatAndMount{mounter, exec.New()}}, nil
}
func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
	source, readOnly := plugin.getRBDVolumeSource(spec)

	// Fall back to the conventional Ceph defaults when the source leaves these fields empty.
	pool := source.RBDPool
	if pool == "" {
		pool = "rbd"
	}
	id := source.RadosUser
	if id == "" {
		id = "admin"
	}
	keyring := source.Keyring
	if keyring == "" {
		keyring = "/etc/ceph/keyring"
	}

	return &rbdBuilder{
		rbd: &rbd{
			podUID:   podUID,
			volName:  spec.Name(),
			Image:    source.RBDImage,
			Pool:     pool,
			ReadOnly: readOnly,
			manager:  manager,
			mounter:  &mount.SafeFormatAndMount{mounter, exec.New()},
			plugin:   plugin,
		},
		Mon:     source.CephMonitors,
		Id:      id,
		Keyring: keyring,
		Secret:  secret,
		fsType:  source.FSType,
	}, nil
}
func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// fc volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var readOnly bool
	var fc *api.FCVolumeSource
	if spec.Volume != nil && spec.Volume.FC != nil {
		fc = spec.Volume.FC
		readOnly = fc.ReadOnly
	} else {
		fc = spec.PersistentVolume.Spec.FC
		readOnly = spec.ReadOnly
	}

	if fc.Lun == nil {
		return nil, fmt.Errorf("empty lun")
	}
	lun := strconv.Itoa(*fc.Lun)

	return &fcDiskBuilder{
		fcDisk: &fcDisk{
			podUID:  podUID,
			volName: spec.Name(),
			wwns:    fc.TargetWWNs,
			lun:     lun,
			manager: manager,
			mounter: &mount.SafeFormatAndMount{mounter, exec.New()},
			io:      &osIOHandler{},
			plugin:  plugin},
		fsType:   fc.FSType,
		readOnly: readOnly,
	}, nil
}
func newRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
	}
	return &hostPathRecycler{
		name:    spec.Name(),
		path:    spec.PersistentVolume.Spec.HostPath.Path,
		host:    host,
		config:  config,
		timeout: volume.CalculateTimeoutForVolume(config.RecyclerMinimumTimeout, config.RecyclerTimeoutIncrement, spec.PersistentVolume),
	}, nil
}
func (plugin *glusterfsPlugin) newBuilderInternal(spec *volume.Spec, ep *api.Endpoints, pod *api.Pod, mounter mount.Interface, exe exec.Interface) (volume.Builder, error) {
	source, readOnly := plugin.getGlusterVolumeSource(spec)
	return &glusterfsBuilder{
		glusterfs: &glusterfs{
			volName: spec.Name(),
			mounter: mounter,
			pod:     pod,
			plugin:  plugin,
		},
		hosts:    ep,
		path:     source.Path,
		readOnly: readOnly,
		exe:      exe}, nil
}
func (plugin *gitRepoPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
	return &gitRepoVolumeBuilder{
		gitRepoVolume: &gitRepoVolume{
			volName: spec.Name(),
			podUID:  pod.UID,
			plugin:  plugin,
		},
		pod:      *pod,
		source:   spec.Volume.GitRepo.Repository,
		revision: spec.Volume.GitRepo.Revision,
		exec:     exec.New(),
		opts:     opts,
	}, nil
}
func (vh *volumeHost) NewWrapperCleaner(spec *volume.Spec, podUID types.UID) (volume.Cleaner, error) {
	plugin, err := vh.kubelet.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		return nil, err
	}
	if plugin == nil {
		// Not found but not an error.
		return nil, nil
	}
	c, err := plugin.NewCleaner(spec.Name(), podUID)
	if err == nil && c == nil {
		return nil, errUnsupportedVolumeType
	}
	return c, nil
}
func (plugin *downwardAPIPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
	v := &downwardAPIVolume{
		volName: spec.Name(),
		pod:     pod,
		podUID:  pod.UID,
		plugin:  plugin,
	}
	// Map each requested field reference to the file it should be written to.
	v.fieldReferenceFileNames = make(map[string]string)
	for _, fileInfo := range spec.Volume.DownwardAPI.Items {
		v.fieldReferenceFileNames[fileInfo.FieldRef.FieldPath] = path.Clean(fileInfo.Path)
	}
	return &downwardAPIVolumeBuilder{
		downwardAPIVolume: v,
		opts:              &opts}, nil
}
func (plugin *emptyDirPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions, chconRunner chconRunner) (volume.Builder, error) {
	medium := api.StorageMediumDefault
	if spec.Volume.EmptyDir != nil { // Support a non-specified source as EmptyDir.
		medium = spec.Volume.EmptyDir.Medium
	}
	return &emptyDir{
		pod:           pod,
		volName:       spec.Name(),
		medium:        medium,
		mounter:       mounter,
		mountDetector: mountDetector,
		plugin:        plugin,
		rootContext:   opts.RootContext,
		chconRunner:   chconRunner,
	}, nil
}
func (plugin *nfsPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface) (volume.Builder, error) {
	// NFS volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// NFS volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV.
	var source *api.NFSVolumeSource
	var readOnly bool
	if spec.Volume != nil && spec.Volume.NFS != nil {
		source = spec.Volume.NFS
		readOnly = spec.Volume.NFS.ReadOnly
	} else {
		source = spec.PersistentVolume.Spec.NFS
		readOnly = spec.ReadOnly
	}
	return &nfsBuilder{
		nfs: &nfs{
			volName: spec.Name(),
			mounter: mounter,
			pod:     pod,
			plugin:  plugin,
		},
		server:     source.Server,
		exportPath: source.Path,
		readOnly:   readOnly,
	}, nil
}
func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Builder, error) {
	cephvs := plugin.getVolumeSource(spec)
	// Default to the admin user and its conventional secret file when not specified.
	id := cephvs.User
	if id == "" {
		id = "admin"
	}
	secret_file := cephvs.SecretFile
	if secret_file == "" {
		secret_file = "/etc/ceph/" + id + ".secret"
	}
	return &cephfsBuilder{
		cephfs: &cephfs{
			podUID:      podUID,
			volName:     spec.Name(),
			mon:         cephvs.Monitors,
			secret:      secret,
			id:          id,
			secret_file: secret_file,
			readonly:    cephvs.ReadOnly,
			mounter:     mounter,
			plugin:      plugin},
	}, nil
}
func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
	}
	if plugin == nil {
		// Not found but not an error.
		return nil, nil
	}
	builder, err := plugin.NewBuilder(spec, pod, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate volume plugin for %s: %v", spec.Name(), err)
	}
	glog.V(3).Infof("Used volume plugin %q for %s", plugin.Name(), spec.Name())
	return builder, nil
}
func newDeleter(spec *volume.Spec, host volume.VolumeHost) (volume.Deleter, error) {
	// Guard against a nil PersistentVolume as well, mirroring newRecycler, so the
	// dereference below cannot panic.
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil {
		return nil, fmt.Errorf("spec.PersistentVolumeSource.HostPath is nil")
	}
	return &hostPathDeleter{spec.Name(), spec.PersistentVolume.Spec.HostPath.Path, host}, nil
}