func (plugin *quobytePlugin) CanSupport(spec *volume.Spec) bool {
	if (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Quobyte == nil) ||
		(spec.Volume != nil && spec.Volume.Quobyte == nil) {
		return false
	}

	// If Quobyte is already mounted we don't need to check if the binary is installed.
	if mounter, err := plugin.newMounterInternal(spec, nil, plugin.host.GetMounter()); err == nil {
		qm, _ := mounter.(*quobyteMounter)
		pluginDir := plugin.host.GetPluginDir(strings.EscapeQualifiedNameForDisk(quobytePluginName))
		if mounted, err := qm.pluginDirIsMounted(pluginDir); mounted && err == nil {
			glog.V(4).Infof("quobyte: can support")
			return true
		}
	} else {
		glog.V(4).Infof("quobyte: Error: %v", err)
	}

	if out, err := exec.New().Command("ls", "/sbin/mount.quobyte").CombinedOutput(); err == nil {
		glog.V(4).Infof("quobyte: can support: %s", string(out))
		return true
	}

	return false
}
func (b *flockerBuilder) getMetaDir() string {
	return path.Join(
		b.plugin.host.GetPodPluginDir(
			b.flocker.pod.UID, strings.EscapeQualifiedNameForDisk(flockerPluginName),
		),
		b.datasetName,
	)
}
// GetPath returns the path to the user-specific mount of a Quobyte volume.
// It returns a path in the format ../user@volume, e.g. ../root@MyVolume,
// or, if a group is set, ../user#group@volume.
func (quobyteVolume *quobyte) GetPath() string {
	user := quobyteVolume.user
	if len(user) == 0 {
		user = "root"
	}

	// Quobyte has only one mount in the PluginDir where all volumes are mounted.
	// The Quobyte client does a fixed-user mapping.
	pluginDir := quobyteVolume.plugin.host.GetPluginDir(strings.EscapeQualifiedNameForDisk(quobytePluginName))
	if len(quobyteVolume.group) > 0 {
		return path.Join(pluginDir, fmt.Sprintf("%s#%s@%s", user, quobyteVolume.group, quobyteVolume.volume))
	}

	return path.Join(pluginDir, fmt.Sprintf("%s@%s", user, quobyteVolume.volume))
}
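A minimal, standalone sketch of the path layout described in the comment above. The plugin directory, user, group, and volume values here are illustrative assumptions for the example only, not values taken from the plugin's real configuration:

package main

import (
	"fmt"
	"path"
)

// buildQuobytePath mirrors the user/group/volume formatting described above.
// It is an illustration only; the real plugin derives its plugin directory
// from the volume host.
func buildQuobytePath(pluginDir, user, group, volume string) string {
	if len(user) == 0 {
		user = "root" // default user, matching the "../root@MyVolume" example
	}
	if len(group) > 0 {
		return path.Join(pluginDir, fmt.Sprintf("%s#%s@%s", user, group, volume))
	}
	return path.Join(pluginDir, fmt.Sprintf("%s@%s", user, volume))
}

func main() {
	dir := "/var/lib/kubelet/plugins/kubernetes.io~quobyte" // hypothetical plugin dir
	fmt.Println(buildQuobytePath(dir, "", "", "MyVolume"))           // .../root@MyVolume
	fmt.Println(buildQuobytePath(dir, "alice", "staff", "MyVolume")) // .../alice#staff@MyVolume
}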
func (vv *vsphereVolume) GetPath() string {
	name := vsphereVolumePluginName
	return vv.plugin.host.GetPodVolumeDir(vv.podUID, utilstrings.EscapeQualifiedNameForDisk(name), vv.volName)
}
func (r *cinderVolumeDeleter) GetPath() string {
	name := cinderVolumePluginName
	return r.plugin.host.GetPodVolumeDir(r.podUID, strings.EscapeQualifiedNameForDisk(name), r.volName)
}
func (rbd *rbd) GetPath() string {
	name := rbdPluginName
	// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
	return rbd.plugin.host.GetPodVolumeDir(rbd.podUID, strings.EscapeQualifiedNameForDisk(name), rbd.volName)
}
func (ppd *photonPersistentDisk) GetPath() string {
	name := photonPersistentDiskPluginName
	return ppd.plugin.host.GetPodVolumeDir(ppd.podUID, utilstrings.EscapeQualifiedNameForDisk(name), ppd.volName)
}
func (fv *FakeVolume) getPath() string {
	return path.Join(fv.Plugin.Host.GetPodVolumeDir(fv.PodUID, utilstrings.EscapeQualifiedNameForDisk(fv.Plugin.PluginName), fv.VolName))
}
func (d *downwardAPIVolume) GetPath() string {
	return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName), d.volName)
}
func (azureFileVolume *azureFile) GetPath() string {
	name := azureFilePluginName
	return azureFileVolume.plugin.host.GetPodVolumeDir(azureFileVolume.pod.UID, strings.EscapeQualifiedNameForDisk(name), azureFileVolume.volName)
}
// GetPath returns the pod volume directory for this CephFS volume.
func (cephfsVolume *cephfs) GetPath() string {
	name := cephfsPluginName
	return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
}
func (d *gcePersistentDiskDeleter) GetPath() string {
	name := gcePersistentDiskPluginName
	return d.plugin.host.GetPodVolumeDir(d.podUID, strings.EscapeQualifiedNameForDisk(name), d.volName)
}
func (gr *gitRepoVolume) GetPath() string {
	name := gitRepoPluginName
	return gr.plugin.host.GetPodVolumeDir(gr.podUID, utilstrings.EscapeQualifiedNameForDisk(name), gr.volName)
}
// GetPath gets the actual volume mount directory based on the plugin's driver name.
func (f *flexVolumeDisk) GetPath() string {
	name := f.driverName
	return f.plugin.host.GetPodVolumeDir(f.podUID, utilstrings.EscapeQualifiedNameForDisk(name), f.volName)
}
func (azure *azureDisk) GetPath() string {
	name := azureDataDiskPluginName
	return azure.plugin.host.GetPodVolumeDir(azure.podUID, utilstrings.EscapeQualifiedNameForDisk(name), azure.volName)
}
// SetUp attaches the disk and bind mounts to the volume path.
func (mounter *quobyteMounter) SetUp(fsGroup *int64) error {
	pluginDir := mounter.plugin.host.GetPluginDir(strings.EscapeQualifiedNameForDisk(quobytePluginName))
	return mounter.SetUpAt(pluginDir, fsGroup)
}
func (d *glusterfsVolumeDeleter) GetPath() string {
	name := glusterfsPluginName
	return d.plugin.host.GetPodVolumeDir(d.glusterfsMounter.glusterfs.pod.UID, strings.EscapeQualifiedNameForDisk(name), d.glusterfsMounter.glusterfs.volName)
}
func (ed *emptyDir) getMetaDir() string {
	return path.Join(ed.plugin.host.GetPodPluginDir(ed.pod.UID, strings.EscapeQualifiedNameForDisk(emptyDirPluginName)), ed.volName)
}
func (sv *configMapVolume) GetPath() string {
	return sv.plugin.host.GetPodVolumeDir(sv.podUID, strings.EscapeQualifiedNameForDisk(configMapPluginName), sv.volName)
}
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(emptyDirPluginName), volName)
}
func (b *downwardAPIVolumeMounter) getMetaDir() string {
	return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
}
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(awsElasticBlockStorePluginName), volName)
}
func (iscsi *iscsiDisk) GetPath() string {
	name := iscsiPluginName
	// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
	return iscsi.plugin.host.GetPodVolumeDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name), iscsi.volName)
}
func (nfsVolume *nfs) GetPath() string {
	name := nfsPluginName
	return nfsVolume.plugin.host.GetPodVolumeDir(nfsVolume.pod.UID, strings.EscapeQualifiedNameForDisk(name), nfsVolume.volName)
}
func (fc *fcDisk) GetPath() string {
	name := fcPluginName
	// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
	return fc.plugin.host.GetPodVolumeDir(fc.podUID, strings.EscapeQualifiedNameForDisk(name), fc.volName)
}
func (b *secretVolumeBuilder) getMetaDir() string {
	return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, strings.EscapeQualifiedNameForDisk(secretPluginName)), b.volName)
}
func (cd *cinderVolume) GetPath() string {
	name := cinderVolumePluginName
	return cd.plugin.host.GetPodVolumeDir(cd.podUID, strings.EscapeQualifiedNameForDisk(name), cd.volName)
}
func (glusterfsVolume *glusterfs) GetPath() string {
	name := glusterfsPluginName
	return glusterfsVolume.plugin.host.GetPodVolumeDir(glusterfsVolume.pod.UID, strings.EscapeQualifiedNameForDisk(name), glusterfsVolume.volName)
}
func TestNewBuilder(t *testing.T) {
	tests := []struct {
		pv              *api.PersistentVolume
		claim           *api.PersistentVolumeClaim
		plugin          volume.VolumePlugin
		podVolume       api.VolumeSource
		testFunc        func(builder volume.Builder, plugin volume.VolumePlugin) error
		expectedFailure bool
	}{
		{
			pv: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{
					Name: "pvA",
				},
				Spec: api.PersistentVolumeSpec{
					PersistentVolumeSource: api.PersistentVolumeSource{
						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
					},
					ClaimRef: &api.ObjectReference{
						Name: "claimA",
					},
				},
			},
			claim: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{
					Name:      "claimA",
					Namespace: "nsA",
				},
				Spec: api.PersistentVolumeClaimSpec{
					VolumeName: "pvA",
				},
				Status: api.PersistentVolumeClaimStatus{
					Phase: api.ClaimBound,
				},
			},
			podVolume: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ReadOnly:  false,
					ClaimName: "claimA",
				},
			},
			plugin: gce_pd.ProbeVolumePlugins()[0],
			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
				if !strings.Contains(builder.GetPath(), utilstrings.EscapeQualifiedNameForDisk(plugin.Name())) {
					return fmt.Errorf("builder path expected to contain plugin name. Got: %s", builder.GetPath())
				}
				return nil
			},
			expectedFailure: false,
		},
		{
			pv: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{
					Name: "pvB",
				},
				Spec: api.PersistentVolumeSpec{
					PersistentVolumeSource: api.PersistentVolumeSource{
						HostPath: &api.HostPathVolumeSource{Path: "/somepath"},
					},
					ClaimRef: &api.ObjectReference{
						Name: "claimB",
					},
				},
			},
			claim: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{
					Name:      "claimB",
					Namespace: "nsB",
				},
				Spec: api.PersistentVolumeClaimSpec{
					VolumeName: "pvA",
				},
			},
			podVolume: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ReadOnly:  false,
					ClaimName: "claimB",
				},
			},
			plugin: host_path.ProbeVolumePlugins(volume.VolumeConfig{})[0],
			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
				if builder.GetPath() != "/somepath" {
					return fmt.Errorf("Expected HostPath.Path /somepath, got: %s", builder.GetPath())
				}
				return nil
			},
			expectedFailure: false,
		},
		{
			pv: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{
					Name: "pvA",
				},
				Spec: api.PersistentVolumeSpec{
					PersistentVolumeSource: api.PersistentVolumeSource{
						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
					},
				},
			},
			claim: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{
					Name:      "claimA",
					Namespace: "nsA",
				},
				Spec: api.PersistentVolumeClaimSpec{
					VolumeName: "pvA",
				},
				Status: api.PersistentVolumeClaimStatus{
					Phase: api.ClaimBound,
				},
			},
			podVolume: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ReadOnly:  false,
					ClaimName: "claimA",
				},
			},
			plugin: gce_pd.ProbeVolumePlugins()[0],
			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
				if builder != nil {
					return fmt.Errorf("Unexpected non-nil builder: %+v", builder)
				}
				return nil
			},
			expectedFailure: true, // missing pv.Spec.ClaimRef
		},
		{
			pv: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{
					Name: "pvA",
				},
				Spec: api.PersistentVolumeSpec{
					PersistentVolumeSource: api.PersistentVolumeSource{
						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
					},
					ClaimRef: &api.ObjectReference{
						Name: "claimB",
						UID:  types.UID("abc123"),
					},
				},
			},
			claim: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{
					Name:      "claimA",
					Namespace: "nsA",
					UID:       types.UID("def456"),
				},
				Spec: api.PersistentVolumeClaimSpec{
					VolumeName: "pvA",
				},
				Status: api.PersistentVolumeClaimStatus{
					Phase: api.ClaimBound,
				},
			},
			podVolume: api.VolumeSource{
				PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
					ReadOnly:  false,
					ClaimName: "claimA",
				},
			},
			plugin: gce_pd.ProbeVolumePlugins()[0],
			testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
				if builder != nil {
					return fmt.Errorf("Unexpected non-nil builder: %+v", builder)
				}
				return nil
			},
			expectedFailure: true, // mismatched pv.Spec.ClaimRef and pvc
		},
	}

	for _, item := range tests {
		client := fake.NewSimpleClientset(item.pv, item.claim)

		plugMgr := volume.VolumePluginMgr{}
		tempDir, vh := newTestHost(t, client)
		defer os.RemoveAll(tempDir)
		plugMgr.InitPlugins(testProbeVolumePlugins(), vh)

		plug, err := plugMgr.FindPluginByName("kubernetes.io/persistent-claim")
		if err != nil {
			t.Errorf("Can't find the plugin by name")
		}
		spec := &volume.Spec{Volume: &api.Volume{VolumeSource: item.podVolume}}
		pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
		builder, err := plug.NewBuilder(spec, pod, volume.VolumeOptions{})

		if !item.expectedFailure {
			if err != nil {
				t.Errorf("Failed to make a new Builder: %v", err)
			}
			if builder == nil {
				t.Errorf("Got a nil Builder: %v", builder)
			}
		}

		if err := item.testFunc(builder, item.plugin); err != nil {
			t.Errorf("Unexpected error %+v", err)
		}
	}
}
func (d *awsElasticBlockStoreDeleter) GetPath() string {
	name := awsElasticBlockStorePluginName
	return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(name), d.volName)
}