func TestPluginBackCompat(t *testing.T) {
	basePath, err := utiltesting.MkTmpdir("emptydirTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(basePath)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, "" /* rootContext */)

	spec := &api.Volume{
		Name: "vol1",
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		// Fatal: the path check below would dereference a nil Mounter.
		t.Fatalf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
		t.Errorf("Got unexpected path: %s", volPath)
	}
}
func TestMounterAndUnmounterTypeAssert(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
	if err != nil {
		t.Errorf("Can't find the plugin by name: %v", err)
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
				VolumeID: "pd",
				FSType:   "ext4",
			},
		},
	}

	mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if _, ok := mounter.(volume.Unmounter); ok {
		t.Errorf("Volume Mounter can be type-asserted to Unmounter")
	}

	unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if _, ok := unmounter.(volume.Mounter); ok {
		t.Errorf("Volume Unmounter can be type-asserted to Mounter")
	}
}
func TestPluginVolume(t *testing.T) {
	vol := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			ISCSI: &api.ISCSIVolumeSource{
				TargetPortal: "127.0.0.1:3260",
				IQN:          "iqn.2014-12.server:storage.target01",
				FSType:       "ext4",
				Lun:          0,
			},
		},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
// TestMetrics tests that MetricsProvider methods return sane values.
func TestMetrics(t *testing.T) {
	// Create an empty temp directory for the volume
	tmpDir, err := utiltesting.MkTmpdir("empty_dir_test")
	if err != nil {
		t.Fatalf("Can't make a tmp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir, "" /* rootContext */)

	spec := &api.Volume{
		Name: "vol1",
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		// Fatal: the Mounter is dereferenced below.
		t.Fatalf("Failed to make a new Mounter: %v", err)
	}

	// Need to create the subdirectory
	if err := os.MkdirAll(mounter.GetPath(), 0755); err != nil {
		t.Fatalf("Can't create volume directory: %v", err)
	}

	expectedEmptyDirUsage, err := volumetest.FindEmptyDirectoryUsageOnTmpfs()
	if err != nil {
		t.Errorf("Unexpected error finding expected empty directory usage on tmpfs: %v", err)
	}

	// TODO(pwittroc): Move this into a reusable testing utility
	metrics, err := mounter.GetMetrics()
	if err != nil {
		t.Errorf("Unexpected error when calling GetMetrics: %v", err)
	}
	if e, a := expectedEmptyDirUsage.Value(), metrics.Used.Value(); e != a {
		t.Errorf("Unexpected value for empty directory; expected %v, got %v", e, a)
	}
	if metrics.Capacity.Value() <= 0 {
		t.Errorf("Expected Capacity to be greater than 0")
	}
	if metrics.Available.Value() <= 0 {
		t.Errorf("Expected Available to be greater than 0")
	}
}
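// The TODO in TestMetrics suggests moving the metric assertions into a
// reusable helper. The function below is a minimal sketch of what that could
// look like; it is not part of the original tests and assumes that
// volume.Metrics exposes Used, Capacity, and Available as resource quantities.
func assertSaneMetrics(t *testing.T, metrics *volume.Metrics, expectedUsed int64) {
	// Used should match the measured consumption of the volume directory.
	if got := metrics.Used.Value(); got != expectedUsed {
		t.Errorf("unexpected Used value: expected %v, got %v", expectedUsed, got)
	}
	// Capacity and Available describe the backing filesystem and must be positive.
	if metrics.Capacity.Value() <= 0 {
		t.Errorf("expected Capacity to be greater than 0")
	}
	if metrics.Available.Value() <= 0 {
		t.Errorf("expected Available to be greater than 0")
	}
}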
// doTestPlugin sets up a volume and tears it back down.
func doTestPlugin(t *testing.T, config pluginTestConfig) {
	basePath, err := utiltesting.MkTmpdir("emptydir_volume_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	defer os.RemoveAll(basePath)

	var (
		volumePath  = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
		metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")

		plug       = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, config.rootContext)
		volumeName = "test-volume"
		spec       = &api.Volume{
			Name:         volumeName,
			VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: config.medium}},
		}

		physicalMounter = mount.FakeMounter{}
		mountDetector   = fakeMountDetector{}
		pod             = &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	)

	// Set up the SELinux options on the pod
	if config.SELinuxOptions != nil {
		pod.Spec = api.PodSpec{
			Containers: []api.Container{
				{
					SecurityContext: &api.SecurityContext{
						SELinuxOptions: config.SELinuxOptions,
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name: volumeName,
						},
					},
				},
			},
		}
	}

	if config.idempotent {
		physicalMounter.MountPoints = []mount.MountPoint{
			{
				Path: volumePath,
			},
		}
		util.SetReady(metadataDir)
	}

	mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
		pod,
		&physicalMounter,
		&mountDetector,
		volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		// Fatal: the Mounter is dereferenced below.
		t.Fatalf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != volumePath {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	// Stat the directory and check the permission bits.
	// perm (the expected directory mode) is a package-level constant not shown
	// in this excerpt.
	fileinfo, err := os.Stat(volPath)
	if !config.idempotent {
		if err != nil {
			if os.IsNotExist(err) {
				t.Errorf("SetUp() failed, volume path not created: %s", volPath)
			} else {
				t.Errorf("SetUp() failed: %v", err)
			}
		}

		if e, a := perm, fileinfo.Mode().Perm(); e != a {
			t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a)
		}
	} else if err == nil {
		// If this test is for idempotency and we were able to stat the volume
		// path, it's an error.
		t.Errorf("Volume directory was created unexpectedly")
	}

	// Check the number of mounts performed during setup
	if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
	} else if config.expectedSetupMounts == 1 &&
		(physicalMounter.Log[0].Action != mount.FakeActionMount || physicalMounter.Log[0].FSType != "tmpfs") {
		t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()

	// Make an unmounter for the volume
	teardownMedium := mediumUnknown
	if config.medium == api.StorageMediumMemory {
		teardownMedium = mediumMemory
	}
	unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
	unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		// Fatal: the Unmounter is dereferenced below.
		t.Fatalf("Got a nil Unmounter")
	}

	// Tear down the volume
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}

	// Check the number of physicalMounter calls during teardown
	if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
	} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
		t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()
}
// createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereferences any PVC to get the underlying PV object,
// if needed.
func (adc *attachDetachController) createVolumeSpec(
	podVolume api.Volume, podNamespace string) (*volume.Spec, error) {
	if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
		glog.V(10).Infof(
			"Found PVC, ClaimName: %q/%q",
			podNamespace,
			pvcSource.ClaimName)

		// If podVolume is a PVC, fetch the real PV behind the claim
		pvName, pvcUID, err := adc.getPVCFromCacheExtractPV(
			podNamespace, pvcSource.ClaimName)
		if err != nil {
			return nil, fmt.Errorf(
				"error processing PVC %q/%q: %v",
				podNamespace,
				pvcSource.ClaimName,
				err)
		}

		glog.V(10).Infof(
			"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
			podNamespace,
			pvcSource.ClaimName,
			pvcUID,
			pvName)

		// Fetch actual PV object
		volumeSpec, err := adc.getPVSpecFromCache(
			pvName, pvcSource.ReadOnly, pvcUID)
		if err != nil {
			return nil, fmt.Errorf(
				"error processing PVC %q/%q: %v",
				podNamespace,
				pvcSource.ClaimName,
				err)
		}

		glog.V(10).Infof(
			"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
			volumeSpec.Name,
			pvName,
			podNamespace,
			pvcSource.ClaimName,
			pvcUID)

		return volumeSpec, nil
	}

	// Do not return the original volume object; since it comes from the shared
	// informer, it may be mutated by another consumer.
	clonedPodVolumeObj, err := api.Scheme.DeepCopy(podVolume)
	if err != nil || clonedPodVolumeObj == nil {
		return nil, fmt.Errorf(
			"failed to deep copy %q volume object. err=%v", podVolume.Name, err)
	}

	clonedPodVolume, ok := clonedPodVolumeObj.(api.Volume)
	if !ok {
		return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to api.Volume", clonedPodVolumeObj)
	}

	return volume.NewSpecFromVolume(&clonedPodVolume), nil
}
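// A minimal sketch of how createVolumeSpec might be driven when processing a
// pod's volumes. The processPodVolumes name and its body are illustrative
// assumptions, not the controller's actual code.
func (adc *attachDetachController) processPodVolumes(pod *api.Pod) {
	for _, podVolume := range pod.Spec.Volumes {
		volumeSpec, err := adc.createVolumeSpec(podVolume, pod.Namespace)
		if err != nil {
			// Skip volumes whose PVC/PV could not be resolved yet.
			glog.V(10).Infof("Skipping volume %q for pod %q/%q: %v",
				podVolume.Name, pod.Namespace, pod.Name, err)
			continue
		}
		// The returned spec is safe to hand to other components: it is either a
		// PV spec fetched from the cache or a deep copy of the pod volume.
		_ = volumeSpec
	}
}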
func TestPlugin(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("gcepdTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ PDName: "pd", FSType: "ext4", }, }, } fakeManager := &fakePDManager{} fakeMounter := &mount.FakeMounter{} mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter) if err != nil { t.Errorf("Failed to make a new Mounter: %v", err) } if mounter == nil { t.Errorf("Got a nil Mounter") } volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~gce-pd/vol1") path := mounter.GetPath() if path != volPath { t.Errorf("Got unexpected path: %s", path) } if err := mounter.SetUp(nil); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } fakeManager = &fakePDManager{} unmounter, err := plug.(*gcePersistentDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter) if err != nil { t.Errorf("Failed to make a new Unmounter: %v", err) } if unmounter == nil { t.Errorf("Got a nil Unmounter") } if err := unmounter.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", path) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } // Test Provisioner cap := resource.MustParse("100Mi") options := volume.VolumeOptions{ Capacity: cap, AccessModes: []api.PersistentVolumeAccessMode{ api.ReadWriteOnce, }, PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{}) persistentSpec, err := provisioner.Provision() if err != nil { t.Errorf("Provision() failed: %v", err) } if persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName != "test-gce-volume-name" { t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName) } cap = persistentSpec.Spec.Capacity[api.ResourceStorage] size := cap.Value() if size != 100*1024*1024*1024 { t.Errorf("Provision() returned unexpected volume size: %v", size) } if persistentSpec.Labels["fakepdmanager"] != "yes" { t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels) } // Test Deleter volSpec := &volume.Spec{ PersistentVolume: persistentSpec, } deleter, err := plug.(*gcePersistentDiskPlugin).newDeleterInternal(volSpec, &fakePDManager{}) err = deleter.Delete() if err != nil { t.Errorf("Deleter() failed: %v", err) } }
func TestPlugin(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("cephTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ CephFS: &api.CephFSVolumeSource{ Monitors: []string{"a", "b"}, User: "******", SecretRef: nil, SecretFile: "/etc/ceph/user.secret", }, }, } mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets") volumePath := mounter.GetPath() if err != nil { t.Errorf("Failed to make a new Mounter: %v", err) } if mounter == nil { t.Errorf("Got a nil Mounter") } volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1") path := mounter.GetPath() if path != volpath { t.Errorf("Got unexpected path: %s", path) } if err := mounter.SetUp(nil); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", volumePath) } else { t.Errorf("SetUp() failed: %v", err) } } unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) if err != nil { t.Errorf("Failed to make a new Unmounter: %v", err) } if unmounter == nil { t.Errorf("Got a nil Unmounter") } if err := unmounter.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", volumePath) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } }
func TestPlugin(t *testing.T) {
	// Initial setup to test volume plugin
	tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
	if err != nil {
		t.Errorf("Can't find the plugin by name: %v", err)
	}
	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
				VolumePath: "[local] test-volume-name.vmdk",
				FSType:     "ext4",
			},
		},
	}

	// Test Mounter
	fakeManager := &fakePDManager{}
	fakeMounter := &mount.FakeMounter{}
	mounter, err := plug.(*vsphereVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		// Fatal: the Mounter is dereferenced below.
		t.Fatalf("Got a nil Mounter")
	}

	mntPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~vsphere-volume/vol1")
	volPath := mounter.GetPath()
	if volPath != mntPath {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if !fakeManager.attachCalled {
		t.Errorf("Attach watch not called")
	}

	// Test Unmounter
	fakeManager = &fakePDManager{}
	unmounter, err := plug.(*vsphereVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		// Fatal: the Unmounter is dereferenced below.
		t.Fatalf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
	if !fakeManager.detachCalled {
		t.Errorf("Detach watch not called")
	}

	// Test Provisioner
	cap := resource.MustParse("100Mi")
	options := volume.VolumeOptions{
		Capacity: cap,
		AccessModes: []api.PersistentVolumeAccessMode{
			api.ReadWriteOnce,
		},
		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
	}
	provisioner, err := plug.(*vsphereVolumePlugin).newProvisionerInternal(options, &fakePDManager{})
	if err != nil {
		t.Errorf("Failed to make a new Provisioner: %v", err)
	}
	persistentSpec, err := provisioner.Provision()
	if err != nil {
		t.Errorf("Provision() failed: %v", err)
	}

	if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath != "[local] test-volume-name.vmdk" {
		t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath)
	}
	cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
	size := cap.Value()
	if size != 100*1024 {
		t.Errorf("Provision() returned unexpected volume size: %v", size)
	}

	// Test Deleter
	volSpec := &volume.Spec{
		PersistentVolume: persistentSpec,
	}
	deleter, err := plug.(*vsphereVolumePlugin).newDeleterInternal(volSpec, &fakePDManager{})
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Delete() failed: %v", err)
	}
}