func TestCanSupport(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume") if err != nil { t.Errorf("Can't find the plugin by name") } if plug.GetPluginName() != "kubernetes.io/vsphere-volume" { t.Errorf("Wrong name: %s", plug.GetPluginName()) } if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{VsphereVolume: &api.VsphereVirtualDiskVolumeSource{}}}}) { t.Errorf("Expected true") } if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{VsphereVolume: &api.VsphereVirtualDiskVolumeSource{}}}}}) { t.Errorf("Expected true") } }
func TestPluginBackCompat(t *testing.T) { basePath, err := utiltesting.MkTmpdir("emptydirTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(basePath) plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, "" /* rootContext */) spec := &api.Volume{ Name: "vol1", } pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{}) if err != nil { t.Errorf("Failed to make a new Mounter: %v", err) } if mounter == nil { t.Errorf("Got a nil Mounter") } volPath := mounter.GetPath() if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") { t.Errorf("Got unexpected path: %s", volPath) } }
func TestMounterAndUnmounterTypeAssert(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("awsebsTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{ VolumeID: "pd", FSType: "ext4", }, }, } mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) if _, ok := mounter.(volume.Unmounter); ok { t.Errorf("Volume Mounter can be type-assert to Unmounter") } unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) if _, ok := unmounter.(volume.Mounter); ok { t.Errorf("Volume Unmounter can be type-assert to Mounter") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("iscsi_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ ISCSI: &api.ISCSIVolumeSource{ TargetPortal: "127.0.0.1:3260", IQN: "iqn.2014-12.server:storage.target01", FSType: "ext4", Lun: 0, }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } client := fake.NewSimpleClientset(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */)) plug, _ := plugMgr.FindPluginByName(iscsiPluginName) // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) if !mounter.GetAttributes().ReadOnly { t.Errorf("Expected true for mounter.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } client := fake.NewSimpleClientset(pv, claim) tmpDir, err := utiltesting.MkTmpdir("gcepdTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */)) plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName) // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) if !mounter.GetAttributes().ReadOnly { t.Errorf("Expected true for mounter.IsReadOnly") } }
// FindEmptyDirectoryUsageOnTmpfs finds the expected usage of an empty directory existing on
// a tmpfs filesystem on this system.
func FindEmptyDirectoryUsageOnTmpfs() (*resource.Quantity, error) {
	tmpDir, err := utiltesting.MkTmpdir("metrics_du_test")
	if err != nil {
		return nil, err
	}
	// The temp dir exists only to be measured; remove it once 'du' has run.
	defer os.RemoveAll(tmpDir)
	out, err := exec.Command("nice", "-n", "19", "du", "-s", "-B", "1", tmpDir).CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("failed command 'du' on %s with error %v", tmpDir, err)
	}
	used, err := resource.ParseQuantity(strings.Fields(string(out))[0])
	if err != nil {
		return nil, fmt.Errorf("failed to parse 'du' output %s due to error %v", out, err)
	}
	used.Format = resource.BinarySI
	return &used, nil
}
func TestGetAccessModes(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("gcepdTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/gce-pd") if err != nil { t.Errorf("Can't find the plugin by name") } if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) { t.Errorf("Expected two AccessModeTypes: %s and %s", api.ReadWriteOnce, api.ReadOnlyMany) } }
func TestCanSupport(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("emptydirTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir, "" /* rootContext */) if plug.GetPluginName() != "kubernetes.io/empty-dir" { t.Errorf("Wrong name: %s", plug.GetPluginName()) } if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}) { t.Errorf("Expected true") } if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) { t.Errorf("Expected false") } }
// TestMetrics tests that MetricProvider methods return sane values.
func TestMetrics(t *testing.T) {
	// Create an empty temp directory for the volume
	tmpDir, err := utiltesting.MkTmpdir("empty_dir_test")
	if err != nil {
		t.Fatalf("Can't make a tmp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir, "" /* rootContext */)

	spec := &api.Volume{
		Name: "vol1",
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}

	// Need to create the subdirectory
	os.MkdirAll(mounter.GetPath(), 0755)

	expectedEmptyDirUsage, err := volumetest.FindEmptyDirectoryUsageOnTmpfs()
	if err != nil {
		t.Errorf("Unexpected error finding expected empty directory usage on tmpfs: %v", err)
	}

	// TODO(pwittroc): Move this into a reusable testing utility
	metrics, err := mounter.GetMetrics()
	if err != nil {
		t.Errorf("Unexpected error when calling GetMetrics %v", err)
	}
	if e, a := expectedEmptyDirUsage.Value(), metrics.Used.Value(); e != a {
		t.Errorf("Unexpected value for empty directory; expected %v, got %v", e, a)
	}
	if metrics.Capacity.Value() <= 0 {
		t.Errorf("Expected Capacity to be greater than 0")
	}
	if metrics.Available.Value() <= 0 {
		t.Errorf("Expected Available to be greater than 0")
	}
}
func TestCanSupport(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("iscsi_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPluginByName("kubernetes.io/iscsi") if err != nil { t.Errorf("Can't find the plugin by name") } if plug.GetPluginName() != "kubernetes.io/iscsi" { t.Errorf("Wrong name: %s", plug.GetPluginName()) } if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) { t.Errorf("Expected false") } }
// doTestPlugin sets up a volume and tears it back down.
func doTestPlugin(t *testing.T, config pluginTestConfig) {
	basePath, err := utiltesting.MkTmpdir("emptydir_volume_test")
	if err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	}
	defer os.RemoveAll(basePath)

	var (
		volumePath  = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
		metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")

		plug       = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, config.rootContext)
		volumeName = "test-volume"
		spec       = &api.Volume{
			Name:         volumeName,
			VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: config.medium}},
		}

		physicalMounter = mount.FakeMounter{}
		mountDetector   = fakeMountDetector{}
		pod             = &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
	)

	// Set up the SELinux options on the pod
	if config.SELinuxOptions != nil {
		pod.Spec = api.PodSpec{
			Containers: []api.Container{
				{
					SecurityContext: &api.SecurityContext{
						SELinuxOptions: config.SELinuxOptions,
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name: volumeName,
						},
					},
				},
			},
		}
	}

	if config.idempotent {
		physicalMounter.MountPoints = []mount.MountPoint{
			{
				Path: volumePath,
			},
		}
		util.SetReady(metadataDir)
	}

	mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
		pod,
		&physicalMounter,
		&mountDetector,
		volume.VolumeOptions{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	volPath := mounter.GetPath()
	if volPath != volumePath {
		t.Errorf("Got unexpected path: %s", volPath)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}

	// Stat the directory and check the permission bits
	fileinfo, err := os.Stat(volPath)
	if !config.idempotent {
		if err != nil {
			if os.IsNotExist(err) {
				t.Errorf("SetUp() failed, volume path not created: %s", volPath)
			} else {
				t.Errorf("SetUp() failed: %v", err)
			}
		}

		if e, a := perm, fileinfo.Mode().Perm(); e != a {
			t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a)
		}
	} else if err == nil {
		// If this test is for idempotency and we were able
		// to stat the volume path, it's an error.
		t.Errorf("Volume directory was created unexpectedly")
	}

	// Check the number of mounts performed during setup
	if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
	} else if config.expectedSetupMounts == 1 &&
		(physicalMounter.Log[0].Action != mount.FakeActionMount || physicalMounter.Log[0].FSType != "tmpfs") {
		t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()

	// Make an unmounter for the volume
	teardownMedium := mediumUnknown
	if config.medium == api.StorageMediumMemory {
		teardownMedium = mediumMemory
	}
	unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
	unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	// Tear down the volume
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volPath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volPath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}

	// Check the number of physicalMounter calls during teardown
	if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
		t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
	} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
		t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
	}
	physicalMounter.ResetLog()
}
func TestPlugin(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("gcepdTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ PDName: "pd", FSType: "ext4", }, }, } fakeManager := &fakePDManager{} fakeMounter := &mount.FakeMounter{} mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter) if err != nil { t.Errorf("Failed to make a new Mounter: %v", err) } if mounter == nil { t.Errorf("Got a nil Mounter") } volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~gce-pd/vol1") path := mounter.GetPath() if path != volPath { t.Errorf("Got unexpected path: %s", path) } if err := mounter.SetUp(nil); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } fakeManager = &fakePDManager{} unmounter, err := plug.(*gcePersistentDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter) if err != nil { t.Errorf("Failed to make a new Unmounter: %v", err) } if unmounter == nil { t.Errorf("Got a nil Unmounter") } if err := unmounter.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", path) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } // Test Provisioner cap := resource.MustParse("100Mi") options := volume.VolumeOptions{ Capacity: cap, AccessModes: []api.PersistentVolumeAccessMode{ api.ReadWriteOnce, }, PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete, } provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{}) persistentSpec, err := provisioner.Provision() if err != nil { t.Errorf("Provision() failed: %v", err) } if persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName != "test-gce-volume-name" { t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName) } cap = persistentSpec.Spec.Capacity[api.ResourceStorage] size := cap.Value() if size != 100*1024*1024*1024 { t.Errorf("Provision() returned unexpected volume size: %v", size) } if persistentSpec.Labels["fakepdmanager"] != "yes" { t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels) } // Test Deleter volSpec := &volume.Spec{ PersistentVolume: persistentSpec, } deleter, err := plug.(*gcePersistentDiskPlugin).newDeleterInternal(volSpec, &fakePDManager{}) err = deleter.Delete() if err != nil { t.Errorf("Deleter() failed: %v", err) } }
func doTestPlugin(t *testing.T, spec *volume.Spec) { tmpDir, err := utiltesting.MkTmpdir("iscsi_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPluginByName("kubernetes.io/iscsi") if err != nil { t.Errorf("Can't find the plugin by name") } fakeManager := NewFakeDiskManager() defer fakeManager.Cleanup() fakeMounter := &mount.FakeMounter{} mounter, err := plug.(*iscsiPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter) if err != nil { t.Errorf("Failed to make a new Mounter: %v", err) } if mounter == nil { t.Error("Got a nil Mounter") } path := mounter.GetPath() expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~iscsi/vol1", tmpDir) if path != expectedPath { t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path) } if err := mounter.SetUp(nil); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } if !fakeManager.attachCalled { t.Errorf("Attach was not called") } fakeManager2 := NewFakeDiskManager() defer fakeManager2.Cleanup() unmounter, err := plug.(*iscsiPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter) if err != nil { t.Errorf("Failed to make a new Unmounter: %v", err) } if unmounter == nil { t.Error("Got a nil Unmounter") } if err := unmounter.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", path) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } if !fakeManager2.detachCalled { t.Errorf("Detach was not called") } }
func TestPlugin(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("cephTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)) plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ CephFS: &api.CephFSVolumeSource{ Monitors: []string{"a", "b"}, User: "******", SecretRef: nil, SecretFile: "/etc/ceph/user.secret", }, }, } mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets") volumePath := mounter.GetPath() if err != nil { t.Errorf("Failed to make a new Mounter: %v", err) } if mounter == nil { t.Errorf("Got a nil Mounter") } volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1") path := mounter.GetPath() if path != volpath { t.Errorf("Got unexpected path: %s", path) } if err := mounter.SetUp(nil); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", volumePath) } else { t.Errorf("SetUp() failed: %v", err) } } unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) if err != nil { t.Errorf("Failed to make a new Unmounter: %v", err) } if unmounter == nil { t.Errorf("Got a nil Unmounter") } if err := unmounter.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", volumePath) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } }
func TestPlugin(t *testing.T) {
	// Initial setup to test volume plugin
	tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

	plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}

	spec := &api.Volume{
		Name: "vol1",
		VolumeSource: api.VolumeSource{
			VsphereVolume: &api.VsphereVirtualDiskVolumeSource{
				VolumePath: "[local] test-volume-name.vmdk",
				FSType:     "ext4",
			},
		},
	}

	// Test Mounter
	fakeManager := &fakePDManager{}
	fakeMounter := &mount.FakeMounter{}
	mounter, err := plug.(*vsphereVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}

	mntPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~vsphere-volume/vol1")
	path := mounter.GetPath()
	if path != mntPath {
		t.Errorf("Got unexpected path: %s", path)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if !fakeManager.attachCalled {
		t.Errorf("Attach watch not called")
	}

	// Test Unmounter
	fakeManager = &fakePDManager{}
	unmounter, err := plug.(*vsphereVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
	if !fakeManager.detachCalled {
		t.Errorf("Detach watch not called")
	}

	// Test Provisioner
	cap := resource.MustParse("100Mi")
	options := volume.VolumeOptions{
		Capacity: cap,
		AccessModes: []api.PersistentVolumeAccessMode{
			api.ReadWriteOnce,
		},
		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
	}
	provisioner, err := plug.(*vsphereVolumePlugin).newProvisionerInternal(options, &fakePDManager{})
	if err != nil {
		t.Errorf("Failed to make a new Provisioner: %v", err)
	}
	persistentSpec, err := provisioner.Provision()
	if err != nil {
		t.Errorf("Provision() failed: %v", err)
	}
	if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath != "[local] test-volume-name.vmdk" {
		t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath)
	}
	cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
	size := cap.Value()
	if size != 100*1024 {
		t.Errorf("Provision() returned unexpected volume size: %v", size)
	}

	// Test Deleter
	volSpec := &volume.Spec{
		PersistentVolume: persistentSpec,
	}
	deleter, err := plug.(*vsphereVolumePlugin).newDeleterInternal(volSpec, &fakePDManager{})
	if err != nil {
		t.Errorf("Failed to make a new Deleter: %v", err)
	}
	if err := deleter.Delete(); err != nil {
		t.Errorf("Deleter() failed: %v", err)
	}
}