func TestDeleter(t *testing.T) { tempPath := fmt.Sprintf("/tmp/hostpath/%s", util.NewUUID()) defer os.RemoveAll(tempPath) err := os.MkdirAll(tempPath, 0750) if err != nil { t.Fatal("Failed to create tmp directory for deleter: %v", err) } plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}} plug, err := plugMgr.FindDeletablePluginBySpec(spec) if err != nil { t.Errorf("Can't find the plugin by name") } deleter, err := plug.NewDeleter(spec) if err != nil { t.Errorf("Failed to make a new Deleter: %v", err) } if deleter.GetPath() != tempPath { t.Errorf("Expected %s but got %s", tempPath, deleter.GetPath()) } if err := deleter.Delete(); err != nil { t.Errorf("Mock Recycler expected to return nil but got %s", err) } if exists, _ := util.FileExists("foo"); exists { t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath) } }
func TestBuilderAndCleanerTypeAssert(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{ VolumeID: "pd", FSType: "ext4", }, }, } builder, err := plug.(*awsElasticBlockStorePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) if _, ok := builder.(volume.Cleaner); ok { t.Errorf("Volume Builder can be type-assert to Cleaner") } cleaner, err := plug.(*awsElasticBlockStorePlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) if _, ok := cleaner.(volume.Builder); ok { t.Errorf("Volume Cleaner can be type-assert to Builder") } }
func newTestHost(t *testing.T) volume.VolumeHost { tempDir, err := ioutil.TempDir("/tmp", "git_repo_test.") if err != nil { t.Fatalf("can't make a temp rootdir: %v", err) } return volume.NewFakeVolumeHost(tempDir, nil, empty_dir.ProbeVolumePlugins()) }
func newInitializedVolumePlugMgr(t *testing.T) (volume.VolumePluginMgr, string) { plugMgr := volume.VolumePluginMgr{} dir, err := ioutil.TempDir("", "flocker") assert.NoError(t, err) plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(dir, nil, nil)) return plugMgr, dir }
func TestDeleterTempDir(t *testing.T) { tests := map[string]struct { expectedFailure bool path string }{ "just-tmp": {true, "/tmp"}, "not-tmp": {true, "/nottmp"}, "good-tmp": {false, "/tmp/scratch"}, } for name, test := range tests { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: test.path}}}}} plug, _ := plugMgr.FindDeletablePluginBySpec(spec) deleter, _ := plug.NewDeleter(spec) err := deleter.Delete() if err == nil && test.expectedFailure { t.Errorf("Expected failure for test '%s' but got nil err", name) } if err != nil && !test.expectedFailure { t.Errorf("Unexpected failure for test '%s': %v", name, err) } } }
func newTestHost(t *testing.T, fakeKubeClient client.Interface) volume.VolumeHost { tempDir, err := ioutil.TempDir("/tmp", "persistent_volume_test.") if err != nil { t.Fatalf("can't make a temp rootdir: %v", err) } return volume.NewFakeVolumeHost(tempDir, fakeKubeClient, testProbeVolumePlugins()) }
func newTestHost(t *testing.T, client client.Interface) volume.VolumeHost { tempDir, err := ioutil.TempDir(basePath, "downwardApi_volume_test.") if err != nil { t.Fatalf("can't make a temp rootdir: %v", err) } return volume.NewFakeVolumeHost(tempDir, client, empty_dir.ProbeVolumePlugins()) }
func newTestHost(t *testing.T, client client.Interface) (string, volume.VolumeHost) { tempDir, err := ioutil.TempDir("/tmp", "secret_volume_test.") if err != nil { t.Fatalf("can't make a temp rootdir: %v", err) } return tempDir, volume.NewFakeVolumeHost(tempDir, client, empty_dir.ProbeVolumePlugins()) }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } ep := &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "ep", }, Subsets: []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}}, Ports: []api.EndpointPort{{"foo", 80, api.ProtocolTCP}}, }}, } o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(pv) o.Add(claim) o.Add(ep) client := &testclient.Fake{} client.AddReactor("*", "*", testclient.ObjectReaction(o, testapi.Default.RESTMapper())) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(glusterfsPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.IsReadOnly() { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestPlugin(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ CephFS: &api.CephFSVolumeSource{ Monitors: []string{"a", "b"}, User: "******", SecretRef: nil, SecretFile: "/etc/ceph/user.secret", }, }, } builder, err := plug.(*cephfsPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets") volumePath := builder.GetPath() if err != nil { t.Errorf("Failed to make a new Builder: %v", err) } if builder == nil { t.Errorf("Got a nil Builder: %v") } path := builder.GetPath() if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~cephfs/vol1" { t.Errorf("Got unexpected path: %s", path) } if err := builder.SetUp(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", volumePath) } else { t.Errorf("SetUp() failed: %v", err) } } cleaner, err := plug.(*cephfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } if cleaner == nil { t.Errorf("Got a nil Cleaner: %v") } if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", volumePath) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } }
func doTestPlugin(t *testing.T, spec *volume.Spec) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd") if err != nil { t.Errorf("Can't find the plugin by name") } builder, err := plug.(*rbdPlugin).newBuilderInternal(spec, types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{}, "secrets") if err != nil { t.Errorf("Failed to make a new Builder: %v", err) } if builder == nil { t.Error("Got a nil Builder") } path := builder.GetPath() if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~rbd/vol1" { t.Errorf("Got unexpected path: %s", path) } if err := builder.SetUp(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } cleaner, err := plug.(*rbdPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{}) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } if cleaner == nil { t.Error("Got a nil Cleaner") } if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", path) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } }
// Construct an instance of a plugin, by name. func makePluginUnderTest(t *testing.T, plugName, basePath string) volume.VolumePlugin { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(basePath, nil, nil)) plug, err := plugMgr.FindPluginByName(plugName) if err != nil { t.Errorf("Can't find the plugin by name") } return plug }
func TestGetAccessModes(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/host-path") if err != nil { t.Errorf("Can't find the plugin by name") } if len(plug.GetAccessModes()) != 1 || plug.GetAccessModes()[0] != api.ReadWriteOnce { t.Errorf("Expected %s PersistentVolumeAccessMode", api.ReadWriteOnce) } }
func TestGetAccessModes(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fc") if err != nil { t.Errorf("Can't find the plugin by name") } if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) { t.Errorf("Expected two AccessModeTypes: %s and %s", api.ReadWriteOnce, api.ReadOnlyMany) } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { lun := 0 pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ FC: &api.FCVolumeSource{ TargetWWNs: []string{"some_wwn"}, FSType: "ext4", Lun: &lun, }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(pv) o.Add(claim) client := &testclient.Fake{} client.AddReactor("*", "*", testclient.ObjectReaction(o, testapi.Default.RESTMapper())) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(fcPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.IsReadOnly() { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestGetAccessModes(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/aws-ebs") if err != nil { t.Errorf("Can't find the plugin by name") } if !contains(plug.GetAccessModes(), api.ReadWriteOnce) { t.Errorf("Expected to find AccessMode: %s", api.ReadWriteOnce) } if len(plug.GetAccessModes()) != 1 { t.Errorf("Expected to find exactly one AccessMode") } }
func TestCanSupport(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/fc") if err != nil { t.Errorf("Can't find the plugin by name") } if plug.Name() != "kubernetes.io/fc" { t.Errorf("Wrong name: %s", plug.Name()) } if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) { t.Errorf("Expected false") } }
func TestCanSupport(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs") if err != nil { t.Errorf("Can't find the plugin by name") } if plug.Name() != "kubernetes.io/aws-ebs" { t.Errorf("Wrong name: %s", plug.Name()) } if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}}}}) { t.Errorf("Expected true") } if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}}}}}) { t.Errorf("Expected true") } }
func TestPlugin(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/vol1"}}, } pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{}) if err != nil { t.Errorf("Failed to make a new Builder: %v", err) } if builder == nil { t.Errorf("Got a nil Builder") } path := builder.GetPath() if path != "/vol1" { t.Errorf("Got unexpected path: %s", path) } if err := builder.SetUp(); err != nil { t.Errorf("Expected success, got: %v", err) } cleaner, err := plug.NewCleaner("vol1", types.UID("poduid")) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } if cleaner == nil { t.Errorf("Got a nil Cleaner") } if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } }
func TestCanSupport(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs") if err != nil { t.Errorf("Can't find the plugin by name") } if plug.Name() != "kubernetes.io/nfs" { t.Errorf("Wrong name: %s", plug.Name()) } if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{NFS: &api.NFSVolumeSource{}}}}) { t.Errorf("Expected true") } if !plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{NFS: &api.NFSVolumeSource{}}}}}) { t.Errorf("Expected true") } if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) { t.Errorf("Expected false") } }
func TestRecycler(t *testing.T) { plugMgr := volume.VolumePluginMgr{} pluginHost := volume.NewFakeVolumeHost("/tmp/fake", nil, nil) plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.NewFakeRecycler, nil, nil, volume.VolumeConfig{}}}, pluginHost) spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/foo"}}}}} plug, err := plugMgr.FindRecyclablePluginBySpec(spec) if err != nil { t.Errorf("Can't find the plugin by name") } recycler, err := plug.NewRecycler(spec) if err != nil { t.Errorf("Failed to make a new Recyler: %v", err) } if recycler.GetPath() != spec.PersistentVolume.Spec.HostPath.Path { t.Errorf("Expected %s but got %s", spec.PersistentVolume.Spec.HostPath.Path, recycler.GetPath()) } if err := recycler.Recycle(); err != nil { t.Errorf("Mock Recycler expected to return nil but got %s", err) } }
func TestCreater(t *testing.T) { tempPath := "/tmp/hostpath/" defer os.RemoveAll(tempPath) err := os.MkdirAll(tempPath, 0750) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}} plug, err := plugMgr.FindCreatablePluginBySpec(spec) if err != nil { t.Errorf("Can't find the plugin by name") } creater, err := plug.NewCreater(volume.VolumeOptions{CapacityMB: 100, PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete}) if err != nil { t.Errorf("Failed to make a new Creater: %v", err) } pv, err := creater.Create() if err != nil { t.Errorf("Unexpected error creating volume: %v", err) } if pv.Spec.HostPath.Path == "" { t.Errorf("Expected pv.Spec.HostPath.Path to not be empty: %#v", pv) } expectedCapacity := resource.NewQuantity(100*1024*1024, resource.BinarySI) actualCapacity := pv.Spec.Capacity[api.ResourceStorage] expectedAmt := expectedCapacity.Value() actualAmt := actualCapacity.Value() if expectedAmt != actualAmt { t.Errorf("Expected capacity %+v but got %+v", expectedAmt, actualAmt) } if pv.Spec.PersistentVolumeReclaimPolicy != api.PersistentVolumeReclaimDelete { t.Errorf("Expected reclaim policy %+v but got %+v", api.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy) } os.RemoveAll(pv.Spec.HostPath.Path) }
// TestPersistentVolumeRecycler is an integration test: it runs a real master,
// a claim binder, and a PV recycler, then walks one PV first through the
// Recycle reclaim policy and (after an etcd reset) through the Delete policy.
func TestPersistentVolumeRecycler(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	deleteAllEtcdKeys()
	// Separate clients for each controller plus one for the test's own calls.
	binderClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	testClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})

	binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(binderClient, 1*time.Second)
	binder.Run()
	defer binder.Stop()

	// The fake volume plugin lets Recycle/Delete succeed without real storage.
	recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(recyclerClient, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
	recycler.Run()
	defer recycler.Stop()

	// This PV will be claimed, released, and recycled.
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
			Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
		},
	}
	// The claim requests less than the PV offers, so the binder can pair them.
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
		Spec: api.PersistentVolumeClaimSpec{
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}

	// Watch PV phase transitions from resourceVersion "0".
	w, _ := testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
	defer w.Stop()

	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

	// wait until the binder pairs the volume and claim
	waitForPersistentVolumePhase(w, api.VolumeBound)

	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}

	// Recycle policy: Released, then back to Available for re-binding.
	waitForPersistentVolumePhase(w, api.VolumeReleased)
	waitForPersistentVolumePhase(w, api.VolumeAvailable)

	// end of Recycler test.
	// Deleter test begins now.
	// tests are serial because running masters concurrently that delete keys may cause similar tests to time out
	deleteAllEtcdKeys()

	// change the reclamation policy of the PV for the next test
	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete

	// Fresh watch for the second scenario (the old one is stopped at return).
	w, _ = testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
	defer w.Stop()

	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

	waitForPersistentVolumePhase(w, api.VolumeBound)

	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}

	waitForPersistentVolumePhase(w, api.VolumeReleased)

	// Delete policy removes the PV object entirely; block until the watch
	// reports the deletion.
	for {
		event := <-w.ResultChan()
		if event.Type == watch.Deleted {
			break
		}
	}
}
func doTestPlugin(t *testing.T, spec *volume.Spec) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs") if err != nil { t.Errorf("Can't find the plugin by name") } fake := &mount.FakeMounter{} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, err := plug.(*nfsPlugin).newBuilderInternal(spec, pod, fake) volumePath := builder.GetPath() if err != nil { t.Errorf("Failed to make a new Builder: %v", err) } if builder == nil { t.Errorf("Got a nil Builder") } path := builder.GetPath() if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~nfs/vol1" { t.Errorf("Got unexpected path: %s", path) } if err := builder.SetUp(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", volumePath) } else { t.Errorf("SetUp() failed: %v", err) } } if builder.(*nfsBuilder).readOnly { t.Errorf("The volume source should not be read-only and it is.") } if len(fake.Log) != 1 { t.Errorf("Mount was not called exactly one time. It was called %d times.", len(fake.Log)) } else { if fake.Log[0].Action != mount.FakeActionMount { t.Errorf("Unexpected mounter action: %#v", fake.Log[0]) } } fake.ResetLog() cleaner, err := plug.(*nfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), fake) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } if cleaner == nil { t.Errorf("Got a nil Cleaner") } if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", volumePath) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } if len(fake.Log) != 1 { t.Errorf("Unmount was not called exactly one time. 
It was called %d times.", len(fake.Log)) } else { if fake.Log[0].Action != mount.FakeActionUnmount { t.Errorf("Unexpected mounter action: %#v", fake.Log[0]) } } fake.ResetLog() }
func doTestPlugin(t *testing.T, spec *volume.Spec) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs") if err != nil { t.Errorf("Can't find the plugin by name") } ep := &api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "foo"}, Subsets: []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}}}}} var fcmd exec.FakeCmd fcmd = exec.FakeCmd{ CombinedOutputScript: []exec.FakeCombinedOutputAction{ // mount func() ([]byte, error) { return []byte{}, nil }, }, } fake := exec.FakeExec{ CommandScript: []exec.FakeCommandAction{ func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, }, } pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, err := plug.(*glusterfsPlugin).newBuilderInternal(spec, ep, pod, &mount.FakeMounter{}, &fake) volumePath := builder.GetPath() if err != nil { t.Errorf("Failed to make a new Builder: %v", err) } if builder == nil { t.Error("Got a nil Builder") } path := builder.GetPath() if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~glusterfs/vol1" { t.Errorf("Got unexpected path: %s", path) } if err := builder.SetUp(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", volumePath) } else { t.Errorf("SetUp() failed: %v", err) } } cleaner, err := plug.(*glusterfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } if cleaner == nil { t.Error("Got a nil Cleaner") } if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(volumePath); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", volumePath) } else if !os.IsNotExist(err) { 
t.Errorf("SetUp() failed: %v", err) } }
func TestPlugin(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder") if err != nil { t.Errorf("Can't find the plugin by name") } spec := &api.Volume{ Name: "vol1", VolumeSource: api.VolumeSource{ Cinder: &api.CinderVolumeSource{ VolumeID: "pd", FSType: "ext4", }, }, } builder, err := plug.(*cinderPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) if err != nil { t.Errorf("Failed to make a new Builder: %v", err) } if builder == nil { t.Errorf("Got a nil Builder: %v") } path := builder.GetPath() if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~cinder/vol1" { t.Errorf("Got unexpected path: %s", path) } if err := builder.SetUp(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { t.Errorf("SetUp() failed, volume path not created: %s", path) } else { t.Errorf("SetUp() failed: %v", err) } } cleaner, err := plug.(*cinderPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } if cleaner == nil { t.Errorf("Got a nil Cleaner: %v") } if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } if _, err := os.Stat(path); err == nil { t.Errorf("TearDown() failed, volume path still exists: %s", path) } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } }
func TestBindingWithExamples(t *testing.T) { api.ForTesting_ReferencesAllowBlankSelfLinks = true o := testclient.NewObjects(api.Scheme, api.Scheme) if err := testclient.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/claims/claim-01.yaml", o, api.Scheme); err != nil { t.Fatal(err) } if err := testclient.AddObjectsFromPath("../../../docs/user-guide/persistent-volumes/volumes/local-01.yaml", o, api.Scheme); err != nil { t.Fatal(err) } client := &testclient.Fake{} client.AddReactor("*", "*", testclient.ObjectReaction(o, api.RESTMapper)) pv, err := client.PersistentVolumes().Get("any") pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle if err != nil { t.Errorf("Unexpected error getting PV from client: %v", err) } claim, error := client.PersistentVolumeClaims("ns").Get("any") if error != nil { t.Errorf("Unexpected error getting PVC from client: %v", err) } volumeIndex := NewPersistentVolumeOrderedIndex() mockClient := &mockBinderClient{ volume: pv, claim: claim, } plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(host_path.ProbeRecyclableVolumePlugins(newMockRecycler, volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) recycler := &PersistentVolumeRecycler{ kubeClient: client, client: mockClient, pluginMgr: plugMgr, } // adds the volume to the index, making the volume available syncVolume(volumeIndex, mockClient, pv) if pv.Status.Phase != api.VolumeAvailable { t.Errorf("Expected phase %s but got %s", api.VolumeBound, pv.Status.Phase) } // an initial sync for a claim will bind it to an unbound volume, triggers state change syncClaim(volumeIndex, mockClient, claim) // state change causes another syncClaim to update statuses syncClaim(volumeIndex, mockClient, claim) // claim updated volume's status, causing an update and syncVolume call syncVolume(volumeIndex, mockClient, pv) if pv.Spec.ClaimRef == nil { t.Errorf("Expected ClaimRef but got nil for pv.Status.ClaimRef: %+v\n", pv) } if pv.Status.Phase != 
api.VolumeBound { t.Errorf("Expected phase %s but got %s", api.VolumeBound, pv.Status.Phase) } if claim.Status.Phase != api.ClaimBound { t.Errorf("Expected phase %s but got %s", api.ClaimBound, claim.Status.Phase) } if len(claim.Status.AccessModes) != len(pv.Spec.AccessModes) { t.Errorf("Expected phase %s but got %s", api.ClaimBound, claim.Status.Phase) } if claim.Status.AccessModes[0] != pv.Spec.AccessModes[0] { t.Errorf("Expected access mode %s but got %s", claim.Status.AccessModes[0], pv.Spec.AccessModes[0]) } // pretend the user deleted their claim mockClient.claim = nil syncVolume(volumeIndex, mockClient, pv) if pv.Status.Phase != api.VolumeReleased { t.Errorf("Expected phase %s but got %s", api.VolumeReleased, pv.Status.Phase) } if pv.Spec.ClaimRef == nil { t.Errorf("Expected non-nil ClaimRef: %+v", pv.Spec) } mockClient.volume = pv // released volumes with a PersistentVolumeReclaimPolicy (recycle/delete) can have further processing err = recycler.reclaimVolume(pv) if err != nil { t.Errorf("Unexpected error reclaiming volume: %+v", err) } if pv.Status.Phase != api.VolumePending { t.Errorf("Expected phase %s but got %s", api.VolumePending, pv.Status.Phase) } // after the recycling changes the phase to Pending, the binder picks up again // to remove any vestiges of binding and make the volume Available again syncVolume(volumeIndex, mockClient, pv) if pv.Status.Phase != api.VolumeAvailable { t.Errorf("Expected phase %s but got %s", api.VolumeAvailable, pv.Status.Phase) } if pv.Spec.ClaimRef != nil { t.Errorf("Expected nil ClaimRef: %+v", pv.Spec) } }