func TestPersistentClaimReadOnlyFlag(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("glusterfs_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) pv := &v1.PersistentVolume{ ObjectMeta: v1.ObjectMeta{ Name: "pvA", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}, }, ClaimRef: &v1.ObjectReference{ Name: "claimA", }, }, } claim := &v1.PersistentVolumeClaim{ ObjectMeta: v1.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: v1.PersistentVolumeClaimStatus{ Phase: v1.ClaimBound, }, } ep := &v1.Endpoints{ ObjectMeta: v1.ObjectMeta{ Namespace: "nsA", Name: "ep", }, Subsets: []v1.EndpointSubset{{ Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}, Ports: []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}}, }}, } client := fake.NewSimpleClientset(pv, claim, ep) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil)) plug, _ := plugMgr.FindPluginByName(glusterfsPluginName) // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}} mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) if !mounter.GetAttributes().ReadOnly { t.Errorf("Expected true for mounter.IsReadOnly") } }
// getPVSpec fetches the PV object with the given name from the API server
// and returns a volume.Spec representing it.
// An error is returned if the call to fetch the PV object fails.
func (dswp *desiredStateOfWorldPopulator) getPVSpec(
	name string,
	pvcReadOnly bool,
	expectedClaimUID types.UID) (*volume.Spec, string, error) {
	pv, err := dswp.kubeClient.Core().PersistentVolumes().Get(name, metav1.GetOptions{})
	if err != nil || pv == nil {
		return nil, "", fmt.Errorf(
			"failed to fetch PV %q from API server. err=%v", name, err)
	}

	if pv.Spec.ClaimRef == nil {
		return nil, "", fmt.Errorf(
			"found PV object %q but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
			name)
	}

	if pv.Spec.ClaimRef.UID != expectedClaimUID {
		return nil, "", fmt.Errorf(
			"found PV object %q but its pv.Spec.ClaimRef.UID (%q) does not point to claim.UID (%q)",
			name,
			pv.Spec.ClaimRef.UID,
			expectedClaimUID)
	}

	volumeGidValue := getPVVolumeGidAnnotationValue(pv)
	return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGidValue, nil
}

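// Every call site in this listing passes a bound *PersistentVolume plus a
// read-only flag to volume.NewSpecFromPersistentVolume. A minimal sketch of
// such a constructor, consistent with these call sites but not copied verbatim
// from any particular Kubernetes release, simply wraps both values in a
// volume.Spec so that plugins and tests can consume them uniformly:

// Sketch only (assumed shape, not verbatim upstream source): a Spec that
// carries either a pod-level Volume or a PersistentVolume, plus the read-only
// flag taken from the claim.
type Spec struct {
	Volume           *v1.Volume
	PersistentVolume *v1.PersistentVolume
	ReadOnly         bool
}

// NewSpecFromPersistentVolume wraps a PV and the claim's read-only flag.
func NewSpecFromPersistentVolume(pv *v1.PersistentVolume, readOnly bool) *Spec {
	return &Spec{
		PersistentVolume: pv,
		ReadOnly:         readOnly,
	}
}
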
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } ep := &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "ep", }, Subsets: []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}}, Ports: []api.EndpointPort{{"foo", 80, api.ProtocolTCP}}, }}, } o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(pv) o.Add(claim) o.Add(ep) client := &testclient.Fake{} client.AddReactor("*", "*", testclient.ObjectReaction(o, testapi.Default.RESTMapper())) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(glusterfsPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.IsReadOnly() { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("iscsi_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ ISCSI: &api.ISCSIVolumeSource{ TargetPortal: "127.0.0.1:3260", IQN: "iqn.2014-12.server:storage.target01", FSType: "ext4", Lun: 0, }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } client := fake.NewSimpleClientset(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil)) plug, _ := plugMgr.FindPluginByName(iscsiPluginName) // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) if !mounter.GetAttributes().ReadOnly { t.Errorf("Expected true for mounter.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("fc_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) lun := 0 pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ FC: &api.FCVolumeSource{ TargetWWNs: []string{"some_wwn"}, FSType: "ext4", Lun: &lun, }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } client := testclient.NewSimpleFake(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil)) plug, _ := plugMgr.FindPluginByName(fcPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.GetAttributes().ReadOnly { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("rbd_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ RBD: &api.RBDVolumeSource{ CephMonitors: []string{"a", "b"}, RBDImage: "bar", FSType: "ext4", }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } client := fake.NewSimpleClientset(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */)) plug, _ := plugMgr.FindPluginByName(rbdPluginName) // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) if !mounter.GetAttributes().ReadOnly { t.Errorf("Expected true for mounter.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { lun := 0 pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ FC: &api.FCVolumeSource{ TargetWWNs: []string{"some_wwn"}, FSType: "ext4", Lun: &lun, }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(pv) o.Add(claim) client := &testclient.Fake{} client.AddReactor("*", "*", testclient.ObjectReaction(o, testapi.Default.RESTMapper())) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(fcPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.IsReadOnly() { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(pv) o.Add(claim) client := &testclient.Fake{} client.AddReactor("*", "*", testclient.ObjectReaction(o, testapi.Default.RESTMapper())) tmpDir, err := ioutil.TempDir(os.TempDir(), "gcepdTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil)) plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.GetAttributes().ReadOnly { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ ISCSI: &api.ISCSIVolumeSource{ TargetPortal: "127.0.0.1:3260", IQN: "iqn.2014-12.server:storage.target01", FSType: "ext4", Lun: 0, }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(pv) o.Add(claim) client := &testclient.Fake{} client.AddReactor("*", "*", testclient.ObjectReaction(o, latest.RESTMapper)) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(iscsiPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil) if !builder.IsReadOnly() { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestPluginPersistentVolume(t *testing.T) { vol := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "vol1", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher", ReadOnly: false}, }, }, } doTestPluginAttachDetach(t, volume.NewSpecFromPersistentVolume(vol, false)) }
func TestPluginPersistentVolume(t *testing.T) { vol := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "vol1", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ Glusterfs: &api.GlusterfsVolumeSource{"ep", "vol", false}, }, }, } doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) }
func TestPluginPersistentVolume(t *testing.T) { vol := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "vol1", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "******", Group: "root"}, }, }, } doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) }
func TestPluginPersistentVolume(t *testing.T) { vol := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "vol1", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ NFS: &api.NFSVolumeSource{Server: "localhost", Path: "/tmp", ReadOnly: false}, }, }, } doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ RBD: &api.RBDVolumeSource{ CephMonitors: []string{"a", "b"}, RBDImage: "bar", FSType: "ext4", }, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(pv) o.Add(claim) client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(rbdPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil) if !builder.IsReadOnly() { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestPluginPersistentVolume(t *testing.T) { vol := &v1.PersistentVolume{ ObjectMeta: v1.ObjectMeta{ Name: "vol1", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false}, }, }, } doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("nfs_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) pv := &v1.PersistentVolume{ ObjectMeta: v1.ObjectMeta{ Name: "pvA", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ NFS: &v1.NFSVolumeSource{}, }, ClaimRef: &v1.ObjectReference{ Name: "claimA", }, }, } claim := &v1.PersistentVolumeClaim{ ObjectMeta: v1.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: v1.PersistentVolumeClaimStatus{ Phase: v1.ClaimBound, }, } client := fake.NewSimpleClientset(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, client, nil)) plug, _ := plugMgr.FindPluginByName(nfsPluginName) // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}} mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) if !mounter.GetAttributes().ReadOnly { t.Errorf("Expected true for mounter.IsReadOnly") } }
func (recycler *PersistentVolumeRecycler) handleDelete(pv *api.PersistentVolume) error { glog.V(5).Infof("Deleting PersistentVolume[%s]\n", pv.Name) currentPhase := pv.Status.Phase nextPhase := currentPhase spec := volume.NewSpecFromPersistentVolume(pv, false) plugin, err := recycler.pluginMgr.FindDeletablePluginBySpec(spec) if err != nil { nextPhase = api.VolumeFailed pv.Status.Message = fmt.Sprintf("%v", err) } // an error above means a suitable plugin for this volume was not found. // we don't need to attempt deleting when plugin is nil, but we do need to persist the next/failed phase // of the volume so that subsequent syncs won't attempt deletion through this handler func. if plugin != nil { deleter, err := plugin.NewDeleter(spec) if err != nil { return fmt.Errorf("Could not obtain Deleter for spec: %#v error: %v", spec, err) } // blocks until completion err = deleter.Delete() if err != nil { glog.Errorf("PersistentVolume[%s] failed deletion: %+v", pv.Name, err) pv.Status.Message = fmt.Sprintf("Deletion error: %s", err) nextPhase = recycler.handleReleaseFailure(pv) } else { glog.V(5).Infof("PersistentVolume[%s] successfully deleted through plugin\n", pv.Name) recycler.removeReleasedVolume(pv) // after successful deletion through the plugin, we can also remove the PV from the cluster if err := recycler.client.DeletePersistentVolume(pv); err != nil { return fmt.Errorf("error deleting persistent volume: %+v", err) } } } if currentPhase != nextPhase { glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase) pv.Status.Phase = nextPhase _, err := recycler.client.UpdatePersistentVolumeStatus(pv) if err != nil { // Rollback to previous phase pv.Status.Phase = currentPhase } } return nil }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } clientset := fake.NewSimpleClientset(pv, claim) tmpDir, err := utiltesting.MkTmpdir("awsebsTest") if err != nil { t.Fatalf("can't make a temp dir: %v", err) } defer os.RemoveAll(tmpDir) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, clientset, nil)) plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.GetAttributes().ReadOnly { t.Errorf("Expected true for builder.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ Quobyte: &api.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "******", Group: "root"}, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } tmpDir, err := utiltesting.MkTmpdir("quobyte_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) client := fake.NewSimpleClientset(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil)) plug, _ := plugMgr.FindPluginByName(quobytePluginName) // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) if !mounter.GetAttributes().ReadOnly { t.Errorf("Expected true for mounter.IsReadOnly") } }
func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume) error { glog.V(5).Infof("Recycling PersistentVolume[%s]\n", pv.Name) currentPhase := pv.Status.Phase nextPhase := currentPhase spec := volume.NewSpecFromPersistentVolume(pv, false) plugin, err := recycler.pluginMgr.FindRecyclablePluginBySpec(spec) if err != nil { nextPhase = api.VolumeFailed pv.Status.Message = fmt.Sprintf("%v", err) } // an error above means a suitable plugin for this volume was not found. // we don't need to attempt recycling when plugin is nil, but we do need to persist the next/failed phase // of the volume so that subsequent syncs won't attempt recycling through this handler func. if plugin != nil { volRecycler, err := plugin.NewRecycler(spec) if err != nil { return fmt.Errorf("Could not obtain Recycler for spec: %#v error: %v", spec, err) } // blocks until completion if err := volRecycler.Recycle(); err != nil { glog.Errorf("PersistentVolume[%s] failed recycling: %+v", pv.Name, err) pv.Status.Message = fmt.Sprintf("Recycling error: %s", err) nextPhase = recycler.handleReleaseFailure(pv) } else { glog.V(5).Infof("PersistentVolume[%s] successfully recycled\n", pv.Name) // The volume has been recycled. Remove any internal state to make // any subsequent bind+recycle cycle working. recycler.removeReleasedVolume(pv) nextPhase = api.VolumePending } } if currentPhase != nextPhase { glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase) pv.Status.Phase = nextPhase _, err := recycler.client.UpdatePersistentVolumeStatus(pv) if err != nil { // Rollback to previous phase pv.Status.Phase = currentPhase } } return nil }
// getPVSpecFromCache fetches the PV object with the given name from the shared
// internal PV store and returns a volume.Spec representing it.
// This method returns an error if a PV object does not exist in the cache with
// the given name.
// This method deep copies the PV object so the caller may use the returned
// volume.Spec object without worrying about it mutating unexpectedly.
func (adc *attachDetachController) getPVSpecFromCache(
	name string,
	pvcReadOnly bool,
	expectedClaimUID types.UID) (*volume.Spec, error) {
	pvObj, exists, err := adc.pvInformer.GetStore().GetByKey(name)
	if pvObj == nil || !exists || err != nil {
		return nil, fmt.Errorf(
			"failed to find PV %q in PVInformer cache. %v", name, err)
	}

	pv, ok := pvObj.(*api.PersistentVolume)
	if !ok || pv == nil {
		return nil, fmt.Errorf(
			"failed to cast %q object %#v to PersistentVolume", name, pvObj)
	}

	if pv.Spec.ClaimRef == nil {
		return nil, fmt.Errorf(
			"found PV object %q but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
			name)
	}

	if pv.Spec.ClaimRef.UID != expectedClaimUID {
		return nil, fmt.Errorf(
			"found PV object %q but its pv.Spec.ClaimRef.UID (%q) does not point to claim.UID (%q)",
			name,
			pv.Spec.ClaimRef.UID,
			expectedClaimUID)
	}

	// Do not return the object from the informer, since the store is shared it
	// may be mutated by another consumer.
	clonedPVObj, err := api.Scheme.DeepCopy(*pv)
	if err != nil || clonedPVObj == nil {
		return nil, fmt.Errorf(
			"failed to deep copy %q PV object. err=%v", name, err)
	}

	clonedPV, ok := clonedPVObj.(api.PersistentVolume)
	if !ok {
		return nil, fmt.Errorf(
			"failed to cast %q clonedPV %#v to PersistentVolume", name, pvObj)
	}

	return volume.NewSpecFromPersistentVolume(&clonedPV, pvcReadOnly), nil
}

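// The deep copy above is the standard guard against handing callers a pointer
// into the shared informer store. A stripped-down illustration of that
// copy-before-return pattern follows; the helper name and signature are
// illustrative only and are not part of the controller.

// pvCopyFromStore is a hypothetical helper: look a PV up in a shared
// cache.Store and return a private copy that the caller may mutate freely.
func pvCopyFromStore(store cache.Store, name string) (*api.PersistentVolume, error) {
	obj, exists, err := store.GetByKey(name)
	if err != nil || !exists {
		return nil, fmt.Errorf("failed to find PV %q in cache: %v", name, err)
	}
	pv, ok := obj.(*api.PersistentVolume)
	if !ok {
		return nil, fmt.Errorf("object %q is not a *api.PersistentVolume", name)
	}
	// Never return the cached object itself; other consumers share it.
	copied, err := api.Scheme.DeepCopy(*pv)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy PV %q: %v", name, err)
	}
	clone, ok := copied.(api.PersistentVolume)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for deep-copied PV %q", copied, name)
	}
	return &clone, nil
}
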
func TestPluginPersistentVolume(t *testing.T) { vol := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "vol1", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ RBD: &api.RBDVolumeSource{ CephMonitors: []string{"a", "b"}, RBDImage: "bar", FSType: "ext4", }, }, }, } doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) }
func TestPluginPersistentVolume(t *testing.T) { vol := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "vol1", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ ISCSI: &api.ISCSIVolumeSource{ TargetPortal: "127.0.0.1:3260", IQN: "iqn.2014-12.server:storage.target01", FSType: "ext4", Lun: 0, }, }, }, } doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) }
func TestPluginPersistentVolume(t *testing.T) {
	lun := 0
	vol := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "vol1",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource: api.PersistentVolumeSource{
				FC: &api.FCVolumeSource{
					TargetWWNs: []string{"some_wwn"},
					FSType:     "ext4",
					Lun:        &lun,
				},
			},
		},
	}

	doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false))
}

func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &v1.PersistentVolume{ ObjectMeta: v1.ObjectMeta{ Name: "pvA", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ AzureFile: &v1.AzureFileVolumeSource{}, }, ClaimRef: &v1.ObjectReference{ Name: "claimA", }, }, } claim := &v1.PersistentVolumeClaim{ ObjectMeta: v1.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: v1.PersistentVolumeClaimStatus{ Phase: v1.ClaimBound, }, } client := fake.NewSimpleClientset(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(azureFilePluginName) // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}} mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{}) if !mounter.GetAttributes().ReadOnly { t.Errorf("Expected true for mounter.IsReadOnly") } }
func TestPersistentClaimReadOnlyFlag(t *testing.T) { pv := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "pvA", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ NFS: &api.NFSVolumeSource{}, }, ClaimRef: &api.ObjectReference{ Name: "claimA", }, }, } claim := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ Name: "claimA", Namespace: "nsA", }, Spec: api.PersistentVolumeClaimSpec{ VolumeName: "pvA", }, Status: api.PersistentVolumeClaimStatus{ Phase: api.ClaimBound, }, } client := testclient.NewSimpleFake(pv, claim) plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(nfsPluginName) // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}) if !builder.GetAttributes().ReadOnly { t.Errorf("Expected true for builder.IsReadOnly") } }
func (recycler *PersistentVolumeRecycler) handleDelete(pv *api.PersistentVolume) error { glog.V(5).Infof("Deleting PersistentVolume[%s]\n", pv.Name) currentPhase := pv.Status.Phase nextPhase := currentPhase spec := volume.NewSpecFromPersistentVolume(pv, false) plugin, err := recycler.pluginMgr.FindDeletablePluginBySpec(spec) if err != nil { return fmt.Errorf("Could not find deletable volume plugin for spec: %+v", err) } deleter, err := plugin.NewDeleter(spec) if err != nil { return fmt.Errorf("could not obtain Deleter for spec: %+v", err) } // blocks until completion err = deleter.Delete() if err != nil { glog.Errorf("PersistentVolume[%s] failed deletion: %+v", pv.Name, err) pv.Status.Message = fmt.Sprintf("Deletion error: %s", err) nextPhase = api.VolumeFailed } else { glog.V(5).Infof("PersistentVolume[%s] successfully deleted through plugin\n", pv.Name) // after successful deletion through the plugin, we can also remove the PV from the cluster err = recycler.client.DeletePersistentVolume(pv) if err != nil { return fmt.Errorf("error deleting persistent volume: %+v", err) } } if currentPhase != nextPhase { glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase) pv.Status.Phase = nextPhase _, err := recycler.client.UpdatePersistentVolumeStatus(pv) if err != nil { // Rollback to previous phase pv.Status.Phase = currentPhase } } return nil }
func TestPluginPersistentVolume(t *testing.T) { tmpDir, err := utiltesting.MkTmpdir("flexvolume_test") if err != nil { t.Fatalf("error creating temp dir: %v", err) } defer os.RemoveAll(tmpDir) vol := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ Name: "vol1", }, Spec: api.PersistentVolumeSpec{ PersistentVolumeSource: api.PersistentVolumeSource{ FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher", ReadOnly: false}, }, }, } doTestPluginAttachDetach(t, volume.NewSpecFromPersistentVolume(vol, false), tmpDir) }
func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume) error { glog.V(5).Infof("Recycling PersistentVolume[%s]\n", pv.Name) currentPhase := pv.Status.Phase nextPhase := currentPhase spec := volume.NewSpecFromPersistentVolume(pv, false) plugin, err := recycler.pluginMgr.FindRecyclablePluginBySpec(spec) if err != nil { return fmt.Errorf("Could not find recyclable volume plugin for spec: %+v", err) } volRecycler, err := plugin.NewRecycler(spec) if err != nil { return fmt.Errorf("Could not obtain Recycler for spec: %+v", err) } // blocks until completion err = volRecycler.Recycle() if err != nil { glog.Errorf("PersistentVolume[%s] failed recycling: %+v", pv.Name, err) pv.Status.Message = fmt.Sprintf("Recycling error: %s", err) nextPhase = api.VolumeFailed } else { glog.V(5).Infof("PersistentVolume[%s] successfully recycled\n", pv.Name) nextPhase = api.VolumePending if err != nil { glog.Errorf("Error updating pv.Status: %+v", err) } } if currentPhase != nextPhase { glog.V(5).Infof("PersistentVolume[%s] changing phase from %s to %s\n", pv.Name, currentPhase, nextPhase) pv.Status.Phase = nextPhase _, err := recycler.client.UpdatePersistentVolumeStatus(pv) if err != nil { // Rollback to previous phase pv.Status.Phase = currentPhase } } return nil }
// mountExternalVolumes mounts the volumes declared in a pod, attaching them
// to the host if necessary, and returns a map containing information about
// the volumes for the pod or an error. This method is run multiple times,
// and requires that implementations of Attach() and SetUp() be idempotent.
//
// Note, in the future, the attach-detach controller will handle attaching and
// detaching volumes; this call site will be maintained for backward-
// compatibility with current behavior of static pods and pods created via the
// Kubelet's http API.
func (kl *Kubelet) mountExternalVolumes(pod *api.Pod) (kubecontainer.VolumeMap, error) {
	podVolumes := make(kubecontainer.VolumeMap)
	for i := range pod.Spec.Volumes {
		var fsGroup *int64
		if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.FSGroup != nil {
			fsGroup = pod.Spec.SecurityContext.FSGroup
		}

		rootContext, err := kl.getRootDirContext()
		if err != nil {
			return nil, err
		}

		var volSpec *volume.Spec
		if pod.Spec.Volumes[i].VolumeSource.PersistentVolumeClaim != nil {
			claimName := pod.Spec.Volumes[i].PersistentVolumeClaim.ClaimName
			pv, err := kl.getPersistentVolumeByClaimName(claimName, pod.Namespace)
			if err != nil {
				glog.Errorf("Could not find persistentVolume for claim %s err %v", claimName, err)
				return nil, err
			}
			kl.applyPersistentVolumeAnnotations(pv, pod)
			volSpec = volume.NewSpecFromPersistentVolume(pv, pod.Spec.Volumes[i].PersistentVolumeClaim.ReadOnly)
		} else {
			volSpec = volume.NewSpecFromVolume(&pod.Spec.Volumes[i])
		}

		// Try to use a plugin for this volume.
		mounter, err := kl.newVolumeMounterFromPlugins(volSpec, pod, volume.VolumeOptions{RootContext: rootContext})
		if err != nil {
			glog.Errorf("Could not create volume mounter for pod %s: %v", pod.UID, err)
			return nil, err
		}

		// some volumes require attachment before mounter's setup.
		// The plugin can be nil, but non-nil errors are legitimate errors.
		// For non-nil plugins, Attachment to a node is required before Mounter's setup.
		attacher, attachablePlugin, err := kl.newVolumeAttacherFromPlugins(volSpec, pod)
		if err != nil {
			glog.Errorf("Could not create volume attacher for pod %s: %v", pod.UID, err)
			return nil, err
		}
		if attacher != nil {
			// If the device path is already mounted, avoid an expensive call to the
			// cloud provider.
			deviceMountPath := attacher.GetDeviceMountPath(volSpec)
			notMountPoint, err := kl.mounter.IsLikelyNotMountPoint(deviceMountPath)
			if err != nil && !os.IsNotExist(err) {
				return nil, err
			}
			if notMountPoint {
				if !kl.enableControllerAttachDetach {
					err = attacher.Attach(volSpec, kl.hostname)
					if err != nil {
						return nil, err
					}
				}

				devicePath, err := attacher.WaitForAttach(volSpec, maxWaitForVolumeOps)
				if err != nil {
					return nil, err
				}

				if kl.enableControllerAttachDetach {
					// Attach/Detach controller is enabled and this volume type
					// implements an attacher
					uniqueDeviceName, err := attachdetach.GetUniqueDeviceNameFromSpec(
						attachablePlugin, volSpec)
					if err != nil {
						return nil, err
					}
					kl.volumeManager.AddVolumeInUse(
						api.UniqueDeviceName(uniqueDeviceName))
				}

				if err = attacher.MountDevice(volSpec, devicePath, deviceMountPath, kl.mounter); err != nil {
					return nil, err
				}
			}
		}

		err = mounter.SetUp(fsGroup)
		if err != nil {
			return nil, err
		}
		podVolumes[pod.Spec.Volumes[i].Name] = kubecontainer.VolumeInfo{Mounter: mounter}
	}
	return podVolumes, nil
}