Example #1
func createMissingPVs(c *k8sclient.Client, ns string) {
	found, pvcs, pendingClaimNames := findPendingPVs(c, ns)
	if found {
		sshCommand := ""
		createPV(c, ns, pendingClaimNames, sshCommand)
		items := pvcs.Items
		for _, item := range items {
			status := item.Status.Phase
			if status == api.ClaimPending || status == api.ClaimLost {
				err := c.PersistentVolumeClaims(ns).Delete(item.ObjectMeta.Name)
				if err != nil {
					util.Infof("Error deleting PVC %s\n", item.ObjectMeta.Name)
				} else {
					util.Infof("Recreating PVC %s\n", item.ObjectMeta.Name)

					if _, err := c.PersistentVolumeClaims(ns).Create(&api.PersistentVolumeClaim{
						ObjectMeta: api.ObjectMeta{
							Name:      item.ObjectMeta.Name,
							Namespace: ns,
						},
						Spec: api.PersistentVolumeClaimSpec{
							VolumeName:  ns + "-" + item.ObjectMeta.Name,
							AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
							Resources: api.ResourceRequirements{
								Requests: api.ResourceList{
									api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
								},
							},
						},
					}); err != nil {
						util.Infof("Error recreating PVC %s: %v\n", item.ObjectMeta.Name, err)
					}
				}
			}
		}
	}
}
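Example #1 calls a createPV helper that is not shown. A minimal sketch, assuming it creates one HostPath-backed PersistentVolume per pending claim, named to match the VolumeName the recreated PVCs request above; the real gofabric8 helper may instead provision the host directory remotely via sshCommand, which this sketch ignores:

func createPV(c *k8sclient.Client, ns string, pendingClaimNames []string, sshCommand string) {
	for _, name := range pendingClaimNames {
		// The PV name must match the VolumeName set on the recreated PVC above.
		pv := &api.PersistentVolume{
			ObjectMeta: api.ObjectMeta{Name: ns + "-" + name},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
				},
				AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
				PersistentVolumeSource: api.PersistentVolumeSource{
					// Hypothetical host directory for the volume's data.
					HostPath: &api.HostPathVolumeSource{Path: "/opt/fabric8-data/" + name},
				},
			},
		}
		if _, err := c.PersistentVolumes().Create(pv); err != nil {
			util.Infof("Error creating PV %s: %v\n", pv.ObjectMeta.Name, err)
		}
	}
}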
Example #2
// Waits for the pv and pvc to be bound to each other, then checks that the pv's
// claimRef matches the pvc. Returns an error on failure so the caller can fail the test.
func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {

	var err error

	// Wait for pv and pvc to bind to each other
	if err = waitOnPVandPVC(c, ns, pv, pvc); err != nil {
		return pv, pvc, err
	}

	// Check that the PersistentVolume.ClaimRef is valid and matches the PVC
	framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
	pv, err = c.PersistentVolumes().Get(pv.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name)
	}

	pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolumeClaim %v:", pvc.Name)
	}

	if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID != pvc.UID {
		pvJSON, _ := json.Marshal(pv.Spec.ClaimRef)
		return pv, pvc, fmt.Errorf("Expected Bound PersistentVolume %v to have valid ClaimRef: %+v", pv.Name, string(pvJSON))
	}

	return pv, pvc, nil
}
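Example #2 delegates the waiting itself to waitOnPVandPVC, which is not shown. A plausible sketch, assuming it simply polls both objects to the Bound phase using the framework helpers that appear in Examples #8 and #17; the 3s/300s intervals are assumptions:

func waitOnPVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) error {
	// Wait for the PV to reach phase Bound.
	framework.Logf("Waiting for PersistentVolume %v to enter phase Bound", pv.Name)
	if err := framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second); err != nil {
		return err
	}
	// Then wait for the PVC to reach phase Bound as well.
	framework.Logf("Waiting for PersistentVolumeClaim %v to enter phase Bound", pvc.Name)
	return framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
}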
Example #3
// createPVC creates the PVC resource and returns an error on failure.
func createPVC(c *client.Client, ns string, pvc *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {

	pvc, err := c.PersistentVolumeClaims(ns).Create(pvc)
	if err != nil {
		return pvc, fmt.Errorf("Create PersistentVolumeClaim %v failed: %v", pvc.Name, err)
	}

	return pvc, nil
}
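A hedged usage sketch for createPVC, assuming c and ns come from the surrounding test and using the 1Gi ReadWriteOnce claim shape seen throughout these examples:

// Build a minimal claim and create it through the helper.
pvc := &api.PersistentVolumeClaim{
	ObjectMeta: api.ObjectMeta{GenerateName: "pvc-", Namespace: ns},
	Spec: api.PersistentVolumeClaimSpec{
		AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		Resources: api.ResourceRequirements{
			Requests: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
			},
		},
	},
}
pvc, err := createPVC(c, ns, pvc)
if err != nil {
	framework.Failf("%v", err)
}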
Example #4
func validatePersistenceVolumeClaims(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	rc, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
	if err != nil {
		util.Fatalf("Failed to get PersistentVolumeClaims in namespace %s: %s\n", ns, err)
	}
	if rc != nil {
		items := rc.Items
		pendingClaimNames := make([]string, 0, len(items))
		for _, item := range items {
			status := item.Status.Phase
			if status != api.ClaimBound {
				pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
			}
		}
		if len(pendingClaimNames) > 0 {
			util.Failuref("PersistentVolumeClaim not Bound for: %s. You need to create a PersistentVolume!\n", strings.Join(pendingClaimNames, ", "))
			util.Info(`
You can enable dynamic PersistentVolume creation with Kubernetes 1.4 or later.

Or, to have gofabric8 create HostPath-based PersistentVolume resources for you on minikube and minishift, type:

  gofabric8 volumes

For other clusters you could do something like this, though ideally with a persistent volume implementation other than hostPath:

cat <<EOF | oc create -f -
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: fabric8
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 5Gi
  hostPath:
    path: /opt/fabric8-data
EOF


`)
			return Failure, err
		}
		return Success, err
	}
	return Failure, err
}
Example #5
func validatePersistenceVolumeClaims(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
	ns, _, err := f.DefaultNamespace()
	if err != nil {
		return Failure, err
	}
	rc, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
	if err != nil {
		util.Fatalf("Failed to get PersistentVolumeClaims in namespace %s: %s\n", ns, err)
	}
	if rc != nil {
		items := rc.Items
		pendingClaimNames := make([]string, 0, len(items))
		for _, item := range items {
			status := item.Status.Phase
			if status != api.ClaimBound {
				pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
			}
		}
		if len(pendingClaimNames) > 0 {
			util.Failuref("PersistentVolumeClaim not Bound for: %s. You need to create a PersistentVolume!\n", strings.Join(pendingClaimNames, ", "))
			util.Info(`
To generate a single-node PersistentVolume, type something like this:


cat <<EOF | oc create -f -
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: fabric8
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 5Gi
  hostPath:
    path: /opt/fabric8-data
EOF


`)
			return Failure, err
		}
		return Success, err
	}
	return Failure, err
}
Example #6
// GetPersistentVolumeClaimDetail returns detailed information about a persistent volume claim
func GetPersistentVolumeClaimDetail(client *client.Client, namespace string, name string) (*PersistentVolumeClaimDetail, error) {
	log.Printf("Getting details of %s persistent volume claim", name)

	rawPersistentVolumeClaim, err := client.PersistentVolumeClaims(namespace).Get(name)

	if err != nil {
		return nil, err
	}

	return getPersistentVolumeClaimDetail(rawPersistentVolumeClaim), nil
}
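The PersistentVolumeClaimDetail type and the unexported getPersistentVolumeClaimDetail builder are not shown. A hypothetical sketch, assuming the detail view simply copies the commonly displayed claim fields:

type PersistentVolumeClaimDetail struct {
	Name        string                           `json:"name"`
	Namespace   string                           `json:"namespace"`
	Status      api.PersistentVolumeClaimPhase   `json:"status"`
	Volume      string                           `json:"volume"`
	AccessModes []api.PersistentVolumeAccessMode `json:"accessModes"`
	Capacity    api.ResourceList                 `json:"capacity"`
}

func getPersistentVolumeClaimDetail(pvc *api.PersistentVolumeClaim) *PersistentVolumeClaimDetail {
	// Copy the fields a dashboard-style detail view typically surfaces.
	return &PersistentVolumeClaimDetail{
		Name:        pvc.ObjectMeta.Name,
		Namespace:   pvc.ObjectMeta.Namespace,
		Status:      pvc.Status.Phase,
		Volume:      pvc.Spec.VolumeName,
		AccessModes: pvc.Spec.AccessModes,
		Capacity:    pvc.Status.Capacity,
	}
}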
Example #7
func findPendingPVS(c *k8sclient.Client, ns string) (bool, []string) {

	pvcs, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
	if err != nil {
		util.Infof("Failed to find any PersistentVolumeClaims, %s in namespace %s\n", err, ns)
	}

	if pvcs != nil {
		items := pvcs.Items
		pendingClaimNames := make([]string, 0, len(items))
		for _, item := range items {
			status := item.Status.Phase
			if status == api.ClaimPending || status == api.ClaimLost {
				pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
			}
		}
		if len(pendingClaimNames) > 0 {
			return true, pendingClaimNames
		}
	}
	return false, nil
}
Example #8
// Delete the PVC and wait for the PV to become Available again.
// Validate that the PV has recycled (assumption here about reclaimPolicy).
func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {

	By("Deleting PersistentVolumeClaim to trigger PV Recycling")

	framework.Logf("Deleting PersistentVolumeClaim %v to trigger PV Recycling", pvc.Name)
	err := c.PersistentVolumeClaims(ns).Delete(pvc.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Delete of PVC %v failed: %v", pvc.Name, err)
	}

	// Check that the PVC is really deleted.
	pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name)
	if err == nil {
		return pv, pvc, fmt.Errorf("PVC %v deleted yet still exists", pvc.Name)
	}
	if !apierrs.IsNotFound(err) {
		return pv, pvc, fmt.Errorf("Get on deleted PVC %v failed with error other than \"not found\": %v", pvc.Name, err)
	}

	// Wait for the PV's phase to return to Available
	framework.Logf("Waiting for recycling process to complete.")
	err = framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 3*time.Second, 300*time.Second)
	if err != nil {
		return pv, pvc, fmt.Errorf("Recycling failed: %v", err)
	}

	// Examine the pv.ClaimRef and UID. Expect nil values.
	pv, err = c.PersistentVolumes().Get(pv.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name)
	}
	if pv.Spec.ClaimRef != nil && len(pv.Spec.ClaimRef.UID) > 0 {
		crJSON, _ := json.Marshal(pv.Spec.ClaimRef)
		return pv, pvc, fmt.Errorf("Expected PV %v's ClaimRef to be nil, or the claimRef's UID to be blank. Instead claimRef is: %v", pv.Name, string(crJSON))
	}

	return pv, pvc, nil
}
Example #9
func findPendingPVs(c *k8sclient.Client, ns string) (bool, *api.PersistentVolumeClaimList, []string) {

	pvcs, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})

	if err != nil {
		util.Infof("Failed to find any PersistentVolumeClaims, %s in namespace %s\n", err, ns)
	}

	if pvcs != nil {
		pendingClaims := pvcs.Items
		var pendingClaimNames []string
		for _, item := range pendingClaims {
			status := item.Status.Phase
			if status == api.ClaimPending || status == api.ClaimLost {
				pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
			}
		}
		if len(pendingClaimNames) > 0 {
			return true, pvcs, pendingClaimNames
		}
	}
	return false, nil, nil
}
Example #10
// DoTestStorageClasses tests storage classes for one api version.
func DoTestStorageClasses(t *testing.T, client *client.Client, ns *api.Namespace) {
	// Make a storage class object.
	s := storage.StorageClass{
		TypeMeta: unversioned.TypeMeta{
			Kind: "StorageClass",
		},
		ObjectMeta: api.ObjectMeta{
			Name: "gold",
		},
		Provisioner: provisionerPluginName,
	}

	if _, err := client.Storage().StorageClasses().Create(&s); err != nil {
		t.Errorf("unable to create test storage class: %v", err)
	}
	defer deleteStorageClassOrErrorf(t, client, s.Namespace, s.Name)

	// Template for pvcs that specify a storage class
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "XXX",
			Namespace: ns.Name,
			Annotations: map[string]string{
				storageutil.StorageClassAnnotation: "gold",
			},
		},
		Spec: api.PersistentVolumeClaimSpec{
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}

	pvc.ObjectMeta.Name = "uses-storageclass"
	if _, err := client.PersistentVolumeClaims(ns.Name).Create(pvc); err != nil {
		t.Errorf("Failed to create pvc: %v", err)
	}
	defer deletePersistentVolumeClaimOrErrorf(t, client, ns.Name, pvc.Name)
}
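The deferred deleteStorageClassOrErrorf helper is not shown. A likely sketch, mirroring Example #14's deletePersistentVolumeClaimOrErrorf; StorageClasses are cluster-scoped, so the namespace argument presumably exists only for call-site symmetry:

func deleteStorageClassOrErrorf(t *testing.T, c *client.Client, ns, name string) {
	// StorageClasses are not namespaced; ns is ignored.
	if err := c.Storage().StorageClasses().Delete(name); err != nil {
		t.Errorf("unable to delete storage class %v: %v", name, err)
	}
}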
Example #11
			claim, err = c.PersistentVolumeClaims(ns).Get(claim.Name)
			Expect(err).NotTo(HaveOccurred())

			// Get the bound PV
			pv, err := c.PersistentVolumes().Get(claim.Spec.VolumeName)
			Expect(err).NotTo(HaveOccurred())

			// Check sizes
			expectedCapacity := resource.MustParse(expectedSize)
			pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
			Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

			requestedCapacity := resource.MustParse(requestedSize)
			claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
			Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

			// Check PV properties
			Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
			expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
			Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
			Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
			Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

			// We start two pods:
			// - The first writes 'hello world' to /mnt/test (= the volume).
			// - The second one runs grep 'hello world' on /mnt/test.
			// If both succeed, Kubernetes actually allocated something that is
			// persistent across pods.
			By("checking the created volume is writable")
			runInPodWithVolume(c, ns, claim.Name, "echo 'hello world' > /mnt/test/data")

			By("checking the created volume is readable and retains data")
			runInPodWithVolume(c, ns, claim.Name, "grep 'hello world' /mnt/test/data")

			// Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
			// probably collide with destruction of the pods above - the pods
			// still have the volume attached (kubelet is slow...) and deletion
			// of attached volume is not allowed by AWS/GCE/OpenStack.
			// Kubernetes *will* retry deletion several times in
			// pvclaimbinder-sync-period.
			// So, technically, this sleep is not needed. On the other hand,
			// the sync period is 10 minutes and we really don't want to wait
			// 10 minutes here. There is no way to see if kubelet is
			// finished with cleaning volumes. A small sleep here actually
			// speeds up the test!
			// One minute should be enough to clean up the pods properly.
			// Detaching e.g. a Cinder volume takes some time.
			By("Sleeping to let kubelet destroy all pods")
			time.Sleep(time.Minute)

			By("deleting the claim")
			expectNoError(c.PersistentVolumeClaims(ns).Delete(claim.Name))
Example #12
func deleteAllPetSets(c *client.Client, ns string) {
	pst := &petSetTester{c: c}
	psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
	ExpectNoError(err)

	// Scale down each petset, then delete it completely.
	// Deleting a pvc without doing this will leak volumes, #25101.
	errList := []string{}
	for _, ps := range psList.Items {
		framework.Logf("Scaling petset %v to 0", ps.Name)
		if err := pst.scale(&ps, 0); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
		framework.Logf("Deleting petset %v", ps.Name)
		if err := c.Apps().PetSets(ps.Namespace).Delete(ps.Name, nil); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
	}

	// pvs are global, so we need to wait for the exact ones bound to the petset pvcs.
	pvNames := sets.NewString()
	// TODO: Don't assume all pvcs in the ns belong to a petset
	pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
		pvcList, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
		if err != nil {
			framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
			return false, nil
		}
		for _, pvc := range pvcList.Items {
			pvNames.Insert(pvc.Spec.VolumeName)
			// TODO: Double check that there are no pods referencing the pvc
			framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
			if err := c.PersistentVolumeClaims(ns).Delete(pvc.Name); err != nil {
				return false, nil
			}
		}
		return true, nil
	})
	if pvcPollErr != nil {
		errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
	}

	pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
		pvList, err := c.PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
		if err != nil {
			framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
			return false, nil
		}
		waitingFor := []string{}
		for _, pv := range pvList.Items {
			if pvNames.Has(pv.Name) {
				waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
			}
		}
		if len(waitingFor) == 0 {
			return true, nil
		}
		framework.Logf("Still waiting for pvs of petset to disappear:\n%v", strings.Join(waitingFor, "\n"))
		return false, nil
	})
	if pollErr != nil {
		errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
	}
	if len(errList) != 0 {
		ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
	}
}
Example #13
		pv := makePersistentVolume(serverIP)
		pvc := makePersistentVolumeClaim(ns)

		framework.Logf("Creating PersistentVolume using NFS")
		pv, err := c.PersistentVolumes().Create(pv)
		Expect(err).NotTo(HaveOccurred())

		framework.Logf("Creating PersistentVolumeClaim")
		pvc, err = c.PersistentVolumeClaims(ns).Create(pvc)
		Expect(err).NotTo(HaveOccurred())

		// allow the binder a chance to catch up.  should not be more than 20s.
		framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 1*time.Second, 30*time.Second)

		pv, err = c.PersistentVolumes().Get(pv.Name)
		Expect(err).NotTo(HaveOccurred())
		if pv.Spec.ClaimRef == nil {
			framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef: %+v", pv)
		}

		framework.Logf("Deleting PersistentVolumeClaim to trigger PV Recycling")
		err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
		Expect(err).NotTo(HaveOccurred())

		// allow the recycler a chance to catch up.  it has to perform NFS scrub, which can be slow in e2e.
		framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second)

		pv, err = c.PersistentVolumes().Get(pv.Name)
		Expect(err).NotTo(HaveOccurred())
		if pv.Spec.ClaimRef != nil {
			framework.Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv)
Example #14
func deletePersistentVolumeClaimOrErrorf(t *testing.T, c *client.Client, ns, name string) {
	if err := c.PersistentVolumeClaims(ns).Delete(name); err != nil {
		t.Errorf("unable to delete persistent volume claim %v: %v", name, err)
	}
}
Example #15
// claimClient returns the pvcClient for the given kubeClient/ns.
func claimClient(kubeClient *client.Client, ns string) client.PersistentVolumeClaimInterface {
	return kubeClient.PersistentVolumeClaims(ns)
}
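A brief usage sketch, assuming kubeClient and ns are in scope; the narrowed interface exposes the usual Get/List/Create/Delete calls used throughout these examples:

// List all claims in the namespace through the narrowed client.
pvcs, err := claimClient(kubeClient, ns).List(api.ListOptions{})
if err != nil {
	framework.Failf("listing claims failed: %v", err)
}
framework.Logf("found %d claims", len(pvcs.Items))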
Example #16
		// If it doesn't exist, create the nfs server pod in "default" ns
		// The "default" ns is used so that individual tests can delete
		// their ns without impacting the nfs-server pod.
		if nfsServerPod == nil {
			nfsServerPod = startVolumeServer(c, NFSconfig)
			serverIP = nfsServerPod.Status.PodIP
			framework.Logf("NFS server IP address: %v", serverIP)
		}
	})

	AfterEach(func() {
		if c != nil && len(ns) > 0 { // still have client and namespace
			if pvc != nil && len(pvc.Name) > 0 {
				// Delete the PersistentVolumeClaim
				framework.Logf("AfterEach: PVC %v is non-nil, deleting claim", pvc.Name)
				err := c.PersistentVolumeClaims(ns).Delete(pvc.Name)
				if err != nil && !apierrs.IsNotFound(err) {
					framework.Logf("AfterEach: delete of PersistentVolumeClaim %v error: %v", pvc.Name, err)
				}
				pvc = nil
			}
			if pv != nil && len(pv.Name) > 0 {
				framework.Logf("AfterEach: PV %v is non-nil, deleting pv", pv.Name)
				err := c.PersistentVolumes().Delete(pv.Name)
				if err != nil && !apierrs.IsNotFound(err) {
					framework.Logf("AfterEach: delete of PersistentVolume %v error: %v", pv.Name, err)
				}
				pv = nil
			}
		}
	})
Example #17
func testDynamicProvisioning(client *client.Client, claim *api.PersistentVolumeClaim) {
	err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	Expect(err).NotTo(HaveOccurred())

	By("checking the claim")
	// Get new copy of the claim
	claim, err = client.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
	Expect(err).NotTo(HaveOccurred())

	// Get the bound PV
	pv, err := client.PersistentVolumes().Get(claim.Spec.VolumeName)
	Expect(err).NotTo(HaveOccurred())

	// Check sizes
	expectedCapacity := resource.MustParse(expectedSize)
	pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
	Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

	requestedCapacity := resource.MustParse(requestedSize)
	claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

	// Check PV properties
	Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
	expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

	// We start two pods:
	// - The first writes 'hello world' to /mnt/test (= the volume).
	// - The second one runs grep 'hello world' on /mnt/test.
	// If both succeed, Kubernetes actually allocated something that is
	// persistent across pods.
	By("checking the created volume is writable")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")

	By("checking the created volume is readable and retains data")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

	// Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
	// probably collide with destruction of the pods above - the pods
	// still have the volume attached (kubelet is slow...) and deletion
	// of attached volume is not allowed by AWS/GCE/OpenStack.
	// Kubernetes *will* retry deletion several times in
	// pvclaimbinder-sync-period.
	// So, technically, this sleep is not needed. On the other hand,
	// the sync period is 10 minutes and we really don't want to wait
	// 10 minutes here. There is no way to see if kubelet is
	// finished with cleaning volumes. A small sleep here actually
	// speeds up the test!
	// Three minutes should be enough to clean up the pods properly.
	// We've seen GCE PD detach to take more than 1 minute.
	By("Sleeping to let kubelet destroy all pods")
	time.Sleep(3 * time.Minute)

	By("deleting the claim")
	framework.ExpectNoError(client.PersistentVolumeClaims(claim.Namespace).Delete(claim.Name))

	// Wait for the PV to get deleted too.
	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
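The runInPodWithVolume helper is not shown. A hypothetical sketch, assuming it starts a busybox pod that mounts the named claim at /mnt/test, runs the shell command, and waits for the pod to succeed; the real e2e helper likely differs in cleanup and error reporting:

func runInPodWithVolume(c *client.Client, ns, claimName, command string) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{GenerateName: "pvc-volume-tester-", Namespace: ns},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Name:    "volume-tester",
					Image:   "gcr.io/google_containers/busybox:1.24",
					Command: []string{"/bin/sh", "-c", command},
					VolumeMounts: []api.VolumeMount{
						{Name: "my-volume", MountPath: "/mnt/test"},
					},
				},
			},
			Volumes: []api.Volume{
				{
					Name: "my-volume",
					VolumeSource: api.VolumeSource{
						PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
						},
					},
				},
			},
		},
	}
	pod, err := c.Pods(ns).Create(pod)
	framework.ExpectNoError(err)
	// Best-effort cleanup; the command's exit status decides pass/fail.
	defer c.Pods(ns).Delete(pod.Name, nil)
	framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, pod.Spec.Containers[0].Name, ns))
}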
Example #18
			defer c.Storage().StorageClasses().Delete(class.Name)
			Expect(err).NotTo(HaveOccurred())

			By("creating a claim with a dynamic provisioning annotation")
			claim := newClaim(ns, false)
			defer func() {
				c.PersistentVolumeClaims(ns).Delete(claim.Name)
			}()
			claim, err = c.PersistentVolumeClaims(ns).Create(claim)
			Expect(err).NotTo(HaveOccurred())

			testDynamicProvisioning(c, claim)
		})
	})

	framework.KubeDescribe("DynamicProvisioner Alpha", func() {
		It("should create and delete alpha persistent volumes [Slow]", func() {
			framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")

			By("creating a claim with an alpha dynamic provisioning annotation")
			claim := newClaim(ns, true)
			defer func() {
				c.PersistentVolumeClaims(ns).Delete(claim.Name)
			}()
			claim, err := c.PersistentVolumeClaims(ns).Create(claim)
			Expect(err).NotTo(HaveOccurred())

			testDynamicProvisioning(c, claim)
		})
	})
})
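The newClaim helper is not shown. A hypothetical sketch, assuming the alpha flag selects between the alpha and beta dynamic-provisioning annotations of that era; the size and the "fast" class name are assumptions, and the class name must match the StorageClass the test creates:

func newClaim(ns string, alpha bool) *api.PersistentVolumeClaim {
	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{GenerateName: "pvc-", Namespace: ns},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("1500Mi"),
				},
			},
		},
	}
	if alpha {
		// Alpha-era annotation: its mere presence triggered provisioning; the value was ignored.
		claim.ObjectMeta.Annotations = map[string]string{"volume.alpha.kubernetes.io/storage-class": ""}
	} else {
		// Beta annotation names a specific StorageClass.
		claim.ObjectMeta.Annotations = map[string]string{"volume.beta.kubernetes.io/storage-class": "fast"}
	}
	return claim
}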