Example #1
// Waits for the pv and pvc to be bound to each other, then checks that the pv's
// claimRef matches the pvc. Fails test on errors.
func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {

	var err error

	// Wait for pv and pvc to bind to each other
	if err = waitOnPVandPVC(c, ns, pv, pvc); err != nil {
		return pv, pvc, err
	}

	// Check that the PersistentVolume.ClaimRef is valid and matches the PVC
	framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
	pv, err = c.PersistentVolumes().Get(pv.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name)
	}

	pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolumeClaim %v:", pvc.Name)
	}

	if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID != pvc.UID {
		pvJSON, _ := json.Marshal(pv.Spec.ClaimRef)
		return pv, pvc, fmt.Errorf("Expected Bound PersistentVolume %v to have valid ClaimRef: %+v", pv.Name, string(pvJSON))
	}

	return pv, pvc, nil
}
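A minimal usage sketch tying this helper to the surrounding examples: createPV is Example #2 below, makePersistentVolume and makePersistentVolumeClaim are the constructors used in Example #11, and c, ns, and serverIP are assumed to come from the test's setup.

	pv := makePersistentVolume(serverIP)
	pvc := makePersistentVolumeClaim(ns)

	pv, err := createPV(c, pv)
	framework.ExpectNoError(err)
	pvc, err = c.PersistentVolumeClaims(ns).Create(pvc)
	framework.ExpectNoError(err)

	// Bind the pair and validate the PV's ClaimRef; fails the test on mismatch.
	pv, pvc, err = waitAndValidatePVandPVC(c, ns, pv, pvc)
	framework.ExpectNoError(err)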
Example #2
// Create the PV resource. Fails test on error.
func createPV(c *client.Client, pv *api.PersistentVolume) (*api.PersistentVolume, error) {

	pv, err := c.PersistentVolumes().Create(pv)
	if err != nil {
		return pv, fmt.Errorf("Create PersistentVolume %v failed: %v", pv.Name, err)
	}

	return pv, nil
}
Example #3
func deletePersistentVolume(c *client.Client, pv *api.PersistentVolume) {
	// Delete the PersistentVolume
	framework.Logf("Deleting PersistentVolume")
	err := c.PersistentVolumes().Delete(pv.Name)
	if err != nil {
		framework.Failf("Delete PersistentVolume failed: %v", err)
	}
	// Wait for the PersistentVolume to be fully deleted, failing the test on timeout.
	err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 30*time.Second)
	if err != nil {
		framework.Failf("Waiting for PersistentVolume %v to be deleted failed: %v", pv.Name, err)
	}
}
Example #4
func createPV(c *k8sclient.Client, ns string, pvcNames []string, sshCommand string) (Result, error) {

	for _, pvcName := range pvcNames {
		hostPath := path.Join("/data", ns, pvcName)
		nsPvcName := ns + "-" + pvcName
		pvs := c.PersistentVolumes()
		rc, err := pvs.List(api.ListOptions{})
		if err != nil {
			util.Errorf("Failed to load PersistentVolumes with error %v\n", err)
			continue
		}
		// Skip this claim if a PersistentVolume with the expected name already exists.
		alreadyExists := false
		for _, volume := range rc.Items {
			if nsPvcName == volume.ObjectMeta.Name {
				util.Infof("Already created PersistentVolumes for %s\n", nsPvcName)
				alreadyExists = true
				break
			}
		}
		if alreadyExists {
			continue
		}

		// we no longer need to do chmod on kubernetes as we have init containers now
		typeOfMaster := util.TypeOfMaster(c)
		if typeOfMaster != util.Kubernetes || len(sshCommand) > 0 {
			err = configureHostPathVolume(c, ns, hostPath, sshCommand)
			if err != nil {
				util.Errorf("Failed to configure the host path %s with error %v\n", hostPath, err)
			}
		}

		// Let's create a new PV
		util.Infof("PersistentVolume name %s will be created on host path %s\n", nsPvcName, hostPath)
		pv := api.PersistentVolume{
			ObjectMeta: api.ObjectMeta{
				Name: nsPvcName,
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
				},
				AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
				PersistentVolumeSource: api.PersistentVolumeSource{
					HostPath: &api.HostPathVolumeSource{Path: hostPath},
				},
				PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
			},
		}

		_, err = pvs.Create(&pv)
		if err != nil {
			util.Errorf("Failed to create PersistentVolume %s at %s with error %v\n", nsPvcName, hostPath, err)
		}

	}

	return Success, nil
}
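A hypothetical invocation of this variant; the namespace and claim names are made up for illustration, and an empty sshCommand skips the host-path configuration on a Kubernetes master, per the check above.

	result, err := createPV(c, "default", []string{"mysql-data", "registry"}, "")
	if err != nil || result != Success {
		util.Errorf("PV provisioning did not complete: %v\n", err)
	}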
Example #5
func createPV(c *k8sclient.Client, ns string, pvcNames []string, cmd *cobra.Command) (Result, error) {

	for _, pvcName := range pvcNames {
		hostPath := "/data/" + pvcName
		pvs := c.PersistentVolumes()
		rc, err := pvs.List(api.ListOptions{})
		if err != nil {
			util.Errorf("Failed to load PersistentVolumes with error %v\n", err)
			continue
		}
		// Skip this claim if a PersistentVolume with the expected name already exists.
		alreadyExists := false
		for _, volume := range rc.Items {
			if volume.ObjectMeta.Name == pvcName {
				util.Infof("Already created PersistentVolumes for %s\n", pvcName)
				alreadyExists = true
				break
			}
		}
		if alreadyExists {
			continue
		}

		err = configureHostPathVolume(c, ns, hostPath, cmd)
		if err != nil {
			util.Errorf("Failed to configure the host path %s with error %v\n", hostPath, err)
		}

		// Let's create a new PV
		util.Infof("PersistentVolume name %s will be created on host path %s\n", pvcName, hostPath)
		pv := api.PersistentVolume{
			ObjectMeta: api.ObjectMeta{
				Name: pvcName,
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("5Gi"),
				},
				AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
				PersistentVolumeSource: api.PersistentVolumeSource{
					HostPath: &api.HostPathVolumeSource{Path: hostPath},
				},
			},
		}

		_, err = pvs.Create(&pv)
		if err != nil {
			util.Errorf("Failed to create PersistentVolume %s at %s with error %v\n", pvcName, hostPath, err)
		}
	}

	return Success, nil
}
Example #6
// GetPersistentVolumeDetail returns detailed information about a persistent volume
func GetPersistentVolumeDetail(client *client.Client, name string) (*PersistentVolumeDetail, error) {
	log.Printf("Getting details of %s persistent volume", name)

	rawPersistentVolume, err := client.PersistentVolumes().Get(name)

	if err != nil {
		return nil, err
	}

	return getPersistentVolumeDetail(rawPersistentVolume), nil
}
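A short usage sketch; the volume name "pv0001" is a made-up example.

	detail, err := GetPersistentVolumeDetail(client, "pv0001")
	if err != nil {
		log.Fatalf("Could not get persistent volume detail: %v", err)
	}
	log.Printf("Persistent volume detail: %+v", detail)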
Example #7
// Delete the PV. Fail the test if the delete fails. On success the returned PV
// is nil, which prevents the AfterEach from attempting to delete it again.
func deletePersistentVolume(c *client.Client, pv *api.PersistentVolume) (*api.PersistentVolume, error) {

	if pv == nil {
		return nil, fmt.Errorf("PV to be deleted is nil")
	}

	framework.Logf("Deleting PersistentVolume %v", pv.Name)
	err := c.PersistentVolumes().Delete(pv.Name)
	if err != nil {
		return pv, fmt.Errorf("Delete() PersistentVolume %v failed: %v", pv.Name, err)
	}

	// Wait for PersistentVolume to delete
	deleteDuration := 90 * time.Second
	err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, deleteDuration)
	if err != nil {
		return pv, fmt.Errorf("Unable to delete PersistentVolume %s after waiting for %v: %v", pv.Name, deleteDuration, err)
	}

	return nil, nil // success
}
Example #8
// Delete the PVC and wait for the PV to become Available again.
// Validate that the PV has been recycled (assumes the PV's reclaim policy is Recycle).
func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {

	By("Deleting PersistentVolumeClaim to trigger PV Recycling")

	framework.Logf("Deleting PersistentVolumeClaim %v to trigger PV Recycling", pvc.Name)
	err := c.PersistentVolumeClaims(ns).Delete(pvc.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Delete of PVC %v failed: %v", pvc.Name, err)
	}

	// Check that the PVC is really deleted.
	pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name)
	if err == nil {
		return pv, pvc, fmt.Errorf("PVC %v deleted yet still exists", pvc.Name)
	}
	if !apierrs.IsNotFound(err) {
		return pv, pvc, fmt.Errorf("Get on deleted PVC %v failed with error other than \"not found\": %v", pvc.Name, err)
	}

	// Wait for the PV's phase to return to Available
	framework.Logf("Waiting for recycling process to complete.")
	err = framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 3*time.Second, 300*time.Second)
	if err != nil {
		return pv, pvc, fmt.Errorf("Recycling failed: %v", err)
	}

	// Examine the pv.ClaimRef and UID. Expect nil values.
	pv, err = c.PersistentVolumes().Get(pv.Name)
	if err != nil {
		return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v:", pv.Name)
	}
	if pv.Spec.ClaimRef != nil && len(pv.Spec.ClaimRef.UID) > 0 {
		crJSON, _ := json.Marshal(pv.Spec.ClaimRef)
		return pv, pvc, fmt.Errorf("Expected PV %v's ClaimRef to be nil, or the claimRef's UID to be blank. Instead claimRef is: %v", pv.Name, string(crJSON))
	}

	return pv, pvc, nil
}
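Continuing the sketch from Example #1: once the PV and PVC are bound, a hedged follow-up that exercises this helper; it assumes the PV was created with the Recycle reclaim policy.

	pv, pvc, err = deletePVCandValidatePV(c, ns, pvc, pv)
	framework.ExpectNoError(err)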
Example #9
			defer func() {
				c.PersistentVolumeClaims(ns).Delete(claim.Name)
			}()
			claim, err := c.PersistentVolumeClaims(ns).Create(claim)
			Expect(err).NotTo(HaveOccurred())

			err = waitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, poll, claimProvisionTimeout)
			Expect(err).NotTo(HaveOccurred())

			By("checking the claim")
			// Get new copy of the claim
			claim, err = c.PersistentVolumeClaims(ns).Get(claim.Name)
			Expect(err).NotTo(HaveOccurred())

			// Get the bound PV
			pv, err := c.PersistentVolumes().Get(claim.Spec.VolumeName)
			Expect(err).NotTo(HaveOccurred())

			// Check sizes
			expectedCapacity := resource.MustParse(expectedSize)
			pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
			Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

			requestedCapacity := resource.MustParse(requestedSize)
			claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
			Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

			// Check PV properties
			Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
			expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
			Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
Example #10
func deleteAllPetSets(c *client.Client, ns string) {
	pst := &petSetTester{c: c}
	psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
	ExpectNoError(err)

	// Scale down each petset, then delete it completely.
	// Deleting a pvc without doing this will leak volumes, #25101.
	errList := []string{}
	for _, ps := range psList.Items {
		framework.Logf("Scaling petset %v to 0", ps.Name)
		if err := pst.scale(&ps, 0); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
		framework.Logf("Deleting petset %v", ps.Name)
		if err := c.Apps().PetSets(ps.Namespace).Delete(ps.Name, nil); err != nil {
			errList = append(errList, fmt.Sprintf("%v", err))
		}
	}

	// pvs are global, so we need to wait for the exact ones bound to the petset pvcs.
	pvNames := sets.NewString()
	// TODO: Don't assume all pvcs in the ns belong to a petset
	pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
		pvcList, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
		if err != nil {
			framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
			return false, nil
		}
		for _, pvc := range pvcList.Items {
			pvNames.Insert(pvc.Spec.VolumeName)
			// TODO: Double check that there are no pods referencing the pvc
			framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
			if err := c.PersistentVolumeClaims(ns).Delete(pvc.Name); err != nil {
				return false, nil
			}
		}
		return true, nil
	})
	if pvcPollErr != nil {
		errList = append(errList, fmt.Sprintf("Timeout waiting for pvc deletion."))
	}

	pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
		pvList, err := c.PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
		if err != nil {
			framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
			return false, nil
		}
		waitingFor := []string{}
		for _, pv := range pvList.Items {
			if pvNames.Has(pv.Name) {
				waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
			}
		}
		if len(waitingFor) == 0 {
			return true, nil
		}
		framework.Logf("Still waiting for pvs of petset to disappear:\n%v", strings.Join(waitingFor, "\n"))
		return false, nil
	})
	if pollErr != nil {
		errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
	}
	if len(errList) != 0 {
		ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
	}
}
Example #11
		pv := makePersistentVolume(serverIP)
		pvc := makePersistentVolumeClaim(ns)

		framework.Logf("Creating PersistentVolume using NFS")
		pv, err := c.PersistentVolumes().Create(pv)
		Expect(err).NotTo(HaveOccurred())

		framework.Logf("Creating PersistentVolumeClaim")
		pvc, err = c.PersistentVolumeClaims(ns).Create(pvc)
		Expect(err).NotTo(HaveOccurred())

		// Allow the binder a chance to catch up; this should take no more than 20s.
		err = framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 1*time.Second, 30*time.Second)
		Expect(err).NotTo(HaveOccurred())

		pv, err = c.PersistentVolumes().Get(pv.Name)
		Expect(err).NotTo(HaveOccurred())
		if pv.Spec.ClaimRef == nil {
			framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef: %+v", pv)
		}

		framework.Logf("Deleting PersistentVolumeClaim to trigger PV Recycling")
		err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
		Expect(err).NotTo(HaveOccurred())

		// Allow the recycler a chance to catch up; it has to perform an NFS scrub, which can be slow in e2e.
		err = framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 5*time.Second, 300*time.Second)
		Expect(err).NotTo(HaveOccurred())

		pv, err = c.PersistentVolumes().Get(pv.Name)
		Expect(err).NotTo(HaveOccurred())
		if pv.Spec.ClaimRef != nil {
			framework.Failf("Expected PersistentVolume to be unbound, but found non-nil ClaimRef: %+v", pv)
		}
Example #12
	})

	AfterEach(func() {
		if c != nil && len(ns) > 0 { // still have client and namespace
			if pvc != nil && len(pvc.Name) > 0 {
				// Delete the PersistentVolumeClaim
				framework.Logf("AfterEach: PVC %v is non-nil, deleting claim", pvc.Name)
				err := c.PersistentVolumeClaims(ns).Delete(pvc.Name)
				if err != nil && !apierrs.IsNotFound(err) {
					framework.Logf("AfterEach: delete of PersistentVolumeClaim %v error: %v", pvc.Name, err)
				}
				pvc = nil
			}
			if pv != nil && len(pv.Name) > 0 {
				framework.Logf("AfterEach: PV %v is non-nil, deleting pv", pv.Name)
				err := c.PersistentVolumes().Delete(pv.Name)
				if err != nil && !apierrs.IsNotFound(err) {
					framework.Logf("AfterEach: delete of PersistentVolume %v error: %v", pv.Name, err)
				}
				pv = nil
			}
		}
	})

	// Execute after *all* the tests have run
	AddCleanupAction(func() {
		if nfsServerPod != nil && c != nil {
			framework.Logf("AfterSuite: nfs-server pod %v is non-nil, deleting pod", nfsServerPod.Name)
			nfsServerPodCleanup(c, NFSconfig)
			nfsServerPod = nil
		}
Example #13
func testDynamicProvisioning(client *client.Client, claim *api.PersistentVolumeClaim) {
	err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	Expect(err).NotTo(HaveOccurred())

	By("checking the claim")
	// Get new copy of the claim
	claim, err = client.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
	Expect(err).NotTo(HaveOccurred())

	// Get the bound PV
	pv, err := client.PersistentVolumes().Get(claim.Spec.VolumeName)
	Expect(err).NotTo(HaveOccurred())

	// Check sizes
	expectedCapacity := resource.MustParse(expectedSize)
	pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
	Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

	requestedCapacity := resource.MustParse(requestedSize)
	claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

	// Check PV properties
	Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
	expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

	// We start two pods:
	// - The first writes 'hello world' to /mnt/test/data (= the volume).
	// - The second one runs grep 'hello world' on /mnt/test/data.
	// If both succeed, Kubernetes actually allocated something that is
	// persistent across pods.
	By("checking the created volume is writable")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")

	By("checking the created volume is readable and retains data")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

	// Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
	// probably collide with destruction of the pods above - the pods
	// still have the volume attached (kubelet is slow...) and deletion
	// of attached volume is not allowed by AWS/GCE/OpenStack.
	// Kubernetes *will* retry deletion several times in
	// pvclaimbinder-sync-period.
	// So, technically, this sleep is not needed. On the other hand,
	// the sync period is 10 minutes and we really don't want to wait
	// 10 minutes here. There is no way to see whether kubelet has
	// finished cleaning the volumes. A small sleep here actually
	// speeds up the test!
	// Three minutes should be enough to clean up the pods properly.
	// We've seen GCE PD detach to take more than 1 minute.
	By("Sleeping to let kubelet destroy all pods")
	time.Sleep(3 * time.Minute)

	By("deleting the claim")
	framework.ExpectNoError(client.PersistentVolumeClaims(claim.Namespace).Delete(claim.Name))

	// Wait for the PV to get deleted too.
	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
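For context, a hedged sketch of the kind of claim this function expects. The annotation key is the beta storage-class annotation of this API era, and the class name "fast" is an assumption for illustration; ns, requestedSize, and client come from the surrounding test.

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
			Annotations: map[string]string{
				// Assumed annotation for requesting a storage class in this API version.
				"volume.beta.kubernetes.io/storage-class": "fast",
			},
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse(requestedSize),
				},
			},
		},
	}
	claim, err := client.PersistentVolumeClaims(ns).Create(claim)
	framework.ExpectNoError(err)
	testDynamicProvisioning(client, claim)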