func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController {
	if volumeSource == nil {
		volumeSource = framework.NewFakePVControllerSource()
	}
	if claimSource == nil {
		claimSource = framework.NewFakePVCControllerSource()
	}
	if classSource == nil {
		classSource = framework.NewFakeControllerSource()
	}
	ctrl := NewPersistentVolumeController(
		kubeClient,
		5*time.Second,        // sync period
		nil,                  // alpha provisioner
		[]vol.VolumePlugin{}, // recyclers
		nil,                  // cloud
		"",
		volumeSource,
		claimSource,
		classSource,
		record.NewFakeRecorder(1000), // event recorder
		enableDynamicProvisioning,
	)

	// Speed up the test
	ctrl.createProvisionedPVInterval = 5 * time.Millisecond
	return ctrl
}
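
// A minimal usage sketch (hypothetical; the nil sources fall back to the
// fake framework sources installed above):
//
//	client := &fake.Clientset{}
//	ctrl := newTestController(client, nil, nil, nil, true)
//	go ctrl.Run()
//	defer ctrl.Stop()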

func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or perform any other checking.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}

func Example() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// This will hold the downstream state, as we know it.
	downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
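	// For example (illustrative): if downstream still holds a key that a
	// relist no longer returns, the DeltaFIFO emits a Deleted delta for that
	// key (wrapped in a DeletedFinalStateUnknown marker) instead of dropping
	// it silently.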
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	cfg := &framework.Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &api.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,

		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
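			// Deltas.Newest returns the most recent delta recorded for this
			// object's key; Pop never delivers an empty delta list.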
			newest := obj.(cache.Deltas).Newest()

			if newest.Type != cache.Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}

				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}

				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers (see the sketch after
				// this function).
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}

				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}

	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go framework.New(cfg).Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or perform any other checking.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
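
// A minimal sketch (hypothetical helper, not part of the test suite) of why
// fifo.KeyOf is preferred over a plain key function in Process above: after
// a missed watch delete, the machinery can hand us a
// cache.DeletedFinalStateUnknown tombstone instead of the object itself, and
// KeyOf unwraps it where cache.MetaNamespaceKeyFunc would fail.
func exampleKeyOf() {
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, nil)
	tombstone := cache.DeletedFinalStateUnknown{
		Key: "default/a-hello",
		Obj: &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "a-hello"}},
	}
	key, _ := fifo.KeyOf(tombstone)
	fmt.Println(key) // Prints: default/a-hello
}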

func TestUpdate(t *testing.T) {
	// This test is going to exercise the various paths that result in a
	// call to update.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	const (
		FROM       = "from"
		ADD_MISSED = "missed the add event"
		TO         = "to"
	)

	// These are the transitions we expect to see; because this is
	// asynchronous, there are a lot of valid possibilities.
	type pair struct{ from, to string }
	allowedTransitions := map[pair]bool{
		pair{FROM, TO}:         true,
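		// ADD_MISSED covers the AddDropWatch case below: the informer never
		// sees the add event, so it may first observe the pod already in the
		// ADD_MISSED state, or see the update FROM -> ADD_MISSED after a
		// resync delivered FROM first.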
		pair{FROM, ADD_MISSED}: true,
		pair{ADD_MISSED, TO}:   true,

		// Because a resync can happen when we've already observed one
		// of the above but before the item is deleted.
		pair{TO, TO}: true,
		// Because a resync could happen before we observe an update.
		pair{FROM, FROM}: true,
	}

	var testDoneWG sync.WaitGroup

	// Make a controller that deletes things once it observes an update.
	// It calls Done() on the wait group on deletions so we can tell when
	// everything we've added has been deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*1,
		framework.ResourceEventHandlerFuncs{
			UpdateFunc: func(oldObj, newObj interface{}) {
				o, n := oldObj.(*api.Pod), newObj.(*api.Pod)
				from, to := o.Labels["check"], n.Labels["check"]
				if !allowedTransitions[pair{from, to}] {
					t.Errorf("observed transition %q -> %q for %v", from, to, n.Name)
				}
				source.Delete(n)
			},
			DeleteFunc: func(obj interface{}) {
				testDoneWG.Done()
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	pod := func(name, check string) *api.Pod {
		return &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: map[string]string{"check": check},
			},
		}
	}

	tests := []func(string){
		func(name string) {
			name = "a-" + name
			source.Add(pod(name, FROM))
			source.Modify(pod(name, TO))
		},
		func(name string) {
			name = "b-" + name
			source.Add(pod(name, FROM))
			source.ModifyDropWatch(pod(name, TO))
		},
		func(name string) {
			name = "c-" + name
			source.AddDropWatch(pod(name, FROM))
			source.Modify(pod(name, ADD_MISSED))
			source.Modify(pod(name, TO))
		},
		func(name string) {
			name = "d-" + name
			source.Add(pod(name, FROM))
		},
	}

	// run every test a few times, in parallel
	const threads = 3
	var wg sync.WaitGroup
	wg.Add(threads * len(tests))
	testDoneWG.Add(threads * len(tests))
	for i := 0; i < threads; i++ {
		for j, f := range tests {
			go func(name string, f func(string)) {
				defer wg.Done()
				f(name)
			}(fmt.Sprintf("%v-%v", i, j), f)
		}
	}
	wg.Wait()

	// Let's wait for the controller to process the things we just added.
	testDoneWG.Wait()
	close(stop)
}

func TestHammerController(t *testing.T) {
	// This test executes a bunch of requests through the fake source and
	// controller framework to make sure there are no locking/threading
	// errors. If such a bug exists, the test should hang forever or trigger
	// the race detector.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	outputSetLock := sync.Mutex{}
	// map of key to operations done on the key
	outputSet := map[string][]string{}

	recordFunc := func(eventType string, obj interface{}) {
		key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
		if err != nil {
			t.Errorf("something wrong with key: %v", err)
			key = "oops something went wrong with the key"
		}

		// Record some output when items are deleted.
		outputSetLock.Lock()
		defer outputSetLock.Unlock()
		outputSet[key] = append(outputSet[key], eventType)
	}

	// Make a controller which just logs all the changes it gets.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { recordFunc("add", obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
			DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
		},
	)

	if controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return false before we started the controller")
	}

	// Run the controller until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	// Let's wait for the controller to do its initial sync
	time.Sleep(100 * time.Millisecond)
	if !controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return true after the initial sync")
	}

	wg := sync.WaitGroup{}
	const threads = 3
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func() {
			defer wg.Done()
			// Let's add a few objects to the source.
			currentNames := util.StringSet{}
			rs := rand.NewSource(rand.Int63())
			f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
			r := rand.New(rs) // Mustn't use r and f concurrently!
			for i := 0; i < 100; i++ {
				var name string
				var isNew bool
				if currentNames.Len() == 0 || r.Intn(3) == 1 {
					f.Fuzz(&name)
					isNew = true
				} else {
					l := currentNames.List()
					name = l[r.Intn(len(l))]
				}

				pod := &api.Pod{}
				f.Fuzz(pod)
				pod.ObjectMeta.Name = name
				pod.ObjectMeta.Namespace = "default"
				// Add, update, or delete randomly.
				// Note that these pods are not valid-- the fake source doesn't
				// call validation or perform any other checking.
				if isNew {
					currentNames.Insert(name)
					source.Add(pod)
					continue
				}
				switch r.Intn(2) {
				case 0:
					currentNames.Insert(name)
					source.Modify(pod)
				case 1:
					currentNames.Delete(name)
					source.Delete(pod)
				}
			}
		}()
	}
	wg.Wait()

	// Let's wait for the controller to finish processing the things we just added.
	time.Sleep(100 * time.Millisecond)
	close(stop)

	outputSetLock.Lock()
	t.Logf("got: %#v", outputSet)
}

func TestUpdate(t *testing.T) {
	// This test is going to exercise the various paths that result in a
	// call to update.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	const (
		FROM = "from"
		TO   = "to"
	)

	// These are the transitions we expect to see; because this is
	// asynchronous, there are a lot of valid possibilities.
	type pair struct{ from, to string }
	allowedTransitions := map[pair]bool{
		pair{FROM, TO}: true,

		// Because a resync can happen when we've already observed one
		// of the above but before the item is deleted.
		pair{TO, TO}: true,
		// Because a resync could happen before we observe an update.
		pair{FROM, FROM}: true,
	}

	pod := func(name, check string, final bool) *api.Pod {
		p := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: map[string]string{"check": check},
			},
		}
		if final {
			p.Labels["final"] = "true"
		}
		return p
	}
	deletePod := func(p *api.Pod) bool {
		return p.Labels["final"] == "true"
	}

	tests := []func(string){
		func(name string) {
			name = "a-" + name
			source.Add(pod(name, FROM, false))
			source.Modify(pod(name, TO, true))
		},
	}

	const threads = 3

	var testDoneWG sync.WaitGroup
	testDoneWG.Add(threads * len(tests))

	// Make a controller that deletes things once it observes an update.
	// It calls Done() on the wait group on deletions so we can tell when
	// everything we've added has been deleted.
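	// watchCh is closed by the wrapped WatchFunc below once the informer has
	// established its watch, so the test does not start mutating the source
	// before events can be observed.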
	watchCh := make(chan struct{})
	_, controller := framework.NewInformer(
		&testLW{
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				watch, err := source.Watch(options)
				close(watchCh)
				return watch, err
			},
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return source.List(options)
			},
		},
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			UpdateFunc: func(oldObj, newObj interface{}) {
				o, n := oldObj.(*api.Pod), newObj.(*api.Pod)
				from, to := o.Labels["check"], n.Labels["check"]
				if !allowedTransitions[pair{from, to}] {
					t.Errorf("observed transition %q -> %q for %v", from, to, n.Name)
				}
				if deletePod(n) {
					source.Delete(n)
				}
			},
			DeleteFunc: func(obj interface{}) {
				testDoneWG.Done()
			},
		},
	)

	// Run the controller until we close stop.
	// Once Run() is called, calls to testDoneWG.Done() might start, so all
	// testDoneWG.Add() calls must happen before this point.
	stop := make(chan struct{})
	go controller.Run(stop)
	<-watchCh

	// run every test a few times, in parallel
	var wg sync.WaitGroup
	wg.Add(threads * len(tests))
	for i := 0; i < threads; i++ {
		for j, f := range tests {
			go func(name string, f func(string)) {
				defer wg.Done()
				f(name)
			}(fmt.Sprintf("%v-%v", i, j), f)
		}
	}
	wg.Wait()

	// Let's wait for the controller to process the things we just added.
	testDoneWG.Wait()
	close(stop)
}

// Test the real controller methods (add/update/delete claim/volume) with
// a fake API server.
// There is no controller API to 'initiate syncAll now', therefore these tests
// can't reliably simulate a periodic sync of volumes/claims - waiting for a
// real periodic sync would be either very timing-sensitive or slow.
func TestControllerSync(t *testing.T) {
	expectedChanges := []int{4, 1, 1, 2, 1, 1, 1}
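	// expectedChanges[ix] is the number of reactor-visible object changes
	// test ix should produce; the loop at the end of each iteration waits
	// until that many changes have been counted.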
	tests := []controllerTest{
		// [Unit test set 5] - controller tests.
		// We test the controller as if it were connected to a real API server,
		// i.e. we call the add/update/delete Claim/Volume methods. Also, all
		// changes to volumes and claims are sent to add/update/delete
		// Claim/Volume as a real controller would do.
		{
			// addClaim gets a new claim. Check it's bound to a volume.
			"5-2 - complete bind",
			newVolumeArray("volume5-2", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
			newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
			noclaims, /* added in testAddClaim5_2 */
			newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", api.ClaimBound, annBoundByController, annBindCompleted),
			noevents, noerrors,
			// Custom test function that generates an add event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				claim := newClaim("claim5-2", "uid5-2", "1Gi", "", api.ClaimPending)
				reactor.addClaimEvent(claim)
				return nil
			},
		},
		{
			// deleteClaim with a bound claim makes the bound volume released.
			"5-3 - delete claim",
			newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
			newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain, annBoundByController),
			newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", api.ClaimBound, annBoundByController, annBindCompleted),
			noclaims,
			noevents, noerrors,
			// Custom test function that generates a delete event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				obj := ctrl.claims.List()[0]
				claim := obj.(*api.PersistentVolumeClaim)
				reactor.deleteClaimEvent(claim)
				return nil
			},
		},
		{
			// deleteVolume with a bound volume. Check the claim is Lost.
			"5-4 - delete volume",
			newVolumeArray("volume5-4", "10Gi", "uid5-4", "claim5-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
			novolumes,
			newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimBound, annBoundByController, annBindCompleted),
			newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimLost, annBoundByController, annBindCompleted),
			[]string{"Warning ClaimLost"}, noerrors,
			// Custom test function that generates a delete event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				obj := ctrl.volumes.store.List()[0]
				volume := obj.(*api.PersistentVolume)
				reactor.deleteVolumeEvent(volume)
				return nil
			},
		},
		{
			// addVolume with provisioned volume from Kubernetes 1.2. No "action"
			// is expected - it should stay bound.
			"5-5 - add bound volume from 1.2",
			novolumes,
			[]*api.PersistentVolume{addVolumeAnnotation(newVolume("volume5-5", "10Gi", "uid5-5", "claim5-5", api.VolumeBound, api.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
			newClaimArray("claim5-5", "uid5-5", "1Gi", "", api.ClaimPending),
			newClaimArray("claim5-5", "uid5-5", "1Gi", "volume5-5", api.ClaimBound, annBindCompleted, annBoundByController),
			noevents, noerrors,
			// Custom test function that generates an add event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-5", "10Gi", "uid5-5", "claim5-5", api.VolumeBound, api.PersistentVolumeReclaimDelete)
				volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)
				reactor.addVolumeEvent(volume)
				return nil
			},
		},
		{
			// updateVolume with provisioned volume from Kubernetes 1.2. No
			// "action" is expected - it should stay bound.
			"5-6 - update bound volume from 1.2",
			[]*api.PersistentVolume{addVolumeAnnotation(newVolume("volume5-6", "10Gi", "uid5-6", "claim5-6", api.VolumeBound, api.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
			[]*api.PersistentVolume{addVolumeAnnotation(newVolume("volume5-6", "10Gi", "uid5-6", "claim5-6", api.VolumeBound, api.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
			newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", api.ClaimBound),
			newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", api.ClaimBound, annBindCompleted),
			noevents, noerrors,
			// Custom test function that generates an update event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-6", "10Gi", "uid5-6", "claim5-6", api.VolumeBound, api.PersistentVolumeReclaimDelete)
				volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)
				reactor.modifyVolumeEvent(volume)
				return nil
			},
		},
		{
			// addVolume with unprovisioned volume from Kubernetes 1.2. The
			// volume should be deleted.
			"5-7 - add unprovisioned volume from 1.2",
			novolumes,
			novolumes,
			newClaimArray("claim5-7", "uid5-7", "1Gi", "", api.ClaimPending),
			newClaimArray("claim5-7", "uid5-7", "1Gi", "", api.ClaimPending),
			noevents, noerrors,
			// Custom test function that generates an add event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-7", "10Gi", "uid5-7", "claim5-7", api.VolumeBound, api.PersistentVolumeReclaimDelete)
				volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, "yes")
				reactor.addVolumeEvent(volume)
				return nil
			},
		},
		{
			// updateVolume with unprovisioned volume from Kubernetes 1.2. The
			// volume should be deleted.
			"5-8 - update bound volume from 1.2",
			novolumes,
			novolumes,
			newClaimArray("claim5-8", "uid5-8", "1Gi", "", api.ClaimPending),
			newClaimArray("claim5-8", "uid5-8", "1Gi", "", api.ClaimPending),
			noevents, noerrors,
			// Custom test function that generates an update event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-8", "10Gi", "uid5-8", "claim5-8", api.VolumeBound, api.PersistentVolumeReclaimDelete)
				volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, "yes")
				reactor.modifyVolumeEvent(volume)
				return nil
			},
		},
	}

	for ix, test := range tests {
		glog.V(4).Infof("starting test %q", test.name)

		// Initialize the controller
		client := &fake.Clientset{}
		volumeSource := framework.NewFakeControllerSource()
		claimSource := framework.NewFakeControllerSource()
		ctrl := newTestController(client, volumeSource, claimSource, nil, true)
		reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
		for _, claim := range test.initialClaims {
			claimSource.Add(claim)
			reactor.claims[claim.Name] = claim
		}
		for _, volume := range test.initialVolumes {
			volumeSource.Add(volume)
			reactor.volumes[volume.Name] = volume
		}

		// Start the controller
		count := reactor.getChangeCount()
		go ctrl.Run()

		// Wait for the controller to pass initial sync and fill its caches.
		for !ctrl.volumeController.HasSynced() ||
			!ctrl.claimController.HasSynced() ||
			len(ctrl.claims.ListKeys()) < len(test.initialClaims) ||
			len(ctrl.volumes.store.ListKeys()) < len(test.initialVolumes) {

			time.Sleep(10 * time.Millisecond)
		}
		glog.V(4).Infof("controller synced, starting test")

		// Call the tested function
		err := test.test(ctrl, reactor, test)
		if err != nil {
			t.Errorf("Test %q initial test call failed: %v", test.name, err)
		}
		// Simulate a periodic resync, just in case some events arrived in the
		// wrong order.
		ctrl.claims.Resync()
		ctrl.volumes.store.Resync()

		// Wait at least once, just in case expectedChanges[ix] == 0
		reactor.waitTest()
		// Wait for expected number of operations.
		for reactor.getChangeCount() < count+expectedChanges[ix] {
			reactor.waitTest()
		}

		ctrl.Stop()

		evaluateTestResults(ctrl, reactor, test, t)
	}
}

// Test the real controller methods (add/update/delete claim/volume) with
// a fake API server.
// There is no controller API to 'initiate syncAll now', therefore these tests
// can't reliably simulate a periodic sync of volumes/claims - waiting for a
// real periodic sync would be either very timing-sensitive or slow.
func TestControllerSync(t *testing.T) {
	expectedChanges := []int{1, 4, 1, 1}
	tests := []controllerTest{
		// [Unit test set 5] - controller tests.
		// We test the controller as if it were connected to a real API server,
		// i.e. we call the add/update/delete Claim/Volume methods. Also, all
		// changes to volumes and claims are sent to add/update/delete
		// Claim/Volume as a real controller would do.
		{
			// addVolume gets a new volume. Check it's marked as Available and
			// that it's not bound to any claim - we bind volumes on periodic
			// syncClaim, not on addVolume.
			"5-1 - addVolume",
			novolumes, /* added in testCall below */
			newVolumeArray("volume5-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
			newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending),
			newClaimArray("claim5-1", "uid5-1", "1Gi", "", api.ClaimPending),
			noevents, noerrors,
			// Custom test function that generates an add event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				volume := newVolume("volume5-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)
				reactor.volumes[volume.Name] = volume
				reactor.volumeSource.Add(volume)
				return nil
			},
		},
		{
			// addClaim gets a new claim. Check it's bound to a volume.
			"5-2 - complete bind",
			newVolumeArray("volume5-2", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
			newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
			noclaims, /* added in testAddClaim5_2 */
			newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", api.ClaimBound, annBoundByController, annBindCompleted),
			noevents, noerrors,
			// Custom test function that generates an add event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				claim := newClaim("claim5-2", "uid5-2", "1Gi", "", api.ClaimPending)
				reactor.claims[claim.Name] = claim
				reactor.claimSource.Add(claim)
				return nil
			},
		},
		{
			// deleteClaim with a bound claim makes the bound volume released.
			"5-3 - delete claim",
			newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
			newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain, annBoundByController),
			newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", api.ClaimBound, annBoundByController, annBindCompleted),
			noclaims,
			noevents, noerrors,
			// Custom test function that generates a delete event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				obj := ctrl.claims.List()[0]
				claim := obj.(*api.PersistentVolumeClaim)
				// Remove the claim from list of resulting claims.
				delete(reactor.claims, claim.Name)
				// Poke the controller with a deletion event. A cloned claim
				// is needed to prevent races (and we would get a clone from
				// etcd too).
				clone, _ := conversion.NewCloner().DeepCopy(claim)
				claimClone := clone.(*api.PersistentVolumeClaim)
				reactor.claimSource.Delete(claimClone)
				return nil
			},
		},
		{
			// deleteVolume with a bound volume. Check the claim is Lost.
			"5-4 - delete volume",
			newVolumeArray("volume5-4", "10Gi", "uid5-4", "claim5-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
			novolumes,
			newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimBound, annBoundByController, annBindCompleted),
			newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimLost, annBoundByController, annBindCompleted),
			[]string{"Warning ClaimLost"}, noerrors,
			// Custom test function that generates a delete event
			func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
				obj := ctrl.volumes.store.List()[0]
				volume := obj.(*api.PersistentVolume)
				// Remove the volume from list of resulting volumes.
				delete(reactor.volumes, volume.Name)
				// Poke the controller with a deletion event. A cloned volume
				// is needed to prevent races (and we would get a clone from
				// etcd too).
				clone, _ := conversion.NewCloner().DeepCopy(volume)
				volumeClone := clone.(*api.PersistentVolume)
				reactor.volumeSource.Delete(volumeClone)
				return nil
			},
		},
	}

	for ix, test := range tests {
		glog.V(4).Infof("starting test %q", test.name)

		// Initialize the controller
		client := &fake.Clientset{}
		volumeSource := framework.NewFakeControllerSource()
		claimSource := framework.NewFakeControllerSource()
		ctrl := newTestController(client, volumeSource, claimSource, nil, true)
		reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
		for _, claim := range test.initialClaims {
			claimSource.Add(claim)
			reactor.claims[claim.Name] = claim
		}
		for _, volume := range test.initialVolumes {
			volumeSource.Add(volume)
			reactor.volumes[volume.Name] = volume
		}

		// Start the controller
		defer ctrl.Stop()
		go ctrl.Run()

		// Wait for the controller to pass initial sync.
		for !ctrl.isFullySynced() {
			time.Sleep(10 * time.Millisecond)
		}

		count := reactor.getChangeCount()

		// Call the tested function
		err := test.test(ctrl, reactor, test)
		if err != nil {
			t.Errorf("Test %q initial test call failed: %v", test.name, err)
		}

		for reactor.getChangeCount() < count+expectedChanges[ix] {
			reactor.waitTest()
		}

		evaluateTestResults(ctrl, reactor, test, t)
	}
}