Example 1
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        firstLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()

	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        secondLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()

	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")

	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a third unschedulable node and verify it doesn't get scheduled by anything
	applicator.SetLabel(labels.NODE, "node3", pc_fields.AvailabilityZoneLabel, "undefined")

	labeled, err = waitForPodLabel(applicator, false, "node3/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	// Now add 10 new nodes and verify that they are scheduled by the first daemon set
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, pc_fields.AvailabilityZoneLabel, "az1")
		Assert(t).IsNil(err, "expected no error labeling node")
	}
	for i := 0; i < 10; i++ {
		podPath := fmt.Sprintf("good_node%v/testPod", i)
		labeled, err = waitForPodLabel(applicator, true, podPath)
		Assert(t).IsNil(err, "Expected pod to have a dsID label")
		dsID := labeled.Labels.Get(DSIDLabel)
		Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
	}

	//
	// Update a daemon set's node selector and expect a node to be unscheduled
	//
	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	//
	// Now update the node selector to schedule node2 again and verify
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Disabling a daemon set should not unschedule any nodes
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true

		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify it is disabled
	condition := func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if !anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be disabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if !firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if !secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be disabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Enabling the daemon set should make it resume its regular activities
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")

	// Verify it is enabled
	condition = func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be enabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be enabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error enabling daemon set!")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")
}
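
The tests above lean on a few polling helpers from the surrounding test package. As a point of reference, here is a minimal sketch of what waitForCondition could look like, assuming a fixed poll interval and overall deadline (both values are guesses, not the package's actual settings); it uses only the time and util packages this file already depends on.

func waitForCondition(condition func() error) error {
	timeout := time.After(10 * time.Second) // assumed deadline, not the package's real value
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	err := condition()
	for err != nil {
		select {
		case <-timeout:
			// Surface the last condition error so a timeout is debuggable.
			return util.Errorf("timed out waiting for condition: %v", err)
		case <-ticker.C:
			err = condition()
		}
	}
	return nil
}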
Example 2
func TestCleanupPods(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	// Make some dangling pod labels, then instantiate a farm and expect it to clean them up
	podID := types.PodID("testPod")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	var allNodes []types.NodeName
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
		Assert(t).IsNil(err, "Expected no error labeling node")

		_, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
		Assert(t).IsNil(err, "Expected no error added pod to intent tree")
	}

	// Assert that precondition is true
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		labeled, err := applicator.GetLabels(labels.POD, id)
		Assert(t).IsNil(err, "Expected no error getting labels")
		Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

		_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
		Assert(t).IsNil(err, "Expected no error getting pod from intent store")
		Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
	}

	// Instantiate farm
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "cleanupPods",
	})
	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	// Verify that no pods are left on any node
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		_, err := waitForPodLabel(applicator, false, id)
		Assert(t).IsNil(err, "Expected pod not to have a dsID label")

		condition := func() error {
			_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
			if err != pods.NoCurrentManifest {
				return util.Errorf("Expected pod to be deleted in intent store")
			}
			return nil
		}
		err = waitForCondition(condition)
		Assert(t).IsNil(err, "Error cleaning up pods")
	}
}
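
waitForPodLabel's contract can be inferred from its call sites: it blocks until the pod's label set does (or does not) contain DSIDLabel and returns the labels last seen. A hedged sketch in terms of waitForCondition; the labels.Applicator parameter type is assumed from how the fake applicator is used in these tests.

func waitForPodLabel(applicator labels.Applicator, hasDSIDLabel bool, id string) (labels.Labeled, error) {
	var labeled labels.Labeled
	condition := func() error {
		var err error
		labeled, err = applicator.GetLabels(labels.POD, id)
		if err != nil {
			return err
		}
		if labeled.Labels.Has(DSIDLabel) != hasDSIDLabel {
			return util.Errorf("expected pod %s to have DSIDLabel == %t", id, hasDSIDLabel)
		}
		return nil
	}
	// Assign before returning so the caller sees the labels observed last.
	err := waitForCondition(condition)
	return labeled, err
}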
Example 3
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendNodes",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1")
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Check for contention between two daemon sets among their nodes
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, dsData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Make a node and verify that it was scheduled
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make another daemon set with a contending AvailabilityZoneLabel and verify
	// that it gets disabled and that the node label does not change
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, anotherDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	// Expect the new daemon set to be disabled both in the farm and in the dsStore
	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	//
	// Make a third daemon set and update its node selector to force a contend,
	// then verify that it has been disabled and the node hasn't been overwritten
	//
	anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
	badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, badDS.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.NodeSelector = nodeSelector
		return dsToUpdate, nil
	}

	badDS, err = dsStore.MutateDS(badDS.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelector(dsf, badDS)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	err = waitForDisabled(dsf, dsStore, badDS.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")
}
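
waitForCreate, called after each dsStore.Create above, presumably just waits until the farm has spawned a child for the new daemon set. A plausible sketch; note it reads dsf.children without synchronization, which is an assumption about what these single-farm tests tolerate.

func waitForCreate(dsf *Farm, dsID ds_fields.ID) error {
	condition := func() error {
		// Assumes unsynchronized access to dsf.children is acceptable in tests.
		if _, ok := dsf.children[dsID]; !ok {
			return util.Errorf("farm has not yet picked up daemon set %s", dsID)
		}
		return nil
	}
	return waitForCondition(condition)
}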
Example 4
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Make two daemon sets with an everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")

	// Add another daemon set with different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Disable first daemon set, then enable second and third daemon sets in that order
	// and then there should be a contend on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}

	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}

	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")

	// Enable second ds and verify it is enabled
	_, err = dsStore.MutateDS(secondDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling second daemon set")

	// Enable the third ds and verify it gets disabled because it contends with the second ds
	_, err = dsStore.MutateDS(thirdDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Test equivalent selectors, fifth ds should contend with fourth
	//
	anotherPodID := types.PodID("anotherPodID")

	anotherManifestBuilder := manifest.NewBuilder()
	anotherManifestBuilder.SetID(anotherPodID)
	anotherPodManifest := anotherManifestBuilder.GetManifest()

	equalSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})

	fourthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fourthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify the fourth daemon set is enabled
	err = waitForDisabled(dsf, dsStore, fourthDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling fourth daemon set")

	fifthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fifthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that the fifth daemon set contends and gets disabled
	err = waitForDisabled(dsf, dsStore, fifthDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling fifth daemon set")
}
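
waitForDisabled takes an expected disabled state and, judging by how the tests use it, needs to agree with both the store's record and the farm's child. A sketch under those assumptions; the concrete *dsstoretest.FakeDSStore type name is a guess based on dsstoretest.NewFake().

func waitForDisabled(dsf *Farm, dsStore *dsstoretest.FakeDSStore, dsID ds_fields.ID, isDisabled bool) error {
	condition := func() error {
		dsData, _, err := dsStore.Get(dsID)
		if err != nil {
			return util.Errorf("error getting daemon set: %v", err)
		}
		if dsData.Disabled != isDisabled {
			return util.Errorf("expected Disabled == %t in store, got %t", isDisabled, dsData.Disabled)
		}
		child, ok := dsf.children[dsID]
		if !ok {
			return util.Errorf("farm does not hold daemon set %s", dsID)
		}
		if child.ds.IsDisabled() != isDisabled {
			return util.Errorf("expected Disabled == %t in farm", isDisabled)
		}
		return nil
	}
	return waitForCondition(condition)
}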
Example 5
func TestPublishToReplication(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 1
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")

	//
	// Adds a watch that will automatically send a signal when a change was made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that 2 pods have been scheduled
	//
	numNodes := waitForNodes(t, ds, 2, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 2, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 2, "expected two nodes to have been labeled")

	// Mutate the daemon set so that the nodes are unscheduled; this should not produce an error
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"bad"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	select {
	case <-time.After(1 * time.Second):
	case err := <-desiresErrCh:
		t.Fatalf("Unexpected error unscheduling pod: %v", err)
	}
	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")
}
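
waitForNodes reports how many nodes the daemon set covers once it reaches (or times out short of) the desired count, watching both error channels along the way. A hedged sketch; CurrentPods as the accessor for the scheduled node set is an assumption inferred from the daemon set's usage in these tests.

func waitForNodes(t *testing.T, ds *daemonSet, desired int, desiresErrCh <-chan error, dsChangesErrCh <-chan error) int {
	timeout := time.After(10 * time.Second) // assumed deadline
	podLocations, err := ds.CurrentPods()
	Assert(t).IsNil(err, "expected no error getting current pods")

	for len(podLocations) != desired {
		select {
		case <-timeout:
			// Let the caller assert on the count it actually reached.
			return len(podLocations)
		case err := <-desiresErrCh:
			if err != nil {
				t.Fatalf("error watching desires: %v", err)
			}
		case err := <-dsChangesErrCh:
			if err != nil {
				t.Fatalf("error watching daemon set changes: %v", err)
			}
		case <-time.After(100 * time.Millisecond):
			podLocations, err = ds.CurrentPods()
			Assert(t).IsNil(err, "expected no error getting current pods")
		}
	}
	return len(podLocations)
}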
Example 6
// TestSchedule checks consecutive scheduling and unscheduling for:
//	- creation of a daemon set
//	- different node selectors
//	- changes to node allocations
//	- mutations to a daemon set
//	- deleting a daemon set
func TestSchedule(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "nodeOk")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("bad_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")

	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "bad")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node2")

	//
	// Adds a watch that will automatically send a signal when a change was made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that the pod has been scheduled
	//
	numNodes := waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 1, "expected a node to have been labeled")
	Assert(t).AreEqual(scheduled[0].ID, "node2/testPod", "expected node labeled with the daemon set's id")

	// Verify that the scheduled pod is correct
	err = waitForSpecificPod(kpStore, "node2", types.PodID("testPod"))
	Assert(t).IsNil(err, "Unexpected pod scheduled")

	//
	// Add 10 good nodes and 10 bad nodes then verify
	//
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, "nodeQuality", "good")
		Assert(t).IsNil(err, "expected no error labeling node")
	}

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("bad_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, "nodeQuality", "bad")
		Assert(t).IsNil(err, "expected no error labeling node")
	}

	// The node watch should automatically notice a change
	numNodes = waitForNodes(t, ds, 11, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 11, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 11, "expected 11 nodes to have been labeled")

	//
	// Add a node with the labels nodeQuality=good and cherry=pick
	//
	err = applicator.SetLabel(labels.NODE, "nodeOk", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling nodeOk")
	err = applicator.SetLabel(labels.NODE, "nodeOk", "cherry", "pick")
	Assert(t).IsNil(err, "expected no error labeling nodeOk")

	numNodes = waitForNodes(t, ds, 12, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 12, "took too long to schedule")

	// Schedule only a node that is both nodeQuality=good and cherry=pick
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"good"}).
			Add("cherry", klabels.EqualsOperator, []string{"pick"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	numNodes = waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to schedule")

	// Verify that the scheduled pod is correct
	err = waitForSpecificPod(kpStore, "nodeOk", types.PodID("testPod"))
	Assert(t).IsNil(err, "Unexpected pod scheduled")

	//
	// Disabling the daemon set and making a change should not do anything
	//
	mutator = func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.Disabled = true
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	numNodes = waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to unschedule")

	//
	// Now re-enable it and try to schedule everything
	//
	mutator = func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything()
		dsToChange.Disabled = false
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	// 11 good nodes, 11 bad nodes, and 1 cherry-picked good node = 23 nodes
	numNodes = waitForNodes(t, ds, 23, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 23, "took too long to schedule")

	//
	// Deleting the daemon set should unschedule all of its nodes
	//
	ds.logger.NoFields().Info("Deleting daemon set...")
	err = dsStore.Delete(ds.ID())
	if err != nil {
		t.Fatalf("Unable to delete daemon set: %v", err)
	}
	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected all nodes to have been unlabeled")

	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")
}
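
Finally, hedged sketches of the two intent-store helpers used by the last two tests, inferred from their call sites; AllPods as the fake pod store's listing call is an assumption about its API.

func waitForPodsInIntent(kpStore *kptest.FakePodStore, numPods int) error {
	condition := func() error {
		manifestResults, _, err := kpStore.AllPods(kp.INTENT_TREE)
		if err != nil {
			return util.Errorf("error getting all pods from pod store: %v", err)
		}
		if len(manifestResults) != numPods {
			return util.Errorf("expected %d pods in intent, got %d", numPods, len(manifestResults))
		}
		return nil
	}
	return waitForCondition(condition)
}

func waitForSpecificPod(kpStore *kptest.FakePodStore, nodeName types.NodeName, podID types.PodID) error {
	condition := func() error {
		podManifest, _, err := kpStore.Pod(kp.INTENT_TREE, nodeName, podID)
		if err != nil {
			// Covers pods.NoCurrentManifest as well as store errors.
			return util.Errorf("error getting pod from pod store: %v", err)
		}
		if podManifest.ID() != podID {
			return util.Errorf("expected pod %s on node %s, got %s", podID, nodeName, podManifest.ID())
		}
		return nil
	}
	return waitForCondition(condition)
}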