Example #1
func consulStoreWithFakeKV() *consulStore {
	return &consulStore{
		kv:         consulutil.NewFakeClient().KV(),
		applicator: labels.NewFakeApplicator(),
		logger:     logging.DefaultLogger,
	}
}
Example #2
func TestWouldWorkOn(t *testing.T) {
	fakeLabels := labels.NewFakeApplicator()
	fakeLabels.SetLabel(labels.RC, "abc-123", "color", "red")
	fakeLabels.SetLabel(labels.RC, "def-456", "color", "blue")

	f := &Farm{
		labeler:    fakeLabels,
		rcSelector: klabels.Everything().Add("color", klabels.EqualsOperator, []string{"red"}),
	}

	workOn, err := f.shouldWorkOn(rc_fields.ID("abc-123"))
	Assert(t).IsNil(err, "should not have erred on abc-123")
	Assert(t).IsTrue(workOn, "should have worked on abc-123, but didn't")

	dontWorkOn, err := f.shouldWorkOn(rc_fields.ID("def-456"))
	Assert(t).IsNil(err, "should not have erred on def-456")
	Assert(t).IsFalse(dontWorkOn, "should not have worked on def-456, but did")

	dontWorkOn, err = f.shouldWorkOn(rc_fields.ID("987-cba"))
	Assert(t).IsNil(err, "should not have erred on 987-cba")
	Assert(t).IsFalse(dontWorkOn, "should not have worked on 987-cba, but did")

	f.rcSelector = klabels.Everything()

	workOn, err = f.shouldWorkOn(rc_fields.ID("def-456"))
	Assert(t).IsNil(err, "should not have erred on def-456")
	Assert(t).IsTrue(workOn, "should have worked on def-456, but didn't")
}
Example #3
func setup(t *testing.T) (
	rcStore rcstore.Store,
	kpStore fakeKpStore,
	applicator labels.Applicator,
	rc ReplicationController) {

	rcStore = rcstore.NewFake()

	manifestBuilder := pods.NewManifestBuilder()
	manifestBuilder.SetID("testPod")
	manifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	podLabels := map[string]string{"podTest": "successful"}

	rcData, err := rcStore.Create(manifest, nodeSelector, podLabels)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore = fakeKpStore{manifests: make(map[string]pods.Manifest)}
	applicator = labels.NewFakeApplicator()

	rc = New(
		rcData,
		&kpStore,
		rcStore,
		NewApplicatorScheduler(applicator),
		applicator,
		logging.DefaultLogger,
	)

	return
}
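The fakeKpStore type returned by setup is referenced above but never defined in this excerpt. A minimal in-memory stand-in consistent with that usage might look like the sketch below; the SetPod/DeletePod method names and signatures are assumptions about what the replication controller needs, not the actual interface.

// Hypothetical sketch of fakeKpStore: an in-memory pod store keyed by
// "<tree>/<node>/<podID>". Signatures are assumed, not authoritative.
type fakeKpStore struct {
	manifests map[string]pods.Manifest
}

func (s *fakeKpStore) SetPod(podPrefix kp.PodPrefix, nodeName types.NodeName, man pods.Manifest) (time.Duration, error) {
	key := path.Join(string(podPrefix), string(nodeName), string(man.ID()))
	s.manifests[key] = man
	return 0, nil
}

func (s *fakeKpStore) DeletePod(podPrefix kp.PodPrefix, nodeName types.NodeName, podID types.PodID) (time.Duration, error) {
	key := path.Join(string(podPrefix), string(nodeName), string(podID))
	delete(s.manifests, key)
	return 0, nil
}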
Example #4
func newRollStore(t *testing.T, entries []fields.Update) consulStore {
	storeFields := make(map[string]*api.KVPair)
	for _, u := range entries {
		path, err := RollPath(fields.ID(u.NewRC))
		if err != nil {
			t.Fatalf("Unable to create roll store for test: %s", err)
		}
		jsonBytes, err := json.Marshal(u)
		if err != nil {
			t.Fatalf("Unable to marshal test field as JSON: %s", err)
		}
		storeFields[path] = &api.KVPair{
			Key:   path,
			Value: jsonBytes,
		}
	}
	return consulStore{
		kv: &consulutil.FakeKV{
			Entries: storeFields,
		},
		store:   kptest.NewFakePodStore(nil, nil),
		rcstore: rcstore.NewFake(),
		labeler: labels.NewFakeApplicator(),
	}
}
Example #5
func TestNewConsul(t *testing.T) {
	store := NewConsul(kp.NewConsulClient(kp.Options{}), labels.NewFakeApplicator(), nil)
	rollstore := store.(consulStore)
	if rollstore.kv == nil {
		t.Fatal("kv should not be nil for constructed rollstore")
	}

	if rollstore.rcstore == nil {
		t.Fatal("rcstore should not be nil for constructed rollstore")
	}

	if rollstore.labeler == nil {
		t.Fatal("labeler should not be nil for constructed rollstore")
	}

	if rollstore.store == nil {
		t.Fatal("store should not be nil for constructed rollstore")
	}
}
Example #6
func updateWithHealth(t *testing.T,
	desiredOld, desiredNew int,
	oldNodes, newNodes map[types.NodeName]bool,
	checks map[types.NodeName]health.Result,
) (update, manifest.Manifest, manifest.Manifest) {
	podID := "mypod"

	oldManifest := podWithIDAndPort(podID, 9001)
	newManifest := podWithIDAndPort(podID, 9002)

	podMap := map[kptest.FakePodStoreKey]manifest.Manifest{}
	assignManifestsToNodes(types.PodID(podID), oldNodes, podMap, oldManifest, newManifest)
	assignManifestsToNodes(types.PodID(podID), newNodes, podMap, newManifest, oldManifest)
	kps := kptest.NewFakePodStore(podMap, nil)

	rcs := rcstore.NewFake()
	applicator := labels.NewFakeApplicator()

	oldRC, err := createRC(rcs, applicator, oldManifest, desiredOld, oldNodes)
	Assert(t).IsNil(err, "expected no error setting up old RC")

	newRC, err := createRC(rcs, applicator, newManifest, desiredNew, newNodes)
	Assert(t).IsNil(err, "expected no error setting up new RC")

	return update{
		kps:     kps,
		rcs:     rcs,
		hcheck:  checkertest.NewSingleService(podID, checks),
		labeler: applicator,
		logger:  logging.TestLogger(),
		Update: fields.Update{
			OldRC: oldRC.ID,
			NewRC: newRC.ID,
		},
	}, oldManifest, newManifest
}
Example #7
func TestPublishLatestRCsWithLockInfoWithLocks(t *testing.T) {
	client := consulutil.NewFakeClient()
	fakeKV := client.KV()
	rcstore := NewConsul(client, labels.NewFakeApplicator(), 1)

	inCh := make(chan []fields.RC)
	defer close(inCh)
	quitCh := make(chan struct{})
	defer close(quitCh)

	lockResultCh, errCh := rcstore.publishLatestRCsWithLockInfo(inCh, quitCh)
	go func() {
		for err := range errCh {
			t.Fatalf("Unexpected error on errCh: %s", err)
		}
	}()

	// Create a test case with 2 RCs: one locked for mutation, the other for ownership
	lockedCase := LockInfoTestCase{
		InputRCs: []fields.RC{{ID: "abc"}, {ID: "123"}},
		ExpectedOutput: []RCLockResult{
			{
				RC:                fields.RC{ID: "abc"},
				LockedForMutation: true,
			},
			{
				RC:                 fields.RC{ID: "123"},
				LockedForOwnership: true,
			},
		},
	}

	ownershipLockPath, err := rcstore.ownershipLockPath("123")
	if err != nil {
		t.Fatalf("Unable to compute ownership lock path: %s", err)
	}
	_, _, err = fakeKV.Acquire(&api.KVPair{
		Key: ownershipLockPath,
	}, nil)
	if err != nil {
		t.Fatalf("Unable to lock for ownership: %s", err)
	}

	mutationLockPath, err := rcstore.mutationLockPath("abc")
	if err != nil {
		t.Fatalf("Unable to compute mutation lock path: %s", err)
	}
	_, _, err = fakeKV.Acquire(&api.KVPair{
		Key: mutationLockPath,
	}, nil)
	if err != nil {
		t.Fatalf("Unable to lock for mutation: %s", err)
	}

	verifyLockInfoTestCase(t, lockedCase, inCh, lockResultCh)

	// Add an update creation lock to the second one
	lockedCase2 := LockInfoTestCase{
		InputRCs: []fields.RC{{ID: "abc"}, {ID: "123"}},
		ExpectedOutput: []RCLockResult{
			{
				RC:                fields.RC{ID: "abc"},
				LockedForMutation: true,
			},
			{
				RC:                      fields.RC{ID: "123"},
				LockedForOwnership:      true,
				LockedForUpdateCreation: true,
			},
		},
	}

	updateCreationLockPath, err := rcstore.updateCreationLockPath("123")
	if err != nil {
		t.Fatalf("Unable to compute update creation lock path: %s", err)
	}
	_, _, err = fakeKV.Acquire(&api.KVPair{
		Key: updateCreationLockPath,
	}, nil)
	if err != nil {
		t.Fatalf("Unable to lock for updateCreation: %s", err)
	}

	verifyLockInfoTestCase(t, lockedCase2, inCh, lockResultCh)
}
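LockInfoTestCase and verifyLockInfoTestCase are used above and in the next example but are not shown. A sketch consistent with how they are called follows; the field names come from the struct literals above, while the one-second timeouts, the assumption that lockResultCh delivers whole []RCLockResult batches, and the unordered comparison by RC ID are guesses rather than the actual helper.

// Hypothetical sketch; assumes one []RCLockResult batch arrives per input batch.
type LockInfoTestCase struct {
	InputRCs       []fields.RC
	ExpectedOutput []RCLockResult
}

func verifyLockInfoTestCase(
	t *testing.T,
	tc LockInfoTestCase,
	inCh chan<- []fields.RC,
	lockResultCh <-chan []RCLockResult,
) {
	select {
	case inCh <- tc.InputRCs:
	case <-time.After(1 * time.Second):
		t.Fatal("Timed out writing input RCs")
	}

	select {
	case results := <-lockResultCh:
		if len(results) != len(tc.ExpectedOutput) {
			t.Fatalf("Expected %d lock results, got %d", len(tc.ExpectedOutput), len(results))
		}
		// Output order is not assumed; match results to expectations by RC ID.
		expected := make(map[string]RCLockResult)
		for _, want := range tc.ExpectedOutput {
			expected[string(want.RC.ID)] = want
		}
		for _, got := range results {
			want, ok := expected[string(got.RC.ID)]
			if !ok {
				t.Errorf("Unexpected RC %s in lock results", got.RC.ID)
				continue
			}
			if got.LockedForOwnership != want.LockedForOwnership ||
				got.LockedForMutation != want.LockedForMutation ||
				got.LockedForUpdateCreation != want.LockedForUpdateCreation {
				t.Errorf("Lock info mismatch for RC %s: got %+v, want %+v", got.RC.ID, got, want)
			}
		}
	case <-time.After(1 * time.Second):
		t.Fatal("Timed out waiting for lock results")
	}
}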
Example #8
func TestPublishLatestRCsWithLockInfoNoLocks(t *testing.T) {
	client := consulutil.NewFakeClient()
	rcstore := NewConsul(client, labels.NewFakeApplicator(), 1)

	inCh := make(chan []fields.RC)
	defer close(inCh)
	quitCh := make(chan struct{})
	defer close(quitCh)

	lockResultCh, errCh := rcstore.publishLatestRCsWithLockInfo(inCh, quitCh)
	go func() {
		for err := range errCh {
			t.Fatalf("Unexpected error on errCh: %s", err)
		}
	}()

	// Create a test case with 2 RCs with no locks
	unlockedCase := LockInfoTestCase{
		InputRCs: []fields.RC{{ID: "abc"}, {ID: "123"}},
		ExpectedOutput: []RCLockResult{
			{
				RC: fields.RC{ID: "abc"},
			},
			{
				RC: fields.RC{ID: "123"},
			},
		},
	}

	verifyLockInfoTestCase(t, unlockedCase, inCh, lockResultCh)

	// Write the same input once more without reading the channel and make
	// sure that doesn't cause timeouts
	select {
	case inCh <- unlockedCase.InputRCs:
	case <-time.After(1 * time.Second):
		t.Fatalf("Timed out writing to input channel")
	}

	// create a new case
	unlockedCase2 := LockInfoTestCase{
		InputRCs: []fields.RC{
			{ID: "abc"},
			{ID: "123"},
			{ID: "456"},
		},
		ExpectedOutput: []RCLockResult{
			{
				RC: fields.RC{ID: "abc"},
			},
			{
				RC: fields.RC{ID: "123"},
			},
			{
				RC: fields.RC{ID: "456"},
			},
		},
	}

	verifyLockInfoTestCase(t, unlockedCase2, inCh, lockResultCh)
}
Example #9
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        firstLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()

	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        secondLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()

	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")

	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a third unschedulable node and verify it doesn't get scheduled by anything
	applicator.SetLabel(labels.NODE, "node3", pc_fields.AvailabilityZoneLabel, "undefined")

	labeled, err = waitForPodLabel(applicator, false, "node3/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	// Now add 10 new nodes and verify that they are scheduled by the first daemon set
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, pc_fields.AvailabilityZoneLabel, "az1")
		Assert(t).IsNil(err, "expected no error labeling node")
	}
	for i := 0; i < 10; i++ {
		podPath := fmt.Sprintf("good_node%v/testPod", i)
		labeled, err = waitForPodLabel(applicator, true, podPath)
		Assert(t).IsNil(err, "Expected pod to have a dsID label")
		dsID := labeled.Labels.Get(DSIDLabel)
		Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
	}

	//
	// Update a daemon set's node selector and expect a node to be unscheduled
	//
	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	//
	// Now update the node selector to schedule node2 again and verify
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Disabling a daemon set should not unschedule any nodes
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true

		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify it is disabled
	condition := func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if !anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be disabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if !firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if !secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be disabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Enabling a daemon set should make the daemon set resume its regular activities
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")

	// Verify it is enabled
	condition = func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be enabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be enabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error enabling daemon set!")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")
}
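The waitForCondition helper used above is not included in this excerpt. A minimal polling implementation consistent with its usage might look like the sketch below; the ten-second deadline is an assumption, and only the retryInterval variable is taken from the tests themselves.

// Hypothetical sketch: retry the condition on retryInterval until it returns
// nil or an assumed ten-second deadline passes, then surface the last error.
func waitForCondition(condition func() error) error {
	timeout := time.After(10 * time.Second)
	ticker := time.NewTicker(retryInterval)
	defer ticker.Stop()

	err := condition()
	for err != nil {
		select {
		case <-timeout:
			return util.Errorf("Timed out waiting for condition: %s", err)
		case <-ticker.C:
			err = condition()
		}
	}
	return nil
}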
Example #10
func TestCleanupPods(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	// Make some dangling pod labels, then instantiate a farm and expect it to clean them up
	podID := types.PodID("testPod")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	var allNodes []types.NodeName
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
		Assert(t).IsNil(err, "Expected no error labeling node")

		_, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
		Assert(t).IsNil(err, "Expected no error added pod to intent tree")
	}

	// Assert that precondition is true
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		labeled, err := applicator.GetLabels(labels.POD, id)
		Assert(t).IsNil(err, "Expected no error getting labels")
		Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

		_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
		Assert(t).IsNil(err, "Expected no error getting pod from intent store")
		Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
	}

	// Instantiate farm
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "cleanupPods",
	})
	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	// Make sure no dangling pods are left
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		_, err := waitForPodLabel(applicator, false, id)
		Assert(t).IsNil(err, "Expected pod not to have a dsID label")

		condition := func() error {
			_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
			if err != pods.NoCurrentManifest {
				return util.Errorf("Expected pod to be deleted in intent store")
			}
			return nil
		}
		err = waitForCondition(condition)
		Assert(t).IsNil(err, "Error cleaning up pods")
	}
}
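waitForPodLabel is another helper these tests rely on without showing it. Given how it is called, one plausible shape, built on the waitForCondition sketch above, is the following; the labels.Labeled return type is inferred from applicator.GetLabels as used in the cleanup test, and the details are assumptions.

// Hypothetical sketch: poll the applicator until the pod's label set either
// has or lacks DSIDLabel, as requested, and return the last labels observed.
func waitForPodLabel(applicator labels.Applicator, hasDSIDLabel bool, podPath string) (labels.Labeled, error) {
	var labeled labels.Labeled
	err := waitForCondition(func() error {
		var err error
		labeled, err = applicator.GetLabels(labels.POD, podPath)
		if err != nil {
			return util.Errorf("Unable to get labels for %s: %s", podPath, err)
		}
		if labeled.Labels.Has(DSIDLabel) != hasDSIDLabel {
			return util.Errorf("Expected Has(DSIDLabel) to be %t for %s", hasDSIDLabel, podPath)
		}
		return nil
	})
	return labeled, err
}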
Example #11
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendNodes",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1")
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Check for contention between two daemon sets among their nodes
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, dsData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Make a node and verify that it was scheduled
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make another daemon set with a contending AvailabilityZoneLabel and verify
	// that it gets disabled and that the node label does not change
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, anotherDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	// Expect the new daemon set to be disabled both in the farm and in the dsStore
	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	//
	// Make a third daemon set and update its node selector to force a contention,
	// then verify that it has been disabled and the node hasn't been overwritten
	//
	anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
	badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, badDS.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.NodeSelector = nodeSelector
		return dsToUpdate, nil
	}

	badDS, err = dsStore.MutateDS(badDS.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelector(dsf, badDS)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	err = waitForDisabled(dsf, dsStore, badDS.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")
}
Example #12
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Make two daemon sets with an everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")

	// Add another daemon set with a different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Disable first daemon set, then enable second and third daemon sets in that order
	// and there should then be a contention on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}

	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}

	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")

	// Enable second ds and verify it is enabled
	_, err = dsStore.MutateDS(secondDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling second daemon set")

	// Enable the third ds and verify it is disabled because it contends with the second ds
	_, err = dsStore.MutateDS(thirdDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Test equivalent selectors, fifth ds should contend with fourth
	//
	anotherPodID := types.PodID("anotherPodID")

	anotherManifestBuilder := manifest.NewBuilder()
	anotherManifestBuilder.SetID(anotherPodID)
	anotherPodManifest := anotherManifestBuilder.GetManifest()

	equalSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})

	fourthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fourthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify the fourth daemon set is enabled
	err = waitForDisabled(dsf, dsStore, fourthDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling fourth daemon set")

	fifthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fifthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that the fifth daemon set contends and gets disabled
	err = waitForDisabled(dsf, dsStore, fifthDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling fifth daemon set")
}
Example #13
func TestPublishToReplication(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 1
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")

	//
	// Add a watch that will automatically send a signal when a change is made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that 2 pods have been scheduled
	//
	numNodes := waitForNodes(t, ds, 2, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 2, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 2, "expected a node to have been labeled")

	// Mutate the daemon set so that the nodes are unscheduled; this should not produce an error
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"bad"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	select {
	case <-time.After(1 * time.Second):
	case err := <-desiresErrCh:
		t.Fatalf("Unexpected error unscheduling pod: %v", err)
	}
	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")
}
Example #14
// TestSchedule checks consecutive scheduling and unscheduling for:
//	- creation of a daemon set
//	- different node selectors
//	- changes to node allocations
//	- mutations to a daemon set
//	- deleting a daemon set
func TestSchedule(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "nodeOk")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("bad_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")

	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "bad")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node2")

	//
	// Add a watch that will automatically send a signal when a change is made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that the pod has been scheduled
	//
	numNodes := waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 1, "expected a node to have been labeled")
	Assert(t).AreEqual(scheduled[0].ID, "node2/testPod", "expected node labeled with the daemon set's id")

	// Verify that the scheduled pod is correct
	err = waitForSpecificPod(kpStore, "node2", types.PodID("testPod"))
	Assert(t).IsNil(err, "Unexpected pod scheduled")

	//
	// Add 10 good nodes and 10 bad nodes then verify
	//
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, "nodeQuality", "good")
		Assert(t).IsNil(err, "expected no error labeling node")
	}

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("bad_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, "nodeQuality", "bad")
		Assert(t).IsNil(err, "expected no error labeling node")
	}

	// The node watch should automatically notice a change
	numNodes = waitForNodes(t, ds, 11, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 11, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 11, "expected a lot of nodes to have been labeled")

	//
	// Add a node with the labels nodeQuality=good and cherry=pick
	//
	err = applicator.SetLabel(labels.NODE, "nodeOk", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling nodeOk")
	err = applicator.SetLabel(labels.NODE, "nodeOk", "cherry", "pick")
	Assert(t).IsNil(err, "expected no error labeling nodeOk")
	fmt.Println(applicator.GetLabels(labels.NODE, "nodeOk"))

	numNodes = waitForNodes(t, ds, 12, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 12, "took too long to schedule")

	// Schedule only a node that is both nodeQuality=good and cherry=pick
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"good"}).
			Add("cherry", klabels.EqualsOperator, []string{"pick"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	numNodes = waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to schedule")

	// Verify that the scheduled pod is correct
	err = waitForSpecificPod(kpStore, "nodeOk", types.PodID("testPod"))
	Assert(t).IsNil(err, "Unexpected pod scheduled")

	//
	// Disabling the daemon set and making a change should not do anything
	//
	mutator = func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.Disabled = true
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	numNodes = waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to unschedule")

	//
	// Now re-enable it and try to schedule everything
	//
	mutator = func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything()
		dsToChange.Disabled = false
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")

	// 11 good nodes, 11 bad nodes, and 1 good cherry-picked node = 23 nodes
	numNodes = waitForNodes(t, ds, 23, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 23, "took too long to schedule")

	//
	// Deleting the daemon set should unschedule all of its nodes
	//
	ds.logger.NoFields().Info("Deleting daemon set...")
	err = dsStore.Delete(ds.ID())
	if err != nil {
		t.Fatalf("Unable to delete daemon set: %v", err)
	}
	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected all nodes to have been unlabeled")

	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")
}
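waitForSpecificPod, used in the last two examples, is also not shown. A sketch that reuses the kpStore.Pod call seen in the cleanup test and the waitForCondition sketch above might look like this; the *kptest.FakePodStore parameter type and the manifest ID comparison are assumptions.

// Hypothetical sketch: poll the intent tree until a manifest with the expected
// pod ID appears on the given node.
func waitForSpecificPod(kpStore *kptest.FakePodStore, node types.NodeName, podID types.PodID) error {
	return waitForCondition(func() error {
		man, _, err := kpStore.Pod(kp.INTENT_TREE, node, podID)
		if err != nil {
			return util.Errorf("Unable to read pod from intent tree: %s", err)
		}
		if man.ID() != podID {
			return util.Errorf("Expected manifest for %s on %s, got %s", podID, node, man.ID())
		}
		return nil
	})
}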