func TestList(t *testing.T) {
	store := consulStoreWithFakeKV()

	// Create first DaemonSet
	firstPodID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(firstPodID)
	firstManifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	firstDS, err := store.Create(firstManifest, minHealth, clusterName, selector, firstPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	// Create second DaemonSet
	secondPodID := types.PodID("different_pod_id")
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(secondPodID)
	secondManifest := manifestBuilder.GetManifest()
	secondDS, err := store.Create(secondManifest, minHealth, clusterName, selector, secondPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}

	daemonSetList, err := store.List()
	if err != nil {
		t.Fatalf("Error getting list of daemon sets: %s", err)
	}

	Assert(t).AreEqual(len(daemonSetList), 2, "Unexpected number of daemon sets listed")

	for _, daemonSet := range daemonSetList {
		if daemonSet.ID == firstDS.ID {
			Assert(t).AreEqual(daemonSet.PodID, firstPodID, "Listed daemon set pod ids were not equal")
		} else if daemonSet.ID == secondDS.ID {
			Assert(t).AreEqual(daemonSet.PodID, secondPodID, "Listed daemon set pod ids were not equal")
		} else {
			t.Errorf("Unexpected daemon set listed: %v", daemonSet)
		}
	}
}
func createDaemonSet(store *consulStore, t *testing.T) ds_fields.DaemonSet { podID := types.PodID("some_pod_id") minHealth := 0 clusterName := ds_fields.ClusterName("some_name") azLabel := pc_fields.AvailabilityZone("some_zone") selector := klabels.Everything(). Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()}) manifestBuilder := manifest.NewBuilder() manifestBuilder.SetID(podID) manifest := manifestBuilder.GetManifest() timeout := replication.NoTimeout ds, err := store.Create(manifest, minHealth, clusterName, selector, podID, timeout) if err != nil { t.Fatalf("Unable to create daemon set: %s", err) } if ds.ID == "" { t.Error("daemon set should have an id") } Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id") Assert(t).AreNotEqual(ds.PodID, "", "Daemon set should have a pod id") Assert(t).AreEqual(ds.PodID, podID, "Daemon set pod id was not set correctly") Assert(t).AreEqual(ds.MinHealth, minHealth, "Daemon set minimum health was not set correctly") Assert(t).AreEqual(ds.Name, clusterName, "Daemon set cluster name was not set correctly") Assert(t).IsFalse(ds.Disabled, "Daemon set disabled field was not set correctly") testLabels := klabels.Set{ pc_fields.AvailabilityZoneLabel: azLabel.String(), } if matches := ds.NodeSelector.Matches(testLabels); !matches { t.Error("The daemon set has a bad node selector") } originalSHA, err := manifest.SHA() if err != nil { t.Fatal("Unable to retrieve SHA from manifest") } getSHA, err := ds.Manifest.SHA() if err != nil { t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set") } Assert(t).AreEqual(originalSHA, getSHA, "Daemon set manifest not set correctly") return ds }
func TestCreate(t *testing.T) { store := consulStoreWithFakeKV() createDaemonSet(store, t) // Create a bad DaemonSet podID := types.PodID("") minHealth := 0 clusterName := ds_fields.ClusterName("some_name") azLabel := pc_fields.AvailabilityZone("some_zone") selector := klabels.Everything(). Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()}) manifestBuilder := manifest.NewBuilder() manifestBuilder.SetID("") podManifest := manifestBuilder.GetManifest() timeout := replication.NoTimeout if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil { t.Error("Expected create to fail on bad pod id") } podID = types.PodID("pod_id") if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil { t.Error("Expected create to fail on bad manifest pod id") } manifestBuilder = manifest.NewBuilder() manifestBuilder.SetID("different_pod_id") podManifest = manifestBuilder.GetManifest() if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil { t.Error("Expected create to fail on pod id and manifest pod id mismatch") } }
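// The three failure cases in TestCreate could also be expressed table-driven, which keeps
// each invalid input to one line and makes new cases cheap to add. The function below is
// only an illustrative sketch (not part of the existing suite); it relies solely on the
// store.Create and manifest builder APIs already exercised above.
func TestCreateInvalidInputs(t *testing.T) {
	store := consulStoreWithFakeKV()
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	selector := klabels.Everything()
	timeout := replication.NoTimeout

	testCases := []struct {
		description string
		podID       types.PodID
		manifestID  types.PodID
	}{
		{"empty pod id", "", ""},
		{"empty manifest pod id", "pod_id", ""},
		{"pod id and manifest pod id mismatch", "pod_id", "different_pod_id"},
	}
	for _, testCase := range testCases {
		manifestBuilder := manifest.NewBuilder()
		manifestBuilder.SetID(testCase.manifestID)
		badManifest := manifestBuilder.GetManifest()

		if _, err := store.Create(badManifest, minHealth, clusterName, selector, testCase.podID, timeout); err == nil {
			t.Errorf("Expected create to fail on %s", testCase.description)
		}
	}
}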
func main() {
	cmd, consulOpts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsstore := dsstore.NewConsul(client, 3, &logger)
	applicator := labels.NewConsulApplicator(client, 3)

	switch cmd {
	case CmdCreate:
		minHealth, err := strconv.Atoi(*createMinHealth)
		if err != nil {
			log.Fatalf("Invalid value for minimum health, expected integer: %v", err)
		}
		name := ds_fields.ClusterName(*createName)

		manifest, err := manifest.FromPath(*createManifest)
		if err != nil {
			log.Fatalf("%s", err)
		}
		podID := manifest.ID()

		if *createTimeout <= time.Duration(0) {
			log.Fatalf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
		}

		selectorString := *createSelector
		if *createEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			selectorString = klabels.Nothing().String()
			log.Fatal("Explicit everything selector not allowed, please use the --everywhere flag")
		}
		selector, err := parseNodeSelectorWithPrompt(klabels.Nothing(), selectorString, applicator)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		if err = confirmMinheathForSelector(minHealth, selector, applicator); err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		ds, err := dsstore.Create(manifest, minHealth, name, selector, podID, *createTimeout)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("%v has been created in consul", ds.ID)
		fmt.Println()

	case CmdGet:
		id := ds_fields.ID(*getID)
		ds, _, err := dsstore.Get(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		bytes, err := json.Marshal(ds)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal daemon set as JSON")
		}
		fmt.Printf("%s", bytes)

	case CmdList:
		dsList, err := dsstore.List()
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		podID := types.PodID(*listPod)
		for _, ds := range dsList {
			if *listPod == "" || podID == ds.PodID {
				fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
			}
		}

	case CmdEnable:
		id := ds_fields.ID(*enableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if !ds.Disabled {
				return ds, util.Errorf("Daemon set has already been enabled")
			}
			ds.Disabled = false
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully enabled in consul", id.String())
		fmt.Println()

	case CmdDisable:
		id := ds_fields.ID(*disableID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if ds.Disabled {
				return ds, util.Errorf("Daemon set has already been disabled")
			}
			ds.Disabled = true
			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully disabled in consul", id.String())
		fmt.Println()

	case CmdDelete:
		id := ds_fields.ID(*deleteID)
		err := dsstore.Delete(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully deleted from consul", id.String())
		fmt.Println()

	case CmdUpdate:
		id := ds_fields.ID(*updateID)

		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			changed := false
			if *updateMinHealth != "" {
				minHealth, err := strconv.Atoi(*updateMinHealth)
				if err != nil {
					log.Fatalf("Invalid value for minimum health, expected integer")
				}
				if ds.MinHealth != minHealth {
					changed = true
					ds.MinHealth = minHealth
				}
			}
			if *updateName != "" {
				name := ds_fields.ClusterName(*updateName)
				if ds.Name != name {
					changed = true
					ds.Name = name
				}
			}

			if *updateTimeout != TimeoutNotSpecified {
				if *updateTimeout <= time.Duration(0) {
					return ds, util.Errorf("Timeout must be a positive non-zero value, got '%v'", *updateTimeout)
				}
				if ds.Timeout != *updateTimeout {
					changed = true
					ds.Timeout = *updateTimeout
				}
			}
			if *updateManifest != "" {
				manifest, err := manifest.FromPath(*updateManifest)
				if err != nil {
					return ds, util.Errorf("%s", err)
				}

				if manifest.ID() != ds.PodID {
					return ds, util.Errorf("Manifest ID of %s does not match daemon set's pod ID (%s)", manifest.ID(), ds.PodID)
				}

				dsSHA, err := ds.Manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from consul daemon set manifest: %v", err)
				}
				newSHA, err := manifest.SHA()
				if err != nil {
					return ds, util.Errorf("Unable to get SHA from new manifest: %v", err)
				}
				if dsSHA != newSHA {
					changed = true
					ds.Manifest = manifest
				}
			}

			if updateSelectorGiven {
				selectorString := *updateSelector
				if *updateEverywhere {
					selectorString = klabels.Everything().String()
				} else if selectorString == "" {
					return ds, util.Errorf("Explicit everything selector not allowed, please use the --everywhere flag")
				}
				selector, err := parseNodeSelectorWithPrompt(ds.NodeSelector, selectorString, applicator)
				if err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}

				if ds.NodeSelector.String() != selector.String() {
					changed = true
					ds.NodeSelector = selector
				}
			}

			if !changed {
				return ds, util.Errorf("No changes were made")
			}

			if updateSelectorGiven || *updateMinHealth != "" {
				if err := confirmMinheathForSelector(ds.MinHealth, ds.NodeSelector, applicator); err != nil {
					return ds, util.Errorf("Error occurred: %v", err)
				}
			}

			return ds, nil
		}

		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully updated in consul", id.String())
		fmt.Println()

	case CmdTestSelector:
		selectorString := *testSelectorString
		if *testSelectorEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			fmt.Println("Explicit everything selector not allowed, please use the --everywhere flag")
		}
		selector, err := parseNodeSelector(selectorString)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}

		matches, err := applicator.GetMatches(selector, labels.NODE, false)
		if err != nil {
			log.Fatalf("Error getting matching labels: %v", err)
		}
		fmt.Println(matches)

	default:
		log.Fatalf("Unrecognized command %v", cmd)
	}
}
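// parseNodeSelector and parseNodeSelectorWithPrompt are referenced above but defined
// outside this excerpt. The sketch below is one plausible shape for the non-interactive
// variant; it assumes the vendored klabels package exposes Parse (as the kubernetes
// labels package does) and is not the repository's actual implementation.
func parseNodeSelector(selectorString string) (klabels.Selector, error) {
	selector, err := klabels.Parse(selectorString)
	if err != nil {
		return nil, util.Errorf("Malformed node selector %q: %v", selectorString, err)
	}
	return selector, nil
}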
func TestMultipleFarms(t *testing.T) { retryInterval = testFarmRetryInterval dsStore := dsstoretest.NewFake() kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult)) applicator := labels.NewFakeApplicator() preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger) preparer.Enable() defer preparer.Disable() session := kptest.NewSession() firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{ "farm": "firstMultiple", }) var allNodes []types.NodeName allNodes = append(allNodes, "node1", "node2", "node3") for i := 0; i < 10; i++ { nodeName := fmt.Sprintf("good_node%v", i) allNodes = append(allNodes, types.NodeName(nodeName)) } happyHealthChecker := fake_checker.HappyHealthChecker(allNodes) // // Instantiate first farm // firstFarm := &Farm{ dsStore: dsStore, kpStore: kpStore, scheduler: scheduler.NewApplicatorScheduler(applicator), applicator: applicator, children: make(map[ds_fields.ID]*childDS), session: session, logger: firstLogger, alerter: alerting.NewNop(), healthChecker: &happyHealthChecker, } firstQuitCh := make(chan struct{}) defer close(firstQuitCh) go func() { go firstFarm.cleanupDaemonSetPods(firstQuitCh) firstFarm.mainLoop(firstQuitCh) }() // // Instantiate second farm // secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{ "farm": "secondMultiple", }) secondFarm := &Farm{ dsStore: dsStore, kpStore: kpStore, scheduler: scheduler.NewApplicatorScheduler(applicator), applicator: applicator, children: make(map[ds_fields.ID]*childDS), session: session, logger: secondLogger, alerter: alerting.NewNop(), healthChecker: &happyHealthChecker, } secondQuitCh := make(chan struct{}) defer close(secondQuitCh) go func() { go secondFarm.cleanupDaemonSetPods(secondQuitCh) secondFarm.mainLoop(secondQuitCh) }() // Make two daemon sets with difference node selectors // First daemon set podID := types.PodID("testPod") minHealth := 0 clusterName := ds_fields.ClusterName("some_name") manifestBuilder := manifest.NewBuilder() manifestBuilder.SetID(podID) podManifest := manifestBuilder.GetManifest() nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"}) dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout) Assert(t).IsNil(err, "Expected no error creating request") // Second daemon set anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"}) anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout) Assert(t).IsNil(err, "Expected no error creating request") // Make a node and verify that it was scheduled by the first daemon set applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1") labeled, err := waitForPodLabel(applicator, true, "node1/testPod") Assert(t).IsNil(err, "Expected pod to have a dsID label") dsID := labeled.Labels.Get(DSIDLabel) Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled") // Make a second node and verify that it was scheduled by the second daemon set applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2") labeled, err = waitForPodLabel(applicator, true, "node2/testPod") Assert(t).IsNil(err, "Expected pod to have a dsID label") dsID = labeled.Labels.Get(DSIDLabel) Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled") // Make a third unschedulable node and verify it 
doesn't get scheduled by anything applicator.SetLabel(labels.NODE, "node3", pc_fields.AvailabilityZoneLabel, "undefined") labeled, err = waitForPodLabel(applicator, false, "node3/testPod") Assert(t).IsNil(err, "Expected pod not to have a dsID label") // Now add 10 new nodes and verify that they are scheduled by the first daemon set for i := 0; i < 10; i++ { nodeName := fmt.Sprintf("good_node%v", i) err := applicator.SetLabel(labels.NODE, nodeName, pc_fields.AvailabilityZoneLabel, "az1") Assert(t).IsNil(err, "expected no error labeling node") } for i := 0; i < 10; i++ { podPath := fmt.Sprintf("good_node%v/testPod", i) labeled, err = waitForPodLabel(applicator, true, podPath) Assert(t).IsNil(err, "Expected pod to have a dsID label") dsID := labeled.Labels.Get(DSIDLabel) Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled") } // // Update a daemon set's node selector and expect a node to be unscheduled // mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"}) dsToUpdate.NodeSelector = someSelector return dsToUpdate, nil } anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator) Assert(t).IsNil(err, "Expected no error mutating daemon set") err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData) Assert(t).IsNil(err, "Expected daemon set to be mutated in farm") // Verify node2 is unscheduled labeled, err = waitForPodLabel(applicator, false, "node2/testPod") Assert(t).IsNil(err, "Expected pod not to have a dsID label") // // Now update the node selector to schedule node2 again and verify // mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"}) dsToUpdate.NodeSelector = someSelector return dsToUpdate, nil } anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator) Assert(t).IsNil(err, "Expected no error mutating daemon set") err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData) Assert(t).IsNil(err, "Expected daemon set to be mutated in farm") // Verify node2 is scheduled labeled, err = waitForPodLabel(applicator, true, "node2/testPod") Assert(t).IsNil(err, "Expected pod to have a dsID label") dsID = labeled.Labels.Get(DSIDLabel) Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled") // // Disabling a daemon set should not unschedule any nodes // mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToUpdate.Disabled = true someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"}) dsToUpdate.NodeSelector = someSelector return dsToUpdate, nil } _, err = dsStore.MutateDS(anotherDSData.ID, mutator) Assert(t).IsNil(err, "Expected no error mutating daemon set") err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData) Assert(t).IsNil(err, "Expected daemon set to be mutated in farm") // Verify it is disabled condition := func() error { anotherDSData, _, err = dsStore.Get(anotherDSData.ID) if err != nil { return util.Errorf("Expected no error getting daemon set") } if !anotherDSData.Disabled { return util.Errorf("Expected daemon set to be disabled") } if _, ok := firstFarm.children[anotherDSData.ID]; ok { if !firstFarm.children[anotherDSData.ID].ds.IsDisabled() { return util.Errorf("Expected daemon set to be disabled in only one farm") } if _, ok := 
secondFarm.children[anotherDSData.ID]; ok { return util.Errorf("Expected daemon set to be held by only one farm") } } else if _, ok := secondFarm.children[anotherDSData.ID]; ok { if !secondFarm.children[anotherDSData.ID].ds.IsDisabled() { return util.Errorf("Expected daemon set to be disabled in only one farm") } if _, ok := firstFarm.children[anotherDSData.ID]; ok { return util.Errorf("Expected daemon set to be held by only one farm") } } else { return util.Errorf("Expected daemon set to be disabled in only one farm") } return nil } err = waitForCondition(condition) Assert(t).IsNil(err, "Error disabling daemon set!") // Verify node2 is scheduled labeled, err = waitForPodLabel(applicator, true, "node2/testPod") Assert(t).IsNil(err, "Expected pod to have a dsID label") dsID = labeled.Labels.Get(DSIDLabel) Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled") // // Enable a daemon set should make the dameon set resume its regular activities // mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToUpdate.Disabled = false return dsToUpdate, nil } _, err = dsStore.MutateDS(anotherDSData.ID, mutator) Assert(t).IsNil(err, "Expected no error mutating daemon set") // Verify it is enabled condition = func() error { anotherDSData, _, err = dsStore.Get(anotherDSData.ID) if err != nil { return util.Errorf("Expected no error getting daemon set") } if anotherDSData.Disabled { return util.Errorf("Expected daemon set to be enabled") } if _, ok := firstFarm.children[anotherDSData.ID]; ok { if firstFarm.children[anotherDSData.ID].ds.IsDisabled() { return util.Errorf("Expected daemon set to be enabled in only one farm") } if _, ok := secondFarm.children[anotherDSData.ID]; ok { return util.Errorf("Expected daemon set to be held by only one farm") } } else if _, ok := secondFarm.children[anotherDSData.ID]; ok { if secondFarm.children[anotherDSData.ID].ds.IsDisabled() { return util.Errorf("Expected daemon set to be enabled in only one farm") } if _, ok := firstFarm.children[anotherDSData.ID]; ok { return util.Errorf("Expected daemon set to be held by only one farm") } } else { return util.Errorf("Expected daemon set to be enabled in only one farm") } return nil } err = waitForCondition(condition) Assert(t).IsNil(err, "Error enabling daemon set!") // Verify node2 is unscheduled labeled, err = waitForPodLabel(applicator, false, "node2/testPod") Assert(t).IsNil(err, "Expected pod not to have a dsID label") }
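// waitForCondition is used throughout the farm tests above, but its definition lies
// outside this excerpt. A plausible implementation simply polls the condition until it
// stops returning an error or a deadline passes. The 10-second deadline and 100ms poll
// interval here are illustrative assumptions, not the repository's actual settings.
func waitForCondition(condition func() error) error {
	deadline := time.After(10 * time.Second)
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	var lastErr error
	for {
		select {
		case <-deadline:
			return util.Errorf("timed out waiting for condition, last error: %v", lastErr)
		case <-ticker.C:
			if lastErr = condition(); lastErr == nil {
				return nil
			}
		}
	}
}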
// Tests dsContends for changes to both daemon sets and nodes func TestContendNodes(t *testing.T) { retryInterval = testFarmRetryInterval // // Instantiate farm // dsStore := dsstoretest.NewFake() kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult)) applicator := labels.NewFakeApplicator() logger := logging.DefaultLogger.SubLogger(logrus.Fields{ "farm": "contendNodes", }) preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger) preparer.Enable() defer preparer.Disable() var allNodes []types.NodeName allNodes = append(allNodes, "node1") happyHealthChecker := fake_checker.HappyHealthChecker(allNodes) dsf := &Farm{ dsStore: dsStore, kpStore: kpStore, scheduler: scheduler.NewApplicatorScheduler(applicator), applicator: applicator, children: make(map[ds_fields.ID]*childDS), session: kptest.NewSession(), logger: logger, alerter: alerting.NewNop(), healthChecker: &happyHealthChecker, } quitCh := make(chan struct{}) defer close(quitCh) go func() { go dsf.cleanupDaemonSetPods(quitCh) dsf.mainLoop(quitCh) }() // // Check for contention between two daemon sets among their nodes // // Make a daemon set podID := types.PodID("testPod") minHealth := 0 clusterName := ds_fields.ClusterName("some_name") manifestBuilder := manifest.NewBuilder() manifestBuilder.SetID(podID) podManifest := manifestBuilder.GetManifest() nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"}) dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout) Assert(t).IsNil(err, "Expected no error creating request") err = waitForCreate(dsf, dsData.ID) Assert(t).IsNil(err, "Expected daemon set to be created") // Make a node and verify that it was scheduled applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1") labeled, err := waitForPodLabel(applicator, true, "node1/testPod") Assert(t).IsNil(err, "Expected pod to have a dsID label") dsID := labeled.Labels.Get(DSIDLabel) Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled") // Make another daemon set with a contending AvailabilityZoneLabel and verify // that it gets disabled and that the node label does not change anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout) Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed") Assert(t).IsNil(err, "Expected no error creating request") err = waitForCreate(dsf, anotherDSData.ID) Assert(t).IsNil(err, "Expected daemon set to be created") labeled, err = waitForPodLabel(applicator, true, "node1/testPod") Assert(t).IsNil(err, "Expected pod to have a dsID label") anotherDSID := labeled.Labels.Get(DSIDLabel) Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten") // Expect the new daemon set to be disabled both in the farm and in the dsStore err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true) Assert(t).IsNil(err, "Error disabling daemon set!") // // Make a third daemon set and update its node selector to force a contend, // then verify that it has been disabled and the node hasn't been overwritten // anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"}) badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout) Assert(t).IsNil(err, "Expected no error creating 
request") err = waitForCreate(dsf, badDS.ID) Assert(t).IsNil(err, "Expected daemon set to be created") mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToUpdate.NodeSelector = nodeSelector return dsToUpdate, nil } badDS, err = dsStore.MutateDS(badDS.ID, mutator) Assert(t).IsNil(err, "Expected no error mutating daemon set") err = waitForMutateSelector(dsf, badDS) Assert(t).IsNil(err, "Expected daemon set to be mutated in farm") labeled, err = waitForPodLabel(applicator, true, "node1/testPod") Assert(t).IsNil(err, "Expected pod to have a dsID label") anotherDSID = labeled.Labels.Get(DSIDLabel) Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten") err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true) Assert(t).IsNil(err, "Error disabling daemon set!") }
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Make two daemon sets with an everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")

	// Add another daemon set with a different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Disable the first daemon set, then enable the second and third daemon sets in that
	// order and expect a contend on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}
	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}

	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")

	// Enable second ds and verify it is enabled
	_, err = dsStore.MutateDS(secondDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")
	err = waitForDisabled(dsf, dsStore, secondDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling second daemon set")

	// Enable third ds and verify it is disabled because it contends with the second ds
	_, err = dsStore.MutateDS(thirdDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")
	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Test equivalent selectors: the fifth ds should contend with the fourth
	//
	anotherPodID := types.PodID("anotherPodID")

	anotherManifestBuilder := manifest.NewBuilder()
	anotherManifestBuilder.SetID(anotherPodID)
	anotherPodManifest := anotherManifestBuilder.GetManifest()

	equalSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})

	fourthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fourthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify the fourth daemon set is enabled
	err = waitForDisabled(dsf, dsStore, fourthDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling fourth daemon set")

	fifthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fifthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that the fifth daemon set contends and gets disabled
	err = waitForDisabled(dsf, dsStore, fifthDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling fifth daemon set")
}
func TestWatchAll(t *testing.T) { store := consulStoreWithFakeKV() // // Create a new daemon set // podID := types.PodID("some_pod_id") minHealth := 0 clusterName := ds_fields.ClusterName("some_name") azLabel := pc_fields.AvailabilityZone("some_zone") selector := klabels.Everything(). Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()}) manifestBuilder := manifest.NewBuilder() manifestBuilder.SetID(podID) podManifest := manifestBuilder.GetManifest() timeout := replication.NoTimeout ds, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout) if err != nil { t.Fatalf("Unable to create daemon set: %s", err) } // // Create another new daemon set // someOtherPodID := types.PodID("some_other_pod_id") manifestBuilder = manifest.NewBuilder() manifestBuilder.SetID(someOtherPodID) someOtherManifest := manifestBuilder.GetManifest() someOtherDS, err := store.Create(someOtherManifest, minHealth, clusterName, selector, someOtherPodID, timeout) if err != nil { t.Fatalf("Unable to create daemon set: %s", err) } // // Watch for create and verify // quitCh := make(chan struct{}) inCh := store.WatchAll(quitCh, 0) defer close(quitCh) var watched WatchedDaemonSetList select { case watched = <-inCh: case <-time.After(5 * time.Second): t.Fatal("Expected something on channel but found nothing") } if watched.Err != nil { t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err) } Assert(t).AreEqual(len(watched.DaemonSets), 2, "Unexpected number of daemon sets watched") for _, watchedDS := range watched.DaemonSets { if watchedDS.ID == ds.ID { Assert(t).AreEqual(watchedDS.PodID, ds.PodID, "Daemon sets should have equal pod ids") } else if watchedDS.ID == someOtherDS.ID { Assert(t).AreEqual(watchedDS.PodID, someOtherDS.PodID, "Daemon sets should have equal pod ids") } else { t.Errorf("Expected to find id '%s' among watch results, but was not present", watchedDS.ID) } } // // Watch for delete and verify // err = store.Delete(someOtherDS.ID) if err != nil { t.Error("Unable to delete daemon set") } select { case watched = <-inCh: case <-time.After(5 * time.Second): t.Fatal("Expected something on channel but found nothing") } if watched.Err != nil { t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err) } Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched") Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids") // // Watch for update and verify // mutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToMutate.Disabled = !dsToMutate.Disabled return dsToMutate, nil } ds, err = store.MutateDS(ds.ID, mutator) if err != nil { t.Fatalf("Unable to mutate daemon set: %s", err) } select { case watched = <-inCh: case <-time.After(5 * time.Second): t.Fatal("Expected something on channel but found nothing") } if watched.Err != nil { t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err) } Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched") Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids") Assert(t).AreEqual(ds.PodID, watched.DaemonSets[0].PodID, "Daemon sets should have equal pod ids") }
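// Outside of tests, a consumer of WatchAll would follow the same receive-and-check
// pattern TestWatchAll exercises. The function below is a minimal sketch assuming the
// same unexported consulStore type, the WatchAll(quitCh, 0) signature, and the
// Err/DaemonSets fields used above, plus the standard library log package; it is not
// part of the existing code.
func watchDaemonSets(store *consulStore, quitCh chan struct{}) {
	inCh := store.WatchAll(quitCh, 0)
	for {
		select {
		case <-quitCh:
			return
		case watched, ok := <-inCh:
			if !ok {
				return
			}
			if watched.Err != nil {
				log.Printf("error watching daemon sets: %v", watched.Err)
				continue
			}
			for _, ds := range watched.DaemonSets {
				log.Printf("daemon set %s schedules pod %s", ds.ID, ds.PodID)
			}
		}
	}
}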
func TestMutate(t *testing.T) { store := consulStoreWithFakeKV() podID := types.PodID("some_pod_id") minHealth := 0 clusterName := ds_fields.ClusterName("some_name") azLabel := pc_fields.AvailabilityZone("some_zone") selector := klabels.Everything(). Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()}) manifestBuilder := manifest.NewBuilder() manifestBuilder.SetID(podID) podManifest := manifestBuilder.GetManifest() timeout := replication.NoTimeout ds, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout) if err != nil { t.Fatalf("Unable to create daemon set: %s", err) } // // Invalid mutates // errorMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { return dsToMutate, util.Errorf("This is an error") } _, err = store.MutateDS(ds.ID, errorMutator) if err == nil { t.Error("Expected error when mutator produces an error") } badIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToMutate.ID = "" return dsToMutate, nil } _, err = store.MutateDS(ds.ID, badIDMutator) if err == nil { t.Error("Expected error when mutating daemon set ID") } badPodIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToMutate.PodID = "" return dsToMutate, nil } _, err = store.MutateDS(ds.ID, badPodIDMutator) if err == nil { t.Error("Expected error when mutating daemon set PodID to mismatch manifest") } // // A valid mutate followed by validation // someOtherDisabled := !ds.Disabled someOtherMinHealth := 42 someOtherName := ds_fields.ClusterName("some_other_name") someOtherPodID := types.PodID("some_other_pod_id") manifestBuilder = manifest.NewBuilder() manifestBuilder.SetID(someOtherPodID) someOtherManifest := manifestBuilder.GetManifest() someOtherAZLabel := pc_fields.AvailabilityZone("some_other_zone") someOtherSelector := klabels.Everything(). 
Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{someOtherAZLabel.String()}) goodMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToMutate.Disabled = someOtherDisabled dsToMutate.Manifest = someOtherManifest dsToMutate.MinHealth = someOtherMinHealth dsToMutate.Name = someOtherName dsToMutate.NodeSelector = someOtherSelector dsToMutate.PodID = someOtherPodID return dsToMutate, nil } someOtherDS, err := store.MutateDS(ds.ID, goodMutator) if err != nil { t.Fatalf("Unable to mutate daemon set: %s", err) } Assert(t).AreEqual(someOtherDS.ID, ds.ID, "Daemon sets should be equal ids") Assert(t).AreEqual(someOtherDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids") Assert(t).AreEqual(someOtherDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths") Assert(t).AreEqual(someOtherDS.Name, someOtherName, "Daemon sets should have equal names") Assert(t).AreEqual(someOtherDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields") someOtherLabels := klabels.Set{ pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(), } if matches := someOtherDS.NodeSelector.Matches(someOtherLabels); !matches { t.Error("The daemon set has a bad node selector") } someOtherSHA, err := someOtherManifest.SHA() if err != nil { t.Fatal("Unable to retrieve SHA from manifest") } dsSHA, err := someOtherDS.Manifest.SHA() if err != nil { t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set") } Assert(t).AreEqual(someOtherSHA, dsSHA, "Daemon set shas were not equal") // // Validate daemon set from a get function // getDS, _, err := store.Get(ds.ID) if err != nil { t.Fatalf("Unable to get daemon set: %s", err) } Assert(t).AreEqual(getDS.ID, ds.ID, "Daemon sets should be equal ids") Assert(t).AreEqual(getDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids") Assert(t).AreEqual(getDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths") Assert(t).AreEqual(getDS.Name, someOtherName, "Daemon sets should have equal names") Assert(t).AreEqual(getDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields") someOtherLabels = klabels.Set{ pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(), } if matches := getDS.NodeSelector.Matches(someOtherLabels); !matches { t.Error("The daemon set has a bad node selector") } someOtherSHA, err = someOtherManifest.SHA() if err != nil { t.Fatal("Unable to retrieve SHA from manifest") } dsSHA, err = getDS.Manifest.SHA() if err != nil { t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set") } Assert(t).AreEqual(someOtherSHA, dsSHA, "Daemon set shas were not equal") }
func TestGet(t *testing.T) {
	store := consulStoreWithFakeKV()

	//
	// Create DaemonSet
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	manifest := manifestBuilder.GetManifest()

	timeout := replication.NoTimeout

	ds, err := store.Create(manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")

	//
	// Get DaemonSet and verify it is the same
	//
	getDS, _, err := store.Get(ds.ID)
	if err != nil {
		t.Fatalf("Error retrieving created daemon set: %s", err)
	}

	Assert(t).AreNotEqual(getDS.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(getDS.PodID, "", "Daemon set should have a pod id")

	Assert(t).AreEqual(ds.ID, getDS.ID, "Daemon set should be equal ids")
	Assert(t).AreEqual(ds.PodID, getDS.PodID, "Daemon set should have equal pod ids")
	Assert(t).AreEqual(ds.MinHealth, getDS.MinHealth, "Daemon set should have equal minimum healths")
	Assert(t).AreEqual(ds.Name, getDS.Name, "Daemon set should have equal names")
	Assert(t).AreEqual(ds.Disabled, getDS.Disabled, "Daemon set should have same disabled fields")

	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := getDS.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}

	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := getDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set shas were not equal")

	// Invalid get operation
	_, _, err = store.Get("bad_id")
	if err == nil {
		t.Error("Expected get operation to fail when getting a daemon set which does not exist")
	}
}
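// The SHA comparison pattern above (fetch both SHAs, fail on error, assert equality)
// recurs in several of these tests. A small helper like the following sketch could
// collapse it to one call; it uses only the manifest SHA method and Assert API already
// exercised above, but it is not part of the existing suite.
func assertSameManifest(t *testing.T, expected manifest.Manifest, actual manifest.Manifest, msg string) {
	expectedSHA, err := expected.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from expected manifest")
	}
	actualSHA, err := actual.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from actual manifest")
	}
	Assert(t).AreEqual(expectedSHA, actualSHA, msg)
}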
func TestPublishToReplication(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 1
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})

	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node2")

	//
	// Add a watch that will automatically send a signal when a change is made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that 2 pods have been scheduled
	//
	numNodes := waitForNodes(t, ds, 2, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 2, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 2, "expected two nodes to have been labeled")

	// Mutate the daemon set so that the node is unscheduled, this should not produce an error
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"bad"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unexpected error trying to mutate daemon set")

	select {
	case <-time.After(1 * time.Second):
	case err := <-desiresErrCh:
		t.Fatalf("Unexpected error unscheduling pod: %v", err)
	}

	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")
}
// TestSchedule checks consecutive scheduling and unscheduling for: // - creation of a daemon set // - different node selectors // - changes to nodes allocations // - mutations to a daemon set // - deleting a daemon set func TestSchedule(t *testing.T) { retryInterval = testFarmRetryInterval // // Setup fixture and schedule a pod // dsStore := dsstoretest.NewFake() podID := types.PodID("testPod") minHealth := 0 clusterName := ds_fields.ClusterName("some_name") manifestBuilder := manifest.NewBuilder() manifestBuilder.SetID(podID) podManifest := manifestBuilder.GetManifest() nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"}) timeout := replication.NoTimeout dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout) Assert(t).IsNil(err, "expected no error creating request") kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult)) applicator := labels.NewFakeApplicator() preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger) preparer.Enable() defer preparer.Disable() var allNodes []types.NodeName allNodes = append(allNodes, "node1", "node2", "nodeOk") for i := 0; i < 10; i++ { nodeName := fmt.Sprintf("good_node%v", i) allNodes = append(allNodes, types.NodeName(nodeName)) } for i := 0; i < 10; i++ { nodeName := fmt.Sprintf("bad_node%v", i) allNodes = append(allNodes, types.NodeName(nodeName)) } happyHealthChecker := fake_checker.HappyHealthChecker(allNodes) ds := New( dsData, dsStore, kpStore, applicator, applicator, logging.DefaultLogger, &happyHealthChecker, 0, false, ).(*daemonSet) scheduled := scheduledPods(t, ds) Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled") err = waitForPodsInIntent(kpStore, 0) Assert(t).IsNil(err, "Unexpected number of pods scheduled") err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "bad") Assert(t).IsNil(err, "expected no error labeling node1") err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good") Assert(t).IsNil(err, "expected no error labeling node2") // // Adds a watch that will automatically send a signal when a change was made // to the daemon set // quitCh := make(chan struct{}) updatedCh := make(chan *ds_fields.DaemonSet) deletedCh := make(chan *ds_fields.DaemonSet) defer close(quitCh) defer close(updatedCh) defer close(deletedCh) desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh) dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh) // // Verify that the pod has been scheduled // numNodes := waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh) Assert(t).AreEqual(numNodes, 1, "took too long to schedule") scheduled = scheduledPods(t, ds) Assert(t).AreEqual(len(scheduled), 1, "expected a node to have been labeled") Assert(t).AreEqual(scheduled[0].ID, "node2/testPod", "expected node labeled with the daemon set's id") // Verify that the scheduled pod is correct err = waitForSpecificPod(kpStore, "node2", types.PodID("testPod")) Assert(t).IsNil(err, "Unexpected pod scheduled") // // Add 10 good nodes and 10 bad nodes then verify // for i := 0; i < 10; i++ { nodeName := fmt.Sprintf("good_node%v", i) err := applicator.SetLabel(labels.NODE, nodeName, "nodeQuality", "good") Assert(t).IsNil(err, "expected no error labeling node") } for i := 0; i < 10; i++ { nodeName := fmt.Sprintf("bad_node%v", i) err := applicator.SetLabel(labels.NODE, nodeName, "nodeQuality", "bad") Assert(t).IsNil(err, "expected no error labeling node") 
} // The node watch should automatically notice a change numNodes = waitForNodes(t, ds, 11, desiresErrCh, dsChangesErrCh) Assert(t).AreEqual(numNodes, 11, "took too long to schedule") scheduled = scheduledPods(t, ds) Assert(t).AreEqual(len(scheduled), 11, "expected a lot of nodes to have been labeled") // // Add a node with the labels nodeQuality=good and cherry=pick // err = applicator.SetLabel(labels.NODE, "nodeOk", "nodeQuality", "good") Assert(t).IsNil(err, "expected no error labeling nodeOk") err = applicator.SetLabel(labels.NODE, "nodeOk", "cherry", "pick") Assert(t).IsNil(err, "expected no error labeling nodeOk") fmt.Println(applicator.GetLabels(labels.NODE, "nodeOk")) numNodes = waitForNodes(t, ds, 12, desiresErrCh, dsChangesErrCh) Assert(t).AreEqual(numNodes, 12, "took too long to schedule") // Schedule only a node that is both nodeQuality=good and cherry=pick mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToChange.NodeSelector = klabels.Everything(). Add("nodeQuality", klabels.EqualsOperator, []string{"good"}). Add("cherry", klabels.EqualsOperator, []string{"pick"}) return dsToChange, nil } _, err = dsStore.MutateDS(ds.ID(), mutator) Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set") numNodes = waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh) Assert(t).AreEqual(numNodes, 1, "took too long to schedule") // Verify that the scheduled pod is correct err = waitForSpecificPod(kpStore, "nodeOk", types.PodID("testPod")) Assert(t).IsNil(err, "Unexpected pod scheduled") // // Disabling the daemon set and making a change should not do anything // mutator = func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToChange.Disabled = true dsToChange.NodeSelector = klabels.Everything(). Add("nodeQuality", klabels.EqualsOperator, []string{"good"}) return dsToChange, nil } _, err = dsStore.MutateDS(ds.ID(), mutator) Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set") numNodes = waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh) Assert(t).AreEqual(numNodes, 1, "took too long to unschedule") // // Now re-enable it and try to schedule everything // mutator = func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) { dsToChange.NodeSelector = klabels.Everything() dsToChange.Disabled = false return dsToChange, nil } _, err = dsStore.MutateDS(ds.ID(), mutator) Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set") // 11 good nodes 11 bad nodes, and 1 good cherry picked node = 23 nodes numNodes = waitForNodes(t, ds, 23, desiresErrCh, dsChangesErrCh) Assert(t).AreEqual(numNodes, 23, "took too long to schedule") // // Deleting the daemon set should unschedule all of its nodes // ds.logger.NoFields().Info("Deleting daemon set...") err = dsStore.Delete(ds.ID()) if err != nil { t.Fatalf("Unable to delete daemon set: %v", err) } numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh) Assert(t).AreEqual(numNodes, 0, "took too long to unschedule") scheduled = scheduledPods(t, ds) Assert(t).AreEqual(len(scheduled), 0, "expected all nodes to have been unlabeled") err = waitForPodsInIntent(kpStore, 0) Assert(t).IsNil(err, "Unexpected number of pods scheduled") }