func NewFarm(
	kpStore kp.Store,
	rcs rcstore.Store,
	scheduler scheduler.Scheduler,
	labeler labels.Applicator,
	sessions <-chan string,
	logger logging.Logger,
	rcSelector klabels.Selector,
	alerter alerting.Alerter,
) *Farm {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	return &Farm{
		kpStore:    kpStore,
		rcStore:    rcs,
		scheduler:  scheduler,
		labeler:    labeler,
		sessions:   sessions,
		logger:     logger,
		children:   make(map[fields.ID]childRC),
		alerter:    alerter,
		rcSelector: rcSelector,
		metrics:    rcmetrics.NewMetrics(logger),
	}
}
func (f UpdateFactory) New(u roll_fields.Update, l logging.Logger, session kp.Session, alerter alerting.Alerter) Update {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	return NewUpdate(u, f.KPStore, f.RCStore, f.HealthChecker, f.Labeler, f.Scheduler, l, session, alerter)
}
func NewFarm(
	factory Factory,
	kps kp.Store,
	rls rollstore.Store,
	rcs RCGetter,
	sessions <-chan string,
	logger logging.Logger,
	labeler rc.Labeler,
	rcSelector klabels.Selector,
	alerter alerting.Alerter,
) *Farm {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	return &Farm{
		factory:    factory,
		kps:        kps,
		rls:        rls,
		rcs:        rcs,
		sessions:   sessions,
		logger:     logger,
		children:   make(map[roll_fields.ID]childRU),
		labeler:    labeler,
		rcSelector: rcSelector,
		alerter:    alerter,
	}
}
func NewFarm(
	store store,
	rcs rcstore.Store,
	scheduler scheduler.Scheduler,
	labeler Labeler,
	sessions <-chan string,
	logger logging.Logger,
	rcSelector klabels.Selector,
	alerter alerting.Alerter,
	rcWatchPauseTime time.Duration,
) *Farm {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	return &Farm{
		store:            store,
		rcStore:          rcs,
		scheduler:        scheduler,
		labeler:          labeler,
		sessions:         sessions,
		logger:           logger,
		children:         make(map[fields.ID]childRC),
		alerter:          alerter,
		rcSelector:       rcSelector,
		rcWatchPauseTime: rcWatchPauseTime,
	}
}
// Create a new Update. The kp.Store, rcstore.Store, labels.Applicator and
// scheduler.Scheduler arguments should be the same as those of the RCs themselves. The
// session must be valid for the lifetime of the Update; maintaining this is the
// responsibility of the caller.
func NewUpdate(
	f fields.Update,
	kps kp.Store,
	rcs rcstore.Store,
	hcheck checker.ConsulHealthChecker,
	labeler rc.Labeler,
	sched scheduler.Scheduler,
	logger logging.Logger,
	session kp.Session,
	alerter alerting.Alerter,
) Update {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	logger = logger.SubLogger(logrus.Fields{
		"desired_replicas": f.DesiredReplicas,
		"minimum_replicas": f.MinimumReplicas,
	})

	return &update{
		Update:  f,
		kps:     kps,
		rcs:     rcs,
		hcheck:  hcheck,
		labeler: labeler,
		sched:   sched,
		logger:  logger,
		session: session,
		alerter: alerter,
	}
}
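// The sketch below is hypothetical caller-side usage (not taken from the source tree).
// It assumes the caller already holds the same kp.Store, rcstore.Store, health checker,
// labeler, and scheduler that the RCs were built with, plus a kp.Session that will
// outlive the update; the RC IDs and quit channel are placeholders. Passing a nil
// Alerter is safe because NewUpdate substitutes alerting.NewNop():
//
//	u := NewUpdate(
//		fields.Update{
//			OldRC:           oldRCID, // hypothetical IDs of existing RCs
//			NewRC:           newRCID,
//			DesiredReplicas: 3,
//			MinimumReplicas: 2,
//		},
//		kpStore, rcStore, healthChecker, labeler, sched,
//		logger,
//		session, // must remain valid for the lifetime of the Update
//		nil,     // nil is replaced with alerting.NewNop()
//	)
//	go u.Run(quitCh)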
func NewFarm(
	kpStore kp.Store,
	dsStore dsstore.Store,
	applicator labels.Applicator,
	sessions <-chan string,
	logger logging.Logger,
	alerter alerting.Alerter,
	healthChecker *checker.ConsulHealthChecker,
	rateLimitInterval time.Duration,
) *Farm {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	return &Farm{
		kpStore:           kpStore,
		dsStore:           dsStore,
		scheduler:         scheduler.NewApplicatorScheduler(applicator),
		applicator:        applicator,
		sessions:          sessions,
		children:          make(map[fields.ID]*childDS),
		logger:            logger,
		alerter:           alerter,
		healthChecker:     healthChecker,
		rateLimitInterval: rateLimitInterval,
	}
}
func NewFarm(
	store store,
	dsStore dsstore.Store,
	labeler Labeler,
	watcher LabelWatcher,
	sessions <-chan string,
	logger logging.Logger,
	alerter alerting.Alerter,
	healthChecker *checker.ConsulHealthChecker,
	rateLimitInterval time.Duration,
	cachedPodMatch bool,
) *Farm {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	return &Farm{
		store:             store,
		dsStore:           dsStore,
		scheduler:         scheduler.NewApplicatorScheduler(labeler),
		labeler:           labeler,
		watcher:           watcher,
		sessions:          sessions,
		children:          make(map[fields.ID]*childDS),
		logger:            logger,
		alerter:           alerter,
		healthChecker:     healthChecker,
		rateLimitInterval: rateLimitInterval,
		cachedPodMatch:    cachedPodMatch,
	}
}
func New(
	fields fields.RC,
	kpStore kpStore,
	rcStore rcstore.Store,
	scheduler scheduler.Scheduler,
	podApplicator labels.Applicator,
	logger logging.Logger,
	alerter alerting.Alerter,
) ReplicationController {
	if alerter == nil {
		alerter = alerting.NewNop()
	}

	return &replicationController{
		RC:            fields,
		logger:        logger,
		kpStore:       kpStore,
		rcStore:       rcStore,
		scheduler:     scheduler,
		podApplicator: podApplicator,
		alerter:       alerter,
	}
}
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        firstLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()

	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        secondLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()

	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")

	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a third unschedulable node and verify it doesn't get scheduled by anything
	applicator.SetLabel(labels.NODE, "node3", pc_fields.AvailabilityZoneLabel, "undefined")

	labeled, err = waitForPodLabel(applicator, false, "node3/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	// Now add 10 new nodes and verify that they are scheduled by the first daemon set
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		err := applicator.SetLabel(labels.NODE, nodeName, pc_fields.AvailabilityZoneLabel, "az1")
		Assert(t).IsNil(err, "expected no error labeling node")
	}
	for i := 0; i < 10; i++ {
		podPath := fmt.Sprintf("good_node%v/testPod", i)
		labeled, err = waitForPodLabel(applicator, true, podPath)
		Assert(t).IsNil(err, "Expected pod to have a dsID label")
		dsID := labeled.Labels.Get(DSIDLabel)
		Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
	}

	//
	// Update a daemon set's node selector and expect a node to be unscheduled
	//
	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")

	//
	// Now update the node selector to schedule node2 again and verify
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	anotherDSData, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Disabling a daemon set should not unschedule any nodes
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true

		someSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})
		dsToUpdate.NodeSelector = someSelector
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelectorFarms(firstFarm, secondFarm, anotherDSData)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	// Verify it is disabled
	condition := func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if !anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be disabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if !firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if !secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be disabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be disabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	// Verify node2 is scheduled
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(anotherDSData.ID.String(), dsID, "Unexpected dsID labeled")

	//
	// Enabling a daemon set should make the daemon set resume its regular activities
	//
	mutator = func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}
	_, err = dsStore.MutateDS(anotherDSData.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")

	// Verify it is enabled
	condition = func() error {
		anotherDSData, _, err = dsStore.Get(anotherDSData.ID)
		if err != nil {
			return util.Errorf("Expected no error getting daemon set")
		}
		if anotherDSData.Disabled {
			return util.Errorf("Expected daemon set to be enabled")
		}

		if _, ok := firstFarm.children[anotherDSData.ID]; ok {
			if firstFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := secondFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else if _, ok := secondFarm.children[anotherDSData.ID]; ok {
			if secondFarm.children[anotherDSData.ID].ds.IsDisabled() {
				return util.Errorf("Expected daemon set to be enabled in only one farm")
			}
			if _, ok := firstFarm.children[anotherDSData.ID]; ok {
				return util.Errorf("Expected daemon set to be held by only one farm")
			}
		} else {
			return util.Errorf("Expected daemon set to be enabled in only one farm")
		}
		return nil
	}
	err = waitForCondition(condition)
	Assert(t).IsNil(err, "Error enabling daemon set!")

	// Verify node2 is unscheduled
	labeled, err = waitForPodLabel(applicator, false, "node2/testPod")
	Assert(t).IsNil(err, "Expected pod not to have a dsID label")
}
func TestCleanupPods(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	// Make some dangling pod labels, then instantiate a farm and expect it to clean them up
	podID := types.PodID("testPod")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	var allNodes []types.NodeName
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
		Assert(t).IsNil(err, "Expected no error labeling node")

		_, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
		Assert(t).IsNil(err, "Expected no error adding pod to intent tree")
	}

	// Assert that the precondition is true
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		labeled, err := applicator.GetLabels(labels.POD, id)
		Assert(t).IsNil(err, "Expected no error getting labels")
		Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

		_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
		Assert(t).IsNil(err, "Expected no error getting pod from intent store")
		Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
	}

	// Instantiate farm
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "cleanupPods",
	})
	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	// Make sure there are no dangling pods left
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		_, err := waitForPodLabel(applicator, false, id)
		Assert(t).IsNil(err, "Expected pod not to have a dsID label")

		condition := func() error {
			_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
			if err != pods.NoCurrentManifest {
				return util.Errorf("Expected pod to be deleted in intent store")
			}
			return nil
		}
		err = waitForCondition(condition)
		Assert(t).IsNil(err, "Error cleaning up pods")
	}
}
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendNodes",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1")
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Check for contention between two daemon sets among their nodes
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, dsData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Make a node and verify that it was scheduled
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make another daemon set with a contending AvailabilityZoneLabel and verify
	// that it gets disabled and that the node label does not change
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, anotherDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	// Expect the new daemon set to be disabled both in the farm and in the dsStore
	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	//
	// Make a third daemon set and update its node selector to force a contend,
	// then verify that it has been disabled and the node hasn't been overwritten
	//
	anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
	badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, badDS.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.NodeSelector = nodeSelector
		return dsToUpdate, nil
	}
	badDS, err = dsStore.MutateDS(badDS.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelector(dsf, badDS)
	Assert(t).IsNil(err, "Expected daemon set to be mutated in farm")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID = labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")
}
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Make two daemon sets with an everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")

	// Add another daemon set with a different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Disable the first daemon set, then enable the second and third daemon sets in that
	// order, and there should then be a contend on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}
	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}

	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")

	// Enable second ds and verify it is enabled
	_, err = dsStore.MutateDS(secondDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, secondDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling second daemon set")

	// Enable third ds and verify it is disabled because it contends with the second ds
	_, err = dsStore.MutateDS(thirdDSData.ID, enableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")

	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Test equivalent selectors, the fifth ds should contend with the fourth
	//
	anotherPodID := types.PodID("anotherPodID")

	anotherManifestBuilder := manifest.NewBuilder()
	anotherManifestBuilder.SetID(anotherPodID)
	anotherPodManifest := anotherManifestBuilder.GetManifest()

	equalSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az99"})

	fourthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fourthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify the fourth daemon set is enabled
	err = waitForDisabled(dsf, dsStore, fourthDSData.ID, false)
	Assert(t).IsNil(err, "Error enabling fourth daemon set")

	fifthDSData, err := dsStore.Create(anotherPodManifest, minHealth, clusterName, equalSelector, anotherPodID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, fifthDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that the fifth daemon set contends and gets disabled
	err = waitForDisabled(dsf, dsStore, fifthDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling fifth daemon set")
}
func (r rctlParams) RollingUpdate(oldID, newID string, want, need int, pagerdutyServiceKey string) {
	if want < need {
		r.logger.WithFields(logrus.Fields{
			"want": want,
			"need": need,
		}).Fatalln("Cannot run update with desired replicas less than minimum replicas")
	}

	sessions := make(chan string)
	quit := make(chan struct{})

	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, r.baseClient, sessions, quit, r.logger)

	sessionID := <-sessions
	if sessionID == "" {
		r.logger.NoFields().Fatalln("Could not acquire session")
	}
	session := r.kps.NewUnmanagedSession(sessionID, "")

	alerter := alerting.NewNop()
	if pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(pagerdutyServiceKey, r.httpClient)
		if err != nil {
			r.logger.WithError(err).Fatalln("Could not initialize pagerduty alerter")
		}
	}

	result := make(chan bool, 1)
	go func() {
		result <- roll.NewUpdate(roll_fields.Update{
			OldRC:           rc_fields.ID(oldID),
			NewRC:           rc_fields.ID(newID),
			DesiredReplicas: want,
			MinimumReplicas: need,
		}, r.kps, r.rcs, r.hcheck, r.labeler, r.sched, r.logger, session, alerter).Run(quit)
		close(result)
	}()

	signals := make(chan os.Signal, 2)
	signal.Notify(signals, syscall.SIGTERM, os.Interrupt)

LOOP:
	for {
		select {
		case <-signals:
			// try to clean up locks on ^C
			close(quit)
			// do not exit right away - the session and result channels will be
			// closed after the quit is requested, ensuring that the locks held
			// by the farm were released.
			r.logger.NoFields().Errorln("Got signal, exiting")
		case <-sessions:
			r.logger.NoFields().Fatalln("Lost session")
		case res := <-result:
			// done, either due to ^C (already printed message above) or
			// clean finish
			if res {
				r.logger.NoFields().Infoln("Done")
			}
			break LOOP
		}
	}
}
func main() {
	// Parse custom flags + standard Consul routing options
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	// Set up the logger
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
				Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	// Initialize the myriad of different storage components
	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	kpStore := kp.NewConsulStore(client)
	rcStore := rcstore.NewConsul(client, RetryCount)
	rollStore := rollstore.NewConsul(client, nil)
	healthChecker := checker.NewConsulHealthChecker(client)
	labeler := labels.NewConsulApplicator(client, RetryCount)
	var sched scheduler.Scheduler
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	} else {
		sched = scheduler.NewApplicatorScheduler(labeler)
	}

	// Start acquiring sessions
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, nil, logger)
	pub := stream.NewStringValuePublisher(sessions, "")

	alerter := alerting.NewNop()
	if *pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
		if err != nil {
			logger.WithError(err).Fatalln(
				"Unable to initialize pagerduty alerter",
			)
		}
	}

	// Run the farms!
	go rc.NewFarm(
		kpStore,
		rcStore,
		sched,
		labeler,
		pub.Subscribe().Chan(),
		logger,
		klabels.Everything(),
		alerter,
	).Start(nil)
	roll.NewFarm(
		roll.UpdateFactory{
			KPStore:       kpStore,
			RCStore:       rcStore,
			HealthChecker: healthChecker,
			Labeler:       labeler,
			Scheduler:     sched,
		},
		kpStore,
		rollStore,
		rcStore,
		pub.Subscribe().Chan(),
		logger,
		labeler,
		klabels.Everything(),
		alerter,
	).Start(nil)
}