func (ds *daemonSet) labelPod(node types.NodeName) error {
	ds.logger.NoFields().Infof("Labelling '%v' in node '%v' with daemon set uuid '%v'", ds.Manifest.ID(), node, ds.ID())

	// Will apply the following label on the key <labels.POD>/<node>/<ds.Manifest.ID()>:
	//	{ DSIDLabel : ds.ID() }
	// eg node/127.0.0.1/test_pod[daemon_set_id] := test_ds_id
	// This is for indicating that this pod path belongs to this daemon set
	id := labels.MakePodLabelKey(node, ds.Manifest.ID())
	err := ds.applicator.SetLabel(labels.POD, id, DSIDLabel, ds.ID().String())
	if err != nil {
		return util.Errorf("Error setting label: %v", err)
	}
	return nil
}
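// Sketch (not part of the daemon set implementation): assuming an applicator
// such as one satisfying labels.Applicator, this is roughly how the effect of
// labelPod could be observed. The node name and helper signature are
// illustrative, not taken from the source.
func exampleObserveDaemonSetLabel(ds *daemonSet, applicator labels.Applicator) error {
	node := types.NodeName("127.0.0.1")
	if err := ds.labelPod(node); err != nil {
		return err
	}

	id := labels.MakePodLabelKey(node, ds.Manifest.ID())
	labeled, err := applicator.GetLabels(labels.POD, id)
	if err != nil {
		return err
	}
	// The pod path is now labeled with this daemon set's id:
	//	labeled.Labels[DSIDLabel] == ds.ID().String()
	_ = labeled
	return nil
}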
func TestReservedLabels(t *testing.T) {
	_, _, applicator, rc, _ := setup(t)

	err := applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error assigning label")

	// Install manifest on a single node
	rc.ReplicasDesired = 1
	err = rc.meetDesires()
	Assert(t).IsNil(err, "unexpected error scheduling nodes")

	labeled, err := applicator.GetLabels(labels.POD, labels.MakePodLabelKey("node1", "testPod"))
	Assert(t).IsNil(err, "unexpected error getting pod labels")
	Assert(t).AreEqual(labeled.Labels[rcstore.PodIDLabel], "testPod", "Pod label not set as expected")
	Assert(t).AreEqual(labeled.Labels[RCIDLabel], rc.ID().String(), "RC label not set as expected")
}
// forEachLabel attempts to apply the supplied function to all user-supplied labels
// and the reserved labels.
// If forEachLabel encounters an error while applying the function, it returns that
// error immediately and does not apply the function to any subsequent labels.
func (rc *replicationController) forEachLabel(node types.NodeName, f func(id, k, v string) error) error {
	id := labels.MakePodLabelKey(node, rc.Manifest.ID())

	// user-requested labels
	for k, v := range rc.PodLabels {
		if err := f(id, k, v); err != nil {
			return err
		}
	}

	// our reserved labels (pod id and replication controller id)
	err := f(id, rcstore.PodIDLabel, rc.Manifest.ID().String())
	if err != nil {
		return err
	}
	return f(id, RCIDLabel, rc.ID().String())
}
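// Sketch (assumed caller, not the actual scheduling code): forEachLabel is
// naturally combined with an applicator's SetLabel to write every label for a
// newly scheduled node in one pass. The helper name and the applicator
// parameter are illustrative assumptions.
func applyAllLabels(rc *replicationController, applicator labels.Applicator, node types.NodeName) error {
	return rc.forEachLabel(node, func(id, k, v string) error {
		return applicator.SetLabel(labels.POD, id, k, v)
	})
}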
func (ds *daemonSet) unschedule(node types.NodeName) error {
	ds.logger.NoFields().Infof("Unscheduling '%v' in node '%v' with daemon set uuid '%v'", ds.Manifest.ID(), node, ds.ID())

	// Will remove the following key:
	// <kp.INTENT_TREE>/<node>/<ds.Manifest.ID()>
	_, err := ds.kpStore.DeletePod(kp.INTENT_TREE, node, ds.Manifest.ID())
	if err != nil {
		return util.Errorf("Unable to delete pod id '%v' in node '%v', from intent tree: %v", ds.Manifest.ID(), node, err)
	}

	// Will remove the following label on the key <labels.POD>/<node>/<ds.Manifest.ID()>: DSIDLabel
	// This is for indicating that this pod path no longer belongs to this daemon set
	id := labels.MakePodLabelKey(node, ds.Manifest.ID())
	err = ds.applicator.RemoveLabel(labels.POD, id, DSIDLabel)
	if err != nil {
		return util.Errorf("Error removing label: %v", err)
	}
	return nil
}
func TestCleanupPods(t *testing.T) {
	retryInterval = testFarmRetryInterval

	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	// Make some dangling pod labels, then instantiate a farm and expect it to clean them up
	podID := types.PodID("testPod")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	var allNodes []types.NodeName
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
		Assert(t).IsNil(err, "Expected no error labeling node")

		_, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
		Assert(t).IsNil(err, "Expected no error adding pod to intent tree")
	}

	// Assert that the precondition is true
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		labeled, err := applicator.GetLabels(labels.POD, id)
		Assert(t).IsNil(err, "Expected no error getting labels")
		Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

		_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
		Assert(t).IsNil(err, "Expected no error getting pod from intent store")
		Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
	}

	// Instantiate farm
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "cleanupPods",
	})
	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	// Make sure there are no nodes left
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		_, err := waitForPodLabel(applicator, false, id)
		Assert(t).IsNil(err, "Expected pod not to have a dsID label")

		condition := func() error {
			_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
			if err != pods.NoCurrentManifest {
				return util.Errorf("Expected pod to be deleted in intent store")
			}
			return nil
		}
		err = waitForCondition(condition)
		Assert(t).IsNil(err, "Error cleaning up pods")
	}
}
// Transfers the named node from the old RC to the new RC
func transferNode(node types.NodeName, manifest manifest.Manifest, upd update) error {
	if _, err := upd.kps.SetPod(kp.REALITY_TREE, node, manifest); err != nil {
		return err
	}

	return upd.labeler.SetLabel(
		labels.POD,
		labels.MakePodLabelKey(node, manifest.ID()),
		rc.RCIDLabel,
		string(upd.NewRC),
	)
}
// cleanupDaemonSetPods removes, at every cleanupInterval, all pods carrying a
// DSIDLabel whose daemon set id no longer exists in the store. Such dangling
// pods can appear if the farm crashes unexpectedly or if someone deletes or
// modifies a node.
func (dsf *Farm) cleanupDaemonSetPods(quitCh <-chan struct{}) {
	timer := time.NewTimer(time.Duration(0))

	for {
		select {
		case <-quitCh:
			return
		case <-timer.C:
		}
		timer.Reset(cleanupInterval)

		allDaemonSets, err := dsf.dsStore.List()
		if err != nil {
			dsf.logger.Errorf("Unable to get daemon sets from intent tree in daemon set farm: %v", err)
			continue
		}

		dsIDMap := make(map[fields.ID]ds_fields.DaemonSet)
		for _, dsFields := range allDaemonSets {
			dsIDMap[dsFields.ID] = dsFields
		}

		dsIDLabelSelector := klabels.Everything().
			Add(DSIDLabel, klabels.ExistsOperator, []string{})

		allPods, err := dsf.applicator.GetMatches(dsIDLabelSelector, labels.POD)
		if err != nil {
			dsf.logger.Errorf("Unable to get matches for daemon sets in pod store: %v", err)
			continue
		}

		for _, podLabels := range allPods {
			// Only check pods scheduled by a daemon set
			dsID := podLabels.Labels.Get(DSIDLabel)

			// Check whether the daemon set still exists; if it doesn't, unschedule the pod
			if _, ok := dsIDMap[fields.ID(dsID)]; ok {
				continue
			}

			nodeName, podID, err := labels.NodeAndPodIDFromPodLabel(podLabels)
			if err != nil {
				dsf.logger.NoFields().Error(err)
				continue
			}

			// TODO: Since this mirrors the unschedule function in daemon_set.go,
			// we should find a nice way to couple them together
			dsf.logger.NoFields().Infof("Unscheduling '%v' in node '%v' with dangling daemon set uuid '%v'", podID, nodeName, dsID)

			_, err = dsf.kpStore.DeletePod(kp.INTENT_TREE, nodeName, podID)
			if err != nil {
				dsf.logger.NoFields().Errorf("Unable to delete pod id '%v' in node '%v', from intent tree: %v", podID, nodeName, err)
				continue
			}

			id := labels.MakePodLabelKey(nodeName, podID)
			err = dsf.applicator.RemoveLabel(labels.POD, id, DSIDLabel)
			if err != nil {
				dsf.logger.NoFields().Errorf("Error removing ds pod id label '%v': %v", id, err)
			}
		}
	}
}