// Returns an RC that matches the intent of the given deployment. // It creates a new RC if required. func (dc *DeploymentController) getNewRC(deployment extensions.Deployment) (*api.ReplicationController, error) { existingNewRC, err := deploymentutil.GetNewRCFromList(deployment, dc.client, func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) { return dc.rcStore.List() }) if err != nil || existingNewRC != nil { return existingNewRC, err } // Check the rc expectations of deployment before creating a new rc dKey, err := controller.KeyFunc(&deployment) if err != nil { return nil, fmt.Errorf("couldn't get key for deployment %#v: %v", deployment, err) } if !dc.rcExpectations.SatisfiedExpectations(dKey) { dc.enqueueDeployment(&deployment) return nil, fmt.Errorf("RC expectations not met yet before getting new RC\n") } // new RC does not exist, create one. namespace := deployment.ObjectMeta.Namespace podTemplateSpecHash := podutil.GetPodTemplateSpecHash(deployment.Spec.Template) newRCTemplate := deploymentutil.GetNewRCTemplate(deployment) // Add podTemplateHash label to selector. newRCSelector := labelsutil.CloneAndAddLabel(deployment.Spec.Selector, deployment.Spec.UniqueLabelKey, podTemplateSpecHash) // Set RC expectations (1 rc should be created) dKey, err = controller.KeyFunc(&deployment) if err != nil { return nil, fmt.Errorf("couldn't get key for deployment controller %#v: %v", deployment, err) } dc.rcExpectations.ExpectCreations(dKey, 1) // Create new RC newRC := api.ReplicationController{ ObjectMeta: api.ObjectMeta{ GenerateName: deployment.Name + "-", Namespace: namespace, }, Spec: api.ReplicationControllerSpec{ Replicas: 0, Selector: newRCSelector, Template: &newRCTemplate, }, } createdRC, err := dc.client.ReplicationControllers(namespace).Create(&newRC) if err != nil { dc.rcExpectations.DeleteExpectations(dKey) return nil, fmt.Errorf("error creating replication controller: %v", err) } return createdRC, nil }
func newPod(t *testing.T, name string) (*api.Pod, string) { pod := &api.Pod{ TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, ObjectMeta: api.ObjectMeta{ UID: uuid.NewUUID(), Annotations: make(map[string]string), Name: name, Namespace: api.NamespaceDefault, ResourceVersion: "18", Labels: fooBar(), }, Spec: api.PodSpec{}, Status: api.PodStatus{ Conditions: []api.PodCondition{ {Type: api.PodReady, Status: api.ConditionTrue}, }, }, } podName, err := controller.KeyFunc(pod) if err != nil { t.Fatalf("Unexpected error naming pod %q: %v", pod.Name, err) } return pod, podName }
// getDockerRegistryLocations returns the dns form and the ip form of the secret func (e *DockerRegistryServiceController) getDockerRegistryLocations() []string { key, err := controller.KeyFunc(&kapi.Service{ObjectMeta: kapi.ObjectMeta{Name: e.serviceName, Namespace: e.serviceNamespace}}) if err != nil { return []string{} } obj, exists, err := e.serviceCache.GetByKey(key) if err != nil { return []string{} } if !exists { return []string{} } service := obj.(*kapi.Service) hasPortalIP := (len(service.Spec.ClusterIP) > 0) && (net.ParseIP(service.Spec.ClusterIP) != nil) if hasPortalIP && len(service.Spec.Ports) > 0 { return []string{ net.JoinHostPort(service.Spec.ClusterIP, fmt.Sprintf("%d", service.Spec.Ports[0].Port)), net.JoinHostPort(fmt.Sprintf("%s.%s.svc", service.Name, service.Namespace), fmt.Sprintf("%d", service.Spec.Ports[0].Port)), } } return []string{} }
// When a pod is deleted, enqueue the replica set that manages the pod and update its expectations. // obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item. func (rsc *ReplicaSetController) deletePod(obj interface{}) { pod, ok := obj.(*v1.Pod) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains // the deleted key/value. Note that this value might be stale. If the pod // changed labels the new ReplicaSet will not be woken up till the periodic resync. if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %+v", obj)) return } pod, ok = tombstone.Obj.(*v1.Pod) if !ok { utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %#v", obj)) return } } glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod) if rs := rsc.getPodReplicaSet(pod); rs != nil { rsKey, err := controller.KeyFunc(rs) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)) return } rsc.expectations.DeletionObserved(rsKey, controller.PodKey(pod)) rsc.enqueueReplicaSet(rs) } }
// clearOldAllocation clears the old allocation for a service if it // differs from a new allocation. Returns a boolean indication of // whether the old allocation was cleared. func (ic *IngressIPController) clearOldAllocation(new, old *kapi.Service) bool { oldIP := "" if old != nil && old.Spec.Type == kapi.ServiceTypeLoadBalancer && len(old.Status.LoadBalancer.Ingress) > 0 { oldIP = old.Status.LoadBalancer.Ingress[0].IP } noOldAllocation := len(oldIP) == 0 if noOldAllocation { return false } newIP := "" if new != nil && new.Spec.Type == kapi.ServiceTypeLoadBalancer && len(new.Status.LoadBalancer.Ingress) > 0 { newIP = new.Status.LoadBalancer.Ingress[0].IP } allocationUnchanged := newIP == oldIP if allocationUnchanged { return false } // New allocation differs from old due to update or deletion // Get the key from the old service since the new service may be nil if key, err := controller.KeyFunc(old); err == nil { ic.clearLocalAllocation(key, oldIP) return true } else { // Recovery/retry not possible for this error utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", old, err)) return false } }
func (dsc *DaemonSetsController) syncDaemonSet(key string) error { startTime := time.Now() defer func() { glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime)) }() obj, exists, err := dsc.dsStore.Store.GetByKey(key) if err != nil { glog.Infof("Unable to retrieve ds %v from store: %v", key, err) dsc.queue.Add(key) return err } if !exists { glog.V(3).Infof("daemon set has been deleted %v", key) dsc.expectations.DeleteExpectations(key) return nil } ds := obj.(*experimental.DaemonSet) // Don't process a daemon set until all its creations and deletions have been processed. // For example if daemon set foo asked for 3 new daemon pods in the previous call to manage, // then we do not want to call manage on foo until the daemon pods have been created. dsKey, err := controller.KeyFunc(ds) if err != nil { glog.Errorf("Couldn't get key for object %+v: %v", ds, err) return err } dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey) if dsNeedsSync { dsc.manage(ds) } dsc.updateDaemonSetStatus(ds) return nil }
func (dsc *DaemonSetsController) deletePod(obj interface{}) { pod, ok := obj.(*api.Pod) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains // the deleted key/value. Note that this value might be stale. If the pod // changed labels the new daemonset will not be woken up till the periodic // resync. if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { glog.Errorf("Couldn't get object from tombstone %+v", obj) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { glog.Errorf("Tombstone contained object that is not a pod %+v", obj) return } } glog.V(4).Infof("Pod %s deleted.", pod.Name) if ds := dsc.getPodDaemonSet(pod); ds != nil { dsKey, err := controller.KeyFunc(ds) if err != nil { glog.Errorf("Couldn't get key for object %+v: %v", ds, err) return } dsc.expectations.DeletionObserved(dsKey) dsc.enqueueDaemonSet(ds) } }
// When a pod is updated, figure out what replica set/s manage it and wake them // up. If the labels of the pod have changed we need to awaken both the old // and new replica set. old and cur must be *api.Pod types. func (rsc *ReplicaSetController) updatePod(old, cur interface{}) { if api.Semantic.DeepEqual(old, cur) { // A periodic relist will send update events for all known pods. return } curPod := cur.(*api.Pod) rs := rsc.getPodReplicaSet(curPod) if rs == nil { return } rsKey, err := controller.KeyFunc(rs) if err != nil { glog.Errorf("Couldn't get key for replication controller %#v: %v", rs, err) return } oldPod := old.(*api.Pod) if curPod.DeletionTimestamp != nil && oldPod.DeletionTimestamp == nil { // when a pod is deleted gracefully it's deletion timestamp is first modified to reflect a grace period, // and after such time has passed, the kubelet actually deletes it from the store. We receive an update // for modification of the deletion timestamp and expect an rc to create more replicas asap, not wait // until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because // an rc never initiates a phase change, and so is never asleep waiting for the same. rsc.expectations.DeletionObserved(rsKey) } rsc.enqueueReplicaSet(rs) if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) { // If the old and new ReplicaSet are the same, the first one that syncs // will set expectations preventing any damage from the second. if oldRS := rsc.getPodReplicaSet(oldPod); oldRS != nil { rsc.enqueueReplicaSet(oldRS) } } }
// When a pod is deleted, enqueue the controller that manages the pod and update its expectations. // obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. func (rm *ReplicationManager) deletePod(obj interface{}) { pod, ok := obj.(*api.Pod) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains // the deleted key/value. Note that this value might be stale. If the pod // changed labels the new rc will not be woken up till the periodic resync. if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { glog.Errorf("Couldn't get object from tombstone %#v", obj) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { glog.Errorf("Tombstone contained object that is not a pod %#v", obj) return } } glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v, labels %+v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod.Labels) if rc := rm.getPodController(pod); rc != nil { rcKey, err := controller.KeyFunc(rc) if err != nil { glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) return } rm.expectations.DeletionObserved(rcKey, controller.PodKey(pod)) rm.enqueueController(rc) } }
// When a pod is deleted, enqueue the controller that manages the pod and update its expectations. // obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. func (rm *ReplicationManager) deletePod(obj interface{}) { pod, ok := obj.(*api.Pod) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains // the deleted key/value. Note that this value might be stale. If the pod // changed labels the new rc will not be woken up till the periodic resync. if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { glog.Errorf("Couldn't get object from tombstone %+v", obj) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { glog.Errorf("Tombstone contained object that is not a pod %+v", obj) return } } if rc := rm.getPodController(pod); rc != nil { rcKey, err := controller.KeyFunc(rc) if err != nil { glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) return } // This method only manages expectations for the case where a pod is // deleted without a grace period. if pod.DeletionTimestamp == nil { rm.expectations.DeletionObserved(rcKey) } rm.enqueueController(rc) } }
// When a pod is deleted, enqueue the replica set that manages the pod and update its expectations. // obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. func (rsc *ReplicaSetController) deletePod(obj interface{}) { pod, ok := obj.(*api.Pod) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains // the deleted key/value. Note that this value might be stale. If the pod // changed labels the new ReplicaSet will not be woken up till the periodic resync. if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a replica set recreates a replica", obj, controller.ExpectationsTimeout) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before replica set recreates a replica", obj, controller.ExpectationsTimeout) return } } if rs := rsc.getPodReplicaSet(pod); rs != nil { rsKey, err := controller.KeyFunc(rs) if err != nil { glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err) return } rsc.expectations.DeletionObserved(rsKey) rsc.enqueueReplicaSet(rs) } }
// TestSyncReplicaSetDormancy verifies that unmet create expectations suppress
// further pod creations while still allowing status updates, and that a failed
// create does not raise expectations (so the next sync retries the create).
func TestSyncReplicaSetDormancy(t *testing.T) {
	// Setup a test server so we can lie about the current state of pods
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: "{}",
	}
	testServer := httptest.NewServer(&fakeHandler)
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	fakePodControl := controller.FakePodControl{}
	manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
	manager.podStoreSynced = alwaysReady
	manager.podControl = &fakePodControl

	// A ReplicaSet of 2 replicas with only 1 matching pod in the store.
	labelMap := map[string]string{"foo": "bar"}
	rsSpec := newReplicaSet(2, labelMap)
	manager.rsStore.Store.Add(rsSpec)
	newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod")

	// Creates a replica and sets expectations
	rsSpec.Status.Replicas = 1
	rsSpec.Status.ReadyReplicas = 1
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// Expectations prevents replicas but not an update on status
	rsSpec.Status.Replicas = 0
	rsSpec.Status.ReadyReplicas = 0
	fakePodControl.Clear()
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 0, 0, 0)

	// Get the key for the controller
	rsKey, err := controller.KeyFunc(rsSpec)
	if err != nil {
		t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
	}

	// Lowering expectations should lead to a sync that creates a replica, however the
	// fakePodControl error will prevent this, leaving expectations at 0, 0
	manager.expectations.CreationObserved(rsKey)
	rsSpec.Status.Replicas = 1
	rsSpec.Status.ReadyReplicas = 1
	fakePodControl.Clear()
	fakePodControl.Err = fmt.Errorf("Fake Error")
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// This replica should not need a Lowering of expectations, since the previous create failed
	fakePodControl.Clear()
	fakePodControl.Err = nil
	manager.syncReplicaSet(getKey(rsSpec, t))
	validateSyncReplicaSet(t, &fakePodControl, 1, 0, 0)

	// 1 PUT for the ReplicaSet status during dormancy window.
	// Note that the pod creates go through pod control so they're not recorded.
	fakeHandler.ValidateRequestCount(t, 1)
}
// When a pod is deleted, update expectations of the controller that manages the pod. // obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. func (dc *DeploymentController) deletePod(obj interface{}) { pod, ok := obj.(*api.Pod) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains // the deleted key/value. Note that this value might be stale. If the pod // changed labels the new ReplicaSet will not be woken up till the periodic // resync. if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a ReplicaSet recreates a replica", obj, controller.ExpectationsTimeout) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before ReplicaSet recreates a replica", obj, controller.ExpectationsTimeout) return } } glog.V(4).Infof("Pod %s deleted.", pod.Name) if d := dc.getDeploymentForPod(pod); d != nil { dKey, err := controller.KeyFunc(d) if err != nil { glog.Errorf("Couldn't get key for deployment controller %#v: %v", d, err) return } dc.podExpectations.DeletionObserved(dKey) } }
func (rq *ResourceQuotaController) addQuota(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { glog.Errorf("Couldn't get key for object %+v: %v", obj, err) return } resourceQuota := obj.(*api.ResourceQuota) // if we declared an intent that is not yet captured in status (prioritize it) if !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) { rq.missingUsageQueue.Add(key) return } // if we declared a constraint that has no usage (which this controller can calculate, prioritize it) for constraint := range resourceQuota.Status.Hard { if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound { matchedResources := []api.ResourceName{constraint} for _, evaluator := range rq.registry.Evaluators() { if intersection := quota.Intersection(evaluator.MatchesResources(), matchedResources); len(intersection) != 0 { rq.missingUsageQueue.Add(key) return } } } } // no special priority, go in normal recalc queue rq.queue.Add(key) }
// When a pod is deleted, enqueue the job that manages the pod and update its expectations. // obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. func (jm *JobController) deletePod(obj interface{}) { pod, ok := obj.(*api.Pod) // When a delete is dropped, the relist will notice a pod in the store not // in the list, leading to the insertion of a tombstone object which contains // the deleted key/value. Note that this value might be stale. If the pod // changed labels the new job will not be woken up till the periodic resync. if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a job recreates a pod", obj, controller.ExpectationsTimeout) return } pod, ok = tombstone.Obj.(*api.Pod) if !ok { glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before job recreates a pod", obj, controller.ExpectationsTimeout) return } } if job := jm.getPodJob(pod); job != nil { jobKey, err := controller.KeyFunc(job) if err != nil { glog.Errorf("Couldn't get key for job %#v: %v", job, err) return } jm.expectations.DeletionObserved(jobKey) jm.enqueueController(job) } }
func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBudget, delay time.Duration) { key, err := controller.KeyFunc(pdb) if err != nil { glog.Errorf("Cound't get key for PodDisruptionBudget object %+v: %v", pdb, err) return } dc.recheckQueue.AddAfter(key, delay) }
func (dc *DisruptionController) enqueuePdb(pdb *policy.PodDisruptionBudget) { key, err := controller.KeyFunc(pdb) if err != nil { glog.Errorf("Cound't get key for PodDisruptionBudget object %+v: %v", pdb, err) return } dc.queue.Add(key) }
func (frsc *ReplicaSetController) deliverFedReplicaSetObj(obj interface{}, delay time.Duration) { key, err := controller.KeyFunc(obj) if err != nil { glog.Errorf("Couldn't get key for object %+v: %v", obj, err) return } frsc.deliverReplicaSetByKey(key, delay, false) }
func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) { key, err := controller.KeyFunc(ds) if err != nil { t.Errorf("Could not get key for daemon.") } manager.syncHandler(key) validateSyncDaemonSets(t, podControl, expectedCreates, expectedDeletes) }
func getKey(job *extensions.Job, t *testing.T) string { if key, err := controller.KeyFunc(job); err != nil { t.Errorf("Unexpected error getting key for job %v: %v", job.Name, err) return "" } else { return key } }
// obj could be an *api.ResourceQuota, or a DeletionFinalStateUnknown marker item. func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { glog.Errorf("Couldn't get key for object %+v: %v", obj, err) return } rq.queue.Add(key) }
func (c *DeploymentConfigController) enqueueDeploymentConfig(dc *deployapi.DeploymentConfig) { key, err := kcontroller.KeyFunc(dc) if err != nil { glog.Errorf("Couldn't get key for object %#v: %v", dc, err) return } c.queue.Add(key) }
// enqueuePetSet enqueues the given petset in the work queue. func (psc *PetSetController) enqueuePetSet(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { glog.Errorf("Cound't get key for object %+v: %v", obj, err) return } psc.queue.Add(key) }
func getKey(d *exp.Deployment, t *testing.T) string { if key, err := controller.KeyFunc(d); err != nil { t.Errorf("Unexpected error getting key for deployment %v: %v", d.Name, err) return "" } else { return key } }
func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err)) return } cc.queue.Add(key) }
func getKey(rc *api.ReplicationController, t *testing.T) string { if key, err := controller.KeyFunc(rc); err != nil { t.Errorf("Unexpected error getting key for rc %v: %v", rc.Name, err) return "" } else { return key } }
func getKey(ds *extensions.DaemonSet, t *testing.T) string { if key, err := controller.KeyFunc(ds); err != nil { t.Errorf("Unexpected error getting key for ds %v: %v", ds.Name, err) return "" } else { return key } }
// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item. func (s *ServiceController) enqueueService(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { glog.Errorf("Couldn't get key for object %+v: %v", obj, err) return } s.queue.Add(key) }
func getKey(rs *extensions.ReplicaSet, t *testing.T) string { if key, err := controller.KeyFunc(rs); err != nil { t.Errorf("Unexpected error getting key for ReplicaSet %v: %v", rs.Name, err) return "" } else { return key } }
func (dsc *DaemonSetsController) enqueueDaemonSet(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { glog.Errorf("Couldn't get key for object %+v: %v", obj, err) return } dsc.queue.Add(key) }