// TestURLBackoffFunctionality generally tests the URLBackoff wrapper. We avoid
// duplicating tests from backoff and request.
func TestURLBackoffFunctionality(t *testing.T) {
    myBackoff := &URLBackoff{
        Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second),
    }

    // Now test that backoff increases, then recovers.
    // 200 and 300 should both result in clearing the backoff.
    // all others like 429 should result in increased backoff.
    seconds := []int{0, 1, 2, 4, 8, 0, 1, 2}
    returnCodes := []int{
        429, 500, 501, 502,
        300,
        500, 501, 502,
    }

    if len(seconds) != len(returnCodes) {
        t.Fatalf("responseCode to backoff arrays should be the same length... sanity check failed.")
    }

    for i, sec := range seconds {
        backoffSec := myBackoff.CalculateBackoff(parse("http://1.2.3.4:100"))
        if backoffSec < time.Duration(sec)*time.Second || backoffSec > time.Duration(sec+5)*time.Second {
            t.Errorf("Backoff out of range %v: %v %v", i, sec, backoffSec)
        }
        myBackoff.UpdateBackoff(parse("http://1.2.3.4:100/responseCodeForFuncTest"), nil, returnCodes[i])
    }

    if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) == 0 {
        t.Errorf("The final return code %v should have resulted in a backoff!", returnCodes[7])
    }
}
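// The sketch below is illustrative only (not part of the original tests): it shows the
// underlying flowcontrol.Backoff primitives that URLBackoff delegates to when handling
// response codes. The key string "example-host" is hypothetical; URLBackoff derives its
// real keys from the URL's host:port.
func exampleBackoffPrimitivesSketch() {
    b := flowcontrol.NewBackOff(1*time.Second, 60*time.Second)
    key := "example-host"

    b.Next(key, b.Clock.Now()) // record a failure: grows the per-key delay, capped at 60s
    delay := b.Get(key)        // read the current delay for the key
    _ = delay
    b.Reset(key) // a success (e.g. HTTP 2xx/3xx) clears the entry, as UpdateBackoff does
}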
// readExpBackoffConfig handles the internal logic of determining what the
// backoff policy is. By default, if no information is available, NoBackoff.
// TODO Generalize this see #17727 .
func readExpBackoffConfig() BackoffManager {
    backoffBase := os.Getenv(envBackoffBase)
    backoffDuration := os.Getenv(envBackoffDuration)

    backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
    backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
    if errBase != nil || errDuration != nil {
        return &NoBackoff{}
    }
    return &URLBackoff{
        Backoff: flowcontrol.NewBackOff(
            time.Duration(backoffBaseInt)*time.Second,
            time.Duration(backoffDurationInt)*time.Second)}
}
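// Illustrative sketch (not part of the original source): enabling the exponential backoff
// policy from within this package by setting the two environment variables that
// readExpBackoffConfig parses. Both values are interpreted as whole seconds; the 1s/10s
// values here are arbitrary examples.
func exampleEnableExpBackoffSketch() {
    os.Setenv(envBackoffBase, "1")      // base backoff step, in seconds
    os.Setenv(envBackoffDuration, "10") // maximum backoff, in seconds
    defer os.Unsetenv(envBackoffBase)
    defer os.Unsetenv(envBackoffDuration)

    backoffMgr := readExpBackoffConfig() // now returns a *URLBackoff instead of &NoBackoff{}
    _ = backoffMgr
}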
func TestURLBackoffFunctionalityCollisions(t *testing.T) {
    myBackoff := &URLBackoff{
        Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second),
    }

    // Add some noise and make sure backoff for a clean URL is zero.
    myBackoff.UpdateBackoff(parse("http://100.200.300.400:8080"), nil, 500)
    myBackoff.UpdateBackoff(parse("http://1.2.3.4:8080"), nil, 500)
    if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) > 0 {
        t.Errorf("URLs are colliding in the backoff map!")
    }
}
func pullerTestEnv(c pullerTestCase, serialized bool) (puller ImageManager, fakeClock *clock.FakeClock, fakeRuntime *ctest.FakeRuntime, container *v1.Container) {
    container = &v1.Container{
        Name:            "container_name",
        Image:           c.containerImage,
        ImagePullPolicy: c.policy,
    }

    backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
    fakeClock = clock.NewFakeClock(time.Now())
    backOff.Clock = fakeClock

    fakeRuntime = &ctest.FakeRuntime{}
    fakeRecorder := &record.FakeRecorder{}

    fakeRuntime.ImageList = []Image{{ID: "present_image"}}
    fakeRuntime.Err = c.pullerErr
    fakeRuntime.InspectErr = c.inspectErr

    puller = NewImageManager(fakeRecorder, fakeRuntime, backOff, serialized, 0, 0)
    return
}
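// Illustrative sketch (not in the original file): because the Backoff above shares the
// injected FakeClock, a test can advance time deterministically instead of sleeping.
// The key "pull-busybox" and the direct use of Next/IsInBackOffSinceUpdate are
// assumptions for illustration; the real image puller derives its own keys internally.
func examplePullBackoffClockSketch() {
    backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
    fakeClock := clock.NewFakeClock(time.Now())
    backOff.Clock = fakeClock

    key := "pull-busybox"
    backOff.Next(key, fakeClock.Now())                                // simulate one failed pull
    inBackoff := backOff.IsInBackOffSinceUpdate(key, fakeClock.Now()) // true right after the failure
    fakeClock.Step(2 * time.Second)                                   // jump past the 1s backoff window
    stillInBackoff := backOff.IsInBackOffSinceUpdate(key, fakeClock.Now()) // now false
    _, _ = inBackoff, stillInBackoff
}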
func TestSyncPod(t *testing.T) {
    fakeRuntime, fakeImage, m, err := createTestRuntimeManager()
    assert.NoError(t, err)

    containers := []v1.Container{
        {
            Name:            "foo1",
            Image:           "busybox",
            ImagePullPolicy: v1.PullIfNotPresent,
        },
        {
            Name:            "foo2",
            Image:           "alpine",
            ImagePullPolicy: v1.PullIfNotPresent,
        },
    }
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            UID:       "12345678",
            Name:      "foo",
            Namespace: "new",
        },
        Spec: v1.PodSpec{
            Containers: containers,
        },
    }

    backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
    result := m.SyncPod(pod, v1.PodStatus{}, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
    assert.NoError(t, result.Error())
    assert.Equal(t, 2, len(fakeRuntime.Containers))
    assert.Equal(t, 2, len(fakeImage.Images))
    assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
    for _, sandbox := range fakeRuntime.Sandboxes {
        assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.GetState())
    }
    for _, c := range fakeRuntime.Containers {
        assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.GetState())
    }
}
func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) {
    recorder := &record.FakeRecorder{}
    kubeRuntimeManager := &kubeGenericRuntimeManager{
        recorder:            recorder,
        cpuCFSQuota:         false,
        livenessManager:     proberesults.NewManager(),
        containerRefManager: kubecontainer.NewRefManager(),
        machineInfo:         machineInfo,
        osInterface:         osInterface,
        networkPlugin:       networkPlugin,
        runtimeHelper:       &fakeRuntimeHelper{},
        runtimeService:      runtimeService,
        imageService:        imageService,
        keyring:             credentialprovider.NewDockerKeyring(),
    }

    typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion)
    if err != nil {
        return nil, err
    }

    kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodGetter(), kubeRuntimeManager)
    kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
    kubeRuntimeManager.imagePuller = images.NewImageManager(
        kubecontainer.FilterEventRecorder(recorder),
        kubeRuntimeManager,
        flowcontrol.NewBackOff(time.Second, 300*time.Second),
        false,
        0, // Disable image pull throttling by setting QPS to 0,
        0,
    )
    kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(
        &fakeHTTP{}, kubeRuntimeManager, kubeRuntimeManager)

    return kubeRuntimeManager, nil
}
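// Illustrative sketch (not in the original file): the two trailing 0 arguments to
// images.NewImageManager above are the image-pull QPS and burst; passing 0 disables
// throttling in this fake. That style of throttling corresponds roughly to a token
// bucket rate limiter from the same flowcontrol package, as sketched below. The
// 5.0/10 values are arbitrary examples, not defaults taken from the source.
func exampleImagePullThrottleSketch() {
    limiter := flowcontrol.NewTokenBucketRateLimiter(5.0, 10) // qps=5, burst=10
    if limiter.TryAccept() {
        // a pull would be allowed to proceed here; otherwise it would be rejected or delayed
    }
}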
// NewDaemonSetController returns a new daemonset controller
func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-daemonset-controller"})

    daemonsetcontroller := &DaemonSetController{
        federatedApiClient:    client,
        daemonsetReviewDelay:  time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        daemonsetBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
    }

    // Build deliverers for triggering reconciliations.
    daemonsetcontroller.daemonsetDeliverer = util.NewDelayingDeliverer()
    daemonsetcontroller.clusterDeliverer = util.NewDelayingDeliverer()

    // Start informer in federated API servers on daemonsets that should be federated.
    daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
                return client.Extensions().DaemonSets(apiv1.NamespaceAll).List(options)
            },
            WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                return client.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options)
            },
        },
        &extensionsv1.DaemonSet{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) }))

    // Federated informer on daemonsets in members of federation.
    daemonsetcontroller.daemonsetFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
                        return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).List(options)
                    },
                    WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                        return targetClient.Extensions().DaemonSets(apiv1.NamespaceAll).Watch(options)
                    },
                },
                &extensionsv1.DaemonSet{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
                // would be just confirmation that some daemonset operation succeeded.
                util.NewTriggerOnAllChanges(
                    func(obj pkgruntime.Object) {
                        daemonsetcontroller.deliverDaemonSetObj(obj, daemonsetcontroller.daemonsetReviewDelay, false)
                    },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federationapi.Cluster) {
                // When new cluster becomes available process all the daemonsets again.
                daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay))
            },
        },
    )

    // Federated updater along with Create/Update/Delete operations.
    daemonsetcontroller.federatedUpdater = util.NewFederatedUpdater(daemonsetcontroller.daemonsetFederatedInformer,
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            daemonset := obj.(*extensionsv1.DaemonSet)
            glog.V(4).Infof("Attempting to create daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
            _, err := client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset)
            if err != nil {
                glog.Errorf("Error creating daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
            } else {
                glog.V(4).Infof("Successfully created daemonset %s/%s", daemonset.Namespace, daemonset.Name)
            }
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            daemonset := obj.(*extensionsv1.DaemonSet)
            glog.V(4).Infof("Attempting to update daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
            _, err := client.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
            if err != nil {
                glog.Errorf("Error updating daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
            } else {
                glog.V(4).Infof("Successfully updated daemonset %s/%s", daemonset.Namespace, daemonset.Name)
            }
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            daemonset := obj.(*extensionsv1.DaemonSet)
            glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
            err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &apiv1.DeleteOptions{})
            if err != nil {
                glog.Errorf("Error deleting daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
            } else {
                glog.V(4).Infof("Successfully deleted daemonset %s/%s", daemonset.Namespace, daemonset.Name)
            }
            return err
        })

    daemonsetcontroller.deletionHelper = deletionhelper.NewDeletionHelper(
        daemonsetcontroller.hasFinalizerFunc,
        daemonsetcontroller.removeFinalizerFunc,
        daemonsetcontroller.addFinalizerFunc,
        // objNameFunc
        func(obj pkgruntime.Object) string {
            daemonset := obj.(*extensionsv1.DaemonSet)
            return daemonset.Name
        },
        daemonsetcontroller.updateTimeout,
        daemonsetcontroller.eventRecorder,
        daemonsetcontroller.daemonsetFederatedInformer,
        daemonsetcontroller.federatedUpdater,
    )

    return daemonsetcontroller
}
// NewNamespaceController returns a new namespace controller
func NewNamespaceController(client federationclientset.Interface) *NamespaceController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-namespace-controller"})

    nc := &NamespaceController{
        federatedApiClient:    client,
        namespaceReviewDelay:  time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        namespaceBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
    }

    // Build deliverers for triggering reconciliations.
    nc.namespaceDeliverer = util.NewDelayingDeliverer()
    nc.clusterDeliverer = util.NewDelayingDeliverer()

    // Start informer in federated API servers on namespaces that should be federated.
    nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
                return client.Core().Namespaces().List(options)
            },
            WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                return client.Core().Namespaces().Watch(options)
            },
        },
        &apiv1.Namespace{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(func(obj runtime.Object) { nc.deliverNamespaceObj(obj, 0, false) }))

    // Federated informer on namespaces in members of federation.
    nc.namespaceFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
                        return targetClient.Core().Namespaces().List(options)
                    },
                    WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                        return targetClient.Core().Namespaces().Watch(options)
                    },
                },
                &apiv1.Namespace{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
                // would be just confirmation that some namespace operation succeeded.
                util.NewTriggerOnMetaAndSpecChanges(
                    func(obj runtime.Object) { nc.deliverNamespaceObj(obj, nc.namespaceReviewDelay, false) },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federationapi.Cluster) {
                // When new cluster becomes available process all the namespaces again.
                nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay)
            },
        },
    )

    // Federated updater along with Create/Update/Delete operations.
    nc.federatedUpdater = util.NewFederatedUpdater(nc.namespaceFederatedInformer,
        func(client kubeclientset.Interface, obj runtime.Object) error {
            namespace := obj.(*apiv1.Namespace)
            _, err := client.Core().Namespaces().Create(namespace)
            return err
        },
        func(client kubeclientset.Interface, obj runtime.Object) error {
            namespace := obj.(*apiv1.Namespace)
            _, err := client.Core().Namespaces().Update(namespace)
            return err
        },
        func(client kubeclientset.Interface, obj runtime.Object) error {
            namespace := obj.(*apiv1.Namespace)
            err := client.Core().Namespaces().Delete(namespace.Name, &apiv1.DeleteOptions{})
            // IsNotFound error is fine since that means the object is deleted already.
            if errors.IsNotFound(err) {
                return nil
            }
            return err
        })

    nc.deletionHelper = deletionhelper.NewDeletionHelper(
        nc.hasFinalizerFunc,
        nc.removeFinalizerFunc,
        nc.addFinalizerFunc,
        // objNameFunc
        func(obj runtime.Object) string {
            namespace := obj.(*apiv1.Namespace)
            return namespace.Name
        },
        nc.updateTimeout,
        nc.eventRecorder,
        nc.namespaceFederatedInformer,
        nc.federatedUpdater,
    )

    return nc
}
// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
// by tests which want to run 1000s of mock requests without slowing down.
func (b *URLBackoff) Disable() {
    glog.V(4).Infof("Disabling backoff strategy")
    b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
}
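// Illustrative sketch (not in the original source): a test that hammers a client with
// many mock requests can zero out the backoff first so CalculateBackoff always returns 0.
// The function name and URL below are hypothetical.
func exampleDisabledBackoffSketch(t *testing.T) {
    myBackoff := &URLBackoff{
        Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second),
    }
    myBackoff.Disable()

    myBackoff.UpdateBackoff(parse("http://1.2.3.4:100"), nil, 500) // failures no longer accumulate delay
    if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) != 0 {
        t.Errorf("expected zero backoff after Disable()")
    }
}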
// NewDeploymentController returns a new deployment controller
func NewDeploymentController(federationClient fedclientset.Interface) *DeploymentController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(federationClient))
    recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-deployment-controller"})

    fdc := &DeploymentController{
        fedClient:           federationClient,
        deploymentDeliverer: fedutil.NewDelayingDeliverer(),
        clusterDeliverer:    fedutil.NewDelayingDeliverer(),
        deploymentWorkQueue: workqueue.New(),
        deploymentBackoff:   flowcontrol.NewBackOff(5*time.Second, time.Minute),
        defaultPlanner: planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
            Clusters: map[string]fed.ClusterReplicaSetPreferences{
                "*": {Weight: 1},
            },
        }),
        eventRecorder: recorder,
    }

    deploymentFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) {
        return cache.NewInformer(
            &cache.ListWatch{
                ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
                    return clientset.Extensions().Deployments(apiv1.NamespaceAll).List(options)
                },
                WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                    return clientset.Extensions().Deployments(apiv1.NamespaceAll).Watch(options)
                },
            },
            &extensionsv1.Deployment{},
            controller.NoResyncPeriodFunc(),
            fedutil.NewTriggerOnAllChanges(
                func(obj runtime.Object) { fdc.deliverLocalDeployment(obj, deploymentReviewDelay) },
            ),
        )
    }
    clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{
        ClusterAvailable: func(cluster *fedv1.Cluster) {
            fdc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterAvailableDelay)
        },
        ClusterUnavailable: func(cluster *fedv1.Cluster, _ []interface{}) {
            fdc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
        },
    }
    fdc.fedDeploymentInformer = fedutil.NewFederatedInformer(federationClient, deploymentFedInformerFactory, &clusterLifecycle)

    podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.Controller) {
        return cache.NewInformer(
            &cache.ListWatch{
                ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
                    return clientset.Core().Pods(apiv1.NamespaceAll).List(options)
                },
                WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                    return clientset.Core().Pods(apiv1.NamespaceAll).Watch(options)
                },
            },
            &apiv1.Pod{},
            controller.NoResyncPeriodFunc(),
            fedutil.NewTriggerOnAllChanges(
                func(obj runtime.Object) { fdc.clusterDeliverer.DeliverAfter(allClustersKey, nil, allDeploymentReviewDelay) },
            ),
        )
    }
    fdc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{})

    fdc.deploymentStore, fdc.deploymentController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options apiv1.ListOptions) (runtime.Object, error) {
                return fdc.fedClient.Extensions().Deployments(apiv1.NamespaceAll).List(options)
            },
            WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                return fdc.fedClient.Extensions().Deployments(apiv1.NamespaceAll).Watch(options)
            },
        },
        &extensionsv1.Deployment{},
        controller.NoResyncPeriodFunc(),
        fedutil.NewTriggerOnMetaAndSpecChanges(
            func(obj runtime.Object) { fdc.deliverFedDeploymentObj(obj, deploymentReviewDelay) },
        ),
    )

    fdc.fedUpdater = fedutil.NewFederatedUpdater(fdc.fedDeploymentInformer,
        func(client kubeclientset.Interface, obj runtime.Object) error {
            rs := obj.(*extensionsv1.Deployment)
            _, err := client.Extensions().Deployments(rs.Namespace).Create(rs)
            return err
        },
        func(client kubeclientset.Interface, obj runtime.Object) error {
            rs := obj.(*extensionsv1.Deployment)
            _, err := client.Extensions().Deployments(rs.Namespace).Update(rs)
            return err
        },
        func(client kubeclientset.Interface, obj runtime.Object) error {
            rs := obj.(*extensionsv1.Deployment)
            err := client.Extensions().Deployments(rs.Namespace).Delete(rs.Name, &apiv1.DeleteOptions{})
            return err
        })

    fdc.deletionHelper = deletionhelper.NewDeletionHelper(
        fdc.hasFinalizerFunc,
        fdc.removeFinalizerFunc,
        fdc.addFinalizerFunc,
        // objNameFunc
        func(obj runtime.Object) string {
            deployment := obj.(*extensionsv1.Deployment)
            return deployment.Name
        },
        updateTimeout,
        fdc.eventRecorder,
        fdc.fedDeploymentInformer,
        fdc.fedUpdater,
    )

    return fdc
}
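// Illustrative sketch (not in the original source): the defaultPlanner above weights all
// clusters equally via the "*" wildcard entry. A caller could express per-cluster weights
// instead; the cluster names and weight values below are hypothetical.
func exampleWeightedPlannerSketch() {
    p := planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
        Clusters: map[string]fed.ClusterReplicaSetPreferences{
            "cluster-a": {Weight: 3}, // hypothetical: cluster-a receives roughly 3x the replicas
            "cluster-b": {Weight: 1},
            "*":         {Weight: 1}, // any other cluster
        },
    })
    _ = p
}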
// NewConfigMapController returns a new configmap controller
func NewConfigMapController(client federationclientset.Interface) *ConfigMapController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-configmaps-controller"})

    configmapcontroller := &ConfigMapController{
        federatedApiClient:    client,
        configmapReviewDelay:  time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        configmapBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
    }

    // Build deliverers for triggering reconciliations.
    configmapcontroller.configmapDeliverer = util.NewDelayingDeliverer()
    configmapcontroller.clusterDeliverer = util.NewDelayingDeliverer()

    // Start informer on federated API servers on configmaps that should be federated.
    configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
                return client.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
            },
            WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                return client.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
            },
        },
        &apiv1.ConfigMap{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) }))

    // Federated informer on configmaps in members of federation.
    configmapcontroller.configmapFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
                        return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
                    },
                    WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                        return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
                    },
                },
                &apiv1.ConfigMap{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
                // would be just confirmation that some configmap operation succeeded.
                util.NewTriggerOnAllChanges(
                    func(obj pkgruntime.Object) {
                        configmapcontroller.deliverConfigMapObj(obj, configmapcontroller.configmapReviewDelay, false)
                    },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federationapi.Cluster) {
                // When new cluster becomes available process all the configmaps again.
                configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
            },
        },
    )

    // Federated updater along with Create/Update/Delete operations.
    configmapcontroller.federatedUpdater = util.NewFederatedUpdater(configmapcontroller.configmapFederatedInformer,
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configmap := obj.(*apiv1.ConfigMap)
            _, err := client.Core().ConfigMaps(configmap.Namespace).Create(configmap)
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configmap := obj.(*apiv1.ConfigMap)
            _, err := client.Core().ConfigMaps(configmap.Namespace).Update(configmap)
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configmap := obj.(*apiv1.ConfigMap)
            err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &apiv1.DeleteOptions{})
            return err
        })

    return configmapcontroller
}
// NewIngressController returns a new ingress controller
func NewIngressController(client federationclientset.Interface) *IngressController {
    glog.V(4).Infof("->NewIngressController V(4)")
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(v1.EventSource{Component: "federated-ingress-controller"})

    ic := &IngressController{
        federatedApiClient:    client,
        ingressReviewDelay:    time.Second * 10,
        configMapReviewDelay:  time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        ingressBackoff:        flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
        configMapBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
    }

    // Build deliverers for triggering reconciliations.
    ic.ingressDeliverer = util.NewDelayingDeliverer()
    ic.clusterDeliverer = util.NewDelayingDeliverer()
    ic.configMapDeliverer = util.NewDelayingDeliverer()

    // Start informer in federated API servers on ingresses that should be federated.
    ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
                return client.Extensions().Ingresses(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
                return client.Extensions().Ingresses(api.NamespaceAll).Watch(options)
            },
        },
        &extensionsv1beta1.Ingress{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(
            func(obj pkgruntime.Object) { ic.deliverIngressObj(obj, 0, false) },
        ))

    // Federated informer on ingresses in members of federation.
    ic.ingressFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
                        return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
                    },
                    WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
                        return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options)
                    },
                },
                &extensionsv1beta1.Ingress{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in federated cluster is changed. In most cases it
                // would be just confirmation that some ingress operation succeeded.
                util.NewTriggerOnAllChanges(
                    func(obj pkgruntime.Object) { ic.deliverIngressObj(obj, ic.ingressReviewDelay, false) },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federationapi.Cluster) {
                // When new cluster becomes available process all the ingresses again, and configure its
                // ingress controller's configmap with the correct UID.
                ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
            },
        },
    )

    // Federated informer on configmaps for ingress controllers in members of the federation.
    ic.configMapFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.Controller) {
            glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name)
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
                        if targetClient == nil {
                            glog.Errorf("Internal error: targetClient is nil")
                        }
                        // We only want to list one by name; unfortunately Kubernetes doesn't have a selector for that.
                        return targetClient.Core().ConfigMaps(uidConfigMapNamespace).List(options)
                    },
                    WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
                        if targetClient == nil {
                            glog.Errorf("Internal error: targetClient is nil")
                        }
                        return targetClient.Core().ConfigMaps(uidConfigMapNamespace).Watch(options) // as above
                    },
                },
                &v1.ConfigMap{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever the ingress controller's configmap in a federated cluster is changed. In most cases it
                // would be just confirmation that the configmap for the ingress controller is correct.
                util.NewTriggerOnAllChanges(
                    func(obj pkgruntime.Object) { ic.deliverConfigMapObj(cluster.Name, obj, ic.configMapReviewDelay, false) },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federationapi.Cluster) {
                ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
            },
        },
    )

    // Federated ingress updater along with Create/Update/Delete operations.
    ic.federatedIngressUpdater = util.NewFederatedUpdater(ic.ingressFederatedInformer,
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            ingress := obj.(*extensionsv1beta1.Ingress)
            glog.V(4).Infof("Attempting to create Ingress: %v", ingress)
            _, err := client.Extensions().Ingresses(ingress.Namespace).Create(ingress)
            if err != nil {
                glog.Errorf("Error creating ingress %q: %v", types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace}, err)
            } else {
                glog.V(4).Infof("Successfully created ingress %q", types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace})
            }
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            ingress := obj.(*extensionsv1beta1.Ingress)
            glog.V(4).Infof("Attempting to update Ingress: %v", ingress)
            _, err := client.Extensions().Ingresses(ingress.Namespace).Update(ingress)
            if err != nil {
                glog.V(4).Infof("Failed to update Ingress: %v", err)
            } else {
                glog.V(4).Infof("Successfully updated Ingress: %q", types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace})
            }
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            ingress := obj.(*extensionsv1beta1.Ingress)
            glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
            err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &v1.DeleteOptions{})
            return err
        })

    // Federated configmap updater along with Create/Update/Delete operations. Only Update should ever be called.
    ic.federatedConfigMapUpdater = util.NewFederatedUpdater(ic.configMapFederatedInformer,
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configMap := obj.(*v1.ConfigMap)
            configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
            glog.Errorf("Internal error: Incorrectly attempting to create ConfigMap: %q", configMapName)
            _, err := client.Core().ConfigMaps(configMap.Namespace).Create(configMap)
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configMap := obj.(*v1.ConfigMap)
            configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
            glog.V(4).Infof("Attempting to update ConfigMap: %v", configMap)
            _, err := client.Core().ConfigMaps(configMap.Namespace).Update(configMap)
            if err == nil {
                glog.V(4).Infof("Successfully updated ConfigMap %q", configMapName)
            } else {
                glog.V(4).Infof("Failed to update ConfigMap %q: %v", configMapName, err)
            }
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configMap := obj.(*v1.ConfigMap)
            configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
            glog.Errorf("Internal error: Incorrectly attempting to delete ConfigMap: %q", configMapName)
            err := client.Core().ConfigMaps(configMap.Namespace).Delete(configMap.Name, &v1.DeleteOptions{})
            return err
        })

    ic.deletionHelper = deletionhelper.NewDeletionHelper(
        ic.hasFinalizerFunc,
        ic.removeFinalizerFunc,
        ic.addFinalizerFunc,
        // objNameFunc
        func(obj pkgruntime.Object) string {
            ingress := obj.(*extensionsv1beta1.Ingress)
            return ingress.Name
        },
        ic.updateTimeout,
        ic.eventRecorder,
        ic.ingressFederatedInformer,
        ic.federatedIngressUpdater,
    )

    return ic
}
func TestSyncPodWithInitContainers(t *testing.T) {
    fakeRuntime, _, m, err := createTestRuntimeManager()
    assert.NoError(t, err)

    initContainers := []v1.Container{
        {
            Name:            "init1",
            Image:           "init",
            ImagePullPolicy: v1.PullIfNotPresent,
        },
    }
    containers := []v1.Container{
        {
            Name:            "foo1",
            Image:           "busybox",
            ImagePullPolicy: v1.PullIfNotPresent,
        },
        {
            Name:            "foo2",
            Image:           "alpine",
            ImagePullPolicy: v1.PullIfNotPresent,
        },
    }
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            UID:       "12345678",
            Name:      "foo",
            Namespace: "new",
        },
        Spec: v1.PodSpec{
            Containers:     containers,
            InitContainers: initContainers,
        },
    }

    // buildContainerID is an internal helper function to build container id from api pod
    // and container with default attempt number 0.
    buildContainerID := func(pod *v1.Pod, container v1.Container) string {
        uid := string(pod.UID)
        sandboxID := apitest.BuildSandboxName(&runtimeapi.PodSandboxMetadata{
            Name:      &pod.Name,
            Uid:       &uid,
            Namespace: &pod.Namespace,
        })
        return apitest.BuildContainerName(&runtimeapi.ContainerMetadata{Name: &container.Name}, sandboxID)
    }

    backOff := flowcontrol.NewBackOff(time.Second, time.Minute)

    // 1. should only create the init container.
    podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
    assert.NoError(t, err)
    result := m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
    assert.NoError(t, result.Error())
    assert.Equal(t, 1, len(fakeRuntime.Containers))
    initContainerID := buildContainerID(pod, initContainers[0])
    expectedContainers := []string{initContainerID}
    if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
        t.Errorf("expected %q, got %q", expectedContainers, actual)
    }

    // 2. should not create app container because init container is still running.
    podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
    assert.NoError(t, err)
    result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
    assert.NoError(t, result.Error())
    assert.Equal(t, 1, len(fakeRuntime.Containers))
    expectedContainers = []string{initContainerID}
    if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
        t.Errorf("expected %q, got %q", expectedContainers, actual)
    }

    // 3. should create all app containers because init container finished.
    fakeRuntime.StopContainer(initContainerID, 0)
    podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
    assert.NoError(t, err)
    result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
    assert.NoError(t, result.Error())
    assert.Equal(t, 3, len(fakeRuntime.Containers))
    expectedContainers = []string{initContainerID, buildContainerID(pod, containers[0]), buildContainerID(pod, containers[1])}
    if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
        t.Errorf("expected %q, got %q", expectedContainers, actual)
    }
}