Example #1
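// NewFakeKubeRuntimeManager wires a kubeGenericRuntimeManager together from the supplied
// fake runtime, image, network and OS dependencies for use in tests.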
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService, networkPlugin network.NetworkPlugin, osInterface kubecontainer.OSInterface) (*kubeGenericRuntimeManager, error) {
	recorder := &record.FakeRecorder{}
	kubeRuntimeManager := &kubeGenericRuntimeManager{
		recorder:            recorder,
		cpuCFSQuota:         false,
		livenessManager:     proberesults.NewManager(),
		containerRefManager: kubecontainer.NewRefManager(),
		osInterface:         osInterface,
		networkPlugin:       networkPlugin,
		runtimeHelper:       &fakeRuntimeHelper{},
		runtimeService:      runtimeService,
		imageService:        imageService,
		keyring:             credentialprovider.NewDockerKeyring(),
	}

	typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion)
	if err != nil {
		return nil, err
	}

	kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodGetter(), kubeRuntimeManager)
	kubeRuntimeManager.runtimeName = typedVersion.GetRuntimeName()
	kubeRuntimeManager.imagePuller = images.NewImageManager(
		kubecontainer.FilterEventRecorder(recorder),
		kubeRuntimeManager,
		flowcontrol.NewBackOff(time.Second, 300*time.Second),
		false)
	kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(
		&fakeHTTP{},
		kubeRuntimeManager,
		kubeRuntimeManager)

	return kubeRuntimeManager, nil
}
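The fake manager above hands the image puller a *flowcontrol.Backoff built with NewBackOff(time.Second, 300*time.Second). As a rough orientation, here is a minimal sketch of how such a backoff is typically consumed; the method names mirror the flowcontrol package used throughout these examples, while demoBackoffUsage and the key string are purely illustrative:

func demoBackoffUsage(backoff *flowcontrol.Backoff) {
	key := "pod-uid_container-name" // hypothetical key; real callers choose their own scheme

	if !backoff.IsInBackOffSinceUpdate(key, backoff.Clock.Now()) {
		// ... attempt the operation; on failure, grow this key's delay (doubled, capped at the max):
		backoff.Next(key, backoff.Clock.Now())
	}
	_ = backoff.Get(key) // current delay for the key (zero if it has no entry)
	backoff.Reset(key)   // a success clears the entry so the next failure starts from the initial delay
}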
Example #2
// TestURLBackoffFunctionality generally tests the URLBackoff wrapper.  We avoid duplicating tests from backoff and request.
func TestURLBackoffFunctionality(t *testing.T) {
	myBackoff := &URLBackoff{
		Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second),
	}

	// Now test that backoff increases, then recovers.
	// 200 and 300 should both result in clearing the backoff.
	// all others like 429 should result in increased backoff.
	seconds := []int{0,
		1, 2, 4, 8, 0,
		1, 2}
	returnCodes := []int{
		429, 500, 501, 502, 300,
		500, 501, 502,
	}

	if len(seconds) != len(returnCodes) {
		t.Fatalf("responseCode to backoff arrays should be the same length... sanity check failed.")
	}

	for i, sec := range seconds {
		backoffSec := myBackoff.CalculateBackoff(parse("http://1.2.3.4:100"))
		if backoffSec < time.Duration(sec)*time.Second || backoffSec > time.Duration(sec+5)*time.Second {
			t.Errorf("Backoff out of range %v: %v %v", i, sec, backoffSec)
		}
		myBackoff.UpdateBackoff(parse("http://1.2.3.4:100/responseCodeForFuncTest"), nil, returnCodes[i])
	}

	if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) == 0 {
		t.Errorf("The final return code %v should have resulted in a backoff ! ", returnCodes[7])
	}
}
Example #3
// readExpBackoffConfig handles the internal logic of determining what the
// backoff policy is.  By default if no information is available, NoBackoff.
// TODO Generalize this see #17727 .
func readExpBackoffConfig() BackoffManager {
	backoffBase := os.Getenv(envBackoffBase)
	backoffDuration := os.Getenv(envBackoffDuration)

	backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
	backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
	if errBase != nil || errDuration != nil {
		return &NoBackoff{}
	}
	return &URLBackoff{
		Backoff: flowcontrol.NewBackOff(
			time.Duration(backoffBaseInt)*time.Second,
			time.Duration(backoffDurationInt)*time.Second)}
}
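Whichever manager readExpBackoffConfig returns, callers drive it through the same two methods the URLBackoff tests above exercise. A minimal sketch under that assumption; demoRequest and the 500 response code are illustrative only:

func demoRequest(mgr BackoffManager, u *url.URL) {
	// Wait out whatever delay has accumulated for this host.
	if delay := mgr.CalculateBackoff(u); delay > 0 {
		time.Sleep(delay)
	}
	// ... issue the request; suppose it came back with a 500.
	// 2xx/3xx responses clear the per-host backoff, while codes such as 429 and 5xx grow it.
	mgr.UpdateBackoff(u, nil, 500)
}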
Example #4
func TestURLBackoffFunctionalityCollisions(t *testing.T) {
	myBackoff := &URLBackoff{
		Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second),
	}

	// Add some noise and make sure backoff for a clean URL is zero.
	myBackoff.UpdateBackoff(parse("http://100.200.300.400:8080"), nil, 500)

	myBackoff.UpdateBackoff(parse("http://1.2.3.4:8080"), nil, 500)

	if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) > 0 {
		t.Errorf("URLs are colliding in the backoff map!")
	}
}
Example #5
func TestSyncPodWithInitContainers(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	initContainers := []api.Container{
		{
			Name:            "init1",
			Image:           "init",
			ImagePullPolicy: api.PullIfNotPresent,
		},
	}
	containers := []api.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: api.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "alpine",
			ImagePullPolicy: api.PullIfNotPresent,
		},
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: api.PodSpec{
			Containers:     containers,
			InitContainers: initContainers,
		},
	}

	_, _, err = makeAndSetFakePod(m, fakeRuntime, pod)
	assert.NoError(t, err)

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result := m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
	assert.NoError(t, result.Error())
	assert.Equal(t, 3, len(fakeRuntime.Containers))
	expectedContainers := []string{"foo1_0", "foo2_0", "init1_0"}
	if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
		t.Errorf("expected %q, got %q", expectedContainers, actual)
	}
}
Example #6
func TestSyncPod(t *testing.T) {
	fakeRuntime, fakeImage, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	containers := []api.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: api.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "alpine",
			ImagePullPolicy: api.PullIfNotPresent,
		},
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: api.PodSpec{
			Containers: containers,
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	result := m.SyncPod(pod, api.PodStatus{}, &kubecontainer.PodStatus{}, []api.Secret{}, backOff)
	assert.NoError(t, result.Error())
	assert.Equal(t, 2, len(fakeRuntime.Containers))
	assert.Equal(t, 2, len(fakeImage.Images))
	assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
	for _, sandbox := range fakeRuntime.Sandboxes {
		assert.Equal(t, runtimeApi.PodSandBoxState_READY, sandbox.GetState())
	}
	for _, c := range fakeRuntime.Containers {
		assert.Equal(t, runtimeApi.ContainerState_RUNNING, c.GetState())
	}
}
Example #7
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService) (*kubeGenericRuntimeManager, error) {
	networkPlugin, _ := network.InitNetworkPlugin(
		[]network.NetworkPlugin{},
		"",
		nettest.NewFakeHost(nil),
		componentconfig.HairpinNone,
		"10.0.0.0/8",
	)

	return NewKubeGenericRuntimeManager(
		&record.FakeRecorder{},
		proberesults.NewManager(),
		kubecontainer.NewRefManager(),
		&containertest.FakeOS{},
		networkPlugin,
		&fakeRuntimeHelper{},
		&fakeHTTP{},
		flowcontrol.NewBackOff(time.Second, 300*time.Second),
		false,
		false,
		runtimeService,
		imageService,
	)
}
Example #8
// NewReplicaSetController returns a new replicaset controller
func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSetController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(federationClient))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-replicaset-controller"})

	frsc := &ReplicaSetController{
		fedClient:           federationClient,
		replicasetDeliverer: fedutil.NewDelayingDeliverer(),
		clusterDeliverer:    fedutil.NewDelayingDeliverer(),
		replicasetWorkQueue: workqueue.New(),
		replicaSetBackoff:   flowcontrol.NewBackOff(5*time.Second, time.Minute),
		defaultPlanner: planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
			Clusters: map[string]fed.ClusterReplicaSetPreferences{
				"*": {Weight: 1},
			},
		}),
		eventRecorder: recorder,
	}

	replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
		return cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options api.ListOptions) (runtime.Object, error) {
					versionedOptions := fedutil.VersionizeV1ListOptions(options)
					return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).List(versionedOptions)
				},
				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
					versionedOptions := fedutil.VersionizeV1ListOptions(options)
					return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(versionedOptions)
				},
			},
			&extensionsv1.ReplicaSet{},
			controller.NoResyncPeriodFunc(),
			fedutil.NewTriggerOnAllChanges(
				func(obj runtime.Object) { frsc.deliverLocalReplicaSet(obj, replicaSetReviewDelay) },
			),
		)
	}
	clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{
		ClusterAvailable: func(cluster *fedv1.Cluster) {
			frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterAvailableDelay)
		},
		ClusterUnavailable: func(cluster *fedv1.Cluster, _ []interface{}) {
			frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
		},
	}
	frsc.fedReplicaSetInformer = fedutil.NewFederatedInformer(federationClient, replicaSetFedInformerFactory, &clusterLifecycle)

	podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
		return cache.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options api.ListOptions) (runtime.Object, error) {
					versionedOptions := fedutil.VersionizeV1ListOptions(options)
					return clientset.Core().Pods(apiv1.NamespaceAll).List(versionedOptions)
				},
				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
					versionedOptions := fedutil.VersionizeV1ListOptions(options)
					return clientset.Core().Pods(apiv1.NamespaceAll).Watch(versionedOptions)
				},
			},
			&apiv1.Pod{},
			controller.NoResyncPeriodFunc(),
			fedutil.NewTriggerOnAllChanges(
				func(obj runtime.Object) {
					frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, allReplicaSetReviewDelay)
				},
			),
		)
	}
	frsc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{})

	frsc.replicaSetStore.Indexer, frsc.replicaSetController = cache.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				versionedOptions := fedutil.VersionizeV1ListOptions(options)
				return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).List(versionedOptions)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				versionedOptions := fedutil.VersionizeV1ListOptions(options)
				return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(versionedOptions)
			},
		},
		&extensionsv1.ReplicaSet{},
		controller.NoResyncPeriodFunc(),
		fedutil.NewTriggerOnMetaAndSpecChanges(
			func(obj runtime.Object) { frsc.deliverFedReplicaSetObj(obj, replicaSetReviewDelay) },
		),
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	frsc.fedUpdater = fedutil.NewFederatedUpdater(frsc.fedReplicaSetInformer,
		func(client kubeclientset.Interface, obj runtime.Object) error {
			rs := obj.(*extensionsv1.ReplicaSet)
			_, err := client.Extensions().ReplicaSets(rs.Namespace).Create(rs)
			return err
		},
		func(client kubeclientset.Interface, obj runtime.Object) error {
			rs := obj.(*extensionsv1.ReplicaSet)
			_, err := client.Extensions().ReplicaSets(rs.Namespace).Update(rs)
			return err
		},
		func(client kubeclientset.Interface, obj runtime.Object) error {
			rs := obj.(*extensionsv1.ReplicaSet)
			err := client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, &apiv1.DeleteOptions{})
			return err
		})

	frsc.deletionHelper = deletionhelper.NewDeletionHelper(
		frsc.hasFinalizerFunc,
		frsc.removeFinalizerFunc,
		frsc.addFinalizerFunc,
		// objNameFunc
		func(obj runtime.Object) string {
			replicaset := obj.(*extensionsv1.ReplicaSet)
			return replicaset.Name
		},
		updateTimeout,
		frsc.eventRecorder,
		frsc.fedReplicaSetInformer,
		frsc.fedUpdater,
	)

	return frsc
}
Example #9
// NewIngressController returns a new ingress controller
func NewIngressController(client federation_release_1_4.Interface) *IngressController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-ingress-controller"})

	ic := &IngressController{
		federatedApiClient:    client,
		ingressReviewDelay:    time.Second * 10,
		clusterAvailableDelay: time.Second * 20,
		smallDelay:            time.Second * 3,
		updateTimeout:         time.Second * 30,
		ingressBackoff:        flowcontrol.NewBackOff(5*time.Second, time.Minute),
		eventRecorder:         recorder,
	}

	// Build deliverers for triggering reconciliations.
	ic.ingressDeliverer = util.NewDelayingDeliverer()
	ic.clusterDeliverer = util.NewDelayingDeliverer()

	// Start informer in federated API servers on ingresses that should be federated.
	ic.ingressInformerStore, ic.ingressInformerController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
				return client.Extensions().Ingresses(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Extensions().Ingresses(api.NamespaceAll).Watch(options)
			},
		},
		&extensions_v1beta1.Ingress{},
		controller.NoResyncPeriodFunc(),
		util.NewTriggerOnAllChanges(
			func(obj pkg_runtime.Object) {
				ic.deliverIngressObj(obj, 0, false)
			},
		))

	// Federated informer on ingresses in members of federation.
	ic.ingressFederatedInformer = util.NewFederatedInformer(
		client,
		func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
			return framework.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
						return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
					},
					WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
						return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options)
					},
				},
				&extensions_v1beta1.Ingress{},
				controller.NoResyncPeriodFunc(),
				// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
				// would be just confirmation that some ingress operation succeeded.
				util.NewTriggerOnAllChanges(
					func(obj pkg_runtime.Object) {
						ic.deliverIngressObj(obj, ic.ingressReviewDelay, false)
					},
				))
		},

		&util.ClusterLifecycleHandlerFuncs{
			ClusterAvailable: func(cluster *federation_api.Cluster) {
				// When new cluster becomes available process all the ingresses again.
				ic.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(ic.clusterAvailableDelay))
			},
		},
	)

	// Federated updater along with Create/Update/Delete operations.
	ic.federatedUpdater = util.NewFederatedUpdater(ic.ingressFederatedInformer,
		func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
			ingress := obj.(*extensions_v1beta1.Ingress)
			glog.V(4).Infof("Attempting to create Ingress: %v", ingress)
			_, err := client.Extensions().Ingresses(ingress.Namespace).Create(ingress)
			return err
		},
		func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
			ingress := obj.(*extensions_v1beta1.Ingress)
			glog.V(4).Infof("Attempting to update Ingress: %v", ingress)
			_, err := client.Extensions().Ingresses(ingress.Namespace).Update(ingress)
			return err
		},
		func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
			ingress := obj.(*extensions_v1beta1.Ingress)
			glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
			err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &api.DeleteOptions{})
			return err
		})
	return ic
}
Example #10
// NewIngressController returns a new ingress controller
func NewIngressController(client federationclientset.Interface) *IngressController {
	glog.V(4).Infof("->NewIngressController V(4)")
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
	recorder := broadcaster.NewRecorder(v1.EventSource{Component: "federated-ingress-controller"})
	ic := &IngressController{
		federatedApiClient:    client,
		ingressReviewDelay:    time.Second * 10,
		configMapReviewDelay:  time.Second * 10,
		clusterAvailableDelay: time.Second * 20,
		smallDelay:            time.Second * 3,
		updateTimeout:         time.Second * 30,
		ingressBackoff:        flowcontrol.NewBackOff(5*time.Second, time.Minute),
		eventRecorder:         recorder,
		configMapBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
	}

	// Build deliverers for triggering reconciliations.
	ic.ingressDeliverer = util.NewDelayingDeliverer()
	ic.clusterDeliverer = util.NewDelayingDeliverer()
	ic.configMapDeliverer = util.NewDelayingDeliverer()

	// Start informer in federated API servers on ingresses that should be federated.
	ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
				return client.Extensions().Ingresses(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Extensions().Ingresses(api.NamespaceAll).Watch(options)
			},
		},
		&extensionsv1beta1.Ingress{},
		controller.NoResyncPeriodFunc(),
		util.NewTriggerOnAllChanges(
			func(obj pkgruntime.Object) {
				ic.deliverIngressObj(obj, 0, false)
			},
		))

	// Federated informer on ingresses in members of federation.
	ic.ingressFederatedInformer = util.NewFederatedInformer(
		client,
		func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
			return cache.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
						return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
					},
					WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
						return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options)
					},
				},
				&extensionsv1beta1.Ingress{},
				controller.NoResyncPeriodFunc(),
				// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
				// would be just confirmation that some ingress operation succeeded.
				util.NewTriggerOnAllChanges(
					func(obj pkgruntime.Object) {
						ic.deliverIngressObj(obj, ic.ingressReviewDelay, false)
					},
				))
		},

		&util.ClusterLifecycleHandlerFuncs{
			ClusterAvailable: func(cluster *federationapi.Cluster) {
				// When a new cluster becomes available, process all the ingresses again and configure its ingress controller's configmap with the correct UID
				ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
			},
		},
	)

	// Federated informer on configmaps for ingress controllers in members of the federation.
	ic.configMapFederatedInformer = util.NewFederatedInformer(
		client,
		func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
			glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name)
			return cache.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
						if targetClient == nil {
							glog.Errorf("Internal error: targetClient is nil")
						}
						return targetClient.Core().ConfigMaps(uidConfigMapNamespace).List(options) // we only want to list one by name - unfortunately Kubernetes doesn't have a selector for that.
					},
					WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
						if targetClient == nil {
							glog.Errorf("Internal error: targetClient is nil")
						}
						return targetClient.Core().ConfigMaps(uidConfigMapNamespace).Watch(options) // as above
					},
				},
				&v1.ConfigMap{},
				controller.NoResyncPeriodFunc(),
				// Trigger reconciliation whenever the ingress controller's configmap in a federated cluster is changed. In most cases it
				// would be just confirmation that the configmap for the ingress controller is correct.
				util.NewTriggerOnAllChanges(
					func(obj pkgruntime.Object) {
						ic.deliverConfigMapObj(cluster.Name, obj, ic.configMapReviewDelay, false)
					},
				))
		},

		&util.ClusterLifecycleHandlerFuncs{
			ClusterAvailable: func(cluster *federationapi.Cluster) {
				ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
			},
		},
	)

	// Federated ingress updater along with Create/Update/Delete operations.
	ic.federatedIngressUpdater = util.NewFederatedUpdater(ic.ingressFederatedInformer,
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			ingress := obj.(*extensionsv1beta1.Ingress)
			glog.V(4).Infof("Attempting to create Ingress: %v", ingress)
			_, err := client.Extensions().Ingresses(ingress.Namespace).Create(ingress)
			if err != nil {
				glog.Errorf("Error creating ingress %q: %v", types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace}, err)
			} else {
				glog.V(4).Infof("Successfully created ingress %q", types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace})
			}
			return err
		},
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			ingress := obj.(*extensionsv1beta1.Ingress)
			glog.V(4).Infof("Attempting to update Ingress: %v", ingress)
			_, err := client.Extensions().Ingresses(ingress.Namespace).Update(ingress)
			if err != nil {
				glog.V(4).Infof("Failed to update Ingress: %v", err)
			} else {
				glog.V(4).Infof("Successfully updated Ingress: %q", types.NamespacedName{Name: ingress.Name, Namespace: ingress.Namespace})
			}
			return err
		},
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			ingress := obj.(*extensionsv1beta1.Ingress)
			glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
			err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &v1.DeleteOptions{})
			return err
		})

	// Federated configmap updater along with Create/Update/Delete operations.  Only Update should ever be called.
	ic.federatedConfigMapUpdater = util.NewFederatedUpdater(ic.configMapFederatedInformer,
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			configMap := obj.(*v1.ConfigMap)
			configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
			glog.Errorf("Internal error: Incorrectly attempting to create ConfigMap: %q", configMapName)
			_, err := client.Core().ConfigMaps(configMap.Namespace).Create(configMap)
			return err
		},
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			configMap := obj.(*v1.ConfigMap)
			configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
			glog.V(4).Infof("Attempting to update ConfigMap: %v", configMap)
			_, err := client.Core().ConfigMaps(configMap.Namespace).Update(configMap)
			if err == nil {
				glog.V(4).Infof("Successfully updated ConfigMap %q", configMapName)
			} else {
				glog.V(4).Infof("Failed to update ConfigMap %q: %v", configMapName, err)
			}
			return err
		},
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			configMap := obj.(*v1.ConfigMap)
			configMapName := types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}
			glog.Errorf("Internal error: Incorrectly attempting to delete ConfigMap: %q", configMapName)
			err := client.Core().ConfigMaps(configMap.Namespace).Delete(configMap.Name, &v1.DeleteOptions{})
			return err
		})

	ic.deletionHelper = deletionhelper.NewDeletionHelper(
		ic.hasFinalizerFunc,
		ic.removeFinalizerFunc,
		ic.addFinalizerFunc,
		// objNameFunc
		func(obj pkgruntime.Object) string {
			ingress := obj.(*extensionsv1beta1.Ingress)
			return ingress.Name
		},
		ic.updateTimeout,
		ic.eventRecorder,
		ic.ingressFederatedInformer,
		ic.federatedIngressUpdater,
	)
	return ic
}
Example #11
// Disable makes the backoff trivial, i.e., sets it to zero.  This might be used
// by tests which want to run 1000s of mock requests without slowing down.
func (b *URLBackoff) Disable() {
	glog.V(4).Infof("Disabling backoff strategy")
	b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
}
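A short sketch of what Disable buys a test, reusing the parse helper from the URLBackoff tests above: once the base and cap are zero, CalculateBackoff stays at zero no matter how many failing responses are recorded. demoDisabledBackoff is an illustrative name, not part of the package:

func demoDisabledBackoff() {
	b := &URLBackoff{Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second)}
	b.Disable()
	b.UpdateBackoff(parse("http://1.2.3.4:100"), nil, 500)
	if d := b.CalculateBackoff(parse("http://1.2.3.4:100")); d != 0 {
		// with the backoff disabled we never expect to get here
	}
}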
Example #12
// NewReplicaSetController returns a new replicaset controller
func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSetController {
	frsc := &ReplicaSetController{
		fedClient:           federationClient,
		replicasetDeliverer: fedutil.NewDelayingDeliverer(),
		clusterDeliverer:    fedutil.NewDelayingDeliverer(),
		replicasetWorkQueue: workqueue.New(),
		replicaSetBackoff:   flowcontrol.NewBackOff(5*time.Second, time.Minute),
		defaultPlanner: planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
			Clusters: map[string]fed.ClusterReplicaSetPreferences{
				"*": {Weight: 1},
			},
		}),
	}

	replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
		return framework.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options api.ListOptions) (runtime.Object, error) {
					return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)
				},
				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
					return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(options)
				},
			},
			&extensionsv1.ReplicaSet{},
			controller.NoResyncPeriodFunc(),
			fedutil.NewTriggerOnAllChanges(
				func(obj runtime.Object) { frsc.deliverLocalReplicaSet(obj, allReplicaSetReviewDealy) },
			),
		)
	}
	clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{
		ClusterAvailable: func(cluster *fedv1.Cluster) {
			frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
		},
		ClusterUnavailable: func(cluster *fedv1.Cluster, _ []interface{}) {
			frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
		},
	}
	frsc.fedReplicaSetInformer = fedutil.NewFederatedInformer(federationClient, replicaSetFedInformerFactory, &clusterLifecycle)

	podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
		return framework.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options api.ListOptions) (runtime.Object, error) {
					return clientset.Core().Pods(apiv1.NamespaceAll).List(options)
				},
				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
					return clientset.Core().Pods(apiv1.NamespaceAll).Watch(options)
				},
			},
			&apiv1.Pod{},
			controller.NoResyncPeriodFunc(),
			fedutil.NewTriggerOnAllChanges(
				func(obj runtime.Object) {
					frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
				},
			),
		)
	}
	frsc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{})

	frsc.replicaSetStore.Store, frsc.replicaSetController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(options)
			},
		},
		&extensionsv1.ReplicaSet{},
		controller.NoResyncPeriodFunc(),
		fedutil.NewTriggerOnMetaAndSpecChanges(
			func(obj runtime.Object) { frsc.deliverFedReplicaSetObj(obj, replicaSetReviewDelay) },
		),
	)

	return frsc
}
Example #13
// NewSecretController returns a new secret controller
func NewSecretController(client federationclientset.Interface) *SecretController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-secrets-controller"})

	secretcontroller := &SecretController{
		federatedApiClient:    client,
		secretReviewDelay:     time.Second * 10,
		clusterAvailableDelay: time.Second * 20,
		smallDelay:            time.Second * 3,
		updateTimeout:         time.Second * 30,
		secretBackoff:         flowcontrol.NewBackOff(5*time.Second, time.Minute),
		eventRecorder:         recorder,
	}

	// Build deliverers for triggering reconciliations.
	secretcontroller.secretDeliverer = util.NewDelayingDeliverer()
	secretcontroller.clusterDeliverer = util.NewDelayingDeliverer()

	// Start informer in federated API servers on secrets that should be federated.
	secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
				versionedOptions := util.VersionizeV1ListOptions(options)
				return client.Core().Secrets(api_v1.NamespaceAll).List(versionedOptions)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				versionedOptions := util.VersionizeV1ListOptions(options)
				return client.Core().Secrets(api_v1.NamespaceAll).Watch(versionedOptions)
			},
		},
		&api_v1.Secret{},
		controller.NoResyncPeriodFunc(),
		util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) }))

	// Federated informer on secrets in members of federation.
	secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
		client,
		func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
			return cache.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
						versionedOptions := util.VersionizeV1ListOptions(options)
						return targetClient.Core().Secrets(api_v1.NamespaceAll).List(versionedOptions)
					},
					WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
						versionedOptions := util.VersionizeV1ListOptions(options)
						return targetClient.Core().Secrets(api_v1.NamespaceAll).Watch(versionedOptions)
					},
				},
				&api_v1.Secret{},
				controller.NoResyncPeriodFunc(),
				// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
				// would be just confirmation that some secret operation succeeded.
				util.NewTriggerOnAllChanges(
					func(obj pkg_runtime.Object) {
						secretcontroller.deliverSecretObj(obj, secretcontroller.secretReviewDelay, false)
					},
				))
		},

		&util.ClusterLifecycleHandlerFuncs{
			ClusterAvailable: func(cluster *federation_api.Cluster) {
				// When new cluster becomes available process all the secrets again.
				secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
			},
		},
	)

	// Federated updater along with Create/Update/Delete operations.
	secretcontroller.federatedUpdater = util.NewFederatedUpdater(secretcontroller.secretFederatedInformer,
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			secret := obj.(*api_v1.Secret)
			_, err := client.Core().Secrets(secret.Namespace).Create(secret)
			return err
		},
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			secret := obj.(*api_v1.Secret)
			_, err := client.Core().Secrets(secret.Namespace).Update(secret)
			return err
		},
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			secret := obj.(*api_v1.Secret)
			err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &api_v1.DeleteOptions{})
			return err
		})

	secretcontroller.deletionHelper = deletionhelper.NewDeletionHelper(
		secretcontroller.hasFinalizerFunc,
		secretcontroller.removeFinalizerFunc,
		secretcontroller.addFinalizerFunc,
		// objNameFunc
		func(obj pkg_runtime.Object) string {
			secret := obj.(*api_v1.Secret)
			return secret.Name
		},
		secretcontroller.updateTimeout,
		secretcontroller.eventRecorder,
		secretcontroller.secretFederatedInformer,
		secretcontroller.federatedUpdater,
	)

	return secretcontroller
}
Example #14
func TestPuller(t *testing.T) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:            "test_pod",
			Namespace:       "test-ns",
			UID:             "bar",
			ResourceVersion: "42",
			SelfLink:        "/api/v1/pods/foo",
		}}

	cases := []struct {
		containerImage  string
		policy          api.PullPolicy
		calledFunctions []string
		inspectErr      error
		pullerErr       error
		expectedErr     []error
	}{
		{ // pull missing image
			containerImage:  "missing_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil}},

		{ // image present, don't pull
			containerImage:  "present_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil, nil, nil}},
		// image present, pull it
		{containerImage: "present_image",
			policy:          api.PullAlways,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{nil, nil, nil}},
		// missing image, error PullNever
		{containerImage: "missing_image",
			policy:          api.PullNever,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      nil,
			pullerErr:       nil,
			expectedErr:     []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}},
		// missing image, unable to inspect
		{containerImage: "missing_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent"},
			inspectErr:      errors.New("unknown inspectError"),
			pullerErr:       nil,
			expectedErr:     []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}},
		// missing image, unable to fetch
		{containerImage: "typo_image",
			policy:          api.PullIfNotPresent,
			calledFunctions: []string{"IsImagePresent", "PullImage"},
			inspectErr:      nil,
			pullerErr:       errors.New("404"),
			expectedErr:     []error{ErrImagePull, ErrImagePull, ErrImagePullBackOff, ErrImagePull, ErrImagePullBackOff, ErrImagePullBackOff}},
	}

	for i, c := range cases {
		container := &api.Container{
			Name:            "container_name",
			Image:           c.containerImage,
			ImagePullPolicy: c.policy,
		}

		backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
		fakeClock := util.NewFakeClock(time.Now())
		backOff.Clock = fakeClock

		fakeRuntime := &ctest.FakeRuntime{}
		fakeRecorder := &record.FakeRecorder{}
		puller := newParallelImagePuller(fakeRecorder, fakeRuntime, backOff)

		fakeRuntime.ImageList = []Image{{"present_image", nil, nil, 1}}
		fakeRuntime.Err = c.pullerErr
		fakeRuntime.InspectErr = c.inspectErr

		for tick, expected := range c.expectedErr {
			fakeClock.Step(time.Second)
			err, _ := puller.pullImage(pod, container, nil)
			fakeRuntime.AssertCalls(c.calledFunctions)
			assert.Equal(t, expected, err, "in test %d tick=%d", i, tick)
		}

	}
}
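TestPuller above advances a fake clock to walk the puller in and out of image-pull backoff. The same gating can be sketched in isolation with the fake-clock wiring the test uses; demoBackoffWithFakeClock and the key string are hypothetical illustrations:

func demoBackoffWithFakeClock() {
	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	fakeClock := util.NewFakeClock(time.Now())
	backOff.Clock = fakeClock

	key := "test-ns/test_pod/container_name" // hypothetical key; the real puller derives its own
	backOff.Next(key, fakeClock.Now())       // record a failed pull: the key now backs off for 1s

	_ = backOff.IsInBackOffSinceUpdate(key, fakeClock.Now()) // true immediately after the failure
	fakeClock.Step(2 * time.Second)                          // step past the 1s window
	_ = backOff.IsInBackOffSinceUpdate(key, fakeClock.Now()) // false: a retry may proceed
}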
Example #15
// NewDaemonSetController returns a new daemonset controller
func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
	recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-daemonset-controller"})

	daemonsetcontroller := &DaemonSetController{
		federatedApiClient:    client,
		daemonsetReviewDelay:  time.Second * 10,
		clusterAvailableDelay: time.Second * 20,
		smallDelay:            time.Second * 3,
		updateTimeout:         time.Second * 30,
		daemonsetBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
		eventRecorder:         recorder,
	}

	// Build deliverers for triggering reconciliations.
	daemonsetcontroller.daemonsetDeliverer = util.NewDelayingDeliverer()
	daemonsetcontroller.clusterDeliverer = util.NewDelayingDeliverer()

	// Start informer in federated API servers on daemonsets that should be federated.
	daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
				return client.Extensions().DaemonSets(api_v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
				return client.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options)
			},
		},
		&extensionsv1.DaemonSet{},
		controller.NoResyncPeriodFunc(),
		util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) }))

	// Federated informer on daemonsets in members of federation.
	daemonsetcontroller.daemonsetFederatedInformer = util.NewFederatedInformer(
		client,
		func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
			return cache.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
						return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).List(options)
					},
					WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
						return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options)
					},
				},
				&extensionsv1.DaemonSet{},
				controller.NoResyncPeriodFunc(),
				// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
				// would be just confirmation that some daemonset operation succeeded.
				util.NewTriggerOnAllChanges(
					func(obj pkg_runtime.Object) {
						daemonsetcontroller.deliverDaemonSetObj(obj, daemonsetcontroller.daemonsetReviewDelay, false)
					},
				))
		},

		&util.ClusterLifecycleHandlerFuncs{
			ClusterAvailable: func(cluster *federation_api.Cluster) {
				// When new cluster becomes available process all the daemonsets again.
				daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay))
			},
		},
	)

	// Federated updater along with Create/Update/Delete operations.
	daemonsetcontroller.federatedUpdater = util.NewFederatedUpdater(daemonsetcontroller.daemonsetFederatedInformer,
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			daemonset := obj.(*extensionsv1.DaemonSet)
			glog.V(4).Infof("Attempting to create daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
			_, err := client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset)
			if err != nil {
				glog.Errorf("Error creating daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
			} else {
				glog.V(4).Infof("Successfully created daemonset %s/%s", daemonset.Namespace, daemonset.Name)
			}
			return err
		},
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			daemonset := obj.(*extensionsv1.DaemonSet)
			glog.V(4).Infof("Attempting to update daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
			_, err := client.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
			if err != nil {
				glog.Errorf("Error updating daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
			} else {
				glog.V(4).Infof("Successfully updated daemonset %s/%s", daemonset.Namespace, daemonset.Name)
			}
			return err
		},
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			daemonset := obj.(*extensionsv1.DaemonSet)
			glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
			err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &api_v1.DeleteOptions{})
			if err != nil {
				glog.Errorf("Error deleting daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
			} else {
				glog.V(4).Infof("Successfully deleted daemonset %s/%s", daemonset.Namespace, daemonset.Name)
			}
			return err
		})

	daemonsetcontroller.deletionHelper = deletionhelper.NewDeletionHelper(
		daemonsetcontroller.hasFinalizerFunc,
		daemonsetcontroller.removeFinalizerFunc,
		daemonsetcontroller.addFinalizerFunc,
		// objNameFunc
		func(obj pkg_runtime.Object) string {
			daemonset := obj.(*extensionsv1.DaemonSet)
			return daemonset.Name
		},
		daemonsetcontroller.updateTimeout,
		daemonsetcontroller.eventRecorder,
		daemonsetcontroller.daemonsetFederatedInformer,
		daemonsetcontroller.federatedUpdater,
	)

	return daemonsetcontroller
}
Example #16
// NewConfigMapController returns a new configmap controller
func NewConfigMapController(client federationclientset.Interface) *ConfigMapController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
	recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-configmaps-controller"})

	configmapcontroller := &ConfigMapController{
		federatedApiClient:    client,
		configmapReviewDelay:  time.Second * 10,
		clusterAvailableDelay: time.Second * 20,
		smallDelay:            time.Second * 3,
		updateTimeout:         time.Second * 30,
		configmapBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
		eventRecorder:         recorder,
	}

	// Build deliverers for triggering reconciliations.
	configmapcontroller.configmapDeliverer = util.NewDelayingDeliverer()
	configmapcontroller.clusterDeliverer = util.NewDelayingDeliverer()

	// Start informer on federated API servers on configmaps that should be federated.
	configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
				return client.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
			},
			WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
				return client.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
			},
		},
		&apiv1.ConfigMap{},
		controller.NoResyncPeriodFunc(),
		util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) }))

	// Federated informer on configmaps in members of federation.
	configmapcontroller.configmapFederatedInformer = util.NewFederatedInformer(
		client,
		func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
			return cache.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
						return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
					},
					WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
						return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
					},
				},
				&apiv1.ConfigMap{},
				controller.NoResyncPeriodFunc(),
				// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
				// would be just confirmation that some configmap operation succeeded.
				util.NewTriggerOnAllChanges(
					func(obj pkgruntime.Object) {
						configmapcontroller.deliverConfigMapObj(obj, configmapcontroller.configmapReviewDelay, false)
					},
				))
		},

		&util.ClusterLifecycleHandlerFuncs{
			ClusterAvailable: func(cluster *federationapi.Cluster) {
				// When new cluster becomes available process all the configmaps again.
				configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
			},
		},
	)

	// Federated updater along with Create/Update/Delete operations.
	configmapcontroller.federatedUpdater = util.NewFederatedUpdater(configmapcontroller.configmapFederatedInformer,
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			configmap := obj.(*apiv1.ConfigMap)
			_, err := client.Core().ConfigMaps(configmap.Namespace).Create(configmap)
			return err
		},
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			configmap := obj.(*apiv1.ConfigMap)
			_, err := client.Core().ConfigMaps(configmap.Namespace).Update(configmap)
			return err
		},
		func(client kubeclientset.Interface, obj pkgruntime.Object) error {
			configmap := obj.(*apiv1.ConfigMap)
			err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &apiv1.DeleteOptions{})
			return err
		})
	return configmapcontroller
}
Example #17
func TestSyncPodWithInitContainers(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	initContainers := []v1.Container{
		{
			Name:            "init1",
			Image:           "init",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "alpine",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers:     containers,
			InitContainers: initContainers,
		},
	}

	// buildContainerID is an internal helper function to build container id from api pod
	// and container with default attempt number 0.
	buildContainerID := func(pod *v1.Pod, container v1.Container) string {
		uid := string(pod.UID)
		sandboxID := apitest.BuildSandboxName(&runtimeApi.PodSandboxMetadata{
			Name:      &pod.Name,
			Uid:       &uid,
			Namespace: &pod.Namespace,
		})
		return apitest.BuildContainerName(&runtimeApi.ContainerMetadata{Name: &container.Name}, sandboxID)
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)

	// 1. should only create the init container.
	podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result := m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	assert.Equal(t, 1, len(fakeRuntime.Containers))
	initContainerID := buildContainerID(pod, initContainers[0])
	expectedContainers := []string{initContainerID}
	if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
		t.Errorf("expected %q, got %q", expectedContainers, actual)
	}

	// 2. should not create app container because init container is still running.
	podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	assert.Equal(t, 1, len(fakeRuntime.Containers))
	expectedContainers = []string{initContainerID}
	if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
		t.Errorf("expected %q, got %q", expectedContainers, actual)
	}

	// 3. should create all app containers because init container finished.
	fakeRuntime.StopContainer(initContainerID, 0)
	podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	assert.Equal(t, 3, len(fakeRuntime.Containers))
	expectedContainers = []string{initContainerID, buildContainerID(pod, containers[0]),
		buildContainerID(pod, containers[1])}
	if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
		t.Errorf("expected %q, got %q", expectedContainers, actual)
	}
}
Example #18
// NewNamespaceController returns a new namespace controller
func NewNamespaceController(client federationclientset.Interface) *NamespaceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-namespace-controller"})

	nc := &NamespaceController{
		federatedApiClient:    client,
		namespaceReviewDelay:  time.Second * 10,
		clusterAvailableDelay: time.Second * 20,
		smallDelay:            time.Second * 3,
		updateTimeout:         time.Second * 30,
		namespaceBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
		eventRecorder:         recorder,
	}

	// Build deliverers for triggering reconciliations.
	nc.namespaceDeliverer = util.NewDelayingDeliverer()
	nc.clusterDeliverer = util.NewDelayingDeliverer()

	// Start informer in federated API servers on namespaces that should be federated.
	nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
				return client.Core().Namespaces().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Core().Namespaces().Watch(options)
			},
		},
		&api_v1.Namespace{},
		controller.NoResyncPeriodFunc(),
		util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { nc.deliverNamespaceObj(obj, 0, false) }))

	// Federated informer on namespaces in members of federation.
	nc.namespaceFederatedInformer = util.NewFederatedInformer(
		client,
		func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
			return cache.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
						return targetClient.Core().Namespaces().List(options)
					},
					WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
						return targetClient.Core().Namespaces().Watch(options)
					},
				},
				&api_v1.Namespace{},
				controller.NoResyncPeriodFunc(),
				// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
				// would be just confirmation that some namespace operation succeeded.
				util.NewTriggerOnMetaAndSpecChanges(
					func(obj pkg_runtime.Object) { nc.deliverNamespaceObj(obj, nc.namespaceReviewDelay, false) },
				))
		},

		&util.ClusterLifecycleHandlerFuncs{
			ClusterAvailable: func(cluster *federation_api.Cluster) {
				// When new cluster becomes available process all the namespaces again.
				nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay)
			},
		},
	)

	// Federated updater along with Create/Update/Delete operations.
	nc.federatedUpdater = util.NewFederatedUpdater(nc.namespaceFederatedInformer,
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			namespace := obj.(*api_v1.Namespace)
			_, err := client.Core().Namespaces().Create(namespace)
			return err
		},
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			namespace := obj.(*api_v1.Namespace)
			_, err := client.Core().Namespaces().Update(namespace)
			return err
		},
		func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
			namespace := obj.(*api_v1.Namespace)
			err := client.Core().Namespaces().Delete(namespace.Name, &api.DeleteOptions{})
			return err
		})
	return nc
}
Example #19
// NewSecretController returns a new secret controller
func NewSecretController(client federation_release_1_4.Interface) *SecretController {
	secretcontroller := &SecretController{
		federatedApiClient:    client,
		secretReviewDelay:     time.Second * 10,
		clusterAvailableDelay: time.Second * 20,
		smallDelay:            time.Second * 3,
		updateTimeout:         time.Second * 30,
		secretBackoff:         flowcontrol.NewBackOff(5*time.Second, time.Minute),
	}

	// Build deliverers for triggering reconciliations.
	secretcontroller.secretDeliverer = util.NewDelayingDeliverer()
	secretcontroller.clusterDeliverer = util.NewDelayingDeliverer()

	// Start informer in federated API servers on secrets that should be federated.
	secretcontroller.secretInformerStore, secretcontroller.secretInformerController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
				return client.Core().Secrets(api_v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Core().Secrets(api_v1.NamespaceAll).Watch(options)
			},
		},
		&api_v1.Secret{},
		controller.NoResyncPeriodFunc(),
		util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) }))

	// Federated informer on secrets in members of federation.
	secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
		client,
		func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
			return framework.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
						return targetClient.Core().Secrets(api_v1.NamespaceAll).List(options)
					},
					WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
						return targetClient.Core().Secrets(api_v1.NamespaceAll).Watch(options)
					},
				},
				&api_v1.Secret{},
				controller.NoResyncPeriodFunc(),
				// Trigger reconciliation whenever something in federated cluster is changed. In most cases it
				// would be just confirmation that some secret operation succeeded.
				util.NewTriggerOnChanges(
					func(obj pkg_runtime.Object) {
						secretcontroller.deliverSecretObj(obj, secretcontroller.secretReviewDelay, false)
					},
				))
		},

		&util.ClusterLifecycleHandlerFuncs{
			ClusterAvailable: func(cluster *federation_api.Cluster) {
				// When new cluster becomes available process all the secrets again.
				secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
			},
		},
	)

	// Federated updater along with Create/Update/Delete operations.
	secretcontroller.federatedUpdater = util.NewFederatedUpdater(secretcontroller.secretFederatedInformer,
		func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
			secret := obj.(*api_v1.Secret)
			_, err := client.Core().Secrets(secret.Namespace).Create(secret)
			return err
		},
		func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
			secret := obj.(*api_v1.Secret)
			_, err := client.Core().Secrets(secret.Namespace).Update(secret)
			return err
		},
		func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
			secret := obj.(*api_v1.Secret)
			err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &api.DeleteOptions{})
			return err
		})
	return secretcontroller
}