Example #1
func watchForServices(kubeClient *kclient.Client, ks *kube2consul) {
	// Watch the services API and dispatch add/update/delete events to the
	// kube2consul handlers below.
	_, serviceController := kframework.NewInformer(
		createServiceLW(kubeClient),
		&kapi.Service{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{
			AddFunc:    ks.newService,
			DeleteFunc: ks.removeService,
			UpdateFunc: ks.updateService,
		},
	)
	go serviceController.Run(util.NeverStop)
}
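
For context, createServiceLW is a helper defined elsewhere in kube2consul and is not shown on this page. A minimal sketch of what it plausibly looks like, assuming it wraps kcache.NewListWatchFromClient (kcache and kfields being assumed aliases for the cache and fields packages):

// Hypothetical reconstruction: list and watch all services in all namespaces.
func createServiceLW(kubeClient *kclient.Client) *kcache.ListWatch {
	return kcache.NewListWatchFromClient(kubeClient, "services", kapi.NamespaceAll, kfields.Everything())
}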
Example #2

	// NOTE: the opening lines of this example were truncated in the source.
	// Everything from BeforeEach down to the first "}," is a reconstruction
	// (assumed names f, ns, and an assumed cache.ListWatch source); the code
	// from &api.Pod{} onward is original.
	BeforeEach(func() {
		stopCh = make(chan struct{})
		tracker = newPodTracker()
		_, controller = controllerframework.NewInformer(
			&cache.ListWatch{
				ListFunc: func(options api.ListOptions) (runtime.Object, error) {
					return f.Client.Pods(ns).List(options)
				},
				WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
					return f.Client.Pods(ns).Watch(options)
				},
			},
			&api.Pod{},
			0,
			controllerframework.ResourceEventHandlerFuncs{
				AddFunc: func(obj interface{}) {
					tracker.remember(obj.(*api.Pod), ADD)
				},
				UpdateFunc: func(oldObj, newObj interface{}) {
					tracker.remember(newObj.(*api.Pod), UPDATE)
				},
				DeleteFunc: func(obj interface{}) {
					tracker.remember(obj.(*api.Pod), DEL)
				},
			},
		)
		go controller.Run(stopCh)
	})
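
	// For illustration only (not part of the original example): newPodTracker
	// is defined elsewhere in the suite; a tracker with the shape used above
	// could be as simple as a mutex-guarded event log, e.g.
	//
	//	type podTracker struct {
	//		mu     sync.Mutex
	//		events []string
	//	}
	//
	//	func (t *podTracker) remember(pod *api.Pod, eventType string) {
	//		t.mu.Lock()
	//		defer t.mu.Unlock()
	//		t.events = append(t.events, eventType+": "+pod.Name)
	//	}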

	AfterEach(func() {
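		// Closing stopCh stops the informer goroutine started in BeforeEach.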
		close(stopCh)
	})

	It("Controller Manager should not create/delete replicas across restart", func() {

		restarter := NewRestartConfig(
			getMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout)
		restarter.restart()
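		// NewRestartConfig and restart() are suite helpers (not shown here);
		// given the host/port arguments, restart() presumably restarts the
		// controller manager on the master and polls until it is healthy again.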

		// The intent is to ensure the replication controller manager has observed and reported status of
		// the replication controller at least once since the manager restarted, so that we can determine
		// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC