func TestDSManagerInit(t *testing.T) {
	// Insert a stable daemon set and make sure we don't create an extra pod
	// for the one node that already has a daemon pod after a simulated restart.
	ds := newDaemonSet("test")
	ds.Status = extensions.DaemonSetStatus{
		CurrentNumberScheduled: 1,
		NumberMisscheduled:     0,
		DesiredNumberScheduled: 1,
	}
	nodeName := "only-node"
	podList := &api.PodList{
		Items: []api.Pod{
			*newPod("podname", nodeName, simpleDaemonSetLabel),
		},
	}
	response := runtime.EncodeOrDie(testapi.Default.Codec(), podList)
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: response,
	}
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment once #19254 is fixed.
	// defer testServer.Close()

	client := client.NewOrDie(&client.Config{
		Host:          testServer.URL,
		ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
	})
	manager := NewDaemonSetsController(client, controller.NoResyncPeriodFunc)
	manager.dsStore.Add(ds)
	manager.nodeStore.Add(newNode(nodeName, nil))
	manager.podStoreSynced = alwaysReady
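	// Mirror the initial pod sync that Run performs: list pods from the fake
	// apiserver into the pod store before invoking the sync handler.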
	controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)

	fakePodControl := &controller.FakePodControl{}
	manager.podControl = fakePodControl
	manager.syncHandler(getKey(ds, t))
	validateSyncDaemonSets(t, fakePodControl, 0, 0)
}
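
// Both tests in this snippet assume a package-level alwaysReady helper that
// short-circuits the "pod store has synced" check; a minimal sketch of what
// such a helper could look like (an assumption, not part of the original):
var alwaysReady = func() bool { return true }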
// Run begins watching and syncing.
func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting RC Manager")
	controller.SyncAllPodsWithStore(rm.kubeClient, rm.podStore.Store)
	go rm.rcController.Run(stopCh)
	go rm.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(rm.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down RC Manager")
	rm.queue.ShutDown()
}
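
// A minimal sketch of how a binary might start the manager; kubeClient and
// the worker count of 5 are illustrative assumptions, not taken from this
// snippet.
func startReplicationManager(kubeClient client.Interface, stopCh <-chan struct{}) {
	rm := NewReplicationManager(kubeClient, controller.NoResyncPeriodFunc, BurstReplicas)
	go rm.Run(5, stopCh)
}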
// Run begins watching and syncing daemon sets.
func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting Daemon Sets controller manager")
	controller.SyncAllPodsWithStore(dsc.kubeClient, dsc.podStore.Store)
	go dsc.dsController.Run(stopCh)
	go dsc.podController.Run(stopCh)
	go dsc.nodeController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(dsc.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down Daemon Set Controller")
	dsc.queue.ShutDown()
}
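
// Likewise, a minimal sketch of starting the daemon sets controller; the
// kubeClient parameter and worker count are illustrative assumptions.
func startDaemonSetsController(kubeClient client.Interface, stopCh <-chan struct{}) {
	dsc := NewDaemonSetsController(kubeClient, controller.NoResyncPeriodFunc)
	go dsc.Run(2, stopCh)
}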
func TestRCManagerInit(t *testing.T) {
	// Insert a stable rc into the replication manager's store and make sure
	// it syncs pods with the apiserver before making any decisions.
	rc := newReplicationController(2)
	response := runtime.EncodeOrDie(testapi.Default.Codec(), newPodList(nil, 2, api.PodRunning, rc))
	fakeHandler := utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: response,
	}
	testServer := httptest.NewServer(&fakeHandler)
	// TODO: Uncomment once #19254 is fixed.
	// defer testServer.Close()

	client := client.NewOrDie(&client.Config{
		Host:          testServer.URL,
		ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
	})
	manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
	manager.rcStore.Store.Add(rc)
	manager.podStoreSynced = alwaysReady
	controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)
	fakePodControl := &controller.FakePodControl{}
	manager.podControl = fakePodControl
	manager.syncReplicationController(getKey(rc, t))
	validateSyncReplication(t, fakePodControl, 0, 0)
}