Example 1
func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan qinglet.PodUpdate, *PodConfig) {
	eventBroadcaster := record.NewBroadcaster()
	config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "qinglet"}))
	channel := config.Channel(TestSource)
	ch := config.Updates()
	return channel, ch, config
}
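PodConfig merges per-source channels (here the single TestSource) into the one Updates() stream the qinglet consumes. As a rough, standard-library-only analogy of that fan-in (every name below is illustrative, not taken from the qinglet codebase):
package main

import "fmt"

// mergeSources fans several source channels into one updates channel,
// which is roughly what PodConfig.Channel/Updates do for the qinglet.
// Everything here is an illustrative stand-in, not qinglet code.
func mergeSources(sources ...<-chan string) <-chan string {
	updates := make(chan string)
	done := make(chan struct{})
	for _, src := range sources {
		go func(src <-chan string) {
			for msg := range src {
				updates <- msg
			}
			done <- struct{}{}
		}(src)
	}
	go func() {
		for range sources {
			<-done
		}
		close(updates)
	}()
	return updates
}

func main() {
	fileSource := make(chan string, 1)
	apiSource := make(chan string, 1)
	updates := mergeSources(fileSource, apiSource)

	fileSource <- "ADD pod-a (file source)"
	apiSource <- "ADD pod-b (api source)"
	close(fileSource)
	close(apiSource)

	for u := range updates {
		fmt.Println(u)
	}
}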
Example 2
// RunQinglet is responsible for setting up and running a qinglet.  It is used in three different applications:
//   1 Integration tests
//   2 Qinglet binary
//   3 Standalone 'qingyuan' binary
// Eventually, #2 will be replaced with instances of #3
func RunQinglet(kcfg *QingletConfig, builder QingletBuilder) error {
	kcfg.Hostname = nodeutil.GetHostname(kcfg.HostnameOverride)

	if len(kcfg.NodeName) == 0 {
		// Query the cloud provider for our node name, default to Hostname
		nodeName := kcfg.Hostname
		if kcfg.Cloud != nil {
			var err error
			instances, ok := kcfg.Cloud.Instances()
			if !ok {
				return fmt.Errorf("failed to get instances from cloud provider")
			}

			nodeName, err = instances.CurrentNodeName(kcfg.Hostname)
			if err != nil {
				return fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
			}

			glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
		}

		kcfg.NodeName = nodeName
	}

	eventBroadcaster := record.NewBroadcaster()
	kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "qinglet", Host: kcfg.NodeName})
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.QingClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.QingClient.Events(""))
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}
	capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)

	credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)

	if builder == nil {
		builder = createAndInitQinglet
	}
	if kcfg.OSInterface == nil {
		kcfg.OSInterface = qingcontainer.RealOS{}
	}
	k, podCfg, err := builder(kcfg)
	if err != nil {
		return fmt.Errorf("failed to create qinglet: %v", err)
	}
	// process pods and exit.
	if kcfg.Runonce {
		if _, err := k.RunOnce(podCfg.Updates()); err != nil {
			return fmt.Errorf("runonce failed: %v", err)
		}
		glog.Infof("Started qinglet as runonce")
	} else {
		startQinglet(k, podCfg, kcfg)
		glog.Infof("Started qinglet")
	}
	return nil
}
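The wiring that repeats throughout these examples is: create one Broadcaster per process, attach sinks (a log sink and, when an API client exists, the events endpoint), then hand out Recorders stamped with an EventSource. The stand-alone sketch below models that shape in plain Go so it stays runnable; its types are illustrative analogues of the record package calls used above, not the fork's actual API:
package main

import (
	"fmt"
	"log"
)

// event and the types below are stand-ins for the record package used in
// the examples above: one broadcaster per process, any number of sinks,
// recorders stamped with a source component.
type event struct {
	Source  string
	Reason  string
	Message string
}

type broadcaster struct {
	sinks []func(event)
}

func newBroadcaster() *broadcaster { return &broadcaster{} }

// startLogging attaches a log sink (analogous to StartLogging(glog.Infof)).
func (b *broadcaster) startLogging(logf func(format string, args ...interface{})) {
	b.sinks = append(b.sinks, func(e event) {
		logf("event from %s: %s (%s)", e.Source, e.Reason, e.Message)
	})
}

// startRecordingToSink attaches an arbitrary sink (analogous to
// StartRecordingToSink(client.Events(""))).
func (b *broadcaster) startRecordingToSink(sink func(event)) {
	b.sinks = append(b.sinks, sink)
}

// newRecorder returns a recorder stamped with a component name, like
// NewRecorder(api.EventSource{Component: ...}).
func (b *broadcaster) newRecorder(component string) func(reason, message string) {
	return func(reason, message string) {
		e := event{Source: component, Reason: reason, Message: message}
		for _, s := range b.sinks {
			s(e)
		}
	}
}

func main() {
	b := newBroadcaster()
	b.startLogging(log.Printf)
	b.startRecordingToSink(func(e event) { fmt.Printf("would POST %+v to the API server\n", e) })

	record := b.newRecorder("scheduler")
	record("scheduled", "pod foo bound to machine1")
}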
Example 3
// Run runs the specified SchedulerServer.  This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Qingconfig == "" && s.Master == "" {
		glog.Warningf("Neither --qingconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified qingconfig
	// file, and then overriding the Master flag, if non-empty.
	qingconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Qingconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	qingconfig.QPS = 20.0
	qingconfig.Burst = 30

	qingClient, err := client.New(qingconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(qingClient)
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(qingClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
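The goroutine above assembles a small operational endpoint: health check, optional pprof, and metrics on a single mux. A standard-library-only version of the same layout, with plain handlers standing in for healthz.InstallHandler and prometheus.Handler:
package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/http/pprof"
	"strconv"
)

func main() {
	enableProfiling := true
	mux := http.NewServeMux()

	// Stand-in for healthz.InstallHandler(mux).
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "ok")
	})
	if enableProfiling {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	}
	// Stand-in for prometheus.Handler(); a real exporter would go here.
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "# metrics placeholder\n")
	})

	server := &http.Server{
		Addr:    net.JoinHostPort("127.0.0.1", strconv.Itoa(10251)),
		Handler: mux,
	}
	log.Fatal(server.ListenAndServe())
}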
Example 4
func (k *QingYuanScheduler) NewPluginConfig(terminate <-chan struct{}, mux *http.ServeMux,
	podsWatcher *cache.ListWatch) *PluginConfig {

	// Watch and queue pods that need scheduling.
	updates := make(chan queue.Entry, k.schedcfg.UpdatesBacklog)
	podUpdates := &podStoreAdapter{queue.NewHistorical(updates)}
	reflector := cache.NewReflector(podsWatcher, &api.Pod{}, podUpdates, 0)

	// lock that guards critical sections that involve transferring pods from
	// the store (cache) to the scheduling queue; its purpose is to maintain
	// an ordering (vs interleaving) of operations that's easier to reason about.
	kapi := &k8smScheduler{internal: k}
	q := newQueuer(podUpdates)
	podDeleter := &deleter{
		api: kapi,
		qr:  q,
	}
	eh := &errorHandler{
		api:     kapi,
		backoff: backoff.New(k.schedcfg.InitialPodBackoff.Duration, k.schedcfg.MaxPodBackoff.Duration),
		qr:      q,
	}
	startLatch := make(chan struct{})
	eventBroadcaster := record.NewBroadcaster()
	runtime.On(startLatch, func() {
		eventBroadcaster.StartRecordingToSink(k.client.Events(""))
		reflector.Run() // TODO(jdef) should listen for termination
		podDeleter.Run(updates, terminate)
		q.Run(terminate)

		q.installDebugHandlers(mux)
		podtask.InstallDebugHandlers(k.taskRegistry, mux)
	})
	return &PluginConfig{
		Config: &plugin.Config{
			MinionLister: nil,
			Algorithm: &qingScheduler{
				api:        kapi,
				podUpdates: podUpdates,
			},
			Binder:   &binder{api: kapi},
			NextPod:  q.yield,
			Error:    eh.handleSchedulingError,
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		},
		api:      kapi,
		client:   k.client,
		qr:       q,
		deleter:  podDeleter,
		starting: startLatch,
	}
}
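The startLatch/runtime.On pairing defers the side-effecting startup work (event sink, reflector, deleter, queue loops) until the plugin is actually started. Assuming On simply runs its callback once the latch channel closes (an assumption about that helper, not verified against its source), a minimal stdlib sketch of the idea:
package main

import (
	"fmt"
	"time"
)

// on runs fn in its own goroutine once latch is closed. This mirrors what
// runtime.On appears to be used for above; the real helper's semantics are
// an assumption here.
func on(latch <-chan struct{}, fn func()) {
	go func() {
		<-latch
		fn()
	}()
}

func main() {
	startLatch := make(chan struct{})
	on(startLatch, func() { fmt.Println("deferred startup running") })

	fmt.Println("plugin config built, nothing started yet")
	close(startLatch) // the "starting" signal
	time.Sleep(100 * time.Millisecond)
}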
Example 5
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, qingClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(qingClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		qingClient:       qingClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
	}
}
Example 6
func TestUnschedulableNodes(t *testing.T) {
	helper, err := framework.NewHelper()
	if err != nil {
		t.Fatalf("Couldn't create etcd helper: %v", err)
	}
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		EtcdHelper:            helper,
		QingletClient:         client.FakeQingletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableUISupport:       false,
		EnableIndex:           true,
		APIPrefix:             "/api",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
	})

	restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	schedulerConfigFactory := factory.NewConfigFactory(restClient)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
	scheduler.New(schedulerConfig).Run()

	defer close(schedulerConfig.StopEverything)

	DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
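Declaring var m *master.Master before the test server and only assigning it afterwards is deliberate: the httptest server must exist (so s.URL is known and can be fed into the REST client) before the component that handles its requests is built, so the handler closes over a pointer that is filled in later. A standard-library-only program showing the same late-binding trick (names are illustrative):
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Declared first; assigned only after the server (and its URL) exists.
	var backend http.Handler

	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		backend.ServeHTTP(w, r)
	}))
	defer s.Close()

	// Now the URL is known and can be baked into the real handler,
	// just as s.URL is fed into the REST client above.
	backend = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "handled by backend built with %s", s.URL)
	})

	resp, err := http.Get(s.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}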
Example 7
func TestScheduler(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()
	errS := errors.New("scheduler")
	errB := errors.New("binder")

	table := []struct {
		injectBindError  error
		sendPod          *api.Pod
		algo             algorithm.ScheduleAlgorithm
		expectErrorPod   *api.Pod
		expectAssumedPod *api.Pod
		expectError      error
		expectBind       *api.Binding
		eventReason      string
	}{
		{
			sendPod:          podWithID("foo", ""),
			algo:             mockScheduler{"machine1", nil},
			expectBind:       &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			expectAssumedPod: podWithID("foo", "machine1"),
			eventReason:      "scheduled",
		}, {
			sendPod:        podWithID("foo", ""),
			algo:           mockScheduler{"machine1", errS},
			expectError:    errS,
			expectErrorPod: podWithID("foo", ""),
			eventReason:    "failedScheduling",
		}, {
			sendPod:         podWithID("foo", ""),
			algo:            mockScheduler{"machine1", nil},
			expectBind:      &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: "machine1"}},
			injectBindError: errB,
			expectError:     errB,
			expectErrorPod:  podWithID("foo", ""),
			eventReason:     "failedScheduling",
		},
	}

	for i, item := range table {
		var gotError error
		var gotPod *api.Pod
		var gotAssumedPod *api.Pod
		var gotBinding *api.Binding
		c := &Config{
			Modeler: &FakeModeler{
				AssumePodFunc: func(pod *api.Pod) {
					gotAssumedPod = pod
				},
			},
			MinionLister: algorithm.FakeMinionLister(
				api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
			),
			Algorithm: item.algo,
			Binder: fakeBinder{func(b *api.Binding) error {
				gotBinding = b
				return item.injectBindError
			}},
			Error: func(p *api.Pod, err error) {
				gotPod = p
				gotError = err
			},
			NextPod: func() *api.Pod {
				return item.sendPod
			},
			Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
		}
		s := New(c)
		called := make(chan struct{})
		events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
			if e, a := item.eventReason, e.Reason; e != a {
				t.Errorf("%v: expected %v, got %v", i, e, a)
			}
			close(called)
		})
		s.scheduleOne()
		if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: assumed pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error pod: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: wanted %v, got %v", i, e, a)
		}
		if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
			t.Errorf("%v: error: %s", i, util.ObjectDiff(e, a))
		}
		<-called
		events.Stop()
	}
}
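Each case synchronizes with the asynchronous event watcher through the called channel: the watcher callback closes it and the test blocks on <-called. A generic variant of that idiom with a deadline, so a missing event fails the test instead of hanging (a common Go testing pattern, not code from the source):
package example

import (
	"testing"
	"time"
)

// TestWaitWithTimeout shows the "close a channel from the async callback,
// then select with a deadline" idiom in isolation.
func TestWaitWithTimeout(t *testing.T) {
	called := make(chan struct{})
	go func() {
		// Stand-in for the event watcher callback firing.
		close(called)
	}()

	select {
	case <-called:
		// Event observed; a real test would run its assertions here.
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for event")
	}
}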
Example 8
func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
	eventBroadcaster := record.NewBroadcaster()
	defer eventBroadcaster.StartLogging(t.Logf).Stop()

	// Setup modeler so we control the contents of all 3 stores: assumed,
	// scheduled and queued
	scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	scheduledPodLister := &cache.StoreToPodLister{scheduledPodStore}

	queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	queuedPodLister := &cache.StoreToPodLister{queuedPodStore}

	modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister)

	// Create a fake clock used to timestamp entries and calculate ttl. Nothing
	// will expire till we flip to something older than the ttl, at which point
	// all entries inserted with fakeTime will expire.
	ttl := 30 * time.Second
	fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fakeClock := &util.FakeClock{fakeTime}
	ttlPolicy := &cache.TTLPolicy{ttl, fakeClock}
	assumedPodsStore := cache.NewFakeExpirationStore(
		cache.MetaNamespaceKeyFunc, nil, ttlPolicy, fakeClock)
	modeler.assumedPods = &cache.StoreToPodLister{assumedPodsStore}

	// Port is the easiest way to cause a fit predicate failure
	podPort := 8080
	firstPod := podWithPort("foo", "", podPort)

	// Create the scheduler config
	algo := NewGenericScheduler(
		map[string]algorithm.FitPredicate{"PodFitsPorts": predicates.PodFitsPorts},
		[]algorithm.PriorityConfig{},
		modeler.PodLister(),
		rand.New(rand.NewSource(time.Now().UnixNano())))

	var gotBinding *api.Binding
	c := &Config{
		Modeler: modeler,
		MinionLister: algorithm.FakeMinionLister(
			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
		),
		Algorithm: algo,
		Binder: fakeBinder{func(b *api.Binding) error {
			scheduledPodStore.Add(podWithPort(b.Name, b.Target.Name, podPort))
			gotBinding = b
			return nil
		}},
		NextPod: func() *api.Pod {
			return queuedPodStore.Pop().(*api.Pod)
		},
		Error: func(p *api.Pod, err error) {
			t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err)
		},
		Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
	}

	// First scheduling pass should schedule the pod
	s := New(c)
	called := make(chan struct{})
	events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	queuedPodStore.Add(firstPod)
	// queuedPodStore: [foo:8080]
	// scheduledPodStore: []
	// assumedPods: []

	s.scheduleOne()
	// queuedPodStore: []
	// scheduledPodStore: [foo:8080]
	// assumedPods: [foo:8080]

	pod, exists, _ := scheduledPodStore.GetByKey("foo")
	if !exists {
		t.Errorf("Expected scheduled pod store to contain pod")
	}
	pod, exists, _ = queuedPodStore.GetByKey("foo")
	if exists {
		t.Errorf("Did not expect a queued pod, found %+v", pod)
	}
	pod, exists, _ = assumedPodsStore.GetByKey("foo")
	if !exists {
		t.Errorf("Assumed pod store should contain stale pod")
	}

	expectBind := &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}

	<-called
	events.Stop()

	scheduledPodStore.Delete(pod)
	_, exists, _ = assumedPodsStore.Get(pod)
	if !exists {
		t.Errorf("Expected pod %#v in assumed pod store", pod)
	}

	secondPod := podWithPort("bar", "", podPort)
	queuedPodStore.Add(secondPod)
	// queuedPodStore: [bar:8080]
	// scheduledPodStore: []
	// assumedPods: [foo:8080]

	// Second scheduling pass will fail to schedule if the store hasn't expired
	// the deleted pod. This would normally happen with a timeout.
	//expirationPolicy.NeverExpire = util.NewStringSet()
	fakeClock.Time = fakeClock.Time.Add(ttl + 1)

	called = make(chan struct{})
	events = eventBroadcaster.StartEventWatcher(func(e *api.Event) {
		if e, a := "scheduled", e.Reason; e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
		close(called)
	})

	s.scheduleOne()

	expectBind = &api.Binding{
		ObjectMeta: api.ObjectMeta{Name: "bar"},
		Target:     api.ObjectReference{Kind: "Node", Name: "machine1"},
	}
	if ex, ac := expectBind, gotBinding; !reflect.DeepEqual(ex, ac) {
		t.Errorf("Expected exact match on binding: %s", util.ObjectDiff(ex, ac))
	}
	<-called
	events.Stop()
}
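The test makes TTL expiry deterministic by giving the expiration store a fake clock and then advancing fakeClock.Time past the ttl, rather than sleeping. A minimal sketch of that technique with an illustrative clock interface and TTL check (not the cache package's actual types):
package main

import (
	"fmt"
	"time"
)

// clock is the seam that lets tests control "now".
type clock interface{ Now() time.Time }

type fakeClock struct{ Time time.Time }

func (f *fakeClock) Now() time.Time { return f.Time }

// expired reports whether an entry inserted at insertedAt has outlived ttl
// according to the supplied clock.
func expired(c clock, insertedAt time.Time, ttl time.Duration) bool {
	return c.Now().Sub(insertedAt) > ttl
}

func main() {
	ttl := 30 * time.Second
	fc := &fakeClock{Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)}
	insertedAt := fc.Time

	fmt.Println(expired(fc, insertedAt, ttl)) // false: nothing has "aged" yet

	fc.Time = fc.Time.Add(ttl + 1)            // advance the fake clock past the TTL
	fmt.Println(expired(fc, insertedAt, ttl)) // true: entry now counts as stale
}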
Example 9
// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(qingClient client.Interface, burstReplicas int) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(qingClient.Events(""))

	rm := &ReplicationManager{
		qingClient: qingClient,
		podControl: RealPodControl{
			qingClient: qingClient,
			recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  NewRCExpectations(),
		queue:         workqueue.New(),
	}

	rm.controllerStore.Store, rm.rcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.qingClient.ReplicationControllers(api.NamespaceAll).List(labels.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.qingClient.ReplicationControllers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ReplicationController{},
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				// We only really need to do this when spec changes, but for correctness it is safer to
				// periodically double check. It is overkill for 2 reasons:
				// 1. Status.Replica updates will cause a sync
				// 2. Every 30s we will get a full resync (this will happen anyway every 5 minutes when pods relist)
				// However, it shouldn't be that bad as rcs that haven't met expectations won't sync, and all
				// the listing is done using local stores.
				oldRC := old.(*api.ReplicationController)
				curRC := cur.(*api.ReplicationController)
				if oldRC.Status.Replicas != curRC.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
				}
				rm.enqueueController(cur)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
	)

	rm.podStore.Store, rm.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.qingClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.qingClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.addPod,
			// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill,
			// the most frequent pod update is status, and the associated rc will only list from local storage, so
			// it should be ok.
			UpdateFunc: rm.updatePod,
			DeleteFunc: rm.deletePod,
		},
	)

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	return rm
}
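Both informers take a ListWatch plus framework.ResourceEventHandlerFuncs, and the framework invokes AddFunc/UpdateFunc/DeleteFunc as the watch delivers changes. A stripped-down illustration of that handler shape and its dispatch (illustrative only, not the framework package's implementation):
package main

import "fmt"

// handlerFuncs mirrors the shape of framework.ResourceEventHandlerFuncs:
// three optional callbacks, dispatched per change type.
type handlerFuncs struct {
	AddFunc    func(obj interface{})
	UpdateFunc func(old, cur interface{})
	DeleteFunc func(obj interface{})
}

func (h handlerFuncs) onAdd(obj interface{}) {
	if h.AddFunc != nil {
		h.AddFunc(obj)
	}
}

func (h handlerFuncs) onUpdate(old, cur interface{}) {
	if h.UpdateFunc != nil {
		h.UpdateFunc(old, cur)
	}
}

func main() {
	queue := []string{}
	h := handlerFuncs{
		AddFunc:    func(obj interface{}) { queue = append(queue, fmt.Sprintf("add %v", obj)) },
		UpdateFunc: func(old, cur interface{}) { queue = append(queue, fmt.Sprintf("update %v -> %v", old, cur)) },
	}

	// A real informer would call these as watch events arrive.
	h.onAdd("rc/frontend")
	h.onUpdate("rc/frontend@rv1", "rc/frontend@rv2")

	fmt.Println(queue)
}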
Example 10
func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (string, string) {
	// Setup
	servers := []string{}
	glog.Infof("Creating etcd client pointing to %v", servers)

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * sleep
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: apiVersion})

	helper, err := master.NewEtcdHelper(etcdClient, "", etcdtest.PathPrefix())
	if err != nil {
		glog.Fatalf("Unable to get etcd helper: %v", err)
	}

	// Master
	host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}

	publicAddress := net.ParseIP(host)
	if publicAddress == nil {
		glog.Fatalf("no public address for %s", host)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		EtcdHelper:            helper,
		QingletClient:         fakeQingletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableProfiling:       true,
		APIPrefix:             "/api",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
		ReadWritePort:         portNumber,
		PublicAddress:         publicAddress,
		CacheTimeout:          2 * time.Second,
	})
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()

	endpoints := service.NewEndpointController(cl)
	// ensure the service endpoints are sync'd several times within the window that the integration tests wait
	go endpoints.Run(3, util.NeverStop)

	controllerManager := replicationControllerPkg.NewReplicationManager(cl, replicationControllerPkg.BurstReplicas)

	// TODO: Write an integration test for the replication controllers watch.
	go controllerManager.Run(3, util.NeverStop)

	nodeController := nodecontroller.NewNodeController(nil, cl, 10, 5*time.Minute, nodecontroller.NewPodEvictor(util.NewFakeRateLimiter()),
		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
	nodeController.Run(5 * time.Second)
	cadvisorInterface := new(cadvisor.Fake)

	// Qinglet (localhost)
	testRootDir := makeTempDirOrDie("qinglet_integ_1.", "")
	configFilePath := makeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for qinglet #1", testRootDir)
	fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.15"}
	kcfg := qingletapp.SimpleQinglet(cl, &fakeDocker1, "localhost", testRootDir, firstManifestURL, "127.0.0.1", 10250, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, configFilePath, nil, qingcontainer.FakeOS{})
	qingletapp.RunQinglet(kcfg, nil)
	// Qinglet (machine)
	// Create a second qinglet so that the guestbook example's two redis slaves both
	// have a place they can schedule.
	testRootDir = makeTempDirOrDie("qinglet_integ_2.", "")
	glog.Infof("Using %s as root dir for qinglet #2", testRootDir)
	fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.15"}
	kcfg = qingletapp.SimpleQinglet(cl, &fakeDocker2, "127.0.0.1", testRootDir, secondManifestURL, "127.0.0.1", 10251, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, "", nil, qingcontainer.FakeOS{})
	qingletapp.RunQinglet(kcfg, nil)
	return apiServer.URL, configFilePath
}
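The etcd probe above retries with sleep = sleep * sleep, which multiplies two time.Duration values and therefore squares the nanosecond count; the more conventional shape for this kind of startup probe is a bounded, doubling backoff. A generic stdlib sketch of that alternative (the probe function is a placeholder, not the etcd call):
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff calls probe up to attempts times, doubling the pause
// between failures. probe stands in for "list root etcd keys".
func retryWithBackoff(attempts int, initial time.Duration, probe func() error) error {
	sleep := initial
	for i := 0; i < attempts; i++ {
		if err := probe(); err == nil {
			return nil
		} else if i < attempts-1 {
			fmt.Printf("probe failed (%v); retrying in %v\n", err, sleep)
			time.Sleep(sleep)
			sleep *= 2
		}
	}
	return errors.New("all attempts failed")
}

func main() {
	calls := 0
	err := retryWithBackoff(3, 50*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("etcd not ready")
		}
		return nil
	})
	fmt.Println("result:", err)
}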