// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *client.Client, namespace string) *loadBalancerController {
	lbc := loadBalancerController{
		cfg:    cfg,
		client: kubeClient,
		queue:  workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(
			reloadQPS, int(reloadQPS)),
		targetService:   *targetService,
		forwardServices: *forwardServices,
		httpPort:        *httpPort,
		tcpServices:     map[string]int{},
	}
	for _, service := range strings.Split(*tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			glog.Errorf("Ignoring misconfigured TCP service %v", service)
			continue
		}
		if port, err := strconv.Atoi(portSplit[1]); err != nil {
			glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
			continue
		} else {
			lbc.tcpServices[portSplit[0]] = port
		}
	}
	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}
	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}

// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(kubeClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolumeClaim{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			// no DeleteFunc needed. a claim requires no clean-up.
			// syncVolume handles the missing claim
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}

// NewNamespaceManager creates a new NamespaceManager
func NewNamespaceManager(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceManager {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				syncNamespace(kubeClient, *namespace)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				syncNamespace(kubeClient, *namespace)
			},
		},
	)

	return &NamespaceManager{
		controller: controller,
	}
}

func newPodsApi(client *kclient.Client) podsApi {
	// Extend the selector to include specific nodes to monitor
	// or provide an API to update the nodes to monitor.
	selector, err := kSelector.ParseSelector("spec.nodeName!=")
	if err != nil {
		panic(err)
	}
	lw := kcache.NewListWatchFromClient(client, "pods", kapi.NamespaceAll, selector)
	podLister := &kcache.StoreToPodLister{Store: kcache.NewStore(kcache.MetaNamespaceKeyFunc)}
	// Watch and cache all running pods.
	reflector := kcache.NewReflector(lw, &kapi.Pod{}, podLister.Store, 0)
	stopChan := make(chan struct{})
	reflector.RunUntil(stopChan)

	nStore, nController := kframework.NewInformer(
		createNamespaceLW(client),
		&kapi.Namespace{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{})
	go nController.Run(util.NeverStop)

	podsApi := &realPodsApi{
		client:         client,
		podLister:      podLister,
		stopChan:       stopChan,
		reflector:      reflector,
		namespaceStore: nStore,
	}

	return podsApi
}

// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
	e := &DockercfgController{
		client: cl,
	}

	_, e.serviceAccountController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ServiceAccount{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
		},
	)

	e.dockerURL = options.DefaultDockerURL

	return e
}

// NewEndpointController returns a new *EndpointController.
func NewEndpointController(client *client.Client) *EndpointController {
	e := &EndpointController{
		client: client,
		queue:  workqueue.New(),
	}

	e.serviceStore.Store, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Services(api.NamespaceAll).List(labels.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Services(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Service{},
		FullServiceResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: e.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueService(cur)
			},
			DeleteFunc: e.enqueueService,
		},
	)

	e.podStore.Store, e.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.addPod,
			UpdateFunc: e.updatePod,
			DeleteFunc: e.deletePod,
		},
	)

	return e
}

func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid -- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}

func watchForServices(kubeClient *kclient.Client, kv *kube2vulcand) kcache.Store {
	serviceStore, serviceController := kframework.NewInformer(
		createServiceLW(kubeClient),
		&kapi.Service{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{
			AddFunc:    kv.newService,
			DeleteFunc: kv.removeService,
			UpdateFunc: kv.updateService,
		},
	)
	go serviceController.Run(util.NeverStop)
	return serviceStore
}

func watchForServices(kubeClient *kclient.Client, ks *kube2consul) {
	var serviceController *kcontrollerFramework.Controller
	_, serviceController = framework.NewInformer(
		createServiceLW(kubeClient),
		&kapi.Service{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    ks.newService,
			DeleteFunc: ks.removeService,
			UpdateFunc: func(oldObj, newObj interface{}) { ks.newService(newObj) },
		},
	)
	serviceController.Run(util.NeverStop)
}

// Initializes the factory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:       &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:    &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		StopEverything:   make(chan struct{}),
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	c.BindPodsRateLimiter = util.NewTokenBucketRateLimiter(BindPodsQps, BindPodsBurst)

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					c.modeler.LockedAction(func() {
						c.modeler.ForgetPod(pod)
					})
				}
			},
			DeleteFunc: func(obj interface{}) {
				c.modeler.LockedAction(func() {
					switch t := obj.(type) {
					case *api.Pod:
						c.modeler.ForgetPod(t)
					case cache.DeletedFinalStateUnknown:
						c.modeler.ForgetPodByKey(t.Key)
					}
				})
			},
		},
	)

	return c
}

func watchEndpoints(kubeClient *kclient.Client, ks *kube2sky) kcache.Store {
	eStore, eController := kframework.NewInformer(
		createEndpointsLW(kubeClient),
		&kapi.Endpoints{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{
			AddFunc: ks.handleEndpointAdd,
			UpdateFunc: func(oldObj, newObj interface{}) {
				// TODO: Avoid unwanted updates.
				ks.handleEndpointAdd(newObj)
			},
		},
	)
	go eController.Run(util.NeverStop)
	return eStore
}

// NewPersistentVolumeRecycler creates a new PersistentVolumeRecycler
func NewPersistentVolumeRecycler(kubeClient client.Interface, syncPeriod time.Duration, plugins []volume.VolumePlugin) (*PersistentVolumeRecycler, error) {
	recyclerClient := NewRecyclerClient(kubeClient)
	recycler := &PersistentVolumeRecycler{
		client:     recyclerClient,
		kubeClient: kubeClient,
	}

	if err := recycler.pluginMgr.InitPlugins(plugins, recycler); err != nil {
		return nil, fmt.Errorf("Could not initialize volume plugins for PVClaimBinder: %+v", err)
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				pv := obj.(*api.PersistentVolume)
				recycler.reclaimVolume(pv)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				pv := newObj.(*api.PersistentVolume)
				recycler.reclaimVolume(pv)
			},
		},
	)

	recycler.volumeController = volumeController
	return recycler, nil
}

// blocks until it has finished syncing.
func startEndpointWatcher(f *Framework, q *endpointQueries) {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return f.Client.Endpoints(f.Namespace.Name).List(labels.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return f.Client.Endpoints(f.Namespace.Name).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Endpoints{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if e, ok := obj.(*api.Endpoints); ok {
					if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
						q.added(e)
					}
				}
			},
			UpdateFunc: func(old, cur interface{}) {
				if e, ok := cur.(*api.Endpoints); ok {
					if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
						q.added(e)
					}
				}
			},
		},
	)

	go controller.Run(q.stop)

	// Wait for the controller to sync, so that we don't count any warm-up time.
	for !controller.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}
}

// NewDockercfgTokenDeletedController returns a new *DockercfgTokenDeletedController.
func NewDockercfgTokenDeletedController(cl client.Interface, options DockercfgTokenDeletedControllerOptions) *DockercfgTokenDeletedController {
	e := &DockercfgTokenDeletedController{
		client: cl,
	}

	dockercfgSelector := fields.OneTermEqualSelector(client.SecretType, string(api.SecretTypeServiceAccountToken))
	_, e.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Secrets(api.NamespaceAll).List(labels.Everything(), dockercfgSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Secrets(api.NamespaceAll).Watch(labels.Everything(), dockercfgSelector, rv)
			},
		},
		&api.Secret{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.secretDeleted,
		},
	)

	return e
}

totalPods := itArg.podsPerMinion * minionCount
nameStr := strconv.Itoa(totalPods) + "-" + uuid
ns = "e2e-density" + nameStr
RCName = "my-hostname-density" + nameStr

// Create a listener for events
events := make([](*api.Event), 0)
_, controller := framework.NewInformer(
	&cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return c.Events(ns).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(rv string) (watch.Interface, error) {
			return c.Events(ns).Watch(labels.Everything(), fields.Everything(), rv)
		},
	},
	&api.Event{},
	time.Second*10,
	framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			events = append(events, obj.(*api.Event))
		},
	},
)
stop := make(chan struct{})
go controller.Run(stop)

// Start the replication controller
expectNoError(RunRC(c, RCName, ns, "gcr.io/google_containers/pause:go", totalPods))

By("Waiting for all events to be recorded")

	PodStatusFile:        fileHndl,
	Replicas:             totalPods,
	MaxContainerFailures: &MaxContainerFailures,
}

// Create a listener for events.
events := make([](*api.Event), 0)
_, controller := framework.NewInformer(
	&cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return c.Events(ns).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(rv string) (watch.Interface, error) {
			return c.Events(ns).Watch(labels.Everything(), fields.Everything(), rv)
		},
	},
	&api.Event{},
	0,
	framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			events = append(events, obj.(*api.Event))
		},
	},
)
stop := make(chan struct{})
go controller.Run(stop)

// Start the replication controller.
startTime := time.Now()
expectNoError(RunRC(config))
e2eStartupTime := time.Now().Sub(startTime)

// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
		burstReplicas: burstReplicas,
		expectations:  controller.NewControllerExpectations(),
		queue:         workqueue.New(),
	}

	rm.rcStore.Store, rm.rcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).List(labels.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ReplicationController{},
		FullControllerResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.enqueueController,
			UpdateFunc: func(old, cur interface{}) {
				// We only really need to do this when spec changes, but for correctness it is safer to
				// periodically double check. It is overkill for 2 reasons:
				// 1. Status.Replicas updates will cause a sync
				// 2. Every 30s we will get a full resync (this will happen anyway every 5 minutes when pods relist)
				// However, it shouldn't be that bad as rcs that haven't met expectations won't sync, and all
				// the listing is done using local stores.
				oldRC := old.(*api.ReplicationController)
				curRC := cur.(*api.ReplicationController)
				if oldRC.Status.Replicas != curRC.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for rc: %v, %d->%d", curRC.Name, oldRC.Status.Replicas, curRC.Status.Replicas)
				}
				rm.enqueueController(cur)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rm.enqueueController,
		},
	)

	rm.podStore.Store, rm.podController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return rm.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return rm.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Pod{},
		PodRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: rm.addPod,
			// This invokes the rc for every pod change, eg: host assignment. Though this might seem like overkill
			// the most frequent pod update is status, and the associated rc will only list from local storage, so
			// it should be ok.
			UpdateFunc: rm.updatePod,
			DeleteFunc: rm.deletePod,
		},
	)

	rm.syncHandler = rm.syncReplicationController
	rm.podStoreSynced = rm.podController.HasSynced
	return rm
}

func TestHammerController(t *testing.T) {
	// This test executes a bunch of requests through the fake source and
	// controller framework to make sure there's no locking/threading
	// errors. If an error happens, it should hang forever or trigger the
	// race detector.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	outputSetLock := sync.Mutex{}
	// map of key to operations done on the key
	outputSet := map[string][]string{}

	recordFunc := func(eventType string, obj interface{}) {
		key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
		if err != nil {
			t.Errorf("something wrong with key: %v", err)
			key = "oops something went wrong with the key"
		}

		// Record the operation performed on this key.
		outputSetLock.Lock()
		defer outputSetLock.Unlock()
		outputSet[key] = append(outputSet[key], eventType)
	}

	// Make a controller which just logs all the changes it gets.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { recordFunc("add", obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
			DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	wg := sync.WaitGroup{}
	const threads = 3
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func() {
			defer wg.Done()
			// Let's add a few objects to the source.
			currentNames := util.StringSet{}
			rs := rand.NewSource(rand.Int63())
			f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
			r := rand.New(rs) // Mustn't use r and f concurrently!
			for i := 0; i < 100; i++ {
				var name string
				var isNew bool
				if currentNames.Len() == 0 || r.Intn(3) == 1 {
					f.Fuzz(&name)
					isNew = true
				} else {
					l := currentNames.List()
					name = l[r.Intn(len(l))]
				}

				pod := &api.Pod{}
				f.Fuzz(pod)
				pod.ObjectMeta.Name = name
				pod.ObjectMeta.Namespace = "default"
				// Add, update, or delete randomly.
				// Note that these pods are not valid -- the fake source doesn't
				// call validation or perform any other checking.
				if isNew {
					currentNames.Insert(name)
					source.Add(pod)
					continue
				}
				switch r.Intn(2) {
				case 0:
					currentNames.Insert(name)
					source.Modify(pod)
				case 1:
					currentNames.Delete(name)
					source.Delete(pod)
				}
			}
		}()
	}
	wg.Wait()

	// Let's wait for the controller to finish processing the things we just added.
	time.Sleep(100 * time.Millisecond)
	close(stop)

	outputSetLock.Lock()
	t.Logf("got: %#v", outputSet)
}

func TestUpdate(t *testing.T) {
	// This test is going to exercise the various paths that result in a
	// call to update.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	const (
		FROM       = "from"
		ADD_MISSED = "missed the add event"
		TO         = "to"
	)

	// These are the transitions we expect to see; because this is
	// asynchronous, there are a lot of valid possibilities.
	type pair struct{ from, to string }
	allowedTransitions := map[pair]bool{
		pair{FROM, TO}:         true,
		pair{FROM, ADD_MISSED}: true,
		pair{ADD_MISSED, TO}:   true,

		// Because a resync can happen when we've already observed one
		// of the above but before the item is deleted.
		pair{TO, TO}: true,
		// Because a resync could happen before we observe an update.
		pair{FROM, FROM}: true,
	}

	var testDoneWG sync.WaitGroup

	// Make a controller that deletes things once it observes an update.
	// It calls Done() on the wait group on deletions so we can tell when
	// everything we've added has been deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*1,
		framework.ResourceEventHandlerFuncs{
			UpdateFunc: func(oldObj, newObj interface{}) {
				o, n := oldObj.(*api.Pod), newObj.(*api.Pod)
				from, to := o.Labels["check"], n.Labels["check"]
				if !allowedTransitions[pair{from, to}] {
					t.Errorf("observed transition %q -> %q for %v", from, to, n.Name)
				}
				source.Delete(n)
			},
			DeleteFunc: func(obj interface{}) {
				testDoneWG.Done()
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	pod := func(name, check string) *api.Pod {
		return &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: map[string]string{"check": check},
			},
		}
	}

	tests := []func(string){
		func(name string) {
			name = "a-" + name
			source.Add(pod(name, FROM))
			source.Modify(pod(name, TO))
		},
		func(name string) {
			name = "b-" + name
			source.Add(pod(name, FROM))
			source.ModifyDropWatch(pod(name, TO))
		},
		func(name string) {
			name = "c-" + name
			source.AddDropWatch(pod(name, FROM))
			source.Modify(pod(name, ADD_MISSED))
			source.Modify(pod(name, TO))
		},
		func(name string) {
			name = "d-" + name
			source.Add(pod(name, FROM))
		},
	}

	// run every test a few times, in parallel
	const threads = 3
	var wg sync.WaitGroup
	wg.Add(threads * len(tests))
	testDoneWG.Add(threads * len(tests))
	for i := 0; i < threads; i++ {
		for j, f := range tests {
			go func(name string, f func(string)) {
				defer wg.Done()
				f(name)
			}(fmt.Sprintf("%v-%v", i, j), f)
		}
	}
	wg.Wait()

	// Let's wait for the controller to process the things we just added.
	testDoneWG.Wait()
	close(stop)
}

func (m *NetworkManager) start(args []string) {
	config := &client.Config{
		Host: m.config.KubeUrl,
	}
	var err error
	m.Client, err = client.New(config)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	m.Controller = network.NewNetworkFactory().Create(m.Client, args)

	m.PodStore, m.PodInformer = framework.NewInformer(
		cache.NewListWatchFromClient(
			m.Client,
			string(api.ResourcePods),
			api.NamespaceAll,
			fields.Everything(),
		),
		&api.Pod{},
		m.config.ResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				m.Controller.AddPod(obj.(*api.Pod))
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				m.Controller.UpdatePod(
					oldObj.(*api.Pod), newObj.(*api.Pod))
			},
			DeleteFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					m.Controller.DeletePod(pod)
				}
			},
		},
	)

	m.NamespaceStore, m.NamespaceInformer = framework.NewInformer(
		cache.NewListWatchFromClient(
			m.Client,
			"namespaces",
			api.NamespaceAll,
			fields.Everything(),
		),
		&api.Namespace{},
		m.config.ResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				m.Controller.AddNamespace(
					obj.(*api.Namespace))
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				m.Controller.UpdateNamespace(
					oldObj.(*api.Namespace),
					newObj.(*api.Namespace))
			},
			DeleteFunc: func(obj interface{}) {
				if namespace, ok := obj.(*api.Namespace); ok {
					m.Controller.DeleteNamespace(namespace)
				}
			},
		},
	)

	m.RCStore, m.RCInformer = framework.NewInformer(
		cache.NewListWatchFromClient(
			m.Client,
			string(api.ResourceReplicationControllers),
			api.NamespaceAll,
			fields.Everything(),
		),
		&api.ReplicationController{},
		m.config.ResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				m.Controller.AddReplicationController(
					obj.(*api.ReplicationController))
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				m.Controller.UpdateReplicationController(
					oldObj.(*api.ReplicationController),
					newObj.(*api.ReplicationController))
			},
			DeleteFunc: func(obj interface{}) {
				if rc, ok := obj.(*api.ReplicationController); ok {
					m.Controller.DeleteReplicationController(rc)
				}
			},
		},
	)

	m.ServiceStore, m.ServiceInformer = framework.NewInformer(
		cache.NewListWatchFromClient(
			m.Client,
			string(api.ResourceServices),
			api.NamespaceAll,
			fields.Everything(),
		),
		&api.Service{},
		m.config.ResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				m.Controller.AddService(
					obj.(*api.Service))
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				m.Controller.UpdateService(
					oldObj.(*api.Service), newObj.(*api.Service))
			},
			DeleteFunc: func(obj interface{}) {
				if service, ok := obj.(*api.Service); ok {
					m.Controller.DeleteService(service)
				}
			},
		},
	)

	m.Controller.SetPodStore(m.PodStore)
	m.Controller.SetNamespaceStore(m.NamespaceStore)
	m.Controller.SetReplicationControllerStore(m.RCStore)
	m.Controller.SetServiceStore(m.ServiceStore)
}