// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, versions *unversioned.APIVersions, resyncPeriod time.Duration) *NamespaceController {
	var controller *framework.Controller
	_, controller = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		// TODO: Can we have much longer period here?
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				if err := syncNamespace(kubeClient, versions, namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							// Estimate is the aggregate total of TerminationGracePeriodSeconds, which defaults to 30s
							// for pods. However, most processes will terminate faster - within a few seconds, probably
							// with a peak within 5-10s. So this division is a heuristic that avoids waiting the full
							// duration when in many cases things complete more quickly. The extra second added is to
							// ensure we never wait 0 seconds.
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				if err := syncNamespace(kubeClient, versions, namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							// Same back-off heuristic as in AddFunc: wait roughly half the
							// remaining-content estimate (at least one second), then requeue.
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
		},
	)
	return &NamespaceController{
		controller: controller,
	}
}
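The requeue back-off above is easy to check in isolation. The following is a minimal, self-contained sketch (not part of the controller); the backoff helper is hypothetical and simply mirrors the estimate.Estimate/2 + 1 arithmetic, showing that the default 30-second pod grace-period estimate yields a 16-second wait and that the wait never drops below one second.

package main

import (
	"fmt"
	"time"
)

// backoff mirrors the controller's heuristic: wait roughly half the
// remaining-content estimate, plus one second so the wait is never zero.
func backoff(estimateSeconds int64) time.Duration {
	return time.Duration(estimateSeconds/2+1) * time.Second
}

func main() {
	fmt.Println(backoff(30)) // 16s: default pod TerminationGracePeriodSeconds
	fmt.Println(backoff(0))  // 1s: floor applied even when nothing remains
}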
			failedContainers = failedContainers + v.restarts
			containerRestartNodes.Insert(p.Spec.NodeName)
		}
	}
	return failedContainers, containerRestartNodes.List()
}

var _ = Describe("DaemonRestart", func() {

	framework := Framework{BaseName: "daemonrestart"}
	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
	labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
	existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
	var ns string
	var config RCConfig
	var controller *controllerFramework.Controller
	var newPods cache.Store
	var stopCh chan struct{}

	BeforeEach(func() {
		// These tests require SSH
		// TODO(11834): Enable this test in GKE once experimental API there is switched on
		SkipUnlessProviderIs("gce")
		framework.beforeEach()
		ns = framework.Namespace.Name

		// All the restart tests need an rc and a watch on pods of the rc.
		// Additionally some of them might scale the rc during the test.
		config = RCConfig{
			Client: framework.Client,