// RunProjectCache starts the shared project cache against the provided client,
// unless it has already been started.
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
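// RunProjectCache assumes a package-level pcache variable and a ProjectCache type that
// are not part of this listing. A minimal sketch consistent with the fields assigned
// above; the real definitions likely carry additional fields and methods:
var pcache *ProjectCache

type ProjectCache struct {
	Client              client.Interface
	Store               cache.Store
	DefaultNodeSelector string
}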
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceController {
	var controller *framework.Controller
	_, controller = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				if err := syncNamespace(kubeClient, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							// Estimate is the aggregate total of TerminationGracePeriodSeconds, which defaults to 30s
							// for pods. However, most processes will terminate faster - within a few seconds, probably
							// with a peak within 5-10s. So this division is a heuristic that avoids waiting the full
							// duration when in many cases things complete more quickly. The extra second added is to
							// ensure we never wait 0 seconds.
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				if err := syncNamespace(kubeClient, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
		},
	)
	return &NamespaceController{
		controller: controller,
	}
}
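// The event handlers above type-assert the error from syncNamespace against
// *contentRemainingError, which is not defined in this listing. A minimal sketch,
// assuming Estimate is the number of seconds the controller should wait before
// requeueing; the actual type may differ:
type contentRemainingError struct {
	Estimate int64
}

func (e *contentRemainingError) Error() string {
	return fmt.Sprintf("some content remains in the namespace, estimate %d seconds before it is removed", e.Estimate)
}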
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceController {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				if err := syncNamespace(kubeClient, *namespace); err != nil {
					glog.Error(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				if err := syncNamespace(kubeClient, *namespace); err != nil {
					glog.Error(err)
				}
			},
		},
	)
	return &NamespaceController{
		controller: controller,
	}
}
// finalize removes the kubernetes finalizer from the namespace's finalizer list
// and persists the updated namespace via the Finalize subresource.
func finalize(kubeClient client.Interface, namespace api.Namespace) (*api.Namespace, error) {
	namespaceFinalize := api.Namespace{}
	namespaceFinalize.ObjectMeta = namespace.ObjectMeta
	namespaceFinalize.Spec = namespace.Spec
	finalizerSet := util.NewStringSet()
	for i := range namespace.Spec.Finalizers {
		if namespace.Spec.Finalizers[i] != api.FinalizerKubernetes {
			finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
		}
	}
	namespaceFinalize.Spec.Finalizers = make([]api.FinalizerName, 0, len(finalizerSet))
	for _, value := range finalizerSet.List() {
		namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))
	}
	return kubeClient.Namespaces().Finalize(&namespaceFinalize)
}
// NewProvision creates a new namespace provision admission control handler
func NewProvision(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return createProvision(c, store)
}
// NewFactory initializes a factory that will watch the requested routes
func (o *RouterSelection) NewFactory(oc oclient.Interface, kc kclient.Interface) *controllerfactory.RouterControllerFactory {
	factory := controllerfactory.NewDefaultRouterControllerFactory(oc, kc)
	factory.Labels = o.Labels
	factory.Fields = o.Fields
	factory.Namespace = o.Namespace
	factory.ResyncInterval = o.ResyncInterval
	switch {
	case o.NamespaceLabels != nil:
		glog.Infof("Router is only using routes in namespaces matching %s", o.NamespaceLabels)
		factory.Namespaces = namespaceNames{kc.Namespaces(), o.NamespaceLabels}
	case o.ProjectLabels != nil:
		glog.Infof("Router is only using routes in projects matching %s", o.ProjectLabels)
		factory.Namespaces = projectNames{oc.Projects(), o.ProjectLabels}
	case len(factory.Namespace) > 0:
		glog.Infof("Router is only using resources in namespace %s", factory.Namespace)
	default:
		glog.Infof("Router is including routes in all namespaces")
	}
	return factory
}
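// namespaceNames and projectNames above act as namespace-set providers for the factory
// but are not part of this listing. A hedged sketch of the shape implied by the composite
// literal namespaceNames{kc.Namespaces(), o.NamespaceLabels}; the method name and signature
// expected by factory.Namespaces are assumptions:
type namespaceNames struct {
	client   kclient.NamespaceInterface
	selector labels.Selector
}

// NamespaceNames lists namespaces matching the selector and returns their names.
func (n namespaceNames) NamespaceNames() (util.StringSet, error) {
	namespaces, err := n.client.List(n.selector, fields.Everything())
	if err != nil {
		return nil, err
	}
	names := util.NewStringSet()
	for i := range namespaces.Items {
		names.Insert(namespaces.Items[i].Name)
	}
	return names, nil
}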
// finalizeInternal updates the namespace's finalizer list to either include or
// exclude the origin finalizer, then persists the change.
func finalizeInternal(kubeClient kclient.Interface, namespace *kapi.Namespace, withOrigin bool) (*kapi.Namespace, error) {
	namespaceFinalize := kapi.Namespace{}
	namespaceFinalize.ObjectMeta = namespace.ObjectMeta
	namespaceFinalize.Spec = namespace.Spec
	finalizerSet := util.NewStringSet()
	for i := range namespace.Spec.Finalizers {
		finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
	}
	if withOrigin {
		finalizerSet.Insert(string(api.FinalizerOrigin))
	} else {
		finalizerSet.Delete(string(api.FinalizerOrigin))
	}
	namespaceFinalize.Spec.Finalizers = make([]kapi.FinalizerName, 0, len(finalizerSet))
	for _, value := range finalizerSet.List() {
		namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, kapi.FinalizerName(value))
	}
	return kubeClient.Namespaces().Finalize(&namespaceFinalize)
}
// NewExists creates a new namespace exists admission control handler
func NewExists(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		5*time.Minute,
	)
	reflector.Run()
	return &exists{
		client:  c,
		store:   store,
		Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete),
	}
}
// GetAllocatedID returns the value of the given annotation, parsed as an int64,
// taken from the pod's service account if one is set, otherwise from the pod's namespace.
func GetAllocatedID(kClient client.Interface, pod *api.Pod, annotation string) (*int64, error) {
	if len(pod.Spec.ServiceAccountName) > 0 {
		sa, err := kClient.ServiceAccounts(pod.Namespace).Get(pod.Spec.ServiceAccountName)
		if err != nil {
			return nil, err
		}
		sUID, ok := sa.Annotations[annotation]
		if !ok {
			return nil, fmt.Errorf("Unable to find annotation %s on service account %s", annotation, pod.Spec.ServiceAccountName)
		}
		return AnnotationToIntPtr(sUID)
	}
	ns, err := kClient.Namespaces().Get(pod.Namespace)
	if err != nil {
		return nil, err
	}
	sUID, ok := ns.Annotations[annotation]
	if !ok {
		return nil, fmt.Errorf("Unable to find annotation %s on namespace %s", annotation, pod.Namespace)
	}
	return AnnotationToIntPtr(sUID)
}
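// AnnotationToIntPtr is referenced above but not shown in this listing. A minimal
// sketch, assuming the annotation value is a base-10 integer (uses strconv); the
// real helper may validate or parse differently:
func AnnotationToIntPtr(annotationValue string) (*int64, error) {
	i, err := strconv.ParseInt(annotationValue, 10, 64)
	if err != nil {
		return nil, err
	}
	return &i, nil
}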
// NewLifecycle creates a new namespace lifecycle admission control handler
func NewLifecycle(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return &lifecycle{
		Handler:            admission.NewHandler(admission.Create, admission.Delete),
		client:             c,
		store:              store,
		immortalNamespaces: util.NewStringSet(api.NamespaceDefault),
	}
}
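// The lifecycle struct returned above is not defined in this listing. A minimal sketch
// consistent with the fields set in NewLifecycle, assuming admission.Handler is embedded
// under the field name Handler; the Admit method would be defined elsewhere and the real
// definition may differ:
type lifecycle struct {
	*admission.Handler
	client             client.Interface
	store              cache.Store
	immortalNamespaces util.StringSet
}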
// syncNamespace makes namespace life-cycle decisions
func syncNamespace(kubeClient client.Interface, namespace api.Namespace) (err error) {
	if namespace.DeletionTimestamp == nil {
		return nil
	}
	glog.V(4).Infof("Syncing namespace %s", namespace.Name)

	// if there is a deletion timestamp, and the status is not terminating, then update status
	if !namespace.DeletionTimestamp.IsZero() && namespace.Status.Phase != api.NamespaceTerminating {
		newNamespace := api.Namespace{}
		newNamespace.ObjectMeta = namespace.ObjectMeta
		newNamespace.Status = namespace.Status
		newNamespace.Status.Phase = api.NamespaceTerminating
		result, err := kubeClient.Namespaces().Status(&newNamespace)
		if err != nil {
			return err
		}
		// work with the latest copy so we can proceed to clean up right away without another interval
		namespace = *result
	}

	// if the namespace is already finalized, delete it
	if finalized(namespace) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
		return nil
	}

	// there may still be content for us to remove
	estimate, err := deleteAllContent(kubeClient, namespace.Name, *namespace.DeletionTimestamp)
	if err != nil {
		return err
	}
	if estimate > 0 {
		return &contentRemainingError{estimate}
	}

	// we have removed content, so mark the namespace as finalized by us
	result, err := finalize(kubeClient, namespace)
	if err != nil {
		return err
	}

	// now check whether all finalizers have been removed, so that we can delete the namespace
	if finalized(*result) {
		err = kubeClient.Namespaces().Delete(namespace.Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
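// syncNamespace relies on two helpers that are not part of this listing: finalized and
// deleteAllContent. A minimal sketch of finalized, assuming a namespace is considered
// finalized once its finalizer list is empty; deleteAllContent is omitted here because
// its behavior cannot be inferred from this listing:
func finalized(namespace api.Namespace) bool {
	return len(namespace.Spec.Finalizers) == 0
}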