// deleteNamespaceFromTenant removes the namespace from its tenant's Spec.Namespaces
// list and persists the updated tenant.
func deleteNamespaceFromTenant(kubeClient client.Interface, namespace *api.Namespace) error {
	te, err := kubeClient.Tenants().Get(namespace.Tenant)
	if err != nil {
		return err
	}
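	// Drop the namespace from the tenant's namespace list, if it is present.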
	for i, n := range te.Spec.Namespaces {
		if n.Name == namespace.Name {
			te.Spec.Namespaces = append(te.Spec.Namespaces[:i], te.Spec.Namespaces[i+1:]...)
			break
		}
	}
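	// Write the updated tenant back to the server.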
	if _, err = kubeClient.Tenants().Update(te); err != nil {
		return err
	}
	return nil
}

// syncTenant orchestrates deletion of a Tenant and its associated content.
func syncTenant(kubeClient client.Interface, versions *unversioned.APIVersions, tenant *api.Tenant) (err error) {
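	// Tenants without a deletion timestamp are not being deleted; nothing to do.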
	if tenant.DeletionTimestamp == nil {
		return nil
	}

	// there may still be content for us to remove
	estimate, err := deleteAllContent(kubeClient, versions, tenant.Name, *tenant.DeletionTimestamp)
	if err != nil {
		return err
	}
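	// Content is still being removed; report the remaining estimate so the event
	// handlers can requeue the tenant.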
	if estimate > 0 {
		return &contentRemainingError{estimate}
	}

	// All content is gone; delete the tenant itself, tolerating it already being gone.
	err = kubeClient.Tenants().Delete(tenant.Name)
	if err != nil && !errors.IsNotFound(err) {
		return err
	}

	return nil
}
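
// syncTenantAndNamespace ensures the namespace is recorded on its owning tenant,
// defaulting the tenant to api.TenantDefault when unset, and persists both objects.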
func syncTenantAndNamespace(kubeClient client.Interface, namespace *api.Namespace) error {
	if namespace.Tenant == "" {
		namespace.Tenant = api.TenantDefault
	}
	te, err := kubeClient.Tenants().Get(namespace.Tenant)
	if err != nil {
		return err
	}
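	// Replace any existing entry for this namespace with the current object.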
	for i, n := range te.Spec.Namespaces {
		if n.Name == namespace.Name {
			te.Spec.Namespaces = append(te.Spec.Namespaces[:i], te.Spec.Namespaces[i+1:]...)
			break
		}
	}
	te.Spec.Namespaces = append(te.Spec.Namespaces, *namespace)

	// Persist the namespace first, then the tenant with its updated namespace list.
	if _, err = kubeClient.Namespaces().Update(namespace); err != nil {
		return err
	}
	if _, err = kubeClient.Tenants().Update(te); err != nil {
		return err
	}
	return nil
}

// NewTenantController creates a new TenantController.
func NewTenantController(kubeClient client.Interface, versions *unversioned.APIVersions, resyncPeriod time.Duration) *TenantController {
	var controller *framework.Controller
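	// The informer lists and watches all tenants; each add and update event below is
	// checked for content that still needs to be deleted.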
	_, controller = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Tenants().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kubeClient.Tenants().Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.Tenant{},
		// TODO: Can we have much longer period here?
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				tenant := obj.(*api.Tenant)
				if err := syncTenant(kubeClient, versions, tenant); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							// Estimate is the aggregate total of TerminationGracePeriodSeconds, which defaults to 30s
							// for pods.  However, most processes will terminate faster - within a few seconds, probably
							// with a peak within 5-10s.  So this division is a heuristic that avoids waiting the full
							// duration when in many cases things complete more quickly. The extra second added is to
							// ensure we never wait 0 seconds.
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in tenant %s, waiting %d seconds", tenant.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(tenant); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				tenant := newObj.(*api.Tenant)
				if err := syncTenant(kubeClient, versions, tenant); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
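							// Same back-off heuristic as in AddFunc: wait roughly half the
							// estimate, and at least one second.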
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in tenant %s, waiting %d seconds", tenant.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(tenant); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
		},
	)

	return &TenantController{
		controller: controller,
	}
}