// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceController {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				err := syncNamespace(kubeClient, *namespace)
				if err != nil {
					glog.Error(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				err := syncNamespace(kubeClient, *namespace)
				if err != nil {
					glog.Error(err)
				}
			},
		},
	)

	return &NamespaceController{
		controller: controller,
	}
}
// NewNamespaceController creates a new NamespaceController
func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceController {
	var controller *framework.Controller
	_, controller = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				if err := syncNamespace(kubeClient, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							// Estimate is the aggregate total of TerminationGracePeriodSeconds, which defaults to 30s
							// for pods.  However, most processes will terminate faster - within a few seconds, probably
							// with a peak within 5-10s.  So this division is a heuristic that avoids waiting the full
							// duration when in many cases things complete more quickly. The extra second added is to
							// ensure we never wait 0 seconds.
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				if err := syncNamespace(kubeClient, *namespace); err != nil {
					if estimate, ok := err.(*contentRemainingError); ok {
						go func() {
							t := estimate.Estimate/2 + 1
							glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", namespace.Name, t)
							time.Sleep(time.Duration(t) * time.Second)
							if err := controller.Requeue(namespace); err != nil {
								util.HandleError(err)
							}
						}()
						return
					}
					util.HandleError(err)
				}
			},
		},
	)

	return &NamespaceController{
		controller: controller,
	}
}
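// contentRemainingError is returned by syncNamespace above but is not shown
// in this example. A minimal sketch of a plausible definition, assuming
// Estimate is the number of seconds to wait before requeueing (hypothetical;
// the real type may differ):
type contentRemainingError struct {
	Estimate int64
}

func (e *contentRemainingError) Error() string {
	return fmt.Sprintf("some content remains in the namespace, estimate %d seconds before it is removed", e.Estimate)
}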
// deletePods deletes all pods in the namespace and returns an estimate, in
// seconds, of how long full deletion will take: the largest
// TerminationGracePeriodSeconds among the pods, or 0 once the grace deadline
// has passed and pods are deleted with no grace period.
func deletePods(kubeClient client.Interface, ns string, before util.Time) (int64, error) {
	items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return 0, err
	}
	expired := util.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
// Example 4
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
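// RunProjectCache assumes a package-level pcache variable and a ProjectCache
// type that are not shown in this example. A minimal sketch consistent with
// the constructor above (hypothetical; the real type also carries lookup
// methods):
var pcache *ProjectCache

type ProjectCache struct {
	Client              client.Interface
	Store               cache.Store
	DefaultNodeSelector string
}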
// Example 5
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, codec runtime.Codec, initialStrategy acceptingDeploymentStrategy) *RollingDeploymentStrategy {
	return &RollingDeploymentStrategy{
		codec:           codec,
		initialStrategy: initialStrategy,
		client:          client,
		apiRetryPeriod:  DefaultApiRetryPeriod,
		apiRetryTimeout: DefaultApiRetryTimeout,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, client)
			return updater.Update(config)
		},
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(client, timeout, AcceptorInterval)
		},
	}
}
// Example 6
func GetApiVersions(w io.Writer, kubeClient client.Interface) {
	apiVersions, err := kubeClient.ServerAPIVersions()
	if err != nil {
		fmt.Printf("Couldn't get available api versions from server: %v\n", err)
		os.Exit(1)
	}

	fmt.Fprintf(w, "Available Server Api Versions: %#v\n", *apiVersions)
}
// Example 7
func UpdateExistingReplicationController(c client.Interface, oldRc *api.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*api.ReplicationController, error) {
	SetNextControllerAnnotation(oldRc, newName)
	if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
		return AddDeploymentKeyToReplicationController(oldRc, c, deploymentKey, deploymentValue, namespace, out)
	} else {
		// If we didn't need to update the controller for the deployment key, we still need to write
		// the "next" controller.
		return c.ReplicationControllers(namespace).Update(oldRc)
	}
}
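// SetNextControllerAnnotation is called above but not defined in this
// example. A minimal sketch, with the annotation key assumed for
// illustration (hypothetical; check the real constant before relying on it):
const nextControllerAnnotation = "kubectl.kubernetes.io/next-controller-id"

func SetNextControllerAnnotation(rc *api.ReplicationController, name string) {
	if rc.Annotations == nil {
		rc.Annotations = map[string]string{}
	}
	rc.Annotations[nextControllerAnnotation] = name
}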
// Example 8
func GetVersion(w io.Writer, kubeClient client.Interface) {
	GetClientVersion(w)

	serverVersion, err := kubeClient.ServerVersion()
	if err != nil {
		fmt.Printf("Couldn't read version from server: %v\n", err)
		os.Exit(1)
	}

	fmt.Fprintf(w, "Server Version: %#v\n", *serverVersion)
}
func deleteResourceQuotas(kubeClient client.Interface, ns string) error {
	resourceQuotas, err := kubeClient.ResourceQuotas(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range resourceQuotas.Items {
		err := kubeClient.ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
func deleteReplicationControllers(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.ReplicationControllers(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
// Example 11
// NewResourceQuota creates a new resource quota admission control handler
func NewResourceQuota(client client.Interface) admission.Interface {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return client.ResourceQuotas(api.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.ResourceQuotas(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)
	reflector.Run()
	return createResourceQuota(client, indexer)
}
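// createResourceQuota is referenced above but not shown. A plausible sketch
// of its shape, assuming a quota type that embeds an admission.Handler
// (hypothetical field names; the provision, exists, and lifecycle handlers
// later in this section follow the same pattern):
type quota struct {
	*admission.Handler
	client  client.Interface
	indexer cache.Indexer
}

func createResourceQuota(client client.Interface, indexer cache.Indexer) admission.Interface {
	return &quota{
		Handler: admission.NewHandler(admission.Create, admission.Update),
		client:  client,
		indexer: indexer,
	}
}

// Admit would enforce quota against the indexer's cached usage; the real
// logic is elided in this sketch.
func (q *quota) Admit(a admission.Attributes) error {
	return nil
}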
func deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.PersistentVolumeClaims(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
// finalize removes the kubernetes finalizer from the namespace's finalizer list.
func finalize(kubeClient client.Interface, namespace api.Namespace) (*api.Namespace, error) {
	namespaceFinalize := api.Namespace{}
	namespaceFinalize.ObjectMeta = namespace.ObjectMeta
	namespaceFinalize.Spec = namespace.Spec
	finalizerSet := util.NewStringSet()
	for i := range namespace.Spec.Finalizers {
		if namespace.Spec.Finalizers[i] != api.FinalizerKubernetes {
			finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
		}
	}
	namespaceFinalize.Spec.Finalizers = make([]api.FinalizerName, 0, len(finalizerSet))
	for _, value := range finalizerSet.List() {
		namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))
	}
	return kubeClient.Namespaces().Finalize(&namespaceFinalize)
}
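// syncNamespace is invoked by the controller examples above but is not shown
// in full. A simplified sketch of how it could combine the helpers in this
// section, assuming deletion only proceeds for namespaces in the Terminating
// phase (the real controller handles more resource kinds and status updates):
func syncNamespace(kubeClient client.Interface, namespace api.Namespace) error {
	if namespace.Status.Phase != api.NamespaceTerminating || namespace.DeletionTimestamp == nil {
		return nil
	}
	estimate, err := deletePods(kubeClient, namespace.Name, *namespace.DeletionTimestamp)
	if err != nil {
		return err
	}
	if err := deleteResourceQuotas(kubeClient, namespace.Name); err != nil {
		return err
	}
	if err := deleteReplicationControllers(kubeClient, namespace.Name); err != nil {
		return err
	}
	if err := deletePersistentVolumeClaims(kubeClient, namespace.Name); err != nil {
		return err
	}
	if estimate > 0 {
		return &contentRemainingError{Estimate: estimate}
	}
	_, err = finalize(kubeClient, namespace)
	return err
}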
// Example 14
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister: cache.StoreToNodeLister{
			Store: cache.NewStore(cache.MetaNamespaceKeyFunc),
		},
	}
}
// Example 15
// NewLimitRanger returns an object that enforces limits based on the supplied limit function
func NewLimitRanger(client client.Interface, limitFunc LimitFunc) admission.Interface {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return client.LimitRanges(api.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.LimitRanges(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.LimitRange{}, 0)
	reflector.Run()
	return &limitRanger{
		Handler:   admission.NewHandler(admission.Create, admission.Update),
		client:    client,
		limitFunc: limitFunc,
		indexer:   indexer,
	}
}
// Example 16
// NewProvision creates a new namespace provision admission control handler
func NewProvision(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return createProvision(c, store)
}
// Example 17
// NewPodWatch creates a pod watching function which is backed by a
// FIFO/reflector pair. This avoids managing watches directly.
// The caller supplies the stop channel that terminates the watch's reflector;
// it is the caller's responsibility to close that channel to prevent leaking resources.
func NewPodWatch(client kclient.Interface, namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
	fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)
	podLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return client.Pods(namespace).List(labels.Everything(), fieldSelector)
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.Pods(namespace).Watch(labels.Everything(), fieldSelector, resourceVersion)
		},
	}

	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

	return func() *kapi.Pod {
		obj := queue.Pop()
		return obj.(*kapi.Pod)
	}
}
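// A usage sketch for NewPodWatch: the caller owns the stop channel and must
// close it so the underlying reflector exits (namespace and pod name here
// are illustrative):
func waitForPodExample(client kclient.Interface) *kapi.Pod {
	stop := make(chan struct{})
	defer close(stop)
	next := NewPodWatch(client, "default", "example-pod", "", stop)
	return next() // blocks until a state of the pod is observed
}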
// Example 18
// NewFactory initializes a factory that will watch the requested routes
func (o *RouterSelection) NewFactory(oc oclient.Interface, kc kclient.Interface) *controllerfactory.RouterControllerFactory {
	factory := controllerfactory.NewDefaultRouterControllerFactory(oc, kc)
	factory.Labels = o.Labels
	factory.Fields = o.Fields
	factory.Namespace = o.Namespace
	factory.ResyncInterval = o.ResyncInterval
	switch {
	case o.NamespaceLabels != nil:
		glog.Infof("Router is only using routes in namespaces matching %s", o.NamespaceLabels)
		factory.Namespaces = namespaceNames{kc.Namespaces(), o.NamespaceLabels}
	case o.ProjectLabels != nil:
		glog.Infof("Router is only using routes in projects matching %s", o.ProjectLabels)
		factory.Namespaces = projectNames{oc.Projects(), o.ProjectLabels}
	case len(factory.Namespace) > 0:
		glog.Infof("Router is only using resources in namespace %s", factory.Namespace)
	default:
		glog.Infof("Router is including routes in all namespaces")
	}
	return factory
}
// Example 19
// NewDeploymentConfigDescriber returns a new DeploymentConfigDescriber
func NewDeploymentConfigDescriber(client client.Interface, kclient kclient.Interface) *DeploymentConfigDescriber {
	return &DeploymentConfigDescriber{
		client: &genericDeploymentDescriberClient{
			getDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return client.DeploymentConfigs(namespace).Get(name)
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return kclient.ReplicationControllers(namespace).Get(name)
			},
			listDeploymentsFunc: func(namespace string, selector labels.Selector) (*kapi.ReplicationControllerList, error) {
				return kclient.ReplicationControllers(namespace).List(selector)
			},
			listPodsFunc: func(namespace string, selector labels.Selector) (*kapi.PodList, error) {
				return kclient.Pods(namespace).List(selector, fields.Everything())
			},
			listEventsFunc: func(deploymentConfig *deployapi.DeploymentConfig) (*kapi.EventList, error) {
				return kclient.Events(deploymentConfig.Namespace).Search(deploymentConfig)
			},
		},
	}
}
// Example 20
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	podEvictor *PodEvictor,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	allocateNodeCIDRs bool) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	if allocateNodeCIDRs && clusterCIDR == nil {
		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
	}
	return &NodeController{
		cloud:                  cloud,
		kubeClient:             kubeClient,
		recorder:               recorder,
		registerRetryCount:     registerRetryCount,
		podEvictionTimeout:     podEvictionTimeout,
		podEvictor:             podEvictor,
		nodeStatusMap:          make(map[string]nodeStatusData),
		nodeMonitorGracePeriod: nodeMonitorGracePeriod,
		nodeMonitorPeriod:      nodeMonitorPeriod,
		nodeStartupGracePeriod: nodeStartupGracePeriod,
		lookupIP:               net.LookupIP,
		now:                    util.Now,
		clusterCIDR:            clusterCIDR,
		allocateNodeCIDRs:      allocateNodeCIDRs,
	}
}
// Example 21
// NewAcceptNewlyObservedReadyPods makes a new AcceptNewlyObservedReadyPods
// from a real client.
func NewAcceptNewlyObservedReadyPods(kclient kclient.Interface, timeout time.Duration, interval time.Duration) *AcceptNewlyObservedReadyPods {
	return &AcceptNewlyObservedReadyPods{
		timeout:      timeout,
		interval:     interval,
		acceptedPods: kutil.NewStringSet(),
		getDeploymentPodStore: func(deployment *kapi.ReplicationController) (cache.Store, chan struct{}) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &deployutil.ListWatcherImpl{
				ListFunc: func() (runtime.Object, error) {
					return kclient.Pods(deployment.Namespace).List(selector, fields.Everything())
				},
				WatchFunc: func(resourceVersion string) (watch.Interface, error) {
					return kclient.Pods(deployment.Namespace).Watch(selector, fields.Everything(), resourceVersion)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
// Example 22
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler: scaler,
		codec:  codec,
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
// Example 23
func listPods(client kclient.Interface) (*kapi.PodList, error) {
	// list pods carrying the new build label
	sel, err := labels.Parse(buildapi.BuildLabel)
	if err != nil {
		return nil, err
	}
	listNew, err := client.Pods(kapi.NamespaceAll).List(sel, fields.Everything())
	if err != nil {
		return nil, err
	}
	// FIXME: list pods carrying the old build label - remove this once the deprecated label is gone
	selOld, err := labels.Parse(buildapi.DeprecatedBuildLabel)
	if err != nil {
		return nil, err
	}
	listOld, err := client.Pods(kapi.NamespaceAll).List(selOld, fields.Everything())
	if err != nil {
		return nil, err
	}
	listNew.Items = mergeWithoutDuplicates(listNew.Items, listOld.Items)
	return listNew, nil
}
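// mergeWithoutDuplicates is used above but not shown. A minimal sketch that
// deduplicates pods by namespace/name (hypothetical; the real helper may key
// differently):
func mergeWithoutDuplicates(lists ...[]kapi.Pod) []kapi.Pod {
	seen := make(map[string]struct{})
	merged := []kapi.Pod{}
	for _, list := range lists {
		for _, pod := range list {
			key := pod.Namespace + "/" + pod.Name
			if _, ok := seen[key]; ok {
				continue
			}
			seen[key] = struct{}{}
			merged = append(merged, pod)
		}
	}
	return merged
}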
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(kubeClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolumeClaim{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			// No DeleteFunc is needed; a claim requires no clean-up.
			// syncVolume handles the missing claim.
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}
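// The claim and volume controllers built above are started elsewhere. A
// minimal Run sketch, assuming framework.Controller.Run blocks until the
// stop channel is closed (the real method's signature may differ):
func (binder *PersistentVolumeClaimBinder) Run(stop <-chan struct{}) {
	go binder.volumeController.Run(stop)
	go binder.claimController.Run(stop)
}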
// Example 25
// NewExists creates a new namespace exists admission control handler
func NewExists(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		5*time.Minute,
	)
	reflector.Run()
	return &exists{
		client:  c,
		store:   store,
		Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete),
	}
}
// Example 26
// finalizeInternal updates the namespace's finalizer list to add or remove the origin finalizer.
func finalizeInternal(kubeClient kclient.Interface, namespace *kapi.Namespace, withOrigin bool) (*kapi.Namespace, error) {
	namespaceFinalize := kapi.Namespace{}
	namespaceFinalize.ObjectMeta = namespace.ObjectMeta
	namespaceFinalize.Spec = namespace.Spec

	finalizerSet := util.NewStringSet()
	for i := range namespace.Spec.Finalizers {
		finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
	}

	if withOrigin {
		finalizerSet.Insert(string(api.FinalizerOrigin))
	} else {
		finalizerSet.Delete(string(api.FinalizerOrigin))
	}

	namespaceFinalize.Spec.Finalizers = make([]kapi.FinalizerName, 0, len(finalizerSet))
	for _, value := range finalizerSet.List() {
		namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, kapi.FinalizerName(value))
	}
	return kubeClient.Namespaces().Finalize(&namespaceFinalize)
}
// Example 27
func GetAllocatedID(kClient client.Interface, pod *api.Pod, annotation string) (*int64, error) {
	if len(pod.Spec.ServiceAccountName) > 0 {
		sa, err := kClient.ServiceAccounts(pod.Namespace).Get(pod.Spec.ServiceAccountName)
		if err != nil {
			return nil, err
		}
		sUID, ok := sa.Annotations[annotation]
		if !ok {
			return nil, fmt.Errorf("Unable to find annotation %s on service account %s", annotation, pod.Spec.ServiceAccountName)
		}
		return AnnotationToIntPtr(sUID)
	} else {
		ns, err := kClient.Namespaces().Get(pod.Namespace)
		if err != nil {
			return nil, err
		}
		sUID, ok := ns.Annotations[annotation]
		if !ok {
			return nil, fmt.Errorf("Unable to find annotation %s on namespace %s", annotation, pod.Namespace)
		}
		return AnnotationToIntPtr(sUID)
	}
}
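// AnnotationToIntPtr is referenced above but not defined in this example. A
// minimal sketch, assuming the annotation value is a base-10 integer
// (requires strconv; hypothetical implementation):
func AnnotationToIntPtr(s string) (*int64, error) {
	i, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return nil, err
	}
	return &i, nil
}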
// Example 28
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface) *Deployer {
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &Deployer{
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Template.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, latest.Codec), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, latest.Codec)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, latest.Codec, recreate), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Template.Strategy.Type)
			}
		},
	}
}
// Example 29
// NewLifecycle creates a new namespace lifecycle admission control handler
func NewLifecycle(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return &lifecycle{
		Handler:            admission.NewHandler(admission.Create, admission.Delete),
		client:             c,
		store:              store,
		immortalNamespaces: util.NewStringSet(api.NamespaceDefault),
	}
}
// Example 30
// NewConstraint creates a new SCC constraint admission plugin.
func NewConstraint(kclient client.Interface) kadmission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kclient.SecurityContextConstraints().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kclient.SecurityContextConstraints().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.SecurityContextConstraints{},
		store,
		0,
	)
	reflector.Run()

	return &constraint{
		Handler: kadmission.NewHandler(kadmission.Create),
		client:  kclient,
		store:   store,
	}
}