// NewNamespaceManager creates a new NamespaceManager
func NewNamespaceManager(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceManager {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				syncNamespace(kubeClient, *namespace)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				syncNamespace(kubeClient, *namespace)
			},
		},
	)

	return &NamespaceManager{
		controller: controller,
	}
}
func NewFirstContainerReady(kclient kclient.Interface, timeout time.Duration, interval time.Duration) *FirstContainerReady {
	return &FirstContainerReady{
		timeout:  timeout,
		interval: interval,
		podsForDeployment: func(deployment *kapi.ReplicationController) (*kapi.PodList, error) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			return kclient.Pods(deployment.Namespace).List(selector, fields.Everything())
		},
		getPodStore: func(namespace, name string) (cache.Store, chan struct{}) {
			sel, _ := fields.ParseSelector("metadata.name=" + name)
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &deployutil.ListWatcherImpl{
				ListFunc: func() (runtime.Object, error) {
					return kclient.Pods(namespace).List(labels.Everything(), sel)
				},
				WatchFunc: func(resourceVersion string) (watch.Interface, error) {
					return kclient.Pods(namespace).Watch(labels.Everything(), sel, resourceVersion)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
// Update performs a rolling update of a collection of pods.
// 'name' points to a replication controller.
// 'client' is used for updating pods.
// 'updatePeriod' is the time between pod updates.
func Update(name string, client client.Interface, updatePeriod time.Duration) error {
	controller, err := client.GetReplicationController(name)
	if err != nil {
		return err
	}
	s := labels.Set(controller.DesiredState.ReplicaSelector).AsSelector()
	podList, err := client.ListPods(s)
	if err != nil {
		return err
	}
	expected := len(podList.Items)
	if expected == 0 {
		return nil
	}
	for _, pod := range podList.Items {
		// We delete the pod here, the controller will recreate it. This will result in pulling
		// a new Docker image. This isn't a full "update" but it's what we support for now.
		err = client.DeletePod(pod.ID)
		if err != nil {
			return err
		}
		time.Sleep(updatePeriod)
	}
	return wait.Poll(time.Second*5, time.Second*300, func() (bool, error) {
		podList, err := client.ListPods(s)
		if err != nil {
			return false, err
		}
		return len(podList.Items) == expected, nil
	})
}
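// The sketch below is not part of the original source; it only illustrates how a
// caller might drive the rolling Update above. The controller name, the ten-second
// update period, and the pre-configured client are assumptions for illustration.
func exampleRollingUpdate(c client.Interface) error {
	// Delete the controller's pods one at a time, pausing 10s between deletions,
	// then wait for the pod count to return to the expected number.
	return Update("my-controller", c, 10*time.Second)
}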
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
func GetApiVersions(w io.Writer, kubeClient client.Interface) {
	apiVersions, err := kubeClient.ServerAPIVersions()
	if err != nil {
		fmt.Printf("Couldn't get available api versions from server: %v\n", err)
		os.Exit(1)
	}
	fmt.Fprintf(w, "Available Server Api Versions: %#v\n", *apiVersions)
}
// DeleteController deletes a replication controller named 'name', requires that the controller
// already be stopped
func DeleteController(name string, client client.Interface) error {
	controller, err := client.GetReplicationController(name)
	if err != nil {
		return err
	}
	if controller.DesiredState.Replicas != 0 {
		return fmt.Errorf("controller has non-zero replicas (%d), please stop it first", controller.DesiredState.Replicas)
	}
	return client.DeleteReplicationController(name)
}
func GetVersion(w io.Writer, kubeClient client.Interface) {
	serverVersion, err := kubeClient.ServerVersion()
	if err != nil {
		fmt.Printf("Couldn't read version from server: %v\n", err)
		os.Exit(1)
	}
	GetClientVersion(w)
	fmt.Fprintf(w, "Server Version: %#v\n", serverVersion)
}
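// A minimal usage sketch, not from the original source: both version helpers write
// to any io.Writer, so printing to standard output only requires os.Stdout. The
// pre-configured kubeClient is an assumption.
func examplePrintVersions(kubeClient client.Interface) {
	GetVersion(os.Stdout, kubeClient)     // client and server versions
	GetApiVersions(os.Stdout, kubeClient) // API versions supported by the server
}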
func UpdateExistingReplicationController(c client.Interface, oldRc *api.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*api.ReplicationController, error) {
	SetNextControllerAnnotation(oldRc, newName)
	if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
		return AddDeploymentKeyToReplicationController(oldRc, c, deploymentKey, deploymentValue, namespace, out)
	} else {
		// If we didn't need to update the controller for the deployment key, we still need to write
		// the "next" controller.
		return c.ReplicationControllers(namespace).Update(oldRc)
	}
}
// DeleteController deletes a replication controller named 'name', requires that the controller
// already be stopped.
func DeleteController(ctx api.Context, name string, client client.Interface) error {
	// TODO remove ctx in favor of just namespace string
	controller, err := client.ReplicationControllers(api.Namespace(ctx)).Get(name)
	if err != nil {
		return err
	}
	if controller.Spec.Replicas != 0 {
		return fmt.Errorf("controller has non-zero replicas (%d), please stop it first", controller.Spec.Replicas)
	}
	return client.ReplicationControllers(api.Namespace(ctx)).Delete(name)
}
func deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.PersistentVolumeClaims(ns).Delete(items.Items[i].Name)
		if err != nil {
			return err
		}
	}
	return nil
}
func deleteReplicationControllers(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.ReplicationControllers(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)
		if err != nil {
			return err
		}
	}
	return nil
}
func deleteResourceQuotas(kubeClient client.Interface, ns string) error {
	resourceQuotas, err := kubeClient.ResourceQuotas(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range resourceQuotas.Items {
		err := kubeClient.ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name)
		if err != nil {
			return err
		}
	}
	return nil
}
// NewResourceQuota creates a new resource quota admission control handler
func NewResourceQuota(client client.Interface) admission.Interface {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return client.ResourceQuotas(api.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.ResourceQuotas(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)
	reflector.Run()
	return createResourceQuota(client, indexer)
}
// NewLimitRanger returns an object that enforces limits based on the supplied limit function
func NewLimitRanger(client client.Interface, limitFunc LimitFunc) admission.Interface {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return client.LimitRanges(api.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.LimitRanges(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.LimitRange{}, 0)
	reflector.Run()
	return &limitRanger{client: client, limitFunc: limitFunc, indexer: indexer}
}
func deleteSecrets(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.Secrets(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.Secrets(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
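// A hedged sketch, not from the original source, of how the deletion helpers above
// might be composed when tearing down a namespace's content. The helper name and the
// exact set and order of resource types are assumptions for illustration only.
func exampleDeleteAllContent(kubeClient client.Interface, ns string) error {
	if err := deleteReplicationControllers(kubeClient, ns); err != nil {
		return err
	}
	if err := deletePersistentVolumeClaims(kubeClient, ns); err != nil {
		return err
	}
	if err := deleteResourceQuotas(kubeClient, ns); err != nil {
		return err
	}
	return deleteSecrets(kubeClient, ns)
}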
// New returns a new service controller to keep cloud provider service resources
// (like external load balancers) in sync with the registry.
func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})

	return &ServiceController{
		cloud:            cloud,
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
	}
}
// NewReplicationManager creates a new ReplicationManager.
func NewReplicationManager(kubeClient client.Interface) *ReplicationManager {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	rm := &ReplicationManager{
		kubeClient: kubeClient,
		podControl: RealPodControl{
			kubeClient: kubeClient,
			recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
		},
	}
	rm.syncHandler = rm.syncReplicationController
	return rm
}
// finalize will finalize the namespace for kubernetes
func finalize(kubeClient client.Interface, namespace api.Namespace) (*api.Namespace, error) {
	namespaceFinalize := api.Namespace{}
	namespaceFinalize.ObjectMeta = namespace.ObjectMeta
	namespaceFinalize.Spec = namespace.Spec

	finalizerSet := util.NewStringSet()
	for i := range namespace.Spec.Finalizers {
		if namespace.Spec.Finalizers[i] != api.FinalizerKubernetes {
			finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
		}
	}

	namespaceFinalize.Spec.Finalizers = make([]api.FinalizerName, 0, len(finalizerSet))
	for _, value := range finalizerSet.List() {
		namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))
	}
	return kubeClient.Namespaces().Finalize(&namespaceFinalize)
}
// RunController creates a new replication controller named 'name' which creates 'replicas' pods running 'image'.
func RunController(ctx api.Context, image, name string, replicas int, client client.Interface, portSpec string, servicePort int) error {
	// TODO replace ctx with a namespace string
	if servicePort > 0 && !util.IsDNSLabel(name) {
		return fmt.Errorf("service creation requested, but an invalid name for a service was provided (%s). Service names must be valid DNS labels.", name)
	}
	ports, err := portsFromString(portSpec)
	if err != nil {
		return err
	}
	controller := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"name": name,
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  strings.ToLower(name),
							Image: image,
							Ports: ports,
						},
					},
				},
			},
		},
	}

	controllerOut, err := client.ReplicationControllers(api.Namespace(ctx)).Create(controller)
	if err != nil {
		return err
	}
	data, err := yaml.Marshal(controllerOut)
	if err != nil {
		return err
	}
	fmt.Print(string(data))

	if servicePort > 0 {
		svc, err := createService(ctx, name, servicePort, client)
		if err != nil {
			return err
		}
		data, err = yaml.Marshal(svc)
		if err != nil {
			return err
		}
		// Use Print rather than Printf so the marshaled YAML is never treated as a format string.
		fmt.Print(string(data))
	}
	return nil
}
// ResizeController resizes a controller named 'name' by setting replicas to 'replicas'
func ResizeController(name string, replicas int, client client.Interface) error {
	controller, err := client.GetReplicationController(name)
	if err != nil {
		return err
	}
	controller.DesiredState.Replicas = replicas
	controllerOut, err := client.UpdateReplicationController(controller)
	if err != nil {
		return err
	}
	data, err := yaml.Marshal(controllerOut)
	if err != nil {
		return err
	}
	fmt.Print(string(data))
	return nil
}
// NewProvision creates a new namespace provision admission control handler
func NewProvision(c client.Interface) admission.Interface {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		store,
		0,
	)
	reflector.Run()
	return createProvision(c, store)
}
// ResizeController resizes a controller named 'name' by setting replicas to 'replicas'.
func ResizeController(ctx api.Context, name string, replicas int, client client.Interface) error {
	// TODO ctx is not needed, and should just be a namespace
	controller, err := client.ReplicationControllers(api.Namespace(ctx)).Get(name)
	if err != nil {
		return err
	}
	controller.Spec.Replicas = replicas
	controllerOut, err := client.ReplicationControllers(api.Namespace(ctx)).Update(controller)
	if err != nil {
		return err
	}
	data, err := yaml.Marshal(controllerOut)
	if err != nil {
		return err
	}
	fmt.Print(string(data))
	return nil
}
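// The sketch below is not part of the original source; it shows the intended
// stop-then-delete flow for the context-based helpers above. DeleteController refuses
// to remove a controller with non-zero replicas, so the caller first resizes to zero.
// The controller name and the pre-built ctx/client are assumptions for illustration.
func exampleStopAndDelete(ctx api.Context, c client.Interface) error {
	// Scale the controller down to zero replicas...
	if err := ResizeController(ctx, "my-controller", 0, c); err != nil {
		return err
	}
	// ...then delete it once it is stopped.
	return DeleteController(ctx, "my-controller", c)
}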
// RunController creates a new replication controller named 'name' which creates 'replicas' pods running 'image'
func RunController(image, name string, replicas int, client client.Interface, portSpec string, servicePort int) error {
	controller := api.ReplicationController{
		JSONBase: api.JSONBase{
			ID: name,
		},
		DesiredState: api.ReplicationControllerState{
			Replicas: replicas,
			ReplicaSelector: map[string]string{
				"name": name,
			},
			PodTemplate: api.PodTemplate{
				DesiredState: api.PodState{
					Manifest: api.ContainerManifest{
						Version: "v1beta2",
						Containers: []api.Container{
							{
								Name:  strings.ToLower(name),
								Image: image,
								Ports: makePorts(portSpec),
							},
						},
					},
				},
				Labels: map[string]string{
					"name": name,
				},
			},
		},
		Labels: map[string]string{
			"name": name,
		},
	}

	controllerOut, err := client.CreateReplicationController(controller)
	if err != nil {
		return err
	}
	data, err := yaml.Marshal(controllerOut)
	if err != nil {
		return err
	}
	fmt.Print(string(data))

	if servicePort > 0 {
		svc, err := createService(name, servicePort, client)
		if err != nil {
			return err
		}
		data, err = yaml.Marshal(svc)
		if err != nil {
			return err
		}
		// Use Print rather than Printf so the marshaled YAML is never treated as a format string.
		fmt.Print(string(data))
	}
	return nil
}
// RunController creates a new replication controller named 'name' which creates 'replicas' pods running 'image'.
func RunController(image, name string, replicas int, client client.Interface, portSpec string, servicePort int) error {
	if servicePort > 0 && !util.IsDNSLabel(name) {
		return fmt.Errorf("service creation requested, but an invalid name for a service was provided (%s). Service names must be valid DNS labels.", name)
	}
	controller := &api.ReplicationController{
		JSONBase: api.JSONBase{
			ID: name,
		},
		DesiredState: api.ReplicationControllerState{
			Replicas: replicas,
			ReplicaSelector: map[string]string{
				"replicationController": name,
			},
			PodTemplate: api.PodTemplate{
				DesiredState: api.PodState{
					Manifest: api.ContainerManifest{
						Version: "v1beta2",
						Containers: []api.Container{
							{
								Name:  strings.ToLower(name),
								Image: image,
								Ports: portsFromString(portSpec),
							},
						},
					},
				},
				Labels: map[string]string{
					"replicationController": name,
				},
			},
		},
	}

	controllerOut, err := client.CreateReplicationController(controller)
	if err != nil {
		return err
	}
	data, err := yaml.Marshal(controllerOut)
	if err != nil {
		return err
	}
	fmt.Print(string(data))

	if servicePort > 0 {
		svc, err := createService(name, servicePort, client)
		if err != nil {
			return err
		}
		data, err = yaml.Marshal(svc)
		if err != nil {
			return err
		}
		// Use Print rather than Printf so the marshaled YAML is never treated as a format string.
		fmt.Print(string(data))
	}
	return nil
}
// NewPodWatch creates a pod watching function which is backed by a
// FIFO/reflector pair. This avoids managing watches directly.
// The provided stop channel shuts down the watch's reflector; it is the caller's
// responsibility to close it (for example with defer) to prevent leaking resources.
func NewPodWatch(client kclient.Interface, namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
	fieldSelector, _ := fields.ParseSelector("metadata.name=" + name)
	podLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return client.Pods(namespace).List(labels.Everything(), fieldSelector)
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.Pods(namespace).Watch(labels.Everything(), fieldSelector, resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, queue, 1*time.Minute).RunUntil(stopChannel)

	return func() *kapi.Pod {
		obj := queue.Pop()
		return obj.(*kapi.Pod)
	}
}
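// A minimal usage sketch, not part of the original source: the caller owns the stop
// channel and closes it when done so the reflector goroutine exits. The namespace,
// pod name, empty resource version, and pre-built client are assumptions.
func exampleWaitForPod(c kclient.Interface) *kapi.Pod {
	stop := make(chan struct{})
	defer close(stop) // shut down the underlying reflector when finished

	next := NewPodWatch(c, "default", "my-pod", "", stop)
	return next() // blocks until the next observed state of the pod is available
}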
// Update performs a rolling update of a collection of pods.
// 'name' points to a replication controller.
// 'client' is used for updating pods.
// 'updatePeriod' is the time between pod updates.
// 'imageName' is the new image to update for the template. This will work
// with the first container in the pod. There is no support yet for
// updating more complex replication controllers. If this is blank then no
// update of the image is performed.
func Update(ctx api.Context, name string, client client.Interface, updatePeriod time.Duration, imageName string) error {
	// TODO ctx is not needed as input to this function, should just be 'namespace'
	controller, err := client.ReplicationControllers(api.Namespace(ctx)).Get(name)
	if err != nil {
		return err
	}
	if len(imageName) != 0 {
		controller.Spec.Template.Spec.Containers[0].Image = imageName
		controller, err = client.ReplicationControllers(controller.Namespace).Update(controller)
		if err != nil {
			return err
		}
	}
	s := labels.Set(controller.Spec.Selector).AsSelector()
	podList, err := client.Pods(api.Namespace(ctx)).List(s)
	if err != nil {
		return err
	}
	expected := len(podList.Items)
	if expected == 0 {
		return nil
	}
	for _, pod := range podList.Items {
		// We delete the pod here, the controller will recreate it. This will result in pulling
		// a new Docker image. This isn't a full "update" but it's what we support for now.
		err = client.Pods(pod.Namespace).Delete(pod.Name)
		if err != nil {
			return err
		}
		time.Sleep(updatePeriod)
	}
	return wait.Poll(time.Second*5, time.Second*300, func() (bool, error) {
		podList, err := client.Pods(api.Namespace(ctx)).List(s)
		if err != nil {
			return false, err
		}
		return len(podList.Items) == expected, nil
	})
}
// NewDeploymentConfigDescriber returns a new DeploymentConfigDescriber
func NewDeploymentConfigDescriber(client client.Interface, kclient kclient.Interface) *DeploymentConfigDescriber {
	return &DeploymentConfigDescriber{
		client: &genericDeploymentDescriberClient{
			getDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return client.DeploymentConfigs(namespace).Get(name)
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return kclient.ReplicationControllers(namespace).Get(name)
			},
			listDeploymentsFunc: func(namespace string, selector labels.Selector) (*kapi.ReplicationControllerList, error) {
				return kclient.ReplicationControllers(namespace).List(selector)
			},
			listPodsFunc: func(namespace string, selector labels.Selector) (*kapi.PodList, error) {
				return kclient.Pods(namespace).List(selector, fields.Everything())
			},
			listEventsFunc: func(deploymentConfig *deployapi.DeploymentConfig) (*kapi.EventList, error) {
				return kclient.Events(deploymentConfig.Namespace).Search(deploymentConfig)
			},
		},
	}
}
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	matchRE string,
	nodes []string,
	staticResources *api.NodeResources,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	deletingPodsRateLimiter util.RateLimiter,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterName string) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	return &NodeController{
		cloud:                   cloud,
		matchRE:                 matchRE,
		nodes:                   nodes,
		staticResources:         staticResources,
		kubeClient:              kubeClient,
		recorder:                recorder,
		registerRetryCount:      registerRetryCount,
		podEvictionTimeout:      podEvictionTimeout,
		deletingPodsRateLimiter: deletingPodsRateLimiter,
		nodeStatusMap:           make(map[string]nodeStatusData),
		nodeMonitorGracePeriod:  nodeMonitorGracePeriod,
		nodeMonitorPeriod:       nodeMonitorPeriod,
		nodeStartupGracePeriod:  nodeStartupGracePeriod,
		lookupIP:                net.LookupIP,
		now:                     util.Now,
		clusterName:             clusterName,
	}
}
// NewNodeController returns a new node controller to sync instances from cloudprovider.
func NewNodeController(
	cloud cloudprovider.Interface,
	kubeClient client.Interface,
	registerRetryCount int,
	podEvictionTimeout time.Duration,
	podEvictor *PodEvictor,
	nodeMonitorGracePeriod time.Duration,
	nodeStartupGracePeriod time.Duration,
	nodeMonitorPeriod time.Duration,
	clusterCIDR *net.IPNet,
	allocateNodeCIDRs bool) *NodeController {
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	} else {
		glog.Infof("No api server defined - no events will be sent to API server.")
	}
	if allocateNodeCIDRs && clusterCIDR == nil {
		glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.")
	}
	return &NodeController{
		cloud:                  cloud,
		kubeClient:             kubeClient,
		recorder:               recorder,
		registerRetryCount:     registerRetryCount,
		podEvictionTimeout:     podEvictionTimeout,
		podEvictor:             podEvictor,
		nodeStatusMap:          make(map[string]nodeStatusData),
		nodeMonitorGracePeriod: nodeMonitorGracePeriod,
		nodeMonitorPeriod:      nodeMonitorPeriod,
		nodeStartupGracePeriod: nodeStartupGracePeriod,
		lookupIP:               net.LookupIP,
		now:                    util.Now,
		clusterCIDR:            clusterCIDR,
		allocateNodeCIDRs:      allocateNodeCIDRs,
	}
}
// finalizeInternal will update the namespace finalizer list to either have or not have origin finalizer
func finalizeInternal(kubeClient kclient.Interface, namespace *kapi.Namespace, withOrigin bool) (*kapi.Namespace, error) {
	namespaceFinalize := kapi.Namespace{}
	namespaceFinalize.ObjectMeta = namespace.ObjectMeta
	namespaceFinalize.Spec = namespace.Spec

	finalizerSet := util.NewStringSet()
	for i := range namespace.Spec.Finalizers {
		finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
	}
	if withOrigin {
		finalizerSet.Insert(string(api.FinalizerOrigin))
	} else {
		finalizerSet.Delete(string(api.FinalizerOrigin))
	}

	namespaceFinalize.Spec.Finalizers = make([]kapi.FinalizerName, 0, len(finalizerSet))
	for _, value := range finalizerSet.List() {
		namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, kapi.FinalizerName(value))
	}
	return kubeClient.Namespaces().Finalize(&namespaceFinalize)
}