// Stop scales a replication controller via its deployment configuration down to // zero replicas, waits for all of them to get deleted and then deletes both the // replication controller and its deployment configuration. func (reaper *DeploymentConfigReaper) Stop(namespace, name string, gracePeriod *kapi.DeleteOptions) (string, error) { // If the config is already deleted, it may still have associated // deployments which didn't get cleaned up during prior calls to Stop. If // the config can't be found, still make an attempt to clean up the // deployments. // // It's important to delete the config first to avoid an undesirable side // effect which can cause the deployment to be re-triggered upon the // config's deletion. See https://github.com/projectatomic/atomic-enterprise/issues/2721 // for more details. err := reaper.oc.DeploymentConfigs(namespace).Delete(name) configNotFound := kerrors.IsNotFound(err) if err != nil && !configNotFound { return "", err } // Clean up deployments related to the config. rcList, err := reaper.kc.ReplicationControllers(namespace).List(util.ConfigSelector(name)) if err != nil { return "", err } rcReaper, err := kubectl.ReaperFor("ReplicationController", reaper.kc) if err != nil { return "", err } // If there is neither a config nor any deployments, we can return NotFound. deployments := rcList.Items if configNotFound && len(deployments) == 0 { return "", kerrors.NewNotFound("DeploymentConfig", name) } for _, rc := range deployments { if _, err = rcReaper.Stop(rc.Namespace, rc.Name, gracePeriod); err != nil { // Better not error out here... glog.Infof("Cannot delete ReplicationController %s/%s: %v", rc.Namespace, rc.Name, err) } } return fmt.Sprintf("%s stopped", name), nil }
// NewDeployer makes a new Deployer from a kube client. func NewDeployer(client kclient.Interface) *Deployer { scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client)) return &Deployer{ getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) { return client.ReplicationControllers(namespace).Get(name) }, getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) { return client.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName)) }, scaler: scaler, strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) { switch config.Template.Strategy.Type { case deployapi.DeploymentStrategyTypeRecreate: return recreate.NewRecreateDeploymentStrategy(client, latest.Codec), nil case deployapi.DeploymentStrategyTypeRolling: recreate := recreate.NewRecreateDeploymentStrategy(client, latest.Codec) return rolling.NewRollingDeploymentStrategy(config.Namespace, client, latest.Codec, recreate), nil default: return nil, fmt.Errorf("unsupported strategy type: %s", config.Template.Strategy.Type) } }, } }
// Create creates a DeploymentConfigController.
//
// It wires up a list/watch over all deployment configs feeding a FIFO queue
// via a reflector, and returns a RetryController that hands each dequeued
// config to the DeploymentConfigController. Retry policy: fatal errors are
// never retried, transient errors are retried forever, anything else gets
// exactly one retry.
func (factory *DeploymentConfigControllerFactory) Create() controller.RunnableController {
	// Watch every deployment config across all namespaces.
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	// The reflector keeps the FIFO queue populated with configs, resyncing
	// every two minutes.
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	// Events emitted by the controller are recorded against the kube API.
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	configController := &DeploymentConfigController{
		// deploymentClient abstracts replication controller access so the
		// controller itself stays testable.
		deploymentClient: &deploymentClientImpl{
			createDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Create(deployment)
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				sel := deployutil.ConfigSelector(configName)
				list, err := factory.KubeClient.ReplicationControllers(namespace).List(sel)
				if err != nil {
					return nil, err
				}
				return list, nil
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
		},
		makeDeployment: func(config *deployapi.DeploymentConfig) (*kapi.ReplicationController, error) {
			return deployutil.MakeDeployment(config, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				// no retries for a fatal error
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					return true
				}
				// no retries for anything else: allow at most one retry
				if retries.Count > 0 {
					return false
				}
				return true
			},
			// Rate-limit retries to 1/sec with a burst of 10.
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return configController.Handle(config)
		},
	}
}
// Create creates a DeployerPodController.
//
// It maintains a local cache of replication controllers (deployments) and a
// FIFO queue of deployer pods across all namespaces, and returns a
// RetryController that hands each dequeued pod to the DeployerPodController.
// Retry policy: transient errors are retried forever, anything else gets
// exactly one retry.
func (factory *DeployerPodControllerFactory) Create() controller.RunnableController {
	// Watch all replication controllers so deployment lookups can be served
	// from a local cache.
	deploymentLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	deploymentStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentLW, &kapi.ReplicationController{}, deploymentStore, 2*time.Minute).Run()

	// TODO: These should be filtered somehow to include only the primary
	// deployer pod. For now, the controller is filtering.
	// TODO: Even with the label selector, this is inefficient on the backend
	// and we should work to consolidate namespace-spanning pod watches. For
	// example, the build infra is also watching pods across namespaces.
	podLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.Pods(kapi.NamespaceAll).List(deployutil.AnyDeployerPodSelector(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.Pods(kapi.NamespaceAll).Watch(deployutil.AnyDeployerPodSelector(), fields.Everything(), resourceVersion)
		},
	}
	podQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, podQueue, 2*time.Minute).Run()

	podController := &DeployerPodController{
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				// Try to use the cache first. Trust hits and return them.
				example := &kapi.ReplicationController{ObjectMeta: kapi.ObjectMeta{Namespace: namespace, Name: name}}
				cached, exists, err := deploymentStore.Get(example)
				if err == nil && exists {
					return cached.(*kapi.ReplicationController), nil
				}
				// Double-check with the master for cache misses/errors, since those
				// are rare and API calls are expensive but more reliable.
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				return factory.KubeClient.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
			},
		},
		deployerPodsFor: func(namespace, name string) (*kapi.PodList, error) {
			return factory.KubeClient.Pods(namespace).List(deployutil.DeployerPodSelector(name), fields.Everything())
		},
		// Pods are deleted immediately (zero grace period).
		deletePod: func(namespace, name string) error {
			return factory.KubeClient.Pods(namespace).Delete(name, kapi.NewDeleteOptions(0))
		},
	}

	return &controller.RetryController{
		Queue: podQueue,
		RetryManager: controller.NewQueueRetryManager(
			podQueue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					return true
				}
				// no retries for anything else: allow at most one retry
				if retries.Count > 0 {
					return false
				}
				return true
			},
			// Rate-limit retries to 1/sec with a burst of 10.
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return podController.Handle(pod)
		},
	}
}
// RunDeploy executes the deploy command against the named deployment config.
//
// Exactly one action runs, chosen from the option flags: deploy the latest
// version, retry a failed deployment, cancel an in-flight deployment, or
// re-enable the config's triggers. With no flag set it describes the latest
// deployments instead.
func (o DeployOptions) RunDeploy() error {
	config, err := o.osClient.DeploymentConfigs(o.namespace).Get(o.deploymentConfigName)
	if err != nil {
		return err
	}

	// commandClient bundles the API operations the individual subcommands
	// need, keeping them decoupled from the concrete clients.
	commandClient := &deployCommandClientImpl{
		GetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {
			return o.kubeClient.ReplicationControllers(namespace).Get(name)
		},
		ListDeploymentsForConfigFn: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			list, err := o.kubeClient.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
			if err != nil {
				return nil, err
			}
			return list, nil
		},
		UpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
			return o.osClient.DeploymentConfigs(config.Namespace).Update(config)
		},
		UpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)
		},
		ListDeployerPodsForFn: func(namespace, deploymentName string) (*kapi.PodList, error) {
			// Select the deployer pods labeled as belonging to this deployment.
			selector, err := labels.Parse(fmt.Sprintf("%s=%s", deployapi.DeployerPodForDeploymentLabel, deploymentName))
			if err != nil {
				return nil, err
			}
			return o.kubeClient.Pods(namespace).List(selector, fields.Everything())
		},
		DeletePodFn: func(pod *kapi.Pod) error {
			return o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil)
		},
	}

	switch {
	case o.deployLatest:
		c := &deployLatestCommand{client: commandClient}
		err = c.deploy(config, o.out)
	case o.retryDeploy:
		c := &retryDeploymentCommand{client: commandClient}
		err = c.retry(config, o.out)
	case o.cancelDeploy:
		c := &cancelDeploymentCommand{client: commandClient}
		err = c.cancel(config, o.out)
	case o.enableTriggers:
		t := &triggerEnabler{
			updateConfig: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return o.osClient.DeploymentConfigs(namespace).Update(config)
			},
		}
		// NOTE(review): unlike the other cases, the result of enableTriggers
		// is not assigned to err, so RunDeploy returns nil here regardless of
		// outcome — confirm enableTriggers reports failure some other way.
		t.enableTriggers(config, o.out)
	default:
		// No action flag set: describe the latest deployments (-1 = no limit
		// on history — presumably; verify against the describer).
		describer := describe.NewLatestDeploymentsDescriber(o.osClient, o.kubeClient, -1)
		desc, err := describer.Describe(config.Namespace, config.Name)
		if err != nil {
			return err
		}
		fmt.Fprintln(o.out, desc)
	}

	return err
}