// pause marks the deployment configuration as paused to avoid triggering new
// deployments.
func (reaper *DeploymentConfigReaper) pause(namespace, name string) (*deployapi.DeploymentConfig, error) {
	return reaper.updateDeploymentWithRetries(namespace, name, func(d *deployapi.DeploymentConfig) {
		d.Spec.RevisionHistoryLimit = kutil.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
}
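// All of these snippets pass *int32 fields (RevisionHistoryLimit, Replicas,
// OOMScoreAdj) through an Int32Ptr helper, since Go cannot take the address of
// a numeric literal directly. A minimal sketch of that helper, assuming it
// matches the usual k8s.io/kubernetes util shape:
func Int32Ptr(i int32) *int32 {
	return &i
}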
func NewHollowProxyOrDie(
	nodeName string,
	client clientset.Interface,
	endpointsConfig *proxyconfig.EndpointsConfig,
	serviceConfig *proxyconfig.ServiceConfig,
	iptInterface utiliptables.Interface,
	broadcaster record.EventBroadcaster,
	recorder record.EventRecorder,
) *HollowProxy {
	// Create and start Hollow Proxy
	config := options.NewProxyConfig()
	config.OOMScoreAdj = util.Int32Ptr(0)
	config.ResourceContainer = ""
	config.NodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}

	proxyconfig.NewSourceAPI(
		client.Core().RESTClient(),
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	hollowProxy, err := proxyapp.NewProxyServer(client, config, iptInterface, &FakeProxier{}, broadcaster, recorder, nil, "fake")
	if err != nil {
		glog.Fatalf("Error while creating ProxyServer: %v\n", err)
	}
	return &HollowProxy{
		ProxyServer: hollowProxy,
	}
}
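// NewProxyServer above is handed &FakeProxier{} so the hollow node exercises
// the proxy machinery without programming real iptables rules. A minimal
// sketch of what such a no-op proxier could look like; the exact method set
// required by the proxy interface at this API version is an assumption:
type FakeProxier struct{}

func (*FakeProxier) OnServiceUpdate(services []api.Service) {}

func (*FakeProxier) Sync() {}

func (*FakeProxier) SyncLoop() {
	// Block forever: a hollow proxy has nothing to reconcile.
	select {}
}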
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.dClient.Deployments(namespace)
	replicaSets := reaper.rsClient.ReplicaSets(namespace)
	rsReaper := &ReplicaSetReaper{reaper.rsClient, reaper.pollInterval, reaper.timeout}

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeploymentInternal(func() (*extensions.Deployment, error) {
		return deployments.Get(name, metav1.GetOptions{})
	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
		return err
	}

	// Do not cascade deletion for overlapping deployments.
	if len(deployment.Annotations[deploymentutil.OverlapAnnotation]) > 0 {
		return deployments.Delete(name, nil)
	}

	// Stop all replica sets.
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}

	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rc := range rsList.Items {
		if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			scaleGetErr, ok := err.(ScaleError)
			if errors.IsNotFound(err) || (ok && errors.IsNotFound(scaleGetErr.ActualError)) {
				continue
			}
			errList = append(errList, err)
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete deployment at the end.
	// Note: We delete deployment at the end so that if removing RSs fails, we
	// at least have the deployment to retry.
	var falseVar = false
	nonOrphanOption := api.DeleteOptions{OrphanDependents: &falseVar}
	return deployments.Delete(name, &nonOrphanOption)
}
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.Extensions().Deployments(namespace)
	replicaSets := reaper.Extensions().ReplicaSets(namespace)
	rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper)

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
		return deployments.Get(name)
	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
		return err
	}

	// Stop all replica sets.
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}

	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rc := range rsList.Items {
		if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			scaleGetErr, ok := err.(*ScaleError)
			if errors.IsNotFound(err) || (ok && errors.IsNotFound(scaleGetErr.ActualError)) {
				continue
			}
			errList = append(errList, err)
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete deployment at the end.
	// Note: We delete deployment at the end so that if removing RSs fails, we
	// at least have the deployment to retry.
	return deployments.Delete(name, nil)
}
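// Both Stop variants above rely on updateDeploymentWithRetries to apply the
// pause. A sketch of the get-mutate-update retry loop it is assumed to
// implement (the poll interval, timeout, and dClient field are assumptions):
type updateDeploymentFunc func(d *extensions.Deployment)

func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
	deployments := reaper.dClient.Deployments(namespace)
	err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		if deployment, err = deployments.Get(name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		// Apply the mutation and try to push it; on a write conflict the next
		// poll iteration re-reads the latest object and reapplies the update.
		applyUpdate(deployment)
		if deployment, err = deployments.Update(deployment); err == nil {
			return true, nil
		}
		return false, nil
	})
	return deployment, err
}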
func TestDescribeDeployment(t *testing.T) {
	fake := fake.NewSimpleClientset()
	versionedFake := versionedfake.NewSimpleClientset(&v1beta1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "bar",
			Namespace: "foo",
		},
		Spec: v1beta1.DeploymentSpec{
			Replicas: util.Int32Ptr(1),
			Selector: &metav1.LabelSelector{},
			Template: v1.PodTemplateSpec{},
		},
	})
	d := DeploymentDescriber{fake, versionedFake}
	out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if !strings.Contains(out, "bar") || !strings.Contains(out, "foo") {
		t.Errorf("unexpected out: %s", out)
	}
}
func TestDescribeEvents(t *testing.T) {
	events := &api.EventList{
		Items: []api.Event{
			{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
				},
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 1",
				FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				Count:          1,
				Type:           api.EventTypeNormal,
			},
		},
	}

	m := map[string]Describer{
		"DaemonSetDescriber": &DaemonSetDescriber{
			fake.NewSimpleClientset(&extensions.DaemonSet{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "bar",
					Namespace: "foo",
				},
			}, events),
		},
		"DeploymentDescriber": &DeploymentDescriber{
			fake.NewSimpleClientset(events),
			versionedfake.NewSimpleClientset(&v1beta1.Deployment{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "bar",
					Namespace: "foo",
				},
				Spec: v1beta1.DeploymentSpec{
					Replicas: util.Int32Ptr(1),
					Selector: &metav1.LabelSelector{},
				},
			}),
		},
		"EndpointsDescriber": &EndpointsDescriber{
			fake.NewSimpleClientset(&api.Endpoints{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "bar",
					Namespace: "foo",
				},
			}, events),
		},
		// TODO(jchaloup): add tests for:
		// - HorizontalPodAutoscalerDescriber
		// - IngressDescriber
		// - JobDescriber
		"NodeDescriber": &NodeDescriber{
			fake.NewSimpleClientset(&api.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name:     "bar",
					SelfLink: "url/url/url",
				},
			}, events),
		},
		"PersistentVolumeDescriber": &PersistentVolumeDescriber{
			fake.NewSimpleClientset(&api.PersistentVolume{
				ObjectMeta: metav1.ObjectMeta{
					Name:     "bar",
					SelfLink: "url/url/url",
				},
			}, events),
		},
		"PodDescriber": &PodDescriber{
			fake.NewSimpleClientset(&api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "bar",
					Namespace: "foo",
					SelfLink:  "url/url/url",
				},
			}, events),
		},
		"ReplicaSetDescriber": &ReplicaSetDescriber{
			fake.NewSimpleClientset(&extensions.ReplicaSet{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "bar",
					Namespace: "foo",
				},
			}, events),
		},
		"ReplicationControllerDescriber": &ReplicationControllerDescriber{
			fake.NewSimpleClientset(&api.ReplicationController{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "bar",
					Namespace: "foo",
				},
			}, events),
		},
		"Service": &ServiceDescriber{
			fake.NewSimpleClientset(&api.Service{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "bar",
					Namespace: "foo",
				},
			}, events),
		},
		"StorageClass": &StorageClassDescriber{
			fake.NewSimpleClientset(&storage.StorageClass{
				ObjectMeta: metav1.ObjectMeta{
					Name: "bar",
				},
			}, events),
		},
	}

	for name, d := range m {
		out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true})
		if err != nil {
			t.Errorf("unexpected error for %q: %v", name, err)
		}
		if !strings.Contains(out, "bar") {
			t.Errorf("unexpected out for %q: %s", name, out)
		}
		if !strings.Contains(out, "Events:") {
			t.Errorf("events not found for %q when ShowEvents=true: %s", name, out)
		}

		out, err = d.Describe("foo", "bar", DescriberSettings{ShowEvents: false})
		if err != nil {
			t.Errorf("unexpected error for %q: %s", name, err)
		}
		if !strings.Contains(out, "bar") {
			t.Errorf("unexpected out for %q: %s", name, out)
		}
		if strings.Contains(out, "Events:") {
			t.Errorf("events found for %q when ShowEvents=false: %s", name, out)
		}
	}
}
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
func testDeploymentCleanUpPolicy(f *framework.Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := adapter.FromUnversionedClient(unversionedClient)

	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rsPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  nginxImageName,
	}
	rsName := "test-cleanup-controller"
	replicas := int32(1)
	revisionHistoryLimit := util.Int32Ptr(0)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = framework.VerifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
	if err != nil {
		framework.Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-cleanup-deployment"
	framework.Logf("Creating deployment %s", deploymentName)

	pods, err := c.Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
	if err != nil {
		Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
	}
	options := api.ListOptions{
		ResourceVersion: pods.ListMeta.ResourceVersion,
	}
	stopCh := make(chan struct{})
	w, err := c.Pods(ns).Watch(options)
	Expect(err).NotTo(HaveOccurred())
	go func() {
		// There should be only one pod being created, which is the pod with the redis image.
		// The old RS shouldn't create new pods while the deployment controller is
		// adding the pod-template-hash label to its selector.
		numPodCreation := 1
		for {
			select {
			case event, _ := <-w.ResultChan():
				if event.Type != watch.Added {
					continue
				}
				numPodCreation--
				if numPodCreation < 0 {
					framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
				}
				pod, ok := event.Object.(*api.Pod)
				if !ok {
					Fail("Expect event Object to be a pod")
				}
				if pod.Spec.Containers[0].Name != redisImageName {
					framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", redisImageName, pod)
				}
			case <-stopCh:
				return
			}
		}
	}()
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)

	err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
	close(stopCh)
}