Example #1
func waitForSyncedConfig(oc *exutil.CLI, name string, timeout time.Duration) error {
	dc, rcs, pods, err := deploymentInfo(oc, name)
	if err != nil {
		return err
	}
	if err := checkDeploymentInvariants(dc, rcs, pods); err != nil {
		return err
	}
	generation := dc.Generation
	return wait.PollImmediate(200*time.Millisecond, timeout, func() (bool, error) {
		// Re-fetch the config on each poll; checking the stale copy captured
		// above would never observe a status update.
		config, _, _, err := deploymentInfo(oc, name)
		if err != nil {
			return false, err
		}
		return deployutil.HasSynced(config, generation), nil
	})
}
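
Every example in this listing gates work on deployutil.HasSynced. The listing shows both a two-argument form (Examples #1-#3) and a one-argument form (Examples #4-#5), presumably from different revisions of the helper. A minimal sketch of the assumed semantics of the two-argument variant (the field comparison is an assumption, not part of this listing):

// hasSyncedSketch mirrors the assumed behaviour of deployutil.HasSynced:
// the deployment config controller has observed at least the given
// spec generation in the config's status.
func hasSyncedSketch(dc *deployapi.DeploymentConfig, generation int64) bool {
	return dc.Status.ObservedGeneration >= generation
}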
Example #2
func (c *DeploymentTriggerController) addDeploymentConfig(obj interface{}) {
	dc := obj.(*deployapi.DeploymentConfig)

	// No need to enqueue deployment configs that have no triggers or are paused.
	if len(dc.Spec.Triggers) == 0 || dc.Spec.Paused {
		return
	}
	// We don't want to compete with the main deployment config controller. Let's process this
	// config once it's synced.
	if !deployutil.HasSynced(dc, dc.Generation) {
		return
	}

	c.enqueueDeploymentConfig(dc)
}
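
For context, a handler like addDeploymentConfig is typically registered on a deployment config informer. A rough sketch of that wiring, assuming an informer variable and a delete handler on the controller (neither appears in this listing) and using the upstream Kubernetes cache package's handler struct:

// Sketch only: dcInformer and deleteDeploymentConfig are assumptions; the
// handler struct follows the Kubernetes cache package.
dcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc:    c.addDeploymentConfig,
	UpdateFunc: c.updateDeploymentConfig,
	DeleteFunc: c.deleteDeploymentConfig,
})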
Example #3
func (c *DeploymentTriggerController) updateDeploymentConfig(old, cur interface{}) {
	newDc := cur.(*deployapi.DeploymentConfig)
	oldDc := old.(*deployapi.DeploymentConfig)

	// A periodic relist will send update events for all known deployment configs.
	if newDc.ResourceVersion == oldDc.ResourceVersion {
		return
	}
	// No need to enqueue deployment configs that have no triggers or are paused.
	if len(newDc.Spec.Triggers) == 0 || newDc.Spec.Paused {
		return
	}
	// We don't want to compete with the main deployment config controller. Let's process this
	// config once it's synced. Note that this does not eliminate conflicts between the two
	// controllers because the main controller is constantly updating deployment configs as
	// owning replication controllers and pods are updated.
	if !deployutil.HasSynced(newDc, newDc.Generation) {
		return
	}
	// Enqueue the deployment config if it hasn't been deployed yet.
	if newDc.Status.LatestVersion == 0 {
		c.enqueueDeploymentConfig(newDc)
		return
	}
	// Compare deployment config templates before enqueueing. This reduces the amount of times
	// we will try to instantiate a deployment config at the expense of duplicating some of the
	// work that the instantiate endpoint is already doing but I think this is fine.
	shouldInstantiate := true
	latestRc, err := c.rcLister.ReplicationControllers(newDc.Namespace).Get(deployutil.LatestDeploymentNameForConfig(newDc))
	if err != nil {
		// If we get an error here it may be due to the rc cache lagging behind. In such a case
		// just defer to the api server (instantiate REST) where we will retry this.
		glog.V(2).Infof("Cannot get latest rc for dc %s:%d (%v) - will defer to instantiate", deployutil.LabelForDeploymentConfig(newDc), newDc.Status.LatestVersion, err)
	} else {
		initial, err := deployutil.DecodeDeploymentConfig(latestRc, c.codec)
		if err != nil {
			glog.V(2).Infof("Cannot decode dc from replication controller %s: %v", deployutil.LabelForDeployment(latestRc), err)
			return
		}
		shouldInstantiate = !reflect.DeepEqual(newDc.Spec.Template, initial.Spec.Template)
	}
	if !shouldInstantiate {
		return
	}

	c.enqueueDeploymentConfig(newDc)
}
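
The rcLister lookup above depends on the latest deployment's replication controller being named after the config. A minimal sketch of the naming convention assumed behind deployutil.LatestDeploymentNameForConfig (not confirmed by this listing):

// latestDeploymentNameSketch reflects the assumed naming scheme
// "<config name>-<latest version>" for the RC backing the latest deployment.
func latestDeploymentNameSketch(dc *deployapi.DeploymentConfig) string {
	return fmt.Sprintf("%s-%d", dc.Name, dc.Status.LatestVersion)
}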
Example #4
func TestDeployScale(t *testing.T) {
	const namespace = "test-deploy-scale"

	testutil.RequireEtcd(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	checkErr(t, err)
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "my-test-user")
	checkErr(t, err)
	osClient, _, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "my-test-user")
	checkErr(t, err)

	config := deploytest.OkDeploymentConfig(0)
	config.Spec.Triggers = []deployapi.DeploymentTriggerPolicy{}
	config.Spec.Replicas = 1

	dc, err := osClient.DeploymentConfigs(namespace).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v %#v", err, config)
	}

	condition := func() (bool, error) {
		config, err := osClient.DeploymentConfigs(namespace).Get(dc.Name)
		if err != nil {
			return false, nil
		}
		return deployutil.HasSynced(config), nil
	}
	if err := wait.PollImmediate(500*time.Millisecond, 10*time.Second, condition); err != nil {
		t.Fatalf("Deployment config never synced: %v", err)
	}

	scale, err := osClient.DeploymentConfigs(namespace).GetScale(config.Name)
	if err != nil {
		t.Fatalf("Couldn't get DeploymentConfig scale: %v", err)
	}
	if scale.Spec.Replicas != 1 {
		t.Fatalf("Expected scale.spec.replicas=1, got %#v", scale)
	}

	scaleUpdate := deployapi.ScaleFromConfig(dc)
	scaleUpdate.Spec.Replicas = 3

	updatedScale, err := osClient.DeploymentConfigs(namespace).UpdateScale(scaleUpdate)
	if err != nil {
		// If this complains about "Scale" not being registered in "v1", check the kind overrides in the API registration in SubresourceGroupVersionKind
		t.Fatalf("Couldn't update DeploymentConfig scale to %#v: %v", scaleUpdate, err)
	}
	if updatedScale.Spec.Replicas != 3 {
		t.Fatalf("Expected scale.spec.replicas=3, got %#v", scale)
	}

	persistedScale, err := osClient.DeploymentConfigs(namespace).GetScale(config.Name)
	if err != nil {
		t.Fatalf("Couldn't get DeploymentConfig scale: %v", err)
	}
	if persistedScale.Spec.Replicas != 3 {
		t.Fatalf("Expected scale.spec.replicas=3, got %#v", scale)
	}
}
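
ScaleFromConfig above is assumed to project the deployment config into a Scale subresource object. A rough sketch of that mapping under the extensions API of this era (the exact field set is an assumption, not part of this listing):

// scaleFromConfigSketch shows the assumed shape of deployapi.ScaleFromConfig:
// the Scale carries the config's identity plus its desired replica count.
func scaleFromConfigSketch(dc *deployapi.DeploymentConfig) *extensions.Scale {
	return &extensions.Scale{
		ObjectMeta: kapi.ObjectMeta{Name: dc.Name, Namespace: dc.Namespace},
		Spec:       extensions.ScaleSpec{Replicas: dc.Spec.Replicas},
	}
}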
Example #5
			g.By("verifying that both latestVersion and generation are updated")
			version, err = oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.latestVersion}\"").Output()
			version = strings.Trim(version, "\"")
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By(fmt.Sprintf("checking the latest version for %s: %s", resource, version))
			o.Expect(version).To(o.ContainSubstring("2"))
			generation, err = oc.Run("get").Args(resource, "--output=jsonpath=\"{.metadata.generation}\"").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By(fmt.Sprintf("checking the generation for %s: %s", resource, generation))
			o.Expect(generation).To(o.ContainSubstring("3"))

			g.By("verifying that observedGeneration equals generation")
			err = wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
				dc, _, _, err := deploymentInfo(oc, name)
				o.Expect(err).NotTo(o.HaveOccurred())
				return deployutil.HasSynced(dc), nil
			})
			o.Expect(err).NotTo(o.HaveOccurred())
		})
	})

	g.Describe("paused", func() {
		g.AfterEach(func() {
			failureTrap(oc, "paused", g.CurrentGinkgoTestDescription().Failed)
		})

		g.It("should disable actions on deployments [Conformance]", func() {
			resource, name, err := createFixture(oc, pausedDeploymentFixture)
			o.Expect(err).NotTo(o.HaveOccurred())

			_, rcs, _, err := deploymentInfo(oc, name)
			o.Expect(err).NotTo(o.HaveOccurred())