func crossScaled(c unversioned.Interface, oldRC, newRC *api.ReplicationController, settleDuration time.Duration) wait.ConditionFunc {
	oldRCReady := unversioned.ControllerHasDesiredReplicas(c, oldRC)
	newRCReady := unversioned.ControllerHasDesiredReplicas(c, newRC)
	oldRCPodsReady := desiredPodsAreReady(c, oldRC, settleDuration)
	newRCPodsReady := desiredPodsAreReady(c, newRC, settleDuration)
	return func() (done bool, err error) {

		if ok, err := oldRCReady(); err != nil || !ok {
			return ok, err
		}

		if ok, err := newRCReady(); err != nil || !ok {
			return ok, err
		}
		if ok, err := oldRCPodsReady(); err != nil || !ok {
			return ok, err
		}

		if ok, err := newRCPodsReady(); err != nil || !ok {
			return ok, err
		}

		return true, nil
	}
}
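The condition returned by crossScaled is meant to be driven by a poller. Below is a minimal usage sketch, not part of the original source; the waitForCrossScale name and the interval, timeout, and settle values are illustrative assumptions.

// waitForCrossScale is a hypothetical helper that polls the combined
// condition until both controllers and their pods have settled.
func waitForCrossScale(c unversioned.Interface, oldRC, newRC *api.ReplicationController) error {
	cond := crossScaled(c, oldRC, newRC, 3*time.Second)
	// Check once per second and give up after five minutes (assumed values).
	return wait.Poll(time.Second, 5*time.Minute, cond)
}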
Example #2
// Scale updates a ReplicationController to a new size, with an optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		rc, err := scaler.c.ReplicationControllers(namespace).Get(name)
		if err != nil {
			return err
		}
		err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.ControllerHasDesiredReplicas(scaler.c, rc))
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("timed out waiting for %q to be synced", name)
		}
		return err
	}
	return nil
}
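A hedged usage sketch for the Scale method above; the namespace, controller name, replica count, and retry/wait values are illustrative assumptions, not taken from the original source.

// scaleFrontendRC is a hypothetical caller of Scale. Passing nil preconditions
// lets Scale substitute the permissive default, and the non-nil waitForReplicas
// makes the call block until the controller reports the desired replica count.
func scaleFrontendRC(scaler *ReplicationControllerScaler) error {
	retry := &RetryParams{Interval: time.Second, Timeout: time.Minute}
	waitForReplicas := &RetryParams{Interval: time.Second, Timeout: 5 * time.Minute}
	return scaler.Scale("default", "frontend", 3, nil, retry, waitForReplicas)
}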
Example #3
File: scale.go Project: porcelli/origin
// Scale updates a replication controller created by the DeploymentConfig with the provided namespace/name,
// to a new size, with an optional precondition check (if preconditions is not nil), optional retries (if retry
// is not nil), and then optionally waits for its replica count to reach the new value (if waitForReplicas is not nil).
func (scaler *DeploymentConfigScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {
	if preconditions == nil {
		preconditions = &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := kubectl.ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		if scaleErr, ok := err.(kubectl.ControllerScaleError); ok && kerrors.IsNotFound(scaleErr.ActualError) {
			glog.Infof("No deployment found for dc/%s. Scaling the deployment configuration template...", name)
			dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name)
			if err != nil {
				return err
			}
			dc.Template.ControllerTemplate.Replicas = int(newSize)

			if _, err := scaler.dcClient.DeploymentConfigs(namespace).Update(dc); err != nil {
				return err
			}
			return nil
		}
		return err
	}
	if waitForReplicas != nil {
		rc, err := scaler.rcClient.ReplicationControllers(namespace).Get(name)
		if err != nil {
			return err
		}
		return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, kclient.ControllerHasDesiredReplicas(scaler.clientInterface, rc))
	}
	return nil
}
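A brief sketch of a precondition-guarded call to the method above; the project, controller name, sizes, and resourceVersion parameter are assumptions for illustration, not part of the original source.

// scaleDeployment is a hypothetical caller that supplies an explicit
// precondition; ScaleCondition is expected to refuse the scale if the current
// replica count or resource version no longer match.
func scaleDeployment(scaler *DeploymentConfigScaler, resourceVersion string) error {
	pre := &kubectl.ScalePrecondition{Size: 2, ResourceVersion: resourceVersion}
	retry := &kubectl.RetryParams{Interval: time.Second, Timeout: time.Minute}
	return scaler.Scale("myproject", "frontend", 5, pre, retry, nil)
}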
Example #4
func scaled(c unversioned.Interface, rc *api.ReplicationController, settleDuration time.Duration) wait.ConditionFunc {
	rcReady := unversioned.ControllerHasDesiredReplicas(c, rc)
	rcPodsReady := desiredPodsAreReady(c, rc, settleDuration)
	return func() (done bool, err error) {

		if ok, err := rcReady(); err != nil || !ok {
			return ok, err
		}

		if ok, err := rcPodsReady(); err != nil || !ok {
			return ok, err
		}

		return true, nil
	}
}
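crossScaled and scaled chain their sub-conditions by hand; a generic combinator, sketched below and not part of the original source, expresses the same short-circuiting pattern for any number of conditions.

// allConditions returns a wait.ConditionFunc that is done only when every
// supplied condition is done, stopping at the first error or unmet condition.
func allConditions(conds ...wait.ConditionFunc) wait.ConditionFunc {
	return func() (done bool, err error) {
		for _, cond := range conds {
			if ok, err := cond(); err != nil || !ok {
				return ok, err
			}
		}
		return true, nil
	}
}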
Example #5
func runReplicationControllerTest(c *client.Client) {
	t := time.Now()
	clientAPIVersion := c.APIVersion().String()
	data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done reading config file, took %v", time.Since(t))
	t = time.Now()
	var controller api.ReplicationController
	if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	updated, err := c.ReplicationControllers("test").Create(&controller)
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers, took %v", time.Since(t))
	t = time.Now()

	// In practice the controller doesn't need 60s to create a handful of pods, but network latencies on CI
	// systems have been observed to vary unpredictably, so give the controller enough time to create pods.
	// Our e2e scalability tests will catch controllers that are *actually* slow.
	if err := wait.Poll(time.Second, longTestTimeout, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}
	glog.Infof("Done creating replicas, took %v", time.Since(t))
	t = time.Now()

	// Poll until we can retrieve the status of all pods matching the given label selector from their nodes.
	// This involves 3 operations:
	//	- The scheduler must assign all pods to a node
	//	- The assignment must be reflected in a `List` operation against the apiserver, for labels matching the selector
	//	- We need to be able to query the kubelet on that node for information about the pod
	if err := wait.Poll(
		time.Second, longTestTimeout, podsOnNodes(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods verified on nodes, took %v", time.Since(t))
}
Example #6
// cleanupWithClients performs cleanup tasks after the rolling update:
// update-process annotations are removed from oldRc and newRc, and the
// CleanupPolicy on config is executed.
func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
	// Clean up annotations
	var err error
	newRc, err = r.c.ReplicationControllers(r.ns).Get(newRc.Name)
	if err != nil {
		return err
	}
	applyUpdate := func(rc *api.ReplicationController) {
		delete(rc.Annotations, sourceIdAnnotation)
		delete(rc.Annotations, desiredReplicasAnnotation)
	}
	if newRc, err = updateRcWithRetries(r.c, r.ns, newRc, applyUpdate); err != nil {
		return err
	}

	if err = wait.Poll(config.Interval, config.Timeout, client.ControllerHasDesiredReplicas(r.c, newRc)); err != nil {
		return err
	}
	newRc, err = r.c.ReplicationControllers(r.ns).Get(newRc.Name)
	if err != nil {
		return err
	}

	switch config.CleanupPolicy {
	case DeleteRollingUpdateCleanupPolicy:
		// delete old rc
		fmt.Fprintf(config.Out, "Update succeeded. Deleting %s\n", oldRc.Name)
		return r.c.ReplicationControllers(r.ns).Delete(oldRc.Name)
	case RenameRollingUpdateCleanupPolicy:
		// delete old rc, then rename the new rc to the old name
		fmt.Fprintf(config.Out, "Update succeeded. Deleting old controller: %s\n", oldRc.Name)
		if err := r.c.ReplicationControllers(r.ns).Delete(oldRc.Name); err != nil {
			return err
		}
		fmt.Fprintf(config.Out, "Renaming %s to %s\n", newRc.Name, oldRc.Name)
		return Rename(r.c, newRc, oldRc.Name)
	case PreserveRollingUpdateCleanupPolicy:
		return nil
	default:
		return nil
	}
}
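The interval, timeout, and cleanup policy that cleanupWithClients reads all come from the RollingUpdaterConfig. The sketch below shows only the fields referenced above, with assumed values; the newCleanupConfig name is hypothetical.

// newCleanupConfig is a hypothetical constructor for the config fields used by
// cleanupWithClients: Out receives progress messages, Interval and Timeout
// drive the replica wait, and CleanupPolicy selects the branch taken above.
func newCleanupConfig(out io.Writer) *RollingUpdaterConfig {
	return &RollingUpdaterConfig{
		Out:           out,
		Interval:      3 * time.Second,
		Timeout:       5 * time.Minute,
		CleanupPolicy: DeleteRollingUpdateCleanupPolicy,
	}
}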
Example #7
func (c *realScalerClient) ControllerHasDesiredReplicas(rc *api.ReplicationController) wait.ConditionFunc {
	return client.ControllerHasDesiredReplicas(c.client, rc)
}
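realScalerClient is the kind of thin wrapper that lets the condition sit behind an interface, so a test double can satisfy the same method. The fakeScalerClient below is a hypothetical sketch, not part of the original source.

// fakeScalerClient reports every controller as already settled, which keeps
// tests that poll the returned condition from blocking.
type fakeScalerClient struct{}

func (c *fakeScalerClient) ControllerHasDesiredReplicas(rc *api.ReplicationController) wait.ConditionFunc {
	return func() (bool, error) { return true, nil }
}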