Example 1
func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) {
	switch kind {
	case api.Kind("ReplicationController"):
		return &ReplicationControllerReaper{c, Interval, Timeout}, nil

	case extensions.Kind("ReplicaSet"):
		return &ReplicaSetReaper{c, Interval, Timeout}, nil

	case extensions.Kind("DaemonSet"):
		return &DaemonSetReaper{c, Interval, Timeout}, nil

	case api.Kind("Pod"):
		return &PodReaper{c}, nil

	case api.Kind("Service"):
		return &ServiceReaper{c}, nil

	case extensions.Kind("Job"), batch.Kind("Job"):
		return &JobReaper{c, Interval, Timeout}, nil

	case extensions.Kind("Deployment"):
		return &DeploymentReaper{c, Interval, Timeout}, nil

	}
	return nil, &NoSuchReaperError{kind}
}
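The factory above dispatches on the GroupKind, so callers never construct a concrete reaper directly. A minimal usage sketch, assuming it sits in the same package as the factory and that kubeClient is an already-configured client.Interface; the helper name is illustrative only:

func stopReplicationController(kubeClient client.Interface, namespace, name string) error {
	// Look up the reaper registered for the ReplicationController kind.
	reaper, err := ReaperFor(api.Kind("ReplicationController"), kubeClient)
	if err != nil {
		// A NoSuchReaperError means the kind has no cascading-delete support.
		return err
	}
	// A zero timeout lets the reaper compute its own default (see the
	// ReplicaSetReaper in Example 7); nil DeleteOptions uses the default grace period.
	return reaper.Stop(namespace, name, 0, nil)
}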
Example 2
func HistoryViewerFor(kind unversioned.GroupKind, c clientset.Interface) (HistoryViewer, error) {
	switch kind {
	case extensions.Kind("Deployment"):
		return &DeploymentHistoryViewer{c}, nil
	}
	return nil, fmt.Errorf("no history viewer has been implemented for %q", kind)
}
Example 3
func RollbackerFor(kind unversioned.GroupKind, c client.Interface) (Rollbacker, error) {
	switch kind {
	case extensions.Kind("Deployment"):
		return &DeploymentRollbacker{c}, nil
	}
	return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind)
}
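HistoryViewerFor and RollbackerFor follow the same dispatch pattern, with Deployment as the only kind wired up here. A hedged sketch of obtaining both helpers, assuming the same package and already-configured cs (clientset.Interface) and c (client.Interface) values; the returned interfaces' methods are not called because their signatures are not part of this excerpt:

func rolloutHelpersForDeployment(cs clientset.Interface, c client.Interface) (HistoryViewer, Rollbacker, error) {
	// Both factories return an error for any kind other than Deployment.
	viewer, err := HistoryViewerFor(extensions.Kind("Deployment"), cs)
	if err != nil {
		return nil, nil, err
	}
	rollbacker, err := RollbackerFor(extensions.Kind("Deployment"), c)
	if err != nil {
		return nil, nil, err
	}
	return viewer, rollbacker, nil
}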
Example 4
func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	deployments := reaper.Extensions().Deployments(namespace)
	replicaSets := reaper.Extensions().ReplicaSets(namespace)
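	// The error from ReaperFor is deliberately ignored: ReplicaSet has a case
	// in the factory above, so the lookup cannot fail for this kind.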
	rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper)

	deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) {
		// set deployment's history and scale to 0
		// TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527
		d.Spec.RevisionHistoryLimit = util.Int32Ptr(0)
		d.Spec.Replicas = 0
		d.Spec.Paused = true
	})
	if err != nil {
		return err
	}

	// Use observedGeneration to determine if the deployment controller noticed the pause.
	if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
		return deployments.Get(name)
	}, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil {
		return err
	}

	// Stop all replica sets.
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}

	options := api.ListOptions{LabelSelector: selector}
	rsList, err := replicaSets.List(options)
	if err != nil {
		return err
	}
	errList := []error{}
	for _, rs := range rsList.Items {
		if err := rsReaper.Stop(rs.Namespace, rs.Name, timeout, gracePeriod); err != nil {
			scaleGetErr, ok := err.(*ScaleError)
			if !errors.IsNotFound(err) || ok && !errors.IsNotFound(scaleGetErr.ActualError) {
				errList = append(errList, err)
			}
		}
	}
	if len(errList) > 0 {
		return utilerrors.NewAggregate(errList)
	}

	// Delete deployment at the end.
	// Note: We delete the deployment at the end so that if removing RSs fails, we at least have the deployment to retry.
	return deployments.Delete(name, nil)
}
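Tying Examples 1 and 4 together, the deployment reaper is normally reached through the same factory. A minimal sketch, assuming a configured client.Interface named kubeClient; the five-minute timeout is an arbitrary illustrative value:

func deleteDeploymentWithDependents(kubeClient client.Interface, namespace, name string) error {
	reaper, err := ReaperFor(extensions.Kind("Deployment"), kubeClient)
	if err != nil {
		return err
	}
	// Stop pauses the deployment, scales it to zero, reaps each matching
	// ReplicaSet, and only then deletes the Deployment object itself.
	return reaper.Stop(namespace, name, 5*time.Minute, nil)
}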
Example 5
func (r *RollbackREST) Create(ctx api.Context, obj runtime.Object) (out runtime.Object, err error) {
	rollback, ok := obj.(*extensions.DeploymentRollback)
	if !ok {
		return nil, fmt.Errorf("expected input object type to be DeploymentRollback, but %T", obj)
	}

	if errs := extvalidation.ValidateDeploymentRollback(rollback); len(errs) != 0 {
		return nil, errors.NewInvalid(extensions.Kind("DeploymentRollback"), rollback.Name, errs)
	}

	// Update the Deployment with information in DeploymentRollback to trigger rollback
	err = r.rollbackDeployment(ctx, rollback.Name, &rollback.RollbackTo, rollback.UpdatedAnnotations)
	return
}
Example 6
func (r *ScaleREST) Update(ctx api.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	deployment, err := r.registry.GetDeployment(ctx, name)
	if err != nil {
		return nil, false, errors.NewNotFound(extensions.Resource("deployments/scale"), name)
	}

	oldScale, err := scaleFromDeployment(deployment)
	if err != nil {
		return nil, false, err
	}

	obj, err := objInfo.UpdatedObject(ctx, oldScale)
	if err != nil {
		return nil, false, err
	}
	if obj == nil {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("nil update passed to Scale"))
	}
	scale, ok := obj.(*extensions.Scale)
	if !ok {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("expected input object type to be Scale, but %T", obj))
	}

	if errs := extvalidation.ValidateScale(scale); len(errs) > 0 {
		return nil, false, errors.NewInvalid(extensions.Kind("Scale"), name, errs)
	}

	deployment.Spec.Replicas = scale.Spec.Replicas
	deployment.ResourceVersion = scale.ResourceVersion
	deployment, err = r.registry.UpdateDeployment(ctx, deployment)
	if err != nil {
		return nil, false, err
	}
	newScale, err := scaleFromDeployment(deployment)
	if err != nil {
		return nil, false, errors.NewBadRequest(fmt.Sprintf("%v", err))
	}
	return newScale, false, nil
}
Example 7
func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error {
	rsc := reaper.Extensions().ReplicaSets(namespace)
	scaler, err := ScalerFor(extensions.Kind("ReplicaSet"), *reaper)
	if err != nil {
		return err
	}
	rs, err := rsc.Get(name)
	if err != nil {
		return err
	}
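	// If the caller did not supply a timeout, derive one that grows with the
	// replica count: the package default plus ten seconds per replica.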
	if timeout == 0 {
		timeout = Timeout + time.Duration(10*rs.Spec.Replicas)*time.Second
	}

	// The ReplicaSet controller will try and detect all matching ReplicaSets
	// for a pod's labels, and only sync the oldest one. This means if we have
	// a pod with labels [(k1: v1), (k2: v2)] and two ReplicaSets: rs1 with
	// selector [(k1=v1)], and rs2 with selector [(k1=v1),(k2=v2)], the
	// ReplicaSet controller will sync the older of the two ReplicaSets.
	//
	// If there are ReplicaSets with a superset of labels, e.g.:
	// deleting: (k1=v1), superset: (k2=v2, k1=v1)
	//	- It isn't safe to delete the ReplicaSet because there could be a pod
	//    with labels (k1=v1) that isn't managed by the superset ReplicaSet.
	//    We can't scale it down either, because there could be a pod
	//    (k2=v2, k1=v1) that it deletes causing a fight with the superset
	//    ReplicaSet.
	// If there are ReplicaSets with a subset of labels, e.g.:
	// deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3)
	//  - Even if it's safe to delete this ReplicaSet without a scale down,
	//    because all its pods are being controlled by the subset ReplicaSet,
	//    the code returns an error.

	// In theory, creating overlapping ReplicaSets is user error, so the loop below
	// tries to account for this logic only in the common case, where we end up
	// with multiple ReplicaSets that have an exact match on selectors.

	// TODO(madhusudancs): Re-evaluate again when controllerRef is implemented -
	// https://github.com/kubernetes/kubernetes/issues/2210
	overlappingRSs, exactMatchRSs, err := getOverlappingReplicaSets(rsc, rs)
	if err != nil {
		return fmt.Errorf("error getting ReplicaSets: %v", err)
	}

	if len(overlappingRSs) > 0 {
		var names []string
		for _, overlappingRS := range overlappingRSs {
			names = append(names, overlappingRS.Name)
		}
		return fmt.Errorf(
			"Detected overlapping ReplicaSets for ReplicaSet %v: %v, please manage deletion individually with --cascade=false.",
			rs.Name, strings.Join(names, ","))
	}
	if len(exactMatchRSs) == 0 {
		// No overlapping ReplicaSets.
		retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
		waitForReplicas := NewRetryParams(reaper.pollInterval, timeout)
		if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil {
			return err
		}
	}

	if err := rsc.Delete(name, nil); err != nil {
		return err
	}
	return nil
}