Example 1
// Deploy starts the deployment process for rcName.
func (d *Deployer) Deploy(namespace, rcName string) error {
	// Look up the new deployment.
	to, err := d.getDeployment(namespace, rcName)
	if err != nil {
		return fmt.Errorf("couldn't get deployment %s: %v", rcName, err)
	}

	// Decode the config from the deployment.
	config, err := deployutil.DecodeDeploymentConfig(to, kapi.Codecs.UniversalDecoder())
	if err != nil {
		return fmt.Errorf("couldn't decode deployment config from deployment %s: %v", to.Name, err)
	}

	// Get a strategy for the deployment.
	s, err := d.strategyFor(config)
	if err != nil {
		return err
	}

	// New deployments must have a desired replica count.
	desiredReplicas, hasDesired := deployutil.DeploymentDesiredReplicas(to)
	if !hasDesired {
		return fmt.Errorf("deployment %s has already run to completion", to.Name)
	}

	// Find all deployments for the config.
	unsortedDeployments, err := d.getDeployments(namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't get controllers in namespace %s: %v", namespace, err)
	}
	deployments := unsortedDeployments.Items

	// Sort all the deployments by version.
	sort.Sort(deployutil.ByLatestVersionDesc(deployments))

	// Find the most recent completed deployment, if any.
	var from *kapi.ReplicationController
	for _, candidate := range deployments {
		if candidate.Name == to.Name {
			continue
		}
		if deployutil.IsCompleteDeployment(&candidate) {
			from = &candidate
			break
		}
	}

	if deployutil.DeploymentVersionFor(to) < deployutil.DeploymentVersionFor(from) {
		return fmt.Errorf("deployment %s is older than %s", to.Name, from.Name)
	}

	// Scale down any deployments which aren't the new or last deployment.
	for _, candidate := range deployments {
		// Skip the from/to deployments.
		if candidate.Name == to.Name {
			continue
		}
		if from != nil && candidate.Name == from.Name {
			continue
		}
		// Skip the deployment if it's already scaled down.
		if candidate.Spec.Replicas == 0 {
			continue
		}
		// Scale the deployment down to zero.
		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
			fmt.Fprintf(d.errOut, "error: couldn't scale down prior deployment %s: %v\n", deployutil.LabelForDeployment(&candidate), err)
		} else {
			fmt.Fprintf(d.out, "--> Scaled older deployment %s down\n", candidate.Name)
		}
	}

	if d.until == "start" {
		return strategy.NewConditionReachedErr("Ready to start deployment")
	}

	// Perform the deployment.
	if err := s.Deploy(from, to, int(desiredReplicas)); err != nil {
		return err
	}
	fmt.Fprintf(d.out, "--> Success\n")
	return nil
}
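
The core of the selection above is: sort every deployment of the config by version, skip the target itself, and take the newest complete one as the transition source. Below is a minimal, self-contained sketch of that idea; the rc struct and pickFrom helper are hypothetical stand-ins for *kapi.ReplicationController and the deployutil helpers, used purely for illustration.

package main

import (
	"fmt"
	"sort"
)

// rc is a hypothetical stand-in for *kapi.ReplicationController carrying only
// the fields the selection logic needs.
type rc struct {
	Name     string
	Version  int64
	Complete bool
}

// pickFrom mirrors the selection in Deploy: order candidates by version
// descending, skip the target itself, and return the newest complete one.
func pickFrom(target string, deployments []rc) *rc {
	sort.Slice(deployments, func(i, j int) bool {
		return deployments[i].Version > deployments[j].Version
	})
	for i := range deployments {
		if deployments[i].Name == target {
			continue
		}
		if deployments[i].Complete {
			return &deployments[i]
		}
	}
	return nil
}

func main() {
	all := []rc{
		{Name: "app-1", Version: 1, Complete: true},
		{Name: "app-2", Version: 2, Complete: false}, // a failed deployment is skipped
		{Name: "app-3", Version: 3, Complete: false}, // the new deployment being rolled out
	}
	from := pickFrom("app-3", all)
	fmt.Println(from.Name) // app-1: the newest complete deployment other than the target
}

Because the slice is ordered by version first, a complete-but-older deployment can never shadow a newer complete one.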
Example 2
func (s *RollingDeploymentStrategy) Deploy(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.decoder)
	if err != nil {
		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s: %v", deployutil.LabelForDeployment(to), err)
	}

	params := config.Spec.Strategy.RollingParams
	updateAcceptor := s.getUpdateAcceptor(time.Duration(*params.TimeoutSeconds) * time.Second)

	// If there's no prior deployment, delegate to another strategy since the
	// rolling updater only supports transitioning between two deployments.
	//
	// Hook support is duplicated here for now. When the rolling updater can
	// handle initial deployments, all of this code can go away.
	if from == nil {
		// Execute any pre-hook.
		if params.Pre != nil {
			if err := s.hookExecutor.Execute(params.Pre, to, deployapi.PreHookPodSuffix, "pre"); err != nil {
				return fmt.Errorf("Pre hook failed: %s", err)
			}
		}

		// Execute the delegate strategy.
		err := s.initialStrategy.DeployWithAcceptor(from, to, desiredReplicas, updateAcceptor)
		if err != nil {
			return err
		}

		// Execute any post-hook.
		if params.Post != nil {
			if err := s.hookExecutor.Execute(params.Post, to, deployapi.PostHookPodSuffix, "post"); err != nil {
				return fmt.Errorf("post hook failed: %s", err)
			}
		}

		// All done.
		return nil
	}

	// Prepare for a rolling update.
	// Execute any pre-hook.
	if params.Pre != nil {
		if err := s.hookExecutor.Execute(params.Pre, to, deployapi.PreHookPodSuffix, "pre"); err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
	}

	if s.until == "pre" {
		return strat.NewConditionReachedErr("pre hook succeeded")
	}

	if s.until == "0%" {
		return strat.NewConditionReachedErr("Reached 0% (before rollout)")
	}

	// HACK: Assign the source ID annotation that the rolling updater expects,
	// unless it already exists on the deployment.
	//
	// Related upstream issue:
	// https://github.com/kubernetes/kubernetes/pull/7183
	err = wait.Poll(s.apiRetryPeriod, s.apiRetryTimeout, func() (done bool, err error) {
		existing, err := s.client.ReplicationControllers(to.Namespace).Get(to.Name)
		if err != nil {
			msg := fmt.Sprintf("couldn't look up deployment %s: %s", to.Name, err)
			if kerrors.IsNotFound(err) {
				return false, fmt.Errorf("%s", msg)
			}
			// Try again.
			fmt.Fprintln(s.errOut, "error:", msg)
			return false, nil
		}
		if _, hasSourceId := existing.Annotations[sourceIdAnnotation]; !hasSourceId {
			existing.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", from.Name, from.ObjectMeta.UID)
			if _, err := s.client.ReplicationControllers(existing.Namespace).Update(existing); err != nil {
				msg := fmt.Sprintf("couldn't assign source annotation to deployment %s: %v", existing.Name, err)
				if kerrors.IsNotFound(err) {
					return false, fmt.Errorf("%s", msg)
				}
				// Try again.
				fmt.Fprintln(s.errOut, "error:", msg)
				return false, nil
			}
		}
		return true, nil
	})
	if err != nil {
		return err
	}
	to, err = s.client.ReplicationControllers(to.Namespace).Get(to.Name)
	if err != nil {
		return err
	}

	// HACK: There's a validation in the rolling updater which assumes that when
	// an existing RC is supplied, it will have >0 replicas, a validation which
	// is then disregarded because the desired count is obtained from the
	// annotation on the RC. For now, fake it out by just setting replicas to 1.
	//
	// Related upstream issue:
	// https://github.com/kubernetes/kubernetes/pull/7183
	to.Spec.Replicas = 1

	// Perform a rolling update.
	rollingConfig := &kubectl.RollingUpdaterConfig{
		Out:            &rollingUpdaterWriter{w: s.out},
		OldRc:          from,
		NewRc:          to,
		UpdatePeriod:   time.Duration(*params.UpdatePeriodSeconds) * time.Second,
		Interval:       time.Duration(*params.IntervalSeconds) * time.Second,
		Timeout:        time.Duration(*params.TimeoutSeconds) * time.Second,
		CleanupPolicy:  kubectl.PreserveRollingUpdateCleanupPolicy,
		MaxSurge:       params.MaxSurge,
		MaxUnavailable: params.MaxUnavailable,
		OnProgress: func(oldRc, newRc *kapi.ReplicationController, percentage int) error {
			if expect, ok := strat.Percentage(s.until); ok && percentage >= expect {
				return strat.NewConditionReachedErr(fmt.Sprintf("Reached %s (currently %d%%)", s.until, percentage))
			}
			return nil
		},
	}
	if err := s.rollingUpdate(rollingConfig); err != nil {
		return err
	}

	// Execute any post-hook.
	if params.Post != nil {
		if err := s.hookExecutor.Execute(params.Post, to, deployapi.PostHookPodSuffix, "post"); err != nil {
			return fmt.Errorf("post hook failed: %s", err)
		}
	}
	return nil
}
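
The source-ID annotation hack above depends on the wait.Poll contract: the condition function returns (false, nil) to be retried after the interval, (true, nil) to stop successfully, and a non-nil error to abort immediately. The following sketch shows that contract with a simplified poll helper standing in for the real wait.Poll; it assumes only the semantics visible in the strategy above.

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll mimics the wait.Poll contract used above: the condition returns
// (true, nil) to stop successfully, (false, nil) to be retried after the
// interval, or a non-nil error to abort immediately.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		// A transient failure (e.g. a conflicting update) returns (false, nil)
		// so the annotation assignment is retried; a missing deployment would
		// return a non-nil error to abort, as in the strategy above.
		return attempts == 3, nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}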
Example 3
// DeployWithAcceptor scales down from and then scales up to. If there is a
// prior deployment and the desired replica count is greater than zero, the
// first replica of to is rolled out and validated by updateAcceptor (or by a
// default acceptor when none is provided) before the full scale up.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.decoder)
	if err != nil {
		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
	}

	params := config.Spec.Strategy.RecreateParams
	retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)

	if updateAcceptor == nil {
		updateAcceptor = s.getUpdateAcceptor(time.Duration(*params.TimeoutSeconds)*time.Second, config.Spec.MinReadySeconds)
	}

	// Execute any pre-hook.
	if params != nil && params.Pre != nil {
		if err := s.hookExecutor.Execute(params.Pre, to, deployapi.PreHookPodSuffix, "pre"); err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
	}

	if s.until == "pre" {
		return strat.NewConditionReachedErr("pre hook succeeded")
	}

	// Record all warnings
	defer stratutil.RecordConfigWarnings(s.eventClient, from, s.decoder, s.out)
	defer stratutil.RecordConfigWarnings(s.eventClient, to, s.decoder, s.out)

	// Scale down the from deployment.
	if from != nil {
		fmt.Fprintf(s.out, "--> Scaling %s down to zero\n", from.Name)
		_, err := s.scaleAndWait(from, 0, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 0: %v", from.Name, err)
		}
	}

	if s.until == "0%" {
		return strat.NewConditionReachedErr("Reached 0% (no running pods)")
	}

	if params != nil && params.Mid != nil {
		if err := s.hookExecutor.Execute(params.Mid, to, deployapi.MidHookPodSuffix, "mid"); err != nil {
			return fmt.Errorf("mid hook failed: %s", err)
		}
	}

	if s.until == "mid" {
		return strat.NewConditionReachedErr("mid hook succeeded")
	}

	accepted := false

	// Scale up the to deployment.
	if desiredReplicas > 0 {
		if from != nil {
			// Scale up to 1 and validate the replica,
			// aborting if the replica isn't acceptable.
			fmt.Fprintf(s.out, "--> Scaling %s to 1 before performing acceptance check\n", to.Name)
			updatedTo, err := s.scaleAndWait(to, 1, retryParams, waitParams)
			if err != nil {
				return fmt.Errorf("couldn't scale %s to 1: %v", to.Name, err)
			}
			if err := updateAcceptor.Accept(updatedTo); err != nil {
				return fmt.Errorf("update acceptor rejected %s: %v", to.Name, err)
			}
			accepted = true
			to = updatedTo

			if strat.PercentageBetween(s.until, 1, 99) {
				return strat.NewConditionReachedErr(fmt.Sprintf("Reached %s", s.until))
			}
		}

		// Complete the scale up.
		if to.Spec.Replicas != int32(desiredReplicas) {
			fmt.Fprintf(s.out, "--> Scaling %s to %d\n", to.Name, desiredReplicas)
			updatedTo, err := s.scaleAndWait(to, desiredReplicas, retryParams, waitParams)
			if err != nil {
				return fmt.Errorf("couldn't scale %s to %d: %v", to.Name, desiredReplicas, err)
			}

			to = updatedTo
		}

		if !accepted {
			if err := updateAcceptor.Accept(to); err != nil {
				return fmt.Errorf("update acceptor rejected %s: %v", to.Name, err)
			}
		}
	}

	if (from == nil && strat.PercentageBetween(s.until, 1, 100)) || (from != nil && s.until == "100%") {
		return strat.NewConditionReachedErr(fmt.Sprintf("Reached %s", s.until))
	}

	// Execute any post-hook.
	if params != nil && params.Post != nil {
		if err := s.hookExecutor.Execute(params.Post, to, deployapi.PostHookPodSuffix, "post"); err != nil {
			return fmt.Errorf("post hook failed: %s", err)
		}
	}

	return nil
}
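
The acceptance check is the extension point of this strategy: Accept receives the new deployment after it has been scaled to a single replica, and a non-nil error aborts the rollout before the full scale-up. The exact strat.UpdateAcceptor definition is not shown here, so the sketch below infers it from its use and substitutes a small stand-in type for *kapi.ReplicationController; it is an illustration, not the project's implementation.

package main

import "fmt"

// replicationController is a hypothetical stand-in for
// *kapi.ReplicationController carrying only the fields this sketch needs.
type replicationController struct {
	Name          string
	ReadyReplicas int32
}

// updateAcceptor mirrors the contract inferred from the strategy above:
// Accept is called with the first scaled-up replica and may veto the rollout.
type updateAcceptor interface {
	Accept(rc *replicationController) error
}

// readyAcceptor accepts a deployment only once at least one replica reports
// ready; a real acceptor would watch pods up to a timeout instead.
type readyAcceptor struct{}

func (readyAcceptor) Accept(rc *replicationController) error {
	if rc.ReadyReplicas < 1 {
		return fmt.Errorf("deployment %s has no ready replicas", rc.Name)
	}
	return nil
}

func main() {
	var acceptor updateAcceptor = readyAcceptor{}
	fmt.Println(acceptor.Accept(&replicationController{Name: "app-3", ReadyReplicas: 1})) // <nil>
	fmt.Println(acceptor.Accept(&replicationController{Name: "app-4"}))                   // deployment app-4 has no ready replicas
}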