Example #1
File: scale.go Project: rferris/kubernetes
// RunScale executes the scaling of a single resource.
func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, shortOutput bool) error {
	if len(os.Args) > 1 && os.Args[1] == "resize" {
		printDeprecationWarning("scale", "resize")
	}

	count := cmdutil.GetFlagInt(cmd, "replicas")
	if count < 0 {
		return cmdutil.UsageError(cmd, "--replicas=COUNT RESOURCE NAME")
	}

	cmdNamespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	mapper, typer := f.Object()
	r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
		ContinueOnError().
		NamespaceParam(cmdNamespace).DefaultNamespace().
		ResourceTypeOrNameArgs(false, args...).
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}
	mapping, err := r.ResourceMapping()
	if err != nil {
		return err
	}

	infos, err := r.Infos()
	if err != nil {
		return err
	}
	if len(infos) > 1 {
		return fmt.Errorf("multiple resources provided: %v", args)
	}
	info := infos[0]

	scaler, err := f.Scaler(mapping)
	if err != nil {
		return err
	}

	// Build the optional scale precondition from --resource-version and
	// --current-replicas; -1 and "" mean "no precondition".
	resourceVersion := cmdutil.GetFlagString(cmd, "resource-version")
	currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
	precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion}
	retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
	waitForReplicas := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
	if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
		return err
	}
	cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled")
	return nil
}
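
The heart of this example is the final scaler.Scale call: scale to count replicas, retry conflicts, then wait until the new size is reached. Below is a minimal sketch of that call pattern in isolation, assuming scaler came from f.Scaler(mapping) as above; the namespace, name, and replica count are hypothetical placeholders, and Size: -1 with ResourceVersion: "" disables the precondition (the same convention example #2 below relies on).

import (
	"time"

	"k8s.io/kubernetes/pkg/kubectl"
)

// scaleToThree is a sketch, not part of the example: scale one resource
// to 3 replicas, retrying conflicts for up to two minutes and then
// waiting until the new size is actually reached.
func scaleToThree(scaler kubectl.Scaler) error {
	// Size: -1 and ResourceVersion: "" mean "no precondition".
	precondition := &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	retry := kubectl.NewRetryParams(1*time.Second, 2*time.Minute)
	waitForReplicas := kubectl.NewRetryParams(1*time.Second, 2*time.Minute)
	return scaler.Scale("default", "my-controller", uint(3), precondition, retry, waitForReplicas)
}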
Example #2
// Deploy starts the deployment process for deploymentName.
func (d *Deployer) Deploy(namespace, deploymentName string) error {
	// Look up the new deployment.
	to, err := d.getDeployment(namespace, deploymentName)
	if err != nil {
		return fmt.Errorf("couldn't get deployment %s/%s: %v", namespace, deploymentName, err)
	}

	// Decode the config from the deployment.
	config, err := deployutil.DecodeDeploymentConfig(to, latest.Codec)
	if err != nil {
		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s/%s: %v", to.Namespace, to.Name, err)
	}

	// Get a strategy for the deployment.
	strategy, err := d.strategyFor(config)
	if err != nil {
		return err
	}

	// New deployments must have a desired replica count.
	desiredReplicas, hasDesired := deployutil.DeploymentDesiredReplicas(to)
	if !hasDesired {
		return fmt.Errorf("deployment %s has no desired replica count", deployutil.LabelForDeployment(to))
	}

	// Find all deployments for the config.
	unsortedDeployments, err := d.getDeployments(namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't get controllers in namespace %s: %v", namespace, err)
	}
	deployments := unsortedDeployments.Items

	// Sort all the deployments by version.
	sort.Sort(deployutil.DeploymentsByLatestVersionDesc(deployments))

	// Find the most recently completed deployment, if any.
	var from *kapi.ReplicationController
	for _, candidate := range deployments {
		if candidate.Name == to.Name {
			continue
		}
		if deployutil.DeploymentStatusFor(&candidate) == deployapi.DeploymentStatusComplete {
			from = &candidate
			break
		}
	}

	// Scale down any deployments that aren't the new deployment or the last completed one.
	for _, candidate := range deployments {
		// Skip the from/to deployments.
		if candidate.Name == to.Name {
			continue
		}
		if from != nil && candidate.Name == from.Name {
			continue
		}
		// Skip the deployment if it's already scaled down.
		if candidate.Spec.Replicas == 0 {
			continue
		}
		// Scale the deployment down to zero.
		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
			glog.Errorf("Couldn't scale down prior deployment %s: %v", deployutil.LabelForDeployment(&candidate), err)
		} else {
			glog.Infof("Scaled down prior deployment %s", deployutil.LabelForDeployment(&candidate))
		}
	}

	// Perform the deployment.
	if from == nil {
		glog.Infof("Deploying %s for the first time (replicas: %d)", deployutil.LabelForDeployment(to), desiredReplicas)
	} else {
		glog.Infof("Deploying from %s to %s (replicas: %d)", deployutil.LabelForDeployment(from), deployutil.LabelForDeployment(to), desiredReplicas)
	}
	return strategy.Deploy(from, to, desiredReplicas)
}
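
The search for the last completed deployment above only works because deployments was sorted newest-first by deployutil.DeploymentsByLatestVersionDesc. A self-contained sketch of that kind of descending sort.Interface, using a stand-in struct instead of the real kapi.ReplicationController and its version annotation:

package main

import (
	"fmt"
	"sort"
)

// rc is a stand-in for kapi.ReplicationController in this sketch.
type rc struct {
	Name    string
	Version int // stand-in for the deployment version annotation
}

// byLatestVersionDesc mirrors what deployutil.DeploymentsByLatestVersionDesc
// does for the real types: the newest deployment sorts first.
type byLatestVersionDesc []rc

func (d byLatestVersionDesc) Len() int           { return len(d) }
func (d byLatestVersionDesc) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
func (d byLatestVersionDesc) Less(i, j int) bool { return d[i].Version > d[j].Version }

func main() {
	deployments := []rc{{"frontend-1", 1}, {"frontend-3", 3}, {"frontend-2", 2}}
	sort.Sort(byLatestVersionDesc(deployments))
	fmt.Println(deployments) // [{frontend-3 3} {frontend-2 2} {frontend-1 1}]
}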
Example #3
// RunScale executes the scaling of one or more resources.
func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, shortOutput bool) error {
	if len(os.Args) > 1 && os.Args[1] == "resize" {
		printDeprecationWarning("scale", "resize")
	}

	count := cmdutil.GetFlagInt(cmd, "replicas")
	if count < 0 {
		return cmdutil.UsageError(cmd, "--replicas=COUNT TYPE NAME")
	}

	cmdNamespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	mapper, typer := f.Object()
	r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
		ContinueOnError().
		NamespaceParam(cmdNamespace).DefaultNamespace().
		ResourceTypeOrNameArgs(false, args...).
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}
	mapping, err := r.ResourceMapping()
	if err != nil {
		return err
	}

	infos, err := r.Infos()
	if err != nil {
		return err
	}

	scaler, err := f.Scaler(mapping)
	if err != nil {
		return err
	}

	// --resource-version and --current-replicas only make sense when
	// scaling a single resource.
	resourceVersion := cmdutil.GetFlagString(cmd, "resource-version")
	if len(resourceVersion) != 0 && len(infos) > 1 {
		return fmt.Errorf("cannot use --resource-version with multiple controllers")
	}
	currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
	if currentSize != -1 && len(infos) > 1 {
		return fmt.Errorf("cannot use --current-replicas with multiple controllers")
	}
	precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion}
	retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
	// Only wait for the new replica count to be reached if --timeout was set.
	var waitForReplicas *kubectl.RetryParams
	if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 {
		waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout)
	}

	errs := []error{}
	for _, info := range infos {
		if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
			errs = append(errs, err)
			continue
		}
		cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled")
	}

	return errors.NewAggregate(errs)
}
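
Unlike example #1, a failure on one resource does not abort the loop: errors are collected and combined with errors.NewAggregate, which returns nil when the slice is empty. The following standalone sketch shows the same collect-then-aggregate shape, using errors.Join from the Go standard library (Go 1.20+) in place of the Kubernetes helper:

package main

import (
	"errors"
	"fmt"
)

// scaleOne is a stand-in for the per-resource scaler.Scale call.
func scaleOne(name string) error {
	if name == "bad" {
		return fmt.Errorf("couldn't scale %s", name)
	}
	return nil
}

func main() {
	// Collect failures instead of returning on the first one, as the
	// example does for each resolved resource info.
	var errs []error
	for _, name := range []string{"frontend", "bad", "backend"} {
		if err := scaleOne(name); err != nil {
			errs = append(errs, err)
			continue
		}
		fmt.Printf("%s scaled\n", name)
	}
	// Like errors.NewAggregate, errors.Join returns nil if nothing failed.
	if err := errors.Join(errs...); err != nil {
		fmt.Println("failed:", err)
	}
}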
Example #4
// DeployWithAcceptor scales down from and then scales up to. If
// updateAcceptor is provided and the desired replica count is >1, the first
// replica of to is rolled out and validated before performing the full scale
// up.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor kubectl.UpdateAcceptor) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
	}

	params := config.Template.Strategy.RecreateParams
	retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)

	// Execute any pre-hook.
	if params != nil && params.Pre != nil {
		if err := s.hookExecutor.Execute(params.Pre, to, "prehook"); err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
		glog.Infof("Pre hook finished")
	}

	// Scale down the from deployment.
	if from != nil {
		glog.Infof("Scaling %s down to zero", deployutil.LabelForDeployment(from))
		_, err := s.scaleAndWait(from, 0, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 0: %v", deployutil.LabelForDeployment(from), err)
		}
	}

	// If an UpdateAcceptor is provided and we're trying to scale up to more
	// than one replica, scale up to 1 and validate the replica, aborting if the
	// replica isn't acceptable.
	if updateAcceptor != nil && desiredReplicas > 1 {
		glog.Infof("Scaling %s to 1 before validating first replica", deployutil.LabelForDeployment(to))
		updatedTo, err := s.scaleAndWait(to, 1, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 1: %v", deployutil.LabelForDeployment(to), err)
		}
		glog.Infof("Validating first replica of %s", deployutil.LabelForDeployment(to))
		if err := updateAcceptor.Accept(updatedTo); err != nil {
			return fmt.Errorf("first replica rejected for %s: %v", to.Name, err)
		}
		to = updatedTo
	}

	// Complete the scale up.
	glog.Infof("Scaling %s to %d", deployutil.LabelForDeployment(to), desiredReplicas)
	updatedTo, err := s.scaleAndWait(to, desiredReplicas, retryParams, waitParams)
	if err != nil {
		return fmt.Errorf("couldn't scale %s to %d: %v", deployutil.LabelForDeployment(to), desiredReplicas, err)
	}
	to = updatedTo

	// Execute any post-hook. Errors are logged and ignored.
	if params != nil && params.Post != nil {
		if err := s.hookExecutor.Execute(params.Post, to, "posthook"); err != nil {
			util.HandleError(fmt.Errorf("post hook failed: %s", err))
		} else {
			glog.Infof("Post hook finished")
		}
	}

	glog.Infof("Deployment %s successfully made active", to.Name)
	return nil
}
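
The updateAcceptor argument gives the caller a veto over the first replica: after the scale to 1, Accept receives the updated controller and any error aborts the rollout. Below is a hypothetical sketch of the smallest possible acceptor, assuming kubectl.UpdateAcceptor is the one-method Accept(*kapi.ReplicationController) error interface that the call above implies:

// logOnlyAcceptor is hypothetical: it accepts every first replica and
// merely logs it. A real acceptor would check pod readiness here and
// return an error to reject the deployment.
type logOnlyAcceptor struct{}

func (logOnlyAcceptor) Accept(rc *kapi.ReplicationController) error {
	glog.Infof("Accepting first replica of %s/%s", rc.Namespace, rc.Name)
	return nil
}

// Usage sketch: validate the first replica before the full scale up to 5.
//	err := s.DeployWithAcceptor(from, to, 5, logOnlyAcceptor{})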