// TestHandle_newConfigNoTriggers ensures that a change to a config with no
// triggers doesn't result in a new config version bump.
func TestHandle_newConfigNoTriggers(t *testing.T) {
	controller := &DeploymentConfigChangeController{
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, api.Codec)
		},
		changeStrategy: &changeStrategyImpl{
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				t.Fatalf("unexpected generation of deploymentConfig")
				return nil, nil
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				t.Fatalf("unexpected update of deploymentConfig")
				return config, nil
			},
		},
	}

	config := deployapitest.OkDeploymentConfig(1)
	config.Triggers = []deployapi.DeploymentTriggerPolicy{}
	err := controller.Handle(config)

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

// TestHandle_changeWithTemplateDiff ensures that a pod template change to a
// config with a config change trigger results in a version bump and cause
// update.
func TestHandle_changeWithTemplateDiff(t *testing.T) {
	var updated *deployapi.DeploymentConfig

	controller := &DeploymentConfigChangeController{
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, api.Codec)
		},
		changeStrategy: &changeStrategyImpl{
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return deployapitest.OkDeploymentConfig(2), nil
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				updated = config
				return config, nil
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				deployment, _ := deployutil.MakeDeployment(deployapitest.OkDeploymentConfig(1), kapi.Codec)
				return deployment, nil
			},
		},
	}

	config := deployapitest.OkDeploymentConfig(1)
	config.Triggers = []deployapi.DeploymentTriggerPolicy{deployapitest.OkConfigChangeTrigger()}
	config.Template.ControllerTemplate.Template.Spec.Containers[1].Name = "modified"
	err := controller.Handle(config)

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if updated == nil {
		t.Fatalf("expected config to be updated")
	}

	if e, a := 2, updated.LatestVersion; e != a {
		t.Fatalf("expected update to latestversion=%d, got %d", e, a)
	}

	if updated.Details == nil {
		t.Fatalf("expected config change details to be set")
	} else if updated.Details.Causes == nil {
		t.Fatalf("expected config change causes to be set")
	} else if updated.Details.Causes[0].Type != deployapi.DeploymentTriggerOnConfigChange {
		t.Fatalf("expected config change cause to be set to config change trigger, got %s", updated.Details.Causes[0].Type)
	}
}

// TestHandle_changeWithoutTemplateDiff ensures that an updated config with no
// pod template diff results in the config version remaining the same.
func TestHandle_changeWithoutTemplateDiff(t *testing.T) {
	config := deployapitest.OkDeploymentConfig(1)
	config.Triggers = []deployapi.DeploymentTriggerPolicy{deployapitest.OkConfigChangeTrigger()}

	generated := false
	updated := false

	controller := &DeploymentConfigChangeController{
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, api.Codec)
		},
		changeStrategy: &changeStrategyImpl{
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				generated = true
				return config, nil
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				updated = true
				return config, nil
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				deployment, _ := deployutil.MakeDeployment(deployapitest.OkDeploymentConfig(1), kapi.Codec)
				return deployment, nil
			},
		},
	}

	err := controller.Handle(config)

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if generated {
		t.Error("Unexpected generation of deploymentConfig")
	}

	if updated {
		t.Error("Unexpected update of deploymentConfig")
	}
}
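
The three tests above stub out the controller's collaborators with function fields. For reference, a minimal sketch of what changeStrategyImpl could look like, assuming one delegating method per field (its real definition isn't shown in these examples):

type changeStrategyImpl struct {
	getDeploymentFunc            func(namespace, name string) (*kapi.ReplicationController, error)
	generateDeploymentConfigFunc func(namespace, name string) (*deployapi.DeploymentConfig, error)
	updateDeploymentConfigFunc   func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error)
}

// getDeployment delegates to the test-provided function field.
func (i *changeStrategyImpl) getDeployment(namespace, name string) (*kapi.ReplicationController, error) {
	return i.getDeploymentFunc(namespace, name)
}

// generateDeploymentConfig delegates to the test-provided function field.
func (i *changeStrategyImpl) generateDeploymentConfig(namespace, name string) (*deployapi.DeploymentConfig, error) {
	return i.generateDeploymentConfigFunc(namespace, name)
}

// updateDeploymentConfig delegates to the test-provided function field.
func (i *changeStrategyImpl) updateDeploymentConfig(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
	return i.updateDeploymentConfigFunc(namespace, config)
}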
Example No. 4
// Create generates a new DeploymentConfig representing a rollback.
func (s *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	rollback, ok := obj.(*deployapi.DeploymentConfigRollback)
	if !ok {
		return nil, kerrors.NewBadRequest(fmt.Sprintf("not a rollback spec: %#v", obj))
	}

	if errs := validation.ValidateDeploymentConfigRollback(rollback); len(errs) > 0 {
		return nil, kerrors.NewInvalid("DeploymentConfigRollback", "", errs)
	}

	// Roll back "from" the current deployment "to" a target deployment

	// Find the target ("to") deployment and decode the DeploymentConfig
	targetDeployment, err := s.generator.GetDeployment(ctx, rollback.Spec.From.Name)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return nil, newInvalidDeploymentError(rollback, "Deployment not found")
		}
		return nil, newInvalidDeploymentError(rollback, fmt.Sprintf("%v", err))
	}

	to, err := deployutil.DecodeDeploymentConfig(targetDeployment, s.codec)
	if err != nil {
		return nil, newInvalidDeploymentError(rollback,
			fmt.Sprintf("couldn't decode DeploymentConfig from Deployment: %v", err))
	}

	// Find the current ("from") version of the target deploymentConfig
	from, err := s.generator.GetDeploymentConfig(ctx, to.Name)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return nil, newInvalidDeploymentError(rollback,
				fmt.Sprintf("couldn't find a current DeploymentConfig %s/%s", targetDeployment.Namespace, to.Name))
		}
		return nil, newInvalidDeploymentError(rollback,
			fmt.Sprintf("error finding current DeploymentConfig %s/%s: %v", targetDeployment.Namespace, to.Name, err))
	}

	return s.generator.GenerateRollback(from, to, &rollback.Spec)
}
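
newInvalidDeploymentError is used throughout Create but isn't defined in this example. A minimal sketch, assuming it builds a standard Invalid API error from the kerrors and fielderrors packages of the same vintage (the field path is a guess):

func newInvalidDeploymentError(rollback *deployapi.DeploymentConfigRollback, reason string) error {
	// Attribute the failure to the "from" deployment named in the rollback spec.
	err := fielderrors.NewFieldInvalid("spec.from.name", rollback.Spec.From.Name, reason)
	return kerrors.NewInvalid("DeploymentConfigRollback", rollback.Name, fielderrors.ValidationErrorList{err})
}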
Example No. 5
// Deploy starts the deployment process for deploymentName.
func (d *Deployer) Deploy(namespace, deploymentName string) error {
	// Look up the new deployment.
	to, err := d.getDeployment(namespace, deploymentName)
	if err != nil {
		return fmt.Errorf("couldn't get deployment %s/%s: %v", namespace, deploymentName, err)
	}

	// Decode the config from the deployment.
	config, err := deployutil.DecodeDeploymentConfig(to, latest.Codec)
	if err != nil {
		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s/%s: %v", to.Namespace, to.Name, err)
	}

	// Get a strategy for the deployment.
	strategy, err := d.strategyFor(config)
	if err != nil {
		return err
	}

	// New deployments must have a desired replica count.
	desiredReplicas, hasDesired := deployutil.DeploymentDesiredReplicas(to)
	if !hasDesired {
		return fmt.Errorf("deployment %s has no desired replica count", deployutil.LabelForDeployment(to))
	}

	// Find all deployments for the config.
	unsortedDeployments, err := d.getDeployments(namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't get controllers in namespace %s: %v", namespace, err)
	}
	deployments := unsortedDeployments.Items

	// Sort all the deployments by version.
	sort.Sort(deployutil.DeploymentsByLatestVersionDesc(deployments))

	// Find any last completed deployment.
	var from *kapi.ReplicationController
	for _, candidate := range deployments {
		if candidate.Name == to.Name {
			continue
		}
		if deployutil.DeploymentStatusFor(&candidate) == deployapi.DeploymentStatusComplete {
			from = &candidate
			break
		}
	}

	// Scale down any deployments which aren't the new or last deployment.
	for _, candidate := range deployments {
		// Skip the from/to deployments.
		if candidate.Name == to.Name {
			continue
		}
		if from != nil && candidate.Name == from.Name {
			continue
		}
		// Skip the deployment if it's already scaled down.
		if candidate.Spec.Replicas == 0 {
			continue
		}
		// Scale the deployment down to zero.
		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
			glog.Errorf("Couldn't scale down prior deployment %s: %v", deployutil.LabelForDeployment(&candidate), err)
		} else {
			glog.Infof("Scaled down prior deployment %s", deployutil.LabelForDeployment(&candidate))
		}
	}

	// Perform the deployment.
	if from == nil {
		glog.Infof("Deploying %s for the first time (replicas: %d)", deployutil.LabelForDeployment(to), desiredReplicas)
	} else {
		glog.Infof("Deploying from %s to %s (replicas: %d)", deployutil.LabelForDeployment(from), deployutil.LabelForDeployment(to), desiredReplicas)
	}
	return strategy.Deploy(from, to, desiredReplicas)
}
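
DeploymentsByLatestVersionDesc is the sort order used above. A minimal sketch, assuming a DeploymentVersionFor helper in the same package that reads the version annotation off a deployment:

// DeploymentsByLatestVersionDesc sorts deployments so the newest version comes first.
type DeploymentsByLatestVersionDesc []kapi.ReplicationController

func (d DeploymentsByLatestVersionDesc) Len() int      { return len(d) }
func (d DeploymentsByLatestVersionDesc) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d DeploymentsByLatestVersionDesc) Less(i, j int) bool {
	return DeploymentVersionFor(&d[j]) < DeploymentVersionFor(&d[i])
}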
Example No. 6
func (s *RollingDeploymentStrategy) Deploy(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s: %v", deployutil.LabelForDeployment(to), err)
	}

	params := config.Template.Strategy.RollingParams
	updateAcceptor := s.getUpdateAcceptor(time.Duration(*params.TimeoutSeconds) * time.Second)

	// If there's no prior deployment, delegate to another strategy since the
	// rolling updater only supports transitioning between two deployments.
	//
	// Hook support is duplicated here for now. When the rolling updater can
	// handle initial deployments, all of this code can go away.
	if from == nil {
		// Execute any pre-hook.
		if params.Pre != nil {
			err := s.hookExecutor.Execute(params.Pre, to, "prehook")
			if err != nil {
				return fmt.Errorf("Pre hook failed: %s", err)
			}
			glog.Infof("Pre hook finished")
		}

		// Execute the delegate strategy.
		err := s.initialStrategy.DeployWithAcceptor(from, to, desiredReplicas, updateAcceptor)
		if err != nil {
			return err
		}

		// Execute any post-hook. Errors are logged and ignored.
		if params.Post != nil {
			err := s.hookExecutor.Execute(params.Post, to, "posthook")
			if err != nil {
				util.HandleError(fmt.Errorf("post hook failed: %s", err))
			} else {
				glog.Infof("Post hook finished")
			}
		}

		// All done.
		return nil
	}

	// Prepare for a rolling update.
	// Execute any pre-hook.
	if params.Pre != nil {
		err := s.hookExecutor.Execute(params.Pre, to, "prehook")
		if err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
		glog.Infof("Pre hook finished")
	}

	// HACK: Assign the source ID annotation that the rolling updater expects,
	// unless it already exists on the deployment.
	//
	// Related upstream issue:
	// https://github.com/GoogleCloudPlatform/kubernetes/pull/7183
	updatedTo, err := s.client.GetReplicationController(to.Namespace, to.Name)
	if err != nil {
		return fmt.Errorf("couldn't look up deployment %s: %v", deployutil.LabelForDeployment(to), err)
	}
	to = updatedTo
	if _, hasSourceId := to.Annotations[sourceIdAnnotation]; !hasSourceId {
		to.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", from.Name, from.ObjectMeta.UID)
		if updated, err := s.client.UpdateReplicationController(to.Namespace, to); err != nil {
			return fmt.Errorf("couldn't assign source annotation to deployment %s: %v", deployutil.LabelForDeployment(to), err)
		} else {
			to = updated
		}
	}

	// HACK: There's a validation in the rolling updater which assumes that when
	// an existing RC is supplied, it will have >0 replicas, a validation which
	// is then disregarded, since the desired count is obtained from the annotation
	// on the RC. For now, fake it out by just setting replicas to 1.
	//
	// Related upstream issue:
	// https://github.com/GoogleCloudPlatform/kubernetes/pull/7183
	to.Spec.Replicas = 1

	// Perform a rolling update.
	rollingConfig := &kubectl.RollingUpdaterConfig{
		Out:            &rollingUpdaterWriter{},
		OldRc:          from,
		NewRc:          to,
		UpdatePeriod:   time.Duration(*params.UpdatePeriodSeconds) * time.Second,
		Interval:       time.Duration(*params.IntervalSeconds) * time.Second,
		Timeout:        time.Duration(*params.TimeoutSeconds) * time.Second,
		CleanupPolicy:  kubectl.PreserveRollingUpdateCleanupPolicy,
		UpdateAcceptor: updateAcceptor,
	}
	glog.Infof("Starting rolling update from %s to %s (desired replicas: %d, UpdatePeriodSeconds=%d, IntervalSeconds=%d, TimeoutSeconds=%d)",
		deployutil.LabelForDeployment(from),
		deployutil.LabelForDeployment(to),
		desiredReplicas,
		*params.UpdatePeriodSeconds,
		*params.IntervalSeconds,
		*params.TimeoutSeconds,
	)
	if err := s.rollingUpdate(rollingConfig); err != nil {
		return err
	}

	// Execute any post-hook. Errors are logged and ignored.
	if params.Post != nil {
		err := s.hookExecutor.Execute(params.Post, to, "posthook")
		if err != nil {
			util.HandleError(fmt.Errorf("Post hook failed: %s", err))
		} else {
			glog.Info("Post hook finished")
		}
	}

	return nil
}
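
rollingUpdaterWriter, used as the updater's Out above, isn't shown either. A minimal sketch, assuming it simply forwards the rolling updater's progress messages to glog (a strings import is implied; the real type may buffer partial lines):

type rollingUpdaterWriter struct{}

// Write logs each chunk the rolling updater emits and reports it as consumed.
func (w *rollingUpdaterWriter) Write(p []byte) (n int, err error) {
	glog.Info(strings.TrimSpace(string(p)))
	return len(p), nil
}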
Example No. 7
// DeployWithAcceptor scales down from and then scales up to. If
// updateAcceptor is provided and the desired replica count is >1, the first
// replica of to is rolled out and validated before performing the full scale
// up.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor kubectl.UpdateAcceptor) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
	}

	params := config.Template.Strategy.RecreateParams
	retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)

	// Execute any pre-hook.
	if params != nil && params.Pre != nil {
		if err := s.hookExecutor.Execute(params.Pre, to, "prehook"); err != nil {
			return fmt.Errorf("Pre hook failed: %s", err)
		} else {
			glog.Infof("Pre hook finished")
		}
	}

	// Scale down the from deployment.
	if from != nil {
		glog.Infof("Scaling %s down to zero", deployutil.LabelForDeployment(from))
		_, err := s.scaleAndWait(from, 0, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 0: %v", deployutil.LabelForDeployment(from), err)
		}
	}

	// If an UpdateAcceptor is provided and we're trying to scale up to more
	// than one replica, scale up to 1 and validate the replica, aborting if the
	// replica isn't acceptable.
	if updateAcceptor != nil && desiredReplicas > 1 {
		glog.Infof("Scaling %s to 1 before validating first replica", deployutil.LabelForDeployment(to))
		updatedTo, err := s.scaleAndWait(to, 1, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 1: %v", deployutil.LabelForDeployment(to), err)
		}
		glog.Infof("Validating first replica of %s", deployutil.LabelForDeployment(to))
		if err := updateAcceptor.Accept(updatedTo); err != nil {
			return fmt.Errorf("first replica rejected for %s: %v", to.Name, err)
		}
		to = updatedTo
	}

	// Complete the scale up.
	glog.Infof("Scaling %s to %d", deployutil.LabelForDeployment(to), desiredReplicas)
	updatedTo, err := s.scaleAndWait(to, desiredReplicas, retryParams, waitParams)
	if err != nil {
		return fmt.Errorf("couldn't scale %s to %d: %v", deployutil.LabelForDeployment(to), desiredReplicas, err)
	}
	to = updatedTo

	// Execute any post-hook. Errors are logged and ignored.
	if params != nil && params.Post != nil {
		if err := s.hookExecutor.Execute(params.Post, to, "posthook"); err != nil {
			util.HandleError(fmt.Errorf("post hook failed: %s", err))
		} else {
			glog.Infof("Post hook finished")
		}
	}

	glog.Infof("Deployment %s successfully made active", to.Name)
	return nil
}
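
scaleAndWait is referenced above but not shown. A minimal sketch, assuming the strategy holds a kubectl.Scaler and a getReplicationController accessor (both names are assumptions, mirroring the scaler usage in the Deployer example):

func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *kapi.ReplicationController, replicas int, retry, wait *kubectl.RetryParams) (*kapi.ReplicationController, error) {
	// Scale with no preconditions, retrying within the provided params.
	precondition := &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	if err := s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), precondition, retry, wait); err != nil {
		return nil, err
	}
	// Re-read the controller so callers observe the updated replica count.
	return s.getReplicationController(deployment.Namespace, deployment.Name)
}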
Example No. 8
// Create creates a DeploymentController.
func (factory *DeploymentControllerFactory) Create() controller.RunnableController {
	deploymentLW := &deployutil.ListWatcherImpl{
		// TODO: Investigate specifying annotation field selectors to fetch only 'deployments'.
		// Currently field selectors are not supported for replication controllers.
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	deploymentQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentLW, &kapi.ReplicationController{}, deploymentQueue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	deployController := &DeploymentController{
		serviceAccount: factory.ServiceAccount,
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
		},
		podClient: &podClientImpl{
			getPodFunc: func(namespace, name string) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Get(name)
			},
			createPodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Create(pod)
			},
			deletePodFunc: func(namespace, name string) error {
				return factory.KubeClient.Pods(namespace).Delete(name, nil)
			},
			updatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Update(pod)
			},
			// Find deployer pods using the label they should all have which
			// correlates them to the named deployment.
			getDeployerPodsForFunc: func(namespace, name string) ([]kapi.Pod, error) {
				labelSel, err := labels.Parse(fmt.Sprintf("%s=%s", deployapi.DeployerPodForDeploymentLabel, name))
				if err != nil {
					return []kapi.Pod{}, err
				}
				pods, err := factory.KubeClient.Pods(namespace).List(labelSel, fields.Everything())
				if err != nil {
					return []kapi.Pod{}, err
				}
				return pods.Items, nil
			},
		},
		makeContainer: func(strategy *deployapi.DeploymentStrategy) (*kapi.Container, error) {
			return factory.makeContainer(strategy)
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: deploymentQueue,
		RetryManager: controller.NewQueueRetryManager(
			deploymentQueue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				if _, isFatal := err.(fatalError); isFatal {
					kutil.HandleError(err)
					return false
				}
				if retries.Count > 1 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			deployment := obj.(*kapi.ReplicationController)
			return deployController.Handle(deployment)
		},
	}
}
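
A minimal usage sketch for the factory (the client value and service account name are assumptions; RunnableController is assumed to expose a Run method):

factory := &DeploymentControllerFactory{
	KubeClient:     kubeClient, // a pre-built Kubernetes client, assumed
	Codec:          latest.Codec,
	ServiceAccount: "deployer",
}
deploymentController := factory.Create()
deploymentController.Run()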