Example #1
File: deployer.go Project: pweil-/origin
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface, oclient client.Interface, out, errOut io.Writer, until string) *Deployer {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &Deployer{
		out:    out,
		errOut: errOut,
		until:  until,
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(configName)})
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Spec.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), recreate, out, errOut, until), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Spec.Strategy.Type)
			}
		},
	}
}
Example #2
// Returns the old RCs targeted by the given Deployment.
func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector
	podList, err := c.Pods(namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything(), unversioned.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	oldRCs := map[string]api.ReplicationController{}
	rcList, err := c.ReplicationControllers(namespace).List(labels.Everything(), fields.Everything(), unversioned.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	newRCTemplate := GetNewRCTemplate(deployment)
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
				if api.Semantic.DeepEqual(rc.Spec.Template, &newRCTemplate) {
					continue
				}
				oldRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []*api.ReplicationController{}
	for name := range oldRCs {
		// Copy into a fresh variable so each appended pointer refers to a distinct RC,
		// rather than taking the address of the reused range variable.
		value := oldRCs[name]
		requiredRCs = append(requiredRCs, &value)
	}
	return requiredRCs, nil
}
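A caller would typically pair GetOldRCs with GetNewRC (see Examples #4 and #20) to split the controllers targeted by a deployment into old and new sets. The sketch below is hypothetical and only illustrates that call pattern; describeDeploymentRCs is not part of the original code, and it assumes the same client.Interface and extensions.Deployment types used above.

// describeDeploymentRCs is a hypothetical helper showing how the two lookups compose.
func describeDeploymentRCs(deployment extensions.Deployment, c client.Interface) error {
	oldRCs, err := GetOldRCs(deployment, c)
	if err != nil {
		return err
	}
	newRC, err := GetNewRC(deployment, c)
	if err != nil {
		return err
	}
	fmt.Printf("deployment %q targets %d old RC(s)\n", deployment.Name, len(oldRCs))
	if newRC != nil {
		fmt.Printf("new RC is %q\n", newRC.Name)
	}
	return nil
}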
Example #3
// updateRcWithRetries retries updating the given rc on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updateRcWithRetries(c client.Interface, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
	// Deep copy the rc in case we failed on Get during retry loop
	obj, err := api.Scheme.Copy(rc)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
	}
	oldRc := obj.(*api.ReplicationController)
	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(rc)
		if rc, e = c.ReplicationControllers(namespace).Update(rc); e == nil {
			// rc contains the latest controller post update
			return
		}
		updateErr := e
		// Fetch the controller again to get the latest resource version. Since the update
		// failed, rc can't be trusted, so look it up by oldRc.Name.
		if rc, e = c.ReplicationControllers(namespace).Get(oldRc.Name); e != nil {
			// The Get failed: Value in rc cannot be trusted.
			rc = oldRc
		}
		// Only return the error from update
		return updateErr
	})
	// If the error is non-nil, the returned controller cannot be trusted; if it is nil, the
	// returned controller contains the applied update.
	return rc, err
}
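Since updateRcWithRetries re-applies the mutation inside the retry loop, the applyUpdate callback must be safe to run more than once. The sketch below is a hypothetical caller; it assumes updateRcFunc is declared as func(*api.ReplicationController), which matches how applyUpdate is invoked above, and the annotation key is made up for illustration.

// markRcPaused is a hypothetical caller that sets an annotation, retrying on conflicts.
func markRcPaused(c client.Interface, rc *api.ReplicationController) (*api.ReplicationController, error) {
	return updateRcWithRetries(c, rc.Namespace, rc, func(cur *api.ReplicationController) {
		if cur.Annotations == nil {
			cur.Annotations = map[string]string{}
		}
		cur.Annotations["example.io/paused"] = "true" // illustrative key only
	})
}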
Example #4
// GetNewRC returns an RC that matches the intent of the given deployment, fetching the RC list through the client interface.
// Returns nil if the new RC doesn't exist yet.
func GetNewRC(deployment extensions.Deployment, c client.Interface) (*api.ReplicationController, error) {
	return GetNewRCFromList(deployment, c,
		func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
			rcList, err := c.ReplicationControllers(namespace).List(options)
			return rcList.Items, err
		})
}
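GetNewRC only supplies the list closure; the matching itself happens in GetNewRCFromList. That makes it easy to resolve the new RC from an already-fetched list instead of hitting the API server again, as in the hypothetical sketch below (getNewRCFromCache is not part of the original code; the closure signature is inferred from its use above).

// getNewRCFromCache is a hypothetical variant that reuses a pre-fetched slice of RCs.
func getNewRCFromCache(deployment extensions.Deployment, c client.Interface, cached []api.ReplicationController) (*api.ReplicationController, error) {
	return GetNewRCFromList(deployment, c,
		func(namespace string, options api.ListOptions) ([]api.ReplicationController, error) {
			// The caller already scoped the cached slice, so namespace and options are ignored.
			return cached, nil
		})
}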
Example #5
func UpdateExistingReplicationController(c client.Interface, oldRc *api.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*api.ReplicationController, error) {
	SetNextControllerAnnotation(oldRc, newName)
	if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
		return AddDeploymentKeyToReplicationController(oldRc, c, deploymentKey, deploymentValue, namespace, out)
	} else {
		// If we didn't need to update the controller for the deployment key, we still need to write
		// the "next" controller.
		return c.ReplicationControllers(namespace).Update(oldRc)
	}
}
Example #6
File: framework.go Project: pweil-/origin
// WaitForRegistry waits until a newly deployed registry becomes ready. If waitForDCVersion is given, the
// function will wait until the corresponding replication controller completes. If not given, the latest
// version of the registry's deployment config will be fetched from etcd.
func WaitForRegistry(
	dcNamespacer client.DeploymentConfigsNamespacer,
	kubeClient kclient.Interface,
	waitForDCVersion *int64,
	oc *CLI,
) error {
	var latestVersion int64
	start := time.Now()

	if waitForDCVersion != nil {
		latestVersion = *waitForDCVersion
	} else {
		dc, err := dcNamespacer.DeploymentConfigs(kapi.NamespaceDefault).Get("docker-registry")
		if err != nil {
			return err
		}
		latestVersion = dc.Status.LatestVersion
	}
	fmt.Fprintf(g.GinkgoWriter, "waiting for deployment of version %d to complete\n", latestVersion)

	err := WaitForADeployment(kubeClient.ReplicationControllers(kapi.NamespaceDefault), "docker-registry",
		func(rc *kapi.ReplicationController) bool {
			if !CheckDeploymentCompletedFn(rc) {
				return false
			}
			v, err := strconv.ParseInt(rc.Annotations[deployapi.DeploymentVersionAnnotation], 10, 64)
			if err != nil {
				fmt.Fprintf(g.GinkgoWriter, "failed to parse %q of replication controller %q: %v\n", deployapi.DeploymentVersionAnnotation, rc.Name, err)
				return false
			}
			return v >= latestVersion
		},
		func(rc *kapi.ReplicationController) bool {
			v, err := strconv.ParseInt(rc.Annotations[deployapi.DeploymentVersionAnnotation], 10, 64)
			if err != nil {
				fmt.Fprintf(g.GinkgoWriter, "failed to parse %q of replication controller %q: %v\n", deployapi.DeploymentVersionAnnotation, rc.Name, err)
				return false
			}
			if v < latestVersion {
				return false
			}
			return CheckDeploymentFailedFn(rc)
		}, oc)
	if err != nil {
		return err
	}

	requirement, err := labels.NewRequirement(deployapi.DeploymentLabel, selection.Equals, sets.NewString(fmt.Sprintf("docker-registry-%d", latestVersion)))
	if err != nil {
		return err
	}
	pods, err := WaitForPods(kubeClient.Pods(kapi.NamespaceDefault), labels.NewSelector().Add(*requirement), CheckPodIsReadyFn, 1, time.Minute)
	now := time.Now()
	fmt.Fprintf(g.GinkgoWriter, "deployed registry pod %s after %s\n", pods[0], now.Sub(start).String())
	return err
}
Example #7
func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) {
	containerIndex := 0
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(cfg.Namespace).Get(cfg.OldName)
	if err != nil {
		return nil, err
	}

	if len(cfg.Container) != 0 {
		containerFound := false

		for i, c := range newRc.Spec.Template.Spec.Containers {
			if c.Name == cfg.Container {
				containerIndex = i
				containerFound = true
				break
			}
		}

		if !containerFound {
			return nil, fmt.Errorf("container %s not found in pod", cfg.Container)
		}
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 {
		return nil, goerrors.New("Must specify container to update when updating a multi-container pod")
	}

	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, fmt.Errorf("Pod has no containers! (%v)", newRc)
	}
	newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image
	if len(cfg.PullPolicy) != 0 {
		newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy
	}

	newHash, err := api.HashObject(newRc, codec)
	if err != nil {
		return nil, err
	}

	if len(cfg.NewName) == 0 {
		cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = cfg.NewName

	newRc.Spec.Selector[cfg.DeploymentKey] = newHash
	newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
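For reference, a hypothetical call site is sketched below. It sets only the NewControllerConfig fields that the function above actually reads (Namespace, OldName, Container, Image, DeploymentKey); NewName is left empty so the function derives it from the hash, and the image name is made up.

// newFrontendController is a hypothetical caller that derives a new RC with an updated image.
func newFrontendController(c client.Interface, codec runtime.Codec) (*api.ReplicationController, error) {
	cfg := &NewControllerConfig{
		Namespace:     "default",
		OldName:       "frontend",
		Container:     "frontend",
		Image:         "example.io/frontend:v2",
		DeploymentKey: "deployment",
	}
	return CreateNewControllerFromCurrentController(c, codec, cfg)
}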
Example #8
func deleteReplicationControllers(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.ReplicationControllers(ns).List(unversioned.ListOptions{})
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
Example #9
func deleteReplicationControllers(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.ReplicationControllers(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
	}
	return nil
}
Example #10
File: recreate.go Project: erinboyd/origin
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler:       scaler,
		codec:        codec,
		hookExecutor: stratsupport.NewHookExecutor(client, os.Stdout, codec),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example #11
func unloadReplicationControllerLabel(client kclient.Interface, application *api.Application, labelSelector labels.Selector) error {

	resourceList, err := client.ReplicationControllers(application.Namespace).List(kapi.ListOptions{LabelSelector: labelSelector, FieldSelector: fields.Everything()})
	if err != nil {
		return err
	}
	errs := []error{}
	for _, resource := range resourceList.Items {
		if !hasItem(application.Spec.Items, api.Item{Kind: "ReplicationController", Name: resource.Name}) {
			delete(resource.Labels, fmt.Sprintf("%s.application.%s", application.Namespace, application.Name))
			if _, err := client.ReplicationControllers(application.Namespace).Update(&resource); err != nil {
				errs = append(errs, err)
			}
		}
	}

	if len(errs) > 0 {
		// Surface the first collected update failure instead of silently dropping the errors.
		return errs[0]
	}
	return nil
}
Example #12
// ValidateAppName validates application name. When error is returned, name validity could not be
// determined.
func ValidateAppName(spec *AppNameValiditySpec, client client.Interface) (*AppNameValidity, error) {
	log.Printf("Validating %s application name in %s namespace", spec.Name, spec.Namespace)

	isValidRc := false
	isValidService := false

	_, err := client.ReplicationControllers(spec.Namespace).Get(spec.Name)
	if err != nil {
		if isNotFoundError(err) {
			isValidRc = true
		} else {
			return nil, err
		}
	}

	_, err = client.Services(spec.Namespace).Get(spec.Name)
	if err != nil {
		if isNotFoundError(err) {
			isValidService = true
		} else {
			return nil, err
		}
	}

	isValid := isValidRc && isValidService

	log.Printf("Validation result for %s application name in %s namespace is %t", spec.Name,
		spec.Namespace, isValid)

	return &AppNameValidity{Valid: isValid}, nil
}
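A hypothetical caller is sketched below; it assumes AppNameValiditySpec exposes the Name and Namespace fields read above, and the names used are illustrative.

// checkNameFree is a hypothetical helper that reports whether "frontend" is still unused.
func checkNameFree(c client.Interface) (bool, error) {
	validity, err := ValidateAppName(&AppNameValiditySpec{Name: "frontend", Namespace: "default"}, c)
	if err != nil {
		return false, err
	}
	return validity.Valid, nil
}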
Example #13
// Deletes services related to replication controller with given name in given namespace.
func DeleteReplicationControllerServices(client client.Interface, namespace, name string) error {
	log.Printf("Deleting services related to %s replication controller from %s namespace", name,
		namespace)

	replicationController, err := client.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return err
	}

	labelSelector, err := toLabelSelector(replicationController.Spec.Selector)
	if err != nil {
		return err
	}

	services, err := getServicesForDeletion(client, labelSelector, namespace)
	if err != nil {
		return err
	}

	for _, service := range services {
		if err := client.Services(namespace).Delete(service.Name); err != nil {
			return err
		}
	}

	log.Printf("Successfully deleted services related to %s replication controller from %s namespace",
		name, namespace)

	return nil
}
Example #14
// TODO(floreks): This should be transactional to make sure that RC will not be deleted without pods
// Deletes replication controller with given name in given namespace and related pods.
// Also deletes services related to replication controller if deleteServices is true.
func DeleteReplicationController(client client.Interface, namespace, name string,
	deleteServices bool) error {

	log.Printf("Deleting %s replication controller from %s namespace", name, namespace)

	if deleteServices {
		if err := DeleteReplicationControllerServices(client, namespace, name); err != nil {
			return err
		}
	}

	pods, err := getRawReplicationControllerPods(client, namespace, name)
	if err != nil {
		return err
	}

	if err := client.ReplicationControllers(namespace).Delete(name); err != nil {
		return err
	}

	for _, pod := range pods.Items {
		if err := client.Pods(namespace).Delete(pod.Name, &api.DeleteOptions{}); err != nil {
			return err
		}
	}

	log.Printf("Successfully deleted %s replication controller from %s namespace", name, namespace)

	return nil
}
Example #15
// GetReplicationControllerDetail returns detailed information about the given replication
// controller in the given namespace.
func GetReplicationControllerDetail(client k8sClient.Interface, heapsterClient client.HeapsterClient,
	namespace, name string) (*ReplicationControllerDetail, error) {
	log.Printf("Getting details of %s replication controller in %s namespace", name, namespace)

	replicationController, err := client.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return nil, err
	}

	podInfo, err := getReplicationControllerPodInfo(client, replicationController, namespace)
	if err != nil {
		return nil, err
	}

	podList, err := GetReplicationControllerPods(client, heapsterClient, dataselect.DefaultDataSelectWithMetrics,
		name, namespace)
	if err != nil {
		return nil, err
	}

	eventList, err := GetReplicationControllerEvents(client, dataselect.DefaultDataSelect, namespace, name)
	if err != nil {
		return nil, err
	}

	serviceList, err := GetReplicationControllerServices(client, dataselect.DefaultDataSelect, namespace,
		name)
	if err != nil {
		return nil, err
	}

	replicationControllerDetail := ToReplicationControllerDetail(replicationController, *podInfo,
		*podList, *eventList, *serviceList)
	return &replicationControllerDetail, nil
}
Example #16
File: recreate.go Project: rrati/origin
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, decoder runtime.Decoder) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(client, timeout, AcceptorInterval)
		},
		scaler:       scaler,
		decoder:      decoder,
		hookExecutor: stratsupport.NewHookExecutor(client, os.Stdout, decoder),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example #17
File: recreate.go Project: ZenoRewn/origin
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, tagClient client.ImageStreamTagsNamespacer, decoder runtime.Decoder, out, errOut io.Writer, until string) *RecreateDeploymentStrategy {
	if out == nil {
		out = ioutil.Discard
	}
	if errOut == nil {
		errOut = ioutil.Discard
	}
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		out:    out,
		errOut: errOut,
		until:  until,
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(out, client, timeout, AcceptorInterval)
		},
		scaler:       scaler,
		decoder:      decoder,
		hookExecutor: stratsupport.NewHookExecutor(client, tagClient, os.Stdout, decoder),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example #18
// Updates number of replicas in Replica Set based on Replica Set Spec
func UpdateReplicasCount(client client.Interface, namespace string, name string,
	replicaSetSpec *ReplicaSetSpec) error {
	replicaSet, err := client.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return err
	}

	replicaSet.Spec.Replicas = replicaSetSpec.Replicas

	_, err = client.ReplicationControllers(namespace).Update(replicaSet)
	if err != nil {
		return err
	}

	return nil
}
Example #19
// Based on given selector returns list of services that are candidates for deletion.
// Services are matched by replication controllers' label selector. They are deleted if given
// label selector is targeting only 1 replication controller.
func getServicesForDeletion(client client.Interface, labelSelector labels.Selector,
	namespace string) ([]api.Service, error) {

	replicationControllers, err := client.ReplicationControllers(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	// if label selector is targeting only 1 replication controller
	// then we can delete services targeted by this label selector,
	// otherwise we can not delete any services so just return empty list
	if len(replicationControllers.Items) != 1 {
		return []api.Service{}, nil
	}

	services, err := client.Services(namespace).List(api.ListOptions{
		LabelSelector: labelSelector,
		FieldSelector: fields.Everything(),
	})
	if err != nil {
		return nil, err
	}

	return services.Items, nil
}
Example #20
// Returns an RC that matches the intent of the given deployment.
// Returns nil if the new RC doesn't exist yet.
func GetNewRC(deployment extensions.Deployment, c client.Interface) (*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	rcList, err := c.ReplicationControllers(namespace).List(unversioned.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	newRCTemplate := GetNewRCTemplate(deployment)

	for _, rc := range rcList.Items {
		if api.Semantic.DeepEqual(rc.Spec.Template, &newRCTemplate) {
			// This is the new RC.
			return &rc, nil
		}
	}
	// new RC does not exist.
	return nil, nil
}
Example #21
File: scale.go Project: ibotty/origin
// controllerHasSpecifiedReplicas returns a condition that will be true if and
// only if the specified replica count for a controller's ReplicaSelector
// equals the Replicas count.
//
// This is a slightly modified version of
// unversioned.ControllerHasDesiredReplicas. This  is necessary because when
// scaling an RC via a DC, the RC spec replica count is not immediately
// updated to match the owning DC.
func controllerHasSpecifiedReplicas(c kclient.Interface, controller *kapi.ReplicationController, specifiedReplicas int) wait.ConditionFunc {
	// If we're given a controller where the status lags the spec, it either means that the controller is stale,
	// or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case.
	desiredGeneration := controller.Generation

	return func() (bool, error) {
		ctrl, err := c.ReplicationControllers(controller.Namespace).Get(controller.Name)
		if err != nil {
			return false, err
		}
		// There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass,
		// or, after this check has passed, a modification causes the rc manager to create more pods.
		// This will not be an issue once we've implemented graceful delete for rcs, but till then
		// concurrent stop operations on the same rc might have unintended side effects.
		return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == specifiedReplicas, nil
	}
}
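Because controllerHasSpecifiedReplicas returns a wait.ConditionFunc, callers usually hand it to a poller. The sketch below is hypothetical; it assumes the standard wait.Poll helper from the Kubernetes wait package, and the one-second/two-minute durations are arbitrary.

// waitForSpecifiedReplicas is a hypothetical caller that blocks until the RC reports the expected count.
func waitForSpecifiedReplicas(c kclient.Interface, rc *kapi.ReplicationController, replicas int) error {
	return wait.Poll(1*time.Second, 2*time.Minute, controllerHasSpecifiedReplicas(c, rc, replicas))
}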
Example #22
func toReplicationControllerPodController(client k8sClient.Interface, reference api.ObjectReference, pods []api.Pod, events []api.Event, heapsterClient client.HeapsterClient) (*Controller, error) {
	rc, err := client.ReplicationControllers(reference.Namespace).Get(reference.Name)
	if err != nil {
		return nil, err
	}
	rcs := []api.ReplicationController{*rc}
	replicationControllerList := replicationcontrollerlist.CreateReplicationControllerList(rcs, dataselect.StdMetricsDataSelect, pods, events, &heapsterClient)
	return &Controller{
		Kind: "ReplicationController",
		ReplicationControllerList: replicationControllerList,
	}, nil
}
Example #23
// NewDeploymentConfigDescriber returns a new DeploymentConfigDescriber
func NewDeploymentConfigDescriber(client client.Interface, kclient kclient.Interface) *DeploymentConfigDescriber {
	return &DeploymentConfigDescriber{
		client: &genericDeploymentDescriberClient{
			getDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return client.DeploymentConfigs(namespace).Get(name)
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return kclient.ReplicationControllers(namespace).Get(name)
			},
			listDeploymentsFunc: func(namespace string, selector labels.Selector) (*kapi.ReplicationControllerList, error) {
				return kclient.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: selector})
			},
			listPodsFunc: func(namespace string, selector labels.Selector) (*kapi.PodList, error) {
				return kclient.Pods(namespace).List(kapi.ListOptions{LabelSelector: selector})
			},
			listEventsFunc: func(deploymentConfig *deployapi.DeploymentConfig) (*kapi.EventList, error) {
				return kclient.Events(deploymentConfig.Namespace).Search(deploymentConfig)
			},
		},
	}
}
Example #24
// Returns the old RCs targeted by the given Deployment.
func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector
	selector := labels.SelectorFromSet(deployment.Spec.Selector)
	options := unversioned.ListOptions{LabelSelector: unversioned.LabelSelector{selector}}
	podList, err := c.Pods(namespace).List(options)
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	oldRCs := map[string]api.ReplicationController{}
	rcList, err := c.ReplicationControllers(namespace).List(unversioned.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	newRCTemplate := GetNewRCTemplate(deployment)
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
				if api.Semantic.DeepEqual(rc.Spec.Template, &newRCTemplate) {
					continue
				}
				oldRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []*api.ReplicationController{}
	// Note that Go reuses the same memory location for the range variable on every iteration,
	// so taking its address directly would make all appended pointers identical.
	// Therefore, copy each entry into a fresh variable via its key before taking the address.
	for i := range oldRCs {
		value := oldRCs[i]
		requiredRCs = append(requiredRCs, &value)
	}
	return requiredRCs, nil
}
Example #25
// NewLatestDeploymentsDescriber lists the latest deployments limited to "count". In case count == -1, list back to the last successful.
func NewLatestDeploymentsDescriber(client client.Interface, kclient kclient.Interface, count int) *LatestDeploymentsDescriber {
	return &LatestDeploymentsDescriber{
		count: count,
		client: &genericDeploymentDescriberClient{
			getDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return client.DeploymentConfigs(namespace).Get(name)
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return kclient.ReplicationControllers(namespace).Get(name)
			},
			listDeploymentsFunc: func(namespace string, selector labels.Selector) (*kapi.ReplicationControllerList, error) {
				return kclient.ReplicationControllers(namespace).List(selector, fields.Everything())
			},
			listPodsFunc: func(namespace string, selector labels.Selector) (*kapi.PodList, error) {
				return kclient.Pods(namespace).List(selector, fields.Everything())
			},
			listEventsFunc: func(deploymentConfig *deployapi.DeploymentConfig) (*kapi.EventList, error) {
				return kclient.Events(deploymentConfig.Namespace).Search(deploymentConfig)
			},
		},
	}
}
Example #26
// Updates number of replicas in Replication Controller based on Replication Controller Spec
func UpdateReplicasCount(client client.Interface, namespace, name string,
	replicationControllerSpec *ReplicationControllerSpec) error {
	log.Printf("Updating replicas count to %d for %s replication controller from %s namespace",
		replicationControllerSpec.Replicas, name, namespace)

	replicationController, err := client.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return err
	}

	replicationController.Spec.Replicas = replicationControllerSpec.Replicas

	_, err = client.ReplicationControllers(namespace).Update(replicationController)
	if err != nil {
		return err
	}

	log.Printf("Successfully updated replicas count to %d for %s replication controller from %s namespace",
		replicationControllerSpec.Replicas, name, namespace)

	return nil
}
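A hypothetical call site is shown below; it assumes ReplicationControllerSpec carries the Replicas field that the helper above copies onto the live object.

// scaleToThree is a hypothetical caller that scales the named RC to three replicas.
func scaleToThree(client client.Interface, namespace, name string) error {
	return UpdateReplicasCount(client, namespace, name, &ReplicationControllerSpec{Replicas: 3})
}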
Example #27
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor("ReplicationController", client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler: scaler,
		codec:  codec,
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example #28
File: deployer.go Project: sztsian/origin
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface) *Deployer {
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &Deployer{
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Template.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, latest.Codec), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, latest.Codec)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, latest.Codec, recreate), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Template.Strategy.Type)
			}
		},
	}
}
Example #29
// makePodsFromRC will create a ReplicationController object and
// a given number of pods (imitating the controller).
func makePodsFromRC(c client.Interface, name string, podCount int) {
	rc := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: int32(podCount),
			Selector: map[string]string{"name": name},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: makePodSpec(),
			},
		},
	}
	if _, err := c.ReplicationControllers("default").Create(rc); err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}

	basePod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "scheduler-test-pod-",
			Labels:       map[string]string{"name": name},
		},
		Spec: makePodSpec(),
	}
	createPod := func(i int) {
		for {
			if _, err := c.Pods("default").Create(basePod); err == nil {
				break
			}
		}
	}
	workqueue.Parallelize(30, podCount, createPod)
}
Example #30
// testPodNodeConstraintsObjectCreationWithPodTemplate attempts to create different object types that contain pod templates
// using the passed in nodeName and nodeSelector. It uses the expectError flag to determine whether a forbidden error is expected.
func testPodNodeConstraintsObjectCreationWithPodTemplate(t *testing.T, name string, kclient kclient.Interface, client client.Interface, nodeName string, nodeSelector map[string]string, expectError bool) {

	checkForbiddenErr := func(objType string, err error) {
		if err == nil && expectError {
			t.Errorf("%s (%s): expected forbidden error but did not receive one", name, objType)
			return
		}
		if err != nil && !expectError {
			t.Errorf("%s (%s): got error but did not expect one: %v", name, objType, err)
			return
		}
		if err != nil && expectError && !kapierrors.IsForbidden(err) {
			t.Errorf("%s (%s): did not get an expected forbidden error: %v", name, objType, err)
			return
		}
	}

	// Pod
	pod := testPodNodeConstraintsPod(nodeName, nodeSelector)
	_, err := kclient.Pods(testutil.Namespace()).Create(pod)
	checkForbiddenErr("pod", err)

	// ReplicationController
	rc := testPodNodeConstraintsReplicationController(nodeName, nodeSelector)
	_, err = kclient.ReplicationControllers(testutil.Namespace()).Create(rc)
	checkForbiddenErr("rc", err)

	// TODO: Enable when the deployments endpoint is supported in Origin
	// Deployment
	// d := testPodNodeConstraintsDeployment(nodeName, nodeSelector)
	// _, err = kclient.Extensions().Deployments(testutil.Namespace()).Create(d)
	// checkForbiddenErr("deployment", err)

	// ReplicaSet
	rs := testPodNodeConstraintsReplicaSet(nodeName, nodeSelector)
	_, err = kclient.Extensions().ReplicaSets(testutil.Namespace()).Create(rs)
	checkForbiddenErr("replicaset", err)

	// Job
	job := testPodNodeConstraintsJob(nodeName, nodeSelector)
	_, err = kclient.Extensions().Jobs(testutil.Namespace()).Create(job)
	checkForbiddenErr("job", err)

	// DeploymentConfig
	dc := testPodNodeConstraintsDeploymentConfig(nodeName, nodeSelector)
	_, err = client.DeploymentConfigs(testutil.Namespace()).Create(dc)
	checkForbiddenErr("dc", err)
}