Example No. 1
func UpdateExistingReplicationController(c client.Interface, oldRc *api.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*api.ReplicationController, error) {
	SetNextControllerAnnotation(oldRc, newName)
	if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
		return AddDeploymentKeyToReplicationController(oldRc, c, deploymentKey, deploymentValue, namespace, out)
	} else {
		// The deployment key is already in the selector, so the controller needs
		// no selector change, but we still have to persist the "next" controller
		// annotation set above.
		return c.ReplicationControllers(namespace).Update(oldRc)
	}
}
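A minimal calling sketch, assuming c is a configured client.Interface and oldRc was fetched beforehand; the namespace, successor name, key, and value below are illustrative:

	// Sketch only: point oldRc at its successor and make sure the deployment
	// key ends up in its selector ("deployment"/"hash-1234" are hypothetical).
	rc, err := UpdateExistingReplicationController(c, oldRc, "default", "frontend-next", "deployment", "hash-1234", os.Stdout)
	if err != nil {
		return err
	}
	fmt.Printf("controller %s now carries the deployment key\n", rc.Name)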
Example No. 2
// DeleteController deletes a replication controller named 'name', requires that the controller
// already be stopped.
func DeleteController(ctx api.Context, name string, client client.Interface) error {
	// TODO remove ctx in favor of just namespace string
	controller, err := client.ReplicationControllers(api.Namespace(ctx)).Get(name)
	if err != nil {
		return err
	}
	if controller.Spec.Replicas != 0 {
		return fmt.Errorf("controller has non-zero replicas (%d), please stop it first", controller.Spec.Replicas)
	}
	return client.ReplicationControllers(api.Namespace(ctx)).Delete(name)
}
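Because DeleteController refuses to remove a controller whose spec still asks for replicas, callers are expected to scale it down first. A minimal sketch using ResizeController from Example No. 6, assuming ctx carries the namespace; the controller name is illustrative:

	// Sketch only: scale to zero, then delete. ResizeController prints the
	// updated controller as YAML as a side effect.
	if err := ResizeController(ctx, "frontend", 0, client); err != nil {
		return err
	}
	if err := DeleteController(ctx, "frontend", client); err != nil {
		return err
	}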
Example No. 3
// deleteReplicationControllers removes every replication controller in the
// given namespace, stopping at the first delete that fails.
func deleteReplicationControllers(kubeClient client.Interface, ns string) error {
	items, err := kubeClient.ReplicationControllers(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	for i := range items.Items {
		err := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)
		if err != nil {
			return err
		}
	}
	return nil
}
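A minimal usage sketch, assuming kubeClient is configured; the namespace is illustrative:

	// Sketch only: purge all controllers in one namespace, e.g. as part of
	// namespace teardown.
	if err := deleteReplicationControllers(kubeClient, "integration-test"); err != nil {
		return err
	}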
Example No. 4
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, codec runtime.Codec, initialStrategy acceptingDeploymentStrategy) *RollingDeploymentStrategy {
	updaterClient := &rollingUpdaterClient{
		ControllerHasDesiredReplicasFn: func(rc *kapi.ReplicationController) wait.ConditionFunc {
			return kclient.ControllerHasDesiredReplicas(client, rc)
		},
		GetReplicationControllerFn: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		UpdateReplicationControllerFn: func(namespace string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Update(rc)
		},
		// This guards against the RollingUpdater's built-in behavior to create
		// RCs when the supplied old RC is nil. We won't pass nil, but it doesn't
		// hurt to further guard against it since we would have no way to identify
		// or clean up orphaned RCs RollingUpdater might inadvertently create.
		CreateReplicationControllerFn: func(namespace string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return nil, fmt.Errorf("unexpected attempt to create Deployment: %#v", rc)
		},
		// We give the RollingUpdater a policy which should prevent it from
		// deleting the source deployment after the transition, but it doesn't
		// hurt to guard by removing its ability to delete.
		DeleteReplicationControllerFn: func(namespace, name string) error {
			return fmt.Errorf("unexpected attempt to delete Deployment %s/%s", namespace, name)
		},
	}
	return &RollingDeploymentStrategy{
		codec:           codec,
		initialStrategy: initialStrategy,
		client:          updaterClient,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, updaterClient)
			return updater.Update(config)
		},
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		getUpdateAcceptor: func(timeout time.Duration) kubectl.UpdateAcceptor {
			return stratsupport.NewFirstContainerReady(client, timeout, NewFirstContainerReadyInterval)
		},
	}
}
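A construction sketch from the caller's side, mirroring the wiring in Example No. 10 and assuming the recreate and rolling packages are imported as they are there; the namespace is illustrative:

	// Sketch only: the recreate strategy doubles as the acceptor for the
	// initial deployment.
	initial := recreate.NewRecreateDeploymentStrategy(client, latest.Codec)
	strategy := rolling.NewRollingDeploymentStrategy("default", client, latest.Codec, initial)
	_ = strategy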
Example No. 5
// RunController creates a new replication controller named 'name' which creates 'replicas' pods running 'image'.
func RunController(ctx api.Context, image, name string, replicas int, client client.Interface, portSpec string, servicePort int) error {
	// TODO replace ctx with a namespace string
	if servicePort > 0 && !util.IsDNSLabel(name) {
		return fmt.Errorf("service creation requested, but an invalid name for a service was provided (%s). Service names must be valid DNS labels.", name)
	}
	ports, err := portsFromString(portSpec)
	if err != nil {
		return err
	}
	controller := &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"name": name,
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  strings.ToLower(name),
							Image: image,
							Ports: ports,
						},
					},
				},
			},
		},
	}

	controllerOut, err := client.ReplicationControllers(api.Namespace(ctx)).Create(controller)
	if err != nil {
		return err
	}
	data, err := yaml.Marshal(controllerOut)
	if err != nil {
		return err
	}
	fmt.Print(string(data))

	if servicePort > 0 {
		svc, err := createService(ctx, name, servicePort, client)
		if err != nil {
			return err
		}
		data, err = yaml.Marshal(svc)
		if err != nil {
			return err
		}
		fmt.Print(string(data))
	}
	return nil
}
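A minimal usage sketch; the image, controller name, port spec, and service port are illustrative, and the port-spec syntax is whatever portsFromString accepts:

	// Sketch only: two replicas of an nginx image plus a service on port 80.
	if err := RunController(ctx, "nginx", "frontend", 2, client, "80", 80); err != nil {
		return err
	}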
Example No. 6
// ResizeController resizes a controller named 'name' by setting replicas to 'replicas'.
func ResizeController(ctx api.Context, name string, replicas int, client client.Interface) error {
	// TODO ctx is not needed, and should just be a namespace
	controller, err := client.ReplicationControllers(api.Namespace(ctx)).Get(name)
	if err != nil {
		return err
	}
	controller.Spec.Replicas = replicas
	controllerOut, err := client.ReplicationControllers(api.Namespace(ctx)).Update(controller)
	if err != nil {
		return err
	}
	data, err := yaml.Marshal(controllerOut)
	if err != nil {
		return err
	}
	fmt.Print(string(data))
	return nil
}
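A minimal sketch; scaling up and scaling down take the same path, and the updated controller is printed as YAML by the function itself:

	// Sketch only: grow the illustrative "frontend" controller to 5 replicas.
	if err := ResizeController(ctx, "frontend", 5, client); err != nil {
		return err
	}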
Example No. 7
// Update performs a rolling update of a collection of pods.
// 'name' points to a replication controller.
// 'client' is used for updating pods.
// 'updatePeriod' is the time between pod updates.
// 'imageName' is the new image to update for the template.  This will work
//     with the first container in the pod.  There is no support yet for
//     updating more complex replication controllers.  If this is blank then no
//     update of the image is performed.
func Update(ctx api.Context, name string, client client.Interface, updatePeriod time.Duration, imageName string) error {
	// TODO ctx is not needed as input to this function, should just be 'namespace'
	controller, err := client.ReplicationControllers(api.Namespace(ctx)).Get(name)
	if err != nil {
		return err
	}

	if len(imageName) != 0 {
		controller.Spec.Template.Spec.Containers[0].Image = imageName
		controller, err = client.ReplicationControllers(controller.Namespace).Update(controller)
		if err != nil {
			return err
		}
	}

	s := labels.Set(controller.Spec.Selector).AsSelector()

	podList, err := client.Pods(api.Namespace(ctx)).List(s)
	if err != nil {
		return err
	}
	expected := len(podList.Items)
	if expected == 0 {
		return nil
	}
	for _, pod := range podList.Items {
		// We delete the pod here, the controller will recreate it.  This will result in pulling
		// a new Docker image.  This isn't a full "update" but it's what we support for now.
		err = client.Pods(pod.Namespace).Delete(pod.Name)
		if err != nil {
			return err
		}
		time.Sleep(updatePeriod)
	}
	return wait.Poll(time.Second*5, time.Second*300, func() (bool, error) {
		podList, err := client.Pods(api.Namespace(ctx)).List(s)
		if err != nil {
			return false, err
		}
		return len(podList.Items) == expected, nil
	})
}
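A minimal usage sketch; the controller name, image, and update period are illustrative:

	// Sketch only: swap the first container's image, then recycle the pods
	// one every ten seconds and wait for the controller to restore the count.
	if err := Update(ctx, "frontend", client, 10*time.Second, "nginx:new"); err != nil {
		return err
	}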
Example No. 8
// NewDeploymentConfigDescriber returns a new DeploymentConfigDescriber.
func NewDeploymentConfigDescriber(client client.Interface, kclient kclient.Interface) *DeploymentConfigDescriber {
	return &DeploymentConfigDescriber{
		client: &genericDeploymentDescriberClient{
			getDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return client.DeploymentConfigs(namespace).Get(name)
			},
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return kclient.ReplicationControllers(namespace).Get(name)
			},
			listDeploymentsFunc: func(namespace string, selector labels.Selector) (*kapi.ReplicationControllerList, error) {
				return kclient.ReplicationControllers(namespace).List(selector)
			},
			listPodsFunc: func(namespace string, selector labels.Selector) (*kapi.PodList, error) {
				return kclient.Pods(namespace).List(selector, fields.Everything())
			},
			listEventsFunc: func(deploymentConfig *deployapi.DeploymentConfig) (*kapi.EventList, error) {
				return kclient.Events(deploymentConfig.Namespace).Search(deploymentConfig)
			},
		},
	}
}
Example No. 9
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler: scaler,
		codec:  codec,
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example No. 10
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface) *Deployer {
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &Deployer{
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Template.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, latest.Codec), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreateStrategy := recreate.NewRecreateDeploymentStrategy(client, latest.Codec)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, latest.Codec, recreateStrategy), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Template.Strategy.Type)
			}
		},
	}
}
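Since strategyFor is an unexported field, a sketch of the dispatch is only meaningful inside the same package; config is assumed to be a *deployapi.DeploymentConfig obtained elsewhere:

	// Sketch only (in-package): unsupported strategy types surface as an
	// error rather than a panic.
	d := NewDeployer(client)
	s, err := d.strategyFor(config)
	if err != nil {
		return err
	}
	_ = s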
Example No. 11
// loadReplicationControllers adds one node per replication controller in the
// namespace to the graph. graphLock is shared by all loaders and is therefore
// passed by pointer; copying a sync.Mutex would make the locking ineffective.
func loadReplicationControllers(g osgraph.Graph, graphLock *sync.Mutex, namespace string, kclient kclient.Interface, client client.Interface) error {
	rcs, err := kclient.ReplicationControllers(namespace).List(labels.Everything())
	if err != nil {
		return err
	}

	graphLock.Lock()
	defer graphLock.Unlock()
	for i := range rcs.Items {
		kubegraph.EnsureReplicationControllerNode(g, &rcs.Items[i])
	}

	return nil
}
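A concurrency sketch, assuming g, kclient, and client come from the surrounding setup; all loaders share the same *sync.Mutex, so node insertion stays serialized:

	// Sketch only: run the loader in a goroutine alongside other loaders.
	var graphLock sync.Mutex
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := loadReplicationControllers(g, &graphLock, "default", kclient, client); err != nil {
			fmt.Println(err) // collect or report as needed
		}
	}()
	wg.Wait()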
Example No. 12
func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client client.Interface, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) {
	var err error
	// First, update the template label. This ensures that any newly created
	// pods will have the new label.
	if oldRc, err = updateWithRetries(client.ReplicationControllers(namespace), oldRc, func(rc *api.ReplicationController) {
		if rc.Spec.Template.Labels == nil {
			rc.Spec.Template.Labels = map[string]string{}
		}
		rc.Spec.Template.Labels[deploymentKey] = deploymentValue
	}); err != nil {
		return nil, err
	}

	// Update all pods managed by the rc to have the new hash label, so they are correctly adopted
	// TODO: extract the code from the label command and re-use it here.
	podList, err := client.Pods(namespace).List(labels.SelectorFromSet(oldRc.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if pod.Labels == nil {
			pod.Labels = map[string]string{
				deploymentKey: deploymentValue,
			}
		} else {
			pod.Labels[deploymentKey] = deploymentValue
		}
		err = nil
		delay := 3
		for i := 0; i < MaxRetries; i++ {
			_, err = client.Pods(namespace).Update(pod)
			if err != nil {
				fmt.Fprintf(out, "Error updating pod (%v), retrying after %d seconds\n", err, delay)
				time.Sleep(time.Second * time.Duration(delay))
				// Note: the delay squares on each retry (3s, 9s, 81s, ...).
				delay *= delay
			} else {
				break
			}
		}
		if err != nil {
			return nil, err
		}
	}

	if oldRc.Spec.Selector == nil {
		oldRc.Spec.Selector = map[string]string{}
	}
	// Copy the old selector, so that we can scrub out any orphaned pods
	selectorCopy := map[string]string{}
	for k, v := range oldRc.Spec.Selector {
		selectorCopy[k] = v
	}
	oldRc.Spec.Selector[deploymentKey] = deploymentValue

	// Update the selector of the rc so it manages all the pods we updated above
	if oldRc, err = updateWithRetries(client.ReplicationControllers(namespace), oldRc, func(rc *api.ReplicationController) {
		rc.Spec.Selector[deploymentKey] = deploymentValue
	}); err != nil {
		return nil, err
	}

	// Clean up any orphaned pods that don't have the new label, this can happen if the rc manager
	// doesn't see the update to its pod template and creates a new pod with the old labels after
	// we've finished re-adopting existing pods to the rc.
	podList, err = client.Pods(namespace).List(labels.SelectorFromSet(selectorCopy), fields.Everything())
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue {
			if err := client.Pods(namespace).Delete(pod.Name, nil); err != nil {
				return nil, err
			}
		}
	}

	return oldRc, nil
}
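The function works in four phases: stamp the pod template, relabel the live pods (with retries), widen the controller's selector, and finally scrub orphans that still match only the old selector. A minimal calling sketch, with an illustrative key and value:

	// Sketch only: tag oldRc and its pods with a deployment hash so a rolling
	// update can tell old pods from new ones.
	rc, err := AddDeploymentKeyToReplicationController(oldRc, client, "deployment", "hash-1234", "default", os.Stdout)
	if err != nil {
		return err
	}
	_ = rc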