Example #1
// updateRcWithRetries retries updating the given rc on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updateRcWithRetries(c client.Interface, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
	// Deep copy the rc in case the Get fails during the retry loop.
	obj, err := api.Scheme.Copy(rc)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
	}
	oldRc := obj.(*api.ReplicationController)
	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(rc)
		if rc, e = c.ReplicationControllers(namespace).Update(rc); e == nil {
			// rc contains the latest controller post update
			return
		}
		updateErr := e
		// Update the controller with the latest resource version. If the update
		// failed, we can't trust rc, so use oldRc.Name on the Get.
		if rc, e = c.ReplicationControllers(namespace).Get(oldRc.Name); e != nil {
			// The Get failed: Value in rc cannot be trusted.
			rc = oldRc
		}
		// Only return the error from update
		return updateErr
	})
	// If the error is non-nil, the returned controller cannot be trusted; if it is
	// nil, the returned controller contains the applied update.
	return rc, err
}
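
A minimal usage sketch, assuming the caller already holds a client c and a controller rc from the surrounding code; the namespace, helper name, and annotation key below are illustrative, not part of the original:

func touchRc(c client.Interface, rc *api.ReplicationController) (*api.ReplicationController, error) {
	// Hypothetical mutation: stamp an annotation on the rc under conflict retries.
	// Any mutation works inside applyUpdate; it is re-applied on each retry.
	return updateRcWithRetries(c, "default", rc, func(rc *api.ReplicationController) {
		if rc.Annotations == nil {
			rc.Annotations = map[string]string{}
		}
		rc.Annotations["example/touched"] = "true"
	})
}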
Example #2
func GetServerVersion(w io.Writer, kubeClient client.Interface) {
	serverVersion, err := kubeClient.Discovery().ServerVersion()
	if err != nil {
		fmt.Printf("Couldn't read server version from server: %v\n", err)
		os.Exit(1)
	}

	fmt.Fprintf(w, "Server Version: %#v\n", *serverVersion)
}
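
A minimal usage sketch, assuming kubeClient was constructed elsewhere; since the function exits the process on failure, there is no error for the caller to handle:

// Hypothetical call site: write the server version to stdout.
GetServerVersion(os.Stdout, kubeClient)

Because the success path prints with %#v, the output is the Go-syntax form of the whole version struct, including fields such as the git version and platform.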
Example #3
func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) {
	containerIndex := 0
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(cfg.Namespace).Get(cfg.OldName)
	if err != nil {
		return nil, err
	}

	if len(cfg.Container) != 0 {
		containerFound := false

		for i, c := range newRc.Spec.Template.Spec.Containers {
			if c.Name == cfg.Container {
				containerIndex = i
				containerFound = true
				break
			}
		}

		if !containerFound {
			return nil, fmt.Errorf("container %s not found in pod", cfg.Container)
		}
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 {
		return nil, goerrors.New("Must specify container to update when updating a multi-container pod")
	}

	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, fmt.Errorf("pod has no containers: (%v)", newRc)
	}
	newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image
	if len(cfg.PullPolicy) != 0 {
		newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy
	}

	newHash, err := api.HashObject(newRc, codec)
	if err != nil {
		return nil, err
	}

	if len(cfg.NewName) == 0 {
		cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = cfg.NewName

	newRc.Spec.Selector[cfg.DeploymentKey] = newHash
	newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
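
A minimal sketch of a call site, assuming c and codec come from the surrounding code; every field value and the helper name are illustrative:

func newFrontendRc(c client.Interface, codec runtime.Codec) (*api.ReplicationController, error) {
	// Hypothetical config: point the single container of the "frontend"
	// controller at a new image and tag the result with a deployment key.
	cfg := &NewControllerConfig{
		Namespace:     "default",
		OldName:       "frontend",
		Image:         "nginx:1.9.1",
		DeploymentKey: "deployment",
	}
	// The returned rc is named either cfg.NewName or "<old-name>-<hash>".
	return CreateNewControllerFromCurrentController(c, codec, cfg)
}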
Example #4
// updatePodWithRetries retries updating the given pod on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updatePodWithRetries(c client.Interface, namespace string, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) {
	// Deep copy the pod in case the Get fails during the retry loop.
	obj, err := api.Scheme.Copy(pod)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err)
	}
	oldPod := obj.(*api.Pod)
	err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(pod)
		if pod, e = c.Pods(namespace).Update(pod); e == nil {
			// pod contains the latest pod post update
			return
		}
		updateErr := e
		// Update the pod with the latest resource version. If the update failed,
		// we can't trust pod, so use oldPod.Name on the Get.
		if pod, e = c.Pods(namespace).Get(oldPod.Name); e != nil {
			// The Get failed: value in pod cannot be trusted.
			pod = oldPod
		}
		// Only return the error from update
		return updateErr
	})
	// If the error is non-nil, the returned pod cannot be trusted; if it is nil,
	// the returned pod contains the applied update.
	return pod, err
}
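
A minimal usage sketch, assuming c and pod are in scope; the label key and value are illustrative, mirroring how Example #5 below uses this helper:

func labelPod(c client.Interface, pod *api.Pod) (*api.Pod, error) {
	// Hypothetical mutation: add a label to the pod under conflict retries.
	return updatePodWithRetries(c, "default", pod, func(p *api.Pod) {
		if p.Labels == nil {
			p.Labels = map[string]string{}
		}
		p.Labels["deployment"] = "v2"
	})
}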
Example #5
func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client client.Interface, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) {
	var err error
	// First, update the template label. This ensures that any newly created pods will have the new label.
	applyUpdate := func(rc *api.ReplicationController) {
		if rc.Spec.Template.Labels == nil {
			rc.Spec.Template.Labels = map[string]string{}
		}
		rc.Spec.Template.Labels[deploymentKey] = deploymentValue
	}
	if oldRc, err = updateRcWithRetries(client, namespace, oldRc, applyUpdate); err != nil {
		return nil, err
	}

	// Update all pods managed by the rc to have the new hash label, so they are correctly adopted
	// TODO: extract the code from the label command and re-use it here.
	selector := labels.SelectorFromSet(oldRc.Spec.Selector)
	options := api.ListOptions{LabelSelector: selector}
	podList, err := client.Pods(namespace).List(options)
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		applyUpdate := func(p *api.Pod) {
			if p.Labels == nil {
				p.Labels = map[string]string{
					deploymentKey: deploymentValue,
				}
			} else {
				p.Labels[deploymentKey] = deploymentValue
			}
		}
		if pod, err = updatePodWithRetries(client, namespace, pod, applyUpdate); err != nil {
			return nil, err
		}
	}

	if oldRc.Spec.Selector == nil {
		oldRc.Spec.Selector = map[string]string{}
	}
	// Copy the old selector, so that we can scrub out any orphaned pods
	selectorCopy := map[string]string{}
	for k, v := range oldRc.Spec.Selector {
		selectorCopy[k] = v
	}
	applyUpdate = func(rc *api.ReplicationController) {
		rc.Spec.Selector[deploymentKey] = deploymentValue
	}
	// Update the selector of the rc so it manages all the pods we updated above
	if oldRc, err = updateRcWithRetries(client, namespace, oldRc, applyUpdate); err != nil {
		return nil, err
	}

	// Clean up any orphaned pods that don't have the new label, this can happen if the rc manager
	// doesn't see the update to its pod template and creates a new pod with the old labels after
	// we've finished re-adopting existing pods to the rc.
	selector = labels.SelectorFromSet(selectorCopy)
	options = api.ListOptions{LabelSelector: selector}
	podList, err = client.Pods(namespace).List(options)
	if err != nil {
		return nil, err
	}
	for ix := range podList.Items {
		pod := &podList.Items[ix]
		if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue {
			if err := client.Pods(namespace).Delete(pod.Name, nil); err != nil {
				return nil, err
			}
		}
	}

	return oldRc, nil
}
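
A minimal usage sketch, assuming kubeClient, oldRc, and a precomputed hash come from the surrounding code; the deployment key, namespace, and helper name are illustrative:

func adoptWithHash(kubeClient client.Interface, oldRc *api.ReplicationController, hash string) (*api.ReplicationController, error) {
	// Hypothetical call: relabel the rc's pod template, its live pods, and its
	// selector with the new key, then let the helper prune orphaned pods.
	return AddDeploymentKeyToReplicationController(oldRc, kubeClient, "deployment", hash, "default", os.Stdout)
}

Note that the out writer is accepted but never written to in the body above, so os.Stdout is merely a safe placeholder.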