Code example #1
File: main.go Project: rudle/p2
func parseNodeSelectorWithPrompt(
	oldSelector klabels.Selector,
	newSelectorString string,
	applicator labels.Applicator,
) (klabels.Selector, error) {
	newSelector, err := parseNodeSelector(newSelectorString)
	if err != nil {
		return newSelector, err
	}
	if oldSelector.String() == newSelector.String() {
		return newSelector, nil
	}

	newNodeLabels, err := applicator.GetMatches(newSelector, labels.NODE, false)
	if err != nil {
		return newSelector, util.Errorf("Error getting matching labels: %v", err)
	}

	oldNodeLabels, err := applicator.GetMatches(oldSelector, labels.NODE, false)
	if err != nil {
		return newSelector, util.Errorf("Error getting matching labels: %v", err)
	}

	toRemove, toAdd := makeNodeChanges(oldNodeLabels, newNodeLabels)

	fmt.Printf("Changing deployment from '%v' to '%v':\n", oldSelector.String(), newSelectorString)
	fmt.Printf("Removing:%9s hosts %s\n", fmt.Sprintf("-%v", len(toRemove)), toRemove)
	fmt.Printf("Adding:  %9s hosts %s\n", fmt.Sprintf("+%v", len(toAdd)), toAdd)
	fmt.Println("Continue?")
	if !confirm() {
		return newSelector, util.Errorf("User cancelled")
	}

	return newSelector, nil
}
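A hypothetical caller sketch, not from the p2 source; the oldSelector and applicator values are assumed to exist, and the selector string is illustrative:

// Assumed: oldSelector is the selector currently deployed and applicator
// is an initialized labels.Applicator.
newSelector, err := parseNodeSelectorWithPrompt(oldSelector, "az=us-east-1,env=prod", applicator)
if err != nil {
	return err
}
fmt.Println("confirmed new selector:", newSelector.String())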
Code example #2
File: metrics_client.go Project: ncdc/kubernetes
func (cli *HeapsterMetricsClient) GetNodeMetrics(nodeName string, selector labels.Selector) ([]metrics_api.NodeMetrics, error) {
	params := map[string]string{"labelSelector": selector.String()}
	path, err := nodeMetricsUrl(nodeName)
	if err != nil {
		return []metrics_api.NodeMetrics{}, err
	}
	resultRaw, err := GetHeapsterMetrics(cli, path, params)
	if err != nil {
		return []metrics_api.NodeMetrics{}, err
	}
	metrics := make([]metrics_api.NodeMetrics, 0)
	if len(nodeName) == 0 {
		metricsList := metrics_api.NodeMetricsList{}
		err = json.Unmarshal(resultRaw, &metricsList)
		if err != nil {
			return []metrics_api.NodeMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
		}
		metrics = append(metrics, metricsList.Items...)
	} else {
		var singleMetric metrics_api.NodeMetrics
		err = json.Unmarshal(resultRaw, &singleMetric)
		if err != nil {
			return []metrics_api.NodeMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
		}
		metrics = append(metrics, singleMetric)
	}
	return metrics, nil
}
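A minimal usage sketch, assuming an initialized *HeapsterMetricsClient; per the branch above, an empty node name requests metrics for every node matching the selector:

// Assumed: cli was constructed elsewhere. labels.Everything() matches all nodes.
nodeMetrics, err := cli.GetNodeMetrics("", labels.Everything())
if err != nil {
	return err
}
for _, m := range nodeMetrics {
	fmt.Printf("node %s usage: %v\n", m.Name, m.Usage)
}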
Code example #3
File: metrics_client.go Project: ncdc/kubernetes
func (cli *HeapsterMetricsClient) GetPodMetrics(namespace string, podName string, allNamespaces bool, selector labels.Selector) ([]metrics_api.PodMetrics, error) {
	if allNamespaces {
		namespace = api.NamespaceAll
	}
	path, err := podMetricsUrl(namespace, podName)
	if err != nil {
		return []metrics_api.PodMetrics{}, err
	}

	params := map[string]string{"labelSelector": selector.String()}
	allMetrics := make([]metrics_api.PodMetrics, 0)

	resultRaw, err := GetHeapsterMetrics(cli, path, params)
	if err != nil {
		return []metrics_api.PodMetrics{}, err
	}
	if len(podName) == 0 {
		metrics := metrics_api.PodMetricsList{}
		err = json.Unmarshal(resultRaw, &metrics)
		if err != nil {
			return []metrics_api.PodMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
		}
		allMetrics = append(allMetrics, metrics.Items...)
	} else {
		var singleMetric metrics_api.PodMetrics
		err = json.Unmarshal(resultRaw, &singleMetric)
		if err != nil {
			return []metrics_api.PodMetrics{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
		}
		allMetrics = append(allMetrics, singleMetric)
	}
	return allMetrics, nil
}
Code example #4
File: consul_aggregator.go Project: petertseng/p2
// Add a new selector to the aggregator. New values on the output channel may not appear
// right away.
func (c *consulAggregator) Watch(selector labels.Selector, quitCh <-chan struct{}) chan []Labeled {
	resCh := make(chan []Labeled, 1) // this buffer is useful in sendMatches(), below
	select {
	case <-c.aggregatorQuit:
		c.logger.WithField("selector", selector.String()).Warnln("New selector added after aggregator was closed")
		close(resCh)
		return resCh
	default:
	}
	c.watcherLock.Lock()
	defer c.watcherLock.Unlock()
	watch := &selectorWatch{
		selector: selector,
		resultCh: resCh,
	}
	if c.watchers == nil {
		c.watchers = watch
	} else {
		c.watchers.append(watch)
	}
	if c.labeledCache != nil {
		c.sendMatches(watch)
	}
	go func() {
		select {
		case <-quitCh:
		case <-c.aggregatorQuit:
		}
		c.removeWatch(watch)
	}()
	c.metWatchCount.Update(int64(c.watchers.len()))
	return watch.resultCh
}
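A sketch of consuming the watch channel; the aggregator and selector values are assumed to come from elsewhere in p2:

// Assumed: aggregator is a *consulAggregator and selector a labels.Selector.
quit := make(chan struct{})
resultCh := aggregator.Watch(selector, quit)
go func() {
	for labeled := range resultCh {
		fmt.Printf("selector now matches %d entities\n", len(labeled))
	}
}()
// Closing quit later unregisters the watch, per the goroutine above.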
Code example #5
File: http_applicator.go Project: drcapulet/p2
func (h *httpApplicator) GetMatches(selector labels.Selector, labelType Type) ([]Labeled, error) {
	params := url.Values{}
	params.Add("selector", selector.String())
	params.Add("type", labelType.String())

	// Make value copy of URL; don't want to mutate the URL in the struct.
	urlToGet := *h.matchesEndpoint
	urlToGet.RawQuery = params.Encode()

	req, err := http.NewRequest("GET", urlToGet.String(), nil)
	if err != nil {
		return []Labeled{}, err
	}
	req.Header.Add("Accept", "application/json")

	resp, err := h.client.Do(req)
	if err != nil {
		return []Labeled{}, err
	}
	defer resp.Body.Close()

	bodyData, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return []Labeled{}, err
	}

	matches := []string{}
	err = json.Unmarshal(bodyData, &matches)
	if err != nil {
		l := len(bodyData)
		if l > 80 {
			l = 80
		}
		return []Labeled{}, util.Errorf(
			"bad response from http applicator %s: %s: %s %q",
			urlToGet.String(),
			err,
			resp.Status,
			string(bodyData[:l]),
		)
	}

	labeled := make([]Labeled, len(matches))

	for i, s := range matches {
		labeled[i] = Labeled{
			ID:        s,
			LabelType: labelType,
			Labels:    labels.Set{},
		}
	}

	return labeled, nil
}
Code example #6
File: metrics_client.go Project: ncdc/kubernetes
func (h *HeapsterMetricsClient) getCpuUtilizationForPods(namespace string, selector labels.Selector, podNames map[string]struct{}) (int64, time.Time, error) {
	metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
	params := map[string]string{"labelSelector": selector.String()}

	resultRaw, err := h.client.Core().Services(h.heapsterNamespace).
		ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params).
		DoRaw()
	if err != nil {
		return 0, time.Time{}, fmt.Errorf("failed to get pods metrics: %v", err)
	}

	glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

	metrics := metrics_api.PodMetricsList{}
	err = json.Unmarshal(resultRaw, &metrics)
	if err != nil {
		return 0, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
	}

	if len(metrics.Items) != len(podNames) {
		present := sets.NewString()
		for _, m := range metrics.Items {
			present.Insert(m.Name)
		}
		missing := make([]string, 0)
		for expected := range podNames {
			if !present.Has(expected) {
				missing = append(missing, expected)
			}
		}
		hint := ""
		if len(missing) > 0 {
			hint = fmt.Sprintf(" (sample missing pod: %s/%s)", namespace, missing[0])
		}
		return 0, time.Time{}, fmt.Errorf("metrics obtained for %d/%d of pods%s", len(metrics.Items), len(podNames), hint)
	}

	sum := int64(0)
	for _, m := range metrics.Items {
		if _, found := podNames[m.Name]; found {
			for _, c := range m.Containers {
				cpu, found := c.Usage[v1.ResourceCPU]
				if !found {
					return 0, time.Time{}, fmt.Errorf("no cpu for container %v in pod %v/%v", c.Name, namespace, m.Name)
				}
				sum += cpu.MilliValue()
			}
		} else {
			return 0, time.Time{}, fmt.Errorf("unexpected metrics for pod %v/%v", namespace, m.Name)
		}
	}

	return sum / int64(len(metrics.Items)), metrics.Items[0].Timestamp.Time, nil
}
Code example #7
File: request.go Project: alex-mohr/kubernetes
// LabelsSelectorParam adds the given selector as a query parameter
func (r *Request) LabelsSelectorParam(s labels.Selector) *Request {
	if r.err != nil {
		return r
	}
	if s == nil {
		return r
	}
	if s.Empty() {
		return r
	}
	return r.setParam(metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String())
}
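Because nil and empty selectors return the request unchanged, callers can chain this unconditionally. A hedged sketch; the rest client value is assumed:

// Assumed: client exposes the Request builder shown above.
sel, err := labels.Parse("app=nginx,tier!=canary")
if err != nil {
	return err
}
result := client.Get().
	Namespace("default").
	Resource("pods").
	LabelsSelectorParam(sel).
	Do()
return result.Error()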
Code example #8
File: request.go Project: hvescovi/kubernetes
// LabelsSelectorParam adds the given selector as a query parameter
func (r *Request) LabelsSelectorParam(s labels.Selector) *Request {
	if r.err != nil {
		return r
	}
	if s == nil {
		return r
	}
	if s.Empty() {
		return r
	}
	return r.setParam(api.LabelSelectorQueryParam(r.apiVersion), s.String())
}
Code example #9
File: metrics_client.go Project: maisem/kubernetes
func (h *HeapsterMetricsClient) GetResourceMetric(resource api.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) {
	metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
	params := map[string]string{"labelSelector": selector.String()}

	resultRaw, err := h.services.
		ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params).
		DoRaw()
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err)
	}

	glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

	metrics := metrics_api.PodMetricsList{}
	err = json.Unmarshal(resultRaw, &metrics)
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
	}

	if len(metrics.Items) == 0 {
		return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
	}

	res := make(PodResourceInfo, len(metrics.Items))

	for _, m := range metrics.Items {
		podSum := int64(0)
		missing := len(m.Containers) == 0
		for _, c := range m.Containers {
			resValue, found := c.Usage[v1.ResourceName(resource)]
			if !found {
				missing = true
				glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
				continue
			}
			podSum += resValue.MilliValue()
		}

		if !missing {
			res[m.Name] = int64(podSum)
		}
	}

	timestamp := time.Time{}
	if len(metrics.Items) > 0 {
		timestamp = metrics.Items[0].Timestamp.Time
	}

	return res, timestamp, nil
}
Code example #10
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of node names on which these containers restarted.
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
	options := v1.ListOptions{LabelSelector: labelSelector.String()}
	pods, err := c.Core().Pods(ns).List(options)
	framework.ExpectNoError(err)
	failedContainers := 0
	containerRestartNodes := sets.NewString()
	for _, p := range pods.Items {
		for _, v := range testutils.FailedContainers(&p) {
			failedContainers = failedContainers + v.Restarts
			containerRestartNodes.Insert(p.Spec.NodeName)
		}
	}
	return failedContainers, containerRestartNodes.List()
}
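A hypothetical call; the clientset and namespace are assumed:

// Assumed: c is a clientset.Interface and ns an existing namespace.
sel := labels.SelectorFromSet(labels.Set{"app": "webserver"})
restarts, nodes := getContainerRestarts(c, ns, sel)
fmt.Printf("%d container restarts across nodes %v\n", restarts, nodes)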
Code example #11
File: framework.go Project: gambol99/kubernetes
// filterLabels returns a list of pods which have labels.
func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
	var err error
	var selector labels.Selector
	var pl *v1.PodList
	// List pods based on selectors.  This might be a tiny optimization rather than filtering
	// everything manually.
	if len(selectors) > 0 {
		selector = labels.SelectorFromSet(labels.Set(selectors))
		options := v1.ListOptions{LabelSelector: selector.String()}
		pl, err = cli.Core().Pods(ns).List(options)
	} else {
		pl, err = cli.Core().Pods(ns).List(v1.ListOptions{})
	}
	return pl, err
}
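A sketch of a call site; the clientset value is assumed, and an empty selector map falls through to the unfiltered list, per the else branch above:

// Assumed: cli is a clientset.Interface.
pl, err := filterLabels(map[string]string{"k8s-app": "kube-dns"}, cli, "kube-system")
if err != nil {
	return err
}
fmt.Printf("matched %d pods\n", len(pl.Items))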
Code example #12
File: pod_store.go Project: alex-mohr/kubernetes
func NewPodStore(c clientset.Interface, namespace string, label labels.Selector, field fields.Selector) *PodStore {
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			options.LabelSelector = label.String()
			options.FieldSelector = field.String()
			obj, err := c.Core().Pods(namespace).List(options)
			return runtime.Object(obj), err
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			options.LabelSelector = label.String()
			options.FieldSelector = field.String()
			return c.Core().Pods(namespace).Watch(options)
		},
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	stopCh := make(chan struct{})
	reflector := cache.NewReflector(lw, &v1.Pod{}, store, 0)
	reflector.RunUntil(stopCh)
	return &PodStore{Store: store, stopCh: stopCh, Reflector: reflector}
}
Code example #13
// Wait up to 10 minutes for at least one matching pod to exist and for all
// matching pods to become Running.
func WaitForPodsWithLabelRunning(c clientset.Interface, ns string, label labels.Selector) error {
	running := false
	PodStore := NewPodStore(c, ns, label, fields.Everything())
	defer PodStore.Stop()
waitLoop:
	for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) {
		pods := PodStore.List()
		if len(pods) == 0 {
			continue waitLoop
		}
		for _, p := range pods {
			if p.Status.Phase != api.PodRunning {
				continue waitLoop
			}
		}
		running = true
		break
	}
	if !running {
		return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
	}
	return nil
}
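A usage sketch; the clientset is assumed:

// Assumed: c is a clientset.Interface.
sel := labels.SelectorFromSet(labels.Set{"app": "nginx"})
if err := WaitForPodsWithLabelRunning(c, "default", sel); err != nil {
	return err
}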
Code example #14
func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
	podList, err := h.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
	}

	if len(podList.Items) == 0 {
		return nil, time.Time{}, fmt.Errorf("no pods matched the provided selector")
	}

	podNames := make([]string, len(podList.Items))
	for i, pod := range podList.Items {
		podNames[i] = pod.Name
	}

	now := time.Now()

	startTime := now.Add(heapsterQueryStart)
	metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
		namespace,
		strings.Join(podNames, ","),
		metricName)

	resultRaw, err := h.services.
		ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
		DoRaw()
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to get heapster service: %v", err)
	}

	var metrics heapster.MetricResultList
	err = json.Unmarshal(resultRaw, &metrics)
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
	}

	glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

	if len(metrics.Items) != len(podNames) {
		// if we get too many metrics or too few metrics, we have no way of knowing which metric goes to which pod
		// (note that Heapster returns *empty* metric items when a pod does not exist or have that metric, so this
		// does not cover the "missing metric entry" case)
		return nil, time.Time{}, fmt.Errorf("requested metrics for %v pods, got metrics for %v", len(podNames), len(metrics.Items))
	}

	var timestamp *time.Time
	res := make(PodMetricsInfo, len(metrics.Items))
	for i, podMetrics := range metrics.Items {
		val, podTimestamp, hadMetrics := collapseTimeSamples(podMetrics, time.Minute)
		if hadMetrics {
			res[podNames[i]] = val
			if timestamp == nil || podTimestamp.Before(*timestamp) {
				timestamp = &podTimestamp
			}
		}
	}

	if timestamp == nil {
		timestamp = &time.Time{}
	}

	return res, *timestamp, nil
}
Code example #15
File: http_applicator.go Project: petertseng/p2
// Finds all matches for the given type and selector.
//
// GET /api/select?selector=:selector&type=:type&cachedMatch=:cachedMatch
func (h *httpApplicator) GetMatches(selector labels.Selector, labelType Type, cachedMatch bool) ([]Labeled, error) {
	params := url.Values{}
	params.Add("selector", selector.String())
	params.Add("type", labelType.String())
	params.Add("cachedMatch", strconv.FormatBool(cachedMatch))

	// Make value copy of URL; don't want to mutate the URL in the struct.
	urlToGet := h.toURL("/api/select", params)

	req, err := http.NewRequest("GET", urlToGet.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Accept", "application/json")

	resp, err := h.client.Do(req)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()

	bodyData, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return []Labeled{}, err
	}

	// try to unmarshal as a set of labeled objects.
	var labeled []Labeled
	err = json.Unmarshal(bodyData, &labeled)
	if err == nil {
		return labeled, nil
	}
	h.logger.Warnln(err)

	// fallback to a list of IDs
	matches := []string{}
	err = json.Unmarshal(bodyData, &matches)
	if err != nil {
		l := len(bodyData)
		if l > 80 {
			l = 80
		}
		return []Labeled{}, util.Errorf(
			"bad response from http applicator %s: %s: %s %q",
			urlToGet.String(),
			err,
			resp.Status,
			string(bodyData[:l]),
		)
	}

	labeled = make([]Labeled, len(matches))

	for i, s := range matches {
		labeled[i] = Labeled{
			ID:        s,
			LabelType: labelType,
			Labels:    labels.Set{},
		}
	}

	return labeled, nil
}
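A hedged call sketch; construction of the applicator is assumed, and cachedMatch=true asks the server to answer from its cache, per the query parameter above:

// Assumed: h was built elsewhere with the label server's base URL, and
// NODE is a label Type constant from the surrounding package.
sel, err := labels.Parse("env=prod")
if err != nil {
	return nil, err
}
matches, err := h.GetMatches(sel, NODE, true)
if err != nil {
	return nil, err
}
fmt.Printf("selector matched %d nodes\n", len(matches))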
Code example #16
// GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage
// of the given resource for pods matching the given selector in the given namespace, and the current replica count
func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, timestamp time.Time, err error) {
	metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
	if err != nil {
		return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
	}

	podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
	}

	if len(podList.Items) == 0 {
		return 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
	}

	requests := make(map[string]int64, len(podList.Items))
	readyPodCount := 0
	unreadyPods := sets.NewString()
	missingPods := sets.NewString()

	for _, pod := range podList.Items {
		podSum := int64(0)
		for _, container := range pod.Spec.Containers {
			if containerRequest, ok := container.Resources.Requests[resource]; ok {
				podSum += containerRequest.MilliValue()
			} else {
				return 0, 0, time.Time{}, fmt.Errorf("missing request for %s on container %s in pod %s/%s", resource, container.Name, namespace, pod.Name)
			}
		}

		requests[pod.Name] = podSum

		if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
			// save this pod name for later, but pretend it doesn't exist for now
			unreadyPods.Insert(pod.Name)
			delete(metrics, pod.Name)
			continue
		}

		if _, found := metrics[pod.Name]; !found {
			// save this pod name for later, but pretend it doesn't exist for now
			missingPods.Insert(pod.Name)
			continue
		}

		readyPodCount++
	}

	if len(metrics) == 0 {
		return 0, 0, time.Time{}, fmt.Errorf("did not receive metrics for any ready pods")
	}

	usageRatio, utilization, err := metricsclient.GetResourceUtilizationRatio(metrics, requests, targetUtilization)
	if err != nil {
		return 0, 0, time.Time{}, err
	}

	rebalanceUnready := len(unreadyPods) > 0 && usageRatio > 1.0
	if !rebalanceUnready && len(missingPods) == 0 {
		if math.Abs(1.0-usageRatio) <= tolerance {
			// return the current replicas if the change would be too small
			return currentReplicas, utilization, timestamp, nil
		}

		// if we don't have any unready or missing pods, we can calculate the new replica count now
		return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, timestamp, nil
	}

	if len(missingPods) > 0 {
		if usageRatio < 1.0 {
			// on a scale-down, treat missing pods as using 100% of the resource request
			for podName := range missingPods {
				metrics[podName] = requests[podName]
			}
		} else if usageRatio > 1.0 {
			// on a scale-up, treat missing pods as using 0% of the resource request
			for podName := range missingPods {
				metrics[podName] = 0
			}
		}
	}

	if rebalanceUnready {
		// on a scale-up, treat unready pods as using 0% of the resource request
		for podName := range unreadyPods {
			metrics[podName] = 0
		}
	}

	// re-run the utilization calculation with our new numbers
	newUsageRatio, _, err := metricsclient.GetResourceUtilizationRatio(metrics, requests, targetUtilization)
	if err != nil {
		return 0, utilization, time.Time{}, err
	}

	if math.Abs(1.0-newUsageRatio) <= tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
		// return the current replicas if the change would be too small,
		// or if the new usage ratio would cause a change in scale direction
		return currentReplicas, utilization, timestamp, nil
	}

	// return the result, where the number of replicas considered is
	// however many replicas factored into our calculation
	return int32(math.Ceil(newUsageRatio * float64(len(metrics)))), utilization, timestamp, nil
}
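A hedged invocation sketch; the calculator, current replica count, and selector are assumed, and 80 is an illustrative target utilization percentage:

// Assumed: calc is a *ReplicaCalculator and sel a labels.Selector.
replicas, utilization, ts, err := calc.GetResourceReplicas(currentReplicas, 80, v1.ResourceCPU, "default", sel)
if err != nil {
	return err
}
fmt.Printf("want %d replicas (utilization %d%% at %s)\n", replicas, utilization, ts)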
Code example #17
File: consul_store.go Project: petertseng/p2
// Creates a rolling update that may or may not already have an existing old
// RC. If one matches the oldRCSelector, it will be used as the old RC in the
// new update.  If one does not exist, a "dummy" old RC will be created that is
// identical to the specifications for the new RC.
// Returns an error if the old RC exists but is part of another RU, or if
// the label selector returns more than one match.
func (s consulStore) CreateRollingUpdateFromOneMaybeExistingWithLabelSelector(
	oldRCSelector klabels.Selector,
	desiredReplicas int,
	minimumReplicas int,
	leaveOld bool,
	rollDelay time.Duration,
	newRCManifest manifest.Manifest,
	newRCNodeSelector klabels.Selector,
	newRCPodLabels klabels.Set,
	newRCLabels klabels.Set,
	rollLabels klabels.Set,
) (u roll_fields.Update, err error) {
	// This function may or may not create old and new RCs and subsequently
	// fail, so we defer a function that does any cleanup (if applicable)
	var cleanupOldRC func()
	var cleanupNewRC func()

	defer func() {
		if err != nil {
			if cleanupOldRC != nil {
				cleanupOldRC()
			}

			if cleanupNewRC != nil {
				cleanupNewRC()
			}
		}
	}()

	session, renewalErrCh, err := s.newRUCreationSession()
	if err != nil {
		return roll_fields.Update{}, err
	}
	defer session.Destroy()

	// Check if any RCs match the oldRCSelector
	matches, err := s.labeler.GetMatches(oldRCSelector, labels.RC, false)
	if err != nil {
		return roll_fields.Update{}, err
	}

	var oldRCID rc_fields.ID
	if len(matches) > 1 {
		return roll_fields.Update{}, AmbiguousRCSelector
	} else if len(matches) == 1 {
		oldRCID = rc_fields.ID(matches[0].ID)
	} else {
		if leaveOld {
			return roll_fields.Update{}, util.Errorf(
				"Can't create an update with LeaveOld set if there is no old RC (sel=%s)",
				oldRCSelector.String(),
			)
		}

		// Create the old RC using the same info as the new RC, it'll be
		// removed when the update completes anyway
		rc, err := s.rcstore.Create(newRCManifest, newRCNodeSelector, newRCPodLabels)
		if err != nil {
			return roll_fields.Update{}, err
		}

		oldRCID = rc.ID
		cleanupOldRC = func() {
			err = s.rcstore.Delete(oldRCID, false)
			if err != nil {
				s.logger.WithError(err).Errorf("Unable to cleanup newly-created old RC %s after update creation failure:", oldRCID)
			}

			// Any labels we wrote will be deleted by rcstore.Delete()
		}

		// Copy the new RC labels to the old RC as well
		err = s.labeler.SetLabels(labels.RC, oldRCID.String(), newRCLabels)
		if err != nil {
			return roll_fields.Update{}, err
		}
	}

	// Lock the old RC to guarantee that no new updates can use it
	err = s.lockRCs(rc_fields.IDs{oldRCID}, session)
	if err != nil {
		return roll_fields.Update{}, err
	}

	// Check for updates that exist that operate on the old RC
	err = s.checkForConflictingUpdates(rc_fields.IDs{oldRCID})
	if err != nil {
		return roll_fields.Update{}, err
	}

	// Create the new RC
	var newRCID rc_fields.ID
	select {
	case err = <-renewalErrCh:
		return roll_fields.Update{}, err
	default:
		rc, err := s.rcstore.Create(newRCManifest, newRCNodeSelector, newRCPodLabels)
		if err != nil {
			return roll_fields.Update{}, err
		}

		newRCID = rc.ID
	}

	cleanupNewRC = func() {
		err = s.rcstore.Delete(newRCID, false)
		if err != nil {
			s.logger.WithError(err).Errorf("Unable to cleanup newly-created new RC %s after update creation failure:", newRCID)
		}

		// Any labels we wrote will be deleted by rcstore.Delete()
	}

	// lock newly-created new rc so it's less likely to race on it
	// with another parallel update creation
	err = s.lockRCs(rc_fields.IDs{newRCID}, session)
	if err != nil {
		return roll_fields.Update{}, err
	}

	// Check once again for conflicting updates in case a racing update
	// creation grabbed the new RC we just created
	err = s.checkForConflictingUpdates(rc_fields.IDs{newRCID})
	if err != nil {
		return roll_fields.Update{}, err
	}

	// Now that we know there are no RUs in progress, and we have the
	// update creation locks, we can safely apply labels.
	err = s.labeler.SetLabels(labels.RC, newRCID.String(), newRCLabels)
	if err != nil {
		return roll_fields.Update{}, err
	}

	u = roll_fields.Update{
		OldRC:           oldRCID,
		NewRC:           newRCID,
		DesiredReplicas: desiredReplicas,
		MinimumReplicas: minimumReplicas,
		LeaveOld:        leaveOld,
		RollDelay:       rollDelay,
	}
	return s.attemptRUCreation(u, rollLabels, renewalErrCh)
}