// PodsRequests returns the sum of each resource request for each pod in the list.
// If a given pod in the list does not have a request for the named resource, we log the error
// but still attempt to get the most representative count.
func PodsRequests(pods []*api.Pod, resourceName api.ResourceName) *resource.Quantity {
	var sum *resource.Quantity
	for i := range pods {
		pod := pods[i]
		podQuantity, err := PodRequests(pod, resourceName)
		if err != nil {
			// log the error, but keep the most accurate count possible; the rationale
			// is that the namespace may have had pods without explicit requests prior
			// to adding the quota
			glog.Infof("No explicit request for resource, pod %s/%s, %s", pod.Namespace, pod.Name, resourceName)
		} else {
			if sum == nil {
				sum = podQuantity
			} else {
				sum.Add(*podQuantity)
			}
		}
	}
	// if the list is empty, return a zero quantity
	if sum == nil {
		q := resource.MustParse("0")
		sum = &q
	}
	return sum
}
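// Hypothetical usage sketch (podA and podB are placeholders, not part of the
// original source): summing memory requests across a set of pods. Pods
// without an explicit request are logged and skipped rather than failing the
// whole sum.
func examplePodsRequests(podA, podB *api.Pod) {
	total := PodsRequests([]*api.Pod{podA, podB}, api.ResourceMemory)
	glog.Infof("total requested memory: %s", total.String())
}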
// The method verifies whether resources should be set for the given pod and,
// if an estimation is available, fills in the Requests field.
func (ir initialResources) estimateAndFillResourcesIfNotSet(pod *api.Pod) {
	annotations := []string{}
	for i := range pod.Spec.Containers {
		c := &pod.Spec.Containers[i]
		req := c.Resources.Requests
		lim := c.Resources.Limits
		var cpu, mem *resource.Quantity
		var err error
		if _, ok := req[api.ResourceCPU]; !ok {
			if _, ok2 := lim[api.ResourceCPU]; !ok2 {
				cpu, err = ir.getEstimation(api.ResourceCPU, c, pod.ObjectMeta.Namespace)
				if err != nil {
					glog.Errorf("Error while trying to estimate resources: %v", err)
				}
			}
		}
		if _, ok := req[api.ResourceMemory]; !ok {
			if _, ok2 := lim[api.ResourceMemory]; !ok2 {
				mem, err = ir.getEstimation(api.ResourceMemory, c, pod.ObjectMeta.Namespace)
				if err != nil {
					glog.Errorf("Error while trying to estimate resources: %v", err)
				}
			}
		}
		// If Requests doesn't exist and an estimation was made, create Requests.
		if req == nil && (cpu != nil || mem != nil) {
			c.Resources.Requests = api.ResourceList{}
			req = c.Resources.Requests
		}
		setRes := []string{}
		if cpu != nil {
			glog.Infof("CPU estimation for container %v in pod %v/%v is %v", c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, cpu.String())
			setRes = append(setRes, string(api.ResourceCPU))
			req[api.ResourceCPU] = *cpu
		}
		if mem != nil {
			glog.Infof("Memory estimation for container %v in pod %v/%v is %v", c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, mem.String())
			setRes = append(setRes, string(api.ResourceMemory))
			req[api.ResourceMemory] = *mem
		}
		if len(setRes) > 0 {
			a := strings.Join(setRes, ", ") + " request for container " + c.Name
			annotations = append(annotations, a)
		}
	}
	if len(annotations) > 0 {
		if pod.ObjectMeta.Annotations == nil {
			pod.ObjectMeta.Annotations = make(map[string]string)
		}
		val := "Initial Resources plugin set: " + strings.Join(annotations, "; ")
		pod.ObjectMeta.Annotations[initialResourcesAnnotation] = val
	}
}
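// Hypothetical sketch (the pod below is illustrative, not from the original
// source): a container that declares neither requests nor limits is a
// candidate for estimation. After the call, Requests holds any estimates that
// were available and initialResourcesAnnotation records what was set.
func exampleEstimateAndFill(ir initialResources) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: api.PodSpec{
			Containers: []api.Container{{Name: "app", Image: "nginx"}},
		},
	}
	ir.estimateAndFillResourcesIfNotSet(pod)
	// pod.Spec.Containers[0].Resources.Requests may now contain estimated
	// cpu/memory values; resources that already had a request or limit are
	// left untouched.
}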
// deepCopy_resource_Quantity copies a Quantity, deep-copying the inf.Dec
// amount so the copy does not alias the original's pointer.
func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error {
	if in.Amount != nil {
		if newVal, err := c.DeepCopy(in.Amount); err != nil {
			return err
		} else {
			out.Amount = newVal.(*inf.Dec)
		}
	} else {
		out.Amount = nil
	}
	out.Format = in.Format
	return nil
}
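// Minimal sketch (hypothetical; assumes the cloner knows how to copy an
// *inf.Dec): the point of the deep copy is that out must not share in.Amount,
// so mutating the copy leaves the original quantity unchanged.
func exampleDeepCopyQuantity(c *conversion.Cloner) {
	in := resource.MustParse("500m")
	out := resource.Quantity{}
	if err := deepCopy_resource_Quantity(in, &out, c); err != nil {
		glog.Errorf("deep copy failed: %v", err)
		return
	}
	out.Add(resource.MustParse("500m")) // in still reads 500m
}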
// limitRequestRatioConstraint enforces the limit-to-request ratio over the specified resource
func limitRequestRatioConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error {
	req, reqExists := request[resourceName]
	lim, limExists := limit[resourceName]
	observedReqValue, observedLimValue, _ := requestLimitEnforcedValues(req, lim, enforced)
	if !reqExists || (observedReqValue == int64(0)) {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but no request is specified or request is 0.", resourceName, limitType, enforced.String())
	}
	if !limExists || (observedLimValue == int64(0)) {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but no limit is specified or limit is 0.", resourceName, limitType, enforced.String())
	}
	observedRatio := float64(observedLimValue) / float64(observedReqValue)
	displayObservedRatio := observedRatio
	maxLimitRequestRatio := float64(enforced.Value())
	if enforced.Value() <= resource.MaxMilliValue {
		// compare at milli precision when the enforced value is small enough not to overflow
		observedRatio = observedRatio * 1000
		maxLimitRequestRatio = float64(enforced.MilliValue())
	}
	if observedRatio > maxLimitRequestRatio {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but provided ratio is %f.", resourceName, limitType, enforced.String(), displayObservedRatio)
	}
	return nil
}
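// Worked example (hypothetical values): with an enforced ratio of "2", a 100m
// CPU request against a 300m limit observes 300/100 = 3 > 2 and is rejected.
// Because the enforced value fits under resource.MaxMilliValue, the actual
// comparison happens at milli precision (3000 vs 2000), avoiding the loss of
// fractional units.
func exampleRatioConstraint() error {
	request := api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}
	limit := api.ResourceList{api.ResourceCPU: resource.MustParse("300m")}
	return limitRequestRatioConstraint(api.LimitTypeContainer, api.ResourceCPU, resource.MustParse("2"), request, limit)
}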
// PodRequests returns the sum of each resource request across all containers in the pod
func PodRequests(pod *api.Pod, resourceName api.ResourceName) (*resource.Quantity, error) {
	if !PodHasRequests(pod, resourceName) {
		return nil, fmt.Errorf("Not every container in pod %s/%s has an explicit request for resource %s.", pod.Namespace, pod.Name, resourceName)
	}
	var sum *resource.Quantity
	for j := range pod.Spec.Containers {
		value := pod.Spec.Containers[j].Resources.Requests[resourceName]
		if sum == nil {
			sum = value.Copy()
		} else {
			err := sum.Add(value)
			if err != nil {
				return sum, err
			}
		}
	}
	// if the container list is empty, return a zero quantity
	if sum == nil {
		q := resource.MustParse("0")
		sum = &q
	}
	return sum, nil
}
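// Hypothetical sketch: PodRequests fails fast when any container lacks an
// explicit request for the resource, which is what lets PodsRequests above
// log and skip such pods instead of under-counting them silently.
func examplePodRequests(pod *api.Pod) {
	if q, err := PodRequests(pod, api.ResourceCPU); err != nil {
		glog.Infof("pod has a container without a CPU request: %v", err)
	} else {
		glog.Infof("total CPU requested: %s", q.String())
	}
}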
// makeKBitString renders the quantity's value in kilobits (value / 1000) as a string
func makeKBitString(rsrc *resource.Quantity) string {
	return fmt.Sprintf("%dkbit", rsrc.Value()/1000)
}
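// Example (hypothetical values): resource.MustParse("10M") has a Value() of
// 10000000, so the formatted result is "10000kbit".
func exampleKBitString() string {
	q := resource.MustParse("10M")
	return makeKBitString(&q) // "10000kbit"
}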
// maxConstraint enforces the max constraint over the specified resource
func maxConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error {
	req, reqExists := request[resourceName]
	lim, limExists := limit[resourceName]
	observedReqValue, observedLimValue, enforcedValue := requestLimitEnforcedValues(req, lim, enforced)
	if !limExists {
		return fmt.Errorf("Maximum %s usage per %s is %s. No limit is specified.", resourceName, limitType, enforced.String())
	}
	if observedLimValue > enforcedValue {
		return fmt.Errorf("Maximum %s usage per %s is %s, but limit is %s.", resourceName, limitType, enforced.String(), lim.String())
	}
	if reqExists && (observedReqValue > enforcedValue) {
		return fmt.Errorf("Maximum %s usage per %s is %s, but request is %s.", resourceName, limitType, enforced.String(), req.String())
	}
	return nil
}
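// Hypothetical sketch: a 2Gi memory limit against an enforced maximum of 1Gi
// fails the limit check, so an error is returned; the 512Mi request on its
// own would have passed.
func exampleMaxConstraint() error {
	request := api.ResourceList{api.ResourceMemory: resource.MustParse("512Mi")}
	limit := api.ResourceList{api.ResourceMemory: resource.MustParse("2Gi")}
	return maxConstraint(api.LimitTypeContainer, api.ResourceMemory, resource.MustParse("1Gi"), request, limit)
}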
// requestLimitEnforcedValues returns the specified values at a common precision to support comparability
func requestLimitEnforcedValues(requestQuantity, limitQuantity, enforcedQuantity resource.Quantity) (request, limit, enforced int64) {
	request = requestQuantity.Value()
	limit = limitQuantity.Value()
	enforced = enforcedQuantity.Value()
	// do a more precise comparison if possible (if the value won't overflow)
	if request <= resource.MaxMilliValue && limit <= resource.MaxMilliValue && enforced <= resource.MaxMilliValue {
		request = requestQuantity.MilliValue()
		limit = limitQuantity.MilliValue()
		enforced = enforcedQuantity.MilliValue()
	}
	return
}
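// Worked example (hypothetical values): at whole-unit precision "100m" and
// "250m" are indistinguishable, so for request "100m", limit "250m" and
// enforced "1" the helper drops to milli precision and returns 100, 250 and
// 1000, which compare correctly.
func exampleEnforcedValues() (int64, int64, int64) {
	return requestLimitEnforcedValues(resource.MustParse("100m"), resource.MustParse("250m"), resource.MustParse("1"))
}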
// syncResourceQuota runs a complete sync of the current status
func (rm *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (err error) {
	// quota is dirty if any part of the spec hard limits differs from the status hard limits
	dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)

	// dirty tracks if the usage status differs from the previous sync;
	// if so, we send a new usage with the latest status.
	// if this is our first sync, it will be dirty by default, since we need to track usage
	dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)

	// Create a usage object that is based on the quota resource version
	usage := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name:            quota.Name,
			Namespace:       quota.Namespace,
			ResourceVersion: quota.ResourceVersion,
			Labels:          quota.Labels,
			Annotations:     quota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{},
			Used: api.ResourceList{},
		},
	}

	// set the hard values supported on the quota
	for k, v := range quota.Spec.Hard {
		usage.Status.Hard[k] = *v.Copy()
	}
	// set any last known observed status values for usage
	for k, v := range quota.Status.Used {
		usage.Status.Used[k] = *v.Copy()
	}

	set := map[api.ResourceName]bool{}
	for k := range usage.Status.Hard {
		set[k] = true
	}

	pods := &api.PodList{}
	if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
		pods, err = rm.kubeClient.Pods(usage.Namespace).List(labels.Everything(), fields.Everything())
		if err != nil {
			return err
		}
	}

	filteredPods := FilterQuotaPods(pods.Items)

	// iterate over each resource, and update observation
	for k := range usage.Status.Hard {
		// look if there is a used value; if none, we are definitely dirty
		prevQuantity, found := usage.Status.Used[k]
		if !found {
			dirty = true
		}
		var value *resource.Quantity

		switch k {
		case api.ResourcePods:
			value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
		case api.ResourceServices:
			items, err := rm.kubeClient.Services(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceReplicationControllers:
			items, err := rm.kubeClient.ReplicationControllers(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceQuotas:
			items, err := rm.kubeClient.ResourceQuotas(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceSecrets:
			items, err := rm.kubeClient.Secrets(usage.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourcePersistentVolumeClaims:
			items, err := rm.kubeClient.PersistentVolumeClaims(usage.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceMemory:
			value = PodsRequests(filteredPods, api.ResourceMemory)
		case api.ResourceCPU:
			value = PodsRequests(filteredPods, api.ResourceCPU)
		}

		// ignore fields we do not understand (assume another controller is tracking it)
		if value != nil {
			// see if the value has changed
			dirty = dirty || (value.Value() != prevQuantity.Value())
			// just update the value
			usage.Status.Used[k] = *value
		}
	}

	// update the usage only if it changed
	if dirty {
		_, err = rm.kubeClient.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
		return err
	}
	return nil
}
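// Hypothetical sketch (quota values are illustrative): a quota capping pods
// and CPU. One sync fills Status.Hard from Spec.Hard, lists the namespace's
// pods once (since the tracked set includes ResourcePods and ResourceCPU),
// recomputes Status.Used, and calls UpdateStatus only when something changed.
func exampleSyncResourceQuota(rm *ResourceQuotaController) error {
	quota := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Name: "compute-quota", Namespace: "default"},
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourcePods: resource.MustParse("10"),
				api.ResourceCPU:  resource.MustParse("4"),
			},
		},
	}
	return rm.syncResourceQuota(quota)
}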