Example #1
func printSingleResourceUsage(out io.Writer, resourceType api.ResourceName, quantity resource.Quantity) {
	switch resourceType {
	case api.ResourceCPU:
		fmt.Fprintf(out, "%vm", quantity.MilliValue())
	case api.ResourceMemory:
		fmt.Fprintf(out, "%vMi", quantity.Value()/(1024*1024))
	default:
		fmt.Fprintf(out, "%v", quantity.Value())
	}
}
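For reference, a minimal standalone sketch (assuming only the apimachinery resource package; the quantities are made up) showing the conversions this helper relies on:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("250m")  // a quarter of a core
	mem := resource.MustParse("128Mi") // 128 mebibytes

	// CPU is printed in millicores, memory in whole MiB, matching the helper above.
	fmt.Printf("%vm\n", cpu.MilliValue())         // 250m
	fmt.Printf("%vMi\n", mem.Value()/(1024*1024)) // 128Mi
}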
Example #2
func formatImageStreamQuota(out *tabwriter.Writer, c client.Interface, kc kclient.Interface, stream *imageapi.ImageStream) {
	quotas, err := kc.ResourceQuotas(stream.Namespace).List(api.ListOptions{})
	if err != nil {
		return
	}

	var limit *resource.Quantity
	for _, item := range quotas.Items {
		// search for smallest ImageStream quota
		if value, ok := item.Spec.Hard[imageapi.ResourceImageStreamSize]; ok {
			if limit == nil || limit.Cmp(value) > 0 {
				limit = &value
			}
		}
	}
	if limit != nil {
		quantity := imagequota.GetImageStreamSize(c, stream, make(map[string]*imageapi.Image))
		scale := mega
		if quantity.Value() >= (1<<giga.scale) || limit.Value() >= (1<<giga.scale) {
			scale = giga
		}
		formatString(out, "Quota Usage", fmt.Sprintf("%s / %s",
			formatQuantity(quantity, scale), formatQuantity(limit, scale)))
	}
}
Example #3
// limitRequestRatioConstraint enforces the limit to request ratio over the specified resource
func limitRequestRatioConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error {
	req, reqExists := request[resourceName]
	lim, limExists := limit[resourceName]
	observedReqValue, observedLimValue, _ := requestLimitEnforcedValues(req, lim, enforced)

	if !reqExists || (observedReqValue == int64(0)) {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but no request is specified or request is 0.", resourceName, limitType, enforced.String())
	}
	if !limExists || (observedLimValue == int64(0)) {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but no limit is specified or limit is 0.", resourceName, limitType, enforced.String())
	}

	observedRatio := float64(observedLimValue) / float64(observedReqValue)
	displayObservedRatio := observedRatio
	maxLimitRequestRatio := float64(enforced.Value())
	if enforced.Value() <= resource.MaxMilliValue {
		observedRatio = observedRatio * 1000
		maxLimitRequestRatio = float64(enforced.MilliValue())
	}

	if observedRatio > maxLimitRequestRatio {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but provided ratio is %f.", resourceName, limitType, enforced.String(), displayObservedRatio)
	}

	return nil
}
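A rough numeric illustration of the ratio check, as a simplified standalone sketch rather than the admission code itself (the quantities below are made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	enforced := resource.MustParse("4")   // maxLimitRequestRatio of 4
	request := resource.MustParse("100m") // container CPU request
	limit := resource.MustParse("500m")   // container CPU limit

	// Compare at milli precision, as requestLimitEnforcedValues does for small values.
	ratio := float64(limit.MilliValue()) / float64(request.MilliValue())
	fmt.Printf("observed ratio %.2f, enforced max %d\n", ratio, enforced.Value())
	// observed ratio 5.00, enforced max 4 -> the constraint above would reject this spec
}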
Example #4
// calculatePodScore computes a score for each pod and returns the corresponding podInfo structures.
// The score is defined as cpu_sum/node_cpu_capacity + mem_sum/node_memory_capacity.
// Pods with bigger requirements should be processed first, and thus get higher scores.
func calculatePodScore(pods []*apiv1.Pod, nodeTemplate *schedulercache.NodeInfo) []*podInfo {
	podInfos := make([]*podInfo, 0, len(pods))

	for _, pod := range pods {
		cpuSum := resource.Quantity{}
		memorySum := resource.Quantity{}

		for _, container := range pod.Spec.Containers {
			if request, ok := container.Resources.Requests[apiv1.ResourceCPU]; ok {
				cpuSum.Add(request)
			}
			if request, ok := container.Resources.Requests[apiv1.ResourceMemory]; ok {
				memorySum.Add(request)
			}
		}
		score := float64(0)
		if cpuAllocatable, ok := nodeTemplate.Node().Status.Allocatable[apiv1.ResourceCPU]; ok && cpuAllocatable.MilliValue() > 0 {
			score += float64(cpuSum.MilliValue()) / float64(cpuAllocatable.MilliValue())
		}
		if memAllocatable, ok := nodeTemplate.Node().Status.Allocatable[apiv1.ResourceMemory]; ok && memAllocatable.Value() > 0 {
			score += float64(memorySum.Value()) / float64(memAllocatable.Value())
		}

		podInfos = append(podInfos, &podInfo{
			score: score,
			pod:   pod,
		})
	}
	return podInfos
}
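A back-of-the-envelope version of the score formula with made-up requests and allocatable capacity, useful for sanity-checking the arithmetic:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical pod request sums and node allocatable capacity.
	cpuSum := resource.MustParse("1000m")
	memorySum := resource.MustParse("2Gi")
	cpuAllocatable := resource.MustParse("4000m")
	memAllocatable := resource.MustParse("8Gi")

	score := float64(cpuSum.MilliValue())/float64(cpuAllocatable.MilliValue()) +
		float64(memorySum.Value())/float64(memAllocatable.Value())
	fmt.Printf("score: %.2f\n", score) // 0.25 + 0.25 = 0.50
}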
Example #5
func addCpuLimit(opts []*unit.UnitOption, limit *resource.Quantity) ([]*unit.UnitOption, error) {
	if limit.Value() > resource.MaxMilliValue {
		return nil, fmt.Errorf("cpu limit exceeds the maximum millivalue: %v", limit.String())
	}
	quota := strconv.Itoa(int(limit.MilliValue()/10)) + "%"
	opts = append(opts, unit.NewUnitOption("Service", "CPUQuota", quota))
	return opts, nil
}
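The CPUQuota conversion is easy to verify in isolation; a standalone sketch (the 500m value is illustrative) showing how a half-core limit becomes a systemd percentage:

package main

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limit := resource.MustParse("500m") // half a core

	// systemd's CPUQuota is a percentage of a single CPU: 500 millicores -> "50%".
	quota := strconv.Itoa(int(limit.MilliValue()/10)) + "%"
	fmt.Println(quota) // 50%
}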
Example #6
func validateBandwidthIsReasonable(rsrc *resource.Quantity) error {
	if rsrc.Value() < minRsrc.Value() {
		return fmt.Errorf("resource is unreasonably small (< 1kbit)")
	}
	if rsrc.Value() > maxRsrc.Value() {
		return fmt.Errorf("resoruce is unreasonably large (> 1Pbit)")
	}
	return nil
}
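minRsrc and maxRsrc are defined elsewhere in the package; a sketch of definitions consistent with the error messages (1 kbit lower bound, 1 Pbit upper bound) would look roughly like this:

import "k8s.io/apimachinery/pkg/api/resource"

var (
	// Bounds matching the error messages above: 1 kbit and 1 Pbit.
	minRsrc = resource.MustParse("1k")
	maxRsrc = resource.MustParse("1P")
)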
Example #7
// formatQuantity prints quantity according to passed scale. Manual scaling was
// done here to make sure we print correct binary values for quantity.
func formatQuantity(quantity *resource.Quantity, scale scale) string {
	integer := quantity.Value() >> scale.scale
	// fraction is the remainder of the division, shifted down to the next smaller binary unit
	fraction := (quantity.Value() % (1 << scale.scale)) >> (scale.scale - 10)
	// additionally we present only 2 digits after the decimal point, so divide by 10
	fraction = fraction / 10
	if fraction > 0 {
		return fmt.Sprintf("%d.%02d%s", integer, fraction, scale.unit)
	}
	return fmt.Sprintf("%d%s", integer, scale.unit)
}
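formatQuantity and formatImageStreamQuota (Example #2) rely on a small scale type and the mega/giga values that are not shown above; a plausible sketch, with the shift amounts inferred from the code and the unit strings assumed:

// Hypothetical declarations; the shift amounts follow from the bit shifts above,
// the unit strings are an assumption.
type scale struct {
	scale uint   // binary exponent: 20 for a mebi-scale, 30 for a gibi-scale
	unit  string // suffix appended to the formatted number
}

var (
	mega = scale{scale: 20, unit: "MiB"}
	giga = scale{scale: 30, unit: "GiB"}
)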
Example #8
func printSingleResourceUsage(out io.Writer, resourceType v1.ResourceName, quantity resource.Quantity) {
	switch resourceType {
	case v1.ResourceCPU:
		fmt.Fprintf(out, "%vm", quantity.MilliValue())
	case v1.ResourceMemory:
		fmt.Fprintf(out, "%vMi", quantity.Value()/(1024*1024))
	case v1.ResourceStorage:
		// TODO: Change it after storage metrics collection is finished.
		fmt.Fprint(out, "-")
	default:
		fmt.Fprintf(out, "%v", quantity.Value())
	}
}
Example #9
func (cr *realQuotaCommandRunner) RunApplyQuotaCommand(fsDevice string, quota resource.Quantity, fsGroup int64) (string, string, error) {
	args := []string{"-x", "-c",
		fmt.Sprintf("limit -g bsoft=%d bhard=%d %d", quota.Value(), quota.Value(), fsGroup),
		fsDevice,
	}

	cmd := exec.Command("xfs_quota", args...)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	err := cmd.Run()
	glog.V(5).Infof("Ran: xfs_quota %s", args)
	return "", stderr.String(), err
}
Example #10
func (xqa *xfsQuotaApplicator) applyQuota(volDevice string, quota resource.Quantity, fsGroupID int64) error {
	_, stderr, err := xqa.cmdRunner.RunApplyQuotaCommand(volDevice, quota, fsGroupID)
	if err != nil {
		return err
	}
	// xfs_quota is very happy to fail but return a success code, likely due to its
	// interactive shell approach. Grab stderr, if we see anything written to it we'll
	// consider this an error.
	if len(stderr) > 0 {
		return fmt.Errorf("xfs_quota wrote to stderr: %s", stderr)
	}

	glog.V(4).Infof("XFS quota applied: device=%s, quota=%d, fsGroup=%d", volDevice, quota.Value(), fsGroupID)
	return nil
}
Example #11
func newPodMemoryStats(pod *api.Pod, workingSet resource.Quantity) statsapi.PodStats {
	result := statsapi.PodStats{
		PodRef: statsapi.PodReference{
			Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID),
		},
	}
	for range pod.Spec.Containers {
		workingSetBytes := uint64(workingSet.Value())
		result.Containers = append(result.Containers, statsapi.ContainerStats{
			Memory: &statsapi.MemoryStats{
				WorkingSetBytes: &workingSetBytes,
			},
		})
	}
	return result
}
Example #12
func (xqa *xfsQuotaApplicator) applyQuota(volDevice string, quota resource.Quantity, fsGroupID int64) error {
	_, stderr, err := xqa.cmdRunner.RunApplyQuotaCommand(volDevice, quota, fsGroupID)

	// xfs_quota is very happy to fail but return a success code, likely due to its
	// interactive shell approach. Grab stderr, if we see anything written to it we'll
	// consider this an error.
	//
	// If we exit non-zero *and* write to stderr, stderr is likely to have the details on what
	// actually went wrong, so we'll use this as the error message instead.
	if len(stderr) > 0 {
		return fmt.Errorf("error applying quota: %s", stderr)
	}

	if err != nil {
		return fmt.Errorf("error applying quota: %v", err)
	}

	glog.V(4).Infof("XFS quota applied: device=%s, quota=%d, fsGroup=%d", volDevice, quota.Value(), fsGroupID)
	return nil
}
Example #13
// newPodDiskStats returns stats with specified usage amounts.
func newPodDiskStats(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats {
	result := statsapi.PodStats{
		PodRef: statsapi.PodReference{
			Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID),
		},
	}

	rootFsUsedBytes := uint64(rootFsUsed.Value())
	logsUsedBytes := uint64(logsUsed.Value())
	for range pod.Spec.Containers {
		result.Containers = append(result.Containers, statsapi.ContainerStats{
			Rootfs: &statsapi.FsStats{
				UsedBytes: &rootFsUsedBytes,
			},
			Logs: &statsapi.FsStats{
				UsedBytes: &logsUsedBytes,
			},
		})
	}

	perLocalVolumeUsedBytes := uint64(perLocalVolumeUsed.Value())
	for _, volumeName := range localVolumeNames(pod) {
		result.VolumeStats = append(result.VolumeStats, statsapi.VolumeStats{
			Name: volumeName,
			FsStats: statsapi.FsStats{
				UsedBytes: &perLocalVolumeUsedBytes,
			},
		})
	}

	return result
}
Example #14
// requestLimitEnforcedValues returns the specified values at a common precision to support comparability
func requestLimitEnforcedValues(requestQuantity, limitQuantity, enforcedQuantity resource.Quantity) (request, limit, enforced int64) {
	request = requestQuantity.Value()
	limit = limitQuantity.Value()
	enforced = enforcedQuantity.Value()
	// do a more precise comparison if possible (if the value won't overflow)
	if request <= resource.MaxMilliValue && limit <= resource.MaxMilliValue && enforced <= resource.MaxMilliValue {
		request = requestQuantity.MilliValue()
		limit = limitQuantity.MilliValue()
		enforced = enforcedQuantity.MilliValue()
	}
	return
}
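A small illustration of why the milli-precision branch matters: Value() rounds sub-unit quantities up to whole units, so two different CPU quantities can collapse to the same integer (the values are made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	request := resource.MustParse("100m")
	limit := resource.MustParse("150m")

	// Value() rounds sub-unit quantities up to whole units and loses the distinction.
	fmt.Println(request.Value(), limit.Value()) // 1 1
	// MilliValue() keeps it, which is why the milli comparison is used when it cannot overflow.
	fmt.Println(request.MilliValue(), limit.MilliValue()) // 100 150
}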
Example #15
func makeKBitString(rsrc *resource.Quantity) string {
	return fmt.Sprintf("%dkbit", (rsrc.Value() / 1000))
}
Example #16
// convertResourceMemoryToString converts the memory value into units of divisor and
// returns the ceiling of that value as a string.
func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) {
	m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
	return strconv.FormatInt(m, 10), nil
}
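For example, with a hypothetical 100Mi memory value and a 1Gi divisor the helper yields "1"; a standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"math"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	memory := resource.MustParse("100Mi")
	divisor := resource.MustParse("1Gi")

	// ceil(100Mi / 1Gi) = 1
	m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value())))
	fmt.Println(strconv.FormatInt(m, 10)) // 1
}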
Example #17
// getThresholdQuantity returns the expected quantity value for a thresholdValue
func getThresholdQuantity(value ThresholdValue, capacity *resource.Quantity) *resource.Quantity {
	if value.Quantity != nil {
		return value.Quantity.Copy()
	}
	return resource.NewQuantity(int64(float64(capacity.Value())*float64(value.Percentage)), resource.BinarySI)
}
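When the threshold is a percentage, the result is simply percentage * capacity; a standalone sketch with made-up numbers (10% of a 10Gi capacity):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	capacity := resource.MustParse("10Gi")
	percentage := 0.1 // a 10% threshold

	threshold := resource.NewQuantity(int64(float64(capacity.Value())*percentage), resource.BinarySI)
	fmt.Println(threshold.String()) // 1Gi
}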
Example #18
func NewMegaBytes(q resource.Quantity) MegaBytes {
	return MegaBytes(float64(q.Value()) / 1024.0 / 1024.0)
}
Example #19
// syncResourceQuota runs a complete sync of the quota's current status
func (rq *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (err error) {

	// quota is dirty if any part of spec hard limits differs from the status hard limits
	dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)

	// dirty also tracks whether the usage status differs from the previous sync;
	// if so, we send a new usage with the latest status.
	// On our first sync it will be dirty by default, since we need to track usage.
	dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)

	// Create a usage object that is based on the quota resource version
	usage := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name:            quota.Name,
			Namespace:       quota.Namespace,
			ResourceVersion: quota.ResourceVersion,
			Labels:          quota.Labels,
			Annotations:     quota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{},
			Used: api.ResourceList{},
		},
	}

	// set the hard values supported on the quota
	for k, v := range quota.Spec.Hard {
		usage.Status.Hard[k] = *v.Copy()
	}
	// set any last known observed status values for usage
	for k, v := range quota.Status.Used {
		usage.Status.Used[k] = *v.Copy()
	}

	set := map[api.ResourceName]bool{}
	for k := range usage.Status.Hard {
		set[k] = true
	}

	pods := &api.PodList{}
	if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
		pods, err = rq.kubeClient.Core().Pods(usage.Namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
	}

	filteredPods := FilterQuotaPods(pods.Items)

	// iterate over each resource, and update observation
	for k := range usage.Status.Hard {

		// check whether there is a previously observed used value; if not, we are definitely dirty
		prevQuantity, found := usage.Status.Used[k]
		if !found {
			dirty = true
		}

		var value *resource.Quantity

		switch k {
		case api.ResourcePods:
			value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
		case api.ResourceServices:
			items, err := rq.kubeClient.Core().Services(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceReplicationControllers:
			items, err := rq.kubeClient.Core().ReplicationControllers(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceQuotas:
			items, err := rq.kubeClient.Core().ResourceQuotas(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceSecrets:
			items, err := rq.kubeClient.Core().Secrets(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourcePersistentVolumeClaims:
			items, err := rq.kubeClient.Core().PersistentVolumeClaims(usage.Namespace).List(api.ListOptions{})
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceMemory:
			value = PodsRequests(filteredPods, api.ResourceMemory)
		case api.ResourceCPU:
			value = PodsRequests(filteredPods, api.ResourceCPU)
		}

		// ignore fields we do not understand (assume another controller is tracking them)
		if value != nil {
			// see if the value has changed
			dirty = dirty || (value.Value() != prevQuantity.Value())
			// just update the value
			usage.Status.Used[k] = *value
		}
	}

	// update the usage only if it changed
	if dirty {
		_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
		return err
	}
	return nil
}
Example #20
func addMemoryLimit(opts []*unit.UnitOption, limit *resource.Quantity) ([]*unit.UnitOption, error) {
	opts = append(opts, unit.NewUnitOption("Service", "MemoryLimit", strconv.Itoa(int(limit.Value()))))
	return opts, nil
}
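systemd's MemoryLimit ends up as a plain byte count; a quick standalone check with an illustrative 512Mi limit:

package main

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limit := resource.MustParse("512Mi")
	fmt.Println(strconv.Itoa(int(limit.Value()))) // 536870912
}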