// PodCPU computes the total CPU usage of a pod.
func PodCPU(pod *api.Pod) *resource.Quantity {
	val := int64(0)
	for j := range pod.Spec.Containers {
		val = val + pod.Spec.Containers[j].Resources.Limits.Cpu().MilliValue()
	}
	return resource.NewMilliQuantity(val, resource.DecimalSI)
}
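// A hypothetical usage sketch (not from the original source): PodCPU sums the
// CPU limits of every container in the pod, so a pod with 250m and 500m
// containers reports 750m. Assumes the same api, resource, and fmt packages
// used throughout this section.
func ExamplePodCPU() {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Resources: api.ResourceRequirements{Limits: api.ResourceList{
					api.ResourceCPU: *resource.NewMilliQuantity(250, resource.DecimalSI),
				}}},
				{Resources: api.ResourceRequirements{Limits: api.ResourceList{
					api.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
				}}},
			},
		},
	}
	fmt.Printf("podCPU = %v\n", PodCPU(pod))
	// Output:
	// podCPU = 750m
}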
// cpu is in cores, memory is in GiB.
func makeResources(cpu float64, memory float64) *api.NodeResources {
	return &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(int64(cpu*1000), resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(int64(memory*1024*1024*1024), resource.BinarySI),
		},
	}
}
func makeResources(milliCPU int64, memory int64, pods int64) api.NodeResources {
	return api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
			api.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
		},
	}
}
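// Note: the two makeResources variants above come from different packages and
// could not coexist in one file. A hypothetical usage sketch of the
// three-argument form, describing a node with 4 cores, 8GiB of memory, and
// room for 110 pods:
func exampleMakeResources() {
	capacity := makeResources(4000, 8*1024*1024*1024, 110)
	cpu := capacity.Capacity[api.ResourceCPU]
	pods := capacity.Capacity[api.ResourcePods]
	fmt.Printf("cpu = %s, pods = %s\n", cpu.String(), pods.String()) // cpu = 4, pods = 110
}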
func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) api.ResourceList {
	c := api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(
			int64(info.NumCores*1000),
			resource.DecimalSI),
		api.ResourceMemory: *resource.NewQuantity(
			info.MemoryCapacity,
			resource.BinarySI),
	}
	return c
}
func makeMinion(node string, milliCPU, memory int64) api.Node {
	return api.Node{
		ObjectMeta: api.ObjectMeta{Name: node},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				"memory": *resource.NewQuantity(memory, resource.BinarySI),
			},
		},
	}
}
func (cm *containerManagerImpl) SystemContainersLimit() api.ResourceList {
	cpuLimit := int64(0)

	// Sum up resources of all external containers.
	for _, cont := range cm.systemContainers {
		cpuLimit += cont.cpuMillicores
	}

	return api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(
			cpuLimit,
			resource.DecimalSI),
	}
}
func ExampleFormat() {
	memorySize := resource.NewQuantity(5*1024*1024*1024, resource.BinarySI)
	fmt.Printf("memorySize = %v\n", memorySize)

	diskSize := resource.NewQuantity(5*1000*1000*1000, resource.DecimalSI)
	fmt.Printf("diskSize = %v\n", diskSize)

	cores := resource.NewMilliQuantity(5300, resource.DecimalSI)
	fmt.Printf("cores = %v\n", cores)

	// Output:
	// memorySize = 5Gi
	// diskSize = 5G
	// cores = 5300m
}
func newResourcePod(usage ...resourceRequest) *api.Pod {
	containers := []api.Container{}
	for _, req := range usage {
		containers = append(containers, api.Container{
			Resources: api.ResourceRequirements{
				Limits: api.ResourceList{
					api.ResourceCPU:    *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
					api.ResourceMemory: *resource.NewQuantity(req.memory, resource.BinarySI),
				},
			},
		})
	}
	return &api.Pod{
		Spec: api.PodSpec{
			Containers: containers,
		},
	}
}
// Instances returns an implementation of Instances for OpenStack.
func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
	glog.V(4).Info("openstack.Instances() called")

	compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil {
		glog.Warningf("Failed to find compute endpoint: %v", err)
		return nil, false
	}

	pager := flavors.ListDetail(compute, nil)

	flavorToResource := make(map[string]*api.NodeResources)
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
		flavorList, err := flavors.ExtractFlavors(page)
		if err != nil {
			return false, err
		}
		for _, flavor := range flavorList {
			rsrc := api.NodeResources{
				Capacity: api.ResourceList{
					api.ResourceCPU:            *resource.NewQuantity(int64(flavor.VCPUs), resource.DecimalSI),
					api.ResourceMemory:         *resource.NewQuantity(int64(flavor.RAM)*MiB, resource.BinarySI),
					"openstack.org/disk":       *resource.NewQuantity(int64(flavor.Disk)*GB, resource.DecimalSI),
					"openstack.org/rxTxFactor": *resource.NewMilliQuantity(int64(flavor.RxTxFactor)*1000, resource.DecimalSI),
					"openstack.org/swap":       *resource.NewQuantity(int64(flavor.Swap)*MiB, resource.BinarySI),
				},
			}
			flavorToResource[flavor.ID] = &rsrc
		}
		return true, nil
	})
	if err != nil {
		glog.Warningf("Failed to find compute flavors: %v", err)
		return nil, false
	}

	glog.V(3).Infof("Found %v compute flavors", len(flavorToResource))
	glog.V(1).Info("Claiming to support Instances")

	return &Instances{compute, flavorToResource}, true
}
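// The MiB and GB multipliers used above are defined elsewhere in the provider;
// a minimal sketch of what they would be, assuming binary mebibytes for RAM
// and swap, and decimal gigabytes for disk:
const (
	MiB = 1024 * 1024
	GB  = 1000 * 1000 * 1000
)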
func TestGetResources(t *testing.T) {
	var instance0 ec2.Instance
	var instance1 ec2.Instance
	var instance2 ec2.Instance

	// 0
	instance0.InstanceID = aws.String("m3.medium")
	instance0.InstanceType = aws.String("m3.medium")
	state0 := ec2.InstanceState{
		Name: aws.String("running"),
	}
	instance0.State = &state0

	// 1
	instance1.InstanceID = aws.String("r3.8xlarge")
	instance1.InstanceType = aws.String("r3.8xlarge")
	state1 := ec2.InstanceState{
		Name: aws.String("running"),
	}
	instance1.State = &state1

	// 2
	instance2.InstanceID = aws.String("unknown.type")
	instance2.InstanceType = aws.String("unknown.type")
	state2 := ec2.InstanceState{
		Name: aws.String("running"),
	}
	instance2.State = &state2

	instances := []*ec2.Instance{&instance0, &instance1, &instance2}

	aws1 := mockInstancesResp(instances)

	res1, err1 := aws1.GetNodeResources("m3.medium")
	if err1 != nil {
		t.Errorf("Should not error when instance type found: %v", err1)
	}
	e1 := &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(int64(3.0*1000), resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(int64(3.75*1024*1024*1024), resource.BinarySI),
		},
	}
	if !reflect.DeepEqual(e1, res1) {
		t.Errorf("Expected %v, got %v", e1, res1)
	}

	res2, err2 := aws1.GetNodeResources("r3.8xlarge")
	if err2 != nil {
		t.Errorf("Should not error when instance type found: %v", err2)
	}
	e2 := &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(int64(104.0*1000), resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(int64(244.0*1024*1024*1024), resource.BinarySI),
		},
	}
	if !reflect.DeepEqual(e2, res2) {
		t.Errorf("Expected %v, got %v", e2, res2)
	}

	res3, err3 := aws1.GetNodeResources("unknown.type")
	if err3 != nil {
		t.Errorf("Should not error when unknown instance type")
	}
	if res3 != nil {
		t.Errorf("Should return nil resources when unknown instance type")
	}
}
// PodLimitFunc enforces the resource requirements of the pod against the
// specified LimitRange. The pod may be modified to apply default resource
// requirements where they are not specified but are enumerated on the LimitRange.
func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error {
	defaultResources := defaultContainerResourceRequirements(limitRange)
	mergePodResourceRequirements(pod, &defaultResources)

	podCPU := int64(0)
	podMem := int64(0)

	minContainerCPU := int64(0)
	minContainerMem := int64(0)
	maxContainerCPU := int64(0)
	maxContainerMem := int64(0)

	for i := range pod.Spec.Containers {
		container := &pod.Spec.Containers[i]
		containerCPU := container.Resources.Limits.Cpu().MilliValue()
		containerMem := container.Resources.Limits.Memory().Value()

		// Seed the min/max trackers from the first container.
		if i == 0 {
			minContainerCPU = containerCPU
			minContainerMem = containerMem
			maxContainerCPU = containerCPU
			maxContainerMem = containerMem
		}

		podCPU = podCPU + containerCPU
		podMem = podMem + containerMem

		minContainerCPU = Min(containerCPU, minContainerCPU)
		minContainerMem = Min(containerMem, minContainerMem)
		maxContainerCPU = Max(containerCPU, maxContainerCPU)
		maxContainerMem = Max(containerMem, maxContainerMem)
	}

	for i := range limitRange.Spec.Limits {
		limit := limitRange.Spec.Limits[i]
		for _, minOrMax := range []string{"Min", "Max"} {
			var rl api.ResourceList
			switch minOrMax {
			case "Min":
				rl = limit.Min
			case "Max":
				rl = limit.Max
			}
			for k, v := range rl {
				observed := int64(0)
				enforced := int64(0)
				// err carries the violation message returned if the check below fails.
				var err error
				switch k {
				case api.ResourceMemory:
					enforced = v.Value()
					switch limit.Type {
					case api.LimitTypePod:
						observed = podMem
						err = fmt.Errorf("%simum memory usage per pod is %s", minOrMax, v.String())
					case api.LimitTypeContainer:
						// Min checks compare the smallest container, Max the largest.
						observed = maxContainerMem
						if minOrMax == "Min" {
							observed = minContainerMem
						}
						err = fmt.Errorf("%simum memory usage per container is %s", minOrMax, v.String())
					}
				case api.ResourceCPU:
					enforced = v.MilliValue()
					switch limit.Type {
					case api.LimitTypePod:
						observed = podCPU
						err = fmt.Errorf("%simum CPU usage per pod is %s, but requested %s", minOrMax, v.String(), resource.NewMilliQuantity(observed, resource.DecimalSI))
					case api.LimitTypeContainer:
						// Min checks compare the smallest container, Max the largest.
						observed = maxContainerCPU
						if minOrMax == "Min" {
							observed = minContainerCPU
						}
						err = fmt.Errorf("%simum CPU usage per container is %s", minOrMax, v.String())
					}
				}
				switch minOrMax {
				case "Min":
					if observed < enforced {
						return err
					}
				case "Max":
					if observed > enforced {
						return err
					}
				}
			}
		}
	}
	return nil
}
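// PodLimitFunc relies on int64 Min and Max helpers that are not shown in this
// section; a minimal sketch of what they would look like:
func Min(a int64, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

func Max(a int64, b int64) int64 {
	if a > b {
		return a
	}
	return b
}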
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				"name": "foo",
				"time": value,
			},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "nginx",
					Image: "qingyuan/pause",
					Resources: api.ResourceRequirements{
						Limits: api.ResourceList{
							api.ResourceCPU:    *resource.NewMilliQuantity(100, resource.DecimalSI),
							api.ResourceMemory: *resource.NewQuantity(10*1024*1024, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	defer podClient.Delete(pod.Name, nil)
	_, err := podClient.Create(pod)
	if err != nil {
		Fail(fmt.Sprintf("Error creating a pod: %v", err))
	}
	expectNoError(waitForPodRunning(c, pod.Name))
})

It("should be submitted and removed", func() {
// syncResourceQuota runs a complete sync of the current quota status.
func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err error) {
	// quota is dirty if any part of the spec hard limits differs from the status hard limits
	dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)

	// dirty tracks whether the usage status differs from the previous sync;
	// if so, we send a new usage with the latest status. If this is our first
	// sync, it is dirty by default, since we need to track usage.
	dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)

	// Create a usage object that is based on the quota resource version
	usage := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name:            quota.Name,
			Namespace:       quota.Namespace,
			ResourceVersion: quota.ResourceVersion,
			Labels:          quota.Labels,
			Annotations:     quota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{},
			Used: api.ResourceList{},
		},
	}

	// set the hard values supported on the quota
	for k, v := range quota.Spec.Hard {
		usage.Status.Hard[k] = *v.Copy()
	}
	// set any last known observed status values for usage
	for k, v := range quota.Status.Used {
		usage.Status.Used[k] = *v.Copy()
	}

	set := map[api.ResourceName]bool{}
	for k := range usage.Status.Hard {
		set[k] = true
	}

	pods := &api.PodList{}
	if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
		pods, err = rm.qingClient.Pods(usage.Namespace).List(labels.Everything(), fields.Everything())
		if err != nil {
			return err
		}
	}

	filteredPods := FilterQuotaPods(pods.Items)

	// iterate over each resource, and update the observation
	for k := range usage.Status.Hard {
		// look for a used value; if there is none, we are definitely dirty
		prevQuantity, found := usage.Status.Used[k]
		if !found {
			dirty = true
		}

		var value *resource.Quantity

		switch k {
		case api.ResourcePods:
			value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
		case api.ResourceMemory:
			val := int64(0)
			for _, pod := range filteredPods {
				val = val + PodMemory(pod).Value()
			}
			value = resource.NewQuantity(val, resource.DecimalSI)
		case api.ResourceCPU:
			val := int64(0)
			for _, pod := range filteredPods {
				val = val + PodCPU(pod).MilliValue()
			}
			value = resource.NewMilliQuantity(val, resource.DecimalSI)
		case api.ResourceServices:
			items, err := rm.qingClient.Services(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceReplicationControllers:
			items, err := rm.qingClient.ReplicationControllers(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceQuotas:
			items, err := rm.qingClient.ResourceQuotas(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceSecrets:
			items, err := rm.qingClient.Secrets(usage.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourcePersistentVolumeClaims:
			items, err := rm.qingClient.PersistentVolumeClaims(usage.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		}

		// ignore fields we do not understand (assume another controller is tracking them)
		if value != nil {
			// see if the value has changed
			dirty = dirty || (value.Value() != prevQuantity.Value())
			// just update the value
			usage.Status.Used[k] = *value
		}
	}

	// update the usage only if it changed
	if dirty {
		_, err = rm.qingClient.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
		return err
	}
	return nil
}
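// syncResourceQuota above and IncrementUsage below call PodMemory, which is
// not shown in this section; a minimal sketch, mirroring PodCPU above and
// assuming BinarySI for memory:
func PodMemory(pod *api.Pod) *resource.Quantity {
	val := int64(0)
	for j := range pod.Spec.Containers {
		val = val + pod.Spec.Containers[j].Resources.Limits.Memory().Value()
	}
	return resource.NewQuantity(val, resource.BinarySI)
}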
// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation.
// It returns true if the usage must be recorded prior to admitting the new resource,
// and an error if the operation should not pass admission control.
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client client.Interface) (bool, error) {
	dirty := false
	set := map[api.ResourceName]bool{}
	for k := range status.Hard {
		set[k] = true
	}
	obj := a.GetObject()

	// handle max counts for each kind of resource (pods, services, replicationControllers, etc.)
	if a.GetOperation() == admission.Create {
		// TODO: v1beta1 had camel case, v1beta3 went to all lower; we can remove this when we deprecate v1beta1.
		resourceNormalized := strings.ToLower(a.GetResource())
		resourceName := resourceToResourceName[resourceNormalized]
		hard, hardFound := status.Hard[resourceName]
		if hardFound {
			used, usedFound := status.Used[resourceName]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value() >= hard.Value() {
				return false, fmt.Errorf("Limited to %s %s", hard.String(), resourceName)
			}
			status.Used[resourceName] = *resource.NewQuantity(used.Value()+int64(1), resource.DecimalSI)
			dirty = true
		}
	}

	// handle memory/cpu constraints, and any diff of usage based on memory/cpu on updates
	if a.GetResource() == "pods" && (set[api.ResourceMemory] || set[api.ResourceCPU]) {
		pod := obj.(*api.Pod)
		deltaCPU := resourcequota.PodCPU(pod)
		deltaMemory := resourcequota.PodMemory(pod)

		// if this is an update, we need to find the delta cpu/memory usage from the previous state
		if a.GetOperation() == admission.Update {
			oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name)
			if err != nil {
				return false, err
			}
			oldCPU := resourcequota.PodCPU(oldPod)
			oldMemory := resourcequota.PodMemory(oldPod)
			deltaCPU = resource.NewMilliQuantity(deltaCPU.MilliValue()-oldCPU.MilliValue(), resource.DecimalSI)
			deltaMemory = resource.NewQuantity(deltaMemory.Value()-oldMemory.Value(), resource.DecimalSI)
		}

		hardMem, hardMemFound := status.Hard[api.ResourceMemory]
		if hardMemFound {
			if set[api.ResourceMemory] && resourcequota.IsPodMemoryUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s memory, but pod has no specified memory limit", hardMem.String())
			}
			used, usedFound := status.Used[api.ResourceMemory]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value()+deltaMemory.Value() > hardMem.Value() {
				return false, fmt.Errorf("Limited to %s memory", hardMem.String())
			}
			status.Used[api.ResourceMemory] = *resource.NewQuantity(used.Value()+deltaMemory.Value(), resource.DecimalSI)
			dirty = true
		}

		hardCPU, hardCPUFound := status.Hard[api.ResourceCPU]
		if hardCPUFound {
			if set[api.ResourceCPU] && resourcequota.IsPodCPUUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s CPU, but pod has no specified cpu limit", hardCPU.String())
			}
			used, usedFound := status.Used[api.ResourceCPU]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.MilliValue()+deltaCPU.MilliValue() > hardCPU.MilliValue() {
				return false, fmt.Errorf("Limited to %s CPU", hardCPU.String())
			}
			status.Used[api.ResourceCPU] = *resource.NewMilliQuantity(used.MilliValue()+deltaCPU.MilliValue(), resource.DecimalSI)
			dirty = true
		}
	}
	return dirty, nil
}
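// IncrementUsage also relies on IsPodCPUUnbounded and IsPodMemoryUnbounded
// from the resourcequota package, which are not shown in this section. A
// minimal sketch, assuming a pod counts as "unbounded" when its summed limit
// is zero (i.e. no limit was specified):
func IsPodCPUUnbounded(pod *api.Pod) bool {
	return PodCPU(pod).MilliValue() == int64(0)
}

func IsPodMemoryUnbounded(pod *api.Pod) bool {
	return PodMemory(pod).Value() == int64(0)
}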