Example #1
func parseMesosState(blob []byte) (*mesosState, error) {
	type State struct {
		ClusterName string `json:"cluster"`
		Slaves      []*struct {
			Id        string                 `json:"id"`        // ex: 20150106-162714-3815890698-5050-2453-S2
			Pid       string                 `json:"pid"`       // ex: slave(1)@10.22.211.18:5051
			Hostname  string                 `json:"hostname"`  // ex: 10.22.211.18, or slave-123.nowhere.com
			Resources map[string]interface{} `json:"resources"` // ex: {"mem": 123, "ports": "[31000-32000]"}
		} `json:"slaves"`
	}
	state := &State{ClusterName: defaultClusterName}
	if err := json.Unmarshal(blob, state); err != nil {
		return nil, err
	}
	nodes := []*slaveNode{}
	for _, slave := range state.Slaves {
		if slave.Hostname == "" {
			continue
		}
		node := &slaveNode{hostname: slave.Hostname}
		cap := api.ResourceList{}
		if len(slave.Resources) > 0 {
			// attempt to translate CPU (cores) and memory (MB) resources
			if cpu, found := slave.Resources["cpus"]; found {
				if cpuNum, ok := cpu.(float64); ok {
					cap[api.ResourceCPU] = *resource.NewQuantity(int64(cpuNum), resource.DecimalSI)
				} else {
					log.Warningf("unexpected slave cpu resource type %T: %v", cpu, cpu)
				}
			} else {
				log.Warningf("slave failed to report cpu resource")
			}
			if mem, found := slave.Resources["mem"]; found {
				if memNum, ok := mem.(float64); ok {
					cap[api.ResourceMemory] = *resource.NewQuantity(int64(memNum), resource.BinarySI)
				} else {
					log.Warningf("unexpected slave mem resource type %T: %v", mem, mem)
				}
			} else {
				log.Warningf("slave failed to report mem resource")
			}
		}
		if len(cap) > 0 {
			node.resources = &api.NodeResources{
				Capacity: cap,
			}
			log.V(4).Infof("node %q reporting capacity %v", node.hostname, cap)
		}
		nodes = append(nodes, node)
	}

	result := &mesosState{
		clusterName: state.ClusterName,
		nodes:       nodes,
	}

	return result, nil
}
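For reference, here is a minimal state blob that parseMesosState accepts. The field names come from the struct tags above; the cluster name and resource numbers are illustrative, not taken from the source.
blob := []byte(`{
	"cluster": "example-cluster",
	"slaves": [{
		"id": "20150106-162714-3815890698-5050-2453-S2",
		"pid": "slave(1)@10.22.211.18:5051",
		"hostname": "10.22.211.18",
		"resources": {"cpus": 4, "mem": 15360, "ports": "[31000-32000]"}
	}]
}`)
state, err := parseMesosState(blob)
if err != nil {
	// handle the parse error
}
// state.clusterName == "example-cluster"; state.nodes holds one slaveNode for
// 10.22.211.18 whose capacity is taken from the "cpus" and "mem" fields.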
Example #2
func makeResources(milliCPU int64, memory int64, pods int64) api.NodeResources {
	return api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
			api.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
		},
	}
}
func ExampleFormat() {
	memorySize := resource.NewQuantity(5*1024*1024*1024, resource.BinarySI)
	fmt.Printf("memorySize = %v\n", memorySize)

	diskSize := resource.NewQuantity(5*1000*1000*1000, resource.DecimalSI)
	fmt.Printf("diskSize = %v\n", diskSize)

	cores := resource.NewMilliQuantity(5300, resource.DecimalSI)
	fmt.Printf("cores = %v\n", cores)

	// Output:
	// memorySize = 5Gi
	// diskSize = 5G
	// cores = 5300m
}
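The strings printed above round-trip through resource.MustParse (the same helper used in Example #4 below); a small sketch reusing the values from ExampleFormat:
memorySize := resource.MustParse("5Gi")
fmt.Println(memorySize.Value()) // 5368709120, i.e. 5*1024*1024*1024

cores := resource.MustParse("5300m")
fmt.Println(cores.MilliValue()) // 5300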
Example #4
func TestResourceQuotaStatusConversion(t *testing.T) {
	// should serialize as "0"
	expected := resource.NewQuantity(int64(0), resource.DecimalSI)
	if "0" != expected.String() {
		t.Errorf("Expected: 0, Actual: %v, do not require units", expected.String())
	}

	parsed := resource.MustParse("0")
	if "0" != parsed.String() {
		t.Errorf("Expected: 0, Actual: %v, do not require units", parsed.String())
	}

	quota := &api.ResourceQuota{}
	quota.Status = api.ResourceQuotaStatus{}
	quota.Status.Hard = api.ResourceList{}
	quota.Status.Used = api.ResourceList{}
	quota.Status.Hard[api.ResourcePods] = *expected

	// round-trip the object
	data, _ := versioned.Codec.Encode(quota)
	object, _ := versioned.Codec.Decode(data)
	after := object.(*api.ResourceQuota)
	actualQuantity := after.Status.Hard[api.ResourcePods]
	actual := &actualQuantity

	// should be "0", but was "0m"
	if expected.String() != actual.String() {
		t.Errorf("Expected %v, Actual %v", expected.String(), actual.String())
	}
}
Example #5
// PodMemory computes the memory usage of a pod
func PodMemory(pod *api.Pod) *resource.Quantity {
	val := int64(0)
	for j := range pod.Spec.Containers {
		val = val + pod.Spec.Containers[j].Resources.Limits.Memory().Value()
	}
	return resource.NewQuantity(int64(val), resource.DecimalSI)
}
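PodMemory has a CPU counterpart, resourcequota.PodCPU, which the syncResourceQuota and IncrementUsage examples below call but which is not listed here. A plausible sketch that mirrors PodMemory by summing container CPU limits in millicores (not the verbatim source):
// PodCPU computes the CPU usage (limits) of a pod, in millicores.
// Sketch only: mirrors PodMemory above; the real helper may differ.
func PodCPU(pod *api.Pod) *resource.Quantity {
	val := int64(0)
	for j := range pod.Spec.Containers {
		val = val + pod.Spec.Containers[j].Resources.Limits.Cpu().MilliValue()
	}
	return resource.NewMilliQuantity(val, resource.DecimalSI)
}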
Example #6
// cpu is in cores, memory is in GiB
func makeResources(cpu float64, memory float64) *api.NodeResources {
	return &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(int64(cpu*1000), resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(int64(memory*1024*1024*1024), resource.BinarySI),
		},
	}
}
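A quick usage sketch of the helper above (the argument values are arbitrary): 2.5 cores format as 2500m and 8 GiB as 8Gi.
r := makeResources(2.5, 8)
cpu := r.Capacity[api.ResourceCPU]
mem := r.Capacity[api.ResourceMemory]
fmt.Printf("cpu = %v, memory = %v\n", cpu.String(), mem.String())
// cpu = 2500m, memory = 8Gi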
Example #7
// Instances returns an implementation of Instances for OpenStack.
func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
	glog.V(4).Info("openstack.Instances() called")

	compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil {
		glog.Warningf("Failed to find compute endpoint: %v", err)
		return nil, false
	}

	pager := flavors.ListDetail(compute, nil)

	flavor_to_resource := make(map[string]*api.NodeResources)
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
		flavorList, err := flavors.ExtractFlavors(page)
		if err != nil {
			return false, err
		}
		for _, flavor := range flavorList {
			rsrc := api.NodeResources{
				Capacity: api.ResourceList{
					api.ResourceCPU:            *resource.NewQuantity(int64(flavor.VCPUs), resource.DecimalSI),
					api.ResourceMemory:         *resource.NewQuantity(int64(flavor.RAM)*MiB, resource.BinarySI),
					"openstack.org/disk":       *resource.NewQuantity(int64(flavor.Disk)*GB, resource.DecimalSI),
					"openstack.org/rxTxFactor": *resource.NewMilliQuantity(int64(flavor.RxTxFactor)*1000, resource.DecimalSI),
					"openstack.org/swap":       *resource.NewQuantity(int64(flavor.Swap)*MiB, resource.BinarySI),
				},
			}
			flavor_to_resource[flavor.ID] = &rsrc
		}
		return true, nil
	})
	if err != nil {
		glog.Warningf("Failed to find compute flavors: %v", err)
		return nil, false
	}

	glog.V(3).Infof("Found %v compute flavors", len(flavor_to_resource))
	glog.V(1).Info("Claiming to support Instances")

	return &Instances{compute, flavor_to_resource}, true
}
Example #8
func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) api.ResourceList {
	c := api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(
			int64(info.NumCores*1000),
			resource.DecimalSI),
		api.ResourceMemory: *resource.NewQuantity(
			info.MemoryCapacity,
			resource.BinarySI),
	}
	return c
}
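For illustration (the machine values are made up; the field names are taken from the snippet above): a 4-core machine with 8 GiB of memory reports a capacity of cpu 4 and memory 8Gi.
info := &cadvisorApi.MachineInfo{NumCores: 4, MemoryCapacity: 8 * 1024 * 1024 * 1024}
c := CapacityFromMachineInfo(info)
cpu := c[api.ResourceCPU]
mem := c[api.ResourceMemory]
fmt.Printf("cpu = %v, memory = %v\n", cpu.String(), mem.String())
// cpu = 4, memory = 8Gi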
Example #9
func makeMinion(node string, milliCPU, memory int64) api.Node {
	return api.Node{
		ObjectMeta: api.ObjectMeta{Name: node},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				"memory": *resource.NewQuantity(memory, resource.BinarySI),
			},
		},
	}
}
Example #10
func newResourcePod(usage ...resourceRequest) *api.Pod {
	containers := []api.Container{}
	for _, req := range usage {
		containers = append(containers, api.Container{
			Resources: api.ResourceRequirements{
				Limits: api.ResourceList{
					api.ResourceCPU:    *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
					api.ResourceMemory: *resource.NewQuantity(req.memory, resource.BinarySI),
				},
			},
		})
	}
	return &api.Pod{
		Spec: api.PodSpec{
			Containers: containers,
		},
	}
}
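Usage sketch for the test helper above (field names inferred from the loop; the values are arbitrary): two resourceRequest entries produce a pod with two containers carrying the corresponding limits.
pod := newResourcePod(
	resourceRequest{milliCPU: 100, memory: 500 * 1024 * 1024},
	resourceRequest{milliCPU: 200, memory: 1024 * 1024 * 1024},
)
// pod.Spec.Containers[0] limits: cpu=100m, memory=500Mi
// pod.Spec.Containers[1] limits: cpu=200m, memory=1Gi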
Example #11
func TestGetResources(t *testing.T) {
	var instance0 ec2.Instance
	var instance1 ec2.Instance
	var instance2 ec2.Instance

	//0
	instance0.InstanceID = aws.String("m3.medium")
	instance0.InstanceType = aws.String("m3.medium")
	state0 := ec2.InstanceState{
		Name: aws.String("running"),
	}
	instance0.State = &state0

	//1
	instance1.InstanceID = aws.String("r3.8xlarge")
	instance1.InstanceType = aws.String("r3.8xlarge")
	state1 := ec2.InstanceState{
		Name: aws.String("running"),
	}
	instance1.State = &state1

	//2
	instance2.InstanceID = aws.String("unknown.type")
	instance2.InstanceType = aws.String("unknown.type")
	state2 := ec2.InstanceState{
		Name: aws.String("running"),
	}
	instance2.State = &state2

	instances := []*ec2.Instance{&instance0, &instance1, &instance2}

	aws1 := mockInstancesResp(instances)

	res1, err1 := aws1.GetNodeResources("m3.medium")
	if err1 != nil {
		t.Errorf("Should not error when instance type found: %v", err1)
	}
	e1 := &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(int64(3.0*1000), resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(int64(3.75*1024*1024*1024), resource.BinarySI),
		},
	}
	if !reflect.DeepEqual(e1, res1) {
		t.Errorf("Expected %v, got %v", e1, res1)
	}

	res2, err2 := aws1.GetNodeResources("r3.8xlarge")
	if err2 != nil {
		t.Errorf("Should not error when instance type found: %v", err2)
	}
	e2 := &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(int64(104.0*1000), resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(int64(244.0*1024*1024*1024), resource.BinarySI),
		},
	}
	if !reflect.DeepEqual(e2, res2) {
		t.Errorf("Expected %v, got %v", e2, res2)
	}

	res3, err3 := aws1.GetNodeResources("unknown.type")
	if err3 != nil {
		t.Errorf("Should not error when unknown instance type")
	}
	if res3 != nil {
		t.Errorf("Should return nil resources when unknown instance type")
	}
}
Example #12
func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore cache.Store) {
	goodCondition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionTrue,
		Reason:            "schedulable condition",
		LastHeartbeatTime: util.Time{time.Now()},
	}
	badCondition := api.NodeCondition{
		Type:              api.NodeReady,
		Status:            api.ConditionUnknown,
		Reason:            "unschedulable condition",
		LastHeartbeatTime: util.Time{time.Now()},
	}
	// Create a new schedulable node, since we're first going to apply
	// the unschedulable condition and verify that pods aren't scheduled.
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-node"},
		Spec:       api.NodeSpec{Unschedulable: false},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
			},
			Conditions: []api.NodeCondition{goodCondition},
		},
	}
	nodeKey, err := cache.MetaNamespaceKeyFunc(node)
	if err != nil {
		t.Fatalf("Couldn't retrieve key for node %v", node.Name)
	}

	// The test does the following for each nodeStateManager in this list:
	//	1. Create a new node
	//	2. Apply the makeUnSchedulable function
	//	3. Create a new pod
	//  4. Check that the pod doesn't get assigned to the node
	//  5. Apply the schedulable function
	//  6. Check that the pod *does* get assigned to the node
	//  7. Delete the pod and node.

	nodeModifications := []nodeStateManager{
		// Test node.Spec.Unschedulable=true/false
		{
			makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
				n.Spec.Unschedulable = true
				if _, err := c.Nodes().Update(n); err != nil {
					t.Fatalf("Failed to update node with unschedulable=true: %v", err)
				}
				err = waitForReflection(s, nodeKey, func(node interface{}) bool {
					// An unschedulable node should get deleted from the store
					return node == nil
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err)
				}
			},
			makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
				n.Spec.Unschedulable = false
				if _, err := c.Nodes().Update(n); err != nil {
					t.Fatalf("Failed to update node with unschedulable=false: %v", err)
				}
				err = waitForReflection(s, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*api.Node).Spec.Unschedulable == false
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for setting unschedulable=false: %v", err)
				}
			},
		},
		// Test node.Status.Conditions=ConditionTrue/Unknown
		{
			makeUnSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
				n.Status = api.NodeStatus{
					Capacity: api.ResourceList{
						api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
					},
					Conditions: []api.NodeCondition{badCondition},
				}
				if _, err = c.Nodes().UpdateStatus(n); err != nil {
					t.Fatalf("Failed to update node with bad status condition: %v", err)
				}
				err = waitForReflection(s, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionUnknown
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
				}
			},
			makeSchedulable: func(t *testing.T, n *api.Node, s cache.Store, c *client.Client) {
				n.Status = api.NodeStatus{
					Capacity: api.ResourceList{
						api.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
					},
					Conditions: []api.NodeCondition{goodCondition},
				}
				if _, err = c.Nodes().UpdateStatus(n); err != nil {
					t.Fatalf("Failed to update node with healthy status condition: %v", err)
				}
				err = waitForReflection(s, nodeKey, func(node interface{}) bool {
					return node != nil && node.(*api.Node).Status.Conditions[0].Status == api.ConditionTrue
				})
				if err != nil {
					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
				}
			},
		},
	}

	for i, mod := range nodeModifications {
		unSchedNode, err := restClient.Nodes().Create(node)
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		// Apply the unschedulable modification to the node, and wait for the reflection
		mod.makeUnSchedulable(t, unSchedNode, nodeStore, restClient)

		// Create the new pod, note that this needs to happen post unschedulable
		// modification or we have a race in the test.
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-pod"},
			Spec: api.PodSpec{
				Containers: []api.Container{{Name: "container", Image: "qingyuan/pause:go"}},
			},
		}
		myPod, err := restClient.Pods(api.NamespaceDefault).Create(pod)
		if err != nil {
			t.Fatalf("Failed to create pod: %v", err)
		}

		// There are no schedulable nodes - the pod shouldn't be scheduled.
		err = wait.Poll(time.Second, time.Second*10, podScheduled(restClient, myPod.Namespace, myPod.Name))
		if err == nil {
			t.Errorf("Pod scheduled successfully on unschedulable nodes")
		}
		if err != wait.ErrWaitTimeout {
			t.Errorf("Test %d: failed while trying to confirm the pod does not get scheduled on the node: %v", i, err)
		} else {
			t.Logf("Test %d: Pod did not get scheduled on an unschedulable node", i)
		}

		// Apply the schedulable modification to the node, and wait for the reflection
		schedNode, err := restClient.Nodes().Get(unSchedNode.Name)
		if err != nil {
			t.Fatalf("Failed to get node: %v", err)
		}
		mod.makeSchedulable(t, schedNode, nodeStore, restClient)

		// Wait until the pod is scheduled.
		err = wait.Poll(time.Second, time.Second*10, podScheduled(restClient, myPod.Namespace, myPod.Name))
		if err != nil {
			t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
		} else {
			t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
		}

		err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, nil)
		if err != nil {
			t.Errorf("Failed to delete pod: %v", err)
		}
		err = restClient.Nodes().Delete(schedNode.Name)
		if err != nil {
			t.Errorf("Failed to delete node: %v", err)
		}
	}
}
Example #13
			ObjectMeta: api.ObjectMeta{
				Name: name,
				Labels: map[string]string{
					"name": "foo",
					"time": value,
				},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "nginx",
						Image: "qingyuan/pause",
						Resources: api.ResourceRequirements{
							Limits: api.ResourceList{
								api.ResourceCPU:    *resource.NewMilliQuantity(100, resource.DecimalSI),
								api.ResourceMemory: *resource.NewQuantity(10*1024*1024, resource.DecimalSI),
							},
						},
					},
				},
			},
		}
		defer podClient.Delete(pod.Name, nil)
		_, err := podClient.Create(pod)
		if err != nil {
			Fail(fmt.Sprintf("Error creating a pod: %v", err))
		}
		expectNoError(waitForPodRunning(c, pod.Name))
	})
	It("should be submitted and removed", func() {
		podClient := c.Pods(api.NamespaceDefault)
Example #14
// syncResourceQuota runs a complete sync of current status
func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err error) {

	// quota is dirty if any part of spec hard limits differs from the status hard limits
	dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard)

	// dirty tracks if the usage status differs from the previous sync,
	// if so, we send a new usage with latest status
	// if this is our first sync, it will be dirty by default, since we need to track usage
	dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil)

	// Create a usage object that is based on the quota resource version
	usage := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name:            quota.Name,
			Namespace:       quota.Namespace,
			ResourceVersion: quota.ResourceVersion,
			Labels:          quota.Labels,
			Annotations:     quota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{},
			Used: api.ResourceList{},
		},
	}

	// set the hard values supported on the quota
	for k, v := range quota.Spec.Hard {
		usage.Status.Hard[k] = *v.Copy()
	}
	// set any last known observed status values for usage
	for k, v := range quota.Status.Used {
		usage.Status.Used[k] = *v.Copy()
	}

	set := map[api.ResourceName]bool{}
	for k := range usage.Status.Hard {
		set[k] = true
	}

	pods := &api.PodList{}
	if set[api.ResourcePods] || set[api.ResourceMemory] || set[api.ResourceCPU] {
		pods, err = rm.qingClient.Pods(usage.Namespace).List(labels.Everything(), fields.Everything())
		if err != nil {
			return err
		}
	}

	filteredPods := FilterQuotaPods(pods.Items)

	// iterate over each resource, and update observation
	for k := range usage.Status.Hard {

		// check whether there is a used value; if none, we are definitely dirty
		prevQuantity, found := usage.Status.Used[k]
		if !found {
			dirty = true
		}

		var value *resource.Quantity

		switch k {
		case api.ResourcePods:
			value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI)
		case api.ResourceMemory:
			val := int64(0)
			for _, pod := range filteredPods {
				val = val + PodMemory(pod).Value()
			}
			value = resource.NewQuantity(int64(val), resource.DecimalSI)
		case api.ResourceCPU:
			val := int64(0)
			for _, pod := range filteredPods {
				val = val + PodCPU(pod).MilliValue()
			}
			value = resource.NewMilliQuantity(int64(val), resource.DecimalSI)
		case api.ResourceServices:
			items, err := rm.qingClient.Services(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceReplicationControllers:
			items, err := rm.qingClient.ReplicationControllers(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceQuotas:
			items, err := rm.qingClient.ResourceQuotas(usage.Namespace).List(labels.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourceSecrets:
			items, err := rm.qingClient.Secrets(usage.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		case api.ResourcePersistentVolumeClaims:
			items, err := rm.qingClient.PersistentVolumeClaims(usage.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return err
			}
			value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI)
		}

		// ignore fields we do not understand (assume another controller is tracking it)
		if value != nil {
			// see if the value has changed
			dirty = dirty || (value.Value() != prevQuantity.Value())
			// just update the value
			usage.Status.Used[k] = *value
		}
	}

	// update the usage only if it changed
	if dirty {
		_, err = rm.qingClient.ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
		return err
	}
	return nil
}
Example #15
// IncrementUsage updates the supplied ResourceQuotaStatus object based on the incoming operation
// Return true if the usage must be recorded prior to admitting the new resource
// Return an error if the operation should not pass admission control
func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, client client.Interface) (bool, error) {
	dirty := false
	set := map[api.ResourceName]bool{}
	for k := range status.Hard {
		set[k] = true
	}
	obj := a.GetObject()
	// handle max counts for each kind of resource (pods, services, replicationControllers, etc.)
	if a.GetOperation() == admission.Create {
		// TODO v1beta1 had camel case, v1beta3 went to all lower, we can remove this line when we deprecate v1beta1
		resourceNormalized := strings.ToLower(a.GetResource())
		resourceName := resourceToResourceName[resourceNormalized]
		hard, hardFound := status.Hard[resourceName]
		if hardFound {
			used, usedFound := status.Used[resourceName]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value() >= hard.Value() {
				return false, fmt.Errorf("Limited to %s %s", hard.String(), resourceName)
			} else {
				status.Used[resourceName] = *resource.NewQuantity(used.Value()+int64(1), resource.DecimalSI)
				dirty = true
			}
		}
	}
	// handle memory/cpu constraints, and any diff of usage based on memory/cpu on updates
	if a.GetResource() == "pods" && (set[api.ResourceMemory] || set[api.ResourceCPU]) {
		pod := obj.(*api.Pod)
		deltaCPU := resourcequota.PodCPU(pod)
		deltaMemory := resourcequota.PodMemory(pod)
		// if this is an update, we need to find the delta cpu/memory usage from previous state
		if a.GetOperation() == admission.Update {
			oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name)
			if err != nil {
				return false, err
			}
			oldCPU := resourcequota.PodCPU(oldPod)
			oldMemory := resourcequota.PodMemory(oldPod)
			deltaCPU = resource.NewMilliQuantity(deltaCPU.MilliValue()-oldCPU.MilliValue(), resource.DecimalSI)
			deltaMemory = resource.NewQuantity(deltaMemory.Value()-oldMemory.Value(), resource.DecimalSI)
		}

		hardMem, hardMemFound := status.Hard[api.ResourceMemory]
		if hardMemFound {
			if set[api.ResourceMemory] && resourcequota.IsPodMemoryUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s memory, but pod has no specified memory limit", hardMem.String())
			}
			used, usedFound := status.Used[api.ResourceMemory]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.Value()+deltaMemory.Value() > hardMem.Value() {
				return false, fmt.Errorf("Limited to %s memory", hardMem.String())
			} else {
				status.Used[api.ResourceMemory] = *resource.NewQuantity(used.Value()+deltaMemory.Value(), resource.DecimalSI)
				dirty = true
			}
		}
		hardCPU, hardCPUFound := status.Hard[api.ResourceCPU]
		if hardCPUFound {
			if set[api.ResourceCPU] && resourcequota.IsPodCPUUnbounded(pod) {
				return false, fmt.Errorf("Limited to %s CPU, but pod has no specified cpu limit", hardCPU.String())
			}
			used, usedFound := status.Used[api.ResourceCPU]
			if !usedFound {
				return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.")
			}
			if used.MilliValue()+deltaCPU.MilliValue() > hardCPU.MilliValue() {
				return false, fmt.Errorf("Limited to %s CPU", hardCPU.String())
			} else {
				status.Used[api.ResourceCPU] = *resource.NewMilliQuantity(used.MilliValue()+deltaCPU.MilliValue(), resource.DecimalSI)
				dirty = true
			}
		}
	}
	return dirty, nil
}