Example #1
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
	if config.Env != nil {
		c := &template.Spec.Containers[0]
		for k, v := range config.Env {
			c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
		}
	}
	if config.Labels != nil {
		// Guard against a nil labels map before writing into it.
		if template.ObjectMeta.Labels == nil {
			template.ObjectMeta.Labels = make(map[string]string)
		}
		for k, v := range config.Labels {
			template.ObjectMeta.Labels[k] = v
		}
	}
	if config.NodeSelector != nil {
		template.Spec.NodeSelector = make(map[string]string)
		for k, v := range config.NodeSelector {
			template.Spec.NodeSelector[k] = v
		}
	}
	if config.Ports != nil {
		c := &template.Spec.Containers[0]
		for k, v := range config.Ports {
			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
		}
	}
	if config.HostPorts != nil {
		c := &template.Spec.Containers[0]
		for k, v := range config.HostPorts {
			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
		}
	}
	if config.CpuLimit > 0 || config.MemLimit > 0 {
		template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
	}
	if config.CpuLimit > 0 {
		template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
	}
	if config.MemLimit > 0 {
		template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
	}
	if config.CpuRequest > 0 || config.MemRequest > 0 {
		template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
	}
	if config.CpuRequest > 0 {
		template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
	}
	if config.MemRequest > 0 {
		template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
	}
	if len(config.Volumes) > 0 {
		template.Spec.Volumes = config.Volumes
	}
	if len(config.VolumeMounts) > 0 {
		template.Spec.Containers[0].VolumeMounts = config.VolumeMounts
	}
}
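Across these examples the same convention recurs: CPU is constructed in millicores with resource.NewMilliQuantity (1000m = 1 core) and memory in bytes with resource.NewQuantity. A minimal standalone sketch of just those constructors (import path as used in this era of the tree; newer code imports k8s.io/apimachinery/pkg/api/resource):
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	cpu := resource.NewMilliQuantity(250, resource.DecimalSI)    // 0.25 of a core
	mem := resource.NewQuantity(64*1024*1024, resource.BinarySI) // 64 MiB in bytes
	fmt.Println(cpu.String(), mem.String())                      // prints "250m 64Mi"
}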
Example #2
// testNodeMetricsData returns a fixed pair of node metrics and matching node
// objects for use in tests.
func testNodeMetricsData() (*metrics_api.NodeMetricsList, *api.NodeList) {
	metrics := &metrics_api.NodeMetricsList{
		ListMeta: unversioned.ListMeta{
			ResourceVersion: "1",
		},
		Items: []metrics_api.NodeMetrics{
			{
				ObjectMeta: v1.ObjectMeta{Name: "node1", ResourceVersion: "10"},
				Window:     unversioned.Duration{Duration: time.Minute},
				Usage: v1.ResourceList{
					v1.ResourceCPU:     *resource.NewMilliQuantity(1, resource.DecimalSI),
					v1.ResourceMemory:  *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
					v1.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
				},
			},
			{
				ObjectMeta: v1.ObjectMeta{Name: "node2", ResourceVersion: "11"},
				Window:     unversioned.Duration{Duration: time.Minute},
				Usage: v1.ResourceList{
					v1.ResourceCPU:     *resource.NewMilliQuantity(5, resource.DecimalSI),
					v1.ResourceMemory:  *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
					v1.ResourceStorage: *resource.NewQuantity(7*(1024*1024), resource.DecimalSI),
				},
			},
		},
	}
	nodes := &api.NodeList{
		ListMeta: unversioned.ListMeta{
			ResourceVersion: "15",
		},
		Items: []api.Node{
			{
				ObjectMeta: api.ObjectMeta{Name: "node1", ResourceVersion: "10"},
				Status: api.NodeStatus{
					Allocatable: api.ResourceList{
						api.ResourceCPU:     *resource.NewMilliQuantity(10, resource.DecimalSI),
						api.ResourceMemory:  *resource.NewQuantity(20*(1024*1024), resource.DecimalSI),
						api.ResourceStorage: *resource.NewQuantity(30*(1024*1024), resource.DecimalSI),
					},
				},
			},
			{
				ObjectMeta: api.ObjectMeta{Name: "node2", ResourceVersion: "11"},
				Status: api.NodeStatus{
					Allocatable: api.ResourceList{
						api.ResourceCPU:     *resource.NewMilliQuantity(50, resource.DecimalSI),
						api.ResourceMemory:  *resource.NewQuantity(60*(1024*1024), resource.DecimalSI),
						api.ResourceStorage: *resource.NewQuantity(70*(1024*1024), resource.DecimalSI),
					},
				},
			},
		},
	}
	return metrics, nodes
}
Example #3
// makeNode returns a node whose capacity and allocatable resources are set to
// the given CPU in millicores and memory in bytes.
func makeNode(node string, milliCPU, memory int64) *api.Node {
	return &api.Node{
		ObjectMeta: api.ObjectMeta{Name: node},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				"memory": *resource.NewQuantity(memory, resource.BinarySI),
			},
			Allocatable: api.ResourceList{
				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				"memory": *resource.NewQuantity(memory, resource.BinarySI),
			},
		},
	}
}
Example #4
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) {
	podClient := c.Pods(ns)

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					Image: "beta.gcr.io/google_containers/pause:2.0",
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							// Request enough CPU to fit only two pods on a given node.
							api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	_, err := podClient.Create(pod)
	expectNoError(err)
}
Example #5
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
	podClient := c.Core().Pods(ns)

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					Image: framework.GetPauseImageName(c),
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							// Request enough CPU to fit only two pods on a given node.
							api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	_, err := podClient.Create(pod)
	framework.ExpectNoError(err)
}
Example #6
// BuildTestPod creates a pod with specified resources.
func BuildTestPod(name string, cpu int64, mem int64) *kube_api.Pod {
	pod := &kube_api.Pod{
		ObjectMeta: kube_api.ObjectMeta{
			Namespace: "default",
			Name:      name,
		},
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{
				{
					Resources: kube_api.ResourceRequirements{
						Requests: kube_api.ResourceList{},
					},
				},
			},
		},
	}

	if cpu >= 0 {
		pod.Spec.Containers[0].Resources.Requests[kube_api.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
	}
	if mem >= 0 {
		pod.Spec.Containers[0].Resources.Requests[kube_api.ResourceMemory] = *resource.NewQuantity(mem, resource.DecimalSI)
	}

	return pod
}
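A hypothetical call, to show the unit convention and the negative-value escape hatch (names and values are illustrative):
// pod := BuildTestPod("p1", 500, 1024*1024*1024) // requests 500m CPU and 1GiB of memory
// skip := BuildTestPod("p2", 250, -1)            // a negative value leaves that request unset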
Example #7
func TestReservation(t *testing.T) {
	pod := buildPod("p1", 100, 200000)
	pod2 := &kube_api.Pod{
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{
				{
					Resources: kube_api.ResourceRequirements{
						Requests: kube_api.ResourceList{},
					},
				},
			},
		},
	}
	nodeInfo := schedulercache.NewNodeInfo(pod, pod, pod2)

	node := &kube_api.Node{
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
			},
		},
	}

	// Two copies of pod request 2*100m of the node's 2000m CPU capacity -> 0.1.
	reservation, err := calculateReservation(node, nodeInfo, kube_api.ResourceCPU)
	assert.NoError(t, err)
	assert.InEpsilon(t, 1.0/10, reservation, 0.01)

	// The node declares no memory capacity, so the memory reservation is an error.
	_, err = calculateReservation(node, nodeInfo, kube_api.ResourceMemory)
	assert.Error(t, err)
}
Example #8
func (ir initialResources) getEstimation(kind api.ResourceName, c *api.Container) (*resource.Quantity, error) {
	end := time.Now()
	start := end.Add(-week)
	var usage, samples int64
	var err error

	// Historical data from last 7 days for the same image:tag.
	if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, c.Image, true, start, end); err != nil {
		return nil, err
	}
	if samples < samplesThreshold {
		// Historical data from last 30 days for the same image:tag.
		start := end.Add(-month)
		if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, c.Image, true, start, end); err != nil {
			return nil, err
		}
	}
	if samples < samplesThreshold {
		// Historical data from last 30 days for the same image.
		start := end.Add(-month)
		image := strings.Split(c.Image, ":")[0]
		if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, image, false, start, end); err != nil {
			return nil, err
		}
	}

	if samples > 0 && kind == api.ResourceCPU {
		return resource.NewMilliQuantity(usage, resource.DecimalSI), nil
	}
	if samples > 0 && kind == api.ResourceMemory {
		return resource.NewQuantity(usage, resource.DecimalSI), nil
	}
	return nil, nil
}
Example #9
// PodCPU computes the total CPU limit across all containers in a pod.
// TODO: Remove this once the mesos scheduler becomes request aware
func PodCPU(pod *api.Pod) *resource.Quantity {
	val := int64(0)
	for j := range pod.Spec.Containers {
		val = val + pod.Spec.Containers[j].Resources.Limits.Cpu().MilliValue()
	}
	return resource.NewMilliQuantity(val, resource.DecimalSI)
}
Example #10
// makeAllocatableResources builds an allocatable ResourceList from millicores
// of CPU, bytes of memory, and a maximum pod count.
func makeAllocatableResources(milliCPU int64, memory int64, pods int64) api.ResourceList {
	return api.ResourceList{
		api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
		api.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
	}
}
Example #11
// cpu is in cores, memory is in GiB
func makeResources(cpu float64, memory float64) *api.NodeResources {
	return &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(int64(cpu*1000), resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(int64(memory*1024*1024*1024), resource.BinarySI),
		},
	}
}
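For example, makeResources(2.5, 4) would yield a capacity of 2500m CPU and 4Gi of memory: cores are scaled by 1000 into millicores, GiB by 1024^3 into bytes.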
Example #12
// resourceLimits returns a container option that sets CPU and memory limits
// from CPU shares (in cores) and megabytes.
func resourceLimits(cpu resources.CPUShares, mem resources.MegaBytes) ctOpt {
	return ctOpt(func(c *api.Container) {
		if c.Resources.Limits == nil {
			c.Resources.Limits = make(api.ResourceList)
		}
		c.Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(int64(float64(cpu)*1000.0), resource.DecimalSI)
		c.Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(int64(float64(mem)*1024.0*1024.0), resource.BinarySI)
	})
}
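A hypothetical application of this option (assuming ctOpt is a plain functional option over *api.Container and that CPUShares/MegaBytes are float-backed types, as the conversions above suggest):
// opt := resourceLimits(0.5, 64) // limits of 500m CPU and 64Mi memory
// opt(&container)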
Example #13
func TestEstimate(t *testing.T) {
	cpuPerPod := int64(500)
	memoryPerPod := int64(1000 * 1024 * 1024)

	pod := &apiv1.Pod{
		Spec: apiv1.PodSpec{
			Containers: []apiv1.Container{
				{
					Resources: apiv1.ResourceRequirements{
						Requests: apiv1.ResourceList{
							apiv1.ResourceCPU:    *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI),
							apiv1.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	estimator := NewBasicNodeEstimator()

	for i := 0; i < 5; i++ {
		podCopy := *pod
		estimator.Add(&podCopy)
	}

	assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue())
	assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value())
	assert.Equal(t, 5, estimator.GetCount())

	node := &apiv1.Node{
		Status: apiv1.NodeStatus{
			Capacity: apiv1.ResourceList{
				apiv1.ResourceCPU:    *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI),
				apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI),
				apiv1.ResourcePods:   *resource.NewQuantity(10, resource.DecimalSI),
			},
		},
	}
	// 5 pods need 2500m CPU (1.67x node capacity) and 5000Mi memory (2.5x);
	// the memory ratio rounds up to an estimate of 3 nodes.
	estimate, report := estimator.Estimate(node)
	assert.Contains(t, estimator.GetDebug(), "CPU")
	assert.Contains(t, report, "CPU")
	assert.Equal(t, 3, estimate)
}
Example #14
func TestBinpackingEstimateWithPorts(t *testing.T) {
	estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker())

	cpuPerPod := int64(200)
	memoryPerPod := int64(1000 * 1024 * 1024)
	pod := &kube_api.Pod{
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{
				{
					Resources: kube_api.ResourceRequirements{
						Requests: kube_api.ResourceList{
							kube_api.ResourceCPU:    *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI),
							kube_api.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI),
						},
					},
					Ports: []kube_api.ContainerPort{
						{
							HostPort: 5555,
						},
					},
				},
			},
		},
	}
	pods := make([]*kube_api.Pod, 0)
	for i := 0; i < 8; i++ {
		pods = append(pods, pod)
	}
	node := &kube_api.Node{
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourceCPU:    *resource.NewMilliQuantity(5*cpuPerPod, resource.DecimalSI),
				kube_api.ResourceMemory: *resource.NewQuantity(5*memoryPerPod, resource.DecimalSI),
				kube_api.ResourcePods:   *resource.NewQuantity(10, resource.DecimalSI),
			},
		},
	}
	node.Status.Allocatable = node.Status.Capacity

	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(node)
	// Every pod asks for the same HostPort, so only one fits per node: 8 pods need 8 nodes.
	estimate := estimator.Estimate(pods, nodeInfo)
	assert.Equal(t, 8, estimate)
}
Example #15
// updateContainerResources derives request and limit values for a container
// from the configured ratios and its existing limits.
func updateContainerResources(config *internalConfig, container *kapi.Container) {
	// Guard against a nil requests map before the ratio-derived writes below.
	if container.Resources.Requests == nil {
		container.Resources.Requests = kapi.ResourceList{}
	}
	resources := container.Resources
	memLimit, memFound := resources.Limits[kapi.ResourceMemory]
	if memFound && config.memoryRequestToLimitRatio != 0 {
		// Memory is measured in whole bytes; the plugin rounds the derived
		// request down to the nearest MiB (or MB) for ease of use.
		amount := memLimit.Value() * int64(config.memoryRequestToLimitRatio*100) / 100
		// TODO: move into resource.Quantity
		var mod int64
		switch memLimit.Format {
		case resource.BinarySI:
			mod = 1024 * 1024
		default:
			mod = 1000 * 1000
		}
		if rem := amount % mod; rem != 0 {
			amount = amount - rem
		}
		q := resource.NewQuantity(amount, memLimit.Format)
		if memFloor.Cmp(*q) > 0 {
			q = memFloor.Copy()
		}
		resources.Requests[kapi.ResourceMemory] = *q
	}
	if memFound && config.limitCPUToMemoryRatio != 0 {
		amount := float64(memLimit.Value()) * config.limitCPUToMemoryRatio * cpuBaseScaleFactor
		q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI)
		if cpuFloor.Cmp(*q) > 0 {
			q = cpuFloor.Copy()
		}
		resources.Limits[kapi.ResourceCPU] = *q
	}

	cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU]
	if cpuFound && config.cpuRequestToLimitRatio != 0 {
		amount := float64(cpuLimit.MilliValue()) * config.cpuRequestToLimitRatio
		q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format)
		if cpuFloor.Cmp(*q) > 0 {
			q = cpuFloor.Copy()
		}
		resources.Requests[kapi.ResourceCPU] = *q
	}
}
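The rounding step above is easy to verify by hand. A self-contained sketch of the same integer arithmetic for a hypothetical 512Mi (BinarySI) limit and a 0.5 memoryRequestToLimitRatio:
package main

import "fmt"

func main() {
	limit := int64(512 * 1024 * 1024)      // 512Mi memory limit, in bytes
	amount := limit * int64(0.5*100) / 100 // apply the ratio in integer math
	mod := int64(1024 * 1024)              // BinarySI limits round down to a whole MiB
	amount -= amount % mod
	fmt.Println(amount, amount/(1024*1024)) // 268435456 bytes = 256 MiB
}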
Example #16
func TestEstimateWithPorts(t *testing.T) {
	cpuPerPod := int64(500)
	memoryPerPod := int64(1000 * 1024 * 1024)

	pod := &apiv1.Pod{
		Spec: apiv1.PodSpec{
			Containers: []apiv1.Container{
				{
					Resources: apiv1.ResourceRequirements{
						Requests: apiv1.ResourceList{
							apiv1.ResourceCPU:    *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI),
							apiv1.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI),
						},
					},
					Ports: []apiv1.ContainerPort{
						{
							HostPort: 5555,
						},
					},
				},
			},
		},
	}

	estimator := NewBasicNodeEstimator()
	for i := 0; i < 5; i++ {
		estimator.Add(pod)
	}
	node := &apiv1.Node{
		Status: apiv1.NodeStatus{
			Capacity: apiv1.ResourceList{
				apiv1.ResourceCPU:    *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI),
				apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI),
				apiv1.ResourcePods:   *resource.NewQuantity(10, resource.DecimalSI),
			},
		},
	}

	// Every pod asks for the same HostPort, so at most one fits per node and 5 pods need 5 nodes.
	estimate, report := estimator.Estimate(node)
	assert.Contains(t, estimator.GetDebug(), "CPU")
	assert.Contains(t, report, "CPU")
	assert.Equal(t, 5, estimate)
}
Example #17
func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) v1.ResourceList {
	c := v1.ResourceList{
		v1.ResourceCPU: *resource.NewMilliQuantity(
			int64(info.NumCores*1000),
			resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(
			int64(info.MemoryCapacity),
			resource.BinarySI),
	}
	return c
}
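A hypothetical input, to show the scaling: cAdvisor reports whole cores and bytes, so NumCores is multiplied by 1000 into millicores while MemoryCapacity passes through unchanged:
// info := &cadvisorApi.MachineInfo{NumCores: 4, MemoryCapacity: 8 * 1024 * 1024 * 1024}
// CapacityFromMachineInfo(info) // cpu: "4" (i.e. 4000m), memory: "8Gi"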
Example #18
// ResourceList converts a Resource into a v1.ResourceList, including any
// opaque integer resources.
func (r *Resource) ResourceList() v1.ResourceList {
	result := v1.ResourceList{
		v1.ResourceCPU:       *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
		v1.ResourceMemory:    *resource.NewQuantity(r.Memory, resource.BinarySI),
		v1.ResourceNvidiaGPU: *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI),
	}
	for rName, rQuant := range r.OpaqueIntResources {
		result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
	}
	return result
}
Example #19
// setNodeStatusMachineInfo fills in node capacity, allocatable resources, and
// machine identity from cAdvisor's cached machine info.
func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
	// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
	// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
	info, err := kl.GetCachedMachineInfo()
	if err != nil {
		// TODO(roberthbailey): This is required for test-cmd.sh to pass.
		// See if the test should be updated instead.
		node.Status.Capacity = api.ResourceList{
			api.ResourceCPU:       *resource.NewMilliQuantity(0, resource.DecimalSI),
			api.ResourceMemory:    resource.MustParse("0Gi"),
			api.ResourcePods:      *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI),
			api.ResourceNvidiaGPU: *resource.NewQuantity(int64(kl.nvidiaGPUs), resource.DecimalSI),
		}
		glog.Errorf("Error getting machine info: %v", err)
	} else {
		node.Status.NodeInfo.MachineID = info.MachineID
		node.Status.NodeInfo.SystemUUID = info.SystemUUID
		node.Status.Capacity = cadvisor.CapacityFromMachineInfo(info)
		if kl.podsPerCore > 0 {
			node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
				int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI)
		} else {
			node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
				int64(kl.maxPods), resource.DecimalSI)
		}
		node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity(
			int64(kl.nvidiaGPUs), resource.DecimalSI)
		if node.Status.NodeInfo.BootID != "" &&
			node.Status.NodeInfo.BootID != info.BootID {
			// TODO: This requires a transaction, either both node status is updated
			// and event is recorded or neither should happen, see issue #6055.
			kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.NodeRebooted,
				"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
		}
		node.Status.NodeInfo.BootID = info.BootID
	}

	// Set Allocatable.
	node.Status.Allocatable = make(api.ResourceList)
	for k, v := range node.Status.Capacity {
		value := *(v.Copy())
		if kl.reservation.System != nil {
			value.Sub(kl.reservation.System[k])
		}
		if kl.reservation.Kubernetes != nil {
			value.Sub(kl.reservation.Kubernetes[k])
		}
		if value.Sign() < 0 {
			// Negative Allocatable resources don't make sense.
			value.Set(0)
		}
		node.Status.Allocatable[k] = value
	}
}
Example #20
func TestEstimate(t *testing.T) {
	cpuPerPod := int64(500)
	memoryPerPod := int64(1000 * 1024 * 1024)

	pod := &kube_api.Pod{
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{
				{
					Resources: kube_api.ResourceRequirements{
						Requests: kube_api.ResourceList{
							kube_api.ResourceCPU:    *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI),
							kube_api.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	estimator := NewBasicNodeEstimator()

	for i := 0; i < 5; i++ {
		estimator.Add(pod)
	}

	assert.Equal(t, int64(500*5), estimator.cpuSum.MilliValue())
	assert.Equal(t, int64(5*memoryPerPod), estimator.memorySum.Value())
	assert.Equal(t, 5, estimator.count)

	node := &kube_api.Node{
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourceCPU:    *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI),
				kube_api.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI),
				kube_api.ResourcePods:   *resource.NewQuantity(10, resource.DecimalSI),
			},
		},
	}

	assert.Equal(t, 3, estimator.Estimate(node))
}
Example #21
func TestBinpackingEstimate(t *testing.T) {
	estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker())

	cpuPerPod := int64(350)
	memoryPerPod := int64(1000 * 1024 * 1024)
	pod := &apiv1.Pod{
		Spec: apiv1.PodSpec{
			Containers: []apiv1.Container{
				{
					Resources: apiv1.ResourceRequirements{
						Requests: apiv1.ResourceList{
							apiv1.ResourceCPU:    *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI),
							apiv1.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	pods := make([]*apiv1.Pod, 0)
	for i := 0; i < 10; i++ {
		pods = append(pods, pod)
	}
	node := &apiv1.Node{
		Status: apiv1.NodeStatus{
			Capacity: apiv1.ResourceList{
				apiv1.ResourceCPU:    *resource.NewMilliQuantity(cpuPerPod*3-50, resource.DecimalSI),
				apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI),
				apiv1.ResourcePods:   *resource.NewQuantity(10, resource.DecimalSI),
			},
		},
	}
	node.Status.Allocatable = node.Status.Capacity

	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(node)
	// CPU capacity (3*350m - 50m = 1000m) fits two 350m pods per node, so 10 pods need 5 nodes.
	estimate := estimator.Estimate(pods, nodeInfo)
	assert.Equal(t, 5, estimate)
}
Example #22
// testNodeMetricsData returns fixed node metrics for use in tests.
func testNodeMetricsData() []metrics_api.NodeMetrics {
	return []metrics_api.NodeMetrics{
		{
			ObjectMeta: api.ObjectMeta{Name: "node1", ResourceVersion: "10"},
			Window:     unversioned.Duration{Duration: time.Minute},
			Usage: api.ResourceList{
				api.ResourceCPU:     *resource.NewMilliQuantity(1, resource.DecimalSI),
				api.ResourceMemory:  *resource.NewQuantity(2*(1024*1024), resource.DecimalSI),
				api.ResourceStorage: *resource.NewQuantity(3*(1024*1024), resource.DecimalSI),
			},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "node2", ResourceVersion: "11"},
			Window:     unversioned.Duration{Duration: time.Minute},
			Usage: api.ResourceList{
				api.ResourceCPU:     *resource.NewMilliQuantity(5, resource.DecimalSI),
				api.ResourceMemory:  *resource.NewQuantity(6*(1024*1024), resource.DecimalSI),
				api.ResourceStorage: *resource.NewQuantity(7*(1024*1024), resource.DecimalSI),
			},
		},
	}
}
Example #23
// SystemCgroupsLimit sums the CPU millicores reserved for all system
// containers and reports the total as a ResourceList.
func (cm *containerManagerImpl) SystemCgroupsLimit() api.ResourceList {
	cpuLimit := int64(0)

	// Sum up resources of all external containers.
	for _, cont := range cm.systemContainers {
		cpuLimit += cont.cpuMillicores
	}

	return api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(
			cpuLimit,
			resource.DecimalSI),
	}
}
Example #24
// buildNode returns a node with the given name, CPU capacity in millicores,
// memory capacity in bytes, and room for 100 pods.
func buildNode(name string, cpu int64, mem int64) *kube_api.Node {
	return &kube_api.Node{
		ObjectMeta: kube_api.ObjectMeta{
			Name: name,
		},
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourceCPU:    *resource.NewMilliQuantity(cpu, resource.DecimalSI),
				kube_api.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
				kube_api.ResourcePods:   *resource.NewQuantity(100, resource.DecimalSI),
			},
		},
	}
}
Example #25
func ExampleFormat() {
	memorySize := resource.NewQuantity(5*1024*1024*1024, resource.BinarySI)
	fmt.Printf("memorySize = %v\n", memorySize)

	diskSize := resource.NewQuantity(5*1000*1000*1000, resource.DecimalSI)
	fmt.Printf("diskSize = %v\n", diskSize)

	cores := resource.NewMilliQuantity(5300, resource.DecimalSI)
	fmt.Printf("cores = %v\n", cores)

	// Output:
	// memorySize = 5Gi
	// diskSize = 5G
	// cores = 5300m
}
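Parsing goes the other way and recovers the same values; a small sketch under the same import-path assumption as the sketch after Example #1:
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	cores := resource.MustParse("5300m")
	memorySize := resource.MustParse("5Gi")
	fmt.Println(cores.MilliValue()) // 5300
	fmt.Println(memorySize.Value()) // 5368709120 (5 * 1024^3 bytes)
}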
Example #26
// newResourcePod builds a pod with one container per resourceRequest, each
// requesting the given milliCPU and memory.
func newResourcePod(usage ...resourceRequest) *api.Pod {
	containers := []api.Container{}
	for _, req := range usage {
		containers = append(containers, api.Container{
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceCPU:    *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
					api.ResourceMemory: *resource.NewQuantity(req.memory, resource.BinarySI),
				},
			},
		})
	}
	return &api.Pod{
		Spec: api.PodSpec{
			Containers: containers,
		},
	}
}
Example #27
// ParseResourceList converts a MetricSet's CPU usage rate and memory working
// set metrics into an api.ResourceList.
func ParseResourceList(ms *core.MetricSet) (api.ResourceList, error) {
	cpu, found := ms.MetricValues[core.MetricCpuUsageRate.MetricDescriptor.Name]
	if !found {
		return api.ResourceList{}, fmt.Errorf("cpu not found")
	}
	mem, found := ms.MetricValues[core.MetricMemoryWorkingSet.MetricDescriptor.Name]
	if !found {
		return api.ResourceList{}, fmt.Errorf("memory not found")
	}

	return api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(
			cpu.IntValue,
			resource.DecimalSI),
		api.ResourceMemory: *resource.NewQuantity(
			mem.IntValue,
			resource.BinarySI),
	}, nil
}
Example #28
// Instances returns an implementation of Instances for OpenStack.
func (os *OpenStack) Instances() (cloudprovider.Instances, bool) {
	glog.V(4).Info("openstack.Instances() called")

	compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
		Region: os.region,
	})
	if err != nil {
		glog.Warningf("Failed to find compute endpoint: %v", err)
		return nil, false
	}

	pager := flavors.ListDetail(compute, nil)

	flavor_to_resource := make(map[string]*api.NodeResources)
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
		flavorList, err := flavors.ExtractFlavors(page)
		if err != nil {
			return false, err
		}
		for _, flavor := range flavorList {
			rsrc := api.NodeResources{
				Capacity: api.ResourceList{
					api.ResourceCPU:            *resource.NewQuantity(int64(flavor.VCPUs), resource.DecimalSI),
					api.ResourceMemory:         *resource.NewQuantity(int64(flavor.RAM)*MiB, resource.BinarySI),
					"openstack.org/disk":       *resource.NewQuantity(int64(flavor.Disk)*GB, resource.DecimalSI),
					"openstack.org/rxTxFactor": *resource.NewMilliQuantity(int64(flavor.RxTxFactor)*1000, resource.DecimalSI),
					"openstack.org/swap":       *resource.NewQuantity(int64(flavor.Swap)*MiB, resource.BinarySI),
				},
			}
			flavor_to_resource[flavor.ID] = &rsrc
		}
		return true, nil
	})
	if err != nil {
		glog.Warningf("Failed to find compute flavors: %v", err)
		return nil, false
	}

	glog.V(3).Infof("Found %v compute flavors", len(flavor_to_resource))
	glog.V(1).Info("Claiming to support Instances")

	return &Instances{compute, flavor_to_resource}, true
}
Example #29
// buildPod returns a pod in the default namespace requesting the given
// millicores of CPU and bytes of memory.
func buildPod(name string, cpu int64, mem int64) *kube_api.Pod {
	return &kube_api.Pod{
		ObjectMeta: kube_api.ObjectMeta{
			Namespace: "default",
			Name:      name,
		},
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{
				{
					Resources: kube_api.ResourceRequirements{
						Requests: kube_api.ResourceList{
							kube_api.ResourceCPU:    *resource.NewMilliQuantity(cpu, resource.DecimalSI),
							kube_api.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
						},
					},
				},
			},
		},
	}
}
Example #30
// BuildTestNode creates a node with specified capacity.
func BuildTestNode(name string, cpu int64, mem int64) *kube_api.Node {
	node := &kube_api.Node{
		ObjectMeta: kube_api.ObjectMeta{
			Name: name,
		},
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI),
			},
		},
	}

	if cpu >= 0 {
		node.Status.Capacity[kube_api.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
	}
	if mem >= 0 {
		node.Status.Capacity[kube_api.ResourceMemory] = *resource.NewQuantity(mem, resource.DecimalSI)
	}

	node.Status.Allocatable = node.Status.Capacity

	return node
}
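A hypothetical pairing of the two test helpers (BuildTestPod is in Example #6), showing the shared negative-value convention:
// node := BuildTestNode("n1", 2000, 4096*1024*1024) // 2 cores, 4GiB capacity
// pod := BuildTestPod("p1", 500, -1)                // 500m CPU request, memory left unset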