func makeAllocatableResources(milliCPU int64, memory int64, pods int64) api.ResourceList {
	return api.ResourceList{
		api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
		api.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
	}
}
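// A minimal usage sketch of the helper above (an illustration, not part of the
// original source; it assumes the era-appropriate k8s.io/kubernetes import paths).
// Note how a milli-CPU quantity canonicalizes back to whole cores when it divides evenly.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// Mirrors makeAllocatableResources(4000, 8<<30, 110): 4 cores, 8Gi memory, 110 pods.
	rl := api.ResourceList{
		api.ResourceCPU:    *resource.NewMilliQuantity(4000, resource.DecimalSI),
		api.ResourceMemory: *resource.NewQuantity(8<<30, resource.BinarySI),
		api.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
	}
	cpu := rl[api.ResourceCPU] // copy out of the map: String() has a pointer receiver
	mem := rl[api.ResourceMemory]
	fmt.Println(cpu.String(), mem.String()) // "4" "8Gi"
}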
// makeImageStreamTagAdmissionUsageFunc returns a function that computes resource usage
// for a given image stream tag during admission.
func makeImageStreamTagAdmissionUsageFunc(isNamespacer osclient.ImageStreamsNamespacer) generic.UsageFunc {
	return func(object runtime.Object) kapi.ResourceList {
		ist, ok := object.(*imageapi.ImageStreamTag)
		if !ok {
			return kapi.ResourceList{}
		}

		res := map[kapi.ResourceName]resource.Quantity{
			imageapi.ResourceImageStreams: *resource.NewQuantity(0, resource.BinarySI),
		}

		isName, _, err := imageapi.ParseImageStreamTagName(ist.Name)
		if err != nil {
			utilruntime.HandleError(err)
			return kapi.ResourceList{}
		}

		is, err := isNamespacer.ImageStreams(ist.Namespace).Get(isName)
		if err != nil {
			if !kerrors.IsNotFound(err) {
				utilruntime.HandleError(fmt.Errorf("failed to get image stream %s/%s: %v", ist.Namespace, isName, err))
			}
		}
		if is == nil {
			res[imageapi.ResourceImageStreams] = *resource.NewQuantity(1, resource.BinarySI)
		}

		return res
	}
}
// makeImageStreamImportAdmissionUsageFunc returns a function that computes the usage of an image stream import.
func makeImageStreamImportAdmissionUsageFunc(isNamespacer osclient.ImageStreamsNamespacer) generic.UsageFunc {
	return func(object runtime.Object) kapi.ResourceList {
		isi, ok := object.(*imageapi.ImageStreamImport)
		if !ok {
			return kapi.ResourceList{}
		}

		usage := map[kapi.ResourceName]resource.Quantity{
			imageapi.ResourceImageStreams: *resource.NewQuantity(0, resource.DecimalSI),
		}

		if !isi.Spec.Import || (len(isi.Spec.Images) == 0 && isi.Spec.Repository == nil) {
			return usage
		}

		is, err := isNamespacer.ImageStreams(isi.Namespace).Get(isi.Name)
		if err != nil {
			if !kerrors.IsNotFound(err) {
				utilruntime.HandleError(fmt.Errorf("failed to list image streams: %v", err))
			}
		}
		if is == nil {
			usage[imageapi.ResourceImageStreams] = *resource.NewQuantity(1, resource.DecimalSI)
		}

		return usage
	}
}
func TestCompareThresholdValue(t *testing.T) {
	testCases := []struct {
		a, b  ThresholdValue
		equal bool
	}{
		{
			a: ThresholdValue{
				Quantity: resource.NewQuantity(123, resource.BinarySI),
			},
			b: ThresholdValue{
				Quantity: resource.NewQuantity(123, resource.BinarySI),
			},
			equal: true,
		},
		{
			a: ThresholdValue{
				Quantity: resource.NewQuantity(123, resource.BinarySI),
			},
			b: ThresholdValue{
				Quantity: resource.NewQuantity(456, resource.BinarySI),
			},
			equal: false,
		},
		{
			a: ThresholdValue{
				Quantity: resource.NewQuantity(123, resource.BinarySI),
			},
			b: ThresholdValue{
				Percentage: 0.1,
			},
			equal: false,
		},
		{
			a: ThresholdValue{
				Percentage: 0.1,
			},
			b: ThresholdValue{
				Percentage: 0.1,
			},
			equal: true,
		},
		{
			a: ThresholdValue{
				Percentage: 0.2,
			},
			b: ThresholdValue{
				Percentage: 0.1,
			},
			equal: false,
		},
	}

	for i, testCase := range testCases {
		if compareThresholdValue(testCase.a, testCase.b) != testCase.equal ||
			compareThresholdValue(testCase.b, testCase.a) != testCase.equal {
			t.Errorf("Test case: %v failed", i)
		}
	}
}
Example #5
func parseMesosState(blob []byte) (*mesosState, error) {
	type State struct {
		ClusterName string `json:"cluster"`
		Slaves      []*struct {
			Id        string                 `json:"id"`        // ex: 20150106-162714-3815890698-5050-2453-S2
			Pid       string                 `json:"pid"`       // ex: slave(1)@10.22.211.18:5051
			Hostname  string                 `json:"hostname"`  // ex: 10.22.211.18, or slave-123.nowhere.com
			Resources map[string]interface{} `json:"resources"` // ex: {"mem": 123, "ports": "[31000-32000]"}
		} `json:"slaves"`
	}
	state := &State{ClusterName: defaultClusterName}
	if err := json.Unmarshal(blob, state); err != nil {
		return nil, err
	}
	nodes := []*slaveNode{}
	for _, slave := range state.Slaves {
		if slave.Hostname == "" {
			continue
		}
		node := &slaveNode{hostname: slave.Hostname}
		cap := api.ResourceList{}
		if len(slave.Resources) > 0 {
			// attempt to translate CPU (cores) and memory (MB) resources
			if cpu, found := slave.Resources["cpus"]; found {
				if cpuNum, ok := cpu.(float64); ok {
					cap[api.ResourceCPU] = *resource.NewQuantity(int64(cpuNum), resource.DecimalSI)
				} else {
					log.Warningf("unexpected slave cpu resource type %T: %v", cpu, cpu)
				}
			} else {
				log.Warningf("slave failed to report cpu resource")
			}
			if mem, found := slave.Resources["mem"]; found {
				if memNum, ok := mem.(float64); ok {
					cap[api.ResourceMemory] = *resource.NewQuantity(int64(memNum), resource.BinarySI)
				} else {
					log.Warningf("unexpected slave mem resource type %T: %v", mem, mem)
				}
			} else {
				log.Warningf("slave failed to report mem resource")
			}
		}
		if len(cap) > 0 {
			node.resources = &api.NodeResources{
				Capacity: cap,
			}
			log.V(4).Infof("node %q reporting capacity %v", node.hostname, cap)
		}
		nodes = append(nodes, node)
	}

	result := &mesosState{
		clusterName: state.ClusterName,
		nodes:       nodes,
	}

	return result, nil
}
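// An illustration (not from the original source) of a caveat in the "cpus" branch
// above: int64(cpuNum) truncates fractional cores, so a slave advertising 2.5 cpus
// is captured as 2. NewMilliQuantity would preserve the fraction.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	cpuNum := 2.5 // as decoded from a slave's "cpus" resource
	truncated := resource.NewQuantity(int64(cpuNum), resource.DecimalSI)
	precise := resource.NewMilliQuantity(int64(cpuNum*1000), resource.DecimalSI)
	fmt.Println(truncated.String(), precise.String()) // "2" "2500m"
}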
Example #6
// getFsInfo writes metrics.Capacity and metrics.Available from the filesystem info
func (md *metricsDu) getFsInfo(metrics *Metrics) error {
	available, capacity, _, err := util.FsInfo(md.path)
	if err != nil {
		return fmt.Errorf("Failed to get FsInfo due to error %v", err)
	}
	metrics.Available = resource.NewQuantity(available, resource.BinarySI)
	metrics.Capacity = resource.NewQuantity(capacity, resource.BinarySI)
	return nil
}
Example #7
// getFsInfo writes metrics.Capacity, metrics.Used and metrics.Available from the filesystem info
func (md *metricsStatFS) getFsInfo(metrics *Metrics) error {
	available, capacity, usage, err := util.FsInfo(md.path)
	if err != nil {
		return NewFsInfoFailedError(err)
	}
	metrics.Available = resource.NewQuantity(available, resource.BinarySI)
	metrics.Capacity = resource.NewQuantity(capacity, resource.BinarySI)
	metrics.Used = resource.NewQuantity(usage, resource.BinarySI)
	return nil
}
Example #8
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
	if config.Env != nil {
		for k, v := range config.Env {
			c := &template.Spec.Containers[0]
			c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
		}
	}
	if config.Labels != nil {
		if template.ObjectMeta.Labels == nil {
			// guard against a nil map: assigning into it would panic
			template.ObjectMeta.Labels = make(map[string]string, len(config.Labels))
		}
		for k, v := range config.Labels {
			template.ObjectMeta.Labels[k] = v
		}
	}
	if config.NodeSelector != nil {
		template.Spec.NodeSelector = make(map[string]string)
		for k, v := range config.NodeSelector {
			template.Spec.NodeSelector[k] = v
		}
	}
	if config.Ports != nil {
		for k, v := range config.Ports {
			c := &template.Spec.Containers[0]
			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
		}
	}
	if config.HostPorts != nil {
		for k, v := range config.HostPorts {
			c := &template.Spec.Containers[0]
			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
		}
	}
	if config.CpuLimit > 0 || config.MemLimit > 0 {
		template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
	}
	if config.CpuLimit > 0 {
		template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
	}
	if config.MemLimit > 0 {
		template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
	}
	if config.CpuRequest > 0 || config.MemRequest > 0 {
		template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
	}
	if config.CpuRequest > 0 {
		template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
	}
	if config.MemRequest > 0 {
		template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
	}
	if len(config.Volumes) > 0 {
		template.Spec.Volumes = config.Volumes
	}
	if len(config.VolumeMounts) > 0 {
		template.Spec.Containers[0].VolumeMounts = config.VolumeMounts
	}
}
Example #9
// getFsInfo writes metrics.Capacity and metrics.Available from the filesystem
// info
func (md *metricsDu) getFsInfo(metrics *Metrics) error {
	available, capacity, _, inodes, inodesFree, _, err := util.FsInfo(md.path)
	if err != nil {
		return NewFsInfoFailedError(err)
	}
	metrics.Available = resource.NewQuantity(available, resource.BinarySI)
	metrics.Capacity = resource.NewQuantity(capacity, resource.BinarySI)
	metrics.Inodes = resource.NewQuantity(inodes, resource.BinarySI)
	metrics.InodesFree = resource.NewQuantity(inodesFree, resource.BinarySI)
	return nil
}
Example #10
// makeSignalObservations derives observations using the specified summary provider.
func makeSignalObservations(summaryProvider stats.SummaryProvider) (signalObservations, statsFunc, error) {
	summary, err := summaryProvider.Get()
	if err != nil {
		return nil, nil, err
	}

	// build the function to work against for pod stats
	statsFunc := cachedStatsFunc(summary.Pods)
	// build an evaluation context for current eviction signals
	result := signalObservations{}

	if memory := summary.Node.Memory; memory != nil && memory.AvailableBytes != nil && memory.WorkingSetBytes != nil {
		result[SignalMemoryAvailable] = signalObservation{
			available: resource.NewQuantity(int64(*memory.AvailableBytes), resource.BinarySI),
			capacity:  resource.NewQuantity(int64(*memory.AvailableBytes+*memory.WorkingSetBytes), resource.BinarySI),
			time:      memory.Time,
		}
	}
	if nodeFs := summary.Node.Fs; nodeFs != nil {
		if nodeFs.AvailableBytes != nil && nodeFs.CapacityBytes != nil {
			result[SignalNodeFsAvailable] = signalObservation{
				available: resource.NewQuantity(int64(*nodeFs.AvailableBytes), resource.BinarySI),
				capacity:  resource.NewQuantity(int64(*nodeFs.CapacityBytes), resource.BinarySI),
				// TODO: add timestamp to stat (see memory stat)
			}
		}
		if nodeFs.InodesFree != nil && nodeFs.Inodes != nil {
			result[SignalNodeFsInodesFree] = signalObservation{
				available: resource.NewQuantity(int64(*nodeFs.InodesFree), resource.BinarySI),
				capacity:  resource.NewQuantity(int64(*nodeFs.Inodes), resource.BinarySI),
				// TODO: add timestamp to stat (see memory stat)
			}
		}
	}
	if summary.Node.Runtime != nil {
		if imageFs := summary.Node.Runtime.ImageFs; imageFs != nil {
			if imageFs.AvailableBytes != nil && imageFs.CapacityBytes != nil {
				result[SignalImageFsAvailable] = signalObservation{
					available: resource.NewQuantity(int64(*imageFs.AvailableBytes), resource.BinarySI),
					capacity:  resource.NewQuantity(int64(*imageFs.CapacityBytes), resource.BinarySI),
					// TODO: add timestamp to stat (see memory stat)
				}
				if imageFs.InodesFree != nil && imageFs.Inodes != nil {
					result[SignalImageFsInodesFree] = signalObservation{
						available: resource.NewQuantity(int64(*imageFs.InodesFree), resource.BinarySI),
						capacity:  resource.NewQuantity(int64(*imageFs.Inodes), resource.BinarySI),
						// TODO: add timestamp to stat (see memory stat)
					}
				}
			}
		}
	}
	return result, statsFunc, nil
}
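// The memory observation above derives capacity as available + working set, because
// the summary API reports no explicit memory capacity. A standalone sketch of that
// arithmetic with made-up numbers (illustration only):
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	availableBytes := uint64(2 << 30)  // 2Gi still free
	workingSetBytes := uint64(6 << 30) // 6Gi actively in use
	available := resource.NewQuantity(int64(availableBytes), resource.BinarySI)
	capacity := resource.NewQuantity(int64(availableBytes+workingSetBytes), resource.BinarySI)
	fmt.Println(available.String(), capacity.String()) // "2Gi" "8Gi"
}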
Example #11
func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
	// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
	// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
	info, err := kl.GetCachedMachineInfo()
	if err != nil {
		// TODO(roberthbailey): This is required for test-cmd.sh to pass.
		// See if the test should be updated instead.
		node.Status.Capacity = api.ResourceList{
			api.ResourceCPU:       *resource.NewMilliQuantity(0, resource.DecimalSI),
			api.ResourceMemory:    resource.MustParse("0Gi"),
			api.ResourcePods:      *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI),
			api.ResourceNvidiaGPU: *resource.NewQuantity(int64(kl.nvidiaGPUs), resource.DecimalSI),
		}
		glog.Errorf("Error getting machine info: %v", err)
	} else {
		node.Status.NodeInfo.MachineID = info.MachineID
		node.Status.NodeInfo.SystemUUID = info.SystemUUID
		node.Status.Capacity = cadvisor.CapacityFromMachineInfo(info)
		if kl.podsPerCore > 0 {
			node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
				int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI)
		} else {
			node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
				int64(kl.maxPods), resource.DecimalSI)
		}
		node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity(
			int64(kl.nvidiaGPUs), resource.DecimalSI)
		if node.Status.NodeInfo.BootID != "" &&
			node.Status.NodeInfo.BootID != info.BootID {
			// TODO: This requires a transaction, either both node status is updated
			// and event is recorded or neither should happen, see issue #6055.
			kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.NodeRebooted,
				"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
		}
		node.Status.NodeInfo.BootID = info.BootID
	}

	// Set Allocatable.
	node.Status.Allocatable = make(api.ResourceList)
	for k, v := range node.Status.Capacity {
		value := *(v.Copy())
		if kl.reservation.System != nil {
			value.Sub(kl.reservation.System[k])
		}
		if kl.reservation.Kubernetes != nil {
			value.Sub(kl.reservation.Kubernetes[k])
		}
		if value.Sign() < 0 {
			// Negative Allocatable resources don't make sense.
			value.Set(0)
		}
		node.Status.Allocatable[k] = value
	}
}
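// The allocatable loop above is capacity minus the system and Kubernetes reservations,
// clamped at zero. A self-contained sketch of the same Quantity arithmetic (the
// reservation values here are hypothetical):
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	capacity := resource.MustParse("8Gi")
	systemReserved := resource.MustParse("1Gi")
	kubeReserved := resource.MustParse("500Mi")

	allocatable := *(capacity.Copy()) // copy first: Quantity shares internal state
	allocatable.Sub(systemReserved)
	allocatable.Sub(kubeReserved)
	if allocatable.Sign() < 0 {
		allocatable.Set(0) // negative allocatable makes no sense
	}
	fmt.Println(allocatable.String()) // "6668Mi" (8192Mi - 1024Mi - 500Mi)
}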
Example #12
func (r *Resource) ResourceList() v1.ResourceList {
	result := v1.ResourceList{
		v1.ResourceCPU:       *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
		v1.ResourceMemory:    *resource.NewQuantity(r.Memory, resource.BinarySI),
		v1.ResourceNvidiaGPU: *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI),
	}
	for rName, rQuant := range r.OpaqueIntResources {
		result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
	}
	return result
}
Example #13
func TestSchedulerFailedSchedulingReasons(t *testing.T) {
	stop := make(chan struct{})
	defer close(stop)
	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
	scache := schedulercache.New(10*time.Minute, stop)
	node := api.Node{
		ObjectMeta: api.ObjectMeta{Name: "machine1"},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceCPU:    *(resource.NewQuantity(2, resource.DecimalSI)),
				api.ResourceMemory: *(resource.NewQuantity(100, resource.DecimalSI)),
				api.ResourcePods:   *(resource.NewQuantity(10, resource.DecimalSI)),
			},
			Allocatable: api.ResourceList{
				api.ResourceCPU:    *(resource.NewQuantity(2, resource.DecimalSI)),
				api.ResourceMemory: *(resource.NewQuantity(100, resource.DecimalSI)),
				api.ResourcePods:   *(resource.NewQuantity(10, resource.DecimalSI)),
			}},
	}
	scache.AddNode(&node)
	nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
	predicateMap := map[string]algorithm.FitPredicate{
		"PodFitsResources": predicates.PodFitsResources,
	}

	scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap)

	podWithTooBigResourceRequests := podWithResources("bar", "", api.ResourceList{
		api.ResourceCPU:    *(resource.NewQuantity(4, resource.DecimalSI)),
		api.ResourceMemory: *(resource.NewQuantity(500, resource.DecimalSI)),
	}, api.ResourceList{
		api.ResourceCPU:    *(resource.NewQuantity(4, resource.DecimalSI)),
		api.ResourceMemory: *(resource.NewQuantity(500, resource.DecimalSI)),
	})
	queuedPodStore.Add(podWithTooBigResourceRequests)
	scheduler.scheduleOne()

	select {
	case err := <-errChan:
		expectErr := &FitError{
			Pod: podWithTooBigResourceRequests,
			FailedPredicates: FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{
				predicates.NewInsufficientResourceError(api.ResourceCPU, 4000, 0, 2000),
				predicates.NewInsufficientResourceError(api.ResourceMemory, 500, 0, 100),
			}},
		}
		if !reflect.DeepEqual(expectErr, err) {
			t.Errorf("err want=%+v, get=%+v", expectErr, err)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
	}
}
Example #14
func buildNode(name string, cpu int64, mem int64) *kube_api.Node {
	return &kube_api.Node{
		ObjectMeta: kube_api.ObjectMeta{
			Name: name,
		},
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourceCPU:    *resource.NewMilliQuantity(cpu, resource.DecimalSI),
				kube_api.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
				kube_api.ResourcePods:   *resource.NewQuantity(100, resource.DecimalSI),
			},
		},
	}
}
Example #15
func makeNode(node string, milliCPU, memory int64) *api.Node {
	return &api.Node{
		ObjectMeta: api.ObjectMeta{Name: node},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				"memory": *resource.NewQuantity(memory, resource.BinarySI),
			},
			Allocatable: api.ResourceList{
				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				"memory": *resource.NewQuantity(memory, resource.BinarySI),
			},
		},
	}
}
Example #16
func ExampleFormat() {
	memorySize := resource.NewQuantity(5*1024*1024*1024, resource.BinarySI)
	fmt.Printf("memorySize = %v\n", memorySize)

	diskSize := resource.NewQuantity(5*1000*1000*1000, resource.DecimalSI)
	fmt.Printf("diskSize = %v\n", diskSize)

	cores := resource.NewMilliQuantity(5300, resource.DecimalSI)
	fmt.Printf("cores = %v\n", cores)

	// Output:
	// memorySize = 5Gi
	// diskSize = 5G
	// cores = 5300m
}
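// The suffix follows the Format chosen at construction: BinarySI canonicalizes to the
// largest power-of-two suffix (Ki, Mi, Gi) that divides the value exactly, DecimalSI to
// the largest power-of-ten suffix (k, M, G), and milli-scale values that do not reduce
// to whole units keep the "m" suffix, hence 5Gi, 5G, and 5300m above.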
Example #17
// diskUsage converts used bytes into a resource quantity.
func diskUsage(fsStats *statsapi.FsStats) *resource.Quantity {
	if fsStats == nil || fsStats.UsedBytes == nil {
		return &resource.Quantity{Format: resource.BinarySI}
	}
	usage := int64(*fsStats.UsedBytes)
	return resource.NewQuantity(usage, resource.BinarySI)
}
Example #18
func TestResourceQuotaStatusConversion(t *testing.T) {
	// should serialize as "0"
	expected := resource.NewQuantity(int64(0), resource.DecimalSI)
	if "0" != expected.String() {
		t.Errorf("Expected: 0, Actual: %v, do not require units", expected.String())
	}

	parsed := resource.MustParse("0")
	if "0" != parsed.String() {
		t.Errorf("Expected: 0, Actual: %v, do not require units", parsed.String())
	}

	quota := &api.ResourceQuota{}
	quota.Status = api.ResourceQuotaStatus{}
	quota.Status.Hard = api.ResourceList{}
	quota.Status.Used = api.ResourceList{}
	quota.Status.Hard[api.ResourcePods] = *expected

	// round-trip the object
	data, _ := versioned.Codec.Encode(quota)
	object, _ := versioned.Codec.Decode(data)
	after := object.(*api.ResourceQuota)
	actualQuantity := after.Status.Hard[api.ResourcePods]
	actual := &actualQuantity

	// should be "0", but was "0m"
	if expected.String() != actual.String() {
		t.Errorf("Expected %v, Actual %v", expected.String(), actual.String())
	}
}
Example #19
func (ir initialResources) getEstimation(kind api.ResourceName, c *api.Container) (*resource.Quantity, error) {
	end := time.Now()
	start := end.Add(-week)
	var usage, samples int64
	var err error

	// Historical data from last 7 days for the same image:tag.
	if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, c.Image, true, start, end); err != nil {
		return nil, err
	}
	if samples < samplesThreshold {
		// Historical data from last 30 days for the same image:tag.
		start := end.Add(-month)
		if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, c.Image, true, start, end); err != nil {
			return nil, err
		}
	}
	if samples < samplesThreshold {
		// Historical data from last 30 days for the same image.
		start := end.Add(-month)
		image := strings.Split(c.Image, ":")[0]
		if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, image, false, start, end); err != nil {
			return nil, err
		}
	}

	if samples > 0 && kind == api.ResourceCPU {
		return resource.NewMilliQuantity(usage, resource.DecimalSI), nil
	}
	if samples > 0 && kind == api.ResourceMemory {
		return resource.NewQuantity(usage, resource.DecimalSI), nil
	}
	return nil, nil
}
Example #20
// exceedsLimits checks if given image exceeds LimitRanges defined in ImageStream's namespace.
func exceedsLimits(is *imageapi.ImageStream, image *imageapi.Image, limits map[string][]*kapi.LimitRange) bool {
	limitRanges, ok := limits[is.Namespace]
	if !ok {
		return false
	}
	if len(limitRanges) == 0 {
		return false
	}

	imageSize := resource.NewQuantity(image.DockerImageMetadata.Size, resource.BinarySI)
	for _, limitRange := range limitRanges {
		if limitRange == nil {
			continue
		}
		for _, limit := range limitRange.Spec.Limits {
			if limit.Type != imageapi.LimitTypeImage {
				continue
			}

			limitQuantity, ok := limit.Max[kapi.ResourceStorage]
			if !ok {
				continue
			}
			if limitQuantity.Cmp(*imageSize) < 0 {
				// image size is larger than the permitted limit range max size
				glog.V(4).Infof("Image %s in stream %s/%s exceeds limit %s: %v vs %v",
					image.Name, is.Namespace, is.Name, limitRange.Name, *imageSize, limitQuantity)
				return true
			}
		}
	}
	return false
}
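// The comparison above relies on Quantity.Cmp, which returns -1, 0, or 1 like
// bytes.Compare, so limitQuantity.Cmp(*imageSize) < 0 reads "the limit is smaller
// than the image". A toy illustration with invented sizes:
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	limit := resource.MustParse("100Mi")
	imageSize := resource.NewQuantity(200*1024*1024, resource.BinarySI) // a 200Mi image

	exceeds := limit.Cmp(*imageSize) < 0 // limit < size: the image exceeds the limit
	fmt.Println(exceeds)                 // true
}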
Example #21
func TestResourceQuotaStatusConversion(t *testing.T) {
	if !registered.IsAllowedVersion(versioned.SchemeGroupVersion) {
		return
	}

	// should serialize as "0"
	expected := resource.NewQuantity(int64(0), resource.DecimalSI)
	if "0" != expected.String() {
		t.Errorf("Expected: 0, Actual: %v, do not require units", expected.String())
	}

	parsed := resource.MustParse("0")
	if "0" != parsed.String() {
		t.Errorf("Expected: 0, Actual: %v, do not require units", parsed.String())
	}

	quota := &api.ResourceQuota{}
	quota.Status = api.ResourceQuotaStatus{}
	quota.Status.Hard = api.ResourceList{}
	quota.Status.Used = api.ResourceList{}
	quota.Status.Hard[api.ResourcePods] = *expected

	// round-trip the object
	object := roundTrip(t, quota)
	after := object.(*api.ResourceQuota)

	actualQuantity := after.Status.Hard[api.ResourcePods]
	actual := &actualQuantity

	// should be "0", but was "0m"
	if expected.String() != actual.String() {
		t.Errorf("Expected %v, Actual %v", expected.String(), actual.String())
	}
}
Example #22
func persistentVolumes(names []string) []*api.PersistentVolume {
	var result []*api.PersistentVolume
	for _, name := range names {
		result = append(result, &api.PersistentVolume{
			TypeMeta: unversioned.TypeMeta{
				Kind:       "PersistentVolume",
				APIVersion: "v1",
			},
			ObjectMeta: api.ObjectMeta{
				Name: name,
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					"storage": *resource.NewQuantity(defaultDiskSizeGb*1000*1000*1000, resource.DecimalSI),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName: name,
						FSType: "btrfs",
					},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
					api.ReadOnlyMany,
				},
			},
		})
	}
	return result
}
Example #23
// inodeUsage converts inodes consumed into a resource quantity.
func inodeUsage(fsStats *statsapi.FsStats) *resource.Quantity {
	if fsStats == nil || fsStats.InodesUsed == nil {
		return &resource.Quantity{Format: resource.BinarySI}
	}
	usage := int64(*fsStats.InodesUsed)
	return resource.NewQuantity(usage, resource.BinarySI)
}
Example #24
func startMemoryThresholdNotifier(thresholds []Threshold, observations signalObservations, hard bool, handler thresholdNotifierHandlerFunc) error {
	for _, threshold := range thresholds {
		if threshold.Signal != SignalMemoryAvailable || hard != isHardEvictionThreshold(threshold) {
			continue
		}
		observed, found := observations[SignalMemoryAvailable]
		if !found {
			continue
		}
		cgroups, err := cm.GetCgroupSubsystems()
		if err != nil {
			return err
		}
		// TODO add support for eviction from --cgroup-root
		cgpath, found := cgroups.MountPoints["memory"]
		if !found || len(cgpath) == 0 {
			return fmt.Errorf("memory cgroup mount point not found")
		}
		attribute := "memory.usage_in_bytes"
		quantity := getThresholdQuantity(threshold.Value, observed.capacity)
		usageThreshold := resource.NewQuantity(observed.capacity.Value(), resource.DecimalSI)
		usageThreshold.Sub(*quantity)
		description := fmt.Sprintf("<%s available", formatThresholdValue(threshold.Value))
		memcgThresholdNotifier, err := NewMemCGThresholdNotifier(cgpath, attribute, usageThreshold.String(), description, handler)
		if err != nil {
			return err
		}
		go memcgThresholdNotifier.Start(wait.NeverStop)
		return nil
	}
	return nil
}
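// The cgroup notifier takes a usage ceiling, while eviction thresholds are phrased as
// an availability floor, hence the inversion above: usageThreshold = capacity - threshold.
// A worked sketch with hypothetical numbers:
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	capacity := resource.NewQuantity(8<<30, resource.BinarySI) // 8Gi of memory
	threshold := resource.MustParse("1Gi")                     // evict when <1Gi is available

	usageThreshold := resource.NewQuantity(capacity.Value(), resource.DecimalSI)
	usageThreshold.Sub(threshold)
	// usage_in_bytes crossing 7Gi means less than 1Gi remains available
	fmt.Println(usageThreshold.String()) // "7516192768"
}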
Example #25
// PodMemory computes total memory limit across all containers in a pod
// TODO: Remove this once the mesos scheduler becomes request aware
func PodMemory(pod *api.Pod) *resource.Quantity {
	val := int64(0)
	for j := range pod.Spec.Containers {
		val = val + pod.Spec.Containers[j].Resources.Limits.Memory().Value()
	}
	return resource.NewQuantity(val, resource.DecimalSI)
}
Example #26
// memoryUsage converts working set into a resource quantity.
func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
	if memStats == nil || memStats.WorkingSetBytes == nil {
		return &resource.Quantity{Format: resource.BinarySI}
	}
	usage := int64(*memStats.WorkingSetBytes)
	return resource.NewQuantity(usage, resource.BinarySI)
}
Example #27
// BuildTestPod creates a pod with specified resources.
func BuildTestPod(name string, cpu int64, mem int64) *kube_api.Pod {
	pod := &kube_api.Pod{
		ObjectMeta: kube_api.ObjectMeta{
			Namespace: "default",
			Name:      name,
		},
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{
				{
					Resources: kube_api.ResourceRequirements{
						Requests: kube_api.ResourceList{},
					},
				},
			},
		},
	}

	if cpu >= 0 {
		pod.Spec.Containers[0].Resources.Requests[kube_api.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
	}
	if mem >= 0 {
		pod.Spec.Containers[0].Resources.Requests[kube_api.ResourceMemory] = *resource.NewQuantity(mem, resource.DecimalSI)
	}

	return pod
}
Example #28
func makeNNodes(c client.Interface, N int) {
	baseNode := &api.Node{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "scheduler-test-node-",
		},
		Spec: api.NodeSpec{
			ExternalID: "foobar",
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
				api.ResourceCPU:    resource.MustParse("4"),
				api.ResourceMemory: resource.MustParse("32Gi"),
			},
			Phase: api.NodeRunning,
			Conditions: []api.NodeCondition{
				{Type: api.NodeReady, Status: api.ConditionTrue},
			},
		},
	}
	for i := 0; i < N; i++ {
		if _, err := c.Nodes().Create(baseNode); err != nil {
			panic("error creating node: " + err.Error())
		}
	}
}
Example #29
// ServiceUsageFunc knows how to measure usage associated with services
func ServiceUsageFunc(object runtime.Object) api.ResourceList {
	result := api.ResourceList{}
	var serviceType api.ServiceType
	var ports int

	switch t := object.(type) {
	case *v1.Service:
		serviceType = api.ServiceType(t.Spec.Type)
		ports = len(t.Spec.Ports)
	case *api.Service:
		serviceType = t.Spec.Type
		ports = len(t.Spec.Ports)
	default:
		panic(fmt.Sprintf("expect *api.Service or *v1.Service, got %v", t))
	}

	// default service usage
	result[api.ResourceServices] = resource.MustParse("1")
	result[api.ResourceServicesLoadBalancers] = resource.MustParse("0")
	result[api.ResourceServicesNodePorts] = resource.MustParse("0")
	switch serviceType {
	case api.ServiceTypeNodePort:
		// node port services need to count node ports
		value := resource.NewQuantity(int64(ports), resource.DecimalSI)
		result[api.ResourceServicesNodePorts] = *value
	case api.ServiceTypeLoadBalancer:
		// load balancer services need to count load balancers
		result[api.ResourceServicesLoadBalancers] = resource.MustParse("1")
	}
	return result
}
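// A quick sketch of what the function above reports for a NodePort service with three
// ports (illustration only; it assumes ServiceUsageFunc and the old k8s.io/kubernetes
// import paths are in scope):
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

func main() {
	svc := &api.Service{
		Spec: api.ServiceSpec{
			Type:  api.ServiceTypeNodePort,
			Ports: make([]api.ServicePort, 3), // only the port count matters here
		},
	}
	usage := ServiceUsageFunc(svc)
	services := usage[api.ResourceServices]
	nodePorts := usage[api.ResourceServicesNodePorts]
	fmt.Println(services.String(), nodePorts.String()) // "1" "3"
}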
Example #30
// runFind executes the "find" command and writes the results to metrics.InodesUsed
func (md *metricsDu) runFind(metrics *Metrics) error {
	inodesUsed, err := util.Find(md.path)
	if err != nil {
		return err
	}
	metrics.InodesUsed = resource.NewQuantity(inodesUsed, resource.BinarySI)
	return nil
}