// Validates that a Quantity is not negative
func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if value.Cmp(resource.Quantity{}) < 0 {
		allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg))
	}
	return allErrs
}
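// Illustrative usage sketch, not part of the original source: a negative quantity such
// as "-100m" yields a single field.Invalid error, while zero and positive values pass.
// Assumes the resource, field, and fmt packages used by the surrounding code; the field
// path names are arbitrary example values.
func exampleValidateNonnegativeQuantity() {
	negative := resource.MustParse("-100m")
	errs := ValidateNonnegativeQuantity(negative, field.NewPath("spec", "resources", "requests", "cpu"))
	fmt.Println(len(errs)) // 1: the quantity is negative

	zero := resource.Quantity{}
	fmt.Println(len(ValidateNonnegativeQuantity(zero, field.NewPath("limits", "memory")))) // 0: zero is allowed
}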
func addCpuLimit(opts []*unit.UnitOption, limit *resource.Quantity) ([]*unit.UnitOption, error) {
	if limit.Value() > resource.MaxMilliValue {
		return nil, fmt.Errorf("cpu limit exceeds the maximum millivalue: %v", limit.String())
	}
	quota := strconv.Itoa(int(limit.MilliValue()/10)) + "%"
	opts = append(opts, unit.NewUnitOption("Service", "CPUQuota", quota))
	return opts, nil
}
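// A minimal sketch (not from the original source) of the CPUQuota arithmetic used by
// addCpuLimit: systemd expresses CPU quota as a percentage of a single core, so a limit
// of "500m" (MilliValue 500) becomes "50%" and "2" CPUs become "200%". Assumes the same
// resource and go-systemd unit packages imported above.
func exampleCpuQuota() {
	limit := resource.MustParse("500m")
	opts, err := addCpuLimit(nil, &limit)
	if err == nil && len(opts) == 1 {
		fmt.Println(opts[0].Value) // "50%"
	}
}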
// FuzzerFor can randomly populate api objects that are destined for version.
func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) *fuzz.Fuzzer {
	f := fuzz.New().NilChance(.5).NumElements(1, 1)
	if src != nil {
		f.RandSource(src)
	}
	f.Funcs(
		func(j *int, c fuzz.Continue) {
			*j = int(c.Int31())
		},
		func(j **int, c fuzz.Continue) {
			if c.RandBool() {
				i := int(c.Int31())
				*j = &i
			} else {
				*j = nil
			}
		},
		func(q *resource.Quantity, c fuzz.Continue) {
			*q = *resource.NewQuantity(c.Int63n(1000), resource.DecimalExponent)
		},
		func(j *runtime.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *unversioned.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *api.ObjectMeta, c fuzz.Continue) {
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
			j.UID = types.UID(c.RandString())
			j.GenerateName = c.RandString()

			var sec, nsec int64
			c.Fuzz(&sec)
			c.Fuzz(&nsec)
			j.CreationTimestamp = unversioned.Unix(sec, nsec).Rfc3339Copy()
		},
		func(j *api.ObjectReference, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = c.RandString()
			j.Kind = c.RandString()
			j.Namespace = c.RandString()
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.FieldPath = c.RandString()
		},
		func(j *unversioned.ListMeta, c fuzz.Continue) {
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
		},
		func(j *api.ListOptions, c fuzz.Continue) {
			label, _ := labels.Parse("a=b")
			j.LabelSelector = label
			field, _ := fields.ParseSelector("a=b")
			j.FieldSelector = field
		},
		func(j *api.PodExecOptions, c fuzz.Continue) {
			j.Stdout = true
			j.Stderr = true
		},
		func(j *api.PodAttachOptions, c fuzz.Continue) {
			j.Stdout = true
			j.Stderr = true
		},
		func(s *api.PodSpec, c fuzz.Continue) {
			c.FuzzNoCustom(s)
			// has a default value
			ttl := int64(30)
			if c.RandBool() {
				ttl = int64(c.Uint32())
			}
			s.TerminationGracePeriodSeconds = &ttl

			c.Fuzz(s.SecurityContext)

			if s.SecurityContext == nil {
				s.SecurityContext = new(api.PodSecurityContext)
			}
		},
		func(j *api.PodPhase, c fuzz.Continue) {
			statuses := []api.PodPhase{api.PodPending, api.PodRunning, api.PodFailed, api.PodUnknown}
			*j = statuses[c.Rand.Intn(len(statuses))]
		},
		func(j *api.Binding, c fuzz.Continue) {
			c.Fuzz(&j.ObjectMeta)
			j.Target.Name = c.RandString()
		},
		func(j *api.ReplicationControllerSpec, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			//j.TemplateRef = nil // this is required for round trip
		},
		func(j *extensions.DeploymentStrategy, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			// Ensure that strategyType is one of valid values.
			strategyTypes := []extensions.DeploymentStrategyType{extensions.RecreateDeploymentStrategyType, extensions.RollingUpdateDeploymentStrategyType}
			j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
			if j.Type != extensions.RollingUpdateDeploymentStrategyType {
				j.RollingUpdate = nil
			} else {
				rollingUpdate := extensions.RollingUpdateDeployment{}
				if c.RandBool() {
					rollingUpdate.MaxUnavailable = intstr.FromInt(int(c.RandUint64()))
					rollingUpdate.MaxSurge = intstr.FromInt(int(c.RandUint64()))
				} else {
					rollingUpdate.MaxSurge = intstr.FromString(fmt.Sprintf("%d%%", c.RandUint64()))
				}
				j.RollingUpdate = &rollingUpdate
			}
		},
		func(j *batch.JobSpec, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			completions := int32(c.Rand.Int31())
			parallelism := int32(c.Rand.Int31())
			j.Completions = &completions
			j.Parallelism = &parallelism
			if c.Rand.Int31()%2 == 0 {
				j.ManualSelector = newBool(true)
			} else {
				j.ManualSelector = nil
			}
		},
		func(sj *batch.ScheduledJobSpec, c fuzz.Continue) {
			c.FuzzNoCustom(sj)
			suspend := c.RandBool()
			sj.Suspend = &suspend
			sds := int64(c.RandUint64())
			sj.StartingDeadlineSeconds = &sds
			sj.Schedule = c.RandString()
		},
		func(cp *batch.ConcurrencyPolicy, c fuzz.Continue) {
			policies := []batch.ConcurrencyPolicy{batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent}
			*cp = policies[c.Rand.Intn(len(policies))]
		},
		func(j *api.List, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			// TODO: uncomment when round trip starts from a versioned object
			if false { //j.Items == nil {
				j.Items = []runtime.Object{}
			}
		},
		func(j *runtime.Object, c fuzz.Continue) {
			// TODO: uncomment when round trip starts from a versioned object
			if true { //c.RandBool() {
				*j = &runtime.Unknown{
					// We do not set TypeMeta here because it is not carried through a round trip
					Raw:         []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`),
					ContentType: runtime.ContentTypeJSON,
				}
			} else {
				types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}}
				t := types[c.Rand.Intn(len(types))]
				c.Fuzz(t)
				*j = t
			}
		},
		func(q *api.ResourceRequirements, c fuzz.Continue) {
			randomQuantity := func() resource.Quantity {
				var q resource.Quantity
				c.Fuzz(&q)
				// precalc the string for benchmarking purposes
				_ = q.String()
				return q
			}
			q.Limits = make(api.ResourceList)
			q.Requests = make(api.ResourceList)
			cpuLimit := randomQuantity()
			q.Limits[api.ResourceCPU] = *cpuLimit.Copy()
			q.Requests[api.ResourceCPU] = *cpuLimit.Copy()
			memoryLimit := randomQuantity()
			q.Limits[api.ResourceMemory] = *memoryLimit.Copy()
			q.Requests[api.ResourceMemory] = *memoryLimit.Copy()
			storageLimit := randomQuantity()
			q.Limits[api.ResourceStorage] = *storageLimit.Copy()
			q.Requests[api.ResourceStorage] = *storageLimit.Copy()
		},
		func(q *api.LimitRangeItem, c fuzz.Continue) {
			var cpuLimit resource.Quantity
			c.Fuzz(&cpuLimit)

			q.Type = api.LimitTypeContainer
			q.Default = make(api.ResourceList)
			q.Default[api.ResourceCPU] = *(cpuLimit.Copy())

			q.DefaultRequest = make(api.ResourceList)
			q.DefaultRequest[api.ResourceCPU] = *(cpuLimit.Copy())

			q.Max = make(api.ResourceList)
			q.Max[api.ResourceCPU] = *(cpuLimit.Copy())

			q.Min = make(api.ResourceList)
			q.Min[api.ResourceCPU] = *(cpuLimit.Copy())

			q.MaxLimitRequestRatio = make(api.ResourceList)
			q.MaxLimitRequestRatio[api.ResourceCPU] = resource.MustParse("10")
		},
		func(p *api.PullPolicy, c fuzz.Continue) {
			policies := []api.PullPolicy{api.PullAlways, api.PullNever, api.PullIfNotPresent}
			*p = policies[c.Rand.Intn(len(policies))]
		},
		func(rp *api.RestartPolicy, c fuzz.Continue) {
			policies := []api.RestartPolicy{api.RestartPolicyAlways, api.RestartPolicyNever, api.RestartPolicyOnFailure}
			*rp = policies[c.Rand.Intn(len(policies))]
		},
		// Only api.DownwardAPIVolumeFile needs to have a specific func since FieldRef has to be
		// defaulted to a version otherwise roundtrip will fail
		// For the remaining volume plugins the default fuzzer is enough.
		func(m *api.DownwardAPIVolumeFile, c fuzz.Continue) {
			m.Path = c.RandString()
			versions := []string{"v1"}
			m.FieldRef = &api.ObjectFieldSelector{}
			m.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))]
			m.FieldRef.FieldPath = c.RandString()
		},
		func(vs *api.VolumeSource, c fuzz.Continue) {
			// Exactly one of the fields must be set.
			v := reflect.ValueOf(vs).Elem()
			i := int(c.RandUint64() % uint64(v.NumField()))
			t := v.Field(i).Addr()
			for v.Field(i).IsNil() {
				c.Fuzz(t.Interface())
			}
		},
		func(i *api.ISCSIVolumeSource, c fuzz.Continue) {
			i.ISCSIInterface = c.RandString()
			if i.ISCSIInterface == "" {
				i.ISCSIInterface = "default"
			}
		},
		func(d *api.DNSPolicy, c fuzz.Continue) {
			policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault}
			*d = policies[c.Rand.Intn(len(policies))]
		},
		func(p *api.Protocol, c fuzz.Continue) {
			protocols := []api.Protocol{api.ProtocolTCP, api.ProtocolUDP}
			*p = protocols[c.Rand.Intn(len(protocols))]
		},
		func(p *api.ServiceAffinity, c fuzz.Continue) {
			types := []api.ServiceAffinity{api.ServiceAffinityClientIP, api.ServiceAffinityNone}
			*p = types[c.Rand.Intn(len(types))]
		},
		func(p *api.ServiceType, c fuzz.Continue) {
			types := []api.ServiceType{api.ServiceTypeClusterIP, api.ServiceTypeNodePort, api.ServiceTypeLoadBalancer}
			*p = types[c.Rand.Intn(len(types))]
		},
		func(ct *api.Container, c fuzz.Continue) {
			c.FuzzNoCustom(ct) // fuzz self without calling this function again
			ct.TerminationMessagePath = "/" + ct.TerminationMessagePath // Must be non-empty
		},
		func(p *api.Probe, c fuzz.Continue) {
			c.FuzzNoCustom(p)
			// These fields have default values.
			intFieldsWithDefaults := [...]string{"TimeoutSeconds", "PeriodSeconds", "SuccessThreshold", "FailureThreshold"}
			v := reflect.ValueOf(p).Elem()
			for _, field := range intFieldsWithDefaults {
				f := v.FieldByName(field)
				if f.Int() == 0 {
					f.SetInt(1)
				}
			}
		},
		func(ev *api.EnvVar, c fuzz.Continue) {
			ev.Name = c.RandString()
			if c.RandBool() {
				ev.Value = c.RandString()
			} else {
				ev.ValueFrom = &api.EnvVarSource{}
				ev.ValueFrom.FieldRef = &api.ObjectFieldSelector{}

				var versions []unversioned.GroupVersion
				for _, testGroup := range testapi.Groups {
					versions = append(versions, *testGroup.GroupVersion())
				}

				ev.ValueFrom.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))].String()
				ev.ValueFrom.FieldRef.FieldPath = c.RandString()
			}
		},
		func(sc *api.SecurityContext, c fuzz.Continue) {
			c.FuzzNoCustom(sc) // fuzz self without calling this function again
			if c.RandBool() {
				priv := c.RandBool()
				sc.Privileged = &priv
			}

			if c.RandBool() {
				sc.Capabilities = &api.Capabilities{
					Add:  make([]api.Capability, 0),
					Drop: make([]api.Capability, 0),
				}
				c.Fuzz(&sc.Capabilities.Add)
				c.Fuzz(&sc.Capabilities.Drop)
			}
		},
		func(s *api.Secret, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			s.Type = api.SecretTypeOpaque
		},
		func(r *api.RBDVolumeSource, c fuzz.Continue) {
			r.RBDPool = c.RandString()
			if r.RBDPool == "" {
				r.RBDPool = "rbd"
			}
			r.RadosUser = c.RandString()
			if r.RadosUser == "" {
				r.RadosUser = "******"
			}
			r.Keyring = c.RandString()
			if r.Keyring == "" {
				r.Keyring = "/etc/ceph/keyring"
			}
		},
		func(pv *api.PersistentVolume, c fuzz.Continue) {
			c.FuzzNoCustom(pv) // fuzz self without calling this function again
			types := []api.PersistentVolumePhase{api.VolumeAvailable, api.VolumePending, api.VolumeBound, api.VolumeReleased, api.VolumeFailed}
			pv.Status.Phase = types[c.Rand.Intn(len(types))]
			pv.Status.Message = c.RandString()
			reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimRecycle, api.PersistentVolumeReclaimRetain}
			pv.Spec.PersistentVolumeReclaimPolicy = reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
		},
		func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) {
			c.FuzzNoCustom(pvc) // fuzz self without calling this function again
			types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending, api.ClaimLost}
			pvc.Status.Phase = types[c.Rand.Intn(len(types))]
		},
		func(s *api.NamespaceSpec, c fuzz.Continue) {
			s.Finalizers = []api.FinalizerName{api.FinalizerKubernetes}
		},
		func(s *api.NamespaceStatus, c fuzz.Continue) {
			s.Phase = api.NamespaceActive
		},
		func(http *api.HTTPGetAction, c fuzz.Continue) {
			c.FuzzNoCustom(http) // fuzz self without calling this function again
			http.Path = "/" + http.Path // can't be blank
			http.Scheme = "x" + http.Scheme // can't be blank
		},
		func(ss *api.ServiceSpec, c fuzz.Continue) {
			c.FuzzNoCustom(ss) // fuzz self without calling this function again
			if len(ss.Ports) == 0 {
				// There must be at least 1 port.
				ss.Ports = append(ss.Ports, api.ServicePort{})
				c.Fuzz(&ss.Ports[0])
			}
			for i := range ss.Ports {
				switch ss.Ports[i].TargetPort.Type {
				case intstr.Int:
					ss.Ports[i].TargetPort.IntVal = 1 + ss.Ports[i].TargetPort.IntVal%65535 // non-zero
				case intstr.String:
					ss.Ports[i].TargetPort.StrVal = "x" + ss.Ports[i].TargetPort.StrVal // non-empty
				}
			}
		},
		func(n *api.Node, c fuzz.Continue) {
			c.FuzzNoCustom(n)
			n.Spec.ExternalID = "external"
		},
		func(s *api.NodeStatus, c fuzz.Continue) {
			c.FuzzNoCustom(s)
			s.Allocatable = s.Capacity
		},
		func(s *autoscaling.HorizontalPodAutoscalerSpec, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			minReplicas := int32(c.Rand.Int31())
			s.MinReplicas = &minReplicas
			targetCpu := int32(c.RandUint64())
			s.TargetCPUUtilizationPercentage = &targetCpu
		},
		func(psp *extensions.PodSecurityPolicySpec, c fuzz.Continue) {
			c.FuzzNoCustom(psp) // fuzz self without calling this function again
			runAsUserRules := []extensions.RunAsUserStrategy{extensions.RunAsUserStrategyMustRunAsNonRoot, extensions.RunAsUserStrategyMustRunAs, extensions.RunAsUserStrategyRunAsAny}
			psp.RunAsUser.Rule = runAsUserRules[c.Rand.Intn(len(runAsUserRules))]
			seLinuxRules := []extensions.SELinuxStrategy{extensions.SELinuxStrategyRunAsAny, extensions.SELinuxStrategyMustRunAs}
			psp.SELinux.Rule = seLinuxRules[c.Rand.Intn(len(seLinuxRules))]
		},
		func(s *extensions.Scale, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			// TODO: Implement a fuzzer to generate valid keys, values and operators for
			// selector requirements.
			if s.Status.Selector != nil {
				s.Status.Selector = &unversioned.LabelSelector{
					MatchLabels: map[string]string{
						"testlabelkey": "testlabelval",
					},
					MatchExpressions: []unversioned.LabelSelectorRequirement{
						{
							Key:      "testkey",
							Operator: unversioned.LabelSelectorOpIn,
							Values:   []string{"val1", "val2", "val3"},
						},
					},
				}
			}
		},
		func(r *runtime.RawExtension, c fuzz.Continue) {
			// Pick an arbitrary type and fuzz it
			types := []runtime.Object{&api.Pod{}, &extensions.Deployment{}, &api.Service{}}
			obj := types[c.Rand.Intn(len(types))]
			c.Fuzz(obj)

			// Find a codec for converting the object to raw bytes. This is necessary for the
			// api version and kind to be correctly set by serialization.
			var codec runtime.Codec
			switch obj.(type) {
			case *api.Pod:
				codec = testapi.Default.Codec()
			case *extensions.Deployment:
				codec = testapi.Extensions.Codec()
			case *api.Service:
				codec = testapi.Default.Codec()
			default:
				t.Errorf("Failed to find codec for object type: %T", obj)
				return
			}

			// Convert the object to raw bytes
			bytes, err := runtime.Encode(codec, obj)
			if err != nil {
				t.Errorf("Failed to encode object: %v", err)
				return
			}

			// Set the bytes field on the RawExtension
			r.Raw = bytes
		},
	)
	return f
}
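// Illustrative usage sketch, not part of the original file: FuzzerFor is typically driven
// from serialization round-trip tests, where a seeded fuzzer populates an api object with
// random but round-trippable values before it is encoded and decoded. The seed, the use of
// testapi.Default.InternalGroupVersion(), and the choice of api.Pod are example values only.
func exampleRoundTripFuzz(t *testing.T) {
	fuzzer := FuzzerFor(t, testapi.Default.InternalGroupVersion(), rand.NewSource(42))
	pod := &api.Pod{}
	fuzzer.Fuzz(pod) // fills the pod using the custom funcs registered above
	// ... encode pod with a codec, decode it, and compare the two objects here ...
}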
// limitRequestRatioConstraint enforces the limit to request ratio over the specified resource
func limitRequestRatioConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error {
	req, reqExists := request[resourceName]
	lim, limExists := limit[resourceName]
	observedReqValue, observedLimValue, enforcedValue := requestLimitEnforcedValues(req, lim, enforced)

	if !reqExists || (observedReqValue == int64(0)) {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but no request is specified or request is 0.", resourceName, limitType, enforced.String())
	}
	if !limExists || (observedLimValue == int64(0)) {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but no limit is specified or limit is 0.", resourceName, limitType, enforced.String())
	}

	observedValue := observedLimValue / observedReqValue

	if observedValue > enforcedValue {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but provided ratio is %d.", resourceName, limitType, enforced.String(), observedValue)
	}
	return nil
}
// maxConstraint enforces the max constraint over the specified resource
func maxConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error {
	req, reqExists := request[resourceName]
	lim, limExists := limit[resourceName]
	observedReqValue, observedLimValue, enforcedValue := requestLimitEnforcedValues(req, lim, enforced)

	if !limExists {
		return fmt.Errorf("Maximum %s usage per %s is %s. No limit is specified.", resourceName, limitType, enforced.String())
	}
	if observedLimValue > enforcedValue {
		return fmt.Errorf("Maximum %s usage per %s is %s, but limit is %s.", resourceName, limitType, enforced.String(), lim.String())
	}
	if reqExists && (observedReqValue > enforcedValue) {
		return fmt.Errorf("Maximum %s usage per %s is %s, but request is %s.", resourceName, limitType, enforced.String(), req.String())
	}
	return nil
}
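// Hedged worked example (not from the source): with an enforced maximum of 1Gi memory per
// Container, a container whose limit is 2Gi violates the constraint, while a 512Mi request
// stays within bounds. api.LimitTypeContainer and resource.MustParse come from the
// Kubernetes API packages already used above.
func exampleMaxConstraint() {
	enforced := resource.MustParse("1Gi")
	requests := api.ResourceList{api.ResourceMemory: resource.MustParse("512Mi")}
	limits := api.ResourceList{api.ResourceMemory: resource.MustParse("2Gi")}
	if err := maxConstraint(api.LimitTypeContainer, api.ResourceMemory, enforced, requests, limits); err != nil {
		fmt.Println(err) // the 2Gi limit exceeds the enforced 1Gi maximum
	}
}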
// The method verifies whether resources should be set for the given pod and,
// if an estimation is available, fills the Requests field.
func (ir initialResources) estimateAndFillResourcesIfNotSet(pod *api.Pod) {
	annotations := []string{}
	for i := range pod.Spec.Containers {
		c := &pod.Spec.Containers[i]
		req := c.Resources.Requests
		lim := c.Resources.Limits
		var cpu, mem *resource.Quantity
		var err error
		if _, ok := req[api.ResourceCPU]; !ok {
			if _, ok2 := lim[api.ResourceCPU]; !ok2 {
				cpu, err = ir.getEstimation(api.ResourceCPU, c, pod.ObjectMeta.Namespace)
				if err != nil {
					glog.Errorf("Error while trying to estimate resources: %v", err)
				}
			}
		}
		if _, ok := req[api.ResourceMemory]; !ok {
			if _, ok2 := lim[api.ResourceMemory]; !ok2 {
				mem, err = ir.getEstimation(api.ResourceMemory, c, pod.ObjectMeta.Namespace)
				if err != nil {
					glog.Errorf("Error while trying to estimate resources: %v", err)
				}
			}
		}

		// If Requests doesn't exist and an estimation was made, create Requests.
		if req == nil && (cpu != nil || mem != nil) {
			c.Resources.Requests = api.ResourceList{}
			req = c.Resources.Requests
		}

		setRes := []string{}
		if cpu != nil {
			glog.Infof("CPU estimation for container %v in pod %v/%v is %v", c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, cpu.String())
			setRes = append(setRes, string(api.ResourceCPU))
			req[api.ResourceCPU] = *cpu
		}
		if mem != nil {
			glog.Infof("Memory estimation for container %v in pod %v/%v is %v", c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, mem.String())
			setRes = append(setRes, string(api.ResourceMemory))
			req[api.ResourceMemory] = *mem
		}
		if len(setRes) > 0 {
			sort.Strings(setRes)
			a := strings.Join(setRes, ", ") + " request for container " + c.Name
			annotations = append(annotations, a)
		}
	}
	if len(annotations) > 0 {
		if pod.ObjectMeta.Annotations == nil {
			pod.ObjectMeta.Annotations = make(map[string]string)
		}
		val := "Initial Resources plugin set: " + strings.Join(annotations, "; ")
		pod.ObjectMeta.Annotations[initialResourcesAnnotation] = val
	}
}
// limitRequestRatioConstraint enforces the limit to request ratio over the specified resource
func limitRequestRatioConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error {
	req, reqExists := request[resourceName]
	lim, limExists := limit[resourceName]
	observedReqValue, observedLimValue, _ := requestLimitEnforcedValues(req, lim, enforced)

	if !reqExists || (observedReqValue == int64(0)) {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but no request is specified or request is 0.", resourceName, limitType, enforced.String())
	}
	if !limExists || (observedLimValue == int64(0)) {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but no limit is specified or limit is 0.", resourceName, limitType, enforced.String())
	}

	observedRatio := float64(observedLimValue) / float64(observedReqValue)
	displayObservedRatio := observedRatio
	maxLimitRequestRatio := float64(enforced.Value())
	if enforced.Value() <= resource.MaxMilliValue {
		observedRatio = observedRatio * 1000
		maxLimitRequestRatio = float64(enforced.MilliValue())
	}

	if observedRatio > maxLimitRequestRatio {
		return fmt.Errorf("%s max limit to request ratio per %s is %s, but provided ratio is %f.", resourceName, limitType, enforced.String(), displayObservedRatio)
	}
	return nil
}
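// Hedged worked example (not from the source) of the ratio arithmetic above: with a
// MaxLimitRequestRatio of 10 for CPU, a container requesting 100m with a 2 CPU limit has a
// limit-to-request ratio of 20 and is rejected, while a 1 CPU limit (ratio 10) is accepted.
// The milli-value branch keeps precision for sub-unit quantities such as 100m.
func exampleLimitRequestRatio() {
	enforced := resource.MustParse("10")
	requests := api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}
	limits := api.ResourceList{api.ResourceCPU: resource.MustParse("2")}
	if err := limitRequestRatioConstraint(api.LimitTypePod, api.ResourceCPU, enforced, requests, limits); err != nil {
		fmt.Println(err) // provided ratio 20 exceeds the enforced maximum of 10
	}
}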
// maxRequestConstraint enforces the max constraint over the specified resource.
// It is used when the specified LimitType does not recognize limit values for the resource.
func maxRequestConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList) error {
	req, reqExists := request[resourceName]
	observedReqValue, _, enforcedValue := requestLimitEnforcedValues(req, resource.Quantity{}, enforced)

	if !reqExists {
		return fmt.Errorf("maximum %s usage per %s is %s. No request is specified.", resourceName, limitType, enforced.String())
	}
	if observedReqValue > enforcedValue {
		return fmt.Errorf("maximum %s usage per %s is %s, but request is %s.", resourceName, limitType, enforced.String(), req.String())
	}
	return nil
}
// The method verifies whether resources should be set for the given pod and,
// if an estimation is available, fills the Requests field.
func (ir initialResources) estimateAndFillResourcesIfNotSet(pod *api.Pod) {
	for i := range pod.Spec.Containers {
		c := &pod.Spec.Containers[i]
		req := c.Resources.Requests
		lim := c.Resources.Limits
		var cpu, mem *resource.Quantity
		var err error
		if _, ok := req[api.ResourceCPU]; !ok {
			if _, ok2 := lim[api.ResourceCPU]; !ok2 {
				cpu, err = ir.getEstimation(api.ResourceCPU, c)
				if err != nil {
					glog.Errorf("Error while trying to estimate resources: %v", err)
				}
			}
		}
		if _, ok := req[api.ResourceMemory]; !ok {
			if _, ok2 := lim[api.ResourceMemory]; !ok2 {
				mem, err = ir.getEstimation(api.ResourceMemory, c)
				if err != nil {
					glog.Errorf("Error while trying to estimate resources: %v", err)
				}
			}
		}

		// If Requests doesn't exist and an estimation was made, create Requests.
		if req == nil && (cpu != nil || mem != nil) {
			c.Resources.Requests = api.ResourceList{}
			req = c.Resources.Requests
		}
		if cpu != nil {
			glog.Infof("CPU estimation for container %v in pod %v/%v is %v", c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, cpu.String())
			req[api.ResourceCPU] = *cpu
		}
		if mem != nil {
			glog.Infof("Memory estimation for container %v in pod %v/%v is %v", c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, mem.String())
			req[api.ResourceMemory] = *mem
		}
	}
	// TODO(piosz): verify the estimates fit in LimitRanger
}
func newLimitExceededError(limitType kapi.LimitType, resourceName kapi.ResourceName, requested, limit *resource.Quantity) error {
	return fmt.Errorf("requested usage of %s exceeds the maximum limit per %s (%s > %s)", resourceName, limitType, requested.String(), limit.String())
}
func (ir initialResources) estimateContainer(pod *api.Pod, c *api.Container, message string) []string {
	var annotations []string
	req := c.Resources.Requests
	lim := c.Resources.Limits
	var cpu, mem *resource.Quantity
	var err error
	if _, ok := req[api.ResourceCPU]; !ok {
		if _, ok2 := lim[api.ResourceCPU]; !ok2 {
			cpu, err = ir.getEstimation(api.ResourceCPU, c, pod.ObjectMeta.Namespace)
			if err != nil {
				glog.Errorf("Error while trying to estimate resources: %v", err)
			}
		}
	}
	if _, ok := req[api.ResourceMemory]; !ok {
		if _, ok2 := lim[api.ResourceMemory]; !ok2 {
			mem, err = ir.getEstimation(api.ResourceMemory, c, pod.ObjectMeta.Namespace)
			if err != nil {
				glog.Errorf("Error while trying to estimate resources: %v", err)
			}
		}
	}

	// If Requests doesn't exist and an estimation was made, create Requests.
	if req == nil && (cpu != nil || mem != nil) {
		c.Resources.Requests = api.ResourceList{}
		req = c.Resources.Requests
	}

	setRes := []string{}
	if cpu != nil {
		glog.Infof("CPU estimation for %s %v in pod %v/%v is %v", message, c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, cpu.String())
		setRes = append(setRes, string(api.ResourceCPU))
		req[api.ResourceCPU] = *cpu
	}
	if mem != nil {
		glog.Infof("Memory estimation for %s %v in pod %v/%v is %v", message, c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, mem.String())
		setRes = append(setRes, string(api.ResourceMemory))
		req[api.ResourceMemory] = *mem
	}
	if len(setRes) > 0 {
		sort.Strings(setRes)
		a := strings.Join(setRes, ", ") + fmt.Sprintf(" request for %s %s", message, c.Name)
		annotations = append(annotations, a)
	}
	return annotations
}
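// Hedged sketch, not part of the original source: one way the per-container strings
// returned by estimateContainer could be folded into the pod annotation, mirroring the
// estimateAndFillResourcesIfNotSet variants above. The "container" message and the
// annotation prefix follow the convention used there; initialResourcesAnnotation is the
// same package constant referenced earlier.
func applyEstimationAnnotations(ir initialResources, pod *api.Pod) {
	var annotations []string
	for i := range pod.Spec.Containers {
		annotations = append(annotations, ir.estimateContainer(pod, &pod.Spec.Containers[i], "container")...)
	}
	if len(annotations) > 0 {
		if pod.ObjectMeta.Annotations == nil {
			pod.ObjectMeta.Annotations = make(map[string]string)
		}
		pod.ObjectMeta.Annotations[initialResourcesAnnotation] = "Initial Resources plugin set: " + strings.Join(annotations, "; ")
	}
}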