func TestResourceNames(t *testing.T) {
	testCases := map[string]struct {
		a        api.ResourceList
		expected []api.ResourceName
	}{
		"empty": {
			a:        api.ResourceList{},
			expected: []api.ResourceName{},
		},
		"values": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			expected: []api.ResourceName{api.ResourceMemory, api.ResourceCPU},
		},
	}
	for testName, testCase := range testCases {
		actualSet := ToSet(ResourceNames(testCase.a))
		expectedSet := ToSet(testCase.expected)
		if !actualSet.Equal(expectedSet) {
			t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet)
		}
	}
}
func TestIsZero(t *testing.T) {
	testCases := map[string]struct {
		a        api.ResourceList
		expected bool
	}{
		"empty": {
			a:        api.ResourceList{},
			expected: true,
		},
		"zero": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("0"),
				api.ResourceMemory: resource.MustParse("0"),
			},
			expected: true,
		},
		"non-zero": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("200m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			expected: false,
		},
	}
	for testName, testCase := range testCases {
		if result := IsZero(testCase.a); result != testCase.expected {
			t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, result)
		}
	}
}
func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *api.Pod {
	resources := api.ResourceRequirements{
		Limits:   make(api.ResourceList),
		Requests: make(api.ResourceList),
	}
	if cpuLimit != "" {
		resources.Limits[api.ResourceCPU] = resource.MustParse(cpuLimit)
	}
	if memoryLimit != "" {
		resources.Limits[api.ResourceMemory] = resource.MustParse(memoryLimit)
	}
	if cpuRequest != "" {
		resources.Requests[api.ResourceCPU] = resource.MustParse(cpuRequest)
	}
	if memoryRequest != "" {
		resources.Requests[api.ResourceMemory] = resource.MustParse(memoryRequest)
	}
	return &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:      cname,
					Resources: resources,
				},
			},
		},
	}
}
func TestUpdate(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	test := registrytest.New(t, storage.Store).AllowCreateOnUpdate()
	test.TestUpdate(
		// valid
		validNewLimitRange(),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*api.LimitRange)
			object.Spec.Limits = []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("1000"),
						api.ResourceMemory: resource.MustParse("100000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("10"),
						api.ResourceMemory: resource.MustParse("1000"),
					},
				},
			}
			return object
		},
	)
}
func TestAdd(t *testing.T) {
	testCases := map[string]struct {
		a        api.ResourceList
		b        api.ResourceList
		expected api.ResourceList
	}{
		"noKeys": {
			a:        api.ResourceList{},
			b:        api.ResourceList{},
			expected: api.ResourceList{},
		},
		"toEmpty": {
			a:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			b:        api.ResourceList{},
			expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
		},
		"matching": {
			a:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			b:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			expected: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
		},
	}
	for testName, testCase := range testCases {
		sum := Add(testCase.a, testCase.b)
		if result := Equals(testCase.expected, sum); !result {
			t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum)
		}
	}
}
func TestSyncResourceQuotaNoChange(t *testing.T) {
	resourceQuota := api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Namespace: "default",
			Name:      "rq",
		},
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: api.ResourceQuotaStatus{
			Hard: api.ResourceList{
				api.ResourceCPU: resource.MustParse("4"),
			},
			Used: api.ResourceList{
				api.ResourceCPU: resource.MustParse("0"),
			},
		},
	}
	kubeClient := fake.NewSimpleClientset(&api.PodList{}, &resourceQuota)
	resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
		KubeClient:   kubeClient,
		ResyncPeriod: controller.NoResyncPeriodFunc,
		Registry:     install.NewRegistry(kubeClient),
		GroupKindsToReplenish: []unversioned.GroupKind{
			api.Kind("Pod"),
			api.Kind("Service"),
			api.Kind("ReplicationController"),
			api.Kind("PersistentVolumeClaim"),
		},
		ControllerFactory:         NewReplenishmentControllerFactoryFromClient(kubeClient),
		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
	}
	quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
	err := quotaController.syncResourceQuota(resourceQuota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	expectedActionSet := sets.NewString(
		strings.Join([]string{"list", "replicationcontrollers", ""}, "-"),
		strings.Join([]string{"list", "services", ""}, "-"),
		strings.Join([]string{"list", "pods", ""}, "-"),
		strings.Join([]string{"list", "resourcequotas", ""}, "-"),
		strings.Join([]string{"list", "secrets", ""}, "-"),
		strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"),
	)
	actionSet := sets.NewString()
	for _, action := range kubeClient.Actions() {
		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
	}
	if !actionSet.HasAll(expectedActionSet.List()...) {
		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
	}
}
func getResourceList(cpu, memory string) api.ResourceList {
	res := api.ResourceList{}
	if cpu != "" {
		res[api.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[api.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}
func TestResourceList(t *testing.T) {
	resourceList := api.ResourceList{}
	resourceList[api.ResourceCPU] = resource.MustParse("100m")
	resourceList[api.ResourceMemory] = resource.MustParse("5Gi")
	actual := ResourceList(resourceList)
	expected := "cpu=100m,memory=5Gi"
	if actual != expected {
		t.Errorf("Unexpected result, actual: %v, expected: %v", actual, expected)
	}
}
// ServiceUsageFunc knows how to measure usage associated with services
func ServiceUsageFunc(object runtime.Object) api.ResourceList {
	result := api.ResourceList{}
	if service, ok := object.(*api.Service); ok {
		result[api.ResourceServices] = resource.MustParse("1")
		switch service.Spec.Type {
		case api.ServiceTypeNodePort:
			result[api.ResourceServicesNodePorts] = resource.MustParse("1")
		case api.ServiceTypeLoadBalancer:
			result[api.ResourceServicesLoadBalancers] = resource.MustParse("1")
		}
	}
	return result
}
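
// Hedged sketch (not from the original source): an illustrative test for ServiceUsageFunc
// above. A NodePort service should be charged one "services" and one "services.nodeports"
// unit. Assumes the same api/resource/testing imports as the surrounding snippets; the
// comparison uses Quantity.Cmp to stay self-contained.
func TestServiceUsageFuncSketch(t *testing.T) {
	service := &api.Service{Spec: api.ServiceSpec{Type: api.ServiceTypeNodePort}}
	usage := ServiceUsageFunc(service)
	one := resource.MustParse("1")
	for _, name := range []api.ResourceName{api.ResourceServices, api.ResourceServicesNodePorts} {
		qty, found := usage[name]
		if !found || qty.Cmp(one) != 0 {
			t.Errorf("expected %s usage of 1, actual: %v", name, usage)
		}
	}
}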
func newNode(name string) *api.Node {
	return &api.Node{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.NodeSpec{
			ExternalID: name,
		},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
				api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
			},
		},
	}
}
// UsageStats calculates latest observed usage stats for all objects
func (g *GenericEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
	// default each tracked resource to zero
	result := quota.UsageStats{Used: api.ResourceList{}}
	for _, resourceName := range g.MatchedResourceNames {
		result.Used[resourceName] = resource.MustParse("0")
	}
	list, err := g.ListFuncByNamespace(options.Namespace, api.ListOptions{})
	if err != nil {
		return result, fmt.Errorf("%s: Failed to list %v: %v", g.Name, g.GroupKind(), err)
	}
	_, err = meta.Accessor(list)
	if err != nil {
		return result, fmt.Errorf("%s: Unable to understand list result %#v", g.Name, list)
	}
	items, err := meta.ExtractList(list)
	if err != nil {
		return result, fmt.Errorf("%s: Unable to understand list result %#v (%v)", g.Name, list, err)
	}
	for _, item := range items {
		// need to verify that the item matches the set of scopes
		matchesScopes := true
		for _, scope := range options.Scopes {
			if !g.MatchesScope(scope, item) {
				matchesScopes = false
			}
		}
		// only count usage if there was a match
		if matchesScopes {
			result.Used = quota.Add(result.Used, g.Usage(item))
		}
	}
	return result, nil
}
// ObjectCountUsageFunc is useful if you are only counting your object
// It always returns 1 as the usage for the named resource
func ObjectCountUsageFunc(resourceName api.ResourceName) UsageFunc {
	return func(object runtime.Object) api.ResourceList {
		return api.ResourceList{
			resourceName: resource.MustParse("1"),
		}
	}
}
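
// Hedged sketch (not from the original source): ObjectCountUsageFunc above charges exactly
// one unit of the named resource, regardless of the object it is handed. The choice of
// api.ResourceSecrets and api.Secret here is purely illustrative.
func TestObjectCountUsageFuncSketch(t *testing.T) {
	usage := ObjectCountUsageFunc(api.ResourceSecrets)(&api.Secret{})
	qty := usage[api.ResourceSecrets]
	if qty.Cmp(resource.MustParse("1")) != 0 {
		t.Errorf("expected a count of 1, actual: %v", usage)
	}
}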
func TestSubtract(t *testing.T) {
	testCases := map[string]struct {
		a        api.ResourceList
		b        api.ResourceList
		expected api.ResourceList
	}{
		"noKeys": {
			a:        api.ResourceList{},
			b:        api.ResourceList{},
			expected: api.ResourceList{},
		},
		"value-empty": {
			a:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			b:        api.ResourceList{},
			expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
		},
		"empty-value": {
			a:        api.ResourceList{},
			b:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			expected: api.ResourceList{api.ResourceCPU: resource.MustParse("-100m")},
		},
		"value-value": {
			a:        api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
			b:        api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
			expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
		},
	}
	for testName, testCase := range testCases {
		sub := Subtract(testCase.a, testCase.b)
		if result := Equals(testCase.expected, sub); !result {
			t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sub)
		}
	}
}
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
// greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *api.PersistentVolume) int64 {
	giQty := resource.MustParse("1Gi")
	pvQty := pv.Spec.Capacity[api.ResourceStorage]
	giSize := giQty.Value()
	pvSize := pvQty.Value()
	timeout := (pvSize / giSize) * int64(timeoutIncrement)
	if timeout < int64(minimumTimeout) {
		return int64(minimumTimeout)
	}
	return timeout
}
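
// Hedged sketch (not from the original source): a worked example for
// CalculateTimeoutForVolume above. With minimumTimeout=60 and timeoutIncrement=30, a 10Gi
// volume yields (10Gi/1Gi)*30 = 300 seconds, while a 1Gi volume falls back to the 60s
// minimum. Assumes the same api/resource/testing imports as the surrounding snippets.
func TestCalculateTimeoutForVolumeSketch(t *testing.T) {
	makePV := func(size string) *api.PersistentVolume {
		return &api.PersistentVolume{
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceStorage: resource.MustParse(size),
				},
			},
		}
	}
	if timeout := CalculateTimeoutForVolume(60, 30, makePV("10Gi")); timeout != 300 {
		t.Errorf("expected 300, actual %d", timeout)
	}
	if timeout := CalculateTimeoutForVolume(60, 30, makePV("1Gi")); timeout != 60 {
		t.Errorf("expected 60, actual %d", timeout)
	}
}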
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
func GetPodQOS(pod *api.Pod) QOSClass {
	requests := api.ResourceList{}
	limits := api.ResourceList{}
	zeroQuantity := resource.MustParse("0")
	isGuaranteed := true
	for _, container := range pod.Spec.Containers {
		// process requests
		for name, quantity := range container.Resources.Requests {
			if quantity.Cmp(zeroQuantity) == 1 {
				delta := quantity.Copy()
				if _, exists := requests[name]; !exists {
					requests[name] = *delta
				} else {
					delta.Add(requests[name])
					requests[name] = *delta
				}
			}
		}
		// process limits
		for name, quantity := range container.Resources.Limits {
			if quantity.Cmp(zeroQuantity) == 1 {
				delta := quantity.Copy()
				if _, exists := limits[name]; !exists {
					limits[name] = *delta
				} else {
					delta.Add(limits[name])
					limits[name] = *delta
				}
			}
		}
		if len(container.Resources.Limits) != len(supportedComputeResources) {
			isGuaranteed = false
		}
	}
	if len(requests) == 0 && len(limits) == 0 {
		return BestEffort
	}
	// Check if requests match limits for all resources.
	if isGuaranteed {
		for name, req := range requests {
			if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
				isGuaranteed = false
				break
			}
		}
	}
	if isGuaranteed && len(requests) == len(limits) && len(limits) == len(supportedComputeResources) {
		return Guaranteed
	}
	return Burstable
}
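
// Hedged sketch (not from the original source): exercises GetPodQOS above with the getPod
// helper from this listing, assuming supportedComputeResources covers cpu and memory. In
// the real tree getPod and GetPodQOS live in different packages, so treat this purely as
// an illustration of the classification rules.
func TestGetPodQOSSketch(t *testing.T) {
	cases := map[string]struct {
		pod      *api.Pod
		expected QOSClass
	}{
		"no requests or limits": {pod: getPod("c", "", "", "", ""), expected: BestEffort},
		"requests only":         {pod: getPod("c", "100m", "", "", ""), expected: Burstable},
		"requests equal limits": {pod: getPod("c", "100m", "100m", "1Gi", "1Gi"), expected: Guaranteed},
	}
	for name, tc := range cases {
		if actual := GetPodQOS(tc.pod); actual != tc.expected {
			t.Errorf("%s: expected %v, actual %v", name, tc.expected, actual)
		}
	}
}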
func validNewLimitRange() *api.LimitRange {
	return &api.LimitRange{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: api.NamespaceDefault,
		},
		Spec: api.LimitRangeSpec{
			Limits: []api.LimitRangeItem{
				{
					Type: api.LimitTypePod,
					Max: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("100"),
						api.ResourceMemory: resource.MustParse("10000"),
					},
					Min: api.ResourceList{
						api.ResourceCPU:    resource.MustParse("0"),
						api.ResourceMemory: resource.MustParse("100"),
					},
				},
			},
		},
	}
}
func TestUpdate(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	test := registrytest.New(t, storage.Store).ClusterScope()
	test.TestUpdate(
		// valid
		validNewPersistentVolume("foo"),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*api.PersistentVolume)
			object.Spec.Capacity = api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("20G"),
			}
			return object
		},
	)
}
// podUsageHelper can summarize the pod quota usage based on requests and limits
func podUsageHelper(requests api.ResourceList, limits api.ResourceList) api.ResourceList {
	result := api.ResourceList{}
	result[api.ResourcePods] = resource.MustParse("1")
	if request, found := requests[api.ResourceCPU]; found {
		result[api.ResourceCPU] = request
		result[api.ResourceRequestsCPU] = request
	}
	if limit, found := limits[api.ResourceCPU]; found {
		result[api.ResourceLimitsCPU] = limit
	}
	if request, found := requests[api.ResourceMemory]; found {
		result[api.ResourceMemory] = request
		result[api.ResourceRequestsMemory] = request
	}
	if limit, found := limits[api.ResourceMemory]; found {
		result[api.ResourceLimitsMemory] = limit
	}
	return result
}
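
// Hedged sketch (not from the original source): shows the mapping podUsageHelper above
// produces when only a CPU request and a memory limit are set, reusing getResourceList
// from this listing. In the real tree these helpers live in different packages, so treat
// this purely as an illustration; Equals is the helper exercised by TestEquals below.
func TestPodUsageHelperSketch(t *testing.T) {
	actual := podUsageHelper(getResourceList("100m", ""), getResourceList("", "1Gi"))
	expected := api.ResourceList{
		api.ResourcePods:         resource.MustParse("1"),
		api.ResourceCPU:          resource.MustParse("100m"),
		api.ResourceRequestsCPU:  resource.MustParse("100m"),
		api.ResourceLimitsMemory: resource.MustParse("1Gi"),
	}
	if !Equals(expected, actual) {
		t.Errorf("expected: %v, actual: %v", expected, actual)
	}
}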
func TestUpdate(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	test := registrytest.New(t, storage.Store)
	test.TestUpdate(
		// valid
		validNewPersistentVolumeClaim("foo", api.NamespaceDefault),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*api.PersistentVolumeClaim)
			object.Spec.Resources = api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("20G"),
				},
			}
			return object
		},
	)
}
func TestUpdateStatus(t *testing.T) {
	storage, statusStorage, server := newStorage(t)
	defer server.Terminate(t)
	ctx := api.NewDefaultContext()
	key, _ := storage.KeyFunc(ctx, "foo")
	key = etcdtest.AddPrefix(key)
	pvcStart := validNewPersistentVolumeClaim("foo", api.NamespaceDefault)
	err := storage.Storage.Create(ctx, key, pvcStart, nil, 0)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: api.NamespaceDefault,
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("3Gi"),
				},
			},
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimBound,
		},
	}
	_, _, err = statusStorage.Update(ctx, pvc.Name, rest.DefaultUpdatedObjectInfo(pvc, api.Scheme))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	obj, err := storage.Get(ctx, "foo")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	pvcOut := obj.(*api.PersistentVolumeClaim)
	// only compare relevant changes b/c of difference in metadata
	if !api.Semantic.DeepEqual(pvc.Status, pvcOut.Status) {
		t.Errorf("unexpected object: %s", diff.ObjectDiff(pvc.Status, pvcOut.Status))
	}
}
func validNewPersistentVolumeClaim(name, ns string) *api.PersistentVolumeClaim {
	pv := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
				},
			},
		},
		Status: api.PersistentVolumeClaimStatus{
			Phase: api.ClaimPending,
		},
	}
	return pv
}
func (c *gcePersistentDiskProvisioner) Provision() (*api.PersistentVolume, error) {
	volumeID, sizeGB, labels, err := c.manager.CreateVolume(c)
	if err != nil {
		return nil, err
	}
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:   c.options.PVName,
			Labels: map[string]string{},
			Annotations: map[string]string{
				"kubernetes.io/createdby": "gce-pd-dynamic-provisioner",
			},
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   c.options.AccessModes,
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
					PDName:    volumeID,
					Partition: 0,
					ReadOnly:  false,
				},
			},
		},
	}
	if len(labels) != 0 {
		if pv.Labels == nil {
			pv.Labels = make(map[string]string)
		}
		for k, v := range labels {
			pv.Labels[k] = v
		}
	}
	return pv, nil
}
func buildPod(namespace, podName string, podLabels map[string]string, phase api.PodPhase) api.Pod {
	return api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
			Labels:    podLabels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU: resource.MustParse("10"),
						},
					},
				},
			},
		},
		Status: api.PodStatus{
			Phase: phase,
		},
	}
}
func validNewPersistentVolume(name string) *api.PersistentVolume {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
			},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "/foo"},
			},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRetain,
		},
		Status: api.PersistentVolumeStatus{
			Phase:   api.VolumePending,
			Message: "bar",
			Reason:  "foo",
		},
	}
	return pv
}
func validNewResourceQuota() *api.ResourceQuota {
	return &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo",
			Namespace: api.NamespaceDefault,
		},
		Spec: api.ResourceQuotaSpec{
			Hard: api.ResourceList{
				api.ResourceCPU:                    resource.MustParse("100"),
				api.ResourceMemory:                 resource.MustParse("4Gi"),
				api.ResourcePods:                   resource.MustParse("10"),
				api.ResourceServices:               resource.MustParse("10"),
				api.ResourceReplicationControllers: resource.MustParse("10"),
				api.ResourceQuotas:                 resource.MustParse("1"),
			},
		},
	}
}
func TestMonitorNodeStatusMarkPodsNotReady(t *testing.T) {
	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	table := []struct {
		fakeNodeHandler         *FakeNodeHandler
		timeToPass              time.Duration
		newNodeStatus           api.NodeStatus
		expectedPodStatusUpdate bool
	}{
		// Node created recently, without status.
		// Expect no action from node controller (within startup grace period).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: fakeNow,
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedPodStatusUpdate: false,
		},
		// Node created long time ago, with status updated recently.
		// Expect no action from node controller (within monitor grace period).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:   api.NodeReady,
									Status: api.ConditionTrue,
									// Node status has just been updated.
									LastHeartbeatTime:  fakeNow,
									LastTransitionTime: fakeNow,
								},
							},
							Capacity: api.ResourceList{
								api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
								api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
							},
						},
						Spec: api.NodeSpec{
							ExternalID: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedPodStatusUpdate: false,
		},
		// Node created long time ago, with status updated by kubelet exceeds grace period.
		// Expect pods status updated and Unknown node status posted from node controller
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:   api.NodeReady,
									Status: api.ConditionTrue,
									// Node status hasn't been updated for 1hr.
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
								{
									Type:   api.NodeOutOfDisk,
									Status: api.ConditionFalse,
									// Node status hasn't been updated for 1hr.
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
							Capacity: api.ResourceList{
								api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
								api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
							},
						},
						Spec: api.NodeSpec{
							ExternalID: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			timeToPass: 1 * time.Minute,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionTrue,
						// Node status hasn't been updated for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
					{
						Type:   api.NodeOutOfDisk,
						Status: api.ConditionFalse,
						// Node status hasn't been updated for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
					api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
				},
			},
			expectedPodStatusUpdate: true,
		},
	}
	for i, item := range table {
		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
		nodeController.now = func() unversioned.Time { return fakeNow }
		if err := nodeController.monitorNodeStatus(); err != nil {
			t.Errorf("Case[%d] unexpected error: %v", i, err)
		}
		if item.timeToPass > 0 {
			nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} }
			item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
			if err := nodeController.monitorNodeStatus(); err != nil {
				t.Errorf("Case[%d] unexpected error: %v", i, err)
			}
		}
		podStatusUpdated := false
		for _, action := range item.fakeNodeHandler.Actions() {
			if action.GetVerb() == "update" && action.GetResource().Resource == "pods" && action.GetSubresource() == "status" {
				podStatusUpdated = true
			}
		}
		if podStatusUpdated != item.expectedPodStatusUpdate {
			t.Errorf("Case[%d] expect pod status updated to be %v, but got %v", i, item.expectedPodStatusUpdate, podStatusUpdated)
		}
	}
}
func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	table := []struct {
		fakeNodeHandler      *FakeNodeHandler
		timeToPass           time.Duration
		newNodeStatus        api.NodeStatus
		expectedEvictPods    bool
		expectedRequestCount int
		expectedNodes        []*api.Node
	}{
		// Node created long time ago, without status:
		// Expect Unknown status posted from node controller.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedRequestCount: 2, // List+Update
			expectedNodes: []*api.Node{
				{
					ObjectMeta: api.ObjectMeta{
						Name:              "node0",
						CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					Status: api.NodeStatus{
						Conditions: []api.NodeCondition{
							{
								Type:               api.NodeReady,
								Status:             api.ConditionUnknown,
								Reason:             "NodeStatusNeverUpdated",
								Message:            "Kubelet never posted node status.",
								LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
								LastTransitionTime: fakeNow,
							},
							{
								Type:               api.NodeOutOfDisk,
								Status:             api.ConditionUnknown,
								Reason:             "NodeStatusNeverUpdated",
								Message:            "Kubelet never posted node status.",
								LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
								LastTransitionTime: fakeNow,
							},
						},
					},
				},
			},
		},
		// Node created recently, without status.
		// Expect no action from node controller (within startup grace period).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: fakeNow,
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedRequestCount: 1, // List
			expectedNodes:        nil,
		},
		// Node created long time ago, with status updated by kubelet exceeds grace period.
		// Expect Unknown status posted from node controller.
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:   api.NodeReady,
									Status: api.ConditionTrue,
									// Node status hasn't been updated for 1hr.
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
								{
									Type:   api.NodeOutOfDisk,
									Status: api.ConditionFalse,
									// Node status hasn't been updated for 1hr.
									LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
									LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								},
							},
							Capacity: api.ResourceList{
								api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
								api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
							},
						},
						Spec: api.NodeSpec{
							ExternalID: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedRequestCount: 3, // (List+)List+Update
			timeToPass:           time.Hour,
			newNodeStatus: api.NodeStatus{
				Conditions: []api.NodeCondition{
					{
						Type:   api.NodeReady,
						Status: api.ConditionTrue,
						// Node status hasn't been updated for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
					{
						Type:   api.NodeOutOfDisk,
						Status: api.ConditionFalse,
						// Node status hasn't been updated for 1hr.
						LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						LastTransitionTime: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
					},
				},
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
					api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
				},
			},
			expectedNodes: []*api.Node{
				{
					ObjectMeta: api.ObjectMeta{
						Name:              "node0",
						CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
					},
					Status: api.NodeStatus{
						Conditions: []api.NodeCondition{
							{
								Type:               api.NodeReady,
								Status:             api.ConditionUnknown,
								Reason:             "NodeStatusUnknown",
								Message:            "Kubelet stopped posting node status.",
								LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
							},
							{
								Type:               api.NodeOutOfDisk,
								Status:             api.ConditionUnknown,
								Reason:             "NodeStatusUnknown",
								Message:            "Kubelet stopped posting node status.",
								LastHeartbeatTime:  unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
								LastTransitionTime: unversioned.Time{Time: unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
							},
						},
						Capacity: api.ResourceList{
							api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
							api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
						},
					},
					Spec: api.NodeSpec{
						ExternalID: "node0",
					},
				},
			},
		},
		// Node created long time ago, with status updated recently.
		// Expect no action from node controller (within monitor grace period).
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Name:              "node0",
							CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
						},
						Status: api.NodeStatus{
							Conditions: []api.NodeCondition{
								{
									Type:   api.NodeReady,
									Status: api.ConditionTrue,
									// Node status has just been updated.
									LastHeartbeatTime:  fakeNow,
									LastTransitionTime: fakeNow,
								},
							},
							Capacity: api.ResourceList{
								api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
								api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
							},
						},
						Spec: api.NodeSpec{
							ExternalID: "node0",
						},
					},
				},
				Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
			},
			expectedRequestCount: 1, // List
			expectedNodes:        nil,
		},
	}
	for i, item := range table {
		nodeController := NewNodeController(nil, item.fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
			testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
		nodeController.now = func() unversioned.Time { return fakeNow }
		if err := nodeController.monitorNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.timeToPass > 0 {
			nodeController.now = func() unversioned.Time { return unversioned.Time{Time: fakeNow.Add(item.timeToPass)} }
			item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
			if err := nodeController.monitorNodeStatus(); err != nil {
				t.Errorf("unexpected error: %v", err)
			}
		}
		if item.expectedRequestCount != item.fakeNodeHandler.RequestCount {
			t.Errorf("expected %v call, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
		}
		if len(item.fakeNodeHandler.UpdatedNodes) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) {
			t.Errorf("Case[%d] unexpected nodes: %s", i, diff.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodes[0]))
		}
		if len(item.fakeNodeHandler.UpdatedNodeStatuses) > 0 && !api.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodeStatuses) {
			t.Errorf("Case[%d] unexpected nodes: %s", i, diff.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodeStatuses[0]))
		}
	}
}
func TestNodeDeletion(t *testing.T) {
	fakeNow := unversioned.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
	fakeNodeHandler := &FakeNodeHandler{
		Existing: []*api.Node{
			{
				ObjectMeta: api.ObjectMeta{
					Name:              "node0",
					CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				Status: api.NodeStatus{
					Conditions: []api.NodeCondition{
						{
							Type:   api.NodeReady,
							Status: api.ConditionTrue,
							// Node status has just been updated.
							LastHeartbeatTime:  fakeNow,
							LastTransitionTime: fakeNow,
						},
					},
					Capacity: api.ResourceList{
						api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
						api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
					},
				},
				Spec: api.NodeSpec{
					ExternalID: "node0",
				},
			},
			{
				ObjectMeta: api.ObjectMeta{
					Name:              "node1",
					CreationTimestamp: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				Status: api.NodeStatus{
					Conditions: []api.NodeCondition{
						{
							Type:   api.NodeReady,
							Status: api.ConditionTrue,
							// Node status has just been updated.
							LastHeartbeatTime:  fakeNow,
							LastTransitionTime: fakeNow,
						},
					},
					Capacity: api.ResourceList{
						api.ResourceName(api.ResourceCPU):    resource.MustParse("10"),
						api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
					},
				},
				Spec: api.NodeSpec{
					ExternalID: "node0",
				},
			},
		},
		Clientset: fake.NewSimpleClientset(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node1")}}),
	}
	nodeController := NewNodeController(nil, fakeNodeHandler, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
		testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, nil, 0, false)
	nodeController.now = func() unversioned.Time { return fakeNow }
	if err := nodeController.monitorNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	fakeNodeHandler.Delete("node1", nil)
	if err := nodeController.monitorNodeStatus(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	nodeController.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
		nodeController.deletePods(value.Value)
		return true, 0
	})
	podEvicted := false
	for _, action := range fakeNodeHandler.Actions() {
		if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
			podEvicted = true
		}
	}
	if !podEvicted {
		t.Error("expected pods to be evicted from the deleted node")
	}
}
// FuzzerFor can randomly populate api objects that are destined for version.
func FuzzerFor(t *testing.T, version unversioned.GroupVersion, src rand.Source) *fuzz.Fuzzer {
	f := fuzz.New().NilChance(.5).NumElements(1, 1)
	if src != nil {
		f.RandSource(src)
	}
	f.Funcs(
		func(j *int, c fuzz.Continue) {
			*j = int(c.Int31())
		},
		func(j **int, c fuzz.Continue) {
			if c.RandBool() {
				i := int(c.Int31())
				*j = &i
			} else {
				*j = nil
			}
		},
		func(q *resource.Quantity, c fuzz.Continue) {
			*q = *resource.NewQuantity(c.Int63n(1000), resource.DecimalExponent)
		},
		func(j *runtime.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *unversioned.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *api.ObjectMeta, c fuzz.Continue) {
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
			j.UID = types.UID(c.RandString())
			j.GenerateName = c.RandString()

			var sec, nsec int64
			c.Fuzz(&sec)
			c.Fuzz(&nsec)
			j.CreationTimestamp = unversioned.Unix(sec, nsec).Rfc3339Copy()
		},
		func(j *api.ObjectReference, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = c.RandString()
			j.Kind = c.RandString()
			j.Namespace = c.RandString()
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.FieldPath = c.RandString()
		},
		func(j *unversioned.ListMeta, c fuzz.Continue) {
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
		},
		func(j *api.ListOptions, c fuzz.Continue) {
			label, _ := labels.Parse("a=b")
			j.LabelSelector = label
			field, _ := fields.ParseSelector("a=b")
			j.FieldSelector = field
		},
		func(j *api.PodExecOptions, c fuzz.Continue) {
			j.Stdout = true
			j.Stderr = true
		},
		func(j *api.PodAttachOptions, c fuzz.Continue) {
			j.Stdout = true
			j.Stderr = true
		},
		func(s *api.PodSpec, c fuzz.Continue) {
			c.FuzzNoCustom(s)
			// has a default value
			ttl := int64(30)
			if c.RandBool() {
				ttl = int64(c.Uint32())
			}
			s.TerminationGracePeriodSeconds = &ttl

			c.Fuzz(s.SecurityContext)

			if s.SecurityContext == nil {
				s.SecurityContext = new(api.PodSecurityContext)
			}
		},
		func(j *api.PodPhase, c fuzz.Continue) {
			statuses := []api.PodPhase{api.PodPending, api.PodRunning, api.PodFailed, api.PodUnknown}
			*j = statuses[c.Rand.Intn(len(statuses))]
		},
		func(j *api.Binding, c fuzz.Continue) {
			c.Fuzz(&j.ObjectMeta)
			j.Target.Name = c.RandString()
		},
		func(j *api.ReplicationControllerSpec, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			//j.TemplateRef = nil // this is required for round trip
		},
		func(j *extensions.DeploymentStrategy, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			// Ensure that strategyType is one of valid values.
			strategyTypes := []extensions.DeploymentStrategyType{extensions.RecreateDeploymentStrategyType, extensions.RollingUpdateDeploymentStrategyType}
			j.Type = strategyTypes[c.Rand.Intn(len(strategyTypes))]
			if j.Type != extensions.RollingUpdateDeploymentStrategyType {
				j.RollingUpdate = nil
			} else {
				rollingUpdate := extensions.RollingUpdateDeployment{}
				if c.RandBool() {
					rollingUpdate.MaxUnavailable = intstr.FromInt(int(c.RandUint64()))
					rollingUpdate.MaxSurge = intstr.FromInt(int(c.RandUint64()))
				} else {
					rollingUpdate.MaxSurge = intstr.FromString(fmt.Sprintf("%d%%", c.RandUint64()))
				}
				j.RollingUpdate = &rollingUpdate
			}
		},
		func(j *batch.JobSpec, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			completions := int32(c.Rand.Int31())
			parallelism := int32(c.Rand.Int31())
			j.Completions = &completions
			j.Parallelism = &parallelism
			if c.Rand.Int31()%2 == 0 {
				j.ManualSelector = newBool(true)
			} else {
				j.ManualSelector = nil
			}
		},
		func(cp *batch.ConcurrencyPolicy, c fuzz.Continue) {
			policies := []batch.ConcurrencyPolicy{batch.AllowConcurrent, batch.ForbidConcurrent, batch.ReplaceConcurrent}
			*cp = policies[c.Rand.Intn(len(policies))]
		},
		func(j *api.List, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			// TODO: uncomment when round trip starts from a versioned object
			if false { //j.Items == nil {
				j.Items = []runtime.Object{}
			}
		},
		func(j *runtime.Object, c fuzz.Continue) {
			// TODO: uncomment when round trip starts from a versioned object
			if true { //c.RandBool() {
				*j = &runtime.Unknown{
					// We do not set TypeMeta here because it is not carried through a round trip
					Raw:         []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`),
					ContentType: runtime.ContentTypeJSON,
				}
			} else {
				types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}}
				t := types[c.Rand.Intn(len(types))]
				c.Fuzz(t)
				*j = t
			}
		},
		func(q *api.ResourceRequirements, c fuzz.Continue) {
			randomQuantity := func() resource.Quantity {
				var q resource.Quantity
				c.Fuzz(&q)
				// precalc the string for benchmarking purposes
				_ = q.String()
				return q
			}
			q.Limits = make(api.ResourceList)
			q.Requests = make(api.ResourceList)
			cpuLimit := randomQuantity()
			q.Limits[api.ResourceCPU] = *cpuLimit.Copy()
			q.Requests[api.ResourceCPU] = *cpuLimit.Copy()
			memoryLimit := randomQuantity()
			q.Limits[api.ResourceMemory] = *memoryLimit.Copy()
			q.Requests[api.ResourceMemory] = *memoryLimit.Copy()
			storageLimit := randomQuantity()
			q.Limits[api.ResourceStorage] = *storageLimit.Copy()
			q.Requests[api.ResourceStorage] = *storageLimit.Copy()
		},
		func(q *api.LimitRangeItem, c fuzz.Continue) {
			var cpuLimit resource.Quantity
			c.Fuzz(&cpuLimit)

			q.Type = api.LimitTypeContainer
			q.Default = make(api.ResourceList)
			q.Default[api.ResourceCPU] = *(cpuLimit.Copy())

			q.DefaultRequest = make(api.ResourceList)
			q.DefaultRequest[api.ResourceCPU] = *(cpuLimit.Copy())

			q.Max = make(api.ResourceList)
			q.Max[api.ResourceCPU] = *(cpuLimit.Copy())

			q.Min = make(api.ResourceList)
			q.Min[api.ResourceCPU] = *(cpuLimit.Copy())

			q.MaxLimitRequestRatio = make(api.ResourceList)
			q.MaxLimitRequestRatio[api.ResourceCPU] = resource.MustParse("10")
		},
		func(p *api.PullPolicy, c fuzz.Continue) {
			policies := []api.PullPolicy{api.PullAlways, api.PullNever, api.PullIfNotPresent}
			*p = policies[c.Rand.Intn(len(policies))]
		},
		func(rp *api.RestartPolicy, c fuzz.Continue) {
			policies := []api.RestartPolicy{api.RestartPolicyAlways, api.RestartPolicyNever, api.RestartPolicyOnFailure}
			*rp = policies[c.Rand.Intn(len(policies))]
		},
		// Only api.DownwardAPIVolumeFile needs to have a specific func since FieldRef has to be
		// defaulted to a version otherwise roundtrip will fail
		// For the remaining volume plugins the default fuzzer is enough.
		func(m *api.DownwardAPIVolumeFile, c fuzz.Continue) {
			m.Path = c.RandString()
			versions := []string{"v1"}
			m.FieldRef = &api.ObjectFieldSelector{}
			m.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))]
			m.FieldRef.FieldPath = c.RandString()
		},
		func(vs *api.VolumeSource, c fuzz.Continue) {
			// Exactly one of the fields must be set.
			v := reflect.ValueOf(vs).Elem()
			i := int(c.RandUint64() % uint64(v.NumField()))
			t := v.Field(i).Addr()
			for v.Field(i).IsNil() {
				c.Fuzz(t.Interface())
			}
		},
		func(i *api.ISCSIVolumeSource, c fuzz.Continue) {
			i.ISCSIInterface = c.RandString()
			if i.ISCSIInterface == "" {
				i.ISCSIInterface = "default"
			}
		},
		func(d *api.DNSPolicy, c fuzz.Continue) {
			policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault}
			*d = policies[c.Rand.Intn(len(policies))]
		},
		func(p *api.Protocol, c fuzz.Continue) {
			protocols := []api.Protocol{api.ProtocolTCP, api.ProtocolUDP}
			*p = protocols[c.Rand.Intn(len(protocols))]
		},
		func(p *api.ServiceAffinity, c fuzz.Continue) {
			types := []api.ServiceAffinity{api.ServiceAffinityClientIP, api.ServiceAffinityNone}
			*p = types[c.Rand.Intn(len(types))]
		},
		func(p *api.ServiceType, c fuzz.Continue) {
			types := []api.ServiceType{api.ServiceTypeClusterIP, api.ServiceTypeNodePort, api.ServiceTypeLoadBalancer}
			*p = types[c.Rand.Intn(len(types))]
		},
		func(ct *api.Container, c fuzz.Continue) {
			c.FuzzNoCustom(ct)                                          // fuzz self without calling this function again
			ct.TerminationMessagePath = "/" + ct.TerminationMessagePath // Must be non-empty
		},
		func(p *api.Probe, c fuzz.Continue) {
			c.FuzzNoCustom(p)
			// These fields have default values.
			intFieldsWithDefaults := [...]string{"TimeoutSeconds", "PeriodSeconds", "SuccessThreshold", "FailureThreshold"}
			v := reflect.ValueOf(p).Elem()
			for _, field := range intFieldsWithDefaults {
				f := v.FieldByName(field)
				if f.Int() == 0 {
					f.SetInt(1)
				}
			}
		},
		func(ev *api.EnvVar, c fuzz.Continue) {
			ev.Name = c.RandString()
			if c.RandBool() {
				ev.Value = c.RandString()
			} else {
				ev.ValueFrom = &api.EnvVarSource{}
				ev.ValueFrom.FieldRef = &api.ObjectFieldSelector{}

				var versions []unversioned.GroupVersion
				for _, testGroup := range testapi.Groups {
					versions = append(versions, *testGroup.GroupVersion())
				}

				ev.ValueFrom.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))].String()
				ev.ValueFrom.FieldRef.FieldPath = c.RandString()
			}
		},
		func(sc *api.SecurityContext, c fuzz.Continue) {
			c.FuzzNoCustom(sc) // fuzz self without calling this function again
			if c.RandBool() {
				priv := c.RandBool()
				sc.Privileged = &priv
			}

			if c.RandBool() {
				sc.Capabilities = &api.Capabilities{
					Add:  make([]api.Capability, 0),
					Drop: make([]api.Capability, 0),
				}
				c.Fuzz(&sc.Capabilities.Add)
				c.Fuzz(&sc.Capabilities.Drop)
			}
		},
		func(s *api.Secret, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			s.Type = api.SecretTypeOpaque
		},
		func(r *api.RBDVolumeSource, c fuzz.Continue) {
			r.RBDPool = c.RandString()
			if r.RBDPool == "" {
				r.RBDPool = "rbd"
			}
			r.RadosUser = c.RandString()
			if r.RadosUser == "" {
				r.RadosUser = "******"
			}
			r.Keyring = c.RandString()
			if r.Keyring == "" {
				r.Keyring = "/etc/ceph/keyring"
			}
		},
		func(pv *api.PersistentVolume, c fuzz.Continue) {
			c.FuzzNoCustom(pv) // fuzz self without calling this function again
			types := []api.PersistentVolumePhase{api.VolumeAvailable, api.VolumePending, api.VolumeBound, api.VolumeReleased, api.VolumeFailed}
			pv.Status.Phase = types[c.Rand.Intn(len(types))]
			pv.Status.Message = c.RandString()
			reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimRecycle, api.PersistentVolumeReclaimRetain}
			pv.Spec.PersistentVolumeReclaimPolicy = reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
		},
		func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) {
			c.FuzzNoCustom(pvc) // fuzz self without calling this function again
			types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending, api.ClaimLost}
			pvc.Status.Phase = types[c.Rand.Intn(len(types))]
		},
		func(s *api.NamespaceSpec, c fuzz.Continue) {
			s.Finalizers = []api.FinalizerName{api.FinalizerKubernetes}
		},
		func(s *api.NamespaceStatus, c fuzz.Continue) {
			s.Phase = api.NamespaceActive
		},
		func(http *api.HTTPGetAction, c fuzz.Continue) {
			c.FuzzNoCustom(http)            // fuzz self without calling this function again
			http.Path = "/" + http.Path     // can't be blank
			http.Scheme = "x" + http.Scheme // can't be blank
		},
		func(ss *api.ServiceSpec, c fuzz.Continue) {
			c.FuzzNoCustom(ss) // fuzz self without calling this function again
			if len(ss.Ports) == 0 {
				// There must be at least 1 port.
				ss.Ports = append(ss.Ports, api.ServicePort{})
				c.Fuzz(&ss.Ports[0])
			}
			for i := range ss.Ports {
				switch ss.Ports[i].TargetPort.Type {
				case intstr.Int:
					ss.Ports[i].TargetPort.IntVal = 1 + ss.Ports[i].TargetPort.IntVal%65535 // non-zero
				case intstr.String:
					ss.Ports[i].TargetPort.StrVal = "x" + ss.Ports[i].TargetPort.StrVal // non-empty
				}
			}
		},
		func(n *api.Node, c fuzz.Continue) {
			c.FuzzNoCustom(n)
			n.Spec.ExternalID = "external"
		},
		func(s *api.NodeStatus, c fuzz.Continue) {
			c.FuzzNoCustom(s)
			s.Allocatable = s.Capacity
		},
		func(s *autoscaling.HorizontalPodAutoscalerSpec, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			minReplicas := int32(c.Rand.Int31())
			s.MinReplicas = &minReplicas
			targetCpu := int32(c.RandUint64())
			s.TargetCPUUtilizationPercentage = &targetCpu
		},
		func(psp *extensions.PodSecurityPolicySpec, c fuzz.Continue) {
			c.FuzzNoCustom(psp) // fuzz self without calling this function again
			runAsUserRules := []extensions.RunAsUserStrategy{extensions.RunAsUserStrategyMustRunAsNonRoot, extensions.RunAsUserStrategyMustRunAs, extensions.RunAsUserStrategyRunAsAny}
			psp.RunAsUser.Rule = runAsUserRules[c.Rand.Intn(len(runAsUserRules))]
			seLinuxRules := []extensions.SELinuxStrategy{extensions.SELinuxStrategyRunAsAny, extensions.SELinuxStrategyMustRunAs}
			psp.SELinux.Rule = seLinuxRules[c.Rand.Intn(len(seLinuxRules))]
		},
		func(s *extensions.Scale, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			// TODO: Implement a fuzzer to generate valid keys, values and operators for
			// selector requirements.
			if s.Status.Selector != nil {
				s.Status.Selector = &unversioned.LabelSelector{
					MatchLabels: map[string]string{
						"testlabelkey": "testlabelval",
					},
					MatchExpressions: []unversioned.LabelSelectorRequirement{
						{
							Key:      "testkey",
							Operator: unversioned.LabelSelectorOpIn,
							Values:   []string{"val1", "val2", "val3"},
						},
					},
				}
			}
		},
		func(r *runtime.RawExtension, c fuzz.Continue) {
			// Pick an arbitrary type and fuzz it
			types := []runtime.Object{&api.Pod{}, &extensions.Deployment{}, &api.Service{}}
			obj := types[c.Rand.Intn(len(types))]
			c.Fuzz(obj)

			// Find a codec for converting the object to raw bytes. This is necessary for the
			// api version and kind to be correctly set by serialization.
			var codec runtime.Codec
			switch obj.(type) {
			case *api.Pod:
				codec = testapi.Default.Codec()
			case *extensions.Deployment:
				codec = testapi.Extensions.Codec()
			case *api.Service:
				codec = testapi.Default.Codec()
			default:
				t.Errorf("Failed to find codec for object type: %T", obj)
				return
			}

			// Convert the object to raw bytes
			bytes, err := runtime.Encode(codec, obj)
			if err != nil {
				t.Errorf("Failed to encode object: %v", err)
				return
			}

			// Set the bytes field on the RawExtension
			r.Raw = bytes
		},
	)
	return f
}
func TestEquals(t *testing.T) {
	testCases := map[string]struct {
		a        api.ResourceList
		b        api.ResourceList
		expected bool
	}{
		"isEqual": {
			a:        api.ResourceList{},
			b:        api.ResourceList{},
			expected: true,
		},
		"isEqualWithKeys": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			b: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			expected: true,
		},
		"isNotEqualSameKeys": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("200m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			b: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			expected: false,
		},
		"isNotEqualDiffKeys": {
			a: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
			},
			b: api.ResourceList{
				api.ResourceCPU:    resource.MustParse("100m"),
				api.ResourceMemory: resource.MustParse("1Gi"),
				api.ResourcePods:   resource.MustParse("1"),
			},
			expected: false,
		},
	}
	for testName, testCase := range testCases {
		if result := Equals(testCase.a, testCase.b); result != testCase.expected {
			t.Errorf("%s expected: %v, actual: %v, a=%v, b=%v", testName, testCase.expected, result, testCase.a, testCase.b)
		}
	}
}