func TestSetDefaulServiceTargetPort(t *testing.T) { in := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1234}}}} obj := roundTrip(t, runtime.Object(in)) out := obj.(*v1.Service) if out.Spec.Ports[0].TargetPort != intstr.FromInt(1234) { t.Errorf("Expected TargetPort to be defaulted, got %v", out.Spec.Ports[0].TargetPort) } in = &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 1234, TargetPort: intstr.FromInt(5678)}}}} obj = roundTrip(t, runtime.Object(in)) out = obj.(*v1.Service) if out.Spec.Ports[0].TargetPort != intstr.FromInt(5678) { t.Errorf("Expected TargetPort to be unchanged, got %v", out.Spec.Ports[0].TargetPort) } }
// TestEncodePtr tests that a pointer to a golang type can be encoded and // decoded without information loss or mutation. func TestEncodePtr(t *testing.T) { grace := int64(30) pod := &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"name": "foo"}, }, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, TerminationGracePeriodSeconds: &grace, SecurityContext: &api.PodSecurityContext{}, SchedulerName: api.DefaultSchedulerName, }, } obj := runtime.Object(pod) data, err := runtime.Encode(testapi.Default.Codec(), obj) obj2, err2 := runtime.Decode(testapi.Default.Codec(), data) if err != nil || err2 != nil { t.Fatalf("Failure: '%v' '%v'", err, err2) } if _, ok := obj2.(*api.Pod); !ok { t.Fatalf("Got wrong type") } if !api.Semantic.DeepEqual(obj2, pod) { t.Errorf("\nExpected:\n\n %#v,\n\nGot:\n\n %#vDiff: %v\n\n", pod, obj2, diff.ObjectDiff(obj2, pod)) } }
func TestSetDefaultHorizontalPodAutoscalerMinReplicas(t *testing.T) { tests := []struct { hpa HorizontalPodAutoscaler expectReplicas int32 }{ { hpa: HorizontalPodAutoscaler{}, expectReplicas: 1, }, { hpa: HorizontalPodAutoscaler{ Spec: HorizontalPodAutoscalerSpec{ MinReplicas: newInt32(3), }, }, expectReplicas: 3, }, } for _, test := range tests { hpa := &test.hpa obj2 := roundTrip(t, runtime.Object(hpa)) hpa2, ok := obj2.(*HorizontalPodAutoscaler) if !ok { t.Errorf("unexpected object: %v", hpa2) t.FailNow() } if hpa2.Spec.MinReplicas == nil { t.Errorf("unexpected nil MinReplicas") } else if test.expectReplicas != *hpa2.Spec.MinReplicas { t.Errorf("expected: %d MinReplicas, got: %d", test.expectReplicas, *hpa2.Spec.MinReplicas) } } }
func TestEncode(t *testing.T) { internalGV := schema.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal} externalGV := schema.GroupVersion{Group: "test.group", Version: "testExternal"} scheme := runtime.NewScheme() scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{}) scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{}) codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV) test := &InternalSimple{ TestString: "I'm the same", } obj := runtime.Object(test) data, err := runtime.Encode(codec, obj) obj2, gvk, err2 := codec.Decode(data, nil, nil) if err != nil || err2 != nil { t.Fatalf("Failure: '%v' '%v'", err, err2) } if _, ok := obj2.(*InternalSimple); !ok { t.Fatalf("Got wrong type") } if !reflect.DeepEqual(obj2, test) { t.Errorf("Expected:\n %#v,\n Got:\n %#v", test, obj2) } if !reflect.DeepEqual(gvk, &schema.GroupVersionKind{Group: "test.group", Version: "testExternal", Kind: "Simple"}) { t.Errorf("unexpected gvk returned by decode: %#v", gvk) } }
func TestSetDefaultObjectFieldSelectorAPIVersion(t *testing.T) { s := v1.PodSpec{ Containers: []v1.Container{ { Env: []v1.EnvVar{ { ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{}, }, }, }, }, }, } pod := &v1.Pod{ Spec: s, } obj2 := roundTrip(t, runtime.Object(pod)) pod2 := obj2.(*v1.Pod) s2 := pod2.Spec apiVersion := s2.Containers[0].Env[0].ValueFrom.FieldRef.APIVersion if apiVersion != "v1" { t.Errorf("Expected default APIVersion v1, got: %v", apiVersion) } }
func TestDefaultRequestIsNotSetForReplicationController(t *testing.T) { s := v1.PodSpec{} s.Containers = []v1.Container{ { Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("100m"), }, }, }, } rc := &v1.ReplicationController{ Spec: v1.ReplicationControllerSpec{ Replicas: newInt(3), Template: &v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ "foo": "bar", }, }, Spec: s, }, }, } output := roundTrip(t, runtime.Object(rc)) rc2 := output.(*v1.ReplicationController) defaultRequest := rc2.Spec.Template.Spec.Containers[0].Resources.Requests requestValue := defaultRequest[v1.ResourceCPU] if requestValue.String() != "0" { t.Errorf("Expected 0 request value, got: %s", requestValue.String()) } }
func TestSetDefaultPodSpecHostNetwork(t *testing.T) { portNum := int32(8080) s := v1.PodSpec{} s.HostNetwork = true s.Containers = []v1.Container{ { Ports: []v1.ContainerPort{ { ContainerPort: portNum, }, }, }, } pod := &v1.Pod{ Spec: s, } obj2 := roundTrip(t, runtime.Object(pod)) pod2 := obj2.(*v1.Pod) s2 := pod2.Spec hostPortNum := s2.Containers[0].Ports[0].HostPort if hostPortNum != portNum { t.Errorf("Expected container port to be defaulted, was made %d instead of %d", hostPortNum, portNum) } }
// TestUnversionedTypes verifies that a type registered as unversioned is
// always serialized in the group/version it was registered under, even when
// the codec targets a different group/version.
func TestUnversionedTypes(t *testing.T) {
	internalGV := schema.GroupVersion{Group: "test.group", Version: runtime.APIVersionInternal}
	externalGV := schema.GroupVersion{Group: "test.group", Version: "testExternal"}
	otherGV := schema.GroupVersion{Group: "group", Version: "other"}

	scheme := runtime.NewScheme()
	// InternalSimple is registered as unversioned under externalGV before the
	// versioned registrations below.
	scheme.AddUnversionedTypes(externalGV, &InternalSimple{})
	scheme.AddKnownTypeWithName(internalGV.WithKind("Simple"), &InternalSimple{})
	scheme.AddKnownTypeWithName(externalGV.WithKind("Simple"), &ExternalSimple{})
	scheme.AddKnownTypeWithName(otherGV.WithKind("Simple"), &ExternalSimple{})
	codec := serializer.NewCodecFactory(scheme).LegacyCodec(externalGV)

	// The scheme must both know the type and report it as unversioned.
	if unv, ok := scheme.IsUnversioned(&InternalSimple{}); !unv || !ok {
		t.Fatalf("type not unversioned and in scheme: %t %t", unv, ok)
	}

	// The unversioned registration uses the Go type name ("InternalSimple"),
	// not the "Simple" kind from the versioned registrations.
	kinds, _, err := scheme.ObjectKinds(&InternalSimple{})
	if err != nil {
		t.Fatal(err)
	}
	kind := kinds[0]
	if kind != externalGV.WithKind("InternalSimple") {
		t.Fatalf("unexpected: %#v", kind)
	}

	test := &InternalSimple{
		TestString: "I'm the same",
	}
	obj := runtime.Object(test)
	data, err := runtime.Encode(codec, obj)
	if err != nil {
		t.Fatal(err)
	}
	obj2, gvk, err := codec.Decode(data, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := obj2.(*InternalSimple); !ok {
		t.Fatalf("Got wrong type")
	}
	if !reflect.DeepEqual(obj2, test) {
		t.Errorf("Expected:\n %#v,\n Got:\n %#v", test, obj2)
	}
	// object is serialized as an unversioned object (in the group and version it was defined in)
	if !reflect.DeepEqual(gvk, &schema.GroupVersionKind{Group: "test.group", Version: "testExternal", Kind: "InternalSimple"}) {
		t.Errorf("unexpected gvk returned by decode: %#v", gvk)
	}

	// when serialized to a different group, the object is kept in its preferred name
	codec = serializer.NewCodecFactory(scheme).LegacyCodec(otherGV)
	data, err = runtime.Encode(codec, obj)
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != `{"apiVersion":"test.group/testExternal","kind":"InternalSimple","testString":"I'm the same"}`+"\n" {
		t.Errorf("unexpected data: %s", data)
	}
}
func TestSetDefaultSchedulerName(t *testing.T) { pod := &v1.Pod{} output := roundTrip(t, runtime.Object(pod)).(*v1.Pod) if output.Spec.SchedulerName != v1.DefaultSchedulerName { t.Errorf("Expected scheduler name: %+v\ngot: %+v\n", v1.DefaultSchedulerName, output.Spec.SchedulerName) } }
func TestSetDefaultSecret(t *testing.T) { s := &v1.Secret{} obj2 := roundTrip(t, runtime.Object(s)) s2 := obj2.(*v1.Secret) if s2.Type != v1.SecretTypeOpaque { t.Errorf("Expected secret type %v, got %v", v1.SecretTypeOpaque, s2.Type) } }
func TestSetDefaultPersistentVolumeClaim(t *testing.T) { pvc := &v1.PersistentVolumeClaim{} obj2 := roundTrip(t, runtime.Object(pvc)) pvc2 := obj2.(*v1.PersistentVolumeClaim) if pvc2.Status.Phase != v1.ClaimPending { t.Errorf("Expected claim phase %v, got %v", v1.ClaimPending, pvc2.Status.Phase) } }
func TestSetDefaultNamespace(t *testing.T) { s := &v1.Namespace{} obj2 := roundTrip(t, runtime.Object(s)) s2 := obj2.(*v1.Namespace) if s2.Status.Phase != v1.NamespaceActive { t.Errorf("Expected phase %v, got %v", v1.NamespaceActive, s2.Status.Phase) } }
func TestSetDefaultServicePort(t *testing.T) { // Unchanged if set. in := &v1.Service{Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Protocol: "UDP", Port: 9376, TargetPort: intstr.FromString("p")}, {Protocol: "UDP", Port: 8675, TargetPort: intstr.FromInt(309)}, }, }} out := roundTrip(t, runtime.Object(in)).(*v1.Service) if out.Spec.Ports[0].Protocol != v1.ProtocolUDP { t.Errorf("Expected protocol %s, got %s", v1.ProtocolUDP, out.Spec.Ports[0].Protocol) } if out.Spec.Ports[0].TargetPort != intstr.FromString("p") { t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort) } if out.Spec.Ports[1].Protocol != v1.ProtocolUDP { t.Errorf("Expected protocol %s, got %s", v1.ProtocolUDP, out.Spec.Ports[1].Protocol) } if out.Spec.Ports[1].TargetPort != intstr.FromInt(309) { t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort) } // Defaulted. in = &v1.Service{Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ {Protocol: "", Port: 9376, TargetPort: intstr.FromString("")}, {Protocol: "", Port: 8675, TargetPort: intstr.FromInt(0)}, }, }} out = roundTrip(t, runtime.Object(in)).(*v1.Service) if out.Spec.Ports[0].Protocol != v1.ProtocolTCP { t.Errorf("Expected protocol %s, got %s", v1.ProtocolTCP, out.Spec.Ports[0].Protocol) } if out.Spec.Ports[0].TargetPort != intstr.FromInt(int(in.Spec.Ports[0].Port)) { t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort) } if out.Spec.Ports[1].Protocol != v1.ProtocolTCP { t.Errorf("Expected protocol %s, got %s", v1.ProtocolTCP, out.Spec.Ports[1].Protocol) } if out.Spec.Ports[1].TargetPort != intstr.FromInt(int(in.Spec.Ports[1].Port)) { t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort) } }
func TestSetDefaultRequestsPod(t *testing.T) { // verify we default if limits are specified (and that request=0 is preserved) s := v1.PodSpec{} s.Containers = []v1.Container{ { Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("0"), }, Limits: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("100m"), v1.ResourceMemory: resource.MustParse("1Gi"), }, }, }, } pod := &v1.Pod{ Spec: s, } output := roundTrip(t, runtime.Object(pod)) pod2 := output.(*v1.Pod) defaultRequest := pod2.Spec.Containers[0].Resources.Requests if requestValue := defaultRequest[v1.ResourceCPU]; requestValue.String() != "100m" { t.Errorf("Expected request cpu: %s, got: %s", "100m", requestValue.String()) } if requestValue := defaultRequest[v1.ResourceMemory]; requestValue.String() != "0" { t.Errorf("Expected request memory: %s, got: %s", "0", requestValue.String()) } // verify we do nothing if no limits are specified s = v1.PodSpec{} s.Containers = []v1.Container{{}} pod = &v1.Pod{ Spec: s, } output = roundTrip(t, runtime.Object(pod)) pod2 = output.(*v1.Pod) defaultRequest = pod2.Spec.Containers[0].Resources.Requests if requestValue := defaultRequest[v1.ResourceCPU]; requestValue.String() != "0" { t.Errorf("Expected 0 request value, got: %s", requestValue.String()) } }
func TestSetDefaultService(t *testing.T) { svc := &v1.Service{} obj2 := roundTrip(t, runtime.Object(svc)) svc2 := obj2.(*v1.Service) if svc2.Spec.SessionAffinity != v1.ServiceAffinityNone { t.Errorf("Expected default session affinity type:%s, got: %s", v1.ServiceAffinityNone, svc2.Spec.SessionAffinity) } if svc2.Spec.Type != v1.ServiceTypeClusterIP { t.Errorf("Expected default type:%s, got: %s", v1.ServiceTypeClusterIP, svc2.Spec.Type) } }
func TestSetDefaultPersistentVolume(t *testing.T) { pv := &v1.PersistentVolume{} obj2 := roundTrip(t, runtime.Object(pv)) pv2 := obj2.(*v1.PersistentVolume) if pv2.Status.Phase != v1.VolumePending { t.Errorf("Expected volume phase %v, got %v", v1.VolumePending, pv2.Status.Phase) } if pv2.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain { t.Errorf("Expected pv reclaim policy %v, got %v", v1.PersistentVolumeReclaimRetain, pv2.Spec.PersistentVolumeReclaimPolicy) } }
func TestSetDefaultNodeExternalID(t *testing.T) { name := "node0" n := &v1.Node{} n.Name = name obj2 := roundTrip(t, runtime.Object(n)) n2 := obj2.(*v1.Node) if n2.Spec.ExternalID != name { t.Errorf("Expected default External ID: %s, got: %s", name, n2.Spec.ExternalID) } if n2.Spec.ProviderID != "" { t.Errorf("Expected empty default Cloud Provider ID, got: %s", n2.Spec.ProviderID) } }
// newInformerWatchPod creates an informer to check whether all pods are running.
// It watches pods in the framework's namespace labeled type=podType and, for
// each pod observed in the Running phase (on add or update), records the
// first observation time into watchTimes under mutex. The caller is
// responsible for running the returned controller.
func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
	ns := f.Namespace.Name
	// Record the first time a pod is seen Running; later observations of the
	// same pod are ignored (!found guard).
	checkPodRunning := func(p *v1.Pod) {
		mutex.Lock()
		defer mutex.Unlock()
		defer GinkgoRecover()
		if p.Status.Phase == v1.PodRunning {
			if _, found := watchTimes[p.Name]; !found {
				watchTimes[p.Name] = metav1.Now()
			}
		}
	}
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			// Both list and watch are restricted to pods labeled type=podType.
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
				obj, err := f.ClientSet.Core().Pods(ns).List(options)
				return runtime.Object(obj), err
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
				return f.ClientSet.Core().Pods(ns).Watch(options)
			},
		},
		&v1.Pod{},
		0, // no periodic resync
		cache.ResourceEventHandlerFuncs{
			// checkPodRunning is run in a goroutine so the handler does not
			// block the informer's event loop on the mutex.
			AddFunc: func(obj interface{}) {
				p, ok := obj.(*v1.Pod)
				Expect(ok).To(Equal(true))
				go checkPodRunning(p)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				p, ok := newObj.(*v1.Pod)
				Expect(ok).To(Equal(true))
				go checkPodRunning(p)
			},
		},
	)
	return controller
}
func NewPodStore(c clientset.Interface, namespace string, label labels.Selector, field fields.Selector) *PodStore { lw := &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { options.LabelSelector = label.String() options.FieldSelector = field.String() obj, err := c.Core().Pods(namespace).List(options) return runtime.Object(obj), err }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { options.LabelSelector = label.String() options.FieldSelector = field.String() return c.Core().Pods(namespace).Watch(options) }, } store := cache.NewStore(cache.MetaNamespaceKeyFunc) stopCh := make(chan struct{}) reflector := cache.NewReflector(lw, &v1.Pod{}, store, 0) reflector.RunUntil(stopCh) return &PodStore{Store: store, stopCh: stopCh, Reflector: reflector} }
func TestSetDefaultLimitRangeItem(t *testing.T) { limitRange := &v1.LimitRange{ ObjectMeta: metav1.ObjectMeta{ Name: "test-defaults", }, Spec: v1.LimitRangeSpec{ Limits: []v1.LimitRangeItem{{ Type: v1.LimitTypeContainer, Max: v1.ResourceList{ v1.ResourceCPU: resource.MustParse("100m"), }, Min: v1.ResourceList{ v1.ResourceMemory: resource.MustParse("100Mi"), }, Default: v1.ResourceList{}, DefaultRequest: v1.ResourceList{}, }}, }, } output := roundTrip(t, runtime.Object(limitRange)) limitRange2 := output.(*v1.LimitRange) defaultLimit := limitRange2.Spec.Limits[0].Default defaultRequest := limitRange2.Spec.Limits[0].DefaultRequest // verify that default cpu was set to the max defaultValue := defaultLimit[v1.ResourceCPU] if defaultValue.String() != "100m" { t.Errorf("Expected default cpu: %s, got: %s", "100m", defaultValue.String()) } // verify that default request was set to the limit requestValue := defaultRequest[v1.ResourceCPU] if requestValue.String() != "100m" { t.Errorf("Expected request cpu: %s, got: %s", "100m", requestValue.String()) } // verify that if a min is provided, it will be the default if no limit is specified requestMinValue := defaultRequest[v1.ResourceMemory] if requestMinValue.String() != "100Mi" { t.Errorf("Expected request memory: %s, got: %s", "100Mi", requestMinValue.String()) } }
func TestSetDefaultDownwardAPIVolumeSource(t *testing.T) { s := v1.PodSpec{} s.Volumes = []v1.Volume{ { VolumeSource: v1.VolumeSource{ DownwardAPI: &v1.DownwardAPIVolumeSource{}, }, }, } pod := &v1.Pod{ Spec: s, } output := roundTrip(t, runtime.Object(pod)) pod2 := output.(*v1.Pod) defaultMode := pod2.Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode expectedMode := v1.DownwardAPIVolumeSourceDefaultMode if defaultMode == nil || *defaultMode != expectedMode { t.Errorf("Expected DownwardAPI DefaultMode %v, got %v", expectedMode, defaultMode) } }
func TestSetDefaulEndpointsProtocol(t *testing.T) { in := &v1.Endpoints{Subsets: []v1.EndpointSubset{ {Ports: []v1.EndpointPort{{}, {Protocol: "UDP"}, {}}}, }} obj := roundTrip(t, runtime.Object(in)) out := obj.(*v1.Endpoints) for i := range out.Subsets { for j := range out.Subsets[i].Ports { if in.Subsets[i].Ports[j].Protocol == "" { if out.Subsets[i].Ports[j].Protocol != v1.ProtocolTCP { t.Errorf("Expected protocol %s, got %s", v1.ProtocolTCP, out.Subsets[i].Ports[j].Protocol) } } else { if out.Subsets[i].Ports[j].Protocol != in.Subsets[i].Ports[j].Protocol { t.Errorf("Expected protocol %s, got %s", in.Subsets[i].Ports[j].Protocol, out.Subsets[i].Ports[j].Protocol) } } } } }
func TestHasUID(t *testing.T) { testcases := []struct { obj runtime.Object hasUID bool }{ {obj: nil, hasUID: false}, {obj: &api.Pod{}, hasUID: false}, {obj: nil, hasUID: false}, {obj: runtime.Object(nil), hasUID: false}, {obj: &api.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("A")}}, hasUID: true}, } for i, tc := range testcases { actual, err := hasUID(tc.obj) if err != nil { t.Errorf("%d: unexpected error %v", i, err) continue } if tc.hasUID != actual { t.Errorf("%d: expected %v, got %v", i, tc.hasUID, actual) } } }
func TestSetDefaultProbe(t *testing.T) { originalProbe := v1.Probe{} expectedProbe := v1.Probe{ InitialDelaySeconds: 0, TimeoutSeconds: 1, PeriodSeconds: 10, SuccessThreshold: 1, FailureThreshold: 3, } pod := &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{{LivenessProbe: &originalProbe}}, }, } output := roundTrip(t, runtime.Object(pod)).(*v1.Pod) actualProbe := *output.Spec.Containers[0].LivenessProbe if actualProbe != expectedProbe { t.Errorf("Expected probe: %+v\ngot: %+v\n", expectedProbe, actualProbe) } }
// blocks until it has finished syncing.
// startEndpointWatcher starts an informer over Endpoints in the framework's
// namespace; every endpoints object that has at least one address in its
// first subset is recorded via q.added, on both add and update events. The
// informer is stopped when q.stop is closed.
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options)
				return runtime.Object(obj), err
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
			},
		},
		&v1.Endpoints{},
		0, // no periodic resync
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if e, ok := obj.(*v1.Endpoints); ok {
					// Only record endpoints with a ready address in the
					// first subset.
					if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
						q.added(e)
					}
				}
			},
			UpdateFunc: func(old, cur interface{}) {
				if e, ok := cur.(*v1.Endpoints); ok {
					if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
						q.added(e)
					}
				}
			},
		},
	)
	go controller.Run(q.stop)

	// Wait for the controller to sync, so that we don't count any warm-up time.
	for !controller.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}
}
func TestSetDefaultHPA(t *testing.T) { tests := []struct { hpa HorizontalPodAutoscaler expectReplicas int32 test string }{ { hpa: HorizontalPodAutoscaler{}, expectReplicas: 1, test: "unspecified min replicas, use the default value", }, { hpa: HorizontalPodAutoscaler{ Spec: HorizontalPodAutoscalerSpec{ MinReplicas: newInt32(3), }, }, expectReplicas: 3, test: "set min replicas to 3", }, } for _, test := range tests { hpa := &test.hpa obj2 := roundTrip(t, runtime.Object(hpa)) hpa2, ok := obj2.(*HorizontalPodAutoscaler) if !ok { t.Fatalf("unexpected object: %v", obj2) } if hpa2.Spec.MinReplicas == nil { t.Errorf("unexpected nil MinReplicas") } else if test.expectReplicas != *hpa2.Spec.MinReplicas { t.Errorf("expected: %d MinReplicas, got: %d", test.expectReplicas, *hpa2.Spec.MinReplicas) } } }
func TestSetDefaultHorizontalPodAutoscalerCpuUtilization(t *testing.T) { tests := []struct { hpa HorizontalPodAutoscaler expectUtilization int32 }{ { hpa: HorizontalPodAutoscaler{}, expectUtilization: 80, }, { hpa: HorizontalPodAutoscaler{ Spec: HorizontalPodAutoscalerSpec{ CPUUtilization: &CPUTargetUtilization{ TargetPercentage: int32(50), }, }, }, expectUtilization: 50, }, } for _, test := range tests { hpa := &test.hpa obj2 := roundTrip(t, runtime.Object(hpa)) hpa2, ok := obj2.(*HorizontalPodAutoscaler) if !ok { t.Errorf("unexpected object: %v", hpa2) t.FailNow() } if hpa2.Spec.CPUUtilization == nil { t.Errorf("unexpected nil CPUUtilization") } else if test.expectUtilization != hpa2.Spec.CPUUtilization.TargetPercentage { t.Errorf("expected: %d CPUUtilization, got: %d", test.expectUtilization, hpa2.Spec.CPUUtilization.TargetPercentage) } } }
// TODO: This should be done on types that are not part of our API func TestBeforeUpdate(t *testing.T) { testCases := []struct { name string tweakSvc func(oldSvc, newSvc *api.Service) // given basic valid services, each test case can customize them expectErr bool }{ { name: "no change", tweakSvc: func(oldSvc, newSvc *api.Service) { // nothing }, expectErr: false, }, { name: "change port", tweakSvc: func(oldSvc, newSvc *api.Service) { newSvc.Spec.Ports[0].Port++ }, expectErr: false, }, { name: "bad namespace", tweakSvc: func(oldSvc, newSvc *api.Service) { newSvc.Namespace = "#$%%invalid" }, expectErr: true, }, { name: "change name", tweakSvc: func(oldSvc, newSvc *api.Service) { newSvc.Name += "2" }, expectErr: true, }, { name: "change ClusterIP", tweakSvc: func(oldSvc, newSvc *api.Service) { oldSvc.Spec.ClusterIP = "1.2.3.4" newSvc.Spec.ClusterIP = "4.3.2.1" }, expectErr: true, }, { name: "change selectpor", tweakSvc: func(oldSvc, newSvc *api.Service) { newSvc.Spec.Selector = map[string]string{"newkey": "newvalue"} }, expectErr: false, }, } for _, tc := range testCases { oldSvc := makeValidService() newSvc := makeValidService() tc.tweakSvc(&oldSvc, &newSvc) ctx := genericapirequest.NewDefaultContext() err := rest.BeforeUpdate(Strategy, ctx, runtime.Object(&oldSvc), runtime.Object(&newSvc)) if tc.expectErr && err == nil { t.Errorf("unexpected non-error for %q", tc.name) } if !tc.expectErr && err != nil { t.Errorf("unexpected error for %q: %v", tc.name, err) } } }
podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } By("Set up watch on node status") nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name) stopCh := make(chan struct{}) newNode := make(chan *v1.Node) var controller cache.Controller _, controller = cache.NewInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { options.FieldSelector = nodeSelector.String() obj, err := f.ClientSet.Core().Nodes().List(options) return runtime.Object(obj), err }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { options.FieldSelector = nodeSelector.String() return f.ClientSet.Core().Nodes().Watch(options) }, }, &v1.Node{}, 0, cache.ResourceEventHandlerFuncs{ UpdateFunc: func(oldObj, newObj interface{}) { n, ok := newObj.(*v1.Node) Expect(ok).To(Equal(true)) newNode <- n },
func TestSetDefaultNodeStatusAllocatable(t *testing.T) { capacity := v1.ResourceList{ v1.ResourceCPU: resource.MustParse("1000m"), v1.ResourceMemory: resource.MustParse("10G"), } allocatable := v1.ResourceList{ v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("5G"), } tests := []struct { capacity v1.ResourceList allocatable v1.ResourceList expectedAllocatable v1.ResourceList }{{ // Everything set, no defaulting. capacity: capacity, allocatable: allocatable, expectedAllocatable: allocatable, }, { // Allocatable set, no defaulting. capacity: nil, allocatable: allocatable, expectedAllocatable: allocatable, }, { // Capacity set, allocatable defaults to capacity. capacity: capacity, allocatable: nil, expectedAllocatable: capacity, }, { // Nothing set, allocatable "defaults" to capacity. capacity: nil, allocatable: nil, expectedAllocatable: nil, }} copyResourceList := func(rl v1.ResourceList) v1.ResourceList { if rl == nil { return nil } copy := make(v1.ResourceList, len(rl)) for k, v := range rl { copy[k] = *v.Copy() } return copy } resourceListsEqual := func(a v1.ResourceList, b v1.ResourceList) bool { if len(a) != len(b) { return false } for k, v := range a { vb, found := b[k] if !found { return false } if v.Cmp(vb) != 0 { return false } } return true } for i, testcase := range tests { node := v1.Node{ Status: v1.NodeStatus{ Capacity: copyResourceList(testcase.capacity), Allocatable: copyResourceList(testcase.allocatable), }, } node2 := roundTrip(t, runtime.Object(&node)).(*v1.Node) actual := node2.Status.Allocatable expected := testcase.expectedAllocatable if !resourceListsEqual(expected, actual) { t.Errorf("[%d] Expected NodeStatus.Allocatable: %+v; Got: %+v", i, expected, actual) } } }