func createService(fcs *federation_internalclientset.Clientset, clusterClientSets []*release_1_3.Clientset, namespace string) {
    By("Creating a federated service")
    labels := map[string]string{
        "foo": "bar",
    }
    svc1port := "svc1"
    svc2port := "svc2"

    service := &api.Service{
        ObjectMeta: api.ObjectMeta{
            Name: FederatedServiceName,
        },
        Spec: api.ServiceSpec{
            Selector: labels,
            Ports: []api.ServicePort{
                {
                    Name:       "portname1",
                    Port:       80,
                    TargetPort: intstr.FromString(svc1port),
                },
                {
                    Name:       "portname2",
                    Port:       81,
                    TargetPort: intstr.FromString(svc2port),
                },
            },
        },
    }
    _, err := fcs.Core().Services(namespace).Create(service)
    Expect(err).NotTo(HaveOccurred())
    for _, cs := range clusterClientSets {
        waitForFederatedServiceShard(cs, namespace, service, 1)
    }
}
func SetDefaults_RollingDeploymentStrategyParams(obj *RollingDeploymentStrategyParams) {
    if obj.IntervalSeconds == nil {
        obj.IntervalSeconds = mkintp(deployapi.DefaultRollingIntervalSeconds)
    }
    if obj.UpdatePeriodSeconds == nil {
        obj.UpdatePeriodSeconds = mkintp(deployapi.DefaultRollingUpdatePeriodSeconds)
    }
    if obj.TimeoutSeconds == nil {
        obj.TimeoutSeconds = mkintp(deployapi.DefaultRollingTimeoutSeconds)
    }
    if obj.UpdatePercent == nil {
        // Apply defaults.
        if obj.MaxUnavailable == nil {
            maxUnavailable := intstr.FromString("25%")
            obj.MaxUnavailable = &maxUnavailable
        }
        if obj.MaxSurge == nil {
            maxSurge := intstr.FromString("25%")
            obj.MaxSurge = &maxSurge
        }
    }
}
func TestDefaultResourceFromIngress(te *testing.T) {
    var (
        is  = assert.New(te)
        m   = NewCache()
        ing = &extensions.Ingress{
            ObjectMeta: api.ObjectMeta{Name: "ingress", Namespace: "test", UID: types.UID("one")},
            Spec: extensions.IngressSpec{
                Backend: &extensions.IngressBackend{
                    ServiceName: "service",
                    ServicePort: intstr.FromString("web"),
                },
            },
        }
        svc = &api.Service{
            ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test", UID: types.UID("two")},
            Spec: api.ServiceSpec{
                Type:      api.ServiceTypeClusterIP,
                ClusterIP: "1.2.3.4",
                Ports: []api.ServicePort{
                    {Name: "web", Port: 80, TargetPort: intstr.FromString("http")},
                },
            },
        }
        end = &api.Endpoints{
            ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test", UID: types.UID("three")},
            Subsets: []api.EndpointSubset{
                {
                    Addresses: []api.EndpointAddress{
                        {IP: "10.11.12.13"},
                        {IP: "10.20.21.23"},
                    },
                    Ports: []api.EndpointPort{
                        {Name: "web", Port: 8080, Protocol: api.ProtocolTCP},
                    },
                },
            },
        }
    )

    if testing.Verbose() {
        logger.Configure("debug", "[romulus-test] ", os.Stdout)
        defer logger.SetLevel("error")
    }

    m.SetServiceStore(cache.NewStore(cache.MetaNamespaceKeyFunc))
    m.SetEndpointsStore(cache.NewStore(cache.MetaNamespaceKeyFunc))
    m.endpoints.Add(end)
    m.service.Add(svc)

    list := resourcesFromIngress(m, ing)
    te.Logf("Default ResourceList: %v", list)
    is.True(len(list) > 0, "ResourceList should be non-zero")
    ma := list.Map()
    rsc, ok := ma["test.service.web"]
    if is.True(ok, "'test.service.web' not created: %v", list) {
        is.False(rsc.NoServers(), "%v should have servers", rsc)
    }
}
func TestGetURLParts(t *testing.T) {
    testCases := []struct {
        probe *api.HTTPGetAction
        ok    bool
        host  string
        port  int
        path  string
    }{
        {&api.HTTPGetAction{Host: "", Port: intstr.FromInt(-1), Path: ""}, false, "", -1, ""},
        {&api.HTTPGetAction{Host: "", Port: intstr.FromString(""), Path: ""}, false, "", -1, ""},
        {&api.HTTPGetAction{Host: "", Port: intstr.FromString("-1"), Path: ""}, false, "", -1, ""},
        {&api.HTTPGetAction{Host: "", Port: intstr.FromString("not-found"), Path: ""}, false, "", -1, ""},
        {&api.HTTPGetAction{Host: "", Port: intstr.FromString("found"), Path: ""}, true, "127.0.0.1", 93, ""},
        {&api.HTTPGetAction{Host: "", Port: intstr.FromInt(76), Path: ""}, true, "127.0.0.1", 76, ""},
        {&api.HTTPGetAction{Host: "", Port: intstr.FromString("118"), Path: ""}, true, "127.0.0.1", 118, ""},
        {&api.HTTPGetAction{Host: "hostname", Port: intstr.FromInt(76), Path: "path"}, true, "hostname", 76, "path"},
    }

    for _, test := range testCases {
        state := api.PodStatus{PodIP: "127.0.0.1"}
        container := api.Container{
            Ports: []api.ContainerPort{{Name: "found", ContainerPort: 93}},
            LivenessProbe: &api.Probe{
                Handler: api.Handler{
                    HTTPGet: test.probe,
                },
            },
        }

        scheme := test.probe.Scheme
        if scheme == "" {
            scheme = api.URISchemeHTTP
        }
        host := test.probe.Host
        if host == "" {
            host = state.PodIP
        }
        port, err := extractPort(test.probe.Port, container)
        if test.ok && err != nil {
            t.Errorf("Unexpected error: %v", err)
        }
        path := test.probe.Path

        if !test.ok && err == nil {
            t.Errorf("Expected error for %+v, got %s://%s:%d/%s", test, scheme, host, port, path)
        }
        if test.ok {
            if host != test.host || port != test.port || path != test.path {
                t.Errorf("Expected %s:%d/%s, got %s:%d/%s", test.host, test.port, test.path, host, port, path)
            }
        }
    }
}
// Verify that multiple controllers don't allow the PDB to be set true.
func TestMultipleControllers(t *testing.T) {
    const rcCount = 2
    const podCount = 2

    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("1%"))
    add(t, dc.pdbLister.Store, pdb)
    for i := 0; i < podCount; i++ {
        pod, _ := newPod(t, fmt.Sprintf("pod %d", i))
        add(t, dc.podLister.Indexer, pod)
    }
    dc.sync(pdbName)

    // No controllers yet => no disruption allowed
    ps.VerifyDisruptionAllowed(t, pdbName, 0)

    rc, _ := newReplicationController(t, 1)
    rc.Name = "rc 1"
    add(t, dc.rcLister.Indexer, rc)
    dc.sync(pdbName)

    // One RC and 200%>1% healthy => disruption allowed
    ps.VerifyDisruptionAllowed(t, pdbName, 1)

    rc, _ = newReplicationController(t, 1)
    rc.Name = "rc 2"
    add(t, dc.rcLister.Indexer, rc)
    dc.sync(pdbName)

    // 100%>1% healthy BUT two RCs => no disruption allowed
    ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
func Convert_v1beta3_RollingDeploymentStrategyParams_To_api_RollingDeploymentStrategyParams(in *RollingDeploymentStrategyParams, out *newer.RollingDeploymentStrategyParams, s conversion.Scope) error {
    out.UpdatePeriodSeconds = in.UpdatePeriodSeconds
    out.IntervalSeconds = in.IntervalSeconds
    out.TimeoutSeconds = in.TimeoutSeconds
    out.UpdatePercent = in.UpdatePercent

    if in.Pre != nil {
        if err := s.Convert(&in.Pre, &out.Pre, 0); err != nil {
            return err
        }
    }
    if in.Post != nil {
        if err := s.Convert(&in.Post, &out.Post, 0); err != nil {
            return err
        }
    }

    if in.UpdatePercent != nil {
        pct := intstr.FromString(fmt.Sprintf("%d%%", int(math.Abs(float64(*in.UpdatePercent)))))
        if *in.UpdatePercent > 0 {
            out.MaxSurge = pct
        } else {
            out.MaxUnavailable = pct
        }
    } else {
        if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil {
            return err
        }
        if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil {
            return err
        }
    }
    return nil
}
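// Added note (not in the original source): the UpdatePercent branch above maps
// the sign of the deprecated percentage field onto the newer pair of fields,
// for example:
//
//	UpdatePercent = 20   => MaxSurge = intstr.FromString("20%")
//	UpdatePercent = -20  => MaxUnavailable = intstr.FromString("20%")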
// Generate accepts a set of parameters and maps them into a new route.
func (RouteGenerator) Generate(genericParams map[string]interface{}) (runtime.Object, error) {
    var (
        labels map[string]string
        err    error
    )

    params := map[string]string{}
    for key, value := range genericParams {
        strVal, isString := value.(string)
        if !isString {
            return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key)
        }
        params[key] = strVal
    }

    labelString, found := params["labels"]
    if found && len(labelString) > 0 {
        labels, err = kubectl.ParseLabels(labelString)
        if err != nil {
            return nil, err
        }
    }

    name, found := params["name"]
    if !found || len(name) == 0 {
        name, found = params["default-name"]
        if !found || len(name) == 0 {
            return nil, fmt.Errorf("'name' is a required parameter.")
        }
    }

    route := &api.Route{
        ObjectMeta: kapi.ObjectMeta{
            Name:   name,
            Labels: labels,
        },
        Spec: api.RouteSpec{
            Host: params["hostname"],
            Path: params["path"],
            To: api.RouteTargetReference{
                Name: params["default-name"],
            },
        },
    }

    portString := params["port"]
    if len(portString) > 0 {
        var targetPort intstr.IntOrString
        if port, err := strconv.Atoi(portString); err == nil {
            targetPort = intstr.FromInt(port)
        } else {
            targetPort = intstr.FromString(portString)
        }
        route.Spec.Port = &api.RoutePort{
            TargetPort: targetPort,
        }
    }

    return route, nil
}
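// A minimal usage sketch, added for illustration; the parameter values are
// hypothetical. Generate only accepts string values, and "port" may be either
// numeric or a named port (fmt is assumed to be imported).
func ExampleRouteGenerator_Generate() {
    obj, err := RouteGenerator{}.Generate(map[string]interface{}{
        "name":         "frontend",
        "default-name": "frontend-svc",
        "hostname":     "www.example.com",
        "port":         "8080",
    })
    if err == nil {
        route := obj.(*api.Route)
        // A numeric "port" string becomes an integer TargetPort.
        fmt.Println(route.Spec.Port.TargetPort.String())
    }
    // Assumed output: 8080
}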
func TestStatefulSetController(t *testing.T) {
    labels := map[string]string{
        "foo": "bar",
        "baz": "quux",
    }

    dc, ps := newFakeDisruptionController()

    // 34% should round up to 2
    pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("34%"))
    add(t, dc.pdbLister.Store, pdb)
    ss, _ := newStatefulSet(t, 3)
    add(t, dc.ssLister.Store, ss)
    dc.sync(pdbName)

    // It starts out at 0 expected because, with no pods, the PDB doesn't know
    // about the SS. This is a known bug. TODO(mml): file issue
    ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]metav1.Time{})

    pods := []*v1.Pod{}

    for i := int32(0); i < 3; i++ {
        pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
        pods = append(pods, pod)
        pod.Labels = labels
        add(t, dc.podLister.Indexer, pod)
        dc.sync(pdbName)
        if i < 2 {
            ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]metav1.Time{})
        } else {
            ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]metav1.Time{})
        }
    }
}
func TestSetDefaultServicePort(t *testing.T) {
    if !registered.IsAllowedVersion(versioned.SchemeGroupVersion) {
        return
    }

    // Unchanged if set.
    in := &versioned.Service{Spec: versioned.ServiceSpec{
        Ports: []versioned.ServicePort{
            {Protocol: "UDP", Port: 9376, TargetPort: intstr.FromString("p")},
            {Protocol: "UDP", Port: 8675, TargetPort: intstr.FromInt(309)},
        },
    }}
    out := roundTrip(t, runtime.Object(in)).(*versioned.Service)
    if out.Spec.Ports[0].Protocol != versioned.ProtocolUDP {
        t.Errorf("Expected protocol %s, got %s", versioned.ProtocolUDP, out.Spec.Ports[0].Protocol)
    }
    if out.Spec.Ports[0].TargetPort != intstr.FromString("p") {
        t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
    }
    if out.Spec.Ports[1].Protocol != versioned.ProtocolUDP {
        t.Errorf("Expected protocol %s, got %s", versioned.ProtocolUDP, out.Spec.Ports[1].Protocol)
    }
    if out.Spec.Ports[1].TargetPort != intstr.FromInt(309) {
        t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
    }

    // Defaulted.
    in = &versioned.Service{Spec: versioned.ServiceSpec{
        Ports: []versioned.ServicePort{
            {Protocol: "", Port: 9376, TargetPort: intstr.FromString("")},
            {Protocol: "", Port: 8675, TargetPort: intstr.FromInt(0)},
        },
    }}
    out = roundTrip(t, runtime.Object(in)).(*versioned.Service)
    if out.Spec.Ports[0].Protocol != versioned.ProtocolTCP {
        t.Errorf("Expected protocol %s, got %s", versioned.ProtocolTCP, out.Spec.Ports[0].Protocol)
    }
    if out.Spec.Ports[0].TargetPort != intstr.FromInt(in.Spec.Ports[0].Port) {
        t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
    }
    if out.Spec.Ports[1].Protocol != versioned.ProtocolTCP {
        t.Errorf("Expected protocol %s, got %s", versioned.ProtocolTCP, out.Spec.Ports[1].Protocol)
    }
    if out.Spec.Ports[1].TargetPort != intstr.FromInt(in.Spec.Ports[1].Port) {
        t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
    }
}
func TestPodDisruptionBudgetStrategy(t *testing.T) {
    ctx := api.NewDefaultContext()
    if !Strategy.NamespaceScoped() {
        t.Errorf("PodDisruptionBudget must be namespace scoped")
    }
    if Strategy.AllowCreateOnUpdate() {
        t.Errorf("PodDisruptionBudget should not allow create on update")
    }

    validSelector := map[string]string{"a": "b"}
    pdb := &policy.PodDisruptionBudget{
        ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault},
        Spec: policy.PodDisruptionBudgetSpec{
            MinAvailable: intstr.FromInt(3),
            Selector:     &unversioned.LabelSelector{MatchLabels: validSelector},
        },
    }

    Strategy.PrepareForCreate(pdb)
    errs := Strategy.Validate(ctx, pdb)
    if len(errs) != 0 {
        t.Errorf("Unexpected error validating %v", errs)
    }

    newPdb := &policy.PodDisruptionBudget{
        ObjectMeta: api.ObjectMeta{Name: pdb.Name, Namespace: pdb.Namespace},
        Spec:       pdb.Spec,
        Status: policy.PodDisruptionBudgetStatus{
            PodDisruptionAllowed: true,
            CurrentHealthy:       3,
            DesiredHealthy:       3,
            ExpectedPods:         3,
        },
    }

    // Nothing in Spec changes: OK
    Strategy.PrepareForUpdate(newPdb, pdb)
    errs = Strategy.ValidateUpdate(ctx, newPdb, pdb)
    if len(errs) != 0 {
        t.Errorf("Unexpected error updating PodDisruptionBudget.")
    }

    // Changing the selector? No.
    newPdb.Spec.Selector = &unversioned.LabelSelector{MatchLabels: map[string]string{"a": "bar"}}
    Strategy.PrepareForUpdate(newPdb, pdb)
    errs = Strategy.ValidateUpdate(ctx, newPdb, pdb)
    if len(errs) == 0 {
        t.Errorf("Expected a validation error since updates are disallowed on poddisruptionbudgets.")
    }
    newPdb.Spec.Selector = pdb.Spec.Selector

    // Changing MinAvailable? Also no.
    newPdb.Spec.MinAvailable = intstr.FromString("28%")
    Strategy.PrepareForUpdate(newPdb, pdb)
    errs = Strategy.ValidateUpdate(ctx, newPdb, pdb)
    if len(errs) == 0 {
        t.Errorf("Expected a validation error since updates are disallowed on poddisruptionbudgets.")
    }
}
// Test that syncing a PDB that doesn't exist returns no error.
func TestPDBNotExist(t *testing.T) {
    dc, _ := newFakeDisruptionController()
    pdb, _ := newPodDisruptionBudget(t, intstr.FromString("67%"))
    add(t, dc.pdbLister.Store, pdb)
    if err := dc.sync("notExist"); err != nil {
        t.Errorf("Unexpected error: %v, expect nil", err)
    }
}
func TestSetDefaultServicePort(t *testing.T) {
    // Unchanged if set.
    in := &v1.Service{Spec: v1.ServiceSpec{
        Ports: []v1.ServicePort{
            {Protocol: "UDP", Port: 9376, TargetPort: intstr.FromString("p")},
            {Protocol: "UDP", Port: 8675, TargetPort: intstr.FromInt(309)},
        },
    }}
    out := roundTrip(t, runtime.Object(in)).(*v1.Service)
    if out.Spec.Ports[0].Protocol != v1.ProtocolUDP {
        t.Errorf("Expected protocol %s, got %s", v1.ProtocolUDP, out.Spec.Ports[0].Protocol)
    }
    if out.Spec.Ports[0].TargetPort != intstr.FromString("p") {
        t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
    }
    if out.Spec.Ports[1].Protocol != v1.ProtocolUDP {
        t.Errorf("Expected protocol %s, got %s", v1.ProtocolUDP, out.Spec.Ports[1].Protocol)
    }
    if out.Spec.Ports[1].TargetPort != intstr.FromInt(309) {
        t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
    }

    // Defaulted.
    in = &v1.Service{Spec: v1.ServiceSpec{
        Ports: []v1.ServicePort{
            {Protocol: "", Port: 9376, TargetPort: intstr.FromString("")},
            {Protocol: "", Port: 8675, TargetPort: intstr.FromInt(0)},
        },
    }}
    out = roundTrip(t, runtime.Object(in)).(*v1.Service)
    if out.Spec.Ports[0].Protocol != v1.ProtocolTCP {
        t.Errorf("Expected protocol %s, got %s", v1.ProtocolTCP, out.Spec.Ports[0].Protocol)
    }
    if out.Spec.Ports[0].TargetPort != intstr.FromInt(int(in.Spec.Ports[0].Port)) {
        t.Errorf("Expected port %v, got %v", in.Spec.Ports[0].Port, out.Spec.Ports[0].TargetPort)
    }
    if out.Spec.Ports[1].Protocol != v1.ProtocolTCP {
        t.Errorf("Expected protocol %s, got %s", v1.ProtocolTCP, out.Spec.Ports[1].Protocol)
    }
    if out.Spec.Ports[1].TargetPort != intstr.FromInt(int(in.Spec.Ports[1].Port)) {
        t.Errorf("Expected port %v, got %v", in.Spec.Ports[1].Port, out.Spec.Ports[1].TargetPort)
    }
}
func TestUpdate_assignOriginalAnnotation(t *testing.T) {
    oldRc := oldRc(1, 1)
    delete(oldRc.Annotations, originalReplicasAnnotation)
    newRc := newRc(1, 1)
    var updatedOldRc *api.ReplicationController
    fake := &testclient.Fake{}
    fake.AddReactor("*", "*", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
        switch a := action.(type) {
        case testclient.GetAction:
            return true, oldRc, nil
        case testclient.UpdateAction:
            updatedOldRc = a.GetObject().(*api.ReplicationController)
            return true, updatedOldRc, nil
        }
        return false, nil, nil
    })

    updater := &RollingUpdater{
        c:  fake,
        ns: "default",
        scaleAndWait: func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
            return rc, nil
        },
        getOrCreateTargetController: func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
            return newRc, false, nil
        },
        cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
            return nil
        },
        getReadyPods: func(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
            return 1, 1, nil
        },
    }

    var buffer bytes.Buffer
    config := &RollingUpdaterConfig{
        Out:            &buffer,
        OldRc:          oldRc,
        NewRc:          newRc,
        UpdatePeriod:   0,
        Interval:       time.Millisecond,
        Timeout:        time.Millisecond,
        CleanupPolicy:  DeleteRollingUpdateCleanupPolicy,
        MaxUnavailable: intstr.FromString("100%"),
    }
    err := updater.Update(config)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    if updatedOldRc == nil {
        t.Fatalf("expected rc to be updated")
    }
    if e, a := "1", updatedOldRc.Annotations[originalReplicasAnnotation]; e != a {
        t.Fatalf("expected annotation value %s, got %s", e, a)
    }
}
func SetDefaults_RollingDeploymentStrategyParams(obj *RollingDeploymentStrategyParams) {
    if obj.IntervalSeconds == nil {
        obj.IntervalSeconds = mkintp(deployapi.DefaultRollingIntervalSeconds)
    }
    if obj.UpdatePeriodSeconds == nil {
        obj.UpdatePeriodSeconds = mkintp(deployapi.DefaultRollingUpdatePeriodSeconds)
    }
    if obj.TimeoutSeconds == nil {
        obj.TimeoutSeconds = mkintp(deployapi.DefaultRollingTimeoutSeconds)
    }
    if obj.MaxUnavailable == nil && obj.MaxSurge == nil {
        maxUnavailable := intstr.FromString("25%")
        obj.MaxUnavailable = &maxUnavailable

        maxSurge := intstr.FromString("25%")
        obj.MaxSurge = &maxSurge
    }
    if obj.MaxUnavailable == nil && obj.MaxSurge != nil &&
        (*obj.MaxSurge == intstr.FromInt(0) || *obj.MaxSurge == intstr.FromString("0%")) {
        maxUnavailable := intstr.FromString("25%")
        obj.MaxUnavailable = &maxUnavailable
    }
    if obj.MaxSurge == nil && obj.MaxUnavailable != nil &&
        (*obj.MaxUnavailable == intstr.FromInt(0) || *obj.MaxUnavailable == intstr.FromString("0%")) {
        maxSurge := intstr.FromString("25%")
        obj.MaxSurge = &maxSurge
    }
}
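// Illustrative sketch (not part of the original source): with all rolling
// fields unset, the defaulting above yields 25% for both values. Assumes fmt
// is imported and that IntOrString's String() method is available.
func ExampleSetDefaults_RollingDeploymentStrategyParams() {
    params := &RollingDeploymentStrategyParams{}
    SetDefaults_RollingDeploymentStrategyParams(params)
    fmt.Println(params.MaxUnavailable.String(), params.MaxSurge.String())
    // Assumed output: 25% 25%
}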
func TestGetTCPAddrParts(t *testing.T) {
    testCases := []struct {
        probe *api.TCPSocketAction
        ok    bool
        host  string
        port  int
    }{
        {&api.TCPSocketAction{Port: intstr.FromInt(-1)}, false, "", -1},
        {&api.TCPSocketAction{Port: intstr.FromString("")}, false, "", -1},
        {&api.TCPSocketAction{Port: intstr.FromString("-1")}, false, "", -1},
        {&api.TCPSocketAction{Port: intstr.FromString("not-found")}, false, "", -1},
        {&api.TCPSocketAction{Port: intstr.FromString("found")}, true, "1.2.3.4", 93},
        {&api.TCPSocketAction{Port: intstr.FromInt(76)}, true, "1.2.3.4", 76},
        {&api.TCPSocketAction{Port: intstr.FromString("118")}, true, "1.2.3.4", 118},
    }

    for _, test := range testCases {
        host := "1.2.3.4"
        container := api.Container{
            Ports: []api.ContainerPort{{Name: "found", ContainerPort: 93}},
            LivenessProbe: &api.Probe{
                Handler: api.Handler{
                    TCPSocket: test.probe,
                },
            },
        }
        port, err := extractPort(test.probe.Port, container)
        if !test.ok && err == nil {
            t.Errorf("Expected error for %+v, got %s:%d", test, host, port)
        }
        if test.ok && err != nil {
            t.Errorf("Unexpected error: %v", err)
        }
        if test.ok {
            if host != test.host || port != test.port {
                t.Errorf("Expected %s:%d, got %s:%d", test.host, test.port, host, port)
            }
        }
    }
}
func TestContainerLabels(t *testing.T) {
    deletionGracePeriod := int64(10)
    terminationGracePeriod := int64(10)
    lifecycle := &api.Lifecycle{
        // Left PostStart as nil
        PreStop: &api.Handler{
            Exec: &api.ExecAction{
                Command: []string{"action1", "action2"},
            },
            HTTPGet: &api.HTTPGetAction{
                Path:   "path",
                Host:   "host",
                Port:   intstr.FromInt(8080),
                Scheme: "scheme",
            },
            TCPSocket: &api.TCPSocketAction{
                Port: intstr.FromString("80"),
            },
        },
    }
    container := &api.Container{
        Name: "test_container",
        TerminationMessagePath: "/somepath",
        Lifecycle:              lifecycle,
    }
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name:      "test_pod",
            Namespace: "test_pod_namespace",
            UID:       "test_pod_uid",
            DeletionGracePeriodSeconds: &deletionGracePeriod,
        },
        Spec: api.PodSpec{
            Containers:                    []api.Container{*container},
            TerminationGracePeriodSeconds: &terminationGracePeriod,
        },
    }
    expected := &labeledContainerInfo{
        PodName:       pod.Name,
        PodNamespace:  pod.Namespace,
        PodUID:        pod.UID,
        ContainerName: container.Name,
    }

    // Test whether we can get the right information from the labels.
    labels := newContainerLabels(container, pod)
    containerInfo := getContainerInfoFromLabels(labels)
    if !reflect.DeepEqual(containerInfo, expected) {
        t.Errorf("expected %v, got %v", expected, containerInfo)
    }
}
// Verify that we count the scale of a ReplicaSet even when it has no Deployment.
func TestReplicaSet(t *testing.T) {
    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("20%"))
    add(t, dc.pdbLister.Store, pdb)

    rs, _ := newReplicaSet(t, 10)
    add(t, dc.rsLister.Indexer, rs)

    pod, _ := newPod(t, "pod")
    add(t, dc.podLister.Indexer, pod)
    dc.sync(pdbName)
    ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]unversioned.Time{})
}
func createService(fcs *federation_release_1_3.Clientset, clusterClientSets []*release_1_3.Clientset, namespace string) {
    By(fmt.Sprintf("Creating federated service %q in namespace %q", FederatedServiceName, namespace))
    labels := map[string]string{
        "foo": "bar",
    }
    svc1port := "svc1"
    svc2port := "svc2"

    service := &v1.Service{
        ObjectMeta: v1.ObjectMeta{
            Name: FederatedServiceName,
        },
        Spec: v1.ServiceSpec{
            Selector: labels,
            Ports: []v1.ServicePort{
                {
                    Name:       "portname1",
                    Port:       80,
                    TargetPort: intstr.FromString(svc1port),
                },
                {
                    Name:       "portname2",
                    Port:       81,
                    TargetPort: intstr.FromString(svc2port),
                },
            },
        },
    }
    framework.Logf("Trying to create service %q in namespace %q", service.ObjectMeta.Name, namespace)
    nservice, err := fcs.Core().Services(namespace).Create(service)
    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("creating service %s: %+v", service.Name, err))
    for _, cs := range clusterClientSets {
        waitForFederatedServiceShard(cs, namespace, nservice, 1)
    }
}
func resolveRoutePort(portString string) *api.RoutePort {
    if len(portString) == 0 {
        return nil
    }
    var routePort intstr.IntOrString
    integer, err := strconv.Atoi(portString)
    if err != nil {
        routePort = intstr.FromString(portString)
    } else {
        routePort = intstr.FromInt(integer)
    }
    return &api.RoutePort{
        TargetPort: routePort,
    }
}
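// Usage sketch (added for illustration): numeric strings resolve to integer
// target ports, while anything else is treated as a named port. Assumes fmt
// is imported.
func ExampleResolveRoutePort() {
    byNumber := resolveRoutePort("8080") // TargetPort == intstr.FromInt(8080)
    byName := resolveRoutePort("web")    // TargetPort == intstr.FromString("web")
    fmt.Println(byNumber.TargetPort.String(), byName.TargetPort.String())
    // Assumed output: 8080 web
}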
// Create a pod with no controller, and verify that a PDB with a percentage
// specified won't allow a disruption.
func TestNakedPod(t *testing.T) {
    dc, ps := newFakeDisruptionController()

    pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("28%"))
    add(t, dc.pdbLister.Store, pdb)
    dc.sync(pdbName)
    // This verifies that when a PDB has 0 pods, disruptions are not allowed.
    ps.VerifyDisruptionAllowed(t, pdbName, 0)

    pod, _ := newPod(t, "naked")
    add(t, dc.podLister.Indexer, pod)
    dc.sync(pdbName)

    ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
func TestResolvePortStringUnknown(t *testing.T) {
    expected := int32(80)
    name := "foo"
    container := &api.Container{
        Ports: []api.ContainerPort{
            {Name: "bar", ContainerPort: expected},
        },
    }
    port, err := resolvePort(intstr.FromString(name), container)
    if port != -1 {
        t.Errorf("expected: -1, saw: %d", port)
    }
    if err == nil {
        t.Error("unexpected non-error")
    }
}
func TestResolvePortString(t *testing.T) {
    expected := 80
    name := "foo"
    container := &api.Container{
        Ports: []api.ContainerPort{
            {Name: name, ContainerPort: int32(expected)},
        },
    }
    port, err := resolvePort(intstr.FromString(name), container)
    if port != expected {
        t.Errorf("expected: %d, saw: %d", expected, port)
    }
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
}
func SetDefaults_ServiceSpec(obj *ServiceSpec) {
    if obj.SessionAffinity == "" {
        obj.SessionAffinity = ServiceAffinityNone
    }
    if obj.Type == "" {
        obj.Type = ServiceTypeClusterIP
    }
    for i := range obj.Ports {
        sp := &obj.Ports[i]
        if sp.Protocol == "" {
            sp.Protocol = ProtocolTCP
        }
        if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") {
            sp.TargetPort = intstr.FromInt(int(sp.Port))
        }
    }
}
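// Illustrative sketch (not in the original source): a port with no protocol
// and a zero-valued target port is defaulted to TCP with TargetPort equal to
// Port. Assumes fmt is imported.
func ExampleSetDefaults_ServiceSpec() {
    spec := &ServiceSpec{Ports: []ServicePort{{Port: 443}}}
    SetDefaults_ServiceSpec(spec)
    fmt.Println(spec.Ports[0].Protocol, spec.Ports[0].TargetPort.String())
    // Assumed output: TCP 443
}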
func TestReplicationController(t *testing.T) {
    // The budget in this test matches foo=bar, but the RC and its pods match
    // {foo=bar, baz=quux}. Later, when we add a rogue pod with only a foo=bar
    // label, it will match the budget but have no controllers, which should
    // trigger the controller to set PodDisruptionAllowed to false.
    labels := map[string]string{
        "foo": "bar",
        "baz": "quux",
    }

    dc, ps := newFakeDisruptionController()

    // 34% should round up to 2
    pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("34%"))
    add(t, dc.pdbLister.Store, pdb)
    rc, _ := newReplicationController(t, 3)
    rc.Spec.Selector = labels
    add(t, dc.rcLister.Indexer, rc)
    dc.sync(pdbName)

    // It starts out at 0 expected because, with no pods, the PDB doesn't know
    // about the RC. This is a known bug. TODO(mml): file issue
    ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})

    pods := []*api.Pod{}

    for i := int32(0); i < 3; i++ {
        pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
        pods = append(pods, pod)
        pod.Labels = labels
        add(t, dc.podLister.Indexer, pod)
        dc.sync(pdbName)
        if i < 2 {
            ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]unversioned.Time{})
        } else {
            ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]unversioned.Time{})
        }
    }

    rogue, _ := newPod(t, "rogue")
    add(t, dc.podLister.Indexer, rogue)
    dc.sync(pdbName)
    ps.VerifyDisruptionAllowed(t, pdbName, 0)
}
func parsePorts(portString string) (int32, intstr.IntOrString, error) {
    portStringSlice := strings.Split(portString, ":")

    port, err := strconv.Atoi(portStringSlice[0])
    if err != nil {
        return 0, intstr.FromInt(0), err
    }

    if len(portStringSlice) == 1 {
        return int32(port), intstr.FromInt(int(port)), nil
    }

    var targetPort intstr.IntOrString
    if portNum, err := strconv.Atoi(portStringSlice[1]); err != nil {
        targetPort = intstr.FromString(portStringSlice[1])
    } else {
        targetPort = intstr.FromInt(portNum)
    }

    return int32(port), targetPort, nil
}
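// Usage sketch (added for illustration): "80:http" splits into service port 80
// and named target port "http"; a bare "80" targets port 80 directly. Assumes
// fmt is imported.
func ExampleParsePorts() {
    port, target, err := parsePorts("80:http")
    if err == nil {
        fmt.Println(port, target.String())
    }
    // Assumed output: 80 http
}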
func SetDefaults_ServiceSpec(obj *ServiceSpec) {
    if obj.SessionAffinity == "" {
        obj.SessionAffinity = ServiceAffinityNone
    }
    if obj.Type == "" {
        obj.Type = ServiceTypeClusterIP
    }
    for i := range obj.Ports {
        sp := &obj.Ports[i]
        if sp.Protocol == "" {
            sp.Protocol = ProtocolTCP
        }
        if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") {
            sp.TargetPort = intstr.FromInt(int(sp.Port))
        }
        // Carry conversion
        if len(obj.ClusterIP) == 0 && len(obj.DeprecatedPortalIP) > 0 {
            obj.ClusterIP = obj.DeprecatedPortalIP
        }
    }
}
func TestGetEndpoints(t *testing.T) {
    // 2 pods, each of which has 3 targetPorts exposed via a single service.
    endpointAddresses := []api.EndpointAddress{
        {IP: "1.2.3.4"},
        {IP: "6.7.8.9"},
    }
    ports := []int{80, 443, 3306}
    endpointPorts := []api.EndpointPort{
        {Port: ports[0], Protocol: "TCP"},
        {Port: ports[1], Protocol: "TCP"},
        {Port: ports[2], Protocol: "TCP", Name: "mysql"},
    }
    servicePorts := []api.ServicePort{
        {Port: ports[0], TargetPort: intstr.FromInt(ports[0])},
        {Port: ports[1], TargetPort: intstr.FromInt(ports[1])},
        {Port: ports[2], TargetPort: intstr.FromString("mysql")},
    }

    svc := getService(servicePorts)
    endpoints := []*api.Endpoints{getEndpoints(svc, endpointAddresses, endpointPorts)}
    flb := newFakeLoadBalancerController(endpoints, []*api.Service{svc})

    for i := range ports {
        eps := flb.getEndpoints(svc, &svc.Spec.Ports[i])
        expectedEps := sets.NewString()
        for _, address := range endpointAddresses {
            expectedEps.Insert(fmt.Sprintf("%v:%v", address.IP, ports[i]))
        }

        receivedEps := sets.NewString()
        for _, ep := range eps {
            receivedEps.Insert(ep)
        }
        if len(receivedEps) != len(expectedEps) || !expectedEps.IsSuperset(receivedEps) {
            t.Fatalf("Unexpected endpoints, received %+v, expected %+v", receivedEps, expectedEps)
        }
        glog.Infof("Got endpoints %+v", receivedEps)
    }
}
func createDeployment(name, namespace, podTemplateName string, podLabel, labelSelector map[string]string) *extensions.Deployment {
    return &extensions.Deployment{
        ObjectMeta: api.ObjectMeta{
            Name:      name,
            Namespace: namespace,
            Labels:    labelSelector,
        },
        Spec: extensions.DeploymentSpec{
            Selector:        &unversioned.LabelSelector{MatchLabels: labelSelector},
            Replicas:        4,
            MinReadySeconds: 5,
            Strategy: extensions.DeploymentStrategy{
                Type: extensions.RollingUpdateDeploymentStrategyType,
                RollingUpdate: &extensions.RollingUpdateDeployment{
                    MaxSurge:       intstr.FromInt(1),
                    MaxUnavailable: intstr.FromString("1"),
                },
            },
            Template: api.PodTemplateSpec{
                ObjectMeta: api.ObjectMeta{Name: podTemplateName, Labels: podLabel},
            },
        },
        Status: extensions.DeploymentStatus{
            Replicas:            4,
            UpdatedReplicas:     2,
            AvailableReplicas:   3,
            UnavailableReplicas: 1,
        },
    }
}
func TestValidatePodDisruptionBudgetSpec(t *testing.T) {
    successCases := []intstr.IntOrString{
        intstr.FromString("0%"),
        intstr.FromString("1%"),
        intstr.FromString("100%"),
        intstr.FromInt(0),
        intstr.FromInt(1),
        intstr.FromInt(100),
    }
    for _, c := range successCases {
        spec := policy.PodDisruptionBudgetSpec{
            MinAvailable: c,
        }
        errs := ValidatePodDisruptionBudgetSpec(spec, field.NewPath("foo"))
        if len(errs) != 0 {
            t.Errorf("unexpected failure %v for %v", errs, spec)
        }
    }

    failureCases := []intstr.IntOrString{
        intstr.FromString("1.1%"),
        intstr.FromString("nope"),
        intstr.FromString("-1%"),
        intstr.FromString("101%"),
        intstr.FromInt(-1),
    }
    for _, c := range failureCases {
        spec := policy.PodDisruptionBudgetSpec{
            MinAvailable: c,
        }
        errs := ValidatePodDisruptionBudgetSpec(spec, field.NewPath("foo"))
        if len(errs) == 0 {
            t.Errorf("unexpected success for %v", spec)
        }
    }
}
// TestUpdate performs complex scenario testing for rolling updates. It
// provides fine grained control over the states for each update interval to
// allow the expression of as many edge cases as possible.
func TestUpdate(t *testing.T) {
    // up represents a simulated scale up event and expectation
    type up struct {
        // to is the expected replica count for a scale-up
        to int
    }
    // down represents a simulated scale down event and expectation
    type down struct {
        // oldReady is the number of oldRc replicas which will be seen
        // as ready during the scale down attempt
        oldReady int
        // newReady is the number of newRc replicas which will be seen
        // as ready during the scale up attempt
        newReady int
        // to is the expected replica count for the scale down
        to int
        // noop and to are mutually exclusive; if noop is true, that means for
        // this down event, no scaling attempt should be made (for example, if
        // by scaling down, the readiness minimum would be crossed.)
        noop bool
    }

    tests := []struct {
        name string
        // oldRc is the "from" deployment
        oldRc *api.ReplicationController
        // newRc is the "to" deployment
        newRc *api.ReplicationController
        // whether newRc existed (false means it was created)
        newRcExists bool
        maxUnavail  intstr.IntOrString
        maxSurge    intstr.IntOrString
        // expected is the sequence of up/down events that will be simulated and
        // verified
        expected []interface{}
        // output is the expected textual output written
        output string
    }{
        {
            name:        "10->10 30/0 fast readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("30%"),
            maxSurge:    intstr.FromString("0%"),
            expected: []interface{}{
                down{oldReady: 10, newReady: 0, to: 7}, up{3},
                down{oldReady: 7, newReady: 3, to: 4}, up{6},
                down{oldReady: 4, newReady: 6, to: 1}, up{9},
                down{oldReady: 1, newReady: 9, to: 0}, up{10},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 7 pods available, don't exceed 10 pods)
Scaling foo-v1 down to 7
Scaling foo-v2 up to 3
Scaling foo-v1 down to 4
Scaling foo-v2 up to 6
Scaling foo-v1 down to 1
Scaling foo-v2 up to 9
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
        },
        {
            name:        "10->10 30/0 delayed readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("30%"),
            maxSurge:    intstr.FromString("0%"),
            expected: []interface{}{
                down{oldReady: 10, newReady: 0, to: 7}, up{3},
                down{oldReady: 7, newReady: 0, noop: true},
                down{oldReady: 7, newReady: 1, to: 6}, up{4},
                down{oldReady: 6, newReady: 4, to: 3}, up{7},
                down{oldReady: 3, newReady: 7, to: 0}, up{10},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 7 pods available, don't exceed 10 pods)
Scaling foo-v1 down to 7
Scaling foo-v2 up to 3
Scaling foo-v1 down to 6
Scaling foo-v2 up to 4
Scaling foo-v1 down to 3
Scaling foo-v2 up to 7
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
        },
        {
            name:        "10->10 30/0 fast readiness, continuation",
            oldRc:       oldRc(7, 10),
            newRc:       newRc(3, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("30%"),
            maxSurge:    intstr.FromString("0%"),
            expected: []interface{}{
                down{oldReady: 7, newReady: 3, to: 4}, up{6},
                down{oldReady: 4, newReady: 6, to: 1}, up{9},
                down{oldReady: 1, newReady: 9, to: 0}, up{10},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 3 to 10, scaling down foo-v1 from 7 to 0 (keep 7 pods available, don't exceed 10 pods)
Scaling foo-v1 down to 4
Scaling foo-v2 up to 6
Scaling foo-v1 down to 1
Scaling foo-v2 up to 9
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
        },
        {
            name:        "10->10 30/0 fast readiness, continued after restart which prevented first scale-up",
            oldRc:       oldRc(7, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("30%"),
            maxSurge:    intstr.FromString("0%"),
            expected: []interface{}{
                down{oldReady: 7, newReady: 0, noop: true}, up{3},
                down{oldReady: 7, newReady: 3, to: 4}, up{6},
                down{oldReady: 4, newReady: 6, to: 1}, up{9},
                down{oldReady: 1, newReady: 9, to: 0}, up{10},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 7 to 0 (keep 7 pods available, don't exceed 10 pods)
Scaling foo-v2 up to 3
Scaling foo-v1 down to 4
Scaling foo-v2 up to 6
Scaling foo-v1 down to 1
Scaling foo-v2 up to 9
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
        },
        {
            name:        "10->10 0/30 fast readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("0%"),
            maxSurge:    intstr.FromString("30%"),
            expected: []interface{}{
                up{3}, down{oldReady: 10, newReady: 3, to: 7},
                up{6}, down{oldReady: 7, newReady: 6, to: 4},
                up{9}, down{oldReady: 4, newReady: 9, to: 1},
                up{10}, down{oldReady: 1, newReady: 10, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 13 pods)
Scaling foo-v2 up to 3
Scaling foo-v1 down to 7
Scaling foo-v2 up to 6
Scaling foo-v1 down to 4
Scaling foo-v2 up to 9
Scaling foo-v1 down to 1
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->10 0/30 delayed readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("0%"),
            maxSurge:    intstr.FromString("30%"),
            expected: []interface{}{
                up{3},
                down{oldReady: 10, newReady: 0, noop: true},
                down{oldReady: 10, newReady: 1, to: 9}, up{4},
                down{oldReady: 9, newReady: 3, to: 7}, up{6},
                down{oldReady: 7, newReady: 6, to: 4}, up{9},
                down{oldReady: 4, newReady: 9, to: 1}, up{10},
                down{oldReady: 1, newReady: 9, noop: true},
                down{oldReady: 1, newReady: 10, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 13 pods)
Scaling foo-v2 up to 3
Scaling foo-v1 down to 9
Scaling foo-v2 up to 4
Scaling foo-v1 down to 7
Scaling foo-v2 up to 6
Scaling foo-v1 down to 4
Scaling foo-v2 up to 9
Scaling foo-v1 down to 1
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->10 10/20 fast readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("10%"),
            maxSurge:    intstr.FromString("20%"),
            expected: []interface{}{
                up{2}, down{oldReady: 10, newReady: 2, to: 7},
                up{5}, down{oldReady: 7, newReady: 5, to: 4},
                up{8}, down{oldReady: 4, newReady: 8, to: 1},
                up{10}, down{oldReady: 1, newReady: 10, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods)
Scaling foo-v2 up to 2
Scaling foo-v1 down to 7
Scaling foo-v2 up to 5
Scaling foo-v1 down to 4
Scaling foo-v2 up to 8
Scaling foo-v1 down to 1
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->10 10/20 delayed readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("10%"),
            maxSurge:    intstr.FromString("20%"),
            expected: []interface{}{
                up{2}, down{oldReady: 10, newReady: 2, to: 7},
                up{5}, down{oldReady: 7, newReady: 4, to: 5},
                up{7},
                down{oldReady: 5, newReady: 4, noop: true},
                down{oldReady: 5, newReady: 7, to: 2},
                up{10}, down{oldReady: 2, newReady: 9, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods)
Scaling foo-v2 up to 2
Scaling foo-v1 down to 7
Scaling foo-v2 up to 5
Scaling foo-v1 down to 5
Scaling foo-v2 up to 7
Scaling foo-v1 down to 2
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->10 10/20 fast readiness continued after restart which prevented first scale-down",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(2, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("10%"),
            maxSurge:    intstr.FromString("20%"),
            expected: []interface{}{
                down{oldReady: 10, newReady: 2, to: 7}, up{5},
                down{oldReady: 7, newReady: 5, to: 4}, up{8},
                down{oldReady: 4, newReady: 8, to: 1}, up{10},
                down{oldReady: 1, newReady: 10, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 2 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods)
Scaling foo-v1 down to 7
Scaling foo-v2 up to 5
Scaling foo-v1 down to 4
Scaling foo-v2 up to 8
Scaling foo-v1 down to 1
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->10 0/100 fast readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("0%"),
            maxSurge:    intstr.FromString("100%"),
            expected: []interface{}{
                up{10}, down{oldReady: 10, newReady: 10, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 20 pods)
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->10 0/100 delayed readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("0%"),
            maxSurge:    intstr.FromString("100%"),
            expected: []interface{}{
                up{10},
                down{oldReady: 10, newReady: 0, noop: true},
                down{oldReady: 10, newReady: 2, to: 8},
                down{oldReady: 8, newReady: 7, to: 3},
                down{oldReady: 3, newReady: 10, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 20 pods)
Scaling foo-v2 up to 10
Scaling foo-v1 down to 8
Scaling foo-v1 down to 3
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->10 100/0 fast readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("100%"),
            maxSurge:    intstr.FromString("0%"),
            expected: []interface{}{
                down{oldReady: 10, newReady: 0, to: 0}, up{10},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 0 pods available, don't exceed 10 pods)
Scaling foo-v1 down to 0
Scaling foo-v2 up to 10
`,
        },
        {
            name:        "1->1 25/25 maintain minimum availability",
            oldRc:       oldRc(1, 1),
            newRc:       newRc(0, 1),
            newRcExists: false,
            maxUnavail:  intstr.FromString("25%"),
            maxSurge:    intstr.FromString("25%"),
            expected: []interface{}{
                up{1},
                down{oldReady: 1, newReady: 0, noop: true},
                down{oldReady: 1, newReady: 1, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "1->1 0/10 delayed readiness",
            oldRc:       oldRc(1, 1),
            newRc:       newRc(0, 1),
            newRcExists: false,
            maxUnavail:  intstr.FromString("0%"),
            maxSurge:    intstr.FromString("10%"),
            expected: []interface{}{
                up{1},
                down{oldReady: 1, newReady: 0, noop: true},
                down{oldReady: 1, newReady: 1, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "1->1 10/10 delayed readiness",
            oldRc:       oldRc(1, 1),
            newRc:       newRc(0, 1),
            newRcExists: false,
            maxUnavail:  intstr.FromString("10%"),
            maxSurge:    intstr.FromString("10%"),
            expected: []interface{}{
                up{1},
                down{oldReady: 1, newReady: 0, noop: true},
                down{oldReady: 1, newReady: 1, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "3->3 1/1 fast readiness (absolute values)",
            oldRc:       oldRc(3, 3),
            newRc:       newRc(0, 3),
            newRcExists: false,
            maxUnavail:  intstr.FromInt(0),
            maxSurge:    intstr.FromInt(1),
            expected: []interface{}{
                up{1}, down{oldReady: 3, newReady: 1, to: 2},
                up{2}, down{oldReady: 2, newReady: 2, to: 1},
                up{3}, down{oldReady: 1, newReady: 3, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 3, scaling down foo-v1 from 3 to 0 (keep 3 pods available, don't exceed 4 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 2
Scaling foo-v2 up to 2
Scaling foo-v1 down to 1
Scaling foo-v2 up to 3
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->10 0/20 fast readiness, continued after restart which resulted in partial first scale-up",
            oldRc:       oldRc(6, 10),
            newRc:       newRc(5, 10),
            newRcExists: false,
            maxUnavail:  intstr.FromString("0%"),
            maxSurge:    intstr.FromString("20%"),
            expected: []interface{}{
                up{6}, down{oldReady: 6, newReady: 6, to: 4},
                up{8}, down{oldReady: 4, newReady: 8, to: 2},
                up{10}, down{oldReady: 1, newReady: 10, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 5 to 10, scaling down foo-v1 from 6 to 0 (keep 10 pods available, don't exceed 12 pods)
Scaling foo-v2 up to 6
Scaling foo-v1 down to 4
Scaling foo-v2 up to 8
Scaling foo-v1 down to 2
Scaling foo-v2 up to 10
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "10->20 0/300 fast readiness",
            oldRc:       oldRc(10, 10),
            newRc:       newRc(0, 20),
            newRcExists: false,
            maxUnavail:  intstr.FromString("0%"),
            maxSurge:    intstr.FromString("300%"),
            expected: []interface{}{
                up{20}, down{oldReady: 10, newReady: 20, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 20, scaling down foo-v1 from 10 to 0 (keep 20 pods available, don't exceed 80 pods)
Scaling foo-v2 up to 20
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "1->1 0/1 scale down unavailable rc to a ready rc (rollback)",
            oldRc:       oldRc(1, 1),
            newRc:       newRc(1, 1),
            newRcExists: true,
            maxUnavail:  intstr.FromInt(0),
            maxSurge:    intstr.FromInt(1),
            expected: []interface{}{
                up{1}, down{oldReady: 0, newReady: 1, to: 0},
            },
            output: `Continuing update with existing controller foo-v2.
Scaling up foo-v2 from 1 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "3->0 1/1 desired 0 (absolute values)",
            oldRc:       oldRc(3, 3),
            newRc:       newRc(0, 0),
            newRcExists: true,
            maxUnavail:  intstr.FromInt(1),
            maxSurge:    intstr.FromInt(1),
            expected: []interface{}{
                down{oldReady: 3, newReady: 0, to: 0},
            },
            output: `Continuing update with existing controller foo-v2.
Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 1 pods)
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "3->0 10/10 desired 0 (percentages)",
            oldRc:       oldRc(3, 3),
            newRc:       newRc(0, 0),
            newRcExists: true,
            maxUnavail:  intstr.FromString("10%"),
            maxSurge:    intstr.FromString("10%"),
            expected: []interface{}{
                down{oldReady: 3, newReady: 0, to: 0},
            },
            output: `Continuing update with existing controller foo-v2.
Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods)
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "3->0 10/10 desired 0 (create new RC)",
            oldRc:       oldRc(3, 3),
            newRc:       newRc(0, 0),
            newRcExists: false,
            maxUnavail:  intstr.FromString("10%"),
            maxSurge:    intstr.FromString("10%"),
            expected: []interface{}{
                down{oldReady: 3, newReady: 0, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods)
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "0->0 1/1 desired 0 (absolute values)",
            oldRc:       oldRc(0, 0),
            newRc:       newRc(0, 0),
            newRcExists: true,
            maxUnavail:  intstr.FromInt(1),
            maxSurge:    intstr.FromInt(1),
            expected: []interface{}{
                down{oldReady: 0, newReady: 0, to: 0},
            },
            output: `Continuing update with existing controller foo-v2.
Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 0 to 0 (keep 0 pods available, don't exceed 1 pods)
`,
        },
        {
            name:        "30->2 50%/0",
            oldRc:       oldRc(30, 30),
            newRc:       newRc(0, 2),
            newRcExists: false,
            maxUnavail:  intstr.FromString("50%"),
            maxSurge:    intstr.FromInt(0),
            expected: []interface{}{
                down{oldReady: 30, newReady: 0, to: 1}, up{1},
                down{oldReady: 1, newReady: 2, to: 0}, up{2},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 30 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v1 down to 1
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
Scaling foo-v2 up to 2
`,
        },
        {
            name:        "2->2 1/0 blocked oldRc",
            oldRc:       oldRc(2, 2),
            newRc:       newRc(0, 2),
            newRcExists: false,
            maxUnavail:  intstr.FromInt(1),
            maxSurge:    intstr.FromInt(0),
            expected: []interface{}{
                down{oldReady: 1, newReady: 0, to: 1}, up{1},
                down{oldReady: 1, newReady: 1, to: 0}, up{2},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v1 down to 1
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
Scaling foo-v2 up to 2
`,
        },
        {
            name:        "1->1 1/0 allow maxUnavailability",
            oldRc:       oldRc(1, 1),
            newRc:       newRc(0, 1),
            newRcExists: false,
            maxUnavail:  intstr.FromString("1%"),
            maxSurge:    intstr.FromInt(0),
            expected: []interface{}{
                down{oldReady: 1, newReady: 0, to: 0}, up{1},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 0 pods available, don't exceed 1 pods)
Scaling foo-v1 down to 0
Scaling foo-v2 up to 1
`,
        },
        {
            name:        "1->2 25/25 complex asymmetric deployment",
            oldRc:       oldRc(1, 1),
            newRc:       newRc(0, 2),
            newRcExists: false,
            maxUnavail:  intstr.FromString("25%"),
            maxSurge:    intstr.FromString("25%"),
            expected: []interface{}{
                up{2}, down{oldReady: 1, newReady: 2, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 1 to 0 (keep 2 pods available, don't exceed 3 pods)
Scaling foo-v2 up to 2
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "2->2 25/1 maxSurge trumps maxUnavailable",
            oldRc:       oldRc(2, 2),
            newRc:       newRc(0, 2),
            newRcExists: false,
            maxUnavail:  intstr.FromString("25%"),
            maxSurge:    intstr.FromString("1%"),
            expected: []interface{}{
                up{1}, down{oldReady: 2, newReady: 1, to: 1},
                up{2}, down{oldReady: 1, newReady: 2, to: 0},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods)
Scaling foo-v2 up to 1
Scaling foo-v1 down to 1
Scaling foo-v2 up to 2
Scaling foo-v1 down to 0
`,
        },
        {
            name:        "2->2 25/0 maxUnavailable resolves to zero, then one",
            oldRc:       oldRc(2, 2),
            newRc:       newRc(0, 2),
            newRcExists: false,
            maxUnavail:  intstr.FromString("25%"),
            maxSurge:    intstr.FromString("0%"),
            expected: []interface{}{
                down{oldReady: 2, newReady: 0, to: 1}, up{1},
                down{oldReady: 1, newReady: 1, to: 0}, up{2},
            },
            output: `Created foo-v2
Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods)
Scaling foo-v1 down to 1
Scaling foo-v2 up to 1
Scaling foo-v1 down to 0
Scaling foo-v2 up to 2
`,
        },
    }

    for i, test := range tests {
        // Extract expectations into some makeshift FIFOs so they can be returned
        // in the correct order from the right places. This lets scale downs be
        // expressed as a single event even though the data is used from multiple
        // interface calls.
        oldReady := []int{}
        newReady := []int{}
        upTo := []int{}
        downTo := []int{}
        for _, event := range test.expected {
            switch e := event.(type) {
            case down:
                oldReady = append(oldReady, e.oldReady)
                newReady = append(newReady, e.newReady)
                if !e.noop {
                    downTo = append(downTo, e.to)
                }
            case up:
                upTo = append(upTo, e.to)
            }
        }

        // Make a way to get the next item from our FIFOs. Returns -1 if the array
        // is empty.
        next := func(s *[]int) int {
            slice := *s
            v := -1
            if len(slice) > 0 {
                v = slice[0]
                if len(slice) > 1 {
                    *s = slice[1:]
                } else {
                    *s = []int{}
                }
            }
            return v
        }
        t.Logf("running test %d (%s) (up: %v, down: %v, oldReady: %v, newReady: %v)", i, test.name, upTo, downTo, oldReady, newReady)

        updater := &RollingUpdater{
            ns: "default",
            scaleAndWait: func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
                // Return a scale up or scale down expectation depending on the rc,
                // and throw errors if there is no expectation expressed for this
                // call.
                expected := -1
                switch {
                case rc == test.newRc:
                    t.Logf("scaling up %s to %d", rc.Name, rc.Spec.Replicas)
                    expected = next(&upTo)
                case rc == test.oldRc:
                    t.Logf("scaling down %s to %d", rc.Name, rc.Spec.Replicas)
                    expected = next(&downTo)
                }
                if expected == -1 {
                    t.Fatalf("unexpected scale of %s to %d", rc.Name, rc.Spec.Replicas)
                } else if e, a := expected, int(rc.Spec.Replicas); e != a {
                    t.Fatalf("expected scale of %s to %d, got %d", rc.Name, e, a)
                }
                // Simulate the scale.
                rc.Status.Replicas = rc.Spec.Replicas
                return rc, nil
            },
            getOrCreateTargetController: func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
                // Simulate a create vs. update of an existing controller.
                return test.newRc, test.newRcExists, nil
            },
            cleanup: func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error {
                return nil
            },
        }
        // Set up a mock readiness check which handles the test assertions.
        updater.getReadyPods = func(oldRc, newRc *api.ReplicationController) (int32, int32, error) {
            // Return simulated readiness, and throw an error if this call has no
            // expectations defined.
            oldReady := next(&oldReady)
            newReady := next(&newReady)
            if oldReady == -1 || newReady == -1 {
                t.Fatalf("unexpected getReadyPods call for:\noldRc: %+v\nnewRc: %+v", oldRc, newRc)
            }
            return int32(oldReady), int32(newReady), nil
        }

        var buffer bytes.Buffer
        config := &RollingUpdaterConfig{
            Out:            &buffer,
            OldRc:          test.oldRc,
            NewRc:          test.newRc,
            UpdatePeriod:   0,
            Interval:       time.Millisecond,
            Timeout:        time.Millisecond,
            CleanupPolicy:  DeleteRollingUpdateCleanupPolicy,
            MaxUnavailable: test.maxUnavail,
            MaxSurge:       test.maxSurge,
        }
        err := updater.Update(config)
        if err != nil {
            t.Errorf("unexpected error: %v", err)
        }

        if buffer.String() != test.output {
            t.Errorf("Bad output. expected:\n%s\ngot:\n%s", test.output, buffer.String())
        }
    }
}