// NewFakeClusterManager creates a new fake ClusterManager.
func NewFakeClusterManager(clusterName string) *fakeClusterManager {
	fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
	fakeBackends := backends.NewFakeBackendServices()
	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
	fakeHCs := healthchecks.NewFakeHealthChecks()
	namer := utils.NewNamer(clusterName)

	nodePool := instances.NewNodePool(fakeIGs)
	nodePool.Init(&instances.FakeZoneLister{[]string{"zone-a"}})

	healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
	healthChecker.Init(&healthchecks.FakeHealthCheckGetter{nil})

	backendPool := backends.NewBackendPool(
		fakeBackends, healthChecker, nodePool, namer, []int64{}, false)
	l7Pool := loadbalancers.NewLoadBalancerPool(
		fakeLbs,
		// TODO: change this
		backendPool,
		testDefaultBeNodePort,
		namer,
	)
	frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallRules(), namer)
	cm := &ClusterManager{
		ClusterNamer: namer,
		instancePool: nodePool,
		backendPool:  backendPool,
		l7Pool:       l7Pool,
		firewallPool: frPool,
	}
	return &fakeClusterManager{cm, fakeLbs, fakeBackends, fakeIGs}
}
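// A minimal smoke-test sketch, not from the original source: it assumes
// fakeClusterManager embeds *ClusterManager (as the positional literal above
// suggests) and that the test lives in the same package as the fake.
func TestNewFakeClusterManagerSmoke(t *testing.T) {
	cm := NewFakeClusterManager("fake-cluster")
	if cm.ClusterManager == nil {
		t.Fatalf("expected an initialized ClusterManager, got nil")
	}
	// A name without "--" should be preserved verbatim by the namer.
	if got := cm.ClusterNamer.GetClusterName(); got != "fake-cluster" {
		t.Errorf("expected cluster name %q, got %q", "fake-cluster", got)
	}
}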
// newNamer returns a Namer seeded with the cluster UID and keeps it in sync
// with the UID configmap in the background.
func newNamer(kubeClient client.Interface, clusterName string) (*utils.Namer, error) {
	name, err := getClusterUID(kubeClient, clusterName)
	if err != nil {
		return nil, err
	}

	namer := utils.NewNamer(name)
	vault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)

	// Start a goroutine to poll the cluster UID config map.
	// We don't watch because we know exactly which configmap we want and this
	// controller already watches 5 other resources, so it isn't worth the cost
	// of another connection and complexity.
	go wait.Forever(func() {
		uid, found, err := vault.Get()
		existing := namer.GetClusterName()
		if found && uid != existing {
			glog.Infof("Cluster uid changed from %v -> %v", existing, uid)
			namer.SetClusterName(uid)
		} else if err != nil {
			glog.Errorf("Failed to reconcile cluster uid, currently set to %v: %v", existing, err)
		}
	}, 5*time.Second)
	return namer, nil
}
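// A standalone sketch of the polling pattern above, under the assumption that
// wait.Forever comes from k8s.io/apimachinery/pkg/util/wait (older trees used
// k8s.io/kubernetes/pkg/util/wait): it runs the function immediately and then
// once per period until the process exits, which is why a plain poll is
// cheaper here than a watch.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	go wait.Forever(func() { fmt.Println("poll tick") }, time.Second)
	time.Sleep(3 * time.Second) // observe a few ticks, then exit
}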
func TestNameParsing(t *testing.T) {
	clusterName := "123"
	namer := utils.NewNamer(clusterName)
	fullName := namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, namer.LBName("testlb")))
	annotationsMap := map[string]string{
		fmt.Sprintf("%v/forwarding-rule", utils.K8sAnnotationPrefix): fullName,
	}
	components := namer.ParseName(GCEResourceName(annotationsMap, "forwarding-rule"))
	t.Logf("%+v", components)

	if components.ClusterName != clusterName {
		t.Errorf("Failed to parse cluster name from %v, expected %v got %v", fullName, clusterName, components.ClusterName)
	}
	resourceName := "fw"
	if components.Resource != resourceName {
		t.Errorf("Failed to parse resource from %v, expected %v got %v", fullName, resourceName, components.Resource)
	}
}
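// For orientation, the fully qualified name under test has the shape
// "k8s-<resource>-<lbname>--<clusterUID>", e.g. "k8s-fw-testlb--123". The
// following is a hypothetical re-implementation of the parse, not the real
// utils.Namer.ParseName; the name shape is inferred from the assertions above.
func parseGCEName(name string) (resource, cluster string) {
	if i := strings.LastIndex(name, "--"); i >= 0 {
		cluster = name[i+2:]
		name = name[:i]
	}
	// "k8s", then the resource token, then the rest of the name.
	if parts := strings.SplitN(name, "-", 3); len(parts) > 1 {
		resource = parts[1]
	}
	return resource, cluster
}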
func TestInvalidClusterNameChange(t *testing.T) {
	namer := utils.NewNamer("test--123")
	if got := namer.GetClusterName(); got != "123" {
		t.Fatalf("Expected name 123, got %v", got)
	}
	// A name with `--` should take the last token.
	for _, testCase := range []struct{ newName, expected string }{
		{"foo--bar", "bar"},
		{"--", ""},
		{"", ""},
		{"foo--bar--com", "com"},
	} {
		namer.SetClusterName(testCase.newName)
		if got := namer.GetClusterName(); got != testCase.expected {
			t.Fatalf("Expected %q got %q", testCase.expected, got)
		}
	}
}
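// A hypothetical helper, not part of utils, capturing the rule this test
// encodes: SetClusterName keeps only the token after the last "--". It covers
// all four cases in the table above, including the empty-string edge cases.
func lastClusterToken(name string) string {
	parts := strings.Split(name, "--")
	return parts[len(parts)-1]
}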