func TestExternalKube(t *testing.T) {
	// Start one OpenShift master as "cluster1" to play the external kube server
	cluster1MasterConfig, cluster1AdminConfigFile, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Copy admin.kubeconfig with a name change to stop it from being over-written later
	persistentCluster1AdminConfigFile := cluster1AdminConfigFile + "old"
	err = copyFile(cluster1AdminConfigFile, persistentCluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Set up cluster 2 to run against cluster 1 as external kubernetes
	cluster2MasterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Don't start kubernetes in process
	cluster2MasterConfig.KubernetesMasterConfig = nil
	// Connect to cluster1 using the copied admin credentials
	cluster2MasterConfig.MasterClients.ExternalKubernetesKubeConfig = persistentCluster1AdminConfigFile
	// Don't start etcd
	cluster2MasterConfig.EtcdConfig = nil
	// Use the same credentials as cluster1 to connect to existing etcd
	cluster2MasterConfig.EtcdClientInfo = cluster1MasterConfig.EtcdClientInfo
	// Set a custom etcd prefix to make sure data is getting sent to cluster1
	cluster2MasterConfig.EtcdStorageConfig.KubernetesStoragePrefix += "2"
	cluster2MasterConfig.EtcdStorageConfig.OpenShiftStoragePrefix += "2"
	// Don't manage any names in cluster2
	cluster2MasterConfig.ServiceAccountConfig.ManagedNames = []string{}
	// Don't create any service account tokens in cluster2
	cluster2MasterConfig.ServiceAccountConfig.PrivateKeyFile = ""
	// Use the same public keys to validate tokens as cluster1
	cluster2MasterConfig.ServiceAccountConfig.PublicKeyFiles = cluster1MasterConfig.ServiceAccountConfig.PublicKeyFiles
	// Don't run controllers in the second cluster
	cluster2MasterConfig.PauseControllers = true
	// Don't try to start a second DNS server
	cluster2MasterConfig.DNSConfig = nil

	// Start cluster 2 (without clearing etcd) and get admin client configs and clients
	cluster2Options := testserver.TestOptions{DeleteAllEtcdKeys: false, EnableControllers: true}
	cluster2AdminConfigFile, err := testserver.StartConfiguredMasterWithOptions(cluster2MasterConfig, cluster2Options)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	healthzProxyTest(cluster2MasterConfig, t)

	watchProxyTest(cluster1AdminKubeClient, cluster2AdminKubeClient, t)
}
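// copyFile is called above but not defined in this excerpt. As a minimal
// sketch of what such a helper could look like (assuming it only needs to
// duplicate the kubeconfig byte-for-byte; requires the standard library "io"
// and "os" packages), the body below is illustrative rather than the actual
// implementation:
func copyFileSketch(src, dest string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()

	// Stream the contents; default file permissions are fine for a
	// test-only kubeconfig copy.
	_, err = io.Copy(out, in)
	return err
}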
func TestServiceAccountAuthorization(t *testing.T) {
	saNamespace := api.NamespaceDefault
	saName := serviceaccountadmission.DefaultServiceAccountName
	saUsername := serviceaccount.MakeUsername(saNamespace, saName)

	// Start one OpenShift master as "cluster1" to play the external kube server
	cluster1MasterConfig, cluster1AdminConfigFile, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminConfig, err := testutil.GetClusterAdminClientConfig(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminOSClient, err := testutil.GetClusterAdminClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Get a service account token and build a client
	saToken, err := waitForServiceAccountToken(cluster1AdminKubeClient, saNamespace, saName, 20, time.Second)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(saToken) == 0 {
		t.Fatalf("token was not created")
	}
	cluster1SAClientConfig := kclient.Config{
		Host:        cluster1AdminConfig.Host,
		Prefix:      cluster1AdminConfig.Prefix,
		BearerToken: saToken,
		TLSClientConfig: kclient.TLSClientConfig{
			CAFile: cluster1AdminConfig.CAFile,
			CAData: cluster1AdminConfig.CAData,
		},
	}
	cluster1SAKubeClient, err := kclient.New(&cluster1SAClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the service account doesn't have access
	failNS := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-fail"}}
	if _, err := cluster1SAKubeClient.Namespaces().Create(failNS); !errors.IsForbidden(err) {
		t.Fatalf("expected forbidden error, got %v", err)
	}

	// Make the service account a cluster admin on cluster1
	addRoleOptions := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterAdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(cluster1AdminOSClient),
		Users:               []string{saUsername},
	}
	if err := addRoleOptions.AddRole(); err != nil {
		t.Fatalf("could not add role to service account")
	}

	// Give the policy cache a second to catch its breath
	time.Sleep(time.Second)

	// Make sure the service account now has access
	// This tests authentication using the etcd-based token getter
	passNS := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-pass"}}
	if _, err := cluster1SAKubeClient.Namespaces().Create(passNS); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Create a kubeconfig from the serviceaccount config
	cluster1SAKubeConfigFile, err := ioutil.TempFile(testutil.GetBaseDir(), "cluster1-service-account.kubeconfig")
	if err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}
	defer os.Remove(cluster1SAKubeConfigFile.Name())
	if err := writeClientConfigToKubeConfig(cluster1SAClientConfig, cluster1SAKubeConfigFile.Name()); err != nil {
		t.Fatalf("error creating kubeconfig: %v", err)
	}

	// Set up cluster 2 to run against cluster 1 as external kubernetes
	cluster2MasterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Don't start kubernetes in process
	cluster2MasterConfig.KubernetesMasterConfig = nil
	// Connect to cluster1 using the service account credentials
	cluster2MasterConfig.MasterClients.ExternalKubernetesKubeConfig = cluster1SAKubeConfigFile.Name()
	// Don't start etcd
	cluster2MasterConfig.EtcdConfig = nil
	// Use the same credentials as cluster1 to connect to existing etcd
	cluster2MasterConfig.EtcdClientInfo = cluster1MasterConfig.EtcdClientInfo
	// Set a custom etcd prefix to make sure data is getting sent to cluster1
	cluster2MasterConfig.EtcdStorageConfig.KubernetesStoragePrefix += "2"
	cluster2MasterConfig.EtcdStorageConfig.OpenShiftStoragePrefix += "2"
	// Don't manage any names in cluster2
	cluster2MasterConfig.ServiceAccountConfig.ManagedNames = []string{}
	// Don't create any service account tokens in cluster2
	cluster2MasterConfig.ServiceAccountConfig.PrivateKeyFile = ""
	// Use the same public keys to validate tokens as cluster1
	cluster2MasterConfig.ServiceAccountConfig.PublicKeyFiles = cluster1MasterConfig.ServiceAccountConfig.PublicKeyFiles
	// Don't try to start a second DNS server
	cluster2MasterConfig.DNSConfig = nil

	// Start cluster 2 (without clearing etcd) and get admin client configs and clients
	cluster2Options := testserver.TestOptions{DeleteAllEtcdKeys: false}
	cluster2AdminConfigFile, err := testserver.StartConfiguredMasterWithOptions(cluster2MasterConfig, cluster2Options)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminConfig, err := testutil.GetClusterAdminClientConfig(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminOSClient, err := testutil.GetClusterAdminClient(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Build a client to use the same service account token against cluster2
	cluster2SAClientConfig := cluster1SAClientConfig
	cluster2SAClientConfig.Host = cluster2AdminConfig.Host
	cluster2SAKubeClient, err := kclient.New(&cluster2SAClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the service account doesn't have access
	// A forbidden error makes sure the token was recognized, and policy denied us
	// This exercises the client-based token getter
	// It also makes sure we don't loop back through the cluster2 kube proxy which would cause an auth loop
	failNS2 := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-fail2"}}
	if _, err := cluster2SAKubeClient.Namespaces().Create(failNS2); !errors.IsForbidden(err) {
		t.Fatalf("expected forbidden error, got %v", err)
	}

	// Make the service account a cluster admin on cluster2
	addRoleOptions2 := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterAdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(cluster2AdminOSClient),
		Users:               []string{saUsername},
	}
	if err := addRoleOptions2.AddRole(); err != nil {
		t.Fatalf("could not add role to service account")
	}

	// Give the policy cache a second to catch its breath
	time.Sleep(time.Second)

	// Make sure the service account now has access to cluster2
	passNS2 := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-pass2"}}
	if _, err := cluster2SAKubeClient.Namespaces().Create(passNS2); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the ns actually got created in cluster1
	if _, err := cluster1SAKubeClient.Namespaces().Get(passNS2.Name); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
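// waitForServiceAccountToken and writeClientConfigToKubeConfig are called
// above but defined elsewhere in the test package. As a rough, hedged sketch
// of the first helper's contract (poll until the token controller has
// populated a token secret for the named service account), assuming the old
// kclient/api types already imported by this file; the body is illustrative,
// not the actual helper:
func waitForServiceAccountTokenSketch(client *kclient.Client, ns, name string, attempts int, interval time.Duration) (string, error) {
	for i := 0; i <= attempts; i++ {
		secrets, err := client.Secrets(ns).List(api.ListOptions{})
		if err != nil {
			return "", err
		}
		for _, secret := range secrets.Items {
			// Look for a token secret annotated with the requested service account name.
			if secret.Type == api.SecretTypeServiceAccountToken &&
				secret.Annotations[api.ServiceAccountNameKey] == name {
				if token, ok := secret.Data[api.ServiceAccountTokenKey]; ok && len(token) > 0 {
					return string(token), nil
				}
			}
		}
		time.Sleep(interval)
	}
	// No token appeared within the allotted attempts; the caller treats an
	// empty token as a failure.
	return "", nil
}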
// TestIngressIPAllocation validates that ingress ip allocation is
// performed correctly even when multiple controllers are running.
func TestIngressIPAllocation(t *testing.T) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	masterConfig.NetworkConfig.ExternalIPNetworkCIDRs = []string{"172.16.0.0/24"}
	masterConfig.NetworkConfig.IngressIPNetworkCIDR = "172.16.1.0/24"
	clusterAdminKubeConfig, err := testserver.StartConfiguredMasterWithOptions(masterConfig, testserver.TestOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	kc, _, err := configapi.GetKubeClient(clusterAdminKubeConfig, &configapi.ClientConnectionOverrides{
		QPS:   20,
		Burst: 50,
	})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	stopChannel := make(chan struct{})
	defer close(stopChannel)

	received := make(chan bool)

	rand.Seed(time.Now().UTC().UnixNano())

	t.Log("start informer to watch for sentinel")
	_, informerController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return kc.Services(kapi.NamespaceAll).List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return kc.Services(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Service{},
		time.Minute*10,
		framework.ResourceEventHandlerFuncs{
			UpdateFunc: func(old, cur interface{}) {
				service := cur.(*kapi.Service)
				if service.Name == sentinelName && len(service.Spec.ExternalIPs) > 0 {
					received <- true
				}
			},
		},
	)
	go informerController.Run(stopChannel)

	t.Log("start generating service events")
	go generateServiceEvents(t, kc)

	// Start a second controller that will be out of sync with the first
	_, ipNet, err := net.ParseCIDR(masterConfig.NetworkConfig.IngressIPNetworkCIDR)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	c := ingressip.NewIngressIPController(kc, ipNet, 10*time.Minute)
	go c.Run(stopChannel)

	t.Log("waiting for sentinel to be updated with external ip")
	select {
	case <-received:
	case <-time.After(time.Duration(90 * time.Second)):
		t.Fatal("took too long")
	}

	// Validate that all services of type load balancer have a unique
	// ingress ip and corresponding external ip.
	services, err := kc.Services(kapi.NamespaceDefault).List(kapi.ListOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	ips := sets.NewString()
	for _, s := range services.Items {
		typeLoadBalancer := s.Spec.Type == kapi.ServiceTypeLoadBalancer
		hasAllocation := len(s.Status.LoadBalancer.Ingress) > 0
		switch {
		case !typeLoadBalancer && !hasAllocation:
			continue
		case !typeLoadBalancer && hasAllocation:
			t.Errorf("A service not of type load balancer has an ingress ip allocation")
			continue
		case typeLoadBalancer && !hasAllocation:
			t.Errorf("A service of type load balancer has not been allocated an ingress ip")
			continue
		}
		ingressIP := s.Status.LoadBalancer.Ingress[0].IP
		if ips.Has(ingressIP) {
			t.Errorf("One or more services have the same ingress ip")
			continue
		}
		ips.Insert(ingressIP)
		if len(s.Spec.ExternalIPs) == 0 || s.Spec.ExternalIPs[0] != ingressIP {
			t.Errorf("Service does not have the ingress ip as an external ip")
			continue
		}
	}
}
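// generateServiceEvents and sentinelName are referenced above but defined
// elsewhere in this test file. To illustrate the shape of the traffic the
// informer waits for, a minimal sketch that creates a few LoadBalancer
// services and ends with the sentinel. The function name, service names,
// counts, and the "fmt"/kclient imports are assumptions for this sketch only:
func generateServiceEventsSketch(t *testing.T, kc kclient.Interface) {
	const sketchSentinelName = "sentinel"
	for i := 0; i < 5; i++ {
		name := fmt.Sprintf("lb-service-%d", i)
		if i == 4 {
			// The sentinel is created last; the informer's UpdateFunc fires
			// once a controller assigns it an external ip.
			name = sketchSentinelName
		}
		service := &kapi.Service{
			ObjectMeta: kapi.ObjectMeta{Name: name},
			Spec: kapi.ServiceSpec{
				Type: kapi.ServiceTypeLoadBalancer,
				Ports: []kapi.ServicePort{{
					Port:     8080,
					Protocol: kapi.ProtocolTCP,
				}},
			},
		}
		if _, err := kc.Services(kapi.NamespaceDefault).Create(service); err != nil {
			t.Fatalf("Unexpected error creating service %q: %v", name, err)
		}
	}
}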