Example #1
func setupProjectRequestLimitTest(t *testing.T, pluginConfig *requestlimit.ProjectRequestLimitConfig) (kclient.Interface, client.Interface, *kclient.Config) {
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	masterConfig.AdmissionConfig.PluginOrderOverride = []string{"OriginNamespaceLifecycle", "BuildByStrategy", "ProjectRequestLimit"}
	masterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"ProjectRequestLimit": {
			Configuration: runtime.EmbeddedObject{
				Object: pluginConfig,
			},
		},
	}
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	openshiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting openshift client: %v", err)
	}
	clientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client config: %v", err)
	}
	return kubeClient, openshiftClient, clientConfig
}
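A minimal usage sketch for the helper above (hypothetical; the ProjectRequestLimitConfig fields Limits, Selector, and MaxProjects are assumptions for illustration, not taken from this listing):
func TestProjectRequestLimitExample(t *testing.T) {
	// Assumed config shape: a single selector-less rule capping every user at two projects.
	maxProjects := 2
	pluginConfig := &requestlimit.ProjectRequestLimitConfig{
		Limits: []requestlimit.ProjectLimitBySelector{
			{Selector: nil, MaxProjects: &maxProjects},
		},
	}
	kubeClient, openshiftClient, clientConfig := setupProjectRequestLimitTest(t, pluginConfig)
	_, _, _ = kubeClient, openshiftClient, clientConfig
	// ... request projects as a regular user and expect the request over the limit to be rejected ...
}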
func setupClusterResourceOverrideTest(t *testing.T, pluginConfig *overrideapi.ClusterResourceOverrideConfig) kclient.Interface {
	masterConfig, err := testserver.DefaultMasterOptions()
	checkErr(t, err)
	// fill in possibly-empty config values
	if masterConfig.KubernetesMasterConfig == nil {
		masterConfig.KubernetesMasterConfig = &api.KubernetesMasterConfig{}
	}
	kubeMaster := masterConfig.KubernetesMasterConfig
	if kubeMaster.AdmissionConfig.PluginConfig == nil {
		kubeMaster.AdmissionConfig.PluginConfig = map[string]api.AdmissionPluginConfig{}
	}
	// set our config as desired
	kubeMaster.AdmissionConfig.PluginConfig[overrideapi.PluginName] =
		api.AdmissionPluginConfig{Configuration: pluginConfig}

	// start up a server and return useful clients to that server
	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterConfig)
	checkErr(t, err)
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)
	// need to create a project and return client for project admin
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, testutil.Namespace(), "peon")
	checkErr(t, err)
	checkErr(t, testserver.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}))
	return clusterAdminKubeClient
}
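A hypothetical caller for the helper above (the ClusterResourceOverrideConfig field names are assumptions for illustration):
func TestClusterResourceOverrideExample(t *testing.T) {
	// Assumed fields: enable the override and derive requests from limits.
	pluginConfig := &overrideapi.ClusterResourceOverrideConfig{
		Enabled:                     true,
		LimitCPUToMemoryPercent:     100,
		CPURequestToLimitPercent:    50,
		MemoryRequestToLimitPercent: 50,
	}
	kubeClient := setupClusterResourceOverrideTest(t, pluginConfig)
	_ = kubeClient
	// ... create a pod in testutil.Namespace() and assert its requests were overridden ...
}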
Example #3
// TestProjectMustExist verifies that content cannot be added in a project that does not exist
func TestProjectMustExist(t *testing.T) {
	testutil.RequireEtcd(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	pod := &kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{Name: "pod"},
		Spec: kapi.PodSpec{
			Containers:    []kapi.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}},
			RestartPolicy: kapi.RestartPolicyAlways,
			DNSPolicy:     kapi.DNSClusterFirst,
		},
	}

	_, err = clusterAdminKubeClient.Pods("test").Create(pod)
	if err == nil {
		t.Errorf("Expected an error on creation of a Kubernetes resource because namespace does not exist")
	}

	build := &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{Name: "buildid", Namespace: "default"},
		Spec: buildapi.BuildSpec{
			Source: buildapi.BuildSource{
				Git: &buildapi.GitBuildSource{
					URI: "http://github.com/my/repository",
				},
				ContextDir: "context",
			},
			Strategy: buildapi.BuildStrategy{
				DockerStrategy: &buildapi.DockerBuildStrategy{},
			},
			Output: buildapi.BuildOutput{
				To: &kapi.ObjectReference{
					Kind: "DockerImage",
					Name: "repository/data",
				},
			},
		},
		Status: buildapi.BuildStatus{
			Phase: buildapi.BuildPhaseNew,
		},
	}

	_, err = clusterAdminClient.Builds("test").Create(build)
	if err == nil {
		t.Errorf("Expected an error on creation of a Origin resource because namespace does not exist")
	}
}
func setupProjectRequestLimitTest(t *testing.T, pluginConfig *requestlimit.ProjectRequestLimitConfig) (kclientset.Interface, client.Interface, *restclient.Config) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	masterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"ProjectRequestLimit": {
			Configuration: pluginConfig,
		},
	}
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	openshiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting openshift client: %v", err)
	}
	clientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client config: %v", err)
	}
	return kubeClient, openshiftClient, clientConfig
}
Example #5
func setupRunOnceDurationTest(t *testing.T, pluginConfig *pluginapi.RunOnceDurationConfig, nsAnnotations map[string]string) kclient.Interface {
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	plugins := append([]string{"RunOnceDuration"}, kubemaster.AdmissionPlugins...)
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginOrderOverride = plugins
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"RunOnceDuration": {
			Configuration: runtime.EmbeddedObject{
				Object: pluginConfig,
			},
		},
	}
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	ns.Annotations = nsAnnotations
	_, err = kubeClient.Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(kubeClient, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	return kubeClient
}
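A hypothetical caller for the helper above (the RunOnceDurationConfig field name is an assumption for illustration):
func TestRunOnceDurationExample(t *testing.T) {
	// Assumed field: an override for run-once pods' activeDeadlineSeconds.
	secs := int64(3600)
	pluginConfig := &pluginapi.RunOnceDurationConfig{
		ActiveDeadlineSecondsOverride: &secs,
	}
	kubeClient := setupRunOnceDurationTest(t, pluginConfig, nil)
	_ = kubeClient
	// ... create a run-once pod and assert its ActiveDeadlineSeconds was capped ...
}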
Example #6
func TestAutomaticCreationOfPullSecrets(t *testing.T) {
	saNamespace := api.NamespaceDefault
	saName := serviceaccountadmission.DefaultServiceAccountName

	_, clusterAdminConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Get a service account token
	saToken, err := waitForServiceAccountToken(clusterAdminKubeClient, saNamespace, saName, 20, time.Second)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(saToken) == 0 {
		t.Errorf("token was not created")
	}

	// Get the matching dockercfg secret
	saPullSecret, err := waitForServiceAccountPullSecret(clusterAdminKubeClient, saNamespace, saName, 20, time.Second)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(saPullSecret) == 0 {
		t.Errorf("pull secret was not created")
	}
}
func setupRunOnceDurationTest(t *testing.T, pluginConfig *pluginapi.RunOnceDurationConfig, nsAnnotations map[string]string) kclient.Interface {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"RunOnceDuration": {
			Configuration: pluginConfig,
		},
	}
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	ns.Annotations = nsAnnotations
	_, err = kubeClient.Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForPodCreationServiceAccounts(kubeClient, testutil.Namespace()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	return kubeClient
}
func setupImageStreamAdmissionTest(t *testing.T) (*kclient.Client, *client.Client) {
	testutil.RequireEtcd(t)

	_, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	kClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	client, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	err = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	for {
		_, err = client.ImageStreams(testutil.Namespace()).Create(newImageStreamWithSpecTags("src", nil))
		t.Logf("initing: %v", err)
		if err != nil {
			if errForbiddenWithRetry(err) {
				t.Logf("waiting for limit ranger to catch up: %v", err)
				continue
			}
			t.Fatalf("err: %#v", err)
		}
		break
	}
	return kClient, client
}
Example #9
func setupImageStreamAdmissionTest(t *testing.T) (*kclient.Client, *client.Client) {
	testutil.RequireEtcd(t)

	_, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	kClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	client, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	err = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	_, err = client.ImageStreams(testutil.Namespace()).Create(&imageapi.ImageStream{
		ObjectMeta: kapi.ObjectMeta{
			Name: "src",
		},
	})
	if err != nil {
		t.Fatal(err)
	}
	return kClient, client
}
Example #10
func TestDiagNodeConditions(t *testing.T) {
	_, clientFile, err := testutil.StartTestAllInOne()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	client, err := testutil.GetClusterAdminKubeClient(clientFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodeDiag := clusterdiags.NodeDefinitions{KubeClient: client}
	nodeList := waitForNode(client, t)

	// start by testing that the diagnostic passes with all-in-one up.
	result := nodeDiag.Check()
	if warnings := result.Warnings(); len(warnings) > 0 {
		t.Fatalf("expected no warnings with one node ready, but: %#v", warnings)
	} else if errors := result.Errors(); len(errors) > 0 {
		t.Fatalf("expected no errors with one node ready, but: %#v", errors)
	}

	// Make the node unschedulable and verify diagnostics notices
	nodeList.Items[0].Spec.Unschedulable = true
	if _, err := client.Nodes().Update(&(nodeList.Items[0])); err != nil {
		t.Fatalf("expected no errors making node unschedulable, but: %#v", err)
	}
	result = nodeDiag.Check()
	if errors := result.Errors(); len(errors) != 1 ||
		!diagtype.MatchesDiagError(errors[0], "DClu0004") {
		t.Fatalf("expected 1 error about not having nodes, but: %#v", errors)
	} else if warnings := result.Warnings(); len(warnings) < 1 || !diagtype.MatchesDiagError(warnings[0], "DClu0003") {
		t.Fatalf("expected a warning about test-node not being schedulable, but: %#v", warnings)
	}

	// delete it and check with no nodes defined; should get an error about that.
	if err := client.Nodes().Delete(nodeList.Items[0].ObjectMeta.Name); err != nil {
		t.Fatalf("expected no errors deleting node, but: %#v", err)
	}
	if errors := nodeDiag.Check().Errors(); len(errors) != 1 ||
		!diagtype.MatchesDiagError(errors[0], "DClu0004") {
		t.Errorf("expected 1 error about not having nodes, not: %#v", errors)
	}

	// Next create a node and leave it in NotReady state. Should get a warning
	// about that, plus the previous error as there are still no nodes available.
	_, err = client.Nodes().Create(&kapi.Node{ObjectMeta: kapi.ObjectMeta{Name: "test-node"}})
	if err != nil {
		t.Fatalf("expected no errors creating a node: %#v", err)
	}
	result = nodeDiag.Check()
	if errors := result.Errors(); len(errors) != 1 ||
		!diagtype.MatchesDiagError(errors[0], "DClu0004") {
		t.Fatalf("expected 1 error about not having nodes, not: %#v", errors)
	} else if warnings := result.Warnings(); len(warnings) < 1 || !diagtype.MatchesDiagError(warnings[0], "DClu0002") {
		t.Fatalf("expected a warning about test-node not being ready, not: %#v", warnings)
	}
}
Example #11
func TestWebhookGitHubPing(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unable to start master: %v", err)
	}

	kubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unable to get kubeClient: %v", err)
	}
	osClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unable to get osClient: %v", err)
	}

	kubeClient.Core().Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})

	// create buildconfig
	buildConfig := mockBuildConfigImageParms("originalimage", "imagestream", "validtag")
	if _, err := osClient.BuildConfigs(testutil.Namespace()).Create(buildConfig); err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	watch, err := osClient.Builds(testutil.Namespace()).Watch(kapi.ListOptions{})
	if err != nil {
		t.Fatalf("Couldn't subscribe to builds: %v", err)
	}
	defer watch.Stop()

	for _, s := range []string{
		"/oapi/v1/namespaces/" + testutil.Namespace() + "/buildconfigs/pushbuild/webhooks/secret101/github",
		"/oapi/v1/namespaces/" + testutil.Namespace() + "/buildconfigs/pushbuild/webhooks/secret100/github",
		"/oapi/v1/namespaces/" + testutil.Namespace() + "/buildconfigs/pushbuild/webhooks/secret102/github",
	} {
		// trigger build event sending push notification
		clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}

		postFile(osClient.RESTClient.Client, "ping", "pingevent.json", clusterAdminClientConfig.Host+s, http.StatusOK, t)

		// TODO: improve negative testing
		timer := time.NewTimer(time.Second / 2)
		select {
		case <-timer.C:
			// nothing should happen
		case event := <-watch.ResultChan():
			build := event.Object.(*buildapi.Build)
			t.Fatalf("Unexpected build created: %#v", build)
		}
	}
}
Example #12
func setupUserPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig, user string) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	cfg := map[string]configapi.AdmissionPluginConfig{
		"PodNodeConstraints": {
			Configuration: pluginConfig,
		},
	}
	masterConfig.AdmissionConfig.PluginConfig = cfg
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = cfg
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	userClient, userkubeClientset, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, user)
	if err != nil {
		t.Fatalf("error getting user/kube client: %v", err)
	}
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting kube client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForServiceAccounts(kubeClientset, testutil.Namespace(), []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	addUser := &policy.RoleModificationOptions{
		RoleNamespace:       ns.Name,
		RoleName:            bootstrappolicy.AdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(clusterAdminClient),
		Users:               []string{user},
	}
	if err := addUser.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	return userClient, userkubeClientset
}
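A hypothetical caller for the helper above (the PodNodeConstraintsConfig field name is an assumption for illustration):
func TestUserPodNodeConstraintsExample(t *testing.T) {
	// Assumed field: labels that ordinary users may not set in a pod's node selector.
	pluginConfig := &pluginapi.PodNodeConstraintsConfig{
		NodeSelectorLabelBlacklist: []string{"kubernetes.io/hostname"},
	}
	userClient, userKubeClientset := setupUserPodNodeConstraintsTest(t, pluginConfig, "testuser")
	_, _ = userClient, userKubeClientset
	// ... create a pod with a blacklisted node selector and expect admission to reject it ...
}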
func TestNamespaceLifecycleAdmission(t *testing.T) {
	testutil.RequireEtcd(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	checkErr(t, err)

	for _, ns := range []string{"default", "openshift", "openshift-infra"} {
		if err := clusterAdminClient.Namespaces().Delete(ns); err == nil {
			t.Fatalf("expected error deleting %q namespace, got none", ns)
		}
	}
}
Example #14
// TestPushSecretName exercises one of the complex Build scenarios: you first
// build a Docker image using the Docker build strategy, which is later
// consumed by the Custom build strategy to verify that the 'PushSecretName'
// (Docker credentials) was successfully transported to the builder. The
// content of the Secret file is verified at the end.
func TestPushSecretName(t *testing.T) {
	namespace := testutil.RandomNamespace("secret")
	client, _ := testutil.GetClusterAdminClient(testutil.KubeConfigPath())
	kclient, _ := testutil.GetClusterAdminKubeClient(testutil.KubeConfigPath())

	stream := testutil.CreateSampleImageStream(namespace)
	if stream == nil {
		t.Fatal("Failed to create ImageStream")
	}
	defer testutil.DeleteSampleImageStream(stream, namespace)

	// Create Secret with dockercfg
	secret := testutil.GetSecretFixture("fixtures/test-secret.json")
	// TODO: Why do I need to set namespace here?
	secret.Namespace = namespace
	_, err := kclient.Secrets(namespace).Create(secret)
	if err != nil {
		t.Fatalf("Failed to create Secret: %v", err)
	}

	watcher, err := client.Builds(namespace).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Failed to create watcher: %v", err)
	}
	defer watcher.Stop()

	// First build the builder image (custom build builder)
	dockerBuild := testutil.GetBuildFixture("fixtures/test-secret-build.json")
	newDockerBuild, err := client.Builds(namespace).Create(dockerBuild)
	if err != nil {
		t.Fatalf("Unable to create Build %s: %v", dockerBuild.Name, err)
	}
	waitForComplete(newDockerBuild, watcher, t)

	// Now build the application image using custom build (run the previous image)
	// Custom build will copy the dockercfg file into the application image.
	customBuild := testutil.GetBuildFixture("fixtures/test-custom-build.json")
	imageName := fmt.Sprintf("%s/%s/%s", os.Getenv("REGISTRY_ADDR"), namespace, stream.Name)
	customBuild.Parameters.Strategy.CustomStrategy.Image = imageName
	newCustomBuild, err := client.Builds(namespace).Create(customBuild)
	if err != nil {
		t.Fatalf("Unable to create Build %s: %v", dockerBuild.Name, err)
	}
	waitForComplete(newCustomBuild, watcher, t)

	// Verify that the dockercfg file is there
	if err := testutil.VerifyImage(stream, "application", namespace, validatePushSecret); err != nil {
		t.Fatalf("Image verification failed: %v", err)
	}
}
func TestAlwaysPullImagesOn(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)

	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"AlwaysPullImages": {
			Configuration: &configapi.DefaultAdmissionConfig{},
		},
	}
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}

	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
		t.Fatalf("error getting client config: %v", err)
	}

	testPod := &kapi.Pod{}
	testPod.GenerateName = "test"
	testPod.Spec.Containers = []kapi.Container{
		{
			Name:            "container",
			Image:           "openshift/origin-pod:notlatest",
			ImagePullPolicy: kapi.PullNever,
		},
	}

	actualPod, err := kubeClientset.Core().Pods(testutil.Namespace()).Create(testPod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if actualPod.Spec.Containers[0].ImagePullPolicy != kapi.PullAlways {
		t.Errorf("expected %v, got %v", kapi.PullAlways, actualPod.Spec.Containers[0].ImagePullPolicy)
	}
}
func setupBuildControllerTest(counts controllerCount, t *testing.T) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	master, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatal(err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}

	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}
	_, err = clusterAdminKubeClientset.Core().Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	if err != nil {
		t.Fatal(err)
	}

	if err := testserver.WaitForServiceAccounts(clusterAdminKubeClientset, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	openshiftConfig, err := origin.BuildMasterConfig(*master)
	if err != nil {
		t.Fatal(err)
	}

	// Get the build controller clients, since those rely on service account tokens
	// We don't want to proceed with the rest of the test until those are available
	openshiftConfig.BuildControllerClients()

	for i := 0; i < counts.BuildControllers; i++ {
		openshiftConfig.RunBuildController(openshiftConfig.Informers)
	}
	for i := 0; i < counts.BuildPodControllers; i++ {
		openshiftConfig.RunBuildPodController()
	}
	for i := 0; i < counts.ImageChangeControllers; i++ {
		openshiftConfig.RunBuildImageChangeTriggerController()
	}
	for i := 0; i < counts.ConfigChangeControllers; i++ {
		openshiftConfig.RunBuildConfigChangeController()
	}
	return clusterAdminClient, clusterAdminKubeClientset
}
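A hypothetical caller for the helper above; the controllerCount field names mirror the loops in the function body:
func TestBuildControllersExample(t *testing.T) {
	osClient, kubeClientset := setupBuildControllerTest(controllerCount{BuildControllers: 1}, t)
	_, _ = osClient, kubeClientset
	// ... create a Build in testutil.Namespace() and wait for the controller to act on it ...
}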
Example #17
func TestOAuthDisabled(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	// Build master config
	masterOptions, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Disable OAuth
	masterOptions.OAuthConfig = nil

	// Start server
	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(masterOptions)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	client, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure cert auth still works
	namespaces, err := client.Namespaces().List(kapi.ListOptions{})
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	if len(namespaces.Items) == 0 {
		t.Errorf("Expected namespaces, got none")
	}

	// Use the server and CA info
	anonConfig := restclient.Config{}
	anonConfig.Host = clientConfig.Host
	anonConfig.CAFile = clientConfig.CAFile
	anonConfig.CAData = clientConfig.CAData

	// Make sure we can't authenticate using OAuth
	if _, err := tokencmd.RequestToken(&anonConfig, nil, "username", "password"); err == nil {
		t.Error("Expected error, got none")
	}
}
Example #18
// launchApi launches an API server and returns clients configured to
// access it.
func launchApi() (osclient.Interface, kclient.Interface, error) {
	_, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		return nil, nil, err
	}

	kc, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		return nil, nil, err
	}

	oc, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		return nil, nil, err
	}

	return oc, kc, nil
}
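A hypothetical caller: since launchApi returns an error instead of taking *testing.T, each test decides how to fail:
func TestWithLaunchedApi(t *testing.T) {
	oc, kc, err := launchApi()
	if err != nil {
		t.Fatalf("error launching API server: %v", err)
	}
	_, _ = oc, kc
	// ... exercise the API through oc (OpenShift) and kc (Kubernetes) ...
}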
Example #19
func TestDiscoveryGroupVersions(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMasterAPI()
	if err != nil {
		t.Fatalf("unexpected error starting test master: %v", err)
	}

	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	resources, err := clusterAdminKubeClient.Discovery().ServerResources()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	for _, resource := range resources {
		gv, err := unversioned.ParseGroupVersion(resource.GroupVersion)
		if err != nil {
			continue
		}
		allowedVersions := sets.NewString(configapi.KubeAPIGroupsToAllowedVersions[gv.Group]...)
		if !allowedVersions.Has(gv.Version) {
			t.Errorf("Disallowed group/version found in discovery: %#v", gv)
		}
	}

	expectedGroupVersions := sets.NewString()
	for group, versions := range configapi.KubeAPIGroupsToAllowedVersions {
		for _, version := range versions {
			expectedGroupVersions.Insert(unversioned.GroupVersion{Group: group, Version: version}.String())
		}
	}

	discoveredGroupVersions := sets.StringKeySet(resources)
	if !reflect.DeepEqual(discoveredGroupVersions, expectedGroupVersions) {
		t.Fatalf("Expected %#v, got %#v", expectedGroupVersions.List(), discoveredGroupVersions.List())
	}
}
func setup(t *testing.T) *client.Client {
	_, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	clusterAdminKubeClient.Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	return clusterAdminClient
}
Example #21
func setupAdmissionTest(t *testing.T, setupConfig func(*configapi.MasterConfig)) (*kclient.Client, *client.Client) {
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	setupConfig(masterConfig)
	kubeConfigFile, err := testserver.StartConfiguredMasterAPI(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	openshiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting openshift client: %v", err)
	}
	return kubeClient, openshiftClient
}
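A hypothetical caller for the helper above; the setupConfig callback mutates the master config before the server starts (AuditConfig.Enabled is one such field, as used in a later example):
func TestAdmissionSetupExample(t *testing.T) {
	kubeClient, openshiftClient := setupAdmissionTest(t, func(config *configapi.MasterConfig) {
		config.AuditConfig.Enabled = true // mutate the config before the server starts
	})
	_, _ = kubeClient, openshiftClient
	// ... issue requests against the configured server ...
}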
func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]configapi.AdmissionPluginConfig) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	master, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("%v", err)
	}
	master.AdmissionConfig.PluginConfig = pluginConfig
	clusterAdminKubeConfig, err := testserver.StartConfiguredMaster(master)
	if err != nil {
		t.Fatalf("%v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("%v", err)
	}

	clusterAdminKubeClientset, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("%v", err)
	}

	_, err = clusterAdminKubeClientset.Core().Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	if err != nil {
		t.Fatalf("%v", err)
	}

	err = testserver.WaitForServiceAccounts(
		clusterAdminKubeClientset,
		testutil.Namespace(),
		[]string{
			bootstrappolicy.BuilderServiceAccountName,
			bootstrappolicy.DefaultServiceAccountName,
		})
	if err != nil {
		t.Fatalf("%v", err)
	}

	return clusterAdminClient, clusterAdminKubeClientset
}
func TestAlwaysPullImagesOff(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)

	_, kubeConfigFile, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}

	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
		t.Fatalf("error getting client config: %v", err)
	}

	testPod := &kapi.Pod{}
	testPod.GenerateName = "test"
	testPod.Spec.Containers = []kapi.Container{
		{
			Name:            "container",
			Image:           "openshift/origin-pod:notlatest",
			ImagePullPolicy: kapi.PullNever,
		},
	}

	actualPod, err := kubeClientset.Core().Pods(testutil.Namespace()).Create(testPod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if actualPod.Spec.Containers[0].ImagePullPolicy != kapi.PullNever {
		t.Errorf("expected %v, got %v", kapi.PullNever, actualPod.Spec.Containers[0].ImagePullPolicy)
	}
}
Example #24
func setupAuditTest(t *testing.T) (*kclient.Client, *client.Client) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	masterConfig.AuditConfig.Enabled = true
	kubeConfigFile, err := testserver.StartConfiguredMasterAPI(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	openshiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting openshift client: %v", err)
	}
	return kubeClient, openshiftClient
}
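A hypothetical caller for the helper above; with AuditConfig.Enabled set, ordinary API calls are recorded in the audit log:
func TestAuditExample(t *testing.T) {
	kubeClient, openshiftClient := setupAuditTest(t)
	if _, err := kubeClient.Namespaces().List(kapi.ListOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_ = openshiftClient
}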
Example #25
func setupClusterAdminPodNodeConstraintsTest(t *testing.T, pluginConfig *pluginapi.PodNodeConstraintsConfig) (*client.Client, *kclientset.Clientset) {
	testutil.RequireEtcd(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	cfg := map[string]configapi.AdmissionPluginConfig{
		"PodNodeConstraints": {
			Configuration: pluginConfig,
		},
	}
	masterConfig.AdmissionConfig.PluginConfig = cfg
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = cfg

	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	kubeClientset, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	openShiftClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	return openShiftClient, kubeClientset
}
Example #26
func setupBuildControllerTest(additionalBuildControllers, additionalBuildPodControllers, additionalImageChangeControllers int, t *testing.T) (*client.Client, *kclient.Client) {
	master, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	checkErr(t, err)

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)

	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = clusterAdminKubeClient.Namespaces().Create(&kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: testutil.Namespace()},
	})
	checkErr(t, err)

	if err := testutil.WaitForServiceAccounts(clusterAdminKubeClient, testutil.Namespace(), []string{bootstrappolicy.BuilderServiceAccountName, bootstrappolicy.DefaultServiceAccountName}); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	openshiftConfig, err := origin.BuildMasterConfig(*master)
	checkErr(t, err)

	// Get the build controller clients, since those rely on service account tokens
	// We don't want to proceed with the rest of the test until those are available
	openshiftConfig.BuildControllerClients()

	for i := 0; i < additionalBuildControllers; i++ {
		openshiftConfig.RunBuildController()
	}
	for i := 0; i < additionalBuildPodControllers; i++ {
		openshiftConfig.RunBuildPodController()
	}
	for i := 0; i < additionalImageChangeControllers; i++ {
		openshiftConfig.RunBuildImageChangeTriggerController()
	}
	return clusterAdminClient, clusterAdminKubeClient
}
Example #27
func TestNodeAuth(t *testing.T) {
	// Server config
	masterConfig, nodeConfig, adminKubeConfigFile, err := testserver.StartTestAllInOne()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Cluster admin clients and client configs
	adminClient, err := testutil.GetClusterAdminKubeClient(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	originAdminClient, err := testutil.GetClusterAdminClient(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	adminConfig, err := testutil.GetClusterAdminClientConfig(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Client configs for lesser users
	masterKubeletClientConfig := configapi.GetKubeletClientConfig(*masterConfig)

	anonymousConfig := clientcmd.AnonymousClientConfig(*adminConfig)

	badTokenConfig := clientcmd.AnonymousClientConfig(*adminConfig)
	badTokenConfig.BearerToken = "bad-token"

	bobClient, _, bobConfig, err := testutil.GetClientForUser(*adminConfig, "bob")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, _, aliceConfig, err := testutil.GetClientForUser(*adminConfig, "alice")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	sa1Client, _, sa1Config, err := testutil.GetClientForServiceAccount(adminClient, *adminConfig, "default", "sa1")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, _, sa2Config, err := testutil.GetClientForServiceAccount(adminClient, *adminConfig, "default", "sa2")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Grant Bob system:node-reader, which should let them read metrics and stats
	addBob := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.NodeReaderRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(originAdminClient),
		Subjects:            []kapi.ObjectReference{{Kind: "User", Name: "bob"}},
	}
	if err := addBob.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Grant sa1 system:cluster-reader, which should let them read metrics and stats
	addSA1 := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterReaderRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(originAdminClient),
		Subjects:            []kapi.ObjectReference{{Kind: "ServiceAccount", Namespace: "default", Name: "sa1"}},
	}
	if err := addSA1.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Wait for policy cache
	if err := testutil.WaitForClusterPolicyUpdate(bobClient, "get", "nodes/metrics", true); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := testutil.WaitForClusterPolicyUpdate(sa1Client, "get", "nodes/metrics", true); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, nodePort, err := net.SplitHostPort(nodeConfig.ServingInfo.BindAddress)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodePortInt, err := strconv.ParseInt(nodePort, 0, 0)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodeTLS := configapi.UseTLS(nodeConfig.ServingInfo)

	kubeletClientConfig := func(config *kclient.Config) *kubeletclient.KubeletClientConfig {
		return &kubeletclient.KubeletClientConfig{
			Port:            uint(nodePortInt),
			EnableHttps:     nodeTLS,
			TLSClientConfig: config.TLSClientConfig,
			BearerToken:     config.BearerToken,
		}
	}

	testCases := map[string]struct {
		KubeletClientConfig *kubeletclient.KubeletClientConfig
		Forbidden           bool
		NodeViewer          bool
		NodeAdmin           bool
	}{
		"bad token": {
			KubeletClientConfig: kubeletClientConfig(&badTokenConfig),
		},
		"anonymous": {
			KubeletClientConfig: kubeletClientConfig(&anonymousConfig),
			Forbidden:           true,
		},
		"cluster admin": {
			KubeletClientConfig: kubeletClientConfig(adminConfig),
			NodeAdmin:           true,
		},
		"master kubelet client": {
			KubeletClientConfig: masterKubeletClientConfig,
			NodeAdmin:           true,
		},
		"bob": {
			KubeletClientConfig: kubeletClientConfig(bobConfig),
			NodeViewer:          true,
		},
		"alice": {
			KubeletClientConfig: kubeletClientConfig(aliceConfig),
			Forbidden:           true,
		},
		"sa1": {
			KubeletClientConfig: kubeletClientConfig(sa1Config),
			NodeViewer:          true,
		},
		"sa2": {
			KubeletClientConfig: kubeletClientConfig(sa2Config),
			Forbidden:           true,
		},
	}

	for k, tc := range testCases {

		var (
			// expected result for requests a viewer should be able to make
			viewResult int
			// expected result for requests an admin should be able to make (that can actually complete with a 200 in our tests)
			adminResultOK int
			// expected result for requests an admin should be able to make (that return a 404 in this test if the authn/authz layer is completed)
			adminResultMissing int
		)
		switch {
		case tc.NodeAdmin:
			viewResult = http.StatusOK
			adminResultOK = http.StatusOK
			adminResultMissing = http.StatusNotFound
		case tc.NodeViewer:
			viewResult = http.StatusOK
			adminResultOK = http.StatusForbidden
			adminResultMissing = http.StatusForbidden
		case tc.Forbidden:
			viewResult = http.StatusForbidden
			adminResultOK = http.StatusForbidden
			adminResultMissing = http.StatusForbidden
		default:
			viewResult = http.StatusUnauthorized
			adminResultOK = http.StatusUnauthorized
			adminResultMissing = http.StatusUnauthorized
		}

		requests := []testRequest{
			// Responses to invalid paths are the same for all users
			{"GET", "/", http.StatusNotFound},
			{"GET", "/stats", http.StatusMovedPermanently}, // ServeMux redirects to the directory
			{"GET", "/logs", http.StatusMovedPermanently},  // ServeMux redirects to the directory
			{"GET", "/invalid", http.StatusNotFound},

			// viewer requests
			{"GET", "/metrics", viewResult},
			{"GET", "/stats/", viewResult},
			{"POST", "/stats/", viewResult}, // stats requests can be POSTs which contain query options

			// successful admin requests
			{"GET", "/healthz", adminResultOK},
			{"GET", "/pods", adminResultOK},
			{"GET", "/logs/", adminResultOK},

			// not found admin requests
			{"GET", "/containerLogs/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/exec/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/run/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/attach/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/portForward/mynamespace/mypod/mycontainer", adminResultMissing},

			// GET is supported in origin on /exec and /attach for backwards compatibility
			// make sure node admin permissions are required
			{"GET", "/exec/mynamespace/mypod/mycontainer", adminResultMissing},
			{"GET", "/attach/mynamespace/mypod/mycontainer", adminResultMissing},
		}

		rt, err := kubeletclient.MakeTransport(tc.KubeletClientConfig)
		if err != nil {
			t.Errorf("%s: unexpected error: %v", k, err)
			continue
		}

		for _, r := range requests {
			req, err := http.NewRequest(r.Method, "https://"+net.JoinHostPort(nodeConfig.NodeName, nodePort)+r.Path, nil)
			if err != nil {
				t.Errorf("%s: %s: unexpected error: %v", k, r.Path, err)
				continue
			}
			resp, err := rt.RoundTrip(req)
			if err != nil {
				t.Errorf("%s: %s: unexpected error: %v", k, r.Path, err)
				continue
			}
			resp.Body.Close()
			if resp.StatusCode != r.Result {
				t.Errorf("%s: %s: expected %d, got %d", k, r.Path, r.Result, resp.StatusCode)
				continue
			}
		}
	}
}
Example #28
func TestDNS(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	masterConfig, clientFile, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	localAddr := ""
	if ip, err := cmdutil.DefaultLocalIP4(); err == nil {
		localAddr = ip.String()
	} else if err == cmdutil.ErrorNoDefaultIP {
		localAddr = "127.0.0.1"
	} else {
		t.Fatalf("Unable to find a local IP address: %v", err)
	}

	localIP := net.ParseIP(localAddr)
	var masterIP net.IP
	// verify service DNS entry is visible
	stop := make(chan struct{})
	waitutil.Until(func() {
		m1 := &dns.Msg{
			MsgHdr:   dns.MsgHdr{Id: dns.Id(), RecursionDesired: false},
			Question: []dns.Question{{"kubernetes.default.svc.cluster.local.", dns.TypeA, dns.ClassINET}},
		}
		in, err := dns.Exchange(m1, masterConfig.DNSConfig.BindAddress)
		if err != nil {
			t.Logf("unexpected error: %v", err)
			return
		}
		if len(in.Answer) != 1 {
			t.Logf("unexpected answer: %#v", in)
			return
		}
		if a, ok := in.Answer[0].(*dns.A); ok {
			if a.A == nil {
				t.Fatalf("expected an A record with an IP: %#v", a)
			}
			masterIP = a.A
		} else {
			t.Fatalf("expected an A record: %#v", in)
		}
		t.Log(in)
		close(stop)
	}, 50*time.Millisecond, stop)

	client, err := testutil.GetClusterAdminKubeClient(clientFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Verify kubernetes service port is 53 and target port is the
	// configured masterConfig.DNSConfig.BindAddress port.
	_, dnsPortString, err := net.SplitHostPort(masterConfig.DNSConfig.BindAddress)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	dnsPort, err := strconv.Atoi(dnsPortString)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	kubernetesService, err := client.Services(kapi.NamespaceDefault).Get("kubernetes")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	found := false
	for _, port := range kubernetesService.Spec.Ports {
		if port.Port == 53 && port.TargetPort.IntVal == int32(dnsPort) && port.Protocol == kapi.ProtocolTCP {
			found = true
		}
	}
	if !found {
		t.Fatalf("did not find DNS port in kubernetes service: %#v", kubernetesService)
	}

	for {
		if _, err := client.Services(kapi.NamespaceDefault).Create(&kapi.Service{
			ObjectMeta: kapi.ObjectMeta{
				Name: "headless",
			},
			Spec: kapi.ServiceSpec{
				ClusterIP: kapi.ClusterIPNone,
				Ports:     []kapi.ServicePort{{Port: 443}},
			},
		}); err != nil {
			if errors.IsForbidden(err) {
				t.Logf("forbidden, sleeping: %v", err)
				time.Sleep(100 * time.Millisecond)
				continue
			}
			t.Fatalf("unexpected error: %v", err)
		}
		if _, err := client.Endpoints(kapi.NamespaceDefault).Create(&kapi.Endpoints{
			ObjectMeta: kapi.ObjectMeta{
				Name: "headless",
			},
			Subsets: []kapi.EndpointSubset{{
				Addresses: []kapi.EndpointAddress{{IP: "172.0.0.1"}},
				Ports: []kapi.EndpointPort{
					{Port: 2345},
				},
			}},
		}); err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		break
	}
	headlessIP := net.ParseIP("172.0.0.1")
	headlessIPHash := getHash(headlessIP.String())

	if _, err := client.Services(kapi.NamespaceDefault).Create(&kapi.Service{
		ObjectMeta: kapi.ObjectMeta{
			Name: "headless2",
		},
		Spec: kapi.ServiceSpec{
			ClusterIP: kapi.ClusterIPNone,
			Ports:     []kapi.ServicePort{{Port: 443}},
		},
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if _, err := client.Endpoints(kapi.NamespaceDefault).Create(&kapi.Endpoints{
		ObjectMeta: kapi.ObjectMeta{
			Name: "headless2",
		},
		Subsets: []kapi.EndpointSubset{{
			Addresses: []kapi.EndpointAddress{{IP: "172.0.0.2"}},
			Ports: []kapi.EndpointPort{
				{Port: 2345, Name: "other"},
				{Port: 2346, Name: "http"},
			},
		}},
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	headless2IP := net.ParseIP("172.0.0.2")
	precannedIP := net.ParseIP("10.2.4.50")

	headless2IPHash := getHash(headless2IP.String())

	tests := []struct {
		dnsQuestionName   string
		recursionExpected bool
		retry             bool
		expect            []*net.IP
		srv               []*dns.SRV
	}{
		{ // wildcard resolution of a service works
			dnsQuestionName: "foo.kubernetes.default.svc.cluster.local.",
			expect:          []*net.IP{&masterIP},
		},
		{ // resolving endpoints of a service works
			dnsQuestionName: "_endpoints.kubernetes.default.svc.cluster.local.",
			expect:          []*net.IP{&localIP},
		},
		{ // openshift override works
			dnsQuestionName: "openshift.default.svc.cluster.local.",
			expect:          []*net.IP{&masterIP},
		},
		{ // pod by IP
			dnsQuestionName: "10-2-4-50.default.pod.cluster.local.",
			expect:          []*net.IP{&precannedIP},
		},
		{ // headless service
			dnsQuestionName: "headless.default.svc.cluster.local.",
			expect:          []*net.IP{&headlessIP},
		},
		{ // specific port of a headless service
			dnsQuestionName: "unknown-port-2345.e1.headless.default.svc.cluster.local.",
			expect:          []*net.IP{&headlessIP},
		},
		{ // SRV record for that service
			dnsQuestionName: "headless.default.svc.cluster.local.",
			srv: []*dns.SRV{
				{
					Target: headlessIPHash + "._unknown-port-2345._tcp.headless.default.svc.cluster.local.",
					Port:   2345,
				},
			},
		},
		{ // the SRV record resolves to the IP
			dnsQuestionName: "unknown-port-2345.e1.headless.default.svc.cluster.local.",
			expect:          []*net.IP{&headlessIP},
		},
		{ // headless 2 service
			dnsQuestionName: "headless2.default.svc.cluster.local.",
			expect:          []*net.IP{&headless2IP},
		},
		{ // SRV records for that service
			dnsQuestionName: "headless2.default.svc.cluster.local.",
			srv: []*dns.SRV{
				{
					Target: headless2IPHash + "._http._tcp.headless2.default.svc.cluster.local.",
					Port:   2346,
				},
				{
					Target: headless2IPHash + "._other._tcp.headless2.default.svc.cluster.local.",
					Port:   2345,
				},
			},
		},
		{ // the SRV record resolves to the IP
			dnsQuestionName: "other.e1.headless2.default.svc.cluster.local.",
			expect:          []*net.IP{&headless2IP},
		},
		{
			dnsQuestionName:   "www.google.com.",
			recursionExpected: true,
		},
	}
	for i, tc := range tests {
		qType := dns.TypeA
		if tc.srv != nil {
			qType = dns.TypeSRV
		}
		m1 := &dns.Msg{
			MsgHdr:   dns.MsgHdr{Id: dns.Id(), RecursionDesired: tc.recursionExpected},
			Question: []dns.Question{{tc.dnsQuestionName, qType, dns.ClassINET}},
		}
		ch := make(chan struct{})
		count := 0
		failedLatency := 0
		waitutil.Until(func() {
			count++
			if count > 100 {
				t.Errorf("%d: failed after max iterations", i)
				close(ch)
				return
			}
			before := time.Now()
			in, err := dns.Exchange(m1, masterConfig.DNSConfig.BindAddress)
			if err != nil {
				return
			}
			after := time.Now()
			delta := after.Sub(before)
			if delta > 500*time.Millisecond {
				failedLatency++
				if failedLatency > 10 {
					t.Errorf("%d: failed after 10 requests took longer than 500ms", i)
					close(ch)
				}
				return
			}
			switch {
			case tc.srv != nil:
				if len(in.Answer) != len(tc.srv) {
					t.Logf("%d: incorrect number of answers: %#v", i, in)
					return
				}
			case tc.recursionExpected:
				if len(in.Answer) == 0 {
					t.Errorf("%d: expected forward resolution: %#v", i, in)
				}
				close(ch)
				return
			default:
				if len(in.Answer) != len(tc.expect) {
					t.Logf("%d: did not resolve or unexpected forward resolution: %#v", i, in)
					return
				}
			}
			for _, answer := range in.Answer {
				switch a := answer.(type) {
				case *dns.A:
					matches := false
					if a.A != nil {
						for _, expect := range tc.expect {
							if a.A.String() == expect.String() {
								matches = true
								break
							}
						}
					}
					if !matches {
						t.Errorf("%d: A record does not match any expected answer for %q: %v", i, tc.dnsQuestionName, a.A)
					}
				case *dns.SRV:
					matches := false
					for _, expect := range tc.srv {
						if expect.Port == a.Port && expect.Target == a.Target {
							matches = true
							break
						}
					}
					if !matches {
						t.Errorf("%d: SRV record does not match any expected answer %q: %#v", i, tc.dnsQuestionName, a)
					}
				default:
					t.Errorf("%d: expected an A or SRV record %q: %#v", i, tc.dnsQuestionName, in)
				}
			}
			t.Log(in)
			close(ch)
		}, 50*time.Millisecond, ch)
	}
}
Example #29
func TestDiagNodeConditions(t *testing.T) {
	_, nodeConfig, clientFile, err := testserver.StartTestAllInOne()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	client, err := testutil.GetClusterAdminKubeClient(clientFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodeDiag := clusterdiags.NodeDefinitions{KubeClient: client}
	err = wait.Poll(200*time.Millisecond, 5*time.Second, func() (bool, error) {
		if _, err := client.Nodes().Get(nodeConfig.NodeName); kapierror.IsNotFound(err) {
			return false, nil
		}
		return true, err
	})
	if err != nil {
		t.Errorf("unexpected error waiting for all-in-one node: %v", err)
	}

	// start by testing that the diagnostic passes with all-in-one up.
	result := nodeDiag.Check()
	if warnings := result.Warnings(); len(warnings) > 0 {
		t.Fatalf("expected no warnings with one node ready, but: %#v", warnings)
	} else if errors := result.Errors(); len(errors) > 0 {
		t.Fatalf("expected no errors with one node ready, but: %#v", errors)
	}

	// Make the node unschedulable and verify diagnostics notices
	err = wait.Poll(200*time.Millisecond, time.Second, func() (bool, error) {
		node, err := client.Nodes().Get(nodeConfig.NodeName)
		if err != nil {
			return false, err
		}
		node.Spec.Unschedulable = true
		if _, err := client.Nodes().Update(node); kapierror.IsConflict(err) {
			return false, nil
		}
		return true, err
	})
	if err != nil {
		t.Errorf("unexpected error making node unschedulable: %v", err)
	}
	result = nodeDiag.Check()
	if errors := result.Errors(); len(errors) != 1 ||
		!diagtype.MatchesDiagError(errors[0], "DClu0004") {
		t.Fatalf("expected 1 error about not having nodes, but: %#v", errors)
	} else if warnings := result.Warnings(); len(warnings) < 1 || !diagtype.MatchesDiagError(warnings[0], "DClu0003") {
		t.Fatalf("expected a warning about test-node not being schedulable, but: %#v", warnings)
	}

	// delete it and check with no nodes defined; should get an error about that.
	if err := client.Nodes().Delete(nodeConfig.NodeName); err != nil {
		t.Errorf("unexpected error deleting node: %v", err)
	}
	if errors := nodeDiag.Check().Errors(); len(errors) != 1 ||
		!diagtype.MatchesDiagError(errors[0], "DClu0004") {
		t.Fatalf("expected 1 error about not having nodes, not: %#v", errors)
	}

	// Next create a node and leave it in NotReady state. Should get a warning
	// about that, plus the previous error as there are still no nodes available.
	_, err = client.Nodes().Create(&kapi.Node{ObjectMeta: kapi.ObjectMeta{Name: "test-node"}})
	if err != nil {
		t.Fatalf("expected no errors creating a node: %#v", err)
	}
	result = nodeDiag.Check()
	if errors := result.Errors(); len(errors) != 1 ||
		!diagtype.MatchesDiagError(errors[0], "DClu0004") {
		t.Fatalf("expected 1 error about not having nodes, not: %#v", errors)
	} else if warnings := result.Warnings(); len(warnings) < 1 || !diagtype.MatchesDiagError(warnings[0], "DClu0002") {
		t.Fatalf("expected a warning about test-node not being ready, not: %#v", warnings)
	}
}
Example #30
func TestEndpointAdmission(t *testing.T) {
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	masterConfig.KubernetesMasterConfig.AdmissionConfig.PluginConfig = map[string]configapi.AdmissionPluginConfig{
		"RestrictedEndpointsAdmission": {
			Configuration: &configapi.DefaultAdmissionConfig{},
		},
	}
	masterConfig.NetworkConfig.ClusterNetworkCIDR = clusterNetworkCIDR
	masterConfig.NetworkConfig.ServiceNetworkCIDR = serviceNetworkCIDR

	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting kube client: %v", err)
	}
	clusterAdminOSClient, err := testutil.GetClusterAdminClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}
	clientConfig, err := testutil.GetClusterAdminClientConfig(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client config: %v", err)
	}

	// Cluster admin
	testOne(t, clusterAdminKubeClient, "default", "cluster", true)
	testOne(t, clusterAdminKubeClient, "default", "service", true)
	testOne(t, clusterAdminKubeClient, "default", "external", true)

	// Endpoint controller service account
	_, serviceAccountClient, _, err := testutil.GetClientForServiceAccount(clusterAdminKubeClient, *clientConfig, bootstrappolicy.DefaultOpenShiftInfraNamespace, bootstrappolicy.InfraEndpointControllerServiceAccountName)
	if err != nil {
		t.Fatalf("error getting endpoint controller service account: %v", err)
	}
	testOne(t, serviceAccountClient, "default", "cluster", true)
	testOne(t, serviceAccountClient, "default", "service", true)
	testOne(t, serviceAccountClient, "default", "external", true)

	// Project admin
	_, err = testserver.CreateNewProject(clusterAdminOSClient, *clientConfig, "myproject", "myadmin")
	if err != nil {
		t.Fatalf("error creating project: %v", err)
	}
	_, projectAdminClient, _, err := testutil.GetClientForUser(*clientConfig, "myadmin")
	if err != nil {
		t.Fatalf("error getting project admin client: %v", err)
	}

	testOne(t, projectAdminClient, "myproject", "cluster", false)
	testOne(t, projectAdminClient, "myproject", "service", false)
	testOne(t, projectAdminClient, "myproject", "external", true)

	// User without restricted endpoint permission can't modify IPs but can still do other modifications
	ep := testOne(t, clusterAdminKubeClient, "myproject", "cluster", true)
	ep.Annotations = map[string]string{"foo": "bar"}
	ep, err = projectAdminClient.Endpoints("myproject").Update(ep)
	if err != nil {
		t.Fatalf("unexpected error updating endpoint annotation: %v", err)
	}
	ep.Subsets[0].Addresses[0].IP = exampleAddresses["service"]
	ep, err = projectAdminClient.Endpoints("myproject").Update(ep)
	if err == nil {
		t.Fatalf("unexpected success modifying endpoint")
	}
}