func TestOverlappingRCs(t *testing.T) {
	client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()})

	for i := 0; i < 5; i++ {
		manager := NewReplicationManager(client, 10)
		manager.podStoreSynced = alwaysReady

		// Create RCs with staggered creation timestamps, shuffle them randomly, and insert them into the rc manager's store
		var controllers []*api.ReplicationController
		for j := 1; j < 10; j++ {
			controllerSpec := newReplicationController(1)
			controllerSpec.CreationTimestamp = util.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
			controllerSpec.Name = string(util.NewUUID())
			controllers = append(controllers, controllerSpec)
		}
		shuffledControllers := shuffle(controllers)
		for j := range shuffledControllers {
			manager.rcStore.Store.Add(shuffledControllers[j])
		}
		// Add a pod and make sure only the oldest rc is synced
		pods := newPodList(nil, 1, api.PodPending, controllers[0])
		rcKey := getKey(controllers[0], t)

		manager.addPod(&pods.Items[0])
		queueRC, _ := manager.queue.Get()
		if queueRC != rcKey {
			t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
		}
	}
}
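The shuffle helper called in the loop above is not included in this snippet. A minimal sketch, assuming it returns a new, randomly permuted slice (the name and signature come from the call site; the body and the math/rand import are assumptions):

// shuffle returns a new slice with the controllers in random order,
// using a random permutation of the indexes.
func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController {
	shuffled := make([]*api.ReplicationController, len(controllers))
	for i, j := range rand.Perm(len(controllers)) {
		shuffled[i] = controllers[j]
	}
	return shuffled
}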
func testPodWithVolume(path string, source *api.EmptyDirVolumeSource) *api.Pod {
	podName := "pod-" + string(util.NewUUID())

	return &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: "kubernetes/mounttest:0.1",
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: path,
						},
					},
				},
			},
			Volumes: []api.Volume{
				{
					Name: volumeName,
					VolumeSource: api.VolumeSource{
						EmptyDir: source,
					},
				},
			},
		},
	}
}
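A call site might look like the following (a sketch; the mount path is illustrative, and the empty EmptyDirVolumeSource requests the default disk-backed medium):

// Build a pod that mounts a default emptyDir volume at /test-volume.
pod := testPodWithVolume("/test-volume", &api.EmptyDirVolumeSource{})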
Example No. 3
func (v *VolumeOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer) error {
	clientConfig, err := f.ClientConfig()
	if err != nil {
		return err
	}
	v.OutputVersion = kcmdutil.OutputVersion(cmd, clientConfig.Version)

	cmdNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	mapper, typer := f.Object()

	v.DefaultNamespace = cmdNamespace
	v.Writer = out
	v.Mapper = mapper
	v.Typer = typer
	v.RESTClientFactory = f.Factory.RESTClient
	v.UpdatePodSpecForObject = f.UpdatePodSpecForObject

	if v.Add && len(v.Name) == 0 {
		v.Name = string(kutil.NewUUID())
		if len(v.Output) == 0 {
			fmt.Fprintf(v.Writer, "Generated volume name: %s\n", v.Name)
		}
	}
	// If a volume source was specified, ignore the default volume type
	if len(v.AddOpts.Source) > 0 {
		v.AddOpts.Type = ""
	}
	return nil
}
Example No. 4
// Create registers the given ReplicationController.
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (<-chan apiserver.RESTResult, error) {
	controller, ok := obj.(*api.ReplicationController)
	if !ok {
		return nil, fmt.Errorf("not a replication controller: %#v", obj)
	}
	if !api.ValidNamespace(ctx, &controller.ObjectMeta) {
		return nil, errors.NewConflict("controller", controller.Namespace, fmt.Errorf("Controller.Namespace does not match the provided context"))
	}

	if len(controller.Name) == 0 {
		controller.Name = util.NewUUID().String()
	}
	if errs := validation.ValidateReplicationController(controller); len(errs) > 0 {
		return nil, errors.NewInvalid("replicationController", controller.Name, errs)
	}

	api.FillObjectMetaSystemFields(ctx, &controller.ObjectMeta)

	return apiserver.MakeAsync(func() (runtime.Object, error) {
		err := rs.registry.CreateController(ctx, controller)
		if err != nil {
			return nil, err
		}
		return rs.registry.GetController(ctx, controller.Name)
	}), nil
}
Example No. 5
// TODO: To merge this with the emptyDir tests, we can make source a lambda.
func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod {
	podName := "pod-" + string(util.NewUUID())

	return &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  containerName,
					Image: "gcr.io/google_containers/mounttest:0.2",
					VolumeMounts: []api.VolumeMount{
						{
							Name:      volumeName,
							MountPath: path,
						},
					},
				},
			},
			RestartPolicy: api.RestartPolicyNever,
			Volumes:       mount(source),
		},
	}
}
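The mount helper on the Volumes line is not shown. A plausible reconstruction inferred from the call site, wrapping the host-path source in a single volume that reuses the shared volumeName (the body is an assumption):

// mount wraps the given host-path source in a one-element volume list.
func mount(source *api.HostPathVolumeSource) []api.Volume {
	return []api.Volume{
		{
			Name: volumeName,
			VolumeSource: api.VolumeSource{
				HostPath: source,
			},
		},
	}
}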
Example No. 6
// Create creates a new Secret object.
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
	secret, ok := obj.(*api.Secret)
	if !ok {
		return nil, fmt.Errorf("invalid object type")
	}

	if !api.ValidNamespace(ctx, &secret.ObjectMeta) {
		return nil, errors.NewConflict("secret", secret.Namespace, fmt.Errorf("Secret.Namespace does not match the provided context"))
	}

	if len(secret.Name) == 0 {
		secret.Name = string(util.NewUUID())
	}

	if errs := validation.ValidateSecret(secret); len(errs) > 0 {
		return nil, errors.NewInvalid("secret", secret.Name, errs)
	}
	api.FillObjectMetaSystemFields(ctx, &secret.ObjectMeta)

	err := rs.registry.CreateWithName(ctx, secret.Name, secret)
	if err != nil {
		return nil, err
	}
	return rs.registry.Get(ctx, secret.Name)
}
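To exercise the name-defaulting path, a caller can leave Name empty. A sketch, assuming rs has been wired to a registry elsewhere:

// With no Name set, Create fills one in from util.NewUUID before validating.
secret := &api.Secret{
	ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault},
	Data:       map[string][]byte{"password": []byte("s3cr3t")},
}
created, err := rs.Create(api.NewDefaultContext(), secret)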
Example No. 7
// Create registers the given ReplicationController.
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (<-chan apiserver.RESTResult, error) {
	controller, ok := obj.(*api.ReplicationController)
	if !ok {
		return nil, fmt.Errorf("not a replication controller: %#v", obj)
	}
	if !api.ValidNamespace(ctx, &controller.ObjectMeta) {
		return nil, errors.NewConflict("controller", controller.Namespace, fmt.Errorf("Controller.Namespace does not match the provided context"))
	}

	if len(controller.Name) == 0 {
		controller.Name = util.NewUUID().String()
	}
	// Pod Manifest ID should be assigned by the pod API
	controller.DesiredState.PodTemplate.DesiredState.Manifest.ID = ""
	if errs := validation.ValidateReplicationController(controller); len(errs) > 0 {
		return nil, errors.NewInvalid("replicationController", controller.Name, errs)
	}

	controller.CreationTimestamp = util.Now()

	return apiserver.MakeAsync(func() (runtime.Object, error) {
		err := rs.registry.CreateController(ctx, controller)
		if err != nil {
			return nil, err
		}
		return rs.registry.GetController(ctx, controller.Name)
	}), nil
}
Example No. 8
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, pod *api.Pod) {
	ns := "e2e-test-" + string(util.NewUUID())
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name)
	_, err := podClient.Create(pod)
	if err != nil {
		Fail(fmt.Sprintf("Failed to create pod: %v", err))
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pod to enter the running state. The wait loops until the pod
	// is running, so a pod that never runs causes this test to time out.
	err = waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure we get a hostIP for the pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
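waitForPodRunningInNamespace is an e2e utility that is not shown here. Its gist is a poll on the pod phase; a sketch, with the interval, timeout, and use of the wait.Poll helper all assumed:

// Poll the pod's phase until it is Running, or fail after a fixed timeout.
func waitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error {
	return wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := c.Pods(namespace).Get(podName)
		if err != nil {
			return false, err
		}
		return pod.Status.Phase == api.PodRunning, nil
	})
}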
Example No. 9
func testPDPod(diskName, targetHost string, readOnly bool) *api.Pod {
	pod := &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name: "pd-test-" + string(util.NewUUID()),
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:    "testpd",
					Image:   "gcr.io/google_containers/busybox",
					Command: []string{"sleep", "600"},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "testpd",
							MountPath: "/testpd",
						},
					},
				},
			},
			NodeName: targetHost,
		},
	}

	if testContext.Provider == "gce" || testContext.Provider == "gke" {
		pod.Spec.Volumes = []api.Volume{
			{
				Name: "testpd",
				VolumeSource: api.VolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
						PDName:   diskName,
						FSType:   "ext4",
						ReadOnly: readOnly,
					},
				},
			},
		}
	} else if testContext.Provider == "aws" {
		pod.Spec.Volumes = []api.Volume{
			{
				Name: "testpd",
				VolumeSource: api.VolumeSource{
					AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
						VolumeID: diskName,
						FSType:   "ext4",
						ReadOnly: readOnly,
					},
				},
			},
		}
	} else {
		panic("Unknown provider: " + testContext.Provider)
	}

	return pod
}
func getService(servicePorts []api.ServicePort) *api.Service {
	return &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: string(util.NewUUID()), Namespace: ns},
		Spec: api.ServiceSpec{
			Ports: servicePorts,
		},
	}
}
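Typical usage passes only the ports under test and lets everything else default (a sketch; the port values are illustrative):

// Service with a single TCP port; the name is a fresh UUID.
svc := getService([]api.ServicePort{
	{Name: "http", Port: 80, Protocol: api.ProtocolTCP},
})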
Example No. 11
func createDNSPod(namespace, probeCmd string) *api.Pod {
	pod := &api.Pod{
		TypeMeta: api.TypeMeta{
			Kind:       "Pod",
			APIVersion: latest.Version,
		},
		ObjectMeta: api.ObjectMeta{
			Name:      "dns-test-" + string(util.NewUUID()),
			Namespace: namespace,
		},
		Spec: api.PodSpec{
			Volumes: []api.Volume{
				{
					Name: "results",
					VolumeSource: api.VolumeSource{
						EmptyDir: &api.EmptyDirVolumeSource{},
					},
				},
			},
			Containers: []api.Container{
				// TODO: Consider scraping logs instead of running a webserver.
				{
					Name:  "webserver",
					Image: "gcr.io/google_containers/test-webserver",
					Ports: []api.ContainerPort{
						{
							Name:          "http",
							ContainerPort: 80,
						},
					},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "results",
							MountPath: "/results",
						},
					},
				},
				{
					Name:    "querier",
					Image:   "gcr.io/google_containers/dnsutils",
					Command: []string{"sh", "-c", probeCmd},
					VolumeMounts: []api.VolumeMount{
						{
							Name:      "results",
							MountPath: "/results",
						},
					},
				},
			},
		},
	}
	return pod
}
Example No. 12
func getUniqueName(basename string, existingNames *util.StringSet) string {
	if !existingNames.Has(basename) {
		return basename
	}

	for i := 0; i < 100; i++ {
		trialName := fmt.Sprintf("%v-%d", basename, i)
		if !existingNames.Has(trialName) {
			return trialName
		}
	}

	return string(util.NewUUID())
}
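For example (util.NewStringSet returns a set by value, so the caller passes its address):

existing := util.NewStringSet("router", "router-0")
// "router" and "router-0" are taken, so this returns "router-1".
name := getUniqueName("router", &existing)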
Example No. 13
func runLivenessTest(c *client.Client, podDescr *api.Pod, expectRestart bool) {
	ns := "e2e-test-" + string(util.NewUUID())
	_, err := createNamespaceIfDoesNotExist(c, ns)
	expectNoError(err, fmt.Sprintf("creating namespace %s", ns))

	By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns))
	_, err = c.Pods(ns).Create(podDescr)
	expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name))

	// At the end of the test, clean up by removing the pod.
	defer func() {
		By("deleting the pod")
		c.Pods(ns).Delete(podDescr.Name, nil)
	}()

	// Wait until the pod is no longer pending. (We check for a phase other than
	// 'Pending' rather than waiting for 'Running', since a failed pod goes to
	// 'Terminated', and waiting for 'Running' would then block indefinitely.)
	expectNoError(waitForPodNotPending(c, ns, podDescr.Name),
		fmt.Sprintf("starting pod %s in namespace %s", podDescr.Name, ns))
	By(fmt.Sprintf("Started pod %s in namespace %s", podDescr.Name, ns))

	// Check the pod's current state and verify that restartCount is present.
	By("checking the pod's current state and verifying that restartCount is present")
	pod, err := c.Pods(ns).Get(podDescr.Name)
	expectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", podDescr.Name, ns))
	initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
	By(fmt.Sprintf("Initial restart count of pod %s is %d", podDescr.Name, initialRestartCount))

	// Wait for at most 48 * 5 = 240s = 4 minutes until restartCount is incremented
	restarts := false
	for i := 0; i < 48; i++ {
		// Wait until restartCount is incremented.
		time.Sleep(5 * time.Second)
		pod, err = c.Pods(ns).Get(podDescr.Name)
		expectNoError(err, fmt.Sprintf("getting pod %s", podDescr.Name))
		restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, "liveness").RestartCount
		By(fmt.Sprintf("Restart count of pod %s in namespace %s is now %d", podDescr.Name, ns, restartCount))
		if restartCount > initialRestartCount {
			By(fmt.Sprintf("Restart count of pod %s in namespace %s increased from %d to %d during the test", podDescr.Name, ns, initialRestartCount, restartCount))
			restarts = true
			break
		}
	}

	if restarts != expectRestart {
		Fail(fmt.Sprintf("pod %s in namespace %s - expected restarts: %v, found restarts: %v", podDescr.Name, ns, expectRestart, restarts))
	}
}
Example No. 14
func makePodSpec(readinessProbe, livenessProbe *api.Probe) *api.Pod {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "nginx-" + string(util.NewUUID())},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:           "nginx",
					Image:          "nginx",
					LivenessProbe:  livenessProbe,
					ReadinessProbe: readinessProbe,
				},
			},
		},
	}
	return pod
}
Example No. 15
func makePodSpec(readinessProbe, livenessProbe *api.Probe) *api.Pod {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "test-webserver-" + string(util.NewUUID())},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:           "test-webserver",
					Image:          "gcr.io/google_containers/test-webserver",
					LivenessProbe:  livenessProbe,
					ReadinessProbe: readinessProbe,
				},
			},
		},
	}
	return pod
}
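A caller builds the probes and passes them in. A sketch with an HTTP liveness probe and no readiness probe (the probe values are illustrative):

livenessProbe := &api.Probe{
	Handler: api.Handler{
		HTTPGet: &api.HTTPGetAction{Path: "/", Port: util.NewIntOrStringFromInt(80)},
	},
	InitialDelaySeconds: 30,
}
// A nil readiness probe means the container is considered ready immediately.
pod := makePodSpec(nil, livenessProbe)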
func GetBootstrapPolicy() *authorizationapi.ClusterPolicy {
	policy := &authorizationapi.ClusterPolicy{
		ObjectMeta: kapi.ObjectMeta{
			Name:              authorizationapi.PolicyName,
			CreationTimestamp: util.Now(),
			UID:               util.NewUUID(),
		},
		LastModified: util.Now(),
		Roles:        make(map[string]*authorizationapi.ClusterRole),
	}

	roles := bootstrappolicy.GetBootstrapClusterRoles()
	for i := range roles {
		policy.Roles[roles[i].Name] = &roles[i]
	}

	return policy
}
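Note the &roles[i] indexing rather than ranging by value: with the loop-variable semantics of this era of Go (before Go 1.22), taking the address of the range variable would leave every map entry pointing at the same variable. For contrast, the pitfall being avoided:

// Buggy variant: `role` is a single variable reused across iterations, so
// every map entry ends up pointing at the last role in the slice.
for _, role := range roles {
	policy.Roles[role.Name] = &role // don't do this
}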
Example No. 17
// Return a prototypical entrypoint test pod
func entrypointTestPod() *api.Pod {
	podName := "client-containers-" + string(util.NewUUID())

	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: podName,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  testContainerName,
					Image: "gcr.io/google_containers/eptest:0.1",
				},
			},
			RestartPolicy: api.RestartPolicyNever,
		},
	}
}
func GetBootstrapPolicyBinding() *authorizationapi.ClusterPolicyBinding {
	policyBinding := &authorizationapi.ClusterPolicyBinding{
		ObjectMeta: kapi.ObjectMeta{
			Name:              ":Default",
			CreationTimestamp: util.Now(),
			UID:               util.NewUUID(),
		},
		LastModified: util.Now(),
		RoleBindings: make(map[string]*authorizationapi.ClusterRoleBinding),
	}

	bindings := bootstrappolicy.GetBootstrapClusterRoleBindings()
	for i := range bindings {
		policyBinding.RoleBindings[bindings[i].Name] = &bindings[i]
	}

	return policyBinding
}
Example No. 19
func NewWebserverTest(client *client.Client, namespace string, serviceName string) *WebserverTest {
	t := &WebserverTest{}
	t.Client = client
	t.Namespace = namespace
	t.ServiceName = serviceName
	t.TestId = t.ServiceName + "-" + string(util.NewUUID())
	t.Labels = map[string]string{
		"testid": t.TestId,
	}

	t.rcs = make(map[string]bool)
	t.services = make(map[string]bool)

	t.name = "webserver"
	t.image = "gcr.io/google_containers/test-webserver"

	return t
}
Example No. 20
func createPD() (string, error) {
	if testContext.Provider == "gce" {
		pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))

		zone := testContext.CloudConfig.Zone
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		err := exec.Command("gcloud", "compute", "--project="+testContext.CloudConfig.ProjectID, "disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()
		if err != nil {
			return "", err
		}
		return pdName, nil
	} else {
		volumes, ok := testContext.CloudConfig.Provider.(aws_cloud.Volumes)
		if !ok {
			return "", fmt.Errorf("Provider does not support volumes")
		}
		volumeOptions := &aws_cloud.VolumeOptions{}
		volumeOptions.CapacityMB = 10 * 1024
		return volumes.CreateVolume(volumeOptions)
	}
}
func newReplicationController(replicas int) *api.ReplicationController {
	rc := &api.ReplicationController{
		TypeMeta: api.TypeMeta{APIVersion: testapi.Version()},
		ObjectMeta: api.ObjectMeta{
			UID:             util.NewUUID(),
			Name:            "foobar",
			Namespace:       api.NamespaceDefault,
			ResourceVersion: "18",
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{"foo": "bar"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{
						"name": "foo",
						"type": "production",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Image:                  "foo/bar",
							TerminationMessagePath: api.TerminationMessagePathDefault,
							ImagePullPolicy:        api.PullIfNotPresent,
							SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
						},
					},
					RestartPolicy: api.RestartPolicyAlways,
					DNSPolicy:     api.DNSDefault,
					NodeSelector: map[string]string{
						"baz": "blah",
					},
				},
			},
		},
	}
	return rc
}
Example No. 22
// CreatePodForImage creates a Pod for the given image name. The dockerImageReference
// must be a full docker pull spec.
func CreatePodForImage(dockerImageReference string) *kapi.Pod {
	podName := namer.GetPodName("test-pod", string(kutil.NewUUID()))
	return &kapi.Pod{
		TypeMeta: kapi.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: kapi.ObjectMeta{
			Name:   podName,
			Labels: map[string]string{"name": podName},
		},
		Spec: kapi.PodSpec{
			Containers: []kapi.Container{
				{
					Name:  podName,
					Image: dockerImageReference,
				},
			},
			RestartPolicy: kapi.RestartPolicyNever,
		},
	}
}
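Usage is a one-liner; the image reference below is illustrative and must be a full pull spec, per the comment above:

pod := CreatePodForImage("docker.io/openshift/hello-openshift:latest")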
Example No. 23
func (rs *REST) Create(ctx api.Context, obj runtime.Object) (<-chan apiserver.RESTResult, error) {
	pod := obj.(*api.Pod)
	if !api.ValidNamespace(ctx, &pod.ObjectMeta) {
		return nil, errors.NewConflict("pod", pod.Namespace, fmt.Errorf("Pod.Namespace does not match the provided context"))
	}
	pod.DesiredState.Manifest.UUID = util.NewUUID().String()
	if len(pod.Name) == 0 {
		pod.Name = pod.DesiredState.Manifest.UUID
	}
	pod.DesiredState.Manifest.ID = pod.Name
	if errs := validation.ValidatePod(pod); len(errs) > 0 {
		return nil, errors.NewInvalid("pod", pod.Name, errs)
	}
	pod.CreationTimestamp = util.Now()

	return apiserver.MakeAsync(func() (runtime.Object, error) {
		if err := rs.registry.CreatePod(ctx, pod); err != nil {
			return nil, err
		}
		return rs.registry.GetPod(ctx, pod.Name)
	}), nil
}
Example No. 24
		probeCmd := "for i in `seq 1 600`; do "
		for _, name := range namesToResolve {
			probeCmd += fmt.Sprintf("wget -O /dev/null %s && echo OK > /results/%s;", name, name)
		}
		probeCmd += "sleep 1; done"

		// Run a pod which probes DNS and exposes the results by HTTP.
		By("creating a pod to probe DNS")
		pod := &api.Pod{
			TypeMeta: api.TypeMeta{
				Kind:       "Pod",
				APIVersion: latest.Version,
			},
			ObjectMeta: api.ObjectMeta{
				Name: "dns-test-" + string(util.NewUUID()),
			},
			Spec: api.PodSpec{
				Volumes: []api.Volume{
					{
						Name: "results",
						VolumeSource: api.VolumeSource{
							EmptyDir: &api.EmptyDirVolumeSource{},
						},
					},
				},
				Containers: []api.Container{
					{
						Name:  "webserver",
						Image: "gcr.io/google_containers/test-webserver",
						VolumeMounts: []api.VolumeMount{
Example No. 25
		Expect(nodeCount).NotTo(BeZero())

		totalPodCapacity = 0
		for _, node := range nodes.Items {
			podCapacity, found := node.Status.Capacity["pods"]
			Expect(found).To(Equal(true))
			totalPodCapacity += podCapacity.Value()
		}

		err = deleteTestingNS(c)
		expectNoError(err)

		nsForTesting, err := createTestingNS("maxp", c)
		expectNoError(err)
		ns = nsForTesting.Name
		uuid = string(util.NewUUID())
	})

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := DeleteRC(c, ns, RCName)
			expectNoError(err)
		}

		By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
		if err := c.Namespaces().Delete(ns); err != nil {
			Failf("Couldn't delete ns %s", err)
		}
	})
Example No. 26
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}

var _ = Describe("Pods", func() {
	var c *client.Client

	BeforeEach(func() {
		var err error
		c, err = loadClient()
		expectNoError(err)
	})

	PIt("should get a host IP", func() {
		name := "pod-hostip-" + string(util.NewUUID())
		testHostIP(c, &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: name,
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "test",
						Image: "gcr.io/google_containers/pause",
					},
				},
			},
		})
	})
	It("should be submitted and removed", func() {
Example No. 27
func TestOAuthLDAP(t *testing.T) {
	var (
		randomSuffix = string(kutil.NewUUID())

		providerName = "myldapprovider"

		bindDN       = "uid=admin,ou=company,ou=" + randomSuffix
		bindPassword = "******" + randomSuffix

		searchDN     = "ou=company,ou=" + randomSuffix
		searchAttr   = "myuid" + randomSuffix
		searchScope  = "one"              // must be "one","sub", or "base"
		searchFilter = "(myAttr=myValue)" // must be a valid LDAP filter format

		nameAttr1  = "missing-name-attr"
		nameAttr2  = "a-display-name" + randomSuffix
		idAttr1    = "missing-id-attr"
		idAttr2    = "dn" // "dn" is a special value, so don't add a random suffix to make sure we handle it correctly
		emailAttr1 = "missing-attr"
		emailAttr2 = "c-mail" + randomSuffix
		loginAttr1 = "missing-attr"
		loginAttr2 = "d-mylogin" + randomSuffix

		myUserUID      = "myuser"
		myUserName     = "******"
		myUserEmail    = "*****@*****.**"
		myUserDN       = searchAttr + "=" + myUserUID + "," + searchDN
		myUserPassword = "******" + randomSuffix
	)

	expectedAttributes := [][]byte{}
	for _, attr := range kutil.NewStringSet(searchAttr, nameAttr1, nameAttr2, idAttr1, idAttr2, emailAttr1, emailAttr2, loginAttr1, loginAttr2).List() {
		expectedAttributes = append(expectedAttributes, []byte(attr))
	}
	expectedSearchRequest := ldapserver.SearchRequest{
		BaseObject:   []byte(searchDN),
		Scope:        ldapserver.SearchRequestSingleLevel,
		DerefAliases: 0,
		SizeLimit:    2,
		TimeLimit:    0,
		TypesOnly:    false,
		Attributes:   expectedAttributes,
		Filter:       fmt.Sprintf("(&%s(%s=%s))", searchFilter, searchAttr, myUserUID),
	}

	// Start LDAP server
	ldapAddress, err := testutil.FindAvailableBindAddress(8389, 8400)
	if err != nil {
		t.Fatalf("could not allocate LDAP bind address: %v", err)
	}
	ldapServer := testutil.NewTestLDAPServer()
	ldapServer.SetPassword(bindDN, bindPassword)
	ldapServer.Start(ldapAddress)
	defer ldapServer.Stop()

	masterOptions, err := testutil.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	masterOptions.OAuthConfig.IdentityProviders[0] = configapi.IdentityProvider{
		Name:            providerName,
		UseAsChallenger: true,
		UseAsLogin:      true,
		Provider: runtime.EmbeddedObject{
			&configapi.LDAPPasswordIdentityProvider{
				URL:          fmt.Sprintf("ldap://%s/%s?%s?%s?%s", ldapAddress, searchDN, searchAttr, searchScope, searchFilter),
				BindDN:       bindDN,
				BindPassword: bindPassword,
				Insecure:     true,
				CA:           "",
				Attributes: configapi.LDAPAttributes{
					ID:                []string{idAttr1, idAttr2},
					PreferredUsername: []string{loginAttr1, loginAttr2},
					Name:              []string{nameAttr1, nameAttr2},
					Email:             []string{emailAttr1, emailAttr2},
				},
			},
		},
	}

	clusterAdminKubeConfig, err := testutil.StartConfiguredMaster(masterOptions)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// Use the server and CA info
	anonConfig := kclient.Config{}
	anonConfig.Host = clusterAdminClientConfig.Host
	anonConfig.CAFile = clusterAdminClientConfig.CAFile
	anonConfig.CAData = clusterAdminClientConfig.CAData

	// Make sure we can't authenticate as a missing user
	ldapServer.ResetRequests()
	if _, err := tokencmd.RequestToken(&anonConfig, nil, myUserUID, myUserPassword); err == nil {
		t.Error("Expected error, got none")
	}
	if len(ldapServer.BindRequests) != 1 {
		t.Error("Expected a single bind request for the search phase, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests)
	}
	if len(ldapServer.SearchRequests) != 1 {
		t.Error("Expected a single search request, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests)
	}

	// Add user
	ldapServer.SetPassword(myUserDN, myUserPassword)
	ldapServer.AddSearchResult(myUserDN, map[string]string{emailAttr2: myUserEmail, nameAttr2: myUserName, loginAttr2: myUserUID})

	// Make sure we can't authenticate with a bad password
	ldapServer.ResetRequests()
	if _, err := tokencmd.RequestToken(&anonConfig, nil, myUserUID, "badpassword"); err == nil {
		t.Error("Expected error, got none")
	}
	if len(ldapServer.BindRequests) != 2 {
		t.Error("Expected a bind request for the search phase and a failed bind request for the auth phase, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests)
	}
	if len(ldapServer.SearchRequests) != 1 {
		t.Error("Expected a single search request, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests)
	}

	// Make sure we can get a token with a good password
	ldapServer.ResetRequests()
	accessToken, err := tokencmd.RequestToken(&anonConfig, nil, myUserUID, myUserPassword)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if len(accessToken) == 0 {
		t.Errorf("Expected access token, got none")
	}
	if len(ldapServer.BindRequests) != 2 {
		t.Error("Expected a bind request for the search phase and a failed bind request for the auth phase, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests)
	}
	if len(ldapServer.SearchRequests) != 1 {
		t.Error("Expected a single search request, got %d:\n%#v", len(ldapServer.BindRequests), ldapServer.BindRequests)
	}
	if !reflect.DeepEqual(expectedSearchRequest.BaseObject, ldapServer.SearchRequests[0].BaseObject) {
		t.Errorf("Expected search base DN\n\t%#v\ngot\n\t%#v",
			string(expectedSearchRequest.BaseObject),
			string(ldapServer.SearchRequests[0].BaseObject),
		)
	}
	if !reflect.DeepEqual(expectedSearchRequest.Filter, ldapServer.SearchRequests[0].Filter) {
		t.Errorf("Expected search filter\n\t%#v\ngot\n\t%#v",
			string(expectedSearchRequest.Filter),
			string(ldapServer.SearchRequests[0].Filter),
		)
	}
	{
		expectedAttrs := []string{}
		for _, a := range expectedSearchRequest.Attributes {
			expectedAttrs = append(expectedAttrs, string(a))
		}
		actualAttrs := []string{}
		for _, a := range ldapServer.SearchRequests[0].Attributes {
			actualAttrs = append(actualAttrs, string(a))
		}
		if !reflect.DeepEqual(expectedAttrs, actualAttrs) {
			t.Errorf("Expected search attributes\n\t%#v\ngot\n\t%#v", expectedAttrs, actualAttrs)
		}
	}

	// Make sure we can use the token, and it represents who we expect
	userConfig := anonConfig
	userConfig.BearerToken = accessToken
	userClient, err := client.New(&userConfig)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	user, err := userClient.Users().Get("~")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if user.Name != myUserUID {
		t.Fatalf("Expected %s as the user, got %v", myUserUID, user)
	}

	// Make sure the identity got created and contained the mapped attributes
	identity, err := clusterAdminClient.Identities().Get(fmt.Sprintf("%s:%s", providerName, myUserDN))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if identity.ProviderUserName != myUserDN {
		t.Error("Expected %q, got %q", myUserDN, identity.ProviderUserName)
	}
	if v := identity.Extra[authapi.IdentityDisplayNameKey]; v != myUserName {
		t.Error("Expected %q, got %q", myUserName, v)
	}
	if v := identity.Extra[authapi.IdentityPreferredUsernameKey]; v != myUserUID {
		t.Error("Expected %q, got %q", myUserUID, v)
	}
	if v := identity.Extra[authapi.IdentityEmailKey]; v != myUserEmail {
		t.Error("Expected %q, got %q", myUserEmail, v)
	}
}
Example No. 28
		var err error
		c, err = loadClient()
		Expect(err).NotTo(HaveOccurred())
	})

	It("should be sent by kubelets and the scheduler about pods scheduling and running", func() {
		provider := testContext.Provider
		if len(provider) > 0 && provider != "gce" && provider != "gke" && provider != "aws" {
			By(fmt.Sprintf("skipping TestKubeletSendsEvent on cloud provider %s", provider))
			return
		}

		podClient := c.Pods(api.NamespaceDefault)

		By("creating the pod")
		name := "send-events-" + string(util.NewUUID())
		value := strconv.Itoa(time.Now().Nanosecond())
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: name,
				Labels: map[string]string{
					"name": "foo",
					"time": value,
				},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "p",
						Image: "kubernetes/serve_hostname",
						Ports: []api.ContainerPort{{ContainerPort: 80}},
Example No. 29
		podsPerNode int
		timeout     time.Duration
	}

	deleteTests := []DeleteTest{
		{podsPerNode: 10, timeout: 1 * time.Minute},
	}

	for _, itArg := range deleteTests {
		name := fmt.Sprintf(
			"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
		It(name, func() {
			totalPods := itArg.podsPerNode * numNodes

			By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
			rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(util.NewUUID()))

			Expect(RunRC(RCConfig{
				Client:    framework.Client,
				Name:      rcName,
				Namespace: framework.Namespace.Name,
				Image:     "gcr.io/google_containers/pause:go",
				Replicas:  totalPods,
			})).NotTo(HaveOccurred())
			// Perform a sanity check so that we know all desired pods are
			// running on the nodes according to kubelet. The timeout is set to
			// only 30 seconds here because RunRC already waited for all pods to
			// transition to the running status.
			Expect(waitTillNPodsRunningOnNodes(framework.Client, nodeNames, rcName, framework.Namespace.Name, totalPods,
				time.Second*30)).NotTo(HaveOccurred())
			logOneTimeResourceUsageSummary(framework.Client, nodeNames.List(), cpuIntervalInSeconds)
Example No. 30
package e2e

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("Secrets", func() {
	f := NewFramework("secrets")

	It("should be consumable from pods", func() {
		name := "secret-test-" + string(util.NewUUID())
		volumeName := "secret-volume"
		volumeMountPath := "/etc/secret-volume"

		secret := &api.Secret{
			ObjectMeta: api.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1\n"),
				"data-2": []byte("value-2\n"),
				"data-3": []byte("value-3\n"),
			},
		}