Example #1
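The handler behind kubectl port-forward: it validates the --pod flag and the port arguments, fetches the pod and requires it to be running, converts an interrupt signal into the closing of a stop channel, and then hands a GET request against the pod's portforward subresource to the portForwarder.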
func RunPortForward(f *cmdutil.Factory, cmd *cobra.Command, args []string, fw portForwarder) error {
	podName := cmdutil.GetFlagString(cmd, "pod")
	if len(podName) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for exec")
	}
	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "at least 1 PORT is required for port-forward")
	}

	namespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase)
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)

	stopCh := make(chan struct{}, 1)
	go func() {
		<-signals
		close(stopCh)
	}()

	req := client.RESTClient.Get().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")

	return fw.ForwardPorts(req, config, args, stopCh)
}
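The portForwarder parameter's definition isn't included in this example; judging only from the call above, it would satisfy an interface along these lines (an inferred sketch, not the actual kubectl type):

type portForwarder interface {
	// ForwardPorts forwards the given ports over req until stopChan is closed.
	ForwardPorts(req *client.Request, config *client.Config, ports []string, stopChan chan struct{}) error
}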
Example #2
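An apiserver integration test for metrics: it issues a single list request so that the request-handling metrics have at least one sample, scrapes the metrics endpoint, and asserts that the expected series are present.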
func TestApiserverMetrics(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	// Make a request to the apiserver to ensure there's at least one data point
	// for the metrics we're expecting -- otherwise, they won't be exported.
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})
	if _, err := client.Pods(api.NamespaceDefault).List(labels.Everything(), fields.Everything()); err != nil {
		t.Fatalf("unexpected error getting pods: %v", err)
	}

	metrics, err := scrapeMetrics(s)
	if err != nil {
		t.Fatal(err)
	}
	checkForExpectedMetrics(t, metrics, []string{
		"apiserver_request_count",
		"apiserver_request_latencies",
	})
}
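The scrapeMetrics and checkForExpectedMetrics helpers are defined elsewhere in the test package. A minimal sketch of the scraping half, assuming the conventional /metrics text endpoint and returning the raw body rather than parsed metric families (hypothetical helper, not the real one):

func scrapeMetrics(s *httptest.Server) (string, error) {
	// Fetch the Prometheus text exposition from the test server.
	resp, err := http.Get(s.URL + "/metrics")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return string(body), err
}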
Example #3
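An early integration test for the client against a hand-assembled master: for each API version it verifies the server version, an initially empty pod list, a validation failure for a container without an image, a successful create, and that the new pod is listed but not yet scheduled to a host.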
func TestClient(t *testing.T) {
	helper, err := master.NewEtcdHelper(newEtcdClient(), "v1beta1")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	m := master.New(&master.Config{
		EtcdHelper:        helper,
		KubeletClient:     client.FakeKubeletClient{},
		EnableLogsSupport: false,
		EnableUISupport:   false,
		APIPrefix:         "/api",
		Authorizer:        apiserver.NewAlwaysAllowAuthorizer(),
	})

	s := httptest.NewServer(m.Handler)
	defer s.Close()

	testCases := []string{
		"v1beta1",
		"v1beta2",
	}
	for _, apiVersion := range testCases {
		ns := api.NamespaceDefault
		deleteAllEtcdKeys()
		client := client.NewOrDie(&client.Config{Host: s.URL, Version: apiVersion})

		info, err := client.ServerVersion()
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
			t.Errorf("expected %#v, got %#v", e, a)
		}

		pods, err := client.Pods(ns).List(labels.Everything())
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(pods.Items) != 0 {
			t.Errorf("expected no pods, got %#v", pods)
		}

		// get a validation error
		pod := &api.Pod{
			DesiredState: api.PodState{
				Manifest: api.ContainerManifest{
					Version: "v1beta2",
					Containers: []api.Container{
						{
							Name: "test",
						},
					},
				},
			},
		}
		got, err := client.Pods(ns).Create(pod)
		if err == nil {
			t.Fatalf("unexpected non-error: %v", err)
		}

		// get a created pod
		pod.DesiredState.Manifest.Containers[0].Image = "an-image"
		got, err = client.Pods(ns).Create(pod)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if got.Name == "" {
			t.Errorf("unexpected empty pod Name %v", got)
		}

		// pod is shown, but not scheduled
		pods, err = client.Pods(ns).List(labels.Everything())
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(pods.Items) != 1 {
			t.Errorf("expected one pod, got %#v", pods)
		}
		actual := pods.Items[0]
		if actual.Name != got.Name {
			t.Errorf("expected pod %#v, got %#v", got, actual)
		}
		if actual.CurrentState.Host != "" {
			t.Errorf("expected pod to be unscheduled, got %#v", actual)
		}
	}
}
Example #4
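The same client test rewritten against the shared test framework and the versioned pod API: the flow is unchanged, but setup shrinks to one framework call and scheduling is now checked through Spec.NodeName.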
func TestClient(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	info, err := client.ServerVersion()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
		t.Errorf("expected %#v, got %#v", e, a)
	}

	pods, err := client.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 0 {
		t.Errorf("expected no pods, got %#v", pods)
	}

	// get a validation error
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "test",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name: "test",
				},
			},
		},
	}

	got, err := client.Pods(ns).Create(pod)
	if err == nil {
		t.Fatalf("unexpected non-error: %v", got)
	}

	// get a created pod
	pod.Spec.Containers[0].Image = "an-image"
	got, err = client.Pods(ns).Create(pod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name == "" {
		t.Errorf("unexpected empty pod Name %v", got)
	}

	// pod is shown, but not scheduled
	pods, err = client.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 1 {
		t.Errorf("expected one pod, got %#v", pods)
	}
	actual := pods.Items[0]
	if actual.Name != got.Name {
		t.Errorf("expected pod %#v, got %#v", got, actual)
	}
	if actual.Spec.NodeName != "" {
		t.Errorf("expected pod to be unscheduled, got %#v", actual)
	}
}
Example #5
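A watch latency stress test, currently disabled pending issue #6059: it creates 50 labeled pods with one watcher each, floods the apiserver with thousands of unrelated changes through a pool of 50 workers, then updates each watched pod and logs how long every watch notification took to arrive.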
func TestMultiWatch(t *testing.T) {
	// Disable this test as long as it demonstrates a problem.
	// TODO: Reenable this test when we get #6059 resolved.
	t.Skip("disabled until #6059 is resolved")
	const watcherCount = 50
	runtime.GOMAXPROCS(watcherCount)

	framework.DeleteAllEtcdKeys()
	defer framework.DeleteAllEtcdKeys()
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()})

	dummyEvent := func(i int) *api.Event {
		name := fmt.Sprintf("unrelated-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
				Namespace: ns,
			},
			InvolvedObject: api.ObjectReference{
				Name:      name,
				Namespace: ns,
			},
			Reason: fmt.Sprintf("unrelated change %v", i),
		}
	}

	type timePair struct {
		t    time.Time
		name string
	}

	receivedTimes := make(chan timePair, watcherCount*2)
	watchesStarted := sync.WaitGroup{}

	// make a bunch of pods and watch them
	for i := 0; i < watcherCount; i++ {
		watchesStarted.Add(1)
		name := fmt.Sprintf("multi-watch-%v", i)
		got, err := client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: labels.Set{"watchlabel": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "nothing",
					Image: "kubernetes/pause",
				}},
			},
		})

		if err != nil {
			t.Fatalf("Couldn't make %v: %v", name, err)
		}
		go func(name, rv string) {
			w, err := client.Pods(ns).Watch(
				labels.Set{"watchlabel": name}.AsSelector(),
				fields.Everything(),
				rv,
			)
			if err != nil {
				panic(fmt.Sprintf("watch error for %v: %v", name, err))
			}
			defer w.Stop()
			watchesStarted.Done()
			e, ok := <-w.ResultChan() // should get the update (that we'll do below)
			if !ok {
				panic(fmt.Sprintf("%v ended early?", name))
			}
			if e.Type != watch.Modified {
				panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
			}
			receivedTimes <- timePair{time.Now(), name}
		}(name, got.ObjectMeta.ResourceVersion)
	}
	log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)

	// wait for watches to start before we start spamming the system with
	// objects below, otherwise we'll hit the watch window restriction.
	watchesStarted.Wait()

	const (
		useEventsAsUnrelatedType = false
		usePodsAsUnrelatedType   = true
	)

	// make a bunch of unrelated changes in parallel
	if useEventsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					if _, err := client.Events(ns).Create(dummyEvent(i)); err != nil {
						panic(fmt.Sprintf("couldn't make an event: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}
	if usePodsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					name := fmt.Sprintf("unrelated-%v", i)
					_, err := client.Pods(ns).Create(&api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name: name,
						},
						Spec: api.PodSpec{
							Containers: []api.Container{{
								Name:  "nothing",
								Image: "kubernetes/pause",
							}},
						},
					})

					if err != nil {
						panic(fmt.Sprintf("couldn't make unrelated pod: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}

	// Now we still have changes being made in parallel, but at least 1000 have been made.
	// Make some updates to send down the watches.
	sentTimes := make(chan timePair, watcherCount*2)
	for i := 0; i < watcherCount; i++ {
		go func(i int) {
			name := fmt.Sprintf("multi-watch-%v", i)
			pod, err := client.Pods(ns).Get(name)
			if err != nil {
				panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
			}
			pod.Spec.Containers[0].Image = "kubernetes/pause:1"
			sentTimes <- timePair{time.Now(), name}
			if _, err := client.Pods(ns).Update(pod); err != nil {
				panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
			}
		}(i)
	}

	sent := map[string]time.Time{}
	for i := 0; i < watcherCount; i++ {
		tp := <-sentTimes
		sent[tp.name] = tp.t
	}
	log.Printf("all changes made")
	dur := map[string]time.Duration{}
	for i := 0; i < watcherCount; i++ {
		tp := <-receivedTimes
		delta := tp.t.Sub(sent[tp.name])
		dur[tp.name] = delta
		log.Printf("%v: %v", tp.name, delta)
	}
	log.Printf("all watches ended")
	t.Errorf("durations: %v", dur)
}
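Both "unrelated changes" blocks above use the same concurrency shape: one feeder goroutine fills a buffered work channel, a fixed pool of workers drains it, and the main goroutine counts completions. The pattern in isolation, runnable on its own (illustrative names and smaller counts than the test uses):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const jobs, workers = 200, 5
	toMake := make(chan int, jobs)
	made := make(chan int, jobs)

	// Feeder: queue every job index, then close so workers can drain and exit.
	go func() {
		for i := 0; i < jobs; i++ {
			toMake <- i
		}
		close(toMake)
	}()

	// Fixed-size worker pool; each worker loops until the queue is closed.
	var wg sync.WaitGroup
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := range toMake {
				made <- i // stand-in for the API call the test performs here
			}
		}()
	}

	// Count completions, as the test does with its changeMade channel.
	for i := 0; i < jobs; i++ {
		<-made
	}
	wg.Wait()
	fmt.Println("all", jobs, "jobs done")
}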
Example #6
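The handler behind kubectl exec: it validates the pod name and command, defaults the container to the pod's first one, optionally puts a terminal stdin into raw mode (restored on clean exit and on SIGTERM), and executes the command through a GET request to the pod's exec subresource.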
func RunExec(f *cmdutil.Factory, cmd *cobra.Command, cmdIn io.Reader, cmdOut, cmdErr io.Writer, p *execParams, args []string, re remoteExecutor) error {
	if len(p.podName) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for exec")
	}

	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "COMMAND is required for exec")
	}
	namespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(p.podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase)
	}

	containerName := p.containerName
	if len(containerName) == 0 {
		containerName = pod.Spec.Containers[0].Name
	}

	var stdin io.Reader
	tty := p.tty
	if p.stdin {
		stdin = cmdIn
		if tty {
			if file, ok := cmdIn.(*os.File); ok {
				inFd := file.Fd()
				if term.IsTerminal(inFd) {
					oldState, err := term.SetRawTerminal(inFd)
					if err != nil {
						glog.Fatal(err)
					}
					// this handles a clean exit, where the command finished
					defer term.RestoreTerminal(inFd, oldState)

					// SIGINT is handled by term.SetRawTerminal (it runs a goroutine that listens
					// for SIGINT and restores the terminal before exiting)

					// this handles SIGTERM
					sigChan := make(chan os.Signal, 1)
					signal.Notify(sigChan, syscall.SIGTERM)
					go func() {
						<-sigChan
						term.RestoreTerminal(inFd, oldState)
						os.Exit(0)
					}()
				} else {
					glog.Warning("Stdin is not a terminal")
				}
			} else {
				tty = false
				glog.Warning("Unable to use a TTY")
			}
		}
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	req := client.RESTClient.Get().
		Resource("pods").
		Name(pod.Name).
		Namespace(namespace).
		SubResource("exec").
		Param("container", containerName)

	return re.Execute(req, config, args, stdin, cmdOut, cmdErr, tty)
}
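As with portForwarder in the first example, the remoteExecutor type isn't part of this snippet; from the call it presumably looks roughly like this (an inferred sketch, not the actual definition):

type remoteExecutor interface {
	// Execute runs command in the container addressed by req, wiring the
	// given streams and TTY setting through to the remote process.
	Execute(req *client.Request, config *client.Config, command []string, stdin io.Reader, stdout, stderr io.Writer, tty bool) error
}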
Example #7
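A later revision of the port-forward handler: the request is issued as a POST, and only if the server answers forbidden or method-not-supported does the command retry once over the legacy GET, reporting the original POST error if both fail.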
func RunPortForward(f *cmdutil.Factory, cmd *cobra.Command, args []string, fw portForwarder) error {
	podName := cmdutil.GetFlagString(cmd, "pod")
	if len(podName) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for exec")
	}
	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "at least 1 PORT is required for port-forward")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase)
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)

	stopCh := make(chan struct{}, 1)
	go func() {
		<-signals
		close(stopCh)
	}()

	req := client.RESTClient.Post().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")

	postErr := fw.ForwardPorts(req, config, args, stopCh)

	// if we don't have an error, return.  If we did get an error, try a GET because v3.0.0 shipped with port-forward running as a GET.
	if postErr == nil {
		return nil
	}

	// only try the GET if the error is forbidden or method-not-supported; otherwise retrying with a GET probably won't help
	if !apierrors.IsForbidden(postErr) && !apierrors.IsMethodNotSupported(postErr) {
		return postErr
	}

	getReq := client.RESTClient.Get().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")
	getErr := fw.ForwardPorts(getReq, config, args, stopCh)
	if getErr == nil {
		return nil
	}

	// if the GET also failed, return the postErr; it's more likely to be correct, and GET is the legacy path
	return postErr
}
Example #8
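The matching revision of the exec handler: pod, container, and command now come from an extraction helper, and the request uses the same POST-first, legacy-GET-fallback scheme as the port-forward command above.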
func RunExec(f *cmdutil.Factory, cmd *cobra.Command, cmdIn io.Reader, cmdOut, cmdErr io.Writer, p *execParams, argsIn []string, re remoteExecutor) error {
	podName, containerName, args, err := extractPodAndContainer(cmd, argsIn, p)
	if err != nil {
		return err
	}
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod %s is not running. Current status=%v", podName, pod.Status.Phase)
	}

	if len(containerName) == 0 {
		glog.V(4).Infof("defaulting container name to %s", pod.Spec.Containers[0].Name)
		containerName = pod.Spec.Containers[0].Name
	}

	var stdin io.Reader
	tty := p.tty
	if p.stdin {
		stdin = cmdIn
		if tty {
			if file, ok := cmdIn.(*os.File); ok {
				inFd := file.Fd()
				if term.IsTerminal(inFd) {
					oldState, err := term.SetRawTerminal(inFd)
					if err != nil {
						glog.Fatal(err)
					}
					// this handles a clean exit, where the command finished
					defer term.RestoreTerminal(inFd, oldState)

					// SIGINT is handled by term.SetRawTerminal (it runs a goroutine that listens
					// for SIGINT and restores the terminal before exiting)

					// this handles SIGTERM
					sigChan := make(chan os.Signal, 1)
					signal.Notify(sigChan, syscall.SIGTERM)
					go func() {
						<-sigChan
						term.RestoreTerminal(inFd, oldState)
						os.Exit(0)
					}()
				} else {
					glog.Warning("Stdin is not a terminal")
				}
			} else {
				tty = false
				glog.Warning("Unable to use a TTY")
			}
		}
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	req := client.RESTClient.Post().
		Resource("pods").
		Name(pod.Name).
		Namespace(namespace).
		SubResource("exec").
		Param("container", containerName)

	postErr := re.Execute(req, config, args, stdin, cmdOut, cmdErr, tty)

	// if we don't have an error, return.  If we did get an error, try a GET because v3.0.0 shipped with exec running as a GET.
	if postErr == nil {
		return nil
	}

	// only try the GET if the error is forbidden or method-not-supported; otherwise retrying with a GET probably won't help
	if !apierrors.IsForbidden(postErr) && !apierrors.IsMethodNotSupported(postErr) {
		return postErr
	}

	getReq := client.RESTClient.Get().
		Resource("pods").
		Name(pod.Name).
		Namespace(namespace).
		SubResource("exec").
		Param("container", containerName)
	getErr := re.Execute(getReq, config, args, stdin, cmdOut, cmdErr, tty)
	if getErr == nil {
		return nil
	}

	// if the GET also failed, return the postErr; it's more likely to be correct, and GET is the legacy path
	return postErr
}
Example #9
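The kubectl Factory constructor: it caches one API client per version and exposes everything else kubectl needs as closures over that cache: REST mapping, clients, describers, printers, pod selectors and ports for a resource, resizers, reapers, schema validation, and the default namespace.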
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{latest.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := &clientCache{
		clients: make(map[string]*client.Client),
		loader:  clientConfig,
	}

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{mapper, cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			describer, ok := kubectl.DescriberFor(mapping.Kind, client)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
			}
			return describer, nil
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders bool) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders), nil
		},
		PodSelectorForResource: func(mapping *meta.RESTMapping, namespace, name string) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			client, err := clients.ClientForVersion("")
			if err != nil {
				return "", err
			}
			switch mapping.Kind {
			case "ReplicationController":
				rc, err := client.ReplicationControllers(namespace).Get(name)
				if err != nil {
					return "", err
				}
				return kubectl.MakeLabels(rc.Spec.Selector), nil
			case "Pod":
				pod, err := client.Pods(namespace).Get(name)
				if err != nil {
					return "", err
				}
				if len(pod.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(pod.Labels), nil
			case "Service":
				svc, err := client.Services(namespace).Get(name)
				if err != nil {
					return "", err
				}
				if svc.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(svc.Spec.Selector), nil
			default:
				return "", fmt.Errorf("it is not possible to get a pod selector from %s", mapping.Kind)
			}
		},
		PortsForResource: func(mapping *meta.RESTMapping, namespace, name string) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			client, err := clients.ClientForVersion("")
			if err != nil {
				return nil, err
			}
			switch mapping.Kind {
			case "ReplicationController":
				rc, err := client.ReplicationControllers(namespace).Get(name)
				if err != nil {
					return nil, err
				}
				return getPorts(rc.Spec.Template.Spec), nil
			case "Pod":
				pod, err := client.Pods(namespace).Get(name)
				if err != nil {
					return nil, err
				}
				return getPorts(pod.Spec), nil
			default:
				return nil, fmt.Errorf("it is not possible to get ports from %s", mapping.Kind)
			}
		},
		Resizer: func(mapping *meta.RESTMapping) (kubectl.Resizer, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ResizerFor(mapping.Kind, kubectl.NewResizerClient(client))
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func() (validation.Schema, error) {
			if flags.Lookup("validate").Value.String() == "true" {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				return &clientSwaggerSchema{client, api.Scheme}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, error) {
			return clientConfig.Namespace()
		},
	}
}
Example #10
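A variant of the client test that resolves a setup cycle: the master's config wants the test server's URL while the server wants the master's handler, so the master variable is declared first and the server serves through a closure over it, deferring the dereference until the first request arrives.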
func TestClient(t *testing.T) {
	helper, err := master.NewEtcdHelper(newEtcdClient(), "v1beta1")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	m = master.New(&master.Config{
		Client:            client.NewOrDie(&client.Config{Host: s.URL}),
		EtcdHelper:        helper,
		KubeletClient:     client.FakeKubeletClient{},
		EnableLogsSupport: false,
		EnableProfiling:   true,
		EnableUISupport:   false,
		APIPrefix:         "/api",
		Authorizer:        apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:  admit.NewAlwaysAdmit(),
	})

	testCases := []string{
		"v1beta1",
		"v1beta2",
	}
	for _, apiVersion := range testCases {
		ns := api.NamespaceDefault
		deleteAllEtcdKeys()
		client := client.NewOrDie(&client.Config{Host: s.URL, Version: apiVersion})

		info, err := client.ServerVersion()
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
			t.Errorf("expected %#v, got %#v", e, a)
		}

		pods, err := client.Pods(ns).List(labels.Everything())
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(pods.Items) != 0 {
			t.Errorf("expected no pods, got %#v", pods)
		}

		// get a validation error
		pod := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				GenerateName: "test",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name: "test",
					},
				},
			},
		}

		got, err := client.Pods(ns).Create(pod)
		if err == nil {
			t.Fatalf("unexpected non-error: %v", got)
		}

		// get a created pod
		pod.Spec.Containers[0].Image = "an-image"
		got, err = client.Pods(ns).Create(pod)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if got.Name == "" {
			t.Errorf("unexpected empty pod Name %v", got)
		}

		// pod is shown, but not scheduled
		pods, err = client.Pods(ns).List(labels.Everything())
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(pods.Items) != 1 {
			t.Errorf("expected one pod, got %#v", pods)
		}
		actual := pods.Items[0]
		if actual.Name != got.Name {
			t.Errorf("expected pod %#v, got %#v", got, actual)
		}
		if actual.Status.Host != "" {
			t.Errorf("expected pod to be unscheduled, got %#v", actual)
		}
	}
}