func RunPortForward(f *cmdutil.Factory, cmd *cobra.Command, args []string, fw portForwarder) error {
	podName := cmdutil.GetFlagString(cmd, "pod")
	if len(podName) == 0 && len(args) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for port-forward")
	}

	if len(podName) != 0 {
		printDeprecationWarning("port-forward POD", "-p POD")
	} else {
		podName = args[0]
		args = args[1:]
	}

	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "at least 1 PORT is required for port-forward")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		return fmt.Errorf("unable to forward ports because pod is not running; current status=%v", pod.Status.Phase)
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)

	stopCh := make(chan struct{}, 1)
	go func() {
		<-signals
		close(stopCh)
	}()

	req := client.RESTClient.Post().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")

	return fw.ForwardPorts("POST", req.URL(), config, args, stopCh)
}
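Because RunPortForward takes its transport as the portForwarder dependency, it can be unit-tested without opening real connections. A minimal sketch of a test double follows; the interface shape (a single ForwardPorts method) is inferred from the call site above, and the type and field names are hypothetical.

// fakePortForwarder is a hypothetical test double for the portForwarder
// interface; the method signature is inferred from the call in RunPortForward.
type fakePortForwarder struct {
	url   *url.URL
	ports []string
}

func (f *fakePortForwarder) ForwardPorts(method string, url *url.URL, config *client.Config, ports []string, stopChan <-chan struct{}) error {
	// Record what would have been forwarded instead of dialing the pod.
	f.url = url
	f.ports = ports
	return nil
}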
func TestApiserverMetrics(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	// Make a request to the apiserver to ensure there's at least one data point
	// for the metrics we're expecting -- otherwise, they won't be exported.
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	if _, err := client.Pods(api.NamespaceDefault).List(labels.Everything(), fields.Everything()); err != nil {
		t.Fatalf("unexpected error getting pods: %v", err)
	}

	metrics, err := scrapeMetrics(s)
	if err != nil {
		t.Fatal(err)
	}
	checkForExpectedMetrics(t, metrics, []string{
		"apiserver_request_count",
		"apiserver_request_latencies",
	})
}
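The scrapeMetrics and checkForExpectedMetrics helpers live elsewhere in the test package. As a rough sketch of what scraping could look like, assuming the test master serves Prometheus text-format metrics at /metrics (the helper below is hypothetical and returns the raw exposition text):

// scrapeMetricsText is a hypothetical sketch: fetch the raw Prometheus
// text exposition from the test server's /metrics endpoint.
func scrapeMetricsText(s *httptest.Server) (string, error) {
	resp, err := http.Get(s.URL + "/metrics")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}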
func TestClient(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})

	info, err := client.ServerVersion()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
		t.Errorf("expected %#v, got %#v", e, a)
	}

	pods, err := client.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 0 {
		t.Errorf("expected no pods, got %#v", pods)
	}

	// get a validation error
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "test",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name: "test",
				},
			},
		},
	}

	got, err := client.Pods(ns).Create(pod)
	if err == nil {
		t.Fatalf("unexpected non-error: %v", got)
	}

	// get a created pod
	pod.Spec.Containers[0].Image = "an-image"
	got, err = client.Pods(ns).Create(pod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name == "" {
		t.Errorf("unexpected empty pod Name %v", got)
	}

	// pod is shown, but not scheduled
	pods, err = client.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 1 {
		t.Errorf("expected one pod, got %#v", pods)
	}
	actual := pods.Items[0]
	if actual.Name != got.Name {
		t.Errorf("expected pod %#v, got %#v", got, actual)
	}
	if actual.Spec.NodeName != "" {
		t.Errorf("expected pod to be unscheduled, got %#v", actual)
	}
}
func TestMultiWatch(t *testing.T) {
	// Disable this test as long as it demonstrates a problem.
	// TODO: Reenable this test when we get #6059 resolved.
	return

	const watcherCount = 50
	runtime.GOMAXPROCS(watcherCount)

	framework.DeleteAllEtcdKeys()
	defer framework.DeleteAllEtcdKeys()
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})

	dummyEvent := func(i int) *api.Event {
		name := fmt.Sprintf("unrelated-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
				Namespace: ns,
			},
			InvolvedObject: api.ObjectReference{
				Name:      name,
				Namespace: ns,
			},
			Reason: fmt.Sprintf("unrelated change %v", i),
		}
	}

	type timePair struct {
		t    time.Time
		name string
	}

	receivedTimes := make(chan timePair, watcherCount*2)
	watchesStarted := sync.WaitGroup{}

	// make a bunch of pods and watch them
	for i := 0; i < watcherCount; i++ {
		watchesStarted.Add(1)
		name := fmt.Sprintf("multi-watch-%v", i)
		got, err := client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: labels.Set{"watchlabel": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "nothing",
					Image: "kubernetes/pause",
				}},
			},
		})
		if err != nil {
			t.Fatalf("Couldn't make %v: %v", name, err)
		}

		go func(name, rv string) {
			w, err := client.Pods(ns).Watch(
				labels.Set{"watchlabel": name}.AsSelector(),
				fields.Everything(),
				rv,
			)
			if err != nil {
				panic(fmt.Sprintf("watch error for %v: %v", name, err))
			}
			defer w.Stop()
			watchesStarted.Done()
			e, ok := <-w.ResultChan() // should get the update (that we'll do below)
			if !ok {
				panic(fmt.Sprintf("%v ended early?", name))
			}
			if e.Type != watch.Modified {
				panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
			}
			receivedTimes <- timePair{time.Now(), name}
		}(name, got.ObjectMeta.ResourceVersion)
	}
	log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)

	// wait for watches to start before we start spamming the system with
	// objects below, otherwise we'll hit the watch window restriction.
	watchesStarted.Wait()

	const (
		useEventsAsUnrelatedType = false
		usePodsAsUnrelatedType   = true
	)

	// make a bunch of unrelated changes in parallel
	if useEventsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					if _, err := client.Events(ns).Create(dummyEvent(i)); err != nil {
						panic(fmt.Sprintf("couldn't make an event: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}
	if usePodsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					name := fmt.Sprintf("unrelated-%v", i)
					_, err := client.Pods(ns).Create(&api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name: name,
						},
						Spec: api.PodSpec{
							Containers: []api.Container{{
								Name:  "nothing",
								Image: "kubernetes/pause",
							}},
						},
					})
					if err != nil {
						panic(fmt.Sprintf("couldn't make unrelated pod: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}

	// Now we still have changes being made in parallel, but at least 1000 have been made.
	// Make some updates to send down the watches.
	sentTimes := make(chan timePair, watcherCount*2)
	for i := 0; i < watcherCount; i++ {
		go func(i int) {
			name := fmt.Sprintf("multi-watch-%v", i)
			pod, err := client.Pods(ns).Get(name)
			if err != nil {
				panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
			}
			pod.Spec.Containers[0].Image = "kubernetes/pause:1"
			sentTimes <- timePair{time.Now(), name}
			if _, err := client.Pods(ns).Update(pod); err != nil {
				panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
			}
		}(i)
	}

	sent := map[string]time.Time{}
	for i := 0; i < watcherCount; i++ {
		tp := <-sentTimes
		sent[tp.name] = tp.t
	}
	log.Printf("all changes made")

	dur := map[string]time.Duration{}
	for i := 0; i < watcherCount; i++ {
		tp := <-receivedTimes
		delta := tp.t.Sub(sent[tp.name])
		dur[tp.name] = delta
		log.Printf("%v: %v", tp.name, delta)
	}
	log.Printf("all watches ended")
	t.Errorf("durations: %v", dur)
}
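Both "unrelated changes" blocks above use the same fan-out/fan-in worker-pool shape: one feeder goroutine fills a jobs channel, 50 workers drain it, and the main goroutine counts completions before moving on. A standalone sketch of that pattern, with a hypothetical do callback standing in for the API call:

// workerPoolSketch runs `items` jobs across `workers` goroutines and
// returns the first error, if any. Hypothetical helper, for illustration.
func workerPoolSketch(items, workers int, do func(int) error) error {
	jobs := make(chan int, items)
	done := make(chan error, items)
	for i := 0; i < items; i++ {
		jobs <- i
	}
	close(jobs)
	for w := 0; w < workers; w++ {
		go func() {
			// Each worker drains jobs until the channel is closed and empty.
			for i := range jobs {
				done <- do(i)
			}
		}()
	}
	for i := 0; i < items; i++ {
		if err := <-done; err != nil {
			return err
		}
	}
	return nil
}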
// RunLog retrieves a pod log.
func RunLog(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, p *logParams) error {
	if len(os.Args) > 1 && os.Args[1] == "log" {
		printDeprecationWarning("logs", "log")
	}

	if len(args) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for log")
	}

	if len(args) > 2 {
		return cmdutil.UsageError(cmd, "log POD [CONTAINER]")
	}

	sinceSeconds := cmdutil.GetFlagDuration(cmd, "since")
	sinceTime := cmdutil.GetFlagString(cmd, "since-time")
	if len(sinceTime) > 0 && sinceSeconds > 0 {
		return cmdutil.UsageError(cmd, "at most one of --since or --since-time may be specified")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	client, err := f.Client()
	if err != nil {
		return err
	}

	podID := args[0]

	pod, err := client.Pods(namespace).Get(podID)
	if err != nil {
		return err
	}

	// [-c CONTAINER]
	container := p.containerName
	if len(container) == 0 {
		// [CONTAINER] (container as arg not flag) is supported as legacy behavior. See PR #10519 for more details.
		if len(args) == 1 {
			if len(pod.Spec.Containers) != 1 {
				podContainersNames := []string{}
				for _, container := range pod.Spec.Containers {
					podContainersNames = append(podContainersNames, container.Name)
				}
				return fmt.Errorf("pod %s has the following containers: %s; please specify the container to print logs for with -c", pod.ObjectMeta.Name, strings.Join(podContainersNames, ", "))
			}
			container = pod.Spec.Containers[0].Name
		} else {
			container = args[1]
		}
	}

	logOptions := &api.PodLogOptions{
		Container:  container,
		Follow:     cmdutil.GetFlagBool(cmd, "follow"),
		Previous:   cmdutil.GetFlagBool(cmd, "previous"),
		Timestamps: cmdutil.GetFlagBool(cmd, "timestamps"),
	}
	if sinceSeconds > 0 {
		// round up to the nearest second
		sec := int64(math.Ceil(float64(sinceSeconds) / float64(time.Second)))
		logOptions.SinceSeconds = &sec
	}
	if t, err := api.ParseRFC3339(sinceTime, unversioned.Now); err == nil {
		logOptions.SinceTime = &t
	}
	if limitBytes := cmdutil.GetFlagInt(cmd, "limit-bytes"); limitBytes != 0 {
		i := int64(limitBytes)
		logOptions.LimitBytes = &i
	}
	if tail := cmdutil.GetFlagInt(cmd, "tail"); tail >= 0 {
		i := int64(tail)
		logOptions.TailLines = &i
	}

	return handleLog(client, namespace, podID, logOptions, out)
}
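One detail worth noting: --since is parsed as a time.Duration, but the log API takes whole seconds, so the value is rounded up rather than truncated (1500ms becomes 2s, not 1s), ensuring no requested window is silently shortened. A tiny hypothetical helper showing the same arithmetic:

// sinceSecondsFor rounds a duration up to whole seconds, matching the
// rounding done in RunLog above. Hypothetical helper, for illustration.
func sinceSecondsFor(d time.Duration) int64 {
	return int64(math.Ceil(float64(d) / float64(time.Second)))
}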
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":                          kubectl.BasicReplicationController{},
		"run-pod/v1":                      kubectl.BasicPod{},
		"service/v1":                      kubectl.ServiceGeneratorV1{},
		"service/v2":                      kubectl.ServiceGeneratorV2{},
		"horizontalpodautoscaler/v1beta1": kubectl.HorizontalPodAutoscalerV1Beta1{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			switch group {
			case "":
				return client.RESTClient, nil
			case "extensions":
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify ports via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.Kind, client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
		CanBeExposed: func(kind string) error {
			switch kind {
			case "ReplicationController", "Service", "Pod":
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind string) error {
			switch kind {
			// TODO: support autoscale for deployments
			case "ReplicationController":
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %s", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion("")
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				var pods *api.PodList
				for pods == nil || len(pods.Items) == 0 {
					var err error
					if pods, err = client.Pods(t.Namespace).List(labels.SelectorFromSet(t.Spec.Selector), fields.Everything()); err != nil {
						return nil, err
					}
					if len(pods.Items) == 0 {
						time.Sleep(2 * time.Second)
					}
				}
				pod := &pods.Items[0]
				return pod, nil
			case *api.Pod:
				return t, nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %s: not implemented", kind)
			}
		},
	}
}
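Commands consume the factory through its closure fields rather than constructing clients and mappers directly, which keeps flag binding and version negotiation in one place. A minimal usage sketch from a consuming package, assuming the default flag-bound client config (the function name is hypothetical):

// useFactorySketch is a hypothetical call site, for illustration only.
func useFactorySketch() error {
	f := cmdutil.NewFactory(nil) // nil binds flags to a new clientcmd.ClientConfig
	c, err := f.Client()         // *client.Client for the default version
	if err != nil {
		return err
	}
	mapper, typer := f.Object() // REST mapper and object typer for the output version
	_, _, _ = c, mapper, typer
	return nil
}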