func TestPodReadOnlyFilesystem(t *testing.T) {
	_, s := framework.RunAMaster(nil)
	defer s.Close()

	isReadOnly := true
	ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "xxx",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
					SecurityContext: &api.SecurityContext{
						ReadOnlyRootFilesystem: &isReadOnly,
					},
				},
			},
		},
	}

	if _, err := client.Pods(ns.Name).Create(pod); err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}

	deletePodOrErrorf(t, client, ns.Name, pod.Name)
}
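Several of the integration tests in this collection call a deletePodOrErrorf helper that is not included in these snippets. A minimal sketch, assuming the helper simply deletes the named pod with the same client used by the tests and records a test error on failure; the body below is an assumption, not the original implementation.

// deletePodOrErrorf is a hypothetical sketch of the cleanup helper used by
// the tests in this section: delete the pod and report any failure via the
// testing.T rather than aborting the test.
func deletePodOrErrorf(t *testing.T, c *client.Client, ns, name string) {
	if err := c.Pods(ns).Delete(name, nil); err != nil {
		t.Errorf("unable to delete pod %v: %v", name, err)
	}
}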
// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.prefix))

	defer GinkgoRecover()

	client := f.Client
	podClient := client.Pods(config.namespace)

	err := podClient.Delete(config.prefix+"-client", nil)
	if err != nil {
		// Log the error before failing the test: if the test has already failed,
		// framework.ExpectNoError() won't print anything to the logs!
		glog.Warningf("Failed to delete client pod: %v", err)
		framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
	}

	if config.serverImage != "" {
		if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to wait for client pod to terminate: %v", err)
		}
		// See issue #24100.
		// Prevent umount errors by making sure the client pod exits cleanly
		// *before* the volume server pod exits.
		By("sleeping a bit so client can stop and unmount")
		time.Sleep(20 * time.Second)

		err = podClient.Delete(config.prefix+"-server", nil)
		if err != nil {
			glog.Warningf("Failed to delete server pod: %v", err)
			framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
		}
	}
}
// RunLog retrieves a pod log
func RunLog(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, p *logParams) error {
	if len(os.Args) > 1 && os.Args[1] == "log" {
		printDeprecationWarning("logs", "log")
	}

	if len(args) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for log")
	}

	if len(args) > 2 {
		return cmdutil.UsageError(cmd, "log POD [CONTAINER]")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	podID := args[0]

	pod, err := client.Pods(namespace).Get(podID)
	if err != nil {
		return err
	}

	// [-c CONTAINER]
	container := p.containerName
	if len(container) == 0 {
		// [CONTAINER] (container as arg not flag) is supported as legacy behavior. See PR #10519 for more details.
		if len(args) == 1 {
			if len(pod.Spec.Containers) != 1 {
				podContainersNames := []string{}
				for _, container := range pod.Spec.Containers {
					podContainersNames = append(podContainersNames, container.Name)
				}

				return fmt.Errorf("Pod %s has the following containers: %s; please specify the container to print logs for with -c", pod.ObjectMeta.Name, strings.Join(podContainersNames, ", "))
			}
			container = pod.Spec.Containers[0].Name
		} else {
			container = args[1]
		}
	}

	follow := false
	if cmdutil.GetFlagBool(cmd, "follow") {
		follow = true
	}
	previous := false
	if cmdutil.GetFlagBool(cmd, "previous") {
		previous = true
	}

	return handleLog(client, namespace, podID, container, follow, previous, out)
}
func RunPortForward(f *cmdutil.Factory, cmd *cobra.Command, args []string, fw portForwarder) error {
	podName := cmdutil.GetFlagString(cmd, "pod")
	if len(podName) == 0 && len(args) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for port-forward")
	}

	if len(podName) != 0 {
		printDeprecationWarning("port-forward POD", "-p POD")
	} else {
		podName = args[0]
		args = args[1:]
	}

	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "at least 1 PORT is required for port-forward")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase)
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)

	stopCh := make(chan struct{}, 1)
	go func() {
		<-signals
		close(stopCh)
	}()

	req := client.RESTClient.Post().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")

	return fw.ForwardPorts("POST", req.URL(), config, args, stopCh)
}
func TestRunOnceDurationAdmissionPlugin(t *testing.T) {
	var secs int64 = 3600
	config := &pluginapi.RunOnceDurationConfig{
		ActiveDeadlineSecondsOverride: &secs,
	}
	kclient := setupRunOnceDurationTest(t, config, nil)

	pod, err := kclient.Pods(testutil.Namespace()).Create(testRunOnceDurationPod())
	if err != nil {
		t.Fatalf("Unexpected: %v", err)
	}
	if pod.Spec.ActiveDeadlineSeconds == nil || *pod.Spec.ActiveDeadlineSeconds != 3600 {
		t.Errorf("Unexpected value for pod.ActiveDeadlineSeconds %v", pod.Spec.ActiveDeadlineSeconds)
	}
}
func TestRunOnceDurationAdmissionPluginProjectOverride(t *testing.T) {
	var secs int64 = 3600
	config := &pluginapi.RunOnceDurationConfig{
		ActiveDeadlineSecondsOverride: &secs,
		Enabled:                       true,
	}
	nsAnnotations := map[string]string{
		pluginapi.ActiveDeadlineSecondsOverrideAnnotation: "100",
	}
	kclient := setupRunOnceDurationTest(t, config, nsAnnotations)

	pod, err := kclient.Pods(testutil.Namespace()).Create(testRunOnceDurationPod())
	if err != nil {
		t.Fatalf("Unexpected: %v", err)
	}
	if pod.Spec.ActiveDeadlineSeconds == nil || *pod.Spec.ActiveDeadlineSeconds != 100 {
		t.Errorf("Unexpected value for pod.ActiveDeadlineSeconds %v", pod.Spec.ActiveDeadlineSeconds)
	}
}
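The setupRunOnceDurationTest and testRunOnceDurationPod helpers used by the two tests above are not part of these snippets. A minimal sketch of the pod fixture, assuming the RunOnceDuration plugin acts on run-once pods (RestartPolicyNever) and that the fixture leaves ActiveDeadlineSeconds unset so the admission override is observable; the kapi package alias, names, and image are assumptions.

// testRunOnceDurationPod is a hypothetical fixture: a run-once pod
// (RestartPolicyNever) with no ActiveDeadlineSeconds, so any value seen
// after creation must have been set by the admission plugin.
func testRunOnceDurationPod() *kapi.Pod {
	return &kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{
			GenerateName: "run-once-pod-",
		},
		Spec: kapi.PodSpec{
			RestartPolicy: kapi.RestartPolicyNever,
			Containers: []kapi.Container{
				{Name: "run-once", Image: "busybox"},
			},
		},
	}
}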
func TestPodReadOnlyFilesystem(t *testing.T) {
	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	// TODO: Uncomment when fix #19254
	// defer s.Close()

	isReadOnly := true
	ns := "pod-readonly-root"
	masterConfig := framework.NewIntegrationTestMasterConfig()
	m, err := master.New(masterConfig)
	if err != nil {
		t.Fatalf("Error in bringing up the master: %v", err)
	}

	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "XXX",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
					SecurityContext: &api.SecurityContext{
						ReadOnlyRootFilesystem: &isReadOnly,
					},
				},
			},
		},
	}

	if _, err := client.Pods(ns).Create(pod); err != nil {
		t.Errorf("Failed to create pod: %v", err)
	}

	deletePodOrErrorf(t, client, ns, pod.Name)
}
func TestApiserverMetrics(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	// Make a request to the apiserver to ensure there's at least one data point
	// for the metrics we're expecting -- otherwise, they won't be exported.
	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	if _, err := client.Pods(api.NamespaceDefault).List(api.ListOptions{}); err != nil {
		t.Fatalf("unexpected error getting pods: %v", err)
	}

	metrics, err := scrapeMetrics(s)
	if err != nil {
		t.Fatal(err)
	}
	checkForExpectedMetrics(t, metrics, []string{
		"apiserver_request_count",
		"apiserver_request_latencies",
	})
}
func TestMultiWatch(t *testing.T) {
	// Disable this test as long as it demonstrates a problem.
	// TODO: Reenable this test when we get #6059 resolved.
	return

	const watcherCount = 50
	rt.GOMAXPROCS(watcherCount)

	framework.DeleteAllEtcdKeys()
	defer framework.DeleteAllEtcdKeys()

	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	dummyEvent := func(i int) *api.Event {
		name := fmt.Sprintf("unrelated-%v", i)
		return &api.Event{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("%v.%x", name, time.Now().UnixNano()),
				Namespace: ns,
			},
			InvolvedObject: api.ObjectReference{
				Name:      name,
				Namespace: ns,
			},
			Reason: fmt.Sprintf("unrelated change %v", i),
		}
	}

	type timePair struct {
		t    time.Time
		name string
	}

	receivedTimes := make(chan timePair, watcherCount*2)
	watchesStarted := sync.WaitGroup{}

	// make a bunch of pods and watch them
	for i := 0; i < watcherCount; i++ {
		watchesStarted.Add(1)
		name := fmt.Sprintf("multi-watch-%v", i)
		got, err := client.Pods(ns).Create(&api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: labels.Set{"watchlabel": name},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{{
					Name:  "pause",
					Image: e2e.GetPauseImageName(client),
				}},
			},
		})
		if err != nil {
			t.Fatalf("Couldn't make %v: %v", name, err)
		}

		go func(name, rv string) {
			options := api.ListOptions{
				LabelSelector:   labels.Set{"watchlabel": name}.AsSelector(),
				ResourceVersion: rv,
			}
			w, err := client.Pods(ns).Watch(options)
			if err != nil {
				panic(fmt.Sprintf("watch error for %v: %v", name, err))
			}
			defer w.Stop()
			watchesStarted.Done()
			e, ok := <-w.ResultChan() // should get the update (that we'll do below)
			if !ok {
				panic(fmt.Sprintf("%v ended early?", name))
			}
			if e.Type != watch.Modified {
				panic(fmt.Sprintf("Got unexpected watch notification:\n%v: %+v %+v", name, e, e.Object))
			}
			receivedTimes <- timePair{time.Now(), name}
		}(name, got.ObjectMeta.ResourceVersion)
	}
	log.Printf("%v: %v pods made and watchers started", time.Now(), watcherCount)

	// wait for watches to start before we start spamming the system with
	// objects below, otherwise we'll hit the watch window restriction.
	watchesStarted.Wait()

	const (
		useEventsAsUnrelatedType = false
		usePodsAsUnrelatedType   = true
	)

	// make a bunch of unrelated changes in parallel
	if useEventsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					if _, err := client.Events(ns).Create(dummyEvent(i)); err != nil {
						panic(fmt.Sprintf("couldn't make an event: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}
	if usePodsAsUnrelatedType {
		const unrelatedCount = 3000
		var wg sync.WaitGroup
		defer wg.Wait()
		changeToMake := make(chan int, unrelatedCount*2)
		changeMade := make(chan int, unrelatedCount*2)
		go func() {
			for i := 0; i < unrelatedCount; i++ {
				changeToMake <- i
			}
			close(changeToMake)
		}()
		for i := 0; i < 50; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for {
					i, ok := <-changeToMake
					if !ok {
						return
					}
					name := fmt.Sprintf("unrelated-%v", i)
					_, err := client.Pods(ns).Create(&api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name: name,
						},
						Spec: api.PodSpec{
							Containers: []api.Container{{
								Name:  "nothing",
								Image: e2e.GetPauseImageName(client),
							}},
						},
					})
					if err != nil {
						panic(fmt.Sprintf("couldn't make unrelated pod: %v", err))
					}
					changeMade <- i
				}
			}()
		}

		for i := 0; i < 2000; i++ {
			<-changeMade
			if (i+1)%50 == 0 {
				log.Printf("%v: %v unrelated changes made", time.Now(), i+1)
			}
		}
	}

	// Now we still have changes being made in parallel, but at least 1000 have been made.
	// Make some updates to send down the watches.
	sentTimes := make(chan timePair, watcherCount*2)
	for i := 0; i < watcherCount; i++ {
		go func(i int) {
			name := fmt.Sprintf("multi-watch-%v", i)
			pod, err := client.Pods(ns).Get(name)
			if err != nil {
				panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
			}
			pod.Spec.Containers[0].Image = e2e.GetPauseImageName(client)
			sentTimes <- timePair{time.Now(), name}
			if _, err := client.Pods(ns).Update(pod); err != nil {
				panic(fmt.Sprintf("Couldn't make %v: %v", name, err))
			}
		}(i)
	}

	sent := map[string]time.Time{}
	for i := 0; i < watcherCount; i++ {
		tp := <-sentTimes
		sent[tp.name] = tp.t
	}
	log.Printf("all changes made")
	dur := map[string]time.Duration{}
	for i := 0; i < watcherCount; i++ {
		tp := <-receivedTimes
		delta := tp.t.Sub(sent[tp.name])
		dur[tp.name] = delta
		log.Printf("%v: %v", tp.name, delta)
	}
	log.Printf("all watches ended")
	t.Errorf("durations: %v", dur)
}
func TestClient(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()

	ns := api.NamespaceDefault
	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	info, err := client.Discovery().ServerVersion()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if e, a := version.Get(), *info; !reflect.DeepEqual(e, a) {
		t.Errorf("expected %#v, got %#v", e, a)
	}

	pods, err := client.Pods(ns).List(api.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 0 {
		t.Errorf("expected no pods, got %#v", pods)
	}

	// get a validation error
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "test",
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name: "test",
				},
			},
		},
	}

	got, err := client.Pods(ns).Create(pod)
	if err == nil {
		t.Fatalf("unexpected non-error: %v", got)
	}

	// get a created pod
	pod.Spec.Containers[0].Image = "an-image"
	got, err = client.Pods(ns).Create(pod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name == "" {
		t.Errorf("unexpected empty pod Name %v", got)
	}

	// pod is shown, but not scheduled
	pods, err = client.Pods(ns).List(api.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 1 {
		t.Errorf("expected one pod, got %#v", pods)
	}
	actual := pods.Items[0]
	if actual.Name != got.Name {
		t.Errorf("expected pod %#v, got %#v", got, actual)
	}
	if actual.Spec.NodeName != "" {
		t.Errorf("expected pod to be unscheduled, got %#v", actual)
	}
}
// RunLog retrieves a pod log
func RunLog(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, p *logParams) error {
	if len(os.Args) > 1 && os.Args[1] == "log" {
		printDeprecationWarning("logs", "log")
	}

	if len(args) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for log")
	}

	if len(args) > 2 {
		return cmdutil.UsageError(cmd, "log POD [CONTAINER]")
	}

	sinceSeconds := cmdutil.GetFlagDuration(cmd, "since")
	sinceTime := cmdutil.GetFlagString(cmd, "since-time")
	if len(sinceTime) > 0 && sinceSeconds > 0 {
		return cmdutil.UsageError(cmd, "only one of --since, --since-time may be specified")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	podID := args[0]

	pod, err := client.Pods(namespace).Get(podID)
	if err != nil {
		return err
	}

	// [-c CONTAINER]
	container := p.containerName
	if len(container) == 0 {
		// [CONTAINER] (container as arg not flag) is supported as legacy behavior. See PR #10519 for more details.
		if len(args) == 1 {
			if len(pod.Spec.Containers) != 1 {
				podContainersNames := []string{}
				for _, container := range pod.Spec.Containers {
					podContainersNames = append(podContainersNames, container.Name)
				}

				return fmt.Errorf("Pod %s has the following containers: %s; please specify the container to print logs for with -c", pod.ObjectMeta.Name, strings.Join(podContainersNames, ", "))
			}
			container = pod.Spec.Containers[0].Name
		} else {
			container = args[1]
		}
	}

	logOptions := &api.PodLogOptions{
		Container:  container,
		Follow:     cmdutil.GetFlagBool(cmd, "follow"),
		Previous:   cmdutil.GetFlagBool(cmd, "previous"),
		Timestamps: cmdutil.GetFlagBool(cmd, "timestamps"),
	}
	if sinceSeconds > 0 {
		// round up to the nearest second
		sec := int64(math.Ceil(float64(sinceSeconds) / float64(time.Second)))
		logOptions.SinceSeconds = &sec
	}
	if t, err := api.ParseRFC3339(sinceTime, unversioned.Now); err == nil {
		logOptions.SinceTime = &t
	}
	if limitBytes := cmdutil.GetFlagInt(cmd, "limit-bytes"); limitBytes != 0 {
		i := int64(limitBytes)
		logOptions.LimitBytes = &i
	}
	if tail := cmdutil.GetFlagInt(cmd, "tail"); tail >= 0 {
		i := int64(tail)
		logOptions.TailLines = &i
	}

	return handleLog(client, namespace, podID, logOptions, out)
}
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":                          kubectl.BasicReplicationController{},
		"run-pod/v1":                      kubectl.BasicPod{},
		"service/v1":                      kubectl.ServiceGeneratorV1{},
		"service/v2":                      kubectl.ServiceGeneratorV2{},
		"horizontalpodautoscaler/v1beta1": kubectl.HorizontalPodAutoscalerV1Beta1{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := ""
			if cfg.GroupVersion != nil {
				cmdApiVersion = cfg.GroupVersion.String()
			}

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.GroupVersionKind.GroupVersion().String())
			if err != nil {
				return nil, err
			}
			switch group {
			case "":
				return client.RESTClient, nil
			case "extensions":
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.GroupVersionKind.GroupVersion().String())
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(group, mapping.GroupVersionKind.Kind, client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*client.Request, error) {
			c, err := clients.ClientForVersion("")
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %s", kind)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			client, err := clients.ClientForVersion(mapping.GroupVersionKind.GroupVersion().String())
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.Kind, client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.GroupVersionKind.GroupVersion().String())
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.Kind, client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
		CanBeExposed: func(kind string) error {
			switch kind {
			case "ReplicationController", "Service", "Pod":
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind string) error {
			switch kind {
			case "ReplicationController", "Deployment":
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %s", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion("")
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				var pods *api.PodList
				for pods == nil || len(pods.Items) == 0 {
					var err error
					if pods, err = client.Pods(t.Namespace).List(labels.SelectorFromSet(t.Spec.Selector), fields.Everything()); err != nil {
						return nil, err
					}
					if len(pods.Items) == 0 {
						time.Sleep(2 * time.Second)
					}
				}
				pod := &pods.Items[0]
				return pod, nil
			case *api.Pod:
				return t, nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %s: not implemented", kind)
			}
		},
	}
}
func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	ns := "pod-activedeadline-update"
	masterConfig := framework.NewIntegrationTestMasterConfig()
	m, err := master.New(masterConfig)
	if err != nil {
		t.Fatalf("Error in bringing up the master: %v", err)
	}

	framework.DeleteAllEtcdKeys()
	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	var (
		iZero = int64(0)
		i30   = int64(30)
		i60   = int64(60)
		iNeg  = int64(-1)
	)

	prototypePod := func() *api.Pod {
		return &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: "xxx",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "fake-name",
						Image: "fakeimage",
					},
				},
			},
		}
	}

	cases := []struct {
		name     string
		original *int64
		update   *int64
		valid    bool
	}{
		{
			name:     "no change, nil",
			original: nil,
			update:   nil,
			valid:    true,
		},
		{
			name:     "no change, set",
			original: &i30,
			update:   &i30,
			valid:    true,
		},
		{
			name:     "change to positive from nil",
			original: nil,
			update:   &i60,
			valid:    true,
		},
		{
			name:     "change to smaller positive",
			original: &i60,
			update:   &i30,
			valid:    true,
		},
		{
			name:     "change to larger positive",
			original: &i30,
			update:   &i60,
			valid:    false,
		},
		{
			name:     "change to negative from positive",
			original: &i30,
			update:   &iNeg,
			valid:    false,
		},
		{
			name:     "change to negative from nil",
			original: nil,
			update:   &iNeg,
			valid:    false,
		},
		// zero is not allowed, must be a positive integer
		{
			name:     "change to zero from positive",
			original: &i30,
			update:   &iZero,
			valid:    false,
		},
		{
			name:     "change to nil from positive",
			original: &i30,
			update:   nil,
			valid:    false,
		},
	}

	for i, tc := range cases {
		pod := prototypePod()
		pod.Spec.ActiveDeadlineSeconds = tc.original
		pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i)

		if _, err := client.Pods(ns).Create(pod); err != nil {
			t.Errorf("Failed to create pod: %v", err)
		}

		pod.Spec.ActiveDeadlineSeconds = tc.update

		_, err := client.Pods(ns).Update(pod)
		if tc.valid && err != nil {
			t.Errorf("%v: failed to update pod: %v", tc.name, err)
		} else if !tc.valid && err == nil {
			t.Errorf("%v: unexpected allowed update to pod", tc.name)
		}

		deletePodOrErrorf(t, client, ns, pod.Name)
	}
}
func RunPortForward(f *cmdutil.Factory, cmd *cobra.Command, args []string, fw portForwarder) error {
	podName := cmdutil.GetFlagString(cmd, "pod")
	if len(podName) == 0 && len(args) == 0 {
		return cmdutil.UsageError(cmd, "POD is required for port-forward")
	}

	if len(podName) != 0 {
		printDeprecationWarning("port-forward POD", "-p POD")
	} else {
		podName = args[0]
		args = args[1:]
	}

	if len(args) < 1 {
		return cmdutil.UsageError(cmd, "at least 1 PORT is required for port-forward")
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	pod, err := client.Pods(namespace).Get(podName)
	if err != nil {
		return err
	}

	if pod.Status.Phase != api.PodRunning {
		glog.Fatalf("Unable to execute command because pod is not running. Current status=%v", pod.Status.Phase)
	}

	config, err := f.ClientConfig()
	if err != nil {
		return err
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer signal.Stop(signals)

	stopCh := make(chan struct{}, 1)
	go func() {
		<-signals
		close(stopCh)
	}()

	req := client.RESTClient.Post().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")

	postErr := fw.ForwardPorts("POST", req.URL(), config, args, stopCh)

	// if we don't have an error, return.  If we did get an error, try a GET because v3.0.0 shipped with port-forward running as a GET.
	if postErr == nil {
		return nil
	}

	// only try the get if the error is either a forbidden or method not supported, otherwise trying with a GET probably won't help
	if !apierrors.IsForbidden(postErr) && !apierrors.IsMethodNotSupported(postErr) {
		return postErr
	}

	getReq := client.RESTClient.Get().
		Resource("pods").
		Namespace(namespace).
		Name(pod.Name).
		SubResource("portforward")
	getErr := fw.ForwardPorts("GET", getReq.URL(), config, args, stopCh)
	if getErr == nil {
		return nil
	}

	// if we got a getErr, return the postErr because it's more likely to be correct.  GET is legacy
	return postErr
}
func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cobra.Command, args []string) error {
	if len(os.Args) > 1 && os.Args[1] == "run-container" {
		printDeprecationWarning("run", "run-container")
	}

	if len(args) == 0 {
		return cmdutil.UsageError(cmd, "NAME is required for run")
	}

	interactive := cmdutil.GetFlagBool(cmd, "stdin")
	tty := cmdutil.GetFlagBool(cmd, "tty")
	if tty && !interactive {
		return cmdutil.UsageError(cmd, "-i/--stdin is required for containers with --tty=true")
	}
	replicas := cmdutil.GetFlagInt(cmd, "replicas")
	if interactive && replicas != 1 {
		return cmdutil.UsageError(cmd, fmt.Sprintf("-i/--stdin requires that replicas is 1, found %d", replicas))
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	restartPolicy, err := getRestartPolicy(cmd, interactive)
	if err != nil {
		return err
	}
	if restartPolicy != api.RestartPolicyAlways && replicas != 1 {
		return cmdutil.UsageError(cmd, fmt.Sprintf("--restart=%s requires that --replicas=1, found %d", restartPolicy, replicas))
	}
	generatorName := cmdutil.GetFlagString(cmd, "generator")
	if len(generatorName) == 0 {
		if restartPolicy == api.RestartPolicyAlways {
			generatorName = "run/v1"
		} else {
			generatorName = "run-pod/v1"
		}
	}
	generator, found := f.Generator(generatorName)
	if !found {
		return cmdutil.UsageError(cmd, fmt.Sprintf("Generator: %s not found.", generatorName))
	}
	names := generator.ParamNames()
	params := kubectl.MakeParams(cmd, names)
	params["name"] = args[0]
	if len(args) > 1 {
		params["args"] = args[1:]
	}
	err = kubectl.ValidateParams(names, params)
	if err != nil {
		return err
	}

	obj, err := generator.Generate(params)
	if err != nil {
		return err
	}

	inline := cmdutil.GetFlagString(cmd, "overrides")
	if len(inline) > 0 {
		var objType string
		if restartPolicy == api.RestartPolicyAlways {
			objType = "ReplicationController"
		} else {
			objType = "Pod"
		}
		obj, err = cmdutil.Merge(obj, inline, objType)
		if err != nil {
			return err
		}
	}

	// TODO: extract this flag to a central location, when such a location exists.
	if !cmdutil.GetFlagBool(cmd, "dry-run") {
		if restartPolicy == api.RestartPolicyAlways {
			obj, err = client.ReplicationControllers(namespace).Create(obj.(*api.ReplicationController))
		} else {
			obj, err = client.Pods(namespace).Create(obj.(*api.Pod))
		}
		if err != nil {
			return err
		}
	}

	attachFlag := cmd.Flags().Lookup("attach")
	attach := cmdutil.GetFlagBool(cmd, "attach")

	if !attachFlag.Changed && interactive {
		attach = true
	}

	if attach {
		opts := &AttachOptions{
			In:    cmdIn,
			Out:   cmdOut,
			Err:   cmdErr,
			Stdin: interactive,
			TTY:   tty,

			Attach: &DefaultRemoteAttach{},
		}
		config, err := f.ClientConfig()
		if err != nil {
			return err
		}
		opts.Config = config

		client, err := f.Client()
		if err != nil {
			return err
		}
		opts.Client = client

		if restartPolicy == api.RestartPolicyAlways {
			return handleAttachReplicationController(client, obj.(*api.ReplicationController), opts)
		} else {
			return handleAttachPod(client, obj.(*api.Pod), opts)
		}
	}
	return f.PrintObject(cmd, obj, cmdOut)
}
func main() {
	kingpin.Parse()

	var oldBuf string

	// Create a client which we can use to connect to the remote Kubernetes cluster.
	config := &client.Config{
		Host: *cliMaster,
	}
	client, err := client.New(config)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// This ensures we don't overwhelm the Kubernetes API.
	t, err := time.ParseDuration(*cliRefresh)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// This is an ongoing process to pull in new services.
	limiter := time.Tick(t)
	for {
		<-limiter

		// Get the data from the Kubernetes service API.
		log.Println("Refreshing data...")

		// This is the object which we will populate with new data.
		haproxy := Config{
			Port: *cliPort,
		}

		// Get a list of all the services.
		svcs, err := client.Services("").List(api.ListOptions{})
		if err != nil {
			log.Warn(err)
			continue
		}

		// Filter the list down to only the services we need.
		for _, s := range svcs.Items {
			// Only register services which are being balanced internally.
			if s.Spec.Type != "LoadBalancer" {
				log.Printf("Skipped service: %s", s.ObjectMeta.Name)
				continue
			}

			// Ensure we have the "domain" label set.
			if val, ok := s.ObjectMeta.Labels["domain"]; ok {
				// Get a list of all the pods and their IPs to add to the HAProxy configuration.
				pods, err := client.Pods(s.ObjectMeta.Namespace).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(s.Spec.Selector))})
				if err != nil {
					log.Println(err)
					continue
				}

				// Start building the new listener object.
				l := Listener{
					Domain: val,
				}

				// Populate the list of pod IPs.
				for _, p := range pods.Items {
					if p.Status.Phase != api.PodRunning {
						continue
					}
					l.Servers = append(l.Servers, Server{Name: p.ObjectMeta.Name, Address: p.Status.PodIP + ":80"})
				}

				haproxy.Listeners = append(haproxy.Listeners, l)
				log.Printf("Added service: %s", s.ObjectMeta.Name)
			}
		}

		// Attempt to rebuild the HAProxy configuration.
		t := template.Must(template.New("haproxy").Parse(tpl))
		buf := new(bytes.Buffer)
		err = t.Execute(buf, haproxy)
		if err != nil {
			log.Warn(err)
			continue
		}

		// Compare the new configuration against the previous one.
		fmt.Println("Current")
		fmt.Println(buf.String())
		fmt.Println("Old")
		fmt.Println(oldBuf)
		if buf.String() == oldBuf {
			log.Warn("Configuration has not changed")
			continue
		}

		// Determine the current running HAProxy process.
		pid, err := ioutil.ReadFile(*cliPid)
		if err != nil {
			log.Warn(err)
			continue
		}

		// Write out the configuration to a file.
		err = ioutil.WriteFile(*cliConf, buf.Bytes(), 0644)
		if err != nil {
			log.Warn(err)
			continue
		}

		// Trigger a reload of the HAProxy service.
		if err := exec.Command("haproxy", "-f", *cliConf, "-p", *cliPid, "-D", "-sF", string(pid)).Run(); err != nil {
			log.Warn(err)
			continue
		}
	}
}
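The Config, Listener, and Server types that the loop above populates, along with the HAProxy template held in tpl, are defined elsewhere in that program and are not shown here. A minimal sketch of the data structures, inferred only from the fields used above; the field types (in particular Port) are assumptions.

// Server is one backend entry: a running pod's name and its "IP:port" address.
type Server struct {
	Name    string
	Address string
}

// Listener groups the backends for one exposed service, keyed by the
// service's "domain" label.
type Listener struct {
	Domain  string
	Servers []Server
}

// Config is the root object rendered through the HAProxy template; Port is
// taken from *cliPort (its type is assumed here).
type Config struct {
	Port      string
	Listeners []Listener
}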
func TestPodUpdateActiveDeadlineSeconds(t *testing.T) {
	_, s := framework.RunAMaster(nil)
	defer s.Close()

	ns := framework.CreateTestingNamespace("pod-activedeadline-update", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)

	client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	var (
		iZero = int64(0)
		i30   = int64(30)
		i60   = int64(60)
		iNeg  = int64(-1)
	)

	prototypePod := func() *api.Pod {
		return &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: "xxx",
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "fake-name",
						Image: "fakeimage",
					},
				},
			},
		}
	}

	cases := []struct {
		name     string
		original *int64
		update   *int64
		valid    bool
	}{
		{
			name:     "no change, nil",
			original: nil,
			update:   nil,
			valid:    true,
		},
		{
			name:     "no change, set",
			original: &i30,
			update:   &i30,
			valid:    true,
		},
		{
			name:     "change to positive from nil",
			original: nil,
			update:   &i60,
			valid:    true,
		},
		{
			name:     "change to smaller positive",
			original: &i60,
			update:   &i30,
			valid:    true,
		},
		{
			name:     "change to larger positive",
			original: &i30,
			update:   &i60,
			valid:    false,
		},
		{
			name:     "change to negative from positive",
			original: &i30,
			update:   &iNeg,
			valid:    false,
		},
		{
			name:     "change to negative from nil",
			original: nil,
			update:   &iNeg,
			valid:    false,
		},
		// zero is not allowed, must be a positive integer
		{
			name:     "change to zero from positive",
			original: &i30,
			update:   &iZero,
			valid:    false,
		},
		{
			name:     "change to nil from positive",
			original: &i30,
			update:   nil,
			valid:    false,
		},
	}

	for i, tc := range cases {
		pod := prototypePod()
		pod.Spec.ActiveDeadlineSeconds = tc.original
		pod.ObjectMeta.Name = fmt.Sprintf("activedeadlineseconds-test-%v", i)

		if _, err := client.Pods(ns.Name).Create(pod); err != nil {
			t.Errorf("Failed to create pod: %v", err)
		}

		pod.Spec.ActiveDeadlineSeconds = tc.update

		_, err := client.Pods(ns.Name).Update(pod)
		if tc.valid && err != nil {
			t.Errorf("%v: failed to update pod: %v", tc.name, err)
		} else if !tc.valid && err == nil {
			t.Errorf("%v: unexpected allowed update to pod", tc.name)
		}

		deletePodOrErrorf(t, client, ns.Name, pod.Name)
	}
}