func (n *NodeOptions) Complete(f *clientcmd.Factory, c *cobra.Command, args []string, out io.Writer) error {
	defaultNamespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	_, kc, err := f.Clients()
	if err != nil {
		return err
	}

	cmdPrinter, output, err := kcmdutil.PrinterForCommand(c)
	if err != nil {
		return err
	}
	mapper, typer := f.Object(false)

	n.DefaultNamespace = defaultNamespace
	n.Kclient = kc
	n.Writer = out
	n.Mapper = mapper
	n.Typer = typer
	n.RESTClientFactory = f.Factory.ClientForMapping
	n.Printer = f.Printer
	n.NodeNames = []string{}
	n.CmdPrinter = cmdPrinter
	n.CmdPrinterOutput = false

	if output {
		n.CmdPrinterOutput = true
	}
	if len(args) != 0 {
		n.NodeNames = append(n.NodeNames, args...)
	}
	return nil
}
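// The snippets in this section share one calling convention: PrinterForCommand
// returns a printer, a boolean reporting whether a generic output format
// (-o/--output) was requested, and an error. The sketch below captures just that
// contract with local stand-ins; objectPrinter, plainPrinter and
// printerForOutputFlag are illustrative names, not the real cmdutil or kubectl
// API, and the exact upstream signature varies between releases.
package main

import (
	"fmt"
	"io"
	"os"
)

// objectPrinter is a reduced stand-in for kubectl.ResourcePrinter.
type objectPrinter interface {
	PrintObj(obj interface{}, w io.Writer) error
}

// plainPrinter is a toy printer that keeps the sketch runnable.
type plainPrinter struct{}

func (plainPrinter) PrintObj(obj interface{}, w io.Writer) error {
	_, err := fmt.Fprintf(w, "%v\n", obj)
	return err
}

// printerForOutputFlag mimics the (printer, requested, error) shape used above:
// the boolean is false when the user asked for no output format at all.
func printerForOutputFlag(output string) (objectPrinter, bool, error) {
	if output == "" {
		return nil, false, nil
	}
	return plainPrinter{}, true, nil
}

func main() {
	printer, requested, err := printerForOutputFlag("name")
	if err != nil {
		panic(err)
	}
	if !requested {
		fmt.Println("no --output given; fall back to the command's default behaviour")
		return
	}
	_ = printer.PrintObj(map[string]string{"kind": "Node", "name": "node-1"}, os.Stdout)
}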
func NewCmdConfigView(out io.Writer, ConfigAccess clientcmd.ConfigAccess) *cobra.Command {
	options := &ViewOptions{ConfigAccess: ConfigAccess}
	// Default to yaml
	defaultOutputFormat := "yaml"

	cmd := &cobra.Command{
		Use:     "view",
		Short:   "Displays merged kubeconfig settings or a specified kubeconfig file.",
		Long:    view_long,
		Example: view_example,
		Run: func(cmd *cobra.Command, args []string) {
			options.Complete()
			outputFormat := cmdutil.GetFlagString(cmd, "output")
			if outputFormat == "wide" {
				fmt.Printf("--output wide is not available in kubectl config view; reset to default output format (%s)\n\n", defaultOutputFormat)
				cmd.Flags().Set("output", defaultOutputFormat)
			}
			if outputFormat == "" {
				fmt.Printf("reset to default output format (%s) as --output is empty", defaultOutputFormat)
				cmd.Flags().Set("output", defaultOutputFormat)
			}

			printer, _, err := cmdutil.PrinterForCommand(cmd)
			cmdutil.CheckErr(err)
			version, err := cmdutil.OutputVersion(cmd, &latest.ExternalVersion)
			cmdutil.CheckErr(err)
			printer = kubectl.NewVersionedPrinter(printer, clientcmdapi.Scheme, version)

			cmdutil.CheckErr(options.Run(out, printer))
		},
	}

	cmdutil.AddPrinterFlags(cmd)
	cmd.Flags().Set("output", defaultOutputFormat)

	options.Merge.Default(true)
	f := cmd.Flags().VarPF(&options.Merge, "merge", "", "merge together the full hierarchy of kubeconfig files")
	f.NoOptDefVal = "true"
	cmd.Flags().BoolVar(&options.RawByteData, "raw", false, "display raw byte data")
	cmd.Flags().BoolVar(&options.Flatten, "flatten", false, "flatten the resulting kubeconfig file into self contained output (useful for creating portable kubeconfig files)")
	cmd.Flags().BoolVar(&options.Minify, "minify", false, "remove all information not used by current-context from the output")
	return cmd
}
// Preview the configuration if required - returns true|false and errors.
func previewConfiguration(c *ipfailover.Configurator, cmd *cobra.Command, out io.Writer) (bool, error) {
	p, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return true, fmt.Errorf("Error configuring printer: %v", err)
	}

	// Check if we are outputting info.
	if !output {
		return false, nil
	}

	configList, err := c.Generate()
	if err != nil {
		return true, fmt.Errorf("Error generating config: %v", err)
	}

	if err := p.PrintObj(configList, out); err != nil {
		return true, fmt.Errorf("Unable to print object: %v", err)
	}

	return true, nil
}
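// previewConfiguration above reports (handled, err): true means the generated
// objects were printed (or printing failed) and the caller should stop before
// touching the cluster. A minimal sketch of that caller-side pattern follows;
// runWithPreview and its two function arguments are hypothetical stand-ins
// rather than the real ipfailover command wiring.
func runWithPreview(preview func() (bool, error), apply func() error) error {
	handled, err := preview()
	if err != nil {
		return err
	}
	if handled {
		// --output was set: the configuration was printed instead of created.
		return nil
	}
	return apply()
}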
func TestImageLocal(t *testing.T) {
	f, tf, _, ns := cmdtesting.NewAPIFactory()
	tf.Client = &fake.RESTClient{
		NegotiatedSerializer: ns,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			t.Fatalf("unexpected request: %s %#v\n%#v", req.Method, req.URL, req)
			return nil, nil
		}),
	}
	tf.Namespace = "test"
	tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}

	buf := bytes.NewBuffer([]byte{})
	cmd := NewCmdImage(f, buf, buf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("output", "name")
	tf.Printer, _, _ = cmdutil.PrinterForCommand(cmd)

	opts := ImageOptions{FilenameOptions: resource.FilenameOptions{
		Filenames: []string{"../../../../examples/storage/cassandra/cassandra-controller.yaml"}},
		Out:   buf,
		Local: true}
	err := opts.Complete(f, cmd, []string{"cassandra=thingy"})
	if err == nil {
		err = opts.Validate()
	}
	if err == nil {
		err = opts.Run()
	}
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !strings.Contains(buf.String(), "replicationcontroller/cassandra") {
		t.Errorf("did not set image: %s", buf.String())
	}
}
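// The test above asserts --local behaviour by wiring in a fake HTTP client whose
// handler fails the test on any request. The same idea can be expressed with a
// plain http.RoundTripper; this is a generic sketch, not part of the kubectl
// fake package, and failingTransport / newOfflineClient are illustrative names.
package example

import (
	"fmt"
	"net/http"
	"testing"
)

// failingTransport records a test failure the moment any request is attempted,
// which proves that the code path under test never talks to the API server.
type failingTransport struct {
	t *testing.T
}

func (f failingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	f.t.Errorf("unexpected request: %s %s", req.Method, req.URL)
	return nil, fmt.Errorf("request blocked by failingTransport: %s %s", req.Method, req.URL)
}

// newOfflineClient returns an *http.Client that is safe to hand to code that
// must not perform network calls during the test.
func newOfflineClient(t *testing.T) *http.Client {
	return &http.Client{Transport: failingTransport{t: t}}
}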
func NewCmdConfigView(out io.Writer, ConfigAccess ConfigAccess) *cobra.Command {
	options := &ViewOptions{ConfigAccess: ConfigAccess}

	cmd := &cobra.Command{
		Use:     "view",
		Short:   "Displays merged kubeconfig settings or a specified kubeconfig file.",
		Long:    view_long,
		Example: view_example,
		Run: func(cmd *cobra.Command, args []string) {
			options.Complete()

			printer, _, err := cmdutil.PrinterForCommand(cmd)
			if err != nil {
				glog.FatalDepth(1, err)
			}
			version := cmdutil.OutputVersion(cmd, latest.Version)
			printer = kubectl.NewVersionedPrinter(printer, clientcmdapi.Scheme, version)

			if err := options.Run(out, printer); err != nil {
				glog.FatalDepth(1, err)
			}
		},
	}

	cmdutil.AddPrinterFlags(cmd)
	// Default to yaml
	cmd.Flags().Set("output", "yaml")

	options.Merge.Default(true)
	cmd.Flags().Var(&options.Merge, "merge", "merge together the full hierarchy of kubeconfig files")
	cmd.Flags().BoolVar(&options.RawByteData, "raw", false, "display raw byte data")
	cmd.Flags().BoolVar(&options.Flatten, "flatten", false, "flatten the resulting kubeconfig file into self contained output (useful for creating portable kubeconfig files)")
	cmd.Flags().BoolVar(&options.Minify, "minify", false, "remove all information not used by current-context from the output")
	return cmd
}
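// Both NewCmdConfigView variants wrap the chosen printer in a versioned printer
// so objects are converted to an external API version before rendering. The
// decorator shape is sketched below, reusing the objectPrinter stand-in from the
// earlier sketch and assuming the standard io package is imported; it does not
// mirror the exact kubectl.NewVersionedPrinter signature.
// versionedPrinter converts each object before delegating to the wrapped printer.
type versionedPrinter struct {
	delegate objectPrinter
	convert  func(interface{}) (interface{}, error)
}

func (v versionedPrinter) PrintObj(obj interface{}, w io.Writer) error {
	converted, err := v.convert(obj)
	if err != nil {
		return err
	}
	return v.delegate.PrintObj(converted, w)
}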
// RunCmdRouter contains all the necessary functionality for the // OpenShift CLI router command. func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error { var name string switch len(args) { case 0: name = "router" case 1: name = args[0] default: return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router") } if len(cfg.StatsUsername) > 0 { if strings.Contains(cfg.StatsUsername, ":") { return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername) } } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { glog.Fatal(err) } // For the host networking case, ensure the ports match. if cfg.HostNetwork { for i := 0; i < len(ports); i++ { if ports[i].ContainerPort != ports[i].HostPort { return cmdutil.UsageError(cmd, "For host networking mode, please ensure that the container [%v] and host [%v] ports match", ports[i].ContainerPort, ports[i].HostPort) } } } if cfg.StatsPort > 0 { ports = append(ports, kapi.ContainerPort{ Name: "stats", HostPort: cfg.StatsPort, ContainerPort: cfg.StatsPort, Protocol: kapi.ProtocolTCP, }) } label := map[string]string{"router": name} if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing router %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("router %q does not exist (no service)", name) } if len(cfg.ServiceAccount) == 0 { return fmt.Errorf("router could not be created; you must specify a service account with --service-account") } err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount) if err != nil { return fmt.Errorf("router could not be created; %v", err) } // create new router if len(cfg.Credentials) == 0 { return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could 
not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } defaultCert, err := loadCert(cfg.DefaultCertificate) if err != nil { return fmt.Errorf("router could not be created; error reading default certificate file: %v", err) } if len(cfg.StatsPassword) == 0 { cfg.StatsPassword = generateStatsPassword() fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword) } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, "DEFAULT_CERTIFICATE": defaultCert, "ROUTER_SERVICE_NAME": name, "ROUTER_SERVICE_NAMESPACE": namespace, "ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost, "ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername, "ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword, "ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver, "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver, "ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure), "ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath, "ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath, "STATS_PORT": strconv.Itoa(cfg.StatsPort), "STATS_USERNAME": cfg.StatsUsername, "STATS_PASSWORD": cfg.StatsPassword, } updatePercent := int(-25) secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace) if err != nil { return fmt.Errorf("router could not be created: %v", err) } livenessProbe := generateLivenessProbeConfig(cfg, ports) containers := []kapi.Container{ { Name: "router", Image: image, Ports: ports, Env: env.List(), LivenessProbe: livenessProbe, ImagePullPolicy: kapi.PullIfNotPresent, VolumeMounts: mounts, }, } if cfg.StatsPort > 0 && cfg.ExposeMetrics { pc := generateMetricsExporterContainer(cfg, env) if pc != nil { containers = append(containers, *pc) } } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: dapi.DeploymentTemplate{ Strategy: dapi.DeploymentStrategy{ Type: dapi.DeploymentStrategyTypeRolling, RollingParams: &dapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent}, }, ControllerTemplate: kapi.ReplicationControllerSpec{ Replicas: cfg.Replicas, Selector: label, Template: &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ SecurityContext: &kapi.PodSecurityContext{ HostNetwork: cfg.HostNetwork, }, ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: containers, Volumes: volumes, }, }, }, }, }, } if len(secrets) != 0 { serviceAccount, err := kClient.ServiceAccounts(namespace).Get(cfg.ServiceAccount) if err != nil { return fmt.Errorf("error looking up service account %s: %v", cfg.ServiceAccount, err) } for _, secret := range secrets { objects = append(objects, secret) serviceAccount.Secrets = append(serviceAccount.Secrets, kapi.ObjectReference{Name: secret.Name}) } _, err = kClient.ServiceAccounts(namespace).Update(serviceAccount) if err != nil { return fmt.Errorf("error adding secret key to service account %s: %v", cfg.ServiceAccount, err) } } objects = 
app.AddServices(objects, true) // TODO: label all created objects with the same label - router=<name> list := &kapi.List{Items: objects} if output { if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("Unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Router %q service exists\n", name) return nil }
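// RunCmdRouter above (and RunCmdRegistry below) follow an "ensure" shape: look
// up the existing service, treat IsNotFound as "needs to be created", treat any
// other error as fatal, and skip creation when the object already exists. A
// condensed sketch of that decision, assuming fmt is imported; lookup and
// notFound are hypothetical stand-ins for kClient.Services(namespace).Get and
// errors.IsNotFound.
func shouldGenerate(outputOnly bool, lookup func() error, notFound func(error) bool) (bool, error) {
	if outputOnly {
		// -o/--output was given: always generate so the objects can be printed.
		return true, nil
	}
	err := lookup()
	if err == nil {
		// the object already exists; nothing to create
		return false, nil
	}
	if !notFound(err) {
		return false, fmt.Errorf("can't check for existing object: %v", err)
	}
	return true, nil
}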
// RunGet implements the generic Get command // TODO: convert all direct flag accessors to a struct and pass that instead of cmd func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, options *GetOptions) error { selector := cmdutil.GetFlagString(cmd, "selector") allNamespaces := cmdutil.GetFlagBool(cmd, "all-namespaces") mapper, typer := f.Object() cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { return err } if len(args) == 0 && len(options.Filenames) == 0 { fmt.Fprint(out, "You must specify the type of resource to get. ", valid_resources) return cmdutil.UsageError(cmd, "Required resource not specified.") } // always show resources when getting by name or filename argsHasNames, err := resource.HasNames(args) if err != nil { return err } if len(options.Filenames) > 0 || argsHasNames { cmd.Flag("show-all").Value.Set("true") } export := cmdutil.GetFlagBool(cmd, "export") // handle watch separately since we cannot watch multiple resource types isWatch, isWatchOnly := cmdutil.GetFlagBool(cmd, "watch"), cmdutil.GetFlagBool(cmd, "watch-only") if isWatch || isWatchOnly { r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). FilenameParam(enforceNamespace, options.Filenames...). SelectorParam(selector). ExportParam(export). ResourceTypeOrNameArgs(true, args...). SingleResourceType(). Latest(). Do() err := r.Err() if err != nil { return err } infos, err := r.Infos() if err != nil { return err } if len(infos) != 1 { return fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(infos)) } info := infos[0] mapping := info.ResourceMapping() printer, err := f.PrinterForMapping(cmd, mapping, allNamespaces) if err != nil { return err } obj, err := r.Object() if err != nil { return err } rv, err := mapping.MetadataAccessor.ResourceVersion(obj) if err != nil { return err } // print the current object if !isWatchOnly { if err := printer.PrintObj(obj, out); err != nil { return fmt.Errorf("unable to output the provided object: %v", err) } } // print watched changes w, err := r.Watch(rv) if err != nil { return err } kubectl.WatchLoop(w, func(e watch.Event) error { return printer.PrintObj(e.Object, out) }) return nil } b := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). FilenameParam(enforceNamespace, options.Filenames...). SelectorParam(selector). ExportParam(export). ResourceTypeOrNameArgs(true, args...). ContinueOnError(). 
Latest() printer, generic, err := cmdutil.PrinterForCommand(cmd) if err != nil { return err } if generic { clientConfig, err := f.ClientConfig() if err != nil { return err } singular := false r := b.Flatten().Do() infos, err := r.IntoSingular(&singular).Infos() if err != nil { return err } // the outermost object will be converted to the output-version, but inner // objects can use their mappings version, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion) if err != nil { return err } obj, err := resource.AsVersionedObject(infos, !singular, version.String(), f.JSONEncoder()) if err != nil { return err } return printer.PrintObj(obj, out) } infos, err := b.Flatten().Do().Infos() if err != nil { return err } objs := make([]runtime.Object, len(infos)) for ix := range infos { objs[ix] = infos[ix].Object } sorting, err := cmd.Flags().GetString("sort-by") var sorter *kubectl.RuntimeSort if err == nil && len(sorting) > 0 && len(objs) > 1 { // TODO: questionable if sorter, err = kubectl.SortObjects(f.Decoder(true), objs, sorting); err != nil { return err } } // use the default printer for each object printer = nil var lastMapping *meta.RESTMapping w := kubectl.GetNewTabWriter(out) defer w.Flush() for ix := range objs { var mapping *meta.RESTMapping if sorter != nil { mapping = infos[sorter.OriginalPosition(ix)].Mapping } else { mapping = infos[ix].Mapping } if printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource { printer, err = f.PrinterForMapping(cmd, mapping, allNamespaces) if err != nil { return err } lastMapping = mapping } if _, found := printer.(*kubectl.HumanReadablePrinter); found { if err := printer.PrintObj(objs[ix], w); err != nil { return err } continue } if err := printer.PrintObj(objs[ix], out); err != nil { return err } } return nil }
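// The tail of RunGet above rebuilds the printer only when the REST mapping of
// the current object differs from the previous one, so each resource type gets
// a single table header. That caching logic is isolated below; the parameters
// are hypothetical stand-ins for the mapping's Resource field,
// f.PrinterForMapping, and printer.PrintObj.
func printGrouped(keys []string, buildPrinter func(key string) (func(row string) error, error), rows []string) error {
	var (
		lastKey  string
		printRow func(row string) error
	)
	for i, row := range rows {
		key := keys[i]
		if printRow == nil || key != lastKey {
			p, err := buildPrinter(key)
			if err != nil {
				return err
			}
			printRow = p
			lastKey = key
		}
		if err := printRow(row); err != nil {
			return err
		}
	}
	return nil
}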
// NewFactory creates an object that holds common methods across all OpenShift commands func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory { restMapper := registered.RESTMapper() clients := &clientCache{ clients: make(map[string]*client.Client), configs: make(map[string]*restclient.Config), loader: clientConfig, } w := &Factory{ Factory: cmdutil.NewFactory(clientConfig), OpenShiftClientConfig: clientConfig, clients: clients, ImageResolutionOptions: &imageResolutionOptions{}, } w.Object = func(bool) (meta.RESTMapper, runtime.ObjectTyper) { defaultMapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: restMapper}} defaultTyper := api.Scheme // Output using whatever version was negotiated in the client cache. The // version we decode with may not be the same as what the server requires. cfg, err := clients.ClientConfigForVersion(nil) if err != nil { return defaultMapper, defaultTyper } cmdApiVersion := unversioned.GroupVersion{} if cfg.GroupVersion != nil { cmdApiVersion = *cfg.GroupVersion } // at this point we've negotiated and can get the client oclient, err := clients.ClientForVersion(nil) if err != nil { return defaultMapper, defaultTyper } cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host) cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(oclient.RESTClient), cacheDir, time.Duration(10*time.Minute)) // if we can't find the server version or its too old to have Kind information in the discovery doc, skip the discovery RESTMapper // and use our hardcoded levels mapper := registered.RESTMapper() if serverVersion, err := cachedDiscoverClient.ServerVersion(); err == nil && useDiscoveryRESTMapper(serverVersion.GitVersion) { mapper = restmapper.NewDiscoveryRESTMapper(cachedDiscoverClient) } mapper = NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper}) return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme } w.UnstructuredObject = func() (meta.RESTMapper, runtime.ObjectTyper, error) { // load a discovery client from the default config cfg, err := clients.ClientConfigForVersion(nil) if err != nil { return nil, nil, err } dc, err := discovery.NewDiscoveryClientForConfig(cfg) if err != nil { return nil, nil, err } cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host) cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(dc.RESTClient), cacheDir, time.Duration(10*time.Minute)) // enumerate all group resources groupResources, err := discovery.GetAPIGroupResources(cachedDiscoverClient) if err != nil { return nil, nil, err } // Register unknown APIs as third party for now to make // validation happy. TODO perhaps make a dynamic schema // validator to avoid this. 
for _, group := range groupResources { for _, version := range group.Group.Versions { gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version} if !registered.IsRegisteredVersion(gv) { registered.AddThirdPartyAPIGroupVersions(gv) } } } // construct unstructured mapper and typer mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured) typer := discovery.NewUnstructuredObjectTyper(groupResources) return NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper}), typer, nil } kClientForMapping := w.Factory.ClientForMapping w.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) { if latest.OriginKind(mapping.GroupVersionKind) { mappingVersion := mapping.GroupVersionKind.GroupVersion() client, err := clients.ClientForVersion(&mappingVersion) if err != nil { return nil, err } return client.RESTClient, nil } return kClientForMapping(mapping) } kUnstructuredClientForMapping := w.Factory.UnstructuredClientForMapping w.UnstructuredClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) { if latest.OriginKind(mapping.GroupVersionKind) { cfg, err := clientConfig.ClientConfig() if err != nil { return nil, err } if err := client.SetOpenShiftDefaults(cfg); err != nil { return nil, err } cfg.APIPath = "/apis" if mapping.GroupVersionKind.Group == api.GroupName { cfg.APIPath = "/oapi" } gv := mapping.GroupVersionKind.GroupVersion() cfg.ContentConfig = dynamic.ContentConfig() cfg.GroupVersion = &gv return restclient.RESTClientFor(cfg) } return kUnstructuredClientForMapping(mapping) } // Save original Describer function kDescriberFunc := w.Factory.Describer w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, error) { if latest.OriginKind(mapping.GroupVersionKind) { oClient, kClient, err := w.Clients() if err != nil { return nil, fmt.Errorf("unable to create client %s: %v", mapping.GroupVersionKind.Kind, err) } mappingVersion := mapping.GroupVersionKind.GroupVersion() cfg, err := clients.ClientConfigForVersion(&mappingVersion) if err != nil { return nil, fmt.Errorf("unable to load a client %s: %v", mapping.GroupVersionKind.Kind, err) } describer, ok := describe.DescriberFor(mapping.GroupVersionKind.GroupKind(), oClient, kClient, cfg.Host) if !ok { return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) } return describer, nil } return kDescriberFunc(mapping) } kScalerFunc := w.Factory.Scaler w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { if mapping.GroupVersionKind.GroupKind() == deployapi.Kind("DeploymentConfig") { oc, kc, err := w.Clients() if err != nil { return nil, err } return deploycmd.NewDeploymentConfigScaler(oc, kc), nil } return kScalerFunc(mapping) } kReaperFunc := w.Factory.Reaper w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { switch mapping.GroupVersionKind.GroupKind() { case deployapi.Kind("DeploymentConfig"): oc, kc, err := w.Clients() if err != nil { return nil, err } return deploycmd.NewDeploymentConfigReaper(oc, kc), nil case authorizationapi.Kind("Role"): oc, _, err := w.Clients() if err != nil { return nil, err } return authorizationreaper.NewRoleReaper(oc, oc), nil case authorizationapi.Kind("ClusterRole"): oc, _, err := w.Clients() if err != nil { return nil, err } return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil case userapi.Kind("User"): oc, kc, err := w.Clients() if err != nil { return nil, err } return authenticationreaper.NewUserReaper( 
client.UsersInterface(oc), client.GroupsInterface(oc), client.ClusterRoleBindingsInterface(oc), client.RoleBindingsNamespacer(oc), kclient.SecurityContextConstraintsInterface(kc), ), nil case userapi.Kind("Group"): oc, kc, err := w.Clients() if err != nil { return nil, err } return authenticationreaper.NewGroupReaper( client.GroupsInterface(oc), client.ClusterRoleBindingsInterface(oc), client.RoleBindingsNamespacer(oc), kclient.SecurityContextConstraintsInterface(kc), ), nil case buildapi.Kind("BuildConfig"): oc, _, err := w.Clients() if err != nil { return nil, err } return buildcmd.NewBuildConfigReaper(oc), nil } return kReaperFunc(mapping) } kGenerators := w.Factory.Generators w.Generators = func(cmdName string) map[string]kubectl.Generator { originGenerators := DefaultGenerators(cmdName) kubeGenerators := kGenerators(cmdName) ret := map[string]kubectl.Generator{} for k, v := range kubeGenerators { ret[k] = v } for k, v := range originGenerators { ret[k] = v } return ret } kMapBasedSelectorForObjectFunc := w.Factory.MapBasedSelectorForObject w.MapBasedSelectorForObject = func(object runtime.Object) (string, error) { switch t := object.(type) { case *deployapi.DeploymentConfig: return kubectl.MakeLabels(t.Spec.Selector), nil default: return kMapBasedSelectorForObjectFunc(object) } } kPortsForObjectFunc := w.Factory.PortsForObject w.PortsForObject = func(object runtime.Object) ([]string, error) { switch t := object.(type) { case *deployapi.DeploymentConfig: return getPorts(t.Spec.Template.Spec), nil default: return kPortsForObjectFunc(object) } } kLogsForObjectFunc := w.Factory.LogsForObject w.LogsForObject = func(object, options runtime.Object) (*restclient.Request, error) { switch t := object.(type) { case *deployapi.DeploymentConfig: dopts, ok := options.(*deployapi.DeploymentLogOptions) if !ok { return nil, errors.New("provided options object is not a DeploymentLogOptions") } oc, _, err := w.Clients() if err != nil { return nil, err } return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil case *buildapi.Build: bopts, ok := options.(*buildapi.BuildLogOptions) if !ok { return nil, errors.New("provided options object is not a BuildLogOptions") } if bopts.Version != nil { return nil, errors.New("cannot specify a version and a build") } oc, _, err := w.Clients() if err != nil { return nil, err } return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil case *buildapi.BuildConfig: bopts, ok := options.(*buildapi.BuildLogOptions) if !ok { return nil, errors.New("provided options object is not a BuildLogOptions") } oc, _, err := w.Clients() if err != nil { return nil, err } builds, err := oc.Builds(t.Namespace).List(api.ListOptions{}) if err != nil { return nil, err } builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigPredicate(t.Name)) if len(builds.Items) == 0 { return nil, fmt.Errorf("no builds found for %q", t.Name) } if bopts.Version != nil { // If a version has been specified, try to get the logs from that build. desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version)) return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil } sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items))) return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil default: return kLogsForObjectFunc(object, options) } } // Saves current resource name (or alias if any) in PrintOptions. 
Once saved, it will not be overwritten by the // kubernetes resource alias look-up, as it will notice a non-empty value in `options.Kind` w.Printer = func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) { if mapping != nil { options.Kind = mapping.Resource if alias, ok := resourceShortFormFor(mapping.Resource); ok { options.Kind = alias } } return describe.NewHumanReadablePrinter(options), nil } // PrintResourceInfos receives a list of resource infos and prints versioned objects if a generic output format was specified // otherwise, it iterates through info objects, printing each resource with a unique printer for its mapping w.PrintResourceInfos = func(cmd *cobra.Command, infos []*resource.Info, out io.Writer) error { printer, generic, err := cmdutil.PrinterForCommand(cmd) if err != nil { return nil } if !generic { for _, info := range infos { mapping := info.ResourceMapping() printer, err := w.PrinterForMapping(cmd, mapping, false) if err != nil { return err } if err := printer.PrintObj(info.Object, out); err != nil { return nil } } return nil } clientConfig, err := w.ClientConfig() if err != nil { return err } outputVersion, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion) if err != nil { return err } object, err := resource.AsVersionedObject(infos, len(infos) != 1, outputVersion, api.Codecs.LegacyCodec(outputVersion)) if err != nil { return err } return printer.PrintObj(object, out) } kCanBeExposed := w.Factory.CanBeExposed w.CanBeExposed = func(kind unversioned.GroupKind) error { if kind == deployapi.Kind("DeploymentConfig") { return nil } return kCanBeExposed(kind) } kCanBeAutoscaled := w.Factory.CanBeAutoscaled w.CanBeAutoscaled = func(kind unversioned.GroupKind) error { if kind == deployapi.Kind("DeploymentConfig") { return nil } return kCanBeAutoscaled(kind) } kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) { switch t := object.(type) { case *deployapi.DeploymentConfig: _, kc, err := w.Clients() if err != nil { return nil, err } selector := labels.SelectorFromSet(t.Spec.Selector) f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := cmdutil.GetFirstPod(kc, t.Namespace, selector, 1*time.Minute, f) return pod, err default: return kAttachablePodForObjectFunc(object) } } kUpdatePodSpecForObject := w.Factory.UpdatePodSpecForObject w.UpdatePodSpecForObject = func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) { switch t := obj.(type) { case *deployapi.DeploymentConfig: template := t.Spec.Template if template == nil { t.Spec.Template = template template = &api.PodTemplateSpec{} } return true, fn(&template.Spec) default: return kUpdatePodSpecForObject(obj, fn) } } kProtocolsForObject := w.Factory.ProtocolsForObject w.ProtocolsForObject = func(object runtime.Object) (map[string]string, error) { switch t := object.(type) { case *deployapi.DeploymentConfig: return getProtocols(t.Spec.Template.Spec), nil default: return kProtocolsForObject(object) } } kSwaggerSchemaFunc := w.Factory.SwaggerSchema w.Factory.SwaggerSchema = func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { if !latest.OriginKind(gvk) { return kSwaggerSchemaFunc(gvk) } // TODO: we need to register the OpenShift API under the Kube group, and start returning the OpenShift // group from the scheme. 
oc, _, err := w.Clients() if err != nil { return nil, err } return w.OriginSwaggerSchema(oc.RESTClient, gvk.GroupVersion()) } w.EditorEnvs = func() []string { return []string{"OC_EDITOR", "EDITOR"} } w.PrintObjectSpecificMessage = func(obj runtime.Object, out io.Writer) {} kPauseObjectFunc := w.Factory.PauseObject w.Factory.PauseObject = func(object runtime.Object) (bool, error) { switch t := object.(type) { case *deployapi.DeploymentConfig: if t.Spec.Paused { return true, nil } t.Spec.Paused = true oc, _, err := w.Clients() if err != nil { return false, err } _, err = oc.DeploymentConfigs(t.Namespace).Update(t) // TODO: Pause the deployer containers. return false, err default: return kPauseObjectFunc(object) } } kResumeObjectFunc := w.Factory.ResumeObject w.Factory.ResumeObject = func(object runtime.Object) (bool, error) { switch t := object.(type) { case *deployapi.DeploymentConfig: if !t.Spec.Paused { return true, nil } t.Spec.Paused = false oc, _, err := w.Clients() if err != nil { return false, err } _, err = oc.DeploymentConfigs(t.Namespace).Update(t) // TODO: Resume the deployer containers. return false, err default: return kResumeObjectFunc(object) } } kResolveImageFunc := w.Factory.ResolveImage w.Factory.ResolveImage = func(image string) (string, error) { options := w.ImageResolutionOptions.(*imageResolutionOptions) if imageutil.IsDocker(options.Source) { return kResolveImageFunc(image) } oc, _, err := w.Clients() if err != nil { return "", err } namespace, _, err := w.DefaultNamespace() if err != nil { return "", err } return imageutil.ResolveImagePullSpec(oc, oc, options.Source, image, namespace) } kHistoryViewerFunc := w.Factory.HistoryViewer w.Factory.HistoryViewer = func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { switch mapping.GroupVersionKind.GroupKind() { case deployapi.Kind("DeploymentConfig"): oc, kc, err := w.Clients() if err != nil { return nil, err } return deploycmd.NewDeploymentConfigHistoryViewer(oc, kc), nil } return kHistoryViewerFunc(mapping) } kRollbackerFunc := w.Factory.Rollbacker w.Factory.Rollbacker = func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { switch mapping.GroupVersionKind.GroupKind() { case deployapi.Kind("DeploymentConfig"): oc, _, err := w.Clients() if err != nil { return nil, err } return deploycmd.NewDeploymentConfigRollbacker(oc), nil } return kRollbackerFunc(mapping) } kStatusViewerFunc := w.Factory.StatusViewer w.Factory.StatusViewer = func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { oc, _, err := w.Clients() if err != nil { return nil, err } switch mapping.GroupVersionKind.GroupKind() { case deployapi.Kind("DeploymentConfig"): return deploycmd.NewDeploymentConfigStatusViewer(oc), nil } return kStatusViewerFunc(mapping) } return w }
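// NewFactory above customizes behaviour by capturing each upstream helper
// (kScalerFunc, kReaperFunc, ...) and installing a wrapper that handles
// OpenShift kinds itself and delegates everything else to the original. A
// reduced sketch of that wrap-and-delegate shape, with hypothetical names:
func wrapHandler(delegate func(kind string) (string, error), isOriginKind func(string) bool, originHandler func(string) (string, error)) func(kind string) (string, error) {
	return func(kind string) (string, error) {
		if isOriginKind(kind) {
			return originHandler(kind)
		}
		// fall back to the captured upstream implementation
		return delegate(kind)
	}
}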
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return cmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry if len(cfg.Credentials) == 0 { return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } healthzPort := defaultPort if len(ports) > 0 { healthzPort = ports[0].ContainerPort env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort) env["REGISTRY_HTTP_NET"] = "tcp" } livenessProbe := generateLivenessProbeConfig(healthzPort) readinessProbe := 
generateReadinessProbeConfig(healthzPort) secretBytes := make([]byte, randomSecretSize) if _, err := cryptorand.Read(secretBytes); err != nil { return fmt.Errorf("registry does not exist; could not generate random bytes for HTTP secret: %v", err) } env["REGISTRY_HTTP_SECRET"] = base64.StdEncoding.EncodeToString(secretBytes) mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, Env: env.List(), VolumeMounts: []kapi.VolumeMount{ { Name: "registry-storage", MountPath: cfg.Volume, }, }, SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, }, }, Volumes: []kapi.Volume{ { Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }, }, }, } if mountHost { podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: dapi.DeploymentConfigSpec{ Replicas: cfg.Replicas, Selector: label, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: podTemplate, }, }, } objects = app.AddServices(objects, true) // Set registry service's sessionAffinity to ClientIP to prevent push // failures due to a use of poorly consistent storage shared by // multiple replicas. for _, obj := range objects { switch t := obj.(type) { case *kapi.Service: t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP } } // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil }
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return cmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry if len(cfg.Credentials) == 0 { return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, 
Env: env.List(), VolumeMounts: []kapi.VolumeMount{ { Name: "registry-storage", MountPath: cfg.Volume, }, }, SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, // TODO reenable the liveness probe when we no longer support the v1 registry. /* LivenessProbe: &kapi.Probe{ InitialDelaySeconds: 3, TimeoutSeconds: 5, Handler: kapi.Handler{ HTTPGet: &kapi.HTTPGetAction{ Path: "/healthz", Port: util.NewIntOrStringFromInt(5000), }, }, }, */ }, }, Volumes: []kapi.Volume{ { Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }, }, }, } if mountHost { podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: dapi.DeploymentTemplate{ ControllerTemplate: kapi.ReplicationControllerSpec{ Replicas: cfg.Replicas, Selector: label, Template: podTemplate, }, }, }, } objects = app.AddServices(objects, true) // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(out, os.Stderr), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil }
// RunGet implements the generic Get command // TODO: convert all direct flag accessors to a struct and pass that instead of cmd func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error { selector := cmdutil.GetFlagString(cmd, "selector") allNamespaces := cmdutil.GetFlagBool(cmd, "all-namespaces") mapper, typer := f.Object() cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { return err } filenames := cmdutil.GetFlagStringSlice(cmd, "filename") if len(args) == 0 && len(filenames) == 0 { fmt.Fprint(out, "You must specify the type of resource to get. ", valid_resources, ` * componentstatuses (aka 'cs') * endpoints (aka 'ep') `) return cmdutil.UsageError(cmd, "Required resource not specified.") } // handle watch separately since we cannot watch multiple resource types isWatch, isWatchOnly := cmdutil.GetFlagBool(cmd, "watch"), cmdutil.GetFlagBool(cmd, "watch-only") if isWatch || isWatchOnly { r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). FilenameParam(enforceNamespace, filenames...). SelectorParam(selector). ResourceTypeOrNameArgs(true, args...). SingleResourceType(). Latest(). Do() if err != nil { return err } infos, err := r.Infos() if err != nil { return err } if len(infos) != 1 { return fmt.Errorf("watch is only supported on a single resource - %d resources were found", len(infos)) } info := infos[0] mapping := info.ResourceMapping() printer, err := f.PrinterForMapping(cmd, mapping, allNamespaces) if err != nil { return err } obj, err := r.Object() if err != nil { return err } rv, err := mapping.MetadataAccessor.ResourceVersion(obj) if err != nil { return err } // print the current object if !isWatchOnly { if err := printer.PrintObj(obj, out); err != nil { return fmt.Errorf("unable to output the provided object: %v", err) } } // print watched changes w, err := r.Watch(rv) if err != nil { return err } kubectl.WatchLoop(w, func(e watch.Event) error { return printer.PrintObj(e.Object, out) }) return nil } b := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). FilenameParam(enforceNamespace, filenames...). SelectorParam(selector). ResourceTypeOrNameArgs(true, args...). ContinueOnError(). Latest() printer, generic, err := cmdutil.PrinterForCommand(cmd) if err != nil { return err } if generic { clientConfig, err := f.ClientConfig() if err != nil { return err } defaultVersion := clientConfig.Version singular := false r := b.Flatten().Do() infos, err := r.IntoSingular(&singular).Infos() if err != nil { return err } // the outermost object will be converted to the output-version, but inner // objects can use their mappings version := cmdutil.OutputVersion(cmd, defaultVersion) obj, err := resource.AsVersionedObject(infos, !singular, version) if err != nil { return err } return printer.PrintObj(obj, out) } // use the default printer for each object return b.Do().Visit(func(r *resource.Info) error { printer, err := f.PrinterForMapping(cmd, r.Mapping, allNamespaces) if err != nil { return err } return printer.PrintObj(r.Object, out) }) }
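// Every RunGet variant handles --watch the same way: print the object once
// (unless --watch-only), then keep printing each event from the watch stream
// with the same printer. A stripped-down sketch of that loop; the events
// channel and the two print callbacks are hypothetical stand-ins for the watch
// interface and printer calls.
func watchAndPrint(watchOnly bool, printCurrent func() error, events <-chan string, printEvent func(string) error) error {
	if !watchOnly {
		if err := printCurrent(); err != nil {
			return err
		}
	}
	for e := range events {
		if err := printEvent(e); err != nil {
			return err
		}
	}
	return nil
}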
// RunGet implements the generic Get command // TODO: convert all direct flag accessors to a struct and pass that instead of cmd func RunGet(f cmdutil.Factory, out, errOut io.Writer, cmd *cobra.Command, args []string, options *GetOptions) error { if len(options.Raw) > 0 { restClient, err := f.RESTClient() if err != nil { return err } stream, err := restClient.Get().RequestURI(options.Raw).Stream() if err != nil { return err } defer stream.Close() _, err = io.Copy(out, stream) if err != nil && err != io.EOF { return err } return nil } selector := cmdutil.GetFlagString(cmd, "selector") allNamespaces := cmdutil.GetFlagBool(cmd, "all-namespaces") showKind := cmdutil.GetFlagBool(cmd, "show-kind") mapper, typer, err := f.UnstructuredObject() if err != nil { return err } filterFuncs := f.DefaultResourceFilterFunc() filterOpts := f.DefaultResourceFilterOptions(cmd, allNamespaces) cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { return err } if allNamespaces { enforceNamespace = false } if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) { fmt.Fprint(errOut, "You must specify the type of resource to get. ", valid_resources) fullCmdName := cmd.Parent().CommandPath() usageString := "Required resource not specified." if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") { usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName) } return cmdutil.UsageError(cmd, usageString) } // always show resources when getting by name or filename argsHasNames, err := resource.HasNames(args) if err != nil { return err } if len(options.Filenames) > 0 || argsHasNames { cmd.Flag("show-all").Value.Set("true") } export := cmdutil.GetFlagBool(cmd, "export") // handle watch separately since we cannot watch multiple resource types isWatch, isWatchOnly := cmdutil.GetFlagBool(cmd, "watch"), cmdutil.GetFlagBool(cmd, "watch-only") if isWatch || isWatchOnly { r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.UnstructuredClientForMapping), runtime.UnstructuredJSONScheme). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). FilenameParam(enforceNamespace, &options.FilenameOptions). SelectorParam(selector). ExportParam(export). ResourceTypeOrNameArgs(true, args...). SingleResourceType(). Latest(). Do() err := r.Err() if err != nil { return err } infos, err := r.Infos() if err != nil { return err } if len(infos) != 1 { return fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(infos)) } info := infos[0] mapping := info.ResourceMapping() printer, err := f.PrinterForMapping(cmd, mapping, allNamespaces) if err != nil { return err } obj, err := r.Object() if err != nil { return err } // watching from resourceVersion 0, starts the watch at ~now and // will return an initial watch event. Starting form ~now, rather // the rv of the object will insure that we start the watch from // inside the watch window, which the rv of the object might not be. 
rv := "0" isList := meta.IsListType(obj) if isList { // the resourceVersion of list objects is ~now but won't return // an initial watch event rv, err = mapping.MetadataAccessor.ResourceVersion(obj) if err != nil { return err } } // print the current object filteredResourceCount := 0 if !isWatchOnly { if err := printer.PrintObj(obj, out); err != nil { return fmt.Errorf("unable to output the provided object: %v", err) } filteredResourceCount++ cmdutil.PrintFilterCount(filteredResourceCount, mapping.Resource, filterOpts) } // print watched changes w, err := r.Watch(rv) if err != nil { return err } first := true filteredResourceCount = 0 intr := interrupt.New(nil, w.Stop) intr.Run(func() error { _, err := watch.Until(0, w, func(e watch.Event) (bool, error) { if !isList && first { // drop the initial watch event in the single resource case first = false return false, nil } err := printer.PrintObj(e.Object, out) if err != nil { return false, err } filteredResourceCount++ cmdutil.PrintFilterCount(filteredResourceCount, mapping.Resource, filterOpts) return false, nil }) return err }) return nil } r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.UnstructuredClientForMapping), runtime.UnstructuredJSONScheme). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). FilenameParam(enforceNamespace, &options.FilenameOptions). SelectorParam(selector). ExportParam(export). ResourceTypeOrNameArgs(true, args...). ContinueOnError(). Latest(). Flatten(). Do() err = r.Err() if err != nil { return err } printer, generic, err := cmdutil.PrinterForCommand(cmd) if err != nil { return err } if generic { // we flattened the data from the builder, so we have individual items, but now we'd like to either: // 1. if there is more than one item, combine them all into a single list // 2. if there is a single item and that item is a list, leave it as its specific list // 3. 
if there is a single item and it is not a a list, leave it as a single item var errs []error singular := false infos, err := r.IntoSingular(&singular).Infos() if err != nil { if singular { return err } errs = append(errs, err) } if len(infos) == 0 && len(errs) == 0 { outputEmptyListWarning(errOut) } res := "" if len(infos) > 0 { res = infos[0].ResourceMapping().Resource } var obj runtime.Object if singular { obj = infos[0].Object } else { // we have more than one item, so coerce all items into a list list := &runtime.UnstructuredList{ Object: map[string]interface{}{ "kind": "List", "apiVersion": "v1", "metadata": map[string]interface{}{}, }, } for _, info := range infos { list.Items = append(list.Items, info.Object.(*runtime.Unstructured)) } obj = list } isList := meta.IsListType(obj) if isList { filteredResourceCount, items, err := cmdutil.FilterResourceList(obj, filterFuncs, filterOpts) if err != nil { return err } // take the filtered items and create a new list for display list := &runtime.UnstructuredList{ Object: map[string]interface{}{ "kind": "List", "apiVersion": "v1", "metadata": map[string]interface{}{}, }, } if listMeta, err := meta.ListAccessor(obj); err == nil { list.Object["selfLink"] = listMeta.GetSelfLink() list.Object["resourceVersion"] = listMeta.GetResourceVersion() } for _, item := range items { list.Items = append(list.Items, item.(*runtime.Unstructured)) } if err := printer.PrintObj(list, out); err != nil { errs = append(errs, err) } cmdutil.PrintFilterCount(filteredResourceCount, res, filterOpts) return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs))) } filteredResourceCount := 0 if isFiltered, err := filterFuncs.Filter(obj, filterOpts); !isFiltered { if err != nil { glog.V(2).Infof("Unable to filter resource: %v", err) } else if err := printer.PrintObj(obj, out); err != nil { errs = append(errs, err) } } else if isFiltered { filteredResourceCount++ } cmdutil.PrintFilterCount(filteredResourceCount, res, filterOpts) return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs))) } allErrs := []error{} errs := sets.NewString() infos, err := r.Infos() if err != nil { allErrs = append(allErrs, err) } if len(infos) == 0 && len(allErrs) == 0 { outputEmptyListWarning(errOut) } objs := make([]runtime.Object, len(infos)) for ix := range infos { objs[ix] = infos[ix].Object } sorting, err := cmd.Flags().GetString("sort-by") if err != nil { return err } var sorter *kubectl.RuntimeSort if len(sorting) > 0 && len(objs) > 1 { // TODO: questionable if sorter, err = kubectl.SortObjects(f.Decoder(true), objs, sorting); err != nil { return err } } // use the default printer for each object printer = nil var lastMapping *meta.RESTMapping w := kubectl.GetNewTabWriter(out) filteredResourceCount := 0 if resource.MultipleTypesRequested(args) || cmdutil.MustPrintWithKinds(objs, infos, sorter) { showKind = true } for ix := range objs { var mapping *meta.RESTMapping var original runtime.Object if sorter != nil { mapping = infos[sorter.OriginalPosition(ix)].Mapping original = infos[sorter.OriginalPosition(ix)].Object } else { mapping = infos[ix].Mapping original = infos[ix].Object } if printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource { if printer != nil { w.Flush() cmdutil.PrintFilterCount(filteredResourceCount, lastMapping.Resource, filterOpts) } printer, err = f.PrinterForMapping(cmd, mapping, allNamespaces) if err != nil { if !errs.Has(err.Error()) { errs.Insert(err.Error()) allErrs = append(allErrs, err) } 
continue } // add linebreak between resource groups (if there is more than one) // skip linebreak above first resource group noHeaders := cmdutil.GetFlagBool(cmd, "no-headers") if lastMapping != nil && !noHeaders { fmt.Fprintf(errOut, "%s\n", "") } lastMapping = mapping } // filter objects if filter has been defined for current object if isFiltered, err := filterFuncs.Filter(original, filterOpts); isFiltered { if err == nil { filteredResourceCount++ continue } if !errs.Has(err.Error()) { errs.Insert(err.Error()) allErrs = append(allErrs, err) } } if resourcePrinter, found := printer.(*kubectl.HumanReadablePrinter); found { resourceName := resourcePrinter.GetResourceKind() if mapping != nil { if resourceName == "" { resourceName = mapping.Resource } if alias, ok := kubectl.ResourceShortFormFor(mapping.Resource); ok { resourceName = alias } else if resourceName == "" { resourceName = "none" } } else { resourceName = "none" } if showKind { resourcePrinter.EnsurePrintWithKind(resourceName) } if err := printer.PrintObj(original, w); err != nil { if !errs.Has(err.Error()) { errs.Insert(err.Error()) allErrs = append(allErrs, err) } } continue } if err := printer.PrintObj(original, w); err != nil { if !errs.Has(err.Error()) { errs.Insert(err.Error()) allErrs = append(allErrs, err) } continue } } w.Flush() if printer != nil && lastMapping != nil { cmdutil.PrintFilterCount(filteredResourceCount, lastMapping.Resource, filterOpts) } return utilerrors.NewAggregate(allErrs) }
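The generic output path above coerces the individually flattened items back into a single List object before printing. The following standalone sketch (not from the original source) illustrates that coercion with plain maps standing in for runtime.Unstructured; the wrapInList helper is an illustrative assumption, not code from this repository.

package main

import (
	"encoding/json"
	"fmt"
)

// wrapInList mirrors the generic output rule above: a single item is printed
// as-is, while multiple items are wrapped in a synthetic v1 List.
func wrapInList(items []map[string]interface{}) map[string]interface{} {
	if len(items) == 1 {
		return items[0]
	}
	return map[string]interface{}{
		"kind":       "List",
		"apiVersion": "v1",
		"metadata":   map[string]interface{}{},
		"items":      items,
	}
}

func main() {
	pods := []map[string]interface{}{
		{"kind": "Pod", "apiVersion": "v1", "metadata": map[string]interface{}{"name": "a"}},
		{"kind": "Pod", "apiVersion": "v1", "metadata": map[string]interface{}{"name": "b"}},
	}
	out, _ := json.MarshalIndent(wrapInList(pods), "", "  ")
	fmt.Println(string(out))
}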
// RunGet implements the generic Get command // TODO: convert all direct flag accessors to a struct and pass that instead of cmd func RunGet(f *cmdutil.Factory, out io.Writer, errOut io.Writer, cmd *cobra.Command, args []string, options *GetOptions) error { if len(options.Raw) > 0 { restClient, err := f.RESTClient() if err != nil { return err } stream, err := restClient.Get().RequestURI(options.Raw).Stream() if err != nil { return err } defer stream.Close() for { buffer := make([]byte, 1024, 1024) bytesRead, err := stream.Read(buffer) if bytesRead > 0 { fmt.Printf("%s", string(buffer[:bytesRead])) } if err == io.EOF { return nil } if err != nil { return err } } } selector := cmdutil.GetFlagString(cmd, "selector") allNamespaces := cmdutil.GetFlagBool(cmd, "all-namespaces") showKind := cmdutil.GetFlagBool(cmd, "show-kind") mapper, typer := f.Object() printAll := false cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { return err } if allNamespaces { enforceNamespace = false } if len(args) == 0 && cmdutil.IsFilenameEmpty(options.Filenames) { fmt.Fprint(errOut, "You must specify the type of resource to get. ", valid_resources) return cmdutil.UsageError(cmd, "Required resource not specified.") } // determine if args contains "all" for _, a := range args { if a == "all" { printAll = true break } } // always show resources when getting by name or filename argsHasNames, err := resource.HasNames(args) if err != nil { return err } if len(options.Filenames) > 0 || argsHasNames { cmd.Flag("show-all").Value.Set("true") } export := cmdutil.GetFlagBool(cmd, "export") // handle watch separately since we cannot watch multiple resource types isWatch, isWatchOnly := cmdutil.GetFlagBool(cmd, "watch"), cmdutil.GetFlagBool(cmd, "watch-only") if isWatch || isWatchOnly { r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). FilenameParam(enforceNamespace, &options.FilenameOptions). SelectorParam(selector). ExportParam(export). ResourceTypeOrNameArgs(true, args...). SingleResourceType(). Latest(). Do() err := r.Err() if err != nil { return err } infos, err := r.Infos() if err != nil { return err } if len(infos) != 1 { return fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(infos)) } info := infos[0] mapping := info.ResourceMapping() printer, err := f.PrinterForMapping(cmd, mapping, allNamespaces) if err != nil { return err } obj, err := r.Object() if err != nil { return err } // watching from resourceVersion 0 starts the watch at ~now and // will return an initial watch event. Starting from ~now, rather than // the rv of the object, will ensure that we start the watch from // inside the watch window, which the rv of the object might not be. 
rv := "0" isList := meta.IsListType(obj) if isList { // the resourceVersion of list objects is ~now but won't return // an initial watch event rv, err = mapping.MetadataAccessor.ResourceVersion(obj) if err != nil { return err } } // print the current object if !isWatchOnly { if err := printer.PrintObj(obj, out); err != nil { return fmt.Errorf("unable to output the provided object: %v", err) } printer.AfterPrint(errOut, mapping.Resource) } // print watched changes w, err := r.Watch(rv) if err != nil { return err } first := true kubectl.WatchLoop(w, func(e watch.Event) error { if !isList && first { // drop the initial watch event in the single resource case first = false return nil } err := printer.PrintObj(e.Object, out) if err == nil { printer.AfterPrint(errOut, mapping.Resource) } return err }) return nil } r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)). NamespaceParam(cmdNamespace).DefaultNamespace().AllNamespaces(allNamespaces). FilenameParam(enforceNamespace, &options.FilenameOptions). SelectorParam(selector). ExportParam(export). ResourceTypeOrNameArgs(true, args...). ContinueOnError(). Latest(). Flatten(). Do() err = r.Err() if err != nil { return err } printer, generic, err := cmdutil.PrinterForCommand(cmd) if err != nil { return err } if generic { clientConfig, err := f.ClientConfig() if err != nil { return err } allErrs := []error{} singular := false infos, err := r.IntoSingular(&singular).Infos() if err != nil { if singular { return err } allErrs = append(allErrs, err) } // the outermost object will be converted to the output-version, but inner // objects can use their mappings version, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion) if err != nil { return err } res := "" if len(infos) > 0 { res = infos[0].ResourceMapping().Resource } obj, err := resource.AsVersionedObject(infos, !singular, version, f.JSONEncoder()) if err != nil { return err } if err := printer.PrintObj(obj, out); err != nil { allErrs = append(allErrs, err) } printer.AfterPrint(errOut, res) return utilerrors.NewAggregate(allErrs) } allErrs := []error{} infos, err := r.Infos() if err != nil { allErrs = append(allErrs, err) } objs := make([]runtime.Object, len(infos)) for ix := range infos { objs[ix] = infos[ix].Object } sorting, err := cmd.Flags().GetString("sort-by") if err != nil { return err } var sorter *kubectl.RuntimeSort if len(sorting) > 0 && len(objs) > 1 { clientConfig, err := f.ClientConfig() if err != nil { return err } version, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion) if err != nil { return err } for ix := range infos { objs[ix], err = infos[ix].Mapping.ConvertToVersion(infos[ix].Object, version) if err != nil { allErrs = append(allErrs, err) continue } } // TODO: questionable if sorter, err = kubectl.SortObjects(f.Decoder(true), objs, sorting); err != nil { return err } } // use the default printer for each object printer = nil var lastMapping *meta.RESTMapping w := kubectl.GetNewTabWriter(out) if mustPrintWithKinds(objs, infos, sorter, printAll) { showKind = true } for ix := range objs { var mapping *meta.RESTMapping var original runtime.Object if sorter != nil { mapping = infos[sorter.OriginalPosition(ix)].Mapping original = infos[sorter.OriginalPosition(ix)].Object } else { mapping = infos[ix].Mapping original = infos[ix].Object } if printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource { if printer != nil { w.Flush() printer.AfterPrint(errOut, 
lastMapping.Resource) } printer, err = f.PrinterForMapping(cmd, mapping, allNamespaces) if err != nil { allErrs = append(allErrs, err) continue } lastMapping = mapping } if resourcePrinter, found := printer.(*kubectl.HumanReadablePrinter); found { resourceName := resourcePrinter.GetResourceKind() if mapping != nil { if resourceName == "" { resourceName = mapping.Resource } if alias, ok := kubectl.ResourceShortFormFor(mapping.Resource); ok { resourceName = alias } else if resourceName == "" { resourceName = "none" } } else { resourceName = "none" } if showKind { resourcePrinter.EnsurePrintWithKind(resourceName) } if err := printer.PrintObj(original, w); err != nil { allErrs = append(allErrs, err) } continue } if err := printer.PrintObj(original, w); err != nil { allErrs = append(allErrs, err) continue } } w.Flush() if printer != nil { printer.AfterPrint(errOut, lastMapping.Resource) } return utilerrors.NewAggregate(allErrs) }
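RunGet's watch branch prints the current object once and then drops the first watch event in the single-resource case, because that event merely re-announces what was already printed. Below is a small, self-contained sketch (standard library only) of that drop-first-event behavior; the event type and printWatch helper are hypothetical stand-ins for watch.Event and the loop above.

package main

import "fmt"

type event struct {
	typ string
	obj string
}

// printWatch prints watch events, dropping the initial event when a single
// (non-list) resource is being watched, mirroring the logic in RunGet.
func printWatch(events <-chan event, isList bool) {
	first := true
	for e := range events {
		if !isList && first {
			first = false // drop the initial echo of the already-printed object
			continue
		}
		fmt.Printf("%s %s\n", e.typ, e.obj)
	}
}

func main() {
	ch := make(chan event, 3)
	ch <- event{"ADDED", "pod/a"}    // initial echo, dropped for a single resource
	ch <- event{"MODIFIED", "pod/a"} // printed
	ch <- event{"DELETED", "pod/a"}  // printed
	close(ch)
	printWatch(ch, false)
}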
// RunProcess contains all the necessary functionality for the OpenShift cli process command func RunProcess(f *clientcmd.Factory, in io.Reader, out, errout io.Writer, cmd *cobra.Command, args []string) error { templateName, templateParams := "", []string{} for _, s := range args { isValue := strings.Contains(s, "=") switch { case isValue: templateParams = append(templateParams, s) case !isValue && len(templateName) == 0: templateName = s case !isValue && len(templateName) > 0: return kcmdutil.UsageError(cmd, "template name must be specified only once: %s", s) } } if cmd.Flag("value").Changed || cmd.Flag("param").Changed { flagValues := getFlagStringArray(cmd, "param") cmdutil.WarnAboutCommaSeparation(errout, flagValues, "--param") templateParams = append(templateParams, flagValues...) } duplicatedKeys := sets.NewString() params, paramErr := app.ParseAndCombineEnvironment(templateParams, getFlagStringArray(cmd, "param-file"), in, func(key, file string) error { if file == "" { duplicatedKeys.Insert(key) } else { fmt.Fprintf(errout, "warning: Template parameter %q already defined, ignoring value from file %q", key, file) } return nil }) if len(duplicatedKeys) != 0 { return kcmdutil.UsageError(cmd, fmt.Sprintf("The following parameters were provided more than once: %s", strings.Join(duplicatedKeys.List(), ", "))) } filename := kcmdutil.GetFlagString(cmd, "filename") if len(templateName) == 0 && len(filename) == 0 { return kcmdutil.UsageError(cmd, "Must pass a filename or name of stored template") } if kcmdutil.GetFlagBool(cmd, "parameters") { for _, flag := range []string{"value", "param", "labels", "output", "output-version", "raw", "template"} { if f := cmd.Flags().Lookup(flag); f != nil && f.Changed { return kcmdutil.UsageError(cmd, "The --parameters flag does not process the template, can't be used with --%v", flag) } } } namespace, explicit, err := f.DefaultNamespace() if err != nil { return err } mapper, typer := f.Object(false) client, _, _, err := f.Clients() if err != nil { return err } var ( objects []runtime.Object infos []*resource.Info ) mapping, err := mapper.RESTMapping(templateapi.Kind("Template")) if err != nil { return err } // When templateName is not empty, then we fetch the template from the // server, otherwise we require to set the `-f` parameter. if len(templateName) > 0 { var ( storedTemplate, rs string sourceNamespace string ok bool ) sourceNamespace, rs, storedTemplate, ok = parseNamespaceResourceName(templateName, namespace) if !ok { return fmt.Errorf("invalid argument %q", templateName) } if len(rs) > 0 && (rs != "template" && rs != "templates") { return fmt.Errorf("unable to process invalid resource %q", rs) } if len(storedTemplate) == 0 { return fmt.Errorf("invalid value syntax %q", templateName) } templateObj, err := client.Templates(sourceNamespace).Get(storedTemplate) if err != nil { if errors.IsNotFound(err) { return fmt.Errorf("template %q could not be found", storedTemplate) } return err } templateObj.CreationTimestamp = unversioned.Now() infos = append(infos, &resource.Info{Object: templateObj}) } else { infos, err = resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder()). NamespaceParam(namespace).RequireNamespace(). FilenameParam(explicit, false, filename). Do(). Infos() if err != nil { return err } } if len(infos) > 1 { // in order to run validation on the input given to us by a user, we only support the processing // of one template in a list. 
For instance, we want to be able to fail when a user does not give // a parameter that the template wants or when they give a parameter the template doesn't need, // as this may indicate that they have misused `oc process`. This is much less complicated when // we process at most one template. fmt.Fprintf(out, "%d input templates found, but only the first will be processed", len(infos)) } obj, ok := infos[0].Object.(*templateapi.Template) if !ok { sourceName := filename if len(templateName) > 0 { sourceName = namespace + "/" + templateName } return fmt.Errorf("unable to parse %q, not a valid Template but %s\n", sourceName, reflect.TypeOf(infos[0].Object)) } // If the 'parameters' flag is set, do no processing and only print // the template parameters to the console for inspection. if kcmdutil.GetFlagBool(cmd, "parameters") { return describe.PrintTemplateParameters(obj.Parameters, out) } if label := kcmdutil.GetFlagString(cmd, "labels"); len(label) > 0 { lbl, err := kubectl.ParseLabels(label) if err != nil { return fmt.Errorf("error parsing labels: %v\n", err) } if obj.ObjectLabels == nil { obj.ObjectLabels = make(map[string]string) } for key, value := range lbl { obj.ObjectLabels[key] = value } } // Raise parameter parsing errors here after we have had a chance to return UsageErrors first if paramErr != nil { return paramErr } if errs := injectUserVars(params, obj); errs != nil { return kerrors.NewAggregate(errs) } resultObj, err := client.TemplateConfigs(namespace).Create(obj) if err != nil { return fmt.Errorf("error processing the template %q: %v\n", obj.Name, err) } outputFormat := kcmdutil.GetFlagString(cmd, "output") if outputFormat == "describe" { if s, err := (&describe.TemplateDescriber{ MetadataAccessor: meta.NewAccessor(), ObjectTyper: kapi.Scheme, ObjectDescriber: nil, }).DescribeTemplate(resultObj); err != nil { return fmt.Errorf("error describing %q: %v\n", obj.Name, err) } else { _, err := fmt.Fprintf(out, s) return err } } objects = append(objects, resultObj.Objects...) p, _, err := kcmdutil.PrinterForCommand(cmd) if err != nil { return err } gv := mapping.GroupVersionKind.GroupVersion() version, err := kcmdutil.OutputVersion(cmd, &gv) if err != nil { return err } p = kubectl.NewVersionedPrinter(p, kapi.Scheme, version) // use generic output if kcmdutil.GetFlagBool(cmd, "raw") { for i := range objects { p.PrintObj(objects[i], out) } return nil } return p.PrintObj(&kapi.List{ ListMeta: unversioned.ListMeta{}, Items: objects, }, out) }
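RunProcess classifies each positional argument as either a KEY=VALUE parameter or the (single) template name, and rejects duplicate parameters. The sketch below shows that classification in isolation; splitArgs is a hypothetical helper written for illustration, not part of oc.

package main

import (
	"fmt"
	"strings"
)

// splitArgs separates a template name from KEY=VALUE parameters, failing on a
// second template name or on a parameter key provided more than once.
func splitArgs(args []string) (name string, params map[string]string, err error) {
	params = map[string]string{}
	for _, s := range args {
		if idx := strings.Index(s, "="); idx >= 0 {
			key, value := s[:idx], s[idx+1:]
			if _, dup := params[key]; dup {
				return "", nil, fmt.Errorf("parameter %q provided more than once", key)
			}
			params[key] = value
			continue
		}
		if name != "" {
			return "", nil, fmt.Errorf("template name must be specified only once: %s", s)
		}
		name = s
	}
	return name, params, nil
}

func main() {
	name, params, err := splitArgs([]string{"my-template", "USER=alice", "REPLICAS=3"})
	fmt.Println(name, params, err)
}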
// RunCmdRouter contains all the necessary functionality for the // OpenShift CLI router command. func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error { switch len(args) { case 0: // uses default value case 1: cfg.Name = args[0] default: return kcmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router") } name := cfg.Name if len(cfg.StatsUsername) > 0 { if strings.Contains(cfg.StatsUsername, ":") { return kcmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername) } } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return fmt.Errorf("unable to parse --ports: %v", err) } // For the host networking case, ensure the ports match. Otherwise, remove host ports for i := 0; i < len(ports); i++ { if cfg.HostNetwork && ports[i].HostPort != 0 && ports[i].ContainerPort != ports[i].HostPort { return fmt.Errorf("when using host networking mode, container port %d and host port %d must be equal", ports[i].ContainerPort, ports[i].HostPort) } } if cfg.StatsPort > 0 { port := kapi.ContainerPort{ Name: "stats", ContainerPort: cfg.StatsPort, Protocol: kapi.ProtocolTCP, } ports = append(ports, port) } label := map[string]string{"router": name} if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := kcmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing router %q: %v", name, err) } generate = true } } if !generate { fmt.Fprintf(out, "Router %q service exists\n", name) return nil } if cfg.DryRun && !output { return fmt.Errorf("router %q does not exist (no service)", name) } if len(cfg.ServiceAccount) == 0 { return fmt.Errorf("you must specify a service account for the router with --service-account") } if err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount, cfg.HostNetwork); err != nil { return fmt.Errorf("router could not be created; %v", err) } // create new router secretEnv := app.Environment{} switch { case len(cfg.Credentials) == 0 && len(cfg.ServiceAccount) == 0: return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials") case len(cfg.Credentials) > 0: clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be 
loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } secretEnv.Add(app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, }) } createServiceAccount := len(cfg.ServiceAccount) > 0 && len(cfg.Credentials) == 0 defaultCert, err := fileutil.LoadData(cfg.DefaultCertificate) if err != nil { return fmt.Errorf("router could not be created; error reading default certificate file: %v", err) } if len(cfg.StatsPassword) == 0 { cfg.StatsPassword = generateStatsPassword() if !output { fmt.Fprintf(cmd.Out(), "info: password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword) } } env := app.Environment{ "ROUTER_SUBDOMAIN": cfg.Subdomain, "ROUTER_SERVICE_NAME": name, "ROUTER_SERVICE_NAMESPACE": namespace, "ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost, "ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername, "ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword, "ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver, "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver, "ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure), "ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath, "ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath, "STATS_PORT": strconv.Itoa(cfg.StatsPort), "STATS_USERNAME": cfg.StatsUsername, "STATS_PASSWORD": cfg.StatsPassword, } env.Add(secretEnv) if len(defaultCert) > 0 { if cfg.SecretsAsEnv { env.Add(app.Environment{"DEFAULT_CERTIFICATE": string(defaultCert)}) } else { // TODO: make --credentials create secrets and bypass service account env.Add(app.Environment{"DEFAULT_CERTIFICATE_PATH": defaultCertificatePath}) } } secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace, defaultCert) if err != nil { return fmt.Errorf("router could not be created: %v", err) } livenessProbe := generateLivenessProbeConfig(cfg, ports) readinessProbe := generateReadinessProbeConfig(cfg, ports) exposedPorts := make([]kapi.ContainerPort, len(ports)) copy(exposedPorts, ports) for i := range exposedPorts { exposedPorts[i].HostPort = 0 } containers := []kapi.Container{ { Name: "router", Image: image, Ports: exposedPorts, Env: env.List(), LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, ImagePullPolicy: kapi.PullIfNotPresent, VolumeMounts: mounts, }, } if cfg.StatsPort > 0 && cfg.ExposeMetrics { pc := generateMetricsExporterContainer(cfg, env) if pc != nil { containers = append(containers, *pc) } } objects := []runtime.Object{} for _, s := range secrets { objects = append(objects, s) } if createServiceAccount { objects = append(objects, &kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}}, &authapi.ClusterRoleBinding{ ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("router-%s-role", cfg.Name)}, Subjects: []kapi.ObjectReference{ { Kind: "ServiceAccount", Name: cfg.ServiceAccount, Namespace: namespace, }, }, RoleRef: 
kapi.ObjectReference{ Kind: "ClusterRole", Name: "system:router", }, }, ) } updatePercent := int(-25) objects = append(objects, &deployapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: deployapi.DeploymentConfigSpec{ Strategy: deployapi.DeploymentStrategy{ Type: deployapi.DeploymentStrategyTypeRolling, RollingParams: &deployapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent}, }, Replicas: cfg.Replicas, Selector: label, Triggers: []deployapi.DeploymentTriggerPolicy{ {Type: deployapi.DeploymentTriggerOnConfigChange}, }, Template: &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ SecurityContext: &kapi.PodSecurityContext{ HostNetwork: cfg.HostNetwork, }, ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: containers, Volumes: volumes, }, }, }, }) objects = app.AddServices(objects, false) // set the service port to the provided hostport value for i := range objects { switch t := objects[i].(type) { case *kapi.Service: for j, servicePort := range t.Spec.Ports { for _, targetPort := range ports { if targetPort.ContainerPort == servicePort.Port && targetPort.HostPort != 0 { t.Spec.Ports[j].Port = targetPort.HostPort } } } } } // TODO: label all created objects with the same label - router=<name> list := &kapi.List{Items: objects} if output { list.Items, err = cmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items) if err != nil { return err } if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.ClientForMapping, After: configcmd.NewPrintNameOrErrorAfter(mapper, kcmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil }
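Before printing or creating the router objects, the loop above rewrites each service port to the host port declared for the matching container port. The following sketch shows the same fix-up with simplified types standing in for the kapi structs; it is illustrative only, not the original implementation.

package main

import "fmt"

type containerPort struct{ ContainerPort, HostPort int }
type servicePort struct{ Port int }

// alignServicePorts sets each service port to the declared host port of the
// matching container port, mirroring the host-networking fix-up above.
func alignServicePorts(svc []servicePort, ctr []containerPort) {
	for i := range svc {
		for _, cp := range ctr {
			if cp.ContainerPort == svc[i].Port && cp.HostPort != 0 {
				svc[i].Port = cp.HostPort
			}
		}
	}
}

func main() {
	svc := []servicePort{{Port: 80}, {Port: 443}}
	ctr := []containerPort{{ContainerPort: 80, HostPort: 8080}}
	alignServicePorts(svc, ctr)
	fmt.Println(svc) // [{8080} {443}]
}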
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return kcmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := kcmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } var clusterIP string generate := output service, err := kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) && !generate { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } else { clusterIP = service.Spec.ClusterIP } if !generate { fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil } if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry secretEnv := app.Environment{} switch { case len(cfg.ServiceAccount) == 0 && len(cfg.Credentials) == 0: return fmt.Errorf("registry could not be created; a service account or the path to a .kubeconfig file must be provided") case len(cfg.Credentials) > 0: clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := restclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } secretEnv = app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } } needServiceAccountRole := len(cfg.ServiceAccount) > 0 && 
len(cfg.Credentials) == 0 var servingCert, servingKey []byte if len(cfg.ServingCertPath) > 0 { data, err := ioutil.ReadFile(cfg.ServingCertPath) if err != nil { return fmt.Errorf("registry does not exist; could not load TLS certificate file %q: %v", cfg.ServingCertPath, err) } servingCert = data } if len(cfg.ServingKeyPath) > 0 { data, err := ioutil.ReadFile(cfg.ServingKeyPath) if err != nil { return fmt.Errorf("registry does not exist; could not load TLS private key file %q: %v", cfg.ServingKeyPath, err) } servingKey = data } env := app.Environment{} env.Add(secretEnv) healthzPort := defaultPort if len(ports) > 0 { healthzPort = ports[0].ContainerPort env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort) env["REGISTRY_HTTP_NET"] = "tcp" } secrets, volumes, mounts, extraEnv, tls, err := generateSecretsConfig(cfg, namespace, servingCert, servingKey) if err != nil { return err } env.Add(extraEnv) livenessProbe := generateLivenessProbeConfig(healthzPort, tls) readinessProbe := generateReadinessProbeConfig(healthzPort, tls) mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, Env: env.List(), VolumeMounts: append(mounts, kapi.VolumeMount{ Name: "registry-storage", MountPath: cfg.Volume, }), SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, }, }, Volumes: append(volumes, kapi.Volume{ Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }), ServiceAccountName: cfg.ServiceAccount, }, } if mountHost { podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{} for _, s := range secrets { objects = append(objects, s) } if needServiceAccountRole { objects = append(objects, &kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}}, &authapi.ClusterRoleBinding{ ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("registry-%s-role", cfg.Name)}, Subjects: []kapi.ObjectReference{ { Kind: "ServiceAccount", Name: cfg.ServiceAccount, Namespace: namespace, }, }, RoleRef: kapi.ObjectReference{ Kind: "ClusterRole", Name: "system:registry", }, }, ) } objects = append(objects, &deployapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: deployapi.DeploymentConfigSpec{ Replicas: cfg.Replicas, Selector: label, Triggers: []deployapi.DeploymentTriggerPolicy{ {Type: deployapi.DeploymentTriggerOnConfigChange}, }, Template: podTemplate, }, }) objects = app.AddServices(objects, true) // Set registry service's sessionAffinity to ClientIP to prevent push // failures due to a use of poorly consistent storage shared by // multiple replicas. Also reuse the cluster IP if provided to avoid // changing the internal value. 
for _, obj := range objects { switch t := obj.(type) { case *kapi.Service: t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP t.Spec.ClusterIP = clusterIP } } // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, out) if err := fn(list); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.ClientForMapping, After: configcmd.NewPrintNameOrErrorAfter(mapper, kcmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil }
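The registry command only regenerates objects when output was requested or when no docker-registry service exists, and it carries the existing ClusterIP forward so a regenerated service keeps its in-cluster address. The sketch below captures that decision with a hypothetical shouldGenerate helper and standard-library errors only; it is an assumption-laden simplification, not the command's actual code path.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// shouldGenerate reports whether the registry objects should be (re)generated
// and which ClusterIP to reuse. lookupErr is the error from fetching the
// existing service, if any.
func shouldGenerate(outputRequested bool, existingClusterIP string, lookupErr error) (bool, string, error) {
	if lookupErr != nil {
		if !errors.Is(lookupErr, errNotFound) && !outputRequested {
			return false, "", fmt.Errorf("can't check for existing registry: %w", lookupErr)
		}
		return true, "", nil // no service yet: generate from scratch
	}
	if outputRequested {
		return true, existingClusterIP, nil // regenerate, but keep the current ClusterIP
	}
	return false, existingClusterIP, nil // service exists and no output requested: nothing to do
}

func main() {
	gen, ip, err := shouldGenerate(true, "172.30.0.10", nil)
	fmt.Println(gen, ip, err)
	gen, ip, err = shouldGenerate(false, "", errNotFound)
	fmt.Println(gen, ip, err)
}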