// RunReconcileClusterRoles contains all the necessary functionality for the OpenShift cli reconcile-cluster-roles command
func (o *ReconcileClusterRolesOptions) RunReconcileClusterRoles(cmd *cobra.Command, f *clientcmd.Factory) error {
	changedClusterRoles, err := o.ChangedClusterRoles()
	if err != nil {
		return err
	}

	if len(changedClusterRoles) == 0 {
		return nil
	}

	if len(o.Output) != 0 && !o.Confirmed {
		list := &kapi.List{}
		for _, item := range changedClusterRoles {
			list.Items = append(list.Items, item)
		}

		list.Items, err = ocmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items)
		if err != nil {
			return err
		}

		if err := f.Factory.PrintObject(cmd, list, o.Out); err != nil {
			return err
		}
	}

	if o.Confirmed {
		return o.ReplaceChangedRoles(changedClusterRoles)
	}

	return nil
}
// RunReconcileSCCs contains the functionality for the reconcile-sccs command for making or
// previewing changes.
func (o *ReconcileSCCOptions) RunReconcileSCCs(cmd *cobra.Command, f *clientcmd.Factory) error {
	// get the SCCs that need to be updated
	changedSCCs, err := o.ChangedSCCs()
	if err != nil {
		return err
	}

	if len(changedSCCs) == 0 {
		return nil
	}

	if !o.Confirmed {
		list := &kapi.List{}
		for _, item := range changedSCCs {
			list.Items = append(list.Items, item)
		}

		list.Items, err = ocmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items)
		if err != nil {
			return err
		}

		if err := f.Factory.PrintObject(cmd, list, o.Out); err != nil {
			return err
		}
	}

	if o.Confirmed {
		return o.ReplaceChangedSCCs(changedSCCs)
	}

	return nil
}
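// The two reconcile helpers above are meant to be driven from a cobra command whose Run
// handler fills in the options struct and then delegates to the RunReconcile* method. A
// minimal sketch of that wiring follows; newReconcileClusterRolesCommand and the Complete
// step are illustrative assumptions, not the exact upstream constructor.
func newReconcileClusterRolesCommand(name string, f *clientcmd.Factory, out io.Writer) *cobra.Command {
	o := &ReconcileClusterRolesOptions{Out: out}
	cmd := &cobra.Command{
		Use:   name,
		Short: "Update cluster roles to match the recommended bootstrap policy",
		Run: func(cmd *cobra.Command, args []string) {
			kcmdutil.CheckErr(o.Complete(cmd, f, args)) // assumed completion helper on the options struct
			kcmdutil.CheckErr(o.RunReconcileClusterRoles(cmd, f))
		},
	}
	// Without --confirm the command only prints the roles that would change;
	// with it, RunReconcileClusterRoles replaces them on the server.
	cmd.Flags().BoolVar(&o.Confirmed, "confirm", false, "Specify that cluster roles should be modified")
	return cmd
}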
// Run creates the GroupSyncer specified and runs it to sync groups.
// The cmd and f arguments are only here because they are the only way to get the printer we need.
func (o *SyncOptions) Run(cmd *cobra.Command, f *clientcmd.Factory) error {
	clientConfig, err := ldaputil.NewLDAPClientConfig(o.Config.URL, o.Config.BindDN, o.Config.BindPassword, o.Config.CA, o.Config.Insecure)
	if err != nil {
		return fmt.Errorf("could not determine LDAP client configuration: %v", err)
	}

	errorHandler := o.CreateErrorHandler()

	syncBuilder, err := buildSyncBuilder(clientConfig, o.Config, errorHandler)
	if err != nil {
		return err
	}

	// populate schema-independent syncer fields
	syncer := &syncgroups.LDAPGroupSyncer{
		Host: clientConfig.Host(),
		GroupClient: o.GroupInterface,
		DryRun: !o.Confirm,

		Out: o.Out,
		Err: os.Stderr,
	}

	switch o.Source {
	case GroupSyncSourceOpenShift:
		// when the source of ldapGroupUIDs is an OpenShift group, the mapping of ldapGroupUID to
		// OpenShift group name is logically pinned by the existing mapping.
		listerMapper, err := getOpenShiftGroupListerMapper(clientConfig.Host(), o)
		if err != nil {
			return err
		}
		syncer.GroupLister = listerMapper
		syncer.GroupNameMapper = listerMapper

	case GroupSyncSourceLDAP:
		syncer.GroupLister, err = getLDAPGroupLister(syncBuilder, o)
		if err != nil {
			return err
		}
		syncer.GroupNameMapper, err = getGroupNameMapper(syncBuilder, o)
		if err != nil {
			return err
		}

	default:
		return fmt.Errorf("invalid group source: %v", o.Source)
	}

	syncer.GroupMemberExtractor, err = syncBuilder.GetGroupMemberExtractor()
	if err != nil {
		return err
	}

	syncer.UserNameMapper, err = syncBuilder.GetUserNameMapper()
	if err != nil {
		return err
	}

	// Now we run the Syncer and report any errors
	openshiftGroups, syncErrors := syncer.Sync()
	if o.Confirm {
		return kerrs.NewAggregate(syncErrors)
	}

	list := &kapi.List{}
	for _, item := range openshiftGroups {
		list.Items = append(list.Items, item)
	}

	list.Items, err = ocmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items)
	if err != nil {
		return err
	}

	if err := f.Factory.PrintObject(cmd, list, o.Out); err != nil {
		return err
	}

	return kerrs.NewAggregate(syncErrors)
}
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command
func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error {
	var name string
	switch len(args) {
	case 0:
		name = "docker-registry"
	default:
		return cmdutil.UsageError(cmd, "No arguments are allowed to this command")
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		return err
	}

	label := map[string]string{
		"docker-registry": "default",
	}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			return err
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			return err
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	_, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}

	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err)
			}
			generate = true
		}
	}

	if generate {
		if cfg.DryRun && !output {
			return fmt.Errorf("docker-registry %q does not exist (no service).", name)
		}

		// create new registry
		if len(cfg.Credentials) == 0 {
			return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials")
		}
		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := kclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		} else {
			if len(config.KeyData) == 0 || len(config.CertData) == 0 {
				return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials)
			}
		}

		env := app.Environment{
			"OPENSHIFT_MASTER": config.Host,
			"OPENSHIFT_CA_DATA": string(config.CAData),
			"OPENSHIFT_KEY_DATA": string(config.KeyData),
			"OPENSHIFT_CERT_DATA": string(config.CertData),
			"OPENSHIFT_INSECURE": insecure,
		}

		healthzPort := defaultPort
		if len(ports) > 0 {
			healthzPort = ports[0].ContainerPort
			env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort)
			env["REGISTRY_HTTP_NET"] = "tcp"
		}
		livenessProbe := generateLivenessProbeConfig(healthzPort)
		readinessProbe := generateReadinessProbeConfig(healthzPort)

		secretBytes := make([]byte, randomSecretSize)
		if _, err := cryptorand.Read(secretBytes); err != nil {
			return fmt.Errorf("registry does not exist; could not generate random bytes for HTTP secret: %v", err)
		}
		env["REGISTRY_HTTP_SECRET"] = base64.StdEncoding.EncodeToString(secretBytes)

		mountHost := len(cfg.HostMount) > 0
		podTemplate := &kapi.PodTemplateSpec{
			ObjectMeta: kapi.ObjectMeta{Labels: label},
			Spec: kapi.PodSpec{
				ServiceAccountName: cfg.ServiceAccount,
				NodeSelector: nodeSelector,
				Containers: []kapi.Container{
					{
						Name: "registry",
						Image: image,
						Ports: ports,
						Env: env.List(),
						VolumeMounts: []kapi.VolumeMount{
							{
								Name: "registry-storage",
								MountPath: cfg.Volume,
							},
						},
						SecurityContext: &kapi.SecurityContext{
							Privileged: &mountHost,
						},
						LivenessProbe: livenessProbe,
						ReadinessProbe: readinessProbe,
					},
				},
				Volumes: []kapi.Volume{
					{
						Name: "registry-storage",
						VolumeSource: kapi.VolumeSource{},
					},
				},
			},
		}
		if mountHost {
			podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount}
		} else {
			podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{}
		}

		objects := []runtime.Object{
			&dapi.DeploymentConfig{
				ObjectMeta: kapi.ObjectMeta{
					Name: name,
					Labels: label,
				},
				Spec: dapi.DeploymentConfigSpec{
					Replicas: cfg.Replicas,
					Selector: label,
					Triggers: []dapi.DeploymentTriggerPolicy{
						{Type: dapi.DeploymentTriggerOnConfigChange},
					},
					Template: podTemplate,
				},
			},
		}
		objects = app.AddServices(objects, true)

		// Set registry service's sessionAffinity to ClientIP to prevent push
		// failures due to a use of poorly consistent storage shared by
		// multiple replicas.
		for _, obj := range objects {
			switch t := obj.(type) {
			case *kapi.Service:
				t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP
			}
		}

		// TODO: label all created objects with the same label
		list := &kapi.List{Items: objects}

		if output {
			list.Items, err = ocmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items)
			if err != nil {
				return err
			}
			if err := f.PrintObject(cmd, list, out); err != nil {
				return fmt.Errorf("unable to print object: %v", err)
			}
			return nil
		}

		mapper, typer := f.Factory.Object()
		bulk := configcmd.Bulk{
			Mapper: mapper,
			Typer: typer,
			RESTClientFactory: f.Factory.ClientForMapping,

			After: configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()),
		}
		if errs := bulk.Create(list, namespace); len(errs) != 0 {
			return errExit
		}
		return nil
	}

	fmt.Fprintf(out, "Docker registry %q service exists\n", name)
	return nil
}
// RunNewApplication contains all the necessary functionality for the OpenShift cli new-app command
func RunNewApplication(fullName string, f *clientcmd.Factory, out io.Writer, c *cobra.Command, args []string, config *newcmd.AppConfig) error {
	output := kcmdutil.GetFlagString(c, "output")
	shortOutput := output == "name"

	if err := setupAppConfig(f, out, c, args, config); err != nil {
		return err
	}

	if config.Querying() {
		result, err := config.RunQuery()
		if err != nil {
			return handleRunError(c, err, fullName)
		}

		if len(output) != 0 {
			result.List.Items, err = ocmdutil.ConvertItemsForDisplayFromDefaultCommand(c, result.List.Items)
			if err != nil {
				return err
			}
			return f.Factory.PrintObject(c, result.List, out)
		}

		return printHumanReadableQueryResult(result, out, fullName)
	}

	if err := setAppConfigLabels(c, config); err != nil {
		return err
	}
	result, err := config.Run()
	if err := handleRunError(c, err, fullName); err != nil {
		return err
	}
	if len(config.Labels) == 0 && len(result.Name) > 0 {
		config.Labels = map[string]string{"app": result.Name}
	}
	if err := setLabels(config.Labels, result); err != nil {
		return err
	}
	if err := setAnnotations(map[string]string{newcmd.GeneratedByNamespace: newcmd.GeneratedByNewApp}, result); err != nil {
		return err
	}

	indent := " "
	switch {
	case shortOutput:
		indent = ""
	case len(output) != 0:
		result.List.Items, err = ocmdutil.ConvertItemsForDisplayFromDefaultCommand(c, result.List.Items)
		if err != nil {
			return err
		}
		return f.Factory.PrintObject(c, result.List, out)
	case !result.GeneratedJobs:
		if len(config.Labels) > 0 {
			fmt.Fprintf(out, "--> Creating resources with label %s ...\n", labels.SelectorFromSet(config.Labels).String())
		} else {
			fmt.Fprintf(out, "--> Creating resources ...\n")
		}
	}
	if config.DryRun {
		fmt.Fprintf(out, "--> Success (DRY RUN)\n")
		return nil
	}

	mapper, _ := f.Object()
	var afterFn configcmd.AfterFunc
	switch {
	// only print success if we don't have installables
	case !result.GeneratedJobs:
		afterFn = configcmd.NewPrintNameOrErrorAfterIndent(mapper, shortOutput, "created", out, c.Out(), indent)
	default:
		afterFn = configcmd.NewPrintErrorAfter(mapper, c.Out())
		afterFn = configcmd.HaltOnError(afterFn)
	}
	if err := createObjects(f, afterFn, result); err != nil {
		return err
	}

	if !shortOutput && !result.GeneratedJobs {
		fmt.Fprintf(out, "--> Success\n")
	}

	hasMissingRepo := false
	installing := []*kapi.Pod{}
	for _, item := range result.List.Items {
		switch t := item.(type) {
		case *kapi.Pod:
			if t.Annotations[newcmd.GeneratedForJob] == "true" {
				installing = append(installing, t)
			}
		case *buildapi.BuildConfig:
			triggered := false
			for _, trigger := range t.Spec.Triggers {
				switch trigger.Type {
				case buildapi.ImageChangeBuildTriggerType, buildapi.ConfigChangeBuildTriggerType:
					triggered = true
					break
				}
			}
			if triggered {
				fmt.Fprintf(out, "%sBuild scheduled, use 'oc logs -f bc/%s' to track its progress.\n", indent, t.Name)
			} else {
				fmt.Fprintf(out, "%sUse 'oc start-build %s' to start a build.\n", indent, t.Name)
			}
		case *imageapi.ImageStream:
			if len(t.Status.DockerImageRepository) == 0 {
				if hasMissingRepo {
					continue
				}
				hasMissingRepo = true
				fmt.Fprintf(out, "%sWARNING: No Docker registry has been configured with the server. Automatic builds and deployments may not function.\n", indent)
			}
		}
	}

	if shortOutput {
		return nil
	}

	switch {
	case len(installing) == 1:
		// TODO: should get this set on the config or up above
		_, kclient, err := f.Clients()
		if err != nil {
			return err
		}
		jobInput := installing[0].Annotations[newcmd.GeneratedForJobFor]
		return followInstallation(f, jobInput, installing[0], kclient, out)
	case len(installing) > 1:
		for i := range installing {
			fmt.Fprintf(out, "%sTrack installation of %s with '%s logs %s'.\n", indent, installing[i].Name, fullName, installing[i].Name)
		}
	case len(result.List.Items) > 0:
		fmt.Fprintf(out, "%sRun '%s %s' to view your app.\n", indent, fullName, StatusRecommendedName)
	}
	return nil
}
// RunNewBuild contains all the necessary functionality for the OpenShift cli new-build command
func RunNewBuild(fullName string, f *clientcmd.Factory, out io.Writer, in io.Reader, c *cobra.Command, args []string, config *newcmd.AppConfig) error {
	output := kcmdutil.GetFlagString(c, "output")
	shortOutput := output == "name"

	if config.Dockerfile == "-" {
		data, err := ioutil.ReadAll(in)
		if err != nil {
			return err
		}
		config.Dockerfile = string(data)
	}

	if err := setupAppConfig(f, out, c, args, config); err != nil {
		return err
	}
	if err := setAppConfigLabels(c, config); err != nil {
		return err
	}
	result, err := config.Run()
	if err != nil {
		return handleBuildError(c, err, fullName)
	}
	if len(config.Labels) == 0 && len(result.Name) > 0 {
		config.Labels = map[string]string{"build": result.Name}
	}
	if err := setLabels(config.Labels, result); err != nil {
		return err
	}
	if err := setAnnotations(map[string]string{newcmd.GeneratedByNamespace: newcmd.GeneratedByNewBuild}, result); err != nil {
		return err
	}

	indent := " "
	switch {
	case shortOutput:
		indent = ""
	case len(output) != 0:
		result.List.Items, err = ocmdutil.ConvertItemsForDisplayFromDefaultCommand(c, result.List.Items)
		if err != nil {
			return err
		}
		return f.Factory.PrintObject(c, result.List, out)
	default:
		if len(config.Labels) > 0 {
			fmt.Fprintf(out, "--> Creating resources with label %s ...\n", labels.SelectorFromSet(config.Labels).String())
		} else {
			fmt.Fprintf(out, "--> Creating resources ...\n")
		}
	}
	if config.DryRun {
		fmt.Fprintf(out, "--> Success (DRY RUN)\n")
		return nil
	}

	mapper, _ := f.Object()
	if err := createObjects(f, configcmd.NewPrintNameOrErrorAfterIndent(mapper, shortOutput, "created", out, c.Out(), indent), result); err != nil {
		return err
	}

	if shortOutput {
		return nil
	}

	fmt.Fprintf(out, "--> Success\n")
	for _, item := range result.List.Items {
		switch t := item.(type) {
		case *buildapi.BuildConfig:
			if len(t.Spec.Triggers) > 0 && t.Spec.Source.Binary == nil {
				fmt.Fprintf(out, "%sBuild configuration %q created and build triggered.\n", indent, t.Name)
				fmt.Fprintf(out, "%sRun '%s logs -f bc/%s' to stream the build progress.\n", indent, fullName, t.Name)
			}
		}
	}

	return nil
}
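// Usage note for the Dockerfile handling above: when the command's --dockerfile flag is
// set to "-", RunNewBuild reads the Dockerfile body from the supplied io.Reader (stdin in
// the CLI), so an invocation along the lines of `oc new-build --dockerfile=- < Dockerfile`
// is expected to work; the exact flag wiring lives in the surrounding command setup.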
// RunCmdRouter contains all the necessary functionality for the
// OpenShift CLI router command.
func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error {
	var name string
	switch len(args) {
	case 0:
		name = "router"
	case 1:
		name = args[0]
	default:
		return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router")
	}

	if len(cfg.StatsUsername) > 0 {
		if strings.Contains(cfg.StatsUsername, ":") {
			return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername)
		}
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		glog.Fatal(err)
	}

	// For the host networking case, ensure the ports match.
	if cfg.HostNetwork {
		for i := 0; i < len(ports); i++ {
			if ports[i].ContainerPort != ports[i].HostPort {
				return cmdutil.UsageError(cmd, "For host networking mode, please ensure that the container [%v] and host [%v] ports match", ports[i].ContainerPort, ports[i].HostPort)
			}
		}
	}

	if cfg.StatsPort > 0 {
		ports = append(ports, kapi.ContainerPort{
			Name: "stats",
			HostPort: cfg.StatsPort,
			ContainerPort: cfg.StatsPort,
			Protocol: kapi.ProtocolTCP,
		})
	}

	label := map[string]string{"router": name}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	_, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}

	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing router %q: %v", name, err)
			}
			generate = true
		}
	}

	if generate {
		if cfg.DryRun && !output {
			return fmt.Errorf("router %q does not exist (no service)", name)
		}

		if len(cfg.ServiceAccount) == 0 {
			return fmt.Errorf("router could not be created; you must specify a service account with --service-account")
		}

		err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount)
		if err != nil {
			return fmt.Errorf("router could not be created; %v", err)
		}

		// create new router
		if len(cfg.Credentials) == 0 {
			return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials")
		}

		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := kclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		}

		defaultCert, err := fileutil.LoadData(cfg.DefaultCertificate)
		if err != nil {
			return fmt.Errorf("router could not be created; error reading default certificate file: %v", err)
		}

		if len(cfg.StatsPassword) == 0 {
			cfg.StatsPassword = generateStatsPassword()
			fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword)
		}

		env := app.Environment{
			"OPENSHIFT_MASTER": config.Host,
			"OPENSHIFT_CA_DATA": string(config.CAData),
			"OPENSHIFT_KEY_DATA": string(config.KeyData),
			"OPENSHIFT_CERT_DATA": string(config.CertData),
			"OPENSHIFT_INSECURE": insecure,
			"DEFAULT_CERTIFICATE": string(defaultCert),
			"ROUTER_SERVICE_NAME": name,
			"ROUTER_SERVICE_NAMESPACE": namespace,
			"ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost,
			"ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername,
			"ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword,
			"ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver,
			"ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver,
			"ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure),
			"ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath,
			"ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath,
			"STATS_PORT": strconv.Itoa(cfg.StatsPort),
			"STATS_USERNAME": cfg.StatsUsername,
			"STATS_PASSWORD": cfg.StatsPassword,
		}

		updatePercent := int(-25)

		secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace)
		if err != nil {
			return fmt.Errorf("router could not be created: %v", err)
		}

		livenessProbe := generateLivenessProbeConfig(cfg, ports)
		readinessProbe := generateReadinessProbeConfig(cfg, ports)

		containers := []kapi.Container{
			{
				Name: "router",
				Image: image,
				Ports: ports,
				Env: env.List(),
				LivenessProbe: livenessProbe,
				ReadinessProbe: readinessProbe,
				ImagePullPolicy: kapi.PullIfNotPresent,
				VolumeMounts: mounts,
			},
		}

		if cfg.StatsPort > 0 && cfg.ExposeMetrics {
			pc := generateMetricsExporterContainer(cfg, env)
			if pc != nil {
				containers = append(containers, *pc)
			}
		}

		objects := []runtime.Object{
			&dapi.DeploymentConfig{
				ObjectMeta: kapi.ObjectMeta{
					Name: name,
					Labels: label,
				},
				Spec: dapi.DeploymentConfigSpec{
					Strategy: dapi.DeploymentStrategy{
						Type: dapi.DeploymentStrategyTypeRolling,
						RollingParams: &dapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent},
					},
					Replicas: cfg.Replicas,
					Selector: label,
					Triggers: []dapi.DeploymentTriggerPolicy{
						{Type: dapi.DeploymentTriggerOnConfigChange},
					},
					Template: &kapi.PodTemplateSpec{
						ObjectMeta: kapi.ObjectMeta{Labels: label},
						Spec: kapi.PodSpec{
							SecurityContext: &kapi.PodSecurityContext{
								HostNetwork: cfg.HostNetwork,
							},
							ServiceAccountName: cfg.ServiceAccount,
							NodeSelector: nodeSelector,
							Containers: containers,
							Volumes: volumes,
						},
					},
				},
			},
		}

		if len(secrets) != 0 {
			serviceAccount, err := kClient.ServiceAccounts(namespace).Get(cfg.ServiceAccount)
			if err != nil {
				return fmt.Errorf("error looking up service account %s: %v", cfg.ServiceAccount, err)
			}
			for _, secret := range secrets {
				objects = append(objects, secret)
				serviceAccount.Secrets = append(serviceAccount.Secrets, kapi.ObjectReference{Name: secret.Name})
			}
			_, err = kClient.ServiceAccounts(namespace).Update(serviceAccount)
			if err != nil {
				return fmt.Errorf("error adding secret key to service account %s: %v", cfg.ServiceAccount, err)
			}
		}

		objects = app.AddServices(objects, true)

		// TODO: label all created objects with the same label - router=<name>
		list := &kapi.List{Items: objects}

		if output {
			list.Items, err = ocmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items)
			if err != nil {
				return err
			}
			if err := f.PrintObject(cmd, list, out); err != nil {
				return fmt.Errorf("unable to print object: %v", err)
			}
			return nil
		}

		mapper, typer := f.Factory.Object()
		bulk := configcmd.Bulk{
			Mapper: mapper,
			Typer: typer,
			RESTClientFactory: f.Factory.ClientForMapping,

			After: configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()),
		}
		if errs := bulk.Create(list, namespace); len(errs) != 0 {
			return errExit
		}
		return nil
	}

	fmt.Fprintf(out, "Router %q service exists\n", name)
	return nil
}
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command
func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error {
	var name string
	switch len(args) {
	case 0:
		name = "docker-registry"
	default:
		return kcmdutil.UsageError(cmd, "No arguments are allowed to this command")
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		return err
	}

	label := map[string]string{
		"docker-registry": "default",
	}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			return err
		}
		if len(remove) > 0 {
			return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			return err
		}
		if len(remove) > 0 {
			return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	_, output, err := kcmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}

	var clusterIP string

	generate := output
	service, err := kClient.Services(namespace).Get(name)
	if err != nil {
		if !errors.IsNotFound(err) && !generate {
			return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err)
		}
		generate = true
	} else {
		clusterIP = service.Spec.ClusterIP
	}

	if !generate {
		fmt.Fprintf(out, "Docker registry %q service exists\n", name)
		return nil
	}

	if cfg.DryRun && !output {
		return fmt.Errorf("docker-registry %q does not exist (no service).", name)
	}

	// create new registry
	secretEnv := app.Environment{}
	switch {
	case len(cfg.ServiceAccount) == 0 && len(cfg.Credentials) == 0:
		return fmt.Errorf("registry could not be created; a service account or the path to a .kubeconfig file must be provided")
	case len(cfg.Credentials) > 0:
		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := restclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		} else {
			if len(config.KeyData) == 0 || len(config.CertData) == 0 {
				return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials)
			}
		}

		secretEnv = app.Environment{
			"OPENSHIFT_MASTER": config.Host,
			"OPENSHIFT_CA_DATA": string(config.CAData),
			"OPENSHIFT_KEY_DATA": string(config.KeyData),
			"OPENSHIFT_CERT_DATA": string(config.CertData),
			"OPENSHIFT_INSECURE": insecure,
		}
	}
	needServiceAccountRole := len(cfg.ServiceAccount) > 0 && len(cfg.Credentials) == 0

	var servingCert, servingKey []byte
	if len(cfg.ServingCertPath) > 0 {
		data, err := ioutil.ReadFile(cfg.ServingCertPath)
		if err != nil {
			return fmt.Errorf("registry does not exist; could not load TLS certificate file %q: %v", cfg.ServingCertPath, err)
		}
		servingCert = data
	}
	if len(cfg.ServingKeyPath) > 0 {
		data, err := ioutil.ReadFile(cfg.ServingKeyPath)
		if err != nil {
			return fmt.Errorf("registry does not exist; could not load TLS private key file %q: %v", cfg.ServingKeyPath, err)
		}
		servingKey = data
	}

	env := app.Environment{}
	env.Add(secretEnv)

	healthzPort := defaultPort
	if len(ports) > 0 {
		healthzPort = ports[0].ContainerPort
		env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort)
		env["REGISTRY_HTTP_NET"] = "tcp"
	}
	secrets, volumes, mounts, extraEnv, tls, err := generateSecretsConfig(cfg, namespace, servingCert, servingKey)
	if err != nil {
		return err
	}
	env.Add(extraEnv)

	livenessProbe := generateLivenessProbeConfig(healthzPort, tls)
	readinessProbe := generateReadinessProbeConfig(healthzPort, tls)

	mountHost := len(cfg.HostMount) > 0
	podTemplate := &kapi.PodTemplateSpec{
		ObjectMeta: kapi.ObjectMeta{Labels: label},
		Spec: kapi.PodSpec{
			NodeSelector: nodeSelector,
			Containers: []kapi.Container{
				{
					Name: "registry",
					Image: image,
					Ports: ports,
					Env: env.List(),
					VolumeMounts: append(mounts, kapi.VolumeMount{
						Name: "registry-storage",
						MountPath: cfg.Volume,
					}),
					SecurityContext: &kapi.SecurityContext{
						Privileged: &mountHost,
					},
					LivenessProbe: livenessProbe,
					ReadinessProbe: readinessProbe,
				},
			},
			Volumes: append(volumes, kapi.Volume{
				Name: "registry-storage",
				VolumeSource: kapi.VolumeSource{},
			}),
		},
	}
	if mountHost {
		podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount}
	} else {
		podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].EmptyDir = &kapi.EmptyDirVolumeSource{}
	}

	objects := []runtime.Object{}
	for _, s := range secrets {
		objects = append(objects, s)
	}
	if needServiceAccountRole {
		objects = append(objects,
			&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}},
			&authapi.ClusterRoleBinding{
				ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("registry-%s-role", cfg.Name)},
				Subjects: []kapi.ObjectReference{
					{
						Kind: "ServiceAccount",
						Name: cfg.ServiceAccount,
						Namespace: namespace,
					},
				},
				RoleRef: kapi.ObjectReference{
					Kind: "ClusterRole",
					Name: "system:registry",
				},
			},
		)
		podTemplate.Spec.ServiceAccountName = cfg.ServiceAccount
	}

	objects = append(objects, &deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{
			Name: name,
			Labels: label,
		},
		Spec: deployapi.DeploymentConfigSpec{
			Replicas: cfg.Replicas,
			Selector: label,
			Triggers: []deployapi.DeploymentTriggerPolicy{
				{Type: deployapi.DeploymentTriggerOnConfigChange},
			},
			Template: podTemplate,
		},
	})

	objects = app.AddServices(objects, true)

	// Set registry service's sessionAffinity to ClientIP to prevent push
	// failures due to a use of poorly consistent storage shared by
	// multiple replicas. Also reuse the cluster IP if provided to avoid
	// changing the internal value.
	for _, obj := range objects {
		switch t := obj.(type) {
		case *kapi.Service:
			t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP
			t.Spec.ClusterIP = clusterIP
		}
	}

	// TODO: label all created objects with the same label
	list := &kapi.List{Items: objects}

	if output {
		list.Items, err = cmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items)
		if err != nil {
			return err
		}
		if err := f.PrintObject(cmd, list, out); err != nil {
			return fmt.Errorf("unable to print object: %v", err)
		}
		return nil
	}

	mapper, typer := f.Factory.Object()
	bulk := configcmd.Bulk{
		Mapper: mapper,
		Typer: typer,
		RESTClientFactory: f.Factory.ClientForMapping,

		After: configcmd.NewPrintNameOrErrorAfter(mapper, kcmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()),
	}
	if errs := bulk.Create(list, namespace); len(errs) != 0 {
		return errExit
	}
	return nil
}
// RunCmdRouter contains all the necessary functionality for the
// OpenShift CLI router command.
func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error {
	switch len(args) {
	case 0:
		// uses default value
	case 1:
		cfg.Name = args[0]
	default:
		return kcmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router")
	}
	name := cfg.Name

	if len(cfg.StatsUsername) > 0 {
		if strings.Contains(cfg.StatsUsername, ":") {
			return kcmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername)
		}
	}

	if len(cfg.Subdomain) > 0 && len(cfg.ForceSubdomain) > 0 {
		return kcmdutil.UsageError(cmd, "only one of --subdomain, --force-subdomain can be specified")
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		return fmt.Errorf("unable to parse --ports: %v", err)
	}

	// For the host networking case, ensure the ports match. Otherwise, remove host ports
	for i := 0; i < len(ports); i++ {
		if cfg.HostNetwork && ports[i].HostPort != 0 && ports[i].ContainerPort != ports[i].HostPort {
			return fmt.Errorf("when using host networking mode, container port %d and host port %d must be equal", ports[i].ContainerPort, ports[i].HostPort)
		}
	}

	if cfg.StatsPort > 0 {
		port := kapi.ContainerPort{
			Name: "stats",
			ContainerPort: cfg.StatsPort,
			Protocol: kapi.ProtocolTCP,
		}
		ports = append(ports, port)
	}

	label := map[string]string{"router": name}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	_, output, err := kcmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}

	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing router %q: %v", name, err)
			}
			generate = true
		}
	}

	if !generate {
		fmt.Fprintf(out, "Router %q service exists\n", name)
		return nil
	}

	if cfg.DryRun && !output {
		return fmt.Errorf("router %q does not exist (no service)", name)
	}

	if len(cfg.ServiceAccount) == 0 {
		return fmt.Errorf("you must specify a service account for the router with --service-account")
	}

	if err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount, cfg.HostNetwork); err != nil {
		return fmt.Errorf("router could not be created; %v", err)
	}

	// create new router
	secretEnv := app.Environment{}
	switch {
	case len(cfg.Credentials) == 0 && len(cfg.ServiceAccount) == 0:
		return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials")
	case len(cfg.Credentials) > 0:
		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := restclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		}
		secretEnv.Add(app.Environment{
			"OPENSHIFT_MASTER": config.Host,
			"OPENSHIFT_CA_DATA": string(config.CAData),
			"OPENSHIFT_KEY_DATA": string(config.KeyData),
			"OPENSHIFT_CERT_DATA": string(config.CertData),
			"OPENSHIFT_INSECURE": insecure,
		})
	}
	createServiceAccount := len(cfg.ServiceAccount) > 0 && len(cfg.Credentials) == 0

	defaultCert, err := fileutil.LoadData(cfg.DefaultCertificate)
	if err != nil {
		return fmt.Errorf("router could not be created; error reading default certificate file: %v", err)
	}

	if len(cfg.StatsPassword) == 0 {
		cfg.StatsPassword = generateStatsPassword()
		if !output {
			fmt.Fprintf(cmd.Out(), "info: password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword)
		}
	}

	env := app.Environment{
		"ROUTER_SUBDOMAIN": cfg.Subdomain,
		"ROUTER_SERVICE_NAME": name,
		"ROUTER_SERVICE_NAMESPACE": namespace,
		"ROUTER_SERVICE_HTTP_PORT": "80",
		"ROUTER_SERVICE_HTTPS_PORT": "443",
		"ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost,
		"ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername,
		"ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword,
		"ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver,
		"ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver,
		"ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure),
		"ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath,
		"ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath,
		"STATS_PORT": strconv.Itoa(cfg.StatsPort),
		"STATS_USERNAME": cfg.StatsUsername,
		"STATS_PASSWORD": cfg.StatsPassword,
	}
	if len(cfg.ForceSubdomain) > 0 {
		env["ROUTER_SUBDOMAIN"] = cfg.ForceSubdomain
		env["ROUTER_OVERRIDE_HOSTNAME"] = "true"
	}
	env.Add(secretEnv)
	if len(defaultCert) > 0 {
		if cfg.SecretsAsEnv {
			env.Add(app.Environment{"DEFAULT_CERTIFICATE": string(defaultCert)})
		} else {
			// TODO: make --credentials create secrets and bypass service account
			env.Add(app.Environment{"DEFAULT_CERTIFICATE_PATH": defaultCertificatePath})
		}
	}

	secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace, defaultCert)
	if err != nil {
		return fmt.Errorf("router could not be created: %v", err)
	}

	livenessProbe := generateLivenessProbeConfig(cfg, ports)
	readinessProbe := generateReadinessProbeConfig(cfg, ports)

	exposedPorts := make([]kapi.ContainerPort, len(ports))
	copy(exposedPorts, ports)
	for i := range exposedPorts {
		exposedPorts[i].HostPort = 0
	}

	containers := []kapi.Container{
		{
			Name: "router",
			Image: image,
			Ports: exposedPorts,
			Env: env.List(),
			LivenessProbe: livenessProbe,
			ReadinessProbe: readinessProbe,
			ImagePullPolicy: kapi.PullIfNotPresent,
			VolumeMounts: mounts,
		},
	}

	if cfg.StatsPort > 0 && cfg.ExposeMetrics {
		pc := generateMetricsExporterContainer(cfg, env)
		if pc != nil {
			containers = append(containers, *pc)
		}
	}

	objects := []runtime.Object{}
	for _, s := range secrets {
		objects = append(objects, s)
	}
	if createServiceAccount {
		objects = append(objects,
			&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}},
			&authapi.ClusterRoleBinding{
				ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("router-%s-role", cfg.Name)},
				Subjects: []kapi.ObjectReference{
					{
						Kind: "ServiceAccount",
						Name: cfg.ServiceAccount,
						Namespace: namespace,
					},
				},
				RoleRef: kapi.ObjectReference{
					Kind: "ClusterRole",
					Name: "system:router",
				},
			},
		)
	}

	updatePercent := int(-25)
	objects = append(objects, &deployapi.DeploymentConfig{
		ObjectMeta: kapi.ObjectMeta{
			Name: name,
			Labels: label,
		},
		Spec: deployapi.DeploymentConfigSpec{
			Strategy: deployapi.DeploymentStrategy{
				Type: deployapi.DeploymentStrategyTypeRolling,
				RollingParams: &deployapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent},
			},
			Replicas: cfg.Replicas,
			Selector: label,
			Triggers: []deployapi.DeploymentTriggerPolicy{
				{Type: deployapi.DeploymentTriggerOnConfigChange},
			},
			Template: &kapi.PodTemplateSpec{
				ObjectMeta: kapi.ObjectMeta{Labels: label},
				Spec: kapi.PodSpec{
					SecurityContext: &kapi.PodSecurityContext{
						HostNetwork: cfg.HostNetwork,
					},
					ServiceAccountName: cfg.ServiceAccount,
					NodeSelector: nodeSelector,
					Containers: containers,
					Volumes: volumes,
				},
			},
		},
	})

	objects = app.AddServices(objects, false)

	// set the service port to the provided hostport value
	for i := range objects {
		switch t := objects[i].(type) {
		case *kapi.Service:
			for j, servicePort := range t.Spec.Ports {
				for _, targetPort := range ports {
					if targetPort.ContainerPort == servicePort.Port && targetPort.HostPort != 0 {
						t.Spec.Ports[j].Port = targetPort.HostPort
					}
				}
			}
		}
	}

	// TODO: label all created objects with the same label - router=<name>
	list := &kapi.List{Items: objects}

	if output {
		list.Items, err = cmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items)
		if err != nil {
			return err
		}
		if err := f.PrintObject(cmd, list, out); err != nil {
			return fmt.Errorf("unable to print object: %v", err)
		}
		return nil
	}

	mapper, typer := f.Factory.Object()
	bulk := configcmd.Bulk{
		Mapper: mapper,
		Typer: typer,
		RESTClientFactory: f.Factory.ClientForMapping,

		After: configcmd.NewPrintNameOrErrorAfter(mapper, kcmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()),
	}
	if errs := bulk.Create(list, namespace); len(errs) != 0 {
		return errExit
	}
	return nil
}