// Generate the IP failover monitor (keepalived) container configuration.
func generateFailoverMonitorContainerConfig(name string, options *ipfailover.IPFailoverConfigCmdOptions, env app.Environment) *kapi.Container {
	containerName := fmt.Sprintf("%s-%s", name, options.Type)
	imageName := fmt.Sprintf("%s-%s", options.Type, ipfailover.DefaultName)
	image := options.ImageTemplate.ExpandOrDie(imageName)

	// Container port to expose the service interconnects between keepaliveds.
	ports := make([]kapi.ContainerPort, 1)
	ports[0] = kapi.ContainerPort{
		ContainerPort: options.ServicePort,
		HostPort:      options.ServicePort,
	}

	mounts := make([]kapi.VolumeMount, 1)
	mounts[0] = kapi.VolumeMount{
		Name:      libModulesVolumeName,
		ReadOnly:  true,
		MountPath: libModulesPath,
	}

	privileged := true
	return &kapi.Container{
		Name:  containerName,
		Image: image,
		Ports: ports,
		SecurityContext: &kapi.SecurityContext{
			Privileged: &privileged,
		},
		ImagePullPolicy: kapi.PullIfNotPresent,
		VolumeMounts:    mounts,
		Env:             env.List(),
	}
}
func generateMetricsExporterContainer(cfg *RouterConfig, env app.Environment) *kapi.Container {
	containerName := "metrics-exporter"
	if len(cfg.MetricsImage) > 0 {
		return &kapi.Container{
			Name:  containerName,
			Image: cfg.MetricsImage,
			Env:   env.List(),
		}
	}
	switch cfg.Type {
	case "haproxy-router":
		return &kapi.Container{
			Name:  containerName,
			Image: "prom/haproxy-exporter:latest",
			Env:   env.List(),
			Args: []string{
				fmt.Sprintf("-haproxy.scrape-uri=http://$(STATS_USERNAME):$(STATS_PASSWORD)@localhost:$(STATS_PORT)/haproxy?stats;csv"),
			},
			Ports: []kapi.ContainerPort{
				{
					Name:          "http",
					ContainerPort: 9101,
				},
			},
		}
	default:
		return nil
	}
}
// buildTemplates converts a set of resolved, valid references into references to template objects.
func (c *AppConfig) buildTemplates(components app.ComponentReferences, environment app.Environment) ([]runtime.Object, error) {
	objects := []runtime.Object{}

	for _, ref := range components {
		tpl := ref.Input().ResolvedMatch.Template

		glog.V(4).Infof("processing template %s/%s", c.originNamespace, tpl.Name)
		for _, env := range environment.List() {
			// only set environment values that match what's expected by the template
			if v := template.GetParameterByName(tpl, env.Name); v != nil {
				v.Value = env.Value
				v.Generate = ""
				template.AddParameter(tpl, *v)
			} else {
				return nil, fmt.Errorf("unexpected parameter name %q", env.Name)
			}
		}

		result, err := c.osclient.TemplateConfigs(c.originNamespace).Create(tpl)
		if err != nil {
			return nil, fmt.Errorf("error processing template %s/%s: %v", c.originNamespace, tpl.Name, err)
		}
		if errs := runtime.DecodeList(result.Objects, kapi.Scheme); len(errs) > 0 {
			return nil, fmt.Errorf("error processing template %s/%s: %v", c.originNamespace, tpl.Name, errors.NewAggregate(errs))
		}
		objects = append(objects, result.Objects...)
	}
	return objects, nil
}
// buildTemplates converts a set of resolved, valid references into references to template objects.
func (c *AppConfig) buildTemplates(components app.ComponentReferences, parameters app.Environment, environment app.Environment) (string, []runtime.Object, error) {
	objects := []runtime.Object{}
	name := ""

	for _, ref := range components {
		tpl := ref.Input().ResolvedMatch.Template

		glog.V(4).Infof("processing template %s/%s", c.OriginNamespace, tpl.Name)
		result, err := TransformTemplate(tpl, c.OSClient, c.OriginNamespace, parameters)
		if err != nil {
			return name, nil, err
		}
		if len(name) == 0 {
			name = tpl.Name
		}
		objects = append(objects, result.Objects...)

		if len(result.Objects) > 0 {
			// if environment variables were passed in, let's apply the environment variables
			// to every pod template object
			for i := range result.Objects {
				podSpec, _, err := ometa.GetPodSpec(result.Objects[i])
				if err == nil {
					for ii := range podSpec.Containers {
						if podSpec.Containers[ii].Env != nil {
							podSpec.Containers[ii].Env = app.JoinEnvironment(environment.List(), podSpec.Containers[ii].Env)
						} else {
							podSpec.Containers[ii].Env = environment.List()
						}
					}
				}
			}
		}

		DescribeGeneratedTemplate(c.Out, ref.Input().String(), result, c.OriginNamespace)
	}
	return name, objects, nil
}
// RunCmdRouter contains all the necessary functionality for the // OpenShift CLI router command. func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error { var name string switch len(args) { case 0: name = "router" case 1: name = args[0] default: return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router") } if len(cfg.StatsUsername) > 0 { if strings.Contains(cfg.StatsUsername, ":") { return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername) } } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { glog.Fatal(err) } // For the host networking case, ensure the ports match. if cfg.HostNetwork { for i := 0; i < len(ports); i++ { if ports[i].ContainerPort != ports[i].HostPort { return cmdutil.UsageError(cmd, "For host networking mode, please ensure that the container [%v] and host [%v] ports match", ports[i].ContainerPort, ports[i].HostPort) } } } if cfg.StatsPort > 0 { ports = append(ports, kapi.ContainerPort{ Name: "stats", HostPort: cfg.StatsPort, ContainerPort: cfg.StatsPort, Protocol: kapi.ProtocolTCP, }) } label := map[string]string{"router": name} if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing router %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("router %q does not exist (no service)", name) } if len(cfg.ServiceAccount) == 0 { return fmt.Errorf("router could not be created; you must specify a service account with --service-account") } err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount) if err != nil { return fmt.Errorf("router could not be created; %v", err) } // create new router if len(cfg.Credentials) == 0 { return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could 
not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } defaultCert, err := loadCert(cfg.DefaultCertificate) if err != nil { return fmt.Errorf("router could not be created; error reading default certificate file: %v", err) } if len(cfg.StatsPassword) == 0 { cfg.StatsPassword = generateStatsPassword() fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword) } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, "DEFAULT_CERTIFICATE": defaultCert, "ROUTER_SERVICE_NAME": name, "ROUTER_SERVICE_NAMESPACE": namespace, "ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost, "ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername, "ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword, "ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver, "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver, "ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure), "ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath, "ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath, "STATS_PORT": strconv.Itoa(cfg.StatsPort), "STATS_USERNAME": cfg.StatsUsername, "STATS_PASSWORD": cfg.StatsPassword, } updatePercent := int(-25) secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace) if err != nil { return fmt.Errorf("router could not be created: %v", err) } livenessProbe := generateLivenessProbeConfig(cfg, ports) containers := []kapi.Container{ { Name: "router", Image: image, Ports: ports, Env: env.List(), LivenessProbe: livenessProbe, ImagePullPolicy: kapi.PullIfNotPresent, VolumeMounts: mounts, }, } if cfg.StatsPort > 0 && cfg.ExposeMetrics { pc := generateMetricsExporterContainer(cfg, env) if pc != nil { containers = append(containers, *pc) } } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: dapi.DeploymentTemplate{ Strategy: dapi.DeploymentStrategy{ Type: dapi.DeploymentStrategyTypeRolling, RollingParams: &dapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent}, }, ControllerTemplate: kapi.ReplicationControllerSpec{ Replicas: cfg.Replicas, Selector: label, Template: &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ SecurityContext: &kapi.PodSecurityContext{ HostNetwork: cfg.HostNetwork, }, ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: containers, Volumes: volumes, }, }, }, }, }, } if len(secrets) != 0 { serviceAccount, err := kClient.ServiceAccounts(namespace).Get(cfg.ServiceAccount) if err != nil { return fmt.Errorf("error looking up service account %s: %v", cfg.ServiceAccount, err) } for _, secret := range secrets { objects = append(objects, secret) serviceAccount.Secrets = append(serviceAccount.Secrets, kapi.ObjectReference{Name: secret.Name}) } _, err = kClient.ServiceAccounts(namespace).Update(serviceAccount) if err != nil { return fmt.Errorf("error adding secret key to service account %s: %v", cfg.ServiceAccount, err) } } objects = 
app.AddServices(objects, true) // TODO: label all created objects with the same label - router=<name> list := &kapi.List{Items: objects} if output { if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("Unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Router %q service exists\n", name) return nil }
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return cmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } p, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry if len(cfg.Credentials) == 0 { return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, 
Env: env.List(), VolumeMounts: []kapi.VolumeMount{ { Name: "registry-storage", MountPath: cfg.Volume, }, }, SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, // TODO reenable the liveness probe when we no longer support the v1 registry. /* LivenessProbe: &kapi.Probe{ InitialDelaySeconds: 3, TimeoutSeconds: 5, Handler: kapi.Handler{ HTTPGet: &kapi.HTTPGetAction{ Path: "/healthz", Port: util.NewIntOrStringFromInt(5000), }, }, }, */ }, }, Volumes: []kapi.Volume{ { Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }, }, }, } if mountHost { podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: dapi.DeploymentTemplate{ ControllerTemplate: kapi.ReplicationControllerSpec{ Replicas: cfg.Replicas, Selector: label, Template: podTemplate, }, }, }, } objects = app.AddServices(objects, true) // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { if err := p.PrintObj(list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(out, os.Stderr), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil }
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return cmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry if len(cfg.Credentials) == 0 { return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } healthzPort := defaultPort if len(ports) > 0 { healthzPort = ports[0].ContainerPort env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort) env["REGISTRY_HTTP_NET"] = "tcp" } livenessProbe := generateLivenessProbeConfig(healthzPort) readinessProbe := 
generateReadinessProbeConfig(healthzPort) secretBytes := make([]byte, randomSecretSize) if _, err := cryptorand.Read(secretBytes); err != nil { return fmt.Errorf("registry does not exist; could not generate random bytes for HTTP secret: %v", err) } env["REGISTRY_HTTP_SECRET"] = base64.StdEncoding.EncodeToString(secretBytes) mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, Env: env.List(), VolumeMounts: []kapi.VolumeMount{ { Name: "registry-storage", MountPath: cfg.Volume, }, }, SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, }, }, Volumes: []kapi.Volume{ { Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }, }, }, } if mountHost { podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: dapi.DeploymentConfigSpec{ Replicas: cfg.Replicas, Selector: label, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: podTemplate, }, }, } objects = app.AddServices(objects, true) // Set registry service's sessionAffinity to ClientIP to prevent push // failures due to a use of poorly consistent storage shared by // multiple replicas. for _, obj := range objects { switch t := obj.(type) { case *kapi.Service: t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP } } // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil }
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func (opts *RegistryOptions) RunCmdRegistry() error { name := "docker-registry" var clusterIP string output := opts.Config.Action.ShouldPrint() generate := output if !generate { service, err := opts.serviceClient.Services(opts.namespace).Get(name) if err != nil { if !errors.IsNotFound(err) && !generate { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } if !output && opts.Config.Action.DryRun { return fmt.Errorf("Docker registry %q service does not exist", name) } generate = true } else { clusterIP = service.Spec.ClusterIP } } if !generate { fmt.Fprintf(opts.out, "Docker registry %q service exists\n", name) return nil } // create new registry secretEnv := app.Environment{} switch { case len(opts.Config.ServiceAccount) == 0 && len(opts.Config.Credentials) == 0: return fmt.Errorf("registry could not be created; a service account or the path to a .kubeconfig file must be provided") case len(opts.Config.Credentials) > 0: clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: opts.Config.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", opts.Config.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", opts.Config.Credentials, err) } if err := restclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", opts.Config.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", opts.Config.Credentials) } } secretEnv = app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } } needServiceAccountRole := len(opts.Config.ServiceAccount) > 0 && len(opts.Config.Credentials) == 0 var servingCert, servingKey []byte if len(opts.Config.ServingCertPath) > 0 { data, err := ioutil.ReadFile(opts.Config.ServingCertPath) if err != nil { return fmt.Errorf("registry does not exist; could not load TLS certificate file %q: %v", opts.Config.ServingCertPath, err) } servingCert = data } if len(opts.Config.ServingKeyPath) > 0 { data, err := ioutil.ReadFile(opts.Config.ServingKeyPath) if err != nil { return fmt.Errorf("registry does not exist; could not load TLS private key file %q: %v", opts.Config.ServingKeyPath, err) } servingCert = data } env := app.Environment{} env.Add(secretEnv) env["REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ENFORCEQUOTA"] = fmt.Sprintf("%t", opts.Config.EnforceQuota) healthzPort := defaultPort if len(opts.ports) > 0 { healthzPort = int(opts.ports[0].ContainerPort) env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort) env["REGISTRY_HTTP_NET"] = "tcp" } secrets, volumes, mounts, extraEnv, tls, err := generateSecretsConfig(opts.Config, opts.namespace, servingCert, servingKey) if err != nil { return err } env.Add(extraEnv) livenessProbe := generateLivenessProbeConfig(healthzPort, 
tls) readinessProbe := generateReadinessProbeConfig(healthzPort, tls) mountHost := len(opts.Config.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: opts.label}, Spec: kapi.PodSpec{ NodeSelector: opts.nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: opts.image, Ports: opts.ports, Env: env.List(), VolumeMounts: append(mounts, kapi.VolumeMount{ Name: "registry-storage", MountPath: opts.Config.Volume, }), SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, }, }, Volumes: append(volumes, kapi.Volume{ Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }), ServiceAccountName: opts.Config.ServiceAccount, }, } if mountHost { podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].HostPath = &kapi.HostPathVolumeSource{Path: opts.Config.HostMount} } else { podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{} for _, s := range secrets { objects = append(objects, s) } if needServiceAccountRole { objects = append(objects, &kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: opts.Config.ServiceAccount}}, &authapi.ClusterRoleBinding{ ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("registry-%s-role", opts.Config.Name)}, Subjects: []kapi.ObjectReference{ { Kind: "ServiceAccount", Name: opts.Config.ServiceAccount, Namespace: opts.namespace, }, }, RoleRef: kapi.ObjectReference{ Kind: "ClusterRole", Name: "system:registry", }, }, ) } if opts.Config.DaemonSet { objects = append(objects, &extensions.DaemonSet{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: opts.label, }, Spec: extensions.DaemonSetSpec{ Template: kapi.PodTemplateSpec{ ObjectMeta: podTemplate.ObjectMeta, Spec: podTemplate.Spec, }, }, }) } else { objects = append(objects, &deployapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: opts.label, }, Spec: deployapi.DeploymentConfigSpec{ Replicas: opts.Config.Replicas, Selector: opts.label, Triggers: []deployapi.DeploymentTriggerPolicy{ {Type: deployapi.DeploymentTriggerOnConfigChange}, }, Template: podTemplate, }, }) } objects = app.AddServices(objects, true) // Set registry service's sessionAffinity to ClientIP to prevent push // failures due to a use of poorly consistent storage shared by // multiple replicas. Also reuse the cluster IP if provided to avoid // changing the internal value. for _, obj := range objects { switch t := obj.(type) { case *kapi.Service: t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP t.Spec.ClusterIP = clusterIP } } // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if opts.Config.Action.ShouldPrint() { mapper, _ := opts.factory.Object(false) fn := cmdutil.VersionedPrintObject(opts.factory.PrintObject, opts.cmd, mapper, opts.out) if err := fn(list); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } if errs := opts.Config.Action.WithMessage(fmt.Sprintf("Creating registry %s", opts.Config.Name), "created").Run(list, opts.namespace); len(errs) > 0 { return cmdutil.ErrExit } return nil }
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command
func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error {
	var name string
	switch len(args) {
	case 0:
		name = "docker-registry"
	default:
		return kcmdutil.UsageError(cmd, "No arguments are allowed to this command")
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		return err
	}

	label := map[string]string{
		"docker-registry": "default",
	}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			return err
		}
		if len(remove) > 0 {
			return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			return err
		}
		if len(remove) > 0 {
			return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	cfg.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
	cfg.Action.Out, cfg.Action.ErrOut = out, cmd.Out()
	cfg.Action.Bulk.Op = configcmd.Create

	var clusterIP string

	output := cfg.Action.ShouldPrint()
	generate := output
	if !generate {
		service, err := kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) && !generate {
				return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err)
			}
			if !output && cfg.Action.DryRun {
				return fmt.Errorf("Docker registry %q service does not exist", name)
			}
			generate = true
		} else {
			clusterIP = service.Spec.ClusterIP
		}
	}

	if !generate {
		fmt.Fprintf(out, "Docker registry %q service exists\n", name)
		return nil
	}

	// create new registry
	secretEnv := app.Environment{}
	switch {
	case len(cfg.ServiceAccount) == 0 && len(cfg.Credentials) == 0:
		return fmt.Errorf("registry could not be created; a service account or the path to a .kubeconfig file must be provided")
	case len(cfg.Credentials) > 0:
		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := restclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		} else {
			if len(config.KeyData) == 0 || len(config.CertData) == 0 {
				return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials)
			}
		}

		secretEnv = app.Environment{
			"OPENSHIFT_MASTER":    config.Host,
			"OPENSHIFT_CA_DATA":   string(config.CAData),
			"OPENSHIFT_KEY_DATA":  string(config.KeyData),
			"OPENSHIFT_CERT_DATA": string(config.CertData),
			"OPENSHIFT_INSECURE":  insecure,
		}
	}

	needServiceAccountRole := len(cfg.ServiceAccount) > 0 && len(cfg.Credentials) == 0

	var servingCert, servingKey []byte
	if len(cfg.ServingCertPath) > 0 {
		data, err := ioutil.ReadFile(cfg.ServingCertPath)
		if err != nil {
			return fmt.Errorf("registry does not exist; could not load TLS certificate file %q: %v", cfg.ServingCertPath, err)
		}
		servingCert = data
	}
	if len(cfg.ServingKeyPath) > 0 {
		data, err := ioutil.ReadFile(cfg.ServingKeyPath)
		if err != nil {
			return fmt.Errorf("registry does not exist; could not load TLS private key file %q: %v", cfg.ServingKeyPath, err)
		}
		servingKey = data
	}

	env := app.Environment{}
	env.Add(secretEnv)

	healthzPort := defaultPort
	if len(ports) > 0 {
		healthzPort = ports[0].ContainerPort
		env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort)
		env["REGISTRY_HTTP_NET"] = "tcp"
	}
	secrets, volumes, mounts, extraEnv, tls, err := generateSecretsConfig(cfg, namespace, servingCert, servingKey)
	if err != nil {
		return err
	}
	env.Add(extraEnv)

	livenessProbe := generateLivenessProbeConfig(healthzPort, tls)
	readinessProbe := generateReadinessProbeConfig(healthzPort, tls)

	mountHost := len(cfg.HostMount) > 0
	podTemplate := &kapi.PodTemplateSpec{
		ObjectMeta: kapi.ObjectMeta{Labels: label},
		Spec: kapi.PodSpec{
			NodeSelector: nodeSelector,
			Containers: []kapi.Container{
				{
					Name:  "registry",
					Image: image,
					Ports: ports,
					Env:   env.List(),
					VolumeMounts: append(mounts, kapi.VolumeMount{
						Name:      "registry-storage",
						MountPath: cfg.Volume,
					}),
					SecurityContext: &kapi.SecurityContext{
						Privileged: &mountHost,
					},
					LivenessProbe:  livenessProbe,
					ReadinessProbe: readinessProbe,
				},
			},
			Volumes: append(volumes, kapi.Volume{
				Name:         "registry-storage",
				VolumeSource: kapi.VolumeSource{},
			}),
			ServiceAccountName: cfg.ServiceAccount,
		},
	}
	if mountHost {
		podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount}
	} else {
		podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].EmptyDir = &kapi.EmptyDirVolumeSource{}
	}

	objects := []runtime.Object{}
	for _, s := range secrets {
		objects = append(objects, s)
	}
	if needServiceAccountRole {
		objects = append(objects,
			&kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}},
			&authapi.ClusterRoleBinding{
				ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("registry-%s-role", cfg.Name)},
				Subjects: []kapi.ObjectReference{
					{
						Kind:      "ServiceAccount",
						Name:      cfg.ServiceAccount,
						Namespace: namespace,
					},
				},
				RoleRef: kapi.ObjectReference{
					Kind: "ClusterRole",
					Name: "system:registry",
				},
			},
		)
	}

	if cfg.DaemonSet {
		objects = append(objects, &extensions.DaemonSet{
			ObjectMeta: kapi.ObjectMeta{
				Name:   name,
				Labels: label,
			},
			Spec: extensions.DaemonSetSpec{
				Template: kapi.PodTemplateSpec{
					ObjectMeta: podTemplate.ObjectMeta,
					Spec:       podTemplate.Spec,
				},
			},
		})
	} else {
		objects = append(objects, &deployapi.DeploymentConfig{
			ObjectMeta: kapi.ObjectMeta{
				Name:   name,
				Labels: label,
			},
			Spec: deployapi.DeploymentConfigSpec{
				Replicas: cfg.Replicas,
				Selector: label,
				Triggers: []deployapi.DeploymentTriggerPolicy{
					{Type: deployapi.DeploymentTriggerOnConfigChange},
				},
				Template: podTemplate,
			},
		})
	}

	objects = app.AddServices(objects, true)

	// Set registry service's sessionAffinity to ClientIP to prevent push
	// failures due to a use of poorly consistent storage shared by
	// multiple replicas. Also reuse the cluster IP if provided to avoid
	// changing the internal value.
	for _, obj := range objects {
		switch t := obj.(type) {
		case *kapi.Service:
			t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP
			t.Spec.ClusterIP = clusterIP
		}
	}

	// TODO: label all created objects with the same label
	list := &kapi.List{Items: objects}

	if cfg.Action.ShouldPrint() {
		mapper, _ := f.Object(false)
		fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, out)
		if err := fn(list); err != nil {
			return fmt.Errorf("unable to print object: %v", err)
		}
		return nil
	}

	if errs := cfg.Action.WithMessage(fmt.Sprintf("Creating registry %s", cfg.Name), "created").Run(list, namespace); len(errs) > 0 {
		return cmdutil.ErrExit
	}
	return nil
}
// RunCmdRouter contains all the necessary functionality for the // OpenShift CLI router command. func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out, errout io.Writer, cfg *RouterConfig, args []string) error { switch len(args) { case 0: // uses default value case 1: cfg.Name = args[0] default: return kcmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router") } name := cfg.Name var defaultOutputErr error if len(cfg.StatsUsername) > 0 { if strings.Contains(cfg.StatsUsername, ":") { return kcmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername) } } if len(cfg.Subdomain) > 0 && len(cfg.ForceSubdomain) > 0 { return kcmdutil.UsageError(cmd, "only one of --subdomain, --force-subdomain can be specified") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return fmt.Errorf("unable to parse --ports: %v", err) } // HostNetwork overrides HostPorts if cfg.HostNetwork { cfg.HostPorts = false } // For the host networking case, ensure the ports match. if cfg.HostNetwork { for i := 0; i < len(ports); i++ { if ports[i].HostPort != 0 && ports[i].ContainerPort != ports[i].HostPort { return fmt.Errorf("when using host networking mode, container port %d and host port %d must be equal", ports[i].ContainerPort, ports[i].HostPort) } } } if cfg.StatsPort > 0 { port := kapi.ContainerPort{ Name: "stats", ContainerPort: int32(cfg.StatsPort), Protocol: kapi.ProtocolTCP, } if cfg.HostPorts { port.HostPort = int32(cfg.StatsPort) } ports = append(ports, port) } label := map[string]string{"router": name} if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, _, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } cfg.Action.Bulk.Mapper = clientcmd.ResourceMapper(f) cfg.Action.Out, cfg.Action.ErrOut = out, errout cfg.Action.Bulk.Op = configcmd.Create var clusterIP string output := cfg.Action.ShouldPrint() generate := output service, err := kClient.Services(namespace).Get(name) if err != nil { if !generate { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing router %q: %v", name, err) } if !output && cfg.Action.DryRun { return fmt.Errorf("Router %q service does not exist", name) } generate = true } } else { clusterIP = service.Spec.ClusterIP } if !generate { fmt.Fprintf(out, "Router %q service exists\n", name) return nil } if len(cfg.ServiceAccount) == 0 { return fmt.Errorf("you must specify a service account for the router with --service-account") } if err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount, cfg.HostNetwork, cfg.HostPorts); err != nil { err = fmt.Errorf("router could not be created; %v", err) if !cfg.Action.ShouldPrint() { return err } fmt.Fprintf(errout, "error: %v\n", err) defaultOutputErr = cmdutil.ErrExit } // create new router secretEnv := 
app.Environment{} switch { case len(cfg.Credentials) == 0 && len(cfg.ServiceAccount) == 0: return fmt.Errorf("router could not be created; you must specify a service account with --service-account, or a .kubeconfig file path containing credentials for connecting the router to the master with --credentials") case len(cfg.Credentials) > 0: clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := restclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } secretEnv.Add(app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, }) } createServiceAccount := len(cfg.ServiceAccount) > 0 && len(cfg.Credentials) == 0 defaultCert, err := fileutil.LoadData(cfg.DefaultCertificate) if err != nil { return fmt.Errorf("router could not be created; error reading default certificate file: %v", err) } if len(cfg.StatsPassword) == 0 { cfg.StatsPassword = generateStatsPassword() if !cfg.Action.ShouldPrint() { fmt.Fprintf(errout, "info: password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword) } } env := app.Environment{ "ROUTER_SUBDOMAIN": cfg.Subdomain, "ROUTER_SERVICE_NAME": name, "ROUTER_SERVICE_NAMESPACE": namespace, "ROUTER_SERVICE_HTTP_PORT": "80", "ROUTER_SERVICE_HTTPS_PORT": "443", "ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost, "ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername, "ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword, "ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver, "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver, "ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure), "ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath, "ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath, "ROUTER_EXTERNAL_HOST_INTERNAL_ADDRESS": cfg.ExternalHostInternalIP, "ROUTER_EXTERNAL_HOST_VXLAN_GW_CIDR": cfg.ExternalHostVxLANGateway, "STATS_PORT": strconv.Itoa(cfg.StatsPort), "STATS_USERNAME": cfg.StatsUsername, "STATS_PASSWORD": cfg.StatsPassword, } if len(cfg.ForceSubdomain) > 0 { env["ROUTER_SUBDOMAIN"] = cfg.ForceSubdomain env["ROUTER_OVERRIDE_HOSTNAME"] = "true" } env.Add(secretEnv) if len(defaultCert) > 0 { if cfg.SecretsAsEnv { env.Add(app.Environment{"DEFAULT_CERTIFICATE": string(defaultCert)}) } else { env.Add(app.Environment{"DEFAULT_CERTIFICATE_PATH": defaultCertificatePath}) } } env.Add(app.Environment{"DEFAULT_CERTIFICATE_DIR": defaultCertificateDir}) var certName = fmt.Sprintf("%s-certs", cfg.Name) secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace, defaultCert, certName) if err != nil { return fmt.Errorf("router could not be created: %v", err) } livenessProbe := generateLivenessProbeConfig(cfg, ports) readinessProbe 
:= generateReadinessProbeConfig(cfg, ports) exposedPorts := make([]kapi.ContainerPort, len(ports)) copy(exposedPorts, ports) if !cfg.HostPorts { for i := range exposedPorts { exposedPorts[i].HostPort = 0 } } containers := []kapi.Container{ { Name: "router", Image: image, Ports: exposedPorts, Env: env.List(), LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, ImagePullPolicy: kapi.PullIfNotPresent, VolumeMounts: mounts, Resources: kapi.ResourceRequirements{ Requests: kapi.ResourceList{ kapi.ResourceCPU: resource.MustParse("100m"), kapi.ResourceMemory: resource.MustParse("256Mi"), }, }, }, } if cfg.StatsPort > 0 && cfg.ExposeMetrics { pc := generateMetricsExporterContainer(cfg, env) if pc != nil { containers = append(containers, *pc) } } objects := []runtime.Object{} for _, s := range secrets { objects = append(objects, s) } if createServiceAccount { objects = append(objects, &kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}}, &authapi.ClusterRoleBinding{ ObjectMeta: kapi.ObjectMeta{Name: generateRoleBindingName(cfg.Name)}, Subjects: []kapi.ObjectReference{ { Kind: "ServiceAccount", Name: cfg.ServiceAccount, Namespace: namespace, }, }, RoleRef: kapi.ObjectReference{ Kind: "ClusterRole", Name: "system:router", }, }, ) } objects = append(objects, &deployapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: deployapi.DeploymentConfigSpec{ Strategy: deployapi.DeploymentStrategy{ Type: deployapi.DeploymentStrategyTypeRolling, RollingParams: &deployapi.RollingDeploymentStrategyParams{MaxUnavailable: intstr.FromString("25%")}, }, Replicas: cfg.Replicas, Selector: label, Triggers: []deployapi.DeploymentTriggerPolicy{ {Type: deployapi.DeploymentTriggerOnConfigChange}, }, Template: &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ SecurityContext: &kapi.PodSecurityContext{ HostNetwork: cfg.HostNetwork, }, ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: containers, Volumes: volumes, }, }, }, }) objects = app.AddServices(objects, false) // set the service port to the provided output port value for i := range objects { switch t := objects[i].(type) { case *kapi.Service: t.Spec.ClusterIP = clusterIP for j, servicePort := range t.Spec.Ports { for _, targetPort := range ports { if targetPort.ContainerPort == servicePort.Port && targetPort.HostPort != 0 { t.Spec.Ports[j].Port = targetPort.HostPort } } } if len(defaultCert) == 0 { // When a user does not provide the default cert (pem), create one via a Service annotation // The secret generated by the service annotaion contains a tls.crt and tls.key // which ultimately need to be combined into a pem t.Annotations = map[string]string{"service.alpha.openshift.io/serving-cert-secret-name": certName} } } } // TODO: label all created objects with the same label - router=<name> list := &kapi.List{Items: objects} if cfg.Action.ShouldPrint() { mapper, _ := f.Object(false) fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, out) if err := fn(list); err != nil { return fmt.Errorf("unable to print object: %v", err) } return defaultOutputErr } levelPrefixFilter := func(e error) string { // only ignore SA/RB errors if we were creating the service account if createServiceAccount && ignoreError(e, cfg.ServiceAccount, generateRoleBindingName(cfg.Name)) { return "warning" } return "error" } cfg.Action.Bulk.IgnoreError = func(e error) bool { return levelPrefixFilter(e) == "warning" } if errs := 
cfg.Action.WithMessageAndPrefix(fmt.Sprintf("Creating router %s", cfg.Name), "created", levelPrefixFilter).Run(list, namespace); len(errs) > 0 { return cmdutil.ErrExit } return nil }
// RunCmdRouter contains all the necessary functionality for the // OpenShift CLI router command. func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error { switch len(args) { case 0: // uses default value case 1: cfg.Name = args[0] default: return kcmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router") } name := cfg.Name if len(cfg.StatsUsername) > 0 { if strings.Contains(cfg.StatsUsername, ":") { return kcmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername) } } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return fmt.Errorf("unable to parse --ports: %v", err) } // For the host networking case, ensure the ports match. Otherwise, remove host ports for i := 0; i < len(ports); i++ { if cfg.HostNetwork && ports[i].HostPort != 0 && ports[i].ContainerPort != ports[i].HostPort { return fmt.Errorf("when using host networking mode, container port %d and host port %d must be equal", ports[i].ContainerPort, ports[i].HostPort) } } if cfg.StatsPort > 0 { port := kapi.ContainerPort{ Name: "stats", ContainerPort: cfg.StatsPort, Protocol: kapi.ProtocolTCP, } ports = append(ports, port) } label := map[string]string{"router": name} if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := kcmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing router %q: %v", name, err) } generate = true } } if !generate { fmt.Fprintf(out, "Router %q service exists\n", name) return nil } if cfg.DryRun && !output { return fmt.Errorf("router %q does not exist (no service)", name) } if len(cfg.ServiceAccount) == 0 { return fmt.Errorf("you must specify a service account for the router with --service-account") } if err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount, cfg.HostNetwork); err != nil { return fmt.Errorf("router could not be created; %v", err) } // create new router secretEnv := app.Environment{} switch { case len(cfg.Credentials) == 0 && len(cfg.ServiceAccount) == 0: return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials") case len(cfg.Credentials) > 0: clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be 
loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } secretEnv.Add(app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, }) } createServiceAccount := len(cfg.ServiceAccount) > 0 && len(cfg.Credentials) == 0 defaultCert, err := fileutil.LoadData(cfg.DefaultCertificate) if err != nil { return fmt.Errorf("router could not be created; error reading default certificate file: %v", err) } if len(cfg.StatsPassword) == 0 { cfg.StatsPassword = generateStatsPassword() if !output { fmt.Fprintf(cmd.Out(), "info: password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword) } } env := app.Environment{ "ROUTER_SUBDOMAIN": cfg.Subdomain, "ROUTER_SERVICE_NAME": name, "ROUTER_SERVICE_NAMESPACE": namespace, "ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost, "ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername, "ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword, "ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver, "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver, "ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure), "ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath, "ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath, "STATS_PORT": strconv.Itoa(cfg.StatsPort), "STATS_USERNAME": cfg.StatsUsername, "STATS_PASSWORD": cfg.StatsPassword, } env.Add(secretEnv) if len(defaultCert) > 0 { if cfg.SecretsAsEnv { env.Add(app.Environment{"DEFAULT_CERTIFICATE": string(defaultCert)}) } else { // TODO: make --credentials create secrets and bypass service account env.Add(app.Environment{"DEFAULT_CERTIFICATE_PATH": defaultCertificatePath}) } } secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace, defaultCert) if err != nil { return fmt.Errorf("router could not be created: %v", err) } livenessProbe := generateLivenessProbeConfig(cfg, ports) readinessProbe := generateReadinessProbeConfig(cfg, ports) exposedPorts := make([]kapi.ContainerPort, len(ports)) copy(exposedPorts, ports) for i := range exposedPorts { exposedPorts[i].HostPort = 0 } containers := []kapi.Container{ { Name: "router", Image: image, Ports: exposedPorts, Env: env.List(), LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, ImagePullPolicy: kapi.PullIfNotPresent, VolumeMounts: mounts, }, } if cfg.StatsPort > 0 && cfg.ExposeMetrics { pc := generateMetricsExporterContainer(cfg, env) if pc != nil { containers = append(containers, *pc) } } objects := []runtime.Object{} for _, s := range secrets { objects = append(objects, s) } if createServiceAccount { objects = append(objects, &kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}}, &authapi.ClusterRoleBinding{ ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("router-%s-role", cfg.Name)}, Subjects: []kapi.ObjectReference{ { Kind: "ServiceAccount", Name: cfg.ServiceAccount, Namespace: namespace, }, }, RoleRef: 
kapi.ObjectReference{ Kind: "ClusterRole", Name: "system:router", }, }, ) } updatePercent := int(-25) objects = append(objects, &deployapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: deployapi.DeploymentConfigSpec{ Strategy: deployapi.DeploymentStrategy{ Type: deployapi.DeploymentStrategyTypeRolling, RollingParams: &deployapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent}, }, Replicas: cfg.Replicas, Selector: label, Triggers: []deployapi.DeploymentTriggerPolicy{ {Type: deployapi.DeploymentTriggerOnConfigChange}, }, Template: &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ SecurityContext: &kapi.PodSecurityContext{ HostNetwork: cfg.HostNetwork, }, ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: containers, Volumes: volumes, }, }, }, }) objects = app.AddServices(objects, false) // set the service port to the provided hostport value for i := range objects { switch t := objects[i].(type) { case *kapi.Service: for j, servicePort := range t.Spec.Ports { for _, targetPort := range ports { if targetPort.ContainerPort == servicePort.Port && targetPort.HostPort != 0 { t.Spec.Ports[j].Port = targetPort.HostPort } } } } } // TODO: label all created objects with the same label - router=<name> list := &kapi.List{Items: objects} if output { list.Items, err = cmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items) if err != nil { return err } if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.ClientForMapping, After: configcmd.NewPrintNameOrErrorAfter(mapper, kcmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil }
// RunCmdRouter contains all the necessary functionality for the OpenShift cli router command
func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error {
	var name string
	switch len(args) {
	case 0:
		name = "router"
	case 1:
		name = args[0]
	default:
		return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router")
	}

	if len(cfg.StatsUsername) > 0 {
		if strings.Contains(cfg.StatsUsername, ":") {
			return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername)
		}
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		glog.Fatal(err)
	}

	if cfg.StatsPort > 0 {
		ports = append(ports, kapi.ContainerPort{
			Name:          "stats",
			HostPort:      cfg.StatsPort,
			ContainerPort: cfg.StatsPort,
			Protocol:      kapi.ProtocolTCP,
		})
	}

	label := map[string]string{"router": name}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	p, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}

	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing router %q: %v", name, err)
			}
			generate = true
		}
	}

	if generate {
		if cfg.DryRun && !output {
			return fmt.Errorf("router %q does not exist (no service)", name)
		}

		// create new router
		if len(cfg.Credentials) == 0 {
			return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials")
		}

		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := kclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		}

		defaultCert, err := loadDefaultCert(cfg.DefaultCertificate)
		if err != nil {
			return fmt.Errorf("router could not be created; error reading default certificate file: %v", err)
		}

		if len(cfg.StatsPassword) == 0 {
			cfg.StatsPassword = generateStatsPassword()
			fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword)
		}

		env := app.Environment{
			"OPENSHIFT_MASTER":         config.Host,
			"OPENSHIFT_CA_DATA":        string(config.CAData),
			"OPENSHIFT_KEY_DATA":       string(config.KeyData),
			"OPENSHIFT_CERT_DATA":      string(config.CertData),
			"OPENSHIFT_INSECURE":       insecure,
			"DEFAULT_CERTIFICATE":      defaultCert,
			"ROUTER_SERVICE_NAME":      name,
			"ROUTER_SERVICE_NAMESPACE": namespace,
			"STATS_PORT":               strconv.Itoa(cfg.StatsPort),
			"STATS_USERNAME":           cfg.StatsUsername,
			"STATS_PASSWORD":           cfg.StatsPassword,
		}

		objects := []runtime.Object{
			&dapi.DeploymentConfig{
				ObjectMeta: kapi.ObjectMeta{
					Name:   name,
					Labels: label,
				},
				Triggers: []dapi.DeploymentTriggerPolicy{
					{Type: dapi.DeploymentTriggerOnConfigChange},
				},
				Template: dapi.DeploymentTemplate{
					ControllerTemplate: kapi.ReplicationControllerSpec{
						Replicas: cfg.Replicas,
						Selector: label,
						Template: &kapi.PodTemplateSpec{
							ObjectMeta: kapi.ObjectMeta{Labels: label},
							Spec: kapi.PodSpec{
								ServiceAccount: cfg.ServiceAccount,
								NodeSelector:   nodeSelector,
								Containers: []kapi.Container{
									{
										Name:  "router",
										Image: image,
										Ports: ports,
										Env:   env.List(),
										LivenessProbe: &kapi.Probe{
											Handler: kapi.Handler{
												TCPSocket: &kapi.TCPSocketAction{
													Port: kutil.IntOrString{
														IntVal: ports[0].ContainerPort,
													},
												},
											},
											InitialDelaySeconds: 10,
										},
										ImagePullPolicy: kapi.PullIfNotPresent,
									},
								},
							},
						},
					},
				},
			},
		}
		objects = app.AddServices(objects, true)
		// TODO: label all created objects with the same label - router=<name>
		list := &kapi.List{Items: objects}

		if output {
			if err := p.PrintObj(list, out); err != nil {
				return fmt.Errorf("unable to print object: %v", err)
			}
			return nil
		}

		mapper, typer := f.Factory.Object()
		bulk := configcmd.Bulk{
			Mapper:            mapper,
			Typer:             typer,
			RESTClientFactory: f.Factory.RESTClient,
			After:             configcmd.NewPrintNameOrErrorAfter(out, os.Stderr),
		}
		if errs := bulk.Create(list, namespace); len(errs) != 0 {
			return errExit
		}
		return nil
	}

	fmt.Fprintf(out, "Router %q service exists\n", name)
	return nil
}