// Create the config and services associated with this IP Failover configuration.
func (p *KeepalivedPlugin) Create(out io.Writer) error {
	namespace, err := p.GetNamespace()
	if err != nil {
		return fmt.Errorf("error getting Namespace: %v", err)
	}

	mapper, typer := p.Factory.Factory.Object()
	bulk := configcmd.Bulk{
		Mapper:            mapper,
		Typer:             typer,
		RESTClientFactory: p.Factory.Factory.RESTClient,
		After:             configcmd.NewPrintNameOrErrorAfter(out, os.Stderr),
	}

	configList, err := p.Generate()
	if err != nil {
		return fmt.Errorf("error generating config: %v", err)
	}

	if errs := bulk.Create(configList, namespace); len(errs) != 0 {
		return fmt.Errorf("error creating config: %+v", errs)
	}

	glog.V(4).Infof("Created KeepAlived IP Failover DeploymentConfig: %q", p.Name)

	return nil
}
// NewAppFromTemplate creates a new application from the given template and parameters
// and returns the list of objects created, or the errors.
//
// The template referenced should already have been created.
func (c *Context) NewAppFromTemplate(templateName string, templateParameters []string) (*kapi.List, []error) {
	factory, err := c.Factory()
	if err != nil {
		return nil, []error{err}
	}
	client, _, err := factory.Clients()
	if err != nil {
		return nil, []error{err}
	}
	namespace, err := c.Namespace()
	if err != nil {
		return nil, []error{err}
	}

	mapper, typer := factory.Object()
	clientMapper := factory.ClientMapperForCommand()

	appConfig := cmd.NewAppConfig()
	appConfig.SetTyper(typer)
	appConfig.SetMapper(mapper)
	appConfig.SetClientMapper(clientMapper)
	appConfig.Out = ioutil.Discard
	appConfig.ErrOut = ioutil.Discard
	appConfig.SetOpenShiftClient(client, namespace)
	appConfig.Components = append(appConfig.Components, templateName)
	if len(templateParameters) > 0 {
		appConfig.TemplateParameters = append(appConfig.TemplateParameters, templateParameters...)
	}

	// parse
	appResult, err := appConfig.Run()
	if err != nil {
		return nil, []error{err}
	}

	// and create all objects
	bulk := configcmd.Bulk{
		Mapper:            mapper,
		Typer:             typer,
		RESTClientFactory: factory.RESTClient,
	}
	if errs := bulk.Create(appResult.List, appResult.Namespace); len(errs) != 0 {
		return nil, errs
	}

	return appResult.List, []error{}
}
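A minimal caller sketch for the helper above, assuming a *Context that has already been initialized elsewhere in the same package; the template name and parameter value are placeholders for illustration and do not come from the snippets here. Only the NewAppFromTemplate signature and the kapi.List result are taken from the code above.

// instantiateSampleTemplate is a hypothetical usage sketch of NewAppFromTemplate.
// ctx is assumed to be a *Context wired up elsewhere; "sample-template" and the
// MYSQL_USER parameter are illustrative placeholders.
func instantiateSampleTemplate(ctx *Context, out io.Writer) error {
	list, errs := ctx.NewAppFromTemplate("sample-template", []string{"MYSQL_USER=user"})
	if len(errs) != 0 {
		// NewAppFromTemplate reports every failure; surface the first one here.
		return fmt.Errorf("template instantiation failed: %v", errs[0])
	}
	fmt.Fprintf(out, "created %d object(s)\n", len(list.Items))
	return nil
}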
func createObjects(f *clientcmd.Factory, out io.Writer, result *newcmd.AppResult) error {
	// TODO: Validate everything before building
	mapper, typer := f.Factory.Object()
	bulk := configcmd.Bulk{
		Mapper:            mapper,
		Typer:             typer,
		RESTClientFactory: f.Factory.RESTClient,
		After:             configcmd.NewPrintNameOrErrorAfter(out, os.Stderr),
	}
	if errs := bulk.Create(result.List, result.Namespace); len(errs) != 0 {
		return errExit
	}
	return nil
}
func createObjects(config *newcmd.AppConfig, after configcmd.AfterFunc, result *newcmd.AppResult) error {
	bulk := configcmd.Bulk{
		Mapper:            config.Mapper,
		Typer:             config.Typer,
		RESTClientFactory: config.ClientMapper.ClientForMapping,
		After:             after,
		// Retry is used to support previous versions of the API server that will
		// consider the presence of an unknown trigger type to be an error.
		Retry: retryBuildConfig,
	}
	if errs := bulk.Create(result.List, result.Namespace); len(errs) != 0 {
		return cmdutil.ErrExit
	}
	return nil
}
func createObjects(f *clientcmd.Factory, out io.Writer, shortOutput bool, result *newcmd.AppResult) error {
	mapper, typer := f.Factory.Object()
	bulk := configcmd.Bulk{
		Mapper:            mapper,
		Typer:             typer,
		RESTClientFactory: f.Factory.RESTClient,
		After:             configcmd.NewPrintNameOrErrorAfter(mapper, shortOutput, "created", out, os.Stderr),
		// Retry is used to support previous versions of the API server that will
		// consider the presence of an unknown trigger type to be an error.
		Retry: retryBuildConfig,
	}
	if errs := bulk.Create(result.List, result.Namespace); len(errs) != 0 {
		return errExit
	}
	return nil
}
func createObjects(f *clientcmd.Factory, after configcmd.AfterFunc, result *newcmd.AppResult) error {
	mapper, typer := f.Factory.Object()
	bulk := configcmd.Bulk{
		Mapper:            mapper,
		Typer:             typer,
		RESTClientFactory: f.Factory.RESTClient,
		After:             after,
		// Retry is used to support previous versions of the API server that will
		// consider the presence of an unknown trigger type to be an error.
		Retry: retryBuildConfig,
	}
	if errs := bulk.Create(result.List, result.Namespace); len(errs) != 0 {
		return errExit
	}
	return nil
}
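The createObjects variants above all reduce to the same shape: populate a configcmd.Bulk, call Create with the object list and namespace, and turn the per-object errors into a single result. The sketch below isolates that shared step; it assumes the Bulk fields (Mapper, Typer, RESTClientFactory, and optionally After/Retry) have been filled in by the caller exactly as in the snippets above, and that configcmd, kapi, and utilerrors are imported as they are elsewhere in this listing. It is a sketch of the pattern, not the project's own helper.

// createList performs the bulk create and collapses the per-object errors.
// The CLI variants above return a sentinel (errExit / cmdutil.ErrExit) instead,
// because their After hook has already printed each failure; a library caller
// can aggregate the errors as shown here.
func createList(bulk configcmd.Bulk, list *kapi.List, namespace string) error {
	if errs := bulk.Create(list, namespace); len(errs) != 0 {
		return utilerrors.NewAggregate(errs)
	}
	return nil
}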
// RunCmdRouter contains all the necessary functionality for the // OpenShift CLI router command. func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error { var name string switch len(args) { case 0: name = "router" case 1: name = args[0] default: return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router") } if len(cfg.StatsUsername) > 0 { if strings.Contains(cfg.StatsUsername, ":") { return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername) } } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { glog.Fatal(err) } // For the host networking case, ensure the ports match. if cfg.HostNetwork { for i := 0; i < len(ports); i++ { if ports[i].ContainerPort != ports[i].HostPort { return cmdutil.UsageError(cmd, "For host networking mode, please ensure that the container [%v] and host [%v] ports match", ports[i].ContainerPort, ports[i].HostPort) } } } if cfg.StatsPort > 0 { ports = append(ports, kapi.ContainerPort{ Name: "stats", HostPort: cfg.StatsPort, ContainerPort: cfg.StatsPort, Protocol: kapi.ProtocolTCP, }) } label := map[string]string{"router": name} if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing router %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("router %q does not exist (no service)", name) } if len(cfg.ServiceAccount) == 0 { return fmt.Errorf("router could not be created; you must specify a service account with --service-account") } err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount) if err != nil { return fmt.Errorf("router could not be created; %v", err) } // create new router if len(cfg.Credentials) == 0 { return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could 
not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } defaultCert, err := loadCert(cfg.DefaultCertificate) if err != nil { return fmt.Errorf("router could not be created; error reading default certificate file: %v", err) } if len(cfg.StatsPassword) == 0 { cfg.StatsPassword = generateStatsPassword() fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword) } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, "DEFAULT_CERTIFICATE": defaultCert, "ROUTER_SERVICE_NAME": name, "ROUTER_SERVICE_NAMESPACE": namespace, "ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost, "ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername, "ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword, "ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver, "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver, "ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure), "ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath, "ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath, "STATS_PORT": strconv.Itoa(cfg.StatsPort), "STATS_USERNAME": cfg.StatsUsername, "STATS_PASSWORD": cfg.StatsPassword, } updatePercent := int(-25) secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace) if err != nil { return fmt.Errorf("router could not be created: %v", err) } livenessProbe := generateLivenessProbeConfig(cfg, ports) containers := []kapi.Container{ { Name: "router", Image: image, Ports: ports, Env: env.List(), LivenessProbe: livenessProbe, ImagePullPolicy: kapi.PullIfNotPresent, VolumeMounts: mounts, }, } if cfg.StatsPort > 0 && cfg.ExposeMetrics { pc := generateMetricsExporterContainer(cfg, env) if pc != nil { containers = append(containers, *pc) } } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: dapi.DeploymentTemplate{ Strategy: dapi.DeploymentStrategy{ Type: dapi.DeploymentStrategyTypeRolling, RollingParams: &dapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent}, }, ControllerTemplate: kapi.ReplicationControllerSpec{ Replicas: cfg.Replicas, Selector: label, Template: &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ SecurityContext: &kapi.PodSecurityContext{ HostNetwork: cfg.HostNetwork, }, ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: containers, Volumes: volumes, }, }, }, }, }, } if len(secrets) != 0 { serviceAccount, err := kClient.ServiceAccounts(namespace).Get(cfg.ServiceAccount) if err != nil { return fmt.Errorf("error looking up service account %s: %v", cfg.ServiceAccount, err) } for _, secret := range secrets { objects = append(objects, secret) serviceAccount.Secrets = append(serviceAccount.Secrets, kapi.ObjectReference{Name: secret.Name}) } _, err = kClient.ServiceAccounts(namespace).Update(serviceAccount) if err != nil { return fmt.Errorf("error adding secret key to service account %s: %v", cfg.ServiceAccount, err) } } objects = 
app.AddServices(objects, true) // TODO: label all created objects with the same label - router=<name> list := &kapi.List{Items: objects} if output { if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("Unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Router %q service exists\n", name) return nil }
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return cmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } p, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry if len(cfg.Credentials) == 0 { return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, 
Env: env.List(), VolumeMounts: []kapi.VolumeMount{ { Name: "registry-storage", MountPath: cfg.Volume, }, }, SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, // TODO reenable the liveness probe when we no longer support the v1 registry. /* LivenessProbe: &kapi.Probe{ InitialDelaySeconds: 3, TimeoutSeconds: 5, Handler: kapi.Handler{ HTTPGet: &kapi.HTTPGetAction{ Path: "/healthz", Port: util.NewIntOrStringFromInt(5000), }, }, }, */ }, }, Volumes: []kapi.Volume{ { Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }, }, }, } if mountHost { podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: dapi.DeploymentTemplate{ ControllerTemplate: kapi.ReplicationControllerSpec{ Replicas: cfg.Replicas, Selector: label, Template: podTemplate, }, }, }, } objects = app.AddServices(objects, true) // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { if err := p.PrintObj(list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(out, os.Stderr), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil }
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return cmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry if len(cfg.Credentials) == 0 { return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } healthzPort := defaultPort if len(ports) > 0 { healthzPort = ports[0].ContainerPort env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort) env["REGISTRY_HTTP_NET"] = "tcp" } livenessProbe := generateLivenessProbeConfig(healthzPort) readinessProbe := 
generateReadinessProbeConfig(healthzPort) secretBytes := make([]byte, randomSecretSize) if _, err := cryptorand.Read(secretBytes); err != nil { return fmt.Errorf("registry does not exist; could not generate random bytes for HTTP secret: %v", err) } env["REGISTRY_HTTP_SECRET"] = base64.StdEncoding.EncodeToString(secretBytes) mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, Env: env.List(), VolumeMounts: []kapi.VolumeMount{ { Name: "registry-storage", MountPath: cfg.Volume, }, }, SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, }, }, Volumes: []kapi.Volume{ { Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }, }, }, } if mountHost { podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: dapi.DeploymentConfigSpec{ Replicas: cfg.Replicas, Selector: label, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: podTemplate, }, }, } objects = app.AddServices(objects, true) // Set registry service's sessionAffinity to ClientIP to prevent push // failures due to a use of poorly consistent storage shared by // multiple replicas. for _, obj := range objects { switch t := obj.(type) { case *kapi.Service: t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP } } // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil }
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(projectrequestregistry.Strategy, ctx, obj); err != nil {
		return nil, err
	}

	projectRequest := obj.(*projectapi.ProjectRequest)

	if _, err := r.openshiftClient.Projects().Get(projectRequest.Name); err == nil {
		return nil, kapierror.NewAlreadyExists("project", projectRequest.Name)
	}

	projectName := projectRequest.Name
	projectAdmin := ""
	if userInfo, exists := kapi.UserFrom(ctx); exists {
		projectAdmin = userInfo.GetName()
	}

	template, err := r.getTemplate()
	if err != nil {
		return nil, err
	}

	for i := range template.Parameters {
		switch template.Parameters[i].Name {
		case ProjectAdminUserParam:
			template.Parameters[i].Value = projectAdmin
		case ProjectDescriptionParam:
			template.Parameters[i].Value = projectRequest.Description
		case ProjectDisplayNameParam:
			template.Parameters[i].Value = projectRequest.DisplayName
		case ProjectNameParam:
			template.Parameters[i].Value = projectName
		}
	}

	list, err := r.openshiftClient.TemplateConfigs(kapi.NamespaceDefault).Create(template)
	if err != nil {
		return nil, err
	}
	if err := utilerrors.NewAggregate(runtime.DecodeList(list.Objects, kapi.Scheme)); err != nil {
		return nil, kapierror.NewInternalError(err)
	}

	// one of the items in this list should be the project. We are going to locate it,
	// remove it from the list, and create it separately.
	var projectFromTemplate *projectapi.Project
	objectsToCreate := &kapi.List{}
	for i := range list.Objects {
		if templateProject, ok := list.Objects[i].(*projectapi.Project); ok {
			projectFromTemplate = templateProject
			if len(list.Objects) > (i + 1) {
				objectsToCreate.Items = append(objectsToCreate.Items, list.Objects[i+1:]...)
			}
			break
		}

		objectsToCreate.Items = append(objectsToCreate.Items, list.Objects[i])
	}
	if projectFromTemplate == nil {
		return nil, kapierror.NewInternalError(fmt.Errorf("the project template (%s/%s) is not correctly configured: must contain a project resource", r.templateNamespace, r.templateName))
	}

	// we split out project creation separately so that in a case of racers for the same
	// project, only one will win and create the rest of their template objects
	if _, err := r.openshiftClient.Projects().Create(projectFromTemplate); err != nil {
		return nil, err
	}

	bulk := configcmd.Bulk{
		Mapper: latest.RESTMapper,
		Typer:  kapi.Scheme,
		RESTClientFactory: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			return r.openshiftClient, nil
		},
	}
	if err := utilerrors.NewAggregate(bulk.Create(objectsToCreate, projectName)); err != nil {
		return nil, kapierror.NewInternalError(err)
	}

	return r.openshiftClient.Projects().Get(projectName)
}
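For orientation, a hypothetical caller of this REST storage is sketched below. Only the Create(ctx, obj) signature and the parameter substitution above are taken from the snippet; the project name, display name, description, and the assumption that projectapi.ProjectRequest carries an embedded kapi.ObjectMeta plus DisplayName/Description fields are illustrative and should be checked against the actual API types.

// requestDemoProject is a hypothetical sketch: storage is assumed to be the
// *REST shown above and ctx a request context carrying the acting user.
func requestDemoProject(storage *REST, ctx kapi.Context) (runtime.Object, error) {
	req := &projectapi.ProjectRequest{
		ObjectMeta:  kapi.ObjectMeta{Name: "demo"},      // becomes ProjectNameParam
		DisplayName: "Demo project",                     // becomes ProjectDisplayNameParam
		Description: "Illustrative request only.",       // becomes ProjectDescriptionParam
	}
	// The project itself is created first; the remaining template objects are
	// then created via configcmd.Bulk inside Create.
	return storage.Create(ctx, req)
}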
// RunCmdRouter contains all the necessary functionality for the // OpenShift CLI router command. func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error { switch len(args) { case 0: // uses default value case 1: cfg.Name = args[0] default: return kcmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router") } name := cfg.Name if len(cfg.StatsUsername) > 0 { if strings.Contains(cfg.StatsUsername, ":") { return kcmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername) } } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return fmt.Errorf("unable to parse --ports: %v", err) } // For the host networking case, ensure the ports match. Otherwise, remove host ports for i := 0; i < len(ports); i++ { if cfg.HostNetwork && ports[i].HostPort != 0 && ports[i].ContainerPort != ports[i].HostPort { return fmt.Errorf("when using host networking mode, container port %d and host port %d must be equal", ports[i].ContainerPort, ports[i].HostPort) } } if cfg.StatsPort > 0 { port := kapi.ContainerPort{ Name: "stats", ContainerPort: cfg.StatsPort, Protocol: kapi.ProtocolTCP, } ports = append(ports, port) } label := map[string]string{"router": name} if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := kcmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing router %q: %v", name, err) } generate = true } } if !generate { fmt.Fprintf(out, "Router %q service exists\n", name) return nil } if cfg.DryRun && !output { return fmt.Errorf("router %q does not exist (no service)", name) } if len(cfg.ServiceAccount) == 0 { return fmt.Errorf("you must specify a service account for the router with --service-account") } if err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount, cfg.HostNetwork); err != nil { return fmt.Errorf("router could not be created; %v", err) } // create new router secretEnv := app.Environment{} switch { case len(cfg.Credentials) == 0 && len(cfg.ServiceAccount) == 0: return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials") case len(cfg.Credentials) > 0: clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be 
loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } secretEnv.Add(app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, }) } createServiceAccount := len(cfg.ServiceAccount) > 0 && len(cfg.Credentials) == 0 defaultCert, err := fileutil.LoadData(cfg.DefaultCertificate) if err != nil { return fmt.Errorf("router could not be created; error reading default certificate file: %v", err) } if len(cfg.StatsPassword) == 0 { cfg.StatsPassword = generateStatsPassword() if !output { fmt.Fprintf(cmd.Out(), "info: password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword) } } env := app.Environment{ "ROUTER_SUBDOMAIN": cfg.Subdomain, "ROUTER_SERVICE_NAME": name, "ROUTER_SERVICE_NAMESPACE": namespace, "ROUTER_EXTERNAL_HOST_HOSTNAME": cfg.ExternalHost, "ROUTER_EXTERNAL_HOST_USERNAME": cfg.ExternalHostUsername, "ROUTER_EXTERNAL_HOST_PASSWORD": cfg.ExternalHostPassword, "ROUTER_EXTERNAL_HOST_HTTP_VSERVER": cfg.ExternalHostHttpVserver, "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER": cfg.ExternalHostHttpsVserver, "ROUTER_EXTERNAL_HOST_INSECURE": strconv.FormatBool(cfg.ExternalHostInsecure), "ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath, "ROUTER_EXTERNAL_HOST_PRIVKEY": privkeyPath, "STATS_PORT": strconv.Itoa(cfg.StatsPort), "STATS_USERNAME": cfg.StatsUsername, "STATS_PASSWORD": cfg.StatsPassword, } env.Add(secretEnv) if len(defaultCert) > 0 { if cfg.SecretsAsEnv { env.Add(app.Environment{"DEFAULT_CERTIFICATE": string(defaultCert)}) } else { // TODO: make --credentials create secrets and bypass service account env.Add(app.Environment{"DEFAULT_CERTIFICATE_PATH": defaultCertificatePath}) } } secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace, defaultCert) if err != nil { return fmt.Errorf("router could not be created: %v", err) } livenessProbe := generateLivenessProbeConfig(cfg, ports) readinessProbe := generateReadinessProbeConfig(cfg, ports) exposedPorts := make([]kapi.ContainerPort, len(ports)) copy(exposedPorts, ports) for i := range exposedPorts { exposedPorts[i].HostPort = 0 } containers := []kapi.Container{ { Name: "router", Image: image, Ports: exposedPorts, Env: env.List(), LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, ImagePullPolicy: kapi.PullIfNotPresent, VolumeMounts: mounts, }, } if cfg.StatsPort > 0 && cfg.ExposeMetrics { pc := generateMetricsExporterContainer(cfg, env) if pc != nil { containers = append(containers, *pc) } } objects := []runtime.Object{} for _, s := range secrets { objects = append(objects, s) } if createServiceAccount { objects = append(objects, &kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}}, &authapi.ClusterRoleBinding{ ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("router-%s-role", cfg.Name)}, Subjects: []kapi.ObjectReference{ { Kind: "ServiceAccount", Name: cfg.ServiceAccount, Namespace: namespace, }, }, RoleRef: 
kapi.ObjectReference{ Kind: "ClusterRole", Name: "system:router", }, }, ) } updatePercent := int(-25) objects = append(objects, &deployapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: deployapi.DeploymentConfigSpec{ Strategy: deployapi.DeploymentStrategy{ Type: deployapi.DeploymentStrategyTypeRolling, RollingParams: &deployapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent}, }, Replicas: cfg.Replicas, Selector: label, Triggers: []deployapi.DeploymentTriggerPolicy{ {Type: deployapi.DeploymentTriggerOnConfigChange}, }, Template: &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ SecurityContext: &kapi.PodSecurityContext{ HostNetwork: cfg.HostNetwork, }, ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: containers, Volumes: volumes, }, }, }, }) objects = app.AddServices(objects, false) // set the service port to the provided hostport value for i := range objects { switch t := objects[i].(type) { case *kapi.Service: for j, servicePort := range t.Spec.Ports { for _, targetPort := range ports { if targetPort.ContainerPort == servicePort.Port && targetPort.HostPort != 0 { t.Spec.Ports[j].Port = targetPort.HostPort } } } } } // TODO: label all created objects with the same label - router=<name> list := &kapi.List{Items: objects} if output { list.Items, err = cmdutil.ConvertItemsForDisplayFromDefaultCommand(cmd, list.Items) if err != nil { return err } if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.ClientForMapping, After: configcmd.NewPrintNameOrErrorAfter(mapper, kcmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil }
// RunCmdRouter contains all the necessary functionality for the OpenShift cli router command func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error { var name string switch len(args) { case 0: name = "router" case 1: name = args[0] default: return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router") } if len(cfg.StatsUsername) > 0 { if strings.Contains(cfg.StatsUsername, ":") { return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername) } } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { glog.Fatal(err) } if cfg.StatsPort > 0 { ports = append(ports, kapi.ContainerPort{ Name: "stats", HostPort: cfg.StatsPort, ContainerPort: cfg.StatsPort, Protocol: kapi.ProtocolTCP, }) } label := map[string]string{"router": name} if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { glog.Fatal(err) } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } p, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing router %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("router %q does not exist (no service)", name) } // create new router if len(cfg.Credentials) == 0 { return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } defaultCert, err := loadDefaultCert(cfg.DefaultCertificate) if err != nil { return fmt.Errorf("router could not be created; error reading default certificate file", err) } if len(cfg.StatsPassword) == 0 { cfg.StatsPassword = generateStatsPassword() fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, 
cfg.StatsPassword) } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, "DEFAULT_CERTIFICATE": defaultCert, "ROUTER_SERVICE_NAME": name, "ROUTER_SERVICE_NAMESPACE": namespace, "STATS_PORT": strconv.Itoa(cfg.StatsPort), "STATS_USERNAME": cfg.StatsUsername, "STATS_PASSWORD": cfg.StatsPassword, } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: dapi.DeploymentTemplate{ ControllerTemplate: kapi.ReplicationControllerSpec{ Replicas: cfg.Replicas, Selector: label, Template: &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ ServiceAccount: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "router", Image: image, Ports: ports, Env: env.List(), LivenessProbe: &kapi.Probe{ Handler: kapi.Handler{ TCPSocket: &kapi.TCPSocketAction{ Port: kutil.IntOrString{ IntVal: ports[0].ContainerPort, }, }, }, InitialDelaySeconds: 10, }, ImagePullPolicy: kapi.PullIfNotPresent, }, }, }, }, }, }, }, } objects = app.AddServices(objects, true) // TODO: label all created objects with the same label - router=<name> list := &kapi.List{Items: objects} if output { if err := p.PrintObj(list, out); err != nil { return fmt.Errorf("Unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(out, os.Stderr), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Router %q service exists\n", name) return nil }
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return kcmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return kcmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := kcmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } var clusterIP string generate := output service, err := kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) && !generate { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } else { clusterIP = service.Spec.ClusterIP } if !generate { fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil } if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry secretEnv := app.Environment{} switch { case len(cfg.ServiceAccount) == 0 && len(cfg.Credentials) == 0: return fmt.Errorf("registry could not be created; a service account or the path to a .kubeconfig file must be provided") case len(cfg.Credentials) > 0: clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := restclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } secretEnv = app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } } needServiceAccountRole := len(cfg.ServiceAccount) > 0 && 
len(cfg.Credentials) == 0 var servingCert, servingKey []byte if len(cfg.ServingCertPath) > 0 { data, err := ioutil.ReadFile(cfg.ServingCertPath) if err != nil { return fmt.Errorf("registry does not exist; could not load TLS certificate file %q: %v", cfg.ServingCertPath, err) } servingCert = data } if len(cfg.ServingKeyPath) > 0 { data, err := ioutil.ReadFile(cfg.ServingKeyPath) if err != nil { return fmt.Errorf("registry does not exist; could not load TLS private key file %q: %v", cfg.ServingKeyPath, err) } servingCert = data } env := app.Environment{} env.Add(secretEnv) healthzPort := defaultPort if len(ports) > 0 { healthzPort = ports[0].ContainerPort env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort) env["REGISTRY_HTTP_NET"] = "tcp" } secrets, volumes, mounts, extraEnv, tls, err := generateSecretsConfig(cfg, namespace, servingCert, servingKey) if err != nil { return err } env.Add(extraEnv) livenessProbe := generateLivenessProbeConfig(healthzPort, tls) readinessProbe := generateReadinessProbeConfig(healthzPort, tls) mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, Env: env.List(), VolumeMounts: append(mounts, kapi.VolumeMount{ Name: "registry-storage", MountPath: cfg.Volume, }), SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, LivenessProbe: livenessProbe, ReadinessProbe: readinessProbe, }, }, Volumes: append(volumes, kapi.Volume{ Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }), ServiceAccountName: cfg.ServiceAccount, }, } if mountHost { podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[len(podTemplate.Spec.Volumes)-1].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{} for _, s := range secrets { objects = append(objects, s) } if needServiceAccountRole { objects = append(objects, &kapi.ServiceAccount{ObjectMeta: kapi.ObjectMeta{Name: cfg.ServiceAccount}}, &authapi.ClusterRoleBinding{ ObjectMeta: kapi.ObjectMeta{Name: fmt.Sprintf("registry-%s-role", cfg.Name)}, Subjects: []kapi.ObjectReference{ { Kind: "ServiceAccount", Name: cfg.ServiceAccount, Namespace: namespace, }, }, RoleRef: kapi.ObjectReference{ Kind: "ClusterRole", Name: "system:registry", }, }, ) } objects = append(objects, &deployapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Spec: deployapi.DeploymentConfigSpec{ Replicas: cfg.Replicas, Selector: label, Triggers: []deployapi.DeploymentTriggerPolicy{ {Type: deployapi.DeploymentTriggerOnConfigChange}, }, Template: podTemplate, }, }) objects = app.AddServices(objects, true) // Set registry service's sessionAffinity to ClientIP to prevent push // failures due to a use of poorly consistent storage shared by // multiple replicas. Also reuse the cluster IP if provided to avoid // changing the internal value. 
for _, obj := range objects { switch t := obj.(type) { case *kapi.Service: t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP t.Spec.ClusterIP = clusterIP } } // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { fn := cmdutil.VersionedPrintObject(f.PrintObject, cmd, out) if err := fn(list); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.ClientForMapping, After: configcmd.NewPrintNameOrErrorAfter(mapper, kcmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil }