// GetClusterAdminClientConfig returns the REST client config for the cluster
// admin identified by the given kubeconfig file.
func GetClusterAdminClientConfig(adminKubeConfigFile string) (*kclient.Config, error) {
	_, conf, err := configapi.GetKubeClient(adminKubeConfigFile)
	if err != nil {
		return nil, err
	}
	return conf, nil
}
// GetClusterAdminKubeClient returns a Kubernetes client for the cluster admin
// identified by the given kubeconfig file.
func GetClusterAdminKubeClient(adminKubeConfigFile string) (*kclient.Client, error) {
	c, _, err := configapi.GetKubeClient(adminKubeConfigFile)
	if err != nil {
		return nil, err
	}
	return c, nil
}
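// exampleClusterAdminPing is a hypothetical usage sketch (not part of the
// original source): it shows how the helpers above might be used to connect
// as the cluster admin and report the server version. The kubeconfig path is
// illustrative, and ServerVersion() is assumed from the vendored kclient of
// this era.
func exampleClusterAdminPing() {
	adminKubeConfig := "openshift.local.config/master/admin.kubeconfig" // illustrative path
	client, err := GetClusterAdminKubeClient(adminKubeConfig)
	if err != nil {
		glog.Fatalf("Could not build cluster admin client: %v", err)
	}
	v, err := client.ServerVersion()
	if err != nil {
		glog.Fatalf("Could not reach the master: %v", err)
	}
	glog.Infof("Connected to Kubernetes %s as cluster admin", v.String())
}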
// newServiceAccountTokenGetter returns a ServiceAccountTokenGetter used to
// validate service account tokens.
func newServiceAccountTokenGetter(options configapi.MasterConfig, client *etcdclient.Client) (serviceaccount.ServiceAccountTokenGetter, error) {
	var tokenGetter serviceaccount.ServiceAccountTokenGetter
	if options.KubernetesMasterConfig == nil {
		// When we're running against an external Kubernetes, use the external
		// Kubernetes client to validate service account tokens. This prevents
		// infinite auth loops if the privilegedLoopbackKubeClient authenticates
		// using a service account token.
		kubeClient, _, err := configapi.GetKubeClient(options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return nil, err
		}
		tokenGetter = serviceaccount.NewGetterFromClient(kubeClient)
	} else {
		// When we're running in-process, go straight to etcd (using the
		// KubernetesStorageVersion/KubernetesStoragePrefix, since service
		// accounts are Kubernetes objects).
		ketcdHelper, err := master.NewEtcdHelper(client, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("error setting up Kubernetes server storage: %v", err)
		}
		tokenGetter = serviceaccount.NewGetterFromEtcdHelper(ketcdHelper)
	}
	return tokenGetter, nil
}
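// exampleServiceAccountAuthenticator is a hypothetical sketch (not part of
// the original source): it shows how the token getter above might be wired
// into a service account token authenticator. The
// serviceaccount.JWTTokenAuthenticator signature and the authenticator.Token
// and rsa.PublicKey types are assumed from the vendored Kubernetes packages
// of this era.
func exampleServiceAccountAuthenticator(options configapi.MasterConfig, client *etcdclient.Client, publicKeys []*rsa.PublicKey) (authenticator.Token, error) {
	tokenGetter, err := newServiceAccountTokenGetter(options, client)
	if err != nil {
		return nil, err
	}
	// lookup=true rejects tokens whose backing secret or service account has been deleted
	return serviceaccount.JWTTokenAuthenticator(publicKeys, true, tokenGetter), nil
}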
// RunNode takes the options and:
//  1. Creates certs if needed
//  2. Reads a fully specified node config OR builds one from the args
//  3. Writes the fully specified node config and exits if requested
//  4. Starts the node based on the fully specified config
func (o NodeOptions) RunNode() error {
	if !o.IsRunFromConfig() || o.IsWriteConfigOnly() {
		glog.V(2).Infof("Generating node configuration")
		if err := o.CreateNodeConfig(); err != nil {
			return err
		}
	}

	if o.IsWriteConfigOnly() {
		return nil
	}

	var nodeConfig *configapi.NodeConfig
	var err error
	if o.IsRunFromConfig() {
		nodeConfig, err = configapilatest.ReadAndResolveNodeConfig(o.ConfigFile)
	} else {
		nodeConfig, err = o.NodeArgs.BuildSerializeableNodeConfig()
	}
	if err != nil {
		return err
	}

	errs := validation.ValidateNodeConfig(nodeConfig)
	if len(errs) != 0 {
		return kerrors.NewInvalid("NodeConfig", o.ConfigFile, errs)
	}

	_, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig)
	if err != nil {
		return err
	}
	glog.Infof("Starting an OpenShift node, connecting to %s", kubeClientConfig.Host)

	return StartNode(*nodeConfig)
}
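// exampleRunNodeFromConfig is a hypothetical sketch (not part of the original
// source): it drives RunNode from a previously written config file. Only
// ConfigFile is populated here; the other NodeOptions fields and their zero
// values are illustrative assumptions.
func exampleRunNodeFromConfig(configFile string) error {
	o := NodeOptions{ConfigFile: configFile}
	if !o.IsRunFromConfig() {
		return fmt.Errorf("expected %q to select a config-based run", configFile)
	}
	// Config reading, validation, and StartNode all happen inside RunNode.
	return o.RunNode()
}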
// BuildKubernetesNodeConfig builds the NodeConfig used to start the Kubelet.
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	var dnsIP net.IP
	if len(options.DNSIP) > 0 {
		dnsIP = net.ParseIP(options.DNSIP)
		if dnsIP == nil {
			return nil, fmt.Errorf("invalid DNS IP: %s", options.DNSIP)
		}
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler
	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddress, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	address := util.IP{}
	if err := address.Set(kubeAddress); err != nil {
		return nil, err
	}

	// declare the OpenShift defaults from config
	server := kapp.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = address
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.ClusterDNS = util.IP(dnsIP)
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelet.ApiserverSource, kubelet.FileSource}, ",")
	server.HTTPCheckFrequency = 0 // no remote HTTP pod creation access
	server.FileCheckFrequency = time.Duration(fileCheckInterval) * time.Second
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	if value := cmdutil.Env("OPENSHIFT_CONTAINERIZED", ""); len(value) > 0 {
		server.Containerized = value == "true"
	}

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so
	// we can provide proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, errors.NewAggregate(err)
	}

	cfg, err := server.KubeletConfig()
	if err != nil {
		return nil, err
	}

	// provide any config overrides
	cfg.StreamingConnectionIdleTimeout = 5 * time.Minute // TODO: should be set
	cfg.KubeClient = kubeClient
	cfg.DockerExecHandler = dockerExecHandler

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		cfg.TLSOptions = &kubelet.TLSOptions{
			Config: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of the POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// RequireAndVerifyClientCert lets us limit requests to ones with a valid client certificate
				ClientAuth: tls.RequireAndVerifyClientCert,
				ClientCAs:  clientCAs,
			},
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	config := &NodeConfig{
		BindAddress:         options.ServingInfo.BindAddress,
		AllowDisabledDocker: options.AllowDisabledDocker,
		Client:              kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,
	}

	return config, nil
}
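// exampleBuildNodeConfigFromFile is a hypothetical sketch (not part of the
// original source): it reads, validates, and builds a node config in one
// pass, mirroring the steps RunNode performs before handing off to StartNode.
// All calls are taken from the functions in this section.
func exampleBuildNodeConfigFromFile(configFile string) (*NodeConfig, error) {
	options, err := configapilatest.ReadAndResolveNodeConfig(configFile)
	if err != nil {
		return nil, err
	}
	if errs := validation.ValidateNodeConfig(options); len(errs) != 0 {
		return nil, kerrors.NewInvalid("NodeConfig", configFile, errs)
	}
	return BuildKubernetesNodeConfig(*options)
}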
func StartMaster(openshiftMasterConfig *configapi.MasterConfig) error {
	glog.Infof("Starting an Atomic Enterprise master, reachable at %s (etcd: %v)", openshiftMasterConfig.ServingInfo.BindAddress, openshiftMasterConfig.EtcdClientInfo.URLs)
	glog.Infof("Atomic Enterprise master public address is %s", openshiftMasterConfig.AssetConfig.MasterPublicURL)

	if openshiftMasterConfig.EtcdConfig != nil {
		etcd.RunEtcd(openshiftMasterConfig.EtcdConfig)
	}

	// Allow privileged containers
	// TODO: make this configurable and not the default https://github.com/projectatomic/atomic-enterprise/issues/662
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged:    true,
		HostNetworkSources: []string{kubelet.ApiserverSource, kubelet.FileSource},
	})

	openshiftConfig, err := origin.BuildMasterConfig(*openshiftMasterConfig)
	if err != nil {
		return err
	}

	go func() {
		openshiftConfig.ControllerPlug.WaitForStop()
		glog.Fatalf("Master shutdown requested")
	}()

	// Must start policy caching immediately
	openshiftConfig.RunPolicyCache()
	openshiftConfig.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if openshiftMasterConfig.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(*openshiftMasterConfig)
		if err != nil {
			return err
		}
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if openshiftMasterConfig.AssetConfig != nil {
		config, err := origin.BuildAssetConfig(*openshiftMasterConfig)
		if err != nil {
			return err
		}

		if openshiftMasterConfig.AssetConfig.ServingInfo.BindAddress == openshiftMasterConfig.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	var kubeConfig *kubernetes.MasterConfig
	if openshiftMasterConfig.KubernetesMasterConfig != nil {
		kubeConfig, err = kubernetes.BuildKubernetesMasterConfig(*openshiftMasterConfig, openshiftConfig.RequestContextMapper, openshiftConfig.KubeClient())
		if err != nil {
			return err
		}

		openshiftConfig.Run([]origin.APIInstaller{kubeConfig}, unprotectedInstallers)
	} else {
		_, kubeConfig, err := configapi.GetKubeClient(openshiftMasterConfig.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return err
		}

		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeConfig,
		}

		openshiftConfig.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}
	if openshiftMasterConfig.DNSConfig != nil {
		openshiftConfig.RunDNSServer()
	}

	openshiftConfig.RunProjectAuthorizationCache()

	if openshiftMasterConfig.Controllers != configapi.ControllersDisabled {
		go func() {
			openshiftConfig.ControllerPlug.WaitForStart()
			glog.Infof("Master controllers starting (%s)", openshiftMasterConfig.Controllers)

			// Start these first, because they provide credentials for other controllers' clients
			openshiftConfig.RunServiceAccountsController()
			openshiftConfig.RunServiceAccountTokensController()
			// used by admission controllers
			openshiftConfig.RunServiceAccountPullSecretsControllers()
			openshiftConfig.RunSecurityAllocationController()

			if kubeConfig != nil {
				_, rcClient, err := openshiftConfig.GetServiceAccountClients(openshiftConfig.ReplicationControllerServiceAccount)
				if err != nil {
					glog.Fatalf("Could not get client for replication controller: %v", err)
				}

				// called by admission control
				kubeConfig.RunResourceQuotaManager()

				// no special order
				kubeConfig.RunNodeController()
				kubeConfig.RunScheduler()
				kubeConfig.RunReplicationController(rcClient)
				kubeConfig.RunEndpointController()
				kubeConfig.RunNamespaceController()
				kubeConfig.RunPersistentVolumeClaimBinder()
				kubeConfig.RunPersistentVolumeClaimRecycler(openshiftConfig.ImageFor("deployer"))
			}

			// no special order
			openshiftConfig.RunBuildController()
			openshiftConfig.RunBuildPodController()
			openshiftConfig.RunBuildImageChangeTriggerController()
			openshiftConfig.RunDeploymentController()
			openshiftConfig.RunDeployerPodController()
			openshiftConfig.RunDeploymentConfigController()
			openshiftConfig.RunDeploymentConfigChangeController()
			openshiftConfig.RunDeploymentImageChangeTriggerController()
			openshiftConfig.RunImageImportController()
			openshiftConfig.RunOriginNamespaceController()
			openshiftConfig.RunSDNController()
		}()
	}

	return nil
}
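// exampleStartMasterFromFile is a hypothetical sketch (not part of the
// original source): it loads a master config and starts the master.
// ReadAndResolveMasterConfig is assumed to exist in configapilatest alongside
// the ReadAndResolveNodeConfig used by RunNode.
func exampleStartMasterFromFile(configFile string) error {
	masterConfig, err := configapilatest.ReadAndResolveMasterConfig(configFile)
	if err != nil {
		return err
	}
	if err := StartMaster(masterConfig); err != nil {
		return err
	}
	// StartMaster returns once its goroutines are launched, so block forever.
	select {}
}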
// BuildMasterConfig builds and returns the master configuration based on the
// provided options.
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.GetAndTestEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}

	etcdHelper, err := NewEtcdHelper(client, options.EtcdStorageConfig.OpenShiftStorageVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("error setting up server storage: %v", err)
	}

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
	requestContextMapper := kapi.NewRequestContextMapper()

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
	admissionClient := admissionControlClient(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient)
	admissionController := admission.NewFromPlugins(admissionClient, admissionControlPluginNames, "")

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, client)
	if err != nil {
		return nil, err
	}

	config := &MasterConfig{
		Options: options,

		OpenshiftEnabled: options.OpenshiftEnabled,

		Authenticator:                 newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs),
		Authorizer:                    newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage),
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		PolicyCache:               policyCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(privilegedLoopbackOpenShiftClient, privilegedLoopbackKubeClient, policyClient),

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug: plug.NewPlug(!options.PauseControllers),

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

		BuildControllerServiceAccount:       bootstrappolicy.InfraBuildControllerServiceAccountName,
		DeploymentControllerServiceAccount:  bootstrappolicy.InfraDeploymentControllerServiceAccountName,
		ReplicationControllerServiceAccount: bootstrappolicy.InfraReplicationControllerServiceAccountName,
	}

	return config, nil
}
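// exampleInspectMasterConfig is a hypothetical sketch (not part of the
// original source): it builds a MasterConfig directly, e.g. from a test, and
// inspects a couple of the wired-up fields. The field and method names are
// taken from the struct literal above; the glog dependency is assumed to be
// available in this package.
func exampleInspectMasterConfig(options configapi.MasterConfig) error {
	config, err := BuildMasterConfig(options)
	if err != nil {
		return err
	}
	glog.Infof("TLS enabled: %v", config.TLS)
	glog.Infof("Deployer image: %s", config.ImageFor("deployer"))
	return nil
}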