Code example #1
File: test.go Project: richm/origin
// verifyTestSuitePreconditions ensures that all namespaces prefixed with 'e2e-' have their
// service accounts in the privileged and anyuid SCCs, and that Origin/Kubernetes synthetic
// skip labels are applied
func verifyTestSuitePreconditions() {
	desc := ginkgo.CurrentGinkgoTestDescription()

	switch {
	case strings.Contains(desc.FileName, "/origin/test/"):
		if strings.Contains(config.GinkgoConfig.SkipString, "[Origin]") {
			ginkgo.Skip("skipping [Origin] tests")
		}

	case strings.Contains(desc.FileName, "/kubernetes/test/e2e/"):
		if strings.Contains(config.GinkgoConfig.SkipString, "[Kubernetes]") {
			ginkgo.Skip("skipping [Kubernetes] tests")
		}

		e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
		c, _, err := configapi.GetKubeClient(KubeConfigPath())
		if err != nil {
			FatalErr(err)
		}
		namespaces, err := c.Namespaces().List(kapi.ListOptions{})
		if err != nil {
			FatalErr(err)
		}
		// add to the "privileged" scc to ensure pods that explicitly
		// request extra capabilities are not rejected
		addE2EServiceAccountsToSCC(c, namespaces, "privileged")
		// add to the "anyuid" scc to ensure pods that don't specify a
		// uid don't get forced into a range (mimics upstream
		// behavior)
		addE2EServiceAccountsToSCC(c, namespaces, "anyuid")
	}
}
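The helper addE2EServiceAccountsToSCC called above is not shown in this file. Code example #20 below inlines the same group-rewriting logic for the "privileged" SCC; a minimal sketch of such a helper along those lines, assuming the same client API, might look like this (the signature and retry-free flow are assumptions, not the project's actual implementation):

func addE2EServiceAccountsToSCC(c *kclient.Client, namespaces *kapi.NamespaceList, sccName string) {
	// fetch the SCC to modify; return silently if it does not exist
	scc, err := c.SecurityContextConstraints().Get(sccName)
	if err != nil {
		if apierrs.IsNotFound(err) {
			return
		}
		FatalErr(err)
	}
	// keep existing groups that are unrelated to e2e namespaces
	groups := []string{}
	for _, name := range scc.Groups {
		if !strings.Contains(name, "e2e-") {
			groups = append(groups, name)
		}
	}
	// add the service account group of every 'e2e-' prefixed namespace
	for _, ns := range namespaces.Items {
		if strings.HasPrefix(ns.Name, "e2e-") {
			groups = append(groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
		}
	}
	scc.Groups = groups
	if _, err := c.SecurityContextConstraints().Update(scc); err != nil {
		FatalErr(err)
	}
}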
Code example #2
// RunDeploymentController starts the deployment controller process.
func (c *MasterConfig) RunDeploymentController() {
	rcInformer := c.Informers.ReplicationControllers().Informer()
	podInformer := c.Informers.Pods().Informer()
	_, kclient := c.DeploymentControllerClients()

	_, kclientConfig, err := configapi.GetKubeClient(c.Options.MasterClients.OpenShiftLoopbackKubeConfig, c.Options.MasterClients.OpenShiftLoopbackClientConnectionOverrides)
	if err != nil {
		glog.Fatalf("Unable to initialize deployment controller: %v", err)
	}
	// TODO eliminate these environment variables once service accounts provide a kubeconfig that includes all of this info
	env := clientcmd.EnvVars(
		kclientConfig.Host,
		kclientConfig.CAData,
		kclientConfig.Insecure,
		path.Join(serviceaccountadmission.DefaultAPITokenMountPath, kapi.ServiceAccountTokenKey),
	)

	controller := deploycontroller.NewDeploymentController(
		rcInformer,
		podInformer,
		kclient,
		bootstrappolicy.DeployerServiceAccountName,
		c.ImageFor("deployer"),
		env,
		c.ExternalVersionCodec,
	)
	go controller.Run(5, utilwait.NeverStop)
}
Code example #3
File: run_components.go Project: RomainVabre/origin
// RunDeploymentController starts the deployment controller process.
func (c *MasterConfig) RunDeploymentController() {
	_, kclient := c.DeploymentControllerClients()

	_, kclientConfig, err := configapi.GetKubeClient(c.Options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		glog.Fatalf("Unable to initialize deployment controller: %v", err)
	}
	// TODO eliminate these environment variables once service accounts provide a kubeconfig that includes all of this info
	env := clientcmd.EnvVars(
		kclientConfig.Host,
		kclientConfig.CAData,
		kclientConfig.Insecure,
		path.Join(serviceaccountadmission.DefaultAPITokenMountPath, kapi.ServiceAccountTokenKey),
	)

	factory := deploycontroller.DeploymentControllerFactory{
		KubeClient:     kclient,
		Codec:          c.EtcdHelper.Codec(),
		Environment:    env,
		DeployerImage:  c.ImageFor("deployer"),
		ServiceAccount: bootstrappolicy.DeployerServiceAccountName,
	}

	controller := factory.Create()
	controller.Run()
}
Code example #4
File: client.go Project: LalatenduMohanty/origin
func GetClusterAdminKubeClient(adminKubeConfigFile string) (*kclientset.Clientset, error) {
	_, c, _, err := configapi.GetKubeClient(adminKubeConfigFile, nil)
	if err != nil {
		return nil, err
	}
	return c, nil
}
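A minimal usage sketch (hypothetical caller, not part of the project): the returned clientset is used like any other Kubernetes clientset. The admin kubeconfig path and the Core().Namespaces() accessor are assumptions based on the clientset API of this Origin generation.

func listNamespacesAsClusterAdmin(adminKubeConfigFile string) error {
	c, err := GetClusterAdminKubeClient(adminKubeConfigFile)
	if err != nil {
		return err
	}
	// list namespaces with cluster-admin credentials (accessor assumed for this clientset version)
	namespaces, err := c.Core().Namespaces().List(kapi.ListOptions{})
	if err != nil {
		return err
	}
	for _, ns := range namespaces.Items {
		fmt.Println(ns.Name)
	}
	return nil
}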
Code example #5
File: master_config.go Project: erinboyd/origin
func newServiceAccountTokenGetter(options configapi.MasterConfig, client *etcdclient.Client) (serviceaccount.ServiceAccountTokenGetter, error) {
	var tokenGetter serviceaccount.ServiceAccountTokenGetter
	if options.KubernetesMasterConfig == nil {
		// When we're running against an external Kubernetes, use the external kubernetes client to validate service account tokens
		// This prevents infinite auth loops if the privilegedLoopbackKubeClient authenticates using a service account token
		kubeClient, _, err := configapi.GetKubeClient(options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return nil, err
		}
		tokenGetter = serviceaccount.NewGetterFromClient(kubeClient)
	} else {
		// When we're running in-process, go straight to etcd (using the KubernetesStorageVersion/KubernetesStoragePrefix, since service accounts are kubernetes objects)
		legacyGroup, err := kapilatest.Group(kapi.SchemeGroupVersion.Group)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
		}
		versionedInterface, err := legacyGroup.InterfacesFor(unversioned.GroupVersion{Group: kapi.SchemeGroupVersion.Group, Version: options.EtcdStorageConfig.KubernetesStorageVersion})
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
		}
		ketcdHelper := etcdstorage.NewEtcdStorage(client, versionedInterface.Codec, options.EtcdStorageConfig.KubernetesStoragePrefix)
		tokenGetter = serviceaccount.NewGetterFromStorageInterface(ketcdHelper)
	}
	return tokenGetter, nil
}
Code example #6
File: client.go Project: erinboyd/origin
func GetClusterAdminKubeClient(adminKubeConfigFile string) (*kclient.Client, error) {
	if c, _, err := configapi.GetKubeClient(adminKubeConfigFile); err != nil {
		return nil, err
	} else {
		return c, nil
	}
}
Code example #7
File: cli.go Project: asiainfoLDP/datafactory
// AdminKubeREST provides a Kubernetes REST client for the cluster admin user.
func (c *CLI) AdminKubeREST() *kclient.Client {
	kubeClient, _, err := configapi.GetKubeClient(c.adminConfigPath)
	if err != nil {
		FatalErr(err)
	}
	return kubeClient
}
Code example #8
func newServiceAccountTokenGetter(options configapi.MasterConfig) (serviceaccount.ServiceAccountTokenGetter, error) {
	if options.KubernetesMasterConfig == nil {
		// When we're running against an external Kubernetes, use the external kubernetes client to validate service account tokens
		// This prevents infinite auth loops if the privilegedLoopbackKubeClient authenticates using a service account token
		kubeClient, _, err := configapi.GetKubeClient(options.MasterClients.ExternalKubernetesKubeConfig, options.MasterClients.ExternalKubernetesClientConnectionOverrides)
		if err != nil {
			return nil, err
		}
		return sacontroller.NewGetterFromClient(clientadapter.FromUnversionedClient(kubeClient)), nil
	}

	// TODO: could be hoisted if other Origin code needs direct access to etcd, otherwise discourage this access pattern
	// as we move to be more on top of Kube.
	_, kubeStorageFactory, err := kubernetes.BuildDefaultAPIServer(options)
	if err != nil {
		return nil, err
	}

	storageConfig, err := kubeStorageFactory.NewConfig(kapi.Resource("serviceaccounts"))
	if err != nil {
		return nil, err
	}
	// TODO: by doing this we will not be able to authenticate while a master quorum is not present - reimplement
	// as two storages called in succession (non quorum and then quorum).
	storageConfig.Quorum = true
	return sacontroller.NewGetterFromStorageInterface(storageConfig, kubeStorageFactory.ResourcePrefix(kapi.Resource("serviceaccounts")), kubeStorageFactory.ResourcePrefix(kapi.Resource("secrets"))), nil
}
Code example #9
File: client.go Project: erinboyd/origin
func GetClusterAdminClientConfig(adminKubeConfigFile string) (*kclient.Config, error) {
	_, conf, err := configapi.GetKubeClient(adminKubeConfigFile)
	if err != nil {
		return nil, err
	}
	return conf, nil
}
Code example #10
// RunDeploymentController starts the deployment controller process.
func (c *MasterConfig) RunDeploymentController() {
	rcInformer := c.Informers.ReplicationControllers().Informer()
	podInformer := c.Informers.Pods().Informer()
	_, kclient := c.DeploymentControllerClients()

	_, kclientConfig, err := configapi.GetKubeClient(c.Options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		glog.Fatalf("Unable to initialize deployment controller: %v", err)
	}
	// TODO eliminate these environment variables once service accounts provide a kubeconfig that includes all of this info
	env := clientcmd.EnvVars(
		kclientConfig.Host,
		kclientConfig.CAData,
		kclientConfig.Insecure,
		path.Join(serviceaccountadmission.DefaultAPITokenMountPath, kapi.ServiceAccountTokenKey),
	)

	controller := deploycontroller.NewDeploymentController(
		rcInformer,
		podInformer,
		kclient,
		bootstrappolicy.DeployerServiceAccountName,
		c.ImageFor("deployer"),
		env,
		c.EtcdHelper.Codec(),
	)

	// TODO: Make the stop channel actually work.
	stopCh := make(chan struct{})
	// TODO: Make the number of workers configurable.
	go controller.Run(5, stopCh)
}
Code example #11
File: cli.go Project: LalatenduMohanty/origin
// AdminKubeClient provides a Kubernetes client for the cluster admin user.
func (c *CLI) AdminKubeClient() *kclientset.Clientset {
	_, kubeClient, _, err := configapi.GetKubeClient(c.adminConfigPath, nil)
	if err != nil {
		FatalErr(err)
	}
	return kubeClient
}
Code example #12
File: cli.go Project: asiainfoLDP/datafactory
// AdminREST provides an OpenShift REST client for the cluster admin user.
func (c *CLI) AdminREST() *client.Client {
	_, clientConfig, err := configapi.GetKubeClient(c.adminConfigPath)
	if err != nil {
		FatalErr(err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		FatalErr(err)
	}
	return osClient
}
Code example #13
File: cli.go Project: LalatenduMohanty/origin
// Client provides an OpenShift client for the current user. If the user is not
// set, then it provides a client for the cluster admin user.
func (c *CLI) Client() *client.Client {
	_, _, clientConfig, err := configapi.GetKubeClient(c.configPath, nil)
	if err != nil {
		FatalErr(err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		FatalErr(err)
	}
	return osClient
}
Code example #14
func StartNode(nodeConfig configapi.NodeConfig, components *utilflags.ComponentFlag) error {
	config, err := kubernetes.BuildKubernetesNodeConfig(nodeConfig, components.Enabled(ComponentProxy), components.Enabled(ComponentDNS))
	if err != nil {
		return err
	}

	// For the openshift network plugin, nodeConfig.networkPluginName is optional and is auto-detected/finalized
	// once the kubernetes node config is built, so perform the plugin-name related check here.
	if osdn.IsOpenShiftNetworkPlugin(config.KubeletServer.NetworkPluginName) {
		// TODO: SDN plugin depends on the Kubelet registering as a Node and doesn't retry cleanly,
		// and Kubelet also can't start the PodSync loop until the SDN plugin has loaded.
		if components.Enabled(ComponentKubelet) != components.Enabled(ComponentPlugins) {
			return fmt.Errorf("the SDN plugin must be run in the same process as the kubelet")
		}
	}

	if components.Enabled(ComponentKubelet) {
		glog.Infof("Starting node %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
	} else {
		glog.Infof("Starting node networking %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
	}

	_, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig)
	if err != nil {
		return err
	}
	glog.Infof("Connecting to API server %s", kubeClientConfig.Host)

	// preconditions
	if components.Enabled(ComponentKubelet) {
		config.EnsureKubeletAccess()
		config.EnsureVolumeDir()
		config.EnsureDocker(docker.NewHelper())
		config.EnsureLocalQuota(nodeConfig) // must be performed after EnsureVolumeDir
	}

	if components.Enabled(ComponentKubelet) {
		config.RunKubelet()
	}
	if components.Enabled(ComponentPlugins) {
		config.RunPlugin()
	}
	if components.Enabled(ComponentProxy) {
		config.RunProxy()
	}
	if components.Enabled(ComponentDNS) {
		config.RunDNS()
	}

	config.RunServiceStores(components.Enabled(ComponentProxy), components.Enabled(ComponentDNS))

	return nil
}
Code example #15
File: start_node.go Project: erinboyd/origin
// RunNode takes the options and:
// 1.  Creates certs if needed
// 2.  Reads fully specified node config OR builds a fully specified node config from the args
// 3.  Writes the fully specified node config and exits if needed
// 4.  Starts the node based on the fully specified config
func (o NodeOptions) RunNode() error {
	if !o.IsRunFromConfig() || o.IsWriteConfigOnly() {
		glog.V(2).Infof("Generating node configuration")
		if err := o.CreateNodeConfig(); err != nil {
			return err
		}
	}

	if o.IsWriteConfigOnly() {
		return nil
	}

	var nodeConfig *configapi.NodeConfig
	var err error
	if o.IsRunFromConfig() {
		nodeConfig, err = configapilatest.ReadAndResolveNodeConfig(o.ConfigFile)
	} else {
		nodeConfig, err = o.NodeArgs.BuildSerializeableNodeConfig()
	}
	if err != nil {
		return err
	}

	validationResults := validation.ValidateNodeConfig(nodeConfig, nil)
	if len(validationResults.Warnings) != 0 {
		for _, warning := range validationResults.Warnings {
			glog.Warningf("%v", warning)
		}
	}
	if len(validationResults.Errors) != 0 {
		return kerrors.NewInvalid("NodeConfig", o.ConfigFile, validationResults.Errors)
	}

	_, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig)
	if err != nil {
		return err
	}
	glog.Infof("Starting a node connected to %s", kubeClientConfig.Host)

	if err := StartNode(*nodeConfig); err != nil {
		return err
	}

	return nil
}
Code example #16
File: start_node.go Project: asiainfoLDP/datafactory
func StartNode(nodeConfig configapi.NodeConfig, components *utilflags.ComponentFlag) error {
	config, err := kubernetes.BuildKubernetesNodeConfig(nodeConfig)
	if err != nil {
		return err
	}

	if components.Enabled(ComponentKubelet) {
		glog.Infof("Starting node %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
	} else {
		glog.Infof("Starting node networking %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
	}

	_, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig)
	if err != nil {
		return err
	}
	glog.Infof("Connecting to API server %s", kubeClientConfig.Host)

	// preconditions
	if components.Enabled(ComponentKubelet) {
		config.EnsureKubeletAccess()
		config.EnsureVolumeDir()
		config.EnsureDocker(docker.NewHelper())
		config.EnsureLocalQuota(nodeConfig) // must be performed after EnsureVolumeDir
	}

	// TODO: SDN plugin depends on the Kubelet registering as a Node and doesn't retry cleanly,
	// and Kubelet also can't start the PodSync loop until the SDN plugin has loaded.
	if components.Enabled(ComponentKubelet) {
		config.RunKubelet()
	}
	// SDN plugins get the opportunity to filter service rules, so they start first
	if components.Enabled(ComponentPlugins) {
		config.RunPlugin()
	}
	if components.Enabled(ComponentProxy) {
		config.RunProxy()
	}
	// if we are running plugins in this process, reset the bridge ip rule
	if components.Enabled(ComponentPlugins) {
		config.ResetSysctlFromProxy()
	}

	return nil
}
Code example #17
File: master_config.go Project: vikaslaad/origin
func newServiceAccountTokenGetter(options configapi.MasterConfig, client newetcdclient.Client) (serviceaccount.ServiceAccountTokenGetter, error) {
	var tokenGetter serviceaccount.ServiceAccountTokenGetter
	if options.KubernetesMasterConfig == nil {
		// When we're running against an external Kubernetes, use the external kubernetes client to validate service account tokens
		// This prevents infinite auth loops if the privilegedLoopbackKubeClient authenticates using a service account token
		kubeClient, _, err := configapi.GetKubeClient(options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return nil, err
		}
		tokenGetter = sacontroller.NewGetterFromClient(kubeClient)
	} else {
		// When we're running in-process, go straight to etcd (using the KubernetesStorageVersion/KubernetesStoragePrefix, since service accounts are kubernetes objects)
		codec := kapi.Codecs.LegacyCodec(unversioned.GroupVersion{Group: kapi.GroupName, Version: options.EtcdStorageConfig.KubernetesStorageVersion})
		ketcdHelper := etcdstorage.NewEtcdStorage(client, codec, options.EtcdStorageConfig.KubernetesStoragePrefix)
		tokenGetter = sacontroller.NewGetterFromStorageInterface(ketcdHelper)
	}
	return tokenGetter, nil
}
Code example #18
File: master_config.go Project: patrykattc/origin
func newServiceAccountTokenGetter(options configapi.MasterConfig, client *etcdclient.Client) (serviceaccount.ServiceAccountTokenGetter, error) {
	var tokenGetter serviceaccount.ServiceAccountTokenGetter
	if options.KubernetesMasterConfig == nil {
		// When we're running against an external Kubernetes, use the external kubernetes client to validate service account tokens
		// This prevents infinite auth loops if the privilegedLoopbackKubeClient authenticates using a service account token
		kubeClient, _, err := configapi.GetKubeClient(options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return nil, err
		}
		tokenGetter = serviceaccount.NewGetterFromClient(kubeClient)
	} else {
		// When we're running in-process, go straight to etcd (using the KubernetesStorageVersion/KubernetesStoragePrefix, since service accounts are kubernetes objects)
		ketcdHelper, err := master.NewEtcdHelper(client, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
		if err != nil {
			return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
		}
		tokenGetter = serviceaccount.NewGetterFromEtcdHelper(ketcdHelper)
	}
	return tokenGetter, nil
}
Code example #19
File: test.go Project: o8o/origin
// ensureKubeE2EPrivilegedSA ensures that all namespaces prefixed with 'e2e-' have their
// service accounts in the privileged and anyuid SCCs
func ensureKubeE2EPrivilegedSA() {
	desc := ginkgo.CurrentGinkgoTestDescription()
	if strings.Contains(desc.FileName, "/kubernetes/test/e2e/") {
		e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
		c, _, err := configapi.GetKubeClient(KubeConfigPath())
		if err != nil {
			FatalErr(err)
		}
		namespaces, err := c.Namespaces().List(kapi.ListOptions{})
		if err != nil {
			FatalErr(err)
		}
		// add to the "privileged" scc to ensure pods that explicitly
		// request extra capabilities are not rejected
		addE2EServiceAccountsToSCC(c, namespaces, "privileged")
		// add to the "anyuid" scc to ensure pods that don't specify a
		// uid don't get forced into a range (mimics upstream
		// behavior)
		addE2EServiceAccountsToSCC(c, namespaces, "anyuid")
	}
}
Code example #20
File: test.go Project: kcbabo/origin
// ensureKubeE2EPrivilegedSA ensures that all namespaces prefixed with 'e2e-' have their
// service accounts in the privileged SCC
func ensureKubeE2EPrivilegedSA() {
	desc := ginkgo.CurrentGinkgoTestDescription()
	if strings.Contains(desc.FileName, "/kubernetes/test/e2e/") {
		e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged")
		c, _, err := configapi.GetKubeClient(KubeConfigPath())
		if err != nil {
			FatalErr(err)
		}
		priv, err := c.SecurityContextConstraints().Get("privileged")
		if err != nil {
			if apierrs.IsNotFound(err) {
				return
			}
			FatalErr(err)
		}
		namespaces, err := c.Namespaces().List(labels.Everything(), fields.Everything())
		if err != nil {
			FatalErr(err)
		}
		groups := []string{}
		for _, name := range priv.Groups {
			if !strings.Contains(name, "e2e-") {
				groups = append(groups, name)
			}
		}
		for _, ns := range namespaces.Items {
			if strings.HasPrefix(ns.Name, "e2e-") {
				groups = append(groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
			}
		}
		priv.Groups = groups
		if _, err := c.SecurityContextConstraints().Update(priv); err != nil {
			FatalErr(err)
		}
	}
}
Code example #21
File: start_master.go Project: urashidmalik/origin
// startAPI starts the components of the master that are considered part of the API - the Kubernetes
// API and core controllers, the Origin API, the group, policy, project, and authorization caches,
// etcd, the asset server (for the UI), the OAuth server endpoints, and the DNS server.
// TODO: allow to be more granularly targeted
func startAPI(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
	// start etcd
	if oc.Options.EtcdConfig != nil {
		etcd.RunEtcd(oc.Options.EtcdConfig)
	}

	// verify we can connect to etcd with the provided config
	if err := etcd.TestEtcdClient(oc.EtcdClient); err != nil {
		return err
	}

	// Must start policy caching immediately
	oc.RunGroupCache()
	oc.RunPolicyCache()
	oc.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if oc.Options.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(oc.Options)
		if err != nil {
			return err
		}
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if oc.WebConsoleEnabled() {
		config, err := origin.BuildAssetConfig(*oc.Options.AssetConfig)
		if err != nil {
			return err
		}

		if oc.Options.AssetConfig.ServingInfo.BindAddress == oc.Options.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	if kc != nil {
		oc.Run([]origin.APIInstaller{kc}, unprotectedInstallers)
	} else {
		_, kubeClientConfig, err := configapi.GetKubeClient(oc.Options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return err
		}
		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeClientConfig,
		}
		oc.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	oc.InitializeObjects()

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}

	if oc.Options.DNSConfig != nil {
		oc.RunDNSServer()
	}

	oc.RunProjectAuthorizationCache()
	return nil
}
Code example #22
// StartAPI starts the components of the master that are considered part of the API - the Kubernetes
// API and core controllers, the Origin API, the group, policy, project, and authorization caches,
// etcd, the asset server (for the UI), the OAuth server endpoints, and the DNS server.
// TODO: allow to be more granularly targeted
func StartAPI(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
	// start etcd
	if oc.Options.EtcdConfig != nil {
		etcdserver.RunEtcd(oc.Options.EtcdConfig)
	}

	// verify we can connect to etcd with the provided config
	if _, err := etcd.GetAndTestEtcdClient(oc.Options.EtcdClientInfo); err != nil {
		return err
	}

	// Must start policy caching immediately
	oc.Informers.StartCore(utilwait.NeverStop)
	oc.RunClusterQuotaMappingController()
	oc.RunGroupCache()
	oc.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if oc.Options.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(oc)
		if err != nil {
			return err
		}
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if oc.WebConsoleEnabled() {
		overrideConfig, err := getResourceOverrideConfig(oc)
		if err != nil {
			return err
		}
		config, err := origin.NewAssetConfig(*oc.Options.AssetConfig, overrideConfig)
		if err != nil {
			return err
		}

		if oc.Options.AssetConfig.ServingInfo.BindAddress == oc.Options.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	if kc != nil {
		oc.Run([]origin.APIInstaller{kc}, unprotectedInstallers)
	} else {
		_, kubeClientConfig, err := configapi.GetKubeClient(oc.Options.MasterClients.ExternalKubernetesKubeConfig, oc.Options.MasterClients.ExternalKubernetesClientConnectionOverrides)
		if err != nil {
			return err
		}
		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeClientConfig,
		}
		oc.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	// start up the informers that we're trying to use in the API server
	oc.Informers.Start(utilwait.NeverStop)
	oc.InitializeObjects()

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}

	if oc.Options.DNSConfig != nil {
		oc.RunDNSServer()
	}

	oc.RunProjectAuthorizationCache()
	return nil
}
Code example #23
File: master_config.go Project: vikaslaad/origin
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
	requestContextMapper := kapi.NewRequestContextMapper()

	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupstorage.NewREST(etcdHelper)))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}
	if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
		admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
	}

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient: privilegedLoopbackOpenShiftClient,
		ProjectCache:    projectCache,
	}
	plugins := []admission.Interface{}
	for _, pluginName := range admissionControlPluginNames {
		configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
		if err != nil {
			return nil, err
		}
		plugin := admission.InitPlugin(pluginName, privilegedLoopbackKubeClient, configFile)
		if plugin != nil {
			plugins = append(plugins, plugin)
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	authorizer := newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage)

	config := &MasterConfig{
		Options: options,

		Authenticator:                 newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs, groupCache),
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		PolicyCache:               policyCache,
		GroupCache:                groupCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, policyClient),
		ProjectCache:              projectCache,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
	}

	return config, nil
}
Code example #24
File: start_master.go Project: brandon-adams/origin
func StartMaster(openshiftMasterConfig *configapi.MasterConfig) error {
	glog.Infof("Starting an OpenShift master, reachable at %s (etcd: %v)", openshiftMasterConfig.ServingInfo.BindAddress, openshiftMasterConfig.EtcdClientInfo.URLs)
	glog.Infof("OpenShift master public address is %s", openshiftMasterConfig.AssetConfig.MasterPublicURL)

	if openshiftMasterConfig.EtcdConfig != nil {
		etcd.RunEtcd(openshiftMasterConfig.EtcdConfig)
	}

	// Allow privileged containers
	// TODO: make this configurable and not the default https://github.com/openshift/origin/issues/662
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged:    true,
		HostNetworkSources: []string{kubelet.ApiserverSource, kubelet.FileSource},
	})

	openshiftConfig, err := origin.BuildMasterConfig(*openshiftMasterConfig)
	if err != nil {
		return err
	}

	go func() {
		openshiftConfig.ControllerPlug.WaitForStop()
		glog.Fatalf("Master shutdown requested")
	}()

	// Must start policy caching immediately
	openshiftConfig.RunPolicyCache()
	openshiftConfig.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if openshiftMasterConfig.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(*openshiftMasterConfig)
		if err != nil {
			return err
		}
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if openshiftMasterConfig.AssetConfig != nil {
		config, err := origin.BuildAssetConfig(*openshiftMasterConfig.AssetConfig)
		if err != nil {
			return err
		}

		if openshiftMasterConfig.AssetConfig.ServingInfo.BindAddress == openshiftMasterConfig.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	var kubeConfig *kubernetes.MasterConfig
	if openshiftMasterConfig.KubernetesMasterConfig != nil {
		kubeConfig, err = kubernetes.BuildKubernetesMasterConfig(*openshiftMasterConfig, openshiftConfig.RequestContextMapper, openshiftConfig.KubeClient())
		if err != nil {
			return err
		}

		openshiftConfig.Run([]origin.APIInstaller{kubeConfig}, unprotectedInstallers)

	} else {
		_, kubeConfig, err := configapi.GetKubeClient(openshiftMasterConfig.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return err
		}

		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeConfig,
		}

		openshiftConfig.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}
	if openshiftMasterConfig.DNSConfig != nil {
		openshiftConfig.RunDNSServer()
	}

	openshiftConfig.RunProjectAuthorizationCache()

	if openshiftMasterConfig.Controllers != configapi.ControllersDisabled {
		go func() {
			openshiftConfig.ControllerPlug.WaitForStart()
			glog.Infof("Master controllers starting (%s)", openshiftMasterConfig.Controllers)

			// Start these first, because they provide credentials for other controllers' clients
			openshiftConfig.RunServiceAccountsController()
			openshiftConfig.RunServiceAccountTokensController()
			// used by admission controllers
			openshiftConfig.RunServiceAccountPullSecretsControllers()
			openshiftConfig.RunSecurityAllocationController()

			if kubeConfig != nil {
				_, rcClient, err := openshiftConfig.GetServiceAccountClients(openshiftConfig.ReplicationControllerServiceAccount)
				if err != nil {
					glog.Fatalf("Could not get client for replication controller: %v", err)
				}

				// called by admission control
				kubeConfig.RunResourceQuotaManager()

				// no special order
				kubeConfig.RunNodeController()
				kubeConfig.RunScheduler()
				kubeConfig.RunReplicationController(rcClient)
				kubeConfig.RunEndpointController()
				kubeConfig.RunNamespaceController()
				kubeConfig.RunPersistentVolumeClaimBinder()
				kubeConfig.RunPersistentVolumeClaimRecycler(openshiftConfig.ImageFor("deployer"))
			}

			// no special order
			openshiftConfig.RunBuildController()
			openshiftConfig.RunBuildPodController()
			openshiftConfig.RunBuildImageChangeTriggerController()
			openshiftConfig.RunDeploymentController()
			openshiftConfig.RunDeployerPodController()
			openshiftConfig.RunDeploymentConfigController()
			openshiftConfig.RunDeploymentConfigChangeController()
			openshiftConfig.RunDeploymentImageChangeTriggerController()
			openshiftConfig.RunImageImportController()
			openshiftConfig.RunOriginNamespaceController()
			openshiftConfig.RunSDNController()
		}()
	}

	return nil
}
Code example #25
File: node_config.go Project: hloganathan/origin
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	var dnsIP net.IP
	if len(options.DNSIP) > 0 {
		dnsIP = net.ParseIP(options.DNSIP)
		if dnsIP == nil {
			return nil, fmt.Errorf("Invalid DNS IP: %s", options.DNSIP)
		}
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}
	kubeAddress := net.ParseIP(kubeAddressStr)
	if kubeAddress == nil {
		return nil, fmt.Errorf("Invalid DNS IP: %s", kubeAddressStr)
	}

	// declare the OpenShift defaults from config
	server := kapp.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory

	// kubelet finds the node IP address by doing net.ParseIP(hostname) and if that fails,
	// it does net.LookupIP(NodeName) and picks the first non-loopback address.
	// Pass node IP as hostname to make kubelet use the desired IP address.
	if len(options.NodeIP) > 0 {
		server.HostnameOverride = options.NodeIP
	} else {
		server.HostnameOverride = options.NodeName
	}
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddress
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.CAdvisorPort = 0 // no unsecured cadvisor access
	server.HealthzPort = 0  // no unsecured healthz access
	server.ClusterDNS = dnsIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostPIDSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HostIPCSources = strings.Join([]string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, ",")
	server.HTTPCheckFrequency = 0 // no remote HTTP pod creation access
	server.FileCheckFrequency = time.Duration(fileCheckInterval) * time.Second
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	if value := cmdutil.Env("OPENSHIFT_CONTAINERIZED", ""); len(value) > 0 {
		server.Containerized = value == "true"
	}

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, errors.NewAggregate(err)
	}

	cfg, err := server.UnsecuredKubeletConfig()
	if err != nil {
		return nil, err
	}

	// provide any config overrides
	cfg.NodeName = options.NodeName
	cfg.StreamingConnectionIdleTimeout = 5 * time.Minute // TODO: should be set
	cfg.KubeClient = kubeClient
	cfg.DockerExecHandler = dockerExecHandler

	// Setup auth
	osClient, osClientConfig, err := configapi.GetOpenShiftClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(clientCAs, clientcmd.AnonymousClientConfig(*osClientConfig), authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(osClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	cfg.Auth = kubelet.NewKubeletAuth(authn, authzAttr, authz)

	// Make sure the node doesn't think it is in standalone mode
	// This is required for the node to enforce nodeSelectors on pods, to set hostIP on pod status updates, etc
	cfg.StandaloneMode = false

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		cfg.TLSOptions = &kubelet.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	// Prepare cloud provider
	cloud, err := cloudprovider.InitCloudProvider(server.CloudProvider, server.CloudConfigFile)
	if err != nil {
		return nil, err
	}
	if cloud != nil {
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", server.CloudProvider, server.CloudConfigFile)
	}
	cfg.Cloud = cloud

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,

		IPTablesSyncPeriod: options.IPTablesSyncPeriod,
	}

	return config, nil
}
Code example #26
func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enableDNS bool) (*NodeConfig, error) {
	originClient, _, err := configapi.GetOpenShiftClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	_, kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}
	// Make a separate client for event reporting, to avoid event QPS blocking node calls
	_, eventClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig, options.MasterClientConnectionOverrides)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	clientCAs, err := kcrypto.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	kubeAddressStr, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	if err = validateNetworkPluginName(originClient, options.NetworkConfig.NetworkPluginName); err != nil {
		return nil, err
	}

	// Defaults are tested in TestKubeletDefaults
	server := kubeletoptions.NewKubeletServer()
	// Adjust defaults
	server.RequireKubeConfig = true
	server.PodManifestPath = path
	server.RootDirectory = options.VolumeDirectory
	server.NodeIP = options.NodeIP
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = kubeAddressStr
	server.Port = int32(kubePort)
	server.ReadOnlyPort = 0        // no read only access
	server.CAdvisorPort = 0        // no unsecured cadvisor access
	server.HealthzPort = 0         // no unsecured healthz access
	server.HealthzBindAddress = "" // no unsecured healthz access
	server.ClusterDNS = options.DNSIP
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkConfig.NetworkPluginName
	server.HostNetworkSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HostPIDSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HostIPCSources = []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}
	server.HTTPCheckFrequency = unversioned.Duration{Duration: time.Duration(0)} // no remote HTTP pod creation access
	server.FileCheckFrequency = unversioned.Duration{Duration: time.Duration(fileCheckInterval) * time.Second}
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")
	server.CPUCFSQuota = true // enable cpu cfs quota enforcement by default
	server.MaxPods = 250
	server.PodsPerCore = 10
	server.SerializeImagePulls = false          // disable serialized image pulls by default
	server.EnableControllerAttachDetach = false // stay consistent with existing config, but admins should enable it
	if enableDNS {
		// if we are running local DNS, skydns will load the default recursive nameservers for us
		server.ResolverConfig = ""
	}
	server.DockerExecHandlerName = string(options.DockerConfig.ExecHandlerName)

	if sdnapi.IsOpenShiftNetworkPlugin(server.NetworkPluginName) {
		// set defaults for openshift-sdn
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	containerized := cmdutil.Env("OPENSHIFT_CONTAINERIZED", "") == "true"
	server.Containerized = containerized

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, kerrors.NewAggregate(err)
	}

	proxyconfig, err := buildKubeProxyConfig(options)
	if err != nil {
		return nil, err
	}

	// Initialize SDN before building kubelet config so it can modify options
	iptablesSyncPeriod, err := time.ParseDuration(options.IPTablesSyncPeriod)
	if err != nil {
		return nil, fmt.Errorf("Cannot parse the provided ip-tables sync period (%s) : %v", options.IPTablesSyncPeriod, err)
	}
	sdnPlugin, err := sdnplugin.NewNodePlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient, options.NodeName, options.NodeIP, iptablesSyncPeriod, options.NetworkConfig.MTU)
	if err != nil {
		return nil, fmt.Errorf("SDN initialization failed: %v", err)
	}
	if sdnPlugin != nil {
		// SDN plugin pod setup/teardown is implemented as a CNI plugin
		server.NetworkPluginName = kubeletcni.CNIPluginName
		server.NetworkPluginDir = kubeletcni.DefaultNetDir
		server.HairpinMode = componentconfig.HairpinNone
		server.ConfigureCBR0 = false
	}

	deps, err := kubeletapp.UnsecuredKubeletDeps(server)
	if err != nil {
		return nil, err
	}

	// Initialize cloud provider
	cloud, err := buildCloudProvider(server)
	if err != nil {
		return nil, err
	}
	deps.Cloud = cloud

	// Replace the kubelet-created CNI plugin with the SDN plugin
	// Kubelet must be initialized with NetworkPluginName="cni" but
	// the SDN plugin (if available) needs to be the only one used
	if sdnPlugin != nil {
		deps.NetworkPlugins = []kubeletnetwork.NetworkPlugin{sdnPlugin}
	}

	// provide any config overrides
	//deps.NodeName = options.NodeName
	deps.KubeClient = kubeClient
	deps.EventClient = eventClient

	// Setup auth
	authnTTL, err := time.ParseDuration(options.AuthConfig.AuthenticationCacheTTL)
	if err != nil {
		return nil, err
	}
	authn, err := newAuthenticator(kubeClient.Authentication(), clientCAs, authnTTL, options.AuthConfig.AuthenticationCacheSize)
	if err != nil {
		return nil, err
	}

	authzAttr, err := newAuthorizerAttributesGetter(options.NodeName)
	if err != nil {
		return nil, err
	}

	authzTTL, err := time.ParseDuration(options.AuthConfig.AuthorizationCacheTTL)
	if err != nil {
		return nil, err
	}
	authz, err := newAuthorizer(originClient, authzTTL, options.AuthConfig.AuthorizationCacheSize)
	if err != nil {
		return nil, err
	}

	deps.Auth = kubeletserver.NewKubeletAuth(authn, authzAttr, authz)

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		extraCerts, err := configapi.GetNamedCertificateMap(options.ServingInfo.NamedCertificates)
		if err != nil {
			return nil, err
		}
		deps.TLSOptions = &kubeletserver.TLSOptions{
			Config: crypto.SecureTLSConfig(&tls.Config{
				// RequestClientCert lets us request certs, but allow requests without client certs
				// Verification is done by the authn layer
				ClientAuth: tls.RequestClientCert,
				ClientCAs:  clientCAs,
				// Set SNI certificate func
				// Do not use NameToCertificate, since that requires certificates be included in the server's tlsConfig.Certificates list,
				// which we do not control when running with http.Server#ListenAndServeTLS
				GetCertificate: cmdutil.GetCertificateFunc(extraCerts),
			}),
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		deps.TLSOptions = nil
	}

	sdnProxy, err := sdnplugin.NewProxyPlugin(options.NetworkConfig.NetworkPluginName, originClient, kubeClient)
	if err != nil {
		return nil, fmt.Errorf("SDN proxy initialization failed: %v", err)
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,
		Containerized:       containerized,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletDeps:   deps,

		ServicesReady: make(chan struct{}),

		ProxyConfig:    proxyconfig,
		EnableUnidling: options.EnableUnidling,

		SDNPlugin: sdnPlugin,
		SDNProxy:  sdnProxy,
	}

	if enableDNS {
		dnsConfig, err := dns.NewServerDefaults()
		if err != nil {
			return nil, fmt.Errorf("DNS configuration was not possible: %v", err)
		}
		if len(options.DNSIP) > 0 {
			dnsConfig.DnsAddr = options.DNSIP + ":53"
		}
		dnsConfig.Domain = server.ClusterDomain + "."
		dnsConfig.Local = "openshift.default.svc." + dnsConfig.Domain

		services, serviceStore := dns.NewCachedServiceAccessorAndStore()
		endpoints, endpointsStore := dns.NewCachedEndpointsAccessorAndStore()
		if !enableProxy {
			endpoints = deps.KubeClient
			endpointsStore = nil
		}

		// TODO: use kubeletConfig.ResolverConfig as an argument to etcd in the event the
		//   user sets it, instead of passing it to the kubelet.

		config.ServiceStore = serviceStore
		config.EndpointsStore = endpointsStore
		config.DNSServer = &dns.Server{
			Config:      dnsConfig,
			Services:    services,
			Endpoints:   endpoints,
			MetricsName: "node",
		}
	}

	return config, nil
}
Code example #27
File: node_config.go Project: cjnygard/origin
func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error) {
	kubeClient, _, err := configapi.GetKubeClient(options.MasterKubeConfig)
	if err != nil {
		return nil, err
	}

	if options.NodeName == "localhost" {
		glog.Warningf(`Using "localhost" as node name will not resolve from all locations`)
	}

	var dnsIP net.IP
	if len(options.DNSIP) > 0 {
		dnsIP = net.ParseIP(options.DNSIP)
		if dnsIP == nil {
			return nil, fmt.Errorf("Invalid DNS IP: %s", options.DNSIP)
		}
	}

	clientCAs, err := util.CertPoolFromFile(options.ServingInfo.ClientCA)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	var path string
	var fileCheckInterval int64
	if options.PodManifestConfig != nil {
		path = options.PodManifestConfig.Path
		fileCheckInterval = options.PodManifestConfig.FileCheckIntervalSeconds
	}

	var dockerExecHandler dockertools.ExecHandler

	switch options.DockerConfig.ExecHandlerName {
	case configapi.DockerExecHandlerNative:
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case configapi.DockerExecHandlerNsenter:
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	}

	kubeAddress, kubePortStr, err := net.SplitHostPort(options.ServingInfo.BindAddress)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node address: %v", err)
	}
	kubePort, err := strconv.Atoi(kubePortStr)
	if err != nil {
		return nil, fmt.Errorf("cannot parse node port: %v", err)
	}

	address := util.IP{}
	if err := address.Set(kubeAddress); err != nil {
		return nil, err
	}

	// declare the OpenShift defaults from config
	server := kapp.NewKubeletServer()
	server.Config = path
	server.RootDirectory = options.VolumeDirectory
	server.HostnameOverride = options.NodeName
	server.AllowPrivileged = true
	server.RegisterNode = true
	server.Address = address
	server.Port = uint(kubePort)
	server.ReadOnlyPort = 0 // no read only access
	server.ClusterDNS = util.IP(dnsIP)
	server.ClusterDomain = options.DNSDomain
	server.NetworkPluginName = options.NetworkPluginName
	server.HostNetworkSources = strings.Join([]string{kubelet.ApiserverSource, kubelet.FileSource}, ",")
	server.HTTPCheckFrequency = 0 // no remote HTTP pod creation access
	server.FileCheckFrequency = time.Duration(fileCheckInterval) * time.Second
	server.PodInfraContainerImage = imageTemplate.ExpandOrDie("pod")

	// prevents kube from generating certs
	server.TLSCertFile = options.ServingInfo.ServerCert.CertFile
	server.TLSPrivateKeyFile = options.ServingInfo.ServerCert.KeyFile

	if value := cmdutil.Env("OPENSHIFT_CONTAINERIZED", ""); len(value) > 0 {
		server.Containerized = value == "true"
	}

	// resolve extended arguments
	// TODO: this should be done in config validation (along with the above) so we can provide
	// proper errors
	if err := cmdflags.Resolve(options.KubeletArguments, server.AddFlags); len(err) > 0 {
		return nil, errors.NewAggregate(err)
	}

	cfg, err := server.KubeletConfig()
	if err != nil {
		return nil, err
	}

	// provide any config overrides
	cfg.StreamingConnectionIdleTimeout = 5 * time.Minute // TODO: should be set
	cfg.KubeClient = kubeClient
	cfg.DockerExecHandler = dockerExecHandler

	// TODO: could be cleaner
	if configapi.UseTLS(options.ServingInfo) {
		cfg.TLSOptions = &kubelet.TLSOptions{
			Config: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
				// RequireAndVerifyClientCert lets us limit requests to ones with a valid client certificate
				ClientAuth: tls.RequireAndVerifyClientCert,
				ClientCAs:  clientCAs,
			},
			CertFile: options.ServingInfo.ServerCert.CertFile,
			KeyFile:  options.ServingInfo.ServerCert.KeyFile,
		}
	} else {
		cfg.TLSOptions = nil
	}

	config := &NodeConfig{
		BindAddress: options.ServingInfo.BindAddress,

		AllowDisabledDocker: options.AllowDisabledDocker,

		Client: kubeClient,

		VolumeDir: options.VolumeDirectory,

		KubeletServer: server,
		KubeletConfig: cfg,
	}

	return config, nil
}
Code example #28
File: start_master.go Project: sgallagher/origin
// StartAPI starts the components of the master that are considered part of the API - the Kubernetes
// API and core controllers, the Origin API, the group, policy, project, and authorization caches,
// etcd, the asset server (for the UI), the OAuth server endpoints, and the DNS server.
// TODO: allow to be more granularly targeted
func StartAPI(oc *origin.MasterConfig, kc *kubernetes.MasterConfig) error {
	// start etcd
	if oc.Options.EtcdConfig != nil {
		etcdserver.RunEtcd(oc.Options.EtcdConfig)
	}

	// verify we can connect to etcd with the provided config
	if etcdClient, err := etcd.GetAndTestEtcdClient(oc.Options.EtcdClientInfo); err != nil {
		return err
	} else {
		etcdClient.Close()
	}

	// Must start policy caching immediately
	oc.RunGroupCache()
	oc.RunPolicyCache()
	oc.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if oc.Options.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(oc)
		if err != nil {
			return err
		}
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if oc.WebConsoleEnabled() {
		var overrideConfig *overrideapi.ClusterResourceOverrideConfig = nil
		if oc.Options.KubernetesMasterConfig != nil { // external kube gets you a nil pointer here
			if overridePluginConfigFile, err := pluginconfig.GetPluginConfigFile(oc.Options.KubernetesMasterConfig.AdmissionConfig.PluginConfig, overrideapi.PluginName, ""); err != nil {
				return err
			} else if overridePluginConfigFile != "" {
				configFile, err := os.Open(overridePluginConfigFile)
				if err != nil {
					return err
				}
				if overrideConfig, err = override.ReadConfig(configFile); err != nil {
					return err
				}
			}
		}

		config, err := origin.NewAssetConfig(*oc.Options.AssetConfig, overrideConfig)
		if err != nil {
			return err
		}

		if oc.Options.AssetConfig.ServingInfo.BindAddress == oc.Options.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	if kc != nil {
		oc.Run([]origin.APIInstaller{kc}, unprotectedInstallers)
	} else {
		_, kubeClientConfig, err := configapi.GetKubeClient(oc.Options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return err
		}
		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeClientConfig,
		}
		oc.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	oc.InitializeObjects()

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}

	if oc.Options.DNSConfig != nil {
		oc.RunDNSServer()
	}

	oc.RunProjectAuthorizationCache()
	return nil
}
Code example #29
File: master_config.go Project: jhadvig/origin
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdHelper, err := NewEtcdStorage(client, options.EtcdStorageConfig.OpenShiftStorageVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	policyCache, policyClient := newReadOnlyCacheAndClient(etcdHelper)
	requestContextMapper := kapi.NewRequestContextMapper()

	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupstorage.NewREST(etcdHelper)))

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{"OriginNamespaceLifecycle", "BuildByStrategy"}

	admissionClient := admissionControlClient(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient)
	admissionController := admission.NewFromPlugins(admissionClient, admissionControlPluginNames, "")

	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, client)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		Authenticator:                 newAuthenticator(options, etcdHelper, serviceAccountTokenGetter, apiClientCAs, groupCache),
		Authorizer:                    newAuthorizer(policyClient, options.ProjectConfig.ProjectRequestMessage),
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		PolicyCache:               policyCache,
		GroupCache:                groupCache,
		ProjectAuthorizationCache: newProjectAuthorizationCache(privilegedLoopbackOpenShiftClient, privilegedLoopbackKubeClient, policyClient),

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		EtcdClient:          client,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,

		BuildControllerServiceAccount:       bootstrappolicy.InfraBuildControllerServiceAccountName,
		DeploymentControllerServiceAccount:  bootstrappolicy.InfraDeploymentControllerServiceAccountName,
		ReplicationControllerServiceAccount: bootstrappolicy.InfraReplicationControllerServiceAccountName,
	}

	return config, nil
}
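The imageTemplate assembled above is exposed on the returned config as the ImageFor callback. A hedged illustration of what that expansion produces — the format string and resulting reference are indicative defaults, not values read from this listing:

// illustrateImageFor sketches the expansion behind ImageFor / ExpandOrDie.
// The format string is only an illustrative default; the real value comes
// from options.ImageConfig.Format.
func illustrateImageFor() string {
	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = "openshift/origin-${component}:${version}"
	imageTemplate.Latest = false
	// "deployer" expands to something like "openshift/origin-deployer:<version>"
	return imageTemplate.ExpandOrDie("deployer")
}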
Code example #30
0
File: master_config.go Project: rhamilto/origin
// BuildMasterConfig builds and returns the OpenShift master configuration based on the
// provided options
func BuildMasterConfig(options configapi.MasterConfig) (*MasterConfig, error) {
	client, err := etcd.EtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	etcdClient, err := etcd.MakeNewEtcdClient(options.EtcdClientInfo)
	if err != nil {
		return nil, err
	}
	groupVersion := unversioned.GroupVersion{Group: "", Version: options.EtcdStorageConfig.OpenShiftStorageVersion}
	etcdHelper, err := NewEtcdStorage(etcdClient, groupVersion, options.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		return nil, fmt.Errorf("Error setting up server storage: %v", err)
	}

	restOptsGetter := restoptions.NewConfigGetter(options)

	clientCAs, err := configapi.GetClientCertCAPool(options)
	if err != nil {
		return nil, err
	}
	apiClientCAs, err := configapi.GetAPIClientCertCAPool(options)
	if err != nil {
		return nil, err
	}

	privilegedLoopbackKubeClient, _, err := configapi.GetKubeClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}
	privilegedLoopbackOpenShiftClient, privilegedLoopbackClientConfig, err := configapi.GetOpenShiftClient(options.MasterClients.OpenShiftLoopbackKubeConfig)
	if err != nil {
		return nil, err
	}

	customListerWatchers := shared.DefaultListerWatcherOverrides{}
	if err := addAuthorizationListerWatchers(customListerWatchers, restOptsGetter); err != nil {
		return nil, err
	}
	informerFactory := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, customListerWatchers, 10*time.Minute)

	imageTemplate := variable.NewDefaultImageTemplate()
	imageTemplate.Format = options.ImageConfig.Format
	imageTemplate.Latest = options.ImageConfig.Latest

	requestContextMapper := kapi.NewRequestContextMapper()

	groupStorage, err := groupstorage.NewREST(restOptsGetter)
	if err != nil {
		return nil, err
	}
	groupCache := usercache.NewGroupCache(groupregistry.NewRegistry(groupStorage))
	projectCache := projectcache.NewProjectCache(privilegedLoopbackKubeClient.Namespaces(), options.ProjectConfig.DefaultNodeSelector)
	clusterQuotaMappingController := clusterquotamapping.NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())

	kubeletClientConfig := configapi.GetKubeletClientConfig(options)

	// in-order list of plug-ins that should intercept admission decisions (origin only intercepts)
	admissionControlPluginNames := []string{
		"ProjectRequestLimit",
		"OriginNamespaceLifecycle",
		"PodNodeConstraints",
		"JenkinsBootstrapper",
		"BuildByStrategy",
		imageadmission.PluginName,
		quotaadmission.PluginName,
	}
	if len(options.AdmissionConfig.PluginOrderOverride) > 0 {
		admissionControlPluginNames = options.AdmissionConfig.PluginOrderOverride
	}

	quotaRegistry := quota.NewOriginQuotaRegistry(privilegedLoopbackOpenShiftClient)
	ruleResolver := rulevalidation.NewDefaultRuleResolver(
		informerFactory.Policies().Lister(),
		informerFactory.PolicyBindings().Lister(),
		informerFactory.ClusterPolicies().Lister().ClusterPolicies(),
		informerFactory.ClusterPolicyBindings().Lister().ClusterPolicyBindings(),
	)
	authorizer := newAuthorizer(ruleResolver, informerFactory, options.ProjectConfig.ProjectRequestMessage)

	pluginInitializer := oadmission.PluginInitializer{
		OpenshiftClient:       privilegedLoopbackOpenShiftClient,
		ProjectCache:          projectCache,
		OriginQuotaRegistry:   quotaRegistry,
		Authorizer:            authorizer,
		JenkinsPipelineConfig: options.JenkinsPipelineConfig,
		RESTClientConfig:      *privilegedLoopbackClientConfig,
	}

	plugins := []admission.Interface{}
	clientsetClient := clientadapter.FromUnversionedClient(privilegedLoopbackKubeClient)
	for _, pluginName := range admissionControlPluginNames {
		configFile, err := pluginconfig.GetPluginConfig(options.AdmissionConfig.PluginConfig[pluginName])
		if err != nil {
			return nil, err
		}
		plugin := admission.InitPlugin(pluginName, clientsetClient, configFile)
		if plugin != nil {
			plugins = append(plugins, plugin)
		}
	}
	pluginInitializer.Initialize(plugins)
	// ensure that plugins have been properly initialized
	if err := oadmission.Validate(plugins); err != nil {
		return nil, err
	}
	admissionController := admission.NewChainHandler(plugins...)

	// TODO: look up storage by resource
	serviceAccountTokenGetter, err := newServiceAccountTokenGetter(options, etcdClient)
	if err != nil {
		return nil, err
	}

	authenticator, err := newAuthenticator(options, restOptsGetter, serviceAccountTokenGetter, apiClientCAs, groupCache)
	if err != nil {
		return nil, err
	}

	plug, plugStart := newControllerPlug(options, client)

	config := &MasterConfig{
		Options: options,

		RESTOptionsGetter: restOptsGetter,

		RuleResolver:                  ruleResolver,
		Authenticator:                 authenticator,
		Authorizer:                    authorizer,
		AuthorizationAttributeBuilder: newAuthorizationAttributeBuilder(requestContextMapper),

		GroupCache:                    groupCache,
		ProjectAuthorizationCache:     newProjectAuthorizationCache(authorizer, privilegedLoopbackKubeClient, informerFactory),
		ProjectCache:                  projectCache,
		ClusterQuotaMappingController: clusterQuotaMappingController,

		RequestContextMapper: requestContextMapper,

		AdmissionControl: admissionController,

		TLS: configapi.UseTLS(options.ServingInfo.ServingInfo),

		ControllerPlug:      plug,
		ControllerPlugStart: plugStart,

		ImageFor:            imageTemplate.ExpandOrDie,
		EtcdHelper:          etcdHelper,
		KubeletClientConfig: kubeletClientConfig,

		ClientCAs:    clientCAs,
		APIClientCAs: apiClientCAs,

		PluginInitializer: pluginInitializer,

		PrivilegedLoopbackClientConfig:     *privilegedLoopbackClientConfig,
		PrivilegedLoopbackOpenShiftClient:  privilegedLoopbackOpenShiftClient,
		PrivilegedLoopbackKubernetesClient: privilegedLoopbackKubeClient,
		Informers:                          informerFactory,
	}

	return config, nil
}
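Compared with example #29, this newer wiring routes reads through the shared informer factory instead of etcd-backed caches. A hedged sketch of the consumption side, reusing only accessors that already appear above; the variable names stand for the privileged loopback clients created earlier:

// Illustrative fragment only: obtain the same listers the rule resolver above consumes.
informers := shared.NewInformerFactory(privilegedLoopbackKubeClient, privilegedLoopbackOpenShiftClient, shared.DefaultListerWatcherOverrides{}, 10*time.Minute)
policyLister := informers.Policies().Lister()                                 // namespaced policies
clusterPolicyLister := informers.ClusterPolicies().Lister().ClusterPolicies() // cluster-scoped policies
_, _ = policyLister, clusterPolicyLister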