Example #1
// Start launches a master. It returns an error when it can, but some background processes
// may still be running and the process should exit after it finishes.
func (m *Master) Start() error {
	// Allow privileged containers
	// TODO: make this configurable and not the default https://github.com/openshift/origin/issues/662
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: true,
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
			HostPIDSources:     []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
			HostIPCSources:     []string{kubelettypes.ApiserverSource, kubelettypes.FileSource},
		},
	})

	openshiftConfig, err := origin.BuildMasterConfig(*m.config)
	if err != nil {
		return err
	}

	kubeMasterConfig, err := BuildKubernetesMasterConfig(openshiftConfig)
	if err != nil {
		return err
	}

	switch {
	case m.api:
		glog.Infof("Starting master on %s (%s)", m.config.ServingInfo.BindAddress, version.Get().String())
		glog.Infof("Public master address is %s", m.config.AssetConfig.MasterPublicURL)
		if len(m.config.DisabledFeatures) > 0 {
			glog.V(4).Infof("Disabled features: %s", strings.Join(m.config.DisabledFeatures, ", "))
		}
		glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

		if err := StartAPI(openshiftConfig, kubeMasterConfig); err != nil {
			return err
		}

	case m.controllers:
		glog.Infof("Starting controllers on %s (%s)", m.config.ServingInfo.BindAddress, version.Get().String())
		if len(m.config.DisabledFeatures) > 0 {
			glog.V(4).Infof("Disabled features: %s", strings.Join(m.config.DisabledFeatures, ", "))
		}
		glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

		if err := startHealth(openshiftConfig); err != nil {
			return err
		}
	}

	if m.controllers {
		// run controllers asynchronously (not required to be "ready")
		go func() {
			if err := startControllers(openshiftConfig, kubeMasterConfig); err != nil {
				glog.Fatal(err)
			}

			openshiftConfig.Informers.Start(utilwait.NeverStop)
		}()
	}

	return nil
}
Example #2
// DefaultOpenShiftUserAgent returns the default user agent that clients can use.
func DefaultOpenShiftUserAgent() string {
	commit := version.Get().GitCommit
	if len(commit) > 7 {
		commit = commit[:7]
	}
	if len(commit) == 0 {
		commit = "unknown"
	}
	version := version.Get().GitVersion
	seg := strings.SplitN(version, "-", 2)
	version = seg[0]
	return fmt.Sprintf("%s/%s (%s/%s) openshift/%s", path.Base(os.Args[0]), version, runtime.GOOS, runtime.GOARCH, commit)
}
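
A minimal usage sketch follows, assuming the Kubernetes restclient.Config type (the same restclient package used in Examples #11 and #14); the host value and helper name are illustrative, not taken from the examples above.

// buildClientConfig is a hypothetical helper showing where the generated user agent
// could be applied; restclient is assumed to be the Kubernetes rest client package.
func buildClientConfig(host string) *restclient.Config {
	cfg := &restclient.Config{Host: host} // host is illustrative
	cfg.UserAgent = DefaultOpenShiftUserAgent()
	return cfg
}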
Example #3
func main() {
	openshiftCmd := &cobra.Command{
		Use:   "openshift",
		Short: "OpenShift helps you build, deploy, and manage your applications",
		Long:  longDescription,
		Run: func(c *cobra.Command, args []string) {
			c.Help()
		},
	}

	openshiftCmd.AddCommand(master.NewCommandStartAllInOne("start"))
	openshiftCmd.AddCommand(client.NewCommandKubecfg("kube"))
	openshiftCmd.AddCommand(tour.NewCommandTour("tour"))

	// version information
	versionCmd := &cobra.Command{
		Use:   "version",
		Short: "Display version",
		Run: func(c *cobra.Command, args []string) {
			fmt.Printf("openshift %v\n", version.Get())
			fmt.Printf("kubernetes %v\n", kubeversion.Get())
		},
	}
	openshiftCmd.AddCommand(versionCmd)

	if err := openshiftCmd.Execute(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %s\n", err)
		os.Exit(1)
	}
}
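
The same cobra pattern extends to additional subcommands; a hypothetical sketch (the ping command does not exist in the examples above) is shown below.

// newPingCommand is a hypothetical example of one more cobra subcommand that could be
// attached to the root command built in main; it is not part of the original CLI.
func newPingCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "ping",
		Short: "Print a liveness message",
		Run: func(c *cobra.Command, args []string) {
			fmt.Println("pong")
		},
	}
}

It would be wired in with openshiftCmd.AddCommand(newPingCommand()) alongside the other AddCommand calls.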
Example #4
func StartNode(nodeConfig configapi.NodeConfig) error {
	config, err := kubernetes.BuildKubernetesNodeConfig(nodeConfig)
	if err != nil {
		return err
	}
	glog.Infof("Starting node %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())

	// preconditions
	config.EnsureVolumeDir()
	config.EnsureDocker(docker.NewHelper())

	// async starts
	config.RunKubelet()
	config.RunSDN()
	config.RunProxy()

	// HACK: RunProxy resets bridge-nf-call-iptables from what openshift-sdn requires
	if config.SDNPlugin != nil {
		sdnPluginName := nodeConfig.NetworkConfig.NetworkPluginName
		if sdnPluginName == osdn.SingleTenantPluginName() || sdnPluginName == osdn.MultiTenantPluginName() {
			if err := sysctl.SetSysctl("net/bridge/bridge-nf-call-iptables", 0); err != nil {
				glog.Warningf("Could not set net.bridge.bridge-nf-call-iptables sysctl: %s", err)
			}
		}
	}

	return nil
}
Example #5
func (c *AssetConfig) buildAssetHandler() (http.Handler, error) {
	assets.RegisterMimeTypes()

	publicURL, err := url.Parse(c.Options.PublicURL)
	if err != nil {
		glog.Fatal(err)
	}

	assetFunc := assets.JoinAssetFuncs(assets.Asset, java.Asset)
	assetDirFunc := assets.JoinAssetDirFuncs(assets.AssetDir, java.AssetDir)

	handler := http.FileServer(&assetfs.AssetFS{Asset: assetFunc, AssetDir: assetDirFunc, Prefix: ""})

	// Map of context roots (no leading or trailing slash) to the asset path to serve for requests to a missing asset
	subcontextMap := map[string]string{
		"":     "index.html",
		"java": "java/index.html",
	}

	handler, err = assets.HTML5ModeHandler(publicURL.Path, subcontextMap, handler, assetFunc)
	if err != nil {
		return nil, err
	}

	// Cache control should happen after all Vary headers are added, but before
	// any asset related routing (HTML5ModeHandler and FileServer)
	handler = assets.CacheControlHandler(version.Get().GitCommit, handler)

	// Gzip first so that inner handlers can react to the addition of the Vary header
	handler = assets.GzipHandler(handler)

	return handler, nil
}
Example #6
// handleVersion writes the server's version information.
func handleVersion(req *restful.Request, resp *restful.Response) {
	output, err := json.MarshalIndent(version.Get(), "", "  ")
	if err != nil {
		http.Error(resp.ResponseWriter, err.Error(), http.StatusInternalServerError)
		return
	}
	resp.ResponseWriter.Header().Set("Content-Type", "application/json")
	resp.ResponseWriter.WriteHeader(http.StatusOK)
	resp.ResponseWriter.Write(output)
}
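
handleVersion matches go-restful's RouteFunction signature, so it can be mounted directly on a WebService. A hedged sketch follows; the /version path and helper name are assumptions (Example #15 shows the project's own wiring).

// registerVersionRoute is a hypothetical helper that mounts handleVersion on a container.
func registerVersionRoute(container *restful.Container) {
	ws := new(restful.WebService)
	ws.Path("/version") // assumed path
	ws.Route(ws.GET("/").To(handleVersion).Produces(restful.MIME_JSON))
	container.Add(ws)
}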
Example #7
// NewSentryMonitor creates a monitor that can capture panics and errors from OpenShift
// and Kubernetes and roll them up to a Sentry server.
func NewSentryMonitor(url string) (*SentryMonitor, error) {
	client, err := raven.NewClient(url, nil)
	if err != nil {
		return nil, err
	}
	client.SetRelease(version.Get().GitCommit)
	return &SentryMonitor{
		client: client,
	}, nil
}
Example #8
func (c *AssetConfig) buildHandler() (http.Handler, error) {
	assets.RegisterMimeTypes()

	masterURL, err := url.Parse(c.Options.MasterPublicURL)
	if err != nil {
		return nil, err
	}

	publicURL, err := url.Parse(c.Options.PublicURL)
	if err != nil {
		glog.Fatal(err)
	}

	config := assets.WebConsoleConfig{
		MasterAddr:        masterURL.Host,
		MasterPrefix:      LegacyOpenShiftAPIPrefix, // TODO: change when the UI changes from v1beta3 to v1
		KubernetesAddr:    masterURL.Host,
		KubernetesPrefix:  KubernetesAPIPrefix,
		OAuthAuthorizeURI: OpenShiftOAuthAuthorizeURL(masterURL.String()),
		OAuthRedirectBase: c.Options.PublicURL,
		OAuthClientID:     OpenShiftWebConsoleClientID,
		LogoutURI:         c.Options.LogoutURL,
	}

	handler := http.FileServer(
		&assetfs.AssetFS{
			assets.Asset,
			assets.AssetDir,
			"",
		},
	)

	// Map of context roots (no leading or trailing slash) to the asset path to serve for requests to a missing asset
	subcontextMap := map[string]string{
		"":     "index.html",
		"java": "java/index.html",
	}

	handler, err = assets.HTML5ModeHandler(publicURL.Path, subcontextMap, handler)
	if err != nil {
		return nil, err
	}

	// Cache control should happen after all Vary headers are added, but before
	// any asset related routing (HTML5ModeHandler and FileServer)
	handler = assets.CacheControlHandler(version.Get().GitCommit, handler)

	// Generated config.js can not be cached since it changes depending on startup options
	handler = assets.GeneratedConfigHandler(config, handler)

	// Gzip first so that inner handlers can react to the addition of the Vary header
	handler = assets.GzipHandler(handler)

	return handler, nil
}
Example #9
func StartNode(nodeConfig configapi.NodeConfig, components *utilflags.ComponentFlag) error {
	config, err := kubernetes.BuildKubernetesNodeConfig(nodeConfig, components.Enabled(ComponentProxy), components.Enabled(ComponentDNS))
	if err != nil {
		return err
	}

	// When the OpenShift network plugin is used, nodeConfig.networkPluginName is optional and is
	// auto-detected/finalized once the Kubernetes node config is built, so perform the plugin name check here.
	if osdn.IsOpenShiftNetworkPlugin(config.KubeletServer.NetworkPluginName) {
		// TODO: SDN plugin depends on the Kubelet registering as a Node and doesn't retry cleanly,
		// and Kubelet also can't start the PodSync loop until the SDN plugin has loaded.
		if components.Enabled(ComponentKubelet) != components.Enabled(ComponentPlugins) {
			return fmt.Errorf("the SDN plugin must be run in the same process as the kubelet")
		}
	}

	if components.Enabled(ComponentKubelet) {
		glog.Infof("Starting node %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
	} else {
		glog.Infof("Starting node networking %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
	}

	_, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig)
	if err != nil {
		return err
	}
	glog.Infof("Connecting to API server %s", kubeClientConfig.Host)

	// preconditions
	if components.Enabled(ComponentKubelet) {
		config.EnsureKubeletAccess()
		config.EnsureVolumeDir()
		config.EnsureDocker(docker.NewHelper())
		config.EnsureLocalQuota(nodeConfig) // must be performed after EnsureVolumeDir
	}

	if components.Enabled(ComponentKubelet) {
		config.RunKubelet()
	}
	if components.Enabled(ComponentPlugins) {
		config.RunPlugin()
	}
	if components.Enabled(ComponentProxy) {
		config.RunProxy()
	}
	if components.Enabled(ComponentDNS) {
		config.RunDNS()
	}

	config.RunServiceStores(components.Enabled(ComponentProxy), components.Enabled(ComponentDNS))

	return nil
}
Example #10
// Provide a summary at the end
func (l *Logger) Summary(warningsSeen int, errorsSeen int) {
	l.Notice("DL0001", fmt.Sprintf("Summary of diagnostics execution (version %v):\n", version.Get()))
	if warningsSeen > 0 {
		l.Notice("DL0002", fmt.Sprintf("Warnings seen: %d", warningsSeen))
	}
	if errorsSeen > 0 {
		l.Notice("DL0003", fmt.Sprintf("Errors seen: %d", errorsSeen))
	}
	if warningsSeen == 0 && errorsSeen == 0 {
		l.Notice("DL0004", "Completed with no errors or warnings seen.")
	}
}
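
A minimal caller sketch; the warning and failure slices are stand-ins for whatever a real diagnostics run actually produced.

// summarizeRun is a hypothetical driver that tallies simple counters before emitting
// the summary; real diagnostics would accumulate these counts differently.
func summarizeRun(l *Logger, warnings []string, failures []error) {
	l.Summary(len(warnings), len(failures))
}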
Example #11
func newBuilderConfigFromEnvironment(out io.Writer) (*builderConfig, error) {
	cfg := &builderConfig{}
	var err error

	cfg.out = out

	// build (BUILD)
	buildStr := os.Getenv("BUILD")
	glog.V(4).Infof("$BUILD env var is %s \n", buildStr)
	cfg.build = &api.Build{}
	if err := runtime.DecodeInto(kapi.Codecs.UniversalDecoder(), []byte(buildStr), cfg.build); err != nil {
		return nil, fmt.Errorf("unable to parse build: %v", err)
	}
	if errs := validation.ValidateBuild(cfg.build); len(errs) > 0 {
		return nil, errors.NewInvalid(unversioned.GroupKind{Kind: "Build"}, cfg.build.Name, errs)
	}
	glog.V(4).Infof("Build: %#v", cfg.build)

	masterVersion := os.Getenv(api.OriginVersion)
	thisVersion := version.Get().String()
	if len(masterVersion) != 0 && masterVersion != thisVersion {
		fmt.Fprintf(cfg.out, "warning: OpenShift server version %q differs from this image %q\n", masterVersion, thisVersion)
	} else {
		glog.V(4).Infof("Master version %q, Builder version %q", masterVersion, thisVersion)
	}

	// sourceSecretsDir (SOURCE_SECRET_PATH)
	cfg.sourceSecretDir = os.Getenv("SOURCE_SECRET_PATH")

	// dockerClient and dockerEndpoint (DOCKER_HOST)
	// usually not set, defaults to docker socket
	cfg.dockerClient, cfg.dockerEndpoint, err = dockerutil.NewHelper().GetClient()
	if err != nil {
		return nil, fmt.Errorf("no Docker configuration defined: %v", err)
	}

	// buildsClient (KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT)
	clientConfig, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("cannot connect to the server: %v", err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to get client: %v", err)
	}
	cfg.buildsClient = osClient.Builds(cfg.build.Namespace)

	return cfg, nil
}
Example #12
func StartNode(nodeConfig configapi.NodeConfig) error {
	config, err := kubernetes.BuildKubernetesNodeConfig(nodeConfig)
	if err != nil {
		return err
	}
	glog.Infof("Starting node %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())

	RunSDNController(config, nodeConfig)
	config.EnsureVolumeDir()
	config.EnsureDocker(docker.NewHelper())
	config.RunProxy()
	config.RunKubelet()

	return nil
}
Example #13
func StartNode(nodeConfig configapi.NodeConfig, components *utilflags.ComponentFlag) error {
	config, err := kubernetes.BuildKubernetesNodeConfig(nodeConfig)
	if err != nil {
		return err
	}

	if components.Enabled(ComponentKubelet) {
		glog.Infof("Starting node %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
	} else {
		glog.Infof("Starting node networking %s (%s)", config.KubeletServer.HostnameOverride, version.Get().String())
	}

	_, kubeClientConfig, err := configapi.GetKubeClient(nodeConfig.MasterKubeConfig)
	if err != nil {
		return err
	}
	glog.Infof("Connecting to API server %s", kubeClientConfig.Host)

	// preconditions
	if components.Enabled(ComponentKubelet) {
		config.EnsureKubeletAccess()
		config.EnsureVolumeDir()
		config.EnsureDocker(docker.NewHelper())
		config.EnsureLocalQuota(nodeConfig) // must be performed after EnsureVolumeDir
	}

	// TODO: SDN plugin depends on the Kubelet registering as a Node and doesn't retry cleanly,
	// and Kubelet also can't start the PodSync loop until the SDN plugin has loaded.
	if components.Enabled(ComponentKubelet) {
		config.RunKubelet()
	}
	// SDN plugins get the opportunity to filter service rules, so they start first
	if components.Enabled(ComponentPlugins) {
		config.RunPlugin()
	}
	if components.Enabled(ComponentProxy) {
		config.RunProxy()
	}
	// if we are running plugins in this process, reset the bridge ip rule
	if components.Enabled(ComponentPlugins) {
		config.ResetSysctlFromProxy()
	}

	return nil
}
Example #14
func newBuilderConfigFromEnvironment() (*builderConfig, error) {
	cfg := &builderConfig{}
	var err error

	// build (BUILD)
	buildStr := os.Getenv("BUILD")
	glog.V(4).Infof("$BUILD env var is %s \n", buildStr)
	cfg.build = &api.Build{}
	if err = runtime.DecodeInto(kapi.Codecs.UniversalDecoder(), []byte(buildStr), cfg.build); err != nil {
		return nil, fmt.Errorf("unable to parse build: %v", err)
	}

	masterVersion := os.Getenv(api.OriginVersion)
	thisVersion := version.Get().String()
	if len(masterVersion) != 0 && masterVersion != thisVersion {
		glog.Warningf("Master version %q does not match Builder image version %q", masterVersion, thisVersion)
	} else {
		glog.V(2).Infof("Master version %q, Builder version %q", masterVersion, thisVersion)
	}

	// sourceSecretsDir (SOURCE_SECRET_PATH)
	cfg.sourceSecretDir = os.Getenv("SOURCE_SECRET_PATH")

	// dockerClient and dockerEndpoint (DOCKER_HOST)
	// usually not set, defaults to docker socket
	cfg.dockerClient, cfg.dockerEndpoint, err = dockerutil.NewHelper().GetClient()
	if err != nil {
		return nil, fmt.Errorf("error obtaining docker client: %v", err)
	}

	// buildsClient (KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT)
	clientConfig, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to get client config: %v", err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("error obtaining OpenShift client: %v", err)
	}
	cfg.buildsClient = osClient.Builds(cfg.build.Namespace)

	return cfg, nil
}
Example #15
// initVersionRoute initializes an HTTP endpoint for the server's version information.
func initVersionRoute(container *restful.Container, path string) {
	// Build version info once
	versionInfo, err := json.MarshalIndent(version.Get(), "", "  ")
	if err != nil {
		glog.Errorf("Unable to initialize version route: %v", err)
		return
	}

	// Set up a service to return the git code version.
	versionWS := new(restful.WebService)
	versionWS.Path(path)
	versionWS.Doc("git code version from which this is built")
	versionWS.Route(
		versionWS.GET("/").To(func(_ *restful.Request, resp *restful.Response) {
			writeJSON(resp, versionInfo)
		}).
			Doc("get the code version").
			Operation("getCodeVersion").
			Produces(restful.MIME_JSON))

	container.Add(versionWS)
}
Example #16
func (c *AssetConfig) addHandlers(handler http.Handler) (http.Handler, error) {
	publicURL, err := url.Parse(c.Options.PublicURL)
	if err != nil {
		return nil, err
	}

	mux := http.NewServeMux()
	if handler != nil {
		// colocated with other routes, so pass any unrecognized routes through to
		// handler
		mux.Handle("/", handler)
	} else {
		// standalone mode, so redirect any unrecognized routes to the console
		if publicURL.Path != "/" {
			mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
				http.Redirect(w, req, publicURL.Path, http.StatusFound)
			})
		}
	}

	assetHandler, err := c.buildAssetHandler()
	if err != nil {
		return nil, err
	}

	masterURL, err := url.Parse(c.Options.MasterPublicURL)
	if err != nil {
		return nil, err
	}

	// Web console assets
	mux.Handle(publicURL.Path, http.StripPrefix(publicURL.Path, assetHandler))

	originResources := sets.NewString()
	k8sResources := sets.NewString()

	versions := []unversioned.GroupVersion{}
	versions = append(versions, registered.GroupOrDie(api.GroupName).GroupVersions...)
	versions = append(versions, registered.GroupOrDie(kapi.GroupName).GroupVersions...)
	deadOriginVersions := sets.NewString(configapi.DeadOpenShiftAPILevels...)
	deadKubernetesVersions := sets.NewString(configapi.DeadKubernetesAPILevels...)
	for _, version := range versions {
		for kind := range kapi.Scheme.KnownTypes(version) {
			if strings.HasSuffix(kind, "List") {
				continue
			}
			resource, _ := meta.KindToResource(version.WithKind(kind))
			if latest.OriginKind(version.WithKind(kind)) {
				if !deadOriginVersions.Has(version.String()) {
					originResources.Insert(resource.Resource)
				}
			} else {
				if !deadKubernetesVersions.Has(version.String()) {
					k8sResources.Insert(resource.Resource)
				}
			}
		}
	}

	commonResources := sets.NewString()
	for _, r := range originResources.List() {
		if k8sResources.Has(r) {
			commonResources.Insert(r)
		}
	}
	if commonResources.Len() > 0 {
		return nil, fmt.Errorf("Resources for kubernetes and origin types intersect: %v", commonResources.List())
	}

	// Generated web console config and server version
	config := assets.WebConsoleConfig{
		APIGroupAddr:          masterURL.Host,
		APIGroupPrefix:        genericapiserver.APIGroupPrefix,
		MasterAddr:            masterURL.Host,
		MasterPrefix:          api.Prefix,
		MasterResources:       originResources.List(),
		KubernetesAddr:        masterURL.Host,
		KubernetesPrefix:      genericapiserver.DefaultLegacyAPIPrefix,
		KubernetesResources:   k8sResources.List(),
		OAuthAuthorizeURI:     OpenShiftOAuthAuthorizeURL(masterURL.String()),
		OAuthTokenURI:         OpenShiftOAuthTokenURL(masterURL.String()),
		OAuthRedirectBase:     c.Options.PublicURL,
		OAuthClientID:         OpenShiftWebConsoleClientID,
		LogoutURI:             c.Options.LogoutURL,
		LoggingURL:            c.Options.LoggingPublicURL,
		MetricsURL:            c.Options.MetricsPublicURL,
		LimitRequestOverrides: c.LimitRequestOverrides,
	}
	kVersionInfo := kversion.Get()
	oVersionInfo := oversion.Get()
	versionInfo := assets.WebConsoleVersion{
		KubernetesVersion: kVersionInfo.GitVersion,
		OpenShiftVersion:  oVersionInfo.GitVersion,
	}

	extensionProps := assets.WebConsoleExtensionProperties{
		ExtensionProperties: extensionPropertyArray(c.Options.ExtensionProperties),
	}
	configPath := path.Join(publicURL.Path, "config.js")
	configHandler, err := assets.GeneratedConfigHandler(config, versionInfo, extensionProps)
	if err != nil {
		return nil, err
	}
	mux.Handle(configPath, assets.GzipHandler(configHandler))

	// Extension scripts
	extScriptsPath := path.Join(publicURL.Path, "scripts/extensions.js")
	extScriptsHandler, err := assets.ExtensionScriptsHandler(c.Options.ExtensionScripts, c.Options.ExtensionDevelopment)
	if err != nil {
		return nil, err
	}
	mux.Handle(extScriptsPath, assets.GzipHandler(extScriptsHandler))

	// Extension stylesheets
	extStylesheetsPath := path.Join(publicURL.Path, "styles/extensions.css")
	extStylesheetsHandler, err := assets.ExtensionStylesheetsHandler(c.Options.ExtensionStylesheets, c.Options.ExtensionDevelopment)
	if err != nil {
		return nil, err
	}
	mux.Handle(extStylesheetsPath, assets.GzipHandler(extStylesheetsHandler))

	// Extension files
	for _, extConfig := range c.Options.Extensions {
		extBasePath := path.Join(publicURL.Path, "extensions", extConfig.Name)
		extPath := extBasePath + "/"
		extHandler := assets.AssetExtensionHandler(extConfig.SourceDirectory, extPath, extConfig.HTML5Mode)
		mux.Handle(extPath, http.StripPrefix(extBasePath, extHandler))
	}

	return mux, nil
}
Example #17
func StartMaster(openshiftMasterConfig *configapi.MasterConfig) error {
	glog.Infof("Starting OpenShift master on %s (%s)", openshiftMasterConfig.ServingInfo.BindAddress, version.Get().String())
	glog.Infof("Public master address is %s", openshiftMasterConfig.AssetConfig.MasterPublicURL)

	if openshiftMasterConfig.EtcdConfig != nil {
		etcd.RunEtcd(openshiftMasterConfig.EtcdConfig)
	}

	// Allow privileged containers
	// TODO: make this configurable and not the default https://github.com/openshift/origin/issues/662
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged:    true,
		HostNetworkSources: []string{kubelet.ApiserverSource, kubelet.FileSource},
	})

	openshiftConfig, err := origin.BuildMasterConfig(*openshiftMasterConfig)
	if err != nil {
		return err
	}

	go func() {
		openshiftConfig.ControllerPlug.WaitForStop()
		glog.Fatalf("Master shutdown requested")
	}()

	// Must start policy caching immediately
	openshiftConfig.RunPolicyCache()
	openshiftConfig.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if openshiftMasterConfig.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(*openshiftMasterConfig)
		if err != nil {
			return err
		}
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if openshiftMasterConfig.AssetConfig != nil {
		config, err := origin.BuildAssetConfig(*openshiftMasterConfig.AssetConfig)
		if err != nil {
			return err
		}

		if openshiftMasterConfig.AssetConfig.ServingInfo.BindAddress == openshiftMasterConfig.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	var kubeConfig *kubernetes.MasterConfig
	if openshiftMasterConfig.KubernetesMasterConfig != nil {
		kubeConfig, err = kubernetes.BuildKubernetesMasterConfig(*openshiftMasterConfig, openshiftConfig.RequestContextMapper, openshiftConfig.KubeClient())
		if err != nil {
			return err
		}

		openshiftConfig.Run([]origin.APIInstaller{kubeConfig}, unprotectedInstallers)

	} else {
		_, kubeConfig, err := configapi.GetKubeClient(openshiftMasterConfig.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return err
		}

		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeConfig,
		}

		openshiftConfig.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}
	if openshiftMasterConfig.DNSConfig != nil {
		openshiftConfig.RunDNSServer()
	}

	openshiftConfig.RunProjectAuthorizationCache()

	if openshiftMasterConfig.Controllers != configapi.ControllersDisabled {
		go func() {
			openshiftConfig.ControllerPlug.WaitForStart()
			glog.Infof("Master controllers starting (%s)", openshiftMasterConfig.Controllers)

			// Start these first, because they provide credentials for other controllers' clients
			openshiftConfig.RunServiceAccountsController()
			openshiftConfig.RunServiceAccountTokensController()
			// used by admission controllers
			openshiftConfig.RunServiceAccountPullSecretsControllers()
			openshiftConfig.RunSecurityAllocationController()

			if kubeConfig != nil {
				_, rcClient, err := openshiftConfig.GetServiceAccountClients(openshiftConfig.ReplicationControllerServiceAccount)
				if err != nil {
					glog.Fatalf("Could not get client for replication controller: %v", err)
				}

				// called by admission control
				kubeConfig.RunResourceQuotaManager()

				// no special order
				kubeConfig.RunNodeController()
				kubeConfig.RunScheduler()
				kubeConfig.RunReplicationController(rcClient)
				kubeConfig.RunEndpointController()
				kubeConfig.RunNamespaceController()
				kubeConfig.RunPersistentVolumeClaimBinder()
				kubeConfig.RunPersistentVolumeClaimRecycler(openshiftConfig.ImageFor("deployer"))
			}

			// no special order
			openshiftConfig.RunBuildController()
			openshiftConfig.RunBuildPodController()
			openshiftConfig.RunBuildImageChangeTriggerController()
			openshiftConfig.RunDeploymentController()
			openshiftConfig.RunDeployerPodController()
			openshiftConfig.RunDeploymentConfigController()
			openshiftConfig.RunDeploymentConfigChangeController()
			openshiftConfig.RunDeploymentImageChangeTriggerController()
			openshiftConfig.RunImageImportController()
			openshiftConfig.RunOriginNamespaceController()
			openshiftConfig.RunSDNController()
		}()
	}

	return nil
}
Example #18
func (c *AssetConfig) addHandlers(mux *http.ServeMux) error {
	assetHandler, err := c.buildAssetHandler()
	if err != nil {
		return err
	}

	publicURL, err := url.Parse(c.Options.PublicURL)
	if err != nil {
		return err
	}

	masterURL, err := url.Parse(c.Options.MasterPublicURL)
	if err != nil {
		return err
	}

	// Web console assets
	mux.Handle(publicURL.Path, http.StripPrefix(publicURL.Path, assetHandler))

	originResources := sets.NewString()
	k8sResources := sets.NewString()

	versions := []unversioned.GroupVersion{}
	versions = append(versions, registered.GroupOrDie(api.GroupName).GroupVersions...)
	versions = append(versions, registered.GroupOrDie(kapi.GroupName).GroupVersions...)
	deadOriginVersions := sets.NewString(configapi.DeadOpenShiftAPILevels...)
	deadKubernetesVersions := sets.NewString(configapi.DeadKubernetesAPILevels...)
	for _, version := range versions {
		for kind := range kapi.Scheme.KnownTypes(version) {
			if strings.HasSuffix(kind, "List") {
				continue
			}
			resource, _ := meta.KindToResource(version.WithKind(kind), false)
			if latest.OriginKind(version.WithKind(kind)) {
				if !deadOriginVersions.Has(version.String()) {
					originResources.Insert(resource.Resource)
				}
			} else {
				if !deadKubernetesVersions.Has(version.String()) {
					k8sResources.Insert(resource.Resource)
				}
			}
		}
	}

	commonResources := sets.NewString()
	for _, r := range originResources.List() {
		if k8sResources.Has(r) {
			commonResources.Insert(r)
		}
	}
	if commonResources.Len() > 0 {
		return fmt.Errorf("Resources for kubernetes and origin types intersect: %v", commonResources.List())
	}

	// Generated web console config and server version
	config := assets.WebConsoleConfig{
		APIGroupAddr:        masterURL.Host,
		APIGroupPrefix:      KubernetesAPIGroupPrefix,
		MasterAddr:          masterURL.Host,
		MasterPrefix:        OpenShiftAPIPrefix,
		MasterResources:     originResources.List(),
		KubernetesAddr:      masterURL.Host,
		KubernetesPrefix:    KubernetesAPIPrefix,
		KubernetesResources: k8sResources.List(),
		OAuthAuthorizeURI:   OpenShiftOAuthAuthorizeURL(masterURL.String()),
		OAuthRedirectBase:   c.Options.PublicURL,
		OAuthClientID:       OpenShiftWebConsoleClientID,
		LogoutURI:           c.Options.LogoutURL,
		LoggingURL:          c.Options.LoggingPublicURL,
		MetricsURL:          c.Options.MetricsPublicURL,
	}
	kVersionInfo := kversion.Get()
	oVersionInfo := oversion.Get()
	versionInfo := assets.WebConsoleVersion{
		KubernetesVersion: kVersionInfo.GitVersion,
		OpenShiftVersion:  oVersionInfo.GitVersion,
	}
	configPath := path.Join(publicURL.Path, "config.js")
	configHandler, err := assets.GeneratedConfigHandler(config, versionInfo)
	if err != nil {
		return err
	}
	mux.Handle(configPath, assets.GzipHandler(configHandler))

	// Extension scripts
	extScriptsPath := path.Join(publicURL.Path, "scripts/extensions.js")
	extScriptsHandler, err := assets.ExtensionScriptsHandler(c.Options.ExtensionScripts, c.Options.ExtensionDevelopment)
	if err != nil {
		return err
	}
	mux.Handle(extScriptsPath, assets.GzipHandler(extScriptsHandler))

	// Extension stylesheets
	extStylesheetsPath := path.Join(publicURL.Path, "styles/extensions.css")
	extStylesheetsHandler, err := assets.ExtensionStylesheetsHandler(c.Options.ExtensionStylesheets, c.Options.ExtensionDevelopment)
	if err != nil {
		return err
	}
	mux.Handle(extStylesheetsPath, assets.GzipHandler(extStylesheetsHandler))

	// Extension files
	for _, extConfig := range c.Options.Extensions {
		extPath := path.Join(publicURL.Path, "extensions", extConfig.Name) + "/"
		extHandler := assets.AssetExtensionHandler(extConfig.SourceDirectory, extPath, extConfig.HTML5Mode)
		mux.Handle(extPath, http.StripPrefix(extPath, extHandler))
	}

	return nil
}
Example #19
// RunVersion attempts to display client and server versions for Kubernetes and OpenShift
func (o VersionOptions) RunVersion() error {
	fmt.Fprintf(o.Out, "%s %v\n", o.BaseName, version.Get())
	fmt.Fprintf(o.Out, "kubernetes %v\n", kubeversion.Get())
	if o.PrintEtcdVersion {
		fmt.Fprintf(o.Out, "etcd %v\n", etcdversion.Version)
	}

	if o.PrintClientFeatures {
		features := []string{}
		if tokencmd.BasicEnabled() {
			features = append(features, "Basic-Auth")
		}
		if tokencmd.GSSAPIEnabled() {
			features = append(features, "GSSAPI")
			features = append(features, "Kerberos") // GSSAPI or SSPI
			features = append(features, "SPNEGO")   // GSSAPI or SSPI
		}
		fmt.Printf("features: %s\n", strings.Join(features, " "))
	}

	// do not attempt to print server info if already running cmd as the server
	// or if no client config is present
	if o.ClientConfig == nil || o.IsServer {
		return nil
	}

	// max amount of time we want to wait for server to respond
	timeout := 10 * time.Second

	done := make(chan error)
	oVersion := ""
	kVersion := ""
	versionHost := ""

	// start goroutine to fetch openshift / kubernetes server version
	go func() {
		defer close(done)

		// confirm config exists before making a request to the server
		var err error
		clientConfig, err := o.ClientConfig.ClientConfig()
		if err != nil {
			done <- err
			return
		}
		versionHost = clientConfig.Host

		oClient, kClient, err := o.Clients()
		if err != nil {
			done <- err
			return
		}

		ocVersionBody, err := oClient.Get().AbsPath("/version/openshift").Do().Raw()
		if kapierrors.IsNotFound(err) || kapierrors.IsUnauthorized(err) || kapierrors.IsForbidden(err) {
			return
		}
		if err != nil {
			done <- err
			return
		}
		var ocServerInfo version.Info
		err = json.Unmarshal(ocVersionBody, &ocServerInfo)
		if err != nil && len(ocVersionBody) > 0 {
			done <- err
			return
		}
		oVersion = fmt.Sprintf("%v", ocServerInfo)

		kubeVersionBody, err := kClient.Get().AbsPath("/version").Do().Raw()
		if kapierrors.IsNotFound(err) || kapierrors.IsUnauthorized(err) || kapierrors.IsForbidden(err) {
			return
		}
		if err != nil {
			done <- err
			return
		}
		var kubeServerInfo kubeversion.Info
		err = json.Unmarshal(kubeVersionBody, &kubeServerInfo)
		if err != nil && len(kubeVersionBody) > 0 {
			done <- err
			return
		}
		kVersion = fmt.Sprintf("%v", kubeServerInfo)

	}()

	select {
	case err, closed := <-done:
		if strings.HasSuffix(fmt.Sprintf("%v", err), "connection refused") || clientcmd.IsConfigurationMissing(err) || kclientcmd.IsConfigurationInvalid(err) {
			return nil
		}
		if closed && err != nil {
			return err
		}
	case <-time.After(timeout):
		return fmt.Errorf("%s", "error: server took too long to respond with version information.")
	}

	if oVersion != "" || kVersion != "" {
		fmt.Fprintf(o.Out, "\n%s%s\n", "Server ", versionHost)
	}
	if oVersion != "" {
		fmt.Fprintf(o.Out, "openshift %s\n", oVersion)
	}
	if kVersion != "" {
		fmt.Fprintf(o.Out, "kubernetes %s\n", kVersion)
	}

	return nil
}
Example #20
// Run launches the OpenShift master. It takes optional installers that may install additional endpoints into the server.
// All endpoints get configured CORS behavior
// Protected installers' endpoints are protected by API authentication and authorization.
// Unprotected installers' endpoints do not have any additional protection added.
func (c *MasterConfig) Run(protected []APIInstaller, unprotected []APIInstaller) {
	var extra []string

	safe := genericapiserver.NewHandlerContainer(http.NewServeMux(), kapi.Codecs)
	open := genericapiserver.NewHandlerContainer(http.NewServeMux(), kapi.Codecs)

	// enforce authentication on protected endpoints
	protected = append(protected, APIInstallFunc(c.InstallProtectedAPI))
	for _, i := range protected {
		msgs, err := i.InstallAPI(safe)
		if err != nil {
			glog.Fatalf("error installing api %v", err)
		}
		extra = append(extra, msgs...)
	}
	handler := c.versionSkewFilter(safe)
	handler = c.authorizationFilter(handler)
	handler = c.impersonationFilter(handler)
	// audit handler must come before the impersonationFilter to read the original user
	handler = c.auditHandler(handler)
	handler = authenticationHandlerFilter(handler, c.Authenticator, c.getRequestContextMapper())
	handler = namespacingFilter(handler, c.getRequestContextMapper())
	handler = cacheControlFilter(handler, "no-store") // protected endpoints should not be cached

	// unprotected resources
	unprotected = append(unprotected, APIInstallFunc(c.InstallUnprotectedAPI))
	for _, i := range unprotected {
		msgs, err := i.InstallAPI(open)
		if err != nil {
			glog.Fatalf("error installing api %v", err)
		}
		extra = append(extra, msgs...)
	}

	var kubeAPILevels []string
	if c.Options.KubernetesMasterConfig != nil {
		kubeAPILevels = configapi.GetEnabledAPIVersionsForGroup(*c.Options.KubernetesMasterConfig, kapi.GroupName)
	}

	handler = indexAPIPaths(c.Options.APILevels, kubeAPILevels, handler)

	open.Handle("/", handler)

	// install swagger
	swaggerConfig := swagger.Config{
		WebServicesUrl:   c.Options.MasterPublicURL,
		WebServices:      append(safe.RegisteredWebServices(), open.RegisteredWebServices()...),
		ApiPath:          swaggerAPIPrefix,
		PostBuildHandler: customizeSwaggerDefinition,
	}
	// log nothing from swagger
	swagger.LogInfo = func(format string, v ...interface{}) {}
	swagger.RegisterSwaggerService(swaggerConfig, open)
	extra = append(extra, fmt.Sprintf("Started Swagger Schema API at %%s%s", swaggerAPIPrefix))

	openAPIConfig := openapi.Config{
		SwaggerConfig:  &swaggerConfig,
		IgnorePrefixes: []string{"/swaggerapi"},
		Info: &spec.Info{
			InfoProps: spec.InfoProps{
				Title:   "OpenShift API (with Kubernetes)",
				Version: version.Get().String(),
				License: &spec.License{
					Name: "Apache 2.0 (ASL2.0)",
					URL:  "http://www.apache.org/licenses/LICENSE-2.0",
				},
				Description: heredoc.Doc(`
					OpenShift provides builds, application lifecycle, image content management,
					and administrative policy on top of Kubernetes. The API allows consistent
					management of those objects.

					All API operations are authenticated via an Authorization bearer token that
					is provided for service accounts as a generated secret (in JWT form) or via
					the native OAuth endpoint located at /oauth/authorize. Core infrastructure
					components may use client certificates that require no authentication.

					All API operations return a 'resourceVersion' string that represents the
					version of the object in the underlying storage. The standard LIST operation
					performs a snapshot read of the underlying objects, returning a resourceVersion
					representing a consistent version of the listed objects. The WATCH operation
					allows all updates to a set of objects after the provided resourceVersion to
					be observed by a client. By listing and beginning a watch from the returned
					resourceVersion, clients may observe a consistent view of the state of one
					or more objects. Note that WATCH always returns the update after the provided
					resourceVersion. Watch may be extended a limited time in the past - using
					etcd 2 the watch window is 1000 events (which on a large cluster may only
					be a few tens of seconds) so clients must explicitly handle the "watch
					too old error" by re-listing.

					Objects are divided into two rough categories - those that have a lifecycle
					and must reflect the state of the cluster, and those that have no state.
					Objects with lifecycle typically have three main sections:

					* 'metadata' common to all objects
					* a 'spec' that represents the desired state
					* a 'status' that represents how much of the desired state is reflected on
					  the cluster at the current time

					Objects that have no state have 'metadata' but may lack a 'spec' or 'status'
					section.

					Objects are divided into those that are namespace scoped (only exist inside
					of a namespace) and those that are cluster scoped (exist outside of
					a namespace). A namespace scoped resource will be deleted when the namespace
					is deleted and cannot be created if the namespace has not yet been created
					or is in the process of deletion. Cluster scoped resources are typically
					only accessible to admins - resources like nodes, persistent volumes, and
					cluster policy.

					All objects have a schema that is a combination of the 'kind' and
					'apiVersion' fields. This schema is additive only for any given version -
					no backwards incompatible changes are allowed without incrementing the
					apiVersion. The server will return and accept a number of standard
					responses that share a common schema - for instance, the common
					error type is 'unversioned.Status' (described below) and will be returned
					on any error from the API server.

					The API is available in multiple serialization formats - the default is
					JSON (Accept: application/json and Content-Type: application/json) but
					clients may also use YAML (application/yaml) or the native Protobuf
					schema (application/vnd.kubernetes.protobuf). Note that the format
					of the WATCH API call is slightly different - for JSON it returns newline
					delimited objects while for Protobuf it returns length-delimited frames
					(4 bytes in network-order) that contain a 'versioned.Watch' Protobuf
					object.

					See the OpenShift documentation at https://docs.openshift.org for more
					information.
				`),
			},
		},
		DefaultResponse: &spec.Response{
			ResponseProps: spec.ResponseProps{
				Description: "Default Response.",
			},
		},
	}
	err := openapi.RegisterOpenAPIService(&openAPIConfig, open)
	if err != nil {
		glog.Fatalf("Failed to generate open api spec: %v", err)
	}
	extra = append(extra, fmt.Sprintf("Started OpenAPI Schema at %%s%s", openapi.OpenAPIServePath))

	handler = open

	// add CORS support
	if origins := c.ensureCORSAllowedOrigins(); len(origins) != 0 {
		handler = apiserver.CORS(handler, origins, nil, nil, "true")
	}

	if c.WebConsoleEnabled() {
		handler = assetServerRedirect(handler, c.Options.AssetConfig.PublicURL)
	}

	// Make the outermost filter the requestContextMapper to ensure all components share the same context
	if contextHandler, err := kapi.NewRequestContextFilter(c.getRequestContextMapper(), handler); err != nil {
		glog.Fatalf("Error setting up request context filter: %v", err)
	} else {
		handler = contextHandler
	}

	longRunningRequestCheck := apiserver.BasicLongRunningRequestCheck(longRunningRE, map[string]string{"watch": "true"})
	// TODO: MaxRequestsInFlight should be subdivided by intent, type of behavior, and speed of
	// execution - updates vs reads, long reads vs short reads, fat reads vs skinny reads.
	if c.Options.ServingInfo.MaxRequestsInFlight > 0 {
		sem := make(chan bool, c.Options.ServingInfo.MaxRequestsInFlight)
		handler = apiserver.MaxInFlightLimit(sem, longRunningRequestCheck, handler)
	}

	c.serve(handler, extra)

	// Attempt to verify the server came up for 20 seconds (100 tries * 100ms, 100ms timeout per try)
	cmdutil.WaitForSuccessfulDial(c.TLS, c.Options.ServingInfo.BindNetwork, c.Options.ServingInfo.BindAddress, 100*time.Millisecond, 100*time.Millisecond, 100)
}
Example #21
		s := OverrideVersion.GitCommit
		if len(s) > 7 {
			s = s[:7]
		}
		return s, true
	case "version":
		s := OverrideVersion.GitVersion
		seg := strings.SplitN(s, "-", 2)
		return seg[0], true
	default:
		return "", false
	}
}

// Env is a KeyFunc which always returns a string
func Env(key string) (string, bool) {
	return os.Getenv(key), true
}

// EnvPresent is a KeyFunc which returns an environment variable if it is present.
func EnvPresent(key string) (string, bool) {
	s := os.Getenv(key)
	if len(s) == 0 {
		return "", false
	}
	return s, true
}

// OverrideVersion is the latest version, exposed for testing.
var OverrideVersion = version.Get()
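
A small sketch contrasting the two KeyFuncs; the variable name, fallback, and helper are illustrative.

// profileOrDefault is hypothetical: Env always reports true (even for empty values),
// while EnvPresent lets the caller fall back when the variable is unset.
func profileOrDefault() string {
	if v, ok := EnvPresent("OPENSHIFT_PROFILE"); ok { // variable name is illustrative
		return v
	}
	return "default"
}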
Example #22
func StartMaster(openshiftMasterConfig *configapi.MasterConfig) error {
	glog.Infof("Starting master on %s (%s)", openshiftMasterConfig.ServingInfo.BindAddress, version.Get().String())
	glog.Infof("Public master address is %s", openshiftMasterConfig.AssetConfig.MasterPublicURL)
	if len(openshiftMasterConfig.DisabledFeatures) > 0 {
		glog.V(4).Infof("Disabled features: %s", strings.Join(openshiftMasterConfig.DisabledFeatures, ", "))
	}

	if openshiftMasterConfig.EtcdConfig != nil {
		etcd.RunEtcd(openshiftMasterConfig.EtcdConfig)
	}

	// Allow privileged containers
	// TODO: make this configurable and not the default https://github.com/openshift/origin/issues/662
	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged:    true,
		HostNetworkSources: []string{kubelet.ApiserverSource, kubelet.FileSource},
	})

	openshiftConfig, err := origin.BuildMasterConfig(*openshiftMasterConfig)
	if err != nil {
		return err
	}

	// verify we can connect to etcd with the provided config
	if err := etcd.TestEtcdClient(openshiftConfig.EtcdClient); err != nil {
		return err
	}

	// Must start policy caching immediately
	openshiftConfig.RunGroupCache()
	openshiftConfig.RunPolicyCache()
	openshiftConfig.RunProjectCache()

	unprotectedInstallers := []origin.APIInstaller{}

	if openshiftMasterConfig.OAuthConfig != nil {
		authConfig, err := origin.BuildAuthConfig(*openshiftMasterConfig)
		if err != nil {
			return err
		}
		unprotectedInstallers = append(unprotectedInstallers, authConfig)
	}

	var standaloneAssetConfig *origin.AssetConfig
	if openshiftConfig.WebConsoleEnabled() {
		config, err := origin.BuildAssetConfig(*openshiftMasterConfig.AssetConfig)
		if err != nil {
			return err
		}

		if openshiftMasterConfig.AssetConfig.ServingInfo.BindAddress == openshiftMasterConfig.ServingInfo.BindAddress {
			unprotectedInstallers = append(unprotectedInstallers, config)
		} else {
			standaloneAssetConfig = config
		}
	}

	startKubeMaster, kubeMasterConfig, err := buildKubernetesMasterConfig(openshiftConfig)
	if err != nil {
		return err
	}
	if startKubeMaster {
		openshiftConfig.Run([]origin.APIInstaller{kubeMasterConfig}, unprotectedInstallers)
	} else {
		_, kubeMasterConfig, err := configapi.GetKubeClient(openshiftConfig.Options.MasterClients.ExternalKubernetesKubeConfig)
		if err != nil {
			return err
		}
		proxy := &kubernetes.ProxyConfig{
			ClientConfig: kubeMasterConfig,
		}
		openshiftConfig.Run([]origin.APIInstaller{proxy}, unprotectedInstallers)
	}

	glog.Infof("Using images from %q", openshiftConfig.ImageFor("<component>"))

	if standaloneAssetConfig != nil {
		standaloneAssetConfig.Run()
	}
	if openshiftMasterConfig.DNSConfig != nil {
		openshiftConfig.RunDNSServer()
	}

	openshiftConfig.RunProjectAuthorizationCache()

	// controllers don't block startup
	go func() {
		if err := StartControllers(openshiftConfig, kubeMasterConfig); err != nil {
			glog.Fatal(err)
		}
	}()

	return nil
}