func getFabric8BinLocation() string { home := homedir.HomeDir() if home == "" { util.Fatalf("No user home environment variable found for OS %s", runtime.GOOS) } return filepath.Join(home, ".fabric8", "bin") }
// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions. // Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make // sure existing config files are migrated to their new locations properly. func currentMigrationRules() map[string]string { oldRecommendedHomeFile := path.Join(homedir.HomeDir(), ".kube/.config") oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), OpenShiftConfigHomeDirFileName) migrationRules := map[string]string{} migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile if runtime.GOOS == "windows" { migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile } return migrationRules }
func (f *discoveryFactory) DiscoveryClient() (discovery.CachedDiscoveryInterface, error) { cfg, err := f.clientConfig.ClientConfig() if err != nil { return nil, err } discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) if err != nil { return nil, err } cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube", "cache", "discovery"), cfg.Host) return NewCachedDiscoveryClient(discoveryClient, cacheDir, time.Duration(10*time.Minute)), nil }
func isInstalled(isMinishift bool) bool { home := homedir.HomeDir() if home == "" { util.Fatalf("No user home environment variable found for OS %s", runtime.GOOS) } // check if we can find a local kube config file if _, err := os.Stat(home + "/.kube/config"); os.IsNotExist(err) { return false } // check for kubectl _, err := exec.LookPath(kubectl) if err != nil { return false } if isMinishift { // check for minishift _, err = exec.LookPath(minishift) if err != nil { return false } // check for oc client _, err = exec.LookPath(oc) if err != nil { return false } } else { // check for minikube _, err = exec.LookPath(minikube) if err != nil { return false } } return true }
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
clientcmdlatest "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest"
"k8s.io/kubernetes/pkg/runtime"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/homedir"
)

// Flag/env-var names and default file names used for kubeconfig discovery.
const (
	RecommendedConfigPathFlag   = "kubeconfig"
	RecommendedConfigPathEnvVar = "KUBECONFIG"
	RecommendedHomeDir          = ".kube"
	RecommendedFileName         = "config"
	RecommendedSchemaName       = "schema"
)

// Default on-disk locations (~/.kube/config and ~/.kube/schema), resolved
// once at package init from the user's home directory.
var RecommendedHomeFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedFileName)
var RecommendedSchemaFile = path.Join(homedir.HomeDir(), RecommendedHomeDir, RecommendedSchemaName)

// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions.
// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make
// sure existing config files are migrated to their new locations properly.
func currentMigrationRules() map[string]string {
	// Legacy location from older releases: $HOME/.kube/.kubeconfig.
	oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig")
	// NOTE(review): $HOME is typically unset on Windows — presumably the
	// intent was USERPROFILE or homedir.HomeDir(); confirm against upstream.
	oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName)
	migrationRules := map[string]string{}
	migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile
	if goruntime.GOOS == "windows" {
		migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile
	}
	return migrationRules
// NewFactory creates an object that holds common methods across all OpenShift commands.
// It wraps the upstream kubectl Factory and overrides each hook that must be
// aware of OpenShift-native kinds (DeploymentConfig, Build, Role, ...), while
// delegating everything else to the saved kube implementations.
func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
	restMapper := registered.RESTMapper()
	// Per-version client/config cache shared by all the closures below.
	clients := &clientCache{
		clients: make(map[string]*client.Client),
		configs: make(map[string]*restclient.Config),
		loader:  clientConfig,
	}
	w := &Factory{
		Factory:                cmdutil.NewFactory(clientConfig),
		OpenShiftClientConfig:  clientConfig,
		clients:                clients,
		ImageResolutionOptions: &imageResolutionOptions{},
	}
	// Object returns the mapper/typer pair, preferring a discovery-backed
	// mapper when the server is new enough; falls back to hardcoded mappings.
	w.Object = func(bool) (meta.RESTMapper, runtime.ObjectTyper) {
		defaultMapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: restMapper}}
		defaultTyper := api.Scheme
		// Output using whatever version was negotiated in the client cache. The
		// version we decode with may not be the same as what the server requires.
		cfg, err := clients.ClientConfigForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}
		cmdApiVersion := unversioned.GroupVersion{}
		if cfg.GroupVersion != nil {
			cmdApiVersion = *cfg.GroupVersion
		}
		// at this point we've negotiated and can get the client
		oclient, err := clients.ClientForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}
		cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host)
		cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(oclient.RESTClient), cacheDir, time.Duration(10*time.Minute))
		// if we can't find the server version or its too old to have Kind information in the discovery doc, skip the discovery RESTMapper
		// and use our hardcoded levels
		mapper := registered.RESTMapper()
		if serverVersion, err := cachedDiscoverClient.ServerVersion(); err == nil && useDiscoveryRESTMapper(serverVersion.GitVersion) {
			mapper = restmapper.NewDiscoveryRESTMapper(cachedDiscoverClient)
		}
		mapper = NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper})
		return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme
	}
	// UnstructuredObject builds a mapper/typer from live discovery data and
	// registers any unknown group/versions as third-party so validation passes.
	w.UnstructuredObject = func() (meta.RESTMapper, runtime.ObjectTyper, error) {
		// load a discovery client from the default config
		cfg, err := clients.ClientConfigForVersion(nil)
		if err != nil {
			return nil, nil, err
		}
		dc, err := discovery.NewDiscoveryClientForConfig(cfg)
		if err != nil {
			return nil, nil, err
		}
		cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host)
		cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(dc.RESTClient), cacheDir, time.Duration(10*time.Minute))
		// enumerate all group resources
		groupResources, err := discovery.GetAPIGroupResources(cachedDiscoverClient)
		if err != nil {
			return nil, nil, err
		}
		// Register unknown APIs as third party for now to make
		// validation happy. TODO perhaps make a dynamic schema
		// validator to avoid this.
		for _, group := range groupResources {
			for _, version := range group.Group.Versions {
				gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version}
				if !registered.IsRegisteredVersion(gv) {
					registered.AddThirdPartyAPIGroupVersions(gv)
				}
			}
		}
		// construct unstructured mapper and typer
		mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured)
		typer := discovery.NewUnstructuredObjectTyper(groupResources)
		return NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper}), typer, nil
	}
	// ClientForMapping: route Origin kinds to the OpenShift client, else kube.
	kClientForMapping := w.Factory.ClientForMapping
	w.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		}
		return kClientForMapping(mapping)
	}
	// UnstructuredClientForMapping: build a raw REST client against /oapi for
	// legacy Origin kinds, /apis otherwise; delegate non-Origin kinds to kube.
	kUnstructuredClientForMapping := w.Factory.UnstructuredClientForMapping
	w.UnstructuredClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := client.SetOpenShiftDefaults(cfg); err != nil {
				return nil, err
			}
			cfg.APIPath = "/apis"
			if mapping.GroupVersionKind.Group == api.GroupName {
				cfg.APIPath = "/oapi"
			}
			gv := mapping.GroupVersionKind.GroupVersion()
			cfg.ContentConfig = dynamic.ContentConfig()
			cfg.GroupVersion = &gv
			return restclient.RESTClientFor(cfg)
		}
		return kUnstructuredClientForMapping(mapping)
	}
	// Save original Describer function
	kDescriberFunc := w.Factory.Describer
	w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			oClient, kClient, err := w.Clients()
			if err != nil {
				return nil, fmt.Errorf("unable to create client %s: %v", mapping.GroupVersionKind.Kind, err)
			}
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			cfg, err := clients.ClientConfigForVersion(&mappingVersion)
			if err != nil {
				return nil, fmt.Errorf("unable to load a client %s: %v", mapping.GroupVersionKind.Kind, err)
			}
			describer, ok := describe.DescriberFor(mapping.GroupVersionKind.GroupKind(), oClient, kClient, cfg.Host)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
			}
			return describer, nil
		}
		return kDescriberFunc(mapping)
	}
	// Scaler: DeploymentConfig gets its own scaler; everything else kube.
	kScalerFunc := w.Factory.Scaler
	w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
		if mapping.GroupVersionKind.GroupKind() == deployapi.Kind("DeploymentConfig") {
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigScaler(oc, kc), nil
		}
		return kScalerFunc(mapping)
	}
	// Reaper: per-kind cascade-delete helpers for Origin kinds.
	kReaperFunc := w.Factory.Reaper
	w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigReaper(oc, kc), nil
		case authorizationapi.Kind("Role"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authorizationreaper.NewRoleReaper(oc, oc), nil
		case authorizationapi.Kind("ClusterRole"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil
		case userapi.Kind("User"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authenticationreaper.NewUserReaper(
				client.UsersInterface(oc),
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case userapi.Kind("Group"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authenticationreaper.NewGroupReaper(
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case buildapi.Kind("BuildConfig"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return buildcmd.NewBuildConfigReaper(oc), nil
		}
		return kReaperFunc(mapping)
	}
	// Generators: merge kube generators with Origin ones; Origin wins on clashes.
	kGenerators := w.Factory.Generators
	w.Generators = func(cmdName string) map[string]kubectl.Generator {
		originGenerators := DefaultGenerators(cmdName)
		kubeGenerators := kGenerators(cmdName)
		ret := map[string]kubectl.Generator{}
		for k, v := range kubeGenerators {
			ret[k] = v
		}
		for k, v := range originGenerators {
			ret[k] = v
		}
		return ret
	}
	kMapBasedSelectorForObjectFunc := w.Factory.MapBasedSelectorForObject
	w.MapBasedSelectorForObject = func(object runtime.Object) (string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return kubectl.MakeLabels(t.Spec.Selector), nil
		default:
			return kMapBasedSelectorForObjectFunc(object)
		}
	}
	kPortsForObjectFunc := w.Factory.PortsForObject
	w.PortsForObject = func(object runtime.Object) ([]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getPorts(t.Spec.Template.Spec), nil
		default:
			return kPortsForObjectFunc(object)
		}
	}
	// LogsForObject: Origin log endpoints for DeploymentConfig/Build/BuildConfig.
	kLogsForObjectFunc := w.Factory.LogsForObject
	w.LogsForObject = func(object, options runtime.Object) (*restclient.Request, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			dopts, ok := options.(*deployapi.DeploymentLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a DeploymentLogOptions")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil
		case *buildapi.Build:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			if bopts.Version != nil {
				return nil, errors.New("cannot specify a version and a build")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil
		case *buildapi.BuildConfig:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			builds, err := oc.Builds(t.Namespace).List(api.ListOptions{})
			if err != nil {
				return nil, err
			}
			builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigPredicate(t.Name))
			if len(builds.Items) == 0 {
				return nil, fmt.Errorf("no builds found for %q", t.Name)
			}
			if bopts.Version != nil {
				// If a version has been specified, try to get the logs from that build.
				desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version))
				return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil
			}
			// Otherwise use the most recent build for this config.
			sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items)))
			return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil
		default:
			return kLogsForObjectFunc(object, options)
		}
	}
	// Saves current resource name (or alias if any) in PrintOptions. Once
	// saved, it will not be overwritten by the kubernetes resource alias
	// look-up, as it will notice a non-empty value in `options.Kind`.
	w.Printer = func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) {
		if mapping != nil {
			options.Kind = mapping.Resource
			if alias, ok := resourceShortFormFor(mapping.Resource); ok {
				options.Kind = alias
			}
		}
		return describe.NewHumanReadablePrinter(options), nil
	}
	// PrintResourceInfos receives a list of resource infos and prints versioned objects if a generic output format was specified
	// otherwise, it iterates through info objects, printing each resource with a unique printer for its mapping
	// NOTE(review): several error paths below `return nil` instead of the
	// error (PrinterForCommand failure and per-object PrintObj failure are
	// silently swallowed) — confirm whether that is intentional.
	w.PrintResourceInfos = func(cmd *cobra.Command, infos []*resource.Info, out io.Writer) error {
		printer, generic, err := cmdutil.PrinterForCommand(cmd)
		if err != nil {
			return nil
		}
		if !generic {
			for _, info := range infos {
				mapping := info.ResourceMapping()
				printer, err := w.PrinterForMapping(cmd, mapping, false)
				if err != nil {
					return err
				}
				if err := printer.PrintObj(info.Object, out); err != nil {
					return nil
				}
			}
			return nil
		}
		clientConfig, err := w.ClientConfig()
		if err != nil {
			return err
		}
		outputVersion, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion)
		if err != nil {
			return err
		}
		object, err := resource.AsVersionedObject(infos, len(infos) != 1, outputVersion, api.Codecs.LegacyCodec(outputVersion))
		if err != nil {
			return err
		}
		return printer.PrintObj(object, out)
	}
	kCanBeExposed := w.Factory.CanBeExposed
	w.CanBeExposed = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeExposed(kind)
	}
	kCanBeAutoscaled := w.Factory.CanBeAutoscaled
	w.CanBeAutoscaled = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeAutoscaled(kind)
	}
	// AttachablePodForObject: pick the newest active pod matching the DC selector.
	kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject
	w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			_, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			selector := labels.SelectorFromSet(t.Spec.Selector)
			f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
			pod, _, err := cmdutil.GetFirstPod(kc, t.Namespace, selector, 1*time.Minute, f)
			return pod, err
		default:
			return kAttachablePodForObjectFunc(object)
		}
	}
	kUpdatePodSpecForObject := w.Factory.UpdatePodSpecForObject
	w.UpdatePodSpecForObject = func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) {
		switch t := obj.(type) {
		case *deployapi.DeploymentConfig:
			template := t.Spec.Template
			// NOTE(review): when template is nil, the nil value is stored back
			// into t.Spec.Template and the freshly created PodTemplateSpec is
			// never attached to t — the two statements look swapped relative
			// to the obvious intent; confirm against upstream origin.
			if template == nil {
				t.Spec.Template = template
				template = &api.PodTemplateSpec{}
			}
			return true, fn(&template.Spec)
		default:
			return kUpdatePodSpecForObject(obj, fn)
		}
	}
	kProtocolsForObject := w.Factory.ProtocolsForObject
	w.ProtocolsForObject = func(object runtime.Object) (map[string]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getProtocols(t.Spec.Template.Spec), nil
		default:
			return kProtocolsForObject(object)
		}
	}
	kSwaggerSchemaFunc := w.Factory.SwaggerSchema
	w.Factory.SwaggerSchema = func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
		if !latest.OriginKind(gvk) {
			return kSwaggerSchemaFunc(gvk)
		}
		// TODO: we need to register the OpenShift API under the Kube group, and start returning the OpenShift
		// group from the scheme.
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		return w.OriginSwaggerSchema(oc.RESTClient, gvk.GroupVersion())
	}
	w.EditorEnvs = func() []string {
		return []string{"OC_EDITOR", "EDITOR"}
	}
	w.PrintObjectSpecificMessage = func(obj runtime.Object, out io.Writer) {}
	// PauseObject/ResumeObject: toggle Spec.Paused on DeploymentConfigs.
	// They return (true, nil) when the object is already in the desired state.
	kPauseObjectFunc := w.Factory.PauseObject
	w.Factory.PauseObject = func(object runtime.Object) (bool, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = true
			oc, _, err := w.Clients()
			if err != nil {
				return false, err
			}
			_, err = oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Pause the deployer containers.
			return false, err
		default:
			return kPauseObjectFunc(object)
		}
	}
	kResumeObjectFunc := w.Factory.ResumeObject
	w.Factory.ResumeObject = func(object runtime.Object) (bool, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if !t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = false
			oc, _, err := w.Clients()
			if err != nil {
				return false, err
			}
			_, err = oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Resume the deployer containers.
			return false, err
		default:
			return kResumeObjectFunc(object)
		}
	}
	// ResolveImage: resolve via imagestreams unless the source is plain docker.
	kResolveImageFunc := w.Factory.ResolveImage
	w.Factory.ResolveImage = func(image string) (string, error) {
		options := w.ImageResolutionOptions.(*imageResolutionOptions)
		if imageutil.IsDocker(options.Source) {
			return kResolveImageFunc(image)
		}
		oc, _, err := w.Clients()
		if err != nil {
			return "", err
		}
		namespace, _, err := w.DefaultNamespace()
		if err != nil {
			return "", err
		}
		return imageutil.ResolveImagePullSpec(oc, oc, options.Source, image, namespace)
	}
	kHistoryViewerFunc := w.Factory.HistoryViewer
	w.Factory.HistoryViewer = func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigHistoryViewer(oc, kc), nil
		}
		return kHistoryViewerFunc(mapping)
	}
	kRollbackerFunc := w.Factory.Rollbacker
	w.Factory.Rollbacker = func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigRollbacker(oc), nil
		}
		return kRollbackerFunc(mapping)
	}
	kStatusViewerFunc := w.Factory.StatusViewer
	w.Factory.StatusViewer = func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deploycmd.NewDeploymentConfigStatusViewer(oc), nil
		}
		return kStatusViewerFunc(mapping)
	}
	return w
}
DefaultDNSPort   = 53
AlternateDNSPort = 8053
// Shell one-liner (formatted with a space-separated list of candidate
// names) that echoes the first name whose node config dir exists under
// /var/lib/origin/openshift.local.config.
cmdDetermineNodeHost = "for name in %s; do ls /var/lib/origin/openshift.local.config/node-$name &> /dev/null && echo $name && break; done"
)

var (
	// Host directories bind-mounted into the OpenShift container.
	openShiftContainerBinds = []string{
		"/var/log:/var/log:rw",
		"/var/run:/var/run:rw",
		"/sys:/sys:ro",
		"/var/lib/docker:/var/lib/docker",
	}
	// BasePorts are always used; DNS is added on the default or alternate port.
	BasePorts             = []int{80, 443, 4001, 7001, 8443, 10250}
	DefaultPorts          = append(BasePorts, DefaultDNSPort)
	PortsWithAlternateDNS = append(BasePorts, AlternateDNSPort)
	// SocatPidFile lives under the user's OpenShift config home; per its
	// name it records the pid of a socat process for port 8443.
	SocatPidFile = filepath.Join(homedir.HomeDir(), cliconfig.OpenShiftConfigHomeDir, "socat-8443.pid")
)

// Helper contains methods and utilities to help with OpenShift startup
type Helper struct {
	hostHelper    *host.HostHelper
	dockerHelper  *dockerhelper.Helper
	execHelper    *dockerexec.ExecHelper
	runHelper     *run.RunHelper
	client        *docker.Client
	publicHost    string // public hostname used to reach the cluster
	image         string // OpenShift image to start
	containerName string // name of the OpenShift container
	routingSuffix string // suffix for generated routes
}
// ClientConfig returns a complete client config
ClientConfig() (*restConfig, error)
}

// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig.
// It builds a deferred-loading client config from the OpenShift loading rules,
// with all command-line override support stripped out.
func defaultClientConfig() clientConfig {
	loadingRules := newOpenShiftClientConfigLoadingRules()
	// REMOVED: Allowing command-line overriding of loadingRules
	// REMOVED: clientcmd.ConfigOverrides
	clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules)
	return clientConfig
}

// recommendedHomeFile is the fallback kubeconfig location (~/.kube/config).
var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config")

// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules.
// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift.
// 1. --config value
// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file
func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules {
	chain := []string{}
	// KUBECONFIG may hold an OS-specific path list; split it into the chain.
	envVarFile := os.Getenv("KUBECONFIG")
	if len(envVarFile) != 0 {
		chain = append(chain, filepath.SplitList(envVarFile)...)
	} else {
		chain = append(chain, recommendedHomeFile)
	}
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
kubecmdconfig "k8s.io/kubernetes/pkg/kubectl/cmd/config"
kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/homedir"
)

// Names and locations used for OpenShift client configuration (kubeconfig).
const (
	OpenShiftConfigPathEnvVar      = "KUBECONFIG"
	OpenShiftConfigFlagName        = "config"
	OpenShiftConfigHomeDir         = ".kube"
	OpenShiftConfigHomeFileName    = "config"
	OpenShiftConfigHomeDirFileName = OpenShiftConfigHomeDir + "/" + OpenShiftConfigHomeFileName
)

// RecommendedHomeFile is the default config location (~/.kube/config),
// resolved once at package init.
var RecommendedHomeFile = path.Join(homedir.HomeDir(), OpenShiftConfigHomeDirFileName)

// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions.
// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make
// sure existing config files are migrated to their new locations properly.
func currentMigrationRules() map[string]string {
	// Legacy hidden-file location (~/.kube/.config).
	oldRecommendedHomeFile := path.Join(homedir.HomeDir(), ".kube/.config")
	// NOTE(review): $HOME is typically unset on Windows — confirm this is the
	// intended source for the Windows migration path.
	oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), OpenShiftConfigHomeDirFileName)
	migrationRules := map[string]string{}
	migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile
	if runtime.GOOS == "windows" {
		migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile
	}
	return migrationRules
}
import (
	"path/filepath"

	"github.com/jimmidyson/minishift/pkg/version"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	"k8s.io/kubernetes/pkg/util/homedir"
)

// MachineName is the name to use for the VM.
const MachineName = "minishift"

// APIServerPort is the port that the API server should listen on.
const APIServerPort = 8443

// Minipath is the per-user minishift directory (~/.minishift).
// Fix for windows
var Minipath = filepath.Join(homedir.HomeDir(), ".minishift") // TODO: Fix for windows

// KubeconfigPath is the path to the Kubernetes client config
var KubeconfigPath = clientcmd.RecommendedHomeFile

// MinikubeContext is the kubeconfig context name used for minishift
const MinikubeContext = "minishift"

// MiniShiftEnvPrefix is the prefix for the environmental variables
const MiniShiftEnvPrefix = "MINISHIFT"

// MakeMiniPath is a utility to calculate a relative path to our directory.
func MakeMiniPath(fileName ...string) string {
	args := []string{Minipath}
	args = append(args, fileName...)
import (
	"path/filepath"

	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	"k8s.io/kubernetes/pkg/util/homedir"
	"k8s.io/kubernetes/pkg/version"
)

// MachineName is the name to use for the VM.
const MachineName = "minikube"

// APIServerPort is the port that the API server should listen on.
const APIServerPort = 8443

// Minipath is the path to the user's minikube dir
var Minipath = filepath.Join(homedir.HomeDir(), ".minikube")

// KubeconfigPath is the path to the Kubernetes client config
var KubeconfigPath = clientcmd.RecommendedHomeFile

// MinikubeContext is the kubeconfig context name used for minikube
const MinikubeContext = "minikube"

// MinikubeEnvPrefix is the prefix for the environmental variables
const MinikubeEnvPrefix = "MINIKUBE"

// MakeMiniPath is a utility to calculate a relative path to our directory.
func MakeMiniPath(fileName ...string) string {
	args := []string{Minipath}
	args = append(args, fileName...)
	return filepath.Join(args...)
// main wires up the gofabric8 cobra command tree, installs a PersistentPreRun
// hook that (outside batch mode) performs a one-time self-update check, then
// dispatches to the requested subcommand.
func main() {
	cmds := &cobra.Command{
		Use:   "gofabric8",
		Short: "gofabric8 is used to validate & deploy fabric8 components on to your Kubernetes or OpenShift environment",
		Long:  `gofabric8 is used to validate & deploy fabric8 components on to your Kubernetes or OpenShift environment Find more information at http://fabric8.io.`,
		Run:   runHelp,
	}
	cmds.PersistentFlags().String("fabric8-version", "latest", "fabric8 version")
	cmds.PersistentFlags().BoolP("yes", "y", false, "assume yes")
	cmds.PersistentFlags().BoolP(batchFlag, "b", false, "Run in batch mode to avoid prompts")
	f := cmdutil.NewFactory(nil)
	f.BindFlags(cmds.PersistentFlags())
	// The update check must run at most once per invocation; the previous
	// PersistentPreRun (if any) is preserved and chained afterwards.
	updated := false
	oldHandler := cmds.PersistentPreRun
	cmds.PersistentPreRun = func(cmd *cobra.Command, args []string) {
		if !updated {
			updated = true
			flag := cmds.Flags().Lookup(batchFlag)
			batch := false
			if flag != nil {
				batch = flag.Value.String() == "true"
			}
			// Skip the update check entirely in batch mode (no prompts).
			if !batch {
				home := homedir.HomeDir()
				if home == "" {
					util.Fatalf("No user home environment variable found for OS %s", runtime.GOOS)
				}
				writeFileLocation := home + hiddenFolder
				// Best-effort: failures below are reported but do not abort.
				err := os.MkdirAll(writeFileLocation, 0700)
				if err != nil {
					util.Errorf("Unable to create directory to store update file %s %v\n", writeFileLocation, err)
				}
				localVersion, err := version.GetSemverVersion()
				if err != nil {
					util.Errorf("Unable to get local version %v", err)
				}
				viper.SetDefault(config.WantUpdateNotification, true)
				viper.SetDefault(config.ReminderWaitPeriodInHours, 24)
				update.MaybeUpdate(os.Stdout, githubOrg, githubRepo, binaryName, writeFileLocation+lastUpdateCheck, localVersion)
			}
		}
		if oldHandler != nil {
			oldHandler(cmd, args)
		}
	}
	// Register all subcommands.
	cmds.AddCommand(commands.NewCmdConsole(f))
	cmds.AddCommand(commands.NewCmdDeploy(f))
	cmds.AddCommand(commands.NewCmdDockerEnv(f))
	cmds.AddCommand(commands.NewCmdIngress(f))
	cmds.AddCommand(commands.NewCmdInstall(f))
	cmds.AddCommand(commands.NewCmdPull(f))
	cmds.AddCommand(commands.NewCmdRoutes(f))
	cmds.AddCommand(commands.NewCmdRun(f))
	cmds.AddCommand(commands.NewCmdSecrets(f))
	cmds.AddCommand(commands.NewCmdService(f))
	cmds.AddCommand(commands.NewCmdStart(f))
	cmds.AddCommand(commands.NewCmdValidate(f))
	cmds.AddCommand(commands.NewCmdVersion())
	cmds.AddCommand(commands.NewCmdVolumes(f))
	cmds.Execute()
}
// NewFactory creates an object that holds common methods across all OpenShift commands.
//
// It wraps the upstream kubectl factory (cmdutil.NewFactory) and overrides a
// series of its function fields so that OpenShift-native kinds (DeploymentConfig,
// Build, BuildConfig, Role, User, Group, ...) are handled by OpenShift clients
// and helpers, while everything else falls through to the saved kubectl
// implementation. Each override follows the same pattern: capture the original
// function in a kXxx variable first, then install a closure that delegates to
// it in the default case.
func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
	restMapper := registered.RESTMapper()
	// clientCache lazily builds and memoizes versioned clients/configs.
	clients := &clientCache{
		clients: make(map[string]*client.Client),
		configs: make(map[string]*restclient.Config),
		loader:  clientConfig,
	}
	w := &Factory{
		Factory:               cmdutil.NewFactory(clientConfig),
		OpenShiftClientConfig: clientConfig,
		clients:               clients,
	}
	// Object returns a REST mapper/typer pair; falls back to the static
	// mapper when client negotiation fails.
	w.Object = func(bool) (meta.RESTMapper, runtime.ObjectTyper) {
		defaultMapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: restMapper}}
		defaultTyper := api.Scheme
		// Output using whatever version was negotiated in the client cache. The
		// version we decode with may not be the same as what the server requires.
		cfg, err := clients.ClientConfigForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}
		cmdApiVersion := unversioned.GroupVersion{}
		if cfg.GroupVersion != nil {
			cmdApiVersion = *cfg.GroupVersion
		}
		// at this point we've negotiated and can get the client
		oclient, err := clients.ClientForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}
		// Discovery results are cached on disk, keyed by host, for 10 minutes.
		cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host)
		cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(oclient.RESTClient), cacheDir, time.Duration(10*time.Minute))
		mapper := restmapper.NewDiscoveryRESTMapper(cachedDiscoverClient)
		mapper = NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper})
		return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme
	}
	// Route Origin kinds to an OpenShift REST client; delegate the rest.
	kClientForMapping := w.Factory.ClientForMapping
	w.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		}
		return kClientForMapping(mapping)
	}
	// Save original Describer function
	kDescriberFunc := w.Factory.Describer
	w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			oClient, kClient, err := w.Clients()
			if err != nil {
				return nil, fmt.Errorf("unable to create client %s: %v", mapping.GroupVersionKind.Kind, err)
			}
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			cfg, err := clients.ClientConfigForVersion(&mappingVersion)
			if err != nil {
				return nil, fmt.Errorf("unable to load a client %s: %v", mapping.GroupVersionKind.Kind, err)
			}
			describer, ok := describe.DescriberFor(mapping.GroupVersionKind.GroupKind(), oClient, kClient, cfg.Host)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
			}
			return describer, nil
		}
		return kDescriberFunc(mapping)
	}
	// DeploymentConfig gets a dedicated scaler; everything else delegates.
	kScalerFunc := w.Factory.Scaler
	w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}
		if mapping.GroupVersionKind.GroupKind() == deployapi.Kind("DeploymentConfig") {
			return deployscaler.NewDeploymentConfigScaler(oc, kc), nil
		}
		return kScalerFunc(mapping)
	}
	// Reapers for OpenShift kinds (cascading delete helpers).
	kReaperFunc := w.Factory.Reaper
	w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deployreaper.NewDeploymentConfigReaper(oc, kc), nil
		case authorizationapi.Kind("Role"):
			return authorizationreaper.NewRoleReaper(oc, oc), nil
		case authorizationapi.Kind("ClusterRole"):
			return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil
		case userapi.Kind("User"):
			return authenticationreaper.NewUserReaper(
				client.UsersInterface(oc),
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case userapi.Kind("Group"):
			return authenticationreaper.NewGroupReaper(
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case buildapi.Kind("BuildConfig"):
			return buildreaper.NewBuildConfigReaper(oc), nil
		}
		return kReaperFunc(mapping)
	}
	// Merge kube and origin generators; origin entries win on key collision
	// because they are copied second.
	kGenerators := w.Factory.Generators
	w.Generators = func(cmdName string) map[string]kubectl.Generator {
		originGenerators := DefaultGenerators(cmdName)
		kubeGenerators := kGenerators(cmdName)
		ret := map[string]kubectl.Generator{}
		for k, v := range kubeGenerators {
			ret[k] = v
		}
		for k, v := range originGenerators {
			ret[k] = v
		}
		return ret
	}
	kMapBasedSelectorForObjectFunc := w.Factory.MapBasedSelectorForObject
	w.MapBasedSelectorForObject = func(object runtime.Object) (string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return kubectl.MakeLabels(t.Spec.Selector), nil
		default:
			return kMapBasedSelectorForObjectFunc(object)
		}
	}
	kPortsForObjectFunc := w.Factory.PortsForObject
	w.PortsForObject = func(object runtime.Object) ([]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getPorts(t.Spec.Template.Spec), nil
		default:
			return kPortsForObjectFunc(object)
		}
	}
	// Log requests for DeploymentConfig/Build/BuildConfig; a BuildConfig
	// resolves to one of its builds (a specific version, or the newest).
	kLogsForObjectFunc := w.Factory.LogsForObject
	w.LogsForObject = func(object, options runtime.Object) (*restclient.Request, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			dopts, ok := options.(*deployapi.DeploymentLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a DeploymentLogOptions")
			}
			return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil
		case *buildapi.Build:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			if bopts.Version != nil {
				return nil, errors.New("cannot specify a version and a build")
			}
			return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil
		case *buildapi.BuildConfig:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			builds, err := oc.Builds(t.Namespace).List(api.ListOptions{})
			if err != nil {
				return nil, err
			}
			builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigPredicate(t.Name))
			if len(builds.Items) == 0 {
				return nil, fmt.Errorf("no builds found for %q", t.Name)
			}
			if bopts.Version != nil {
				// If a version has been specified, try to get the logs from that build.
				desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version))
				return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil
			}
			// Newest build first.
			sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items)))
			return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil
		default:
			return kLogsForObjectFunc(object, options)
		}
	}
	// Always use the origin-aware human-readable printer.
	w.Printer = func(mapping *meta.RESTMapping, noHeaders, withNamespace, wide bool, showAll bool, showLabels, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
		return describe.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
	}
	kCanBeExposed := w.Factory.CanBeExposed
	w.CanBeExposed = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeExposed(kind)
	}
	kCanBeAutoscaled := w.Factory.CanBeAutoscaled
	w.CanBeAutoscaled = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeAutoscaled(kind)
	}
	// For a DeploymentConfig, pick the first pod matching its selector,
	// preferring active pods (sorted newest-active first).
	kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject
	w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			_, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			selector := labels.SelectorFromSet(t.Spec.Selector)
			f := func(pods []*api.Pod) sort.Interface {
				return sort.Reverse(controller.ActivePods(pods))
			}
			pod, _, err := cmdutil.GetFirstPod(kc, t.Namespace, selector, 1*time.Minute, f)
			return pod, err
		default:
			return kAttachablePodForObjectFunc(object)
		}
	}
	kProtocolsForObject := w.Factory.ProtocolsForObject
	w.ProtocolsForObject = func(object runtime.Object) (map[string]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getProtocols(t.Spec.Template.Spec), nil
		default:
			return kProtocolsForObject(object)
		}
	}
	kSwaggerSchemaFunc := w.Factory.SwaggerSchema
	w.Factory.SwaggerSchema = func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
		if !latest.OriginKind(gvk) {
			return kSwaggerSchemaFunc(gvk)
		}
		// TODO: we need to register the OpenShift API under the Kube group, and start returning the OpenShift
		// group from the scheme.
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		return w.OriginSwaggerSchema(oc.RESTClient, gvk.GroupVersion())
	}
	// Editor env vars checked in order; OC_EDITOR takes precedence.
	w.EditorEnvs = func() []string {
		return []string{"OC_EDITOR", "EDITOR"}
	}
	// Intentionally a no-op for OpenShift.
	w.PrintObjectSpecificMessage = func(obj runtime.Object, out io.Writer) {}
	// Pause/Resume toggle Spec.Paused on a DeploymentConfig; the bool result
	// reports whether the object was ALREADY in the requested state.
	kPauseObjectFunc := w.Factory.PauseObject
	w.Factory.PauseObject = func(object runtime.Object) (bool, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return false, err
		}
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = true
			_, err := oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Pause the deployer containers.
			return false, err
		default:
			return kPauseObjectFunc(object)
		}
	}
	kResumeObjectFunc := w.Factory.ResumeObject
	w.Factory.ResumeObject = func(object runtime.Object) (bool, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return false, err
		}
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if !t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = false
			_, err := oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Resume the deployer containers.
			return false, err
		default:
			return kResumeObjectFunc(object)
		}
	}
	kHistoryViewerFunc := w.Factory.HistoryViewer
	w.Factory.HistoryViewer = func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deploycmd.NewDeploymentConfigHistoryViewer(oc, kc), nil
		}
		return kHistoryViewerFunc(mapping)
	}
	kRollbackerFunc := w.Factory.Rollbacker
	w.Factory.Rollbacker = func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deploycmd.NewDeploymentConfigRollbacker(oc), nil
		}
		return kRollbackerFunc(mapping)
	}
	return w
}