func newBuilderConfigFromEnvironment() (*builderConfig, error) {
	cfg := &builderConfig{}
	var err error

	// build (BUILD)
	buildStr := os.Getenv("BUILD")
	glog.V(4).Infof("$BUILD env var is %s \n", buildStr)
	cfg.build = &api.Build{}
	if err = latest.Codec.DecodeInto([]byte(buildStr), cfg.build); err != nil {
		return nil, fmt.Errorf("unable to parse build: %v", err)
	}

	// sourceSecretsDir (SOURCE_SECRET_PATH)
	cfg.sourceSecretDir = os.Getenv("SOURCE_SECRET_PATH")

	// dockerClient and dockerEndpoint (DOCKER_HOST)
	// usually not set, defaults to docker socket
	cfg.dockerClient, cfg.dockerEndpoint, err = dockerutil.NewHelper().GetClient()
	if err != nil {
		return nil, fmt.Errorf("error obtaining docker client: %v", err)
	}

	// buildsClient (KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT)
	clientConfig, err := kclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to get client config: %v", err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("error obtaining OpenShift client: %v", err)
	}
	cfg.buildsClient = osClient.Builds(cfg.build.Namespace)

	return cfg, nil
}
func main() {
	flag.Usage = usage
	flag.Parse()

	var (
		cfg *kclient.Config
		err error
	)
	if *local {
		cfg = &kclient.Config{Host: fmt.Sprintf("http://localhost:%d", *localPort)}
	} else {
		cfg, err = kclient.InClusterConfig()
		if err != nil {
			glog.Errorf("failed to load config: %v", err)
			flag.Usage()
			os.Exit(1)
		}
	}
	client, err = kclient.New(cfg)
	if err != nil {
		glog.Fatal(err)
	}

	selector, err := labels.Parse(*userLabels)
	if err != nil {
		glog.Fatal(err)
	}
	tc, err := parseTimeCounts(*times, *counts)
	if err != nil {
		glog.Fatal(err)
	}
	if namespace == "" {
		glog.Fatal("POD_NAMESPACE is not set. Set to the namespace of the replication controller if running locally.")
	}
	scaler := scaler{timeCounts: tc, selector: selector}

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)

	glog.Info("starting scaling")
	if err := scaler.Start(); err != nil {
		glog.Fatal(err)
	}
	<-sigChan
	glog.Info("stopping scaling")
	if err := scaler.Stop(); err != nil {
		glog.Fatal(err)
	}
}
func TestFromEnv(t *testing.T) {
	if _, err := unversioned.InClusterConfig(); err != nil {
		t.Skip("This can only be run inside Kubernetes. Skipping.")
	}

	me, err := FromEnv()
	if err != nil {
		t.Errorf("Could not get an environment: %s", err)
	}
	if len(me.Name) == 0 {
		t.Error("Could not get a pod name.")
	}
}
// BuildConfigFromFlags is a helper function that builds configs from a master
// url or a kubeconfig filepath. These are passed in as command line flags for cluster
// components. Warnings should reflect this usage. If neither masterUrl nor kubeconfigPath
// is passed in, we fall back to inClusterConfig. If inClusterConfig fails, we fall back
// to the default config.
func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*client.Config, error) {
	if kubeconfigPath == "" && masterUrl == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
		kubeconfig, err := client.InClusterConfig()
		if err == nil {
			return kubeconfig, nil
		}
		glog.Warning("error creating inClusterConfig, falling back to default config: ", err)
	}
	return NewNonInteractiveDeferredLoadingClientConfig(
		&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig()
}
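// A minimal sketch of how BuildConfigFromFlags might be wired into a
// component's main(). The flag names, the exampleBuildConfigFromFlags name,
// and the client.New call are assumptions drawn from the other snippets in
// this listing, not part of this helper itself.
func exampleBuildConfigFromFlags() {
	masterUrl := flag.String("master", "", "address of the API server")
	kubeconfigPath := flag.String("kubeconfig", "", "path to a kubeconfig file")
	flag.Parse()

	cfg, err := BuildConfigFromFlags(*masterUrl, *kubeconfigPath)
	if err != nil {
		glog.Fatalf("failed to build client config: %v", err)
	}
	c, err := client.New(cfg)
	if err != nil {
		glog.Fatalf("failed to create client: %v", err)
	}
	_ = c // hand the client to the controller loop
}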
// run is responsible for preparing the environment for the actual build.
// It accepts a builder whose Build method performs the build itself.
func run(b builder) {
	dockerClient, endpoint, err := dockerutil.NewHelper().GetClient()
	if err != nil {
		glog.Fatalf("Error obtaining docker client: %v", err)
	}
	buildStr := os.Getenv("BUILD")
	glog.V(4).Infof("$BUILD env var is %s \n", buildStr)
	build := api.Build{}
	if err := latest.Codec.DecodeInto([]byte(buildStr), &build); err != nil {
		glog.Fatalf("Unable to parse build: %v", err)
	}
	if build.Spec.Source.SourceSecret != nil {
		if build.Spec.Source.Git != nil {
			// TODO: this should be refactored to let each source type manage which secrets
			// it accepts
			sourceURL, err := git.ParseRepository(build.Spec.Source.Git.URI)
			if err != nil {
				glog.Fatalf("Cannot parse build URL: %s", build.Spec.Source.Git.URI)
			}
			scmAuths := auths(sourceURL)
			sourceURL, err = setupSourceSecret(build.Spec.Source.SourceSecret.Name, scmAuths)
			if err != nil {
				glog.Fatalf("Cannot setup secret file for accessing private repository: %v", err)
			}
			if sourceURL != nil {
				build.Annotations[bld.OriginalSourceURLAnnotationKey] = build.Spec.Source.Git.URI
				build.Spec.Source.Git.URI = sourceURL.String()
			}
		}
	}
	config, err := kclient.InClusterConfig()
	if err != nil {
		glog.Fatalf("Failed to get client config: %v", err)
	}
	osClient, err := client.New(config)
	if err != nil {
		glog.Fatalf("Error obtaining OpenShift client: %v", err)
	}
	buildsClient := osClient.Builds(build.Namespace)
	if err = b.Build(dockerClient, endpoint, buildsClient, &build); err != nil {
		glog.Fatalf("Build error: %v", err)
	}
	if build.Spec.Output.To == nil || len(build.Spec.Output.To.Name) == 0 {
		glog.Warning("Build does not have an Output defined, no output image was pushed to a registry.")
	}
}
func makeClient() (*client.Client, error) {
	var cfg *client.Config
	var err error

	if *inCluster {
		if cfg, err = client.InClusterConfig(); err != nil {
			return nil, err
		}
	} else {
		clientConfig := kubectl_util.DefaultClientConfig(flags)
		if cfg, err = clientConfig.ClientConfig(); err != nil {
			return nil, err
		}
	}
	return client.New(cfg)
}
func TestPodClient(t *testing.T) {
	// A pod can't really be mocked efficiently without major filesystem
	// manipulation. So we're testing fully only when this is running inside of
	// a k8s pod.
	if _, err := unversioned.InClusterConfig(); err != nil {
		t.Skip("This can only be run inside Kubernetes. Skipping.")
	}

	c, err := PodClient()
	if err != nil {
		t.Errorf("Error constructing client: %s", err)
	}
	if _, err := c.ServerVersion(); err != nil {
		t.Errorf("Failed to connect to given server: %s", err)
	}
}
func getKubeConfig(url, user, pass string, insecure bool) (*unversioned.Config, error) {
	config, err := unversioned.InClusterConfig()
	if err != nil {
		config, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
			clientcmd.NewDefaultClientConfigLoadingRules(),
			&clientcmd.ConfigOverrides{},
		).ClientConfig()
		if err != nil {
			return nil, err
		}
		config.Host = url
	}
	config.Insecure = insecure
	config.Username = user
	config.Password = pass
	return config, nil
}
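// A minimal sketch of calling getKubeConfig and building a client from the
// result. The unversioned.New call mirrors the NewClient snippet later in this
// listing; the master URL and credentials are placeholders, and the
// exampleGetKubeConfig name is only for illustration.
func exampleGetKubeConfig() error {
	config, err := getKubeConfig("https://master.example.com:8443", "admin", "secret", false)
	if err != nil {
		return err
	}
	c, err := unversioned.New(config)
	if err != nil {
		return err
	}
	_ = c // use the client against the configured master
	return nil
}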
func main() {
	flag.Parse()

	var cfg *kclient.Config
	var err error

	// If the local flag is provided, connect to the API through a localhost
	// proxy: start one with `kubectl proxy`, then run the script with
	// `go run main.go`.
	if *local {
		cfg = &kclient.Config{
			Host: "http://localhost:8001",
		}
	} else {
		// This handles the in-cluster config, when the script is running
		// inside a container in a Kubernetes cluster.
		cfg, err = kclient.InClusterConfig()
		if err != nil {
			log.Printf("failed to load incluster config %v", err)
			os.Exit(1)
		}
	}

	client, err := kclient.New(cfg)
	if err != nil {
		log.Printf("failed to create client %v", err)
		os.Exit(1)
	}

	// Query the API for services in the default namespace matching the
	// label proxy="true".
	services, err := client.Services("default").List(
		labels.SelectorFromSet(labels.Set{"proxy": "true"}),
		fields.Everything(),
		aclient.ListOptions{})
	if err != nil {
		log.Printf("failed to list services: %v", err)
		os.Exit(1)
	}
	// log.Printf("Services %v", services)
	// log.Printf("Name %s", services.Items[0].Name)
	// log.Printf("Proxy Name %s", services.Items[0].Labels["proxyName"])

	for _, k := range services.Items {
		// For every service found, write the desired nginx config file.
		err = writeConfigFile(k.Name, k.Labels["proxyName"])
		if err != nil {
			log.Printf("can't write %s config file: %v\n", k.Name, err)
		}
	}
}
func newBuilderConfigFromEnvironment() (*builderConfig, error) {
	cfg := &builderConfig{}
	var err error

	// build (BUILD)
	buildStr := os.Getenv("BUILD")
	glog.V(4).Infof("$BUILD env var is %s \n", buildStr)
	cfg.build = &api.Build{}
	if err = runtime.DecodeInto(kapi.Codecs.UniversalDecoder(), []byte(buildStr), cfg.build); err != nil {
		return nil, fmt.Errorf("unable to parse build: %v", err)
	}

	masterVersion := os.Getenv(api.OriginVersion)
	thisVersion := version.Get().String()
	if len(masterVersion) != 0 && masterVersion != thisVersion {
		glog.Warningf("Master version %q does not match Builder image version %q", masterVersion, thisVersion)
	} else {
		glog.V(2).Infof("Master version %q, Builder version %q", masterVersion, thisVersion)
	}

	// sourceSecretsDir (SOURCE_SECRET_PATH)
	cfg.sourceSecretDir = os.Getenv("SOURCE_SECRET_PATH")

	// dockerClient and dockerEndpoint (DOCKER_HOST)
	// usually not set, defaults to docker socket
	cfg.dockerClient, cfg.dockerEndpoint, err = dockerutil.NewHelper().GetClient()
	if err != nil {
		return nil, fmt.Errorf("error obtaining docker client: %v", err)
	}

	// buildsClient (KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT)
	clientConfig, err := kclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to get client config: %v", err)
	}
	osClient, err := client.New(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("error obtaining OpenShift client: %v", err)
	}
	cfg.buildsClient = osClient.Builds(cfg.build.Namespace)

	return cfg, nil
}
// NewCommandDeployer provides a CLI handler for deploy.
func NewCommandDeployer(name string) *cobra.Command {
	cfg := &config{}

	cmd := &cobra.Command{
		Use:   fmt.Sprintf("%s%s", name, clientcmd.ConfigSyntax),
		Short: "Run the deployer",
		Long:  deployerLong,
		Run: func(c *cobra.Command, args []string) {
			if len(cfg.DeploymentName) == 0 {
				glog.Fatal("deployment is required")
			}
			if len(cfg.Namespace) == 0 {
				glog.Fatal("namespace is required")
			}

			kcfg, err := kclient.InClusterConfig()
			if err != nil {
				glog.Fatal(err)
			}
			kc, err := kclient.New(kcfg)
			if err != nil {
				glog.Fatal(err)
			}
			oc, err := client.New(kcfg)
			if err != nil {
				glog.Fatal(err)
			}

			deployer := NewDeployer(kc, oc)
			if err = deployer.Deploy(cfg.Namespace, cfg.DeploymentName); err != nil {
				glog.Fatal(err)
			}
		},
	}

	cmd.AddCommand(version.NewVersionCommand(name, false))

	flag := cmd.Flags()
	flag.StringVar(&cfg.DeploymentName, "deployment", util.Env("OPENSHIFT_DEPLOYMENT_NAME", ""), "The deployment name to start")
	flag.StringVar(&cfg.Namespace, "namespace", util.Env("OPENSHIFT_DEPLOYMENT_NAMESPACE", ""), "The deployment namespace")

	return cmd
}
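// A minimal sketch of running NewCommandDeployer as a standalone binary. The
// command name "openshift-deploy" and the exampleDeployerMain name are
// assumptions for illustration; cmd.Execute is standard cobra.
func exampleDeployerMain() {
	cmd := NewCommandDeployer("openshift-deploy")
	if err := cmd.Execute(); err != nil {
		glog.Fatal(err)
	}
}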
// NewClient returns a usable Client. Don't forget to Stop it.
func NewClient(addr string, resyncPeriod time.Duration) (Client, error) {
	var config *unversioned.Config
	if addr != "" {
		config = &unversioned.Config{Host: addr}
	} else {
		// If no API server address was provided, assume we are running
		// inside a pod. Try to connect to the API server through its
		// Service environment variables, using the default Service
		// Account Token.
		var err error
		if config, err = unversioned.InClusterConfig(); err != nil {
			return nil, err
		}
	}
	c, err := unversioned.New(config)
	if err != nil {
		return nil, err
	}

	podListWatch := cache.NewListWatchFromClient(c, "pods", api.NamespaceAll, fields.Everything())
	podStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	podReflector := cache.NewReflector(podListWatch, &api.Pod{}, podStore, resyncPeriod)

	serviceListWatch := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything())
	serviceStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	serviceReflector := cache.NewReflector(serviceListWatch, &api.Service{}, serviceStore, resyncPeriod)

	quit := make(chan struct{})
	runReflectorUntil(podReflector, resyncPeriod, quit)
	runReflectorUntil(serviceReflector, resyncPeriod, quit)

	return &client{
		quit:             quit,
		client:           c,
		podReflector:     podReflector,
		podStore:         &cache.StoreToPodLister{Store: podStore},
		serviceReflector: serviceReflector,
		serviceStore:     &cache.StoreToServiceLister{Store: serviceStore},
	}, nil
}
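// A minimal sketch of constructing this Client in-cluster (empty addr) and
// stopping it on exit, as the doc comment above asks. The 30-second resync
// period and the exampleNewClientUsage name are illustrative assumptions.
func exampleNewClientUsage() error {
	c, err := NewClient("", 30*time.Second)
	if err != nil {
		return err
	}
	defer c.Stop()
	// ... read pods and services from the client's local caches ...
	return nil
}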
func clientFromConfig(path string) (*kclient.Config, string, error) {
	if path == "-" {
		cfg, err := kclient.InClusterConfig()
		if err != nil {
			return nil, "", fmt.Errorf("cluster config not available: %v", err)
		}
		return cfg, "", nil
	}

	rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: path}
	credentials, err := rules.Load()
	if err != nil {
		return nil, "", fmt.Errorf("the provided credentials %q could not be loaded: %v", path, err)
	}
	cfg := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{})
	config, err := cfg.ClientConfig()
	if err != nil {
		return nil, "", fmt.Errorf("the provided credentials %q could not be used: %v", path, err)
	}
	namespace, _, _ := cfg.Namespace()
	return config, namespace, nil
}
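// A minimal sketch of the two paths through clientFromConfig: "-" selects the
// in-cluster configuration, anything else is treated as a kubeconfig path.
// The file path and the exampleClientFromConfig name are illustrative only.
func exampleClientFromConfig() {
	// In-cluster (e.g. when running inside a pod):
	if cfg, ns, err := clientFromConfig("-"); err == nil {
		glog.Infof("in-cluster host=%s namespace=%q", cfg.Host, ns)
	}
	// From an explicit kubeconfig file:
	if cfg, ns, err := clientFromConfig("/path/to/kubeconfig"); err == nil {
		glog.Infof("file-based host=%s namespace=%q", cfg.Host, ns)
	}
}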
// ClientConfig returns a complete client config
func (c defaultingClientConfig) ClientConfig() (*kclient.Config, error) {
	cfg, err := c.nested.ClientConfig()
	if err == nil {
		return cfg, nil
	}
	if !kclientcmd.IsEmptyConfig(err) {
		return nil, err
	}

	// TODO: need to expose inClusterConfig upstream and use that
	if icc, err := kclient.InClusterConfig(); err == nil {
		glog.V(4).Infof("Using in-cluster configuration")
		return icc, nil
	}

	return nil, fmt.Errorf(`No configuration file found, please login or point to an existing file:

  1. Via the command-line flag --config
  2. Via the KUBECONFIG environment variable
  3. In your home directory as ~/.kube/config

To view or setup config directly use the 'config' command.`)
}
func (inClusterClientConfig) ClientConfig() (*client.Config, error) {
	return client.InClusterConfig()
}
func CreateKubeSources(uri *url.URL, c cache.Cache) ([]api.Source, error) {
	var (
		kubeConfig *kube_client.Config
		err        error
	)

	opts := uri.Query()
	configOverrides, err := getConfigOverrides(uri)
	if err != nil {
		return nil, err
	}

	inClusterConfig := defaultInClusterConfig
	if len(opts["inClusterConfig"]) > 0 {
		inClusterConfig, err = strconv.ParseBool(opts["inClusterConfig"][0])
		if err != nil {
			return nil, err
		}
	}
	if inClusterConfig {
		kubeConfig, err = kube_client.InClusterConfig()
		if err != nil {
			return nil, err
		}
		if configOverrides.ClusterInfo.Server != "" {
			kubeConfig.Host = configOverrides.ClusterInfo.Server
		}
		kubeConfig.Version = configOverrides.ClusterInfo.APIVersion
	} else {
		authFile := ""
		if len(opts["auth"]) > 0 {
			authFile = opts["auth"][0]
		}
		if authFile != "" {
			if kubeConfig, err = kubeClientCmd.NewNonInteractiveDeferredLoadingClientConfig(
				&kubeClientCmd.ClientConfigLoadingRules{ExplicitPath: authFile},
				configOverrides).ClientConfig(); err != nil {
				return nil, err
			}
		} else {
			kubeConfig = &kube_client.Config{
				Host:     configOverrides.ClusterInfo.Server,
				Version:  configOverrides.ClusterInfo.APIVersion,
				Insecure: configOverrides.ClusterInfo.InsecureSkipTLSVerify,
			}
		}
	}
	if len(kubeConfig.Host) == 0 {
		return nil, fmt.Errorf("invalid kubernetes master url specified")
	}
	if len(kubeConfig.Version) == 0 {
		return nil, fmt.Errorf("invalid kubernetes API version specified")
	}

	useServiceAccount := defaultUseServiceAccount
	if len(opts["useServiceAccount"]) >= 1 {
		useServiceAccount, err = strconv.ParseBool(opts["useServiceAccount"][0])
		if err != nil {
			return nil, err
		}
	}
	if useServiceAccount {
		// If a readable service account token exists, then use it
		if contents, err := ioutil.ReadFile(defaultServiceAccountFile); err == nil {
			kubeConfig.BearerToken = string(contents)
		}
	}

	kubeClient := kube_client.NewOrDie(kubeConfig)

	nodesApi, err := nodes.NewKubeNodes(kubeClient)
	if err != nil {
		return nil, err
	}

	kubeletPort := defaultKubeletPort
	if len(opts["kubeletPort"]) >= 1 {
		kubeletPort, err = strconv.Atoi(opts["kubeletPort"][0])
		if err != nil {
			return nil, err
		}
	}
	kubeletHttps := defaultKubeletHttps
	if len(opts["kubeletHttps"]) >= 1 {
		kubeletHttps, err = strconv.ParseBool(opts["kubeletHttps"][0])
		if err != nil {
			return nil, err
		}
	}

	glog.Infof("Using Kubernetes client with master %q and version %q\n", kubeConfig.Host, kubeConfig.Version)
	glog.Infof("Using kubelet port %d", kubeletPort)

	kubeletConfig := &kube_client.KubeletConfig{
		Port:            uint(kubeletPort),
		EnableHttps:     kubeletHttps,
		TLSClientConfig: kubeConfig.TLSClientConfig,
	}
	kubeletApi, err := datasource.NewKubelet(kubeletConfig)
	if err != nil {
		return nil, err
	}

	kubePodsSource := NewKubePodMetrics(kubeletPort, kubeletApi, nodesApi, newPodsApi(kubeClient))
	kubeNodeSource := NewKubeNodeMetrics(kubeletPort, kubeletApi, nodesApi)
	kubeEventsSource := NewKubeEvents(kubeClient, c)

	return []api.Source{kubePodsSource, kubeNodeSource, kubeEventsSource}, nil
}
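// A minimal sketch of calling CreateKubeSources with an explicit (non
// in-cluster) configuration passed through URI query options; the master URL,
// the auth file path, and the exampleCreateKubeSources name are placeholders,
// and the cache is supplied by the caller.
func exampleCreateKubeSources(c cache.Cache) ([]api.Source, error) {
	uri, err := url.Parse("https://master.example.com:8443?inClusterConfig=false&auth=/etc/kubernetes/kubeconfig&useServiceAccount=true")
	if err != nil {
		return nil, err
	}
	return CreateKubeSources(uri, c)
}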