func getKubeClient() (string, *kclient.Client, error) {
	c, err := kclientcmd.LoadFromFile(*kubeConfig)
	if err != nil {
		return "", nil, fmt.Errorf("error loading kubeConfig: %v", err)
	}
	if c.CurrentContext == "" || len(c.Clusters) == 0 {
		return "", nil, fmt.Errorf("invalid kubeConfig: %+v", *c)
	}
	// Resolve the cluster referenced by the current context rather than assuming
	// the cluster entry shares the context's name.
	context, ok := c.Contexts[c.CurrentContext]
	if !ok {
		return "", nil, fmt.Errorf("invalid kubeConfig: current context %q not found", c.CurrentContext)
	}
	cluster, ok := c.Clusters[context.Cluster]
	if !ok {
		return "", nil, fmt.Errorf("invalid kubeConfig: cluster %q not found", context.Cluster)
	}
	config, err := kclientcmd.NewDefaultClientConfig(
		*c,
		&kclientcmd.ConfigOverrides{
			ClusterInfo: kclientcmdkapi.Cluster{
				APIVersion: "v1",
			},
		}).ClientConfig()
	if err != nil {
		return "", nil, fmt.Errorf("error parsing kubeConfig: %v", err)
	}
	kubeClient, err := kclient.New(config)
	if err != nil {
		return "", nil, fmt.Errorf("error creating client: %v", err)
	}
	return cluster.Server, kubeClient, nil
}
func CreateClientFromFile(path string) (*clientset.Clientset, error) {
	adminKubeconfig, err := clientcmd.LoadFromFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to load admin kubeconfig [%v]", err)
	}
	return createAPIClient(adminKubeconfig)
}
// Login logs into the specified server using the given credentials, merging the
// resulting context into any existing client configuration.
func Login(username, password, server, configDir string, f *clientcmd.Factory, c *cobra.Command, out io.Writer) error {
	existingConfig, err := f.OpenShiftClientConfig.RawConfig()
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		existingConfig = *(kclientcmdapi.NewConfig())
	}
	adminConfig, err := kclientcmd.LoadFromFile(filepath.Join(configDir, "master", "admin.kubeconfig"))
	if err != nil {
		return err
	}
	for k := range adminConfig.AuthInfos {
		adminConfig.AuthInfos[k].LocationOfOrigin = ""
	}
	newConfig, err := config.MergeConfig(existingConfig, *adminConfig)
	if err != nil {
		return err
	}
	output := ioutil.Discard
	if glog.V(1) {
		output = out
	}
	opts := &cmd.LoginOptions{
		Server:             server,
		Username:           username,
		Password:           password,
		Out:                output,
		StartingKubeConfig: newConfig,
		PathOptions:        config.NewPathOptions(c),
	}
	return cmd.RunLogin(nil, opts)
}
func loggedInUserFactory() (*clientcmd.Factory, error) {
	cfg, err := kclientcmd.LoadFromFile(config.RecommendedHomeFile)
	if err != nil {
		return nil, err
	}
	defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, &kclientcmd.ConfigOverrides{})
	return clientcmd.NewFactory(defaultCfg), nil
}
// getStartingConfig returns the Config object built from the sources specified by
// the options, and an error if something goes wrong. When merging is disabled,
// only the explicitly requested file is loaded.
func (o *ViewOptions) getStartingConfig() (*clientcmdapi.Config, error) {
	switch {
	case !o.Merge.Value():
		return clientcmd.LoadFromFile(o.ConfigAccess.GetExplicitFile())
	default:
		return o.ConfigAccess.GetStartingConfig()
	}
}
// Factory returns a command factory that works with OpenShift server's admin credentials
func (c *ClientStartConfig) Factory() (*clientcmd.Factory, error) {
	if c.factory == nil {
		cfg, err := kclientcmd.LoadFromFile(filepath.Join(c.LocalConfigDir, "master", "admin.kubeconfig"))
		if err != nil {
			return nil, err
		}
		defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, &kclientcmd.ConfigOverrides{})
		c.factory = clientcmd.NewFactory(defaultCfg)
	}
	return c.factory, nil
}
func (c *HollowNodeConfig) createClientConfigFromFile() (*restclient.Config, error) {
	clientConfig, err := clientcmd.LoadFromFile(c.KubeconfigPath)
	if err != nil {
		return nil, fmt.Errorf("error while loading kubeconfig from file %v: %v", c.KubeconfigPath, err)
	}
	config, err := clientcmd.NewDefaultClientConfig(*clientConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("error while creating client config: %v", err)
	}
	config.ContentType = c.ContentType
	return config, nil
}
// getConfigFromFileOrDie tries to read a kubeconfig file and, if it can't, calls exit.
// One exception: missing files result in empty configs, not an exit.
func getConfigFromFileOrDie(filename string) *clientcmdapi.Config {
	config, err := clientcmd.LoadFromFile(filename)
	if err != nil && !os.IsNotExist(err) {
		glog.FatalDepth(1, err)
	}
	if config == nil {
		return clientcmdapi.NewConfig()
	}
	return config
}
func testKubeconfigUpdate(t *testing.T, federationName, lbIP, kubeconfigGlobal, kubeconfigExplicit string) {
	filename := kubeconfigGlobal
	if kubeconfigExplicit != "" {
		filename = kubeconfigExplicit
	}
	config, err := clientcmd.LoadFromFile(filename)
	if err != nil {
		t.Errorf("Failed to open kubeconfig file: %v", err)
		return
	}

	cluster, ok := config.Clusters[federationName]
	if !ok {
		t.Errorf("No cluster info for %q", federationName)
		return
	}
	endpoint := lbIP
	if !strings.HasPrefix(lbIP, "https://") {
		endpoint = fmt.Sprintf("https://%s", lbIP)
	}
	if cluster.Server != endpoint {
		t.Errorf("Want federation API server endpoint %q, got %q", endpoint, cluster.Server)
	}

	authInfo, ok := config.AuthInfos[federationName]
	if !ok {
		t.Errorf("No credentials for %q", federationName)
		return
	}
	if len(authInfo.ClientCertificateData) == 0 {
		t.Errorf("Expected client certificate to be non-empty")
		return
	}
	if len(authInfo.ClientKeyData) == 0 {
		t.Errorf("Expected client key to be non-empty")
		return
	}
	if authInfo.Username != AdminCN {
		t.Errorf("Want username: %q, got: %q", AdminCN, authInfo.Username)
	}

	context, ok := config.Contexts[federationName]
	if !ok {
		t.Errorf("No context for %q", federationName)
		return
	}
	if context.Cluster != federationName {
		t.Errorf("Want context cluster name: %q, got: %q", federationName, context.Cluster)
	}
	if context.AuthInfo != federationName {
		t.Errorf("Want context auth info: %q, got: %q", federationName, context.AuthInfo)
	}
}
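// buildFederationKubeconfig is a hypothetical helper, not part of the snippets above:
// a minimal sketch of assembling the kind of kubeconfig the test verifies, assuming
// the same clientcmdapi package the other examples import. The federation name and
// endpoint are placeholders; client certificate/key data would come from a real CA.
func buildFederationKubeconfig(federationName, endpoint string) *clientcmdapi.Config {
	cfg := clientcmdapi.NewConfig()

	// Cluster entry: the API server endpoint the test checks against.
	cluster := clientcmdapi.NewCluster()
	cluster.Server = endpoint // e.g. "https://10.0.0.1"
	cfg.Clusters[federationName] = cluster

	// AuthInfo entry: the test expects the username to match AdminCN.
	authInfo := clientcmdapi.NewAuthInfo()
	authInfo.Username = "admin"
	cfg.AuthInfos[federationName] = authInfo

	// Context entry tying cluster and credentials together, made current.
	context := clientcmdapi.NewContext()
	context.Cluster = federationName
	context.AuthInfo = federationName
	cfg.Contexts[federationName] = context
	cfg.CurrentContext = federationName

	return cfg
}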
func createClientFromFile(path string) (*client.Client, error) {
	c, err := clientcmd.LoadFromFile(path)
	if err != nil {
		return nil, fmt.Errorf("error while loading kubeconfig from file %v: %v", path, err)
	}
	config, err := clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("error while creating client config: %v", err)
	}
	client, err := client.New(config)
	if err != nil {
		return nil, fmt.Errorf("error while creating client: %v", err)
	}
	return client, nil
}
// Factory returns a command factory that works with OpenShift server's admin credentials
func (c *ClientStartConfig) Factory() (*clientcmd.Factory, error) {
	if c.factory == nil {
		cfg, err := kclientcmd.LoadFromFile(filepath.Join(c.LocalConfigDir, "master", "admin.kubeconfig"))
		if err != nil {
			return nil, err
		}
		overrides := &kclientcmd.ConfigOverrides{}
		if c.PortForwarding {
			overrides.ClusterInfo.Server = fmt.Sprintf("https://%s:8443", c.ServerIP)
		}
		defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, overrides)
		c.factory = clientcmd.NewFactory(defaultCfg)
	}
	return c.factory, nil
}
func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName string) *kubeclientset.Clientset {
	kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
	framework.ExpectNoError(err, "error loading KubeConfig: %v", err)

	cfgOverride := &clientcmd.ConfigOverrides{
		ClusterInfo: clientcmdapi.Cluster{
			Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
		},
	}
	ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
	cfg, err := ccfg.ClientConfig()
	framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)

	cfg.QPS = KubeAPIQPS
	cfg.Burst = KubeAPIBurst
	return kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(cfg, userAgentName))
}
// k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go
func directKClientConfig(kubeconfigPath string, context, server string) clientcmd.ClientConfig {
	conf, err := clientcmd.LoadFromFile(kubeconfigPath)
	//conf, err := clientcmd.NewDefaultPathOptions().GetStartingConfig()
	if err != nil {
		glog.Errorf("client not configured: %v\n", err)
		return nil
	}
	glog.Infof("kube client configured: %+v\n", conf)

	var clientConfig clientcmd.ClientConfig
	overrides := &clientcmd.ConfigOverrides{}
	if server != "" {
		overrides.ClusterInfo = kclientcmdapi.Cluster{Server: server}
	}
	if context != "" {
		overrides.CurrentContext = context
	}
	clientConfig = clientcmd.NewNonInteractiveClientConfig(*conf, context, overrides)
	return clientConfig
}
func (test deleteContextTest) run(t *testing.T) {
	fakeKubeFile, _ := ioutil.TempFile("", "")
	defer os.Remove(fakeKubeFile.Name())
	err := clientcmd.WriteToFile(test.config, fakeKubeFile.Name())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	pathOptions := clientcmd.NewDefaultPathOptions()
	pathOptions.GlobalFile = fakeKubeFile.Name()
	pathOptions.EnvVar = ""

	buf := bytes.NewBuffer([]byte{})
	cmd := NewCmdConfigDeleteContext(buf, pathOptions)
	cmd.SetArgs([]string{test.contextToDelete})
	if err := cmd.Execute(); err != nil {
		t.Fatalf("unexpected error executing command: %v", err)
	}

	expectedOutWithFile := fmt.Sprintf(test.expectedOut, fakeKubeFile.Name())
	if expectedOutWithFile != buf.String() {
		t.Errorf("expected output %s, but got %s", expectedOutWithFile, buf.String())
		return
	}

	// Verify context was removed from kubeconfig file
	config, err := clientcmd.LoadFromFile(fakeKubeFile.Name())
	if err != nil {
		t.Fatalf("unexpected error loading kubeconfig file: %v", err)
	}
	contexts := make([]string, 0, len(config.Contexts))
	for k := range config.Contexts {
		contexts = append(contexts, k)
	}
	if !reflect.DeepEqual(test.expectedContexts, contexts) {
		t.Errorf("expected contexts %v, but found %v in kubeconfig", test.expectedContexts, contexts)
	}
}
func main() {
	//kubeconfig = "/home/ubuntu/.kube/config"

	// k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go
	config, err := clientcmd.LoadFromFile(kubeconfig)
	if err != nil {
		log.Fatal(err)
	}

	// k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go
	clientConfig := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{})
	restConfig, err := clientConfig.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}

	// k8s.io/kubernetes/pkg/client/unversioned/helper.go
	kclient, err := unversioned.New(restConfig)
	if err != nil {
		log.Fatal(err)
	}

	// k8s.io/kubernetes/pkg/client/unversioned/client.go
	// k8s.io/kubernetes/pkg/client/unversioned/pods.go
	result, err := kclient.Pods("default").List(kapi.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// k8s.io/kubernetes/pkg/runtime/codec.go
	// k8s.io/kubernetes/pkg/api/register.go
	// k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go
	b, err := runtime.Encode(kapi.Codecs.LegacyCodec(kapi.SchemeGroupVersion), result, kapi.SchemeGroupVersion)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Output list:\n%s\n", b)
}
func main() {
	flag.Parse()

	glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d",
		*queriesAverage, *podsPerNode, *upTo)

	var spec string
	if *gke != "" {
		spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "kubernetes", "kubeconfig")
	} else {
		spec = filepath.Join(os.Getenv("HOME"), ".kube", "config")
	}
	settings, err := clientcmd.LoadFromFile(spec)
	if err != nil {
		glog.Fatalf("Error loading configuration: %v", err.Error())
	}
	if *gke != "" {
		settings.CurrentContext = *gke
	}
	config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		glog.Fatalf("Failed to construct config: %v", err)
	}

	c, err := client.New(config)
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	var nodes *api.NodeList
	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
		nodes, err = c.Nodes().List(api.ListOptions{})
		if err == nil {
			break
		}
		glog.Warningf("Failed to list nodes: %v", err)
	}
	if err != nil {
		glog.Fatalf("Giving up trying to list nodes: %v", err)
	}

	if len(nodes.Items) == 0 {
		glog.Fatalf("Failed to find any nodes.")
	}

	glog.Infof("Found %d nodes on this cluster:", len(nodes.Items))
	for i, node := range nodes.Items {
		glog.Infof("%d: %s", i, node.Name)
	}

	queries := *queriesAverage * len(nodes.Items) * *podsPerNode

	// Create the namespace
	got, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}})
	if err != nil {
		glog.Fatalf("Failed to create namespace: %v", err)
	}
	ns := got.Name
	defer func(ns string) {
		if err := c.Namespaces().Delete(ns); err != nil {
			glog.Warningf("Failed to delete namespace %s: %v", ns, err)
		} else {
			// wait until the namespace disappears
			for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
				if _, err := c.Namespaces().Get(ns); err != nil {
					if errors.IsNotFound(err) {
						return
					}
				}
				time.Sleep(time.Second)
			}
		}
	}(ns)
	glog.Infof("Created namespace %s", ns)

	// Create a service for these pods.
	glog.Infof("Creating service %s/serve-hostnames", ns)
	// Make several attempts to create a service.
	var svc *api.Service
	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
		t := time.Now()
		svc, err = c.Services(ns).Create(&api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: "serve-hostnames",
				Labels: map[string]string{
					"name": "serve-hostname",
				},
			},
			Spec: api.ServiceSpec{
				Ports: []api.ServicePort{{
					Protocol:   "TCP",
					Port:       9376,
					TargetPort: intstr.FromInt(9376),
				}},
				Selector: map[string]string{
					"name": "serve-hostname",
				},
			},
		})
		glog.V(4).Infof("Service create %s/serve-hostnames took %v", ns, time.Since(t))
		if err == nil {
			break
		}
		glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err)
	}
	if err != nil {
		glog.Warningf("Unable to create service %s/serve-hostnames: %v", ns, err)
		return
	}
	// Clean up service
	defer func() {
		glog.Infof("Cleaning up service %s/serve-hostnames", ns)
		// Make several attempts to delete the service.
		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
			if err := c.Services(ns).Delete(svc.Name); err == nil {
				return
			}
			glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
		}
	}()

	// Put serve-hostname pods on each node.
	podNames := []string{}
	for i, node := range nodes.Items {
		for j := 0; j < *podsPerNode; j++ {
			podName := fmt.Sprintf("serve-hostname-%d-%d", i, j)
			podNames = append(podNames, podName)
			// Make several attempts
			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
				glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
				t := time.Now()
				_, err = c.Pods(ns).Create(&api.Pod{
					ObjectMeta: api.ObjectMeta{
						Name: podName,
						Labels: map[string]string{
							"name": "serve-hostname",
						},
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  "serve-hostname",
								Image: "gcr.io/google_containers/serve_hostname:1.1",
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
						NodeName: node.Name,
					},
				})
				glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t))
				if err == nil {
					break
				}
				glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
			if err != nil {
				glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err)
				return
			}
		}
	}
	// Clean up the pods
	defer func() {
		glog.Info("Cleaning up pods")
		// Make several attempts to delete the pods.
		for _, podName := range podNames {
			for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
				if err = c.Pods(ns).Delete(podName, nil); err == nil {
					break
				}
				glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
		}
	}()

	glog.Info("Waiting for the serve-hostname pods to be ready")
	for _, podName := range podNames {
		var pod *api.Pod
		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
			pod, err = c.Pods(ns).Get(podName)
			if err != nil {
				glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, podStartTimeout, err)
				continue
			}
			if pod.Status.Phase == api.PodRunning {
				break
			}
		}
		if pod == nil {
			glog.Warningf("Gave up waiting on pod %s/%s: never got pod status", ns, podName)
		} else if pod.Status.Phase != api.PodRunning {
			glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase)
		} else {
			glog.Infof("%s/%s is running", ns, podName)
		}
	}

	// Wait for the endpoints to propagate.
	for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) {
		hostname, err := c.Get().
			Namespace(ns).
			Prefix("proxy").
			Resource("services").
			Name("serve-hostnames").
			DoRaw()
		if err != nil {
			glog.Infof("After %v while making a proxy call got error %v", time.Since(start), err)
			continue
		}
		var r unversioned.Status
		if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), hostname, &r); err != nil {
			break
		}
		if r.Status == unversioned.StatusFailure {
			glog.Infof("After %v got status %v", time.Since(start), string(hostname))
			continue
		}
		break
	}

	// Repeatedly make requests.
	for iteration := 0; iteration != *upTo; iteration++ {
		responseChan := make(chan string, queries)
		// Use a channel of size *maxPar to throttle the number
		// of in-flight requests to avoid overloading the service.
		inFlight := make(chan struct{}, *maxPar)
		start := time.Now()
		for q := 0; q < queries; q++ {
			go func(i int, query int) {
				inFlight <- struct{}{}
				t := time.Now()
				hostname, err := c.Get().
					Namespace(ns).
					Prefix("proxy").
					Resource("services").
					Name("serve-hostnames").
					DoRaw()
				glog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t))
				if err != nil {
					glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err)
					// If the query failed return a string which starts with a character
					// that can't be part of a hostname.
					responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err)
				} else {
					responseChan <- string(hostname)
				}
				<-inFlight
			}(iteration, q)
		}
		responses := make(map[string]int, *podsPerNode*len(nodes.Items))
		missing := 0
		for q := 0; q < queries; q++ {
			r := <-responseChan
			glog.V(4).Infof("Got response from %s", r)
			responses[r]++
			// If the returned hostname starts with '!' then it indicates
			// an error response.
			if len(r) > 0 && r[0] == '!' {
				glog.V(3).Infof("Got response %s", r)
				missing++
			}
		}
		if missing > 0 {
			glog.Warningf("Missing %d responses out of %d", missing, queries)
		}
		// Report any nodes that did not respond.
		for n, node := range nodes.Items {
			for i := 0; i < *podsPerNode; i++ {
				name := fmt.Sprintf("serve-hostname-%d-%d", n, i)
				if _, ok := responses[name]; !ok {
					glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration)
				}
			}
		}
		glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing",
			iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing)
	}
}
	}
	framework.Logf("Checking that %d clusters are Ready", len(contexts))
	for _, context := range contexts {
		clusterIsReadyOrFail(f, &context)
	}
	framework.Logf("%d clusters are Ready", len(contexts))

	clusters = map[string]*cluster{}
	primaryClusterName = clusterList.Items[0].Name
	By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
	for i, c := range clusterList.Items {
		framework.Logf("Creating a clientset for the cluster %s", c.Name)
		Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
		kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
		framework.ExpectNoError(err, "error loading KubeConfig: %v", err)

		cfgOverride := &clientcmd.ConfigOverrides{
			ClusterInfo: clientcmdapi.Cluster{
				Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
			},
		}
		ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
		cfg, err := ccfg.ClientConfig()
		framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)

		cfg.QPS = KubeAPIQPS
		cfg.Burst = KubeAPIBurst
		clset := release_1_3.NewForConfigOrDie(restclient.AddUserAgent(cfg, UserAgentName))
		clusters[c.Name] = &cluster{c.Name, clset, false, nil}
// runFileDiscovery executes file-based discovery.
func runFileDiscovery(fd *kubeadmapi.FileDiscovery) (*clientcmdapi.Config, error) {
	return clientcmd.LoadFromFile(fd.Path)
}
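// loadAndValidate is a hypothetical helper, not taken from the snippets above: a
// minimal sketch of pairing LoadFromFile with clientcmd's validation pass before
// handing the config to callers, assuming the same clientcmd/clientcmdapi imports
// used throughout these examples.
func loadAndValidate(path string) (*clientcmdapi.Config, error) {
	cfg, err := clientcmd.LoadFromFile(path)
	if err != nil {
		return nil, fmt.Errorf("error loading kubeconfig from %s: %v", path, err)
	}
	// Validate checks, among other things, that the current context and the
	// cluster and auth info it references all resolve to entries in the config.
	if err := clientcmd.Validate(*cfg); err != nil {
		return nil, fmt.Errorf("invalid kubeconfig %s: %v", path, err)
	}
	return cfg, nil
}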