// getFactoryFromCluster returns an OpenShift Factory
// using the config that is made available when we are running in a cluster
// (using environment variables and token secret file)
// or an error if those are not available (meaning we are not running in a cluster)
func getFactoryFromCluster() (*clientcmd.Factory, error) {
	clusterConfig, err := k8client.InClusterConfig()
	if err != nil {
		return nil, err
	}

	// keep only what we need to initialize a factory
	overrides := &kclientcmd.ConfigOverrides{
		ClusterInfo: kclientcmdapi.Cluster{
			Server:     clusterConfig.Host,
			APIVersion: clusterConfig.Version,
		},
		AuthInfo: kclientcmdapi.AuthInfo{
			Token: clusterConfig.BearerToken,
		},
		Context: kclientcmdapi.Context{},
	}

	if len(clusterConfig.TLSClientConfig.CAFile) > 0 {
		// FIXME "x509: cannot validate certificate for x.x.x.x because it doesn't contain any IP SANs"
		// overrides.ClusterInfo.CertificateAuthority = clusterConfig.TLSClientConfig.CAFile
		overrides.ClusterInfo.InsecureSkipTLSVerify = true
	} else {
		overrides.ClusterInfo.InsecureSkipTLSVerify = true
	}

	config := kclientcmd.NewDefaultClientConfig(*kclientcmdapi.NewConfig(), overrides)

	factory := clientcmd.NewFactory(config)
	return factory, nil
}
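// Illustrative sketch (not from the original source): one way a factory produced by
// getFactoryFromCluster above might be consumed. The Clients() call mirrors how the
// factory is used in the snippets below; the surrounding function name and the minimal
// error handling are assumptions for illustration only.
func exampleUseInClusterFactory() error {
	factory, err := getFactoryFromCluster()
	if err != nil {
		return fmt.Errorf("not running in a cluster: %v", err)
	}
	// Clients() returns an OpenShift client and a Kubernetes client.
	osClient, kubeClient, err := factory.Clients()
	if err != nil {
		return err
	}
	_ = osClient
	_ = kubeClient
	return nil
}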
// makes the client from the specified context and determines whether it is a cluster-admin.
func (o DiagnosticsOptions) makeClusterClients(rawConfig *clientcmdapi.Config, contextName string, context *clientcmdapi.Context) (*client.Client, *kclient.Client, bool, error) {
	overrides := &clientcmd.ConfigOverrides{Context: *context}
	clientConfig := clientcmd.NewDefaultClientConfig(*rawConfig, overrides)
	factory := osclientcmd.NewFactory(clientConfig)

	o.Logger.Debug("CED1005", fmt.Sprintf("Checking if context is cluster-admin: '%s'", contextName))
	if osClient, kubeClient, err := factory.Clients(); err != nil {
		o.Logger.Debug("CED1006", fmt.Sprintf("Error creating client for context '%s':\n%v", contextName, err))
		return nil, nil, false, nil
	} else {
		subjectAccessReview := authorizationapi.SubjectAccessReview{Action: authorizationapi.AuthorizationAttributes{
			// if you can do everything, you're the cluster admin.
			Verb:     "*",
			Resource: "*",
		}}
		if resp, err := osClient.SubjectAccessReviews().Create(&subjectAccessReview); err != nil {
			if regexp.MustCompile(`User "[\w:]+" cannot create \w+ at the cluster scope`).MatchString(err.Error()) {
				o.Logger.Debug("CED1007", fmt.Sprintf("Context '%s' does not have cluster-admin access:\n%v", contextName, err))
				return nil, nil, false, nil
			} else {
				o.Logger.Error("CED1008", fmt.Sprintf("Unknown error testing cluster-admin access for context '%s':\n%v", contextName, err))
				return nil, nil, false, err
			}
		} else if resp.Allowed {
			o.Logger.Info("CED1009", fmt.Sprintf("Using context for cluster-admin access: '%s'", contextName))
			return osClient, kubeClient, true, nil
		}
	}

	o.Logger.Debug("CED1010", fmt.Sprintf("Context does not have cluster-admin access: '%s'", contextName))
	return nil, nil, false, nil
}
func TestSetWithPathPrefixIntoExistingStruct(t *testing.T) {
	expectedConfig := newRedFederalCowHammerConfig()
	expectedConfig.Clusters["cow-cluster"].Server = "http://cow.org:8080/foo/baz"
	test := configCommandTest{
		args:           []string{"set", "clusters.cow-cluster.server", "http://cow.org:8080/foo/baz"},
		startingConfig: newRedFederalCowHammerConfig(),
		expectedConfig: expectedConfig,
	}

	test.run(t)

	dc := clientcmd.NewDefaultClientConfig(expectedConfig, &clientcmd.ConfigOverrides{})
	dcc, err := dc.ClientConfig()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	expectedHost := "http://cow.org:8080"
	if expectedHost != dcc.Host {
		t.Fatalf("expected client.Config.Host = %q instead of %q", expectedHost, dcc.Host)
	}
	expectedPrefix := "/foo/baz"
	if expectedPrefix != dcc.Prefix {
		t.Fatalf("expected client.Config.Prefix = %q instead of %q", expectedPrefix, dcc.Prefix)
	}
}
func TestNewFactoryNoFlagBindings(t *testing.T) {
	clientConfig := clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), &clientcmd.ConfigOverrides{})
	factory := NewFactory(clientConfig)

	if factory.flags.HasFlags() {
		t.Errorf("Expected zero flags, but got %v", factory.flags)
	}
}
// NewFactory builds a new openshift client factory from the given config
func NewFactory(config *kclient.Config) *clientcmd.Factory {
	// keep only what we need to initialize a factory
	clientConfig := kclientcmd.NewDefaultClientConfig(
		*kclientcmdapi.NewConfig(),
		&kclientcmd.ConfigOverrides{
			ClusterInfo: kclientcmdapi.Cluster{
				Server:                config.Host,
				APIVersion:            config.Version,
				InsecureSkipTLSVerify: config.Insecure,
			},
			AuthInfo: kclientcmdapi.AuthInfo{
				Token: config.BearerToken,
			},
			Context: kclientcmdapi.Context{},
		})

	factory := clientcmd.NewFactory(clientConfig)
	return factory
}
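// Illustrative sketch (not from the original source): NewFactory above only copies Host,
// Version, Insecure and BearerToken out of a *kclient.Config, so it can be fed either an
// in-cluster config or one assembled by hand. The function name below is hypothetical;
// the package aliases are assumed to match the snippet above.
func exampleNewFactoryFromInCluster() (*clientcmd.Factory, error) {
	cfg, err := kclient.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return NewFactory(cfg), nil
}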
func clientFromConfig(path string) (*kclient.Config, string, error) {
	if path == "-" {
		cfg, err := kclient.InClusterConfig()
		if err != nil {
			return nil, "", fmt.Errorf("cluster config not available: %v", err)
		}
		return cfg, "", nil
	}

	rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: path}
	credentials, err := rules.Load()
	if err != nil {
		return nil, "", fmt.Errorf("the provided credentials %q could not be loaded: %v", path, err)
	}
	cfg := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{})
	config, err := cfg.ClientConfig()
	if err != nil {
		return nil, "", fmt.Errorf("the provided credentials %q could not be used: %v", path, err)
	}
	namespace, _, _ := cfg.Namespace()
	return config, namespace, nil
}
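// Illustrative sketch (not from the original source): how clientFromConfig above might be
// called. Passing "-" selects the in-cluster configuration; any other value is treated as
// an explicit kubeconfig path. The wrapper name is hypothetical; kclient.New is the same
// client constructor used in the snippets below.
func exampleClientFromConfig(path string) (*kclient.Client, string, error) {
	config, namespace, err := clientFromConfig(path)
	if err != nil {
		return nil, "", err
	}
	c, err := kclient.New(config)
	if err != nil {
		return nil, "", err
	}
	return c, namespace, nil
}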
func getKubeClient() (string, *kclient.Client, error) {
	c, err := kclientcmd.LoadFromFile(*kubeConfig)
	if err != nil {
		return "", nil, fmt.Errorf("error loading kubeConfig: %v", err.Error())
	}
	config, err := kclientcmd.NewDefaultClientConfig(
		*c,
		&kclientcmd.ConfigOverrides{
			ClusterInfo: kclientcmdapi.Cluster{
				APIVersion: "v1",
			},
		}).ClientConfig()
	if err != nil {
		return "", nil, fmt.Errorf("error parsing kubeConfig: %v", err.Error())
	}
	kubeClient, err := kclient.New(config)
	if err != nil {
		return "", nil, fmt.Errorf("error creating client - %q", err)
	}

	return c.Clusters[c.CurrentContext].Server, kubeClient, nil
}
// Get kube client configuration from a file containing credentials for
// connecting to the master.
func getClientConfig(path string) (*kclient.Config, error) {
	if 0 == len(path) {
		return nil, fmt.Errorf("You must specify a .kubeconfig file path containing credentials for connecting to the master with --credentials")
	}
	rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: path, Precedence: []string{}}
	credentials, err := rules.Load()
	if err != nil {
		return nil, fmt.Errorf("Could not load credentials from %q: %v", path, err)
	}

	config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("Credentials %q error: %v", path, err)
	}

	if err := kclient.LoadTLSFiles(config); err != nil {
		return nil, fmt.Errorf("Unable to load certificate info using credentials from %q: %v", path, err)
	}
	return config, nil
}
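// Illustrative sketch (not from the original source): a typical caller of getClientConfig
// above, turning a --credentials style kubeconfig path into a usable Kubernetes client.
// The function name is hypothetical; kclient.New is the constructor already used in the
// surrounding snippets.
func exampleClientFromCredentials(credentialsPath string) (*kclient.Client, error) {
	config, err := getClientConfig(credentialsPath)
	if err != nil {
		return nil, err
	}
	return kclient.New(config)
}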
// RunCmdRouter contains all the necessary functionality for the OpenShift cli router command
func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error {
	var name string
	switch len(args) {
	case 0:
		name = "router"
	case 1:
		name = args[0]
	default:
		return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router")
	}

	if len(cfg.StatsUsername) > 0 {
		if strings.Contains(cfg.StatsUsername, ":") {
			return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername)
		}
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		glog.Fatal(err)
	}

	// For the host networking case, ensure the ports match.
	if cfg.HostNetwork {
		for i := 0; i < len(ports); i++ {
			if ports[i].ContainerPort != ports[i].HostPort {
				return cmdutil.UsageError(cmd, "For host networking mode, please ensure that the container [%v] and host [%v] ports match", ports[i].ContainerPort, ports[i].HostPort)
			}
		}
	}

	if cfg.StatsPort > 0 {
		ports = append(ports, kapi.ContainerPort{
			Name:          "stats",
			HostPort:      cfg.StatsPort,
			ContainerPort: cfg.StatsPort,
			Protocol:      kapi.ProtocolTCP,
		})
	}

	label := map[string]string{"router": name}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	_, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}

	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing router %q: %v", name, err)
			}
			generate = true
		}
	}

	if generate {
		if cfg.DryRun && !output {
			return fmt.Errorf("router %q does not exist (no service)", name)
		}

		if len(cfg.ServiceAccount) == 0 {
			return fmt.Errorf("router could not be created; you must specify a service account with --service-account")
		}

		err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount)
		if err != nil {
			return fmt.Errorf("router could not be created; %v", err)
		}

		// create new router
		if len(cfg.Credentials) == 0 {
			return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials")
		}

		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := kclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}

		insecure := "false"
		if config.Insecure {
			insecure = "true"
		}

		defaultCert, err := loadDefaultCert(cfg.DefaultCertificate)
		if err != nil {
			return fmt.Errorf("router could not be created; error reading default certificate file: %v", err)
		}

		if len(cfg.StatsPassword) == 0 {
			cfg.StatsPassword = generateStatsPassword()
			fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword)
		}

		env := app.Environment{
			"OPENSHIFT_MASTER":         config.Host,
			"OPENSHIFT_CA_DATA":        string(config.CAData),
			"OPENSHIFT_KEY_DATA":       string(config.KeyData),
			"OPENSHIFT_CERT_DATA":      string(config.CertData),
			"OPENSHIFT_INSECURE":       insecure,
			"DEFAULT_CERTIFICATE":      defaultCert,
			"ROUTER_SERVICE_NAME":      name,
			"ROUTER_SERVICE_NAMESPACE": namespace,
			"STATS_PORT":               strconv.Itoa(cfg.StatsPort),
			"STATS_USERNAME":           cfg.StatsUsername,
			"STATS_PASSWORD":           cfg.StatsPassword,
		}

		updatePercent := int(-10)

		objects := []runtime.Object{
			&dapi.DeploymentConfig{
				ObjectMeta: kapi.ObjectMeta{
					Name:   name,
					Labels: label,
				},
				Triggers: []dapi.DeploymentTriggerPolicy{
					{Type: dapi.DeploymentTriggerOnConfigChange},
				},
				Template: dapi.DeploymentTemplate{
					Strategy: dapi.DeploymentStrategy{
						Type:          dapi.DeploymentStrategyTypeRolling,
						RollingParams: &dapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent},
					},
					ControllerTemplate: kapi.ReplicationControllerSpec{
						Replicas: cfg.Replicas,
						Selector: label,
						Template: &kapi.PodTemplateSpec{
							ObjectMeta: kapi.ObjectMeta{Labels: label},
							Spec: kapi.PodSpec{
								HostNetwork:        cfg.HostNetwork,
								ServiceAccountName: cfg.ServiceAccount,
								NodeSelector:       nodeSelector,
								Containers: []kapi.Container{
									{
										Name:  "router",
										Image: image,
										Ports: ports,
										Env:   env.List(),
										LivenessProbe: &kapi.Probe{
											Handler: kapi.Handler{
												TCPSocket: &kapi.TCPSocketAction{
													Port: kutil.IntOrString{
														IntVal: ports[0].ContainerPort,
													},
												},
											},
											InitialDelaySeconds: 10,
										},
										ImagePullPolicy: kapi.PullIfNotPresent,
									},
								},
							},
						},
					},
				},
			},
		}

		objects = app.AddServices(objects, true)
		// TODO: label all created objects with the same label - router=<name>
		list := &kapi.List{Items: objects}

		if output {
			if err := f.PrintObject(cmd, list, out); err != nil {
				return fmt.Errorf("Unable to print object: %v", err)
			}
			return nil
		}

		mapper, typer := f.Factory.Object()
		bulk := configcmd.Bulk{
			Mapper:            mapper,
			Typer:             typer,
			RESTClientFactory: f.Factory.RESTClient,
			After:             configcmd.NewPrintNameOrErrorAfter(out, os.Stderr),
		}
		if errs := bulk.Create(list, namespace); len(errs) != 0 {
			return errExit
		}
		return nil
	}

	fmt.Fprintf(out, "Router %q service exists\n", name)
	return nil
}
// Negotiate a bearer token with the auth server, or try to reuse one based on the // information already present. In case of any missing information, ask for user input // (usually username and password, interactive depending on the Reader). func (o *LoginOptions) gatherAuthInfo() error { directClientConfig, err := o.getClientConfig() if err != nil { return err } // make a copy and use it to avoid mutating the original t := *directClientConfig clientConfig := &t // if a token were explicitly provided, try to use it if o.tokenProvided() { clientConfig.BearerToken = o.Token if osClient, err := client.New(clientConfig); err == nil { me, err := whoAmI(osClient) if err == nil { o.Username = me.Name o.Config = clientConfig fmt.Fprintf(o.Out, "Logged into %q as %q using the token provided.\n\n", o.Config.Host, o.Username) return nil } if !kerrors.IsUnauthorized(err) { return err } fmt.Fprint(o.Out, "The token provided is invalid (probably expired).\n\n") } } // if a token was provided try to make use of it // make sure we have a username before continuing if !o.usernameProvided() { if cmdutil.IsTerminal(o.Reader) { for !o.usernameProvided() { o.Username = cmdutil.PromptForString(o.Reader, "Username: "******"Already logged into %q as %q.\n\n", o.Config.Host, o.Username) } return nil } } } } } // if kubeconfig doesn't already have a matching user stanza... clientConfig.BearerToken = "" clientConfig.CertData = []byte{} clientConfig.KeyData = []byte{} clientConfig.CertFile = o.CertFile clientConfig.KeyFile = o.KeyFile token, err := tokencmd.RequestToken(o.Config, o.Reader, o.Username, o.Password) if err != nil { return err } clientConfig.BearerToken = token osClient, err := client.New(clientConfig) if err != nil { return err } me, err := whoAmI(osClient) if err != nil { return err } o.Username = me.Name o.Config = clientConfig fmt.Fprint(o.Out, "Login successful.\n\n") return nil }
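// Not part of the snippet above: the whoAmI helper it calls is defined elsewhere in the
// original package. A plausible implementation, based on the OpenShift convention that
// fetching the virtual user named "~" returns the currently authenticated user, might look
// like the following. The userapi alias is an assumption for the OpenShift user API package.
func whoAmI(osClient *client.Client) (*userapi.User, error) {
	me, err := osClient.Users().Get("~")
	if err != nil {
		return nil, err
	}
	return me, nil
}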
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error { var name string switch len(args) { case 0: name = "docker-registry" default: return cmdutil.UsageError(cmd, "No arguments are allowed to this command") } ports, err := app.ContainerPortsFromString(cfg.Ports) if err != nil { return err } label := map[string]string{ "docker-registry": "default", } if cfg.Labels != defaultLabel { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels) } label = valid } nodeSelector := map[string]string{} if len(cfg.Selector) > 0 { valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ",")) if err != nil { return err } if len(remove) > 0 { return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector) } nodeSelector = valid } image := cfg.ImageTemplate.ExpandOrDie(cfg.Type) namespace, _, err := f.OpenShiftClientConfig.Namespace() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, kClient, err := f.Clients() if err != nil { return fmt.Errorf("error getting client: %v", err) } _, output, err := cmdutil.PrinterForCommand(cmd) if err != nil { return fmt.Errorf("unable to configure printer: %v", err) } generate := output if !generate { _, err = kClient.Services(namespace).Get(name) if err != nil { if !errors.IsNotFound(err) { return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err) } generate = true } } if generate { if cfg.DryRun && !output { return fmt.Errorf("docker-registry %q does not exist (no service).", name) } // create new registry if len(cfg.Credentials) == 0 { return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials") } clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials} credentials, err := clientConfigLoadingRules.Load() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err) } if err := kclient.LoadTLSFiles(config); err != nil { return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err) } insecure := "false" if config.Insecure { insecure = "true" } else { if len(config.KeyData) == 0 || len(config.CertData) == 0 { return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials) } } env := app.Environment{ "OPENSHIFT_MASTER": config.Host, "OPENSHIFT_CA_DATA": string(config.CAData), "OPENSHIFT_KEY_DATA": string(config.KeyData), "OPENSHIFT_CERT_DATA": string(config.CertData), "OPENSHIFT_INSECURE": insecure, } mountHost := len(cfg.HostMount) > 0 podTemplate := &kapi.PodTemplateSpec{ ObjectMeta: kapi.ObjectMeta{Labels: label}, Spec: kapi.PodSpec{ ServiceAccountName: cfg.ServiceAccount, NodeSelector: nodeSelector, Containers: []kapi.Container{ { Name: "registry", Image: image, Ports: ports, 
Env: env.List(), VolumeMounts: []kapi.VolumeMount{ { Name: "registry-storage", MountPath: cfg.Volume, }, }, SecurityContext: &kapi.SecurityContext{ Privileged: &mountHost, }, // TODO reenable the liveness probe when we no longer support the v1 registry. /* LivenessProbe: &kapi.Probe{ InitialDelaySeconds: 3, TimeoutSeconds: 5, Handler: kapi.Handler{ HTTPGet: &kapi.HTTPGetAction{ Path: "/healthz", Port: util.NewIntOrStringFromInt(5000), }, }, }, */ }, }, Volumes: []kapi.Volume{ { Name: "registry-storage", VolumeSource: kapi.VolumeSource{}, }, }, }, } if mountHost { podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount} } else { podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{} } objects := []runtime.Object{ &dapi.DeploymentConfig{ ObjectMeta: kapi.ObjectMeta{ Name: name, Labels: label, }, Triggers: []dapi.DeploymentTriggerPolicy{ {Type: dapi.DeploymentTriggerOnConfigChange}, }, Template: dapi.DeploymentTemplate{ ControllerTemplate: kapi.ReplicationControllerSpec{ Replicas: cfg.Replicas, Selector: label, Template: podTemplate, }, }, }, } objects = app.AddServices(objects, true) // TODO: label all created objects with the same label list := &kapi.List{Items: objects} if output { if err := f.PrintObject(cmd, list, out); err != nil { return fmt.Errorf("unable to print object: %v", err) } return nil } mapper, typer := f.Factory.Object() bulk := configcmd.Bulk{ Mapper: mapper, Typer: typer, RESTClientFactory: f.Factory.RESTClient, After: configcmd.NewPrintNameOrErrorAfter(out, os.Stderr), } if errs := bulk.Create(list, namespace); len(errs) != 0 { return errExit } return nil } fmt.Fprintf(out, "Docker registry %q service exists\n", name) return nil }
func main() { flag.Parse() glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d", *queriesAverage, *podsPerNode, *upTo) var spec string if *gke != "" { spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "kubernetes", "kubeconfig") } else { spec = filepath.Join(os.Getenv("HOME"), ".kube", "config") } settings, err := clientcmd.LoadFromFile(spec) if err != nil { glog.Fatalf("Error loading configuration: %v", err.Error()) } if *gke != "" { settings.CurrentContext = *gke } config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { glog.Fatalf("Failed to construct config: %v", err) } c, err := client.New(config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } var nodes *api.NodeList for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) { nodes, err = c.Nodes().List(labels.Everything(), fields.Everything()) if err == nil { break } glog.Warningf("Failed to list nodes: %v", err) } if err != nil { glog.Fatalf("Giving up trying to list nodes: %v", err) } if len(nodes.Items) == 0 { glog.Fatalf("Failed to find any nodes.") } glog.Infof("Found %d nodes on this cluster:", len(nodes.Items)) for i, node := range nodes.Items { glog.Infof("%d: %s", i, node.Name) } queries := *queriesAverage * len(nodes.Items) * *podsPerNode // Create the namespace got, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}}) if err != nil { glog.Fatalf("Failed to create namespace: %v", err) } ns := got.Name defer func(ns string) { if err := c.Namespaces().Delete(ns); err != nil { glog.Warningf("Failed to delete namespace ns: %e", ns, err) } }(ns) glog.Infof("Created namespace %s", ns) // Create a service for these pods. glog.Infof("Creating service %s/serve-hostnames", ns) // Make several attempts to create a service. var svc *api.Service for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) { t := time.Now() svc, err = c.Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: "serve-hostnames", Labels: map[string]string{ "name": "serve-hostname", }, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Protocol: "TCP", Port: 9376, TargetPort: util.NewIntOrStringFromInt(9376), }}, Selector: map[string]string{ "name": "serve-hostname", }, }, }) glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) if err == nil { break } glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err) } if err != nil { glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err) return } // Clean up service defer func() { glog.Infof("Cleaning up service %s/serve-hostnames", ns) // Make several attempts to delete the service. for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err := c.Services(ns).Delete(svc.Name); err == nil { return } glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) } }() // Put serve-hostname pods on each node. 
podNames := []string{} for i, node := range nodes.Items { for j := 0; j < *podsPerNode; j++ { podName := fmt.Sprintf("serve-hostname-%d-%d", i, j) podNames = append(podNames, podName) // Make several attempts for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) { glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) t := time.Now() _, err = c.Pods(ns).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, Labels: map[string]string{ "name": "serve-hostname", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "serve-hostname", Image: "gcr.io/google_containers/serve_hostname:1.1", Ports: []api.ContainerPort{{ContainerPort: 9376}}, }, }, NodeName: node.Name, }, }) glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) if err == nil { break } glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err) } if err != nil { glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err) return } } } // Clean up the pods defer func() { glog.Info("Cleaning up pods") // Make several attempts to delete the pods. for _, podName := range podNames { for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err = c.Pods(ns).Delete(podName, nil); err == nil { break } glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) } } }() glog.Info("Waiting for the serve-hostname pods to be ready") for _, podName := range podNames { var pod *api.Pod for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) { pod, err = c.Pods(ns).Get(podName) if err != nil { glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) continue } if pod.Status.Phase == api.PodRunning { break } } if pod.Status.Phase != api.PodRunning { glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase) } else { glog.Infof("%s/%s is running", ns, podName) } } // Wait for the endpoints to propagate. for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) { hostname, err := c.Get(). Namespace(ns). Prefix("proxy"). Resource("services"). Name("serve-hostnames"). DoRaw() if err != nil { glog.Infof("After %v while making a proxy call got error %v", time.Since(start), err) continue } var r api.Status if err := api.Scheme.DecodeInto(hostname, &r); err != nil { break } if r.Status == api.StatusFailure { glog.Infof("After %v got status %v", time.Since(start), string(hostname)) continue } break } // Repeatedly make requests. for iteration := 0; iteration != *upTo; iteration++ { responseChan := make(chan string, queries) // Use a channel of size *maxPar to throttle the number // of in-flight requests to avoid overloading the service. inFlight := make(chan struct{}, *maxPar) start := time.Now() for q := 0; q < queries; q++ { go func(i int, query int) { inFlight <- struct{}{} t := time.Now() hostname, err := c.Get(). Namespace(ns). Prefix("proxy"). Resource("services"). Name("serve-hostnames"). DoRaw() glog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t)) if err != nil { glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err) // If the query failed return a string which starts with a character // that can't be part of a hostname. 
responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err) } else { responseChan <- string(hostname) } <-inFlight }(iteration, q) } responses := make(map[string]int, *podsPerNode*len(nodes.Items)) missing := 0 for q := 0; q < queries; q++ { r := <-responseChan glog.V(4).Infof("Got response from %s", r) responses[r]++ // If the returned hostname starts with '!' then it indicates // an error response. if len(r) > 0 && r[0] == '!' { glog.V(3).Infof("Got response %s", r) missing++ } } if missing > 0 { glog.Warningf("Missing %d responses out of %d", missing, queries) } // Report any nodes that did not respond. for n, node := range nodes.Items { for i := 0; i < *podsPerNode; i++ { name := fmt.Sprintf("serve-hostname-%d-%d", n, i) if _, ok := responses[name]; !ok { glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) } } } glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing", iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing) } }
func (d ConfigContext) Check() types.DiagnosticResult {
	r := types.NewDiagnosticResult(ConfigContextsName)

	isDefaultContext := d.RawConfig.CurrentContext == d.ContextName

	// prepare bad news message
	errorKey := "DCli0001"
	unusableLine := fmt.Sprintf("The client config context '%s' is unusable", d.ContextName)
	if isDefaultContext {
		errorKey = "DCli0002"
		unusableLine = fmt.Sprintf("The current client config context '%s' is unusable", d.ContextName)
	}

	// check that the context and its constituents are defined in the kubeconfig
	context, exists := d.RawConfig.Contexts[d.ContextName]
	if !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' is not defined.", unusableLine, d.ContextName))
		return r
	}
	clusterName := context.Cluster
	cluster, exists := d.RawConfig.Clusters[clusterName]
	if !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' has a cluster '%s' which is not defined.", unusableLine, d.ContextName, clusterName))
		return r
	}
	authName := context.AuthInfo
	if _, exists := d.RawConfig.AuthInfos[authName]; !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' has a user '%s' which is not defined.", unusableLine, d.ContextName, authName))
		return r
	}

	// we found a fully-defined context
	project := context.Namespace
	if project == "" {
		project = kapi.NamespaceDefault // k8s fills this in anyway if missing from the context
	}
	msgText := contextDesc
	if isDefaultContext {
		msgText = currContextDesc
	}
	msgText = fmt.Sprintf(msgText, d.ContextName, cluster.Server, authName, project)

	// Actually send a request to see if context has connectivity.
	// Note: we cannot reuse factories as they cache the clients, so build new factory for each context.
	osClient, _, err := osclientcmd.NewFactory(kclientcmd.NewDefaultClientConfig(*d.RawConfig, &kclientcmd.ConfigOverrides{Context: *context})).Clients()
	// client create now *fails* if cannot connect to server; so, address connectivity errors below
	if err == nil {
		if projects, projerr := osClient.Projects().List(labels.Everything(), fields.Everything()); projerr != nil {
			err = projerr
		} else { // success!
			list := []string{}
			for i, project := range projects.Items {
				if i > 9 {
					list = append(list, "...")
					break
				}
				list = append(list, project.Name)
			}
			if len(list) == 0 {
				r.Info("DCli0003", msgText+"Successfully requested project list, but it is empty, so user has no access to anything.")
			} else {
				r.Info("DCli0004", msgText+fmt.Sprintf("Successfully requested project list; has access to project(s):\n %v", list))
			}
			return r
		}
	}

	// something went wrong; couldn't create client or get project list.
	// interpret the terse error messages with helpful info.
	errMsg := err.Error()
	errFull := fmt.Sprintf("(%T) %[1]v\n", err)
	var reason, errId string
	switch {
	case regexp.MustCompile("dial tcp: lookup (\\S+): no such host").MatchString(errMsg):
		errId, reason = "DCli0005", clientNoResolve
	case strings.Contains(errMsg, "x509: certificate signed by unknown authority"):
		errId, reason = "DCli0006", clientUnknownCa
	case strings.Contains(errMsg, "specifying a root certificates file with the insecure flag is not allowed"):
		errId, reason = "DCli0007", clientUnneededCa
	case invalidCertNameRx.MatchString(errMsg):
		match := invalidCertNameRx.FindStringSubmatch(errMsg)
		serverHost := match[len(match)-1]
		errId, reason = "DCli0008", fmt.Sprintf(clientInvCertName, serverHost)
	case regexp.MustCompile("dial tcp (\\S+): connection refused").MatchString(errMsg):
		errId, reason = "DCli0009", clientConnRefused
	case regexp.MustCompile("dial tcp (\\S+): (?:connection timed out|i/o timeout|no route to host)").MatchString(errMsg):
		errId, reason = "DCli0010", clientConnTimeout
	case strings.Contains(errMsg, "malformed HTTP response"):
		errId, reason = "DCli0011", clientMalformedHTTP
	case strings.Contains(errMsg, "tls: oversized record received with length"):
		errId, reason = "DCli0012", clientMalformedTLS
	case strings.Contains(errMsg, `User "system:anonymous" cannot`):
		errId, reason = "DCli0013", clientUnauthn
	case strings.Contains(errMsg, "provide credentials"):
		errId, reason = "DCli0014", clientUnauthz
	default:
		errId, reason = "DCli0015", `Diagnostics does not have an explanation for what this means. Please report this error so one can be added.`
	}
	r.Error(errId, err, msgText+errFull+reason)
	return r
}