// PerformTLSBootstrap creates a RESTful client in order to execute certificate signing request. func PerformTLSBootstrap(s *kubeadmapi.KubeadmConfig, apiEndpoint string, caCert []byte) (*clientcmdapi.Config, error) { // TODO(phase1+) try all the api servers until we find one that works bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", apiEndpoint, caCert) hostName, err := os.Hostname() if err != nil { return nil, fmt.Errorf("<node/csr> failed to get node hostname [%v]", err) } // TODO: hostname == nodename doesn't hold on all clouds (AWS). // But we don't have a cloudprovider, so we're stuck. glog.Errorf("assuming that hostname is the same as NodeName") nodeName := types.NodeName(hostName) bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig( *kubeadmutil.MakeClientConfigWithToken( bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), s.Secrets.BearerToken, ), &clientcmd.ConfigOverrides{}, ).ClientConfig() if err != nil { return nil, fmt.Errorf("<node/csr> failed to create API client configuration [%v]", err) } client, err := unversionedcertificates.NewForConfig(bootstrapClientConfig) if err != nil { return nil, fmt.Errorf("<node/csr> failed to create API client [%v]", err) } csrClient := client.CertificateSigningRequests() // TODO(phase1+) checkCertsAPI() has a side-effect of making first attempt of communicating with the API, // we should _make it more explicit_ and have a user-settable _retry timeout_ to account for potential connectivity issues // (for example user may be bringing up machines in parallel and for some reasons master is slow to boot) if err := checkCertsAPI(bootstrapClientConfig); err != nil { return nil, fmt.Errorf("<node/csr> failed to proceed due to API compatibility issue - %v", err) } fmt.Println("<node/csr> created API client to obtain unique certificate for this node, generating keys and certificate signing request") key, err := certutil.MakeEllipticPrivateKeyPEM() if err != nil { return nil, 
fmt.Errorf("<node/csr> failed to generating private key [%v]", err) } cert, err := csr.RequestNodeCertificate(csrClient, key, nodeName) if err != nil { return nil, fmt.Errorf("<node/csr> failed to request signed certificate from the API server [%v]", err) } // TODO(phase1+) print some basic info about the cert fmt.Println("<node/csr> received signed certificate from the API server, generating kubelet configuration") finalConfig := kubeadmutil.MakeClientConfigWithCerts( bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), key, cert, ) return finalConfig, nil }
// makes the client from the specified context and determines whether it is a cluster-admin.
// Returns (osClient, kubeClient, isClusterAdmin, serverURL, err). Note that most failure
// paths deliberately return a nil error so the caller can continue diagnosing other
// contexts; only an unrecognized SubjectAccessReview error is propagated.
func (o DiagnosticsOptions) makeClusterClients(rawConfig *clientcmdapi.Config, contextName string, context *clientcmdapi.Context) (*client.Client, *kclientset.Clientset, bool, string, error) {
	overrides := &clientcmd.ConfigOverrides{Context: *context}
	clientConfig := clientcmd.NewDefaultClientConfig(*rawConfig, overrides)
	serverUrl := rawConfig.Clusters[context.Cluster].Server
	factory := osclientcmd.NewFactory(clientConfig)
	o.Logger.Debug("CED1005", fmt.Sprintf("Checking if context is cluster-admin: '%s'", contextName))
	if osClient, _, kubeClient, err := factory.Clients(); err != nil {
		// Client construction failed: log at debug level and swallow the error
		// (treated as "not usable", not fatal for diagnostics).
		o.Logger.Debug("CED1006", fmt.Sprintf("Error creating client for context '%s':\n%v", contextName, err))
		return nil, nil, false, "", nil
	} else {
		subjectAccessReview := authorizationapi.SubjectAccessReview{Action: authorizationapi.Action{
			// if you can do everything, you're the cluster admin.
			Verb:     "*",
			Group:    "*",
			Resource: "*",
		}}
		if resp, err := osClient.SubjectAccessReviews().Create(&subjectAccessReview); err != nil {
			// A "cannot create ... at the cluster scope" message means this context is a
			// valid but non-admin user; anything else is an unexpected failure.
			// NOTE(review): the regexp is recompiled on every call — presumably cheap
			// enough here, but could be hoisted to a package-level var.
			if regexp.MustCompile(`User "[\w:]+" cannot create \w+ at the cluster scope`).MatchString(err.Error()) {
				o.Logger.Debug("CED1007", fmt.Sprintf("Context '%s' does not have cluster-admin access:\n%v", contextName, err))
				return nil, nil, false, "", nil
			} else {
				o.Logger.Error("CED1008", fmt.Sprintf("Unknown error testing cluster-admin access for context '%s':\n%v", contextName, err))
				return nil, nil, false, "", err
			}
		} else if resp.Allowed {
			o.Logger.Info("CED1009", fmt.Sprintf("Using context for cluster-admin access: '%s'", contextName))
			return osClient, kubeClient, true, serverUrl, nil
		}
	}
	o.Logger.Debug("CED1010", fmt.Sprintf("Context does not have cluster-admin access: '%s'", contextName))
	return nil, nil, false, "", nil
}
func TestSetWithPathPrefixIntoExistingStruct(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() expectedConfig.Clusters["cow-cluster"].Server = "http://cow.org:8080/foo/baz" test := configCommandTest{ args: []string{"set", "clusters.cow-cluster.server", "http://cow.org:8080/foo/baz"}, startingConfig: newRedFederalCowHammerConfig(), expectedConfig: expectedConfig, } test.run(t) dc := clientcmd.NewDefaultClientConfig(expectedConfig, &clientcmd.ConfigOverrides{}) dcc, err := dc.ClientConfig() if err != nil { t.Fatalf("unexpected error: %v", err) } expectedHost := "http://cow.org:8080" if expectedHost != dcc.Host { t.Fatalf("expected client.Config.Host = %q instead of %q", expectedHost, dcc.Host) } expectedPrefix := "/foo/baz" if expectedPrefix != dcc.Prefix { t.Fatalf("expected client.Config.Prefix = %q instead of %q", expectedPrefix, dcc.Prefix) } }
func getKubeClient() (string, *kclient.Client, error) { c, err := kclientcmd.LoadFromFile(*kubeConfig) if err != nil { return "", nil, fmt.Errorf("error loading kubeConfig: %v", err.Error()) } if c.CurrentContext == "" || len(c.Clusters) == 0 { return "", nil, fmt.Errorf("invalid kubeConfig: %+v", *c) } config, err := kclientcmd.NewDefaultClientConfig( *c, &kclientcmd.ConfigOverrides{ ClusterInfo: kclientcmdkapi.Cluster{ APIVersion: "v1", }, }).ClientConfig() if err != nil { return "", nil, fmt.Errorf("error parsing kubeConfig: %v", err.Error()) } kubeClient, err := kclient.New(config) if err != nil { return "", nil, fmt.Errorf("error creating client - %q", err) } return c.Clusters[c.CurrentContext].Server, kubeClient, nil }
func loggedInUserFactory() (*clientcmd.Factory, error) { cfg, err := kclientcmd.LoadFromFile(config.RecommendedHomeFile) if err != nil { return nil, err } defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, &kclientcmd.ConfigOverrides{}) return clientcmd.NewFactory(defaultCfg), nil }
func loggedInUserFactory() (*clientcmd.Factory, error) { cfg, err := config.NewOpenShiftClientConfigLoadingRules().Load() if err != nil { return nil, err } defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, &kclientcmd.ConfigOverrides{}) return clientcmd.NewFactory(defaultCfg), nil }
func TestNewFactoryNoFlagBindings(t *testing.T) { clientConfig := clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), &clientcmd.ConfigOverrides{}) factory := NewFactory(clientConfig) if factory.flags.HasFlags() { t.Errorf("Expected zero flags, but got %v", factory.flags) } }
// PerformTLSBootstrap creates a RESTful client in order to execute certificate signing request. func PerformTLSBootstrap(s *kubeadmapi.NodeConfiguration, apiEndpoint string, caCert []byte) (*clientcmdapi.Config, error) { // TODO(phase1+) try all the api servers until we find one that works bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", apiEndpoint, caCert) hostName, err := os.Hostname() if err != nil { return nil, fmt.Errorf("<node/csr> failed to get node hostname [%v]", err) } // TODO(phase1+) https://github.com/kubernetes/kubernetes/issues/33641 nodeName := types.NodeName(hostName) bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig( *kubeadmutil.MakeClientConfigWithToken( bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), s.Secrets.BearerToken, ), &clientcmd.ConfigOverrides{}, ).ClientConfig() if err != nil { return nil, fmt.Errorf("<node/csr> failed to create API client configuration [%v]", err) } client, err := unversionedcertificates.NewForConfig(bootstrapClientConfig) if err != nil { return nil, fmt.Errorf("<node/csr> failed to create API client [%v]", err) } csrClient := client.CertificateSigningRequests() // TODO(phase1+) https://github.com/kubernetes/kubernetes/issues/33643 if err := checkCertsAPI(bootstrapClientConfig); err != nil { return nil, fmt.Errorf("<node/csr> failed to proceed due to API compatibility issue - %v", err) } fmt.Println("<node/csr> created API client to obtain unique certificate for this node, generating keys and certificate signing request") key, err := certutil.MakeEllipticPrivateKeyPEM() if err != nil { return nil, fmt.Errorf("<node/csr> failed to generating private key [%v]", err) } cert, err := csr.RequestNodeCertificate(csrClient, key, nodeName) if err != nil { return nil, fmt.Errorf("<node/csr> failed to request signed certificate from the API server [%v]", err) } fmtCert, err := certutil.FormatBytesCert(cert) if err != nil { return nil, fmt.Errorf("<node/csr> failed to 
format certificate [%v]", err) } fmt.Printf("<node/csr> received signed certificate from the API server:\n%s\n", fmtCert) fmt.Println("<node/csr> generating kubelet configuration") finalConfig := kubeadmutil.MakeClientConfigWithCerts( bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), key, cert, ) return finalConfig, nil }
// Factory returns a command factory that works with OpenShift server's admin credentials func (c *ClientStartConfig) Factory() (*clientcmd.Factory, error) { if c.factory == nil { cfg, err := kclientcmd.LoadFromFile(filepath.Join(c.LocalConfigDir, "master", "admin.kubeconfig")) if err != nil { return nil, err } defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, &kclientcmd.ConfigOverrides{}) c.factory = clientcmd.NewFactory(defaultCfg) } return c.factory, nil }
func (c *HollowNodeConfig) createClientConfigFromFile() (*restclient.Config, error) { clientConfig, err := clientcmd.LoadFromFile(c.KubeconfigPath) if err != nil { return nil, fmt.Errorf("error while loading kubeconfig from file %v: %v", c.KubeconfigPath, err) } config, err := clientcmd.NewDefaultClientConfig(*clientConfig, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return nil, fmt.Errorf("error while creating kubeconfig: %v", err) } config.ContentType = c.ContentType return config, nil }
func createAPIClient(adminKubeconfig *clientcmdapi.Config) (*clientset.Clientset, error) { adminClientConfig, err := clientcmd.NewDefaultClientConfig( *adminKubeconfig, &clientcmd.ConfigOverrides{}, ).ClientConfig() if err != nil { return nil, fmt.Errorf("failed to create API client configuration [%v]", err) } client, err := clientset.NewForConfig(adminClientConfig) if err != nil { return nil, fmt.Errorf("failed to create API client [%v]", err) } return client, nil }
// Factory returns a command factory that works with OpenShift server's admin credentials func (c *ClientStartConfig) Factory() (*clientcmd.Factory, error) { if c.factory == nil { cfg, err := kclientcmd.LoadFromFile(filepath.Join(c.LocalConfigDir, "master", "admin.kubeconfig")) if err != nil { return nil, err } overrides := &kclientcmd.ConfigOverrides{} if c.PortForwarding { overrides.ClusterInfo.Server = fmt.Sprintf("https://%s:8443", c.ServerIP) } defaultCfg := kclientcmd.NewDefaultClientConfig(*cfg, overrides) c.factory = clientcmd.NewFactory(defaultCfg) } return c.factory, nil }
func LoadFederatedConfig(overrides *clientcmd.ConfigOverrides) (*restclient.Config, error) { c, err := framework.RestclientConfig(framework.TestContext.FederatedKubeContext) if err != nil { return nil, fmt.Errorf("error creating federation client config: %v", err.Error()) } cfg, err := clientcmd.NewDefaultClientConfig(*c, overrides).ClientConfig() if cfg != nil { //TODO(colhom): this is only here because https://github.com/kubernetes/kubernetes/issues/25422 cfg.NegotiatedSerializer = api.Codecs } if err != nil { return cfg, fmt.Errorf("error creating federation default client config: %v", err.Error()) } return cfg, nil }
func createClientFromFile(path string) (*client.Client, error) { c, err := clientcmd.LoadFromFile(path) if err != nil { return nil, fmt.Errorf("error while loading kubeconfig from file %v: %v", path, err) } config, err := clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return nil, fmt.Errorf("error while creating kubeconfig: %v", err) } client, err := client.New(config) if err != nil { return nil, fmt.Errorf("error while creating client: %v", err) } return client, nil }
// creates a set of clients for this endpoint func createClients(caCert []byte, endpoint, token string, nodeName types.NodeName) (*clientset.Clientset, error) { bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", endpoint, caCert) bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig( *kubeadmutil.MakeClientConfigWithToken( bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), token, ), &clientcmd.ConfigOverrides{}, ).ClientConfig() if err != nil { return nil, fmt.Errorf("failed to create API client configuration [%v]", err) } clientSet, err := clientset.NewForConfig(bootstrapClientConfig) if err != nil { return nil, fmt.Errorf("failed to create clients for the API endpoint %s [%v]", endpoint, err) } return clientSet, nil }
// PerformTLSBootstrap executes a node certificate signing request. func PerformTLSBootstrap(cfg *clientcmdapi.Config) error { hostName, err := os.Hostname() if err != nil { return err } name := types.NodeName(hostName) rc, err := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return err } c, err := clientset.NewForConfig(rc) if err != nil { return err } fmt.Println("[csr] Created API client to obtain unique certificate for this node, generating keys and certificate signing request") key, err := certutil.MakeEllipticPrivateKeyPEM() if err != nil { return fmt.Errorf("failed to generate private key [%v]", err) } // Make sure there are no other nodes in the cluster with identical node name. if err := checkForNodeNameDuplicates(c); err != nil { return err } cert, err := csr.RequestNodeCertificate(c.Certificates().CertificateSigningRequests(), key, name) if err != nil { return fmt.Errorf("failed to request signed certificate from the API server [%v]", err) } fmt.Printf("[csr] Received signed certificate from the API server") fmt.Println("[csr] Generating kubelet configuration") cfg.AuthInfos["kubelet"] = &clientcmdapi.AuthInfo{ ClientKeyData: key, ClientCertificateData: cert, } cfg.Contexts["kubelet"] = &clientcmdapi.Context{ AuthInfo: "kubelet", Cluster: cfg.Contexts[cfg.CurrentContext].Cluster, } cfg.CurrentContext = "kubelet" return nil }
// PerformTLSBootstrap executes a certificate signing request with the // provided connection details. func PerformTLSBootstrap(cfg *clientcmdapi.Config) error { hostName, err := os.Hostname() if err != nil { return err } name := types.NodeName(hostName) rc, err := clientcmd.NewDefaultClientConfig(*cfg, nil).ClientConfig() if err != nil { return err } c, err := clientset.NewForConfig(rc) if err != nil { return err } fmt.Println("<node/csr> created API client to obtain unique certificate for this node, generating keys and certificate signing request") key, err := certutil.MakeEllipticPrivateKeyPEM() if err != nil { return fmt.Errorf("<node/csr> failed to generating private key [%v]", err) } cert, err := csr.RequestNodeCertificate(c.Certificates().CertificateSigningRequests(), key, name) if err != nil { return fmt.Errorf("<node/csr> failed to request signed certificate from the API server [%v]", err) } fmtCert, err := certutil.FormatBytesCert(cert) if err != nil { return fmt.Errorf("<node/csr> failed to format certificate [%v]", err) } fmt.Printf("<node/csr> received signed certificate from the API server:\n%s\n", fmtCert) fmt.Println("<node/csr> generating kubelet configuration") cfg.AuthInfos["kubelet"] = &clientcmdapi.AuthInfo{ ClientKeyData: key, ClientCertificateData: []byte(fmtCert), } cfg.Contexts["kubelet"] = &clientcmdapi.Context{ AuthInfo: "kubelet", Cluster: cfg.Contexts[cfg.CurrentContext].Cluster, } cfg.CurrentContext = "kubelet" return nil }
// NewFactory builds a new openshift client factory from the given config func NewFactory(config *kclient.Config) *clientcmd.Factory { // keep only what we need to initialize a factory clientConfig := kclientcmd.NewDefaultClientConfig( *kclientcmdapi.NewConfig(), &kclientcmd.ConfigOverrides{ ClusterInfo: kclientcmdapi.Cluster{ Server: config.Host, APIVersion: config.Version, InsecureSkipTLSVerify: config.Insecure, }, AuthInfo: kclientcmdapi.AuthInfo{ Token: config.BearerToken, }, Context: kclientcmdapi.Context{}, }) factory := clientcmd.NewFactory(clientConfig) return factory }
func clientFromConfig(path string) (*kclient.Config, string, error) { if path == "-" { cfg, err := kclient.InClusterConfig() if err != nil { return nil, "", fmt.Errorf("cluster config not available: %v", err) } return cfg, "", nil } rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: path} credentials, err := rules.Load() if err != nil { return nil, "", fmt.Errorf("the provided credentials %q could not be loaded: %v", path, err) } cfg := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}) config, err := cfg.ClientConfig() if err != nil { return nil, "", fmt.Errorf("the provided credentials %q could not be used: %v", path, err) } namespace, _, _ := cfg.Namespace() return config, namespace, nil }
func main() { //kubeconfig = "/home/ubuntu/.kube/config" // k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go config, err := clientcmd.LoadFromFile(kubeconfig) if err != nil { log.Fatal(err) } // k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go clientConfig := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}) restConfig, err := clientConfig.ClientConfig() if err != nil { log.Fatal(err) } // k8s.io/kubernetes/pkg/client/unversioned/helper.go kclient, err := unversioned.New(restConfig) if err != nil { log.Fatal(err) } // k8s.io/kubernetes/pkg/client/unversioned/client.go // k8s.io/kubernetes/pkg/client/unversioned/pods.go result, err := kclient.Pods("default").List(kapi.ListOptions{}) if err != nil { log.Fatal(err) } // k8s.io/kubernetes/pkg/runtime/codec.go // k8s.io/kubernetes/pkg/api/register.go // k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go b, err := runtime.Encode(kapi.Codecs.LegacyCodec(kapi.SchemeGroupVersion), result, kapi.SchemeGroupVersion) if err != nil { log.Fatal(err) } fmt.Printf("Output list:\n%s\n", b) }
// Get kube client configuration from a file containing credentials for // connecting to the master. func getClientConfig(path string) (*restclient.Config, error) { if 0 == len(path) { return nil, fmt.Errorf("You must specify a .kubeconfig file path containing credentials for connecting to the master with --credentials") } rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: path, Precedence: []string{}} credentials, err := rules.Load() if err != nil { return nil, fmt.Errorf("Could not load credentials from %q: %v", path, err) } config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return nil, fmt.Errorf("Credentials %q error: %v", path, err) } if err = restclient.LoadTLSFiles(config); err != nil { return nil, fmt.Errorf("Unable to load certificate info using credentials from %q: %v", path, err) } return config, nil }
func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { loadingRules := config.NewOpenShiftClientConfigLoadingRules() flags.StringVar(&loadingRules.ExplicitPath, config.OpenShiftConfigFlagName, "", "Path to the config file to use for CLI requests.") cobra.MarkFlagFilename(flags, config.OpenShiftConfigFlagName) // set our explicit defaults defaultOverrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")}} loadingRules.DefaultClientConfig = clientcmd.NewDefaultClientConfig(clientcmdapi.Config{}, defaultOverrides) overrides := &clientcmd.ConfigOverrides{ClusterDefaults: defaultOverrides.ClusterDefaults} overrideFlags := clientcmd.RecommendedConfigOverrideFlags("") overrideFlags.ContextOverrideFlags.Namespace.ShortName = "n" overrideFlags.AuthOverrideFlags.Username.LongName = "" overrideFlags.AuthOverrideFlags.Password.LongName = "" clientcmd.BindOverrideFlags(overrides, flags, overrideFlags) cobra.MarkFlagFilename(flags, overrideFlags.AuthOverrideFlags.ClientCertificate.LongName) cobra.MarkFlagFilename(flags, overrideFlags.AuthOverrideFlags.ClientKey.LongName) cobra.MarkFlagFilename(flags, overrideFlags.ClusterOverrideFlags.CertificateAuthority.LongName) clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) return clientConfig }
// creates a set of clients for this endpoint func createClients(caCert []byte, endpoint, token string, nodeName types.NodeName) (*apiClient, error) { clientConfig := kubeconfigphase.MakeClientConfigWithToken( endpoint, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), caCert, token, ) bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig(*clientConfig, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { return nil, fmt.Errorf("failed to create API client configuration [%v]", err) } clientSet, err := clientset.NewForConfig(bootstrapClientConfig) if err != nil { return nil, fmt.Errorf("failed to create clients for the API endpoint %q: [%v]", endpoint, err) } ac := &apiClient{ clientSet: clientSet, clientConfig: clientConfig, } return ac, nil }
// RunCmdRouter contains all the necessary functionality for the
// OpenShift CLI router command.
// It validates flags, loads router credentials, and either prints the router
// objects (when an output format is requested) or creates them on the server.
// A router is only generated when output was requested or no service with the
// chosen name already exists.
func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error {
	// The optional single positional argument names the router; default "router".
	var name string
	switch len(args) {
	case 0:
		name = "router"
	case 1:
		name = args[0]
	default:
		return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router")
	}
	// ':' is the username/password separator in the stats credentials.
	if len(cfg.StatsUsername) > 0 {
		if strings.Contains(cfg.StatsUsername, ":") {
			return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername)
		}
	}
	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		glog.Fatal(err)
	}
	// For the host networking case, ensure the ports match.
	if cfg.HostNetwork {
		for i := 0; i < len(ports); i++ {
			if ports[i].ContainerPort != ports[i].HostPort {
				return cmdutil.UsageError(cmd, "For host networking mode, please ensure that the container [%v] and host [%v] ports match", ports[i].ContainerPort, ports[i].HostPort)
			}
		}
	}
	// Expose the stats port on the container when one is configured.
	if cfg.StatsPort > 0 {
		ports = append(ports, kapi.ContainerPort{
			Name:          "stats",
			HostPort:      cfg.StatsPort,
			ContainerPort: cfg.StatsPort,
			Protocol:      kapi.ProtocolTCP,
		})
	}
	// Labels applied to the router objects; negative (removal) specs are rejected.
	label := map[string]string{"router": name}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}
	// Optional node selector restricting where router pods may schedule.
	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}
	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)
	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}
	// Generate when printing, or when no service with this name exists yet.
	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing router %q: %v", name, err)
			}
			generate = true
		}
	}
	if generate {
		if cfg.DryRun && !output {
			return fmt.Errorf("router %q does not exist (no service)", name)
		}
		if len(cfg.ServiceAccount) == 0 {
			return fmt.Errorf("router could not be created; you must specify a service account with --service-account")
		}
		err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount)
		if err != nil {
			return fmt.Errorf("router could not be created; %v", err)
		}
		// create new router
		if len(cfg.Credentials) == 0 {
			return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials")
		}
		// Load the router's master-connection credentials and inline the TLS data.
		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := kclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		}
		defaultCert, err := loadCert(cfg.DefaultCertificate)
		if err != nil {
			return fmt.Errorf("router could not be created; error reading default certificate file: %v", err)
		}
		// Auto-generate a stats password when none was supplied, and tell the user.
		if len(cfg.StatsPassword) == 0 {
			cfg.StatsPassword = generateStatsPassword()
			fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword)
		}
		// Environment passed to the router container.
		env := app.Environment{
			"OPENSHIFT_MASTER":                    config.Host,
			"OPENSHIFT_CA_DATA":                   string(config.CAData),
			"OPENSHIFT_KEY_DATA":                  string(config.KeyData),
			"OPENSHIFT_CERT_DATA":                 string(config.CertData),
			"OPENSHIFT_INSECURE":                  insecure,
			"DEFAULT_CERTIFICATE":                 defaultCert,
			"ROUTER_SERVICE_NAME":                 name,
			"ROUTER_SERVICE_NAMESPACE":            namespace,
			"ROUTER_EXTERNAL_HOST_HOSTNAME":       cfg.ExternalHost,
			"ROUTER_EXTERNAL_HOST_USERNAME":       cfg.ExternalHostUsername,
			"ROUTER_EXTERNAL_HOST_PASSWORD":       cfg.ExternalHostPassword,
			"ROUTER_EXTERNAL_HOST_HTTP_VSERVER":   cfg.ExternalHostHttpVserver,
			"ROUTER_EXTERNAL_HOST_HTTPS_VSERVER":  cfg.ExternalHostHttpsVserver,
			"ROUTER_EXTERNAL_HOST_INSECURE":       strconv.FormatBool(cfg.ExternalHostInsecure),
			"ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath,
			"ROUTER_EXTERNAL_HOST_PRIVKEY":        privkeyPath,
			"STATS_PORT":                          strconv.Itoa(cfg.StatsPort),
			"STATS_USERNAME":                      cfg.StatsUsername,
			"STATS_PASSWORD":                      cfg.StatsPassword,
		}
		updatePercent := int(-25)
		secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient, namespace)
		if err != nil {
			return fmt.Errorf("router could not be created: %v", err)
		}
		livenessProbe := generateLivenessProbeConfig(cfg, ports)
		containers := []kapi.Container{
			{
				Name:            "router",
				Image:           image,
				Ports:           ports,
				Env:             env.List(),
				LivenessProbe:   livenessProbe,
				ImagePullPolicy: kapi.PullIfNotPresent,
				VolumeMounts:    mounts,
			},
		}
		// Optionally add a metrics exporter sidecar.
		if cfg.StatsPort > 0 && cfg.ExposeMetrics {
			pc := generateMetricsExporterContainer(cfg, env)
			if pc != nil {
				containers = append(containers, *pc)
			}
		}
		// The deployment config describing the router pods.
		objects := []runtime.Object{
			&dapi.DeploymentConfig{
				ObjectMeta: kapi.ObjectMeta{
					Name:   name,
					Labels: label,
				},
				Triggers: []dapi.DeploymentTriggerPolicy{
					{Type: dapi.DeploymentTriggerOnConfigChange},
				},
				Template: dapi.DeploymentTemplate{
					Strategy: dapi.DeploymentStrategy{
						Type:          dapi.DeploymentStrategyTypeRolling,
						RollingParams: &dapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent},
					},
					ControllerTemplate: kapi.ReplicationControllerSpec{
						Replicas: cfg.Replicas,
						Selector: label,
						Template: &kapi.PodTemplateSpec{
							ObjectMeta: kapi.ObjectMeta{Labels: label},
							Spec: kapi.PodSpec{
								SecurityContext: &kapi.PodSecurityContext{
									HostNetwork: cfg.HostNetwork,
								},
								ServiceAccountName: cfg.ServiceAccount,
								NodeSelector:       nodeSelector,
								Containers:         containers,
								Volumes:            volumes,
							},
						},
					},
				},
			},
		}
		// Attach generated secrets to the router's service account.
		if len(secrets) != 0 {
			serviceAccount, err := kClient.ServiceAccounts(namespace).Get(cfg.ServiceAccount)
			if err != nil {
				return fmt.Errorf("error looking up service account %s: %v", cfg.ServiceAccount, err)
			}
			for _, secret := range secrets {
				objects = append(objects, secret)
				serviceAccount.Secrets = append(serviceAccount.Secrets, kapi.ObjectReference{Name: secret.Name})
			}
			_, err = kClient.ServiceAccounts(namespace).Update(serviceAccount)
			if err != nil {
				return fmt.Errorf("error adding secret key to service account %s: %v", cfg.ServiceAccount, err)
			}
		}
		objects = app.AddServices(objects, true)
		// TODO: label all created objects with the same label - router=<name>
		list := &kapi.List{Items: objects}
		// Print instead of create when an output format was requested.
		if output {
			if err := f.PrintObject(cmd, list, out); err != nil {
				return fmt.Errorf("Unable to print object: %v", err)
			}
			return nil
		}
		mapper, typer := f.Factory.Object()
		bulk := configcmd.Bulk{
			Mapper:            mapper,
			Typer:             typer,
			RESTClientFactory: f.Factory.RESTClient,
			After:             configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()),
		}
		if errs := bulk.Create(list, namespace); len(errs) != 0 {
			return errExit
		}
		return nil
	}
	fmt.Fprintf(out, "Router %q service exists\n", name)
	return nil
}
func main() { flag.Parse() glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d", *queriesAverage, *podsPerNode, *upTo) var spec string if *gke != "" { spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "kubernetes", "kubeconfig") } else { spec = filepath.Join(os.Getenv("HOME"), ".kube", "config") } settings, err := clientcmd.LoadFromFile(spec) if err != nil { glog.Fatalf("Error loading configuration: %v", err.Error()) } if *gke != "" { settings.CurrentContext = *gke } config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { glog.Fatalf("Failed to construct config: %v", err) } c, err := client.New(config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } var nodes *api.NodeList for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) { nodes, err = c.Nodes().List(api.ListOptions{}) if err == nil { break } glog.Warningf("Failed to list nodes: %v", err) } if err != nil { glog.Fatalf("Giving up trying to list nodes: %v", err) } if len(nodes.Items) == 0 { glog.Fatalf("Failed to find any nodes.") } glog.Infof("Found %d nodes on this cluster:", len(nodes.Items)) for i, node := range nodes.Items { glog.Infof("%d: %s", i, node.Name) } queries := *queriesAverage * len(nodes.Items) * *podsPerNode // Create the namespace got, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}}) if err != nil { glog.Fatalf("Failed to create namespace: %v", err) } ns := got.Name defer func(ns string) { if err := c.Namespaces().Delete(ns); err != nil { glog.Warningf("Failed to delete namespace ns: %e", ns, err) } else { // wait until the namespace disappears for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ { if _, err := c.Namespaces().Get(ns); err != nil { if errors.IsNotFound(err) { return } } time.Sleep(time.Second) } } }(ns) glog.Infof("Created namespace %s", ns) // Create 
a service for these pods. glog.Infof("Creating service %s/serve-hostnames", ns) // Make several attempts to create a service. var svc *api.Service for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) { t := time.Now() svc, err = c.Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: "serve-hostnames", Labels: map[string]string{ "name": "serve-hostname", }, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Protocol: "TCP", Port: 9376, TargetPort: intstr.FromInt(9376), }}, Selector: map[string]string{ "name": "serve-hostname", }, }, }) glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) if err == nil { break } glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err) } if err != nil { glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err) return } // Clean up service defer func() { glog.Infof("Cleaning up service %s/serve-hostnames", ns) // Make several attempts to delete the service. for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err := c.Services(ns).Delete(svc.Name); err == nil { return } glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) } }() // Put serve-hostname pods on each node. 
podNames := []string{} for i, node := range nodes.Items { for j := 0; j < *podsPerNode; j++ { podName := fmt.Sprintf("serve-hostname-%d-%d", i, j) podNames = append(podNames, podName) // Make several attempts for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) { glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) t := time.Now() _, err = c.Pods(ns).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, Labels: map[string]string{ "name": "serve-hostname", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "serve-hostname", Image: "gcr.io/google_containers/serve_hostname:1.1", Ports: []api.ContainerPort{{ContainerPort: 9376}}, }, }, NodeName: node.Name, }, }) glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) if err == nil { break } glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err) } if err != nil { glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err) return } } } // Clean up the pods defer func() { glog.Info("Cleaning up pods") // Make several attempts to delete the pods. 
for _, podName := range podNames { for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err = c.Pods(ns).Delete(podName, nil); err == nil { break } glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) } } }() glog.Info("Waiting for the serve-hostname pods to be ready") for _, podName := range podNames { var pod *api.Pod for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) { pod, err = c.Pods(ns).Get(podName) if err != nil { glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) continue } if pod.Status.Phase == api.PodRunning { break } } if pod.Status.Phase != api.PodRunning { glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase) } else { glog.Infof("%s/%s is running", ns, podName) } } // Wait for the endpoints to propagate. for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) { hostname, err := c.Get(). Namespace(ns). Prefix("proxy"). Resource("services"). Name("serve-hostnames"). DoRaw() if err != nil { glog.Infof("After %v while making a proxy call got error %v", time.Since(start), err) continue } var r unversioned.Status if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), hostname, &r); err != nil { break } if r.Status == unversioned.StatusFailure { glog.Infof("After %v got status %v", time.Since(start), string(hostname)) continue } break } // Repeatedly make requests. for iteration := 0; iteration != *upTo; iteration++ { responseChan := make(chan string, queries) // Use a channel of size *maxPar to throttle the number // of in-flight requests to avoid overloading the service. inFlight := make(chan struct{}, *maxPar) start := time.Now() for q := 0; q < queries; q++ { go func(i int, query int) { inFlight <- struct{}{} t := time.Now() hostname, err := c.Get(). Namespace(ns). Prefix("proxy"). Resource("services"). 
Name("serve-hostnames"). DoRaw() glog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t)) if err != nil { glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err) // If the query failed return a string which starts with a character // that can't be part of a hostname. responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err) } else { responseChan <- string(hostname) } <-inFlight }(iteration, q) } responses := make(map[string]int, *podsPerNode*len(nodes.Items)) missing := 0 for q := 0; q < queries; q++ { r := <-responseChan glog.V(4).Infof("Got response from %s", r) responses[r]++ // If the returned hostname starts with '!' then it indicates // an error response. if len(r) > 0 && r[0] == '!' { glog.V(3).Infof("Got response %s", r) missing++ } } if missing > 0 { glog.Warningf("Missing %d responses out of %d", missing, queries) } // Report any nodes that did not respond. for n, node := range nodes.Items { for i := 0; i < *podsPerNode; i++ { name := fmt.Sprintf("serve-hostname-%d-%d", n, i) if _, ok := responses[name]; !ok { glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) } } } glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing", iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing) } }
func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Clientset, error) { adminClientConfig, err := clientcmd.NewDefaultClientConfig( *adminConfig, &clientcmd.ConfigOverrides{}, ).ClientConfig() if err != nil { return nil, fmt.Errorf("<master/apiclient> failed to create API client configuration [%v]", err) } fmt.Println("<master/apiclient> created API client configuration") client, err := clientset.NewForConfig(adminClientConfig) if err != nil { return nil, fmt.Errorf("<master/apiclient> failed to create API client [%v]", err) } fmt.Println("<master/apiclient> created API client, waiting for the control plane to become ready") start := time.Now() wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { cs, err := client.ComponentStatuses().List(api.ListOptions{}) if err != nil { return false, nil } // TODO(phase2) must revisit this when we implement HA if len(cs.Items) < 3 { fmt.Println("<master/apiclient> not all control plane components are ready yet") return false, nil } for _, item := range cs.Items { for _, condition := range item.Conditions { if condition.Type != api.ComponentHealthy { fmt.Printf("<master/apiclient> control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions) return false, nil } } } fmt.Printf("<master/apiclient> all control plane components are healthy after %f seconds\n", time.Since(start).Seconds()) return true, nil }) fmt.Println("<master/apiclient> waiting for at least one node to register and become ready") start = time.Now() wait.PollInfinite(apiCallRetryInterval, func() (bool, error) { nodeList, err := client.Nodes().List(api.ListOptions{}) if err != nil { fmt.Println("<master/apiclient> temporarily unable to list nodes (will retry)") return false, nil } if len(nodeList.Items) < 1 { return false, nil } n := &nodeList.Items[0] if !api.IsNodeReady(n) { fmt.Println("<master/apiclient> first node has registered, but is not ready yet") return false, nil } 
fmt.Printf("<master/apiclient> first node is ready after %f seconds\n", time.Since(start).Seconds()) return true, nil }) return client, nil }
// Negotiate a bearer token with the auth server, or try to reuse one based on the
// information already present. In case of any missing information, ask for user input
// (usually username and password, interactive depending on the Reader).
//
// Resolution order:
//  1. an explicitly provided token (--token) is validated via whoAmI;
//  2. if a username was provided, existing kubeconfig contexts pointing at
//     the same server are probed for credentials that authenticate as that user;
//  3. otherwise credentials are cleared and a fresh token is requested,
//     possibly prompting through o.Reader.
//
// On success o.Username and o.Config are populated and nil is returned.
func (o *LoginOptions) gatherAuthInfo() error {
	directClientConfig, err := o.getClientConfig()
	if err != nil {
		return err
	}

	// make a copy and use it to avoid mutating the original
	t := *directClientConfig
	clientConfig := &t

	// if a token were explicitly provided, try to use it
	if o.tokenProvided() {
		clientConfig.BearerToken = o.Token
		if osClient, err := client.New(clientConfig); err == nil {
			me, err := whoAmI(osClient)
			if err == nil {
				o.Username = me.Name
				o.Config = clientConfig

				fmt.Fprintf(o.Out, "Logged into %q as %q using the token provided.\n\n", o.Config.Host, o.Username)
				return nil
			}

			// Non-401 errors are fatal; a 401 just means the token is bad,
			// so fall through to the other strategies.
			if !kerrors.IsUnauthorized(err) {
				return err
			}

			fmt.Fprint(o.Out, "The token provided is invalid (probably expired).\n\n")
		}
	}

	// if a username was provided try to make use of it
	if o.usernameProvided() {
		// search all valid contexts with matching server stanzas to see if we have a matching user stanza
		kubeconfig := *o.StartingKubeConfig
		matchingClusters := getMatchingClusters(*clientConfig, kubeconfig)

		for key, context := range o.StartingKubeConfig.Contexts {
			if matchingClusters.Has(context.Cluster) {
				clientcmdConfig := kclientcmd.NewDefaultClientConfig(kubeconfig, &kclientcmd.ConfigOverrides{CurrentContext: key})
				if kubeconfigClientConfig, err := clientcmdConfig.ClientConfig(); err == nil {
					if osClient, err := client.New(kubeconfigClientConfig); err == nil {
						// Only reuse credentials that authenticate as the
						// requested username; all probe errors are ignored
						// and the next context is tried.
						if me, err := whoAmI(osClient); err == nil && (o.Username == me.Name) {
							clientConfig.BearerToken = kubeconfigClientConfig.BearerToken
							clientConfig.CertFile = kubeconfigClientConfig.CertFile
							clientConfig.CertData = kubeconfigClientConfig.CertData
							clientConfig.KeyFile = kubeconfigClientConfig.KeyFile
							clientConfig.KeyData = kubeconfigClientConfig.KeyData

							o.Config = clientConfig

							// Only announce reuse when it is the active context.
							if key == o.StartingKubeConfig.CurrentContext {
								fmt.Fprintf(o.Out, "Logged into %q as %q using existing credentials.\n\n", o.Config.Host, o.Username)
							}

							return nil
						}
					}
				}
			}
		}
	}

	// if kubeconfig doesn't already have a matching user stanza...
	// Clear any stale credentials before requesting a fresh token.
	clientConfig.BearerToken = ""
	clientConfig.CertData = []byte{}
	clientConfig.KeyData = []byte{}
	clientConfig.CertFile = o.CertFile
	clientConfig.KeyFile = o.KeyFile
	// NOTE(review): RequestToken is handed o.Config, which has not been
	// assigned on this path (only the earlier returning branches set it) —
	// confirm whether clientConfig was intended here.
	token, err := tokencmd.RequestToken(o.Config, o.Reader, o.Username, o.Password)
	if err != nil {
		return err
	}
	clientConfig.BearerToken = token

	osClient, err := client.New(clientConfig)
	if err != nil {
		return err
	}

	me, err := whoAmI(osClient)
	if err != nil {
		return err
	}
	o.Username = me.Name
	o.Config = clientConfig

	fmt.Fprint(o.Out, "Login successful.\n\n")
	return nil
}
// Check is part of the Diagnostic interface; it runs the actual diagnostic logic.
// It validates that the configured kubeconfig context is fully defined
// (context, cluster, user), then performs a live project-list request to
// test connectivity, translating common low-level client errors into
// human-readable diagnostic messages keyed DCli0001-DCli0015.
func (d ConfigContext) Check() types.DiagnosticResult {
	r := types.NewDiagnosticResult(ConfigContextsName)

	// Failures in the currently-selected context are more severe, so they
	// get a distinct error key and wording.
	isDefaultContext := d.RawConfig.CurrentContext == d.ContextName

	// prepare bad news message
	errorKey := "DCli0001"
	unusableLine := fmt.Sprintf("The client config context '%s' is unusable", d.ContextName)
	if isDefaultContext {
		errorKey = "DCli0002"
		unusableLine = fmt.Sprintf("The current client config context '%s' is unusable", d.ContextName)
	}

	// check that the context and its constituents are defined in the kubeconfig
	context, exists := d.RawConfig.Contexts[d.ContextName]
	if !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' is not defined.", unusableLine, d.ContextName))
		return r
	}
	clusterName := context.Cluster
	cluster, exists := d.RawConfig.Clusters[clusterName]
	if !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' has a cluster '%s' which is not defined.", unusableLine, d.ContextName, clusterName))
		return r
	}
	authName := context.AuthInfo
	if _, exists := d.RawConfig.AuthInfos[authName]; !exists {
		r.Error(errorKey, nil, fmt.Sprintf("%s:\n Client config context '%s' has a user '%s' which is not defined.", unusableLine, d.ContextName, authName))
		return r
	}

	// we found a fully-defined context
	project := context.Namespace
	if project == "" {
		project = kapi.NamespaceDefault // k8s fills this in anyway if missing from the context
	}
	msgText := contextDesc
	if isDefaultContext {
		msgText = currContextDesc
	}
	msgText = fmt.Sprintf(msgText, d.ContextName, cluster.Server, authName, project)

	// Actually send a request to see if context has connectivity.
	// Note: we cannot reuse factories as they cache the clients, so build new factory for each context.
	osClient, _, _, err := osclientcmd.NewFactory(kclientcmd.NewDefaultClientConfig(*d.RawConfig, &kclientcmd.ConfigOverrides{Context: *context})).Clients()
	// client create now *fails* if cannot connect to server; so, address connectivity errors below
	if err == nil {
		if projects, projerr := osClient.Projects().List(kapi.ListOptions{}); projerr != nil {
			err = projerr
		} else { // success!
			// Show at most 10 project names, then an ellipsis marker.
			list := []string{}
			for i, project := range projects.Items {
				if i > 9 {
					list = append(list, "...")
					break
				}
				list = append(list, project.Name)
			}
			if len(list) == 0 {
				r.Info("DCli0003", msgText+"Successfully requested project list, but it is empty, so user has no access to anything.")
			} else {
				r.Info("DCli0004", msgText+fmt.Sprintf("Successfully requested project list; has access to project(s):\n %v", list))
			}
			return r
		}
	}

	// something went wrong; couldn't create client or get project list.
	// interpret the terse error messages with helpful info.
	errMsg := err.Error()
	errFull := fmt.Sprintf("(%T) %[1]v\n", err)
	var reason, errId string
	// Ordered pattern match over known client failure modes; first match wins.
	switch {
	case regexp.MustCompile("dial tcp: lookup (\\S+): no such host").MatchString(errMsg):
		errId, reason = "DCli0005", clientNoResolve
	case strings.Contains(errMsg, "x509: certificate signed by unknown authority"):
		errId, reason = "DCli0006", clientUnknownCa
	case strings.Contains(errMsg, "specifying a root certificates file with the insecure flag is not allowed"):
		errId, reason = "DCli0007", clientUnneededCa
	case invalidCertNameRx.MatchString(errMsg):
		match := invalidCertNameRx.FindStringSubmatch(errMsg)
		serverHost := match[len(match)-1]
		errId, reason = "DCli0008", fmt.Sprintf(clientInvCertName, serverHost)
	case regexp.MustCompile("dial tcp (\\S+): connection refused").MatchString(errMsg):
		errId, reason = "DCli0009", clientConnRefused
	case regexp.MustCompile("dial tcp (\\S+): (?:connection timed out|i/o timeout|no route to host)").MatchString(errMsg):
		errId, reason = "DCli0010", clientConnTimeout
	case strings.Contains(errMsg, "malformed HTTP response"):
		errId, reason = "DCli0011", clientMalformedHTTP
	case strings.Contains(errMsg, "tls: oversized record received with length"):
		errId, reason = "DCli0012", clientMalformedTLS
	case strings.Contains(errMsg, `User "system:anonymous" cannot`):
		errId, reason = "DCli0013", clientUnauthn
	case strings.Contains(errMsg, "provide credentials"):
		errId, reason = "DCli0014", clientUnauthz
	default:
		errId, reason = "DCli0015", `Diagnostics does not have an explanation for what this means. Please report this error so one can be added.`
	}
	r.Error(errId, err, msgText+errFull+reason)
	return r
}
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command.
// It checks whether the docker-registry service already exists; if not (or if
// output-only mode is requested) it builds the registry DeploymentConfig plus
// service objects from the supplied RegistryConfig and either prints them or
// creates them in the cluster.
func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error {
	// The command takes no positional arguments; the name is fixed.
	var name string
	switch len(args) {
	case 0:
		name = "docker-registry"
	default:
		return cmdutil.UsageError(cmd, "No arguments are allowed to this command")
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		return err
	}

	label := map[string]string{
		"docker-registry": "default",
	}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			return err
		}
		// Negative (removal) label specs make no sense on create.
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			return err
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	_, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}

	// Generate objects when printing, or when the service is absent.
	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err)
			}
			generate = true
		}
	}

	if generate {
		if cfg.DryRun && !output {
			return fmt.Errorf("docker-registry %q does not exist (no service).", name)
		}

		// create new registry
		if len(cfg.Credentials) == 0 {
			return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials")
		}
		// Load and validate the master credentials the registry pod will use.
		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := kclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		} else {
			// Secure connections require a client cert/key pair.
			if len(config.KeyData) == 0 || len(config.CertData) == 0 {
				return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials)
			}
		}

		// Credentials are injected into the registry pod via environment.
		env := app.Environment{
			"OPENSHIFT_MASTER":    config.Host,
			"OPENSHIFT_CA_DATA":   string(config.CAData),
			"OPENSHIFT_KEY_DATA":  string(config.KeyData),
			"OPENSHIFT_CERT_DATA": string(config.CertData),
			"OPENSHIFT_INSECURE":  insecure,
		}

		// Probe the first configured container port; override the registry's
		// listen address when custom ports are supplied.
		healthzPort := defaultPort
		if len(ports) > 0 {
			healthzPort = ports[0].ContainerPort
			env["REGISTRY_HTTP_ADDR"] = fmt.Sprintf(":%d", healthzPort)
			env["REGISTRY_HTTP_NET"] = "tcp"
		}
		livenessProbe := generateLivenessProbeConfig(healthzPort)
		readinessProbe := generateReadinessProbeConfig(healthzPort)

		// Random HTTP secret so multiple replicas agree on upload-state HMACs.
		secretBytes := make([]byte, randomSecretSize)
		if _, err := cryptorand.Read(secretBytes); err != nil {
			return fmt.Errorf("registry does not exist; could not generate random bytes for HTTP secret: %v", err)
		}
		env["REGISTRY_HTTP_SECRET"] = base64.StdEncoding.EncodeToString(secretBytes)

		// Host-mounted storage requires a privileged container.
		mountHost := len(cfg.HostMount) > 0
		podTemplate := &kapi.PodTemplateSpec{
			ObjectMeta: kapi.ObjectMeta{Labels: label},
			Spec: kapi.PodSpec{
				ServiceAccountName: cfg.ServiceAccount,
				NodeSelector:       nodeSelector,
				Containers: []kapi.Container{
					{
						Name:  "registry",
						Image: image,
						Ports: ports,
						Env:   env.List(),
						VolumeMounts: []kapi.VolumeMount{
							{
								Name:      "registry-storage",
								MountPath: cfg.Volume,
							},
						},
						SecurityContext: &kapi.SecurityContext{
							Privileged: &mountHost,
						},
						LivenessProbe:  livenessProbe,
						ReadinessProbe: readinessProbe,
					},
				},
				Volumes: []kapi.Volume{
					{
						Name:         "registry-storage",
						VolumeSource: kapi.VolumeSource{},
					},
				},
			},
		}
		// Storage is a host path when requested, otherwise ephemeral.
		if mountHost {
			podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount}
		} else {
			podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{}
		}

		objects := []runtime.Object{
			&dapi.DeploymentConfig{
				ObjectMeta: kapi.ObjectMeta{
					Name:   name,
					Labels: label,
				},
				Spec: dapi.DeploymentConfigSpec{
					Replicas: cfg.Replicas,
					Selector: label,
					Triggers: []dapi.DeploymentTriggerPolicy{
						{Type: dapi.DeploymentTriggerOnConfigChange},
					},
					Template: podTemplate,
				},
			},
		}
		objects = app.AddServices(objects, true)

		// Set registry service's sessionAffinity to ClientIP to prevent push
		// failures due to a use of poorly consistent storage shared by
		// multiple replicas.
		for _, obj := range objects {
			switch t := obj.(type) {
			case *kapi.Service:
				t.Spec.SessionAffinity = kapi.ServiceAffinityClientIP
			}
		}

		// TODO: label all created objects with the same label
		list := &kapi.List{Items: objects}

		// Output-only mode: print the objects instead of creating them.
		if output {
			if err := f.PrintObject(cmd, list, out); err != nil {
				return fmt.Errorf("unable to print object: %v", err)
			}
			return nil
		}

		mapper, typer := f.Factory.Object()
		bulk := configcmd.Bulk{
			Mapper:            mapper,
			Typer:             typer,
			RESTClientFactory: f.Factory.RESTClient,
			After:             configcmd.NewPrintNameOrErrorAfter(mapper, cmdutil.GetFlagString(cmd, "output") == "name", "created", out, cmd.Out()),
		}
		if errs := bulk.Create(list, namespace); len(errs) != 0 {
			return errExit
		}
		return nil
	}

	// Nothing to do: the registry service already exists.
	fmt.Fprintf(out, "Docker registry %q service exists\n", name)
	return nil
}
// RunCmdRegistry contains all the necessary functionality for the OpenShift cli registry command.
// It checks whether the docker-registry service already exists; if not (or if
// output-only mode is requested) it builds the registry DeploymentConfig plus
// service objects from the supplied RegistryConfig and either prints them or
// creates them in the cluster.
func RunCmdRegistry(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RegistryConfig, args []string) error {
	// The command takes no positional arguments; the name is fixed.
	var name string
	switch len(args) {
	case 0:
		name = "docker-registry"
	default:
		return cmdutil.UsageError(cmd, "No arguments are allowed to this command")
	}

	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		return err
	}

	label := map[string]string{
		"docker-registry": "default",
	}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			return err
		}
		// Negative (removal) label specs make no sense on create.
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}

	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			return err
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}

	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)

	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	_, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}

	// Generate objects when printing, or when the service is absent.
	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing docker-registry %q: %v", name, err)
			}
			generate = true
		}
	}

	if generate {
		if cfg.DryRun && !output {
			return fmt.Errorf("docker-registry %q does not exist (no service).", name)
		}

		// create new registry
		if len(cfg.Credentials) == 0 {
			return fmt.Errorf("registry does not exist; you must specify a .kubeconfig file path containing credentials for connecting the registry to the master with --credentials")
		}
		// Load and validate the master credentials the registry pod will use.
		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := kclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("registry does not exist; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		} else {
			// Secure connections require a client cert/key pair.
			if len(config.KeyData) == 0 || len(config.CertData) == 0 {
				return fmt.Errorf("registry does not exist; the provided credentials %q are missing the client certificate and/or key", cfg.Credentials)
			}
		}

		// Credentials are injected into the registry pod via environment.
		env := app.Environment{
			"OPENSHIFT_MASTER":    config.Host,
			"OPENSHIFT_CA_DATA":   string(config.CAData),
			"OPENSHIFT_KEY_DATA":  string(config.KeyData),
			"OPENSHIFT_CERT_DATA": string(config.CertData),
			"OPENSHIFT_INSECURE":  insecure,
		}

		// Host-mounted storage requires a privileged container.
		mountHost := len(cfg.HostMount) > 0
		podTemplate := &kapi.PodTemplateSpec{
			ObjectMeta: kapi.ObjectMeta{Labels: label},
			Spec: kapi.PodSpec{
				ServiceAccountName: cfg.ServiceAccount,
				NodeSelector:       nodeSelector,
				Containers: []kapi.Container{
					{
						Name:  "registry",
						Image: image,
						Ports: ports,
						Env:   env.List(),
						VolumeMounts: []kapi.VolumeMount{
							{
								Name:      "registry-storage",
								MountPath: cfg.Volume,
							},
						},
						SecurityContext: &kapi.SecurityContext{
							Privileged: &mountHost,
						},
						// TODO reenable the liveness probe when we no longer support the v1 registry.
						/*
							LivenessProbe: &kapi.Probe{
								InitialDelaySeconds: 3,
								TimeoutSeconds:      5,
								Handler: kapi.Handler{
									HTTPGet: &kapi.HTTPGetAction{
										Path: "/healthz",
										Port: util.NewIntOrStringFromInt(5000),
									},
								},
							},
						*/
					},
				},
				Volumes: []kapi.Volume{
					{
						Name:         "registry-storage",
						VolumeSource: kapi.VolumeSource{},
					},
				},
			},
		}
		// Storage is a host path when requested, otherwise ephemeral.
		if mountHost {
			podTemplate.Spec.Volumes[0].HostPath = &kapi.HostPathVolumeSource{Path: cfg.HostMount}
		} else {
			podTemplate.Spec.Volumes[0].EmptyDir = &kapi.EmptyDirVolumeSource{}
		}

		objects := []runtime.Object{
			&dapi.DeploymentConfig{
				ObjectMeta: kapi.ObjectMeta{
					Name:   name,
					Labels: label,
				},
				Triggers: []dapi.DeploymentTriggerPolicy{
					{Type: dapi.DeploymentTriggerOnConfigChange},
				},
				Template: dapi.DeploymentTemplate{
					ControllerTemplate: kapi.ReplicationControllerSpec{
						Replicas: cfg.Replicas,
						Selector: label,
						Template: podTemplate,
					},
				},
			},
		}
		objects = app.AddServices(objects, true)

		// TODO: label all created objects with the same label
		list := &kapi.List{Items: objects}

		// Output-only mode: print the objects instead of creating them.
		if output {
			if err := f.PrintObject(cmd, list, out); err != nil {
				return fmt.Errorf("unable to print object: %v", err)
			}
			return nil
		}

		mapper, typer := f.Factory.Object()
		bulk := configcmd.Bulk{
			Mapper:            mapper,
			Typer:             typer,
			RESTClientFactory: f.Factory.RESTClient,
			After:             configcmd.NewPrintNameOrErrorAfter(out, os.Stderr),
		}
		if errs := bulk.Create(list, namespace); len(errs) != 0 {
			return errExit
		}
		return nil
	}

	// Nothing to do: the registry service already exists.
	fmt.Fprintf(out, "Docker registry %q service exists\n", name)
	return nil
}