Example #1
// ClientSetForVersion initializes or reuses a clientset for the specified version, or returns an
// error if that is not possible
func (c *ClientCache) ClientSetForVersion(requiredVersion *schema.GroupVersion) (*internalclientset.Clientset, error) {
	if requiredVersion != nil {
		if clientset, ok := c.clientsets[*requiredVersion]; ok {
			return clientset, nil
		}
	}
	config, err := c.ClientConfigForVersion(requiredVersion)
	if err != nil {
		return nil, err
	}

	clientset, err := internalclientset.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	c.clientsets[*config.GroupVersion] = clientset

	// `requiredVersion` does not necessarily equal `config.GroupVersion`.  However, we know that if we call this
	// method again with `requiredVersion`, we should get a client based on the same config we just found.  There's
	// no guarantee that a client is copiable, so create a new client and save it in the cache.
	if requiredVersion != nil {
		configCopy := *config
		clientset, err := internalclientset.NewForConfig(&configCopy)
		if err != nil {
			return nil, err
		}
		c.clientsets[*requiredVersion] = clientset
	}

	return clientset, nil
}
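For context, a minimal sketch of the cache type this method hangs off of. This is an assumption for illustration: the real ClientCache lives in kubectl's factory code and carries more fields than shown here.

// ClientCache memoizes clientsets by group/version so that repeated
// requests for the same API version reuse one client (sketch only; the
// field set is illustrative, not the full upstream type).
type ClientCache struct {
	clientsets map[schema.GroupVersion]*internalclientset.Clientset
	// The real type also provides ClientConfigForVersion, used above.
}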
Example #2
func main() {
	cc, err := restclient.InClusterConfig()
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}

	kubeClient, err := clientset.NewForConfig(cc)
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}
	listAll := api.ListOptions{LabelSelector: labels.Everything(), FieldSelector: fields.Everything()}
	nodes, err := kubeClient.Core().Nodes().List(listAll)
	if err != nil {
		log.Fatalf("Failed to list nodes: %v", err)
	}
	log.Printf("Nodes:")
	for _, node := range nodes.Items {
		log.Printf("\t%v", node.Name)
	}
	services, err := kubeClient.Core().Services(api.NamespaceDefault).List(listAll)
	if err != nil {
		log.Fatalf("Failed to list services: %v", err)
	}
	log.Printf("Services:")
	for _, svc := range services.Items {
		log.Printf("\t%v", svc.Name)
	}
	log.Printf("Success")
	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "Ok")
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
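Note that restclient.InClusterConfig only succeeds when the binary runs inside a pod, where the service account token is mounted. A hedged sketch of a common fallback, using the clientcmd loading pattern from example #3 (the helper name and kubeconfigPath parameter are illustrative):

// buildConfig prefers the in-cluster config and falls back to an
// explicit kubeconfig path (sketch only).
func buildConfig(kubeconfigPath string) (*restclient.Config, error) {
	if cc, err := restclient.InClusterConfig(); err == nil {
		return cc, nil
	}
	rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}
	return kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &kclientcmd.ConfigOverrides{}).ClientConfig()
}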
Example #3
func newKubeClient(conf *Kube2ConsulConfig) (clientset.Interface, error) {
	var (
		config *restclient.Config
		err    error
	)

	if conf.KubeMasterURL != "" && conf.KubeConfigFile == "" {
		// Only --kubemaster was provided.
		config = &restclient.Config{
			Host:          conf.KubeMasterURL,
			ContentConfig: restclient.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: "v1"}},
		}
	} else {
		// We either have:
		//  1) --kubemaster and --kubeconfig
		//  2) just --kubeconfig
		//  3) neither flag
		// In any case, the logic is the same.  If (3), this will automatically
		// fall back on the service account token.
		overrides := &kclientcmd.ConfigOverrides{}
		overrides.ClusterInfo.Server = conf.KubeMasterURL                                // might be "", but that is OK
		rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: conf.KubeConfigFile} // might be "", but that is OK
		if config, err = kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig(); err != nil {
			return nil, err
		}
	}

	glog.Infof("Using %s for kubernetes master", config.Host)
	glog.Infof("Using kubernetes API %v", config.GroupVersion)
	return clientset.NewForConfig(config)
}
Example #4
// Returns a clientset which can be used to talk to this apiserver.
func (s *ServerRunOptions) NewSelfClient(token string) (clientset.Interface, error) {
	clientConfig, err := s.NewSelfClientConfig(token)
	if err != nil {
		return nil, err
	}
	return clientset.NewForConfig(clientConfig)
}

func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, error) {
	clientConfig, err := b.Config(name)
	if err != nil {
		return nil, err
	}
	return clientset.NewForConfig(restclient.AddUserAgent(clientConfig, name))
}

func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, error) {
	clientConfig, err := b.Config(name)
	if err != nil {
		return nil, err
	}
	return clientset.NewForConfig(clientConfig)
}
Example #7
func main() {
	config := HollowNodeConfig{}
	config.addFlags(pflag.CommandLine)
	flag.InitFlags()

	if !knownMorphs.Has(config.Morph) {
		glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
	}

	// Create a client to communicate with the API server.
	clientConfig, err := config.createClientConfigFromFile()
	if err != nil {
		glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err)
	}
	cl, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
	}
	clientset, err := internalclientset.NewForConfig(clientConfig)
	if err != nil {
		glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err)
	}

	if config.Morph == "kubelet" {
		cadvisorInterface := new(cadvisortest.Fake)
		containerManager := cm.NewStubContainerManager()

		fakeDockerClient := dockertools.NewFakeDockerClient()
		fakeDockerClient.EnableSleep = true

		hollowKubelet := kubemark.NewHollowKubelet(
			config.NodeName,
			clientset,
			cadvisorInterface,
			fakeDockerClient,
			config.KubeletPort,
			config.KubeletReadOnlyPort,
			containerManager,
			maxPods,
			podsPerCore,
		)
		hollowKubelet.Run()
	}

	if config.Morph == "proxy" {
		eventBroadcaster := record.NewBroadcaster()
		recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})

		iptInterface := fakeiptables.NewFake()

		serviceConfig := proxyconfig.NewServiceConfig()
		serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

		endpointsConfig := proxyconfig.NewEndpointsConfig()
		endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

		hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
		hollowProxy.Run()
	}
}
Example #8
// TODO(jdef): hacked from kubelet/server/server.go
// TODO(k8s): replace this with clientcmd
func (s *SchedulerServer) createAPIServerClient() (*clientset.Clientset, error) {
	authInfo, err := clientauth.LoadFromFile(s.authPath)
	if err != nil {
		log.Warningf("Could not load kubernetes auth path: %v. Continuing with defaults.", err)
	}
	if authInfo == nil {
		// authInfo didn't load correctly - continue with defaults.
		authInfo = &clientauth.Info{}
	}
	clientConfig, err := authInfo.MergeWithConfig(client.Config{})
	if err != nil {
		return nil, err
	}
	if len(s.apiServerList) < 1 {
		return nil, fmt.Errorf("no api servers specified")
	}
	// TODO: adapt Kube client to support LB over several servers
	if len(s.apiServerList) > 1 {
		log.Infof("Multiple api servers specified.  Picking first one")
	}
	clientConfig.Host = s.apiServerList[0]
	c, err := clientset.NewForConfig(&clientConfig)
	if err != nil {
		return nil, err
	}
	return c, nil
}
Example #9
func waitForNodeReady() {
	const (
		// nodeReadyTimeout is the time to wait for node to become ready.
		nodeReadyTimeout = 2 * time.Minute
		// nodeReadyPollInterval is the interval to check node ready.
		nodeReadyPollInterval = 1 * time.Second
	)
	config, err := framework.LoadConfig()
	Expect(err).NotTo(HaveOccurred())
	client, err := clientset.NewForConfig(config)
	Expect(err).NotTo(HaveOccurred())
	Eventually(func() error {
		nodes, err := client.Nodes().List(api.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		if nodes == nil {
			return fmt.Errorf("the node list is nil")
		}
		// This suite runs against a single node, so seeing more than one is unexpected.
		Expect(len(nodes.Items) > 1).NotTo(BeTrue())
		if len(nodes.Items) == 0 {
			return fmt.Errorf("empty node list: %+v", nodes)
		}
		node := nodes.Items[0]
		if !api.IsNodeReady(&node) {
			return fmt.Errorf("node is not ready: %+v", node)
		}
		return nil
	}, nodeReadyTimeout, nodeReadyPollInterval).Should(Succeed())
}
Example #10
File: server.go Project: ravilr/kubernetes
func getKubeClient(s *options.KubeletServer) (*clientset.Clientset, error) {
	clientConfig, err := CreateAPIServerClientConfig(s)
	if err != nil {
		return nil, err
	}
	kubeClient, err := clientset.NewForConfig(clientConfig)
	if err != nil {
		return nil, err
	}
	return kubeClient, nil
}
Example #11
func startNamespaceController() {
	// Use the default QPS
	config := restclient.AddUserAgent(&restclient.Config{Host: framework.TestContext.Host}, "node-e2e-namespace-controller")
	client, err := clientset.NewForConfig(config)
	Expect(err).NotTo(HaveOccurred())
	clientPool := dynamic.NewClientPool(config, dynamic.LegacyAPIPathResolverFunc)
	resources, err := client.Discovery().ServerPreferredNamespacedResources()
	Expect(err).NotTo(HaveOccurred())
	nc := namespacecontroller.NewNamespaceController(client, clientPool, resources, ncResyncPeriod, api.FinalizerKubernetes)
	go nc.Run(ncConcurrency, wait.NeverStop)
}
Example #12
// Returns a clientset which can be used to talk to this apiserver.
func (s *ServerRunOptions) NewSelfClient() (clientset.Interface, error) {
	clientConfig := &restclient.Config{
		Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
		// Increase QPS limits. The client is currently passed to all admission plugins,
		// and those can be throttled in case of higher load on apiserver - see #22340 and #22422
		// for more details. Once #22422 is fixed, we may want to remove it.
		QPS:   50,
		Burst: 100,
	}
	return clientset.NewForConfig(clientConfig)
}
Example #13
File: helm.go Project: bacongobbler/helm
// getKubeClient is a convenience method for creating kubernetes config and client
// for a given kubeconfig context
func getKubeClient(context string) (*restclient.Config, *internalclientset.Clientset, error) {
	config, err := kube.GetConfig(context).ClientConfig()
	if err != nil {
		return nil, nil, fmt.Errorf("could not get kubernetes config for context '%s': %s", context, err)
	}
	client, err := internalclientset.NewForConfig(config)
	if err != nil {
		return nil, nil, fmt.Errorf("could not get kubernetes client: %s", err)
	}
	return config, client, nil
}

// getAPIServerClient gets an apiserver client.
func getAPIServerClient() (*clientset.Clientset, error) {
	config, err := framework.LoadConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to load config: %v", err)
	}
	client, err := clientset.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("failed to create client: %v", err)
	}
	return client, nil
}
Example #15
// Start starts the namespace controller.
func (n *NamespaceController) Start() error {
	// Use the default QPS
	config := restclient.AddUserAgent(&restclient.Config{Host: framework.TestContext.Host}, ncName)
	client, err := clientset.NewForConfig(config)
	if err != nil {
		return err
	}
	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	gvrFn := client.Discovery().ServerPreferredNamespacedResources
	nc := namespacecontroller.NewNamespaceController(client, clientPool, gvrFn, ncResyncPeriod, api.FinalizerKubernetes)
	go nc.Run(ncConcurrency, n.stopCh)
	return nil
}
Example #16
func createClients(numberOfClients int) ([]*clientset.Clientset, []*internalclientset.Clientset, error) {
	clients := make([]*clientset.Clientset, numberOfClients)
	internalClients := make([]*internalclientset.Clientset, numberOfClients)
	for i := 0; i < numberOfClients; i++ {
		config, err := framework.LoadConfig()
		Expect(err).NotTo(HaveOccurred())
		config.QPS = 100
		config.Burst = 200
		if framework.TestContext.KubeAPIContentType != "" {
			config.ContentType = framework.TestContext.KubeAPIContentType
		}

		// For the purpose of this test, we want to ensure that clients
		// do not share the underlying transport (which is the default
		// behavior in Kubernetes). Thus, we explicitly create a transport
		// for each client here.
		transportConfig, err := config.TransportConfig()
		if err != nil {
			return nil, nil, err
		}
		tlsConfig, err := transport.TLSConfigFor(transportConfig)
		if err != nil {
			return nil, nil, err
		}
		config.Transport = utilnet.SetTransportDefaults(&http.Transport{
			Proxy:               http.ProxyFromEnvironment,
			TLSHandshakeTimeout: 10 * time.Second,
			TLSClientConfig:     tlsConfig,
			MaxIdleConnsPerHost: 100,
			Dial: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		})
		// Overwrite TLS-related fields from config to avoid collision with
		// Transport field.
		config.TLSClientConfig = restclient.TLSClientConfig{}

		c, err := clientset.NewForConfig(config)
		if err != nil {
			return nil, nil, err
		}
		clients[i] = c
		internalClient, err := internalclientset.NewForConfig(config)
		if err != nil {
			return nil, nil, err
		}
		internalClients[i] = internalClient
	}
	return clients, internalClients, nil
}
Example #17
// Find all sibling pods in the service and post to their /write handler.
func contactOthers(state *State) {
	sleepTime := 5 * time.Second
	// In a large cluster, getting all endpoints is pretty expensive.
	// Thus, we limit ourselves to sending on average at most 10 such
	// requests per second.
	if sleepTime < time.Duration(*peerCount/10)*time.Second {
		sleepTime = time.Duration(*peerCount/10) * time.Second
	}
	timeout := 5 * time.Minute
	// Similarly we need to bump timeout so that it is reasonable in large
	// clusters.
	if timeout < time.Duration(*peerCount)*time.Second {
		timeout = time.Duration(*peerCount) * time.Second
	}
	defer state.doneContactingPeers()

	config, err := restclient.InClusterConfig()
	if err != nil {
		log.Fatalf("Unable to create config; error: %v\n", err)
	}
	config.ContentType = "application/vnd.kubernetes.protobuf"
	client, err := clientset.NewForConfig(config)
	if err != nil {
		log.Fatalf("Unable to create client; error: %v\n", err)
	}
	// Double-check that this worked by getting the server version.
	if v, err := client.Discovery().ServerVersion(); err != nil {
		log.Fatalf("Unable to get server version: %v\n", err)
	} else {
		log.Printf("Server version: %#v\n", v)
	}

	for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
		eps := getWebserverEndpoints(client)
		if eps.Len() >= *peerCount {
			break
		}
		state.Logf("%v/%v has %v endpoints (%v), which is fewer than the expected %v. Waiting for all endpoints to come up.", *namespace, *service, len(eps), eps.List(), *peerCount)
	}

	// Do this repeatedly, in case there's some propagation delay with getting
	// newly started pods into the endpoints list.
	for i := 0; i < 15; i++ {
		eps := getWebserverEndpoints(client)
		for ep := range eps {
			state.Logf("Attempting to contact %s", ep)
			contactSingle(ep, state)
		}
		time.Sleep(sleepTime)
	}
}
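The for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) loop above is a poll-until-deadline idiom that recurs throughout these examples. A self-contained sketch of just the idiom, with tryOnce standing in for a real API call:

package main

import (
	"errors"
	"fmt"
	"time"
)

// tryOnce stands in for a real API call; it always fails here so the
// deadline path is exercised.
func tryOnce() error { return errors.New("not ready yet") }

func main() {
	const (
		timeout      = 2 * time.Second
		pollInterval = 200 * time.Millisecond
	)
	var err error
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(pollInterval) {
		if err = tryOnce(); err == nil {
			break
		}
	}
	if err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("succeeded")
}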
Example #18
// Clients returns an OpenShift and a Kubernetes client from a given configuration
func (cfg *Config) Clients() (osclient.Interface, kclientset.Interface, error) {
	cfg.bindEnv()

	kubeClientset, err := kclientset.NewForConfig(cfg.KubeConfig())
	if err != nil {
		return nil, nil, fmt.Errorf("Unable to configure Kubernetes client: %v", err)
	}

	osClient, err := osclient.New(cfg.OpenShiftConfig())
	if err != nil {
		return nil, nil, fmt.Errorf("Unable to configure Origin client: %v", err)
	}

	return osClient, kubeClientset, nil
}
Example #19
// CreateHeapsterRESTClient creates new Heapster REST client. When heapsterHost param is empty
// string the function assumes that it is running inside a Kubernetes cluster and connects via
// service proxy. heapsterHost param is in the format of protocol://address:port,
// e.g., http://localhost:8002.
func CreateHeapsterRESTClient(heapsterHost string, apiclient *client.Clientset) (
	HeapsterClient, error) {

	if heapsterHost == "" {
		log.Print("Creating in-cluster Heapster client")
		return InClusterHeapsterClient{client: apiclient.Core().RESTClient()}, nil
	}

	cfg := &restclient.Config{Host: heapsterHost, QPS: defaultQPS, Burst: defaultBurst}
	restClient, err := client.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	log.Printf("Creating remote Heapster client for %s", heapsterHost)
	return RemoteHeapsterClient{client: restClient.Core().RESTClient()}, nil
}
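A hedged usage sketch of the two modes (the wrapper function and the remote host below are illustrative, not from the source):

// newHeapsterClients shows both call styles: an empty host selects the
// in-cluster service-proxy path, a non-empty host dials Heapster directly.
func newHeapsterClients(apiclient *client.Clientset) (HeapsterClient, HeapsterClient, error) {
	inCluster, err := CreateHeapsterRESTClient("", apiclient)
	if err != nil {
		return nil, nil, err
	}
	remote, err := CreateHeapsterRESTClient("http://localhost:8002", apiclient)
	if err != nil {
		return nil, nil, err
	}
	return inCluster, remote, nil
}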
Example #20
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	// create shared channels
	kubeletFinished := make(chan struct{})
	nodeInfos := make(chan executor.NodeInfo, 1)

	// create static pods directory
	staticPodsConfigPath := filepath.Join(s.RootDirectory, "static-pods")
	err := os.Mkdir(staticPodsConfigPath, 0750)
	if err != nil {
		return err
	}

	// we're expecting that either Mesos or the minion process will set this for us
	s.containerID = os.Getenv(envContainerID)
	if s.containerID == "" {
		log.Warningf("missing expected environment variable %q", envContainerID)
	}

	// create apiserver client
	var apiclient *clientset.Clientset
	clientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
	if err == nil {
		apiclient, err = clientset.NewForConfig(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information back to the apiserver
		return fmt.Errorf("cannot create API client: %v", err)
	}

	var (
		pw = cache.NewListWatchFromClient(apiclient.CoreClient, "pods", api.NamespaceAll,
			fields.OneTermEqualSelector(client.PodHost, s.HostnameOverride),
		)
		reg = executor.NewRegistry(apiclient)
	)

	// start executor
	var executorDone <-chan struct{}
	executorDone, err = s.runExecutor(nodeInfos, kubeletFinished, staticPodsConfigPath, apiclient, reg)
	if err != nil {
		return err
	}

	// start kubelet, blocking
	return s.runKubelet(nodeInfos, kubeletFinished, staticPodsConfigPath, apiclient, pw, reg, executorDone)
}
Example #21
// creates a set of clients for this endpoint
func createClients(caCert []byte, endpoint, token string, nodeName types.NodeName) (*clientset.Clientset, error) {
	bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", endpoint, caCert)
	bootstrapClientConfig, err := clientcmd.NewDefaultClientConfig(
		*kubeadmutil.MakeClientConfigWithToken(
			bareClientConfig, "kubernetes", fmt.Sprintf("kubelet-%s", nodeName), token,
		),
		&clientcmd.ConfigOverrides{},
	).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to create API client configuration [%v]", err)
	}
	clientSet, err := clientset.NewForConfig(bootstrapClientConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create clients for the API endpoint %s [%v]", endpoint, err)
	}
	return clientSet, nil
}
Example #22
File: client.go Project: xgwang-zte/origin
// Clients returns an OpenShift and Kubernetes client with the credentials of the named service account
// TODO: change return types to client.Interface/kclientset.Interface to allow auto-reloading credentials
func Clients(config restclient.Config, tokenRetriever TokenRetriever, namespace, name string) (*restclient.Config, *client.Client, *kclientset.Clientset, error) {
	// Clear existing auth info
	config.Username = ""
	config.Password = ""
	config.CertFile = ""
	config.CertData = []byte{}
	config.KeyFile = ""
	config.KeyData = []byte{}
	config.BearerToken = ""

	kubeUserAgent := ""
	openshiftUserAgent := ""

	// If the caller specified a user agent, don't mess with it.
	if len(config.UserAgent) > 0 {
		kubeUserAgent = config.UserAgent
		openshiftUserAgent = config.UserAgent
	} else {
		kubeUserAgent = fmt.Sprintf("%s system:serviceaccount:%s:%s", restclient.DefaultKubernetesUserAgent(), namespace, name)
		openshiftUserAgent = fmt.Sprintf("%s system:serviceaccount:%s:%s", client.DefaultOpenShiftUserAgent(), namespace, name)
	}

	// For now, just initialize the token once
	// TODO: refetch the token if the client encounters 401 errors
	token, err := tokenRetriever.GetToken(namespace, name)
	if err != nil {
		return nil, nil, nil, err
	}
	config.BearerToken = token

	config.UserAgent = openshiftUserAgent
	c, err := client.New(&config)
	if err != nil {
		return nil, nil, nil, err
	}

	config.UserAgent = kubeUserAgent
	kcset, err := kclientset.NewForConfig(&config)
	if err != nil {
		return nil, nil, nil, err
	}

	return &config, c, kcset, nil
}
Example #23
// Returns a clientset which can be used to talk to this apiserver.
func (s *ServerRunOptions) NewSelfClient() (clientset.Interface, error) {
	clientConfig := &restclient.Config{
		Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
		// Increase QPS limits. The client is currently passed to all admission plugins,
		// and those can be throttled in case of higher load on apiserver - see #22340 and #22422
		// for more details. Once #22422 is fixed, we may want to remove it.
		QPS:   50,
		Burst: 100,
	}
	if len(s.DeprecatedStorageVersion) != 0 {
		gv, err := unversioned.ParseGroupVersion(s.DeprecatedStorageVersion)
		if err != nil {
			glog.Fatalf("error in parsing group version: %s", err)
		}
		clientConfig.GroupVersion = &gv
	}

	return clientset.NewForConfig(clientConfig)
}
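For reference, a brief sketch of what unversioned.ParseGroupVersion accepts (the helper function is illustrative, not from the source):

// parseStorageVersion isolates the parsing step above. "v1" parses to
// the legacy core group (Group "", Version "v1"); "batch/v1" parses to
// Group "batch", Version "v1".
func parseStorageVersion(s string) (*unversioned.GroupVersion, error) {
	gv, err := unversioned.ParseGroupVersion(s)
	if err != nil {
		return nil, err
	}
	return &gv, nil
}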
Example #24
File: kube.go Project: luxas/flannel
func NewSubnetManager() (subnet.Manager, error) {
	cfg, err := restclient.InClusterConfig()
	if err != nil {
		return nil, fmt.Errorf("unable to initialize inclusterconfig: %v", err)
	}
	c, err := clientset.NewForConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize client: %v", err)
	}

	podName := os.Getenv("POD_NAME")
	podNamespace := os.Getenv("POD_NAMESPACE")
	if podName == "" || podNamespace == "" {
		return nil, fmt.Errorf("env variables POD_NAME and POD_NAMESPACE must be set")
	}

	pod, err := c.Pods(podNamespace).Get(podName)
	if err != nil {
		return nil, fmt.Errorf("error retrieving pod spec for '%s/%s': %v", podNamespace, podName, err)
	}
	nodeName := pod.Spec.NodeName
	if nodeName == "" {
		return nil, fmt.Errorf("node name not present in pod spec '%s/%s'", podNamespace, podName)
	}

	netConf, err := ioutil.ReadFile(netConfPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read net conf: %v", err)
	}

	sc, err := subnet.ParseConfig(string(netConf))
	if err != nil {
		return nil, fmt.Errorf("error parsing subnet config: %s", err)
	}
	sm, err := newKubeSubnetManager(c, sc, nodeName)
	if err != nil {
		return nil, fmt.Errorf("error creating network manager: %s", err)
	}
	go sm.Run(context.Background())
	return sm, err
}
Example #25
// CreateApiserverClient creates a new Kubernetes apiserver client. When the kubeconfig or apiserverHost
// param is empty, the function assumes that it is running inside a Kubernetes cluster and attempts to
// discover the apiserver. Otherwise, it connects to the apiserver specified.
//
// apiserverHost param is in the format of protocol://address:port/pathPrefix, e.g., http://localhost:8001.
// kubeConfig is the location of the kubeconfig file.
func CreateApiserverClient(apiserverHost string, kubeConfig string) (*client.Clientset, clientcmd.ClientConfig, error) {
	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: apiserverHost}})

	cfg, err := clientConfig.ClientConfig()
	if err != nil {
		return nil, nil, err
	}

	cfg.QPS = defaultQPS
	cfg.Burst = defaultBurst

	log.Printf("Creating API server client for %s", cfg.Host)

	client, err := client.NewForConfig(cfg)
	if err != nil {
		return nil, nil, err
	}
	return client, clientConfig, nil
}
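A hedged sketch of typical call sites for this helper (the function name and host are illustrative). Empty arguments rely on clientcmd's defaulting, which falls back to in-cluster discovery as the comment above notes; explicit values override it:

// newClients sketches the two invocation modes of CreateApiserverClient.
func newClients() error {
	// Inside a cluster: empty arguments fall back to in-cluster discovery.
	if _, _, err := CreateApiserverClient("", ""); err != nil {
		return err
	}
	// Outside a cluster: point at an apiserver directly.
	if _, _, err := CreateApiserverClient("http://localhost:8001", ""); err != nil {
		return err
	}
	return nil
}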
Example #26
func (cfg *config) RunDeployer() error {
	if len(cfg.rcName) == 0 {
		return fmt.Errorf("--deployment or OPENSHIFT_DEPLOYMENT_NAME is required")
	}
	if len(cfg.Namespace) == 0 {
		return fmt.Errorf("--namespace or OPENSHIFT_DEPLOYMENT_NAMESPACE is required")
	}

	kcfg, err := restclient.InClusterConfig()
	if err != nil {
		return err
	}
	kc, err := kclientset.NewForConfig(kcfg)
	if err != nil {
		return err
	}
	oc, err := client.New(kcfg)
	if err != nil {
		return err
	}

	deployer := NewDeployer(kc, oc, cfg.Out, cfg.ErrOut, cfg.Until)
	return deployer.Deploy(cfg.Namespace, cfg.rcName)
}
Example #27
func main() {
	flag.Parse()

	glog.Infof("Starting cauldron soak test with queries=%d podsPerNode=%d upTo=%d maxPar=%d",
		*queriesAverage, *podsPerNode, *upTo, *maxPar)

	cc, err := restclient.InClusterConfig()
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	client, err := clientset.NewForConfig(cc)
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	var nodes *api.NodeList
	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
		nodes, err = client.Core().Nodes().List(api.ListOptions{})
		if err == nil {
			break
		}
		glog.Warningf("Failed to list nodes: %v", err)
	}
	if err != nil {
		glog.Fatalf("Giving up trying to list nodes: %v", err)
	}

	if len(nodes.Items) == 0 {
		glog.Fatalf("Failed to find any nodes.")
	}

	glog.Infof("Found %d nodes on this cluster:", len(nodes.Items))
	for i, node := range nodes.Items {
		glog.Infof("%d: %s", i, node.Name)
	}

	queries := *queriesAverage * len(nodes.Items) * *podsPerNode

	// Create a uniquely named namespace.
	got, err := client.Core().Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}})
	if err != nil {
		glog.Fatalf("Failed to create namespace: %v", err)
	}
	ns := got.Name
	defer func(ns string) {
		if err := client.Core().Namespaces().Delete(ns, nil); err != nil {
			glog.Warningf("Failed to delete namespace ns: %e", ns, err)
		} else {
			// wait until the namespace disappears
			for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
				if _, err := client.Core().Namespaces().Get(ns); err != nil {
					if errors.IsNotFound(err) {
						return
					}
				}
				time.Sleep(time.Second)
			}
		}
	}(ns)
	glog.Infof("Created namespace %s", ns)

	// Create a service for these pods.
	glog.Infof("Creating service %s/serve-hostnames", ns)
	// Make several attempts to create a service.
	var svc *api.Service
	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
		t := time.Now()
		svc, err = client.Core().Services(ns).Create(&api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: "serve-hostnames",
				Labels: map[string]string{
					"name": "serve-hostname",
				},
			},
			Spec: api.ServiceSpec{
				Ports: []api.ServicePort{{
					Protocol:   "TCP",
					Port:       9376,
					TargetPort: intstr.FromInt(9376),
				}},
				Selector: map[string]string{
					"name": "serve-hostname",
				},
			},
		})
		glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t))
		if err == nil {
			break
		}
		glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err)
	}
	if err != nil {
		glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err)
		return
	}
	// Clean up service
	defer func() {
		glog.Infof("Cleaning up service %s/serve-hostnames", ns)
		// Make several attempts to delete the service.
		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
			if err := client.Core().Services(ns).Delete(svc.Name, nil); err == nil {
				return
			}
			glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
		}
	}()

	// Put serve-hostname pods on each node.
	podNames := []string{}
	for i, node := range nodes.Items {
		for j := 0; j < *podsPerNode; j++ {
			podName := fmt.Sprintf("serve-hostname-%d-%d", i, j)
			podNames = append(podNames, podName)
			// Make several attempts
			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
				glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
				t := time.Now()
				_, err = client.Core().Pods(ns).Create(&api.Pod{
					ObjectMeta: api.ObjectMeta{
						Name: podName,
						Labels: map[string]string{
							"name": "serve-hostname",
						},
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  "serve-hostname",
								Image: "gcr.io/google_containers/serve_hostname:v1.4",
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
						NodeName: node.Name,
					},
				})
				glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t))
				if err == nil {
					break
				}
				glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
			if err != nil {
				glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err)
				return
			}
		}
	}
	// Clean up the pods
	defer func() {
		glog.Info("Cleaning up pods")
		// Make several attempts to delete the pods.
		for _, podName := range podNames {
			for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
				if err = client.Core().Pods(ns).Delete(podName, nil); err == nil {
					break
				}
				glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
		}
	}()

	glog.Info("Waiting for the serve-hostname pods to be ready")
	for _, podName := range podNames {
		var pod *api.Pod
		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
			pod, err = client.Core().Pods(ns).Get(podName)
			if err != nil {
				glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout)
				continue
			}
			if pod.Status.Phase == api.PodRunning {
				break
			}
		}
		if pod == nil {
			glog.Warningf("Gave up waiting on pod %s/%s: no pod object was ever retrieved", ns, podName)
		} else if pod.Status.Phase != api.PodRunning {
			glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase)
		} else {
			glog.Infof("%s/%s is running", ns, podName)
		}
	}

	// Wait for the endpoints to propagate.
	for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) {
		_, err = http.Get(fmt.Sprintf("http://serve-hostnames.%s:9376", ns))
		if err == nil {
			break
		}
		glog.Infof("After %v while making a request got error %v", time.Since(start), err)
	}
	if err != nil {
		glog.Errorf("Failed to get a response from service: %v", err)
	}

	// Repeatedly make requests.
	for iteration := 0; iteration != *upTo; iteration++ {
		responseChan := make(chan string, queries)
		// Use a channel of size *maxPar to throttle the number
		// of in-flight requests to avoid overloading the service.
		inFlight := make(chan struct{}, *maxPar)
		start := time.Now()
		for q := 0; q < queries; q++ {
			go func(i int, query int) {
				inFlight <- struct{}{}
				t := time.Now()
				resp, err := http.Get(fmt.Sprintf("http://serve-hostnames.%s:9376", ns))
				glog.V(4).Infof("Call to serve-hostnames in namespace %s took %v", ns, time.Since(t))
				if err != nil {
					glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err)
					// If the query failed return a string which starts with a character
					// that can't be part of a hostname.
					responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err)
				} else {
					defer resp.Body.Close()
					hostname, err := ioutil.ReadAll(resp.Body)
					if err != nil {
						responseChan <- fmt.Sprintf("!failed in iteration %d to read body of response: %v", i, err)
					} else {
						responseChan <- string(hostname)
					}
				}
				<-inFlight
			}(iteration, q)
		}
		responses := make(map[string]int, *podsPerNode*len(nodes.Items))
		missing := 0
		for q := 0; q < queries; q++ {
			r := <-responseChan
			glog.V(4).Infof("Got response from %s", r)
			responses[r]++
			// If the returned hostname starts with '!' then it indicates
			// an error response.
			if len(r) > 0 && r[0] == '!' {
				glog.V(3).Infof("Got response %s", r)
				missing++
			}
		}
		if missing > 0 {
			glog.Warningf("Missing %d responses out of %d", missing, queries)
		}
		// Report any nodes that did not respond.
		for n, node := range nodes.Items {
			for i := 0; i < *podsPerNode; i++ {
				name := fmt.Sprintf("serve-hostname-%d-%d", n, i)
				if _, ok := responses[name]; !ok {
					glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration)
				}
			}
		}
		glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing",
			iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing)
	}
}
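The inFlight channel in the query loop above is a counting-semaphore idiom: a buffered channel whose capacity caps the number of goroutines doing work at once. A self-contained sketch of just that pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxPar = 3 // at most 3 goroutines work at once
	inFlight := make(chan struct{}, maxPar)
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			inFlight <- struct{}{} // acquire a slot; blocks when full
			fmt.Println("working on", i)
			<-inFlight // release the slot
		}(i)
	}
	wg.Wait()
}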
Example #28
func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Clientset, error) {
	adminClientConfig, err := clientcmd.NewDefaultClientConfig(
		*adminConfig,
		&clientcmd.ConfigOverrides{},
	).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("<master/apiclient> failed to create API client configuration [%v]", err)
	}

	fmt.Println("<master/apiclient> created API client configuration")

	client, err := clientset.NewForConfig(adminClientConfig)
	if err != nil {
		return nil, fmt.Errorf("<master/apiclient> failed to create API client [%v]", err)
	}

	fmt.Println("<master/apiclient> created API client, waiting for the control plane to become ready")

	start := time.Now()
	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
		cs, err := client.ComponentStatuses().List(api.ListOptions{})
		if err != nil {
			return false, nil
		}
		// TODO(phase2) must revisit this when we implement HA
		if len(cs.Items) < 3 {
			fmt.Println("<master/apiclient> not all control plane components are ready yet")
			return false, nil
		}
		for _, item := range cs.Items {
			for _, condition := range item.Conditions {
				if condition.Type != api.ComponentHealthy {
					fmt.Printf("<master/apiclient> control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
					return false, nil
				}
			}
		}

		fmt.Printf("<master/apiclient> all control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
		return true, nil
	})

	fmt.Println("<master/apiclient> waiting for at least one node to register and become ready")
	start = time.Now()
	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
		nodeList, err := client.Nodes().List(api.ListOptions{})
		if err != nil {
			fmt.Println("<master/apiclient> temporarily unable to list nodes (will retry)")
			return false, nil
		}
		if len(nodeList.Items) < 1 {
			return false, nil
		}
		n := &nodeList.Items[0]
		if !api.IsNodeReady(n) {
			fmt.Println("<master/apiclient> first node has registered, but is not ready yet")
			return false, nil
		}

		fmt.Printf("<master/apiclient> first node is ready after %f seconds\n", time.Since(start).Seconds())
		return true, nil
	})

	return client, nil
}
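Both polls above depend on the wait.PollInfinite contract: the condition func returns (false, nil) to retry on the next interval, (true, nil) to stop, and a non-nil error to abort. A brief sketch (the helper and its ready probe are hypothetical):

// pollUntilReady isolates the polling contract used above.
func pollUntilReady(ready func() bool) error {
	return wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
		if !ready() {
			return false, nil // retry on the next interval
		}
		return true, nil // done
	})
}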
Example #29
// Run runs the CMServer.  This should never exit.
func Run(s *options.CMServer) error {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.KubeControllerManagerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return err
	}

	kubeconfig.ContentConfig.ContentType = s.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = int(s.KubeAPIBurst)
	kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "controller-manager"))
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		configz.InstallHandler(mux)
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controller-manager"})

	run := func(stop <-chan struct{}) {
		rootClientBuilder := controller.SimpleControllerClientBuilder{
			ClientConfig: kubeconfig,
		}
		var clientBuilder controller.ControllerClientBuilder
		if len(s.ServiceAccountKeyFile) > 0 {
			clientBuilder = controller.SAControllerClientBuilder{
				ClientConfig: restclient.AnonymousClientConfig(kubeconfig),
				CoreClient:   kubeClient.Core(),
				Namespace:    "kube-system",
			}
		} else {
			clientBuilder = rootClientBuilder
		}

		err := StartControllers(s, kubeconfig, rootClientBuilder, clientBuilder, stop, recorder)
		glog.Fatalf("error running controllers: %v", err)
		panic("unreachable")
	}

	if !s.LeaderElection.LeaderElect {
		run(nil)
		panic("unreachable")
	}

	id, err := os.Hostname()
	if err != nil {
		return err
	}

	// TODO: enable other lock types
	rl := resourcelock.EndpointsLock{
		EndpointsMeta: api.ObjectMeta{
			Namespace: "kube-system",
			Name:      "kube-controller-manager",
		},
		Client: leaderElectionClient,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: recorder,
		},
	}

	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		Lock:          &rl,
		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				glog.Fatalf("leaderelection lost")
			},
		},
	})
	panic("unreachable")
}
Example #30
// NewProxyServerDefault creates a new ProxyServer object with default parameters.
func NewProxyServerDefault(config *options.ProxyServerConfig) (*ProxyServer, error) {
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(config.KubeProxyConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	protocol := utiliptables.ProtocolIpv4
	if net.ParseIP(config.BindAddress).To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}

	// Create an iptables utility.
	execer := exec.New()
	dbus := utildbus.New()
	iptInterface := utiliptables.New(execer, dbus, protocol)

	// We omit creation of pretty much everything if we run in cleanup mode
	if config.CleanupAndExit {
		return &ProxyServer{
			Config:       config,
			IptInterface: iptInterface,
		}, nil
	}

	// TODO(vmarmol): Use container config for this.
	var oomAdjuster *oom.OOMAdjuster
	if config.OOMScoreAdj != nil {
		oomAdjuster = oom.NewOOMAdjuster()
		if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*config.OOMScoreAdj)); err != nil {
			glog.V(2).Info(err)
		}
	}

	if config.ResourceContainer != "" {
		// Run in its own container.
		if err := resourcecontainer.RunInResourceContainer(config.ResourceContainer); err != nil {
			glog.Warningf("Failed to start in resource-only container %q: %v", config.ResourceContainer, err)
		} else {
			glog.V(2).Infof("Running in resource-only container %q", config.ResourceContainer)
		}
	}

	// Create a Kube Client
	// define api config source
	if config.Kubeconfig == "" && config.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: config.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: config.Master}}).ClientConfig()
	if err != nil {
		return nil, err
	}

	kubeconfig.ContentType = config.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = config.KubeAPIQPS
	kubeconfig.Burst = int(config.KubeAPIBurst)

	client, err := clientset.NewForConfig(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Create event recorder
	hostname := nodeutil.GetHostname(config.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: hostname})

	var proxier proxy.ProxyProvider
	var endpointsHandler proxyconfig.EndpointsConfigHandler

	proxyMode := getProxyMode(string(config.Mode), client.Core().Nodes(), hostname, iptInterface, iptables.LinuxKernelCompatTester{})
	if proxyMode == proxyModeIPTables {
		glog.V(0).Info("Using iptables Proxier.")
		if config.IPTablesMasqueradeBit == nil {
			// IPTablesMasqueradeBit must be specified or defaulted.
			return nil, fmt.Errorf("Unable to read IPTablesMasqueradeBit from config")
		}
		proxierIPTables, err := iptables.NewProxier(iptInterface, utilsysctl.New(), execer, config.IPTablesSyncPeriod.Duration, config.MasqueradeAll, int(*config.IPTablesMasqueradeBit), config.ClusterCIDR, hostname, getNodeIP(client, hostname))
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIPTables
		endpointsHandler = proxierIPTables
		// No turning back. Remove artifacts that might still exist from the userspace Proxier.
		glog.V(0).Info("Tearing down userspace rules.")
		userspace.CleanupLeftovers(iptInterface)
	} else {
		glog.V(0).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer

		proxierUserspace, err := userspace.NewProxier(
			loadBalancer,
			net.ParseIP(config.BindAddress),
			iptInterface,
			*utilnet.ParsePortRangeOrDie(config.PortRange),
			config.IPTablesSyncPeriod.Duration,
			config.UDPIdleTimeout.Duration,
		)
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierUserspace
		// Remove artifacts from the pure-iptables Proxier.
		glog.V(0).Info("Tearing down pure-iptables proxy rules.")
		iptables.CleanupLeftovers(iptInterface)
	}
	iptInterface.AddReloadFunc(proxier.Sync)

	// Create configs (i.e. Watches for Services and Endpoints)
	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.
	serviceConfig := proxyconfig.NewServiceConfig()
	serviceConfig.RegisterHandler(proxier)

	endpointsConfig := proxyconfig.NewEndpointsConfig()
	endpointsConfig.RegisterHandler(endpointsHandler)

	proxyconfig.NewSourceAPI(
		client.Core().RESTClient(),
		config.ConfigSyncPeriod,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	config.NodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      hostname,
		UID:       types.UID(hostname),
		Namespace: "",
	}

	conntracker := realConntracker{}

	return NewProxyServer(client, config, iptInterface, proxier, eventBroadcaster, recorder, conntracker, proxyMode)
}