Example #1
File: client.go Project: jhadvig/origin
// Clients returns an OpenShift and Kubernetes client with the credentials of the named service account
// TODO: change return types to client.Interface/kclient.Interface to allow auto-reloading credentials
func Clients(config kclient.Config, tokenRetriever TokenRetriever, namespace, name string) (*client.Client, *kclient.Client, error) {
	// Clear existing auth info
	config.Username = ""
	config.Password = ""
	config.CertFile = ""
	config.CertData = []byte{}
	config.KeyFile = ""
	config.KeyData = []byte{}

	// For now, just initialize the token once
	// TODO: refetch the token if the client encounters 401 errors
	token, err := tokenRetriever.GetToken(namespace, name)
	if err != nil {
		return nil, nil, err
	}
	config.BearerToken = token

	c, err := client.New(&config)
	if err != nil {
		return nil, nil, err
	}

	kc, err := kclient.New(&config)
	if err != nil {
		return nil, nil, err
	}

	return c, kc, nil
}
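From the call above, a TokenRetriever only needs a GetToken(namespace, name string) (string, error) method. A minimal sketch of a static implementation, handy for wiring up tests (the type and field names here are hypothetical, not from the origin codebase):

type staticTokenRetriever struct {
	token string // hypothetical: a fixed token for illustration
}

// GetToken satisfies the TokenRetriever interface implied by Clients above.
func (r staticTokenRetriever) GetToken(namespace, name string) (string, error) {
	return r.token, nil
}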
Example #2
// Init initializes the specified SimulatorBuilder, wiring up a Kubernetes client from the --kubeconfig/--master flags, and returns the builder (or nil on error).
func (s *SimulatorBuilder) Init(_ []string) *SimulatorBuilder {
	glog.V(3).Info("Try to get kubernetes client.")
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
		glog.Fatalf("specify either --etcd-servers or --etcd-config")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		glog.Errorf("Error building kubeconfig: %v", err)
		return nil
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	s.KubeClient = kubeClient

	s.EtcdPathPrefix = master.DefaultEtcdPathPrefix

	return s
}
Example #3
File: server.go Project: nikkomega/origin
func GetClientForUser(clientConfig kclient.Config, username string) (*client.Client, *kclient.Client, *kclient.Config, error) {
	token, err := tokencmd.RequestToken(&clientConfig, nil, username, "password")
	if err != nil {
		return nil, nil, nil, err
	}

	userClientConfig := clientConfig
	userClientConfig.BearerToken = token
	userClientConfig.Username = ""
	userClientConfig.Password = ""
	userClientConfig.TLSClientConfig.CertFile = ""
	userClientConfig.TLSClientConfig.KeyFile = ""
	userClientConfig.TLSClientConfig.CertData = nil
	userClientConfig.TLSClientConfig.KeyData = nil

	kubeClient, err := kclient.New(&userClientConfig)
	if err != nil {
		return nil, nil, nil, err
	}

	osClient, err := client.New(&userClientConfig)
	if err != nil {
		return nil, nil, nil, err
	}

	return osClient, kubeClient, &userClientConfig, nil
}
Example #4
// TODO(jdef): hacked from kubelet/server/server.go
// TODO(k8s): replace this with clientcmd
func (s *SchedulerServer) createAPIServerClient() (*client.Client, error) {
	authInfo, err := clientauth.LoadFromFile(s.AuthPath)
	if err != nil {
		log.Warningf("Could not load kubernetes auth path: %v. Continuing with defaults.", err)
	}
	if authInfo == nil {
		// authInfo didn't load correctly - continue with defaults.
		authInfo = &clientauth.Info{}
	}
	clientConfig, err := authInfo.MergeWithConfig(client.Config{})
	if err != nil {
		return nil, err
	}
	if len(s.APIServerList) < 1 {
		return nil, fmt.Errorf("no api servers specified")
	}
	// TODO: adapt Kube client to support LB over several servers
	if len(s.APIServerList) > 1 {
		log.Infof("Multiple api servers specified.  Picking first one")
	}
	clientConfig.Host = s.APIServerList[0]
	c, err := client.New(&clientConfig)
	if err != nil {
		return nil, err
	}
	return c, nil
}
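The TODO above notes that the client cannot load-balance across several API servers, so the code always takes the first entry. A hedged sketch of naive client-side selection instead (a hypothetical helper, not from the original source; assumes math/rand is imported and seeded by the caller):

func pickAPIServer(servers []string) string {
	// Spread clients across the listed servers at random; this is not
	// failover, just naive balancing at client-construction time.
	return servers[rand.Intn(len(servers))]
}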
Example #5
func Kubeclient() (*kubeclient.Client, error) {
	config := &kubeclient.Config{
		//Host:     "http://10.65.224.102:8080",
		Host:     "http://172.20.2.3:8080",
		Username: "******",
	}
	return kubeclient.New(config)
}
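The hardcoded Host above only works on that one network. A sketch of the same constructor with the host taken from an environment variable instead (the KUBE_HOST name is hypothetical; assumes the os package is imported):

func KubeclientFromEnv() (*kubeclient.Client, error) {
	host := os.Getenv("KUBE_HOST") // hypothetical variable name
	if host == "" {
		host = "http://127.0.0.1:8080" // fall back to a local apiserver
	}
	return kubeclient.New(&kubeclient.Config{Host: host})
}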
Example #6
File: main.go Project: cpg1111/krud
func (k *Krud) update(h *Webhook) error {
	h.UpdateAttempt = true
	h.UpdateStart = time.Now()
	defer func() {
		h.UpdateEnd = time.Now()
	}()
	conf := &client.Config{
		Host: k.Endpoint,
	}
	kubeClient, err := client.New(conf)
	if err != nil {
		return err
	}
	rcs := kubeClient.ReplicationControllers(k.Namespace)
	oldRc, err := rcs.Get(k.ControllerName)
	if err != nil {
		return err
	}
	newRc, err := rcs.Get(k.ControllerName)
	if err != nil {
		return err
	}
	hash, err := api.HashObject(oldRc, kubeClient.Codec)
	if err != nil {
		return err
	}
	h.UpdateID = hash
	newRc.Name = fmt.Sprintf("%s-%s", k.ControllerName, hash)
	newRc.ResourceVersion = ""
	apply := func(key, value string, ms ...map[string]string) {
		for _, m := range ms {
			m[key] = value
		}
	}
	apply(k.DeploymentKey, hash, newRc.Spec.Selector, newRc.Spec.Template.Labels)
	apply("run", k.ControllerName, newRc.Spec.Selector, newRc.Spec.Template.Labels)
	ruconf := kubectl.RollingUpdaterConfig{
		Out: &lockBuffer{
			k: k,
			h: h,
		},
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   time.Second * 3, // todo: change to time.Minute
		Timeout:        time.Minute * 5,
		Interval:       time.Second * 3,
		UpdateAcceptor: kubectl.DefaultUpdateAcceptor,
		CleanupPolicy:  kubectl.RenameRollingUpdateCleanupPolicy,
	}
	ruc := kubectl.NewRollingUpdaterClient(kubeClient)
	println("doing rolling update")
	err = kubectl.NewRollingUpdater(k.Namespace, ruc).Update(&ruconf)
	println("done")
	k.Lock()
	h.UpdateSuccess = err == nil
	k.Unlock()
	return err
}
Example #7
File: server.go Project: Tlacenka/origin
// Run runs the specified SchedulerServer.  This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	configFactory := factory.NewConfigFactory(kubeClient)
	config, err := s.createConfig(configFactory)
	if err != nil {
		glog.Fatalf("Failed to create scheduler configuration: %v", err)
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	sched := scheduler.New(config)
	sched.Run()

	select {}
}
Example #8
File: dc.go Project: hvescovi/kubernetes
func main() {
	flag.Usage = usage
	flag.Parse()

	var (
		cfg *kclient.Config
		err error
	)
	if *local {
		cfg = &kclient.Config{Host: fmt.Sprintf("http://localhost:%d", *localPort)}
	} else {
		cfg, err = kclient.InClusterConfig()
		if err != nil {
			glog.Errorf("failed to load config: %v", err)
			flag.Usage()
			os.Exit(1)
		}
	}
	client, err = kclient.New(cfg)
	if err != nil {
		glog.Fatal(err)
	}

	selector, err := labels.Parse(*userLabels)
	if err != nil {
		glog.Fatal(err)
	}
	tc, err := parseTimeCounts(*times, *counts)
	if err != nil {
		glog.Fatal(err)
	}
	if namespace == "" {
		glog.Fatal("POD_NAMESPACE is not set. Set to the namespace of the replication controller if running locally.")
	}
	scaler := Scaler{timeCounts: tc, selector: selector}

	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGQUIT,
		syscall.SIGTERM)

	glog.Info("starting scaling")
	if err := scaler.Start(); err != nil {
		glog.Fatal(err)
	}
	<-sigChan
	glog.Info("stopping scaling")
	if err := scaler.Stop(); err != nil {
		glog.Fatal(err)
	}
}
Example #9
func NewClient(f *cmdutil.Factory) (*client.Client, *client.Config) {
	var err error
	cfg, err := f.ClientConfig()
	if err != nil {
		util.Error("Could not initialise a client - is your server setting correct?\n\n")
		util.Fatalf("%v", err)
	}
	c, err := client.New(cfg)
	if err != nil {
		util.Fatalf("Could not initialise a client: %v", err)
	}

	return c, cfg
}
Example #10
// Clients returns an OpenShift and a Kubernetes client from a given configuration
func (cfg *Config) Clients() (osclient.Interface, kclient.Interface, error) {
	cfg.bindEnv()

	kubeClient, err := kclient.New(cfg.KubeConfig())
	if err != nil {
		return nil, nil, fmt.Errorf("Unable to configure Kubernetes client: %v", err)
	}

	osClient, err := osclient.New(cfg.OpenShiftConfig())
	if err != nil {
		return nil, nil, fmt.Errorf("Unable to configure Origin client: %v", err)
	}

	return osClient, kubeClient, nil
}
Example #11
// ClientForVersion initializes or reuses a client for the specified version, or returns an
// error if that is not possible
func (c *ClientCache) ClientForVersion(version string) (*client.Client, error) {
	if client, ok := c.clients[version]; ok {
		return client, nil
	}
	config, err := c.ClientConfigForVersion(version)
	if err != nil {
		return nil, err
	}
	client, err := client.New(config)
	if err != nil {
		return nil, err
	}

	c.clients[config.Version] = client
	return client, nil
}
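Note that the cache is read with the requested version string but written with config.Version; if the two differ (for example, an empty version that resolves to a concrete one), the lookup never hits and a new client is built on every call. A sketch of storing under both keys, assuming the field layout shown above:

func (c *ClientCache) cachedClientForVersion(version string) (*client.Client, error) {
	if cached, ok := c.clients[version]; ok {
		return cached, nil
	}
	config, err := c.ClientConfigForVersion(version)
	if err != nil {
		return nil, err
	}
	newClient, err := client.New(config)
	if err != nil {
		return nil, err
	}
	// Store under both the requested and the resolved version so a later
	// call with either string reuses this client.
	c.clients[version] = newClient
	c.clients[config.Version] = newClient
	return newClient, nil
}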
Example #12
File: registrar.go Project: leonfs/romulus
// NewRegistrar returns a ptr to a new Registrar from a Config
func NewRegistrar(c *Config) (*Registrar, error) {
	cf := c.kc()
	kc, err := client.New(&cf)
	if err != nil {
		return nil, err
	}
	ec, err := NewEtcdClient(c.ps(), formatEtcdNamespace(c.VulcanEtcdNamespace), c.EtcdTimeout)
	if err != nil {
		return nil, err
	}
	return &Registrar{
		e:  ec,
		k:  kc,
		v:  c.APIVersion,
		s:  c.Selector.fixNamespace(),
		vk: formatEtcdNamespace(c.VulcanEtcdNamespace),
	}, nil
}
Example #13
func main() {
	flags.Parse(os.Args)
	cfg := parseCfg(*config)
	if len(*tcpServices) == 0 {
		glog.Infof("All tcp/https services will be ignored.")
	}
	go healthzServer()

	var kubeClient *client.Client
	var err error
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	if *cluster {
		if kubeClient, err = client.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = client.New(config)
		if err != nil {
			glog.Fatalf("error creating client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = "default"
	}

	namespace = ""

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
Example #14
func getKubeClient() (string, *kclient.Client, error) {
	c, err := kclientcmd.LoadFromFile(*kubeConfig)
	if err != nil {
		return "", nil, fmt.Errorf("error loading kubeConfig: %v", err.Error())
	}
	config, err := kclientcmd.NewDefaultClientConfig(
		*c,
		&kclientcmd.ConfigOverrides{
			ClusterInfo: kclientcmdapi.Cluster{
				APIVersion: "v1",
			},
		}).ClientConfig()
	if err != nil {
		return "", nil, fmt.Errorf("error parsing kubeConfig: %v", err.Error())
	}
	kubeClient, err := kclient.New(config)
	if err != nil {
		return "", nil, fmt.Errorf("error creating client - %q", err)
	}

	return c.Clusters[c.CurrentContext].Server, kubeClient, nil
}
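A hedged usage sketch of the helper above (assumes a --kubeconfig flag bound to the kubeConfig variable, as the dereference in the function implies, and reuses the Namespaces().Get call seen in Example #18 as a smoke test):

	server, kubeClient, err := getKubeClient()
	if err != nil {
		glog.Fatalf("could not build a client: %v", err)
	}
	glog.Infof("connected to cluster at %s", server)
	if _, err := kubeClient.Namespaces().Get("default"); err != nil {
		glog.Fatalf("smoke test against %s failed: %v", server, err)
	}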
Example #15
// TODO: evaluate using pkg/client/clientcmd
func newKubeClient() (*kclient.Client, error) {
	var (
		config    *kclient.Config
		err       error
		masterURL string
	)
	// If the user specified --kube_master_url, expand env vars and verify it.
	if *argKubeMasterURL != "" {
		masterURL, err = expandKubeMasterURL()
		if err != nil {
			return nil, err
		}
	}
	if masterURL != "" && *argKubecfgFile == "" {
		// Only --kube_master_url was provided.
		config = &kclient.Config{
			Host:    masterURL,
			Version: "v1",
		}
	} else {
		// We either have:
		//  1) --kube_master_url and --kubecfg_file
		//  2) just --kubecfg_file
		//  3) neither flag
		// In any case, the logic is the same.  If (3), this will automatically
		// fall back on the service account token.
		overrides := &kclientcmd.ConfigOverrides{}
		overrides.ClusterInfo.Server = masterURL                                     // might be "", but that is OK
		rules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: *argKubecfgFile} // might be "", but that is OK
		if config, err = kclientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig(); err != nil {
			return nil, err
		}
	}

	glog.Infof("Using %s for kubernetes master", config.Host)
	glog.Infof("Using kubernetes API %s", config.Version)
	return kclient.New(config)
}
Example #16
func main() {
	fmt.Printf("kubernetes-haproxy-bridge\n")

	ns := api.NamespaceDefault

	config := k8.Config{
		Host: "http://172.17.8.101:8080",
		// Username: "******",
		// Password: "******",
	}
	client, err := k8.New(&config)

	if err != nil {
		fmt.Print(err.Error())
		return
	}

	selector := labels.Set{"external/public": "true"}.AsSelector()
	//selector := labels.Everything()
	servicesList, err := client.Services(ns).List(selector)

	if err != nil {
		fmt.Print(err.Error())
		return
	}
	for _, service := range servicesList.Items {
		// fmt.Printf("%v \n", service.ObjectMeta.Name)
		spew.Dump(service)
		spew.Dump(service.ObjectMeta.Annotations)
	}
	//fmt.Printf("", client.Pods("default").List(labels.Everything(), fields.Everything()));

	//construct state
	//format the config file
	// reload ha proxy

}
Example #17
File: helpers.go Project: jhadvig/origin
// TODO: clients should be copied and instantiated from a common client config, tweaked, then
// given to individual controllers and other infrastructure components.
func GetKubeClient(kubeConfigFile string) (*kclient.Client, *kclient.Config, error) {
	loadingRules := &clientcmd.ClientConfigLoadingRules{}
	loadingRules.ExplicitPath = kubeConfigFile
	loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})

	kubeConfig, err := loader.ClientConfig()
	if err != nil {
		return nil, nil, err
	}

	// This is an internal client which is shared by most controllers, so boost default QPS
	// TODO: this should be configured by the caller, not in this method.
	kubeConfig.QPS = 100.0
	kubeConfig.Burst = 200

	kubeConfig.WrapTransport = DefaultClientTransport
	kubeClient, err := kclient.New(kubeConfig)
	if err != nil {
		return nil, nil, err
	}

	return kubeClient, kubeConfig, nil
}
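kubeConfig.WrapTransport above receives the client's http.RoundTripper and may decorate it before the client is built. A minimal sketch of such a wrapper, here a logging decorator (hypothetical; the actual DefaultClientTransport in origin is not shown in this snippet; assumes net/http and glog are imported):

type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

// loggingTransport logs each request before delegating to the wrapped RoundTripper.
func loggingTransport(rt http.RoundTripper) http.RoundTripper {
	return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
		glog.V(4).Infof("%s %s", req.Method, req.URL)
		return rt.RoundTrip(req)
	})
}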
Example #18
// TestProjectIsNamespace verifies that a project is a namespace, and a namespace is a project
func TestProjectIsNamespace(t *testing.T) {
	testutil.DeleteAllEtcdKeys()
	etcdClient := testutil.NewEtcdClient()
	etcdHelper, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, "v1", etcdtest.PathPrefix())
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// create a kube and its client
	kubeInterfaces, _ := klatest.InterfacesFor(klatest.Version)
	namespaceStorage, _, _ := namespaceetcd.NewStorage(etcdHelper)
	kubeStorage := map[string]rest.Storage{
		"namespaces": namespaceStorage,
	}

	osMux := http.NewServeMux()
	server := httptest.NewServer(osMux)
	defer server.Close()
	handlerContainer := master.NewHandlerContainer(osMux)

	version := &apiserver.APIGroupVersion{
		Root:    "/api",
		Version: "v1beta3",

		Storage: kubeStorage,
		Codec:   kv1beta3.Codec,

		Mapper: klatest.RESTMapper,

		Creater:   kapi.Scheme,
		Typer:     kapi.Scheme,
		Convertor: kapi.Scheme,
		Linker:    kubeInterfaces.MetadataAccessor,

		Admit:   admit.NewAlwaysAdmit(),
		Context: kapi.NewRequestContextMapper(),
	}
	if err := version.InstallREST(handlerContainer); err != nil {
		t.Fatalf("unable to install REST: %v", err)
	}

	kubeClient, err := kclient.New(&kclient.Config{Host: server.URL, Version: "v1beta3"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// create an origin
	originInterfaces, _ := latest.InterfacesFor(latest.Version)
	originStorage := map[string]rest.Storage{
		"projects": projectregistry.NewREST(kubeClient.Namespaces(), nil),
	}
	osVersion := &apiserver.APIGroupVersion{
		Root:    "/oapi",
		Version: "v1",

		Storage: originStorage,
		Codec:   latest.Codec,

		Mapper: latest.RESTMapper,

		Creater:   kapi.Scheme,
		Typer:     kapi.Scheme,
		Convertor: kapi.Scheme,
		Linker:    originInterfaces.MetadataAccessor,

		Admit:   admit.NewAlwaysAdmit(),
		Context: kapi.NewRequestContextMapper(),
	}
	if err := osVersion.InstallREST(handlerContainer); err != nil {
		t.Fatalf("unable to install REST: %v", err)
	}

	originClient, err := client.New(&kclient.Config{Host: server.URL})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// create a namespace
	namespace := &kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{Name: "integration-test"},
	}
	namespaceResult, err := kubeClient.Namespaces().Create(namespace)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// now try to get the project with the same name and ensure it is our namespace
	project, err := originClient.Projects().Get(namespaceResult.Name)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if project.Name != namespace.Name {
		t.Fatalf("Project name did not match namespace name, project %v, namespace %v", project.Name, namespace.Name)
	}

	// now create a project
	project = &projectapi.Project{
		ObjectMeta: kapi.ObjectMeta{
			Name: "new-project",
			Annotations: map[string]string{
				"openshift.io/display-name":  "Hello World",
				"openshift.io/node-selector": "env=test",
			},
		},
	}
	projectResult, err := originClient.Projects().Create(project)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// now get the namespace for that project
	namespace, err = kubeClient.Namespaces().Get(projectResult.Name)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if project.Name != namespace.Name {
		t.Fatalf("Project name did not match namespace name, project %v, namespace %v", project.Name, namespace.Name)
	}
	if project.Annotations["openshift.io/display-name"] != namespace.Annotations["openshift.io/display-name"] {
		t.Fatalf("Project display name did not match namespace annotation, project %v, namespace %v", project.Annotations["openshift.io/display-name"], namespace.Annotations["openshift.io/display-name"])
	}
	if project.Annotations["openshift.io/node-selector"] != namespace.Annotations["openshift.io/node-selector"] {
		t.Fatalf("Project node selector did not match namespace node selector, project %v, namespace %v", project.Annotations["openshift.io/node-selector"], namespace.Annotations["openshift.io/node-selector"])
	}
}
Example #19
// Run runs the specified APIServer.  This should never exit.
func (s *APIServer) Run(_ []string) error {
	s.verifyClusterIPFlags()

	// If advertise-address is not specified, use bind-address. If bind-address
	// is also unset (or 0.0.0.0), setDefaults() in pkg/master/master.go will
	// do the right thing and use the host's default interface.
	if s.AdvertiseAddress == nil || net.IP(s.AdvertiseAddress).IsUnspecified() {
		s.AdvertiseAddress = s.BindAddress
	}

	if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
		glog.Fatalf("specify either --etcd-servers or --etcd-config")
	}

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: s.AllowPrivileged,
		// TODO(vmarmol): Implement support for HostNetworkSources.
		HostNetworkSources:                     []string{},
		PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
	})

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	kubeletClient, err := client.NewKubeletClient(&s.KubeletConfig)
	if err != nil {
		glog.Fatalf("Failure to start kubelet client: %v", err)
	}

	// "api/all=false" allows users to selectively enable specific api versions.
	disableAllAPIs := false
	allAPIFlagValue, ok := s.RuntimeConfig["api/all"]
	if ok && allAPIFlagValue == "false" {
		disableAllAPIs = true
	}

	// "api/legacy=false" allows users to disable legacy api versions.
	disableLegacyAPIs := false
	legacyAPIFlagValue, ok := s.RuntimeConfig["api/legacy"]
	if ok && legacyAPIFlagValue == "false" {
		disableLegacyAPIs = true
	}
	_ = disableLegacyAPIs // hush the compiler while we don't have legacy APIs to disable.

	// "api/v1={true|false} allows users to enable/disable v1 API.
	// This takes preference over api/all and api/legacy, if specified.
	disableV1 := disableAllAPIs
	disableV1 = !s.getRuntimeConfigValue("api/v1", !disableV1)

	// "experimental/v1={true|false} allows users to enable/disable the experimental API.
	// This takes preference over api/all, if specified.
	enableExp := s.getRuntimeConfigValue("experimental/v1", false)

	// TODO: expose same flags as client.BindClientConfigFlags but for a server
	clientConfig := &client.Config{
		Host:    net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
		Version: s.StorageVersion,
	}
	apiClient, err := client.New(clientConfig)
	if err != nil {
		glog.Fatalf("Invalid server address: %v", err)
	}

	etcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, latest.InterfacesFor, latest.Version, s.StorageVersion, s.EtcdPathPrefix)
	if err != nil {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}
	expEtcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, explatest.InterfacesFor, explatest.Version, s.ExpStorageVersion, s.EtcdPathPrefix)
	if err != nil {
		glog.Fatalf("Invalid experimental storage version or misconfigured etcd: %v", err)
	}

	n := net.IPNet(s.ServiceClusterIPRange)

	// Default to the private server key for service account token signing
	if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
		if apiserver.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) {
			s.ServiceAccountKeyFile = s.TLSPrivateKeyFile
		} else {
			glog.Warning("no RSA key provided, service account token authentication disabled")
		}
	}
	authenticator, err := apiserver.NewAuthenticator(s.BasicAuthFile, s.ClientCAFile, s.TokenAuthFile, s.ServiceAccountKeyFile, s.ServiceAccountLookup, etcdStorage)
	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(s.AuthorizationMode, s.AuthorizationPolicyFile)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	admissionControlPluginNames := strings.Split(s.AdmissionControl, ",")
	admissionController := admission.NewFromPlugins(apiClient, admissionControlPluginNames, s.AdmissionControlConfigFile)

	if len(s.ExternalHost) == 0 {
		// TODO: extend for other providers
		if s.CloudProvider == "gce" {
			instances, supported := cloud.Instances()
			if !supported {
				glog.Fatalf("gce cloud provider has no instances.  this shouldn't happen. exiting.")
			}
			name, err := os.Hostname()
			if err != nil {
				glog.Fatalf("failed to get hostname: %v", err)
			}
			addrs, err := instances.NodeAddresses(name)
			if err != nil {
				glog.Warningf("unable to obtain external host address from cloud provider: %v", err)
			} else {
				for _, addr := range addrs {
					if addr.Type == api.NodeExternalIP {
						s.ExternalHost = addr.Address
					}
				}
			}
		}
	}
	var installSSH master.InstallSSHKey
	if cloud != nil {
		if instances, supported := cloud.Instances(); supported {
			installSSH = instances.AddSSHKeyToAllInstances
		}
	}
	config := &master.Config{
		DatabaseStorage:    etcdStorage,
		ExpDatabaseStorage: expEtcdStorage,

		EventTTL:               s.EventTTL,
		KubeletClient:          kubeletClient,
		ServiceClusterIPRange:  &n,
		EnableCoreControllers:  true,
		EnableLogsSupport:      s.EnableLogsSupport,
		EnableUISupport:        true,
		EnableSwaggerSupport:   true,
		EnableProfiling:        s.EnableProfiling,
		EnableIndex:            true,
		APIPrefix:              s.APIPrefix,
		ExpAPIPrefix:           s.ExpAPIPrefix,
		CorsAllowedOriginList:  s.CorsAllowedOriginList,
		ReadWritePort:          s.SecurePort,
		PublicAddress:          net.IP(s.AdvertiseAddress),
		Authenticator:          authenticator,
		SupportsBasicAuth:      len(s.BasicAuthFile) > 0,
		Authorizer:             authorizer,
		AdmissionControl:       admissionController,
		DisableV1:              disableV1,
		EnableExp:              enableExp,
		MasterServiceNamespace: s.MasterServiceNamespace,
		ClusterName:            s.ClusterName,
		ExternalHost:           s.ExternalHost,
		MinRequestTimeout:      s.MinRequestTimeout,
		SSHUser:                s.SSHUser,
		SSHKeyfile:             s.SSHKeyfile,
		InstallSSHKey:          installSSH,
		ServiceNodePortRange:   s.ServiceNodePortRange,
	}
	m := master.New(config)

	// We serve on 2 ports.  See docs/accessing_the_api.md
	secureLocation := ""
	if s.SecurePort != 0 {
		secureLocation = net.JoinHostPort(s.BindAddress.String(), strconv.Itoa(s.SecurePort))
	}
	insecureLocation := net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort))

	// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.

	var sem chan bool
	if s.MaxRequestsInFlight > 0 {
		sem = make(chan bool, s.MaxRequestsInFlight)
	}

	longRunningRE := regexp.MustCompile(s.LongRunningRequestRE)
	longRunningTimeout := func(req *http.Request) (<-chan time.Time, string) {
		// TODO unify this with apiserver.MaxInFlightLimit
		if longRunningRE.MatchString(req.URL.Path) || req.URL.Query().Get("watch") == "true" {
			return nil, ""
		}
		return time.After(time.Minute), ""
	}

	if secureLocation != "" {
		handler := apiserver.TimeoutHandler(m.Handler, longRunningTimeout)
		secureServer := &http.Server{
			Addr:           secureLocation,
			Handler:        apiserver.MaxInFlightLimit(sem, longRunningRE, apiserver.RecoverPanics(handler)),
			MaxHeaderBytes: 1 << 20,
			TLSConfig: &tls.Config{
				// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
				MinVersion: tls.VersionTLS10,
			},
		}

		if len(s.ClientCAFile) > 0 {
			clientCAs, err := util.CertPoolFromFile(s.ClientCAFile)
			if err != nil {
				glog.Fatalf("unable to load client CA file: %v", err)
			}
			// Populate PeerCertificates in requests, but don't reject connections without certificates
			// This allows certificates to be validated by authenticators, while still allowing other auth types
			secureServer.TLSConfig.ClientAuth = tls.RequestClientCert
			// Specify allowed CAs for client certificates
			secureServer.TLSConfig.ClientCAs = clientCAs
		}

		glog.Infof("Serving securely on %s", secureLocation)
		go func() {
			defer util.HandleCrash()
			for {
				if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" {
					s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt")
					s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key")
					// TODO (cjcullen): Is PublicAddress the right address to sign a cert with?
					alternateIPs := []net.IP{config.ServiceReadWriteIP}
					alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}
					// It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless
					// alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME")
					if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil {
						glog.Errorf("Unable to generate self signed cert: %v", err)
					} else {
						glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
					}
				}
				// err == systemd.SdNotifyNoSocket when not running on a systemd system
				if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
					glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
				}
				if err := secureServer.ListenAndServeTLS(s.TLSCertFile, s.TLSPrivateKeyFile); err != nil {
					glog.Errorf("Unable to listen for secure (%v); will try again.", err)
				}
				time.Sleep(15 * time.Second)
			}
		}()
	}
	handler := apiserver.TimeoutHandler(m.InsecureHandler, longRunningTimeout)
	http := &http.Server{
		Addr:           insecureLocation,
		Handler:        apiserver.RecoverPanics(handler),
		MaxHeaderBytes: 1 << 20,
	}
	if secureLocation == "" {
		// err == systemd.SdNotifyNoSocket when not running on a systemd system
		if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
			glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
		}
	}
	glog.Infof("Serving insecurely on %s", insecureLocation)
	glog.Fatal(insecureServer.ListenAndServe())
	return nil
}
Example #20
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
	rand.Seed(time.Now().UTC().UnixNano())

	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		log.Info(err)
	}

	// derive the executor cgroup and use it as docker cgroup root
	mesosCgroup := findMesosCgroup(s.cgroupPrefix)
	s.cgroupRoot = mesosCgroup
	s.SystemContainer = mesosCgroup
	s.ResourceContainer = mesosCgroup
	log.V(2).Infof("passing cgroup %q to the kubelet as cgroup root", s.CgroupRoot)

	// create apiserver client
	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil {
		// required for k8sm since we need to send api.Binding information
		// back to the apiserver
		log.Fatalf("No API client: %v", err)
	}

	log.Infof("Using root directory: %v", s.RootDirectory)
	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	shutdownCloser, err := s.syncExternalShutdownWatcher()
	if err != nil {
		return err
	}

	cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}

	//TODO(jdef) intentionally NOT initializing a cloud provider here since:
	//(a) the kubelet doesn't actually use it
	//(b) we don't need to create N-kubelet connections to zookeeper for no good reason
	//cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	//log.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}
	mounter := mount.New()
	if s.Containerized {
		log.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = &mount.NsenterMounter{}
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		log.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := app.KubeletConfig{
		Address:            s.Address,
		AllowPrivileged:    s.AllowPrivileged,
		HostNetworkSources: hostNetworkSources,
		HostnameOverride:   s.HostnameOverride,
		RootDirectory:      s.RootDirectory,
		// ConfigFile: ""
		// ManifestURL: ""
		FileCheckFrequency: s.FileCheckFrequency,
		// HTTPCheckFrequency
		PodInfraContainerImage:  s.PodInfraContainerImage,
		SyncFrequency:           s.SyncFrequency,
		RegistryPullQPS:         s.RegistryPullQPS,
		RegistryBurst:           s.RegistryBurst,
		MinimumGCAge:            s.MinimumGCAge,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxContainerCount:       s.MaxContainerCount,
		RegisterNode:            s.RegisterNode,
		// StandaloneMode: false
		ClusterDomain:                  s.ClusterDomain,
		ClusterDNS:                     s.ClusterDNS,
		Runonce:                        s.RunOnce,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		CadvisorInterface:              cadvisorInterface,
		EnableServer:                   s.EnableServer,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient:                     apiclient,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		VolumePlugins:                  app.ProbeVolumePlugins(),
		NetworkPlugins:                 app.ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName:              s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          nil, // TODO(jdef) Cloud, specifying null here because we don't want all kubelets polling mesos-master; need to account for this in the cloudprovider impl
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		ResourceContainer:         s.ResourceContainer,
		CgroupRoot:                s.cgroupRoot,
		ContainerRuntime:          s.ContainerRuntime,
		Mounter:                   mounter,
		DockerDaemonContainer:     s.DockerDaemonContainer,
		SystemContainer:           s.SystemContainer,
		ConfigureCBR0:             s.ConfigureCBR0,
		MaxPods:                   s.MaxPods,
		DockerExecHandler:         dockerExecHandler,
	}

	kcfg.NodeName = kcfg.Hostname

	err = app.RunKubelet(&kcfg, app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
		return s.createAndInitKubelet(kc, hks, clientConfig, shutdownCloser)
	}))
	if err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Forever(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				log.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// block until executor is shut down or commits shutdown
	select {}
}
Example #21
File: server.go Project: shrids/kubernetes
// Run runs the specified KubeletServer for the given KubeletConfig.  This should never exit.
// The kcfg argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletConfig object and all defaults
// will be ignored.
func (s *KubeletServer) Run(kcfg *KubeletConfig) error {
	if kcfg == nil {
		cfg, err := s.KubeletConfig()
		if err != nil {
			return err
		}
		kcfg = cfg

		clientConfig, err := s.CreateAPIServerClientConfig()
		if err == nil {
			kcfg.KubeClient, err = client.New(clientConfig)
		}
		if err != nil && len(s.APIServerList) > 0 {
			glog.Warningf("No API client: %v", err)
		}

		cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
		if err != nil {
			return err
		}
		glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
		kcfg.Cloud = cloud
	}

	if kcfg.CadvisorInterface == nil {
		ca, err := cadvisor.New(s.CadvisorPort)
		if err != nil {
			return err
		}
		kcfg.CadvisorInterface = ca
	}

	util.ReallyCrash = s.ReallyCrashForTesting
	rand.Seed(time.Now().UTC().UnixNano())

	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	glog.V(2).Infof("Using root directory: %v", s.RootDirectory)

	// TODO(vmarmol): Do this through container config.
	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.Warning(err)
	}

	if err := RunKubelet(kcfg, nil); err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Forever(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	if s.RunOnce {
		return nil
	}

	// run forever
	select {}
}
Example #22
// Run runs the specified VMTServer.  This should never exit.
func (s *VMTServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	glog.V(3).Infof("Master is %s", s.Master)

	if s.MetaConfigPath == "" {
		glog.Fatalf("The path to the VMT config file is not provided.Exiting...")
		os.Exit(1)
	}

	if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
		glog.Fatalf("specify either --etcd-servers or --etcd-config")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		glog.Errorf("Error getting kubeconfig:  %s", err)
		return err
	}
	// QPS and Burst cap the sustained and peak queries per second to the API server.
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// TODO not clear
	// go func() {
	// 	mux := http.NewServeMux()
	// 	healthz.InstallHandler(mux)
	// 	if s.EnableProfiling {
	// 		mux.HandleFunc("/debug/pprof/", pprof.Index)
	// 		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	// 		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	// 	}
	// 	mux.Handle("/metrics", prometheus.Handler())

	// 	server := &http.Server{
	// 		Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
	// 		Handler: mux,
	// 	}
	// 	glog.Fatal(server.ListenAndServe())
	// }()

	// serverAddr, targetType, nameOrAddress, targetIdentifier, password
	vmtMeta, err := metadata.NewVMTMeta(s.MetaConfigPath)
	if err != nil {
		glog.Errorf("Get error when loading configurations: %s", err)
		os.Exit(1)
	}
	glog.V(3).Infof("Finished loading configuration from %s", s.MetaConfigPath)

	etcdclientBuilder := etcdhelper.NewEtcdClientBuilder().ServerList(s.EtcdServerList).SetTransport(s.EtcdCA, s.EtcdClientCertificate, s.EtcdClientKey)
	etcdClient, err := etcdclientBuilder.CreateAndTest()
	if err != nil {
		glog.Errorf("Error creating etcd client instance for vmt service: %s", err)
		return err
	}

	s.EtcdPathPrefix = master.DefaultEtcdPathPrefix
	etcdStorage, err := newEtcd(etcdClient, latest.InterfacesFor, latest.Version, "", s.EtcdPathPrefix)
	if err != nil {
		glog.Warningf("Error creating etcd storage instance for vmt service: %s", err)
		return err
	}

	vmtConfig := vmturbo.NewVMTConfig(kubeClient, etcdStorage, vmtMeta)

	vmtService := vmturbo.NewVMTurboService(vmtConfig)

	vmtService.Run()

	select {}
}
Example #23
// Run runs the CMServer.  This should never exit.
func (s *CMServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}

	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30

	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())

		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()

	endpoints := endpointcontroller.NewEndpointController(kubeClient)
	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)

	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient, replicationControllerPkg.BurstReplicas)
	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)

	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, nodecontroller.NewPodEvictor(util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst)),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)

	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}

	if s.AllocateNodeCIDRs {
		if cloud == nil {
			glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
		} else {
			routeController := routecontroller.New(routes, kubeClient, s.ClusterName, &s.ClusterCIDR)
			routeController.Run(s.NodeSyncPeriod)
		}
	}

	resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient)
	resourceQuotaController.Run(s.ResourceQuotaSyncPeriod)

	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, s.NamespaceSyncPeriod)
	namespaceController.Run()

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()

	volumeConfig := volume.NewVolumeConfig()
	volumeConfig.PersistentVolumeRecyclerMinTimeoutHostPath = int64(s.PersistentVolumeRecyclerMinTimeoutHostPath)
	volumeConfig.PersistentVolumeRecyclerTimeoutIncrementHostPath = int64(s.PersistentVolumeRecyclerTimeoutIncrementHostPath)
	volumeConfig.PersistentVolumeRecyclerMinTimeoutNfs = int64(s.PersistentVolumeRecyclerMinTimeoutNfs)
	volumeConfig.PersistentVolumeRecyclerTimeoutIncrementNfs = int64(s.PersistentVolumeRecyclerTimeoutIncrementNfs)
	if s.PersistentVolumeRecyclerDefaultScrubPod != "" {
		scrubPod, err := volume.InitScrubPod(s.PersistentVolumeRecyclerDefaultScrubPod)
		if err != nil {
			glog.Fatalf("Override of default PersistentVolume scrub pod failed: %+v", err)
		}
		volumeConfig.PersistentVolumeRecyclerDefaultScrubPod = scrubPod
	}

	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(volumeConfig))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()

	var rootCA []byte

	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}

	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}

	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	select {}
}
Example #24
func TestServiceAccountAuthorization(t *testing.T) {
	saNamespace := api.NamespaceDefault
	saName := serviceaccountadmission.DefaultServiceAccountName
	saUsername := serviceaccount.MakeUsername(saNamespace, saName)

	// Start one OpenShift master as "cluster1" to play the external kube server
	cluster1MasterConfig, cluster1AdminConfigFile, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminConfig, err := testutil.GetClusterAdminClientConfig(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminKubeClient, err := testutil.GetClusterAdminKubeClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster1AdminOSClient, err := testutil.GetClusterAdminClient(cluster1AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Get a service account token and build a client
	saToken, err := waitForServiceAccountToken(cluster1AdminKubeClient, saNamespace, saName, 20, time.Second)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(saToken) == 0 {
		t.Fatalf("token was not created")
	}
	cluster1SAClientConfig := kclient.Config{
		Host:        cluster1AdminConfig.Host,
		Prefix:      cluster1AdminConfig.Prefix,
		BearerToken: saToken,
		TLSClientConfig: kclient.TLSClientConfig{
			CAFile: cluster1AdminConfig.CAFile,
			CAData: cluster1AdminConfig.CAData,
		},
	}
	cluster1SAKubeClient, err := kclient.New(&cluster1SAClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the service account doesn't have access
	failNS := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-fail"}}
	if _, err := cluster1SAKubeClient.Namespaces().Create(failNS); !errors.IsForbidden(err) {
		t.Fatalf("expected forbidden error, got %v", err)
	}

	// Make the service account a cluster admin on cluster1
	addRoleOptions := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterAdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(cluster1AdminOSClient),
		Users:               []string{saUsername},
	}
	if err := addRoleOptions.AddRole(); err != nil {
		t.Fatalf("could not add role to service account")
	}

	// Give the policy cache a second to catch its breath
	time.Sleep(time.Second)

	// Make sure the service account now has access
	// This tests authentication using the etcd-based token getter
	passNS := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-pass"}}
	if _, err := cluster1SAKubeClient.Namespaces().Create(passNS); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Create a kubeconfig from the serviceaccount config
	cluster1SAKubeConfigFile, err := ioutil.TempFile(testutil.GetBaseDir(), "cluster1-service-account.kubeconfig")
	if err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}
	defer os.Remove(cluster1SAKubeConfigFile.Name())
	if err := writeClientConfigToKubeConfig(cluster1SAClientConfig, cluster1SAKubeConfigFile.Name()); err != nil {
		t.Fatalf("error creating kubeconfig: %v", err)
	}

	// Set up cluster 2 to run against cluster 1 as external kubernetes
	cluster2MasterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Don't start kubernetes in process
	cluster2MasterConfig.KubernetesMasterConfig = nil
	// Connect to cluster1 using the service account credentials
	cluster2MasterConfig.MasterClients.ExternalKubernetesKubeConfig = cluster1SAKubeConfigFile.Name()
	// Don't start etcd
	cluster2MasterConfig.EtcdConfig = nil
	// Use the same credentials as cluster1 to connect to existing etcd
	cluster2MasterConfig.EtcdClientInfo = cluster1MasterConfig.EtcdClientInfo
	// Set a custom etcd prefix to make sure data is getting sent to cluster1
	cluster2MasterConfig.EtcdStorageConfig.KubernetesStoragePrefix += "2"
	cluster2MasterConfig.EtcdStorageConfig.OpenShiftStoragePrefix += "2"
	// Don't manage any names in cluster2
	cluster2MasterConfig.ServiceAccountConfig.ManagedNames = []string{}
	// Don't create any service account tokens in cluster2
	cluster2MasterConfig.ServiceAccountConfig.PrivateKeyFile = ""
	// Use the same public keys to validate tokens as cluster1
	cluster2MasterConfig.ServiceAccountConfig.PublicKeyFiles = cluster1MasterConfig.ServiceAccountConfig.PublicKeyFiles
	// don't try to start second dns server
	cluster2MasterConfig.DNSConfig = nil

	// Start cluster 2 (without clearing etcd) and get admin client configs and clients
	cluster2Options := testserver.TestOptions{DeleteAllEtcdKeys: false}
	cluster2AdminConfigFile, err := testserver.StartConfiguredMasterWithOptions(cluster2MasterConfig, cluster2Options)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminConfig, err := testutil.GetClusterAdminClientConfig(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	cluster2AdminOSClient, err := testutil.GetClusterAdminClient(cluster2AdminConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Build a client to use the same service account token against cluster2
	cluster2SAClientConfig := cluster1SAClientConfig
	cluster2SAClientConfig.Host = cluster2AdminConfig.Host
	cluster2SAKubeClient, err := kclient.New(&cluster2SAClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the service account doesn't have access
	// A forbidden error makes sure the token was recognized, and policy denied us
	// This exercises the client-based token getter
	// It also makes sure we don't loop back through the cluster2 kube proxy which would cause an auth loop
	failNS2 := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-fail2"}}
	if _, err := cluster2SAKubeClient.Namespaces().Create(failNS2); !errors.IsForbidden(err) {
		t.Fatalf("expected forbidden error, got %v", err)
	}

	// Make the service account a cluster admin on cluster2
	addRoleOptions2 := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterAdminRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(cluster2AdminOSClient),
		Users:               []string{saUsername},
	}
	if err := addRoleOptions2.AddRole(); err != nil {
		t.Fatalf("could not add role to service account")
	}

	// Give the policy cache a second to catch its breath
	time.Sleep(time.Second)

	// Make sure the service account now has access to cluster2
	passNS2 := &api.Namespace{ObjectMeta: api.ObjectMeta{Name: "test-pass2"}}
	if _, err := cluster2SAKubeClient.Namespaces().Create(passNS2); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Make sure the ns actually got created in cluster1
	if _, err := cluster1SAKubeClient.Namespaces().Get(passNS2.Name); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
Example #25
File: server.go Project: nanit/kubernetes
// Run runs the specified ProxyServer.  This should never exit.
func (s *ProxyServer) Run(_ []string) error {
	// TODO(vmarmol): Use container config for this.
	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.V(2).Info(err)
	}

	// Run in its own container.
	if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
		glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
	} else {
		glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
	}

	// define api config source
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}

	client, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	// Add event recorder
	Hostname := nodeutil.GetHostname(s.HostnameOverride)
	eventBroadcaster := record.NewBroadcaster()
	s.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: Hostname})
	eventBroadcaster.StartRecordingToSink(client.Events(""))

	s.nodeRef = &api.ObjectReference{
		Kind:      "Node",
		Name:      Hostname,
		UID:       types.UID(Hostname),
		Namespace: "",
	}

	// Birth Cry
	s.birthCry()

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	protocol := utiliptables.ProtocolIpv4
	if s.BindAddress.To4() == nil {
		protocol = utiliptables.ProtocolIpv6
	}

	var proxier proxy.ProxyProvider
	var endpointsHandler config.EndpointsConfigHandler

	// guaranteed false on error, error only necessary for debugging
	shouldUseIptables, err := iptables.ShouldUseIptablesProxier()
	if err != nil {
		glog.Errorf("Can't determine whether to use iptables or userspace, using userspace proxier: %v", err)
	}
	if !s.ForceUserspaceProxy && shouldUseIptables {
		glog.V(2).Info("Using iptables Proxier.")

		proxierIptables, err := iptables.NewProxier(utiliptables.New(exec.New(), protocol))
		if err != nil {
			glog.Fatalf("Unable to create proxier: %v", err)
		}
		proxier = proxierIptables
		endpointsHandler = proxierIptables
	} else {
		glog.V(2).Info("Using userspace Proxier.")
		// This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for
		// our config.EndpointsConfigHandler.
		loadBalancer := userspace.NewLoadBalancerRR()
		// set EndpointsConfigHandler to our loadBalancer
		endpointsHandler = loadBalancer

		proxierUserspace, err := userspace.NewProxier(loadBalancer, s.BindAddress, utiliptables.New(exec.New(), protocol), s.PortRange)
		if err != nil {
			glog.Fatalf("Unable to create proxer: %v", err)
		}
		proxier = proxierUserspace
	}

	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire endpointsHandler to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(endpointsHandler)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.

	config.NewSourceAPI(
		client,
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	if s.HealthzPort > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(s.HealthzBindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
	return nil
}
Example #26
// Run runs the specified ProxyServer.  This should never exit.
func (s *ProxyServer) Run(_ []string) error {
	// TODO(vmarmol): Use container config for this.
	oomAdjuster := oom.NewOomAdjuster()
	if err := oomAdjuster.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.V(2).Info(err)
	}

	// Run in its own container.
	if err := util.RunInResourceContainer(s.ResourceContainer); err != nil {
		glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err)
	} else {
		glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer)
	}

	serviceConfig := config.NewServiceConfig()
	endpointsConfig := config.NewEndpointsConfig()

	protocol := iptables.ProtocolIpv4
	if s.BindAddress.To4() == nil {
		protocol = iptables.ProtocolIpv6
	}
	loadBalancer := userspace.NewLoadBalancerRR()
	proxier, err := userspace.NewProxier(loadBalancer, s.BindAddress, iptables.New(exec.New(), protocol), s.PortRange)
	if err != nil {
		glog.Fatalf("Unable to create proxer: %v", err)
	}

	// Wire proxier to handle changes to services
	serviceConfig.RegisterHandler(proxier)
	// And wire loadBalancer to handle changes to endpoints to services
	endpointsConfig.RegisterHandler(loadBalancer)

	// Note: RegisterHandler() calls need to happen before creation of Sources because sources
	// only notify on changes, and the initial update (on process start) may be lost if no handlers
	// are registered yet.

	// define api config source
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using default API client.  This might not work.")
	}

	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}

	client, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}

	config.NewSourceAPI(
		client,
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),
	)

	if s.HealthzPort > 0 {
		go util.Forever(func() {
			err := http.ListenAndServe(s.HealthzBindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	// Just loop forever for now...
	proxier.SyncLoop()
	return nil
}
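
Both proxy examples pick the iptables protocol family with s.BindAddress.To4() == nil. That works because net.ParseIP keeps IPv6 addresses in 16-byte form and To4 returns nil for anything not representable as IPv4. A standard-library-only check (the addresses are arbitrary):

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, s := range []string{"10.0.0.1", "2001:db8::1"} {
		ip := net.ParseIP(s)
		if ip.To4() == nil {
			fmt.Printf("%s -> IPv6\n", s) // To4 is nil: not an IPv4 address
		} else {
			fmt.Printf("%s -> IPv4\n", s)
		}
	}
}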
Example #27
0
// Run runs the specified KubeletServer.  This should never exit.
func (s *KubeletServer) Run(_ []string) error {
	util.ReallyCrash = s.ReallyCrashForTesting
	rand.Seed(time.Now().UTC().UnixNano())

	// TODO(vmarmol): Do this through container config.
	if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
		glog.Warning(err)
	}

	var apiclient *client.Client
	clientConfig, err := s.CreateAPIServerClientConfig()
	if err == nil {
		apiclient, err = client.New(clientConfig)
	}
	if err != nil && len(s.APIServerList) > 0 {
		glog.Warningf("No API client: %v", err)
	}

	glog.V(2).Infof("Using root directory: %v", s.RootDirectory)

	credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)

	cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
	if err != nil {
		return err
	}

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: s.ImageGCHighThresholdPercent,
		LowThresholdPercent:  s.ImageGCLowThresholdPercent,
	}

	diskSpacePolicy := kubelet.DiskSpacePolicy{
		DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
		RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
	}
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		return err
	}
	glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

	manifestURLHeader := make(http.Header)
	if s.ManifestURLHeader != "" {
		pieces := strings.Split(s.ManifestURLHeader, ":")
		if len(pieces) != 2 {
			return fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
		}
		manifestURLHeader.Set(pieces[0], pieces[1])
	}

	hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
	if err != nil {
		return err
	}

	tlsOptions, err := s.InitializeTLS()
	if err != nil {
		return err
	}

	mounter := mount.New()
	if s.Containerized {
		glog.V(2).Info("Running kubelet in containerized mode (experimental)")
		mounter = mount.NewNsenterMounter()
	}

	var dockerExecHandler dockertools.ExecHandler
	switch s.DockerExecHandlerName {
	case "native":
		dockerExecHandler = &dockertools.NativeExecHandler{}
	case "nsenter":
		dockerExecHandler = &dockertools.NsenterExecHandler{}
	default:
		glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
		dockerExecHandler = &dockertools.NativeExecHandler{}
	}

	kcfg := KubeletConfig{
		Address:                        s.Address,
		AllowPrivileged:                s.AllowPrivileged,
		HostNetworkSources:             hostNetworkSources,
		HostnameOverride:               s.HostnameOverride,
		RootDirectory:                  s.RootDirectory,
		ConfigFile:                     s.Config,
		ManifestURL:                    s.ManifestURL,
		ManifestURLHeader:              manifestURLHeader,
		FileCheckFrequency:             s.FileCheckFrequency,
		HTTPCheckFrequency:             s.HTTPCheckFrequency,
		PodInfraContainerImage:         s.PodInfraContainerImage,
		SyncFrequency:                  s.SyncFrequency,
		RegistryPullQPS:                s.RegistryPullQPS,
		RegistryBurst:                  s.RegistryBurst,
		MinimumGCAge:                   s.MinimumGCAge,
		MaxPerPodContainerCount:        s.MaxPerPodContainerCount,
		MaxContainerCount:              s.MaxContainerCount,
		RegisterNode:                   s.RegisterNode,
		StandaloneMode:                 (len(s.APIServerList) == 0),
		ClusterDomain:                  s.ClusterDomain,
		ClusterDNS:                     s.ClusterDNS,
		Runonce:                        s.RunOnce,
		Port:                           s.Port,
		ReadOnlyPort:                   s.ReadOnlyPort,
		CadvisorInterface:              cadvisorInterface,
		EnableServer:                   s.EnableServer,
		EnableDebuggingHandlers:        s.EnableDebuggingHandlers,
		DockerClient:                   dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient:                     apiclient,
		MasterServiceNamespace:         s.MasterServiceNamespace,
		VolumePlugins:                  ProbeVolumePlugins(),
		NetworkPlugins:                 ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName:              s.NetworkPluginName,
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		TLSOptions:                     tlsOptions,
		ImageGCPolicy:                  imageGCPolicy,
		DiskSpacePolicy:                diskSpacePolicy,
		Cloud:                          cloud,
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		ResourceContainer:         s.ResourceContainer,
		CgroupRoot:                s.CgroupRoot,
		ContainerRuntime:          s.ContainerRuntime,
		Mounter:                   mounter,
		DockerDaemonContainer:     s.DockerDaemonContainer,
		SystemContainer:           s.SystemContainer,
		ConfigureCBR0:             s.ConfigureCBR0,
		PodCIDR:                   s.PodCIDR,
		MaxPods:                   s.MaxPods,
		DockerExecHandler:         dockerExecHandler,
	}

	if err := RunKubelet(&kcfg, nil); err != nil {
		return err
	}

	if s.HealthzPort > 0 {
		healthz.DefaultHealthz()
		go util.Forever(func() {
			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second)
	}

	if s.RunOnce {
		return nil
	}

	// run forever
	select {}
}
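
The manifest-url-header parsing in the kubelet example rejects any value containing a second ':' because it uses strings.Split. If header values with embedded colons needed to be accepted, splitting on the first colon only would work; a sketch under that assumption (parseHeader is a hypothetical helper, not kubelet code):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// parseHeader splits on the first ':' only, so a value like
// "X-Custom: a:b" keeps its embedded colon intact.
func parseHeader(s string) (http.Header, error) {
	pieces := strings.SplitN(s, ":", 2)
	if len(pieces) != 2 {
		return nil, fmt.Errorf("expected key:value, got %q", s)
	}
	h := make(http.Header)
	h.Set(strings.TrimSpace(pieces[0]), strings.TrimSpace(pieces[1]))
	return h, nil
}

func main() {
	h, err := parseHeader("X-Custom: a:b")
	fmt.Println(h, err) // map[X-Custom:[a:b]] <nil>
}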
Example #28
0
func main() {
	flag.Parse()

	glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d",
		*queriesAverage, *podsPerNode, *upTo)

	var spec string
	if *gke != "" {
		spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "kubernetes", "kubeconfig")
	} else {
		spec = filepath.Join(os.Getenv("HOME"), ".kube", "config")
	}
	settings, err := clientcmd.LoadFromFile(spec)
	if err != nil {
		glog.Fatalf("Error loading configuration: %v", err.Error())
	}
	if *gke != "" {
		settings.CurrentContext = *gke
	}
	config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		glog.Fatalf("Failed to construct config: %v", err)
	}

	c, err := client.New(config)
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	var nodes *api.NodeList
	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
		nodes, err = c.Nodes().List(labels.Everything(), fields.Everything())
		if err == nil {
			break
		}
		glog.Warningf("Failed to list nodes: %v", err)
	}
	if err != nil {
		glog.Fatalf("Giving up trying to list nodes: %v", err)
	}

	if len(nodes.Items) == 0 {
		glog.Fatalf("Failed to find any nodes.")
	}

	glog.Infof("Found %d nodes on this cluster:", len(nodes.Items))
	for i, node := range nodes.Items {
		glog.Infof("%d: %s", i, node.Name)
	}

	queries := *queriesAverage * len(nodes.Items) * *podsPerNode

	// Create the namespace
	got, err := c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{GenerateName: "serve-hostnames-"}})
	if err != nil {
		glog.Fatalf("Failed to create namespace: %v", err)
	}
	ns := got.Name
	defer func(ns string) {
		if err := c.Namespaces().Delete(ns); err != nil {
			glog.Warningf("Failed to delete namespace ns: %e", ns, err)
		}
	}(ns)
	glog.Infof("Created namespace %s", ns)

	// Create a service for these pods.
	glog.Infof("Creating service %s/serve-hostnames", ns)
	// Make several attempts to create a service.
	var svc *api.Service
	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
		t := time.Now()
		svc, err = c.Services(ns).Create(&api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: "serve-hostnames",
				Labels: map[string]string{
					"name": "serve-hostname",
				},
			},
			Spec: api.ServiceSpec{
				Ports: []api.ServicePort{{
					Protocol:   "TCP",
					Port:       9376,
					TargetPort: util.NewIntOrStringFromInt(9376),
				}},
				Selector: map[string]string{
					"name": "serve-hostname",
				},
			},
		})
		glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t))
		if err == nil {
			break
		}
		glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err)
	}
	if err != nil {
		glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err)
		return
	}
	// Clean up service
	defer func() {
		glog.Infof("Cleaning up service %s/serve-hostnames", ns)
		// Make several attempts to delete the service.
		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
			if err := c.Services(ns).Delete(svc.Name); err == nil {
				return
			}
			glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
		}
	}()

	// Put serve-hostname pods on each node.
	podNames := []string{}
	for i, node := range nodes.Items {
		for j := 0; j < *podsPerNode; j++ {
			podName := fmt.Sprintf("serve-hostname-%d-%d", i, j)
			podNames = append(podNames, podName)
			// Make several attempts
			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
				glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
				t := time.Now()
				_, err = c.Pods(ns).Create(&api.Pod{
					ObjectMeta: api.ObjectMeta{
						Name: podName,
						Labels: map[string]string{
							"name": "serve-hostname",
						},
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  "serve-hostname",
								Image: "gcr.io/google_containers/serve_hostname:1.1",
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
						NodeName: node.Name,
					},
				})
				glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t))
				if err == nil {
					break
				}
				glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
			if err != nil {
				glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err)
				return
			}
		}
	}
	// Clean up the pods
	defer func() {
		glog.Info("Cleaning up pods")
		// Make several attempts to delete the pods.
		for _, podName := range podNames {
			for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
				if err = c.Pods(ns).Delete(podName, nil); err == nil {
					break
				}
				glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
		}
	}()

	glog.Info("Waiting for the serve-hostname pods to be ready")
	for _, podName := range podNames {
		var pod *api.Pod
		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
			pod, err = c.Pods(ns).Get(podName)
			if err != nil {
				glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout)
				continue
			}
			if pod.Status.Phase == api.PodRunning {
				break
			}
		}
		if pod == nil {
			glog.Warningf("Gave up waiting on pod %s/%s: never fetched it successfully", ns, podName)
		} else if pod.Status.Phase != api.PodRunning {
			glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase)
		} else {
			glog.Infof("%s/%s is running", ns, podName)
		}
	}

	// Wait for the endpoints to propagate.
	for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) {
		hostname, err := c.Get().
			Namespace(ns).
			Prefix("proxy").
			Resource("services").
			Name("serve-hostnames").
			DoRaw()
		if err != nil {
			glog.Infof("After %v while making a proxy call got error %v", time.Since(start), err)
			continue
		}
		var r api.Status
		if err := api.Scheme.DecodeInto(hostname, &r); err != nil {
			break
		}
		if r.Status == api.StatusFailure {
			glog.Infof("After %v got status %v", time.Since(start), string(hostname))
			continue
		}
		break
	}

	// Repeatedly make requests.
	for iteration := 0; iteration != *upTo; iteration++ {
		responseChan := make(chan string, queries)
		// Use a channel of size *maxPar to throttle the number
		// of in-flight requests to avoid overloading the service.
		inFlight := make(chan struct{}, *maxPar)
		start := time.Now()
		for q := 0; q < queries; q++ {
			go func(i int, query int) {
				inFlight <- struct{}{}
				t := time.Now()
				hostname, err := c.Get().
					Namespace(ns).
					Prefix("proxy").
					Resource("services").
					Name("serve-hostnames").
					DoRaw()
				glog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t))
				if err != nil {
					glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err)
					// If the query failed return a string which starts with a character
					// that can't be part of a hostname.
					responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err)
				} else {
					responseChan <- string(hostname)
				}
				<-inFlight
			}(iteration, q)
		}
		responses := make(map[string]int, *podsPerNode*len(nodes.Items))
		missing := 0
		for q := 0; q < queries; q++ {
			r := <-responseChan
			glog.V(4).Infof("Got response from %s", r)
			responses[r]++
			// If the returned hostname starts with '!' then it indicates
			// an error response.
			if len(r) > 0 && r[0] == '!' {
				glog.V(3).Infof("Got response %s", r)
				missing++
			}
		}
		if missing > 0 {
			glog.Warningf("Missing %d responses out of %d", missing, queries)
		}
		// Report any nodes that did not respond.
		for n, node := range nodes.Items {
			for i := 0; i < *podsPerNode; i++ {
				name := fmt.Sprintf("serve-hostname-%d-%d", n, i)
				if _, ok := responses[name]; !ok {
					glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration)
				}
			}
		}
		glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing",
			iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing)
	}
}
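
The query loop above bounds concurrency with a buffered channel used as a counting semaphore: sending acquires a slot, receiving releases it. A stripped-down version of that pattern (standard library only; the sleep stands in for the proxy call, and queries/maxPar are illustrative values):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const queries, maxPar = 10, 3

	inFlight := make(chan struct{}, maxPar) // counting semaphore: capacity = max in-flight work
	var wg sync.WaitGroup

	for q := 0; q < queries; q++ {
		wg.Add(1)
		go func(q int) {
			defer wg.Done()
			inFlight <- struct{}{}            // blocks while maxPar goroutines hold slots
			defer func() { <-inFlight }()     // release the slot when done
			time.Sleep(50 * time.Millisecond) // stand-in for the proxy call
			fmt.Println("finished query", q)
		}(q)
	}
	wg.Wait()
}

At most three of the ten goroutines run the "work" section at any moment, which is the same throttling the soak test applies to its proxy requests.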