Code example #1
File: types.go  Project: MohamedFAhmed/heapster
// NewKubelet builds an HTTP client from the given kubelet configuration and
// returns a source that reads stats from that kubelet.
func NewKubelet(kubeletConfig *kube_client.KubeletConfig) (Kubelet, error) {
	transport, err := kube_client.MakeTransport(kubeletConfig)
	if err != nil {
		return nil, err
	}
	c := &http.Client{
		Transport: transport,
		Timeout:   kubeletConfig.HTTPTimeout,
	}
	return &kubeletSource{
		config: kubeletConfig,
		client: c,
	}, nil
}
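A minimal usage sketch, assuming the KubeletConfig fields that appear elsewhere in these excerpts (Port, EnableHttps, HTTPTimeout); the helper name newNodeSource and the chosen values are illustrative only, and the "time" and kube_client imports are assumed to be in scope:

// Hypothetical helper showing how NewKubelet might be called.
func newNodeSource() (Kubelet, error) {
	cfg := &kube_client.KubeletConfig{
		Port:        10250,           // default kubelet port (assumption)
		EnableHttps: true,            // talk TLS to the kubelet
		HTTPTimeout: 5 * time.Second, // bounds every stats request
	}
	return NewKubelet(cfg)
}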
Code example #2
File: master.go  Project: kzkohashi/kubernetes
// init initializes master: it wires up etcd-backed storage for the core
// resources, sets up the SSH node-proxy dialer, installs the REST API, and
// assembles the HTTP handler chain.
func (m *Master) init(c *Config) {
	healthzChecks := []healthz.HealthzChecker{}
	m.clock = util.RealClock{}
	podStorage := podetcd.NewStorage(c.DatabaseStorage, true, c.KubeletClient)

	podTemplateStorage := podtemplateetcd.NewREST(c.DatabaseStorage)

	eventStorage := eventetcd.NewREST(c.DatabaseStorage, uint64(c.EventTTL.Seconds()))
	limitRangeStorage := limitrangeetcd.NewREST(c.DatabaseStorage)

	resourceQuotaStorage, resourceQuotaStatusStorage := resourcequotaetcd.NewREST(c.DatabaseStorage)
	secretStorage := secretetcd.NewREST(c.DatabaseStorage)
	serviceAccountStorage := serviceaccountetcd.NewREST(c.DatabaseStorage)
	persistentVolumeStorage, persistentVolumeStatusStorage := pvetcd.NewREST(c.DatabaseStorage)
	persistentVolumeClaimStorage, persistentVolumeClaimStatusStorage := pvcetcd.NewREST(c.DatabaseStorage)

	namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewREST(c.DatabaseStorage)
	m.namespaceRegistry = namespace.NewRegistry(namespaceStorage)

	endpointsStorage := endpointsetcd.NewREST(c.DatabaseStorage, true)
	m.endpointRegistry = endpoint.NewRegistry(endpointsStorage)

	nodeStorage, nodeStatusStorage := nodeetcd.NewREST(c.DatabaseStorage, true, c.KubeletClient)
	m.nodeRegistry = minion.NewRegistry(nodeStorage)

	serviceStorage := serviceetcd.NewREST(c.DatabaseStorage)
	m.serviceRegistry = service.NewRegistry(serviceStorage)

	var serviceClusterIPRegistry service.RangeRegistry
	serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(m.serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.DatabaseStorage)
		serviceClusterIPRegistry = etcd
		return etcd
	})
	m.serviceClusterIPAllocator = serviceClusterIPRegistry

	var serviceNodePortRegistry service.RangeRegistry
	serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePortRange, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.DatabaseStorage)
		serviceNodePortRegistry = etcd
		return etcd
	})
	m.serviceNodePortAllocator = serviceNodePortRegistry

	controllerStorage := controlleretcd.NewREST(c.DatabaseStorage)

	// TODO: Factor out the core API registration
	m.storage = map[string]rest.Storage{
		"pods":             podStorage.Pod,
		"pods/attach":      podStorage.Attach,
		"pods/status":      podStorage.Status,
		"pods/log":         podStorage.Log,
		"pods/exec":        podStorage.Exec,
		"pods/portforward": podStorage.PortForward,
		"pods/proxy":       podStorage.Proxy,
		"pods/binding":     podStorage.Binding,
		"bindings":         podStorage.Binding,

		"podTemplates": podTemplateStorage,

		"replicationControllers": controllerStorage,
		"services":               service.NewStorage(m.serviceRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator),
		"endpoints":              endpointsStorage,
		"nodes":                  nodeStorage,
		"nodes/status":           nodeStatusStorage,
		"events":                 eventStorage,

		"limitRanges":                   limitRangeStorage,
		"resourceQuotas":                resourceQuotaStorage,
		"resourceQuotas/status":         resourceQuotaStatusStorage,
		"namespaces":                    namespaceStorage,
		"namespaces/status":             namespaceStatusStorage,
		"namespaces/finalize":           namespaceFinalizeStorage,
		"secrets":                       secretStorage,
		"serviceAccounts":               serviceAccountStorage,
		"persistentVolumes":             persistentVolumeStorage,
		"persistentVolumes/status":      persistentVolumeStatusStorage,
		"persistentVolumeClaims":        persistentVolumeClaimStorage,
		"persistentVolumeClaims/status": persistentVolumeClaimStatusStorage,

		"componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }),
	}

	// establish the node proxy dialer
	if len(c.SSHUser) > 0 {
		// Usernames are capped @ 32
		if len(c.SSHUser) > 32 {
			glog.Warning("SSH User is too long, truncating to 32 chars")
			c.SSHUser = c.SSHUser[0:32]
		}
		glog.Infof("Setting up proxy: %s %s", c.SSHUser, c.SSHKeyfile)

		// public keyfile is written last, so check for that.
		publicKeyFile := c.SSHKeyfile + ".pub"
		exists, err := util.FileExists(publicKeyFile)
		if err != nil {
			glog.Errorf("Error detecting if key exists: %v", err)
		} else if !exists {
			glog.Infof("Key doesn't exist, attempting to create")
			err := m.generateSSHKey(c.SSHUser, c.SSHKeyfile, publicKeyFile)
			if err != nil {
				glog.Errorf("Failed to create key pair: %v", err)
			}
		}
		m.tunnels = &util.SSHTunnelList{}
		m.dialer = m.Dial
		m.setupSecureProxy(c.SSHUser, c.SSHKeyfile, publicKeyFile)
		m.lastSync = m.clock.Now().Unix()

		// This is pretty ugly.  A better solution would be to pull this all the way up into the
		// server.go file.
		httpKubeletClient, ok := c.KubeletClient.(*client.HTTPKubeletClient)
		if ok {
			httpKubeletClient.Config.Dial = m.dialer
			transport, err := client.MakeTransport(httpKubeletClient.Config)
			if err != nil {
				glog.Errorf("Error setting up transport over SSH: %v", err)
			} else {
				httpKubeletClient.Client.Transport = transport
			}
		} else {
			glog.Errorf("Failed to cast %v to HTTPKubeletClient, skipping SSH tunnel.", c.KubeletClient)
		}
		healthzChecks = append(healthzChecks, healthz.NamedCheck("SSH Tunnel Check", m.IsTunnelSyncHealthy))
		m.lastSyncMetric = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
			Name: "apiserver_proxy_tunnel_sync_latency_secs",
			Help: "The time since the last successful synchronization of the SSH tunnels for proxy requests.",
		}, func() float64 { return float64(m.secondsSinceSync()) })
	}

	apiVersions := []string{}
	if m.v1 {
		if err := m.api_v1().InstallREST(m.handlerContainer); err != nil {
			glog.Fatalf("Unable to setup API v1: %v", err)
		}
		apiVersions = append(apiVersions, "v1")
	}

	apiserver.InstallSupport(m.muxHelper, m.rootWebService, c.EnableProfiling, healthzChecks...)
	apiserver.AddApiWebService(m.handlerContainer, c.APIPrefix, apiVersions)
	defaultVersion := m.defaultAPIGroupVersion()
	requestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(defaultVersion.Root, "/")), RestMapper: defaultVersion.Mapper}
	apiserver.InstallServiceErrorHandler(m.handlerContainer, requestInfoResolver, apiVersions)

	if m.exp {
		expVersion := m.expapi(c)
		if err := expVersion.InstallREST(m.handlerContainer); err != nil {
			glog.Fatalf("Unable to setup experimental api: %v", err)
		}
		apiserver.AddApiWebService(m.handlerContainer, c.ExpAPIPrefix, []string{expVersion.Version})
		expRequestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(expVersion.Root, "/")), RestMapper: expVersion.Mapper}
		apiserver.InstallServiceErrorHandler(m.handlerContainer, expRequestInfoResolver, []string{expVersion.Version})
	}

	// Register root handler.
	// We do not register this using restful Webservice since we do not want to surface this in api docs.
	// Allow master to be embedded in contexts which already have something registered at the root
	if c.EnableIndex {
		m.mux.HandleFunc("/", apiserver.IndexHandler(m.handlerContainer, m.muxHelper))
	}

	if c.EnableLogsSupport {
		apiserver.InstallLogsSupport(m.muxHelper)
	}
	if c.EnableUISupport {
		ui.InstallSupport(m.muxHelper, m.enableSwaggerSupport)
	}

	if c.EnableProfiling {
		m.mux.HandleFunc("/debug/pprof/", pprof.Index)
		m.mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		m.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	}

	handler := http.Handler(m.mux.(*http.ServeMux))

	// TODO: handle CORS and auth using go-restful
	// See github.com/emicklei/go-restful/blob/master/examples/restful-CORS-filter.go, and
	// github.com/emicklei/go-restful/blob/master/examples/restful-basic-authentication.go

	if len(c.CorsAllowedOriginList) > 0 {
		allowedOriginRegexps, err := util.CompileRegexps(c.CorsAllowedOriginList)
		if err != nil {
			glog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(c.CorsAllowedOriginList, ","), err)
		}
		handler = apiserver.CORS(handler, allowedOriginRegexps, nil, nil, "true")
	}

	m.InsecureHandler = handler

	attributeGetter := apiserver.NewRequestAttributeGetter(m.requestContextMapper, latest.RESTMapper, "api")
	handler = apiserver.WithAuthorizationCheck(handler, attributeGetter, m.authorizer)

	// Install Authenticator
	if c.Authenticator != nil {
		authenticatedHandler, err := handlers.NewRequestAuthenticator(m.requestContextMapper, c.Authenticator, handlers.Unauthorized(c.SupportsBasicAuth), handler)
		if err != nil {
			glog.Fatalf("Could not initialize authenticator: %v", err)
		}
		handler = authenticatedHandler
	}

	// Install root web services
	m.handlerContainer.Add(m.rootWebService)

	// TODO: Make this optional?  Consumers of master depend on this currently.
	m.Handler = handler

	if m.enableSwaggerSupport {
		m.InstallSwaggerAPI()
	}

	// After all wrapping is done, put a context filter around both handlers
	if handler, err := api.NewRequestContextFilter(m.requestContextMapper, m.Handler); err != nil {
		glog.Fatalf("Could not initialize request context filter: %v", err)
	} else {
		m.Handler = handler
	}

	if handler, err := api.NewRequestContextFilter(m.requestContextMapper, m.InsecureHandler); err != nil {
		glog.Fatalf("Could not initialize request context filter: %v", err)
	} else {
		m.InsecureHandler = handler
	}

	// TODO: Attempt clean shutdown?
	if m.enableCoreControllers {
		m.NewBootstrapController().Start()
	}
}
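The cluster-IP and node-port allocators above share a pattern worth noting: the factory closure both constructs the etcd-backed allocator and captures it in an outer variable, so the master can keep a handle on the range registry for later repair or sync. A stripped-down sketch of that pattern, using hypothetical Interface/RangeRegistry stand-ins rather than the real Kubernetes allocator types:

package main

import "fmt"

// Hypothetical stand-ins for allocator.Interface and service.RangeRegistry.
type Interface interface{ AllocateNext() (int, error) }
type RangeRegistry interface{ Repair() error }

// etcdAllocator plays the role of the etcd-backed allocator in the closure.
type etcdAllocator struct{ next int }

func (e *etcdAllocator) AllocateNext() (int, error) { e.next++; return e.next, nil }
func (e *etcdAllocator) Repair() error              { return nil }

// newRange mimics the shape of ipallocator.NewAllocatorCIDRRange: the caller
// supplies a factory, and captures the concrete backend out of the closure.
func newRange(factory func() Interface) Interface { return factory() }

func main() {
	var registry RangeRegistry
	alloc := newRange(func() Interface {
		etcd := &etcdAllocator{}
		registry = etcd // capture the backend for later repair/sync
		return etcd
	})
	n, _ := alloc.AllocateNext()
	fmt.Println("allocated:", n, "registry captured:", registry != nil)
}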
Code example #3
File: node_auth_test.go  Project: redlocal/origin
func TestNodeAuth(t *testing.T) {
	// Server config
	masterConfig, nodeConfig, adminKubeConfigFile, err := testserver.StartTestAllInOne()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Cluster admin clients and client configs
	adminClient, err := testutil.GetClusterAdminKubeClient(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	originAdminClient, err := testutil.GetClusterAdminClient(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	adminConfig, err := testutil.GetClusterAdminClientConfig(adminKubeConfigFile)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Client configs for lesser users
	masterKubeletClientConfig := configapi.GetKubeletClientConfig(*masterConfig)

	anonymousConfig := clientcmd.AnonymousClientConfig(*adminConfig)

	badTokenConfig := clientcmd.AnonymousClientConfig(*adminConfig)
	badTokenConfig.BearerToken = "bad-token"

	bobClient, _, bobConfig, err := testutil.GetClientForUser(*adminConfig, "bob")
	_, _, aliceConfig, err := testutil.GetClientForUser(*adminConfig, "alice")
	sa1Client, _, sa1Config, err := testutil.GetClientForServiceAccount(adminClient, *adminConfig, "default", "sa1")
	_, _, sa2Config, err := testutil.GetClientForServiceAccount(adminClient, *adminConfig, "default", "sa2")

	// Grant Bob system:node-reader, which should let them read metrics and stats
	addBob := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.NodeReaderRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(originAdminClient),
		Subjects:            []kapi.ObjectReference{{Kind: "User", Name: "bob"}},
	}
	if err := addBob.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Grant sa1 system:cluster-reader, which should let them read metrics and stats
	addSA1 := &policy.RoleModificationOptions{
		RoleName:            bootstrappolicy.ClusterReaderRoleName,
		RoleBindingAccessor: policy.NewClusterRoleBindingAccessor(originAdminClient),
		Subjects:            []kapi.ObjectReference{{Kind: "ServiceAccount", Namespace: "default", Name: "sa1"}},
	}
	if err := addSA1.AddRole(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Wait for policy cache
	if err := testutil.WaitForClusterPolicyUpdate(bobClient, "get", "nodes/metrics", true); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := testutil.WaitForClusterPolicyUpdate(sa1Client, "get", "nodes/metrics", true); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, nodePort, err := net.SplitHostPort(nodeConfig.ServingInfo.BindAddress)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodePortInt, err := strconv.ParseInt(nodePort, 0, 0)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	nodeTLS := configapi.UseTLS(nodeConfig.ServingInfo)

	kubeletClientConfig := func(config *kclient.Config) *kclient.KubeletConfig {
		return &kclient.KubeletConfig{
			Port:            uint(nodePortInt),
			EnableHttps:     nodeTLS,
			TLSClientConfig: config.TLSClientConfig,
			BearerToken:     config.BearerToken,
		}
	}

	testCases := map[string]struct {
		KubeletClientConfig *kclient.KubeletConfig
		Forbidden           bool
		NodeViewer          bool
		NodeAdmin           bool
	}{
		"bad token": {
			KubeletClientConfig: kubeletClientConfig(&badTokenConfig),
		},
		"anonymous": {
			KubeletClientConfig: kubeletClientConfig(&anonymousConfig),
			Forbidden:           true,
		},
		"cluster admin": {
			KubeletClientConfig: kubeletClientConfig(adminConfig),
			NodeAdmin:           true,
		},
		"master kubelet client": {
			KubeletClientConfig: masterKubeletClientConfig,
			NodeAdmin:           true,
		},
		"bob": {
			KubeletClientConfig: kubeletClientConfig(bobConfig),
			NodeViewer:          true,
		},
		"alice": {
			KubeletClientConfig: kubeletClientConfig(aliceConfig),
			Forbidden:           true,
		},
		"sa1": {
			KubeletClientConfig: kubeletClientConfig(sa1Config),
			NodeViewer:          true,
		},
		"sa2": {
			KubeletClientConfig: kubeletClientConfig(sa2Config),
			Forbidden:           true,
		},
	}

	for k, tc := range testCases {

		var (
			// expected result for requests a viewer should be able to make
			viewResult int
			// expected result for requests an admin should be able to make (that can actually complete with a 200 in our tests)
			adminResultOK int
			// expected result for requests an admin should be able to make (that return a 404 in this test if the authn/authz layer is completed)
			adminResultMissing int
		)
		switch {
		case tc.NodeAdmin:
			viewResult = http.StatusOK
			adminResultOK = http.StatusOK
			adminResultMissing = http.StatusNotFound
		case tc.NodeViewer:
			viewResult = http.StatusOK
			adminResultOK = http.StatusForbidden
			adminResultMissing = http.StatusForbidden
		case tc.Forbidden:
			viewResult = http.StatusForbidden
			adminResultOK = http.StatusForbidden
			adminResultMissing = http.StatusForbidden
		default:
			viewResult = http.StatusUnauthorized
			adminResultOK = http.StatusUnauthorized
			adminResultMissing = http.StatusUnauthorized
		}

		requests := []testRequest{
			// Responses to invalid paths are the same for all users
			{"GET", "/", http.StatusNotFound},
			{"GET", "/stats", http.StatusMovedPermanently}, // ServeMux redirects to the directory
			{"GET", "/invalid", http.StatusNotFound},

			// viewer requests
			{"GET", "/metrics", viewResult},
			{"GET", "/stats/", viewResult},

			// successful admin requests
			{"GET", "/healthz", adminResultOK},
			{"GET", "/pods", adminResultOK},

			// not found admin requests
			{"GET", "/containerLogs/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/exec/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/run/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/attach/mynamespace/mypod/mycontainer", adminResultMissing},
			{"POST", "/portForward/mynamespace/mypod/mycontainer", adminResultMissing},

			// GET is supported in origin on /exec and /attach for backwards compatibility
			// make sure node admin permissions are required
			{"GET", "/exec/mynamespace/mypod/mycontainer", adminResultMissing},
			{"GET", "/attach/mynamespace/mypod/mycontainer", adminResultMissing},
		}

		rt, err := kclient.MakeTransport(tc.KubeletClientConfig)
		if err != nil {
			t.Errorf("%s: unexpected error: %v", k, err)
			continue
		}

		for _, r := range requests {
			req, err := http.NewRequest(r.Method, "https://"+nodeConfig.NodeName+":10250"+r.Path, nil)
			if err != nil {
				t.Errorf("%s: %s: unexpected error: %v", k, r.Path, err)
				continue
			}
			resp, err := rt.RoundTrip(req)
			if err != nil {
				t.Errorf("%s: %s: unexpected error: %v", k, r.Path, err)
				continue
			}
			resp.Body.Close()
			if resp.StatusCode != r.Result {
				t.Errorf("%s: %s: expected %d, got %d", k, r.Path, r.Result, resp.StatusCode)
				continue
			}
		}
	}
}
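The test above relies on a testRequest helper type that is defined elsewhere in node_auth_test.go. A plausible definition, inferred from how the struct literals are built (method, path, expected status) and not confirmed by this excerpt, would be:

// Assumed shape of the helper used above; the real definition may differ.
type testRequest struct {
	Method string // HTTP verb, e.g. "GET" or "POST"
	Path   string // kubelet endpoint path, e.g. "/metrics"
	Result int    // expected HTTP status code
}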