func TestStore(t *testing.T) {
	storage, fakeClient, backing := newStorage(t)

	// Seed etcd with an existing range allocation.
	if _, err := fakeClient.Set(key(), runtime.EncodeOrDie(testapi.Default.Codec(), validNewRangeAllocation()), 0); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Allocate an offset through the etcd-backed allocator.
	if _, err := storage.Allocate(2); err != nil {
		t.Fatal(err)
	}
	// The etcd-backed allocator shares the in-memory map, so allocating the same
	// offset directly against the backing map must report it as already taken.
	ok, err := backing.Allocate(2)
	if err != nil {
		t.Fatal(err)
	}
	if ok {
		t.Fatal("Expected backing allocation to fail")
	}
	// A second allocation through the etcd-backed allocator must fail as well.
	if ok, err := storage.Allocate(2); ok || err != nil {
		t.Fatal("Expected allocation to fail")
	}

	// The allocation must have been written to etcd.
	obj := fakeClient.Data[key()]
	if obj.R == nil || obj.R.Node == nil {
		t.Fatalf("%s is empty: %#v", key(), obj)
	}
	t.Logf("data: %#v", obj.R.Node)

	// A fresh in-memory map can be rebuilt from the persisted range allocation.
	other := allocator.NewAllocationMap(100, "rangeSpecValue")

	allocation := &api.RangeAllocation{}
	if err := storage.storage.Get(key(), allocation, false); err != nil {
		t.Fatal(err)
	}
	if allocation.ResourceVersion != "2" {
		t.Fatalf("%#v", allocation)
	}
	if allocation.Range != "rangeSpecValue" {
		t.Errorf("unexpected stored Range: %s", allocation.Range)
	}
	if err := other.Restore("rangeSpecValue", allocation.Data); err != nil {
		t.Fatal(err)
	}
	if !other.Has(2) {
		t.Fatalf("could not restore allocated IP: %#v", other)
	}

	// A second etcd-backed allocator over the same storage must also see the
	// offset as taken.
	other = allocator.NewAllocationMap(100, "rangeSpecValue")
	otherStorage := NewEtcd(other, "/ranges/serviceips", "serviceipallocation", storage.storage)
	if ok, err := otherStorage.Allocate(2); ok || err != nil {
		t.Fatal(err)
	}
}
func TestStore(t *testing.T) {
	storage, backing, ecli := newStorage(t)
	initialObject(ecli)

	if _, err := storage.Allocate(2); err != nil {
		t.Fatal(err)
	}
	ok, err := backing.Allocate(2)
	if err != nil {
		t.Fatal(err)
	}
	if ok {
		t.Fatal("Expected backing allocation to fail")
	}
	if ok, err := storage.Allocate(2); ok || err != nil {
		t.Fatal("Expected allocation to fail")
	}

	obj := ecli.Data[key()]
	if obj.R == nil || obj.R.Node == nil {
		t.Fatalf("%s is empty: %#v", key(), obj)
	}
	t.Logf("data: %#v", obj.R.Node)

	other := allocator.NewAllocationMap(100, "rangeSpecValue")

	allocation := &api.RangeAllocation{}
	if err := storage.storage.Get(key(), allocation, false); err != nil {
		t.Fatal(err)
	}
	if allocation.ResourceVersion != "1" {
		t.Fatalf("%#v", allocation)
	}
	if allocation.Range != "rangeSpecValue" {
		t.Errorf("unexpected stored Range: %s", allocation.Range)
	}
	if err := other.Restore("rangeSpecValue", allocation.Data); err != nil {
		t.Fatal(err)
	}
	if !other.Has(2) {
		t.Fatalf("could not restore allocated IP: %#v", other)
	}

	other = allocator.NewAllocationMap(100, "rangeSpecValue")
	otherStorage := NewEtcd(other, "/ranges/serviceips", "serviceipallocation", storage.storage)
	if ok, err := otherStorage.Allocate(2); ok || err != nil {
		t.Fatal(err)
	}
}
func newStorage(t *testing.T) (*Etcd, allocator.Interface, *tools.FakeEtcdClient) {
	fakeEtcdClient, s := newEtcdStorage(t)
	mem := allocator.NewAllocationMap(100, "rangeSpecValue")
	etcd := NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", s)
	return etcd, mem, fakeEtcdClient
}
func TestStore(t *testing.T) {
	storage, server, backing := newStorage(t)
	defer server.Terminate(t)

	if err := storage.storage.Set(context.TODO(), key(), validNewRangeAllocation(), nil, 0); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := storage.Allocate(2); err != nil {
		t.Fatal(err)
	}
	ok, err := backing.Allocate(2)
	if err != nil {
		t.Fatal(err)
	}
	if ok {
		t.Fatal("Expected backing allocation to fail")
	}
	if ok, err := storage.Allocate(2); ok || err != nil {
		t.Fatal("Expected allocation to fail")
	}

	other := allocator.NewAllocationMap(100, "rangeSpecValue")

	allocation := &api.RangeAllocation{}
	if err := storage.storage.Get(context.TODO(), key(), allocation, false); err != nil {
		t.Fatal(err)
	}
	if allocation.Range != "rangeSpecValue" {
		t.Errorf("unexpected stored Range: %s", allocation.Range)
	}
	if err := other.Restore("rangeSpecValue", allocation.Data); err != nil {
		t.Fatal(err)
	}
	if !other.Has(2) {
		t.Fatalf("could not restore allocated IP: %#v", other)
	}

	other = allocator.NewAllocationMap(100, "rangeSpecValue")
	otherStorage := NewEtcd(other, "/ranges/serviceips", api.Resource("serviceipallocations"), storage.storage)
	if ok, err := otherStorage.Allocate(2); ok || err != nil {
		t.Fatal(err)
	}
}
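// The TestStore variants above all exercise the same invariant: an offset allocated
// through the etcd-backed allocator is reflected in the shared in-memory map and can
// be rebuilt from the persisted api.RangeAllocation via Restore. The sketch below
// shows that round trip against the in-memory allocator alone. It is illustrative,
// not part of the original tests: the test name is new, the Snapshot method is an
// assumption about the concrete type returned by NewAllocationMap, and it assumes
// the same package and imports as the tests above.
func TestAllocationMapRoundTripSketch(t *testing.T) {
	src := allocator.NewAllocationMap(100, "rangeSpecValue")
	if ok, err := src.Allocate(2); !ok || err != nil {
		t.Fatalf("unexpected allocation failure: ok=%v err=%v", ok, err)
	}

	// Snapshot is assumed to return the range spec and the serialized bitmap,
	// mirroring the Data field of the stored api.RangeAllocation.
	rangeSpec, data := src.Snapshot()

	dst := allocator.NewAllocationMap(100, "rangeSpecValue")
	if err := dst.Restore(rangeSpec, data); err != nil {
		t.Fatal(err)
	}
	if !dst.Has(2) {
		t.Fatalf("offset 2 was not restored: %#v", dst)
	}
}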
// NewIngressIPController creates a new IngressIPController.
// TODO this should accept a shared informer
func NewIngressIPController(kc kclient.Interface, ipNet *net.IPNet, resyncInterval time.Duration) *IngressIPController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(kc.Events(""))
	recorder := eventBroadcaster.NewRecorder(kapi.EventSource{Component: "ingressip-controller"})

	ic := &IngressIPController{
		client:     kc,
		queue:      workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		maxRetries: 10,
		recorder:   recorder,
	}

	ic.cache, ic.controller = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
				return ic.client.Services(kapi.NamespaceAll).List(options)
			},
			WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
				return ic.client.Services(kapi.NamespaceAll).Watch(options)
			},
		},
		&kapi.Service{},
		resyncInterval,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				service := obj.(*kapi.Service)
				glog.V(5).Infof("Adding service %s/%s", service.Namespace, service.Name)
				ic.enqueueChange(obj, nil)
			},
			UpdateFunc: func(old, cur interface{}) {
				service := cur.(*kapi.Service)
				glog.V(5).Infof("Updating service %s/%s", service.Namespace, service.Name)
				ic.enqueueChange(cur, old)
			},
			DeleteFunc: func(obj interface{}) {
				service := obj.(*kapi.Service)
				glog.V(5).Infof("Deleting service %s/%s", service.Namespace, service.Name)
				ic.enqueueChange(nil, obj)
			},
		},
	)

	ic.changeHandler = ic.processChange
	ic.persistenceHandler = persistService

	ic.ipAllocator = ipallocator.NewAllocatorCIDRRange(ipNet, func(max int, rangeSpec string) allocator.Interface {
		return allocator.NewAllocationMap(max, rangeSpec)
	})

	ic.allocationMap = make(map[string]string)
	ic.requeuedAllocations = sets.NewString()

	return ic
}
func newStorage(t *testing.T) (*tools.FakeEtcdClient, ipallocator.Interface, allocator.Interface) {
	etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
	_, cidr, err := net.ParseCIDR("192.168.1.0/24")
	if err != nil {
		t.Fatal(err)
	}

	var backing allocator.Interface
	storage := ipallocator.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		backing = mem
		etcd := allocator_etcd.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", etcdStorage)
		return etcd
	})

	return fakeClient, storage, backing
}
func newStorage(t *testing.T) (*etcdtesting.EtcdTestServer, ipallocator.Interface, allocator.Interface, storage.Interface) {
	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
	_, cidr, err := net.ParseCIDR("192.168.1.0/24")
	if err != nil {
		t.Fatal(err)
	}

	var backing allocator.Interface
	storage := ipallocator.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		backing = mem
		etcd := allocatoretcd.NewEtcd(mem, "/ranges/serviceips", api.Resource("serviceipallocations"), etcdStorage)
		return etcd
	})

	return server, storage, backing, generic.NewRawStorage(etcdStorage)
}
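// Both newStorage helpers above rely on the same delegate-factory pattern:
// NewAllocatorCIDRRange derives max and rangeSpec from the CIDR and hands them to
// the closure, which builds the backing allocator.Interface and may capture it so
// tests can assert against the raw map. The helper below is a minimal, hypothetical
// sketch of that pattern without the etcd layer; it assumes the same packages the
// helpers above import. Allocations made through the returned range land in the
// captured backing map, which is what TestStore's backing.Allocate(2) check relies on.
func newInspectableCIDRRange(t *testing.T, cidrStr string) (ipallocator.Interface, allocator.Interface) {
	_, cidr, err := net.ParseCIDR(cidrStr)
	if err != nil {
		t.Fatal(err)
	}

	var backing allocator.Interface
	r := ipallocator.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
		// max and rangeSpec are computed from cidr by the ipallocator.
		mem := allocator.NewAllocationMap(max, rangeSpec)
		backing = mem // keep a handle on the raw bitmap for later inspection
		return mem
	})
	return r, backing
}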
func (m *Master) initV1ResourcesStorage(c *Config) {
	restOptions := func(resource string) generic.RESTOptions {
		return m.GetRESTOptionsOrDie(c, api.Resource(resource))
	}

	podTemplateStorage := podtemplateetcd.NewREST(restOptions("podTemplates"))

	eventStorage := eventetcd.NewREST(restOptions("events"), uint64(c.EventTTL.Seconds()))
	limitRangeStorage := limitrangeetcd.NewREST(restOptions("limitRanges"))

	resourceQuotaStorage, resourceQuotaStatusStorage := resourcequotaetcd.NewREST(restOptions("resourceQuotas"))
	secretStorage := secretetcd.NewREST(restOptions("secrets"))
	serviceAccountStorage := serviceaccountetcd.NewREST(restOptions("serviceAccounts"))
	persistentVolumeStorage, persistentVolumeStatusStorage := pvetcd.NewREST(restOptions("persistentVolumes"))
	persistentVolumeClaimStorage, persistentVolumeClaimStatusStorage := pvcetcd.NewREST(restOptions("persistentVolumeClaims"))
	configMapStorage := configmapetcd.NewREST(restOptions("configMaps"))

	namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewREST(restOptions("namespaces"))
	m.namespaceRegistry = namespace.NewRegistry(namespaceStorage)

	endpointsStorage := endpointsetcd.NewREST(restOptions("endpoints"))
	m.endpointRegistry = endpoint.NewRegistry(endpointsStorage)

	nodeStorage := nodeetcd.NewStorage(restOptions("nodes"), c.KubeletClient, m.ProxyTransport)
	m.nodeRegistry = node.NewRegistry(nodeStorage.Node)

	podStorage := podetcd.NewStorage(
		restOptions("pods"),
		kubeletclient.ConnectionInfoGetter(nodeStorage.Node),
		m.ProxyTransport,
	)

	serviceRESTStorage, serviceStatusStorage := serviceetcd.NewREST(restOptions("services"))
	m.serviceRegistry = service.NewRegistry(serviceRESTStorage)

	var serviceClusterIPRegistry service.RangeRegistry
	serviceClusterIPRange := m.ServiceClusterIPRange
	if serviceClusterIPRange == nil {
		glog.Fatalf("service clusterIPRange is nil")
		return
	}

	serviceStorage, err := c.StorageFactory.New(api.Resource("services"))
	if err != nil {
		glog.Fatal(err.Error())
	}

	serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		// TODO etcdallocator package to return a storage interface via the storageFactory
		etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", api.Resource("serviceipallocations"), serviceStorage)
		serviceClusterIPRegistry = etcd
		return etcd
	})
	m.serviceClusterIPAllocator = serviceClusterIPRegistry

	var serviceNodePortRegistry service.RangeRegistry
	serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.ServiceNodePortRange, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		// TODO etcdallocator package to return a storage interface via the storageFactory
		etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", api.Resource("servicenodeportallocations"), serviceStorage)
		serviceNodePortRegistry = etcd
		return etcd
	})
	m.serviceNodePortAllocator = serviceNodePortRegistry

	controllerStorage := controlleretcd.NewStorage(restOptions("replicationControllers"))

	serviceRest := service.NewStorage(m.serviceRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator, m.ProxyTransport)

	// TODO: Factor out the core API registration
	m.v1ResourcesStorage = map[string]rest.Storage{
		"pods":             podStorage.Pod,
		"pods/attach":      podStorage.Attach,
		"pods/status":      podStorage.Status,
		"pods/log":         podStorage.Log,
		"pods/exec":        podStorage.Exec,
		"pods/portforward": podStorage.PortForward,
		"pods/proxy":       podStorage.Proxy,
		"pods/binding":     podStorage.Binding,
		"bindings":         podStorage.Binding,

		"podTemplates": podTemplateStorage,

		"replicationControllers":        controllerStorage.Controller,
		"replicationControllers/status": controllerStorage.Status,

		"services":        serviceRest.Service,
		"services/proxy":  serviceRest.Proxy,
		"services/status": serviceStatusStorage,

		"endpoints": endpointsStorage,

		"nodes":        nodeStorage.Node,
		"nodes/status": nodeStorage.Status,
		"nodes/proxy":  nodeStorage.Proxy,

		"events": eventStorage,

		"limitRanges":                   limitRangeStorage,
		"resourceQuotas":                resourceQuotaStorage,
		"resourceQuotas/status":         resourceQuotaStatusStorage,
		"namespaces":                    namespaceStorage,
		"namespaces/status":             namespaceStatusStorage,
		"namespaces/finalize":           namespaceFinalizeStorage,
		"secrets":                       secretStorage,
		"serviceAccounts":               serviceAccountStorage,
		"persistentVolumes":             persistentVolumeStorage,
		"persistentVolumes/status":      persistentVolumeStatusStorage,
		"persistentVolumeClaims":        persistentVolumeClaimStorage,
		"persistentVolumeClaims/status": persistentVolumeClaimStatusStorage,
		"configMaps":                    configMapStorage,

		"componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }),
	}

	if registered.IsEnabledVersion(unversioned.GroupVersion{Group: "autoscaling", Version: "v1"}) {
		m.v1ResourcesStorage["replicationControllers/scale"] = controllerStorage.Scale
	}
}
func (m *Master) initV1ResourcesStorage(c *Config) {
	storageDecorator := m.StorageDecorator()
	dbClient := func(resource string) storage.Interface { return c.StorageDestinations.Get("", resource) }

	podTemplateStorage := podtemplateetcd.NewREST(dbClient("podTemplates"), storageDecorator)

	eventStorage := eventetcd.NewREST(dbClient("events"), storageDecorator, uint64(c.EventTTL.Seconds()))
	limitRangeStorage := limitrangeetcd.NewREST(dbClient("limitRanges"), storageDecorator)

	resourceQuotaStorage, resourceQuotaStatusStorage := resourcequotaetcd.NewREST(dbClient("resourceQuotas"), storageDecorator)
	secretStorage := secretetcd.NewREST(dbClient("secrets"), storageDecorator)
	serviceAccountStorage := serviceaccountetcd.NewREST(dbClient("serviceAccounts"), storageDecorator)
	persistentVolumeStorage, persistentVolumeStatusStorage := pvetcd.NewREST(dbClient("persistentVolumes"), storageDecorator)
	persistentVolumeClaimStorage, persistentVolumeClaimStatusStorage := pvcetcd.NewREST(dbClient("persistentVolumeClaims"), storageDecorator)
	configMapStorage := configmapetcd.NewREST(dbClient("configMaps"), storageDecorator)

	namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewREST(dbClient("namespaces"), storageDecorator)
	m.namespaceRegistry = namespace.NewRegistry(namespaceStorage)

	endpointsStorage := endpointsetcd.NewREST(dbClient("endpoints"), storageDecorator)
	m.endpointRegistry = endpoint.NewRegistry(endpointsStorage)

	nodeStorage, nodeStatusStorage := nodeetcd.NewREST(dbClient("nodes"), storageDecorator, c.KubeletClient, m.ProxyTransport)
	m.nodeRegistry = node.NewRegistry(nodeStorage)

	podStorage := podetcd.NewStorage(
		dbClient("pods"),
		storageDecorator,
		kubeletclient.ConnectionInfoGetter(nodeStorage),
		m.ProxyTransport,
	)

	serviceStorage, serviceStatusStorage := serviceetcd.NewREST(dbClient("services"), storageDecorator)
	m.serviceRegistry = service.NewRegistry(serviceStorage)

	var serviceClusterIPRegistry service.RangeRegistry
	serviceClusterIPRange := m.ServiceClusterIPRange
	if serviceClusterIPRange == nil {
		glog.Fatalf("service clusterIPRange is nil")
		return
	}
	serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", api.Resource("serviceipallocations"), dbClient("services"))
		serviceClusterIPRegistry = etcd
		return etcd
	})
	m.serviceClusterIPAllocator = serviceClusterIPRegistry

	var serviceNodePortRegistry service.RangeRegistry
	serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.ServiceNodePortRange, func(max int, rangeSpec string) allocator.Interface {
		mem := allocator.NewAllocationMap(max, rangeSpec)
		etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", api.Resource("servicenodeportallocations"), dbClient("services"))
		serviceNodePortRegistry = etcd
		return etcd
	})
	m.serviceNodePortAllocator = serviceNodePortRegistry

	controllerStorage, controllerStatusStorage := controlleretcd.NewREST(dbClient("replicationControllers"), storageDecorator)

	m.v1ResourcesStorage = map[string]rest.Storage{
		"pods":             podStorage.Pod,
		"pods/attach":      podStorage.Attach,
		"pods/status":      podStorage.Status,
		"pods/log":         podStorage.Log,
		"pods/exec":        podStorage.Exec,
		"pods/portforward": podStorage.PortForward,
		"pods/proxy":       podStorage.Proxy,
		"pods/binding":     podStorage.Binding,
		"bindings":         podStorage.Binding,

		"podTemplates": podTemplateStorage,

		"replicationControllers":        controllerStorage,
		"replicationControllers/status": controllerStatusStorage,

		"services":        service.NewStorage(m.serviceRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator, m.ProxyTransport),
		"services/status": serviceStatusStorage,

		"endpoints": endpointsStorage,

		"nodes":        nodeStorage,
		"nodes/status": nodeStatusStorage,

		"events": eventStorage,

		"limitRanges":                   limitRangeStorage,
		"resourceQuotas":                resourceQuotaStorage,
		"resourceQuotas/status":         resourceQuotaStatusStorage,
		"namespaces":                    namespaceStorage,
		"namespaces/status":             namespaceStatusStorage,
		"namespaces/finalize":           namespaceFinalizeStorage,
		"secrets":                       secretStorage,
		"serviceAccounts":               serviceAccountStorage,
		"persistentVolumes":             persistentVolumeStorage,
		"persistentVolumes/status":      persistentVolumeStatusStorage,
		"persistentVolumeClaims":        persistentVolumeClaimStorage,
		"persistentVolumeClaims/status": persistentVolumeClaimStatusStorage,
		"configMaps":                    configMapStorage,

		"componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }),
	}
}
// init initializes master. func (m *Master) init(c *Config) { if c.ProxyDialer != nil || c.ProxyTLSClientConfig != nil { m.proxyTransport = util.SetTransportDefaults(&http.Transport{ Dial: c.ProxyDialer, TLSClientConfig: c.ProxyTLSClientConfig, }) } healthzChecks := []healthz.HealthzChecker{} storageDecorator := c.storageDecorator() dbClient := func(resource string) storage.Interface { return c.StorageDestinations.get("", resource) } podStorage := podetcd.NewStorage(dbClient("pods"), storageDecorator, c.KubeletClient, m.proxyTransport) podTemplateStorage := podtemplateetcd.NewREST(dbClient("podTemplates"), storageDecorator) eventStorage := eventetcd.NewREST(dbClient("events"), storageDecorator, uint64(c.EventTTL.Seconds())) limitRangeStorage := limitrangeetcd.NewREST(dbClient("limitRanges"), storageDecorator) resourceQuotaStorage, resourceQuotaStatusStorage := resourcequotaetcd.NewREST(dbClient("resourceQuotas"), storageDecorator) secretStorage := secretetcd.NewREST(dbClient("secrets"), storageDecorator) serviceAccountStorage := serviceaccountetcd.NewREST(dbClient("serviceAccounts"), storageDecorator) persistentVolumeStorage, persistentVolumeStatusStorage := pvetcd.NewREST(dbClient("persistentVolumes"), storageDecorator) persistentVolumeClaimStorage, persistentVolumeClaimStatusStorage := pvcetcd.NewREST(dbClient("persistentVolumeClaims"), storageDecorator) namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewREST(dbClient("namespaces"), storageDecorator) m.namespaceRegistry = namespace.NewRegistry(namespaceStorage) endpointsStorage := endpointsetcd.NewREST(dbClient("endpoints"), storageDecorator) m.endpointRegistry = endpoint.NewRegistry(endpointsStorage) nodeStorage, nodeStatusStorage := nodeetcd.NewREST(dbClient("nodes"), storageDecorator, c.KubeletClient, m.proxyTransport) m.nodeRegistry = node.NewRegistry(nodeStorage) serviceStorage := serviceetcd.NewREST(dbClient("services"), storageDecorator) m.serviceRegistry = service.NewRegistry(serviceStorage) var serviceClusterIPRegistry service.RangeRegistry serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(m.serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface { mem := allocator.NewAllocationMap(max, rangeSpec) etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", dbClient("services")) serviceClusterIPRegistry = etcd return etcd }) m.serviceClusterIPAllocator = serviceClusterIPRegistry var serviceNodePortRegistry service.RangeRegistry serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePortRange, func(max int, rangeSpec string) allocator.Interface { mem := allocator.NewAllocationMap(max, rangeSpec) etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", dbClient("services")) serviceNodePortRegistry = etcd return etcd }) m.serviceNodePortAllocator = serviceNodePortRegistry controllerStorage, controllerStatusStorage := controlleretcd.NewREST(dbClient("replicationControllers"), storageDecorator) // TODO: Factor out the core API registration m.storage = map[string]rest.Storage{ "pods": podStorage.Pod, "pods/attach": podStorage.Attach, "pods/status": podStorage.Status, "pods/log": podStorage.Log, "pods/exec": podStorage.Exec, "pods/portforward": podStorage.PortForward, "pods/proxy": podStorage.Proxy, "pods/binding": podStorage.Binding, "bindings": podStorage.Binding, "podTemplates": podTemplateStorage, "replicationControllers": controllerStorage, "replicationControllers/status": 
controllerStatusStorage, "services": service.NewStorage(m.serviceRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator, m.proxyTransport), "endpoints": endpointsStorage, "nodes": nodeStorage, "nodes/status": nodeStatusStorage, "events": eventStorage, "limitRanges": limitRangeStorage, "resourceQuotas": resourceQuotaStorage, "resourceQuotas/status": resourceQuotaStatusStorage, "namespaces": namespaceStorage, "namespaces/status": namespaceStatusStorage, "namespaces/finalize": namespaceFinalizeStorage, "secrets": secretStorage, "serviceAccounts": serviceAccountStorage, "persistentVolumes": persistentVolumeStorage, "persistentVolumes/status": persistentVolumeStatusStorage, "persistentVolumeClaims": persistentVolumeClaimStorage, "persistentVolumeClaims/status": persistentVolumeClaimStatusStorage, "componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }), } if m.tunneler != nil { m.tunneler.Run(m.getNodeAddresses) healthzChecks = append(healthzChecks, healthz.NamedCheck("SSH Tunnel Check", m.IsTunnelSyncHealthy)) prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "apiserver_proxy_tunnel_sync_latency_secs", Help: "The time since the last successful synchronization of the SSH tunnels for proxy requests.", }, func() float64 { return float64(m.tunneler.SecondsSinceSync()) }) } apiVersions := []string{} // Install v1 unless disabled. if !m.apiGroupVersionOverrides["api/v1"].Disable { if err := m.api_v1().InstallREST(m.handlerContainer); err != nil { glog.Fatalf("Unable to setup API v1: %v", err) } apiVersions = append(apiVersions, "v1") } apiserver.InstallSupport(m.muxHelper, m.rootWebService, c.EnableProfiling, healthzChecks...) apiserver.AddApiWebService(m.handlerContainer, c.APIPrefix, apiVersions) apiserver.InstallServiceErrorHandler(m.handlerContainer, m.newRequestInfoResolver(), apiVersions) // allGroups records all supported groups at /apis allGroups := []unversioned.APIGroup{} // Install extensions unless disabled. if !m.apiGroupVersionOverrides["extensions/v1beta1"].Disable { m.thirdPartyStorage = c.StorageDestinations.APIGroups["extensions"].Default m.thirdPartyResources = map[string]*thirdpartyresourcedataetcd.REST{} expVersion := m.experimental(c) if err := expVersion.InstallREST(m.handlerContainer); err != nil { glog.Fatalf("Unable to setup experimental api: %v", err) } g, err := latest.Group("extensions") if err != nil { glog.Fatalf("Unable to setup experimental api: %v", err) } expAPIVersions := []unversioned.GroupVersionForDiscovery{ { GroupVersion: expVersion.GroupVersion.String(), Version: expVersion.GroupVersion.Version, }, } storageVersion, found := c.StorageVersions[g.Group] if !found { glog.Fatalf("Couldn't find storage version of group %v", g.Group) } group := unversioned.APIGroup{ Name: g.Group, Versions: expAPIVersions, PreferredVersion: unversioned.GroupVersionForDiscovery{GroupVersion: storageVersion, Version: apiutil.GetVersion(storageVersion)}, } apiserver.AddGroupWebService(m.handlerContainer, c.APIGroupPrefix+"/"+latest.GroupOrDie("extensions").Group, group) allGroups = append(allGroups, group) apiserver.InstallServiceErrorHandler(m.handlerContainer, m.newRequestInfoResolver(), []string{expVersion.GroupVersion.String()}) } // This should be done after all groups are registered // TODO: replace the hardcoded "apis". apiserver.AddApisWebService(m.handlerContainer, "/apis", allGroups) // Register root handler. 
// We do not register this using restful Webservice since we do not want to surface this in api docs. // Allow master to be embedded in contexts which already have something registered at the root if c.EnableIndex { m.mux.HandleFunc("/", apiserver.IndexHandler(m.handlerContainer, m.muxHelper)) } if c.EnableLogsSupport { apiserver.InstallLogsSupport(m.muxHelper) } if c.EnableUISupport { ui.InstallSupport(m.muxHelper, m.enableSwaggerSupport) } if c.EnableProfiling { m.mux.HandleFunc("/debug/pprof/", pprof.Index) m.mux.HandleFunc("/debug/pprof/profile", pprof.Profile) m.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) } handler := http.Handler(m.mux.(*http.ServeMux)) insecureHandler := handler // TODO: handle CORS and auth using go-restful // See github.com/emicklei/go-restful/blob/master/examples/restful-CORS-filter.go, and // github.com/emicklei/go-restful/blob/master/examples/restful-basic-authentication.go attributeGetter := apiserver.NewRequestAttributeGetter(m.requestContextMapper, m.newRequestInfoResolver()) handler = apiserver.WithAuthorizationCheck(handler, attributeGetter, m.authorizer) // Install Authenticator if c.Authenticator != nil { authenticatedHandler, err := handlers.NewRequestAuthenticator(m.requestContextMapper, c.Authenticator, handlers.Unauthorized(c.SupportsBasicAuth), handler) if err != nil { glog.Fatalf("Could not initialize authenticator: %v", err) } handler = authenticatedHandler } // Since OPTIONS request cannot carry authn headers (by w3c standards), we are doing CORS check // before auth check. Otherwise all the CORS request will be rejected. if len(c.CorsAllowedOriginList) > 0 { allowedOriginRegexps, err := util.CompileRegexps(c.CorsAllowedOriginList) if err != nil { glog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(c.CorsAllowedOriginList, ","), err) } handler = apiserver.CORS(handler, allowedOriginRegexps, nil, nil, "true") insecureHandler = apiserver.CORS(insecureHandler, allowedOriginRegexps, nil, nil, "true") } m.InsecureHandler = insecureHandler // Install root web services m.handlerContainer.Add(m.rootWebService) // TODO: Make this optional? Consumers of master depend on this currently. m.Handler = handler if m.enableSwaggerSupport { m.InstallSwaggerAPI() } // After all wrapping is done, put a context filter around both handlers if handler, err := api.NewRequestContextFilter(m.requestContextMapper, m.Handler); err != nil { glog.Fatalf("Could not initialize request context filter: %v", err) } else { m.Handler = handler } if handler, err := api.NewRequestContextFilter(m.requestContextMapper, m.InsecureHandler); err != nil { glog.Fatalf("Could not initialize request context filter: %v", err) } else { m.InsecureHandler = handler } // TODO: Attempt clean shutdown? if m.enableCoreControllers { m.NewBootstrapController().Start() } }
func newStorage(t *testing.T) (*Etcd, *etcdtesting.EtcdTestServer, allocator.Interface) {
	etcdStorage, server := registrytest.NewEtcdStorage(t, "")
	mem := allocator.NewAllocationMap(100, "rangeSpecValue")
	etcd := NewEtcd(mem, "/ranges/serviceips", api.Resource("serviceipallocations"), etcdStorage)
	return etcd, server, mem
}
func newStorage(t *testing.T) (*Etcd, *tools.FakeEtcdClient, allocator.Interface) {
	etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
	mem := allocator.NewAllocationMap(100, "rangeSpecValue")
	etcd := NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", etcdStorage)
	return etcd, fakeClient, mem
}
// init initializes master. func (m *Master) init(c *Config) { if c.ProxyDialer != nil || c.ProxyTLSClientConfig != nil { m.proxyTransport = util.SetTransportDefaults(&http.Transport{ Dial: c.ProxyDialer, TLSClientConfig: c.ProxyTLSClientConfig, }) } healthzChecks := []healthz.HealthzChecker{} podStorage := podetcd.NewStorage(c.DatabaseStorage, c.EnableWatchCache, c.KubeletClient, m.proxyTransport) podTemplateStorage := podtemplateetcd.NewREST(c.DatabaseStorage) eventStorage := eventetcd.NewREST(c.DatabaseStorage, uint64(c.EventTTL.Seconds())) limitRangeStorage := limitrangeetcd.NewREST(c.DatabaseStorage) resourceQuotaStorage, resourceQuotaStatusStorage := resourcequotaetcd.NewREST(c.DatabaseStorage) secretStorage := secretetcd.NewREST(c.DatabaseStorage) serviceAccountStorage := serviceaccountetcd.NewREST(c.DatabaseStorage) persistentVolumeStorage, persistentVolumeStatusStorage := pvetcd.NewREST(c.DatabaseStorage) persistentVolumeClaimStorage, persistentVolumeClaimStatusStorage := pvcetcd.NewREST(c.DatabaseStorage) namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewREST(c.DatabaseStorage) m.namespaceRegistry = namespace.NewRegistry(namespaceStorage) endpointsStorage := endpointsetcd.NewREST(c.DatabaseStorage, c.EnableWatchCache) m.endpointRegistry = endpoint.NewRegistry(endpointsStorage) securityContextConstraintsStorage := sccetcd.NewStorage(c.DatabaseStorage) nodeStorage, nodeStatusStorage := nodeetcd.NewREST(c.DatabaseStorage, c.EnableWatchCache, c.KubeletClient, m.proxyTransport) m.nodeRegistry = node.NewRegistry(nodeStorage) serviceStorage := serviceetcd.NewREST(c.DatabaseStorage) m.serviceRegistry = service.NewRegistry(serviceStorage) var serviceClusterIPRegistry service.RangeRegistry serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(m.serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface { mem := allocator.NewAllocationMap(max, rangeSpec) etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.DatabaseStorage) serviceClusterIPRegistry = etcd return etcd }) m.serviceClusterIPAllocator = serviceClusterIPRegistry var serviceNodePortRegistry service.RangeRegistry serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePortRange, func(max int, rangeSpec string) allocator.Interface { mem := allocator.NewAllocationMap(max, rangeSpec) etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.DatabaseStorage) serviceNodePortRegistry = etcd return etcd }) m.serviceNodePortAllocator = serviceNodePortRegistry controllerStorage := controlleretcd.NewREST(c.DatabaseStorage) // TODO: Factor out the core API registration m.storage = map[string]rest.Storage{ "pods": podStorage.Pod, "pods/attach": podStorage.Attach, "pods/status": podStorage.Status, "pods/log": podStorage.Log, "pods/exec": podStorage.Exec, "pods/portforward": podStorage.PortForward, "pods/proxy": podStorage.Proxy, "pods/binding": podStorage.Binding, "bindings": podStorage.Binding, "podTemplates": podTemplateStorage, "replicationControllers": controllerStorage, "services": service.NewStorage(m.serviceRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator, m.proxyTransport), "endpoints": endpointsStorage, "nodes": nodeStorage, "nodes/status": nodeStatusStorage, "events": eventStorage, "limitRanges": limitRangeStorage, "resourceQuotas": resourceQuotaStorage, "resourceQuotas/status": resourceQuotaStatusStorage, "namespaces": namespaceStorage, 
"namespaces/status": namespaceStatusStorage, "namespaces/finalize": namespaceFinalizeStorage, "secrets": secretStorage, "serviceAccounts": serviceAccountStorage, "securityContextConstraints": securityContextConstraintsStorage, "persistentVolumes": persistentVolumeStorage, "persistentVolumes/status": persistentVolumeStatusStorage, "persistentVolumeClaims": persistentVolumeClaimStorage, "persistentVolumeClaims/status": persistentVolumeClaimStatusStorage, "componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }), } if m.tunneler != nil { m.tunneler.Run(m.getNodeAddresses) healthzChecks = append(healthzChecks, healthz.NamedCheck("SSH Tunnel Check", m.IsTunnelSyncHealthy)) prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "apiserver_proxy_tunnel_sync_latency_secs", Help: "The time since the last successful synchronization of the SSH tunnels for proxy requests.", }, func() float64 { return float64(m.tunneler.SecondsSinceSync()) }) } apiVersions := []string{} if m.v1beta3 { if err := m.api_v1beta3().InstallREST(m.handlerContainer); err != nil { glog.Fatalf("Unable to setup API v1beta3: %v", err) } apiVersions = append(apiVersions, "v1beta3") } if m.v1 { if err := m.api_v1().InstallREST(m.handlerContainer); err != nil { glog.Fatalf("Unable to setup API v1: %v", err) } apiVersions = append(apiVersions, "v1") } apiserver.InstallSupport(m.muxHelper, m.rootWebService, c.EnableProfiling, healthzChecks...) apiserver.AddApiWebService(m.handlerContainer, c.APIPrefix, apiVersions) defaultVersion := m.defaultAPIGroupVersion() requestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: sets.NewString(strings.TrimPrefix(defaultVersion.Root, "/")), RestMapper: defaultVersion.Mapper} apiserver.InstallServiceErrorHandler(m.handlerContainer, requestInfoResolver, apiVersions) if m.exp { expVersion := m.experimental(c) if err := expVersion.InstallREST(m.handlerContainer); err != nil { glog.Fatalf("Unable to setup experimental api: %v", err) } apiserver.AddApiWebService(m.handlerContainer, c.ExpAPIPrefix, []string{expVersion.Version}) expRequestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: sets.NewString(strings.TrimPrefix(expVersion.Root, "/")), RestMapper: expVersion.Mapper} apiserver.InstallServiceErrorHandler(m.handlerContainer, expRequestInfoResolver, []string{expVersion.Version}) } // Register root handler. // We do not register this using restful Webservice since we do not want to surface this in api docs. 
// Allow master to be embedded in contexts which already have something registered at the root if c.EnableIndex { m.mux.HandleFunc("/", apiserver.IndexHandler(m.handlerContainer, m.muxHelper)) } if c.EnableLogsSupport { apiserver.InstallLogsSupport(m.muxHelper) } /*if c.EnableUISupport { ui.InstallSupport(m.mux) }*/ if c.EnableProfiling { m.mux.HandleFunc("/debug/pprof/", pprof.Index) m.mux.HandleFunc("/debug/pprof/profile", pprof.Profile) m.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) } handler := http.Handler(m.mux.(*http.ServeMux)) // TODO: handle CORS and auth using go-restful // See github.com/emicklei/go-restful/blob/master/examples/restful-CORS-filter.go, and // github.com/emicklei/go-restful/blob/master/examples/restful-basic-authentication.go if len(c.CorsAllowedOriginList) > 0 { allowedOriginRegexps, err := util.CompileRegexps(c.CorsAllowedOriginList) if err != nil { glog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(c.CorsAllowedOriginList, ","), err) } handler = apiserver.CORS(handler, allowedOriginRegexps, nil, nil, "true") } m.InsecureHandler = handler attributeGetter := apiserver.NewRequestAttributeGetter(m.requestContextMapper, latest.RESTMapper, "api") handler = apiserver.WithAuthorizationCheck(handler, attributeGetter, m.authorizer) // Install Authenticator if c.Authenticator != nil { authenticatedHandler, err := handlers.NewRequestAuthenticator(m.requestContextMapper, c.Authenticator, handlers.Unauthorized(c.SupportsBasicAuth), handler) if err != nil { glog.Fatalf("Could not initialize authenticator: %v", err) } handler = authenticatedHandler } // Install root web services m.handlerContainer.Add(m.rootWebService) // TODO: Make this optional? Consumers of master depend on this currently. m.Handler = handler if m.enableSwaggerSupport { m.InstallSwaggerAPI() } // After all wrapping is done, put a context filter around both handlers if handler, err := api.NewRequestContextFilter(m.requestContextMapper, m.Handler); err != nil { glog.Fatalf("Could not initialize request context filter: %v", err) } else { m.Handler = handler } if handler, err := api.NewRequestContextFilter(m.requestContextMapper, m.InsecureHandler); err != nil { glog.Fatalf("Could not initialize request context filter: %v", err) } else { m.InsecureHandler = handler } // TODO: Attempt clean shutdown? if m.enableCoreControllers { m.NewBootstrapController().Start() } }
// Helper that wraps NewAllocatorCIDRRange, for creating a range backed by an in-memory store.
func NewCIDRRange(cidr *net.IPNet) *Range {
	return NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
		return allocator.NewAllocationMap(max, rangeSpec)
	})
}
// Helper that wraps NewPortAllocatorCustom, for creating a range backed by an in-memory store.
func NewPortAllocator(pr net.PortRange) *PortAllocator {
	return NewPortAllocatorCustom(pr, func(max int, rangeSpec string) allocator.Interface {
		return allocator.NewAllocationMap(max, rangeSpec)
	})
}
// init initializes master. func (m *Master) init(c *Config) { healthzChecks := []healthz.HealthzChecker{} m.clock = util.RealClock{} podStorage := podetcd.NewStorage(c.DatabaseStorage, c.KubeletClient) podTemplateStorage := podtemplateetcd.NewREST(c.DatabaseStorage) eventRegistry := event.NewEtcdRegistry(c.DatabaseStorage, uint64(c.EventTTL.Seconds())) limitRangeStorage := limitrangeetcd.NewStorage(c.DatabaseStorage) resourceQuotaStorage, resourceQuotaStatusStorage := resourcequotaetcd.NewStorage(c.DatabaseStorage) secretStorage := secretetcd.NewStorage(c.DatabaseStorage) serviceAccountStorage := serviceaccountetcd.NewStorage(c.DatabaseStorage) persistentVolumeStorage, persistentVolumeStatusStorage := pvetcd.NewStorage(c.DatabaseStorage) persistentVolumeClaimStorage, persistentVolumeClaimStatusStorage := pvcetcd.NewStorage(c.DatabaseStorage) namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewStorage(c.DatabaseStorage) m.namespaceRegistry = namespace.NewRegistry(namespaceStorage) endpointsStorage := endpointsetcd.NewStorage(c.DatabaseStorage) m.endpointRegistry = endpoint.NewRegistry(endpointsStorage) nodeStorage, nodeStatusStorage := nodeetcd.NewStorage(c.DatabaseStorage, c.KubeletClient) m.nodeRegistry = minion.NewRegistry(nodeStorage) serviceStorage := serviceetcd.NewStorage(c.DatabaseStorage) m.serviceRegistry = service.NewRegistry(serviceStorage) var serviceClusterIPRegistry service.RangeRegistry serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(m.serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface { mem := allocator.NewAllocationMap(max, rangeSpec) etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.DatabaseStorage) serviceClusterIPRegistry = etcd return etcd }) m.serviceClusterIPAllocator = serviceClusterIPRegistry var serviceNodePortRegistry service.RangeRegistry serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePortRange, func(max int, rangeSpec string) allocator.Interface { mem := allocator.NewAllocationMap(max, rangeSpec) etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.DatabaseStorage) serviceNodePortRegistry = etcd return etcd }) m.serviceNodePortAllocator = serviceNodePortRegistry controllerStorage := controlleretcd.NewREST(c.DatabaseStorage) // TODO: Factor out the core API registration m.storage = map[string]rest.Storage{ "pods": podStorage.Pod, "pods/attach": podStorage.Attach, "pods/status": podStorage.Status, "pods/log": podStorage.Log, "pods/exec": podStorage.Exec, "pods/portforward": podStorage.PortForward, "pods/proxy": podStorage.Proxy, "pods/binding": podStorage.Binding, "bindings": podStorage.Binding, "podTemplates": podTemplateStorage, "replicationControllers": controllerStorage, "services": service.NewStorage(m.serviceRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator), "endpoints": endpointsStorage, "nodes": nodeStorage, "nodes/status": nodeStatusStorage, "events": event.NewStorage(eventRegistry), "limitRanges": limitRangeStorage, "resourceQuotas": resourceQuotaStorage, "resourceQuotas/status": resourceQuotaStatusStorage, "namespaces": namespaceStorage, "namespaces/status": namespaceStatusStorage, "namespaces/finalize": namespaceFinalizeStorage, "secrets": secretStorage, "serviceAccounts": serviceAccountStorage, "persistentVolumes": persistentVolumeStorage, "persistentVolumes/status": persistentVolumeStatusStorage, "persistentVolumeClaims": 
persistentVolumeClaimStorage, "persistentVolumeClaims/status": persistentVolumeClaimStatusStorage, "componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }), } // establish the node proxy dialer if len(c.SSHUser) > 0 { // Usernames are capped @ 32 if len(c.SSHUser) > 32 { glog.Warning("SSH User is too long, truncating to 32 chars") c.SSHUser = c.SSHUser[0:32] } glog.Infof("Setting up proxy: %s %s", c.SSHUser, c.SSHKeyfile) // public keyfile is written last, so check for that. publicKeyFile := c.SSHKeyfile + ".pub" exists, err := util.FileExists(publicKeyFile) if err != nil { glog.Errorf("Error detecting if key exists: %v", err) } else if !exists { glog.Infof("Key doesn't exist, attempting to create") err := m.generateSSHKey(c.SSHUser, c.SSHKeyfile, publicKeyFile) if err != nil { glog.Errorf("Failed to create key pair: %v", err) } } m.tunnels = &util.SSHTunnelList{} m.dialer = m.Dial m.setupSecureProxy(c.SSHUser, c.SSHKeyfile, publicKeyFile) m.lastSync = m.clock.Now().Unix() // This is pretty ugly. A better solution would be to pull this all the way up into the // server.go file. httpKubeletClient, ok := c.KubeletClient.(*client.HTTPKubeletClient) if ok { httpKubeletClient.Config.Dial = m.dialer transport, err := client.MakeTransport(httpKubeletClient.Config) if err != nil { glog.Errorf("Error setting up transport over SSH: %v", err) } else { httpKubeletClient.Client.Transport = transport } } else { glog.Errorf("Failed to cast %v to HTTPKubeletClient, skipping SSH tunnel.", c.KubeletClient) } healthzChecks = append(healthzChecks, healthz.NamedCheck("SSH Tunnel Check", m.IsTunnelSyncHealthy)) m.lastSyncMetric = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "apiserver_proxy_tunnel_sync_latency_secs", Help: "The time since the last successful synchronization of the SSH tunnels for proxy requests.", }, func() float64 { return float64(m.secondsSinceSync()) }) } apiVersions := []string{} if m.v1 { if err := m.api_v1().InstallREST(m.handlerContainer); err != nil { glog.Fatalf("Unable to setup API v1: %v", err) } apiVersions = append(apiVersions, "v1") } apiserver.InstallSupport(m.muxHelper, m.rootWebService, c.EnableProfiling, healthzChecks...) apiserver.AddApiWebService(m.handlerContainer, c.APIPrefix, apiVersions) defaultVersion := m.defaultAPIGroupVersion() requestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(defaultVersion.Root, "/")), RestMapper: defaultVersion.Mapper} apiserver.InstallServiceErrorHandler(m.handlerContainer, requestInfoResolver, apiVersions) if m.exp { expVersion := m.expapi(c) if err := expVersion.InstallREST(m.handlerContainer); err != nil { glog.Fatalf("Unable to setup experimental api: %v", err) } apiserver.AddApiWebService(m.handlerContainer, c.ExpAPIPrefix, []string{expVersion.Version}) expRequestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(expVersion.Root, "/")), RestMapper: expVersion.Mapper} apiserver.InstallServiceErrorHandler(m.handlerContainer, expRequestInfoResolver, []string{expVersion.Version}) } // Register root handler. // We do not register this using restful Webservice since we do not want to surface this in api docs. 
// Allow master to be embedded in contexts which already have something registered at the root if c.EnableIndex { m.mux.HandleFunc("/", apiserver.IndexHandler(m.handlerContainer, m.muxHelper)) } if c.EnableLogsSupport { apiserver.InstallLogsSupport(m.muxHelper) } if c.EnableUISupport { ui.InstallSupport(m.muxHelper, m.enableSwaggerSupport) } if c.EnableProfiling { m.mux.HandleFunc("/debug/pprof/", pprof.Index) m.mux.HandleFunc("/debug/pprof/profile", pprof.Profile) m.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) } handler := http.Handler(m.mux.(*http.ServeMux)) // TODO: handle CORS and auth using go-restful // See github.com/emicklei/go-restful/blob/master/examples/restful-CORS-filter.go, and // github.com/emicklei/go-restful/blob/master/examples/restful-basic-authentication.go if len(c.CorsAllowedOriginList) > 0 { allowedOriginRegexps, err := util.CompileRegexps(c.CorsAllowedOriginList) if err != nil { glog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(c.CorsAllowedOriginList, ","), err) } handler = apiserver.CORS(handler, allowedOriginRegexps, nil, nil, "true") } m.InsecureHandler = handler attributeGetter := apiserver.NewRequestAttributeGetter(m.requestContextMapper, latest.RESTMapper, "api") handler = apiserver.WithAuthorizationCheck(handler, attributeGetter, m.authorizer) // Install Authenticator if c.Authenticator != nil { authenticatedHandler, err := handlers.NewRequestAuthenticator(m.requestContextMapper, c.Authenticator, handlers.Unauthorized(c.SupportsBasicAuth), handler) if err != nil { glog.Fatalf("Could not initialize authenticator: %v", err) } handler = authenticatedHandler } // Install root web services m.handlerContainer.Add(m.rootWebService) // TODO: Make this optional? Consumers of master depend on this currently. m.Handler = handler if m.enableSwaggerSupport { m.InstallSwaggerAPI() } // After all wrapping is done, put a context filter around both handlers if handler, err := api.NewRequestContextFilter(m.requestContextMapper, m.Handler); err != nil { glog.Fatalf("Could not initialize request context filter: %v", err) } else { m.Handler = handler } if handler, err := api.NewRequestContextFilter(m.requestContextMapper, m.InsecureHandler); err != nil { glog.Fatalf("Could not initialize request context filter: %v", err) } else { m.InsecureHandler = handler } // TODO: Attempt clean shutdown? if m.enableCoreControllers { m.NewBootstrapController().Start() } }
// Helper that wraps New, for creating a range backed by an in-memory store.
func NewInMemory(r *NetIDRange) *Allocator {
	return New(r, func(max int, rangeSpec string) allocator.Interface {
		return allocator.NewAllocationMap(max, rangeSpec)
	})
}
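// The in-memory helpers above (NewCIDRRange, NewPortAllocator, NewInMemory) all pass
// the same factory shape, func(max int, rangeSpec string) allocator.Interface, to
// their respective constructors, so the in-memory map can be decorated without
// touching the range logic. The wrapper below is a hypothetical illustration, not
// existing code: loggingAllocator and NewLoggingCIDRRange are new names, the "log"
// import is assumed, and embedding allocator.Interface means only the overridden
// Allocate (whose signature is taken from the tests above) has to be spelled out.
type loggingAllocator struct {
	allocator.Interface
}

func (l loggingAllocator) Allocate(offset int) (bool, error) {
	ok, err := l.Interface.Allocate(offset)
	log.Printf("Allocate(%d) -> %v, %v", offset, ok, err)
	return ok, err
}

// NewLoggingCIDRRange mirrors NewCIDRRange but logs direct offset allocations.
func NewLoggingCIDRRange(cidr *net.IPNet) *Range {
	return NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
		return loggingAllocator{allocator.NewAllocationMap(max, rangeSpec)}
	})
}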