Example #1
// NewClusterManager creates a cluster manager for shared resources.
// - name: is the name used to tag cluster-wide shared resources. This is the
//   string passed to glbc via --gce-cluster-name.
// - defaultBackendNodePort: is the node port of glbc's default backend. This is
//   the Kubernetes Service that serves the 404 page if no URLs match.
// - defaultHealthCheckPath: is the default path used for L7 health checks, e.g. "/healthz".
func NewClusterManager(
	name string,
	defaultBackendNodePort int64,
	defaultHealthCheckPath string) (*ClusterManager, error) {

	cloudInterface, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return nil, err
	}
	cloud := cloudInterface.(*gce.GCECloud)
	cluster := ClusterManager{ClusterNamer: utils.Namer{name}}
	zone, err := cloud.GetZone()
	if err != nil {
		return nil, err
	}
	cluster.instancePool = instances.NewNodePool(cloud, zone.FailureDomain)
	healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)
	cluster.backendPool = backends.NewBackendPool(
		cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer)
	defaultBackendHealthChecker := healthchecks.NewHealthChecker(cloud, "/healthz", cluster.ClusterNamer)
	defaultBackendPool := backends.NewBackendPool(
		cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer)
	cluster.defaultBackendNodePort = defaultBackendNodePort
	cluster.l7Pool = loadbalancers.NewLoadBalancerPool(
		cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)
	return &cluster, nil
}
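
A minimal usage sketch of the constructor above (hypothetical; the wrapper function below is an assumption for illustration and is not part of the glbc source):

// newClusterManagerFromFlags shows how parsed flag values might be handed to
// NewClusterManager. "/healthz" mirrors the default health check path used above.
func newClusterManagerFromFlags(clusterName string, defaultBackendNodePort int64) (*ClusterManager, error) {
	cm, err := NewClusterManager(clusterName, defaultBackendNodePort, "/healthz")
	if err != nil {
		return nil, err
	}
	return cm, nil
}
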
Example #2
// NewClusterManager creates a cluster manager for shared resources.
// - name: is the name used to tag cluster-wide shared resources. This is the
//   string passed to glbc via --gce-cluster-name.
// - defaultBackendNodePort: is the node port of glbc's default backend. This is
//   the Kubernetes Service that serves the 404 page if no URLs match.
// - defaultHealthCheckPath: is the default path used for L7 health checks, e.g. "/healthz".
func NewClusterManager(
	name string,
	defaultBackendNodePort int64,
	defaultHealthCheckPath string) (*ClusterManager, error) {

	// TODO: Make this more resilient. Currently we create the cloud client
	// and pass it through to all the pools. This makes unit testing easier.
	// However, if the cloud client suddenly fails, we should try to re-create
	// it and continue.
	cloud := getGCEClient()

	cluster := ClusterManager{ClusterNamer: utils.Namer{name}}
	zone, err := cloud.GetZone()
	if err != nil {
		return nil, err
	}
	cluster.instancePool = instances.NewNodePool(cloud, zone.FailureDomain)
	healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)

	// TODO: This needs to change to consolidated management of the default backend.
	cluster.backendPool = backends.NewBackendPool(
		cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{defaultBackendNodePort}, true)
	defaultBackendHealthChecker := healthchecks.NewHealthChecker(cloud, "/healthz", cluster.ClusterNamer)
	defaultBackendPool := backends.NewBackendPool(
		cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{}, false)
	cluster.defaultBackendNodePort = defaultBackendNodePort
	cluster.l7Pool = loadbalancers.NewLoadBalancerPool(
		cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)

	return &cluster, nil
}
Example #3
// NewFakeClusterManager creates a new fake ClusterManager.
func NewFakeClusterManager(clusterName string) *fakeClusterManager {
	fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
	fakeBackends := backends.NewFakeBackendServices()
	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
	fakeHCs := healthchecks.NewFakeHealthChecks()
	namer := utils.Namer{clusterName}
	nodePool := instances.NewNodePool(fakeIGs, defaultZone)
	healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
	backendPool := backends.NewBackendPool(
		fakeBackends,
		healthChecker, nodePool, namer, []int64{}, false)
	l7Pool := loadbalancers.NewLoadBalancerPool(
		fakeLbs,
		// TODO: change this
		backendPool,
		testDefaultBeNodePort,
		namer,
	)
	frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallRules(), namer)
	cm := &ClusterManager{
		ClusterNamer: namer,
		instancePool: nodePool,
		backendPool:  backendPool,
		l7Pool:       l7Pool,
		firewallPool: frPool,
	}
	return &fakeClusterManager{cm, fakeLbs, fakeBackends, fakeIGs}
}
Example #4
// NewClusterManager creates a cluster manager for shared resources.
// - name: is the name used to tag cluster-wide shared resources. This is the
//   string passed to glbc via --gce-cluster-name.
// - defaultBackendNodePort: is the node port of glbc's default backend. This is
//   the Kubernetes Service that serves the 404 page if no URLs match.
// - defaultHealthCheckPath: is the default path used for L7 health checks, e.g. "/healthz".
func NewClusterManager(
	configFilePath string,
	name string,
	defaultBackendNodePort int64,
	defaultHealthCheckPath string) (*ClusterManager, error) {

	// TODO: Make this more resilient. Currently we create the cloud client
	// and pass it through to all the pools. This makes unit testing easier.
	// However, if the cloud client suddenly fails, we should try to re-create
	// it and continue.
	var cloud *gce.GCECloud
	if configFilePath != "" {
		glog.Infof("Reading config from path %v", configFilePath)
		config, err := os.Open(configFilePath)
		if err != nil {
			return nil, err
		}
		defer config.Close()
		cloud = getGCEClient(config)
		glog.Infof("Successfully loaded cloudprovider using config %q", configFilePath)
	} else {
		// While you might be tempted to refactor so we simply assign nil to the
		// config and only invoke getGCEClient once, that will not do the right
		// thing, because a nil *os.File stored in an interface value does not
		// compare equal to nil in Go (see the standalone sketch after this example).
		cloud = getGCEClient(nil)
		glog.Infof("Created GCE client without a config file")
	}

	// Names are fundamental to the cluster; the uid allocator makes sure names don't collide.
	cluster := ClusterManager{ClusterNamer: &utils.Namer{name}}

	// NodePool stores the GCE VMs that are in this Kubernetes cluster.
	cluster.instancePool = instances.NewNodePool(cloud)

	// BackendPool creates GCE BackendServices and associated health checks.
	healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)
	// Loadbalancer pool manages the default backend and its health check.
	defaultBackendHealthChecker := healthchecks.NewHealthChecker(cloud, "/healthz", cluster.ClusterNamer)

	cluster.healthCheckers = []healthchecks.HealthChecker{healthChecker, defaultBackendHealthChecker}

	// TODO: This needs to change to consolidated management of the default backend.
	cluster.backendPool = backends.NewBackendPool(
		cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{defaultBackendNodePort}, true)
	defaultBackendPool := backends.NewBackendPool(
		cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{}, false)
	cluster.defaultBackendNodePort = defaultBackendNodePort

	// L7 pool creates targetHTTPProxy, ForwardingRules, UrlMaps, StaticIPs.
	cluster.l7Pool = loadbalancers.NewLoadBalancerPool(
		cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)
	cluster.firewallPool = firewalls.NewFirewallPool(cloud, cluster.ClusterNamer)
	return &cluster, nil
}
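
The comment in the else branch above refers to Go's typed-nil gotcha: a nil *os.File (or any other typed nil pointer) stored in an interface value does not compare equal to nil, so passing a nil *os.File to getGCEClient would defeat a plain nil check against its parameter. A standalone sketch of that behaviour (illustrative only, not part of glbc):

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	var config *os.File      // a typed nil pointer
	var r io.Reader = config // the interface now holds (type *os.File, value nil)

	fmt.Println(config == nil) // true
	fmt.Println(r == nil)      // false: the interface value itself is non-nil
}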