Example #1
// NewFakeClusterManager creates a new fake ClusterManager.
func NewFakeClusterManager(clusterName string) *fakeClusterManager {
	fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
	fakeBackends := backends.NewFakeBackendServices()
	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
	fakeHCs := healthchecks.NewFakeHealthChecks()
	namer := utils.NewNamer(clusterName)

	nodePool := instances.NewNodePool(fakeIGs)
	nodePool.Init(&instances.FakeZoneLister{[]string{"zone-a"}})

	healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
	healthChecker.Init(&healthchecks.FakeHealthCheckGetter{nil})

	backendPool := backends.NewBackendPool(
		fakeBackends,
		healthChecker, nodePool, namer, []int64{}, false)
	l7Pool := loadbalancers.NewLoadBalancerPool(
		fakeLbs,
		// TODO: change this
		backendPool,
		testDefaultBeNodePort,
		namer,
	)
	frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallRules(), namer)
	cm := &ClusterManager{
		ClusterNamer: namer,
		instancePool: nodePool,
		backendPool:  backendPool,
		l7Pool:       l7Pool,
		firewallPool: frPool,
	}
	return &fakeClusterManager{cm, fakeLbs, fakeBackends, fakeIGs}
}
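A minimal usage sketch (hypothetical, assuming it sits in the same package's test file; only the constructor shown above is exercised, since the fakeClusterManager field names are not visible here):

func TestNewFakeClusterManager(t *testing.T) {
	// Build a fully faked ClusterManager; no real GCE API calls are made.
	fcm := NewFakeClusterManager("test-cluster-uid")
	if fcm == nil {
		t.Fatal("expected a non-nil fake cluster manager")
	}
}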
Example #2
// NewClusterManager creates a cluster manager for shared resources.
// - namer: is the namer used to tag cluster-wide shared resources.
// - defaultBackendNodePort: is the node port of glbc's default backend. This is
//   the Kubernetes Service that serves the 404 page if no URLs match.
// - defaultHealthCheckPath: is the default path used for L7 health checks, e.g. "/healthz".
func NewClusterManager(
	configFilePath string,
	namer *utils.Namer,
	defaultBackendNodePort int64,
	defaultHealthCheckPath string) (*ClusterManager, error) {

	// TODO: Make this more resilient. Currently we create the cloud client
	// and pass it through to all the pools. This makes unit testing easier.
	// However, if the cloud client suddenly fails, we should try to re-create
	// it and continue.
	var cloud *gce.GCECloud
	if configFilePath != "" {
		glog.Infof("Reading config from path %v", configFilePath)
		config, err := os.Open(configFilePath)
		if err != nil {
			return nil, err
		}
		defer config.Close()
		cloud = getGCEClient(config)
		glog.Infof("Successfully loaded cloudprovider using config %q", configFilePath)
	} else {
		// While you might be tempted to refactor so we simply assign nil to the
		// config and only invoke getGCEClient once, that would not do the right
		// thing: in Go, an interface value holding a nil concrete pointer does
		// not compare equal to nil.
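		// Hypothetical illustration of that pitfall:
		//   var cfg *os.File        // nil concrete pointer
		//   var r io.Reader = cfg   // interface now holds (type=*os.File, value=nil)
		//   r == nil                // false: the interface itself is non-nil
		// so passing a typed-nil config here would not behave like an untyped nil.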
		cloud = getGCEClient(nil)
		glog.Infof("Created GCE client without a config file")
	}

	// Names are fundamental to the cluster; the uid allocator makes sure names don't collide.
	cluster := ClusterManager{ClusterNamer: namer}

	// NodePool stores GCE vms that are in this Kubernetes cluster.
	cluster.instancePool = instances.NewNodePool(cloud)

	// BackendPool creates GCE BackendServices and associated health checks.
	healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)
	// Loadbalancer pool manages the default backend and its health check.
	defaultBackendHealthChecker := healthchecks.NewHealthChecker(cloud, "/healthz", cluster.ClusterNamer)

	cluster.healthCheckers = []healthchecks.HealthChecker{healthChecker, defaultBackendHealthChecker}

	// TODO: This needs to change to a consolidated management of the default backend.
	cluster.backendPool = backends.NewBackendPool(
		cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{defaultBackendNodePort}, true)
	defaultBackendPool := backends.NewBackendPool(
		cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{}, false)
	cluster.defaultBackendNodePort = defaultBackendNodePort

	// L7 pool creates targetHTTPProxy, ForwardingRules, UrlMaps, StaticIPs.
	cluster.l7Pool = loadbalancers.NewLoadBalancerPool(
		cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)
	cluster.firewallPool = firewalls.NewFirewallPool(cloud, cluster.ClusterNamer)
	return &cluster, nil
}
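A hedged wiring sketch showing how a caller might construct the ClusterManager at startup (the function name, literal values, and the glog.Fatalf handling are illustrative assumptions, not taken from the real glbc flags):

func newClusterManagerOrDie(namer *utils.Namer) *ClusterManager {
	cm, err := NewClusterManager(
		"",           // configFilePath: empty string builds the GCE client without a config file
		namer,        // tags cluster-wide shared resources
		int64(30301), // hypothetical node port of the 404 default backend Service
		"/healthz",   // default path used for L7 health checks
	)
	if err != nil {
		glog.Fatalf("failed to create cluster manager: %v", err)
	}
	return cm
}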