Example No. 1
func TestBackendPoolAdd(t *testing.T) {
	f := NewFakeBackendServices()
	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
	pool := newBackendPool(f, fakeIGs, false)
	namer := utils.Namer{}

	// Add a backend for a port, then re-add the same port and
	// make sure it corrects a broken link from the backend to
	// the instance group.
	nodePort := int64(8080)
	pool.Add(nodePort)
	beName := namer.BeName(nodePort)

	// Check that the new backend has the right port
	be, err := f.GetBackendService(beName)
	if err != nil {
		t.Fatalf("Did not find expected backend %v", beName)
	}
	if be.Port != nodePort {
		t.Fatalf("Backend %v has wrong port %v, expected %v", be.Name, be.Port, nodePort)
	}
	// Check that the instance group has the new port
	var found bool
	for _, port := range fakeIGs.Ports {
		if port == nodePort {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("Port %v not added to instance group", nodePort)
	}

	// Mess up the link between backend service and instance group.
	// This simulates a user doing foolish things through the UI.
	f.calls = []int{}
	be, err = f.GetBackendService(beName)
	if err != nil {
		t.Fatalf("Did not find expected backend %v", beName)
	}
	be.Backends[0].Group = "test edge hop"
	f.UpdateBackendService(be)

	pool.Add(nodePort)
	for _, call := range f.calls {
		if call == utils.Create {
			t.Fatalf("Unexpected create for existing backend service")
		}
	}
	gotBackend, err := f.GetBackendService(beName)
	if err != nil {
		t.Fatalf("Did not find expected backend %v", beName)
	}
	gotGroup, err := fakeIGs.GetInstanceGroup(namer.IGName(), "default-zone")
	if err != nil {
		t.Fatalf("Did not find expected instance group %v", namer.IGName())
	}
	if gotBackend.Backends[0].Group != gotGroup.SelfLink {
		t.Fatalf(
			"Broken instance group link: %v %v",
			gotBackend.Backends[0].Group,
			gotGroup.SelfLink)
	}
}
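The repair path this test exercises boils down to a single invariant: after Add, every backend in the service must point at the cluster's instance group again. The helper below is a hypothetical sketch of that invariant, not the controller's actual implementation.

// repairGroupLinks is a hypothetical sketch of the repair the test above
// exercises: re-point any backend whose Group link was broken (e.g. by a
// user editing through the UI) at the canonical instance group.
func repairGroupLinks(be *compute.BackendService, igSelfLink string) {
	for i := range be.Backends {
		if be.Backends[i].Group != igSelfLink {
			be.Backends[i].Group = igSelfLink
		}
	}
}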
Example No. 2
func TestBackendPoolShutdown(t *testing.T) {
	f := NewFakeBackendServices()
	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
	pool := newBackendPool(f, fakeIGs, false)
	namer := utils.Namer{}

	pool.Add(80)
	pool.Shutdown()
	if _, err := f.GetBackendService(namer.BeName(80)); err == nil {
		t.Fatalf("Expected backend %v to be deleted on shutdown", namer.BeName(80))
	}
}
Example No. 3
// getClusterUID returns the cluster UID. Rules for UID generation:
// if the user specifies a --cluster-uid param, it overrides everything else;
// else, check the UID config map for a previously recorded uid;
// else, check if there are any working Ingresses
//	- remember that "" is a valid cluster uid;
// else, allocate a new uid.
func getClusterUID(kubeClient *client.Client, name string) (string, error) {
	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
	if name != "" {
		glog.Infof("Using user provided cluster uid %v", name)
		// Don't save the uid in the vault, so users can rollback through
		// --cluster-uid=""
		return name, nil
	}

	existingUID, found, err := cfgVault.Get()
	if found {
		glog.Infof("Using saved cluster uid %q", existingUID)
		return existingUID, nil
	} else if err != nil {
		// This can fail because of:
		// 1. No such config map - found=false, err=nil
		// 2. No such key in config map - found=false, err=nil
		// 3. Apiserver flake - found=false, err!=nil
		// It is not safe to proceed in 3.
		return "", fmt.Errorf("Failed to retrieve current uid: %v, using %q as name", err, name)
	}

	// Check if the cluster has an Ingress with an IP
	ings, err := kubeClient.Extensions().Ingress(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
	if err != nil {
		return "", err
	}
	namer := utils.Namer{}
	for _, ing := range ings.Items {
		if len(ing.Status.LoadBalancer.Ingress) != 0 {
			c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule"))
			if c.ClusterName != "" {
				return c.ClusterName, cfgVault.Put(c.ClusterName)
			}
			glog.Infof("Found a working Ingress, assuming uid is empty string")
			return "", cfgVault.Put("")
		}
	}

	// Allocate new uid
	f, err := os.Open("/dev/urandom")
	if err != nil {
		return "", err
	}
	defer f.Close()
	b := make([]byte, 8)
	if _, err := f.Read(b); err != nil {
		return "", err
	}
	uid := fmt.Sprintf("%x", b)
	return uid, cfgVault.Put(uid)
}
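Reading /dev/urandom directly works, but is Linux-specific. Below is a minimal sketch of the same 8-byte hex uid generation using the standard library's crypto/rand, which is portable; this is an alternative for illustration, not the code above.

// randomUID sketches a portable variant of the uid allocation above:
// crypto/rand.Read draws from the platform CSPRNG, so no /dev/urandom
// file handling is needed.
func randomUID() (string, error) {
	b := make([]byte, 8)
	if _, err := rand.Read(b); err != nil { // import "crypto/rand"
		return "", err
	}
	return fmt.Sprintf("%x", b), nil
}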
Example No. 4
func TestBackendInstanceGroupClobbering(t *testing.T) {
	f := NewFakeBackendServices()
	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
	pool := newBackendPool(f, fakeIGs, false)
	namer := utils.Namer{}

	// This will add the instance group k8s-ig to the instance pool
	pool.Add(80)

	be, err := f.GetBackendService(namer.BeName(80))
	if err != nil {
		t.Fatalf("%v", err)
	}
	// Simulate another controller updating the same backend service with
	// a different instance group
	newGroups := []*compute.Backend{
		{Group: "k8s-ig-bar"},
		{Group: "k8s-ig-foo"},
	}
	be.Backends = append(be.Backends, newGroups...)
	if err := f.UpdateBackendService(be); err != nil {
		t.Fatalf("Failed to update backend service %v", be.Name)
	}

	// Make sure repeated adds don't clobber the inserted instance group
	pool.Add(80)
	be, err = f.GetBackendService(namer.BeName(80))
	if err != nil {
		t.Fatalf("%v", err)
	}
	gotGroups := sets.NewString()
	for _, g := range be.Backends {
		gotGroups.Insert(g.Group)
	}

	// seed expectedGroups with the first group native to this controller
	expectedGroups := sets.NewString("k8s-ig")
	for _, newGroup := range newGroups {
		expectedGroups.Insert(newGroup.Group)
	}
	if !expectedGroups.Equal(gotGroups) {
		t.Fatalf("Expected %v Got %v", expectedGroups, gotGroups)
	}
}
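The non-clobbering behavior verified here amounts to a set union: Add must keep backends inserted by other controllers and only ensure its own instance group is present. The helper below is a hypothetical sketch of that merge, not the package's API.

// ensureOwnGroup sketches the merge the test verifies: append this
// controller's instance group only if it is missing, leaving groups
// owned by other controllers untouched.
func ensureOwnGroup(be *compute.BackendService, igSelfLink string) {
	for _, b := range be.Backends {
		if b.Group == igSelfLink {
			return // already linked, nothing to do
		}
	}
	be.Backends = append(be.Backends, &compute.Backend{Group: igSelfLink})
}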
Example No. 5
// NewBackendPool returns a new backend pool.
// - cloud: implements BackendServices and syncs backends with a cloud provider
// - healthChecker: is capable of producing health checks for backends.
// - nodePool: implements NodePool, used to create/delete new instance groups.
// - namer: produces names for backends.
// - ignorePorts: is a set of ports to avoid syncing/GCing.
// - resyncWithCloud: if true, periodically syncs with cloud resources.
func NewBackendPool(
	cloud BackendServices,
	healthChecker healthchecks.HealthChecker,
	nodePool instances.NodePool,
	namer *utils.Namer,
	ignorePorts []int64,
	resyncWithCloud bool) *Backends {

	ignored := []string{}
	for _, p := range ignorePorts {
		ignored = append(ignored, portKey(p))
	}
	backendPool := &Backends{
		cloud:         cloud,
		nodePool:      nodePool,
		healthChecker: healthChecker,
		namer:         namer,
		ignoredPorts:  sets.NewString(ignored...),
	}
	if !resyncWithCloud {
		backendPool.snapshotter = storage.NewInMemoryPool()
		return backendPool
	}
	backendPool.snapshotter = storage.NewCloudListingPool(
		func(i interface{}) (string, error) {
			bs := i.(*compute.BackendService)
			if !namer.NameBelongsToCluster(bs.Name) {
				return "", fmt.Errorf("Unrecognized name %v", bs.Name)
			}
			port, err := namer.BePort(bs.Name)
			if err != nil {
				return "", err
			}
			return port, nil
		},
		backendPool,
		30*time.Second,
	)
	return backendPool
}
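For reference, the tests in the earlier examples reach this constructor through a newBackendPool helper. Wiring NewBackendPool directly looks roughly like the sketch below; the fake health-check and node-pool constructors are assumptions for illustration, not confirmed signatures.

// Sketch only: the health checker and node pool fakes below are assumed
// names mirroring the fakes used in the tests above.
namer := &utils.Namer{}
pool := NewBackendPool(
	NewFakeBackendServices(),
	healthchecks.NewHealthChecker(healthchecks.NewFakeHealthChecks(), "/", namer),
	instances.NewNodePool(instances.NewFakeInstanceGroups(sets.NewString()), "default-zone"),
	namer,
	[]int64{}, // no ports to ignore
	false,     // in-memory snapshotter, no periodic cloud resync
)
pool.Add(80) // ensures a backend service for node port 80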