Example #1
func newNamer(kubeClient *client.Client, clusterName string) (*utils.Namer, error) {
	name, err := getClusterUID(kubeClient, clusterName)
	if err != nil {
		return nil, err
	}

	namer := utils.NewNamer(name)
	vault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)

	// Start a goroutine to poll the cluster UID config map.
	// We poll instead of watch because we know exactly which config map we
	// want, and this controller already watches 5 other resources, so the
	// extra watch connection and complexity aren't worth it.
	go wait.Forever(func() {
		uid, found, err := vault.Get()
		existing := namer.GetClusterName()
		if found && uid != existing {
			glog.Infof("Cluster uid changed from %v -> %v", existing, uid)
			namer.SetClusterName(uid)
		} else if err != nil {
			glog.Errorf("Failed to reconcile cluster uid %v, currently set to %v", err, existing)
		}
	}, 5*time.Second)
	return namer, nil
}
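
For context, here is a minimal sketch of how newNamer might be wired into controller startup. Only the newNamer call itself comes from the snippet above; the client setup and the flag supplying clusterName are assumptions for illustration.

// Sketch only: assumes kubeClient was built earlier (e.g. from an in-cluster
// config) and clusterName holds the value of a --cluster-uid style flag.
namer, err := newNamer(kubeClient, clusterName)
if err != nil {
	glog.Fatalf("Failed to create cluster namer: %v", err)
}
// From here the namer is handed to the rest of the controller; the polling
// goroutine started inside newNamer keeps its cluster name in sync with
// the config map.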
Example #2
// getClusterUID returns the cluster UID. Rules for UID generation:
// If the user specifies a --cluster-uid param, it overrides everything;
// else, check the UID config map for a previously recorded uid;
// else, check if there are any working Ingresses
//	- remember that "" is a valid cluster uid;
// else, allocate a new uid.
func getClusterUID(kubeClient *client.Client, name string) (string, error) {
	cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
	if name != "" {
		glog.Infof("Using user provided cluster uid %v", name)
		// Don't save the uid in the vault, so users can rollback through
		// --cluster-uid=""
		return name, nil
	}

	existingUID, found, err := cfgVault.Get()
	if found {
		glog.Infof("Using saved cluster uid %q", existingUID)
		return existingUID, nil
	} else if err != nil {
		// This can fail because of:
		// 1. No such config map - found=false, err=nil
		// 2. No such key in config map - found=false, err=nil
		// 3. Apiserver flake - found=false, err!=nil
		// It is not safe to proceed in 3.
		return "", fmt.Errorf("Failed to retrieve current uid: %v, using %q as name", err, name)
	}

	// Check if the cluster has an Ingress with an assigned IP.
	ings, err := kubeClient.Extensions().Ingress(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
	if err != nil {
		return "", err
	}
	namer := utils.Namer{}
	for _, ing := range ings.Items {
		if len(ing.Status.LoadBalancer.Ingress) != 0 {
			// A working Ingress exists; recover the uid embedded in the
			// name of its GCE forwarding rule.
			c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule"))
			if c.ClusterName != "" {
				return c.ClusterName, cfgVault.Put(c.ClusterName)
			}
			glog.Infof("Found a working Ingress, assuming uid is empty string")
			return "", cfgVault.Put("")
		}
	}

	// Allocate a new uid: 8 random bytes from /dev/urandom, hex-encoded
	// into a 16-character string.
	f, err := os.Open("/dev/urandom")
	if err != nil {
		return "", err
	}
	defer f.Close()
	b := make([]byte, 8)
	// io.ReadFull guards against a short read, unlike a bare f.Read.
	if _, err := io.ReadFull(f, b); err != nil {
		return "", err
	}
	uid := fmt.Sprintf("%x", b)
	return uid, cfgVault.Put(uid)
}
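
Both examples lean on a small key-value contract from the storage package. Reconstructed purely from the calls above (not from the actual declaration), the relevant surface of storage.ConfigMapVault looks roughly like this:

// Reconstructed from usage above; the real type in the storage package
// may carry more methods and differ in detail.
type uidVault interface {
	// Get returns the stored uid, whether it was found, and any API error.
	// A missing config map or key yields found=false with err=nil, so only
	// err != nil (an apiserver flake) is treated as fatal by callers.
	Get() (uid string, found bool, err error)
	// Put records the uid in the config map so controller restarts reuse it.
	Put(uid string) error
}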