// NewSharedContextEvaluator creates an evaluator object that allows context to be shared while computing the
// usage of a single namespace. The context is represented by an object returned by usageComputerFactory and is
// destroyed once the namespace has been processed.
func NewSharedContextEvaluator(
	name string,
	groupKind unversioned.GroupKind,
	operationResources map[admission.Operation][]kapi.ResourceName,
	matchedResourceNames []kapi.ResourceName,
	matchesScopeFunc generic.MatchesScopeFunc,
	getFuncByNamespace generic.GetFuncByNamespace,
	listFuncByNamespace generic.ListFuncByNamespace,
	constraintsFunc generic.ConstraintsFunc,
	usageComputerFactory UsageComputerFactory,
) quota.Evaluator {

	rnSet := sets.String{}
	for _, resourceNames := range operationResources {
		rnSet.Insert(quota.ToSet(resourceNames).List()...)
	}

	return &SharedContextEvaluator{
		GenericEvaluator: &generic.GenericEvaluator{
			Name:                       name,
			InternalGroupKind:          groupKind,
			InternalOperationResources: operationResources,
			MatchedResourceNames:       matchedResourceNames,
			MatchesScopeFunc:           matchesScopeFunc,
			GetFuncByNamespace:         getFuncByNamespace,
			ListFuncByNamespace:        listFuncByNamespace,
			ConstraintsFunc:            constraintsFunc,
			UsageFunc: func(object runtime.Object) kapi.ResourceList {
				comp := usageComputerFactory()
				return comp.Usage(object)
			},
		},
		UsageComputerFactory: usageComputerFactory,
	}
}
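A minimal, self-contained sketch of that factory pattern: each call to the factory produces a fresh computer whose state is shared across all objects of one namespace and discarded afterwards. The usageComputer type and the namespaces/objects below are simplified stand-ins for illustration, not the real quota types (the real UsageComputer works on runtime.Object and returns a kapi.ResourceList).

package main

import "fmt"

// usageComputer is a stand-in for the real UsageComputer: it accumulates
// per-namespace state while usage is computed.
type usageComputer struct {
	objectsSeen int
}

func (c *usageComputer) Usage(object string) map[string]int64 {
	c.objectsSeen++ // shared context: state persists across objects of one namespace
	return map[string]int64{"count": 1}
}

// usageComputerFactory plays the role of UsageComputerFactory: one fresh
// computer per namespace.
func usageComputerFactory() *usageComputer {
	return &usageComputer{}
}

func main() {
	for _, namespace := range []string{"dev", "prod"} {
		comp := usageComputerFactory() // context created when the namespace starts
		for _, obj := range []string{"pod-a", "pod-b", "pod-c"} {
			_ = comp.Usage(obj)
		}
		fmt.Printf("namespace %s: usage computed for %d objects\n", namespace, comp.objectsSeen)
		// comp goes out of scope here: the context dies with the namespace
	}
}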
Example #2
// String formats the given PodDiff as a human-readable summary, skipping pods whose phase is in ignorePhases.
func (p PodDiff) String(ignorePhases sets.String) string {
	ret := ""
	for name, info := range p {
		if ignorePhases.Has(info.phase) {
			continue
		}
		if info.phase == nonExist {
			ret += fmt.Sprintf("Pod %v was deleted, had phase %v and host %v\n", name, info.oldPhase, info.oldHostname)
			continue
		}
		phaseChange, hostChange := false, false
		msg := fmt.Sprintf("Pod %v ", name)
		if info.oldPhase != info.phase {
			phaseChange = true
			if info.oldPhase == nonExist {
				msg += fmt.Sprintf("in phase %v ", info.phase)
			} else {
				msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
			}
		}
		if info.oldHostname != info.hostname {
			hostChange = true
			if info.oldHostname == nonExist || info.oldHostname == "" {
				msg += fmt.Sprintf("assigned host %v ", info.hostname)
			} else {
				msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
			}
		}
		if phaseChange || hostChange {
			ret += msg + "\n"
		}
	}
	return ret
}
Example #3
// filterInvalidPods drops pods that fail validation or duplicate the full name of an earlier
// pod from the same source, logging a warning and recording an event for each pod dropped.
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := sets.String{}
	for i, pod := range pods {
		var errlist field.ErrorList
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				// TODO: when validation becomes versioned, this gets a bit
				// more complicated.
				errlist = append(errlist, field.Duplicate(field.NewPath("metadata", "name"), pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := errlist.ToAggregate()
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
Example #4
func getPullSecretNames(serviceaccount *kapi.ServiceAccount) sets.String {
	names := sets.String{}
	for _, secret := range serviceaccount.ImagePullSecrets {
		names.Insert(secret.Name)
	}
	return names
}
Example #5
// Index returns a list of items that match on the index function
// Index is thread-safe so long as you treat all items as immutable
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	indexKeys, err := indexFunc(obj)
	if err != nil {
		return nil, err
	}
	index := c.indices[indexName]

	// need to de-dupe the return list.  Since multiple keys are allowed, this can happen.
	returnKeySet := sets.String{}
	for _, indexKey := range indexKeys {
		set := index[indexKey]
		for _, key := range set.List() {
			returnKeySet.Insert(key)
		}
	}

	list := make([]interface{}, 0, returnKeySet.Len())
	for absoluteKey := range returnKeySet {
		list = append(list, c.items[absoluteKey])
	}
	return list, nil
}
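The de-dupe step is the core trick here: one object can map to several index keys, so the same storage key can surface more than once. A self-contained sketch of that pattern, assuming the pre-apimachinery import path k8s.io/kubernetes/pkg/util/sets that these snippets appear to use; the index contents are made up.

package main

import (
	"fmt"

	// assumed pre-apimachinery import path for sets.String
	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	// index maps an index key to the storage keys of the objects that match it
	// (a simplified stand-in for c.indices[indexName]).
	index := map[string]sets.String{
		"label=a": sets.NewString("pod-1", "pod-2"),
		"label=b": sets.NewString("pod-2", "pod-3"),
	}

	// An object can match several index keys, so the same storage key may show
	// up more than once; collecting into a set de-dupes the result.
	returnKeySet := sets.String{}
	for _, indexKey := range []string{"label=a", "label=b"} {
		returnKeySet.Insert(index[indexKey].List()...)
	}
	fmt.Println(returnKeySet.List()) // [pod-1 pod-2 pod-3]
}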
Example #6
// filterInvalidPods drops pods that fail validation or duplicate the full name of an earlier
// pod from the same source, logging a warning and recording an event for each pod dropped.
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := sets.String{}
	for i, pod := range pods {
		var errlist []error
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := utilerrors.NewAggregate(errlist)
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
Example #7
func (t *tcShaper) nextClassID() (int, error) {
	data, err := t.e.Command("tc", "class", "show", "dev", t.iface).CombinedOutput()
	if err != nil {
		return -1, err
	}

	scanner := bufio.NewScanner(bytes.NewBuffer(data))
	classes := sets.String{}
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		// skip empty lines
		if len(line) == 0 {
			continue
		}
		parts := strings.Split(line, " ")
		// expected tc line:
		// class htb 1:1 root prio 0 rate 1000Kbit ceil 1000Kbit burst 1600b cburst 1600b
		if len(parts) != 14 {
			return -1, fmt.Errorf("unexpected output from tc: %s (%v)", scanner.Text(), parts)
		}
		classes.Insert(parts[2])
	}

	// Make sure it doesn't go forever
	for nextClass := 1; nextClass < 10000; nextClass++ {
		if !classes.Has(fmt.Sprintf("1:%d", nextClass)) {
			return nextClass, nil
		}
	}
	// This should really never happen
	return -1, fmt.Errorf("exhausted class space, please try again")
}
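The final loop is a first-free-slot search over the set of class IDs already in use; a stripped-down sketch with fabricated classes (same sets import assumption as in the earlier sketch).

package main

import (
	"fmt"

	// assumed pre-apimachinery import path for sets.String
	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	// classes already configured on the interface (made up for illustration)
	classes := sets.NewString("1:1", "1:2", "1:4")
	for nextClass := 1; nextClass < 10000; nextClass++ {
		if !classes.Has(fmt.Sprintf("1:%d", nextClass)) {
			fmt.Println("next free class:", nextClass) // prints 3
			break
		}
	}
}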
Example #8
// Expand takes aliases and expands them into owner lists.
func (a *Aliases) Expand(toExpand sets.String) sets.String {
	expanded := sets.String{}
	for _, owner := range toExpand.List() {
		expanded.Insert(a.resolve(owner)...)
	}
	return expanded
}
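A sketch of the expansion idea with a stand-in resolve helper; the real Aliases type keeps its alias-to-owners mapping and unexported resolve method internally, so the map and names below are hypothetical.

package main

import (
	"fmt"

	// assumed pre-apimachinery import path for sets.String
	"k8s.io/kubernetes/pkg/util/sets"
)

// resolve is a stand-in for the unexported Aliases.resolve: it returns the
// owners an alias expands to, or the name itself if it is not an alias.
func resolve(owner string, aliases map[string][]string) []string {
	if owners, ok := aliases[owner]; ok {
		return owners
	}
	return []string{owner}
}

func main() {
	aliases := map[string][]string{"sig-node": {"alice", "bob"}}
	toExpand := sets.NewString("sig-node", "carol")

	expanded := sets.String{}
	for _, owner := range toExpand.List() {
		expanded.Insert(resolve(owner, aliases)...)
	}
	fmt.Println(expanded.List()) // [alice bob carol]
}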
Example #9
// nameMatches checks whether the resourceName of the action is in the specified whitelist.  An empty whitelist indicates that any name is allowed.
// An empty string in the whitelist should only match the action's resourceName if the resourceName itself is the empty string.  This behavior allows for the
// combination of a whitelist for gets in the same rule as a list that won't have a resourceName.  I don't recommend writing such a rule, but we do
// handle it like you'd expect: the whitelist is respected for gets while not preventing the list you explicitly asked for.
func (a DefaultAuthorizationAttributes) nameMatches(allowedResourceNames sets.String) bool {
	if len(allowedResourceNames) == 0 {
		return true
	}

	return allowedResourceNames.Has(a.GetResourceName())
}
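A small illustration of the whitelist semantics the comment describes, written against sets.String directly; the resource names are made up (same sets import assumption as above).

package main

import (
	"fmt"

	// assumed pre-apimachinery import path for sets.String
	"k8s.io/kubernetes/pkg/util/sets"
)

// nameMatches mirrors the rule above: an empty whitelist allows any name;
// otherwise the name must be present, and "" only matches "".
func nameMatches(allowedResourceNames sets.String, resourceName string) bool {
	if len(allowedResourceNames) == 0 {
		return true
	}
	return allowedResourceNames.Has(resourceName)
}

func main() {
	fmt.Println(nameMatches(sets.String{}, "anything"))        // true: empty whitelist
	fmt.Println(nameMatches(sets.NewString("frontend"), "db")) // false: not whitelisted
	fmt.Println(nameMatches(sets.NewString(""), ""))           // true: "" matches only ""
}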
Example #10
func (config *GithubConfig) UsersWithCommit() ([]string, error) {
	userSet := sets.String{}

	teams, err := config.fetchAllTeams()
	if err != nil {
		glog.Errorf("%v", err)
		return nil, err
	}

	teamIDs := []int{}
	for _, team := range teams {
		repo, _, err := config.client.Organizations.IsTeamRepo(*team.ID, config.Org, config.Project)
		if repo == nil || err != nil {
			continue
		}
		perms := *repo.Permissions
		if perms["push"] {
			teamIDs = append(teamIDs, *team.ID)
		}
	}

	for _, team := range teamIDs {
		users, err := config.fetchAllUsers(team)
		if err != nil {
			glog.Errorf("%v", err)
			continue
		}
		for _, user := range users {
			userSet.Insert(*user.Login)
		}
	}

	return userSet.List(), nil
}
Example #11
// dialFromNode executes a tcp or udp request based on protocol via kubectl exec
// in a test container running with host networking.
// - minTries is the minimum number of curl attempts required before declaring
//   success. Set to 0 if you'd like to return as soon as all endpoints respond
//   at least once.
// - maxTries is the maximum number of curl attempts. If this many attempts pass
//   and we don't see all expected endpoints, the test fails.
// maxTries == minTries will confirm that we see the expected endpoints and no
// more for maxTries. Use this if you want to, e.g., fail a readiness check on a
// pod and confirm it doesn't show up as an endpoint.
func (config *NetworkingTestConfig) dialFromNode(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
	var cmd string
	if protocol == "udp" {
		cmd = fmt.Sprintf("echo 'hostName' | timeout -t 3 nc -w 1 -u %s %d", targetIP, targetPort)
	} else {
		cmd = fmt.Sprintf("curl -q -s --connect-timeout 1 http://%s:%d/hostName", targetIP, targetPort)
	}

	// TODO: This simply tells us that we can reach the endpoints. Check that
	// the probability of hitting a specific endpoint is roughly the same as
	// hitting any other.
	eps := sets.NewString()

	filterCmd := fmt.Sprintf("%s | grep -v '^\\s*$'", cmd)
	for i := 0; i < maxTries; i++ {
		stdout, err := framework.RunHostCmd(config.ns, config.hostTestContainerPod.Name, filterCmd)
		if err != nil {
			// A failure to kubectl exec counts as a try, not a hard fail.
			// Also note that we will keep failing for maxTries in tests where
			// we confirm unreachability.
			framework.Logf("Failed to execute %v: %v", filterCmd, err)
		} else {
			eps.Insert(strings.TrimSpace(stdout))
		}
		framework.Logf("Waiting for %+v endpoints, got endpoints %+v", expectedEps.Difference(eps), eps)

		// Check against i+1 so we exit if minTries == maxTries.
		if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
			return
		}
	}

	config.diagnoseMissingEndpoints(eps)
	framework.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", minTries, cmd, eps, expectedEps)
}
Example #12
func parseMetrics(data string, knownMetrics map[string][]string, output *Metrics, unknownMetrics sets.String) error {
	dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText)
	decoder := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{},
	}

	for {
		var v model.Vector
		if err := decoder.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return nil
			}
			glog.Warningf("Invalid Decode. Skipping.")
			continue
		}
		for _, metric := range v {
			name := string(metric.Metric[model.MetricNameLabel])
			_, isCommonMetric := CommonMetrics[name]
			_, isKnownMetric := knownMetrics[name]
			if isKnownMetric || isCommonMetric {
				(*output)[name] = append((*output)[name], metric)
			} else {
				if unknownMetrics != nil {
					unknownMetrics.Insert(name)
				}
			}
		}
	}
}
Example #13
// Pass ports=nil for all ports.
func formatEndpoints(endpoints *api.Endpoints, ports sets.String) string {
	if len(endpoints.Subsets) == 0 {
		return "<none>"
	}
	list := []string{}
	max := 3
	more := false
	count := 0
	for i := range endpoints.Subsets {
		ss := &endpoints.Subsets[i]
		for i := range ss.Ports {
			port := &ss.Ports[i]
			if ports == nil || ports.Has(port.Name) {
				for i := range ss.Addresses {
					if len(list) == max {
						more = true
					}
					addr := &ss.Addresses[i]
					if !more {
						list = append(list, fmt.Sprintf("%s:%d", addr.IP, port.Port))
					}
					count++
				}
			}
		}
	}
	ret := strings.Join(list, ",")
	if more {
		return fmt.Sprintf("%s + %d more...", ret, count-max)
	}
	return ret
}
Example #14
// ScopesToVisibleNamespaces returns a list of namespaces that the provided scopes have "get" access to.
// This exists only to support efficient list/watch of projects (ACLed namespaces).
func ScopesToVisibleNamespaces(scopes []string, clusterPolicyGetter client.ClusterPolicyLister) (sets.String, error) {
	if len(scopes) == 0 {
		return sets.NewString("*"), nil
	}

	visibleNamespaces := sets.String{}

	errors := []error{}
	for _, scope := range scopes {
		found := false

		for _, evaluator := range ScopeEvaluators {
			if evaluator.Handles(scope) {
				found = true
				allowedNamespaces, err := evaluator.ResolveGettableNamespaces(scope, clusterPolicyGetter)
				if err != nil {
					errors = append(errors, err)
					continue
				}

				visibleNamespaces.Insert(allowedNamespaces...)
				break
			}
		}

		if !found {
			errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
		}
	}

	return visibleNamespaces, kutilerrors.NewAggregate(errors)
}
Example #15
// List returns the set of namespace names the user has access to view
func (ac *AuthorizationCache) List(userInfo user.Info) (*kapi.NamespaceList, error) {
	keys := sets.String{}
	user := userInfo.GetName()
	groups := userInfo.GetGroups()

	obj, exists, _ := ac.userSubjectRecordStore.GetByKey(user)
	if exists {
		subjectRecord := obj.(*subjectRecord)
		keys.Insert(subjectRecord.namespaces.List()...)
	}

	for _, group := range groups {
		obj, exists, _ := ac.groupSubjectRecordStore.GetByKey(group)
		if exists {
			subjectRecord := obj.(*subjectRecord)
			keys.Insert(subjectRecord.namespaces.List()...)
		}
	}

	namespaceList := &kapi.NamespaceList{}
	for key := range keys {
		namespace, exists, err := ac.namespaceStore.GetByKey(key)
		if err != nil {
			return nil, err
		}
		if exists {
			namespaceList.Items = append(namespaceList.Items, *namespace.(*kapi.Namespace))
		}
	}
	return namespaceList, nil
}
Example #16
func (c *MasterConfig) defaultAPIGroupVersion() *apiserver.APIGroupVersion {
	var restMapper meta.MultiRESTMapper
	seenGroups := sets.String{}
	for _, gv := range registered.EnabledVersions() {
		if seenGroups.Has(gv.Group) {
			continue
		}
		seenGroups.Insert(gv.Group)

		groupMeta, err := registered.Group(gv.Group)
		if err != nil {
			continue
		}
		restMapper = meta.MultiRESTMapper(append(restMapper, groupMeta.RESTMapper))
	}

	statusMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{kubeapiv1.SchemeGroupVersion}, registered.GroupOrDie(kapi.GroupName).InterfacesFor)
	statusMapper.Add(kubeapiv1.SchemeGroupVersion.WithKind("Status"), meta.RESTScopeRoot)
	restMapper = meta.MultiRESTMapper(append(restMapper, statusMapper))

	return &apiserver.APIGroupVersion{
		Root: OpenShiftAPIPrefix,

		Mapper: restMapper,

		Creater:   kapi.Scheme,
		Typer:     kapi.Scheme,
		Convertor: kapi.Scheme,
		Linker:    registered.GroupOrDie("").SelfLinker,

		Admit:                       c.AdmissionControl,
		Context:                     c.getRequestContextMapper(),
		SubresourceGroupVersionKind: map[string]unversioned.GroupVersionKind{},
	}
}
Example #17
// GetServerCertHostnames returns the set of hostnames and IP addresses a serving certificate for a node on this host might need to be valid for.
func (args NodeArgs) GetServerCertHostnames() (sets.String, error) {
	allHostnames := sets.NewString(args.NodeName)

	listenIP := net.ParseIP(args.ListenArg.ListenAddr.Host)
	// add the IPs that might be used based on the ListenAddr.
	if listenIP != nil && listenIP.IsUnspecified() {
		allAddresses, _ := cmdutil.AllLocalIP4()
		for _, ip := range allAddresses {
			allHostnames.Insert(ip.String())
		}
	} else {
		allHostnames.Insert(args.ListenArg.ListenAddr.Host)
	}

	certHostnames := sets.String{}
	for hostname := range allHostnames {
		if host, _, err := net.SplitHostPort(hostname); err == nil {
			// add the hostname without the port
			certHostnames.Insert(host)
		} else {
			// add the originally specified hostname
			certHostnames.Insert(hostname)
		}
	}

	return certHostnames, nil
}
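The host extraction leans on net.SplitHostPort, which returns an error for a bare hostname without a port, hence the fallback branch. A short sketch with made-up hostnames (same sets import assumption as above):

package main

import (
	"fmt"
	"net"

	// assumed pre-apimachinery import path for sets.String
	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	certHostnames := sets.String{}
	for _, hostname := range []string{"node1.example.com:10250", "node1.example.com", "10.0.0.5:8443"} {
		if host, _, err := net.SplitHostPort(hostname); err == nil {
			certHostnames.Insert(host) // hostname carried a port; keep only the host part
		} else {
			certHostnames.Insert(hostname) // bare hostname, keep as-is
		}
	}
	fmt.Println(certHostnames.List()) // [10.0.0.5 node1.example.com]
}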
Example #18
func (m *DefaultRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) {
	kinds, err := m.KindsFor(resource)
	if err != nil {
		return unversioned.GroupVersionKind{}, err
	}

	// TODO for each group, choose the most preferred (first) version.  This keeps us consistent with code today.
	// eventually, we'll need a RESTMapper that is aware of what's available server-side and deconflicts that with
	// user preferences
	oneKindPerGroup := []unversioned.GroupVersionKind{}
	groupsAdded := sets.String{}
	for _, kind := range kinds {
		if groupsAdded.Has(kind.Group) {
			continue
		}

		oneKindPerGroup = append(oneKindPerGroup, kind)
		groupsAdded.Insert(kind.Group)
	}

	if len(oneKindPerGroup) == 1 {
		return oneKindPerGroup[0], nil
	}

	return unversioned.GroupVersionKind{}, fmt.Errorf("%v is ambiguous, got: %v", resource, kinds)
}
Example #19
func (r *templateRouter) FilterNamespaces(namespaces sets.String) {
	r.lock.Lock()
	defer r.lock.Unlock()

	if len(namespaces) == 0 {
		r.state = make(map[string]ServiceAliasConfig)
		r.serviceUnits = make(map[string]ServiceUnit)
	}
	for k := range r.serviceUnits {
		// TODO: the id of a service unit should be defined inside this class, not passed in from the outside
		//   remove the leak of the abstraction when we refactor this code
		ns := strings.SplitN(k, "/", 2)[0]
		if namespaces.Has(ns) {
			continue
		}
		delete(r.serviceUnits, k)
	}

	for k := range r.state {
		ns := strings.SplitN(k, "_", 2)[0]
		if namespaces.Has(ns) {
			continue
		}
		delete(r.state, k)
	}
}
Example #20
func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, sets.String, error) {
	g := osgraph.New()

	loaders := []GraphLoader{
		&serviceLoader{namespace: namespace, lister: d.K},
		&serviceAccountLoader{namespace: namespace, lister: d.K},
		&secretLoader{namespace: namespace, lister: d.K},
		&rcLoader{namespace: namespace, lister: d.K},
		&podLoader{namespace: namespace, lister: d.K},
		// TODO check swagger for feature enablement and selectively add bcLoader and buildLoader
		// then remove errors.TolerateNotFoundError method.
		&bcLoader{namespace: namespace, lister: d.C},
		&buildLoader{namespace: namespace, lister: d.C},
		&isLoader{namespace: namespace, lister: d.C},
		&dcLoader{namespace: namespace, lister: d.C},
		&routeLoader{namespace: namespace, lister: d.C},
	}
	loadingFuncs := []func() error{}
	for _, loader := range loaders {
		loadingFuncs = append(loadingFuncs, loader.Load)
	}

	forbiddenResources := sets.String{}
	if errs := parallel.Run(loadingFuncs...); len(errs) > 0 {
		actualErrors := []error{}
		for _, err := range errs {
			if kapierrors.IsForbidden(err) {
				forbiddenErr := err.(*kapierrors.StatusError)
				if (forbiddenErr.Status().Details != nil) && (len(forbiddenErr.Status().Details.Kind) > 0) {
					forbiddenResources.Insert(forbiddenErr.Status().Details.Kind)
				}
				continue
			}
			actualErrors = append(actualErrors, err)
		}

		if len(actualErrors) > 0 {
			return g, forbiddenResources, utilerrors.NewAggregate(actualErrors)
		}
	}

	for _, loader := range loaders {
		loader.AddToGraph(g)
	}

	kubeedges.AddAllExposedPodTemplateSpecEdges(g)
	kubeedges.AddAllExposedPodEdges(g)
	kubeedges.AddAllManagedByRCPodEdges(g)
	kubeedges.AddAllRequestedServiceAccountEdges(g)
	kubeedges.AddAllMountableSecretEdges(g)
	kubeedges.AddAllMountedSecretEdges(g)
	buildedges.AddAllInputOutputEdges(g)
	buildedges.AddAllBuildEdges(g)
	deployedges.AddAllTriggerEdges(g)
	deployedges.AddAllDeploymentEdges(g)
	imageedges.AddAllImageStreamRefEdges(g)
	routeedges.AddAllRouteEdges(g)

	return g, forbiddenResources, nil
}
Example #21
func getSecretNames(secrets []*kapi.Secret) sets.String {
	names := sets.String{}
	for _, secret := range secrets {
		names.Insert(secret.Name)
	}
	return names
}
Example #22
// GetMountSecretNames returns the names of the mount secrets associated
// with a service account.
func (o SecretOptions) GetMountSecretNames(serviceaccount *kapi.ServiceAccount) sets.String {
	names := sets.String{}
	for _, secret := range serviceaccount.Secrets {
		names.Insert(secret.Name)
	}
	return names
}
Example #23
func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	configs := []algorithm.PriorityConfig{}
	for _, name := range names.List() {
		factory, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		if factory.Function != nil {
			configs = append(configs, algorithm.PriorityConfig{
				Function: factory.Function(args),
				Weight:   factory.Weight,
			})
		} else {
			mapFunction, reduceFunction := factory.MapReduceFunction(args)
			configs = append(configs, algorithm.PriorityConfig{
				Map:    mapFunction,
				Reduce: reduceFunction,
				Weight: factory.Weight,
			})
		}
	}
	return configs, nil
}
Example #24
// Values returns the requirement's string values as a set.
func (r *Requirement) Values() sets.String {
	ret := sets.String{}
	for k := range r.strValues {
		ret.Insert(k)
	}
	return ret
}
Example #25
func runMasterServiceTest(client *client.Client) {
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{})
	if err != nil {
		glog.Fatalf("Unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := sets.String{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("Unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("No endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("No RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
Example #26
// mergeGroupMeta takes an lhs and an rhs GroupMeta, then builds a resulting GroupMeta that contains the
// merged information.  Order of merging matters: lhs wins.
func mergeGroupMeta(lhs, rhs apimachinery.GroupMeta) apimachinery.GroupMeta {
	merged := apimachinery.GroupMeta{}

	merged.GroupVersion = lhs.GroupVersion
	merged.SelfLinker = lhs.SelfLinker

	knownGVs := sets.String{}
	for _, lhsGV := range lhs.GroupVersions {
		knownGVs.Insert(lhsGV.String())
		merged.GroupVersions = append(merged.GroupVersions, lhsGV)
	}
	for _, rhsGV := range rhs.GroupVersions {
		if knownGVs.Has(rhsGV.String()) {
			continue
		}
		merged.GroupVersions = append(merged.GroupVersions, rhsGV)
	}

	merged.RESTMapper = meta.MultiRESTMapper(append([]meta.RESTMapper{}, lhs.RESTMapper, rhs.RESTMapper))

	merged.InterfacesFor = func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
		if ret, err := lhs.InterfacesFor(version); err == nil {
			return ret, nil
		}

		return rhs.InterfacesFor(version)
	}

	return merged
}
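The GroupVersion merge is an order-preserving union in which the left-hand side wins; the same pattern on plain strings (the group/version names are made up, same sets import assumption as above):

package main

import (
	"fmt"

	// assumed pre-apimachinery import path for sets.String
	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	lhs := []string{"apps/v1beta1", "extensions/v1beta1"}
	rhs := []string{"extensions/v1beta1", "batch/v1"}

	knownGVs := sets.String{}
	merged := []string{}
	for _, gv := range lhs {
		knownGVs.Insert(gv)
		merged = append(merged, gv)
	}
	for _, gv := range rhs { // anything already contributed by lhs is skipped
		if knownGVs.Has(gv) {
			continue
		}
		merged = append(merged, gv)
	}
	fmt.Println(merged) // [apps/v1beta1 extensions/v1beta1 batch/v1]
}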
Example #27
// clusterSyncLoop observes changes to the set of running clusters, applies all services to each
// newly added cluster, and adds DNS records for the changes.
func (s *ServiceController) clusterSyncLoop() {
	var servicesToUpdate []*cachedService
	// should we remove cached services for a cluster that goes from ready to not ready? If not, the condition predicate can be dropped.
	clusters, err := s.clusterStore.ClusterCondition(getClusterConditionPredicate()).List()
	if err != nil {
		glog.Infof("Fail to get cluster list")
		return
	}
	newClusters := clustersFromList(&clusters)
	var newSet, increase sets.String
	newSet = sets.NewString(newClusters...)
	if newSet.Equal(s.knownClusterSet) {
		// The set of cluster names in the services in the federation hasn't changed, but we can retry
		// updating any services that we failed to update last time around.
		servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters)
		return
	}
	glog.Infof("Detected change in list of cluster names. New  set: %v, Old set: %v", newSet, s.knownClusterSet)
	increase = newSet.Difference(s.knownClusterSet)
	// Do nothing when a cluster is removed; only newly added clusters need work.
	if increase.Len() > 0 {
		// Try updating all services, and save the ones that fail to try again next
		// round.
		servicesToUpdate = s.serviceCache.allServices()
		numServices := len(servicesToUpdate)
		for newCluster := range increase {
			glog.Infof("New cluster observed %s", newCluster)
			s.updateAllServicesToCluster(servicesToUpdate, newCluster)
		}
		servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters)
		glog.Infof("Successfully updated %d out of %d DNS records to direct traffic to the updated cluster",
			numServices-len(servicesToUpdate), numServices)
	}
	s.knownClusterSet = newSet
}
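The change detection boils down to an Equal check followed by a Difference between the newly observed cluster names and the known set; a self-contained sketch with fabricated cluster names (same sets import assumption as above):

package main

import (
	"fmt"

	// assumed pre-apimachinery import path for sets.String
	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	knownClusterSet := sets.NewString("us-east", "us-west")
	newSet := sets.NewString("us-east", "us-west", "eu-central")

	if newSet.Equal(knownClusterSet) {
		fmt.Println("no membership change")
		return
	}
	increase := newSet.Difference(knownClusterSet)
	for newCluster := range increase {
		fmt.Println("new cluster observed:", newCluster) // eu-central
	}
	// Removed clusters would be knownClusterSet.Difference(newSet); the
	// controller above deliberately ignores removals.
}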
Example #28
func NewDefaultRESTMapper(group string, groupVersionStrings []string, interfacesFunc meta.VersionInterfacesFunc,
	importPathPrefix string, ignoredKinds, rootScoped sets.String) *meta.DefaultRESTMapper {

	mapper := meta.NewDefaultRESTMapper(group, groupVersionStrings, interfacesFunc)
	// enumerate all supported versions, get the kinds, and register with the mapper how to address
	// our resources.
	for _, gvString := range groupVersionStrings {
		gv, err := unversioned.ParseGroupVersion(gvString)
		// TODO stop panicking when the types are fixed
		if err != nil {
			panic(err)
		}
		if gv.Group != group {
			panic(fmt.Sprintf("%q does not match the expect %q", gv.Group, group))
		}

		for kind, oType := range Scheme.KnownTypes(gv.String()) {
			// TODO: Remove import path prefix check.
			// We check the import path prefix because we currently stuff both "api" and "extensions" objects
			// into the same group within Scheme since Scheme has no notion of groups yet.
			if !strings.HasPrefix(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) {
				continue
			}
			scope := meta.RESTScopeNamespace
			if rootScoped.Has(kind) {
				scope = meta.RESTScopeRoot
			}
			mapper.Add(scope, kind, gv.String(), false)
		}
	}
	return mapper
}
Example #29
// SeenAllSources returns true if seenSources contains all sources in the
// config, and also this config has received a SET message from each source.
func (c *PodConfig) SeenAllSources(seenSources sets.String) bool {
	if c.pods == nil {
		return false
	}
	glog.V(6).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources)
	return seenSources.HasAll(c.sources.List()...) && c.pods.seenSources(c.sources.List()...)
}
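HasAll is simply a superset check; a tiny illustration with fabricated source names (same sets import assumption as above):

package main

import (
	"fmt"

	// assumed pre-apimachinery import path for sets.String
	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	configuredSources := sets.NewString("api", "file", "http")
	seenSources := sets.NewString("api", "file")

	fmt.Println(seenSources.HasAll(configuredSources.List()...)) // false: "http" not seen yet
	seenSources.Insert("http")
	fmt.Println(seenSources.HasAll(configuredSources.List()...)) // true
}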
Example #30
func (p *Parser) parseRequirement() (*Requirement, error) {
	key, operator, err := p.parseKeyAndInferOperator()
	if err != nil {
		return nil, err
	}
	if operator == selection.Exists || operator == selection.DoesNotExist { // operator found; lookahead set checked
		return NewRequirement(key, operator, []string{})
	}
	operator, err = p.parseOperator()
	if err != nil {
		return nil, err
	}
	var values sets.String
	switch operator {
	case selection.In, selection.NotIn:
		values, err = p.parseValues()
	case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan:
		values, err = p.parseExactValue()
	}
	if err != nil {
		return nil, err
	}
	return NewRequirement(key, operator, values.List())
}