// ScopesToVisibleNamespaces returns a list of namespaces that the provided scopes have "get" access to.
// This exists only to support efficient list/watch of projects (ACLed namespaces).
func ScopesToVisibleNamespaces(scopes []string, clusterPolicyGetter client.ClusterPolicyLister) (sets.String, error) {
	if len(scopes) == 0 {
		return sets.NewString("*"), nil
	}

	visibleNamespaces := sets.String{}

	errors := []error{}
	for _, scope := range scopes {
		found := false

		for _, evaluator := range ScopeEvaluators {
			if evaluator.Handles(scope) {
				found = true
				allowedNamespaces, err := evaluator.ResolveGettableNamespaces(scope, clusterPolicyGetter)
				if err != nil {
					errors = append(errors, err)
					continue
				}

				visibleNamespaces.Insert(allowedNamespaces...)
				break
			}
		}

		if !found {
			errors = append(errors, fmt.Errorf("no scope evaluator found for %q", scope))
		}
	}

	return visibleNamespaces, kutilerrors.NewAggregate(errors)
}
func (r *Requirement) Values() sets.String {
	ret := sets.String{}
	for k := range r.strValues {
		ret.Insert(k)
	}
	return ret
}
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := sets.String{}
	for i, pod := range pods {
		var errlist field.ErrorList
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				// TODO: when validation becomes versioned, this gets a bit
				// more complicated.
				errlist = append(errlist, field.Duplicate(field.NewPath("metadata", "name"), pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := errlist.ToAggregate()
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, api.EventTypeWarning, kubecontainer.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
func computeStatus(statusList []*github.CombinedStatus, requiredContexts []string) string {
	states := sets.String{}
	providers := sets.String{}
	for ix := range statusList {
		status := statusList[ix]
		glog.V(8).Infof("Checking commit: %s status:%v", *status.SHA, status)
		states.Insert(*status.State)

		for _, subStatus := range status.Statuses {
			glog.V(8).Infof("Found status from: %v", subStatus)
			providers.Insert(*subStatus.Context)
		}
	}
	for _, provider := range requiredContexts {
		if !providers.Has(provider) {
			glog.V(8).Infof("Failed to find %q in %v", provider, providers)
			return "incomplete"
		}
	}
	switch {
	case states.Has("pending"):
		return "pending"
	case states.Has("error"):
		return "error"
	case states.Has("failure"):
		return "failure"
	default:
		return "success"
	}
}
// Expand takes aliases and expands them into owner lists.
func (a *Aliases) Expand(toExpand sets.String) sets.String {
	expanded := sets.String{}
	for _, owner := range toExpand.List() {
		expanded.Insert(a.resolve(owner)...)
	}
	return expanded
}
func (config *GithubConfig) UsersWithCommit() ([]string, error) {
	userSet := sets.String{}

	teams, err := config.fetchAllTeams()
	if err != nil {
		glog.Errorf("%v", err)
		return nil, err
	}

	teamIDs := []int{}
	for _, team := range teams {
		repo, _, err := config.client.Organizations.IsTeamRepo(*team.ID, config.Org, config.Project)
		if repo == nil || err != nil {
			continue
		}
		perms := *repo.Permissions
		if perms["push"] {
			teamIDs = append(teamIDs, *team.ID)
		}
	}

	for _, team := range teamIDs {
		users, err := config.fetchAllUsers(team)
		if err != nil {
			glog.Errorf("%v", err)
			continue
		}
		for _, user := range users {
			userSet.Insert(*user.Login)
		}
	}

	return userSet.List(), nil
}
// mergeGroupMeta takes an lhs and an rhs GroupMeta, then builds a resulting GroupMeta that contains the
// merged information. Order of merging matters: lhs wins.
func mergeGroupMeta(lhs, rhs apimachinery.GroupMeta) apimachinery.GroupMeta {
	merged := apimachinery.GroupMeta{}

	merged.GroupVersion = lhs.GroupVersion
	merged.SelfLinker = lhs.SelfLinker

	knownGVs := sets.String{}
	for _, lhsGV := range lhs.GroupVersions {
		knownGVs.Insert(lhsGV.String())
		merged.GroupVersions = append(merged.GroupVersions, lhsGV)
	}
	// walk the rhs GroupVersions, keeping only those not already contributed by lhs
	for _, rhsGV := range rhs.GroupVersions {
		if knownGVs.Has(rhsGV.String()) {
			continue
		}
		merged.GroupVersions = append(merged.GroupVersions, rhsGV)
	}

	merged.RESTMapper = meta.MultiRESTMapper(append([]meta.RESTMapper{}, lhs.RESTMapper, rhs.RESTMapper))

	merged.InterfacesFor = func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
		if ret, err := lhs.InterfacesFor(version); err == nil {
			return ret, nil
		}
		return rhs.InterfacesFor(version)
	}

	return merged
}
// NewSharedContextEvaluator creates an evaluator object that allows sharing context while computing usage of a
// single namespace. The context is represented by an object returned by usageComputerFactory and is destroyed
// when the namespace is processed.
func NewSharedContextEvaluator(
	name string,
	groupKind unversioned.GroupKind,
	operationResources map[admission.Operation][]kapi.ResourceName,
	matchedResourceNames []kapi.ResourceName,
	matchesScopeFunc generic.MatchesScopeFunc,
	getFuncByNamespace generic.GetFuncByNamespace,
	listFuncByNamespace generic.ListFuncByNamespace,
	constraintsFunc generic.ConstraintsFunc,
	usageComputerFactory UsageComputerFactory,
) quota.Evaluator {

	rnSet := sets.String{}
	for _, resourceNames := range operationResources {
		rnSet.Insert(quota.ToSet(resourceNames).List()...)
	}

	return &SharedContextEvaluator{
		GenericEvaluator: &generic.GenericEvaluator{
			Name:                       name,
			InternalGroupKind:          groupKind,
			InternalOperationResources: operationResources,
			MatchedResourceNames:       matchedResourceNames,
			MatchesScopeFunc:           matchesScopeFunc,
			GetFuncByNamespace:         getFuncByNamespace,
			ListFuncByNamespace:        listFuncByNamespace,
			ConstraintsFunc:            constraintsFunc,
			UsageFunc: func(object runtime.Object) kapi.ResourceList {
				comp := usageComputerFactory()
				return comp.Usage(object)
			},
		},
		UsageComputerFactory: usageComputerFactory,
	}
}
func runMasterServiceTest(client *client.Client) {
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{})
	if err != nil {
		glog.Fatalf("Unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := sets.String{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("Unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("No endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("No RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
// Index returns a list of items that match on the index function.
// Index is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	indexKeys, err := indexFunc(obj)
	if err != nil {
		return nil, err
	}
	index := c.indices[indexName]

	// need to de-dupe the return list. Since multiple keys are allowed, this can happen.
	returnKeySet := sets.String{}
	for _, indexKey := range indexKeys {
		set := index[indexKey]
		for _, key := range set.List() {
			returnKeySet.Insert(key)
		}
	}

	list := make([]interface{}, 0, returnKeySet.Len())
	for absoluteKey := range returnKeySet {
		list = append(list, c.items[absoluteKey])
	}
	return list, nil
}
func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, sets.String, error) {
	g := osgraph.New()

	loaders := []GraphLoader{
		&serviceLoader{namespace: namespace, lister: d.K},
		&serviceAccountLoader{namespace: namespace, lister: d.K},
		&secretLoader{namespace: namespace, lister: d.K},
		&rcLoader{namespace: namespace, lister: d.K},
		&podLoader{namespace: namespace, lister: d.K},
		// TODO check swagger for feature enablement and selectively add bcLoader and buildLoader
		// then remove errors.TolerateNotFoundError method.
		&bcLoader{namespace: namespace, lister: d.C},
		&buildLoader{namespace: namespace, lister: d.C},
		&isLoader{namespace: namespace, lister: d.C},
		&dcLoader{namespace: namespace, lister: d.C},
		&routeLoader{namespace: namespace, lister: d.C},
	}
	loadingFuncs := []func() error{}
	for _, loader := range loaders {
		loadingFuncs = append(loadingFuncs, loader.Load)
	}

	forbiddenResources := sets.String{}
	if errs := parallel.Run(loadingFuncs...); len(errs) > 0 {
		actualErrors := []error{}
		for _, err := range errs {
			if kapierrors.IsForbidden(err) {
				forbiddenErr := err.(*kapierrors.StatusError)
				if (forbiddenErr.Status().Details != nil) && (len(forbiddenErr.Status().Details.Kind) > 0) {
					forbiddenResources.Insert(forbiddenErr.Status().Details.Kind)
				}
				continue
			}
			actualErrors = append(actualErrors, err)
		}

		if len(actualErrors) > 0 {
			return g, forbiddenResources, utilerrors.NewAggregate(actualErrors)
		}
	}

	for _, loader := range loaders {
		loader.AddToGraph(g)
	}

	kubeedges.AddAllExposedPodTemplateSpecEdges(g)
	kubeedges.AddAllExposedPodEdges(g)
	kubeedges.AddAllManagedByRCPodEdges(g)
	kubeedges.AddAllRequestedServiceAccountEdges(g)
	kubeedges.AddAllMountableSecretEdges(g)
	kubeedges.AddAllMountedSecretEdges(g)
	buildedges.AddAllInputOutputEdges(g)
	buildedges.AddAllBuildEdges(g)
	deployedges.AddAllTriggerEdges(g)
	deployedges.AddAllDeploymentEdges(g)
	imageedges.AddAllImageStreamRefEdges(g)
	routeedges.AddAllRouteEdges(g)

	return g, forbiddenResources, nil
}
// GetMountSecretNames returns a list of the names of the mount secrets associated
// with a service account.
func (o SecretOptions) GetMountSecretNames(serviceaccount *kapi.ServiceAccount) sets.String {
	names := sets.String{}
	for _, secret := range serviceaccount.Secrets {
		names.Insert(secret.Name)
	}
	return names
}
func computeStatus(combinedStatus *github.CombinedStatus, requiredContexts []string) string {
	states := sets.String{}
	providers := sets.String{}

	if len(requiredContexts) == 0 {
		return *combinedStatus.State
	}

	requires := sets.NewString(requiredContexts...)
	for _, status := range combinedStatus.Statuses {
		if !requires.Has(*status.Context) {
			continue
		}
		states.Insert(*status.State)
		providers.Insert(*status.Context)
	}

	missing := requires.Difference(providers)
	if missing.Len() != 0 {
		glog.V(8).Infof("Failed to find %v in CombinedStatus for %s", missing.List(), *combinedStatus.SHA)
		return "incomplete"
	}
	switch {
	case states.Has("pending"):
		return "pending"
	case states.Has("error"):
		return "error"
	case states.Has("failure"):
		return "failure"
	default:
		return "success"
	}
}
func (m *DefaultRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) {
	kinds, err := m.KindsFor(resource)
	if err != nil {
		return unversioned.GroupVersionKind{}, err
	}

	// TODO for each group, choose the most preferred (first) version. This keeps us consistent with code today.
	// eventually, we'll need a RESTMapper that is aware of what's available server-side and deconflicts that with
	// user preferences
	oneKindPerGroup := []unversioned.GroupVersionKind{}
	groupsAdded := sets.String{}
	for _, kind := range kinds {
		if groupsAdded.Has(kind.Group) {
			continue
		}

		oneKindPerGroup = append(oneKindPerGroup, kind)
		groupsAdded.Insert(kind.Group)
	}

	if len(oneKindPerGroup) == 1 {
		return oneKindPerGroup[0], nil
	}

	return unversioned.GroupVersionKind{}, fmt.Errorf("%v is ambiguous, got: %v", resource, kinds)
}
func (c *MasterConfig) defaultAPIGroupVersion() *apiserver.APIGroupVersion {
	var restMapper meta.MultiRESTMapper
	seenGroups := sets.String{}
	for _, gv := range registered.EnabledVersions() {
		if seenGroups.Has(gv.Group) {
			continue
		}
		seenGroups.Insert(gv.Group)

		groupMeta, err := registered.Group(gv.Group)
		if err != nil {
			continue
		}
		restMapper = meta.MultiRESTMapper(append(restMapper, groupMeta.RESTMapper))
	}

	statusMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{kubeapiv1.SchemeGroupVersion}, registered.GroupOrDie(kapi.GroupName).InterfacesFor)
	statusMapper.Add(kubeapiv1.SchemeGroupVersion.WithKind("Status"), meta.RESTScopeRoot)
	restMapper = meta.MultiRESTMapper(append(restMapper, statusMapper))

	return &apiserver.APIGroupVersion{
		Root: OpenShiftAPIPrefix,

		Mapper: restMapper,

		Creater:   kapi.Scheme,
		Typer:     kapi.Scheme,
		Convertor: kapi.Scheme,
		Linker:    registered.GroupOrDie("").SelfLinker,

		Admit:   c.AdmissionControl,
		Context: c.getRequestContextMapper(),

		SubresourceGroupVersionKind: map[string]unversioned.GroupVersionKind{},
	}
}
// edgeHop checks the links of the given backend by executing an edge hop.
// It fixes broken links.
func (b *Backends) edgeHop(be *compute.BackendService, igs []*compute.InstanceGroup) error {
	beIGs := sets.String{}
	for _, beToIG := range be.Backends {
		beIGs.Insert(beToIG.Group)
	}
	igLinks := sets.String{}
	for _, igToBE := range igs {
		igLinks.Insert(igToBE.SelfLink)
	}
	if beIGs.IsSuperset(igLinks) {
		return nil
	}
	glog.Infof("Backend %v has a broken edge, expected igs %+v, current igs %+v",
		be.Name, igLinks.List(), beIGs.List())

	newBackends := []*compute.Backend{}
	for _, b := range getBackendsForIGs(igs) {
		if !beIGs.Has(b.Group) {
			newBackends = append(newBackends, b)
		}
	}
	be.Backends = append(be.Backends, newBackends...)
	if err := b.cloud.UpdateBackendService(be); err != nil {
		return err
	}
	return nil
}
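// The edge-hop repair above reduces to a superset check followed by a set difference.
// A minimal, self-contained sketch of that pattern, assuming the current
// k8s.io/apimachinery/pkg/util/sets import path (older trees vendor the same type under
// pkg/util/sets); the link values are made up for illustration only.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	attached := sets.NewString("ig-a", "ig-b")        // links already on the backend service
	desired := sets.NewString("ig-a", "ig-b", "ig-c") // links the instance groups require

	if attached.IsSuperset(desired) {
		fmt.Println("nothing to do")
		return
	}
	// Only the missing links need to be appended, mirroring the !beIGs.Has(b.Group) filter above.
	fmt.Println("missing:", desired.Difference(attached).List()) // missing: [ig-c]
}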
func parseMetrics(data string, knownMetrics map[string][]string, output *Metrics, unknownMetrics sets.String) error {
	dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText)
	decoder := expfmt.SampleDecoder{
		Dec:  dec,
		Opts: &expfmt.DecodeOptions{},
	}

	for {
		var v model.Vector
		if err := decoder.Decode(&v); err != nil {
			if err == io.EOF {
				// Expected loop termination condition.
				return nil
			}
			glog.Warningf("Invalid Decode. Skipping.")
			continue
		}
		for _, metric := range v {
			name := string(metric.Metric[model.MetricNameLabel])
			_, isCommonMetric := CommonMetrics[name]
			_, isKnownMetric := knownMetrics[name]
			if isKnownMetric || isCommonMetric {
				(*output)[name] = append((*output)[name], metric)
			} else {
				if unknownMetrics != nil {
					unknownMetrics.Insert(name)
				}
			}
		}
	}
}
func getPullSecretNames(serviceaccount *kapi.ServiceAccount) sets.String {
	names := sets.String{}
	for _, secret := range serviceaccount.ImagePullSecrets {
		names.Insert(secret.Name)
	}
	return names
}
func getSecretNames(secrets []*kapi.Secret) sets.String {
	names := sets.String{}
	for _, secret := range secrets {
		names.Insert(secret.Name)
	}
	return names
}
// List returns the set of namespace names the user has access to view
func (ac *AuthorizationCache) List(userInfo user.Info) (*kapi.NamespaceList, error) {
	keys := sets.String{}
	user := userInfo.GetName()
	groups := userInfo.GetGroups()

	obj, exists, _ := ac.userSubjectRecordStore.GetByKey(user)
	if exists {
		subjectRecord := obj.(*subjectRecord)
		keys.Insert(subjectRecord.namespaces.List()...)
	}

	for _, group := range groups {
		obj, exists, _ := ac.groupSubjectRecordStore.GetByKey(group)
		if exists {
			subjectRecord := obj.(*subjectRecord)
			keys.Insert(subjectRecord.namespaces.List()...)
		}
	}

	namespaceList := &kapi.NamespaceList{}
	for key := range keys {
		namespace, exists, err := ac.namespaceStore.GetByKey(key)
		if err != nil {
			return nil, err
		}
		if exists {
			namespaceList.Items = append(namespaceList.Items, *namespace.(*kapi.Namespace))
		}
	}
	return namespaceList, nil
}
func (t *tcShaper) nextClassID() (int, error) {
	data, err := t.e.Command("tc", "class", "show", "dev", t.iface).CombinedOutput()
	if err != nil {
		return -1, err
	}

	scanner := bufio.NewScanner(bytes.NewBuffer(data))
	classes := sets.String{}
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		// skip empty lines
		if len(line) == 0 {
			continue
		}
		parts := strings.Split(line, " ")
		// expected tc line:
		// class htb 1:1 root prio 0 rate 1000Kbit ceil 1000Kbit burst 1600b cburst 1600b
		if len(parts) != 14 {
			return -1, fmt.Errorf("unexpected output from tc: %s (%v)", scanner.Text(), parts)
		}
		classes.Insert(parts[2])
	}

	// Make sure it doesn't go forever
	for nextClass := 1; nextClass < 10000; nextClass++ {
		if !classes.Has(fmt.Sprintf("1:%d", nextClass)) {
			return nextClass, nil
		}
	}
	// This should really never happen
	return -1, fmt.Errorf("exhausted class space, please try again")
}
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := sets.String{}
	for i, pod := range pods {
		var errlist []error
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := utilerrors.NewAggregate(errlist)
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
// GetServerCertHostnames returns the set of hostnames and IP addresses a serving certificate for a node on this host might need to be valid for.
func (args NodeArgs) GetServerCertHostnames() (sets.String, error) {
	allHostnames := sets.NewString(args.NodeName)

	listenIP := net.ParseIP(args.ListenArg.ListenAddr.Host)
	// add the IPs that might be used based on the ListenAddr.
	if listenIP != nil && listenIP.IsUnspecified() {
		allAddresses, _ := cmdutil.AllLocalIP4()
		for _, ip := range allAddresses {
			allHostnames.Insert(ip.String())
		}
	} else {
		allHostnames.Insert(args.ListenArg.ListenAddr.Host)
	}

	certHostnames := sets.String{}
	for hostname := range allHostnames {
		if host, _, err := net.SplitHostPort(hostname); err == nil {
			// add the hostname without the port
			certHostnames.Insert(host)
		} else {
			// add the originally specified hostname
			certHostnames.Insert(hostname)
		}
	}

	return certHostnames, nil
}
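// The SplitHostPort fallback above exists because net.SplitHostPort fails on bare hostnames
// without a port. A small, self-contained demonstration using only the standard library; the
// example hostnames are illustrative, not taken from the function above.
package main

import (
	"fmt"
	"net"
)

func main() {
	for _, hostname := range []string{"node1.example.com:10250", "node1.example.com", "0.0.0.0:8443"} {
		if host, _, err := net.SplitHostPort(hostname); err == nil {
			fmt.Printf("%-26s -> %s (port stripped)\n", hostname, host)
		} else {
			fmt.Printf("%-26s -> %s (no port, kept as-is)\n", hostname, hostname)
		}
	}
}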
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := sets.String{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
func TestFilterQuotaPods(t *testing.T) {
	pods := []api.Pod{
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-running"},
			Status:     api.PodStatus{Phase: api.PodRunning},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-pending"},
			Status:     api.PodStatus{Phase: api.PodPending},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-succeeded"},
			Status:     api.PodStatus{Phase: api.PodSucceeded},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-unknown"},
			Status:     api.PodStatus{Phase: api.PodUnknown},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
			Status:     api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-always"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyAlways,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-on-failure"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyOnFailure,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-never"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyNever,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
	}
	expectedResults := sets.NewString("pod-running", "pod-pending", "pod-unknown",
		"pod-failed-with-restart-always", "pod-failed-with-restart-on-failure")

	actualResults := sets.String{}
	result := FilterQuotaPods(pods)
	for i := range result {
		actualResults.Insert(result[i].Name)
	}

	if len(expectedResults) != len(actualResults) || !actualResults.HasAll(expectedResults.List()...) {
		t.Errorf("Expected results %v, Actual results %v", expectedResults, actualResults)
	}
}
// NegotiateVersion queries the server's supported api versions to find
// a version that both client and server support.
// - If no version is provided, try registered client versions in order of
//   preference.
// - If version is provided and the server does not support it,
//   return an error.
func NegotiateVersion(client DiscoveryInterface, requiredGV *schema.GroupVersion, clientRegisteredGVs []schema.GroupVersion) (*schema.GroupVersion, error) {
	clientVersions := sets.String{}
	for _, gv := range clientRegisteredGVs {
		clientVersions.Insert(gv.String())
	}
	groups, err := client.ServerGroups()
	if err != nil {
		// This is almost always a connection error, and higher level code should treat this as a
		// generic error, not a negotiation specific error.
		return nil, err
	}
	versions := metav1.ExtractGroupVersions(groups)
	serverVersions := sets.String{}
	for _, v := range versions {
		serverVersions.Insert(v)
	}

	// If a version was explicitly requested, verify that both client and server support it.
	if requiredGV != nil {
		if !clientVersions.Has(requiredGV.String()) {
			return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", requiredGV, clientVersions)
		}
		// If the server supports no versions, then we should just use the preferredGV
		// This can happen because discovery fails due to 403 Forbidden errors
		if len(serverVersions) == 0 {
			return requiredGV, nil
		}
		if serverVersions.Has(requiredGV.String()) {
			return requiredGV, nil
		}
		// If we are using an explicit config version the server does not support, fail.
		return nil, fmt.Errorf("server does not support API version %q", requiredGV)
	}

	for _, clientGV := range clientRegisteredGVs {
		if serverVersions.Has(clientGV.String()) {
			// Version was not explicitly requested in command config (--api-version).
			// Ok to fall back to a supported version with a warning.
			// TODO: caesarxuchao: enable the warning message when we have
			// proper fix. Please refer to issue #14895.
			// if len(version) != 0 {
			// 	glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion)
			// }
			t := clientGV
			return &t, nil
		}
	}

	// if we have no server versions and we have no required version, choose the first clientRegisteredVersion
	if len(serverVersions) == 0 && len(clientRegisteredGVs) > 0 {
		return &clientRegisteredGVs[0], nil
	}

	return nil, fmt.Errorf("failed to negotiate an api version; server supports: %v, client supports: %v", serverVersions, clientVersions)
}
// NegotiateVersion queries the server's supported api versions to find
// a version that both client and server support.
// - If no version is provided, try registered client versions in order of
//   preference.
// - If version is provided, but not default config (explicitly requested via
//   commandline flag), and is unsupported by the server, print a warning to
//   stderr and try client's registered versions in order of preference.
// - If version is config default, and the server does not support it,
//   return an error.
func NegotiateVersion(client *Client, c *Config, version string, clientRegisteredVersions []string) (string, error) {
	var err error
	if client == nil {
		client, err = New(c)
		if err != nil {
			return "", err
		}
	}
	clientVersions := sets.String{}
	for _, v := range clientRegisteredVersions {
		clientVersions.Insert(v)
	}
	apiVersions, err := client.ServerAPIVersions()
	if err != nil {
		// This is almost always a connection error, and higher level code should treat this as a
		// generic error, not a version specific error.
		return "", err
	}
	serverVersions := sets.String{}
	for _, v := range apiVersions.Versions {
		serverVersions.Insert(v)
	}

	// If no version requested, use config version (may also be empty).
	if len(version) == 0 {
		version = c.Version
	}

	// If a version was explicitly requested, verify that both client and server support it.
	// If the server does not support it, warn, but try to negotiate a lower version.
	if len(version) != 0 {
		if !clientVersions.Has(version) {
			return "", fmt.Errorf("client does not support API version '%s'. Client supported API versions: %v", version, clientVersions)
		}
		if serverVersions.Has(version) {
			return version, nil
		}
		// If we are using an explicit config version the server does not support, fail.
		if version == c.Version {
			return "", fmt.Errorf("server does not support API version '%s'.", version)
		}
	}

	for _, clientVersion := range clientRegisteredVersions {
		if serverVersions.Has(clientVersion) {
			// Version was not explicitly requested in command config (--api-version).
			// Ok to fall back to a supported version with a warning.
			// TODO: caesarxuchao: enable the warning message when we have
			// proper fix. Please refer to issue #14895.
			// if len(version) != 0 {
			// 	glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion)
			// }
			return clientVersion, nil
		}
	}
	return "", fmt.Errorf("failed to negotiate an api version. Server supports: %v. Client supports: %v.", serverVersions, clientRegisteredVersions)
}
// GetServerCertHostnames returns the set of hostnames that any serving certificate for master needs to be valid for.
func (args MasterArgs) GetServerCertHostnames() (sets.String, error) {
	masterAddr, err := args.GetMasterAddress()
	if err != nil {
		return nil, err
	}
	masterPublicAddr, err := args.GetMasterPublicAddress()
	if err != nil {
		return nil, err
	}
	assetPublicAddr, err := args.GetAssetPublicAddress()
	if err != nil {
		return nil, err
	}

	allHostnames := sets.NewString(
		"localhost", "127.0.0.1",
		"openshift.default.svc.cluster.local",
		"openshift.default.svc",
		"openshift.default",
		"openshift",
		"kubernetes.default.svc.cluster.local",
		"kubernetes.default.svc",
		"kubernetes.default",
		"kubernetes",
		masterAddr.Host, masterPublicAddr.Host, assetPublicAddr.Host)

	if _, ipnet, err := net.ParseCIDR(args.NetworkArgs.ServiceNetworkCIDR); err == nil {
		// CIDR is ignored if it is invalid, other code handles validation.
		if firstServiceIP, err := ipallocator.GetIndexedIP(ipnet, 1); err == nil {
			allHostnames.Insert(firstServiceIP.String())
		}
	}

	listenIP := net.ParseIP(args.ListenArg.ListenAddr.Host)
	// add the IPs that might be used based on the ListenAddr.
	if listenIP != nil && listenIP.IsUnspecified() {
		allAddresses, _ := cmdutil.AllLocalIP4()
		for _, ip := range allAddresses {
			allHostnames.Insert(ip.String())
		}
	} else {
		allHostnames.Insert(args.ListenArg.ListenAddr.Host)
	}

	certHostnames := sets.String{}
	for hostname := range allHostnames {
		if host, _, err := net.SplitHostPort(hostname); err == nil {
			// add the hostname without the port
			certHostnames.Insert(host)
		} else {
			// add the originally specified hostname
			certHostnames.Insert(hostname)
		}
	}

	return certHostnames, nil
}
// GetNamespacesFromPodAffinityTerm returns a set of namespace names
// according to the namespaces indicated in podAffinityTerm.
// If Namespaces is nil, the given pod's own namespace is used.
// If Namespaces is an empty list, all namespaces are considered.
func GetNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffinityTerm) sets.String {
	names := sets.String{}
	if podAffinityTerm.Namespaces == nil {
		names.Insert(pod.Namespace)
	} else if len(podAffinityTerm.Namespaces) != 0 {
		names.Insert(podAffinityTerm.Namespaces...)
	}
	return names
}
// ContainedIDs returns a sets.String containing all IDs of the stored items.
// This is a snapshot of a moment in time, and one should keep in mind that
// other goroutines can add or remove items after you call this.
func (c *DelayFIFO) ContainedIDs() sets.String {
	c.rlock()
	defer c.runlock()
	set := sets.String{}
	for id := range c.items {
		set.Insert(id)
	}
	return set
}
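// Every snippet above leans on the same small sets.String surface: Insert, Has, List, HasAll,
// NewString, and Difference. A minimal, self-contained usage sketch, assuming the current
// k8s.io/apimachinery/pkg/util/sets import path (older trees vendor the identical type under
// pkg/util/sets); all values here are illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	names := sets.String{}          // the zero value is an empty, usable set
	names.Insert("alpha", "beta")   // variadic insert, as used throughout the functions above
	fmt.Println(names.Has("alpha")) // true
	fmt.Println(names.List())       // sorted slice: [alpha beta]

	required := sets.NewString("alpha", "gamma")
	fmt.Println(required.Difference(names).List()) // members of required missing from names: [gamma]
	fmt.Println(names.HasAll(required.List()...))  // false, since "gamma" is absent
}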