func usersWithCommit(client *github.Client, org, project string) ([]string, error) { userSet := util.StringSet{} teams, err := fetchAllTeams(client, org) if err != nil { glog.Errorf("%v", err) return nil, err } teamIDs := []int{} for _, team := range teams { repo, _, err := client.Organizations.IsTeamRepo(*team.ID, org, project) if repo == nil || err != nil { continue } perms := *repo.Permissions if perms["push"] { teamIDs = append(teamIDs, *team.ID) } } for _, team := range teamIDs { users, err := fetchAllUsers(client, team) if err != nil { glog.Errorf("%v", err) continue } for _, user := range users { userSet.Insert(*user.Login) } } return userSet.List(), nil }
func getSecretNames(secrets []*kapi.Secret) util.StringSet { names := util.StringSet{} for _, secret := range secrets { names.Insert(secret.Name) } return names }
func (t *tcShaper) nextClassID() (int, error) { data, err := t.e.Command("tc", "class", "show", "dev", t.iface).CombinedOutput() if err != nil { return -1, err } scanner := bufio.NewScanner(bytes.NewBuffer(data)) classes := util.StringSet{} for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) // skip empty lines if len(line) == 0 { continue } parts := strings.Split(line, " ") // expected tc line: // class htb 1:1 root prio 0 rate 1000Kbit ceil 1000Kbit burst 1600b cburst 1600b if len(parts) != 14 { return -1, fmt.Errorf("unexpected output from tc: %s (%v)", scanner.Text(), parts) } classes.Insert(parts[2]) } // Make sure it doesn't go forever for nextClass := 1; nextClass < 10000; nextClass++ { if !classes.Has(fmt.Sprintf("1:%d", nextClass)) { return nextClass, nil } } // This should really never happen return -1, fmt.Errorf("exhausted class space, please try again") }
// nameMatches checks to see if the resourceName of the action is in a the specified whitelist. An empty whitelist indicates that any name is allowed. // An empty string in the whitelist should only match the action's resourceName if the resourceName itself is empty string. This behavior allows for the // combination of a whitelist for gets in the same rule as a list that won't have a resourceName. I don't recommend writing such a rule, but we do // handle it like you'd expect: white list is respected for gets while not preventing the list you explicitly asked for. func (a DefaultAuthorizationAttributes) nameMatches(allowedResourceNames util.StringSet) bool { if len(allowedResourceNames) == 0 { return true } return allowedResourceNames.Has(a.GetResourceName()) }
// Pass ports=nil for all ports. func formatEndpoints(endpoints *api.Endpoints, ports util.StringSet) string { if len(endpoints.Subsets) == 0 { return "<none>" } list := []string{} max := 3 more := false count := 0 for i := range endpoints.Subsets { ss := &endpoints.Subsets[i] for i := range ss.Ports { port := &ss.Ports[i] if ports == nil || ports.Has(port.Name) { for i := range ss.Addresses { if len(list) == max { more = true } addr := &ss.Addresses[i] if !more { list = append(list, fmt.Sprintf("%s:%d", addr.IP, port.Port)) } count++ } } } } ret := strings.Join(list, ",") if more { return fmt.Sprintf("%s + %d more...", ret, count-max) } return ret }
// Index returns a list of items that match on the index function // Index is thread-safe so long as you treat all items as immutable func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) { c.lock.RLock() defer c.lock.RUnlock() indexFunc := c.indexers[indexName] if indexFunc == nil { return nil, fmt.Errorf("Index with name %s does not exist", indexName) } indexKeys, err := indexFunc(obj) if err != nil { return nil, err } index := c.indices[indexName] // need to de-dupe the return list. Since multiple keys are allowed, this can happen. returnKeySet := util.StringSet{} for _, indexKey := range indexKeys { set := index[indexKey] for _, key := range set.List() { returnKeySet.Insert(key) } } list := make([]interface{}, 0, returnKeySet.Len()) for absoluteKey := range returnKeySet { list = append(list, c.items[absoluteKey]) } return list, nil }
func runMasterServiceTest(client *client.Client) { time.Sleep(12 * time.Second) svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything()) if err != nil { glog.Fatalf("unexpected error listing services: %v", err) } var foundRW bool found := util.StringSet{} for i := range svcList.Items { found.Insert(svcList.Items[i].Name) if svcList.Items[i].Name == "kubernetes" { foundRW = true } } if foundRW { ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes") if err != nil { glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err) } if countEndpoints(ep) == 0 { glog.Fatalf("no endpoints for kubernetes service: %v", ep) } } else { glog.Errorf("no RW service found: %v", found) glog.Fatal("Kubernetes service test failed") } glog.Infof("Master service test passed.") }
// List returns the set of namespace names the user has access to view func (ac *AuthorizationCache) List(userInfo user.Info) (*kapi.NamespaceList, error) { keys := util.StringSet{} user := userInfo.GetName() groups := userInfo.GetGroups() obj, exists, _ := ac.userSubjectRecordStore.GetByKey(user) if exists { subjectRecord := obj.(*subjectRecord) keys.Insert(subjectRecord.namespaces.List()...) } for _, group := range groups { obj, exists, _ := ac.groupSubjectRecordStore.GetByKey(group) if exists { subjectRecord := obj.(*subjectRecord) keys.Insert(subjectRecord.namespaces.List()...) } } namespaceList := &kapi.NamespaceList{} for key := range keys { namespace, exists, err := ac.namespaceStore.GetByKey(key) if err != nil { return nil, err } if exists { namespaceList.Items = append(namespaceList.Items, *namespace.(*kapi.Namespace)) } } return namespaceList, nil }
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) { names := util.StringSet{} for i, pod := range pods { var errlist []error if errs := validation.ValidatePod(pod); len(errs) != 0 { errlist = append(errlist, errs...) // If validation fails, don't trust it any further - // even Name could be bad. } else { name := kubecontainer.GetPodFullName(pod) if names.Has(name) { errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name)) } else { names.Insert(name) } } if len(errlist) > 0 { name := bestPodIdentString(pod) err := utilerrors.NewAggregate(errlist) glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err) recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err) continue } filtered = append(filtered, pod) } return }
func getPullSecretNames(serviceaccount *kapi.ServiceAccount) util.StringSet { names := util.StringSet{} for _, secret := range serviceaccount.ImagePullSecrets { names.Insert(secret.Name) } return names }
// MakeGraph loads all supported resources in the namespace in parallel, adds
// them as nodes to a fresh osgraph, then wires up the standard edges between
// them. Resource kinds the user is forbidden to list are tolerated and
// returned in the second result so callers can report reduced output; all
// other load errors are aggregated and returned.
func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, util.StringSet, error) {
	g := osgraph.New()
	loaders := []GraphLoader{
		&serviceLoader{namespace: namespace, lister: d.K},
		&serviceAccountLoader{namespace: namespace, lister: d.K},
		&secretLoader{namespace: namespace, lister: d.K},
		&rcLoader{namespace: namespace, lister: d.K},
		&podLoader{namespace: namespace, lister: d.K},
		// TODO check swagger for feature enablement and selectively add bcLoader and buildLoader
		// then remove tolerateNotFoundErrors method.
		&bcLoader{namespace: namespace, lister: d.C},
		&buildLoader{namespace: namespace, lister: d.C},
		&isLoader{namespace: namespace, lister: d.C},
		&dcLoader{namespace: namespace, lister: d.C},
	}
	loadingFuncs := []func() error{}
	for _, loader := range loaders {
		loadingFuncs = append(loadingFuncs, loader.Load)
	}
	forbiddenResources := util.StringSet{}
	if errs := parallel.Run(loadingFuncs...); len(errs) > 0 {
		actualErrors := []error{}
		for _, err := range errs {
			// Forbidden errors are tolerated: record the kind (when the
			// status carries one) and keep going with the remaining loaders.
			if kapierrors.IsForbidden(err) {
				forbiddenErr := err.(*kapierrors.StatusError)
				if (forbiddenErr.Status().Details != nil) && (len(forbiddenErr.Status().Details.Kind) > 0) {
					forbiddenResources.Insert(forbiddenErr.Status().Details.Kind)
				}
				continue
			}
			actualErrors = append(actualErrors, err)
		}
		if len(actualErrors) > 0 {
			return g, forbiddenResources, utilerrors.NewAggregate(actualErrors)
		}
	}
	// All loads succeeded (or were tolerably forbidden): populate the nodes...
	for _, loader := range loaders {
		loader.AddToGraph(g)
	}
	// ...then add the relationship edges between them. Node population must
	// complete before any edges are added.
	kubeedges.AddAllExposedPodTemplateSpecEdges(g)
	kubeedges.AddAllExposedPodEdges(g)
	kubeedges.AddAllManagedByRCPodEdges(g)
	kubeedges.AddAllRequestedServiceAccountEdges(g)
	kubeedges.AddAllMountableSecretEdges(g)
	kubeedges.AddAllMountedSecretEdges(g)
	buildedges.AddAllInputOutputEdges(g)
	buildedges.AddAllBuildEdges(g)
	deployedges.AddAllTriggerEdges(g)
	deployedges.AddAllDeploymentEdges(g)
	imageedges.AddAllImageStreamRefEdges(g)
	return g, forbiddenResources, nil
}
func TestFilterQuotaPods(t *testing.T) { pods := []api.Pod{ { ObjectMeta: api.ObjectMeta{Name: "pod-running"}, Status: api.PodStatus{Phase: api.PodRunning}, }, { ObjectMeta: api.ObjectMeta{Name: "pod-pending"}, Status: api.PodStatus{Phase: api.PodPending}, }, { ObjectMeta: api.ObjectMeta{Name: "pod-succeeded"}, Status: api.PodStatus{Phase: api.PodSucceeded}, }, { ObjectMeta: api.ObjectMeta{Name: "pod-unknown"}, Status: api.PodStatus{Phase: api.PodUnknown}, }, { ObjectMeta: api.ObjectMeta{Name: "pod-failed"}, Status: api.PodStatus{Phase: api.PodFailed}, }, { ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-always"}, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyAlways, }, Status: api.PodStatus{Phase: api.PodFailed}, }, { ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-on-failure"}, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyOnFailure, }, Status: api.PodStatus{Phase: api.PodFailed}, }, { ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-never"}, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyNever, }, Status: api.PodStatus{Phase: api.PodFailed}, }, } expectedResults := util.NewStringSet("pod-running", "pod-pending", "pod-unknown", "pod-failed-with-restart-always", "pod-failed-with-restart-on-failure") actualResults := util.StringSet{} result := FilterQuotaPods(pods) for i := range result { actualResults.Insert(result[i].Name) } if len(expectedResults) != len(actualResults) || !actualResults.HasAll(expectedResults.List()...) { t.Errorf("Expected results %v, Actual results %v", expectedResults, actualResults) } }
func findKnownValue(parts []string, valueOptions util.StringSet) int { for i := range parts { if valueOptions.Has(parts[i]) { return i } } return -1 }
// ContainedIDs returns a util.StringSet containing all IDs of the stored items. // This is a snapshot of a moment in time, and one should keep in mind that // other go routines can add or remove items after you call this. func (c *DelayFIFO) ContainedIDs() util.StringSet { c.rlock() defer c.runlock() set := util.StringSet{} for id := range c.items { set.Insert(id) } return set }
func (m *mockPruneRecorder) Verify(t *testing.T, expected util.StringSet) { if len(m.set) != len(expected) || !m.set.HasAll(expected.List()...) { expectedValues := expected.List() actualValues := m.set.List() sort.Strings(expectedValues) sort.Strings(actualValues) t.Errorf("expected \n\t%v\n, actual \n\t%v\n", expectedValues, actualValues) } }
// TODO move upstream func intersection(s1 util.StringSet, s2 util.StringSet) util.StringSet { result := util.NewStringSet() for key := range s1 { if s2.Has(key) { result.Insert(key) } } return result }
// GetServerCertHostnames returns the set of hostnames that any serving certificate for master needs to be valid for.
func (args MasterArgs) GetServerCertHostnames() (util.StringSet, error) {
	// Resolve the externally visible master addresses; any failure aborts.
	masterAddr, err := args.GetMasterAddress()
	if err != nil {
		return nil, err
	}
	masterPublicAddr, err := args.GetMasterPublicAddress()
	if err != nil {
		return nil, err
	}
	assetPublicAddr, err := args.GetAssetPublicAddress()
	if err != nil {
		return nil, err
	}
	// Well-known in-cluster service names plus the resolved master hosts.
	allHostnames := util.NewStringSet(
		"localhost", "127.0.0.1",
		"openshift.default.svc.cluster.local",
		"openshift.default.svc",
		"openshift.default",
		"openshift",
		"kubernetes.default.svc.cluster.local",
		"kubernetes.default.svc",
		"kubernetes.default",
		"kubernetes",
		masterAddr.Host, masterPublicAddr.Host, assetPublicAddr.Host)
	// Also cover the first IP of the service network (the kubernetes service IP).
	if _, ipnet, err := net.ParseCIDR(args.NetworkArgs.ServiceNetworkCIDR); err == nil {
		// CIDR is ignored if it is invalid, other code handles validation.
		if firstServiceIP, err := ipallocator.GetIndexedIP(ipnet, 1); err == nil {
			allHostnames.Insert(firstServiceIP.String())
		}
	}
	listenIP := net.ParseIP(args.ListenArg.ListenAddr.Host)
	// add the IPs that might be used based on the ListenAddr.
	if listenIP != nil && listenIP.IsUnspecified() {
		allAddresses, _ := cmdutil.AllLocalIP4()
		for _, ip := range allAddresses {
			allHostnames.Insert(ip.String())
		}
	} else {
		allHostnames.Insert(args.ListenArg.ListenAddr.Host)
	}
	// Strip any ports so the certificate carries bare hostnames/IPs only.
	certHostnames := util.StringSet{}
	for hostname := range allHostnames {
		if host, _, err := net.SplitHostPort(hostname); err == nil {
			// add the hostname without the port
			certHostnames.Insert(host)
		} else {
			// add the originally specified hostname
			certHostnames.Insert(hostname)
		}
	}
	return certHostnames, nil
}
func (b *BlunderbussConfig) FindOwners(filename string) []string { owners := util.StringSet{} for prefix, ownersList := range b.PrefixMap { if strings.HasPrefix(filename, prefix) { owners.Insert(ownersList...) } } return owners.List() }
// ExampleInformer demonstrates an informer that deletes every pod it sees
// added; the deletion keys are collected and printed sorted, which go test
// checks against the // Output block below.
func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	// The StringSet gives deterministic (sorted) output regardless of
	// delivery order.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}
	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
func printPolicyBinding(policyBinding *authorizationapi.PolicyBinding, w io.Writer, withNamespace, wide bool, columnLabels []string) error { roleBindingNames := util.StringSet{} for key := range policyBinding.RoleBindings { roleBindingNames.Insert(key) } roleBindingsString := strings.Join(roleBindingNames.List(), ", ") _, err := fmt.Fprintf(w, "%s\t%s\t%v\n", policyBinding.Name, roleBindingsString, policyBinding.LastModified) return err }
func printDeploymentConfig(dc *deployapi.DeploymentConfig, w io.Writer, withNamespace, wide bool, columnLabels []string) error { triggers := util.StringSet{} for _, trigger := range dc.Triggers { triggers.Insert(string(trigger.Type)) } tStr := strings.Join(triggers.List(), ", ") _, err := fmt.Fprintf(w, "%s\t%s\t%v\n", dc.Name, tStr, dc.LatestVersion) return err }
// getMatchingClusters examines the kubeconfig for all clusters that point to the same server func getMatchingClusters(clientConfig kclient.Config, kubeconfig clientcmdapi.Config) util.StringSet { ret := util.StringSet{} for key, cluster := range kubeconfig.Clusters { if (cluster.Server == clientConfig.Host) && (cluster.InsecureSkipTLSVerify == clientConfig.Insecure) && (cluster.CertificateAuthority == clientConfig.CAFile) && (bytes.Compare(cluster.CertificateAuthorityData, clientConfig.CAData) == 0) { ret.Insert(key) } } return ret }
// ContainedIDs returns a util.StringSet containing all IDs of the enqueued items. // This is a snapshot of a moment in time, and one should keep in mind that // other go routines can add or remove items after you call this. func (eq *EventQueue) ContainedIDs() util.StringSet { eq.lock.RLock() defer eq.lock.RUnlock() s := util.StringSet{} for _, key := range eq.queue { s.Insert(key) } return s }
// purgeDeletedNamespaces will remove all namespaces enumerated in a reviewRecordStore that are not in the namespace set func purgeDeletedNamespaces(namespaceSet *util.StringSet, userSubjectRecordStore cache.Store, groupSubjectRecordStore cache.Store, reviewRecordStore cache.Store) { reviewRecordItems := reviewRecordStore.List() for i := range reviewRecordItems { reviewRecord := reviewRecordItems[i].(*reviewRecord) if !namespaceSet.Has(reviewRecord.namespace) { deleteNamespaceFromSubjects(userSubjectRecordStore, reviewRecord.users, reviewRecord.namespace) deleteNamespaceFromSubjects(groupSubjectRecordStore, reviewRecord.groups, reviewRecord.namespace) reviewRecordStore.Delete(reviewRecord) } } }
// ContainedIDs returns a util.StringSet containing all IDs of the stored items. // This is a snapshot of a moment in time, and one should keep in mind that // other go routines can add or remove items after you call this. func (c *HistoricalFIFO) ContainedIDs() util.StringSet { c.lock.RLock() defer c.lock.RUnlock() set := util.StringSet{} for id, entry := range c.items { if entry.Is(DELETE_EVENT | POP_EVENT) { continue } set.Insert(id) } return set }
func doesApplyToUser(ruleUsers, ruleGroups util.StringSet, user user.Info) bool { if ruleUsers.Has(user.GetName()) { return true } for _, currGroup := range user.GetGroups() { if ruleGroups.Has(currGroup) { return true } } return false }
func (r *TestRouter) FilterNamespaces(namespaces util.StringSet) { if len(namespaces) == 0 { r.State = make(map[string]ServiceUnit) } for k := range r.State { // TODO: the id of a service unit should be defined inside this class, not passed in from the outside // remove the leak of the abstraction when we refactor this code ns := strings.SplitN(k, "/", 2)[0] if namespaces.Has(ns) { continue } delete(r.State, k) } }
// Calls "udevadm trigger --action=change" for newly created "/dev/sd*" drives (exist only in after set). // This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed. func udevadmChangeToNewDrives(sdBeforeSet util.StringSet) error { sdAfter, err := filepath.Glob(diskSDPattern) if err != nil { return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) } for _, sd := range sdAfter { if !sdBeforeSet.Has(sd) { return udevadmChangeToDrive(sd) } } return nil }
func getFitPredicateFunctions(names util.StringSet, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() predicates := map[string]algorithm.FitPredicate{} for _, name := range names.List() { factory, ok := fitPredicateMap[name] if !ok { return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name) } predicates[name] = factory(args) } return predicates, nil }
// GetServerCertHostnames returns the set of hostnames that any serving certificate for master needs to be valid for.
func (args MasterArgs) GetServerCertHostnames() (util.StringSet, error) {
	// Resolve the externally visible master addresses; any failure aborts.
	masterAddr, err := args.GetMasterAddress()
	if err != nil {
		return nil, err
	}
	masterPublicAddr, err := args.GetMasterPublicAddress()
	if err != nil {
		return nil, err
	}
	assetPublicAddr, err := args.GetAssetPublicAddress()
	if err != nil {
		return nil, err
	}
	// Well-known in-cluster service names plus the resolved master hosts.
	allHostnames := util.NewStringSet(
		"localhost", "127.0.0.1",
		"openshift.default.svc.cluster.local",
		"openshift.default.svc",
		"openshift.default",
		"openshift",
		"kubernetes.default.svc.cluster.local",
		"kubernetes.default.svc",
		"kubernetes.default",
		"kubernetes",
		masterAddr.Host, masterPublicAddr.Host, assetPublicAddr.Host)
	listenIP := net.ParseIP(args.ListenArg.ListenAddr.Host)
	// add the IPs that might be used based on the ListenAddr.
	if listenIP != nil && listenIP.IsUnspecified() {
		allAddresses, _ := cmdutil.AllLocalIP4()
		for _, ip := range allAddresses {
			allHostnames.Insert(ip.String())
		}
	} else {
		allHostnames.Insert(args.ListenArg.ListenAddr.Host)
	}
	// Strip any ports so the certificate carries bare hostnames/IPs only.
	certHostnames := util.StringSet{}
	for hostname := range allHostnames {
		if host, _, err := net.SplitHostPort(hostname); err == nil {
			// add the hostname without the port
			certHostnames.Insert(host)
		} else {
			// add the originally specified hostname
			certHostnames.Insert(hostname)
		}
	}
	return certHostnames, nil
}