func validateVolumes(volumes []Volume) (util.StringSet, errs.ErrorList) {
	allErrs := errs.ErrorList{}

	allNames := util.StringSet{}
	for i := range volumes {
		vol := &volumes[i] // so we can set default values
		el := errs.ErrorList{}
		// TODO(thockin) enforce that a source is set once we deprecate the implied form.
		if vol.Source != nil {
			el = validateSource(vol.Source).Prefix("source")
		}
		if len(vol.Name) == 0 {
			el = append(el, errs.NewRequired("name", vol.Name))
		} else if !util.IsDNSLabel(vol.Name) {
			el = append(el, errs.NewInvalid("name", vol.Name))
		} else if allNames.Has(vol.Name) {
			el = append(el, errs.NewDuplicate("name", vol.Name))
		}
		if len(el) == 0 {
			allNames.Insert(vol.Name)
		} else {
			allErrs = append(allErrs, el.PrefixIndex(i)...)
		}
	}
	return allNames, allErrs
}

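// Every snippet in this collection leans on util.StringSet. The following is
// a rough sketch of the API these snippets assume (an approximation for
// reference, not necessarily the exact util package source): a string set
// built on a map with empty struct values. Assumes "sort" is imported.
type Empty struct{}

// StringSet is a set of strings, implemented via a map with empty values.
type StringSet map[string]Empty

// NewStringSet builds a set containing the given items.
func NewStringSet(items ...string) StringSet {
	s := StringSet{}
	s.Insert(items...)
	return s
}

// Insert adds items to the set.
func (s StringSet) Insert(items ...string) {
	for _, item := range items {
		s[item] = Empty{}
	}
}

// Has returns true if and only if item is contained in the set.
func (s StringSet) Has(item string) bool {
	_, contained := s[item]
	return contained
}

// HasAll returns true if and only if all items are contained in the set.
func (s StringSet) HasAll(items ...string) bool {
	for _, item := range items {
		if !s.Has(item) {
			return false
		}
	}
	return true
}

// List returns the contents as a sorted string slice.
func (s StringSet) List() []string {
	res := make([]string, 0, len(s))
	for key := range s {
		res = append(res, key)
	}
	sort.Strings(res)
	return res
}
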
func getPullSecretNames(serviceaccount *kapi.ServiceAccount) util.StringSet {
	names := util.StringSet{}
	for _, secret := range serviceaccount.ImagePullSecrets {
		names.Insert(secret.Name)
	}
	return names
}

func getSecretNames(secrets []*kapi.Secret) util.StringSet {
	names := util.StringSet{}
	for _, secret := range secrets {
		names.Insert(secret.Name)
	}
	return names
}

// Index returns a list of items that match on the index function.
// Index is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	indexKeys, err := indexFunc(obj)
	if err != nil {
		return nil, err
	}
	index := c.indices[indexName]

	// Need to de-dupe the return list. Since an object may produce multiple
	// index keys, the same item can be matched more than once.
	returnKeySet := util.StringSet{}
	for _, indexKey := range indexKeys {
		set := index[indexKey]
		for _, key := range set.List() {
			returnKeySet.Insert(key)
		}
	}

	list := []interface{}{}
	for absoluteKey := range returnKeySet {
		list = append(list, c.items[absoluteKey])
	}
	return list, nil
}

func computeStatus(statusList []*github.CombinedStatus, requiredContexts []string) string {
	states := util.StringSet{}
	providers := util.StringSet{}
	for ix := range statusList {
		status := statusList[ix]
		glog.V(8).Infof("Checking commit: %s", *status.SHA)
		glog.V(8).Infof("Checking commit: %v", status)
		states.Insert(*status.State)

		for _, subStatus := range status.Statuses {
			glog.V(8).Infof("Found status from: %v", subStatus)
			providers.Insert(*subStatus.Context)
		}
	}
	for _, provider := range requiredContexts {
		if !providers.Has(provider) {
			glog.V(8).Infof("Failed to find %s in %v", provider, providers)
			return "incomplete"
		}
	}

	switch {
	case states.Has("pending"):
		return "pending"
	case states.Has("error"):
		return "error"
	case states.Has("failure"):
		return "failure"
	default:
		return "success"
	}
}

// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]util.StringSet{}
	expected["b"] = util.NewStringSet("a", "c")
	expected["f"] = util.NewStringSet("e")
	expected["h"] = util.NewStringSet("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := util.StringSet{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}

func (o AddSecretOptions) AddSecretsToSAMountableSecrets(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) {
	secrets, err := o.getSecrets()
	if err != nil {
		return nil, err
	}
	if len(secrets) == 0 {
		return nil, errors.New("no secrets found")
	}

	currentSecrets := util.StringSet{}
	for _, secretRef := range serviceAccount.Secrets {
		currentSecrets.Insert(secretRef.Name)
	}

	for _, secret := range secrets {
		if currentSecrets.Has(secret.Name) {
			continue
		}
		serviceAccount.Secrets = append(serviceAccount.Secrets, api.ObjectReference{Name: secret.Name})
		currentSecrets.Insert(secret.Name)
	}

	return o.ClientInterface.ServiceAccounts(o.Namespace).Update(serviceAccount)
}

func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := util.StringSet{}
	for i, pod := range pods {
		var errlist []error
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := utilerrors.NewAggregate(errlist)
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, "failedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}

func validatePorts(ports []api.Port) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}

	allNames := util.StringSet{}
	for i, port := range ports {
		pErrs := errs.ValidationErrorList{}
		if len(port.Name) > 0 {
			if len(port.Name) > util.DNS1123LabelMaxLength || !util.IsDNSLabel(port.Name) {
				pErrs = append(pErrs, errs.NewFieldInvalid("name", port.Name, dnsLabelErrorMsg))
			} else if allNames.Has(port.Name) {
				pErrs = append(pErrs, errs.NewFieldDuplicate("name", port.Name))
			} else {
				allNames.Insert(port.Name)
			}
		}
		if port.ContainerPort == 0 {
			pErrs = append(pErrs, errs.NewFieldInvalid("containerPort", port.ContainerPort, portRangeErrorMsg))
		} else if !util.IsValidPortNum(port.ContainerPort) {
			pErrs = append(pErrs, errs.NewFieldInvalid("containerPort", port.ContainerPort, portRangeErrorMsg))
		}
		if port.HostPort != 0 && !util.IsValidPortNum(port.HostPort) {
			pErrs = append(pErrs, errs.NewFieldInvalid("hostPort", port.HostPort, portRangeErrorMsg))
		}
		if len(port.Protocol) == 0 {
			pErrs = append(pErrs, errs.NewFieldRequired("protocol", port.Protocol))
		} else if !supportedPortProtocols.Has(strings.ToUpper(string(port.Protocol))) {
			pErrs = append(pErrs, errs.NewFieldNotSupported("protocol", port.Protocol))
		}
		allErrs = append(allErrs, pErrs.PrefixIndex(i)...)
	}
	return allErrs
}

func validatePorts(ports []api.Port) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}

	allNames := util.StringSet{}
	for i := range ports {
		pErrs := errs.ValidationErrorList{}
		port := &ports[i] // so we can set default values
		if len(port.Name) > 0 {
			if len(port.Name) > 63 || !util.IsDNSLabel(port.Name) {
				pErrs = append(pErrs, errs.NewFieldInvalid("name", port.Name, ""))
			} else if allNames.Has(port.Name) {
				pErrs = append(pErrs, errs.NewFieldDuplicate("name", port.Name))
			} else {
				allNames.Insert(port.Name)
			}
		}
		if port.ContainerPort == 0 {
			pErrs = append(pErrs, errs.NewFieldRequired("containerPort", port.ContainerPort))
		} else if !util.IsValidPortNum(port.ContainerPort) {
			pErrs = append(pErrs, errs.NewFieldInvalid("containerPort", port.ContainerPort, ""))
		}
		if port.HostPort != 0 && !util.IsValidPortNum(port.HostPort) {
			pErrs = append(pErrs, errs.NewFieldInvalid("hostPort", port.HostPort, ""))
		}
		if len(port.Protocol) == 0 {
			port.Protocol = "TCP"
		} else if !supportedPortProtocols.Has(strings.ToUpper(string(port.Protocol))) {
			pErrs = append(pErrs, errs.NewFieldNotSupported("protocol", port.Protocol))
		}
		allErrs = append(allErrs, pErrs.PrefixIndex(i)...)
	}
	return allErrs
}

func validateContainers(containers []Container, volumes util.StringSet) errorList {
	allErrs := errorList{}

	allNames := util.StringSet{}
	for i := range containers {
		ctr := &containers[i] // so we can set default values
		if !util.IsDNSLabel(ctr.Name) {
			allErrs.Append(makeInvalidError("Container.Name", ctr.Name))
		} else if allNames.Has(ctr.Name) {
			allErrs.Append(makeDuplicateError("Container.Name", ctr.Name))
		} else {
			allNames.Insert(ctr.Name)
		}
		if len(ctr.Image) == 0 {
			allErrs.Append(makeInvalidError("Container.Image", ctr.Name))
		}
		allErrs.Append(validatePorts(ctr.Ports)...)
		allErrs.Append(validateEnv(ctr.Env)...)
		allErrs.Append(validateVolumeMounts(ctr.VolumeMounts, volumes)...)
	}
	// Check for colliding ports across all containers.
	// TODO(thockin): This really is dependent on the network config of the host (IP per pod?)
	// and the config of the new manifest. But we have not specced that out yet, so we'll just
	// make some assumptions for now. As of now, pods share a network namespace, which means
	// that every Port.HostPort across the whole pod must be unique.
	allErrs.Append(checkHostPortConflicts(containers)...)

	return allErrs
}

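// checkHostPortConflicts is referenced above but not included in this
// collection. The following is a plausible sketch (an assumption for
// illustration, not necessarily the original implementation) that applies the
// same StringSet de-dup pattern to Port.HostPort across every container in
// the pod. Assumes "strconv" is imported.
func checkHostPortConflicts(containers []Container) errorList {
	allErrs := errorList{}

	allPorts := util.StringSet{}
	for i := range containers {
		for _, port := range containers[i].Ports {
			if port.HostPort == 0 {
				continue // an unset host port cannot collide
			}
			str := strconv.Itoa(port.HostPort)
			if allPorts.Has(str) {
				allErrs.Append(makeDuplicateError("Port.HostPort", str))
			} else {
				allPorts.Insert(str)
			}
		}
	}
	return allErrs
}
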
func validateVolumes(volumes []api.Volume) (util.StringSet, errs.ValidationErrorList) {
	allErrs := errs.ValidationErrorList{}

	allNames := util.StringSet{}
	for i := range volumes {
		vol := &volumes[i] // so we can set default values
		el := errs.ValidationErrorList{}
		if vol.Source == nil {
			// TODO: Enforce that a source is set once we deprecate the implied form.
			vol.Source = &api.VolumeSource{
				EmptyDir: &api.EmptyDir{},
			}
		}
		el = validateSource(vol.Source).Prefix("source")
		if len(vol.Name) == 0 {
			el = append(el, errs.NewFieldRequired("name", vol.Name))
		} else if !util.IsDNSLabel(vol.Name) {
			el = append(el, errs.NewFieldInvalid("name", vol.Name, ""))
		} else if allNames.Has(vol.Name) {
			el = append(el, errs.NewFieldDuplicate("name", vol.Name))
		}
		if len(el) == 0 {
			allNames.Insert(vol.Name)
		} else {
			allErrs = append(allErrs, el.PrefixIndex(i)...)
		}
	}
	return allNames, allErrs
}

func validatePorts(ports []Port) errorList {
	allErrs := errorList{}

	allNames := util.StringSet{}
	for i := range ports {
		port := &ports[i] // so we can set default values
		if len(port.Name) > 0 {
			if len(port.Name) > 63 || !util.IsDNSLabel(port.Name) {
				allErrs.Append(makeInvalidError("Port.Name", port.Name))
			} else if allNames.Has(port.Name) {
				allErrs.Append(makeDuplicateError("Port.Name", port.Name))
			} else {
				allNames.Insert(port.Name)
			}
		}
		if !util.IsValidPortNum(port.ContainerPort) {
			allErrs.Append(makeInvalidError("Port.ContainerPort", port.ContainerPort))
		}
		if port.HostPort == 0 {
			port.HostPort = port.ContainerPort
		} else if !util.IsValidPortNum(port.HostPort) {
			allErrs.Append(makeInvalidError("Port.HostPort", port.HostPort))
		}
		if len(port.Protocol) == 0 {
			port.Protocol = "TCP"
		} else if !supportedPortProtocols.Has(strings.ToUpper(port.Protocol)) {
			allErrs.Append(makeNotSupportedError("Port.Protocol", port.Protocol))
		}
	}
	return allErrs
}

// Find all sibling pods in the service and post to their /write handler.
func contactOthers(state *State) {
	defer state.doneContactingPeers()
	masterRO := url.URL{
		Scheme: "http",
		Host:   os.Getenv("KUBERNETES_RO_SERVICE_HOST") + ":" + os.Getenv("KUBERNETES_RO_SERVICE_PORT"),
		Path:   "/api/v1beta1",
	}
	client := &client.Client{client.NewRESTClient(&masterRO, "v1beta1", latest.Codec, true, 5, 10)}

	// Do this repeatedly, in case there's some propagation delay with getting
	// newly started pods into the endpoints list.
	for i := 0; i < 15; i++ {
		endpoints, err := client.Endpoints(*namespace).Get(*service)
		if err != nil {
			state.Logf("Unable to read the endpoints for %v/%v: %v; will try again.", *namespace, *service, err)
			time.Sleep(time.Duration(1+rand.Intn(10)) * time.Second)
		}

		eps := util.StringSet{}
		for _, ss := range endpoints.Subsets {
			for _, a := range ss.Addresses {
				for _, p := range ss.Ports {
					eps.Insert(fmt.Sprintf("http://%s:%d", a.IP, p.Port))
				}
			}
		}
		for ep := range eps {
			state.Logf("Attempting to contact %s", ep)
			contactSingle(ep, state)
		}
		time.Sleep(5 * time.Second)
	}
}

// OnUpdate manages the active set of service proxies.
// Active service proxies are reinitialized if found in the update set or
// shutdown if missing from the update set.
func (proxier *Proxier) OnUpdate(services []api.Service) {
	glog.Infof("Received update notice: %+v", services)
	activeServices := util.StringSet{}
	for _, service := range services {
		activeServices.Insert(service.ID)
		info, exists := proxier.getServiceInfo(service.ID)
		if exists && info.port == service.Port {
			continue
		}
		if exists {
			proxier.StopProxy(service.ID)
		}
		glog.Infof("Adding a new service %s on port %d", service.ID, service.Port)
		listener, err := proxier.addService(service.ID, service.Port)
		if err != nil {
			glog.Infof("Failed to start listening for %s on %d", service.ID, service.Port)
			continue
		}
		proxier.setServiceInfo(service.ID, &serviceInfo{
			port:     service.Port,
			active:   true,
			listener: listener,
		})
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	for name, info := range proxier.serviceMap {
		if !activeServices.Has(name) {
			proxier.stopProxyInternal(info)
		}
	}
}

func (a *openshiftAuthorizer) getAllowedSubjectsFromNamespaceBindings(ctx kapi.Context, passedAttributes AuthorizationAttributes) (util.StringSet, util.StringSet, error) {
	attributes := coerceToDefaultAuthorizationAttributes(passedAttributes)

	roleBindings, err := a.ruleResolver.GetRoleBindings(ctx)
	if err != nil {
		return nil, nil, err
	}

	users := util.StringSet{}
	groups := util.StringSet{}
	for _, roleBinding := range roleBindings {
		role, err := a.ruleResolver.GetRole(roleBinding)
		if err != nil {
			return nil, nil, err
		}

		for _, rule := range role.Rules() {
			matches, err := attributes.RuleMatches(rule)
			if err != nil {
				return nil, nil, err
			}

			if matches {
				users.Insert(roleBinding.Users().List()...)
				groups.Insert(roleBinding.Groups().List()...)
			}
		}
	}

	return users, groups, nil
}

// Test public interface
func doTestStore(t *testing.T, store Store) {
	store.Add("foo", "bar")
	if item, ok := store.Get("foo"); !ok {
		t.Errorf("didn't find inserted item")
	} else {
		if e, a := "bar", item.(string); e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
	}
	store.Update("foo", "baz")
	if item, ok := store.Get("foo"); !ok {
		t.Errorf("didn't find inserted item")
	} else {
		if e, a := "baz", item.(string); e != a {
			t.Errorf("expected %v, got %v", e, a)
		}
	}
	store.Delete("foo")
	if _, ok := store.Get("foo"); ok {
		t.Errorf("found deleted item??")
	}

	store.Add("a", "b")
	store.Add("c", "d")
	store.Add("e", "e")
	found := util.StringSet{}
	for _, item := range store.List() {
		found.Insert(item.(string))
	}
	if !found.HasAll("b", "d", "e") {
		t.Errorf("missing items")
	}
	if len(found) != 3 {
		t.Errorf("extra items")
	}
}

// List returns the set of namespace names the user has access to view
func (ac *AuthorizationCache) List(userInfo user.Info) (*kapi.NamespaceList, error) {
	keys := util.StringSet{}
	user := userInfo.GetName()
	groups := userInfo.GetGroups()

	obj, exists, _ := ac.userSubjectRecordStore.GetByKey(user)
	if exists {
		subjectRecord := obj.(*subjectRecord)
		keys.Insert(subjectRecord.namespaces.List()...)
	}

	for _, group := range groups {
		obj, exists, _ := ac.groupSubjectRecordStore.GetByKey(group)
		if exists {
			subjectRecord := obj.(*subjectRecord)
			keys.Insert(subjectRecord.namespaces.List()...)
		}
	}

	namespaceList := &kapi.NamespaceList{}
	for key := range keys {
		namespace, exists, err := ac.namespaceStore.GetByKey(key)
		if err != nil {
			return nil, err
		}
		if exists {
			namespaceList.Items = append(namespaceList.Items, *namespace.(*kapi.Namespace))
		}
	}
	return namespaceList, nil
}

func runMasterServiceTest(client *client.Client) {
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := util.StringSet{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("no endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("no RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}

// OnUpdate manages the active set of service proxies.
// Active service proxies are reinitialized if found in the update set or
// shutdown if missing from the update set.
func (proxier *Proxier) OnUpdate(services []api.Service) {
	glog.V(4).Infof("Received update notice: %+v", services)
	activeServices := util.StringSet{}
	for _, service := range services {
		activeServices.Insert(service.Name)
		info, exists := proxier.getServiceInfo(service.Name)
		serviceIP := net.ParseIP(service.Spec.PortalIP)
		// TODO: check health of the socket? What if ProxyLoop exited?
		if exists && info.portalPort == service.Spec.Port && info.portalIP.Equal(serviceIP) {
			continue
		}
		if exists && (info.portalPort != service.Spec.Port || !info.portalIP.Equal(serviceIP) || !ipsEqual(service.Spec.PublicIPs, info.publicIP)) {
			glog.V(4).Infof("Something changed for service %q: stopping it", service.Name)
			err := proxier.closePortal(service.Name, info)
			if err != nil {
				glog.Errorf("Failed to close portal for %q: %v", service.Name, err)
			}
			err = proxier.stopProxy(service.Name, info)
			if err != nil {
				glog.Errorf("Failed to stop service %q: %v", service.Name, err)
			}
		}
		glog.V(1).Infof("Adding new service %q at %s:%d/%s (local :%d)", service.Name, serviceIP, service.Spec.Port, service.Spec.Protocol, service.Spec.ProxyPort)
		info, err := proxier.addServiceOnPort(service.Name, service.Spec.Protocol, service.Spec.ProxyPort, udpIdleTimeout)
		if err != nil {
			glog.Errorf("Failed to start proxy for %q: %v", service.Name, err)
			continue
		}
		info.portalIP = serviceIP
		info.portalPort = service.Spec.Port
		info.publicIP = service.Spec.PublicIPs
		info.sessionAffinityType = service.Spec.SessionAffinity
		// TODO: parameterize this in the types api file as an attribute of sticky session. For now it's hardcoded to 3 hours.
		info.stickyMaxAgeMinutes = 180
		glog.V(4).Infof("info: %+v", info)

		err = proxier.openPortal(service.Name, info)
		if err != nil {
			glog.Errorf("Failed to open portal for %q: %v", service.Name, err)
		}
		proxier.loadBalancer.NewService(service.Name, info.sessionAffinityType, info.stickyMaxAgeMinutes)
	}
	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	for name, info := range proxier.serviceMap {
		if !activeServices.Has(name) {
			glog.V(1).Infof("Stopping service %q", name)
			err := proxier.closePortal(name, info)
			if err != nil {
				glog.Errorf("Failed to close portal for %q: %v", name, err)
			}
			err = proxier.stopProxyInternal(name, info)
			if err != nil {
				glog.Errorf("Failed to stop service %q: %v", name, err)
			}
		}
	}
}

func TestFilterQuotaPods(t *testing.T) {
	pods := []api.Pod{
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-running"},
			Status:     api.PodStatus{Phase: api.PodRunning},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-pending"},
			Status:     api.PodStatus{Phase: api.PodPending},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-succeeded"},
			Status:     api.PodStatus{Phase: api.PodSucceeded},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-unknown"},
			Status:     api.PodStatus{Phase: api.PodUnknown},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
			Status:     api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-always"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyAlways,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-on-failure"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyOnFailure,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-never"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyNever,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
	}
	expectedResults := util.NewStringSet("pod-running", "pod-pending", "pod-unknown",
		"pod-failed-with-restart-always", "pod-failed-with-restart-on-failure")
	actualResults := util.StringSet{}
	result := FilterQuotaPods(pods)
	for i := range result {
		actualResults.Insert(result[i].Name)
	}
	if len(expectedResults) != len(actualResults) || !actualResults.HasAll(expectedResults.List()...) {
		t.Errorf("Expected results %v, Actual results %v", expectedResults, actualResults)
	}
}

// Contains returns a util.StringSet containing all IDs of the stored items.
// This is a snapshot of a moment in time, and one should keep in mind that
// other go routines can add or remove items after you call this.
func (c *FIFO) Contains() util.StringSet {
	c.lock.RLock()
	defer c.lock.RUnlock()
	set := util.StringSet{}
	for id := range c.items {
		set.Insert(id)
	}
	return set
}

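// Usage sketch (a hypothetical caller, not part of the original source):
// because Contains copies the IDs while holding the read lock, the returned
// set is a stable snapshot and is unaffected by later mutations of the queue.
func logQueuedIDs(fifo *FIFO) {
	ids := fifo.Contains() // snapshot taken under the read lock
	for _, id := range ids.List() {
		fmt.Printf("queued: %s\n", id)
	}
}
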
// ContainedIDs returns a util.StringSet containing all IDs of the stored items.
// This is a snapshot of a moment in time, and one should keep in mind that
// other go routines can add or remove items after you call this.
func (c *DelayFIFO) ContainedIDs() util.StringSet {
	c.rlock()
	defer c.runlock()
	set := util.StringSet{}
	for id := range c.items {
		set.Insert(id)
	}
	return set
}

func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, util.StringSet, error) {
	g := osgraph.New()

	loaders := []GraphLoader{
		&serviceLoader{namespace: namespace, lister: d.K},
		&serviceAccountLoader{namespace: namespace, lister: d.K},
		&secretLoader{namespace: namespace, lister: d.K},
		&rcLoader{namespace: namespace, lister: d.K},
		&podLoader{namespace: namespace, lister: d.K},
		&bcLoader{namespace: namespace, lister: d.C},
		&buildLoader{namespace: namespace, lister: d.C},
		&isLoader{namespace: namespace, lister: d.C},
		&dcLoader{namespace: namespace, lister: d.C},
	}
	loadingFuncs := []func() error{}
	for _, loader := range loaders {
		loadingFuncs = append(loadingFuncs, loader.Load)
	}

	forbiddenResources := util.StringSet{}
	if errs := parallel.Run(loadingFuncs...); len(errs) > 0 {
		actualErrors := []error{}
		for _, err := range errs {
			if kapierrors.IsForbidden(err) {
				forbiddenErr := err.(*kapierrors.StatusError)
				if (forbiddenErr.Status().Details != nil) && (len(forbiddenErr.Status().Details.Kind) > 0) {
					forbiddenResources.Insert(forbiddenErr.Status().Details.Kind)
				}
				continue
			}
			actualErrors = append(actualErrors, err)
		}

		if len(actualErrors) > 0 {
			return g, forbiddenResources, utilerrors.NewAggregate(actualErrors)
		}
	}

	for _, loader := range loaders {
		loader.AddToGraph(g)
	}

	kubeedges.AddAllExposedPodTemplateSpecEdges(g)
	kubeedges.AddAllExposedPodEdges(g)
	kubeedges.AddAllManagedByRCPodEdges(g)
	kubeedges.AddAllRequestedServiceAccountEdges(g)
	kubeedges.AddAllMountableSecretEdges(g)
	kubeedges.AddAllMountedSecretEdges(g)
	buildedges.AddAllInputOutputEdges(g)
	buildedges.AddAllBuildEdges(g)
	deployedges.AddAllTriggerEdges(g)
	deployedges.AddAllDeploymentEdges(g)
	imageedges.AddAllImageStreamRefEdges(g)

	return g, forbiddenResources, nil
}

func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}

func printPolicy(policy *authorizationapi.Policy, w io.Writer, withNamespace bool) error {
	roleNames := util.StringSet{}
	for key := range policy.Roles {
		roleNames.Insert(key)
	}
	rolesString := strings.Join(roleNames.List(), ", ")
	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", policy.Name, rolesString, policy.LastModified)
	return err
}

func printPolicyBinding(policyBinding *authorizationapi.PolicyBinding, w io.Writer, withNamespace, wide bool, columnLabels []string) error {
	roleBindingNames := util.StringSet{}
	for key := range policyBinding.RoleBindings {
		roleBindingNames.Insert(key)
	}
	roleBindingsString := strings.Join(roleBindingNames.List(), ", ")
	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", policyBinding.Name, roleBindingsString, policyBinding.LastModified)
	return err
}

func printDeploymentConfig(dc *deployapi.DeploymentConfig, w io.Writer, withNamespace, wide bool, columnLabels []string) error {
	triggers := util.StringSet{}
	for _, trigger := range dc.Triggers {
		triggers.Insert(string(trigger.Type))
	}
	tStr := strings.Join(triggers.List(), ", ")
	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", dc.Name, tStr, dc.LatestVersion)
	return err
}

// ContainedIDs returns a util.StringSet containing all IDs of the enqueued items.
// This is a snapshot of a moment in time, and one should keep in mind that
// other go routines can add or remove items after you call this.
func (eq *EventQueue) ContainedIDs() util.StringSet {
	eq.lock.RLock()
	defer eq.lock.RUnlock()

	s := util.StringSet{}
	for _, key := range eq.queue {
		s.Insert(key)
	}
	return s
}

// getMatchingClusters examines the kubeconfig for all clusters that point to the same server
func getMatchingClusters(clientConfig kclient.Config, kubeconfig clientcmdapi.Config) util.StringSet {
	ret := util.StringSet{}

	for key, cluster := range kubeconfig.Clusters {
		if (cluster.Server == clientConfig.Host) &&
			(cluster.InsecureSkipTLSVerify == clientConfig.Insecure) &&
			(cluster.CertificateAuthority == clientConfig.CAFile) &&
			(bytes.Compare(cluster.CertificateAuthorityData, clientConfig.CAData) == 0) {
			ret.Insert(key)
		}
	}

	return ret
}