// Index returns a list of items that match on the index function.
// Index is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	indexKeys, err := indexFunc(obj)
	if err != nil {
		return nil, err
	}
	index := c.indices[indexName]

	// need to de-dupe the return list. Since multiple keys are allowed, this can happen.
	returnKeySet := sets.String{}
	for _, indexKey := range indexKeys {
		set := index[indexKey]
		for _, key := range set.UnsortedList() {
			returnKeySet.Insert(key)
		}
	}

	list := make([]interface{}, 0, returnKeySet.Len())
	for absoluteKey := range returnKeySet {
		list = append(list, c.items[absoluteKey])
	}
	return list, nil
}
// SeenAllSources returns true if seenSources contains all sources in the
// config, and also this config has received a SET message from each source.
func (c *PodConfig) SeenAllSources(seenSources sets.String) bool {
	if c.pods == nil {
		return false
	}
	glog.V(6).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources)
	return seenSources.HasAll(c.sources.List()...) && c.pods.seenSources(c.sources.List()...)
}
// getPriorityFunctionConfigs builds the priority configs for the given priority
// names, holding the scheduler factory lock while looking each one up.
func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	configs := []algorithm.PriorityConfig{}
	for _, name := range names.List() {
		factory, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		if factory.Function != nil {
			configs = append(configs, algorithm.PriorityConfig{
				Function: factory.Function(args),
				Weight:   factory.Weight,
			})
		} else {
			mapFunction, reduceFunction := factory.MapReduceFunction(args)
			configs = append(configs, algorithm.PriorityConfig{
				Map:    mapFunction,
				Reduce: reduceFunction,
				Weight: factory.Weight,
			})
		}
	}
	return configs, nil
}
// String formats the given PodDiff and returns it as a string, skipping pods
// whose phase is in ignorePhases.
func (p PodDiff) String(ignorePhases sets.String) string {
	ret := ""
	for name, info := range p {
		if ignorePhases.Has(info.phase) {
			continue
		}
		if info.phase == nonExist {
			ret += fmt.Sprintf("Pod %v was deleted, had phase %v and host %v\n", name, info.oldPhase, info.oldHostname)
			continue
		}
		phaseChange, hostChange := false, false
		msg := fmt.Sprintf("Pod %v ", name)
		if info.oldPhase != info.phase {
			phaseChange = true
			if info.oldPhase == nonExist {
				msg += fmt.Sprintf("in phase %v ", info.phase)
			} else {
				msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
			}
		}
		if info.oldHostname != info.hostname {
			hostChange = true
			if info.oldHostname == nonExist || info.oldHostname == "" {
				msg += fmt.Sprintf("assigned host %v ", info.hostname)
			} else {
				msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
			}
		}
		if phaseChange || hostChange {
			ret += msg + "\n"
		}
	}
	return ret
}
// Values returns the Requirement's values as a sets.String.
func (r *Requirement) Values() sets.String {
	ret := sets.String{}
	for i := range r.strValues {
		ret.Insert(r.strValues[i])
	}
	return ret
}
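// Hedged usage sketch (not part of the original source): assuming NewRequirement
// and the selection package shown elsewhere in this listing are visible from the
// same package, Values recovers the value set of a constructed requirement.
func exampleRequirementValues() {
	req, err := NewRequirement("env", selection.In, []string{"prod", "staging"})
	if err != nil {
		fmt.Println(err)
		return
	}
	vals := req.Values()
	fmt.Println(vals.List())      // sorted: [prod staging]
	fmt.Println(vals.Has("prod")) // true
}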
// nextClassID scans the output of "tc class show dev <iface>" and returns the
// first unused class ID under the 1: major handle.
func (t *tcShaper) nextClassID() (int, error) {
	data, err := t.e.Command("tc", "class", "show", "dev", t.iface).CombinedOutput()
	if err != nil {
		return -1, err
	}

	scanner := bufio.NewScanner(bytes.NewBuffer(data))
	classes := sets.String{}
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		// skip empty lines
		if len(line) == 0 {
			continue
		}
		parts := strings.Split(line, " ")
		// expected tc line:
		// class htb 1:1 root prio 0 rate 1000Kbit ceil 1000Kbit burst 1600b cburst 1600b
		if len(parts) != 14 {
			return -1, fmt.Errorf("unexpected output from tc: %s (%v)", scanner.Text(), parts)
		}
		classes.Insert(parts[2])
	}

	// Make sure it doesn't go forever
	for nextClass := 1; nextClass < 10000; nextClass++ {
		if !classes.Has(fmt.Sprintf("1:%d", nextClass)) {
			return nextClass, nil
		}
	}
	// This should really never happen
	return -1, fmt.Errorf("exhausted class space, please try again")
}
// clusterSyncLoop observes changes to the set of running clusters, applies all
// services to newly added clusters, and adds DNS records for the changes.
func (s *ServiceController) clusterSyncLoop() {
	var servicesToUpdate []*cachedService
	// should we remove cache for cluster from ready to not ready? should remove the condition predicate if no
	clusters, err := s.clusterStore.ClusterCondition(getClusterConditionPredicate()).List()
	if err != nil {
		glog.Infof("Failed to get cluster list")
		return
	}
	newClusters := clustersFromList(&clusters)
	var newSet, increase sets.String
	newSet = sets.NewString(newClusters...)
	if newSet.Equal(s.knownClusterSet) {
		// The set of cluster names in the services in the federation hasn't changed, but we can retry
		// updating any services that we failed to update last time around.
		servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters)
		return
	}
	glog.Infof("Detected change in list of cluster names. New set: %v, Old set: %v", newSet, s.knownClusterSet)
	increase = newSet.Difference(s.knownClusterSet)
	// do nothing when a cluster is removed.
	if increase != nil {
		// Try updating all services, and save the ones that fail to try again next
		// round.
		servicesToUpdate = s.serviceCache.allServices()
		numServices := len(servicesToUpdate)
		for newCluster := range increase {
			glog.Infof("New cluster observed %s", newCluster)
			s.updateAllServicesToCluster(servicesToUpdate, newCluster)
		}
		servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters)
		glog.Infof("Successfully updated %d out of %d DNS records to direct traffic to the updated cluster",
			numServices-len(servicesToUpdate), numServices)
	}
	s.knownClusterSet = newSet
}
// parseRequirement parses a single requirement (key, operator, and any values)
// from the selector string being parsed.
func (p *Parser) parseRequirement() (*Requirement, error) {
	key, operator, err := p.parseKeyAndInferOperator()
	if err != nil {
		return nil, err
	}
	if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked
		return NewRequirement(key, operator, []string{})
	}
	operator, err = p.parseOperator()
	if err != nil {
		return nil, err
	}
	var values sets.String
	switch operator {
	case selection.In, selection.NotIn:
		values, err = p.parseValues()
	case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan:
		values, err = p.parseExactValue()
	}
	if err != nil {
		return nil, err
	}
	return NewRequirement(key, operator, values.List())
}
// getRequirement is a test helper that builds a Requirement and reports a test
// error if construction fails.
func getRequirement(key string, op selection.Operator, vals sets.String, t *testing.T) Requirement {
	req, err := NewRequirement(key, op, vals.List())
	if err != nil {
		t.Errorf("NewRequirement(%v, %v, %v) resulted in error:%v", key, op, vals, err)
		return Requirement{}
	}
	return *req
}
// copyAndReplace returns a copy of set in which replaceWhat, if present, has
// been replaced by replaceWith.
func copyAndReplace(set sets.String, replaceWhat, replaceWith string) sets.String {
	result := sets.NewString(set.List()...)
	if result.Has(replaceWhat) {
		result.Delete(replaceWhat)
		result.Insert(replaceWith)
	}
	return result
}
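// Hedged usage sketch (not part of the original source): copyAndReplace never
// mutates its input, so the original set stays intact.
func exampleCopyAndReplace() {
	original := sets.NewString("get", "list", "watch")
	replaced := copyAndReplace(original, "watch", "observe")
	fmt.Println(original.List()) // [get list watch]
	fmt.Println(replaced.List()) // [get list observe]
}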
// findKnownValue returns the index of the first element of parts that is
// present in valueOptions, or -1 if none is found.
func findKnownValue(parts []string, valueOptions sets.String) int {
	for i := range parts {
		if valueOptions.Has(parts[i]) {
			return i
		}
	}
	return -1
}
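// Hedged usage sketch (not part of the original source): the returned index can
// be used to locate a recognized token inside a tokenized line.
func exampleFindKnownValue() {
	options := sets.NewString("MiB", "GiB")
	parts := []string{"memory", "limit", "GiB", "4"}
	if i := findKnownValue(parts, options); i >= 0 {
		fmt.Println(parts[i]) // GiB
	} else {
		fmt.Println("no known value found")
	}
}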
// GetNamespacesFromPodAffinityTerm returns a set of namespace names
// according to the namespaces indicated in podAffinityTerm.
// 1. If Namespaces is nil, the given pod's namespace is used.
// 2. If Namespaces is an empty list, all namespaces are considered.
func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String {
	names := sets.String{}
	if podAffinityTerm.Namespaces == nil {
		names.Insert(pod.Namespace)
	} else if len(podAffinityTerm.Namespaces) != 0 {
		names.Insert(podAffinityTerm.Namespaces...)
	}
	return names
}
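// Hedged usage sketch (not part of the original source): a nil Namespaces field
// scopes the term to the pod's own namespace, while an explicit list is used
// as-is; an empty non-nil list yields an empty set, meaning all namespaces.
func exampleGetNamespacesFromPodAffinityTerm() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "team-a"}}

	sameNamespace := GetNamespacesFromPodAffinityTerm(pod, &v1.PodAffinityTerm{})
	fmt.Println(sameNamespace.List()) // [team-a]

	explicit := GetNamespacesFromPodAffinityTerm(pod, &v1.PodAffinityTerm{Namespaces: []string{"team-b"}})
	fmt.Println(explicit.List()) // [team-b]
}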
func ExampleNewInformer() {
	// source simulates an apiserver object endpoint.
	source := fcache.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := NewInformer(
		source,
		&v1.Pod{},
		time.Millisecond*100,
		ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid -- the fake source doesn't
		// call validation or anything.
		source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
// filterUnmountedVolumes adds each element of expectedVolumes that is not in
// mountedVolumes to a list of unmountedVolumes and returns it.
func filterUnmountedVolumes(mountedVolumes sets.String, expectedVolumes []string) []string {
	unmountedVolumes := []string{}
	for _, expectedVolume := range expectedVolumes {
		if !mountedVolumes.Has(expectedVolume) {
			unmountedVolumes = append(unmountedVolumes, expectedVolume)
		}
	}
	return unmountedVolumes
}
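// Hedged usage sketch (not part of the original source): the returned slice
// preserves the order of expectedVolumes.
func exampleFilterUnmountedVolumes() {
	mounted := sets.NewString("vol-a", "vol-c")
	expected := []string{"vol-a", "vol-b", "vol-c", "vol-d"}
	fmt.Println(filterUnmountedVolumes(mounted, expected)) // [vol-b vol-d]
}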
// enforcePodContainerConstraints checks for required resources that are not set on this container and
// adds them to missingSet.
func enforcePodContainerConstraints(container *api.Container, requiredSet, missingSet sets.String) {
	requests := container.Resources.Requests
	limits := container.Resources.Limits
	containerUsage := podUsageHelper(requests, limits)
	containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
	if !containerSet.Equal(requiredSet) {
		difference := requiredSet.Difference(containerSet)
		missingSet.Insert(difference.List()...)
	}
}
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
// matches the namespace and selector defined by <affinityPod>'s <term>.
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
	if len(namespaces) != 0 && !namespaces.Has(pod.Namespace) {
		return false
	}
	if !selector.Matches(labels.Set(pod.Labels)) {
		return false
	}
	return true
}
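// Hedged usage sketch (not part of the original source): an empty namespace set
// places no namespace restriction, so only the label selector is checked.
func examplePodMatchesTermsNamespaceAndSelector() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Namespace: "default",
		Labels:    map[string]string{"app": "web"},
	}}
	namespaces := sets.NewString("default", "kube-system")
	selector := labels.SelectorFromSet(labels.Set{"app": "web"})

	fmt.Println(PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector))    // true
	fmt.Println(PodMatchesTermsNamespaceAndSelector(pod, sets.String{}, selector)) // true (no namespace restriction)
}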
// BasicLongRunningRequestCheck returns a LongRunningRequestCheck that reports true
// if the given request has one of the specified verbs or one of the specified subresources.
func BasicLongRunningRequestCheck(longRunningVerbs, longRunningSubresources sets.String) LongRunningRequestCheck {
	return func(r *http.Request, requestInfo *apirequest.RequestInfo) bool {
		if longRunningVerbs.Has(requestInfo.Verb) {
			return true
		}
		if requestInfo.IsResourceRequest && longRunningSubresources.Has(requestInfo.Subresource) {
			return true
		}
		return false
	}
}
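// Hedged usage sketch (not part of the original source): the verb and
// subresource sets below are illustrative values, not mandated by this package.
// The returned closure never dereferences the *http.Request, so nil is fine here.
func exampleBasicLongRunningRequestCheck() {
	isLongRunning := BasicLongRunningRequestCheck(
		sets.NewString("watch", "proxy"),
		sets.NewString("attach", "exec", "proxy", "log", "portforward"),
	)
	info := &apirequest.RequestInfo{Verb: "watch", IsResourceRequest: true}
	fmt.Println(isLongRunning(nil, info)) // true: "watch" is a long-running verb
}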
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
	orderedPaths := paths.List()
	for ii := len(orderedPaths) - 1; ii >= 0; ii-- {
		if err := os.Remove(path.Join(w.targetDir, orderedPaths[ii])); err != nil {
			glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, orderedPaths[ii], err)
			return err
		}
	}

	return nil
}
// diagnoseMissingEndpoints prints debug information about the endpoints that
// are NOT in the given list of foundEndpoints. These are the endpoints we
// expected a response from.
func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets.String) {
	for _, e := range config.EndpointPods {
		if foundEndpoints.Has(e.Name) {
			continue
		}
		Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
		desc, _ := RunKubectl(
			"describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
		Logf(desc)
	}
}
// NewRequestInfoResolver builds a RequestInfoFactory from the server's configured
// API prefixes, marking the legacy prefixes as groupless.
func NewRequestInfoResolver(c *Config) *apirequest.RequestInfoFactory {
	apiPrefixes := sets.NewString(strings.Trim(APIGroupPrefix, "/")) // all possible API prefixes
	legacyAPIPrefixes := sets.String{}                               // APIPrefixes that won't have groups (legacy)
	for legacyAPIPrefix := range c.LegacyAPIGroupPrefixes {
		apiPrefixes.Insert(strings.Trim(legacyAPIPrefix, "/"))
		legacyAPIPrefixes.Insert(strings.Trim(legacyAPIPrefix, "/"))
	}

	return &apirequest.RequestInfoFactory{
		APIPrefixes:          apiPrefixes,
		GrouplessAPIPrefixes: legacyAPIPrefixes,
	}
}
// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order:
//  1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR
//  2. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy
//     kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version,
//     all other groups alphabetical.
func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper {
	unionMapper := meta.MultiRESTMapper{}
	unionedGroups := sets.NewString()
	for enabledVersion := range m.enabledVersions {
		if !unionedGroups.Has(enabledVersion.Group) {
			unionedGroups.Insert(enabledVersion.Group)
			groupMeta := m.groupMetaMap[enabledVersion.Group]
			unionMapper = append(unionMapper, groupMeta.RESTMapper)
		}
	}

	if len(versionPatterns) != 0 {
		resourcePriority := []schema.GroupVersionResource{}
		kindPriority := []schema.GroupVersionKind{}
		for _, versionPriority := range versionPatterns {
			resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
			kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
		}

		return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
	}

	if len(m.envRequestedVersions) != 0 {
		resourcePriority := []schema.GroupVersionResource{}
		kindPriority := []schema.GroupVersionKind{}

		for _, versionPriority := range m.envRequestedVersions {
			resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
			kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
		}

		return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
	}

	prioritizedGroups := []string{"", "extensions", "metrics"}
	resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...)

	prioritizedGroupsSet := sets.NewString(prioritizedGroups...)
	remainingGroups := sets.String{}
	for enabledVersion := range m.enabledVersions {
		if !prioritizedGroupsSet.Has(enabledVersion.Group) {
			remainingGroups.Insert(enabledVersion.Group)
		}
	}

	remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...)
	resourcePriority = append(resourcePriority, remainingResourcePriority...)
	kindPriority = append(kindPriority, remainingKindPriority...)

	return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
}
// udevadmChangeToNewDrives triggers the application of udev rules by calling
// "udevadm trigger --action=change" for newly created "/dev/sd*" drives (those
// that exist only in the after set). This is a workaround for Issue #7972.
// Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToNewDrives(sdBeforeSet sets.String) error {
	sdAfter, err := filepath.Glob(diskSDPattern)
	if err != nil {
		return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
	}

	for _, sd := range sdAfter {
		if !sdBeforeSet.Has(sd) {
			return udevadmChangeToDrive(sd)
		}
	}

	return nil
}
// getFitPredicateFunctions instantiates the fit predicates registered under the
// given names, holding the scheduler factory lock during lookup.
func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	predicates := map[string]algorithm.FitPredicate{}
	for _, name := range names.List() {
		factory, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}
	return predicates, nil
}
// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name.
// Volumes are generally round-robined across all active zones, using the hash of the PVC name.
// However, if the PVC name ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
	// We create the volume in a zone determined by the name.
	// Eventually the scheduler will coordinate placement into an available zone.
	var hash uint32
	var index uint32

	if pvcName == "" {
		// We should always be called with a name; this shouldn't happen.
		glog.Warningf("No name defined during volume create; choosing random zone")
		hash = rand.Uint32()
	} else {
		hashString := pvcName

		// Heuristic to make sure that volumes in a StatefulSet are spread across zones.
		// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
		// where Id is an integer index.
		lastDash := strings.LastIndexByte(pvcName, '-')
		if lastDash != -1 {
			petIDString := pvcName[lastDash+1:]
			petID, err := strconv.ParseUint(petIDString, 10, 32)
			if err == nil {
				// Offset by the pet id, so we round-robin across zones
				index = uint32(petID)
				// We still hash the volume name, but only the base
				hashString = pvcName[:lastDash]

				glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
			}
		}

		// We hash the (base) volume name, so we don't bias towards the first N zones
		h := fnv.New32()
		h.Write([]byte(hashString))
		hash = h.Sum32()
	}

	// Zones.List returns zones in a consistent order (sorted).
	// We do have a potential failure case where volumes will not be properly spread,
	// if the set of zones changes during StatefulSet volume creation. However, this is
	// probably relatively unlikely because we expect the set of zones to be essentially
	// static for clusters.
	// Hopefully we can address this problem if/when we do full scheduler integration of
	// PVC placement (which could also e.g. avoid putting volumes in overloaded or
	// unhealthy zones).
	zoneSlice := zones.List()
	zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]

	glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
	return zone
}
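// Hedged usage sketch (not part of the original source): for StatefulSet-style
// PVC names that share a base and differ only in the trailing index, consecutive
// indices land in consecutive zones of the sorted zone list.
func exampleChooseZoneForVolume() {
	zones := sets.NewString("us-east-1a", "us-east-1b", "us-east-1c")
	for _, pvc := range []string{"www-web-0", "www-web-1", "www-web-2"} {
		// Each call is deterministic for a given name and zone set.
		fmt.Printf("%s -> %s\n", pvc, ChooseZoneForVolume(zones, pvc))
	}
}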
// getWebserverEndpoints returns the webserver endpoints as a sets.String, with
// each entry in the format "http://{ip}:{port}".
func getWebserverEndpoints(client clientset.Interface) sets.String {
	endpoints, err := client.Core().Endpoints(*namespace).Get(*service, v1.GetOptions{})
	eps := sets.String{}
	if err != nil {
		state.Logf("Unable to read the endpoints for %v/%v: %v.", *namespace, *service, err)
		return eps
	}
	for _, ss := range endpoints.Subsets {
		for _, a := range ss.Addresses {
			for _, p := range ss.Ports {
				eps.Insert(fmt.Sprintf("http://%s:%d", a.IP, p.Port))
			}
		}
	}
	return eps
}
// getPodServiceMemberships returns the keys of the services that select the
// given pod.
func (e *EndpointController) getPodServiceMemberships(pod *v1.Pod) (sets.String, error) {
	set := sets.String{}
	services, err := e.serviceStore.GetPodServices(pod)
	if err != nil {
		// don't log this error because this function makes pointless
		// errors when no services match.
		return set, nil
	}
	for i := range services {
		key, err := keyFunc(services[i])
		if err != nil {
			return nil, err
		}
		set.Insert(key)
	}
	return set, nil
}
// DialFromContainer executes a curl via kubectl exec in a test container,
// which might then translate to a tcp or udp request based on the protocol
// argument in the url.
// - minTries is the minimum number of curl attempts required before declaring
//   success. Set to 0 if you'd like to return as soon as all endpoints respond
//   at least once.
// - maxTries is the maximum number of curl attempts. If this many attempts pass
//   and we don't see all expected endpoints, the test fails.
// - expectedEps is the set of endpoint names to wait for. Typically this is also
//   the hostname reported by each pod in the service through /hostName.
// maxTries == minTries will confirm that we see the expected endpoints and no
// more for maxTries. Use this if you want to, e.g., fail a readiness check on a
// pod and confirm it doesn't show up as an endpoint.
func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, targetIP string, containerHttpPort, targetPort, maxTries, minTries int, expectedEps sets.String) {
	cmd := fmt.Sprintf("curl -q -s 'http://%s:%d/dial?request=hostName&protocol=%s&host=%s&port=%d&tries=1'",
		containerIP, containerHttpPort, protocol, targetIP, targetPort)

	eps := sets.NewString()

	for i := 0; i < maxTries; i++ {
		stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.HostTestContainerPod.Name, cmd)
		if err != nil {
			// A failure to kubectl exec counts as a try, not a hard fail.
			// Also note that we will keep failing for maxTries in tests where
			// we confirm unreachability.
			Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
		} else {
			var output map[string][]string
			if err := json.Unmarshal([]byte(stdout), &output); err != nil {
				Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
					cmd, config.HostTestContainerPod.Name, stdout, err)
				continue
			}

			for _, hostName := range output["responses"] {
				trimmed := strings.TrimSpace(hostName)
				if trimmed != "" {
					eps.Insert(trimmed)
				}
			}
		}
		Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))

		// Check against i+1 so we exit if minTries == maxTries.
		if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
			return
		}
		// TODO: get rid of this delay #36281
		time.Sleep(hitEndpointRetryDelay)
	}

	config.diagnoseMissingEndpoints(eps)
	Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", minTries, cmd, eps, expectedEps)
}
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := sets.String{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
// syncResourceList ensures a RESTful resource exists for each third party
// resource in the given list, and removes installed resources that are no
// longer expected.
func (t *ThirdPartyController) syncResourceList(list runtime.Object) error {
	existing := sets.String{}
	switch list := list.(type) {
	case *extensions.ThirdPartyResourceList:
		// Loop across all schema objects for third party resources
		for ix := range list.Items {
			item := &list.Items[ix]
			// extract the api group and resource kind from the schema
			_, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(item)
			if err != nil {
				return err
			}
			// place it in the set of resources that we expect, so that we don't delete it in the delete pass
			existing.Insert(MakeThirdPartyPath(group))

			// ensure a RESTful resource for this schema exists on the master
			if err := t.SyncOneResource(item); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("expected a *ThirdPartyResourceList, got %#v", list)
	}

	// deletion phase, get all installed RESTful resources
	installed := t.master.ListThirdPartyResources()
	for _, installedAPI := range installed {
		found := false
		// search across the expected restful resources to see if this resource belongs to one of the expected ones
		for _, apiPath := range existing.List() {
			if installedAPI == apiPath || strings.HasPrefix(installedAPI, apiPath+"/") {
				found = true
				break
			}
		}
		// not expected, delete the resource
		if !found {
			if err := t.master.RemoveThirdPartyResource(installedAPI); err != nil {
				return err
			}
		}
	}

	return nil
}
// cleanupBandwidthLimits updates the status of bandwidth-limited containers
// and ensures that only the appropriate CIDRs are active on the node.
func (kl *Kubelet) cleanupBandwidthLimits(allPods []*v1.Pod) error {
	if kl.shaper == nil {
		return nil
	}
	currentCIDRs, err := kl.shaper.GetCIDRs()
	if err != nil {
		return err
	}
	possibleCIDRs := sets.String{}
	for ix := range allPods {
		pod := allPods[ix]
		ingress, egress, err := bandwidth.ExtractPodBandwidthResources(pod.Annotations)
		if err != nil {
			return err
		}
		if ingress == nil && egress == nil {
			glog.V(8).Infof("Not a bandwidth limited container...")
			continue
		}

		status, found := kl.statusManager.GetPodStatus(pod.UID)
		if !found {
			// TODO(random-liu): Cleanup status get functions. (issue #20477)
			s, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
			if err != nil {
				return err
			}
			status = kl.generateAPIPodStatus(pod, s)
		}
		if status.Phase == v1.PodRunning {
			possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
		}
	}
	for _, cidr := range currentCIDRs {
		if !possibleCIDRs.Has(cidr) {
			glog.V(2).Infof("Removing CIDR: %s (%v)", cidr, possibleCIDRs)
			if err := kl.shaper.Reset(cidr); err != nil {
				return err
			}
		}
	}
	return nil
}