func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
	names := sets.String{}
	for i, pod := range pods {
		var errlist []error
		if errs := validation.ValidatePod(pod); len(errs) != 0 {
			errlist = append(errlist, errs...)
			// If validation fails, don't trust it any further -
			// even Name could be bad.
		} else {
			name := kubecontainer.GetPodFullName(pod)
			if names.Has(name) {
				errlist = append(errlist, fielderrors.NewFieldDuplicate("name", pod.Name))
			} else {
				names.Insert(name)
			}
		}
		if len(errlist) > 0 {
			name := bestPodIdentString(pod)
			err := utilerrors.NewAggregate(errlist)
			glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
			recorder.Eventf(pod, "FailedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
			continue
		}
		filtered = append(filtered, pod)
	}
	return
}
func runMasterServiceTest(client *client.Client) {
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := sets.String{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("no endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("no RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
// Index returns a list of items that match on the index function.
// Index is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	indexFunc := c.indexers[indexName]
	if indexFunc == nil {
		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
	}

	indexKeys, err := indexFunc(obj)
	if err != nil {
		return nil, err
	}
	index := c.indices[indexName]

	// Need to de-dupe the return list. Since multiple keys are allowed, this can happen.
	returnKeySet := sets.String{}
	for _, indexKey := range indexKeys {
		set := index[indexKey]
		for _, key := range set.List() {
			returnKeySet.Insert(key)
		}
	}

	list := make([]interface{}, 0, returnKeySet.Len())
	for absoluteKey := range returnKeySet {
		list = append(list, c.items[absoluteKey])
	}
	return list, nil
}
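// Editor's sketch (not from the original source): the de-duplication step in
// Index above, reduced to plain maps and slices. Because an object can emit
// several index keys, the same stored key can appear under more than one
// bucket; collecting into a set first keeps the result list unique. All names
// here are hypothetical.
func dedupeIndexedKeys(index map[string][]string, indexKeys []string) []string {
	seen := map[string]struct{}{}
	result := []string{}
	for _, indexKey := range indexKeys {
		for _, key := range index[indexKey] {
			if _, dup := seen[key]; dup {
				continue // already collected via another index key
			}
			seen[key] = struct{}{}
			result = append(result, key)
		}
	}
	return result
}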
func (t *tcShaper) nextClassID() (int, error) {
	data, err := t.e.Command("tc", "class", "show", "dev", t.iface).CombinedOutput()
	if err != nil {
		return -1, err
	}

	scanner := bufio.NewScanner(bytes.NewBuffer(data))
	classes := sets.String{}
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		// Skip empty lines.
		if len(line) == 0 {
			continue
		}
		parts := strings.Split(line, " ")
		// Expected tc line:
		// class htb 1:1 root prio 0 rate 1000Kbit ceil 1000Kbit burst 1600b cburst 1600b
		if len(parts) != 14 {
			return -1, fmt.Errorf("unexpected output from tc: %s (%v)", scanner.Text(), parts)
		}
		classes.Insert(parts[2])
	}

	// Make sure it doesn't go forever.
	for nextClass := 1; nextClass < 10000; nextClass++ {
		if !classes.Has(fmt.Sprintf("1:%d", nextClass)) {
			return nextClass, nil
		}
	}
	// This should really never happen.
	return -1, fmt.Errorf("exhausted class space, please try again")
}
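// Editor's sketch (not from the original source): the first-free-handle scan
// that nextClassID ends with, isolated for clarity. Given the set of class
// handles already present in the tc output, it returns the smallest unused
// "1:N" ID, with an upper bound so the loop cannot run forever.
func firstFreeClassID(used sets.String) (int, error) {
	for candidate := 1; candidate < 10000; candidate++ {
		if !used.Has(fmt.Sprintf("1:%d", candidate)) {
			return candidate, nil
		}
	}
	return -1, fmt.Errorf("exhausted class space")
}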
func TestFilterQuotaPods(t *testing.T) {
	pods := []api.Pod{
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-running"},
			Status:     api.PodStatus{Phase: api.PodRunning},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-pending"},
			Status:     api.PodStatus{Phase: api.PodPending},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-succeeded"},
			Status:     api.PodStatus{Phase: api.PodSucceeded},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-unknown"},
			Status:     api.PodStatus{Phase: api.PodUnknown},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed"},
			Status:     api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-always"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyAlways,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-on-failure"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyOnFailure,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
		{
			ObjectMeta: api.ObjectMeta{Name: "pod-failed-with-restart-never"},
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyNever,
			},
			Status: api.PodStatus{Phase: api.PodFailed},
		},
	}
	expectedResults := sets.NewString("pod-running", "pod-pending", "pod-unknown",
		"pod-failed-with-restart-always", "pod-failed-with-restart-on-failure")

	actualResults := sets.String{}
	result := FilterQuotaPods(pods)
	for i := range result {
		actualResults.Insert(result[i].Name)
	}

	if len(expectedResults) != len(actualResults) || !actualResults.HasAll(expectedResults.List()...) {
		t.Errorf("Expected results %v, Actual results %v", expectedResults, actualResults)
	}
}
func findKnownValue(parts []string, valueOptions sets.String) int {
	for i := range parts {
		if valueOptions.Has(parts[i]) {
			return i
		}
	}
	return -1
}
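// Editor's sketch (not from the original source): a hypothetical caller of
// findKnownValue, locating a known state token inside a whitespace-split line.
// The option values and the input line are illustrative only.
func exampleFindKnownValue() {
	valueOptions := sets.NewString("running", "stopped", "paused")
	parts := strings.Split("container abc123 running 2h", " ")
	if i := findKnownValue(parts, valueOptions); i >= 0 {
		fmt.Printf("found %q at index %d\n", parts[i], i) // found "running" at index 2
	}
}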
// ContainedIDs returns a sets.String containing all IDs of the stored items.
// This is a snapshot of a moment in time, and one should keep in mind that
// other goroutines can add or remove items after you call this.
func (c *DelayFIFO) ContainedIDs() sets.String {
	c.rlock()
	defer c.runlock()
	set := sets.String{}
	for id := range c.items {
		set.Insert(id)
	}
	return set
}
func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}
				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Start the controller and keep it running until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid -- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
// ContainedIDs returns a sets.String containing all IDs of the stored items.
// This is a snapshot of a moment in time, and one should keep in mind that
// other goroutines can add or remove items after you call this.
func (c *HistoricalFIFO) ContainedIDs() sets.String {
	c.lock.RLock()
	defer c.lock.RUnlock()
	set := sets.String{}
	for id, entry := range c.items {
		if entry.Is(DELETE_EVENT | POP_EVENT) {
			continue
		}
		set.Insert(id)
	}
	return set
}
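// Editor's sketch (not from the original source), assuming entry.Is performs
// a bitwise test against an event mask: entries whose current event type
// matches any bit in the mask are filtered out, which is how ContainedIDs
// above skips deleted and popped items. The type and constants below are
// hypothetical stand-ins, not the real definitions.
type entryEvent uint32

const (
	addEvent entryEvent = 1 << iota
	popEvent
	deleteEvent
)

// is reports whether the entry's event type has any bit in common with mask.
func (e entryEvent) is(mask entryEvent) bool {
	return e&mask != 0
}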
func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	predicates := map[string]algorithm.FitPredicate{}
	for _, name := range names.List() {
		factory, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}
	return predicates, nil
}
// Calls "udevadm trigger --action=change" for newly created "/dev/sd*" drives (exist only in after set). // This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed. func udevadmChangeToNewDrives(sdBeforeSet sets.String) error { sdAfter, err := filepath.Glob(diskSDPattern) if err != nil { return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) } for _, sd := range sdAfter { if !sdBeforeSet.Has(sd) { return udevadmChangeToDrive(sd) } } return nil }
func (g *conversionGenerator) RepackImports(exclude sets.String) {
	var packages []string
	for key := range g.imports {
		packages = append(packages, key)
	}
	sort.Strings(packages)

	g.imports = make(map[string]string)
	g.shortImports = make(map[string]string)
	g.targetPackage(g.targetPkg)
	for _, pkg := range packages {
		if !exclude.Has(pkg) {
			g.addImportByPath(pkg)
		}
	}
}
// getInstanceList returns an instance list based on the given names.
// The names cannot contain a '.'; the real GCE API validates against this.
func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances {
	instanceNames := nodeNames.List()
	computeInstances := []*compute.InstanceWithNamedPorts{}
	for _, name := range instanceNames {
		instanceLink := fmt.Sprintf(
			"https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
			"project", "zone", name)
		computeInstances = append(
			computeInstances, &compute.InstanceWithNamedPorts{
				Instance: instanceLink})
	}
	return &compute.InstanceGroupsListInstances{
		Items: computeInstances,
	}
}
func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) (map[string]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	configs := map[string]algorithm.PriorityConfig{}
	for _, name := range names.List() {
		factory, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		configs[name] = algorithm.PriorityConfig{
			Function: factory.Function(args),
			Weight:   factory.Weight,
		}
	}
	return configs, nil
}
func (e *endpointController) getPodServiceMemberships(pod *api.Pod) (sets.String, error) {
	set := sets.String{}
	services, err := e.serviceStore.GetPodServices(pod)
	if err != nil {
		// Don't log this error; it is expected (and harmless) whenever no
		// services match the given pod.
		return set, nil
	}
	for i := range services {
		key, err := keyFunc(&services[i])
		if err != nil {
			return nil, err
		}
		set.Insert(key)
	}
	return set, nil
}
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := sets.String{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) errs.ValidationErrorList {
	allErrs := errs.ValidationErrorList{}
	if len(obj.Name) == 0 {
		allErrs = append(allErrs, errs.NewFieldInvalid("name", obj.Name, "name must be non-empty"))
	}
	versions := sets.String{}
	for ix := range obj.Versions {
		version := &obj.Versions[ix]
		if len(version.Name) == 0 {
			allErrs = append(allErrs, errs.NewFieldInvalid("name", version, "name cannot be empty"))
		}
		if versions.Has(version.Name) {
			allErrs = append(allErrs, errs.NewFieldDuplicate("version", version))
		}
		versions.Insert(version.Name)
	}
	return allErrs
}
func (t *ThirdPartyController) syncResourceList(list runtime.Object) error {
	existing := sets.String{}
	switch list := list.(type) {
	case *expapi.ThirdPartyResourceList:
		// Loop across all schema objects for third party resources.
		for ix := range list.Items {
			item := &list.Items[ix]
			// Extract the API group and resource kind from the schema.
			_, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(item)
			if err != nil {
				return err
			}
			// Place it in the set of resources that we expect, so that we don't delete it in the delete pass.
			existing.Insert(makeThirdPartyPath(group))

			// Ensure a RESTful resource for this schema exists on the master.
			if err := t.SyncOneResource(item); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("expected a *ThirdPartyResourceList, got %#v", list)
	}

	// Deletion phase: get all installed RESTful resources.
	installed := t.master.ListThirdPartyResources()
	for _, installedAPI := range installed {
		found := false
		// Search across the expected RESTful resources to see if this resource belongs to one of the expected ones.
		for _, apiPath := range existing.List() {
			if installedAPI == apiPath || strings.HasPrefix(installedAPI, apiPath+"/") {
				found = true
				break
			}
		}
		// Not expected; delete the resource.
		if !found {
			if err := t.master.RemoveThirdPartyResource(installedAPI); err != nil {
				return err
			}
		}
	}

	return nil
}
func loadWhitelist(file string) (sets.String, error) {
	result := sets.String{}
	if len(file) == 0 {
		return result, nil
	}
	fp, err := os.Open(file)
	if err != nil {
		return result, err
	}
	defer fp.Close()
	scanner := bufio.NewScanner(fp)
	for scanner.Scan() {
		current := scanner.Text()
		if !strings.HasPrefix(current, "#") {
			result.Insert(current)
		}
	}
	return result, scanner.Err()
}
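// Editor's sketch (not from the original source): a hypothetical caller of
// loadWhitelist. The format is one name per line, with '#'-prefixed lines
// treated as comments; membership afterwards is a simple set lookup. The
// file name and entry are illustrative only.
func exampleLoadWhitelist() error {
	whitelist, err := loadWhitelist("whitelist.txt")
	if err != nil {
		return err
	}
	if whitelist.Has("some-name") {
		fmt.Println("some-name is whitelisted")
	}
	return nil
}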
func selectContainer(pod *api.Pod, in io.Reader, out io.Writer) string {
	fmt.Fprintf(out, "Please select a container:\n")
	options := sets.String{}
	for ix := range pod.Spec.Containers {
		fmt.Fprintf(out, "[%d] %s\n", ix+1, pod.Spec.Containers[ix].Name)
		options.Insert(pod.Spec.Containers[ix].Name)
	}
	for {
		var input string
		fmt.Fprintf(out, "> ")
		fmt.Fscanln(in, &input)
		if options.Has(input) {
			return input
		}
		ix, err := strconv.Atoi(input)
		if err == nil && ix > 0 && ix <= len(pod.Spec.Containers) {
			return pod.Spec.Containers[ix-1].Name
		}
		fmt.Fprintf(out, "Invalid input: %s\n", input)
	}
}
func generateCSV(buildLatency BuildLatencyData, resources, methods sets.String, out io.Writer) error {
	header := []string{"build"}
	for _, rsrc := range resources.List() {
		header = append(header, fmt.Sprintf("%s_50", rsrc))
		header = append(header, fmt.Sprintf("%s_90", rsrc))
		header = append(header, fmt.Sprintf("%s_99", rsrc))
	}
	if _, err := fmt.Fprintln(out, strings.Join(header, ",")); err != nil {
		return err
	}

	for _, method := range methods.List() {
		if _, err := fmt.Fprintln(out, method); err != nil {
			return err
		}
		for build, data := range buildLatency {
			line := []string{fmt.Sprintf("%d", build)}
			for _, rsrc := range resources.List() {
				podData := data[rsrc]
				line = append(line, fmt.Sprintf("%g", findMethod(method, "Perc50", podData)))
				line = append(line, fmt.Sprintf("%g", findMethod(method, "Perc90", podData)))
				line = append(line, fmt.Sprintf("%g", findMethod(method, "Perc99", podData)))
			}
			if _, err := fmt.Fprintln(out, strings.Join(line, ",")); err != nil {
				return err
			}
		}
	}
	return nil
}
// waitTillNPodsRunningOnNodes polls the /runningpods endpoint on kubelet until
// it finds targetNumPods pods that match the given criteria (namespace and
// podNamePrefix). Note that we usually use label selector to filter pods that
// belong to the same RC. However, we use podNamePrefix with namespace here
// because pods returned from /runningpods do not contain the original label
// information; they are reconstructed by examining the container runtime. In
// the scope of this test, we do not expect pod naming conflicts so
// podNamePrefix should be sufficient to identify the pods.
func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
	return wait.Poll(pollInterval, timeout, func() (bool, error) {
		matchCh := make(chan sets.String, len(nodeNames))
		for _, item := range nodeNames.List() {
			// Launch a goroutine per node to check the pods running on the nodes.
			nodeName := item
			go func() {
				matchCh <- getPodMatches(c, nodeName, podNamePrefix, namespace)
			}()
		}

		seen := sets.NewString()
		for i := 0; i < len(nodeNames.List()); i++ {
			seen = seen.Union(<-matchCh)
		}
		if seen.Len() == targetNumPods {
			return true, nil
		}
		Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
		return false, nil
	})
}
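// Editor's sketch (not from the original source): the fan-out/fan-in pattern
// used by waitTillNPodsRunningOnNodes above, in isolation. One goroutine per
// input sends its result set over a buffered channel, and the receiver unions
// the per-worker sets into a single result. Names are hypothetical.
func unionFromWorkers(inputs []string, work func(string) sets.String) sets.String {
	ch := make(chan sets.String, len(inputs))
	for _, in := range inputs {
		go func(in string) {
			ch <- work(in)
		}(in)
	}
	out := sets.NewString()
	for range inputs {
		out = out.Union(<-ch)
	}
	return out
}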
// Find all sibling pods in the service and post to their /write handler.
func contactOthers(state *State) {
	defer state.doneContactingPeers()
	client, err := client.NewInCluster()
	if err != nil {
		log.Fatalf("Unable to create client; error: %v\n", err)
	}
	// Double-check that it worked by getting the server version.
	if v, err := client.ServerVersion(); err != nil {
		log.Fatalf("Unable to get server version: %v\n", err)
	} else {
		log.Printf("Server version: %#v\n", v)
	}

	// Do this repeatedly, in case there's some propagation delay with getting
	// newly started pods into the endpoints list.
	for i := 0; i < 15; i++ {
		endpoints, err := client.Endpoints(*namespace).Get(*service)
		if err != nil {
			state.Logf("Unable to read the endpoints for %v/%v: %v; will try again.", *namespace, *service, err)
			time.Sleep(time.Duration(1+rand.Intn(10)) * time.Second)
			continue
		}

		eps := sets.String{}
		for _, ss := range endpoints.Subsets {
			for _, a := range ss.Addresses {
				for _, p := range ss.Ports {
					eps.Insert(fmt.Sprintf("http://%s:%d", a.IP, p.Port))
				}
			}
		}
		for ep := range eps {
			state.Logf("Attempting to contact %s", ep)
			contactSingle(ep, state)
		}

		time.Sleep(5 * time.Second)
	}
}
func NewDefaultRESTMapper(group string, versions []string, interfacesFunc meta.VersionInterfacesFunc, importPathPrefix string, ignoredKinds, rootScoped sets.String) *meta.DefaultRESTMapper {
	mapper := meta.NewDefaultRESTMapper(group, versions, interfacesFunc)
	// Enumerate all supported versions, get the kinds, and register with the mapper how to address
	// our resources.
	for _, version := range versions {
		for kind, oType := range Scheme.KnownTypes(version) {
			// TODO: Remove import path prefix check.
			// We check the import path prefix because we currently stuff both "api" and "extensions" objects
			// into the same group within Scheme since Scheme has no notion of groups yet.
			if !strings.HasPrefix(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) {
				continue
			}
			scope := meta.RESTScopeNamespace
			if rootScoped.Has(kind) {
				scope = meta.RESTScopeRoot
			}
			mapper.Add(scope, kind, version, false)
		}
	}
	return mapper
}
// Object returns a single object representing the output of a single visit to all
// found resources. If the Builder was a singular context (expected to return a
// single resource by user input) and only a single resource was found, the resource
// will be returned as is. Otherwise, the returned resources will be part of an
// api.List. The ResourceVersion of the api.List will be set only if it is identical
// across all infos returned.
func (r *Result) Object() (runtime.Object, error) {
	infos, err := r.Infos()
	if err != nil {
		return nil, err
	}

	versions := sets.String{}
	objects := []runtime.Object{}
	for _, info := range infos {
		if info.Object != nil {
			objects = append(objects, info.Object)
			versions.Insert(info.ResourceVersion)
		}
	}

	if len(objects) == 1 {
		if r.singular {
			return objects[0], nil
		}
		// If the item is a list already, don't create another list.
		if runtime.IsListType(objects[0]) {
			return objects[0], nil
		}
	}

	version := ""
	if len(versions) == 1 {
		version = versions.List()[0]
	}
	return &api.List{
		ListMeta: unversioned.ListMeta{
			ResourceVersion: version,
		},
		Items: objects,
	}, err
}
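// Editor's sketch (not from the original source): the ResourceVersion rule
// Object applies above, isolated. The aggregate list carries a version only
// when every contained object reported the same one; otherwise the field is
// left empty.
func commonResourceVersion(versions sets.String) string {
	if versions.Len() == 1 {
		return versions.List()[0]
	}
	return ""
}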
// Get all backends for all registered storage destinations.
// Used for getting all instances for health validations.
func (s *StorageDestinations) backends() []string {
	backends := sets.String{}
	for _, group := range s.APIGroups {
		if group.Default != nil {
			for _, backend := range group.Default.Backends() {
				backends.Insert(backend)
			}
		}
		if group.Overrides != nil {
			for _, storage := range group.Overrides {
				for _, backend := range storage.Backends() {
					backends.Insert(backend)
				}
			}
		}
	}
	return backends.List()
}
func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, resourceMonitor *resourceMonitor) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(RunRC(RCConfig{
		Client:    framework.Client,
		Name:      rcName,
		Namespace: framework.Namespace.Name,
		Image:     "gcr.io/google_containers/pause:go",
		Replicas:  totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	resourceMonitor.LogLatest()
	resourceMonitor.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling resourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		timeLeft := deadline.Sub(time.Now())
		Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(framework.Client, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(framework.Client, nodeNames.List())
	resourceMonitor.LogCPUSummary()
	resourceMonitor.LogLatest()

	By("Deleting the RC")
	DeleteRC(framework.Client, framework.Namespace.Name, rcName)
}
// NegotiateVersion queries the server's supported api versions to find
// a version that both client and server support.
// - If no version is provided, try registered client versions in order of
//   preference.
// - If version is provided, but not default config (explicitly requested via
//   commandline flag), and is unsupported by the server, print a warning to
//   stderr and try client's registered versions in order of preference.
// - If version is config default, and the server does not support it,
//   return an error.
func NegotiateVersion(client *Client, c *Config, version string, clientRegisteredVersions []string) (string, error) {
	var err error
	if client == nil {
		client, err = New(c)
		if err != nil {
			return "", err
		}
	}
	clientVersions := sets.String{}
	for _, v := range clientRegisteredVersions {
		clientVersions.Insert(v)
	}
	apiVersions, err := client.ServerAPIVersions()
	if err != nil {
		// This is almost always a connection error, and higher level code should treat this as a generic error,
		// not a negotiation specific error.
		return "", err
	}
	serverVersions := sets.String{}
	for _, v := range apiVersions.Versions {
		serverVersions.Insert(v)
	}
	// If no version requested, use config version (may also be empty).
	if len(version) == 0 {
		version = c.Version
	}
	// If version explicitly requested verify that both client and server support it.
	// If server does not support warn, but try to negotiate a lower version.
	if len(version) != 0 {
		if !clientVersions.Has(version) {
			return "", fmt.Errorf("client does not support API version %q; client supported API versions: %v", version, clientVersions)
		}
		if serverVersions.Has(version) {
			return version, nil
		}
		// If we are using an explicit config version the server does not support, fail.
		if version == c.Version {
			return "", fmt.Errorf("server does not support API version %q", version)
		}
	}

	for _, clientVersion := range clientRegisteredVersions {
		if serverVersions.Has(clientVersion) {
			// Version was not explicitly requested in command config (--api-version).
			// Ok to fall back to a supported version with a warning.
			// TODO: caesarxuchao: enable the warning message when we have
			// proper fix. Please refer to issue #14895.
			// if len(version) != 0 {
			// 	glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion)
			// }
			return clientVersion, nil
		}
	}
	return "", fmt.Errorf("failed to negotiate an api version; server supports: %v, client supports: %v",
		serverVersions, clientRegisteredVersions)
}
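// Editor's sketch (not from the original source): the negotiation order that
// NegotiateVersion implements, with client construction and error details
// stripped away. An explicitly requested version wins if the server supports
// it; otherwise the client's registered versions are tried in order of
// preference. Names are hypothetical.
func negotiate(requested string, clientPreferred []string, serverVersions sets.String) (string, error) {
	if len(requested) != 0 && serverVersions.Has(requested) {
		return requested, nil
	}
	for _, v := range clientPreferred {
		if serverVersions.Has(v) {
			return v, nil
		}
	}
	return "", fmt.Errorf("no mutually supported API version")
}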
func TestWatchCacheBasic(t *testing.T) {
	store := newWatchCache(2)

	// Test Add/Update/Delete.
	pod1 := makeTestPod("pod", 1)
	if err := store.Add(pod1); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if item, ok, _ := store.Get(pod1); !ok {
		t.Errorf("didn't find pod")
	} else {
		if !api.Semantic.DeepEqual(pod1, item) {
			t.Errorf("expected %v, got %v", pod1, item)
		}
	}
	pod2 := makeTestPod("pod", 2)
	if err := store.Update(pod2); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if item, ok, _ := store.Get(pod2); !ok {
		t.Errorf("didn't find pod")
	} else {
		if !api.Semantic.DeepEqual(pod2, item) {
			t.Errorf("expected %v, got %v", pod2, item)
		}
	}
	pod3 := makeTestPod("pod", 3)
	if err := store.Delete(pod3); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if _, ok, _ := store.Get(pod3); ok {
		t.Errorf("found pod")
	}

	// Test List.
	store.Add(makeTestPod("pod1", 4))
	store.Add(makeTestPod("pod2", 5))
	store.Add(makeTestPod("pod3", 6))
	{
		podNames := sets.String{}
		for _, item := range store.List() {
			podNames.Insert(item.(*api.Pod).ObjectMeta.Name)
		}
		if !podNames.HasAll("pod1", "pod2", "pod3") {
			t.Errorf("missing pods, found %v", podNames)
		}
		if len(podNames) != 3 {
			t.Errorf("found missing/extra items")
		}
	}

	// Test Replace.
	store.Replace([]interface{}{
		makeTestPod("pod4", 7),
		makeTestPod("pod5", 8),
	}, "8")
	{
		podNames := sets.String{}
		for _, item := range store.List() {
			podNames.Insert(item.(*api.Pod).ObjectMeta.Name)
		}
		if !podNames.HasAll("pod4", "pod5") {
			t.Errorf("missing pods, found %v", podNames)
		}
		if len(podNames) != 2 {
			t.Errorf("found missing/extra items")
		}
	}
}
func TestListFromMemory(t *testing.T) {
	fakeClient := tools.NewFakeEtcdClient(t)
	prefixedKey := etcdtest.AddPrefix("pods")
	fakeClient.ExpectNotFoundGet(prefixedKey)
	cacher := newTestCacher(fakeClient)
	fakeClient.WaitForWatchCompletion()

	podFoo := makeTestPod("foo")
	podBar := makeTestPod("bar")
	podBaz := makeTestPod("baz")

	podFooPrime := makeTestPod("foo")
	podFooPrime.Spec.NodeName = "fakeNode"

	testCases := []*etcd.Response{
		{
			Action: "create",
			Node: &etcd.Node{
				Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)),
				CreatedIndex:  1,
				ModifiedIndex: 1,
			},
		},
		{
			Action: "create",
			Node: &etcd.Node{
				Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podBar)),
				CreatedIndex:  2,
				ModifiedIndex: 2,
			},
		},
		{
			Action: "create",
			Node: &etcd.Node{
				Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podBaz)),
				CreatedIndex:  3,
				ModifiedIndex: 3,
			},
		},
		{
			Action: "set",
			Node: &etcd.Node{
				Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFooPrime)),
				CreatedIndex:  1,
				ModifiedIndex: 4,
			},
			PrevNode: &etcd.Node{
				Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)),
				CreatedIndex:  1,
				ModifiedIndex: 1,
			},
		},
		{
			Action: "delete",
			Node: &etcd.Node{
				CreatedIndex:  1,
				ModifiedIndex: 5,
			},
			PrevNode: &etcd.Node{
				Value:         string(runtime.EncodeOrDie(testapi.Default.Codec(), podBar)),
				CreatedIndex:  1,
				ModifiedIndex: 1,
			},
		},
	}

	// Propagate some data to etcd.
	for _, test := range testCases {
		fakeClient.WatchResponse <- test
	}
	if err := waitForUpToDateCache(cacher, 5); err != nil {
		t.Errorf("watch cache didn't propagate correctly: %v", err)
	}

	result := &api.PodList{}
	if err := cacher.ListFromMemory("pods/ns", result); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if result.ListMeta.ResourceVersion != "5" {
		t.Errorf("incorrect resource version: %v", result.ListMeta.ResourceVersion)
	}
	if len(result.Items) != 2 {
		t.Errorf("unexpected list result: %d", len(result.Items))
	}
	keys := sets.String{}
	for _, item := range result.Items {
		keys.Insert(item.ObjectMeta.Name)
	}
	if !keys.HasAll("foo", "baz") {
		t.Errorf("unexpected list result: %#v", result)
	}
	for _, item := range result.Items {
		// Unset fields that are set by the infrastructure.
		item.ObjectMeta.ResourceVersion = ""
		item.ObjectMeta.CreationTimestamp = unversioned.Time{}

		var expected *api.Pod
		switch item.ObjectMeta.Name {
		case "foo":
			expected = podFooPrime
		case "baz":
			expected = podBaz
		default:
			t.Errorf("unexpected item: %v", item)
		}
		if e, a := *expected, item; !reflect.DeepEqual(e, a) {
			t.Errorf("expected: %#v, got: %#v", e, a)
		}
	}

	close(fakeClient.WatchResponse)
}