// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]util.StringSet{}
	expected["b"] = util.NewStringSet("a", "c")
	expected["f"] = util.NewStringSet("e")
	expected["h"] = util.NewStringSet("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := util.StringSet{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
func getFitPredicateFunctions(names util.StringSet, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	predicates := map[string]algorithm.FitPredicate{}
	for _, name := range names.List() {
		factory, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}
	return predicates, nil
}
func getPriorityFunctionConfigs(names util.StringSet, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	configs := []algorithm.PriorityConfig{}
	for _, name := range names.List() {
		factory, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		configs = append(configs, algorithm.PriorityConfig{
			Function: factory.Function(args),
			Weight:   factory.Weight,
		})
	}
	return configs, nil
}
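// Illustrative sketch only: how a factory might be placed into fitPredicateMap
// so that getFitPredicateFunctions above can resolve it by name. The function
// name registerExampleFitPredicate and the key "ExamplePredicate" are
// assumptions made for this sketch, not the package's real registration API;
// only fitPredicateMap, schedulerFactoryMutex, PluginFactoryArgs, and
// algorithm.FitPredicate are taken from the code above.
func registerExampleFitPredicate(pred algorithm.FitPredicate) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()
	// The factory ignores args here; a real factory would typically use
	// PluginFactoryArgs (listers, etc.) to build the predicate.
	fitPredicateMap["ExamplePredicate"] = func(args PluginFactoryArgs) algorithm.FitPredicate {
		return pred
	}
}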
// Object returns a single object representing the output of a single visit to all
// found resources. If the Builder was a singular context (expected to return a
// single resource by user input) and only a single resource was found, the resource
// will be returned as is. Otherwise, the returned resources will be part of an
// api.List. The ResourceVersion of the api.List will be set only if it is identical
// across all infos returned.
func (r *Result) Object() (runtime.Object, error) {
	infos, err := r.Infos()
	if err != nil {
		return nil, err
	}

	versions := util.StringSet{}
	objects := []runtime.Object{}
	for _, info := range infos {
		if info.Object != nil {
			objects = append(objects, info.Object)
			versions.Insert(info.ResourceVersion)
		}
	}

	if len(objects) == 1 {
		if r.singular {
			return objects[0], nil
		}
		// if the item is a list already, don't create another list
		if runtime.IsListType(objects[0]) {
			return objects[0], nil
		}
	}

	version := ""
	if len(versions) == 1 {
		version = versions.List()[0]
	}

	return &api.List{
		ListMeta: api.ListMeta{
			ResourceVersion: version,
		},
		Items: objects,
	}, err
}
// CheckSetEq reports whether lhs and rhs contain exactly the same members.
func CheckSetEq(lhs, rhs util.StringSet) bool {
	return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}
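// A short usage sketch of CheckSetEq. The function name exampleCheckSetEq and
// the set contents are made up for illustration; util.NewStringSet, Insert, and
// fmt.Println are the same helpers used elsewhere in this section.
func exampleCheckSetEq() {
	a := util.NewStringSet("x", "y")
	b := util.NewStringSet("y")
	b.Insert("x")
	fmt.Println(CheckSetEq(a, b))                      // true: same members, insertion order does not matter
	fmt.Println(CheckSetEq(a, util.NewStringSet("x"))) // false: "y" is missing from the second set
}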
func Example() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// This will hold the downstream state, as we know it.
	downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	cfg := &framework.Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &api.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,

		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
			newest := obj.(cache.Deltas).Newest()

			if newest.Type != cache.Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}

				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}

				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers.
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}

				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}

	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go framework.New(cfg).Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
func TestHammerController(t *testing.T) {
	// This test executes a bunch of requests through the fake source and
	// controller framework to make sure there's no locking/threading
	// errors. If an error happens, it should hang forever or trigger the
	// race detector.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	outputSetLock := sync.Mutex{}
	// map of key to operations done on the key
	outputSet := map[string][]string{}

	recordFunc := func(eventType string, obj interface{}) {
		key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
		if err != nil {
			t.Errorf("something wrong with key: %v", err)
			key = "oops something went wrong with the key"
		}

		// Record some output when items are deleted.
		outputSetLock.Lock()
		defer outputSetLock.Unlock()
		outputSet[key] = append(outputSet[key], eventType)
	}

	// Make a controller which just logs all the changes it gets.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { recordFunc("add", obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
			DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
		},
	)

	if controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return false before we started the controller")
	}

	// Run the controller until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	// Let's wait for the controller to do its initial sync
	time.Sleep(100 * time.Millisecond)
	if !controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return true after the initial sync")
	}

	wg := sync.WaitGroup{}
	const threads = 3
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func() {
			defer wg.Done()
			// Let's add a few objects to the source.
			currentNames := util.StringSet{}
			rs := rand.NewSource(rand.Int63())
			f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
			r := rand.New(rs) // Mustn't use r and f concurrently!
			for i := 0; i < 100; i++ {
				var name string
				var isNew bool
				if currentNames.Len() == 0 || r.Intn(3) == 1 {
					f.Fuzz(&name)
					isNew = true
				} else {
					l := currentNames.List()
					name = l[r.Intn(len(l))]
				}

				pod := &api.Pod{}
				f.Fuzz(pod)
				pod.ObjectMeta.Name = name
				pod.ObjectMeta.Namespace = "default"
				// Add, update, or delete randomly.
				// Note that these pods are not valid-- the fake source doesn't
				// call validation or perform any other checking.
				if isNew {
					currentNames.Insert(name)
					source.Add(pod)
					continue
				}
				switch r.Intn(2) {
				case 0:
					currentNames.Insert(name)
					source.Modify(pod)
				case 1:
					currentNames.Delete(name)
					source.Delete(pod)
				}
			}
		}()
	}
	wg.Wait()

	// Let's wait for the controller to finish processing the things we just added.
	time.Sleep(100 * time.Millisecond)
	close(stop)

	outputSetLock.Lock()
	t.Logf("got: %#v", outputSet)
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", apiVersion)

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL, apiVersion)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)

	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	qingClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: apiVersion})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(qingClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that qinglet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so qinglet tries to create these multiple times.
	createdConts := util.StringSet{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12: 2 pod infra containers + 2 containers from the replication controller +
	// 1 pod infra container + 2 containers from the URL on first Qinglet +
	// 1 pod infra container + 2 containers from the URL on second Qinglet +
	// 1 pod infra container + 1 container from the service test.
	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(qingClient)
}