// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]util.StringSet{}
	expected["b"] = util.NewStringSet("a", "c")
	expected["f"] = util.NewStringSet("e")
	expected["h"] = util.NewStringSet("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := util.StringSet{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
func usersWithCommit(client *github.Client, org, project string) ([]string, error) {
	userSet := util.StringSet{}

	teams, err := fetchAllTeams(client, org)
	if err != nil {
		glog.Errorf("%v", err)
		return nil, err
	}

	teamIDs := []int{}
	for _, team := range teams {
		repo, _, err := client.Organizations.IsTeamRepo(*team.ID, org, project)
		if repo == nil || err != nil {
			continue
		}
		perms := *repo.Permissions
		if perms["push"] {
			teamIDs = append(teamIDs, *team.ID)
		}
	}

	for _, team := range teamIDs {
		users, err := fetchAllUsers(client, team)
		if err != nil {
			glog.Errorf("%v", err)
			continue
		}
		for _, user := range users {
			userSet.Insert(*user.Login)
		}
	}

	return userSet.List(), nil
}
func (m *mockPruneRecorder) Verify(t *testing.T, expected util.StringSet) {
	if len(m.set) != len(expected) || !m.set.HasAll(expected.List()...) {
		expectedValues := expected.List()
		actualValues := m.set.List()
		sort.Strings(expectedValues)
		sort.Strings(actualValues)
		t.Errorf("expected \n\t%v\n, actual \n\t%v\n", expectedValues, actualValues)
	}
}
func (b *BlunderbussConfig) FindOwners(filename string) []string {
	owners := util.StringSet{}
	for prefix, ownersList := range b.PrefixMap {
		if strings.HasPrefix(filename, prefix) {
			owners.Insert(ownersList...)
		}
	}
	return owners.List()
}
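// A minimal, self-contained sketch of the prefix-map owner lookup above, using a
// plain map[string]struct{} as a stand-in for util.StringSet. The prefixMap contents
// and the findOwners helper are made-up examples, not the real Blunderbuss config.
package main

import (
	"fmt"
	"sort"
	"strings"
)

func findOwners(prefixMap map[string][]string, filename string) []string {
	owners := map[string]struct{}{}
	for prefix, ownersList := range prefixMap {
		if strings.HasPrefix(filename, prefix) {
			for _, o := range ownersList {
				owners[o] = struct{}{}
			}
		}
	}
	result := make([]string, 0, len(owners))
	for o := range owners {
		result = append(result, o)
	}
	// util.StringSet.List() also returns a sorted, de-duplicated slice.
	sort.Strings(result)
	return result
}

func main() {
	prefixMap := map[string][]string{
		"pkg/":           {"alice"},
		"pkg/controller": {"bob", "carol"},
	}
	fmt.Println(findOwners(prefixMap, "pkg/controller/sync.go")) // [alice bob carol]
}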
func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
func printDeploymentConfig(dc *deployapi.DeploymentConfig, w io.Writer, withNamespace, wide bool, columnLabels []string) error {
	triggers := util.StringSet{}
	for _, trigger := range dc.Triggers {
		triggers.Insert(string(trigger.Type))
	}
	tStr := strings.Join(triggers.List(), ", ")

	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", dc.Name, tStr, dc.LatestVersion)
	return err
}
func printPolicyBinding(policyBinding *authorizationapi.PolicyBinding, w io.Writer, withNamespace, wide bool, columnLabels []string) error {
	roleBindingNames := util.StringSet{}
	for key := range policyBinding.RoleBindings {
		roleBindingNames.Insert(key)
	}
	roleBindingsString := strings.Join(roleBindingNames.List(), ", ")

	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", policyBinding.Name, roleBindingsString, policyBinding.LastModified)
	return err
}
func TestOrphanBuildResolver(t *testing.T) {
	activeBuildConfig := mockBuildConfig("a", "active-build-config")
	inactiveBuildConfig := mockBuildConfig("a", "inactive-build-config")

	buildConfigs := []*buildapi.BuildConfig{activeBuildConfig}
	builds := []*buildapi.Build{}

	expectedNames := util.StringSet{}
	BuildPhaseOptions := []buildapi.BuildPhase{
		buildapi.BuildPhaseCancelled,
		buildapi.BuildPhaseComplete,
		buildapi.BuildPhaseError,
		buildapi.BuildPhaseFailed,
		buildapi.BuildPhaseNew,
		buildapi.BuildPhasePending,
		buildapi.BuildPhaseRunning,
	}
	BuildPhaseFilter := []buildapi.BuildPhase{
		buildapi.BuildPhaseCancelled,
		buildapi.BuildPhaseComplete,
		buildapi.BuildPhaseError,
		buildapi.BuildPhaseFailed,
	}
	BuildPhaseFilterSet := util.StringSet{}
	for _, BuildPhase := range BuildPhaseFilter {
		BuildPhaseFilterSet.Insert(string(BuildPhase))
	}

	for _, BuildPhaseOption := range BuildPhaseOptions {
		builds = append(builds, withStatus(mockBuild("a", string(BuildPhaseOption)+"-active", activeBuildConfig), BuildPhaseOption))
		builds = append(builds, withStatus(mockBuild("a", string(BuildPhaseOption)+"-inactive", inactiveBuildConfig), BuildPhaseOption))
		builds = append(builds, withStatus(mockBuild("a", string(BuildPhaseOption)+"-orphan", nil), BuildPhaseOption))
		if BuildPhaseFilterSet.Has(string(BuildPhaseOption)) {
			expectedNames.Insert(string(BuildPhaseOption) + "-inactive")
			expectedNames.Insert(string(BuildPhaseOption) + "-orphan")
		}
	}

	dataSet := NewDataSet(buildConfigs, builds)
	resolver := NewOrphanBuildResolver(dataSet, BuildPhaseFilter)
	results, err := resolver.Resolve()
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	foundNames := util.StringSet{}
	for _, result := range results {
		foundNames.Insert(result.Name)
	}
	if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
		t.Errorf("expected %v, actual %v", expectedNames, foundNames)
	}
}
func validateList(t *testing.T, lister Lister, user user.Info, expectedSet util.StringSet) {
	namespaceList, err := lister.List(user)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	results := util.StringSet{}
	for _, namespace := range namespaceList.Items {
		results.Insert(namespace.Name)
	}
	if results.Len() != expectedSet.Len() || !results.HasAll(expectedSet.List()...) {
		t.Errorf("User %v, Expected: %v, Actual: %v", user.GetName(), expectedSet, results)
	}
}
func getFitPredicateFunctions(names util.StringSet, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	predicates := map[string]algorithm.FitPredicate{}
	for _, name := range names.List() {
		factory, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}
	return predicates, nil
}
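// A minimal sketch of the factory-registry pattern used by getFitPredicateFunctions:
// names select factories from a registry map, and each factory builds a function
// from shared plugin arguments. All type and registry names here are hypothetical
// stand-ins, not the scheduler's real API.
package main

import "fmt"

type Args struct{ NodeCount int }

type Predicate func(pod string) bool

type PredicateFactory func(Args) Predicate

var predicateRegistry = map[string]PredicateFactory{
	"AlwaysFit": func(a Args) Predicate {
		return func(pod string) bool { return a.NodeCount > 0 }
	},
}

func getPredicates(names []string, args Args) (map[string]Predicate, error) {
	predicates := map[string]Predicate{}
	for _, name := range names {
		factory, ok := predicateRegistry[name]
		if !ok {
			return nil, fmt.Errorf("invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}
	return predicates, nil
}

func main() {
	preds, err := getPredicates([]string{"AlwaysFit"}, Args{NodeCount: 3})
	fmt.Println(len(preds), err) // 1 <nil>
}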
func TestOrphanDeploymentResolver(t *testing.T) {
	activeDeploymentConfig := mockDeploymentConfig("a", "active-deployment-config")
	inactiveDeploymentConfig := mockDeploymentConfig("a", "inactive-deployment-config")

	deploymentConfigs := []*deployapi.DeploymentConfig{activeDeploymentConfig}
	deployments := []*kapi.ReplicationController{}

	expectedNames := util.StringSet{}
	deploymentStatusOptions := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
		deployapi.DeploymentStatusNew,
		deployapi.DeploymentStatusPending,
		deployapi.DeploymentStatusRunning,
	}
	deploymentStatusFilter := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
	}
	deploymentStatusFilterSet := util.StringSet{}
	for _, deploymentStatus := range deploymentStatusFilter {
		deploymentStatusFilterSet.Insert(string(deploymentStatus))
	}

	for _, deploymentStatusOption := range deploymentStatusOptions {
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-active", activeDeploymentConfig), deploymentStatusOption))
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-inactive", inactiveDeploymentConfig), deploymentStatusOption))
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-orphan", nil), deploymentStatusOption))
		if deploymentStatusFilterSet.Has(string(deploymentStatusOption)) {
			expectedNames.Insert(string(deploymentStatusOption) + "-inactive")
			expectedNames.Insert(string(deploymentStatusOption) + "-orphan")
		}
	}

	dataSet := NewDataSet(deploymentConfigs, deployments)
	resolver := NewOrphanDeploymentResolver(dataSet, deploymentStatusFilter)
	results, err := resolver.Resolve()
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	foundNames := util.StringSet{}
	for _, result := range results {
		foundNames.Insert(result.Name)
	}
	if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
		t.Errorf("expected %v, actual %v", expectedNames, foundNames)
	}
}
func (ca *CA) MakeServerCert(certFile, keyFile string, hostnames util.StringSet) (*TLSCertificateConfig, error) {
	glog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile)
	serverPublicKey, serverPrivateKey, _ := NewKeyPair()
	serverTemplate, _ := newServerCertificateTemplate(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List())
	serverCrt, _ := ca.signCertificate(serverTemplate, serverPublicKey)
	server := &TLSCertificateConfig{
		Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...),
		Key:   serverPrivateKey,
	}
	if err := server.writeCertConfig(certFile, keyFile); err != nil {
		return server, err
	}
	return server, nil
}
func printPolicy(policy *authorizationapi.Policy, w io.Writer, withNamespace, wide bool, columnLabels []string) error {
	roleNames := util.StringSet{}
	for key := range policy.Roles {
		roleNames.Insert(key)
	}
	rolesString := strings.Join(roleNames.List(), ", ")

	if withNamespace {
		if _, err := fmt.Fprintf(w, "%s\t", policy.Namespace); err != nil {
			return err
		}
	}
	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", policy.Name, rolesString, policy.LastModified)
	return err
}
func getPriorityFunctionConfigs(names util.StringSet, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	configs := []algorithm.PriorityConfig{}
	for _, name := range names.List() {
		factory, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		configs = append(configs, algorithm.PriorityConfig{
			Function: factory.Function(args),
			Weight:   factory.Weight,
		})
	}
	return configs, nil
}
func GetServerCert(certFile, keyFile string, hostnames util.StringSet) (*TLSCertificateConfig, error) {
	server, err := GetTLSCertificateConfig(certFile, keyFile)
	if err != nil {
		return nil, err
	}

	cert := server.Certs[0]
	ips, dns := IPAddressesDNSNames(hostnames.List())
	missingIps := ipsNotInSlice(ips, cert.IPAddresses)
	missingDns := stringsNotInSlice(dns, cert.DNSNames)
	if len(missingIps) == 0 && len(missingDns) == 0 {
		glog.V(4).Infof("Found existing server certificate in %s", certFile)
		return server, nil
	}

	return nil, fmt.Errorf("Existing server certificate in %s was missing some hostnames (%v) or IP addresses (%v).", certFile, missingDns, missingIps)
}
func (b *BlunderbussMunger) MungePullRequest(client *github.Client, org, project string, pr *github.PullRequest, issue *github.Issue, commits []github.RepositoryCommit, events []github.IssueEvent, dryrun bool) {
	if b.config == nil {
		b.loadConfig()
	}
	if issue.Assignee != nil {
		return
	}

	potentialOwners := util.StringSet{}
	for _, commit := range commits {
		if commit.Author == nil || commit.Author.Login == nil || commit.SHA == nil {
			glog.Warningf("Skipping invalid commit for %d: %#v", *pr.Number, commit)
			continue
		}
		// Fetch the full commit so the changed files are populated; use a distinct
		// variable so the loop's commit stays valid if the call fails.
		fullCommit, _, err := client.Repositories.GetCommit(*commit.Author.Login, project, *commit.SHA)
		if err != nil {
			glog.Errorf("Can't load commit %s %s %s", *commit.Author.Login, project, *commit.SHA)
			continue
		}
		for _, file := range fullCommit.Files {
			fileOwners := b.config.FindOwners(*file.Filename)
			if len(fileOwners) == 0 {
				glog.Warningf("Couldn't find an owner for: %s", *file.Filename)
			}
			potentialOwners.Insert(fileOwners...)
		}
	}
	if potentialOwners.Len() == 0 {
		glog.Errorf("No owners found for PR %d", *pr.Number)
		return
	}
	ix := rand.Int() % potentialOwners.Len()
	owner := potentialOwners.List()[ix]

	if dryrun {
		glog.Infof("would have assigned %s to PR %d", owner, *pr.Number)
		return
	}
	if _, _, err := client.Issues.Edit(org, project, *pr.Number, &github.IssueRequest{Assignee: &owner}); err != nil {
		glog.Errorf("Error updating issue: %v", err)
	}
}
// waitTillNPodsRunningOnNodes polls the /runningpods endpoint on kubelet until
// it finds targetNumPods pods that match the given criteria (namespace and
// podNamePrefix). Note that we usually use label selector to filter pods that
// belong to the same RC. However, we use podNamePrefix with namespace here
// because pods returned from /runningpods do not contain the original label
// information; they are reconstructed by examining the container runtime. In
// the scope of this test, we do not expect pod naming conflicts so
// podNamePrefix should be sufficient to identify the pods.
func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames util.StringSet, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
	return wait.Poll(pollInterval, timeout, func() (bool, error) {
		matchCh := make(chan util.StringSet, len(nodeNames))
		for _, item := range nodeNames.List() {
			// Launch a goroutine per node to check the pods running on the nodes.
			nodeName := item
			go func() {
				matchCh <- getPodMatches(c, nodeName, podNamePrefix, namespace)
			}()
		}

		seen := util.NewStringSet()
		for i := 0; i < len(nodeNames.List()); i++ {
			seen = seen.Union(<-matchCh)
		}
		if seen.Len() == targetNumPods {
			return true, nil
		}
		Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
		return false, nil
	})
}
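// A minimal sketch of the fan-out/fan-in pattern above: one goroutine per node
// reports its matches on a buffered channel, and the results are unioned into a
// single set. The node names and match data below are invented for illustration,
// and plain map/slice types stand in for util.StringSet.
package main

import "fmt"

func main() {
	nodeNames := []string{"node-1", "node-2", "node-3"}
	matchesOn := map[string][]string{
		"node-1": {"pod-a"},
		"node-2": {"pod-a", "pod-b"},
		"node-3": {},
	}

	matchCh := make(chan []string, len(nodeNames))
	for _, nodeName := range nodeNames {
		nodeName := nodeName // capture a per-iteration copy, as in the original
		go func() { matchCh <- matchesOn[nodeName] }()
	}

	// Union the per-node results, standing in for seen.Union(<-matchCh).
	seen := map[string]struct{}{}
	for range nodeNames {
		for _, pod := range <-matchCh {
			seen[pod] = struct{}{}
		}
	}
	fmt.Println(len(seen)) // 2
}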
// Object returns a single object representing the output of a single visit to all
// found resources. If the Builder was a singular context (expected to return a
// single resource by user input) and only a single resource was found, the resource
// will be returned as is. Otherwise, the returned resources will be part of an
// api.List. The ResourceVersion of the api.List will be set only if it is identical
// across all infos returned.
func (r *Result) Object() (runtime.Object, error) {
	infos, err := r.Infos()
	if err != nil {
		return nil, err
	}

	versions := util.StringSet{}
	objects := []runtime.Object{}
	for _, info := range infos {
		if info.Object != nil {
			objects = append(objects, info.Object)
			versions.Insert(info.ResourceVersion)
		}
	}

	if len(objects) == 1 {
		if r.singular {
			return objects[0], nil
		}
		// if the item is a list already, don't create another list
		if runtime.IsListType(objects[0]) {
			return objects[0], nil
		}
	}

	version := ""
	if len(versions) == 1 {
		version = versions.List()[0]
	}

	return &api.List{
		ListMeta: api.ListMeta{
			ResourceVersion: version,
		},
		Items: objects,
	}, err
}
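// A minimal sketch of the "set the list's ResourceVersion only when every item
// agrees" trick used above: collect the versions into a set and use the single
// element only when the set has exactly one member. The commonVersion helper and
// the sample versions are assumptions for illustration only.
package main

import "fmt"

func commonVersion(versions []string) string {
	seen := map[string]struct{}{}
	for _, v := range versions {
		seen[v] = struct{}{}
	}
	if len(seen) == 1 {
		for v := range seen {
			return v
		}
	}
	return "" // no single version shared by all items
}

func main() {
	fmt.Println(commonVersion([]string{"42", "42", "42"})) // 42
	fmt.Println(commonVersion([]string{"42", "43"}))       // (empty)
}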
func ExpandResources(rawResources kutil.StringSet) kutil.StringSet {
	ret := kutil.StringSet{}
	toVisit := rawResources.List()
	visited := kutil.StringSet{}

	for i := 0; i < len(toVisit); i++ {
		currResource := toVisit[i]
		if visited.Has(currResource) {
			continue
		}
		visited.Insert(currResource)

		if strings.Index(currResource, ResourceGroupPrefix+":") != 0 {
			ret.Insert(strings.ToLower(currResource))
			continue
		}

		if resourceTypes, exists := GroupsToResources[currResource]; exists {
			toVisit = append(toVisit, resourceTypes...)
		}
	}

	return ret
}
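// A minimal sketch of the worklist expansion in ExpandResources: group names are
// expanded into their member resources, nested groups are followed, and a visited
// set prevents revisiting (and thus cycles). The group prefix and map contents are
// hypothetical stand-ins, not the real ResourceGroupPrefix or GroupsToResources data.
package main

import (
	"fmt"
	"sort"
	"strings"
)

const groupPrefix = "resourcegroup:"

var groupsToResources = map[string][]string{
	groupPrefix + "builds":      {"builds", "buildconfigs", groupPrefix + "buildstatus"},
	groupPrefix + "buildstatus": {"builds/log"},
}

func expandResources(raw []string) []string {
	ret := map[string]struct{}{}
	visited := map[string]struct{}{}
	toVisit := append([]string{}, raw...)

	for i := 0; i < len(toVisit); i++ {
		curr := toVisit[i]
		if _, ok := visited[curr]; ok {
			continue
		}
		visited[curr] = struct{}{}

		if !strings.HasPrefix(curr, groupPrefix) {
			ret[strings.ToLower(curr)] = struct{}{}
			continue
		}
		// A group expands into more work items, which may themselves be groups.
		toVisit = append(toVisit, groupsToResources[curr]...)
	}

	out := make([]string, 0, len(ret))
	for r := range ret {
		out = append(out, r)
	}
	sort.Strings(out)
	return out
}

func main() {
	fmt.Println(expandResources([]string{groupPrefix + "builds", "pods"}))
	// [buildconfigs builds builds/log pods]
}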
func TestPerBuildConfigResolver(t *testing.T) {
	BuildPhaseOptions := []buildapi.BuildPhase{
		buildapi.BuildPhaseCancelled,
		buildapi.BuildPhaseComplete,
		buildapi.BuildPhaseError,
		buildapi.BuildPhaseFailed,
		buildapi.BuildPhaseNew,
		buildapi.BuildPhasePending,
		buildapi.BuildPhaseRunning,
	}
	buildConfigs := []*buildapi.BuildConfig{
		mockBuildConfig("a", "build-config-1"),
		mockBuildConfig("b", "build-config-2"),
	}
	buildsPerStatus := 100
	builds := []*buildapi.Build{}
	for _, buildConfig := range buildConfigs {
		for _, BuildPhaseOption := range BuildPhaseOptions {
			for i := 0; i < buildsPerStatus; i++ {
				build := withStatus(mockBuild(buildConfig.Namespace, fmt.Sprintf("%v-%v-%v", buildConfig.Name, BuildPhaseOption, i), buildConfig), BuildPhaseOption)
				builds = append(builds, build)
			}
		}
	}

	now := util.Now()
	for i := range builds {
		creationTimestamp := util.NewTime(now.Time.Add(-1 * time.Duration(i) * time.Hour))
		builds[i].CreationTimestamp = creationTimestamp
	}

	// test number to keep at varying ranges
	for keep := 0; keep < buildsPerStatus*2; keep++ {
		dataSet := NewDataSet(buildConfigs, builds)

		expectedNames := util.StringSet{}
		buildCompleteStatusFilterSet := util.NewStringSet(string(buildapi.BuildPhaseComplete))
		buildFailedStatusFilterSet := util.NewStringSet(string(buildapi.BuildPhaseCancelled), string(buildapi.BuildPhaseError), string(buildapi.BuildPhaseFailed))

		for _, buildConfig := range buildConfigs {
			buildItems, err := dataSet.ListBuildsByBuildConfig(buildConfig)
			if err != nil {
				t.Errorf("Unexpected err %v", err)
			}
			completedBuilds, failedBuilds := []*buildapi.Build{}, []*buildapi.Build{}
			for _, build := range buildItems {
				if buildCompleteStatusFilterSet.Has(string(build.Status.Phase)) {
					completedBuilds = append(completedBuilds, build)
				} else if buildFailedStatusFilterSet.Has(string(build.Status.Phase)) {
					failedBuilds = append(failedBuilds, build)
				}
			}
			sort.Sort(sortableBuilds(completedBuilds))
			sort.Sort(sortableBuilds(failedBuilds))
			purgeCompleted := []*buildapi.Build{}
			purgeFailed := []*buildapi.Build{}
			if keep >= 0 && keep < len(completedBuilds) {
				purgeCompleted = completedBuilds[keep:]
			}
			if keep >= 0 && keep < len(failedBuilds) {
				purgeFailed = failedBuilds[keep:]
			}
			for _, build := range purgeCompleted {
				expectedNames.Insert(build.Name)
			}
			for _, build := range purgeFailed {
				expectedNames.Insert(build.Name)
			}
		}

		resolver := NewPerBuildConfigResolver(dataSet, keep, keep)
		results, err := resolver.Resolve()
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		foundNames := util.StringSet{}
		for _, result := range results {
			foundNames.Insert(result.Name)
		}
		if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
			expectedValues := expectedNames.List()
			actualValues := foundNames.List()
			sort.Strings(expectedValues)
			sort.Strings(actualValues)
			t.Errorf("keep %v\n, expected \n\t%v\n, actual \n\t%v\n", keep, expectedValues, actualValues)
		}
	}
}
func CheckSetEq(lhs, rhs util.StringSet) bool {
	return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}
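// A minimal sketch of the two-way containment check in CheckSetEq, using plain
// map-backed sets and helper functions as stand-ins for util.StringSet, HasAll,
// and List. Two sets are equal exactly when each contains all elements of the other.
package main

import "fmt"

func hasAll(set map[string]struct{}, items []string) bool {
	for _, item := range items {
		if _, ok := set[item]; !ok {
			return false
		}
	}
	return true
}

func keys(set map[string]struct{}) []string {
	out := make([]string, 0, len(set))
	for k := range set {
		out = append(out, k)
	}
	return out
}

func setEq(lhs, rhs map[string]struct{}) bool {
	return hasAll(lhs, keys(rhs)) && hasAll(rhs, keys(lhs))
}

func main() {
	a := map[string]struct{}{"x": {}, "y": {}}
	b := map[string]struct{}{"y": {}, "x": {}}
	fmt.Println(setEq(a, b)) // true
}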
// Discover the projects available for the established session and take one to use. It
// falls back to the default namespace when no projects exist, and prints out useful
// information when there are multiple projects to choose from.
// Requires o.Username to be set.
func (o *LoginOptions) gatherProjectInfo() error {
	me, err := o.whoAmI()
	if err != nil {
		return err
	}

	if o.Username != me.Name {
		return fmt.Errorf("current user, %v, does not match expected user %v", me.Name, o.Username)
	}

	oClient, err := client.New(o.Config)
	if err != nil {
		return err
	}
	projects, err := oClient.Projects().List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	projectsItems := projects.Items

	switch len(projectsItems) {
	case 0:
		fmt.Fprintf(o.Out, `You don't have any projects. You can try to create a new project, by running

    $ oc new-project <projectname>

`)
		o.Project = o.DefaultNamespace

	case 1:
		o.Project = projectsItems[0].Name
		fmt.Fprintf(o.Out, "Using project %q.\n", o.Project)

	default:
		projects := util.StringSet{}
		for _, project := range projectsItems {
			projects.Insert(project.Name)
		}

		namespace := o.DefaultNamespace
		if !projects.Has(namespace) {
			if namespace != kapi.NamespaceDefault && projects.Has(kapi.NamespaceDefault) {
				namespace = kapi.NamespaceDefault
			} else {
				namespace = projects.List()[0]
			}
		}

		if current, err := oClient.Projects().Get(namespace); err == nil {
			o.Project = current.Name
			fmt.Fprintf(o.Out, "Using project %q.\n", o.Project)
		} else if !kerrors.IsNotFound(err) && !clientcmd.IsForbidden(err) {
			return err
		}

		fmt.Fprintf(o.Out, "\nYou have access to the following projects and can switch between them with 'oc project <projectname>':\n\n")
		for _, p := range projects.List() {
			if o.Project == p {
				fmt.Fprintf(o.Out, " * %s (current)\n", p)
			} else {
				fmt.Fprintf(o.Out, " * %s\n", p)
			}
		}
		fmt.Fprintln(o.Out)
	}

	return nil
}
func Example() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// This will hold the downstream state, as we know it.
	downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	cfg := &framework.Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &api.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,

		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
			newest := obj.(cache.Deltas).Newest()

			if newest.Type != cache.Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}

				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}

				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers.
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}

				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}

	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go framework.New(cfg).Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
func TestPerDeploymentConfigResolver(t *testing.T) {
	deploymentStatusOptions := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
		deployapi.DeploymentStatusNew,
		deployapi.DeploymentStatusPending,
		deployapi.DeploymentStatusRunning,
	}
	deploymentConfigs := []*deployapi.DeploymentConfig{
		mockDeploymentConfig("a", "deployment-config-1"),
		mockDeploymentConfig("b", "deployment-config-2"),
	}
	deploymentsPerStatus := 100
	deployments := []*kapi.ReplicationController{}
	for _, deploymentConfig := range deploymentConfigs {
		for _, deploymentStatusOption := range deploymentStatusOptions {
			for i := 0; i < deploymentsPerStatus; i++ {
				deployment := withStatus(mockDeployment(deploymentConfig.Namespace, fmt.Sprintf("%v-%v-%v", deploymentConfig.Name, deploymentStatusOption, i), deploymentConfig), deploymentStatusOption)
				deployments = append(deployments, deployment)
			}
		}
	}

	now := util.Now()
	for i := range deployments {
		creationTimestamp := util.NewTime(now.Time.Add(-1 * time.Duration(i) * time.Hour))
		deployments[i].CreationTimestamp = creationTimestamp
	}

	// test number to keep at varying ranges
	for keep := 0; keep < deploymentsPerStatus*2; keep++ {
		dataSet := NewDataSet(deploymentConfigs, deployments)

		expectedNames := util.StringSet{}
		deploymentCompleteStatusFilterSet := util.NewStringSet(string(deployapi.DeploymentStatusComplete))
		deploymentFailedStatusFilterSet := util.NewStringSet(string(deployapi.DeploymentStatusFailed))

		for _, deploymentConfig := range deploymentConfigs {
			deploymentItems, err := dataSet.ListDeploymentsByDeploymentConfig(deploymentConfig)
			if err != nil {
				t.Errorf("Unexpected err %v", err)
			}
			completedDeployments, failedDeployments := []*kapi.ReplicationController{}, []*kapi.ReplicationController{}
			for _, deployment := range deploymentItems {
				status := deployment.Annotations[deployapi.DeploymentStatusAnnotation]
				if deploymentCompleteStatusFilterSet.Has(status) {
					completedDeployments = append(completedDeployments, deployment)
				} else if deploymentFailedStatusFilterSet.Has(status) {
					failedDeployments = append(failedDeployments, deployment)
				}
			}
			sort.Sort(sortableReplicationControllers(completedDeployments))
			sort.Sort(sortableReplicationControllers(failedDeployments))
			purgeCompleted := []*kapi.ReplicationController{}
			purgeFailed := []*kapi.ReplicationController{}
			if keep >= 0 && keep < len(completedDeployments) {
				purgeCompleted = completedDeployments[keep:]
			}
			if keep >= 0 && keep < len(failedDeployments) {
				purgeFailed = failedDeployments[keep:]
			}
			for _, deployment := range purgeCompleted {
				expectedNames.Insert(deployment.Name)
			}
			for _, deployment := range purgeFailed {
				expectedNames.Insert(deployment.Name)
			}
		}

		resolver := NewPerDeploymentConfigResolver(dataSet, keep, keep)
		results, err := resolver.Resolve()
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		foundNames := util.StringSet{}
		for _, result := range results {
			foundNames.Insert(result.Name)
		}
		if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
			expectedValues := expectedNames.List()
			actualValues := foundNames.List()
			sort.Strings(expectedValues)
			sort.Strings(actualValues)
			t.Errorf("keep %v\n, expected \n\t%v\n, actual \n\t%v\n", keep, expectedValues, actualValues)
		}
	}
}
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", apiVersion)

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL, apiVersion)

	// OK, we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: apiVersion})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := util.StringSet{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12: 2 pod infra containers + 2 containers from the replication controller +
	// 1 pod infra container + 2 containers from the URL on first Kubelet +
	// 1 pod infra container + 2 containers from the URL on second Kubelet +
	// 1 pod infra container + 1 container from the service test.
	// The total number of containers created is 12.
	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
	glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}
func TestHammerController(t *testing.T) {
	// This test executes a bunch of requests through the fake source and
	// controller framework to make sure there are no locking/threading
	// errors. If such an error exists, this test should hang forever or
	// trigger the race detector.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	outputSetLock := sync.Mutex{}
	// map of key to operations done on the key
	outputSet := map[string][]string{}

	recordFunc := func(eventType string, obj interface{}) {
		key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
		if err != nil {
			t.Errorf("something wrong with key: %v", err)
			key = "oops something went wrong with the key"
		}

		// Record some output when items are deleted.
		outputSetLock.Lock()
		defer outputSetLock.Unlock()
		outputSet[key] = append(outputSet[key], eventType)
	}

	// Make a controller which just logs all the changes it gets.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { recordFunc("add", obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
			DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
		},
	)

	if controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return false before we started the controller")
	}

	// Run the controller and run it until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	// Let's wait for the controller to do its initial sync
	time.Sleep(100 * time.Millisecond)
	if !controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return true after the initial sync")
	}

	wg := sync.WaitGroup{}
	const threads = 3
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func() {
			defer wg.Done()
			// Let's add a few objects to the source.
			currentNames := util.StringSet{}
			rs := rand.NewSource(rand.Int63())
			f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
			r := rand.New(rs) // Mustn't use r and f concurrently!
			for i := 0; i < 100; i++ {
				var name string
				var isNew bool
				if currentNames.Len() == 0 || r.Intn(3) == 1 {
					f.Fuzz(&name)
					isNew = true
				} else {
					l := currentNames.List()
					name = l[r.Intn(len(l))]
				}

				pod := &api.Pod{}
				f.Fuzz(pod)
				pod.ObjectMeta.Name = name
				pod.ObjectMeta.Namespace = "default"
				// Add, update, or delete randomly.
				// Note that these pods are not valid-- the fake source doesn't
				// call validation or perform any other checking.
				if isNew {
					currentNames.Insert(name)
					source.Add(pod)
					continue
				}
				switch r.Intn(2) {
				case 0:
					currentNames.Insert(name)
					source.Modify(pod)
				case 1:
					currentNames.Delete(name)
					source.Delete(pod)
				}
			}
		}()
	}
	wg.Wait()

	// Let's wait for the controller to finish processing the things we just added.
	time.Sleep(100 * time.Millisecond)
	close(stop)

	outputSetLock.Lock()
	t.Logf("got: %#v", outputSet)
}