Example #1
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := sets.String{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
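The test above leans on only a handful of sets.String operations. Below is a minimal standalone sketch of just those calls, assuming the util/sets import path used by this era of the tree (newer trees ship the same API under k8s.io/apimachinery/pkg/util/sets):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	// Build the expected set up front, as the test does.
	expected := sets.NewString("a", "c")

	// Collect what the index actually returned.
	found := sets.String{}
	found.Insert("c")
	found.Insert("a")
	found.Insert("a") // duplicates are absorbed

	// HasAll checks that every expected id is present.
	fmt.Println(found.HasAll(expected.List()...)) // true

	// List returns the members sorted, which keeps comparisons
	// and failure messages deterministic.
	fmt.Println(found.List()) // [a c]
}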
Example #2
func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
Example #3
func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	predicates := map[string]algorithm.FitPredicate{}
	for _, name := range names.List() {
		factory, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}
	return predicates, nil
}
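The same look-up-by-name pattern works for any string-keyed registry, and names.List() gives a deterministic (sorted) build order. A hedged, self-contained sketch follows; the registry and factory types here are hypothetical, not the scheduler's real ones:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

// A hypothetical registry of factories, standing in for fitPredicateMap.
var factories = map[string]func(base int) int{
	"double": func(base int) int { return base * 2 },
	"square": func(base int) int { return base * base },
}

func build(names sets.String, base int) (map[string]int, error) {
	out := map[string]int{}
	// names.List() is sorted, so the build order is deterministic.
	for _, name := range names.List() {
		factory, ok := factories[name]
		if !ok {
			return nil, fmt.Errorf("invalid name %q specified - no corresponding function found", name)
		}
		out[name] = factory(base)
	}
	return out, nil
}

func main() {
	got, err := build(sets.NewString("double", "square"), 3)
	fmt.Println(got, err) // map[double:6 square:9] <nil>
}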
Example #4
File: fakes.go  Project: qinguoan/vulcan
// getInstanceList returns an instance list based on the given names.
// The names must not contain a '.'; the real GCE API validates against this.
func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances {
	instanceNames := nodeNames.List()
	computeInstances := []*compute.InstanceWithNamedPorts{}
	for _, name := range instanceNames {
		instanceLink := fmt.Sprintf(
			"https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
			"project", "zone", name)
		computeInstances = append(
			computeInstances, &compute.InstanceWithNamedPorts{
				Instance: instanceLink})
	}
	return &compute.InstanceGroupsListInstances{
		Items: computeInstances,
	}
}
Example #5
func runResourceTrackingTest(framework *Framework, podsPerNode int, nodeNames sets.String, resourceMonitor *resourceMonitor) {
	numNodes := nodeNames.Len()
	totalPods := podsPerNode * numNodes
	By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(util.NewUUID()))

	// TODO: Use a more realistic workload
	Expect(RunRC(RCConfig{
		Client:    framework.Client,
		Name:      rcName,
		Namespace: framework.Namespace.Name,
		Image:     "gcr.io/google_containers/pause:go",
		Replicas:  totalPods,
	})).NotTo(HaveOccurred())

	// Log once and flush the stats.
	resourceMonitor.LogLatest()
	resourceMonitor.Reset()

	By("Start monitoring resource usage")
	// Periodically dump the cpu summary until the deadline is met.
	// Note that without calling resourceMonitor.Reset(), the stats
	// would occupy increasingly more memory. This should be fine
	// for the current test duration, but we should reclaim the
	// entries if we plan to monitor longer (e.g., 8 hours).
	deadline := time.Now().Add(monitoringTime)
	for time.Now().Before(deadline) {
		Logf("Still running...%v left", deadline.Sub(time.Now()))
		time.Sleep(reportingPeriod)
		timeLeft := deadline.Sub(time.Now())
		Logf("Still running...%v left", timeLeft)
		if timeLeft < reportingPeriod {
			time.Sleep(timeLeft)
		} else {
			time.Sleep(reportingPeriod)
		}
		logPodsOnNodes(framework.Client, nodeNames.List())
	}

	By("Reporting overall resource usage")
	logPodsOnNodes(framework.Client, nodeNames.List())
	resourceMonitor.LogCPUSummary()
	resourceMonitor.LogLatest()

	By("Deleting the RC")
	DeleteRC(framework.Client, framework.Namespace.Name, rcName)
}
Example #6
func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) (map[string]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	configs := map[string]algorithm.PriorityConfig{}
	for _, name := range names.List() {
		factory, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		configs[name] = algorithm.PriorityConfig{
			Function: factory.Function(args),
			Weight:   factory.Weight,
		}
	}
	return configs, nil
}
Example #7
File: master.go  Project: qinguoan/vulcan
// backends returns all backends for all registered storage destinations.
// It is used to get all instances for health validation.
func (s *StorageDestinations) backends() []string {
	backends := sets.String{}
	for _, group := range s.APIGroups {
		if group.Default != nil {
			for _, backend := range group.Default.Backends() {
				backends.Insert(backend)
			}
		}
		if group.Overrides != nil {
			for _, storage := range group.Overrides {
				for _, backend := range storage.Backends() {
					backends.Insert(backend)
				}
			}
		}
	}
	return backends.List()
}
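backends relies on the set to de-duplicate backends that appear under both a group's default storage and an override, and List() returns them sorted. A small sketch of that de-duplication, with made-up backend addresses and the same util/sets import as the earlier sketches:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	backends := sets.String{}
	// The same backend may be registered by several groups; Insert absorbs duplicates.
	for _, b := range []string{"http://etcd-b:4001", "http://etcd-a:4001", "http://etcd-a:4001"} {
		backends.Insert(b)
	}
	// List returns each backend once, in sorted order.
	fmt.Println(backends.List()) // [http://etcd-a:4001 http://etcd-b:4001]
}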
Example #8
func (t *ThirdPartyController) syncResourceList(list runtime.Object) error {
	existing := sets.String{}
	switch list := list.(type) {
	case *expapi.ThirdPartyResourceList:
		// Loop across all schema objects for third party resources
		for ix := range list.Items {
			item := &list.Items[ix]
			// extract the api group and resource kind from the schema
			_, group, err := thirdpartyresourcedata.ExtractApiGroupAndKind(item)
			if err != nil {
				return err
			}
			// place it in the set of resources that we expect, so that we don't delete it in the delete pass
			existing.Insert(makeThirdPartyPath(group))
			// ensure a RESTful resource for this schema exists on the master
			if err := t.SyncOneResource(item); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("expected a *ThirdPartyResourceList, got %#v", list)
	}
	// deletion phase, get all installed RESTful resources
	installed := t.master.ListThirdPartyResources()
	for _, installedAPI := range installed {
		found := false
		// search across the expected restful resources to see if this resource belongs to one of the expected ones
		for _, apiPath := range existing.List() {
			if installedAPI == apiPath || strings.HasPrefix(installedAPI, apiPath+"/") {
				found = true
				break
			}
		}
		// not expected, delete the resource
		if !found {
			if err := t.master.RemoveThirdPartyResource(installedAPI); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #9
func generateCSV(buildLatency BuildLatencyData, resources, methods sets.String, out io.Writer) error {
	header := []string{"build"}
	for _, rsrc := range resources.List() {
		header = append(header, fmt.Sprintf("%s_50", rsrc))
		header = append(header, fmt.Sprintf("%s_90", rsrc))
		header = append(header, fmt.Sprintf("%s_99", rsrc))
	}
	if _, err := fmt.Fprintln(out, strings.Join(header, ",")); err != nil {
		return err
	}

	for _, method := range methods.List() {
		if _, err := fmt.Fprintln(out, method); err != nil {
			return err
		}
		for build, data := range buildLatency {
			line := []string{fmt.Sprintf("%d", build)}
			for _, rsrc := range resources.List() {
				podData := data[rsrc]
				line = append(line, fmt.Sprintf("%g", findMethod(method, "Perc50", podData)))
				line = append(line, fmt.Sprintf("%g", findMethod(method, "Perc90", podData)))
				line = append(line, fmt.Sprintf("%g", findMethod(method, "Perc99", podData)))
			}
			if _, err := fmt.Fprintln(out, strings.Join(line, ",")); err != nil {
				return err
			}
		}
	}
	return nil
}
Example #10
// waitTillNPodsRunningOnNodes polls the /runningpods endpoint on kubelet until
// it finds targetNumPods pods that match the given criteria (namespace and
// podNamePrefix). Note that we usually use label selector to filter pods that
// belong to the same RC. However, we use podNamePrefix with namespace here
// because pods returned from /runningpods do not contain the original label
// information; they are reconstructed by examining the container runtime. In
// the scope of this test, we do not expect pod naming conflicts so
// podNamePrefix should be sufficient to identify the pods.
func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
	return wait.Poll(pollInterval, timeout, func() (bool, error) {
		matchCh := make(chan sets.String, len(nodeNames))
		for _, item := range nodeNames.List() {
			// Launch a goroutine per node to check the pods running on the nodes.
			nodeName := item
			go func() {
				matchCh <- getPodMatches(c, nodeName, podNamePrefix, namespace)
			}()
		}

		seen := sets.NewString()
		for i := 0; i < len(nodeNames.List()); i++ {
			seen = seen.Union(<-matchCh)
		}
		if seen.Len() == targetNumPods {
			return true, nil
		}
		Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
		return false, nil
	})
}
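The per-node results are merged with Union, which returns a new set containing the members of both operands (it does not mutate the receiver). A minimal sketch of that merge, again assuming the same util/sets import:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	results := make(chan sets.String, 2)
	results <- sets.NewString("pod-a", "pod-b")
	results <- sets.NewString("pod-b", "pod-c")

	seen := sets.NewString()
	for i := 0; i < 2; i++ {
		// Union returns a fresh set, so reassign the result.
		seen = seen.Union(<-results)
	}
	fmt.Println(seen.Len(), seen.List()) // 3 [pod-a pod-b pod-c]
}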
Example #11
File: result.go  Project: qinguoan/vulcan
// Object returns a single object representing the output of a single visit to all
// found resources.  If the Builder was a singular context (expected to return a
// single resource by user input) and only a single resource was found, the resource
// will be returned as is.  Otherwise, the returned resources will be part of an
// api.List. The ResourceVersion of the api.List will be set only if it is identical
// across all infos returned.
func (r *Result) Object() (runtime.Object, error) {
	infos, err := r.Infos()
	if err != nil {
		return nil, err
	}

	versions := sets.String{}
	objects := []runtime.Object{}
	for _, info := range infos {
		if info.Object != nil {
			objects = append(objects, info.Object)
			versions.Insert(info.ResourceVersion)
		}
	}

	if len(objects) == 1 {
		if r.singular {
			return objects[0], nil
		}
		// if the item is a list already, don't create another list
		if runtime.IsListType(objects[0]) {
			return objects[0], nil
		}
	}

	version := ""
	if len(versions) == 1 {
		version = versions.List()[0]
	}
	return &api.List{
		ListMeta: unversioned.ListMeta{
			ResourceVersion: version,
		},
		Items: objects,
	}, err
}
Example #12
func Example() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// This will hold the downstream state, as we know it.
	downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	cfg := &framework.Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &api.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,

		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
			newest := obj.(cache.Deltas).Newest()

			if newest.Type != cache.Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}

				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}

				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers.
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}

				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}

	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go framework.New(cfg).Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
Example #13
func TestHammerController(t *testing.T) {
	// This test executes a bunch of requests through the fake source and
	// controller framework to make sure there are no locking/threading
	// errors. If such an error exists, the test should hang forever or
	// trigger the race detector.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	outputSetLock := sync.Mutex{}
	// map of key to operations done on the key
	outputSet := map[string][]string{}

	recordFunc := func(eventType string, obj interface{}) {
		key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
		if err != nil {
			t.Errorf("something wrong with key: %v", err)
			key = "oops something went wrong with the key"
		}

		// Record some output when items are deleted.
		outputSetLock.Lock()
		defer outputSetLock.Unlock()
		outputSet[key] = append(outputSet[key], eventType)
	}

	// Make a controller which just logs all the changes it gets.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { recordFunc("add", obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
			DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
		},
	)

	if controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return false before we started the controller")
	}

	// Run the controller until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	// Let's wait for the controller to do its initial sync
	time.Sleep(100 * time.Millisecond)
	if !controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return true after the initial sync")
	}

	wg := sync.WaitGroup{}
	const threads = 3
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func() {
			defer wg.Done()
			// Let's add a few objects to the source.
			currentNames := sets.String{}
			rs := rand.NewSource(rand.Int63())
			f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
			r := rand.New(rs) // Mustn't use r and f concurrently!
			for i := 0; i < 100; i++ {
				var name string
				var isNew bool
				if currentNames.Len() == 0 || r.Intn(3) == 1 {
					f.Fuzz(&name)
					isNew = true
				} else {
					l := currentNames.List()
					name = l[r.Intn(len(l))]
				}

				pod := &api.Pod{}
				f.Fuzz(pod)
				pod.ObjectMeta.Name = name
				pod.ObjectMeta.Namespace = "default"
				// Add, update, or delete randomly.
				// Note that these pods are not valid-- the fake source doesn't
				// call validation or perform any other checking.
				if isNew {
					currentNames.Insert(name)
					source.Add(pod)
					continue
				}
				switch r.Intn(2) {
				case 0:
					currentNames.Insert(name)
					source.Modify(pod)
				case 1:
					currentNames.Delete(name)
					source.Delete(pod)
				}
			}
		}()
	}
	wg.Wait()

	// Let's wait for the controller to finish processing the things we just added.
	time.Sleep(100 * time.Millisecond)
	close(stop)

	outputSetLock.Lock()
	t.Logf("got: %#v", outputSet)
}
Example #14
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", os.Getenv("KUBE_TEST_API"))

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL)

	// OK, we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Default.GroupAndVersion()})
	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config
	kubeClient.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{Host: apiServerURL, Version: testapi.Extensions.GroupAndVersion()})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := sets.String{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12 containers: 2 pod infra containers + 2 containers from the replication controller +
	//                          1 pod infra container + 2 containers from the URL on the first Kubelet +
	//                          1 pod infra container + 2 containers from the URL on the second Kubelet +
	//                          1 pod infra container + 1 container from the service test.

	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
	glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}
Example #15
func writeWhitelist(fileName, header string, users sets.String) error {
	items := append([]string{header}, users.List()...)
	items = append(items, "")
	return ioutil.WriteFile(fileName, []byte(strings.Join(items, "\n")), 0640)
}
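Because users.List() is sorted, the file content produced by writeWhitelist is deterministic for a given set. A hedged sketch of just the content construction, with a made-up header and without touching the filesystem:

package main

import (
	"fmt"
	"strings"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	users := sets.NewString("carol", "alice", "bob")
	header := "# allowed users" // hypothetical header

	// Same construction as writeWhitelist: header, sorted users, trailing newline.
	items := append([]string{header}, users.List()...)
	items = append(items, "")
	fmt.Print(strings.Join(items, "\n"))
	// # allowed users
	// alice
	// bob
	// carol
}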
Example #16
func CheckSetEq(lhs, rhs sets.String) bool {
	return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}
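HasAll in both directions is exactly set equality: every element of rhs is in lhs and vice versa. A quick usage check (the versions of util/sets I'm aware of also expose an Equal method that expresses the same thing directly):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func CheckSetEq(lhs, rhs sets.String) bool {
	return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}

func main() {
	a := sets.NewString("x", "y")
	b := sets.NewString("y", "x")
	c := sets.NewString("x")

	fmt.Println(CheckSetEq(a, b)) // true: same members, order irrelevant
	fmt.Println(CheckSetEq(a, c)) // false: a has "y" that c lacks
}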