Code example #1
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]util.StringSet{}
	expected["b"] = util.NewStringSet("a", "c")
	expected["f"] = util.NewStringSet("e")
	expected["h"] = util.NewStringSet("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := util.StringSet{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
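
The examples that follow all lean on the same handful of util.StringSet operations: NewStringSet, Insert, Delete, Has, HasAll, List, Len, and Union. As a reading aid, here is a minimal, self-contained sketch of that behavior; the StringSet type below is an illustrative stand-in written for this page, not the real Kubernetes/OpenShift util implementation.

package main

import (
	"fmt"
	"sort"
)

// StringSet is an illustrative stand-in for util.StringSet, covering only the
// calls the examples on this page make.
type StringSet map[string]struct{}

// NewStringSet returns a set containing the given items.
func NewStringSet(items ...string) StringSet {
	s := StringSet{}
	s.Insert(items...)
	return s
}

// Insert adds items to the set.
func (s StringSet) Insert(items ...string) {
	for _, item := range items {
		s[item] = struct{}{}
	}
}

// Delete removes items from the set.
func (s StringSet) Delete(items ...string) {
	for _, item := range items {
		delete(s, item)
	}
}

// Has reports whether item is a member of the set.
func (s StringSet) Has(item string) bool {
	_, ok := s[item]
	return ok
}

// HasAll reports whether every given item is a member of the set.
func (s StringSet) HasAll(items ...string) bool {
	for _, item := range items {
		if !s.Has(item) {
			return false
		}
	}
	return true
}

// List returns the members in sorted order.
func (s StringSet) List() []string {
	out := make([]string, 0, len(s))
	for item := range s {
		out = append(out, item)
	}
	sort.Strings(out)
	return out
}

// Len returns the number of members.
func (s StringSet) Len() int { return len(s) }

// Union returns a new set containing the members of both sets.
func (s StringSet) Union(other StringSet) StringSet {
	result := NewStringSet(s.List()...)
	result.Insert(other.List()...)
	return result
}

func main() {
	set := NewStringSet("a", "c")
	set.Insert("e")
	fmt.Println(set.HasAll("a", "e")) // true
	fmt.Println(set.List())           // [a c e]
}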
Code example #2
File: prune_test.go Project: cjnygard/origin
func (m *mockPruneRecorder) Verify(t *testing.T, expected util.StringSet) {
	if len(m.set) != len(expected) || !m.set.HasAll(expected.List()...) {
		expectedValues := expected.List()
		actualValues := m.set.List()
		sort.Strings(expectedValues)
		sort.Strings(actualValues)
		t.Errorf("expected \n\t%v\n, actual \n\t%v\n", expectedValues, actualValues)
	}
}
Code example #3
func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
Code example #4
File: printer.go Project: patrykattc/origin
func printDeploymentConfig(dc *deployapi.DeploymentConfig, w io.Writer, withNamespace, wide bool, columnLabels []string) error {
	triggers := util.StringSet{}
	for _, trigger := range dc.Triggers {
		triggers.Insert(string(trigger.Type))
	}
	tStr := strings.Join(triggers.List(), ", ")

	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", dc.Name, tStr, dc.LatestVersion)
	return err
}
Code example #5
func printPolicy(policy *authorizationapi.Policy, w io.Writer, withNamespace bool) error {
	roleNames := util.StringSet{}
	for key := range policy.Roles {
		roleNames.Insert(key)
	}
	rolesString := strings.Join(roleNames.List(), ", ")

	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", policy.Name, rolesString, policy.LastModified)
	return err
}
Code example #6
File: printer.go Project: patrykattc/origin
func printPolicyBinding(policyBinding *authorizationapi.PolicyBinding, w io.Writer, withNamespace, wide bool, columnLabels []string) error {
	roleBindingNames := util.StringSet{}
	for key := range policyBinding.RoleBindings {
		roleBindingNames.Insert(key)
	}
	roleBindingsString := strings.Join(roleBindingNames.List(), ", ")

	_, err := fmt.Fprintf(w, "%s\t%s\t%v\n", policyBinding.Name, roleBindingsString, policyBinding.LastModified)
	return err
}
Code example #7
func printDeployment(d *api.Deployment, w io.Writer) error {
	causes := util.StringSet{}
	if d.Details != nil {
		for _, cause := range d.Details.Causes {
			causes.Insert(string(cause.Type))
		}
	}
	cStr := strings.Join(causes.List(), ", ")
	_, err := fmt.Fprintf(w, "%s\t%s\t%s\n", d.Name, d.Status, cStr)
	return err
}
Code example #8
func TestOrphanBuildResolver(t *testing.T) {
	activeBuildConfig := mockBuildConfig("a", "active-build-config")
	inactiveBuildConfig := mockBuildConfig("a", "inactive-build-config")

	buildConfigs := []*buildapi.BuildConfig{activeBuildConfig}
	builds := []*buildapi.Build{}

	expectedNames := util.StringSet{}
	buildStatusOptions := []buildapi.BuildStatus{
		buildapi.BuildStatusCancelled,
		buildapi.BuildStatusComplete,
		buildapi.BuildStatusError,
		buildapi.BuildStatusFailed,
		buildapi.BuildStatusNew,
		buildapi.BuildStatusPending,
		buildapi.BuildStatusRunning,
	}
	buildStatusFilter := []buildapi.BuildStatus{
		buildapi.BuildStatusCancelled,
		buildapi.BuildStatusComplete,
		buildapi.BuildStatusError,
		buildapi.BuildStatusFailed,
	}
	buildStatusFilterSet := util.StringSet{}
	for _, buildStatus := range buildStatusFilter {
		buildStatusFilterSet.Insert(string(buildStatus))
	}

	for _, buildStatusOption := range buildStatusOptions {
		builds = append(builds, withStatus(mockBuild("a", string(buildStatusOption)+"-active", activeBuildConfig), buildStatusOption))
		builds = append(builds, withStatus(mockBuild("a", string(buildStatusOption)+"-inactive", inactiveBuildConfig), buildStatusOption))
		builds = append(builds, withStatus(mockBuild("a", string(buildStatusOption)+"-orphan", nil), buildStatusOption))
		if buildStatusFilterSet.Has(string(buildStatusOption)) {
			expectedNames.Insert(string(buildStatusOption) + "-inactive")
			expectedNames.Insert(string(buildStatusOption) + "-orphan")
		}
	}

	dataSet := NewDataSet(buildConfigs, builds)
	resolver := NewOrphanBuildResolver(dataSet, buildStatusFilter)
	results, err := resolver.Resolve()
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	foundNames := util.StringSet{}
	for _, result := range results {
		foundNames.Insert(result.Name)
	}
	if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
		t.Errorf("expected %v, actual %v", expectedNames, foundNames)
	}
}
Code example #9
File: cache_test.go Project: cjnygard/origin
func validateList(t *testing.T, lister Lister, user user.Info, expectedSet util.StringSet) {
	namespaceList, err := lister.List(user)
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	results := util.StringSet{}
	for _, namespace := range namespaceList.Items {
		results.Insert(namespace.Name)
	}
	if results.Len() != expectedSet.Len() || !results.HasAll(expectedSet.List()...) {
		t.Errorf("User %v, Expected: %v, Actual: %v", user.GetName(), expectedSet, results)
	}
}
Code example #10
func getPriorityFunctionConfigs(names util.StringSet) ([]algorithm.PriorityConfig, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	configs := []algorithm.PriorityConfig{}
	for _, name := range names.List() {
		config, ok := priorityFunctionMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
		}
		configs = append(configs, config)
	}
	return configs, nil
}
Code example #11
func getFitPredicateFunctions(names util.StringSet) ([]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	predicates := []algorithm.FitPredicate{}
	for _, name := range names.List() {
		function, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates = append(predicates, function)
	}
	return predicates, nil
}
Code example #12
File: plugins.go Project: chenzhen411/kubernetes
func getFitPredicateFunctions(names util.StringSet, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {
	schedulerFactoryMutex.Lock()
	defer schedulerFactoryMutex.Unlock()

	predicates := map[string]algorithm.FitPredicate{}
	for _, name := range names.List() {
		factory, ok := fitPredicateMap[name]
		if !ok {
			return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
		}
		predicates[name] = factory(args)
	}
	return predicates, nil
}
Code example #13
func TestOrphanDeploymentResolver(t *testing.T) {
	activeDeploymentConfig := mockDeploymentConfig("a", "active-deployment-config")
	inactiveDeploymentConfig := mockDeploymentConfig("a", "inactive-deployment-config")

	deploymentConfigs := []*deployapi.DeploymentConfig{activeDeploymentConfig}
	deployments := []*kapi.ReplicationController{}

	expectedNames := util.StringSet{}
	deploymentStatusOptions := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
		deployapi.DeploymentStatusNew,
		deployapi.DeploymentStatusPending,
		deployapi.DeploymentStatusRunning,
	}

	deploymentStatusFilter := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
	}
	deploymentStatusFilterSet := util.StringSet{}
	for _, deploymentStatus := range deploymentStatusFilter {
		deploymentStatusFilterSet.Insert(string(deploymentStatus))
	}

	for _, deploymentStatusOption := range deploymentStatusOptions {
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-active", activeDeploymentConfig), deploymentStatusOption))
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-inactive", inactiveDeploymentConfig), deploymentStatusOption))
		deployments = append(deployments, withStatus(mockDeployment("a", string(deploymentStatusOption)+"-orphan", nil), deploymentStatusOption))
		if deploymentStatusFilterSet.Has(string(deploymentStatusOption)) {
			expectedNames.Insert(string(deploymentStatusOption) + "-inactive")
			expectedNames.Insert(string(deploymentStatusOption) + "-orphan")
		}
	}

	dataSet := NewDataSet(deploymentConfigs, deployments)
	resolver := NewOrphanDeploymentResolver(dataSet, deploymentStatusFilter)
	results, err := resolver.Resolve()
	if err != nil {
		t.Errorf("Unexpected error %v", err)
	}
	foundNames := util.StringSet{}
	for _, result := range results {
		foundNames.Insert(result.Name)
	}
	if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
		t.Errorf("expected %v, actual %v", expectedNames, foundNames)
	}
}
Code example #14
func (ca *CA) MakeServerCert(certFile, keyFile string, hostnames util.StringSet) (*TLSCertificateConfig, error) {
	glog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile)

	serverPublicKey, serverPrivateKey, _ := NewKeyPair()
	serverTemplate, _ := newServerCertificateTemplate(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List())
	serverCrt, _ := ca.signCertificate(serverTemplate, serverPublicKey)
	server := &TLSCertificateConfig{
		Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...),
		Key:   serverPrivateKey,
	}
	if err := server.writeCertConfig(certFile, keyFile); err != nil {
		return server, err
	}
	return server, nil
}
Code example #15
func GetServerCert(certFile, keyFile string, hostnames util.StringSet) (*TLSCertificateConfig, error) {
	server, err := GetTLSCertificateConfig(certFile, keyFile)
	if err != nil {
		return nil, err
	}

	cert := server.Certs[0]
	ips, dns := IPAddressesDNSNames(hostnames.List())
	missingIps := ipsNotInSlice(ips, cert.IPAddresses)
	missingDns := stringsNotInSlice(dns, cert.DNSNames)
	if len(missingIps) == 0 && len(missingDns) == 0 {
		glog.V(4).Infof("Found existing server certificate in %s", certFile)
		return server, nil
	}

	return nil, fmt.Errorf("Existing server certificate in %s was missing some hostnames (%v) or IP addresses (%v).", certFile, missingDns, missingIps)
}
Code example #16
File: kubelet.go Project: Ima8/kubernetes
// waitTillNPodsRunningOnNodes polls the /runningpods endpoint on kubelet until
// it finds targetNumPods pods that match the given criteria (namespace and
// podNamePrefix). Note that we usually use label selector to filter pods that
// belong to the same RC. However, we use podNamePrefix with namespace here
// because pods returned from /runningpods do not contain the original label
// information; they are reconstructed by examining the container runtime. In
// the scope of this test, we do not expect pod naming conflicts so
// podNamePrefix should be sufficient to identify the pods.
func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames util.StringSet, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
	return wait.Poll(pollInterval, timeout, func() (bool, error) {
		matchCh := make(chan util.StringSet, len(nodeNames))
		for _, item := range nodeNames.List() {
			// Launch a goroutine per node to check the pods running on the nodes.
			nodeName := item
			go func() {
				matchCh <- getPodMatches(c, nodeName, podNamePrefix, namespace)
			}()
		}

		seen := util.NewStringSet()
		for i := 0; i < len(nodeNames.List()); i++ {
			seen = seen.Union(<-matchCh)
		}
		if seen.Len() == targetNumPods {
			return true, nil
		}
		Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
		return false, nil
	})
}
Code example #17
File: helpers.go Project: cjnygard/origin
func ExpandResources(rawResources kutil.StringSet) kutil.StringSet {
	ret := kutil.StringSet{}
	toVisit := rawResources.List()
	visited := kutil.StringSet{}

	for i := 0; i < len(toVisit); i++ {
		currResource := toVisit[i]
		if visited.Has(currResource) {
			continue
		}
		visited.Insert(currResource)

		if strings.Index(currResource, ResourceGroupPrefix+":") != 0 {
			ret.Insert(strings.ToLower(currResource))
			continue
		}

		if resourceTypes, exists := GroupsToResources[currResource]; exists {
			toVisit = append(toVisit, resourceTypes...)
		}
	}

	return ret
}
Code example #18
File: result.go Project: hortonworks/kubernetes-yarn
// Object returns a single object representing the output of a single visit to all
// found resources.  If the Builder was a singular context (expected to return a
// single resource by user input) and only a single resource was found, the resource
// will be returned as is.  Otherwise, the returned resources will be part of an
// api.List. The ResourceVersion of the api.List will be set only if it is identical
// across all infos returned.
func (r *Result) Object() (runtime.Object, error) {
	infos, err := r.Infos()
	if err != nil {
		return nil, err
	}

	versions := util.StringSet{}
	objects := []runtime.Object{}
	for _, info := range infos {
		if info.Object != nil {
			objects = append(objects, info.Object)
			versions.Insert(info.ResourceVersion)
		}
	}

	if len(objects) == 1 {
		if r.singular {
			return objects[0], nil
		}
		// if the item is a list already, don't create another list
		if _, err := runtime.GetItemsPtr(objects[0]); err == nil {
			return objects[0], nil
		}
	}

	version := ""
	if len(versions) == 1 {
		version = versions.List()[0]
	}
	return &api.List{
		ListMeta: api.ListMeta{
			ResourceVersion: version,
		},
		Items: objects,
	}, err
}
Code example #19
func main() {
	util.InitFlags()
	runtime.GOMAXPROCS(runtime.NumCPU())
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	manifestURL := ServeCachedManifestFile()

	apiServerURL := startComponents(manifestURL)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Version()})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		runSelfLinkTest,
	}
	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			f(kubeClient)
			wg.Done()
		}()
	}
	wg.Wait()

	// Check that kubelet tried to make the pods.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdPods := util.StringSet{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdPods.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdPods.Insert(p[:n-8])
		}
	}
	// We expect 9: 2 net containers + 2 pods from the replication controller +
	//              1 net container + 2 pods from the URL +
	//              1 net container + 1 pod from the service test.
	if len(createdPods) != 9 {
		glog.Fatalf("Unexpected list of created pods:\n\n%#v\n\n%#v\n\n%#v\n\n", createdPods.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created pods: %#v", createdPods.List())
}
Code example #20
func Example() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// This will hold the downstream state, as we know it.
	downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, so that resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	cfg := &framework.Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &api.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,

		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
			newest := obj.(cache.Deltas).Newest()

			if newest.Type != cache.Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}

				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}

				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers.
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}

				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}

	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go framework.New(cfg).Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := util.StringSet{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}
Code example #21
func TestHammerController(t *testing.T) {
	// This test executes a bunch of requests through the fake source and
	// controller framework to make sure there are no locking/threading
	// errors. If such an error happens, the test should hang forever or
	// trigger the race detector.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	outputSetLock := sync.Mutex{}
	// map of key to operations done on the key
	outputSet := map[string][]string{}

	recordFunc := func(eventType string, obj interface{}) {
		key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
		if err != nil {
			t.Errorf("something wrong with key: %v", err)
			key = "oops something went wrong with the key"
		}

		// Record some output when items are deleted.
		outputSetLock.Lock()
		defer outputSetLock.Unlock()
		outputSet[key] = append(outputSet[key], eventType)
	}

	// Make a controller which just logs all the changes it gets.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { recordFunc("add", obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
			DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
		},
	)

	// Run the controller until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	wg := sync.WaitGroup{}
	const threads = 3
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func() {
			defer wg.Done()
			// Let's add a few objects to the source.
			currentNames := util.StringSet{}
			rs := rand.NewSource(rand.Int63())
			f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
			r := rand.New(rs) // Mustn't use r and f concurrently!
			for i := 0; i < 100; i++ {
				var name string
				var isNew bool
				if currentNames.Len() == 0 || r.Intn(3) == 1 {
					f.Fuzz(&name)
					isNew = true
				} else {
					l := currentNames.List()
					name = l[r.Intn(len(l))]
				}

				pod := &api.Pod{}
				f.Fuzz(pod)
				pod.ObjectMeta.Name = name
				pod.ObjectMeta.Namespace = "default"
				// Add, update, or delete randomly.
				// Note that these pods are not valid-- the fake source doesn't
				// call validation or perform any other checking.
				if isNew {
					currentNames.Insert(name)
					source.Add(pod)
					continue
				}
				switch r.Intn(2) {
				case 0:
					currentNames.Insert(name)
					source.Modify(pod)
				case 1:
					currentNames.Delete(name)
					source.Delete(pod)
				}
			}
		}()
	}
	wg.Wait()

	// Let's wait for the controller to finish processing the things we just added.
	time.Sleep(100 * time.Millisecond)
	close(stop)

	outputSetLock.Lock()
	t.Logf("got: %#v", outputSet)
}
Code example #22
func TestPerBuildConfigResolver(t *testing.T) {
	buildStatusOptions := []buildapi.BuildStatus{
		buildapi.BuildStatusCancelled,
		buildapi.BuildStatusComplete,
		buildapi.BuildStatusError,
		buildapi.BuildStatusFailed,
		buildapi.BuildStatusNew,
		buildapi.BuildStatusPending,
		buildapi.BuildStatusRunning,
	}
	buildConfigs := []*buildapi.BuildConfig{
		mockBuildConfig("a", "build-config-1"),
		mockBuildConfig("b", "build-config-2"),
	}
	buildsPerStatus := 100
	builds := []*buildapi.Build{}
	for _, buildConfig := range buildConfigs {
		for _, buildStatusOption := range buildStatusOptions {
			for i := 0; i < buildsPerStatus; i++ {
				build := withStatus(mockBuild(buildConfig.Namespace, fmt.Sprintf("%v-%v-%v", buildConfig.Name, buildStatusOption, i), buildConfig), buildStatusOption)
				builds = append(builds, build)
			}
		}
	}

	now := util.Now()
	for i := range builds {
		creationTimestamp := util.NewTime(now.Time.Add(-1 * time.Duration(i) * time.Hour))
		builds[i].CreationTimestamp = creationTimestamp
	}

	// test number to keep at varying ranges
	for keep := 0; keep < buildsPerStatus*2; keep++ {
		dataSet := NewDataSet(buildConfigs, builds)

		expectedNames := util.StringSet{}
		buildCompleteStatusFilterSet := util.NewStringSet(string(buildapi.BuildStatusComplete))
		buildFailedStatusFilterSet := util.NewStringSet(string(buildapi.BuildStatusCancelled), string(buildapi.BuildStatusError), string(buildapi.BuildStatusFailed))

		for _, buildConfig := range buildConfigs {
			buildItems, err := dataSet.ListBuildsByBuildConfig(buildConfig)
			if err != nil {
				t.Errorf("Unexpected err %v", err)
			}
			completedBuilds, failedBuilds := []*buildapi.Build{}, []*buildapi.Build{}
			for _, build := range buildItems {
				if buildCompleteStatusFilterSet.Has(string(build.Status)) {
					completedBuilds = append(completedBuilds, build)
				} else if buildFailedStatusFilterSet.Has(string(build.Status)) {
					failedBuilds = append(failedBuilds, build)
				}
			}
			sort.Sort(sortableBuilds(completedBuilds))
			sort.Sort(sortableBuilds(failedBuilds))
			purgeCompleted := []*buildapi.Build{}
			purgeFailed := []*buildapi.Build{}
			if keep >= 0 && keep < len(completedBuilds) {
				purgeCompleted = completedBuilds[keep:]
			}
			if keep >= 0 && keep < len(failedBuilds) {
				purgeFailed = failedBuilds[keep:]
			}
			for _, build := range purgeCompleted {
				expectedNames.Insert(build.Name)
			}
			for _, build := range purgeFailed {
				expectedNames.Insert(build.Name)
			}
		}

		resolver := NewPerBuildConfigResolver(dataSet, keep, keep)
		results, err := resolver.Resolve()
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		foundNames := util.StringSet{}
		for _, result := range results {
			foundNames.Insert(result.Name)
		}
		if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
			expectedValues := expectedNames.List()
			actualValues := foundNames.List()
			sort.Strings(expectedValues)
			sort.Strings(actualValues)
			t.Errorf("keep %v\n, expected \n\t%v\n, actual \n\t%v\n", keep, expectedValues, actualValues)
		}
	}
}
Code example #23
File: loginoptions.go Project: patrykattc/origin
// Discover the projects available for the established session and take one to use. If no
// projects exist, it suggests creating one and falls back to the default namespace; if there
// are multiple projects, it prints out which one is in use and lists the rest.
// Requires o.Username to be set.
func (o *LoginOptions) gatherProjectInfo() error {
	me, err := o.whoAmI()
	if err != nil {
		return err
	}

	if o.Username != me.Name {
		return fmt.Errorf("current user, %v, does not match expected user %v", me.Name, o.Username)
	}

	oClient, err := client.New(o.Config)
	if err != nil {
		return err
	}

	projects, err := oClient.Projects().List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}

	projectsItems := projects.Items

	switch len(projectsItems) {
	case 0:
		fmt.Fprintf(o.Out, `You don't have any projects. You can try to create a new project, by running

    $ oc new-project <projectname>

`)
		o.Project = o.DefaultNamespace

	case 1:
		o.Project = projectsItems[0].Name
		fmt.Fprintf(o.Out, "Using project %q.\n", o.Project)

	default:
		projects := util.StringSet{}
		for _, project := range projectsItems {
			projects.Insert(project.Name)
		}

		namespace := o.DefaultNamespace
		if !projects.Has(namespace) {
			if namespace != kapi.NamespaceDefault && projects.Has(kapi.NamespaceDefault) {
				namespace = kapi.NamespaceDefault
			} else {
				namespace = projects.List()[0]
			}
		}

		if current, err := oClient.Projects().Get(namespace); err == nil {
			o.Project = current.Name
			fmt.Fprintf(o.Out, "Using project %q.\n", o.Project)
		} else if !kerrors.IsNotFound(err) && !clientcmd.IsForbidden(err) {
			return err
		}

		fmt.Fprintf(o.Out, "\nYou have access to the following projects and can switch between them with 'oc project <projectname>':\n\n")
		for _, p := range projects.List() {
			if o.Project == p {
				fmt.Fprintf(o.Out, "  * %s (current)\n", p)
			} else {
				fmt.Fprintf(o.Out, "  * %s\n", p)
			}
		}
		fmt.Fprintln(o.Out)
	}

	return nil
}
Code example #24
func NewCmdPruneImages(f *clientcmd.Factory, parentName, name string, out io.Writer) *cobra.Command {
	cfg := &pruneImagesConfig{
		Confirm:            false,
		KeepYoungerThan:    60 * time.Minute,
		TagRevisionsToKeep: 3,
	}

	cmd := &cobra.Command{
		Use:   name,
		Short: "Remove unreferenced images",
		Long:  fmt.Sprintf(imagesLongDesc, parentName, name),

		Run: func(cmd *cobra.Command, args []string) {
			if len(args) > 0 {
				glog.Fatal("No arguments are allowed to this command")
			}

			osClient, kClient, registryClient, err := getClients(f, cfg)
			cmdutil.CheckErr(err)

			allImages, err := osClient.Images().List(labels.Everything(), fields.Everything())
			cmdutil.CheckErr(err)

			allStreams, err := osClient.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
			cmdutil.CheckErr(err)

			allPods, err := kClient.Pods(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
			cmdutil.CheckErr(err)

			allRCs, err := kClient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything())
			cmdutil.CheckErr(err)

			allBCs, err := osClient.BuildConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
			cmdutil.CheckErr(err)

			allBuilds, err := osClient.Builds(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
			cmdutil.CheckErr(err)

			allDCs, err := osClient.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
			cmdutil.CheckErr(err)

			pruner := prune.NewImagePruner(
				cfg.KeepYoungerThan,
				cfg.TagRevisionsToKeep,
				allImages,
				allStreams,
				allPods,
				allRCs,
				allBCs,
				allBuilds,
				allDCs,
			)

			w := tabwriter.NewWriter(out, 10, 4, 3, ' ', 0)
			defer w.Flush()

			var streams util.StringSet
			printImageHeader := true
			describingImagePruneFunc := func(image *imageapi.Image) error {
				if printImageHeader {
					printImageHeader = false
					fmt.Fprintf(w, "IMAGE\tSTREAMS")
				}

				if streams.Len() > 0 {
					fmt.Fprintf(w, strings.Join(streams.List(), ", "))
				}

				fmt.Fprintf(w, "\n%s\t", image.Name)
				streams = util.NewStringSet()

				return nil
			}

			describingImageStreamPruneFunc := func(stream *imageapi.ImageStream, image *imageapi.Image) (*imageapi.ImageStream, error) {
				streams.Insert(stream.Status.DockerImageRepository)
				return stream, nil
			}

			printLayerHeader := true
			describingLayerPruneFunc := func(registryURL, repo, layer string) error {
				if printLayerHeader {
					printLayerHeader = false
					// need to print the remaining streams for the last image
					if streams.Len() > 0 {
						fmt.Fprintf(w, strings.Join(streams.List(), ", "))
					}
					fmt.Fprintf(w, "\n\nREGISTRY\tSTREAM\tLAYER\n")
				}
				fmt.Fprintf(w, "%s\t%s\t%s\n", registryURL, repo, layer)
				return nil
			}

			var (
				imagePruneFunc       prune.ImagePruneFunc
				imageStreamPruneFunc prune.ImageStreamPruneFunc
				layerPruneFunc       prune.LayerPruneFunc
				blobPruneFunc        prune.BlobPruneFunc
				manifestPruneFunc    prune.ManifestPruneFunc
			)

			switch cfg.Confirm {
			case true:
				imagePruneFunc = func(image *imageapi.Image) error {
					describingImagePruneFunc(image)
					return prune.DeletingImagePruneFunc(osClient.Images())(image)
				}
				imageStreamPruneFunc = func(stream *imageapi.ImageStream, image *imageapi.Image) (*imageapi.ImageStream, error) {
					describingImageStreamPruneFunc(stream, image)
					return prune.DeletingImageStreamPruneFunc(osClient)(stream, image)
				}
				layerPruneFunc = func(registryURL, repo, layer string) error {
					describingLayerPruneFunc(registryURL, repo, layer)
					return prune.DeletingLayerPruneFunc(registryClient)(registryURL, repo, layer)
				}
				blobPruneFunc = prune.DeletingBlobPruneFunc(registryClient)
				manifestPruneFunc = prune.DeletingManifestPruneFunc(registryClient)
			default:
				fmt.Fprintln(os.Stderr, "Dry run enabled - no modifications will be made.")
				imagePruneFunc = describingImagePruneFunc
				imageStreamPruneFunc = describingImageStreamPruneFunc
				layerPruneFunc = describingLayerPruneFunc
				blobPruneFunc = func(registryURL, blob string) error {
					return nil
				}
				manifestPruneFunc = func(registryURL, repo, manifest string) error {
					return nil
				}
			}

			if len(cfg.RegistryUrlOverride) > 0 {
				originalLayerPruneFunc := layerPruneFunc
				layerPruneFunc = func(registryURL, repo, layer string) error {
					return originalLayerPruneFunc(cfg.RegistryUrlOverride, repo, layer)
				}
				originalBlobPruneFunc := blobPruneFunc
				blobPruneFunc = func(registryURL, blob string) error {
					return originalBlobPruneFunc(cfg.RegistryUrlOverride, blob)
				}
				originalManifestPruneFunc := manifestPruneFunc
				manifestPruneFunc = func(registryURL, repo, manifest string) error {
					return originalManifestPruneFunc(cfg.RegistryUrlOverride, repo, manifest)
				}
			}

			pruner.Run(imagePruneFunc, imageStreamPruneFunc, layerPruneFunc, blobPruneFunc, manifestPruneFunc)
		},
	}

	cmd.Flags().BoolVar(&cfg.Confirm, "confirm", cfg.Confirm, "Specify that image pruning should proceed. Defaults to false, displaying what would be deleted but not actually deleting anything.")
	cmd.Flags().DurationVar(&cfg.KeepYoungerThan, "keep-younger-than", cfg.KeepYoungerThan, "Specify the minimum age of a build for it to be considered a candidate for pruning.")
	cmd.Flags().IntVar(&cfg.TagRevisionsToKeep, "keep-tag-revisions", cfg.TagRevisionsToKeep, "Specify the number of image revisions for a tag in an image stream that will be preserved.")
	cmd.Flags().StringVar(&cfg.CABundle, "certificate-authority", cfg.CABundle, "The path to a certificate authority bundle to use when communicating with the OpenShift-managed registries. Defaults to the certificate authority data from the current user's config file.")
	cmd.Flags().StringVar(&cfg.RegistryUrlOverride, "registry-url", cfg.RegistryUrlOverride, "The address to use when contacting the registry, instead of using the default value. This is useful if you can't resolve or reach the registry (e.g., the default is a cluster-internal URL) but you do have an alternative route that works.")

	return cmd
}
Code example #25
File: integration.go Project: Bazooki/kubernetes
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	util.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(3 * time.Minute)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", apiVersion)

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL, apiVersion)

	// Ok. we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: apiVersion})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := util.StringSet{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12: 2 pod infra containers + 2 containers from the replication controller +
	//               1 pod infra container + 2 containers from the URL on first Kubelet +
	//               1 pod infra container + 2 containers from the URL on second Kubelet +
	//               1 pod infra container + 1 container from the service test.

	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
	glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}
Code example #26
File: kubelet.go Project: Ima8/kubernetes
var _ = Describe("Clean up pods on node", func() {
	var numNodes int
	var nodeNames util.StringSet
	framework := NewFramework("kubelet-delete")

	BeforeEach(func() {
		nodes, err := framework.Client.Nodes().List(labels.Everything(), fields.Everything())
		expectNoError(err)
		numNodes = len(nodes.Items)
		nodeNames = util.NewStringSet()
		for _, node := range nodes.Items {
			nodeNames.Insert(node.Name)
		}
		logOneTimeResourceUsageSummary(framework.Client, nodeNames.List(), cpuIntervalInSeconds)
	})

	type DeleteTest struct {
		podsPerNode int
		timeout     time.Duration
	}

	deleteTests := []DeleteTest{
		{podsPerNode: 10, timeout: 1 * time.Minute},
	}

	for _, itArg := range deleteTests {
		name := fmt.Sprintf(
			"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
		It(name, func() {
Code example #27
func CheckSetEq(lhs, rhs util.StringSet) bool {
	return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}
Code example #28
func TestPerDeploymentConfigResolver(t *testing.T) {
	deploymentStatusOptions := []deployapi.DeploymentStatus{
		deployapi.DeploymentStatusComplete,
		deployapi.DeploymentStatusFailed,
		deployapi.DeploymentStatusNew,
		deployapi.DeploymentStatusPending,
		deployapi.DeploymentStatusRunning,
	}
	deploymentConfigs := []*deployapi.DeploymentConfig{
		mockDeploymentConfig("a", "deployment-config-1"),
		mockDeploymentConfig("b", "deployment-config-2"),
	}
	deploymentsPerStatus := 100
	deployments := []*kapi.ReplicationController{}
	for _, deploymentConfig := range deploymentConfigs {
		for _, deploymentStatusOption := range deploymentStatusOptions {
			for i := 0; i < deploymentsPerStatus; i++ {
				deployment := withStatus(mockDeployment(deploymentConfig.Namespace, fmt.Sprintf("%v-%v-%v", deploymentConfig.Name, deploymentStatusOption, i), deploymentConfig), deploymentStatusOption)
				deployments = append(deployments, deployment)
			}
		}
	}

	now := util.Now()
	for i := range deployments {
		creationTimestamp := util.NewTime(now.Time.Add(-1 * time.Duration(i) * time.Hour))
		deployments[i].CreationTimestamp = creationTimestamp
	}

	// test number to keep at varying ranges
	for keep := 0; keep < deploymentsPerStatus*2; keep++ {
		dataSet := NewDataSet(deploymentConfigs, deployments)

		expectedNames := util.StringSet{}
		deploymentCompleteStatusFilterSet := util.NewStringSet(string(deployapi.DeploymentStatusComplete))
		deploymentFailedStatusFilterSet := util.NewStringSet(string(deployapi.DeploymentStatusFailed))

		for _, deploymentConfig := range deploymentConfigs {
			deploymentItems, err := dataSet.ListDeploymentsByDeploymentConfig(deploymentConfig)
			if err != nil {
				t.Errorf("Unexpected err %v", err)
			}
			completedDeployments, failedDeployments := []*kapi.ReplicationController{}, []*kapi.ReplicationController{}
			for _, deployment := range deploymentItems {
				status := deployment.Annotations[deployapi.DeploymentStatusAnnotation]
				if deploymentCompleteStatusFilterSet.Has(status) {
					completedDeployments = append(completedDeployments, deployment)
				} else if deploymentFailedStatusFilterSet.Has(status) {
					failedDeployments = append(failedDeployments, deployment)
				}
			}
			sort.Sort(sortableReplicationControllers(completedDeployments))
			sort.Sort(sortableReplicationControllers(failedDeployments))
			purgeCompleted := []*kapi.ReplicationController{}
			purgeFailed := []*kapi.ReplicationController{}
			if keep >= 0 && keep < len(completedDeployments) {
				purgeCompleted = completedDeployments[keep:]
			}
			if keep >= 0 && keep < len(failedDeployments) {
				purgeFailed = failedDeployments[keep:]
			}
			for _, deployment := range purgeCompleted {
				expectedNames.Insert(deployment.Name)
			}
			for _, deployment := range purgeFailed {
				expectedNames.Insert(deployment.Name)
			}
		}

		resolver := NewPerDeploymentConfigResolver(dataSet, keep, keep)
		results, err := resolver.Resolve()
		if err != nil {
			t.Errorf("Unexpected error %v", err)
		}
		foundNames := util.StringSet{}
		for _, result := range results {
			foundNames.Insert(result.Name)
		}
		if len(foundNames) != len(expectedNames) || !expectedNames.HasAll(foundNames.List()...) {
			expectedValues := expectedNames.List()
			actualValues := foundNames.List()
			sort.Strings(expectedValues)
			sort.Strings(actualValues)
			t.Errorf("keep %v\n, expected \n\t%v\n, actual \n\t%v\n", keep, expectedValues, actualValues)
		}
	}
}