Example #1
// CreateFromConfig creates a scheduler Config from the given policy configuration
func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {
	glog.V(2).Infof("Creating scheduler from configuration: %v", policy)

	// validate the policy configuration
	if err := validation.ValidatePolicy(policy); err != nil {
		return nil, err
	}

	predicateKeys := sets.NewString()
	for _, predicate := range policy.Predicates {
		glog.V(2).Infof("Registering predicate: %s", predicate.Name)
		predicateKeys.Insert(RegisterCustomFitPredicate(predicate))
	}

	priorityKeys := sets.NewString()
	for _, priority := range policy.Priorities {
		glog.V(2).Infof("Registering priority: %s", priority.Name)
		priorityKeys.Insert(RegisterCustomPriorityFunction(priority))
	}

	extenders := make([]algorithm.SchedulerExtender, 0)
	if len(policy.ExtenderConfigs) != 0 {
		for ii := range policy.ExtenderConfigs {
			glog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii])
			if extender, err := scheduler.NewHTTPExtender(&policy.ExtenderConfigs[ii], policy.APIVersion); err != nil {
				return nil, err
			} else {
				extenders = append(extenders, extender)
			}
		}
	}
	return f.CreateFromKeys(predicateKeys, priorityKeys, extenders)
}
Example #2
// Generate creates the capabilities based on policy rules. Generate will produce the following:
// 1. a capabilities.Add set containing all the required adds (unless the
//    container is specifically dropping the cap) and container-requested adds
// 2. a capabilities.Drop set containing all the required drops and container-requested drops
func (s *defaultCapabilities) Generate(pod *api.Pod, container *api.Container) (*api.Capabilities, error) {
	defaultAdd := makeCapSet(s.defaultAddCapabilities)
	requiredDrop := makeCapSet(s.requiredDropCapabilities)
	containerAdd := sets.NewString()
	containerDrop := sets.NewString()

	if container.SecurityContext != nil && container.SecurityContext.Capabilities != nil {
		containerAdd = makeCapSet(container.SecurityContext.Capabilities.Add)
		containerDrop = makeCapSet(container.SecurityContext.Capabilities.Drop)
	}

	// remove any default adds that the container is specifically dropping
	defaultAdd = defaultAdd.Difference(containerDrop)

	combinedAdd := defaultAdd.Union(containerAdd).List()
	combinedDrop := requiredDrop.Union(containerDrop).List()

	// nothing generated?  return nil
	if len(combinedAdd) == 0 && len(combinedDrop) == 0 {
		return nil, nil
	}

	return &api.Capabilities{
		Add:  capabilityFromStringSlice(combinedAdd),
		Drop: capabilityFromStringSlice(combinedDrop),
	}, nil
}
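// Generate relies on two helpers that are not shown above. A minimal sketch,
// assuming they only convert between []api.Capability and sets.String (the
// bodies below are illustrative, not necessarily the verbatim upstream code):

// makeCapSet converts a capability slice into a string set for set arithmetic.
func makeCapSet(caps []api.Capability) sets.String {
	s := sets.NewString()
	for _, c := range caps {
		s.Insert(string(c))
	}
	return s
}

// capabilityFromStringSlice converts the sorted set output back into capabilities.
func capabilityFromStringSlice(slice []string) []api.Capability {
	if len(slice) == 0 {
		return nil
	}
	caps := make([]api.Capability, 0, len(slice))
	for _, c := range slice {
		caps = append(caps, api.Capability(c))
	}
	return caps
}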
Example #3
func TestPetQueueScaleDown(t *testing.T) {
	replicas := 1
	ps := newStatefulSet(replicas)

	// knownPods are the pods in the system
	knownPods := newPodList(ps, 3)

	q := NewPetQueue(ps, knownPods)

	// The iterator would insert a single replica; this enqueue
	// mimics that behavior.
	pet, _ := newPCB(fmt.Sprintf("%v", 0), ps)
	q.enqueue(pet)

	deletes := sets.NewString(fmt.Sprintf("%v-1", ps.Name), fmt.Sprintf("%v-2", ps.Name))
	syncs := sets.NewString(fmt.Sprintf("%v-0", ps.Name))

	// Confirm that the two extra known pods are deleted and the single replica is synced
	for i := 0; i < 3; i++ {
		p := q.dequeue()
		switch p.event {
		case syncPet:
			if !syncs.Has(p.pod.Name) {
				t.Errorf("Unexpected sync %v expecting %+v", p.pod.Name, syncs)
			}
		case deletePet:
			if !deletes.Has(p.pod.Name) {
				t.Errorf("Unexpected deletes %v expecting %+v", p.pod.Name, deletes)
			}
		}
	}
	if q.dequeue() != nil {
		t.Errorf("Expected no pods")
	}
}
Example #4
func TestProxyProtocolEnabled(t *testing.T) {
	policies := sets.NewString(ProxyProtocolPolicyName, "FooBarFoo")
	fakeBackend := &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
		PolicyNames:  stringSetToPointers(policies),
	}
	result := proxyProtocolEnabled(fakeBackend)
	assert.True(t, result, "expected to find %s in %s", ProxyProtocolPolicyName, policies)

	policies = sets.NewString("FooBarFoo")
	fakeBackend = &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
		PolicyNames: []*string{
			aws.String("FooBarFoo"),
		},
	}
	result = proxyProtocolEnabled(fakeBackend)
	assert.False(t, result, "did not expect to find %s in %s", ProxyProtocolPolicyName, policies)

	policies = sets.NewString()
	fakeBackend = &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
	}
	result = proxyProtocolEnabled(fakeBackend)
	assert.False(t, result, "did not expect to find %s in %s", ProxyProtocolPolicyName, policies)
}
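Example #5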
func TestAddAfterTry(t *testing.T) {
	evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
	evictor.Add("first", "11111")
	evictor.Add("second", "22222")
	evictor.Add("third", "33333")
	evictor.Remove("second")

	deletedMap := sets.NewString()
	evictor.Try(func(value TimedValue) (bool, time.Duration) {
		deletedMap.Insert(value.Value)
		return true, 0
	})

	setPattern := sets.NewString("first", "third")
	if len(deletedMap) != len(setPattern) {
		t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
	}
	if !CheckSetEq(setPattern, deletedMap) {
		t.Errorf("Invalid map. Got %v, expected %v", deletedMap, setPattern)
	}

	evictor.Add("first", "11111")
	evictor.Try(func(value TimedValue) (bool, time.Duration) {
		t.Errorf("We shouldn't process the same value if the explicit remove wasn't called.")
		return true, 0
	})
}
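// CheckSetEq is assumed by TestAddAfterTry above. A minimal sketch of a
// set-equality check built from HasAll (sets.String also offers Equal, which
// the real helper may use instead):
func CheckSetEq(lhs, rhs sets.String) bool {
	// Two sets are equal iff each contains every element of the other.
	return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...)
}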
Example #6
func TestWaitFlagNew(t *testing.T) {
	fcmd := exec.FakeCmd{
		CombinedOutputScript: []exec.FakeCombinedOutputAction{
			// iptables version check
			func() ([]byte, error) { return []byte("iptables v1.4.22"), nil },
			// Success.
			func() ([]byte, error) { return []byte{}, nil },
		},
	}
	fexec := exec.FakeExec{
		CommandScript: []exec.FakeCommandAction{
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
		},
	}
	runner := New(&fexec, dbus.NewFake(nil, nil), ProtocolIpv4)
	defer runner.Destroy()
	err := runner.DeleteChain(TableNAT, Chain("FOOBAR"))
	if err != nil {
		t.Errorf("expected success, got %v", err)
	}
	if fcmd.CombinedOutputCalls != 2 {
		t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls)
	}
	if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-w2") {
		t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1])
	}
	if sets.NewString(fcmd.CombinedOutputLog[1]...).HasAny("-w") {
		t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1])
	}
}
Example #7
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := sets.String{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
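// doTestIndex expects its caller to supply an Indexer wired with a "by_val"
// index over testStoreObject.val. A minimal driver, assuming the cache
// package's NewIndexer and the testStoreKeyFunc used elsewhere in these examples:
func TestIndex(t *testing.T) {
	indexer := NewIndexer(testStoreKeyFunc, Indexers{
		// Index every object under its val field so Index("by_val", ...) can find it.
		"by_val": func(obj interface{}) ([]string, error) {
			return []string{obj.(testStoreObject).val}, nil
		},
	})
	doTestIndex(t, indexer)
}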
Example #8
func newRESTMapper(externalVersions []schema.GroupVersion) meta.RESTMapper {
	// the list of kinds that are scoped at the root of the api hierarchy
	// if a kind is not enumerated here, it is assumed to have a namespace scope
	rootScoped := sets.NewString(
		"Node",
		"Namespace",
		"PersistentVolume",
		"ComponentStatus",
	)

	// these kinds should be excluded from the list of resources
	ignoredKinds := sets.NewString(
		"ListOptions",
		"DeleteOptions",
		"Status",
		"PodLogOptions",
		"PodExecOptions",
		"PodAttachOptions",
		"PodProxyOptions",
		"NodeProxyOptions",
		"ServiceProxyOptions",
		"ThirdPartyResource",
		"ThirdPartyResourceData",
		"ThirdPartyResourceList")

	mapper := api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)

	return mapper
}
Example #9
// NewCmdConfigGetContexts creates a command object for the "get-contexts" action, which
// retrieves one or more contexts from a kubeconfig.
func NewCmdConfigGetContexts(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command {
	options := &GetContextsOptions{configAccess: configAccess}

	cmd := &cobra.Command{
		Use:     "get-contexts [(-o|--output=)name]",
		Short:   "Describe one or many contexts",
		Long:    getContextsLong,
		Example: getContextsExample,
		Run: func(cmd *cobra.Command, args []string) {
			validOutputTypes := sets.NewString("", "json", "yaml", "wide", "name", "custom-columns", "custom-columns-file", "go-template", "go-template-file", "jsonpath", "jsonpath-file")
			supportedOutputTypes := sets.NewString("", "name")
			outputFormat := cmdutil.GetFlagString(cmd, "output")
			if !validOutputTypes.Has(outputFormat) {
				cmdutil.CheckErr(fmt.Errorf("output must be one of '' or 'name': %v", outputFormat))
			}
			if !supportedOutputTypes.Has(outputFormat) {
				fmt.Fprintf(out, "--output %v is not available in kubectl config get-contexts; resetting to default output format\n", outputFormat)
				cmd.Flags().Set("output", "")
			}
			cmdutil.CheckErr(options.Complete(cmd, args, out))
			cmdutil.CheckErr(options.RunGetContexts())
		},
	}
	cmdutil.AddOutputFlags(cmd)
	cmdutil.AddNoHeadersFlags(cmd)
	return cmd
}
Example #10
func newRESTMapper(externalVersions []schema.GroupVersion) meta.RESTMapper {
	// the list of kinds that are scoped at the root of the api hierarchy
	// if a kind is not enumerated here, it is assumed to have a namespace scope
	rootScoped := sets.NewString()

	ignoredKinds := sets.NewString()

	return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
}
Example #11
func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {
	const newFinalizersErrorMsg string = `no new finalizers can be added if the object is being deleted`
	allErrs := field.ErrorList{}
	extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...))
	if len(extra) != 0 {
		allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("%s, found new finalizers %#v", newFinalizersErrorMsg, extra.List())))
	}
	return allErrs
}
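Example #12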
func TestFilteredBy(t *testing.T) {
	all := discovery.ResourcePredicateFunc(func(gv string, r *metav1.APIResource) bool {
		return true
	})
	none := discovery.ResourcePredicateFunc(func(gv string, r *metav1.APIResource) bool {
		return false
	})
	onlyV2 := discovery.ResourcePredicateFunc(func(gv string, r *metav1.APIResource) bool {
		return strings.HasSuffix(gv, "/v2") || gv == "v2"
	})
	onlyBar := discovery.ResourcePredicateFunc(func(gv string, r *metav1.APIResource) bool {
		return r.Kind == "Bar"
	})

	foo := []*metav1.APIResourceList{
		{
			GroupVersion: "foo/v1",
			APIResources: []metav1.APIResource{
				{Name: "bar", Kind: "Bar"},
				{Name: "test", Kind: "Test"},
			},
		},
		{
			GroupVersion: "foo/v2",
			APIResources: []metav1.APIResource{
				{Name: "bar", Kind: "Bar"},
				{Name: "test", Kind: "Test"},
			},
		},
		{
			GroupVersion: "foo/v3",
			APIResources: []metav1.APIResource{},
		},
	}

	tests := []struct {
		input             []*metav1.APIResourceList
		pred              discovery.ResourcePredicate
		expectedResources []string
	}{
		{nil, all, []string{}},
		{[]*metav1.APIResourceList{
			{GroupVersion: "foo/v1"},
		}, all, []string{}},
		{foo, all, []string{"foo/v1.bar", "foo/v1.test", "foo/v2.bar", "foo/v2.test"}},
		{foo, onlyV2, []string{"foo/v2.bar", "foo/v2.test"}},
		{foo, onlyBar, []string{"foo/v1.bar", "foo/v2.bar"}},
		{foo, none, []string{}},
	}
	for i, test := range tests {
		filtered := discovery.FilteredBy(test.pred, test.input)

		if expected, got := sets.NewString(test.expectedResources...), sets.NewString(stringify(filtered)...); !expected.Equal(got) {
			t.Errorf("[%d] unexpected group versions: expected=%v, got=%v", i, test.expectedResources, stringify(filtered))
		}
	}
}
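// stringify is assumed by TestFilteredBy; the expected strings such as
// "foo/v1.bar" imply it flattens each list into "<groupVersion>.<name>"
// entries. A sketch under that assumption:
func stringify(rls []*metav1.APIResourceList) []string {
	result := []string{}
	for _, rl := range rls {
		for _, r := range rl.APIResources {
			result = append(result, rl.GroupVersion+"."+r.Name)
		}
	}
	return result
}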
Example #13
func (im *realImageGCManager) detectImages(detectTime time.Time) error {
	images, err := im.runtime.ListImages()
	if err != nil {
		return err
	}
	pods, err := im.runtime.GetPods(true)
	if err != nil {
		return err
	}

	// Make a set of images in use by containers.
	imagesInUse := sets.NewString()
	for _, pod := range pods {
		for _, container := range pod.Containers {
			glog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID)
			imagesInUse.Insert(container.ImageID)
		}
	}

	// Add new images and record those being used.
	now := time.Now()
	currentImages := sets.NewString()
	im.imageRecordsLock.Lock()
	defer im.imageRecordsLock.Unlock()
	for _, image := range images {
		glog.V(5).Infof("Adding image ID %s to currentImages", image.ID)
		currentImages.Insert(image.ID)

		// New image, set it as detected now.
		if _, ok := im.imageRecords[image.ID]; !ok {
			glog.V(5).Infof("Image ID %s is new", image.ID)
			im.imageRecords[image.ID] = &imageRecord{
				firstDetected: detectTime,
			}
		}

		// Set last used time to now if the image is being used.
		if isImageUsed(image, imagesInUse) {
			glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now)
			im.imageRecords[image.ID].lastUsed = now
		}

		glog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size)
		im.imageRecords[image.ID].size = image.Size
	}

	// Remove old images from our records.
	for image := range im.imageRecords {
		if !currentImages.Has(image) {
			glog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image)
			delete(im.imageRecords, image)
		}
	}

	return nil
}
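// isImageUsed is the membership test detectImages defers to. Assuming an image
// counts as in use when some container references its ID, it reduces to a set
// lookup (a sketch; the parameter type and any name/tag matching are assumptions):
func isImageUsed(image container.Image, imagesInUse sets.String) bool {
	return imagesInUse.Has(image.ID)
}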
Example #14
func TestRequirementConstructor(t *testing.T) {
	requirementConstructorTests := []struct {
		Key     string
		Op      selection.Operator
		Vals    sets.String
		Success bool
	}{
		{"x", selection.In, nil, false},
		{"x", selection.NotIn, sets.NewString(), false},
		{"x", selection.In, sets.NewString("foo"), true},
		{"x", selection.NotIn, sets.NewString("foo"), true},
		{"x", selection.Exists, nil, true},
		{"x", selection.DoesNotExist, nil, true},
		{"1foo", selection.In, sets.NewString("bar"), true},
		{"1234", selection.In, sets.NewString("bar"), true},
		{"y", selection.GreaterThan, sets.NewString("1"), true},
		{"z", selection.LessThan, sets.NewString("6"), true},
		{"foo", selection.GreaterThan, sets.NewString("bar"), false},
		{"barz", selection.LessThan, sets.NewString("blah"), false},
		{strings.Repeat("a", 254), selection.Exists, nil, false}, //breaks DNS rule that len(key) <= 253
	}
	for _, rc := range requirementConstructorTests {
		if _, err := NewRequirement(rc.Key, rc.Op, rc.Vals.List()); err == nil && !rc.Success {
			t.Errorf("expected error with key:%#v op:%v vals:%v, got no error", rc.Key, rc.Op, rc.Vals)
		} else if err != nil && rc.Success {
			t.Errorf("expected no error with key:%#v op:%v vals:%v, got:%v", rc.Key, rc.Op, rc.Vals, err)
		}
	}
}
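Example #15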
// Test that processItem performs the expected actions.
func TestProcessItem(t *testing.T) {
	pod := getPod("ToBeDeletedPod", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "owner1",
			UID:        "123",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				serilizeOrDie(t, pod),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	item := &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The processItem routine should get the latest item from the server.
		owners: nil,
	}
	err := gc.processItem(item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")

	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}
Example #16
// Makes sure that exactly the specified hosts are registered as instances with the load balancer
func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances []*elb.Instance, instances []*ec2.Instance) error {
	expected := sets.NewString()
	for _, instance := range instances {
		expected.Insert(orEmpty(instance.InstanceId))
	}

	actual := sets.NewString()
	for _, lbInstance := range lbInstances {
		actual.Insert(orEmpty(lbInstance.InstanceId))
	}

	additions := expected.Difference(actual)
	removals := actual.Difference(expected)

	addInstances := []*elb.Instance{}
	for _, instanceId := range additions.List() {
		addInstance := &elb.Instance{}
		addInstance.InstanceId = aws.String(instanceId)
		addInstances = append(addInstances, addInstance)
	}

	removeInstances := []*elb.Instance{}
	for _, instanceId := range removals.List() {
		removeInstance := &elb.Instance{}
		removeInstance.InstanceId = aws.String(instanceId)
		removeInstances = append(removeInstances, removeInstance)
	}

	if len(addInstances) > 0 {
		registerRequest := &elb.RegisterInstancesWithLoadBalancerInput{}
		registerRequest.Instances = addInstances
		registerRequest.LoadBalancerName = aws.String(loadBalancerName)
		_, err := c.elb.RegisterInstancesWithLoadBalancer(registerRequest)
		if err != nil {
			return err
		}
		glog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName)
	}

	if len(removeInstances) > 0 {
		deregisterRequest := &elb.DeregisterInstancesFromLoadBalancerInput{}
		deregisterRequest.Instances = removeInstances
		deregisterRequest.LoadBalancerName = aws.String(loadBalancerName)
		_, err := c.elb.DeregisterInstancesFromLoadBalancer(deregisterRequest)
		if err != nil {
			return err
		}
		glog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName)
	}

	return nil
}
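Example #17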
func TestReAddExpiredItem(t *testing.T) {
	deleteChan := make(chan string, 1)
	exp := &FakeExpirationPolicy{
		NeverExpire: sets.NewString(),
		RetrieveKeyFunc: func(obj interface{}) (string, error) {
			return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
		},
	}
	ttlStore := NewFakeExpirationStore(
		testStoreKeyFunc, deleteChan, exp, clock.RealClock{})
	testKey := "foo"
	testObj := testStoreObject{id: testKey, val: "bar"}
	err := ttlStore.Add(testObj)
	if err != nil {
		t.Errorf("Unable to add obj %#v", testObj)
	}

	// This get will expire the item.
	item, exists, err := ttlStore.Get(testObj)
	if err != nil {
		t.Errorf("Failed to get from store, %v", err)
	}
	if exists || item != nil {
		t.Errorf("Got unexpected item %#v", item)
	}

	key, _ := testStoreKeyFunc(testObj)
	differentValue := "different_bar"
	err = ttlStore.Add(
		testStoreObject{id: testKey, val: differentValue})
	if err != nil {
		t.Errorf("Failed to add second value")
	}

	select {
	case delKey := <-deleteChan:
		if delKey != key {
			t.Errorf("Unexpected delete for key %s", key)
		}
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("Unexpected timeout waiting on delete")
	}
	exp.NeverExpire = sets.NewString(testKey)
	item, exists, err = ttlStore.GetByKey(testKey)
	if err != nil {
		t.Errorf("Failed to get from store, %v", err)
	}
	if !exists || item == nil || item.(testStoreObject).val != differentValue {
		t.Errorf("Got unexpected item %#v", item)
	}
	close(deleteChan)
}
Example #18
func compareResults(t *testing.T, expected, actual []types.UID) {
	expectedSet := sets.NewString()
	for _, u := range expected {
		expectedSet.Insert(string(u))
	}
	actualSet := sets.NewString()
	for _, u := range actual {
		actualSet.Insert(string(u))
	}
	if !expectedSet.Equal(actualSet) {
		t.Errorf("Expected %#v, got %#v", expectedSet.List(), actualSet.List())
	}
}
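Example #19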
func TestSyncResourceQuotaNoChange(t *testing.T) {
	resourceQuota := v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "rq",
		},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("4"),
			},
		},
		Status: v1.ResourceQuotaStatus{
			Hard: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("4"),
			},
			Used: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("0"),
			},
		},
	}

	kubeClient := fake.NewSimpleClientset(&v1.PodList{}, &resourceQuota)
	resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
		KubeClient:   kubeClient,
		ResyncPeriod: controller.NoResyncPeriodFunc,
		Registry:     install.NewRegistry(kubeClient, nil),
		GroupKindsToReplenish: []schema.GroupKind{
			api.Kind("Pod"),
			api.Kind("Service"),
			api.Kind("ReplicationController"),
			api.Kind("PersistentVolumeClaim"),
		},
		ControllerFactory:         NewReplenishmentControllerFactoryFromClient(kubeClient),
		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
	}
	quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
	err := quotaController.syncResourceQuota(resourceQuota)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	expectedActionSet := sets.NewString(
		strings.Join([]string{"list", "pods", ""}, "-"),
	)
	actionSet := sets.NewString()
	for _, action := range kubeClient.Actions() {
		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
	}
	if !actionSet.HasAll(expectedActionSet.List()...) {
		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
	}
}
Example #20
// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order:
//  1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR
//  1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy
//     kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version,
//     all other groups alphabetical.
func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper {
	unionMapper := meta.MultiRESTMapper{}
	unionedGroups := sets.NewString()
	for enabledVersion := range m.enabledVersions {
		if !unionedGroups.Has(enabledVersion.Group) {
			unionedGroups.Insert(enabledVersion.Group)
			groupMeta := m.groupMetaMap[enabledVersion.Group]
			unionMapper = append(unionMapper, groupMeta.RESTMapper)
		}
	}

	if len(versionPatterns) != 0 {
		resourcePriority := []schema.GroupVersionResource{}
		kindPriority := []schema.GroupVersionKind{}
		for _, versionPriority := range versionPatterns {
			resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
			kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
		}

		return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
	}

	if len(m.envRequestedVersions) != 0 {
		resourcePriority := []schema.GroupVersionResource{}
		kindPriority := []schema.GroupVersionKind{}

		for _, versionPriority := range m.envRequestedVersions {
			resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource))
			kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind))
		}

		return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
	}

	prioritizedGroups := []string{"", "extensions", "metrics"}
	resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...)

	prioritizedGroupsSet := sets.NewString(prioritizedGroups...)
	remainingGroups := sets.String{}
	for enabledVersion := range m.enabledVersions {
		if !prioritizedGroupsSet.Has(enabledVersion.Group) {
			remainingGroups.Insert(enabledVersion.Group)
		}
	}

	remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...)
	resourcePriority = append(resourcePriority, remainingResourcePriority...)
	kindPriority = append(kindPriority, remainingKindPriority...)

	return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority}
}
Example #21
func (s *serviceAccount) limitSecretReferences(serviceAccount *api.ServiceAccount, pod *api.Pod) error {
	// Ensure all secrets the pod references are allowed by the service account
	mountableSecrets := sets.NewString()
	for _, s := range serviceAccount.Secrets {
		mountableSecrets.Insert(s.Name)
	}
	for _, volume := range pod.Spec.Volumes {
		source := volume.VolumeSource
		if source.Secret == nil {
			continue
		}
		secretName := source.Secret.SecretName
		if !mountableSecrets.Has(secretName) {
			return fmt.Errorf("volume with secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", secretName, serviceAccount.Name)
		}
	}

	for _, container := range pod.Spec.InitContainers {
		for _, env := range container.Env {
			if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil {
				if !mountableSecrets.Has(env.ValueFrom.SecretKeyRef.Name) {
					return fmt.Errorf("init container %s with envVar %s referencing secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", container.Name, env.Name, env.ValueFrom.SecretKeyRef.Name, serviceAccount.Name)
				}
			}
		}
	}

	for _, container := range pod.Spec.Containers {
		for _, env := range container.Env {
			if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil {
				if !mountableSecrets.Has(env.ValueFrom.SecretKeyRef.Name) {
					return fmt.Errorf("container %s with envVar %s referencing secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", container.Name, env.Name, env.ValueFrom.SecretKeyRef.Name, serviceAccount.Name)
				}
			}
		}
	}

	// limit pull secret references as well
	pullSecrets := sets.NewString()
	for _, s := range serviceAccount.ImagePullSecrets {
		pullSecrets.Insert(s.Name)
	}
	for i, pullSecretRef := range pod.Spec.ImagePullSecrets {
		if !pullSecrets.Has(pullSecretRef.Name) {
			return fmt.Errorf(`imagePullSecrets[%d].name="%s" is not allowed because service account %s does not reference that imagePullSecret`, i, pullSecretRef.Name, serviceAccount.Name)
		}
	}
	return nil
}
Example #22
func TestCloneTLSConfig(t *testing.T) {
	expected := sets.NewString(
		// These fields are copied in CloneTLSConfig
		"Rand",
		"Time",
		"Certificates",
		"RootCAs",
		"NextProtos",
		"ServerName",
		"InsecureSkipVerify",
		"CipherSuites",
		"PreferServerCipherSuites",
		"MinVersion",
		"MaxVersion",
		"CurvePreferences",
		"NameToCertificate",
		"GetCertificate",
		"ClientAuth",
		"ClientCAs",
		"ClientSessionCache",

		// These fields are not copied
		"SessionTicketsDisabled",
		"SessionTicketKey",

		// These fields are unexported
		"serverInitOnce",
		"mutex",
		"sessionTicketKeys",
	)

	// See #33936.
	if strings.HasPrefix(runtime.Version(), "go1.7") {
		expected.Insert("DynamicRecordSizingDisabled", "Renegotiation")
	}

	fields := sets.NewString()
	structType := reflect.TypeOf(tls.Config{})
	for i := 0; i < structType.NumField(); i++ {
		fields.Insert(structType.Field(i).Name)
	}

	if missing := expected.Difference(fields); len(missing) > 0 {
		t.Errorf("Expected fields that were not seen in tls.Config: %v", missing.List())
	}
	if extra := fields.Difference(expected); len(extra) > 0 {
		t.Errorf("New fields seen in tls.Config: %v\nAdd to CloneTLSConfig if client-relevant, then add to expected list in TestCloneTLSConfig", extra.List())
	}
}
Example #23
// SameStringArray verifies whether two string arrays contain the same strings and returns an error if not.
// Order does not matter.
// When `include` is set to true, it instead verifies whether result includes all elements from expected.
func SameStringArray(result, expected []string, include bool) error {
	res := sets.NewString(result...)
	exp := sets.NewString(expected...)
	if !include {
		diff := res.Difference(exp)
		if len(diff) != 0 {
			return fmt.Errorf("found differences: %v", diff)
		}
	} else {
		if !res.IsSuperset(exp) {
			return fmt.Errorf("some elements are missing: expected %v, got %v", expected, result)
		}
	}
	return nil
}
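// A usage sketch for SameStringArray with hypothetical inputs, showing the two
// modes side by side:
func TestSameStringArraySketch(t *testing.T) {
	// Exact mode: "c" is in result but not in expected, so an error is returned.
	if err := SameStringArray([]string{"a", "b", "c"}, []string{"a", "b"}, false); err == nil {
		t.Errorf("expected a difference error")
	}
	// Include mode: result covers every element of expected, so no error.
	if err := SameStringArray([]string{"a", "b", "c"}, []string{"a", "b"}, true); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}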
Example #24
// roundTripSame verifies the same source object is tested in all API versions
// yielded by codecsToTest
func roundTripSame(t *testing.T, group testapi.TestGroup, item runtime.Object, except ...string) {
	set := sets.NewString(except...)
	seed := rand.Int63()
	fuzzInternalObject(t, group.InternalGroupVersion(), item, seed)

	version := *group.GroupVersion()
	codecs := []runtime.Codec{}
	for _, fn := range codecsToTest {
		codec, ok, err := fn(version, item)
		if err != nil {
			t.Errorf("unable to get codec: %v", err)
			return
		}
		if !ok {
			continue
		}
		codecs = append(codecs, codec)
	}

	if !set.Has(version.String()) {
		fuzzInternalObject(t, version, item, seed)
		for _, codec := range codecs {
			roundTrip(t, codec, item)
		}
	}
}
Example #25
// ToSet takes a list of resource names and converts it to a string set
func ToSet(resourceNames []api.ResourceName) sets.String {
	result := sets.NewString()
	for _, resourceName := range resourceNames {
		result.Insert(string(resourceName))
	}
	return result
}
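Example #26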
func TestGetServerResourcesWithV1Server(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		var obj interface{}
		switch req.URL.Path {
		case "/api":
			obj = &metav1.APIVersions{
				Versions: []string{
					"v1",
				},
			}
		default:
			w.WriteHeader(http.StatusNotFound)
			return
		}
		output, err := json.Marshal(obj)
		if err != nil {
			t.Errorf("unexpected encoding error: %v", err)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		w.Write(output)
	}))
	defer server.Close()
	client := NewDiscoveryClientForConfigOrDie(&restclient.Config{Host: server.URL})
	// ServerResources should not return an error even if the server returns an error at /api/v1.
	serverResources, err := client.ServerResources()
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	gvs := groupVersions(serverResources)
	if !sets.NewString(gvs...).Has("v1") {
		t.Errorf("missing v1 in resource list: %v", serverResources)
	}
}
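// groupVersions is assumed by the assertion above. A sketch that flattens the
// discovery result into its GroupVersion strings:
func groupVersions(resources []*metav1.APIResourceList) []string {
	result := []string{}
	for _, resourceList := range resources {
		result = append(result, resourceList.GroupVersion)
	}
	return result
}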
Example #27
// GetEndpointNodes returns a map from node name to external IPs for the nodes
// on which the endpoints of the given Service are running.
func (j *ServiceTestJig) GetEndpointNodes(svc *v1.Service) map[string][]string {
	nodes := j.GetNodes(MaxNodesForEndpointsTests)
	endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
	if err != nil {
		Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err)
	}
	if len(endpoints.Subsets) == 0 {
		Failf("Endpoint has no subsets, cannot determine node addresses.")
	}
	epNodes := sets.NewString()
	for _, ss := range endpoints.Subsets {
		for _, e := range ss.Addresses {
			if e.NodeName != nil {
				epNodes.Insert(*e.NodeName)
			}
		}
	}
	nodeMap := map[string][]string{}
	for _, n := range nodes.Items {
		if epNodes.Has(n.Name) {
			nodeMap[n.Name] = GetNodeAddresses(&n, v1.NodeExternalIP)
		}
	}
	return nodeMap
}
Example #28
// Find the names of all zones and the region in which we have nodes in this cluster.
func getZoneNames(client *clientset.Clientset) (zones []string, region string, err error) {
	zoneNames := sets.NewString()
	nodes, err := client.Core().Nodes().List(api.ListOptions{})
	if err != nil {
		glog.Errorf("Failed to list nodes while getting zone names: %v", err)
		return nil, "", err
	}
	for i, node := range nodes.Items {
		// TODO: quinton-hoole make this more efficient.
		//       For non-multi-zone clusters the zone will be identical for all
		//       nodes, so we only need to look at one node. For multi-zone
		//       clusters we know at build time which zones are included, so we
		//       should rather get this info from there, because it's cheaper.
		zoneName, err := getZoneNameForNode(node)
		if err != nil {
			return nil, "", err
		}
		zoneNames.Insert(zoneName)
		if i == 0 {
			region, err = getRegionNameForNode(node)
			if err != nil {
				return nil, "", err
			}
		}
	}
	return zoneNames.List(), region, nil
}
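// getZoneNameForNode and getRegionNameForNode are not shown above. They
// conventionally read the well-known beta failure-domain labels off the Node;
// a sketch under that assumption (the helper bodies are illustrative):
func getZoneNameForNode(node api.Node) (string, error) {
	const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
	if zone, ok := node.Labels[zoneLabel]; ok {
		return zone, nil
	}
	return "", fmt.Errorf("node %s has no zone label %q", node.Name, zoneLabel)
}

func getRegionNameForNode(node api.Node) (string, error) {
	const regionLabel = "failure-domain.beta.kubernetes.io/region"
	if region, ok := node.Labels[regionLabel]; ok {
		return region, nil
	}
	return "", fmt.Errorf("node %s has no region label %q", node.Name, regionLabel)
}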
Example #29
func TestAnonymous(t *testing.T) {
	var a authenticator.Request = NewAuthenticator()
	u, ok, err := a.AuthenticateRequest(nil)
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	if !ok {
		t.Fatalf("Unexpectedly unauthenticated")
	}
	if u.GetName() != user.Anonymous {
		t.Fatalf("Expected username %s, got %s", user.Anonymous, u.GetName())
	}
	if !sets.NewString(u.GetGroups()...).Equal(sets.NewString(user.AllUnauthenticated)) {
		t.Fatalf("Expected group %s, got %v", user.AllUnauthenticated, u.GetGroups())
	}
}
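// TestAnonymous pins down the observable contract of NewAuthenticator: every
// request authenticates as the anonymous user in the all-unauthenticated group.
// A sketch of an implementation satisfying it (illustrative, not necessarily
// the upstream code):
func NewAuthenticator() authenticator.Request {
	return authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) {
		return &user.DefaultInfo{Name: user.Anonymous, Groups: []string{user.AllUnauthenticated}}, true, nil
	})
}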
Example #30
// clusterSyncLoop observes changes to the set of running clusters, applies all
// services to newly added clusters, and adds DNS records for the changes.
func (s *ServiceController) clusterSyncLoop() {
	var servicesToUpdate []*cachedService
	// Should we remove the cache for a cluster that goes from ready to not ready? If not, this condition predicate should be removed.
	clusters, err := s.clusterStore.ClusterCondition(getClusterConditionPredicate()).List()
	if err != nil {
		glog.Infof("Fail to get cluster list")
		return
	}
	newClusters := clustersFromList(&clusters)
	var newSet, increase sets.String
	newSet = sets.NewString(newClusters...)
	if newSet.Equal(s.knownClusterSet) {
		// The set of cluster names in the services in the federation hasn't changed, but we can retry
		// updating any services that we failed to update last time around.
		servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters)
		return
	}
	glog.Infof("Detected change in list of cluster names. New  set: %v, Old set: %v", newSet, s.knownClusterSet)
	increase = newSet.Difference(s.knownClusterSet)
	// Do nothing when clusters are only removed.
	if increase.Len() != 0 {
		// Try updating all services, and save the ones that fail to try again next
		// round.
		servicesToUpdate = s.serviceCache.allServices()
		numServices := len(servicesToUpdate)
		for newCluster := range increase {
			glog.Infof("New cluster observed %s", newCluster)
			s.updateAllServicesToCluster(servicesToUpdate, newCluster)
		}
		servicesToUpdate = s.updateDNSRecords(servicesToUpdate, newClusters)
		glog.Infof("Successfully updated %d out of %d DNS records to direct traffic to the updated cluster",
			numServices-len(servicesToUpdate), numServices)
	}
	s.knownClusterSet = newSet
}