Example #1
// buildClientDiagnostics builds client Diagnostic objects based on the rawConfig passed in.
// Returns the Diagnostics built, an "ok" bool for whether to proceed or abort, and an error if any was encountered while building the diagnostics.
func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, bool, error) {
	available := availableClientDiagnostics

	// osClient, kubeClient, clientErr := o.Factory.Clients() // use with a diagnostic that needs OpenShift/Kube client
	_, _, clientErr := o.Factory.Clients()
	if clientErr != nil {
		o.Logger.Notice("CED0001", "Failed creating client from config; client diagnostics will be limited to config testing")
		available = sets.NewString(clientdiags.ConfigContextsName)
	}

	diagnostics := []types.Diagnostic{}
	requestedDiagnostics := intersection(sets.NewString(o.RequestedDiagnostics...), available).List()
	for _, diagnosticName := range requestedDiagnostics {
		switch diagnosticName {
		case clientdiags.ConfigContextsName:
			for contextName := range rawConfig.Contexts {
				diagnostics = append(diagnostics, clientdiags.ConfigContext{RawConfig: rawConfig, ContextName: contextName})
			}

		default:
			return nil, false, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
		}
	}
	return diagnostics, true, clientErr
}
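The intersection helper called above is not defined in this snippet. A minimal sketch of what it could look like, assuming the sets package from k8s.io/apimachinery/pkg/util/sets (the body here is illustrative, not the original helper):

func intersection(s1 sets.String, s2 sets.String) sets.String {
	// sets.String already provides Intersection; the helper only gives it
	// a package-local name for the call site above.
	return s1.Intersection(s2)
}

For example, intersection(sets.NewString("ConfigContexts", "DiagnosticPod"), sets.NewString("ConfigContexts")).List() yields []string{"ConfigContexts"}.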
Example #2
// TestProcessItem verifies that processItem performs the expected actions.
func TestProcessItem(t *testing.T) {
	pod := newDanglingPod()
	podBytes, err := json.Marshal(pod)
	if err != nil {
		t.Fatal(err)
	}
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				podBytes,
			},
		},
	}
	podResource := []unversioned.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientConfig.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc)
	clientConfig.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(clientConfig, dynamic.LegacyAPIPathResolverFunc)
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, podResource)
	if err != nil {
		t.Fatal(err)
	}
	item := &node{
		identity: objectReference{
			OwnerReference: metatypes.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The processItem routine should get the latest item from the server.
		owners: nil,
	}
	err = gc.processItem(item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")

	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}
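One detail worth noting: expectedActionSet.Difference(actualActionSet) only reports expected actions that never happened, while extra unexpected actions fail Equal but show up only in the raw set dump. A small sketch of the comparison pattern, assuming fmt and the apimachinery sets package are imported:

func reportMissingActions() {
	expected := sets.NewString("GET=/x", "DELETE=/y")
	actual := sets.NewString("GET=/x")
	if !expected.Equal(actual) {
		// Difference returns the elements of expected that are absent from actual.
		fmt.Printf("missing actions: %v\n", expected.Difference(actual).List()) // [DELETE=/y]
	}
}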
Example #3
func (s *serviceAccount) limitSecretReferences(serviceAccount *api.ServiceAccount, pod *api.Pod) error {
	// Ensure all secrets the pod references are allowed by the service account
	mountableSecrets := sets.NewString()
	for _, s := range serviceAccount.Secrets {
		mountableSecrets.Insert(s.Name)
	}
	for _, volume := range pod.Spec.Volumes {
		source := volume.VolumeSource
		if source.Secret == nil {
			continue
		}
		secretName := source.Secret.SecretName
		if !mountableSecrets.Has(secretName) {
			return fmt.Errorf("Volume with secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", secretName, serviceAccount.Name)
		}
	}

	// limit pull secret references as well
	pullSecrets := sets.NewString()
	for _, s := range serviceAccount.ImagePullSecrets {
		pullSecrets.Insert(s.Name)
	}
	for i, pullSecretRef := range pod.Spec.ImagePullSecrets {
		if !pullSecrets.Has(pullSecretRef.Name) {
			return fmt.Errorf(`imagePullSecrets[%d].name="%s" is not allowed because service account %s does not reference that imagePullSecret`, i, pullSecretRef.Name, serviceAccount.Name)
		}
	}
	return nil
}
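Building each allowlist once and probing it per reference keeps every check O(1) instead of rescanning the ServiceAccount's secret list for each volume. A minimal sketch of the same pattern, assuming fmt and the apimachinery sets package (the names are illustrative):

func checkAllowlist() {
	allowed := sets.NewString("build-secret", "pull-secret")
	for _, name := range []string{"build-secret", "rogue-secret"} {
		if !allowed.Has(name) {
			fmt.Printf("secret %q is not allowed\n", name) // fires for "rogue-secret"
		}
	}
}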
Example #4
func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper {
	// the list of kinds that are scoped at the root of the api hierarchy
	// if a kind is not enumerated here, it is assumed to have a namespace scope
	rootScoped := sets.NewString(
		"Node",
		"Namespace",
		"PersistentVolume",
		"ComponentStatus",
	)

	// these kinds should be excluded from the list of resources
	ignoredKinds := sets.NewString(
		"ListOptions",
		"DeleteOptions",
		"Status",
		"PodLogOptions",
		"PodExecOptions",
		"PodAttachOptions",
		"PodProxyOptions",
		"ThirdPartyResource",
		"ThirdPartyResourceData",
		"ThirdPartyResourceList")

	mapper := api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
	// setup aliases for groups of resources
	mapper.AddResourceAlias("all", userResources...)

	return mapper
}
Example #5
// Sync syncs firewall rules with the cloud.
func (fr *FirewallRules) Sync(nodePorts []int64, nodeNames []string) error {
	if len(nodePorts) == 0 {
		return fr.Shutdown()
	}
	// Firewall rule prefix must match that inserted by the gce library.
	suffix := fr.namer.FrSuffix()
	// TODO: Fix upstream gce cloudprovider lib so GET also takes the suffix
	// instead of the whole name.
	name := fr.namer.FrName(suffix)
	rule, _ := fr.cloud.GetFirewall(name)
	if rule == nil {
		glog.Infof("Creating global l7 firewall rule %v", name)
		return fr.cloud.CreateFirewall(suffix, "GCE L7 firewall rule", fr.srcRange, nodePorts, nodeNames)
	}

	requiredPorts := sets.NewString()
	for _, p := range nodePorts {
		requiredPorts.Insert(strconv.Itoa(int(p)))
	}
	existingPorts := sets.NewString()
	for _, allowed := range rule.Allowed {
		for _, p := range allowed.Ports {
			existingPorts.Insert(p)
		}
	}
	if requiredPorts.Equal(existingPorts) {
		return nil
	}
	glog.V(3).Infof("Firewall rule %v already exists, updating nodeports %v", name, nodePorts)
	return fr.cloud.UpdateFirewall(suffix, "GCE L7 firewall rule", fr.srcRange, nodePorts, nodeNames)
}
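Because sets.String only holds strings, the int64 node ports are stringified before the set comparison. A tiny sketch of that conversion, assuming strconv and the apimachinery sets package are imported:

func portsAsSet(ports []int64) sets.String {
	s := sets.NewString()
	for _, p := range ports {
		// FormatInt avoids the int64-to-int cast used above
		s.Insert(strconv.FormatInt(p, 10))
	}
	return s
}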
Example #6
func TestAddAfterTry(t *testing.T) {
	evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
	evictor.Add("first", "11111")
	evictor.Add("second", "22222")
	evictor.Add("third", "33333")
	evictor.Remove("second")

	deletedMap := sets.NewString()
	evictor.Try(func(value TimedValue) (bool, time.Duration) {
		deletedMap.Insert(value.Value)
		return true, 0
	})

	setPattern := sets.NewString("first", "third")
	if len(deletedMap) != len(setPattern) {
		t.Fatalf("Map %v should have length %d", deletedMap, len(setPattern))
	}
	if !CheckSetEq(setPattern, deletedMap) {
		t.Errorf("Invalid map. Got %v, expected %v", deletedMap, setPattern)
	}

	evictor.Add("first", "11111")
	evictor.Try(func(value TimedValue) (bool, time.Duration) {
		t.Errorf("We shouldn't process the same value if the explicit remove wasn't called.")
		return true, 0
	})
}
Example #7
// TestAdmissionPluginChains makes sure that the admission plugin lists are coherent.
// we have to maintain three different lists of plugins: default origin, default kube, default combined
// the set of (default origin and default kube) and the default combined set must be equal
// the order of default origin must follow the order of default combined
// the order of default kube must follow the order of default combined
func TestAdmissionPluginChains(t *testing.T) {
	individualSet := sets.NewString(openshiftAdmissionControlPlugins...)
	individualSet.Insert(KubeAdmissionPlugins...)
	combinedSet := sets.NewString(CombinedAdmissionControlPlugins...)

	if !individualSet.Equal(combinedSet) {
		t.Fatalf("individualSet is missing: %v, combinedSet is missing: %v", combinedSet.Difference(individualSet), individualSet.Difference(combinedSet))
	}

	lastCurrIndex := -1
	for _, plugin := range openshiftAdmissionControlPlugins {
		for lastCurrIndex = lastCurrIndex + 1; lastCurrIndex < len(CombinedAdmissionControlPlugins); lastCurrIndex++ {
			if CombinedAdmissionControlPlugins[lastCurrIndex] == plugin {
				break
			}
		}

		if lastCurrIndex >= len(CombinedAdmissionControlPlugins) {
			t.Errorf("openshift admission plugins are out of order compared to the combined list.  Failed at %v", plugin)
		}
	}

	lastCurrIndex = -1
	for _, plugin := range KubeAdmissionPlugins {
		for lastCurrIndex = lastCurrIndex + 1; lastCurrIndex < len(CombinedAdmissionControlPlugins); lastCurrIndex++ {
			if CombinedAdmissionControlPlugins[lastCurrIndex] == plugin {
				break
			}
		}

		if lastCurrIndex >= len(CombinedAdmissionControlPlugins) {
			t.Errorf("kube admission plugins are out of order compared to the combined list.  Failed at %v", plugin)
		}
	}
}
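The nested loops implement an ordered-subsequence check: each plugin in the individual list must occur in the combined list in the same relative order. A hedged sketch of the same check factored into a helper (hypothetical; the original test inlines the loops):

func isOrderedSubsequence(sub, full []string) bool {
	i := 0
	for _, want := range sub {
		// advance through full until want is found
		for i < len(full) && full[i] != want {
			i++
		}
		if i == len(full) {
			return false // want never appears after the previous match
		}
		i++
	}
	return true
}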
Example #8
// buildClientDiagnostics builds client Diagnostic objects based on the rawConfig passed in.
// Returns the Diagnostics built, an "ok" bool for whether to proceed or abort, and an error if any was encountered while building the diagnostics.
func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, bool, error) {
	available := availableClientDiagnostics

	// osClient, kubeClient, clientErr := o.Factory.Clients() // use with a diagnostic that needs OpenShift/Kube client
	_, _, clientErr := o.Factory.Clients()
	if clientErr != nil {
		o.Logger.Notice("CED0001", "Could not configure a client, so client diagnostics are limited to testing configuration and connection")
		available = sets.NewString(clientdiags.ConfigContextsName)
	}

	diagnostics := []types.Diagnostic{}
	requestedDiagnostics := intersection(sets.NewString(o.RequestedDiagnostics...), available).List()
	for _, diagnosticName := range requestedDiagnostics {
		switch diagnosticName {
		case clientdiags.ConfigContextsName:
			seen := map[string]bool{}
			for contextName := range rawConfig.Contexts {
				diagnostic := clientdiags.ConfigContext{RawConfig: rawConfig, ContextName: contextName}
				if clusterUser, defined := diagnostic.ContextClusterUser(); !defined {
					// definitely want to diagnose the broken context
					diagnostics = append(diagnostics, diagnostic)
				} else if !seen[clusterUser] {
					seen[clusterUser] = true // avoid validating same user for multiple projects
					diagnostics = append(diagnostics, diagnostic)
				}
			}

		default:
			return nil, false, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
		}
	}
	return diagnostics, true, clientErr
}
Example #9
func (sq *SubmitQueue) doGenCommitters(config *github_util.Config) error {
	pushUsers, pullUsers, err := config.UsersWithAccess()
	if err != nil {
		glog.Fatalf("Unable to read committers from github: %v", err)
	}

	pushSet := sets.NewString()
	for _, user := range pushUsers {
		pushSet.Insert(*user.Login)
	}
	pullSet := sets.NewString()
	for _, user := range pullUsers {
		pullSet.Insert(*user.Login)
	}

	if err = writeWhitelist(sq.Committers, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions should go in the whitelist", pushSet); err != nil {
		glog.Fatalf("Unable to write committers: %v", err)
	}
	glog.Info("Successfully updated committers file.")

	existingWhitelist, err := loadWhitelist(sq.Whitelist)
	if err != nil {
		glog.Fatalf("error loading whitelist; it will not be updated: %v", err)
	}

	neededInWhitelist := existingWhitelist.Union(pullSet)
	neededInWhitelist = neededInWhitelist.Difference(pushSet)
	if err = writeWhitelist(sq.Whitelist, "# auto-generated by "+os.Args[0]+" gen-committers; manual additions may be added by hand", neededInWhitelist); err != nil {
		glog.Fatalf("Unable to write additional user whitelist: %v", err)
	}
	glog.Info("Successfully update whitelist file.")
	return nil
}
Example #10
// computeUpdatedSCC determines whether the expected SCC matches the actual SCC.
// It does this by making the expected SCC mirror the actual SCC for items that
// we are not reconciling and performing a diff (ignoring changes to metadata).
// If a diff is produced then the expected SCC is submitted as needing an update.
func (o *ReconcileSCCOptions) computeUpdatedSCC(expected kapi.SecurityContextConstraints, actual kapi.SecurityContextConstraints) (*kapi.SecurityContextConstraints, bool) {
	needsUpdate := false

	// if unioning old and new groups/users then make the expected SCC contain both;
	// also preserve the actual SCC's priority
	if o.Union {
		groupSet := sets.NewString(actual.Groups...)
		groupSet.Insert(expected.Groups...)
		expected.Groups = groupSet.List()

		userSet := sets.NewString(actual.Users...)
		userSet.Insert(expected.Users...)
		expected.Users = userSet.List()

		if actual.Priority != nil {
			expected.Priority = actual.Priority
		}
	}

	// sort users and groups so ordering differences do not show up in the diff
	sort.StringSlice(actual.Groups).Sort()
	sort.StringSlice(actual.Users).Sort()
	sort.StringSlice(expected.Groups).Sort()
	sort.StringSlice(expected.Users).Sort()

	// make a copy of the expected scc here so we can ignore metadata diffs.
	updated := expected
	expected.ObjectMeta = actual.ObjectMeta

	if !kapi.Semantic.DeepEqual(expected, actual) {
		needsUpdate = true
	}

	return &updated, needsUpdate
}
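Since sets.String.List() already returns its elements sorted, the merged group and user lists produced in the union branch come back sorted; the explicit sort calls mainly normalize the actual SCC and the non-union path. A small sketch of the merge, assuming fmt and the apimachinery sets package:

func mergeGroups() {
	groupSet := sets.NewString("system:authenticated", "devs")
	groupSet.Insert("devs", "qa") // duplicates collapse
	fmt.Println(groupSet.List())  // [devs qa system:authenticated], already sorted
}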
Example #11
func TestCacheKeyFields(t *testing.T) {
	keyJSON, err := cacheKey(kapi.NewContext(), &authorizer.DefaultAuthorizationAttributes{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	keyMap := map[string]interface{}{}
	if err := json.Unmarshal([]byte(keyJSON), &keyMap); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	keys := sets.NewString()
	for k := range keyMap {
		keys.Insert(strings.ToLower(k))
	}

	// These are attributes we don't expect to appear in the cache key
	expectedMissingKeys := sets.NewString("requestattributes")

	attrType := reflect.TypeOf((*authorizer.AuthorizationAttributes)(nil)).Elem()
	for i := 0; i < attrType.NumMethod(); i++ {
		name := attrType.Method(i).Name
		name = strings.TrimPrefix(name, "Get")
		name = strings.TrimPrefix(name, "Is")
		name = strings.ToLower(name)
		if !keys.Has(name) && !expectedMissingKeys.Has(name) {
			t.Errorf("computed cache key is missing an entry for %s", attrType.Method(i).Name)
		}
	}
}
Example #12
func TestInstancesAddedToZones(t *testing.T) {
	cm := NewFakeClusterManager(DefaultClusterUID)
	lbc := newLoadBalancerController(t, cm, "")
	zoneToNode := map[string][]string{
		"zone-1": {"n1", "n2"},
		"zone-2": {"n3"},
	}
	addNodes(lbc, zoneToNode)

	// Create 2 igs, one per zone.
	testIG := "test-ig"
	testPort := int64(3001)
	lbc.CloudClusterManager.instancePool.AddInstanceGroup(testIG, testPort)

	// The node pool syncs kube-nodes; this will add them to both igs.
	lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"})
	gotZonesToNode := cm.fakeIGs.GetInstancesByZone()

	i := 0
	for z, nodeNames := range zoneToNode {
		if ig, err := cm.fakeIGs.GetInstanceGroup(testIG, z); err != nil {
			t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err)
		}
		if cm.fakeIGs.Ports[i] != testPort {
			t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
		}
		expNodes := sets.NewString(nodeNames...)
		gotNodes := sets.NewString(gotZonesToNode[z]...)
		if !gotNodes.Equal(expNodes) {
			t.Errorf("Nodes not added to zones, expected %+v got %+v", expNodes, gotNodes)
		}
		i++
	}
}
Example #13
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
	mkObj := func(id string, val string) testStoreObject {
		return testStoreObject{id: id, val: val}
	}

	// Test Index
	expected := map[string]sets.String{}
	expected["b"] = sets.NewString("a", "c")
	expected["f"] = sets.NewString("e")
	expected["h"] = sets.NewString("g")
	indexer.Add(mkObj("a", "b"))
	indexer.Add(mkObj("c", "b"))
	indexer.Add(mkObj("e", "f"))
	indexer.Add(mkObj("g", "h"))
	{
		for k, v := range expected {
			found := sets.String{}
			indexResults, err := indexer.Index("by_val", mkObj("", k))
			if err != nil {
				t.Errorf("Unexpected error %v", err)
			}
			for _, item := range indexResults {
				found.Insert(item.(testStoreObject).id)
			}
			items := v.List()
			if !found.HasAll(items...) {
				t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
			}
		}
	}
}
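Note that found.HasAll(items...) is one-directional: unexpected extra ids in the index result would not fail the assertion. A sketch of the distinction, assuming fmt and the apimachinery sets package (an alternative formulation, not the original assertion):

func hasAllVersusEqual() {
	found := sets.NewString("a", "c", "x") // "x" is an unexpected extra
	expected := sets.NewString("a", "c")
	fmt.Println(found.HasAll(expected.List()...)) // true: extras pass HasAll
	fmt.Println(found.Equal(expected))            // false: Equal catches extras
}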
Example #14
func TestProxyProtocolEnabled(t *testing.T) {
	policies := sets.NewString(ProxyProtocolPolicyName, "FooBarFoo")
	fakeBackend := &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
		PolicyNames:  stringSetToPointers(policies),
	}
	result := proxyProtocolEnabled(fakeBackend)
	assert.True(t, result, "expected to find %s in %s", ProxyProtocolPolicyName, policies)

	policies = sets.NewString("FooBarFoo")
	fakeBackend = &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
		PolicyNames: []*string{
			aws.String("FooBarFoo"),
		},
	}
	result = proxyProtocolEnabled(fakeBackend)
	assert.False(t, result, "did not expect to find %s in %s", ProxyProtocolPolicyName, policies)

	policies = sets.NewString()
	fakeBackend = &elb.BackendServerDescription{
		InstancePort: aws.Int64(80),
	}
	result = proxyProtocolEnabled(fakeBackend)
	assert.False(t, result, "did not expect to find %s in %s", ProxyProtocolPolicyName, policies)
}
Example #15
// Ingest implements extraction.Ingester (necessary for the Prometheus library
// to parse the metrics).
func (a *APIResponsiveness) Ingest(samples model.Samples) error {
	ignoredResources := sets.NewString("events")
	ignoredVerbs := sets.NewString("WATCHLIST", "PROXY")

	for _, sample := range samples {
		// Example line:
		// apiserver_request_latencies_summary{resource="namespaces",verb="LIST",quantile="0.99"} 908
		if sample.Metric[model.MetricNameLabel] != "apiserver_request_latencies_summary" {
			continue
		}

		resource := string(sample.Metric["resource"])
		verb := string(sample.Metric["verb"])
		if ignoredResources.Has(resource) || ignoredVerbs.Has(verb) {
			continue
		}
		latency := sample.Value
		quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
		if err != nil {
			return err
		}
		a.addMetric(resource, verb, quantile, time.Duration(int64(latency))*time.Microsecond)
	}
	return nil
}
Example #16
// NewAuthorizationCache creates a new AuthorizationCache
func NewAuthorizationCache(reviewer Reviewer, namespaceInterface kclient.NamespaceInterface,
	clusterPolicyLister client.SyncedClusterPoliciesListerInterface, clusterPolicyBindingLister client.SyncedClusterPolicyBindingsListerInterface,
	policyNamespacer client.SyncedPoliciesListerNamespacer, policyBindingNamespacer client.SyncedPolicyBindingsListerNamespacer,
) *AuthorizationCache {

	result := &AuthorizationCache{
		allKnownNamespaces:        sets.String{},
		namespaceStore:            cache.NewStore(cache.MetaNamespaceKeyFunc),
		namespaceInterface:        namespaceInterface,
		lastSyncResourceVersioner: &unchangingLastSyncResourceVersioner{},

		clusterPolicyResourceVersions:  sets.NewString(),
		clusterBindingResourceVersions: sets.NewString(),

		clusterPolicyLister:             clusterPolicyLister,
		clusterPolicyBindingLister:      clusterPolicyBindingLister,
		policyNamespacer:                policyNamespacer,
		policyBindingNamespacer:         policyBindingNamespacer,
		policyLastSyncResourceVersioner: unionLastSyncResourceVersioner{clusterPolicyLister, clusterPolicyBindingLister, policyNamespacer, policyBindingNamespacer},

		reviewRecordStore:       cache.NewStore(reviewRecordKeyFn),
		userSubjectRecordStore:  cache.NewStore(subjectRecordKeyFn),
		groupSubjectRecordStore: cache.NewStore(subjectRecordKeyFn),

		reviewer: reviewer,
		skip:     &neverSkipSynchronizer{},

		watchers: []CacheWatcher{},
	}
	result.syncHandler = result.syncRequest
	return result
}
Example #17
func TestEscalating(t *testing.T) {
	escalatingResources := NormalizeResources(sets.NewString(GroupsToResources[EscalatingResourcesGroupName]...))
	nonEscalatingResources := NormalizeResources(sets.NewString(GroupsToResources[NonEscalatingResourcesGroupName]...))
	if len(nonEscalatingResources) <= len(escalatingResources) {
		t.Errorf("groups look bad: escalating=%v nonescalating=%v", escalatingResources.List(), nonEscalatingResources.List())
	}
}
Example #18
func init() {
	groupMeta, err := latest.RegisterGroup("experimental")
	if err != nil {
		glog.V(4).Infof("%v", err)
		return
	}
	registeredGroupVersions := registered.GroupVersionsForGroup("experimental")
	groupVersion := registeredGroupVersions[0]
	*groupMeta = latest.GroupMeta{
		GroupVersion: groupVersion,
		Group:        apiutil.GetGroup(groupVersion),
		Version:      apiutil.GetVersion(groupVersion),
		Codec:        runtime.CodecFor(api.Scheme, groupVersion),
	}
	var versions []string
	var groupVersions []string
	for i := len(registeredGroupVersions) - 1; i >= 0; i-- {
		versions = append(versions, apiutil.GetVersion(registeredGroupVersions[i]))
		groupVersions = append(groupVersions, registeredGroupVersions[i])
	}
	groupMeta.Versions = versions
	groupMeta.GroupVersions = groupVersions

	groupMeta.SelfLinker = runtime.SelfLinker(accessor)

	// the list of kinds that are scoped at the root of the api hierarchy
	// if a kind is not enumerated here, it is assumed to have a namespace scope
	rootScoped := sets.NewString()

	ignoredKinds := sets.NewString()

	groupMeta.RESTMapper = api.NewDefaultRESTMapper("experimental", groupVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
	api.RegisterRESTMapper(groupMeta.RESTMapper)
	groupMeta.InterfacesFor = interfacesFor
}
Example #19
// Generate creates the capabilities based on policy rules.  Generate will produce the following:
// 1.  a capabilities.Add set containing all the required adds (unless the
// 		container is specifically dropping the cap) and container requested adds
// 2.  a capabilities.Drop set containing all the required drops and container requested drops
func (s *defaultCapabilities) Generate(pod *api.Pod, container *api.Container) (*api.Capabilities, error) {
	defaultAdd := makeCapSet(s.defaultAddCapabilities)
	requiredDrop := makeCapSet(s.requiredDropCapabilities)
	containerAdd := sets.NewString()
	containerDrop := sets.NewString()

	if container.SecurityContext != nil && container.SecurityContext.Capabilities != nil {
		containerAdd = makeCapSet(container.SecurityContext.Capabilities.Add)
		containerDrop = makeCapSet(container.SecurityContext.Capabilities.Drop)
	}

	// remove any default adds that the container is specifically dropping
	defaultAdd = defaultAdd.Difference(containerDrop)

	combinedAdd := defaultAdd.Union(containerAdd).List()
	combinedDrop := requiredDrop.Union(containerDrop).List()

	// nothing generated?  return nil
	if len(combinedAdd) == 0 && len(combinedDrop) == 0 {
		return nil, nil
	}

	return &api.Capabilities{
		Add:  capabilityFromStringSlice(combinedAdd),
		Drop: capabilityFromStringSlice(combinedDrop),
	}, nil
}
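The set arithmetic carries the whole policy: container drops override default adds, and drops accumulate. A worked sketch of that arithmetic with illustrative capability names, assuming fmt and the apimachinery sets package:

func capabilityArithmetic() {
	defaultAdd := sets.NewString("KILL", "CHOWN")
	requiredDrop := sets.NewString("MKNOD")
	containerAdd := sets.NewString("NET_ADMIN")
	containerDrop := sets.NewString("KILL")

	adds := defaultAdd.Difference(containerDrop).Union(containerAdd)
	drops := requiredDrop.Union(containerDrop)
	fmt.Println(adds.List())  // [CHOWN NET_ADMIN]: KILL was dropped by the container
	fmt.Println(drops.List()) // [KILL MKNOD]
}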
Example #20
// NewCmdConfigGetContexts creates a command object for the "get-contexts" action, which
// retrieves one or more contexts from a kubeconfig.
func NewCmdConfigGetContexts(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command {
	options := &GetContextsOptions{configAccess: configAccess}

	cmd := &cobra.Command{
		Use:     "get-contexts [(-o|--output=)name]",
		Short:   "Describe one or many contexts",
		Long:    getContextsLong,
		Example: getContextsExample,
		Run: func(cmd *cobra.Command, args []string) {
			validOutputTypes := sets.NewString("", "json", "yaml", "wide", "name", "custom-columns", "custom-columns-file", "go-template", "go-template-file", "jsonpath", "jsonpath-file")
			supportedOutputTypes := sets.NewString("", "name")
			outputFormat := cmdutil.GetFlagString(cmd, "output")
			if !validOutputTypes.Has(outputFormat) {
				cmdutil.CheckErr(fmt.Errorf("output must be one of '' or 'name': %v", outputFormat))
			}
			if !supportedOutputTypes.Has(outputFormat) {
				fmt.Fprintf(out, "--output %v is not available in kubectl config get-contexts; resetting to default output format\n", outputFormat)
				cmd.Flags().Set("output", "")
			}
			cmdutil.CheckErr(options.Complete(cmd, args, out))
			cmdutil.CheckErr(options.RunGetContexts())
		},
	}
	cmdutil.AddOutputFlags(cmd)
	cmdutil.AddNoHeadersFlags(cmd)
	return cmd
}
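Including the empty string in both sets is what lets an unset --output flag pass validation: sets.String is a map keyed by string, so "" is an ordinary member. A minimal sketch, assuming fmt and the apimachinery sets package:

func validateOutputFlag() {
	valid := sets.NewString("", "json", "yaml")
	fmt.Println(valid.Has(""))    // true: flag not set, treated as valid
	fmt.Println(valid.Has("xml")) // false: rejected
}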
Example #21
func init() {
	groupMeta, err := latest.RegisterGroup("metrics")
	if err != nil {
		glog.V(4).Infof("%v", err)
		return
	}

	registeredGroupVersions := registered.GroupVersionsForGroup("metrics")
	groupVersion := registeredGroupVersions[0]
	*groupMeta = latest.GroupMeta{
		GroupVersion: groupVersion,
		Codec:        runtime.CodecFor(api.Scheme, groupVersion.String()),
	}

	worstToBestGroupVersions := []unversioned.GroupVersion{}
	for i := len(registeredGroupVersions) - 1; i >= 0; i-- {
		worstToBestGroupVersions = append(worstToBestGroupVersions, registeredGroupVersions[i])
	}
	groupMeta.GroupVersions = registeredGroupVersions

	groupMeta.SelfLinker = runtime.SelfLinker(accessor)

	// the list of kinds that are scoped at the root of the api hierarchy
	// if a kind is not enumerated here, it is assumed to have a namespace scope
	rootScoped := sets.NewString()

	ignoredKinds := sets.NewString()

	groupMeta.RESTMapper = api.NewDefaultRESTMapper(worstToBestGroupVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
	api.RegisterRESTMapper(groupMeta.RESTMapper)
	groupMeta.InterfacesFor = interfacesFor
}
Example #22
func TestPetQueueScaleDown(t *testing.T) {
	replicas := 1
	ps := newPetSet(replicas)

	// knownPods are the pods in the system
	knownPods := newPodList(ps, 3)

	q := NewPetQueue(ps, knownPods)

	// The iterator will insert a single replica; the enqueue
	// mimics that behavior.
	pet, _ := newPCB(fmt.Sprintf("%v", 0), ps)
	q.enqueue(pet)

	deletes := sets.NewString(fmt.Sprintf("%v-1", ps.Name), fmt.Sprintf("%v-2", ps.Name))
	syncs := sets.NewString(fmt.Sprintf("%v-0", ps.Name))

	// Confirm that 2 known pods are deleted
	for i := 0; i < 3; i++ {
		p := q.dequeue()
		switch p.event {
		case syncPet:
			if !syncs.Has(p.pod.Name) {
				t.Errorf("Unexpected sync %v expecting %+v", p.pod.Name, syncs)
			}
		case deletePet:
			if !deletes.Has(p.pod.Name) {
				t.Errorf("Unexpected deletes %v expecting %+v", p.pod.Name, deletes)
			}
		}
	}
	if q.dequeue() != nil {
		t.Errorf("Expected no pets")
	}
}
Example #23
func GetBootstrapOpenshiftRoles(openshiftNamespace string) []authorizationapi.Role {
	roles := []authorizationapi.Role{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:      OpenshiftSharedResourceViewRoleName,
				Namespace: openshiftNamespace,
			},
			Rules: []authorizationapi.PolicyRule{
				{
					Verbs:     sets.NewString("get", "list"),
					Resources: sets.NewString("templates", authorizationapi.ImageGroupName),
				},
				{
					// so anyone can pull from openshift/* image streams
					Verbs:     sets.NewString("get"),
					Resources: sets.NewString("imagestreams/layers"),
				},
			},
		},
	}

	// we don't want to expose the resourcegroups externally because it makes it very difficult for customers to learn from
	// our default roles and hard for them to reason about what power they are granting their users
	for i := range roles {
		for j := range roles[i].Rules {
			roles[i].Rules[j].Resources = authorizationapi.NormalizeResources(roles[i].Rules[j].Resources)
		}
	}

	return roles
}
Example #24
// syncRequest takes a reviewRequest and determines if it should update the caches supplied; it is not thread-safe
func (ac *AuthorizationCache) syncRequest(request *reviewRequest, userSubjectRecordStore cache.Store, groupSubjectRecordStore cache.Store, reviewRecordStore cache.Store) error {

	lastKnownValue, err := lastKnown(reviewRecordStore, request.namespace)
	if err != nil {
		return err
	}

	if skipReview(request, lastKnownValue) {
		return nil
	}

	namespace := request.namespace
	review, err := ac.reviewer.Review(namespace)
	if err != nil {
		return err
	}

	usersToRemove := sets.NewString()
	groupsToRemove := sets.NewString()
	if lastKnownValue != nil {
		usersToRemove.Insert(lastKnownValue.users...)
		usersToRemove.Delete(review.Users()...)
		groupsToRemove.Insert(lastKnownValue.groups...)
		groupsToRemove.Delete(review.Groups()...)
	}

	deleteNamespaceFromSubjects(userSubjectRecordStore, usersToRemove.List(), namespace)
	deleteNamespaceFromSubjects(groupSubjectRecordStore, groupsToRemove.List(), namespace)
	addSubjectsToNamespace(userSubjectRecordStore, review.Users(), namespace)
	addSubjectsToNamespace(groupSubjectRecordStore, review.Groups(), namespace)
	cacheReviewRecord(request, lastKnownValue, review, reviewRecordStore)
	ac.notifyWatchers(namespace, lastKnownValue, sets.NewString(review.Users()...), sets.NewString(review.Groups()...))
	return nil
}
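The Insert-then-Delete sequence computes the subjects present in the last known review but absent from the new one, which is a plain set difference. A sketch of the equivalence, assuming fmt and the apimachinery sets package:

func staleSubjects() {
	oldUsers := []string{"alice", "bob"}
	newUsers := []string{"bob", "carol"}

	toRemove := sets.NewString()
	toRemove.Insert(oldUsers...)
	toRemove.Delete(newUsers...)
	// equivalent to: sets.NewString(oldUsers...).Difference(sets.NewString(newUsers...))
	fmt.Println(toRemove.List()) // [alice]
}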
Example #25
// CreateFromConfig creates a scheduler from the configuration file.
func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {
	glog.V(2).Infof("Creating scheduler from configuration: %v", policy)

	// validate the policy configuration
	if err := validation.ValidatePolicy(policy); err != nil {
		return nil, err
	}

	predicateKeys := sets.NewString()
	for _, predicate := range policy.Predicates {
		glog.V(2).Infof("Registering predicate: %s", predicate.Name)
		predicateKeys.Insert(RegisterCustomFitPredicate(predicate))
	}

	priorityKeys := sets.NewString()
	for _, priority := range policy.Priorities {
		glog.V(2).Infof("Registering priority: %s", priority.Name)
		priorityKeys.Insert(RegisterCustomPriorityFunction(priority))
	}

	extenders := make([]algorithm.SchedulerExtender, 0)
	if len(policy.ExtenderConfigs) != 0 {
		for ii := range policy.ExtenderConfigs {
			glog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii])
			if extender, err := scheduler.NewHTTPExtender(&policy.ExtenderConfigs[ii], policy.APIVersion); err != nil {
				return nil, err
			} else {
				extenders = append(extenders, extender)
			}
		}
	}
	return f.CreateFromKeys(predicateKeys, priorityKeys, extenders)
}
Example #26
func (o *DiagnosticsOptions) Validate() error {
	available := availableDiagnostics()

	if common := available.Intersection(sets.NewString(o.RequestedDiagnostics...)); len(common) == 0 {
		o.Logger.Error("CED3012", log.EvalTemplate("CED3012", "None of the requested diagnostics are available:\n  {{.requested}}\nPlease try from the following:\n  {{.available}}",
			log.Hash{"requested": o.RequestedDiagnostics, "available": available.List()}))
		return fmt.Errorf("No requested diagnostics are available: requested=%s available=%s", strings.Join(o.RequestedDiagnostics, " "), strings.Join(available.List(), " "))

	} else if len(common) < len(o.RequestedDiagnostics) {
		o.Logger.Error("CED3013", log.EvalTemplate("CED3013", `
Of the requested diagnostics:
    {{.requested}}
only these are available:
    {{.common}}
The list of all possible diagnostics is:
    {{.available}}
		`, log.Hash{"requested": o.RequestedDiagnostics, "common": common.List(), "available": available.List()}))

		return fmt.Errorf("Not all requested diagnostics are available: missing=%s requested=%s available=%s",
			strings.Join(sets.NewString(o.RequestedDiagnostics...).Difference(available).List(), " "),
			strings.Join(o.RequestedDiagnostics, " "),
			strings.Join(available.List(), " "))
	}

	return nil
}
Example #27
func TestConflictingUpdate(t *testing.T) {
	storage := makeLocalTestStorage()
	ctx := kapi.WithUser(kapi.WithNamespace(kapi.NewContext(), "unittest"), &user.DefaultInfo{Name: "system:admin"})
	realizedRoleObj, err := storage.Create(ctx, &authorizationapi.Role{
		ObjectMeta: kapi.ObjectMeta{Name: "my-role"},
		Rules: []authorizationapi.PolicyRule{
			{Verbs: sets.NewString(authorizationapi.VerbAll)},
		},
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	realizedRole := realizedRoleObj.(*authorizationapi.Role)

	role := &authorizationapi.Role{
		ObjectMeta: realizedRole.ObjectMeta,
		Rules: []authorizationapi.PolicyRule{
			{Verbs: sets.NewString("list", "update")},
		},
	}
	// bump the resource version string so the update is submitted with a mismatched, conflicting version
	role.ResourceVersion += "1"

	_, _, err = storage.Update(ctx, role.Name, rest.DefaultUpdatedObjectInfo(role, kapi.Scheme))
	if err == nil || !kapierrors.IsConflict(err) {
		t.Errorf("Expected conflict error, got: %#v", err)
	}
}
Example #28
func createSummaryTestPods(f *framework.Framework, podNamePrefix string, count int, volumeNamePrefix string) (sets.String, sets.String) {
	podNames := sets.NewString()
	volumes := sets.NewString(volumeNamePrefix)
	for i := 0; i < count; i++ {
		podNames.Insert(fmt.Sprintf("%s%v", podNamePrefix, i))
	}

	for _, podName := range podNames.List() {
		createPod(f, podName, []api.Container{
			{
				Image:   ImageRegistry[busyBoxImage],
				Command: []string{"sh", "-c", "while true; do echo 'hello world' | tee ~/file | tee /test-empty-dir-mnt ; sleep 1; done"},
				Name:    podName + containerSuffix,
				VolumeMounts: []api.VolumeMount{
					{MountPath: "/test-empty-dir-mnt", Name: volumeNamePrefix},
				},
			},
		}, []api.Volume{
			// TODO: Test secret volumes
			// TODO: Test hostpath volumes
			{Name: volumeNamePrefix, VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
		})
	}

	return podNames, volumes
}
Example #29
func newInvalidExtensionPolicies() []authorizationapi.Policy {
	return []authorizationapi.Policy{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:      authorizationapi.PolicyName,
				Namespace: "mallet",
			},
			Roles: map[string]*authorizationapi.Role{
				"badExtension": {
					ObjectMeta: kapi.ObjectMeta{
						Name:      "failure",
						Namespace: "mallet",
					},
					Rules: []authorizationapi.PolicyRule{
						{
							APIGroups:             []string{""},
							Verbs:                 sets.NewString("watch", "list", "get"),
							Resources:             sets.NewString("buildConfigs"),
							AttributeRestrictions: &authorizationapi.Role{},
						},
						{
							APIGroups: []string{""},
							Verbs:     sets.NewString("update"),
							Resources: sets.NewString("buildConfigs"),
						},
					},
				},
			},
		}}
}
Example #30
func TestNodeConfigTLS(t *testing.T) {
	signerCert, signerKey, signerSerial := makeSignerCert(t)
	defer os.Remove(signerCert)
	defer os.Remove(signerKey)
	defer os.Remove(signerSerial)

	configDirName := executeNodeConfig([]string{"--node=my-node", "--hostnames=example.org", "--listen=https://0.0.0.0", "--certificate-authority=" + signerCert, "--node-client-certificate-authority=" + signerCert, "--signer-cert=" + signerCert, "--signer-key=" + signerKey, "--signer-serial=" + signerSerial})
	defer os.Remove(configDirName)

	configDir, err := os.Open(configDirName)
	if err != nil {
		t.Fatalf("unable to read %v", configDirName)
	}

	fileNameSlice, err := configDir.Readdirnames(0)
	if err != nil {
		t.Fatalf("unable to read %v", configDirName)
	}
	filenames := sets.NewString(fileNameSlice...)

	expectedNames := sets.NewString("master-client.crt", "master-client.key", "server.crt", "server.key", "node-client-ca.crt", "node.kubeconfig", "node-config.yaml", "node-registration.json", "ca.crt")
	if !filenames.HasAll(expectedNames.List()...) || !expectedNames.HasAll(filenames.List()...) {
		t.Errorf("expected %v, got %v", expectedNames.List(), filenames.List())
	}
}
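Checking HasAll in both directions is exactly set equality, which sets.String.Equal expresses directly. A sketch of the equivalent assertion, assuming fmt and the apimachinery sets package (an equivalent formulation, not the original code):

func equalViaHasAll() {
	filenames := sets.NewString("ca.crt", "node-config.yaml")
	expectedNames := sets.NewString("ca.crt", "node-config.yaml")
	bothWays := filenames.HasAll(expectedNames.List()...) && expectedNames.HasAll(filenames.List()...)
	fmt.Println(bothWays == filenames.Equal(expectedNames)) // true
}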