Example #1
File: factory.go Project: nak3/kubernetes
func (f *factory) Object() (meta.RESTMapper, runtime.ObjectTyper) {
	mapper := registered.RESTMapper()
	discoveryClient, err := f.DiscoveryClient()
	if err == nil {
		mapper = meta.FirstHitRESTMapper{
			MultiRESTMapper: meta.MultiRESTMapper{
				discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, registered.InterfacesFor),
				registered.RESTMapper(), // hardcoded fall back
			},
		}
	}

	// wrap with shortcuts
	mapper = NewShortcutExpander(mapper, discoveryClient)

	// wrap with output preferences
	cfg, err := f.clients.ClientConfigForVersion(nil)
	checkErrWithPrefix("failed to get client config: ", err)
	cmdApiVersion := schema.GroupVersion{}
	if cfg.GroupVersion != nil {
		cmdApiVersion = *cfg.GroupVersion
	}
	mapper = kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []schema.GroupVersion{cmdApiVersion}}
	return mapper, api.Scheme
}
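For orientation: the mapper returned above satisfies the meta.RESTMapper interface, so callers never need to know whether discovery answered or the hardcoded fallback did. A minimal lookup sketch (resolveMapping is a hypothetical helper; the signatures match the vendored packages used in the example):

func resolveMapping(mapper meta.RESTMapper) (*meta.RESTMapping, error) {
	// "Pod" lives in the legacy ("") API group. FirstHitRESTMapper returns the
	// first successful answer, so this works even when discovery is unavailable
	// and only the registered fallback mapper can respond.
	return mapper.RESTMapping(schema.GroupKind{Kind: "Pod"}, "v1")
}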
Example #2
func setup(t *testing.T) (*httptest.Server, *garbagecollector.GarbageCollector, clientset.Interface) {
	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.EnableCoreControllers = false
	masterConfig.EnableGarbageCollection = true
	_, s := framework.RunAMaster(masterConfig)

	clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL})
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}
	groupVersionResources, err := clientSet.Discovery().ServerPreferredResources()
	if err != nil {
		t.Fatalf("Failed to get supported resources from server: %v", err)
	}
	config := &restclient.Config{Host: s.URL}
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	gc, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), groupVersionResources)
	if err != nil {
		t.Fatalf("Failed to create garbage collector")
	}
	return s, gc, clientSet
}
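A test would consume this helper roughly as follows; a hedged sketch (TestGarbageCollectorStarts is hypothetical, and gc.Run's signature is taken from Example #7 below):

func TestGarbageCollectorStarts(t *testing.T) {
	s, gc, _ := setup(t)
	defer s.Close()

	stop := make(chan struct{})
	defer close(stop)
	// Run the collector with a few workers until the test tears down.
	go gc.Run(5, stop)
}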
Example #3
// NewFixtureClients returns mocks of the OpenShift and Kubernetes clients
func NewFixtureClients(o testclient.ObjectRetriever) (osclient.Interface, kclient.Interface) {
	oc := &Fake{}
	oc.AddReactor("*", "*", testclient.ObjectReaction(o, registered.RESTMapper()))

	kc := &testclient.Fake{}
	kc.AddReactor("*", "*", testclient.ObjectReaction(o, registered.RESTMapper()))

	return oc, kc
}
Example #4
func setupGC(t *testing.T, config *restclient.Config) *GarbageCollector {
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	podResource := []schema.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), podResource)
	if err != nil {
		t.Fatal(err)
	}
	return gc
}
Example #5
func TestNewGarbageCollector(t *testing.T) {
	config := &restclient.Config{}
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig.NegotiatedSerializer = nil
	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	podResource := []schema.GroupVersionResource{{Version: "v1", Resource: "pods"}}
	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, registered.RESTMapper(), podResource)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 1, len(gc.monitors))
}
Example #6
File: edges.go Project: RomainVabre/origin
func AddHPAScaleRefEdges(g osgraph.Graph) {
	for _, node := range g.NodesByKind(kubegraph.HorizontalPodAutoscalerNodeKind) {
		hpaNode := node.(*kubegraph.HorizontalPodAutoscalerNode)

		syntheticMeta := kapi.ObjectMeta{
			Name:      hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Name,
			Namespace: hpaNode.HorizontalPodAutoscaler.Namespace,
		}

		var groupVersionResource unversioned.GroupVersionResource
		resource := strings.ToLower(hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.Kind)
		if groupVersion, err := unversioned.ParseGroupVersion(hpaNode.HorizontalPodAutoscaler.Spec.ScaleRef.APIVersion); err == nil {
			groupVersionResource = groupVersion.WithResource(resource)
		} else {
			groupVersionResource = unversioned.GroupVersionResource{Resource: resource}
		}

		groupVersionResource, err := registered.RESTMapper().ResourceFor(groupVersionResource)
		if err != nil {
			continue
		}

		var syntheticNode graph.Node
		switch groupVersionResource.GroupResource() {
		case kapi.Resource("replicationcontrollers"):
			syntheticNode = kubegraph.FindOrCreateSyntheticReplicationControllerNode(g, &kapi.ReplicationController{ObjectMeta: syntheticMeta})
		case deployapi.Resource("deploymentconfigs"):
			syntheticNode = deploygraph.FindOrCreateSyntheticDeploymentConfigNode(g, &deployapi.DeploymentConfig{ObjectMeta: syntheticMeta})
		default:
			continue
		}

		g.AddEdge(hpaNode, syntheticNode, ScalingEdgeKind)
	}
}
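The pivotal call is registered.RESTMapper().ResourceFor, which completes a partial GroupVersionResource. The same normalization in isolation (normalizeScaleRef is a hypothetical helper):

func normalizeScaleRef(kind, apiVersion string) (unversioned.GroupVersionResource, error) {
	partial := unversioned.GroupVersionResource{Resource: strings.ToLower(kind)}
	if gv, err := unversioned.ParseGroupVersion(apiVersion); err == nil {
		partial = gv.WithResource(partial.Resource)
	}
	// ResourceFor fills in whatever group/version information is missing.
	return registered.RESTMapper().ResourceFor(partial)
}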
Example #7
func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
	if !ctx.Options.EnableGarbageCollector {
		return false, nil
	}

	// TODO: should use a dynamic RESTMapper built from the discovery results.
	restMapper := registered.RESTMapper()

	gcClientset := ctx.ClientBuilder.ClientOrDie("generic-garbage-collector")
	preferredResources, err := gcClientset.Discovery().ServerPreferredResources()
	if err != nil {
		return true, fmt.Errorf("failed to get supported resources from server: %v", err)
	}
	deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources)
	deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources)
	if err != nil {
		return true, fmt.Errorf("Failed to parse resources from server: %v", err)
	}

	config := ctx.ClientBuilder.ConfigOrDie("generic-garbage-collector")
	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
	metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
	config.ContentConfig = dynamic.ContentConfig()
	clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
	garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, restMapper, deletableGroupVersionResources)
	if err != nil {
		return true, fmt.Errorf("Failed to start the generic garbage collector: %v", err)
	}
	workers := int(ctx.Options.ConcurrentGCSyncs)
	go garbageCollector.Run(workers, ctx.Stop)

	return true, nil
}
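The delete-verb filter generalizes to any verb set. A short sketch using the same discovery helpers for list/watch (listWatchable is hypothetical):

func listWatchable(all []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) {
	// Keep only resources the server lets us both list and watch.
	filtered := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"list", "watch"}}, all)
	return discovery.GroupVersionResources(filtered)
}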
Example #8
func startNamespaceController(ctx ControllerContext) (bool, error) {
	// TODO: should use a dynamic RESTMapper built from the discovery results.
	restMapper := registered.RESTMapper()

	// Find the list of namespaced resources via discovery that the namespace controller must manage
	namespaceKubeClient := ctx.ClientBuilder.ClientOrDie("namespace-controller")
	namespaceClientPool := dynamic.NewClientPool(ctx.ClientBuilder.ConfigOrDie("namespace-controller"), restMapper, dynamic.LegacyAPIPathResolverFunc)
	// TODO: consider using a list-watch + cache here rather than polling
	resources, err := namespaceKubeClient.Discovery().ServerResources()
	if err != nil {
		return true, fmt.Errorf("failed to get preferred server resources: %v", err)
	}
	gvrs, err := discovery.GroupVersionResources(resources)
	if err != nil {
		return true, fmt.Errorf("failed to parse preferred server resources: %v", err)
	}
	discoverResourcesFn := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources
	if _, found := gvrs[extensions.SchemeGroupVersion.WithResource("thirdpartyresource")]; found {
		// make discovery static
		snapshot, err := discoverResourcesFn()
		if err != nil {
			return true, fmt.Errorf("failed to get server resources: %v", err)
		}
		discoverResourcesFn = func() ([]*metav1.APIResourceList, error) {
			return snapshot, nil
		}
	}
	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, discoverResourcesFn, ctx.Options.NamespaceSyncPeriod.Duration, v1.FinalizerKubernetes)
	go namespaceController.Run(int(ctx.Options.ConcurrentNamespaceSyncs), ctx.Stop)

	return true, nil

}
Example #9
func TestStartBuildWebHook(t *testing.T) {
	invoked := make(chan struct{}, 1)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		invoked <- struct{}{}
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	cfg := &FakeClientConfig{}
	buf := &bytes.Buffer{}
	o := &StartBuildOptions{
		Out:          buf,
		ClientConfig: cfg,
		FromWebhook:  server.URL + "/webhook",
		Mapper:       registered.RESTMapper(),
	}
	if err := o.Run(); err != nil {
		t.Fatalf("unable to start hook: %v", err)
	}
	<-invoked

	o = &StartBuildOptions{
		Out:            buf,
		FromWebhook:    server.URL + "/webhook",
		GitPostReceive: "unknownpath",
	}
	if err := o.Run(); err == nil {
		t.Fatalf("unexpected non-error: %v", err)
	}
}
Example #10
func NewGarbageCollector(clientPool dynamic.ClientPool, resources []unversioned.GroupVersionResource) (*GarbageCollector, error) {
	gc := &GarbageCollector{
		clientPool:  clientPool,
		dirtyQueue:  workqueue.New(),
		orphanQueue: workqueue.New(),
		// TODO: should use a dynamic RESTMapper built from the discovery results.
		restMapper: registered.RESTMapper(),
	}
	gc.propagator = &Propagator{
		eventQueue: workqueue.New(),
		uidToNode: &concurrentUIDToNode{
			RWMutex:   &sync.RWMutex{},
			uidToNode: make(map[types.UID]*node),
		},
		gc: gc,
	}
	for _, resource := range resources {
		if _, ok := ignoredResources[resource]; ok {
			glog.V(6).Infof("ignore resource %#v", resource)
			continue
		}
		monitor, err := monitorFor(gc.propagator, gc.clientPool, resource)
		if err != nil {
			return nil, err
		}
		gc.monitors = append(gc.monitors, monitor)
	}
	return gc, nil
}
Example #11
func TestNewRESTInvalidType(t *testing.T) {
	storage := NewREST()
	_, err := storage.Create(nil, &kapi.Pod{})
	if err == nil {
		t.Errorf("Expected type error.")
	}

	if _, err := registered.RESTMapper().KindFor(template.Resource("processedtemplates").WithVersion("")); err != nil {
		t.Errorf("no processed templates: %v", err)
	}
}
Example #12
func TestResourceToKind(t *testing.T) {
	// Ensure we resolve to latest.Version
	expectedGVK := Version.WithKind("User")
	gvk, err := registered.RESTMapper().KindFor(userapi.SchemeGroupVersion.WithResource("User"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if gvk != expectedGVK {
		t.Fatalf("Expected RESTMapper.KindFor('user') to be %#v, got %#v", expectedGVK, gvk)
	}
}
Example #13
func TestRESTRootScope(t *testing.T) {
	for _, v := range [][]string{{"v1"}} {
		mapping, err := registered.RESTMapper().RESTMapping(kapi.Kind("Node"), v...)
		if err != nil {
			t.Fatal(err)
		}
		if mapping.Scope.Name() != meta.RESTScopeNameRoot {
			t.Errorf("Node should have a root scope: %#v", mapping.Scope)
		}
	}
}
Example #14
func TestUpstreamResourceToKind(t *testing.T) {
	// Ensure we resolve to klatest.ExternalVersions[0]
	meta, _ := registered.Group("")
	expectedGVK := meta.GroupVersion.WithKind("Pod")
	gvk, err := registered.RESTMapper().KindFor(kapi.SchemeGroupVersion.WithResource("Pod"))
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if gvk != expectedGVK {
		t.Fatalf("Expected RESTMapper.KindFor('pod') to be %#v, got %#v", expectedGVK, gvk)
	}
}
Example #15
// Start starts the namespace controller.
func (n *NamespaceController) Start() error {
	// Use the default QPS
	config := restclient.AddUserAgent(&restclient.Config{Host: framework.TestContext.Host}, ncName)
	client, err := clientset.NewForConfig(config)
	if err != nil {
		return err
	}
	clientPool := dynamic.NewClientPool(config, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	discoverResourcesFn := client.Discovery().ServerPreferredNamespacedResources
	nc := namespacecontroller.NewNamespaceController(client, clientPool, discoverResourcesFn, ncResyncPeriod, v1.FinalizerKubernetes)
	go nc.Run(ncConcurrency, n.stopCh)
	return nil
}
Example #16
// NewSimpleFake returns a client that will respond with the provided objects
func NewSimpleFake(objects ...runtime.Object) *Fake {
	o := ktestclient.NewObjects(kapi.Scheme, kapi.Codecs.UniversalDecoder())
	for _, obj := range objects {
		if err := o.Add(obj); err != nil {
			panic(err)
		}
	}

	fakeClient := &Fake{}
	fakeClient.AddReactor("*", "*", ktestclient.ObjectReaction(o, registered.RESTMapper()))

	return fakeClient
}
Example #17
func GetRequiresNamespace(obj runtime.Object) (bool, error) {
	groupVersionKinds, _, err := kapi.Scheme.ObjectKinds(obj)
	if err != nil {
		return false, err
	}

	restMapping, err := registered.RESTMapper().RESTMapping(groupVersionKinds[0].GroupKind())
	if err != nil {
		return false, err
	}

	return restMapping.Scope.Name() == meta.RESTScopeNameNamespace, nil
}
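One natural consumer of this check is namespace defaulting: write a namespace into an object only when its mapping demands one. A hedged sketch (ensureNamespace is hypothetical; meta.NewAccessor is the same generic accessor used in Example #29):

func ensureNamespace(obj runtime.Object, namespace string) error {
	required, err := GetRequiresNamespace(obj)
	if err != nil {
		return err
	}
	if !required {
		return nil
	}
	// Set the namespace through the generic metadata accessor.
	return meta.NewAccessor().SetNamespace(obj, namespace)
}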
Example #18
// NewSimpleFake returns a client that will respond with the provided objects
func NewSimpleFake(objects ...runtime.Object) *Fake {
	o := NewObjects(api.Scheme, api.Codecs.UniversalDecoder())
	for _, obj := range objects {
		if err := o.Add(obj); err != nil {
			panic(err)
		}
	}

	fakeClient := &Fake{}
	fakeClient.AddReactor("*", "*", ObjectReaction(o, registered.RESTMapper()))

	fakeClient.AddWatchReactor("*", DefaultWatchReactor(watch.NewFake(), nil))

	return fakeClient
}
Example #19
// Clientset returns a clientset that will respond with the provided objects
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
	o := core.NewObjects(api.Scheme, api.Codecs.UniversalDecoder())
	for _, obj := range objects {
		if err := o.Add(obj); err != nil {
			panic(err)
		}
	}

	fakePtr := core.Fake{}
	fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper()))

	fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))

	return &Clientset{fakePtr}
}
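Because Clientset embeds core.Fake, tests can layer extra reactors on top of the defaults registered above; a hedged sketch:

cs := NewSimpleClientset(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "pod-a", Namespace: "ns"}})
// Override watch behavior for pods only; other resources keep the default reactor.
cs.AddWatchReactor("pods", core.DefaultWatchReactor(watch.NewFake(), nil))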
Example #20
func (f *factory) Object() (meta.RESTMapper, runtime.ObjectTyper) {
	cfg, err := f.clientConfig.ClientConfig()
	checkErrWithPrefix("failed to get client config: ", err)
	cmdApiVersion := unversioned.GroupVersion{}
	if cfg.GroupVersion != nil {
		cmdApiVersion = *cfg.GroupVersion
	}

	mapper := registered.RESTMapper()
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err == nil {
		// register third party resources with the api machinery groups.  This probably should be done, but
		// its consistent with old code, so we'll start with it.
		if err := registerThirdPartyResources(discoveryClient); err != nil {
			fmt.Fprintf(os.Stderr, "Unable to register third party resources: %v\n", err)
		}
		// ThirdPartyResourceData is special.  It's not discoverable, but needed for thirdparty resource listing
		// TODO eliminate this once we're truly generic.
		thirdPartyResourceDataMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{extensionsv1beta1.SchemeGroupVersion}, registered.InterfacesFor)
		thirdPartyResourceDataMapper.Add(extensionsv1beta1.SchemeGroupVersion.WithKind("ThirdPartyResourceData"), meta.RESTScopeNamespace)

		mapper = meta.FirstHitRESTMapper{
			MultiRESTMapper: meta.MultiRESTMapper{
				discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, registered.InterfacesFor),
				thirdPartyResourceDataMapper, // needed for TPR printing
				registered.RESTMapper(),      // hardcoded fall back
			},
		}
	}

	// wrap with shortcuts
	mapper = NewShortcutExpander(mapper, discoveryClient)
	// wrap with output preferences
	mapper = kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
	return mapper, api.Scheme
}
Example #21
func TestStartBuildHookPostReceive(t *testing.T) {
	invoked := make(chan *buildapi.GenericWebHookEvent, 1)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		event := buildapi.GenericWebHookEvent{}
		decoder := json.NewDecoder(r.Body)
		if err := decoder.Decode(&event); err != nil {
			t.Errorf("unmarshal failed: %v", err)
		}
		invoked <- &event
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	f, _ := ioutil.TempFile("", "test")
	defer os.Remove(f.Name())
	fmt.Fprintf(f, `0000 2384 refs/heads/master
2548 2548 refs/heads/stage`)
	f.Close()

	testErr := errors.New("not enabled")
	cfg := &FakeClientConfig{
		Err: testErr,
	}
	buf := &bytes.Buffer{}
	o := &StartBuildOptions{
		Out:            buf,
		ClientConfig:   cfg,
		FromWebhook:    server.URL + "/webhook",
		GitPostReceive: f.Name(),
		Mapper:         registered.RESTMapper(),
	}
	if err := o.Run(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	event := <-invoked
	if event == nil || event.Git == nil || len(event.Git.Refs) != 1 {
		t.Fatalf("unexpected event: %#v", event)
	}
	if event.Git.Refs[0].Commit != "2384" {
		t.Fatalf("unexpected ref: %#v", event.Git.Refs[0])
	}
}
Example #22
func TestRESTMapper(t *testing.T) {
	gv := schema.GroupVersion{Group: "", Version: "v1"}
	rcGVK := gv.WithKind("ReplicationController")
	podTemplateGVK := gv.WithKind("PodTemplate")

	if gvk, err := registered.RESTMapper().KindFor(internal.SchemeGroupVersion.WithResource("replicationcontrollers")); err != nil || gvk != rcGVK {
		t.Errorf("unexpected version mapping: %v %v", gvk, err)
	}

	if m, err := registered.GroupOrDie(internal.GroupName).RESTMapper.RESTMapping(podTemplateGVK.GroupKind(), ""); err != nil || m.GroupVersionKind != podTemplateGVK || m.Resource != "podtemplates" {
		t.Errorf("unexpected version mapping: %#v %v", m, err)
	}

	for _, version := range registered.GroupOrDie(internal.GroupName).GroupVersions {
		mapping, err := registered.GroupOrDie(internal.GroupName).RESTMapper.RESTMapping(rcGVK.GroupKind(), version.Version)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}

		if mapping.Resource != "replicationControllers" && mapping.Resource != "replicationcontrollers" {
			t.Errorf("incorrect resource name: %#v", mapping)
		}
		if mapping.GroupVersionKind.GroupVersion() != version {
			t.Errorf("incorrect version: %v", mapping)
		}

		interfaces, _ := registered.GroupOrDie(internal.GroupName).InterfacesFor(version)
		if mapping.ObjectConvertor != interfaces.ObjectConvertor {
			t.Errorf("unexpected: %#v, expected: %#v", mapping, interfaces)
		}

		rc := &internal.ReplicationController{ObjectMeta: internal.ObjectMeta{Name: "foo"}}
		name, err := mapping.MetadataAccessor.Name(rc)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if name != "foo" {
			t.Errorf("unable to retrieve object meta with: %v", mapping.MetadataAccessor)
		}
	}
}
Example #23
func BuildGraph(path string) (osgraph.Graph, []runtime.Object, error) {
	g := osgraph.New()
	objs := []runtime.Object{}

	abspath, err := filepath.Abs(path)
	if err != nil {
		return g, objs, err
	}

	mapper := registered.RESTMapper()
	typer := kapi.Scheme
	clientMapper := resource.ClientMapperFunc(func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		return nil, nil
	})

	r := resource.NewBuilder(mapper, typer, clientMapper, kapi.Codecs.UniversalDecoder()).
		FilenameParam(false, &resource.FilenameOptions{Recursive: false, Filenames: []string{abspath}}).
		Flatten().
		Do()

	if r.Err() != nil {
		return g, objs, r.Err()
	}

	infos, err := r.Infos()
	if err != nil {
		return g, objs, err
	}
	for _, info := range infos {
		objs = append(objs, info.Object)

		if err := EnsureNode(g, info.Object); err != nil {
			return g, objs, err
		}
	}

	return g, objs, nil
}
Example #24
File: apply.go Project: nak3/kubernetes
func getRESTMappings(pruneResources *[]pruneResource) (namespaced, nonNamespaced []*meta.RESTMapping, err error) {
	if len(*pruneResources) == 0 {
		// default whitelist
		// TODO: need to handle the older api versions - e.g. v1beta1 jobs. Github issue: #35991
		*pruneResources = []pruneResource{
			{"", "v1", "ConfigMap", true},
			{"", "v1", "Endpoints", true},
			{"", "v1", "Namespace", false},
			{"", "v1", "PersistentVolumeClaim", true},
			{"", "v1", "PersistentVolume", false},
			{"", "v1", "Pod", true},
			{"", "v1", "ReplicationController", true},
			{"", "v1", "Secret", true},
			{"", "v1", "Service", true},
			{"batch", "v1", "Job", true},
			{"extensions", "v1beta1", "DaemonSet", true},
			{"extensions", "v1beta1", "Deployment", true},
			{"extensions", "v1beta1", "HorizontalPodAutoscaler", true},
			{"extensions", "v1beta1", "Ingress", true},
			{"extensions", "v1beta1", "ReplicaSet", true},
			{"apps", "v1beta1", "StatefulSet", true},
		}
	}
	registeredMapper := registered.RESTMapper()
	for _, resource := range *pruneResources {
		addedMapping, err := registeredMapper.RESTMapping(schema.GroupKind{Group: resource.group, Kind: resource.kind}, resource.version)
		if err != nil {
			return nil, nil, fmt.Errorf("invalid resource %v: %v", resource, err)
		}
		if resource.namespaced {
			namespaced = append(namespaced, addedMapping)
		} else {
			nonNamespaced = append(nonNamespaced, addedMapping)
		}
	}

	return namespaced, nonNamespaced, nil
}
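Callers pass the prune list by pointer so the default whitelist is written back and remains visible afterward; a minimal usage sketch (pruneMappings is hypothetical):

func pruneMappings() (namespaced, nonNamespaced []*meta.RESTMapping, err error) {
	// An empty slice triggers installation of the default whitelist above.
	var prune []pruneResource
	return getRESTMappings(&prune)
}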
Example #25
func NewGarbageCollector(metaOnlyClientPool dynamic.ClientPool, clientPool dynamic.ClientPool, resources []unversioned.GroupVersionResource) (*GarbageCollector, error) {
	gc := &GarbageCollector{
		metaOnlyClientPool: metaOnlyClientPool,
		clientPool:         clientPool,
		// TODO: should use a dynamic RESTMapper built from the discovery results.
		restMapper:                       registered.RESTMapper(),
		clock:                            clock.RealClock{},
		dirtyQueue:                       workqueue.NewTimedWorkQueue(),
		orphanQueue:                      workqueue.NewTimedWorkQueue(),
		registeredRateLimiter:            NewRegisteredRateLimiter(resources),
		registeredRateLimiterForMonitors: NewRegisteredRateLimiter(resources),
		absentOwnerCache:                 NewUIDCache(100),
	}
	gc.propagator = &Propagator{
		eventQueue: workqueue.NewTimedWorkQueue(),
		uidToNode: &concurrentUIDToNode{
			RWMutex:   &sync.RWMutex{},
			uidToNode: make(map[types.UID]*node),
		},
		gc: gc,
	}
	for _, resource := range resources {
		if _, ok := ignoredResources[resource]; ok {
			glog.V(6).Infof("ignore resource %#v", resource)
			continue
		}
		kind, err := gc.restMapper.KindFor(resource)
		if err != nil {
			return nil, err
		}
		monitor, err := gc.monitorFor(resource, kind)
		if err != nil {
			return nil, err
		}
		gc.monitors = append(gc.monitors, monitor)
	}
	return gc, nil
}
Example #26
func TestStartBuildWebHookHTTPS(t *testing.T) {
	invoked := make(chan struct{}, 1)
	server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		invoked <- struct{}{}
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	testErr := errors.New("not enabled")
	cfg := &FakeClientConfig{
		Err: testErr,
	}
	buf := &bytes.Buffer{}
	o := &StartBuildOptions{
		Out:          buf,
		ClientConfig: cfg,
		FromWebhook:  server.URL + "/webhook",
		Mapper:       registered.RESTMapper(),
	}
	if err := o.Run(); err == nil || !strings.Contains(err.Error(), "certificate signed by unknown authority") {
		t.Fatalf("unexpected non-error: %v", err)
	}
}
Example #27
// test the list and watch functions correctly converts the ListOptions
func TestGCListWatcher(t *testing.T) {
	testHandler := &fakeActionHandler{}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientPool := dynamic.NewClientPool(clientConfig, registered.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
	podResource := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	client, err := clientPool.ClientForGroupVersionResource(podResource)
	if err != nil {
		t.Fatal(err)
	}
	lw := gcListWatcher(client, podResource)
	lw.Watch(v1.ListOptions{ResourceVersion: "1"})
	lw.List(v1.ListOptions{ResourceVersion: "1"})
	if e, a := 2, len(testHandler.actions); e != a {
		t.Errorf("expect %d requests, got %d", e, a)
	}
	if e, a := "resourceVersion=1", testHandler.actions[0].query; e != a {
		t.Errorf("expect %s, got %s", e, a)
	}
	if e, a := "resourceVersion=1", testHandler.actions[1].query; e != a {
		t.Errorf("expect %s, got %s", e, a)
	}
}
Example #28
func TestResolveResource(t *testing.T) {
	mapper := clientcmd.ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}}

	tests := []struct {
		name             string
		defaultResource  string
		resourceString   string
		expectedResource string
		expectedName     string
		expectedErr      bool
	}{
		{
			name:             "invalid case #1",
			defaultResource:  "",
			resourceString:   "a/b/c",
			expectedResource: "",
			expectedName:     "",
			expectedErr:      true,
		},
		{
			name:             "invalid case #2",
			defaultResource:  "",
			resourceString:   "foo/bar",
			expectedResource: "",
			expectedName:     "",
			expectedErr:      true,
		},
		{
			name:             "empty resource string case #1",
			defaultResource:  "",
			resourceString:   "",
			expectedResource: "",
			expectedName:     "",
			expectedErr:      false,
		},
		{
			name:             "empty resource string case #2",
			defaultResource:  "",
			resourceString:   "bar",
			expectedResource: "",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "empty resource string case #3",
			defaultResource:  "foo",
			resourceString:   "bar",
			expectedResource: "foo",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(KUBE) short name",
			defaultResource:  "foo",
			resourceString:   "rc/bar",
			expectedResource: "replicationcontrollers",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(KUBE) long name, case insensitive #1",
			defaultResource:  "foo",
			resourceString:   "replicationcontroller/bar",
			expectedResource: "replicationcontrollers",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(KUBE) long name, case insensitive #2",
			defaultResource:  "foo",
			resourceString:   "replicationcontrollers/bar",
			expectedResource: "replicationcontrollers",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(KUBE) long name, case insensitive #3",
			defaultResource:  "foo",
			resourceString:   "ReplicationControllers/bar",
			expectedResource: "replicationcontrollers",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(KUBE) long name, case insensitive #4",
			defaultResource:  "foo",
			resourceString:   "ReplicationControllers/bar",
			expectedResource: "replicationcontrollers",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(KUBE) long name, case insensitive #5",
			defaultResource:  "foo",
			resourceString:   "ReplicationControllers/Bar",
			expectedResource: "replicationcontrollers",
			expectedName:     "Bar",
			expectedErr:      false,
		},
		{
			name:             "(ORIGIN) short name",
			defaultResource:  "foo",
			resourceString:   "bc/bar",
			expectedResource: "buildconfigs",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(ORIGIN) long name, case insensitive #1",
			defaultResource:  "foo",
			resourceString:   "buildconfig/bar",
			expectedResource: "buildconfigs",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(ORIGIN) long name, case insensitive #2",
			defaultResource:  "foo",
			resourceString:   "buildconfigs/bar",
			expectedResource: "buildconfigs",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(ORIGIN) long name, case insensitive #3",
			defaultResource:  "foo",
			resourceString:   "BuildConfigs/bar",
			expectedResource: "buildconfigs",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(ORIGIN) long name, case insensitive #4",
			defaultResource:  "foo",
			resourceString:   "BuildConfigs/bar",
			expectedResource: "buildconfigs",
			expectedName:     "bar",
			expectedErr:      false,
		},
		{
			name:             "(ORIGIN) long name, case insensitive #5",
			defaultResource:  "foo",
			resourceString:   "BuildConfigs/Bar",
			expectedResource: "buildconfigs",
			expectedName:     "Bar",
			expectedErr:      false,
		},
	}

	for _, test := range tests {
		gotResource, gotName, gotErr := util.ResolveResource(test.defaultResource, test.resourceString, mapper)
		if gotErr != nil && !test.expectedErr {
			t.Errorf("%s: expected no error, got %v", test.name, gotErr)
			continue
		}
		if gotErr == nil && test.expectedErr {
			t.Errorf("%s: expected error but got none", test.name)
			continue
		}
		if gotResource != test.expectedResource {
			t.Errorf("%s: expected resource type %s, got %s", test.name, test.expectedResource, gotResource)
			continue
		}
		if gotName != test.expectedName {
			t.Errorf("%s: expected resource name %s, got %s", test.name, test.expectedName, gotName)
			continue
		}
	}
}
Example #29
// NewFactory creates a factory with the default Kubernetes resources defined
// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// if optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}

			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}

			// eventually this should allow me choose a group priority based on the order of the discovery doc, for now hardcode a given order
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
				ResourcePriority: []unversioned.GroupVersionResource{
					{Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
				},
				KindPriority: []unversioned.GroupVersionKind{
					{Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
				},
			}

			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			switch mapping.GroupVersionKind.Group {
			case api.GroupName:
				return client.RESTClient, nil
			case autoscaling.GroupName:
				return client.AutoscalingClient.RESTClient, nil
			case batch.GroupName:
				return client.BatchClient.RESTClient, nil
			case extensions.GroupName:
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			if toInternal {
				return api.Codecs.UniversalDecoder()
			}
			return api.Codecs.UniversalDeserializer()
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return "", fmt.Errorf("invalid label selector: %v", err)
				}
				return selector.String(), nil
			case *extensions.ReplicaSet:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return "", fmt.Errorf("failed to convert label selector to selector: %v", err)
				}
				return selector.String(), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvk)
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			clientset := clientset.FromUnversionedClient(client)
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *extensions.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
	}
}
Example #30
// NewFactory creates an object that holds common methods across all OpenShift commands
func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
	restMapper := registered.RESTMapper()

	clients := &clientCache{
		clients: make(map[string]*client.Client),
		configs: make(map[string]*restclient.Config),
		loader:  clientConfig,
	}

	w := &Factory{
		Factory:                cmdutil.NewFactory(clientConfig),
		OpenShiftClientConfig:  clientConfig,
		clients:                clients,
		ImageResolutionOptions: &imageResolutionOptions{},
	}

	w.Object = func(bool) (meta.RESTMapper, runtime.ObjectTyper) {
		defaultMapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: restMapper}}
		defaultTyper := api.Scheme

		// Output using whatever version was negotiated in the client cache. The
		// version we decode with may not be the same as what the server requires.
		cfg, err := clients.ClientConfigForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}

		cmdApiVersion := unversioned.GroupVersion{}
		if cfg.GroupVersion != nil {
			cmdApiVersion = *cfg.GroupVersion
		}

		// at this point we've negotiated and can get the client
		oclient, err := clients.ClientForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}

		cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host)
		cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(oclient.RESTClient), cacheDir, time.Duration(10*time.Minute))

		// if we can't find the server version or its too old to have Kind information in the discovery doc, skip the discovery RESTMapper
		// and use our hardcoded levels
		mapper := registered.RESTMapper()
		if serverVersion, err := cachedDiscoverClient.ServerVersion(); err == nil && useDiscoveryRESTMapper(serverVersion.GitVersion) {
			mapper = restmapper.NewDiscoveryRESTMapper(cachedDiscoverClient)
		}
		mapper = NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper})
		return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme
	}

	w.UnstructuredObject = func() (meta.RESTMapper, runtime.ObjectTyper, error) {
		// load a discovery client from the default config
		cfg, err := clients.ClientConfigForVersion(nil)
		if err != nil {
			return nil, nil, err
		}
		dc, err := discovery.NewDiscoveryClientForConfig(cfg)
		if err != nil {
			return nil, nil, err
		}
		cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host)
		cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(dc.RESTClient), cacheDir, time.Duration(10*time.Minute))

		// enumerate all group resources
		groupResources, err := discovery.GetAPIGroupResources(cachedDiscoverClient)
		if err != nil {
			return nil, nil, err
		}

		// Register unknown APIs as third party for now to make
		// validation happy. TODO perhaps make a dynamic schema
		// validator to avoid this.
		for _, group := range groupResources {
			for _, version := range group.Group.Versions {
				gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version}
				if !registered.IsRegisteredVersion(gv) {
					registered.AddThirdPartyAPIGroupVersions(gv)
				}
			}
		}

		// construct unstructured mapper and typer
		mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured)
		typer := discovery.NewUnstructuredObjectTyper(groupResources)
		return NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper}), typer, nil
	}

	kClientForMapping := w.Factory.ClientForMapping
	w.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		}
		return kClientForMapping(mapping)
	}

	kUnstructuredClientForMapping := w.Factory.UnstructuredClientForMapping
	w.UnstructuredClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := client.SetOpenShiftDefaults(cfg); err != nil {
				return nil, err
			}
			cfg.APIPath = "/apis"
			if mapping.GroupVersionKind.Group == api.GroupName {
				cfg.APIPath = "/oapi"
			}
			gv := mapping.GroupVersionKind.GroupVersion()
			cfg.ContentConfig = dynamic.ContentConfig()
			cfg.GroupVersion = &gv
			return restclient.RESTClientFor(cfg)
		}
		return kUnstructuredClientForMapping(mapping)
	}

	// Save original Describer function
	kDescriberFunc := w.Factory.Describer
	w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			oClient, kClient, err := w.Clients()
			if err != nil {
				return nil, fmt.Errorf("unable to create client %s: %v", mapping.GroupVersionKind.Kind, err)
			}

			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			cfg, err := clients.ClientConfigForVersion(&mappingVersion)
			if err != nil {
				return nil, fmt.Errorf("unable to load a client %s: %v", mapping.GroupVersionKind.Kind, err)
			}

			describer, ok := describe.DescriberFor(mapping.GroupVersionKind.GroupKind(), oClient, kClient, cfg.Host)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
			}
			return describer, nil
		}
		return kDescriberFunc(mapping)
	}
	kScalerFunc := w.Factory.Scaler
	w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
		if mapping.GroupVersionKind.GroupKind() == deployapi.Kind("DeploymentConfig") {
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigScaler(oc, kc), nil
		}
		return kScalerFunc(mapping)
	}
	kReaperFunc := w.Factory.Reaper
	w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigReaper(oc, kc), nil
		case authorizationapi.Kind("Role"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authorizationreaper.NewRoleReaper(oc, oc), nil
		case authorizationapi.Kind("ClusterRole"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil
		case userapi.Kind("User"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authenticationreaper.NewUserReaper(
				client.UsersInterface(oc),
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case userapi.Kind("Group"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authenticationreaper.NewGroupReaper(
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case buildapi.Kind("BuildConfig"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return buildcmd.NewBuildConfigReaper(oc), nil
		}
		return kReaperFunc(mapping)
	}
	kGenerators := w.Factory.Generators
	w.Generators = func(cmdName string) map[string]kubectl.Generator {
		originGenerators := DefaultGenerators(cmdName)
		kubeGenerators := kGenerators(cmdName)

		ret := map[string]kubectl.Generator{}
		for k, v := range kubeGenerators {
			ret[k] = v
		}
		for k, v := range originGenerators {
			ret[k] = v
		}
		return ret
	}
	kMapBasedSelectorForObjectFunc := w.Factory.MapBasedSelectorForObject
	w.MapBasedSelectorForObject = func(object runtime.Object) (string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return kubectl.MakeLabels(t.Spec.Selector), nil
		default:
			return kMapBasedSelectorForObjectFunc(object)
		}
	}
	kPortsForObjectFunc := w.Factory.PortsForObject
	w.PortsForObject = func(object runtime.Object) ([]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getPorts(t.Spec.Template.Spec), nil
		default:
			return kPortsForObjectFunc(object)
		}
	}
	kLogsForObjectFunc := w.Factory.LogsForObject
	w.LogsForObject = func(object, options runtime.Object) (*restclient.Request, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			dopts, ok := options.(*deployapi.DeploymentLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a DeploymentLogOptions")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil
		case *buildapi.Build:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			if bopts.Version != nil {
				return nil, errors.New("cannot specify a version and a build")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil
		case *buildapi.BuildConfig:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			builds, err := oc.Builds(t.Namespace).List(api.ListOptions{})
			if err != nil {
				return nil, err
			}
			builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigPredicate(t.Name))
			if len(builds.Items) == 0 {
				return nil, fmt.Errorf("no builds found for %q", t.Name)
			}
			if bopts.Version != nil {
				// If a version has been specified, try to get the logs from that build.
				desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version))
				return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil
			}
			sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items)))
			return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil
		default:
			return kLogsForObjectFunc(object, options)
		}
	}
	// Saves current resource name (or alias if any) in PrintOptions. Once saved, it will not be overwritten by the
	// kubernetes resource alias look-up, as it will notice a non-empty value in `options.Kind`
	w.Printer = func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) {
		if mapping != nil {
			options.Kind = mapping.Resource
			if alias, ok := resourceShortFormFor(mapping.Resource); ok {
				options.Kind = alias
			}
		}
		return describe.NewHumanReadablePrinter(options), nil
	}
	// PrintResourceInfos receives a list of resource infos and prints versioned objects if a generic output format was specified
	// otherwise, it iterates through info objects, printing each resource with a unique printer for its mapping
	w.PrintResourceInfos = func(cmd *cobra.Command, infos []*resource.Info, out io.Writer) error {
		printer, generic, err := cmdutil.PrinterForCommand(cmd)
		if err != nil {
			return err
		}
		if !generic {
			for _, info := range infos {
				mapping := info.ResourceMapping()
				printer, err := w.PrinterForMapping(cmd, mapping, false)
				if err != nil {
					return err
				}
				if err := printer.PrintObj(info.Object, out); err != nil {
					return err
				}
			}
			return nil
		}

		clientConfig, err := w.ClientConfig()
		if err != nil {
			return err
		}
		outputVersion, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion)
		if err != nil {
			return err
		}
		object, err := resource.AsVersionedObject(infos, len(infos) != 1, outputVersion, api.Codecs.LegacyCodec(outputVersion))
		if err != nil {
			return err
		}
		return printer.PrintObj(object, out)

	}
	kCanBeExposed := w.Factory.CanBeExposed
	w.CanBeExposed = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeExposed(kind)
	}
	kCanBeAutoscaled := w.Factory.CanBeAutoscaled
	w.CanBeAutoscaled = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeAutoscaled(kind)
	}
	kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject
	w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			_, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			selector := labels.SelectorFromSet(t.Spec.Selector)
			f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
			pod, _, err := cmdutil.GetFirstPod(kc, t.Namespace, selector, 1*time.Minute, f)
			return pod, err
		default:
			return kAttachablePodForObjectFunc(object)
		}
	}
	kUpdatePodSpecForObject := w.Factory.UpdatePodSpecForObject
	w.UpdatePodSpecForObject = func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) {
		switch t := obj.(type) {
		case *deployapi.DeploymentConfig:
			template := t.Spec.Template
			if template == nil {
				template = &api.PodTemplateSpec{}
				t.Spec.Template = template
			}
			return true, fn(&template.Spec)
		default:
			return kUpdatePodSpecForObject(obj, fn)
		}
	}
	kProtocolsForObject := w.Factory.ProtocolsForObject
	w.ProtocolsForObject = func(object runtime.Object) (map[string]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getProtocols(t.Spec.Template.Spec), nil
		default:
			return kProtocolsForObject(object)
		}
	}

	kSwaggerSchemaFunc := w.Factory.SwaggerSchema
	w.Factory.SwaggerSchema = func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
		if !latest.OriginKind(gvk) {
			return kSwaggerSchemaFunc(gvk)
		}
		// TODO: we need to register the OpenShift API under the Kube group, and start returning the OpenShift
		// group from the scheme.
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		return w.OriginSwaggerSchema(oc.RESTClient, gvk.GroupVersion())
	}

	w.EditorEnvs = func() []string {
		return []string{"OC_EDITOR", "EDITOR"}
	}
	w.PrintObjectSpecificMessage = func(obj runtime.Object, out io.Writer) {}
	kPauseObjectFunc := w.Factory.PauseObject
	w.Factory.PauseObject = func(object runtime.Object) (bool, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = true
			oc, _, err := w.Clients()
			if err != nil {
				return false, err
			}
			_, err = oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Pause the deployer containers.
			return false, err
		default:
			return kPauseObjectFunc(object)
		}
	}
	kResumeObjectFunc := w.Factory.ResumeObject
	w.Factory.ResumeObject = func(object runtime.Object) (bool, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if !t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = false
			oc, _, err := w.Clients()
			if err != nil {
				return false, err
			}
			_, err = oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Resume the deployer containers.
			return false, err
		default:
			return kResumeObjectFunc(object)
		}
	}
	kResolveImageFunc := w.Factory.ResolveImage
	w.Factory.ResolveImage = func(image string) (string, error) {
		options := w.ImageResolutionOptions.(*imageResolutionOptions)
		if imageutil.IsDocker(options.Source) {
			return kResolveImageFunc(image)
		}
		oc, _, err := w.Clients()
		if err != nil {
			return "", err
		}
		namespace, _, err := w.DefaultNamespace()
		if err != nil {
			return "", err
		}
		return imageutil.ResolveImagePullSpec(oc, oc, options.Source, image, namespace)
	}
	kHistoryViewerFunc := w.Factory.HistoryViewer
	w.Factory.HistoryViewer = func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigHistoryViewer(oc, kc), nil
		}
		return kHistoryViewerFunc(mapping)
	}
	kRollbackerFunc := w.Factory.Rollbacker
	w.Factory.Rollbacker = func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigRollbacker(oc), nil
		}
		return kRollbackerFunc(mapping)
	}
	kStatusViewerFunc := w.Factory.StatusViewer
	w.Factory.StatusViewer = func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deploycmd.NewDeploymentConfigStatusViewer(oc), nil
		}
		return kStatusViewerFunc(mapping)
	}

	return w
}