Code example #1
// manageReplicas checks and updates replicas for the given ReplicaSet.
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) {
	diff := len(filteredPods) - rs.Spec.Replicas
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)
		return
	}
	if diff < 0 {
		diff *= -1
		if diff > rsc.burstReplicas {
			diff = rsc.burstReplicas
		}
		rsc.expectations.ExpectCreations(rsKey, diff)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := rsc.podControl.CreatePods(rs.Namespace, rs.Spec.Template, rs); err != nil {
					// Decrement the expected number of creates because the informer won't observe this pod
					glog.V(2).Infof("Failed creation, decrementing expectations for replica set %q/%q", rs.Namespace, rs.Name)
					rsc.expectations.CreationObserved(rsKey)
					utilruntime.HandleError(err)
				}
			}()
		}
		wait.Wait()
	} else if diff > 0 {
		if diff > rsc.burstReplicas {
			diff = rsc.burstReplicas
		}
		rsc.expectations.ExpectDeletions(rsKey, diff)
		glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
		// No need to sort pods if we are about to delete all of them
		if rs.Spec.Replicas != 0 {
			// Sort the pods in the order such that not-ready < ready, unscheduled
			// < scheduled, and pending < running. This ensures that we delete pods
			// in the earlier stages whenever possible.
			sort.Sort(controller.ActivePods(filteredPods))
		}

		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				if err := rsc.podControl.DeletePod(rs.Namespace, filteredPods[ix].Name, rs); err != nil {
					// Decrement the expected number of deletes because the informer won't observe this deletion
					glog.V(2).Infof("Failed deletion, decrementing expectations for replica set %q/%q", rs.Namespace, rs.Name)
					rsc.expectations.DeletionObserved(rsKey)
					utilruntime.HandleError(err)
				}
			}(i)
		}
		wait.Wait()
	}
}
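The snippet leans on the controller's expectations bookkeeping: ExpectCreations records how many creates the next sync should wait for, and CreationObserved gives one back when a create fails, since the informer will never report a pod that was never made. A minimal sketch of that contract, with illustrative types rather than the real controller.ControllerExpectations:

package main

import (
	"fmt"
	"sync"
)

type expectations struct {
	mu     sync.Mutex
	counts map[string]int // controller key -> outstanding creates
}

func newExpectations() *expectations {
	return &expectations{counts: map[string]int{}}
}

// ExpectCreations records that n creations should be observed for key.
func (e *expectations) ExpectCreations(key string, n int) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.counts[key] += n
}

// CreationObserved lowers the outstanding count by one (e.g. after a
// failed create that the informer will never report).
func (e *expectations) CreationObserved(key string) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.counts[key] > 0 {
		e.counts[key]--
	}
}

// SatisfiedExpectations reports whether the controller may sync again.
func (e *expectations) SatisfiedExpectations(key string) bool {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.counts[key] == 0
}

func main() {
	e := newExpectations()
	e.ExpectCreations("ns/rs", 3)
	e.CreationObserved("ns/rs")                   // one create failed
	fmt.Println(e.SatisfiedExpectations("ns/rs")) // false: 2 still expected
}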
Code example #2
func (f *ring1Factory) AttachablePodForObject(object runtime.Object) (*api.Pod, error) {
	clientset, err := f.clientAccessFactory.ClientSetForVersion(nil)
	if err != nil {
		return nil, err
	}
	switch t := object.(type) {
	case *api.ReplicationController:
		selector := labels.SelectorFromSet(t.Spec.Selector)
		sortBy := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
		pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
		return pod, err
	case *extensions.Deployment:
		selector, err := metav1.LabelSelectorAsSelector(t.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid label selector: %v", err)
		}
		sortBy := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
		pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
		return pod, err
	case *batch.Job:
		selector, err := metav1.LabelSelectorAsSelector(t.Spec.Selector)
		if err != nil {
			return nil, fmt.Errorf("invalid label selector: %v", err)
		}
		sortBy := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
		pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
		return pod, err
	case *api.Pod:
		return t, nil
	default:
		gvks, _, err := api.Scheme.ObjectKinds(object)
		if err != nil {
			return nil, err
		}
		return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0])
	}
}
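Every branch builds the same sortBy closure: sort.Reverse(controller.ActivePods(pods)) puts the most active pod first, so GetFirstPod attaches to a running, ready pod when one exists. A self-contained sketch of that ordering with a simplified pod type (the real controller.ActivePods compares more dimensions, such as scheduling state):

package main

import (
	"fmt"
	"sort"
)

// pod is a stand-in for api.Pod with just enough fields for ordering.
type pod struct {
	name    string
	ready   bool
	running bool
}

// activePods sorts not-ready < ready and pending < running, mirroring
// the intent of controller.ActivePods (simplified).
type activePods []*pod

func (s activePods) Len() int      { return len(s) }
func (s activePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s activePods) Less(i, j int) bool {
	if s[i].running != s[j].running {
		return !s[i].running // pending sorts before running
	}
	return !s[i].ready && s[j].ready // not-ready sorts before ready
}

func main() {
	pods := []*pod{
		{name: "pending", running: false},
		{name: "ready", running: true, ready: true},
		{name: "unready", running: true},
	}
	// Reverse the order so the most active pod comes first, as the
	// attach path above does.
	sort.Sort(sort.Reverse(activePods(pods)))
	fmt.Println(pods[0].name) // "ready"
}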
Code example #3
func TestGetFirstPod(t *testing.T) {
	labelSet := map[string]string{"test": "selector"}
	tests := []struct {
		name string

		podList  *api.PodList
		watching []watch.Event
		sortBy   func([]*api.Pod) sort.Interface

		expected    *api.Pod
		expectedNum int
		expectedErr bool
	}{
		{
			name:    "kubectl logs - two ready pods",
			podList: newPodList(2, -1, -1, labelSet),
			sortBy:  func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) },
			expected: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:              "pod-1",
					Namespace:         api.NamespaceDefault,
					CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
					Labels:            map[string]string{"test": "selector"},
				},
				Status: api.PodStatus{
					Conditions: []api.PodCondition{
						{
							Status: api.ConditionTrue,
							Type:   api.PodReady,
						},
					},
				},
			},
			expectedNum: 2,
		},
		{
			name:    "kubectl logs - one unhealthy, one healthy",
			podList: newPodList(2, -1, 1, labelSet),
			sortBy:  func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) },
			expected: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:              "pod-2",
					Namespace:         api.NamespaceDefault,
					CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC),
					Labels:            map[string]string{"test": "selector"},
				},
				Status: api.PodStatus{
					Conditions: []api.PodCondition{
						{
							Status: api.ConditionTrue,
							Type:   api.PodReady,
						},
					},
					ContainerStatuses: []api.ContainerStatus{{RestartCount: 5}},
				},
			},
			expectedNum: 2,
		},
		{
			name:    "kubectl attach - two ready pods",
			podList: newPodList(2, -1, -1, labelSet),
			sortBy:  func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
			expected: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:              "pod-1",
					Namespace:         api.NamespaceDefault,
					CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
					Labels:            map[string]string{"test": "selector"},
				},
				Status: api.PodStatus{
					Conditions: []api.PodCondition{
						{
							Status: api.ConditionTrue,
							Type:   api.PodReady,
						},
					},
				},
			},
			expectedNum: 2,
		},
		{
			name:    "kubectl attach - wait for ready pod",
			podList: newPodList(1, 1, -1, labelSet),
			watching: []watch.Event{
				{
					Type: watch.Modified,
					Object: &api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:              "pod-1",
							Namespace:         api.NamespaceDefault,
							CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
							Labels:            map[string]string{"test": "selector"},
						},
						Status: api.PodStatus{
							Conditions: []api.PodCondition{
								{
									Status: api.ConditionTrue,
									Type:   api.PodReady,
								},
							},
						},
					},
				},
			},
			sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
			expected: &api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name:              "pod-1",
					Namespace:         api.NamespaceDefault,
					CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
					Labels:            map[string]string{"test": "selector"},
				},
				Status: api.PodStatus{
					Conditions: []api.PodCondition{
						{
							Status: api.ConditionTrue,
							Type:   api.PodReady,
						},
					},
				},
			},
			expectedNum: 1,
		},
	}

	for i := range tests {
		test := tests[i]
		fake := fake.NewSimpleClientset(test.podList)
		if len(test.watching) > 0 {
			watcher := watch.NewFake()
			for _, event := range test.watching {
				switch event.Type {
				case watch.Added:
					go watcher.Add(event.Object)
				case watch.Modified:
					go watcher.Modify(event.Object)
				}
			}
			fake.PrependWatchReactor("pods", testcore.DefaultWatchReactor(watcher, nil))
		}
		selector := labels.Set(labelSet).AsSelector()

		pod, numPods, err := GetFirstPod(fake.Core(), api.NamespaceDefault, selector, 1*time.Minute, test.sortBy)
		if !test.expectedErr && err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}
		if test.expectedErr && err == nil {
			t.Errorf("%s: expected an error", test.name)
			continue
		}
		if test.expectedNum != numPods {
			t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods)
			continue
		}
		if !reflect.DeepEqual(test.expected, pod) {
			t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod)
		}
	}
}
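The watching cases drive GetFirstPod's wait path: when no pod in the initial list is ready, it blocks on a watch until a Modified event delivers one. The test feeds events from goroutines into watch.NewFake, which is essentially a channel; a plain-Go sketch of the same pattern:

package main

import "fmt"

type event struct {
	kind string // "Added" or "Modified"
	name string
}

func main() {
	// A fake watcher is just a channel the test feeds from a goroutine,
	// exactly as the test above does with watcher.Modify(event.Object).
	ch := make(chan event)
	go func() {
		ch <- event{kind: "Modified", name: "pod-1"}
		close(ch)
	}()

	// The code under test blocks on the watch until a suitable event
	// arrives (GetFirstPod waits for a ready pod the same way).
	for ev := range ch {
		if ev.kind == "Modified" {
			fmt.Println("saw ready pod:", ev.name)
			break
		}
	}
}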
Code example #4
// manageReplicas checks and updates replicas for the given replication controller.
// Does NOT modify <filteredPods>.
func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) error {
	diff := len(filteredPods) - int(rc.Spec.Replicas)
	rcKey, err := controller.KeyFunc(rc)
	if err != nil {
		return err
	}
	if diff == 0 {
		return nil
	}

	if diff < 0 {
		diff *= -1
		if diff > rm.burstReplicas {
			diff = rm.burstReplicas
		}
		// TODO: Track UIDs of creates just like deletes. The problem currently
		// is we'd need to wait on the result of a create to record the pod's
		// UID, which would require locking *across* the create, which will turn
		// into a performance bottleneck. We should generate a UID for the pod
		// beforehand and store it via ExpectCreations.
		errCh := make(chan error, diff)
		rm.expectations.ExpectCreations(rcKey, diff)
		var wg sync.WaitGroup
		wg.Add(diff)
		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wg.Done()
				var err error
				if rm.garbageCollectorEnabled {
					var trueVar = true
					controllerRef := &api.OwnerReference{
						APIVersion: getRCKind().GroupVersion().String(),
						Kind:       getRCKind().Kind,
						Name:       rc.Name,
						UID:        rc.UID,
						Controller: &trueVar,
					}
					err = rm.podControl.CreatePodsWithControllerRef(rc.Namespace, rc.Spec.Template, rc, controllerRef)
				} else {
					err = rm.podControl.CreatePods(rc.Namespace, rc.Spec.Template, rc)
				}
				if err != nil {
					// Decrement the expected number of creates because the informer won't observe this pod
					glog.V(2).Infof("Failed creation, decrementing expectations for controller %q/%q", rc.Namespace, rc.Name)
					rm.expectations.CreationObserved(rcKey)
					errCh <- err
					utilruntime.HandleError(err)
				}
			}()
		}
		wg.Wait()

		select {
		case err := <-errCh:
			// all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit.
			if err != nil {
				return err
			}
		default:
		}

		return nil
	}

	if diff > rm.burstReplicas {
		diff = rm.burstReplicas
	}
	glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
	// No need to sort pods if we are about to delete all of them
	if rc.Spec.Replicas != 0 {
		// Sort the pods in the order such that not-ready < ready, unscheduled
		// < scheduled, and pending < running. This ensures that we delete pods
		// in the earlier stages whenever possible.
		sort.Sort(controller.ActivePods(filteredPods))
	}
	// Snapshot the UIDs (ns/name) of the pods we're expecting to see
	// deleted, so we know to record their expectations exactly once either
	// when we see it as an update of the deletion timestamp, or as a delete.
	// Note that if the labels on a pod/rc change in a way that the pod gets
	// orphaned, the rc will only wake up after the expectations have
	// expired even if other pods are deleted.
	deletedPodKeys := []string{}
	for i := 0; i < diff; i++ {
		deletedPodKeys = append(deletedPodKeys, controller.PodKey(filteredPods[i]))
	}
	// We use pod namespace/name as a UID to wait for deletions, so if the
	// labels on a pod/rc change in a way that the pod gets orphaned, the
	// rc will only wake up after the expectation has expired.
	errCh := make(chan error, diff)
	rm.expectations.ExpectDeletions(rcKey, deletedPodKeys)
	var wg sync.WaitGroup
	wg.Add(diff)
	for i := 0; i < diff; i++ {
		go func(ix int) {
			defer wg.Done()
			if err := rm.podControl.DeletePod(rc.Namespace, filteredPods[ix].Name, rc); err != nil {
				// Decrement the expected number of deletes because the informer won't observe this deletion
				podKey := controller.PodKey(filteredPods[ix])
				glog.V(2).Infof("Failed to delete %v due to %v, decrementing expectations for controller %q/%q", podKey, err, rc.Namespace, rc.Name)
				rm.expectations.DeletionObserved(rcKey, podKey)
				errCh <- err
				utilruntime.HandleError(err)
			}
		}(i)
	}
	wg.Wait()

	select {
	case err := <-errCh:
		// all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit.
		if err != nil {
			return err
		}
	default:
	}

	return nil

}
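Unlike example #1, this variant surfaces failures to the caller: an error channel buffered to diff (so no goroutine ever blocks on send) is drained with a non-blocking select after wg.Wait(), returning only the first error since the rest are likely identical. The pattern in isolation, with a hypothetical fanOut helper:

package main

import (
	"errors"
	"fmt"
	"sync"
)

func fanOut(n int, work func(int) error) error {
	errCh := make(chan error, n) // buffered: senders never block
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(ix int) {
			defer wg.Done()
			if err := work(ix); err != nil {
				errCh <- err
			}
		}(i)
	}
	wg.Wait()

	// Non-blocking drain: return the first error, if any.
	select {
	case err := <-errCh:
		return err
	default:
		return nil
	}
}

func main() {
	err := fanOut(3, func(ix int) error {
		if ix == 1 {
			return errors.New("create failed")
		}
		return nil
	})
	fmt.Println(err) // "create failed"
}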
Code example #5
File: controller.go Project: ruizeng/kubernetes
// manageJob is the core method responsible for managing the number of running
// pods according to what is specified in the job.Spec.
func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *extensions.Job) int {
	var activeLock sync.Mutex
	active := len(activePods)
	parallelism := *job.Spec.Parallelism
	jobKey, err := controller.KeyFunc(job)
	if err != nil {
		glog.Errorf("Couldn't get key for job %#v: %v", job, err)
		return 0
	}

	if active > parallelism {
		diff := active - parallelism
		jm.expectations.ExpectDeletions(jobKey, diff)
		glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
		// Sort the pods in the order such that not-ready < ready, unscheduled
		// < scheduled, and pending < running. This ensures that we delete pods
		// in the earlier stages whenever possible.
		sort.Sort(controller.ActivePods(activePods))

		active -= diff
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name); err != nil {
					defer util.HandleError(err)
					// Decrement the expected number of deletes because the informer won't observe this deletion
					jm.expectations.DeletionObserved(jobKey)
					activeLock.Lock()
					active++
					activeLock.Unlock()
				}
			}(i)
		}
		wait.Wait()

	} else if active < parallelism {
		// how many executions are left to run
		diff := *job.Spec.Completions - succeeded
		// limit to parallelism and count active pods as well
		if diff > parallelism {
			diff = parallelism
		}
		diff -= active
		jm.expectations.ExpectCreations(jobKey, diff)
		glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, parallelism, diff)

		active += diff
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := jm.podControl.CreatePods(job.Namespace, &job.Spec.Template, job); err != nil {
					defer util.HandleError(err)
					// Decrement the expected number of creates because the informer won't observe this pod
					jm.expectations.CreationObserved(jobKey)
					activeLock.Lock()
					active--
					activeLock.Unlock()
				}
			}()
		}
		wait.Wait()
	}

	return active
}
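The create branch sizes its batch as remaining completions capped by parallelism, minus the pods already active. A pure-function sketch of that arithmetic (illustrative name; the final clamp to zero is a guard the newer version in example #10 adds explicitly):

package main

import "fmt"

// createsNeeded mirrors the diff computation in the create branch above.
func createsNeeded(completions, succeeded, parallelism, active int) int {
	diff := completions - succeeded // executions still to run
	if diff > parallelism {
		diff = parallelism // never exceed allowed parallelism
	}
	diff -= active // count pods already running
	if diff < 0 {
		diff = 0 // guard made explicit in example #10
	}
	return diff
}

func main() {
	// 10 completions, 4 done, parallelism 3, 1 pod active: create 2.
	fmt.Println(createsNeeded(10, 4, 3, 1)) // 2
}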
Code example #6
File: factory.go Project: juanluisvaladas/origin
func (f *Factory) PodForResource(resource string, timeout time.Duration) (string, error) {
	sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return "", err
	}
	mapper, _ := f.Object(false)
	resourceType, name, err := util.ResolveResource(api.Resource("pods"), resource, mapper)
	if err != nil {
		return "", err
	}

	switch resourceType {
	case api.Resource("pods"):
		return name, nil
	case api.Resource("replicationcontrollers"):
		kc, err := f.Client()
		if err != nil {
			return "", err
		}
		rc, err := kc.ReplicationControllers(namespace).Get(name)
		if err != nil {
			return "", err
		}
		selector := labels.SelectorFromSet(rc.Spec.Selector)
		pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy)
		if err != nil {
			return "", err
		}
		return pod.Name, nil
	case deployapi.Resource("deploymentconfigs"):
		oc, kc, err := f.Clients()
		if err != nil {
			return "", err
		}
		dc, err := oc.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return "", err
		}
		selector := labels.SelectorFromSet(dc.Spec.Selector)
		pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy)
		if err != nil {
			return "", err
		}
		return pod.Name, nil
	case extensions.Resource("daemonsets"):
		kc, err := f.Client()
		if err != nil {
			return "", err
		}
		ds, err := kc.Extensions().DaemonSets(namespace).Get(name)
		if err != nil {
			return "", err
		}
		selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector)
		if err != nil {
			return "", err
		}
		pod, _, err := cmdutil.GetFirstPod(kc, namespace, selector, timeout, sortBy)
		if err != nil {
			return "", err
		}
		return pod.Name, nil
	case extensions.Resource("jobs"):
		kc, err := f.Client()
		if err != nil {
			return "", err
		}
		job, err := kc.Extensions().Jobs(namespace).Get(name)
		if err != nil {
			return "", err
		}
		return podNameForJob(job, kc, timeout, sortBy)
	case batch.Resource("jobs"):
		kc, err := f.Client()
		if err != nil {
			return "", err
		}
		job, err := kc.Batch().Jobs(namespace).Get(name)
		if err != nil {
			return "", err
		}
		return podNameForJob(job, kc, timeout, sortBy)
	default:
		return "", fmt.Errorf("remote shell for %s is not supported", resourceType)
	}
}
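Every case reduces to the same shape: fetch the owning object, derive a label selector, and hand it to cmdutil.GetFirstPod with the shared sortBy. As a rough illustration of what labels.SelectorFromSet produces for the equality-based case (real selectors also support set-based operators):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// selectorFromSet renders a map as an equality-based label selector,
// e.g. {"app":"web","tier":"db"} -> "app=web,tier=db" (keys sorted for
// deterministic output).
func selectorFromSet(set map[string]string) string {
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, k+"="+set[k])
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(selectorFromSet(map[string]string{"deploymentconfig": "frontend"}))
	// Output: deploymentconfig=frontend
}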
Code example #7
File: factory.go Project: juanluisvaladas/origin
// NewFactory creates an object that holds common methods across all OpenShift commands
func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
	restMapper := registered.RESTMapper()

	clients := &clientCache{
		clients: make(map[string]*client.Client),
		configs: make(map[string]*restclient.Config),
		loader:  clientConfig,
	}

	w := &Factory{
		Factory:                cmdutil.NewFactory(clientConfig),
		OpenShiftClientConfig:  clientConfig,
		clients:                clients,
		ImageResolutionOptions: &imageResolutionOptions{},
	}

	w.Object = func(bool) (meta.RESTMapper, runtime.ObjectTyper) {
		defaultMapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: restMapper}}
		defaultTyper := api.Scheme

		// Output using whatever version was negotiated in the client cache. The
		// version we decode with may not be the same as what the server requires.
		cfg, err := clients.ClientConfigForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}

		cmdApiVersion := unversioned.GroupVersion{}
		if cfg.GroupVersion != nil {
			cmdApiVersion = *cfg.GroupVersion
		}

		// at this point we've negotiated and can get the client
		oclient, err := clients.ClientForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}

		cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host)
		cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(oclient.RESTClient), cacheDir, time.Duration(10*time.Minute))

		// if we can't find the server version or its too old to have Kind information in the discovery doc, skip the discovery RESTMapper
		// and use our hardcoded levels
		mapper := registered.RESTMapper()
		if serverVersion, err := cachedDiscoverClient.ServerVersion(); err == nil && useDiscoveryRESTMapper(serverVersion.GitVersion) {
			mapper = restmapper.NewDiscoveryRESTMapper(cachedDiscoverClient)
		}
		mapper = NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper})
		return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme
	}

	w.UnstructuredObject = func() (meta.RESTMapper, runtime.ObjectTyper, error) {
		// load a discovery client from the default config
		cfg, err := clients.ClientConfigForVersion(nil)
		if err != nil {
			return nil, nil, err
		}
		dc, err := discovery.NewDiscoveryClientForConfig(cfg)
		if err != nil {
			return nil, nil, err
		}
		cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host)
		cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(dc.RESTClient), cacheDir, time.Duration(10*time.Minute))

		// enumerate all group resources
		groupResources, err := discovery.GetAPIGroupResources(cachedDiscoverClient)
		if err != nil {
			return nil, nil, err
		}

		// Register unknown APIs as third party for now to make
		// validation happy. TODO perhaps make a dynamic schema
		// validator to avoid this.
		for _, group := range groupResources {
			for _, version := range group.Group.Versions {
				gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version}
				if !registered.IsRegisteredVersion(gv) {
					registered.AddThirdPartyAPIGroupVersions(gv)
				}
			}
		}

		// construct unstructured mapper and typer
		mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured)
		typer := discovery.NewUnstructuredObjectTyper(groupResources)
		return NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper}), typer, nil
	}

	kClientForMapping := w.Factory.ClientForMapping
	w.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		}
		return kClientForMapping(mapping)
	}

	kUnstructuredClientForMapping := w.Factory.UnstructuredClientForMapping
	w.UnstructuredClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := client.SetOpenShiftDefaults(cfg); err != nil {
				return nil, err
			}
			cfg.APIPath = "/apis"
			if mapping.GroupVersionKind.Group == api.GroupName {
				cfg.APIPath = "/oapi"
			}
			gv := mapping.GroupVersionKind.GroupVersion()
			cfg.ContentConfig = dynamic.ContentConfig()
			cfg.GroupVersion = &gv
			return restclient.RESTClientFor(cfg)
		}
		return kUnstructuredClientForMapping(mapping)
	}

	// Save original Describer function
	kDescriberFunc := w.Factory.Describer
	w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			oClient, kClient, err := w.Clients()
			if err != nil {
				return nil, fmt.Errorf("unable to create client %s: %v", mapping.GroupVersionKind.Kind, err)
			}

			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			cfg, err := clients.ClientConfigForVersion(&mappingVersion)
			if err != nil {
				return nil, fmt.Errorf("unable to load a client %s: %v", mapping.GroupVersionKind.Kind, err)
			}

			describer, ok := describe.DescriberFor(mapping.GroupVersionKind.GroupKind(), oClient, kClient, cfg.Host)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
			}
			return describer, nil
		}
		return kDescriberFunc(mapping)
	}
	kScalerFunc := w.Factory.Scaler
	w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
		if mapping.GroupVersionKind.GroupKind() == deployapi.Kind("DeploymentConfig") {
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigScaler(oc, kc), nil
		}
		return kScalerFunc(mapping)
	}
	kReaperFunc := w.Factory.Reaper
	w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigReaper(oc, kc), nil
		case authorizationapi.Kind("Role"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authorizationreaper.NewRoleReaper(oc, oc), nil
		case authorizationapi.Kind("ClusterRole"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil
		case userapi.Kind("User"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authenticationreaper.NewUserReaper(
				client.UsersInterface(oc),
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case userapi.Kind("Group"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return authenticationreaper.NewGroupReaper(
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case buildapi.Kind("BuildConfig"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return buildcmd.NewBuildConfigReaper(oc), nil
		}
		return kReaperFunc(mapping)
	}
	kGenerators := w.Factory.Generators
	w.Generators = func(cmdName string) map[string]kubectl.Generator {
		originGenerators := DefaultGenerators(cmdName)
		kubeGenerators := kGenerators(cmdName)

		ret := map[string]kubectl.Generator{}
		for k, v := range kubeGenerators {
			ret[k] = v
		}
		for k, v := range originGenerators {
			ret[k] = v
		}
		return ret
	}
	kMapBasedSelectorForObjectFunc := w.Factory.MapBasedSelectorForObject
	w.MapBasedSelectorForObject = func(object runtime.Object) (string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return kubectl.MakeLabels(t.Spec.Selector), nil
		default:
			return kMapBasedSelectorForObjectFunc(object)
		}
	}
	kPortsForObjectFunc := w.Factory.PortsForObject
	w.PortsForObject = func(object runtime.Object) ([]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getPorts(t.Spec.Template.Spec), nil
		default:
			return kPortsForObjectFunc(object)
		}
	}
	kLogsForObjectFunc := w.Factory.LogsForObject
	w.LogsForObject = func(object, options runtime.Object) (*restclient.Request, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			dopts, ok := options.(*deployapi.DeploymentLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a DeploymentLogOptions")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil
		case *buildapi.Build:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			if bopts.Version != nil {
				return nil, errors.New("cannot specify a version and a build")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil
		case *buildapi.BuildConfig:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			builds, err := oc.Builds(t.Namespace).List(api.ListOptions{})
			if err != nil {
				return nil, err
			}
			builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigPredicate(t.Name))
			if len(builds.Items) == 0 {
				return nil, fmt.Errorf("no builds found for %q", t.Name)
			}
			if bopts.Version != nil {
				// If a version has been specified, try to get the logs from that build.
				desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version))
				return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil
			}
			sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items)))
			return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil
		default:
			return kLogsForObjectFunc(object, options)
		}
	}
	// Saves current resource name (or alias if any) in PrintOptions. Once saved, it will not be overwritten by the
	// kubernetes resource alias look-up, as it will notice a non-empty value in `options.Kind`
	w.Printer = func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) {
		if mapping != nil {
			options.Kind = mapping.Resource
			if alias, ok := resourceShortFormFor(mapping.Resource); ok {
				options.Kind = alias
			}
		}
		return describe.NewHumanReadablePrinter(options), nil
	}
	// PrintResourceInfos receives a list of resource infos and prints versioned objects if a generic output format was specified
	// otherwise, it iterates through info objects, printing each resource with a unique printer for its mapping
	w.PrintResourceInfos = func(cmd *cobra.Command, infos []*resource.Info, out io.Writer) error {
		printer, generic, err := cmdutil.PrinterForCommand(cmd)
		if err != nil {
			return err
		}
		if !generic {
			for _, info := range infos {
				mapping := info.ResourceMapping()
				printer, err := w.PrinterForMapping(cmd, mapping, false)
				if err != nil {
					return err
				}
				if err := printer.PrintObj(info.Object, out); err != nil {
					return err
				}
			}
			return nil
		}

		clientConfig, err := w.ClientConfig()
		if err != nil {
			return err
		}
		outputVersion, err := cmdutil.OutputVersion(cmd, clientConfig.GroupVersion)
		if err != nil {
			return err
		}
		object, err := resource.AsVersionedObject(infos, len(infos) != 1, outputVersion, api.Codecs.LegacyCodec(outputVersion))
		if err != nil {
			return err
		}
		return printer.PrintObj(object, out)

	}
	kCanBeExposed := w.Factory.CanBeExposed
	w.CanBeExposed = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeExposed(kind)
	}
	kCanBeAutoscaled := w.Factory.CanBeAutoscaled
	w.CanBeAutoscaled = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeAutoscaled(kind)
	}
	kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject
	w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			_, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			selector := labels.SelectorFromSet(t.Spec.Selector)
			f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
			pod, _, err := cmdutil.GetFirstPod(kc, t.Namespace, selector, 1*time.Minute, f)
			return pod, err
		default:
			return kAttachablePodForObjectFunc(object)
		}
	}
	kUpdatePodSpecForObject := w.Factory.UpdatePodSpecForObject
	w.UpdatePodSpecForObject = func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) {
		switch t := obj.(type) {
		case *deployapi.DeploymentConfig:
			template := t.Spec.Template
			if template == nil {
				template = &api.PodTemplateSpec{}
				t.Spec.Template = template
			}
			return true, fn(&template.Spec)
		default:
			return kUpdatePodSpecForObject(obj, fn)
		}
	}
	kProtocolsForObject := w.Factory.ProtocolsForObject
	w.ProtocolsForObject = func(object runtime.Object) (map[string]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getProtocols(t.Spec.Template.Spec), nil
		default:
			return kProtocolsForObject(object)
		}
	}

	kSwaggerSchemaFunc := w.Factory.SwaggerSchema
	w.Factory.SwaggerSchema = func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
		if !latest.OriginKind(gvk) {
			return kSwaggerSchemaFunc(gvk)
		}
		// TODO: we need to register the OpenShift API under the Kube group, and start returning the OpenShift
		// group from the scheme.
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		return w.OriginSwaggerSchema(oc.RESTClient, gvk.GroupVersion())
	}

	w.EditorEnvs = func() []string {
		return []string{"OC_EDITOR", "EDITOR"}
	}
	w.PrintObjectSpecificMessage = func(obj runtime.Object, out io.Writer) {}
	kPauseObjectFunc := w.Factory.PauseObject
	w.Factory.PauseObject = func(object runtime.Object) (bool, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = true
			oc, _, err := w.Clients()
			if err != nil {
				return false, err
			}
			_, err = oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Pause the deployer containers.
			return false, err
		default:
			return kPauseObjectFunc(object)
		}
	}
	kResumeObjectFunc := w.Factory.ResumeObject
	w.Factory.ResumeObject = func(object runtime.Object) (bool, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if !t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = false
			oc, _, err := w.Clients()
			if err != nil {
				return false, err
			}
			_, err = oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Resume the deployer containers.
			return false, err
		default:
			return kResumeObjectFunc(object)
		}
	}
	kResolveImageFunc := w.Factory.ResolveImage
	w.Factory.ResolveImage = func(image string) (string, error) {
		options := w.ImageResolutionOptions.(*imageResolutionOptions)
		if imageutil.IsDocker(options.Source) {
			return kResolveImageFunc(image)
		}
		oc, _, err := w.Clients()
		if err != nil {
			return "", err
		}
		namespace, _, err := w.DefaultNamespace()
		if err != nil {
			return "", err
		}
		return imageutil.ResolveImagePullSpec(oc, oc, options.Source, image, namespace)
	}
	kHistoryViewerFunc := w.Factory.HistoryViewer
	w.Factory.HistoryViewer = func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigHistoryViewer(oc, kc), nil
		}
		return kHistoryViewerFunc(mapping)
	}
	kRollbackerFunc := w.Factory.Rollbacker
	w.Factory.Rollbacker = func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			oc, _, err := w.Clients()
			if err != nil {
				return nil, err
			}
			return deploycmd.NewDeploymentConfigRollbacker(oc), nil
		}
		return kRollbackerFunc(mapping)
	}
	kStatusViewerFunc := w.Factory.StatusViewer
	w.Factory.StatusViewer = func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deploycmd.NewDeploymentConfigStatusViewer(oc), nil
		}
		return kStatusViewerFunc(mapping)
	}

	return w
}
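Nearly every override in this constructor follows one idiom: capture the embedded kubectl implementation in a local (kScalerFunc, kReaperFunc, and so on), install a replacement that handles OpenShift kinds, and fall through to the saved function for everything else. A minimal sketch of that wrap-and-delegate pattern with hypothetical names:

package main

import "fmt"

type factory struct {
	// Describe maps a kind name to a human-readable description.
	Describe func(kind string) string
}

func main() {
	f := &factory{Describe: func(kind string) string {
		return "kubernetes describer for " + kind
	}}

	// Save the original, then wrap it: handle our kinds, delegate the rest.
	kDescribe := f.Describe
	f.Describe = func(kind string) string {
		if kind == "DeploymentConfig" {
			return "openshift describer for DeploymentConfig"
		}
		return kDescribe(kind)
	}

	fmt.Println(f.Describe("DeploymentConfig")) // handled by the wrapper
	fmt.Println(f.Describe("Pod"))              // delegated to the original
}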
Code example #8
File: factory.go Project: ZenoRewn/origin
// NewFactory creates a factory with the default Kubernetes resources defined
// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// if optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		// If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that
		// have been dynamically added to the apiserver
		Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}
			if discoverDynamicAPIs {
				client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"})
				CheckErr(err)

				versions, gvks, err := GetThirdPartyGroupVersions(client.Discovery())
				CheckErr(err)
				if len(versions) > 0 {
					priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper))
						return nil, nil
					}
					multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper))
						return nil, nil
					}
					groupsMap := map[string][]unversioned.GroupVersion{}
					for _, version := range versions {
						groupsMap[version.Group] = append(groupsMap[version.Group], version)
					}
					for group, versionList := range groupsMap {
						preferredExternalVersion := versionList[0]

						thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group))
						CheckErr(err)
						accessor := meta.NewAccessor()
						groupMeta := apimachinery.GroupMeta{
							GroupVersion:  preferredExternalVersion,
							GroupVersions: versionList,
							RESTMapper:    thirdPartyMapper,
							SelfLinker:    runtime.SelfLinker(accessor),
							InterfacesFor: makeInterfacesFor(versionList),
						}

						CheckErr(registered.RegisterGroup(groupMeta))
						registered.AddThirdPartyAPIGroupVersions(versionList...)
						multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...)
					}
					priorityMapper.Delegate = multiMapper
					// Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we
					// don't re-assign, the above assignment won't actually update mapper.RESTMapper
					mapper.RESTMapper = priorityMapper
				}
			}
			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
				ResourcePriority: []unversioned.GroupVersionResource{
					{Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
				},
				KindPriority: []unversioned.GroupVersionKind{
					{Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
				},
			}
			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			gvk := mapping.GroupVersionKind
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			c, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			switch gvk.Group {
			case api.GroupName:
				return c.RESTClient, nil
			case autoscaling.GroupName:
				return c.AutoscalingClient.RESTClient, nil
			case batch.GroupName:
				return c.BatchClient.RESTClient, nil
			case apps.GroupName:
				return c.AppsClient.RESTClient, nil
			case extensions.GroupName:
				return c.ExtensionsClient.RESTClient, nil
			case api.SchemeGroupVersion.Group:
				return c.RESTClient, nil
			case extensions.SchemeGroupVersion.Group:
				return c.ExtensionsClient.RESTClient, nil
			default:
				if !registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
					return nil, fmt.Errorf("unknown api group/version: %s", gvk.String())
				}
				cfg, err := clientConfig.ClientConfig()
				if err != nil {
					return nil, err
				}
				gv := gvk.GroupVersion()
				cfg.GroupVersion = &gv
				cfg.APIPath = "/apis"
				cfg.Codec = thirdpartyresourcedata.NewCodec(c.ExtensionsClient.RESTClient.Codec(), gvk.Kind)
				return restclient.RESTClientFor(cfg)
			}
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			if toInternal {
				return api.Codecs.UniversalDecoder()
			}
			return api.Codecs.UniversalDeserializer()
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvk)
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			clientset := clientset.FromUnversionedClient(client)
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *batch.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
		PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) {
			switch obj := obj.(type) {
			case *api.Service:
				if obj.Spec.Type == api.ServiceTypeNodePort {
					msg := fmt.Sprintf(
						`You have exposed your service on an external port on all nodes in your
cluster.  If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (%s) to serve traffic.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`,
						makePortsString(obj.Spec.Ports, true))
					out.Write([]byte(msg))
				}
			}
		},
	}
}
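CanBeExposed and CanBeAutoscaled are plain kind gates: a switch over the supported GroupKinds whose default returns an error. The same gate sketched over bare strings:

package main

import (
	"errors"
	"fmt"
)

// canBeExposed mirrors the whitelist switch above, with string kinds
// standing in for unversioned.GroupKind.
func canBeExposed(kind string) error {
	switch kind {
	case "ReplicationController", "Service", "Pod", "Deployment", "ReplicaSet":
		return nil // supported: nothing to do
	default:
		return errors.New("cannot expose a " + kind)
	}
}

func main() {
	fmt.Println(canBeExposed("Service")) // <nil>
	fmt.Println(canBeExposed("Node"))    // cannot expose a Node
}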
Code example #9
File: replica_set.go Project: Cloven/minikube
// manageReplicas checks and updates replicas for the given ReplicaSet.
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) {
	diff := len(filteredPods) - int(rs.Spec.Replicas)
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		glog.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)
		return
	}
	if diff < 0 {
		diff *= -1
		if diff > rsc.burstReplicas {
			diff = rsc.burstReplicas
		}
		// TODO: Track UIDs of creates just like deletes. The problem currently
		// is we'd need to wait on the result of a create to record the pod's
		// UID, which would require locking *across* the create, which will turn
		// into a performance bottleneck. We should generate a UID for the pod
		// beforehand and store it via ExpectCreations.
		rsc.expectations.ExpectCreations(rsKey, diff)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
		for i := 0; i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := rsc.podControl.CreatePods(rs.Namespace, &rs.Spec.Template, rs); err != nil {
					// Decrement the expected number of creates because the informer won't observe this pod
					glog.V(2).Infof("Failed creation, decrementing expectations for replica set %q/%q", rs.Namespace, rs.Name)
					rsc.expectations.CreationObserved(rsKey)
					utilruntime.HandleError(err)
				}
			}()
		}
		wait.Wait()
	} else if diff > 0 {
		if diff > rsc.burstReplicas {
			diff = rsc.burstReplicas
		}
		glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
		// No need to sort pods if we are about to delete all of them
		if rs.Spec.Replicas != 0 {
			// Sort the pods in the order such that not-ready < ready, unscheduled
			// < scheduled, and pending < running. This ensures that we delete pods
			// in the earlier stages whenever possible.
			sort.Sort(controller.ActivePods(filteredPods))
		}
		// Snapshot the UIDs (ns/name) of the pods we're expecting to see
		// deleted, so we know to record their expectations exactly once either
		// when we see it as an update of the deletion timestamp, or as a delete.
		// Note that if the labels on a pod/rs change in a way that the pod gets
		// orphaned, the rs will only wake up after the expectations have
		// expired even if other pods are deleted.
		deletedPodKeys := []string{}
		for i := 0; i < diff; i++ {
			deletedPodKeys = append(deletedPodKeys, controller.PodKey(filteredPods[i]))
		}
		rsc.expectations.ExpectDeletions(rsKey, deletedPodKeys)
		wait := sync.WaitGroup{}
		wait.Add(diff)
		for i := 0; i < diff; i++ {
			go func(ix int) {
				defer wait.Done()
				if err := rsc.podControl.DeletePod(rs.Namespace, filteredPods[ix].Name, rs); err != nil {
					// Decrement the expected number of deletes because the informer won't observe this deletion
					podKey := controller.PodKey(filteredPods[ix])
					glog.V(2).Infof("Failed to delete %v, decrementing expectations for controller %q/%q", podKey, rs.Namespace, rs.Name)
					rsc.expectations.DeletionObserved(rsKey, podKey)
					utilruntime.HandleError(err)
				}
			}(i)
		}
		wait.Wait()
	}
}
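Where example #1 merely counted deletions, this version snapshots the namespace/name keys of the pods it is about to delete, so each expectation is lowered exactly once whether the deletion shows up as a delete event or as a deletion-timestamp update. A map-backed sketch of keyed deletion expectations (illustrative, not the real implementation in pkg/controller):

package main

import (
	"fmt"
	"sync"
)

type deletionExpectations struct {
	mu   sync.Mutex
	keys map[string]map[string]bool // controller key -> set of pod keys
}

func newDeletionExpectations() *deletionExpectations {
	return &deletionExpectations{keys: map[string]map[string]bool{}}
}

// ExpectDeletions snapshots the pod keys we expect to see deleted.
func (d *deletionExpectations) ExpectDeletions(key string, podKeys []string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	set := map[string]bool{}
	for _, pk := range podKeys {
		set[pk] = true
	}
	d.keys[key] = set
}

// DeletionObserved removes one pod key; duplicate observations are no-ops,
// which is the point of tracking keys instead of a bare count.
func (d *deletionExpectations) DeletionObserved(key, podKey string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	delete(d.keys[key], podKey)
}

func (d *deletionExpectations) Satisfied(key string) bool {
	d.mu.Lock()
	defer d.mu.Unlock()
	return len(d.keys[key]) == 0
}

func main() {
	e := newDeletionExpectations()
	e.ExpectDeletions("ns/rs", []string{"ns/pod-1", "ns/pod-2"})
	e.DeletionObserved("ns/rs", "ns/pod-1")
	e.DeletionObserved("ns/rs", "ns/pod-1") // duplicate: no effect
	fmt.Println(e.Satisfied("ns/rs"))       // false: pod-2 outstanding
}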
Code example #10
File: jobcontroller.go Project: AdoHe/kubernetes
// manageJob is the core method responsible for managing the number of running
// pods according to what is specified in the job.Spec.
func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int32, job *batch.Job) int32 {
	var activeLock sync.Mutex
	active := int32(len(activePods))
	parallelism := *job.Spec.Parallelism
	jobKey, err := controller.KeyFunc(job)
	if err != nil {
		glog.Errorf("Couldn't get key for job %#v: %v", job, err)
		return 0
	}

	if active > parallelism {
		diff := active - parallelism
		jm.expectations.ExpectDeletions(jobKey, int(diff))
		glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
		// Sort the pods in the order such that not-ready < ready, unscheduled
		// < scheduled, and pending < running. This ensures that we delete pods
		// in the earlier stages whenever possible.
		sort.Sort(controller.ActivePods(activePods))

		active -= diff
		wait := sync.WaitGroup{}
		wait.Add(int(diff))
		for i := int32(0); i < diff; i++ {
			go func(ix int32) {
				defer wait.Done()
				if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, job); err != nil {
					defer utilruntime.HandleError(err)
					// Decrement the expected number of deletes because the informer won't observe this deletion
					jm.expectations.DeletionObserved(jobKey)
					activeLock.Lock()
					active++
					activeLock.Unlock()
				}
			}(i)
		}
		wait.Wait()

	} else if active < parallelism {
		wantActive := int32(0)
		if job.Spec.Completions == nil {
			// Job does not specify a number of completions.  Therefore, the number
			// active should equal parallelism, unless the job has seen at least
			// one success, in which case leave whatever is running as is.
			if succeeded > 0 {
				wantActive = active
			} else {
				wantActive = parallelism
			}
		} else {
			// Job specifies a specific number of completions.  Therefore, the
			// number active should never exceed the number of remaining completions.
			wantActive = *job.Spec.Completions - succeeded
			if wantActive > parallelism {
				wantActive = parallelism
			}
		}
		diff := wantActive - active
		if diff < 0 {
			glog.Errorf("More active than wanted: job %q, want %d, have %d", jobKey, wantActive, active)
			diff = 0
		}
		jm.expectations.ExpectCreations(jobKey, int(diff))
		glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff)

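		// Account for the creates up front; a failed create below subtracts
		// the pod back out of the active count.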
		active += diff
		wait := sync.WaitGroup{}
		wait.Add(int(diff))
		for i := int32(0); i < diff; i++ {
			go func() {
				defer wait.Done()
				if err := jm.podControl.CreatePods(job.Namespace, &job.Spec.Template, job); err != nil {
					defer utilruntime.HandleError(err)
					// Decrement the expected number of creates because the informer won't observe this pod
					jm.expectations.CreationObserved(jobKey)
					activeLock.Lock()
					active--
					activeLock.Unlock()
				}
			}()
		}
		wait.Wait()
	}

	return active
}
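The scale-up branch of manageJob is easiest to check as a pure function. The sketch below reproduces its wantActive arithmetic; the function name and packaging are ours, since the job controller inlines this logic.

package main

import "fmt"

// wantActivePods mirrors the scale-up arithmetic in manageJob above.
// A nil completions means the job only bounds parallelism; once such a
// job has succeeded at least once, the pods already running are left alone.
func wantActivePods(parallelism, active, succeeded int32, completions *int32) int32 {
	if completions == nil {
		if succeeded > 0 {
			return active
		}
		return parallelism
	}
	want := *completions - succeeded
	if want > parallelism {
		want = parallelism
	}
	return want
}

func main() {
	five := int32(5)
	// 4 of 5 completions done, parallelism 3: only 1 more pod is wanted.
	fmt.Println(wantActivePods(3, 0, 4, &five)) // 1
}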
Code example #11
File: factory.go Project: legionus/origin
// NewFactory creates an object that holds common methods across all OpenShift commands
func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
	restMapper := registered.RESTMapper()

	clients := &clientCache{
		clients: make(map[string]*client.Client),
		configs: make(map[string]*restclient.Config),
		loader:  clientConfig,
	}

	w := &Factory{
		Factory:               cmdutil.NewFactory(clientConfig),
		OpenShiftClientConfig: clientConfig,
		clients:               clients,
	}

	w.Object = func(bool) (meta.RESTMapper, runtime.ObjectTyper) {

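		// These defaults are returned whenever a negotiated client cannot be
		// built, so the command falls back to the statically registered mappings.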
		defaultMapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: restMapper}}
		defaultTyper := api.Scheme

		// Output using whatever version was negotiated in the client cache. The
		// version we decode with may not be the same as what the server requires.
		cfg, err := clients.ClientConfigForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}

		cmdApiVersion := unversioned.GroupVersion{}
		if cfg.GroupVersion != nil {
			cmdApiVersion = *cfg.GroupVersion
		}

		// at this point we've negotiated and can get the client
		oclient, err := clients.ClientForVersion(nil)
		if err != nil {
			return defaultMapper, defaultTyper
		}

		cacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube"), cfg.Host)
		cachedDiscoverClient := NewCachedDiscoveryClient(client.NewDiscoveryClient(oclient.RESTClient), cacheDir, time.Duration(10*time.Minute))

		mapper := restmapper.NewDiscoveryRESTMapper(cachedDiscoverClient)
		mapper = NewShortcutExpander(cachedDiscoverClient, kubectl.ShortcutExpander{RESTMapper: mapper})
		return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme
	}

	kClientForMapping := w.Factory.ClientForMapping
	w.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		}
		return kClientForMapping(mapping)
	}

	// Save original Describer function
	kDescriberFunc := w.Factory.Describer
	w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			oClient, kClient, err := w.Clients()
			if err != nil {
				return nil, fmt.Errorf("unable to create client %s: %v", mapping.GroupVersionKind.Kind, err)
			}

			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			cfg, err := clients.ClientConfigForVersion(&mappingVersion)
			if err != nil {
				return nil, fmt.Errorf("unable to load a client %s: %v", mapping.GroupVersionKind.Kind, err)
			}

			describer, ok := describe.DescriberFor(mapping.GroupVersionKind.GroupKind(), oClient, kClient, cfg.Host)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
			}
			return describer, nil
		}
		return kDescriberFunc(mapping)
	}
	kScalerFunc := w.Factory.Scaler
	w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}

		if mapping.GroupVersionKind.GroupKind() == deployapi.Kind("DeploymentConfig") {
			return deployscaler.NewDeploymentConfigScaler(oc, kc), nil
		}
		return kScalerFunc(mapping)
	}
	kReaperFunc := w.Factory.Reaper
	w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deployreaper.NewDeploymentConfigReaper(oc, kc), nil
		case authorizationapi.Kind("Role"):
			return authorizationreaper.NewRoleReaper(oc, oc), nil
		case authorizationapi.Kind("ClusterRole"):
			return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil
		case userapi.Kind("User"):
			return authenticationreaper.NewUserReaper(
				client.UsersInterface(oc),
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case userapi.Kind("Group"):
			return authenticationreaper.NewGroupReaper(
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case buildapi.Kind("BuildConfig"):
			return buildreaper.NewBuildConfigReaper(oc), nil
		}
		return kReaperFunc(mapping)
	}
	kGenerators := w.Factory.Generators
	w.Generators = func(cmdName string) map[string]kubectl.Generator {
		originGenerators := DefaultGenerators(cmdName)
		kubeGenerators := kGenerators(cmdName)

		ret := map[string]kubectl.Generator{}
		for k, v := range kubeGenerators {
			ret[k] = v
		}
		for k, v := range originGenerators {
			ret[k] = v
		}
		return ret
	}
	kMapBasedSelectorForObjectFunc := w.Factory.MapBasedSelectorForObject
	w.MapBasedSelectorForObject = func(object runtime.Object) (string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return kubectl.MakeLabels(t.Spec.Selector), nil
		default:
			return kMapBasedSelectorForObjectFunc(object)
		}
	}
	kPortsForObjectFunc := w.Factory.PortsForObject
	w.PortsForObject = func(object runtime.Object) ([]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getPorts(t.Spec.Template.Spec), nil
		default:
			return kPortsForObjectFunc(object)
		}
	}
	kLogsForObjectFunc := w.Factory.LogsForObject
	w.LogsForObject = func(object, options runtime.Object) (*restclient.Request, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			dopts, ok := options.(*deployapi.DeploymentLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a DeploymentLogOptions")
			}
			return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil
		case *buildapi.Build:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			if bopts.Version != nil {
				return nil, errors.New("cannot specify a version and a build")
			}
			return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil
		case *buildapi.BuildConfig:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			builds, err := oc.Builds(t.Namespace).List(api.ListOptions{})
			if err != nil {
				return nil, err
			}
			builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigPredicate(t.Name))
			if len(builds.Items) == 0 {
				return nil, fmt.Errorf("no builds found for %q", t.Name)
			}
			if bopts.Version != nil {
				// If a version has been specified, try to get the logs from that build.
				desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version))
				return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil
			}
			sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items)))
			return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil
		default:
			return kLogsForObjectFunc(object, options)
		}
	}
	w.Printer = func(mapping *meta.RESTMapping, noHeaders, withNamespace, wide bool, showAll bool, showLabels, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
		return describe.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
	}
	kCanBeExposed := w.Factory.CanBeExposed
	w.CanBeExposed = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeExposed(kind)
	}
	kCanBeAutoscaled := w.Factory.CanBeAutoscaled
	w.CanBeAutoscaled = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeAutoscaled(kind)
	}
	kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject
	w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			_, kc, err := w.Clients()
			if err != nil {
				return nil, err
			}
			selector := labels.SelectorFromSet(t.Spec.Selector)
			f := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
			pod, _, err := cmdutil.GetFirstPod(kc, t.Namespace, selector, 1*time.Minute, f)
			return pod, err
		default:
			return kAttachablePodForObjectFunc(object)
		}
	}
	kProtocolsForObject := w.Factory.ProtocolsForObject
	w.ProtocolsForObject = func(object runtime.Object) (map[string]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getProtocols(t.Spec.Template.Spec), nil
		default:
			return kProtocolsForObject(object)
		}
	}

	kSwaggerSchemaFunc := w.Factory.SwaggerSchema
	w.Factory.SwaggerSchema = func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
		if !latest.OriginKind(gvk) {
			return kSwaggerSchemaFunc(gvk)
		}
		// TODO: we need to register the OpenShift API under the Kube group, and start returning the OpenShift
		// group from the scheme.
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		return w.OriginSwaggerSchema(oc.RESTClient, gvk.GroupVersion())
	}

	w.EditorEnvs = func() []string {
		return []string{"OC_EDITOR", "EDITOR"}
	}
	w.PrintObjectSpecificMessage = func(obj runtime.Object, out io.Writer) {}
	kPauseObjectFunc := w.Factory.PauseObject
	w.Factory.PauseObject = func(object runtime.Object) (bool, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return false, err
		}

		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = true
			_, err := oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Pause the deployer containers.
			return false, err
		default:
			return kPauseObjectFunc(object)
		}
	}
	kResumeObjectFunc := w.Factory.ResumeObject
	w.Factory.ResumeObject = func(object runtime.Object) (bool, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return false, err
		}

		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			if !t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = false
			_, err := oc.DeploymentConfigs(t.Namespace).Update(t)
			// TODO: Resume the deployer containers.
			return false, err
		default:
			return kResumeObjectFunc(object)
		}
	}
	kHistoryViewerFunc := w.Factory.HistoryViewer
	w.Factory.HistoryViewer = func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deploycmd.NewDeploymentConfigHistoryViewer(oc, kc), nil
		}
		return kHistoryViewerFunc(mapping)
	}
	kRollbackerFunc := w.Factory.Rollbacker
	w.Factory.Rollbacker = func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deploycmd.NewDeploymentConfigRollbacker(oc), nil
		}
		return kRollbackerFunc(mapping)
	}

	return w
}
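Nearly every override in this factory follows the same capture-and-delegate shape: save the embedded kube factory's function value, install a replacement that recognizes Origin kinds, and fall through to the saved value for everything else. Here is a stripped-down sketch of that shape with hypothetical types standing in for the real kubectl interfaces.

package main

import "fmt"

type describeFunc func(kind string) (string, error)

// wrapDescriber returns a describer that handles the listed kinds itself
// and delegates everything else to the saved upstream function, mirroring
// how this factory shadows its embedded kube factory.
func wrapDescriber(delegate describeFunc, special map[string]string) describeFunc {
	return func(kind string) (string, error) {
		if desc, ok := special[kind]; ok {
			return desc, nil
		}
		return delegate(kind)
	}
}

func main() {
	kube := func(kind string) (string, error) { return "kube describer for " + kind, nil }
	origin := wrapDescriber(kube, map[string]string{
		"DeploymentConfig": "origin describer for DeploymentConfig",
	})
	fmt.Println(origin("DeploymentConfig")) // handled locally
	fmt.Println(origin("Pod"))              // falls through to kube
}

Capturing the upstream function value before reassigning the field is what makes the delegation safe: the replacement closure holds the original, not the field it is about to overwrite.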
Code example #12
File: factory.go Project: maxfrei/kubernetes
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, flags will be bound to a new clientcmd.ClientConfig;
// if it is not nil, this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		// If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that
		// have been dynamically added to the apiserver
		Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			checkErrWithPrefix("failed to get client config: ", err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}
			if discoverDynamicAPIs {
				client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"})
				checkErrWithPrefix("failed to find client for version v1: ", err)

				var versions []unversioned.GroupVersion
				var gvks []unversioned.GroupVersionKind
				retries := 3
				for i := 0; i < retries; i++ {
					versions, gvks, err = GetThirdPartyGroupVersions(client.Discovery())
					// Retry if we got a NotFound error, because a user may delete
					// a third-party group while GetThirdPartyGroupVersions is
					// running.
					if err == nil || !apierrors.IsNotFound(err) {
						break
					}
				}
				checkErrWithPrefix("failed to get third-party group versions: ", err)
				if len(versions) > 0 {
					priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper))
						return nil, nil
					}
					multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper))
						return nil, nil
					}
					groupsMap := map[string][]unversioned.GroupVersion{}
					for _, version := range versions {
						groupsMap[version.Group] = append(groupsMap[version.Group], version)
					}
					for group, versionList := range groupsMap {
						preferredExternalVersion := versionList[0]

						thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group))
						checkErrWithPrefix("failed to create third party resource mapper: ", err)
						accessor := meta.NewAccessor()
						groupMeta := apimachinery.GroupMeta{
							GroupVersion:  preferredExternalVersion,
							GroupVersions: versionList,
							RESTMapper:    thirdPartyMapper,
							SelfLinker:    runtime.SelfLinker(accessor),
							InterfacesFor: makeInterfacesFor(versionList),
						}

						checkErrWithPrefix("failed to register group: ", registered.RegisterGroup(groupMeta))
						registered.AddThirdPartyAPIGroupVersions(versionList...)
						multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...)
					}
					priorityMapper.Delegate = multiMapper
					// Reassign to the RESTMapper here because priorityMapper is actually a copy, so if we
					// don't reassign, the assignment above won't actually update mapper.RESTMapper
					mapper.RESTMapper = priorityMapper
				}
			}
			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
			}
			// TODO: this should come from registered versions
			groups := []string{api.GroupName, autoscaling.GroupName, extensions.GroupName, federation.GroupName, batch.GroupName}
			// set a preferred version
			for _, group := range groups {
				gvs := registered.EnabledVersionsForGroup(group)
				if len(gvs) == 0 {
					continue
				}
				priorityRESTMapper.ResourcePriority = append(priorityRESTMapper.ResourcePriority, unversioned.GroupVersionResource{Group: group, Version: gvs[0].Version, Resource: meta.AnyResource})
				priorityRESTMapper.KindPriority = append(priorityRESTMapper.KindPriority, unversioned.GroupVersionKind{Group: group, Version: gvs[0].Version, Kind: meta.AnyKind})
			}
			for _, group := range groups {
				priorityRESTMapper.ResourcePriority = append(priorityRESTMapper.ResourcePriority, unversioned.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource})
				priorityRESTMapper.KindPriority = append(priorityRESTMapper.KindPriority, unversioned.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind})
			}
			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := client.SetKubernetesDefaults(cfg); err != nil {
				return nil, err
			}
			gvk := mapping.GroupVersionKind
			switch gvk.Group {
			case federation.GroupName:
				mappingVersion := mapping.GroupVersionKind.GroupVersion()
				return clients.FederationClientForVersion(&mappingVersion)
			case api.GroupName:
				cfg.APIPath = "/api"
			default:
				cfg.APIPath = "/apis"
			}
			gv := gvk.GroupVersion()
			cfg.GroupVersion = &gv
			if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
				cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv)
			}
			return restclient.RESTClientFor(cfg)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			if mapping.GroupVersionKind.Group == federation.GroupName {
				fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion)
				if err != nil {
					return nil, err
				}
				if mapping.GroupVersionKind.Kind == "Cluster" {
					return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil
				}
			}
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			var decoder runtime.Decoder
			if toInternal {
				decoder = api.Codecs.UniversalDecoder()
			} else {
				decoder = api.Codecs.UniversalDeserializer()
			}
			return thirdpartyresourcedata.NewDecoder(decoder, "")
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(options), nil
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0])
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvks[0])
			}
		},
		ProtocolsForObject: func(object runtime.Object) (map[string]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getProtocols(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getProtocols(t.Spec), nil
			case *api.Service:
				return getServiceProtocols(t.Spec), nil
			case *extensions.Deployment:
				return getProtocols(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getProtocols(t.Spec.Template.Spec), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0])
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvks[0])
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvks[0])
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvks[0])
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			clientset := clientset.FromUnversionedClient(client)
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				fedClient, err := clients.FederationClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				return &clientSwaggerSchema{
					c:        client,
					fedc:     fedClient,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *batch.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0])
			}
		},
		// UpdatePodSpecForObject updates the pod specification for the provided object
		UpdatePodSpecForObject: func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) {
			// TODO: replace with a swagger schema based approach (identify pod template via schema introspection)
			switch t := obj.(type) {
			case *api.Pod:
				return true, fn(&t.Spec)
			case *api.ReplicationController:
				if t.Spec.Template == nil {
					t.Spec.Template = &api.PodTemplateSpec{}
				}
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.Deployment:
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.DaemonSet:
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.ReplicaSet:
				return true, fn(&t.Spec.Template.Spec)
			case *apps.PetSet:
				return true, fn(&t.Spec.Template.Spec)
			case *batch.Job:
				return true, fn(&t.Spec.Template.Spec)
			default:
				return false, fmt.Errorf("the object is not a pod or does not have a pod template")
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
		PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) {
			switch obj := obj.(type) {
			case *api.Service:
				if obj.Spec.Type == api.ServiceTypeNodePort {
					msg := fmt.Sprintf(
						`You have exposed your service on an external port on all nodes in your
cluster.  If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (%s) to serve traffic.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`,
						makePortsString(obj.Spec.Ports, true))
					out.Write([]byte(msg))
				}

				if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok {
					msg := fmt.Sprintf(
						`You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges].
It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future.
Please use the loadBalancerSourceRanges field instead.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`)
					out.Write([]byte(msg))
				}
			}
		},
	}
}
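Both LogsForObject and AttachablePodForObject pick a pod by handing GetFirstPod a sortBy constructor. controller.ActivePods orders pods least-active first (the right order when choosing deletion victims), so attach-style callers wrap it in sort.Reverse to put the most active pod at index 0. The sketch below illustrates that ordering with a toy pod type, not the real api.Pod.

package main

import (
	"fmt"
	"sort"
)

type pod struct {
	name  string
	ready bool
}

// byActivity sorts less-active pods first, mirroring the idea behind
// controller.ActivePods: ascending activity is the right order when
// picking pods to delete.
type byActivity []pod

func (p byActivity) Len() int           { return len(p) }
func (p byActivity) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p byActivity) Less(i, j int) bool { return !p[i].ready && p[j].ready }

func main() {
	pods := []pod{{"a", true}, {"b", false}, {"c", true}}
	// For attach and logs we want the *most* active pod first, hence Reverse.
	sort.Sort(sort.Reverse(byActivity(pods)))
	fmt.Println(pods[0].ready) // true: a ready pod is chosen
}

controller.ByLogging, used by LogsForObject above, applies the same pattern with a different ranking, tuned for picking the pod whose logs to show.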
Code example #13
File: factory.go Project: PeterLamar/kubernetes
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, flags will be bound to a new clientcmd.ClientConfig;
// if it is not nil, this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			checkErrWithPrefix("failed to get client config: ", err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}

			mapper := registered.RESTMapper()
			discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
			// If we can find the server version and it's current enough to have discovery information, use it.  Otherwise,
			// fall back to our hardcoded list.
			if err == nil {
				if serverVersion, err := discoveryClient.ServerVersion(); err == nil && useDiscoveryRESTMapper(serverVersion.GitVersion) {
					// Register third-party resources with the API machinery groups.  This should probably be done
					// elsewhere, but it's consistent with the old code, so we'll start with it.
					if err := registerThirdPartyResources(discoveryClient); err != nil {
						fmt.Fprintf(os.Stderr, "Unable to register third party resources: %v\n", err)
					}
					// ThirdPartyResourceData is special.  It's not discoverable, but needed for third-party resource listing.
					// TODO eliminate this once we're truly generic.
					thirdPartyResourceDataMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{extensionsv1beta1.SchemeGroupVersion}, registered.InterfacesFor)
					thirdPartyResourceDataMapper.Add(extensionsv1beta1.SchemeGroupVersion.WithKind("ThirdPartyResourceData"), meta.RESTScopeNamespace)

					mapper = meta.FirstHitRESTMapper{
						MultiRESTMapper: meta.MultiRESTMapper{
							discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, registered.InterfacesFor),
							thirdPartyResourceDataMapper,
						},
					}
				}
			}

			// wrap with shortcuts
			mapper = NewShortcutExpander(mapper, discoveryClient)
			// wrap with output preferences
			mapper = kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
			return mapper, api.Scheme
		},
		UnstructuredObject: func() (meta.RESTMapper, runtime.ObjectTyper, error) {
			cfg, err := clients.ClientConfigForVersion(nil)
			if err != nil {
				return nil, nil, err
			}

			dc, err := discovery.NewDiscoveryClientForConfig(cfg)
			if err != nil {
				return nil, nil, err
			}

			groupResources, err := discovery.GetAPIGroupResources(dc)
			if err != nil {
				return nil, nil, err
			}

			// Register unknown APIs as third party for now to make
			// validation happy. TODO perhaps make a dynamic schema
			// validator to avoid this.
			for _, group := range groupResources {
				for _, version := range group.Group.Versions {
					gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version}
					if !registered.IsRegisteredVersion(gv) {
						registered.AddThirdPartyAPIGroupVersions(gv)
					}
				}
			}

			mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured)

			typer := discovery.NewUnstructuredObjectTyper(groupResources)

			return NewShortcutExpander(mapper, dc), typer, nil
		},
		RESTClient: func() (*restclient.RESTClient, error) {
			clientConfig, err := clients.ClientConfigForVersion(nil)
			if err != nil {
				return nil, err
			}
			return restclient.RESTClientFor(clientConfig)
		},
		ClientSet: func() (*internalclientset.Clientset, error) {
			return clients.ClientSetForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := client.SetKubernetesDefaults(cfg); err != nil {
				return nil, err
			}
			gvk := mapping.GroupVersionKind
			switch gvk.Group {
			case federation.GroupName:
				mappingVersion := mapping.GroupVersionKind.GroupVersion()
				return clients.FederationClientForVersion(&mappingVersion)
			case api.GroupName:
				cfg.APIPath = "/api"
			default:
				cfg.APIPath = "/apis"
			}
			gv := gvk.GroupVersion()
			cfg.GroupVersion = &gv
			if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
				cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv)
			}
			return restclient.RESTClientFor(cfg)
		},
		UnstructuredClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := restclient.SetKubernetesDefaults(cfg); err != nil {
				return nil, err
			}
			cfg.APIPath = "/apis"
			if mapping.GroupVersionKind.Group == api.GroupName {
				cfg.APIPath = "/api"
			}
			gv := mapping.GroupVersionKind.GroupVersion()
			cfg.ContentConfig = dynamic.ContentConfig()
			cfg.GroupVersion = &gv
			return restclient.RESTClientFor(cfg)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			if mapping.GroupVersionKind.Group == federation.GroupName {
				fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion)
				if err != nil {
					return nil, err
				}
				if mapping.GroupVersionKind.Kind == "Cluster" {
					return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil
				}
			}
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), clientset); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			var decoder runtime.Decoder
			if toInternal {
				decoder = api.Codecs.UniversalDecoder()
			} else {
				decoder = api.Codecs.UniversalDeserializer()
			}
			return thirdpartyresourcedata.NewDecoder(decoder, "")
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(options), nil
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0])
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvks[0])
			}
		},
		ProtocolsForObject: func(object runtime.Object) (map[string]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getProtocols(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getProtocols(t.Spec), nil
			case *api.Service:
				return getServiceProtocols(t.Spec), nil
			case *extensions.Deployment:
				return getProtocols(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getProtocols(t.Spec.Template.Spec), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0])
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			clientset, err := clients.ClientSetForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return clientset.Core().Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvks[0])
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			clientset, err := clients.ClientSetForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := clientset.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvks[0])
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			clientset, err := clients.ClientSetForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := clientset.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvks[0])
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				clientConfig, err := clients.ClientConfigForVersion(nil)
				if err != nil {
					return nil, err
				}
				restclient, err := restclient.RESTClientFor(clientConfig)
				if err != nil {
					return nil, err
				}
				clientset, err := clients.ClientSetForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := clientset.Discovery().ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				fedClient, err := clients.FederationClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				return &clientSwaggerSchema{
					c:        restclient,
					fedc:     fedClient,
					cacheDir: dir,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&version)
			if err != nil {
				return nil, err
			}
			return clientset.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			clientset, err := clients.ClientSetForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *batch.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0])
			}
		},
		// UpdatePodSpecForObject updates the pod specification for the provided object
		UpdatePodSpecForObject: func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) {
			// TODO: replace with a swagger schema based approach (identify pod template via schema introspection)
			switch t := obj.(type) {
			case *api.Pod:
				return true, fn(&t.Spec)
			case *api.ReplicationController:
				if t.Spec.Template == nil {
					t.Spec.Template = &api.PodTemplateSpec{}
				}
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.Deployment:
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.DaemonSet:
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.ReplicaSet:
				return true, fn(&t.Spec.Template.Spec)
			case *apps.PetSet:
				return true, fn(&t.Spec.Template.Spec)
			case *batch.Job:
				return true, fn(&t.Spec.Template.Spec)
			default:
				return false, fmt.Errorf("the object is not a pod or does not have a pod template")
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
		PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) {
			switch obj := obj.(type) {
			case *api.Service:
				if obj.Spec.Type == api.ServiceTypeNodePort {
					msg := fmt.Sprintf(
						`You have exposed your service on an external port on all nodes in your
cluster.  If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (%s) to serve traffic.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`,
						makePortsString(obj.Spec.Ports, true))
					out.Write([]byte(msg))
				}

				if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok {
					msg := fmt.Sprintf(
						`You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges].
It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future.
Please use the loadBalancerSourceRanges field instead.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`)
					out.Write([]byte(msg))
				}
			}
		},
	}
}
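UpdatePodSpecForObject in both factories is the idiomatic Go way to say "visit the pod template wherever this object keeps it": a type switch hands a pointer to the embedded pod spec to a caller-supplied mutation and reports whether the object has one at all. A condensed sketch with toy types in place of the API objects:

package main

import "fmt"

type podSpec struct{ image string }
type podTemplate struct{ spec podSpec }
type deployment struct{ template podTemplate }
type pod struct{ spec podSpec }

// updatePodSpec hands the object's pod spec to fn and reports whether the
// object carries one at all, the same shape as UpdatePodSpecForObject above.
func updatePodSpec(obj interface{}, fn func(*podSpec) error) (bool, error) {
	switch t := obj.(type) {
	case *pod:
		return true, fn(&t.spec)
	case *deployment:
		return true, fn(&t.template.spec)
	default:
		return false, fmt.Errorf("the object is not a pod or does not have a pod template")
	}
}

func main() {
	d := &deployment{}
	if ok, err := updatePodSpec(d, func(s *podSpec) error { s.image = "nginx:1.25"; return nil }); ok && err == nil {
		fmt.Println(d.template.spec.image) // nginx:1.25
	}
}

Passing a pointer into the callback lets one function serve kubectl set, edit, and similar commands without each of them knowing where every object type nests its template.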