Example #1
func runDelete(namespace, name string, mapping *meta.RESTMapping, c resource.RESTClient, helper *resource.Helper, cascade bool, gracePeriod int, clientsetFunc func() (*internalclientset.Clientset, error)) error {
	if !cascade {
		if helper == nil {
			helper = resource.NewHelper(c, mapping)
		}
		return helper.Delete(namespace, name)
	}
	cs, err := clientsetFunc()
	if err != nil {
		return err
	}
	r, err := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), cs)
	if err != nil {
		if _, ok := err.(*kubectl.NoSuchReaperError); !ok {
			return err
		}
		return resource.NewHelper(c, mapping).Delete(namespace, name)
	}
	var options *api.DeleteOptions
	if gracePeriod >= 0 {
		options = api.NewDeleteOptions(int64(gracePeriod))
	}
	if err := r.Stop(namespace, name, 2*time.Minute, options); err != nil {
		return err
	}
	return nil
}
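Example #1 tries a reaper for cascading deletion and falls back to a plain helper delete only when the error says no reaper exists for the kind. Below is a minimal, self-contained sketch of that fallback pattern; NoSuchReaperError, reaperFor, and plainDelete are simplified stand-ins for the kubectl types (the original checks with a type assertion where this sketch uses errors.As).

package main

import (
	"errors"
	"fmt"
	"time"
)

// NoSuchReaperError is a simplified stand-in for kubectl.NoSuchReaperError.
type NoSuchReaperError struct{ kind string }

func (e *NoSuchReaperError) Error() string { return "no reaper for kind " + e.kind }

type reaper interface {
	Stop(namespace, name string, timeout time.Duration) error
}

var reapers = map[string]reaper{} // kind -> reaper, normally populated per supported kind

func reaperFor(kind string) (reaper, error) {
	r, ok := reapers[kind]
	if !ok {
		return nil, &NoSuchReaperError{kind: kind}
	}
	return r, nil
}

// plainDelete stands in for resource.NewHelper(c, mapping).Delete.
func plainDelete(namespace, name string) error {
	fmt.Printf("DELETE %s/%s via helper\n", namespace, name)
	return nil
}

// runDelete mirrors the shape of Example #1: use a reaper for cascading
// deletion, falling back to a plain delete only when no reaper is registered.
func runDelete(kind, namespace, name string, cascade bool) error {
	if !cascade {
		return plainDelete(namespace, name)
	}
	r, err := reaperFor(kind)
	if err != nil {
		var nsr *NoSuchReaperError
		if !errors.As(err, &nsr) {
			return err // a real failure, not just a missing reaper
		}
		return plainDelete(namespace, name)
	}
	return r.Stop(namespace, name, 2*time.Minute)
}

func main() {
	fmt.Println(runDelete("ReplicationController", "default", "my-rc", true))
}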
Example #2
func (f *factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
	mappingVersion := mapping.GroupVersionKind.GroupVersion()
	clientset, err := f.clients.ClientSetForVersion(&mappingVersion)
	if err != nil {
		return nil, err
	}
	return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset)
}
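The factory above resolves a version-matched clientset from a per-version cache before constructing the reaper. A rough sketch of what such a ClientSetForVersion cache can look like, with hypothetical types (the real ClientCache negotiates versions and builds REST clients):

package main

import (
	"fmt"
	"sync"
)

type clientset struct{ version string }

// clientCache is a hypothetical stand-in for the cache behind
// ClientSetForVersion: at most one clientset per negotiated API version.
type clientCache struct {
	mu      sync.Mutex
	clients map[string]*clientset
}

func (c *clientCache) ClientSetForVersion(version string) (*clientset, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if cs, ok := c.clients[version]; ok {
		return cs, nil
	}
	cs := &clientset{version: version} // the real cache builds a REST client here
	c.clients[version] = cs
	return cs, nil
}

func main() {
	cache := &clientCache{clients: map[string]*clientset{}}
	cs, _ := cache.ClientSetForVersion("extensions/v1beta1")
	fmt.Println(cs.version)
}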
Example #3
// StopRC stops the rc via kubectl's stop library
func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
	reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientsetadapter.FromUnversionedClient(restClient))
	if err != nil || reaper == nil {
		return err
	}
	err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
	if err != nil {
		return err
	}
	return nil
}
Example #4
// StopRC stops the rc via kubectl's stop library
func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
	reaper, err := kubectl.ReaperFor("ReplicationController", restClient)
	if err != nil || reaper == nil {
		return err
	}
	_, err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
	if err != nil {
		return err
	}
	return nil
}
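Examples #3 and #4 are the same helper written against two revisions of the Reaper interface: in the older one, Stop returned a status message alongside the error (hence _, err = reaper.Stop(...)), while in the newer one it returns only an error. A compilable sketch of both shapes, plus one way an adapter could bridge them; all names here are illustrative, not the real kubectl declarations.

package main

import (
	"fmt"
	"time"
)

// DeleteOptions is a placeholder for api.DeleteOptions.
type DeleteOptions struct{ GracePeriodSeconds *int64 }

// LegacyReaper matches the older call sites (Examples #4, #17, #18), where
// Stop returned a status message alongside the error.
type LegacyReaper interface {
	Stop(namespace, name string, timeout time.Duration, gracePeriod *DeleteOptions) (string, error)
}

// Reaper matches the later call sites (Examples #3, #5, #15), where the
// message was dropped and Stop returns only an error.
type Reaper interface {
	Stop(namespace, name string, timeout time.Duration, gracePeriod *DeleteOptions) error
}

// legacyAdapter shows one way to bridge an old implementation to the new shape:
// the explicit Stop method shadows the embedded two-value version.
type legacyAdapter struct{ LegacyReaper }

func (a legacyAdapter) Stop(ns, name string, timeout time.Duration, opts *DeleteOptions) error {
	_, err := a.LegacyReaper.Stop(ns, name, timeout, opts)
	return err
}

func main() {
	var _ Reaper = legacyAdapter{} // compile-time check that the adapter satisfies the new interface
	fmt.Println("ok")
}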
Example #5
// StopRC stops the rc via kubectl's stop library
func StopRC(rc *api.ReplicationController, clientset clientset.Interface) error {
	reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
	if err != nil || reaper == nil {
		return err
	}
	err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
	if err != nil {
		return err
	}
	return nil
}
Example #6
func (f *ring1Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
	mappingVersion := mapping.GroupVersionKind.GroupVersion()
	clientset, clientsetErr := f.clientAccessFactory.ClientSetForVersion(&mappingVersion)
	reaper, reaperErr := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset)

	if kubectl.IsNoSuchReaperError(reaperErr) {
		return nil, reaperErr
	}
	if clientsetErr != nil {
		return nil, clientsetErr
	}
	return reaper, reaperErr
}
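Note the deliberate error ordering in this factory: the reaper error is inspected for the no-such-reaper case before the clientset error is reported, so "this kind has no reaper" wins even when client construction also failed. A small sketch of that precedence, using a hypothetical sentinel in place of kubectl.IsNoSuchReaperError:

package main

import (
	"errors"
	"fmt"
)

// errNoSuchReaper is a hypothetical sentinel standing in for kubectl's
// NoSuchReaperError / IsNoSuchReaperError pair.
var errNoSuchReaper = errors.New("no reaper registered for kind")

// pickReaperError reproduces the precedence in the ring1Factory example:
// report "no reaper for this kind" ahead of a clientset failure, because
// callers treat the former as "fall back to a plain delete" (see Example #1)
// rather than as a hard error.
func pickReaperError(clientsetErr, reaperErr error) error {
	if errors.Is(reaperErr, errNoSuchReaper) {
		return reaperErr
	}
	if clientsetErr != nil {
		return clientsetErr
	}
	return reaperErr
}

func main() {
	fmt.Println(pickReaperError(errors.New("no server"), errNoSuchReaper))
}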
Example #7
func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping, c resource.RESTClient) error {
	if !p.cascade {
		if err := resource.NewHelper(c, mapping).Delete(namespace, name); err != nil {
			return err
		}
		return nil
	}
	cs, err := p.clientsetFunc()
	if err != nil {
		return err
	}
	r, err := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), cs)
	if err != nil {
		return err
	}
	if err := r.Stop(namespace, name, 2*time.Minute, api.NewDeleteOptions(int64(p.gracePeriod))); err != nil {
		return err
	}
	return nil
}
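Unlike Example #1, the pruner always sends delete options built from p.gracePeriod, even when it is negative; Example #1 sends nil options for a negative grace period so the server default applies. A sketch of the Example #1 convention with placeholder types:

package main

import "fmt"

// deleteOptions is a minimal stand-in for api.DeleteOptions.
type deleteOptions struct{ GracePeriodSeconds *int64 }

// newDeleteOptions mirrors what api.NewDeleteOptions does: pin the grace
// period to an explicit value instead of leaving it to the server default.
func newDeleteOptions(grace int64) *deleteOptions {
	return &deleteOptions{GracePeriodSeconds: &grace}
}

// optionsFor follows Example #1's convention: a negative grace period means
// "send nil and let the server pick a default".
func optionsFor(gracePeriod int) *deleteOptions {
	if gracePeriod >= 0 {
		return newDeleteOptions(int64(gracePeriod))
	}
	return nil
}

func main() {
	fmt.Println(optionsFor(-1) == nil, *optionsFor(30).GracePeriodSeconds)
}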
Example #8
// Stop scales a replication controller via its deployment configuration down to
// zero replicas, waits for all of them to get deleted and then deletes both the
// replication controller and its deployment configuration.
func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error {
	// If the config is already deleted, it may still have associated
	// deployments which didn't get cleaned up during prior calls to Stop. If
	// the config can't be found, still make an attempt to clean up the
	// deployments.
	//
	// It's important to delete the config first to avoid an undesirable side
	// effect which can cause the deployment to be re-triggered upon the
	// config's deletion. See https://github.com/openshift/origin/issues/2721
	// for more details.
	err := reaper.oc.DeploymentConfigs(namespace).Delete(name)
	configNotFound := kerrors.IsNotFound(err)
	if err != nil && !configNotFound {
		return err
	}

	// Clean up deployments related to the config.
	options := kapi.ListOptions{LabelSelector: util.ConfigSelector(name)}
	rcList, err := reaper.kc.ReplicationControllers(namespace).List(options)
	if err != nil {
		return err
	}
	rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc)
	if err != nil {
		return err
	}

	// If there is neither a config nor any deployments, we can return NotFound.
	deployments := rcList.Items
	if configNotFound && len(deployments) == 0 {
		return kerrors.NewNotFound(kapi.Resource("deploymentconfig"), name)
	}
	for _, rc := range deployments {
		if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			// Better not error out here...
			glog.Infof("Cannot delete ReplicationController %s/%s: %v", rc.Namespace, rc.Name, err)
		}
	}

	return nil
}
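The ordering here generalizes: delete the owner first so its deletion cannot re-trigger new children, clean up the children best-effort, and synthesize NotFound only when neither the owner nor any child existed. A self-contained sketch of that shape with hypothetical hooks:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found") // stand-in for a kerrors NotFound status error

// stopOwnerThenChildren follows the DeploymentConfigReaper ordering above:
// owner first, then children best-effort, NotFound only if nothing existed.
func stopOwnerThenChildren(
	deleteOwner func() error,
	listChildren func() ([]string, error),
	stopChild func(string) error,
) error {
	err := deleteOwner()
	ownerMissing := errors.Is(err, errNotFound)
	if err != nil && !ownerMissing {
		return err
	}
	children, err := listChildren()
	if err != nil {
		return err
	}
	if ownerMissing && len(children) == 0 {
		return errNotFound
	}
	for _, c := range children {
		if err := stopChild(c); err != nil {
			fmt.Printf("cannot delete child %s: %v\n", c, err) // log and continue, as above
		}
	}
	return nil
}

func main() {
	err := stopOwnerThenChildren(
		func() error { return errNotFound },
		func() ([]string, error) { return []string{"deploy-1"}, nil },
		func(name string) error { return nil },
	)
	fmt.Println(err) // <nil>: a child existed, so the missing owner is not an error
}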
Example #9
func stopDeployment(c *clientset.Clientset, oldC client.Interface, ns, deploymentName string) {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	Logf("deleting deployment %s", deploymentName)
	reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), oldC)
	Expect(err).NotTo(HaveOccurred())
	timeout := 1 * time.Minute
	err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())

	Logf("ensuring deployment %s was deleted", deploymentName)
	_, err = c.Extensions().Deployments(ns).Get(deployment.Name)
	Expect(err).To(HaveOccurred())
	Expect(errors.IsNotFound(err)).To(BeTrue())
	Logf("ensuring deployment %s rcs were deleted", deploymentName)
	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	options := api.ListOptions{LabelSelector: selector}
	rss, err := c.Extensions().ReplicaSets(ns).List(options)
	Expect(err).NotTo(HaveOccurred())
	Expect(rss.Items).Should(HaveLen(0))
	Logf("ensuring deployment %s pods were deleted", deploymentName)
	var pods *api.PodList
	if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
		pods, err = c.Core().Pods(ns).List(api.ListOptions{})
		if err != nil {
			return false, err
		}
		if len(pods.Items) == 0 {
			return true, nil
		}
		return false, nil
	}); err != nil {
		Failf("failed to remove pods of deployment %s: %v; remaining pods: %+v", deploymentName, err, pods)
	}
}
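The tail of this test leans on wait.PollImmediate to treat "no pods remain" as an eventually-true condition. A minimal stand-in for that helper, assuming the real one's run-the-condition-first semantics:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollImmediate is a tiny stand-in for wait.PollImmediate: run the condition
// once right away, then on every tick until it succeeds, errors, or times out.
func pollImmediate(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	remaining := 3 // stands in for the number of pods still listed
	err := pollImmediate(10*time.Millisecond, time.Second, func() (bool, error) {
		remaining--
		return remaining == 0, nil
	})
	fmt.Println(err)
}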
Example #10
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			checkErrWithPrefix("failed to get client config: ", err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}

			mapper := registered.RESTMapper()
			discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
			// if we can find the server version and it's current enough to have discovery information, use it.  Otherwise,
			// fall back to our hardcoded list
			if err == nil {
				if serverVersion, err := discoveryClient.ServerVersion(); err == nil && useDiscoveryRESTMapper(serverVersion.GitVersion) {
					// register third party resources with the api machinery groups.  This probably should be done, but
					// it's consistent with old code, so we'll start with it.
					if err := registerThirdPartyResources(discoveryClient); err != nil {
						fmt.Fprintf(os.Stderr, "Unable to register third party resources: %v\n", err)
					}
					// ThirdPartyResourceData is special.  It's not discoverable, but needed for thirdparty resource listing
					// TODO eliminate this once we're truly generic.
					thirdPartyResourceDataMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{extensionsv1beta1.SchemeGroupVersion}, registered.InterfacesFor)
					thirdPartyResourceDataMapper.Add(extensionsv1beta1.SchemeGroupVersion.WithKind("ThirdPartyResourceData"), meta.RESTScopeNamespace)

					mapper = meta.FirstHitRESTMapper{
						MultiRESTMapper: meta.MultiRESTMapper{
							discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, registered.InterfacesFor),
							thirdPartyResourceDataMapper,
						},
					}
				}
			}

			// wrap with shortcuts
			mapper = NewShortcutExpander(mapper, discoveryClient)
			// wrap with output preferences
			mapper = kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
			return mapper, api.Scheme
		},
		UnstructuredObject: func() (meta.RESTMapper, runtime.ObjectTyper, error) {
			cfg, err := clients.ClientConfigForVersion(nil)
			if err != nil {
				return nil, nil, err
			}

			dc, err := discovery.NewDiscoveryClientForConfig(cfg)
			if err != nil {
				return nil, nil, err
			}

			groupResources, err := discovery.GetAPIGroupResources(dc)
			if err != nil {
				return nil, nil, err
			}

			// Register unknown APIs as third party for now to make
			// validation happy. TODO perhaps make a dynamic schema
			// validator to avoid this.
			for _, group := range groupResources {
				for _, version := range group.Group.Versions {
					gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version}
					if !registered.IsRegisteredVersion(gv) {
						registered.AddThirdPartyAPIGroupVersions(gv)
					}
				}
			}

			mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured)

			typer := discovery.NewUnstructuredObjectTyper(groupResources)

			return NewShortcutExpander(mapper, dc), typer, nil
		},
		RESTClient: func() (*restclient.RESTClient, error) {
			clientConfig, err := clients.ClientConfigForVersion(nil)
			if err != nil {
				return nil, err
			}
			return restclient.RESTClientFor(clientConfig)
		},
		ClientSet: func() (*internalclientset.Clientset, error) {
			return clients.ClientSetForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := client.SetKubernetesDefaults(cfg); err != nil {
				return nil, err
			}
			gvk := mapping.GroupVersionKind
			switch gvk.Group {
			case federation.GroupName:
				mappingVersion := mapping.GroupVersionKind.GroupVersion()
				return clients.FederationClientForVersion(&mappingVersion)
			case api.GroupName:
				cfg.APIPath = "/api"
			default:
				cfg.APIPath = "/apis"
			}
			gv := gvk.GroupVersion()
			cfg.GroupVersion = &gv
			if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
				cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv)
			}
			return restclient.RESTClientFor(cfg)
		},
		UnstructuredClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := restclient.SetKubernetesDefaults(cfg); err != nil {
				return nil, err
			}
			cfg.APIPath = "/apis"
			if mapping.GroupVersionKind.Group == api.GroupName {
				cfg.APIPath = "/api"
			}
			gv := mapping.GroupVersionKind.GroupVersion()
			cfg.ContentConfig = dynamic.ContentConfig()
			cfg.GroupVersion = &gv
			return restclient.RESTClientFor(cfg)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			if mapping.GroupVersionKind.Group == federation.GroupName {
				fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion)
				if err != nil {
					return nil, err
				}
				if mapping.GroupVersionKind.Kind == "Cluster" {
					return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil
				}
			}
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), clientset); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			var decoder runtime.Decoder
			if toInternal {
				decoder = api.Codecs.UniversalDecoder()
			} else {
				decoder = api.Codecs.UniversalDeserializer()
			}
			return thirdpartyresourcedata.NewDecoder(decoder, "")

		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(options), nil
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0])
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvks[0])
			}
		},
		ProtocolsForObject: func(object runtime.Object) (map[string]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getProtocols(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getProtocols(t.Spec), nil
			case *api.Service:
				return getServiceProtocols(t.Spec), nil
			case *extensions.Deployment:
				return getProtocols(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getProtocols(t.Spec.Template.Spec), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0])
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			clientset, err := clients.ClientSetForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return clientset.Core().Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return clientset.Core().Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvks[0])
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			clientset, err := clients.ClientSetForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := clientset.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvks[0])
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			clientset, err := clients.ClientSetForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := clientset.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvks[0])
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				clientConfig, err := clients.ClientConfigForVersion(nil)
				if err != nil {
					return nil, err
				}
				restclient, err := restclient.RESTClientFor(clientConfig)
				if err != nil {
					return nil, err
				}
				clientset, err := clients.ClientSetForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := clientset.Discovery().ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				fedClient, err := clients.FederationClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				return &clientSwaggerSchema{
					c:        restclient,
					fedc:     fedClient,
					cacheDir: dir,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			clientset, err := clients.ClientSetForVersion(&version)
			if err != nil {
				return nil, err
			}
			return clientset.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			clientset, err := clients.ClientSetForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *batch.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(clientset.Core(), t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0])
			}
		},
		// UpdatePodSpecForObject updates the pod specification for the provided object
		UpdatePodSpecForObject: func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) {
			// TODO: replace with a swagger schema based approach (identify pod template via schema introspection)
			switch t := obj.(type) {
			case *api.Pod:
				return true, fn(&t.Spec)
			case *api.ReplicationController:
				if t.Spec.Template == nil {
					t.Spec.Template = &api.PodTemplateSpec{}
				}
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.Deployment:
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.DaemonSet:
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.ReplicaSet:
				return true, fn(&t.Spec.Template.Spec)
			case *apps.PetSet:
				return true, fn(&t.Spec.Template.Spec)
			case *batch.Job:
				return true, fn(&t.Spec.Template.Spec)
			default:
				return false, fmt.Errorf("the object is not a pod or does not have a pod template")
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
		PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) {
			switch obj := obj.(type) {
			case *api.Service:
				if obj.Spec.Type == api.ServiceTypeNodePort {
					msg := fmt.Sprintf(
						`You have exposed your service on an external port on all nodes in your
cluster.  If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (%s) to serve traffic.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`,
						makePortsString(obj.Spec.Ports, true))
					out.Write([]byte(msg))
				}

				if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok {
					msg := fmt.Sprintf(
						`You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges].
It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future.
Please use the loadBalancerSourceRanges field instead.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`)
					out.Write([]byte(msg))
				}
			}
		},
	}
}
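This Factory is essentially a struct of closures that all capture the same client cache and clientConfig, which is what lets callers override a single capability (Reaper, Scaler, Printer, ...) without touching the rest. A miniature of the pattern with made-up fields:

package main

import "fmt"

// Factory here is a hypothetical miniature of the kubectl Factory above: a
// struct of closures that share captured state, so tests can swap any one field.
type Factory struct {
	ClientFor   func(version string) (string, error)
	DefaultName func() string
}

func NewFactory() *Factory {
	cache := map[string]string{} // shared state captured by every closure below
	return &Factory{
		ClientFor: func(version string) (string, error) {
			if c, ok := cache[version]; ok {
				return c, nil
			}
			c := "client-for-" + version // real code would negotiate and build a REST client
			cache[version] = c
			return c, nil
		},
		DefaultName: func() string { return "default" },
	}
}

func main() {
	f := NewFactory()
	f.DefaultName = func() string { return "test-ns" } // override one capability, keep the rest
	c, _ := f.ClientFor("v1")
	fmt.Println(c, f.DefaultName())
}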
Example #11
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":     kubectl.BasicReplicationController{},
		"run-pod/v1": kubectl.BasicPod{},
		"service/v1": kubectl.ServiceGeneratorV1{},
		"service/v2": kubectl.ServiceGeneratorV2{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			switch group {
			case "":
				return client.RESTClient, nil
			case "extensions":
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*client.Request, error) {
			c, err := clients.ClientForVersion("")
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.PodLogs(t.Namespace).Get(t.Name, opts)
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %s", kind)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.Kind, client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
		CanBeExposed: func(kind string) error {
			if kind != "ReplicationController" && kind != "Service" && kind != "Pod" {
				return fmt.Errorf("invalid resource provided: %v, only a replication controller, service or pod is accepted", kind)
			}
			return nil
		},
	}
}
Example #12
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			switch mapping.GroupVersionKind.Group {
			case api.GroupName:
				return client.RESTClient, nil
			case extensions.GroupName:
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, absoluteTimestamps, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*client.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) {
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				return GetFirstPod(client, t.Namespace, t.Spec.Selector)
			case *extensions.Deployment:
				return GetFirstPod(client, t.Namespace, t.Spec.Selector)
			case *extensions.Job:
				return GetFirstPod(client, t.Namespace, t.Spec.Selector.MatchLabels)
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
	}
}
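Comparing this factory with Examples #11 and #14 shows the dispatch key evolving from a bare kind string (kubectl.ReaperFor(mapping.Kind, client)) to a GroupKind (kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)), which disambiguates kinds that exist in more than one API group. A toy registry keyed the newer way; the types are stand-ins, not the real unversioned.GroupKind:

package main

import "fmt"

// groupKind is a minimal stand-in for unversioned.GroupKind: keying dispatch
// on {group, kind} instead of a bare kind string.
type groupKind struct{ Group, Kind string }

var reaperKinds = map[groupKind]string{
	{Group: "", Kind: "ReplicationController"}: "rc reaper",
	{Group: "extensions", Kind: "Deployment"}:  "deployment reaper",
}

func reaperFor(gk groupKind) (string, error) {
	r, ok := reaperKinds[gk]
	if !ok {
		return "", fmt.Errorf("no reaper has been implemented for %v", gk)
	}
	return r, nil
}

func main() {
	fmt.Println(reaperFor(groupKind{Group: "extensions", Kind: "Deployment"}))
}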
Example #13
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  dsName,
								Image: image,
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			Logf("Check that reaper kills all daemon pods for %s", dsName)
			dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), c)
			Expect(err).NotTo(HaveOccurred())
			err = dsReaper.Stop(ns, dsName, 0, nil)
			Expect(err).NotTo(HaveOccurred())
			err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, label))
			Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
		}()

		By("Check that daemon pods launch on every node of the cluster.")
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

		By("Stop a daemon pod, check that the daemon pod is revived.")
		podClient := c.Pods(ns)
Example #14
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":                          kubectl.BasicReplicationController{},
		"run-pod/v1":                      kubectl.BasicPod{},
		"service/v1":                      kubectl.ServiceGeneratorV1{},
		"service/v2":                      kubectl.ServiceGeneratorV2{},
		"horizontalpodautoscaler/v1beta1": kubectl.HorizontalPodAutoscalerV1Beta1{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			switch group {
			case "":
				return client.RESTClient, nil
			case "extensions":
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.Kind, client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
		CanBeExposed: func(kind string) error {
			switch kind {
			case "ReplicationController", "Service", "Pod":
			// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind string) error {
			switch kind {
			// TODO: support autoscale for deployments
			case "ReplicationController":
			// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %s", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion("")
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				var pods *api.PodList
				for pods == nil || len(pods.Items) == 0 {
					var err error
					if pods, err = client.Pods(t.Namespace).List(labels.SelectorFromSet(t.Spec.Selector), fields.Everything()); err != nil {
						return nil, err
					}
					if len(pods.Items) == 0 {
						time.Sleep(2 * time.Second)
					}
				}
				pod := &pods.Items[0]
				return pod, nil
			case *api.Pod:
				return t, nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %s: not implemented", kind)
			}
		},
	}
}
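The AttachablePodForObject case above loops with a 2-second sleep until a pod matching the selector appears, with no upper bound, so it can spin forever if nothing is ever scheduled; later revisions (Example #10) use GetFirstPod with an explicit timeout. A bounded sketch of the same wait, with listPods standing in for the selector-filtered pod list call:

package main

import (
	"errors"
	"fmt"
	"time"
)

// firstPod waits for a pod matching some selector, but with a deadline;
// listPods is a stand-in for client.Pods(ns).List with a label selector.
func firstPod(listPods func() ([]string, error), interval, timeout time.Duration) (string, error) {
	deadline := time.Now().Add(timeout)
	for {
		pods, err := listPods()
		if err != nil {
			return "", err
		}
		if len(pods) > 0 {
			return pods[0], nil
		}
		if time.Now().After(deadline) {
			return "", errors.New("timed out waiting for a matching pod")
		}
		time.Sleep(interval)
	}
}

func main() {
	calls := 0
	pod, err := firstPod(func() ([]string, error) {
		if calls++; calls < 3 {
			return nil, nil // nothing scheduled yet
		}
		return []string{"my-rc-abc12"}, nil
	}, 10*time.Millisecond, time.Minute)
	fmt.Println(pod, err)
}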
Example #15
		err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should delete a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.ClientSet, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("delete a job")
		reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = getJob(f.ClientSet, f.Namespace.Name, job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})

	It("should fail a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
		activeDeadlineSeconds := int64(10)
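Each of these job tests follows the same delete-then-verify shape: Stop the object with a zero grace period, read it back, and require a NotFound error. A distilled sketch of that assertion pattern, with a placeholder sentinel in place of the real NotFound status error:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found") // stand-in for a k8s NotFound status error

// deleteAndVerify captures the test pattern above: stop the object, then read
// it back and require NotFound, proving the deletion actually landed.
func deleteAndVerify(stop func() error, get func() error) error {
	if err := stop(); err != nil {
		return err
	}
	err := get()
	if err == nil {
		return errors.New("object still exists after Stop")
	}
	if !errors.Is(err, errNotFound) {
		return fmt.Errorf("unexpected error reading object back: %v", err)
	}
	return nil
}

func main() {
	deleted := false
	fmt.Println(deleteAndVerify(
		func() error { deleted = true; return nil },
		func() error {
			if deleted {
				return errNotFound
			}
			return nil
		},
	))
}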
Example #16
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should delete a job", func() {
		By("Creating a job")
		job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("delete a job")
		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = f.Client.Batch().Jobs(f.Namespace.Name).Get(job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})

	It("should fail a job", func() {
		By("Creating a job")
		job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		activeDeadlineSeconds := int64(10)
Example #17
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should stop a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job down")
		reaper, err := kubectl.ReaperFor("Job", f.Client)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		_, err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})
})

// newTestJob returns a job which does one of several testing behaviors.
func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int) *extensions.Job {
	job := &extensions.Job{
Example #18
func testDaemonSets(f *Framework) {
	ns := f.Namespace.Name
	c := f.Client
	simpleDSName := "simple-daemon-set"
	image := "gcr.io/google_containers/serve_hostname:1.1"
	label := map[string]string{daemonsetNameLabel: simpleDSName}
	retryTimeout := 1 * time.Minute
	retryInterval := 5 * time.Second

	Logf("Creating simple daemon set %s", simpleDSName)
	_, err := c.DaemonSets(ns).Create(&extensions.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: simpleDSName,
		},
		Spec: extensions.DaemonSetSpec{
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: label,
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  simpleDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("Check that reaper kills all daemon pods for %s", simpleDSName)
		dsReaper, err := kubectl.ReaperFor("DaemonSet", c)
		Expect(err).NotTo(HaveOccurred())
		_, err = dsReaper.Stop(ns, simpleDSName, 0, nil)
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
	}()

	By("Check that daemon pods launch on every node of the cluster.")
	Expect(err).NotTo(HaveOccurred())
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

	By("Stop a daemon pod, check that the daemon pod is revived.")
	podClient := c.Pods(ns)

	podList, err := podClient.List(labels.Set(label).AsSelector(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(podList.Items)).To(BeNumerically(">", 0))
	pod := podList.Items[0]
	err = podClient.Delete(pod.Name, nil)
	Expect(err).NotTo(HaveOccurred())
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")

	complexDSName := "complex-daemon-set"
	complexLabel := map[string]string{daemonsetNameLabel: complexDSName}
	nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
	Logf("Creating daemon with a node selector %s", complexDSName)
	_, err = c.DaemonSets(ns).Create(&extensions.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: complexDSName,
		},
		Spec: extensions.DaemonSetSpec{
			Selector: complexLabel,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: complexLabel,
				},
				Spec: api.PodSpec{
					NodeSelector: nodeSelector,
					Containers: []api.Container{
						{
							Name:  complexDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())

	By("Initially, daemon pods should not be running on any nodes.")
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

	By("Change label of node, check that daemon pod is launched.")
	nodeClient := c.Nodes()
	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
	newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
	Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
	daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
	Expect(len(daemonSetLabels)).To(Equal(1))
	err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

	By("remove the node selector and wait for daemons to be unscheduled")
	_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
	Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
	Expect(wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))).
		NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

	By("We should now be able to delete the daemon set.")
	Expect(c.DaemonSets(ns).Delete(complexDSName)).NotTo(HaveOccurred())
}
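The polling helpers referenced above (checkRunningOnAllNodes, checkRunningOnNoNodes) are not shown in this excerpt. As a rough sketch of the shape such a condition presumably takes — everything here, including the podLister interface and the function name, is illustrative rather than the upstream implementation:

package daemontest

import (
	"fmt"
	"time"
)

// podLister abstracts the single call the condition needs (an assumption).
type podLister interface {
	CountPodsWithLabels(ns string, label map[string]string) (int, error)
}

// waitRunningOnNoNodes mirrors the wait.Poll(retryInterval, retryTimeout, ...)
// pattern in the test: poll until no pods matching the label remain.
func waitRunningOnNoNodes(c podLister, ns string, label map[string]string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		n, err := c.CountPodsWithLabels(ns, label)
		if err != nil {
			return err
		}
		if n == 0 {
			return nil // all daemon pods have been reaped
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for daemon pods to be reaped: %d still running", n)
		}
		time.Sleep(interval)
	}
}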
Example #19
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	generators := map[string]kubectl.Generator{
		"run/v1":     kubectl.BasicReplicationController{},
		"run-pod/v1": kubectl.BasicPod{},
		"service/v1": kubectl.ServiceGeneratorV1{},
		"service/v2": kubectl.ServiceGeneratorV2{},
	}

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)
	expClients := NewExperimentalClientCache(clientConfig)

	noClientErr := errors.New("could not get client")
	getBothClients := func(group string, version string) (*client.Client, *client.ExperimentalClient, error) {
		switch group {
		case "api":
			client, err := clients.ClientForVersion(version)
			return client, nil, err

		case "experimental":
			client, err := clients.ClientForVersion(version)
			if err != nil {
				return nil, nil, err
			}
			expClient, err := expClients.Client()
			if err != nil {
				return nil, nil, err
			}
			return client, expClient, err
		}
		return nil, nil, noClientErr
	}
	return &Factory{
		clients:    clients,
		flags:      flags,
		generators: generators,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := cfg.Version

			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cmdApiVersion}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion("")
		},
		ExperimentalClient: func() (*client.ExperimentalClient, error) {
			return expClients.Client()
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion("")
		},
		RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			switch group {
			case "api":
				client, err := clients.ClientForVersion(mapping.APIVersion)
				if err != nil {
					return nil, err
				}
				return client.RESTClient, nil
			case "experimental":
				client, err := expClients.Client()
				if err != nil {
					return nil, err
				}
				return client.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, expClient, err := getBothClients(group, mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.Kind, client, expClient); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %s", kind)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			default:
				_, kind, err := api.Scheme.ObjectVersionAndKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %s", kind)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, _, err := getBothClients(group, mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.Kind, kubectl.NewScalerClient(client))
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			group, err := api.RESTMapper.GroupForResource(mapping.Resource)
			if err != nil {
				return nil, err
			}
			client, expClient, err := getBothClients(group, mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.Kind, client, expClient)
		},
		Validator: func(validate bool) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion("")
				if err != nil {
					return nil, err
				}
				expClient, _ := expClients.Client()
				return &clientSwaggerSchema{client, expClient, api.Scheme}, nil
			}
			return validation.NullSchema{}, nil
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generator: func(name string) (kubectl.Generator, bool) {
			generator, ok := generators[name]
			return generator, ok
		},
	}
}
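For orientation, a hedged sketch of how a caller might drive this factory's Reaper closure. The mapping lookup helper and the resource names below are assumptions, and note that across the releases excerpted in this document Stop variously returns (string, error) or just error:

f := NewFactory(nil)
// lookupMapping is a hypothetical helper; a real caller derives the
// RESTMapping from the mapper returned by f.Object().
mapping, err := lookupMapping("ReplicationController")
if err != nil {
	return err
}
reaper, err := f.Reaper(mapping)
if err != nil {
	return err
}
// In this era Stop returns (string, error); later releases return only error.
if _, err := reaper.Stop("default", "frontend", 2*time.Minute, nil); err != nil {
	return err
}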
Example #20
0
// Stop scales a replication controller via its deployment configuration down to
// zero replicas, waits for all of them to get deleted and then deletes both the
// replication controller and its deployment configuration.
func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error {
	// Pause the deployment configuration to prevent new deployments from being
	// triggered.
	config, err := reaper.pause(namespace, name)
	configNotFound := kerrors.IsNotFound(err)
	if err != nil && !configNotFound {
		return err
	}

	var (
		isPaused bool
		legacy   bool
	)
	// Determine if the deployment config controller noticed the pause.
	if !configNotFound {
		if err := wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
			dc, err := reaper.oc.DeploymentConfigs(namespace).Get(name)
			if err != nil {
				return false, err
			}
			isPaused = dc.Spec.Paused
			return dc.Status.ObservedGeneration >= config.Generation, nil
		}); err != nil {
			return err
		}

		// If we failed to pause the deployment config, it means we are talking to
		// an old API that does not support pausing. In that case, we delete the
		// deployment config to stay backward compatible.
		if !isPaused {
			if err := reaper.oc.DeploymentConfigs(namespace).Delete(name); err != nil {
				return err
			}
			// Setting this to true avoids deleting the config at the end.
			legacy = true
		}
	}

	// Clean up deployments related to the config. Even if the deployment
	// configuration has been deleted, we want to sweep the existing replication
	// controllers and clean them up.
	options := kapi.ListOptions{LabelSelector: util.ConfigSelector(name)}
	rcList, err := reaper.kc.ReplicationControllers(namespace).List(options)
	if err != nil {
		return err
	}
	rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc)
	if err != nil {
		return err
	}

	// If there is neither a config nor any deployments nor any deployer pods, we can return NotFound.
	deployments := rcList.Items

	if configNotFound && len(deployments) == 0 {
		return kerrors.NewNotFound(kapi.Resource("deploymentconfig"), name)
	}

	for _, rc := range deployments {
		if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			// Better not error out here...
			glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err)
		}

		// Only remove deployer pods when the deployment has failed. For completed
		// deployments the pods should already be deleted.
		if !util.IsFailedDeployment(&rc) {
			continue
		}

		// Delete all deployer and hook pods
		options = kapi.ListOptions{LabelSelector: util.DeployerPodSelector(rc.Name)}
		podList, err := reaper.kc.Pods(rc.Namespace).List(options)
		if err != nil {
			return err
		}
		for _, pod := range podList.Items {
			err := reaper.kc.Pods(pod.Namespace).Delete(pod.Name, gracePeriod)
			if err != nil {
				// Better not error out here...
				glog.Infof("Cannot delete lifecycle Pod %s/%s for deployment config %s/%s: %v", pod.Namespace, pod.Name, namespace, name, err)
			}
		}
	}

	// Nothing to delete or we already deleted the deployment config because we
	// failed to pause.
	if configNotFound || legacy {
		return nil
	}

	return reaper.oc.DeploymentConfigs(namespace).Delete(name)
}
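A short usage sketch for the reaper above, written as if from inside the same package. The oc/kc field names match the method bodies, but this literal construction and the client variables are assumptions:

// Hypothetical wiring of an OpenShift client and a Kubernetes client.
reaper := &DeploymentConfigReaper{oc: openshiftClient, kc: kubeClient}
if err := reaper.Stop("myproject", "frontend", 1*time.Minute, kapi.NewDeleteOptions(30)); err != nil {
	glog.Infof("failed to stop deployment config: %v", err)
}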
Example #21
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  dsName,
								Image: image,
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			Logf("Check that reaper kills all daemon pods for %s", dsName)
			dsReaper, err := kubectl.ReaperFor("DaemonSet", c)
			Expect(err).NotTo(HaveOccurred())
			_, err = dsReaper.Stop(ns, dsName, 0, nil)
			Expect(err).NotTo(HaveOccurred())
			err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, label))
			Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
		}()

		By("Check that daemon pods launch on every node of the cluster.")
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

		By("Stop a daemon pod, check that the daemon pod is revived.")
		podClient := c.Pods(ns)
Example #22
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name:  dsName,
								Image: image,
								Ports: []v1.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
			dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
			Expect(err).NotTo(HaveOccurred())
			err = dsReaper.Stop(ns, dsName, 0, nil)
			Expect(err).NotTo(HaveOccurred())
			err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, label))
			Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
		}()

		By("Check that daemon pods launch on every node of the cluster.")
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
		err = checkDaemonStatus(f, dsName)
		Expect(err).NotTo(HaveOccurred())

		By("Stop a daemon pod, check that the daemon pod is revived.")
Example #23
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		// If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that
		// have been dynamically added to the apiserver
		Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}
			if discoverDynamicAPIs {
				client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"})
				CheckErr(err)

				versions, gvks, err := GetThirdPartyGroupVersions(client.Discovery())
				CheckErr(err)
				if len(versions) > 0 {
					priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper))
						return nil, nil
					}
					multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper))
						return nil, nil
					}
					groupsMap := map[string][]unversioned.GroupVersion{}
					for _, version := range versions {
						groupsMap[version.Group] = append(groupsMap[version.Group], version)
					}
					for group, versionList := range groupsMap {
						preferredExternalVersion := versionList[0]

						thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group))
						CheckErr(err)
						accessor := meta.NewAccessor()
						groupMeta := apimachinery.GroupMeta{
							GroupVersion:  preferredExternalVersion,
							GroupVersions: versionList,
							RESTMapper:    thirdPartyMapper,
							SelfLinker:    runtime.SelfLinker(accessor),
							InterfacesFor: makeInterfacesFor(versionList),
						}

						CheckErr(registered.RegisterGroup(groupMeta))
						registered.AddThirdPartyAPIGroupVersions(versionList...)
						multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...)
					}
					priorityMapper.Delegate = multiMapper
					// Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we
					// don't re-assign, the above assignment won't actually update mapper.RESTMapper
					mapper.RESTMapper = priorityMapper
				}
			}
			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
				ResourcePriority: []unversioned.GroupVersionResource{
					{Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
				},
				KindPriority: []unversioned.GroupVersionKind{
					{Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
				},
			}
			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			gvk := mapping.GroupVersionKind
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			c, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			switch gvk.Group {
			case api.GroupName:
				return c.RESTClient, nil
			case autoscaling.GroupName:
				return c.AutoscalingClient.RESTClient, nil
			case batch.GroupName:
				return c.BatchClient.RESTClient, nil
			case apps.GroupName:
				return c.AppsClient.RESTClient, nil
			case extensions.GroupName:
				return c.ExtensionsClient.RESTClient, nil
			case api.SchemeGroupVersion.Group:
				return c.RESTClient, nil
			case extensions.SchemeGroupVersion.Group:
				return c.ExtensionsClient.RESTClient, nil
			default:
				if !registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
					return nil, fmt.Errorf("unknown api group/version: %s", gvk.String())
				}
				cfg, err := clientConfig.ClientConfig()
				if err != nil {
					return nil, err
				}
				gv := gvk.GroupVersion()
				cfg.GroupVersion = &gv
				cfg.APIPath = "/apis"
				cfg.Codec = thirdpartyresourcedata.NewCodec(c.ExtensionsClient.RESTClient.Codec(), gvk.Kind)
				return restclient.RESTClientFor(cfg)
			}
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			if toInternal {
				return api.Codecs.UniversalDecoder()
			}
			return api.Codecs.UniversalDeserializer()
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvk)
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			clientset := clientset.FromUnversionedClient(client)
			if err != nil {
				return nil, err
			}
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *batch.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
		PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) {
			switch obj := obj.(type) {
			case *api.Service:
				if obj.Spec.Type == api.ServiceTypeNodePort {
					msg := fmt.Sprintf(
						`You have exposed your service on an external port on all nodes in your
cluster.  If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (%s) to serve traffic.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`,
						makePortsString(obj.Spec.Ports, true))
					out.Write([]byte(msg))
				}
			}
		},
	}
}
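As a hedged sketch of driving the PauseObject closure defined above (the deployment variable is assumed to be an *extensions.Deployment fetched elsewhere); a true return means the object was already in the requested state:

f := NewFactory(nil)
// deployment: assumed *extensions.Deployment obtained from a prior Get.
wasPaused, err := f.PauseObject(deployment)
if err != nil {
	return err
}
if wasPaused {
	fmt.Println("deployment was already paused; nothing to do")
}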
Example #24
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		// If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that
		// have been dynamically added to the apiserver
		Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			checkErrWithPrefix("failed to get client config: ", err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}
			if discoverDynamicAPIs {
				client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"})
				checkErrWithPrefix("failed to find client for version v1: ", err)

				var versions []unversioned.GroupVersion
				var gvks []unversioned.GroupVersionKind
				retries := 3
				for i := 0; i < retries; i++ {
					versions, gvks, err = GetThirdPartyGroupVersions(client.Discovery())
					// Retry if we got a NotFound error, because the user may delete
					// a third-party group while GetThirdPartyGroupVersions is
					// running.
					if err == nil || !apierrors.IsNotFound(err) {
						break
					}
				}
				checkErrWithPrefix("failed to get third-party group versions: ", err)
				if len(versions) > 0 {
					priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper))
						return nil, nil
					}
					multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper)
					if !ok {
						CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper))
						return nil, nil
					}
					groupsMap := map[string][]unversioned.GroupVersion{}
					for _, version := range versions {
						groupsMap[version.Group] = append(groupsMap[version.Group], version)
					}
					for group, versionList := range groupsMap {
						preferredExternalVersion := versionList[0]

						thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group))
						checkErrWithPrefix("failed to create third party resource mapper: ", err)
						accessor := meta.NewAccessor()
						groupMeta := apimachinery.GroupMeta{
							GroupVersion:  preferredExternalVersion,
							GroupVersions: versionList,
							RESTMapper:    thirdPartyMapper,
							SelfLinker:    runtime.SelfLinker(accessor),
							InterfacesFor: makeInterfacesFor(versionList),
						}

						checkErrWithPrefix("failed to register group: ", registered.RegisterGroup(groupMeta))
						registered.AddThirdPartyAPIGroupVersions(versionList...)
						multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...)
					}
					priorityMapper.Delegate = multiMapper
					// Reassign to the RESTMapper here because priorityMapper is actually a copy, so if we
					// don't reassign, the above assignment won't actually update mapper.RESTMapper
					mapper.RESTMapper = priorityMapper
				}
			}
			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
			}
			// TODO: this should come from registered versions
			groups := []string{api.GroupName, autoscaling.GroupName, extensions.GroupName, federation.GroupName, batch.GroupName}
			// set a preferred version
			for _, group := range groups {
				gvs := registered.EnabledVersionsForGroup(group)
				if len(gvs) == 0 {
					continue
				}
				priorityRESTMapper.ResourcePriority = append(priorityRESTMapper.ResourcePriority, unversioned.GroupVersionResource{Group: group, Version: gvs[0].Version, Resource: meta.AnyResource})
				priorityRESTMapper.KindPriority = append(priorityRESTMapper.KindPriority, unversioned.GroupVersionKind{Group: group, Version: gvs[0].Version, Kind: meta.AnyKind})
			}
			for _, group := range groups {
				priorityRESTMapper.ResourcePriority = append(priorityRESTMapper.ResourcePriority, unversioned.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource})
				priorityRESTMapper.KindPriority = append(priorityRESTMapper.KindPriority, unversioned.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind})
			}
			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			cfg, err := clientConfig.ClientConfig()
			if err != nil {
				return nil, err
			}
			if err := client.SetKubernetesDefaults(cfg); err != nil {
				return nil, err
			}
			gvk := mapping.GroupVersionKind
			switch gvk.Group {
			case federation.GroupName:
				mappingVersion := mapping.GroupVersionKind.GroupVersion()
				return clients.FederationClientForVersion(&mappingVersion)
			case api.GroupName:
				cfg.APIPath = "/api"
			default:
				cfg.APIPath = "/apis"
			}
			gv := gvk.GroupVersion()
			cfg.GroupVersion = &gv
			if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
				cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv)
			}
			return restclient.RESTClientFor(cfg)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			if mapping.GroupVersionKind.Group == federation.GroupName {
				fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion)
				if err != nil {
					return nil, err
				}
				if mapping.GroupVersionKind.Kind == "Cluster" {
					return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil
				}
			}
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			var decoder runtime.Decoder
			if toInternal {
				decoder = api.Codecs.UniversalDecoder()
			} else {
				decoder = api.Codecs.UniversalDeserializer()
			}
			return thirdpartyresourcedata.NewDecoder(decoder, "")
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, options kubectl.PrintOptions) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(options), nil
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions)
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0])
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvks[0])
			}
		},
		ProtocolsForObject: func(object runtime.Object) (map[string]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getProtocols(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getProtocols(t.Spec), nil
			case *api.Service:
				return getServiceProtocols(t.Spec), nil
			case *extensions.Deployment:
				return getProtocols(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getProtocols(t.Spec.Template.Spec), nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0])
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return controller.ByLogging(pods) }
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvks[0])
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvks[0])
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvks[0])
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			clientset := clientset.FromUnversionedClient(client)
			if err != nil {
				return nil, err
			}
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				fedClient, err := clients.FederationClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				return &clientSwaggerSchema{
					c:        client,
					fedc:     fedClient,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *batch.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
				pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvks, _, err := api.Scheme.ObjectKinds(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0])
			}
		},
		// UpdatePodSpecForObject updates the pod specification for the provided object
		UpdatePodSpecForObject: func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) {
			// TODO: replace with a swagger schema based approach (identify pod template via schema introspection)
			switch t := obj.(type) {
			case *api.Pod:
				return true, fn(&t.Spec)
			case *api.ReplicationController:
				if t.Spec.Template == nil {
					t.Spec.Template = &api.PodTemplateSpec{}
				}
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.Deployment:
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.DaemonSet:
				return true, fn(&t.Spec.Template.Spec)
			case *extensions.ReplicaSet:
				return true, fn(&t.Spec.Template.Spec)
			case *apps.PetSet:
				return true, fn(&t.Spec.Template.Spec)
			case *batch.Job:
				return true, fn(&t.Spec.Template.Spec)
			default:
				return false, fmt.Errorf("the object is not a pod or does not have a pod template")
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
		PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) {
			switch obj := obj.(type) {
			case *api.Service:
				if obj.Spec.Type == api.ServiceTypeNodePort {
					msg := fmt.Sprintf(
						`You have exposed your service on an external port on all nodes in your
cluster.  If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (%s) to serve traffic.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`,
						makePortsString(obj.Spec.Ports, true))
					out.Write([]byte(msg))
				}

				if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok {
					msg := fmt.Sprintf(
						`You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges].
It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future.
Please use the loadBalancerSourceRanges field instead.

See http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md for more details.
`)
					out.Write([]byte(msg))
				}
			}
		},
	}
}
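A hedged sketch of using the UpdatePodSpecForObject closure above to rewrite container images in place. The obj variable and the image tag are illustrative, and the closure reports false for kinds that carry no pod template:

f := NewFactory(nil)
// obj: assumed runtime.Object decoded elsewhere (e.g. a Deployment).
updated, err := f.UpdatePodSpecForObject(obj, func(spec *api.PodSpec) error {
	for i := range spec.Containers {
		spec.Containers[i].Image = "gcr.io/example/app:v2" // assumed image reference
	}
	return nil
})
if err != nil {
	return err
}
if !updated {
	fmt.Println("object has no pod template; nothing was changed")
}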
Example #25
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should stop a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("scale job down")
		reaper, err := kubectl.ReaperFor(extensions.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})
})
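The test above only observes the Job reaper's end state. As a conceptual sketch of the two phases it exercises — scale parallelism to zero, drain the active pods, then delete — with every name below an assumption rather than the upstream implementation:

package jobreap

import "time"

// jobStopper captures the operations such a reaper needs (all assumed).
type jobStopper interface {
	ScaleParallelism(ns, name string, parallelism int) error
	WaitForActivePods(ns, name string, want int, timeout time.Duration) error
	DeleteJob(ns, name string) error
}

// stopJob mirrors the behavior the test checks: scale to zero, wait for the
// active pods to drain, then delete the Job object itself.
func stopJob(s jobStopper, ns, name string, timeout time.Duration) error {
	if err := s.ScaleParallelism(ns, name, 0); err != nil {
		return err
	}
	if err := s.WaitForActivePods(ns, name, 0, timeout); err != nil {
		return err
	}
	return s.DeleteJob(ns, name)
}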

// newTestJob returns a job which does one of several testing behaviors.
func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int) *extensions.Job {
	job := &extensions.Job{
Example #26
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  dsName,
								Image: image,
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
					},
				},
			},
		})
		Expect(err).NotTo(HaveOccurred())
		defer func() {
			framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
			dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), clientsetadapter.FromUnversionedClient(c))
			Expect(err).NotTo(HaveOccurred())
			err = dsReaper.Stop(ns, dsName, 0, nil)
			Expect(err).NotTo(HaveOccurred())
			err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, label))
			Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
		}()

		By("Check that daemon pods launch on every node of the cluster.")
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

		By("Stop a daemon pod, check that the daemon pod is revived.")
		podClient := c.Pods(ns)
Example #27
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}

			outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}

			// eventually this should allow choosing a group priority based on the order of the discovery doc; for now, hardcode a given order
			priorityRESTMapper := meta.PriorityRESTMapper{
				Delegate: outputRESTMapper,
				ResourcePriority: []unversioned.GroupVersionResource{
					{Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource},
				},
				KindPriority: []unversioned.GroupVersionKind{
					{Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
					{Group: metrics.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind},
				},
			}

			return priorityRESTMapper, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*restclient.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			switch mapping.GroupVersionKind.Group {
			case api.GroupName:
				return client.RESTClient, nil
			case autoscaling.GroupName:
				return client.AutoscalingClient.RESTClient, nil
			case batch.GroupName:
				return client.BatchClient.RESTClient, nil
			case extensions.GroupName:
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			if toInternal {
				return api.Codecs.UniversalDecoder()
			}
			return api.Codecs.UniversalDeserializer()
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return "", fmt.Errorf("invalid label selector: %v", err)
				}
				return selector.String(), nil
			case *extensions.ReplicaSet:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return "", fmt.Errorf("failed to convert label selector to selector: %v", err)
				}
				return selector.String(), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		MapBasedSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			case *extensions.ReplicaSet:
				// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
				// operator, DoubleEquals operator and In operator with only one element in the set.
				if len(t.Spec.Selector.MatchExpressions) > 0 {
					return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
				}
				return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			case *extensions.ReplicaSet:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}

			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil

			case *api.ReplicationController:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector := labels.SelectorFromSet(t.Spec.Selector)
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			case *extensions.ReplicaSet:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, numPods, err := GetFirstPod(c, t.Namespace, selector)
				if err != nil {
					return nil, err
				}
				if numPods > 1 {
					fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name)
				}

				return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil

			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvk)
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}

			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			clientset := clientset.FromUnversionedClient(client)
			if err != nil {
				return nil, err
			}
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
			version := gvk.GroupVersion()
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				selector := labels.SelectorFromSet(t.Spec.Selector)
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *extensions.Deployment:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *extensions.Job:
				selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
				if err != nil {
					return nil, fmt.Errorf("invalid label selector: %v", err)
				}
				pod, _, err := GetFirstPod(client, t.Namespace, selector)
				return pod, err
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
	}
}
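The Factory's fields are all closures, so a command resolves mappers, clients, and describers lazily on each call. The sketch below shows one plausible way a caller might wire them together; describePod is a hypothetical helper (not part of the factory), and it assumes the kubectl.Describer interface of this era exposes Describe(namespace, name string) (string, error).

// Hypothetical usage sketch for the Factory above. describePod is not
// part of the factory; the Describe signature is an assumption about
// this kubectl era.
func describePod(f *Factory, namespace, name string) error {
	// Resolve the RESTMapping for the core-group Pod kind.
	mapper, _ := f.Object()
	mapping, err := mapper.RESTMapping(unversioned.GroupKind{Kind: "Pod"})
	if err != nil {
		return err
	}
	// Look up a describer for that kind and print its output.
	describer, err := f.Describer(mapping)
	if err != nil {
		return err
	}
	output, err := describer.Describe(namespace, name)
	if err != nil {
		return err
	}
	fmt.Println(output)
	return nil
}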
Example #28
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(f *Framework, test string, image string) {
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname.
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Clean up the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		By("Cleaning up the replication controller")
		rcReaper, err := kubectl.ReaperFor("ReplicationController", f.Client)
		if err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
		if _, err = rcReaper.Stop(f.Namespace.Name, controller.Name, 0, nil); err != nil {
			Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	listTimeout := time.Minute
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := f.Client.Pods(f.Namespace.Name).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	t := time.Now()
	for {
		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
		if len(pods.Items) == replicas {
			break
		}
		if time.Since(t) > listTimeout {
			Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
				name, replicas, len(pods.Items), time.Since(t).Seconds())
		}
		time.Sleep(5 * time.Second)
		pods, err = f.Client.Pods(f.Namespace.Name).List(label, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
	}

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. The wait loops until all
	// pods are running, so a pod that never runs causes the test to time out.
	for _, pod := range pods.Items {
		err = f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, podResponseChecker{f.Client, f.Namespace.Name, label, name, true, pods}.checkAllResponses)
	if err != nil {
		Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
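For context, a helper like ServeImageOrFail is typically driven from a Ginkgo spec. The wiring below is a hypothetical sketch under that assumption; the spec description, framework name, and image tag are placeholders, not taken from the example above.

// Hypothetical Ginkgo wiring for ServeImageOrFail; the names and image
// are placeholders. Assumes the e2e framework's NewFramework helper and
// dot-imported Ginkgo, as is conventional in tests of this era.
var _ = Describe("ReplicationController", func() {
	f := NewFramework("replication-controller")

	It("should serve a basic image on each replica", func() {
		ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:1.1")
	})
})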
Example #29
func testDaemonSets(f *Framework) {
	ns := f.Namespace.Name
	c := f.Client
	simpleDSName := "simple-daemon-set"
	image := "gcr.io/google_containers/serve_hostname:1.1"
	label := map[string]string{"name": simpleDSName}
	retryTimeout := 1 * time.Minute
	retryInterval := 5 * time.Second

	Logf("Creating simple daemon set %s", simpleDSName)
	_, err := c.DaemonSets(ns).Create(&experimental.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: simpleDSName,
		},
		Spec: experimental.DaemonSetSpec{
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: label,
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  simpleDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("Check that reaper kills all daemon pods for %s", simpleDSName)
		dsReaper, err := kubectl.ReaperFor("DaemonSet", c)
		Expect(err).NotTo(HaveOccurred())
		_, err = dsReaper.Stop(ns, simpleDSName, 0, nil)
		Expect(err).NotTo(HaveOccurred())
		err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, label))
		Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
	}()

	By("Check that daemon pods launch on every node of the cluster.")
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")

	By("Stop a daemon pod, check that the daemon pod is revived.")
	podClient := c.Pods(ns)

	podList, err := podClient.List(labels.Set(label).AsSelector(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(podList.Items)).To(BeNumerically(">", 0))
	pod := podList.Items[0]
	err = podClient.Delete(pod.Name, nil)
	Expect(err).NotTo(HaveOccurred())
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnAllNodes(f, label))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")

	complexDSName := "complex-daemon-set"
	complexLabel := map[string]string{"name": complexDSName}
	nodeSelector := map[string]string{"color": "blue"}
	Logf("Creating daemon with a node selector %s", complexDSName)
	_, err = c.DaemonSets(ns).Create(&experimental.DaemonSet{
		ObjectMeta: api.ObjectMeta{
			Name: complexDSName,
		},
		Spec: experimental.DaemonSetSpec{
			Selector: complexLabel,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: complexLabel,
				},
				Spec: api.PodSpec{
					NodeSelector: nodeSelector,
					Containers: []api.Container{
						{
							Name:  complexDSName,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())

	By("Initially, daemon pods should not be running on any nodes.")
	err = wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")

	By("Change label of node, check that daemon pod is launched.")
	nodeClient := c.Nodes()
	nodeList, err := nodeClient.List(labels.Everything(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
	nodeList.Items[0].Labels = nodeSelector
	var newNode *api.Node
	err = wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
		newNode, err = nodeClient.Update(&nodeList.Items[0])
		if err == nil {
			return true, nil
		}
		if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
			Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, err
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(len(newNode.Labels)).To(Equal(1))
	err = wait.Poll(retryInterval, retryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
	Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")

	By("remove the node selector and wait for daemons to be unscheduled")
	newNode, err = nodeClient.Get(newNode.Name)
	Expect(err).NotTo(HaveOccurred(), "error getting node")
	newNode.Labels = map[string]string{}
	err = wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
		newNode, err = nodeClient.Update(newNode)
		if err == nil {
			return true, nil
		}
		if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
			Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, err
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(wait.Poll(retryInterval, retryTimeout, checkRunningOnNoNodes(f, complexLabel))).
		NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")

	By("We should now be able to delete the daemon set.")
	Expect(c.DaemonSets(ns).Delete(complexDSName)).NotTo(HaveOccurred())
}
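The two conflict-retry update loops above are identical, which suggests factoring them into a helper. The sketch below mirrors the loops as written (log and retry on StatusReasonConflict); updateNodeWithRetry is hypothetical, and the client.NodeInterface type name is an assumption about the unversioned client of this era.

// Hypothetical helper consolidating the two conflict-retry loops above;
// updateNodeWithRetry is not part of the original test.
func updateNodeWithRetry(nodeClient client.NodeInterface, node *api.Node) (*api.Node, error) {
	var updated *api.Node
	err := wait.Poll(updateRetryPeriod, updateRetryTimeout, func() (bool, error) {
		var updateErr error
		updated, updateErr = nodeClient.Update(node)
		if updateErr == nil {
			// Update succeeded; stop polling.
			return true, nil
		}
		if se, ok := updateErr.(*apierrs.StatusError); ok && se.ErrStatus.Reason == unversioned.StatusReasonConflict {
			// Resource version conflict; log and retry, as the loops above do.
			Logf("failed to update node due to resource version conflict")
			return false, nil
		}
		return false, updateErr
	})
	return updated, err
}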
Example #30
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})

	It("should delete a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring active pods == parallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, parallelism)
		Expect(err).NotTo(HaveOccurred())

		By("delete a job")
		reaper, err := kubectl.ReaperFor(batch.Kind("Job"), clientsetadapter.FromUnversionedClient(f.Client))
		Expect(err).NotTo(HaveOccurred())
		timeout := 1 * time.Minute
		err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
		Expect(err).NotTo(HaveOccurred())

		By("Ensuring job was deleted")
		_, err = f.Client.Extensions().Jobs(f.Namespace.Name).Get(job.Name)
		Expect(err).To(HaveOccurred())
		Expect(errors.IsNotFound(err)).To(BeTrue())
	})

	It("should fail a job", func() {
		By("Creating a job")
		job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
		activeDeadlineSeconds := int64(10)