func validateLabels(a, b string) bool { sA, eA := labels.Parse(a) if eA != nil { return false } sB, eB := labels.Parse(b) if eB != nil { return false } return sA.String() == sB.String() }
// init registers defaulting and conversion functions for this API group's
// types on the package Scheme.
func init() {
	Scheme.AddDefaultingFuncs(
		// ListOptions defaults to matching everything when no selectors are set.
		func(obj *ListOptions) {
			obj.LabelSelector = labels.Everything()
			obj.FieldSelector = fields.Everything()
		},
		// TODO: see about moving this into v1/defaults.go
		func(obj *PodExecOptions) {
			obj.Stderr = true
			obj.Stdout = true
		},
		func(obj *PodAttachOptions) {
			obj.Stderr = true
			obj.Stdout = true
		},
	)
	Scheme.AddConversionFuncs(
		func(in *util.Time, out *util.Time, s conversion.Scope) error {
			// Cannot deep copy these, because time.Time has unexported fields.
			*out = *in
			return nil
		},
		// String -> label selector: parse errors propagate to the caller.
		func(in *string, out *labels.Selector, s conversion.Scope) error {
			selector, err := labels.Parse(*in)
			if err != nil {
				return err
			}
			*out = selector
			return nil
		},
		// String -> field selector: parse errors propagate to the caller.
		func(in *string, out *fields.Selector, s conversion.Scope) error {
			selector, err := fields.ParseSelector(*in)
			if err != nil {
				return err
			}
			*out = selector
			return nil
		},
		// Selector -> string: a nil selector leaves the output untouched.
		func(in *labels.Selector, out *string, s conversion.Scope) error {
			if *in == nil {
				return nil
			}
			*out = (*in).String()
			return nil
		},
		func(in *fields.Selector, out *string, s conversion.Scope) error {
			if *in == nil {
				return nil
			}
			*out = (*in).String()
			return nil
		},
		func(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {
			// Cannot deep copy these, because inf.Dec has unexported fields.
			*out = *in.Copy()
			return nil
		},
	)
}
// getNodeSelector builds a selector string with one "spec.nodeName==<host>"
// clause per entry in nodeList and parses it with labels.Parse.
//
// NOTE(review): the clauses are comma-joined, and in label-selector syntax a
// comma means AND — with more than one node this would require spec.nodeName
// to equal every host at once. TODO confirm the consumer of this selector
// interprets the string differently (e.g. as a field selector) or that
// multiple nodes actually yield the intended match.
func (self *realPodsApi) getNodeSelector(nodeList *nodes.NodeList) (labels.Selector, error) {
	nodeLabels := []string{}
	// Items appears to be a map keyed by host name; the key is what we need here.
	for host := range nodeList.Items {
		nodeLabels = append(nodeLabels, fmt.Sprintf("spec.nodeName==%s", host))
	}
	glog.V(2).Infof("using labels %v to find pods", nodeLabels)
	return labels.Parse(strings.Join(nodeLabels, ","))
}
// Watch watches all Pods that have a build label, for deletion func (lw *buildPodDeleteLW) Watch(resourceVersion string) (watch.Interface, error) { // FIXME: since we cannot have OR on label name we'll just get builds with new label sel, err := labels.Parse(buildapi.BuildLabel) if err != nil { return nil, err } return lw.KubeClient.Pods(kapi.NamespaceAll).Watch(sel, fields.Everything(), resourceVersion) }
func main() { flag.Parse() glog.Info("Elasticsearch discovery") apiServer := *server if apiServer == "" { kubernetesService := os.Getenv("KUBERNETES_SERVICE_HOST") if kubernetesService == "" { glog.Fatalf("Please specify the Kubernetes server with --server") } apiServer = fmt.Sprintf("https://%s:%s", kubernetesService, os.Getenv("KUBERNETES_SERVICE_PORT")) } glog.Infof("Server: %s", apiServer) glog.Infof("Namespace: %q", *namespace) glog.Infof("selector: %q", *selector) config := client.Config{ Host: apiServer, BearerToken: *token, Insecure: true, } c, err := client.New(&config) if err != nil { glog.Fatalf("Failed to make client: %v", err) } l, err := labels.Parse(*selector) if err != nil { glog.Fatalf("Failed to parse selector %q: %v", *selector, err) } pods, err := c.Pods(*namespace).List(l, fields.Everything()) if err != nil { glog.Fatalf("Failed to list pods: %v", err) } glog.Infof("Elasticsearch pods in namespace %s with selector %q", *namespace, *selector) podIPs := []string{} for i := range pods.Items { p := &pods.Items[i] for attempt := 0; attempt < 10; attempt++ { glog.Infof("%d: %s PodIP: %s", i, p.Name, p.Status.PodIP) if p.Status.PodIP != "" { podIPs = append(podIPs, fmt.Sprintf(`"%s"`, p.Status.PodIP)) break } time.Sleep(1 * time.Second) p, err = c.Pods(*namespace).Get(p.Name) if err != nil { glog.Warningf("Failed to get pod %s: %v", p.Name, err) } } if p.Status.PodIP == "" { glog.Warningf("Failed to obtain PodIP for %s", p.Name) } } fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(podIPs, ", ")) }
func (c *k8sClient) activeServices(selector string) (*api.ServiceList, error) { l, err := labels.Parse(selector) if err != nil { glog.Fatalf("Failed to parse selector %q: %v", selector, err) } return c.Services(api.NamespaceAll).List(l) }
func main() { flag.Usage = usage flag.Parse() var ( cfg *kclient.Config err error ) if *local { cfg = &kclient.Config{Host: fmt.Sprintf("http://localhost:%d", *localPort)} } else { cfg, err = kclient.InClusterConfig() if err != nil { glog.Errorf("failed to load config: %v", err) flag.Usage() os.Exit(1) } } client, err = kclient.New(cfg) selector, err := labels.Parse(*userLabels) if err != nil { glog.Fatal(err) } tc, err := parseTimeCounts(*times, *counts) if err != nil { glog.Fatal(err) } if namespace == "" { glog.Fatal("POD_NAMESPACE is not set. Set to the namespace of the replication controller if running locally.") } scaler := Scaler{timeCounts: tc, selector: selector} if err != nil { glog.Fatal(err) } sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM) glog.Info("starting scaling") if err := scaler.Start(); err != nil { glog.Fatal(err) } <-sigChan glog.Info("stopping scaling") if err := scaler.Stop(); err != nil { glog.Fatal(err) } }
func listPods(client kclient.Interface) (*kapi.PodList, error) { // get builds with new label sel, err := labels.Parse(buildapi.BuildLabel) if err != nil { return nil, err } listNew, err := client.Pods(kapi.NamespaceAll).List(sel, fields.Everything()) if err != nil { return nil, err } // FIXME: get builds with old label - remove this when depracated label will be removed selOld, err := labels.Parse(buildapi.DeprecatedBuildLabel) if err != nil { return nil, err } listOld, err := client.Pods(kapi.NamespaceAll).List(selOld, fields.Everything()) if err != nil { return nil, err } listNew.Items = mergeWithoutDuplicates(listNew.Items, listOld.Items) return listNew, nil }
func (n *NodeOptions) Validate(checkNodeSelector bool) error { errList := []error{} if checkNodeSelector { if len(n.Selector) > 0 { if _, err := labels.Parse(n.Selector); err != nil { errList = append(errList, errors.New("--selector=<node_selector> must be a valid label selector")) } } if len(n.NodeNames) != 0 { errList = append(errList, errors.New("either specify --selector=<node_selector> or nodes but not both")) } } else if len(n.NodeNames) == 0 { errList = append(errList, errors.New("must provide --selector=<node_selector> or nodes")) } if len(n.PodSelector) > 0 { if _, err := labels.Parse(n.PodSelector); err != nil { errList = append(errList, errors.New("--pod-selector=<pod_selector> must be a valid label selector")) } } return kerrors.NewAggregate(errList) }
// discoverSeedsFromKubernetesMaster queries the Kubernetes API for running
// pods matching the KUBERNETES_SELECTOR env var and returns their IPs as
// seeds, skipping the pod whose name equals the local hostname. Returns nil
// (best-effort, errors only logged) when required env vars are unset or any
// step fails.
func discoverSeedsFromKubernetesMaster() []net.IP {
	var seeds []net.IP
	// Default master address comes from the in-cluster service env vars;
	// KUBERNETES_MASTER, when set, overrides it.
	kubeMaster := os.ExpandEnv("${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT}")
	if len(os.Getenv("KUBERNETES_MASTER")) > 0 {
		kubeMaster = os.Getenv("KUBERNETES_MASTER")
	}
	kubeMaster = os.ExpandEnv(kubeMaster)
	if len(os.Getenv("KUBERNETES_SELECTOR")) > 0 && len(kubeMaster) > 0 {
		// Default to https when no scheme was provided.
		if !(strings.HasPrefix(kubeMaster, "http://") || strings.HasPrefix(kubeMaster, "https://")) {
			kubeMaster = "https://" + kubeMaster
		}
		// ParseBool error deliberately ignored: unset/garbage means "secure".
		insecure, _ := strconv.ParseBool(os.Getenv("KUBERNETES_INSECURE"))
		kubeClient := client.NewOrDie(&client.Config{
			Host:     os.ExpandEnv(kubeMaster),
			Insecure: len(os.Getenv("KUBERNETES_INSECURE")) > 0 && insecure,
		})
		selector, err := labels.Parse(os.Getenv("KUBERNETES_SELECTOR"))
		if err != nil {
			log.Println(err)
		} else {
			namespace := os.Getenv("KUBERNETES_NAMESPACE")
			if len(namespace) == 0 {
				namespace = api.NamespaceDefault
			}
			podList, err := kubeClient.Pods(namespace).List(selector, fields.Everything())
			if err != nil {
				log.Println(err)
			} else {
				currentHostname, err := os.Hostname()
				if err != nil {
					log.Println(err)
				}
				for _, pod := range podList.Items {
					// Only running pods that already have an IP; skip self.
					// NOTE(review): assumes the pod name equals the local
					// hostname — TODO confirm for this deployment model.
					if pod.Status.Phase == api.PodRunning && len(pod.Status.PodIP) > 0 && pod.Name != currentHostname {
						podIP := net.ParseIP(pod.Status.PodIP)
						if podIP != nil {
							seeds = append(seeds, podIP)
						}
					}
				}
			}
		}
	}
	return seeds
}
// SelectorParam defines a selector that should be applied to the object types to load. // This will not affect files loaded from disk or URL. If the parameter is empty it is // a no-op - to select all resources invoke `b.Selector(labels.Everything)`. func (b *Builder) SelectorParam(s string) *Builder { selector, err := labels.Parse(s) if err != nil { b.errs = append(b.errs, fmt.Errorf("the provided selector %q is not valid: %v", s, err)) return b } if selector.Empty() { return b } if b.selectAll { b.errs = append(b.errs, fmt.Errorf("found non empty selector %q with previously set 'all' parameter. ", s)) return b } return b.Selector(selector) }
func (v *VolumeOptions) Validate(args []string) error { errList := []error{} if len(v.Selector) > 0 { if _, err := labels.Parse(v.Selector); err != nil { errList = append(errList, errors.New("--selector=<selector> must be a valid label selector")) } if v.All { errList = append(errList, errors.New("either specify --selector or --all but not both")) } } if len(v.Filenames) == 0 && len(args) < 1 { errList = append(errList, errors.New("one or more resources must be specified as <resource> <name> or <resource>/<name>")) } numOps := 0 if v.Add { numOps++ } if v.Remove { numOps++ } if v.List { numOps++ } switch { case numOps == 0: errList = append(errList, errors.New("must provide a volume operation. Valid values are --add, --remove and --list")) case numOps > 1: errList = append(errList, errors.New("you may only specify one operation at a time")) } if v.List && len(v.Output) > 0 { errList = append(errList, errors.New("--list and --output may not be specified together")) } err := v.AddOpts.Validate(v.Add) if err != nil { errList = append(errList, err) } // Removing all volumes for the resource type needs confirmation if v.Remove && len(v.Name) == 0 && !v.Confirm { errList = append(errList, errors.New("must provide --confirm for removing more than one volume")) } return kerrors.NewAggregate(errList) }
func parseSelectorQueryParams(query url.Values, version, apiResource string) (label labels.Selector, field fields.Selector, err error) { labelString := query.Get(api.LabelSelectorQueryParam(version)) label, err = labels.Parse(labelString) if err != nil { return nil, nil, errors.NewBadRequest(fmt.Sprintf("The 'labels' selector parameter (%s) could not be parsed: %v", labelString, err)) } convertToInternalVersionFunc := func(label, value string) (newLabel, newValue string, err error) { return api.Scheme.ConvertFieldLabel(version, apiResource, label, value) } fieldString := query.Get(api.FieldSelectorQueryParam(version)) field, err = fields.ParseAndTransformSelector(fieldString, convertToInternalVersionFunc) if err != nil { return nil, nil, errors.NewBadRequest(fmt.Sprintf("The 'fields' selector parameter (%s) could not be parsed: %v", fieldString, err)) } return label, field, nil }
func (l *ListPodsOptions) runListPods(node *kapi.Node, printer kubectl.ResourcePrinter) error { labelSelector, err := labels.Parse(l.Options.PodSelector) if err != nil { return err } fieldSelector := fields.Set{GetPodHostFieldLabel(node.TypeMeta.APIVersion): node.ObjectMeta.Name}.AsSelector() // Filter all pods that satisfies pod label selector and belongs to the given node pods, err := l.Options.Kclient.Pods(kapi.NamespaceAll).List(labelSelector, fieldSelector) if err != nil { return err } fmt.Fprintln(l.Options.Writer, "\nListing matched pods on node: ", node.ObjectMeta.Name, "\n") printer.PrintObj(pods, l.Options.Writer) return err }
// List returns an empty list but adds delete events to the store for all Builds that have been deleted but still have pods.
func (lw *buildDeleteLW) List() (runtime.Object, error) {
	glog.V(5).Info("Checking for deleted builds")
	// BuildLabel is a constant key, so the parse error is deliberately ignored.
	sel, _ := labels.Parse(buildapi.BuildLabel)
	podList, err := lw.KubeClient.Pods(kapi.NamespaceAll).List(sel, fields.Everything())
	if err != nil {
		glog.V(4).Infof("Failed to find any pods due to error %v", err)
		return nil, err
	}
	for _, pod := range podList.Items {
		// Skip pods whose build label value is empty.
		if len(pod.Labels[buildapi.BuildLabel]) == 0 {
			continue
		}
		glog.V(5).Infof("Found build pod %s/%s", pod.Namespace, pod.Name)
		build, err := lw.Client.Builds(pod.Namespace).Get(pod.Labels[buildapi.BuildLabel])
		// Any error other than NotFound aborts the whole listing.
		if err != nil && !kerrors.IsNotFound(err) {
			glog.V(4).Infof("Error getting build for pod %s/%s: %v", pod.Namespace, pod.Name, err)
			return nil, err
		}
		// NotFound means the build has been deleted; mark it nil so a
		// synthetic delete event is queued below.
		if err != nil && kerrors.IsNotFound(err) {
			build = nil
		}
		if build == nil {
			// Synthesize a minimal Build identifying the deleted object so
			// the store can queue the delete event.
			deletedBuild := &buildapi.Build{
				ObjectMeta: kapi.ObjectMeta{
					Name:      pod.Labels[buildapi.BuildLabel],
					Namespace: pod.Namespace,
				},
			}
			// NOTE(review): the log text says "deleting pod" but the code
			// queues a build delete event — confirm which is intended.
			glog.V(4).Infof("No build found for build pod %s/%s, deleting pod", pod.Namespace, pod.Name)
			err := lw.store.Delete(deletedBuild)
			if err != nil {
				glog.V(4).Infof("Error queuing delete event: %v", err)
			}
		} else {
			glog.V(5).Infof("Found build %s/%s for pod %s", build.Namespace, build.Name, pod.Name)
		}
	}
	// The interesting work happens via store.Delete; the returned list is empty.
	return &buildapi.BuildList{}, nil
}
func (l *ListPodsOptions) RunListPods(node *kapi.Node) error { labelSelector, err := labels.Parse(l.Options.PodSelector) if err != nil { return err } fieldSelector := fields.Set{GetPodHostFieldLabel(node.TypeMeta.APIVersion): node.ObjectMeta.Name}.AsSelector() // Filter all pods that satisfies pod label selector and belongs to the given node pods, err := l.Options.Kclient.Pods(kapi.NamespaceAll).List(labelSelector, fieldSelector) if err != nil { return err } var printerWithHeaders, printerNoHeaders kubectl.ResourcePrinter if l.Options.CmdPrinterOutput { printerWithHeaders = l.Options.CmdPrinter printerNoHeaders = l.Options.CmdPrinter } else { printerWithHeaders, printerNoHeaders, err = l.Options.GetPrintersByResource("pod") if err != nil { return err } } firstPod := true for _, pod := range pods.Items { if firstPod { fmt.Fprintln(l.Options.Writer, "\nListing matched pods on node: ", node.ObjectMeta.Name, "\n") printerWithHeaders.PrintObj(&pod, l.Options.Writer) firstPod = false } else { printerNoHeaders.PrintObj(&pod, l.Options.Writer) } } return err }
// TestSelectionPredicate exercises SelectionPredicate.Matches and
// MatchesSingle across combinations of label/field selectors, attribute
// sets, and attribute-lookup errors.
func TestSelectionPredicate(t *testing.T) {
	table := map[string]struct {
		labelSelector, fieldSelector string
		labels                       labels.Set
		fields                       fields.Set
		err                          error
		shouldMatch                  bool
		matchSingleKey               string
	}{
		// Both selectors satisfied.
		"A": {
			labelSelector: "name=foo",
			fieldSelector: "uid=12345",
			labels:        labels.Set{"name": "foo"},
			fields:        fields.Set{"uid": "12345"},
			shouldMatch:   true,
		},
		// Field selector unsatisfied.
		"B": {
			labelSelector: "name=foo",
			fieldSelector: "uid=12345",
			labels:        labels.Set{"name": "foo"},
			fields:        fields.Set{},
			shouldMatch:   false,
		},
		// Label selector unsatisfied.
		"C": {
			labelSelector: "name=foo",
			fieldSelector: "uid=12345",
			labels:        labels.Set{},
			fields:        fields.Set{"uid": "12345"},
			shouldMatch:   false,
		},
		// A metadata.name pin should also surface through MatchesSingle.
		"D": {
			fieldSelector:  "metadata.name=12345",
			labels:         labels.Set{},
			fields:         fields.Set{"metadata.name": "12345"},
			shouldMatch:    true,
			matchSingleKey: "12345",
		},
		// GetAttrs errors must propagate and prevent a match.
		"error": {
			labelSelector: "name=foo",
			fieldSelector: "uid=12345",
			err:           errors.New("maybe this is a 'wrong object type' error"),
			shouldMatch:   false,
		},
	}
	for name, item := range table {
		parsedLabel, err := labels.Parse(item.labelSelector)
		if err != nil {
			panic(err)
		}
		parsedField, err := fields.ParseSelector(item.fieldSelector)
		if err != nil {
			panic(err)
		}
		sp := &SelectionPredicate{
			Label: parsedLabel,
			Field: parsedField,
			// GetAttrs returns this fixture's attributes (or error) for any object.
			GetAttrs: func(runtime.Object) (label labels.Set, field fields.Set, err error) {
				return item.labels, item.fields, item.err
			},
		}
		got, err := sp.Matches(&Ignored{})
		if e, a := item.err, err; e != a {
			t.Errorf("%v: expected %v, got %v", name, e, a)
			continue
		}
		if e, a := item.shouldMatch, got; e != a {
			t.Errorf("%v: expected %v, got %v", name, e, a)
		}
		if key := item.matchSingleKey; key != "" {
			got, ok := sp.MatchesSingle()
			if !ok {
				t.Errorf("%v: expected single match", name)
			}
			if e, a := key, got; e != a {
				t.Errorf("%v: expected %v, got %v", name, e, a)
			}
		}
	}
}
// RunDeploy executes the deploy subcommand against the configured deployment
// config: deploy latest, retry a failed deployment, cancel a running one,
// re-enable triggers, or (by default) describe the latest deployments.
func (o DeployOptions) RunDeploy() error {
	config, err := o.osClient.DeploymentConfigs(o.namespace).Get(o.deploymentConfigName)
	if err != nil {
		return err
	}

	// commandClient adapts the OpenShift and Kubernetes clients to the
	// narrow function-typed interfaces the individual commands consume.
	commandClient := &deployCommandClientImpl{
		GetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {
			return o.kubeClient.ReplicationControllers(namespace).Get(name)
		},
		ListDeploymentsForConfigFn: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			list, err := o.kubeClient.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
			if err != nil {
				return nil, err
			}
			return list, nil
		},
		UpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
			return o.osClient.DeploymentConfigs(config.Namespace).Update(config)
		},
		UpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)
		},
		// Deployer pods carry a label keyed by DeployerPodForDeploymentLabel
		// whose value is the deployment name.
		ListDeployerPodsForFn: func(namespace, deploymentName string) (*kapi.PodList, error) {
			selector, err := labels.Parse(fmt.Sprintf("%s=%s", deployapi.DeployerPodForDeploymentLabel, deploymentName))
			if err != nil {
				return nil, err
			}
			return o.kubeClient.Pods(namespace).List(selector, fields.Everything())
		},
		DeletePodFn: func(pod *kapi.Pod) error {
			return o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil)
		},
	}

	// At most one of these mode flags is expected to be set; the default
	// branch describes the latest deployments.
	switch {
	case o.deployLatest:
		c := &deployLatestCommand{client: commandClient}
		err = c.deploy(config, o.out)
	case o.retryDeploy:
		c := &retryDeploymentCommand{client: commandClient}
		err = c.retry(config, o.out)
	case o.cancelDeploy:
		c := &cancelDeploymentCommand{client: commandClient}
		err = c.cancel(config, o.out)
	case o.enableTriggers:
		t := &triggerEnabler{
			updateConfig: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return o.osClient.DeploymentConfigs(namespace).Update(config)
			},
		}
		err = t.enableTriggers(config, o.out)
	default:
		describer := describe.NewLatestDeploymentsDescriber(o.osClient, o.kubeClient, -1)
		desc, err := describer.Describe(config.Namespace, config.Name)
		if err != nil {
			return err
		}
		fmt.Fprint(o.out, desc)
	}

	return err
}
// TestListPodListSelection verifies that pod storage.List filters by label
// and field selectors (name, labels, nodeName, phase) against a fake etcd
// backend seeded with five pods.
func TestListPodListSelection(t *testing.T) {
	fakeEtcdClient, etcdStorage := newEtcdStorage(t)
	ctx := api.NewDefaultContext()
	storage := NewStorage(etcdStorage, nil).Pod
	rootKey := etcdtest.AddPrefix("pods/default")
	key := etcdtest.AddPrefix("pods/default/zot")
	// Seed the directory listing with pods exercising different names,
	// labels, host bindings, and phases.
	fakeEtcdClient.Data[rootKey] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{
						ObjectMeta: api.ObjectMeta{Name: "foo"},
					})},
					{Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{
						ObjectMeta: api.ObjectMeta{Name: "bar"},
						Spec:       api.PodSpec{NodeName: "barhost"},
					})},
					{Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{
						ObjectMeta: api.ObjectMeta{Name: "baz"},
						Status:     api.PodStatus{Phase: api.PodFailed},
					})},
					{Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{
						ObjectMeta: api.ObjectMeta{
							Name:   "qux",
							Labels: map[string]string{"label": "qux"},
						},
					})},
					{Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{
						ObjectMeta: api.ObjectMeta{Name: "zot"},
					})},
				},
			},
		},
	}
	// Single-key response used when the field selector pins metadata.name.
	fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{
					ObjectMeta: api.ObjectMeta{Name: "zot"},
				}),
			},
		},
	}
	table := []struct {
		label, field string
		expectedIDs  util.StringSet
	}{
		// No selectors: every pod is returned.
		{
			expectedIDs: util.NewStringSet("foo", "bar", "baz", "qux", "zot"),
		},
		{
			field:       "metadata.name=zot",
			expectedIDs: util.NewStringSet("zot"),
		},
		{
			label:       "label=qux",
			expectedIDs: util.NewStringSet("qux"),
		},
		{
			field:       "status.phase=Failed",
			expectedIDs: util.NewStringSet("baz"),
		},
		{
			field:       "spec.nodeName=barhost",
			expectedIDs: util.NewStringSet("bar"),
		},
		// Empty nodeName selects unscheduled pods; != selects scheduled ones.
		{
			field:       "spec.nodeName=",
			expectedIDs: util.NewStringSet("foo", "baz", "qux", "zot"),
		},
		{
			field:       "spec.nodeName!=",
			expectedIDs: util.NewStringSet("bar"),
		},
	}
	for index, item := range table {
		label, err := labels.Parse(item.label)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		field, err := fields.ParseSelector(item.field)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		podsObj, err := storage.List(ctx, label, field)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		pods := podsObj.(*api.PodList)
		set := util.NewStringSet()
		for i := range pods.Items {
			set.Insert(pods.Items[i].Name)
		}
		// Only the counts are compared; per-name membership is left to the
		// commented-out check below.
		if e, a := len(item.expectedIDs), len(set); e != a {
			t.Errorf("%v: Expected %v, got %v", index, item.expectedIDs, set)
		}
		/*for _, pod := range pods.Items {
			if !item.expectedIDs.Has(pod.Name) {
				t.Errorf("%v: Unexpected pod %v", index, pod.Name)
			}
			t.Logf("%v: Got pod Name: %v", index, pod.Name)
		}*/
	}
}
// AnyDeployerPodSelector returns a label Selector which can be used to find // all deployer pods across all deployments, including hook and custom // deployer pods. func AnyDeployerPodSelector() labels.Selector { sel, _ := labels.Parse(deployapi.DeployerPodForDeploymentLabel) return sel }
// List lists all Pods that have a build label. func (lw *podLW) List() (runtime.Object, error) { sel, _ := labels.Parse(buildapi.BuildLabel) return lw.client.Pods(kapi.NamespaceAll).List(sel, fields.Everything()) }
// Create creates a DeploymentController.
// It wires a reflector feeding replication controllers into a FIFO queue and
// returns a RetryController that hands each queued deployment to the
// DeploymentController, retrying non-fatal failures once.
func (factory *DeploymentControllerFactory) Create() controller.RunnableController {
	deploymentLW := &deployutil.ListWatcherImpl{
		// TODO: Investigate specifying annotation field selectors to fetch only 'deployments'
		// Currently field selectors are not supported for replication controllers
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	deploymentQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	// Continuously mirror replication controllers into the queue.
	cache.NewReflector(deploymentLW, &kapi.ReplicationController{}, deploymentQueue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	deployController := &DeploymentController{
		serviceAccount: factory.ServiceAccount,
		// Thin adapters over the Kubernetes client for deployments (RCs).
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
		},
		// Thin adapters over the Kubernetes client for pods.
		podClient: &podClientImpl{
			getPodFunc: func(namespace, name string) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Get(name)
			},
			createPodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Create(pod)
			},
			deletePodFunc: func(namespace, name string) error {
				return factory.KubeClient.Pods(namespace).Delete(name, nil)
			},
			updatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
				return factory.KubeClient.Pods(namespace).Update(pod)
			},
			// Find deployer pods using the label they should all have which
			// correlates them to the named deployment.
			getDeployerPodsForFunc: func(namespace, name string) ([]kapi.Pod, error) {
				labelSel, err := labels.Parse(fmt.Sprintf("%s=%s", deployapi.DeployerPodForDeploymentLabel, name))
				if err != nil {
					return []kapi.Pod{}, err
				}
				pods, err := factory.KubeClient.Pods(namespace).List(labelSel, fields.Everything())
				if err != nil {
					return []kapi.Pod{}, err
				}
				return pods.Items, nil
			},
		},
		makeContainer: func(strategy *deployapi.DeploymentStrategy) (*kapi.Container, error) {
			return factory.makeContainer(strategy)
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: deploymentQueue,
		RetryManager: controller.NewQueueRetryManager(
			deploymentQueue,
			cache.MetaNamespaceKeyFunc,
			// Retry policy: never retry fatal errors, and give up after one retry.
			func(obj interface{}, err error, retries controller.Retry) bool {
				if _, isFatal := err.(fatalError); isFatal {
					kutil.HandleError(err)
					return false
				}
				if retries.Count > 1 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			deployment := obj.(*kapi.ReplicationController)
			return deployController.Handle(deployment)
		},
	}
}
// Watch watches all Pods that have a build label, for deletion func (lw *buildPodDeleteLW) Watch(resourceVersion string) (watch.Interface, error) { sel, _ := labels.Parse(buildapi.BuildLabel) return lw.KubeClient.Pods(kapi.NamespaceAll).Watch(sel, fields.Everything(), resourceVersion) }
// FuzzerFor can randomly populate api objects that are destined for version.
// Each custom func below constrains a type so that the fuzzed object can
// round-trip through the target API version's serialization.
func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
	f := fuzz.New().NilChance(.5).NumElements(1, 1)
	if src != nil {
		f.RandSource(src)
	}
	f.Funcs(
		func(j *runtime.PluginBase, c fuzz.Continue) {
			// Do nothing; this struct has only a Kind field and it must stay blank in memory.
		},
		func(j *runtime.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *api.TypeMeta, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = ""
			j.Kind = ""
		},
		func(j *api.ObjectMeta, c fuzz.Continue) {
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
			j.UID = types.UID(c.RandString())
			j.GenerateName = c.RandString()

			// Rfc3339Copy truncates to a precision that survives serialization.
			var sec, nsec int64
			c.Fuzz(&sec)
			c.Fuzz(&nsec)
			j.CreationTimestamp = util.Unix(sec, nsec).Rfc3339Copy()
		},
		func(j *api.ObjectReference, c fuzz.Continue) {
			// We have to customize the randomization of TypeMetas because their
			// APIVersion and Kind must remain blank in memory.
			j.APIVersion = c.RandString()
			j.Kind = c.RandString()
			j.Namespace = c.RandString()
			j.Name = c.RandString()
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.FieldPath = c.RandString()
		},
		func(j *api.ListMeta, c fuzz.Continue) {
			j.ResourceVersion = strconv.FormatUint(c.RandUint64(), 10)
			j.SelfLink = c.RandString()
		},
		func(j *api.ListOptions, c fuzz.Continue) {
			// TODO: add some parsing
			j.LabelSelector, _ = labels.Parse("a=b")
			j.FieldSelector, _ = fields.ParseSelector("a=b")
		},
		func(j *api.PodPhase, c fuzz.Continue) {
			statuses := []api.PodPhase{api.PodPending, api.PodRunning, api.PodFailed, api.PodUnknown}
			*j = statuses[c.Rand.Intn(len(statuses))]
		},
		func(j *api.PodTemplateSpec, c fuzz.Continue) {
			// TODO: v1beta1/2 can't round trip a nil template correctly, fix by having v1beta1/2
			// conversion compare converted object to nil via DeepEqual
			j.ObjectMeta = api.ObjectMeta{}
			c.Fuzz(&j.ObjectMeta)
			// Only Labels survive the round trip on the template's metadata.
			j.ObjectMeta = api.ObjectMeta{Labels: j.ObjectMeta.Labels}
			j.Spec = api.PodSpec{}
			c.Fuzz(&j.Spec)
		},
		func(j *api.Binding, c fuzz.Continue) {
			c.Fuzz(&j.ObjectMeta)
			j.Target.Name = c.RandString()
		},
		func(j *api.ReplicationControllerSpec, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			//j.TemplateRef = nil // this is required for round trip
		},
		func(j *api.ReplicationControllerStatus, c fuzz.Continue) {
			// only replicas round trips
			j.Replicas = int(c.RandUint64())
		},
		func(j *api.List, c fuzz.Continue) {
			c.FuzzNoCustom(j) // fuzz self without calling this function again
			// TODO: uncomment when round trip starts from a versioned object
			if false { //j.Items == nil {
				j.Items = []runtime.Object{}
			}
		},
		func(j *runtime.Object, c fuzz.Continue) {
			// TODO: uncomment when round trip starts from a versioned object
			if true { //c.RandBool() {
				*j = &runtime.Unknown{
					TypeMeta: runtime.TypeMeta{Kind: "Something", APIVersion: "unknown"},
					RawJSON:  []byte(`{"apiVersion":"unknown","kind":"Something","someKey":"someValue"}`),
				}
			} else {
				types := []runtime.Object{&api.Pod{}, &api.ReplicationController{}}
				t := types[c.Rand.Intn(len(types))]
				c.Fuzz(t)
				*j = t
			}
		},
		func(pb map[docker.Port][]docker.PortBinding, c fuzz.Continue) {
			// This is necessary because keys with nil values get omitted.
			// TODO: Is this a bug?
			pb[docker.Port(c.RandString())] = []docker.PortBinding{
				{c.RandString(), c.RandString()},
				{c.RandString(), c.RandString()},
			}
		},
		func(pm map[string]docker.PortMapping, c fuzz.Continue) {
			// This is necessary because keys with nil values get omitted.
			// TODO: Is this a bug?
			pm[c.RandString()] = docker.PortMapping{
				c.RandString(): c.RandString(),
			}
		},
		func(q *resource.Quantity, c fuzz.Continue) {
			// Real Quantity fuzz testing is done elsewhere;
			// this limited subset of functionality survives
			// round-tripping to v1beta1/2.
			q.Amount = &inf.Dec{}
			q.Format = resource.DecimalExponent
			//q.Amount.SetScale(inf.Scale(-c.Intn(12)))
			q.Amount.SetUnscaled(c.Int63n(1000))
		},
		func(p *api.PullPolicy, c fuzz.Continue) {
			policies := []api.PullPolicy{api.PullAlways, api.PullNever, api.PullIfNotPresent}
			*p = policies[c.Rand.Intn(len(policies))]
		},
		func(rp *api.RestartPolicy, c fuzz.Continue) {
			policies := []api.RestartPolicy{api.RestartPolicyAlways, api.RestartPolicyNever, api.RestartPolicyOnFailure}
			*rp = policies[c.Rand.Intn(len(policies))]
		},
		func(vs *api.VolumeSource, c fuzz.Continue) {
			// Exactly one of the fields must be set.
			v := reflect.ValueOf(vs).Elem()
			i := int(c.RandUint64() % uint64(v.NumField()))
			v = v.Field(i).Addr()
			// Use a new fuzzer which cannot populate nil to ensure one field will be set.
			fuzz.New().NilChance(0).NumElements(1, 1).Fuzz(v.Interface())
		},
		func(d *api.DNSPolicy, c fuzz.Continue) {
			policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault}
			*d = policies[c.Rand.Intn(len(policies))]
		},
		func(p *api.Protocol, c fuzz.Continue) {
			protocols := []api.Protocol{api.ProtocolTCP, api.ProtocolUDP}
			*p = protocols[c.Rand.Intn(len(protocols))]
		},
		func(p *api.ServiceAffinity, c fuzz.Continue) {
			types := []api.ServiceAffinity{api.ServiceAffinityClientIP, api.ServiceAffinityNone}
			*p = types[c.Rand.Intn(len(types))]
		},
		func(p *api.ServiceType, c fuzz.Continue) {
			types := []api.ServiceType{api.ServiceTypeClusterIP, api.ServiceTypeNodePort, api.ServiceTypeLoadBalancer}
			*p = types[c.Rand.Intn(len(types))]
		},
		func(ct *api.Container, c fuzz.Continue) {
			c.FuzzNoCustom(ct) // fuzz self without calling this function again
			ct.TerminationMessagePath = "/" + ct.TerminationMessagePath // Must be non-empty
		},
		func(ev *api.EnvVar, c fuzz.Continue) {
			ev.Name = c.RandString()
			// Value and ValueFrom are mutually exclusive; pick one at random.
			if c.RandBool() {
				ev.Value = c.RandString()
			} else {
				ev.ValueFrom = &api.EnvVarSource{}
				ev.ValueFrom.FieldRef = &api.ObjectFieldSelector{}
				versions := []string{"v1beta1", "v1beta2", "v1beta3"}
				ev.ValueFrom.FieldRef.APIVersion = versions[c.Rand.Intn(len(versions))]
				ev.ValueFrom.FieldRef.FieldPath = c.RandString()
			}
		},
		func(sc *api.SecurityContext, c fuzz.Continue) {
			c.FuzzNoCustom(sc) // fuzz self without calling this function again
			priv := c.RandBool()
			sc.Privileged = &priv
			sc.Capabilities = &api.Capabilities{
				Add:  make([]api.Capability, 0),
				Drop: make([]api.Capability, 0),
			}
			c.Fuzz(&sc.Capabilities.Add)
			c.Fuzz(&sc.Capabilities.Drop)
		},
		func(e *api.Event, c fuzz.Continue) {
			c.FuzzNoCustom(e) // fuzz self without calling this function again
			// Fix event count to 1, otherwise, if a v1beta1 or v1beta2 event has a count set arbitrarily, it's count is ignored
			if e.FirstTimestamp.IsZero() {
				e.Count = 1
			} else {
				c.Fuzz(&e.Count)
			}
		},
		func(s *api.Secret, c fuzz.Continue) {
			c.FuzzNoCustom(s) // fuzz self without calling this function again
			s.Type = api.SecretTypeOpaque
		},
		func(pv *api.PersistentVolume, c fuzz.Continue) {
			c.FuzzNoCustom(pv) // fuzz self without calling this function again
			types := []api.PersistentVolumePhase{api.VolumeAvailable, api.VolumePending, api.VolumeBound, api.VolumeReleased, api.VolumeFailed}
			pv.Status.Phase = types[c.Rand.Intn(len(types))]
			pv.Status.Message = c.RandString()
			reclamationPolicies := []api.PersistentVolumeReclaimPolicy{api.PersistentVolumeReclaimRecycle, api.PersistentVolumeReclaimRetain}
			pv.Spec.PersistentVolumeReclaimPolicy = reclamationPolicies[c.Rand.Intn(len(reclamationPolicies))]
		},
		func(pvc *api.PersistentVolumeClaim, c fuzz.Continue) {
			c.FuzzNoCustom(pvc) // fuzz self without calling this function again
			types := []api.PersistentVolumeClaimPhase{api.ClaimBound, api.ClaimPending}
			pvc.Status.Phase = types[c.Rand.Intn(len(types))]
		},
		func(s *api.NamespaceSpec, c fuzz.Continue) {
			s.Finalizers = []api.FinalizerName{api.FinalizerKubernetes}
		},
		func(s *api.NamespaceStatus, c fuzz.Continue) {
			s.Phase = api.NamespaceActive
		},
		func(http *api.HTTPGetAction, c fuzz.Continue) {
			c.FuzzNoCustom(http) // fuzz self without calling this function again
			http.Path = "/" + http.Path     // can't be blank
			http.Scheme = "x" + http.Scheme // can't be blank
		},
		func(ss *api.ServiceSpec, c fuzz.Continue) {
			c.FuzzNoCustom(ss) // fuzz self without calling this function again
			if len(ss.Ports) == 0 {
				// There must be at least 1 port.
				ss.Ports = append(ss.Ports, api.ServicePort{})
				c.Fuzz(&ss.Ports[0])
			}
			for i := range ss.Ports {
				switch ss.Ports[i].TargetPort.Kind {
				case util.IntstrInt:
					ss.Ports[i].TargetPort.IntVal = 1 + ss.Ports[i].TargetPort.IntVal%65535 // non-zero
				case util.IntstrString:
					ss.Ports[i].TargetPort.StrVal = "x" + ss.Ports[i].TargetPort.StrVal // non-empty
				}
			}
		},
		func(n *api.Node, c fuzz.Continue) {
			c.FuzzNoCustom(n)
			n.Spec.ExternalID = "external"
		},
	)
	return f
}
func (e *EvacuateOptions) RunEvacuate(node *kapi.Node) error { if e.DryRun { listpodsOp := ListPodsOptions{Options: e.Options} return listpodsOp.Run() } // We do *not* automatically mark the node unschedulable to perform evacuation. // Rationale: If we unschedule the node and later the operation is unsuccessful (stopped by user, network error, etc.), // we may not be able to recover in some cases to mark the node back to schedulable. To avoid these cases, we recommend // user to explicitly set the node to schedulable/unschedulable. if !node.Spec.Unschedulable { return fmt.Errorf("Node '%s' must be unschedulable to perform evacuation.\nYou can mark the node unschedulable with 'openshift admin manage-node %s --schedulable=false'", node.ObjectMeta.Name, node.ObjectMeta.Name) } labelSelector, err := labels.Parse(e.Options.PodSelector) if err != nil { return err } fieldSelector := fields.Set{GetPodHostFieldLabel(node.TypeMeta.APIVersion): node.ObjectMeta.Name}.AsSelector() // Filter all pods that satisfies pod label selector and belongs to the given node pods, err := e.Options.Kclient.Pods(kapi.NamespaceAll).List(labelSelector, fieldSelector) if err != nil { return err } rcs, err := e.Options.Kclient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything()) if err != nil { return err } printerWithHeaders, printerNoHeaders, err := e.Options.GetPrintersByResource("pod") if err != nil { return err } errList := []error{} firstPod := true numPodsWithNoRC := 0 // grace = 0 implies delete the pod immediately grace := int64(0) deleteOptions := &kapi.DeleteOptions{GracePeriodSeconds: &grace} for _, pod := range pods.Items { foundrc := false for _, rc := range rcs.Items { selector := labels.SelectorFromSet(rc.Spec.Selector) if selector.Matches(labels.Set(pod.Labels)) { foundrc = true break } } if firstPod { fmt.Fprintln(e.Options.Writer, "\nMigrating these pods on node: ", node.ObjectMeta.Name, "\n") firstPod = false printerWithHeaders.PrintObj(&pod, e.Options.Writer) } 
else { printerNoHeaders.PrintObj(&pod, e.Options.Writer) } if foundrc || e.Force { if err := e.Options.Kclient.Pods(pod.Namespace).Delete(pod.Name, deleteOptions); err != nil { glog.Errorf("Unable to delete a pod: %+v, error: %v", pod, err) errList = append(errList, err) continue } } else { // Pods without replication controller and no --force option numPodsWithNoRC++ } } if numPodsWithNoRC > 0 { err := fmt.Errorf(`Unable to evacuate some pods because they are not backed by replication controller. Suggested options: - You can list bare pods in json/yaml format using '--list-pods -o json|yaml' - Force deletion of bare pods with --force option to --evacuate - Optionally recreate these bare pods by massaging the json/yaml output from above list pods `) errList = append(errList, err) } if len(errList) != 0 { return kerrors.NewAggregate(errList) } return nil }
func TestListNamespaceListSelection(t *testing.T) { fakeEtcdClient, etcdStorage := newEtcdStorage(t) key := etcdtest.AddPrefix("/namespaces") fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Nodes: []*etcd.Node{ {Value: runtime.EncodeOrDie(latest.Codec, &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: "foo"}, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: "bar"}, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: "baz"}, Status: api.NamespaceStatus{Phase: api.NamespaceTerminating}, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Namespace{ ObjectMeta: api.ObjectMeta{ Name: "qux", Labels: map[string]string{"label": "qux"}, }, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: "zot"}, })}, }, }, }, } storage, _, _ := NewStorage(etcdStorage) ctx := api.NewContext() table := []struct { label, field string expectedIDs util.StringSet }{ { expectedIDs: util.NewStringSet("foo", "bar", "baz", "qux", "zot"), }, { field: "name=zot", expectedIDs: util.NewStringSet("zot"), }, { label: "label=qux", expectedIDs: util.NewStringSet("qux"), }, { field: "status.phase=Terminating", expectedIDs: util.NewStringSet("baz"), }, } for index, item := range table { label, err := labels.Parse(item.label) if err != nil { t.Errorf("unexpected error: %v", err) continue } field, err := fields.ParseSelector(item.field) if err != nil { t.Errorf("unexpected error: %v", err) continue } namespacesObj, err := storage.List(ctx, label, field) if err != nil { t.Errorf("unexpected error: %v", err) } namespaces := namespacesObj.(*api.NamespaceList) set := util.NewStringSet() for i := range namespaces.Items { set.Insert(namespaces.Items[i].Name) } if e, a := len(item.expectedIDs), len(set); e != a { t.Errorf("%v: Expected %v, got %v", index, item.expectedIDs, set) } } }
// init registers defaulting and conversion functions for this API version
// with the global Scheme. The conversion set covers selector <-> string
// round-trips plus the legacy ContainerManifest <-> Pod/PodSpec mappings.
func init() {
	Scheme.AddDefaultingFuncs(
		// ListOptions: default both selectors to "match everything" so an
		// empty list request returns all objects.
		func(obj *ListOptions) {
			obj.LabelSelector = labels.Everything()
			obj.FieldSelector = fields.Everything()
		},
		// PodExecOptions: attach stdout and stderr by default.
		func(obj *PodExecOptions) {
			obj.Stderr = true
			obj.Stdout = true
		},
	)
	Scheme.AddConversionFuncs(
		func(in *util.Time, out *util.Time, s conversion.Scope) error {
			// Cannot deep copy these, because time.Time has unexported fields.
			*out = *in
			return nil
		},
		// string -> labels.Selector: parse the selector expression; a parse
		// failure aborts the conversion.
		func(in *string, out *labels.Selector, s conversion.Scope) error {
			selector, err := labels.Parse(*in)
			if err != nil {
				return err
			}
			*out = selector
			return nil
		},
		// string -> fields.Selector: same pattern as the label selector above.
		func(in *string, out *fields.Selector, s conversion.Scope) error {
			selector, err := fields.ParseSelector(*in)
			if err != nil {
				return err
			}
			*out = selector
			return nil
		},
		// labels.Selector -> string: a nil selector leaves the output string
		// untouched (empty).
		func(in *labels.Selector, out *string, s conversion.Scope) error {
			if *in == nil {
				return nil
			}
			*out = (*in).String()
			return nil
		},
		// fields.Selector -> string: same nil-guard as for label selectors.
		func(in *fields.Selector, out *string, s conversion.Scope) error {
			if *in == nil {
				return nil
			}
			*out = (*in).String()
			return nil
		},
		func(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {
			// Cannot deep copy these, because inf.Dec has unexported fields.
			*out = *in.Copy()
			return nil
		},
		// Convert ContainerManifest to Pod
		func(in *ContainerManifest, out *Pod, s conversion.Scope) error {
			out.Spec.Containers = in.Containers
			out.Spec.Volumes = in.Volumes
			out.Spec.RestartPolicy = in.RestartPolicy
			out.Spec.DNSPolicy = in.DNSPolicy
			out.Name = in.ID
			out.UID = in.UUID
			// Only synthesize a SelfLink when the manifest carried an ID.
			if in.ID != "" {
				out.SelfLink = "/api/v1beta1/pods/" + in.ID
			}
			return nil
		},
		// Pod -> ContainerManifest: the manifest version is pinned to
		// "v1beta2" on this path.
		func(in *Pod, out *ContainerManifest, s conversion.Scope) error {
			out.Containers = in.Spec.Containers
			out.Volumes = in.Spec.Volumes
			out.RestartPolicy = in.Spec.RestartPolicy
			out.DNSPolicy = in.Spec.DNSPolicy
			out.Version = "v1beta2"
			out.ID = in.Name
			out.UUID = in.UID
			return nil
		},
		// ContainerManifestList
		func(in *ContainerManifestList, out *PodList, s conversion.Scope) error {
			if err := s.Convert(&in.Items, &out.Items, 0); err != nil {
				return err
			}
			// Propagate the list's resource version onto every item.
			for i := range out.Items {
				item := &out.Items[i]
				item.ResourceVersion = in.ResourceVersion
			}
			return nil
		},
		// PodList -> ContainerManifestList: items are converted element-wise;
		// the resource version is carried at the list level.
		func(in *PodList, out *ContainerManifestList, s conversion.Scope) error {
			if err := s.Convert(&in.Items, &out.Items, 0); err != nil {
				return err
			}
			out.ResourceVersion = in.ResourceVersion
			return nil
		},
		// Conversion between Manifest and PodSpec
		func(in *PodSpec, out *ContainerManifest, s conversion.Scope) error {
			if err := s.Convert(&in.Volumes, &out.Volumes, 0); err != nil {
				return err
			}
			if err := s.Convert(&in.Containers, &out.Containers, 0); err != nil {
				return err
			}
			if err := s.Convert(&in.RestartPolicy, &out.RestartPolicy, 0); err != nil {
				return err
			}
			// Optional *int64 fields are deep-copied so the two objects do
			// not share pointers.
			if in.TerminationGracePeriodSeconds != nil {
				out.TerminationGracePeriodSeconds = new(int64)
				*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds
			}
			if in.ActiveDeadlineSeconds != nil {
				out.ActiveDeadlineSeconds = new(int64)
				*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
			}
			out.DNSPolicy = in.DNSPolicy
			out.Version = "v1beta2"
			return nil
		},
		// ContainerManifest -> PodSpec: mirror of the conversion above; the
		// manifest's Version field has no PodSpec counterpart and is dropped.
		func(in *ContainerManifest, out *PodSpec, s conversion.Scope) error {
			if err := s.Convert(&in.Volumes, &out.Volumes, 0); err != nil {
				return err
			}
			if err := s.Convert(&in.Containers, &out.Containers, 0); err != nil {
				return err
			}
			if err := s.Convert(&in.RestartPolicy, &out.RestartPolicy, 0); err != nil {
				return err
			}
			if in.TerminationGracePeriodSeconds != nil {
				out.TerminationGracePeriodSeconds = new(int64)
				*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds
			}
			if in.ActiveDeadlineSeconds != nil {
				out.ActiveDeadlineSeconds = new(int64)
				*out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds
			}
			out.DNSPolicy = in.DNSPolicy
			return nil
		},
	)
}
func TestListResourceQuotaListSelection(t *testing.T) { fakeEtcdClient, etcdStorage := newEtcdStorage(t) storage, _ := NewStorage(etcdStorage) ctx := api.NewDefaultContext() key := storage.Etcd.KeyRootFunc(ctx) key = etcdtest.AddPrefix(key) fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Nodes: []*etcd.Node{ {Value: runtime.EncodeOrDie(latest.Codec, &api.ResourceQuota{ ObjectMeta: api.ObjectMeta{Name: "foo"}, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.ResourceQuota{ ObjectMeta: api.ObjectMeta{ Name: "qux", Labels: map[string]string{"label": "qux"}, }, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.ResourceQuota{ ObjectMeta: api.ObjectMeta{Name: "zot"}, })}, }, }, }, } table := []struct { label, field string expectedIDs util.StringSet }{ { expectedIDs: util.NewStringSet("foo", "qux", "zot"), }, { field: "name=zot", expectedIDs: util.NewStringSet("zot"), }, { label: "label=qux", expectedIDs: util.NewStringSet("qux"), }, } for index, item := range table { label, err := labels.Parse(item.label) if err != nil { t.Errorf("unexpected error: %v", err) continue } field, err := fields.ParseSelector(item.field) if err != nil { t.Errorf("unexpected error: %v", err) continue } resourcequotasObj, err := storage.List(ctx, label, field) if err != nil { t.Errorf("unexpected error: %v", err) } resourcequotas := resourcequotasObj.(*api.ResourceQuotaList) set := util.NewStringSet() for i := range resourcequotas.Items { set.Insert(resourcequotas.Items[i].Name) } if e, a := len(item.expectedIDs), len(set); e != a { t.Errorf("%v: Expected %v, got %v", index, item.expectedIDs, set) } } }
func TestListPodListSelection(t *testing.T) { fakeEtcdClient, helper := newHelper(t) fakeEtcdClient.Data["/registry/pods/default"] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Nodes: []*etcd.Node{ {Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo"}, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "bar"}, Spec: api.PodSpec{Host: "barhost"}, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "baz"}, Status: api.PodStatus{Phase: api.PodFailed}, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "qux", Labels: map[string]string{"label": "qux"}, }, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "zot"}, })}, }, }, }, } storage, _, _ := NewREST(helper) cache := &fakeCache{statusToReturn: &api.PodStatus{Phase: api.PodRunning}} storage = storage.WithPodStatus(cache) ctx := api.NewDefaultContext() table := []struct { label, field string expectedIDs util.StringSet }{ { expectedIDs: util.NewStringSet("foo", "bar", "baz", "qux", "zot"), }, { field: "name=zot", expectedIDs: util.NewStringSet("zot"), }, { label: "label=qux", expectedIDs: util.NewStringSet("qux"), }, { field: "status.phase=Failed", expectedIDs: util.NewStringSet("baz"), }, { field: "spec.host=barhost", expectedIDs: util.NewStringSet("bar"), }, { field: "spec.host=", expectedIDs: util.NewStringSet("foo", "baz", "qux", "zot"), }, { field: "spec.host!=", expectedIDs: util.NewStringSet("bar"), }, } for index, item := range table { label, err := labels.Parse(item.label) if err != nil { t.Errorf("unexpected error: %v", err) continue } field, err := fields.ParseSelector(item.field) if err != nil { t.Errorf("unexpected error: %v", err) continue } podsObj, err := storage.List(ctx, label, field) if err != nil { t.Errorf("unexpected error: %v", err) } pods := podsObj.(*api.PodList) set := 
util.NewStringSet() for i := range pods.Items { set.Insert(pods.Items[i].Name) } if e, a := len(item.expectedIDs), len(set); e != a { t.Errorf("%v: Expected %v, got %v", index, item.expectedIDs, set) } /*for _, pod := range pods.Items { if !item.expectedIDs.Has(pod.Name) { t.Errorf("%v: Unexpected pod %v", index, pod.Name) } t.Logf("%v: Got pod Name: %v", index, pod.Name) }*/ } }