// Complete converts string representations of field and label selectors to
// their parsed equivalent, or returns an error.
func (o *RouterSelection) Complete() error {
	if len(o.HostnameTemplate) == 0 && o.OverrideHostname {
		return fmt.Errorf("--override-hostname requires that --hostname-template be specified")
	}
	if len(o.LabelSelector) > 0 {
		s, err := labels.Parse(o.LabelSelector)
		if err != nil {
			return fmt.Errorf("label selector is not valid: %v", err)
		}
		o.Labels = s
	} else {
		o.Labels = labels.Everything()
	}
	if len(o.FieldSelector) > 0 {
		s, err := fields.ParseSelector(o.FieldSelector)
		if err != nil {
			return fmt.Errorf("field selector is not valid: %v", err)
		}
		o.Fields = s
	} else {
		o.Fields = fields.Everything()
	}
	if len(o.ProjectLabelSelector) > 0 {
		if len(o.Namespace) > 0 {
			return fmt.Errorf("only one of --project-labels and --namespace may be used")
		}
		if len(o.NamespaceLabelSelector) > 0 {
			return fmt.Errorf("only one of --namespace-labels and --project-labels may be used")
		}
		if o.ProjectLabelSelector == "*" {
			o.ProjectLabels = labels.Everything()
		} else {
			s, err := labels.Parse(o.ProjectLabelSelector)
			if err != nil {
				return fmt.Errorf("--project-labels selector is not valid: %v", err)
			}
			o.ProjectLabels = s
		}
	}
	if len(o.NamespaceLabelSelector) > 0 {
		if len(o.Namespace) > 0 {
			return fmt.Errorf("only one of --namespace-labels and --namespace may be used")
		}
		s, err := labels.Parse(o.NamespaceLabelSelector)
		if err != nil {
			return fmt.Errorf("--namespace-labels selector is not valid: %v", err)
		}
		o.NamespaceLabels = s
	}
	return nil
}
func validateLabels(a, b string) bool {
	sA, eA := labels.Parse(a)
	if eA != nil {
		return false
	}
	sB, eB := labels.Parse(b)
	if eB != nil {
		return false
	}
	return sA.String() == sB.String()
}
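// Hypothetical usage sketch (not from the original source): validateLabels
// treats two selector strings as equivalent when their parsed, canonical
// String() forms agree, so cosmetic differences such as whitespace are
// ignored, and any unparseable input makes the comparison fail.
func demoValidateLabels() {
	fmt.Println(validateLabels("a=b", "a = b")) // true: both canonicalize to "a=b"
	fmt.Println(validateLabels("a=b", "a=c"))   // false: different requirements
	fmt.Println(validateLabels("a=(", "a=("))   // false: neither side parses
}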
// Complete converts string representations of field and label selectors to
// their parsed equivalent, or returns an error.
func (o *RouterSelection) Complete() error {
	if len(o.LabelSelector) > 0 {
		s, err := labels.Parse(o.LabelSelector)
		if err != nil {
			return fmt.Errorf("label selector is not valid: %v", err)
		}
		o.Labels = s
	} else {
		o.Labels = labels.Everything()
	}
	if len(o.FieldSelector) > 0 {
		s, err := fields.ParseSelector(o.FieldSelector)
		if err != nil {
			return fmt.Errorf("field selector is not valid: %v", err)
		}
		o.Fields = s
	} else {
		o.Fields = fields.Everything()
	}
	if len(o.ProjectLabelSelector) > 0 {
		if len(o.Namespace) > 0 {
			return fmt.Errorf("only one of --project-labels and --namespace may be used")
		}
		if len(o.NamespaceLabelSelector) > 0 {
			return fmt.Errorf("only one of --namespace-labels and --project-labels may be used")
		}
		if o.ProjectLabelSelector == "*" {
			o.ProjectLabels = labels.Everything()
		} else {
			s, err := labels.Parse(o.ProjectLabelSelector)
			if err != nil {
				return fmt.Errorf("--project-labels selector is not valid: %v", err)
			}
			o.ProjectLabels = s
		}
	}
	if len(o.NamespaceLabelSelector) > 0 {
		if len(o.Namespace) > 0 {
			return fmt.Errorf("only one of --namespace-labels and --namespace may be used")
		}
		s, err := labels.Parse(o.NamespaceLabelSelector)
		if err != nil {
			return fmt.Errorf("--namespace-labels selector is not valid: %v", err)
		}
		o.NamespaceLabels = s
	}
	return nil
}
// UnmarshalJSON implements the json.Unmarshaler interface for deserializing
// the JSON representation of a DaemonSet, so it can be decoded with
// json.Unmarshal.
func (ds *DaemonSet) UnmarshalJSON(b []byte) error {
	var rawDS RawDaemonSet
	if err := json.Unmarshal(b, &rawDS); err != nil {
		return err
	}
	var podManifest manifest.Manifest
	if rawDS.Manifest != "" {
		var err error
		podManifest, err = manifest.FromBytes([]byte(rawDS.Manifest))
		if err != nil {
			return err
		}
	}
	nodeSelector, err := labels.Parse(rawDS.NodeSelector)
	if err != nil {
		return err
	}
	*ds = DaemonSet{
		ID:           rawDS.ID,
		Disabled:     rawDS.Disabled,
		Manifest:     podManifest,
		MinHealth:    rawDS.MinHealth,
		Name:         rawDS.Name,
		NodeSelector: nodeSelector,
		PodID:        rawDS.PodID,
		Timeout:      rawDS.Timeout,
	}
	return nil
}
func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) {
	var err error
	switch t := opts.(type) {
	case api.ListOptions:
		labelSelector = t.LabelSelector
		fieldSelector = t.FieldSelector
		resourceVersion = t.ResourceVersion
	case v1.ListOptions:
		labelSelector, err = labels.Parse(t.LabelSelector)
		if err != nil {
			panic(err)
		}
		fieldSelector, err = fields.ParseSelector(t.FieldSelector)
		if err != nil {
			panic(err)
		}
		resourceVersion = t.ResourceVersion
	default:
		panic(fmt.Errorf("expected a ListOptions"))
	}
	if labelSelector == nil {
		labelSelector = labels.Everything()
	}
	if fieldSelector == nil {
		fieldSelector = fields.Everything()
	}
	return labelSelector, fieldSelector, resourceVersion
}
func (l *labelHTTPServer) Select(resp http.ResponseWriter, req *http.Request) {
	labelType, err := AsType(req.URL.Query().Get("type"))
	if err != nil {
		http.Error(resp, err.Error(), http.StatusBadRequest)
		return
	}
	selector, err := klabels.Parse(req.URL.Query().Get("selector"))
	if err != nil {
		http.Error(resp, err.Error(), http.StatusBadRequest)
		return
	}
	matches := []Labeled{}
	if l.useBatcher {
		allLabels, err := l.batcher.ForType(labelType).Retrieve()
		if err != nil {
			http.Error(resp, err.Error(), http.StatusServiceUnavailable)
			return
		}
		for _, candidate := range allLabels {
			if selector.Matches(candidate.Labels) {
				matches = append(matches, candidate)
			}
		}
	} else {
		matches, err = l.applicator.GetMatches(selector, labelType, false)
		if err != nil {
			http.Error(resp, err.Error(), http.StatusServiceUnavailable)
			return
		}
	}
	l.respondWithJSON(matches, resp)
}
func (a *Api) nodeMetricsList(request *restful.Request, response *restful.Response) {
	selector := request.QueryParameter("labelSelector")
	labelSelector, err := labels.Parse(selector)
	if err != nil {
		errMsg := fmt.Errorf("Error while parsing selector %v: %v", selector, err)
		glog.Error(errMsg)
		response.WriteError(http.StatusBadRequest, errMsg)
		return
	}
	nodes, err := a.nodeLister.NodeCondition(func(node *kube_api.Node) bool {
		if labelSelector.Empty() {
			return true
		}
		return labelSelector.Matches(labels.Set(node.Labels))
	}).List()
	if err != nil {
		errMsg := fmt.Errorf("Error while listing nodes: %v", err)
		glog.Error(errMsg)
		response.WriteError(http.StatusInternalServerError, errMsg)
		return
	}
	res := v1alpha1.NodeMetricsList{}
	for _, node := range nodes {
		if m := a.getNodeMetrics(node.Name); m != nil {
			res.Items = append(res.Items, *m)
		}
	}
	response.WriteEntity(&res)
}
func podMetricsInNamespaceList(a *Api, request *restful.Request, response *restful.Response, namespace string) {
	selector := request.QueryParameter("labelSelector")
	labelSelector, err := labels.Parse(selector)
	if err != nil {
		errMsg := fmt.Errorf("Error while parsing selector %v: %v", selector, err)
		glog.Error(errMsg)
		response.WriteError(http.StatusBadRequest, errMsg)
		return
	}
	pods, err := a.podLister.Pods(namespace).List(labelSelector)
	if err != nil {
		errMsg := fmt.Errorf("Error while listing pods for selector %v: %v", selector, err)
		glog.Error(errMsg)
		response.WriteError(http.StatusInternalServerError, errMsg)
		return
	}
	res := v1alpha1.PodMetricsList{}
	for _, pod := range pods.Items {
		if m := a.getPodMetrics(&pod); m != nil {
			res.Items = append(res.Items, *m)
		} else {
			glog.Infof("No metrics for pod %s/%s", pod.Namespace, pod.Name)
		}
	}
	response.WriteEntity(&res)
}
// UnmarshalJSON implements the json.Unmarshaler interface for deserializing
// the JSON representation of an RC.
func (rc *RC) UnmarshalJSON(b []byte) error {
	var rawRC RawRC
	if err := json.Unmarshal(b, &rawRC); err != nil {
		return err
	}
	var m manifest.Manifest
	if rawRC.Manifest != "" {
		var err error
		m, err = manifest.FromBytes([]byte(rawRC.Manifest))
		if err != nil {
			return err
		}
	}
	nodeSel, err := labels.Parse(rawRC.NodeSelector)
	if err != nil {
		return err
	}
	*rc = RC{
		ID:              rawRC.ID,
		Manifest:        m,
		NodeSelector:    nodeSel,
		PodLabels:       rawRC.PodLabels,
		ReplicasDesired: rawRC.ReplicasDesired,
		Disabled:        rawRC.Disabled,
	}
	return nil
}
func parseNodeSelector(selectorString string) (klabels.Selector, error) {
	selector, err := klabels.Parse(selectorString)
	if err != nil {
		return selector, util.Errorf("Malformed selector: %v", err)
	}
	return selector, nil
}
// ParseLabelsOrDie turns the given string into a label selector or
// panics; for tests or other cases where you know the string is valid.
// TODO: Move this to the upstream labels package.
func ParseLabelsOrDie(str string) labels.Selector {
	ret, err := labels.Parse(str)
	if err != nil {
		panic(fmt.Sprintf("cannot parse '%v': %v", str, err))
	}
	return ret
}
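// A minimal usage sketch (assumed, not part of the original source): intended
// for tests where the selector literal is known to be valid, so the panic
// path can never fire.
func demoParseLabelsOrDie() {
	sel := ParseLabelsOrDie("app=nginx,tier in (frontend)")
	fmt.Println(sel.Matches(labels.Set{"app": "nginx", "tier": "frontend"})) // true
}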
// Common validations
func (p *ProjectOptions) Validate() error {
	errList := []error{}
	if p.CheckSelector {
		if len(p.Selector) > 0 {
			if _, err := labels.Parse(p.Selector); err != nil {
				errList = append(errList, errors.New("--selector=<project_selector> must be a valid label selector"))
			}
		}
		if len(p.ProjectNames) != 0 {
			errList = append(errList, errors.New("either specify --selector=<project_selector> or projects but not both"))
		}
	} else if len(p.ProjectNames) == 0 {
		errList = append(errList, errors.New("must provide --selector=<project_selector> or projects"))
	}
	clusterNetwork, err := p.Oclient.ClusterNetwork().Get(sdnapi.ClusterNetworkDefault)
	if err != nil {
		if kapierrors.IsNotFound(err) {
			errList = append(errList, errors.New("Managing pod network is only supported for openshift multitenant network plugin"))
		} else {
			errList = append(errList, errors.New("Failed to fetch current network plugin info"))
		}
	} else if !sdnapi.IsOpenShiftMultitenantNetworkPlugin(clusterNetwork.PluginName) {
		errList = append(errList, fmt.Errorf("Using plugin: %q, managing pod network is only supported for openshift multitenant network plugin", clusterNetwork.PluginName))
	}
	return kerrors.NewAggregate(errList)
}
// rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment
// targets, with pod-template-hash information synced.
func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) {
	rsList, err := deploymentutil.ListReplicaSets(deployment,
		func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
			parsed, err := labels.Parse(options.LabelSelector)
			if err != nil {
				return nil, err
			}
			return dc.rsLister.ReplicaSets(namespace).List(parsed)
		})
	if err != nil {
		return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err)
	}
	syncedRSList := []*extensions.ReplicaSet{}
	for _, rs := range rsList {
		// Add pod-template-hash information if it's not in the RS.
		// Otherwise, a new RS produced by the Deployment will overlap with
		// pre-existing ones that aren't constrained by the pod-template-hash.
		syncedRS, err := dc.addHashKeyToRSAndPods(rs)
		if err != nil {
			return nil, nil, err
		}
		syncedRSList = append(syncedRSList, syncedRS)
	}
	syncedPodList, err := dc.listPods(deployment)
	if err != nil {
		return nil, nil, err
	}
	return syncedRSList, syncedPodList, nil
}
func (o TopPodOptions) RunTopPod() error {
	var err error
	selector := labels.Everything()
	if len(o.Selector) > 0 {
		selector, err = labels.Parse(o.Selector)
		if err != nil {
			return err
		}
	}
	metrics, err := o.Client.GetPodMetrics(o.Namespace, o.ResourceName, o.AllNamespaces, selector)
	// TODO: Refactor this once Heapster becomes the API server.
	// First check why no metrics have been received.
	if len(metrics) == 0 {
		// If the API server query is successful but all the pods are newly
		// created, the metrics are probably not ready yet, so return that
		// error first.
		e := verifyEmptyMetrics(o, selector)
		if e != nil {
			return e
		}
	}
	if err != nil {
		return err
	}
	return o.Printer.PrintPodMetrics(metrics, o.PrintContainers, o.AllNamespaces)
}
func init() {
	var err error
	servicesSelector, err = labels.Parse(fmt.Sprintf("%s/routable==true", prefix))
	if err != nil {
		log.Fatal(err)
	}
}
func runJoinCluster(cmd *cobra.Command, args []string) error {
	cli, err := client.NewInCluster()
	if err != nil {
		return fmt.Errorf("unable to connect to k8s api server: %v", err)
	}
	labelSelector, err := labels.Parse(influxSelectors)
	if err != nil {
		return fmt.Errorf("unable to parse labels: %v", err)
	}
	fieldSelector := fields.Everything()
	podIPs, err := podIps(cli, labelSelector, fieldSelector)
	if err != nil {
		return err
	}
	hostIP, err := externalIP()
	if err != nil {
		return err
	}
	peers := influxdbPeers(hostIP, podIPs)
	iOpts := influxdOpts(hostIP, peers)
	if err := ioutil.WriteFile(envVarFile, []byte(iOpts), 0644); err != nil {
		return err
	}
	return nil
}
// Nothing returns a selector that cannot match any labels. It differs from
// klabels.Nothing() in that it can be serialized as a string and re-parsed
// as a selector while remaining within the grammar for selectors.
func Nothing() labels.Selector {
	sel, err := labels.Parse("x=1,x=0")
	if err != nil {
		panic(err)
	}
	return sel
}
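// A hedged sketch (not original code) of the round-trip property described
// above: the selector survives String()/Parse, whereas klabels.Nothing()
// serializes to the empty string, which re-parses as a match-everything
// selector. No label set can satisfy x=1 and x=0 at once.
func demoNothingRoundTrip() {
	sel := Nothing()
	reparsed, err := labels.Parse(sel.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(labels.Set{"x": "1"}))      // false
	fmt.Println(reparsed.Matches(labels.Set{"x": "1"})) // false
}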
func TestBatchRequests(t *testing.T) {
	server, applicator := fakeServerAndApplicator(t, 100*time.Millisecond)
	defer server.Close()
	Assert(t).IsNil(applicator.SetLabels(POD, "abc", labels.Set{"color": "green", "state": "experimental"}), "Should not err setting labels")
	Assert(t).IsNil(applicator.SetLabels(POD, "def", labels.Set{"color": "green", "state": "production"}), "Should not err setting labels")
	Assert(t).IsNil(applicator.SetLabels(POD, "f98", labels.Set{"color": "blue", "state": "production"}), "Should not err setting labels")
	Assert(t).IsNil(applicator.SetLabels(POD, "c56", labels.Set{"color": "blue", "state": "experimental"}), "Should not err setting labels")
	queryToResults := map[string][]string{
		"color = green":                      []string{"abc", "def"},
		"state = production":                 []string{"def", "f98"},
		"color = blue, state = production":   []string{"f98"},
		"color = blue":                       []string{"f98", "c56"},
		"state = experimental":               []string{"c56", "abc"},
		"color = blue, state = experimental": []string{"c56"},
	}
	var tests sync.WaitGroup
	for q, expect := range queryToResults {
		tests.Add(1)
		go func(query string, expect []string) {
			defer tests.Done()
			selector, err := labels.Parse(query)
			if err != nil {
				t.Errorf("Test setup error: %v", err)
				return
			}
			res, err := applicator.GetMatches(selector, POD, false)
			if err != nil {
				t.Errorf("Could not run applicator query: %v", err)
				return
			}
			if len(expect) != len(res) {
				t.Errorf("Incorrect number of query results for %v", query)
				return
			}
			for _, labeled := range res {
				var found bool
				for _, id := range expect {
					if id == labeled.ID {
						found = true
					}
				}
				if !found {
					t.Errorf("Found %v but shouldn't have found it", labeled.ID)
				}
			}
		}(q, expect)
	}
	doneCh := make(chan struct{})
	go func() {
		tests.Wait()
		close(doneCh)
	}()
	select {
	case <-doneCh:
		return
	case <-time.After(500 * time.Millisecond):
		t.Fatalf("Tests timed out")
	}
}
func (r rctlParams) Create(manifestPath, nodeSelector string, podLabels map[string]string, rcLabels map[string]string) {
	manifest, err := manifest.FromPath(manifestPath)
	if err != nil {
		r.logger.WithErrorAndFields(err, logrus.Fields{
			"manifest": manifestPath,
		}).Fatalln("Could not read pod manifest")
	}
	nodeSel, err := klabels.Parse(nodeSelector)
	if err != nil {
		r.logger.WithErrorAndFields(err, logrus.Fields{
			"selector": nodeSelector,
		}).Fatalln("Could not parse node selector")
	}
	newRC, err := r.rcs.Create(manifest, nodeSel, klabels.Set(podLabels))
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not create replication controller in Consul")
	}
	r.logger.WithField("id", newRC.ID).Infoln("Created new replication controller")
	err = r.labeler.SetLabels(labels.RC, newRC.ID.String(), rcLabels)
	if err != nil {
		r.logger.WithError(err).Fatalln("Could not label replication controller")
	}
}
// getClusterNodesIP returns the IP address of each node in the kubernetes cluster
func getClusterNodesIP(kubeClient *unversioned.Client, nodeSelector string) (clusterNodes []string) {
	listOpts := api.ListOptions{}
	if nodeSelector != "" {
		label, err := labels.Parse(nodeSelector)
		if err != nil {
			glog.Fatalf("'%v' is not a valid selector: %v", nodeSelector, err)
		}
		listOpts.LabelSelector = label
	}
	nodes, err := kubeClient.Nodes().List(listOpts)
	if err != nil {
		glog.Fatalf("Error getting running nodes: %v", err)
	}
	for _, nodo := range nodes.Items {
		nodeIP, err := node.GetNodeHostIP(&nodo)
		if err == nil {
			clusterNodes = append(clusterNodes, nodeIP.String())
		}
	}
	sort.Strings(clusterNodes)
	return
}
func runTest(cmd *cobra.Command, args []string) error {
	if localProxy == "" {
		return fmt.Errorf("please set the env variable LOCAL_PROXY, e.g. LOCAL_PROXY=\"http://localhost:8080\"; set up the proxy with \"kubectl proxy --port 8080 &\"")
	}
	config := &client.Config{
		Host:     localProxy,
		Insecure: true,
	}
	cli, err := client.New(config)
	if err != nil {
		return fmt.Errorf("unable to connect to k8s api server: %v", err)
	}
	labelSelector, err := labels.Parse(influxSelectors)
	if err != nil {
		return fmt.Errorf("unable to parse labels: %v", err)
	}
	fieldSelector := fields.Everything()
	podIPs, err := podIps(cli, labelSelector, fieldSelector)
	if err != nil {
		return err
	}
	hostIP, _ := externalIP()
	peers := influxdbPeers(hostIP, podIPs)
	iOpts := influxdOpts(hostIP, peers)
	fmt.Println("Content of /etc/default/influxdb : ", iOpts)
	return nil
}
func (fs Filesystem) checksyncstatus(path string) error {
	path = strings.TrimPrefix(path, "/home/minio")
	path = "mnt/minio/data" + path
	var lock sync.RWMutex
	// nosync tracks, per pod IP, whether that pod has synced the path yet.
	nosync := make(map[string]bool)
	kubeClient, err := client.NewInCluster()
	if err != nil {
		return fmt.Errorf("unable to create client")
	}
	pclient := kubeClient.Pods("default")
	selector, _ := labels.Parse("app=minio-sync")
	list, err := pclient.List(selector, nil)
	if err != nil {
		return fmt.Errorf("list pods failed")
	}
	for _, pod := range list.Items {
		fmt.Println(pod.Status.PodIP)
		if pod.Status.Phase == "Running" {
			nosync[pod.Status.PodIP] = false
		}
	}
	allsync := true
	var duration float64
	for duration = 1; duration < 60; duration++ {
		timeperiod := time.Duration(time.Second * time.Duration(duration))
		fmt.Println(timeperiod)
		time.Sleep(timeperiod)
		var wg sync.WaitGroup
		wg.Add(len(nosync))
		for ip, sync := range nosync {
			go func(ip string, sync bool) {
				defer wg.Done()
				if !sync {
					if doCurl("http://" + ip + ":3000/" + path) {
						lock.Lock()
						nosync[ip] = true
						lock.Unlock()
					} else if allsync {
						allsync = false
					}
				}
			}(ip, sync)
		}
		wg.Wait()
		if allsync {
			break
		}
		allsync = true
	}
	for _, sync := range nosync {
		if !sync {
			return fmt.Errorf("sync failed: took too long")
		}
	}
	return nil
}
func init() {
	Scheme.AddDefaultingFuncs(
		func(obj *ListOptions) {
			obj.LabelSelector = labels.Everything()
			obj.FieldSelector = fields.Everything()
		},
		// TODO: see about moving this into v1/defaults.go
		func(obj *PodExecOptions) {
			obj.Stderr = true
			obj.Stdout = true
		},
		func(obj *PodAttachOptions) {
			obj.Stderr = true
			obj.Stdout = true
		},
	)
	Scheme.AddConversionFuncs(
		func(in *unversioned.Time, out *unversioned.Time, s conversion.Scope) error {
			// Cannot deep copy these, because time.Time has unexported fields.
			*out = *in
			return nil
		},
		func(in *string, out *labels.Selector, s conversion.Scope) error {
			selector, err := labels.Parse(*in)
			if err != nil {
				return err
			}
			*out = selector
			return nil
		},
		func(in *string, out *fields.Selector, s conversion.Scope) error {
			selector, err := fields.ParseSelector(*in)
			if err != nil {
				return err
			}
			*out = selector
			return nil
		},
		func(in *labels.Selector, out *string, s conversion.Scope) error {
			if *in == nil {
				return nil
			}
			*out = (*in).String()
			return nil
		},
		func(in *fields.Selector, out *string, s conversion.Scope) error {
			if *in == nil {
				return nil
			}
			*out = (*in).String()
			return nil
		},
		func(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {
			// Cannot deep copy these, because inf.Dec has unexported fields.
			*out = *in.Copy()
			return nil
		},
	)
}
func (self *realPodsApi) getNodeSelector(nodeList *nodes.NodeList) (labels.Selector, error) {
	nodeLabels := []string{}
	for host := range nodeList.Items {
		nodeLabels = append(nodeLabels, fmt.Sprintf("spec.nodeName==%s", host))
	}
	glog.V(2).Infof("using labels %v to find pods", nodeLabels)
	return labels.Parse(strings.Join(nodeLabels, ","))
}
func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error {
	selector, err := labels.Parse(*in)
	if err != nil {
		return err
	}
	*out = selector
	return nil
}
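// Illustrative call (an assumption for demonstration, not original code):
// the conversion.Scope parameter is unused by the function body, so nil is
// passed here; a real Scheme would supply a proper scope.
func demoConvertStringToSelector() {
	in := "app=guestbook,tier in (frontend)"
	var out labels.Selector
	if err := Convert_string_To_labels_Selector(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // canonical form of the parsed selector
}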
// Watch watches all Pods that have a build label, for deletion
func (lw *buildPodDeleteLW) Watch(resourceVersion string) (watch.Interface, error) {
	// FIXME: since we cannot have OR on label name we'll just get builds with the new label
	sel, err := labels.Parse(buildapi.BuildLabel)
	if err != nil {
		return nil, err
	}
	return lw.KubeClient.Pods(kapi.NamespaceAll).Watch(sel, fields.Everything(), resourceVersion)
}
func TestLabelSelectorAsSelector(t *testing.T) {
	matchLabels := map[string]string{"foo": "bar"}
	matchExpressions := []LabelSelectorRequirement{{
		Key:      "baz",
		Operator: LabelSelectorOpIn,
		Values:   []string{"qux", "norf"},
	}}
	mustParse := func(s string) labels.Selector {
		out, e := labels.Parse(s)
		if e != nil {
			panic(e)
		}
		return out
	}
	tc := []struct {
		in        *LabelSelector
		out       labels.Selector
		expectErr bool
	}{
		{in: nil, out: labels.Nothing()},
		{in: &LabelSelector{}, out: labels.Everything()},
		{
			in:  &LabelSelector{MatchLabels: matchLabels},
			out: mustParse("foo in (bar)"),
		},
		{
			in:  &LabelSelector{MatchExpressions: matchExpressions},
			out: mustParse("baz in (norf,qux)"),
		},
		{
			in:  &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions},
			out: mustParse("foo in (bar),baz in (norf,qux)"),
		},
		{
			in: &LabelSelector{
				MatchExpressions: []LabelSelectorRequirement{{
					Key:      "baz",
					Operator: LabelSelectorOpExists,
					Values:   []string{"qux", "norf"},
				}},
			},
			expectErr: true,
		},
	}
	for i, tc := range tc {
		out, err := LabelSelectorAsSelector(tc.in)
		if err == nil && tc.expectErr {
			t.Errorf("[%v]expected error but got none.", i)
		}
		if err != nil && !tc.expectErr {
			t.Errorf("[%v]did not expect error but got: %v", i, err)
		}
		if !reflect.DeepEqual(out, tc.out) {
			t.Errorf("[%v]expected:\n\t%+v\nbut got:\n\t%+v", i, tc.out, out)
		}
	}
}
func main() {
	flag.Usage = usage
	flag.Parse()
	var (
		cfg *kclient.Config
		err error
	)
	if *local {
		cfg = &kclient.Config{Host: fmt.Sprintf("http://localhost:%d", *localPort)}
	} else {
		cfg, err = kclient.InClusterConfig()
		if err != nil {
			glog.Errorf("failed to load config: %v", err)
			flag.Usage()
			os.Exit(1)
		}
	}
	client, err = kclient.New(cfg)
	if err != nil {
		glog.Fatal(err)
	}
	selector, err := labels.Parse(*userLabels)
	if err != nil {
		glog.Fatal(err)
	}
	tc, err := parseTimeCounts(*times, *counts)
	if err != nil {
		glog.Fatal(err)
	}
	if namespace == "" {
		glog.Fatal("POD_NAMESPACE is not set. Set to the namespace of the replication controller if running locally.")
	}
	scaler := scaler{timeCounts: tc, selector: selector}
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)
	glog.Info("starting scaling")
	if err := scaler.Start(); err != nil {
		glog.Fatal(err)
	}
	<-sigChan
	glog.Info("stopping scaling")
	if err := scaler.Stop(); err != nil {
		glog.Fatal(err)
	}
}
func (o *TopNodeOptions) Validate() error {
	if len(o.ResourceName) > 0 && len(o.Selector) > 0 {
		return errors.New("only one of NAME or --selector can be provided")
	}
	if len(o.Selector) > 0 {
		_, err := labels.Parse(o.Selector)
		if err != nil {
			return err
		}
	}
	return nil
}
// Watch watches all Pods that have a build label, for deletion
func (lw *buildPodDeleteLW) Watch(options kapi.ListOptions) (watch.Interface, error) {
	// FIXME: since we cannot have OR on label name we'll just get builds with the new label
	sel, err := labels.Parse(buildapi.BuildLabel)
	if err != nil {
		return nil, err
	}
	opts := kapi.ListOptions{
		LabelSelector:   sel,
		ResourceVersion: options.ResourceVersion,
	}
	return lw.KubeClient.Pods(kapi.NamespaceAll).Watch(opts)
}