func protoLabelSet(base, ext model.LabelSet) ([]*dto.LabelPair, error) { labels := base.Clone().Merge(ext) delete(labels, model.MetricNameLabel) names := make([]string, 0, len(labels)) for ln := range labels { names = append(names, string(ln)) } sort.Strings(names) pairs := make([]*dto.LabelPair, 0, len(labels)) for _, ln := range names { if !model.LabelNameRE.MatchString(ln) { return nil, fmt.Errorf("invalid label name %q", ln) } lv := labels[model.LabelName(ln)] pairs = append(pairs, &dto.LabelPair{ Name: proto.String(ln), Value: proto.String(string(lv)), }) } return pairs, nil }
// Mutes returns true iff the given label set is muted.
// It scans all pending (firing) alerts and reports whether any inhibition
// rule mutes lset given one of those alerts as the source. The inhibition
// state is recorded on the marker either way.
func (ih *Inhibitor) Mutes(lset model.LabelSet) bool {
	alerts := ih.alerts.GetPending()
	defer alerts.Close()

	// TODO(fabxc): improve erroring for iterators so it does not
	// go silenced here.
	for alert := range alerts.Next() {
		// Check the iterator's error state on every element; a failed
		// element is skipped rather than aborting the whole scan.
		if err := alerts.Err(); err != nil {
			log.Errorf("Error iterating alerts: %s", err)
			continue
		}
		// Resolved alerts cannot act as inhibition sources.
		if alert.Resolved() {
			continue
		}
		for _, rule := range ih.rules {
			if rule.Mutes(alert.Labels, lset) {
				ih.marker.SetInhibited(lset.Fingerprint(), true)
				return true
			}
		}
	}
	// A final error check catches failures signaled after the channel closed.
	if err := alerts.Err(); err != nil {
		log.Errorf("Error after iterating alerts: %s", err)
	}

	// No rule matched: explicitly clear any previous inhibited state.
	ih.marker.SetInhibited(lset.Fingerprint(), false)
	return false
}
func newTestTarget(targetURL string, deadline time.Duration, labels model.LabelSet) *Target { labels = labels.Clone() labels[model.SchemeLabel] = "http" labels[model.AddressLabel] = model.LabelValue(strings.TrimLeft(targetURL, "http://")) labels[model.MetricsPathLabel] = "/metrics" return &Target{ labels: labels, } }
// Mutes returns true iff the given label set is muted. func (ih *Inhibitor) Mutes(lset model.LabelSet) bool { fp := lset.Fingerprint() for _, r := range ih.rules { if r.TargetMatchers.Match(lset) && r.hasEqual(lset) { ih.marker.SetInhibited(fp, true) return true } } ih.marker.SetInhibited(fp, false) return false }
// The Silences provider must implement the Muter interface // for all its silences. The data provider may have access to an // optimized view of the data to perform this evaluation. func (s *Silences) Mutes(lset model.LabelSet) bool { sils, err := s.All() if err != nil { log.Errorf("retrieving silences failed: %s", err) // In doubt, do not silence anything. return false } for _, sil := range sils { if sil.Mutes(lset) { s.mk.SetSilenced(lset.Fingerprint(), sil.ID) return true } } s.mk.SetSilenced(lset.Fingerprint()) return false }
func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair { labels := base.Clone().Merge(ext) delete(labels, model.MetricNameLabel) names := make([]string, 0, len(labels)) for ln := range labels { names = append(names, string(ln)) } sort.Strings(names) pairs := make([]*dto.LabelPair, 0, len(labels)) for _, ln := range names { lv := labels[model.LabelName(ln)] pairs = append(pairs, &dto.LabelPair{ Name: proto.String(ln), Value: proto.String(string(lv)), }) } return pairs }
// processAlert determines in which aggregation group the alert falls
// and insert it.
func (d *Dispatcher) processAlert(alert *types.Alert, route *Route) {
	// Project the alert's labels onto the route's group-by label names;
	// alerts with the same projection aggregate into the same group.
	group := model.LabelSet{}
	for ln, lv := range alert.Labels {
		if _, ok := route.RouteOpts.GroupBy[ln]; ok {
			group[ln] = lv
		}
	}
	// The fingerprint of the projected label set keys the aggregation group.
	fp := group.Fingerprint()

	// Lazily create the per-route group map under the lock.
	d.mtx.Lock()
	groups, ok := d.aggrGroups[route]
	if !ok {
		groups = map[model.Fingerprint]*aggrGroup{}
		d.aggrGroups[route] = groups
	}
	d.mtx.Unlock()

	// If the group does not exist, create it.
	// NOTE(review): `groups` is read and written here after the mutex has
	// been released — this looks safe only if processAlert is never called
	// concurrently for the same route; confirm with callers.
	ag, ok := groups[fp]
	if !ok {
		ag = newAggrGroup(d.ctx, group, &route.RouteOpts, d.timeout)
		groups[fp] = ag

		// The group's run loop flushes batches through the notification
		// stage; the returned bool signals whether the flush succeeded.
		go ag.run(func(ctx context.Context, alerts ...*types.Alert) bool {
			_, _, err := d.stage.Exec(ctx, alerts...)
			if err != nil {
				log.Errorf("Notify for %d alerts failed: %s", len(alerts), err)
			}
			return err == nil
		})
	}

	ag.insert(alert)
}
func newTestTarget(targetURL string, deadline time.Duration, labels model.LabelSet) *Target { labels = labels.Clone() labels[model.SchemeLabel] = "http" labels[model.AddressLabel] = model.LabelValue(strings.TrimLeft(targetURL, "http://")) labels[model.MetricsPathLabel] = "/metrics" t := &Target{ scrapeConfig: &config.ScrapeConfig{ ScrapeInterval: model.Duration(time.Millisecond), ScrapeTimeout: model.Duration(deadline), }, labels: labels, status: &TargetStatus{}, scraperStopping: make(chan struct{}), scraperStopped: make(chan struct{}), } var err error if t.httpClient, err = t.client(); err != nil { panic(err) } return t }
func (g *Group) fingerprint() model.Fingerprint { l := model.LabelSet{"name": model.LabelValue(g.name)} return l.Fingerprint() }
// buildEndpoints converts a Kubernetes Endpoints object into a target group.
// It emits one target per endpoint address/port combination, enriched with
// pod and container labels when the address resolves to a pod, and
// additionally emits targets for pod container ports that are not exposed
// through any service endpoint. Returns nil if the object has no subsets.
func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *config.TargetGroup {
	if len(eps.Subsets) == 0 {
		return nil
	}

	tg := &config.TargetGroup{
		Source: endpointsSource(eps),
	}
	tg.Labels = model.LabelSet{
		namespaceLabel:     lv(eps.Namespace),
		endpointsNameLabel: lv(eps.Name),
	}
	e.addServiceLabels(eps.Namespace, eps.Name, tg)

	// Track, per pod, which ports are already covered by service endpoints
	// so uncovered container ports can be targeted separately below.
	type podEntry struct {
		pod          *apiv1.Pod
		servicePorts []apiv1.EndpointPort
	}
	seenPods := map[string]*podEntry{}

	// add appends a target for the given address/port; ready carries the
	// readiness state as a label value ("true"/"false").
	add := func(addr apiv1.EndpointAddress, port apiv1.EndpointPort, ready string) {
		a := net.JoinHostPort(addr.IP, strconv.FormatUint(uint64(port.Port), 10))

		target := model.LabelSet{
			model.AddressLabel:        lv(a),
			endpointPortNameLabel:     lv(port.Name),
			endpointPortProtocolLabel: lv(string(port.Protocol)),
			endpointReadyLabel:        lv(ready),
		}

		pod := e.resolvePodRef(addr.TargetRef)
		if pod == nil {
			// This target is not a Pod, so don't continue with Pod specific logic.
			tg.Targets = append(tg.Targets, target)
			return
		}
		// Key pods by namespace/name to deduplicate across subsets.
		s := pod.Namespace + "/" + pod.Name

		sp, ok := seenPods[s]
		if !ok {
			sp = &podEntry{pod: pod}
			seenPods[s] = sp
		}

		// Attach standard pod labels.
		target = target.Merge(podLabels(pod))

		// Attach potential container port labels matching the endpoint port.
		for _, c := range pod.Spec.Containers {
			for _, cport := range c.Ports {
				if port.Port == cport.ContainerPort {
					ports := strconv.FormatUint(uint64(port.Port), 10)
					target[podContainerNameLabel] = lv(c.Name)
					target[podContainerPortNameLabel] = lv(cport.Name)
					target[podContainerPortNumberLabel] = lv(ports)
					target[podContainerPortProtocolLabel] = lv(string(port.Protocol))
					break
				}
			}
		}

		// Add service port so we know that we have already generated a target
		// for it.
		sp.servicePorts = append(sp.servicePorts, port)

		tg.Targets = append(tg.Targets, target)
	}

	for _, ss := range eps.Subsets {
		for _, port := range ss.Ports {
			for _, addr := range ss.Addresses {
				add(addr, port, "true")
			}
			// Although this generates the same target again, as it was generated in
			// the loop above, it causes the ready meta label to be overridden.
			for _, addr := range ss.NotReadyAddresses {
				add(addr, port, "false")
			}
		}
	}

	// For all seen pods, check all container ports. If they were not covered
	// by one of the service endpoints, generate targets for them.
	for _, pe := range seenPods {
		for _, c := range pe.pod.Spec.Containers {
			for _, cport := range c.Ports {
				// hasSeenPort reports whether this container port matched a
				// service endpoint port handled in the loops above.
				hasSeenPort := func() bool {
					for _, eport := range pe.servicePorts {
						if cport.ContainerPort == eport.Port {
							return true
						}
					}
					return false
				}
				if hasSeenPort() {
					continue
				}

				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)

				target := model.LabelSet{
					model.AddressLabel:            lv(a),
					podContainerNameLabel:         lv(c.Name),
					podContainerPortNameLabel:     lv(cport.Name),
					podContainerPortNumberLabel:   lv(ports),
					podContainerPortProtocolLabel: lv(string(cport.Protocol)),
				}
				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
			}
		}
	}

	return tg
}
// populateLabels builds a label set from the given label set and scrape configuration.
// It returns a label set before relabeling was applied as the second return value.
// Returns a nil label set if the target is dropped during relabeling.
func populateLabels(lset model.LabelSet, cfg *config.ScrapeConfig) (res, orig model.LabelSet, err error) {
	// An address is the minimum requirement for a scrapeable target.
	if _, ok := lset[model.AddressLabel]; !ok {
		return nil, nil, fmt.Errorf("no address")
	}
	// Copy labels into the labelset for the target if they are not
	// set already. Apply the labelsets in order of decreasing precedence.
	scrapeLabels := model.LabelSet{
		model.SchemeLabel:      model.LabelValue(cfg.Scheme),
		model.MetricsPathLabel: model.LabelValue(cfg.MetricsPath),
		model.JobLabel:         model.LabelValue(cfg.JobName),
	}
	for ln, lv := range scrapeLabels {
		if _, ok := lset[ln]; !ok {
			lset[ln] = lv
		}
	}
	// Encode scrape query parameters as labels.
	// Only the first value of each parameter is encoded.
	for k, v := range cfg.Params {
		if len(v) > 0 {
			lset[model.LabelName(model.ParamLabelPrefix+k)] = model.LabelValue(v[0])
		}
	}

	// Keep a snapshot of the labels before relabeling for the second
	// return value.
	preRelabelLabels := lset.Clone()
	lset = relabel.Process(lset, cfg.RelabelConfigs...)

	// Check if the target was dropped.
	if lset == nil {
		return nil, nil, nil
	}

	// addPort checks whether we should add a default port to the address.
	// If the address is not valid, we don't append a port either.
	addPort := func(s string) bool {
		// If we can split, a port exists and we don't have to add one.
		if _, _, err := net.SplitHostPort(s); err == nil {
			return false
		}
		// If adding a port makes it valid, the previous error
		// was not due to an invalid address and we can append a port.
		_, _, err := net.SplitHostPort(s + ":1234")
		return err == nil
	}

	// If it's an address with no trailing port, infer it based on the used scheme.
	if addr := string(lset[model.AddressLabel]); addPort(addr) {
		// Addresses reaching this point are already wrapped in [] if necessary.
		switch lset[model.SchemeLabel] {
		case "http", "":
			addr = addr + ":80"
		case "https":
			addr = addr + ":443"
		default:
			return nil, nil, fmt.Errorf("invalid scheme: %q", cfg.Scheme)
		}
		lset[model.AddressLabel] = model.LabelValue(addr)
	}
	// Validate the final address (post-relabeling, post-port-inference).
	if err := config.CheckTargetAddress(lset[model.AddressLabel]); err != nil {
		return nil, nil, err
	}

	// Meta labels are deleted after relabelling. Other internal labels propagate to
	// the target which decides whether they will be part of their label set.
	for ln := range lset {
		if strings.HasPrefix(string(ln), model.MetaLabelPrefix) {
			delete(lset, ln)
		}
	}

	// Default the instance label to the target address.
	if _, ok := lset[model.InstanceLabel]; !ok {
		lset[model.InstanceLabel] = lset[model.AddressLabel]
	}
	return lset, preRelabelLabels, nil
}