func (kd *Discovery) updateServiceTargetGroup(service *Service, eps *Endpoints) *config.TargetGroup { tg := &config.TargetGroup{ Source: serviceSource(service), Labels: model.LabelSet{ serviceNamespaceLabel: model.LabelValue(service.ObjectMeta.Namespace), serviceNameLabel: model.LabelValue(service.ObjectMeta.Name), }, } for k, v := range service.ObjectMeta.Labels { labelName := strutil.SanitizeLabelName(serviceLabelPrefix + k) tg.Labels[model.LabelName(labelName)] = model.LabelValue(v) } for k, v := range service.ObjectMeta.Annotations { labelName := strutil.SanitizeLabelName(serviceAnnotationPrefix + k) tg.Labels[model.LabelName(labelName)] = model.LabelValue(v) } serviceAddress := service.ObjectMeta.Name + "." + service.ObjectMeta.Namespace + ".svc" // Append the first TCP service port if one exists. for _, port := range service.Spec.Ports { if port.Protocol == ProtocolTCP { serviceAddress += fmt.Sprintf(":%d", port.Port) break } } t := model.LabelSet{ model.AddressLabel: model.LabelValue(serviceAddress), roleLabel: model.LabelValue("service"), } tg.Targets = append(tg.Targets, t) // Now let's loop through the endpoints & add them to the target group with appropriate labels. for _, ss := range eps.Subsets { epPort := ss.Ports[0].Port for _, addr := range ss.Addresses { ipAddr := addr.IP if len(ipAddr) == net.IPv6len { ipAddr = "[" + ipAddr + "]" } address := fmt.Sprintf("%s:%d", ipAddr, epPort) t := model.LabelSet{ model.AddressLabel: model.LabelValue(address), roleLabel: model.LabelValue("endpoint"), } tg.Targets = append(tg.Targets, t) } } return tg }
func (kd *Discovery) updateNodesTargetGroup() *config.TargetGroup { kd.nodesMu.Lock() defer kd.nodesMu.Unlock() tg := &config.TargetGroup{ Source: nodesTargetGroupName, Labels: model.LabelSet{ roleLabel: model.LabelValue("node"), }, } // Now let's loop through the nodes & add them to the target group with appropriate labels. for nodeName, node := range kd.nodes { address := fmt.Sprintf("%s:%d", node.Status.Addresses[0].Address, kd.Conf.KubeletPort) t := model.LabelSet{ model.AddressLabel: model.LabelValue(address), model.InstanceLabel: model.LabelValue(nodeName), } for k, v := range node.ObjectMeta.Labels { labelName := strutil.SanitizeLabelName(nodeLabelPrefix + k) t[model.LabelName(labelName)] = model.LabelValue(v) } tg.Targets = append(tg.Targets, t) } return tg }
func parseServersetMember(data []byte, path string) (model.LabelSet, error) { member := serversetMember{} if err := json.Unmarshal(data, &member); err != nil { return nil, fmt.Errorf("error unmarshaling serverset member %q: %s", path, err) } labels := model.LabelSet{} labels[serversetPathLabel] = model.LabelValue(path) labels[model.AddressLabel] = model.LabelValue( net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port))) labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host) labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port)) for name, endpoint := range member.AdditionalEndpoints { cleanName := model.LabelName(strutil.SanitizeLabelName(name)) labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue( endpoint.Host) labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue( fmt.Sprintf("%d", endpoint.Port)) } labels[serversetStatusLabel] = model.LabelValue(member.Status) labels[serversetShardLabel] = model.LabelValue(strconv.Itoa(member.Shard)) return labels, nil }
func nodeLabels(n *apiv1.Node) model.LabelSet { ls := make(model.LabelSet, len(n.Labels)+len(n.Annotations)+2) ls[nodeNameLabel] = lv(n.Name) for k, v := range n.Labels { ln := strutil.SanitizeLabelName(nodeLabelPrefix + k) ls[model.LabelName(ln)] = lv(v) } for k, v := range n.Annotations { ln := strutil.SanitizeLabelName(nodeAnnotationPrefix + k) ls[model.LabelName(ln)] = lv(v) } return ls }
func (n *Node) buildNode(node *apiv1.Node) *config.TargetGroup { tg := &config.TargetGroup{ Source: nodeSource(node), } tg.Labels = nodeLabels(node) addr, addrMap, err := nodeAddress(node) if err != nil { n.logger.With("err", err).Debugf("No node address found") return nil } addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)) t := model.LabelSet{ model.AddressLabel: lv(addr), model.InstanceLabel: lv(node.Name), } for ty, a := range addrMap { ln := strutil.SanitizeLabelName(nodeAddressPrefix + string(ty)) t[model.LabelName(ln)] = lv(a[0]) } tg.Targets = append(tg.Targets, t) return tg }
func (ed *EC2Discovery) refresh() (*config.TargetGroup, error) { ec2s := ec2.New(ed.aws) tg := &config.TargetGroup{ Source: *ed.aws.Region, } if err := ec2s.DescribeInstancesPages(nil, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool { for _, r := range p.Reservations { for _, inst := range r.Instances { if inst.PrivateIpAddress == nil { continue } labels := model.LabelSet{ ec2LabelInstanceID: model.LabelValue(*inst.InstanceId), } if inst.PublicIpAddress != nil { labels[ec2LabelPublicIP] = model.LabelValue(*inst.PublicIpAddress) } labels[ec2LabelPrivateIP] = model.LabelValue(*inst.PrivateIpAddress) addr := fmt.Sprintf("%s:%d", *inst.PrivateIpAddress, ed.port) labels[model.AddressLabel] = model.LabelValue(addr) for _, t := range inst.Tags { name := strutil.SanitizeLabelName(*t.Key) labels[ec2LabelTag+model.LabelName(name)] = model.LabelValue(*t.Value) } tg.Targets = append(tg.Targets, labels) } } return true }); err != nil { return nil, fmt.Errorf("could not describe instances: %s", err) } return tg, nil }
func (kd *Discovery) updateNodesTargetGroup() *config.TargetGroup { kd.nodesMu.RLock() defer kd.nodesMu.RUnlock() tg := &config.TargetGroup{ Source: nodesTargetGroupName, Labels: model.LabelSet{ roleLabel: model.LabelValue("node"), }, } // Now let's loop through the nodes & add them to the target group with appropriate labels. for nodeName, node := range kd.nodes { nodeAddress, err := nodeHostIP(node) if err != nil { log.Debugf("Skipping node %s: %s", node.Name, err) continue } address := fmt.Sprintf("%s:%d", nodeAddress.String(), kd.Conf.KubeletPort) t := model.LabelSet{ model.AddressLabel: model.LabelValue(address), model.InstanceLabel: model.LabelValue(nodeName), } for k, v := range node.ObjectMeta.Labels { labelName := strutil.SanitizeLabelName(nodeLabelPrefix + k) t[model.LabelName(labelName)] = model.LabelValue(v) } tg.Targets = append(tg.Targets, t) } return tg }
func serviceLabels(svc *apiv1.Service) model.LabelSet { ls := make(model.LabelSet, len(svc.Labels)+len(svc.Annotations)+2) ls[serviceNameLabel] = lv(svc.Name) for k, v := range svc.Labels { ln := strutil.SanitizeLabelName(serviceLabelPrefix + k) ls[model.LabelName(ln)] = lv(v) } for k, v := range svc.Annotations { ln := strutil.SanitizeLabelName(serviceAnnotationPrefix + k) ls[model.LabelName(ln)] = lv(v) } return ls }
func (kd *Discovery) updateNodesTargetGroup() *config.TargetGroup { kd.nodesMu.RLock() defer kd.nodesMu.RUnlock() tg := &config.TargetGroup{ Source: nodesTargetGroupName, Labels: model.LabelSet{ roleLabel: model.LabelValue("node"), }, } // Now let's loop through the nodes & add them to the target group with appropriate labels. for nodeName, node := range kd.nodes { defaultNodeAddress, nodeAddressMap, err := nodeAddresses(node) if err != nil { log.Debugf("Skipping node %s: %s", node.Name, err) continue } kubeletPort := int(node.Status.DaemonEndpoints.KubeletEndpoint.Port) address := fmt.Sprintf("%s:%d", defaultNodeAddress.String(), kubeletPort) t := model.LabelSet{ model.AddressLabel: model.LabelValue(address), model.InstanceLabel: model.LabelValue(nodeName), } for addrType, ip := range nodeAddressMap { labelName := strutil.SanitizeLabelName(nodeAddressPrefix + string(addrType)) t[model.LabelName(labelName)] = model.LabelValue(ip[0].String()) } t[model.LabelName(nodePortLabel)] = model.LabelValue(strconv.Itoa(kubeletPort)) for k, v := range node.ObjectMeta.Labels { labelName := strutil.SanitizeLabelName(nodeLabelPrefix + k) t[model.LabelName(labelName)] = model.LabelValue(v) } tg.Targets = append(tg.Targets, t) } return tg }
func (ed *EC2Discovery) refresh() (*config.TargetGroup, error) { ec2s := ec2.New(ed.aws) tg := &config.TargetGroup{ Source: *ed.aws.Region, } if err := ec2s.DescribeInstancesPages(nil, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool { for _, r := range p.Reservations { for _, inst := range r.Instances { if inst.PrivateIpAddress == nil { continue } labels := model.LabelSet{ ec2LabelInstanceID: model.LabelValue(*inst.InstanceId), } labels[ec2LabelPrivateIP] = model.LabelValue(*inst.PrivateIpAddress) addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", ed.port)) labels[model.AddressLabel] = model.LabelValue(addr) if inst.PublicIpAddress != nil { labels[ec2LabelPublicIP] = model.LabelValue(*inst.PublicIpAddress) labels[ec2LabelPublicDNS] = model.LabelValue(*inst.PublicDnsName) } labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone) labels[ec2LabelInstanceState] = model.LabelValue(*inst.State.Name) if inst.VpcId != nil { labels[ec2LabelVPCID] = model.LabelValue(*inst.VpcId) subnetsMap := make(map[string]struct{}) for _, eni := range inst.NetworkInterfaces { subnetsMap[*eni.SubnetId] = struct{}{} } subnets := []string{} for k := range subnetsMap { subnets = append(subnets, k) } labels[ec2LabelSubnetID] = model.LabelValue( subnetSeparator + strings.Join(subnets, subnetSeparator) + subnetSeparator) } for _, t := range inst.Tags { name := strutil.SanitizeLabelName(*t.Key) labels[ec2LabelTag+model.LabelName(name)] = model.LabelValue(*t.Value) } tg.Targets = append(tg.Targets, labels) } } return true }); err != nil { return nil, fmt.Errorf("could not describe instances: %s", err) } return tg, nil }
func podLabels(pod *apiv1.Pod) model.LabelSet { ls := model.LabelSet{ podNameLabel: lv(pod.ObjectMeta.Name), podIPLabel: lv(pod.Status.PodIP), podReadyLabel: podReady(pod), podNodeNameLabel: lv(pod.Spec.NodeName), podHostIPLabel: lv(pod.Status.HostIP), } for k, v := range pod.Labels { ln := strutil.SanitizeLabelName(podLabelPrefix + k) ls[model.LabelName(ln)] = lv(v) } for k, v := range pod.Annotations { ln := strutil.SanitizeLabelName(podAnnotationPrefix + k) ls[model.LabelName(ln)] = lv(v) } return ls }
func (kd *KubernetesDiscovery) updateServiceTargetGroup(service *Service, endpoints *Endpoints) *config.TargetGroup { tg := &config.TargetGroup{ Source: serviceSource(service), Labels: clientmodel.LabelSet{ serviceNamespaceLabel: clientmodel.LabelValue(service.ObjectMeta.Namespace), serviceNameLabel: clientmodel.LabelValue(service.ObjectMeta.Name), }, } for k, v := range service.ObjectMeta.Labels { labelName := strutil.SanitizeLabelName(serviceLabelPrefix + k) tg.Labels[clientmodel.LabelName(labelName)] = clientmodel.LabelValue(v) } for k, v := range service.ObjectMeta.Annotations { labelName := strutil.SanitizeLabelName(serviceAnnotationPrefix + k) tg.Labels[clientmodel.LabelName(labelName)] = clientmodel.LabelValue(v) } // Now let's loop through the endpoints & add them to the target group with appropriate labels. for _, eps := range endpoints.Subsets { epPort := eps.Ports[0].Port for _, addr := range eps.Addresses { ipAddr := addr.IP if len(ipAddr) == net.IPv6len { ipAddr = "[" + ipAddr + "]" } address := fmt.Sprintf("%s:%d", ipAddr, epPort) t := clientmodel.LabelSet{clientmodel.AddressLabel: clientmodel.LabelValue(address)} tg.Targets = append(tg.Targets, t) } } return tg }
// updatePodTargets converts a pod into scrape targets, one per container (or
// only the first container when allContainers is false). Pods without an IP
// or not in the "Running" phase yield no targets. For each container, the
// target is addressed at the container's lowest-numbered TCP port; containers
// exposing no TCP ports are skipped.
func updatePodTargets(pod *Pod, allContainers bool) []model.LabelSet {
	var targets []model.LabelSet = make([]model.LabelSet, 0, len(pod.PodSpec.Containers))
	// A pod without an assigned IP cannot be scraped yet.
	if pod.PodStatus.PodIP == "" {
		log.Debugf("skipping pod %s -- PodStatus.PodIP is empty", pod.ObjectMeta.Name)
		return targets
	}
	if pod.PodStatus.Phase != "Running" {
		log.Debugf("skipping pod %s -- status is not `Running`", pod.ObjectMeta.Name)
		return targets
	}

	// Derive readiness ("true"/"false"/"unknown") from the pod's "ready"
	// condition, compared case-insensitively.
	ready := "unknown"
	for _, cond := range pod.PodStatus.Conditions {
		if strings.ToLower(cond.Type) == "ready" {
			ready = strings.ToLower(cond.Status)
		}
	}

	// Sort containers by name so target order is deterministic across runs.
	sort.Sort(ByContainerName(pod.PodSpec.Containers))

	for _, container := range pod.PodSpec.Containers {
		// Collect a list of TCP ports
		// Sort by port number, ascending
		// Produce a target pointed at the first port
		// Include a label containing all ports (portName=port,PortName=port,...,)
		var tcpPorts []ContainerPort
		var portLabel *bytes.Buffer = bytes.NewBufferString(",")

		for _, port := range container.Ports {
			if port.Protocol == "TCP" {
				tcpPorts = append(tcpPorts, port)
			}
		}

		if len(tcpPorts) == 0 {
			log.Debugf("skipping container %s with no TCP ports", container.Name)
			continue
		}

		sort.Sort(ByContainerPort(tcpPorts))

		// NOTE(review): the address uses pod.PodIP while the labels use
		// pod.PodStatus.PodIP — presumably Pod embeds PodStatus so both refer
		// to the same field; confirm against the Pod type definition.
		t := model.LabelSet{
			model.AddressLabel:        model.LabelValue(net.JoinHostPort(pod.PodIP, strconv.FormatInt(int64(tcpPorts[0].ContainerPort), 10))),
			podNameLabel:              model.LabelValue(pod.ObjectMeta.Name),
			podAddressLabel:           model.LabelValue(pod.PodStatus.PodIP),
			podNamespaceLabel:         model.LabelValue(pod.ObjectMeta.Namespace),
			podContainerNameLabel:     model.LabelValue(container.Name),
			podContainerPortNameLabel: model.LabelValue(tcpPorts[0].Name),
			podReadyLabel:             model.LabelValue(ready),
		}

		// Build the "name=port," list label and one per-port label per TCP port.
		for _, port := range tcpPorts {
			portLabel.WriteString(port.Name)
			portLabel.WriteString("=")
			portLabel.WriteString(strconv.FormatInt(int64(port.ContainerPort), 10))
			portLabel.WriteString(",")
			t[model.LabelName(podContainerPortMapPrefix+port.Name)] = model.LabelValue(strconv.FormatInt(int64(port.ContainerPort), 10))
		}

		t[model.LabelName(podContainerPortListLabel)] = model.LabelValue(portLabel.String())

		for k, v := range pod.ObjectMeta.Labels {
			labelName := strutil.SanitizeLabelName(podLabelPrefix + k)
			t[model.LabelName(labelName)] = model.LabelValue(v)
		}
		for k, v := range pod.ObjectMeta.Annotations {
			labelName := strutil.SanitizeLabelName(podAnnotationPrefix + k)
			t[model.LabelName(labelName)] = model.LabelValue(v)
		}

		targets = append(targets, t)

		// Only the first (alphabetically) container is targeted unless all
		// containers were requested.
		if !allContainers {
			break
		}
	}

	if len(targets) == 0 {
		log.Debugf("no targets for pod %s", pod.ObjectMeta.Name)
	}

	return targets
}
func (ad *AzureDiscovery) refresh() (tg *config.TargetGroup, err error) { t0 := time.Now() defer func() { azureSDRefreshDuration.Observe(time.Since(t0).Seconds()) if err != nil { azureSDRefreshFailuresCount.Inc() } }() tg = &config.TargetGroup{} client, err := createAzureClient(*ad.cfg) if err != nil { return tg, fmt.Errorf("could not create Azure client: %s", err) } var machines []compute.VirtualMachine result, err := client.vm.ListAll() if err != nil { return tg, fmt.Errorf("could not list virtual machines: %s", err) } machines = append(machines, *result.Value...) // If we still have results, keep going until we have no more. for result.NextLink != nil { result, err = client.vm.ListAllNextResults(result) if err != nil { return tg, fmt.Errorf("could not list virtual machines: %s", err) } machines = append(machines, *result.Value...) } log.Debugf("Found %d virtual machines during Azure discovery.", len(machines)) // We have the slice of machines. Now turn them into targets. // Doing them in go routines because the network interface calls are slow. type target struct { labelSet model.LabelSet err error } ch := make(chan target, len(machines)) for i, vm := range machines { go func(i int, vm compute.VirtualMachine) { r, err := newAzureResourceFromID(*vm.ID) if err != nil { ch <- target{labelSet: nil, err: err} return } labels := model.LabelSet{ azureLabelMachineID: model.LabelValue(*vm.ID), azureLabelMachineName: model.LabelValue(*vm.Name), azureLabelMachineLocation: model.LabelValue(*vm.Location), azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup), } if vm.Tags != nil { for k, v := range *vm.Tags { name := strutil.SanitizeLabelName(k) labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v) } } // Get the IP address information via separate call to the network provider. 
for _, nic := range *vm.Properties.NetworkProfile.NetworkInterfaces { r, err := newAzureResourceFromID(*nic.ID) if err != nil { ch <- target{labelSet: nil, err: err} return } networkInterface, err := client.nic.Get(r.ResourceGroup, r.Name, "") if err != nil { log.Errorf("Unable to get network interface %s: %s", r.Name, err) ch <- target{labelSet: nil, err: err} // Get out of this routine because we cannot continue without a network interface. return } // Unfortunately Azure does not return information on whether a VM is deallocated. // This information is available via another API call however the Go SDK does not // yet support this. On deallocated machines, this value happens to be nil so it // is a cheap and easy way to determine if a machine is allocated or not. if networkInterface.Properties.Primary == nil { log.Debugf("Virtual machine %s is deallocated. Skipping during Azure SD.", *vm.Name) ch <- target{} return } if *networkInterface.Properties.Primary { for _, ip := range *networkInterface.Properties.IPConfigurations { if ip.Properties.PrivateIPAddress != nil { labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress) address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", ad.port)) labels[model.AddressLabel] = model.LabelValue(address) ch <- target{labelSet: labels, err: nil} return } // If we made it here, we don't have a private IP which should be impossible. // Return an empty target and error to ensure an all or nothing situation. err = fmt.Errorf("unable to find a private IP for VM %s", *vm.Name) ch <- target{labelSet: nil, err: err} return } } } }(i, vm) } for range machines { tgt := <-ch if tgt.err != nil { return nil, fmt.Errorf("unable to complete Azure service discovery: %s", err) } if tgt.labelSet != nil { tg.Targets = append(tg.Targets, tgt.labelSet) } } log.Debugf("Azure discovery completed.") return tg, nil }
func (gd *GCEDiscovery) refresh() (tg *config.TargetGroup, err error) { t0 := time.Now() defer func() { gceSDRefreshDuration.Observe(time.Since(t0).Seconds()) if err != nil { gceSDRefreshFailuresCount.Inc() } }() tg = &config.TargetGroup{ Source: fmt.Sprintf("GCE_%s_%s", gd.project, gd.zone), } ilc := gd.isvc.List(gd.project, gd.zone) if len(gd.filter) > 0 { ilc = ilc.Filter(gd.filter) } err = ilc.Pages(nil, func(l *compute.InstanceList) error { for _, inst := range l.Items { if len(inst.NetworkInterfaces) == 0 { continue } labels := model.LabelSet{ gceLabelProject: model.LabelValue(gd.project), gceLabelZone: model.LabelValue(inst.Zone), gceLabelInstanceName: model.LabelValue(inst.Name), gceLabelInstanceStatus: model.LabelValue(inst.Status), } priIface := inst.NetworkInterfaces[0] labels[gceLabelNetwork] = model.LabelValue(priIface.Network) labels[gceLabelSubnetwork] = model.LabelValue(priIface.Subnetwork) labels[gceLabelPrivateIP] = model.LabelValue(priIface.NetworkIP) addr := fmt.Sprintf("%s:%d", priIface.NetworkIP, gd.port) labels[model.AddressLabel] = model.LabelValue(addr) // Tags in GCE are usually only used for networking rules. if inst.Tags != nil && len(inst.Tags.Items) > 0 { // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. tags := gd.tagSeparator + strings.Join(inst.Tags.Items, gd.tagSeparator) + gd.tagSeparator labels[gceLabelTags] = model.LabelValue(tags) } // GCE metadata are key-value pairs for user supplied attributes. if inst.Metadata != nil { for _, i := range inst.Metadata.Items { // Protect against occasional nil pointers. 
if i.Value == nil { continue } name := strutil.SanitizeLabelName(i.Key) labels[gceLabelMetadata+model.LabelName(name)] = model.LabelValue(*i.Value) } } if len(priIface.AccessConfigs) > 0 { ac := priIface.AccessConfigs[0] if ac.Type == "ONE_TO_ONE_NAT" { labels[gceLabelPublicIP] = model.LabelValue(ac.NatIP) } } tg.Targets = append(tg.Targets, labels) } return nil }) if err != nil { return tg, fmt.Errorf("error retrieving refresh targets from gce: %s", err) } return tg, nil }