/
main.go
180 lines (153 loc) · 5.35 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
/*
Copyright 2014 Rohith All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"os/signal"
"syscall"
"time"
"github.com/golang/glog"
yaml "gopkg.in/yaml.v2"
)
// main is the service entry point: it validates that there is work to do,
// creates a fleet API client, then loops forever — fetching the machine list,
// building Prometheus target groups and persisting them to file — until the
// poll interval elapses (loop again) or a termination signal arrives (exit).
func main() {
	flag.Parse()
	// step: ensure we have some jobs to process
	if config.jobs.Size() <= 0 && !config.includeAll {
		glog.Infof("Zero job specifications have been defined and the includeAll option is false, nothing to do!")
		os.Exit(0)
	}
	glog.Infof("Starting the %s service, version: %s, git+sha: %s", Prog, Version, GitSha)
	// step: we grab a client to fleet api
	client, err := newFleetClient()
	if err != nil {
		glog.Errorf("Failed to create a fleet client on socket: %s, error: %s", config.fleetSocket, err)
		os.Exit(1)
	}
	// step: setup the termination signal
	// NOTE: the channel must be buffered — signal.Notify does not block when
	// delivering, so a signal arriving while we are busy in the loop body
	// would be dropped on an unbuffered channel (go vet reports this).
	signalChannel := make(chan os.Signal, 1)
	signal.Notify(signalChannel, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	for {
		var targets TargetGroups
		// step: grab a list of machines from fleet
		machines, err := client.GetMachines()
		if err != nil {
			glog.Errorf("Failed to retrieve a list of machines from fleet, error: %s", err)
			// skip this cycle but keep the service running; retry after the interval
			goto NEXT_LOOP
		}
		// step: if we have zero jobs; we don't need to filter, just place everything into the default group
		switch config.jobs.Size() {
		case 0:
			targets = produceDefaultTargets(machines)
		default:
			targets = produceFilteredTargets(config.jobs, machines)
		}
		// step: write the targets to file (a failure here is logged but non-fatal)
		if err := persistTargets(targets, config.prometheusFile); err != nil {
			glog.Errorf("Failed to persist the targets to file: %s, error: %s", config.prometheusFile, err)
		}
	NEXT_LOOP:
		// wait for either the next poll interval or a termination signal
		select {
		case <-time.After(config.interval):
		case <-signalChannel:
			glog.Infof("Exiting the service")
			os.Exit(0)
		}
	}
}
// produceDefaultTargets builds a single target group — the configured default
// job — containing every supplied machine on the default scrape port.
func produceDefaultTargets(machines []*Machine) TargetGroups {
	var groups TargetGroups
	defaultTarget := groups.AddTarget(config.defaultJobName)
	// every machine lands in the default group, addressed as host:port
	for _, m := range machines {
		endpoint := fmt.Sprintf("%s:%d", m.name, config.defaultPort)
		defaultTarget.Targets = append(defaultTarget.Targets, endpoint)
	}
	return groups
}
// produceFilteredTargets builds one target group per job, placing into each
// group every machine whose fleet metadata tag matches the job's tag name and
// value. When config.includeAll is set, machines that matched no job are
// swept into the default group so nothing is left unscraped.
func produceFilteredTargets(jobs *Jobs, machines []*Machine) TargetGroups {
	var groups TargetGroups
	// we use a map to keep track of the machines which have been added to a group
	added := make(map[string]bool)
	// step: for each of the jobs we need to produce the targets
	for _, job := range jobs.items {
		target := groups.AddTarget(job.name)
		target.Labels[job.tagName] = job.tagValue
		glog.V(6).Infof("Processing the %d machines against the job: %s", len(machines), job)
		// step: iterate the machines and find any one that matches our metadata
		for _, machine := range machines {
			// step: does it have a matching tag
			if value, found := machine.metadata[job.tagName]; !found {
				glog.V(6).Infof("The machine: %s does not have matching tag: %s in its metadata", machine, job.tagName)
				continue
			} else if value == job.tagValue {
				// we can append this machine to the list of targets
				target.Targets = append(target.Targets, fmt.Sprintf("%s:%d", machine.name, job.port))
				// keep the tracker updated
				added[machine.name] = true
			}
		}
	}
	// step: are we adding 'all' the machines, even those not matching.
	// We check if we have left overs, then iterate the machines and find any machine which has not been
	// added already and add to the default group
	if config.includeAll {
		if len(added) != len(machines) {
			// we have machines that haven't been added
			target := groups.AddTarget(config.defaultJobName)
			for _, machine := range machines {
				if _, found := added[machine.name]; !found {
					target.Targets = append(target.Targets, fmt.Sprintf("%s:%d", machine.name, config.defaultPort))
				}
			}
		}
	}
	return groups
}
// persistTargets serializes the target groups to YAML and writes them to
// filename. In dry-run mode the YAML is printed to stdout instead of being
// written. Returns any encoding or write error (already logged here).
func persistTargets(targets TargetGroups, filename string) error {
	// step: first we encode the structure
	content, err := encode(targets.targets)
	if err != nil {
		glog.Errorf("Failed to encode the target structure into yaml, error: %s", err)
		return err
	}
	// step: in dry-run mode we only show what would have been written
	if config.dryRun {
		fmt.Printf("%s", content)
		return nil
	}
	// step: attempt to write the file (0664: group-writable for operators)
	err = ioutil.WriteFile(filename, content, os.FileMode(0664))
	if err != nil {
		glog.Errorf("Failed to write to file: '%s', error: %s", filename, err)
		return err
	}
	glog.V(4).Infof("Successfully wrote the targets to file: %s", filename)
	return nil
}
// encode marshals an arbitrary structure into YAML. On failure it logs the
// error and returns an empty byte slice together with a wrapped error.
func encode(data interface{}) (output []byte, err error) {
	output, err = yaml.Marshal(data)
	if err != nil {
		glog.Errorf("Failed to marshal the structure to yaml, %s, error: %s", data, err)
		// %v (not the invalid verb %V) so the data renders instead of %!V(...)
		return []byte{}, fmt.Errorf("marshalling failure, data: %v, error: %s", data, err)
	}
	return
}
// decode unmarshals YAML content into the supplied structure, logging and
// returning any parse error.
func decode(input []byte, data interface{}) error {
	if err := yaml.Unmarshal(input, data); err != nil {
		glog.Errorf("Failed to unmarshall the content into a struct, %s, error: %s", data, err)
		return err
	}
	return nil
}