// assembleConfigFiles reads the main YAML config file and merges in every
// conf.d/*.yaml file that sits next to it.
func assembleConfigFiles(configFile string) (map[string]interface{}, error) {
	logs.Info("Reading config file : %s", configFile)
	doc, err := yaml.ReadFile(configFile)
	if err != nil {
		return nil, err
	}
	mapping := YamlUnmarshal(doc.Root).(map[string]interface{})

	confDPath := path.Dir(configFile) + "/conf.d/*.yaml"
	logs.Info("Reading config folder : %s", confDPath)
	entries, err := filepath.Glob(confDPath)
	if err != nil {
		logs.Warn("Can't read relevant conf.d: %s", err)
	} else {
		for _, entry := range entries {
			logs.Info("Reading config file : %s", entry)
			doc, err := yaml.ReadFile(entry)
			if err != nil {
				logs.Warn("Can't read relevant conf.d: %s", err)
			} else {
				inner := YamlUnmarshal(doc.Root).(map[string]interface{})
				RecursiveMergeNoConflict(mapping, inner, "")
			}
		}
	}
	return mapping, nil
}
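// Hedged usage sketch (not part of the original source): assembleConfigFiles returns
// the base mapping with every conf.d/*.yaml merged in via RecursiveMergeNoConflict.
// The config path below is illustrative only.
func exampleAssembleConfig() {
	mapping, err := assembleConfigFiles("/etc/dendrite/config.yaml") // hypothetical path
	if err != nil {
		logs.Fatal("could not assemble config: %s", err)
	}
	// Keys contributed by conf.d files sit alongside keys from the main file, so
	// downstream code can treat the result as a single configuration tree.
	for k := range mapping {
		logs.Info("top-level config key: %s", k)
	}
}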
func (*EsbulkapiEncoder) Encode(out map[string]Column, writer io.Writer) {
	stripped := make(map[string]interface{})
	for k, v := range out {
		stripped[k] = v.Value
	}
	// Elasticsearch bulk-API action line; the record's _group becomes the type.
	group, _ := stripped["_group"].(string)
	header := "{ \"index\" : { \"_index\" : \"test\", \"_type\" : \"" + group + "\" } }"
	buf := []byte(header)
	buf = append(buf, '\n')
	body, err := json.Marshal(stripped)
	if err != nil {
		panic(err)
	}
	buf = append(buf, body...)
	buf = append(buf, '\n')
	logs.Info("Writing bytes to amqp: " + string(buf))
	writer.Write(buf)
}
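// Hedged usage sketch, assuming Column is a struct exposing the Value field used
// above: encoding one record produces the two-line Elasticsearch bulk format, an
// action header followed by the JSON document, each newline-terminated. The field
// values are illustrative only.
func exampleEsbulkEncode() {
	rec := map[string]Column{
		"_group":  {Value: "nginx"},     // hypothetical group name
		"message": {Value: "GET / 200"}, // hypothetical log line
	}
	var buf bytes.Buffer
	enc := &EsbulkapiEncoder{}
	enc.Encode(rec, &buf)
	logs.Info("bulk payload:\n%s", buf.String())
}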
func NewAMQPReadWriter(u *url.URL, options []string) (io.ReadWriteCloser, error) {
	exchange := options[1]
	rkey := options[2]
	logs.Info("AMQP Exchange: " + exchange + " and routing key: " + rkey)
	logs.Info("AMQP url: " + u.String())
	conn, err := amqp.Dial(u.String())
	if err != nil {
		return nil, err
	}
	channel, err := conn.Channel()
	if err != nil {
		return nil, err
	}
	return &rwStruct{EmptyReader, &AMQPWriter{channel, exchange, rkey}, conn}, nil
}
func (tail *Tail) StartWatching() {
	go func() {
		fi, err := tail.Stat()
		if err != nil {
			logs.Error("Can't stat file: %s", err)
			return
		}
		logs.Info("Start Watching: %s", tail.Path)
		c := tail.Watcher.ChangeEvents(&tomb.Tomb{}, fi)
		go tail.pollChannel(c.Modified)
		go tail.pollChannel(c.Truncated)
		go tail.pollChannel(c.Deleted)
	}()
}
func (dests *Destinations) Consume(ch chan Record, finished chan bool) {
	if len(*dests) == 0 {
		logs.Warn("No destinations specified")
	}
	for {
		rec := <-ch
		if rec == nil {
			break
		}
		for _, dest := range *dests {
			dest.Encoder.Encode(rec, dest.RW)
		}
	}
	logs.Info("Finished consuming log records")
	finished <- true
}
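// Hedged usage sketch: Consume drains Records from the channel until it receives
// nil (which is what a closed channel yields), fans each record out to every
// destination, then signals completion on the finished channel.
func exampleConsume(dests *Destinations, records []Record) {
	ch := make(chan Record, len(records))
	finished := make(chan bool)
	go dests.Consume(ch, finished)
	for _, rec := range records {
		ch <- rec
	}
	close(ch) // the nil read from the closed channel ends the loop above
	<-finished
}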
func (er *AMQPWriter) Write(p []byte) (int, error) {
	logs.Info("Publishing to amqp: " + string(p))
	err := er.channel.Publish(
		er.exchange, // publish to an exchange
		er.rkey,     // routing to 0 or more queues
		false,       // mandatory
		false,       // immediate
		amqp.Publishing{
			Headers:         amqp.Table{},
			ContentType:     "text/plain",
			ContentEncoding: "",
			Body:            p,
			DeliveryMode:    amqp.Transient, // 1=non-persistent, 2=persistent
			Priority:        0,              // 0-9
			// a bunch of application/implementation-specific fields
		})
	if err != nil {
		return 0, err
	}
	// io.Writer contract: report the full length written when there is no error.
	return len(p), nil
}
func NewRedisReadWriter(u *url.URL, options []string) (io.ReadWriteCloser, error) {
	var method string
	switch options[1] {
	case "list":
		method = "LIST"
	case "publish":
		method = "PUBLISH"
	default:
		return nil, fmt.Errorf("unknown redis method: %s", options[1])
	}
	redisdest := options[2]
	logs.Info("Redis method: " + method + " and destination: " + redisdest)
	conn, err := net.Dial("tcp", u.Host)
	if err != nil {
		return nil, err
	}
	return &rwStruct{EmptyReader, &RedisWriter{conn, method, redisdest}, conn}, nil
}
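// Hedged usage sketch: as with NewAMQPReadWriter above, options[1] and options[2]
// carry the backend-specific settings (here the redis method and the destination
// name); options[0] is not read by this constructor and is assumed to be the scheme.
// The URL and values are illustrative only.
func exampleRedisWriter() {
	u, err := url.Parse("redis://localhost:6379") // hypothetical destination URL
	if err != nil {
		logs.Fatal("bad url: %s", err)
	}
	rw, err := NewRedisReadWriter(u, []string{"redis", "list", "dendrite"})
	if err != nil {
		logs.Fatal("can't connect to redis: %s", err)
	}
	defer rw.Close()
	rw.Write([]byte("hello from dendrite\n")) // delivered via the configured method
}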
// configFromMapping turns the merged YAML mapping into a Config, skipping invalid
// sources, fields, and destinations with a warning rather than failing outright.
func configFromMapping(mapping map[string]interface{}, hostname string) (*Config, error) {
	b, _ := json.Marshal(mapping)
	logs.Debug("mapping: %s", string(b))

	config := new(Config)
	config.Sources = make([]SourceConfig, 0)
	config.Destinations = make([]DestinationConfig, 0)

	global, err := getMap(mapping, "global")
	if err != nil {
		return nil, fmt.Errorf("no global section in the config file")
	}
	config.OffsetDir, err = getString(global, "offset_dir")
	if err != nil {
		logs.Warn("no offset_dir specified")
	}
	config.MaxBackfillBytes, err = getInt64(global, "max_backfill_bytes")
	if err != nil {
		logs.Warn("no max_backfill_bytes, continuing with unlimited")
		config.MaxBackfillBytes = -1
	}
	config.MaxLineSizeBytes, err = getInt64(global, "max_linesize_bytes")
	if err != nil {
		logs.Warn("no max_linesize_bytes, continuing with 32768")
		config.MaxLineSizeBytes = 32768
	}

	sources, err := getMap(mapping, "sources")
	if err != nil {
		return nil, fmt.Errorf("no sources section in the config file")
	}
	for name := range sources {
		src, err := getMap(sources, name)
		if err != nil {
			logs.Warn("Invalid source: %s, continuing...", name)
			continue
		}
		var source SourceConfig
		source.Hostname = hostname
		source.Fields = make([]FieldConfig, 0)
		source.OffsetDir = config.OffsetDir
		source.MaxBackfillBytes = config.MaxBackfillBytes
		source.MaxLineSizeBytes = config.MaxLineSizeBytes
		source.Name = name
		source.Glob, err = getString(src, "glob")
		if err != nil {
			return nil, err
		}
		source.Pattern, err = getString(src, "pattern")
		if err != nil {
			source.Pattern = DefaultPattern
		}
		_, err = regexp.Compile(source.Pattern)
		if err != nil {
			logs.Warn("%s is not a valid regexp, continuing... (%s)", source.Pattern, err)
			continue
		}

		fields, err := getMap(src, "fields")
		if err != nil {
			logs.Warn("no fields for source %s, continuing...", name)
		}
		for alias := range fields {
			fld, err := getMap(fields, alias)
			if err != nil {
				logs.Warn("%s is not a map, continuing... (%s)", alias, err)
				continue
			}
			var field FieldConfig
			field.Alias = alias
			field.Name, err = getString(fld, "name")
			if err != nil {
				field.Name = field.Alias
			}
			field.Group, err = getInt(fld, "group")

			s, err := getString(fld, "type")
			if err != nil {
				field.Type = String
			} else {
				field.Type, err = parseFieldType(s)
				if err != nil {
					logs.Warn("Invalid field type: %s, continuing... (error was %s)", s, err)
					continue
				}
			}
			logs.Info("found type %s", field.Type)

			s, err = getString(fld, "treatment")
			if err != nil {
				field.Treatment = String
			} else {
				field.Treatment, err = parseFieldTreatment(s)
				if err != nil {
					logs.Warn("Invalid field treatment: %s, continuing... (error was %s)", s, err)
					continue
				}
			}
			logs.Info("found treatment %s", field.Treatment)

			field.Salt, err = getString(fld, "salt")
			field.Format, err = getString(fld, "format")
			s, err = getString(fld, "pattern")
			field.Pattern, err = regexp.Compile(s)
			if err != nil {
				logs.Warn("Invalid regex: %s, continuing... (error was %s)", s, err)
				continue
			}
			source.Fields = append(source.Fields, field)
		}
		config.Sources = append(config.Sources, source)
	}

	destinations, err := getMap(mapping, "destinations")
	if err != nil {
		return nil, fmt.Errorf("no destinations section in the config file")
	}
	for name := range destinations {
		var dest DestinationConfig
		urlString, err := getString(destinations, name)
		if err != nil {
			logs.Warn("Invalid destination: %s, continuing... (%s)", name, err)
			continue
		}
		u, err := url.Parse(urlString)
		if err != nil {
			logs.Warn("Invalid URL: %s, continuing... (error was %s)", urlString, err)
			continue
		}
		logs.Info("Found destination: %s", urlString)
		dest.Name = name
		dest.Url = u
		config.Destinations = append(config.Destinations, dest)
	}
	return config, nil
}
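// Hedged sketch of the mapping shape configFromMapping expects: a "global" section,
// a "sources" section keyed by source name, and a "destinations" section mapping
// names to URL strings. Optional keys (pattern, max_backfill_bytes, ...) are omitted
// so their defaults apply; all values below are illustrative, not project defaults.
func exampleConfigFromMapping() {
	mapping := map[string]interface{}{
		"global": map[string]interface{}{
			"offset_dir": "/var/lib/dendrite", // hypothetical offset directory
		},
		"sources": map[string]interface{}{
			"nginx": map[string]interface{}{
				"glob": "/var/log/nginx/*.log", // hypothetical glob
			},
		},
		"destinations": map[string]interface{}{
			"local": "file:///tmp/dendrite.out", // hypothetical destination URL
		},
	}
	config, err := configFromMapping(mapping, "example-host")
	if err != nil {
		logs.Fatal("bad mapping: %s", err)
	}
	logs.Info("parsed %d sources and %d destinations", len(config.Sources), len(config.Destinations))
}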
func main() {
	flag.Parse()
	runtime.GOMAXPROCS(*cpus)

	// set the logger path
	handle, err := os.OpenFile(*logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		logs.Warn("Unable to open log file %s, using stderr: %s", *logFile, err)
	} else {
		logs.Logger = log.New(handle, "", log.LstdFlags|log.Lshortfile)
	}

	// Check whether we're in debug mode
	if *debug {
		logs.SetLevel(logs.DEBUG)
		logs.Debug("logging at DEBUG")
	} else {
		logs.SetLevel(logs.INFO)
	}

	if *name == "unknown" {
		*name, err = os.Hostname()
		if err != nil {
			logs.Warn("Unable to determine hostname: %s", err)
		}
	}

	// Read the config files
	config, err := dendrite.NewConfig(*configFile, *name)
	if err != nil {
		logs.Fatal("Can't read configuration: %s", err)
	}

	// Link up all of the objects
	ch := make(chan dendrite.Record, 100)
	logs.Debug("original %s", ch)
	dests := config.CreateDestinations()
	groups := config.CreateAllTailGroups(ch)

	// If any of our destinations talk back, log it.
	go func() {
		reader := bufio.NewReader(dests.Reader())
		for {
			str, err := reader.ReadString('\n')
			if err == io.EOF {
				logs.Debug("eof")
				time.Sleep(1 * time.Second)
			} else if err != nil {
				logs.Error("error reading: %s", err)
			} else {
				logs.Info("received: %s", str)
			}
		}
	}()

	// Do the event loop
	finished := make(chan bool)
	go dests.Consume(ch, finished)
	if *quitAfter >= 0 {
		start := time.Now()
		logs.Debug("starting the poll")
		i := 0
		for {
			i++
			if i%10 == 0 {
				groups.Refresh()
			}
			groups.Poll()
			if time.Since(start) >= time.Duration(*quitAfter*float64(time.Second)) {
				break
			}
		}
	} else {
		logs.Debug("starting the loop")
		groups.Loop()
	}
	logs.Info("Closing...")
	close(ch)
	<-finished
	logs.Info("Goodbye!")
}