func InitCollectorController(metrics met.Backend) {
	sec := setting.Cfg.Section("event_publisher")

	// clear out any collector sessions left over from a previous run of this instance.
	cmd := &m.ClearCollectorSessionCommand{
		InstanceId: setting.InstanceId,
	}
	if err := bus.Dispatch(cmd); err != nil {
		log.Fatal(0, "failed to clear collectorSessions", err)
	}

	// when the event publisher is enabled, monitor/collector events arrive over RabbitMQ;
	// otherwise they are handled in-process via the bus.
	if sec.Key("enabled").MustBool(false) {
		url := sec.Key("rabbitmq_url").String()
		exchange := sec.Key("exchange").String()
		exch := rabbitmq.Exchange{
			Name:         exchange,
			ExchangeType: "topic",
			Durable:      true,
		}
		q := rabbitmq.Queue{
			Name:       "",
			Durable:    false,
			AutoDelete: true,
			Exclusive:  true,
		}
		consumer := rabbitmq.Consumer{
			Url:        url,
			Exchange:   &exch,
			Queue:      &q,
			BindingKey: []string{"INFO.monitor.*", "INFO.collector.*"},
		}
		err := consumer.Connect()
		if err != nil {
			log.Fatal(0, "failed to start event.consumer.", err)
		}
		consumer.Consume(eventConsumer)
	} else {
		// tap into the update/add/delete events emitted when monitors are modified.
		bus.AddEventListener(EmitUpdateMonitor)
		bus.AddEventListener(EmitAddMonitor)
		bus.AddEventListener(EmitDeleteMonitor)
		bus.AddEventListener(HandleCollectorConnected)
		bus.AddEventListener(HandleCollectorDisconnected)
	}

	// buffer incoming metrics and hand them off to the publisher.
	metricsRecvd = metrics.NewCount("collector-ctrl.metrics-recv")
	bufCh = make(chan m.MetricDefinition, runtime.NumCPU()*100)
	go metricpublisher.ProcessBuffer(bufCh)
}
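// The eventConsumer callback handed to consumer.Consume above is defined elsewhere
// and not shown here. Judging from the Consume call in the older distributed()
// variant at the end of this section, it takes an *amqp.Delivery and returns an
// error. The sketch below only illustrates that shape; the body is a placeholder,
// not the real handler (which presumably translates monitor/collector events into
// bus events).
func exampleEventConsumer(msg *amqp.Delivery) error {
	// placeholder: just log the routing key and payload of the incoming event.
	log.Info("event received on " + msg.RoutingKey + ": " + string(msg.Body))
	return nil
}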
func distributed(url string, cache *lru.Cache) error {
	exchange := "alertingJobs"
	exch := rabbitmq.Exchange{
		Name:         exchange,
		ExchangeType: "x-consistent-hash",
		Durable:      true,
	}

	if setting.EnableScheduler {
		publisher := &rabbitmq.Publisher{Url: url, Exchange: &exch}
		err := publisher.Connect()
		if err != nil {
			return err
		}
		// dispatched jobs are buffered locally, then published to the consistent-hash exchange.
		jobQueue := newPreAMQPJobQueue(setting.PreAMQPJobQueueSize, publisher)
		go Dispatcher(jobQueue)
	}

	q := rabbitmq.Queue{
		Name:       "",
		Durable:    false,
		AutoDelete: true,
		Exclusive:  true,
	}
	// start one AMQP consumer per executor to pull jobs back off the exchange and run them.
	for i := 0; i < setting.Executors; i++ {
		consumer := rabbitmq.Consumer{
			Url:        url,
			Exchange:   &exch,
			Queue:      &q,
			BindingKey: []string{"10"}, // consistent hashing weight.
		}
		if err := consumer.Connect(); err != nil {
			log.Fatal(0, "failed to start event.consumer.", err)
		}
		AmqpExecutor(GraphiteAuthContextReturner, consumer, cache)
	}
	return nil
}
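// newPreAMQPJobQueue and AmqpExecutor are defined elsewhere. Based on the inline
// publisher goroutine in the older distributed() variant below, the pre-AMQP job
// queue plausibly wraps a bounded channel whose consumer marshals each job and
// publishes it with the monitor id as routing key. This is a hypothetical sketch
// under that assumption, not the actual implementation (note the distinct name
// and the chan return type, which may differ from the real job queue type).
func newPreAMQPJobQueueSketch(size int, publisher *rabbitmq.Publisher) chan<- Job {
	ch := make(chan Job, size)
	go func() {
		for job := range ch {
			msg, err := json.Marshal(job)
			if err != nil {
				log.Error(3, "failed to marshal job to json.", err)
				continue
			}
			// route by monitor id so the consistent-hash exchange keeps a given
			// monitor's jobs on the same consumer.
			publisher.Publish(fmt.Sprintf("%d", job.MonitorId), msg)
		}
	}()
	return ch
}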
func InitCollectorController() {
	sec := setting.Cfg.Section("event_publisher")

	// get our instance-id: reuse the one persisted on disk, or generate and persist a new one.
	dataPath := setting.DataPath + "/instance-id"
	log.Info("instance-id path: " + dataPath)
	fs, err := os.Open(dataPath)
	if err != nil {
		fs, err = os.Create(dataPath)
		if err != nil {
			log.Fatal(0, "failed to create instance-id file", err)
		}
		defer fs.Close()
		instanceId = uuid.NewV4().String()
		if _, err := fs.Write([]byte(instanceId)); err != nil {
			log.Fatal(0, "failed to write instanceId to file", err)
		}
	} else {
		defer fs.Close()
		content, err := ioutil.ReadAll(fs)
		if err != nil {
			log.Fatal(0, "failed to read instanceId", err)
		}
		instanceId = strings.Split(string(content), "\n")[0]
	}
	if instanceId == "" {
		log.Fatal(0, "invalid instanceId. check "+dataPath, nil)
	}

	// clear out any collector sessions left over from a previous run of this instance.
	cmd := &m.ClearCollectorSessionCommand{
		InstanceId: instanceId,
	}
	if err := bus.Dispatch(cmd); err != nil {
		log.Fatal(0, "failed to clear collectorSessions", err)
	}

	if sec.Key("enabled").MustBool(false) {
		url := sec.Key("rabbitmq_url").String()
		exchange := sec.Key("exchange").String()
		exch := rabbitmq.Exchange{
			Name:         exchange,
			ExchangeType: "topic",
			Durable:      true,
		}
		q := rabbitmq.Queue{
			Name:       "",
			Durable:    false,
			AutoDelete: true,
			Exclusive:  true,
		}
		consumer := rabbitmq.Consumer{
			Url:        url,
			Exchange:   &exch,
			Queue:      &q,
			BindingKey: []string{"INFO.monitor.*", "INFO.collector.*"},
		}
		err := consumer.Connect()
		if err != nil {
			log.Fatal(0, "failed to start event.consumer.", err)
		}
		consumer.Consume(eventConsumer)
	} else {
		// tap into the update/add/delete events emitted when monitors are modified.
		bus.AddEventListener(EmitUpdateMonitor)
		bus.AddEventListener(EmitAddMonitor)
		bus.AddEventListener(EmitDeleteMonitor)
		bus.AddEventListener(HandleCollectorConnected)
		bus.AddEventListener(HandleCollectorDisconnected)
	}

	bufCh = make(chan m.MetricDefinition, runtime.NumCPU())
	go metricpublisher.ProcessBuffer(bufCh)
}
func distributed(url string) error {
	exchange := "alertingJobs"
	exch := rabbitmq.Exchange{
		Name:         exchange,
		ExchangeType: "x-consistent-hash",
		Durable:      true,
	}
	publisher := &rabbitmq.Publisher{Url: url, Exchange: &exch}
	err := publisher.Connect()
	if err != nil {
		return err
	}

	jobQueue := make(chan Job, jobQueueSize)
	go Dispatcher(jobQueue)

	// send dispatched jobs to rabbitmq, keyed by monitor id so that jobs for the
	// same monitor always hash to the same consumer.
	go func(jobQueue <-chan Job) {
		for job := range jobQueue {
			routingKey := fmt.Sprintf("%d", job.MonitorId)
			msg, err := json.Marshal(job)
			//log.Info("sending: " + string(msg))
			if err != nil {
				log.Error(3, "failed to marshal job to json.", err)
				continue
			}
			publisher.Publish(routingKey, msg)
		}
	}(jobQueue)

	q := rabbitmq.Queue{
		Name:       "",
		Durable:    false,
		AutoDelete: true,
		Exclusive:  true,
	}
	consumer := rabbitmq.Consumer{
		Url:        url,
		Exchange:   &exch,
		Queue:      &q,
		BindingKey: []string{"10"}, // consistent hashing weight.
	}
	if err := consumer.Connect(); err != nil {
		log.Fatal(0, "failed to start event.consumer.", err)
	}

	consumeQueue := make(chan Job, jobQueueSize)
	// read jobs from rabbitmq and push them into the execution channel.
	consumer.Consume(func(msg *amqp.Delivery) error {
		// convert from json to Job
		job := Job{}
		//log.Info("recvd: " + string(msg.Body))
		if err := json.Unmarshal(msg.Body, &job); err != nil {
			log.Error(0, "failed to unmarshal msg body.", err)
			return err
		}
		job.StoreMetricFunc = api.StoreMetric
		select {
		case consumeQueue <- job:
		default:
			// TODO: alert when this happens
			Stat.Increment("alert-dispatcher.jobs-skipped-due-to-slow-jobqueue")
		}
		return nil
	})

	// start a group of workers to execute the jobs in the execution channel.
	for i := 0; i < 10; i++ {
		go Executor(GraphiteAuthContextReturner, consumeQueue)
	}
	return nil
}
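// Executor itself is defined elsewhere. The calls above suggest a plain worker
// loop: each goroutine drains Jobs from the execution channel and evaluates them
// with the supplied Graphite auth context. A hypothetical, simplified shape is
// sketched below; the evaluation step is abstracted into a callback here, whereas
// the real Executor takes GraphiteAuthContextReturner directly and contains the
// actual alerting logic.
func executorSketch(jobs <-chan Job, run func(Job) error) {
	for job := range jobs {
		if err := run(job); err != nil {
			log.Error(3, "job execution failed.", err)
		}
	}
}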