// Event logger sending loop.
func (ev *EventLogger) _run() {
	defer func() {
		lib_gc_panic_catching.PanicCatching("EventLogger._run")
		ev.is_running = false
	}()
	lib_gc_log.Info.Printf("EVENT_LOGGER content [%p, %s], container [%p, %s] RUN STARTED.\n", &ev.Contents, DEFAULT_EVENT_LOGGER, ev.Container, ev.Container.GetName())
	ev.is_running = true
	for {
		if ev.Contents.Status == lib_gc_container.CONTAINER_STATUS_STARTED {
			if message, ok := <-lib_bpulse_event.Messages_chan; ok {
				// Send the event.
				if err := ev.Send_event(message); err != nil {
					lib_gc_log.Error.Println("ERROR on sending an event: ", err.Error())
				}
			} else {
				// The channel is closed.
				lib_gc_log.Error.Printf("ERROR: the event logger channel is closed!\n")
			}
		} else {
			// The container is not started yet; wait before checking again.
			time.Sleep(500 * time.Millisecond)
		}
	}
}
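// Aside: a minimal, self-contained sketch of the consume-or-idle pattern that
// _run implements, with hypothetical names (the real loop reads
// lib_bpulse_event.Messages_chan and calls ev.Send_event). It drains the
// channel while the component reports started, and backs off otherwise.
func consumeSketch(messages <-chan string, started func() bool, send func(string) error) {
	for {
		if !started() {
			time.Sleep(500 * time.Millisecond) // idle until the container starts
			continue
		}
		msg, ok := <-messages
		if !ok {
			return // channel closed: stop consuming
		}
		if err := send(msg); err != nil {
			lib_gc_log.Error.Println("ERROR on sending an event: ", err.Error())
		}
	}
}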
func (dpa *DefaultPulseAggregator) run() {
	// Panic catching.
	defer func() {
		lib_gc_panic_catching.PanicCatching("DefaultPulseAggregator.run")
		dpa.isrunning = false
	}()
	lib_gc_log.Info.Printf("AGGREGATOR content [%p, %s], container [%p, %s] RUN STARTED.\n", &dpa.Contents, DEFAULT_AGGREGATOR_NAME, dpa.Container, dpa.Container.GetName())

	var record *lib_pulse_storage.PulseRecord
	last_storage_at := time.Now()

	ticker := time.NewTicker(100 * time.Nanosecond)
	for range ticker.C {
		if dpa.Contents.Status != lib_gc_container.CONTAINER_STATUS_STARTED || dpa.storage == nil {
			continue
		}
		select {
		case pulse, ok := <-dpa.aggregation_channel:
			// Check that the channel is still open before touching the pulse.
			if !ok {
				_msg, _ := lib_gc_event.NotifyEvent("003-001", "", &[]string{})
				lib_gc_log.Warning.Println(_msg)
				dpa.Status = lib_gc_container.CONTAINER_STATUS_STOPPED
				break // exits the select only; the status guard above idles the loop
			}
			lib_gc_log.Trace.Printf("AGGREGATOR received pulse %p on content %p, container %p [%s]\n", pulse, &dpa.Contents, dpa.Container, dpa.Container.GetName())
			lib_gc_log.Trace.Printf("AGGREGATOR received pulse %p: %s\n", pulse, pulse.String())
			// Open a new record if there is no active one.
			if record == nil {
				key := strconv.FormatInt(time.Now().Unix(), 10)
				pulses := make([]proto.Message, dpa.record_size)
				record = &lib_pulse_storage.PulseRecord{Key: key, Pulses: pulses, Pos: 0}
			}
			// Add the pulse to the record.
			record.Pulses[record.Pos] = pulse
			lib_gc_log.Trace.Printf("AGGREGATOR added pulse %p on content %p, container %p [%s], pos %d / %d\n", pulse, &dpa.Contents, dpa.Container, dpa.Container.GetName(), record.Pos, dpa.record_size)
			record.Pos++
			// Store the record once it reaches the configured block size.
			if record.Pos == dpa.record_size {
				lib_gc_log.Trace.Printf("AGGREGATOR storing BY BLOCK SIZE [%d - %d] pulse %p on content %p, container %p [%s]\n", record.Pos, dpa.record_size, pulse, &dpa.Contents, dpa.Container, dpa.Container.GetName())
				record.Pulses = record.Pulses[:record.Pos]
				dpa.storage.AddPulseRecord(record)
				last_storage_at = time.Now()
				record = nil
			}
		default:
			// No pulse available: store a partial record if it has waited too long.
			delay := time.Since(last_storage_at).Nanoseconds() / int64(time.Millisecond)
			if record != nil && delay >= dpa.max_time_without_store {
				lib_gc_log.Info.Printf("AGGREGATOR storing BY TIMING pulses on content %p, container %p [%s] with delay %s\n", &dpa.Contents, dpa.Container, dpa.Container.GetName(), fmt.Sprint(time.Since(last_storage_at)))
				record.Pulses = record.Pulses[:record.Pos]
				dpa.storage.AddPulseRecord(record)
				last_storage_at = time.Now()
				record = nil
			}
		}
	}
}
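// Aside: the batching policy above, reduced to a self-contained sketch with
// hypothetical names. A record is flushed either when it reaches the block
// size or when it has waited longer than the maximum time without a store;
// the select/default pair is what lets the timing check run while the channel
// is empty.
func aggregateSketch(in <-chan proto.Message, recordSize int, maxWait time.Duration, store func([]proto.Message)) {
	var record []proto.Message
	lastStore := time.Now()
	ticker := time.NewTicker(time.Millisecond)
	defer ticker.Stop()
	for range ticker.C {
		select {
		case item, ok := <-in:
			if !ok {
				return // channel closed
			}
			record = append(record, item)
			if len(record) == recordSize { // flush by block size
				store(record)
				record, lastStore = nil, time.Now()
			}
		default:
			if record != nil && time.Since(lastStore) >= maxWait { // flush by timing
				store(record)
				record, lastStore = nil, time.Now()
			}
		}
	}
}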
/*
StartBPulseClient starts the Go BPulse client.

Parameters:
	rq_maker:           BPulse RQ builder from a slice of pulses.
	rs_processor:       BPulse responses writer (optional).
	rs_instance_getter: BPulse RS instance getter.

Returns:
	error: the error raised if some issue occurs.
*/
func StartBPulseClient(rq_maker lib_pulses_sender.PulseRQMaker, rs_processor lib_pulses_sender.PulseRSProcessor, rs_instance_getter lib_send_channel_adapter.PulseRSInstanceGetter) error {
	// Panic catching.
	defer lib_gc_panic_catching.PanicCatching("StartBPulseClient")

	if status == BPULSECLIENT_STARTED {
		msg, _ := lib_gc_event.NotifyEvent("008-003", "", &[]string{})
		return errors.New(msg)
	}

	// Start the contents.
	lib_gc_contents_loader.InitializeContainerContents()

	// Start the event logger container.
	if err := waitForContainerStarts(lib_bpulse_event.Container.GetGenericContainer()); err != nil {
		return err
	}
	// Start the storage container.
	if err := waitForContainerStarts(lib_pulse_storage.Container.GetGenericContainer()); err != nil {
		return err
	}
	// Start the aggregator container.
	if err := waitForContainerStarts(lib_pulse_aggregator.Container.GetGenericContainer()); err != nil {
		return err
	}

	// Set the BPulse RS instance getter used to unmarshal BPulse responses. It is mandatory.
	if rs_instance_getter == nil {
		msg, _ := lib_gc_event.NotifyEvent("008-002", "", &[]string{})
		return errors.New(msg)
	}
	lib_send_channel_adapter.Container.GetGenericContainer().AddParameter(lib_send_channel_adapter.RS_INSTANCE_GETTER, rs_instance_getter)

	// Start the sender channel adapter container.
	if err := waitForContainerStarts(lib_send_channel_adapter.Container.GetGenericContainer()); err != nil {
		return err
	}

	// Set the RQ maker. It is mandatory.
	if rq_maker == nil {
		msg, _ := lib_gc_event.NotifyEvent("008-001", "", &[]string{})
		return errors.New(msg)
	}
	lib_pulses_sender.Container.GetGenericContainer().AddParameter(lib_pulses_sender.PULSE_RQ_MAKER, rq_maker)

	// The RS processor is optional.
	if rs_processor != nil {
		lib_pulses_sender.Container.GetGenericContainer().AddParameter(lib_pulses_sender.PULSE_RS_PROCESSOR, rs_processor)
	}
	// Start the sender container.
	if err := waitForContainerStarts(lib_pulses_sender.Container.GetGenericContainer()); err != nil {
		return err
	}

	status = BPULSECLIENT_STARTED
	return nil
}
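// Aside: a hypothetical wiring of the client. The exact callback signatures
// are defined in lib_pulses_sender and lib_send_channel_adapter; the ones
// sketched here are assumptions inferred from how this package uses them (the
// RQ maker receives the aggregated pulses, the optional RS processor receives
// each response plus the RS writer, and the instance getter yields a fresh RS
// message to unmarshal into). myPulseRQ and myPulseRS are hypothetical
// application types, not part of the library.
func startClientExample() error {
	rqMaker := func(pulses []proto.Message) (proto.Message, error) {
		return &myPulseRQ{Pulses: pulses}, nil // assumption: wrap the pulses into one RQ
	}
	rsProcessor := func(rs proto.Message, w io.Writer) error {
		_, err := fmt.Fprintf(w, "bpulse response: %v\n", rs) // optional response handling
		return err
	}
	rsGetter := func() proto.Message {
		return new(myPulseRS) // assumption: fresh RS instance for unmarshalling
	}
	return StartBPulseClient(rqMaker, rsProcessor, rsGetter)
}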
func (ds *DefaultSender) run() {
	// Panic catching.
	defer func() {
		lib_gc_panic_catching.PanicCatching("DefaultSender.run")
		ds.isrunning = false
	}()
	lib_gc_log.Info.Printf("SENDER content [%p, %s], container [%p, %s] RUN STARTED.\n", &ds.Contents, DEFAULT_SENDER, ds.Container, ds.Container.GetName())
	for {
		ds.isrunning = true
		if ds.Contents.Status != lib_gc_container.CONTAINER_STATUS_STARTED || ds.storage == nil {
			continue
		}
		// Bound the number of records being processed concurrently.
		max_concurrent_records_semaphore.Lock(1)
		if record, err := ds.storage.GetPulseRecord(); err != nil {
			msg, _ := EVENT.NotifyEvent("006-001", "", &[]string{err.Error()})
			lib_gc_log.Error.Printf("%s\n", msg)
			ds.writeToRSWriter([]byte(msg))
		} else if record != nil {
			lib_gc_log.Trace.Printf("SENDER content [%p, %s], container [%p, %s] Taken pulse record, with %d pulses.\n", &ds.Contents, DEFAULT_SENDER, ds.Container, ds.Container.GetName(), len(record.Pulses))
			if ds.pulse_rq_maker == nil {
				// The RQ maker is mandatory; without it the record cannot be sent.
				msg, _ := EVENT.NotifyEvent("006-004", "", &[]string{DEFAULT_SENDER})
				lib_gc_log.Error.Println(msg)
			} else if rq, err := ds.pulse_rq_maker(record.Pulses); err != nil {
				msg, _ := EVENT.NotifyEvent("006-005", "", &[]string{DEFAULT_SENDER})
				lib_gc_log.Error.Println(msg)
			} else {
				go func(rq proto.Message, rs_processor lib_pulses_sender.PulseRSProcessor, semaphore lib_gc_semaphore.Semaphore_I) {
					// Panic catching.
					defer lib_gc_panic_catching.PanicCatching("DefaultSender.run - go")
					if adapter, err := lib_send_channel_adapter.Container.GetSendChannelAdapter(adapter_name); err != nil {
						msg, _ := EVENT.NotifyEvent("006-002", "", &[]string{err.Error()})
						lib_gc_log.Error.Printf("%s\n", msg)
						ds.writeToRSWriter([]byte(msg))
					} else {
						defer lib_send_channel_adapter.Container.ReleaseSendChannelAdapter(adapter_name, adapter)
						semaphore.Lock(1)
						sent := false
						retries := 0
						// Retry until the RQ is sent or the retry budget is spent.
						for !sent && retries < msg_sending_reintries {
							retries++
							if rs, err := adapter.SendPulseRQ(rq); err != nil {
								if msg, err2 := EVENT.NotifyEvent("006-003", "", &[]string{err.Error()}); err2 == nil {
									lib_gc_log.Error.Printf("%s\n", msg)
									ds.writeToRSWriter([]byte(msg))
								} else {
									lib_gc_log.Error.Printf("%s\n", err2.Error())
									ds.writeToRSWriter([]byte(err2.Error()))
								}
							} else {
								sent = true
								// The RS processor is optional.
								if rs_processor != nil {
									if err := rs_processor(rs, ds.rs_writer); err != nil {
										ds.writeToRSWriter([]byte(err.Error()))
									}
								}
							}
						}
						semaphore.ForceToUnLock(1)
					}
				}(rq, ds.pulse_rs_processor, max_concurrent_sents_semaphore)
			}
		}
		max_concurrent_records_semaphore.ForceToUnLock(1)
	}
}
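// Aside: the goroutine's retry discipline above, as a self-contained sketch
// with hypothetical names: attempt delivery up to maxRetries times while
// holding a concurrency permit, stopping at the first successful send.
func sendWithRetriesSketch(send func() error, lock func(), unlock func(), maxRetries int) bool {
	lock()         // acquire a send permit
	defer unlock() // always release it, even on failure
	sent := false
	for retries := 0; !sent && retries < maxRetries; retries++ {
		if err := send(); err != nil {
			lib_gc_log.Error.Printf("send failed (attempt %d): %s\n", retries+1, err.Error())
			continue
		}
		sent = true
	}
	return sent
}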