// SendPulseRQ marshals the given pulse request, POSTs it to the adapter's
// configured URL as protobuf (with basic auth), and returns the unmarshalled
// response message.
//
// Parameters:
//   pulserq: the pulse request message to send.
//
// Returns:
//   The response message built via adapter.pulse_rs_instance_getter, or an
//   error if marshalling, building/sending the request, reading the body or
//   unmarshalling the response fails. Each failure fires its own event code
//   (004-005, 004-001, 004-002, 004-003, 004-004).
func (adapter *RESTfulSendChannelAdapter) SendPulseRQ(pulserq proto.Message) (proto.Message, error) {

	// Marshal the rq
	_bytes, err := proto.Marshal(pulserq)
	if err != nil {
		msg, _ := lib_gc_event.NotifyEvent("004-005", "", &[]string{adapter.url, err.Error()})
		return nil, errors.New(msg)
	}
	// FIX: the original used the malformed verb "%v+"; "%+v" is intended.
	lib_gc_log.Trace.Printf("ADAPTER content [%p, %s], container [%p, %s] PULSE RQ: %+v \n", &adapter.Contents, RESTFUL_SEND_CHANNEL_ADAPTER, adapter.Container, adapter.Container.GetName(), pulserq)

	// build the request
	request, err := http.NewRequest(adapter.method, adapter.url, bytes.NewBuffer(_bytes))
	if err != nil {
		msg, _ := lib_gc_event.NotifyEvent("004-001", "", &[]string{adapter.url, err.Error()})
		return nil, errors.New(msg)
	}

	// Set headers
	request.Header.Set("Content-Type", "application/x-protobuf")
	request.Header.Set("Accept", "application/x-protobuf")

	// Set the request auth
	request.SetBasicAuth(adapter.user, adapter.pwd)

	// Do the request
	response, err := adapter.http_client.Do(request)
	if err != nil {
		msg, _ := lib_gc_event.NotifyEvent("004-002", "", &[]string{adapter.url, err.Error()})
		return nil, errors.New(msg)
	}

	// Close the response body to reuse the connection.
	defer response.Body.Close()

	// Read the response.
	// NOTE(review): response.StatusCode is logged but never validated; a
	// non-2xx body is passed straight to proto.Unmarshal — confirm whether a
	// status check should be added.
	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		msg, _ := lib_gc_event.NotifyEvent("004-003", "", &[]string{adapter.url, err.Error()})
		return nil, errors.New(msg)
	}

	// Unmarshal into a fresh RS instance supplied by the configured getter.
	rs := adapter.pulse_rs_instance_getter()
	if err := proto.Unmarshal(body, rs); err != nil {
		msg, _ := lib_gc_event.NotifyEvent("004-004", "", &[]string{adapter.url, err.Error(), string(body)})
		return nil, errors.New(msg)
	}

	lib_gc_log.Info.Printf("ADAPTER content [%p, %s], container [%p, %s] Sent %d bytes, RS [%d] received from BPulse: %v \n", &adapter.Contents, RESTFUL_SEND_CHANNEL_ADAPTER, adapter.Container, adapter.Container.GetName(), len(_bytes), response.StatusCode, rs.String())
	return rs, nil
}
/*
 Shutdown the BPulse client.

 Parameters:
   (none)

 Returns
   A list of the errors raised if any issue occurred while shutting down the containers.
*/
// ShutdownBPulseClient shuts the client's containers down in dependency
// order (event logger, aggregator, storage, sender, send-channel adapter),
// stopping at the first container that fails to shut down.
//
// Returns:
//   A list holding the error of the first failed shutdown, or the 008-004
//   event error when the client was never started. The list is empty on
//   success.
func ShutdownBPulseClient() *list.List {
	l := list.New()
	if status == BPULSECLIENT_NOT_STARTED {
		msg, _ := lib_gc_event.NotifyEvent("008-004", "", &[]string{})
		l.PushBack(errors.New(msg))
		return l
	}

	// Shut containers down sequentially; as in the original nested version,
	// the first failure aborts the remaining shutdowns.
	if err := waitForContainerShutdown(lib_bpulse_event.Container.GetGenericContainer()); err != nil {
		l.PushBack(err)
		return l
	}
	if err := waitForContainerShutdown(lib_pulse_aggregator.Container.GetGenericContainer()); err != nil {
		l.PushBack(err)
		return l
	}
	if err := waitForContainerShutdown(lib_pulse_storage.Container.GetGenericContainer()); err != nil {
		l.PushBack(err)
		return l
	}
	if err := waitForContainerShutdown(lib_pulses_sender.Container.GetGenericContainer()); err != nil {
		l.PushBack(err)
		return l
	}
	if err := waitForContainerShutdown(lib_send_channel_adapter.Container.GetGenericContainer()); err != nil {
		l.PushBack(err)
	}
	return l
}
// GetParameter returns the container parameter registered under key.
// When no such parameter exists it fires event 005-002 and returns the
// resulting message as an error.
func (container *genericContainer) GetParameter(key string) (interface{}, error) {
	value, found := container.parameters[key]
	if found {
		return value, nil
	}
	_msg, _ := lib_gc_event.NotifyEvent("005-002", "", &[]string{key})
	return nil, errors.New(_msg)
}
// GetItem returns the contents registered in the container under name.
// When no such item exists it fires event 005-001 and returns the resulting
// message as an error.
func (container *genericContainer) GetItem(name string) (IGenericContents, error) {
	item, found := container.items[name]
	if found {
		return item, nil
	}
	_msg, _ := lib_gc_event.NotifyEvent("005-001", "", &[]string{name})
	return nil, errors.New(_msg)
}
// takePool resolves the adapter pool registered under name and casts it to
// lib_gc_pool.Pooler.
//
// Returns the pool, or an error (event 004-001) when no item with that name
// is registered in the container.
func (container *SendChannelAdapterContainer) takePool(name string) (lib_gc_pool.Pooler, error) {

	pool, err := container.Container.GetItem(name)
	if err != nil {
		// FIX: include the underlying error in the event arguments; the
		// original dropped err entirely, and event 004-001 is fired elsewhere
		// in this file with a (context, error) argument pair.
		_msg, _ := lib_gc_event.NotifyEvent("004-001", "", &[]string{name, err.Error()})
		return nil, errors.New(_msg)
	}
	return pool.(lib_gc_pool.Pooler), nil
}
// Shutdown blocks, bounded by shutdown_timeout milliseconds, until every
// pending pulse has been sent. On timeout it fires event 007-001 and returns
// the resulting message as an error.
func (dps *DefaultPulseStorage) Shutdown() error {

	lib_gc_log.Info.Printf("STORAGE content [%p, %s], container [%p, %s] SHUTDOWN catched. \n", &dps.Contents, DEFAULT_STORAGE_NAME, dps.Container, dps.Container.GetName())

	// Wait for all pulses has been sent
	deadline := time.Duration(shutdown_timeout) * time.Millisecond
	_, _, timeout_err := lib_gc_timeout_wrapper.TimeoutWrapper.Wrap(deadline, dps)
	if timeout_err == nil {
		return nil
	}
	_msg, _ := lib_gc_event.NotifyEvent("007-001", "", &[]string{DEFAULT_STORAGE_NAME})
	return errors.New(_msg)
}
/**
  This method checks if the writer (store pulses) has exceeded the reader (send pulses) on the ring.

  count_stored and count_taken are running counters of pulses written to and
  taken from the ring; once their difference reaches ring_size the writer has
  lapped the reader, meaning unsent pulses are being overwritten.
*/
func (dps *DefaultPulseStorage) checkForWriterReaderRelation() {
	exceeded := dps.count_stored-dps.count_taken >= int32(dps.ring_size)
	lib_gc_log.Trace.Printf("STORAGE content [%p, %s], container [%p, %s] CHECK for writer-reader relation [%d - %d] [%t] \n", &dps.Contents, DEFAULT_STORAGE_NAME, dps.Container, dps.Container.GetName(), dps.count_stored, dps.count_taken, exceeded)
	if exceeded {
		// The writer has reached the reader.
		msg, _ := lib_gc_event.NotifyEvent("007-002", "", &[]string{fmt.Sprint(time.Now())})
		lib_gc_log.Error.Println(msg)

		// Normalize  the counters. Skip the lost changes.
		// NOTE(review): this advances the taken counter by
		// (count_stored % ring_size) * ring_size, which does not obviously
		// bring the counters back within ring_size of each other — confirm
		// the intended formula against the ring implementation.
		dps.incTaken((dps.count_stored % int32(dps.ring_size)) * int32(dps.ring_size))
	}
}
// Start resolves, exactly once, the RS-instance getter registered in the
// container under RS_INSTANCE_GETTER; the getter is later used to unmarshal
// BPulse responses. A missing parameter fires event 004-006 and is only
// logged — Start always returns nil.
func (adapter *RESTfulSendChannelAdapter) Start() error {
	lib_gc_log.Info.Printf("ADAPTER content [%p, %s], container [%p, %s] START catched. \n", &adapter.Contents, RESTFUL_SEND_CHANNEL_ADAPTER, adapter.Container, adapter.Container.GetName())

	// One-time lazy resolution of the response-instance getter.
	adapter.once.Do(func() {
		getter, err := adapter.Container.(lib_gc_container.IGenericContainer).GetParameter(lib_send_channel_adapter.RS_INSTANCE_GETTER)
		if err != nil {
			msg, _ := lib_gc_event.NotifyEvent("004-006", "", &[]string{})
			lib_gc_log.Error.Println(msg)
			return
		}
		adapter.pulse_rs_instance_getter = getter.(lib_send_channel_adapter.PulseRSInstanceGetter)
	})

	return nil
}
// WaitForStatusChanges spawns a goroutine that listens on the container's
// shutdown/start/stop channels and dispatches to the matching Contents
// method. It blocks only until the goroutine has started its first loop
// iteration, so the container cannot signal (e.g. close its start channel)
// before the listener is ready. The goroutine exits only after a shutdown
// event has been handled.
func (contents *Contents) WaitForStatusChanges() {
	lib_gc_log.Info.Printf("Start WaitForStatusChanges on content %p, container %p [%s] \n", contents, contents.Container, contents.Container.GetName())

	// Synchronize the container Start with the goroutine
	wg := &sync.WaitGroup{}
	wg.Add(1)
	var once sync.Once

	go func(contents *Contents, wg *sync.WaitGroup) {
		var stop bool = false
		//        var first_loop bool = true
		for !stop {
			// Channels are re-fetched each iteration: presumably the container
			// replaces them after a status change — TODO confirm.
			shutdown_chan, start_chan, stop_chan := contents.Container.GetContainerStatusChannels()
			lib_gc_log.Info.Printf("Check for status, content %p [status %d], conainer %p [%s], shutdown chan %p, start chan %p, stop chan %p", contents, contents.Status, contents.Container, contents.Container.GetName(), shutdown_chan, start_chan, stop_chan)

			// Only on the first loop, the goroutine must be synchronized with the container. If not,
			// the container would close its start channel before the goroutine was running and the
			// start event would not be catched.
			once.Do(wg.Done)

			// Catch container events.
			select {
			case <-shutdown_chan:
				lib_gc_log.Info.Printf("Received SHUTDOWN on content %p, container %p [%s] \n", contents, contents.Container, contents.Container.GetName())
				if err := contents.Shutdown(); err != nil {
					// Shutdown failures are logged (event 003-002) but still end the loop.
					_msg, _ := lib_gc_event.NotifyEvent("003-002", "", &[]string{err.Error()})
					lib_gc_log.Error.Println(_msg)
				}
				stop = true
			case <-start_chan:
				lib_gc_log.Info.Printf("Received START on content %p, container %p [%s] \n", contents, contents.Container, contents.Container.GetName())
				contents.Start()
			case <-stop_chan:
				lib_gc_log.Info.Printf("Received STOP on content %p, container %p [%s] \n", contents, contents.Container, contents.Container.GetName())
				contents.Stop()
			}
		}
	}(contents, wg)
	// Block until the goroutine has reached its first once.Do(wg.Done).
	wg.Wait()
}
// Send_event POSTs the given event message as JSON to the configured event
// server, retrying up to ev.server.reentries times with a 500ms pause
// between attempts. TLS certificate verification is disabled.
//
// Parameters:
//   event_message: the event to publish.
//
// Returns nil once any attempt succeeds, otherwise the error of the last
// failed attempt.
func (ev *EventLogger) Send_event(event_message *lib_bpulse_event.Event_message) error {

	m := map[string]interface{}{
		"id":   strconv.FormatInt(time.Now().UnixNano(), 10),
		"type": string(event_message.Type),
		"title":       event_message.Title,
		"description": event_message.Description,
		// NOTE(review): key "startDate:" carries a trailing ':' — looks like a
		// typo, but it is kept because the server may depend on it; confirm.
		"startDate:":   event_message.StartDate,
		"endDate":      event_message.EndDate,
		"source":       ev.server.source,
		"dataSourceId": ev.server.data_source_id,
		"editable":     ev.server.editable,
	}
	mJson, _ := json.Marshal(m)
	println("\n", string(mJson), "\n")

	var _err error
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	for z := 0; z < ev.server.reentries; z++ {
		request := gorequest.New().Timeout(time.Duration(ev.server.timeout) * time.Millisecond)
		request.Transport = tr
		// NOTE(review): the user is passed as both username and password —
		// presumably the password field should be the second argument; confirm
		// before changing.
		request.Header["Authorization"] = "Basic " + ev.basicAuth(ev.server.user, ev.server.user)
		_, body, errs := request.Post(ev.server.url).
			Set("Content-Type", "application/json").
			Send(string(mJson)).
			End()

		println("*jas* body: ", body)

		// The server may report a failure inside a 200 body; surface it as an
		// error. (append on a nil slice allocates, so the nil check the
		// original carried is unnecessary.)
		if strings.Contains(body, "error") {
			errs = append(errs, errors.New(body))
		}

		if len(errs) == 0 {
			// BUG FIX: clear any error recorded by a previous failed attempt,
			// so a retry that eventually succeeds reports success instead of
			// returning the stale error.
			_err = nil
			break
		}

		// Collapse all attempt errors into one comma-separated message.
		parts := make([]string, 0, len(errs))
		for _, e := range errs {
			parts = append(parts, e.Error())
		}
		str := strings.Join(parts, ", ")
		msg, _ := lib_gc_event.NotifyEvent("009-001", "", &[]string{str})
		lib_gc_log.Error.Println(msg)
		_err = errors.New(str)

		// Wait for the next retry
		time.Sleep(500 * time.Millisecond)
	}

	return _err
}
/*
   This method starts the Go BPulse client.

   Parameters:
       rq_maker: BPulse RQ builder from a slice of pulses.
       rs_processor: BPulse responses writer (optional).
       rs_instance_getter: BPulse RS instance getter.

   Returns
     error: The error raised if some issue occurs.
*/
// StartBPulseClient boots the client's containers in dependency order (event
// logger, storage, aggregator, send-channel adapter, pulses sender) and wires
// in the mandatory rq_maker and rs_instance_getter plus the optional
// rs_processor. Calling it while already started returns the 008-003 error.
// The original deeply-nested else chain is flattened; the order of
// operations is unchanged.
func StartBPulseClient(rq_maker lib_pulses_sender.PulseRQMaker, rs_processor lib_pulses_sender.PulseRSProcessor, rs_instance_getter lib_send_channel_adapter.PulseRSInstanceGetter) error {

	// panic catching
	defer lib_gc_panic_catching.PanicCatching("StartBPulseClient")

	if status == BPULSECLIENT_STARTED {
		msg, _ := lib_gc_event.NotifyEvent("008-003", "", &[]string{})
		return errors.New(msg)
	}

	// Start the contents.
	lib_gc_contents_loader.InitializeContainerContents()

	// start event logger container
	if err := waitForContainerStarts(lib_bpulse_event.Container.GetGenericContainer()); err != nil {
		return err
	}
	// start storage container
	if err := waitForContainerStarts(lib_pulse_storage.Container.GetGenericContainer()); err != nil {
		return err
	}
	// start aggregator container
	if err := waitForContainerStarts(lib_pulse_aggregator.Container.GetGenericContainer()); err != nil {
		return err
	}

	// Set the bpulse RS instance getter used to unmarshal bpulse responses.
	// It's mandatory.
	if rs_instance_getter == nil {
		msg, _ := lib_gc_event.NotifyEvent("008-002", "", &[]string{})
		return errors.New(msg)
	}
	lib_send_channel_adapter.Container.GetGenericContainer().AddParameter(lib_send_channel_adapter.RS_INSTANCE_GETTER, rs_instance_getter)

	// start sender channel adapter container
	if err := waitForContainerStarts(lib_send_channel_adapter.Container.GetGenericContainer()); err != nil {
		return err
	}

	// Set the rq maker. It's mandatory.
	if rq_maker == nil {
		msg, _ := lib_gc_event.NotifyEvent("008-001", "", &[]string{})
		return errors.New(msg)
	}
	lib_pulses_sender.Container.GetGenericContainer().AddParameter(lib_pulses_sender.PULSE_RQ_MAKER, rq_maker)

	// RS processor is optional.
	if rs_processor != nil {
		lib_pulses_sender.Container.GetGenericContainer().AddParameter(lib_pulses_sender.PULSE_RS_PROCESSOR, rs_processor)
	}

	// start the pulses sender container
	if err := waitForContainerStarts(lib_pulses_sender.Container.GetGenericContainer()); err != nil {
		return err
	}

	status = BPULSECLIENT_STARTED
	return nil
}
// run is the aggregator loop: it drains pulses from aggregation_channel into
// a PulseRecord and flushes the record to storage either when it reaches
// record_size pulses or when max_time_without_store milliseconds have passed
// since the last flush. It only works while the container status is STARTED
// and a storage is attached.
func (dpa *DefaultPulseAggregator) run() {
	// panic catching
	defer func() {
		// FIX: the panic label wrongly said "DefaultSender.run".
		lib_gc_panic_catching.PanicCatching("DefaultPulseAggregator.run")
		dpa.isrunning = false
	}()

	lib_gc_log.Info.Printf("AGGREGATOR content [%p, %s], container [%p, %s] RUN STARTED. \n", &dpa.Contents, DEFAULT_AGGREGATOR_NAME, dpa.Container, dpa.Container.GetName())
	var record *lib_pulse_storage.PulseRecord
	var last_storage_at time.Time = time.Now()
	// NOTE(review): a 100ns tick is effectively a busy loop — confirm whether
	// a longer period was intended. The ticker is never stopped; the goroutine
	// is expected to live for the process lifetime.
	ticker := time.NewTicker(time.Nanosecond * 100)
	for range ticker.C {
		if dpa.Contents.Status == lib_gc_container.CONTAINER_STATUS_STARTED && dpa.storage != nil {
			select {
			case pulse, ok := <-dpa.aggregation_channel:
				lib_gc_log.Trace.Printf("AGGREGATOR recived pulse %p on content %p, container %p [%s] \n", pulse, &dpa.Contents, dpa.Container, dpa.Container.GetName())
				// FIX: the original passed one argument to a format string
				// with two verbs (and used the malformed "%v+").
				lib_gc_log.Trace.Printf("AGGREGATOR recived pulse %p : %s \n", pulse, pulse.String())

				// Check for channel keeps opened
				if !ok {
					_msg, _ := lib_gc_event.NotifyEvent("003-001", "", &[]string{})
					lib_gc_log.Warning.Println(_msg)
					dpa.Status = lib_gc_container.CONTAINER_STATUS_STOPPED
					// NOTE: this break only exits the select; the ticker loop
					// keeps spinning but the STARTED guard above stops all work.
					break
				}

				// Lazily open a new record keyed by the current unix second.
				if record == nil {
					key := strconv.FormatInt(time.Now().Unix(), 10)
					pulses := make([]proto.Message, dpa.record_size)
					record = &lib_pulse_storage.PulseRecord{Key: key, Pulses: pulses, Pos: 0}
				}

				// Add the pulse to the record.
				record.Pulses[record.Pos] = pulse
				lib_gc_log.Trace.Printf("AGGREGATOR added pulse %p on content %p, container %p [%s], pos %d / %d \n", pulse, &dpa.Contents, dpa.Container, dpa.Container.GetName(), record.Pos, dpa.record_size)
				record.Pos++

				// Flush when the record has reached record_size pulses.
				if record.Pos == dpa.record_size {
					lib_gc_log.Trace.Printf("AGGREGATOR storing BY BLOCK SIZE [%d - %d] pulse %p on content %p, container %p [%s] \n", record.Pos, dpa.record_size, pulse, &dpa.Contents, dpa.Container, dpa.Container.GetName())
					record.Pulses = record.Pulses[:record.Pos]
					dpa.storage.AddPulseRecord(record)
					last_storage_at = time.Now()
					record = nil
				}

			default:
				// No pulse pending: flush a partial record once the configured
				// max time without storing (milliseconds) has elapsed.
				delay := time.Since(last_storage_at).Nanoseconds() / 1000000
				if record != nil && delay >= dpa.max_time_without_store {
					lib_gc_log.Info.Printf("AGGREGATOR storing BY TIMING pulses on content %p, container %p [%s] with delay %s \n", &dpa.Contents, dpa.Container, dpa.Container.GetName(), fmt.Sprint(time.Since(last_storage_at)))
					// Store
					record.Pulses = record.Pulses[:record.Pos]
					dpa.storage.AddPulseRecord(record)
					last_storage_at = time.Now()
					record = nil
				}
			}
		}
	}
}
// run is the sender loop: it takes pulse records from storage, builds a
// request with pulse_rq_maker, and ships each request from a goroutine
// through a pooled send-channel adapter, retrying up to
// msg_sending_reintries times. Two semaphores bound the number of records
// in flight and the number of concurrent sends.
func (ds *DefaultSender) run() {
	// panic catching
	defer func() {
		lib_gc_panic_catching.PanicCatching("DefaultSender.run")
		ds.isrunning = false
	}()

	lib_gc_log.Info.Printf("SENDER content [%p, %s], container [%p, %s] RUN STARTED.\n", &ds.Contents, DEFAULT_SENDER, ds.Container, ds.Container.GetName())
	// NOTE(review): this loop never sleeps; while the container is not
	// STARTED it busy-spins — confirm whether a backoff was intended.
	for {
		ds.isrunning = true
		if ds.Contents.Status == lib_gc_container.CONTAINER_STATUS_STARTED && ds.storage != nil {
			// Bound the number of records being processed at once.
			max_concurrent_records_semaphore.Lock(1)
			if record, err := ds.storage.GetPulseRecord(); err != nil {
				msg, _ := EVENT.NotifyEvent("006-001", "", &[]string{err.Error()})
				lib_gc_log.Error.Printf("%s\n", msg)
				ds.writeToRSWriter([]byte(msg))
			} else {
				if record != nil {
					lib_gc_log.Trace.Printf("SENDER content [%p, %s], container [%p, %s] Taken pulse record, with %d pulses.\n", &ds.Contents, DEFAULT_SENDER, ds.Container, ds.Container.GetName(), len(record.Pulses))
					if ds.pulse_rq_maker == nil {
						// No RQ maker configured: the record is dropped (event 006-004).
						msg, _ := EVENT.NotifyEvent("006-004", "", &[]string{DEFAULT_SENDER})
						lib_gc_log.Error.Println(msg)
					} else {
						if rq, err := ds.pulse_rq_maker(record.Pulses); err != nil {
							msg, _ := EVENT.NotifyEvent("006-005", "", &[]string{DEFAULT_SENDER})
							lib_gc_log.Error.Println(msg)
						} else {

							// Ship the request asynchronously so the loop can
							// take the next record meanwhile.
							go func(rq proto.Message, rs_processor lib_pulses_sender.PulseRSProcessor, semaphore lib_gc_semaphore.Semaphore_I) {
								// panic catching
								defer lib_gc_panic_catching.PanicCatching("DefaultSender.run - go")
								if adapter, err := lib_send_channel_adapter.Container.GetSendChannelAdapter(adapter_name); err != nil {
									msg, _ := EVENT.NotifyEvent("006-002", "", &[]string{err.Error()})
									lib_gc_log.Error.Printf("%s\n", msg)
									ds.writeToRSWriter([]byte(msg))
								} else {
									// Return the adapter to its pool when done.
									defer lib_send_channel_adapter.Container.ReleaseSendChannelAdapter(adapter_name, adapter)
									// Bound the number of concurrent sends.
									semaphore.Lock(1)
									sent := false
									retries := 0
									for (!sent) && (retries < msg_sending_reintries) {
										retries += 1
										if rs, err := adapter.SendPulseRQ(rq); err != nil {
											if msg, err2 := EVENT.NotifyEvent("006-003", "", &[]string{err.Error()}); err2 == nil {
												lib_gc_log.Error.Printf("%s \n !!", msg)
												ds.writeToRSWriter([]byte(msg))
											} else {
												// NOTE(review): on NotifyEvent failure this still
												// writes msg (likely empty) rather than err2 — confirm.
												lib_gc_log.Error.Printf("%s \n !!", err2.Error())
												ds.writeToRSWriter([]byte(msg))
											}
										} else {
											sent = true
											// Hand the response to the optional RS processor.
											if rs_processor != nil {
												if err := rs_processor(rs, ds.rs_writer); err != nil {
													ds.writeToRSWriter([]byte(err.Error()))
												}
											}
										}
									}
									semaphore.ForceToUnLock(1)
								}
							}(rq, ds.pulse_rs_processor, max_concurrent_sents_semaphore)

						}
					}
				}
			}
			max_concurrent_records_semaphore.ForceToUnLock(1)
		}
	}
}