Example #1
func (c *console) PublishEvent(
	s op.Signaler,
	opts outputs.Options,
	event common.MapStr,
) error {
	var jsonEvent []byte
	var err error

	if c.config.Pretty {
		jsonEvent, err = json.MarshalIndent(event, "", "  ")
	} else {
		jsonEvent, err = json.Marshal(event)
	}
	if err != nil {
		logp.Err("Fail to convert the event to JSON (%v): %#v", err, event)
		op.SigCompleted(s)
		return err
	}

	if err = writeBuffer(jsonEvent); err != nil {
		goto fail
	}
	if err = writeBuffer([]byte{'\n'}); err != nil {
		goto fail
	}

	op.SigCompleted(s)
	return nil
fail:
	if opts.Guaranteed {
		logp.Critical("Unable to publish events to console: %v", err)
	}
	op.SigFailed(s, err)
	return err
}
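All of the examples on this page funnel their result through the op signaling helpers. For orientation, here is a minimal self-contained sketch of what that API is assumed to look like (hypothetical types and helper bodies for illustration only, not the actual libbeat source): a Signaler receives exactly one Completed or Failed callback per published batch, and op.Sig picks between the two based on the error, as in Example #3 below.

package main

import "fmt"

// Signaler is assumed to expose one success and one failure callback.
type Signaler interface {
	Completed()
	Failed()
}

// SigCompleted mirrors the role of op.SigCompleted: report success if a
// signaler is attached to the batch.
func SigCompleted(s Signaler) {
	if s != nil {
		s.Completed()
	}
}

// SigFailed mirrors the role of op.SigFailed: report failure if a signaler
// is attached (the error itself is typically logged by the caller).
func SigFailed(s Signaler, err error) {
	if s != nil {
		s.Failed()
	}
}

// Sig chooses Completed or Failed based on err, like op.Sig in Example #3.
func Sig(s Signaler, err error) {
	if err == nil {
		SigCompleted(s)
	} else {
		SigFailed(s, err)
	}
}

type printSignaler struct{ name string }

func (p printSignaler) Completed() { fmt.Println(p.name, "completed") }
func (p printSignaler) Failed()    { fmt.Println(p.name, "failed") }

func main() {
	Sig(printSignaler{"batch-1"}, nil)              // batch-1 completed
	Sig(printSignaler{"batch-2"}, fmt.Errorf("io")) // batch-2 failed
}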
Example #2
func (mh *testMessageHandler) acknowledgeMessage(m message) {
	if mh.response == CompletedResponse {
		op.SigCompleted(m.context.Signal)
	} else {
		op.SigFailed(m.context.Signal, nil)
	}
}
Example #3
func (out *fileOutput) PublishEvent(
	sig op.Signaler,
	opts outputs.Options,
	event common.MapStr,
) error {
	jsonEvent, err := json.Marshal(event)
	if err != nil {
		// mark as success so event is not sent again.
		op.SigCompleted(sig)

		logp.Err("Fail to json encode event(%v): %#v", err, event)
		return err
	}

	err = out.rotator.WriteLine(jsonEvent)
	if err != nil {
		if opts.Guaranteed {
			logp.Critical("Unable to write events to file: %s", err)
		} else {
			logp.Err("Error when writing line to file: %s", err)
		}
	}
	op.Sig(sig, err)
	return err
}
Example #4
func (p *syncPipeline) publish(m message) bool {
	if p.pub.disabled {
		debug("publisher disabled")
		op.SigCompleted(m.context.Signal)
		return true
	}

	client := m.client
	signal := m.context.Signal
	sync := op.NewSignalChannel()
	if len(p.pub.Output) > 1 {
		m.context.Signal = op.SplitSignaler(sync, len(p.pub.Output))
	} else {
		m.context.Signal = sync
	}

	for _, o := range p.pub.Output {
		o.send(m)
	}

	// Await completion signal from output plugin. If the client has been
	// disconnected, ignore any signal and drop the events whether they were
	// sent or not.
	select {
	case <-client.canceler.Done():
		return false // events were potentially not sent
	case sig := <-sync.C:
		sig.Apply(signal)
		return sig == op.SignalCompleted
	}
}
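Example #4 aggregates the acknowledgements of several outputs with op.SplitSignaler, so the original signal fires only once. The sketch below shows one plausible way such a splitter could be built on top of the assumed Signaler interface from the earlier sketch (an illustrative guess, not the libbeat implementation): the parent completes only after all sub-signals complete, and fails as soon as one sub-signal fails.

package main

import (
	"fmt"
	"sync/atomic"
)

// Signaler as assumed in the earlier sketch.
type Signaler interface {
	Completed()
	Failed()
}

// splitSignaler fans one parent signal out to n expected sub-signals.
type splitSignaler struct {
	parent Signaler
	count  int32
}

// SplitSignaler plays the role of op.SplitSignaler in Example #4; the exact
// failure semantics here are a simplification.
func SplitSignaler(parent Signaler, n int) Signaler {
	return &splitSignaler{parent: parent, count: int32(n)}
}

func (s *splitSignaler) Completed() {
	// Complete the parent only when the last sub-signal arrives.
	if atomic.AddInt32(&s.count, -1) == 0 {
		s.parent.Completed()
	}
}

func (s *splitSignaler) Failed() {
	// Fail the parent on the first failing sub-signal.
	s.parent.Failed()
}

type printSignaler struct{ name string }

func (p printSignaler) Completed() { fmt.Println(p.name, "completed") }
func (p printSignaler) Failed()    { fmt.Println(p.name, "failed") }

func main() {
	// Two outputs must both acknowledge before the batch completes.
	sig := SplitSignaler(printSignaler{"batch"}, 2)
	sig.Completed()
	sig.Completed() // "batch completed" is printed only now
}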
Example #5
// PublishEvent writes events to a channel then calls Completed on trans.
// It always returns nil.
func (t *testOutputer) PublishEvent(
	trans op.Signaler,
	_ outputs.Options,
	data outputs.Data,
) error {
	t.data <- data
	op.SigCompleted(trans)
	return nil
}
Example #6
func handlePublishEventsResult(
	m *AsyncLoadBalancerMode,
	msg eventsMessage,
) func([]common.MapStr, error) {
	total := len(msg.events)
	return func(events []common.MapStr, err error) {
		debug("handlePublishEventsResult")

		if err != nil {
			debug("handle publish error: %v", err)

			if msg.attemptsLeft > 0 {
				msg.attemptsLeft--
			}

			// reset attempt count if subset of messages has been processed
			if len(events) < total && msg.attemptsLeft >= 0 {
				msg.attemptsLeft = m.maxAttempts
			}

			if err != ErrTempBulkFailure {
				// retry non-published subset of events in batch
				msg.events = events
				m.onFail(false, msg, err)
				return
			}

			if m.maxAttempts > 0 && msg.attemptsLeft == 0 {
				// no more attempts left => drop
				dropping(msg)
				return
			}

			// retry non-published subset of events in batch
			msg.events = events
			m.onFail(false, msg, err)
			return
		}

		// re-insert non-published events into pipeline
		if len(events) != 0 {
			debug("add non-published events back into pipeline: %v", len(events))
			msg.events = events
			if ok := m.forwardEvent(m.retries, msg); !ok {
				dropping(msg)
			}
			return
		}

		// all events published -> signal success
		debug("async bulk publish success")
		op.SigCompleted(msg.signaler)
	}
}
Example #7
func (w *syncWorker) onMessage(msg eventsMessage) error {
	client := w.client

	if msg.event != nil {
		err := client.PublishEvent(msg.event)
		if err != nil {
			if msg.attemptsLeft > 0 {
				msg.attemptsLeft--
			}
			w.onFail(msg, err)
			return err
		}
	} else {
		events := msg.events
		total := len(events)

		for len(events) > 0 {
			var err error

			events, err = client.PublishEvents(events)
			if err != nil {
				if msg.attemptsLeft > 0 {
					msg.attemptsLeft--
				}

				// reset attempt count if subset of messages has been processed
				if len(events) < total && msg.attemptsLeft >= 0 {
					debugf("reset fails")
					msg.attemptsLeft = w.ctx.maxAttempts
				}

				if err != mode.ErrTempBulkFailure {
					// retry non-published subset of events in batch
					msg.events = events
					w.onFail(msg, err)
					return err
				}

				if w.ctx.maxAttempts > 0 && msg.attemptsLeft == 0 {
					// no more attempts left => drop
					dropping(msg)
					return err
				}

				// reset total count for temporary failure loop
				total = len(events)
			}
		}
	}

	op.SigCompleted(msg.signaler)
	return nil
}
Example #8
// publish sends events using the configured protocol client. It provides
// general error handling and backoff support for failed send attempts, and is
// meant to be used by PublishEvent and PublishEvents.
// The send callback tries to make progress sending traffic and reports the
// kind of progress made via ok and resetFail. If ok is true, send finished
// processing the events. If ok is false but resetFail is set, send was
// partially successful; in that case the fail counter is reset, so up to
// maxAttempts send attempts without any progress may be executed.
func (s *Mode) publish(
	signaler op.Signaler,
	opts outputs.Options,
	send func() (ok bool, resetFail bool),
) error {
	fails := 0
	var err error

	guaranteed := opts.Guaranteed || s.maxAttempts == 0
	for !s.closed && (guaranteed || fails < s.maxAttempts) {

		ok := false
		resetFail := false

		if err := s.connect(); err != nil {
			logp.Err("Connecting error publishing events (retrying): %s", err)
			goto sendFail
		}

		ok, resetFail = send()
		if !ok {
			s.closeClient()
			goto sendFail
		}

		debugf("send completed")
		s.backoff.Reset()
		op.SigCompleted(signaler)
		return nil

	sendFail:
		debugf("send fail")

		fails++
		if resetFail {
			debugf("reset fails")
			s.backoff.Reset()
			fails = 0
		}
		s.backoff.Wait()

		if !guaranteed && (s.maxAttempts > 0 && fails == s.maxAttempts) {
			// max number of attempts reached
			debugf("max number of attempts reached")
			break
		}
	}

	debugf("messages dropped")
	mode.Dropped(1)
	op.SigFailed(signaler, err)
	return nil
}
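The doc comment above defines the contract of the send callback: ok means the whole batch was handed off, resetFail means partial progress was made, which resets the failure counter. Below is a hedged sketch of how a caller could wrap a protocol client into such a callback; the client interface and names are hypothetical, not the actual mode implementation, and only the returned (ok, resetFail) pair follows the contract described above.

package main

import (
	"errors"
	"fmt"
)

// Client stands in for a protocol client (hypothetical interface for this
// sketch). PublishEvents returns the events that were NOT published, the
// same convention visible in Examples #7 and #17.
type Client interface {
	PublishEvents(events []string) (rest []string, err error)
}

// makeSend builds a send callback matching the (ok, resetFail) contract:
// ok when every pending event went out, resetFail when at least some events
// were accepted so the caller may reset its fail counter.
func makeSend(c Client, events []string) func() (bool, bool) {
	pending := events
	return func() (bool, bool) {
		before := len(pending)
		rest, err := c.PublishEvents(pending)
		pending = rest

		ok := err == nil && len(pending) == 0
		resetFail := len(pending) < before // partial progress
		return ok, resetFail
	}
}

// flakyClient accepts one event per call and then fails; it only drives the demo.
type flakyClient struct{}

func (flakyClient) PublishEvents(events []string) ([]string, error) {
	if len(events) <= 1 {
		return nil, nil
	}
	return events[1:], errors.New("temporary failure")
}

func main() {
	send := makeSend(flakyClient{}, []string{"a", "b", "c"})
	for {
		ok, resetFail := send()
		fmt.Println("ok:", ok, "resetFail:", resetFail)
		if ok {
			return
		}
	}
}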
Example #9
func handlePublishEventResult(m *AsyncLoadBalancerMode, msg eventsMessage) func(error) {
	return func(err error) {
		if err != nil {
			if msg.attemptsLeft > 0 {
				msg.attemptsLeft--
			}
			m.onFail(false, msg, err)
		} else {
			op.SigCompleted(msg.signaler)
		}
	}
}
Example #10
func TestPublisherModes(t *testing.T) {
	tests := []struct {
		title string
		async bool
		order []int
	}{
		{"sync", false, []int{1, 2, 3, 4, 5, 6}},
		{"async ordered signal", true, []int{1, 2, 3, 4, 5, 6}},
		{"async out of order signal", true, []int{5, 2, 3, 1, 4, 6}},
	}

	for i, test := range tests {
		t.Logf("run publisher test (%v): %v", i, test.title)

		wg := sync.WaitGroup{}

		pubChan := make(chan []*input.Event, len(test.order)+1)
		collector := &collectLogger{&wg, nil}
		client := pubtest.NewChanClient(0)

		pub := New(test.async, pubChan, collector,
			pubtest.PublisherWithClient(client))
		pub.Start()

		var events [][]*input.Event
		for i := range test.order {
			tmp := makeEvents(fmt.Sprintf("msg: %v", i), 1)
			wg.Add(1)
			pubChan <- tmp
			events = append(events, tmp)
		}

		var msgs []pubtest.PublishMessage
		for range test.order {
			m := <-client.Channel
			msgs = append(msgs, m)
		}

		for _, i := range test.order {
			op.SigCompleted(msgs[i-1].Context.Signal)
		}

		wg.Wait()
		pub.Stop()

		// validate order
		assert.Equal(t, len(events), len(collector.events))
		for i := range events {
			assert.Equal(t, events[i], collector.events[i])
		}
	}
}
Example #11
func (w *asyncWorker) handleResult(msg eventsMessage) func(error) {
	return func(err error) {
		if err != nil {
			if msg.attemptsLeft > 0 {
				msg.attemptsLeft--
			}
			w.onFail(msg, err)
			return
		}

		op.SigCompleted(msg.signaler)
	}
}
Example #12
func (w *asyncWorker) handleResults(msg eventsMessage) func([]common.MapStr, error) {
	total := len(msg.events)
	return func(events []common.MapStr, err error) {
		debugf("handleResults")

		if err != nil {
			debugf("handle publish error: %v", err)

			if msg.attemptsLeft > 0 {
				msg.attemptsLeft--
			}

			// reset attempt count if subset of messages has been processed
			if len(events) < total && msg.attemptsLeft >= 0 {
				msg.attemptsLeft = w.ctx.maxAttempts
			}

			if err != mode.ErrTempBulkFailure {
				// retry non-published subset of events in batch
				msg.events = events
				w.onFail(msg, err)
				return
			}

			if w.ctx.maxAttempts > 0 && msg.attemptsLeft == 0 {
				// no more attempts left => drop
				dropping(msg)
				return
			}

			// retry non-published subset of events in batch
			msg.events = events
			w.onFail(msg, err)
			return
		}

		// re-insert non-published events into pipeline
		if len(events) != 0 {
			go func() {
				debugf("add non-published events back into pipeline: %v", len(events))
				msg.events = events
				w.ctx.pushFailed(msg)
			}()
			return
		}

		// all events published -> signal success
		debugf("async bulk publish success")
		op.SigCompleted(msg.signaler)
	}
}
Example #13
func TestPublisherModes(t *testing.T) {
	tests := []struct {
		title string
		async bool
		order []int
	}{
		{"sync", false, []int{1, 2, 3, 4, 5, 6}},
		{"async ordered signal", true, []int{1, 2, 3, 4, 5, 6}},
		{"async out of order signal", true, []int{5, 2, 3, 1, 4, 6}},
	}

	for i, test := range tests {
		t.Logf("run publisher test (%v): %v", i, test.title)

		pubChan := make(chan []*input.FileEvent, len(test.order)+1)
		regChan := make(chan []*input.FileEvent, len(test.order)+1)
		client := pubtest.NewChanClient(0)

		pub := newPublisher(test.async, pubChan, regChan, client)
		pub.Start()

		var events [][]*input.FileEvent
		for i := range test.order {
			tmp := makeEvents(fmt.Sprintf("msg: %v", i), 1)
			pubChan <- tmp
			events = append(events, tmp)
		}

		var msgs []pubtest.PublishMessage
		for range test.order {
			m := <-client.Channel
			msgs = append(msgs, m)
		}

		for _, i := range test.order {
			op.SigCompleted(msgs[i-1].Context.Signal)
		}

		var regEvents [][]*input.FileEvent
		for range test.order {
			regEvents = append(regEvents, <-regChan)
		}
		pub.Stop()

		// validate order
		for i := range events {
			assert.Equal(t, events[i], regEvents[i])
		}
	}
}
Example #14
func (p *asyncPipeline) publish(m message) bool {
	if p.pub.disabled {
		debug("publisher disabled")
		op.SigCompleted(m.context.Signal)
		return true
	}

	if m.context.Signal != nil {
		s := op.CancelableSignaler(m.client.canceler, m.context.Signal)
		if len(p.outputs) > 1 {
			s = op.SplitSignaler(s, len(p.outputs))
		}
		m.context.Signal = s
	}

	for _, o := range p.outputs {
		o.send(m)
	}
	return true
}
Example #15
func (o *outputWorker) onBulk(ctx *Context, events []common.MapStr) {
	if len(events) == 0 {
		debug("output worker: no events to publish")
		op.SigCompleted(ctx.Signal)
		return
	}

	if o.maxBulkSize < 0 || len(events) <= o.maxBulkSize {
		o.sendBulk(ctx, events)
		return
	}

	// start splitting bulk request
	splits := (len(events) + (o.maxBulkSize - 1)) / o.maxBulkSize
	ctx.Signal = op.SplitSignaler(ctx.Signal, splits)
	for len(events) > 0 {
		sz := o.maxBulkSize
		if sz > len(events) {
			sz = len(events)
		}
		o.sendBulk(ctx, events[:sz])
		events = events[sz:]
	}
}
Example #16
func (o *outputWorker) onBulk(ctx *Context, data []outputs.Data) {
	if len(data) == 0 {
		debug("output worker: no events to publish")
		op.SigCompleted(ctx.Signal)
		return
	}

	if o.maxBulkSize < 0 || len(data) <= o.maxBulkSize {
		o.sendBulk(ctx, data)
		return
	}

	// start splitting bulk request
	splits := (len(data) + (o.maxBulkSize - 1)) / o.maxBulkSize
	ctx.Signal = op.SplitSignaler(ctx.Signal, splits)
	for len(data) > 0 {
		sz := o.maxBulkSize
		if sz > len(data) {
			sz = len(data)
		}
		o.sendBulk(ctx, data[:sz])
		data = data[sz:]
	}
}
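The split count in Examples #15 and #16 is plain integer ceiling division; the tiny program below just spells out the arithmetic: with 2500 events and maxBulkSize = 1000, the signal is split three ways and the batches hold 1000, 1000 and 500 events.

package main

import "fmt"

func main() {
	// Ceiling division as used in Examples #15 and #16.
	events, maxBulkSize := 2500, 1000
	splits := (events + (maxBulkSize - 1)) / maxBulkSize
	fmt.Println(splits) // 3
}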
Example #17
func (m *LoadBalancerMode) onMessage(
	backoff *backoff,
	client ProtocolClient,
	msg eventsMessage,
) (bool, error) {
	done := false
	if msg.event != nil {
		err := client.PublishEvent(msg.event)
		done = !backoff.WaitOnError(err)
		if err != nil {
			if msg.attemptsLeft > 0 {
				msg.attemptsLeft--
			}
			m.onFail(msg, err)
			return done, err
		}
	} else {
		events := msg.events
		total := len(events)

		for len(events) > 0 {
			var err error

			events, err = client.PublishEvents(events)
			done = !backoff.WaitOnError(err)
			if done && err != nil {
				op.SigFailed(msg.signaler, err)
				return done, err
			}

			if err != nil {
				if msg.attemptsLeft > 0 {
					msg.attemptsLeft--
				}

				// reset attempt count if subset of messages has been processed
				if len(events) < total && msg.attemptsLeft >= 0 {
					debug("reset fails")
					msg.attemptsLeft = m.maxAttempts
				}

				if err != ErrTempBulkFailure {
					// retry non-published subset of events in batch
					msg.events = events
					m.onFail(msg, err)
					return done, err
				}

				if m.maxAttempts > 0 && msg.attemptsLeft == 0 {
					// no more attempts left => drop
					dropping(msg)
					return done, err
				}

				// reset total count for temporary failure loop
				total = len(events)
			}
		}
	}

	op.SigCompleted(msg.signaler)
	return done, nil
}
Example #18
// PublishEvent writes events to a channel then calls Completed on trans.
// It always returns nil.
func (t *testOutputer) PublishEvent(trans op.Signaler, opts outputs.Options,
	event common.MapStr) error {
	t.events <- event
	op.SigCompleted(trans)
	return nil
}