func (b *bulkWorker) onEvents(ctx *Context, events []common.MapStr) {
	for len(events) > 0 {
		// Split up the bulk to match the required bulk sizes.
		// If the input events have been split up, bufferFull will be set and
		// the bulk request will be published.
		spaceLeft := cap(b.events) - len(b.events)
		consume := len(events)
		bufferFull := spaceLeft <= consume
		signal := ctx.Signal
		b.guaranteed = b.guaranteed || ctx.Guaranteed
		if spaceLeft < consume {
			consume = spaceLeft
			if signal != nil {
				// Create a cascading signaler chain for the
				// subset of events being sent.
				signal = op.SplitSignaler(signal, 2)
			}
		}

		// buffer events
		b.events = append(b.events, events[:consume]...)
		events = events[consume:]
		if signal != nil {
			b.pending = append(b.pending, signal)
		}

		if bufferFull {
			b.publish()
		}
	}
}
func (p *syncPipeline) publish(m message) bool {
	if p.pub.disabled {
		debug("publisher disabled")
		op.SigCompleted(m.context.Signal)
		return true
	}

	client := m.client
	signal := m.context.Signal
	sync := op.NewSignalChannel()
	if len(p.pub.Output) > 1 {
		m.context.Signal = op.SplitSignaler(sync, len(p.pub.Output))
	} else {
		m.context.Signal = sync
	}
	for _, o := range p.pub.Output {
		o.send(m)
	}

	// Await the completion signal from the output plugin. If the client has
	// been disconnected, ignore any signal and drop the events, whether they
	// were sent or not.
	select {
	case <-client.canceler.Done():
		return false // indicate that the events may not have been sent

	case sig := <-sync.C:
		sig.Apply(signal)
		return sig == op.SignalCompleted
	}
}
// BulkPublish publishes the events one by one via PublishEvent, splitting
// the signal across the individual events.
func (b *bulkOutputAdapter) BulkPublish(
	signal op.Signaler,
	opts Options,
	events []common.MapStr,
) error {
	signal = op.SplitSignaler(signal, len(events))
	for _, evt := range events {
		err := b.PublishEvent(signal, opts, evt)
		if err != nil {
			return err
		}
	}
	return nil
}
// BulkPublish publishes the data items one by one via PublishEvent, splitting
// the signal across the individual items.
func (b *bulkOutputAdapter) BulkPublish(
	signal op.Signaler,
	opts Options,
	data []Data,
) error {
	signal = op.SplitSignaler(signal, len(data))
	for _, d := range data {
		err := b.PublishEvent(signal, opts, d)
		if err != nil {
			return err
		}
	}
	return nil
}
// publish forwards the message to all configured outputs without blocking.
// The signal is wrapped to be cancelable and, when more than one output is
// configured, split across the outputs.
func (p *asyncPipeline) publish(m message) bool {
	if p.pub.disabled {
		debug("publisher disabled")
		op.SigCompleted(m.context.Signal)
		return true
	}

	if m.context.Signal != nil {
		s := op.CancelableSignaler(m.client.canceler, m.context.Signal)
		if len(p.outputs) > 1 {
			s = op.SplitSignaler(s, len(p.outputs))
		}
		m.context.Signal = s
	}

	for _, o := range p.outputs {
		o.send(m)
	}
	return true
}
func (o *outputWorker) onBulk(ctx *Context, data []outputs.Data) {
	if len(data) == 0 {
		debug("output worker: no events to publish")
		op.SigCompleted(ctx.Signal)
		return
	}

	if o.maxBulkSize < 0 || len(data) <= o.maxBulkSize {
		o.sendBulk(ctx, data)
		return
	}

	// start splitting bulk request
	splits := (len(data) + (o.maxBulkSize - 1)) / o.maxBulkSize
	ctx.Signal = op.SplitSignaler(ctx.Signal, splits)
	for len(data) > 0 {
		sz := o.maxBulkSize
		if sz > len(data) {
			sz = len(data)
		}
		o.sendBulk(ctx, data[:sz])
		data = data[sz:]
	}
}
func (o *outputWorker) onBulk(ctx *Context, events []common.MapStr) {
	if len(events) == 0 {
		debug("output worker: no events to publish")
		op.SigCompleted(ctx.Signal)
		return
	}

	if o.maxBulkSize < 0 || len(events) <= o.maxBulkSize {
		o.sendBulk(ctx, events)
		return
	}

	// start splitting bulk request
	splits := (len(events) + (o.maxBulkSize - 1)) / o.maxBulkSize
	ctx.Signal = op.SplitSignaler(ctx.Signal, splits)
	for len(events) > 0 {
		sz := o.maxBulkSize
		if sz > len(events) {
			sz = len(events)
		}
		o.sendBulk(ctx, events[:sz])
		events = events[sz:]
	}
}
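All of the snippets above funnel completion reporting through op.SplitSignaler, which fans a single signaler out into n sub-signals. Below is a minimal, self-contained sketch of that fan-out pattern, assuming the Completed/Failed contract implied by the calls above; the Signaler interface, SplitSignaler, and printSignaler types here are illustrative stand-ins, not the op package's actual implementation.

package main

import (
	"fmt"
	"sync"
)

// Signaler mirrors the minimal contract assumed here: Completed is invoked
// on success, Failed on error.
type Signaler interface {
	Completed()
	Failed()
}

// splitSignaler fans one Signaler out into n sub-signals. In this sketch the
// wrapped Signaler receives Completed only after all n sub-signals completed,
// and Failed if any one of them failed.
type splitSignaler struct {
	mu      sync.Mutex
	wrapped Signaler
	count   int
	failed  bool
}

// SplitSignaler wraps s so that it fires only after n sub-signals arrived.
func SplitSignaler(s Signaler, n int) Signaler {
	return &splitSignaler{wrapped: s, count: n}
}

func (s *splitSignaler) Completed() { s.onEvent(false) }
func (s *splitSignaler) Failed()    { s.onEvent(true) }

func (s *splitSignaler) onEvent(failed bool) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.failed = s.failed || failed
	s.count--
	if s.count > 0 {
		return
	}
	if s.failed {
		s.wrapped.Failed()
	} else {
		s.wrapped.Completed()
	}
}

// printSignaler just reports the aggregated outcome.
type printSignaler struct{}

func (printSignaler) Completed() { fmt.Println("all sub-signals completed") }
func (printSignaler) Failed()    { fmt.Println("at least one sub-signal failed") }

func main() {
	sig := SplitSignaler(printSignaler{}, 3)
	sig.Completed()
	sig.Completed()
	sig.Completed() // prints: all sub-signals completed
}

With this sketch, feeding three Completed calls into a split of three makes the wrapped signaler report success exactly once; replacing any one of them with Failed makes it report failure instead, which is the aggregation behavior the bulk and pipeline code above relies on.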