// Returns a copy of the current buffer. func (b *windowTimeBuffer) points() []models.BatchPoint { if b.size == 0 { return nil } points := make([]models.BatchPoint, b.size) if b.stop > b.start { for i, p := range b.window[b.start:b.stop] { points[i] = models.BatchPointFromPoint(p) } } else { j := 0 l := len(b.window) for i := b.start; i < l; i++ { p := b.window[i] points[j] = models.BatchPointFromPoint(p) j++ } for i := 0; i < b.stop; i++ { p := b.window[i] points[j] = models.BatchPointFromPoint(p) j++ } } return points }
// Returns a copy of the current buffer. func (b *windowBuffer) batch() models.Batch { b.Lock() defer b.Unlock() batch := models.Batch{} if b.size == 0 { return batch } batch.Points = make([]models.BatchPoint, b.size) if b.stop > b.start { for i, p := range b.window[b.start:b.stop] { batch.Points[i] = models.BatchPointFromPoint(p) } } else { j := 0 l := len(b.window) for i := b.start; i < l; i++ { p := b.window[i] batch.Points[j] = models.BatchPointFromPoint(p) j++ } for i := 0; i < b.stop; i++ { p := b.window[i] batch.Points[j] = models.BatchPointFromPoint(p) j++ } } return batch }
func (m *MapNode) runStreamMap() error { batches := make(map[models.GroupID]*models.Batch) for p, ok := m.ins[0].NextPoint(); ok; { b := batches[p.Group] if b == nil { // Create new batch tags := make(map[string]string, len(p.Dimensions)) for _, dim := range p.Dimensions { tags[dim] = p.Tags[dim] } b = &models.Batch{ Name: p.Name, Group: p.Group, Tags: tags, TMax: p.Time, } batches[p.Group] = b } if p.Time.Equal(b.TMax) { b.Points = append(b.Points, models.BatchPointFromPoint(p)) // advance to next point p, ok = m.ins[0].NextPoint() } else { err := m.mapBatch(*b) if err != nil { return err } batches[b.Group] = nil } } return nil }
func (i *InfluxDBOutNode) runOut() error { switch i.Wants() { case pipeline.StreamEdge: for p, ok := i.ins[0].NextPoint(); ok; p, ok = i.ins[0].NextPoint() { batch := models.Batch{ Name: p.Name, Group: p.Group, Tags: p.Tags, Points: []models.BatchPoint{models.BatchPointFromPoint(p)}, } err := i.write(p.Database, p.RetentionPolicy, batch) if err != nil { return err } } case pipeline.BatchEdge: for b, ok := i.ins[0].NextBatch(); ok; b, ok = i.ins[0].NextBatch() { err := i.write("", "", b) if err != nil { return err } } } return nil }
func (i *InfluxDBOutNode) runOut([]byte) error { i.statMap.Add(statsInfluxDBPointsWritten, 0) // Start the write buffer i.wb.start() switch i.Wants() { case pipeline.StreamEdge: for p, ok := i.ins[0].NextPoint(); ok; p, ok = i.ins[0].NextPoint() { i.timer.Start() batch := models.Batch{ Name: p.Name, Group: p.Group, Tags: p.Tags, Points: []models.BatchPoint{models.BatchPointFromPoint(p)}, } err := i.write(p.Database, p.RetentionPolicy, batch) if err != nil { return err } i.timer.Stop() } case pipeline.BatchEdge: for b, ok := i.ins[0].NextBatch(); ok; b, ok = i.ins[0].NextBatch() { i.timer.Start() err := i.write("", "", b) if err != nil { return err } i.timer.Stop() } } return nil }
func (i *InfluxDBOutNode) runOut([]byte) error { i.pointsWritten = &expvar.Int{} i.writeErrors = &expvar.Int{} i.statMap.Set(statsInfluxDBPointsWritten, i.pointsWritten) i.statMap.Set(statsInfluxDBWriteErrors, i.writeErrors) // Start the write buffer i.wb.start() // Create the database and retention policy if i.i.CreateFlag { err := func() error { cli, err := i.et.tm.InfluxDBService.NewNamedClient(i.i.Cluster) if err != nil { return err } var createDb bytes.Buffer createDb.WriteString("CREATE DATABASE ") createDb.WriteString(influxql.QuoteIdent(i.i.Database)) if i.i.RetentionPolicy != "" { createDb.WriteString(" WITH NAME ") createDb.WriteString(influxql.QuoteIdent(i.i.RetentionPolicy)) } _, err = cli.Query(influxdb.Query{Command: createDb.String()}) if err != nil { return err } return nil }() if err != nil { i.logger.Printf("E! failed to create database %q on cluster %q: %v", i.i.Database, i.i.Cluster, err) } } switch i.Wants() { case pipeline.StreamEdge: for p, ok := i.ins[0].NextPoint(); ok; p, ok = i.ins[0].NextPoint() { i.timer.Start() batch := models.Batch{ Name: p.Name, Group: p.Group, Tags: p.Tags, ByName: p.Dimensions.ByName, Points: []models.BatchPoint{models.BatchPointFromPoint(p)}, } err := i.write(p.Database, p.RetentionPolicy, batch) if err != nil { return err } i.timer.Stop() } case pipeline.BatchEdge: for b, ok := i.ins[0].NextBatch(); ok; b, ok = i.ins[0].NextBatch() { i.timer.Start() err := i.write("", "", b) if err != nil { return err } i.timer.Stop() } } return nil }
func (a *AlertNode) runAlert() error { switch a.Wants() { case pipeline.StreamEdge: for p, ok := a.ins[0].NextPoint(); ok; p, ok = a.ins[0].NextPoint() { l := a.determineLevel(p.Fields, p.Tags) state := a.updateState(l, p.Group) if (a.a.UseFlapping && state.flapping) || (a.a.IsStateChangesOnly && !state.changed) { continue } // send alert if we are not OK or we are OK and state changed (i.e recovery) if l != OKAlert || state.changed { batch := models.Batch{ Name: p.Name, Group: p.Group, Tags: p.Tags, Points: []models.BatchPoint{models.BatchPointFromPoint(p)}, } ad, err := a.alertData(p.Name, p.Group, p.Tags, p.Fields, l, p.Time, batch) if err != nil { return err } for _, h := range a.handlers { h(ad) } } } case pipeline.BatchEdge: for b, ok := a.ins[0].NextBatch(); ok; b, ok = a.ins[0].NextBatch() { triggered := false for _, p := range b.Points { l := a.determineLevel(p.Fields, p.Tags) if l > OKAlert { triggered = true state := a.updateState(l, b.Group) if (a.a.UseFlapping && state.flapping) || (a.a.IsStateChangesOnly && !state.changed) { break } ad, err := a.alertData(b.Name, b.Group, b.Tags, p.Fields, l, p.Time, b) if err != nil { return err } for _, h := range a.handlers { h(ad) } break } } if !triggered { state := a.updateState(OKAlert, b.Group) if state.changed { var fields models.Fields if l := len(b.Points); l > 0 { fields = b.Points[l-1].Fields } ad, err := a.alertData(b.Name, b.Group, b.Tags, fields, OKAlert, b.TMax, b) if err != nil { return err } for _, h := range a.handlers { h(ad) } } } } } return nil }
// runAlert evaluates alert levels on this node's input, fires handlers,
// and forwards (possibly annotated) data to child nodes. Stream points
// are evaluated individually; a batch is reduced to a single level —
// highest by default, lowest when AllFlag is set (i.e. all points must
// breach). Triggered alerts may stamp level/id/duration into tags and
// fields before forwarding.
func (a *AlertNode) runAlert([]byte) error {
	// Register fresh counters for each alert level with the stat map.
	a.alertsTriggered = &expvar.Int{}
	a.statMap.Set(statsAlertsTriggered, a.alertsTriggered)
	a.oksTriggered = &expvar.Int{}
	a.statMap.Set(statsOKsTriggered, a.oksTriggered)
	a.infosTriggered = &expvar.Int{}
	a.statMap.Set(statsInfosTriggered, a.infosTriggered)
	a.warnsTriggered = &expvar.Int{}
	a.statMap.Set(statsWarnsTriggered, a.warnsTriggered)
	a.critsTriggered = &expvar.Int{}
	a.statMap.Set(statsCritsTriggered, a.critsTriggered)
	switch a.Wants() {
	case pipeline.StreamEdge:
		for p, ok := a.ins[0].NextPoint(); ok; p, ok = a.ins[0].NextPoint() {
			a.timer.Start()
			l := a.determineLevel(p.Time, p.Fields, p.Tags)
			state := a.updateState(p.Time, l, p.Group)
			// Suppress while flapping, or when only state changes matter and
			// the state neither changed nor expired.
			if (a.a.UseFlapping && state.flapping) || (a.a.IsStateChangesOnly && !state.changed && !state.expired) {
				a.timer.Stop()
				continue
			}
			// send alert if we are not OK or we are OK and state changed (i.e recovery)
			if l != OKAlert || state.changed {
				// Wrap the single point as a batch for the alert payload.
				batch := models.Batch{
					Name:   p.Name,
					Group:  p.Group,
					ByName: p.Dimensions.ByName,
					Tags:   p.Tags,
					Points: []models.BatchPoint{models.BatchPointFromPoint(p)},
				}
				state.triggered(p.Time)
				duration := state.duration()
				ad, err := a.alertData(p.Name, p.Group, p.Tags, p.Fields, l, p.Time, duration, batch)
				if err != nil {
					return err
				}
				a.handleAlert(ad)
				// Annotate the outgoing point's tags with level/id if
				// configured. Copy-on-write keeps the original maps intact.
				if a.a.LevelTag != "" || a.a.IdTag != "" {
					p.Tags = p.Tags.Copy()
					if a.a.LevelTag != "" {
						p.Tags[a.a.LevelTag] = l.String()
					}
					if a.a.IdTag != "" {
						p.Tags[a.a.IdTag] = ad.ID
					}
				}
				// Likewise annotate fields with level/id/duration if configured.
				if a.a.LevelField != "" || a.a.IdField != "" || a.a.DurationField != "" {
					p.Fields = p.Fields.Copy()
					if a.a.LevelField != "" {
						p.Fields[a.a.LevelField] = l.String()
					}
					if a.a.IdField != "" {
						p.Fields[a.a.IdField] = ad.ID
					}
					if a.a.DurationField != "" {
						p.Fields[a.a.DurationField] = int64(duration)
					}
				}
				// Forwarding time to children is excluded from this node's timer.
				a.timer.Pause()
				for _, child := range a.outs {
					err := child.CollectPoint(p)
					if err != nil {
						return err
					}
				}
				a.timer.Resume()
			}
			a.timer.Stop()
		}
	case pipeline.BatchEdge:
		for b, ok := a.ins[0].NextBatch(); ok; b, ok = a.ins[0].NextBatch() {
			a.timer.Start()
			if len(b.Points) == 0 {
				a.timer.Stop()
				continue
			}
			// Keep track of lowest level for any point
			lowestLevel := CritAlert
			// Keep track of highest level and point
			highestLevel := OKAlert
			var highestPoint *models.BatchPoint
			for i, p := range b.Points {
				l := a.determineLevel(p.Time, p.Fields, p.Tags)
				if l < lowestLevel {
					lowestLevel = l
				}
				if l > highestLevel || highestPoint == nil {
					highestLevel = l
					// Take a pointer into the slice so fields/time of the
					// deciding point can be referenced below.
					highestPoint = &b.Points[i]
				}
			}
			// Default the determined level to lowest.
			l := lowestLevel
			// Update determined level to highest if we don't care about all
			if !a.a.AllFlag {
				l = highestLevel
			}
			// Create alert Data
			t := highestPoint.Time
			if a.a.AllFlag || l == OKAlert {
				t = b.TMax
			}
			// Update state
			state := a.updateState(t, l, b.Group)
			// Trigger alert if:
			//   l == OK and state.changed (aka recovery)
			//     OR
			//   l != OK and flapping/statechanges checkout
			if state.changed && l == OKAlert ||
				(l != OKAlert &&
					!((a.a.UseFlapping && state.flapping) ||
						(a.a.IsStateChangesOnly && !state.changed && !state.expired))) {
				state.triggered(t)
				duration := state.duration()
				ad, err := a.alertData(b.Name, b.Group, b.Tags, highestPoint.Fields, l, t, duration, b)
				if err != nil {
					return err
				}
				a.handleAlert(ad)
				// Update tags or fields for Level property
				if a.a.LevelTag != "" ||
					a.a.LevelField != "" ||
					a.a.IdTag != "" ||
					a.a.IdField != "" ||
					a.a.DurationField != "" {
					// Annotate every point in the batch (copy-on-write per map).
					for i := range b.Points {
						if a.a.LevelTag != "" || a.a.IdTag != "" {
							b.Points[i].Tags = b.Points[i].Tags.Copy()
							if a.a.LevelTag != "" {
								b.Points[i].Tags[a.a.LevelTag] = l.String()
							}
							if a.a.IdTag != "" {
								b.Points[i].Tags[a.a.IdTag] = ad.ID
							}
						}
						if a.a.LevelField != "" || a.a.IdField != "" || a.a.DurationField != "" {
							b.Points[i].Fields = b.Points[i].Fields.Copy()
							if a.a.LevelField != "" {
								b.Points[i].Fields[a.a.LevelField] = l.String()
							}
							if a.a.IdField != "" {
								b.Points[i].Fields[a.a.IdField] = ad.ID
							}
							if a.a.DurationField != "" {
								b.Points[i].Fields[a.a.DurationField] = int64(duration)
							}
						}
					}
					// Also annotate the batch-level tags.
					if a.a.LevelTag != "" ||
						a.a.IdTag != "" {
						b.Tags = b.Tags.Copy()
						if a.a.LevelTag != "" {
							b.Tags[a.a.LevelTag] = l.String()
						}
						if a.a.IdTag != "" {
							b.Tags[a.a.IdTag] = ad.ID
						}
					}
				}
				// Forwarding time to children is excluded from this node's timer.
				a.timer.Pause()
				for _, child := range a.outs {
					err := child.CollectBatch(b)
					if err != nil {
						return err
					}
				}
				a.timer.Resume()
			}
			a.timer.Stop()
		}
	}
	return nil
}