Example #1
// batch returns a copy of the current contents of the buffer.
func (b *windowBuffer) batch() models.Batch {
	b.Lock()
	defer b.Unlock()
	batch := models.Batch{}
	if b.size == 0 {
		return batch
	}
	batch.Points = make([]models.BatchPoint, b.size)
	if b.stop > b.start {
		// Contiguous case: the live region is a single slice.
		for i, p := range b.window[b.start:b.stop] {
			batch.Points[i] = models.BatchPointFromPoint(p)
		}
	} else {
		// Wrapped case: copy from start to the end of the window, then from 0 to stop.
		j := 0
		l := len(b.window)
		for i := b.start; i < l; i++ {
			p := b.window[i]
			batch.Points[j] = models.BatchPointFromPoint(p)
			j++
		}
		for i := 0; i < b.stop; i++ {
			p := b.window[i]
			batch.Points[j] = models.BatchPointFromPoint(p)
			j++
		}
	}
	return batch
}
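
The wrap-around branch above is the usual way to linearize a circular buffer. A minimal, self-contained sketch of the same idea follows; the ring type, its fields, and the snapshot method are illustrative stand-ins, not the Kapacitor types.

package main

import "fmt"

// ring is a hypothetical fixed-size circular buffer of ints,
// mirroring the start/stop/size bookkeeping used above.
type ring struct {
	buf         []int
	start, stop int // stop is the index one past the last element
	size        int
}

// snapshot copies the live region of the ring into a new slice,
// handling both the contiguous and the wrapped case.
func (r *ring) snapshot() []int {
	out := make([]int, 0, r.size)
	if r.size == 0 {
		return out
	}
	if r.stop > r.start {
		// Contiguous: a single copy.
		out = append(out, r.buf[r.start:r.stop]...)
	} else {
		// Wrapped: tail of the backing slice, then the head.
		out = append(out, r.buf[r.start:]...)
		out = append(out, r.buf[:r.stop]...)
	}
	return out
}

func main() {
	// Capacity 4, wrapped: logical order is 3, 4, 5.
	r := &ring{buf: []int{5, 0, 3, 4}, start: 2, stop: 1, size: 3}
	fmt.Println(r.snapshot()) // [3 4 5]
}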
Example #2
// reduceResultToBatch takes the result of a reduce operation and converts it to a batch.
func reduceResultToBatch(field string, value interface{}, tmax time.Time, usePointTimes bool) models.Batch {
	b := models.Batch{}
	b.TMax = tmax
	switch v := value.(type) {
	case tsdb.PositionPoints:
		b.Points = make([]models.BatchPoint, len(v))
		for i, pp := range v {
			if usePointTimes {
				b.Points[i].Time = time.Unix(pp.Time, 0).UTC()
			} else {
				b.Points[i].Time = tmax
			}
			b.Points[i].Fields = models.Fields{field: pp.Value}
			b.Points[i].Tags = pp.Tags
		}
	case tsdb.PositionPoint:
		b.Points = make([]models.BatchPoint, 1)
		if usePointTimes {
			b.Points[0].Time = time.Unix(v.Time, 0).UTC()
		} else {
			b.Points[0].Time = tmax
		}
		b.Points[0].Fields = models.Fields{field: v.Value}
		b.Points[0].Tags = v.Tags
	case tsdb.InterfaceValues:
		b.Points = make([]models.BatchPoint, len(v))
		for i, p := range v {
			b.Points[i].Time = tmax
			b.Points[i].Fields = models.Fields{field: p}
		}
	default:
		b.Points = make([]models.BatchPoint, 1)
		b.Points[0].Time = tmax
		b.Points[0].Fields = models.Fields{field: v}
	}
	return b
}
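
The type switch above normalizes several reducer result shapes into one uniform slice of batch points, choosing between each point's own time and tmax. A minimal, self-contained sketch of that pattern follows; the point, positionPoint, and toPoints names are illustrative, not part of the Kapacitor API.

package main

import (
	"fmt"
	"time"
)

// point is an illustrative flattened result row.
type point struct {
	Time   time.Time
	Fields map[string]interface{}
}

// positionPoint stands in for a reducer result that carries its own timestamp.
type positionPoint struct {
	Time  int64 // Unix seconds
	Value interface{}
}

// toPoints converts a reducer result of unknown shape into a uniform
// []point, choosing between each point's own time and tmax.
func toPoints(field string, value interface{}, tmax time.Time, usePointTimes bool) []point {
	switch v := value.(type) {
	case []positionPoint:
		pts := make([]point, len(v))
		for i, pp := range v {
			t := tmax
			if usePointTimes {
				t = time.Unix(pp.Time, 0).UTC()
			}
			pts[i] = point{Time: t, Fields: map[string]interface{}{field: pp.Value}}
		}
		return pts
	default:
		// Scalar result: a single point stamped with tmax.
		return []point{{Time: tmax, Fields: map[string]interface{}{field: v}}}
	}
}

func main() {
	tmax := time.Date(2016, 1, 1, 0, 0, 10, 0, time.UTC)
	pts := toPoints("top", []positionPoint{{Time: 1451606405, Value: 42.0}}, tmax, true)
	fmt.Println(pts[0].Time, pts[0].Fields["top"])
}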
Example #3
// JoinIntoBatch joins all batches in the set into a single batch.
func (js *joinset) JoinIntoBatch() (models.Batch, bool) {
	newBatch := models.Batch{
		Name:  js.name,
		Group: js.First().PointGroup(),
		Tags:  js.First().PointTags(),
		TMax:  js.time,
	}
	empty := make([]bool, js.expected)
	emptyCount := 0
	indexes := make([]int, js.expected)
	var fieldNames []string
	for emptyCount < js.expected {
		set := make([]*models.BatchPoint, js.expected)
		setTime := time.Time{}
		count := 0
		for i, batch := range js.values {
			if empty[i] {
				continue
			}
			if batch == nil {
				emptyCount++
				empty[i] = true
				continue
			}
			b := batch.(models.Batch)
			if indexes[i] == len(b.Points) {
				emptyCount++
				empty[i] = true
				continue
			}
			bp := b.Points[indexes[i]]
			t := bp.Time.Round(js.tolerance)
			if setTime.IsZero() {
				setTime = t
			}
			if t.Equal(setTime) {
				if fieldNames == nil {
					for k := range bp.Fields {
						fieldNames = append(fieldNames, k)
					}
				}
				set[i] = &bp
				indexes[i]++
				count++
			}
		}
		// No group contributed a point, so every batch must be
		// exhausted; skip this empty set.
		if count == 0 {
			continue
		}
		// Join all batch points in set
		fields := make(models.Fields, js.expected*len(fieldNames))
		for i, bp := range set {
			if bp == nil {
				switch js.fill {
				case influxql.NullFill:
					for _, k := range fieldNames {
						fields[js.prefixes[i]+"."+k] = nil
					}
				case influxql.NumberFill:
					for _, k := range fieldNames {
						fields[js.prefixes[i]+"."+k] = js.fillValue
					}
				default:
					// inner join no valid point possible
					return models.Batch{}, false
				}
			} else {
				for k, v := range bp.Fields {
					fields[js.prefixes[i]+"."+k] = v
				}
			}
		}
		bp := models.BatchPoint{
			Tags:   newBatch.Tags,
			Time:   setTime,
			Fields: fields,
		}
		newBatch.Points = append(newBatch.Points, bp)
	}
	return newBatch, true
}
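
The heart of the join is aligning one point per source on a rounded timestamp and merging their fields under per-source prefixes, filling in (or aborting, for an inner join) when a source is missing. A compact, self-contained sketch of that field-merging step follows; mergeFields and its parameters are illustrative names, not the Kapacitor API.

package main

import "fmt"

// mergeFields joins one aligned "row" of field maps (one per source, nil when
// that source has no point at this time) into a single prefixed field map.
// Missing sources are null-filled; returning false instead would model an
// inner join that drops the row.
func mergeFields(prefixes []string, fieldNames []string, row []map[string]interface{}) (map[string]interface{}, bool) {
	out := make(map[string]interface{}, len(prefixes)*len(fieldNames))
	for i, fields := range row {
		if fields == nil {
			// Null fill: emit the expected keys with nil values.
			for _, k := range fieldNames {
				out[prefixes[i]+"."+k] = nil
			}
			continue
		}
		for k, v := range fields {
			out[prefixes[i]+"."+k] = v
		}
	}
	return out, true
}

func main() {
	row := []map[string]interface{}{
		{"value": 1.0},
		nil, // second source has no point at this timestamp
	}
	fields, _ := mergeFields([]string{"a", "b"}, []string{"value"}, row)
	fmt.Println(fields) // map[a.value:1 b.value:<nil>]
}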
Example #4
// runFlatten reads from the node's input edge and, depending on the edge type,
// buffers points or batches by their rounded times and emits flattened results.
func (n *FlattenNode) runFlatten([]byte) error {
	switch n.Wants() {
	case pipeline.StreamEdge:
		flattenBuffers := make(map[models.GroupID]*flattenStreamBuffer)
		for p, ok := n.ins[0].NextPoint(); ok; p, ok = n.ins[0].NextPoint() {
			n.timer.Start()
			t := p.Time.Round(n.f.Tolerance)
			currentBuf, ok := flattenBuffers[p.Group]
			if !ok {
				currentBuf = &flattenStreamBuffer{
					Time:       t,
					Name:       p.Name,
					Group:      p.Group,
					Dimensions: p.Dimensions,
					Tags:       p.PointTags(),
				}
				flattenBuffers[p.Group] = currentBuf
			}
			rp := models.RawPoint{
				Time:   t,
				Fields: p.Fields,
				Tags:   p.Tags,
			}
			if t.Equal(currentBuf.Time) {
				currentBuf.Points = append(currentBuf.Points, rp)
			} else {
				if fields, err := n.flatten(currentBuf.Points); err != nil {
					return err
				} else {
					// Emit point
					flatP := models.Point{
						Time:       currentBuf.Time,
						Name:       currentBuf.Name,
						Group:      currentBuf.Group,
						Dimensions: currentBuf.Dimensions,
						Tags:       currentBuf.Tags,
						Fields:     fields,
					}
					n.timer.Pause()
					for _, out := range n.outs {
						err := out.CollectPoint(flatP)
						if err != nil {
							return err
						}
					}
					n.timer.Resume()
				}
				// Update currentBuf with new time and initial point
				currentBuf.Time = t
				currentBuf.Points = currentBuf.Points[0:1]
				currentBuf.Points[0] = rp
			}
			n.timer.Stop()
		}
	case pipeline.BatchEdge:
		allBuffers := make(map[models.GroupID]*flattenBatchBuffer)
		for b, ok := n.ins[0].NextBatch(); ok; b, ok = n.ins[0].NextBatch() {
			n.timer.Start()
			t := b.TMax.Round(n.f.Tolerance)
			currentBuf, ok := allBuffers[b.Group]
			if !ok {
				currentBuf = &flattenBatchBuffer{
					Time:   t,
					Name:   b.Name,
					Group:  b.Group,
					Tags:   b.Tags,
					Points: make(map[time.Time][]models.RawPoint),
				}
				allBuffers[b.Group] = currentBuf
			}
			if !t.Equal(currentBuf.Time) {
				// Flatten/Emit old buffer
				times := make(timeList, 0, len(currentBuf.Points))
				for t := range currentBuf.Points {
					times = append(times, t)
				}
				sort.Sort(times)
				flatBatch := models.Batch{
					TMax:   currentBuf.Time,
					Name:   currentBuf.Name,
					Group:  currentBuf.Group,
					ByName: b.ByName,
					Tags:   currentBuf.Tags,
				}
				for _, t := range times {
					if fields, err := n.flatten(currentBuf.Points[t]); err != nil {
						return err
					} else {
						flatBatch.Points = append(flatBatch.Points, models.BatchPoint{
							Time:   t,
							Tags:   currentBuf.Tags,
							Fields: fields,
						})
					}
					delete(currentBuf.Points, t)
				}
				n.timer.Pause()
				for _, out := range n.outs {
					err := out.CollectBatch(flatBatch)
					if err != nil {
						return err
					}
				}
				n.timer.Resume()
				// Update currentBuf with new time
				currentBuf.Time = t
			}
			for _, p := range b.Points {
				t := p.Time.Round(n.f.Tolerance)
				currentBuf.Points[t] = append(currentBuf.Points[t], models.RawPoint{
					Time:   t,
					Fields: p.Fields,
					Tags:   p.Tags,
				})
			}
			n.timer.Stop()
		}
	}
	return nil
}
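
Both branches rely on rounding timestamps by the configured tolerance so that points arriving close together land in the same flatten bucket. A small, self-contained illustration of that bucketing step follows; bucketByTolerance is a hypothetical helper, not part of Kapacitor.

package main

import (
	"fmt"
	"time"
)

// bucketByTolerance groups timestamps by rounding them to the nearest
// multiple of tolerance, the same alignment runFlatten applies before
// flattening points that share a bucket.
func bucketByTolerance(times []time.Time, tolerance time.Duration) map[time.Time][]time.Time {
	buckets := make(map[time.Time][]time.Time)
	for _, t := range times {
		key := t.Round(tolerance)
		buckets[key] = append(buckets[key], t)
	}
	return buckets
}

func main() {
	base := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
	times := []time.Time{
		base,
		base.Add(400 * time.Millisecond), // rounds down to base
		base.Add(900 * time.Millisecond), // rounds up to base+1s
	}
	for key, group := range bucketByTolerance(times, time.Second) {
		fmt.Println(key, len(group))
	}
}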