Example #1
// batch returns a copy of the current buffer contents as a models.Batch.
func (b *windowBuffer) batch() models.Batch {
	b.Lock()
	defer b.Unlock()
	batch := models.Batch{}
	if b.size == 0 {
		return batch
	}
	batch.Points = make([]models.BatchPoint, b.size)
	if b.stop > b.start {
		for i, p := range b.window[b.start:b.stop] {
			batch.Points[i] = models.BatchPointFromPoint(p)
		}
	} else {
		j := 0
		l := len(b.window)
		for i := b.start; i < l; i++ {
			p := b.window[i]
			batch.Points[j] = models.BatchPointFromPoint(p)
			j++
		}
		for i := 0; i < b.stop; i++ {
			p := b.window[i]
			batch.Points[j] = models.BatchPointFromPoint(p)
			j++
		}
	}
	return batch
}
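The two-branch copy above handles the wrap-around case of a circular window: when stop <= start, the live region spans the end of the backing slice and then the beginning. A minimal standalone sketch of the same indexing, using a plain []int in place of the Kapacitor point types (the names here are illustrative, not part of the original code):

package main

import "fmt"

// snapshot copies the live region of a ring buffer that runs from start
// (inclusive) to stop (exclusive) and may wrap past the end of the slice.
func snapshot(window []int, start, stop, size int) []int {
	if size == 0 {
		return nil
	}
	out := make([]int, 0, size)
	if stop > start {
		out = append(out, window[start:stop]...)
	} else {
		out = append(out, window[start:]...)
		out = append(out, window[:stop]...)
	}
	return out
}

func main() {
	// The live region wraps: indices 3, 4, then 0, 1 of the backing array.
	fmt.Println(snapshot([]int{10, 11, 2, 3, 4}, 3, 2, 4)) // [3 4 10 11]
}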
Example #2
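// CollectBatch sends the batch b on the edge, returning ErrAborted if the
// edge is aborted before the batch can be accepted.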
func (e *Edge) CollectBatch(b models.Batch) error {
	e.collected.Add(1)
	e.incCollected(b.Group, b.Tags, b.PointDimensions(), int64(len(b.Points)))
	select {
	case <-e.aborted:
		return ErrAborted
	case e.batch <- b:
		return nil
	}
}
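The select above is the usual Go pattern for a send that can be cancelled: the send on e.batch blocks until a consumer is ready, unless the aborted channel is closed first. A minimal standalone sketch of the same pattern with plain channels (the names below are illustrative and not part of the Edge type):

package main

import (
	"errors"
	"fmt"
)

var errAborted = errors.New("edge aborted")

// send blocks until v is accepted on out or the aborted channel is closed,
// whichever happens first.
func send(out chan<- int, aborted <-chan struct{}, v int) error {
	select {
	case <-aborted:
		return errAborted
	case out <- v:
		return nil
	}
}

func main() {
	out := make(chan int, 1)
	aborted := make(chan struct{})

	fmt.Println(send(out, aborted, 1)) // <nil>

	close(aborted)
	// The buffer is full and the abort channel is closed, so the send gives up.
	fmt.Println(send(out, aborted, 2)) // edge aborted
}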
Example #3
// Replay the batch data from a single source
func (r *replayBatchSource) replayBatchFromData(data io.ReadCloser, batch BatchCollector, recTime bool) {
	defer batch.Close()
	defer data.Close()

	in := bufio.NewScanner(data)

	// Find relative times
	start := time.Time{}
	var diff time.Duration
	zero := r.clck.Zero()

	for in.Scan() {
		var b models.Batch
		err := json.Unmarshal(in.Bytes(), &b)
		if err != nil {
			r.allErrs <- err
			return
		}
		if len(b.Points) == 0 {
			// do nothing
			continue
		}
		b.Group = models.TagsToGroupID(models.SortedKeys(b.Tags), b.Tags)

		if start.IsZero() {
			start = b.Points[0].Time
			diff = zero.Sub(start)
		}
		// Add the batch tags to any points that lack their own tags
		if len(b.Tags) > 0 {
			for i := range b.Points {
				if len(b.Points[i].Tags) == 0 {
					b.Points[i].Tags = b.Tags
				}
			}
		}
		var lastTime time.Time
		if !recTime {
			for i := range b.Points {
				b.Points[i].Time = b.Points[i].Time.Add(diff).UTC()
			}
			lastTime = b.Points[len(b.Points)-1].Time
		} else {
			lastTime = b.Points[len(b.Points)-1].Time.Add(diff).UTC()
		}
		r.clck.Until(lastTime)
		b.TMax = b.Points[len(b.Points)-1].Time
		batch.CollectBatch(b)
	}
	r.allErrs <- in.Err()
}
Example #4
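// EmitBatch drains the underlying emitter and converts its aggregated values
// into a single models.Batch, resolving point times and merging batch-level
// and point-level tags.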
func (e *booleanPointEmitter) EmitBatch() models.Batch {
	slice := e.emitter.Emit()
	b := models.Batch{
		Name:   e.name,
		TMax:   e.time,
		Group:  e.group,
		ByName: e.dimensions.ByName,
		Tags:   e.tags,
		Points: make([]models.BatchPoint, len(slice)),
	}
	var t time.Time
	for i, ap := range slice {
		if e.pointTimes {
			if ap.Time == influxql.ZeroTime {
				t = e.time
			} else {
				t = time.Unix(0, ap.Time).UTC()
			}
		} else {
			t = e.time
		}
		var tags models.Tags
		if l := len(ap.Tags.KeyValues()); l > 0 {
			// Merge batch and point specific tags
			tags = make(models.Tags, len(e.tags)+l)
			for k, v := range e.tags {
				tags[k] = v
			}
			for k, v := range ap.Tags.KeyValues() {
				tags[k] = v
			}
		} else {
			tags = e.tags
		}
		b.Points[i] = models.BatchPoint{
			Time:   t,
			Tags:   tags,
			Fields: map[string]interface{}{e.as: ap.Value},
		}
		if t.After(b.TMax) {
			b.TMax = t
		}
	}
	return b
}
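The tag handling above copies the batch-level tags first and then overlays the point-specific tags, so point values win on conflicts while points without their own tags share the batch map. A minimal standalone sketch of that merge with plain maps (names are illustrative):

package main

import "fmt"

// mergeTags overlays point tags on top of batch tags; point values win.
// When the point has no tags of its own, the batch map is reused as-is.
func mergeTags(batchTags, pointTags map[string]string) map[string]string {
	if len(pointTags) == 0 {
		return batchTags
	}
	merged := make(map[string]string, len(batchTags)+len(pointTags))
	for k, v := range batchTags {
		merged[k] = v
	}
	for k, v := range pointTags {
		merged[k] = v
	}
	return merged
}

func main() {
	batch := map[string]string{"host": "serverA", "dc": "east"}
	point := map[string]string{"host": "serverB"}
	fmt.Println(mergeTags(batch, point)) // map[dc:east host:serverB]
}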
Example #5
// readBatchFromIO reads batch data from a single source and sends each decoded batch on the batches channel.
func readBatchFromIO(data io.ReadCloser, batches chan<- models.Batch) error {
	defer close(batches)
	defer data.Close()
	dec := json.NewDecoder(data)
	for dec.More() {
		var b models.Batch
		err := dec.Decode(&b)
		if err != nil {
			return err
		}
		if len(b.Points) == 0 {
			// do nothing
			continue
		}
		if b.Group == "" {
			b.Group = models.ToGroupID(
				b.Name,
				b.Tags,
				models.Dimensions{
					ByName:   b.ByName,
					TagNames: models.SortedKeys(b.Tags),
				},
			)
		}
		// Add the batch tags to any points that lack their own tags
		if len(b.Tags) > 0 {
			for i := range b.Points {
				if len(b.Points[i].Tags) == 0 {
					b.Points[i].Tags = b.Tags
				}
			}
		}
		batches <- b
	}
	return nil
}
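A sketch of how this reader might be driven from within the same package, assuming an os.File as the source and a consumer that ranges over the channel; consumeRecording, the channel sizing, and the printing are hypothetical, not part of the original code:

// Hypothetical call site in the same package (assumes "os" and "fmt" are imported).
func consumeRecording(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	batches := make(chan models.Batch)
	errC := make(chan error, 1)
	go func() {
		// readBatchFromIO closes both the channel and the file when it returns.
		errC <- readBatchFromIO(f, batches)
	}()
	for b := range batches {
		fmt.Printf("batch %s: %d points\n", b.Name, len(b.Points))
	}
	return <-errC
}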
Example #6
// take the result of a reduce operation and convert it to a batch
func reduceResultToBatch(field string, value interface{}, tmax time.Time, usePointTimes bool) models.Batch {
	b := models.Batch{}
	b.TMax = tmax
	switch v := value.(type) {
	case tsdb.PositionPoints:
		b.Points = make([]models.BatchPoint, len(v))
		for i, pp := range v {
			if usePointTimes {
				b.Points[i].Time = time.Unix(pp.Time, 0).UTC()
			} else {
				b.Points[i].Time = tmax
			}
			b.Points[i].Fields = models.Fields{field: pp.Value}
			b.Points[i].Tags = pp.Tags
		}
	case tsdb.PositionPoint:
		b.Points = make([]models.BatchPoint, 1)
		if usePointTimes {
			b.Points[0].Time = time.Unix(v.Time, 0).UTC()
		} else {
			b.Points[0].Time = tmax
		}
		b.Points[0].Fields = models.Fields{field: v.Value}
		b.Points[0].Tags = v.Tags
	case tsdb.InterfaceValues:
		b.Points = make([]models.BatchPoint, len(v))
		for i, p := range v {
			b.Points[i].Time = tmax
			b.Points[i].Fields = models.Fields{field: p}
		}
	default:
		b.Points = make([]models.BatchPoint, 1)
		b.Points[0].Time = tmax
		b.Points[0].Fields = models.Fields{field: v}
	}
	return b
}
Example #7
// join all batches in the set into a single batch
func (js *joinset) JoinIntoBatch() (models.Batch, bool) {
	newBatch := models.Batch{
		Name:  js.name,
		Group: js.First().PointGroup(),
		Tags:  js.First().PointTags(),
		TMax:  js.time,
	}
	empty := make([]bool, js.expected)
	emptyCount := 0
	indexes := make([]int, js.expected)
	var fieldNames []string
	for emptyCount < js.expected {
		set := make([]*models.BatchPoint, js.expected)
		setTime := time.Time{}
		count := 0
		for i, batch := range js.values {
			if empty[i] {
				continue
			}
			if batch == nil {
				emptyCount++
				empty[i] = true
				continue
			}
			b := batch.(models.Batch)
			if indexes[i] == len(b.Points) {
				emptyCount++
				empty[i] = true
				continue
			}
			bp := b.Points[indexes[i]]
			t := bp.Time.Round(js.tolerance)
			if setTime.IsZero() {
				setTime = t
			}
			if t.Equal(setTime) {
				if fieldNames == nil {
					for k := range bp.Fields {
						fieldNames = append(fieldNames, k)
					}
				}
				set[i] = &bp
				indexes[i]++
				count++
			}
		}
		// We didn't get any points from any group, so this set is empty;
		// skip it.
		if count == 0 {
			continue
		}
		// Join all batch points in set
		fields := make(models.Fields, js.expected*len(fieldNames))
		for i, bp := range set {
			if bp == nil {
				switch js.fill {
				case influxql.NullFill:
					for _, k := range fieldNames {
						fields[js.prefixes[i]+"."+k] = nil
					}
				case influxql.NumberFill:
					for _, k := range fieldNames {
						fields[js.prefixes[i]+"."+k] = js.fillValue
					}
				default:
					// inner join: no valid point is possible
					return models.Batch{}, false
				}
			} else {
				for k, v := range bp.Fields {
					fields[js.prefixes[i]+"."+k] = v
				}
			}
		}
		bp := models.BatchPoint{
			Tags:   newBatch.Tags,
			Time:   setTime,
			Fields: fields,
		}
		newBatch.Points = append(newBatch.Points, bp)
	}
	return newBatch, true
}
Example #8
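// runFlatten consumes points or batches from the input edge, buffers them by
// rounded time per group, and emits the flattened results downstream.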
func (n *FlattenNode) runFlatten([]byte) error {
	switch n.Wants() {
	case pipeline.StreamEdge:
		flattenBuffers := make(map[models.GroupID]*flattenStreamBuffer)
		for p, ok := n.ins[0].NextPoint(); ok; p, ok = n.ins[0].NextPoint() {
			n.timer.Start()
			t := p.Time.Round(n.f.Tolerance)
			currentBuf, ok := flattenBuffers[p.Group]
			if !ok {
				currentBuf = &flattenStreamBuffer{
					Time:       t,
					Name:       p.Name,
					Group:      p.Group,
					Dimensions: p.Dimensions,
					Tags:       p.PointTags(),
				}
				flattenBuffers[p.Group] = currentBuf
			}
			rp := models.RawPoint{
				Time:   t,
				Fields: p.Fields,
				Tags:   p.Tags,
			}
			if t.Equal(currentBuf.Time) {
				currentBuf.Points = append(currentBuf.Points, rp)
			} else {
				if fields, err := n.flatten(currentBuf.Points); err != nil {
					return err
				} else {
					// Emit point
					flatP := models.Point{
						Time:       currentBuf.Time,
						Name:       currentBuf.Name,
						Group:      currentBuf.Group,
						Dimensions: currentBuf.Dimensions,
						Tags:       currentBuf.Tags,
						Fields:     fields,
					}
					n.timer.Pause()
					for _, out := range n.outs {
						err := out.CollectPoint(flatP)
						if err != nil {
							return err
						}
					}
					n.timer.Resume()
				}
				// Update currentBuf with new time and initial point
				currentBuf.Time = t
				currentBuf.Points = currentBuf.Points[0:1]
				currentBuf.Points[0] = rp
			}
			n.timer.Stop()
		}
	case pipeline.BatchEdge:
		allBuffers := make(map[models.GroupID]*flattenBatchBuffer)
		for b, ok := n.ins[0].NextBatch(); ok; b, ok = n.ins[0].NextBatch() {
			n.timer.Start()
			t := b.TMax.Round(n.f.Tolerance)
			currentBuf, ok := allBuffers[b.Group]
			if !ok {
				currentBuf = &flattenBatchBuffer{
					Time:   t,
					Name:   b.Name,
					Group:  b.Group,
					Tags:   b.Tags,
					Points: make(map[time.Time][]models.RawPoint),
				}
				allBuffers[b.Group] = currentBuf
			}
			if !t.Equal(currentBuf.Time) {
				// Flatten/Emit old buffer
				times := make(timeList, 0, len(currentBuf.Points))
				for t := range currentBuf.Points {
					times = append(times, t)
				}
				sort.Sort(times)
				flatBatch := models.Batch{
					TMax:   currentBuf.Time,
					Name:   currentBuf.Name,
					Group:  currentBuf.Group,
					ByName: b.ByName,
					Tags:   currentBuf.Tags,
				}
				for _, t := range times {
					if fields, err := n.flatten(currentBuf.Points[t]); err != nil {
						return err
					} else {
						flatBatch.Points = append(flatBatch.Points, models.BatchPoint{
							Time:   t,
							Tags:   currentBuf.Tags,
							Fields: fields,
						})
					}
					delete(currentBuf.Points, t)
				}
				n.timer.Pause()
				for _, out := range n.outs {
					err := out.CollectBatch(flatBatch)
					if err != nil {
						return err
					}
				}
				n.timer.Resume()
				// Update currentBuf with new time
				currentBuf.Time = t
			}
			for _, p := range b.Points {
				t := p.Time.Round(n.f.Tolerance)
				currentBuf.Points[t] = append(currentBuf.Points[t], models.RawPoint{
					Time:   t,
					Fields: p.Fields,
					Tags:   p.Tags,
				})
			}
			n.timer.Stop()
		}
	}
	return nil
}