Example #1
// mapBatch partitions the batch's points round-robin across m.parallel
// map inputs, runs the map function concurrently on each partition, and
// forwards the combined MapResult to all child nodes.
func (m *MapNode) mapBatch(b models.Batch) error {
	if len(b.Points) == 0 {
		return nil
	}
	m.timer.Start()
	done := make(chan bool, m.parallel)
	mr := &MapResult{
		Outs: make([]interface{}, m.parallel),
	}
	inputs := make([]tsdb.MapInput, m.parallel)
	j := 0
	for _, p := range b.Points {
		value, ok := p.Fields[m.field]
		if !ok {
			// Stop the timer before returning so it is not left running.
			m.timer.Stop()
			fields := make([]string, 0, len(p.Fields))
			for field := range p.Fields {
				fields = append(fields, field)
			}
			return fmt.Errorf("unknown field %s, available fields %v", m.field, fields)
		}
		item := tsdb.MapItem{
			Timestamp: p.Time.Unix(),
			Value:     value,
			Fields:    p.Fields,
			Tags:      p.Tags,
		}
		inputs[j].Items = append(inputs[j].Items, item)
		inputs[j].TMin = -1
		j = (j + 1) % m.parallel
	}

	mr.Name = b.Name
	mr.Group = b.Group
	mr.TMax = b.TMax
	mr.Tags = b.Tags
	mr.Dims = models.SortedKeys(b.Tags)

	for i := 0; i < m.parallel; i++ {
		go func(i int) {
			mr.Outs[i] = m.f(&inputs[i])
			done <- true
		}(i)
	}

	finished := 0
	for finished != m.parallel {
		<-done
		finished++
	}

	m.timer.Stop()
	for _, child := range m.outs {
		err := child.CollectMaps(mr)
		if err != nil {
			return err
		}
	}
	return nil
}
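Every example on this page calls models.SortedKeys to turn a tag map into a deterministic, ordered list of dimension names. A minimal sketch of such a helper, assuming it simply returns the map's keys in ascending order (the real Kapacitor implementation may differ in detail):

package models

import "sort"

// SortedKeys returns the keys of a tag map in ascending order, so the
// same tag set always produces the same dimension list.
func SortedKeys(tags map[string]string) []string {
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}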
Example #2
// setGroupOnPoint computes and assigns the point's group ID from the
// given dimensions. If allDimensions is set, every tag key on the point
// becomes a dimension.
func setGroupOnPoint(p models.Point, allDimensions bool, dimensions []string) models.Point {
	if allDimensions {
		dimensions = models.SortedKeys(p.Tags)
	}
	p.Group = models.TagsToGroupID(dimensions, p.Tags)
	p.Dimensions = dimensions
	return p
}
Example #3
// setGroupOnPoint assigns the point's group ID. This variant takes a
// models.Dimensions value (which also carries the ByName flag) and
// builds the ID with models.ToGroupID instead of TagsToGroupID.
func setGroupOnPoint(p models.Point, allDimensions bool, dimensions models.Dimensions) models.Point {
	if allDimensions {
		dimensions.TagNames = models.SortedKeys(p.Tags)
	}
	p.Group = models.ToGroupID(p.Name, p.Tags, dimensions)
	p.Dimensions = dimensions
	return p
}
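Both variants reduce a point's tags to a stable models.GroupID. A minimal sketch of what a helper like TagsToGroupID can look like, assuming the ID is just the ordered dim=value pairs concatenated (the actual encoding in Kapacitor may differ):

package models

import "bytes"

type GroupID string

// TagsToGroupID builds a stable group identifier from the given
// dimension names and tag values. The dims slice is expected to be
// sorted so equal tag sets map to equal IDs.
func TagsToGroupID(dims []string, tags map[string]string) GroupID {
	var buf bytes.Buffer
	for _, d := range dims {
		buf.WriteString(d)
		buf.WriteByte('=')
		buf.WriteString(tags[d])
		buf.WriteByte(',')
	}
	return GroupID(buf.String())
}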
Example #4
// Replay the batch data from a single source
func (r *replayBatchSource) replayBatchFromData(data io.ReadCloser, batch BatchCollector, recTime bool) {
	defer batch.Close()
	defer data.Close()

	in := bufio.NewScanner(data)

	// Find relative times
	start := time.Time{}
	var diff time.Duration
	zero := r.clck.Zero()

	for in.Scan() {
		var b models.Batch
		err := json.Unmarshal(in.Bytes(), &b)
		if err != nil {
			r.allErrs <- err
			return
		}
		if len(b.Points) == 0 {
			// Skip empty batches
			continue
		}
		b.Group = models.TagsToGroupID(models.SortedKeys(b.Tags), b.Tags)

		if start.IsZero() {
			// Offset between the first recorded time and the replay
			// clock's zero time; used to shift times into "now".
			start = b.Points[0].Time
			diff = zero.Sub(start)
		}
		// Add tags to all points
		if len(b.Tags) > 0 {
			for i := range b.Points {
				if len(b.Points[i].Tags) == 0 {
					b.Points[i].Tags = b.Tags
				}
			}
		}
		var lastTime time.Time
		if !recTime {
			// Rewrite recorded times so the replay appears to happen now.
			for i := range b.Points {
				b.Points[i].Time = b.Points[i].Time.Add(diff).UTC()
			}
			lastTime = b.Points[len(b.Points)-1].Time
		} else {
			// Keep recorded times on the points, but still pace the
			// replay against the shifted schedule.
			lastTime = b.Points[len(b.Points)-1].Time.Add(diff).UTC()
		}
		r.clck.Until(lastTime)
		b.TMax = b.Points[len(b.Points)-1].Time
		batch.CollectBatch(b)
	}
	r.allErrs <- in.Err()
}
Example #5
// mapBatch partitions the batch's points round-robin across m.parallel
// map inputs and runs the map function concurrently on each partition.
// A missing field is passed through as a nil Value rather than being
// reported as an error.
func (m *MapNode) mapBatch(b models.Batch) error {
	if len(b.Points) == 0 {
		return nil
	}
	done := make(chan bool, m.parallel)
	mr := &MapResult{
		Outs: make([]interface{}, m.parallel),
	}
	inputs := make([]tsdb.MapInput, m.parallel)
	j := 0
	for _, p := range b.Points {
		item := tsdb.MapItem{
			Timestamp: p.Time.Unix(),
			Value:     p.Fields[m.field],
			Fields:    p.Fields,
			Tags:      p.Tags,
		}
		inputs[j].Items = append(inputs[j].Items, item)
		inputs[j].TMin = -1
		j = (j + 1) % m.parallel
	}

	mr.Name = b.Name
	mr.Group = b.Group
	mr.TMax = b.TMax
	mr.Tags = b.Tags
	mr.Dims = models.SortedKeys(b.Tags)

	for i := 0; i < m.parallel; i++ {
		go func(i int) {
			mr.Outs[i] = m.f(&inputs[i])
			done <- true
		}(i)
	}

	finished := 0
	for finished != m.parallel {
		<-done
		finished++
	}

	for _, child := range m.outs {
		err := child.CollectMaps(mr)
		if err != nil {
			return err
		}
	}
	return nil
}
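In both mapBatch variants, the buffered done channel plus a counter acts as a fan-in barrier. The same synchronization is more commonly written with a sync.WaitGroup; a self-contained sketch of the pattern, with a hypothetical runParallel standing in for the node's fields:

package main

import (
	"fmt"
	"sync"
)

// runParallel applies f to each input concurrently and waits for all
// goroutines to finish, mirroring the fan-out/fan-in in mapBatch.
func runParallel(inputs []int, f func(int) int) []int {
	outs := make([]int, len(inputs))
	var wg sync.WaitGroup
	wg.Add(len(inputs))
	for i := range inputs {
		go func(i int) {
			defer wg.Done()
			outs[i] = f(inputs[i])
		}(i)
	}
	wg.Wait()
	return outs
}

func main() {
	fmt.Println(runParallel([]int{1, 2, 3}, func(x int) int { return 2 * x }))
}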
Example #6
// readBatchFromIO decodes a stream of JSON-encoded batches from data,
// fills in missing group IDs and tags, and sends each non-empty batch
// on the batches channel, closing it when the stream ends.
func readBatchFromIO(data io.ReadCloser, batches chan<- models.Batch) error {
	defer close(batches)
	defer data.Close()
	dec := json.NewDecoder(data)
	for dec.More() {
		var b models.Batch
		err := dec.Decode(&b)
		if err != nil {
			return err
		}
		if len(b.Points) == 0 {
			// Skip empty batches
			continue
		}
		if b.Group == "" {
			b.Group = models.ToGroupID(
				b.Name,
				b.Tags,
				models.Dimensions{
					ByName:   b.ByName,
					TagNames: models.SortedKeys(b.Tags),
				},
			)
		}
		// Add tags to all points
		if len(b.Tags) > 0 {
			for i := range b.Points {
				if len(b.Points[i].Tags) == 0 {
					b.Points[i].Tags = b.Tags
				}
			}
		}
		batches <- b
	}
	return nil
}
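A caller typically runs readBatchFromIO in its own goroutine and ranges over the channel until it is closed. A usage sketch, where f is an already opened io.ReadCloser and process is a hypothetical per-batch consumer:

batches := make(chan models.Batch)
errC := make(chan error, 1)
go func() {
	errC <- readBatchFromIO(f, batches)
}()
for b := range batches {
	process(b) // hypothetical consumer
}
// The channel is closed before the function returns, so the error
// (or nil) is available once the range loop exits.
if err := <-errC; err != nil {
	log.Fatal(err)
}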
Example #7
// runGroupBy consumes points or batches from the node's input edge and
// regroups them by the configured dimensions before forwarding them to
// all child nodes.
func (g *GroupByNode) runGroupBy([]byte) error {
	switch g.Wants() {
	case pipeline.StreamEdge:
		// Stream edge: stamp each point's group in place and forward it.
		for pt, ok := g.ins[0].NextPoint(); ok; pt, ok = g.ins[0].NextPoint() {
			g.timer.Start()
			pt = setGroupOnPoint(pt, g.allDimensions, g.dimensions)
			g.timer.Stop()
			for _, child := range g.outs {
				err := child.CollectPoint(pt)
				if err != nil {
					return err
				}
			}
		}
	default:
		// Batch edge: buffer points into per-group batches, emitting
		// all buffered groups whenever a batch with a new TMax arrives.
		var lastTime time.Time
		groups := make(map[models.GroupID]*models.Batch)
		for b, ok := g.ins[0].NextBatch(); ok; b, ok = g.ins[0].NextBatch() {
			g.timer.Start()
			if !b.TMax.Equal(lastTime) {
				lastTime = b.TMax
				// Emit all groups
				for id, group := range groups {
					for _, child := range g.outs {
						err := child.CollectBatch(*group)
						if err != nil {
							return err
						}
					}
					// Remove from groups
					delete(groups, id)
				}
			}
			for _, p := range b.Points {
				var dims []string
				if g.allDimensions {
					dims = models.SortedKeys(p.Tags)
				} else {
					dims = g.dimensions
				}
				groupID := models.TagsToGroupID(dims, p.Tags)
				group, ok := groups[groupID]
				if !ok {
					tags := make(map[string]string, len(dims))
					for _, dim := range dims {
						tags[dim] = p.Tags[dim]
					}
					group = &models.Batch{
						Name:  b.Name,
						Group: groupID,
						TMax:  b.TMax,
						Tags:  tags,
					}
					groups[groupID] = group
				}
				group.Points = append(group.Points, p)
			}
			g.timer.Stop()
		}
	}
	return nil
}