// addToIndexFromKey will pull the measurement name, series key, and field name
// from a composite key, and add it to the database index and measurement fields.
func (e *Engine) addToIndexFromKey(shardID uint64, key []byte, fieldType influxql.DataType, index *tsdb.DatabaseIndex) error {
	seriesKey, field := SeriesAndFieldFromCompositeKey(key)
	measurement := tsdb.MeasurementFromSeriesKey(string(seriesKey))

	m := index.CreateMeasurementIndexIfNotExists(measurement)
	m.SetFieldName(field)

	mf := e.measurementFields[measurement]
	if mf == nil {
		mf = tsdb.NewMeasurementFields()
		e.measurementFields[measurement] = mf
	}

	if err := mf.CreateFieldIfNotExists(field, fieldType, false); err != nil {
		return err
	}

	// Have we already indexed this series?
	ss := index.SeriesBytes(seriesKey)
	if ss != nil {
		// Add this shard to the existing series.
		ss.AssignShard(shardID)
		return nil
	}

	// Ignore the error because ParseKey returns "missing fields" and we don't
	// have fields (in line protocol format) in the series key.
	_, tags, _ := models.ParseKey(seriesKey)

	s := tsdb.NewSeries(string(seriesKey), tags)
	index.CreateSeriesIndexIfNotExists(measurement, s)
	s.AssignShard(shardID)

	return nil
}
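// The test below is a minimal sketch (not part of the original suite) of how the
// composite keys consumed by addToIndexFromKey decompose. It assumes the composite
// key is the series key and field name joined by the package's unexported
// keyFieldSeparator constant; the literals are illustrative only.
func TestSeriesAndFieldFromCompositeKey_Sketch(t *testing.T) {
	// Assumption: composite key = series key + keyFieldSeparator + field name.
	key := []byte("cpu,host=A" + keyFieldSeparator + "value")

	seriesKey, field := SeriesAndFieldFromCompositeKey(key)
	if exp, got := "cpu,host=A", fmt.Sprintf("%s", seriesKey); exp != got {
		t.Fatalf("series key mismatch: exp %v, got %v", exp, got)
	}
	if exp, got := "value", fmt.Sprintf("%s", field); exp != got {
		t.Fatalf("field mismatch: exp %v, got %v", exp, got)
	}
}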
func BenchmarkMeasurement_SeriesIDForExp_NERegex(b *testing.B) {
	m := tsdb.NewMeasurement("cpu")
	for i := 0; i < 100000; i++ {
		s := tsdb.NewSeries("cpu", models.Tags{models.Tag{
			Key:   []byte("host"),
			Value: []byte(fmt.Sprintf("host%d", i)),
		}})
		s.ID = uint64(i)
		m.AddSeries(s)
	}

	if exp, got := 100000, len(m.SeriesKeys()); exp != got {
		b.Fatalf("series count mismatch: exp %v got %v", exp, got)
	}

	stmt, err := influxql.NewParser(strings.NewReader(`SELECT * FROM cpu WHERE host !~ /foo\d+/`)).ParseStatement()
	if err != nil {
		b.Fatalf("invalid statement: %s", err)
	}
	selectStmt := stmt.(*influxql.SelectStatement)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		ids := m.IDsForExpr(selectStmt.Condition.(*influxql.BinaryExpr))
		if exp, got := 100000, len(ids); exp != got {
			b.Fatalf("series count mismatch: exp %v got %v", exp, got)
		}
	}
}
// addToIndexFromKey will pull the measurement name, series key, and field name
// from a composite key, and add it to the database index and measurement fields.
func (e *Engine) addToIndexFromKey(key string, fieldType influxql.DataType, index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error {
	seriesKey, field := seriesAndFieldFromCompositeKey(key)
	measurement := tsdb.MeasurementFromSeriesKey(seriesKey)

	m := index.CreateMeasurementIndexIfNotExists(measurement)
	m.SetFieldName(field)

	mf := measurementFields[measurement]
	if mf == nil {
		mf = &tsdb.MeasurementFields{
			Fields: map[string]*tsdb.Field{},
		}
		measurementFields[measurement] = mf
	}

	if err := mf.CreateFieldIfNotExists(field, fieldType, false); err != nil {
		return err
	}

	// Ignore the error because ParseKey returns "missing fields" and we don't
	// have fields (in line protocol format) in the series key.
	_, tags, _ := models.ParseKey(seriesKey)

	s := tsdb.NewSeries(seriesKey, tags)
	s.InitializeShards()
	index.CreateSeriesIndexIfNotExists(measurement, s)

	return nil
}
// Ensure engine can create an iterator with a condition.
func TestEngine_CreateIterator_Condition(t *testing.T) {
	t.Parallel()

	e := MustOpenEngine()
	defer e.Close()

	e.Index().CreateMeasurementIndexIfNotExists("cpu")
	e.Index().Measurement("cpu").SetFieldName("X")
	e.Index().Measurement("cpu").SetFieldName("Y")
	e.MeasurementFields("cpu").CreateFieldIfNotExists("value", influxql.Float, false)
	e.MeasurementFields("cpu").CreateFieldIfNotExists("X", influxql.Float, false)
	e.MeasurementFields("cpu").CreateFieldIfNotExists("Y", influxql.Float, false)
	si := e.Index().CreateSeriesIndexIfNotExists("cpu", tsdb.NewSeries("cpu,host=A", models.NewTags(map[string]string{"host": "A"})))
	si.AssignShard(1)

	if err := e.WritePointsString(
		`cpu,host=A value=1.1 1000000000`,
		`cpu,host=A X=10 1000000000`,
		`cpu,host=A Y=100 1000000000`,
		`cpu,host=A value=1.2 2000000000`,
		`cpu,host=A value=1.3 3000000000`,
		`cpu,host=A X=20 3000000000`,
		`cpu,host=A Y=200 3000000000`,
	); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	itr, err := e.CreateIterator(influxql.IteratorOptions{
		Expr:       influxql.MustParseExpr(`value`),
		Dimensions: []string{"host"},
		Condition:  influxql.MustParseExpr(`X = 10 OR Y > 150`),
		Sources:    []influxql.Source{&influxql.Measurement{Name: "cpu"}},
		StartTime:  influxql.MinTime,
		EndTime:    influxql.MaxTime,
		Ascending:  true,
	})
	if err != nil {
		t.Fatal(err)
	}
	fitr := itr.(influxql.FloatIterator)

	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(0): %v", err)
	} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
		t.Fatalf("unexpected point(0): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(1): %v", err)
	} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
		t.Fatalf("unexpected point(1): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("expected eof, got error: %v", err)
	} else if p != nil {
		t.Fatalf("expected eof: %v", p)
	}
}
// MustInitBenchmarkEngine creates a new engine and fills it with points.
// It reuses the previous engine if the same parameters were used.
func MustInitBenchmarkEngine(pointN int) *Engine {
	// Reuse engine, if available.
	if benchmark.Engine != nil {
		if benchmark.PointN == pointN {
			return benchmark.Engine
		}

		// Otherwise close and remove it.
		benchmark.Engine.Close()
		benchmark.Engine = nil
	}

	const batchSize = 1000
	if pointN%batchSize != 0 {
		panic(fmt.Sprintf("point count (%d) must be a multiple of batch size (%d)", pointN, batchSize))
	}

	e := MustOpenEngine()

	// Initialize metadata.
	e.Index().CreateMeasurementIndexIfNotExists("cpu")
	e.MeasurementFields("cpu").CreateFieldIfNotExists("value", influxql.Float, false)
	si := e.Index().CreateSeriesIndexIfNotExists("cpu", tsdb.NewSeries("cpu,host=A", models.NewTags(map[string]string{"host": "A"})))
	si.AssignShard(1)

	// Generate time-ascending points with jittered time & value.
	rand := rand.New(rand.NewSource(0))
	for i := 0; i < pointN; i += batchSize {
		var buf bytes.Buffer
		for j := 0; j < batchSize; j++ {
			fmt.Fprintf(&buf, "cpu,host=%s value=%d %d",
				hostNames[j%len(hostNames)],
				100+rand.Intn(50)-25,
				(time.Duration(i+j)*time.Second)+(time.Duration(rand.Intn(500)-250)*time.Millisecond),
			)
			if j != batchSize-1 {
				fmt.Fprint(&buf, "\n")
			}
		}

		if err := e.WritePointsString(buf.String()); err != nil {
			panic(err)
		}
	}

	if err := e.WriteSnapshot(); err != nil {
		panic(err)
	}

	// Force garbage collection.
	runtime.GC()

	// Save engine reference for reuse.
	benchmark.Engine = e
	benchmark.PointN = pointN

	return e
}
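// The benchmark below is a minimal usage sketch of MustInitBenchmarkEngine (a
// hypothetical benchmark, not taken from the original suite): the pre-filled
// engine is reused across runs, so only iterator creation and the read path
// are measured. The drain loop mirrors the FloatIterator usage in the tests
// above.
func BenchmarkEngine_CreateIterator_Sketch(b *testing.B) {
	e := MustInitBenchmarkEngine(100000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		itr, err := e.CreateIterator(influxql.IteratorOptions{
			Expr:       influxql.MustParseExpr(`value`),
			Dimensions: []string{"host"},
			Sources:    []influxql.Source{&influxql.Measurement{Name: "cpu"}},
			StartTime:  influxql.MinTime,
			EndTime:    influxql.MaxTime,
			Ascending:  true,
		})
		if err != nil {
			b.Fatal(err)
		}

		// Drain the iterator so the full read path is exercised.
		fitr := itr.(influxql.FloatIterator)
		for {
			p, err := fitr.Next()
			if err != nil {
				b.Fatal(err)
			} else if p == nil {
				break
			}
		}
		itr.Close()
	}
}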
// Ensure engine can create an iterator with auxiliary fields.
func TestEngine_CreateIterator_Aux(t *testing.T) {
	t.Parallel()

	e := MustOpenEngine()
	defer e.Close()

	e.Index().CreateMeasurementIndexIfNotExists("cpu")
	e.MeasurementFields("cpu").CreateFieldIfNotExists("value", influxql.Float, false)
	e.MeasurementFields("cpu").CreateFieldIfNotExists("F", influxql.Float, false)
	si := e.Index().CreateSeriesIndexIfNotExists("cpu", tsdb.NewSeries("cpu,host=A", models.NewTags(map[string]string{"host": "A"})))
	si.AssignShard(1)

	if err := e.WritePointsString(
		`cpu,host=A value=1.1 1000000000`,
		`cpu,host=A F=100 1000000000`,
		`cpu,host=A value=1.2 2000000000`,
		`cpu,host=A value=1.3 3000000000`,
		`cpu,host=A F=200 3000000000`,
	); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	itr, err := e.CreateIterator(influxql.IteratorOptions{
		Expr:       influxql.MustParseExpr(`value`),
		Aux:        []influxql.VarRef{{Val: "F"}},
		Dimensions: []string{"host"},
		Sources:    []influxql.Source{&influxql.Measurement{Name: "cpu"}},
		StartTime:  influxql.MinTime,
		EndTime:    influxql.MaxTime,
		Ascending:  true,
	})
	if err != nil {
		t.Fatal(err)
	}
	fitr := itr.(influxql.FloatIterator)

	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(0): %v", err)
	} else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) {
		t.Fatalf("unexpected point(0): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(1): %v", err)
	} else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) {
		t.Fatalf("unexpected point(1): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(2): %v", err)
	} else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) {
		t.Fatalf("unexpected point(2): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("expected eof, got error: %v", err)
	} else if p != nil {
		t.Fatalf("expected eof: %v", p)
	}
}
func genTestSeries(mCnt, tCnt, vCnt int) []*TestSeries {
	measurements := genStrList("measurement", mCnt)
	tagSets := NewTagSetGenerator(tCnt, vCnt).AllSets()
	series := []*TestSeries{}
	for _, m := range measurements {
		for _, ts := range tagSets {
			series = append(series, &TestSeries{
				Measurement: m,
				Series:      tsdb.NewSeries(fmt.Sprintf("%s:%s", m, string(tsdb.MarshalTags(ts))), models.NewTags(ts)),
			})
		}
	}
	return series
}
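// addGeneratedSeries is a hypothetical helper (its name and return shape are
// assumptions, not taken from the original suite) showing how the output of
// genTestSeries can be loaded into in-memory measurements, mirroring the
// AddSeries usage elsewhere in this file.
func addGeneratedSeries(series []*TestSeries) map[string]*tsdb.Measurement {
	measurements := make(map[string]*tsdb.Measurement)
	for i, ts := range series {
		m, ok := measurements[ts.Measurement]
		if !ok {
			m = tsdb.NewMeasurement(ts.Measurement)
			measurements[ts.Measurement] = m
		}

		// Assign a unique ID before indexing, as the tests and benchmarks above do.
		ts.Series.ID = uint64(i) + 1
		m.AddSeries(ts.Series)
	}
	return measurements
}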
func TestMeasurement_AppendSeriesKeysByID_Exists(t *testing.T) {
	m := tsdb.NewMeasurement("cpu")
	s := tsdb.NewSeries("cpu,host=foo", models.Tags{models.Tag{Key: []byte("host"), Value: []byte("foo")}})
	s.ID = 1
	m.AddSeries(s)

	var dst []string
	dst = m.AppendSeriesKeysByID(dst, []uint64{1})
	if exp, got := 1, len(dst); exp != got {
		t.Fatalf("series len mismatch: exp %v, got %v", exp, got)
	}

	if exp, got := "cpu,host=foo", dst[0]; exp != got {
		t.Fatalf("series mismatch: exp %v, got %v", exp, got)
	}
}
// Ensure engine can create a descending iterator for TSM values.
func TestEngine_CreateIterator_TSM_Descending(t *testing.T) {
	t.Parallel()

	e := MustOpenEngine()
	defer e.Close()

	e.Index().CreateMeasurementIndexIfNotExists("cpu")
	e.MeasurementFields("cpu").CreateFieldIfNotExists("value", influxql.Float, false)
	si := e.Index().CreateSeriesIndexIfNotExists("cpu", tsdb.NewSeries("cpu,host=A", models.NewTags(map[string]string{"host": "A"})))
	si.AssignShard(1)

	if err := e.WritePointsString(
		`cpu,host=A value=1.1 1000000000`,
		`cpu,host=A value=1.2 2000000000`,
		`cpu,host=A value=1.3 3000000000`,
	); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}
	e.MustWriteSnapshot()

	itr, err := e.CreateIterator(influxql.IteratorOptions{
		Expr:       influxql.MustParseExpr(`value`),
		Dimensions: []string{"host"},
		Sources:    []influxql.Source{&influxql.Measurement{Name: "cpu"}},
		StartTime:  influxql.MinTime,
		EndTime:    influxql.MaxTime,
		Ascending:  false,
	})
	if err != nil {
		t.Fatal(err)
	}
	fitr := itr.(influxql.FloatIterator)

	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(0): %v", err)
	} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
		t.Fatalf("unexpected point(0): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(1): %v", err)
	} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
		t.Fatalf("unexpected point(1): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(2): %v", err)
	} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
		t.Fatalf("unexpected point(2): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("expected eof, got error: %v", err)
	} else if p != nil {
		t.Fatalf("expected eof: %v", p)
	}
}