Example #1
// addToIndexFromKey will pull the measurement name, series key, and field name from a composite key and add it to the
// database index and measurement fields
func (e *DevEngine) addToIndexFromKey(key string, fieldType influxql.DataType, index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error {
	seriesKey, field := seriesAndFieldFromCompositeKey(key)
	measurement := tsdb.MeasurementFromSeriesKey(seriesKey)

	m := index.CreateMeasurementIndexIfNotExists(measurement)
	m.SetFieldName(field)

	mf := measurementFields[measurement]
	if mf == nil {
		mf = &tsdb.MeasurementFields{
			Fields: map[string]*tsdb.Field{},
		}
		measurementFields[measurement] = mf
	}

	if err := mf.CreateFieldIfNotExists(field, fieldType, false); err != nil {
		return err
	}

	_, tags, err := models.ParseKey(seriesKey)
	if err != nil {
		return err
	}

	s := tsdb.NewSeries(seriesKey, tags)
	s.InitializeShards()
	index.CreateSeriesIndexIfNotExists(measurement, s)

	return nil
}
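
addToIndexFromKey relies on a seriesAndFieldFromCompositeKey helper that is not shown in this example. A minimal sketch of what such a helper could look like, assuming the composite key is simply the series key and field name joined by a fixed separator (the separator constant below is a placeholder, not necessarily the engine's real one; requires the standard-library strings package):

// keyFieldSeparator is a hypothetical placeholder; the engine's actual
// separator constant is not shown in these examples.
const keyFieldSeparator = "#!~#"

// seriesAndFieldFromCompositeKey splits a composite key back into its series
// key and field name. Sketch only; the real implementation may differ.
func seriesAndFieldFromCompositeKey(key string) (string, string) {
	sep := strings.Index(key, keyFieldSeparator)
	if sep == -1 {
		// No field name encoded in the key.
		return key, ""
	}
	return key[:sep], key[sep+len(keyFieldSeparator):]
}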
Example #2
// LoadMetadataIndex loads the shard metadata into memory.
func (e *Engine) LoadMetadataIndex(index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error {
	return e.db.View(func(tx *bolt.Tx) error {
		// load measurement metadata
		meta := tx.Bucket([]byte("fields"))
		c := meta.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			m := index.CreateMeasurementIndexIfNotExists(string(k))
			mf := &tsdb.MeasurementFields{}
			if err := mf.UnmarshalBinary(v); err != nil {
				return err
			}
			for name := range mf.Fields {
				m.SetFieldName(name)
			}
			mf.Codec = tsdb.NewFieldCodec(mf.Fields)
			measurementFields[m.Name] = mf
		}

		// load series metadata
		meta = tx.Bucket([]byte("series"))
		c = meta.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			series := tsdb.NewSeries("", nil)
			if err := series.UnmarshalBinary(v); err != nil {
				return err
			}
			index.CreateSeriesIndexIfNotExists(tsdb.MeasurementFromSeriesKey(string(k)), series)
		}
		return nil
	})
}
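
LoadMetadataIndex assumes measurement fields and series were previously saved into the "fields" and "series" buckets as their binary encodings, keyed by measurement name and series key respectively. The save routine is not shown in these examples; a sketch of the assumed write path for the fields bucket, assuming MeasurementFields has a MarshalBinary counterpart to the UnmarshalBinary used above:

// saveFieldsSketch illustrates the assumed write path; it is not the engine's
// real method.
func saveFieldsSketch(e *Engine, measurementFields map[string]*tsdb.MeasurementFields) error {
	return e.db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("fields"))
		if err != nil {
			return err
		}
		for name, mf := range measurementFields {
			buf, err := mf.MarshalBinary() // assumed counterpart to UnmarshalBinary above
			if err != nil {
				return err
			}
			if err := b.Put([]byte(name), buf); err != nil {
				return err
			}
		}
		return nil
	})
}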
Example #3
// Ensure the engine can write series metadata and reload it.
func TestEngine_LoadMetadataIndex_Series(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	// Setup mock that writes the index
	seriesToCreate := []*tsdb.SeriesCreate{
		{Series: tsdb.NewSeries(string(models.MakeKey([]byte("cpu"), map[string]string{"host": "server0"})), map[string]string{"host": "server0"})},
		{Series: tsdb.NewSeries(string(models.MakeKey([]byte("cpu"), map[string]string{"host": "server1"})), map[string]string{"host": "server1"})},
		{Series: tsdb.NewSeries("series with spaces", nil)},
	}
	e.PointsWriter.WritePointsFn = func(a []models.Point) error { return e.WriteIndex(nil, nil, seriesToCreate) }

	// Write series metadata.
	if err := e.WritePoints(nil, nil, seriesToCreate); err != nil {
		t.Fatal(err)
	}

	// Load metadata index.
	index := tsdb.NewDatabaseIndex()
	if err := e.LoadMetadataIndex(index, make(map[string]*tsdb.MeasurementFields)); err != nil {
		t.Fatal(err)
	}

	// Verify index is correct.
	if m := index.Measurement("cpu"); m == nil {
		t.Fatal("measurement not found")
	} else if s := m.SeriesByID(1); s.Key != "cpu,host=server0" || !reflect.DeepEqual(s.Tags, map[string]string{"host": "server0"}) {
		t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags)
	} else if s = m.SeriesByID(2); s.Key != "cpu,host=server1" || !reflect.DeepEqual(s.Tags, map[string]string{"host": "server1"}) {
		t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags)
	}

	if m := index.Measurement("series with spaces"); m == nil {
		t.Fatal("measurement not found")
	} else if s := m.SeriesByID(3); s.Key != "series with spaces" {
		t.Fatalf("unexpected series: %q", s.Key)
	}
}
Example #4
func genTestSeries(mCnt, tCnt, vCnt int) []*TestSeries {
	measurements := genStrList("measurement", mCnt)
	tagSets := NewTagSetGenerator(tCnt, vCnt).AllSets()
	series := []*TestSeries{}
	for _, m := range measurements {
		for _, ts := range tagSets {
			series = append(series, &TestSeries{
				Measurement: m,
				Series:      tsdb.NewSeries(fmt.Sprintf("%s:%s", m, string(tsdb.MarshalTags(ts))), ts),
			})
		}
	}
	return series
}
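
genTestSeries depends on genStrList and NewTagSetGenerator helpers that are not included here. For reference, genStrList presumably returns n prefixed names ("measurement0", "measurement1", ...); a sketch under that assumption:

// genStrList returns n strings of the form prefix0, prefix1, ..., prefix(n-1).
// Sketch only; the test suite's actual helper may differ.
func genStrList(prefix string, n int) []string {
	out := make([]string, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, fmt.Sprintf("%s%d", prefix, i))
	}
	return out
}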
Example #5
// SeriesCreate returns a list of series to create across all points.
func (a PointsSlice) SeriesCreate() []*tsdb.SeriesCreate {
	// Create unique set of series.
	m := map[string]*tsdb.SeriesCreate{}
	for _, points := range a {
		for _, p := range points {
			if pp := p.Encode(); m[string(pp.Key())] == nil {
				m[string(pp.Key())] = &tsdb.SeriesCreate{Measurement: pp.Name(), Series: tsdb.NewSeries(string(pp.Key()), pp.Tags())}
			}
		}
	}

	// Convert to slice.
	slice := make([]*tsdb.SeriesCreate, 0, len(m))
	for _, v := range m {
		slice = append(slice, v)
	}
	return slice
}
Example #6
func TestWAL_DeleteSeries(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	var seriesToIndex []*tsdb.SeriesCreate
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		seriesToIndex = append(seriesToIndex, seriesToCreate...)
		return nil
	}}

	seriesToCreate := []*tsdb.SeriesCreate{
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("cpu"), map[string]string{"host": "A"})), map[string]string{"host": "A"})},
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("cpu"), map[string]string{"host": "B"})), map[string]string{"host": "B"})},
	}

	// test that we can write to two different series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=B value=0.9 2", codec)
	p3 := parsePoint("cpu,host=A value=25.3 4", codec)
	p4 := parsePoint("cpu,host=B value=1.0 3", codec)
	if err := log.WritePoints([]tsdb.Point{p1, p2, p3, p4}, nil, seriesToCreate); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	// ensure data is there
	c := log.Cursor("cpu,host=A")
	if k, _ := c.Next(); btou64(k) != 1 {
		t.Fatal("expected data point for cpu,host=A")
	}

	c = log.Cursor("cpu,host=B")
	if k, _ := c.Next(); btou64(k) != 2 {
		t.Fatal("expected data point for cpu,host=B")
	}

	// delete the series and ensure metadata was flushed and data is gone
	if err := log.DeleteSeries([]string{"cpu,host=B"}); err != nil {
		t.Fatalf("error deleting series: %s", err.Error())
	}

	// ensure data is there
	c = log.Cursor("cpu,host=A")
	if k, _ := c.Next(); btou64(k) != 1 {
		t.Fatal("expected data point for cpu,host=A")
	}

	// ensure series is deleted
	c = log.Cursor("cpu,host=B")
	if k, _ := c.Next(); k != nil {
		t.Fatal("expected no data for cpu,host=B")
	}

	// ensure that they were actually flushed to the index. compare field by field
	// because reflect.DeepEqual doesn't work well on these structs
	for i, s := range seriesToCreate {
		if seriesToIndex[i].Measurement != s.Measurement {
			t.Fatal("expected measurement to be the same")
		}
		if seriesToIndex[i].Series.Key != s.Series.Key {
			t.Fatal("expected series key to be the same")
		}
		if !reflect.DeepEqual(seriesToIndex[i].Series.Tags, s.Series.Tags) {
			t.Fatal("expected series tags to be the same")
		}
	}

	// close and re-open the WAL to ensure that the data didn't show back up
	if err := log.Close(); err != nil {
		t.Fatalf("error closing log: %s", err.Error())
	}

	if err := log.Open(); err != nil {
		t.Fatalf("error opening log: %s", err.Error())
	}

	// ensure data is there
	c = log.Cursor("cpu,host=A")
	if k, _ := c.Next(); btou64(k) != 1 {
		t.Fatal("expected data point for cpu,host=A")
	}

	// ensure series is deleted
	c = log.Cursor("cpu,host=B")
	if k, _ := c.Next(); k != nil {
		t.Fatal("expected no data for cpu,host=B")
	}
}
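
The WAL tests in this example and the next install log.Index as a *testIndexWriter, whose definition is not shown. A minimal sketch of such a mock, assuming the WAL's index writer is satisfied by a single WriteIndex-style method matching the callback signature used in the tests:

// testIndexWriter delegates index writes to a caller-supplied function so the
// tests can capture what the WAL flushes. Sketch only; the exact interface and
// method name are assumptions.
type testIndexWriter struct {
	fn func(pointsByKey map[string][][]byte,
		measurementFieldsToSave map[string]*tsdb.MeasurementFields,
		seriesToCreate []*tsdb.SeriesCreate) error
}

func (t *testIndexWriter) WriteIndex(pointsByKey map[string][][]byte,
	measurementFieldsToSave map[string]*tsdb.MeasurementFields,
	seriesToCreate []*tsdb.SeriesCreate) error {
	return t.fn(pointsByKey, measurementFieldsToSave, seriesToCreate)
}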
Example #7
func TestWAL_SeriesAndFieldsGetPersisted(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	var measurementsToIndex map[string]*tsdb.MeasurementFields
	var seriesToIndex []*tsdb.SeriesCreate
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		measurementsToIndex = measurementFieldsToSave
		seriesToIndex = append(seriesToIndex, seriesToCreate...)
		return nil
	}}

	// test that we can write to two different series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=A value=25.3 4", codec)
	p3 := parsePoint("cpu,host=B value=1.0 1", codec)

	seriesToCreate := []*tsdb.SeriesCreate{
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("cpu"), map[string]string{"host": "A"})), map[string]string{"host": "A"})},
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("cpu"), map[string]string{"host": "B"})), map[string]string{"host": "B"})},
	}

	measurementsToCreate := map[string]*tsdb.MeasurementFields{
		"cpu": {
			Fields: map[string]*tsdb.Field{
				"value": {ID: 1, Name: "value"},
			},
		},
	}

	if err := log.WritePoints([]tsdb.Point{p1, p2, p3}, measurementsToCreate, seriesToCreate); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	// now close it and see if loading the metadata index will populate the measurement and series info
	log.Close()

	idx := tsdb.NewDatabaseIndex()
	mf := make(map[string]*tsdb.MeasurementFields)

	if err := log.LoadMetadataIndex(idx, mf); err != nil {
		t.Fatalf("error loading metadata index: %s", err.Error())
	}

	s := idx.Series("cpu,host=A")
	if s == nil {
		t.Fatal("expected to find series cpu,host=A in index")
	}

	s = idx.Series("cpu,host=B")
	if s == nil {
		t.Fatal("expected to find series cpu,host=B in index")
	}

	m := mf["cpu"]
	if m == nil {
		t.Fatal("expected to find measurement fields for cpu", mf)
	}
	if m.Fields["value"] == nil {
		t.Fatal("expected to find field definition for 'value'")
	}

	// ensure that they were actually flushed to the index. compare field by field
	// because reflect.DeepEqual doesn't work well on these structs
	for i, s := range seriesToCreate {
		if seriesToIndex[i].Measurement != s.Measurement {
			t.Fatal("expected measurement to be the same")
		}
		if seriesToIndex[i].Series.Key != s.Series.Key {
			t.Fatal("expected series key to be the same")
		}
		if !reflect.DeepEqual(seriesToIndex[i].Series.Tags, s.Series.Tags) {
			t.Fatal("expected series tags to be the same")
		}
	}

	// ensure that the measurement fields were flushed to the index
	for k, v := range measurementsToCreate {
		m := measurementsToIndex[k]
		if m == nil {
			t.Fatalf("measurement %s wasn't indexed", k)
		}

		if !reflect.DeepEqual(m.Fields, v.Fields) {
			t.Fatal("measurement fields not equal")
		}
	}

	// now open and close the log and try to reload the metadata index, which should now be empty
	if err := log.Open(); err != nil {
		t.Fatalf("error opening log: %s", err.Error())
	}
	if err := log.Close(); err != nil {
		t.Fatalf("error closing log: %s", err.Error())
	}

	idx = tsdb.NewDatabaseIndex()
	mf = make(map[string]*tsdb.MeasurementFields)

	if err := log.LoadMetadataIndex(idx, mf); err != nil {
		t.Fatalf("error loading metadata index: %s", err.Error())
	}

	if len(idx.Measurements()) != 0 || len(mf) != 0 {
		t.Fatal("expected index and measurement fields to be empty")
	}
}
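
Examples #6 and #7 also call a parsePoint(line, codec) helper that is not shown (Example #9 uses a single-argument variant built on models.Point). A sketch of the two-argument version, using only calls that appear elsewhere in these examples: parse one line-protocol point, pre-encode its fields with the codec, and attach the encoded bytes. The precision string and return type are assumptions.

// parsePoint parses one line-protocol point and pre-encodes its fields so the
// WAL can store the raw bytes. Sketch only; panics keep the helper short.
func parsePoint(buf string, codec *tsdb.FieldCodec) tsdb.Point {
	points, err := tsdb.ParsePointsWithPrecision([]byte(buf), time.Now().UTC(), "n")
	if err != nil {
		panic(err)
	}
	data, err := codec.EncodeFields(points[0].Fields())
	if err != nil {
		panic(err)
	}
	points[0].SetData(data)
	return points[0]
}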
Example #8
// Ensure points can be written to the engine and queried in reverse order.
func TestEngine_WritePoints_Reverse(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	// Create metadata.
	mf := &tsdb.MeasurementFields{Fields: make(map[string]*tsdb.Field)}
	mf.CreateFieldIfNotExists("value", influxql.Float)
	seriesToCreate := []*tsdb.SeriesCreate{
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("temperature"), nil)), nil)},
	}

	// Parse point.
	points, err := tsdb.ParsePointsWithPrecision([]byte("temperature value=100 0"), time.Now().UTC(), "s")
	if err != nil {
		t.Fatal(err)
	} else if data, err := mf.Codec.EncodeFields(points[0].Fields()); err != nil {
		t.Fatal(err)
	} else {
		points[0].SetData(data)
	}

	// Write original value.
	if err := e.WritePoints(points, map[string]*tsdb.MeasurementFields{"temperature": mf}, seriesToCreate); err != nil {
		t.Fatal(err)
	}

	// Flush to disk.
	if err := e.Flush(0); err != nil {
		t.Fatal(err)
	}

	// Parse new point.
	points, err = tsdb.ParsePointsWithPrecision([]byte("temperature value=200 1"), time.Now().UTC(), "s")
	if err != nil {
		t.Fatal(err)
	} else if data, err := mf.Codec.EncodeFields(points[0].Fields()); err != nil {
		t.Fatal(err)
	} else {
		points[0].SetData(data)
	}

	// Write the new point.
	if err := e.WritePoints(points, nil, nil); err != nil {
		t.Fatal(err)
	}

	// Ensure only the updated value is read.
	tx := e.MustBegin(false)
	defer tx.Rollback()

	c := tx.Cursor("temperature", tsdb.Reverse)
	if k, _ := c.Seek(u64tob(math.MaxInt64)); !bytes.Equal(k, u64tob(uint64(time.Unix(1, 0).UnixNano()))) {
		t.Fatalf("unexpected key: %v", btou64(k))
	} else if k, v := c.Next(); !bytes.Equal(k, u64tob(uint64(time.Unix(0, 0).UnixNano()))) {
		t.Fatalf("unexpected key: %#v", k)
	} else if m, err := mf.Codec.DecodeFieldsWithNames(v); err != nil {
		t.Fatal(err)
	} else if m["value"] != float64(100) {
		t.Errorf("unexpected value: %#v", m)
	}

	if k, v := c.Next(); k != nil {
		t.Fatalf("unexpected key/value: %#v / %#v", k, v)
	}
}
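
Several of these tests build seek keys with u64tob and decode cursor keys with btou64. Those helpers are not shown; they are presumably the usual 8-byte big-endian conversions, sketched here (requires encoding/binary):

// u64tob converts a uint64 into an 8-byte big-endian slice.
func u64tob(v uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, v)
	return b
}

// btou64 converts an 8-byte big-endian slice back into a uint64.
func btou64(b []byte) uint64 {
	return binary.BigEndian.Uint64(b)
}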
Example #9
func TestEngine_Deletes(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	fields := []string{"value"}
	// Create metadata.
	mf := &tsdb.MeasurementFields{Fields: make(map[string]*tsdb.Field)}
	mf.CreateFieldIfNotExists("value", influxql.Float, false)
	atag := map[string]string{"host": "A"}
	btag := map[string]string{"host": "B"}
	seriesToCreate := []*tsdb.SeriesCreate{
		{Series: tsdb.NewSeries(string(models.MakeKey([]byte("cpu"), atag)), atag)},
		{Series: tsdb.NewSeries(string(models.MakeKey([]byte("cpu"), btag)), btag)},
	}

	p1 := parsePoint("cpu,host=A value=1.1 1000000001")
	p2 := parsePoint("cpu,host=A value=1.2 2000000001")
	p3 := parsePoint("cpu,host=B value=2.1 1000000000")
	p4 := parsePoint("cpu,host=B value=2.1 2000000000")

	e.SkipCompaction = true
	e.WAL.SkipCache = false

	if err := e.WritePoints([]models.Point{p1, p3}, map[string]*tsdb.MeasurementFields{"cpu": mf}, seriesToCreate); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	func() {
		tx, _ := e.Begin(false)
		defer tx.Rollback()
		c := tx.Cursor("cpu,host=A", fields, nil, true)
		k, _ := c.SeekTo(0)
		if k != p1.UnixNano() {
			t.Fatalf("time wrong:\n\texp:%d\n\tgot:%d\n", p1.UnixNano(), k)
		}
	}()

	if err := e.DeleteSeries([]string{"cpu,host=A"}); err != nil {
		t.Fatalf("failed to delete series: %s", err.Error())
	}

	func() {
		tx, _ := e.Begin(false)
		defer tx.Rollback()
		c := tx.Cursor("cpu,host=B", fields, nil, true)
		k, _ := c.SeekTo(0)
		if k != p3.UnixNano() {
			t.Fatalf("time wrong:\n\texp:%d\n\tgot:%d\n", p1.UnixNano(), k)
		}
		c = tx.Cursor("cpu,host=A", fields, nil, true)
		k, _ = c.SeekTo(0)
		if k != tsdb.EOF {
			t.Fatal("expected EOF", k)
		}
	}()

	if err := e.WritePoints([]models.Point{p2, p4}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	if err := e.WAL.Flush(); err != nil {
		t.Fatalf("error flushing wal: %s", err.Error())
	}

	func() {
		tx, _ := e.Begin(false)
		defer tx.Rollback()
		c := tx.Cursor("cpu,host=A", fields, nil, true)
		k, _ := c.SeekTo(0)
		if k != p2.UnixNano() {
			t.Fatalf("time wrong:\n\texp:%d\n\tgot:%d\n", p1.UnixNano(), k)
		}
	}()

	if err := e.DeleteSeries([]string{"cpu,host=A"}); err != nil {
		t.Fatalf("failed to delete series: %s", err.Error())
	}

	// we already know the delete on the WAL works. close and re-open so the
	// WAL flushes to the index, verifying that the delete is persisted all
	// the way through the index.

	if err := e.Engine.Close(); err != nil {
		t.Fatalf("error closing: %s", err.Error())
	}
	if err := e.Open(); err != nil {
		t.Fatalf("error opening: %s", err.Error())
	}

	verify := func() {
		tx, _ := e.Begin(false)
		defer tx.Rollback()
		c := tx.Cursor("cpu,host=B", fields, nil, true)
		k, _ := c.SeekTo(0)
		if k != p3.UnixNano() {
			t.Fatalf("time wrong:\n\texp:%d\n\tgot:%d\n", p1.UnixNano(), k)
		}
		c = tx.Cursor("cpu,host=A", fields, nil, true)
		k, _ = c.SeekTo(0)
		if k != tsdb.EOF {
			t.Fatal("expected EOF")
		}
	}

	fmt.Println("verify 1")
	verify()

	// open and close to verify the delete was persisted
	if err := e.Engine.Close(); err != nil {
		t.Fatalf("error closing: %s", err.Error())
	}
	if err := e.Open(); err != nil {
		t.Fatalf("error opening: %s", err.Error())
	}

	fmt.Println("verify 2")
	verify()

	if err := e.DeleteSeries([]string{"cpu,host=B"}); err != nil {
		t.Fatalf("failed to delete series: %s", err.Error())
	}

	func() {
		tx, _ := e.Begin(false)
		defer tx.Rollback()
		c := tx.Cursor("cpu,host=B", fields, nil, true)
		k, _ := c.SeekTo(0)
		if k != tsdb.EOF {
			t.Fatal("expected EOF")
		}
	}()

	if err := e.WAL.Flush(); err != nil {
		t.Fatalf("error flushing: %s", err.Error())
	}

	func() {
		tx, _ := e.Begin(false)
		defer tx.Rollback()
		c := tx.Cursor("cpu,host=B", fields, nil, true)
		k, _ := c.SeekTo(0)
		if k != tsdb.EOF {
			t.Fatal("expected EOF")
		}
	}()

	// open and close to verify the delete was persisted
	if err := e.Engine.Close(); err != nil {
		t.Fatalf("error closing: %s", err.Error())
	}
	if err := e.Open(); err != nil {
		t.Fatalf("error opening: %s", err.Error())
	}

	func() {
		tx, _ := e.Begin(false)
		defer tx.Rollback()
		c := tx.Cursor("cpu,host=B", fields, nil, true)
		k, _ := c.SeekTo(0)
		if k != tsdb.EOF {
			t.Fatal("expected EOF")
		}
	}()
}
Example #10
// Ensure points can be written to the engine and queried.
func TestEngine_WritePoints(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	// Create metadata.
	mf := &tsdb.MeasurementFields{Fields: make(map[string]*tsdb.Field)}
	mf.CreateFieldIfNotExists("value", influxql.Float)
	seriesToCreate := []*tsdb.SeriesCreate{
		{Series: tsdb.NewSeries(string(models.MakeKey([]byte("temperature"), nil)), nil)},
	}

	// Parse point.
	points, err := models.ParsePointsWithPrecision([]byte("temperature value=100 1434059627"), time.Now().UTC(), "s")
	if err != nil {
		t.Fatal(err)
	} else if data, err := mf.Codec.EncodeFields(points[0].Fields()); err != nil {
		t.Fatal(err)
	} else {
		points[0].SetData(data)
	}

	// Write original value.
	if err := e.WritePoints(points, map[string]*tsdb.MeasurementFields{"temperature": mf}, seriesToCreate); err != nil {
		t.Fatal(err)
	}

	// Flush to disk.
	if err := e.Flush(0); err != nil {
		t.Fatal(err)
	}

	// Parse new point.
	points, err = models.ParsePointsWithPrecision([]byte("temperature value=200 1434059627"), time.Now().UTC(), "s")
	if err != nil {
		t.Fatal(err)
	} else if data, err := mf.Codec.EncodeFields(points[0].Fields()); err != nil {
		t.Fatal(err)
	} else {
		points[0].SetData(data)
	}

	// Update existing value.
	if err := e.WritePoints(points, nil, nil); err != nil {
		t.Fatal(err)
	}

	// Ensure only the updated value is read.
	tx := e.MustBegin(false)
	defer tx.Rollback()

	c := tx.Cursor("temperature", []string{"value"}, mf.Codec, true)
	if k, v := c.SeekTo(0); k != 1434059627000000000 {
		t.Fatalf("unexpected key: %#v", k)
	} else if v == nil || v.(float64) != 200 {
		t.Errorf("unexpected value: %#v", v)
	}

	if k, v := c.Next(); k != tsdb.EOF {
		t.Fatalf("unexpected key/value: %#v / %#v", k, v)
	}
}