Example #1
// LoadMetadataIndex loads the shard metadata into memory.
func (e *Engine) LoadMetadataIndex(index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error {
	return e.db.View(func(tx *bolt.Tx) error {
		// Load measurement metadata
		meta := tx.Bucket([]byte("fields"))
		c := meta.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			m := index.CreateMeasurementIndexIfNotExists(string(k))
			mf := &tsdb.MeasurementFields{}
			if err := mf.UnmarshalBinary(v); err != nil {
				return err
			}
			for name := range mf.Fields {
				m.SetFieldName(name)
			}
			mf.Codec = tsdb.NewFieldCodec(mf.Fields)
			measurementFields[m.Name] = mf
		}

		// Load series metadata
		meta = tx.Bucket([]byte("series"))
		c = meta.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			series := &tsdb.Series{}
			if err := series.UnmarshalBinary(v); err != nil {
				return err
			}
			index.CreateSeriesIndexIfNotExists(tsdb.MeasurementFromSeriesKey(string(k)), series)
		}
		return nil
	})
}
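
// Note: a minimal sketch of how a caller might drive this loader; the
// loadShardMeta helper below is hypothetical and only illustrates the
// expected call pattern (build an empty in-memory index, then let the
// engine populate it).
func loadShardMeta(e *Engine) (*tsdb.DatabaseIndex, map[string]*tsdb.MeasurementFields, error) {
	index := tsdb.NewDatabaseIndex()
	fields := make(map[string]*tsdb.MeasurementFields)
	if err := e.LoadMetadataIndex(index, fields); err != nil {
		return nil, nil, err
	}
	return index, fields, nil
}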
Example #2
func TestWAL_QueryDuringCompaction(t *testing.T) {
	log := openTestWAL()
	log.partitionCount = 1
	defer log.Close()
	defer os.RemoveAll(log.path)

	var points []map[string][][]byte
	finishCompaction := make(chan struct{})
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		points = append(points, pointsByKey)
		finishCompaction <- struct{}{}
		return nil
	}}

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	// write a single point to one series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	if err := log.WritePoints([]tsdb.Point{p1}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	verify := func() {
		c := log.Cursor("cpu,host=A")
		k, v := c.Seek(inttob(1))
		// ensure the series are there and points are in order
		if !bytes.Equal(v, p1.Data()) {
			<-finishCompaction
			t.Fatalf("expected to seek to first point but got key and value: %v %v", k, v)
		}
	}

	verify()
	go func() {
		log.Flush()
	}()

	time.Sleep(100 * time.Millisecond)
	verify()
	<-finishCompaction
	verify()
}
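
// The tests in these examples assign log.Index to a testIndexWriter stub
// that is not shown. A minimal sketch consistent with its call sites
// (the name of the interface it satisfies is an assumption):
type testIndexWriter struct {
	fn func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error
}

func (t *testIndexWriter) WriteIndex(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
	return t.fn(pointsByKey, measurementFieldsToSave, seriesToCreate)
}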
Example #3
// LoadMetadataIndex loads the new series and fields files into memory and flushes them to the BoltDB index.
// This function should be called before making a call to Open().
func (l *Log) LoadMetadataIndex(index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error {
	metaFiles, err := l.metadataFiles()
	if err != nil {
		return err
	}

	measurementFieldsToSave := make(map[string]*tsdb.MeasurementFields)
	var seriesToCreate []*tsdb.SeriesCreate

	// read all the metafiles off disk
	for _, fn := range metaFiles {
		a, err := l.readMetadataFile(fn)
		if err != nil {
			return err
		}

		// loop through the seriesAndFields and add them to the index and the collection to be written to the index
		for _, sf := range a {
			for k, mf := range sf.Fields {
				measurementFieldsToSave[k] = mf

				m := index.CreateMeasurementIndexIfNotExists(string(k))
				for name := range mf.Fields {
					m.SetFieldName(name)
				}
				mf.Codec = tsdb.NewFieldCodec(mf.Fields)
				measurementFields[m.Name] = mf
			}

			for _, sc := range sf.Series {
				seriesToCreate = append(seriesToCreate, sc)

				sc.Series.InitializeShards()
				index.CreateSeriesIndexIfNotExists(tsdb.MeasurementFromSeriesKey(string(sc.Series.Key)), sc.Series)
			}
		}
	}

	if err := l.Index.WriteIndex(nil, measurementFieldsToSave, seriesToCreate); err != nil {
		return err
	}

	// now remove all the old metafiles
	for _, fn := range metaFiles {
		if err := os.Remove(fn); err != nil {
			return err
		}
	}

	return nil
}
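
// A sketch of the call order implied by the doc comment above: load the
// WAL's metadata into the index first, then open the log. The helper is
// hypothetical; only the ordering is taken from the documentation.
func openWithMetadata(l *Log, index *tsdb.DatabaseIndex, fields map[string]*tsdb.MeasurementFields) error {
	// LoadMetadataIndex must run before Open().
	if err := l.LoadMetadataIndex(index, fields); err != nil {
		return err
	}
	return l.Open()
}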
Example #4
// LoadMetadataIndex loads the shard metadata into memory.
func (e *Engine) LoadMetadataIndex(index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error {
	if err := e.db.View(func(tx *bolt.Tx) error {
		// Load measurement metadata
		fields, err := e.readFields(tx)
		if err != nil {
			return err
		}
		for k, mf := range fields {
			m := index.CreateMeasurementIndexIfNotExists(k)
			for name := range mf.Fields {
				m.SetFieldName(name)
			}
			mf.Codec = tsdb.NewFieldCodec(mf.Fields)
			measurementFields[m.Name] = mf
		}

		// Load series metadata
		series, err := e.readSeries(tx)
		if err != nil {
			return err
		}

		// Load the series into the in-memory index in sorted order to ensure
		// it's always consistent for testing purposes
		a := make([]string, 0, len(series))
		for k := range series {
			a = append(a, k)
		}
		sort.Strings(a)
		for _, key := range a {
			s := series[key]
			s.InitializeShards()
			index.CreateSeriesIndexIfNotExists(tsdb.MeasurementFromSeriesKey(key), s)
		}
		return nil
	}); err != nil {
		return err
	}

	// now flush the metadata that was in the WAL, but hadn't yet been flushed
	if err := e.WAL.LoadMetadataIndex(index, measurementFields); err != nil {
		return err
	}

	// finally open the WAL up
	return e.WAL.Open()
}
Example #5
func benchmarkEngine_WriteIndex(b *testing.B, blockSize int) {
	// Skip small iterations.
	if b.N < 1000000 {
		return
	}

	// Create a simple engine.
	e := OpenDefaultEngine()
	e.BlockSize = blockSize
	defer e.Close()

	// Create codec.
	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	// Generate points.
	a := make(map[string][][]byte)
	a["cpu"] = make([][]byte, b.N)
	for i := 0; i < b.N; i++ {
		a["cpu"][i] = wal.MarshalEntry(int64(i), MustEncodeFields(codec, models.Fields{"value": float64(i)}))
	}

	b.ResetTimer()

	// Insert into engine.
	if err := e.WriteIndex(a, nil, nil); err != nil {
		b.Fatal(err)
	}

	// Calculate on-disk size per point.
	bs, _ := e.SeriesBucketStats("cpu")
	stats, err := e.Stats()
	if err != nil {
		b.Fatal(err)
	}
	b.Logf("pts=%9d  bytes/pt=%4.01f  leaf-util=%3.0f%%",
		b.N,
		float64(stats.Size)/float64(b.N),
		(float64(bs.LeafInuse)/float64(bs.LeafAlloc))*100.0,
	)
}
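
// benchmarkEngine_WriteIndex is parameterized by block size; hypothetical
// wrappers like these would expose concrete cases to `go test -bench`:
func BenchmarkEngine_WriteIndex_4KB(b *testing.B)  { benchmarkEngine_WriteIndex(b, 4096) }
func BenchmarkEngine_WriteIndex_64KB(b *testing.B) { benchmarkEngine_WriteIndex(b, 65536) }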
Example #6
func TestWAL_PointsSorted(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	// write several points to a single series, out of order
	p1 := parsePoint("cpu,host=A value=1.1 1", codec)
	p2 := parsePoint("cpu,host=A value=4.4 4", codec)
	p3 := parsePoint("cpu,host=A value=2.2 2", codec)
	p4 := parsePoint("cpu,host=A value=6.6 6", codec)
	if err := log.WritePoints([]tsdb.Point{p1, p2, p3, p4}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	c := log.Cursor("cpu,host=A")
	k, _ := c.Next()
	if btou64(k) != 1 {
		t.Fatal("points out of order")
	}
	k, _ = c.Next()
	if btou64(k) != 2 {
		t.Fatal("points out of order")
	}
	k, _ = c.Next()
	if btou64(k) != 4 {
		t.Fatal("points out of order")
	}
	k, _ = c.Next()
	if btou64(k) != 6 {
		t.Fatal("points out of order")
	}
}
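
// btou64, u64tob, and inttob are test helpers not shown in these examples.
// A minimal sketch, assuming WAL keys are big-endian uint64 timestamps
// (requires "encoding/binary"); inttob's signature is inferred from its
// call sites:
func u64tob(v uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, v)
	return b
}

func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }

func inttob(v int) []byte { return u64tob(uint64(v)) }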
Example #7
// Ensure the engine can write points to the index.
func TestEngine_WriteIndex_Append(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	// Create codec.
	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {ID: uint8(1), Name: "value", Type: influxql.Float},
	})

	// Append points to index.
	if err := e.WriteIndex(map[string][][]byte{
		"cpu": [][]byte{
			append(u64tob(1), MustEncodeFields(codec, models.Fields{"value": float64(10)})...),
			append(u64tob(2), MustEncodeFields(codec, models.Fields{"value": float64(20)})...),
		},
		"mem": [][]byte{
			append(u64tob(0), MustEncodeFields(codec, models.Fields{"value": float64(30)})...),
		},
	}, nil, nil); err != nil {
		t.Fatal(err)
	}

	// Start transaction.
	tx := e.MustBegin(false)
	defer tx.Rollback()

	// Iterate over "cpu" series.
	c := tx.Cursor("cpu", []string{"value"}, codec, true)
	if k, v := c.SeekTo(0); k != 1 || v.(float64) != float64(10) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	} else if k, v = c.Next(); k != 2 || v.(float64) != float64(20) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	} else if k, _ = c.Next(); k != tsdb.EOF {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	}

	// Iterate over "mem" series.
	c = tx.Cursor("mem", []string{"value"}, codec, true)
	if k, v := c.SeekTo(0); k != 0 || v.(float64) != float64(30) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	} else if k, _ = c.Next(); k != tsdb.EOF {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	}
}
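
// MustEncodeFields is another helper assumed by these tests. A sketch
// consistent with its call sites, assuming tsdb.FieldCodec exposes an
// EncodeFields method that accepts the point's field map:
func MustEncodeFields(codec *tsdb.FieldCodec, fields models.Fields) []byte {
	b, err := codec.EncodeFields(fields)
	if err != nil {
		panic(err)
	}
	return b
}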
Example #8
// Ensure that the engine properly seeks to a block when the seek value is in the middle.
func TestEngine_WriteIndex_SeekAgainstInBlockValue(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	// Create codec.
	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {ID: uint8(1), Name: "value", Type: influxql.String},
	})

	// make sure we have data split across two blocks
	dataSize := (bz1.DefaultBlockSize - 16) / 2
	data := strings.Repeat("*", dataSize)

	// Write initial points to index.
	if err := e.WriteIndex(map[string][][]byte{
		"cpu": [][]byte{
			append(u64tob(10), MustEncodeFields(codec, models.Fields{"value": data})...),
			append(u64tob(20), MustEncodeFields(codec, models.Fields{"value": data})...),
			append(u64tob(30), MustEncodeFields(codec, models.Fields{"value": data})...),
			append(u64tob(40), MustEncodeFields(codec, models.Fields{"value": data})...),
		},
	}, nil, nil); err != nil {
		t.Fatal(err)
	}

	// Start transaction.
	tx := e.MustBegin(false)
	defer tx.Rollback()

	// Ensure that we can seek to a block in the middle
	c := tx.Cursor("cpu", []string{"value"}, codec, true)
	if k, _ := c.SeekTo(15); k != 20 {
		t.Fatalf("expected to seek to time 20, but got %d", k)
	}
	// Ensure that we can seek to the block on the end
	if k, _ := c.SeekTo(35); k != 40 {
		t.Fatalf("expected to seek to time 40, but got %d", k)
	}
}
Example #9
// LoadMetadataIndex loads the shard metadata into memory.
func (e *Engine) LoadMetadataIndex(shard *tsdb.Shard, index *tsdb.DatabaseIndex, measurementFields map[string]*tsdb.MeasurementFields) error {
	// Load measurement metadata
	fields, err := e.readFields()
	if err != nil {
		return err
	}
	for k, mf := range fields {
		m := index.CreateMeasurementIndexIfNotExists(k)
		for name := range mf.Fields {
			m.SetFieldName(name)
		}
		mf.Codec = tsdb.NewFieldCodec(mf.Fields)
		measurementFields[m.Name] = mf
	}

	// Load series metadata
	series, err := e.readSeries()
	if err != nil {
		return err
	}

	// Load the series into the in-memory index in sorted order to ensure
	// it's always consistent for testing purposes
	a := make([]string, 0, len(series))
	for k := range series {
		a = append(a, k)
	}
	sort.Strings(a)
	for _, key := range a {
		s := series[key]
		s.InitializeShards()
		index.CreateSeriesIndexIfNotExists(tsdb.MeasurementFromSeriesKey(key), s)
	}

	return nil
}
Example #10
func TestWAL_CorruptDataLengthSize(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	// write two points to a single series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=A value=25.3 4", codec)
	if err := log.WritePoints([]tsdb.Point{p1, p2}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	c := log.Cursor("cpu,host=A", tsdb.Forward)
	_, v := c.Next()
	if !bytes.Equal(v, p1.Data()) {
		t.Fatal("p1 value wrong")
	}
	_, v = c.Next()
	if !bytes.Equal(v, p2.Data()) {
		t.Fatal("p2 value wrong")
	}
	_, v = c.Next()
	if v != nil {
		t.Fatal("expected cursor to return nil")
	}

	// now write junk data and ensure that we can close, re-open and read
	f := log.partition.currentSegmentFile
	f.Write([]byte{0x23, 0x12})
	f.Sync()
	log.Close()

	points := make([]map[string][][]byte, 0)
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		points = append(points, pointsByKey)
		return nil
	}}

	log.Open()

	p := points[0]
	if len(p["cpu,host=A"]) != 2 {
		t.Fatal("expected two points for cpu,host=A")
	}

	// now write new data and ensure it's all good
	p3 := parsePoint("cpu,host=A value=29.2 6", codec)
	if err := log.WritePoints([]tsdb.Point{p3}, nil, nil); err != nil {
		t.Fatalf("failed to write point: %s", err.Error())
	}

	c = log.Cursor("cpu,host=A", tsdb.Forward)
	_, v = c.Next()
	if !bytes.Equal(v, p3.Data()) {
		t.Fatal("p3 value wrong")
	}

	log.Close()

	points = make([]map[string][][]byte, 0)
	log.Open()
	p = points[0]
	if len(p["cpu,host=A"]) != 1 {
		t.Fatal("expected two points for cpu,host=A")
	}
}
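
// parsePoint and parsePoints are also assumed helpers. A sketch of the
// likely shape: parse line protocol, encode each point's fields with the
// codec, and attach the encoded payload so Data() returns it. The
// ParsePointsString, Fields, and SetData calls are assumptions about the
// tsdb API of this era:
func parsePoints(buf string, codec *tsdb.FieldCodec) []tsdb.Point {
	points, err := tsdb.ParsePointsString(buf)
	if err != nil {
		panic(err)
	}
	for _, p := range points {
		b, err := codec.EncodeFields(p.Fields())
		if err != nil {
			panic(err)
		}
		p.SetData(b)
	}
	return points
}

func parsePoint(buf string, codec *tsdb.FieldCodec) tsdb.Point {
	return parsePoints(buf, codec)[0]
}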
Example #11
// Ensure a partial compaction can be recovered from.
func TestWAL_Compact_Recovery(t *testing.T) {
	log := openTestWAL()
	log.partitionCount = 1
	log.CompactionThreshold = 0.7
	log.ReadySeriesSize = 1024
	log.flushCheckInterval = time.Minute
	defer log.Close()
	defer os.RemoveAll(log.path)

	points := make([]map[string][][]byte, 0)
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		points = append(points, pointsByKey)
		return nil
	}}

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	// Retrieve partition.
	p := log.partitions[1]

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	b := make([]byte, 70*5000)
	for i := 1; i <= 100; i++ {
		buf := bytes.NewBuffer(b[:0]) // reuse b's capacity; a full-length b would prepend 350,000 zero bytes
		for j := 1; j <= 1000; j++ {
			buf.WriteString(fmt.Sprintf("cpu,host=A,region=uswest%d value=%.3f %d\n", j, rand.Float64(), i))
		}
		buf.WriteString(fmt.Sprintf("cpu,host=A,region=uswest%d value=%.3f %d\n", rand.Int(), rand.Float64(), i))

		// Write the batch out.
		if err := log.WritePoints(parsePoints(buf.String(), codec), nil, nil); err != nil {
			t.Fatalf("failed to write points: %s", err.Error())
		}
	}

	// Mock second open call to fail.
	p.os.OpenSegmentFile = func(name string, flag int, perm os.FileMode) (file *os.File, err error) {
		if filepath.Base(name) == "01.000001.wal" {
			return os.OpenFile(name, flag, perm)
		}
		return nil, errors.New("marker")
	}
	if err := p.flushAndCompact(thresholdFlush); err == nil || err.Error() != "marker" {
		t.Fatalf("unexpected flush error: %s", err)
	}
	p.os.OpenSegmentFile = os.OpenFile

	// Append second file to simulate partial write.
	func() {
		f, err := os.OpenFile(p.compactionFileName(), os.O_RDWR|os.O_APPEND, 0666)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()

		// Append filename and partial data.
		if err := p.writeCompactionEntry(f, "01.000002.wal", []*entry{{key: []byte("foo"), data: []byte("bar"), timestamp: 100}}); err != nil {
			t.Fatal(err)
		}

		// Truncate by a few bytes.
		if fi, err := f.Stat(); err != nil {
			t.Fatal(err)
		} else if err = f.Truncate(fi.Size() - 2); err != nil {
			t.Fatal(err)
		}
	}()

	// Now close and re-open the wal and ensure there are no errors.
	log.Close()
	if err := log.Open(); err != nil {
		t.Fatalf("unexpected open error: %s", err)
	}
}
Example #12
func TestWAL_DeleteSeries(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	var seriesToIndex []*tsdb.SeriesCreate
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		seriesToIndex = append(seriesToIndex, seriesToCreate...)
		return nil
	}}

	seriesToCreate := []*tsdb.SeriesCreate{
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("cpu"), map[string]string{"host": "A"})), map[string]string{"host": "A"})},
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("cpu"), map[string]string{"host": "B"})), map[string]string{"host": "B"})},
	}

	// test that we can write to two different series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=B value=0.9 2", codec)
	p3 := parsePoint("cpu,host=A value=25.3 4", codec)
	p4 := parsePoint("cpu,host=B value=1.0 3", codec)
	if err := log.WritePoints([]tsdb.Point{p1, p2, p3, p4}, nil, seriesToCreate); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	// ensure data is there
	c := log.Cursor("cpu,host=A")
	if k, _ := c.Next(); btou64(k) != 1 {
		t.Fatal("expected data point for cpu,host=A")
	}

	c = log.Cursor("cpu,host=B")
	if k, _ := c.Next(); btou64(k) != 2 {
		t.Fatal("expected data point for cpu,host=B")
	}

	// delete the series and ensure metadata was flushed and data is gone
	if err := log.DeleteSeries([]string{"cpu,host=B"}); err != nil {
		t.Fatalf("error deleting series: %s", err.Error())
	}

	// ensure data is there
	c = log.Cursor("cpu,host=A")
	if k, _ := c.Next(); btou64(k) != 1 {
		t.Fatal("expected data point for cpu,host=A")
	}

	// ensure series is deleted
	c = log.Cursor("cpu,host=B")
	if k, _ := c.Next(); k != nil {
		t.Fatal("expected no data for cpu,host=B")
	}

	// ensure that they were actually flushed to the index. do it this way because reflect.DeepEqual doesn't really work for these
	for i, s := range seriesToCreate {
		if seriesToIndex[i].Measurement != s.Measurement {
			t.Fatal("expected measurement to be the same")
		}
		if seriesToIndex[i].Series.Key != s.Series.Key {
			t.Fatal("expected series key to be the same")
		}
		if !reflect.DeepEqual(seriesToIndex[i].Series.Tags, s.Series.Tags) {
			t.Fatal("expected series tags to be the same")
		}
	}

	// close and re-open the WAL to ensure that the data didn't show back up
	if err := log.Close(); err != nil {
		t.Fatalf("error closing log: %s", err.Error())
	}

	if err := log.Open(); err != nil {
		t.Fatalf("error opening log: %s", err.Error())
	}

	// ensure data is there
	c = log.Cursor("cpu,host=A")
	if k, _ := c.Next(); btou64(k) != 1 {
		t.Fatal("expected data point for cpu,host=A")
	}

	// ensure series is deleted
	c = log.Cursor("cpu,host=B")
	if k, _ := c.Next(); k != nil {
		t.Fatal("expected no data for cpu,host=B")
	}
}
Example #13
func TestWAL_SeriesAndFieldsGetPersisted(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	var measurementsToIndex map[string]*tsdb.MeasurementFields
	var seriesToIndex []*tsdb.SeriesCreate
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		measurementsToIndex = measurementFieldsToSave
		seriesToIndex = append(seriesToIndex, seriesToCreate...)
		return nil
	}}

	// test that we can write to two different series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=A value=25.3 4", codec)
	p3 := parsePoint("cpu,host=B value=1.0 1", codec)

	seriesToCreate := []*tsdb.SeriesCreate{
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("cpu"), map[string]string{"host": "A"})), map[string]string{"host": "A"})},
		{Series: tsdb.NewSeries(string(tsdb.MakeKey([]byte("cpu"), map[string]string{"host": "B"})), map[string]string{"host": "B"})},
	}

	measurementsToCreate := map[string]*tsdb.MeasurementFields{
		"cpu": {
			Fields: map[string]*tsdb.Field{
				"value": {ID: 1, Name: "value"},
			},
		},
	}

	if err := log.WritePoints([]tsdb.Point{p1, p2, p3}, measurementsToCreate, seriesToCreate); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	// now close it and see if loading the metadata index will populate the measurement and series info
	log.Close()

	idx := tsdb.NewDatabaseIndex()
	mf := make(map[string]*tsdb.MeasurementFields)

	if err := log.LoadMetadataIndex(idx, mf); err != nil {
		t.Fatalf("error loading metadata index: %s", err.Error())
	}

	s := idx.Series("cpu,host=A")
	if s == nil {
		t.Fatal("expected to find series cpu,host=A in index")
	}

	s = idx.Series("cpu,host=B")
	if s == nil {
		t.Fatal("expected to find series cpu,host=B in index")
	}

	m := mf["cpu"]
	if m == nil {
		t.Fatal("expected to find measurement fields for cpu", mf)
	}
	if m.Fields["value"] == nil {
		t.Fatal("expected to find field definition for 'value'")
	}

	// ensure that they were actually flushed to the index. do it this way because reflect.DeepEqual doesn't really work for these
	for i, s := range seriesToCreate {
		if seriesToIndex[i].Measurement != s.Measurement {
			t.Fatal("expected measurement to be the same")
		}
		if seriesToIndex[i].Series.Key != s.Series.Key {
			t.Fatal("expected series key to be the same")
		}
		if !reflect.DeepEqual(seriesToIndex[i].Series.Tags, s.Series.Tags) {
			t.Fatal("expected series tags to be the same")
		}
	}

	// ensure that the measurement fields were flushed to the index
	for k, v := range measurementsToCreate {
		m := measurementsToIndex[k]
		if m == nil {
			t.Fatalf("measurement %s wasn't indexed", k)
		}

		if !reflect.DeepEqual(m.Fields, v.Fields) {
			t.Fatal("measurement fields not equal")
		}
	}

	// now open and close the log and try to reload the metadata index, which should now be empty
	if err := log.Open(); err != nil {
		t.Fatalf("error opening log: %s", err.Error())
	}
	if err := log.Close(); err != nil {
		t.Fatalf("error closing log: %s", err.Error())
	}

	idx = tsdb.NewDatabaseIndex()
	mf = make(map[string]*tsdb.MeasurementFields)

	if err := log.LoadMetadataIndex(idx, mf); err != nil {
		t.Fatalf("error loading metadata index: %s", err.Error())
	}

	if len(idx.Measurements()) != 0 || len(mf) != 0 {
		t.Fatal("expected index and measurement fields to be empty")
	}
}
Example #14
// Ensure the wal forces a full flush after not having a write in a given interval of time
func TestWAL_CompactAfterTimeWithoutWrite(t *testing.T) {
	log := openTestWAL()
	log.partitionCount = 1

	// set this low
	log.flushCheckInterval = 10 * time.Millisecond
	log.FlushColdInterval = 500 * time.Millisecond

	defer log.Close()
	defer os.RemoveAll(log.path)

	points := make([]map[string][][]byte, 0)
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		points = append(points, pointsByKey)
		return nil
	}}

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	numSeries := 100
	b := make([]byte, 70*5000)
	for i := 1; i <= 10; i++ {
		buf := bytes.NewBuffer(b[:0]) // reuse b's capacity; a full-length b would prepend 350,000 zero bytes
		for j := 1; j <= numSeries; j++ {
			buf.WriteString(fmt.Sprintf("cpu,host=A,region=uswest%d value=%.3f %d\n", j, rand.Float64(), i))
		}

		// write the batch out
		if err := log.WritePoints(parsePoints(buf.String(), codec), nil, nil); err != nil {
			t.Fatalf("failed to write points: %s", err.Error())
		}
	}

	// ensure we have some data
	c := log.Cursor("cpu,host=A,region=uswest10")
	k, _ := c.Next()
	if btou64(k) != 1 {
		t.Fatalf("expected first data point but got one with key: %v", k)
	}

	time.Sleep(700 * time.Millisecond)

	// ensure that as a whole it's not ready for flushing yet
	if f := log.partitions[1].shouldFlush(tsdb.DefaultMaxSeriesSize, tsdb.DefaultCompactionThreshold); f != noFlush {
		t.Fatalf("expected partition 1 to return noFlush from shouldFlush %v", f)
	}

	// ensure that the partition is empty
	if log.partitions[1].memorySize != 0 || len(log.partitions[1].cache) != 0 {
		t.Fatal("expected partition to be empty")
	}
	// ensure that we didn't bother to open a new segment file
	if log.partitions[1].currentSegmentFile != nil {
		t.Fatal("expected partition to not have an open segment file")
	}
}
Example #15
// Ensure the wal flushes and compacts after a partition has enough series
// with enough data to flush
func TestWAL_CompactAfterPercentageThreshold(t *testing.T) {
	log := openTestWAL()
	log.partitionCount = 2
	log.CompactionThreshold = 0.7
	log.ReadySeriesSize = 1024

	// set this high so that a flush doesn't automatically kick in and mess up our test
	log.flushCheckInterval = time.Minute

	defer log.Close()
	defer os.RemoveAll(log.path)

	points := make([]map[string][][]byte, 0)
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		points = append(points, pointsByKey)
		return nil
	}}

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	numSeries := 100
	b := make([]byte, 70*5000)
	for i := 1; i <= 100; i++ {
		buf := bytes.NewBuffer(b[:0]) // reuse b's capacity; a full-length b would prepend 350,000 zero bytes
		for j := 1; j <= numSeries; j++ {
			buf.WriteString(fmt.Sprintf("cpu,host=A,region=uswest%d value=%.3f %d\n", j, rand.Float64(), i))
		}

		// ensure that before we go over the threshold it isn't marked for flushing
		if i < 50 {
			// interleave data for some series that won't be ready to flush
			buf.WriteString(fmt.Sprintf("cpu,host=A,region=useast1 value=%.3f %d\n", rand.Float64(), i))
			buf.WriteString(fmt.Sprintf("cpu,host=A,region=useast3 value=%.3f %d\n", rand.Float64(), i))

			// ensure that as a whole it's not ready for flushing yet
			if log.partitions[1].shouldFlush(tsdb.DefaultMaxSeriesSize, tsdb.DefaultCompactionThreshold) != noFlush {
				t.Fatal("expected partition 1 to return false from shouldFlush")
			}
		}

		// write the batch out
		if err := log.WritePoints(parsePoints(buf.String(), codec), nil, nil); err != nil {
			t.Fatalf("failed to write points: %s", err.Error())
		}
	}

	// ensure we have some data
	c := log.Cursor("cpu,host=A,region=uswest23")
	k, v := c.Next()
	if btou64(k) != 1 {
		t.Fatalf("expected timestamp of 1, but got %v %v", k, v)
	}

	// ensure it is marked as should flush because of the threshold
	if log.partitions[1].shouldFlush(tsdb.DefaultMaxSeriesSize, tsdb.DefaultCompactionThreshold) != thresholdFlush {
		t.Fatal("expected partition 1 to return true from shouldFlush")
	}

	if err := log.partitions[1].flushAndCompact(thresholdFlush); err != nil {
		t.Fatalf("error flushing and compacting: %s", err.Error())
	}

	// should be nil
	c = log.Cursor("cpu,host=A,region=uswest23")
	k, v = c.Next()
	if k != nil || v != nil {
		t.Fatal("expected cache to be nil after flush: ", k, v)
	}

	c = log.Cursor("cpu,host=A,region=useast1")
	k, v = c.Next()
	if btou64(k) != 1 {
		t.Fatal("expected cache to be there after flush and compact: ", k, v)
	}

	if len(points) == 0 {
		t.Fatal("expected points to be flushed to index")
	}

	// now close and re-open the wal and ensure the compacted data is gone and other data is still there
	log.Close()
	log.Open()

	c = log.Cursor("cpu,host=A,region=uswest23")
	k, v = c.Next()
	if k != nil || v != nil {
		t.Fatal("expected cache to be nil after flush and re-open: ", k, v)
	}

	c = log.Cursor("cpu,host=A,region=useast1")
	k, v = c.Next()
	if btou64(k) != 1 {
		t.Fatal("expected cache to be there after flush and compact: ", k, v)
	}
}
Example #16
func TestWAL_WritePoints(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	// test that we can write to two different series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=A value=25.3 4", codec)
	p3 := parsePoint("cpu,host=B value=1.0 1", codec)
	if err := log.WritePoints([]tsdb.Point{p1, p2, p3}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	verify := func() {
		c := log.Cursor("cpu,host=A")
		k, v := c.Seek(inttob(1))

		// ensure the series are there and points are in order
		if !bytes.Equal(v, p1.Data()) {
			t.Fatalf("expected to seek to first point but got key and value: %v %v", k, v)
		}

		k, v = c.Next()
		if !bytes.Equal(v, p2.Data()) {
			t.Fatalf("expected second point but got key and value: %v %v", k, v)
		}

		k, v = c.Next()
		if k != nil {
			t.Fatalf("expected nil after last point: %v %v", k, v)
		}

		c = log.Cursor("cpu,host=B")
		k, v = c.Next()
		if !bytes.Equal(v, p3.Data()) {
			t.Fatalf("expected first point of cpu,host=B but got key and value: %v %v", k, v)
		}
	}

	verify()

	// ensure that we can close and re-open the log with points still there
	log.Close()
	log.Open()

	verify()

	// ensure we can write new points into the series
	p4 := parsePoint("cpu,host=A value=1.0 7", codec)
	// ensure we can write an all new series
	p5 := parsePoint("cpu,host=C value=1.4 2", codec)
	// ensure we can write a point out of order and get it back
	p6 := parsePoint("cpu,host=A value=1.3 2", codec)
	// // ensure we can write to a new partition
	// p7 := parsePoint("cpu,region=west value=2.2", codec)
	if err := log.WritePoints([]tsdb.Point{p4, p5, p6}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	verify2 := func() {
		c := log.Cursor("cpu,host=A")
		k, v := c.Next()
		if !bytes.Equal(v, p1.Data()) {
			t.Fatalf("order wrong, expected p1, %v %v %v", v, k, p1.Data())
		}
		_, v = c.Next()
		if !bytes.Equal(v, p6.Data()) {
			t.Fatal("order wrong, expected p6")
		}
		_, v = c.Next()
		if !bytes.Equal(v, p2.Data()) {
			t.Fatal("order wrong, expected p2")
		}
		_, v = c.Next()
		if !bytes.Equal(v, p4.Data()) {
			t.Fatal("order wrong, expected p4")
		}

		c = log.Cursor("cpu,host=C")
		_, v = c.Next()
		if !bytes.Equal(v, p5.Data()) {
			t.Fatal("order wrong, expected p5")
		}
	}

	verify2()

	log.Close()
	log.Open()

	verify2()
}
Example #17
func TestWAL_CorruptDataBlock(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	// write two points to a single series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=A value=25.3 4", codec)
	if err := log.WritePoints([]tsdb.Point{p1, p2}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	verify := func() {
		c := log.Cursor("cpu,host=A")
		_, v := c.Next()
		if !bytes.Equal(v, p1.Data()) {
			t.Fatal("p1 value wrong")
		}
		_, v = c.Next()
		if !bytes.Equal(v, p2.Data()) {
			t.Fatal("p2 value wrong")
		}
		_, v = c.Next()
		if v != nil {
			t.Fatal("expected cursor to return nil")
		}
	}

	verify()

	// now write junk data and ensure that we can close, re-open and read

	f := log.partitions[1].currentSegmentFile
	f.Write(u64tob(23))
	// now write a bunch of garbage
	for i := 0; i < 1000; i++ {
		f.Write([]byte{0x23, 0x78, 0x11, 0x33})
	}
	f.Sync()

	log.Close()
	log.Open()

	verify()

	// now write new data and ensure it's all good
	p3 := parsePoint("cpu,host=A value=29.2 6", codec)
	if err := log.WritePoints([]tsdb.Point{p3}, nil, nil); err != nil {
		t.Fatalf("failed to write point: %s", err.Error())
	}

	verify = func() {
		c := log.Cursor("cpu,host=A")
		_, v := c.Next()
		if !bytes.Equal(v, p1.Data()) {
			t.Fatal("p1 value wrong")
		}
		_, v = c.Next()
		if !bytes.Equal(v, p2.Data()) {
			t.Fatal("p2 value wrong")
		}
		_, v = c.Next()
		if !bytes.Equal(v, p3.Data()) {
			t.Fatal("p3 value wrong", p3.Data(), v)
		}
	}

	verify()
	log.Close()
	log.Open()
	verify()
}
Example #18
func TestWAL_WritePoints(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	// Test that we can write to two different series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=A value=25.3 4", codec)
	p3 := parsePoint("cpu,host=B value=1.0 1", codec)
	if err := log.WritePoints([]models.Point{p1, p2, p3}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	c := log.Cursor("cpu,host=A", []string{"value"}, codec, true)
	k, v := c.SeekTo(1)

	// ensure the series are there and points are in order
	if v.(float64) != 23.2 {
		t.Fatalf("expected to seek to first point but got key and value: %v %v", k, v)
	}

	k, v = c.Next()
	if v.(float64) != 25.3 {
		t.Fatalf("expected to seek to first point but got key and value: %v %v", k, v)
	}

	k, v = c.Next()
	if k != tsdb.EOF {
		t.Fatalf("expected nil on last seek: %v %v", k, v)
	}

	c = log.Cursor("cpu,host=B", []string{"value"}, codec, true)
	k, v = c.Next()
	if v.(float64) != 1.0 {
		t.Fatalf("expected to seek to first point but got key and value: %v %v", k, v)
	}

	// ensure that we can close and re-open the log with points getting to the index
	log.Close()

	points := make([]map[string][][]byte, 0)
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		points = append(points, pointsByKey)
		return nil
	}}

	if err := log.Open(); err != nil {
		t.Fatal("error opening log", err)
	}

	p := points[0]
	if len(p["cpu,host=A"]) != 2 {
		t.Fatal("expected two points for cpu,host=A flushed to index")
	}
	if len(p["cpu,host=B"]) != 1 {
		t.Fatal("expected one point for cpu,host=B flushed to index")
	}

	// ensure we can write new points into the series
	p4 := parsePoint("cpu,host=A value=1.0 7", codec)
	// ensure we can write an all new series
	p5 := parsePoint("cpu,host=C value=1.4 2", codec)
	// ensure we can write a point out of order and get it back
	p6 := parsePoint("cpu,host=A value=1.3 2", codec)
	// // ensure we can write to a new partition
	// p7 := parsePoint("cpu,region=west value=2.2", codec)
	if err := log.WritePoints([]models.Point{p4, p5, p6}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	c = log.Cursor("cpu,host=A", []string{"value"}, codec, true)
	if _, v := c.Next(); v.(float64) != 1.3 {
		t.Fatal("order wrong, expected p6")
	}
	if _, v := c.Next(); v.(float64) != 1.0 {
		t.Fatal("order wrong, expected p6")
	}

	c = log.Cursor("cpu,host=C", []string{"value"}, codec, true)
	if _, v := c.Next(); v.(float64) != 1.4 {
		t.Fatal("order wrong, expected p6")
	}

	if err := log.Close(); err != nil {
		t.Fatal("error closing log", err)
	}

	points = make([]map[string][][]byte, 0)
	if err := log.Open(); err != nil {
		t.Fatal("error opening log", err)
	}

	p = points[0]
	if len(p["cpu,host=A"]) != 2 {
		t.Fatal("expected two points for cpu,host=A flushed to index")
	}
	if len(p["cpu,host=C"]) != 1 {
		t.Fatal("expected one point for cpu,host=B flushed to index")
	}
}
Example #19
// Ensure the engine can iterate in reverse over blocks, including rewritten overlapping points.
func TestEngine_Cursor_Reverse(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	// Create codec.
	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {ID: uint8(1), Name: "value", Type: influxql.Float},
	})

	// Write initial points to index.
	if err := e.WriteIndex(map[string][][]byte{
		"cpu": [][]byte{
			append(u64tob(10), MustEncodeFields(codec, models.Fields{"value": float64(10)})...),
			append(u64tob(20), MustEncodeFields(codec, models.Fields{"value": float64(20)})...),
			append(u64tob(30), MustEncodeFields(codec, models.Fields{"value": float64(30)})...),
		},
	}, nil, nil); err != nil {
		t.Fatal(err)
	}

	// Write overlapping points to index.
	if err := e.WriteIndex(map[string][][]byte{
		"cpu": [][]byte{
			append(u64tob(9), MustEncodeFields(codec, models.Fields{"value": float64(9)})...),
			append(u64tob(10), MustEncodeFields(codec, models.Fields{"value": float64(255)})...),
			append(u64tob(25), MustEncodeFields(codec, models.Fields{"value": float64(25)})...),
			append(u64tob(31), MustEncodeFields(codec, models.Fields{"value": float64(31)})...),
		},
	}, nil, nil); err != nil {
		t.Fatal(err)
	}

	// Write overlapping points to index again.
	if err := e.WriteIndex(map[string][][]byte{
		"cpu": [][]byte{
			append(u64tob(31), MustEncodeFields(codec, models.Fields{"value": float64(255)})...),
		},
	}, nil, nil); err != nil {
		t.Fatal(err)
	}

	// Start transaction.
	tx := e.MustBegin(false)
	defer tx.Rollback()

	// Iterate over "cpu" series.
	c := tx.Cursor("cpu", []string{"value"}, codec, false)
	if k, v := c.SeekTo(math.MaxInt64); k != 31 || v.(float64) != float64(255) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	} else if k, v = c.Next(); k != 30 || v.(float64) != float64(30) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	} else if k, v = c.Next(); k != 25 || v.(float64) != float64(25) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	} else if k, v = c.Next(); k != 20 || v.(float64) != float64(20) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	} else if k, v = c.Next(); k != 10 || v.(float64) != float64(255) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	} else if k, v = c.SeekTo(0); k != 9 || v.(float64) != float64(9) {
		t.Fatalf("unexpected key/value: %x / %x", k, v)
	}
}
Example #20
func TestWAL_CorruptDataBlock(t *testing.T) {
	log := openTestWAL()
	defer log.Close()
	defer os.RemoveAll(log.path)

	if err := log.Open(); err != nil {
		t.Fatalf("couldn't open wal: %s", err.Error())
	}

	codec := tsdb.NewFieldCodec(map[string]*tsdb.Field{
		"value": {
			ID:   uint8(1),
			Name: "value",
			Type: influxql.Float,
		},
	})

	// write two points to a single series
	p1 := parsePoint("cpu,host=A value=23.2 1", codec)
	p2 := parsePoint("cpu,host=A value=25.3 4", codec)
	if err := log.WritePoints([]models.Point{p1, p2}, nil, nil); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	c := log.Cursor("cpu,host=A", []string{"value"}, codec, true)
	if _, v := c.Next(); v.(float64) != 23.2 {
		t.Fatal("p1 value wrong")
	}
	if _, v := c.Next(); v.(float64) != 25.3 {
		t.Fatal("p2 value wrong")
	}
	if _, v := c.Next(); v != nil {
		t.Fatal("expected cursor to return nil")
	}

	// now write junk data and ensure that we can close, re-open and read

	f := log.partition.currentSegmentFile
	f.Write(u64tob(23))
	// now write a bunch of garbage
	for i := 0; i < 1000; i++ {
		f.Write([]byte{0x23, 0x78, 0x11, 0x33})
	}
	f.Sync()

	log.Close()

	points := make([]map[string][][]byte, 0)
	log.Index = &testIndexWriter{fn: func(pointsByKey map[string][][]byte, measurementFieldsToSave map[string]*tsdb.MeasurementFields, seriesToCreate []*tsdb.SeriesCreate) error {
		points = append(points, pointsByKey)
		return nil
	}}

	log.Open()
	if p := points[0]; len(p["cpu,host=A"]) != 2 {
		t.Fatal("expected two points for cpu,host=A")
	}

	// now write new data and ensure it's all good
	p3 := parsePoint("cpu,host=A value=29.2 6", codec)
	if err := log.WritePoints([]models.Point{p3}, nil, nil); err != nil {
		t.Fatalf("failed to write point: %s", err.Error())
	}

	c = log.Cursor("cpu,host=A", []string{"value"}, codec, true)
	if _, v := c.Next(); v.(float64) != 29.2 {
		t.Fatal("p3 value wrong", p3.Data(), v)
	}

	log.Close()

	points = make([]map[string][][]byte, 0)
	log.Open()
	if p := points[0]; len(p["cpu,host=A"]) != 1 {
		t.Fatal("expected two points for cpu,host=A")
	}
}