// Ensure a cursor with a single ref value can be converted into an iterator. func TestFloatCursorIterator_SingleValue(t *testing.T) { cur := NewCursor([]CursorItem{ {Key: 0, Value: float64(100)}, {Key: 3, Value: float64(200)}, }, true) opt := influxql.IteratorOptions{ Expr: &influxql.VarRef{Val: "value"}, Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, } itr := tsdb.NewFloatCursorIterator("series0", map[string]string{"host": "serverA"}, cur, opt) defer itr.Close() if p := itr.Next(); !deep.Equal(p, &influxql.FloatPoint{ Name: "series0", Time: 0, Value: float64(100), }) { t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) } if p := itr.Next(); !deep.Equal(p, &influxql.FloatPoint{ Name: "series0", Time: 3, Value: float64(200), }) { t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) } if p := itr.Next(); p != nil { t.Fatalf("expected eof, got: %s", spew.Sdump(p)) } }
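// The CursorItem/NewCursor helpers used above are defined elsewhere in the test package.
// A minimal sketch of what such a test double might look like, assuming the older
// tsdb.Cursor interface (SeekTo, Next, Ascending); the real helper may differ.
type CursorItem struct {
	Key   int64
	Value interface{}
}

type Cursor struct {
	items     []CursorItem
	index     int
	ascending bool
}

func NewCursor(items []CursorItem, ascending bool) *Cursor {
	return &Cursor{items: items, ascending: ascending}
}

// SeekTo advances to the first item at or after seek and returns it.
func (c *Cursor) SeekTo(seek int64) (int64, interface{}) {
	for c.index < len(c.items) && c.items[c.index].Key < seek {
		c.index++
	}
	return c.Next()
}

// Next returns the next key/value pair, or (-1, nil) once the cursor is exhausted.
func (c *Cursor) Next() (int64, interface{}) {
	if c.index >= len(c.items) {
		return -1, nil
	}
	item := c.items[c.index]
	c.index++
	return item.Key, item.Value
}

// Ascending reports the sort order the cursor was built with.
func (c *Cursor) Ascending() bool { return c.ascending }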
// Ensure shards can create iterators. func TestShards_CreateIterator(t *testing.T) { s := MustOpenStore() defer s.Close() // Create shard #0 with data. s.MustCreateShardWithData("db0", "rp0", 0, `cpu,host=serverA value=1 0`, `cpu,host=serverA value=2 10`, `cpu,host=serverB value=3 20`, ) // Create shard #1 with data. s.MustCreateShardWithData("db0", "rp0", 1, `cpu,host=serverA value=1 30`, `mem,host=serverA value=2 40`, // skip: wrong source `cpu,host=serverC value=3 60`, ) // Create iterator. itr, err := tsdb.Shards(s.Shards([]uint64{0, 1})).CreateIterator(influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Dimensions: []string{"host"}, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) } defer itr.Close() fitr := itr.(influxql.FloatIterator) // Read values from iterator. The host=serverA points should come first. if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(0, 0).UnixNano(), Value: 1}) { t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) } else if p = fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(10, 0).UnixNano(), Value: 2}) { t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) } else if p = fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(30, 0).UnixNano(), Value: 1}) { t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) } // Next the host=serverB point. if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverB"), Time: time.Unix(20, 0).UnixNano(), Value: 3}) { t.Fatalf("unexpected point(3): %s", spew.Sdump(p)) } // And finally the host=serverC point. if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverC"), Time: time.Unix(60, 0).UnixNano(), Value: 3}) { t.Fatalf("unexpected point(4): %s", spew.Sdump(p)) } // Then an EOF should occur. if p := fitr.Next(); p != nil { t.Fatalf("expected eof, got: %s", spew.Sdump(p)) } }
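// ParseTags is another test helper used throughout these tests; a minimal sketch,
// assuming it builds influxql.Tags from a comma-separated list of key=value pairs
// (the real helper may be implemented differently). Requires the "strings" import.
func ParseTags(s string) influxql.Tags {
	m := make(map[string]string)
	for _, kv := range strings.Split(s, ",") {
		if a := strings.SplitN(kv, "=", 2); len(a) == 2 {
			m[a[0]] = a[1]
		}
	}
	return influxql.NewTags(m)
}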
// Ensure a shard can create iterators for its underlying data. func TestShard_CreateIterator(t *testing.T) { sh := MustOpenShard() defer sh.Close() sh.MustWritePointsString(` cpu,host=serverA,region=uswest value=100 0 cpu,host=serverA,region=uswest value=50,val2=5 10 cpu,host=serverB,region=uswest value=25 0 `) // Create iterator. itr, err := sh.CreateIterator(influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Aux: []string{"val2"}, Dimensions: []string{"host"}, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) } defer itr.Close() fitr := itr.(influxql.FloatIterator) // Read values from iterator. if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{ Name: "cpu", Tags: influxql.NewTags(map[string]string{"host": "serverA"}), Time: time.Unix(0, 0).UnixNano(), Value: 100, Aux: []interface{}{(*float64)(nil)}, }) { t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) } if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{ Name: "cpu", Tags: influxql.NewTags(map[string]string{"host": "serverA"}), Time: time.Unix(10, 0).UnixNano(), Value: 50, Aux: []interface{}{float64(5)}, }) { t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) } if p := fitr.Next(); !deep.Equal(p, &influxql.FloatPoint{ Name: "cpu", Tags: influxql.NewTags(map[string]string{"host": "serverB"}), Time: time.Unix(0, 0).UnixNano(), Value: 25, Aux: []interface{}{math.NaN()}, }) { t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) } }
// Ensure engine can create an iterator with auxiliary fields. func TestEngine_CreateIterator_Aux(t *testing.T) { t.Parallel() e := MustOpenEngine() defer e.Close() e.Index().CreateMeasurementIndexIfNotExists("cpu") e.MeasurementFields("cpu").CreateFieldIfNotExists("value", influxql.Float, false) e.MeasurementFields("cpu").CreateFieldIfNotExists("F", influxql.Float, false) si := e.Index().CreateSeriesIndexIfNotExists("cpu", tsdb.NewSeries("cpu,host=A", models.NewTags(map[string]string{"host": "A"}))) si.AssignShard(1) if err := e.WritePointsString( `cpu,host=A value=1.1 1000000000`, `cpu,host=A F=100 1000000000`, `cpu,host=A value=1.2 2000000000`, `cpu,host=A value=1.3 3000000000`, `cpu,host=A F=200 3000000000`, ); err != nil { t.Fatalf("failed to write points: %s", err.Error()) } itr, err := e.CreateIterator(influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Aux: []influxql.VarRef{{Val: "F"}}, Dimensions: []string{"host"}, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, Ascending: true, }) if err != nil { t.Fatal(err) } fitr := itr.(influxql.FloatIterator) if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(0): %v", err) } else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) { t.Fatalf("unexpected point(0): %v", p) } if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(1): %v", err) } else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) { t.Fatalf("unexpected point(1): %v", p) } if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(2): %v", err) } else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) { t.Fatalf("unexpected point(2): %v", p) } if p, err := fitr.Next(); err != nil { t.Fatalf("expected eof, got error: %v", err) } else if p != nil { t.Fatalf("expected eof: %v", p) } }
// Ensure the emitter can group iterators together into rows. func TestEmitter_Emit(t *testing.T) { // Build an emitter that pulls from two iterators. e := influxql.NewEmitter([]influxql.Iterator{ &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1}, {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2}, }}, &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 4}, {Name: "cpu", Tags: ParseTags("region=north"), Time: 0, Value: 4}, {Name: "mem", Time: 4, Value: 5}, }}, }, true) e.Columns = []string{"col1", "col2"} // Verify the cpu region=west is emitted first. if row := e.Emit(); !deep.Equal(row, &models.Row{ Name: "cpu", Tags: map[string]string{"region": "west"}, Columns: []string{"col1", "col2"}, Values: [][]interface{}{ {time.Unix(0, 0).UTC(), float64(1), nil}, {time.Unix(0, 1).UTC(), float64(2), float64(4)}, }, }) { t.Fatalf("unexpected row(0): %s", spew.Sdump(row)) } // Verify the cpu region=north is emitted next. if row := e.Emit(); !deep.Equal(row, &models.Row{ Name: "cpu", Tags: map[string]string{"region": "north"}, Columns: []string{"col1", "col2"}, Values: [][]interface{}{ {time.Unix(0, 0).UTC(), nil, float64(4)}, }, }) { t.Fatalf("unexpected row(1): %s", spew.Sdump(row)) } // Verify the mem series is emitted last. if row := e.Emit(); !deep.Equal(row, &models.Row{ Name: "mem", Columns: []string{"col1", "col2"}, Values: [][]interface{}{ {time.Unix(0, 4).UTC(), nil, float64(5)}, }, }) { t.Fatalf("unexpected row(2): %s", spew.Sdump(row)) } // Verify EOF. if row := e.Emit(); row != nil { t.Fatalf("unexpected eof: %s", spew.Sdump(row)) } }
// Ensure limit iterator returns a subset of points. func TestLimitIterator(t *testing.T) { itr := influxql.NewLimitIterator( &FloatIterator{Points: []influxql.FloatPoint{ {Time: 0, Value: 0}, {Time: 1, Value: 1}, {Time: 2, Value: 2}, {Time: 3, Value: 3}, }}, influxql.IteratorOptions{ Limit: 2, Offset: 1, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, }, ) if a, err := (Iterators{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) } else if !deep.Equal(a, [][]influxql.Point{ {&influxql.FloatPoint{Time: 1, Value: 1}}, {&influxql.FloatPoint{Time: 2, Value: 2}}, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } }
// Ensure limit iterators work with limit and offset. func TestLimitIterator_Boolean(t *testing.T) { input := &BooleanIterator{Points: []influxql.BooleanPoint{ {Name: "cpu", Time: 0, Value: true}, {Name: "cpu", Time: 5, Value: false}, {Name: "cpu", Time: 10, Value: true}, {Name: "mem", Time: 5, Value: false}, {Name: "mem", Time: 7, Value: true}, }} itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ Limit: 1, Offset: 1, }) if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ {&influxql.BooleanPoint{Name: "cpu", Time: 5, Value: false}}, {&influxql.BooleanPoint{Name: "mem", Time: 7, Value: true}}, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } if !input.Closed { t.Error("iterator not closed") } }
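// The expected output above encodes the per-series semantics of the limit iterator:
// Offset and Limit are applied within each series rather than across the whole stream,
// so "cpu" and "mem" each contribute their second point. An illustrative helper showing
// those semantics (not the influxql implementation), keyed by Name only for brevity:
func limitPerSeries(points []influxql.BooleanPoint, limit, offset int) []influxql.BooleanPoint {
	seen := make(map[string]int)
	var out []influxql.BooleanPoint
	for _, p := range points {
		n := seen[p.Name]
		seen[p.Name]++
		// Keep only the points whose per-series index falls in [offset, offset+limit).
		if n < offset || n >= offset+limit {
			continue
		}
		out = append(out, p)
	}
	return out
}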
// Ensure that a float iterator can be created for a last() call. func TestCallIterator_Last_Float(t *testing.T) { itr, _ := influxql.NewCallIterator( &FloatIterator{Points: []influxql.FloatPoint{ {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, }}, influxql.IteratorOptions{ Expr: MustParseExpr(`last("value")`), Dimensions: []string{"host"}, Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, }, ) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) } else if !deep.Equal(a, [][]influxql.Point{ {&influxql.FloatPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, {&influxql.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, {&influxql.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, {&influxql.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } }
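// For reference, the last() reduction this test exercises keeps, for each group and
// time window, the point with the greatest timestamp, and records how many points the
// window contained in Aggregated. A sketch of that per-window reduction (illustrative
// only; not the influxql code, and it assumes Aggregated is a uint32 count):
func lastInWindow(points []influxql.FloatPoint) influxql.FloatPoint {
	out := points[0]
	for _, p := range points[1:] {
		if p.Time > out.Time {
			out = p
		}
	}
	out.Aggregated = uint32(len(points))
	return out
}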
// Ensure that an integer iterator can be created for a min() call. func TestCallIterator_Min_Integer(t *testing.T) { itr, _ := influxql.NewCallIterator( &IntegerIterator{Points: []influxql.IntegerPoint{ {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, }}, influxql.IteratorOptions{ Expr: MustParseExpr(`min("value")`), Dimensions: []string{"host"}, Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, }, ) if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ {&influxql.IntegerPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, {&influxql.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, {&influxql.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, {&influxql.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } }
// TestSample_AllSamplesSeen attempts to verify that it is possible // to get every subsample in a reasonable number of iterations. // // The idea here is that 30 iterations should be enough to hit every possible // sequence at least once. func TestSample_AllSamplesSeen(t *testing.T) { ps := []influxql.FloatPoint{ {Time: 1, Value: 1}, {Time: 2, Value: 2}, {Time: 3, Value: 3}, } // List of all the possible subsamples samples := [][]influxql.FloatPoint{ { {Time: 1, Value: 1}, {Time: 2, Value: 2}, }, { {Time: 1, Value: 1}, {Time: 3, Value: 3}, }, { {Time: 2, Value: 2}, {Time: 3, Value: 3}, }, } // 30 iterations should be sufficient to guarantee that // we hit every possible subsample. for i := 0; i < 30; i++ { s := influxql.NewFloatSampleReducer(2) for _, p := range ps { s.AggregateFloat(&p) } points := s.Emit() for i, sample := range samples { // if we find a sample that it matches, remove it from // this list of possible samples if deep.Equal(sample, points) { samples = append(samples[:i], samples[i+1:]...) break } } // if samples is empty we've seen every sample, so we're done if len(samples) == 0 { return } // The FloatSampleReducer is seeded with time.Now().UnixNano(), and without this sleep, // this test will fail on machines where UnixNano doesn't return full resolution. // Specifically, some Windows machines will only return timestamps accurate to 100ns. // While iterating through this test without an explicit sleep, // we would only see one or two unique seeds across all the calls to NewFloatSampleReducer. time.Sleep(time.Millisecond) } // If we missed a sample, report the error if len(samples) != 0 { t.Fatalf("expected all samples to be seen; unseen samples: %#v", samples) } }
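// Back-of-the-envelope check for the iteration count above: with three equally likely
// 2-point subsamples, the chance that one particular subsample is never drawn in n
// independent runs is (2/3)^n, so the chance that any of the three is missed is at most
// 3*(2/3)^30 ≈ 1.6e-5 (assuming each run is seeded differently, which is what the
// per-iteration sleep in the test guarantees).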
// Ensure that a boolean iterator can be created for a last() call. func TestCallIterator_Last_Boolean(t *testing.T) { itr, _ := influxql.NewCallIterator( &BooleanIterator{Points: []influxql.BooleanPoint{ {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, }}, influxql.IteratorOptions{ Expr: MustParseExpr(`last("value")`), Dimensions: []string{"host"}, Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, }, ) if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ {&influxql.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, {&influxql.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, {&influxql.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, {&influxql.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } }
// Ensure that a boolean iterator can be created for a mode() call. func TestCallIterator_Mode_Boolean(t *testing.T) { itr, _ := influxql.NewModeIterator(&BooleanIterator{Points: []influxql.BooleanPoint{ {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, {Time: 2, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 3, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 4, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 7, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 8, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, {Time: 22, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, {Time: 24, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, }}, influxql.IteratorOptions{ Expr: MustParseExpr(`mode("value")`), Dimensions: []string{"host"}, Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, }, ) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) } else if !deep.Equal(a, [][]influxql.Point{ {&influxql.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA")}}, {&influxql.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB")}}, {&influxql.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA")}}, {&influxql.BooleanPoint{Time: 20, Value: true, Tags: ParseTags("host=hostB")}}, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } }
// Ensure limit iterators work with limit and offset. func TestLimitIterator_String(t *testing.T) { input := &StringIterator{Points: []influxql.StringPoint{ {Name: "cpu", Time: 0, Value: "a"}, {Name: "cpu", Time: 5, Value: "b"}, {Name: "cpu", Time: 10, Value: "c"}, {Name: "mem", Time: 5, Value: "d"}, {Name: "mem", Time: 7, Value: "e"}, }} itr := influxql.NewLimitIterator(input, influxql.IteratorOptions{ Limit: 1, Offset: 1, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) } else if !deep.Equal(a, [][]influxql.Point{ {&influxql.StringPoint{Name: "cpu", Time: 5, Value: "b"}}, {&influxql.StringPoint{Name: "mem", Time: 7, Value: "e"}}, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } if !input.Closed { t.Error("iterator not closed") } }
// Ensure that a string iterator can be created for a count() call. func TestCallIterator_Count_String(t *testing.T) { itr, _ := influxql.NewCallIterator( &StringIterator{Points: []influxql.StringPoint{ {Name: "cpu", Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, {Name: "cpu", Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, {Name: "cpu", Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, {Name: "cpu", Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, {Name: "cpu", Time: 5, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, {Name: "cpu", Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, {Name: "mem", Time: 23, Value: "b", Tags: ParseTags("region=us-west,host=hostB")}, }}, influxql.IteratorOptions{ Expr: MustParseExpr(`count("value")`), Dimensions: []string{"host"}, Interval: influxql.Interval{Duration: 5 * time.Nanosecond}, }, ) if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, {&influxql.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, {&influxql.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, {&influxql.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, {&influxql.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } }
// Ensure the emitter will limit the chunked output from a series. func TestEmitter_ChunkSize(t *testing.T) { // Build an emitter that pulls from one iterator with multiple points in the same series. e := influxql.NewEmitter([]influxql.Iterator{ &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("region=west"), Time: 0, Value: 1}, {Name: "cpu", Tags: ParseTags("region=west"), Time: 1, Value: 2}, }}, }, true, 1) e.Columns = []string{"col1"} // Verify the cpu region=west is emitted first. if row, _, err := e.Emit(); err != nil { t.Fatalf("unexpected error(0): %s", err) } else if !deep.Equal(row, &models.Row{ Name: "cpu", Tags: map[string]string{"region": "west"}, Columns: []string{"col1"}, Values: [][]interface{}{ {time.Unix(0, 0).UTC(), float64(1)}, }, Partial: true, }) { t.Fatalf("unexpected row(0): %s", spew.Sdump(row)) } // Verify the remaining cpu region=west chunk is emitted next. if row, _, err := e.Emit(); err != nil { t.Fatalf("unexpected error(1): %s", err) } else if !deep.Equal(row, &models.Row{ Name: "cpu", Tags: map[string]string{"region": "west"}, Columns: []string{"col1"}, Values: [][]interface{}{ {time.Unix(0, 1).UTC(), float64(2)}, }, }) { t.Fatalf("unexpected row(1): %s", spew.Sdump(row)) } // Verify EOF. if row, _, err := e.Emit(); err != nil { t.Fatalf("unexpected error(eof): %s", err) } else if row != nil { t.Fatalf("unexpected eof: %s", spew.Sdump(row)) } }
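// CompareFloatIterator drains input and compares the points read against expected,
// returning the points and whether they matched.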
func CompareFloatIterator(input influxql.Iterator, expected []influxql.FloatPoint) ([]influxql.FloatPoint, bool) { itr := input.(influxql.FloatIterator) points := make([]influxql.FloatPoint, 0, len(expected)) for p := itr.Next(); p != nil; p = itr.Next() { points = append(points, *p) } itr.Close() return points, deep.Equal(points, expected) }
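// A hypothetical use of the helper above; the iterator contents here are illustrative,
// not taken from the real test suite, and the example assumes the FloatIterator test
// double defined elsewhere in this package.
func TestExample_CompareFloatIterator(t *testing.T) {
	input := &FloatIterator{Points: []influxql.FloatPoint{
		{Name: "cpu", Time: 0, Value: 1},
		{Name: "cpu", Time: 10, Value: 2},
	}}
	expected := []influxql.FloatPoint{
		{Name: "cpu", Time: 0, Value: 1},
		{Name: "cpu", Time: 10, Value: 2},
	}
	if points, ok := CompareFloatIterator(input, expected); !ok {
		t.Fatalf("unexpected points: %s", spew.Sdump(points))
	}
}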
func TestPoint_Clone_Boolean(t *testing.T) { p := &influxql.BooleanPoint{ Name: "cpu", Tags: ParseTags("host=server01"), Time: 5, Value: true, Aux: []interface{}{float64(45)}, } c := p.Clone() if p == c { t.Errorf("clone has the same address as the original: %v == %v", p, c) } if !deep.Equal(p, c) { t.Errorf("mismatched point: %s", spew.Sdump(c)) } if &p.Aux[0] == &c.Aux[0] { t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) } else if !deep.Equal(p.Aux, c.Aux) { t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) } }
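// What the test above asserts is a deep copy: the clone is a distinct struct and its
// Aux slice has its own backing array. A sketch of a clone consistent with those
// assertions (the real influxql implementation may differ):
func cloneBooleanPoint(p *influxql.BooleanPoint) *influxql.BooleanPoint {
	if p == nil {
		return nil
	}
	other := *p
	if p.Aux != nil {
		other.Aux = make([]interface{}, len(p.Aux))
		copy(other.Aux, p.Aux)
	}
	return &other
}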
func TestSortedMergeIterator_Cast_Float(t *testing.T) { inputs := []influxql.Iterator{ &IntegerIterator{Points: []influxql.IntegerPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, }}, &FloatIterator{Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, }}, } itr := influxql.NewSortedMergeIterator(inputs, influxql.IteratorOptions{ Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) } else if !deep.Equal(a, [][]influxql.Point{ {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, }) { t.Errorf("unexpected points: %s", spew.Sdump(a)) } for i, input := range inputs { switch input := input.(type) { case *FloatIterator: if !input.Closed { t.Errorf("iterator %d not closed", i) } case *IntegerIterator: if !input.Closed { t.Errorf("iterator %d not closed", i) } } } }
// Ensure a cursor with multiple values can be converted into an iterator. func TestFloatCursorIterator_Aux_MultipleValues(t *testing.T) { cur := NewCursor([]CursorItem{ {Key: 0, Value: map[string]interface{}{"val1": float64(100), "val2": "foo"}}, {Key: 3, Value: map[string]interface{}{"val1": float64(200), "val2": "bar"}}, }, true) opt := influxql.IteratorOptions{ Aux: []string{"val1", "val2"}, Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, } itr := tsdb.NewFloatCursorIterator("series0", map[string]string{"host": "serverA"}, cur, opt) defer itr.Close() if p := itr.Next(); !deep.Equal(p, &influxql.FloatPoint{ Name: "series0", Time: 0, Value: math.NaN(), Aux: []interface{}{float64(100), "foo"}, }) { t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) } if p := itr.Next(); !deep.Equal(p, &influxql.FloatPoint{ Name: "series0", Time: 3, Value: math.NaN(), Aux: []interface{}{float64(200), "bar"}, }) { t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) } if p := itr.Next(); p != nil { t.Fatalf("expected eof, got: %s", spew.Sdump(p)) } }
// Ensure that a set of iterators can be merged together, sorted by window and name/tag. func TestMergeIterator_Boolean(t *testing.T) { inputs := []*BooleanIterator{ {Points: []influxql.BooleanPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}, }}, {Points: []influxql.BooleanPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}, }}, {Points: []influxql.BooleanPoint{}}, } itr := influxql.NewMergeIterator(BooleanIterators(inputs), influxql.IteratorOptions{ Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, Ascending: true, }) if a, err := Iterators([]influxql.Iterator{itr}).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) } else if !deep.Equal(a, [][]influxql.Point{ {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, {&influxql.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}}, {&influxql.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}}, }) { t.Errorf("unexpected points: %s", spew.Sdump(a)) } for i, input := range inputs { if !input.Closed { t.Errorf("iterator %d not closed", i) } } }
// Ensure that a set of iterators can be merged together, sorted by window and name/tag. func TestMergeIterator_Float(t *testing.T) { inputs := []*FloatIterator{ {Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, }}, {Points: []influxql.FloatPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, }}, {Points: []influxql.FloatPoint{}}, {Points: []influxql.FloatPoint{}}, } itr := influxql.NewMergeIterator(FloatIterators(inputs), influxql.IteratorOptions{ Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, Ascending: true, }) if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, {&influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, {&influxql.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, }) { t.Errorf("unexpected points: %s", spew.Sdump(a)) } for i, input := range inputs { if !input.Closed { t.Errorf("iterator %d not closed", i) } } }
// Ensure that a set of iterators can be merged together, sorted by window and name/tag. func TestMergeIterator_String(t *testing.T) { inputs := []*StringIterator{ {Points: []influxql.StringPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}, }}, {Points: []influxql.StringPoint{ {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, }}, {Points: []influxql.StringPoint{}}, } itr := influxql.NewMergeIterator(StringIterators(inputs), influxql.IteratorOptions{ Interval: influxql.Interval{ Duration: 10 * time.Nanosecond, }, Ascending: true, }) if a := Iterators([]influxql.Iterator{itr}).ReadAll(); !deep.Equal(a, [][]influxql.Point{ {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, {&influxql.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, {&influxql.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}}, }) { t.Errorf("unexpected points: %s", spew.Sdump(a)) } for i, input := range inputs { if !input.Closed { t.Errorf("iterator %d not closed", i) } } }
// Ensure auxiliary iterators can be created for auxiliary fields. func TestFloatAuxIterator(t *testing.T) { itr := influxql.NewAuxIterator( &FloatIterator{Points: []influxql.FloatPoint{ {Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}}, {Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}}, }}, []influxql.Series{ {Aux: []influxql.DataType{influxql.Float, influxql.Float}}, }, influxql.IteratorOptions{Aux: []string{"f0", "f1"}}, ) itrs := []influxql.Iterator{ itr, itr.Iterator("f0"), itr.Iterator("f1"), itr.Iterator("f0"), } itr.Start() if a, err := Iterators(itrs).ReadAll(); err != nil { t.Fatalf("unexpected error: %s", err) } else if !deep.Equal(a, [][]influxql.Point{ { &influxql.FloatPoint{Time: 0, Value: 1, Aux: []interface{}{float64(100), float64(200)}}, &influxql.FloatPoint{Time: 0, Value: float64(100)}, &influxql.FloatPoint{Time: 0, Value: float64(200)}, &influxql.FloatPoint{Time: 0, Value: float64(100)}, }, { &influxql.FloatPoint{Time: 1, Value: 2, Aux: []interface{}{float64(500), math.NaN()}}, &influxql.FloatPoint{Time: 1, Value: float64(500)}, &influxql.FloatPoint{Time: 1, Value: math.NaN()}, &influxql.FloatPoint{Time: 1, Value: float64(500)}, }, }) { t.Fatalf("unexpected points: %s", spew.Sdump(a)) } }
func TestSample_SampleSizeGreaterThanNumPoints(t *testing.T) { s := influxql.NewFloatSampleReducer(4) ps := []influxql.FloatPoint{ {Time: 1, Value: 1}, {Time: 2, Value: 2}, {Time: 3, Value: 3}, } for _, p := range ps { s.AggregateFloat(&p) } points := s.Emit() if exp, got := len(ps), len(points); exp != got { t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) } if !deep.Equal(ps, points) { t.Fatalf("unexpected points: %s", spew.Sdump(points)) } }
// Ensure a shard can create a descending iterator for its underlying data. func TestShard_CreateIterator_Descending(t *testing.T) { sh := NewShard() // Calling CreateIterator when the engine is not open will return // ErrEngineClosed. _, got := sh.CreateIterator(influxql.IteratorOptions{}) if exp := tsdb.ErrEngineClosed; got != exp { t.Fatalf("got %v, expected %v", got, exp) } if err := sh.Open(); err != nil { t.Fatal(err) } defer sh.Close() sh.MustWritePointsString(` cpu,host=serverA,region=uswest value=100 0 cpu,host=serverA,region=uswest value=50,val2=5 10 cpu,host=serverB,region=uswest value=25 0 `) // Create iterator. itr, err := sh.CreateIterator(influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Aux: []string{"val2"}, Dimensions: []string{"host"}, Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, Ascending: false, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) } defer itr.Close() fitr := itr.(influxql.FloatIterator) // Read values from iterator. if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(0): %s", err) } else if !deep.Equal(p, &influxql.FloatPoint{ Name: "cpu", Tags: influxql.NewTags(map[string]string{"host": "serverB"}), Time: time.Unix(0, 0).UnixNano(), Value: 25, Aux: []interface{}{(*float64)(nil)}, }) { t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) } if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(1): %s", err) } else if !deep.Equal(p, &influxql.FloatPoint{ Name: "cpu", Tags: influxql.NewTags(map[string]string{"host": "serverA"}), Time: time.Unix(10, 0).UnixNano(), Value: 50, Aux: []interface{}{float64(5)}, }) { t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) } if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(2): %s", err) } else if !deep.Equal(p, &influxql.FloatPoint{ Name: "cpu", Tags: influxql.NewTags(map[string]string{"host": "serverA"}), Time: time.Unix(0, 0).UnixNano(), Value: 100, Aux: []interface{}{(*float64)(nil)}, }) { t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) } }
// Ensure the store can back up a shard and another store can restore it. func TestStore_BackupRestoreShard(t *testing.T) { s0, s1 := MustOpenStore(), MustOpenStore() defer s0.Close() defer s1.Close() // Create shard with data. s0.MustCreateShardWithData("db0", "rp0", 100, `cpu value=1 0`, `cpu value=2 10`, `cpu value=3 20`, ) // Backup shard to a buffer. var buf bytes.Buffer if err := s0.BackupShard(100, time.Time{}, &buf); err != nil { t.Fatal(err) } // Create the shard on the other store and restore from buffer. if err := s1.CreateShard("db0", "rp0", 100); err != nil { t.Fatal(err) } if err := s1.RestoreShard(100, &buf); err != nil { t.Fatal(err) } // Read data from the restored shard. itr, err := s1.Shard(100).CreateIterator(influxql.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Sources: []influxql.Source{&influxql.Measurement{ Name: "cpu", Database: "db0", RetentionPolicy: "rp0", }}, Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) } fitr := itr.(influxql.FloatIterator) // Read values from iterator. p, e := fitr.Next() if e != nil { t.Fatal(e) } if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Time: time.Unix(0, 0).UnixNano(), Value: 1}) { t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) } p, e = fitr.Next() if e != nil { t.Fatal(e) } if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Time: time.Unix(10, 0).UnixNano(), Value: 2}) { t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) } p, e = fitr.Next() if e != nil { t.Fatal(e) } if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Time: time.Unix(20, 0).UnixNano(), Value: 3}) { t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) } }