func TestParsePointWithStringWithCommas(t *testing.T) {
	// escaped comma
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": 1.0,
				"str":   `foo\,bar`, // commas in string value
			},
			time.Unix(1, 0)),
	)

	// non-escaped comma
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": 1.0,
				"str":   "foo,bar", // commas in string value
			},
			time.Unix(1, 0)),
	)
}
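// The test helper called by the parser tests in this section is not defined
// here. The following is a minimal sketch, assuming the helper parses the
// line with tsdb.ParsePointsString and compares string renderings; the real
// helper may differ.
func test(t *testing.T, line string, point tsdb.Point) {
	pts, err := tsdb.ParsePointsString(line)
	if err != nil {
		t.Fatalf("ParsePoints(%q) failed: %v", line, err)
	}
	if exp := 1; len(pts) != exp {
		t.Fatalf("ParsePoints(%q) len mismatch: got %v, exp %v", line, len(pts), exp)
	}
	// Compare via String() so NaN field values (where NaN != NaN) still match.
	if exp := point.String(); pts[0].String() != exp {
		t.Errorf("ParsePoints(%q) mismatch:\ngot %v\nexp %v", line, pts[0].String(), exp)
	}
}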
// Ensure the engine can write points to storage.
func TestEngine_WritePoints_PointsWriter(t *testing.T) {
	e := OpenDefaultEngine()
	defer e.Close()

	// Points to be inserted.
	points := []tsdb.Point{
		tsdb.NewPoint("cpu", tsdb.Tags{}, tsdb.Fields{}, time.Unix(0, 1)),
		tsdb.NewPoint("cpu", tsdb.Tags{}, tsdb.Fields{}, time.Unix(0, 0)),
		tsdb.NewPoint("cpu", tsdb.Tags{}, tsdb.Fields{}, time.Unix(1, 0)),
		tsdb.NewPoint("cpu", tsdb.Tags{"host": "serverA"}, tsdb.Fields{}, time.Unix(0, 0)),
	}

	// Mock points writer to ensure points are passed through.
	var invoked bool
	e.PointsWriter.WritePointsFn = func(a []tsdb.Point) error {
		invoked = true
		if !reflect.DeepEqual(points, a) {
			t.Fatalf("unexpected points: %#v", a)
		}
		return nil
	}

	// Write points against two separate series.
	if err := e.WritePoints(points, nil, nil); err != nil {
		t.Fatal(err)
	} else if !invoked {
		t.Fatal("PointsWriter.WritePoints() not called")
	}
}
func TestParsePointWithStringField(t *testing.T) {
	test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`,
		tsdb.NewPoint("cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": 1.0,
				"str":   "foo",
				"str2":  "bar",
			},
			time.Unix(1, 0)),
	)

	test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`,
		tsdb.NewPoint("cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"str": `foo " bar`,
			},
			time.Unix(1, 0)),
	)
}
func TestParsePointEscapedStringsAndCommas(t *testing.T) {
	// non-escaped comma and quotes
	test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": `{Hello"{,}" World}`,
			},
			time.Unix(1, 0)),
	)

	// escaped comma and quotes
	test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{
				"host":   "serverA",
				"region": "us-east",
			},
			tsdb.Fields{
				"value": `{Hello"{\,}" World}`,
			},
			time.Unix(1, 0)),
	)
}
func TestParsePointsUnbalancedQuotedTags(t *testing.T) {
	pts, err := tsdb.ParsePointsString("baz,mytag=\"a x=1 1441103862125\nbaz,mytag=a z=1 1441103862126")
	if err != nil {
		t.Fatalf("ParsePoints failed: %v", err)
	}

	if exp := 2; len(pts) != exp {
		t.Fatalf("ParsePoints count mismatch. got %v, exp %v", len(pts), exp)
	}

	// Expected " in the tag value
	exp := tsdb.NewPoint("baz", tsdb.Tags{"mytag": `"a`},
		tsdb.Fields{"x": float64(1)}, time.Unix(0, 1441103862125))
	if pts[0].String() != exp.String() {
		t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[0].String(), exp.String())
	}

	// Expected two points to ensure we did not overscan the line
	exp = tsdb.NewPoint("baz", tsdb.Tags{"mytag": `a`},
		tsdb.Fields{"z": float64(1)}, time.Unix(0, 1441103862126))
	if pts[1].String() != exp.String() {
		t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[1].String(), exp.String())
	}
}
func TestDropMeasurementStatement(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)
	pt2 := tsdb.NewPoint(
		"memory",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []tsdb.Point{pt, pt2}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("show series", executor)
	expected := `[{"series":[{"name":"cpu","columns":["_key","host"],"values":[["cpu,host=server","server"]]},{"name":"memory","columns":["_key","host"],"values":[["memory,host=server","server"]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop measurement memory", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	validateDrop := func() {
		got = executeAndGetJSON("show series", executor)
		expected = `[{"series":[{"name":"cpu","columns":["_key","host"],"values":[["cpu,host=server","server"]]}]}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
		got = executeAndGetJSON("show measurements", executor)
		expected = `[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
		got = executeAndGetJSON("select * from memory", executor)
		expected = `[{}]`
		if expected != got {
			t.Fatalf("exp: %s\ngot: %s", expected, got)
		}
	}

	validateDrop()
	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store
	validateDrop()
}
func TestNewPointNaN(t *testing.T) {
	test(t, `cpu value=NaN 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": math.NaN(),
			},
			time.Unix(1, 0)),
	)

	test(t, `cpu value=nAn 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": math.NaN(),
			},
			time.Unix(1, 0)),
	)

	test(t, `nan value=NaN`,
		tsdb.NewPoint(
			"nan",
			tsdb.Tags{},
			tsdb.Fields{
				"value": math.NaN(),
			},
			time.Unix(0, 0)),
	)
}
func TestShardWriteAddNewField(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex()
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	pt = tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0, "value2": 2.0},
		time.Unix(1, 2),
	)

	err = sh.WritePoints([]tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	if !reflect.DeepEqual(index.Names(), []string{"cpu"}) {
		t.Fatalf("measurement names in shard didn't match")
	}
	if index.SeriesN() != 1 {
		t.Fatalf("series wasn't in index")
	}
	seriesTags := index.Series(string(pt.Key())).Tags
	if len(seriesTags) != len(pt.Tags()) || pt.Tags()["host"] != seriesTags["host"] {
		t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
	}
	if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
		t.Fatalf("tag key wasn't saved to measurement index")
	}
	if len(index.Measurement("cpu").FieldNames()) != 2 {
		t.Fatalf("field names weren't saved to measurement index")
	}
}
func TestShardMapper_WriteAndSingleMapperRawQueryMultiValue(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	shard := mustCreateShard(tmpDir)

	pt1time := time.Unix(1, 0).UTC()
	pt1 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverA", "region": "us-east"},
		map[string]interface{}{"foo": 42, "bar": 43},
		pt1time,
	)
	pt2time := time.Unix(2, 0).UTC()
	pt2 := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "serverB", "region": "us-east"},
		map[string]interface{}{"foo": 60, "bar": 61},
		pt2time,
	)
	err := shard.WritePoints([]tsdb.Point{pt1, pt2})
	if err != nil {
		t.Fatalf(err.Error())
	}

	var tests = []struct {
		stmt      string
		chunkSize int
		expected  []string
	}{
		{
			stmt:     `SELECT foo FROM cpu`,
			expected: []string{`{"name":"cpu","fields":["foo"],"values":[{"time":1000000000,"value":42,"tags":{"host":"serverA","region":"us-east"}},{"time":2000000000,"value":60,"tags":{"host":"serverB","region":"us-east"}}]}`},
		},
		{
			stmt:     `SELECT foo,bar FROM cpu`,
			expected: []string{`{"name":"cpu","fields":["bar","foo"],"values":[{"time":1000000000,"value":{"bar":43,"foo":42},"tags":{"host":"serverA","region":"us-east"}},{"time":2000000000,"value":{"bar":61,"foo":60},"tags":{"host":"serverB","region":"us-east"}}]}`},
		},
	}

	for _, tt := range tests {
		stmt := mustParseSelectStatement(tt.stmt)
		mapper := openRawMapperOrFail(t, shard, stmt, tt.chunkSize)

		for i, s := range tt.expected {
			got := nextRawChunkAsJson(t, mapper)
			if got != s {
				t.Errorf("test '%s'\n\tgot %s\n\texpected %s", tt.stmt, got, tt.expected[i])
				break
			}
		}
	}
}
func TestWritePointsAndExecuteQuery(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	// Write first point.
	if err := store.WriteToShard(shardID, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)}); err != nil {
		t.Fatalf(err.Error())
	}

	// Write second point.
	if err := store.WriteToShard(shardID, []tsdb.Point{tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(2, 3),
	)}); err != nil {
		t.Fatalf(err.Error())
	}

	got := executeAndGetJSON("SELECT * FROM cpu", executor)
	expected := `[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["1970-01-01T00:00:01.000000002Z","server",1],["1970-01-01T00:00:02.000000003Z","server",1]]}]}]`
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected = `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1],["1970-01-01T00:00:02.000000003Z",1]]}]}]`
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	if err := store.Open(); err != nil {
		t.Fatalf(err.Error())
	}
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	if expected != got {
		t.Fatalf("\nexp: %s\ngot: %s", expected, got)
	}
}
// Ensure the shard writer returns an error when dialing times out.
func TestShardWriter_Write_ErrDialTimeout(t *testing.T) {
	ts := newTestWriteService(writeShardSuccess)
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = ts
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Nanosecond)
	w.MetaStore = &metaStore{host: ts.ln.Addr().String()}

	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []tsdb.Point
	points = append(points, tsdb.NewPoint(
		"cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
	))

	if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) {
		t.Fatalf("expected error %v, to contain %s", err, exp)
	}
}
// Ensure the shard writer returns an error when the server fails to accept the write.
func TestShardWriter_WriteShard_Error(t *testing.T) {
	ts := newTestWriteService(writeShardFail)
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = ts
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Minute)
	w.MetaStore = &metaStore{host: ts.ln.Addr().String()}

	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []tsdb.Point
	points = append(points, tsdb.NewPoint(
		"cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
	))

	if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" {
		t.Fatalf("unexpected error: %v", err)
	}
}
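// The writeShardSuccess and writeShardFail handlers installed via
// newTestWriteService are not shown in this section. A minimal sketch,
// assuming the test service forwards (shardID, points) to the configured
// function; the "failed to write" message matches the error string the
// test above expects. The real handlers may also record received points.
func writeShardSuccess(shardID uint64, points []tsdb.Point) error {
	return nil
}

func writeShardFail(shardID uint64, points []tsdb.Point) error {
	return fmt.Errorf("failed to write")
}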
func (w *WriteShardRequest) unmarshalPoints() []tsdb.Point {
	points := make([]tsdb.Point, len(w.pb.GetPoints()))
	for i, p := range w.pb.GetPoints() {
		pt := tsdb.NewPoint(
			p.GetName(), map[string]string{},
			map[string]interface{}{}, time.Unix(0, p.GetTime()))

		for _, f := range p.GetFields() {
			n := f.GetName()
			if f.Int32 != nil {
				pt.AddField(n, f.GetInt32())
			} else if f.Int64 != nil {
				pt.AddField(n, f.GetInt64())
			} else if f.Float64 != nil {
				pt.AddField(n, f.GetFloat64())
			} else if f.Bool != nil {
				pt.AddField(n, f.GetBool())
			} else if f.String_ != nil {
				pt.AddField(n, f.GetString_())
			} else {
				pt.AddField(n, f.GetBytes())
			}
		}

		tags := tsdb.Tags{}
		for _, t := range p.GetTags() {
			tags[t.GetKey()] = t.GetValue()
		}
		pt.SetTags(tags)
		points[i] = pt
	}
	return points
}
func TestDropDatabase(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err != nil {
		t.Fatal(err)
	}

	got := executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	var name string
	me := &metaExec{fn: func(stmt influxql.Statement) *influxql.Result {
		name = stmt.(*influxql.DropDatabaseStatement).Name
		return &influxql.Result{}
	}}
	executor.MetaStatementExecutor = me

	// Verify the database is there on disk.
	dbPath := filepath.Join(store.Path(), "foo")
	if _, err := os.Stat(dbPath); err != nil {
		t.Fatalf("expected database dir %s to exist", dbPath)
	}

	got = executeAndGetJSON("drop database foo", executor)
	expected = `[{}]`
	if got != expected {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	if name != "foo" {
		t.Fatalf("expected the MetaStatementExecutor to be called with database name foo, but got %s", name)
	}

	if _, err := os.Stat(dbPath); !os.IsNotExist(err) {
		t.Fatalf("expected database dir %s to be gone", dbPath)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store
	executor.ShardMapper = &testShardMapper{store: store}

	if err := store.WriteToShard(shardID, []tsdb.Point{pt}); err == nil || err.Error() != "shard not found" {
		t.Fatalf("expected shard to not be found")
	}
}
func (p *Point) MarshalString() string {
	pt := tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time)
	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
		return pt.String()
	}
	return pt.PrecisionString(p.Precision)
}
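// A short usage sketch for MarshalString (the helper below is hypothetical;
// the Point field names are taken from the method above). With Precision
// set to "s", the timestamp is rendered in seconds rather than nanoseconds:
func examplePointMarshalString() string {
	p := &Point{
		Measurement: "cpu",
		Tags:        map[string]string{"host": "server01"},
		Fields:      map[string]interface{}{"value": 1.0},
		Time:        time.Unix(1, 0),
		Precision:   "s",
	}
	return p.MarshalString() // e.g. cpu,host=server01 value=1 1
}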
func TestParsePointToString(t *testing.T) {
	line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000`
	pts, err := tsdb.ParsePoints([]byte(line))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}
	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	got := pt.String()
	if line != got {
		t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line)
	}

	pt = tsdb.NewPoint("cpu", tsdb.Tags{"host": "serverA", "region": "us-east"},
		tsdb.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"},
		time.Unix(1, 0))
	got = pt.String()
	if line != got {
		t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line)
	}
}
func TestFilterMatchMultipleWildcards(t *testing.T) {
	p, err := graphite.NewParser([]string{
		"*.* .wrong.measurement*",
		"servers.* .host.measurement*", // should match this
		"servers.localhost .wrong.measurement*",
		"*.localhost .wrong.measurement*",
	}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "server01"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.server01.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
// benchmarkWritePoints benchmarks writing new series to a shard.
// mCnt - measurement count
// tkCnt - tag key count
// tvCnt - tag value count (values per tag)
// pntCnt - points per series. # of series = mCnt * (tvCnt ^ tkCnt)
func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
	// Generate test series (measurements + unique tag sets).
	series := genTestSeries(mCnt, tkCnt, tvCnt)
	// Create index for the shard to use.
	index := tsdb.NewDatabaseIndex()
	// Generate point data to write to the shard.
	points := []tsdb.Point{}
	for _, s := range series {
		for val := 0.0; val < float64(pntCnt); val++ {
			p := tsdb.NewPoint(s.Measurement, s.Series.Tags, map[string]interface{}{"value": val}, time.Now())
			points = append(points, p)
		}
	}

	// Stop & reset timers and mem-stats before the main benchmark loop.
	b.StopTimer()
	b.ResetTimer()

	// Run the benchmark loop.
	for n := 0; n < b.N; n++ {
		tmpDir, _ := ioutil.TempDir("", "shard_test")
		tmpShard := path.Join(tmpDir, "shard")
		tmpWal := path.Join(tmpDir, "wal")
		shard := tsdb.NewShard(1, index, tmpShard, tmpWal, tsdb.NewEngineOptions())
		shard.Open()

		b.StartTimer()
		// Call the function being benchmarked.
		chunkedWrite(shard, points)
		b.StopTimer()

		shard.Close()
		os.RemoveAll(tmpDir)
	}
}
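// chunkedWrite is called by the benchmark above but not defined in this
// section. A minimal sketch, assuming it writes the points in fixed-size
// batches (the 5000-point chunk size is an assumption):
func chunkedWrite(shard *tsdb.Shard, points []tsdb.Point) {
	nPts := len(points)
	chunkSz := 5000
	start := 0
	end := chunkSz

	for {
		if end > nPts {
			end = nPts
		}
		if start == end {
			break
		}
		shard.WritePoints(points[start:end])
		start = end
		end += chunkSz
	}
}

// A hypothetical wrapper showing how benchmarkWritePoints would typically be
// driven from a standard Go benchmark (the counts are illustrative):
func BenchmarkWritePoints_NewSeries_1K(b *testing.B) { benchmarkWritePoints(b, 38, 3, 3, 1) }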
// Parse performs Graphite parsing of a single line.
func (p *Parser) Parse(line string) (tsdb.Point, error) {
	// Break into 3 fields (name, value, timestamp).
	fields := strings.Fields(line)
	if len(fields) != 3 {
		return nil, fmt.Errorf("received %q which doesn't have three fields", line)
	}

	// Decode the name and tags.
	name, tags, err := p.DecodeNameAndTags(fields[0])
	if err != nil {
		return nil, err
	}

	// Parse value.
	v, err := strconv.ParseFloat(fields[1], 64)
	if err != nil {
		return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
	}

	fieldValues := make(map[string]interface{})
	fieldValues["value"] = v

	// Parse timestamp.
	unixTime, err := strconv.ParseFloat(fields[2], 64)
	if err != nil {
		return nil, fmt.Errorf(`field "%s" time: %s`, fields[0], err)
	}

	// Check if we have fractional seconds.
	timestamp := time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))

	point := tsdb.NewPoint(name, tags, fieldValues, timestamp)

	return point, nil
}
func TestParsePointWithTags(t *testing.T) {
	test(t,
		"cpu,host=serverA,region=us-east value=1.0 1000000000",
		tsdb.NewPoint("cpu",
			tsdb.Tags{"host": "serverA", "region": "us-east"},
			tsdb.Fields{"value": 1.0},
			time.Unix(1, 0)))
}
// convertRowToPoints will convert a query result Row into Points that can be written back in.
// Used for continuous and INTO queries.
func (s *Service) convertRowToPoints(measurementName string, row *influxql.Row) ([]tsdb.Point, error) {
	// Figure out which parts of the result are the time and which are the fields.
	timeIndex := -1
	fieldIndexes := make(map[string]int)
	for i, c := range row.Columns {
		if c == "time" {
			timeIndex = i
		} else {
			fieldIndexes[c] = i
		}
	}

	if timeIndex == -1 {
		return nil, errors.New("error finding time index in result")
	}

	points := make([]tsdb.Point, 0, len(row.Values))
	for _, v := range row.Values {
		vals := make(map[string]interface{})
		for fieldName, fieldIndex := range fieldIndexes {
			vals[fieldName] = v[fieldIndex]
		}

		p := tsdb.NewPoint(measurementName, row.Tags, vals, v[timeIndex].(time.Time))
		points = append(points, p)
	}

	return points, nil
}
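// A short usage sketch for convertRowToPoints. The helper and the row
// literal are hypothetical; the Columns, Values, and Tags field names follow
// their use above, while Name is an assumption about influxql.Row:
func exampleConvertRow(s *Service) ([]tsdb.Point, error) {
	row := &influxql.Row{
		Name:    "cpu",
		Tags:    map[string]string{"host": "server01"},
		Columns: []string{"time", "mean"},
		Values: [][]interface{}{
			{time.Unix(1, 0).UTC(), 42.0},
		},
	}
	// Each value row becomes one point tagged with the row's tags.
	return s.convertRowToPoints("cpu_mean", row)
}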
// storeStatistics writes the statistics to an InfluxDB system.
func (m *Monitor) storeStatistics() {
	defer m.wg.Done()

	m.Logger.Printf("Storing statistics in database '%s' retention policy '%s', at interval %s",
		m.storeDatabase, m.storeRetentionPolicy, m.storeInterval)
	if err := m.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
		m.Logger.Printf("failed to detect a cluster leader, terminating storage: %s", err.Error())
		return
	}

	// Get cluster-level metadata. Nothing different is going to happen if errors occur.
	clusterID, _ := m.MetaStore.ClusterID()
	nodeID := m.MetaStore.NodeID()
	hostname, _ := os.Hostname()
	clusterTags := map[string]string{
		"clusterID": fmt.Sprintf("%d", clusterID),
		"nodeID":    fmt.Sprintf("%d", nodeID),
		"hostname":  hostname,
	}

	if _, err := m.MetaStore.CreateDatabaseIfNotExists(m.storeDatabase); err != nil {
		m.Logger.Printf("failed to create database '%s', terminating storage: %s",
			m.storeDatabase, err.Error())
		return
	}

	tick := time.NewTicker(m.storeInterval)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			stats, err := m.Statistics(clusterTags)
			if err != nil {
				m.Logger.Printf("failed to retrieve registered statistics: %s", err)
				continue
			}

			points := make(tsdb.Points, 0, len(stats))
			for _, s := range stats {
				points = append(points, tsdb.NewPoint(s.Name, s.Tags, s.Values, time.Now()))
			}

			err = m.PointsWriter.WritePoints(&cluster.WritePointsRequest{
				Database:         m.storeDatabase,
				RetentionPolicy:  m.storeRetentionPolicy,
				ConsistencyLevel: cluster.ConsistencyLevelOne,
				Points:           points,
			})
			if err != nil {
				m.Logger.Printf("failed to store statistics: %s", err)
			}
		case <-m.done:
			m.Logger.Printf("terminating storage of statistics")
			return
		}
	}
}
// storeStatistics writes the statistics to an InfluxDB system.
func (m *Monitor) storeStatistics() {
	defer m.wg.Done()

	m.Logger.Printf("Storing statistics in database '%s' retention policy '%s', at interval %s",
		m.storeDatabase, m.storeRetentionPolicy, m.storeInterval)
	if err := m.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil {
		m.Logger.Printf("failed to detect a cluster leader, terminating storage: %s", err.Error())
		return
	}

	if _, err := m.MetaStore.CreateDatabaseIfNotExists(m.storeDatabase); err != nil {
		m.Logger.Printf("failed to create database '%s', terminating storage: %s",
			m.storeDatabase, err.Error())
		return
	}

	rpi := meta.NewRetentionPolicyInfo(m.storeRetentionPolicy)
	rpi.Duration = m.storeRetentionDuration
	rpi.ReplicaN = m.storeReplicationFactor
	if _, err := m.MetaStore.CreateRetentionPolicyIfNotExists(m.storeDatabase, rpi); err != nil {
		m.Logger.Printf("failed to create retention policy '%s', terminating storage: %s",
			m.storeRetentionPolicy, err.Error())
		return
	}

	tick := time.NewTicker(m.storeInterval)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			stats, err := m.Statistics()
			if err != nil {
				m.Logger.Printf("failed to retrieve registered statistics: %s", err)
				continue
			}

			points := make(tsdb.Points, 0, len(stats))
			for _, s := range stats {
				points = append(points, tsdb.NewPoint(s.Name, s.Tags, s.Values, time.Now()))
			}

			err = m.PointsWriter.WritePoints(&cluster.WritePointsRequest{
				Database:         m.storeDatabase,
				RetentionPolicy:  m.storeRetentionPolicy,
				ConsistencyLevel: cluster.ConsistencyLevelOne,
				Points:           points,
			})
			if err != nil {
				m.Logger.Printf("failed to store statistics: %s", err)
			}
		case <-m.done:
			m.Logger.Printf("terminating storage of statistics")
			return
		}
	}
}
func TestNewPointUnhandledType(t *testing.T) {
	// nil value
	pt := tsdb.NewPoint("cpu", nil, tsdb.Fields{"value": nil}, time.Unix(0, 0))
	if exp := `cpu value= 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// unsupported type gets stored as string
	now := time.Unix(0, 0).UTC()
	pt = tsdb.NewPoint("cpu", nil, tsdb.Fields{"value": now}, time.Unix(0, 0))
	if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	if exp := "1970-01-01 00:00:00 +0000 UTC"; pt.Fields()["value"] != exp {
		t.Errorf("NewPoint().Fields() mismatch.\ngot %v\nexp %v", pt.Fields()["value"], exp)
	}
}
func TestNewPointEscaped(t *testing.T) {
	// commas
	pt := tsdb.NewPoint("cpu,main", tsdb.Tags{"tag,bar": "value"}, tsdb.Fields{"name,bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// spaces
	pt = tsdb.NewPoint("cpu main", tsdb.Tags{"tag bar": "value"}, tsdb.Fields{"name bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}

	// equals
	pt = tsdb.NewPoint("cpu=main", tsdb.Tags{"tag=bar": "value=foo"}, tsdb.Fields{"name=bar": 1.0}, time.Unix(0, 0))
	if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp {
		t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp)
	}
}
// Parse performs Graphite parsing of a single line.
func (p *Parser) Parse(line string) (tsdb.Point, error) {
	// Break into 3 fields (name, value, timestamp).
	fields := strings.Fields(line)
	if len(fields) != 2 && len(fields) != 3 {
		return nil, fmt.Errorf("received %q which doesn't have required fields", line)
	}

	// Decode the name and tags.
	matcher := p.matcher.Match(fields[0])
	measurement, tags := matcher.Apply(fields[0])

	// Could not extract measurement, use the raw value.
	if measurement == "" {
		measurement = fields[0]
	}

	// Parse value.
	v, err := strconv.ParseFloat(fields[1], 64)
	if err != nil {
		return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
	}

	fieldValues := map[string]interface{}{"value": v}

	// If no 3rd field, use now as timestamp.
	timestamp := time.Now().UTC()

	if len(fields) == 3 {
		// Parse timestamp.
		unixTime, err := strconv.ParseFloat(fields[2], 64)
		if err != nil {
			return nil, fmt.Errorf(`field "%s" time: %s`, fields[0], err)
		}

		// -1 is a special value that gets converted to current UTC time.
		// See https://github.com/graphite-project/carbon/issues/54
		if unixTime != float64(-1) {
			// Check if we have fractional seconds.
			timestamp = time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))
			if timestamp.Before(MinDate) || timestamp.After(MaxDate) {
				return nil, fmt.Errorf("timestamp out of range")
			}
		}
	}

	// Set the default tags on the point if they are not already set.
	for k, v := range p.tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}

	point := tsdb.NewPoint(measurement, tags, fieldValues, timestamp)

	return point, nil
}
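// A minimal illustration of the fractional-seconds handling above (this
// helper is hypothetical): a timestamp of 1435077219.5 splits into
// 1435077219 whole seconds plus 500000000 nanoseconds.
func exampleFractionalTimestamp() time.Time {
	unixTime := 1435077219.5
	return time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))
}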
func TestDropSeriesStatement(t *testing.T) {
	store, executor := testStoreAndExecutor("")
	defer os.RemoveAll(store.Path())

	pt := tsdb.NewPoint(
		"cpu",
		map[string]string{"host": "server"},
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := store.WriteToShard(shardID, []tsdb.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	got := executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected := `[{"series":[{"name":"cpu","tags":{"host":"server"},"columns":["time","value"],"values":[["1970-01-01T00:00:01.000000002Z",1]]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("drop series from cpu", executor)

	got = executeAndGetJSON("SELECT * FROM cpu GROUP BY *", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{"series":[{"name":"cpu","columns":["tagKey"]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	store.Close()
	conf := store.EngineOptions.Config
	store = tsdb.NewStore(store.Path())
	store.EngineOptions.Config = conf
	store.Open()
	executor.Store = store

	got = executeAndGetJSON("select * from cpu", executor)
	expected = `[{}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}

	got = executeAndGetJSON("show tag keys from cpu", executor)
	expected = `[{"series":[{"name":"cpu","columns":["tagKey"]}]}]`
	if expected != got {
		t.Fatalf("exp: %s\ngot: %s", expected, got)
	}
}
func TestNewPointLargeInteger(t *testing.T) {
	test(t, `cpu value=6632243i 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": 6632243, // if incorrectly encoded as a float, it would show up as 6.632243e+06
			},
			time.Unix(1, 0)),
	)
}
func TestNewPointFloatScientific(t *testing.T) {
	test(t, `cpu value=6.632243e+06 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": float64(6632243),
			},
			time.Unix(1, 0)),
	)
}
func TestNewPointFloatNoDecimal(t *testing.T) {
	test(t, `cpu value=1. 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": 1.0,
			},
			time.Unix(1, 0)),
	)
}