func BenchmarkParsePointsTagsSorted2(b *testing.B) {
	line := `cpu,host=serverA,region=us-west value=1i 1000000000`
	for i := 0; i < b.N; i++ {
		tsdb.ParsePoints([]byte(line))
		b.SetBytes(int64(len(line)))
	}
}
func BenchmarkParsePointsTagsSorted10(b *testing.B) {
	line := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000`
	for i := 0; i < b.N; i++ {
		tsdb.ParsePoints([]byte(line))
		b.SetBytes(int64(len(line)))
	}
}
func TestParsePointToString(t *testing.T) {
	line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000`
	pts, err := tsdb.ParsePoints([]byte(line))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}
	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	got := pt.String()
	if line != got {
		t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line)
	}

	pt = tsdb.NewPoint("cpu",
		tsdb.Tags{"host": "serverA", "region": "us-east"},
		tsdb.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"},
		time.Unix(1, 0))
	got = pt.String()
	if line != got {
		t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line)
	}
}
func BenchmarkParsePointNoTags(b *testing.B) {
	line := `cpu value=1i 1000000000`
	for i := 0; i < b.N; i++ {
		tsdb.ParsePoints([]byte(line))
		b.SetBytes(int64(len(line)))
	}
}
func BenchmarkParsePointsTagsSorted5(b *testing.B) {
	line := `cpu,env=prod,host=serverA,region=us-west,target=servers,zone=1c value=1 1000000000`
	for i := 0; i < b.N; i++ {
		tsdb.ParsePoints([]byte(line))
		b.SetBytes(int64(len(line)))
	}
}
func (s *Service) serve() {
	defer s.wg.Done()

	s.batcher.Start()
	for {
		buf := make([]byte, UDPBufferSize)

		select {
		case <-s.done:
			// We closed the connection, time to go.
			return
		default:
			// Keep processing.
		}

		n, _, err := s.conn.ReadFromUDP(buf)
		if err != nil {
			s.Logger.Printf("Failed to read UDP message: %s", err)
			continue
		}

		points, err := tsdb.ParsePoints(buf[:n])
		if err != nil {
			s.Logger.Printf("Failed to parse points: %s", err)
			continue
		}

		for _, point := range points {
			s.batcher.In() <- point
		}
	}
}
func BenchmarkParsePointsTagsUnSorted2(b *testing.B) {
	line := `cpu,region=us-west,host=serverA value=1i 1000000000`
	for i := 0; i < b.N; i++ {
		pt, _ := tsdb.ParsePoints([]byte(line))
		b.SetBytes(int64(len(line)))
		pt[0].Key()
	}
}
func BenchmarkParsePointsTagsUnSorted10(b *testing.B) {
	line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5 value=1 1000000000`
	for i := 0; i < b.N; i++ {
		pt, _ := tsdb.ParsePoints([]byte(line))
		b.SetBytes(int64(len(line)))
		pt[0].Key()
	}
}
func (p *Processor) unmarshalWrite(b []byte) (uint64, []tsdb.Point, error) {
	if len(b) < 8 {
		return 0, nil, fmt.Errorf("too short: len = %d", len(b))
	}
	ownerID := binary.BigEndian.Uint64(b[:8])
	points, err := tsdb.ParsePoints(b[8:])
	return ownerID, points, err
}
func TestStoreEnsureSeriesPersistedInNewShards(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if err := s.CreateShard("foo", "default", 1); err != nil {
		t.Fatalf("error creating shard: %v", err)
	}

	p, _ := tsdb.ParsePoints([]byte("cpu val=1"))
	if err := s.WriteToShard(1, p); err != nil {
		t.Fatalf("error writing to shard: %v", err)
	}

	if err := s.CreateShard("foo", "default", 2); err != nil {
		t.Fatalf("error creating shard: %v", err)
	}

	if err := s.WriteToShard(2, p); err != nil {
		t.Fatalf("error writing to shard: %v", err)
	}

	d := s.DatabaseIndex("foo")
	if d == nil {
		t.Fatal("expected to have database index for foo")
	}
	if d.Series("cpu") == nil {
		t.Fatal("expected series cpu to be in the index")
	}

	// delete the shard, close the store and reopen it and confirm the measurement is still there
	s.DeleteShard(1)
	s.Close()

	s = tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	d = s.DatabaseIndex("foo")
	if d == nil {
		t.Fatal("expected to have database index for foo")
	}
	if d.Series("cpu") == nil {
		t.Fatal("expected series cpu to be in the index")
	}
}
func (w *WriteShardRequest) unmarshalPoints() []tsdb.Point {
	points := make([]tsdb.Point, len(w.pb.GetPoints()))
	for i, p := range w.pb.GetPoints() {
		pt, err := tsdb.ParsePoints(p)
		if err != nil {
			// An error here means that one node parsed the point correctly but sent an
			// unparseable version to another node. We could log and drop the point and allow
			// anti-entropy to resolve the discrepancy, but this shouldn't ever happen.
			panic(fmt.Sprintf("failed to parse point: `%v`: %v", string(p), err))
		}
		points[i] = pt[0]
	}
	return points
}
func TestParsePointKeyUnsorted(t *testing.T) {
	pts, err := tsdb.ParsePoints([]byte("cpu,last=1,first=2 value=1i"))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}
	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	if exp := "cpu,first=2,last=1"; string(pt.Key()) != exp {
		t.Errorf("ParsePoint key not sorted. got %v, exp %v", string(pt.Key()), exp)
	}
}
func emitMetrics(k *Kafka, acc plugins.Accumulator, metricConsumer <-chan []byte) error {
	timeout := time.After(1 * time.Second)

	for {
		select {
		case batch := <-metricConsumer:
			var points []tsdb.Point
			var err error
			if points, err = tsdb.ParsePoints(batch); err != nil {
				return err
			}

			for _, point := range points {
				acc.AddValuesWithTime(point.Name(), point.Fields(), point.Tags(), point.Time())
			}
		case <-timeout:
			return nil
		}
	}
}
func TestParsePointIntsFloats(t *testing.T) {
	pts, err := tsdb.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}
	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	if _, ok := pt.Fields()["int"].(int64); !ok {
		t.Errorf("ParsePoint() int field mismatch: got %T, exp %T", pt.Fields()["int"], int64(10))
	}

	if _, ok := pt.Fields()["float"].(float64); !ok {
		t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", pt.Fields()["float"], float64(11.0))
	}

	if _, ok := pt.Fields()["float2"].(float64); !ok {
		t.Errorf("ParsePoint() float2 field mismatch: got %T, exp %T", pt.Fields()["float2"], float64(12.1))
	}
}
func TestParsePointWithDuplicateTags(t *testing.T) {
	_, err := tsdb.ParsePoints([]byte(`cpu,host=serverA,host=serverB value=1i 1000000000`))
	if err == nil {
		t.Fatalf(`ParsePoint() expected error. got nil`)
	}
}
func (p *Processor) unmarshalWrite(b []byte) (uint64, []tsdb.Point, error) {
	// The first 8 bytes hold the owner ID; the rest is line-protocol point data.
	// Unlike the bounds-checked variant above, this version assumes len(b) >= 8.
	ownerID := binary.BigEndian.Uint64(b[:8])
	points, err := tsdb.ParsePoints(b[8:])
	return ownerID, points, err
}