Example #1
func BenchmarkParsePointsTagsSorted2(b *testing.B) {
	line := `cpu,host=serverA,region=us-west value=1i 1000000000`
	b.SetBytes(int64(len(line)))
	for i := 0; i < b.N; i++ {
		models.ParsePoints([]byte(line))
	}
}
Example #2
func TestParsePointToString(t *testing.T) {
	line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000`
	pts, err := models.ParsePoints([]byte(line))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}
	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	got := pt.String()
	if line != got {
		t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line)
	}

	pt = models.MustNewPoint("cpu", models.Tags{"host": "serverA", "region": "us-east"},
		models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"},
		time.Unix(1, 0))

	got = pt.String()
	if line != got {
		t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line)
	}
}
Example #3
func BenchmarkParsePointNoTags(b *testing.B) {
	line := `cpu value=1i 1000000000`
	b.SetBytes(int64(len(line)))
	for i := 0; i < b.N; i++ {
		models.ParsePoints([]byte(line))
	}
}
Example #4
// parser() reads all incoming messages from the consumer and parses them into
// InfluxDB metric points.
func (k *Kafka) parser() {
	for {
		select {
		case <-k.done:
			return
		case err := <-k.errs:
			log.Printf("Kafka Consumer Error: %s\n", err.Error())
		case msg := <-k.in:
			points, err := models.ParsePoints(msg.Value)
			if err != nil {
				log.Printf("Could not parse kafka message: %s, error: %s",
					string(msg.Value), err.Error())
			}
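			// Note: a parse failure is only logged; execution falls through, so
			// the offset is still committed below and the bad message is not
			// re-consumed.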

			for _, point := range points {
				select {
				case k.pointChan <- point:
					continue
				default:
					log.Printf("Kafka Consumer buffer is full, dropping a point." +
						" You may want to increase the point_buffer setting")
				}
			}

			if !k.doNotCommitMsgs {
				// TODO(cam) this locking can be removed if this PR gets merged:
				// https://github.com/wvanbergen/kafka/pull/84
				k.Lock()
				k.Consumer.CommitUpto(msg)
				k.Unlock()
			}
		}
	}
}
Example #5
func (s *Service) processPackets() {
	defer s.wg.Done()

	for p := range s.packets {
		points, err := models.ParsePoints(p)
		if err != nil {
			s.statMap.Add(statPointsParseFail, 1)
			s.Logger.Printf("E! Failed to parse points: %s", err)
			continue
		}

		if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{
			Database:         s.config.Database,
			RetentionPolicy:  s.config.RetentionPolicy,
			ConsistencyLevel: cluster.ConsistencyLevelOne,
			Points:           points,
		}); err == nil {
			s.statMap.Add(statPointsTransmitted, int64(len(points)))
		} else {
			s.Logger.Printf("E! failed to write points to database %q: %s", s.config.Database, err)
			s.statMap.Add(statTransmitFail, 1)
		}

		s.statMap.Add(statPointsReceived, int64(len(points)))
	}
}
Example #6
func BenchmarkParsePointsTagsSorted10(b *testing.B) {
	line := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000`
	b.SetBytes(int64(len(line)))
	for i := 0; i < b.N; i++ {
		models.ParsePoints([]byte(line))
	}
}
Example #7
func (p *Processor) unmarshalWrite(b []byte) (uint64, []models.Point, error) {
	if len(b) < 8 {
		return 0, nil, fmt.Errorf("too short: len = %d", len(b))
	}
	ownerID := binary.BigEndian.Uint64(b[:8])
	points, err := models.ParsePoints(b[8:])
	return ownerID, points, err
}
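The write side of this exchange would presumably produce the same framing: an 8-byte big-endian owner ID followed by newline-separated line protocol. A minimal sketch of that counterpart (marshalWrite is hypothetical, not part of the source):

func marshalWrite(ownerID uint64, points []models.Point) []byte {
	// 8-byte big-endian owner ID header.
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, ownerID)
	// Newline-separated line protocol, matching what ParsePoints expects.
	for i, p := range points {
		if i > 0 {
			b = append(b, '\n')
		}
		b = append(b, p.String()...)
	}
	return b
}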
Example #8
func BenchmarkParsePointsTagsUnSorted5(b *testing.B) {
	line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c value=1i 1000000000`
	b.SetBytes(int64(len(line)))
	for i := 0; i < b.N; i++ {
		pt, _ := models.ParsePoints([]byte(line))
		pt[0].Key()
	}
}
Example #9
func TestStoreEnsureSeriesPersistedInNewShards(t *testing.T) {
	dir, err := ioutil.TempDir("", "store_test")
	if err != nil {
		t.Fatalf("Store.Open() failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(dir)

	s := tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	if err := s.CreateShard("foo", "default", 1); err != nil {
		t.Fatalf("error creating shard: %v", err)
	}

	p, _ := models.ParsePoints([]byte("cpu val=1"))
	if err := s.WriteToShard(1, p); err != nil {
		t.Fatalf("error writing to shard: %v", err)
	}

	if err := s.CreateShard("foo", "default", 2); err != nil {
		t.Fatalf("error creating shard: %v", err)
	}

	if err := s.WriteToShard(2, p); err != nil {
		t.Fatalf("error writing to shard: %v", err)
	}

	d := s.DatabaseIndex("foo")
	if d == nil {
		t.Fatal("expected to have database index for foo")
	}
	if d.Series("cpu") == nil {
		t.Fatal("expected series cpu to be in the index")
	}

	// Delete the first shard, close the store, reopen it, and confirm the series is still in the index.
	s.DeleteShard(1)
	s.Close()

	s = tsdb.NewStore(dir)
	s.EngineOptions.Config.WALDir = filepath.Join(dir, "wal")
	if err := s.Open(); err != nil {
		t.Fatalf("Store.Open() failed: %v", err)
	}

	d = s.DatabaseIndex("foo")
	if d == nil {
		t.Fatal("expected to have database index for foo")
	}
	if d.Series("cpu") == nil {
		t.Fatal("expected series cpu to be in the index")
	}
}
Example #10
func (w *WriteShardRequest) unmarshalPoints() []models.Point {
	points := make([]models.Point, len(w.pb.GetPoints()))
	for i, p := range w.pb.GetPoints() {
		pt, err := models.ParsePoints(p)
		if err != nil {
			// An error here means that one node parsed the point correctly but sent an
			// unparseable version to another node. We could log and drop the point and allow
			// anti-entropy to resolve the discrepancy, but this shouldn't ever happen.
			panic(fmt.Sprintf("failed to parse point: `%v`: %v", string(p), err))
		}
		points[i] = pt[0]
	}
	return points
}
Example #11
func TestParsePointKeyUnsorted(t *testing.T) {
	pts, err := models.ParsePoints([]byte("cpu,last=1,first=2 value=1i"))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}

	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	if exp := "cpu,first=2,last=1"; string(pt.Key()) != exp {
		t.Errorf("ParsePoint key not sorted. got %v, exp %v", string(pt.Key()), exp)
	}
}
Example #12
func emitMetrics(k *Kafka, acc plugins.Accumulator, metricConsumer <-chan []byte) error {
	timeout := time.After(1 * time.Second)

	for {
		select {
		case batch := <-metricConsumer:
			var points []models.Point
			var err error
			if points, err = models.ParsePoints(batch); err != nil {
				return err
			}

			for _, point := range points {
				acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
			}
		case <-timeout:
			return nil
		}
	}
}
Example #13
func (s *Service) parser() {
	defer s.wg.Done()

	for {
		select {
		case <-s.done:
			return
		case buf := <-s.parserChan:
			points, err := models.ParsePoints(buf)
			if err != nil {
				s.statMap.Add(statPointsParseFail, 1)
				s.Logger.Printf("Failed to parse points: %s", err)
				continue
			}

			for _, point := range points {
				s.batcher.In() <- point
			}
			s.statMap.Add(statPointsReceived, int64(len(points)))
		}
	}
}
Example #14
func TestBasicPointGenerator_Generate(t *testing.T) {
	ps, err := basicPG.Generate()
	if err != nil {
		t.Error(err)
	}

	var buf bytes.Buffer

	for p := range ps {
		b := p.Line()

		buf.Write(b)
		buf.Write([]byte("\n"))
	}

	bs := buf.Bytes()
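	// Drop the trailing newline added after the final point.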
	bs = bs[0 : len(bs)-1]

	_, err = models.ParsePoints(bs)
	if err != nil {
		t.Error(err)
	}
}
Example #15
func TestParsePointIntsFloats(t *testing.T) {
	pts, err := models.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`))
	if err != nil {
		t.Fatalf(`ParsePoints() failed. got %s`, err)
	}

	if exp := 1; len(pts) != exp {
		t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp)
	}
	pt := pts[0]

	if _, ok := pt.Fields()["int"].(int64); !ok {
		t.Errorf("ParsePoint() int field mismatch: got %T, exp %T", pt.Fields()["int"], int64(10))
	}

	if _, ok := pt.Fields()["float"].(float64); !ok {
		t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", pt.Fields()["float64"], float64(11.0))
	}

	if _, ok := pt.Fields()["float2"].(float64); !ok {
		t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", pt.Fields()["float64"], float64(12.1))
	}
}
Example #16
func (s *Service) serve() {
	defer s.wg.Done()

	s.batcher.Start()
	for {
		buf := make([]byte, UDPBufferSize)

		select {
		case <-s.done:
			// We closed the connection, time to go.
			return
		default:
			// Keep processing.
		}

		n, _, err := s.conn.ReadFromUDP(buf)
		if err != nil {
			s.statMap.Add(statReadFail, 1)
			s.Logger.Printf("Failed to read UDP message: %s", err)
			continue
		}
		s.statMap.Add(statBytesReceived, int64(n))

		points, err := models.ParsePoints(buf[:n])
		if err != nil {
			s.statMap.Add(statPointsParseFail, 1)
			s.Logger.Printf("Failed to parse points: %s", err)
			continue
		}

		for _, point := range points {
			s.batcher.In() <- point
		}
		s.statMap.Add(statPointsReceived, int64(len(points)))
	}
}
Example #17
func (l *Log) readFileToCache(fileName string) error {
	f, err := os.OpenFile(fileName, os.O_RDONLY, 0666)
	if err != nil {
		return err
	}
	defer f.Close()

	buf := make([]byte, writeBufLen)
	data := make([]byte, writeBufLen)
	for {
		// read the type and the length of the entry
		_, err := io.ReadFull(f, buf[0:5])
		if err == io.EOF {
			return nil
		} else if err != nil {
			l.logger.Printf("error reading segment file %s: %s", fileName, err.Error())
			return err
		}
		entryType := buf[0]
		length := btou32(buf[1:5])

		// read the compressed block and decompress it
		if int(length) > len(buf) {
			buf = make([]byte, length)
		}
		_, err = io.ReadFull(f, buf[0:length])
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			l.logger.Printf("hit end of file while reading compressed wal entry from %s", fileName)
			return nil
		} else if err != nil {
			return err
		}
		data, err = snappy.Decode(data, buf[0:length])
		if err != nil {
			l.logger.Printf("error decoding compressed entry from %s: %s", fileName, err.Error())
			return nil
		}

		// unmarshal the entry and apply it to the cache and index
		switch walEntryType(entryType) {
		case pointsEntry:
			points, err := models.ParsePoints(data)
			if err != nil {
				return err
			}
			l.addToCache(points, nil, nil, false)
		case fieldsEntry:
			fields := make(map[string]*tsdb.MeasurementFields)
			if err := json.Unmarshal(data, &fields); err != nil {
				return err
			}
			l.addToCache(nil, fields, nil, false)
		case seriesEntry:
			var series []*tsdb.SeriesCreate
			if err := json.Unmarshal(data, &series); err != nil {
				return err
			}
			l.addToCache(nil, nil, series, false)
		case deleteEntry:
			d := &deleteData{}
			if err := json.Unmarshal(data, &d); err != nil {
				return err
			}
			l.IndexWriter.MarkDeletes(d.Keys)
			l.IndexWriter.MarkMeasurementDelete(d.MeasurementName)
			l.deleteKeysFromCache(d.Keys)
			if d.MeasurementName != "" {
				l.deleteMeasurementFromCache(d.MeasurementName)
			}
		}
	}
}
Example #18
func TestParsePointWithDuplicateTags(t *testing.T) {
	_, err := models.ParsePoints([]byte(`cpu,host=serverA,host=serverB value=1i 1000000000`))
	if err == nil {
		t.Fatalf(`ParsePoints() expected an error for duplicate tags, got nil`)
	}
}
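All of the examples above share the same core pattern: hand ParsePoints a byte slice of newline-separated line protocol, check the error, then iterate the returned points. A minimal, self-contained sketch of that pattern (the import path assumes the github.com/influxdata/influxdb/models package and the API vintage used above, where Fields() returns the field map directly):

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/models"
)

func main() {
	// Two points in line protocol, separated by a newline.
	buf := []byte("cpu,host=serverA value=0.64 1000000000\n" +
		"mem,host=serverA used=2048i 1000000000")

	points, err := models.ParsePoints(buf)
	if err != nil {
		log.Fatalf("ParsePoints failed: %v", err)
	}

	for _, p := range points {
		fmt.Println(p.Name(), p.Tags(), p.Fields(), p.Time())
	}
}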