Example #1
func TestWriteTimeTag(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	pt := models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{}),
		map[string]interface{}{"time": 1.0},
		time.Unix(1, 2),
	)

	buf := bytes.NewBuffer(nil)
	sh.SetLogOutput(buf)
	if err := sh.WritePoints([]models.Point{pt}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	} else if got, exp := buf.String(), "dropping field 'time'"; !strings.Contains(got, exp) {
		t.Fatalf("unexpected log message: %s", strings.TrimSpace(got))
	}

	m := index.Measurement("cpu")
	if m != nil {
		t.Fatal("unexpected cpu measurement")
	}

	pt = models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{}),
		map[string]interface{}{"value": 1.0, "time": 1.0},
		time.Unix(1, 2),
	)

	buf = bytes.NewBuffer(nil)
	sh.SetLogOutput(buf)
	if err := sh.WritePoints([]models.Point{pt}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	} else if got, exp := buf.String(), "dropping field 'time'"; !strings.Contains(got, exp) {
		t.Fatalf("unexpected log message: %s", strings.TrimSpace(got))
	}

	m = index.Measurement("cpu")
	if m == nil {
		t.Fatal("expected cpu measurement")
	}

	if got, exp := len(m.FieldNames()), 1; got != exp {
		t.Fatalf("invalid number of field names: got=%v exp=%v", got, exp)
	}
}
Example #2
func TestShardWriteAddNewField(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	pt := models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{"host": "server"}),
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	pt = models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{"host": "server"}),
		map[string]interface{}{"value": 1.0, "value2": 2.0},
		time.Unix(1, 2),
	)

	err = sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	if index.SeriesN() != 1 {
		t.Fatalf("series wasn't in index")
	}
	seriesTags := index.Series(string(pt.Key())).Tags
	if len(seriesTags) != len(pt.Tags()) || pt.Tags().GetString("host") != seriesTags.GetString("host") {
		t.Fatalf("tags weren't properly saved to series index: %v, %v", pt.Tags(), seriesTags)
	}
	if !reflect.DeepEqual(index.Measurement("cpu").TagKeys(), []string{"host"}) {
		t.Fatalf("tag key wasn't saved to measurement index")
	}

	if len(index.Measurement("cpu").FieldNames()) != 2 {
		t.Fatalf("field names wasn't saved to measurement index")
	}
}
Example #3
func TestFilterMatchMultipleWildcards(t *testing.T) {
	p, err := graphite.NewParser([]string{
		"*.* .wrong.measurement*",
		"servers.* .host.measurement*", // should match this
		"servers.localhost .wrong.measurement*",
		"*.localhost .wrong.measurement*",
	}, nil)

	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := models.MustNewPoint("cpu_load",
		models.NewTags(map[string]string{"host": "server01"}),
		models.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.server01.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #4
// Bytes returns the line protocol representation of the point as a byte slice.
func (p Point) Bytes(precision string) []byte {
	key := models.MakeKey([]byte(p.Name), models.NewTags(p.Tags))
	fields := models.Fields(p.Fields).MarshalBinary()
	kl := len(key)
	fl := len(fields)
	var bytes []byte

	if p.Time.IsZero() {
		bytes = make([]byte, fl+kl+1)
		copy(bytes, key)
		bytes[kl] = ' '
		copy(bytes[kl+1:], fields)
	} else {
		timeStr := strconv.FormatInt(p.Time.UnixNano()/models.GetPrecisionMultiplier(precision), 10)
		tl := len(timeStr)
		bytes = make([]byte, fl+kl+tl+2)
		copy(bytes, key)
		bytes[kl] = ' '
		copy(bytes[kl+1:], fields)
		bytes[kl+fl+1] = ' '
		copy(bytes[kl+fl+2:], []byte(timeStr))
	}

	return bytes
}
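The layout assembled above is the series key, a space, the encoded fields, and optionally another space plus the timestamp string. Below is a minimal sketch (not part of the original source) that builds the same line directly from the models helpers already used above; the exact output is an assumption.

func ExamplePointBytesLayout() {
	key := models.MakeKey([]byte("cpu"), models.NewTags(map[string]string{"host": "server01"}))
	fields := models.Fields{"value": 1.0}.MarshalBinary()

	// Seconds precision: divide nanoseconds by the precision multiplier.
	ts := strconv.FormatInt(time.Unix(1, 0).UnixNano()/models.GetPrecisionMultiplier("s"), 10)

	// key, a space, the fields, a space, and the timestamp string.
	line := append(append(append(append(key, ' '), fields...), ' '), ts...)
	fmt.Println(string(line)) // e.g. cpu,host=server01 value=1 1
}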
Example #5
// Ensure engine can create an iterator with a condition.
func TestEngine_CreateIterator_Condition(t *testing.T) {
	t.Parallel()

	e := MustOpenEngine()
	defer e.Close()

	e.Index().CreateMeasurementIndexIfNotExists("cpu")
	e.Index().Measurement("cpu").SetFieldName("X")
	e.Index().Measurement("cpu").SetFieldName("Y")
	e.MeasurementFields("cpu").CreateFieldIfNotExists("value", influxql.Float, false)
	e.MeasurementFields("cpu").CreateFieldIfNotExists("X", influxql.Float, false)
	e.MeasurementFields("cpu").CreateFieldIfNotExists("Y", influxql.Float, false)
	si := e.Index().CreateSeriesIndexIfNotExists("cpu", tsdb.NewSeries("cpu,host=A", models.NewTags(map[string]string{"host": "A"})))
	si.AssignShard(1)

	if err := e.WritePointsString(
		`cpu,host=A value=1.1 1000000000`,
		`cpu,host=A X=10 1000000000`,
		`cpu,host=A Y=100 1000000000`,

		`cpu,host=A value=1.2 2000000000`,

		`cpu,host=A value=1.3 3000000000`,
		`cpu,host=A X=20 3000000000`,
		`cpu,host=A Y=200 3000000000`,
	); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	itr, err := e.CreateIterator(influxql.IteratorOptions{
		Expr:       influxql.MustParseExpr(`value`),
		Dimensions: []string{"host"},
		Condition:  influxql.MustParseExpr(`X = 10 OR Y > 150`),
		Sources:    []influxql.Source{&influxql.Measurement{Name: "cpu"}},
		StartTime:  influxql.MinTime,
		EndTime:    influxql.MaxTime,
		Ascending:  true,
	})
	if err != nil {
		t.Fatal(err)
	}
	fitr := itr.(influxql.FloatIterator)

	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(0): %v", err)
	} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
		t.Fatalf("unexpected point(0): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected point(1): %v", err)
	} else if !reflect.DeepEqual(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
		t.Fatalf("unexpected point(1): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("expected eof, got error: %v", err)
	} else if p != nil {
		t.Fatalf("expected eof: %v", p)
	}
}
Example #6
// DefaultTags returns the config's tags.
func (c *Config) DefaultTags() models.Tags {
	m := make(map[string]string, len(c.Tags))
	for _, t := range c.Tags {
		parts := strings.Split(t, "=")
		m[parts[0]] = parts[1]
	}
	return models.NewTags(m)
}
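Each entry in c.Tags is assumed to have the key=value form; an entry without '=' would panic on the parts[1] access. A hypothetical sketch of the expected input and result (the Config literal and values are illustrative, not from the source):

func ExampleDefaultTags() {
	c := &Config{Tags: []string{"region=us-east", "host=web01"}}
	tags := c.DefaultTags()
	fmt.Println(tags.GetString("region"), tags.GetString("host")) // us-east web01
}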
Example #7
// MustInitBenchmarkEngine creates a new engine and fills it with points.
// Reuses previous engine if the same parameters were used.
func MustInitBenchmarkEngine(pointN int) *Engine {
	// Reuse engine, if available.
	if benchmark.Engine != nil {
		if benchmark.PointN == pointN {
			return benchmark.Engine
		}

		// Otherwise close and remove it.
		benchmark.Engine.Close()
		benchmark.Engine = nil
	}

	const batchSize = 1000
	if pointN%batchSize != 0 {
		panic(fmt.Sprintf("point count (%d) must be a multiple of batch size (%d)", pointN, batchSize))
	}

	e := MustOpenEngine()

	// Initialize metadata.
	e.Index().CreateMeasurementIndexIfNotExists("cpu")
	e.MeasurementFields("cpu").CreateFieldIfNotExists("value", influxql.Float, false)
	si := e.Index().CreateSeriesIndexIfNotExists("cpu", tsdb.NewSeries("cpu,host=A", models.NewTags(map[string]string{"host": "A"})))
	si.AssignShard(1)

	// Generate time ascending points with jittered time & value.
	rand := rand.New(rand.NewSource(0))
	for i := 0; i < pointN; i += batchSize {
		var buf bytes.Buffer
		for j := 0; j < batchSize; j++ {
			fmt.Fprintf(&buf, "cpu,host=%s value=%d %d",
				hostNames[j%len(hostNames)],
				100+rand.Intn(50)-25,
				(time.Duration(i+j)*time.Second)+(time.Duration(rand.Intn(500)-250)*time.Millisecond),
			)
			if j != batchSize-1 {
				fmt.Fprint(&buf, "\n")
			}
		}

		if err := e.WritePointsString(buf.String()); err != nil {
			panic(err)
		}
	}

	if err := e.WriteSnapshot(); err != nil {
		panic(err)
	}

	// Force garbage collection.
	runtime.GC()

	// Save engine reference for reuse.
	benchmark.Engine = e
	benchmark.PointN = pointN

	return e
}
Example #8
// Ensure engine can create an iterator with auxiliary fields.
func TestEngine_CreateIterator_Aux(t *testing.T) {
	t.Parallel()

	e := MustOpenEngine()
	defer e.Close()

	e.Index().CreateMeasurementIndexIfNotExists("cpu")
	e.MeasurementFields("cpu").CreateFieldIfNotExists("value", influxql.Float, false)
	e.MeasurementFields("cpu").CreateFieldIfNotExists("F", influxql.Float, false)
	si := e.Index().CreateSeriesIndexIfNotExists("cpu", tsdb.NewSeries("cpu,host=A", models.NewTags(map[string]string{"host": "A"})))
	si.AssignShard(1)

	if err := e.WritePointsString(
		`cpu,host=A value=1.1 1000000000`,
		`cpu,host=A F=100 1000000000`,
		`cpu,host=A value=1.2 2000000000`,
		`cpu,host=A value=1.3 3000000000`,
		`cpu,host=A F=200 3000000000`,
	); err != nil {
		t.Fatalf("failed to write points: %s", err.Error())
	}

	itr, err := e.CreateIterator(influxql.IteratorOptions{
		Expr:       influxql.MustParseExpr(`value`),
		Aux:        []influxql.VarRef{{Val: "F"}},
		Dimensions: []string{"host"},
		Sources:    []influxql.Source{&influxql.Measurement{Name: "cpu"}},
		StartTime:  influxql.MinTime,
		EndTime:    influxql.MaxTime,
		Ascending:  true,
	})
	if err != nil {
		t.Fatal(err)
	}
	fitr := itr.(influxql.FloatIterator)

	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(0): %v", err)
	} else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) {
		t.Fatalf("unexpected point(0): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(1): %v", err)
	} else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) {
		t.Fatalf("unexpected point(1): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("unexpected error(2): %v", err)
	} else if !deep.Equal(p, &influxql.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) {
		t.Fatalf("unexpected point(2): %v", p)
	}
	if p, err := fitr.Next(); err != nil {
		t.Fatalf("expected eof, got error: %v", err)
	} else if p != nil {
		t.Fatalf("expected eof: %v", p)
	}
}
Example #9
// AddPoint adds a point to the WritePointsRequest with the field key 'value'.
func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
	pt, err := models.NewPoint(
		name, models.NewTags(tags), map[string]interface{}{"value": value}, timestamp,
	)
	if err != nil {
		return
	}
	w.Points = append(w.Points, pt)
}
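A hedged usage sketch: when models.NewPoint rejects the field value (for example NaN, which the graphite parser in a later example also refuses), AddPoint silently drops the point, so callers cannot assume every call grows Points. The names and values below are illustrative:

func ExampleAddPoint() {
	var req WritePointsRequest
	req.AddPoint("cpu_load", 0.64, time.Now(), map[string]string{"host": "server01"})
	req.AddPoint("cpu_load", math.NaN(), time.Now(), nil) // rejected by models.NewPoint, silently dropped
	fmt.Println(len(req.Points))                          // 1
}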
Example #10
func Test_Service_UDP(t *testing.T) {
	t.Parallel()

	now := time.Now().UTC().Round(time.Second)

	config := Config{}
	config.Database = "graphitedb"
	config.BatchSize = 0 // No batching.
	config.BatchTimeout = toml.Duration(time.Second)
	config.BindAddress = ":10000"
	config.Protocol = "udp"

	service := NewTestService(&config)

	// Allow test to wait until points are written.
	var wg sync.WaitGroup
	wg.Add(1)

	service.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {
		defer wg.Done()

		pt, _ := models.NewPoint(
			"cpu",
			models.NewTags(map[string]string{}),
			map[string]interface{}{"value": 23.456},
			time.Unix(now.Unix(), 0))
		if database != "graphitedb" {
			t.Fatalf("unexpected database: %s", database)
		} else if retentionPolicy != "" {
			t.Fatalf("unexpected retention policy: %s", retentionPolicy)
		} else if points[0].String() != pt.String() {
			t.Fatalf("unexpected points: %#v", points[0].String())
		}
		return nil
	}

	if err := service.Service.Open(); err != nil {
		t.Fatalf("failed to open Graphite service: %s", err.Error())
	}

	// Connect to the graphite endpoint we just spun up
	_, port, _ := net.SplitHostPort(service.Service.Addr().String())
	conn, err := net.Dial("udp", "127.0.0.1:"+port)
	if err != nil {
		t.Fatal(err)
	}
	data := []byte(`cpu 23.456 `)
	data = append(data, []byte(fmt.Sprintf("%d", now.Unix()))...)
	data = append(data, '\n')
	_, err = conn.Write(data)
	if err != nil {
		t.Fatal(err)
	}

	wg.Wait()
	conn.Close()
}
Example #11
// storeStatistics writes the statistics to an InfluxDB system.
func (m *Monitor) storeStatistics() {
	defer m.wg.Done()
	m.Logger.Printf("Storing statistics in database '%s' retention policy '%s', at interval %s",
		m.storeDatabase, m.storeRetentionPolicy, m.storeInterval)

	hostname, _ := os.Hostname()
	m.SetGlobalTag("hostname", hostname)

	// Wait until an even interval to start recording monitor statistics.
	// If we are interrupted before the interval for some reason, exit early.
	if err := m.waitUntilInterval(m.storeInterval); err != nil {
		return
	}

	tick := time.NewTicker(m.storeInterval)
	defer tick.Stop()

	for {
		select {
		case now := <-tick.C:
			now = now.Truncate(m.storeInterval)
			func() {
				m.mu.Lock()
				defer m.mu.Unlock()
				m.createInternalStorage()
			}()

			stats, err := m.Statistics(m.globalTags)
			if err != nil {
				m.Logger.Printf("failed to retrieve registered statistics: %s", err)
				return
			}

			points := make(models.Points, 0, len(stats))
			for _, s := range stats {
				pt, err := models.NewPoint(s.Name, models.NewTags(s.Tags), s.Values, now)
				if err != nil {
					m.Logger.Printf("Dropping point %v: %v", s.Name, err)
					return
				}
				points = append(points, pt)
			}

			func() {
				m.mu.RLock()
				defer m.mu.RUnlock()

				if err := m.PointsWriter.WritePoints(m.storeDatabase, m.storeRetentionPolicy, points); err != nil {
					m.Logger.Printf("failed to store statistics: %s", err)
				}
			}()
		case <-m.done:
			m.Logger.Printf("terminating storage of statistics")
			return
		}
	}
}
Example #12
// MarshalString renders the string representation of a Point at the specified
// precision. The default precision is nanoseconds.
func (p *Point) MarshalString() string {
	pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time)
	if err != nil {
		return "# ERROR: " + err.Error() + " " + p.Measurement
	}
	if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" {
		return pt.String()
	}
	return pt.PrecisionString(p.Precision)
}
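A small sketch (not from the source) of the precision fallback: an empty, "ns", or "n" precision returns the full String() form, anything else goes through PrecisionString. The Point literal relies on the exported fields referenced above (Measurement, Tags, Fields, Time, Precision); the printed values are assumptions.

func ExampleMarshalString() {
	p := &Point{
		Measurement: "cpu",
		Tags:        map[string]string{"host": "server01"},
		Fields:      map[string]interface{}{"value": 1.0},
		Time:        time.Unix(10, 500),
	}
	fmt.Println(p.MarshalString()) // e.g. cpu,host=server01 value=1 10000000500

	p.Precision = "s"
	fmt.Println(p.MarshalString()) // e.g. cpu,host=server01 value=1 10
}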
Example #13
// NewGaugeMetric returns a gauge metric.
// Gauge metrics should be used when the metric can arbitrarily go up and
// down, e.g. temperature, memory usage, CPU usage, etc.
func NewGaugeMetric(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	t time.Time,
) (Metric, error) {
	pt, err := models.NewPoint(name, models.NewTags(tags), fields, t)
	if err != nil {
		return nil, err
	}
	return &metric{
		pt:    pt,
		mType: Gauge,
	}, nil
}
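An illustrative call mirroring the signature above (names and values are hypothetical); the returned Metric wraps a models point marked with the Gauge type:

func ExampleNewGaugeMetric() {
	m, err := NewGaugeMetric(
		"mem_used_percent",
		map[string]string{"host": "server01"},
		map[string]interface{}{"value": 63.2},
		time.Now(),
	)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	_ = m // hand the gauge on to whatever consumes Metric values
}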
Example #14
func TestParseDefaultTags(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement*"}, models.NewTags(map[string]string{
		"region": "us-east",
		"zone":   "1c",
		"host":   "should not set",
	}))
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := models.MustNewPoint("cpu_load",
		models.NewTags(map[string]string{"host": "localhost", "region": "us-east", "zone": "1c"}),
		models.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #15
// UnmarshalCollectd translates a collectd packet into InfluxDB data points.
func (s *Service) UnmarshalCollectd(packet *gollectd.Packet) []models.Point {
	// Prefer high resolution timestamp.
	var timestamp time.Time
	if packet.TimeHR > 0 {
		// TimeHR is a "near" nanosecond measurement, but not exact nanosecond time.
		// Since we store time in microseconds, we round here (mostly so tests are easier to write).
		sec := packet.TimeHR >> 30
		// Shifting, masking, and dividing by 1 billion to get nanoseconds.
		nsec := ((packet.TimeHR & 0x3FFFFFFF) << 30) / 1000 / 1000 / 1000
		timestamp = time.Unix(int64(sec), int64(nsec)).UTC().Round(time.Microsecond)
	} else {
		// If we don't have high resolution time, fall back to basic unix time
		timestamp = time.Unix(int64(packet.Time), 0).UTC()
	}

	var points []models.Point
	for i := range packet.Values {
		name := fmt.Sprintf("%s_%s", packet.Plugin, packet.Values[i].Name)
		tags := make(map[string]string)
		fields := make(map[string]interface{})

		fields["value"] = packet.Values[i].Value

		if packet.Hostname != "" {
			tags["host"] = packet.Hostname
		}
		if packet.PluginInstance != "" {
			tags["instance"] = packet.PluginInstance
		}
		if packet.Type != "" {
			tags["type"] = packet.Type
		}
		if packet.TypeInstance != "" {
			tags["type_instance"] = packet.TypeInstance
		}

		// Drop invalid points
		p, err := models.NewPoint(name, models.NewTags(tags), fields, timestamp)
		if err != nil {
			s.Logger.Printf("Dropping point %v: %v", name, err)
			atomic.AddInt64(&s.stats.InvalidDroppedPoints, 1)
			continue
		}

		points = append(points, p)
	}
	return points
}
Example #16
// Ensure a point can be written via the HTTP protocol.
func TestService_HTTP(t *testing.T) {
	t.Parallel()

	s := NewTestService("db0", "127.0.0.1:0")
	if err := s.Service.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Service.Close()

	// Mock points writer.
	var called bool
	s.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {
		called = true
		if database != "db0" {
			t.Fatalf("unexpected database: %s", database)
		} else if retentionPolicy != "" {
			t.Fatalf("unexpected retention policy: %s", retentionPolicy)
		} else if !reflect.DeepEqual(points, []models.Point{
			models.MustNewPoint(
				"sys.cpu.nice",
				models.NewTags(map[string]string{"dc": "lga", "host": "web01"}),
				map[string]interface{}{"value": 18.0},
				time.Unix(1346846400, 0),
			),
		}) {
			spew.Dump(points)
			t.Fatalf("unexpected points: %#v", points)
		}
		return nil
	}

	// Write HTTP request to server.
	resp, err := http.Post("http://"+s.Service.Addr().String()+"/api/put", "application/json", strings.NewReader(`{"metric":"sys.cpu.nice", "timestamp":1346846400, "value":18, "tags":{"host":"web01", "dc":"lga"}}`))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// Verify status and body.
	if resp.StatusCode != http.StatusNoContent {
		t.Fatalf("unexpected status code: %d", resp.StatusCode)
	}

	// Verify that the writer was called.
	if !called {
		t.Fatal("points writer not called")
	}
}
Example #17
// UnmarshalValueList translates a ValueList into InfluxDB data points.
func (s *Service) UnmarshalValueList(vl *api.ValueList) []models.Point {
	timestamp := vl.Time.UTC()

	var points []models.Point
	for i := range vl.Values {
		name := fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i))
		tags := make(map[string]string)
		fields := make(map[string]interface{})

		// Convert interface back to actual type, then to float64
		switch value := vl.Values[i].(type) {
		case api.Gauge:
			fields["value"] = float64(value)
		case api.Derive:
			fields["value"] = float64(value)
		case api.Counter:
			fields["value"] = float64(value)
		}

		if vl.Identifier.Host != "" {
			tags["host"] = vl.Identifier.Host
		}
		if vl.Identifier.PluginInstance != "" {
			tags["instance"] = vl.Identifier.PluginInstance
		}
		if vl.Identifier.Type != "" {
			tags["type"] = vl.Identifier.Type
		}
		if vl.Identifier.TypeInstance != "" {
			tags["type_instance"] = vl.Identifier.TypeInstance
		}

		// Drop invalid points
		p, err := models.NewPoint(name, models.NewTags(tags), fields, timestamp)
		if err != nil {
			s.Logger.Info(fmt.Sprintf("Dropping point %v: %v", name, err))
			atomic.AddInt64(&s.stats.InvalidDroppedPoints, 1)
			continue
		}

		points = append(points, p)
	}
	return points
}
Example #18
func TestShard_Disabled_WriteQuery(t *testing.T) {
	sh := NewShard()
	if err := sh.Open(); err != nil {
		t.Fatal(err)
	}
	defer sh.Close()

	sh.SetEnabled(false)

	pt := models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{"host": "server"}),
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]models.Point{pt})
	if err == nil {
		t.Fatalf("expected shard disabled error")
	}
	if err != tsdb.ErrShardDisabled {
		t.Fatalf(err.Error())
	}

	_, got := sh.CreateIterator(influxql.IteratorOptions{})
	if got == nil {
		t.Fatalf("expected shard disabled error")
	}
	if exp := tsdb.ErrShardDisabled; got != exp {
		t.Fatalf("got %v, expected %v", got, exp)
	}

	sh.SetEnabled(true)

	err = sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err = sh.CreateIterator(influxql.IteratorOptions{}); err != nil {
		t.Fatalf("unexpected error: %v", got)
	}
}
Example #19
// NewPoint returns a point with the given timestamp. If a timestamp is not
// given, then data is sent to the database without a timestamp, in which case
// the server will assign local time upon reception. NOTE: it is recommended to
// send data with a timestamp.
func NewPoint(
	name string,
	tags map[string]string,
	fields map[string]interface{},
	t ...time.Time,
) (*Point, error) {
	var T time.Time
	if len(t) > 0 {
		T = t[0]
	}

	pt, err := models.NewPoint(name, models.NewTags(tags), fields, T)
	if err != nil {
		return nil, err
	}
	return &Point{
		pt: pt,
	}, nil
}
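A short sketch of both forms described in the comment: with an explicit timestamp, and without one, in which case the zero time is passed through and the server assigns its own time on write. Values are illustrative:

func ExampleNewPoint() {
	// With an explicit timestamp (recommended).
	withTS, err := NewPoint("cpu_load", map[string]string{"host": "server01"},
		map[string]interface{}{"value": 0.64}, time.Now())
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	// Without a timestamp: the server assigns its local time on receipt.
	withoutTS, err := NewPoint("cpu_load", map[string]string{"host": "server01"},
		map[string]interface{}{"value": 0.64})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	_, _ = withTS, withoutTS
}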
Example #20
func TestParseNoMatch(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.*.cpu .host.measurement.cpu.measurement"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := models.MustNewPoint("servers.localhost.memory.VmallocChunk",
		models.NewTags(map[string]string{}),
		models.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.memory.VmallocChunk 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #21
func TestWriteTimeField(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	pt := models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{"time": "now"}),
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	buf := bytes.NewBuffer(nil)
	sh.SetLogOutput(buf)
	if err := sh.WritePoints([]models.Point{pt}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	} else if got, exp := buf.String(), "dropping tag 'time'"; !strings.Contains(got, exp) {
		t.Fatalf("unexpected log message: %s", strings.TrimSpace(got))
	}

	key := models.MakeKey([]byte("cpu"), nil)
	series := index.Series(string(key))
	if series == nil {
		t.Fatal("expected series")
	} else if len(series.Tags) != 0 {
		t.Fatalf("unexpected number of tags: got=%v exp=%v", len(series.Tags), 0)
	}
}
Example #22
// Ensures that when a shard is closed, it removes any series meta-data
// from the index.
func TestShard_Close_RemoveIndex(t *testing.T) {
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)

	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}

	pt := models.MustNewPoint(
		"cpu",
		models.NewTags(map[string]string{"host": "server"}),
		map[string]interface{}{"value": 1.0},
		time.Unix(1, 2),
	)

	err := sh.WritePoints([]models.Point{pt})
	if err != nil {
		t.Fatalf(err.Error())
	}

	if got, exp := index.SeriesN(), 1; got != exp {
		t.Fatalf("series count mismatch: got %v, exp %v", got, exp)
	}

	// Ensure the series is removed from the in-memory index when the shard is closed.
	sh.Close()

	if got, exp := index.SeriesN(), 0; got != exp {
		t.Fatalf("series count mismatch: got %v, exp %v", got, exp)
	}
}
Example #23
func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
	p, err := graphite.NewParserWithOptions(graphite.Options{
		Templates: []string{"servers.localhost .host.measurement.measurement*"},
		Separator: "_",
	})
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := models.MustNewPoint("cpu_cpu_load_10",
		models.NewTags(map[string]string{"host": "localhost"}),
		models.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu.cpu_load.10 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #24
// convertRowToPoints will convert a query result Row into Points that can be written back in.
func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) {
	// figure out which parts of the result are the time and which are the fields
	timeIndex := -1
	fieldIndexes := make(map[string]int)
	for i, c := range row.Columns {
		if c == "time" {
			timeIndex = i
		} else {
			fieldIndexes[c] = i
		}
	}

	if timeIndex == -1 {
		return nil, errors.New("error finding time index in result")
	}

	points := make([]models.Point, 0, len(row.Values))
	for _, v := range row.Values {
		vals := make(map[string]interface{})
		for fieldName, fieldIndex := range fieldIndexes {
			val := v[fieldIndex]
			if val != nil {
				vals[fieldName] = v[fieldIndex]
			}
		}

		p, err := models.NewPoint(measurementName, models.NewTags(row.Tags), vals, v[timeIndex].(time.Time))
		if err != nil {
			// Drop points that can't be stored
			continue
		}

		points = append(points, p)
	}

	return points, nil
}
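A hedged sketch of the conversion: each value row becomes one point, nil columns are skipped, and a row left with no fields is dropped because models.NewPoint returns an error for it. The models.Row literal assumes only the Tags/Columns/Values fields used above:

func ExampleConvertRowToPoints() {
	row := &models.Row{
		Tags:    map[string]string{"host": "server01"},
		Columns: []string{"time", "value"},
		Values: [][]interface{}{
			{time.Unix(1, 0), 2.0},
			{time.Unix(2, 0), nil}, // no usable fields, so this row is dropped
		},
	}

	points, err := convertRowToPoints("cpu", row)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, p := range points {
		fmt.Println(p.String())
	}
}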
Example #25
// Tests concurrently writing to the same shard with different field types which
// can trigger a panic when the shard is snapshotted to TSM files.
func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	tmpDir, _ := ioutil.TempDir("", "shard_test")
	defer os.RemoveAll(tmpDir)
	tmpShard := path.Join(tmpDir, "shard")
	tmpWal := path.Join(tmpDir, "wal")

	index := tsdb.NewDatabaseIndex("db")
	opts := tsdb.NewEngineOptions()
	opts.Config.WALDir = filepath.Join(tmpDir, "wal")

	sh := tsdb.NewShard(1, index, tmpShard, tmpWal, opts)
	if err := sh.Open(); err != nil {
		t.Fatalf("error opening shard: %s", err.Error())
	}
	defer sh.Close()

	points := make([]models.Point, 0, 1000)
	for i := 0; i < cap(points); i++ {
		if i < 500 {
			points = append(points, models.MustNewPoint(
				"cpu",
				models.NewTags(map[string]string{"host": "server"}),
				map[string]interface{}{"value": 1.0},
				time.Unix(int64(i), 0),
			))
		} else {
			points = append(points, models.MustNewPoint(
				"cpu",
				models.NewTags(map[string]string{"host": "server"}),
				map[string]interface{}{"value": int64(1)},
				time.Unix(int64(i), 0),
			))
		}
	}

	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			if err := sh.DeleteMeasurement("cpu", []string{"cpu,host=server"}); err != nil {
				t.Fatalf(err.Error())
			}

			_ = sh.WritePoints(points[:500])
			if f, err := sh.CreateSnapshot(); err == nil {
				os.RemoveAll(f)
			}

		}
	}()

	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			if err := sh.DeleteMeasurement("cpu", []string{"cpu,host=server"}); err != nil {
				t.Fatalf(err.Error())
			}

			_ = sh.WritePoints(points[500:])
			if f, err := sh.CreateSnapshot(); err == nil {
				os.RemoveAll(f)
			}
		}
	}()

	wg.Wait()
}
Example #26
func genTestSeries(mCnt, tCnt, vCnt int) []*TestSeries {
	measurements := genStrList("measurement", mCnt)
	tagSets := NewTagSetGenerator(tCnt, vCnt).AllSets()
	series := []*TestSeries{}
	for _, m := range measurements {
		for _, ts := range tagSets {
			series = append(series, &TestSeries{
				Measurement: m,
				Series:      tsdb.NewSeries(fmt.Sprintf("%s:%s", m, string(tsdb.MarshalTags(ts))), models.NewTags(ts)),
			})
		}
	}
	return series
}
Example #27
// servePut implements OpenTSDB's HTTP /api/put endpoint.
func (h *Handler) servePut(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	// Require POST method.
	if r.Method != "POST" {
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return
	}

	// Wrap reader if it's gzip encoded.
	var br *bufio.Reader
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := gzip.NewReader(r.Body)
		if err != nil {
			http.Error(w, "could not read gzip, "+err.Error(), http.StatusBadRequest)
			return
		}

		br = bufio.NewReader(zr)
	} else {
		br = bufio.NewReader(r.Body)
	}

	// Lookahead at the first byte.
	f, err := br.Peek(1)
	if err != nil || len(f) != 1 {
		http.Error(w, "peek error: "+err.Error(), http.StatusBadRequest)
		return
	}

	// Peek to see if this is a JSON array.
	var multi bool
	switch f[0] {
	case '{':
	case '[':
		multi = true
	default:
		http.Error(w, "expected JSON array or hash", http.StatusBadRequest)
		return
	}

	// Decode JSON data into slice of points.
	dps := make([]point, 1)
	if dec := json.NewDecoder(br); multi {
		if err = dec.Decode(&dps); err != nil {
			http.Error(w, "json array decode error", http.StatusBadRequest)
			return
		}
	} else {
		if err = dec.Decode(&dps[0]); err != nil {
			http.Error(w, "json object decode error", http.StatusBadRequest)
			return
		}
	}

	// Convert points into TSDB points.
	points := make([]models.Point, 0, len(dps))
	for i := range dps {
		p := dps[i]

		// Convert the timestamp to Go time.
		// Values of 10 billion or more are treated as milliseconds; smaller values as seconds.
		var ts time.Time
		if p.Time < 10000000000 {
			ts = time.Unix(p.Time, 0)
		} else {
			ts = time.Unix(p.Time/1000, (p.Time%1000)*int64(time.Millisecond))
		}

		pt, err := models.NewPoint(p.Metric, models.NewTags(p.Tags), map[string]interface{}{"value": p.Value}, ts)
		if err != nil {
			h.Logger.Printf("Dropping point %v: %v", p.Metric, err)
			if h.stats != nil {
				atomic.AddInt64(&h.stats.InvalidDroppedPoints, 1)
			}
			continue
		}
		points = append(points, pt)
	}

	// Write points.
	if err := h.PointsWriter.WritePoints(h.Database, h.RetentionPolicy, models.ConsistencyLevelAny, points); influxdb.IsClientError(err) {
		h.Logger.Println("write series error: ", err)
		http.Error(w, "write series error: "+err.Error(), http.StatusBadRequest)
		return
	} else if err != nil {
		h.Logger.Println("write series error: ", err)
		http.Error(w, "write series error: "+err.Error(), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
Example #28
// Parse performs Graphite parsing of a single line.
func (p *Parser) Parse(line string) (models.Point, error) {
	// Break the line into fields: name, value, and an optional timestamp.
	fields := strings.Fields(line)
	if len(fields) != 2 && len(fields) != 3 {
		return nil, fmt.Errorf("received %q which doesn't have required fields", line)
	}

	// decode the name and tags
	template := p.matcher.Match(fields[0])
	measurement, tags, field, err := template.Apply(fields[0])
	if err != nil {
		return nil, err
	}

	// Could not extract measurement, use the raw value
	if measurement == "" {
		measurement = fields[0]
	}

	// Parse value.
	v, err := strconv.ParseFloat(fields[1], 64)
	if err != nil {
		return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
	}

	if math.IsNaN(v) || math.IsInf(v, 0) {
		return nil, &UnsupportedValueError{Field: fields[0], Value: v}
	}

	fieldValues := map[string]interface{}{}
	if field != "" {
		fieldValues[field] = v
	} else {
		fieldValues["value"] = v
	}

	// If no 3rd field, use now as timestamp
	timestamp := time.Now().UTC()

	if len(fields) == 3 {
		// Parse timestamp.
		unixTime, err := strconv.ParseFloat(fields[2], 64)
		if err != nil {
			return nil, fmt.Errorf(`field "%s" time: %s`, fields[0], err)
		}

		// -1 is a special value that gets converted to current UTC time
		// See https://github.com/graphite-project/carbon/issues/54
		if unixTime != float64(-1) {
			// Check if we have fractional seconds
			timestamp = time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))
			if timestamp.Before(MinDate) || timestamp.After(MaxDate) {
				return nil, fmt.Errorf("timestamp out of range")
			}
		}
	}

	// Set the default tags on the point if they are not already set
	for _, t := range p.tags {
		if _, ok := tags[string(t.Key)]; !ok {
			tags[string(t.Key)] = string(t.Value)
		}
	}
	return models.NewPoint(measurement, models.NewTags(tags), fieldValues, timestamp)
}
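A short usage sketch of the timestamp handling above: a two-field line falls back to time.Now().UTC(), and so does the carbon special value -1. The template is the one used in the earlier filter examples; the printed output is an assumption.

func ExampleParse() {
	p, err := graphite.NewParser([]string{"servers.* .host.measurement*"}, nil)
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	// Explicit timestamp.
	pt, _ := p.Parse("servers.server01.cpu_load 11 1435077219")
	fmt.Println(pt.String()) // e.g. cpu_load,host=server01 value=11 1435077219000000000

	// A two-field line and the carbon special value -1 both fall back to time.Now().UTC().
	noTS, _ := p.Parse("servers.server01.cpu_load 11")
	minusOne, _ := p.Parse("servers.server01.cpu_load 11 -1")
	_, _ = noTS, minusOne
}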
Example #29
// Statistics returns statistics for periodic monitoring.
func (s *Service) Statistics(tags map[string]string) []models.Statistic {
	return s.Handler.Statistics(models.NewTags(map[string]string{"bind": s.addr}).Merge(tags).Map())
}
Example #30
// Ensure a point can be written via the telnet protocol.
func TestService_Telnet(t *testing.T) {
	t.Parallel()

	s := NewTestService("db0", "127.0.0.1:0")
	if err := s.Service.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Service.Close()

	// Mock points writer.
	var called int32
	s.WritePointsFn = func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {
		atomic.StoreInt32(&called, 1)

		if database != "db0" {
			t.Fatalf("unexpected database: %s", database)
		} else if retentionPolicy != "" {
			t.Fatalf("unexpected retention policy: %s", retentionPolicy)
		} else if !reflect.DeepEqual(points, []models.Point{
			models.MustNewPoint(
				"sys.cpu.user",
				models.NewTags(map[string]string{"host": "webserver01", "cpu": "0"}),
				map[string]interface{}{"value": 42.5},
				time.Unix(1356998400, 0),
			),
		}) {
			t.Fatalf("unexpected points: %#v", points)
		}
		return nil
	}

	// Open connection to the service.
	conn, err := net.Dial("tcp", s.Service.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	// Write telnet data and close.
	if _, err := conn.Write([]byte("put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0")); err != nil {
		t.Fatal(err)
	}
	if err := conn.Close(); err != nil {
		t.Fatal(err)
	}

	tick := time.Tick(10 * time.Millisecond)
	timeout := time.After(10 * time.Second)

	for {
		select {
		case <-tick:
			// Verify that the writer was called.
			if atomic.LoadInt32(&called) > 0 {
				return
			}
		case <-timeout:
			t.Fatal("points writer not called")
		}
	}
}