Example #1
// Ensure the shard writer returns an error when the server fails to accept the write.
func TestShardWriter_WriteShard_Error(t *testing.T) {
	ts := newTestService(writeShardFail)
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = ts
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Minute)
	w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []tsdb.Point
	points = append(points, tsdb.NewPoint(
		"cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
	))

	if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" {
		t.Fatalf("unexpected error: %v", err)
	}
}
Example #2
func TestFilterMatchMultipleWildcards(t *testing.T) {
	p, err := graphite.NewParser([]string{
		"*.* .wrong.measurement*",
		"servers.* .host.measurement*", // should match this
		"servers.localhost .wrong.measurement*",
		"*.localhost .wrong.measurement*",
	}, nil)

	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "server01"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.server01.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #3
// convertRowToPoints will convert a query result Row into Points that can be written back in.
// Used for continuous and INTO queries
func (s *Service) convertRowToPoints(measurementName string, row *influxql.Row) ([]tsdb.Point, error) {
	// figure out which parts of the result are the time and which are the fields
	timeIndex := -1
	fieldIndexes := make(map[string]int)
	for i, c := range row.Columns {
		if c == "time" {
			timeIndex = i
		} else {
			fieldIndexes[c] = i
		}
	}

	if timeIndex == -1 {
		return nil, errors.New("error finding time index in result")
	}

	points := make([]tsdb.Point, 0, len(row.Values))
	for _, v := range row.Values {
		vals := make(map[string]interface{})
		for fieldName, fieldIndex := range fieldIndexes {
			vals[fieldName] = v[fieldIndex]
		}

		p := tsdb.NewPoint(measurementName, row.Tags, vals, v[timeIndex].(time.Time))

		points = append(points, p)
	}

	return points, nil
}
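// The sketch below is not from the original service code; it is a minimal,
// hedged usage example assumed to live in the same package (convertRowToPoints
// is unexported). The influxql.Row literal sets only the fields the method
// actually reads: Tags, Columns, and Values.
func exampleConvertRowToPoints() {
	s := &Service{}
	row := &influxql.Row{
		Tags:    map[string]string{"host": "server01"},
		Columns: []string{"time", "mean"},
		Values: [][]interface{}{
			{time.Unix(1435077219, 0), 42.0},
		},
	}

	// Produces a single point: measurement "cpu_mean", tag host=server01,
	// field mean=42, timestamped from the row's "time" column.
	points, err := s.convertRowToPoints("cpu_mean", row)
	if err != nil {
		return
	}
	_ = points
}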
Example #4
// Ensure the shard writer returns an error when dialing times out.
func TestShardWriter_Write_ErrDialTimeout(t *testing.T) {
	ts := newTestService(writeShardSuccess)
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = ts
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Nanosecond)
	w.MetaStore = &metaStore{host: ts.ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []tsdb.Point
	points = append(points, tsdb.NewPoint(
		"cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
	))

	if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) {
		t.Fatalf("expected error %v, to contain %s", err, exp)
	}
}
Example #5
func (w *WriteShardRequest) unmarshalPoints() []tsdb.Point {
	points := make([]tsdb.Point, len(w.pb.GetPoints()))
	for i, p := range w.pb.GetPoints() {
		pt := tsdb.NewPoint(
			p.GetName(), map[string]string{},
			map[string]interface{}{}, time.Unix(0, p.GetTime()))

		for _, f := range p.GetFields() {
			n := f.GetName()
			if f.Int32 != nil {
				pt.AddField(n, f.GetInt32())
			} else if f.Int64 != nil {
				pt.AddField(n, f.GetInt64())
			} else if f.Float64 != nil {
				pt.AddField(n, f.GetFloat64())
			} else if f.Bool != nil {
				pt.AddField(n, f.GetBool())
			} else if f.String_ != nil {
				pt.AddField(n, f.GetString_())
			} else {
				pt.AddField(n, f.GetBytes())
			}
		}

		tags := tsdb.Tags{}
		for _, t := range p.GetTags() {
			tags[t.GetKey()] = t.GetValue()
		}
		pt.SetTags(tags)
		points[i] = pt
	}
	return points
}
Example #6
// Ensure a point can be written via the telnet protocol.
func TestService_Telnet(t *testing.T) {
	t.Parallel()

	s := NewService("db0")
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	// Mock points writer.
	var called int32
	s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error {
		atomic.StoreInt32(&called, 1)

		if req.Database != "db0" {
			t.Fatalf("unexpected database: %s", req.Database)
		} else if req.RetentionPolicy != "" {
			t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
		} else if !reflect.DeepEqual(req.Points, []tsdb.Point{
			tsdb.NewPoint(
				"sys.cpu.user",
				map[string]string{"host": "webserver01", "cpu": "0"},
				map[string]interface{}{"value": 42.5},
				time.Unix(1356998400, 0),
			),
		}) {
			spew.Dump(req.Points)
			t.Fatalf("unexpected points: %#v", req.Points)
		}
		return nil
	}

	// Open connection to the service.
	conn, err := net.Dial("tcp", s.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	// Write telnet data and close.
	if _, err := conn.Write([]byte("put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0")); err != nil {
		t.Fatal(err)
	}
	if err := conn.Close(); err != nil {
		t.Fatal(err)
	}
	time.Sleep(10 * time.Millisecond)

	// Verify that the writer was called.
	if atomic.LoadInt32(&called) == 0 {
		t.Fatal("points writer not called")
	}
}
Example #7
// Parse performs Graphite parsing of a single line.
func (p *Parser) Parse(line string) (tsdb.Point, error) {
	// Break into fields: name, value, and an optional timestamp.
	fields := strings.Fields(line)
	if len(fields) != 2 && len(fields) != 3 {
		return nil, fmt.Errorf("received %q which doesn't have required fields", line)
	}

	// decode the name and tags
	matcher := p.matcher.Match(fields[0])
	measurement, tags := matcher.Apply(fields[0])

	// Could not extract measurement, use the raw value
	if measurement == "" {
		measurement = fields[0]
	}

	// Parse value.
	v, err := strconv.ParseFloat(fields[1], 64)
	if err != nil {
		return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err)
	}

	fieldValues := map[string]interface{}{"value": v}

	// If no 3rd field, use now as timestamp
	timestamp := time.Now().UTC()

	if len(fields) == 3 {
		// Parse timestamp.
		unixTime, err := strconv.ParseFloat(fields[2], 64)
		if err != nil {
			return nil, fmt.Errorf(`field "%s" time: %s`, fields[0], err)
		}

		// -1 is a special value that gets converted to current UTC time
		// See https://github.com/graphite-project/carbon/issues/54
		if unixTime != float64(-1) {
			// Check if we have fractional seconds
			timestamp = time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))
		}
	}

	// Set the default tags on the point if they are not already set
	for k, v := range p.tags {
		if _, ok := tags[k]; !ok {
			tags[k] = v
		}
	}
	point := tsdb.NewPoint(measurement, tags, fieldValues, timestamp)

	return point, nil
}
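// A small hedged sketch (not part of the parser source) making the special
// timestamp case above concrete: a Graphite timestamp of -1 is replaced with
// the current UTC time. It reuses the graphite.NewParser/Parse API shown in
// the surrounding test examples; the example function name is mine.
func ExampleParser_Parse_negativeTimestamp() {
	p, err := graphite.NewParser([]string{"measurement*"}, nil)
	if err != nil {
		panic(err)
	}

	// "-1" is the carbon convention for "use the current time".
	pt, err := p.Parse("servers.localhost.cpu_load 11 -1")
	if err != nil {
		panic(err)
	}

	// The point is stamped with time.Now().UTC(), not Unix time -1.
	fmt.Println(pt.Time().Year() > 1970)
	// Output: true
}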
Example #8
// Ensure a point can be written via the HTTP protocol.
func TestService_HTTP(t *testing.T) {
	t.Parallel()

	s := NewService("db0")
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()

	// Mock points writer.
	var called bool
	s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error {
		called = true
		if req.Database != "db0" {
			t.Fatalf("unexpected database: %s", req.Database)
		} else if req.RetentionPolicy != "" {
			t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
		} else if !reflect.DeepEqual(req.Points, []tsdb.Point{
			tsdb.NewPoint(
				"sys.cpu.nice",
				map[string]string{"dc": "lga", "host": "web01"},
				map[string]interface{}{"value": 18.0},
				time.Unix(1346846400, 0),
			),
		}) {
			spew.Dump(req.Points)
			t.Fatalf("unexpected points: %#v", req.Points)
		}
		return nil
	}

	// Write HTTP request to server.
	resp, err := http.Post("http://"+s.Addr().String()+"/api/put", "application/json", strings.NewReader(`{"metric":"sys.cpu.nice", "timestamp":1346846400, "value":18, "tags":{"host":"web01", "dc":"lga"}}`))
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	// Verify status and body.
	if resp.StatusCode != http.StatusNoContent {
		t.Fatalf("unexpected status code: %d", resp.StatusCode)
	}

	// Verify that the writer was called.
	if !called {
		t.Fatal("points writer not called")
	}
}
Example #9
// Ensure the shard writer can successfully write multiple requests.
func TestShardWriter_WriteShard_Multiple(t *testing.T) {
	ts := newTestService(writeShardSuccess)
	s := cluster.NewService(cluster.Config{})
	s.Listener = ts.muxln
	s.TSDBStore = ts
	if err := s.Open(); err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	defer ts.Close()

	w := cluster.NewShardWriter(time.Minute)
	w.MetaStore = &metaStore{host: ts.ln.Addr().String()}

	// Build a single point.
	now := time.Now()
	var points []tsdb.Point
	points = append(points, tsdb.NewPoint("cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now))

	// Write to shard twice and close.
	if err := w.WriteShard(1, 2, points); err != nil {
		t.Fatal(err)
	} else if err := w.WriteShard(1, 2, points); err != nil {
		t.Fatal(err)
	} else if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// Validate response.
	responses, err := ts.ResponseN(1)
	if err != nil {
		t.Fatal(err)
	} else if responses[0].shardID != 1 {
		t.Fatalf("unexpected shard id: %d", responses[0].shardID)
	}

	// Validate point.
	if p := responses[0].points[0]; p.Name() != "cpu" {
		t.Fatalf("unexpected name: %s", p.Name())
	} else if p.Fields()["value"] != int64(100) {
		t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"])
	} else if p.Tags()["host"] != "server01" {
		t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"])
	} else if p.Time().UnixNano() != now.UnixNano() {
		t.Fatalf("unexpected time: %s", p.Time())
	}
}
Example #10
func TestFilterMatchSingle(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost .host.measurement*"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #11
func TestParseNoMatch(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.*.cpu .host.measurement.cpu.measurement"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("servers.localhost.memory.VmallocChunk",
		tsdb.Tags{},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.memory.VmallocChunk 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #12
// Unmarshal translates a collectd packet into InfluxDB data points.
func Unmarshal(packet *gollectd.Packet) []tsdb.Point {
	// Prefer high resolution timestamp.
	var timestamp time.Time
	if packet.TimeHR > 0 {
		// TimeHR is a "near" nanosecond measurement, but not exactly nanosecond time.
		// Since we store time in microseconds, we round here (mostly to make tests easier).
		sec := packet.TimeHR >> 30
		// Shifting, masking, and dividing by 1 billion to get nanoseconds.
		nsec := ((packet.TimeHR & 0x3FFFFFFF) << 30) / 1000 / 1000 / 1000
		timestamp = time.Unix(int64(sec), int64(nsec)).UTC().Round(time.Microsecond)
	} else {
		// If we don't have high resolution time, fall back to basic unix time
		timestamp = time.Unix(int64(packet.Time), 0).UTC()
	}

	var points []tsdb.Point
	for i := range packet.Values {
		name := fmt.Sprintf("%s_%s", packet.Plugin, packet.Values[i].Name)
		tags := make(map[string]string)
		fields := make(map[string]interface{})

		fields["value"] = packet.Values[i].Value

		if packet.Hostname != "" {
			tags["host"] = packet.Hostname
		}
		if packet.PluginInstance != "" {
			tags["instance"] = packet.PluginInstance
		}
		if packet.Type != "" {
			tags["type"] = packet.Type
		}
		if packet.TypeInstance != "" {
			tags["type_instance"] = packet.TypeInstance
		}
		p := tsdb.NewPoint(name, tags, fields, timestamp)

		points = append(points, p)
	}
	return points
}
Example #13
// Ensure the shard writer returns an error when reading times out.
func TestShardWriter_Write_ErrReadTimeout(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}

	w := cluster.NewShardWriter(time.Millisecond)
	w.MetaStore = &metaStore{host: ln.Addr().String()}
	now := time.Now()

	shardID := uint64(1)
	ownerID := uint64(2)
	var points []tsdb.Point
	points = append(points, tsdb.NewPoint(
		"cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now,
	))

	if err := w.WriteShard(shardID, ownerID, points); err == nil || !strings.Contains(err.Error(), "i/o timeout") {
		t.Fatalf("unexpected error: %s", err)
	}
}
Example #14
// NormalizeBatchPoints returns a slice of Points built from the batch: individual
// points that are missing a time or tags inherit the batch's top-level values.
func NormalizeBatchPoints(bp client.BatchPoints) ([]tsdb.Point, error) {
	points := []tsdb.Point{}
	for _, p := range bp.Points {
		if p.Time.IsZero() {
			if bp.Time.IsZero() {
				p.Time = time.Now()
			} else {
				p.Time = bp.Time
			}
		}
		if p.Precision == "" && bp.Precision != "" {
			p.Precision = bp.Precision
		}
		p.Time = client.SetPrecision(p.Time, p.Precision)
		if len(bp.Tags) > 0 {
			if p.Tags == nil {
				p.Tags = make(map[string]string)
			}
			for k := range bp.Tags {
				if p.Tags[k] == "" {
					p.Tags[k] = bp.Tags[k]
				}
			}
		}

		if p.Measurement == "" {
			return points, fmt.Errorf("missing measurement")
		}

		if len(p.Fields) == 0 {
			return points, fmt.Errorf("missing fields")
		}
		// Convert from a client.Point to a tsdb.Point.
		points = append(points, tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time))
	}

	return points, nil
}
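// The tag/time merge behavior above is exercised by TestNormalizeBatchPoints
// further down; as a complement, this is a small hedged sketch (not from the
// original page) of the two error paths, using the same httpd.NormalizeBatchPoints
// and client.BatchPoints types as that test.
func exampleNormalizeBatchPointsErrors() {
	// A point with no measurement name is rejected.
	_, err := httpd.NormalizeBatchPoints(client.BatchPoints{
		Points: []client.Point{
			{Fields: map[string]interface{}{"value": 1.0}},
		},
	})
	fmt.Println(err) // missing measurement

	// A point with a measurement but no fields is also rejected.
	_, err = httpd.NormalizeBatchPoints(client.BatchPoints{
		Points: []client.Point{
			{Measurement: "cpu"},
		},
	})
	fmt.Println(err) // missing fields
}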
Example #15
func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) {
	p, err := graphite.NewParserWithOptions(graphite.Options{
		Templates: []string{"servers.localhost .host.measurement.measurement*"},
		Separator: "_",
	})
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_cpu_load_10",
		tsdb.Tags{"host": "localhost"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu.cpu_load.10 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #16
func TestParseTemplateWhitespace(t *testing.T) {
	p, err := graphite.NewParser([]string{"servers.localhost        .host.measurement*           zone=1c"}, tsdb.Tags{
		"region": "us-east",
		"host":   "should not set",
	})
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	exp := tsdb.NewPoint("cpu_load",
		tsdb.Tags{"host": "localhost", "region": "us-east", "zone": "1c"},
		tsdb.Fields{"value": float64(11)},
		time.Unix(1435077219, 0))

	pt, err := p.Parse("servers.localhost.cpu_load 11 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}
}
Example #17
func TestParseNaN(t *testing.T) {
	p, err := graphite.NewParser([]string{"measurement*"}, nil)
	if err != nil {
		t.Fatalf("unexpected error creating parser, got %v", err)
	}

	pt, err := p.Parse("servers.localhost.cpu_load NaN 1435077219")
	if err != nil {
		t.Fatalf("parse error: %v", err)
	}

	exp := tsdb.NewPoint("servers.localhost.cpu_load",
		tsdb.Tags{},
		tsdb.Fields{"value": math.NaN()},
		time.Unix(1435077219, 0))

	if exp.String() != pt.String() {
		t.Errorf("parse mismatch: got %v, exp %v", pt.String(), exp.String())
	}

	if !math.IsNaN(pt.Fields()["value"].(float64)) {
		t.Errorf("parse value mismatch: expected NaN")
	}
}
Example #18
func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
	w.AddPoints([]tsdb.Point{tsdb.NewPoint(
		name, tags, map[string]interface{}{"value": value}, timestamp,
	)})
}
Example #19
// AddPoint adds a point to the WritePointsRequest with the field name 'value'.
func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) {
	w.Points = append(w.Points, tsdb.NewPoint(
		name, tags, map[string]interface{}{"value": value}, timestamp,
	))
}
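// A short usage sketch of my own (not from the page): the WritePointsRequest
// fields shown in the other examples can be set directly, and the request then
// populated through AddPoint, which stores each value under the field name "value".
func exampleAddPoint() {
	req := &cluster.WritePointsRequest{Database: "db0"}
	req.AddPoint("cpu", 42.5, time.Now(), map[string]string{"host": "server01"})
	_ = req.Points // one tsdb.Point with measurement "cpu", tag host=server01, field value=42.5
}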
Example #20
// servePut implements OpenTSDB's HTTP /api/put endpoint.
func (h *Handler) servePut(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	// Require POST method.
	if r.Method != "POST" {
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return
	}

	// Wrap reader if it's gzip encoded.
	var br *bufio.Reader
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := gzip.NewReader(r.Body)
		if err != nil {
			http.Error(w, "could not read gzip, "+err.Error(), http.StatusBadRequest)
			return
		}

		br = bufio.NewReader(zr)
	} else {
		br = bufio.NewReader(r.Body)
	}

	// Look ahead at the first byte.
	f, err := br.Peek(1)
	if err != nil || len(f) != 1 {
		http.Error(w, "peek error: "+err.Error(), http.StatusBadRequest)
		return
	}

	// Peek to see if this is a JSON array.
	var multi bool
	switch f[0] {
	case '{':
	case '[':
		multi = true
	default:
		http.Error(w, "expected JSON array or hash", http.StatusBadRequest)
		return
	}

	// Decode JSON data into slice of points.
	dps := make([]point, 1)
	if dec := json.NewDecoder(br); multi {
		if err = dec.Decode(&dps); err != nil {
			http.Error(w, "json array decode error", http.StatusBadRequest)
			return
		}
	} else {
		if err = dec.Decode(&dps[0]); err != nil {
			http.Error(w, "json object decode error", http.StatusBadRequest)
			return
		}
	}

	// Convert points into TSDB points.
	points := make([]tsdb.Point, 0, len(dps))
	for i := range dps {
		p := dps[i]

		// Convert timestamp to Go time.
		// Values of 10 digits or fewer are seconds; anything larger is milliseconds.
		var ts time.Time
		if p.Time < 10000000000 {
			ts = time.Unix(p.Time, 0)
		} else {
			ts = time.Unix(p.Time/1000, (p.Time%1000)*int64(time.Millisecond))
		}

		points = append(points, tsdb.NewPoint(p.Metric, p.Tags, map[string]interface{}{"value": p.Value}, ts))
	}

	// Write points.
	if err := h.PointsWriter.WritePoints(&cluster.WritePointsRequest{
		Database:         h.Database,
		RetentionPolicy:  h.RetentionPolicy,
		ConsistencyLevel: h.ConsistencyLevel,
		Points:           points,
	}); influxdb.IsClientError(err) {
		h.Logger.Println("write series error: ", err)
		http.Error(w, "write series error: "+err.Error(), http.StatusBadRequest)
		return
	} else if err != nil {
		h.Logger.Println("write series error: ", err)
		http.Error(w, "write series error: "+err.Error(), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
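// The timestamp heuristic in servePut is easy to misread, so here is a small
// hedged sketch (not part of the handler; toTime is a hypothetical helper)
// spelling it out: OpenTSDB accepts second or millisecond timestamps,
// distinguished by magnitude.
func toTime(v int64) time.Time {
	if v < 10000000000 { // 10 digits or fewer: seconds
		return time.Unix(v, 0)
	}
	// More than 10 digits: milliseconds, with the remainder converted to nanoseconds.
	return time.Unix(v/1000, (v%1000)*int64(time.Millisecond))
}

// toTime(1346846400) and toTime(1346846400123) name the same second; the
// latter carries an extra 123ms of sub-second precision.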
Example #21
// handleTelnetConn accepts OpenTSDB's telnet protocol.
// Each telnet command consists of a line of the form:
//   put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0
func (s *Service) handleTelnetConn(conn net.Conn) {
	defer conn.Close()
	defer s.wg.Done()

	// Wrap connection in a text protocol reader.
	r := textproto.NewReader(bufio.NewReader(conn))
	for {
		line, err := r.ReadLine()
		if err != nil {
			s.Logger.Println("error reading from openTSDB connection", err.Error())
			return
		}

		inputStrs := strings.Fields(line)

		if len(inputStrs) == 1 && inputStrs[0] == "version" {
			conn.Write([]byte("InfluxDB TSDB proxy"))
			continue
		}

		if len(inputStrs) < 4 || inputStrs[0] != "put" {
			s.Logger.Println("TSDBServer: malformed line, skipping: ", line)
			continue
		}

		measurement := inputStrs[1]
		tsStr := inputStrs[2]
		valueStr := inputStrs[3]
		tagStrs := inputStrs[4:]

		var t time.Time
		ts, err := strconv.ParseInt(tsStr, 10, 64)
		if err != nil {
			s.Logger.Println("TSDBServer: malformed time, skipping: ", tsStr)
			continue
		}

		switch len(tsStr) {
		case 10:
			t = time.Unix(ts, 0)
		case 13:
			t = time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond))
		default:
			s.Logger.Println("TSDBServer: time must be 10 or 13 chars, skipping: ", tsStr)
			continue
		}

		tags := make(map[string]string)
		for i := range tagStrs {
			parts := strings.SplitN(tagStrs[i], "=", 2)
			if len(parts) != 2 {
				s.Logger.Println("TSDBServer: malformed tag data", tagStrs[i])
				continue
			}
			k := parts[0]

			tags[k] = parts[1]
		}

		fields := make(map[string]interface{})
		fields["value"], err = strconv.ParseFloat(valueStr, 64)
		if err != nil {
			s.Logger.Println("TSDBServer: could not parse value as float: ", valueStr)
			continue
		}

		p := tsdb.NewPoint(measurement, tags, fields, t)
		if err := s.PointsWriter.WritePoints(&cluster.WritePointsRequest{
			Database:         s.Database,
			RetentionPolicy:  s.RetentionPolicy,
			ConsistencyLevel: s.ConsistencyLevel,
			Points:           []tsdb.Point{p},
		}); err != nil {
			s.Logger.Println("TSDB cannot write data: ", err)
			continue
		}
	}
}
Example #22
func TestProcessorProcess(t *testing.T) {
	dir, err := ioutil.TempDir("", "processor_test")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}

	// expected data to be queued and sent to the shardWriter
	var expShardID, expNodeID, count = uint64(100), uint64(200), 0
	pt := tsdb.NewPoint("cpu", tsdb.Tags{"foo": "bar"}, tsdb.Fields{"value": 1.0}, time.Unix(0, 0))

	sh := &fakeShardWriter{
		ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error {
			count += 1
			if shardID != expShardID {
				t.Errorf("Process() shardID mismatch: got %v, exp %v", shardID, expShardID)
			}
			if nodeID != expNodeID {
				t.Errorf("Process() nodeID mismatch: got %v, exp %v", nodeID, expNodeID)
			}

			if exp := 1; len(points) != exp {
				t.Fatalf("Process() points mismatch: got %v, exp %v", len(points), exp)
			}

			if points[0].String() != pt.String() {
				t.Fatalf("Process() points mismatch:\n got %v\n exp %v", points[0].String(), pt.String())
			}

			return nil
		},
	}

	p, err := NewProcessor(dir, sh, ProcessorOptions{MaxSize: 1024})
	if err != nil {
		t.Fatalf("Process() failed to create processor: %v", err)
	}

	// This should queue the writes
	if err := p.WriteShard(expShardID, expNodeID, []tsdb.Point{pt}); err != nil {
		t.Fatalf("Process() failed to write points: %v", err)
	}

	// This should send the write to the shard writer
	if err := p.Process(); err != nil {
		t.Fatalf("Process() failed to write points: %v", err)
	}

	if exp := 1; count != exp {
		t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
	}

	// Queue should be empty so no writes should be sent again
	if err := p.Process(); err != nil {
		t.Fatalf("Process() failed to write points: %v", err)
	}

	// Count should stay the same
	if exp := 1; count != exp {
		t.Fatalf("Process() write count mismatch: got %v, exp %v", count, exp)
	}

}
Example #23
func TestNormalizeBatchPoints(t *testing.T) {
	now := time.Now()
	tests := []struct {
		name string
		bp   client.BatchPoints
		p    []tsdb.Point
		err  string
	}{
		{
			name: "default",
			bp: client.BatchPoints{
				Points: []client.Point{
					{Measurement: "cpu", Tags: map[string]string{"region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
				},
			},
			p: []tsdb.Point{
				tsdb.NewPoint("cpu", map[string]string{"region": "useast"}, map[string]interface{}{"value": 1.0}, now),
			},
		},
		{
			name: "merge time",
			bp: client.BatchPoints{
				Time: now,
				Points: []client.Point{
					{Measurement: "cpu", Tags: map[string]string{"region": "useast"}, Fields: map[string]interface{}{"value": 1.0}},
				},
			},
			p: []tsdb.Point{
				tsdb.NewPoint("cpu", map[string]string{"region": "useast"}, map[string]interface{}{"value": 1.0}, now),
			},
		},
		{
			name: "merge tags",
			bp: client.BatchPoints{
				Tags: map[string]string{"day": "monday"},
				Points: []client.Point{
					{Measurement: "cpu", Tags: map[string]string{"region": "useast"}, Time: now, Fields: map[string]interface{}{"value": 1.0}},
					{Measurement: "memory", Time: now, Fields: map[string]interface{}{"value": 2.0}},
				},
			},
			p: []tsdb.Point{
				tsdb.NewPoint("cpu", map[string]string{"day": "monday", "region": "useast"}, map[string]interface{}{"value": 1.0}, now),
				tsdb.NewPoint("memory", map[string]string{"day": "monday"}, map[string]interface{}{"value": 2.0}, now),
			},
		},
	}

	for _, test := range tests {
		t.Logf("running test %q", test.name)
		p, e := httpd.NormalizeBatchPoints(test.bp)
		if test.err == "" && e != nil {
			t.Errorf("unexpected error %v", e)
		} else if test.err != "" && e == nil {
			t.Errorf("expected error %s, got <nil>", test.err)
		} else if e != nil && test.err != e.Error() {
			t.Errorf("unexpected error. expected: %s, got %v", test.err, e)
		}
		if !reflect.DeepEqual(p, test.p) {
			t.Logf("expected: %+v", test.p)
			t.Logf("got:      %+v", p)
			t.Error("failed to normalize.")
		}
	}
}
Example #24
func (p *Point) MarshalString() string {
	return tsdb.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time).String()
}
Example #25
func Test_ServerGraphiteUDP(t *testing.T) {
	t.Parallel()

	now := time.Now().UTC().Round(time.Second)

	config := graphite.NewConfig()
	config.Database = "graphitedb"
	config.BatchSize = 0 // No batching.
	config.BatchTimeout = toml.Duration(time.Second)
	config.BindAddress = ":10000"
	config.Protocol = "udp"

	service, err := graphite.NewService(config)
	if err != nil {
		t.Fatalf("failed to create Graphite service: %s", err.Error())
	}

	// Allow test to wait until points are written.
	var wg sync.WaitGroup
	wg.Add(1)

	pointsWriter := PointsWriter{
		WritePointsFn: func(req *cluster.WritePointsRequest) error {
			defer wg.Done()

			if req.Database != "graphitedb" {
				t.Fatalf("unexpected database: %s", req.Database)
			} else if req.RetentionPolicy != "" {
				t.Fatalf("unexpected retention policy: %s", req.RetentionPolicy)
			} else if !reflect.DeepEqual(req.Points, []tsdb.Point{
				tsdb.NewPoint(
					"cpu",
					map[string]string{},
					map[string]interface{}{"value": 23.456},
					time.Unix(now.Unix(), 0),
				),
			}) {
				spew.Dump(req.Points)
				t.Fatalf("unexpected points: %#v", req.Points)
			}
			return nil
		},
	}
	service.PointsWriter = &pointsWriter
	dbCreator := DatabaseCreator{}
	service.MetaStore = &dbCreator

	if err := service.Open(); err != nil {
		t.Fatalf("failed to open Graphite service: %s", err.Error())
	}

	if !dbCreator.Created {
		t.Fatalf("failed to create target database")
	}

	// Connect to the graphite endpoint we just spun up
	_, port, _ := net.SplitHostPort(service.Addr().String())
	conn, err := net.Dial("udp", "127.0.0.1:"+port)
	if err != nil {
		t.Fatal(err)
	}
	data := []byte(`cpu 23.456 `)
	data = append(data, []byte(fmt.Sprintf("%d", now.Unix()))...)
	data = append(data, '\n')
	_, err = conn.Write(data)
	if err != nil {
		t.Fatal(err)
	}

	wg.Wait()
	conn.Close()
}