func TestPointsWriter_WritePoints(t *testing.T) {
	tests := []struct {
		name            string
		database        string
		retentionPolicy string
		consistency     cluster.ConsistencyLevel

		// The error each node's shard write returns; node ID 1 = position 0.
		err    []error
		expErr error
	}{
		// Consistency one
		{
			name:            "write one success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},
		{
			name:            "write one error",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          fmt.Errorf("write failed: a failure"),
		},

		// Consistency any
		{
			name:            "write any success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")},
			expErr:          nil,
		},
		// Consistency all
		{
			name:            "write all success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},
		{
			name:            "write all, 2/3, partial write",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, fmt.Errorf("a failure"), nil},
			expErr:          cluster.ErrPartialWrite,
		},
		{
			name:            "write all, 1/3 (failure)",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          cluster.ErrPartialWrite,
		},

		// Consistency quorum
		{
			name:            "write quorum, 1/3 failure",
			consistency:     cluster.ConsistencyLevelQuorum,
			database:        "mydb",
			retentionPolicy: "myrp",
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil},
			expErr:          cluster.ErrPartialWrite,
		},
		{
			name:            "write quorum, 2/3 success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelQuorum,
			err:             []error{nil, nil, fmt.Errorf("a failure")},
			expErr:          nil,
		},
		{
			name:            "write quorum, 3/3 success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelQuorum,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},

		// All writes fail
		{
			name:            "no writes succeed",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          fmt.Errorf("write failed: a failure"),
		},

		// Hinted handoff w/ ANY
		{
			name:            "hinted handoff write succeed",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          nil,
		},

		// Write to non-existent database
		{
			name:            "write to non-existent database",
			database:        "doesnt_exist",
			retentionPolicy: "",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{nil, nil, nil},
			expErr:          fmt.Errorf("database not found: doesnt_exist"),
		},
	}

	for _, test := range tests {

		pr := &cluster.WritePointsRequest{
			Database:         test.database,
			RetentionPolicy:  test.retentionPolicy,
			ConsistencyLevel: test.consistency,
		}

		// Three points that span the shard group duration (1h) and should map to
		// two distinct shards
		pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil)
		pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil)
		pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil)

		// Copy the test case so the closures below, which may run in parallel,
		// don't race on the loop variable.
		theTest := test
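		// Map point 0 to shard 1 and points 1-2 to shard 2; every shard is
		// owned by nodes 1, 2, and 3.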
		sm := cluster.NewShardMapping()
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(1), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Points[0])
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Points[1])
		sm.MapPoint(
			&meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Points[2])

		// ShardWriter for the local cluster.Node.
		// Lock inside the fakes since these functions get called in parallel.
		var mu sync.Mutex
		sw := &fakeShardWriter{
			ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error {
				mu.Lock()
				defer mu.Unlock()
				return theTest.err[int(nodeID)-1]
			},
		}

		store := &fakeStore{
			WriteFn: func(shardID uint64, points []tsdb.Point) error {
				mu.Lock()
				defer mu.Unlock()
				return theTest.err[0]
			},
		}

		hh := &fakeShardWriter{
			ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error {
				return nil
			},
		}

		ms := NewMetaStore()
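		// Returning a nil DatabaseInfo makes every database lookup miss;
		// presumably only the test case with an empty retention policy exercises
		// this path, which is why the other cases still pass.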
		ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) {
			return nil, nil
		}
		ms.NodeIDFn = func() uint64 { return 1 }
		c := cluster.NewPointsWriter()
		c.MetaStore = ms
		c.ShardWriter = sw
		c.TSDBStore = store
		c.HintedHandoff = hh

		err := c.WritePoints(pr)
		if err == nil && test.expErr != nil {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}

		if err != nil && test.expErr == nil {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}
		if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() {
			t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}
	}
}
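
// The test above references fakeShardWriter, fakeStore, and NewMetaStore,
// which are defined elsewhere in the package (it also assumes imports of fmt,
// sync, testing, time, and the influxdb cluster, meta, and tsdb packages).
// Below is a minimal sketch of those fakes, inferred from how the test uses
// them; the interface method names (WriteShard, WriteToShard, Database,
// NodeID) are assumptions, and the real fake MetaStore likely implements more
// of the cluster.PointsWriter dependency (e.g. retention-policy and
// shard-group lookups) than is shown here.

// fakeShardWriter delegates shard writes to a caller-supplied function.
type fakeShardWriter struct {
	ShardWriteFn func(shardID, nodeID uint64, points []tsdb.Point) error
}

func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []tsdb.Point) error {
	return f.ShardWriteFn(shardID, nodeID, points)
}

// fakeStore delegates local shard writes to a caller-supplied function.
type fakeStore struct {
	WriteFn func(shardID uint64, points []tsdb.Point) error
}

func (f *fakeStore) WriteToShard(shardID uint64, points []tsdb.Point) error {
	return f.WriteFn(shardID, points)
}

// MetaStore is a fake metastore whose behavior is overridden per test via the
// *Fn fields.
type MetaStore struct {
	DatabaseFn func(database string) (*meta.DatabaseInfo, error)
	NodeIDFn   func() uint64
}

func NewMetaStore() *MetaStore { return &MetaStore{} }

func (m *MetaStore) Database(name string) (*meta.DatabaseInfo, error) { return m.DatabaseFn(name) }

func (m *MetaStore) NodeID() uint64 { return m.NodeIDFn() }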
Example #2
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, version string) (*Server, error) {
	// Construct base meta store and data store.
	s := &Server{
		version: version,
		err:     make(chan error),
		closing: make(chan struct{}),

		Hostname:    c.Meta.Hostname,
		BindAddress: c.Meta.BindAddress,

		MetaStore: meta.NewStore(c.Meta),
		TSDBStore: tsdb.NewStore(c.Data.Dir),

		reportingDisabled: c.ReportingDisabled,
	}

	// Copy TSDB configuration.
	s.TSDBStore.MaxWALSize = c.Data.MaxWALSize
	s.TSDBStore.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
	s.TSDBStore.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

	// Set the shard mapper
	s.ShardMapper = cluster.NewShardMapper()
	s.ShardMapper.MetaStore = s.MetaStore
	s.ShardMapper.TSDBStore = s.TSDBStore

	// Initialize query executor.
	s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore)
	s.QueryExecutor.MetaStore = s.MetaStore
	s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}
	s.QueryExecutor.ShardMapper = s.ShardMapper

	// Set the shard writer
	s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
	s.ShardWriter.MetaStore = s.MetaStore

	// Create the hinted handoff service
	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter)

	// Initialize points writer.
	s.PointsWriter = cluster.NewPointsWriter()
	s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
	s.PointsWriter.MetaStore = s.MetaStore
	s.PointsWriter.TSDBStore = s.TSDBStore
	s.PointsWriter.ShardWriter = s.ShardWriter
	s.PointsWriter.HintedHandoff = s.HintedHandoff

	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendPrecreatorService(c.Precreator)
	s.appendSnapshotterService()
	s.appendAdminService(c.Admin)
	s.appendContinuousQueryService(c.ContinuousQuery)
	s.appendHTTPDService(c.HTTPD)
	s.appendCollectdService(c.Collectd)
	if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil {
		return nil, err
	}
	s.appendUDPService(c.UDP)
	s.appendRetentionPolicyService(c.Retention)
	for _, g := range c.Graphites {
		if err := s.appendGraphiteService(g); err != nil {
			return nil, err
		}
	}

	return s, nil
}
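
// A sketch of how the constructor might be used; NewDemoConfig and the
// Open/Close methods are assumptions about the surrounding package, not
// shown in this excerpt:
//
//	c := NewDemoConfig()
//	s, err := NewServer(c, "0.9")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := s.Open(); err != nil {
//		log.Fatal(err)
//	}
//	defer s.Close()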