Example #1
// NewServer returns a new instance of Server built from a config.
func NewServer(c *Config, version string) (*Server, error) {

	s := &Server{
		version: version,
		err:     make(chan error),
		closing: make(chan struct{}),

		Hostname:    c.Meta.Hostname,
		BindAddress: c.Meta.BindAddress,

		MetaStore: meta.NewStore(c.Meta),
		DataStore: db.NewStore(c.Data.Dir),

		reportingDisabled: c.ReportingDisabled,
	}

	// Copy TSDB configuration.
	s.DataStore.MaxWALSize = c.Data.MaxWALSize
	s.DataStore.WALFlushInterval = time.Duration(c.Data.WALFlushInterval)
	s.DataStore.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay)

	// Set the shard mapper.
	s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout))
	s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping
	s.ShardMapper.MetaStore = s.MetaStore
	s.ShardMapper.DataStore = s.DataStore

	// Initialize query executor.
	s.QueryExecutor = db.NewQueryExecutor(s.DataStore)
	s.QueryExecutor.MetaStore = s.MetaStore
	s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore}

	// Set the shard writer.
	s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout))
	s.ShardWriter.MetaStore = s.MetaStore

	// Create the hinted handoff service.
	s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter)

	// Initialize the messages writer.
	s.MessagesWriter = cluster.NewMessagesWriter()
	s.MessagesWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout)
	s.MessagesWriter.MetaStore = s.MetaStore
	s.MessagesWriter.DataStore = s.DataStore
	s.MessagesWriter.ShardWriter = s.ShardWriter
	s.MessagesWriter.HintedHandoff = s.HintedHandoff

	// Append services.
	s.appendClusterService(c.Cluster)
	s.appendSnapshotterService()
	s.appendAdminService(c.Admin)
	s.appendHTTPDService(c.HTTPD)
	s.appendRetentionPolicyService(c.Retention)

	return s, nil
}
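
A minimal usage sketch (not part of the original example): NewConfig and the version string below are hypothetical placeholders, and only the Server fields shown above are referenced.

func ExampleNewServer() {
	// NewConfig is an assumed helper that returns a *Config with defaults;
	// it is not defined in this example.
	c := NewConfig()

	// The version string is a placeholder.
	s, err := NewServer(c, "0.9.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Hostname, s.BindAddress)
}
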
func TestMessagesWriter_WriteMessages(t *testing.T) {
	tests := []struct {
		name            string
		database        string
		retentionPolicy string
		consistency     cluster.ConsistencyLevel

		// The responses returned by each shard write call; node ID 1 maps to position 0.
		err    []error
		expErr error
	}{
		// Consistency one
		{
			name:            "write one success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},
		{
			name:            "write one error",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          fmt.Errorf("write failed: a failure"),
		},

		// Consistency any
		{
			name:            "write any success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")},
			expErr:          nil,
		},
		// Consistency all
		{
			name:            "write all success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},
		{
			name:            "write all, 2/3, partial write",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, fmt.Errorf("a failure"), nil},
			expErr:          cluster.ErrPartialWrite,
		},
		{
			name:            "write all, 1/3 (failure)",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAll,
			err:             []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          cluster.ErrPartialWrite,
		},

		// Consistency quorum
		{
			name:            "write quorum, 1/3 failure",
			consistency:     cluster.ConsistencyLevelQuorum,
			database:        "mydb",
			retentionPolicy: "myrp",
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil},
			expErr:          cluster.ErrPartialWrite,
		},
		{
			name:            "write quorum, 2/3 success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelQuorum,
			err:             []error{nil, nil, fmt.Errorf("a failure")},
			expErr:          nil,
		},
		{
			name:            "write quorum, 3/3 success",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelQuorum,
			err:             []error{nil, nil, nil},
			expErr:          nil,
		},

		// All shard writes fail
		{
			name:            "no writes succeed",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelOne,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          fmt.Errorf("write failed: a failure"),
		},

		// Hinted handoff w/ ANY
		{
			name:            "hinted handoff write succeed",
			database:        "mydb",
			retentionPolicy: "myrp",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")},
			expErr:          nil,
		},

		// Write to non-existent database
		{
			name:            "write to non-existent database",
			database:        "doesnt_exist",
			retentionPolicy: "",
			consistency:     cluster.ConsistencyLevelAny,
			err:             []error{nil, nil, nil},
			expErr:          fmt.Errorf("database not found: doesnt_exist"),
		},
	}

	for _, test := range tests {

		pr := &cluster.WriteMessagesRequest{
			Database:         test.database,
			RetentionPolicy:  test.retentionPolicy,
			ConsistencyLevel: test.consistency,
		}

		// Three messages that range over the shardGroup duration (1h) and should map to two
		// distinct shards.
		pr.AddMessage("cpu", 1.0, time.Unix(0, 0), nil)
		pr.AddMessage("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil)
		pr.AddMessage("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil)

		// Copy the test case to avoid a data race on the loop variable.
		theTest := test
		sm := cluster.NewShardMapping()
		sm.MapMessage(
			&meta.ShardInfo{ID: uint64(1), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Messages[0])
		sm.MapMessage(
			&meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Messages[1])
		sm.MapMessage(
			&meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}},
			pr.Messages[2])

		// Local cluster.Node ShardWriter.
		// Lock around the fake write calls since they are invoked in parallel.
		var mu sync.Mutex
		sw := &fakeShardWriter{
			ShardWriteFn: func(shardID, nodeID uint64, messages []db.Message) error {
				mu.Lock()
				defer mu.Unlock()
				return theTest.err[int(nodeID)-1]
			},
		}

		store := &fakeStore{
			WriteFn: func(shardID uint64, messages []db.Message) error {
				mu.Lock()
				defer mu.Unlock()
				return theTest.err[0]
			},
		}

		hh := &fakeShardWriter{
			ShardWriteFn: func(shardID, nodeID uint64, messages []db.Message) error {
				return nil
			},
		}

		ms := NewMetaStore()
		ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) {
			return nil, nil
		}
		ms.NodeIDFn = func() uint64 { return 1 }
		c := cluster.NewMessagesWriter()
		c.MetaStore = ms
		c.ShardWriter = sw
		c.DataStore = store
		c.HintedHandoff = hh

		err := c.WriteMessages(pr)
		if err == nil && test.expErr != nil {
			t.Errorf("MessagesWriter.WriteMessages(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}

		if err != nil && test.expErr == nil {
			t.Errorf("MessagesWriter.WriteMessages(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}
		if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() {
			t.Errorf("MessagesWriter.WriteMessages(): '%s' error: got %v, exp %v", test.name, err, test.expErr)
		}
	}
}
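
The test above relies on fakeShardWriter, fakeStore, and the MetaStore returned by NewMetaStore, none of which appear in this example. A minimal sketch of what they could look like, inferred from the call sites only; the method names and the exact interfaces required by cluster.MessagesWriter are assumptions:

// Test doubles inferred from their usage above; the real definitions are not
// part of this example, and the method names below are assumptions.
type fakeShardWriter struct {
	ShardWriteFn func(shardID, nodeID uint64, messages []db.Message) error
}

// WriteShard delegates to the injected function so each table-driven case can
// swap behavior without defining a new type.
func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, messages []db.Message) error {
	return f.ShardWriteFn(shardID, nodeID, messages)
}

type fakeStore struct {
	WriteFn func(shardID uint64, messages []db.Message) error
}

func (f *fakeStore) WriteToShard(shardID uint64, messages []db.Message) error {
	return f.WriteFn(shardID, messages)
}

// MetaStore stubs the metastore dependency; only the two function fields the
// test sets are shown, though the real dependency likely exposes more methods.
type MetaStore struct {
	NodeIDFn   func() uint64
	DatabaseFn func(name string) (*meta.DatabaseInfo, error)
}

func NewMetaStore() *MetaStore { return &MetaStore{} }

func (m *MetaStore) NodeID() uint64 { return m.NodeIDFn() }

func (m *MetaStore) Database(name string) (*meta.DatabaseInfo, error) {
	return m.DatabaseFn(name)
}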