Example 1
// Ensures the messages writer maps a single message to a single shard.
func TestMessagesWriter_MapShards_One(t *testing.T) {
	ms := MetaStore{}
	rp := NewRetentionPolicy("mym", time.Hour, 3)

	ms.NodeIDFn = func() uint64 { return 1 }
	ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
		return rp, nil
	}

	ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
		return &rp.ShardGroups[0], nil
	}

	c := cluster.MessagesWriter{MetaStore: ms}
	pr := &cluster.WriteMessagesRequest{
		Database:         "mydb",
		RetentionPolicy:  "myrp",
		ConsistencyLevel: cluster.ConsistencyLevelOne,
	}
	pr.AddMessage("cpu", 1.0, time.Now(), nil)

	var (
		shardMappings *cluster.ShardMapping
		err           error
	)
	if shardMappings, err = c.MapShards(pr); err != nil {
		t.Fatalf("unexpected an error: %v", err)
	}

	if exp := 1; len(shardMappings.Messages) != exp {
		t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Messages), exp)
	}
}
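
The MetaStore stub and the NewRetentionPolicy helper used in these examples are not shown. Below is a minimal sketch of what they might look like, inferred only from how the tests call them: the method names on MetaStore and the choice to seed a single shard group spanning [epoch, epoch+duration) are assumptions, and only the fields the tests actually read (ShardGroups, StartTime, EndTime) are populated.

// MetaStore is a configurable test double: each method simply delegates to
// the corresponding function field set by the individual test.
type MetaStore struct {
	NodeIDFn                      func() uint64
	RetentionPolicyFn             func(database, name string) (*meta.RetentionPolicyInfo, error)
	CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error)
}

func (m MetaStore) NodeID() uint64 { return m.NodeIDFn() }

func (m MetaStore) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) {
	return m.RetentionPolicyFn(database, name)
}

func (m MetaStore) CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
	return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp)
}

// NewRetentionPolicy returns a policy with one shard group covering
// [epoch, epoch+duration). The name and node count are accepted for parity
// with the call sites above but are not needed by these assertions.
func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo {
	return &meta.RetentionPolicyInfo{
		ShardGroups: []meta.ShardGroupInfo{{
			StartTime: time.Unix(0, 0),
			EndTime:   time.Unix(0, 0).Add(duration),
		}},
	}
}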
Example 2
// Ensures the messages writer maps multiple messages across shard group boundaries.
func TestMessagesWriter_MapShards_Multiple(t *testing.T) {
	ms := MetaStore{}
	rp := NewRetentionPolicy("mym", time.Hour, 3)
	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
	AttachShardGroupInfo(rp, []uint64{1, 2, 3})

	ms.NodeIDFn = func() uint64 { return 1 }
	ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
		return rp, nil
	}

	ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
		for i, sg := range rp.ShardGroups {
			if timestamp.Equal(sg.StartTime) || (timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime)) {
				return &rp.ShardGroups[i], nil
			}
		}
		panic("should not get here")
	}

	c := cluster.MessagesWriter{MetaStore: ms}
	pr := &cluster.WriteMessagesRequest{
		Database:         "mydb",
		RetentionPolicy:  "myrp",
		ConsistencyLevel: cluster.ConsistencyLevelOne,
	}

	// Three messages that range over the shardGroup duration (1h) and should map to two
	// distinct shards
	pr.AddMessage("cpu", 1.0, time.Unix(0, 0), nil)
	pr.AddMessage("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil)
	pr.AddMessage("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil)

	var (
		shardMappings *cluster.ShardMapping
		err           error
	)
	if shardMappings, err = c.MapShards(pr); err != nil {
		t.Fatalf("unexpected an error: %v", err)
	}

	if exp := 2; len(shardMappings.Messages) != exp {
		t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Messages), exp)
	}

	for _, messages := range shardMappings.Messages {
		// First shard should contain only the first message added
		if len(messages) == 1 && messages[0].Time() != pr.Messages[0].Time() {
			t.Fatalf("MapShards() value mismatch. got %v, exp %v", messages[0].Time(), pr.Messages[0].Time())
		}

		// Second shard should contain the last two messages added
		if len(messages) == 2 && messages[0].Time() != pr.Messages[1].Time() {
			t.Fatalf("MapShards() value mismatch. got %v, exp %v", messages[0].Time(), pr.Messages[1].Time())
		}

		if len(messages) == 2 && messages[1].Time() != pr.Messages[2].Time() {
			t.Fatalf("MapShards() value mismatch. got %v, exp %v", messages[1].Time(), pr.Messages[2].Time())
		}
	}
}
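
AttachShardGroupInfo, used in the second example to add further shard groups, is also not shown. A plausible sketch, again inferred only from its call sites, appends a new shard group that starts where the previous one ends and reuses the previous group's span as its duration. How the ownerIDs argument is recorded on the group's shards depends on fields these tests never read, so it is only noted in a comment.

// AttachShardGroupInfo appends a shard group immediately following the last
// one in the policy, so successive calls build contiguous time ranges
// (e.g. [0,1h), [1h,2h), [2h,3h) in the second example).
func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, ownerIDs []uint64) {
	prev := rp.ShardGroups[len(rp.ShardGroups)-1]
	duration := prev.EndTime.Sub(prev.StartTime)
	sg := meta.ShardGroupInfo{
		StartTime: prev.EndTime,
		EndTime:   prev.EndTime.Add(duration),
	}
	// The real helper presumably also creates shards owned by ownerIDs;
	// the exact shard fields are not exercised by these examples.
	_ = ownerIDs
	rp.ShardGroups = append(rp.ShardGroups, sg)
}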