Example #1
func checkShardDispatch(t *testing.T, nServer int, iteration int) {
	replica := ConsistentHashMapReplicaNum
	em, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)
	if err != nil {
		t.Fatalf("Connect to zk error for server1: %s", err)
	}
	defer em.Close()
	fmt.Println("start creating shard host")
	for i := 0; i < nServer; i++ {
		name := fmt.Sprintf("192.168.0.1:%d", i)
		conn, err := NewConsistentHashResServer(em, testEmRoot, name,
			replica, 5*time.Second, dummy{})
		if err != nil {
			t.Fatalf("consistent server %s create failed:%s", name, err)
		}
		fmt.Printf("%d\t", i)

		defer conn.Close()
		assert.Equal(t, conn.HostPort(), name)
	}
	fmt.Println("finish creating shard host")

	// wait briefly so the ephemeral nodes are registered before the client connects
	time.Sleep(time.Second)

	emClient, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)
	assert.NoError(t, err)
	defer emClient.Close()
	client, err := NewConsistentHashResClient(emClient, testEmRoot,
		replica, 5*time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent client create failed:%s", err)
	}
	defer client.Close()
	assert.Equal(t, client.HostPort(), "")

	verifyShardDist(t, client, nServer, iteration)
}
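
The verifyShardDist helper is referenced above but not shown. As a rough sketch of the kind of check it presumably performs, the same distribution test can be written directly against consistenthash.Map, which Example #3 already uses for reference hashing. This is not the real helper: the real one runs against the client's view, and the uid%d key format and the exact assertions here are assumptions.

// verifyShardDistSketch is a hypothetical stand-in for verifyShardDist: it
// hashes `iteration` generated keys and checks that every one of the nServer
// hosts ends up owning at least one key.
func verifyShardDistSketch(t *testing.T, nServer, iteration int) {
	cmap := consistenthash.New(ConsistentHashMapReplicaNum, murmur3.Sum32)
	servers := make([]string, 0, nServer)
	for i := 0; i < nServer; i++ {
		name := fmt.Sprintf("192.168.0.1:%d", i)
		servers = append(servers, name)
		cmap.Add(name)
	}

	counts := make(map[string]int, nServer)
	for i := 0; i < iteration; i++ {
		counts[cmap.Get(fmt.Sprintf("uid%d", i))]++
	}

	// with enough iterations every host should receive some share of the keys
	for _, host := range servers {
		assert.True(t, counts[host] > 0, "host %s received no keys", host)
	}
}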
Example #2
func (c *ConnHashTestSuite) TestNewConnHashTimeout() {
	// override log.Exiter to be a no-op; otherwise we would exit fatally,
	// because the conn is closed and we attempt to create an ephemeral node on a closed conn
	em, err := ephemeral.NewEtcdEphemeral(etcdCli)
	assert.NoError(c.T(), err, "should get the connection")

	conn, err := NewConsistentHashResServer(em, testEmRoot, svr3,
		ConsistentHashMapReplicaNum, time.Nanosecond, dummy{})
	fmt.Printf("result of NewConsistentHashResServer %v, %v\n", conn, err)

	assert.Error(c.T(), err, "should hit timeout error")
	assert.Equal(c.T(), err, ErrConnTimedOut)
	assert.Nil(c.T(), conn, "should be nil")
	em.Close()
	fmt.Println("done")
}
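
Example #2 asserts that the constructor surfaces ErrConnTimedOut when registration cannot complete within the deadline. Outside of tests, a caller would typically treat that error as retryable. The sketch below is illustrative only and is not part of the original suite: the retry count and backoff policy are assumptions, and it reuses the fixtures etcdCli, svr3, testEmRoot, and dummy{} from the suite above.

// TestRetryOnTimeoutSketch shows one way a caller could retry registration
// when NewConsistentHashResServer returns ErrConnTimedOut (compared by value,
// mirroring the assert.Equal in the test above).
func (c *ConnHashTestSuite) TestRetryOnTimeoutSketch() {
	em, err := ephemeral.NewEtcdEphemeral(etcdCli)
	assert.NoError(c.T(), err, "should get the connection")
	defer em.Close()

	for attempt := 1; attempt <= 3; attempt++ {
		conn, err := NewConsistentHashResServer(em, testEmRoot, svr3,
			ConsistentHashMapReplicaNum, time.Second, dummy{})
		if err == nil {
			conn.Close()
			return
		}
		if err != ErrConnTimedOut {
			c.T().Fatalf("unexpected error on attempt %d: %s", attempt, err)
		}
		time.Sleep(time.Duration(attempt) * time.Second) // naive linear backoff
	}
	c.T().Fatal("registration kept timing out after 3 attempts")
}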
Example #3
func (c *ConnHashTestSuite) TestConsistentHashRes() {
	// Create two servers, 127.0.0.1:8080 and 192.168.0.1:81.
	// Test keys uid1-uid10 and compare the results against a direct
	// hash calculation from consistenthash.Map.

	// First connect server 1 and server 2.

	//consistent server 1
	t := c.T()
	em1, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)

	if err != nil {
		t.Fatalf("Connect to zk error for server1: %s", err)
	}
	conn1, err := NewConsistentHashResServer(em1, testEmRoot, svr1,
		ConsistentHashMapReplicaNum, time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent server 1 %s create failed:%s", svr1, err)
	}
	assert.Equal(t, conn1.HostPort(), svr1)

	// wait for the membership change to stabilize
	time.Sleep(1 * time.Second)
	assert.True(t, conn1.IsMyKey("any keys"), "should always be true since there is only one server")

	//consistent server 2
	em2, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)
	if err != nil {
		t.Fatalf("Connect to zk error for server2: %s", err)
	}
	conn2, err := NewConsistentHashResServer(em2, testEmRoot, svr2,
		ConsistentHashMapReplicaNum, time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent server 2 %s create failed:%s", svr2, err)
	}
	assert.Equal(t, conn2.HostPort(), svr2)

	//client
	emClient, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)
	assert.NoError(t, err)
	client, err := NewConsistentHashResClient(emClient, testEmRoot,
		ConsistentHashMapReplicaNum, time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent client create failed:%s", err)
	}
	assert.Equal(t, client.HostPort(), "")

	//add server 1 and 2
	cmap := consistenthash.New(ConsistentHashMapReplicaNum, murmur3.Sum32)
	cmap.Add(svr1, svr2)

	//verify hashes are the same across all instances
	verifyAnswer(t, cmap, conn1, conn2, client)
	//verify peers
	verifyPeers(t, client.GetResources(), []string{svr1, svr2})
	// verify shard assignment distribution
	verifyShardDist(t, client, 2, 1000)

	//add another server
	em3, err := ephemeral.NewEtcdEphemeral(etcdForwdCli)
	if err != nil {
		t.Fatalf("Connect to zk error for server3: %s", err)
	}
	conn3, err := NewConsistentHashResServer(em3, testEmRoot, svr3,
		ConsistentHashMapReplicaNum, time.Second, dummy{})
	if err != nil {
		t.Fatalf("consistent server 3 %s create failed:%s", svr3, err)
	}
	assert.Equal(t, conn3.HostPort(), svr3)

	cmap.Add(svr3)

	//verify hashes are the same across all instances
	verifyAnswer(t, cmap, conn1, conn2, conn3, client)
	//verify peers
	verifyPeers(t, client.GetResources(), []string{svr1, svr2, svr3})
	// verify shard assignment distribution
	verifyShardDist(t, client, 3, 1000)

	// when etcd is unreachable for long enough, all ephemeral nodes expire
	// because the client session is expired on the server side

	// once the connection is back, we should still be able to shard
	c.stopForward <- struct{}{}
	time.Sleep(10 * time.Second)

	// bring the connection back
	c.stopForward, _ = c.forwarder()
	time.Sleep(time.Second) // give the watchers a second to catch up

	//verify peers
	verifyPeers(t, client.GetResources(), []string{svr1, svr2, svr3})
	// verify shard assignment distribution
	verifyShardDist(t, client, 3, 1000)

	conn1.Close()
	conn2.Close()
	conn3.Close()
	client.Close()
	emClient.Close()
	em1.Close()
	em2.Close()
	em3.Close()
}
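
The helpers verifyPeers and verifyAnswer are also not shown. The sketches below indicate what they presumably assert, based purely on how they are called above; the keyOwner interface, the uid1..uid10 key range, the assumption that GetResources returns a []string of host:port values, and the omission of the client from the ownership check (its key-lookup API is not shown) are all assumptions rather than the real implementations.

// keyOwner is a hypothetical interface covering the two methods these
// sketches need; the server connections above expose both.
type keyOwner interface {
	HostPort() string
	IsMyKey(key string) bool
}

// verifyPeersSketch: the resources reported by the client should match the
// expected set of registered servers, independent of order.
func verifyPeersSketch(t *testing.T, got, want []string) {
	assert.ElementsMatch(t, want, got, "registered peers mismatch")
}

// verifyAnswerSketch: for keys uid1..uid10, each server's IsMyKey answer
// should agree with a direct consistenthash.Map lookup.
func verifyAnswerSketch(t *testing.T, cmap *consistenthash.Map, conns ...keyOwner) {
	for i := 1; i <= 10; i++ {
		key := fmt.Sprintf("uid%d", i)
		owner := cmap.Get(key)
		for _, conn := range conns {
			assert.Equal(t, owner == conn.HostPort(), conn.IsMyKey(key),
				"ownership of %s disagrees for %s", key, conn.HostPort())
		}
	}
}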