Example #1
// Ensure that the cluster configuration can be reloaded.
func TestClusterConfigReload(t *testing.T) {
	procAttr := &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}
	argGroup, etcds, err := CreateCluster(3, procAttr, false)
	assert.NoError(t, err)
	defer DestroyCluster(etcds)

	resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":3, "removeDelay":60}`))
	assert.Equal(t, resp.StatusCode, 200)

	time.Sleep(1 * time.Second)

	resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
	body := tests.ReadBodyJSON(resp)
	assert.Equal(t, resp.StatusCode, 200)
	assert.Equal(t, body["activeSize"], 3)
	assert.Equal(t, body["removeDelay"], 60)

	// Kill all nodes, then restart them with the same arguments.
	DestroyCluster(etcds)

	for i := 0; i < 3; i++ {
		etcds[i], err = os.StartProcess(EtcdBinPath, argGroup[i], procAttr)
		assert.NoError(t, err)
	}

	time.Sleep(1 * time.Second)

	resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
	body = tests.ReadBodyJSON(resp)
	assert.Equal(t, resp.StatusCode, 200)
	assert.Equal(t, body["activeSize"], 3)
	assert.Equal(t, body["removeDelay"], 60)
}
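
The fixed one-second sleeps above make this test timing-sensitive. A minimal polling alternative is sketched below, assuming only the tests.Get and tests.ReadBodyJSON helpers already used in the example; the helper name waitForActiveSize is hypothetical.

// waitForActiveSize polls the admin config endpoint until the reported
// activeSize matches the expected value or the timeout elapses.
// Note: ReadBodyJSON yields map[string]interface{}, so JSON numbers arrive as float64.
func waitForActiveSize(configURL string, want int, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if resp, err := tests.Get(configURL); err == nil {
			body := tests.ReadBodyJSON(resp)
			if size, ok := body["activeSize"].(float64); ok && int(size) == want {
				return true
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
	return false
}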
Example #2
// Ensure that we can start a v2 cluster from the logs of a v1 cluster.
func TestV1ClusterMigration(t *testing.T) {
	path, _ := ioutil.TempDir("", "etcd-")
	os.RemoveAll(path)
	defer os.RemoveAll(path)

	nodes := []string{"node0", "node2"}
	for i, node := range nodes {
		nodepath := filepath.Join(path, node)
		fixturepath, _ := filepath.Abs(filepath.Join("../fixtures/v1.cluster/", node))
		fmt.Println("FIXPATH  =", fixturepath)
		fmt.Println("NODEPATH =", nodepath)
		os.MkdirAll(filepath.Dir(nodepath), 0777)

		// Copy over fixture files.
		c := exec.Command("cp", "-rf", fixturepath, nodepath)
		if out, err := c.CombinedOutput(); err != nil {
			fmt.Println(">>>>>>\n", string(out), "<<<<<<")
			panic("Fixture initialization error:" + err.Error())
		}

		procAttr := new(os.ProcAttr)
		procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

		args := []string{"etcd", fmt.Sprintf("-data-dir=%s", nodepath)}
		args = append(args, "-addr", fmt.Sprintf("127.0.0.1:%d", 4001+i))
		args = append(args, "-peer-addr", fmt.Sprintf("127.0.0.1:%d", 7001+i))
		args = append(args, "-name", node)
		process, err := os.StartProcess(EtcdBinPath, args, procAttr)
		if err != nil {
			t.Fatal("start process failed:" + err.Error())
			return
		}
		defer process.Kill()
		time.Sleep(time.Second)
	}

	// Ensure deleted message is removed.
	resp, err := tests.Get("http://localhost:4001/v2/keys/message")
	body := tests.ReadBody(resp)
	assert.Nil(t, err, "")
	assert.Equal(t, resp.StatusCode, http.StatusNotFound)
	assert.Equal(t, string(body), `{"errorCode":100,"message":"Key not found","cause":"/message","index":11}`+"\n")

	// Ensure the remaining key survived the migration with its original value.
	resp, err = tests.Get("http://localhost:4001/v2/keys/foo")
	body = tests.ReadBody(resp)
	assert.Nil(t, err, "")
	assert.Equal(t, resp.StatusCode, 200, "")
	assert.Equal(t, string(body), `{"action":"get","node":{"key":"/foo","value":"one","modifiedIndex":9,"createdIndex":9}}`)
}
Example #3
// Ensures that a directory of values can be recursively retrieved for a given key.
//
//   $ curl -X PUT localhost:4001/v2/keys/foo/x -d value=XXX
//   $ curl -X PUT localhost:4001/v2/keys/foo/y/z -d value=YYY
//   $ curl localhost:4001/v2/keys/foo -d recursive=true
//
func TestV2GetKeyRecursively(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		v := url.Values{}
		v.Set("value", "XXX")
		v.Set("ttl", "10")
		resp, _ := tests.PutForm(fmt.Sprintf("http://%s%s", s.URL(), "/v2/keys/foo/x"), v)
		tests.ReadBody(resp)

		v.Set("value", "YYY")
		resp, _ = tests.PutForm(fmt.Sprintf("http://%s%s", s.URL(), "/v2/keys/foo/y/z"), v)
		tests.ReadBody(resp)

		resp, _ = tests.Get(fmt.Sprintf("http://%s%s", s.URL(), "/v2/keys/foo?recursive=true"))
		body := tests.ReadBodyJSON(resp)
		assert.Equal(t, body["action"], "get", "")
		assert.Equal(t, body["key"], "/foo", "")
		assert.Equal(t, body["dir"], true, "")
		assert.Equal(t, body["modifiedIndex"], 1, "")
		assert.Equal(t, len(body["kvs"].([]interface{})), 2, "")

		kv0 := body["kvs"].([]interface{})[0].(map[string]interface{})
		assert.Equal(t, kv0["key"], "/foo/x", "")
		assert.Equal(t, kv0["value"], "XXX", "")
		assert.Equal(t, kv0["ttl"], 10, "")

		kv1 := body["kvs"].([]interface{})[1].(map[string]interface{})
		assert.Equal(t, kv1["key"], "/foo/y", "")
		assert.Equal(t, kv1["dir"], true, "")

		kvs2 := kv1["kvs"].([]interface{})[0].(map[string]interface{})
		assert.Equal(t, kvs2["key"], "/foo/y/z", "")
		assert.Equal(t, kvs2["value"], "YYY", "")
	})
}
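
As an aside, the nested kvs layout in the response above can be walked generically from the decoded map[string]interface{}. A small sketch follows; the helper name collectKeys is made up for illustration.

// collectKeys recursively gathers every "key" field from a decoded node in
// the old kvs-style response, descending into nested directories.
func collectKeys(node map[string]interface{}, out *[]string) {
	if key, ok := node["key"].(string); ok {
		*out = append(*out, key)
	}
	if kvs, ok := node["kvs"].([]interface{}); ok {
		for _, child := range kvs {
			if m, ok := child.(map[string]interface{}); ok {
				collectKeys(m, out)
			}
		}
	}
}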
Example #4
// Ensures that a directory of values can be retrieved for a given key.
//
//   $ curl -X PUT localhost:4001/v1/keys/foo/x -d value=XXX
//   $ curl -X PUT localhost:4001/v1/keys/foo/y/z -d value=YYY
//   $ curl localhost:4001/v1/keys/foo
//
func TestV1GetKeyDir(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo/x"), v)
		tests.ReadBody(resp)

		v.Set("value", "YYY")
		resp, _ = tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo/y/z"), v)
		tests.ReadBody(resp)

		resp, _ = tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo"))
		assert.Equal(t, resp.StatusCode, http.StatusOK)
		body := tests.ReadBody(resp)
		nodes := make([]interface{}, 0)
		if err := json.Unmarshal(body, &nodes); err != nil {
			panic(fmt.Sprintf("HTTP body JSON parse error: %v", err))
		}
		assert.Equal(t, len(nodes), 2, "")

		node0 := nodes[0].(map[string]interface{})
		assert.Equal(t, node0["action"], "get", "")
		assert.Equal(t, node0["key"], "/foo/x", "")
		assert.Equal(t, node0["value"], "XXX", "")

		node1 := nodes[1].(map[string]interface{})
		assert.Equal(t, node1["action"], "get", "")
		assert.Equal(t, node1["key"], "/foo/y", "")
		assert.Equal(t, node1["dir"], true, "")
	})
}
Example #5
// Ensures that HEAD works.
//
//   $ curl -I localhost:4001/v1/keys/foo/bar -> fail
//   $ curl -X PUT localhost:4001/v1/keys/foo/bar -d value=XXX
//   $ curl -I localhost:4001/v1/keys/foo/bar
//
func TestV1HeadKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		v := url.Values{}
		v.Set("value", "XXX")
		fullURL := fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo/bar")
		resp, _ := tests.Get(fullURL)
		assert.Equal(t, resp.StatusCode, http.StatusNotFound)
		assert.Equal(t, resp.ContentLength, -1)

		resp, _ = tests.PutForm(fullURL, v)
		tests.ReadBody(resp)

		resp, _ = tests.Get(fullURL)
		assert.Equal(t, resp.StatusCode, http.StatusOK)
		assert.Equal(t, resp.ContentLength, -1)
	})
}
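
The listing above probes the endpoint with tests.Get; if a real HEAD request is wanted and the tests helper package offers no HEAD wrapper, the standard library suffices. A minimal sketch using net/http, which is an assumption and not part of the original test:

// headKey issues an HTTP HEAD request against a key URL and returns the
// status code; HEAD responses carry headers only, so no body is read.
func headKey(fullURL string) (int, error) {
	resp, err := http.Head(fullURL)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}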
Example #6
// Ensures that a watcher can wait for a value to be set and return it to the client.
//
//   $ curl localhost:4001/v2/keys/foo/bar?wait=true
//   $ curl -X PUT localhost:4001/v2/keys/foo/bar -d value=XXX
//
func TestV2WatchKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		// There exists a little gap between etcd ready to serve and
		// it actually serves the first request, which means the response
		// delay could be a little bigger.
		// This test is time sensitive, so it does one request to ensure
		// that the server is working.
		tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar"))

		var watchResp *http.Response
		c := make(chan bool)
		go func() {
			watchResp, _ = tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar?wait=true"))
			c <- true
		}()

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)

		// Set a value.
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// A response should follow from the GET above.
		time.Sleep(1 * time.Millisecond)

		select {
		case <-c:

		default:
			t.Fatal("cannot get watch result")
		}

		body := tests.ReadBodyJSON(watchResp)
		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "set", "")

		node := body["node"].(map[string]interface{})
		assert.Equal(t, node["key"], "/foo/bar", "")
		assert.Equal(t, node["value"], "XXX", "")
		assert.Equal(t, node["modifiedIndex"], 3, "")
	})
}
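
The sleep-then-select pattern above can still miss a slow watch response. A bounded wait on the channel is one alternative; below is a minimal sketch of a hypothetical helper that could replace the select block in tests like this one.

// waitOrFail blocks until c signals or the timeout elapses, failing the
// test in the latter case instead of relying on a fixed sleep.
func waitOrFail(t *testing.T, c <-chan bool, timeout time.Duration) {
	select {
	case <-c:
	case <-time.After(timeout):
		t.Fatal("cannot get watch result")
	}
}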
Example #7
// Ensures that a value can be retrieved for a given key.
//
//   $ curl localhost:4001/v1/keys/foo/bar -> fail
//   $ curl -X PUT localhost:4001/v1/keys/foo/bar -d value=XXX
//   $ curl localhost:4001/v1/keys/foo/bar
//
func TestV1GetKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		v := url.Values{}
		v.Set("value", "XXX")
		fullURL := fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo/bar")
		resp, _ := tests.Get(fullURL)
		assert.Equal(t, resp.StatusCode, http.StatusNotFound)

		resp, _ = tests.PutForm(fullURL, v)
		tests.ReadBody(resp)

		resp, _ = tests.Get(fullURL)
		assert.Equal(t, resp.StatusCode, http.StatusOK)
		body := tests.ReadBodyJSON(resp)
		assert.Equal(t, body["action"], "get", "")
		assert.Equal(t, body["key"], "/foo/bar", "")
		assert.Equal(t, body["value"], "XXX", "")
		assert.Equal(t, body["index"], 2, "")
	})
}
Example #8
// Ensures that a value can be retrieved for a given key.
//
//   $ curl localhost:4001/v2/keys/foo/bar -> fail
//   $ curl -X PUT localhost:4001/v2/keys/foo/bar -d value=XXX
//   $ curl localhost:4001/v2/keys/foo/bar
//
func TestV2GetKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		v := url.Values{}
		v.Set("value", "XXX")
		fullURL := fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar")
		resp, _ := tests.Get(fullURL)
		assert.Equal(t, resp.StatusCode, http.StatusNotFound)

		resp, _ = tests.PutForm(fullURL, v)
		tests.ReadBody(resp)

		resp, _ = tests.Get(fullURL)
		assert.Equal(t, resp.StatusCode, http.StatusOK)
		body := tests.ReadBodyJSON(resp)
		assert.Equal(t, body["action"], "get", "")
		node := body["node"].(map[string]interface{})
		assert.Equal(t, node["key"], "/foo/bar", "")
		assert.Equal(t, node["value"], "XXX", "")
		assert.Equal(t, node["modifiedIndex"], 3, "")
	})
}
Example #9
// Ensures that a value can be retrieved for a given key.
//
//   $ curl -X PUT localhost:4001/v2/keys/foo/bar -d value=XXX
//   $ curl localhost:4001/v2/keys/foo/bar
//
func TestV2GetKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("http://%s%s", s.URL(), "/v2/keys/foo/bar"), v)
		tests.ReadBody(resp)
		resp, _ = tests.Get(fmt.Sprintf("http://%s%s", s.URL(), "/v2/keys/foo/bar"))
		body := tests.ReadBodyJSON(resp)
		assert.Equal(t, body["action"], "get", "")
		assert.Equal(t, body["key"], "/foo/bar", "")
		assert.Equal(t, body["value"], "XXX", "")
		assert.Equal(t, body["modifiedIndex"], 1, "")
	})
}
Example #10
// Ensures that a watcher can wait for a value to be set after a given index.
//
//   $ curl localhost:4001/v2/keys/foo/bar?wait=true&waitIndex=4
//   $ curl -X PUT localhost:4001/v2/keys/foo/bar -d value=XXX
//   $ curl -X PUT localhost:4001/v2/keys/foo/bar -d value=YYY
//
func TestV2WatchKeyWithIndex(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		var body map[string]interface{}
		c := make(chan bool)
		go func() {
			resp, _ := tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar?wait=true&waitIndex=3"))
			body = tests.ReadBodyJSON(resp)
			c <- true
		}()

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)
		assert.Nil(t, body, "")

		// Set a value (before given index).
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)
		assert.Nil(t, body, "")

		// Set a value (at the given index).
		v.Set("value", "YYY")
		resp, _ = tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// A response should follow from the GET above.
		time.Sleep(1 * time.Millisecond)

		select {
		case <-c:

		default:
			t.Fatal("cannot get watch result")
		}

		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "set", "")

		node := body["node"].(map[string]interface{})
		assert.Equal(t, node["key"], "/foo/bar", "")
		assert.Equal(t, node["value"], "YYY", "")
		assert.Equal(t, node["modifiedIndex"], 3, "")
	})
}
Example #11
// Ensure that the cluster configuration can be updated.
func TestClusterConfigSet(t *testing.T) {
	_, etcds, err := CreateCluster(3, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
	assert.NoError(t, err)
	defer DestroyCluster(etcds)

	resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":3, "removeDelay":60}`))
	assert.Equal(t, resp.StatusCode, 200)

	time.Sleep(1 * time.Second)

	resp, _ = tests.Get("http://localhost:7002/v2/admin/config")
	body := tests.ReadBodyJSON(resp)
	assert.Equal(t, resp.StatusCode, 200)
	assert.Equal(t, body["activeSize"], 3)
	assert.Equal(t, body["removeDelay"], 60)
}
Example #12
// TestGetMachines ensures that '/v2/admin/machines' returns information about every machine in the cluster.
func TestGetMachines(t *testing.T) {
	_, etcds, err := CreateCluster(3, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
	assert.NoError(t, err)
	defer DestroyCluster(etcds)

	time.Sleep(1 * time.Second)

	resp, err := tests.Get("http://localhost:7001/v2/admin/machines")
	if !assert.Equal(t, err, nil) {
		t.FailNow()
	}
	assert.Equal(t, resp.StatusCode, 200)
	machines := make([]map[string]interface{}, 0)
	b := tests.ReadBody(resp)
	json.Unmarshal(b, &machines)
	assert.Equal(t, len(machines), 3)
	if machines[0]["state"] != "leader" && machines[1]["state"] != "leader" && machines[2]["state"] != "leader" {
		t.Errorf("no leader in the cluster")
	}
}
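
For comparison, the machines payload can be decoded into a typed struct rather than map[string]interface{}. The sketch below only relies on the state field asserted above; the name field is an assumption about the payload.

// machineInfo models a subset of the /v2/admin/machines response.
type machineInfo struct {
	Name  string `json:"name"`  // assumed field
	State string `json:"state"` // asserted in the test above
}

// hasLeader reports whether any machine in the decoded payload is the leader.
func hasLeader(body []byte) (bool, error) {
	var machines []machineInfo
	if err := json.Unmarshal(body, &machines); err != nil {
		return false, err
	}
	for _, m := range machines {
		if m.State == "leader" {
			return true, nil
		}
	}
	return false, nil
}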
Example #13
// Ensures that a watcher on a key inside a TTL'd directory receives an expire event when the directory expires.
//
//   $ curl -X PUT localhost:4001/v2/keys/keyindir -d dir=true -d ttl=1
//   $ curl -X PUT localhost:4001/v2/keys/keyindir/bar -d value=XXX
//   $ curl localhost:4001/v2/keys/keyindir/bar?wait=true
//
func TestV2WatchKeyInDir(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		var body map[string]interface{}
		c := make(chan bool)

		// Create a directory with a one-second TTL.
		v := url.Values{}
		v.Set("dir", "true")
		v.Set("ttl", "1")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/keyindir"), v)
		tests.ReadBody(resp)

		// Create a key inside the directory.
		v = url.Values{}
		v.Set("value", "XXX")
		resp, _ = tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/keyindir/bar"), v)
		tests.ReadBody(resp)

		go func() {
			resp, _ := tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/keyindir/bar?wait=true"))
			body = tests.ReadBodyJSON(resp)
			c <- true
		}()

		// Wait for the directory to expire; the expiry event may be delayed by up to 500 milliseconds.
		time.Sleep(1500 * time.Millisecond)

		select {
		case <-c:

		default:
			t.Fatal("cannot get watch result")
		}

		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "expire", "")

		node := body["node"].(map[string]interface{})
		assert.Equal(t, node["key"], "/keyindir", "")
	})
}
Example #14
// Ensures that a watcher can wait for a value to be set and return it to the client.
//
//   $ curl localhost:4001/v1/watch/foo/bar
//   $ curl -X PUT localhost:4001/v1/keys/foo/bar -d value=XXX
//
func TestV1WatchKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		var body map[string]interface{}
		c := make(chan bool)
		go func() {
			resp, _ := tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v1/watch/foo/bar"))
			body = tests.ReadBodyJSON(resp)
			c <- true
		}()

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)
		assert.Nil(t, body, "")

		// Set a value.
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// A response should follow from the GET above.
		time.Sleep(1 * time.Millisecond)

		select {
		case <-c:

		default:
			t.Fatal("cannot get watch result")
		}

		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "set", "")

		assert.Equal(t, body["key"], "/foo/bar", "")
		assert.Equal(t, body["value"], "XXX", "")
		assert.Equal(t, body["index"], 2, "")
	})
}
Example #15
// Ensure that we can start a v2 node from the log of a v1 node.
func TestV1SoloMigration(t *testing.T) {
	path, _ := ioutil.TempDir("", "etcd-")
	os.MkdirAll(path, 0777)
	defer os.RemoveAll(path)

	nodepath := filepath.Join(path, "node0")
	fixturepath, _ := filepath.Abs("../fixtures/v1.solo/node0")
	fmt.Println("DATA_DIR =", nodepath)

	// Copy over fixture files.
	c := exec.Command("cp", "-rf", fixturepath, nodepath)
	if out, err := c.CombinedOutput(); err != nil {
		fmt.Println(">>>>>>\n", string(out), "<<<<<<")
		panic("Fixture initialization error:" + err.Error())
	}

	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	args := []string{"etcd", fmt.Sprintf("-data-dir=%s", nodepath)}
	args = append(args, "-addr", "127.0.0.1:4001")
	args = append(args, "-peer-addr", "127.0.0.1:7001")
	args = append(args, "-name", "node0")
	process, err := os.StartProcess(EtcdBinPath, args, procAttr)
	if err != nil {
		t.Fatal("start process failed:" + err.Error())
		return
	}
	defer process.Kill()
	time.Sleep(time.Second)

	// Ensure the migrated data is readable through the v2 keys API.
	resp, err := tests.Get("http://localhost:4001/v2/keys/message")
	tests.ReadBody(resp)
	assert.Nil(t, err, "")
	assert.Equal(t, resp.StatusCode, 200, "")
}
Example #16
func testGetLeader(s *server.Server, key string) (string, error) {
	resp, err := tests.Get(fmt.Sprintf("%s/mod/v2/leader/%s", s.URL(), key))
	ret := tests.ReadBody(resp)
	return string(ret), err
}
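
A companion setter would normally accompany this getter in a leader-module test. The sketch below is hypothetical: the ttl query parameter and name form field are assumptions about the leader module's interface, and only the tests.PutForm and tests.ReadBody helpers already seen in these examples are used.

// testSetLeader attempts to claim the leader slot for key with the given
// name and TTL, returning the raw response body.
func testSetLeader(s *server.Server, key, name string, ttl int) (string, error) {
	v := url.Values{}
	v.Set("name", name)
	resp, err := tests.PutForm(fmt.Sprintf("%s/mod/v2/leader/%s?ttl=%d", s.URL(), key, ttl), v)
	ret := tests.ReadBody(resp)
	return string(ret), err
}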
Example #17
File: mod_lock_test.go  Project: kula/etcd
func testGetLockIndex(s *server.Server, key string) (string, error) {
	resp, err := tests.Get(fmt.Sprintf("%s/mod/v2/lock/%s?field=index", s.URL(), key))
	ret := tests.ReadBody(resp)
	return string(ret), err
}
Example #18
// Create a full cluster and then change the active size.
func TestStandby(t *testing.T) {
	clusterSize := 15
	_, etcds, err := CreateCluster(clusterSize, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
	if !assert.NoError(t, err) {
		t.Fatal("cannot create cluster")
	}
	defer DestroyCluster(etcds)

	resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"syncInterval":1}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	time.Sleep(time.Second)
	c := etcd.NewClient(nil)
	c.SyncCluster()

	// Verify that only the default number of machines is active.
	result, err := c.Get("_etcd/machines", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 9)

	t.Log("Reconfigure with a smaller active size")
	resp, _ = tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":7, "syncInterval":1}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	// Wait for two monitor cycles before checking for demotion.
	time.Sleep((2 * server.ActiveMonitorTimeout) + (2 * time.Second))

	// Verify that we now have seven peers.
	result, err = c.Get("_etcd/machines", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 7)

	t.Log("Test the functionality of all servers")
	// Set key.
	time.Sleep(time.Second)
	if _, err := c.Set("foo", "bar", 0); err != nil {
		panic(err)
	}
	time.Sleep(time.Second)

	// Check that all peers and standbys have the value.
	for i := range etcds {
		resp, err := tests.Get(fmt.Sprintf("http://localhost:%d/v2/keys/foo", 4000+(i+1)))
		if assert.NoError(t, err) {
			body := tests.ReadBodyJSON(resp)
			if node, _ := body["node"].(map[string]interface{}); assert.NotNil(t, node) {
				assert.Equal(t, node["value"], "bar")
			}
		}
	}

	t.Log("Reconfigure with larger active size and wait for join")
	resp, _ = tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":8, "syncInterval":1}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	time.Sleep((1 * time.Second) + (1 * time.Second))

	// Verify that exactly eight machines are in the cluster.
	result, err = c.Get("_etcd/machines", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 8)
}
Example #19
// Create a full cluster and then change the active size dramatically.
func TestStandbyDramaticChange(t *testing.T) {
	clusterSize := 9
	_, etcds, err := CreateCluster(clusterSize, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
	assert.NoError(t, err)
	defer DestroyCluster(etcds)

	if err != nil {
		t.Fatal("cannot create cluster")
	}

	time.Sleep(time.Second)
	c := etcd.NewClient(nil)
	c.SyncCluster()

	num := clusterSize
	for i := 0; i < 3; i++ {
		for inc := 0; inc < 2; inc++ {
			// Verify that the cluster currently has num machines.
			result, err := c.Get("_etcd/machines", false, true)
			assert.NoError(t, err)
			assert.Equal(t, len(result.Node.Nodes), num)

			if inc == 0 {
				num -= 6
			} else {
				num += 6
			}

			t.Log("Reconfigure with active size", num)
			resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(fmt.Sprintf(`{"activeSize":%d, "syncInterval":1}`, num)))
			if !assert.Equal(t, resp.StatusCode, 200) {
				t.FailNow()
			}

			if inc == 0 {
				// Wait for monitor cycles before checking for demotion.
				time.Sleep(6*server.ActiveMonitorTimeout + (1 * time.Second))
			} else {
				time.Sleep(time.Second + (1 * time.Second))
			}

			// Verify that the cluster now has num peers.
			result, err = c.Get("_etcd/machines", false, true)
			assert.NoError(t, err)
			assert.Equal(t, len(result.Node.Nodes), num)

			t.Log("Test the functionality of all servers")
			// Set key.
			if _, err := c.Set("foo", "bar", 0); err != nil {
				panic(err)
			}
			time.Sleep(100 * time.Millisecond)

			// Check that all peers and standbys have the value.
			for i := range etcds {
				resp, err := tests.Get(fmt.Sprintf("http://localhost:%d/v2/keys/foo", 4000+(i+1)))
				if assert.NoError(t, err) {
					body := tests.ReadBodyJSON(resp)
					if node, _ := body["node"].(map[string]interface{}); assert.NotNil(t, node) {
						assert.Equal(t, node["value"], "bar")
					}
				}
			}
		}
	}
}
Example #20
func testGetLockValue(s *server.Server, key string) (string, int, error) {
	resp, err := tests.Get(fmt.Sprintf("%s/mod/v2/lock/%s", s.URL(), key))
	ret := tests.ReadBody(resp)
	return string(ret), resp.StatusCode, err
}
Example #21
// Create a full cluster and then add an extra standby node.
func TestStandby(t *testing.T) {
	t.Skip("functionality unimplemented")

	clusterSize := 10 // DefaultActiveSize + 1
	_, etcds, err := CreateCluster(clusterSize, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
	assert.NoError(t, err)
	defer DestroyCluster(etcds)

	if err != nil {
		t.Fatal("cannot create cluster")
	}

	c := etcd.NewClient(nil)
	c.SyncCluster()

	// Set key.
	time.Sleep(time.Second)
	if _, err := c.Set("foo", "bar", 0); err != nil {
		panic(err)
	}
	time.Sleep(time.Second)

	// Check that all peers and standbys have the value.
	for i := range etcds {
		resp, err := tests.Get(fmt.Sprintf("http://localhost:%d/v2/keys/foo", 4000+(i+1)))
		if assert.NoError(t, err) {
			body := tests.ReadBodyJSON(resp)
			if node, _ := body["node"].(map[string]interface{}); assert.NotNil(t, node) {
				assert.Equal(t, node["value"], "bar")
			}
		}
	}

	// Verify that we have one standby.
	result, err := c.Get("_etcd/standbys", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 1)

	// Reconfigure with larger active size (10 nodes) and wait for promotion.
	resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":10, "promoteDelay":1800}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	time.Sleep(server.ActiveMonitorTimeout + (1 * time.Second))

	// Verify that the standby node is now a peer.
	result, err = c.Get("_etcd/standbys", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 0)

	// Reconfigure with a smaller active size (8 nodes).
	resp, _ = tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":8, "promoteDelay":1800}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	// Wait for two monitor cycles before checking for demotion.
	time.Sleep((2 * server.ActiveMonitorTimeout) + (1 * time.Second))

	// Verify that we now have eight peers.
	result, err = c.Get("_etcd/machines", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 8)

	// Verify that we now have two standbys.
	result, err = c.Get("_etcd/standbys", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 2)
}

// Create a cluster of five nodes.
// Kill all the nodes, restart them, and then remove the leader.
func TestMultiNodeKillAllAndRecoveryAndRemoveLeader(t *testing.T) {
	procAttr := new(os.ProcAttr)
	procAttr.Files = []*os.File{nil, os.Stdout, os.Stderr}

	stop := make(chan bool)
	leaderChan := make(chan string, 1)
	all := make(chan bool, 1)

	clusterSize := 5
	argGroup, etcds, err := CreateCluster(clusterSize, procAttr, false)
	defer DestroyCluster(etcds)

	if err != nil {
		t.Fatal("cannot create cluster")
	}

	c := etcd.NewClient(nil)

	go Monitor(clusterSize, clusterSize, leaderChan, all, stop)
	<-all
	<-leaderChan
	stop <- true

	// The cluster needs some time to sync the current commits and write them
	// to disk. Otherwise an instance may be restarted as a new peer, and
	// rejoining an old cluster that has lost its majority is not supported
	// without the log.
	time.Sleep(time.Second)

	c.SyncCluster()

	// kill all
	DestroyCluster(etcds)

	time.Sleep(time.Second)

	stop = make(chan bool)
	leaderChan = make(chan string, 1)
	all = make(chan bool, 1)

	time.Sleep(time.Second)

	for i := 0; i < clusterSize; i++ {
		etcds[i], err = os.StartProcess(EtcdBinPath, argGroup[i], procAttr)
		assert.NoError(t, err)
	}

	go Monitor(clusterSize, 1, leaderChan, all, stop)

	<-all
	leader := <-leaderChan

	_, err = c.Set("foo", "bar", 0)
	if err != nil {
		t.Fatalf("Recovery error: %s", err)
	}

	port, _ := strconv.Atoi(strings.Split(leader, ":")[2])
	num := port - 7000
	resp, _ := tests.Delete(leader+"/v2/admin/machines/node"+strconv.Itoa(num), "application/json", nil)
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	// Check that the old leader is now in standby mode.
	time.Sleep(time.Second)
	resp, _ = tests.Get(leader + "/name")
	assert.Equal(t, resp.StatusCode, 404)
}