// Ensure that the store can delete a directory if recursive is specified.
func TestStoreDeleteDiretory(t *testing.T) {
	s := newStore()

	// create directory /foo
	s.Create("/foo", true, "", false, Permanent)

	// delete /foo with dir = true and recursive = false
	// this should succeed, since the directory is empty
	e, err := s.Delete("/foo", true, false)
	assert.Nil(t, err, "")
	assert.Equal(t, e.Action, "delete", "")
	// check prevNode
	assert.NotNil(t, e.PrevNode, "")
	assert.Equal(t, e.PrevNode.Key, "/foo", "")
	assert.Equal(t, e.PrevNode.Dir, true, "")

	// create directory /foo and directory /foo/bar
	s.Create("/foo/bar", true, "", false, Permanent)

	// delete /foo with dir = true and recursive = false
	// this should fail, since the directory is not empty
	_, err = s.Delete("/foo", true, false)
	assert.NotNil(t, err, "")

	// delete /foo with dir=false and recursive = true
	// this should succeed, since recursive implies dir=true
	// and recursively delete should be able to delete all
	// items under the given directory
	e, err = s.Delete("/foo", false, true)
	assert.Nil(t, err, "")
	assert.Equal(t, e.Action, "delete", "")
}
// Ensure that the store can set plain values and directories, and that an
// overwrite records the previous node state in the event's PrevNode.
func TestSet(t *testing.T) {
	s := newStore()

	// Set /foo=""
	e, err := s.Set("/foo", false, "", Permanent)
	assert.Nil(t, err, "")
	assert.Equal(t, e.Action, "set", "")
	assert.Equal(t, e.Node.Key, "/foo", "")
	assert.False(t, e.Node.Dir, "")
	assert.Equal(t, e.Node.Value, "", "")
	assert.Nil(t, e.Node.Nodes, "")
	assert.Nil(t, e.Node.Expiration, "")
	assert.Equal(t, e.Node.TTL, 0, "")
	assert.Equal(t, e.Node.ModifiedIndex, uint64(1), "")

	// Set /foo="bar"
	e, err = s.Set("/foo", false, "bar", Permanent)
	assert.Nil(t, err, "")
	assert.Equal(t, e.Action, "set", "")
	assert.Equal(t, e.Node.Key, "/foo", "")
	assert.False(t, e.Node.Dir, "")
	assert.Equal(t, e.Node.Value, "bar", "")
	assert.Nil(t, e.Node.Nodes, "")
	assert.Nil(t, e.Node.Expiration, "")
	assert.Equal(t, e.Node.TTL, 0, "")
	assert.Equal(t, e.Node.ModifiedIndex, uint64(2), "")
	// check prevNode
	assert.NotNil(t, e.PrevNode, "")
	assert.Equal(t, e.PrevNode.Key, "/foo", "")
	assert.Equal(t, e.PrevNode.Value, "", "")
	assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "")

	// Set /foo="baz" (for testing prevNode)
	e, err = s.Set("/foo", false, "baz", Permanent)
	assert.Nil(t, err, "")
	assert.Equal(t, e.Action, "set", "")
	assert.Equal(t, e.Node.Key, "/foo", "")
	assert.False(t, e.Node.Dir, "")
	assert.Equal(t, e.Node.Value, "baz", "")
	assert.Nil(t, e.Node.Nodes, "")
	assert.Nil(t, e.Node.Expiration, "")
	assert.Equal(t, e.Node.TTL, 0, "")
	assert.Equal(t, e.Node.ModifiedIndex, uint64(3), "")
	// check prevNode
	assert.NotNil(t, e.PrevNode, "")
	assert.Equal(t, e.PrevNode.Key, "/foo", "")
	assert.Equal(t, e.PrevNode.Value, "bar", "")
	assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(2), "")

	// Set /dir as a directory
	e, err = s.Set("/dir", true, "", Permanent)
	assert.Nil(t, err, "")
	assert.Equal(t, e.Action, "set", "")
	assert.Equal(t, e.Node.Key, "/dir", "")
	assert.True(t, e.Node.Dir, "")
	assert.Equal(t, e.Node.Value, "", "")
	assert.Nil(t, e.Node.Nodes, "")
	assert.Nil(t, e.Node.Expiration, "")
	assert.Equal(t, e.Node.TTL, 0, "")
	assert.Equal(t, e.Node.ModifiedIndex, uint64(4), "")
}
// Ensure that the store cannot delete a directory. func TestStoreCompareAndDeleteDiretoryFail(t *testing.T) { s := newStore() s.Create("/foo", true, "", false, Permanent) _, _err := s.CompareAndDelete("/foo", "", 0) assert.NotNil(t, _err, "") err := _err.(*etcdErr.Error) assert.Equal(t, err.ErrorCode, etcdErr.EcodeNotFile, "") }
func TestRootRdOnly(t *testing.T) { s := newStore() _, err := s.Set("/", true, "", Permanent) assert.NotNil(t, err, "") _, err = s.Delete("/", true, true) assert.NotNil(t, err, "") _, err = s.Create("/", true, "", false, Permanent) assert.NotNil(t, err, "") _, err = s.Update("/", "", Permanent) assert.NotNil(t, err, "") _, err = s.CompareAndSwap("/", "", 0, "", Permanent) assert.NotNil(t, err, "") }
// Ensure that the store does see hidden key creates if watching deeper than a hidden key in recursive mode.
func TestStoreWatchRecursiveCreateDeeperThanHiddenKey(t *testing.T) {
	s := newStore()
	// Watch below the hidden component "/_foo" itself.
	w, _ := s.Watch("/_foo/bar", true, false, 0)
	s.Create("/_foo/bar/baz", false, "baz", false, Permanent)

	// nbselect performs a non-blocking receive, so the create event must
	// already be queued on the watcher's channel.
	e := nbselect(w.EventChan)
	assert.NotNil(t, e, "")
	assert.Equal(t, e.Action, "create", "")
	assert.Equal(t, e.Node.Key, "/_foo/bar/baz", "")
}
// Ensure that the store can delete a value. func TestStoreDeleteValue(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, Permanent) e, err := s.Delete("/foo", false, false) assert.Nil(t, err, "") assert.Equal(t, e.Action, "delete", "") // check pervNode assert.NotNil(t, e.PrevNode, "") assert.Equal(t, e.PrevNode.Key, "/foo", "") assert.Equal(t, e.PrevNode.Value, "bar", "") }
func TestStoreCompareAndDeletePrevIndexFailsIfNotMatch(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, Permanent) e, _err := s.CompareAndDelete("/foo", "", 100) assert.NotNil(t, _err, "") err := _err.(*etcdErr.Error) assert.Equal(t, err.ErrorCode, etcdErr.EcodeTestFailed, "") assert.Equal(t, err.Message, "Compare failed", "") assert.Nil(t, e, "") e, _ = s.Get("/foo", false, false) assert.Equal(t, e.Node.Value, "bar", "") }
func TestStoreCompareAndDeletePrevIndex(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, Permanent) e, err := s.CompareAndDelete("/foo", "", 1) assert.Nil(t, err, "") assert.Equal(t, e.Action, "compareAndDelete", "") // check pervNode assert.NotNil(t, e.PrevNode, "") assert.Equal(t, e.PrevNode.Key, "/foo", "") assert.Equal(t, e.PrevNode.Value, "bar", "") assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "") assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1), "") }
// Ensures that a watcher can wait for a value to be set after a given index.
//
// $ curl -X POST localhost:4001/v1/watch/foo/bar -d index=3
// $ curl -X PUT localhost:4001/v1/keys/foo/bar -d value=XXX
// $ curl -X PUT localhost:4001/v1/keys/foo/bar -d value=YYY
//
func TestV1WatchKeyWithIndex(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		var body map[string]interface{}
		c := make(chan bool)
		go func() {
			// Watch starting at index 3; the first write below lands at
			// index 2 and must NOT complete this request.
			v := url.Values{}
			v.Set("index", "3")
			resp, _ := tests.PostForm(fmt.Sprintf("%s%s", s.URL(), "/v1/watch/foo/bar"), v)
			body = tests.ReadBodyJSON(resp)
			c <- true
		}()

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)
		assert.Nil(t, body, "")

		// Set a value (before given index).
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)
		assert.Nil(t, body, "")

		// Set a value again (this write reaches the watched index).
		v.Set("value", "YYY")
		resp, _ = tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// A response should follow from the watch POST above.
		time.Sleep(1 * time.Millisecond)
		select {
		case <-c:
		default:
			t.Fatal("cannot get watch result")
		}
		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "set", "")
		assert.Equal(t, body["key"], "/foo/bar", "")
		assert.Equal(t, body["value"], "YYY", "")
		assert.Equal(t, body["index"], 3, "")
	})
}
// Ensure that the store can conditionally update a key if it has a previous value. func TestStoreCompareAndSwapPrevValue(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, Permanent) e, err := s.CompareAndSwap("/foo", "bar", 0, "baz", Permanent) assert.Nil(t, err, "") assert.Equal(t, e.Action, "compareAndSwap", "") assert.Equal(t, e.Node.Value, "baz", "") // check pervNode assert.NotNil(t, e.PrevNode, "") assert.Equal(t, e.PrevNode.Key, "/foo", "") assert.Equal(t, e.PrevNode.Value, "bar", "") assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "") assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1), "") e, _ = s.Get("/foo", false, false) assert.Equal(t, e.Node.Value, "baz", "") }
// Ensures that a watcher can wait for a value to be set and return it to the client.
//
// $ curl localhost:4001/v2/keys/foo/bar?wait=true
// $ curl -X PUT localhost:4001/v2/keys/foo/bar -d value=XXX
//
// NOTE(review): a second function named TestV2WatchKey appears later in this
// file; duplicate definitions will not compile in one package — confirm these
// chunks come from different files/revisions.
func TestV2WatchKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		// There exists a little gap between etcd ready to serve and
		// it actually serves the first request, which means the response
		// delay could be a little bigger.
		// This test is time sensitive, so it does one request to ensure
		// that the server is working.
		tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar"))

		var watchResp *http.Response
		c := make(chan bool)
		go func() {
			watchResp, _ = tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar?wait=true"))
			c <- true
		}()

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)

		// Set a value.
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// A response should follow from the GET above.
		time.Sleep(1 * time.Millisecond)
		select {
		case <-c:
		default:
			t.Fatal("cannot get watch result")
		}
		body := tests.ReadBodyJSON(watchResp)
		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "set", "")

		node := body["node"].(map[string]interface{})
		assert.Equal(t, node["key"], "/foo/bar", "")
		assert.Equal(t, node["value"], "XXX", "")
		// presumably the warm-up request above consumes an index, leaving the
		// PUT at modifiedIndex 3 — TODO confirm against the server version.
		assert.Equal(t, node["modifiedIndex"], 3, "")
	})
}
// Ensures that a watcher on a key is notified when its enclosing directory
// expires (the watch fires an "expire" event for the directory).
//
// $ curl localhost:4001/v2/keys/keyindir/bar?wait=true
// $ curl -X PUT localhost:4001/v2/keys/keyindir -d dir=true -d ttl=1
// $ curl -X PUT localhost:4001/v2/keys/keyindir/bar -d value=XXX
//
func TestV2WatchKeyInDir(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		var body map[string]interface{}
		c := make(chan bool)

		// Create the parent directory with a 1-second TTL.
		v := url.Values{}
		v.Set("dir", "true")
		v.Set("ttl", "1")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/keyindir"), v)
		tests.ReadBody(resp)

		// Put a key inside the expiring directory.
		v = url.Values{}
		v.Set("value", "XXX")
		resp, _ = tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/keyindir/bar"), v)
		tests.ReadBody(resp)

		go func() {
			resp, _ := tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/keyindir/bar?wait=true"))
			body = tests.ReadBodyJSON(resp)
			c <- true
		}()

		// Wait for the expiration; there can be up to a 500 millisecond delay.
		time.Sleep(2000 * time.Millisecond)

		select {
		case <-c:
		default:
			t.Fatal("cannot get watch result")
		}
		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "expire", "")

		// The expire event reports the directory that expired, not the watched key.
		node := body["node"].(map[string]interface{})
		assert.Equal(t, node["key"], "/keyindir", "")
	})
}
// Ensures that a watcher can wait for a value to be set and return it to the client.
//
// $ curl localhost:4001/v2/keys/foo/bar?wait=true
// $ curl -X PUT localhost:4001/v2/keys/foo/bar -d value=XXX
//
// NOTE(review): another function named TestV2WatchKey appears earlier in this
// file; duplicate definitions will not compile in one package — confirm these
// chunks come from different files/revisions.
func TestV2WatchKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		var body map[string]interface{}
		c := make(chan bool)
		go func() {
			resp, _ := tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar?wait=true"))
			body = tests.ReadBodyJSON(resp)
			c <- true
		}()

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)
		assert.Nil(t, body, "")

		// Set a value.
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v2/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// A response should follow from the watch GET above.
		time.Sleep(1 * time.Millisecond)
		select {
		case <-c:
		default:
			t.Fatal("cannot get watch result")
		}
		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "set", "")

		node := body["node"].(map[string]interface{})
		assert.Equal(t, node["key"], "/foo/bar", "")
		assert.Equal(t, node["value"], "XXX", "")
		assert.Equal(t, node["modifiedIndex"], 2, "")
	})
}
// Ensure that the store can recover from a previously saved state that includes an expiring key. func TestStoreRecoverWithExpiration(t *testing.T) { s := newStore() c := make(chan bool) defer func() { c <- true }() go mockSyncService(s.DeleteExpiredKeys, c) var eidx uint64 = 4 s.Create("/foo", true, "", false, Permanent) s.Create("/foo/x", false, "bar", false, Permanent) s.Create("/foo/y", false, "baz", false, time.Now().Add(5*time.Millisecond)) b, err := s.Save() time.Sleep(10 * time.Millisecond) s2 := newStore() c2 := make(chan bool) defer func() { c2 <- true }() go mockSyncService(s2.DeleteExpiredKeys, c2) s2.Recovery(b) time.Sleep(600 * time.Millisecond) e, err := s.Get("/foo/x", false, false) assert.Nil(t, err, "") assert.Equal(t, e.EtcdIndex, eidx, "") assert.Equal(t, *e.Node.Value, "bar", "") e, err = s.Get("/foo/y", false, false) assert.NotNil(t, err, "") assert.Nil(t, e, "") }
// Ensures that a watcher can wait for a value to be set and return it to the client.
//
// $ curl localhost:4001/v1/watch/foo/bar
// $ curl -X PUT localhost:4001/v1/keys/foo/bar -d value=XXX
//
func TestV1WatchKey(t *testing.T) {
	tests.RunServer(func(s *server.Server) {
		var watchResp *http.Response
		c := make(chan bool)
		go func() {
			watchResp, _ = tests.Get(fmt.Sprintf("%s%s", s.URL(), "/v1/watch/foo/bar"))
			c <- true
		}()

		// Make sure response didn't fire early.
		time.Sleep(1 * time.Millisecond)

		// Set a value.
		v := url.Values{}
		v.Set("value", "XXX")
		resp, _ := tests.PutForm(fmt.Sprintf("%s%s", s.URL(), "/v1/keys/foo/bar"), v)
		tests.ReadBody(resp)

		// A response should follow from the watch GET above.
		time.Sleep(1 * time.Millisecond)
		select {
		case <-c:
		default:
			t.Fatal("cannot get watch result")
		}
		body := tests.ReadBodyJSON(watchResp)
		assert.NotNil(t, body, "")
		assert.Equal(t, body["action"], "set", "")
		assert.Equal(t, body["key"], "/foo/bar", "")
		assert.Equal(t, body["value"], "XXX", "")
		assert.Equal(t, body["index"], 2, "")
	})
}
// Create a full cluster and then change the active size.
//
// NOTE(review): another function named TestStandby appears later in this
// file; duplicate definitions will not compile in one package — confirm these
// chunks come from different files/revisions.
func TestStandby(t *testing.T) {
	clusterSize := 15
	_, etcds, err := CreateCluster(clusterSize, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
	if !assert.NoError(t, err) {
		t.Fatal("cannot create cluster")
	}
	defer DestroyCluster(etcds)

	resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"syncInterval":1}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}
	time.Sleep(time.Second)

	c := etcd.NewClient(nil)
	c.SyncCluster()

	// Verify that we just have default machines.
	result, err := c.Get("_etcd/machines", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 9)

	t.Log("Reconfigure with a smaller active size")
	resp, _ = tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":7, "syncInterval":1}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	// Wait for two monitor cycles before checking for demotion.
	time.Sleep((2 * server.ActiveMonitorTimeout) + (2 * time.Second))

	// Verify that we now have seven peers.
	result, err = c.Get("_etcd/machines", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 7)

	t.Log("Test the functionality of all servers")
	// Set key.
	time.Sleep(time.Second)
	if _, err := c.Set("foo", "bar", 0); err != nil {
		panic(err)
	}
	time.Sleep(time.Second)

	// Check that all peers and standbys have the value.
	for i := range etcds {
		resp, err := tests.Get(fmt.Sprintf("http://localhost:%d/v2/keys/foo", 4000+(i+1)))
		if assert.NoError(t, err) {
			body := tests.ReadBodyJSON(resp)
			if node, _ := body["node"].(map[string]interface{}); assert.NotNil(t, node) {
				assert.Equal(t, node["value"], "bar")
			}
		}
	}

	t.Log("Reconfigure with larger active size and wait for join")
	resp, _ = tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":8, "syncInterval":1}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	// Give a standby time to rejoin (syncInterval is 1s, plus slack).
	time.Sleep((1 * time.Second) + (1 * time.Second))

	// Verify that exactly eight machines are in the cluster.
	result, err = c.Get("_etcd/machines", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 8)
}
// Create a full cluster and then change the active size dramatically.
func TestStandbyDramaticChange(t *testing.T) {
	clusterSize := 9
	_, etcds, err := CreateCluster(clusterSize, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
	assert.NoError(t, err)
	defer DestroyCluster(etcds)
	if err != nil {
		t.Fatal("cannot create cluster")
	}

	time.Sleep(time.Second)

	c := etcd.NewClient(nil)
	c.SyncCluster()

	num := clusterSize
	for i := 0; i < 3; i++ {
		for inc := 0; inc < 2; inc++ {
			// Verify that we currently have `num` machines.
			result, err := c.Get("_etcd/machines", false, true)
			assert.NoError(t, err)
			assert.Equal(t, len(result.Node.Nodes), num)

			// Shrink the active size by 6 on the first pass, grow it back on the second.
			if inc == 0 {
				num -= 6
			} else {
				num += 6
			}

			t.Log("Reconfigure with active size", num)
			resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(fmt.Sprintf(`{"activeSize":%d, "syncInterval":1}`, num)))
			if !assert.Equal(t, resp.StatusCode, 200) {
				t.FailNow()
			}

			if inc == 0 {
				// Wait for monitor cycles before checking for demotion.
				time.Sleep(6*server.ActiveMonitorTimeout + (1 * time.Second))
			} else {
				time.Sleep(time.Second + (1 * time.Second))
			}

			// Verify that we now have `num` peers.
			result, err = c.Get("_etcd/machines", false, true)
			assert.NoError(t, err)
			assert.Equal(t, len(result.Node.Nodes), num)

			t.Log("Test the functionality of all servers")
			// Set key.
			if _, err := c.Set("foo", "bar", 0); err != nil {
				panic(err)
			}
			time.Sleep(100 * time.Millisecond)

			// Check that all peers and standbys have the value.
			for i := range etcds {
				resp, err := tests.Get(fmt.Sprintf("http://localhost:%d/v2/keys/foo", 4000+(i+1)))
				if assert.NoError(t, err) {
					body := tests.ReadBodyJSON(resp)
					if node, _ := body["node"].(map[string]interface{}); assert.NotNil(t, node) {
						assert.Equal(t, node["value"], "bar")
					}
				}
			}
		}
	}
}
// Create a full cluster and then add an extra standby node.
//
// NOTE(review): another function named TestStandby appears earlier in this
// file; duplicate definitions will not compile in one package — confirm these
// chunks come from different files/revisions.
func TestStandby(t *testing.T) {
	t.Skip("functionality unimplemented")

	clusterSize := 10 // DefaultActiveSize + 1
	_, etcds, err := CreateCluster(clusterSize, &os.ProcAttr{Files: []*os.File{nil, os.Stdout, os.Stderr}}, false)
	assert.NoError(t, err)
	defer DestroyCluster(etcds)
	if err != nil {
		t.Fatal("cannot create cluster")
	}

	c := etcd.NewClient(nil)
	c.SyncCluster()

	// Set key.
	time.Sleep(time.Second)
	if _, err := c.Set("foo", "bar", 0); err != nil {
		panic(err)
	}
	time.Sleep(time.Second)

	// Check that all peers and standbys have the value.
	for i := range etcds {
		resp, err := tests.Get(fmt.Sprintf("http://localhost:%d/v2/keys/foo", 4000+(i+1)))
		if assert.NoError(t, err) {
			body := tests.ReadBodyJSON(resp)
			if node, _ := body["node"].(map[string]interface{}); assert.NotNil(t, node) {
				assert.Equal(t, node["value"], "bar")
			}
		}
	}

	// Verify that we have one standby.
	result, err := c.Get("_etcd/standbys", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 1)

	// Reconfigure with larger active size (10 nodes) and wait for promotion.
	resp, _ := tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":10, "promoteDelay":1800}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	time.Sleep(server.ActiveMonitorTimeout + (1 * time.Second))

	// Verify that the standby node is now a peer.
	result, err = c.Get("_etcd/standbys", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 0)

	// Reconfigure with a smaller active size (8 nodes).
	resp, _ = tests.Put("http://localhost:7001/v2/admin/config", "application/json", bytes.NewBufferString(`{"activeSize":8, "promoteDelay":1800}`))
	if !assert.Equal(t, resp.StatusCode, 200) {
		t.FailNow()
	}

	// Wait for two monitor cycles before checking for demotion.
	time.Sleep((2 * server.ActiveMonitorTimeout) + (1 * time.Second))

	// Verify that we now have eight peers.
	result, err = c.Get("_etcd/machines", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 8)

	// Verify that we now have two standbys.
	result, err = c.Get("_etcd/standbys", false, true)
	assert.NoError(t, err)
	assert.Equal(t, len(result.Node.Nodes), 2)
}