func TestACL_MultiDC_Found(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Datacenter = "dc2"
		c.ACLDatacenter = "dc1" // Enable ACLs!
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfWANConfig.MemberlistConfig.BindPort)
	if _, err := s2.JoinWAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	testutil.WaitForLeader(t, s1.RPC, "dc1")
	testutil.WaitForLeader(t, s1.RPC, "dc2")

	// Create a new token
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name:  "User token",
			Type:  structs.ACLTypeClient,
			Rules: testACLPolicy,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Token should resolve
	acl, err := s2.resolveToken(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy
	if acl.KeyRead("bar") {
		t.Fatalf("unexpected read")
	}
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}
func TestCatalogListNodes_ConsistentRead_Fail(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec1 := rpcClient(t, s1)
	defer codec1.Close()

	dir2, s2 := testServerDCBootstrap(t, "dc1", false)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	codec2 := rpcClient(t, s2)
	defer codec2.Close()

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := s2.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}
	testutil.WaitForLeader(t, s1.RPC, "dc1")
	testutil.WaitForLeader(t, s2.RPC, "dc1")

	// Use the leader as the client, kill the follower
	var codec rpc.ClientCodec
	if s1.IsLeader() {
		codec = codec1
		s2.Shutdown()
	} else {
		codec = codec2
		s1.Shutdown()
	}

	args := structs.DCSpecificRequest{
		Datacenter:   "dc1",
		QueryOptions: structs.QueryOptions{RequireConsistent: true},
	}
	var out structs.IndexedNodes
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &out); !strings.HasPrefix(err.Error(), "leadership lost") {
		t.Fatalf("err: %v", err)
	}
	if out.QueryMeta.LastContact != 0 {
		t.Fatalf("should not have a last contact time")
	}
	if out.QueryMeta.KnownLeader {
		t.Fatalf("should have no known leader")
	}
}
func TestResetSessionTimer_NoTTL(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Create a session
	state := s1.fsm.State()
	if err := state.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
		t.Fatalf("err: %s", err)
	}
	session := &structs.Session{
		ID:   generateUUID(),
		Node: "foo",
		TTL:  "0000s",
	}
	if err := state.SessionCreate(100, session); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Reset the session timer
	err := s1.resetSessionTimer(session.ID, session)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check that the zero TTL did not create a timer
	_, ok := s1.sessionTimers[session.ID]
	if ok {
		t.Fatalf("should not have session timer")
	}
}
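// The session tests above assume a package-level generateUUID helper. A
// minimal sketch of such a helper is below; it is a plausible stand-in,
// not necessarily the package's exact implementation, and assumes
// crypto/rand is imported as crand.
func generateUUID() string {
	buf := make([]byte, 16)
	if _, err := crand.Read(buf); err != nil {
		panic(fmt.Errorf("failed to read random bytes: %v", err))
	}
	// Format the 16 random bytes as a standard 8-4-4-4-12 UUID string.
	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
		buf[0:4], buf[4:6], buf[6:8], buf[8:10], buf[10:16])
}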
func TestInvalidateSession(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Create a session
	state := s1.fsm.State()
	if err := state.EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
		t.Fatalf("err: %s", err)
	}
	session := &structs.Session{
		ID:   generateUUID(),
		Node: "foo",
		TTL:  "10s",
	}
	if err := state.SessionCreate(100, session); err != nil {
		t.Fatalf("err: %v", err)
	}

	// This should cause a destroy
	s1.invalidateSession(session.ID)

	// Check it is gone
	_, sess, err := state.SessionGet(session.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if sess != nil {
		t.Fatalf("should destroy session")
	}
}
func testRemoteExecGetSpec(t *testing.T, c *Config) {
	dir, agent := makeAgent(t, c)
	defer os.RemoveAll(dir)
	defer agent.Shutdown()
	testutil.WaitForLeader(t, agent.RPC, "dc1")

	event := &remoteExecEvent{
		Prefix:  "_rexec",
		Session: makeRexecSession(t, agent),
	}
	defer destroySession(t, agent, event.Session)

	spec := &remoteExecSpec{
		Command: "uptime",
		Script:  []byte("#!/bin/bash"),
		Wait:    time.Second,
	}
	buf, err := json.Marshal(spec)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	key := "_rexec/" + event.Session + "/job"
	setKV(t, agent, key, buf)

	var out remoteExecSpec
	if !agent.remoteExecGetSpec(event, &out) {
		t.Fatalf("bad")
	}
	if !reflect.DeepEqual(spec, &out) {
		t.Fatalf("bad spec")
	}
}
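// setKV is assumed by the remote-exec tests above. A hedged sketch of what
// it likely does: write a raw value through the agent's KVS.Apply RPC, the
// same endpoint TestKVS_Apply exercises below. Treat this as an
// illustration, not the exact helper from the test package.
func setKV(t *testing.T, agent *Agent, key string, val []byte) {
	write := structs.KVSRequest{
		Datacenter: agent.config.Datacenter,
		Op:         structs.KVSSet,
		DirEnt: structs.DirEntry{
			Key:   key,
			Value: val,
		},
	}
	var success bool
	if err := agent.RPC("KVS.Apply", &write, &success); err != nil {
		t.Fatalf("err: %v", err)
	}
}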
func TestCatalogRegister_ForwardDC(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	dir2, s2 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfWANConfig.MemberlistConfig.BindPort)
	if _, err := s2.JoinWAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	testutil.WaitForLeader(t, s1.RPC, "dc2")

	arg := structs.RegisterRequest{
		Datacenter: "dc2", // Should forward through s1
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			Service: "db",
			Tags:    []string{"master"},
			Port:    8000,
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}
func TestACLEndpoint_Apply_RootChange(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			ID:   "manage",
			Name: "User token",
			Type: structs.ACLTypeClient,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var out string
	err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &out)
	if err == nil || !strings.Contains(err.Error(), "root ACL") {
		t.Fatalf("err: %v", err)
	}
}
func TestCoordinate_Datacenters(t *testing.T) {
	dir, srv := makeHTTPServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer srv.agent.Shutdown()

	testutil.WaitForLeader(t, srv.agent.RPC, "dc1")

	req, err := http.NewRequest("GET", "/v1/coordinate/datacenters", nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp := httptest.NewRecorder()
	obj, err := srv.CoordinateDatacenters(resp, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	maps := obj.([]structs.DatacenterMap)
	if len(maps) != 1 ||
		maps[0].Datacenter != "dc1" ||
		len(maps[0].Coordinates) != 1 ||
		maps[0].Coordinates[0].Node != srv.agent.config.NodeName {
		t.Fatalf("bad: %v", maps)
	}
}
func TestCatalogNodes_Blocking(t *testing.T) {
	dir, srv := makeHTTPServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer srv.agent.Shutdown()

	testutil.WaitForLeader(t, srv.agent.RPC, "dc1")

	// Do an initial list to get the current index
	args := &structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	var out structs.IndexedNodes
	if err := srv.agent.RPC("Catalog.ListNodes", *args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Do an update in a little while
	start := time.Now()
	go func() {
		time.Sleep(50 * time.Millisecond)
		args := &structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
		}
		var out struct{}
		// t.Fatalf must not be called from a non-test goroutine, so
		// report any failure with t.Errorf instead.
		if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
			t.Errorf("err: %v", err)
		}
	}()

	// Do a blocking read
	req, err := http.NewRequest("GET",
		fmt.Sprintf("/v1/catalog/nodes?wait=60s&index=%d", out.Index), nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	resp := httptest.NewRecorder()
	obj, err := srv.CatalogNodes(resp, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should block for a while
	if time.Since(start) < 50*time.Millisecond {
		t.Fatalf("too fast")
	}

	if idx := getIndex(t, resp); idx <= out.Index {
		t.Fatalf("bad: %v", idx)
	}

	nodes := obj.(structs.Nodes)
	if len(nodes) != 2 {
		t.Fatalf("bad: %v", obj)
	}
}
func TestACLEndpoint_List(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	ids := []string{}
	for i := 0; i < 5; i++ {
		arg := structs.ACLRequest{
			Datacenter: "dc1",
			Op:         structs.ACLSet,
			ACL: structs.ACL{
				Name: "User token",
				Type: structs.ACLTypeClient,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var out string
		if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
		ids = append(ids, out)
	}

	getR := structs.DCSpecificRequest{
		Datacenter:   "dc1",
		QueryOptions: structs.QueryOptions{Token: "root"},
	}
	var acls structs.IndexedACLs
	if err := msgpackrpc.CallWithCodec(codec, "ACL.List", &getR, &acls); err != nil {
		t.Fatalf("err: %v", err)
	}

	if acls.Index == 0 {
		t.Fatalf("Bad: %v", acls)
	}

	// 5 + anonymous + master
	if len(acls.ACLs) != 7 {
		t.Fatalf("Bad: %v", acls.ACLs)
	}
	for i := 0; i < len(acls.ACLs); i++ {
		s := acls.ACLs[i]
		if s.ID == anonymousToken || s.ID == "root" {
			continue
		}
		if !strContains(ids, s.ID) {
			t.Fatalf("bad: %v", s)
		}
		if s.Name != "User token" {
			t.Fatalf("bad: %v", s)
		}
	}
}
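// strContains is assumed by the list-style assertions above and in the
// session tests below. Its behavior is implied by the call sites (report
// whether the slice contains the string); this trivial sketch is a
// hypothetical stand-in for the package helper.
func strContains(haystack []string, needle string) bool {
	for _, item := range haystack {
		if item == needle {
			return true
		}
	}
	return false
}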
func TestUiNodeInfo(t *testing.T) {
	dir, srv := makeHTTPServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer srv.agent.Shutdown()

	testutil.WaitForLeader(t, srv.agent.RPC, "dc1")

	req, err := http.NewRequest("GET",
		fmt.Sprintf("/v1/internal/ui/node/%s", srv.agent.config.NodeName), nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp := httptest.NewRecorder()
	obj, err := srv.UINodeInfo(resp, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	assertIndex(t, resp)

	// Should be 1 node for the server
	node := obj.(*structs.NodeInfo)
	if node.Node != srv.agent.config.NodeName {
		t.Fatalf("bad: %v", node)
	}

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test",
		Address:    "127.0.0.1",
	}
	var out struct{}
	if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	req, err = http.NewRequest("GET", "/v1/internal/ui/node/test", nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	resp = httptest.NewRecorder()
	obj, err = srv.UINodeInfo(resp, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	assertIndex(t, resp)

	// Should be non-nil empty lists for services and checks
	node = obj.(*structs.NodeInfo)
	if node.Node != "test" ||
		node.Services == nil || len(node.Services) != 0 ||
		node.Checks == nil || len(node.Checks) != 0 {
		t.Fatalf("bad: %v", node)
	}
}
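// getIndex and assertIndex are assumed by the HTTP tests in this file. The
// sketches below show the likely shape: read the X-Consul-Index response
// header and require it to be a positive integer. These are illustrative
// stand-ins, not the package's exact helpers; strconv is assumed imported.
func getIndex(t *testing.T, resp *httptest.ResponseRecorder) uint64 {
	header := resp.Header().Get("X-Consul-Index")
	if header == "" {
		t.Fatalf("missing X-Consul-Index header")
	}
	val, err := strconv.ParseUint(header, 10, 64)
	if err != nil {
		t.Fatalf("bad index: %v", header)
	}
	return val
}

func assertIndex(t *testing.T, resp *httptest.ResponseRecorder) {
	if idx := getIndex(t, resp); idx == 0 {
		t.Fatalf("bad index: %d", idx)
	}
}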
func TestRTT_sortNodesByDistanceFrom(t *testing.T) {
	dir, server := testServer(t)
	defer os.RemoveAll(dir)
	defer server.Shutdown()
	codec := rpcClient(t, server)
	defer codec.Close()

	testutil.WaitForLeader(t, server.RPC, "dc1")
	seedCoordinates(t, codec, server)

	nodes := structs.Nodes{
		&structs.Node{Node: "apple"},
		&structs.Node{Node: "node1"},
		&structs.Node{Node: "node2"},
		&structs.Node{Node: "node3"},
		&structs.Node{Node: "node4"},
		&structs.Node{Node: "node5"},
	}

	// The zero value for the source should not trigger any sorting.
	var source structs.QuerySource
	if err := server.sortNodesByDistanceFrom(source, nodes); err != nil {
		t.Fatalf("err: %v", err)
	}
	verifyNodeSort(t, nodes, "apple,node1,node2,node3,node4,node5")

	// Same for a source in some other DC.
	source.Node = "node1"
	source.Datacenter = "dc2"
	if err := server.sortNodesByDistanceFrom(source, nodes); err != nil {
		t.Fatalf("err: %v", err)
	}
	verifyNodeSort(t, nodes, "apple,node1,node2,node3,node4,node5")

	// Same for a source node in our DC that we have no coordinate for.
	source.Node = "apple"
	source.Datacenter = "dc1"
	if err := server.sortNodesByDistanceFrom(source, nodes); err != nil {
		t.Fatalf("err: %v", err)
	}
	verifyNodeSort(t, nodes, "apple,node1,node2,node3,node4,node5")

	// Set source to legit values relative to node1 but disable coordinates.
	source.Node = "node1"
	source.Datacenter = "dc1"
	server.config.DisableCoordinates = true
	if err := server.sortNodesByDistanceFrom(source, nodes); err != nil {
		t.Fatalf("err: %v", err)
	}
	verifyNodeSort(t, nodes, "apple,node1,node2,node3,node4,node5")

	// Now enable coordinates and sort relative to node1, note that apple
	// doesn't have any seeded coordinate info so it should end up at the
	// end, despite its lexical hegemony.
	server.config.DisableCoordinates = false
	if err := server.sortNodesByDistanceFrom(source, nodes); err != nil {
		t.Fatalf("err: %v", err)
	}
	verifyNodeSort(t, nodes, "node1,node4,node5,node2,node3,apple")
}
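// verifyNodeSort is assumed above. A plausible sketch: join the node names
// in order and compare against the expected comma-separated string. This is
// illustrative only, not necessarily the package's exact helper.
func verifyNodeSort(t *testing.T, nodes structs.Nodes, expected string) {
	vec := make([]string, len(nodes))
	for i, node := range nodes {
		vec[i] = node.Node
	}
	actual := strings.Join(vec, ",")
	if actual != expected {
		t.Fatalf("bad sort: %s != %s", actual, expected)
	}
}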
func TestCatalogDeregister(t *testing.T) {
	dir, srv := makeHTTPServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer srv.agent.Shutdown()

	testutil.WaitForLeader(t, srv.agent.RPC, "dc1")

	// Deregister node
	req, err := http.NewRequest("GET", "/v1/catalog/deregister", nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	args := &structs.DeregisterRequest{
		Node: "foo",
	}
	req.Body = encodeReq(args)

	obj, err := srv.CatalogDeregister(nil, req)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	res := obj.(bool)
	if !res {
		t.Fatalf("bad: %v", res)
	}
}
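// encodeReq is assumed by the handler tests that set req.Body directly. A
// hedged sketch, assuming bytes, encoding/json, io, and io/ioutil are
// imported: JSON-encode the object and wrap the buffer in a ReadCloser.
func encodeReq(obj interface{}) io.ReadCloser {
	buf := bytes.NewBuffer(nil)
	if err := json.NewEncoder(buf).Encode(obj); err != nil {
		panic(err)
	}
	return ioutil.NopCloser(buf)
}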
func TestInternal_EventFire_Token(t *testing.T) {
	dir, srv := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDownPolicy = "deny"
		c.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	defer codec.Close()

	testutil.WaitForLeader(t, srv.RPC, "dc1")

	// No token is rejected
	event := structs.EventFireRequest{
		Name:       "foo",
		Datacenter: "dc1",
		Payload:    []byte("nope"),
	}
	err := msgpackrpc.CallWithCodec(codec, "Internal.EventFire", &event, nil)
	if err == nil || err.Error() != permissionDenied {
		t.Fatalf("bad: %s", err)
	}

	// Root token is allowed to fire
	event.Token = "root"
	err = msgpackrpc.CallWithCodec(codec, "Internal.EventFire", &event, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
}
func TestCoordinate_ListDatacenters(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// It's super hard to force the Serfs into a known configuration of
	// coordinates, so the best we can do is make sure our own DC shows
	// up in the list with the proper coordinates. The guts of the algorithm
	// are extensively tested in rtt_test.go using a mock database.
	var out []structs.DatacenterMap
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListDatacenters", struct{}{}, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(out) != 1 ||
		out[0].Datacenter != "dc1" ||
		len(out[0].Coordinates) != 1 ||
		out[0].Coordinates[0].Node != s1.config.NodeName {
		t.Fatalf("bad: %v", out)
	}
	c, err := s1.serfWAN.GetCoordinate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	verifyCoordinatesEqual(t, c, out[0].Coordinates[0].Coord)
}
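// verifyCoordinatesEqual is assumed above. Serf's coordinate.Coordinate
// exposes DistanceTo, which returns the estimated RTT between two
// coordinates, so a plausible sketch compares that distance against a small
// epsilon. The threshold below is an assumption, not the package's value.
func verifyCoordinatesEqual(t *testing.T, c1, c2 *coordinate.Coordinate) {
	if d := c1.DistanceTo(c2); d > time.Microsecond {
		t.Fatalf("coordinates are not equal: %v != %v", c1, c2)
	}
}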
func TestStatusLeader(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	arg := struct{}{}
	var leader string
	if err := msgpackrpc.CallWithCodec(codec, "Status.Leader", arg, &leader); err != nil {
		t.Fatalf("err: %v", err)
	}
	if leader != "" {
		t.Fatalf("unexpected leader: %v", leader)
	}

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	if err := msgpackrpc.CallWithCodec(codec, "Status.Leader", arg, &leader); err != nil {
		t.Fatalf("err: %v", err)
	}
	if leader == "" {
		t.Fatalf("no leader")
	}
}
func TestAgent_sendCoordinate(t *testing.T) {
	conf := nextConfig()
	conf.SyncCoordinateRateTarget = 10.0 // updates/sec
	conf.SyncCoordinateIntervalMin = 1 * time.Millisecond
	conf.ConsulConfig.CoordinateUpdatePeriod = 100 * time.Millisecond
	conf.ConsulConfig.CoordinateUpdateBatchSize = 10
	conf.ConsulConfig.CoordinateUpdateMaxBatches = 1
	dir, agent := makeAgent(t, conf)
	defer os.RemoveAll(dir)
	defer agent.Shutdown()

	testutil.WaitForLeader(t, agent.RPC, "dc1")

	// Wait a little while for an update.
	time.Sleep(2 * conf.ConsulConfig.CoordinateUpdatePeriod)

	// Make sure the coordinate is present.
	req := structs.DCSpecificRequest{
		Datacenter: agent.config.Datacenter,
	}
	var reply structs.IndexedCoordinates
	if err := agent.RPC("Coordinate.ListNodes", &req, &reply); err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(reply.Coordinates) != 1 {
		t.Fatalf("expected a coordinate: %v", reply)
	}
	coord := reply.Coordinates[0]
	if coord.Node != agent.config.NodeName || coord.Coord == nil {
		t.Fatalf("bad: %v", coord)
	}
}
func TestACL_Authority_Management(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLMasterToken = "foobar"
		c.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Resolve the token
	acl, err := s1.resolveToken("foobar")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy, should allow all
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}
func TestACLEndpoint_Apply(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name: "User token",
			Type: structs.ACLTypeClient,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var out string
	if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
	id := out

	// Verify
	state := s1.fsm.State()
	_, s, err := state.ACLGet(out)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if s == nil {
		t.Fatalf("should not be nil")
	}
	if s.ID != out {
		t.Fatalf("bad: %v", s)
	}
	if s.Name != "User token" {
		t.Fatalf("bad: %v", s)
	}

	// Do a delete
	arg.Op = structs.ACLDelete
	arg.ACL.ID = out
	if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify
	_, s, err = state.ACLGet(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if s != nil {
		t.Fatalf("bad: %v", s)
	}
}
func TestKVSEndpoint_ListKeys(t *testing.T) {
	dir, srv := makeHTTPServer(t)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer srv.agent.Shutdown()

	testutil.WaitForLeader(t, srv.agent.RPC, "dc1")

	keys := []string{
		"bar",
		"baz",
		"foo/sub1",
		"foo/sub2",
		"zip",
	}

	for _, key := range keys {
		buf := bytes.NewBuffer([]byte("test"))
		req, err := http.NewRequest("PUT", "/v1/kv/"+key, buf)
		if err != nil {
			t.Fatalf("err: %v", err)
		}

		resp := httptest.NewRecorder()
		obj, err := srv.KVSEndpoint(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if res := obj.(bool); !res {
			t.Fatalf("should work")
		}
	}

	{
		// Get all the keys. Note: "seperator" is the parameter's
		// historical spelling, which the endpoint accepts.
		req, err := http.NewRequest("GET", "/v1/kv/?keys&seperator=/", nil)
		if err != nil {
			t.Fatalf("err: %v", err)
		}

		resp := httptest.NewRecorder()
		obj, err := srv.KVSEndpoint(resp, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		assertIndex(t, resp)

		res, ok := obj.([]string)
		if !ok {
			t.Fatalf("should work")
		}

		expect := []string{"bar", "baz", "foo/", "zip"}
		if !reflect.DeepEqual(res, expect) {
			t.Fatalf("bad: %v", res)
		}
	}
}
func TestCatalogRegister_ForwardLeader(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec1 := rpcClient(t, s1)
	defer codec1.Close()

	dir2, s2 := testServer(t)
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()
	codec2 := rpcClient(t, s2)
	defer codec2.Close()

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := s2.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	testutil.WaitForLeader(t, s1.RPC, "dc1")
	testutil.WaitForLeader(t, s2.RPC, "dc1")

	// Use the follower as the client
	var codec rpc.ClientCodec
	if !s1.IsLeader() {
		codec = codec1
	} else {
		codec = codec2
	}

	arg := structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Address:    "127.0.0.1",
		Service: &structs.NodeService{
			Service: "db",
			Tags:    []string{"master"},
			Port:    8000,
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
}
func httpTestWithConfig(t *testing.T, f func(srv *HTTPServer), cb func(c *Config)) {
	dir, srv := makeHTTPServerWithConfig(t, cb)
	defer os.RemoveAll(dir)
	defer srv.Shutdown()
	defer srv.agent.Shutdown()
	testutil.WaitForLeader(t, srv.agent.RPC, "dc1")
	f(srv)
}
func TestSessionEndpoint_Apply(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Just add a node
	s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})

	arg := structs.SessionRequest{
		Datacenter: "dc1",
		Op:         structs.SessionCreate,
		Session: structs.Session{
			Node: "foo",
			Name: "my-session",
		},
	}
	var out string
	if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
	id := out

	// Verify
	state := s1.fsm.State()
	_, s, err := state.SessionGet(out)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if s == nil {
		t.Fatalf("should not be nil")
	}
	if s.Node != "foo" {
		t.Fatalf("bad: %v", s)
	}
	if s.Name != "my-session" {
		t.Fatalf("bad: %v", s)
	}

	// Do a delete
	arg.Op = structs.SessionDestroy
	arg.Session.ID = out
	if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify
	_, s, err = state.SessionGet(id)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if s != nil {
		t.Fatalf("bad: %v", s)
	}
}
func TestLeader_FailedMember(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, c1 := testClient(t)
	defer os.RemoveAll(dir2)
	defer c1.Shutdown()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := c1.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Fail the member
	c1.Shutdown()

	// Should be registered
	state := s1.fsm.State()
	testutil.WaitForResult(func() (bool, error) {
		_, node, err := state.GetNode(c1.config.NodeName)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		return node != nil, nil
	}, func(err error) {
		t.Fatalf("client not registered")
	})

	// Should have a check
	_, checks, err := state.NodeChecks(c1.config.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(checks) != 1 {
		t.Fatalf("client missing check")
	}
	if checks[0].CheckID != SerfCheckID {
		t.Fatalf("bad check: %v", checks[0])
	}
	if checks[0].Name != SerfCheckName {
		t.Fatalf("bad check: %v", checks[0])
	}

	testutil.WaitForResult(func() (bool, error) {
		_, checks, err = state.NodeChecks(c1.config.NodeName)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		return checks[0].Status == structs.HealthCritical, errors.New(checks[0].Status)
	}, func(err error) {
		t.Fatalf("check status is %v, should be critical", err)
	})
}
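// SerfCheckID and SerfCheckName are referenced above. The values below are
// assumptions matching how the leader registers the Serf health check for
// each member; treat them as illustrative if the package defines its own.
const (
	SerfCheckID   = "serfHealth"
	SerfCheckName = "Serf Health Status"
)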
func TestSessionEndpoint_NodeSessions(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})
	s1.fsm.State().EnsureNode(1, &structs.Node{Node: "bar", Address: "127.0.0.1"})

	ids := []string{}
	for i := 0; i < 10; i++ {
		arg := structs.SessionRequest{
			Datacenter: "dc1",
			Op:         structs.SessionCreate,
			Session: structs.Session{
				Node: "bar",
			},
		}
		if i < 5 {
			arg.Session.Node = "foo"
		}
		var out string
		if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
		if i < 5 {
			ids = append(ids, out)
		}
	}

	getR := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       "foo",
	}
	var sessions structs.IndexedSessions
	if err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", &getR, &sessions); err != nil {
		t.Fatalf("err: %v", err)
	}

	if sessions.Index == 0 {
		t.Fatalf("Bad: %v", sessions)
	}
	if len(sessions.Sessions) != 5 {
		t.Fatalf("Bad: %v", sessions.Sessions)
	}
	for i := 0; i < len(sessions.Sessions); i++ {
		s := sessions.Sessions[i]
		if !strContains(ids, s.ID) {
			t.Fatalf("bad: %v", s)
		}
		if s.Node != "foo" {
			t.Fatalf("bad: %v", s)
		}
	}
}
func TestCatalogListServices_Blocking(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	args := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	var out structs.IndexedServices

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Run the query
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Setup a blocking query
	args.MinQueryIndex = out.Index
	args.MaxQueryTime = time.Second

	// Async cause a change
	idx := out.Index
	start := time.Now()
	go func() {
		time.Sleep(100 * time.Millisecond)
		// t.Fatalf must not be called from a non-test goroutine, so
		// report failures with t.Errorf instead.
		if err := s1.fsm.State().EnsureNode(idx+1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
			t.Errorf("err: %v", err)
			return
		}
		if err := s1.fsm.State().EnsureService(idx+2, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"primary"}, Address: "127.0.0.1", Port: 5000}); err != nil {
			t.Errorf("err: %v", err)
		}
	}()

	// Re-run the query
	out = structs.IndexedServices{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should block at least 100ms
	if time.Since(start) < 100*time.Millisecond {
		t.Fatalf("too fast")
	}

	// Check the indexes
	if out.Index != idx+2 {
		t.Fatalf("bad: %v", out)
	}

	// Should find the new service along with the built-in consul service
	if len(out.Services) != 2 {
		t.Fatalf("bad: %v", out)
	}
}
func testHandleRemoteExec(t *testing.T, command string, expectedSubstring string, expectedReturnCode string) {
	dir, agent := makeAgent(t, nextConfig())
	defer os.RemoveAll(dir)
	defer agent.Shutdown()
	testutil.WaitForLeader(t, agent.RPC, "dc1")

	event := &remoteExecEvent{
		Prefix:  "_rexec",
		Session: makeRexecSession(t, agent),
	}
	defer destroySession(t, agent, event.Session)

	spec := &remoteExecSpec{
		Command: command,
		Wait:    time.Second,
	}
	buf, err := json.Marshal(spec)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	key := "_rexec/" + event.Session + "/job"
	setKV(t, agent, key, buf)

	buf, err = json.Marshal(event)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	msg := &UserEvent{
		ID:      generateUUID(),
		Payload: buf,
	}

	// Handle the event...
	agent.handleRemoteExec(msg)

	// Verify we have an ack
	key = "_rexec/" + event.Session + "/" + agent.config.NodeName + "/ack"
	d := getKV(t, agent, key)
	if d == nil || d.Session != event.Session {
		t.Fatalf("bad ack: %#v", d)
	}

	// Verify we have output
	key = "_rexec/" + event.Session + "/" + agent.config.NodeName + "/out/00000"
	d = getKV(t, agent, key)
	if d == nil || d.Session != event.Session ||
		!bytes.Contains(d.Value, []byte(expectedSubstring)) {
		t.Fatalf("bad output: %#v", d)
	}

	// Verify we have an exit code
	key = "_rexec/" + event.Session + "/" + agent.config.NodeName + "/exit"
	d = getKV(t, agent, key)
	if d == nil || d.Session != event.Session || string(d.Value) != expectedReturnCode {
		t.Fatalf("bad exit code: %#v", d)
	}
}
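// getKV is the read-side counterpart to the setKV sketch above; again a
// hedged illustration rather than the package's exact helper: fetch a
// single entry through the agent's KVS.Get RPC and return it, or nil if
// the key is absent.
func getKV(t *testing.T, agent *Agent, key string) *structs.DirEntry {
	req := structs.KeyRequest{
		Datacenter: agent.config.Datacenter,
		Key:        key,
	}
	var out structs.IndexedDirEntries
	if err := agent.RPC("KVS.Get", &req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(out.Entries) > 0 {
		return out.Entries[0]
	}
	return nil
}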
func TestACLEndpoint_GetPolicy(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name: "User token",
			Type: structs.ACLTypeClient,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var out string
	if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	getR := structs.ACLPolicyRequest{
		Datacenter: "dc1",
		ACL:        out,
	}
	var acls structs.ACLPolicy
	if err := msgpackrpc.CallWithCodec(codec, "ACL.GetPolicy", &getR, &acls); err != nil {
		t.Fatalf("err: %v", err)
	}

	if acls.Policy == nil {
		t.Fatalf("Bad: %v", acls)
	}
	if acls.TTL != 30*time.Second {
		t.Fatalf("bad: %v", acls)
	}

	// Do a conditional lookup with etag
	getR.ETag = acls.ETag
	var out2 structs.ACLPolicy
	if err := msgpackrpc.CallWithCodec(codec, "ACL.GetPolicy", &getR, &out2); err != nil {
		t.Fatalf("err: %v", err)
	}

	if out2.Policy != nil {
		t.Fatalf("Bad: %v", out2)
	}
	if out2.TTL != 30*time.Second {
		t.Fatalf("bad: %v", out2)
	}
}
func TestACL_NonAuthority_Management(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLMasterToken = "foobar"
		c.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1" // Enable ACLs!
		c.ACLDefaultPolicy = "deny"
		c.Bootstrap = false // Disable bootstrap
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join
	addr := fmt.Sprintf("127.0.0.1:%d",
		s1.config.SerfLANConfig.MemberlistConfig.BindPort)
	if _, err := s2.JoinLAN([]string{addr}); err != nil {
		t.Fatalf("err: %v", err)
	}

	testutil.WaitForResult(func() (bool, error) {
		p1, _ := s1.raftPeers.Peers()
		return len(p1) == 2, fmt.Errorf("%v", p1)
	}, func(err error) {
		t.Fatalf("should have 2 peers: %v", err)
	})
	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Find the non-authoritative server
	var nonAuth *Server
	if !s1.IsLeader() {
		nonAuth = s1
	} else {
		nonAuth = s2
	}

	// Resolve the token
	acl, err := nonAuth.resolveToken("foobar")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if acl == nil {
		t.Fatalf("missing acl")
	}

	// Check the policy, should allow all
	if !acl.KeyRead("foo/test") {
		t.Fatalf("unexpected failed read")
	}
}
func TestKVS_Apply(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	arg := structs.KVSRequest{
		Datacenter: "dc1",
		Op:         structs.KVSSet,
		DirEnt: structs.DirEntry{
			Key:   "test",
			Flags: 42,
			Value: []byte("test"),
		},
	}
	var out bool
	if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Verify
	state := s1.fsm.State()
	_, d, err := state.KVSGet("test")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if d == nil {
		t.Fatalf("should not be nil")
	}

	// Do a check and set
	arg.Op = structs.KVSCAS
	arg.DirEnt.ModifyIndex = d.ModifyIndex
	arg.DirEnt.Flags = 43
	if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check this was applied
	if !out {
		t.Fatalf("bad: %v", out)
	}

	// Verify
	_, d, err = state.KVSGet("test")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if d.Flags != 43 {
		t.Fatalf("bad: %v", d)
	}
}