Example #1: a stale read of Catalog.ListServices. With AllowStale set, the query succeeds even though no leader has been elected, and KnownLeader comes back false.
func TestCatalogListServices_Stale(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	args := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	args.AllowStale = true
	var out structs.IndexedServices

	// Inject a fake service
	s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
	s1.fsm.State().EnsureService(2, "foo", &structs.NodeService{"db", "db", []string{"primary"}, "127.0.0.1", 5000})

	// Run the query, do not wait for leader!
	if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should find the service
	if len(out.Services) != 1 {
		t.Fatalf("bad: %v", out)
	}

	// Should not have a leader! Stale read
	if out.KnownLeader {
		t.Fatalf("bad: %v", out)
	}
}
Example #2: the UINodes HTTP handler. If the Internal.NodeDump RPC fails because there is no leader, it retries once with AllowStale, then replaces nil slices with empty ones before returning the dump.
// UINodes is used to list the nodes in a given datacenter. We return a
// NodeDump which provides overview information for all the nodes
func (s *HTTPServer) UINodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Parse arguments
	args := structs.DCSpecificRequest{}
	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
		return nil, nil
	}

	// Make the RPC request
	var out structs.IndexedNodeDump
	defer setMeta(resp, &out.QueryMeta)
RPC:
	if err := s.agent.RPC("Internal.NodeDump", &args, &out); err != nil {
		// Retry the request allowing stale data if no leader
		if strings.Contains(err.Error(), structs.ErrNoLeader.Error()) && !args.AllowStale {
			args.AllowStale = true
			goto RPC
		}
		return nil, err
	}

	// Use empty list instead of nil
	for _, info := range out.Dump {
		if info.Services == nil {
			info.Services = make([]*structs.NodeService, 0)
		}
		if info.Checks == nil {
			info.Checks = make([]*structs.HealthCheck, 0)
		}
	}
	if out.Dump == nil {
		out.Dump = make(structs.NodeDump, 0)
	}
	return out.Dump, nil
}
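
The labeled goto above retries at most once, because AllowStale is checked before it is flipped. Below is a minimal sketch of the same single-retry behavior written without goto, assuming the s.agent.RPC method and structs.ErrNoLeader value used above:

	// Sketch only: equivalent single retry without the RPC label.
	if err := s.agent.RPC("Internal.NodeDump", &args, &out); err != nil {
		if !strings.Contains(err.Error(), structs.ErrNoLeader.Error()) {
			return nil, err
		}
		// No leader yet: retry once, accepting stale data from a follower.
		args.AllowStale = true
		if err := s.agent.RPC("Internal.NodeDump", &args, &out); err != nil {
			return nil, err
		}
	}
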
Example #3: a blocking query against Catalog.ListServices. MinQueryIndex and MaxQueryTime make the second call wait until a background write advances the index.
func TestCatalogListServices_Blocking(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	args := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	var out structs.IndexedServices

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Run the query
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Setup a blocking query
	args.MinQueryIndex = out.Index
	args.MaxQueryTime = time.Second

	// Async cause a change
	idx := out.Index
	start := time.Now()
	go func() {
		time.Sleep(100 * time.Millisecond)
		if err := s1.fsm.State().EnsureNode(idx+1, &structs.Node{Node: "foo", Address: "127.0.0.1"}); err != nil {
			t.Fatalf("err: %v", err)
		}
		if err := s1.fsm.State().EnsureService(idx+2, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: []string{"primary"}, Address: "127.0.0.1", Port: 5000}); err != nil {
			t.Fatalf("err: %v", err)
		}
	}()

	// Re-run the query
	out = structs.IndexedServices{}
	if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should block at least 100ms
	if time.Now().Sub(start) < 100*time.Millisecond {
		t.Fatalf("too fast")
	}

	// Check the indexes
	if out.Index != idx+2 {
		t.Fatalf("bad: %v", out)
	}

	// Should find the service
	if len(out.Services) != 2 {
		t.Fatalf("bad: %v", out)
	}
}
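
The test above exercises the server side of a blocking query. On the caller's side, the same two fields drive a watch loop: set MinQueryIndex to the last index returned, bound the wait with MaxQueryTime, and issue the call again. A minimal sketch, assuming the msgpackrpc codec and structs types from the test; the helper name watchServices and the 10-second wait are illustrative only:

// watchServices is a hypothetical helper showing the client side of a blocking query.
func watchServices(codec rpc.ClientCodec, onChange func(structs.IndexedServices)) error {
	args := structs.DCSpecificRequest{Datacenter: "dc1"}
	args.MaxQueryTime = 10 * time.Second // upper bound on each blocking call
	for {
		var out structs.IndexedServices
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, &out); err != nil {
			return err
		}
		// A timed-out call returns the same index; only report real changes.
		if out.Index > args.MinQueryIndex {
			onChange(out)
		}
		// The next call blocks until something newer than out.Index is committed.
		args.MinQueryIndex = out.Index
	}
}
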
Example #4: a variant of the blocking-query test that uses the plain RPC client, a fixed sleep instead of WaitForLeader, and positional struct literals.
func TestCatalogListServices_Blocking(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	args := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	var out structs.IndexedServices

	// Wait for leader
	time.Sleep(100 * time.Millisecond)

	// Run the query
	if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Setup a blocking query
	args.MinQueryIndex = out.Index
	args.MaxQueryTime = time.Second

	// Async cause a change
	start := time.Now()
	go func() {
		time.Sleep(100 * time.Millisecond)
		s1.fsm.State().EnsureNode(1, structs.Node{"foo", "127.0.0.1"})
		s1.fsm.State().EnsureService(2, "foo", &structs.NodeService{"db", "db", []string{"primary"}, 5000})
	}()

	// Re-run the query
	out = structs.IndexedServices{}
	if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should block at least 100ms
	if time.Now().Sub(start) < 100*time.Millisecond {
		t.Fatalf("too fast")
	}

	// Check the indexes
	if out.Index != 2 {
		t.Fatalf("bad: %v", out)
	}

	// Should find the service
	if len(out.Services) != 2 {
		t.Fatalf("bad: %v", out)
	}
}
Example #5: the CatalogServices HTTP handler. It fills a DCSpecificRequest (including node-meta filters) from the request and forwards it to the Catalog.ListServices RPC.
func (s *HTTPServer) CatalogServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Set default DC
	args := structs.DCSpecificRequest{}
	args.NodeMetaFilters = s.parseMetaFilter(req)
	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
		return nil, nil
	}

	var out structs.IndexedServices
	defer setMeta(resp, &out.QueryMeta)
	if err := s.agent.RPC("Catalog.ListServices", &args, &out); err != nil {
		return nil, err
	}
	return out.Services, nil
}
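
The QueryOptions parsed by s.parse map onto query-string parameters of the HTTP API, so a caller can opt into stale or blocking reads without touching the RPC layer. A minimal client sketch, assuming a local agent on the conventional 127.0.0.1:8500 HTTP address and the /v1/catalog/services route:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// ?stale allows any server to answer, even with no elected leader.
	resp, err := http.Get("http://127.0.0.1:8500/v1/catalog/services?stale")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The endpoint returns a map of service name to tags.
	var services map[string][]string
	if err := json.NewDecoder(resp.Body).Decode(&services); err != nil {
		log.Fatal(err)
	}
	fmt.Println(services, "known leader:", resp.Header.Get("X-Consul-KnownLeader"))
}
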
Example #6: ACL.ReplicationStatus forces a consistent read by clearing AllowStale and setting RequireConsistent, forwards to the leader, then returns the cached replication status under a read lock.
// ReplicationStatus is used to retrieve the current ACL replication status.
func (a *ACL) ReplicationStatus(args *structs.DCSpecificRequest,
	reply *structs.ACLReplicationStatus) error {
	// This must be sent to the leader, so we fix the args since we are
	// re-using a structure where we don't support all the options.
	args.RequireConsistent = true
	args.AllowStale = false
	if done, err := a.srv.forward("ACL.ReplicationStatus", args, args, reply); done {
		return err
	}

	// There's no ACL token required here since this doesn't leak any
	// sensitive information, and we don't want people to have to use
	// management tokens if they are querying this via a health check.

	// Poll the latest status.
	a.srv.aclReplicationStatusLock.RLock()
	*reply = a.srv.aclReplicationStatus
	a.srv.aclReplicationStatusLock.RUnlock()
	return nil
}
Example #7: a blocking query that times out. With no writes, the call returns after MaxQueryTime and the index is unchanged.
func TestCatalogListServices_Timeout(t *testing.T) {
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	client := rpcClient(t, s1)
	defer client.Close()

	args := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	var out structs.IndexedServices

	testutil.WaitForLeader(t, client.Call, "dc1")

	// Run the query
	if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Setup a blocking query
	args.MinQueryIndex = out.Index
	args.MaxQueryTime = 100 * time.Millisecond

	// Re-run the query
	start := time.Now()
	out = structs.IndexedServices{}
	if err := client.Call("Catalog.ListServices", &args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should block at least 100ms
	if time.Now().Sub(start) < 100*time.Millisecond {
		// TODO: Failing
		t.Fatalf("too fast")
	}

	// Check the indexes, should not change
	if out.Index != args.MinQueryIndex {
		t.Fatalf("bad: %v", out)
	}
}
Example #8: a variant of the UINodes handler from example #2 with the same stale-retry logic but without the nil-slice normalization.
// UINodes is used to list the nodes in a given datacenter. We return a
// NodeDump which provides overview information for all the nodes
func (s *HTTPServer) UINodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Parse arguments
	args := structs.DCSpecificRequest{}
	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
		return nil, nil
	}

	// Make the RPC request
	var out structs.IndexedNodeDump
	defer setMeta(resp, &out.QueryMeta)
RPC:
	if err := s.agent.RPC("Internal.NodeDump", &args, &out); err != nil {
		// Retry the request allowing stale data if no leader
		if strings.Contains(err.Error(), structs.ErrNoLeader.Error()) && !args.AllowStale {
			args.AllowStale = true
			goto RPC
		}
		return nil, err
	}
	return out.Dump, nil
}
Example #9: the CatalogNodes HTTP handler. It parses the source node and node-meta filters, calls Catalog.ListNodes, translates addresses, and returns an empty list instead of nil.
func (s *HTTPServer) CatalogNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Setup the request
	args := structs.DCSpecificRequest{}
	s.parseSource(req, &args.Source)
	args.NodeMetaFilters = s.parseMetaFilter(req)
	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
		return nil, nil
	}

	var out structs.IndexedNodes
	defer setMeta(resp, &out.QueryMeta)
	if err := s.agent.RPC("Catalog.ListNodes", &args, &out); err != nil {
		return nil, err
	}
	translateAddresses(s.agent.config, args.Datacenter, out.Nodes)

	// Use empty list instead of nil
	if out.Nodes == nil {
		out.Nodes = make(structs.Nodes, 0)
	}
	return out.Nodes, nil
}
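
The "use empty list instead of nil" step here and in example #2 exists because encoding/json marshals a nil slice as null but an empty slice as [], and API consumers generally expect an array. A small standalone demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilNodes []string           // nil slice
	emptyNodes := make([]string, 0) // empty, non-nil slice

	a, _ := json.Marshal(nilNodes)
	b, _ := json.Marshal(emptyNodes)
	fmt.Println(string(a)) // prints: null
	fmt.Println(string(b)) // prints: []
}
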
Example #10: session reads (Session.Get, Session.List, Session.NodeSessions) under ACL filtering: they pass without a token before version 8 enforcement, return nothing once it is enabled, and pass again with a token that grants session "foo" read.
func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = false
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Create the ACL.
	req := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name: "User token",
			Type: structs.ACLTypeClient,
			Rules: `
session "foo" {
	policy = "read"
}
`,
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}

	var token string
	if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a node and a session.
	s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"})
	arg := structs.SessionRequest{
		Datacenter: "dc1",
		Op:         structs.SessionCreate,
		Session: structs.Session{
			Node: "foo",
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var out string
	if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Perform all the read operations, which should go through since version
	// 8 ACL enforcement isn't enabled.
	getR := structs.SessionSpecificRequest{
		Datacenter: "dc1",
		Session:    out,
	}
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 1 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}
	listR := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.List", &listR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 1 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}
	nodeR := structs.NodeSpecificRequest{
		Datacenter: "dc1",
		Node:       "foo",
	}
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", &nodeR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 1 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}

	// Now turn on version 8 enforcement and make sure everything is empty.
	s1.config.ACLEnforceVersion8 = true
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 0 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}
	{
		var sessions structs.IndexedSessions

		if err := msgpackrpc.CallWithCodec(codec, "Session.List", &listR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 0 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", &nodeR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 0 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}

	// Finally, supply the token and make sure the reads are allowed.
	getR.Token = token
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 1 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}
	listR.Token = token
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.List", &listR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 1 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}
	nodeR.Token = token
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", &nodeR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 1 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}

	// Try to get a session that doesn't exist to make sure that's handled
	// correctly by the filter (it will get passed a nil slice).
	getR.Session = "adf4238a-882b-9ddc-4a9d-5b6758e4159e"
	{
		var sessions structs.IndexedSessions
		if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(sessions.Sessions) != 0 {
			t.Fatalf("bad: %v", sessions.Sessions)
		}
	}
}
Example #11: Operator.RaftGetConfiguration is denied without a token and succeeds once an ACL with operator = "read" is supplied; the reply is then compared against the Raft configuration.
func TestOperator_RaftGetConfiguration_ACLDeny(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Make a request with no token to make sure it gets denied.
	arg := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	var reply structs.RaftConfigurationResponse
	err := msgpackrpc.CallWithCodec(codec, "Operator.RaftGetConfiguration", &arg, &reply)
	if err == nil || !strings.Contains(err.Error(), permissionDenied) {
		t.Fatalf("err: %v", err)
	}

	// Create an ACL with operator read permissions.
	var token string
	{
		var rules = `
                    operator = "read"
                `

		req := structs.ACLRequest{
			Datacenter: "dc1",
			Op:         structs.ACLSet,
			ACL: structs.ACL{
				Name:  "User token",
				Type:  structs.ACLTypeClient,
				Rules: rules,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Now it should go through.
	arg.Token = token
	if err := msgpackrpc.CallWithCodec(codec, "Operator.RaftGetConfiguration", &arg, &reply); err != nil {
		t.Fatalf("err: %v", err)
	}

	future := s1.raft.GetConfiguration()
	if err := future.Error(); err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(future.Configuration().Servers) != 1 {
		t.Fatalf("bad: %v", future.Configuration().Servers)
	}
	me := future.Configuration().Servers[0]
	expected := structs.RaftConfigurationResponse{
		Servers: []*structs.RaftServer{
			&structs.RaftServer{
				ID:      me.ID,
				Node:    s1.config.NodeName,
				Address: me.Address,
				Leader:  true,
				Voter:   true,
			},
		},
		Index: future.Index(),
	}
	if !reflect.DeepEqual(reply, expected) {
		t.Fatalf("bad: %v", reply)
	}
}
Example #12: Catalog.ListNodes under ACL filtering; the node is hidden once version 8 enforcement is on and visible again with a node read policy for the server's node name.
func TestCatalog_ListNodes_ACLFilter(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = false
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// We scope the reply in each of these since msgpack won't clear out an
	// existing slice if the incoming one is nil, so it's best to start
	// clean each time.

	// Prior to version 8, the node policy should be ignored.
	args := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	{
		reply := structs.IndexedNodes{}
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(reply.Nodes) != 1 {
			t.Fatalf("bad: %v", reply.Nodes)
		}
	}

	// Now turn on version 8 enforcement and try again.
	s1.config.ACLEnforceVersion8 = true
	{
		reply := structs.IndexedNodes{}
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(reply.Nodes) != 0 {
			t.Fatalf("bad: %v", reply.Nodes)
		}
	}

	// Create an ACL that can read the node.
	arg := structs.ACLRequest{
		Datacenter: "dc1",
		Op:         structs.ACLSet,
		ACL: structs.ACL{
			Name: "User token",
			Type: structs.ACLTypeClient,
			Rules: fmt.Sprintf(`
node "%s" {
	policy = "read"
}
`, s1.config.NodeName),
		},
		WriteRequest: structs.WriteRequest{Token: "root"},
	}
	var id string
	if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &id); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now try with the token and it will go through.
	args.Token = id
	{
		reply := structs.IndexedNodes{}
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(reply.Nodes) != 1 {
			t.Fatalf("bad: %v", reply.Nodes)
		}
	}
}
Example #13: Coordinate.ListNodes under ACL filtering; after enabling version 8 enforcement, only coordinates for nodes the token can read ("foo") are returned.
func TestCoordinate_ListNodes_ACLFilter(t *testing.T) {
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.ACLDatacenter = "dc1"
		c.ACLMasterToken = "root"
		c.ACLDefaultPolicy = "deny"
		c.ACLEnforceVersion8 = false
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testutil.WaitForLeader(t, s1.RPC, "dc1")

	// Register some nodes.
	nodes := []string{"foo", "bar", "baz"}
	for _, node := range nodes {
		req := structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       node,
			Address:    "127.0.0.1",
			WriteRequest: structs.WriteRequest{
				Token: "root",
			},
		}
		var reply struct{}
		if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Send coordinate updates for a few nodes.
	arg1 := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "foo",
		Coord:      generateRandomCoordinate(),
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	var out struct{}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg1, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg2 := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Coord:      generateRandomCoordinate(),
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg2, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	arg3 := structs.CoordinateUpdateRequest{
		Datacenter: "dc1",
		Node:       "baz",
		Coord:      generateRandomCoordinate(),
		WriteRequest: structs.WriteRequest{
			Token: "root",
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg3, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait for all the coordinate updates to apply. Since we aren't
	// enforcing version 8 ACLs, this should also allow us to read
	// everything back without a token.
	testutil.WaitForResult(func() (bool, error) {
		arg := structs.DCSpecificRequest{
			Datacenter: "dc1",
		}
		resp := structs.IndexedCoordinates{}
		if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(resp.Coordinates) == 3 {
			return true, nil
		}
		return false, fmt.Errorf("bad: %v", resp.Coordinates)
	}, func(err error) { t.Fatalf("err: %v", err) })

	// Now that we've waited for the batch processing to ingest the
	// coordinates we can do the rest of the requests without the loop. We
	// will start by turning on version 8 ACL support which should block
	// everything.
	s1.config.ACLEnforceVersion8 = true
	arg := structs.DCSpecificRequest{
		Datacenter: "dc1",
	}
	resp := structs.IndexedCoordinates{}
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(resp.Coordinates) != 0 {
		t.Fatalf("bad: %#v", resp.Coordinates)
	}

	// Create an ACL that can read one of the nodes.
	var id string
	{
		req := structs.ACLRequest{
			Datacenter: "dc1",
			Op:         structs.ACLSet,
			ACL: structs.ACL{
				Name: "User token",
				Type: structs.ACLTypeClient,
				Rules: `
node "foo" {
	policy = "read"
}
`,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &id); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// With the token, it should now go through.
	arg.Token = id
	if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(resp.Coordinates) != 1 || resp.Coordinates[0].Node != "foo" {
		t.Fatalf("bad: %#v", resp.Coordinates)
	}
}
Example #14: the AgentMonitor HTTP handler. It checks operator permission via Operator.RaftGetConfiguration, builds a log level filter, and streams log lines to the client until the connection closes.
func (s *HTTPServer) AgentMonitor(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Only GET supported
	if req.Method != "GET" {
		resp.WriteHeader(405)
		return nil, nil
	}

	var args structs.DCSpecificRequest
	args.Datacenter = s.agent.config.Datacenter
	s.parseToken(req, &args.Token)
	// Validate that the given token has operator permissions
	var reply structs.RaftConfigurationResponse
	if err := s.agent.RPC("Operator.RaftGetConfiguration", &args, &reply); err != nil {
		return nil, err
	}

	// Get the provided loglevel
	logLevel := req.URL.Query().Get("loglevel")
	if logLevel == "" {
		logLevel = "INFO"
	}

	// Upper case the log level
	logLevel = strings.ToUpper(logLevel)

	// Create a level filter
	filter := logger.LevelFilter()
	filter.MinLevel = logutils.LogLevel(logLevel)
	if !logger.ValidateLevelFilter(filter.MinLevel, filter) {
		resp.WriteHeader(400)
		resp.Write([]byte(fmt.Sprintf("Unknown log level: %s", filter.MinLevel)))
		return nil, nil
	}

	flusher, ok := resp.(http.Flusher)
	if !ok {
		return nil, fmt.Errorf("Streaming not supported")
	}

	// Set up a log handler
	handler := &httpLogHandler{
		filter: filter,
		logCh:  make(chan string, 512),
		logger: s.logger,
	}
	s.agent.logWriter.RegisterHandler(handler)
	defer s.agent.logWriter.DeregisterHandler(handler)

	notify := resp.(http.CloseNotifier).CloseNotify()

	// Stream logs until the connection is closed
	for {
		select {
		case <-notify:
			s.agent.logWriter.DeregisterHandler(handler)
			if handler.droppedCount > 0 {
				s.agent.logger.Printf("[WARN] agent: Dropped %d logs during monitor request", handler.droppedCount)
			}
			return nil, nil
		case log := <-handler.logCh:
			resp.Write([]byte(log + "\n"))
			flusher.Flush()
		}
	}

	return nil, nil
}
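
The handler above streams by writing one log line at a time and flushing after each write, stopping when the client goes away. A minimal standalone sketch of that pattern using only the standard library; modern Go would watch req.Context().Done() rather than the deprecated http.CloseNotifier:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func monitor(w http.ResponseWriter, r *http.Request) {
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming not supported", http.StatusInternalServerError)
		return
	}
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-r.Context().Done():
			// Client closed the connection; stop streaming.
			return
		case t := <-ticker.C:
			// Stand-in for a real log line from a handler channel.
			fmt.Fprintf(w, "log line at %s\n", t.Format(time.RFC3339))
			flusher.Flush()
		}
	}
}

func main() {
	http.HandleFunc("/monitor", monitor)
	log.Fatal(http.ListenAndServe(":8080", nil))
}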