// ExecuteRemote is used when a local node doesn't have any instances of a // service available and needs to probe remote DCs. This sends the full query // over since the remote side won't have it in its state store, and this doesn't // do the failover logic since that's already being run on the originating DC. // We don't want things to fan out further than one level. func (p *PreparedQuery) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error { if done, err := p.srv.forward("PreparedQuery.ExecuteRemote", args, args, reply); done { return err } defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute_remote"}, time.Now()) // We have to do this ourselves since we are not doing a blocking RPC. p.srv.setQueryMeta(&reply.QueryMeta) if args.RequireConsistent { if err := p.srv.consistentRead(); err != nil { return err } } // Run the query locally to see what we can find. if err := p.execute(&args.Query, reply); err != nil { return err } // We don't bother trying to do an RTT sort here since we are by // definition in another DC. We just shuffle to make sure that we // balance the load across the results. reply.Nodes.Shuffle() // Apply the limit if given. if args.Limit > 0 && len(reply.Nodes) > args.Limit { reply.Nodes = reply.Nodes[:args.Limit] } return nil }
// preparedQueryExecute executes a prepared query. func (s *HTTPServer) preparedQueryExecute(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { args := structs.PreparedQueryExecuteRequest{ QueryIDOrName: id, } s.parseSource(req, &args.Source) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } if err := parseLimit(req, &args.Limit); err != nil { return nil, fmt.Errorf("Bad limit: %s", err) } var reply structs.PreparedQueryExecuteResponse endpoint := s.agent.getEndpoint(preparedQueryEndpoint) if err := s.agent.RPC(endpoint+".Execute", &args, &reply); err != nil { // We have to check the string since the RPC sheds // the specific error type. if err.Error() == consul.ErrQueryNotFound.Error() { resp.WriteHeader(404) resp.Write([]byte(err.Error())) return nil, nil } return nil, err } // Use empty list instead of nil. if reply.Nodes == nil { reply.Nodes = make(structs.CheckServiceNodes, 0) } return reply, nil }
// execute runs a prepared query in the local DC without any failover. We don't // apply any sorting options or ACL checks at this level - it should be done up above. func (p *PreparedQuery) execute(query *structs.PreparedQuery, reply *structs.PreparedQueryExecuteResponse) error { state := p.srv.fsm.State() _, nodes, err := state.CheckServiceNodes(query.Service.Service) if err != nil { return err } // Filter out any unhealthy nodes. nodes = nodes.Filter(query.Service.OnlyPassing) // Apply the tag filters, if any. if len(query.Service.Tags) > 0 { nodes = tagFilter(query.Service.Tags, nodes) } // Capture the nodes and pass the DNS information through to the reply. reply.Service = query.Service.Service reply.Nodes = nodes reply.DNS = query.DNS // Stamp the result for this datacenter. reply.Datacenter = p.srv.config.Datacenter return nil }
// Execute runs a prepared query and returns the results. This will perform the // failover logic if no local results are available. This is typically called as // part of a DNS lookup, or when executing prepared queries from the HTTP API. func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { if done, err := p.srv.forward("PreparedQuery.Execute", args, args, reply); done { return err } defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute"}, time.Now()) // We have to do this ourselves since we are not doing a blocking RPC. p.srv.setQueryMeta(&reply.QueryMeta) if args.RequireConsistent { if err := p.srv.consistentRead(); err != nil { return err } } // Try to locate the query. state := p.srv.fsm.State() _, query, err := state.PreparedQueryLookup(args.QueryIDOrName) if err != nil { return err } if query == nil { return ErrQueryNotFound } // Execute the query for the local DC. if err := p.execute(query, reply); err != nil { return err } // Shuffle the results in case coordinates are not available if they // requested an RTT sort. reply.Nodes.Shuffle() if err := p.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes); err != nil { return err } // Apply the limit if given. if args.Limit > 0 && len(reply.Nodes) > args.Limit { reply.Nodes = reply.Nodes[:args.Limit] } // In the happy path where we found some healthy nodes we go with that // and bail out. Otherwise, we fail over and try remote DCs, as allowed // by the query setup. if len(reply.Nodes) == 0 { wrapper := &queryServerWrapper{p.srv} if err := queryFailover(wrapper, query, args.Limit, args.QueryOptions, reply); err != nil { return err } } return nil }
// preparedQueryExecute executes a prepared query. func (s *HTTPServer) preparedQueryExecute(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { args := structs.PreparedQueryExecuteRequest{ QueryIDOrName: id, Agent: structs.QuerySource{ Node: s.agent.config.NodeName, Datacenter: s.agent.config.Datacenter, }, } s.parseSource(req, &args.Source) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } if err := parseLimit(req, &args.Limit); err != nil { return nil, fmt.Errorf("Bad limit: %s", err) } var reply structs.PreparedQueryExecuteResponse endpoint := s.agent.getEndpoint(preparedQueryEndpoint) if err := s.agent.RPC(endpoint+".Execute", &args, &reply); err != nil { // We have to check the string since the RPC sheds // the specific error type. if err.Error() == consul.ErrQueryNotFound.Error() { resp.WriteHeader(404) resp.Write([]byte(err.Error())) return nil, nil } return nil, err } // Note that we translate using the DC that the results came from, since // a query can fail over to a different DC than where the execute request // was sent to. That's why we use the reply's DC and not the one from // the args. translateAddresses(s.agent.config, reply.Datacenter, reply.Nodes) // Use empty list instead of nil. if reply.Nodes == nil { reply.Nodes = make(structs.CheckServiceNodes, 0) } return reply, nil }
// execute runs a prepared query in the local DC without any failover. We don't
// apply any sorting options at this level - it should be done up above.
func (p *PreparedQuery) execute(query *structs.PreparedQuery, reply *structs.PreparedQueryExecuteResponse) error {
	state := p.srv.fsm.State()
	_, nodes, err := state.CheckServiceNodes(query.Service.Service)
	if err != nil {
		return err
	}

	// This is kind of a paranoia ACL check, in case something changed with
	// the token from the time the query was registered. Note that we use
	// the token stored with the query, NOT the passed-in one, which is
	// critical to how queries work (the query becomes a proxy for a lookup
	// using the ACL it was created with).
	acl, err := p.srv.resolveToken(query.Token)
	if err != nil {
		return err
	}
	if acl != nil && !acl.ServiceRead(query.Service.Service) {
		p.srv.logger.Printf("[WARN] consul.prepared_query: Execute of prepared query for service '%s' denied due to ACLs", query.Service.Service)
		return permissionDeniedErr
	}

	// Filter out any unhealthy nodes.
	nodes = nodes.Filter(query.Service.OnlyPassing)

	// Apply the tag filters, if any.
	if len(query.Service.Tags) > 0 {
		nodes = tagFilter(query.Service.Tags, nodes)
	}

	// Capture the nodes and pass the DNS information through to the reply.
	reply.Service = query.Service.Service
	reply.Nodes = nodes
	reply.DNS = query.DNS

	// Stamp the result for this datacenter, so callers can tell which DC
	// the results came from (important once failover is in play).
	reply.Datacenter = p.srv.config.Datacenter

	return nil
}
// queryFailover runs an algorithm to determine which DCs to try and then calls // them to try to locate alternative services. func queryFailover(q queryServer, query *structs.PreparedQuery, limit int, options structs.QueryOptions, reply *structs.PreparedQueryExecuteResponse) error { // Pull the list of other DCs. This is sorted by RTT in case the user // has selected that. nearest, err := q.GetOtherDatacentersByDistance() if err != nil { return err } // This will help us filter unknown DCs supplied by the user. known := make(map[string]struct{}) for _, dc := range nearest { known[dc] = struct{}{} } // Build a candidate list of DCs to try, starting with the nearest N // from RTTs. var dcs []string index := make(map[string]struct{}) if query.Service.Failover.NearestN > 0 { for i, dc := range nearest { if !(i < query.Service.Failover.NearestN) { break } dcs = append(dcs, dc) index[dc] = struct{}{} } } // Then add any DCs explicitly listed that weren't selected above. for _, dc := range query.Service.Failover.Datacenters { // This will prevent a log of other log spammage if we do not // attempt to talk to datacenters we don't know about. if _, ok := known[dc]; !ok { q.GetLogger().Printf("[DEBUG] consul.prepared_query: Skipping unknown datacenter '%s' in prepared query", dc) continue } // This will make sure we don't re-try something that fails // from the NearestN list. if _, ok := index[dc]; !ok { dcs = append(dcs, dc) } } // Now try the selected DCs in priority order. failovers := 0 for _, dc := range dcs { // This keeps track of how many iterations we actually run. failovers++ // Be super paranoid and set the nodes slice to nil since it's // the same slice we used before. We know there's nothing in // there, but the underlying msgpack library has a policy of // updating the slice when it's non-nil, and that feels dirty. // Let's just set it to nil so there's no way to communicate // through this slice across successive RPC calls. 
reply.Nodes = nil // Note that we pass along the limit since it can be applied // remotely to save bandwidth. We also pass along the consistency // mode information we were given, so that applies to the remote // query as well. remote := &structs.PreparedQueryExecuteRemoteRequest{ Datacenter: dc, Query: *query, Limit: limit, QueryOptions: options, } if err := q.ForwardDC("PreparedQuery.ExecuteRemote", dc, remote, reply); err != nil { q.GetLogger().Printf("[WARN] consul.prepared_query: Failed querying for service '%s' in datacenter '%s': %s", query.Service.Service, dc, err) continue } // We can stop if we found some nodes. if len(reply.Nodes) > 0 { break } } // Set this at the end because the response from the remote doesn't have // this information. reply.Failovers = failovers return nil }
// Execute runs a prepared query and returns the results. This will perform the
// failover logic if no local results are available. This is typically called as
// part of a DNS lookup, or when executing prepared queries from the HTTP API.
func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error {
	if done, err := p.srv.forward("PreparedQuery.Execute", args, args, reply); done {
		return err
	}
	defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute"}, time.Now())

	// We have to do this ourselves since we are not doing a blocking RPC.
	p.srv.setQueryMeta(&reply.QueryMeta)
	if args.RequireConsistent {
		if err := p.srv.consistentRead(); err != nil {
			return err
		}
	}

	// Try to locate the query.
	state := p.srv.fsm.State()
	_, query, err := state.PreparedQueryResolve(args.QueryIDOrName)
	if err != nil {
		return err
	}
	if query == nil {
		return ErrQueryNotFound
	}

	// Execute the query for the local DC.
	if err := p.execute(query, reply); err != nil {
		return err
	}

	// If they supplied a token with the query, use that, otherwise use the
	// token passed in with the request.
	token := args.QueryOptions.Token
	if query.Token != "" {
		token = query.Token
	}
	if err := p.srv.filterACL(token, &reply.Nodes); err != nil {
		return err
	}

	// TODO (slackpad) We could add a special case here that will avoid the
	// fail over if we filtered everything due to ACLs. This seems like it
	// might not be worth the code complexity and behavior differences,
	// though, since this is essentially a misconfiguration.

	// Shuffle the results in case coordinates are not available if they
	// requested an RTT sort.
	reply.Nodes.Shuffle()

	// Build the query source. This can be provided by the client, or by
	// the prepared query. Client-specified takes priority.
	qs := args.Source
	if qs.Datacenter == "" {
		qs.Datacenter = args.Agent.Datacenter
	}
	if query.Service.Near != "" && qs.Node == "" {
		qs.Node = query.Service.Near
	}

	// Respect the magic "_agent" flag, which substitutes the agent that
	// forwarded the request as the node to sort near.
	if qs.Node == "_agent" {
		qs.Node = args.Agent.Node
	}

	// Perform the distance sort.
	err = p.srv.sortNodesByDistanceFrom(qs, reply.Nodes)
	if err != nil {
		return err
	}

	// If we applied a distance sort, make sure that the node queried for is in
	// position 0, provided the results are from the same datacenter.
	if qs.Node != "" && reply.Datacenter == qs.Datacenter {
		for i, node := range reply.Nodes {
			if node.Node.Node == qs.Node {
				reply.Nodes[0], reply.Nodes[i] = reply.Nodes[i], reply.Nodes[0]
				break
			}

			// Put a cap on the depth of the search. The local agent should
			// never be further in than this if distance sorting was applied.
			if i == 9 {
				break
			}
		}
	}

	// Apply the limit if given. Note this happens after the swap above so
	// the pinned node is not truncated away.
	if args.Limit > 0 && len(reply.Nodes) > args.Limit {
		reply.Nodes = reply.Nodes[:args.Limit]
	}

	// In the happy path where we found some healthy nodes we go with that
	// and bail out. Otherwise, we fail over and try remote DCs, as allowed
	// by the query setup.
	if len(reply.Nodes) == 0 {
		wrapper := &queryServerWrapper{p.srv}
		if err := queryFailover(wrapper, query, args.Limit, args.QueryOptions, reply); err != nil {
			return err
		}
	}

	return nil
}
// Execute runs a prepared query and returns the results. This will perform the // failover logic if no local results are available. This is typically called as // part of a DNS lookup, or when executing prepared queries from the HTTP API. func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, reply *structs.PreparedQueryExecuteResponse) error { if done, err := p.srv.forward("PreparedQuery.Execute", args, args, reply); done { return err } defer metrics.MeasureSince([]string{"consul", "prepared-query", "execute"}, time.Now()) // We have to do this ourselves since we are not doing a blocking RPC. p.srv.setQueryMeta(&reply.QueryMeta) if args.RequireConsistent { if err := p.srv.consistentRead(); err != nil { return err } } // Try to locate the query. state := p.srv.fsm.State() _, query, err := state.PreparedQueryResolve(args.QueryIDOrName) if err != nil { return err } if query == nil { return ErrQueryNotFound } // Execute the query for the local DC. if err := p.execute(query, reply); err != nil { return err } // If they supplied a token with the query, use that, otherwise use the // token passed in with the request. token := args.QueryOptions.Token if query.Token != "" { token = query.Token } if err := p.srv.filterACL(token, &reply.Nodes); err != nil { return err } // TODO (slackpad) We could add a special case here that will avoid the // fail over if we filtered everything due to ACLs. This seems like it // might not be worth the code complexity and behavior differences, // though, since this is essentially a misconfiguration. // Shuffle the results in case coordinates are not available if they // requested an RTT sort. reply.Nodes.Shuffle() if err := p.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes); err != nil { return err } // Apply the limit if given. if args.Limit > 0 && len(reply.Nodes) > args.Limit { reply.Nodes = reply.Nodes[:args.Limit] } // In the happy path where we found some healthy nodes we go with that // and bail out. 
Otherwise, we fail over and try remote DCs, as allowed // by the query setup. if len(reply.Nodes) == 0 { wrapper := &queryServerWrapper{p.srv} if err := queryFailover(wrapper, query, args.Limit, args.QueryOptions, reply); err != nil { return err } } return nil }