Example #1
// HandleQuery evaluates a query and returns an aggregated result set.
// See the README for the JSON structure of queries and results.
func (s *Server) HandleQuery(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	query, err := gumshoe.ParseJSONQuery(r.Body)
	if err != nil {
		WriteError(w, err, http.StatusBadRequest)
		return
	}
	rows, err := s.DB.GetQueryResult(query)
	if err != nil {
		WriteError(w, err, http.StatusBadRequest)
		return
	}
	elapsed := time.Since(start)
	statsd.Time("gumshoedb.query", elapsed)
	durationMS := int(elapsed.Seconds() * 1000)
	if r.URL.Query().Get("format") == "stream" {
		// Streaming format:
		// Header object: {"duration_ms": 123, "num_rows": 234}
		// Then num_rows row objects.
		header := map[string]int{
			"duration_ms": durationMS,
			"num_rows":    len(rows),
		}
		w.Header().Set("Content-Type", "application/json")
		encoder := json.NewEncoder(w)
		if err := encoder.Encode(header); err != nil {
			WriteError(w, err, http.StatusInternalServerError)
			return
		}
		for i, row := range rows {
			if err := encoder.Encode(row); err != nil {
				WriteError(w, err, http.StatusInternalServerError)
				return
			}
			// Flush periodically so the client starts receiving rows before the entire
			// result set has been encoded.
			if i%1000 == 0 {
				if f, ok := w.(http.Flusher); ok {
					f.Flush()
				}
			}
		}
		return
	}
	results := map[string]interface{}{
		"results":     rows,
		"duration_ms": durationMS,
	}
	WriteJSONResponse(w, results)
}
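The streaming branch above is what the router in Example #2 below consumes. As a rough illustration, a minimal standalone client might look like the following sketch; the host, port, and path are assumptions for the sketch (see the README for the real query JSON), and it simply decodes the header object followed by row objects until EOF.

// streamQuery posts a JSON query (see the README for its structure) to a gumshoedb
// server and prints each result row as it is decoded from the stream.
// (imports: encoding/json, fmt, io, net/http, strings)
func streamQuery(queryJSON string) error {
	resp, err := http.Post("http://localhost:9090/query?format=stream", "application/json",
		strings.NewReader(queryJSON))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("query failed: %s", resp.Status)
	}

	decoder := json.NewDecoder(resp.Body)
	// The first object in the stream is the header: {"duration_ms": ..., "num_rows": ...}.
	var header map[string]int
	if err := decoder.Decode(&header); err != nil {
		return err
	}
	fmt.Printf("%d rows in %dms\n", header["num_rows"], header["duration_ms"])

	// Then one JSON object per result row, until EOF.
	for {
		row := make(map[string]interface{})
		if err := decoder.Decode(&row); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		fmt.Println(row)
	}
}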
Example #2
func (r *Router) HandleQuery(w http.ResponseWriter, req *http.Request) {
	start := time.Now()
	queryID := randomID() // used to make tracking a single query through the logs easier
	if req.URL.Query().Get("format") != "" {
		WriteError(w, errors.New("non-standard query formats not supported"), http.StatusInternalServerError)
		return
	}
	query, err := gumshoe.ParseJSONQuery(req.Body)
	if err != nil {
		WriteError(w, err, http.StatusBadRequest)
		return
	}
	Log.Printf("[%s] got query: %s", queryID, query)
	for _, agg := range query.Aggregates {
		if agg.Type == gumshoe.AggregateAvg {
			// TODO(caleb): Handle as described in the doc.
			WriteError(w, errors.New("average aggregates not handled by the router"), http.StatusInternalServerError)
			return
		}
		if !r.validColumnName(agg.Column) {
			writeInvalidColumnError(w, agg.Column)
			return
		}
	}
	for _, grouping := range query.Groupings {
		if !r.validColumnName(grouping.Column) {
			writeInvalidColumnError(w, grouping.Column)
			return
		}
	}
	for _, filter := range query.Filters {
		if !r.validColumnName(filter.Column) {
			writeInvalidColumnError(w, filter.Column)
			return
		}
	}
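	// Re-encode the validated query once; every shard request reuses the same bytes.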
	b, err := json.Marshal(query)
	if err != nil {
		panic("unexpected marshal error")
	}
	var (
		wg     wait.Group
		mu     sync.Mutex // protects result, resultMap
		result []gumshoe.RowMap
		// the remaining fields are only used in the grouping case
		groupingCol        string
		groupingColIntConv bool
		resultMap          = make(map[interface{}]*lockedRowMap)
	)
	if len(query.Groupings) > 0 {
		groupingCol = query.Groupings[0].Name
		groupingColIntConv = r.convertColumnToIntegral(query.Groupings[0].Column)
	}
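	// Fan the query out to every shard concurrently; each goroutine streams its
	// shard's rows back and merges them into result / resultMap under mu.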
	for i := range r.Shards {
		i := i
		wg.Go(func(_ <-chan struct{}) error {
			shard := r.Shards[i]
			url := "http://" + shard + "/query?format=stream"
			resp, err := r.Client.Post(url, "application/json", bytes.NewReader(b))
			if err != nil {
				return err
			}
			defer resp.Body.Close()
			if resp.StatusCode != 200 {
				return NewHTTPError(resp, shard)
			}

			decoder := json.NewDecoder(resp.Body)
			// The first object in the stream is the header ({"duration_ms": ..., "num_rows": ...});
			// the router doesn't need it, so decode it and move on.
			var header map[string]int
			if err := decoder.Decode(&header); err != nil {
				return err
			}

			if len(query.Groupings) == 0 {
				// non-grouping case
				row := make(gumshoe.RowMap)
				if err := decoder.Decode(&row); err != nil {
					return err
				}
				mu.Lock()
				if len(result) == 0 {
					result = []gumshoe.RowMap{row}
				} else {
					r.mergeRows(result[0], row, query)
				}
				mu.Unlock()
				if err := decoder.Decode(&row); err != io.EOF {
					if err == nil {
						return errors.New("got multiple results for a non-group-by query")
					}
					return err
				}
				return nil
			}

			// grouping case
			var rowSize int
			for {
				row := make(gumshoe.RowMap, rowSize)
				if err := decoder.Decode(&row); err != nil {
					if err == io.EOF {
						break
					}
					return err
				}
				rowSize = len(row)
				groupByValue := row[groupingCol]
				// JSON numbers decode as float64; convert integral grouping columns back to
				// int64 so equal values from different shards land in the same map key.
				if groupingColIntConv && groupByValue != nil {
					groupByValue = int64(groupByValue.(float64))
				}
				mu.Lock()
				cur := resultMap[groupByValue]
				if cur == nil {
					resultMap[groupByValue] = &lockedRowMap{row: row}
					mu.Unlock()
					continue
				}
				// Hand-over-hand locking: take this row's lock, then release the map lock so
				// other goroutines can keep working on other groups.
				cur.mu.Lock()
				mu.Unlock()
				r.mergeRows(cur.row, row, query)
				cur.mu.Unlock()
			}
			return nil
		})
	}
	if err := wg.Wait(); err != nil {
		WriteError(w, err, http.StatusInternalServerError)
		return
	}

	// For the grouping case, we need to flatten the results from resultMap.
	if len(query.Groupings) > 0 {
		for _, lr := range resultMap {
			result = append(result, lr.row)
		}
	}

	Log.Printf("[%s] fetched and merged query results from %d shards in %s (%d combined rows)",
		queryID, len(r.Shards), time.Since(start), len(result))

	WriteJSONResponse(w, Result{
		Results:    result,
		DurationMS: int(time.Since(start).Seconds() * 1000),
	})
}
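Example #2 leans on two helpers that are not shown here, lockedRowMap and Router.mergeRows. Their real gumshoedb implementations are not reproduced above; the following is only a hypothetical sketch of the shapes their call sites imply, assuming result rows are keyed by the aggregate's output name, that the parsed query is a *gumshoe.Query, and that all aggregates merge by addition (averages are rejected by the router above, and min/max would need different logic).

// Hypothetical sketches inferred from the call sites above; field and key names
// are assumptions, not the actual gumshoedb definitions.

// lockedRowMap pairs a partially merged row with its own mutex so that goroutines
// merging into different groups don't serialize on the top-level map lock.
type lockedRowMap struct {
	mu  sync.Mutex
	row gumshoe.RowMap
}

// mergeRows folds src into dst by adding each aggregate column, assuming every
// aggregate (sum, count) is additive across shards.
func (r *Router) mergeRows(dst, src gumshoe.RowMap, query *gumshoe.Query) {
	for _, agg := range query.Aggregates {
		dstVal, _ := dst[agg.Name].(float64) // JSON numbers decode as float64
		srcVal, _ := src[agg.Name].(float64)
		dst[agg.Name] = dstVal + srcVal
	}
}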