Example #1
// clusteringHandler handles doing the actual k-means clustering.
//
// The return format is JSON of the form:
//
// {
//   "Clusters": [
//     {
//       "Keys": [
//          "x86:GeForce320M:MacMini4.1:Mac10.8:GM_varied_text_clipped_no_lcd_640_480:8888",...],
//       "ParamSummaries": [
//           [{"Value": "Win8", "Weight": 15}, {"Value": "Android", "Weight": 14}, ...]
//       ],
//       "StepFit": {
//          "LeastSquares":0.0006582442047814354,
//          "TurningPoint":162,
//          "StepSize":0.023272272692293046,
//          "Regression": 35.3
//       },
//       "Traces": [[[0, -0.00007967326606768456], [1, 0.011877665949459049], [2, 0.012158129176717419],...]]
//     },
//     ...
//   ],
//   "K": 5,
//   "StdDevThreshhold": 0.1
// }
//
// Note that Keys contains all the keys, while Traces only contains traces of
// the N closest cluster members and the centroid.
//
// Takes the following query parameters:
//
//   _k      - The K to use for k-means clustering.
//   _stddev - The standard deviation to use when normalizing traces
//             during k-means clustering.
//   _issue  - The Rietveld issue ID with trybot results to include.
//
// Additionally accepts the rest of the query parameters as returned from
// sk.Query.selectionsAsQuery().
func clusteringHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Clustering Handler: %q\n", r.URL.Path)
	tile := masterTileBuilder.GetTile()
	w.Header().Set("Content-Type", "application/json")
	// If there are no query parameters just return with an empty set of ClusterSummaries.
	if r.FormValue("_k") == "" || r.FormValue("_stddev") == "" {
		writeClusterSummaries(clustering.NewClusterSummaries(), w, r)
		return
	}

	k, err := strconv.ParseInt(r.FormValue("_k"), 10, 32)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("_k parameter must be an integer %s.", r.FormValue("_k")))
		return
	}
	stddev, err := strconv.ParseFloat(r.FormValue("_stddev"), 64)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("_stddev parameter must be a float %s.", r.FormValue("_stddev")))
		return
	}

	issue := r.FormValue("_issue")
	var tryResults *types.TryBotResults
	if issue != "" {
		var err error
		tryResults, err = trybot.Get(issue)
		if err != nil {
			util.ReportError(w, r, err, fmt.Sprintf("Failed to get trybot data for clustering."))
			return
		}
	}

	delete(r.Form, "_k")
	delete(r.Form, "_stddev")
	delete(r.Form, "_issue")

	// Create a filter function for traces that match the query parameters and
	// optionally tryResults.
	filter := func(key string, tr *types.PerfTrace) bool {
		if tryResults != nil {
			if _, ok := tryResults.Values[key]; !ok {
				return false
			}
		}
		return tiling.Matches(tr, r.Form)
	}

	if issue != "" {
		if tile, err = trybot.TileWithTryData(tile, issue); err != nil {
			util.ReportError(w, r, err, fmt.Sprintf("Failed to get trybot data for clustering."))
			return
		}
	}
	summary, err := clustering.CalculateClusterSummaries(tile, int(k), stddev, filter)
	if err != nil {
		util.ReportError(w, r, err, "Failed to calculate clusters.")
		return
	}
	writeClusterSummaries(summary, w, r)
}
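
The handler above treats the underscore-prefixed parameters as control values and everything left in r.Form as the trace filter. A minimal standalone sketch of that pattern, with illustrative values rather than the real handler's plumbing:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	form, _ := url.ParseQuery("_k=5&_stddev=0.1&arch=Arm7&arch=x86")

	k, err := strconv.ParseInt(form.Get("_k"), 10, 32)
	if err != nil {
		fmt.Println("_k must be an integer:", err)
		return
	}
	stddev, err := strconv.ParseFloat(form.Get("_stddev"), 64)
	if err != nil {
		fmt.Println("_stddev must be a float:", err)
		return
	}

	// Strip the control parameters; whatever remains selects traces.
	delete(form, "_k")
	delete(form, "_stddev")

	fmt.Println(k, stddev, form.Encode()) // 5 0.1 arch=Arm7&arch=x86
}
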
Example #2
// kernelJSONHandler returns the data needed for displaying Kernel Density Estimates.
//
// The return format is JSON of the form:
//
// {
//   "commit1": {
//     "key1": 1.234,
//     "key2": 1.235,
//     "key3": 1.230,
//     "key4": 1.239,
//     ...
//   },
//   "commit2": {
//     "key1": 1.434,
//     "key2": 1.834,
//     "key3": 1.234,
//     "key4": 1.134,
//     ...
//   },
//   "missing": 5 // Number of missing values.
// }
//
// Takes the following query parameters:
//
//   commit1 - The hash for the first commit.
//   commit2 - The hash for the second commit.
//   query   - A paramset in URI query format used to filter the results at each commit.
//
func kernelJSONHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(jcgregorio) Determine the tile(s) to load based on the commit hashes,
	// possibly loading two different tiles, one for each hash.
	tile := masterTileBuilder.GetTile()
	commit1 := r.FormValue("commit1")
	commit2 := r.FormValue("commit2")

	// Calculate the indices where the commits fall in the tile.
	commit1Index := -1
	commit2Index := -1

	// Confirm that the two commits appear in the tile.
	for i, c := range tile.Commits {
		if c.Hash == commit1 {
			commit1Index = i
		}
		if c.Hash == commit2 {
			commit2Index = i
		}
	}
	if commit1Index == -1 || commit2Index == -1 {
		glog.Warningf("Commits %s[%d] %s[%d]", commit1, commit1Index, commit2, commit2Index)
		util.ReportError(w, r, fmt.Errorf("Failed to find commits in tile."), "Failed to find commits in tile.")
		return
	}
	w.Header().Set("Content-Type", "application/json")
	query := r.FormValue("query")
	q, err := url.ParseQuery(query)
	if err != nil {
		util.ReportError(w, r, err, fmt.Sprintf("Failed to parse query parameters."))
		return
	}
	ret := struct {
		Commit1 map[string]float64 `json:"commit1"`
		Commit2 map[string]float64 `json:"commit2"`
		Missing int32              `json:"missing"`
	}{
		Commit1: map[string]float64{},
		Commit2: map[string]float64{},
	}
	for key, tr := range tile.Traces {
		if tiling.Matches(tr, q) {
			if tr.IsMissing(commit1Index) || tr.IsMissing(commit2Index) {
				ret.Missing += 1
				continue
			}
			ret.Commit1[key] = tr.(*types.PerfTrace).Values[commit1Index]
			ret.Commit2[key] = tr.(*types.PerfTrace).Values[commit2Index]
		}
	}
	enc := json.NewEncoder(w)
	if err := enc.Encode(ret); err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
		return
	}
}
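
Resolving the two commit hashes to tile indices is a single linear scan over tile.Commits. A self-contained sketch of that lookup, using a plain slice of hash strings in place of the real Commit structs:

package main

import "fmt"

// indexOf returns the position of hash in commits, or -1 if it is absent,
// mirroring how kernelJSONHandler validates commit1 and commit2.
func indexOf(commits []string, hash string) int {
	for i, c := range commits {
		if c == hash {
			return i
		}
	}
	return -1
}

func main() {
	commits := []string{"aaa111", "bbb222", "ccc333"}
	fmt.Println(indexOf(commits, "bbb222")) // 1
	fmt.Println(indexOf(commits, "zzz999")) // -1
}
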
Example #3
// GetLastTileTrimmed returns the last tile, read-only, trimmed to contain at
// most NCommits. It caches trimmed tiles as long as the underlying tiles
// do not change.
//
// includeIgnores - If true then include ignored digests in the returned tile.
func (s *Storage) GetLastTileTrimmed(includeIgnores bool) (*tiling.Tile, error) {
	// Retrieve the most recent tile.
	tile := s.MasterTileBuilder.GetTile()

	s.mutex.Lock()
	defer s.mutex.Unlock()

	if s.NCommits <= 0 {
		return tile, nil
	}

	currentIgnoreRev := s.IgnoreStore.Revision()

	// Check if the tile hasn't changed and the ignores haven't changed.
	if s.lastTrimmedTile != nil && tile == s.lastTrimmedTile && s.lastTrimmedIgnoredTile != nil && currentIgnoreRev == s.lastIgnoreRev {
		if includeIgnores {
			return s.lastTrimmedTile, nil
		} else {
			return s.lastTrimmedIgnoredTile, nil
		}
	}

	ignores, err := s.IgnoreStore.List()
	if err != nil {
		return nil, fmt.Errorf("Failed to get ignores to filter tile: %s", err)
	}

	// Now copy the tile by value.
	retIgnoredTile := tile.Copy()

	// Then remove traces that should be ignored.
	ignoreQueries, err := ignore.ToQuery(ignores)
	if err != nil {
		return nil, err
	}
	for id, tr := range retIgnoredTile.Traces {
		for _, q := range ignoreQueries {
			if tiling.Matches(tr, q) {
				delete(retIgnoredTile.Traces, id)
				break // Already deleted; move on to the next trace.
			}
		}
	}

	// Cache this tile.
	s.lastIgnoreRev = currentIgnoreRev
	s.lastTrimmedTile = tile
	s.lastTrimmedIgnoredTile = retIgnoredTile
	fmt.Printf("Lengths: %d %d\n", len(s.lastTrimmedTile.Traces), len(s.lastTrimmedIgnoredTile.Traces))

	if includeIgnores {
		return s.lastTrimmedTile, nil
	} else {
		return s.lastTrimmedIgnoredTile, nil
	}
}
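
The cache above is keyed on two cheap identity checks: the tile pointer and the ignore-store revision. A reduced sketch of the same memoization shape, with hypothetical types standing in for the Storage fields:

package main

import (
	"fmt"
	"sync"
)

// cache memoizes an expensive derivation, keyed on the identity of the input
// pointer plus a revision counter, the same shape GetLastTileTrimmed uses
// with the tile and the ignore revision.
type cache struct {
	mutex   sync.Mutex
	lastIn  *int
	lastRev int64
	lastOut int
}

func (c *cache) get(in *int, rev int64, derive func(int) int) int {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	if in == c.lastIn && rev == c.lastRev {
		return c.lastOut // Neither input changed; reuse the cached result.
	}
	c.lastIn, c.lastRev = in, rev
	c.lastOut = derive(*in)
	return c.lastOut
}

func main() {
	v := 21
	c := &cache{}
	double := func(x int) int { return 2 * x }
	fmt.Println(c.get(&v, 1, double)) // computed
	fmt.Println(c.get(&v, 1, double)) // served from cache
	fmt.Println(c.get(&v, 2, double)) // revision bumped, recomputed
}
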
Example #4
// singleHandler is similar to /query/0/-1/traces?<param filters>, but takes an
// optional commit hash and returns a single value for each trace at that commit,
// or the latest value if a hash is not given or found. The resulting JSON is in
// SingleResponse format that looks like:
//
//  {
//    "traces": [
//      {
//        "val": 1.1,
//        "params": {"os": "Android", ...}
//      },
//      ...
//    ],
//    "hash": "abc123",
//  }
//
func singleHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Single Handler: %q\n", r.URL.Path)
	handlerStart := time.Now()
	match := singleHandlerPath.FindStringSubmatch(r.URL.Path)
	if r.Method != "GET" || match == nil || len(match) != 2 {
		http.NotFound(w, r)
		return
	}
	if err := r.ParseForm(); err != nil {
		util.ReportError(w, r, err, "Failed to parse query params.")
		return
	}
	hash := match[1]

	tileNum, idx, err := git.TileAddressFromHash(hash, time.Time(ingester.BEGINNING_OF_TIME))
	if err != nil {
		glog.Infof("Did not find hash '%s', use latest: %q.\n", hash, err)
		tileNum = -1
		idx = -1
	}
	glog.Infof("Hash: %s tileNum: %d, idx: %d\n", hash, tileNum, idx)
	tile, err := getTile(0, tileNum)
	if err != nil {
		util.ReportError(w, r, err, "Failed retrieving tile.")
		return
	}

	if idx < 0 {
		idx = len(tile.Commits) - 1 // Defaults to the last slice element.
	}
	glog.Infof("Tile: %d; Idx: %d\n", tileNum, idx)

	ret := SingleResponse{
		Traces: []*SingleTrace{},
		Hash:   tile.Commits[idx].Hash,
	}
	for _, tr := range tile.Traces {
		if tiling.Matches(tr, r.Form) {
			v, err := vec.FillAt(tr.(*types.PerfTrace).Values, idx)
			if err != nil {
				util.ReportError(w, r, err, "Error while getting value at slice index.")
				return
			}
			t := &SingleTrace{
				Val:    v,
				Params: tr.Params(),
			}
			ret.Traces = append(ret.Traces, t)
		}
	}
	w.Header().Set("Content-Type", "application/json")
	enc := json.NewEncoder(w)
	if err := enc.Encode(ret); err != nil {
		glog.Errorf("Failed to write or encode output: %s", err)
	}
	glog.Infoln("Total handler time: ", time.Since(handlerStart).Nanoseconds())
}
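
The hash is pulled out of the URL with a package-level regexp, and the match == nil || len(match) != 2 guard rejects paths without exactly one captured group. A standalone sketch of that extraction; the pattern below is an assumption, not the real singleHandlerPath:

package main

import (
	"fmt"
	"regexp"
)

// A guessed stand-in for singleHandlerPath: /single/<hash>, one capture group.
var singleHandlerPath = regexp.MustCompile(`^/single/([a-f0-9]+)$`)

func main() {
	for _, path := range []string{"/single/abc123", "/single/", "/other/abc"} {
		match := singleHandlerPath.FindStringSubmatch(path)
		if match == nil || len(match) != 2 {
			fmt.Printf("%s -> no hash\n", path)
			continue
		}
		fmt.Printf("%s -> hash %q\n", path, match[1])
	}
}
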
Example #5
// searchTile queries across a tile.
func searchTile(q *Query, e *expstorage.Expectations, parsedQuery url.Values, storages *storage.Storage, tile *tiling.Tile, tallies *tally.Tallies, blamer *blame.Blamer, paramset *paramsets.Summary) ([]*Digest, []*tiling.Commit, error) {
	// TODO Use CommitRange to create a trimmed tile.

	traceTally := tallies.ByTrace()
	lastCommitIndex := tile.LastCommitIndex()

	// Loop over the tile and pull out all the digests that match
	// the query, collecting the matching traces as you go. Build
	// up a set of intermediates that can then be used to calculate
	// Digests.

	// map [test:digest] *intermediate
	inter := map[string]*intermediate{}
	for id, tr := range tile.Traces {
		if tiling.Matches(tr, parsedQuery) {
			test := tr.Params()[types.PRIMARY_KEY_FIELD]
			// Get all the digests
			digests := digestsFromTrace(id, tr, q.Head, lastCommitIndex, traceTally)
			for _, digest := range digests {
				cl := e.Classification(test, digest)
				if q.excludeClassification(cl) {
					continue
				}

				// Fix blamer to make this easier.
				if q.BlameGroupID != "" {
					if cl == types.UNTRIAGED {
						b := blamer.GetBlame(test, digest, tile.Commits)
						if q.BlameGroupID != blameGroupID(b, tile.Commits) {
							continue
						}
					} else {
						continue
					}
				}
				key := fmt.Sprintf("%s:%s", test, digest)
				if i, ok := inter[key]; !ok {
					inter[key] = newIntermediate(test, digest, id, tr, digests)
				} else {
					i.addTrace(id, tr, digests)
				}
			}
		}
	}
	// Now loop over all the intermediates and build a Digest for each one.
	ret := make([]*Digest, 0, len(inter))
	for key, i := range inter {
		parts := strings.Split(key, ":")
		ret = append(ret, digestFromIntermediate(parts[0], parts[1], i, e, tile, tallies, blamer, storages.DiffStore, paramset, q.IncludeIgnores))
	}
	return ret, tile.Commits, nil
}
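
The inter map groups results under a composite "test:digest" string key that is split back apart when the Digests are built. A self-contained sketch of that group-by shape:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// (test, digest) pairs as they might stream out of the trace loop.
	pairs := [][2]string{
		{"blur", "d1"}, {"blur", "d1"}, {"blur", "d2"}, {"clip", "d1"},
	}

	// Group under a composite "test:digest" key, as searchTile does.
	counts := map[string]int{}
	for _, p := range pairs {
		counts[p[0]+":"+p[1]]++
	}

	// Split the key back apart when building the final results.
	for key, n := range counts {
		parts := strings.SplitN(key, ":", 2)
		fmt.Printf("test=%s digest=%s seen=%d\n", parts[0], parts[1], n)
	}
}

A struct key such as struct{ test, digest string } would avoid the Split entirely; the string key is safe only as long as test names never contain a colon.
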
Example #6
// FromTile calculates a Quartiles from the values at the first commit in the
// given tile, considering only traces that match the given query 'q'.
func FromTile(tile *tiling.Tile, tilestats *tilestats.TileStats, q url.Values) *Quartiles {
	ret := &Quartiles{
		TotalTraces: len(tile.Traces),
	}
	imp := map[string]map[string]string{}
	q1 := map[string]map[string]string{}
	q2 := map[string]map[string]string{}
	q3 := map[string]map[string]string{}
	q4 := map[string]map[string]string{}
	reg := map[string]map[string]string{}
	tilestats.RLock()
	defer tilestats.RUnLock()
	for traceid, tr := range tile.Traces {
		if tiling.Matches(tr, q) {
			st, ok := tilestats.TraceStats(traceid)
			if !ok {
				continue
			}
			ret.Matches += 1
			value := tr.(*types.PerfTrace).Values[0]
			iqr := st.Q3 - st.Q1
			if value < (st.Q1 - 1.5*iqr) {
				imp[traceid] = tr.Params()
			} else if value < st.Q1 {
				q1[traceid] = tr.Params()
			} else if value < st.Q2 {
				q2[traceid] = tr.Params()
			} else if value < st.Q3 {
				q3[traceid] = tr.Params()
			} else if value < (st.Q3 + 1.5*iqr) {
				q4[traceid] = tr.Params()
			} else {
				reg[traceid] = tr.Params()
			}
		} else {
			ret.Missing += 1
		}
	}

	ret.Improvements = kmlabel.ClusterAndDescribe(tile.ParamSet, imp, ret.Matches)
	ret.Q1 = kmlabel.ClusterAndDescribe(tile.ParamSet, q1, ret.Matches)
	ret.Q2 = kmlabel.ClusterAndDescribe(tile.ParamSet, q2, ret.Matches)
	ret.Q3 = kmlabel.ClusterAndDescribe(tile.ParamSet, q3, ret.Matches)
	ret.Q4 = kmlabel.ClusterAndDescribe(tile.ParamSet, q4, ret.Matches)
	ret.Regressions = kmlabel.ClusterAndDescribe(tile.ParamSet, reg, ret.Matches)
	return ret
}
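
The chain of comparisons above is a standard 1.5*IQR fence classification. A self-contained restatement of just that fence logic, with Q1, Q2, Q3 passed in where FromTile reads them from TraceStats:

package main

import "fmt"

// bucket classifies value against precomputed quartiles using the same
// 1.5*IQR fences as FromTile.
func bucket(value, q1, q2, q3 float64) string {
	iqr := q3 - q1
	switch {
	case value < q1-1.5*iqr:
		return "improvement" // Below the lower fence.
	case value < q1:
		return "q1"
	case value < q2:
		return "q2"
	case value < q3:
		return "q3"
	case value < q3+1.5*iqr:
		return "q4"
	default:
		return "regression" // At or above the upper fence.
	}
}

func main() {
	// With Q1=1, Q2=2, Q3=3 the IQR is 2, so the fences sit at -2 and 6.
	for _, v := range []float64{-3, 0.5, 1.5, 2.5, 4, 7} {
		fmt.Println(v, "->", bucket(v, 1, 2, 3))
	}
}
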
Example #7
// tallyBy does the actual work of ByQuery.
func tallyBy(tile *tiling.Tile, traceTally map[string]Tally, query url.Values) Tally {
	ret := Tally{}
	for k, tr := range tile.Traces {
		if tiling.Matches(tr, query) {
			if _, ok := traceTally[k]; !ok {
				continue
			}
			for digest, n := range traceTally[k] {
				// Absent keys read as zero, so a plain += merges correctly.
				ret[digest] += n
			}
		}
	}
	return ret
}
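
Because Tally is a map type and Go map reads return the zero value for absent keys, the per-digest merge needs no existence check. A tiny demonstration of the idiom:

package main

import "fmt"

func main() {
	a := map[string]int{"d1": 3, "d2": 1}
	b := map[string]int{"d2": 2, "d3": 5}

	// Merge b into a; absent keys read as 0, so a plain += is enough.
	for digest, n := range b {
		a[digest] += n
	}
	fmt.Println(a) // map[d1:3 d2:3 d3:5]
}
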
Example #8
// filterFunc is a Func that returns a filtered set of Traces from the Tile in
// the Context.
//
// It expects a single argument that is a string in URL query format, e.g.
// os=Ubuntu12&config=8888.
func (FilterFunc) Eval(ctx *Context, node *Node) ([]*types.PerfTrace, error) {
	if len(node.Args) != 1 {
		return nil, fmt.Errorf("filter() takes a single argument.")
	}
	if node.Args[0].Typ != NodeString {
		return nil, fmt.Errorf("filter() takes a string argument.")
	}
	query, err := url.ParseQuery(node.Args[0].Val)
	if err != nil {
		return nil, fmt.Errorf("filter() arg not a valid URL query parameter: %s", err)
	}
	traces := []*types.PerfTrace{}
	for id, tr := range ctx.Tile.Traces {
		if tiling.Matches(tr.(*types.PerfTrace), query) {
			cp := tr.DeepCopy()
			cp.Params()["id"] = tiling.AsCalculatedID(id)
			traces = append(traces, cp.(*types.PerfTrace))
		}
	}
	return traces, nil
}
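
Eval deep-copies each matching trace before tagging it with a calculated id, so the traces stored in the tile are never mutated. A reduced sketch of that copy-then-tag step using plain maps:

package main

import "fmt"

// copyParams returns an independent copy of params, mirroring why Eval calls
// DeepCopy before writing the "id" key.
func copyParams(params map[string]string) map[string]string {
	cp := make(map[string]string, len(params))
	for k, v := range params {
		cp[k] = v
	}
	return cp
}

func main() {
	orig := map[string]string{"os": "Ubuntu12", "config": "8888"}
	cp := copyParams(orig)
	cp["id"] = "!calculated"                    // Tag only the copy.
	fmt.Printf("%q %q\n", orig["id"], cp["id"]) // "" "!calculated"
}
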
Example #9
// Search returns a slice of DigestInfo with all the digests that match the given query parameters.
//
// Note that unlike CalcSummaries the results aren't restricted by test name.
// Also note that the result can include positive and negative digests.
func (s *Summaries) Search(query string, includeIgnores bool, head bool, pos bool, neg bool, unt bool) ([]DigestInfo, error) {
	t := timer.New("Search:GetLastTileTrimmed")
	tile, err := s.storages.GetLastTileTrimmed(includeIgnores)
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}
	q, err := url.ParseQuery(query)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse Query in Search: %s", err)
	}

	t = timer.New("Search:Expectations")
	e, err := s.storages.ExpectationsStore.Get()
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}

	// Filter down to just the traces we are interested in, based on query.
	filtered := map[string]tiling.Trace{}
	t = timer.New("Filter Traces")
	for id, tr := range tile.Traces {
		if tiling.Matches(tr, q) {
			filtered[id] = tr
		}
	}
	t.Stop()

	traceTally := s.tallies.ByTrace()

	// Find all test:digest's in the filtered traces.
	matches := map[string]bool{}
	t = timer.New("Tally up the filtered traces")
	lastCommitIndex := tile.LastCommitIndex()
	for id, trace := range filtered {
		test := trace.Params()[types.PRIMARY_KEY_FIELD]
		if head {
			// Find the last non-missing value in the trace.
			for i := lastCommitIndex; i >= 0; i-- {
				if !trace.IsMissing(i) {
					matches[test+":"+trace.(*types.GoldenTrace).Values[i]] = true
					break
				}
			}
		} else {
			if t, ok := traceTally[id]; ok {
				for d := range t {
					matches[test+":"+d] = true
				}
			}
		}
	}
	t.Stop()

	// Now create DigestInfo for each test:digest found, filtering out
	// digests that don't match the triage classification.
	ret := []DigestInfo{}
	for key := range matches {
		testDigest := strings.Split(key, ":")
		if len(testDigest) != 2 {
			glog.Errorf("Invalid test name or digest value: %s", key)
			continue
		}
		test := testDigest[0]
		digest := testDigest[1]
		class := e.Classification(test, digest)
		switch {
		case class == types.NEGATIVE && !neg:
			continue
		case class == types.POSITIVE && !pos:
			continue
		case class == types.UNTRIAGED && !unt:
			continue
		}
		ret = append(ret, DigestInfo{
			Test:   test,
			Digest: digest,
		})
	}

	return ret, nil
}
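
When head is true, each trace is scanned backwards from the last commit for its most recent digest. A standalone sketch of that scan, with an empty string standing in for Trace.IsMissing:

package main

import "fmt"

const missing = "" // Stand-in sentinel for Trace.IsMissing.

// lastValue returns the most recent non-missing value at or before lastIdx,
// or false if every slot is missing.
func lastValue(values []string, lastIdx int) (string, bool) {
	for i := lastIdx; i >= 0; i-- {
		if values[i] != missing {
			return values[i], true
		}
	}
	return "", false
}

func main() {
	values := []string{"d1", missing, "d2", missing, missing}
	v, ok := lastValue(values, len(values)-1)
	fmt.Println(v, ok) // d2 true
}
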
Example #10
// CalcSummaries returns a Summary for each test that matches the given input filters.
//
// testNames
//   If not nil or empty then restrict the results to only tests that appear in this slice.
// query
//   URL encoded paramset to use for filtering.
// includeIgnores
//   Boolean, if true then include all digests in the results, including ones normally hidden
//   by the ignores list.
// head
//   Only consider digests at head if true.
//
func (s *Summaries) CalcSummaries(testNames []string, query string, includeIgnores bool, head bool) (map[string]*Summary, error) {
	defer timer.New("CalcSummaries").Stop()
	glog.Infof("CalcSummaries: includeIgnores %v head %v", includeIgnores, head)

	t := timer.New("CalcSummaries:GetLastTileTrimmed")
	tile, err := s.storages.GetLastTileTrimmed(includeIgnores)
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}
	q, err := url.ParseQuery(query)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse Query in CalcSummaries: %s", err)
	}

	ret := map[string]*Summary{}

	t = timer.New("CalcSummaries:Expectations")
	e, err := s.storages.ExpectationsStore.Get()
	t.Stop()
	if err != nil {
		return nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}

	// Filter down to just the traces we are interested in, based on query.
	filtered := map[string][]*TraceID{}
	t = timer.New("Filter Traces")
	for id, tr := range tile.Traces {
		name := tr.Params()[types.PRIMARY_KEY_FIELD]
		if len(testNames) > 0 && !util.In(name, testNames) {
			continue
		}
		if tiling.Matches(tr, q) {
			// append on a nil map entry allocates, so no existence check is needed.
			filtered[name] = append(filtered[name], &TraceID{tr: tr, id: id})
		}
	}
	t.Stop()

	traceTally := s.tallies.ByTrace()

	// Now create summaries for each test using the filtered set of traces.
	t = timer.New("Tally up the filtered traces")
	lastCommitIndex := tile.LastCommitIndex()
	for name, traces := range filtered {
		digests := map[string]bool{}
		corpus := ""
		for _, trid := range traces {
			corpus = trid.tr.Params()["source_type"]
			if head {
				// Find the last non-missing value in the trace.
				for i := lastCommitIndex; i >= 0; i-- {
					if !trid.tr.IsMissing(i) {
						digests[trid.tr.(*types.GoldenTrace).Values[i]] = true
						break
					}
				}
			} else {
				// Use the traceTally if available, otherwise just inspect the trace.
				if t, ok := traceTally[trid.id]; ok {
					for k := range t {
						digests[k] = true
					}
				} else {
					for i := lastCommitIndex; i >= 0; i-- {
						if !trid.tr.IsMissing(i) {
							digests[trid.tr.(*types.GoldenTrace).Values[i]] = true
						}
					}
				}
			}
		}
		ret[name] = s.makeSummary(name, e, s.storages.DiffStore, corpus, util.KeysOfStringSet(digests))
	}
	t.Stop()

	return ret, nil
}
Example #11
// queryHandler handles queries for and about traces.
//
// Queries look like:
//
//     /query/0/-1/?arch=Arm7&arch=x86&scale=1
//
// Where the keys and values in the query params are from the ParamSet.
// Repeated parameters are matched via OR. I.e. the above query will include
// anything that has an arch of Arm7 or x86.
//
// The first two path parameters are tile scale and tile number, where -1 means
// the last tile at the given scale.
//
// The normal response is JSON of the form:
//
// {
//   "matches": 187,
// }
//
// If the path is:
//
//    /query/0/-1/traces/?arch=Arm7&arch=x86&scale=1
//
// Then the response is the set of traces that match that query.
//
//  {
//    "traces": [
//      {
//        // All of these keys and values should be exactly what Flot consumes.
//        data: [[1, 1.1], [20, 30]],
//        label: "key1",
//        _params: {"os": "Android", ...}
//      },
//      {
//        data: [[1.2, 2.1], [20, 35]],
//        label: "key2",
//        _params: {"os": "Android", ...}
//      }
//    ]
//  }
//
// If the path is:
//
//    /query/0/-1/traces/?__shortcut=11
//
// Then the traces in the shortcut with that ID are returned, along with the
// git hash at the step function, if the shortcut came from an alert.
//
//  {
//    "traces": [
//      {
//        // All of these keys and values should be exactly what Flot consumes.
//        data: [[1, 1.1], [20, 30]],
//        label: "key1",
//        _params: {"os": "Android", ...}
//      },
//      ...
//    ],
//    "hash": "a012334...",
//  }
//
// TODO Add ability to query across a range of tiles.
func queryHandler(w http.ResponseWriter, r *http.Request) {
	glog.Infof("Query Handler: %q\n", r.URL.Path)
	match := queryHandlerPath.FindStringSubmatch(r.URL.Path)
	glog.Infof("%#v", match)
	if r.Method != "GET" || match == nil || len(match) != 4 {
		http.NotFound(w, r)
		return
	}
	if err := r.ParseForm(); err != nil {
		util.ReportError(w, r, err, "Failed to parse query params.")
		return
	}
	tileScale, err := strconv.ParseInt(match[1], 10, 0)
	if err != nil {
		util.ReportError(w, r, err, "Failed parsing tile scale.")
		return
	}
	tileNumber, err := strconv.ParseInt(match[2], 10, 0)
	if err != nil {
		util.ReportError(w, r, err, "Failed parsing tile number.")
		return
	}
	glog.Infof("tile: %d %d", tileScale, tileNumber)
	tile := masterTileBuilder.GetTile()
	w.Header().Set("Content-Type", "application/json")
	ret := &QueryResponse{
		Traces: []*tiling.TraceGUI{},
		Hash:   "",
	}
	if match[3] == "" {
		// We only want the count.
		total := 0
		for _, tr := range tile.Traces {
			if tiling.Matches(tr, r.Form) {
				total++
			}
		}
		glog.Info("Count: ", total)
		inc := json.NewEncoder(w)
		if err := inc.Encode(map[string]int{"matches": total}); err != nil {
			glog.Errorf("Failed to write or encode output: %s", err)
			return
		}
	} else {
		// We want the matching traces.
		shortcutID := r.Form.Get("__shortcut")
		if shortcutID != "" {
			sh, err := shortcut.Get(shortcutID)
			if err != nil {
				http.NotFound(w, r)
				return
			}
			if sh.Issue != "" {
				if tile, err = trybot.TileWithTryData(tile, sh.Issue); err != nil {
					util.ReportError(w, r, err, "Failed to populate shortcut data with trybot result.")
					return
				}
			}
			ret.Hash = sh.Hash
			for _, k := range sh.Keys {
				if tr, ok := tile.Traces[k]; ok {
					tg := traceGuiFromTrace(tr.(*types.PerfTrace), k, tile)
					if tg != nil {
						ret.Traces = append(ret.Traces, tg)
					}
				} else if tiling.IsFormulaID(k) {
					// Re-evaluate the formula and add all the results to the response.
					formula := tiling.FormulaFromID(k)
					if err := addCalculatedTraces(ret, tile, formula); err != nil {
						glog.Errorf("Failed evaluating formula (%q) while processing shortcut %s: %s", formula, shortcutID, err)
					}
				} else if strings.HasPrefix(k, "!") {
					glog.Errorf("A calculated trace is slipped through: (%s) in shortcut %s: %s", k, shortcutID, err)
				}
			}
		} else {
			for key, tr := range tile.Traces {
				if tiling.Matches(tr, r.Form) {
					tg := traceGuiFromTrace(tr.(*types.PerfTrace), key, tile)
					if tg != nil {
						ret.Traces = append(ret.Traces, tg)
					}
				}
			}
		}
		enc := json.NewEncoder(w)
		if err := enc.Encode(ret); err != nil {
			glog.Errorf("Failed to write or encode output: %s", err)
			return
		}
	}
}
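
The doc comment above states the matching rule: repeated values for a key are OR'd, separate keys are AND'd. A self-contained predicate with those semantics; the real logic lives in tiling.Matches, so this is an illustration rather than its source:

package main

import (
	"fmt"
	"net/url"
)

// matches reports whether params satisfies q: for every key in q the trace's
// value must equal one of the listed values (OR within a key, AND across keys).
func matches(params map[string]string, q url.Values) bool {
	for key, wanted := range q {
		ok := false
		for _, w := range wanted {
			if params[key] == w {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	q, _ := url.ParseQuery("arch=Arm7&arch=x86&scale=1")
	fmt.Println(matches(map[string]string{"arch": "x86", "scale": "1"}, q))  // true
	fmt.Println(matches(map[string]string{"arch": "Mips", "scale": "1"}, q)) // false
}
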
Example #12
// imgInfo returns a populated slice of PolyTestImgInfo based on the filter and
// queryString passed in.
//
// max may be set to -1, which means the response digest slice is not truncated.
// If sortAgainstHash is true then the result will be sorted in direction 'dir' versus the given 'digest',
// otherwise the results will be sorted in terms of ascending N.
//
// If head is true then only return digests that appear at head.
func imgInfo(filter, queryString, testName string, e types.TestClassification, max int, includeIgnores bool, sortAgainstHash bool, dir string, digest string, head bool) ([]*PolyTestImgInfo, int, error) {
	query, err := url.ParseQuery(queryString)
	if err != nil {
		return nil, 0, fmt.Errorf("Failed to parse Query in imgInfo: %s", err)
	}
	query[types.PRIMARY_KEY_FIELD] = []string{testName}

	t := timer.New("finding digests")
	digests := map[string]int{}
	if head {
		tile, err := storages.GetLastTileTrimmed(includeIgnores)
		if err != nil {
			return nil, 0, fmt.Errorf("Failed to retrieve tallies in imgInfo: %s", err)
		}
		lastCommitIndex := tile.LastCommitIndex()
		for _, tr := range tile.Traces {
			if tiling.Matches(tr, query) {
				for i := lastCommitIndex; i >= 0; i-- {
					if !tr.IsMissing(i) {
						digests[tr.(*types.GoldenTrace).Values[i]] = 1
						break
					}
				}
			}
		}
	} else {
		digests, err = tallies.ByQuery(query, includeIgnores)
		if err != nil {
			return nil, 0, fmt.Errorf("Failed to retrieve tallies in imgInfo: %s", err)
		}
	}
	glog.Infof("Num Digests: %d", len(digests))
	t.Stop()

	// If we are going to sort against a digest then we need to calculate
	// the diff metrics against that digest.
	diffMetrics := map[string]*diff.DiffMetrics{}
	if sortAgainstHash {
		// Preallocate capacity only; append then fills the slice.
		digestSlice := make([]string, 0, len(digests))
		for d := range digests {
			digestSlice = append(digestSlice, d)
		}
		var err error
		diffMetrics, err = storages.DiffStore.Get(digest, digestSlice)
		if err != nil {
			return nil, 0, fmt.Errorf("Failed to calculate diffs to sort against: %s", err)
		}
	}

	label := types.LabelFromString(filter)
	// Now filter digests by their expectation status.
	t = timer.New("apply expectations")
	ret := []*PolyTestImgInfo{}
	for digest, n := range digests {
		if e[digest] != label {
			continue
		}
		p := &PolyTestImgInfo{
			Digest: digest,
			N:      n,
		}
		if sortAgainstHash {
			p.PixelDiffPercent = diffMetrics[digest].PixelDiffPercent
		}
		ret = append(ret, p)
	}
	t.Stop()

	if sortAgainstHash {
		if dir == "asc" {
			sort.Sort(PolyTestImgInfoDiffAscSlice(ret))
		} else {
			sort.Sort(sort.Reverse(PolyTestImgInfoDiffAscSlice(ret)))
		}
	} else {
		sort.Sort(PolyTestImgInfoSlice(ret))
	}

	total := len(ret)
	if max > 0 && len(ret) > max {
		ret = ret[:max]
	}
	return ret, total, nil
}
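
PolyTestImgInfoSlice and PolyTestImgInfoDiffAscSlice are sort.Interface implementations, and the descending case simply wraps the ascending order in sort.Reverse. The same pattern in isolation, with a hypothetical struct:

package main

import (
	"fmt"
	"sort"
)

type imgInfo struct {
	Digest string
	N      int
}

// byN sorts ascending by N, analogous to PolyTestImgInfoSlice.
type byN []imgInfo

func (s byN) Len() int           { return len(s) }
func (s byN) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byN) Less(i, j int) bool { return s[i].N < s[j].N }

func main() {
	infos := []imgInfo{{"d1", 3}, {"d2", 1}, {"d3", 2}}
	sort.Sort(byN(infos)) // ascending by N
	fmt.Println(infos)
	sort.Sort(sort.Reverse(byN(infos))) // descending, as imgInfo does when dir != "asc"
	fmt.Println(infos)
}
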
Example #13
// GetLastTileTrimmed returns the last tile, read-only, trimmed to contain at
// most NCommits. It caches trimmed tiles as long as the underlying tiles
// do not change.
//
// includeIgnores - If true then include ignored digests in the returned tile.
func (s *Storage) GetLastTileTrimmed(includeIgnores bool) (*tiling.Tile, error) {
	// Get the last (potentially cached) tile.
	tile, err := s.TileStore.Get(0, -1)
	if err != nil {
		return nil, err
	}

	s.mutex.Lock()
	defer s.mutex.Unlock()

	if s.NCommits <= 0 {
		return tile, err
	}

	currentIgnoreRev := s.IgnoreStore.Revision()

	// Check if the tile hasn't changed and the ignores haven't changed.
	if tile == s.lastBaseTile && s.lastTrimmedTile != nil && s.lastTrimmedIgnoredTile != nil && currentIgnoreRev == s.lastIgnoreRev {
		if includeIgnores {
			return s.lastTrimmedTile, nil
		} else {
			return s.lastTrimmedIgnoredTile, nil
		}
	}

	ignores, err := s.IgnoreStore.List()
	if err != nil {
		return nil, fmt.Errorf("Failed to get ignores to filter tile: %s", err)
	}

	// Build a new trimmed tile and a second trimmed tile with all ignorable traces removed.
	tileLen := tile.LastCommitIndex() + 1

	// First build the new trimmed tile.
	retTile, err := tile.Trim(util.MaxInt(0, tileLen-s.NCommits), tileLen)
	if err != nil {
		return nil, err
	}

	// Now copy the tile by value.
	retIgnoredTile := retTile.Copy()

	// Then remove traces that should be ignored.
	ignoreQueries, err := ignore.ToQuery(ignores)
	if err != nil {
		return nil, err
	}
	for id, tr := range retIgnoredTile.Traces {
		for _, q := range ignoreQueries {
			if tiling.Matches(tr, q) {
				delete(retIgnoredTile.Traces, id)
				break // Already deleted; move on to the next trace.
			}
		}
	}

	// Cache this tile.
	s.lastIgnoreRev = currentIgnoreRev
	s.lastTrimmedTile = retTile
	s.lastTrimmedIgnoredTile = retIgnoredTile
	s.lastBaseTile = tile
	fmt.Printf("Lengths: %d %d\n", len(s.lastTrimmedTile.Traces), len(s.lastTrimmedIgnoredTile.Traces))

	if includeIgnores {
		return s.lastTrimmedTile, nil
	} else {
		return s.lastTrimmedIgnoredTile, nil
	}
}
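
The trim keeps the newest NCommits by computing a half-open window [max(0, tileLen-NCommits), tileLen). A small standalone check of that arithmetic:

package main

import "fmt"

// trimBounds returns the [begin, end) window that keeps at most nCommits of
// the newest commits, matching the util.MaxInt expression above.
func trimBounds(tileLen, nCommits int) (begin, end int) {
	begin = tileLen - nCommits
	if begin < 0 {
		begin = 0
	}
	return begin, tileLen
}

func main() {
	fmt.Println(trimBounds(50, 10)) // 40 50
	fmt.Println(trimBounds(5, 10))  // 0 5: fewer commits than requested keeps them all
}
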
Example #14
// Search returns a slice of Digests that match the input query, and the total number of Digests
// that matched the query. It also returns a slice of Commits that were used in the calculations.
func Search(q *Query, storages *storage.Storage, tallies *tally.Tallies, blamer *blame.Blamer, paramset *paramsets.Summary) ([]*Digest, int, []*tiling.Commit, error) {
	tile, err := storages.GetLastTileTrimmed(q.IncludeIgnores)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}

	// TODO Use CommitRange to create a trimmed tile.

	parsedQuery, err := url.ParseQuery(q.Query)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Failed to parse Query in Search: %s", err)
	}
	e, err := storages.ExpectationsStore.Get()
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}
	traceTally := tallies.ByTrace()
	lastCommitIndex := tile.LastCommitIndex()

	// Loop over the tile and pull out all the digests that match
	// the query, collecting the matching traces as you go. Build
	// up a set of intermediates that can then be used to calculate
	// Digests.

	// map [test:digest] *intermediate
	inter := map[string]*intermediate{}
	for id, tr := range tile.Traces {
		if tiling.Matches(tr, parsedQuery) {
			test := tr.Params()[types.PRIMARY_KEY_FIELD]
			// Get all the digests
			digests := digestsFromTrace(id, tr, q.Head, lastCommitIndex, traceTally)
			for _, digest := range digests {
				cl := e.Classification(test, digest)
				switch {
				case cl == types.NEGATIVE && !q.Neg:
					continue
				case cl == types.POSITIVE && !q.Pos:
					continue
				case cl == types.UNTRIAGED && !q.Unt:
					continue
				}
				// Fix blamer to make this easier.
				if q.BlameGroupID != "" {
					if cl == types.UNTRIAGED {
						b := blamer.GetBlame(test, digest, tile.Commits)
						if q.BlameGroupID != blameGroupID(b, tile.Commits) {
							continue
						}
					} else {
						continue
					}
				}
				key := fmt.Sprintf("%s:%s", test, digest)
				if i, ok := inter[key]; !ok {
					inter[key] = newIntermediate(test, digest, id, tr, digests)
				} else {
					i.addTrace(id, tr, digests)
				}
			}
		}
	}
	// Now loop over all the intermediates and build a Digest for each one.
	ret := make([]*Digest, 0, len(inter))
	for key, i := range inter {
		parts := strings.Split(key, ":")
		ret = append(ret, digestFromIntermediate(parts[0], parts[1], i, e, tile, tallies, blamer, storages.DiffStore, paramset, q.IncludeIgnores))
	}

	sort.Sort(DigestSlice(ret))

	fullLength := len(ret)
	if fullLength > q.Limit {
		ret = ret[0:q.Limit]
	}

	return ret, fullLength, tile.Commits, nil
}
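
Search sorts the full result set and records its length before truncating to q.Limit, so callers get both the page and the total match count. The same shape in isolation:

package main

import (
	"fmt"
	"sort"
)

// topN sorts results and truncates to limit, returning the page and the
// pre-truncation total, as Search does with q.Limit.
func topN(results []string, limit int) ([]string, int) {
	sort.Strings(results)
	total := len(results)
	if total > limit {
		results = results[:limit]
	}
	return results, total
}

func main() {
	page, total := topN([]string{"c", "a", "d", "b"}, 2)
	fmt.Println(page, total) // [a b] 4
}
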