Example #1
func digestFromIntermediate(test, digest string, inter *intermediate, e *expstorage.Expectations, tile *tiling.Tile, tallies *tally.Tallies, blamer *blame.Blamer, diffStore diff.DiffStore, paramset *paramsets.Summary, includeIgnores bool) *Digest {
	traceTally := tallies.ByTrace()
	ret := &Digest{
		Test:   test,
		Digest: digest,
		Status: e.Classification(test, digest).String(),
		Traces: buildTraces(test, digest, inter, e, tile, traceTally, paramset, includeIgnores),
		Diff:   buildDiff(test, digest, inter, e, tile, tallies.ByTest(), blamer, diffStore, paramset, includeIgnores),
	}
	return ret
}
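For reference, a minimal sketch of the Digest struct assembled above, with field types inferred from how the fields are used across these examples; Traces and DiffInfo here are placeholders, and the real definition may carry more fields and JSON tags:

// Digest is one search result: a digest observed for a test, plus its
// triage status and supporting detail. Field types are assumptions
// inferred from usage in these examples.
type Traces struct{}   // placeholder; the real type carries per-trace plot data
type DiffInfo struct{} // hypothetical name for the closest-diff metrics type

type Digest struct {
	Test     string              // test name
	Digest   string              // image digest (hash)
	Status   string              // "positive", "negative", or "untriaged"
	ParamSet map[string][]string // params of all traces that produced this digest
	Traces   *Traces             // filled in by buildTraces
	Diff     *DiffInfo           // filled in by buildDiff
}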
Example #2
// searchTile queries across a tile.
func searchTile(q *Query, e *expstorage.Expectations, parsedQuery url.Values, storages *storage.Storage, tile *tiling.Tile, tallies *tally.Tallies, blamer *blame.Blamer, paramset *paramsets.Summary) ([]*Digest, []*tiling.Commit, error) {
	// TODO Use CommitRange to create a trimmed tile.

	traceTally := tallies.ByTrace()
	lastCommitIndex := tile.LastCommitIndex()

	// Loop over the tile and pull out all the digests that match
	// the query, collecting the matching traces as you go. Build
	// up a set of intermediates that can then be used to calculate
	// Digests.

	// map [test:digest] *intermediate
	inter := map[string]*intermediate{}
	for id, tr := range tile.Traces {
		if tiling.Matches(tr, parsedQuery) {
			test := tr.Params()[types.PRIMARY_KEY_FIELD]
			// Get all the digests
			digests := digestsFromTrace(id, tr, q.Head, lastCommitIndex, traceTally)
			for _, digest := range digests {
				cl := e.Classification(test, digest)
				if q.excludeClassification(cl) {
					continue
				}

				// TODO: Fix blamer to make this easier.
				if q.BlameGroupID != "" {
					if cl == types.UNTRIAGED {
						b := blamer.GetBlame(test, digest, tile.Commits)
						if q.BlameGroupID != blameGroupID(b, tile.Commits) {
							continue
						}
					} else {
						continue
					}
				}
				key := fmt.Sprintf("%s:%s", test, digest)
				if i, ok := inter[key]; !ok {
					inter[key] = newIntermediate(test, digest, id, tr, digests)
				} else {
					i.addTrace(id, tr, digests)
				}
			}
		}
	}
	// Now loop over all the intermediates and build a Digest for each one.
	ret := make([]*Digest, 0, len(inter))
	for key, i := range inter {
		parts := strings.Split(key, ":")
		ret = append(ret, digestFromIntermediate(parts[0], parts[1], i, e, tile, tallies, blamer, storages.DiffStore, paramset, q.IncludeIgnores))
	}
	return ret, tile.Commits, nil
}
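The intermediate type is not shown in these examples. A minimal sketch consistent with how it is called above (newIntermediate(test, digest, id, tr, digests) and addTrace(id, tr, digests)); the field layout is an assumption:

// intermediate accumulates, per test:digest pair, every trace in which
// that digest was seen. Only the constructor and addTrace signatures are
// implied by the caller above; the fields are guesses.
type intermediate struct {
	Test    string
	Digest  string
	Traces  map[string]tiling.Trace // trace ID -> trace
	Digests map[string]bool         // all digests seen in those traces
}

func newIntermediate(test, digest, id string, tr tiling.Trace, digests []string) *intermediate {
	ret := &intermediate{
		Test:    test,
		Digest:  digest,
		Traces:  map[string]tiling.Trace{},
		Digests: map[string]bool{},
	}
	ret.addTrace(id, tr, digests)
	return ret
}

func (i *intermediate) addTrace(id string, tr tiling.Trace, digests []string) {
	i.Traces[id] = tr
	for _, d := range digests {
		i.Digests[d] = true
	}
}

Note that the map key is later split on ":" to recover test and digest, which assumes test names never contain a colon. Separately, q.excludeClassification presumably encapsulates the per-label filtering that Example #9 spells out as an explicit switch; under that assumption, and assuming Classification returns a types.Label:

// excludeClassification reports whether digests with the given triage
// label should be dropped, based on the query's Pos/Neg/Unt flags.
// This mirrors the explicit switch in Example #9.
func (q *Query) excludeClassification(cl types.Label) bool {
	return (cl == types.NEGATIVE && !q.Neg) ||
		(cl == types.POSITIVE && !q.Pos) ||
		(cl == types.UNTRIAGED && !q.Unt)
}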
Example #3
// searchByIssue searches across the given issue.
func searchByIssue(issue string, q *Query, exp *expstorage.Expectations, parsedQuery url.Values, storages *storage.Storage, tile *tiling.Tile, tallies *tally.Tallies, tileParamSet *paramsets.Summary) ([]*Digest, error) {
	trybotResults, err := storages.TrybotResults.Get(issue)
	if err != nil {
		return nil, err
	}

	if !q.IncludeIgnores {
		matcher, err := storages.IgnoreStore.BuildRuleMatcher()
		if err != nil {
			return nil, fmt.Errorf("Unable to build rules matcher: %s", err)
		}
		for k, v := range trybotResults {
			if _, ok := matcher(v.Params); ok {
				delete(trybotResults, k)
			}
		}
	}

	rule := ignore.NewQueryRule(parsedQuery)

	// Aggregate the results into an intermediate representation to avoid
	// passing over the dataset twice.
	inter := map[string]*issueIntermediate{}
	for _, tbr := range trybotResults {
		if rule.IsMatch(tbr.Params) {
			key := tbr.Test + ":" + tbr.Digest
			if found, ok := inter[key]; ok {
				found.add(tbr)
			} else if cl := exp.Classification(tbr.Test, tbr.Digest); !q.excludeClassification(cl) {
				inter[key] = newIssueIntermediate(tbr, cl)
			}
		}
	}

	// Build the output.
	talliesByTest := tallies.ByTest()
	ret := make([]*Digest, 0, len(inter))
	for _, i := range inter {
		ret = append(ret, &Digest{
			Test:     i.test,
			Digest:   i.digest,
			Status:   i.status.String(),
			ParamSet: i.paramSet,
			Diff:     buildDiff(i.test, i.digest, exp, tile, talliesByTest, nil, storages.DiffStore, tileParamSet, q.IncludeIgnores),
		})
	}

	return ret, nil
}
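issueIntermediate is likewise not shown. A sketch matching the calls above, using a placeholder result type that mirrors the fields the loop reads (Test, Digest, Params) and assuming Classification returns a types.Label:

// trybotResult is a placeholder mirroring the fields read from each
// trybot result above; the real type lives in the trybot package.
type trybotResult struct {
	Test   string
	Digest string
	Params map[string]string
}

// issueIntermediate groups trybot results by test:digest and merges
// their parameters into a single param set.
type issueIntermediate struct {
	test     string
	digest   string
	status   types.Label
	paramSet map[string][]string
}

func newIssueIntermediate(tbr *trybotResult, cl types.Label) *issueIntermediate {
	ret := &issueIntermediate{
		test:     tbr.Test,
		digest:   tbr.Digest,
		status:   cl,
		paramSet: map[string][]string{},
	}
	ret.add(tbr)
	return ret
}

func (i *issueIntermediate) add(tbr *trybotResult) {
	// Fold the result's params into the accumulated set, de-duplicating values.
	for k, v := range tbr.Params {
		exists := false
		for _, x := range i.paramSet[k] {
			if x == v {
				exists = true
				break
			}
		}
		if !exists {
			i.paramSet[k] = append(i.paramSet[k], v)
		}
	}
}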
Example #4
// New creates a new Summary.
//
// The Summary listens for change events from the tallies object
// and calculates new paramsets on every event.
func New(tallies *tally.Tallies, storages *storage.Storage) *Summary {
	byTrace, byTraceIncludeIgnored, err := oneStep(tallies, storages)
	if err != nil {
		glog.Fatalf("Failed to calculate first step of paramsets: %s", err)
	}

	s := &Summary{
		byTrace:               byTrace,
		byTraceIncludeIgnored: byTraceIncludeIgnored,
		tallies:               tallies,
		storages:              storages,
	}

	tallies.OnChange(s.updateParamSets)
	return s
}
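updateParamSets is registered above but not shown. A plausible implementation, assuming Summary guards its two maps with a mutex, simply re-runs oneStep and swaps the results in:

// updateParamSets is the change handler registered with tallies.OnChange.
// A sketch; the mutex field is an assumption.
func (s *Summary) updateParamSets() {
	byTrace, byTraceIncludeIgnored, err := oneStep(s.tallies, s.storages)
	if err != nil {
		glog.Errorf("Failed to update paramsets: %s", err)
		return
	}
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.byTrace = byTrace
	s.byTraceIncludeIgnored = byTraceIncludeIgnored
}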
Example #5
// oneStep does a single step, calculating all the paramsets from the latest tile and tallies.
//
// Returns the paramsets for the tile both with and without ignored traces included.
func oneStep(tallies *tally.Tallies, storages *storage.Storage) (map[string]map[string][]string, map[string]map[string][]string, error) {
	defer timer.New("paramsets").Stop()

	tile, err := storages.GetLastTileTrimmed(false)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to get tile: %s", err)
	}
	byTrace := byTraceForTile(tile, tallies.ByTrace())

	tile, err = storages.GetLastTileTrimmed(true)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to get tile: %s", err)
	}
	byTraceIncludeIgnored := byTraceForTile(tile, tallies.ByTrace())

	return byTrace, byTraceIncludeIgnored, nil
}
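byTraceForTile isn't shown either. A sketch of the aggregation it presumably performs, assuming tally.Tally is a map from digest to count: for every trace, fold the trace's params into a param set keyed by "test:digest", once per digest that trace produced.

func byTraceForTile(tile *tiling.Tile, traceTally map[string]tally.Tally) map[string]map[string][]string {
	ret := map[string]map[string][]string{}
	for id, tr := range tile.Traces {
		test := tr.Params()[types.PRIMARY_KEY_FIELD]
		for digest := range traceTally[id] {
			key := test + ":" + digest
			if ret[key] == nil {
				ret[key] = map[string][]string{}
			}
			// Merge this trace's params into the param set, de-duplicating values.
			for k, v := range tr.Params() {
				if !inSlice(ret[key][k], v) {
					ret[key][k] = append(ret[key][k], v)
				}
			}
		}
	}
	return ret
}

// inSlice reports whether s is already present in a.
func inSlice(a []string, s string) bool {
	for _, x := range a {
		if x == s {
			return true
		}
	}
	return false
}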
Example #6
// New creates a new instance of Summaries.
func New(storages *storage.Storage, tallies *tally.Tallies, blamer *blame.Blamer) (*Summaries, error) {
	s := &Summaries{
		storages: storages,
		tallies:  tallies,
		blamer:   blamer,
	}

	var err error
	s.summaries, err = s.CalcSummaries(nil, "", false, true)
	if err != nil {
		return nil, fmt.Errorf("Failed to calculate summaries in New: %s", err)
	}

	// TODO(jcgregorio) Move to a channel for tallies and then combine
	// this and the expStore handling into a single switch statement.
	tallies.OnChange(func() {
		summaries, err := s.CalcSummaries(nil, "", false, true)
		if err != nil {
			glog.Errorf("Failed to refresh summaries: %s", err)
			return
		}
		s.mutex.Lock()
		s.summaries = summaries
		s.mutex.Unlock()
	})

	storages.EventBus.SubscribeAsync(expstorage.EV_EXPSTORAGE_CHANGED, func(e interface{}) {
		testNames := e.([]string)
		glog.Info("Updating summaries after expectations change.")
		partialSummaries, err := s.CalcSummaries(testNames, "", false, true)
		if err != nil {
			glog.Errorf("Failed to refresh summaries: %s", err)
			return
		}
		s.mutex.Lock()
		for k, v := range partialSummaries {
			s.summaries[k] = v
		}
		s.mutex.Unlock()
	})
	return s, nil
}
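Example #7 below calls summaries.Get(); given the locking discipline in the handlers above, a minimal sketch would read the current map under the same mutex:

// Get returns the most recently calculated summaries.
func (s *Summaries) Get() map[string]*Summary {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.summaries
}

One caveat: the expectations handler mutates s.summaries in place, so a caller iterating a map obtained from Get while an update runs could race; the tallies handler avoids this by swapping in a freshly built map.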
Example #7
func Init(storages *storage.Storage, summaries *summary.Summaries, tallies *tally.Tallies) error {
	exp, err := storages.ExpectationsStore.Get()
	if err != nil {
		return err
	}
	go func() {
		oneRun := func() {
			t := timer.New("warmer one loop")
			for test, sum := range summaries.Get() {
				for _, digest := range sum.UntHashes {
					testTally := tallies.ByTest()[test]
					if testTally != nil {
						// Calculate the closest digests for the side effect of filling in the filediffstore cache.
						digesttools.ClosestDigest(test, digest, exp, testTally, storages.DiffStore, types.POSITIVE)
						digesttools.ClosestDigest(test, digest, exp, testTally, storages.DiffStore, types.NEGATIVE)
					}
				}
			}
			t.Stop()
			if newExp, err := storages.ExpectationsStore.Get(); err != nil {
				glog.Errorf("warmer: Failed to get expectations: %s", err)
			} else {
				exp = newExp
			}

			// Make sure all images are downloaded. This is necessary because
			// the front-end doesn't request URLs via DiffStore.AbsPath, which
			// is what normally ensures an image has been downloaded.
			// TODO(stephana): Remove this once the new diffstore is in place.
			tile, err := storages.GetLastTileTrimmed(true)
			if err != nil {
				glog.Errorf("Error retrieving tile: %s", err)
				return
			}
			tileLen := tile.LastCommitIndex() + 1
			traceDigests := make(map[string]bool, tileLen)
			for _, trace := range tile.Traces {
				gTrace := trace.(*types.GoldenTrace)
				for _, digest := range gTrace.Values {
					if digest != types.MISSING_DIGEST {
						traceDigests[digest] = true
					}
				}
			}

			digests := util.KeysOfStringSet(traceDigests)
			glog.Infof("FOUND %d digests to fetch.", len(digests))
			storages.DiffStore.AbsPath(digests)

			if err := warmTrybotDigests(storages, traceDigests); err != nil {
				glog.Errorf("Error retrieving trybot digests: %s", err)
				return
			}
		}

		oneRun()
		for range time.Tick(time.Minute) {
			oneRun()
		}
	}()
	return nil
}
Example #8
// searchByIssue searches across the given issue.
func searchByIssue(issue string, q *Query, exp *expstorage.Expectations, parsedQuery url.Values, storages *storage.Storage, tile *tiling.Tile, tallies *tally.Tallies, tileParamSet *paramsets.Summary) ([]*Digest, error) {
	trybotResults, err := storages.TrybotResults.Get(issue)
	if err != nil {
		return nil, err
	}

	// Get a matcher for the ignore rules if we filter ignores.
	var ignoreMatcher ignore.RuleMatcher
	if !q.IncludeIgnores {
		ignoreMatcher, err = storages.IgnoreStore.BuildRuleMatcher()
		if err != nil {
			return nil, fmt.Errorf("Unable to build rules matcher: %s", err)
		}
	}

	// Set up a rule to match the query.
	var queryRule ignore.QueryRule
	if len(parsedQuery) > 0 {
		queryRule = ignore.NewQueryRule(parsedQuery)
	}

	// Aggregate the results into an intermediate representation to avoid
	// passing over the dataset twice.
	inter := map[string]*issueIntermediate{}
	talliesByTest := tallies.ByTest()

	for _, bot := range trybotResults.Bots {
		for _, result := range bot.TestResults {
			expandedParams := util.CopyStringMap(bot.BotParams)
			util.AddParams(expandedParams, result.Params)

			if ignoreMatcher != nil {
				if _, ok := ignoreMatcher(expandedParams); ok {
					continue
				}
			}

			if queryRule == nil || queryRule.IsMatch(expandedParams) {
				testName := expandedParams[types.PRIMARY_KEY_FIELD]
				digest := trybotResults.Digests[result.DigestIdx]
				key := testName + ":" + digest
				if !q.IncludeMaster {
					if _, ok := talliesByTest[testName][digest]; ok {
						continue
					}
				}
				if found, ok := inter[key]; ok {
					found.add(expandedParams)
				} else if cl := exp.Classification(testName, digest); !q.excludeClassification(cl) {
					inter[key] = newIssueIntermediate(expandedParams, digest, cl)
				}
			}
		}
	}

	// Build the output and make sure the digests are cached on disk.
	digests := make(map[string]bool, len(inter))
	ret := make([]*Digest, 0, len(inter))
	emptyTraces := &Traces{}
	for _, i := range inter {
		ret = append(ret, &Digest{
			Test:     i.test,
			Digest:   i.digest,
			Status:   i.status.String(),
			ParamSet: i.paramSet,
			Diff:     buildDiff(i.test, i.digest, exp, tile, talliesByTest, nil, storages.DiffStore, tileParamSet, q.IncludeIgnores),
			Traces:   emptyTraces,
		})
		digests[i.digest] = true
	}

	// This ensures that all digests are cached on disk.
	storages.DiffStore.AbsPath(util.KeysOfStringSet(digests))
	return ret, nil
}
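The distinctive step in this version is merging the bot-level params with each result's own params before matching. util.CopyStringMap and util.AddParams aren't shown here; minimal self-contained equivalents would be:

// copyStringMap returns a shallow copy of m.
func copyStringMap(m map[string]string) map[string]string {
	ret := make(map[string]string, len(m))
	for k, v := range m {
		ret[k] = v
	}
	return ret
}

// addParams copies every key/value in src into dst, overwriting duplicates.
func addParams(dst, src map[string]string) {
	for k, v := range src {
		dst[k] = v
	}
}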
Example #9
// Search returns a slice of Digests that match the input query, and the total number of Digests
// that matched the query. It also returns a slice of Commits that were used in the calculations.
func Search(q *Query, storages *storage.Storage, tallies *tally.Tallies, blamer *blame.Blamer, paramset *paramsets.Summary) ([]*Digest, int, []*tiling.Commit, error) {
	tile, err := storages.GetLastTileTrimmed(q.IncludeIgnores)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}

	// TODO Use CommitRange to create a trimmed tile.

	parsedQuery, err := url.ParseQuery(q.Query)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Failed to parse Query in Search: %s", err)
	}
	e, err := storages.ExpectationsStore.Get()
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}
	traceTally := tallies.ByTrace()
	lastCommitIndex := tile.LastCommitIndex()

	// Loop over the tile and pull out all the digests that match
	// the query, collecting the matching traces as you go. Build
	// up a set of intermediates that can then be used to calculate
	// Digests.

	// map [test:digest] *intermediate
	inter := map[string]*intermediate{}
	for id, tr := range tile.Traces {
		if tiling.Matches(tr, parsedQuery) {
			test := tr.Params()[types.PRIMARY_KEY_FIELD]
			// Get all the digests
			digests := digestsFromTrace(id, tr, q.Head, lastCommitIndex, traceTally)
			for _, digest := range digests {
				cl := e.Classification(test, digest)
				switch {
				case cl == types.NEGATIVE && !q.Neg:
					continue
				case cl == types.POSITIVE && !q.Pos:
					continue
				case cl == types.UNTRIAGED && !q.Unt:
					continue
				}
				// TODO: Fix blamer to make this easier.
				if q.BlameGroupID != "" {
					if cl == types.UNTRIAGED {
						b := blamer.GetBlame(test, digest, tile.Commits)
						if q.BlameGroupID != blameGroupID(b, tile.Commits) {
							continue
						}
					} else {
						continue
					}
				}
				key := fmt.Sprintf("%s:%s", test, digest)
				if i, ok := inter[key]; !ok {
					inter[key] = newIntermediate(test, digest, id, tr, digests)
				} else {
					i.addTrace(id, tr, digests)
				}
			}
		}
	}
	// Now loop over all the intermediates and build a Digest for each one.
	ret := make([]*Digest, 0, len(inter))
	for key, i := range inter {
		parts := strings.Split(key, ":")
		ret = append(ret, digestFromIntermediate(parts[0], parts[1], i, e, tile, tallies, blamer, storages.DiffStore, paramset, q.IncludeIgnores))
	}

	sort.Sort(DigestSlice(ret))

	fullLength := len(ret)
	if fullLength > q.Limit {
		ret = ret[0:q.Limit]
	}

	return ret, fullLength, tile.Commits, nil
}
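sort.Sort(DigestSlice(ret)) implies DigestSlice implements sort.Interface, but the ordering key isn't visible in these examples. A sketch ordering by test name, then digest (the real comparator may well differ):

// DigestSlice orders search results. The comparison below is an
// assumption; only the type's existence is implied by the code above.
type DigestSlice []*Digest

func (p DigestSlice) Len() int      { return len(p) }
func (p DigestSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p DigestSlice) Less(i, j int) bool {
	if p[i].Test != p[j].Test {
		return p[i].Test < p[j].Test
	}
	return p[i].Digest < p[j].Digest
}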