// Init starts a background process that feeds test and digest information
// to the diffstore to continuously calculate diffs for new digests.
func Init(storages *storage.Storage) {
	go func() {
		// TODO(stephana): Once we have events that signal that a new tile
		// is available, we want to process new tiles immediately instead
		// of polling every so often.
		tileStream := storages.GetTileStreamNow(2*time.Minute, true)

		for {
			tile := <-tileStream
			tileLen := tile.LastCommitIndex() + 1

			// digestSets is a map[testName]map[digest]bool.
			digestSets := map[string]map[string]bool{}
			for _, trace := range tile.Traces {
				gTrace := trace.(*types.GoldenTrace)
				testName := trace.Params()[types.PRIMARY_KEY_FIELD]
				for _, digest := range gTrace.Values[:tileLen] {
					if digest != types.MISSING_DIGEST {
						if _, ok := digestSets[testName]; !ok {
							digestSets[testName] = map[string]bool{}
						}
						digestSets[testName][digest] = true
					}
				}
			}

			storages.DiffStore.SetDigestSets(digestSets)
		}
	}()
}
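For context, a minimal sketch of how this Init might be wired up at server startup; the storage.Storage construction and the diffStore value are assumptions for illustration, not taken from the example above.

	// Hypothetical startup wiring; other Storage fields are elided.
	storages := &storage.Storage{
		DiffStore: diffStore, // some diff.DiffStore implementation (assumed)
	}
	Init(storages) // returns immediately; the digest-set refresh polls in the background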
Example #2
// New creates a new Tallies for the given storage object.
func New(storages *storage.Storage) (*Tallies, error) {
	tile, err := storages.GetLastTileTrimmed(true)
	if err != nil {
		return nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}

	trace, test := tallyTile(tile)
	t := &Tallies{
		traceTally: trace,
		testTally:  test,
		storages:   storages,
		callbacks:  []OnChangeCallback{},
	}
	go func() {
		for range time.Tick(2 * time.Minute) {
			tile, err := storages.GetLastTileTrimmed(true)
			if err != nil {
				glog.Errorf("Couldn't retrieve tile: %s", err)
				continue
			}

			trace, test := tallyTile(tile)
			t.mutex.Lock()
			t.traceTally = trace
			t.testTally = test
			t.mutex.Unlock()
			for _, cb := range t.callbacks {
				go cb()
			}
		}
	}()
	return t, nil
}
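A usage sketch for New, assuming Tallies exposes a registration method such as OnChange(cb OnChangeCallback); only the callbacks field is visible above, so the method name is an assumption.

	tallies, err := New(storages)
	if err != nil {
		glog.Fatalf("Failed to build tallies: %s", err)
	}
	// Hypothetical registration; callbacks fire after each 2-minute refresh.
	tallies.OnChange(func() {
		glog.Info("tallies refreshed")
	})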
Example #3
// oneStep does a single step, calculating all the paramsets from the latest tile and tallies.
//
// Returns the paramsets for both the tile with and without ignored traces included.
func oneStep(tallies *tally.Tallies, storages *storage.Storage) (map[string]map[string][]string, map[string]map[string][]string, error) {
	defer timer.New("paramsets").Stop()

	tile, err := storages.GetLastTileTrimmed(false)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to get tile: %s", err)
	}
	byTrace := byTraceForTile(tile, tallies.ByTrace())

	tile, err = storages.GetLastTileTrimmed(true)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to get tile: %s", err)
	}
	byTraceIncludeIgnored := byTraceForTile(tile, tallies.ByTrace())

	return byTrace, byTraceIncludeIgnored, nil
}
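A sketch of consuming oneStep's two return values. The snippet doesn't confirm the key shape of the returned maps, so the log line below only reports their sizes.

	// byTrace excludes ignored traces; withIgnored includes them.
	byTrace, withIgnored, err := oneStep(tallies, storages)
	if err != nil {
		glog.Errorf("paramsets update failed: %s", err)
		return
	}
	glog.Infof("paramsets: %d entries (%d including ignored traces)", len(byTrace), len(withIgnored))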
Example #4
// Search returns a slice of Digests that match the input query, and the total number of Digests
// that matched the query. It also returns a slice of Commits that were used in the calculations.
func Search(q *Query, storages *storage.Storage, tallies *tally.Tallies, blamer *blame.Blamer, paramset *paramsets.Summary) ([]*Digest, int, []*tiling.Commit, error) {
	parsedQuery, err := url.ParseQuery(q.Query)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Failed to parse Query in Search: %s", err)
	}

	tile, err := storages.GetLastTileTrimmed(q.IncludeIgnores)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}

	e, err := storages.ExpectationsStore.Get()
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}

	var ret []*Digest
	var commits []*tiling.Commit
	if q.Issue != "" {
		ret, err = searchByIssue(q.Issue, q, e, parsedQuery, storages, tile, tallies, paramset)
	} else {
		ret, commits, err = searchTile(q, e, parsedQuery, storages, tile, tallies, blamer, paramset)
	}

	if err != nil {
		return nil, 0, nil, err
	}

	sort.Sort(DigestSlice(ret))
	fullLength := len(ret)
	if fullLength > q.Limit {
		ret = ret[0:q.Limit]
	}

	return ret, fullLength, commits, nil
}
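A hedged example of calling this Search. The fields set below all appear somewhere in the two Search variants shown on this page; the query-string value itself is made up for illustration.

	q := &Query{
		Query:          "source_type=gm", // hypothetical query string
		IncludeIgnores: false,
		Unt:            true, // field used by the variant in Example #6
		Limit:          50,
	}
	digests, total, commits, err := Search(q, storages, tallies, blamer, paramset)
	if err != nil {
		glog.Errorf("Search failed: %s", err)
		return
	}
	glog.Infof("showing %d of %d matching digests over %d commits", len(digests), total, len(commits))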
Example #5
func Init(storages *storage.Storage, summaries *summary.Summaries, tallies *tally.Tallies) error {
	exp, err := storages.ExpectationsStore.Get()
	if err != nil {
		return err
	}
	go func() {
		oneRun := func() {
			t := timer.New("warmer one loop")
			for test, sum := range summaries.Get() {
				for _, digest := range sum.UntHashes {
					testTally := tallies.ByTest()[test]
					if testTally != nil {
						// Calculate the closest digest for the side effect of filling in the filediffstore cache.
						digesttools.ClosestDigest(test, digest, exp, testTally, storages.DiffStore, types.POSITIVE)
						digesttools.ClosestDigest(test, digest, exp, testTally, storages.DiffStore, types.NEGATIVE)
					}
				}
			}
			t.Stop()
			if newExp, err := storages.ExpectationsStore.Get(); err != nil {
				glog.Errorf("warmer: Failed to get expectations: %s", err)
			} else {
				exp = newExp
			}

			// Make sure all images are downloaded. This is necessary because
			// the front-end isn't served the URLs (generated via DiffStore.AbsPath)
			// that would otherwise ensure each image has been downloaded.
			// TODO(stephana): Remove this once the new diffstore is in place.
			tile, err := storages.GetLastTileTrimmed(true)
			if err != nil {
				glog.Errorf("Error retrieving tile: %s", err)
				return
			}
			tileLen := tile.LastCommitIndex() + 1
			traceDigests := make(map[string]bool, tileLen)
			for _, trace := range tile.Traces {
				gTrace := trace.(*types.GoldenTrace)
				for _, digest := range gTrace.Values {
					if digest != types.MISSING_DIGEST {
						traceDigests[digest] = true
					}
				}
			}

			digests := util.KeysOfStringSet(traceDigests)
			glog.Infof("FOUND %d digests to fetch.", len(digests))
			storages.DiffStore.AbsPath(digests)

			if err := warmTrybotDigests(storages, traceDigests); err != nil {
				glog.Errorf("Error retrieving trybot digests: %s", err)
				return
			}
		}

		oneRun()
		for range time.Tick(time.Minute) {
			oneRun()
		}
	}()
	return nil
}
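Wiring the warmer up is a one-liner; a sketch, assuming storages, summaries, and tallies were constructed earlier in the same process (e.g. via the New shown in Example #2).

	if err := Init(storages, summaries, tallies); err != nil {
		glog.Fatalf("Failed to start warmer: %s", err)
	}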
Example #6
// Search returns a slice of Digests that match the input query, and the total number of Digests
// that matched the query. It also returns a slice of Commits that were used in the calculations.
func Search(q *Query, storages *storage.Storage, tallies *tally.Tallies, blamer *blame.Blamer, paramset *paramsets.Summary) ([]*Digest, int, []*tiling.Commit, error) {
	tile, err := storages.GetLastTileTrimmed(q.IncludeIgnores)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Couldn't retrieve tile: %s", err)
	}

	// TODO Use CommitRange to create a trimmed tile.

	parsedQuery, err := url.ParseQuery(q.Query)
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Failed to parse Query in Search: %s", err)
	}
	e, err := storages.ExpectationsStore.Get()
	if err != nil {
		return nil, 0, nil, fmt.Errorf("Couldn't get expectations: %s", err)
	}
	traceTally := tallies.ByTrace()
	lastCommitIndex := tile.LastCommitIndex()

	// Loop over the tile and pull out all the digests that match
	// the query, collecting the matching traces as you go. Build
	// up a set of intermediates that can then be used to calculate
	// Digests.

	// map[test:digest]*intermediate
	inter := map[string]*intermediate{}
	for id, tr := range tile.Traces {
		if tiling.Matches(tr, parsedQuery) {
			test := tr.Params()[types.PRIMARY_KEY_FIELD]
			// Get all the digests
			digests := digestsFromTrace(id, tr, q.Head, lastCommitIndex, traceTally)
			for _, digest := range digests {
				cl := e.Classification(test, digest)
				switch {
				case cl == types.NEGATIVE && !q.Neg:
					continue
				case cl == types.POSITIVE && !q.Pos:
					continue
				case cl == types.UNTRIAGED && !q.Unt:
					continue
				}
				// TODO: Fix blamer to make this easier.
				if q.BlameGroupID != "" {
					if cl == types.UNTRIAGED {
						b := blamer.GetBlame(test, digest, tile.Commits)
						if q.BlameGroupID != blameGroupID(b, tile.Commits) {
							continue
						}
					} else {
						continue
					}
				}
				key := fmt.Sprintf("%s:%s", test, digest)
				if i, ok := inter[key]; !ok {
					inter[key] = newIntermediate(test, digest, id, tr, digests)
				} else {
					i.addTrace(id, tr, digests)
				}
			}
		}
	}
	// Now loop over all the intermediates and build a Digest for each one.
	ret := make([]*Digest, 0, len(inter))
	for key, i := range inter {
		parts := strings.Split(key, ":")
		ret = append(ret, digestFromIntermediate(parts[0], parts[1], i, e, tile, tallies, blamer, storages.DiffStore, paramset, q.IncludeIgnores))
	}

	sort.Sort(DigestSlice(ret))

	fullLength := len(ret)
	if fullLength > q.Limit {
		ret = ret[0:q.Limit]
	}

	return ret, fullLength, tile.Commits, nil
}
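A sketch of the blame-group path in this variant: BlameGroupID only ever matches untriaged digests, so a caller pairs it with Unt. The blameID value is hypothetical and would come from a previous search or blame lookup.

	q := &Query{
		Query:        "config=8888", // hypothetical query string
		Unt:          true,          // BlameGroupID filtering applies only to untriaged digests
		BlameGroupID: blameID,       // assumed to be obtained elsewhere
		Limit:        25,
	}
	digests, total, commits, err := Search(q, storages, tallies, blamer, paramset)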