func dumpTileToJSON(store tiling.TileStore, nCommits int, nTraces int, fname string) { tile, err := store.Get(0, -1) if err != nil { glog.Fatal("Could not read tile: " + err.Error()) } newTile := tile if (nCommits > 0) || (nTraces > 0) { lastIdx := tile.LastCommitIndex() if nCommits <= 0 { nCommits = lastIdx + 1 } if nTraces <= 0 { nTraces = len(tile.Traces) } commitLen := util.MinInt(nCommits, lastIdx+1) startCommit := lastIdx + 1 - commitLen newTraces := map[string]tiling.Trace{} for key, trace := range tile.Traces { for i := startCommit; i <= lastIdx; i++ { if !trace.IsMissing(i) { newTraces[key] = trace break } } if len(newTraces) >= nTraces { break } } newCommits := tile.Commits[startCommit:] newParamSet := map[string][]string{} tiling.GetParamSet(newTraces, newParamSet) newTile = &tiling.Tile{ Traces: newTraces, ParamSet: newParamSet, Commits: newCommits, Scale: tile.Scale, TileIndex: tile.TileIndex, } } result, err := json.Marshal(newTile) if err != nil { glog.Fatalf("Could not marshal to JSON: %s", err) } err = ioutil.WriteFile(fname, result, 0644) if err != nil { glog.Fatalf("Could not write output file %s", err) } fmt.Printf("Commits included: %d\n", len(newTile.Commits)) fmt.Printf("Traces included: %d\n", len(newTile.Traces)) }
func diff(tile *tiling.Tile, ts db.DB, isGold bool) error { commits := tile.Commits startTime := time.Unix(commits[0].CommitTime, 0) commitIDs, err := ts.List(startTime, time.Now()) if err != nil { return err } glog.Infof("COMMIT ids:\n\n\n %s\n\n\n", spew.Sdump(commitIDs)) glog.Infof("LOADING tile") traceDBTile, _, err := ts.TileFromCommits(commitIDs) if err != nil { return err } minLen := util.MinInt(len(commits), len(traceDBTile.Commits)) tdbTraces := traceDBTile.Traces glog.Infof("Commits/traces in tilestore: %d - %d", len(commits), len(tile.Traces)) glog.Infof("Commits/traces in tracedb : %d - %d", len(traceDBTile.Commits), len(tdbTraces)) count := 0 matchingCount := 0 for traceID, trace := range tile.Traces { _, ok := tdbTraces[traceID] if !ok { glog.Fatalf("Trace missing: %s", traceID) } v1 := trace.(*gtypes.GoldenTrace).Values[:minLen] v2 := tdbTraces[traceID].(*gtypes.GoldenTrace).Values[:minLen] identicalCount := 0 indices := make([]int, 0, minLen) for idx, val := range v1 { if val == v2[idx] { identicalCount++ } else { indices = append(indices, idx) } } if identicalCount != minLen { glog.Infof("Trace differs by %d / %d / %.2f, %v", identicalCount, minLen, float64(identicalCount)/float64(minLen), indices) } else { matchingCount++ } count++ } glog.Infof("Compared %d traces. Matching: %d", count, matchingCount) return nil }
// Test against the expectation store interface.
//
// Exercises the full lifecycle of an ExpectationsStore implementation:
// adding classification changes, removing digests, undoing changes, and
// querying the triage log. When eventBus is non-nil it also verifies that
// each mutation fires exactly one EV_EXPSTORAGE_CHANGED event carrying the
// (sorted) affected test names. The assertions are order-dependent: the
// triage-log checks at the end rely on the exact sequence of mutations made
// earlier in the function.
func testExpectationStore(t *testing.T, store ExpectationsStore, eventBus *eventbus.EventBus) {
	// Get the initial log size. This is necessary because we
	// call this function multiple times with the same underlying
	// SQLExpectationStore.
	initialLogRecs, initialLogTotal, err := store.QueryLog(0, 100, true)
	assert.Nil(t, err)
	initialLogRecsLen := len(initialLogRecs)

	// If we have an event bus then keep gathering events.
	// The channel is buffered so the async callback never blocks; each
	// event delivers the sorted list of test names that changed.
	callbackCh := make(chan []string, 3)
	if eventBus != nil {
		eventBus.SubscribeAsync(EV_EXPSTORAGE_CHANGED, func(e interface{}) {
			testNames := append([]string{}, e.([]string)...)
			sort.Strings(testNames)
			callbackCh <- testNames
		})
	}

	TEST_1, TEST_2 := "test1", "test2"

	// digests
	DIGEST_11, DIGEST_12 := "d11", "d12"
	DIGEST_21, DIGEST_22 := "d21", "d22"

	// First change: two tests, each with one positive and one negative digest.
	expChange_1 := map[string]types.TestClassification{
		TEST_1: types.TestClassification{
			DIGEST_11: types.POSITIVE,
			DIGEST_12: types.NEGATIVE,
		},
		TEST_2: types.TestClassification{
			DIGEST_21: types.POSITIVE,
			DIGEST_22: types.NEGATIVE,
		},
	}
	logEntry_1 := []*TriageDetail{
		&TriageDetail{TEST_1, DIGEST_11, "positive"},
		&TriageDetail{TEST_1, DIGEST_12, "negative"},
		&TriageDetail{TEST_2, DIGEST_21, "positive"},
		&TriageDetail{TEST_2, DIGEST_22, "negative"},
	}

	assert.Nil(t, store.AddChange(expChange_1, "user-0"))
	if eventBus != nil {
		eventBus.Wait(EV_EXPSTORAGE_CHANGED)
		assert.Equal(t, 1, len(callbackCh))
		assert.Equal(t, []string{TEST_1, TEST_2}, <-callbackCh)
	}

	foundExps, err := store.Get()
	assert.Nil(t, err)

	assert.Equal(t, expChange_1, foundExps.Tests)
	// The store must return a copy, not the map we passed in.
	assert.False(t, &expChange_1 == &foundExps.Tests)
	checkLogEntry(t, store, expChange_1)

	// Update digests.
	// Second change: re-triage one digest of each test; the earlier labels
	// for the other digests must be preserved.
	expChange_2 := map[string]types.TestClassification{
		TEST_1: types.TestClassification{
			DIGEST_11: types.NEGATIVE,
		},
		TEST_2: types.TestClassification{
			DIGEST_22: types.UNTRIAGED,
		},
	}
	logEntry_2 := []*TriageDetail{
		&TriageDetail{TEST_1, DIGEST_11, "negative"},
		&TriageDetail{TEST_2, DIGEST_22, "untriaged"},
	}

	assert.Nil(t, store.AddChange(expChange_2, "user-1"))
	if eventBus != nil {
		eventBus.Wait(EV_EXPSTORAGE_CHANGED)
		assert.Equal(t, 1, len(callbackCh))
		assert.Equal(t, []string{TEST_1, TEST_2}, <-callbackCh)
	}

	foundExps, err = store.Get()
	assert.Nil(t, err)

	assert.Equal(t, types.NEGATIVE, foundExps.Tests[TEST_1][DIGEST_11])
	assert.Equal(t, types.UNTRIAGED, foundExps.Tests[TEST_2][DIGEST_22])
	checkLogEntry(t, store, expChange_2)

	// Send empty changes to test the event bus.
	// An empty change must still fire an event (with no test names).
	emptyChanges := map[string]types.TestClassification{}
	assert.Nil(t, store.AddChange(emptyChanges, "user-2"))
	if eventBus != nil {
		eventBus.Wait(EV_EXPSTORAGE_CHANGED)
		assert.Equal(t, 1, len(callbackCh))
		assert.Equal(t, []string{}, <-callbackCh)
	}
	checkLogEntry(t, store, emptyChanges)

	// Remove digests.
	removeDigests_1 := map[string][]string{
		TEST_1: []string{DIGEST_11},
		TEST_2: []string{DIGEST_22},
	}

	assert.Nil(t, store.RemoveChange(removeDigests_1))
	if eventBus != nil {
		eventBus.Wait(EV_EXPSTORAGE_CHANGED)
		assert.Equal(t, 1, len(callbackCh))
		assert.Equal(t, []string{TEST_1, TEST_2}, <-callbackCh)
	}

	foundExps, err = store.Get()
	assert.Nil(t, err)

	// Only the untouched digests should remain after the removal.
	assert.Equal(t, types.TestClassification(map[string]types.Label{DIGEST_12: types.NEGATIVE}), foundExps.Tests[TEST_1])
	assert.Equal(t, types.TestClassification(map[string]types.Label{DIGEST_21: types.POSITIVE}), foundExps.Tests[TEST_2])

	removeDigests_2 := map[string][]string{TEST_1: []string{DIGEST_12}}

	assert.Nil(t, store.RemoveChange(removeDigests_2))
	if eventBus != nil {
		eventBus.Wait(EV_EXPSTORAGE_CHANGED)
		assert.Equal(t, 1, len(callbackCh))
		assert.Equal(t, []string{TEST_1}, <-callbackCh)
	}

	foundExps, err = store.Get()
	assert.Nil(t, err)

	// TEST_1 is now fully removed; only TEST_2 is left.
	assert.Equal(t, 1, len(foundExps.Tests))

	// Removing nothing must still fire an (empty) event.
	assert.Nil(t, store.RemoveChange(map[string][]string{}))
	if eventBus != nil {
		eventBus.Wait(EV_EXPSTORAGE_CHANGED)
		assert.Equal(t, 1, len(callbackCh))
		assert.Equal(t, []string{}, <-callbackCh)
	}

	// Make sure we added the correct number of triage log entries.
	// Only the three AddChange calls above create log records; RemoveChange
	// does not.
	addedRecs := 3
	logEntries, total, err := store.QueryLog(0, 5, true)
	assert.Nil(t, err)
	assert.Equal(t, addedRecs+initialLogTotal, total)
	assert.Equal(t, util.MinInt(addedRecs+initialLogRecsLen, 5), len(logEntries))

	// logEntries is newest-first: [0] empty change, [1] expChange_2,
	// [2] expChange_1.
	lastRec := logEntries[0]
	secondToLastRec := logEntries[1]

	assert.Equal(t, 0, len(logEntries[0].Details))
	assert.Equal(t, logEntry_2, logEntries[1].Details)
	assert.Equal(t, logEntry_1, logEntries[2].Details)

	// Paging past the end returns the same total but no entries.
	logEntries, total, err = store.QueryLog(100, 5, true)
	assert.Nil(t, err)
	assert.Equal(t, addedRecs+initialLogTotal, total)
	assert.Equal(t, 0, len(logEntries))

	// Undo the latest version and make sure the corresponding record is correct.
	changes, err := store.UndoChange(lastRec.ID, "user-1")
	assert.Nil(t, err)
	checkLogEntry(t, store, changes)

	changes, err = store.UndoChange(secondToLastRec.ID, "user-1")
	assert.Nil(t, err)
	checkLogEntry(t, store, changes)

	// Each undo adds its own log record.
	addedRecs += 2
	logEntries, total, err = store.QueryLog(0, 2, true)
	assert.Nil(t, err)
	assert.Equal(t, addedRecs+initialLogTotal, total)
	assert.Equal(t, 0, len(logEntries[1].Details))
	assert.Equal(t, 2, len(logEntries[0].Details))

	foundExps, err = store.Get()
	assert.Nil(t, err)

	// Undoing expChange_2 must restore the labels from expChange_1 for the
	// digests expChange_2 had overwritten.
	for testName, digests := range expChange_2 {
		for d := range digests {
			_, ok := foundExps.Tests[testName][d]
			assert.True(t, ok)
			assert.Equal(t, expChange_1[testName][d].String(), foundExps.Tests[testName][d].String())
		}
	}

	// Make sure undoing the previous undo causes an error.
	logEntries, _, err = store.QueryLog(0, 1, false)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(logEntries))
	_, err = store.UndoChange(logEntries[0].ID, "user-1")
	assert.NotNil(t, err)

	// Make sure getExpectationsAt works correctly.
	// This branch only applies to the SQL-backed implementation.
	sqlStore, ok := store.(*SQLExpectationsStore)
	if ok {
		logEntries, _, err = store.QueryLog(0, 100, true)
		assert.Nil(t, err)

		// Check the first addition.
		// Entries are newest-first, so the oldest record is at the end.
		firstAdd := logEntries[len(logEntries)-1]
		secondAdd := logEntries[len(logEntries)-2]
		secondUndo := logEntries[len(logEntries)-5]

		checkExpectationsAt(t, sqlStore, firstAdd, "first")
		checkExpectationsAt(t, sqlStore, secondAdd, "second")
		checkExpectationsAt(t, sqlStore, secondUndo, "third")
	}
}
// updateBlame reads from the provided tileStream and updates the current
// blame lists.
//
// For every untriaged digest seen for the first time in a trace, it records
// a candidate commit range [startIdx, endIdx] bounded below by the last
// commit where the trace had a (different) non-missing digest. The candidate
// ranges from all traces are then intersected per testName/digest pair into
// a frequency histogram over commits, which becomes the blame distribution.
// The finished result is swapped in under b.mutex.
func (b *Blamer) updateBlame(tile *tiling.Tile) error {
	exp, err := b.storages.ExpectationsStore.Get()
	if err != nil {
		return err
	}

	defer timer.New("blame").Stop()

	// Note: blameStart and blameEnd are continuously updated to contain the
	// smallest start and end index of the ranges for a testName/digest pair.
	blameStart := map[string]map[string]int{}
	blameEnd := map[string]map[string]int{}

	// blameRange stores the candidate ranges for a testName/digest pair.
	blameRange := map[string]map[string][][]int{}
	firstCommit := tile.Commits[0]
	tileLen := tile.LastCommitIndex() + 1
	ret := map[string]map[string]*BlameDistribution{}

	for _, trace := range tile.Traces {
		gtr := trace.(*types.GoldenTrace)
		testName := gtr.Params()[types.PRIMARY_KEY_FIELD]

		// lastIdx tracks the index of the last digest that is definitely
		// not in the blamelist.
		lastIdx := -1
		// found ensures each digest contributes at most one range per trace.
		found := map[string]bool{}
		for idx, digest := range gtr.Values[:tileLen] {
			if digest == types.MISSING_DIGEST {
				continue
			}

			status := exp.Classification(testName, digest)
			if (status == types.UNTRIAGED) && !found[digest] {
				found[digest] = true

				var startIdx int
				endIdx := idx

				// If we have only seen empty digests, then we do not
				// consider any digest before the current one.
				if lastIdx == -1 {
					startIdx = idx
				} else {
					startIdx = lastIdx + 1
				}

				// Get the info about this digest.
				digestInfo, err := b.storages.GetOrUpdateDigestInfo(testName, digest, tile.Commits[idx])
				if err != nil {
					return err
				}

				// Check if the digest was first seen outside the current tile.
				isOld := digestInfo.First < firstCommit.CommitTime
				commitRange := []int{startIdx, endIdx}
				// Three cases: first digest for this test, first range for
				// this digest, or an additional range for a known digest.
				if blameStartFound, ok := blameStart[testName]; !ok {
					blameStart[testName] = map[string]int{digest: startIdx}
					blameEnd[testName] = map[string]int{digest: endIdx}
					blameRange[testName] = map[string][][]int{digest: [][]int{commitRange}}
					ret[testName] = map[string]*BlameDistribution{digest: &BlameDistribution{Old: isOld}}
				} else if currentStart, ok := blameStartFound[digest]; !ok {
					blameStart[testName][digest] = startIdx
					blameEnd[testName][digest] = endIdx
					blameRange[testName][digest] = [][]int{commitRange}
					ret[testName][digest] = &BlameDistribution{Old: isOld}
				} else {
					blameStart[testName][digest] = util.MinInt(currentStart, startIdx)
					blameEnd[testName][digest] = util.MinInt(blameEnd[testName][digest], endIdx)
					blameRange[testName][digest] = append(blameRange[testName][digest], commitRange)
					ret[testName][digest].Old = isOld || ret[testName][digest].Old
				}
			}
			lastIdx = idx
		}
	}

	commits := tile.Commits[:tileLen]
	// Turn the collected candidate ranges into per-commit frequency counts,
	// clipped to [start, end] for each testName/digest pair.
	for testName, digests := range blameRange {
		for digest, commitRanges := range digests {
			start := blameStart[testName][digest]
			end := blameEnd[testName][digest]
			freq := make([]int, len(commits)-start)
			for _, commitRange := range commitRanges {
				// If the commit range is nil, we cannot calculate a
				// blamelist.
				if commitRange == nil {
					freq = []int{}
					break
				}

				// Calculate the blame.
				idxEnd := util.MinInt(commitRange[1], end)
				for i := commitRange[0]; i <= idxEnd; i++ {
					freq[i-start]++
				}
			}

			ret[testName][digest].Freq = freq
		}
	}

	// Swap out the old blame lists for the new ones.
	b.mutex.Lock()
	b.testBlameLists, b.commits = ret, commits
	b.mutex.Unlock()
	return nil
}
// Diff is a utility function that calculates the DiffMetrics and the image of the
// difference for the provided images.
//
// When the two images have identical bounds and an even pixel count, a fast
// path compares two pixels at a time via uint64 punning of the NRGBA byte
// buffers; otherwise a slow per-pixel path over the overlapping region is
// used. Pixels outside the overlap (when sizes differ) are counted as
// different from the start.
func Diff(img1, img2 image.Image) (*DiffMetrics, *image.NRGBA) {
	img1Bounds := img1.Bounds()
	img2Bounds := img2.Bounds()

	// Get the bounds we want to compare.
	cmpWidth := util.MinInt(img1Bounds.Dx(), img2Bounds.Dx())
	cmpHeight := util.MinInt(img1Bounds.Dy(), img2Bounds.Dy())

	// Get the bounds of the resulting image. If the dimensions match they
	// will be identical to the result bounds. Fill the image with black pixels.
	resultWidth := util.MaxInt(img1Bounds.Dx(), img2Bounds.Dx())
	resultHeight := util.MaxInt(img1Bounds.Dy(), img2Bounds.Dy())
	resultImg := image.NewNRGBA(image.Rect(0, 0, resultWidth, resultHeight))
	totalPixels := resultWidth * resultHeight

	// Loop through all points and compare. We start assuming all pixels are
	// wrong. This takes care of the case where the images have different sizes
	// and there is an area not inspected by the loop.
	numDiffPixels := resultWidth * resultHeight
	// Per-channel maximum absolute difference seen so far (R, G, B, A).
	maxRGBADiffs := make([]int, 4)

	// Pix is a []uint8 rotating through R, G, B, A, R, G, B, A, ...
	p1 := GetNRGBA(img1).Pix
	p2 := GetNRGBA(img2).Pix
	// Compare the bounds, if they are the same then use this fast path.
	// We pun to uint64 to compare 2 pixels at a time, so we also require
	// an even number of pixels here. If that's a big deal, we can easily
	// fix that up, handling the straggler pixel separately at the end.
	if img1Bounds.Eq(img2Bounds) && len(p1)%8 == 0 {
		numDiffPixels = 0
		// Note the += 8. We're checking two pixels at a time here.
		for i := 0; i < len(p1); i += 8 {
			// Most pixels we compare will be the same, so from here to
			// the 'continue' is the hot path in all this code.
			rgba_2x := (*uint64)(unsafe.Pointer(&p1[i]))
			RGBA_2x := (*uint64)(unsafe.Pointer(&p2[i]))
			if *rgba_2x == *RGBA_2x {
				continue
			}

			// When off == 0, we check the first pixel of the pair; when 4, the second.
			for off := 0; off <= 4; off += 4 {
				r, g, b, a := p1[off+i+0], p1[off+i+1], p1[off+i+2], p1[off+i+3]
				R, G, B, A := p2[off+i+0], p2[off+i+1], p2[off+i+2], p2[off+i+3]
				if r != R || g != G || b != B || a != A {
					numDiffPixels++
					dr := util.AbsInt(int(r) - int(R))
					dg := util.AbsInt(int(g) - int(G))
					db := util.AbsInt(int(b) - int(B))
					da := util.AbsInt(int(a) - int(A))
					maxRGBADiffs[0] = util.MaxInt(dr, maxRGBADiffs[0])
					maxRGBADiffs[1] = util.MaxInt(dg, maxRGBADiffs[1])
					maxRGBADiffs[2] = util.MaxInt(db, maxRGBADiffs[2])
					maxRGBADiffs[3] = util.MaxInt(da, maxRGBADiffs[3])
					// Color-only differences and alpha-only differences are
					// rendered with different palettes in the diff image.
					if dr+dg+db > 0 {
						copy(resultImg.Pix[off+i:], PixelDiffColor[deltaOffset(dr+dg+db+da)])
					} else {
						copy(resultImg.Pix[off+i:], PixelAlphaDiffColor[deltaOffset(da)])
					}
				}
			}
		}
	} else {
		// Slow path: per-pixel comparison over the overlapping region only.
		// numDiffPixels started at the full result area, so matching pixels
		// decrement it.
		for x := 0; x < cmpWidth; x++ {
			for y := 0; y < cmpHeight; y++ {
				color1 := img1.At(x, y)
				color2 := img2.At(x, y)

				dc := diffColors(color1, color2, maxRGBADiffs)
				if dc == PixelMatchColor {
					numDiffPixels--
				}
				resultImg.Set(x, y, dc)
			}
		}
	}

	return &DiffMetrics{
		NumDiffPixels:    numDiffPixels,
		PixelDiffPercent: getPixelDiffPercent(numDiffPixels, totalPixels),
		MaxRGBADiffs:     maxRGBADiffs,
		DimDiffer:        (cmpWidth != resultWidth) || (cmpHeight != resultHeight)}, resultImg
}
// calcStatus recomputes the triage status of the given tile.
//
// For each trace it classifies the most recent non-missing digest, groups
// the results by corpus, updates the package-level metrics gauges, and swaps
// the aggregated GUIStatus into s.current under s.mutex. A corpus is "OK"
// only if every trace's latest digest is positive, or negative with at least
// one associated issue.
func (s *StatusWatcher) calcStatus(tile *tiling.Tile) error {
	defer timer.New("Calc status timer:").Stop()

	// minCommitId tracks, per corpus, the earliest commit index at which a
	// trace's latest digest was found.
	minCommitId := map[string]int{}
	okByCorpus := map[string]bool{}

	expectations, err := s.storages.ExpectationsStore.Get()
	if err != nil {
		return err
	}

	// Gathers unique labels by corpus and label.
	byCorpus := map[string]map[types.Label]map[string]bool{}

	// Iterate over the current traces
	tileLen := tile.LastCommitIndex() + 1
	for _, trace := range tile.Traces {
		gTrace := trace.(*types.GoldenTrace)

		// Walk backwards to the most recent non-missing digest in the trace.
		idx := tileLen - 1
		for (idx >= 0) && (gTrace.Values[idx] == types.MISSING_DIGEST) {
			idx--
		}

		// If this is an empty trace we ignore it for now.
		if idx == -1 {
			continue
		}

		// If this corpus doesn't exist yet, we initialize it.
		corpus := gTrace.Params()[types.CORPUS_FIELD]
		if _, ok := byCorpus[corpus]; !ok {
			minCommitId[corpus] = tileLen
			okByCorpus[corpus] = true
			byCorpus[corpus] = map[types.Label]map[string]bool{
				types.POSITIVE:  map[string]bool{},
				types.NEGATIVE:  map[string]bool{},
				types.UNTRIAGED: map[string]bool{},
			}

			// Lazily register one gauge per corpus/label pair; corpusGauges
			// is package-level state shared across calls.
			if _, ok := corpusGauges[corpus]; !ok {
				corpusGauges[corpus] = map[types.Label]metrics.Gauge{
					types.UNTRIAGED: metrics.NewRegisteredGauge(fmt.Sprintf(METRIC_CORPUS_TMPL, types.UNTRIAGED, corpus), nil),
					types.POSITIVE:  metrics.NewRegisteredGauge(fmt.Sprintf(METRIC_CORPUS_TMPL, types.POSITIVE, corpus), nil),
					types.NEGATIVE:  metrics.NewRegisteredGauge(fmt.Sprintf(METRIC_CORPUS_TMPL, types.NEGATIVE, corpus), nil),
				}
			}
		}

		// Account for the corpus and testname.
		digest := gTrace.Values[idx]
		testName := gTrace.Params()[types.PRIMARY_KEY_FIELD]
		status := expectations.Classification(testName, digest)

		digestInfo, err := s.storages.GetOrUpdateDigestInfo(testName, digest, tile.Commits[idx])
		if err != nil {
			return err
		}

		// A corpus stays OK only while every latest digest is positive, or
		// negative with at least one tracked issue.
		okByCorpus[corpus] = okByCorpus[corpus] && ((status == types.POSITIVE) || ((status == types.NEGATIVE) && (len(digestInfo.IssueIDs) > 0)))
		minCommitId[corpus] = util.MinInt(idx, minCommitId[corpus])
		byCorpus[corpus][status][digest] = true
	}

	commits := tile.Commits[:tileLen]
	overallOk := true
	allUntriagedCount := 0
	allPositiveCount := 0
	allNegativeCount := 0
	corpStatus := make([]*GUICorpusStatus, 0, len(byCorpus))
	for corpus := range byCorpus {
		overallOk = overallOk && okByCorpus[corpus]
		untriagedCount := len(byCorpus[corpus][types.UNTRIAGED])
		positiveCount := len(byCorpus[corpus][types.POSITIVE])
		negativeCount := len(byCorpus[corpus][types.NEGATIVE])
		// NOTE(review): positiveCount is tallied and fed to the gauges below
		// but is not set on GUICorpusStatus — confirm this is intentional.
		corpStatus = append(corpStatus, &GUICorpusStatus{
			Name:           corpus,
			OK:             okByCorpus[corpus],
			MinCommitHash:  commits[minCommitId[corpus]].Hash,
			UntriagedCount: untriagedCount,
			NegativeCount:  negativeCount,
		})
		allUntriagedCount += untriagedCount
		allNegativeCount += negativeCount
		allPositiveCount += positiveCount

		corpusGauges[corpus][types.POSITIVE].Update(int64(positiveCount))
		corpusGauges[corpus][types.NEGATIVE].Update(int64(negativeCount))
		corpusGauges[corpus][types.UNTRIAGED].Update(int64(untriagedCount))
	}
	allUntriagedGauge.Update(int64(allUntriagedCount))
	allPositiveGauge.Update(int64(allPositiveCount))
	allNegativeGauge.Update(int64(allNegativeCount))
	totalGauge.Update(int64(allUntriagedCount + allPositiveCount + allNegativeCount))

	sort.Sort(CorpusStatusSorter(corpStatus))

	// Swap out the current tile.
	result := &GUIStatus{
		OK:         overallOk,
		LastCommit: commits[tileLen-1],
		CorpStatus: corpStatus,
	}
	s.mutex.Lock()
	s.current = result
	s.mutex.Unlock()

	return nil
}