func getAllInt(t *testing.T, title string, path string, obj interface{}, exp []int) {
	result := getAllInterface(t, title, path, obj)
	if result == nil {
		return
	}
	if len(result) != len(exp) {
		t.Errorf("FAIL(%s): %s -> invalid length %d != %d", title, path, len(result), len(exp))
		return
	}
	var intResult []int
	for _, val := range result {
		intResult = append(intResult, val.(int))
	}
	sort.Ints(exp)
	sort.Ints(intResult)
	for i := 0; i < len(intResult); i++ {
		if intResult[i] != exp[i] {
			t.Errorf("FAIL(%s): %s -> exp %d got %d", title, path, exp[i], intResult[i])
		}
	}
}
func IsValid(sudoku Sudoku) bool {
	for i := 0; i < SUDOKU_LENGTH; i++ {
		rowNums := ScanRow(&sudoku, i)
		sort.Ints(rowNums)
		for j := 0; j < len(rowNums)-1; j++ {
			if rowNums[j+1] == rowNums[j] {
				return false
			}
		}
		colNums := ScanCol(&sudoku, i)
		sort.Ints(colNums)
		for j := 0; j < len(colNums)-1; j++ {
			if colNums[j+1] == colNums[j] {
				return false
			}
		}
		recNums := ScanRec(&sudoku, i)
		sort.Ints(recNums)
		for j := 0; j < len(recNums)-1; j++ {
			if recNums[j+1] == recNums[j] {
				return false
			}
		}
	}
	return true
}
// Same determines whether the trees
// t1 and t2 contain the same values.
func Same(t1, t2 *tree.Tree) bool {
	t1ch := make(chan int)
	t2ch := make(chan int)
	t1val := []int{}
	t2val := []int{}
	go Walk(t1, t1ch)
	go Walk(t2, t2ch)
	for i := range t1ch {
		t1val = append(t1val, i)
	}
	for i := range t2ch {
		t2val = append(t2val, i)
	}
	// Trees of different sizes cannot be the same.
	if len(t1val) != len(t2val) {
		return false
	}
	sort.Ints(t1val)
	sort.Ints(t2val)
	for i := range t1val {
		if t1val[i] != t2val[i] {
			return false
		}
	}
	return true
}
func InitAssets() {
	if len(male) > 0 && len(female) > 0 {
		return
	}
	// Compile the pattern once instead of on every visited path.
	re := regexp.MustCompile(`/(male|female)/(\D+)(\d+)\.png`)
	filepath.Walk("img", func(path string, info os.FileInfo, err error) error {
		found := re.FindStringSubmatch(path)
		if len(found) > 0 {
			gender := found[1]
			datatype := found[2]
			index, err := strconv.Atoi(found[3])
			if err == nil {
				if gender == "male" {
					male[datatype] = append(male[datatype], index)
				} else if gender == "female" {
					female[datatype] = append(female[datatype], index)
				}
			}
		}
		return nil
	})
	for _, value := range male {
		sort.Ints(value)
	}
	for _, value := range female {
		sort.Ints(value)
	}
}
func main() { f, err := os.Open("input.txt") if err != nil { panic(err) } total_wrapping := 0 total_length := 0 content, err := ioutil.ReadAll(f) lines := strings.Split(string(content), "\n") for _, line := range lines { lwh := strings.Split(string(line), "x") l, _ := strconv.Atoi(lwh[0]) w, _ := strconv.Atoi(lwh[1]) h, _ := strconv.Atoi(lwh[2]) side := []int{l * w, w * h, l * h} sort.Ints(side) min_side := side[0] wrapping := 2*(l*w+w*h+l*h) + min_side total_wrapping += wrapping perimeter := []int{2 * (l + w), 2 * (w + h), 2 * (l + h)} sort.Ints(perimeter) min_perimeter := perimeter[0] bow := l * w * h total_length += min_perimeter + bow } fmt.Println(total_wrapping) fmt.Println(total_length) }
func testProcUtil(t *testing.T, pid int) {
	lwps, err := LWPs(pid)
	if err != nil {
		t.Fatal(err)
	}
	sort.Ints(lwps)
	t.Logf("LWPs(%d) = %#v", pid, lwps)
	children, err := Children(pid)
	if err != nil {
		t.Fatal(err)
	}
	sort.Ints(children)
	t.Logf("Children(%d) = %#v", pid, children)
	descendants, err := Descendants(pid)
	if err != nil {
		t.Fatal(err)
	}
	sort.Ints(descendants)
	t.Logf("Descendants(%d) = %#v", pid, descendants)
	descendantLWPs, err := DescendantLWPs(pid)
	if err != nil {
		t.Fatal(err)
	}
	sort.Ints(descendantLWPs)
	t.Logf("DescendantLWPs(%d) = %#v", pid, descendantLWPs)
}
// validate checks that a board with all cells set to {1-9}
// fulfills the constraints of a solved board.
func (b *Board) validate() bool {
	for i := 0; i < 9; i++ {
		row := b.row(i)
		col := b.col(i)
		zone := b.zone(i)
		sort.Ints(row)
		sort.Ints(col)
		sort.Ints(zone)
		// Check duplicates
		for j := 0; j < 8; j++ {
			if row[j] == row[j+1] {
				return false
			}
			if col[j] == col[j+1] {
				return false
			}
			if zone[j] == zone[j+1] {
				return false
			}
		}
	}
	return true
}
func compareIntArray(i, j interface{}) int {
	ia := i.([]int)
	ja := j.([]int)
	sort.Ints(ia)
	sort.Ints(ja)
	il := len(ia)
	jl := len(ja)
	result := 0
	if il < jl {
		result = -1
	} else if il > jl {
		result = 1
	} else {
		for idx, iv := range ia {
			jv := ja[idx]
			if iv != jv {
				if iv < jv {
					result = -1
				} else if iv > jv {
					result = 1
				}
				break
			}
		}
	}
	return result
}
func calculateEstimatesBetweenNodes(priceestimatestruct UberAPI, index int) (lowEstimate []int, duration []int, distance []float64) {
	fmt.Println("length of low estimate array is:", len(priceestimatestruct.Prices))
	lowEstimate = make([]int, len(priceestimatestruct.Prices))
	for i := 0; i < len(priceestimatestruct.Prices); i++ {
		lowEstimate[i] = priceestimatestruct.Prices[i].LowEstimate
	}
	duration = make([]int, len(priceestimatestruct.Prices))
	for i := 0; i < len(priceestimatestruct.Prices); i++ {
		duration[i] = priceestimatestruct.Prices[i].Duration
	}
	distance = make([]float64, len(priceestimatestruct.Prices))
	for i := 0; i < len(priceestimatestruct.Prices); i++ {
		distance[i] = priceestimatestruct.Prices[i].Distance
	}
	sort.Ints(lowEstimate)
	sort.Ints(duration)
	sort.Float64s(distance)
	fmt.Println("Sorted lowEstimate array: ", lowEstimate)
	fmt.Println("Sorted duration array: ", duration)
	fmt.Println("Sorted distance array: ", distance)
	if lowEstimate[0] == 0 {
		cheapestCostArray = append(cheapestCostArray, lowEstimate[1])
	} else {
		cheapestCostArray = append(cheapestCostArray, lowEstimate[0])
	}
	durationArray = append(durationArray, duration[0])
	distanceArray = append(distanceArray, distance[0])
	return cheapestCostArray, durationArray, distanceArray
}
func cliDumpRevNat(ctx *cli.Context) {
	dump, err := client.RevNATDump()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Warning: Unable to dump map: %s\n", err)
	}
	revNatFormat := map[int]string{}
	revNatFormatKeysV4 := []int{}
	revNatFormatKeysV6 := []int{}
	for _, revNat := range dump {
		revNatFormat[int(revNat.ID)] = revNat.String()
		if revNat.IsIPv6() {
			revNatFormatKeysV6 = append(revNatFormatKeysV6, int(revNat.ID))
		} else {
			revNatFormatKeysV4 = append(revNatFormatKeysV4, int(revNat.ID))
		}
	}
	sort.Ints(revNatFormatKeysV6)
	sort.Ints(revNatFormatKeysV4)
	if len(revNatFormatKeysV6) != 0 {
		fmt.Printf("IPv6:\n")
		for _, revNATID := range revNatFormatKeysV6 {
			fmt.Printf("%d => %s\n", revNATID, revNatFormat[revNATID])
		}
	}
	if len(revNatFormatKeysV4) != 0 {
		fmt.Printf("IPv4:\n")
		for _, revNATID := range revNatFormatKeysV4 {
			fmt.Printf("%d => %s\n", revNATID, revNatFormat[revNATID])
		}
	}
}
func solve(mr *MyReader) {
	x, y, z, n := I4(mr.Gis())
	xs := []int{0, x}
	ys := []int{0, y}
	for _, da := range mr.NGis(n) {
		d, a := I2(da)
		if d == 0 {
			xs = append(xs, a)
		} else {
			ys = append(ys, a)
		}
	}
	sort.Ints(xs)
	sort.Ints(ys)
	minval := func(vs []int, r int) int {
		for i := 1; i < len(vs); i++ {
			r = Min(r, vs[i]-vs[i-1])
		}
		return r
	}
	xmin := minval(xs, x)
	ymin := minval(ys, y)
	ans := xmin * ymin * z
	fmt.Println(ans)
}
// Same determines whether the trees
// t1 and t2 contain the same values.
func Same(t1, t2 *tree.Tree) bool {
	ch1 := make(chan int)
	ch2 := make(chan int)
	go Walk(t1, ch1)
	go Walk(t2, ch2)
	s1 := make([]int, 10)
	s2 := make([]int, 10)
	for i := 0; i < 10; i++ {
		a := <-ch1
		b := <-ch2
		s1[i] = a
		s2[i] = b
	}
	sort.Ints(s1)
	sort.Ints(s2)
	for i := 0; i < 10; i++ {
		if s1[i] != s2[i] {
			return false
		}
	}
	return true
}
func main() {
	fmt.Scan(&n, &m)
	var temp string
	for i := 0; i < n; i++ {
		fmt.Scan(&temp, &j)
		if temp == "ATK" {
			atk = append(atk, j)
		} else {
			def = append(def, j)
		}
	}
	for i := 0; i < m; i++ {
		fmt.Scan(&j)
		fox = append(fox, j)
	}
	sort.Ints(def)
	sort.Ints(atk)
	sort.Ints(fox)
	a := killAll()
	b := onlyatk()
	if a > b {
		fmt.Println(a)
	} else {
		fmt.Println(b)
	}
}
func TestDepthFirstSearch(t *testing.T) {
	G := NewAdjGraphForFile("./algs4-data/tinyG.txt")
	var search Search
	var slice []int

	// test 1
	search = NewDepthFirstSearch(G, 0)
	assert.NotEqual(t, search.Count(), G.V())
	slice = make([]int, 0)
	for v := 0; v < G.V(); v++ {
		if search.Marked(v) {
			slice = append(slice, v)
		}
	}
	sort.Ints(slice)
	assert.Equal(t, slice, []int{0, 1, 2, 3, 4, 5, 6})

	// test 2
	search = NewDepthFirstSearch(G, 9)
	assert.NotEqual(t, search.Count(), G.V())
	slice = make([]int, 0)
	for v := 0; v < G.V(); v++ {
		if search.Marked(v) {
			slice = append(slice, v)
		}
	}
	sort.Ints(slice)
	assert.Equal(t, slice, []int{9, 10, 11, 12})
}
func main() {
	bi := bufio.NewReader(os.Stdin)
	bo := bufio.NewWriter(os.Stdout)
	defer bo.Flush()
	var n, m int
	fmt.Fscanln(bi, &n, &m)
	g := make([]int, n)
	for i := range g {
		fmt.Fscan(bi, &g[i])
	}
	b := make([]int, m)
	for i := range b {
		fmt.Fscan(bi, &b[i])
	}
	sort.Ints(g)
	sort.Ints(b)
	res := sum(g) // no one takes
	for k := 1; k <= min(len(b), len(g)); k++ {
		new_res := sum(g[:len(g)-k]) + sum(b[:len(b)-k])*k
		if new_res < res {
			res = new_res
		}
	}
	fmt.Fprintln(bo, res)
}
func Test_intordmut01(tst *testing.T) {
	//verbose()
	chk.PrintTitle("intordmut01")
	var ops OpsData
	ops.SetDefault()
	ops.Pm = 1
	rnd.Init(0)
	a := []int{1, 2, 3, 4, 5, 6, 7, 8}
	io.Pforan("before: a = %v\n", a)
	ops.OrdSti = []int{2, 5, 4}
	IntOrdMutation(a, 0, &ops)
	io.Pfcyan("after: a = %v\n", a)
	chk.Ints(tst, "a", a, []int{1, 2, 6, 7, 3, 4, 5, 8})
	nums := utl.IntRange2(1, 9)
	sort.Ints(a)
	chk.Ints(tst, "asorted = 12345678", a, nums)
	a = []int{1, 2, 3, 4, 5, 6, 7, 8}
	io.Pforan("\nbefore: a = %v\n", a)
	ops.OrdSti = nil
	IntOrdMutation(a, 0, &ops)
	io.Pfcyan("after: a = %v\n", a)
	sort.Ints(a)
	chk.Ints(tst, "asorted = 12345678", a, nums)
}
// verifyStoreList ensures that the returned list of stores is correct.
func verifyStoreList(
	sp *StorePool,
	constraints config.Constraints,
	rangeID roachpb.RangeID,
	expected []int,
	expectedAliveStoreCount int,
	expectedThrottledStoreCount int,
) error {
	var actual []int
	sl, aliveStoreCount, throttledStoreCount := sp.getStoreList(rangeID)
	sl = sl.filter(constraints)
	if aliveStoreCount != expectedAliveStoreCount {
		return errors.Errorf("expected AliveStoreCount %d does not match actual %d",
			expectedAliveStoreCount, aliveStoreCount)
	}
	if throttledStoreCount != expectedThrottledStoreCount {
		return errors.Errorf("expected ThrottledStoreCount %d does not match actual %d",
			expectedThrottledStoreCount, throttledStoreCount)
	}
	for _, store := range sl.stores {
		actual = append(actual, int(store.StoreID))
	}
	sort.Ints(expected)
	sort.Ints(actual)
	if !reflect.DeepEqual(expected, actual) {
		return errors.Errorf("expected %+v stores, actual %+v", expected, actual)
	}
	return nil
}
func cmpareSl(a, b [][]Amorph) bool {
	// return reflect.DeepEqual(a, b)
	if len(a) != len(b) {
		fmt.Println("distinct1")
		return false
	}
	for i := 0; i < len(a); i++ {
		if len(a[i]) != len(b[i]) {
			fmt.Println("distinct2")
			return false
		}
		ai, bi := make([]int, len(a[i])), make([]int, len(b[i]))
		for j := 0; j < len(a[i]); j++ {
			ai[j] = a[i][j].IdxArticle
			bi[j] = b[i][j].IdxArticle
		}
		sort.Ints(ai)
		sort.Ints(bi)
		as := fmt.Sprintf("%v", ai)
		bs := fmt.Sprintf("%v", bi)
		if as != bs {
			fmt.Println("distinct3")
			return false
		}
	}
	return true
}
// WrappingPaper calculates how much wrapping paper and ribbon is needed.
func WrappingPaper() {
	// 2*l*w + 2*w*h + 2*h*l
	// l*w*h
	p := make([]int, 3)
	areas := make([]int, 3)
	sum := 0
	feetOfRibbon := 0
	inFile, _ := os.Open("solutions/wrapping_input.txt")
	defer inFile.Close()
	scanner := bufio.NewScanner(inFile)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		pString := strings.Split(scanner.Text(), "x")
		p[0], _ = strconv.Atoi(pString[0])
		p[1], _ = strconv.Atoi(pString[1])
		p[2], _ = strconv.Atoi(pString[2])
		areas[0] = p[0] * p[1]
		areas[1] = p[1] * p[2]
		areas[2] = p[0] * p[2]
		sum += (2 * areas[0]) + (2 * areas[1]) + (2 * areas[2])
		sort.Ints(areas)
		sum += areas[0]
		sort.Ints(p)
		feetOfRibbon += ((2*p[0] + 2*p[1]) + (p[0] * p[1] * p[2]))
	}
	fmt.Println("Sum:", sum)
	fmt.Println("Feet Of Ribbon:", feetOfRibbon)
}
func main() {
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		s := strings.Split(scanner.Text(), ";")
		var n, e, r int
		fmt.Sscanf(s[0], "%d %d", &n, &e)
		g := make(map[int][]int)
		t := strings.Split(s[1], ",")
		p := make([]int, 2)
		for _, i := range t {
			fmt.Sscanf(i, "%d %d", &p[0], &p[1])
			sort.Ints(p)
			g[p[0]] = append(g[p[0]], p[1])
		}
		for k := range g {
			sort.Ints(g[k])
		}
		for _, v := range g {
			for i := 0; i < len(v)-1; i++ {
				for j := i + 1; j < len(v); j++ {
					if contains(g[v[i]], v[j]) {
						r++
					}
				}
			}
		}
		fmt.Println(r)
	}
}
func TestMastRun02(t *testing.T) {
	inp := PairSlice{
		{"hello", 1},
		{"hello", 2},
	}
	m := buildMast(inp)
	for _, pair := range inp {
		out, ok := m.run(pair.In)
		if !ok {
			t.Errorf("expected: accept [%v]\n", pair.In)
		}
		if len(out) != 2 {
			t.Errorf("input: %v, output size: got %v, expected 2\n", pair.In, len(out))
		}
		expected := []int{1, 2}
		sort.Ints(out)
		sort.Ints(expected)
		if !reflect.DeepEqual(out, expected) {
			t.Errorf("input: %v, output: got %v, expected %v\n", pair.In, out, expected)
		}
	}
	if out, ok := m.run("aloha"); ok {
		t.Errorf("expected: reject \"aloha\", %v\n", out)
	}
}
func main() {
	// givens
	values := []int{7, 6, 5, 4, 3, 2, 1, 0}
	indices := map[int]int{6: 0, 1: 0, 7: 0}
	orderedValues := make([]int, len(indices))
	orderedIndices := make([]int, len(indices))
	i := 0
	for j := range indices {
		// validate that indices are within list boundaries
		if j < 0 || j >= len(values) {
			fmt.Println("Invalid index: ", j)
			return
		}
		// extract elements to sort
		orderedValues[i] = values[j]
		orderedIndices[i] = j
		i++
	}
	// sort
	sort.Ints(orderedValues)
	sort.Ints(orderedIndices)
	fmt.Println("initial:", values)
	// replace sorted values
	for i, v := range orderedValues {
		values[orderedIndices[i]] = v
	}
	fmt.Println("sorted: ", values)
}
// assertEqualDocumentMapping tests that two document maps (docID -> file path)
// are equivalent, that is, they store the same docIDs and the same file paths. The
// specific mapping of docID to path is not important because it is not guaranteed
// to be the same every time.
func assertEqualDocumentMapping(t *testing.T, actual, expected map[int]string) {
	if len(actual) != len(expected) {
		t.Errorf("Expected number of documents indexed: %d, actual: %d", len(expected), len(actual))
	}
	actualKeys := make([]int, len(actual))
	actualPaths := make([]string, len(actual))
	expectedKeys := make([]int, len(expected))
	expectedPaths := make([]string, len(expected))
	i := 0
	for k, v := range actual {
		actualKeys[i] = k
		actualPaths[i] = v
		fmt.Println(v)
		i++
	}
	i = 0
	for k, v := range expected {
		expectedKeys[i] = k
		expectedPaths[i] = v
		fmt.Println(v)
		i++
	}
	sort.Ints(actualKeys)
	sort.Ints(expectedKeys)
	if !reflect.DeepEqual(actualKeys, expectedKeys) {
		t.Error("Invalid docIDs indexed")
	}
	sort.Strings(actualPaths)
	sort.Strings(expectedPaths)
	if !reflect.DeepEqual(actualPaths, expectedPaths) {
		t.Error("Invalid paths indexed")
	}
}
func main() {
	core.RegisterTypes()
	flag.Parse()
	db := core.ContinueDB(filepath.Join(*BaseDbPath, "balances"), *treapToken)
	logtreap := new(verified.LogTreap)
	logtreap.MakeOpaque()
	ads.GetInfo(logtreap).Token = *treapToken
	pagingC := core.NewPagingC(db)
	pagingC.Load(ads.GetInfo(logtreap))
	c := pagingC
	c.Use(logtreap)
	length := logtreap.Count(c)
	hash := logtreap.Slice(0, length, c) // this hash is what we commit to....
	c.Use(hash)
	tree := hash.Finish(c).(verified.LogTree)
	c.Use(tree)
	lastReturn := tree.Index(length-1, c)
	c.Use(lastReturn)
	balances := lastReturn.ArgsOrResults[0].(bitrie.Bitrie)
	c.Use(balances)
	// core.Dump(bitrie.Bits{}, balances, c)
	fmt.Println(logtreap.Count(c))
	fmt.Println(commitmentToBalances(hash, c))
	n := 1000
	sizes := make([]int, 0)
	for i := 0; i < n; i++ {
		sizes = append(sizes, randomBalance(balances, c))
	}
	sort.Ints(sizes)
	fmt.Println(sizes)
	sizes = make([]int, 0)
	for i := 0; i < n; i++ {
		j := rand.Intn(int(logtreap.Count(c) - 1))
		hash := logtreap.Slice(0, int32(j+1), c)
		sizes = append(sizes, nextstep(hash, c))
	}
	for i := 0; i < 20; i++ {
		j := rand.Intn(int(logtreap.Count(c) - 5000 - 1))
		for k := j; k < j+5000; k++ {
			hash := logtreap.Slice(0, int32(k+1), c)
			sizes = append(sizes, nextstep(hash, c))
		}
	}
	sort.Ints(sizes)
	fmt.Println(sizes)
}
func main() {
	srcPath := os.Args[1]
	dstPath := os.Args[2]
	reader, err := os.Open(srcPath)
	check(err)
	src, srcFmt, err := image.Decode(reader)
	check(err)
	fmt.Println("Decoded source", srcPath, "as", srcFmt)
	scale := 8
	boundsMax := src.Bounds().Max
	smallMax := boundsMax.Div(scale)
	flat := image.NewNRGBA64(image.Rectangle{image.ZP, smallMax})
	samplePix := func(x, y int) (r, g, b []int) {
		var rs, gs, bs []int
		for i := x - 25; i <= x+25; i += 5 {
			if i < 0 || i >= boundsMax.X {
				continue
			}
			for j := y - 25; j <= y+25; j += 5 {
				if j < 0 || j >= boundsMax.Y {
					continue
				}
				r, g, b, _ := src.At(i, j).RGBA()
				rs = append(rs, int(r))
				gs = append(gs, int(g))
				bs = append(bs, int(b))
			}
		}
		return rs, gs, bs
	}
	var wg sync.WaitGroup
	for i := 0; i < smallMax.X; i++ {
		for j := 0; j < smallMax.Y; j++ {
			wg.Add(1)
			go func(x, y int) {
				defer wg.Done()
				r, g, b := samplePix(x*scale+scale/2, y*scale+scale/2)
				sort.Ints(r)
				sort.Ints(g)
				sort.Ints(b)
				idx := len(r) / 8
				flat.SetNRGBA64(x, y, color.NRGBA64{
					uint16(r[idx]), uint16(g[idx]), uint16(b[idx]), 0xFFFF})
			}(i, j)
		}
	}
	wg.Wait()
	writer, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, 0600)
	check(err)
	//jpeg.Encode(writer, proxy, nil)
	png.Encode(writer, flat)
}
func solve(pr *codejam.Problem, vd *vectorData) {
	sort.Ints(vd.xs)
	sort.Ints(vd.ys)
	minSum := vd.scalar()
	pr.Write(fmt.Sprintf("Case #%d: %d\n", vd.testIndex, minSum))
}
// `spreadTest` runs a test against the `generator` function, to check that
// when calling it 64*40 times, the range of characters per string position it
// returns matches the array `expected`, where each entry in `expected` is a
// string of all possible characters that should appear in that position in the
// string, at least once in the sample of 64*40 responses from the `generator`
// function.
func spreadTest(t *testing.T, generator func() string, expected []string) {
	// k is an array which stores which characters were found at which
	// positions. It has one entry per slugid character, therefore 22 entries.
	// Each entry is a map with a key for each character found, and its value
	// as the number of times that character appeared at that position in the
	// slugid in the large sample of slugids generated in this test.
	var k [22]map[rune]int
	for i := 0; i < 22; i++ {
		k[i] = make(map[rune]int)
	}

	// Generate a large sample of slugids, and record what characters appeared
	// where... A monte-carlo test has demonstrated that with 64 * 20
	// iterations, no failure occurred in 1000 simulations, so 64 * 40 should be
	// suitably large to rule out false positives.
	for i := 0; i < 64*40; i++ {
		slug := generator()
		if len(slug) != 22 {
			t.Fatalf("Generated slug '%s' does not have 22 characters", slug)
		}
		for j, char := range slug {
			k[j][char] = k[j][char] + 1
		}
	}

	// Compose results into an array `actual`, for comparison with `expected`
	var actual [22][]int
	actualRange := ""
	for j := 0; j < 22; j++ {
		actual[j] = make([]int, 0)
		for a := range k[j] {
			actual[j] = append(actual[j], int(a))
		}
		sort.Ints(actual[j])
		for _, c := range actual[j] {
			actualRange += string(rune(c))
		}
		actualRange += "\n"
	}
	expectedRange := ""
	for _, s := range expected {
		bytes := []byte(s)
		chars := make([]int, 0)
		for _, a := range bytes {
			chars = append(chars, int(a))
		}
		sort.Ints(chars)
		for _, c := range chars {
			expectedRange += string(rune(c))
		}
		expectedRange += "\n"
	}
	if expectedRange != actualRange {
		t.Errorf("In a large sample of generated slugids (using function %s), the range of characters found per character position in the sample did not match expected results.\n\nExpected: \n%s\n\nActual: \n%s", functionName(generator), expectedRange, actualRange)
	}
}
// orderPerfTodo reorders commit nums for benchmarking todo.
// The resulting order is somewhat tricky. We want 2 things:
// 1. benchmark sequentially backwards (this provides information about most
//    recent changes, and allows to estimate noise levels)
// 2. benchmark old commits in "scatter" order (this allows to quickly gather
//    brief information about thousands of old commits)
// So this function interleaves the two orders.
func orderPerfTodo(nums []int) []int {
	sort.Ints(nums)
	n := len(nums)
	pow2 := uint32(0) // next power-of-two that is >= n
	npow2 := 0
	for npow2 <= n {
		pow2++
		npow2 = 1 << pow2
	}
	res := make([]int, n)
	resPos := n - 1            // result array is filled backwards
	present := make([]bool, n) // denotes values that are already present in the result array
	for i0, i1 := n-1, 0; i0 >= 0 || i1 < npow2; {
		// i0 represents the "benchmark sequentially backwards" sequence:
		// find the next commit that is not yet present and add it.
		for cnt := 0; cnt < 2; cnt++ {
			for ; i0 >= 0; i0-- {
				if !present[i0] {
					present[i0] = true
					res[resPos] = nums[i0]
					resPos--
					i0--
					break
				}
			}
		}
		// i1 represents the "scatter order" sequence:
		// find the next commit that is not yet present and add it.
		for ; i1 < npow2; i1++ {
			// do the "recursive split-ordering" trick
			idx := 0 // bitwise reverse of i1
			for j := uint32(0); j <= pow2; j++ {
				if (i1 & (1 << j)) != 0 {
					idx = idx | (1 << (pow2 - j - 1))
				}
			}
			if idx < n && !present[idx] {
				present[idx] = true
				res[resPos] = nums[idx]
				resPos--
				i1++
				break
			}
		}
	}
	// The above can't possibly be correct. Do a dumb sanity check.
	res2 := make([]int, n)
	copy(res2, res)
	sort.Ints(res2)
	for i := range res2 {
		if res2[i] != nums[i] {
			panic(fmt.Sprintf("diff at %v: expect %v, want %v\nwas: %v\n become: %v", i, nums[i], res2[i], nums, res2))
		}
	}
	return res
}
func main() {
	x := []int{4, 2, 1, 3, 5, 6}
	sort.Ints(x)
	fmt.Println(x)
	y := []int{3, 2, 1, 5, 2, 3, 5, 6}
	sort.Ints(y)
	fmt.Println(y)
}
func getImportantVersions(p CowyoData) []versionsInfo {
	m := map[int]int{}
	dmp := diffmatchpatch.New()
	lastText := ""
	lastTime := time.Now().AddDate(0, -1, 0)
	for i, diff := range p.Diffs {
		seq1, _ := dmp.DiffFromDelta(lastText, diff)
		texts_linemode := diffRebuildtexts(seq1)
		rebuilt := texts_linemode[len(texts_linemode)-1]
		parsedTime, _ := time.Parse(time.ANSIC, p.Timestamps[i])
		duration := parsedTime.Sub(lastTime)
		m[i] = int(duration.Seconds())
		if i > 0 {
			m[i-1] = m[i]
		}
		// On to the next one
		lastText = rebuilt
		lastTime = parsedTime
	}
	// Sort in order of decreasing diff times
	n := map[int][]int{}
	var a []int
	for k, v := range m {
		n[v] = append(n[v], k)
	}
	for k := range n {
		a = append(a, k)
	}
	sort.Sort(sort.Reverse(sort.IntSlice(a)))
	// Collect the versions with the biggest diff times
	var importantVersions []int
	var r []versionsInfo
	for _, k := range a {
		for _, s := range n[k] {
			if s != 0 && s != len(n) {
				fmt.Printf("%d, %d\n", s, k)
				importantVersions = append(importantVersions, s)
				if len(importantVersions) > 10 {
					sort.Ints(importantVersions)
					for _, nn := range importantVersions {
						r = append(r, versionsInfo{p.Timestamps[nn], nn})
					}
					return r
				}
			}
		}
	}
	sort.Ints(importantVersions)
	for _, nn := range importantVersions {
		r = append(r, versionsInfo{p.Timestamps[nn], nn})
	}
	return r
}