func (s *S) TestUserAddRole(c *check.C) {
	_, err := permission.NewRole("r1", "app")
	c.Assert(err, check.IsNil)
	_, err = permission.NewRole("r2", "app")
	c.Assert(err, check.IsNil)
	u := User{Email: "*****@*****.**", Password: "******"}
	err = u.Create()
	c.Assert(err, check.IsNil)
	err = u.AddRole("r1", "c1")
	c.Assert(err, check.IsNil)
	err = u.AddRole("r1", "c2")
	c.Assert(err, check.IsNil)
	err = u.AddRole("r2", "x")
	c.Assert(err, check.IsNil)
	err = u.AddRole("r2", "x")
	c.Assert(err, check.IsNil)
	err = u.AddRole("r3", "a")
	c.Assert(err, check.Equals, permission.ErrRoleNotFound)
	expected := []RoleInstance{
		{Name: "r1", ContextValue: "c1"},
		{Name: "r1", ContextValue: "c2"},
		{Name: "r2", ContextValue: "x"},
	}
	sort.Sort(roleInstanceList(expected))
	sort.Sort(roleInstanceList(u.Roles))
	c.Assert(u.Roles, check.DeepEquals, expected)
	uDB, err := GetUserByEmail("*****@*****.**")
	c.Assert(err, check.IsNil)
	sort.Sort(roleInstanceList(uDB.Roles))
	c.Assert(uDB.Roles, check.DeepEquals, expected)
}
func TestSort(t *testing.T) {
	sorted_loglines, err := Parse("fixtures/sorted.out")
	if err != nil {
		t.Error(err)
	}
	if len(sorted_loglines) != 11 {
		t.Errorf("Logline length mismatch: %v != 11", len(sorted_loglines))
	}

	unsorted_loglines, err := Parse("fixtures/unsorted.out")
	if err != nil {
		t.Error(err)
	}
	if len(unsorted_loglines) != 11 {
		t.Errorf("Logline length mismatch: %v != 11", len(unsorted_loglines))
	}

	if reflect.DeepEqual(sorted_loglines, unsorted_loglines) {
		t.Error("Unsorted loglines match sorted loglines.")
	}

	sort.Sort(ByTime(unsorted_loglines))
	if !reflect.DeepEqual(sorted_loglines, unsorted_loglines) {
		t.Error("Sorted loglines do not match.")
	}

	sort.Sort(ByTime(sorted_loglines))
	if !reflect.DeepEqual(sorted_loglines, unsorted_loglines) {
		t.Error("Sorted loglines do not match after unnecessary sort.")
	}
}
// FindLatestMatchingName locates a package by name and returns the latest
// available version.
func (repo *RepositoryXMLBackend) FindLatestMatchingName(name, version, release string) (*Package, error) {
	var pkg *Package
	var err error

	pkgs, ok := repo.Packages[name]
	if !ok {
		repo.msg.Debugf("could not find package %q\n", name)
		return nil, fmt.Errorf("no such package %q", name)
	}

	if version == "" && len(pkgs) > 0 {
		// Return the latest version.
		sorted := make([]*Package, len(pkgs))
		copy(sorted, pkgs)
		sort.Sort(Packages(sorted))
		pkg = sorted[len(sorted)-1]
	} else {
		// Try to match the requirements.
		req := NewRequires(name, version, release, "", "EQ", "")
		sorted := make(Packages, 0, len(pkgs))
		for _, p := range pkgs {
			if req.ProvideMatches(p) {
				sorted = append(sorted, p)
			}
		}
		if len(sorted) > 0 {
			sort.Sort(sorted)
			pkg = sorted[len(sorted)-1]
		}
	}
	return pkg, err
}
// SortHostPorts sorts the given HostPort slice according to the
// sortOrder of each HostPort's embedded Address and the preferIPv6
// flag. See Address.sortOrder() for more info.
func SortHostPorts(hps []HostPort, preferIPv6 bool) {
	if preferIPv6 {
		sort.Sort(hostPortsPreferringIPv6Slice{hostPortsPreferringIPv4Slice(hps)})
	} else {
		sort.Sort(hostPortsPreferringIPv4Slice(hps))
	}
}
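The IPv6-preferring slice above apparently embeds the IPv4-preferring one and overrides only Less, so Len and Swap are promoted from the embedded type. Those unexported wrapper types aren't shown in this snippet, so here is a minimal, self-contained sketch of the same embedding trick using hypothetical stand-in types (v4First, v6First) over plain address strings, not Juju's real HostPort machinery:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// v4First orders address literals so IPv4-looking ones (no colon) come
// first; within a family it falls back to lexical order. Hypothetical
// stand-in for the unexported hostPortsPreferringIPv4Slice.
type v4First []string

func (s v4First) Len() int      { return len(s) }
func (s v4First) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s v4First) Less(i, j int) bool {
	v6i, v6j := strings.Contains(s[i], ":"), strings.Contains(s[j], ":")
	if v6i != v6j {
		return !v6i // the IPv4 literal sorts first
	}
	return s[i] < s[j]
}

// v6First embeds v4First, inheriting Len and Swap and overriding only
// Less, mirroring how hostPortsPreferringIPv6Slice wraps the IPv4 variant.
type v6First struct{ v4First }

func (s v6First) Less(i, j int) bool {
	v6i, v6j := strings.Contains(s.v4First[i], ":"), strings.Contains(s.v4First[j], ":")
	if v6i != v6j {
		return v6i // the IPv6 literal sorts first
	}
	return s.v4First[i] < s.v4First[j]
}

func main() {
	addrs := []string{"fe80::1", "10.0.0.1", "::1", "192.168.0.1"}
	sort.Sort(v6First{v4First(addrs)})
	fmt.Println(addrs) // [::1 fe80::1 10.0.0.1 192.168.0.1]
}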
func TestSort(t *testing.T) {
	infos := InfoArray{
		{"a", Float64Value(3.0), 0, 0, 0, 0},
		{"b", Float64Value(1.0), 0, 0, 0, 0},
		{"c", Float64Value(2.1), 0, 0, 0, 0},
		{"d", Float64Value(2.0), 0, 0, 0, 0},
		{"e", Float64Value(-1.0), 0, 0, 0, 0},
	}

	// Verify forward sort.
	sort.Sort(infos)
	last := Float64Value(-math.MaxFloat64)
	for _, info := range infos {
		if info.Val.Less(last) {
			t.Errorf("info val %v not increasing", info.Val)
		}
		last = info.Val.(Float64Value)
	}

	// Verify reverse sort.
	sort.Sort(sort.Reverse(infos))
	last = Float64Value(math.MaxFloat64)
	for _, info := range infos {
		if !info.Val.Less(last) {
			t.Errorf("info val %v not decreasing", info.Val)
		}
		last = info.Val.(Float64Value)
	}
}
// Interfaces returns an object holding lists of all the plugs and slots
// and their connections.
func (r *Repository) Interfaces() *Interfaces {
	r.m.Lock()
	defer r.m.Unlock()

	ifaces := &Interfaces{}
	// Copy and flatten plugs and slots.
	for _, plugs := range r.plugs {
		for _, plug := range plugs {
			p := &Plug{
				PlugInfo:    plug.PlugInfo,
				Connections: append([]SlotRef(nil), plug.Connections...),
			}
			sort.Sort(bySlotRef(p.Connections))
			ifaces.Plugs = append(ifaces.Plugs, p)
		}
	}
	for _, slots := range r.slots {
		for _, slot := range slots {
			s := &Slot{
				SlotInfo:    slot.SlotInfo,
				Connections: append([]PlugRef(nil), slot.Connections...),
			}
			sort.Sort(byPlugRef(s.Connections))
			ifaces.Slots = append(ifaces.Slots, s)
		}
	}
	sort.Sort(byPlugSnapAndName(ifaces.Plugs))
	sort.Sort(bySlotSnapAndName(ifaces.Slots))
	return ifaces
}
func testPeerListsMatch(t *testing.T, p1, p2 []peer.ID) {
	if len(p1) != len(p2) {
		t.Fatal("did not find as many peers as should have", p1, p2)
	}

	ids1 := make([]string, len(p1))
	ids2 := make([]string, len(p2))
	for i, p := range p1 {
		ids1[i] = string(p)
	}
	for i, p := range p2 {
		ids2[i] = string(p)
	}

	sort.Sort(sort.StringSlice(ids1))
	sort.Sort(sort.StringSlice(ids2))

	for i := range ids1 {
		if ids1[i] != ids2[i] {
			t.Fatal("didn't find expected peer", ids1[i], ids2)
		}
	}
}
// MatchingInstanceTypes returns all instance types matching constraints and
// available in region, sorted by increasing region-specific cost (if known).
func MatchingInstanceTypes(allInstanceTypes []InstanceType, region string, cons constraints.Value) ([]InstanceType, error) {
	var itypes []InstanceType

	// Rules used to select instance types:
	// - non-memory constraints like cpu-cores etc. are always honoured
	// - if no mem constraint is specified and instance-type is not specified,
	//   try an opinionated default with enough mem to run a server
	// - if there are no matches and no mem constraint was specified, try again
	//   and return any matching instance with the largest memory
	origCons := cons
	if !cons.HasInstanceType() && cons.Mem == nil {
		minMem := uint64(minMemoryHeuristic)
		cons.Mem = &minMem
	}
	itypes = matchingTypesForConstraint(allInstanceTypes, cons)

	// No matches using the opinionated default, so if no mem constraint was
	// specified, look for a matching instance with the largest memory.
	if len(itypes) == 0 && cons.Mem != origCons.Mem {
		itypes = matchingTypesForConstraint(allInstanceTypes, origCons)
		if len(itypes) > 0 {
			sort.Sort(byMemory(itypes))
			itypes = []InstanceType{itypes[len(itypes)-1]}
		}
	}

	// If we have matching instance types, return those, sorted by cost.
	if len(itypes) > 0 {
		sort.Sort(byCost(itypes))
		return itypes, nil
	}

	// No luck, so report the error.
	return nil, fmt.Errorf("no instance types in %s matching constraints %q", region, origCons)
}
func TestNodes(t *testing.T) {
	st := tbl.Construct(t)

	for _, node := range tbl.Nodes {
		reply, err := st.Nodes(ctx, &xpb.NodesRequest{
			Ticket: []string{node.Ticket},
		})
		testutil.FatalOnErrT(t, "NodesRequest error: %v", err)

		if len(reply.Node) != 1 {
			t.Fatalf("Expected 1 node for %q; found %d: {%v}", node.Ticket, len(reply.Node), reply)
		} else if expected := nodeInfo(node); !reflect.DeepEqual(reply.Node[0], expected) {
			t.Fatalf("Expected {%v}; received {%v}", expected, reply.Node[0])
		}
	}

	var tickets []string
	var expected []*xpb.NodeInfo
	for _, n := range tbl.Nodes {
		tickets = append(tickets, n.Ticket)
		expected = append(expected, nodeInfo(n))
	}
	reply, err := st.Nodes(ctx, &xpb.NodesRequest{Ticket: tickets})
	testutil.FatalOnErrT(t, "NodesRequest error: %v", err)

	sort.Sort(byNodeTicket(expected))
	sort.Sort(byNodeTicket(reply.Node))
	if !reflect.DeepEqual(expected, reply.Node) {
		t.Fatalf("Expected {%v}; received {%v}", expected, reply.Node)
	}
}
func TestUnreferencedBlobs(t *testing.T) {
	WithTestEnvironment(t, checkerTestData, func(repodir string) {
		repo := OpenLocalRepo(t, repodir)

		snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
		OK(t, repo.Backend().Remove(backend.Snapshot, snID))

		unusedBlobsBySnapshot := backend.IDs{
			ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
			ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"),
			ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"),
			ParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"),
			ParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"),
			ParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"),
		}
		sort.Sort(unusedBlobsBySnapshot)

		chkr := checker.New(repo)
		OK(t, chkr.LoadIndex())

		OKs(t, checkPacks(chkr))
		OKs(t, checkStruct(chkr))

		blobs := chkr.UnusedBlobs()
		sort.Sort(blobs)

		Equals(t, unusedBlobsBySnapshot, blobs)
	})
}
// printTags collects all tags referenced in the profile and prints
// them in a sorted table.
func printTags(w io.Writer, rpt *Report) error {
	p := rpt.prof

	// Accumulate tags as key -> value -> count.
	tagMap := make(map[string]map[string]int64)
	for _, s := range p.Sample {
		for key, vals := range s.Label {
			for _, val := range vals {
				if valueMap, ok := tagMap[key]; ok {
					valueMap[val] += s.Value[0]
					continue
				}
				valueMap := make(map[string]int64)
				valueMap[val] = s.Value[0]
				tagMap[key] = valueMap
			}
		}
		for key, vals := range s.NumLabel {
			for _, nval := range vals {
				val := scaledValueLabel(nval, key, "auto")
				if valueMap, ok := tagMap[key]; ok {
					valueMap[val] += s.Value[0]
					continue
				}
				valueMap := make(map[string]int64)
				valueMap[val] = s.Value[0]
				tagMap[key] = valueMap
			}
		}
	}

	tagKeys := make(tags, 0, len(tagMap))
	for key := range tagMap {
		tagKeys = append(tagKeys, &tag{name: key})
	}
	sort.Sort(tagKeys)

	for _, tagKey := range tagKeys {
		var total int64
		key := tagKey.name
		tags := make(tags, 0, len(tagMap[key]))
		for t, c := range tagMap[key] {
			total += c
			tags = append(tags, &tag{name: t, weight: c})
		}
		sort.Sort(tags)
		fmt.Fprintf(w, "%s: Total %d\n", key, total)
		for _, t := range tags {
			if total > 0 {
				fmt.Fprintf(w, " %8d (%s): %s\n", t.weight, percentage(t.weight, total), t.name)
			} else {
				fmt.Fprintf(w, " %8d: %s\n", t.weight, t.name)
			}
		}
		fmt.Fprintln(w)
	}
	return nil
}
// Spearman returns the rank correlation coefficient between data1 and data2,
// and the associated p-value.
func Spearman(data1, data2 []float64) (rs float64, p float64) {
	n := len(data1)
	wksp1, wksp2 := make([]float64, n), make([]float64, n)
	copy(wksp1, data1)
	copy(wksp2, data2)

	sort.Sort(sort2{wksp1, wksp2})
	sf := crank(wksp1)
	sort.Sort(sort2{wksp2, wksp1})
	sg := crank(wksp2)

	d := 0.0
	for j := 0; j < n; j++ {
		sq := wksp1[j] - wksp2[j]
		d += sq * sq
	}

	en := float64(n)
	en3n := en*en*en - en
	fac := (1.0 - sf/en3n) * (1.0 - sg/en3n)
	rs = (1.0 - (6.0/en3n)*(d+(sf+sg)/12.0)) / math.Sqrt(fac)

	if fac = (rs + 1.0) * (1.0 - rs); fac > 0 {
		t := rs * math.Sqrt((en-2.0)/fac)
		df := en - 2.0
		p = mathx.BetaInc(df/(df+t*t), 0.5*df, 0.5)
	}
	return rs, p
}
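For reference, the quantity computed above is the tie-corrected Spearman statistic (this matches the classic Numerical Recipes formulation; D is the sum of squared rank differences accumulated in d, and s_f, s_g are the tie-correction sums returned by crank):

$$
r_s = \frac{1 - \frac{6}{n^3 - n}\left(D + \frac{s_f + s_g}{12}\right)}
{\sqrt{\left(1 - \frac{s_f}{n^3 - n}\right)\left(1 - \frac{s_g}{n^3 - n}\right)}},
\qquad
t = r_s \sqrt{\frac{n - 2}{(1 + r_s)(1 - r_s)}}
$$

The p-value is then $I_{\nu/(\nu + t^2)}(\nu/2,\, 1/2)$, the regularized incomplete beta function with $\nu = n - 2$ degrees of freedom, which is exactly what the BetaInc call evaluates.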
// main is the entry point: it reads commands from stdin in a loop and
// dispatches SoundCloud search and playback actions.
func main() {
	var p Player
	folderPath, _ := osext.ExecutableFolder()
	p.client_id, _ = ioutil.ReadFile(folderPath + "/client_id.txt")
	p.MinD = 50 * 60 * 1000
	p.MaxD = 500 * 60 * 1000

	println("Please type a search term or 'x' to exit ....")
	r := bufio.NewReader(os.Stdin)
	for {
		i, _, _ := r.ReadLine()
		p.li = string(i)
		switch {
		case p.li == "x":
			p.exit()
		case p.li == "ll":
			p.showResultList()
		case strings.HasPrefix(p.li, "set "):
			p.set()
		case strings.HasPrefix(p.li, "i "):
			p.info()
		case isAllint(p.li):
			go p.killAndPlay()
		default:
			p.searchSoundCloud()
			sort.Sort(ByLength{p.srs})
			sort.Sort(ByAge{p.srs})
			p.showResultList()
		}
	}
}
func (s *S) TestUserRemoveRole(c *check.C) {
	u := User{
		Email:    "*****@*****.**",
		Password: "******",
		Roles: []RoleInstance{
			{Name: "r1", ContextValue: "c1"},
			{Name: "r1", ContextValue: "c2"},
			{Name: "r2", ContextValue: "x"},
		},
	}
	err := u.Create()
	c.Assert(err, check.IsNil)
	err = u.RemoveRole("r1", "c2")
	c.Assert(err, check.IsNil)
	err = u.RemoveRole("r1", "c2")
	c.Assert(err, check.IsNil)
	expected := []RoleInstance{
		{Name: "r1", ContextValue: "c1"},
		{Name: "r2", ContextValue: "x"},
	}
	sort.Sort(roleInstanceList(expected))
	sort.Sort(roleInstanceList(u.Roles))
	c.Assert(u.Roles, check.DeepEquals, expected)
	uDB, err := GetUserByEmail("*****@*****.**")
	c.Assert(err, check.IsNil)
	sort.Sort(roleInstanceList(uDB.Roles))
	c.Assert(uDB.Roles, check.DeepEquals, expected)
}
func _sort(programTree *program.Program, filename string, _groupMethodsByType bool, _groupMethodsByVisibility bool, _sortImports bool, order string) (bool, *errors.GoRefactorError) {
	if ok, err := CheckSortParameters(filename, order); !ok {
		return false, err
	}
	pack, file := programTree.FindPackageAndFileByFilename(filename)
	if pack == nil {
		return false, errors.ArgumentError("filename", "Program packages don't contain file '"+filename+"'")
	}
	fset := pack.FileSet
	tokFile := printerUtil.GetFileFromFileSet(fset, filename)

	groupMethodsByType = _groupMethodsByType
	groupMethodsByVisibility = _groupMethodsByVisibility
	sortImports = _sortImports
	fullOrder = getFullOrder(order)

	decls := &DeclCollection{file.Decls, file, fset, tokFile}
	if sortImports {
		for _, d := range decls.Arr {
			if gd, ok := d.(*ast.GenDecl); ok && gd.Tok == token.IMPORT {
				sort.Sort(SpecCollection(gd.Specs))
			}
		}
	}

	printDecls(tokFile, file)
	//test
	//decls.Swap(2, decls.Len()-1)
	sort.Sort(decls)
	printDecls(tokFile, file)
	//printer.Fprint(os.Stdout, fset, file)
	return true, nil
}
func TestDecorationsRefs(t *testing.T) {
	d := tbl.Decorations[1]

	st := tbl.Construct(t)
	reply, err := st.Decorations(ctx, &xpb.DecorationsRequest{
		Location:   &xpb.Location{Ticket: d.FileTicket},
		References: true,
		Filter:     []string{"**"},
	})
	testutil.FatalOnErrT(t, "DecorationsRequest error: %v", err)

	if len(reply.SourceText) != 0 {
		t.Errorf("Unexpected source text: %q", string(reply.SourceText))
	}
	if reply.Encoding != "" {
		t.Errorf("Unexpected encoding: %q", reply.Encoding)
	}

	expected := refs(xrefs.NewNormalizer(d.SourceText), d.Decoration)
	if !reflect.DeepEqual(expected, reply.Reference) {
		t.Fatalf("Expected references %v; found %v", expected, reply.Reference)
	}

	expectedNodes := nodeInfos(tbl.Nodes[7:13])
	sort.Sort(byNodeTicket(expectedNodes))
	sort.Sort(byNodeTicket(reply.Node))
	if err := testutil.DeepEqual(expectedNodes, reply.Node); err != nil {
		t.Fatal(err)
	}
}
func (c *httpClusterClient) Sync(ctx context.Context) error {
	mAPI := NewMembersAPI(c)
	ms, err := mAPI.List(ctx)
	if err != nil {
		return err
	}

	c.Lock()
	defer c.Unlock()

	eps := make([]string, 0)
	for _, m := range ms {
		eps = append(eps, m.ClientURLs...)
	}
	sort.Sort(sort.StringSlice(eps))

	ceps := make([]string, len(c.endpoints))
	for i, cep := range c.endpoints {
		ceps[i] = cep.String()
	}
	sort.Sort(sort.StringSlice(ceps))

	// Fast path if no change happens; this helps the client pin the
	// endpoint when there is no cluster change.
	if reflect.DeepEqual(eps, ceps) {
		return nil
	}

	return c.SetEndpoints(eps)
}
// dividePartitionsBetweenConsumers divides a set of partitions between a
// set of consumers as evenly as possible.
func dividePartitionsBetweenConsumers(consumers kazoo.ConsumergroupInstanceList, partitions partitionLeaders) map[string][]*kazoo.Partition {
	result := make(map[string][]*kazoo.Partition)

	plen := len(partitions)
	clen := len(consumers)
	if clen == 0 {
		return result
	}

	sort.Sort(partitions)
	sort.Sort(consumers)

	n := plen / clen
	m := plen % clen
	p := 0
	for i, consumer := range consumers {
		first := p
		last := first + n
		if m > 0 && i < m {
			last++
		}
		if last > plen {
			last = plen
		}

		for _, pl := range partitions[first:last] {
			result[consumer.ID] = append(result[consumer.ID], pl.partition)
		}
		p = last
	}

	return result
}
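The bucketing arithmetic above gives every consumer plen/clen partitions, and the first plen%clen consumers (in sorted order) take one extra. A standalone sketch of just that index arithmetic, with made-up counts (8 partitions over 3 consumers):

package main

import "fmt"

// Mirrors the bucketing above: 8 partitions over 3 consumers gives
// n=2, m=2, so the first two consumers get 3 partitions each and the
// last gets 2.
func main() {
	plen, clen := 8, 3
	n, m := plen/clen, plen%clen
	p := 0
	for i := 0; i < clen; i++ {
		first := p
		last := first + n
		if m > 0 && i < m {
			last++
		}
		if last > plen {
			last = plen
		}
		fmt.Printf("consumer %d -> partitions [%d, %d)\n", i, first, last)
		p = last
	}
	// Output:
	// consumer 0 -> partitions [0, 3)
	// consumer 1 -> partitions [3, 6)
	// consumer 2 -> partitions [6, 8)
}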
func (us *UnionScanExec) buildAndSortAddedRows(t table.Table, asName *model.CIStr) error {
	us.addedRows = make([]*Row, 0, len(us.dirty.addedRows))
	for h, data := range us.dirty.addedRows {
		for i, field := range us.Src.Fields() {
			field.Expr.SetDatum(data[i])
		}
		if us.condition != nil {
			matched, err := evaluator.EvalBool(us.ctx, us.condition)
			if err != nil {
				return errors.Trace(err)
			}
			if !matched {
				continue
			}
		}
		rowKeyEntry := &RowKeyEntry{Handle: h, Tbl: t, TableAsName: asName}
		row := &Row{Data: data, RowKeys: []*RowKeyEntry{rowKeyEntry}}
		us.addedRows = append(us.addedRows, row)
	}
	if us.desc {
		sort.Sort(sort.Reverse(us))
	} else {
		sort.Sort(us)
	}
	if us.sortErr != nil {
		return errors.Trace(us.sortErr)
	}
	return nil
}
func TestUpdateToInvalid(t *testing.T) {
	ldb := db.OpenMemory()
	s := db.NewFileSet("test", ldb)

	localHave := fileList{
		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(2)},
		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(5), Invalid: true},
		protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7)},
	}

	s.Replace(protocol.LocalDeviceID, localHave)

	have := fileList(haveList(s, protocol.LocalDeviceID))
	sort.Sort(have)

	if fmt.Sprint(have) != fmt.Sprint(localHave) {
		t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
	}

	localHave[1] = protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Invalid: true}
	s.Update(protocol.LocalDeviceID, localHave[1:2])

	have = fileList(haveList(s, protocol.LocalDeviceID))
	sort.Sort(have)

	if fmt.Sprint(have) != fmt.Sprint(localHave) {
		t.Errorf("Have incorrect after invalidation;\n A: %v !=\n E: %v", have, localHave)
	}
}
// updateCountString describes the update counts that were recorded by
// storeEventReader. The formatting is appropriate to paste into this test
// as a new expected value.
func (ser *storeEventReader) updateCountString() string {
	var buffer bytes.Buffer
	w := tabwriter.NewWriter(&buffer, 2, 1, 2, ' ', 0)

	var storeIDs sort.IntSlice
	for storeID := range ser.perStoreUpdateCount {
		storeIDs = append(storeIDs, int(storeID))
	}
	sort.Sort(storeIDs)

	for _, storeID := range storeIDs {
		if countset, ok := ser.perStoreUpdateCount[proto.StoreID(storeID)]; ok {
			fmt.Fprintf(w, "proto.StoreID(%d): {\n", storeID)

			var methodIDs sort.IntSlice
			for methodID := range countset {
				methodIDs = append(methodIDs, int(methodID))
			}
			sort.Sort(methodIDs)

			for _, methodID := range methodIDs {
				method := proto.Method(methodID)
				if count, okCount := countset[method]; okCount {
					fmt.Fprintf(w, "\tproto.%s:\t%d,\n", method, count)
				} else {
					panic("unreachable!")
				}
			}
		} else {
			panic("unreachable!")
		}
		fmt.Fprintf(w, "},\n")
	}
	return buffer.String()
}
func (p *mtrUiPage) getSitesList() (err error) {
	u := *mtrApiUrl
	u.Path = "/data/latency/summary"

	var b []byte
	if b, err = getBytes(u.String(), "application/x-protobuf"); err != nil {
		return
	}

	var f mtrpb.DataLatencySummaryResult
	if err = proto.Unmarshal(b, &f); err != nil {
		return
	}

	p.SparkGroups = make([]sparkGroup, 0)
	p.dataResult = p.filterDataResults(f.Result)

	// We don't aggregate if typeID is specified.
	if p.TypeID != "" && len(p.dataResult) > 0 {
		p.SparkGroups = append(p.SparkGroups, sparkGroup{Rows: make([]sparkRow, 0)})
	}

	for _, r := range p.dataResult {
		s := dataStatusString(r)
		row := sparkRow{
			ID:       r.SiteID + " " + r.TypeID,
			Title:    r.SiteID + " " + removeTypeIDPrefix(r.TypeID),
			Link:     "/data/plot?siteID=" + r.SiteID + "&typeID=" + r.TypeID,
			SparkUrl: "/data/latency?siteID=" + r.SiteID + "&typeID=" + r.TypeID,
			Status:   s,
		}

		stored := false
		for i, g := range p.SparkGroups {
			// If we're not aggregating (p.TypeID != ""), we always add the
			// new row to the first group.
			if p.TypeID != "" || g.ID == r.TypeID {
				g.Rows = append(g.Rows, row)
				p.SparkGroups[i] = g
				stored = true
				break
			}
		}
		if stored {
			continue
		}

		// No matching group found, so create a new one.
		sg := sparkGroup{ID: r.TypeID, Title: removeTypeIDPrefix(r.TypeID), Rows: []sparkRow{row}}
		p.SparkGroups = append(p.SparkGroups, sg)
	}

	for i, g := range p.SparkGroups {
		sort.Sort(sparkRows(g.Rows))
		p.SparkGroups[i] = g
	}

	sort.Sort(sparkGroups(p.SparkGroups))
	return
}
func rethread() {
	if !threaded {
		sort.Sort(byUIDRev(msgs))
	} else {
		// Group messages by Gmail thread.
		byThread := make(map[uint64][]*imap.Msg)
		for _, m := range msgs {
			t := m.GmailThread
			byThread[t] = append(byThread[t], m)
		}

		// Sort each thread by UID, then sort the threads themselves, and
		// flatten the result back into msgs.
		var threadList [][]*imap.Msg
		for _, t := range byThread {
			sort.Sort(byUID(t))
			threadList = append(threadList, t)
		}
		sort.Sort(byUIDList(threadList))

		msgs = msgs[:0]
		for _, t := range threadList {
			msgs = append(msgs, t...)
		}
	}

	for i, m := range msgs {
		msgNum[m] = i
	}
}
func verifyEvents(t *testing.T, expected, actual []*PodLifecycleEvent) {
	sort.Sort(sortableEvents(expected))
	sort.Sort(sortableEvents(actual))
	if !reflect.DeepEqual(expected, actual) {
		t.Errorf("Actual events differ from the expected; diff:\n %v", diff.ObjectDiff(expected, actual))
	}
}
// isResourceUsageSimilarEnough is a simple comparison checking whether the
// minimums and maximums in both datasets are within allowedVariance.
// If this function changes, PrintToStdout should be updated accordingly.
func isResourceUsageSimilarEnough(left, right percentileUsageData, allowedVariance float64) bool {
	if len(left.cpuData) == 0 || len(left.memData) == 0 || len(right.cpuData) == 0 || len(right.memData) == 0 {
		glog.V(4).Infof("Length of at least one data vector is zero. Returning false for the lack of data.")
		return false
	}

	sort.Float64s(left.cpuData)
	sort.Float64s(right.cpuData)
	sort.Sort(int64arr(left.memData))
	sort.Sort(int64arr(right.memData))

	leftCPUMin := math.Max(left.cpuData[0], minCPU)
	leftCPUMax := math.Max(left.cpuData[len(left.cpuData)-1], minCPU)
	leftMemMin := max(left.memData[0], minMem)
	leftMemMax := max(left.memData[len(left.memData)-1], minMem)
	rightCPUMin := math.Max(right.cpuData[0], minCPU)
	rightCPUMax := math.Max(right.cpuData[len(right.cpuData)-1], minCPU)
	rightMemMin := max(right.memData[0], minMem)
	rightMemMax := max(right.memData[len(right.memData)-1], minMem)

	return leq(leftCPUMin, allowedVariance*rightCPUMin) &&
		leq(rightCPUMin, allowedVariance*leftCPUMin) &&
		leq(leftCPUMax, allowedVariance*rightCPUMax) &&
		leq(rightCPUMax, allowedVariance*leftCPUMax) &&
		leq(float64(leftMemMin), allowedVariance*float64(rightMemMin)) &&
		leq(float64(rightMemMin), allowedVariance*float64(leftMemMin)) &&
		leq(float64(leftMemMax), allowedVariance*float64(rightMemMax)) &&
		leq(float64(rightMemMax), allowedVariance*float64(leftMemMax))
}
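sort.Float64s covers the CPU slices above, but the standard library has no equivalent helper for []int64, hence the int64arr wrapper. Its definition isn't shown in this snippet; presumably it is the usual three-method sort.Interface boilerplate, sketched here (since Go 1.8, sort.Slice makes such types unnecessary):

package main

import (
	"fmt"
	"sort"
)

// int64arr as such a type is usually defined: the minimal sort.Interface
// over a []int64.
type int64arr []int64

func (a int64arr) Len() int           { return len(a) }
func (a int64arr) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a int64arr) Less(i, j int) bool { return a[i] < a[j] }

func main() {
	mem := []int64{512, 128, 1024}
	sort.Sort(int64arr(mem))
	fmt.Println(mem) // [128 512 1024]
}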
// match returns true if the given arguments match the fields in this
// withdrawalInfo. For the requests slice, the order of the items does not
// matter.
func (wi *withdrawalInfo) match(requests []OutputRequest, startAddress WithdrawalAddress,
	lastSeriesID uint32, changeStart ChangeAddress, dustThreshold btcutil.Amount) bool {
	// Use reflect.DeepEqual to compare changeStart and startAddress as they're
	// structs that contain pointers and we want to compare their content and
	// not their address.
	if !reflect.DeepEqual(changeStart, wi.changeStart) {
		log.Debugf("withdrawal changeStart does not match: %v != %v", changeStart, wi.changeStart)
		return false
	}
	if !reflect.DeepEqual(startAddress, wi.startAddress) {
		log.Debugf("withdrawal startAddr does not match: %v != %v", startAddress, wi.startAddress)
		return false
	}
	if lastSeriesID != wi.lastSeriesID {
		log.Debugf("withdrawal lastSeriesID does not match: %v != %v", lastSeriesID, wi.lastSeriesID)
		return false
	}
	if dustThreshold != wi.dustThreshold {
		log.Debugf("withdrawal dustThreshold does not match: %v != %v", dustThreshold, wi.dustThreshold)
		return false
	}
	r1 := make([]OutputRequest, len(requests))
	copy(r1, requests)
	r2 := make([]OutputRequest, len(wi.requests))
	copy(r2, wi.requests)
	sort.Sort(byOutBailmentID(r1))
	sort.Sort(byOutBailmentID(r2))
	if !reflect.DeepEqual(r1, r2) {
		log.Debugf("withdrawal requests do not match: %v != %v", requests, wi.requests)
		return false
	}
	return true
}
// isStatusEqual returns true if the given pod statuses are equal, false otherwise.
// This method sorts container statuses so order does not affect equality.
func isStatusEqual(oldStatus, status *api.PodStatus) bool {
	sort.Sort(kubetypes.SortedContainerStatuses(status.ContainerStatuses))
	sort.Sort(kubetypes.SortedContainerStatuses(oldStatus.ContainerStatuses))

	// TODO: More sophisticated equality checking.
	return reflect.DeepEqual(status, oldStatus)
}
// allocateCandidates creates a candidate list of all stores that can be used
// for allocating a new replica, ordered from the best to the worst. Only
// stores that meet the criteria are included in the list.
func allocateCandidates(
	sl StoreList,
	constraints config.Constraints,
	existing []roachpb.ReplicaDescriptor,
	existingNodeLocalities map[roachpb.NodeID]roachpb.Locality,
	deterministic bool,
) candidateList {
	var candidates candidateList
	for _, s := range sl.stores {
		if !preexistingReplicaCheck(s.Node.NodeID, existing) {
			continue
		}
		constraintsOk, preferredMatched := constraintCheck(s, constraints)
		if !constraintsOk {
			continue
		}
		if !maxCapacityCheck(s) {
			continue
		}
		constraintScore := diversityScore(s, existingNodeLocalities) + float64(preferredMatched)
		candidates = append(candidates, candidate{
			store:      s,
			valid:      true,
			constraint: constraintScore,
			capacity:   capacityScore(s),
		})
	}
	if deterministic {
		sort.Sort(sort.Reverse(byScoreAndID(candidates)))
	} else {
		sort.Sort(sort.Reverse(byScore(candidates)))
	}
	return candidates
}
func expectPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate, expected ...kubelet.PodUpdate) {
	for i := range expected {
		update := <-ch
		sort.Sort(sortedPods(update.Pods))
		sort.Sort(sortedPods(expected[i].Pods))

		// Make copies of the expected/actual update to compare all fields
		// except for "Pods", which are compared separately below.
		expectedCopy, updateCopy := expected[i], update
		expectedCopy.Pods, updateCopy.Pods = nil, nil
		if !api.Semantic.DeepEqual(expectedCopy, updateCopy) {
			t.Fatalf("Expected %#v, Got %#v", expectedCopy, updateCopy)
		}

		if len(expected[i].Pods) != len(update.Pods) {
			t.Fatalf("Expected %#v, Got %#v", expected[i], update)
		}

		// Compare pods one by one. This is necessary because we don't want to
		// compare local annotations.
		for j := range expected[i].Pods {
			if podsDifferSemantically(expected[i].Pods[j], update.Pods[j]) {
				t.Fatalf("Expected %#v, Got %#v", expected[i].Pods[j], update.Pods[j])
			}
		}
	}
	expectNoPodUpdate(t, ch)
}
// compareTables outputs SQL to make the table names match between DBs.
func compareTables(conn1 *sql.DB, conn2 *sql.DB) {
	sql := `
SELECT table_name
    , table_type
    , is_insertable_into
FROM information_schema.tables
WHERE table_schema = 'public'
AND (table_type = 'BASE TABLE'
    --OR table_type = 'VIEW'
    )
ORDER BY table_name COLLATE "C" ASC;`

	rowChan1, _ := pgutil.QueryStrings(conn1, sql)
	rowChan2, _ := pgutil.QueryStrings(conn2, sql)

	rows1 := make(TableRows, 0)
	for row := range rowChan1 {
		rows1 = append(rows1, row)
	}
	sort.Sort(rows1)

	rows2 := make(TableRows, 0)
	for row := range rowChan2 {
		rows2 = append(rows2, row)
	}
	sort.Sort(rows2)

	// We have to explicitly type this as Schema here for some unknown reason.
	var schema1 Schema = &TableSchema{rows: rows1, rowNum: -1}
	var schema2 Schema = &TableSchema{rows: rows2, rowNum: -1}

	// Compare the tables.
	doDiff(schema1, schema2)
}