func (z *zpHeap) add(y *y, s *State) {
	var a []string
	for sym := range s.actions {
		a = append(a, sym.Name)
	}
	sort.Strings(a)
	for _, nm := range a {
		sym := y.Syms[nm]
		actions := s.actions[sym]
		action := actions[0]
		if action.kind == 's' {
			heap.Push(z, &zpElem{s, y.States[action.arg], sym, s.distance + len(sym.MinString())})
		}
	}
	a = a[:0]
	for sym := range s.gotos {
		a = append(a, sym.Name)
	}
	sort.Strings(a)
	for _, nm := range a {
		sym := y.Syms[nm]
		action := s.gotos[sym]
		heap.Push(z, &zpElem{s, y.States[action.arg], sym, s.distance + len(sym.MinString())})
	}
}
// Ingests an alert into the memoryAlertManager and creates a new
// AggregationInstance for it, if necessary.
func (s *memoryAlertManager) ingest(a *Alert) {
	fp := a.Fingerprint()

	agg, ok := s.aggregates[fp]
	if !ok {
		agg = &AlertAggregate{
			Created: time.Now(),
		}
		agg.Ingest(a)

		for _, r := range s.rules {
			if r.Handles(agg.Alert) {
				agg.Rule = r
				break
			}
		}

		s.aggregates[fp] = agg
		heap.Push(&s.aggregatesByLastRefreshed, agg)
		heap.Push(&s.aggregatesByNextNotification, agg)

		s.needsNotificationRefresh = true
	} else {
		agg.Ingest(a)
		heap.Init(&s.aggregatesByLastRefreshed)
	}
}
func TestScanCursors(t *testing.T) {
	s := ScanCursors{}
	heap.Init(&s)
	heap.Push(&s, &testScanCursor{
		key: "b",
	})
	heap.Push(&s, &testScanCursor{
		key: "a",
	})
	heap.Push(&s, &testScanCursor{
		key: "c",
	})
	a := heap.Pop(&s).(*testScanCursor)
	if a.key != "a" {
		t.Errorf("expected a")
	}
	b := heap.Pop(&s).(*testScanCursor)
	if b.key != "b" {
		t.Errorf("expected b")
	}
	c := heap.Pop(&s).(*testScanCursor)
	if c.key != "c" {
		t.Errorf("expected c")
	}
}
// Dijkstra's Algorithm is essentially a goalless Uniform Cost Search. That is, its results are
// roughly equivalent to running A* with the Null Heuristic from a single node to every other node
// in the graph -- though it's a fair bit faster, because running A* that way recomputes things it
// has already computed on every call. Note that you won't necessarily get the same path you would
// get from A*, but the cost is guaranteed to be the same (that is, if multiple shortest paths
// exist, you may get a different shortest path).
//
// Like A*, Dijkstra's Algorithm won't run correctly with negative edge weights -- use
// Bellman-Ford for that instead.
//
// Dijkstra's algorithm usually returns only a cost map; however, since the data is available,
// this version also reconstructs the path to every node.
func Dijkstra(source Node, graph Graph, Cost func(Node, Node) float64) (paths map[int][]Node, costs map[int]float64) {
	if Cost == nil {
		if cgraph, ok := graph.(Coster); ok {
			Cost = cgraph.Cost
		} else {
			Cost = UniformCost
		}
	}

	nodes := graph.NodeList()
	openSet := &aStarPriorityQueue{}
	closedSet := set.NewSet()
	costs = make(map[int]float64, len(nodes)) // May overallocate, will change if it becomes a problem
	predecessor := make(map[int]Node, len(nodes))
	nodeIDMap := make(map[int]Node, len(nodes))
	heap.Init(openSet)

	// I don't think we actually need the init step since I use a map check rather than inf to check if we're done
	/*for _, node := range nodes {
		if node == source {
			heap.Push(openSet, internalNode{node, 0, 0})
			costs[node] = 0
		} else {
			heap.Push(openSet, internalNode{node, math.MaxFloat64, math.MaxFloat64})
			predecessor[node] = -1
		}
	}*/

	costs[source.ID()] = 0
	heap.Push(openSet, internalNode{source, 0, 0})

	for openSet.Len() != 0 {
		node := heap.Pop(openSet).(internalNode)
		/* if _, ok := costs[node.int]; !ok {
			break
		} */

		if closedSet.Contains(node.ID()) { // As in A*, prevents us from having to slowly search and reorder the queue
			continue
		}

		nodeIDMap[node.ID()] = node
		closedSet.Add(node.ID())

		for _, neighbor := range graph.Successors(node) {
			tmpCost := costs[node.ID()] + Cost(node, neighbor)
			// Relax the edge: record the new, cheaper cost rather than the stale one.
			if cost, ok := costs[neighbor.ID()]; !ok || tmpCost < cost {
				costs[neighbor.ID()] = tmpCost
				predecessor[neighbor.ID()] = node
				heap.Push(openSet, internalNode{neighbor, tmpCost, tmpCost})
			}
		}
	}

	paths = make(map[int][]Node, len(costs))
	for node := range costs { // Only reconstruct the path if one exists
		paths[node] = rebuildPath(predecessor, nodeIDMap[node])
	}
	return paths, costs
}
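// The Dijkstra implementation above assumes an aStarPriorityQueue of internalNode
// values ordered by fscore; neither type is defined in this snippet. The following
// is only a minimal sketch of shapes that would satisfy the calls made above
// (a min-heap on fscore), not the package's original definitions.
type internalNode struct {
	Node
	gscore, fscore float64
}

type aStarPriorityQueue []internalNode

func (pq aStarPriorityQueue) Len() int            { return len(pq) }
func (pq aStarPriorityQueue) Less(i, j int) bool  { return pq[i].fscore < pq[j].fscore } // cheapest node first
func (pq aStarPriorityQueue) Swap(i, j int)       { pq[i], pq[j] = pq[j], pq[i] }
func (pq *aStarPriorityQueue) Push(x interface{}) { *pq = append(*pq, x.(internalNode)) }
func (pq *aStarPriorityQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	it := old[n-1]
	*pq = old[:n-1]
	return it
}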
// DerivePath returns the cheapest way to satisfy the MSP (the one with the minimal number of delegations).
//
// ok:    True if the MSP can be satisfied with the current delegations; false if not.
// names: The names in the top-level threshold gate that need to be delegated.
// locs:  The index in the threshold gate for each name.
// trace: All names that must be delegated for this gate to be satisfied.
func (m MSP) DerivePath(db UserDatabase) (ok bool, names []string, locs []int, trace []string) {
	ts := &TraceSlice{}

	for i, cond := range m.Conds {
		switch cond := cond.(type) {
		case Name:
			if db.CanGetShare(cond.string) {
				heap.Push(ts, TraceElem{
					i,
					[]string{cond.string},
					[]string{cond.string},
				})
			}
		case Formatted:
			sok, _, _, strace := MSP(cond).DerivePath(db)
			if sok {
				heap.Push(ts, TraceElem{i, []string{}, strace})
			}
		}

		if (*ts).Len() > m.Min { // If we can otherwise satisfy the threshold gate,
			heap.Pop(ts) // drop the TraceElem with the heaviest trace (the one that requires the most delegations).
		}
	}

	ok = (*ts).Len() >= m.Min
	locs, names, trace = ts.Compact()

	return
}
func TestRemove(t *testing.T) {
	var count int
	if golangcafeheap.Len() <= 0 {
		// Because the heap ends up with 0 items after Add…
		heap.Push(golangcafeheap, GolangCafe{Name: "ttyokoyama", Priority: 1, Count: 13, Index: 2})
		heap.Push(golangcafeheap, GolangCafe{Name: "taknb2nch", Priority: 2, Count: 13, Index: 3})
		heap.Push(golangcafeheap, GolangCafe{Name: "qt_luigi", Priority: 3, Count: 13, Index: 4})
		heap.Push(golangcafeheap, GolangCafe{Name: "tam_x", Priority: 4, Count: 1, Index: 1})
	} else {
		count = golangcafeheap.Len()
	}

	heap.Remove(golangcafeheap, 2)

	if golangcafeheap.Len() != (count - 1) {
		t.Errorf("golangcafeheap.Len() = %d, %d", golangcafeheap.Len(), count)
	}

	n := golangcafeheap.Len()
	for i := 0; i < n; i++ {
		item := golangcafeheap.Pop()
		golangcafe := item.(*GolangCafe)
		t.Logf("Name: %s Priority: %d Count: %d Index: %d",
			golangcafe.Name, golangcafe.Priority, golangcafe.Count, golangcafe.Index)
	}
}
func TestPriorityQueueInit(t *testing.T) {
	items := map[string]int{
		"c": 5, "d": 3, "e": 0, "b": 15,
	}
	pq := JobQueue{}
	heap.Init(&pq)
	for id, pri := range items {
		heap.Push(&pq, newJob(id, pri))
	}

	// Push in new jobs with a high and a low priority.
	heap.Push(&pq, newJob("a", 99))
	heap.Push(&pq, newJob("z", -19))

	// Make sure the ordering is correct.
	target_order := []string{"a", "b", "c", "d", "e", "z"}
	i := 0
	for pq.Len() > 0 {
		j := heap.Pop(&pq).(*Job)
		t.Logf("Found job:%s pri:%f", j.Id, j.priority)
		if j.Id != target_order[i] {
			t.Errorf("Job id %s expected, but found %s at position %d priority %f", target_order[i], j.Id, i, j.priority)
		}
		i++
	}
}
func startNextJob(event *models.Event, node *models.Node, job *models.Job, scheduler scheduling.Scheduler, eventQueue *models.EventQueue) {
	node.CurJob = job
	node.EstCompletion = event.Time + scheduler.GetAllocation(job)
	completeTime := event.Time + job.RealExec

	if job.AbsoluteDeadline() < node.EstCompletion && job.AbsoluteDeadline() < completeTime {
		heap.Push(eventQueue, &models.Event{
			Job:  job,
			Time: job.AbsoluteDeadline(),
			Node: node,
			Type: models.Miss,
		})
	} else if node.EstCompletion < completeTime {
		heap.Push(eventQueue, &models.Event{
			Job:  job,
			Time: node.EstCompletion,
			Node: node,
			Type: models.Stretch,
		})
	} else {
		heap.Push(eventQueue, &models.Event{
			Job:  event.Job,
			Time: completeTime,
			Node: node,
			Type: models.Complete,
		})
	}
}
func TestInit(t *testing.T) {
	x = []string{"x", "y", "z"}
	a = []string{"a", "b", "c"}
	//fmt.Println(x, a)
	hh := NewHistory(20)
	heap.Init(hh)
	//hh.PrintDump()
	heap.Push(hh, x)
	//hh.Add(x)
	//hh.PrintDump()
	if hh.heap[0][0] != "x" {
		t.Errorf("First element incorrect: %#v", hh.heap[0][0])
	}
	heap.Push(hh, a)
	//hh.Add(a)
	//fmt.Printf("hh: %#v\n", hh)
	if hh == nil {
		t.Errorf("First history stored is: %#v", hh)
	}
	if hh.heap[0][0] != "a" {
		t.Errorf("First element incorrect: %#v", hh.heap[0][0])
	}
}
func (src Point) ShortestPath2(dst Point, m *Map, fp io.Writer) ([]Point, os.Error) {
	h := &myHeap2{}
	heap.Init(h)
	heap.Push(h, NewNode(src, dst))

	// Each entry points to the previous point in the path.
	seen := make(map[Location]bool)
	expansions := make([]Point, 0)
	popped := make([]*Node2, 0)
	defer func() {
		fmt.Fprintf(fp, "popped = %s;\n\n", nodes2js(popped))
		fmt.Fprintf(fp, "expansions= %s;\n\n", points2js(expansions))
	}()

	for h.Len() != 0 {
		n := heap.Pop(h).(*Node2)
		popped = append(popped, n)
		for n2 := range n.expand(m, dst, seen) {
			log.Printf("%s -> %s", n, n2)
			expansions = append(expansions, n2.Point)
			if n2.Equals(dst) {
				return n2.path(), nil
			}
			heap.Push(h, n2)
		}
	}
	return nil, fmt.Errorf("no path found")
}
// Cluster by repeatedly splitting clusters.
// Use a heap as a priority queue for picking clusters to split.
// The rule is to split the cluster with the most pixels.
// Terminate when the desired number of clusters has been populated
// or when clusters cannot be further split.
func (qz *quantizer) cluster() {
	pq := new(queue)
	// Initial cluster. Populated at this point, but not analyzed.
	c := &qz.cs[0]
	var m uint32
	for i := 1; ; {
		// Only enqueue clusters that can be split.
		if qz.setWidestChannel(c) {
			heap.Push(pq, c)
		}
		// If no clusters have any color variation, mark the end of the
		// cluster list and quit early.
		if len(*pq) == 0 {
			qz.cs = qz.cs[:i]
			break
		}
		s := heap.Pop(pq).(*cluster) // get cluster to split
		m = qz.medianCut(s)
		c = &qz.cs[i] // set c to new cluster
		i++
		qz.split(s, c, m) // split s into c and s at value m
		// Normal exit is when all clusters are populated.
		if i == len(qz.cs) {
			break
		}
		if qz.setWidestChannel(s) {
			heap.Push(pq, s) // return s to queue
		}
	}
}
func TestEventQueue(t *testing.T) {
	queue := make(EventQueue, 0, 4)
	heap.Push(&queue, &Event{Y: 5})
	heap.Push(&queue, &Event{Y: 3})
	heap.Push(&queue, &Event{Y: 7})
	heap.Push(&queue, &Event{Y: 1})

	var e *Event
	e = heap.Pop(&queue).(*Event)
	if e.Y != 7 {
		t.Fatalf("Wanted priority 7, got %v", e.Y)
	}
	e = heap.Pop(&queue).(*Event)
	if e.Y != 5 {
		t.Fatalf("Wanted priority 5, got %v", e.Y)
	}
	e = heap.Pop(&queue).(*Event)
	if e.Y != 3 {
		t.Fatalf("Wanted priority 3, got %v", e.Y)
	}
	e = heap.Pop(&queue).(*Event)
	if e.Y != 1 {
		t.Fatalf("Wanted priority 1, got %v", e.Y)
	}
}
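// TestEventQueue above pops events in descending Y order, which implies EventQueue
// is a max-heap keyed on Event.Y. The queue's implementation is not part of this
// snippet; the sketch below is an assumed heap.Interface that would make the test
// pass (only the Y field used by the test is shown), not the original code.
type Event struct {
	Y int
}

type EventQueue []*Event

func (q EventQueue) Len() int            { return len(q) }
func (q EventQueue) Less(i, j int) bool  { return q[i].Y > q[j].Y } // max-heap on Y
func (q EventQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *EventQueue) Push(x interface{}) { *q = append(*q, x.(*Event)) }
func (q *EventQueue) Pop() interface{} {
	old := *q
	n := len(old)
	e := old[n-1]
	*q = old[:n-1]
	return e
}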
func (self *esSampler) Update(d interface{}) {
	self.lock.Lock()
	defer self.lock.Unlock()

	u := rand.Float64()
	key := math.Pow(u, 1.0/self.weight(d))

	if self.samples.Len() < self.maxSize {
		heap.Push(self.samples, esSampleItem{
			data: d,
			key:  key,
		})
		return
	}

	s := *(self.samples)
	min := s[0]
	// The key of the new item is larger than the smallest key currently in the
	// sample, so evict that minimum and add the new item.
	if key > min.key {
		heap.Pop(self.samples)
		heap.Push(self.samples, esSampleItem{
			data: d,
			key:  key,
		})
	}
}
func createTreeFromFrequencies(frequencies []uint, sizes_ []byte, ranks []byte) error {
	// Create Huffman tree of (present) symbols
	queue := make(HuffmanPriorityQueue, 0)
	for i := range ranks {
		heap.Push(&queue, &HuffmanNode{symbol: ranks[i], weight: frequencies[ranks[i]]})
	}

	for queue.Len() > 1 {
		// Extract 2 minimum nodes, merge them and enqueue result
		lNode := heap.Pop(&queue).(*HuffmanNode)
		rNode := heap.Pop(&queue).(*HuffmanNode)
		// Setting the symbol is critical to resolve ties during node sorting!
		heap.Push(&queue, &HuffmanNode{weight: lNode.weight + rNode.weight, left: lNode, right: rNode, symbol: lNode.symbol})
	}

	rootNode := heap.Pop(&queue).(*HuffmanNode)
	var err error

	if len(ranks) == 1 {
		sizes_[rootNode.symbol] = byte(1)
	} else {
		err = fillSizes(rootNode, 0, sizes_)
	}

	return err
}
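// createTreeFromFrequencies relies on HuffmanPriorityQueue being a min-heap on node
// weight, and its comment about resolving ties suggests the symbol participates in
// the ordering. Both types are defined elsewhere in the package; the sketch below is
// only an assumed shape consistent with how they are used above, not the original code.
type HuffmanNode struct {
	symbol      byte
	weight      uint
	left, right *HuffmanNode
}

type HuffmanPriorityQueue []*HuffmanNode

func (q HuffmanPriorityQueue) Len() int { return len(q) }
func (q HuffmanPriorityQueue) Less(i, j int) bool {
	// Order by weight, breaking ties on the symbol value.
	if q[i].weight != q[j].weight {
		return q[i].weight < q[j].weight
	}
	return q[i].symbol < q[j].symbol
}
func (q HuffmanPriorityQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *HuffmanPriorityQueue) Push(x interface{}) { *q = append(*q, x.(*HuffmanNode)) }
func (q *HuffmanPriorityQueue) Pop() interface{} {
	old := *q
	n := len(old)
	node := old[n-1]
	*q = old[:n-1]
	return node
}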
func (fg FlowGenerator) makeFlows(logger chan LogEvent) EventQueue {
	lambda := (fg.bandwidth * 1e9 * fg.load) / (fg.cdf.meanFlowSize() * 1500 * 8)
	lambda /= 143

	creationQueue := make(EventQueue, 0)
	defer func() {
		creationQueue = nil
	}()

	heap.Init(&creationQueue)
	for i := 0; i < NUM_HOSTS; i++ {
		for j := 0; j < NUM_HOSTS; j++ {
			if i == j {
				continue
			}
			f := &Flow{Start: 1e6 + (rand.ExpFloat64()/lambda)*1e6, Size: fg.cdf.value(), Source: uint8(i), Dest: uint8(j), LastTime: 0, FinishEvent: nil}
			heap.Push(&creationQueue, makeCreationEvent(f))
		}
	}

	eventQueue := make(EventQueue, 0)
	for uint(len(eventQueue)) < fg.numFlows {
		ev := heap.Pop(&creationQueue).(*Event)
		logger <- LogEvent{Time: 0, Type: LOG_FLOW_GEN, Flow: ev.Flow}
		eventQueue = append(eventQueue, makeArrivalEvent(ev.Flow))
		nextTime := ev.Time + (rand.ExpFloat64()/lambda)*1e6
		f := &Flow{Start: nextTime, Size: fg.cdf.value(), Source: ev.Flow.Source, Dest: ev.Flow.Dest, LastTime: 0, FinishEvent: nil}
		heap.Push(&creationQueue, makeCreationEvent(f))
	}
	return eventQueue
}
func TestLen(t *testing.T) {
	x = []string{"x", "y", "z"}
	a = []string{"a", "b", "c"}
	c := []string{"d", "e", "f"}
	g := []string{"j", "k", "l"}

	hh := NewHistory(20)
	heap.Init(hh)
	if hh.Len() != 0 {
		t.Errorf("Length should be zero! %#v", hh)
	}
	heap.Push(hh, x)
	heap.Push(hh, a)
	heap.Push(hh, c)
	heap.Push(hh, g)
	if hh.Len() != 4 {
		t.Errorf("Error: hh.Len() not 4! %#v", hh)
	}
	for i := 0; i < 21; i++ {
		heap.Push(hh, g)
	}
	hh.PrintDump()
	if hh.Len() != 20 {
		hh.PrintDump()
		t.Errorf("Error: Length grew beyond its supposed limit: %#v", hh.Len())
	}
}
func TestGet(t *testing.T) {
	if golangcafeheap.Len() <= 0 {
		// Because the heap ends up with 0 items after Add…
		heap.Push(golangcafeheap, GolangCafe{Name: "ttyokoyama", Priority: 1, Count: 13, Index: 2})
		heap.Push(golangcafeheap, GolangCafe{Name: "taknb2nch", Priority: 2, Count: 13, Index: 3})
		heap.Push(golangcafeheap, GolangCafe{Name: "qt_luigi", Priority: 3, Count: 13, Index: 4})
		heap.Push(golangcafeheap, GolangCafe{Name: "tam_x", Priority: 4, Count: 1, Index: 1})
	}

	if golangcafeheap.Len() != 4 {
		t.Errorf("golangcafeheap length = %d", golangcafeheap.Len())
	}

	popItem := heap.Pop(golangcafeheap)
	if golangcafeheap.Len() != 3 {
		t.Errorf("golangcafeheap length = %d", golangcafeheap.Len())
	}

	golangcafe := popItem.(*GolangCafe)
	if golangcafe.Name != "ttyokoyama" {
		t.Errorf("golangcafe.Name = %s", golangcafe.Name)
	}

	t.Logf("Name: %s Priority: %d Count: %d Index: %d",
		golangcafe.Name, golangcafe.Priority, golangcafe.Count, golangcafe.Index)
}
func TestPop(t *testing.T) {
	x = []string{"x", "y", "z"}
	a = []string{"a", "b", "c"}
	//fmt.Println(x, a)
	hh := NewHistory(20)
	heap.Init(hh)
	heap.Push(hh, x)
	heap.Push(hh, x)
	heap.Push(hh, a)
	/*
		hh.Add(x)
		hh.Add(a)
	*/
	lasti := hh.lastIndex()
	if lasti != 2 {
		hh.PrintDump()
		t.Errorf("Last index not being calculated correctly %#v", lasti)
	}
	popped := hh.Pop().([]string)
	//fmt.Printf("Popped: %#v", popped)
	if popped[0] != "x" {
		t.Errorf("Wrong element popped from stack: %#v\n", popped)
	}
	if hh.Len() != 2 {
		hh.PrintDump()
		t.Errorf("Too many items in heap? %#v\n", hh)
	}
}
func (h *metricHeap) AddMetric(m *metric) {
	// The latest inputs most likely refer to recent items, so search the heap from the back.
	for i := len(h.values) - 1; i >= 0; i-- {
		// Item found: update its summary.
		if h.values[i].Timestamp.Equal(m.timestamp) {
			h.values[i].Update(m)
			return
		}
		if m.timestamp.Before(h.values[i].Timestamp) {
			continue
		} else {
			// There are older entries in the list: insert a new summary.
			ms := new(metricSummary)
			ms.Update(m)
			heap.Push(h, *ms)
			// Check that we have not exceeded the size.
			if uint(len(h.values)) > h.size {
				heap.Pop(h)
			}
			return
		}
	}
	// The item is older than everything in the heap; at this stage it only gets
	// inserted if the heap still has a free slot.
	if uint(len(h.values)) < h.size {
		ms := new(metricSummary)
		ms.Update(m)
		heap.Push(h, *ms)
	}
}
func (self *Session) handleDataPacket(data []byte) {
	atomic.AddUint32(&self.incomingDataCount, uint32(1))

	var serial uint32
	binary.Read(bytes.NewReader(data[:4]), binary.BigEndian, &serial)
	data = data[4:]
	packet := Packet{serial: serial, data: data}

	if serial == self.incomingSerial {
		self.pushData(packet)
		self.incomingSerial++
	} else if serial > self.incomingSerial {
		heap.Push(self.packetQueue, &packet)
	}
	if serial > self.maxIncomingSerial {
		self.maxIncomingSerial = serial
	}

	for self.packetQueue.Len() > 0 {
		next := heap.Pop(self.packetQueue).(*Packet)
		if next.serial == self.incomingSerial {
			self.pushData(*next)
			self.incomingSerial++
		} else {
			heap.Push(self.packetQueue, next)
			break
		}
	}
}
func TestPriorityQueueUpdate(t *testing.T) {
	items := map[string]int{
		"c": 5, "d": 3, "e": 0, "b": 15,
	}
	pq := JobQueue{}
	heap.Init(&pq)
	for id, pri := range items {
		heap.Push(&pq, newJob(id, pri))
	}

	j := newJob("z", -19)
	heap.Push(&pq, j)

	// Make j's last run further back in time to act like we increased its priority.
	newt := time.Unix(1437856044-int64(1000), 0)
	j.t_last_run = &newt
	pq.Update(j)

	top := heap.Pop(&pq).(*Job)
	t.Logf("Found job:%s pri:%f", top.Id, top.priority)
	if top.Id != "z" {
		t.Errorf("z was not the top job of the queue; found %s instead", top.Id)
	}
}
func (q *DelayQueue) Add(d Delayed) {
	deadline := extractFromDelayed(d)

	q.lock.Lock()
	defer q.lock.Unlock()

	// Re-add using the original deadline computed from the original delay.
	var readd func(*qitem)
	readd = func(qp *qitem) {
		q.lock.Lock()
		defer q.lock.Unlock()
		heap.Push(&q.queue, &qitem{
			value:    d,
			priority: deadline,
			readd:    readd,
		})
		q.cond.Broadcast()
	}

	heap.Push(&q.queue, &qitem{
		value:    d,
		priority: deadline,
		readd:    readd,
	})
	q.cond.Broadcast()
}
func (this *cPool) Push(conn *psmtpConn) []*psmtpConn {
	// Evict if needed.
	var result []*psmtpConn
	if this == nil {
		return result
	}
	if this.capacity > 0 {
		for {
			if this.all.Len() < this.capacity {
				break
			}
			if v, ok := this.PopAny(); ok {
				result = append(result, v)
			}
		}
		item := cqItem{conn: conn}
		heap.Push(&this.all, &item)
		sub, ok := this.byAuth[conn.auth]
		if !ok {
			sub = make(_cQueue, 0, this.capacity)
			heap.Init(&sub)
		}
		heap.Push(&sub, &item)
		this.byAuth[conn.auth] = sub
	} else {
		result = append(result, conn)
	}
	return result
}
func TestPriorityQueue(t *testing.T) {
	pq := New()
	heap.Init(pq)
	heap.Push(pq, &Item{Value: "hello3", Priority: 3})
	heap.Push(pq, &Item{Value: "hello1", Priority: 1})
	heap.Push(pq, &Item{Value: "hello8", Priority: 8})
	assert.Equal(t, 3+1+8, pq.PrioritySum())

	item := pq.Peek()
	assert.Equal(t, "hello8", item.(*Item).Value.(string))
	item = pq.Peek()
	assert.Equal(t, "hello8", item.(*Item).Value.(string))

	item = heap.Pop(pq)
	assert.Equal(t, "hello8", item.(*Item).Value.(string))
	assert.Equal(t, 3+1, pq.PrioritySum())
	item = heap.Pop(pq)
	assert.Equal(t, "hello3", item.(*Item).Value.(string))
	item = heap.Pop(pq)
	assert.Equal(t, "hello1", item.(*Item).Value.(string))
	assert.Equal(t, 0, pq.Len())
}
func addMinPathLeft(graph *m.Graph) {
	dp := &m.DijkstraPrio{}
	heap.Init(dp)

	visited := make(map[*m.Node]bool)
	endNode := graph.EndNode()
	endNode.SetMinPathLeft(0)
	visited[endNode] = true

	for _, edge := range endNode.ToEdges() {
		node := edge.From()
		node.SetMinPathLeft(edge.FastestTime())
		heap.Push(dp, node)
	}

	if dp.Len() > 0 {
		for node := heap.Pop(dp).(*m.Node); dp.Len() > 0; node = heap.Pop(dp).(*m.Node) {
			visited[node] = true
			for _, edge := range node.ToEdges() {
				innerNode := edge.From()
				if !visited[innerNode] {
					innerNode.SetMinPathLeft(edge.FastestTime() + node.MinPathLeft())
					heap.Push(dp, innerNode)
				}
			}
		}
	}
}
// ConsiderWeighted lets the sample inspect a new value with a given positive
// weight. A weight of one corresponds to the unweighted reservoir sampling
// algorithm. A non-positive weight will lead to the item being rejected without
// having been observed. To avoid numerical instabilities, it is advisable to
// stay away from zero and infinity, or more generally from regions in which
// computing x**1/weight may be ill-behaved.
func (rs *WeightedReservoirSample) ConsiderWeighted(value interface{}, weight float64) {
	if weight <= 0 {
		glog.Warningf("reservoir sample received non-positive weight %f", weight)
		return
	}

	h := rs.Heap
	wv := WeightedValue{
		Value: value,
		key:   rs.makeKey(weight),
	}

	if h.Len() < rs.size {
		heap.Push(h, wv)
		if rs.threshold == 0 || wv.key < rs.threshold {
			rs.threshold = wv.key
		}
		return
	}

	if wv.key > rs.threshold {
		// Remove the element with the threshold key.
		heap.Pop(h)
		// Add in the new element (which has a higher key).
		heap.Push(h, wv)
		// Update the threshold to the new minimum key in the heap.
		twv := heap.Pop(h).(WeightedValue)
		rs.threshold = twv.key
		heap.Push(h, twv)
	}
}
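// ConsiderWeighted's doc comment refers to computing x**1/weight, which matches the
// Efraimidis-Spirakis style key also visible in the esSampler snippet earlier
// (key = u^(1/weight) for a uniform u). makeKey is defined elsewhere in the package;
// the sketch below is only an assumed version of such a helper (it relies on the
// math and math/rand packages), not the original implementation.
func (rs *WeightedReservoirSample) makeKey(weight float64) float64 {
	u := rand.Float64()            // uniform in [0, 1)
	return math.Pow(u, 1.0/weight) // heavier items tend to draw larger keys and survive longer
}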
func (x *TopApps) Mark(ApplicationId string, z time.Time) {
	t := z.Unix()

	x.Lock()
	defer x.Unlock()

	y := x.m[ApplicationId]
	if y != nil {
		z1 := heap.Remove(&x.t, y.ti).(*topAppsEntry)
		if z1 != y {
			panic("z1 != y")
		}
		z2 := heap.Remove(&x.n, y.ni).(*topAppsEntry)
		if z2 != y {
			panic("z2 != y")
		}
	} else {
		// New entry
		y = &topAppsEntry{ApplicationId: ApplicationId}
		x.m[ApplicationId] = y
	}

	y.Mark(t)

	heap.Push(&x.t, y)
	heap.Push(&x.n, y)
}
// This example inserts some items into a PriorityQueue, manipulates an item,
// and then removes the items in priority order.
func Example_priorityQueue() {
	// Some items and their priorities.
	items := map[string]int{
		"banana": 3, "apple": 2, "pear": 4,
	}

	// Create a priority queue and put the items in it.
	pq := &PriorityQueue{}
	heap.Init(pq)
	for value, priority := range items {
		item := &Item{
			value:    value,
			priority: priority,
		}
		heap.Push(pq, item)
	}

	// Insert a new item and then modify its priority.
	item := &Item{
		value:    "orange",
		priority: 1,
	}
	heap.Push(pq, item)
	pq.update(item, item.value, 5)

	// Take the items out; they arrive in decreasing priority order.
	for pq.Len() > 0 {
		item := heap.Pop(pq).(*Item)
		fmt.Printf("%.2d:%s ", item.priority, item.value)
	}
	// Output:
	// 05:orange 04:pear 03:banana 02:apple
}
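// The example above assumes PriorityQueue and Item types shaped like the ones in the
// container/heap documentation: a max-heap on priority with an index field that lets
// update call heap.Fix. They are not defined in this snippet; the sketch below mirrors
// that documented shape and is an assumption rather than the original code.
type Item struct {
	value    string
	priority int
	index    int // maintained by the heap.Interface methods; needed by update/heap.Fix
}

type PriorityQueue []*Item

func (pq PriorityQueue) Len() int { return len(pq) }

func (pq PriorityQueue) Less(i, j int) bool {
	// Pop must give the highest-priority item, so use greater-than here.
	return pq[i].priority > pq[j].priority
}

func (pq PriorityQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].index = i
	pq[j].index = j
}

func (pq *PriorityQueue) Push(x interface{}) {
	item := x.(*Item)
	item.index = len(*pq)
	*pq = append(*pq, item)
}

func (pq *PriorityQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]
	old[n-1] = nil // avoid a lingering reference
	*pq = old[:n-1]
	return item
}

// update modifies the priority and value of an Item in place and restores heap order.
func (pq *PriorityQueue) update(item *Item, value string, priority int) {
	item.value = value
	item.priority = priority
	heap.Fix(pq, item.index)
}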
/* Add a value with a timestamp to the SlidingHyperLogLog. */
func (shll *SlidingHyperLogLog) Add(timestamp uint32, value []byte) {
	R, j := shll.getPosAndValue(value)

	Rmax := uint8(0)
	tmax := int(0)
	heap.Push(shll.lpfm[j], tR{timestamp, R})

	tmp2 := make([]tR, shll.lpfm[j].Len(), shll.lpfm[j].Len())
	for shll.lpfm[j].Len() > 0 {
		item := heap.Pop(shll.lpfm[j]).(tR)
		tmp2[shll.lpfm[j].Len()] = item
	}

	for _, value := range tmp2 {
		t := value.t
		R := value.R
		if tmax == 0 {
			tmax = int(t)
		}
		if int(t) < (tmax - int(shll.window)) {
			break
		}
		if R > Rmax {
			Rmax = R
			heap.Push(shll.lpfm[j], value)
		}
		if uint(shll.lpfm[j].Len()) == shll.n {
			break
		}
	}
}
func findMin(n, k, a, b, c, r int) int {
	m := []int{a}
	for i := 1; i < k; i++ {
		m = append(m, (b*m[i-1]+c)%r)
	}
	o := make([]int, k)
	copy(o, m)
	sort.Ints(o)

	h := &intHeap{}
	heap.Init(h)
	var x, y int
	for i := 0; i <= k; {
		if y >= k || x < o[y] {
			heap.Push(h, x)
			x++
			i++
		} else {
			if x == o[y] {
				x++
			}
			y++
		}
	}

	for len(m)+1 < n {
		p := heap.Pop(h).(int)
		if h.notyet(m[len(m)-k]) && notagain(m[len(m)-k+1:len(m)], m[len(m)-k]) {
			heap.Push(h, m[len(m)-k])
		}
		m = append(m, p)
	}
	return heap.Pop(h).(int)
}
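// findMin treats intHeap as a min-heap of ints; it also calls a package-specific
// helper notyet (and a free function notagain) that are not shown here and are not
// reproduced below. The following is only a standard min-heap sketch of the
// heap.Interface part that the calls above assume, not the original definition.
type intHeap []int

func (h intHeap) Len() int            { return len(h) }
func (h intHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) }
func (h *intHeap) Pop() interface{} {
	old := *h
	n := len(old)
	v := old[n-1]
	*h = old[:n-1]
	return v
}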