Example #1
func TestPush(t *testing.T) {
	h := &uint64Heap{}
	heap.Init(h)

	e := elem{val: 5}
	heap.Push(h, e)
	e.val = 3
	heap.Push(h, e)
	e.val = 4
	heap.Push(h, e)

	require.Equal(t, h.Len(), 3)
	require.EqualValues(t, (*h)[0].val, 3)

	e.val = 10
	(*h)[0] = e
	heap.Fix(h, 0)
	require.EqualValues(t, (*h)[0].val, 4)

	e.val = 11
	(*h)[0] = e
	heap.Fix(h, 0)
	require.EqualValues(t, (*h)[0].val, 5)

	e = heap.Pop(h).(elem)
	require.EqualValues(t, e.val, 5)

	e = heap.Pop(h).(elem)
	require.EqualValues(t, e.val, 10)

	e = heap.Pop(h).(elem)
	require.EqualValues(t, e.val, 11)

	require.Equal(t, h.Len(), 0)
}
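The heap type exercised by this test is not shown. A minimal sketch of what it presumably looks like: an elem value type and a uint64Heap slice implementing heap.Interface as a min-heap on val (the definitions in the original project may differ):

type elem struct {
	val uint64
}

// uint64Heap is a min-heap of elems, ordered by val.
type uint64Heap []elem

func (h uint64Heap) Len() int           { return len(h) }
func (h uint64Heap) Less(i, j int) bool { return h[i].val < h[j].val }
func (h uint64Heap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push and Pop use pointer receivers because they change the slice's length.
func (h *uint64Heap) Push(x interface{}) {
	*h = append(*h, x.(elem))
}

func (h *uint64Heap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}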
Example #2
// swapOrder exchanges the order values of the peers at positions i and j and
// restores the heap invariant at both positions.
func (ph *peerHeap) swapOrder(i, j int) {
	if i == j {
		return
	}

	ph.peerScores[i].order, ph.peerScores[j].order = ph.peerScores[j].order, ph.peerScores[i].order
	heap.Fix(ph, i)
	heap.Fix(ph, j)
}
Example #3
// Next returns ErrIteratorDone if the iterator is done.
func (iter *iterator) Next() error {
	if len(iter.cursors) <= 0 {
		return ErrIteratorDone
	}

	lastK := iter.cursors[0].k

	for len(iter.cursors) > 0 {
		next := iter.cursors[0]

		if next.ssIndex < 0 && next.pos < 0 {
			err := iter.lowerLevelIter.Next()
			if err == nil {
				next.k, next.v, err = iter.lowerLevelIter.Current()
				if err == nil && len(iter.cursors) > 1 {
					heap.Fix(iter, 0)
				}
			}

			if err != nil {
				iter.lowerLevelIter.Close()
				iter.lowerLevelIter = nil

				heap.Pop(iter)
			}
		} else {
			next.pos++
			if next.pos >= next.posEnd {
				heap.Pop(iter)
			} else {
				next.op, next.k, next.v =
					iter.ss.a[next.ssIndex].GetOperationKeyVal(next.pos)
				if next.op == 0 {
					heap.Pop(iter)
				} else if len(iter.cursors) > 1 {
					heap.Fix(iter, 0)
				}
			}
		}

		if len(iter.cursors) <= 0 {
			return ErrIteratorDone
		}

		if !iteratorBytesEqual(iter.cursors[0].k, lastK) {
			if !iter.iteratorOptions.IncludeDeletions &&
				iter.cursors[0].op == OperationDel {
				return iter.Next()
			}

			return nil
		}
	}

	return ErrIteratorDone
}
Example #4
// Insert adds an element to the stream to be tracked
func (s *Stream) Insert(x string, count int) error {
	h := fnv.New32a()
	_, err := h.Write([]byte(x))
	if err != nil {
		return err
	}
	xhash := int(h.Sum32()) % len(s.Alphas)

	// are we tracking this element?
	if idx, ok := s.K.M[x]; ok {
		s.K.Elts[idx].Count += count
		heap.Fix(&s.K, idx)
		return nil
	}

	// can we track more elements?
	if len(s.K.Elts) < s.N {
		// there is free space
		heap.Push(&s.K, Element{Key: x, Count: count})
		return nil
	}

	if s.Alphas[xhash]+count < s.K.Elts[0].Count {
		s.Alphas[xhash] += count
		return nil
	}

	// replace the current minimum element
	minKey := s.K.Elts[0].Key

	h.Reset()
	_, err = h.Write([]byte(minKey))
	if err != nil {
		return err
	}
	mkhash := int(h.Sum32()) % len(s.Alphas)
	s.Alphas[mkhash] = s.K.Elts[0].Count

	s.K.Elts[0].Key = x
	s.K.Elts[0].Error = s.Alphas[xhash]
	s.K.Elts[0].Count = s.Alphas[xhash] + count

	// we're no longer monitoring minKey
	delete(s.K.M, minKey)
	// but 'x' is at array position 0
	s.K.M[x] = 0

	heap.Fix(&s.K, 0)
	return nil
}
Example #5
// Grow adds x to the heap: it updates the entry in place if the document is
// already tracked, pushes it while there is spare capacity, and otherwise
// replaces the lowest-scoring result when x scores higher.
func (h *ResultHeap) Grow(x Result) {
	docId := x.Posting.DocId
	if i, ok := h.index[docId]; ok {
		h.rank[i] = x
	} else if h.Len() < h.cap {
		h.Push(x)
		heap.Fix(h, h.Len()-1)
	} else if h.rank[0].Score < x.Score {
		oldDocId := h.rank[0].Posting.DocId
		h.rank[0] = x
		delete(h.index, oldDocId)
		h.index[docId] = 0
		heap.Fix(h, 0)
	}
}
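Note that h.Push(x) here calls the heap.Interface method directly (which presumably appends to h.rank and records the new index), so the following heap.Fix(h, h.Len()-1) is what restores the heap ordering; the pair is equivalent to a single heap.Push(h, x).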
Example #6
func TestPush(t *testing.T) {
	h := &Uint64Heap{}
	heap.Init(h)

	e := Elem{Uid: 5}
	heap.Push(h, e)
	e.Uid = 3
	heap.Push(h, e)
	e.Uid = 4
	heap.Push(h, e)

	if h.Len() != 3 {
		t.Errorf("Expected len 3. Found: %v", h.Len())
	}
	if (*h)[0].Uid != 3 {
		t.Errorf("Expected min 3. Found: %+v", (*h)[0])
	}
	e.Uid = 10
	(*h)[0] = e
	heap.Fix(h, 0)
	if (*h)[0].Uid != 4 {
		t.Errorf("Expected min 4. Found: %+v", (*h)[0])
	}
	e.Uid = 11
	(*h)[0] = e
	heap.Fix(h, 0)
	if (*h)[0].Uid != 5 {
		t.Errorf("Expected min 5. Found: %+v", (*h)[0])
	}

	e = heap.Pop(h).(Elem)
	if e.Uid != 5 {
		t.Errorf("Expected min 5. Found %+v", e)
	}

	e = heap.Pop(h).(Elem)
	if e.Uid != 10 {
		t.Errorf("Expected min 10. Found: %+v", e)
	}
	e = heap.Pop(h).(Elem)
	if e.Uid != 11 {
		t.Errorf("Expected min 11. Found: %+v", e)
	}

	if h.Len() != 0 {
		t.Errorf("Expected len 0. Found: %v, values: %+v", h.Len(), h)
	}
}
Example #7
func (th *TriangleHeap) DecreaseKey(id int32, weight uint32) {
	if index, ok := th.indices[id]; ok {
		th.triangles[index].weight = weight
		heap.Fix(th, index)
		return
	}
}
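DecreaseKey can only call heap.Fix(th, index) if th.indices always reflects each triangle's current slot, so the map has to be updated wherever the heap moves elements, typically inside Swap. A sketch of the kind of heap.Interface implementation this pattern relies on (the triangle struct and its fields are assumptions here, not taken from the original source):

type triangle struct {
	id     int32
	weight uint32
}

// TriangleHeap is a min-heap of triangles by weight, plus a map from triangle
// id to its current position in the slice.
type TriangleHeap struct {
	triangles []triangle
	indices   map[int32]int
}

func (th *TriangleHeap) Len() int { return len(th.triangles) }

func (th *TriangleHeap) Less(i, j int) bool {
	return th.triangles[i].weight < th.triangles[j].weight
}

func (th *TriangleHeap) Swap(i, j int) {
	th.triangles[i], th.triangles[j] = th.triangles[j], th.triangles[i]
	// Keep the id -> index map consistent so heap.Fix can find elements later.
	th.indices[th.triangles[i].id] = i
	th.indices[th.triangles[j].id] = j
}

func (th *TriangleHeap) Push(x interface{}) {
	t := x.(triangle)
	th.indices[t.id] = len(th.triangles)
	th.triangles = append(th.triangles, t)
}

func (th *TriangleHeap) Pop() interface{} {
	n := len(th.triangles)
	t := th.triangles[n-1]
	th.triangles = th.triangles[:n-1]
	delete(th.indices, t.id)
	return t
}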
Example #8
// Mark an event happening, using the given timestamp.
//
// The implementation assumes time is monotonic; the behaviour is undefined if
// time goes backwards. This operation has logarithmic complexity.
func (ss *Rate) Touch(key string, nowTs time.Time) {
	now := nowTs.UnixNano()

	var bucket *bucket
	if bucketno, found := ss.keytobucketno[key]; found {
		bucket = &ss.buckets[bucketno]
	} else {
		bucketno = uint32(ss.sh.h[0])

		bucket = &ss.buckets[bucketno]
		delete(ss.keytobucketno, bucket.key)
		ss.keytobucketno[key] = bucketno

		bucket.key, bucket.errLastTs, bucket.errRate =
			key, bucket.lastTs, bucket.rate
	}

	if bucket.lastTs != 0 {
		bucket.rate = ss.count(bucket.rate, bucket.lastTs, now)
	}
	bucket.lastTs = now

	// Even lastTs change may change ordering.
	heap.Fix(&ss.sh, int(bucket.idx))
}
Example #9
// Fix updates the g and f scores of the node with the given id, if it is
// present, and restores the heap ordering.
func (pq *aStarPriorityQueue) Fix(id int, newGScore, newFScore float64) {
	if i, ok := pq.indexList[id]; ok {
		pq.nodes[i].gscore = newGScore
		pq.nodes[i].fscore = newFScore
		heap.Fix(pq, i)
	}
}
Example #10
// putWorker puts a worker back in the worker pool.
func (p *Pool) putWorker(w *worker) {
	p.mu.Lock()
	defer p.mu.Unlock()
	w.pending--
	// Reorder the queue based on the load of the workers.
	heap.Fix(&p.workers, w.index)
}
// updateNode sets the number of tasks for a given node. It ignores the update
// if the node isn't already tracked in the heap.
func (nh *nodeHeap) updateNode(n NodeInfo) {
	index, ok := nh.index[n.ID]
	if ok {
		nh.heap[index] = n
		heap.Fix(nh, index)
	}
}
Example #12
// Put adds a reply to the cache.
// When `ttl` is equal to `nullTTL`, the cache entry will be valid until the closest TTL in the `reply`.
func (c *Cache) Put(request *dns.Msg, reply *dns.Msg, ttl int, flags uint8) int {
	c.lock.Lock()
	defer c.lock.Unlock()

	now := c.clock.Now()
	question := request.Question[0]
	key := cacheKey(question)
	ent, found := c.entries[key]
	if found {
		updated := ent.setReply(reply, ttl, flags, now)
		if updated {
			heap.Fix(&c.entriesH, ent.index)
		}
	} else {
		// If we're adding a new item and the capacity has been exceeded, make some room...
		if len(c.entriesH) >= c.capacity {
			lowestEntry := heap.Pop(&c.entriesH).(*cacheEntry)
			delete(c.entries, cacheKey(lowestEntry.question))
		}
		ent = newCacheEntry(&question, now)
		ent.setReply(reply, ttl, flags, now)
		heap.Push(&c.entriesH, ent)
		c.entries[key] = ent
	}
	return ent.ReplyLen
}
// enqueue either adds the detail to the queue or updates its location in the
// priority queue.
func (pq *storePoolPQ) enqueue(detail *storeDetail) {
	if detail.index < 0 {
		heap.Push(pq, detail)
	} else {
		heap.Fix(pq, detail.index)
	}
}
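enqueue relies on the sign of detail.index to tell new details from tracked ones, which is the convention from the container/heap documentation's PriorityQueue example: Swap keeps index up to date and Pop marks removed items with -1. A hedged sketch of what the queue's methods presumably look like (the priority field is a placeholder; the real ordering key differs):

type storeDetail struct {
	priority float64 // placeholder ordering key, assumed for this sketch
	index    int     // position in the heap, or -1 when not enqueued
}

type storePoolPQ []*storeDetail

func (pq storePoolPQ) Len() int           { return len(pq) }
func (pq storePoolPQ) Less(i, j int) bool { return pq[i].priority < pq[j].priority }

func (pq storePoolPQ) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	// Record the new positions so heap.Fix(pq, detail.index) stays valid.
	pq[i].index = i
	pq[j].index = j
}

func (pq *storePoolPQ) Push(x interface{}) {
	d := x.(*storeDetail)
	d.index = len(*pq)
	*pq = append(*pq, d)
}

func (pq *storePoolPQ) Pop() interface{} {
	old := *pq
	n := len(old)
	d := old[n-1]
	d.index = -1 // mark as no longer in the queue, matching the check in enqueue
	*pq = old[:n-1]
	return d
}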
func (d *DelayingDeliverer) run() {
	for {
		now := time.Now()
		d.deliver(now)

		nextWakeUp := now.Add(time.Hour)
		if d.heap.Len() > 0 {
			nextWakeUp = d.heap.data[0].DeliveryTime
		}
		sleepTime := nextWakeUp.Sub(now)

		select {
		case <-time.After(sleepTime):
			break // just wake up and process the data
		case item := <-d.updateChannel:
			if position, found := d.heap.keyPosition[item.Key]; found {
				if item.DeliveryTime.Before(d.heap.data[position].DeliveryTime) {
					d.heap.data[position] = item
					heap.Fix(d.heap, position)
				}
				// Ignore if later.
			} else {
				heap.Push(d.heap, item)
			}
		case <-d.stopChannel:
			return
		}
	}
}
Example #15
// advance advances each iterator in the set to the next value for which *any*
// interpolatingIterator has a real value.
func (is unionIterator) advance() {
	if !is.isValid() {
		return
	}

	// All iterators in the set currently point to the same offset. Advancement
	// begins by pre-advancing any iterators that have a real value for the
	// current offset.
	current := is[0].offset
	for is[0].offset == current {
		is[0].advanceTo(current + 1)
		heap.Fix(&is, 0)
	}

	// It is possible that all iterators are now invalid.
	if !is.isValid() {
		return
	}

	// The iterator in position zero now has the lowest value for
	// nextReal.offset - advance all iterators to that offset.
	min := is[0].nextReal.offset
	for i := range is {
		is[i].advanceTo(min)
	}
	heap.Init(&is)
}
Example #16
//
// Get the next available Worker
//
func (pq *PriorityQueue) NextWorker() *Worker {
	now := time.Now()
	for pq.Len() > 0 {
		result := (*pq)[0]

		if result.index != INVALID_INDEX && result.Expire.After(now) {
			// As long as the Worker is alive, keep it in the priority queue, waiting to be assigned tasks
			//			log.Println("Find Valid Worker...")

			result.priority -= 1

			// Adjust the Worker's priority
			heap.Fix(pq, result.index)

			return result
		} else {
			if result.index != INVALID_INDEX {
				log.Errorf("Invalid Item index in PriorityQueue#NextWorker")
			} else {
				log.Println("Worker Expired")
				// Only expired elements are removed
				heap.Remove(pq, result.index)
			}
		}
	}

	log.Println("Has Not Worker...")
	return nil

}
Example #17
// advance advances each iterator in the set to the next value for which *any*
// interpolatingIterator has a real value.
func (ai aggregatingIterator) advance() {
	if !ai.isValid() {
		return
	}

	// All iterators in the set currently point to the same offset. Advancement
	// begins by pre-advancing any iterators that have a real value for the
	// current offset.
	current := ai[0].offset
	for ai[0].offset == current {
		ai[0].advanceTo(current + 1)
		heap.Fix(&ai, 0)
	}

	// It is possible that all iterators are now invalid.
	if !ai.isValid() {
		return
	}

	// The iterator in position zero now has the lowest value for
	// nextReal.offset - advance all iterators to that offset.
	min := ai[0].nextReal.offset()
	for i := range ai {
		ai[i].advanceTo(min)
	}
	heap.Init(&ai)
}
Example #18
/**
 * Generating primes with sieve.
 * See: http://www.cs.hmc.edu/~oneill/papers/Sieve-JFP.pdf
 *
 * This approach is slightly slower than trial division. Before identifying the
 * memory bottleneck it was significantly slower.
 *
 * primesSieve(2000000) was substantially slower than calling the function in
 * small steps and progressively reaching 2000000 (2 minutes vs 7 seconds).
 *
 * Attempted optimizations:
 * 1) Skipping even numbers. Paper suggests 77% improvement. Observed: ~20%.
 * 2) Generating target primes directly. Substantially slower than calling the
 *    function in small steps and progressively reaching 2000000.
 *    (2 minutes vs 7 seconds.) This mystery applies to trial division as well
 *    and appears to be platform specific (Mac OSX only). Attempts to profile
 *    have failed. See related:
 *    http://godoc.org/code.google.com/p/rsc/cmd/pprof_mac_fix
 * 3) Allocating a full block of memory in advance, rather than append doubling.
 *    No real difference in performance. Removed. Unclear if there is a way to
 *    pre-allocate and use from that pool without tons of hacks.
 *
 * Ultimately the performance problem was from allocating lots of objects:
 *   min := (*compositeHeap)[0]  // New object.
 *   // ...
 *   heap.Push(compositeHeap, min)  // Escapes local scope. Expensive.
 */
func GetPrime(ceil int) int {
	if ceil < len(knownPrimes) {
		return knownPrimes[ceil]
	}
	generateLock.Lock()
	defer generateLock.Unlock()

	min := (*compositeHeap)[0]
	for len(knownPrimes) <= ceil {
		lastPos += wheel[wheelIndex]
		wheelIndex = (wheelIndex + 1) % len(wheel)
		for lastPos > min.Pos {
			// Increase the multiple and fix the entry.
			min.Pos += min.Prime
			heap.Fix(compositeHeap, 0)
			// Look at the new lowest composite.
			min = (*compositeHeap)[0]
		}
		if lastPos < min.Pos {
			// Eg: i == 3, i < 4. Insert new prime 3 as {3*3, 3}.
			// When another prime (eg, 7) is discovered, do the same.
			primesRwLock.Lock()
			knownPrimes = append(knownPrimes, lastPos)
			primesRwLock.Unlock()
			heap.Push(compositeHeap, &Composite{lastPos * lastPos, lastPos})
		}
	}
	return knownPrimes[ceil]
}
Example #19
func (d *Driver) Dequeue(queue string, eid uid.ID) (e *storage.Envelope, err error) {
	now := time.Now().UnixNano()
	d.m.Lock()
	defer d.m.Unlock()
	msgs := d.queues.get(queue)
	for i, n := 0, len(*msgs); i < n; i++ {
		msg := (*msgs)[i]
		if msg.availAt > now {
			break
		}
		if !msg.envelope.Retry.IsValid() {
			event.Emit(event.EventMessageDiscarded, msg.envelope)
			msg.removed = true
		}
		if msg.removed {
			heap.Remove(msgs, i)
			i--
			n--
			continue
		}
		e = msg.envelope
		msg.eid = eid
		msg.availAt = now + int64(msg.envelope.Timeout)
		msg.envelope.Retry.Decr()
		msg.accumlating = false
		heap.Fix(msgs, i)
		d.ephemeralIndex[eid] = msg
		return
	}
	err = storage.ErrEmpty
	return
}
Example #20
// insertAtIndex stores a copy of slow at position idx, records its position in
// the lookup map, and restores the heap invariant.
func (slows *slowQueries) insertAtIndex(slow slowQuery, idx int) {
	cpy := new(slowQuery)
	*cpy = slow
	slows.priorityQueue[idx] = cpy
	slows.lookup[slow.ParameterizedQuery] = idx
	heap.Fix(slows, idx)
}
Example #21
// Touch records an event for key at nowTs: it reuses the key's existing bucket,
// allocates a new bucket while there is room, or recycles the minimum bucket
// (carrying its counts over as error values), then restores the heap ordering.
func (ss *SimpleRate) Touch(key string, nowTs time.Time) {
	var (
		found  bool
		bucket *srateBucket
		now    = nowTs.UnixNano()
	)
	bucket, found = ss.hash[key]
	if found {
		// we already have the correct bucket
	} else if len(ss.heap) < ss.size {
		// create new bucket
		bucket = &srateBucket{}
		ss.hash[key] = bucket
		bucket.key = key
		heap.Push(&ss.heap, bucket)
	} else {
		// use minimum bucket
		bucket = ss.heap[0]
		delete(ss.hash, bucket.key)
		ss.hash[key] = bucket
		bucket.error, bucket.errorTs, bucket.errorRate =
			bucket.count, bucket.countTs, bucket.countRate
		bucket.key = key
	}

	bucket.count += 1
	bucket.countRate = ss.count(bucket.countRate, bucket.countTs, now)
	bucket.countTs = now

	heap.Fix(&ss.heap, bucket.index)
}
Example #22
// advanceRoot retrieves the next row for the source at the root of the heap and
// updates the heap accordingly.
func (s *orderedSynchronizer) advanceRoot() error {
	if len(s.heap) == 0 {
		return nil
	}
	src := &s.sources[s.heap[0]]
	if src.row == nil {
		panic("trying to advance closed source")
	}
	oldRow := src.row
	var err error
	src.row, err = src.src.NextRow()
	if err != nil {
		s.err = err
		return err
	}
	if src.row == nil {
		heap.Remove(s, 0)
	} else {
		heap.Fix(s, 0)
		// TODO(radu): this check may be costly, we could disable it in production
		if cmp, err := oldRow.Compare(&s.alloc, s.ordering, src.row); err != nil {
			return err
		} else if cmp > 0 {
			return util.Errorf("incorrectly ordered stream %s after %s", src.row, oldRow)
		}
	}
	// heap operations might set s.err (see Less)
	return s.err
}
Example #23
// update modifies the g, h, and f values and the parent of a Node in the queue.
func (pq *PriorityQueue) update(node *Node, g, h, f int, p *Node) {
	node.g = g
	node.h = h
	node.f = f
	node.parent = p
	heap.Fix(pq, node.index)
}
func (nh *nodeHeap) remove(nodeID string) {
	index, ok := nh.index[nodeID]
	if ok {
		nh.heap[index].Tasks = nil
		heap.Fix(nh, index)
		heap.Pop(nh)
	}
}
Example #25
// update modifies the task, value, and priority of a Job and restores the heap ordering.
func (q *Queue) update(job *Job, task string, value string, priority int) {
	job.task = task
	job.value = value
	job.priority = priority
	heap.Fix(q, job.index)
}
Example #26
func Dijkstra(g *Graph, start, end Vertex) *list.List {
	pq := dijkstraPQ{}
	nodes := map[Vertex]*dijkstraNode{}

	heap.Init(&pq)

	for v := range g.VerticesIter() {
		dn := &dijkstraNode{
			vertex:   v,
			distance: math.Inf(1),
		}
		heap.Push(&pq, dn)
		nodes[v] = dn
	}

	nodes[start].distance = 0
	heap.Fix(&pq, nodes[start].index)

	for pq.Len() > 0 {
		v := heap.Pop(&pq).(*dijkstraNode)

		for he := range g.HalfedgesIter(v.vertex) {
			dn := nodes[he.End]

			if dn == nil {
				continue
			}

			if v.distance+he.Cost < dn.distance {
				dn.distance = v.distance + he.Cost
				dn.predecessor = v
				heap.Fix(&pq, dn.index)
			}
		}

		if v.vertex == end {
			l := list.New()
			for e := v; e != nil; e = e.predecessor {
				l.PushFront(e.vertex)
			}
			return l
		}
	}

	return nil
}
Example #27
// Prim implements Prim’s algorithm. It returns a minimal spanning
// tree for the given graph, starting with vertex start.
func Prim(g *Graph, start Vertex) *Graph {
	tree := NewGraph()
	nodes := map[Vertex]*primNode{}
	pq := primPQ{}

	heap.Init(&pq)

	for v := range g.VerticesIter() {
		n := &primNode{
			vertex:  v,
			cost:    math.Inf(1),
			visited: false,
		}
		heap.Push(&pq, n)
		nodes[v] = n
	}

	nodes[start].cost = 0
	heap.Fix(&pq, nodes[start].index)

	for pq.Len() > 0 {
		v := heap.Pop(&pq).(*primNode)
		v.visited = true

		for he := range g.HalfedgesIter(v.vertex) {
			node := nodes[he.End]
			if node.visited {
				continue
			}

			if he.Cost < node.cost {
				node.cost = he.Cost
				node.predecessor = v
				heap.Fix(&pq, node.index)
			}
		}
	}

	for _, node := range nodes {
		if node.predecessor != nil {
			tree.AddEdge(node.predecessor.vertex, node.vertex, node.cost)
		}
	}

	return tree
}
// addOrUpdateNode sets the number of tasks for a given node. It adds the node
// to the heap if it wasn't already tracked.
func (nh *nodeHeap) addOrUpdateNode(n NodeInfo) {
	index, ok := nh.index[n.ID]
	if ok {
		nh.heap[index] = n
		heap.Fix(nh, index)
	} else {
		heap.Push(nh, n)
	}
}
func (pq *Q) update(item *Node, w int64) {
	for i := 0; i < len(*pq); i++ {
		if (*pq)[i].n == item {
			(*pq)[i].dist = w
			heap.Fix(pq, i)
			break
		}
	}
}
Example #30
func (g *group) Dial(c *Client) (conn *gokeyless.Conn, err error) {
	g.Lock()
	defer g.Unlock()

	if g.Len() == 0 {
		err = errors.New("remote group empty")
		return
	}

	var i *item
	var popped []*item
	for g.Len() > 0 {
		i = heap.Pop(g).(*item)
		popped = append(popped, i)
		if conn, err = i.Dial(c); err == nil {
			break
		}

		i.p = 0
		i.errs = append(i.errs, err)
	}

	for _, f := range popped {
		heap.Push(g, f)
	}

	if err != nil {
		return
	}

	go func() {
		defer conn.Close()
		for {
			start := time.Now()
			err := conn.Ping(nil)
			duration := time.Since(start)

			g.Lock()
			if err != nil {
				i.p = 0
				i.errs = append(i.errs, err)
			} else {
				i.p.Update(1 / float64(duration))
			}
			heap.Fix(g, i.index)
			g.Unlock()

			if err != nil {
				log.Infof("Ping failed: %v", err)
				return
			}

			time.Sleep(time.Minute)
		}
	}()
	return
}