func (h *RefByHeight) Unique() { seen := hash.HashSet{} result := make(RefByHeight, 0, cap(*h)) for _, r := range *h { target := r.TargetHash() if !seen.Has(target) { result = append(result, r) } seen.Insert(target) } *h = result }
// ExtractChunks can be called from any goroutine to write Chunks referenced by the given hashes to w. The chunks are ordered by ref-height. Chunks of the same height are written in an unspecified order, relative to one another. func (p *orderedChunkCache) ExtractChunks(hashes hash.HashSet, chunkChan chan *chunks.Chunk) error { iter := p.orderedChunks.NewIterator(nil, nil) defer iter.Release() for iter.Next() { _, hash := fromDbKey(iter.Key()) if !hashes.Has(hash) { continue } compressed := iter.Value() data, err := snappy.Decode(nil, compressed) d.Chk.NoError(err) c := chunks.NewChunkWithHash(hash, data) chunkChan <- &c } return nil }
// batchPutRequests starts a background worker goroutine that accumulates
// write requests from bhcs.writeQueue and flushes them in batches via
// bhcs.sendWriteRequests. The worker runs until bhcs.finishedChan fires;
// bhcs.workerWg tracks its lifetime so callers can wait for shutdown.
func (bhcs *httpBatchStore) batchPutRequests() {
	bhcs.workerWg.Add(1)
	go func() {
		defer bhcs.workerWg.Done()

		// Batch accumulators; ownership is transferred to sendWriteRequests
		// on each flush, so fresh instances are created afterwards.
		hints := types.Hints{}
		hashes := hash.HashSet{}
		handleRequest := func(wr writeRequest) {
			if !wr.justHints {
				if hashes.Has(wr.hash) {
					bhcs.requestWg.Done() // Already have a put enqueued for wr.hash.
				} else {
					hashes.Insert(wr.hash)
				}
			}
			// Hints are collected for every request, put or hint-only.
			for hint := range wr.hints {
				hints[hint] = struct{}{}
			}
		}
		for done := false; !done; {
			drainAndSend := false
			select {
			case wr := <-bhcs.writeQueue:
				handleRequest(wr)
			case <-bhcs.flushChan:
				drainAndSend = true
			case <-bhcs.finishedChan:
				// Final flush, then exit the outer loop.
				drainAndSend = true
				done = true
			}
			if drainAndSend {
				// Drain any requests already queued, then send the batch once
				// the queue is momentarily empty (the default branch).
				for drained := false; !drained; {
					select {
					case wr := <-bhcs.writeQueue:
						handleRequest(wr)
					default:
						drained = true
						bhcs.sendWriteRequests(hashes, hints) // Takes ownership of hashes, hints
						hints = types.Hints{}
						hashes = hash.HashSet{}
					}
				}
			}
		}
	}()
}