func (bhcs *httpBatchStore) SchedulePut(c chunks.Chunk, refHeight uint64, hints types.Hints) { if !bhcs.unwrittenPuts.Insert(c, refHeight) { return } bhcs.requestWg.Add(1) bhcs.writeQueue <- writeRequest{c.Hash(), hints, false} }
// SomeChunksP invokes callbacks on every unique chunk reachable from |r| in
// top-down order. Callbacks are invoked only once for each chunk regardless of
// how many times the chunk appears.
//
// |stopCb| is invoked for the types.Ref of every chunk. It can return true to
// stop SomeChunksP from descending any further.
// |chunkCb| is optional, invoked with the chunks.Chunk referenced by |stopCb|
// if it didn't return true.
//
// |concurrency| goroutines drain the ref queue in parallel. The WaitGroup
// counts outstanding refs (one Add per ref enqueued, one Done per ref
// processed), so Wait() returns only when the whole reachable graph has been
// visited; the queue is closed afterwards to let the workers exit.
func SomeChunksP(r types.Ref, bs types.BatchStore, stopCb SomeChunksStopCallback, chunkCb SomeChunksChunkCallback, concurrency int) {
	rq := newRefQueue()
	wg := sync.WaitGroup{}
	mu := sync.Mutex{}
	visitedRefs := map[hash.Hash]bool{}

	walkChunk := func(r types.Ref) {
		defer wg.Done()
		tr := r.TargetHash()

		// Mark-and-test under the mutex so each target hash is processed at
		// most once even when multiple refs to it arrive concurrently.
		mu.Lock()
		visited := visitedRefs[tr]
		visitedRefs[tr] = true
		mu.Unlock()

		if visited || stopCb(r) {
			return
		}

		// Try to avoid the cost of reading |c|. It's only necessary if the
		// caller wants to know about every chunk, or if we need to descend
		// below |c| (ref height > 1).
		var c chunks.Chunk
		if chunkCb != nil || r.Height() > 1 {
			c = bs.Get(tr)
			d.Chk.False(c.IsEmpty())
			if chunkCb != nil {
				chunkCb(r, c)
			}
		}

		// Height 1 means the chunk is a leaf; nothing further to descend into.
		if r.Height() == 1 {
			return
		}

		// Enqueue every child ref; Add before the send so Wait() can't
		// observe a zero count while work is still in flight.
		v := types.DecodeValue(c, nil)
		for _, r1 := range v.Chunks() {
			wg.Add(1)
			rq.tail() <- r1
		}
	}

	iter := func() {
		for r := range rq.head() {
			walkChunk(r)
		}
	}

	for i := 0; i < concurrency; i++ {
		go iter()
	}

	// Seed the walk with the root ref, then wait for the graph to drain.
	wg.Add(1)
	rq.tail() <- r
	wg.Wait()
	rq.close()
}
// SchedulePut simply calls Put on the underlying ChunkStore, and ignores hints. func (lbs *localBatchStore) SchedulePut(c chunks.Chunk, refHeight uint64, hints types.Hints) { lbs.once.Do(lbs.expectVersion) lbs.unwrittenPuts.Insert(c, refHeight) lbs.mu.Lock() defer lbs.mu.Unlock() lbs.hashes.Insert(c.Hash()) lbs.AddHints(hints) }
func (rb *readBatchChunkSink) Put(c chunks.Chunk) { rb.mu.RLock() for _, or := range (*(rb.batch))[c.Hash()] { or.Satisfy(c) } rb.mu.RUnlock() rb.mu.Lock() defer rb.mu.Unlock() delete(*(rb.batch), c.Hash()) }
// Enequeue adds a Chunk to the queue of Chunks waiting to be Put into vbs' backing ChunkStore. The instance keeps an internal buffer of Chunks, spilling to the ChunkStore when the buffer is full. If an attempt to Put Chunks fails, this method returns the BackpressureError from the underlying ChunkStore. func (vbs *ValidatingBatchingSink) Enqueue(c chunks.Chunk) chunks.BackpressureError { h := c.Hash() if vbs.vs.isPresent(h) { return nil } v := DecodeFromBytes(c.Data(), vbs.vs, vbs.tc) d.PanicIfTrue(getHash(v) != h, "Invalid hash found") vbs.vs.ensureChunksInCache(v) vbs.vs.set(h, hintedChunk{v.Type(), h}) vbs.batch[vbs.count] = c vbs.count++ if vbs.count == batchSize { return vbs.Flush() } return nil }
// Enequeue adds a Chunk to the queue of Chunks waiting to be Put into vbs' backing ChunkStore. The instance keeps an internal buffer of Chunks, spilling to the ChunkStore when the buffer is full. If an attempt to Put Chunks fails, this method returns the BackpressureError from the underlying ChunkStore. func (vbs *ValidatingBatchingSink) Enqueue(c chunks.Chunk) chunks.BackpressureError { h := c.Hash() if vbs.vs.isPresent(h) { return nil } v := DecodeValue(c, vbs.vs) d.Exp.True(EnsureHash(&hash.Hash{}, v) == h) vbs.vs.ensureChunksInCache(v) vbs.vs.set(h, hintedChunk{v.Type(), h}) vbs.batch[vbs.count] = c vbs.count++ if vbs.count == batchSize { return vbs.Flush() } return nil }
// Insert can be called from any goroutine to store c in the cache. If c is successfully added to the cache, Insert returns true. If c was already in the cache, Insert returns false. func (p *orderedChunkCache) Insert(c chunks.Chunk, refHeight uint64) bool { hash := c.Hash() dbKey, present := func() (dbKey []byte, present bool) { p.mu.Lock() defer p.mu.Unlock() if _, present = p.chunkIndex[hash]; !present { dbKey = toDbKey(refHeight, c.Hash()) p.chunkIndex[hash] = dbKey } return }() if !present { buf := &bytes.Buffer{} gw := snappy.NewBufferedWriter(buf) chunks.Serialize(c, gw) gw.Close() d.Chk.NoError(p.orderedChunks.Put(dbKey, buf.Bytes(), nil)) return true } return false }
// DecodeValue decodes a value from a chunk source. It is an error to provide an empty chunk. func DecodeValue(c chunks.Chunk, vr ValueReader) Value { d.Chk.False(c.IsEmpty()) v := DecodeFromBytes(c.Data(), vr, staticTypeCache) if cacher, ok := v.(hashCacher); ok { assignHash(cacher, c.Hash()) } return v }
// DecodeValue decodes a value from a chunk source. It is an error to provide an empty chunk. func DecodeValue(c chunks.Chunk, vr ValueReader) Value { d.Chk.False(c.IsEmpty()) data := c.Data() dec := newValueDecoder(&binaryNomsReader{data, 0}, vr) v := dec.readValue() if cacher, ok := v.(hashCacher); ok { assignHash(cacher, c.Hash()) } return v }
// Insert can be called from any goroutine to store c in the cache. If c is successfully added to the cache, Insert returns true. If c was already in the cache, Insert returns false. func (p *orderedChunkCache) Insert(c chunks.Chunk, refHeight uint64) bool { hash := c.Hash() dbKey, present := func() (dbKey []byte, present bool) { p.mu.Lock() defer p.mu.Unlock() if _, present = p.chunkIndex[hash]; !present { dbKey = toDbKey(refHeight, c.Hash()) p.chunkIndex[hash] = dbKey } return }() if !present { compressed := snappy.Encode(nil, c.Data()) d.Chk.NoError(p.orderedChunks.Put(dbKey, compressed, nil)) return true } return false }