Example #1
// SchedulePut records c in the unwrittenPuts cache; if the chunk was already
// scheduled it returns immediately. Otherwise it bumps requestWg and enqueues
// a writeRequest for the chunk's hash (plus its hints) on writeQueue.
func (bhcs *httpBatchStore) SchedulePut(c chunks.Chunk, refHeight uint64, hints types.Hints) {
	if !bhcs.unwrittenPuts.Insert(c, refHeight) {
		return
	}

	bhcs.requestWg.Add(1)
	bhcs.writeQueue <- writeRequest{c.Hash(), hints, false}
}
Example #2
// SchedulePut simply calls Put on the underlying ChunkStore, and ignores hints.
func (lbs *localBatchStore) SchedulePut(c chunks.Chunk, refHeight uint64, hints types.Hints) {
	lbs.once.Do(lbs.expectVersion)

	lbs.unwrittenPuts.Insert(c, refHeight)
	lbs.mu.Lock()
	defer lbs.mu.Unlock()
	lbs.hashes.Insert(c.Hash())
	lbs.AddHints(hints)
}
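A hedged usage sketch for the SchedulePut variants in Examples #1 and #2. The anonymous interface parameter, the chunks.NewChunk constructor, and the empty types.Hints literal are assumptions based on how the surrounding code uses these types; treat this as a sketch, not the canonical API.

// Hedged sketch: bs may be any value with the SchedulePut signature shown in
// Examples #1 and #2; chunks.NewChunk and the empty types.Hints literal are
// assumed from how the surrounding code uses these types.
func scheduleExample(bs interface {
	SchedulePut(c chunks.Chunk, refHeight uint64, hints types.Hints)
}) {
	c := chunks.NewChunk([]byte("serialized value bytes"))
	// refHeight 1 marks a chunk with no children; no hints are supplied here.
	bs.SchedulePut(c, 1, types.Hints{})
	// The chunk now sits in the store's unwrittenPuts buffer; the actual write
	// happens later, e.g. when the store is flushed.
}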
Example #3
// DecodeValue decodes a value from a chunk source. It is an error to provide an empty chunk.
func DecodeValue(c chunks.Chunk, vr ValueReader) Value {
	d.Chk.False(c.IsEmpty())
	v := DecodeFromBytes(c.Data(), vr, staticTypeCache)
	if cacher, ok := v.(hashCacher); ok {
		assignHash(cacher, c.Hash())
	}

	return v
}
Example #4
// Put satisfies every outstanding read registered for c's hash in the batch,
// then removes that hash from the batch.
func (rb *readBatchChunkSink) Put(c chunks.Chunk) {
	rb.mu.RLock()
	for _, or := range (*(rb.batch))[c.Hash()] {
		or.Satisfy(c)
	}
	rb.mu.RUnlock()

	rb.mu.Lock()
	defer rb.mu.Unlock()
	delete(*(rb.batch), c.Hash())
}
Example #5
// DecodeValue decodes a value from a chunk source. It is an error to provide an empty chunk.
func DecodeValue(c chunks.Chunk, vr ValueReader) Value {
	d.Chk.False(c.IsEmpty())
	data := c.Data()
	dec := newValueDecoder(&binaryNomsReader{data, 0}, vr)
	v := dec.readValue()

	if cacher, ok := v.(hashCacher); ok {
		assignHash(cacher, c.Hash())
	}

	return v
}
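A hedged round-trip sketch for the DecodeValue variants in Examples #3 and #5. EncodeValue and the String value type are assumed to live in the same package with the obvious signatures; a nil ValueReader is used because the encoded value carries no outgoing refs.

// Hedged sketch: EncodeValue(v, nil) and String are assumed package members;
// DecodeValue panics via d.Chk if the chunk is empty.
func decodeRoundTrip() Value {
	c := EncodeValue(String("hello"), nil)
	v := DecodeValue(c, nil)
	// v compares equal to String("hello"), and its hash is already cached
	// because DecodeValue calls assignHash on hashCacher values.
	return v
}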
Example #6
// Enqueue adds a Chunk to the queue of Chunks waiting to be Put into vbs'
// backing ChunkStore. The instance keeps an internal buffer of Chunks, spilling
// to the ChunkStore when the buffer is full. If an attempt to Put Chunks fails,
// this method returns the BackpressureError from the underlying ChunkStore.
func (vbs *ValidatingBatchingSink) Enqueue(c chunks.Chunk) chunks.BackpressureError {
	h := c.Hash()
	if vbs.vs.isPresent(h) {
		return nil
	}
	v := DecodeFromBytes(c.Data(), vbs.vs, vbs.tc)
	d.PanicIfTrue(getHash(v) != h, "Invalid hash found")
	vbs.vs.ensureChunksInCache(v)
	vbs.vs.set(h, hintedChunk{v.Type(), h})

	vbs.batch[vbs.count] = c
	vbs.count++
	if vbs.count == batchSize {
		return vbs.Flush()
	}
	return nil
}
Example #7
// Enqueue adds a Chunk to the queue of Chunks waiting to be Put into vbs'
// backing ChunkStore. The instance keeps an internal buffer of Chunks, spilling
// to the ChunkStore when the buffer is full. If an attempt to Put Chunks fails,
// this method returns the BackpressureError from the underlying ChunkStore.
func (vbs *ValidatingBatchingSink) Enqueue(c chunks.Chunk) chunks.BackpressureError {
	h := c.Hash()
	if vbs.vs.isPresent(h) {
		return nil
	}
	v := DecodeValue(c, vbs.vs)
	d.Exp.True(EnsureHash(&hash.Hash{}, v) == h)
	vbs.vs.ensureChunksInCache(v)
	vbs.vs.set(h, hintedChunk{v.Type(), h})

	vbs.batch[vbs.count] = c
	vbs.count++
	if vbs.count == batchSize {
		return vbs.Flush()
	}
	return nil
}
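A hedged sketch of the Enqueue/Flush contract described in Examples #6 and #7: enqueue chunks one by one, surface any BackpressureError from the backing ChunkStore, then flush whatever is still buffered. The drain helper is hypothetical; only Enqueue and Flush come from the examples above.

// drain is a hypothetical helper illustrating the contract above; it assumes
// vbs has already been constructed and that the chunks in cs are to be stored.
func drain(vbs *ValidatingBatchingSink, cs []chunks.Chunk) chunks.BackpressureError {
	for _, c := range cs {
		if bpe := vbs.Enqueue(c); bpe != nil {
			// The backing ChunkStore pushed back; stop and let the caller retry.
			return bpe
		}
	}
	// Flush whatever is still sitting in the internal batch buffer.
	return vbs.Flush()
}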
Example #8
// Insert can be called from any goroutine to store c in the cache. If c is
// successfully added to the cache, Insert returns true. If c was already in
// the cache, Insert returns false.
func (p *orderedChunkCache) Insert(c chunks.Chunk, refHeight uint64) bool {
	hash := c.Hash()
	dbKey, present := func() (dbKey []byte, present bool) {
		p.mu.Lock()
		defer p.mu.Unlock()
		if _, present = p.chunkIndex[hash]; !present {
			dbKey = toDbKey(refHeight, c.Hash())
			p.chunkIndex[hash] = dbKey
		}
		return
	}()

	if !present {
		compressed := snappy.Encode(nil, c.Data())
		d.Chk.NoError(p.orderedChunks.Put(dbKey, compressed, nil))
		return true
	}
	return false
}
Example #9
// Insert can be called from any goroutine to store c in the cache. If c is
// successfully added to the cache, Insert returns true. If c was already in
// the cache, Insert returns false.
func (p *orderedChunkCache) Insert(c chunks.Chunk, refHeight uint64) bool {
	hash := c.Hash()
	dbKey, present := func() (dbKey []byte, present bool) {
		p.mu.Lock()
		defer p.mu.Unlock()
		if _, present = p.chunkIndex[hash]; !present {
			dbKey = toDbKey(refHeight, c.Hash())
			p.chunkIndex[hash] = dbKey
		}
		return
	}()

	if !present {
		buf := &bytes.Buffer{}
		gw := snappy.NewBufferedWriter(buf)
		chunks.Serialize(c, gw)
		gw.Close()
		d.Chk.NoError(p.orderedChunks.Put(dbKey, buf.Bytes(), nil))
		return true
	}
	return false
}
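A hedged sketch of the Insert contract stated in Examples #8 and #9: the first Insert of a chunk returns true and stores it, a repeat returns false. The cache is taken as a parameter because its constructor is not shown above; the helper name is hypothetical.

// insertTwice is a hypothetical helper; it assumes an already-constructed
// *orderedChunkCache as shown in Examples #8 and #9.
func insertTwice(cache *orderedChunkCache, c chunks.Chunk) (first, second bool) {
	first = cache.Insert(c, 1)  // true: hash was new, chunk is compressed and stored
	second = cache.Insert(c, 1) // false: hash already present in chunkIndex
	return
}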