func TestBuildWriteValueRequest(t *testing.T) {
	assert := assert.New(t)
	input1, input2 := "abc", "def"
	chnx := []chunks.Chunk{
		chunks.NewChunk([]byte(input1)),
		chunks.NewChunk([]byte(input2)),
	}

	hints := map[hash.Hash]struct{}{
		hash.Parse("sha1-0000000000000000000000000000000000000002"): struct{}{},
		hash.Parse("sha1-0000000000000000000000000000000000000003"): struct{}{},
	}
	compressed := buildWriteValueRequest(serializeChunks(chnx, assert), hints)
	gr := snappy.NewReader(compressed)

	count := 0
	for hint := range deserializeHints(gr) {
		count++
		_, present := hints[hint]
		assert.True(present)
	}
	assert.Equal(len(hints), count)

	chunkChan := make(chan *chunks.Chunk, 16)
	go chunks.DeserializeToChan(gr, chunkChan)
	for c := range chunkChan {
		assert.Equal(chnx[0].Hash(), c.Hash())
		chnx = chnx[1:]
	}
	assert.Empty(chnx)
}
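The test above round-trips a write-value request: chunks and hints are serialized into a snappy-compressed body, then read back by draining the hints before deserializing the chunk stream. The same read pattern, factored into a helper, might look like the sketch below. This is a non-authoritative sketch that assumes it lives in the same package as the code above, with the same imports (io, snappy, hash, chunks); readWriteValueRequest is a hypothetical name.

// Sketch: reading back a write-value request body. `body` is assumed to be
// the snappy-compressed stream produced by buildWriteValueRequest above.
func readWriteValueRequest(body io.Reader) (hints map[hash.Hash]struct{}, out []chunks.Chunk) {
	r := snappy.NewReader(body)

	// Hints precede the chunk stream, so they must be drained first.
	hints = map[hash.Hash]struct{}{}
	for h := range deserializeHints(r) {
		hints[h] = struct{}{}
	}

	// DeserializeToChan closes chunkChan when the stream is exhausted,
	// so ranging over it terminates on its own.
	chunkChan := make(chan *chunks.Chunk, 16)
	go chunks.DeserializeToChan(r, chunkChan)
	for c := range chunkChan {
		out = append(out, *c)
	}
	return
}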
func TestHandleGetRefs(t *testing.T) {
	assert := assert.New(t)
	cs := chunks.NewTestStore()
	input1, input2 := "abc", "def"
	chnx := []chunks.Chunk{
		chunks.NewChunk([]byte(input1)),
		chunks.NewChunk([]byte(input2)),
	}
	err := cs.PutMany(chnx)
	assert.NoError(err)

	body := strings.NewReader(fmt.Sprintf("ref=%s&ref=%s", chnx[0].Hash(), chnx[1].Hash()))

	w := httptest.NewRecorder()
	HandleGetRefs(w,
		&http.Request{Body: ioutil.NopCloser(body), Method: "POST", Header: http.Header{
			"Content-Type": {"application/x-www-form-urlencoded"},
		}},
		params{},
		cs,
	)

	if assert.Equal(http.StatusOK, w.Code, "Handler error:\n%s", w.Body.String()) {
		chunkChan := make(chan *chunks.Chunk)
		go chunks.DeserializeToChan(w.Body, chunkChan)
		for c := range chunkChan {
			assert.Equal(chnx[0].Hash(), c.Hash())
			chnx = chnx[1:]
		}
		assert.Empty(chnx)
	}
}
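From the client side, the handler above is driven by a form-encoded POST carrying one ref field per requested hash, and the matching chunks come back as a stream that DeserializeToChan can consume directly, as the recorder-based assertion shows. A hedged client sketch follows; the /getRefs/ path, the base URL, and the fetchChunks name are assumptions, and the body is formatted the same way the test builds it.

// Sketch: fetching chunks over HTTP. The endpoint path is an assumption;
// the "ref=<hash>&ref=<hash>" body mirrors the test above.
func fetchChunks(baseURL string, hashes []hash.Hash) ([]*chunks.Chunk, error) {
	refs := make([]string, len(hashes))
	for i, h := range hashes {
		refs[i] = fmt.Sprintf("ref=%s", h) // same formatting as the test body above
	}
	resp, err := http.Post(baseURL+"/getRefs/", "application/x-www-form-urlencoded",
		strings.NewReader(strings.Join(refs, "&")))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("getRefs returned status %d", resp.StatusCode)
	}

	chunkChan := make(chan *chunks.Chunk, 16)
	go chunks.DeserializeToChan(resp.Body, chunkChan)
	out := []*chunks.Chunk{}
	for c := range chunkChan {
		out = append(out, c)
	}
	return out, nil
}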
Example #3
func (suite *LevelDBPutCacheSuite) extractChunks(hashes hashSet) <-chan *chunks.Chunk {
	buf := &bytes.Buffer{}
	err := suite.cache.ExtractChunks(hashes, buf)
	suite.NoError(err)

	chunkChan := make(chan *chunks.Chunk)
	go chunks.DeserializeToChan(snappy.NewReader(buf), chunkChan)
	return chunkChan
}
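The helper hands the channel back rather than draining it, so callers range over it and rely on DeserializeToChan closing the channel once the buffer is exhausted. A typical consumer in this suite might look like the sketch below; collectChunks is a hypothetical name, everything else is taken from the code above.

// Sketch: draining the extracted chunks into a hash-indexed map. The range
// loop ends when DeserializeToChan closes the channel.
func (suite *LevelDBPutCacheSuite) collectChunks(hashes hashSet) map[hash.Hash]chunks.Chunk {
	out := map[hash.Hash]chunks.Chunk{}
	for c := range suite.extractChunks(hashes) {
		out[c.Hash()] = *c
	}
	return out
}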
Example #4
// Get can be called from any goroutine to retrieve the chunk referenced by hash. If the chunk is not present, Get returns the empty Chunk.
func (p *orderedChunkCache) Get(hash hash.Hash) chunks.Chunk {
	// Don't use defer p.mu.RUnlock() here, because I want reading from orderedChunks NOT to be guarded by the lock. LevelDB handles its own goroutine-safety.
	p.mu.RLock()
	dbKey, ok := p.chunkIndex[hash]
	p.mu.RUnlock()

	if !ok {
		return chunks.EmptyChunk
	}
	data, err := p.orderedChunks.Get(dbKey, nil)
	d.Chk.NoError(err)
	reader := snappy.NewReader(bytes.NewReader(data))
	chunkChan := make(chan *chunks.Chunk)
	go chunks.DeserializeToChan(reader, chunkChan)
	return *(<-chunkChan)
}
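Because a miss is signalled by chunks.EmptyChunk rather than an error, callers typically check the result before using it. A minimal sketch of that pattern, assuming a hypothetical backing chunks.ChunkStore with a Get(hash.Hash) method to fall back to:

// Sketch: cache lookup with a fallback. Get returns chunks.EmptyChunk on a
// miss (see above); `backing` is a hypothetical backing store.
func getWithFallback(cache *orderedChunkCache, backing chunks.ChunkStore, h hash.Hash) chunks.Chunk {
	if c := cache.Get(h); c.Hash() != chunks.EmptyChunk.Hash() {
		return c
	}
	return backing.Get(h)
}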
Example #5
func HandleWriteValue(w http.ResponseWriter, req *http.Request, ps URLParams, cs chunks.ChunkStore) {
	hashes := hash.HashSlice{}
	err := d.Try(func() {
		d.Exp.Equal("POST", req.Method)

		reader := bodyReader(req)
		defer func() {
			// Ensure all data on reader is consumed
			io.Copy(ioutil.Discard, reader)
			reader.Close()
		}()
		vbs := types.NewValidatingBatchingSink(cs)
		vbs.Prepare(deserializeHints(reader))

		chunkChan := make(chan *chunks.Chunk, 16)
		go chunks.DeserializeToChan(reader, chunkChan)
		var bpe chunks.BackpressureError
		for c := range chunkChan {
			if bpe == nil {
				bpe = vbs.Enqueue(*c)
			} else {
				// A previous Enqueue() errored; keep draining chunkChan so the
				// deserializing goroutine can finish, and record the hash for the
				// backpressure response.
				// TODO: what about having DeserializeToChan take a 'done' channel to stop it?
				bpe = append(bpe, c.Hash())
			}
			// hashes is reported in the error message if deserialization panics.
			hashes = append(hashes, c.Hash())
		}
		if bpe == nil {
			bpe = vbs.Flush()
		}
		if bpe != nil {
			// Set the Content-Type before WriteHeader; header changes made after
			// WriteHeader are ignored by net/http.
			w.Header().Add("Content-Type", "application/octet-stream")
			w.WriteHeader(httpStatusTooManyRequests)
			writer := respWriter(req, w)
			defer writer.Close()
			serializeHashes(writer, bpe.AsHashes())
			return
		}
		w.WriteHeader(http.StatusCreated)
	})

	if err != nil {
		http.Error(w, fmt.Sprintf("Error: %v\nChunks in payload: %v", err, hashes), http.StatusBadRequest)
		return
	}
}
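On success the handler answers 201 Created; when the sink reports backpressure it answers httpStatusTooManyRequests and streams back the hashes the client should resend. A hedged sketch of the client-side interpretation follows; parseWriteValueResponse is a hypothetical name, and deserializeHashes is assumed to be the counterpart of the serializeHashes call used above.

// Sketch: interpreting a WriteValue response. deserializeHashes is assumed
// to mirror serializeHashes above; a non-empty result means "retry these".
func parseWriteValueResponse(resp *http.Response) (hash.HashSlice, error) {
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusCreated:
		return nil, nil
	case httpStatusTooManyRequests:
		return deserializeHashes(resp.Body), nil
	default:
		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
}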
func handleWriteValue(w http.ResponseWriter, req *http.Request, ps URLParams, cs chunks.ChunkStore) {
	d.PanicIfTrue(req.Method != "POST", "Expected POST method.")

	reader := bodyReader(req)
	defer func() {
		// Ensure all data on reader is consumed
		io.Copy(ioutil.Discard, reader)
		reader.Close()
	}()
	tc := types.NewTypeCache()
	vbs := types.NewValidatingBatchingSink(cs, tc)
	vbs.Prepare(deserializeHints(reader))

	chunkChan := make(chan *chunks.Chunk, 16)
	go chunks.DeserializeToChan(reader, chunkChan)
	var bpe chunks.BackpressureError
	for c := range chunkChan {
		if bpe == nil {
			bpe = vbs.Enqueue(*c)
		} else {
			// A previous Enqueue() errored; keep draining chunkChan so the
			// deserializing goroutine can finish, and record the hash for the
			// backpressure response.
			// TODO: what about having DeserializeToChan take a 'done' channel to stop it?
			bpe = append(bpe, c.Hash())
		}
	}
	if bpe == nil {
		bpe = vbs.Flush()
	}
	if bpe != nil {
		// Set the Content-Type before WriteHeader; header changes made after
		// WriteHeader are ignored by net/http.
		w.Header().Add("Content-Type", "application/octet-stream")
		w.WriteHeader(httpStatusTooManyRequests)
		writer := respWriter(req, w)
		defer writer.Close()
		serializeHashes(writer, bpe.AsHashes())
		return
	}
	w.WriteHeader(http.StatusCreated)
}
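This unexported revision drops the d.Try error wrapper and adds a types.TypeCache for the validating sink, but the request/response protocol is unchanged. Because the handler takes URLParams and a ChunkStore in addition to the usual pair, it needs a small adapter before it can be registered with net/http. A minimal sketch, reusing the empty params{} value the tests above pass for URLParams; the route path and the writeValueHandler name are assumptions.

// Sketch: adapting handleWriteValue to http.HandlerFunc. `cs` is any
// chunks.ChunkStore; params{} mirrors the empty URLParams used in the tests.
func writeValueHandler(cs chunks.ChunkStore) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		handleWriteValue(w, req, params{}, cs)
	}
}

// Usage (route path is an assumption):
//   mux := http.NewServeMux()
//   mux.Handle("/writeValue/", writeValueHandler(chunks.NewTestStore()))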