// newLocalBatchStore returns a batchStore that writes through to cs, buffering
// unwritten Puts in an ordered chunk cache and validating every chunk via a
// ValidatingBatchingSink before it is committed.
func newLocalBatchStore(cs chunks.ChunkStore) *localBatchStore {
	return &localBatchStore{
		cs:            cs,
		unwrittenPuts: newOrderedChunkCache(),
		vbs:           types.NewValidatingBatchingSink(cs, types.NewTypeCache()),
		hints:         types.Hints{},
		hashes:        hash.HashSet{},
		mu:            &sync.Mutex{},
	}
}
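For orientation, the initializer above implies a struct along the following lines. This is a sketch inferred from the field names and constructor calls, not the package's actual declaration; in particular, the types of unwrittenPuts and vbs are assumptions based on what newOrderedChunkCache and types.NewValidatingBatchingSink appear to return.

// Sketch of the shape implied by the constructor above (lives in the same
// package). The unwrittenPuts and vbs field types are assumed from the
// constructor calls; the real declaration may differ or carry more fields.
type localBatchStore struct {
	cs            chunks.ChunkStore             // backing store chunks are ultimately written to
	unwrittenPuts *orderedChunkCache            // assumed return type of newOrderedChunkCache()
	vbs           *types.ValidatingBatchingSink // assumed return type of types.NewValidatingBatchingSink()
	hints         types.Hints
	hashes        hash.HashSet
	mu            *sync.Mutex
}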
// handleWriteValue accepts a POST body containing serialized hints followed by
// a stream of chunks. Each chunk is enqueued into a ValidatingBatchingSink; if
// the sink reports backpressure, the hashes of the rejected chunks are sent
// back with a 429 status so the client can retry them.
func handleWriteValue(w http.ResponseWriter, req *http.Request, ps URLParams, cs chunks.ChunkStore) {
	d.PanicIfTrue(req.Method != "POST", "Expected POST method.")

	reader := bodyReader(req)
	defer func() {
		// Ensure all data on reader is consumed.
		io.Copy(ioutil.Discard, reader)
		reader.Close()
	}()

	tc := types.NewTypeCache()
	vbs := types.NewValidatingBatchingSink(cs, tc)
	vbs.Prepare(deserializeHints(reader))

	chunkChan := make(chan *chunks.Chunk, 16)
	go chunks.DeserializeToChan(reader, chunkChan)

	var bpe chunks.BackpressureError
	for c := range chunkChan {
		if bpe == nil {
			bpe = vbs.Enqueue(*c)
		} else {
			bpe = append(bpe, c.Hash())
		}
		// If a previous Enqueue() errored, we still need to drain chunkChan.
		// TODO: what about having DeserializeToChan take a 'done' channel to stop it?
	}
	if bpe == nil {
		bpe = vbs.Flush()
	}
	if bpe != nil {
		// Headers must be set before WriteHeader, or they are silently dropped.
		w.Header().Add("Content-Type", "application/octet-stream")
		w.WriteHeader(httpStatusTooManyRequests)
		writer := respWriter(req, w)
		defer writer.Close()
		serializeHashes(writer, bpe.AsHashes())
		return
	}
	w.WriteHeader(http.StatusCreated)
}
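To make the request flow concrete, here is a hedged sketch of mounting this handler on a standard net/http mux. The /writeValue/ path and the zero-value URLParams are illustrative assumptions; the project's actual router and route names are not shown here.

// Sketch: wiring handleWriteValue into net/http. The route path is an
// illustrative assumption, not the project's real routing table.
func mountWriteValue(mux *http.ServeMux, cs chunks.ChunkStore) {
	mux.HandleFunc("/writeValue/", func(w http.ResponseWriter, req *http.Request) {
		var ps URLParams // zero value; this sketch assumes the route carries no params
		handleWriteValue(w, req, ps, cs)
	})
}

A client that receives the 429 response would read the hashes serialized in the body and re-submit exactly those chunks, since they are the ones the sink refused under backpressure.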