// loadChunks loads a group of chunks of a timeseries by their index. The chunk
// with the earliest time will have index 0, the following ones will have
// incrementally larger indexes. The indexOffset denotes the offset to be added to
// each index in indexes. It is the caller's responsibility to not persist or
// drop anything for the same fingerprint concurrently.
func (p *persistence) loadChunks(fp model.Fingerprint, indexes []int, indexOffset int) ([]chunk.Chunk, error) {
	f, err := p.openChunkFileForReading(fp)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	chunks := make([]chunk.Chunk, 0, len(indexes))
	buf := p.bufPool.Get().([]byte)
	defer func() {
		// buf may change below. An unwrapped 'defer p.bufPool.Put(buf)'
		// would only put back the original buf.
		p.bufPool.Put(buf)
	}()

	for i := 0; i < len(indexes); i++ {
		// This loads chunks in batches. A batch is a streak of
		// consecutive chunks, read from disk in one go.
		batchSize := 1
		if _, err := f.Seek(offsetForChunkIndex(indexes[i]+indexOffset), os.SEEK_SET); err != nil {
			return nil, err
		}

		for ; batchSize < chunkMaxBatchSize &&
			i+1 < len(indexes) &&
			indexes[i]+1 == indexes[i+1]; i, batchSize = i+1, batchSize+1 {
		}
		readSize := batchSize * chunkLenWithHeader
		if cap(buf) < readSize {
			buf = make([]byte, readSize)
		}
		buf = buf[:readSize]

		if _, err := io.ReadFull(f, buf); err != nil {
			return nil, err
		}
		for c := 0; c < batchSize; c++ {
			chunk, err := chunk.NewForEncoding(chunk.Encoding(buf[c*chunkLenWithHeader+chunkHeaderTypeOffset]))
			if err != nil {
				return nil, err
			}
			if err := chunk.UnmarshalFromBuf(buf[c*chunkLenWithHeader+chunkHeaderLen:]); err != nil {
				return nil, err
			}
			chunks = append(chunks, chunk)
		}
	}
	chunk.Ops.WithLabelValues(chunk.Load).Add(float64(len(chunks)))
	atomic.AddInt64(&chunk.NumMemChunks, int64(len(chunks)))
	return chunks, nil
}
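// The helper below is an illustrative sketch and not part of the original
// code: it shows how a caller that already holds the lock for the given
// fingerprint (see the doc comment above) might use loadChunks. The helper
// name and the choice of indexes are assumptions made for the example.
func loadFirstChunks(p *persistence, fp model.Fingerprint, n int) ([]chunk.Chunk, error) {
	// Request the n earliest persisted chunks of the series. With an
	// indexOffset of 0, the indexes passed here are the absolute on-disk
	// chunk indexes; consecutive indexes are read in batches by loadChunks.
	indexes := make([]int, n)
	for i := range indexes {
		indexes[i] = i
	}
	return p.loadChunks(fp, indexes, 0)
}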
// scan works like bufio.Scanner.Scan.
func (hs *headsScanner) scan() bool {
	if hs.seriesCurrent == hs.seriesTotal || hs.err != nil {
		return false
	}

	var (
		seriesFlags      byte
		fpAsInt          uint64
		metric           codable.Metric
		persistWatermark int64
		modTimeNano      int64
		modTime          time.Time
		chunkDescsOffset int64
		savedFirstTime   int64
		numChunkDescs    int64
		firstTime        int64
		lastTime         int64
		encoding         byte
		ch               chunk.Chunk
		lastTimeHead     model.Time
	)
	if seriesFlags, hs.err = hs.r.ReadByte(); hs.err != nil {
		return false
	}
	headChunkPersisted := seriesFlags&flagHeadChunkPersisted != 0
	if fpAsInt, hs.err = codable.DecodeUint64(hs.r); hs.err != nil {
		return false
	}
	hs.fp = model.Fingerprint(fpAsInt)
	if hs.err = metric.UnmarshalFromReader(hs.r); hs.err != nil {
		return false
	}
	if hs.version != headsFormatLegacyVersion {
		// persistWatermark only present in v2.
		persistWatermark, hs.err = binary.ReadVarint(hs.r)
		if hs.err != nil {
			return false
		}
		modTimeNano, hs.err = binary.ReadVarint(hs.r)
		if hs.err != nil {
			return false
		}
		if modTimeNano != -1 {
			modTime = time.Unix(0, modTimeNano)
		}
	}
	if chunkDescsOffset, hs.err = binary.ReadVarint(hs.r); hs.err != nil {
		return false
	}
	if savedFirstTime, hs.err = binary.ReadVarint(hs.r); hs.err != nil {
		return false
	}
	if numChunkDescs, hs.err = binary.ReadVarint(hs.r); hs.err != nil {
		return false
	}
	chunkDescs := make([]*chunk.Desc, numChunkDescs)
	if hs.version == headsFormatLegacyVersion {
		if headChunkPersisted {
			persistWatermark = numChunkDescs
		} else {
			persistWatermark = numChunkDescs - 1
		}
	}
	headChunkClosed := true // Initial assumption.
	for i := int64(0); i < numChunkDescs; i++ {
		if i < persistWatermark {
			if firstTime, hs.err = binary.ReadVarint(hs.r); hs.err != nil {
				return false
			}
			if lastTime, hs.err = binary.ReadVarint(hs.r); hs.err != nil {
				return false
			}
			chunkDescs[i] = &chunk.Desc{
				ChunkFirstTime: model.Time(firstTime),
				ChunkLastTime:  model.Time(lastTime),
			}
			chunk.NumMemDescs.Inc()
		} else {
			// Non-persisted chunk.
			// If there are non-persisted chunks at all, we consider
			// the head chunk not to be closed yet.
			headChunkClosed = false
			if encoding, hs.err = hs.r.ReadByte(); hs.err != nil {
				return false
			}
			if ch, hs.err = chunk.NewForEncoding(chunk.Encoding(encoding)); hs.err != nil {
				return false
			}
			if hs.err = ch.Unmarshal(hs.r); hs.err != nil {
				return false
			}
			cd := chunk.NewDesc(ch, ch.FirstTime())
			if i < numChunkDescs-1 {
				// This is NOT the head chunk. So it's a chunk
				// to be persisted, and we need to populate lastTime.
				hs.chunksToPersistTotal++
				cd.MaybePopulateLastTime()
			}
			chunkDescs[i] = cd
		}
	}

	if lastTimeHead, hs.err = chunkDescs[len(chunkDescs)-1].LastTime(); hs.err != nil {
		return false
	}

	hs.series = &memorySeries{
		metric:           model.Metric(metric),
		chunkDescs:       chunkDescs,
		persistWatermark: int(persistWatermark),
		modTime:          modTime,
		chunkDescsOffset: int(chunkDescsOffset),
		savedFirstTime:   model.Time(savedFirstTime),
		lastTime:         lastTimeHead,
		headChunkClosed:  headChunkClosed,
	}
	hs.seriesCurrent++
	return true
}
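// The helper below is an illustrative sketch and not part of the original
// code: it shows the bufio.Scanner-style loop the doc comment above alludes
// to, driving scan() until it returns false and then checking hs.err to
// distinguish end-of-input from a read error. The helper and callback names
// are assumptions made for the example.
func forEachHeadSeries(hs *headsScanner, visit func(model.Fingerprint, *memorySeries)) error {
	for hs.scan() {
		// After a successful scan, hs.fp and hs.series hold the series
		// that was just decoded from the heads file.
		visit(hs.fp, hs.series)
	}
	return hs.err
}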