Code example #1
File: wal.go Project: KoeSystems/influxdb
// seriesToFlush will clear the cache of series over the given threshold and return
// them in a new map along with their combined size.
func (p *Partition) seriesToFlush(readySeriesSize int) (map[string][][]byte, int) {
	seriesToFlush := make(map[string][][]byte)
	size := 0
	for k, c := range p.cache {
		// if the series is over the threshold, save it in the map to flush later
		if c.size >= readySeriesSize {
			size += c.size
			seriesToFlush[k] = c.points

			// make sure the points handed to the index are sorted
			if c.isDirtySort {
				sort.Sort(tsdb.ByteSlices(seriesToFlush[k]))
			}

			delete(p.cache, k)
		}
	}

	return seriesToFlush, size
}
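
The cache drained above maps series keys to entries tracking their points, accumulated byte size, and sort state. The entry type is not part of this excerpt; below is a minimal sketch inferred from the fields used (c.points, c.size, c.isDirtySort), not the verbatim wal.go type.

// cacheEntry is a hypothetical reconstruction based on the usage above.
type cacheEntry struct {
	points      [][]byte // marshaled points, one entry per element
	size        int      // combined byte size of all points
	isDirtySort bool     // set when points were appended out of timestamp order
}

Keeping a dirty flag means the common in-order append path never pays for sorting; the sort happens at most once, at flush time.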
Code example #2
File: bz1.go Project: rmillner/influxdb
// writeIndex writes a set of points for a single key.
func (e *Engine) writeIndex(tx *bolt.Tx, key string, a [][]byte) error {
	// Ignore if there are no points.
	if len(a) == 0 {
		return nil
	}
	e.statMap.Add(statPointsWrite, int64(len(a)))

	// Create or retrieve series bucket.
	bkt, err := tx.Bucket([]byte("points")).CreateBucketIfNotExists([]byte(key))
	if err != nil {
		return fmt.Errorf("create series bucket: %s", err)
	}
	c := bkt.Cursor()

	// Sort the entries and remove duplicate timestamps before retrieving the time range.
	a = tsdb.DedupeEntries(a)
	e.statMap.Add(statPointsWriteDedupe, int64(len(a)))

	// Convert the raw time-prefixed slices into marshaled entries that carry their lengths.
	for i, p := range a {
		timestamp := int64(btou64(p[0:8]))
		a[i] = MarshalEntry(timestamp, p[8:])
	}

	// Determine the time range of the new data; the slice is sorted, so the
	// first and last entries bound it.
	tmin, tmax := int64(btou64(a[0][0:8])), int64(btou64(a[len(a)-1][0:8]))
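
	// Each block in the bucket is keyed by its minimum timestamp; the value
	// carries the block's maximum timestamp in its first 8 bytes, followed
	// by the snappy-compressed entries. The reads of k and v[0:8] below
	// rely on that layout.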

	// If the bucket is empty, or tmin is after the last block and that block
	// is already full, then simply append new blocks.
	//
	// This is the optimized fast path. Otherwise we need to merge the points
	// with existing blocks on disk and rewrite all the blocks for that range.
	if k, v := c.Last(); k == nil {
		bkt.FillPercent = 1.0
		if err := e.writeBlocks(bkt, a); err != nil {
			return fmt.Errorf("new blocks: %s", err)
		}
		return nil
	} else {
		// Determine uncompressed block size.
		sz, err := snappy.DecodedLen(v[8:])
		if err != nil {
			return fmt.Errorf("snappy decoded len: %s", err)
		}

		// Append new blocks if our time range is past the last on-disk time
		// and if our previous block was at least the minimum block size.
		if int64(btou64(v[0:8])) < tmin && sz >= e.BlockSize {
			bkt.FillPercent = 1.0
			if err := e.writeBlocks(bkt, a); err != nil {
				return fmt.Errorf("append blocks: %s", err)
			}
			return nil
		}

		// Otherwise fall through to the slower insert mode.
		e.statMap.Add(statSlowInsert, 1)
	}

	// Generate a set of the timestamps being inserted.
	m := make(map[int64]struct{})
	for _, b := range a {
		m[int64(btou64(b[0:8]))] = struct{}{}
	}

	// If the time range overlaps existing blocks, unpack the full range and reinsert.
	var existing [][]byte
	for k, v := c.First(); k != nil; k, v = c.Next() {
		// Determine block range.
		bmin, bmax := int64(btou64(k)), int64(btou64(v[0:8]))

		// Skip over all blocks before the time range.
		// Exit once we reach a block that is beyond our time range.
		if bmax < tmin {
			continue
		} else if bmin > tmax {
			break
		}

		// Decode block.
		buf, err := snappy.Decode(nil, v[8:])
		if err != nil {
			return fmt.Errorf("decode block: %s", err)
		}

		// Copy out any entries that aren't being overwritten.
		for _, entry := range SplitEntries(buf) {
			if _, ok := m[int64(btou64(entry[0:8]))]; !ok {
				existing = append(existing, entry)
			}
		}

		// Delete the block from the database; its surviving entries are rewritten below.
		if err := c.Delete(); err != nil {
			return fmt.Errorf("delete block: %s", err)
		}
	}

	// Merge entries before rewriting.
	a = append(existing, a...)
	sort.Sort(tsdb.ByteSlices(a))

	// Rewrite points to new blocks.
	if err := e.writeBlocks(bkt, a); err != nil {
		return fmt.Errorf("rewrite blocks: %s", err)
	}

	return nil
}
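
Both examples lean on small encoding helpers whose definitions sit outside this excerpt. From the usage, btou64 is a big-endian uint64 decode, and MarshalEntry evidently prefixes each payload with its timestamp and, per the comment about entries carrying their lengths, a length field. Below is a sketch under those assumptions; the [timestamp][length][data] layout is inferred from the surrounding code, not confirmed by this excerpt.

package bz1 // illustrative placement only

import "encoding/binary"

// btou64 decodes a big-endian uint64. Big-endian order makes byte-wise
// key comparison match numeric timestamp order, which the cursor scans
// above depend on.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }

// MarshalEntry prepends an 8-byte big-endian timestamp and a 4-byte
// big-endian payload length to the data (layout assumed, see note above).
func MarshalEntry(timestamp int64, data []byte) []byte {
	v := make([]byte, 8+4, 8+4+len(data))
	binary.BigEndian.PutUint64(v[0:8], uint64(timestamp))
	binary.BigEndian.PutUint32(v[8:12], uint32(len(data)))
	return append(v, data...)
}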