Example #1
0
File: wal.go  Project: bwolf/influxdb
// cursor combines the in-memory cache with the flush cache (when a flush is
// in progress) into a single ordered cursor for the given series key.
func (p *Partition) cursor(series string, fields []string, dec *tsdb.FieldCodec, ascending bool) *cursor {
	p.mu.Lock()
	defer p.mu.Unlock()

	entry := p.cache[series]
	if entry == nil {
		entry = &cacheEntry{}
	}

	// While a flush is running the points being flushed live in flushCache;
	// merge them with the current cache so the cursor sees both sets.
	if p.flushCache != nil {
		if fc, ok := p.flushCache[series]; ok {
			merged := make([][]byte, 0, len(fc)+len(entry.points))
			merged = append(merged, fc...)
			merged = append(merged, entry.points...)
			return newCursor(tsdb.DedupeEntries(merged), fields, dec, ascending)
		}
	}

	// Lazily re-sort/dedupe if out-of-order writes marked this entry dirty.
	if entry.isDirtySort {
		entry.points = tsdb.DedupeEntries(entry.points)
		entry.isDirtySort = false
	}

	// Hand the cursor its own copy so later modifications to the partition
	// cannot change the result set.
	snapshot := make([][]byte, len(entry.points))
	copy(snapshot, entry.points)

	return newCursor(snapshot, fields, dec, ascending)
}
Example #2
0
// cursor combines the in-memory cache and, when a flush is currently
// happening, the flush cache, yielding a single ordered cursor for the key.
func (p *Partition) cursor(key string) *cursor {
	p.mu.Lock()
	defer p.mu.Unlock()

	entry := p.cache[key]
	if entry == nil {
		entry = &cacheEntry{}
	}

	// A flush in progress keeps the previous cache in flushCache; fold it
	// together with the current entry so the cursor covers both.
	if p.flushCache != nil {
		if fc, ok := p.flushCache[key]; ok {
			combined := make([][]byte, 0, len(fc)+len(entry.points))
			combined = append(combined, fc...)
			combined = append(combined, entry.points...)
			return &cursor{cache: tsdb.DedupeEntries(combined)}
		}
	}

	// Re-sort/dedupe lazily if out-of-order writes dirtied the entry.
	if entry.isDirtySort {
		entry.points = tsdb.DedupeEntries(entry.points)
		entry.isDirtySort = false
	}

	// Snapshot the points so partition mutations can't change the result set.
	snapshot := make([][]byte, len(entry.points))
	copy(snapshot, entry.points)
	return &cursor{cache: snapshot}
}
Example #3
0
// cursor will combine the in memory cache and flush cache (if a flush is
// currently happening) to give a single ordered cursor for the key.
func (p *Partition) cursor(key string) *cursor {
	p.mu.Lock()
	defer p.mu.Unlock()

	entry := p.cache[key]
	if entry == nil {
		return &cursor{}
	}

	// if we're in the middle of a flush, combine the previous cache
	// with this one for the cursor
	if p.flushCache != nil {
		if fc, ok := p.flushCache[key]; ok {
			c := make([][]byte, len(fc), len(fc)+len(entry.points))
			copy(c, fc)
			c = append(c, entry.points...)

			return &cursor{cache: tsdb.DedupeEntries(c)}
		}
	}

	if entry.isDirtySort {
		entry.points = tsdb.DedupeEntries(entry.points)
		entry.isDirtySort = false
	}

	// Build a copy so modifications to the partition don't change the result
	// set. Returning entry.points directly would let a later in-place
	// dedupe/sort of the cache entry mutate data a live cursor is reading.
	a := make([][]byte, len(entry.points))
	copy(a, entry.points)
	return &cursor{cache: a}
}
Example #4
0
// MergePoints returns a map of all points merged together by key.
// Later points will overwrite earlier ones.
func MergePoints(a []Points) Points {
	merged := make(Points)

	// Concatenate every set's values under its key; input order is kept so
	// points from later sets follow earlier ones.
	for _, set := range a {
		for key, values := range set {
			merged[key] = append(merged[key], values...)
		}
	}

	// Collapse duplicate entries per key.
	for key := range merged {
		merged[key] = tsdb.DedupeEntries(merged[key])
	}

	return merged
}
Example #5
0
File: bz1.go  Project: rmillner/influxdb
// writeIndex writes a set of points for a single key.
//
// Each element of a is a raw entry whose first 8 bytes are a big-endian
// timestamp followed by the point data. Fast path: when the new points all
// land after the last block on disk, they are simply appended as new blocks.
// Otherwise every on-disk block overlapping [tmin, tmax] is unpacked, merged
// with the new points (new points win on timestamp collisions), and the
// range is rewritten.
func (e *Engine) writeIndex(tx *bolt.Tx, key string, a [][]byte) error {
	// Ignore if there are no points.
	if len(a) == 0 {
		return nil
	}
	e.statMap.Add(statPointsWrite, int64(len(a)))

	// Create or retrieve series bucket.
	bkt, err := tx.Bucket([]byte("points")).CreateBucketIfNotExists([]byte(key))
	if err != nil {
		return fmt.Errorf("create series bucket: %s", err)
	}
	c := bkt.Cursor()

	// Ensure the slice is sorted before retrieving the time range.
	a = tsdb.DedupeEntries(a)
	e.statMap.Add(statPointsWriteDedupe, int64(len(a)))

	// Convert the raw time and byte slices to entries with lengths
	for i, p := range a {
		timestamp := int64(btou64(p[0:8]))
		a[i] = MarshalEntry(timestamp, p[8:])
	}

	// Determine time range of new data.
	// a is sorted, so the first and last entries bound the range.
	tmin, tmax := int64(btou64(a[0][0:8])), int64(btou64(a[len(a)-1][0:8]))

	// If tmin is after the last block then append new blocks.
	//
	// This is the optimized fast path. Otherwise we need to merge the points
	// with existing blocks on disk and rewrite all the blocks for that range.
	if k, v := c.Last(); k == nil {
		// Bucket is empty: everything is a straight append.
		// FillPercent 1.0 packs pages fully since writes are append-only here.
		bkt.FillPercent = 1.0
		if err := e.writeBlocks(bkt, a); err != nil {
			return fmt.Errorf("new blocks: %s", err)
		}
		return nil
	} else {
		// Determine uncompressed block size.
		sz, err := snappy.DecodedLen(v[8:])
		if err != nil {
			return fmt.Errorf("snappy decoded len: %s", err)
		}

		// Append new blocks if our time range is past the last on-disk time
		// and if our previous block was at least the minimum block size.
		if int64(btou64(v[0:8])) < tmin && sz >= e.BlockSize {
			bkt.FillPercent = 1.0
			if err := e.writeBlocks(bkt, a); err != nil {
				return fmt.Errorf("append blocks: %s", err)
			}
			return nil
		}

		// Otherwise fallthrough to slower insert mode.
		e.statMap.Add(statSlowInsert, 1)
	}

	// Generate map of inserted keys.
	// Used below to drop existing entries whose timestamp is being rewritten,
	// so the new points overwrite the old ones.
	m := make(map[int64]struct{})
	for _, b := range a {
		m[int64(btou64(b[0:8]))] = struct{}{}
	}

	// If time range overlaps existing blocks then unpack full range and reinsert.
	var existing [][]byte
	for k, v := c.First(); k != nil; k, v = c.Next() {
		// Determine block range.
		// Per the bmin/bmax assignment: the bucket key holds the block's min
		// timestamp and the value's first 8 bytes hold its max timestamp.
		bmin, bmax := int64(btou64(k)), int64(btou64(v[0:8]))

		// Skip over all blocks before the time range.
		// Exit once we reach a block that is beyond our time range.
		if bmax < tmin {
			continue
		} else if bmin > tmax {
			break
		}

		// Decode block.
		buf, err := snappy.Decode(nil, v[8:])
		if err != nil {
			return fmt.Errorf("decode block: %s", err)
		}

		// Copy out any entries that aren't being overwritten.
		for _, entry := range SplitEntries(buf) {
			if _, ok := m[int64(btou64(entry[0:8]))]; !ok {
				existing = append(existing, entry)
			}
		}

		// Delete block in database.
		// NOTE(review): Cursor.Delete's error is ignored here — confirm this
		// is safe for the cursor modes in use, or propagate it.
		c.Delete()
	}

	// Merge entries before rewriting.
	a = append(existing, a...)
	sort.Sort(tsdb.ByteSlices(a))

	// Rewrite points to new blocks.
	if err := e.writeBlocks(bkt, a); err != nil {
		return fmt.Errorf("rewrite blocks: %s", err)
	}

	return nil
}