Example #1
// newTestRangeSet creates a new range set containing count ranges.
func newTestRangeSet(count int, t *testing.T) *testRangeSet {
	rs := &testRangeSet{replicasByKey: btree.New(64 /* degree */)}
	for i := 0; i < count; i++ {
		desc := &roachpb.RangeDescriptor{
			RangeID:  roachpb.RangeID(i),
			StartKey: roachpb.RKey(fmt.Sprintf("%03d", i)),
			EndKey:   roachpb.RKey(fmt.Sprintf("%03d", i+1)),
		}
		// Initialize the range stat so the scanner can use it.
		rng := &Replica{
			RangeID: desc.RangeID,
		}
		rng.mu.state.Stats = enginepb.MVCCStats{
			KeyBytes:  1,
			ValBytes:  2,
			KeyCount:  1,
			LiveCount: 1,
		}

		if err := rng.setDesc(desc); err != nil {
			t.Fatal(err)
		}
		if exRngItem := rs.replicasByKey.ReplaceOrInsert(rng); exRngItem != nil {
			t.Fatalf("failed to insert range %s", rng)
		}
	}
	return rs
}
Example #2
// Clear clears the cache and resets the low-water mark.
func (tc *timestampCache) Clear(lowWater hlc.Timestamp) {
	tc.requests = btree.New(btreeDegree)
	tc.rCache.Clear()
	tc.wCache.Clear()
	tc.lowWater = lowWater
	tc.latest = tc.lowWater
}
Example #3
// rebuild regenerates the index with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
	tree := btree.New(2)
	for key := range keys {
		tree.ReplaceOrInsert(btreeString{s: key, l: less})
	}
	return tree
}
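The btreeString adapter used above is not shown in this snippet. A minimal sketch, assuming the github.com/google/btree package is imported and that LessFunction is a package-level comparison type, of how such an adapter could satisfy btree.Item:

// LessFunction compares two string keys; assumed to be declared alongside rebuild.
type LessFunction func(string, string) bool

// btreeString pairs a key with the comparison function so the tree can order
// arbitrary string keys.
type btreeString struct {
	s string
	l LessFunction
}

// Less satisfies btree.Item by delegating to the stored LessFunction.
func (s btreeString) Less(than btree.Item) bool {
	return s.l(s.s, than.(btreeString).s)
}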
Example #4
// LoadCountries loads the countries (as defined in the local
// countries_list constant) into an in-memory BTree.
func LoadCountries() (*Countries, error) {

	r := csv.NewReader(strings.NewReader(countries_list))
	r.FieldsPerRecord = -1
	r.Comma = ';'

	t := btree.New(4)

	for {

		values, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log_geolocip.Err(fmt.Sprintf("Countries error %v", err))
			break
		}

		// fmt.Println(len(values), values)

		// Use only lines with 2 values
		if len(values) == 2 {
			t.ReplaceOrInsert(Country{values[1], values[0]})
		}
	}

	return (*Countries)(t), nil
}
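Neither Country nor Countries is defined in this snippet. A minimal sketch, with assumed field names, of declarations that would make the ReplaceOrInsert call and the (*Countries)(t) conversion above compile:

// Country pairs the two CSV fields; which field holds the code and which the
// name is an assumption here.
type Country struct {
	Key  string
	Name string
}

// Less satisfies btree.Item by ordering countries on the first field.
func (c Country) Less(than btree.Item) bool {
	return c.Key < than.(Country).Key
}

// Countries is assumed to be a named type over btree.BTree, which is what
// makes the (*Countries)(t) conversion legal.
type Countries btree.BTree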
Example #5
func BenchmarkSortedInsert_ReplaceOrInsert(b *testing.B) {
	for i := 0; i < b.N; i++ {
		tree := btree.New(btreeDegree)
		for i := 0; i < len(fixture.SortedTestData); i++ {
			tree.ReplaceOrInsert(googItem(fixture.SortedTestData[i]))
		}
	}
}
Example #6
func (t *MockTable) getOrCreateRow(rowKey *keyPart) *btree.BTree {
	row := t.rows[rowKey.RowKey()]
	if row == nil {
		row = btree.New(2)
		t.rows[rowKey.RowKey()] = row
	}
	return row
}
Example #7
func (t *MockTable) getOrCreateRow(rowKey key) *btree.BTree {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	row := t.rows[rowKey.RowKey()]
	if row == nil {
		row = btree.New(2)
		t.rows[rowKey.RowKey()] = row
	}
	return row
}
Example #8
func newEngine() *Engine {
	result := &Engine{
		incomingCh:  make(chan string, 10000),
		awaitCh:     make(chan bool),
		suggestions: btree.New(100),
	}
	go result.loop()
	return result
}
Example #9
// LoadBlocksFile reads a MaxMind GeoIP Blocks file into memory as a
// BTree of Block structures.
func LoadBlocksFile(filename string) (*Blocks, error) {

	file, err := os.Open(filename)
	if err != nil {
		log_geolocip.Err(fmt.Sprintf("Blocks error open file: %v", err))
		return nil, err
	}
	defer file.Close()

	t := btree.New(4)

	r := csv.NewReader(file)
	r.FieldsPerRecord = -1

	for {

		values, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log_geolocip.Err(fmt.Sprintf("Blocks error reading file: %v", err))
			break
		}

		// Use only lines with 3 values
		if len(values) == 3 {

			low_ip, err := strconv.ParseUint(values[0], 10, 32)
			if err != nil {
				// log.Println("Line ignored, cannot read LowIP", err)
				continue
			}
			high_ip, err := strconv.ParseUint(values[1], 10, 32)
			if err != nil {
				// log.Println("Line ignored, cannot read HighIP", err)
				continue
			}
			loc_id, err := strconv.ParseUint(values[2], 10, 32)
			if err != nil {
				// log.Println("Line ignored, cannot read LocId", err)
				continue
			}

			var block = Block{uint32(low_ip), uint32(high_ip), uint32(loc_id)}
			// fmt.Println(block)
			t.ReplaceOrInsert(block)

		}
	}

	return (*Blocks)(t), nil
}
Example #10
// newTimestampCache returns a new timestamp cache with the supplied
// hybrid clock.
func newTimestampCache(clock *hlc.Clock) *timestampCache {
	tc := &timestampCache{
		rCache:                cache.NewIntervalCache(cache.Config{Policy: cache.CacheFIFO}),
		wCache:                cache.NewIntervalCache(cache.Config{Policy: cache.CacheFIFO}),
		requests:              btree.New(btreeDegree),
		evictionSizeThreshold: defaultEvictionSizeThreshold,
	}
	tc.Clear(clock)
	tc.rCache.Config.ShouldEvict = tc.shouldEvict
	tc.wCache.Config.ShouldEvict = tc.shouldEvict
	return tc
}
Example #11
// MergeInto merges all entries from this timestamp cache into the
// dest timestamp cache. The clear parameter, if true, copies the
// values of lowWater and latest and clears the destination cache
// before merging in the source.
func (tc *timestampCache) MergeInto(dest *timestampCache, clear bool) {
	if clear {
		dest.rCache.Clear()
		dest.wCache.Clear()
		dest.lowWater = tc.lowWater
		dest.latest = tc.latest
		dest.requests = btree.New(btreeDegree)
		dest.reqIDAlloc = 0

		// Because we just cleared the destination cache, we can directly
		// insert entries from this cache.
		hardMerge := func(srcCache, destCache *cache.IntervalCache) {
			srcCache.Do(func(k, v interface{}) {
				// Cache entries are mutable (see Add), so we give each cache its own
				// unique copy.
				entry := makeCacheEntry(*k.(*cache.IntervalKey), *v.(*cacheValue))
				destCache.AddEntry(entry)
			})
		}
		hardMerge(tc.rCache, dest.rCache)
		hardMerge(tc.wCache, dest.wCache)
	} else {
		dest.lowWater.Forward(tc.lowWater)
		dest.latest.Forward(tc.latest)

		// The cache was not cleared before, so we can't just insert entries because
		// intervals may need to be adjusted or removed to maintain the non-overlapping
		// guarantee.
		softMerge := func(srcCache *cache.IntervalCache, readTSCache bool) {
			srcCache.Do(func(k, v interface{}) {
				key, val := *k.(*cache.IntervalKey), *v.(*cacheValue)
				dest.add(roachpb.Key(key.Start), roachpb.Key(key.End), val.timestamp, val.txnID, readTSCache)
			})
		}
		softMerge(tc.rCache, true)
		softMerge(tc.wCache, false)
	}

	// Copy the requests.
	tc.requests.Ascend(func(i btree.Item) bool {
		req := *(i.(*cacheRequest))
		dest.reqIDAlloc++
		req.uniqueID = dest.reqIDAlloc
		dest.requests.ReplaceOrInsert(&req)
		dest.reqSpans += req.numSpans()
		return true
	})
}
Example #12
func BenchmarkIterate(b *testing.B) {
	tree := btree.New(btreeDegree)
	for i := 0; i < len(fixture.TestData); i++ {
		tree.ReplaceOrInsert(googItem(fixture.TestData[i]))
	}
	b.ResetTimer()

	for i := 0; i < b.N; i++ {

		tree.Ascend(func(i btree.Item) bool {
			_ = i.(googItem).Key
			_ = i.(googItem).Value
			return true
		})
	}
}
Example #13
func NewStaticDataSource(name string, indexedCol int, data [][]driver.Value, cols []string) *StaticDataSource {

	sourceSchema := datasource.NewSourceSchema(name, sourceType)
	tbl := datasource.NewTable(name, sourceSchema)
	sourceSchema.AddTable(tbl)
	schema := datasource.NewSchema(name)
	schema.AddSourceSchema(sourceSchema)

	m := StaticDataSource{indexCol: indexedCol}
	m.tbl = tbl
	m.bt = btree.New(32)
	m.Schema = schema
	m.tbl.SetColumns(cols)
	for _, row := range data {
		m.Put(nil, nil, row)
	}
	return &m
}
Example #14
func syncGo() {
	seq_next := int64(1)
	dseq := map[string]int64{}
	dlog := btree.New(8)
	cur_wchan := make(chan struct{}, 0)
	for {
		req := <-syncChan
		switch req := req.(type) {
		case syncNotifyReq:
			item_seq := dseq[req.key]
			if item_seq > 0 {
				i := dlog.Get(kdVersion{seq: item_seq})
				cur := i.(kdVersion)
				if cur.data == req.data {
					continue
				}
				dlog.Delete(kdVersion{seq: item_seq})
			}
			dseq[req.key] = seq_next
			dlog.ReplaceOrInsert(kdVersion{seq: seq_next, key: req.key, data: req.data})
			seq_next++
			close(cur_wchan)
			cur_wchan = make(chan struct{}, 0)
		case syncRefreshReq:
			update := map[string]string{}
			dlog.AscendGreaterOrEqual(kdVersion{seq: req.last_seq}, func(i btree.Item) bool {
				kd := i.(kdVersion)
				update[kd.key] = kd.data
				return true
			})
			req.rsp_ch <- syncRefreshRsp{
				seq:    seq_next,
				update: update,
				wchan:  cur_wchan,
			}
		}
	}
}
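kdVersion is not shown here, but since Get, Delete and AscendGreaterOrEqual are all keyed on seq alone, its Less method must compare only that field. A minimal sketch under that assumption:

// kdVersion records one key/data pair together with the sequence number at
// which it was last updated.
type kdVersion struct {
	seq  int64
	key  string
	data string
}

// Less satisfies btree.Item by comparing sequence numbers only, which is why
// lookups with a seq-only kdVersion work above.
func (v kdVersion) Less(than btree.Item) bool {
	return v.seq < than.(kdVersion).seq
}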
Example #15
		} else {
			u.addr.ip = make(net.IP, 4)
		}
		r.Read(u.addr.ip)
		bodylen -= uint16(len(u.addr.ip))
		binary.Read(r, binary.BigEndian, &u.addr.port)
		bodylen -= 2
		u.content = make([]byte, int(bodylen))
		r.Read(u.content)
	}
	return nil
}

var udpSessionTable = make(map[uint16]*udpSession)
var udpSessionIdSet = btree.New(4)
var cidTable = make(map[uint32]uint16)
var udpSessionMutex sync.Mutex

func closeAllUDPSession() {
	udpSessionMutex.Lock()
	defer udpSessionMutex.Unlock()
	for id := range udpSessionTable {
		delete(udpSessionTable, id)
		//closeProxySession(session.session.id)
	}
	cidTable = make(map[uint32]uint16)
}

func removeUdpSession(id *udpSessionId) {
	s, exist := udpSessionTable[id.id]
Example #16
func (r *MemoryBTreeDB) FlushAll() (int64, error) {
	r.data = btree.New(8)
	return 0, nil
}
Example #17
func newTreeIndex() index {
	return &treeIndex{
		tree: btree.New(32),
	}
}
Example #18
// NewSelection creates a new empty Selection
func NewSelection() *Selection {
	return &Selection{btree.New(32)}
}
Example #19
func (s *Selection) Reset() {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.tree = btree.New(32)
}
Example #20
func NewGoogleBTree(lesser func(l, r interface{}) bool) *GoogleBTree {
	return &GoogleBTree{
		bt:     btree.New(32),
		lesser: lesser,
	}
}
Example #21
func TestIndexCompact(t *testing.T) {
	maxRev := int64(20)
	tests := []struct {
		key     []byte
		remove  bool
		rev     revision
		created revision
		ver     int64
	}{
		{[]byte("foo"), false, revision{main: 1}, revision{main: 1}, 1},
		{[]byte("foo1"), false, revision{main: 2}, revision{main: 2}, 1},
		{[]byte("foo2"), false, revision{main: 3}, revision{main: 3}, 1},
		{[]byte("foo2"), false, revision{main: 4}, revision{main: 3}, 2},
		{[]byte("foo"), false, revision{main: 5}, revision{main: 1}, 2},
		{[]byte("foo1"), false, revision{main: 6}, revision{main: 2}, 2},
		{[]byte("foo1"), true, revision{main: 7}, revision{}, 0},
		{[]byte("foo2"), true, revision{main: 8}, revision{}, 0},
		{[]byte("foo"), true, revision{main: 9}, revision{}, 0},
		{[]byte("foo"), false, revision{10, 0}, revision{10, 0}, 1},
		{[]byte("foo1"), false, revision{10, 1}, revision{10, 1}, 1},
	}

	// Continuous compaction: compact the same index at each successive revision.
	ti := newTreeIndex()
	for _, tt := range tests {
		if tt.remove {
			ti.Tombstone(tt.key, tt.rev)
		} else {
			ti.Put(tt.key, tt.rev)
		}
	}
	for i := int64(1); i < maxRev; i++ {
		am := ti.Compact(i)

		wti := &treeIndex{tree: btree.New(32)}
		for _, tt := range tests {
			if _, ok := am[tt.rev]; ok || tt.rev.GreaterThan(revision{main: i}) {
				if tt.remove {
					wti.Tombstone(tt.key, tt.rev)
				} else {
					restore(wti, tt.key, tt.created, tt.rev, tt.ver)
				}
			}
		}
		if !ti.Equal(wti) {
			t.Errorf("#%d: not equal ti", i)
		}
	}

	// Single compaction: rebuild the index and compact once at each revision.
	for i := int64(1); i < maxRev; i++ {
		ti := newTreeIndex()
		for _, tt := range tests {
			if tt.remove {
				ti.Tombstone(tt.key, tt.rev)
			} else {
				ti.Put(tt.key, tt.rev)
			}
		}
		am := ti.Compact(i)

		wti := &treeIndex{tree: btree.New(32)}
		for _, tt := range tests {
			if _, ok := am[tt.rev]; ok || tt.rev.GreaterThan(revision{main: i}) {
				if tt.remove {
					wti.Tombstone(tt.key, tt.rev)
				} else {
					restore(wti, tt.key, tt.created, tt.rev, tt.ver)
				}
			}
		}
		if !ti.Equal(wti) {
			t.Errorf("#%d: not equal ti", i)
		}
	}
}