Example #1
// indexRangesToPBRanges converts index ranges to coprocessor (tipb) key ranges.
func indexRangesToPBRanges(ranges []*plan.IndexRange, fieldTypes []*types.FieldType) ([]*tipb.KeyRange, error) {
	keyRanges := make([]*tipb.KeyRange, 0, len(ranges))
	for _, ran := range ranges {
		err := convertIndexRangeTypes(ran, fieldTypes)
		if err != nil {
			return nil, errors.Trace(err)
		}
		low, err := codec.EncodeKey(nil, ran.LowVal...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if ran.LowExclude {
			low = []byte(kv.Key(low).PrefixNext())
		}
		high, err := codec.EncodeKey(nil, ran.HighVal...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !ran.HighExclude {
			high = []byte(kv.Key(high).PrefixNext())
		}
		keyRanges = append(keyRanges, &tipb.KeyRange{Low: low, High: high})
	}
	return keyRanges, nil
}
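
Both this example and Example #3 below handle range bounds the same way: kv.Key.PrefixNext yields the first key greater than every key that starts with the given prefix, so it skips an excluded low bound and turns an inclusive high bound into the exclusive end of a half-open [low, high) range. A minimal sketch of that step, using a hypothetical encodeBound helper and the same codec/kv calls as above:

// encodeBound is a hypothetical helper, not part of the source above: it
// encodes one bound of an index range and, when requested, bumps the key
// past every key that shares the encoded prefix.
func encodeBound(vals []types.Datum, bumpPastPrefix bool) ([]byte, error) {
	b, err := codec.EncodeKey(nil, vals...)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if bumpPastPrefix {
		// Low bound: skips the excluded value itself.
		// High bound: lets the inclusive value fall inside [low, high).
		b = []byte(kv.Key(b).PrefixNext())
	}
	return b, nil
}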
Example #2
// GenIndexKey generates storage key for index values. Returned distinct indicates whether the
// indexed values should be distinct in storage (i.e. whether handle is encoded in the key).
func (c *kvIndex) GenIndexKey(indexedValues []interface{}, h int64) (key []byte, distinct bool, err error) {
	if c.unique {
		// See: https://dev.mysql.com/doc/refman/5.7/en/create-index.html
		// A UNIQUE index creates a constraint such that all values in the index must be distinct.
		// An error occurs if you try to add a new row with a key value that matches an existing row.
		// For all engines, a UNIQUE index permits multiple NULL values for columns that can contain NULL.
		distinct = true
		for _, cv := range indexedValues {
			if cv == nil {
				distinct = false
				break
			}
		}
	}

	key = append(key, c.prefix...)
	if distinct {
		key, err = codec.EncodeKey(key, indexedValues...)
	} else {
		key, err = codec.EncodeKey(key, append(indexedValues, h)...)
	}
	if err != nil {
		return nil, false, errors.Trace(err)
	}
	return
}
Example #3
// indexRangesToKVRanges converts index ranges to kv.KeyRanges scoped to the given table and index IDs.
func indexRangesToKVRanges(tid, idxID int64, ranges []*plan.IndexRange, fieldTypes []*types.FieldType) ([]kv.KeyRange, error) {
	krs := make([]kv.KeyRange, 0, len(ranges))
	for _, ran := range ranges {
		err := convertIndexRangeTypes(ran, fieldTypes)
		if err != nil {
			return nil, errors.Trace(err)
		}

		low, err := codec.EncodeKey(nil, ran.LowVal...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if ran.LowExclude {
			low = []byte(kv.Key(low).PrefixNext())
		}
		high, err := codec.EncodeKey(nil, ran.HighVal...)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if !ran.HighExclude {
			high = []byte(kv.Key(high).PrefixNext())
		}
		startKey := tablecodec.EncodeIndexSeekKey(tid, idxID, low)
		endKey := tablecodec.EncodeIndexSeekKey(tid, idxID, high)
		krs = append(krs, kv.KeyRange{StartKey: startKey, EndKey: endKey})
	}
	return krs, nil
}
Example #4
// initRecords writes the given records into the table's backing LevelDB store,
// keyed by log sequence numbers reserved from the table's atomic counter.
func (ps *perfSchema) initRecords(tbName string, records [][]interface{}) error {
	lastLsn := atomic.AddUint64(ps.lsns[tbName], uint64(len(records)))

	batch := pool.Get().(*leveldb.Batch)
	defer func() {
		batch.Reset()
		pool.Put(batch)
	}()

	for i, rec := range records {
		// Each record takes one LSN from the block reserved by the atomic add above.
		lsn := lastLsn - uint64(len(records)) + uint64(i)
		rawKey := []interface{}{uint64(lsn)}
		key, err := codec.EncodeKey(nil, rawKey...)
		if err != nil {
			return errors.Trace(err)
		}
		val, err := codec.EncodeValue(nil, rec...)
		if err != nil {
			return errors.Trace(err)
		}
		batch.Put(key, val)
	}

	err := ps.stores[tbName].Write(batch, nil)
	return errors.Trace(err)
}
Example #5
// setRow writes one row under its row key and one entry per index, using the
// handle both as the row key suffix and as the tail of each index key.
func setRow(txn kv.Transaction, handle int64, tbl *simpleTableInfo, gen genValueFunc) error {
	rowKey := tablecodec.EncodeRowKey(tbl.tID, codec.EncodeInt(nil, handle))
	columnValues := gen(handle, tbl)
	value, err := tablecodec.EncodeRow(columnValues, tbl.cIDs)
	if err != nil {
		return errors.Trace(err)
	}
	err = txn.Set(rowKey, value)
	if err != nil {
		return errors.Trace(err)
	}
	for i, idxCol := range tbl.indices {
		idxVal := columnValues[idxCol]
		encoded, err := codec.EncodeKey(nil, idxVal, types.NewDatum(handle))
		if err != nil {
			return errors.Trace(err)
		}
		idxKey := tablecodec.EncodeIndexSeekKey(tbl.tID, tbl.iIDs[i], encoded)
		err = txn.Set(idxKey, []byte{0})
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
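
setRow above produces two kinds of keys: a row key built from the table ID and the integer-encoded handle, and one index key per indexed column whose tail is codec.EncodeKey output over the column value plus the handle. A hedged sketch of just that key construction (the IDs, handle, and indexed value are invented):

// exampleKeys is a hypothetical helper that isolates the key construction
// from setRow for a single indexed column.
func exampleKeys(tid, idxID, handle int64, idxVal types.Datum) (rowKey, idxKey []byte, err error) {
	// Row key: table ID plus the integer-encoded handle.
	rowKey = tablecodec.EncodeRowKey(tid, codec.EncodeInt(nil, handle))
	// Index key: table ID, index ID, then the encoded (value, handle) pair.
	encoded, err := codec.EncodeKey(nil, idxVal, types.NewDatum(handle))
	if err != nil {
		return nil, nil, errors.Trace(err)
	}
	idxKey = tablecodec.EncodeIndexSeekKey(tid, idxID, encoded)
	return rowKey, idxKey, nil
}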
Example #6
func (s *HBasePutTestSuit) TestGetPut(c *C) {
	log.Info(codec.EncodeKey(170))

	p := NewPut([]byte("1_\xff\xff"))
	p2 := NewPut([]byte("1_\xff\xfe"))
	p3 := NewPut([]byte("1_\xff\xee"))
	p.AddValue([]byte("cf"), []byte("q"), []byte("!"))
	p2.AddValue([]byte("cf"), []byte("q"), []byte("!"))
	p3.AddValue([]byte("cf"), []byte("q"), []byte("!"))

	cli, err := NewClient(getTestZkHosts(), "/hbase")
	c.Assert(err, Equals, nil)

	cli.Put("t2", p)
	cli.Put("t2", p2)
	cli.Put("t2", p3)

	scan := NewScan([]byte("t2"), 100, cli)
	scan.StartRow = []byte("1_")
	for {
		r := scan.Next()
		if r == nil {
			break
		}
		log.Info(r.SortedColumns[0].Row)
	}

	cli.Delete("t2", NewDelete([]byte("1_\xff\xff")))
	cli.Delete("t2", NewDelete([]byte("1_\xff\xfe")))
	cli.Delete("t2", NewDelete([]byte("1_\xff\xee")))

}
Example #7
// GenIndexKey generates storage key for index values. Returned distinct indicates whether the
// indexed values should be distinct in storage (i.e. whether handle is encoded in the key).
func (c *index) GenIndexKey(indexedValues []types.Datum, h int64) (key []byte, distinct bool, err error) {
	if c.idxInfo.Unique {
		// See https://dev.mysql.com/doc/refman/5.7/en/create-index.html
		// A UNIQUE index creates a constraint such that all values in the index must be distinct.
		// An error occurs if you try to add a new row with a key value that matches an existing row.
		// For all engines, a UNIQUE index permits multiple NULL values for columns that can contain NULL.
		distinct = true
		for _, cv := range indexedValues {
			if cv.IsNull() {
				distinct = false
				break
			}
		}
	}

	// For string columns, indexes can be created that use only the leading part of column values,
	// using col_name(length) syntax to specify an index prefix length.
	for i := 0; i < len(indexedValues); i++ {
		v := &indexedValues[i]
		if v.Kind() == types.KindString || v.Kind() == types.KindBytes {
			ic := c.idxInfo.Columns[i]
			if ic.Length != types.UnspecifiedLength && len(v.GetBytes()) > ic.Length {
				// truncate value and limit its length
				v.SetBytes(v.GetBytes()[:ic.Length])
			}
		}
	}

	key = append(key, []byte(c.prefix)...)
	if distinct {
		key, err = codec.EncodeKey(key, indexedValues...)
	} else {
		key, err = codec.EncodeKey(key, append(indexedValues, types.NewDatum(h))...)
	}
	if err != nil {
		return nil, false, errors.Trace(err)
	}
	return
}
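
The distinct flag computed above fixes the key layout: a unique index entry with no NULLs encodes only the indexed values after the prefix, while every other entry also gets the row handle appended so equal indexed values from different rows still yield different keys. A hedged sketch of the non-distinct encoding, using only calls that appear in this example (the prefix, values, and handle would come from the table in practice):

// nonUniqueIndexKey is a hypothetical helper mirroring the non-distinct
// branch above: prefix, then indexed values, then the row handle.
func nonUniqueIndexKey(prefix []byte, vals []types.Datum, h int64) ([]byte, error) {
	key := append([]byte{}, prefix...)
	return codec.EncodeKey(key, append(vals, types.NewDatum(h))...)
}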
Example #8
func (t *Table) addDeleteBinlog(ctx context.Context, h int64, r []types.Datum) error {
	mutation := t.getMutation(ctx)
	if t.meta.PKIsHandle {
		mutation.DeletedIds = append(mutation.DeletedIds, h)
		mutation.Sequence = append(mutation.Sequence, binlog.MutationType_DeleteID)
		return nil
	}

	var primaryIdx *model.IndexInfo
	for _, idx := range t.meta.Indices {
		if idx.Primary {
			primaryIdx = idx
			break
		}
	}
	var data []byte
	var err error
	if primaryIdx != nil {
		indexedValues := make([]types.Datum, len(primaryIdx.Columns))
		for i := range indexedValues {
			indexedValues[i] = r[primaryIdx.Columns[i].Offset]
		}
		data, err = codec.EncodeKey(nil, indexedValues...)
		if err != nil {
			return errors.Trace(err)
		}
		mutation.DeletedPks = append(mutation.DeletedPks, data)
		mutation.Sequence = append(mutation.Sequence, binlog.MutationType_DeletePK)
		return nil
	}
	colIDs := make([]int64, len(t.Cols()))
	for i, col := range t.Cols() {
		colIDs[i] = col.ID
	}
	data, err = tablecodec.EncodeRow(r, colIDs)
	if err != nil {
		return errors.Trace(err)
	}
	mutation.DeletedRows = append(mutation.DeletedRows, data)
	mutation.Sequence = append(mutation.Sequence, binlog.MutationType_DeleteRow)
	return nil
}
Example #9
func (s *testTableCodecSuite) TestCutKey(c *C) {
	colIDs := []int64{1, 2, 3}
	values := []types.Datum{types.NewIntDatum(1), types.NewBytesDatum([]byte("abc")), types.NewFloat64Datum(5.5)}
	handle := types.NewIntDatum(100)
	values = append(values, handle)
	encodedValue, err := codec.EncodeKey(nil, values...)
	c.Assert(err, IsNil)
	tableID := int64(4)
	indexID := int64(5)
	indexKey := EncodeIndexSeekKey(tableID, indexID, encodedValue)
	valuesMap, handleBytes, err := CutIndexKey(indexKey, colIDs)
	c.Assert(err, IsNil)
	for i, colID := range colIDs {
		valueBytes := valuesMap[colID]
		var val types.Datum
		_, val, _ = codec.DecodeOne(valueBytes)
		c.Assert(val, DeepEquals, values[i])
	}
	_, handleVal, _ := codec.DecodeOne(handleBytes)
	c.Assert(handleVal, DeepEquals, types.NewIntDatum(100))
}
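
TestCutKey also shows the decode side: codec.DecodeOne consumes one datum from the front of an EncodeKey buffer and returns the remaining bytes. A minimal sketch, assuming that (rest, datum, err) return shape, which walks a whole encoded key back into its datums:

// decodeAll is a hypothetical helper: it applies codec.DecodeOne repeatedly
// until the buffer produced by codec.EncodeKey is exhausted.
func decodeAll(b []byte) ([]types.Datum, error) {
	var ds []types.Datum
	for len(b) > 0 {
		var d types.Datum
		var err error
		b, d, err = codec.DecodeOne(b)
		if err != nil {
			return nil, errors.Trace(err)
		}
		ds = append(ds, d)
	}
	return ds, nil
}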