// TestBatchScanMaxWithDeleted verifies that if a deletion
// in the updates map shadows an entry from the engine, the
// max on a scan is still reached.
func TestBatchScanMaxWithDeleted(t *testing.T) {
	defer leaktest.AfterTest(t)
	e := NewInMem(proto.Attributes{}, 1<<20)
	defer e.Close()
	b := e.NewBatch()
	defer b.Close()
	// Write two values.
	if err := e.Put(proto.EncodedKey("a"), []byte("value1")); err != nil {
		t.Fatal(err)
	}
	if err := e.Put(proto.EncodedKey("b"), []byte("value2")); err != nil {
		t.Fatal(err)
	}
	// Now, delete "a" in batch.
	if err := b.Clear(proto.EncodedKey("a")); err != nil {
		t.Fatal(err)
	}
	// A scan with max=1 should scan "b".
	kvs, err := Scan(b, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 1)
	if err != nil {
		t.Fatal(err)
	}
	if len(kvs) != 1 || !bytes.Equal(kvs[0].Key, []byte("b")) {
		t.Errorf("expected scan of \"b\"; got %v", kvs)
	}
}
// TestBatchConcurrency verifies operation of batch when the
// underlying engine has concurrent modifications to overlapping
// keys. This should never happen with the way Cockroach uses
// batches, but it is worth verifying.
func TestBatchConcurrency(t *testing.T) {
	defer leaktest.AfterTest(t)
	e := NewInMem(proto.Attributes{}, 1<<20)
	defer e.Close()
	b := e.NewBatch()
	defer b.Close()
	// Write a merge to the batch.
	if err := b.Merge(proto.EncodedKey("a"), appender("bar")); err != nil {
		t.Fatal(err)
	}
	val, err := b.Get(proto.EncodedKey("a"))
	if err != nil {
		t.Fatal(err)
	}
	if !compareMergedValues(t, val, appender("bar")) {
		t.Error("mismatch of \"a\"")
	}
	// Write an engine value.
	if err := e.Put(proto.EncodedKey("a"), appender("foo")); err != nil {
		t.Fatal(err)
	}
	// Now, read again and verify that the merge happens on top of the mod.
	val, err = b.Get(proto.EncodedKey("a"))
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(val, appender("foobar")) {
		t.Error("mismatch of \"a\"")
	}
}
// TestEngineMerge tests that the passing through of engine merge operations // to the goMerge function works as expected. The semantics are tested more // exhaustively in the merge tests themselves. func TestEngineMerge(t *testing.T) { runWithAllEngines(func(engine Engine, t *testing.T) { testcases := []struct { testKey proto.EncodedKey merges [][]byte expected []byte }{ { proto.EncodedKey("haste not in life"), [][]byte{ appender("x"), appender("y"), appender("z"), }, appender("xyz"), }, { proto.EncodedKey("timeseriesmerged"), [][]byte{ timeSeriesInt(testtime, 1000, []tsIntSample{ {1, 1, 5, 5, 5}, }...), timeSeriesInt(testtime, 1000, []tsIntSample{ {2, 1, 5, 5, 5}, {1, 2, 10, 7, 3}, }...), timeSeriesInt(testtime, 1000, []tsIntSample{ {10, 1, 5, 5, 5}, }...), timeSeriesInt(testtime, 1000, []tsIntSample{ {5, 1, 5, 5, 5}, {3, 1, 5, 5, 5}, }...), }, timeSeriesInt(testtime, 1000, []tsIntSample{ {1, 3, 15, 7, 3}, {2, 1, 5, 5, 5}, {3, 1, 5, 5, 5}, {5, 1, 5, 5, 5}, {10, 1, 5, 5, 5}, }...), }, } for _, tc := range testcases { for i, update := range tc.merges { if err := engine.Merge(tc.testKey, update); err != nil { t.Fatalf("%d: %v", i, err) } } result, _ := engine.Get(tc.testKey) var resultV, expectedV proto.MVCCMetadata gogoproto.Unmarshal(result, &resultV) gogoproto.Unmarshal(tc.expected, &expectedV) if !reflect.DeepEqual(resultV, expectedV) { t.Errorf("unexpected append-merge result: %v != %v", resultV, expectedV) } } }, t) }
func TestEngineScan1(t *testing.T) { defer leaktest.AfterTest(t) runWithAllEngines(func(engine Engine, t *testing.T) { testCases := []struct { key, value []byte }{ {[]byte("dog"), []byte("woof")}, {[]byte("cat"), []byte("meow")}, {[]byte("server"), []byte("42")}, {[]byte("french"), []byte("Allô?")}, {[]byte("german"), []byte("hallo")}, {[]byte("chinese"), []byte("你好")}, } keyMap := map[string][]byte{} for _, c := range testCases { if err := engine.Put(c.key, c.value); err != nil { t.Errorf("could not put key %q: %v", c.key, err) } keyMap[string(c.key)] = c.value } sortedKeys := make([]string, len(testCases)) for i, t := range testCases { sortedKeys[i] = string(t.key) } sort.Strings(sortedKeys) keyvals, err := Scan(engine, []byte("chinese"), []byte("german"), 0) if err != nil { t.Fatalf("could not run scan: %v", err) } ensureRangeEqual(t, sortedKeys[1:4], keyMap, keyvals) // Check an end of range which does not equal an existing key. keyvals, err = Scan(engine, []byte("chinese"), []byte("german1"), 0) if err != nil { t.Fatalf("could not run scan: %v", err) } ensureRangeEqual(t, sortedKeys[1:5], keyMap, keyvals) keyvals, err = Scan(engine, []byte("chinese"), []byte("german"), 2) if err != nil { t.Fatalf("could not run scan: %v", err) } ensureRangeEqual(t, sortedKeys[1:3], keyMap, keyvals) // Should return all key/value pairs in lexicographic order. // Note that []byte("") is the lowest key possible and is // a special case in engine.scan, that's why we test it here. startKeys := []proto.EncodedKey{proto.EncodedKey("cat"), proto.EncodedKey("")} for _, startKey := range startKeys { keyvals, err := Scan(engine, startKey, proto.EncodedKey(proto.KeyMax), 0) if err != nil { t.Fatalf("could not run scan: %v", err) } ensureRangeEqual(t, sortedKeys, keyMap, keyvals) } }, t) }
// Commit writes all pending updates to the underlying engine in
// an atomic write batch.
func (b *Batch) Commit() error {
	if b.committed {
		panic("this batch was already committed")
	}
	var batch []interface{}
	b.updates.DoRange(func(n llrb.Comparable) (done bool) {
		batch = append(batch, n)
		return false
	}, proto.RawKeyValue{Key: proto.EncodedKey(KeyMin)}, proto.RawKeyValue{Key: proto.EncodedKey(KeyMax)})
	b.committed = true
	return b.engine.WriteBatch(batch)
}
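// Example: a minimal sketch of staging writes in a Batch and making them
// durable with Commit. It assumes the two-argument NewInMem constructor used
// by several tests in this package; nothing is visible to the engine until
// Commit returns.
//
//   e := NewInMem(proto.Attributes{}, 1<<20)
//   defer e.Close()
//   b := e.NewBatch()
//   defer b.Close()
//   if err := b.Put(proto.EncodedKey("k"), []byte("v")); err != nil {
//       // handle error
//   }
//   // The engine does not see "k" yet; e.Get(proto.EncodedKey("k")) returns nil.
//   if err := b.Commit(); err != nil {
//       // handle error
//   }
//   // Now e.Get(proto.EncodedKey("k")) returns "v".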
func TestBatchMerge(t *testing.T) { defer leaktest.AfterTest(t) stopper := stop.NewStopper() defer stopper.Stop() e := NewInMem(proto.Attributes{}, 1<<20, stopper) b := e.NewBatch() defer b.Close() // Write batch put, delete & merge. if err := b.Put(proto.EncodedKey("a"), appender("a-value")); err != nil { t.Fatal(err) } if err := b.Clear(proto.EncodedKey("b")); err != nil { t.Fatal(err) } if err := b.Merge(proto.EncodedKey("c"), appender("c-value")); err != nil { t.Fatal(err) } // Now, merge to all three keys. if err := b.Merge(proto.EncodedKey("a"), appender("append")); err != nil { t.Fatal(err) } if err := b.Merge(proto.EncodedKey("b"), appender("append")); err != nil { t.Fatal(err) } if err := b.Merge(proto.EncodedKey("c"), appender("append")); err != nil { t.Fatal(err) } // Verify values. val, err := b.Get(proto.EncodedKey("a")) if err != nil { t.Fatal(err) } if !compareMergedValues(t, val, appender("a-valueappend")) { t.Error("mismatch of \"a\"") } val, err = b.Get(proto.EncodedKey("b")) if err != nil { t.Fatal(err) } if !compareMergedValues(t, val, appender("append")) { t.Error("mismatch of \"b\"") } val, err = b.Get(proto.EncodedKey("c")) if err != nil { t.Fatal(err) } if !compareMergedValues(t, val, appender("c-valueappend")) { t.Error("mismatch of \"c\"") } }
func TestSnapshot(t *testing.T) {
	defer leaktest.AfterTest(t)
	runWithAllEngines(func(engine Engine, t *testing.T) {
		key := []byte("a")
		val1 := []byte("1")
		if err := engine.Put(key, val1); err != nil {
			t.Fatal(err)
		}
		val, _ := engine.Get(key)
		if !bytes.Equal(val, val1) {
			t.Fatalf("the value %s in get result does not match the value %s in request", val, val1)
		}

		snap := engine.NewSnapshot()
		defer snap.Close()

		val2 := []byte("2")
		if err := engine.Put(key, val2); err != nil {
			t.Fatal(err)
		}
		val, _ = engine.Get(key)
		valSnapshot, err := snap.Get(key)
		if err != nil {
			t.Fatalf("error: %s", err)
		}
		if !bytes.Equal(val, val2) {
			t.Fatalf("the value %s in get result does not match the value %s in request", val, val2)
		}
		if !bytes.Equal(valSnapshot, val1) {
			t.Fatalf("the value %s in snapshot get result does not match the value %s in request", valSnapshot, val1)
		}

		keyvals, _ := Scan(engine, key, proto.EncodedKey(proto.KeyMax), 0)
		keyvalsSnapshot, err := Scan(snap, key, proto.EncodedKey(proto.KeyMax), 0)
		if err != nil {
			t.Fatalf("error: %s", err)
		}
		if len(keyvals) != 1 || !bytes.Equal(keyvals[0].Value, val2) {
			t.Fatalf("the value %s in scan result does not match the value %s in request", keyvals[0].Value, val2)
		}
		if len(keyvalsSnapshot) != 1 || !bytes.Equal(keyvalsSnapshot[0].Value, val1) {
			t.Fatalf("the value %s in snapshot scan result does not match the value %s in request", keyvalsSnapshot[0].Value, val1)
		}
	}, t)
}
func TestEngineBatch(t *testing.T) { runWithAllEngines(func(engine Engine, t *testing.T) { numShuffles := 100 key := proto.EncodedKey("a") // Those are randomized below. batch := []interface{}{ BatchPut{proto.RawKeyValue{Key: key, Value: appender("~ockroachDB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("C~ckroachDB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("Co~kroachDB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("Coc~roachDB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("C**k~oachDB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("Cockr~achDB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("Cockro~chDB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("Cockroa~hDB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("Cockroac~DB")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("Cockroach~B")}}, BatchPut{proto.RawKeyValue{Key: key, Value: appender("CockroachD~")}}, BatchDelete{proto.RawKeyValue{Key: key}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender("C")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender(" o")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender(" c")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender(" k")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender("r")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender(" o")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender(" a")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender(" c")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender("h")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender(" D")}}, BatchMerge{proto.RawKeyValue{Key: key, Value: appender(" B")}}, } for i := 0; i < numShuffles; i++ { // In each run, create an array of shuffled operations. shuffledIndices := rand.Perm(len(batch)) currentBatch := make([]interface{}, len(batch)) for k := range currentBatch { currentBatch[k] = batch[shuffledIndices[k]] } // Reset the key engine.Clear(key) // Run it once with individual operations and remember the result. for i, op := range currentBatch { if err := engine.WriteBatch([]interface{}{op}); err != nil { t.Errorf("batch test: %d: op %v: %v", i, op, err) continue } } correctValue, _ := engine.Get(key) // Run the whole thing as a batch and compare. if err := engine.WriteBatch(currentBatch); err != nil { t.Errorf("batch test: %d: %v", i, err) continue } actualValue, _ := engine.Get(key) if !bytes.Equal(actualValue, correctValue) { t.Errorf("batch test: %d: result inconsistent", i) } } }, t) }
// TestBootstrapOfNonEmptyStore verifies bootstrap failure if the engine
// is not empty.
func TestBootstrapOfNonEmptyStore(t *testing.T) {
	defer leaktest.AfterTest(t)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)

	// Put some random garbage into the engine.
	if err := eng.Put(proto.EncodedKey("foo"), []byte("bar")); err != nil {
		t.Errorf("failure putting key foo into engine: %s", err)
	}
	ctx := TestStoreContext
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper := stop.NewStopper()
	stopper.AddCloser(ctx.Transport)
	defer stopper.Stop()
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})

	// The store can't be started because it hasn't been bootstrapped.
	if err := store.Start(stopper); err == nil {
		t.Error("expected failure starting un-bootstrapped store")
	}

	// Bootstrap should fail on a non-empty engine.
	if err := store.Bootstrap(testIdent, stopper); err == nil {
		t.Error("expected bootstrap error on non-empty store")
	}
}
// mergeUpdates combines the next key/value from the engine iterator
// with all batch updates which precede it. The final batch update,
// which might overlap the next key/value, is merged. The start
// parameter indicates the first possible key to merge from either
// iterator.
func (bi *batchIterator) mergeUpdates(start proto.EncodedKey) {
	// Use a for-loop because deleted entries might cause nothing
	// to be added to bi.pending; in this case, we loop to the next key.
	for len(bi.pending) == 0 && bi.iter.Valid() {
		kv := proto.RawKeyValue{Key: bi.iter.Key(), Value: bi.iter.Value()}
		bi.iter.Next()
		// Get updates up to the engine iterator's current key.
		bi.getUpdates(start, kv.Key)
		// Possibly merge an update with the engine iterator's current key.
		if val := bi.updates.Get(kv); val != nil {
			switch t := val.(type) {
			case BatchDelete:
				// The engine entry is deleted; add nothing to pending.
			case BatchPut:
				bi.pending = append(bi.pending, t.RawKeyValue)
			case BatchMerge:
				mergedKV := proto.RawKeyValue{Key: t.Key}
				mergedKV.Value, bi.err = goMerge(kv.Value, t.Value)
				if bi.err == nil {
					bi.pending = append(bi.pending, mergedKV)
				}
			}
		} else {
			bi.pending = append(bi.pending, kv)
		}
		start = kv.Key.Next()
	}
	if len(bi.pending) == 0 {
		bi.getUpdates(start, proto.EncodedKey(KeyMax))
	}
}
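// Example: a minimal sketch of the shadowing behavior mergeUpdates implements,
// expressed through the public Scan helper used by the batch tests. Names and
// values here are illustrative only.
//
//   _ = e.Put(proto.EncodedKey("a"), []byte("engine"))
//   b := e.NewBatch()
//   _ = b.Put(proto.EncodedKey("a"), []byte("batch"))
//   _ = b.Clear(proto.EncodedKey("b")) // no engine entry; the scan simply skips "b"
//   kvs, _ := Scan(b, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 0)
//   // kvs contains exactly one entry, {"a", "batch"}: the batch update shadows the engine value.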
// Merge stores the key / value as a BatchMerge in the updates tree.
// If the updates map already contains a BatchPut, then this value is
// merged with the Put and kept as a BatchPut. If the updates map
// already contains a BatchMerge, then this value is merged with the
// existing BatchMerge and kept as a BatchMerge. If the updates map
// contains a BatchDelete, then this value is merged with a nil byte
// slice and stored as a BatchPut.
func (b *Batch) Merge(key proto.EncodedKey, value []byte) error {
	if len(key) == 0 {
		return emptyKeyError()
	}
	// Need to make a copy of key as the caller may reuse it.
	key = append(proto.EncodedKey(nil), key...)
	val := b.updates.Get(proto.RawKeyValue{Key: key})
	if val != nil {
		switch t := val.(type) {
		case BatchDelete:
			mergedBytes, err := goMerge(nil, value)
			if err != nil {
				return err
			}
			b.updates.Insert(BatchPut{proto.RawKeyValue{Key: key, Value: mergedBytes}})
		case BatchPut:
			mergedBytes, err := goMerge(t.Value, value)
			if err != nil {
				return err
			}
			b.updates.Insert(BatchPut{proto.RawKeyValue{Key: key, Value: mergedBytes}})
		case BatchMerge:
			mergedBytes, err := goMerge(t.Value, value)
			if err != nil {
				return err
			}
			b.updates.Insert(BatchMerge{proto.RawKeyValue{Key: key, Value: mergedBytes}})
		}
	} else {
		// Need to make a copy of value as the caller may reuse it.
		value = append([]byte(nil), value...)
		b.updates.Insert(BatchMerge{proto.RawKeyValue{Key: key, Value: value}})
	}
	return nil
}
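// Example: a minimal sketch of how Merge folds into earlier batch updates,
// following the doc comment above. It assumes values built with appender (as
// in the tests), which merge by concatenation; this is an illustrative
// assumption, not a statement about arbitrary value encodings.
//
//   b := e.NewBatch()
//   _ = b.Put(proto.EncodedKey("a"), appender("a-value"))
//   _ = b.Merge(proto.EncodedKey("a"), appender("append"))
//   // The update for "a" is kept as a BatchPut containing "a-valueappend".
//   _ = b.Clear(proto.EncodedKey("b"))
//   _ = b.Merge(proto.EncodedKey("b"), appender("append"))
//   // The update for "b" becomes a BatchPut containing just "append".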
func TestBatchProto(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := NewInMem(proto.Attributes{}, 1<<20, stopper)
	b := e.NewBatch()
	defer b.Close()
	kv := &proto.RawKeyValue{Key: proto.EncodedKey("a"), Value: []byte("value")}
	if _, _, err := PutProto(b, proto.EncodedKey("proto"), kv); err != nil {
		t.Fatal(err)
	}
	getKV := &proto.RawKeyValue{}
	ok, keySize, valSize, err := b.GetProto(proto.EncodedKey("proto"), getKV)
	if !ok || err != nil {
		t.Fatalf("expected GetProto to succeed ok=%t: %s", ok, err)
	}
	if keySize != 5 {
		t.Errorf("expected key size 5; got %d", keySize)
	}
	var data []byte
	if data, err = gogoproto.Marshal(kv); err != nil {
		t.Fatal(err)
	}
	if valSize != int64(len(data)) {
		t.Errorf("expected value size %d; got %d", len(data), valSize)
	}
	if !reflect.DeepEqual(getKV, kv) {
		t.Errorf("expected %v; got %v", kv, getKV)
	}
	// Before commit, the proto is not visible via the engine.
	if ok, _, _, err = e.GetProto(proto.EncodedKey("proto"), getKV); ok || err != nil {
		t.Fatalf("expected GetProto to fail ok=%t: %s", ok, err)
	}
	// Commit and verify the proto can be read directly from the engine.
	if err := b.Commit(); err != nil {
		t.Fatal(err)
	}
	if ok, _, _, err = e.GetProto(proto.EncodedKey("proto"), getKV); !ok || err != nil {
		t.Fatalf("expected GetProto to succeed ok=%t: %s", ok, err)
	}
	if !reflect.DeepEqual(getKV, kv) {
		t.Errorf("expected %v; got %v", kv, getKV)
	}
}
// Clear stores the key as a BatchDelete in the updates tree.
func (b *Batch) Clear(key proto.EncodedKey) error {
	if len(key) == 0 {
		return emptyKeyError()
	}
	// Need to make a copy of key as the caller may reuse it.
	key = append(proto.EncodedKey(nil), key...)
	b.updates.Insert(BatchDelete{proto.RawKeyValue{Key: key}})
	return nil
}
// Put stores the key / value as a BatchPut in the updates tree.
func (b *Batch) Put(key proto.EncodedKey, value []byte) error {
	if len(key) == 0 {
		return emptyKeyError()
	}
	// Need to make a copy of key and value as the caller may reuse
	// them.
	key = append(proto.EncodedKey(nil), key...)
	value = append([]byte(nil), value...)
	b.updates.Insert(BatchPut{proto.RawKeyValue{Key: key, Value: value}})
	return nil
}
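// Example: a minimal sketch of why Put and Clear copy their arguments. Both
// copy the key (and Put copies the value), so the caller may reuse its
// buffers after the call returns without affecting staged updates.
//
//   key := proto.EncodedKey("k")
//   _ = b.Put(key, []byte("v"))
//   key[0] = 'x' // does not affect the staged BatchPut for "k"
//   _ = b.Clear(proto.EncodedKey("k"))
//   // The staged update for "k" is now a BatchDelete.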
// TestBatchScanWithDelete verifies that a scan containing
// a single deleted value returns nothing.
func TestBatchScanWithDelete(t *testing.T) {
	defer leaktest.AfterTest(t)
	e := NewInMem(proto.Attributes{}, 1<<20)
	defer e.Close()
	b := e.NewBatch()
	defer b.Close()
	// Write initial value, then delete via batch.
	if err := e.Put(proto.EncodedKey("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}
	if err := b.Clear(proto.EncodedKey("a")); err != nil {
		t.Fatal(err)
	}
	kvs, err := Scan(b, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 0)
	if err != nil {
		t.Fatal(err)
	}
	if len(kvs) != 0 {
		t.Errorf("expected empty scan with batch-deleted value; got %v", kvs)
	}
}
// TestEngineBatchCommit writes a batch containing 10K rows (all the // same key) and concurrently attempts to read the value in a tight // loop. The test verifies that either there is no value for the key // or it contains the final value, but never a value in between. func TestEngineBatchCommit(t *testing.T) { defer leaktest.AfterTest(t) numWrites := 10000 key := proto.EncodedKey("a") finalVal := []byte(strconv.Itoa(numWrites - 1)) runWithAllEngines(func(e Engine, t *testing.T) { // Start a concurrent read operation in a busy loop. readsBegun := make(chan struct{}) readsDone := make(chan struct{}) writesDone := make(chan struct{}) go func() { for i := 0; ; i++ { select { case <-writesDone: close(readsDone) return default: val, err := e.Get(key) if err != nil { t.Fatal(err) } if val != nil && bytes.Compare(val, finalVal) != 0 { close(readsDone) t.Fatalf("key value should be empty or %q; got %q", string(finalVal), string(val)) } if i == 0 { close(readsBegun) } } } }() // Wait until we've succeeded with first read. <-readsBegun // Create key/values and put them in a batch to engine. batch := e.NewBatch() defer batch.Close() for i := 0; i < numWrites; i++ { if err := batch.Put(key, []byte(strconv.Itoa(i))); err != nil { t.Fatal(err) } } if err := batch.Commit(); err != nil { t.Fatal(err) } close(writesDone) <-readsDone }, t) }
// TestEngineWriteBatch writes a batch containing 10K rows (all the // same key) and concurrently attempts to read the value in a tight // loop. The test verifies that either there is no value for the key // or it contains the final value, but never a value in between. func TestEngineWriteBatch(t *testing.T) { numWrites := 10000 key := proto.EncodedKey("a") finalVal := []byte(strconv.Itoa(numWrites - 1)) runWithAllEngines(func(e Engine, t *testing.T) { // Start a concurrent read operation in a busy loop. readsBegun := make(chan struct{}) readsDone := make(chan struct{}) writesDone := make(chan struct{}) go func() { for i := 0; ; i++ { select { case <-writesDone: close(readsDone) return default: val, err := e.Get(key) if err != nil { t.Fatal(err) } if val != nil && bytes.Compare(val, finalVal) != 0 { close(readsDone) t.Fatalf("key value should be empty or %q; got %q", string(finalVal), string(val)) } if i == 0 { close(readsBegun) } } } }() // Wait until we've succeeded with first read. <-readsBegun // Create key/values and put them in a batch to engine. puts := make([]interface{}, numWrites, numWrites) for i := 0; i < numWrites; i++ { puts[i] = BatchPut{proto.RawKeyValue{Key: key, Value: []byte(strconv.Itoa(i))}} } if err := e.WriteBatch(puts); err != nil { t.Fatal(err) } close(writesDone) <-readsDone }, t) }
func TestBatchGet(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := NewInMem(proto.Attributes{}, 1<<20, stopper)
	b := e.NewBatch()
	defer b.Close()
	// Write initial values to the engine, then stage updates in the batch.
	if err := e.Put(proto.EncodedKey("b"), []byte("value")); err != nil {
		t.Fatal(err)
	}
	if err := e.Put(proto.EncodedKey("c"), appender("foo")); err != nil {
		t.Fatal(err)
	}
	// Write batch values.
	if err := b.Put(proto.EncodedKey("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}
	if err := b.Clear(proto.EncodedKey("b")); err != nil {
		t.Fatal(err)
	}
	if err := b.Merge(proto.EncodedKey("c"), appender("bar")); err != nil {
		t.Fatal(err)
	}
	expValues := []proto.RawKeyValue{
		{Key: proto.EncodedKey("a"), Value: []byte("value")},
		{Key: proto.EncodedKey("b"), Value: nil},
		{Key: proto.EncodedKey("c"), Value: appender("foobar")},
	}
	for i, expKV := range expValues {
		kv, err := b.Get(expKV.Key)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(kv, expKV.Value) {
			t.Errorf("%d: expected %q, got %q", i, expKV.Value, kv)
		}
	}
}
func (r *rocksDBIterator) SeekReverse(key []byte) {
	if len(key) == 0 {
		C.DBIterSeekToLast(r.iter)
	} else {
		C.DBIterSeek(r.iter, goToCSlice(key))
		// The key may be beyond the last key in RocksDB; in that case,
		// position the iterator at the last key.
		if !r.Valid() {
			C.DBIterSeekToLast(r.iter)
		}
		if !r.Valid() {
			return
		}
		// Make sure the current key is <= the provided key.
		curKey := r.Key()
		if proto.EncodedKey(key).Less(curKey) {
			r.Prev()
		}
	}
}
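// Example: a minimal sketch of the positioning SeekReverse aims for, assuming
// keys "a" and "c" are stored and that SeekReverse is reachable through the
// engine's iterator (it is shown here on rocksDBIterator).
//
//   iter.SeekReverse([]byte("b")) // lands on "a": the largest key <= "b"
//   iter.SeekReverse([]byte("z")) // lands on "c": the target is past the last key
//   iter.SeekReverse(nil)         // lands on "c": an empty key means "seek to last"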
func TestEngineIncrement(t *testing.T) { defer leaktest.AfterTest(t) runWithAllEngines(func(engine Engine, t *testing.T) { // Start with increment of an empty key. val, err := Increment(engine, proto.EncodedKey("a"), 1) if err != nil { t.Fatal(err) } if val != 1 { t.Errorf("expected increment to be %d; got %d", 1, val) } // Increment same key by 1. if val, err = Increment(engine, proto.EncodedKey("a"), 1); err != nil { t.Fatal(err) } if val != 2 { t.Errorf("expected increment to be %d; got %d", 2, val) } // Increment same key by 2. if val, err = Increment(engine, proto.EncodedKey("a"), 2); err != nil { t.Fatal(err) } if val != 4 { t.Errorf("expected increment to be %d; got %d", 4, val) } // Decrement same key by -1. if val, err = Increment(engine, proto.EncodedKey("a"), -1); err != nil { t.Fatal(err) } if val != 3 { t.Errorf("expected increment to be %d; got %d", 3, val) } // Increment same key by max int64 value to cause overflow; should return error. if val, err = Increment(engine, proto.EncodedKey("a"), math.MaxInt64); err == nil { t.Error("expected an overflow error") } if val, err = Increment(engine, proto.EncodedKey("a"), 0); err != nil { t.Fatal(err) } if val != 3 { t.Errorf("expected increment to be %d; got %d", 3, val) } }, t) }
// TestSnapshotMethods verifies that snapshots allow only read-only // engine operations. func TestSnapshotMethods(t *testing.T) { defer leaktest.AfterTest(t) runWithAllEngines(func(engine Engine, t *testing.T) { keys := [][]byte{[]byte("a"), []byte("b")} vals := [][]byte{[]byte("1"), []byte("2")} for i := range keys { if err := engine.Put(keys[i], vals[i]); err != nil { t.Fatal(err) } } snap := engine.NewSnapshot() defer snap.Close() // Verify Attrs. var attrs proto.Attributes switch engine.(type) { case *InMem: attrs = inMemAttrs } if !reflect.DeepEqual(engine.Attrs(), attrs) { t.Errorf("attrs mismatch; expected %+v, got %+v", attrs, engine.Attrs()) } // Verify Put is error. if err := snap.Put([]byte("c"), []byte("3")); err == nil { t.Error("expected error on Put to snapshot") } // Verify Get. valSnapshot, err := snap.Get(keys[0]) if err != nil { t.Fatal(err) } if !bytes.Equal(vals[0], valSnapshot) { t.Fatalf("the value %s in get result does not match the value %s in snapshot", vals[0], valSnapshot) } // Verify Scan. keyvals, _ := Scan(engine, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 0) keyvalsSnapshot, err := Scan(snap, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 0) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(keyvals, keyvalsSnapshot) { t.Fatalf("the key/values %v in scan result does not match the value %s in snapshot", keyvals, keyvalsSnapshot) } // Verify Iterate. index := 0 if err := snap.Iterate(proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), func(kv proto.RawKeyValue) (bool, error) { if !bytes.Equal(kv.Key, keys[index]) || !bytes.Equal(kv.Value, vals[index]) { t.Errorf("%d: key/value not equal between expected and snapshot: %s/%s, %s/%s", index, keys[index], vals[index], kv.Key, kv.Value) } index++ return false, nil }); err != nil { t.Fatal(err) } // Verify Clear is error. if err := snap.Clear(keys[0]); err == nil { t.Error("expected error on Clear to snapshot") } // Verify Merge is error. if err := snap.Merge([]byte("merge-key"), appender("x")); err == nil { t.Error("expected error on Merge to snapshot") } // Verify Capacity. capacity, err := engine.Capacity() if err != nil { t.Fatal(err) } capacitySnapshot, err := snap.Capacity() if err != nil { t.Fatal(err) } // The Available fields of capacity may differ due to processes beyond our control. if capacity.Capacity != capacitySnapshot.Capacity { t.Errorf("expected capacities to be equal: %v != %v", capacity.Capacity, capacitySnapshot.Capacity) } // Verify ApproximateSize. approx, err := engine.ApproximateSize(proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax)) if err != nil { t.Fatal(err) } approxSnapshot, err := snap.ApproximateSize(proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax)) if err != nil { t.Fatal(err) } if approx != approxSnapshot { t.Errorf("expected approx sizes to be equal: %d != %d", approx, approxSnapshot) } // Write a new key to engine. newKey := []byte("c") newVal := []byte("3") if err := engine.Put(newKey, newVal); err != nil { t.Fatal(err) } // Verify NewIterator still iterates over original snapshot. iter := snap.NewIterator() iter.Seek(newKey) if iter.Valid() { t.Error("expected invalid iterator when seeking to element which shouldn't be visible to snapshot") } iter.Close() // Verify Commit is error. if err := snap.Commit(); err == nil { t.Error("expected error on Commit to snapshot") } }, t) }
func TestBatchScan(t *testing.T) { defer leaktest.AfterTest(t) e := NewInMem(proto.Attributes{}, 1<<20) defer e.Close() b := e.NewBatch() defer b.Close() existingVals := []proto.RawKeyValue{ {Key: proto.EncodedKey("a"), Value: []byte("1")}, {Key: proto.EncodedKey("b"), Value: []byte("2")}, {Key: proto.EncodedKey("c"), Value: []byte("3")}, {Key: proto.EncodedKey("d"), Value: []byte("4")}, {Key: proto.EncodedKey("e"), Value: []byte("5")}, {Key: proto.EncodedKey("f"), Value: []byte("6")}, {Key: proto.EncodedKey("g"), Value: []byte("7")}, {Key: proto.EncodedKey("h"), Value: []byte("8")}, {Key: proto.EncodedKey("i"), Value: []byte("9")}, {Key: proto.EncodedKey("j"), Value: []byte("10")}, {Key: proto.EncodedKey("k"), Value: []byte("11")}, {Key: proto.EncodedKey("l"), Value: []byte("12")}, {Key: proto.EncodedKey("m"), Value: []byte("13")}, } for _, kv := range existingVals { if err := e.Put(kv.Key, kv.Value); err != nil { t.Fatal(err) } } batchVals := []proto.RawKeyValue{ {Key: proto.EncodedKey("a"), Value: []byte("b1")}, {Key: proto.EncodedKey("bb"), Value: []byte("b2")}, {Key: proto.EncodedKey("c"), Value: []byte("b3")}, {Key: proto.EncodedKey("dd"), Value: []byte("b4")}, {Key: proto.EncodedKey("e"), Value: []byte("b5")}, {Key: proto.EncodedKey("ff"), Value: []byte("b6")}, {Key: proto.EncodedKey("g"), Value: []byte("b7")}, {Key: proto.EncodedKey("hh"), Value: []byte("b8")}, {Key: proto.EncodedKey("i"), Value: []byte("b9")}, {Key: proto.EncodedKey("jj"), Value: []byte("b10")}, } for _, kv := range batchVals { if err := b.Put(kv.Key, kv.Value); err != nil { t.Fatal(err) } } scans := []struct { start, end proto.EncodedKey max int64 }{ // Full monty. {start: proto.EncodedKey("a"), end: proto.EncodedKey("z"), max: 0}, // Select ~half. {start: proto.EncodedKey("a"), end: proto.EncodedKey("z"), max: 9}, // Select one. {start: proto.EncodedKey("a"), end: proto.EncodedKey("z"), max: 1}, // Select half by end key. {start: proto.EncodedKey("a"), end: proto.EncodedKey("f0"), max: 0}, // Start at half and select rest. {start: proto.EncodedKey("f"), end: proto.EncodedKey("z"), max: 0}, // Start at last and select max=10. {start: proto.EncodedKey("m"), end: proto.EncodedKey("z"), max: 10}, } // Scan each case using the batch and store the results. results := map[int][]proto.RawKeyValue{} for i, scan := range scans { kvs, err := Scan(b, scan.start, scan.end, scan.max) if err != nil { t.Fatal(err) } results[i] = kvs } // Now, commit batch and re-scan using engine direct to compare results. if err := b.Commit(); err != nil { t.Fatal(err) } for i, scan := range scans { kvs, err := Scan(e, scan.start, scan.end, scan.max) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(kvs, results[i]) { t.Errorf("%d: expected %v; got %v", i, results[i], kvs) } } }
func TestEngineDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	runWithAllEngines(func(engine Engine, t *testing.T) {
		keys := []proto.EncodedKey{
			proto.EncodedKey("a"),
			proto.EncodedKey("aa"),
			proto.EncodedKey("aaa"),
			proto.EncodedKey("ab"),
			proto.EncodedKey("abc"),
			proto.EncodedKey(proto.KeyMax),
		}
		insertKeys(keys, engine, t)

		// Scan all keys (non-inclusive of final key).
		verifyScan(proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 10, keys[0:5], engine, t)

		// Delete a range of keys.
		numDeleted, err := ClearRange(engine, proto.EncodedKey("aa"), proto.EncodedKey("abc"))
		// Verify what was deleted.
		if err != nil {
			t.Errorf("unexpected error deleting range: %v", err)
		}
		if numDeleted != 3 {
			t.Errorf("expected to delete 3 entries; got %d", numDeleted)
		}
		// Verify what's left.
		verifyScan(proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 10,
			[]proto.EncodedKey{proto.EncodedKey("a"), proto.EncodedKey("abc")}, engine, t)
	}, t)
}
func TestEngineScan2(t *testing.T) { defer leaktest.AfterTest(t) // TODO(Tobias): Merge this with TestEngineScan1 and remove // either verifyScan or the other helper function. runWithAllEngines(func(engine Engine, t *testing.T) { keys := []proto.EncodedKey{ proto.EncodedKey("a"), proto.EncodedKey("aa"), proto.EncodedKey("aaa"), proto.EncodedKey("ab"), proto.EncodedKey("abc"), proto.EncodedKey(proto.KeyMax), } insertKeys(keys, engine, t) // Scan all keys (non-inclusive of final key). verifyScan(proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 10, keys[0:5], engine, t) verifyScan(proto.EncodedKey("a"), proto.EncodedKey(proto.KeyMax), 10, keys[0:5], engine, t) // Scan sub range. verifyScan(proto.EncodedKey("aab"), proto.EncodedKey("abcc"), 10, keys[3:5], engine, t) verifyScan(proto.EncodedKey("aa0"), proto.EncodedKey("abcc"), 10, keys[2:5], engine, t) // Scan with max values. verifyScan(proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 3, keys[0:3], engine, t) verifyScan(proto.EncodedKey("a0"), proto.EncodedKey(proto.KeyMax), 3, keys[1:4], engine, t) // Scan with max value 0 gets all values. verifyScan(proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 0, keys[0:5], engine, t) }, t) }
// TestBatchBasics verifies that all commands work in a batch, aren't // visible until commit, and then are all visible after commit. func TestBatchBasics(t *testing.T) { defer leaktest.AfterTest(t) e := NewInMem(proto.Attributes{}, 1<<20) defer e.Close() b := e.NewBatch() defer b.Close() if err := b.Put(proto.EncodedKey("a"), []byte("value")); err != nil { t.Fatal(err) } // Write an engine value to be deleted. if err := e.Put(proto.EncodedKey("b"), []byte("value")); err != nil { t.Fatal(err) } if err := b.Clear(proto.EncodedKey("b")); err != nil { t.Fatal(err) } // Write an engine value to be merged. if err := e.Put(proto.EncodedKey("c"), appender("foo")); err != nil { t.Fatal(err) } if err := b.Merge(proto.EncodedKey("c"), appender("bar")); err != nil { t.Fatal(err) } // Check all keys are in initial state (nothing from batch has gone // through to engine until commit). expValues := []proto.RawKeyValue{ {Key: proto.EncodedKey("b"), Value: []byte("value")}, {Key: proto.EncodedKey("c"), Value: appender("foo")}, } kvs, err := Scan(e, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 0) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(expValues, kvs) { t.Errorf("%v != %v", kvs, expValues) } // Now, merged values should be: expValues = []proto.RawKeyValue{ {Key: proto.EncodedKey("a"), Value: []byte("value")}, {Key: proto.EncodedKey("c"), Value: appender("foobar")}, } // Scan values from batch directly. kvs, err = Scan(b, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 0) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(expValues, kvs) { t.Errorf("%v != %v", kvs, expValues) } // Commit batch and verify direct engine scan yields correct values. if err := b.Commit(); err != nil { t.Fatal(err) } kvs, err = Scan(e, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 0) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(expValues, kvs) { t.Errorf("%v != %v", kvs, expValues) } }
func TestEngineBatch(t *testing.T) { defer leaktest.AfterTest(t) runWithAllEngines(func(engine Engine, t *testing.T) { numShuffles := 100 key := proto.EncodedKey("a") // Those are randomized below. type data struct { key proto.EncodedKey value []byte merge bool } batch := []data{ {key, appender("~ockroachDB"), false}, {key, appender("C~ckroachDB"), false}, {key, appender("Co~kroachDB"), false}, {key, appender("Coc~roachDB"), false}, {key, appender("C**k~oachDB"), false}, {key, appender("Cockr~achDB"), false}, {key, appender("Cockro~chDB"), false}, {key, appender("Cockroa~hDB"), false}, {key, appender("Cockroac~DB"), false}, {key, appender("Cockroach~B"), false}, {key, appender("CockroachD~"), false}, {key, nil, false}, {key, appender("C"), true}, {key, appender(" o"), true}, {key, appender(" c"), true}, {key, appender(" k"), true}, {key, appender("r"), true}, {key, appender(" o"), true}, {key, appender(" a"), true}, {key, appender(" c"), true}, {key, appender("h"), true}, {key, appender(" D"), true}, {key, appender(" B"), true}, } apply := func(eng Engine, d data) error { if d.value == nil { return eng.Clear(d.key) } else if d.merge { return eng.Merge(d.key, d.value) } return eng.Put(d.key, d.value) } get := func(eng Engine, key proto.EncodedKey) []byte { b, err := eng.Get(key) if err != nil { t.Fatal(err) } m := &MVCCMetadata{} if err := gogoproto.Unmarshal(b, m); err != nil { t.Fatal(err) } if m.Value == nil { return nil } return m.Value.Bytes } for i := 0; i < numShuffles; i++ { // In each run, create an array of shuffled operations. shuffledIndices := rand.Perm(len(batch)) currentBatch := make([]data, len(batch)) for k := range currentBatch { currentBatch[k] = batch[shuffledIndices[k]] } // Reset the key if err := engine.Clear(key); err != nil { t.Fatal(err) } // Run it once with individual operations and remember the result. for i, op := range currentBatch { if err := apply(engine, op); err != nil { t.Errorf("%d: op %v: %v", i, op, err) continue } } expectedValue := get(engine, key) // Run the whole thing as a batch and compare. b := engine.NewBatch() if err := b.Clear(key); err != nil { t.Fatal(err) } for _, op := range currentBatch { if err := apply(b, op); err != nil { t.Fatal(err) } } // Try getting the value from the batch. actualValue := get(b, key) if !bytes.Equal(actualValue, expectedValue) { t.Errorf("%d: expected %s, but got %s", i, expectedValue, actualValue) } // Try using an iterator to get the value from the batch. iter := b.NewIterator() iter.Seek(key) if !iter.Valid() { if currentBatch[len(currentBatch)-1].value != nil { t.Errorf("%d: batch seek invalid", i) } } else if !bytes.Equal(iter.Key(), key) { t.Errorf("%d: batch seek expected key %s, but got %s", i, key, iter.Key()) } else { m := &MVCCMetadata{} if err := iter.ValueProto(m); err != nil { t.Fatal(err) } if !bytes.Equal(m.Value.Bytes, expectedValue) { t.Errorf("%d: expected %s, but got %s", i, expectedValue, m.Value.Bytes) } } iter.Close() // Commit the batch and try getting the value from the engine. if err := b.Commit(); err != nil { t.Errorf("%d: %v", i, err) continue } actualValue = get(engine, key) if !bytes.Equal(actualValue, expectedValue) { t.Errorf("%d: expected %s, but got %s", i, expectedValue, actualValue) } } }, t) }