// TestSendRPCRetry verifies that when sendRPC fails on the first address but
// succeeds on the second, the reply from the second address is returned.
func TestSendRPCRetry(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	if err := g.SetNodeDescriptor(&proto.NodeDescriptor{NodeID: 1}); err != nil {
		t.Fatal(err)
	}
	// Fill the RangeDescriptor with 2 replicas.
	var descriptor = proto.RangeDescriptor{
		RaftID:   1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("z"),
	}
	for i := 1; i <= 2; i++ {
		addr := util.MakeUnresolvedAddr("tcp", fmt.Sprintf("node%d", i))
		nd := &proto.NodeDescriptor{
			NodeID: proto.NodeID(i),
			Address: proto.Addr{
				Network: addr.Network(),
				Address: addr.String(),
			},
		}
		if err := g.AddInfo(gossip.MakeNodeIDKey(proto.NodeID(i)), nd, time.Hour); err != nil {
			t.Fatal(err)
		}
		descriptor.Replicas = append(descriptor.Replicas, proto.Replica{
			NodeID:  proto.NodeID(i),
			StoreID: proto.StoreID(i),
		})
	}
	// Define our rpcSend stub which returns success on the second address.
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr,
		getArgs func(addr net.Addr) interface{}, getReply func() interface{},
		_ *rpc.Context) ([]interface{}, error) {
		if method == "Node.Scan" {
			// Discard the reply for the first address: it failed.
			_ = getReply()
			// The reply from the second address succeeds.
			reply := getReply()
			reply.(*proto.ScanResponse).Rows = append([]proto.KeyValue{},
				proto.KeyValue{Key: proto.Key("b"), Value: proto.Value{}})
			return []interface{}{reply}, nil
		}
		return nil, util.Errorf("unexpected method %v", method)
	}
	ctx := &DistSenderContext{
		rpcSend: testFn,
		rangeDescriptorDB: mockRangeDescriptorDB(func(_ proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			return []proto.RangeDescriptor{descriptor}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	call := proto.ScanCall(proto.Key("a"), proto.Key("d"), 1)
	sr := call.Reply.(*proto.ScanResponse)
	ds.Send(context.Background(), call)
	if err := sr.GoError(); err != nil {
		t.Fatal(err)
	}
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
}
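// The stub above relies on two helper types defined elsewhere in this test
// package. What follows is a minimal sketch of plausible definitions,
// inferred from how they are used in this test; in particular, the method
// name on mockRangeDescriptorDB is an assumption, not the package's
// confirmed API.

// rpcSendFn matches the signature of the rpc.Send hook the DistSender
// invokes, allowing tests to substitute a canned transport.
type rpcSendFn func(rpc.Options, string, []net.Addr,
	func(addr net.Addr) interface{}, func() interface{},
	*rpc.Context) ([]interface{}, error)

// mockRangeDescriptorDB stubs out range descriptor lookups with a plain
// function.
type mockRangeDescriptorDB func(proto.Key, lookupOptions) ([]proto.RangeDescriptor, error)

func (mdb mockRangeDescriptorDB) getRangeDescriptors(key proto.Key, opts lookupOptions) ([]proto.RangeDescriptor, error) {
	return mdb(key, opts)
}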
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	b := &client.Batch{}
	for _, key := range keys {
		b.Put(key, "value")
	}
	if err := db.Run(b); err != nil {
		t.Fatal(err)
	}
	for i := range keys {
		ts = append(ts, b.Results[i].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[i].Rows[0].Timestamp())
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to just before the
	// timestamp of the second key, so only key "a" is visible to the read.
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())

	// Scan.
	call := proto.ScanCall(proto.Key("a"), proto.Key("c"), 0)
	sr := call.Reply.(*proto.ScanResponse)
	sa := call.Args.(*proto.ScanRequest)
	sa.ReadConsistency = proto.INCONSISTENT
	if err := client.SendCall(ds, call); err != nil {
		t.Fatal(err)
	}
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}

	// ReverseScan.
	call = proto.ReverseScanCall(proto.Key("a"), proto.Key("c"), 0)
	rsr := call.Reply.(*proto.ReverseScanResponse)
	rsa := call.Args.(*proto.ReverseScanRequest)
	rsa.ReadConsistency = proto.INCONSISTENT
	if err := client.SendCall(ds, call); err != nil {
		t.Fatal(err)
	}
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
// TestMultiRangeScanWithMaxResults tests that commands which access multiple
// ranges with a MaxResults parameter are carried out properly.
func TestMultiRangeScanWithMaxResults(t *testing.T) {
	defer leaktest.AfterTest(t)
	testCases := []struct {
		splitKeys []proto.Key
		keys      []proto.Key
	}{
		{[]proto.Key{proto.Key("m")},
			[]proto.Key{proto.Key("a"), proto.Key("z")}},
		{[]proto.Key{proto.Key("h"), proto.Key("q")},
			[]proto.Key{proto.Key("b"), proto.Key("f"), proto.Key("k"),
				proto.Key("r"), proto.Key("w"), proto.Key("y")}},
	}

	for i, tc := range testCases {
		s := StartTestServer(t)
		ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
		tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, s.stopper)

		for _, sk := range tc.splitKeys {
			if err := s.node.ctx.DB.AdminSplit(sk); err != nil {
				t.Fatal(err)
			}
		}

		var call proto.Call
		for _, k := range tc.keys {
			call = proto.PutCall(k, proto.Value{Bytes: k})
			call.Args.Header().User = storage.UserRoot
			tds.Send(context.Background(), call)
			if err := call.Reply.Header().GoError(); err != nil {
				t.Fatal(err)
			}
		}

		// Try every possible ScanRequest startKey.
		for start := 0; start < len(tc.keys); start++ {
			// Try every possible maxResults, from 1 to one past the number
			// of remaining keys.
			for maxResults := 1; maxResults <= len(tc.keys)-start+1; maxResults++ {
				scan := proto.ScanCall(tc.keys[start], tc.keys[len(tc.keys)-1].Next(), int64(maxResults))
				scan.Args.Header().Timestamp = call.Reply.Header().Timestamp
				scan.Args.Header().User = storage.UserRoot
				tds.Send(context.Background(), scan)
				if err := scan.Reply.Header().GoError(); err != nil {
					t.Fatal(err)
				}
				rows := scan.Reply.(*proto.ScanResponse).Rows
				if start+maxResults <= len(tc.keys) && len(rows) != maxResults {
					t.Fatalf("%d: start=%s: expected %d rows, but got %d", i, tc.keys[start], maxResults, len(rows))
				} else if start+maxResults == len(tc.keys)+1 && len(rows) != maxResults-1 {
					t.Fatalf("%d: expected %d rows, but got %d", i, maxResults-1, len(rows))
				}
			}
		}
		defer s.Stop()
	}
}
// TestRetryOnWrongReplicaError sets up a DistSender on a minimal gossip
// network and a mock of rpc.Send, and verifies that the DistSender correctly
// retries upon encountering a stale entry in its range descriptor cache.
func TestRetryOnWrongReplicaError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	// Updated below, after it has first been returned.
	badStartKey := proto.Key("m")
	newRangeDescriptor := testRangeDescriptor
	goodStartKey := newRangeDescriptor.StartKey
	newRangeDescriptor.StartKey = badStartKey
	descStale := true

	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		ba := getArgs(testAddress).(*proto.BatchRequest)
		if _, ok := ba.GetArg(proto.RangeLookup); ok {
			if !descStale && bytes.HasPrefix(ba.Key, keys.Meta2Prefix) {
				t.Errorf("unexpected extra lookup for non-stale replica descriptor at %s", ba.Key)
			}
			br := getReply().(*proto.BatchResponse)
			r := &proto.RangeLookupResponse{}
			r.Ranges = append(r.Ranges, newRangeDescriptor)
			br.Add(r)
			// If we just returned the stale descriptor, set up returning the
			// good one next time.
			if bytes.HasPrefix(ba.Key, keys.Meta2Prefix) {
				if newRangeDescriptor.StartKey.Equal(badStartKey) {
					newRangeDescriptor.StartKey = goodStartKey
				} else {
					descStale = false
				}
			}
			return []gogoproto.Message{br}, nil
		}
		// While the descriptor is still stale, reject the Scan with a
		// RangeKeyMismatchError to force an eviction and a fresh lookup.
		if !newRangeDescriptor.StartKey.Equal(goodStartKey) {
			return nil, &proto.RangeKeyMismatchError{RequestStartKey: ba.Key, RequestEndKey: ba.EndKey}
		}
		return []gogoproto.Message{getReply()}, nil
	}
	ctx := &DistSenderContext{
		RPCSend: testFn,
	}
	ds := NewDistSender(ctx, g)
	call := proto.ScanCall(proto.Key("a"), proto.Key("d"), 0)
	sr := call.Reply.(*proto.ScanResponse)
	client.SendCallConverted(ds, context.Background(), call)
	if err := sr.GoError(); err != nil {
		t.Errorf("scan encountered error: %s", err)
	}
}
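// This batch-based revision of the test assumes a different rpcSendFn shape
// than the interface{}-based variant sketched earlier: here the transport
// hook traffics in gogoproto.Message values. A sketch of the definition this
// revision would use (assumed from the stub's signature, not confirmed):
type rpcSendFn func(rpc.Options, string, []net.Addr,
	func(addr net.Addr) gogoproto.Message, func() gogoproto.Message,
	*rpc.Context) ([]gogoproto.Message, error)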
// TestRetryOnWrongReplicaError sets up a DistSender on a minimal gossip
// network and a mock of rpc.Send, and verifies that the DistSender correctly
// retries upon encountering a stale entry in its range descriptor cache.
func TestRetryOnWrongReplicaError(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	// Updated below, after it has first been returned.
	newRangeDescriptor := testRangeDescriptor
	newEndKey := proto.Key("m")
	descStale := true

	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr,
		getArgs func(addr net.Addr) interface{}, getReply func() interface{},
		_ *rpc.Context) ([]interface{}, error) {
		header := getArgs(testAddress).(proto.Request).Header()
		if method == "Node.InternalRangeLookup" {
			// If the non-broken descriptor has already been returned, that's
			// an error.
			if !descStale && bytes.HasPrefix(header.Key, keys.Meta2Prefix) {
				t.Errorf("unexpected extra lookup for non-stale replica descriptor at %s", header.Key)
			}
			r := getReply().(*proto.InternalRangeLookupResponse)
			// The fresh descriptor is about to be returned.
			if bytes.HasPrefix(header.Key, keys.Meta2Prefix) &&
				newRangeDescriptor.StartKey.Equal(newEndKey) {
				descStale = false
			}
			r.Ranges = append(r.Ranges, newRangeDescriptor)
			return []interface{}{r}, nil
		}
		// When the Scan first turns up, update the descriptor for future
		// range descriptor lookups.
		if !newRangeDescriptor.StartKey.Equal(newEndKey) {
			newRangeDescriptor = *gogoproto.Clone(&testRangeDescriptor).(*proto.RangeDescriptor)
			newRangeDescriptor.StartKey = newEndKey
			return nil, &proto.RangeKeyMismatchError{RequestStartKey: header.Key, RequestEndKey: header.EndKey}
		}
		return []interface{}{getReply()}, nil
	}
	ctx := &DistSenderContext{
		rpcSend: testFn,
	}
	ds := NewDistSender(ctx, g)
	call := proto.ScanCall(proto.Key("a"), proto.Key("d"), 0)
	sr := call.Reply.(*proto.ScanResponse)
	ds.Send(context.Background(), call)
	if err := sr.GoError(); err != nil {
		t.Errorf("scan encountered error: %s", err)
	}
}
// scan queues a Scan or ReverseScan call over the span [s, e), marshaling
// the start and end keys from their Go values.
func (b *Batch) scan(s, e interface{}, maxRows int64, isReverse bool) {
	begin, err := marshalKey(s)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}
	end, err := marshalKey(e)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}
	if !isReverse {
		b.calls = append(b.calls, proto.ScanCall(proto.Key(begin), proto.Key(end), maxRows))
	} else {
		b.calls = append(b.calls, proto.ReverseScanCall(proto.Key(begin), proto.Key(end), maxRows))
	}
	b.initResult(1, 0, nil)
}
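// The unexported scan presumably backs thin exported wrappers on Batch.
// A minimal sketch, assuming the conventional names Scan and ReverseScan:

// Scan queues the retrieval of rows between s (inclusive) and e (exclusive)
// in ascending order; execution is deferred until the batch is run.
func (b *Batch) Scan(s, e interface{}, maxRows int64) {
	b.scan(s, e, maxRows, false)
}

// ReverseScan queues the retrieval of rows between s (inclusive) and e
// (exclusive) in descending order; execution is deferred until the batch
// is run.
func (b *Batch) ReverseScan(s, e interface{}, maxRows int64) {
	b.scan(s, e, maxRows, true)
}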
// TestMultiRangeScanDeleteRange tests that commands which access multiple
// ranges are carried out properly.
func TestMultiRangeScanDeleteRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := StartTestServer(t)
	defer s.Stop()
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: s.Clock()}, s.Gossip())
	tds := kv.NewTxnCoordSender(ds, s.Clock(), testContext.Linearizable, s.stopper)

	if err := s.node.ctx.DB.AdminSplit("m"); err != nil {
		t.Fatal(err)
	}
	writes := []proto.Key{proto.Key("a"), proto.Key("z")}
	get := proto.Call{
		Args: &proto.GetRequest{
			RequestHeader: proto.RequestHeader{
				Key: writes[0],
			},
		},
		Reply: &proto.GetResponse{},
	}
	get.Args.Header().User = storage.UserRoot
	get.Args.Header().EndKey = writes[len(writes)-1]
	tds.Send(context.Background(), get)
	if err := get.Reply.Header().GoError(); err == nil {
		t.Errorf("able to call Get with a key range: %v", get)
	}

	var call proto.Call
	for i, k := range writes {
		call = proto.PutCall(k, proto.Value{Bytes: k})
		call.Args.Header().User = storage.UserRoot
		tds.Send(context.Background(), call)
		if err := call.Reply.Header().GoError(); err != nil {
			t.Fatal(err)
		}
		scan := proto.ScanCall(writes[0], writes[len(writes)-1].Next(), 0)
		// The Put timestamps may have been pushed by the timestamp cache,
		// so make sure we see the written values in our Scan.
		scan.Args.Header().Timestamp = call.Reply.Header().Timestamp
		scan.Args.Header().User = storage.UserRoot
		tds.Send(context.Background(), scan)
		if err := scan.Reply.Header().GoError(); err != nil {
			t.Fatal(err)
		}
		if scan.Reply.Header().Txn == nil {
			t.Errorf("expected Scan to be wrapped in a Transaction")
		}
		if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) != i+1 {
			t.Fatalf("expected %d rows, but got %d", i+1, len(rows))
		}
	}

	del := proto.Call{
		Args: &proto.DeleteRangeRequest{
			RequestHeader: proto.RequestHeader{
				User:      storage.UserRoot,
				Key:       writes[0],
				EndKey:    writes[len(writes)-1].Next(),
				Timestamp: call.Reply.Header().Timestamp,
			},
		},
		Reply: &proto.DeleteRangeResponse{},
	}
	tds.Send(context.Background(), del)
	if err := del.Reply.Header().GoError(); err != nil {
		t.Fatal(err)
	}
	if del.Reply.Header().Txn == nil {
		t.Errorf("expected DeleteRange to be wrapped in a Transaction")
	}
	if n := del.Reply.(*proto.DeleteRangeResponse).NumDeleted; n != int64(len(writes)) {
		t.Errorf("expected %d keys to be deleted, but got %d instead", len(writes), n)
	}

	scan := proto.ScanCall(writes[0], writes[len(writes)-1].Next(), 0)
	scan.Args.Header().Timestamp = del.Reply.Header().Timestamp
	scan.Args.Header().User = storage.UserRoot
	scan.Args.Header().Txn = &proto.Transaction{Name: "MyTxn"}
	tds.Send(context.Background(), scan)
	if err := scan.Reply.Header().GoError(); err != nil {
		t.Fatal(err)
	}
	if txn := scan.Reply.Header().Txn; txn == nil || txn.Name != "MyTxn" {
		t.Errorf("wanted Txn to persist, but it changed to %v", txn)
	}
	if rows := scan.Reply.(*proto.ScanResponse).Rows; len(rows) > 0 {
		t.Fatalf("scan after delete returned rows: %v", rows)
	}
}
// TestMultiRangeMergeStaleDescriptor simulates the situation in which the
// DistSender executes a multi-range scan which encounters the stale descriptor
// of a range which has since incorporated its right neighbor by means of a
// merge. It is verified that the DistSender scans the correct key range
// exactly once.
func TestMultiRangeMergeStaleDescriptor(t *testing.T) {
	defer leaktest.AfterTest(t)
	g, s := makeTestGossip(t)
	defer s()
	// Assume we have two ranges, [a-b) and [b-KeyMax).
	merged := false
	// The stale first range descriptor which is unaware of the merge.
	var firstRange = proto.RangeDescriptor{
		RaftID:   1,
		StartKey: proto.Key("a"),
		EndKey:   proto.Key("b"),
		Replicas: []proto.Replica{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// The merged descriptor, which will be looked up after having processed
	// the stale range [a,b).
	var mergedRange = proto.RangeDescriptor{
		RaftID:   1,
		StartKey: proto.Key("a"),
		EndKey:   proto.KeyMax,
		Replicas: []proto.Replica{
			{
				NodeID:  1,
				StoreID: 1,
			},
		},
	}
	// Assume we have two key-value pairs, a=1 and c=2.
	existingKVs := []proto.KeyValue{
		{Key: proto.Key("a"), Value: proto.Value{Bytes: []byte("1")}},
		{Key: proto.Key("c"), Value: proto.Value{Bytes: []byte("2")}},
	}
	var testFn rpcSendFn = func(_ rpc.Options, method string, addrs []net.Addr,
		getArgs func(addr net.Addr) interface{}, getReply func() interface{},
		_ *rpc.Context) ([]interface{}, error) {
		if method != "Node.Scan" {
			t.Fatalf("unexpected method: %s", method)
		}
		header := getArgs(testAddress).(proto.Request).Header()
		reply := getReply().(*proto.ScanResponse)
		// Return all existing keys that fall within [Key, EndKey).
		results := []proto.KeyValue{}
		for _, curKV := range existingKVs {
			if header.Key.Less(curKV.Key.Next()) && curKV.Key.Less(header.EndKey) {
				results = append(results, curKV)
			}
		}
		reply.Rows = results
		return []interface{}{reply}, nil
	}
	ctx := &DistSenderContext{
		rpcSend: testFn,
		rangeDescriptorDB: mockRangeDescriptorDB(func(key proto.Key, _ lookupOptions) ([]proto.RangeDescriptor, error) {
			if !merged {
				// Assume a range merge operation happened.
				merged = true
				return []proto.RangeDescriptor{firstRange}, nil
			}
			return []proto.RangeDescriptor{mergedRange}, nil
		}),
	}
	ds := NewDistSender(ctx, g)
	call := proto.ScanCall(proto.Key("a"), proto.Key("d"), 10)
	// Set the Txn info to avoid an OpRequiresTxnError.
	call.Args.Header().Txn = &proto.Transaction{}
	reply := call.Reply.(*proto.ScanResponse)
	ds.Send(context.Background(), call)
	if err := reply.GoError(); err != nil {
		t.Fatalf("scan encountered error: %s", err)
	}
	if !reflect.DeepEqual(existingKVs, reply.Rows) {
		t.Fatalf("expected %v, got %v", existingKVs, reply.Rows)
	}
}
// ScanStruct scans the specified columns from the structured table identified
// by the destination slice. The slice element type and the start and end key
// types must be identical. The primary key columns within start and end are
// used to identify which rows to scan. The type must have previously been
// bound to a table using BindModel. If columns is empty all of the columns
// in the table are scanned.
func (b *Batch) ScanStruct(dest, start, end interface{}, maxRows int64, columns ...string) {
	sliceV := reflect.ValueOf(dest)
	if sliceV.Kind() != reflect.Ptr {
		b.initResult(0, 0, fmt.Errorf("dest must be a pointer to a slice: %T", dest))
		return
	}
	sliceV = sliceV.Elem()
	if sliceV.Kind() != reflect.Slice {
		b.initResult(0, 0, fmt.Errorf("dest must be a pointer to a slice: %T", dest))
		return
	}

	modelT := sliceV.Type().Elem()
	// Are we returning a slice of structs or pointers to structs?
	ptrResults := modelT.Kind() == reflect.Ptr
	if ptrResults {
		modelT = modelT.Elem()
	}

	m, err := b.DB.getModel(modelT, false)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}

	var scanColIDs map[uint32]bool
	if len(columns) > 0 {
		lowerStrings(columns)
		scanColIDs = make(map[uint32]bool, len(columns))
		for _, colName := range columns {
			col, ok := m.columnsByName[colName]
			if !ok {
				b.initResult(0, 0, fmt.Errorf("%s: unable to find column %s", m.name, colName))
				return
			}
			scanColIDs[col.ID] = true
		}
	}

	startV := reflect.Indirect(reflect.ValueOf(start))
	if modelT != startV.Type() {
		b.initResult(0, 0, fmt.Errorf("incompatible start key type: %s != %s", modelT, startV.Type()))
		return
	}
	endV := reflect.Indirect(reflect.ValueOf(end))
	if modelT != endV.Type() {
		b.initResult(0, 0, fmt.Errorf("incompatible end key type: %s != %s", modelT, endV.Type()))
		return
	}

	startKey, err := m.encodePrimaryKey(startV)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}
	endKey, err := m.encodePrimaryKey(endV)
	if err != nil {
		b.initResult(0, 0, err)
		return
	}
	if log.V(2) {
		log.Infof("Scan %q %q", startKey, endKey)
	}

	c := proto.ScanCall(proto.Key(startKey), proto.Key(endKey), maxRows)
	c.Post = func() error {
		reply := c.Reply.(*proto.ScanResponse)
		if len(reply.Rows) == 0 {
			return nil
		}

		var primaryKey []byte
		resultPtr := reflect.New(modelT)
		result := resultPtr.Elem()
		zero := reflect.Zero(result.Type())

		for _, row := range reply.Rows {
			// The rows for a single model object are stored contiguously; a
			// change in the primary key prefix marks the start of a new object.
			if primaryKey != nil && !bytes.HasPrefix(row.Key, primaryKey) {
				if ptrResults {
					sliceV = reflect.Append(sliceV, resultPtr)
					resultPtr = reflect.New(modelT)
					result = resultPtr.Elem()
				} else {
					sliceV = reflect.Append(sliceV, result)
					result.Set(zero)
				}
				if _, err := m.decodePrimaryKey(primaryKey, result); err != nil {
					return err
				}
			}

			remaining, err := m.decodePrimaryKey([]byte(row.Key), result)
			if err != nil {
				return err
			}
			primaryKey = []byte(row.Key[:len(row.Key)-len(remaining)])

			// The column ID is encoded as a uvarint suffix of the key.
			// (The original code re-checked a stale, already-nil err here;
			// that dead check has been dropped.)
			_, colID := roachencoding.DecodeUvarint(remaining)
			if scanColIDs != nil && !scanColIDs[uint32(colID)] {
				continue
			}
			col, ok := m.columnsByID[uint32(colID)]
			if !ok {
				return fmt.Errorf("%s: unable to find column %d", m.name, colID)
			}
			if err := unmarshalValue(&row.Value, result.FieldByIndex(col.field.Index)); err != nil {
				return err
			}
		}
		if ptrResults {
			sliceV = reflect.Append(sliceV, resultPtr)
		} else {
			sliceV = reflect.Append(sliceV, result)
		}
		reflect.ValueOf(dest).Elem().Set(sliceV)
		return nil
	}
	b.calls = append(b.calls, c)
	b.initResult(1, 0, nil)
}
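// A usage sketch for ScanStruct. The User type, its BindModel registration,
// and the Batch wiring below are illustrative assumptions; only the
// ScanStruct call itself reflects the API above.

// User is a hypothetical model type; assume it has previously been bound to
// a table via BindModel so its primary key and column metadata are known.
type User struct {
	ID   int
	Name string
}

func scanUsers(db *DB) ([]User, error) {
	var users []User
	// Attach the DB so ScanStruct can consult the bound model metadata
	// (it calls b.DB.getModel at queue time, before the batch runs).
	b := &Batch{DB: db}
	// Fetch at most 50 rows whose primary keys lie in [{ID: 1}, {ID: 100}),
	// restricted to the "name" column.
	b.ScanStruct(&users, User{ID: 1}, User{ID: 100}, 50, "name")
	if err := db.Run(b); err != nil {
		return nil, err
	}
	return users, nil
}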