func TestCloneProto(t *testing.T) {
	testCases := []struct {
		pb          proto.Message
		shouldPanic bool
	}{
		{&roachpb.StoreIdent{}, false},
		{&roachpb.StoreIdent{ClusterID: uuid.MakeV4()}, true},
		{&roachpb.TxnMeta{}, false},
		{&roachpb.TxnMeta{ID: uuid.NewV4()}, true},
		{&roachpb.Transaction{}, false},
		{&config.ZoneConfig{RangeMinBytes: 123, RangeMaxBytes: 456}, false},
	}
	for _, tc := range testCases {
		var clone proto.Message
		var panicObj interface{}
		func() {
			defer func() {
				panicObj = recover()
			}()
			clone = protoutil.Clone(tc.pb)
		}()

		if tc.shouldPanic {
			if panicObj == nil {
				t.Errorf("%T: expected panic but didn't get one", tc.pb)
			}
		} else {
			if panicObj != nil {
				if panicStr := fmt.Sprint(panicObj); !strings.Contains(panicStr, "attempt to clone") {
					t.Errorf("%T: got unexpected panic %s", tc.pb, panicStr)
				}
			}
		}

		if panicObj == nil {
			realClone := proto.Clone(tc.pb)
			if !reflect.DeepEqual(clone, realClone) {
				t.Errorf("%T: clone did not equal original. expected:\n%+v\ngot:\n%+v", tc.pb, realClone, clone)
			}
		}
	}
}
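// The defer/recover idiom above is easy to reuse on its own. Below is a
// minimal, hypothetical sketch (capturePanic is not part of the source): it
// runs an arbitrary function and returns the value it panicked with, or nil
// if it returned normally, which is exactly how the test distinguishes the
// shouldPanic cases.
func capturePanic(fn func()) (panicObj interface{}) {
	defer func() {
		panicObj = recover()
	}()
	fn()
	return
}

// Example use, mirroring the test body:
//
//	if obj := capturePanic(func() { protoutil.Clone(tc.pb) }); (obj != nil) != tc.shouldPanic {
//		t.Errorf("%T: unexpected panic behavior: %v", tc.pb, obj)
//	}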
// bootstrapCluster bootstraps multiple stores using the provided engines.
// The first bootstrapped store contains a single range spanning all keys.
// Initial range lookup metadata is populated for the range. Returns the ID
// of the newly bootstrapped cluster.
func bootstrapCluster(engines []engine.Engine) (uuid.UUID, error) {
	clusterID := uuid.MakeV4()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	ctx := storage.StoreContext{}
	ctx.ScanInterval = 10 * time.Minute
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	ctx.Tracer = tracing.NewTracer()
	// Create a KV DB with a local sender.
	stores := storage.NewStores(ctx.Clock)
	sender := kv.NewTxnCoordSender(stores, ctx.Clock, false, ctx.Tracer, stopper)
	ctx.DB = client.NewDB(sender)
	ctx.Transport = storage.NewLocalRPCTransport(stopper)

	for i, eng := range engines {
		sIdent := roachpb.StoreIdent{
			ClusterID: clusterID,
			NodeID:    1,
			StoreID:   roachpb.StoreID(i + 1),
		}

		// The bootstrapping store will not connect to other nodes so its
		// StoreConfig doesn't really matter.
		s := storage.NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})

		// Verify the store isn't already part of a cluster.
		if s.Ident.ClusterID != *uuid.EmptyUUID {
			return uuid.UUID{}, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
		}

		// Bootstrap store to persist the store ident.
		if err := s.Bootstrap(sIdent, stopper); err != nil {
			return uuid.UUID{}, err
		}

		// Create first range, writing directly to engine. Note this does
		// not create the range, just its data. Only do this if this is the
		// first store.
		if i == 0 {
			initialValues := GetBootstrapSchema().GetInitialValues()
			if err := s.BootstrapRange(initialValues); err != nil {
				return uuid.UUID{}, err
			}
		}
		if err := s.Start(stopper); err != nil {
			return uuid.UUID{}, err
		}

		stores.AddStore(s)

		// Initialize node and store ids. Only initialize the node once.
		if i == 0 {
			if nodeID, err := allocateNodeID(ctx.DB); nodeID != sIdent.NodeID || err != nil {
				return uuid.UUID{}, util.Errorf("expected to initialize node id allocator to %d, got %d: %s",
					sIdent.NodeID, nodeID, err)
			}
		}
		if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, ctx.DB); storeID != sIdent.StoreID || err != nil {
			return uuid.UUID{}, util.Errorf("expected to initialize store id allocator to %d, got %d: %s",
				sIdent.StoreID, storeID, err)
		}
	}
	return clusterID, nil
}
func TestFlowRegistry(t *testing.T) {
	reg := makeFlowRegistry()

	id1 := FlowID{uuid.MakeV4()}
	f1 := &Flow{}

	id2 := FlowID{uuid.MakeV4()}
	f2 := &Flow{}

	id3 := FlowID{uuid.MakeV4()}
	f3 := &Flow{}

	// A basic duration; needs to be significantly larger than possible delays
	// in scheduling goroutines.
	jiffy := 10 * time.Millisecond

	// -- Lookup, register, lookup, unregister, lookup. --

	if f := reg.LookupFlow(id1, 0); f != nil {
		t.Error("looked up unregistered flow")
	}

	reg.RegisterFlow(id1, f1)

	if f := reg.LookupFlow(id1, 0); f != f1 {
		t.Error("couldn't lookup previously registered flow")
	}

	reg.UnregisterFlow(id1)

	if f := reg.LookupFlow(id1, 0); f != nil {
		t.Error("looked up unregistered flow")
	}

	// -- Lookup with timeout, register in the meantime. --

	go func() {
		time.Sleep(jiffy)
		reg.RegisterFlow(id1, f1)
	}()

	if f := reg.LookupFlow(id1, 10*jiffy); f != f1 {
		t.Error("couldn't lookup registered flow (with wait)")
	}

	if f := reg.LookupFlow(id1, 0); f != f1 {
		t.Error("couldn't lookup registered flow")
	}

	// -- Multiple lookups before register. --

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		if f := reg.LookupFlow(id2, 10*jiffy); f != f2 {
			t.Error("couldn't lookup registered flow (with wait)")
		}
		wg.Done()
	}()

	go func() {
		if f := reg.LookupFlow(id2, 10*jiffy); f != f2 {
			t.Error("couldn't lookup registered flow (with wait)")
		}
		wg.Done()
	}()

	time.Sleep(jiffy)
	reg.RegisterFlow(id2, f2)
	wg.Wait()

	// -- Multiple lookups, with the first one failing. --

	var wg1 sync.WaitGroup
	var wg2 sync.WaitGroup

	wg1.Add(1)
	wg2.Add(1)

	go func() {
		if f := reg.LookupFlow(id3, jiffy); f != nil {
			t.Error("expected lookup to fail")
		}
		wg1.Done()
	}()

	go func() {
		if f := reg.LookupFlow(id3, 10*jiffy); f != f3 {
			t.Error("couldn't lookup registered flow (with wait)")
		}
		wg2.Done()
	}()

	wg1.Wait()
	reg.RegisterFlow(id3, f3)
	wg2.Wait()
}
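// For reference, one plausible shape for a registry with the blocking-lookup
// behavior exercised above. This is an illustrative sketch only (it is not
// the flowRegistry implementation under test); it assumes the FlowID and
// Flow types from this package and uses a buffered channel per waiter so
// that a late Register never blocks on a lookup that already timed out.
type exampleRegistry struct {
	mu      sync.Mutex
	flows   map[FlowID]*Flow
	waiters map[FlowID][]chan *Flow
}

func newExampleRegistry() *exampleRegistry {
	return &exampleRegistry{
		flows:   make(map[FlowID]*Flow),
		waiters: make(map[FlowID][]chan *Flow),
	}
}

// Register publishes a flow and wakes up any lookups waiting for its ID.
func (r *exampleRegistry) Register(id FlowID, f *Flow) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.flows[id] = f
	for _, ch := range r.waiters[id] {
		ch <- f // buffered; never blocks
	}
	delete(r.waiters, id)
}

// Lookup returns the flow registered under id, waiting up to timeout for it
// to appear; it returns nil if the flow is not registered in time.
func (r *exampleRegistry) Lookup(id FlowID, timeout time.Duration) *Flow {
	r.mu.Lock()
	if f, ok := r.flows[id]; ok {
		r.mu.Unlock()
		return f
	}
	if timeout == 0 {
		r.mu.Unlock()
		return nil
	}
	ch := make(chan *Flow, 1)
	r.waiters[id] = append(r.waiters[id], ch)
	r.mu.Unlock()

	select {
	case f := <-ch:
		return f
	case <-time.After(timeout):
		return nil
	}
}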
func TestClusterFlow(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numRows = 100

	args := base.TestClusterArgs{ReplicationMode: base.ReplicationManual}
	tc := serverutils.StartTestCluster(t, 3, args)
	defer tc.Stopper().Stop()

	sumDigitsFn := func(row int) parser.Datum {
		sum := 0
		for row > 0 {
			sum += row % 10
			row /= 10
		}
		return parser.NewDInt(parser.DInt(sum))
	}

	sqlutils.CreateTable(t, tc.ServerConn(0), "t",
		"num INT PRIMARY KEY, digitsum INT, numstr STRING, INDEX s (digitsum)",
		numRows,
		sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn))

	kvDB := tc.Server(0).KVClient().(*client.DB)
	desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(desc, desc.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	// Set up table readers on three hosts feeding data into a join reader on
	// the third host. This is a basic test for the distributed flow
	// infrastructure, including local and remote streams.
	//
	// Note that the ranges won't necessarily be local to the table readers, but
	// that doesn't matter for the purposes of this test.

	tr1 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(0, 8)},
	}

	tr2 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(8, 12)},
	}

	tr3 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(12, 100)},
	}

	jr := JoinReaderSpec{
		Table:         *desc,
		OutputColumns: []uint32{2},
	}

	txn := client.NewTxn(context.Background(), *kvDB)
	fid := FlowID{uuid.MakeV4()}

	req1 := &SetupFlowRequest{Txn: txn.Proto}
	req1.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr1},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{Mailbox: &MailboxSpec{StreamID: 0, TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req2 := &SetupFlowRequest{Txn: txn.Proto}
	req2.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr2},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{Mailbox: &MailboxSpec{StreamID: 1, TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req3 := &SetupFlowRequest{Txn: txn.Proto}
	req3.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{
			{
				Core: ProcessorCoreUnion{TableReader: &tr3},
				Output: []OutputRouterSpec{{
					Type: OutputRouterSpec_MIRROR,
					Streams: []StreamEndpointSpec{
						{LocalStreamID: LocalStreamID(0)},
					},
				}},
			},
			{
				Input: []InputSyncSpec{{
					Type:     InputSyncSpec_ORDERED,
					Ordering: Ordering{Columns: []Ordering_Column{{1, Ordering_Column_ASC}}},
					Streams: []StreamEndpointSpec{
						{Mailbox: &MailboxSpec{StreamID: 0}},
						{Mailbox: &MailboxSpec{StreamID: 1}},
						{LocalStreamID: LocalStreamID(0)},
					},
				}},
				Core: ProcessorCoreUnion{JoinReader: &jr},
				Output: []OutputRouterSpec{{
					Type:    OutputRouterSpec_MIRROR,
					Streams: []StreamEndpointSpec{{Mailbox: &MailboxSpec{SimpleResponse: true}}},
				}},
			},
		},
	}

	var clients []DistSQLClient
	for i := 0; i < 3; i++ {
		s := tc.Server(i)
		conn, err := s.RPCContext().GRPCDial(s.ServingAddr())
		if err != nil {
			t.Fatal(err)
		}
		clients = append(clients, NewDistSQLClient(conn))
	}

	ctx := context.Background()

	if log.V(1) {
		log.Infof(ctx, "Setting up flow on 0")
	}
	if resp, err := clients[0].SetupFlow(context.Background(), req1); err != nil {
		t.Fatal(err)
	} else if resp.Error != nil {
		t.Fatal(resp.Error)
	}

	if log.V(1) {
		log.Infof(ctx, "Setting up flow on 1")
	}
	if resp, err := clients[1].SetupFlow(context.Background(), req2); err != nil {
		t.Fatal(err)
	} else if resp.Error != nil {
		t.Fatal(resp.Error)
	}

	if log.V(1) {
		log.Infof(ctx, "Running flow on 2")
	}
	stream, err := clients[2].RunSimpleFlow(context.Background(), req3)
	if err != nil {
		t.Fatal(err)
	}

	var decoder StreamDecoder
	var rows sqlbase.EncDatumRows
	for {
		msg, err := stream.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
		err = decoder.AddMessage(msg)
		if err != nil {
			t.Fatal(err)
		}
		rows = testGetDecodedRows(t, &decoder, rows)
	}
	if done, trailerErr := decoder.IsDone(); !done {
		t.Fatal("stream not done")
	} else if trailerErr != nil {
		t.Fatal("error in the stream trailer:", trailerErr)
	}

	// The result should be all the numbers in string form, ordered by the
	// digit sum (and then by number).
	var results []string
	for sum := 1; sum <= 50; sum++ {
		for i := 1; i <= numRows; i++ {
			if int(*sumDigitsFn(i).(*parser.DInt)) == sum {
				results = append(results, fmt.Sprintf("['%s']", sqlutils.IntToEnglish(i)))
			}
		}
	}
	expected := strings.Join(results, " ")
	expected = "[" + expected + "]"
	if rowStr := rows.String(); rowStr != expected {
		t.Errorf("Result: %s\n Expected: %s\n", rowStr, expected)
	}
}
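// The expected-results loop above orders numbers by digit sum, breaking ties
// by the number itself. A small standalone illustration of that ordering
// (sumDigits and orderByDigitSum are hypothetical helpers, not part of the
// test): for max = 12 the result is [1 10 2 11 3 12 4 5 6 7 8 9], since 1
// and 10 share digit sum 1, 2 and 11 share digit sum 2, and so on.
func sumDigits(n int) int {
	sum := 0
	for n > 0 {
		sum += n % 10
		n /= 10
	}
	return sum
}

func orderByDigitSum(max int) []int {
	// A digit sum can never exceed 9 per decimal digit, so looping sums up to
	// 9*d, where d is the number of digits of max, covers every value (the
	// test uses the constant 50 for numRows = 100, which is similarly
	// conservative).
	maxSum := 0
	for m := max; m > 0; m /= 10 {
		maxSum += 9
	}
	var out []int
	for sum := 1; sum <= maxSum; sum++ {
		for i := 1; i <= max; i++ {
			if sumDigits(i) == sum {
				out = append(out, i)
			}
		}
	}
	return out
}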