Example no. 1
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(m.clock, rpc.LoadInsecureTLSConfig())
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, "")
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport()
	}
	if m.sender == nil {
		m.sender = kv.NewLocalSender()
	}
	if m.db == nil {
		txnSender := kv.NewTxnCoordSender(m.sender, m.clock, false)
		m.db = client.NewKV(txnSender, nil)
		m.db.User = storage.UserRoot
	}

	for i := 0; i < numStores; i++ {
		m.addStore(t)
	}
}
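// A minimal usage sketch for the helper above (not part of the original test
// file). It assumes the multiTestContext struct and its addStore helper are
// defined elsewhere in the same test package; the test name is hypothetical,
// and no cleanup is shown because a matching Stop method does not appear in
// this excerpt.
func TestMultiTestContextExample(t *testing.T) {
	m := &multiTestContext{}
	// Start three stores sharing one manual clock, gossip instance,
	// transport, local sender and KV client.
	m.Start(t, 3)
	// Issue a request through the shared client (assumes addStore leaves the
	// stores ready to serve requests, which is not shown in this excerpt).
	pr := &proto.PutResponse{}
	if err := m.db.Call(proto.Put, proto.PutArgs(proto.Key("a"), []byte{1}), pr); err != nil {
		t.Fatal(err)
	}
}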
Example no. 2
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock,
	bootstrap bool) *storage.Store {
	rpcContext := rpc.NewContext(hlc.NewClock(hlc.UnixNano), rpc.LoadInsecureTLSConfig())
	g := gossip.New(rpcContext, gossip.TestInterval, "")
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, clock, false)
	db := client.NewKV(sender, nil)
	db.User = storage.UserRoot
	// TODO(bdarnell): arrange to have the transport closed.
	store := storage.NewStore(clock, eng, db, g, multiraft.NewLocalRPCTransport())
	if bootstrap {
		if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}); err != nil {
			t.Fatal(err)
		}
	}
	lSender.AddStore(store)
	if bootstrap {
		if err := store.BootstrapRange(); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(); err != nil {
		t.Fatal(err)
	}
	return store
}
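// A hedged usage sketch for createTestStoreWithEngine (not part of the
// original file): it reuses the in-memory engine and manual clock
// constructors that appear in the other examples and demonstrates the
// caller's responsibility to close the store on exit. The test name is
// illustrative only.
func TestCreateTestStoreWithEngineUsage(t *testing.T) {
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	store := createTestStoreWithEngine(t, eng, clock, true /* bootstrap */)
	defer store.Stop()
	// ... exercise the store ...
}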
Example no. 3
// BootstrapCluster bootstraps a store using the provided engine and
// cluster ID. The bootstrapped store contains a single range spanning
// all keys. Initial range lookup metadata is populated for the range.
//
// Returns a KV client for unit test purposes. The caller should close
// the returned client.
func BootstrapCluster(clusterID string, eng engine.Engine) (*client.KV, error) {
	sIdent := proto.StoreIdent{
		ClusterID: clusterID,
		NodeID:    1,
		StoreID:   1,
	}
	clock := hlc.NewClock(hlc.UnixNano)
	// Create a KV DB with a local sender.
	lSender := kv.NewLocalSender()
	localDB := client.NewKV(kv.NewTxnCoordSender(lSender, clock, false), nil)
	// TODO(bdarnell): arrange to have the transport closed.
	s := storage.NewStore(clock, eng, localDB, nil, multiraft.NewLocalRPCTransport())

	// Verify the store isn't already part of a cluster.
	if len(s.Ident.ClusterID) > 0 {
		return nil, util.Errorf("storage engine already belongs to a cluster (%s)", s.Ident.ClusterID)
	}

	// Bootstrap store to persist the store ident.
	if err := s.Bootstrap(sIdent); err != nil {
		return nil, err
	}
	// Create first range.
	if err := s.BootstrapRange(); err != nil {
		return nil, err
	}
	if err := s.Start(); err != nil {
		return nil, err
	}
	lSender.AddStore(s)

	// Initialize node and store ids after the fact to account
	// for use of node ID = 1 and store ID = 1.
	if nodeID, err := allocateNodeID(localDB); nodeID != sIdent.NodeID || err != nil {
		return nil, util.Errorf("expected to intialize node id allocator to %d, got %d: %v",
			sIdent.NodeID, nodeID, err)
	}
	if storeID, err := allocateStoreIDs(sIdent.NodeID, 1, localDB); storeID != sIdent.StoreID || err != nil {
		return nil, util.Errorf("expected to intialize store id allocator to %d, got %d: %v",
			sIdent.StoreID, storeID, err)
	}

	return localDB, nil
}
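// An illustrative caller for BootstrapCluster (not part of the original
// source): the cluster ID string is made up, and the in-memory engine
// constructor is borrowed from the other examples. As the doc comment above
// states, the returned client must be closed by the caller.
func bootstrapClusterExample() {
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	kvDB, err := BootstrapCluster("test-cluster", eng)
	if err != nil {
		log.Fatal(err)
	}
	defer kvDB.Close()
	// ... use kvDB to inspect the bootstrapped range or allocate IDs ...
}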
Example no. 4
// This is an example of using the Call() method to Put and then Get
// a value for a given key.
func ExampleKV_Call() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with the actual host:port address string (e.g. "localhost:8080") for the server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	key := proto.Key("a")
	value := []byte{1, 2, 3, 4}

	// Store test value.
	putResp := &proto.PutResponse{}
	if err := kvClient.Call(proto.Put, proto.PutArgs(key, value), putResp); err != nil {
		log.Fatal(err)
	}

	// Retrieve test value using same key.
	getResp := &proto.GetResponse{}
	if err := kvClient.Call(proto.Get, proto.GetArgs(key), getResp); err != nil {
		log.Fatal(err)
	}

	// Data validation.
	if getResp.Value == nil {
		log.Fatal("No value returned.")
	}
	if !bytes.Equal(value, getResp.Value.Bytes) {
		log.Fatal("Data mismatch on retrieved value.")
	}

	fmt.Println("Client example done.")
	// Output: Client example done.
}
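Example no. 5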
func TestLocalSenderLookupReplica(t *testing.T) {
	manualClock := hlc.NewManualClock(0)
	clock := hlc.NewClock(manualClock.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 1<<20)
	ls := NewLocalSender()
	db := client.NewKV(NewTxnCoordSender(ls, clock, false), nil)
	transport := multiraft.NewLocalRPCTransport()
	defer transport.Close()
	store := storage.NewStore(clock, eng, db, nil, transport)
	if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}); err != nil {
		t.Fatal(err)
	}
	ls.AddStore(store)
	if err := store.BootstrapRange(); err != nil {
		t.Fatal(err)
	}
	if err := store.Start(); err != nil {
		t.Fatal(err)
	}
	defer store.Stop()
	rng := splitTestRange(store, engine.KeyMin, proto.Key("a"), t)
	if err := store.RemoveRange(rng); err != nil {
		t.Fatal(err)
	}

	// Create two new stores with ranges we care about.
	var e [2]engine.Engine
	var s [2]*storage.Store
	ranges := []struct {
		storeID    proto.StoreID
		start, end proto.Key
	}{
		{2, proto.Key("a"), proto.Key("c")},
		{3, proto.Key("x"), proto.Key("z")},
	}
	for i, rng := range ranges {
		e[i] = engine.NewInMem(proto.Attributes{}, 1<<20)
		transport := multiraft.NewLocalRPCTransport()
		defer transport.Close()
		s[i] = storage.NewStore(clock, e[i], db, nil, transport)
		s[i].Ident.StoreID = rng.storeID
		if err := s[i].Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: rng.storeID}); err != nil {
			t.Fatal(err)
		}
		if err := s[i].Start(); err != nil {
			t.Fatal(err)
		}
		defer s[i].Stop()

		desc, err := store.NewRangeDescriptor(rng.start, rng.end, []proto.Replica{{StoreID: rng.storeID}})
		if err != nil {
			t.Fatal(err)
		}
		newRng, err := storage.NewRange(desc, s[i])
		if err != nil {
			t.Fatal(err)
		}
		if err := s[i].AddRange(newRng); err != nil {
			t.Error(err)
		}
		ls.AddStore(s[i])
	}

	if _, r, err := ls.lookupReplica(proto.Key("a"), proto.Key("c")); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("b"), nil); r.StoreID != s[0].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[0].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("b"), proto.Key("d")); r != nil || err == nil {
		t.Errorf("expected store 0 and error got %d", r.StoreID)
	}
	if _, r, err := ls.lookupReplica(proto.Key("x"), proto.Key("z")); r.StoreID != s[1].Ident.StoreID {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
	if _, r, err := ls.lookupReplica(proto.Key("y"), nil); r.StoreID != s[1].Ident.StoreID || err != nil {
		t.Errorf("expected store %d; got %d: %v", s[1].Ident.StoreID, r.StoreID, err)
	}
}
Example no. 6
// createTestClient creates a new KV client which connects using
// an HTTP sender to the server at addr.
func createTestClient(addr string) *client.KV {
	sender := newNotifyingSender(client.NewHTTPSender(addr, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	}))
	return client.NewKV(sender, nil)
}
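// A short sketch of how createTestClient might be used with the test server
// from the earlier examples (not part of the original file);
// newNotifyingSender is assumed to be defined elsewhere in the same test
// package, and the test name is hypothetical.
func TestCreateTestClientUsage(t *testing.T) {
	serv := StartTestServer(nil)
	defer serv.Stop()
	kvClient := createTestClient(serv.HTTPAddr)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()
	// ... issue requests via kvClient.Call or kvClient.RunTransaction ...
}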
Example no. 7
// This is an example of using the RunTransaction() method to submit
// multiple Key Value API operations inside a transaction.
func ExampleKV_RunTransaction() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with the actual host:port address string (e.g. "localhost:8080") for the server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	// Create test data.
	numKVPairs := 10
	keys := make([]string, numKVPairs)
	values := make([][]byte, numKVPairs)
	for i := 0; i < numKVPairs; i++ {
		keys[i] = fmt.Sprintf("testkey-%03d", i)
		values[i] = []byte(fmt.Sprintf("testvalue-%03d", i))
	}

	// Insert all KV pairs inside a transaction.
	putOpts := client.TransactionOptions{Name: "example put"}
	err := kvClient.RunTransaction(&putOpts, func(txn *client.KV) error {
		for i := 0; i < numKVPairs; i++ {
			txn.Prepare(proto.Put, proto.PutArgs(proto.Key(keys[i]), values[i]), &proto.PutResponse{})
		}
		// Note that the KV client is flushed automatically on transaction
		// commit. Invoking Flush after individual API methods is only
		// required if the result needs to be received to take conditional
		// action.
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read back KV pairs inside a transaction.
	getResponses := make([]proto.GetResponse, numKVPairs)
	getOpts := client.TransactionOptions{Name: "example get"}
	err = kvClient.RunTransaction(&getOpts, func(txn *client.KV) error {
		for i := 0; i < numKVPairs; i++ {
			txn.Prepare(proto.Get, proto.GetArgs(proto.Key(keys[i])), &getResponses[i])
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Check results.
	for i, getResp := range getResponses {
		if getResp.Value == nil {
			log.Fatal("No value returned for ", keys[i])
		} else {
			if !bytes.Equal(values[i], getResp.Value.Bytes) {
				log.Fatal("Data mismatch for ", keys[i], ", got: ", getResp.Value.Bytes)
			}
		}
	}

	fmt.Println("Transaction example done.")
	// Output: Transaction example done.
}
Example no. 8
// This is an example of using the Prepare() method to submit
// multiple Key Value API operations to be run in parallel. Flush() is
// then used to begin execution of all the prepared operations.
func ExampleKV_Prepare() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with the actual host:port address string (e.g. "localhost:8080") for the server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	// Insert test data.
	batchSize := 12
	keys := make([]string, batchSize)
	values := make([][]byte, batchSize)
	for i := 0; i < batchSize; i++ {
		keys[i] = fmt.Sprintf("key-%03d", i)
		values[i] = []byte(fmt.Sprintf("value-%03d", i))

		putReq := proto.PutArgs(proto.Key(keys[i]), values[i])
		putResp := &proto.PutResponse{}
		kvClient.Prepare(proto.Put, putReq, putResp)
	}

	// Flush all puts for parallel execution.
	if err := kvClient.Flush(); err != nil {
		log.Fatal(err)
	}

	// Scan for the newly inserted rows in parallel.
	numScans := 3
	rowsPerScan := batchSize / numScans
	scanResponses := make([]proto.ScanResponse, numScans)
	for i := 0; i < numScans; i++ {
		firstKey := proto.Key(keys[i*rowsPerScan])
		lastKey := proto.Key(keys[((i+1)*rowsPerScan)-1])
		kvClient.Prepare(proto.Scan, proto.ScanArgs(firstKey, lastKey.Next(), int64(rowsPerScan)), &scanResponses[i])
	}
	// Flush all scans for parallel execution.
	if err := kvClient.Flush(); err != nil {
		log.Fatal(err)
	}

	// Check the results, which may be returned out of order with respect to their creation.
	var matchCount int
	for i := 0; i < numScans; i++ {
		for _, keyVal := range scanResponses[i].Rows {
			currKey := keyVal.Key
			currValue := keyVal.Value.Bytes
			for j, origKey := range keys {
				if bytes.Equal(currKey, proto.Key(origKey)) && bytes.Equal(currValue, values[j]) {
					matchCount++
				}
			}
		}
	}
	if matchCount != batchSize {
		log.Fatal("Data mismatch.")
	}

	fmt.Println("Prepare Flush example done.")
	// Output: Prepare Flush example done.
}
Example no. 9
// verifyUncertainty writes values to a key at 5ns intervals and then launches
// a transaction at each value's timestamp which reads that value with the
// given maxOffset, verifying in the process that the correct values are read
// (usually after one transaction restart).
func verifyUncertainty(concurrency int, maxOffset time.Duration, t *testing.T) {
	db, _, clock, _, lSender, transport, err := createTestDB()
	if err != nil {
		t.Fatal(err)
	}
	defer transport.Close()

	txnOpts := &client.TransactionOptions{
		Name: "test",
	}

	key := []byte("key-test")
	// wgStart waits for all transactions to line up; wgEnd has the main
	// function wait for them to finish.
	var wgStart, wgEnd sync.WaitGroup
	wgStart.Add(concurrency + 1)
	wgEnd.Add(concurrency)

	// Initial high offset to allow for future writes.
	clock.SetMaxOffset(999 * time.Nanosecond)
	for i := 0; i < concurrency; i++ {
		value := []byte(fmt.Sprintf("value-%d", i))
		// Values will be written with 5ns spacing.
		futureTS := clock.Now().Add(5, 0)
		clock.Update(futureTS)
		// Expected number of versions skipped.
		skipCount := int(maxOffset) / 5
		if i+skipCount >= concurrency {
			skipCount = concurrency - i - 1
		}
		readValue := []byte(fmt.Sprintf("value-%d", i+skipCount))
		pr := proto.PutResponse{}
		db.Call(proto.Put, &proto.PutRequest{
			RequestHeader: proto.RequestHeader{
				Key: key,
			},
			Value: proto.Value{Bytes: value},
		}, &pr)
		if err := pr.GoError(); err != nil {
			t.Errorf("%d: got write error: %v", i, err)
		}
		gr := proto.GetResponse{}
		db.Call(proto.Get, &proto.GetRequest{
			RequestHeader: proto.RequestHeader{
				Key:       key,
				Timestamp: clock.Now(),
			},
		}, &gr)
		if gr.GoError() != nil || gr.Value == nil || !bytes.Equal(gr.Value.Bytes, value) {
			t.Fatalf("%d: expected success reading value %+v: %v", i, gr.Value, gr.GoError())
		}

		go func(i int) {
			defer wgEnd.Done()
			wgStart.Done()
			// Wait until the other goroutines are running.
			wgStart.Wait()

			txnManual := hlc.NewManualClock(futureTS.WallTime)
			txnClock := hlc.NewClock(txnManual.UnixNano)
			// Make sure to incorporate the logical component if the wall time
			// hasn't changed (i=0). The logical component will change
			// internally in a way we can't track, but we want to be just
			// ahead.
			txnClock.Update(futureTS.Add(0, 999))
			// The written values are spaced out in intervals of 5ns, so
			// setting <5ns here should make do without any restarts while
			// higher values require roughly offset/5 restarts.
			txnClock.SetMaxOffset(maxOffset)

			sender := NewTxnCoordSender(lSender, txnClock, false)
			txnDB := client.NewKV(sender, nil)
			txnDB.User = storage.UserRoot

			if err := txnDB.RunTransaction(txnOpts, func(txn *client.KV) error {
				// Read within the transaction.
				gr := proto.GetResponse{}
				txn.Call(proto.Get, &proto.GetRequest{
					RequestHeader: proto.RequestHeader{
						Key:       key,
						Timestamp: futureTS,
					},
				}, &gr)
				if err := gr.GoError(); err != nil {
					if _, ok := gr.GoError().(*proto.ReadWithinUncertaintyIntervalError); ok {
						return err
					}
					return util.Errorf("unexpected read error of type %s: %v", reflect.TypeOf(err), err)
				}
				if gr.Value == nil || gr.Value.Bytes == nil {
					return util.Errorf("no value read")
				}
				if !bytes.Equal(gr.Value.Bytes, readValue) {
					return util.Errorf("%d: read wrong value %q at %v, wanted %q", i, gr.Value.Bytes, futureTS, readValue)
				}
				return nil
			}); err != nil {
				t.Error(err)
			}
		}(i)
	}
	// Kick the goroutines loose.
	wgStart.Done()
	// Wait for the goroutines to finish.
	wgEnd.Wait()
}
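// Hypothetical invocations of verifyUncertainty; the test name, concurrency
// and offset values are illustrative rather than taken from the original test
// suite. With maxOffset = 0 there is no uncertainty interval, so skipCount is
// 0/5 = 0 and each transaction reads the value written at its own timestamp;
// with maxOffset = 10ns, skipCount is 10/5 = 2 and each transaction is
// expected to read a value up to two 5ns steps newer, usually after a single
// restart.
func TestTxnDBUncertainty(t *testing.T) { // name assumed for illustration
	verifyUncertainty(3, 0, t)
	verifyUncertainty(3, 10*time.Nanosecond, t)
}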
Example no. 10
// sendOne sends a single call via the wrapped sender. If the call is
// part of a transaction, the TxnCoordSender adds the transaction to a
// map of active transactions and begins heartbeating it. Every
// subsequent call for the same transaction updates the lastUpdateTS
// to prevent live transactions from being considered abandoned and
// garbage collected. Read/write mutating requests have their key or
// key range added to the transaction's interval tree of key ranges
// for eventual cleanup via resolved write intents.
//
// On success, and if the call is part of a transaction, the affected
// key range is recorded as live intents for eventual cleanup upon
// transaction commit. Upon successful txn commit, sendOne initiates
// cleanup of these intents.
func (tc *TxnCoordSender) sendOne(call *client.Call) {
	var startNS int64
	header := call.Args.Header()
	// If this call is part of a transaction...
	if header.Txn != nil {
		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if proto.IsReadOnly(call.Method) {
			header.Timestamp = header.Txn.OrigTimestamp
		} else {
			header.Timestamp = header.Txn.Timestamp
		}
		// End transaction must have its key set to the txn ID.
		if call.Method == proto.EndTransaction {
			header.Key = header.Txn.Key
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
		}
	}

	// Send the command through wrapped sender.
	tc.wrapped.Send(call)

	if header.Txn != nil {
		// If not already set, copy the request txn.
		if call.Reply.Header().Txn == nil {
			call.Reply.Header().Txn = gogoproto.Clone(header.Txn).(*proto.Transaction)
		}
		tc.updateResponseTxn(header, call.Reply.Header())
	}

	// If successful, we're in a transaction, and the command leaves
	// transactional intents, add the key or key range to the intents map.
	// If the transaction metadata doesn't yet exist, create it.
	if call.Reply.Header().GoError() == nil && header.Txn != nil && proto.IsTransactional(call.Method) {
		tc.Lock()
		var ok bool
		var txnMeta *txnMetadata
		if txnMeta, ok = tc.txns[string(header.Txn.ID)]; !ok {
			txnMeta = &txnMetadata{
				txn:             *header.Txn,
				keys:            util.NewIntervalCache(util.CacheConfig{Policy: util.CacheNone}),
				lastUpdateTS:    tc.clock.Now(),
				timeoutDuration: tc.clientTimeout,
				closer:          make(chan struct{}),
			}
			tc.txns[string(header.Txn.ID)] = txnMeta

			// TODO(jiajia): Reevaluate this logic of creating a goroutine
			// for each active transaction. Spencer suggests a heap
			// containing next heartbeat timeouts which is processed by a
			// single goroutine.
			go tc.heartbeat(header.Txn, txnMeta.closer)
		}
		txnMeta.lastUpdateTS = tc.clock.Now()
		txnMeta.addKeyRange(header.Key, header.EndKey)
		tc.Unlock()
	}

	// Cleanup intents and transaction map if end of transaction.
	switch t := call.Reply.Header().GoError().(type) {
	case *proto.TransactionAbortedError:
		// If already aborted, cleanup the txn on this TxnCoordSender.
		tc.cleanupTxn(&t.Txn)
	case *proto.OpRequiresTxnError:
		// Run a one-off transaction with that single command.
		log.Infof("%s: auto-wrapping in txn and re-executing", call.Method)
		txnOpts := &client.TransactionOptions{
			Name: "auto-wrap",
		}
		// Must not call Close() on this KV - that would call
		// tc.Close().
		tmpKV := client.NewKV(tc, nil)
		tmpKV.User = call.Args.Header().User
		tmpKV.UserPriority = call.Args.Header().GetUserPriority()
		call.Reply.Reset()
		tmpKV.RunTransaction(txnOpts, func(txn *client.KV) error {
			return txn.Call(call.Method, call.Args, call.Reply)
		})
	case nil:
		var txn *proto.Transaction
		if call.Method == proto.EndTransaction {
			txn = call.Reply.Header().Txn
			// If the -linearizable flag is set, we want to make sure that
			// all the clocks in the system are past the commit timestamp
			// of the transaction. This is guaranteed if either
			// - the commit timestamp is MaxOffset behind startNS
			// - MaxOffset ns were spent in this function
			// when returning to the client. Below we choose the option
			// that involves less waiting, which is likely the first one
			// unless a transaction commits with an odd timestamp.
			if tsNS := txn.Timestamp.WallTime; startNS > tsNS {
				startNS = tsNS
			}
			sleepNS := tc.clock.MaxOffset() -
				time.Duration(tc.clock.PhysicalNow()-startNS)
			if tc.linearizable && sleepNS > 0 {
				defer func() {
					log.V(1).Infof("%v: waiting %dms on EndTransaction for linearizability", txn.ID, sleepNS/1000000)
					time.Sleep(sleepNS)
				}()
			}
		}
		if txn != nil && txn.Status != proto.PENDING {
			tc.cleanupTxn(txn)
		}
	}
}
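// For orientation only: a minimal sketch (not part of the original source) of
// the wiring through which client calls reach sendOne, mirroring the test
// helpers in the earlier examples. Before any call can succeed, the local
// sender still needs at least one bootstrapped store added to it, as shown in
// the BootstrapCluster and createTestStoreWithEngine examples above.
func exampleSendOneWiring() *client.KV {
	clock := hlc.NewClock(hlc.UnixNano)
	lSender := NewLocalSender()
	// Every call issued through the returned KV client passes through
	// TxnCoordSender.sendOne before reaching the wrapped local sender.
	sender := NewTxnCoordSender(lSender, clock, false)
	db := client.NewKV(sender, nil)
	db.User = storage.UserRoot
	return db
}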