Example #1
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(m.clock, rpc.LoadInsecureTLSConfig())
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, "")
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport()
	}
	if m.sender == nil {
		m.sender = kv.NewLocalSender()
	}
	if m.db == nil {
		txnSender := kv.NewTxnCoordSender(m.sender, m.clock, false)
		m.db = client.NewKV(txnSender, nil)
		m.db.User = storage.UserRoot
	}

	for i := 0; i < numStores; i++ {
		m.addStore(t)
	}
}
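
A hedged sketch of a test driving this helper; it relies only on the fields shown above, and cleanup is omitted since no Stop counterpart appears in the snippet:

func TestMultiStoreStartup(t *testing.T) {
	// Leaving fields zero-valued lets Start fill in defaults.
	m := &multiTestContext{}
	m.Start(t, 3)
	// m.db is now a transactional KV client backed by three stores.
	if m.db == nil {
		t.Fatal("expected Start to initialize the KV client")
	}
}
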
Example #2
// SimulateNetwork creates nodeCount gossip nodes. The network should
// be set to either "tcp" or "unix". The gossipInterval should be set
// to a compressed simulation timescale, though large enough to give
// the concurrent goroutines enough time to pass data back and forth
// in order to yield accurate estimates of how old data actually ends
// up being at the various nodes. After each gossipInterval period,
// simCallback is invoked; when it returns false, the simulation
// ends. If it returns true, the simulation continues another cycle.
//
// Node0 gossips the node count as well as the gossip sentinel. The
// gossip bootstrap hosts are set to the first three nodes (or all of
// them when fewer than three are available).
//
// At each cycle of the simulation, node 0 gossips the sentinel. If
// the simulation requires other nodes to gossip, this should be done
// via simCallback.
//
// The simulation callback receives a map of nodes, keyed by node address.
func SimulateNetwork(nodeCount int, network string, gossipInterval time.Duration,
	simCallback func(cycle int, nodes map[string]*Gossip) bool) {

	// Seed the random number generator for non-determinism across
	// multiple runs.
	rand.Seed(time.Now().UTC().UnixNano())

	tlsConfig := rpc.LoadInsecureTLSConfig()

	log.Infof("simulating network with %d nodes", nodeCount)
	servers := make([]*rpc.Server, nodeCount)
	addrs := make([]net.Addr, nodeCount)
	for i := 0; i < nodeCount; i++ {
		addr := util.CreateTestAddr(network)
		servers[i] = rpc.NewServer(addr, tlsConfig)
		if err := servers[i].Start(); err != nil {
			log.Fatal(err)
		}
		addrs[i] = servers[i].Addr()
	}
	var bootstrap []net.Addr
	if nodeCount < 3 {
		bootstrap = addrs
	} else {
		bootstrap = addrs[:3]
	}

	nodes := make(map[string]*Gossip, nodeCount)
	for i := 0; i < nodeCount; i++ {
		node := New(tlsConfig)
		node.Name = fmt.Sprintf("Node%d", i)
		node.SetBootstrap(bootstrap)
		node.SetInterval(gossipInterval)
		node.Start(servers[i])
		// Node 0 gossips node count.
		if i == 0 {
			node.AddInfo(KeyNodeCount, int64(nodeCount), time.Hour)
		}
		nodes[addrs[i].String()] = node
	}

	gossipTimeout := time.Tick(gossipInterval)
	var complete bool
	for cycle := 0; !complete; cycle++ {
		<-gossipTimeout
		// Node 0 gossips sentinel every cycle.
		nodes[addrs[0].String()].AddInfo(KeySentinel, int64(cycle), time.Hour)
		if !simCallback(cycle, nodes) {
			complete = true
		}
	}

	// Stop all servers & nodes.
	for i := 0; i < nodeCount; i++ {
		servers[i].Close()
		nodes[addrs[i].String()].Stop()
	}
}
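
A hedged sketch of invoking the simulation; this callback simply runs a fixed number of cycles, where a real caller would inspect the nodes map to measure how stale data is at each node:

// Run a ten-node simulation over unix sockets for 100 cycles.
SimulateNetwork(10, "unix", 10*time.Millisecond, func(cycle int, nodes map[string]*Gossip) bool {
	// Returning false ends the simulation after this cycle.
	return cycle < 100
})
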
Example #3
// TestGossipGroupsInfoStore verifies gossiping of groups via the
// gossip instance infostore.
func TestGossipGroupsInfoStore(t *testing.T) {
	g := New(rpc.LoadInsecureTLSConfig())

	// For int64.
	g.RegisterGroup("i", 3, MinGroup)
	for i := 0; i < 3; i++ {
		g.AddInfo(fmt.Sprintf("i.%d", i), int64(i), time.Hour)
	}
	values, err := g.GetGroupInfos("i")
	if err != nil {
		t.Errorf("error fetching int64 group: %v", err)
	}
	if len(values) != 3 {
		t.Errorf("incorrect number of values in group: %v", values)
	}
	for i := 0; i < 3; i++ {
		if values[i].(int64) != int64(i) {
			t.Errorf("index %d has incorrect value: %d, expected %d", i, values[i].(int64), i)
		}
	}
	if _, err := g.GetGroupInfos("i2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"i2\"")
	}

	// For float64.
	g.RegisterGroup("f", 3, MinGroup)
	for i := 0; i < 3; i++ {
		g.AddInfo(fmt.Sprintf("f.%d", i), float64(i), time.Hour)
	}
	values, err = g.GetGroupInfos("f")
	if err != nil {
		t.Errorf("error fetching float64 group: %v", err)
	}
	if len(values) != 3 {
		t.Errorf("incorrect number of values in group: %v", values)
	}
	for i := 0; i < 3; i++ {
		if values[i].(float64) != float64(i) {
			t.Errorf("index %d has incorrect value: %f, expected %d", i, values[i].(float64), i)
		}
	}

	// For string.
	g.RegisterGroup("s", 3, MinGroup)
	for i := 0; i < 3; i++ {
		g.AddInfo(fmt.Sprintf("s.%d", i), fmt.Sprintf("%d", i), time.Hour)
	}
	values, err = g.GetGroupInfos("s")
	if err != nil {
		t.Errorf("error fetching string group: %v", err)
	}
	if len(values) != 3 {
		t.Errorf("incorrect number of values in group: %v", values)
	}
	for i := 0; i < 3; i++ {
		if values[i].(string) != fmt.Sprintf("%d", i) {
			t.Errorf("index %d has incorrect value: %d, expected %s", i, values[i], fmt.Sprintf("%d", i))
		}
	}
}
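
GetGroupInfos returns the values as empty interfaces, so each block above asserts the concrete type it expects; a hedged sketch, continuing from the test, of handling values whose type is not known up front:

values, err := g.GetGroupInfos("i")
if err != nil {
	t.Fatal(err)
}
for _, v := range values {
	// A type switch avoids a panic on an unexpected concrete type.
	switch x := v.(type) {
	case int64:
		fmt.Printf("int64: %d\n", x)
	case float64:
		fmt.Printf("float64: %f\n", x)
	case string:
		fmt.Printf("string: %s\n", x)
	}
}
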
Example #4
// createTestStoreWithEngine creates a test store using the given engine and clock.
// The caller is responsible for closing the store on exit.
func createTestStoreWithEngine(t *testing.T, eng engine.Engine, clock *hlc.Clock,
	bootstrap bool) *storage.Store {
	rpcContext := rpc.NewContext(hlc.NewClock(hlc.UnixNano), rpc.LoadInsecureTLSConfig())
	g := gossip.New(rpcContext, gossip.TestInterval, "")
	lSender := kv.NewLocalSender()
	sender := kv.NewTxnCoordSender(lSender, clock, false)
	db := client.NewKV(sender, nil)
	db.User = storage.UserRoot
	// TODO(bdarnell): arrange to have the transport closed.
	store := storage.NewStore(clock, eng, db, g, multiraft.NewLocalRPCTransport())
	if bootstrap {
		if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}); err != nil {
			t.Fatal(err)
		}
	}
	lSender.AddStore(store)
	if bootstrap {
		if err := store.BootstrapRange(); err != nil {
			t.Fatal(err)
		}
	}
	if err := store.Start(); err != nil {
		t.Fatal(err)
	}
	return store
}
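
A hedged sketch of a call site; engine.NewInMem and its arguments are assumptions here, as is the store's cleanup method:

clock := hlc.NewClock(hlc.UnixNano)
eng := engine.NewInMem(proto.Attributes{}, 1<<20) // assumed constructor
store := createTestStoreWithEngine(t, eng, clock, true /* bootstrap */)
defer store.Stop() // assumed cleanup; the contract above only says to close the store
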
Example #5
// createTestRange creates a new range initialized to the full extent
// of the keyspace. The gossip instance is also returned for testing.
func createTestRange(eng engine.Engine, t *testing.T) (*Range, *gossip.Gossip) {
	rm := &proto.RangeMetadata{
		RangeID:         0,
		RangeDescriptor: testRangeDescriptor,
	}
	g := gossip.New(rpc.LoadInsecureTLSConfig())
	clock := hlc.NewClock(hlc.UnixNano)
	r := NewRange(rm, clock, eng, nil, g)
	r.Start()
	return r, g
}
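
A hedged sketch of a caller, reusing the assumed in-memory engine constructor from the previous sketch; Stop as the counterpart to Start is also an assumption:

eng := engine.NewInMem(proto.Attributes{}, 1<<20) // assumed constructor
r, g := createTestRange(eng, t)
defer r.Stop() // assumed counterpart to Start above
if r == nil || g == nil {
	t.Fatal("expected range and gossip instances")
}
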
Example #6
// NewNetwork creates nodeCount gossip nodes. The networkType should
// be set to either "tcp" or "unix". The gossipInterval should be set
// to a compressed simulation timescale, though large enough to give
// the concurrent goroutines enough time to pass data back and forth
// in order to yield accurate estimates of how old data actually ends
// up being at the various nodes (e.g. DefaultTestGossipInterval).
func NewNetwork(nodeCount int, networkType string,
	gossipInterval time.Duration, gossipBootstrap string) *Network {

	tlsConfig := rpc.LoadInsecureTLSConfig()
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(clock, tlsConfig)

	log.Infof("simulating gossip network with %d nodes", nodeCount)
	servers := make([]*rpc.Server, nodeCount)
	addrs := make([]net.Addr, nodeCount)
	for i := 0; i < nodeCount; i++ {
		addr := util.CreateTestAddr(networkType)
		servers[i] = rpc.NewServer(addr, rpcContext)
		if err := servers[i].Start(); err != nil {
			log.Fatal(err)
		}
		addrs[i] = servers[i].Addr()
	}
	var bootstrap []net.Addr
	if nodeCount < 3 {
		bootstrap = addrs
	} else {
		bootstrap = addrs[:3]
	}

	nodes := make([]*Node, nodeCount)
	for i := 0; i < nodeCount; i++ {
		node := gossip.New(rpcContext, gossipInterval, gossipBootstrap)
		node.Name = fmt.Sprintf("Node%d", i)
		node.SetBootstrap(bootstrap)
		node.SetInterval(gossipInterval)
		node.Start(servers[i])
		// Node 0 gossips node count.
		if i == 0 {
			node.AddInfo(gossip.KeyNodeCount, int64(nodeCount), time.Hour)
		}
		nodes[i] = &Node{Gossip: node, Addr: addrs[i], Server: servers[i]}
	}

	return &Network{
		Nodes:          nodes,
		Addrs:          addrs,
		NetworkType:    networkType,
		GossipInterval: gossipInterval}
}
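
A hedged sketch of constructing a network and walking its nodes; the field names follow the Network and Node structs populated above:

network := NewNetwork(5, "unix", 10*time.Millisecond, "")
for _, n := range network.Nodes {
	log.Infof("node %s listening on %s", n.Gossip.Name, n.Addr)
}
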
Example #7
func newServer() (*server, error) {
	// Determine hostname in case it hasn't been specified in -rpc or -http.
	host, err := os.Hostname()
	if err != nil {
		host = "127.0.0.1"
	}

	// Resolve the RPC address, qualifying a bare port with the hostname.
	if strings.HasPrefix(*rpcAddr, ":") {
		*rpcAddr = host + *rpcAddr
	}
	_, err = net.ResolveTCPAddr("tcp", *rpcAddr)
	if err != nil {
		return nil, util.Errorf("unable to resolve RPC address %q: %v", *rpcAddr, err)
	}

	var tlsConfig *rpc.TLSConfig
	if *certDir == "" {
		tlsConfig = rpc.LoadInsecureTLSConfig()
	} else {
		var err error
		if tlsConfig, err = rpc.LoadTLSConfig(*certDir); err != nil {
			return nil, util.Errorf("unable to load TLS config: %v", err)
		}
	}

	s := &server{
		host:  host,
		mux:   http.NewServeMux(),
		clock: hlc.NewClock(hlc.UnixNano),
		rpc:   rpc.NewServer(util.MakeRawAddr("tcp", *rpcAddr), tlsConfig),
	}
	s.clock.SetMaxDrift(*maxDrift)

	s.gossip = gossip.New(tlsConfig)
	s.kvDB = kv.NewDB(kv.NewDistKV(s.gossip), s.clock)
	s.kvREST = rest.NewRESTServer(s.kvDB)
	s.node = NewNode(s.kvDB, s.gossip)
	s.admin = newAdminServer(s.kvDB)
	s.status = newStatusServer(s.kvDB)
	s.structuredDB = structured.NewDB(s.kvDB)
	s.structuredREST = structured.NewRESTServer(s.structuredDB)

	return s, nil
}
Example #8
// This is an example for using the Call() method to Put and then Get
// a value for a given key.
func ExampleKV_Call() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with an actual host:port address (e.g. "localhost:8080") for a server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	key := proto.Key("a")
	value := []byte{1, 2, 3, 4}

	// Store test value.
	putResp := &proto.PutResponse{}
	if err := kvClient.Call(proto.Put, proto.PutArgs(key, value), putResp); err != nil {
		log.Fatal(err)
	}

	// Retrieve test value using same key.
	getResp := &proto.GetResponse{}
	if err := kvClient.Call(proto.Get, proto.GetArgs(key), getResp); err != nil {
		log.Fatal(err)
	}

	// Data validation.
	if getResp.Value == nil {
		log.Fatal("No value returned.")
	}
	if !bytes.Equal(value, getResp.Value.Bytes) {
		log.Fatal("Data mismatch on retrieved value.")
	}

	fmt.Println("Client example done.")
	// Output: Client example done.
}
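
A hedged sketch extending the example: the nil check above doubles as the absence signal, so probing a never-written key (inside the example body, before Close) could look like this, assuming absence yields a nil Value rather than an error:

missResp := &proto.GetResponse{}
if err := kvClient.Call(proto.Get, proto.GetArgs(proto.Key("never-written")), missResp); err != nil {
	log.Fatal(err)
}
if missResp.Value == nil {
	// Absence is reported through a nil Value, not an error (assumed).
	fmt.Println("key absent")
}
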
Example #9
// startGossip creates local and remote gossip instances.
// The remote gossip instance launches its gossip service.
func startGossip(t *testing.T) (local, remote *Gossip, lserver, rserver *rpc.Server) {
	tlsConfig := rpc.LoadInsecureTLSConfig()

	laddr := util.CreateTestAddr("unix")
	lserver = rpc.NewServer(laddr, tlsConfig)
	if err := lserver.Start(); err != nil {
		t.Fatal(err)
	}
	local = New(tlsConfig)
	raddr := util.CreateTestAddr("unix")
	rserver = rpc.NewServer(raddr, tlsConfig)
	if err := rserver.Start(); err != nil {
		t.Fatal(err)
	}
	remote = New(tlsConfig)
	local.start(lserver)
	remote.start(rserver)
	// Give both gossip instances a moment to start their loops.
	time.Sleep(time.Millisecond)
	return
}
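
A hedged sketch of a test body built on this helper; the servers are closed as in the simulation above, and whether the two instances exchange data depends on bootstrap wiring the helper does not establish:

local, remote, lserver, rserver := startGossip(t)
defer lserver.Close()
defer rserver.Close()
local.AddInfo("test-key", int64(42), time.Hour)
if val, err := local.GetInfo("test-key"); err != nil || val.(int64) != 42 {
	t.Errorf("unexpected value: %v, %v", val, err)
}
// Reading the same key from remote would require the instances to be
// connected, which startGossip alone does not guarantee.
_ = remote
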
Example #10
// TestGossipInfoStore verifies operation of gossip instance infostore.
func TestGossipInfoStore(t *testing.T) {
	g := New(rpc.LoadInsecureTLSConfig())
	g.AddInfo("i", int64(1), time.Hour)
	if val, err := g.GetInfo("i"); val.(int64) != int64(1) || err != nil {
		t.Errorf("error fetching int64: %v", err)
	}
	if _, err := g.GetInfo("i2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"i2\"")
	}
	g.AddInfo("f", float64(3.14), time.Hour)
	if val, err := g.GetInfo("f"); val.(float64) != float64(3.14) || err != nil {
		t.Errorf("error fetching float64: %v", err)
	}
	if _, err := g.GetInfo("f2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"f2\"")
	}
	g.AddInfo("s", "b", time.Hour)
	if val, err := g.GetInfo("s"); val.(string) != "b" || err != nil {
		t.Errorf("error fetching string: %v", err)
	}
	if _, err := g.GetInfo("s2"); err == nil {
		t.Errorf("expected error fetching nonexistent key \"s2\"")
	}
}
Example #11
// createTestClient creates a new KV client which connects using
// an HTTP sender to the server at addr.
func createTestClient(addr string) *client.KV {
	sender := newNotifyingSender(client.NewHTTPSender(addr, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	}))
	return client.NewKV(sender, nil)
}
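
A hedged sketch of the client in use, following the Call pattern and test-server helpers from the KV examples above:

serv := StartTestServer(nil)
defer serv.Stop()
kvClient := createTestClient(serv.HTTPAddr)
kvClient.User = storage.UserRoot
defer kvClient.Close()
putResp := &proto.PutResponse{}
if err := kvClient.Call(proto.Put, proto.PutArgs(proto.Key("a"), []byte{1}), putResp); err != nil {
	log.Fatal(err)
}
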
Example #12
// This is an example for using the RunTransaction() method to submit
// multiple Key Value API operations inside a transaction.
func ExampleKV_RunTransaction() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with an actual host:port address (e.g. "localhost:8080") for a server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	// Create test data.
	numKVPairs := 10
	keys := make([]string, numKVPairs)
	values := make([][]byte, numKVPairs)
	for i := 0; i < numKVPairs; i++ {
		keys[i] = fmt.Sprintf("testkey-%03d", i)
		values[i] = []byte(fmt.Sprintf("testvalue-%03d", i))
	}

	// Insert all KV pairs inside a transaction.
	putOpts := client.TransactionOptions{Name: "example put"}
	err := kvClient.RunTransaction(&putOpts, func(txn *client.KV) error {
		for i := 0; i < numKVPairs; i++ {
			txn.Prepare(proto.Put, proto.PutArgs(proto.Key(keys[i]), values[i]), &proto.PutResponse{})
		}
		// Note that the KV client is flushed automatically on transaction
		// commit. Invoking Flush after individual API methods is only
		// required if the result needs to be received to take conditional
		// action.
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read back KV pairs inside a transaction.
	getResponses := make([]proto.GetResponse, numKVPairs)
	getOpts := client.TransactionOptions{Name: "example get"}
	err = kvClient.RunTransaction(&getOpts, func(txn *client.KV) error {
		for i := 0; i < numKVPairs; i++ {
			txn.Prepare(proto.Get, proto.GetArgs(proto.Key(keys[i])), &getResponses[i])
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Check results.
	for i, getResp := range getResponses {
		if getResp.Value == nil {
			log.Fatal("No value returned for ", keys[i])
		} else {
			if !bytes.Equal(values[i], getResp.Value.Bytes) {
				log.Fatal("Data mismatch for ", keys[i], ", got: ", getResp.Value.Bytes)
			}
		}
	}

	fmt.Println("Transaction example done.")
	// Output: Transaction example done.
}
Example #13
// This is an example for using the Prepare() method to submit
// multiple Key Value API operations to be run in parallel. Flush() is
// then used to begin execution of all the prepared operations.
func ExampleKV_Prepare() {
	// Using built-in test server for this example code.
	serv := StartTestServer(nil)
	defer serv.Stop()

	// Replace with an actual host:port address (e.g. "localhost:8080") for a server cluster.
	serverAddress := serv.HTTPAddr

	// Key Value Client initialization.
	sender := client.NewHTTPSender(serverAddress, &http.Transport{
		TLSClientConfig: rpc.LoadInsecureTLSConfig().Config(),
	})
	kvClient := client.NewKV(sender, nil)
	kvClient.User = storage.UserRoot
	defer kvClient.Close()

	// Insert test data.
	batchSize := 12
	keys := make([]string, batchSize)
	values := make([][]byte, batchSize)
	for i := 0; i < batchSize; i++ {
		keys[i] = fmt.Sprintf("key-%03d", i)
		values[i] = []byte(fmt.Sprintf("value-%03d", i))

		putReq := proto.PutArgs(proto.Key(keys[i]), values[i])
		putResp := &proto.PutResponse{}
		kvClient.Prepare(proto.Put, putReq, putResp)
	}

	// Flush all puts for parallel execution.
	if err := kvClient.Flush(); err != nil {
		log.Fatal(err)
	}

	// Scan for the newly inserted rows in parallel.
	numScans := 3
	rowsPerScan := batchSize / numScans
	scanResponses := make([]proto.ScanResponse, numScans)
	for i := 0; i < numScans; i++ {
		firstKey := proto.Key(keys[i*rowsPerScan])
		lastKey := proto.Key(keys[((i+1)*rowsPerScan)-1])
		kvClient.Prepare(proto.Scan, proto.ScanArgs(firstKey, lastKey.Next(), int64(rowsPerScan)), &scanResponses[i])
	}
	// Flush all scans for parallel execution.
	if err := kvClient.Flush(); err != nil {
		log.Fatal(err)
	}

	// Check results, which may be returned out of order relative to insertion.
	var matchCount int
	for i := 0; i < numScans; i++ {
		for _, keyVal := range scanResponses[i].Rows {
			currKey := keyVal.Key
			currValue := keyVal.Value.Bytes
			for j, origKey := range keys {
				if bytes.Equal(currKey, proto.Key(origKey)) && bytes.Equal(currValue, values[j]) {
					matchCount++
				}
			}
		}
	}
	if matchCount != batchSize {
		log.Fatal("Data mismatch.")
	}

	fmt.Println("Prepare Flush example done.")
	// Output: Prepare Flush example done.
}