Example #1
func parallelBench(b *testing.B, cm Congomap, fn func(*testing.PB)) {
	// doesn't necessarily fill the entire map
	for i := 0; i < len(states); i++ {
		cm.Store(randomState(), randomState())
	}
	b.RunParallel(fn)
}
Example #2
File: fmt_test.go Project: vsayer/go
func BenchmarkSprintfBoolean(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("%t", true)
		}
	})
}
Example #3
func BenchmarkConnRoutingKey(b *testing.B) {
	const workers = 16

	cluster := createCluster()
	cluster.NumConns = 1
	cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy())
	session := createSessionFromCluster(cluster, b)
	defer session.Close()

	if err := createTable(session, "CREATE TABLE IF NOT EXISTS routing_key_stress (id int primary key)"); err != nil {
		b.Fatal(err)
	}

	var seed uint64
	writer := func(pb *testing.PB) {
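		// Each worker atomically bumps the shared counter and keeps its own copy
		// (shadowing the outer seed), so the ids generated below differ per goroutine.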
		seed := atomic.AddUint64(&seed, 1)
		var i uint64 = 0
		query := session.Query("insert into routing_key_stress (id) values (?)")

		for pb.Next() {
			if _, err := query.Bind(i * seed).GetRoutingKey(); err != nil {
				b.Error(err)
				return
			}
			i++
		}
	}

	b.SetParallelism(workers)
	b.RunParallel(writer)
}
Example #4
File: fmt_test.go Project: vsayer/go
func BenchmarkSprintfString(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("%s", "hello")
		}
	})
}
Example #5
File: fmt_test.go Project: vsayer/go
func BenchmarkSprintfPrefixedInt(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("This is some meaningless prefix text that needs to be scanned %d", 6)
		}
	})
}
Example #6
func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}
Example #7
File: fmt_test.go Project: vsayer/go
func BenchmarkSprintfPadding(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("%16f", 1.0)
		}
	})
}
Example #8
func BenchmarkHashGet(b *testing.B) {
	mPath := "/test"
	cli := NewZKClient(testServers, time.Second*5, nil)
	defer cli.Close()
	cli.CreatePersistNode(mPath)
	defer cli.DeleteNode(mPath)

	cli.CreateEphemeralNode(mPath + "/127.0.0.1:6379")
	cli.CreateEphemeralNode(mPath + "/127.0.0.1:6380")

	fmt.Println("===============create addr dir===============")
	time.Sleep(2 * time.Second)

	zkm := NewZKMonitor(testServers, time.Second*5, &testService{}, nil, mPath)
	zkm.Run()
	defer zkm.Close()
	time.Sleep(2 * time.Second)

	hGetter := zkm.HashGetter(nil)
	key := []byte{1, 2, 3, 4, 5, 6, 7, 8}

	nilCnt := int32(0)
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			key[0] = byte(rand.Int())
			conn := hGetter.GetConn(key)
			if conn == nil {
				atomic.AddInt32(&nilCnt, 1)
			}
		}
	})
	b.Log("nil connection count:", nilCnt)
}
Example #9
// runMVCCMerge merges value into numKeys separate keys.
func runMVCCMerge(value *roachpb.Value, numKeys int, b *testing.B) {
	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)

	// Precompute keys so we don't waste time formatting them at each iteration.
	keys := make([]roachpb.Key, numKeys)
	for i := 0; i < numKeys; i++ {
		keys[i] = roachpb.Key(fmt.Sprintf("key-%d", i))
	}

	b.ResetTimer()

	// Use parallelism if specified when test is run.
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			ms := MVCCStats{}
			if err := MVCCMerge(rocksdb, &ms, keys[rand.Intn(numKeys)], *value); err != nil {
				b.Fatal(err)
			}
		}
	})

	// Read values out to force merge.
	for _, key := range keys {
		val, _, err := MVCCGet(rocksdb, key, roachpb.ZeroTimestamp, true, nil)
		if err != nil {
			b.Fatal(err)
		} else if val == nil {
			continue
		}
	}

	b.StopTimer()
}
Example #10
// runBenchmarkBank mirrors the SQL performed by examples/sql_bank, but
// structured as a benchmark for easier usage of the Go performance analysis
// tools like pprof, memprof and trace.
func runBenchmarkBank(b *testing.B, db *sql.DB) {
	if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS bank`); err != nil {
		b.Fatal(err)
	}

	{
		// Initialize the "accounts" table.
		schema := `
CREATE TABLE IF NOT EXISTS bank.accounts (
  id INT PRIMARY KEY,
  balance INT NOT NULL
)`
		if _, err := db.Exec(schema); err != nil {
			b.Fatal(err)
		}
		if _, err := db.Exec("TRUNCATE TABLE bank.accounts"); err != nil {
			b.Fatal(err)
		}

		var placeholders bytes.Buffer
		var values []interface{}
		for i := 0; i < *numAccounts; i++ {
			if i > 0 {
				placeholders.WriteString(", ")
			}
			fmt.Fprintf(&placeholders, "($%d, 0)", i+1)
			values = append(values, i)
		}
		stmt := `INSERT INTO bank.accounts (id, balance) VALUES ` + placeholders.String()
		if _, err := db.Exec(stmt, values...); err != nil {
			b.Fatal(err)
		}
	}

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			from := rand.Intn(*numAccounts)
			to := rand.Intn(*numAccounts - 1)
			if from == to {
				to = *numAccounts - 1
			}

			amount := rand.Intn(*maxTransfer)

			update := `
UPDATE bank.accounts
  SET balance = CASE id WHEN $1 THEN balance-$3 WHEN $2 THEN balance+$3 END
  WHERE id IN ($1, $2) AND (SELECT balance >= $3 FROM bank.accounts WHERE id = $1)
`
			if _, err := db.Exec(update, from, to, amount); err != nil {
				if log.V(1) {
					log.Warning(err)
				}
				continue
			}
		}
	})
	b.StopTimer()
}
Example #11
// runClientScan first creates test data (and resets the benchmarking
// timer). It then performs b.N client scans in increments of numRows
// keys over all of the data, restarting at the beginning of the
// keyspace, as many times as necessary.
func runClientScan(useSSL bool, numRows, numVersions int, b *testing.B) {
	const numKeys = 100000

	s, db := setupClientBenchData(useSSL, numVersions, numKeys, b)
	defer s.Stop()

	b.SetBytes(int64(numRows * valueSize))
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		startKeyBuf := append(make([]byte, 0, 64), []byte("key-")...)
		endKeyBuf := append(make([]byte, 0, 64), []byte("key-")...)
		for pb.Next() {
			// Choose a random key to start scan.
			keyIdx := rand.Int31n(int32(numKeys - numRows))
			startKey := roachpb.Key(encoding.EncodeUvarintAscending(
				startKeyBuf, uint64(keyIdx)))
			endKey := roachpb.Key(encoding.EncodeUvarintAscending(
				endKeyBuf, uint64(keyIdx)+uint64(numRows)))
			rows, pErr := db.Scan(startKey, endKey, int64(numRows))
			if pErr != nil {
				b.Fatalf("failed scan: %s", pErr)
			}
			if len(rows) != numRows {
				b.Fatalf("failed to scan: %d != %d", len(rows), numRows)
			}
		}
	})

	b.StopTimer()
}
Example #12
// runMVCCGet first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCGets.
func runMVCCGet(numVersions int, b *testing.B) {
	// Use the same number of keys for all of the mvcc get
	// benchmarks. Using a different number of keys per test gives
	// preferential treatment to tests with fewer keys. Note that the
	// datasets all fit in cache and the cache is pre-warmed.
	const numKeys = 100000

	rocksdb := setupMVCCScanData(numVersions, numKeys, b)
	defer rocksdb.Close()

	prewarmCache(rocksdb)

	b.SetBytes(1024)
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
		for pb.Next() {
			// Choose a random key to retrieve.
			keyIdx := rand.Int31n(int32(numKeys))
			key := proto.Key(encoding.EncodeUvarint(keyBuf[0:4], uint64(keyIdx)))
			walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
			ts := makeTS(walltime, 0)
			if v, _, err := MVCCGet(rocksdb, key, ts, true, nil); err != nil {
				b.Fatalf("failed get: %s", err)
			} else if len(v.Bytes) != 1024 {
				b.Fatalf("unexpected value size: %d", len(v.Bytes))
			}
		}
	})

	b.StopTimer()
}
Example #13
// runMVCCScan first creates test data (and resets the benchmarking
// timer). It then performs b.N MVCCScans in increments of numRows
// keys over all of the data in the rocksdb instance, restarting at
// the beginning of the keyspace, as many times as necessary.
func runMVCCScan(numRows, numVersions int, b *testing.B) {
	// Use the same number of keys for all of the mvcc scan
	// benchmarks. Using a different number of keys per test gives
	// preferential treatment to tests with fewer keys. Note that the
	// datasets all fit in cache and the cache is pre-warmed.
	const numKeys = 100000

	rocksdb := setupMVCCScanData(numVersions, numKeys, b)
	defer rocksdb.Close()

	prewarmCache(rocksdb)

	b.SetBytes(int64(numRows * 1024))
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
		for pb.Next() {
			// Choose a random key to start scan.
			keyIdx := rand.Int31n(int32(numKeys - numRows))
			startKey := proto.Key(encoding.EncodeUvarint(keyBuf[0:4], uint64(keyIdx)))
			walltime := int64(5 * (rand.Int31n(int32(numVersions)) + 1))
			ts := makeTS(walltime, 0)
			kvs, _, err := MVCCScan(rocksdb, startKey, proto.KeyMax, int64(numRows), ts, true, nil)
			if err != nil {
				b.Fatalf("failed scan: %s", err)
			}
			if len(kvs) != numRows {
				b.Fatalf("failed to scan: %d != %d", len(kvs), numRows)
			}
		}
	})

	b.StopTimer()
}
Example #14
File: sql_test.go Project: pjump/gcc
func BenchmarkManyConcurrentQueries(b *testing.B) {
	b.ReportAllocs()
	// To see lock contention in Go 1.4, 16~ cores and 128~ goroutines are required.
	const parallelism = 16

	db := newTestDB(b, "magicquery")
	defer closeDB(b, db)
	db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)

	stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
	if err != nil {
		b.Fatal(err)
	}
	defer stmt.Close()

	b.SetParallelism(parallelism)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rows, err := stmt.Query("sleep", 1)
			if err != nil {
				b.Error(err)
				return
			}
			rows.Close()
		}
	})
}
Example #15
func BenchmarkAddAndQueryPost(b *testing.B) {
	// Pre-build posts and queries so we're not measuring that.
	rand := rand.New(rand.NewSource(time.Now().UnixNano()))
	idx := &PostIndex{}
	posts := createPosts(aliceChain, 100000, rand) // Large number of posts to query
	for _, v := range posts {
		idx.AddPost(v.id, v.words)
	}

	posts = createPosts(aliceChain, b.N, rand) // New posts!
	queries := make([]string, b.N)
	for i := 0; i < len(queries); i++ {
		ql := rand.Intn(4) + 1
		t := aliceChain.Generate(ql, rand)
		w := splitToWords(t)
		queries[i] = randomQuery(w, rand)
	}
	var index int32 = -1 // Count up to N but atomically

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			i := atomic.AddInt32(&index, 1)
			if rand.Intn(5) == 0 {
				p := posts[i]
				idx.AddPost(p.id, p.words)
			} else {
				q := queries[i]
				idx.QueryPosts(q, 100)
			}
		}
	})
}
Example #16
// Benchmark that runs with parallelism to help find concurrency related
// issues. To run with parallelism, the 'go test' cpu flag must be set
// greater than 1, otherwise it just runs concurrently but not in parallel.
func BenchmarkParallelUdpParse(b *testing.B) {
	rand.Seed(22)
	numMessages := len(messages)
	dns := newDNS(false)
	client := dns.results.(*publish.ChanTransactions)

	// Drain the results channel while the test is running.
	go func() {
		totalMessages := 0
		for r := range client.Channel {
			_ = r
			totalMessages++
		}
		fmt.Printf("Parsed %d messages.\n", totalMessages)
	}()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		// Each iteration parses one message, either a request or a response.
		// The request and response could be parsed on different goroutines.
		for pb.Next() {
			q := messages[rand.Intn(numMessages)]
			var packet *protos.Packet
			if rand.Intn(2) == 0 {
				packet = newPacket(forward, q.request)
			} else {
				packet = newPacket(reverse, q.response)
			}
			dns.ParseUDP(packet)
		}
	})

	defer close(client.Channel)
}
Example #17
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}
Example #18
File: server_test.go Project: 2thetop/go
func benchmarkEndToEnd(dial func() (*Client, error), b *testing.B) {
	once.Do(startServer)
	client, err := dial()
	if err != nil {
		b.Fatal("error dialing:", err)
	}
	defer client.Close()

	// Synchronous calls
	args := &Args{7, 8}
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		reply := new(Reply)
		for pb.Next() {
			err := client.Call("Arith.Add", args, reply)
			if err != nil {
				b.Fatalf("rpc error: Add: expected no error but got string %q", err.Error())
			}
			if reply.C != args.A+args.B {
				b.Fatalf("rpc error: Add: expected %d got %d", reply.C, args.A+args.B)
			}
		}
	})
}
Example #19
func benchmarkClientServerParallel(b *testing.B, parallelism int) {
	b.ReportAllocs()
	b.StopTimer()
	handler, err := NewHandler(func(in Ping, out *Pong) error {
		if in.Data != "ping" {
			b.Fatalf("expected ping, got %q", in.Data)
		}
		out.Data = "pong"
		return nil
	})
	if err != nil {
		b.Fatalf("NewHandler: %v", err)
	}

	s := npctest.NewServer(handler)
	defer s.Close()

	c := NewClient([]string{s.Listener.Addr().String()})
	defer c.Close()
	b.SetParallelism(parallelism)
	b.StartTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var pong Pong
			err = c.Call(Ping{"ping"}, &pong)
			if err != nil {
				b.Fatalf("Call: %v", err)
			}
			if pong.Data != "pong" {
				b.Fatalf("expected pong, got %q", pong.Data)
			}
		}
	})
}
Example #20
func BenchmarkRedisClusterPing(b *testing.B) {
	if testing.Short() {
		b.Skip("skipping in short mode")
	}

	cluster := &clusterScenario{
		ports:     []string{"8220", "8221", "8222", "8223", "8224", "8225"},
		nodeIds:   make([]string, 6),
		processes: make(map[string]*redisProcess, 6),
		clients:   make(map[string]*redis.Client, 6),
	}
	if err := startCluster(cluster); err != nil {
		b.Fatal(err)
	}
	defer stopCluster(cluster)
	client := cluster.clusterClient(nil)
	defer client.Close()

	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			if err := client.Ping().Err(); err != nil {
				b.Fatal(err)
			}
		}
	})
}
Example #21
File: fmt_test.go Project: vsayer/go
func BenchmarkSprintfEmpty(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("")
		}
	})
}
Example #22
func BenchmarkInboundParallel(b *testing.B) {
	var reqCount int32
	serverAddr, err := setupBenchServer()
	require.NoError(b, err, "setupBenchServer failed")

	started := time.Now()

	b.RunParallel(func(pb *testing.PB) {
		// Start a client for each runner
		client, err := startClient(serverAddr)
		require.NoError(b, err, "startClient failed")
		defer client.Close()

		for pb.Next() {
			client.CallAndWait()
			atomic.AddInt32(&reqCount, int32(1))
		}
		fmt.Println("Successful requests", client.numTimes, "Mean", client.mean)
		for err, count := range client.errors {
			fmt.Printf("%v: %v\n", count, err)
		}
	})

	duration := time.Since(started)
	fmt.Println("Requests", reqCount, "RPS: ", float64(reqCount)/duration.Seconds())
}
Example #23
File: fmt_test.go Project: vsayer/go
func BenchmarkSprintfIntInt(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("%d %d", 5, 6)
		}
	})
}
Example #24
func BenchmarkParTaskForNew100TasksParallel(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		tasks := make([]Task, 100)

		for pb.Next() {
			for i := 0; i < 100; i++ {
				tasks[i] = NewTask("hello",
					1,
					true,
					func(i Input) bool {
						return true
					},
					func(i Input) Promise {
						return NewSettablePromise(true).DoneWith("return_result")
					})
			}

			parTask := Par(Seq(tasks[72:86]...),
				Par(Seq(tasks[0:12]...),
					Seq(tasks[12:35]...),
					Seq(tasks[35:47]...),
					Seq(tasks[47:54]...),
					Seq(tasks[54:58]...),
					Seq(tasks[58:63]...),
					Seq(tasks[63:72]...)),
				Par(tasks[86:92]...),
				Seq(tasks[92:]...),
			)

			parTask.Do()
		}
	})
}
Example #25
File: fmt_test.go Project: vsayer/go
func BenchmarkSprintfFloat(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			Sprintf("%g", 5.23184)
		}
	})
}
Example #26
// benchmarkThrottlerParallel is the parallel version of benchmarkThrottler.
// Set -cpu to change the number of threads. The QPS should be distributed
// across all threads and the reported benchmark value should be similar
// to the value of benchmarkThrottler.
func benchmarkThrottlerParallel(b *testing.B, qps int64) {
	threadCount := runtime.GOMAXPROCS(0)
	throttler, _ := NewThrottler("test", "queries", threadCount, qps, ReplicationLagModuleDisabled)
	defer throttler.Close()
	threadIDs := make(chan int, threadCount)
	for id := 0; id < threadCount; id++ {
		threadIDs <- id
	}
	close(threadIDs)
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		threadID := <-threadIDs

		for pb.Next() {
			backedOff := 0
			for {
				backoff := throttler.Throttle(threadID)
				if backoff == NotThrottled {
					break
				}
				backedOff++
				if backedOff > 1 {
					b.Logf("did not wait long enough after backoff. threadID = %v, last backoff = %v", threadID, backoff)
				}
				time.Sleep(backoff)
			}
		}
		throttler.ThreadFinished(threadID)
	})
}
Example #27
func BenchmarkConnStress(b *testing.B) {
	const workers = 16

	cluster := createCluster()
	cluster.NumConns = 1
	session := createSessionFromCluster(cluster, b)
	defer session.Close()

	if err := createTable(session, "CREATE TABLE IF NOT EXISTS conn_stress (id int primary key)"); err != nil {
		b.Fatal(err)
	}

	var seed uint64
	writer := func(pb *testing.PB) {
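		// Each worker atomically bumps the shared counter and keeps its own copy
		// (shadowing the outer seed), so the inserted ids differ per goroutine.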
		seed := atomic.AddUint64(&seed, 1)
		var i uint64 = 0
		for pb.Next() {
			if err := session.Query("insert into conn_stress (id) values (?)", i*seed).Exec(); err != nil {
				b.Error(err)
				return
			}
			i++
		}
	}

	b.SetParallelism(workers)
	b.RunParallel(writer)
}
Example #28
func BenchmarkSingleConn(b *testing.B) {
	srv := NewTestServer(b, 3)
	defer srv.Stop()

	cluster := testCluster(srv.Address, 3)
	// Set the timeout arbitrarily low so that the query hits the timeout in a
	// timely manner.
	cluster.Timeout = 500 * time.Millisecond
	cluster.NumConns = 1
	db, err := cluster.CreateSession()
	if err != nil {
		b.Fatalf("NewCluster: %v", err)
	}
	defer db.Close()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			err := db.Query("void").Exec()
			if err != nil {
				b.Error(err)
				return
			}
		}
	})
}
Example #29
func benchPutItemParallel(p, c int, b *testing.B) {
	svc := dynamodb.New(&aws.Config{
		DisableSSL: aws.Bool(true),
	})

	av, err := dynamodbattribute.ConvertToMap(dbItem{Key: "MyKey", Data: "MyData"})
	if err != nil {
		b.Fatal("expect no ConvertToMap errors", err)
	}
	params := &dynamodb.PutItemInput{
		Item:      av,
		TableName: aws.String(testTableName),
	}
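	// Pin the iteration count to the caller-supplied value rather than letting
	// the testing framework choose it.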
	b.N = c

	b.ResetTimer()
	b.SetParallelism(p)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, err = svc.PutItem(params)
			if err != nil {
				b.Error("expect no request errors", err)
			}
		}
	})
}
Example #30
func testSeperateRouter(b *testing.B, server_r *Router, client_r *Router, n int, m int) {
	name := "scheduler"
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			var wg sync.WaitGroup
			for i := 0; i < m; i++ {
				req := pbt.NewResourceReq()
				req.Id = proto.Uint64(0)
				i := rand.Intn(n)
				if m == 1 {
					if _, err := client_r.CallWait(name+string(i), "rpc", req, 5); err != nil {
						b.Log(err)
						b.FailNow()
					}
				} else {
					wg.Add(1)
					client_r.Call(name+string(i), "rpc", req, ClientProcessReponseWaitGroup, &wg, 0)
				}
			}
			if m > 1 {
				wg.Wait()
			}
		}
	})

	client_r.Stop()
	server_r.Stop()
}