Example #1
File: main.go Project: txrxio/planb
func runServer(c *cli.Context) {
	// Pick the reverse proxy engine requested via the "engine" flag.
	var rp reverseproxy.ReverseProxy
	switch c.String("engine") {
	case "native":
		rp = &reverseproxy.NativeReverseProxy{}
	case "fasthttp":
		rp = &reverseproxy.FastReverseProxy{}
	default:
		log.Fatal(errors.New("invalid engine"))
	}
	// Separate Redis options for the read and the write connection.
	readOpts := backend.RedisOptions{
		Host:          c.String("read-redis-host"),
		Port:          c.Int("read-redis-port"),
		SentinelAddrs: c.String("read-redis-sentinel-addrs"),
		SentinelName:  c.String("read-redis-sentinel-name"),
		Password:      c.String("read-redis-password"),
		DB:            c.Int("read-redis-db"),
	}
	writeOpts := backend.RedisOptions{
		Host:          c.String("write-redis-host"),
		Port:          c.Int("write-redis-port"),
		SentinelAddrs: c.String("write-redis-sentinel-addrs"),
		SentinelName:  c.String("write-redis-sentinel-name"),
		Password:      c.String("write-redis-password"),
		DB:            c.Int("write-redis-db"),
	}
	routesBE, err := backend.NewRedisBackend(readOpts, writeOpts)
	if err != nil {
		log.Fatal(err)
	}
	// Optionally start the active health-check monitor on the routes backend.
	if c.Bool("active-healthcheck") {
		err = routesBE.StartMonitor()
		if err != nil {
			log.Fatal(err)
		}
	}
	// The router resolves incoming requests to backends stored in the Redis backend.
	r := router.Router{
		Backend:        routesBE,
		LogPath:        c.String("access-log"),
		DeadBackendTTL: c.Int("dead-backend-time"),
	}
	err = r.Init()
	if err != nil {
		log.Fatal(err)
	}
	// Bind the proxy listener; the flush interval is given in milliseconds, the timeouts in seconds.
	addr, err := rp.Initialize(reverseproxy.ReverseProxyConfig{
		Listen:          c.String("listen"),
		Router:          &r,
		RequestIDHeader: c.String("request-id-header"),
		FlushInterval:   time.Duration(c.Int("flush-interval")) * time.Millisecond,
		DialTimeout:     time.Duration(c.Int("dial-timeout")) * time.Second,
		RequestTimeout:  time.Duration(c.Int("request-timeout")) * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	handleSignals(rp)
	log.Printf("Listening on %s...\n", addr)
	// Listen blocks until the proxy is stopped; afterwards release the router and the monitor.
	rp.Listen()
	r.Stop()
	routesBE.StopMonitor()
}
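handleSignals is not defined in this example. A minimal sketch of what it presumably does, assuming the standard os/signal package and the same Stop method used in the test below, could look like this; the actual planb implementation may differ. It assumes imports of "os", "os/signal" and "syscall".
// Hypothetical sketch only: stop the proxy on SIGINT/SIGTERM so that
// rp.Listen() returns and runServer can run its cleanup code.
func handleSignals(rp reverseproxy.ReverseProxy) {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-sigCh
		rp.Stop()
	}()
}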
Example #2
File: main_test.go Project: txrxio/planb
func (s *S) TestServeHTTPStressAllLeakDetector(c *check.C) {
	if testing.Short() {
		c.Skip("this test takes a long time, especially with -race")
	}
	checkLeaksEnabled := os.Getenv("PLANB_CHECK_LEAKS") != ""
	log.SetOutput(ioutil.Discard)
	defer log.SetOutput(os.Stderr)
	// Four backend servers per frontend; each answers with its own name so the
	// test can tell which backend actually served a request.
	nFrontends := 50
	nServers := nFrontends * 4
	servers := make([]*httptest.Server, nServers)
	allNamesMap := map[string]struct{}{}
	for i := range servers {
		msg := fmt.Sprintf("server-%d", i)
		allNamesMap[msg] = struct{}{}
		srv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
			rw.Write([]byte(msg))
		}))
		defer srv.Close()
		servers[i] = srv
	}
	// Register each frontend in Redis together with its share of the backends.
	frontends := make([]string, nFrontends)
	for i := range frontends {
		frontend := fmt.Sprintf("stressfront%0d.com", i)
		frontends[i] = frontend
		err := s.redis.RPush("frontend:"+frontend, frontend).Err()
		c.Assert(err, check.IsNil)
		ratio := nServers / nFrontends
		for j := 0; j < ratio; j++ {
			err := s.redis.RPush("frontend:"+frontend, servers[(i*ratio)+j].URL).Err()
			c.Assert(err, check.IsNil)
		}
		if i > nFrontends/2 {
			// Add invalid backends forcing errors on half of the frontends
			err := s.redis.RPush("frontend:"+frontend, "http://127.0.0.1:32412", "http://127.0.0.1:32413").Err()
			c.Assert(err, check.IsNil)
		}
	}
	// One heap-profile file per stress round (written only when leak checking is enabled).
	nProffs := 4
	files := make([]*os.File, nProffs)
	if checkLeaksEnabled {
		for i := range files {
			files[i], _ = os.OpenFile(fmt.Sprintf("./planb_stress_%d_mem.pprof", i), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0660)
		}
	}
	opts := backend.RedisOptions{
		Host: "localhost",
		Port: 6379,
		DB:   int(redisDB),
	}
	routesBE, err := backend.NewRedisBackend(opts, opts)
	c.Assert(err, check.IsNil)
	r := router.Router{Backend: routesBE}
	err = r.Init()
	c.Assert(err, check.IsNil)
	var nativeRP reverseproxy.ReverseProxy = &reverseproxy.NativeReverseProxy{}
	addr, err := nativeRP.Initialize(reverseproxy.ReverseProxyConfig{
		Listen:      ":0",
		Router:      &r,
		DialTimeout: time.Second,
	})
	c.Assert(err, check.IsNil)
	go nativeRP.Listen()
	defer nativeRP.Stop()
	// A small pool of client goroutines consumes host names from rec and records
	// which backend answered each request.
	nClients := 4
	rec := make(chan string, 1000)
	wg := sync.WaitGroup{}
	accessedBackends := map[string]struct{}{}
	mtx := sync.Mutex{}
	for i := 0; i < nClients; i++ {
		go func() {
			for host := range rec {
				req, inErr := http.NewRequest("GET", fmt.Sprintf("http://%s/", addr), nil)
				c.Assert(inErr, check.IsNil)
				req.Host = host
				rsp, inErr := http.DefaultClient.Do(req)
				c.Assert(inErr, check.IsNil)
				srvName, _ := ioutil.ReadAll(rsp.Body)
				rsp.Body.Close()
				if len(srvName) != 0 {
					mtx.Lock()
					accessedBackends[string(srvName)] = struct{}{}
					mtx.Unlock()
				}
				wg.Done()
			}
		}()
	}
	// One stress round per profile slot: 20000 requests spread over all frontends,
	// then check that every backend was reached at least once.
	N := 20000
	for _, f := range files {
		for i := 0; i < N; i++ {
			wg.Add(1)
			rec <- frontends[i%len(frontends)]
		}
		wg.Wait()
		c.Assert(accessedBackends, check.DeepEquals, allNamesMap)
		if checkLeaksEnabled {
			runtime.GC()
			pprof.WriteHeapProfile(f)
		}
	}
	if checkLeaksEnabled {
		for _, f := range files {
			f.Close()
		}
	}
}
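Usage note: the heap profiles are only written when the PLANB_CHECK_LEAKS environment variable is set before running go test. The resulting planb_stress_0_mem.pprof through planb_stress_3_mem.pprof files can then be opened and compared round by round with go tool pprof; memory that keeps growing after the explicit runtime.GC() calls between rounds is what a leak in the proxy path would look like. The exact pprof workflow is a suggestion, not something the project prescribes.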