Example #1
func main() {
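	// host, port and interval are assumed to be package-level variables
	// (e.g. command-line flags) defined elsewhere in this program.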
	cf := config.NewDefaultProxy()
	cf.PoolCapacity = 2
	peer := proxy.New(cf)

	for {
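		// Once per interval, borrow a client from the pool and ping the servant.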
		time.Sleep(time.Duration(interval) * time.Second)
		fmt.Println()

		client, err := peer.ServantByAddr(host + ":" + port)
		if err != nil {
			fmt.Println(err)
			continue
		}

		ctx := rpc.NewContext()
		ctx.Reason = "broken.test"
		ctx.Rid = time.Now().UnixNano()
		pong, err := client.Ping(ctx)
		if err != nil {
			fmt.Println("err:", err)

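			// An I/O error means the underlying connection is broken; close the
			// client so the broken connection is not reused.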
			if proxy.IsIoError(err) {
				client.Close()
			}
		} else {
			fmt.Println(pong)
		}

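		// Return the client to the pool whether or not the call succeeded.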
		client.Recycle()
	}
}
Example #2
func main() {
	cf := config.NewDefaultProxy()
	cf.IoTimeout = time.Hour
	cf.TcpNoDelay = tcpNoDelay
	prx := proxy.New(cf)

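	// zk is assumed to hold the ZooKeeper address; block until the proxy has
	// discovered the cluster topology.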
	etclib.Dial([]string{zk})
	go prx.StartMonitorCluster()
	prx.AwaitClusterTopologyReady()

	// test pool
	if testPool {
		testServantPool(prx)
		pause("pool tested")
	}

	go report.run()

	wg := new(sync.WaitGroup)
	t1 := time.Now()
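	// Ramp the concurrency level from c1 to c2 in steps of 10.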
	for k := c1; k <= c2; k += 10 {
		Concurrency = k

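		// Rebuild the proxy so its pool capacity matches the current concurrency.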
		cf.PoolCapacity = Concurrency
		prx = proxy.New(cf)

		for i := 0; i < Rounds; i++ {
			for j := 0; j < k; j++ {
				wg.Add(1)
				go runSession(prx, wg, i+1, j)
			}

			wg.Wait()
		}
	}

	elapsed := time.Since(t1)
	log.Printf("Elapsed: %s, calls: {%s, %.1f/s}, sessions: {%s, %.1f/s}, errors: {conn:%d, io:%d call:%d}",
		elapsed,
		gofmt.Comma(report.callOk),
		float64(report.callOk)/elapsed.Seconds(),
		gofmt.Comma(int64(report.sessionN)),
		float64(report.sessionN)/elapsed.Seconds(),
		report.connErrs,
		report.ioErrs,
		report.callErrs)
}
Example #3
func BenchmarkPingOnLocalhost(b *testing.B) {
	b.ReportAllocs()

	cf := &config.ConfigProxy{PoolCapacity: 1}
	client, err := proxy.New(cf).ServantByAddr("localhost:9001")
	if err != nil {
		b.Fatal(err)
	}
	defer client.Recycle()

	ctx := rpc.NewContext()
	ctx.Reason = "benchmark"
	ctx.Rid = 2

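	// The Ping error is ignored here; the benchmark only measures throughput
	// and allocations.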
	for i := 0; i < b.N; i++ {
		client.Ping(ctx)
	}
}
Example #4
func (this *FunServantImpl) createServants() {
	log.Info("creating servants...")

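	// Most servants below are optional and created only when their configuration
	// section is enabled; the peers proxy is mandatory and idgen is always built.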
	// proxy can dynamically auto discover peers
	if this.conf.Proxy.Enabled() {
		log.Debug("creating servant: proxy")
		this.proxy = proxy.New(this.conf.Proxy)
	} else {
		panic("peers proxy required")
	}

	log.Debug("creating servant: idgen")
	var err error
	this.idgen, err = idgen.NewIdGenerator(this.conf.IdgenWorkerId)
	if err != nil {
		panic(err)
	}

	if this.conf.Lcache.Enabled() {
		log.Debug("creating servant: lcache")
		this.lc = cache.NewLruCache(this.conf.Lcache.MaxItems)
		this.lc.OnEvicted = this.onLcLruEvicted
	}

	if this.conf.Memcache.Enabled() {
		log.Debug("creating servant: memcache")
		this.mc = memcache.New(this.conf.Memcache)
	}

	if this.conf.Redis.Enabled() {
		log.Debug("creating servant: redis")
		this.rd = redis.New(this.conf.Redis)
	}

	if this.conf.Lock.Enabled() {
		log.Debug("creating servant: lock")
		this.lk = lock.New(this.conf.Lock)
	}

	if this.conf.Mysql.Enabled() {
		log.Debug("creating servant: mysql")
		this.my = mysql.New(this.conf.Mysql)

		switch this.conf.Mysql.CacheStore {
		case "mem":
			this.dbCacheStore = store.NewMemStore(this.conf.Mysql.CacheStoreMemMaxItems)

		case "redis":
			this.dbCacheStore = store.NewRedisStore(this.conf.Mysql.CacheStoreRedisPool,
				this.conf.Redis)

		default:
			panic("unknown mysql cache store")
		}
	}

	if this.conf.Mongodb.Enabled() {
		log.Debug("creating servant: mongodb")
		this.mg = mongo.New(this.conf.Mongodb)
		if this.conf.Mongodb.DebugProtocol ||
			this.conf.Mongodb.DebugHeartbeat {
			mgo.SetLogger(&mongoProtocolLogger{})
			mgo.SetDebug(this.conf.Mongodb.DebugProtocol)
		}
	}

	if this.conf.Couchbase.Enabled() {
		log.Debug("creating servant: couchbase")

		var err error
		// pool is always 'default'
		this.cb, err = couch.New(this.conf.Couchbase.Servers, "default")
		if err != nil {
			log.Error("couchbase: %s", err)
		}
	}

	log.Info("servants created")
}