/*
receiverConnection starts individual receivers for this connection.
*/
func receiverConnection(conn *stompngo.Connection, cn, qpc int) {
	ltag := tag + "-receiverconnection"

	ll.Printf("%stag:%s connsess:%s starts cn:%d qpc:%d\n",
		exampid, ltag, conn.Session(),
		cn, qpc)

	// cn is a 1-based connection number; qpc is the number of destinations
	// handled per connection.  For example, with qpc == 2:
	//   cn 1 -> queues 1..2
	//   cn 2 -> queues 3..4
	//   cn 3 -> queues 5..6

	// This body runs exactly once per connection.

	// Derive the inclusive queue number range for this connection, and clamp
	// the upper bound to the configured maximum number of destinations.
	// The clamp is a safety valve that keeps a final, surplus connection from
	// starting any receivers (see the skip handling below).
	zb := cn - 1            // zero-based connection index
	first := qpc*zb + 1     // first queue number for this connection
	last := first + qpc - 1 // last queue number for this connection
	if last > sngecomm.Nqs() {
		last = sngecomm.Nqs() // truncate last if over max destinations
	}

	var wgrconn sync.WaitGroup

	// An empty range means this connection has nothing to do.
	skipped := first > last
	if skipped {
		// Skips are possible, at least with the current calling code, see above
		ll.Printf("%stag:%s connsess:%s startskip cn:%d q1:%d ql: %d\n",
			exampid, ltag, conn.Session(),
			cn, first, last)
	} else {
		ll.Printf("%stag:%s connsess:%s startq cn:%d q1:%d ql: %d\n",
			exampid, ltag, conn.Session(),
			cn, first, last)
	}

	// One receiver goroutine per queue in this connection's range.
	for qn := first; qn <= last; qn++ {
		wgrconn.Add(1)
		go runReceive(conn, qn, &wgrconn)
	}
	wgrconn.Wait()
	//
	ll.Printf("%stag:%s connsess:%s ends cn:%d qpc:%d skipped:%t\n",
		exampid, ltag, conn.Session(),
		cn, qpc, skipped)
	wgr.Done()
}
// Show a number of writers and readers operating concurrently from unique
// destinations.
func main() {

	st := time.Now()

	sngecomm.ShowRunParms(exampid)

	ll.Printf("%stag:%s connsess:%s main_starts\n",
		exampid, tag, sngecomm.Lcs)

	ll.Printf("%stag:%s connsess:%s main_profiling pprof:%v\n",
		exampid, tag, sngecomm.Lcs,
		sngecomm.Pprof())

	ll.Printf("%stag:%s connsess:%s main_current_GOMAXPROCS gmp:%v\n",
		exampid, tag, sngecomm.Lcs,
		runtime.GOMAXPROCS(-1))

	// Optionally raise GOMAXPROCS to the CPU count, logging before/after.
	if sngecomm.SetMAXPROCS() {
		nc := runtime.NumCPU()
		ll.Printf("%stag:%s connsess:%s main_current_num_cpus cncpu:%v\n",
			exampid, tag, sngecomm.Lcs,
			nc)
		gmp := runtime.GOMAXPROCS(nc)
		ll.Printf("%stag:%s connsess:%s main_previous_num_cpus pncpu:%v\n",
			exampid, tag, sngecomm.Lcs,
			gmp)
		ll.Printf("%stag:%s connsess:%s main_current_GOMAXPROCS gmp:%v\n",
			exampid, tag, sngecomm.Lcs,
			runtime.GOMAXPROCS(-1))
	}
	// Wait flags and sleep factors, from the environment.
	sw = sngecomm.SendWait()
	rw = sngecomm.RecvWait()
	sf = sngecomm.SendFactor()
	rf = sngecomm.RecvFactor()
	ll.Printf("%stag:%s connsess:%s main_wait_sleep_factors sw:%v rw:%v sf:%v rf:%v\n",
		exampid, tag, sngecomm.Lcs,
		sw, rw, sf, rf)
	// Number of queues (destinations).
	q := sngecomm.Nqs()
	// Run receivers and senders concurrently; each calls wga.Done() when done.
	wga.Add(2)
	go startReceivers(q)
	go startSenders(q)
	wga.Wait()

	// time.Since is the idiomatic form of time.Now().Sub(st).
	ll.Printf("%stag:%s connsess:%s main_elapsed:%v\n",
		exampid, tag, sngecomm.Lcs,
		time.Since(st))

}
// Example #3
// Connect to a STOMP broker, subscribe and receive some messages and disconnect.
func main() {

	st := time.Now()

	// Standard example connect sequence
	n, conn, e := sngecomm.CommonConnect(exampid, tag, ll)
	if e != nil {
		ll.Fatalf("%stag:%s connsess:%s main_on_connect error:%v",
			exampid, tag, sngecomm.Lcs,
			e.Error()) // Handle this ......
	}
	session = conn.Session()
	//******************
	// Start one receive per queue (destination).
	nqs := sngecomm.Nqs()
	for qn := 1; qn <= nqs; qn++ {
		runNextQueue(qn, conn)
	}
	//******************
	ll.Printf("%stag:%s connsess:%s start_drain_receives\n",
		exampid, tag, session)
	// Wait for every per-queue channel to signal completion.
	// (bare receive is the idiomatic form of `_ = <-v`)
	for _, v := range qcb {
		<-v
	}
	ll.Printf("%stag:%s connsess:%s end_drain_receives\n",
		exampid, tag, session)
	// Standard example disconnect sequence
	if dodisc {
		e = sngecomm.CommonDisconnect(n, conn, exampid, tag, ll)
		if e != nil {
			ll.Fatalf("%stag:%s connsess:%s main_on_disconnect error:%v",
				exampid, tag, session,
				e.Error()) // Handle this ......
		}
		ll.Printf("%stag:%s connsess:%s disconnect_receipt:%v\n",
			exampid, tag, session,
			conn.DisconnectReceipt)
	} else {
		ll.Printf("%stag:%s connsess:%s skipping_disconnect\n",
			exampid, tag, session)
	}

	// time.Since is the idiomatic form of time.Now().Sub(st).
	ll.Printf("%stag:%s connsess:%s main_elapsed:%v\n",
		exampid, tag, session,
		time.Since(st))
}
/*
startSender initializes the single send connection, and starts one sender go
for each destination.
*/
func startSender() {
	ltag := tag + "-startsender"

	netconn, conn := openSconn()
	ll.Printf("%stag:%s connsess:%s start\n",
		exampid, ltag, conn.Session())
	// One sender goroutine per destination, numbered from 1.
	for qnum := 1; qnum <= sngecomm.Nqs(); qnum++ {
		wgs.Add(1)
		go runSender(conn, fmt.Sprintf("%d", qnum))
	}
	wgs.Wait()
	ll.Printf("%stag:%s connsess:%s end\n",
		exampid, ltag, conn.Session())
	// Report per-connection statistics, then tear down the connection.
	sngecomm.ShowStats(exampid, ltag, conn)
	closeSconn(netconn, conn)
	// Signal overall completion to main.
	wga.Done()
}
/*
startReceivers creates connections per environment variables, and starts each
connection.
*/
func startReceivers() {

	ltag := tag + "-startreceivers"

	// This was a performance experiment.  With number of connections.
	// My recollection is that it did not work out.
	// However ..... I will leave this code in place for now.

	// Figure out number of receiver connections wanted
	nrc := sngecomm.Nqs() // 1 receiver per each destination
	nqs := nrc            // Number of queues (destinations) starts the same

	// STOMP_RECVCONNS optionally overrides the receiver connection count.
	if s := os.Getenv("STOMP_RECVCONNS"); s != "" {
		i, e := strconv.ParseInt(s, 10, 32)
		if e != nil {
			ll.Fatalf("%stag:%s connsess:%s RECVCONNS_conversion_error error:%v\n",
				exampid, ltag, sngecomm.Lcs,
				e.Error())
		}
		// Fatalf terminates the process, so no else branch is needed.
		nrc = int(i)
	}

	// Limit max receiver connection count to number of destinations
	if nrc > nqs {
		nrc = nqs
	}

	// Next calc. destinations per receiver
	dpr := nqs / nrc // Calculation first guess.
	if nqs%nrc != 0 {
		dpr += 1 // Bump destinations per receiver by 1.
	}
	// Destinations per receiver must be at least 1
	if dpr == 0 {
		dpr = 1
	}

	ll.Printf("%stag:%s connsess:%s start nrc:%d dpr:%d\n",
		exampid, ltag, sngecomm.Lcs,
		nrc, dpr)

	// So the idea seems to be allow more than one destination per receiver.
	// Open each connection and hand it off to a receiverConnection goroutine,
	// remembering the net.Conn / stompngo.Connection pairs for teardown.
	ncm := make([]net.Conn, 0)
	csm := make([]*stompngo.Connection, 0)
	for c := 1; c <= nrc; c++ { // :-)
		n, conn := openSconn()
		ncm = append(ncm, n)
		csm = append(csm, conn)
		wgr.Add(1)
		ll.Printf("%stag:%s connsess:%s connstart conn_number:%d nrc:%d dpr:%d\n",
			exampid, ltag, conn.Session(),
			c, nrc, dpr)
		go receiverConnection(conn, c, dpr)
	}
	wgr.Wait()
	ll.Printf("%stag:%s connsess:%s wait_done nrc:%d dpr:%d\n",
		exampid, ltag, sngecomm.Lcs,
		nrc, dpr)
	// Report stats and close every connection opened above.
	for c := 1; c <= nrc; c++ {
		ll.Printf("%stag:%s connsess:%s connend conn_number:%d nrc:%d dpr:%d\n",
			exampid, ltag, csm[c-1].Session(),
			c, nrc, dpr)
		sngecomm.ShowStats(exampid, ltag, csm[c-1])
		closeSconn(ncm[c-1], csm[c-1])
	}
	// Signal overall completion to main.
	wga.Done()
}
// main runs a send/receive example: one receiver goroutine and one sender
// goroutine per queue, each managing its own connection lifecycle.
func main() {

	st := time.Now()

	sngecomm.ShowRunParms(exampid)

	ll.Printf("%stag:%s connsess:%s main_starts\n",
		exampid, tag, sngecomm.Lcs)

	ll.Printf("%stag:%s connsess:%s main_profiling pprof:%v\n",
		exampid, tag, sngecomm.Lcs,
		sngecomm.Pprof())

	ll.Printf("%stag:%s connsess:%s main_current_GOMAXPROCS gmp:%v\n",
		exampid, tag, sngecomm.Lcs,
		runtime.GOMAXPROCS(-1))

	// Optionally raise GOMAXPROCS to the CPU count, logging before/after.
	if sngecomm.SetMAXPROCS() {
		nc := runtime.NumCPU()
		ll.Printf("%stag:%s connsess:%s main_current_num_cpus cncpu:%v\n",
			exampid, tag, sngecomm.Lcs,
			nc)
		gmp := runtime.GOMAXPROCS(nc)
		ll.Printf("%stag:%s connsess:%s main_previous_num_cpus pncpu:%v\n",
			exampid, tag, sngecomm.Lcs,
			gmp)
		ll.Printf("%stag:%s connsess:%s main_current_GOMAXPROCS gmp:%v\n",
			exampid, tag, sngecomm.Lcs,
			runtime.GOMAXPROCS(-1))
	}
	// Wait flags and sleep factors, from the environment.
	sw = sngecomm.SendWait()
	rw = sngecomm.RecvWait()
	sf = sngecomm.SendFactor()
	rf = sngecomm.RecvFactor()
	ll.Printf("%stag:%s connsess:%s main_wait_sleep_factors sw:%v rw:%v sf:%v rf:%v\n",
		exampid, tag, sngecomm.Lcs,
		sw, rw, sf, rf)
	//
	numq := sngecomm.Nqs()
	nmsgs = senv.Nmsgs() // message count
	//
	ll.Printf("%stag:%s connsess:%s main_starting_receivers\n",
		exampid, tag, sngecomm.Lcs)
	for q := 1; q <= numq; q++ {
		wgr.Add(1)
		go runReceiver(q)
	}
	ll.Printf("%stag:%s connsess:%s main_started_receivers\n",
		exampid, tag, sngecomm.Lcs)
	//
	ll.Printf("%stag:%s connsess:%s main_starting_senders\n",
		exampid, tag, sngecomm.Lcs)
	for q := 1; q <= numq; q++ {
		wgs.Add(1)
		go runSender(q)
	}
	ll.Printf("%stag:%s connsess:%s main_started_senders\n",
		exampid, tag, sngecomm.Lcs)
	// Wait for senders first, then receivers.
	wgs.Wait()
	ll.Printf("%stag:%s connsess:%s main_senders_complete\n",
		exampid, tag, sngecomm.Lcs)
	wgr.Wait()
	ll.Printf("%stag:%s connsess:%s main_receivers_complete\n",
		exampid, tag, sngecomm.Lcs)
	//

	// The end.  time.Since is the idiomatic form of time.Now().Sub(st).
	ll.Printf("%stag:%s connsess:%s main_elapsed:%v\n",
		exampid, tag, sngecomm.Lcs,
		time.Since(st))

}
// Show a number of writers and readers operating concurrently from unique
// destinations.
func main() {

	st := time.Now()

	sngecomm.ShowRunParms(exampid)

	ll.Printf("%stag:%s connsess:%s main_starts\n",
		exampid, tag, sngecomm.Lcs)

	ll.Printf("%stag:%s connsess:%s main_profiling pprof:%v\n",
		exampid, tag, sngecomm.Lcs,
		sngecomm.Pprof())

	ll.Printf("%stag:%s connsess:%s main_current_GOMAXPROCS gmp:%v\n",
		exampid, tag, sngecomm.Lcs,
		runtime.GOMAXPROCS(-1))

	// Optionally raise GOMAXPROCS to the CPU count, logging before/after.
	if sngecomm.SetMAXPROCS() {
		nc := runtime.NumCPU()
		ll.Printf("%stag:%s connsess:%s main_current_num_cpus cncpu:%v\n",
			exampid, tag, sngecomm.Lcs,
			nc)
		gmp := runtime.GOMAXPROCS(nc)
		ll.Printf("%stag:%s connsess:%s main_previous_num_cpus pncpu:%v\n",
			exampid, tag, sngecomm.Lcs,
			gmp)
		ll.Printf("%stag:%s connsess:%s main_current_GOMAXPROCS gmp:%v\n",
			exampid, tag, sngecomm.Lcs,
			runtime.GOMAXPROCS(-1))
	}
	// Wait flags
	sw = sngecomm.SendWait()
	rw = sngecomm.RecvWait()
	sf = sngecomm.SendFactor()
	rf = sngecomm.RecvFactor()
	ll.Printf("%stag:%s connsess:%s main_wait_sleep_factors sw:%v rw:%v sf:%v rf:%v\n",
		exampid, tag, sngecomm.Lcs,
		sw, rw, sf, rf)
	// Number of queues
	nqs := sngecomm.Nqs()

	// Standard example connect sequence
	var e error
	n, conn, e = sngecomm.CommonConnect(exampid, tag, ll)
	if e != nil {
		if conn != nil {
			ll.Printf("%stag:%s  connsess:%s Connect Response headers:%v body%s\n",
				exampid, tag, conn.Session(), conn.ConnectResponse.Headers,
				string(conn.ConnectResponse.Body))
		}
		ll.Fatalf("%stag:%s connsess:%s main_on_connect error:%v",
			exampid, tag, sngecomm.Lcs,
			e.Error()) // Handle this ......
	}

	// Many receivers running under the same connection can cause
	// (wire read) performance issues.  This is *very* dependent on the broker
	// being used, specifically the broker's algorithm for putting messages on
	// the wire.
	// To alleviate those issues, this strategy insures that messages are
	// received from the wire as soon as possible.  Those messages are then
	// buffered internally for (possibly later) application processing. In
	// this example, buffering occurs in the stompngo package.
	conn.SetSubChanCap(senv.SubChanCap()) // Experiment with this value, YMMV

	// Run everything
	wga.Add(2)
	go startReceivers(nqs)
	go startSenders(nqs)
	wga.Wait()

	// Standard example disconnect sequence
	e = sngecomm.CommonDisconnect(n, conn, exampid, tag, ll)
	if e != nil {
		ll.Fatalf("%stag:%s connsess:%s main_on_disconnect error:%v",
			exampid, tag, conn.Session(),
			e.Error()) // Handle this ......
	}

	sngecomm.ShowStats(exampid, tag, conn)

	// time.Since is the idiomatic form of time.Now().Sub(st).
	ll.Printf("%stag:%s connsess:%s main_elapsed:%v\n",
		exampid, tag, conn.Session(),
		time.Since(st))

}