Code example #1
// Usage example for the thread pool.
func Example_threadPool() {
	// Create a new thread pool with a capacity of 5 concurrent workers
	workers := pool.NewThreadPool(5)

	// Start the pool (tasks may also be scheduled before starting; they simply
	// queue up until the workers begin executing)
	workers.Start()

	// Schedule some tasks (functions with no arguments nor return values)
	for i := 0; i < 10; i++ {
		id := i // Copy the loop variable for the closure (it is shared across iterations before Go 1.22)
		workers.Schedule(func() {
			time.Sleep(time.Duration(id) * 50 * time.Millisecond)
			fmt.Printf("Task #%d done.\n", id)
		})
	}
	// Terminate the pool gracefully (don't clear unstarted tasks)
	workers.Terminate(false)

	// Output:
	// Task #0 done.
	// Task #1 done.
	// Task #2 done.
	// Task #3 done.
	// Task #4 done.
	// Task #5 done.
	// Task #6 done.
	// Task #7 done.
	// Task #8 done.
	// Task #9 done.
}
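All of these examples share one small pool surface: NewThreadPool(n) allocates a pool of n workers, Schedule queues a func() (before or after Start), Start launches the workers, and Terminate(clear) shuts the pool down, draining queued tasks when clear is false and discarding them when clear is true. The import block is not part of these excerpts, so purely as orientation, here is a minimal hypothetical implementation of that surface; it is an illustrative sketch, not the pool package the iris projects actually ship.

package pool

import "sync"

// ThreadPool is a hypothetical stand-in for the pool used in these examples:
// tasks queue up until Start, then run on a fixed number of workers.
type ThreadPool struct {
	mu      sync.Mutex
	cond    *sync.Cond
	workers int
	tasks   []func()
	started bool
	closing bool
	done    sync.WaitGroup
}

// NewThreadPool creates a pool that will run at most workers tasks at once.
func NewThreadPool(workers int) *ThreadPool {
	p := &ThreadPool{workers: workers}
	p.cond = sync.NewCond(&p.mu)
	return p
}

// Schedule queues a task for execution; legal both before and after Start.
func (p *ThreadPool) Schedule(task func()) {
	p.mu.Lock()
	p.tasks = append(p.tasks, task)
	p.mu.Unlock()
	p.cond.Signal()
}

// Start launches the worker goroutines, which begin draining the queue.
func (p *ThreadPool) Start() {
	p.mu.Lock()
	if p.started {
		p.mu.Unlock()
		return
	}
	p.started = true
	p.mu.Unlock()

	p.done.Add(p.workers)
	for i := 0; i < p.workers; i++ {
		go func() {
			defer p.done.Done()
			for {
				p.mu.Lock()
				for len(p.tasks) == 0 && !p.closing {
					p.cond.Wait()
				}
				if len(p.tasks) == 0 { // closing and fully drained
					p.mu.Unlock()
					return
				}
				task := p.tasks[0]
				p.tasks = p.tasks[1:]
				p.mu.Unlock()
				task()
			}
		}()
	}
}

// Terminate shuts the pool down: with clear == false queued tasks still run,
// with clear == true they are dropped. It returns once all workers exit.
func (p *ThreadPool) Terminate(clear bool) {
	p.mu.Lock()
	p.closing = true
	if clear {
		p.tasks = nil
	}
	p.mu.Unlock()
	p.cond.Broadcast()
	p.done.Wait()
}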
Code example #2
File: broadcast_test.go Project: BinaryBlob/iris-go
func benchmarkBroadcastThroughput(threads int, b *testing.B) {
	// Create the service handler
	handler := &broadcastTestHandler{
		delivers: make(chan []byte, b.N),
	}
	// Register a new service to the relay
	serv, err := Register(config.relay, config.cluster, handler, nil)
	if err != nil {
		b.Fatalf("registration failed: %v.", err)
	}
	defer serv.Unregister()

	// Create the thread pool with the concurrent broadcasts
	workers := pool.NewThreadPool(threads)
	for i := 0; i < b.N; i++ {
		i := i // Copy the loop variable; the closure would otherwise see a shared i (pre-Go 1.22)
		workers.Schedule(func() {
			if err := handler.conn.Broadcast(config.cluster, []byte{byte(i)}); err != nil {
				b.Fatalf("broadcast failed: %v.", err)
			}
		})
	}
	// Reset timer and benchmark the message transfer
	b.ResetTimer()
	workers.Start()
	for i := 0; i < b.N; i++ {
		<-handler.delivers
	}
	workers.Terminate(false)

	// Stop the timer (don't measure deferred cleanup)
	b.StopTimer()
}
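One caveat in this benchmark as written: (*testing.B).Fatalf calls FailNow, which the testing package only permits from the goroutine running the benchmark, while Schedule executes the closure on a pool worker. A hypothetical rework (names and structure otherwise as above, using "sync/atomic") records the first worker-side failure and reports it afterwards from the benchmark goroutine. Note that a failed broadcast also means one fewer delivery, so a production version would additionally select on a failure signal rather than block on the delivery count; the sketch keeps the original structure for clarity.

// benchmarkBroadcastThroughputSafe is a hypothetical variant that avoids
// calling b.Fatalf from a pool worker goroutine.
func benchmarkBroadcastThroughputSafe(threads int, b *testing.B) {
	handler := &broadcastTestHandler{delivers: make(chan []byte, b.N)}
	serv, err := Register(config.relay, config.cluster, handler, nil)
	if err != nil {
		b.Fatalf("registration failed: %v.", err)
	}
	defer serv.Unregister()

	var failure atomic.Value
	workers := pool.NewThreadPool(threads)
	for i := 0; i < b.N; i++ {
		i := i // pin the loop variable for the closure
		workers.Schedule(func() {
			if err := handler.conn.Broadcast(config.cluster, []byte{byte(i)}); err != nil {
				failure.Store(err) // record instead of failing off-goroutine
			}
		})
	}
	b.ResetTimer()
	workers.Start()
	for i := 0; i < b.N; i++ {
		<-handler.delivers
	}
	workers.Terminate(false)
	b.StopTimer()

	// Back on the benchmark goroutine, Fatalf is legal again
	if err, ok := failure.Load().(error); ok {
		b.Fatalf("broadcast failed: %v.", err)
	}
}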
Code example #3
File: connection.go Project: BinaryBlob/iris-go
// Connects to a local relay endpoint on the given port and registers as the given cluster.
func newConnection(port int, cluster string, handler ServiceHandler, limits *ServiceLimits, logger log15.Logger) (*Connection, error) {
	// Connect to the iris relay node
	addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("localhost:%d", port))
	if err != nil {
		return nil, err
	}
	sock, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		return nil, err
	}
	// Create the relay object
	conn := &Connection{
		// Application layer
		handler: handler,

		reqReps: make(map[uint64]chan []byte),
		reqErrs: make(map[uint64]chan error),
		subLive: make(map[string]*topic),
		tunLive: make(map[uint64]*Tunnel),

		// Network layer
		sock:    sock,
		sockBuf: bufio.NewReadWriter(bufio.NewReader(sock), bufio.NewWriter(sock)),

		// Bookkeeping
		quit: make(chan chan error),
		term: make(chan struct{}),

		Log: logger,
	}
	// Initialize service QoS fields
	if cluster != "" {
		conn.limits = limits
		conn.bcastPool = pool.NewThreadPool(limits.BroadcastThreads)
		conn.reqPool = pool.NewThreadPool(limits.RequestThreads)
	}
	// Initialize the connection and wait for a confirmation
	if err := conn.sendInit(cluster); err != nil {
		return nil, err
	}
	if _, err := conn.procInit(); err != nil {
		return nil, err
	}
	// Start the network receiver and return
	go conn.process()
	return conn, nil
}
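For orientation only (an assumption inferred from the call sites in the other examples, not code shown in this excerpt): the exported entry points presumably wrap this constructor, passing an empty cluster for plain clients and a non-empty one for services. Since the broadcast benchmark above passes nil limits when registering a service, and newConnection dereferences limits whenever cluster is non-empty, the real wrapper must fill in defaults first. A hypothetical sketch:

// Hypothetical wrappers; the names mirror the Connect/Register calls seen in
// the benchmark examples, but these bodies are illustrative only.
func Connect(port int) (*Connection, error) {
	return newConnection(port, "", nil, nil, log15.New())
}

func Register(port int, cluster string, handler ServiceHandler, limits *ServiceLimits) (*Connection, error) {
	if limits == nil {
		limits = &ServiceLimits{BroadcastThreads: 4, RequestThreads: 4} // assumed defaults
	}
	return newConnection(port, cluster, handler, limits, log15.New())
}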
Code example #4
File: overlay.go Project: runcnc/iris
// Creates a new overlay structure with all internal state initialized, ready to
// be booted.
func New(id string, key *rsa.PrivateKey, app Callback) *Overlay {
	// Generate the random node id for this overlay peer
	peerId := make([]byte, config.PastrySpace/8)
	if n, err := io.ReadFull(rand.Reader, peerId); n < len(peerId) || err != nil {
		panic(fmt.Sprintf("failed to generate node id: %v", err))
	}
	nodeId := new(big.Int).SetBytes(peerId)

	// Assemble and return the overlay instance
	o := &Overlay{
		app: app,

		authId:  id,
		authKey: key,

		nodeId: nodeId,
		addrs:  []string{},

		livePeers: make(map[string]*peer),
		routes:    newRoutingTable(nodeId),
		time:      1,

		acceptQuit: []chan chan error{},
		maintQuit:  make(chan chan error),

		authInit:   pool.NewThreadPool(config.PastryAuthThreads),
		authAccept: pool.NewThreadPool(config.PastryAuthThreads),
		stateExch:  pool.NewThreadPool(config.PastryExchThreads),

		exchSet:     make(map[*peer]*state),
		dropSet:     make(map[*peer]struct{}),
		eventNotify: make(chan struct{}, 1), // Buffer one notification
	}
	o.heart = newHeart(o)
	return o
}
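The constructor leaves the overlay "ready to be booted", so a caller is expected to create it and then start it explicitly. A hypothetical usage sketch follows; Boot and Shutdown are assumed names for the lifecycle methods implied by the doc comment, not taken from this excerpt.

// startOverlay is a hypothetical caller; rand here is crypto/rand, as in the
// constructor above.
func startOverlay(app Callback) (*Overlay, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	o := New("cluster-auth-id", key, app)
	if err := o.Boot(); err != nil { // assumed method and signature
		return nil, err
	}
	return o, nil
}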
Code example #5
File: topic.go Project: BinaryBlob/iris-go
// Creates a new topic subscription.
func newTopic(handler TopicHandler, limits *TopicLimits, logger log15.Logger) *topic {
	top := &topic{
		// Application layer
		handler: handler,

		// Quality of service
		limits:    limits,
		eventPool: pool.NewThreadPool(limits.EventThreads),

		// Bookkeeping
		logger: logger,
	}
	// Start the event processing and return
	top.eventPool.Start()
	return top
}
Code example #6
File: connection.go Project: simia-tech/iris
// Connects to the iris overlay. Either both parameters are specified (service
// registration) or both are omitted (client connection); any other combination
// will fail.
func (o *Overlay) Connect(cluster string, handler ConnectionHandler) (*Connection, error) {
	// Make sure only valid argument combinations pass
	if (cluster == "" && handler != nil) || (cluster != "" && handler == nil) {
		return nil, fmt.Errorf("invalid connection arguments: cluster '%v', handler %v", cluster, handler)
	}
	// Create the connection object
	c := &Connection{
		cluster: cluster,
		handler: handler,
		iris:    o,

		reqReps: make(map[uint64]chan []byte),
		reqErrs: make(map[uint64]chan error),
		subLive: make(map[string]SubscriptionHandler),
		tunLive: make(map[uint64]*Tunnel),

		// Quality of service
		workers: pool.NewThreadPool(config.IrisHandlerThreads),

		// Bookkeeping
		quit: make(chan chan error),
		term: make(chan struct{}),
	}
	// Assign a connection id and track it
	o.lock.Lock()
	c.id, o.autoid = o.autoid, o.autoid+1
	o.conns[c.id] = c
	o.lock.Unlock()

	// Subscribe to the multi-group if the connection is a service
	if c.cluster != "" {
		for _, prefix := range clusterPrefixes {
			if err := c.iris.subscribe(c.id, prefix+cluster); err != nil {
				return nil, err
			}
		}
	}
	c.workers.Start()

	return c, nil
}
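Since the validation at the top only admits two call shapes, a hypothetical caller-side sketch makes the contract concrete; the overlay value and handler come from elsewhere in the program, and the cluster name "echo" is made up for illustration.

// dialBoth demonstrates the two valid argument combinations and one rejected mix.
func dialBoth(o *Overlay, handler ConnectionHandler) {
	// Pure client connection: both parameters omitted
	if _, err := o.Connect("", nil); err != nil {
		panic(err)
	}
	// Service registration: both parameters supplied
	if _, err := o.Connect("echo", handler); err != nil {
		panic(err)
	}
	// Mixed combinations are rejected by the validation above
	if _, err := o.Connect("echo", nil); err == nil {
		panic("expected invalid combination to fail")
	}
}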
Code example #7
File: pubsub_test.go Project: BinaryBlob/iris-go
func benchmarkPublishThroughput(threads int, b *testing.B) {
	// Connect to the local relay
	conn, err := Connect(config.relay)
	if err != nil {
		b.Fatalf("connection failed: %v", err)
	}
	defer conn.Close()

	// Subscribe to a topic and wait for state propagation
	handler := &publishTestTopicHandler{
		delivers: make(chan []byte, b.N),
	}
	if err := conn.Subscribe(config.topic, handler, nil); err != nil {
		b.Fatalf("subscription failed: %v", err)
	}
	defer conn.Unsubscribe(config.topic)
	time.Sleep(100 * time.Millisecond)

	// Create the thread pool with the concurrent publishes
	workers := pool.NewThreadPool(threads)
	for i := 0; i < b.N; i++ {
		i := i // Copy the loop variable; the closure would otherwise see a shared i (pre-Go 1.22)
		workers.Schedule(func() {
			if err := conn.Publish(config.topic, []byte{byte(i)}); err != nil {
				b.Fatalf("failed to publish: %v.", err)
			}
		})
	}
	// Reset timer and benchmark the message transfer
	b.ResetTimer()
	workers.Start()
	for i := 0; i < b.N; i++ {
		<-handler.delivers
	}
	workers.Terminate(false)

	// Stop the timer (don't measure deferred cleanup)
	b.StopTimer()
}
Code example #8
File: relay.go Project: ibmendoza/iris-0.3.2
// Accepts an inbound relay connection, executing the initialization procedure.
func (r *Relay) acceptRelay(sock net.Conn) (*relay, error) {
	// Create the relay object
	rel := &relay{
		reqReps: make(map[uint64]chan []byte),
		reqErrs: make(map[uint64]chan error),
		tunPend: make(map[uint64]*iris.Tunnel),
		tunInit: make(map[uint64]chan struct{}),
		tunLive: make(map[uint64]*tunnel),

		// Network layer
		sock:    sock,
		sockBuf: bufio.NewReadWriter(bufio.NewReader(sock), bufio.NewWriter(sock)),

		// Quality of service
		workers: pool.NewThreadPool(config.RelayHandlerThreads),

		// Misc
		done: r.done,
		quit: make(chan chan error),
		term: make(chan struct{}),
	}
	// Lock the socket to ensure no writes pass during init
	rel.sockLock.Lock()
	defer rel.sockLock.Unlock()

	// Initialize the relay
	version, cluster, err := rel.procInit()
	if err != nil {
		rel.drop()
		return nil, err
	}
	// Make sure the protocol version is compatible
	if version != protoVersion {
		// Drop the connection in either error branch
		defer rel.drop()

		reason := fmt.Sprintf("Unsupported protocol. Client: %s. Iris: %s.", version, protoVersion)
		if err := rel.sendDeny(reason); err != nil {
			return nil, err
		}
		return nil, fmt.Errorf("relay: unsupported client protocol version: have %v, want %v", version, protoVersion)
	}
	// Connect to the Iris network either as a service or as a client
	var handler iris.ConnectionHandler
	if cluster != "" {
		handler = rel
	}
	conn, err := r.iris.Connect(cluster, handler)
	if err != nil {
		rel.drop()
		return nil, err
	}
	rel.iris = conn

	// Report the connection accepted
	if err := rel.sendInit(); err != nil {
		rel.drop()
		return nil, err
	}
	// Start accepting messages and return
	rel.workers.Start()
	go rel.process()
	return rel, nil
}