Code example #1
File: client.go Project: jsimonetti/tlstun
func (c *clientConnection) run() {
	c.log.Debug("starting yamux on ws")

	session, err := yamux.Server(c.websocket, nil)
	if err != nil {
		c.log.Crit("could not initialise yamux session", log.Ctx{"error": err})
	}
	c.log.Debug("yamux session started")
	c.session = session

	c.log.Debug("listening for streams")
	// Accept a stream
	for {
		stream, id, err := c.acceptStream()
		if err != nil {
			if err != io.EOF {
				c.log.Error("error acception stream", log.Ctx{"error": err})
			}
			c.websocket.Close()
			c.session.Close()
			return
		}
		c.log.Debug("accepted stream", log.Ctx{"streamid": id})
		go c.handleStream(stream, id)
	}
}
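
For context, the dialing side of such a tunnel wraps its own connection with yamux.Client and opens streams for the accept loop above to pick up. Below is a minimal self-contained sketch over plain TCP (the project above uses a websocket instead); the address and structure are illustrative, not taken from jsimonetti/tlstun.

package main

import (
	"log"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	// Dial the tunnel endpoint; the real client dials a websocket instead.
	conn, err := net.Dial("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}

	// yamux.Client pairs with the yamux.Server created in run() above.
	session, err := yamux.Client(conn, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Every Open() produces a stream that the server's acceptStream loop
	// receives and hands to handleStream.
	stream, err := session.Open()
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()

	if _, err := stream.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
}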
Code example #2
File: provider_test.go Project: vektra/gdata
func testHandshake(t *testing.T, list net.Listener, expect *HandshakeRequest) {
	client, err := list.Accept()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer client.Close()

	preamble := make([]byte, len(clientPreamble))
	n, err := client.Read(preamble)
	if err != nil || n != len(preamble) {
		t.Fatalf("err: %v", err)
	}

	server, _ := yamux.Server(client, yamux.DefaultConfig())
	conn, err := server.Accept()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer conn.Close()
	rpcCodec := msgpackrpc.NewCodec(true, true, conn)

	rpcSrv := rpc.NewServer()
	rpcSrv.RegisterName("Session", &TestHandshake{t, expect})

	err = rpcSrv.ServeRequest(rpcCodec)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}
Code example #3
File: config_test.go Project: zanella/nomad
func startTLSServer(config *Config) (net.Conn, chan error) {
	errc := make(chan error, 1)

	tlsConfigServer, err := config.IncomingTLSConfig()
	if err != nil {
		errc <- err
		return nil, errc
	}

	client, server := net.Pipe()

	// Use yamux to buffer the reads, otherwise it's easy to deadlock
	muxConf := yamux.DefaultConfig()
	serverSession, _ := yamux.Server(server, muxConf)
	clientSession, _ := yamux.Client(client, muxConf)
	clientConn, _ := clientSession.Open()
	serverConn, _ := serverSession.Accept()

	go func() {
		tlsServer := tls.Server(serverConn, tlsConfigServer)
		if err := tlsServer.Handshake(); err != nil {
			errc <- err
		}
		close(errc)
		// Because net.Pipe() is unbuffered, if both sides
		// Close() simultaneously, we will deadlock as they
		// both send an alert and then block. So we make the
		// server read any data from the client until error or
		// EOF, which will allow the client to Close(), and
		// *then* we Close() the server.
		io.Copy(ioutil.Discard, tlsServer)
		tlsServer.Close()
	}()
	return clientConn, errc
}
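
A sketch of how a test might consume startTLSServer: the returned clientConn is wrapped with tls.Client, the handshake is driven from the test goroutine, and errc reports any server-side handshake error. The function name and the client *tls.Config with InsecureSkipVerify are illustrative only, assuming the usual crypto/tls and testing imports.

func testClientHandshake(t *testing.T, config *Config) {
	clientConn, errc := startTLSServer(config)
	if clientConn == nil {
		t.Fatalf("err: %v", <-errc)
	}

	// Drive the client half of the handshake over the yamux stream.
	tlsClient := tls.Client(clientConn, &tls.Config{InsecureSkipVerify: true})
	if err := tlsClient.Handshake(); err != nil {
		t.Fatalf("err: %v", err)
	}
	tlsClient.Close()

	// startTLSServer closes errc after a successful server handshake.
	if err := <-errc; err != nil {
		t.Fatalf("err: %v", err)
	}
}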
Code example #4
File: server.go Project: devendraPSL/terraform-api
// ServeConn runs a single connection.
//
// ServeConn blocks, serving the connection until the client hangs up.
func (s *Server) ServeConn(conn io.ReadWriteCloser) {
	// First create the yamux server to wrap this connection
	mux, err := yamux.Server(conn, nil)
	if err != nil {
		conn.Close()
		log.Printf("[ERR] plugin: %s", err)
		return
	}

	// Accept the control connection
	control, err := mux.Accept()
	if err != nil {
		mux.Close()
		log.Printf("[ERR] plugin: %s", err)
		return
	}

	// Create the broker and start it up
	broker := newMuxBroker(mux)
	go broker.Run()

	// Use the control connection to build the dispenser and serve the
	// connection.
	server := rpc.NewServer()
	server.RegisterName("Dispenser", &dispenseServer{
		ProviderFunc:    s.ProviderFunc,
		ProvisionerFunc: s.ProvisionerFunc,

		broker: broker,
	})
	server.ServeConn(control)
}
Code example #5
File: main.go Project: yuanjs/kcptun
// handle multiplex-ed connection
func handleMux(conn io.ReadWriteCloser, target string) {
	// stream multiplex
	var mux *yamux.Session
	config := &yamux.Config{
		AcceptBacklog:          256,
		EnableKeepAlive:        true,
		KeepAliveInterval:      30 * time.Second,
		ConnectionWriteTimeout: 30 * time.Second,
		MaxStreamWindowSize:    16777216,
		LogOutput:              os.Stderr,
	}
	m, err := yamux.Server(conn, config)
	if err != nil {
		log.Println(err)
		return
	}
	mux = m
	defer mux.Close()

	for {
		p1, err := mux.Accept()
		if err != nil {
			log.Println(err)
			return
		}
		p2, err := net.DialTimeout("tcp", target, 5*time.Second)
		if err != nil {
			log.Println(err)
			return
		}
		go handleClient(p1, p2)
	}
}
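
The dialing side has to use yamux.Client with a compatible configuration (window size and keep-alive settings in particular) so that the streams it opens are the connections handleMux accepts. A rough sketch, not taken from the kcptun source; conn would typically be the client's KCP connection.

func dialMux(conn io.ReadWriteCloser) (*yamux.Session, error) {
	// Mirror the server-side configuration above so both ends agree.
	config := &yamux.Config{
		AcceptBacklog:          256,
		EnableKeepAlive:        true,
		KeepAliveInterval:      30 * time.Second,
		ConnectionWriteTimeout: 30 * time.Second,
		MaxStreamWindowSize:    16777216,
		LogOutput:              os.Stderr,
	}
	return yamux.Client(conn, config)
}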
Code example #6
File: expose.go Project: nicot/expose
func downstreamServe(conn net.Conn, downstream host) error {
	session, err := yamux.Server(conn, nil)
	if err != nil {
		return err
	}

	control, err := session.Accept()
	if err != nil {
		return err
	}

	laddr, err := ioutil.ReadAll(control)
	if err != nil {
		return err
	}
	fmt.Println(string(laddr))

	for {
		incoming, err := session.Accept()
		if err != nil {
			return err
		}

		outgoing, err := net.Dial("tcp", downstream.String())
		if err != nil {
			return err
		}

		go handle(proxy(incoming, outgoing))
	}
}
Code example #7
File: mux_broker.go Project: JNPRAutomate/packer
func newMuxBrokerServer(rwc io.ReadWriteCloser) (*muxBroker, error) {
	s, err := yamux.Server(rwc, nil)
	if err != nil {
		return nil, err
	}

	return newMuxBroker(s), nil
}
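
The client end of the same broker would wrap its connection with yamux.Client instead; a plausible counterpart (a sketch, not necessarily the exact function in the packer source) looks like this:

func newMuxBrokerClient(rwc io.ReadWriteCloser) (*muxBroker, error) {
	c, err := yamux.Client(rwc, nil)
	if err != nil {
		return nil, err
	}

	return newMuxBroker(c), nil
}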
Code example #8
File: yamuxer.go Project: blacklabeldata/cerebrum
func (y *yamuxer) handleConn(g grim.GrimReaper, conn net.Conn) {
	defer g.Wait()

	conf := yamux.DefaultConfig()
	conf.LogOutput = y.logOutput
	session, _ := yamux.Server(conn, conf)

	streamCh := make(chan net.Conn)
	g.SpawnFunc(processStreams(g.New(), conn, streamCh, y.dispatcher))
	g.SpawnFunc(acceptStreams(y.logger, session, streamCh))
}
Code example #9
File: service.go Project: 40a/vega
func (s *Service) acceptMux(c net.Conn) {
	defer s.wg.Done()

	session, err := yamux.Server(c, muxConfig)
	if err != nil {
		if eofish(err) {
			return
		}

		panic(err)
	}

	defer session.Close()

	acs := make(chan acceptStream, 1)

	debugf("new session for %s\n", c.RemoteAddr())

	data := &clientData{
		parent:     c,
		session:    session,
		inflight:   make(map[MessageId]*Delivery),
		ephemerals: make(map[string]*clientEphemeralInfo),
		done:       make(chan struct{}),
	}

	for {
		go func() {
			stream, err := session.AcceptStream()
			acs <- acceptStream{stream, err}
		}()

		select {
		case <-s.shutdown:
			session.Close()
			s.cleanupConn(c, data)
			return
		case ac := <-acs:
			if ac.err != nil {
				if eofish(ac.err) {
					debugf("eof detected starting a new stream\n")
					s.cleanupConn(c, data)
					return
				}

				panic(ac.err)
			}

			go s.handle(c, ac.stream, data)
		}
	}
}
Code example #10
File: rpc_server.go Project: PagerDuty/nomad
// ServeConn runs a single connection.
//
// ServeConn blocks, serving the connection until the client hangs up.
func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
	// First create the yamux server to wrap this connection
	mux, err := yamux.Server(conn, nil)
	if err != nil {
		conn.Close()
		log.Printf("[ERR] plugin: error creating yamux server: %s", err)
		return
	}

	// Accept the control connection
	control, err := mux.Accept()
	if err != nil {
		mux.Close()
		if err != io.EOF {
			log.Printf("[ERR] plugin: error accepting control connection: %s", err)
		}

		return
	}

	// Connect the stdstreams (in, out, err)
	stdstream := make([]net.Conn, 2)
	for i := range stdstream {
		stdstream[i], err = mux.Accept()
		if err != nil {
			mux.Close()
			log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
			return
		}
	}

	// Copy std streams out to the proper place
	go copyStream("stdout", stdstream[0], s.Stdout)
	go copyStream("stderr", stdstream[1], s.Stderr)

	// Create the broker and start it up
	broker := newMuxBroker(mux)
	go broker.Run()

	// Use the control connection to build the dispenser and serve the
	// connection.
	server := rpc.NewServer()
	server.RegisterName("Control", &controlServer{
		server: s,
	})
	server.RegisterName("Dispenser", &dispenseServer{
		broker:  broker,
		plugins: s.Plugins,
	})
	server.ServeConn(control)
}
Code example #11
File: provider_test.go Project: vektra/gdata
func TestProvider_Disconnect(t *testing.T) {
	config := testProviderConfig()
	p, err := NewProvider(config)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer p.Shutdown()

	// Setup RPC client
	a, b := testConn(t)
	client, _ := yamux.Client(a, yamux.DefaultConfig())
	server, _ := yamux.Server(b, yamux.DefaultConfig())
	go p.handleSession(client, make(chan struct{}))

	stream, _ := server.Open()
	cc := msgpackrpc.NewCodec(false, false, stream)

	// Make the connect rpc
	args := &DisconnectRequest{
		NoRetry: true,
		Backoff: 300 * time.Second,
	}
	resp := &DisconnectResponse{}
	err = msgpackrpc.CallWithCodec(cc, "Client.Disconnect", args, resp)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	p.backoffLock.Lock()
	defer p.backoffLock.Unlock()

	if p.backoff != 300*time.Second {
		t.Fatalf("bad: %v", p.backoff)
	}
	if !p.noRetry {
		t.Fatalf("bad")
	}

	p.sessionLock.Lock()
	defer p.sessionLock.Unlock()

	if p.sessionID != "" {
		t.Fatalf("Bad: %v", p.sessionID)
	}
	if p.sessionAuth {
		t.Fatalf("Bad: %v", p.sessionAuth)
	}
}
Code example #12
File: provider_test.go Project: vektra/gdata
func TestProvider_Connect(t *testing.T) {
	config := testProviderConfig()
	config.Service.Capabilities["foo"] = 1
	config.Handlers["foo"] = fooCapability(t)
	p, err := NewProvider(config)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer p.Shutdown()

	// Setup RPC client
	a, b := testConn(t)
	client, _ := yamux.Client(a, yamux.DefaultConfig())
	server, _ := yamux.Server(b, yamux.DefaultConfig())
	go p.handleSession(client, make(chan struct{}))

	stream, _ := server.Open()
	cc := msgpackrpc.NewCodec(false, false, stream)

	// Make the connect rpc
	args := &ConnectRequest{
		Capability: "foo",
		Meta: map[string]string{
			"zip": "zap",
		},
	}
	resp := &ConnectResponse{}
	err = msgpackrpc.CallWithCodec(cc, "Client.Connect", args, resp)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should be successful!
	if !resp.Success {
		t.Fatalf("bad")
	}

	// At this point, we should be connected
	out := make([]byte, 9)
	n, err := stream.Read(out)
	if err != nil {
		t.Fatalf("err: %v %d", err, n)
	}

	if string(out) != "foobarbaz" {
		t.Fatalf("bad: %s", out)
	}
}
Code example #13
File: onecache_rpc.go Project: dadgar/onecache
// handleMultiplex handles a multiplexed connection.
func (r *rpcServer) handleMultiplex(conn net.Conn) {
	defer conn.Close()
	conf := yamux.DefaultConfig()
	conf.LogOutput = r.n.config.LogOutput
	server, _ := yamux.Server(conn, conf)
	for {
		sub, err := server.Accept()
		if err != nil {
			if err != io.EOF {
				r.n.logger.Printf("[ERR] onecache.rpc: multiplex conn accept failed: %v", err)
			}
			return
		}
		go r.handleConn(sub)
	}
}
Code example #14
File: rpc.go Project: carriercomm/nomad
// handleMultiplex is used to multiplex a single incoming connection
// using the Yamux multiplexer
func (s *Server) handleMultiplex(conn net.Conn) {
	defer conn.Close()
	conf := yamux.DefaultConfig()
	conf.LogOutput = s.config.LogOutput
	server, _ := yamux.Server(conn, conf)
	for {
		sub, err := server.Accept()
		if err != nil {
			if err != io.EOF {
				s.logger.Printf("[ERR] nomad.rpc: multiplex conn accept failed: %v", err)
			}
			return
		}
		go s.handleNomadConn(sub)
	}
}
Code example #15
File: listener.go Project: spambarrier/anaLog
func (li *Listener) getSession() (*yamux.Session, error) {
	li.mu.Lock()
	defer li.mu.Unlock()

	if li.session != nil {
		return li.session, nil
	}

	// connect to the socket master
	conn, err := net.Dial("tcp", li.socketMasterAddress)
	if err != nil {
		return nil, err
	}

	// bind to a port
	err = protocol.WriteHandshakeRequest(conn, protocol.HandshakeRequest{
		SocketDefinition: li.socketDefinition,
	})
	if err != nil {
		conn.Close()
		return nil, err
	}

	// see if that worked
	res, err := protocol.ReadHandshakeResponse(conn)
	if err != nil {
		conn.Close()
		return nil, err
	}
	if res.Status != "OK" {
		conn.Close()
		return nil, fmt.Errorf("%s", res.Status)
	}

	// start a new session
	session, err := yamux.Server(conn, yamux.DefaultConfig())
	if err != nil {
		conn.Close()
		return nil, err
	}

	return session, nil
}
Code example #16
File: provider_test.go Project: vektra/gdata
func TestProvider_Flash(t *testing.T) {
	config := testProviderConfig()
	buf := bytes.NewBuffer(nil)
	config.LogOutput = buf
	p, err := NewProvider(config)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer p.Shutdown()

	// Setup RPC client
	a, b := testConn(t)
	client, _ := yamux.Client(a, yamux.DefaultConfig())
	server, _ := yamux.Server(b, yamux.DefaultConfig())
	go p.handleSession(client, make(chan struct{}))

	stream, _ := server.Open()
	cc := msgpackrpc.NewCodec(false, false, stream)

	// Make the connect rpc
	args := &FlashRequest{
		Severity: "INFO",
		Message:  "TESTING",
	}
	resp := &FlashResponse{}
	err = msgpackrpc.CallWithCodec(cc, "Client.Flash", args, resp)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait until the flash message shows up in the log output
	start := time.Now()
	for time.Now().Sub(start) < time.Second {
		if bytes.Contains(buf.Bytes(), []byte("TESTING")) {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if !bytes.Contains(buf.Bytes(), []byte("TESTING")) {
		t.Fatalf("missing: %s", buf)
	}
}
Code example #17
File: server.go Project: keshavdv/subway
func handler(ws *websocket.Conn) {
	// Setup server side of yamux
	session, err := yamux.Server(ws, nil)
	if err != nil {
		panic(err)
	}

	// Handle new streams
	for {
		stream, err := session.Accept()
		if err != nil {
			if session.IsClosed() {
				// TODO: tunnel is no longer needed, close locally bound ports for this session
				log.Info("session closed")
				break
			}
			// Print errors
			log.Error("yamux error: %s", err)
			continue
		}
		go tunnel(stream)
	}
}
Code example #18
func testYamux(t *testing.T) (client *yamux.Session, server *yamux.Session) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Server side
	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		conn, err := l.Accept()
		l.Close()
		if err != nil {
			t.Fatalf("err: %s", err)
		}

		server, err = yamux.Server(conn, nil)
		if err != nil {
			t.Fatalf("err: %s", err)
		}
	}()

	// Client side
	conn, err := net.Dial("tcp", l.Addr().String())
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	client, err = yamux.Client(conn, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Wait for the server
	<-doneCh

	return
}
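
A brief illustrative use of testYamux: the client session opens a stream, the server session accepts it, and bytes written on one end come back from an echo loop on the other. The test name is hypothetical, and it assumes the io, testing, and yamux imports already present in such a file.

func TestYamuxRoundTrip(t *testing.T) {
	client, server := testYamux(t)
	defer client.Close()
	defer server.Close()

	// Echo everything received on the first accepted stream.
	go func() {
		conn, err := server.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		io.Copy(conn, conn)
	}()

	stream, err := client.Open()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer stream.Close()

	if _, err := stream.Write([]byte("ping")); err != nil {
		t.Fatalf("err: %s", err)
	}

	buf := make([]byte, 4)
	if _, err := io.ReadFull(stream, buf); err != nil {
		t.Fatalf("err: %s", err)
	}
	if string(buf) != "ping" {
		t.Fatalf("bad: %s", buf)
	}
}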
Code example #19
File: client.go Project: spambarrier/anaLog
// Listen connects to the socket master, binds a port, and accepts
// multiplexed traffic as new connections
func (client *Client) Listen(socketDefinition protocol.SocketDefinition) (net.Listener, error) {
	// connect to the socket master
	conn, err := net.Dial("tcp", client.socketMasterAddress)
	if err != nil {
		return nil, err
	}

	// bind to a port
	err = protocol.WriteHandshakeRequest(conn, protocol.HandshakeRequest{
		SocketDefinition: socketDefinition,
	})
	if err != nil {
		conn.Close()
		return nil, err
	}

	// see if that worked
	res, err := protocol.ReadHandshakeResponse(conn)
	if err != nil {
		conn.Close()
		return nil, err
	}
	if res.Status != "OK" {
		conn.Close()
		return nil, fmt.Errorf("%s", res.Status)
	}

	// start a new session
	session, err := yamux.Server(conn, yamux.DefaultConfig())
	if err != nil {
		conn.Close()
		return nil, err
	}

	return session, nil
}
Code example #20
File: main.go Project: pjvds/publichost
func main() {
	app := cli.NewApp()
	app.Name = "publichost"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:   "publichost, p",
			Value:  "",
			Usage:  "the address of the publichost server",
			EnvVar: "PUBLICHOST",
		},
	}
	app.Commands = []cli.Command{
		cli.Command{
			Name: "dir",
			Action: func(ctx *cli.Context) {
				localDir := ctx.Args().First()
				if len(localDir) == 0 {
					log.Fatal("local directory not specified")
				}

				if _, err := os.Stat(localDir); err != nil {
					log.Fatal(err.Error())
				}

				log.Println("connecting to server")
				conn, err := tls.Dial("tcp", "api.publichost.io:443", nil)
				if err != nil {
					log.Fatal(err.Error())
				}
				if _, err = conn.Write([]byte("GET /tunnel HTTP/1.1\r\nHost: api.publichost.io\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\nSec-WebSocket-Version: 13\r\n\r\n")); err != nil {
					log.Fatal(err.Error())
				}

				log.Println("opening tunnel")
				reader := bufio.NewReader(conn)
				response, err := http.ReadResponse(reader, nil)
				if err != nil {
					log.Fatal(err.Error())
				}

				log.Print("tunnel available at: " + response.Header.Get("X-Publichost-Address"))
				tunnel, err := yamux.Server(conn, nil)
				if err != nil {
					log.Fatal(err.Error())
				}

				fileserver := http.FileServer(http.Dir(localDir))
				handler := handlers.CombinedLoggingHandler(os.Stdout, fileserver)
				if err := http.Serve(tunnel, handler); err != nil {
					log.Fatal(err.Error())
				}
			},
		},
		cli.Command{
			Name: "http",
			Action: func(ctx *cli.Context) {
				localUrl := ctx.Args().First()

				log.Println("connecting to server")
				conn, err := tls.Dial("tcp", "api.publichost.io:443", nil)
				if err != nil {
					log.Fatal(err.Error())
				}

				request, err := http.NewRequest("GET", "api.publichost.io", nil)
				if err != nil {
					log.Fatal(err)
				}

				request.Header.Set("X-Publichost-Local", localUrl)
				request.Header.Set("Upgrade", "websocket")
				request.Header.Set("Connection", "Upgrade")
				request.Header.Set("Sec-WebSocket-Key", "dGhlIHNhbXBsZSBub25jZQ==")
				request.Header.Set("Sec-WebSocket-Version", "13")

				if err = request.Write(conn); err != nil {
					log.Fatal(err.Error())
				}

				log.Println("opening tunnel")
				reader := bufio.NewReader(conn)
				response, err := http.ReadResponse(reader, nil)
				if err != nil {
					log.Fatal(err.Error())
				}

				log.Print("tunnel available at: " + response.Header.Get("X-Publichost-Address"))
				tunnel, err := yamux.Server(conn, nil)
				if err != nil {
					log.Fatal(err.Error())
				}

				local, err := url.Parse(localUrl)
				if err != nil {
					log.Fatal(err.Error())
				}

				handler := handlers.CombinedLoggingHandler(os.Stdout, httputil.NewSingleHostReverseProxy(local))

				if err := http.Serve(tunnel, handler); err != nil {
					log.Fatal(err.Error())
				}
			},
		},
	}
	app.RunAndExitOnError()
}
Code example #21
File: server.go Project: StalkR/misc
func handle(conn net.Conn) {
	remote := conn.RemoteAddr().String()
	session, err := yamux.Server(conn, nil)
	if err != nil {
		log.Printf("[%s] session error: %v", remote, err)
		return
	}

	done := make(chan struct{})

	cmd := exec.Command("/bin/bash")
	shellPty, err := pty.Start(cmd)
	if err != nil {
		log.Printf("[%s] pty error: %v", remote, err)
		return
	}
	go func() {
		if err := cmd.Wait(); err != nil {
			log.Printf("[%s] wait error: %v", remote, err)
		}
		done <- struct{}{}
	}()

	controlChannel, err := session.Accept()
	if err != nil {
		log.Printf("[%s] control channel accept error: %v", remote, err)
		return
	}
	go func() {
		r := gob.NewDecoder(controlChannel)
		for {
			var win struct {
				Rows, Cols int
			}
			if err := r.Decode(&win); err != nil {
				break
			}
			if err := Setsize(shellPty, win.Rows, win.Cols); err != nil {
				log.Printf("[%s] setsize error: %v", remote, err)
				break
			}
			if err := syscall.Kill(cmd.Process.Pid, syscall.SIGWINCH); err != nil {
				log.Printf("[%s] sigwinch error: %v", remote, err)
				break
			}
		}
		done <- struct{}{}
	}()

	dataChannel, err := session.Accept()
	if err != nil {
		log.Printf("[%s] data channel accept error: %v", remote, err)
		return
	}
	cp := func(dst io.Writer, src io.Reader) {
		io.Copy(dst, src)
		done <- struct{}{}
	}
	go cp(dataChannel, shellPty)
	go cp(shellPty, dataChannel)

	<-done
	shellPty.Close()
	session.Close() // closes controlChannel, dataChannel, session and conn
	log.Printf("[%s] done", remote)
}
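
The matching client for this remote shell has to open its streams in the same order the server accepts them: the control channel first (gob-encoded window sizes), then the data channel (terminal I/O). A hedged sketch, with names and the fixed 24x80 size purely illustrative and not taken from the StalkR/misc source:

func connect(addr string) error {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return err
	}
	session, err := yamux.Client(conn, nil)
	if err != nil {
		return err
	}
	defer session.Close()

	// First Open() matches the server's first Accept(): the control channel.
	control, err := session.Open()
	if err != nil {
		return err
	}
	// Second Open() matches the second Accept(): the data channel.
	data, err := session.Open()
	if err != nil {
		return err
	}

	// Send an initial terminal size; the server decodes the same field names.
	if err := gob.NewEncoder(control).Encode(struct{ Rows, Cols int }{24, 80}); err != nil {
		return err
	}

	// Wire the remote shell to the local terminal.
	go io.Copy(data, os.Stdin)
	_, err = io.Copy(os.Stdout, data)
	return err
}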
Code example #22
File: server.go Project: ultimatums/tunnel
// controlHandler is used to capture incoming tunnel connect requests into raw
// tunnel TCP connections.
func (s *Server) controlHandler(w http.ResponseWriter, r *http.Request) (ctErr error) {
	identifier := r.Header.Get(xKTunnelIdentifier)
	_, ok := s.getHost(identifier)
	if !ok {
		return fmt.Errorf("no host associated for identifier %s. please use server.AddHost()", identifier)
	}

	ct, ok := s.getControl(identifier)
	if ok {
		ct.Close()
		s.deleteControl(identifier)
		s.log.Warning("Control connection for '%s' already exists. This is a race condition and needs to be fixed on client implementation", identifier)
		return fmt.Errorf("control conn for %s already exist. \n", identifier)
	}

	s.log.Debug("Tunnel with identifier %s", identifier)

	hj, ok := w.(http.Hijacker)
	if !ok {
		return errors.New("webserver doesn't support hijacking")
	}

	conn, _, err := hj.Hijack()
	if err != nil {
		return fmt.Errorf("hijack not possible %s", err)
	}

	io.WriteString(conn, "HTTP/1.1 "+connected+"\n\n")

	conn.SetDeadline(time.Time{})

	s.log.Debug("Creating control session")
	session, err := yamux.Server(conn, s.yamuxConfig)
	if err != nil {
		return err
	}
	s.addSession(identifier, session)

	var stream net.Conn

	// close and delete the session/stream if something goes wrong
	defer func() {
		if ctErr != nil {
			if stream != nil {
				stream.Close()
			}
			s.deleteSession(identifier)
		}
	}()

	acceptStream := func() error {
		stream, err = session.Accept()
		return err
	}

	// if we don't receive anything from the client, we'll timeout
	select {
	case err := <-async(acceptStream):
		if err != nil {
			return err
		}
	case <-time.After(time.Second * 10):
		return errors.New("timeout getting session")
	}

	s.log.Debug("Initiating handshake protocol")
	buf := make([]byte, len(ctHandshakeRequest))
	if _, err := stream.Read(buf); err != nil {
		return err
	}

	if string(buf) != ctHandshakeRequest {
		return fmt.Errorf("handshake aborted. got: %s", string(buf))
	}

	if _, err := stream.Write([]byte(ctHandshakeResponse)); err != nil {
		return err
	}

	// setup control stream and start to listen to messages
	ct = newControl(stream)
	s.addControl(identifier, ct)
	go s.listenControl(ct)

	s.log.Debug("Control connection is setup")
	return nil
}