Example #1
// pumpWrites causes goroutines blocking on meekConn.Write() to write
// to the specified writer. This function blocks until the meek response
// body limits (size for protocol v1, turnaround time for protocol v2+)
// are met, or the meekConn is closed.
// Note: channel scheme assumes only one concurrent call to pumpWrites
func (conn *meekConn) pumpWrites(writer io.Writer) error {

	startTime := monotime.Now()
	timeout := time.NewTimer(MEEK_TURN_AROUND_TIMEOUT)
	defer timeout.Stop()

	for {
		select {
		case buffer := <-conn.nextWriteBuffer:
			_, err := writer.Write(buffer)

			// Assumes that writeResult won't block.
			// Note: always send the err to writeResult,
			// as the Write() caller is blocking on this.
			conn.writeResult <- err

			if err != nil {
				return err
			}

			if conn.protocolVersion < MEEK_PROTOCOL_VERSION_2 {
				// Protocol v1 clients expect at most
				// MEEK_MAX_PAYLOAD_LENGTH response bodies
				return nil
			}
			// Compare durations directly; both sides are time.Duration.
			totalElapsedTime := monotime.Since(startTime)
			if totalElapsedTime >= MEEK_EXTENDED_TURN_AROUND_TIMEOUT {
				return nil
			}
			timeout.Reset(MEEK_TURN_AROUND_TIMEOUT)
		case <-timeout.C:
			return nil
		case <-conn.closeBroadcast:
			return io.EOF
		}
	}
}
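For reference, here is a minimal sketch of the Write() side of this channel scheme (a sketch only, not the actual meekConn implementation; it assumes the nextWriteBuffer, writeResult, and closeBroadcast fields seen above). Write() hands its buffer to pumpWrites and then blocks on writeResult, which is why the comment above insists that the error is always sent back:

func (conn *meekConn) Write(buffer []byte) (int, error) {
	// Hand the buffer to pumpWrites, unless the conn is closing.
	select {
	case conn.nextWriteBuffer <- buffer:
	case <-conn.closeBroadcast:
		return 0, io.EOF
	}
	// Block until pumpWrites reports the outcome of writer.Write().
	select {
	case err := <-conn.writeResult:
		if err != nil {
			return 0, err
		}
		return len(buffer), nil
	case <-conn.closeBroadcast:
		return 0, io.EOF
	}
}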
Example #2
// establishCandidateGenerator populates the candidate queue with server entries
// from the data store. Server entries are iterated in rank order, so that promoted
// servers with higher rank are priority candidates.
func (controller *Controller) establishCandidateGenerator(impairedProtocols []string) {
	defer controller.establishWaitGroup.Done()
	defer close(controller.candidateServerEntries)

	// establishStartTime is used to calculate and report the
	// client's tunnel establishment duration.
	//
	// networkWaitDuration is the elapsed time spent waiting
	// for network connectivity. This duration will be excluded
	// from reported tunnel establishment duration.
	establishStartTime := monotime.Now()
	var networkWaitDuration time.Duration

	iterator, err := NewServerEntryIterator(controller.config)
	if err != nil {
		NoticeAlert("failed to iterate over candidates: %s", err)
		controller.SignalComponentFailure()
		return
	}
	defer iterator.Close()

	isServerAffinityCandidate := true

	// TODO: reconcile server affinity scheme with multi-tunnel mode
	if controller.config.TunnelPoolSize > 1 {
		isServerAffinityCandidate = false
		close(controller.serverAffinityDoneBroadcast)
	}

loop:
	// Repeat until stopped
	for i := 0; ; i++ {

		networkWaitStartTime := monotime.Now()

		if !WaitForNetworkConnectivity(
			controller.config.NetworkConnectivityChecker,
			controller.stopEstablishingBroadcast,
			controller.shutdownBroadcast) {
			break loop
		}

		networkWaitDuration += monotime.Since(networkWaitStartTime)

		// Send each iterator server entry to the establish workers
		startTime := monotime.Now()
		for {
			serverEntry, err := iterator.Next()
			if err != nil {
				NoticeAlert("failed to get next candidate: %s", err)
				controller.SignalComponentFailure()
				break loop
			}
			if serverEntry == nil {
				// Completed this iteration
				break
			}

			if controller.config.TargetApiProtocol == common.PSIPHON_SSH_API_PROTOCOL &&
				!serverEntry.SupportsSSHAPIRequests() {
				continue
			}

			// Disable impaired protocols. This is only done for the
			// first iteration of the ESTABLISH_TUNNEL_WORK_TIME
			// loop since (a) one iteration should be sufficient to
			// evade the attack; (b) there's a good chance of false
			// positives (such as short tunnel durations due to network
			// hopping on a mobile device).
			// Impaired protocols logic is not applied when
			// config.TunnelProtocol is specified.
			// The edited serverEntry is a temporary copy which is not
			// stored or reused.
			if i == 0 && controller.config.TunnelProtocol == "" {
				serverEntry.DisableImpairedProtocols(impairedProtocols)
				if len(serverEntry.GetSupportedProtocols()) == 0 {
					// Skip this server entry, as it has no supported
					// protocols after disabling the impaired ones
					// TODO: modify ServerEntryIterator to skip these?
					continue
				}
			}

			// adjustedEstablishStartTime is establishStartTime shifted
			// to exclude time spent waiting for network connectivity.

			candidate := &candidateServerEntry{
				serverEntry:                serverEntry,
				isServerAffinityCandidate:  isServerAffinityCandidate,
				adjustedEstablishStartTime: establishStartTime.Add(networkWaitDuration),
			}

			// Note: there must be only one server affinity candidate, as it
			// closes the serverAffinityDoneBroadcast channel.
			isServerAffinityCandidate = false

			// TODO: here we could generate multiple candidates from the
			// server entry when there are many MeekFrontingAddresses.

			select {
			case controller.candidateServerEntries <- candidate:
			case <-controller.stopEstablishingBroadcast:
				break loop
			case <-controller.shutdownBroadcast:
				break loop
			}

			if startTime.Add(ESTABLISH_TUNNEL_WORK_TIME).Before(monotime.Now()) {
				// Start over, after a brief pause, with a new shuffle of the server
				// entries, and potentially some newly fetched server entries.
				break
			}
		}
		// Free up resources now, but don't reset until after the pause.
		iterator.Close()

		// Trigger a fetch remote server list, since we may have failed to
		// connect with all known servers. Don't block sending signal, since
		// this signal may have already been sent.
		// Don't wait for fetch remote to succeed, since it may fail and
		// enter a retry loop and we're better off trying more known servers.
		// TODO: synchronize the fetch response, so it can be incorporated
		// into the server entry iterator as soon as available.
		select {
		case controller.signalFetchRemoteServerList <- struct{}{}:
		default:
		}

		// Trigger an out-of-band upgrade availability check and download.
		// Since we may have failed to connect, we may benefit from upgrading
		// to a new client version with new circumvention capabilities.
		select {
		case controller.signalDownloadUpgrade <- "":
		default:
		}

		// After a complete iteration of candidate servers, pause before iterating again.
		// This helps avoid some busy wait loop conditions, and also allows some time for
		// network conditions to change. It also allows time, in typical
		// conditions, for the remote server list fetch to complete (waiting
		// for it isn't strictly necessary; there will be more rounds if required).
		timeout := time.After(
			time.Duration(*controller.config.EstablishTunnelPausePeriodSeconds) * time.Second)
		select {
		case <-timeout:
			// Retry iterating
		case <-controller.stopEstablishingBroadcast:
			break loop
		case <-controller.shutdownBroadcast:
			break loop
		}

		iterator.Reset()
	}

	NoticeInfo("stopped candidate generator")
}
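The two select/default sends near the end of the loop use a common Go idiom: a non-blocking signal into a buffered channel, where a redundant signal is simply dropped because one is already pending. In isolation (names here are illustrative, not from the library):

// signalNonBlocking queues at most one pending wake-up for a worker
// reading from the channel. With a capacity-1 channel, the default case
// coalesces redundant signals instead of blocking the sender.
func signalNonBlocking(signal chan<- struct{}) {
	select {
	case signal <- struct{}{}:
		// signal queued
	default:
		// a signal is already pending; dropping this one is harmless
	}
}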
Example #3
// EstablishTunnel first makes a network transport connection to the
// Psiphon server and then establishes an SSH client session on top of
// that transport. The SSH server is authenticated using the public
// key in the server entry.
// Depending on the server's capabilities, the connection may use
// plain SSH over TCP, obfuscated SSH over TCP, or obfuscated SSH over
// HTTP (meek protocol).
// When config.TunnelProtocol is specified, that protocol is used.
// Otherwise, a random supported protocol is chosen.
// untunneledDialConfig is used for untunneled final status requests.
func EstablishTunnel(
	config *Config,
	untunneledDialConfig *DialConfig,
	sessionId string,
	pendingConns *common.Conns,
	serverEntry *protocol.ServerEntry,
	adjustedEstablishStartTime monotime.Time,
	tunnelOwner TunnelOwner) (tunnel *Tunnel, err error) {

	selectedProtocol, err := selectProtocol(config, serverEntry)
	if err != nil {
		return nil, common.ContextError(err)
	}

	// Build transport layers and establish SSH connection. Note that
	// dialConn and monitoredConn are the same network connection.
	dialResult, err := dialSsh(
		config, pendingConns, serverEntry, selectedProtocol, sessionId)
	if err != nil {
		return nil, common.ContextError(err)
	}

	// Cleanup on error
	defer func() {
		if err != nil {
			dialResult.sshClient.Close()
			dialResult.monitoredConn.Close()
			pendingConns.Remove(dialResult.dialConn)
		}
	}()

	// The tunnel is now connected
	tunnel = &Tunnel{
		mutex:                    new(sync.Mutex),
		config:                   config,
		untunneledDialConfig:     untunneledDialConfig,
		isClosed:                 false,
		serverEntry:              serverEntry,
		protocol:                 selectedProtocol,
		conn:                     dialResult.monitoredConn,
		sshClient:                dialResult.sshClient,
		sshServerRequests:        dialResult.sshRequests,
		operateWaitGroup:         new(sync.WaitGroup),
		shutdownOperateBroadcast: make(chan struct{}),
		// A buffer allows at least one signal to be sent even when the receiver is
		// not listening. Senders should not block.
		signalPortForwardFailure: make(chan struct{}, 1),
		dialStats:                dialResult.dialStats,
		// Buffer allows SetClientVerificationPayload to submit one new payload
		// without blocking or dropping it.
		newClientVerificationPayload: make(chan string, 1),
	}

	// Create a new Psiphon API server context for this tunnel. This includes
	// performing a handshake request. If the handshake fails, this establishment
	// fails.
	if !config.DisableApi {
		NoticeInfo("starting server context for %s", tunnel.serverEntry.IpAddress)
		tunnel.serverContext, err = NewServerContext(tunnel, sessionId)
		if err != nil {
			return nil, common.ContextError(
				fmt.Errorf("error starting server context for %s: %s",
					tunnel.serverEntry.IpAddress, err))
		}
	}

	// establishDuration is the elapsed time between the controller starting tunnel
	// establishment and this tunnel being established. The reported value represents
	// how long the user waited between starting the client and having a usable tunnel;
	// or how long between the client detecting an unexpected tunnel disconnect and
	// completing automatic reestablishment.
	//
	// This time period may include time spent unsuccessfully connecting to other
	// servers. Time spent waiting for network connectivity is excluded.
	tunnel.establishDuration = monotime.Since(adjustedEstablishStartTime)

	tunnel.establishedTime = monotime.Now()

	// Now that network operations are complete, cancel interruptibility
	pendingConns.Remove(dialResult.dialConn)

	// Spawn the operateTunnel goroutine, which monitors the tunnel and handles periodic stats updates.
	tunnel.operateWaitGroup.Add(1)
	go tunnel.operateTunnel(tunnelOwner)

	return tunnel, nil
}
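Note how the deferred cleanup block near the top of EstablishTunnel works: because the function declares a named return value err, the deferred closure observes the final error at return time and can tear down the partially constructed tunnel. A minimal standalone sketch of the same pattern (a hypothetical function, assuming the usual imports):

// openBoth acquires two resources and, on any later failure, releases
// the ones already acquired. The named return value err is what the
// deferred closure inspects when the function returns.
func openBoth(pathA, pathB string) (a, b *os.File, err error) {
	a, err = os.Open(pathA)
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		// Runs at return; err holds the function's final error value.
		if err != nil {
			a.Close()
		}
	}()
	b, err = os.Open(pathB)
	if err != nil {
		return nil, nil, err
	}
	return a, b, nil
}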
Example #4
func (session *meekSession) expired() bool {
	lastActivity := monotime.Time(atomic.LoadInt64(&session.lastActivity))
	return monotime.Since(lastActivity) > MEEK_MAX_SESSION_STALENESS
}
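expired() loads lastActivity through sync/atomic because request handlers update the field concurrently; the corresponding store side would look something like this (a sketch; the method name is an assumption):

// touch records the current monotonic time as the session's last
// activity. The atomic store pairs with the LoadInt64 in expired(),
// so no mutex is needed for this single int64 field.
func (session *meekSession) touch() {
	atomic.StoreInt64(&session.lastActivity, int64(monotime.Now()))
}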
func run(t *testing.T, rateLimits RateLimits) {

	// Run a local HTTP server which serves large chunks of data

	go func() {

		handler := func(w http.ResponseWriter, r *http.Request) {
			_, _ = ioutil.ReadAll(r.Body)
			testData, _ := MakeSecureRandomBytes(testDataSize)
			w.Write(testData)
		}

		server := &http.Server{
			Addr:    serverAddress,
			Handler: http.HandlerFunc(handler),
		}

		server.ListenAndServe()
	}()

	// TODO: properly synchronize with server startup
	time.Sleep(1 * time.Second)

	// Set up an HTTP client with a throttled connection

	throttledDial := func(network, addr string) (net.Conn, error) {
		conn, err := net.Dial(network, addr)
		if err != nil {
			return conn, err
		}
		return NewThrottledConn(conn, rateLimits), nil
	}

	client := &http.Client{
		Transport: &http.Transport{
			Dial: throttledDial,
		},
	}

	// Upload and download a large chunk of data, and time it

	testData, _ := MakeSecureRandomBytes(testDataSize)
	requestBody := bytes.NewReader(testData)

	startTime := monotime.Now()

	response, err := client.Post("http://"+serverAddress, "application/octet-stream", requestBody)
	if err == nil && response.StatusCode != http.StatusOK {
		response.Body.Close()
		err = fmt.Errorf("unexpected response code: %d", response.StatusCode)
	}
	if err != nil {
		t.Fatalf("request failed: %s", err)
	}
	defer response.Body.Close()

	// Test: elapsed upload time must reflect rate limit

	checkElapsedTime(t, testDataSize, rateLimits.WriteBytesPerSecond, monotime.Since(startTime))

	startTime = monotime.Now()

	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		t.Fatalf("read response failed: %s", err)
	}
	if len(body) != testDataSize {
		t.Fatalf("unexpected response size: %d", len(body))
	}

	// Test: elapsed download time must reflect rate limit

	checkElapsedTime(t, testDataSize, rateLimits.ReadBytesPerSecond, monotime.Since(startTime))
}
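checkElapsedTime is not shown in this example. A plausible implementation (a hypothetical sketch, not the library's actual helper) derives the expected transfer time from the rate limit and fails the test when the measured duration is implausibly short:

// checkElapsedTime verifies that transferring dataSize bytes under the
// given bytes-per-second limit took roughly at least dataSize/limit.
// Hypothetical sketch; tolerances in the real helper may differ.
func checkElapsedTime(t *testing.T, dataSize int, bytesPerSecond int64, elapsed time.Duration) {
	if bytesPerSecond <= 0 {
		return // unthrottled
	}
	expected := time.Duration(
		float64(dataSize) / float64(bytesPerSecond) * float64(time.Second))
	// Allow generous slack for scheduling jitter and burst allowances.
	if elapsed < expected/2 {
		t.Fatalf("transfer too fast: elapsed %s, expected at least ~%s", elapsed, expected)
	}
}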
func (sshClient *sshClient) handleTCPChannel(
	hostToConnect string,
	portToConnect int,
	newChannel ssh.NewChannel) {

	isWebServerPortForward := false
	config := sshClient.sshServer.support.Config
	if config.WebServerPortForwardAddress != "" {
		destination := net.JoinHostPort(hostToConnect, strconv.Itoa(portToConnect))
		if destination == config.WebServerPortForwardAddress {
			isWebServerPortForward = true
			if config.WebServerPortForwardRedirectAddress != "" {
				// Note: redirect format is validated when config is loaded
				host, portStr, _ := net.SplitHostPort(config.WebServerPortForwardRedirectAddress)
				port, _ := strconv.Atoi(portStr)
				hostToConnect = host
				portToConnect = port
			}
		}
	}

	if !isWebServerPortForward && !sshClient.isPortForwardPermitted(
		portForwardTypeTCP, hostToConnect, portToConnect) {

		sshClient.rejectNewChannel(
			newChannel, ssh.Prohibited, "port forward not permitted")
		return
	}

	var bytesUp, bytesDown int64
	sshClient.openedPortForward(portForwardTypeTCP)
	defer func() {
		sshClient.closedPortForward(
			portForwardTypeTCP, atomic.LoadInt64(&bytesUp), atomic.LoadInt64(&bytesDown))
	}()

	// TOCTOU note: important to increment the port forward count (via
	// openPortForward) _before_ checking isPortForwardLimitExceeded
	// otherwise, the client could potentially consume excess resources
	// by initiating many port forwards concurrently.
	// TODO: close LRU connection (after successful Dial) instead of
	// rejecting new connection?
	if maxCount, exceeded := sshClient.isPortForwardLimitExceeded(portForwardTypeTCP); exceeded {

		// Close the oldest TCP port forward. CloseOldest() closes
		// the conn and the port forward's goroutine will complete
		// the cleanup asynchronously.
		//
		// Some known limitations:
		//
		// - CloseOldest() closes the upstream socket but does not
		//   clean up all resources associated with the port forward. These
		//   include the goroutine(s) relaying traffic as well as the SSH
		//   channel. Closing the socket will interrupt the goroutines which
		//   will then complete the cleanup. But, since the full cleanup is
		//   asynchronous, there exists a possibility that a client can consume
		//   more than max port forward resources -- just not upstream sockets.
		//
		// - An LRU list entry for this port forward is not added until
		//   after the dial completes, but the port forward is counted
		//   towards max limits. This means many dials in progress will
		//   put established connections in jeopardy.
		//
		// - We're closing the oldest open connection _before_ successfully
		//   dialing the new port forward. This means we are potentially
		//   discarding a good connection to make way for a failed connection.
		//   We cannot simply dial first and still maintain a limit on
		//   resources used, so to address this we'd need to add some
		//   accounting for connections still establishing.

		sshClient.tcpPortForwardLRU.CloseOldest()

		log.WithContextFields(
			LogFields{
				"maxCount": maxCount,
			}).Debug("closed LRU TCP port forward")
	}

	// Dial the target remote address. This is done in a goroutine to
	// ensure the shutdown signal is handled immediately.

	remoteAddr := fmt.Sprintf("%s:%d", hostToConnect, portToConnect)

	log.WithContextFields(LogFields{"remoteAddr": remoteAddr}).Debug("dialing")

	type dialTcpResult struct {
		conn net.Conn
		err  error
	}

	resultChannel := make(chan *dialTcpResult, 1)
	dialStartTime := monotime.Now()

	go func() {
		// TODO: on EADDRNOTAVAIL, temporarily suspend new clients
		// TODO: IPv6 support
		conn, err := net.DialTimeout(
			"tcp4", remoteAddr, SSH_TCP_PORT_FORWARD_DIAL_TIMEOUT)
		resultChannel <- &dialTcpResult{conn, err}
	}()

	var result *dialTcpResult
	select {
	case result = <-resultChannel:
	case <-sshClient.stopBroadcast:
		// Note: may leave dial in progress (TODO: use DialContext to cancel)
		return
	}

	sshClient.updateQualityMetrics(
		result.err == nil, monotime.Since(dialStartTime))

	if result.err != nil {
		sshClient.rejectNewChannel(newChannel, ssh.ConnectionFailed, result.err.Error())
		return
	}

	// The upstream TCP port forward connection has been established. Schedule
	// some cleanup and notify the SSH client that the channel is accepted.

	fwdConn := result.conn
	defer fwdConn.Close()

	fwdChannel, requests, err := newChannel.Accept()
	if err != nil {
		log.WithContextFields(LogFields{"error": err}).Warning("accept new channel failed")
		return
	}
	go ssh.DiscardRequests(requests)
	defer fwdChannel.Close()

	// ActivityMonitoredConn monitors the TCP port forward I/O and updates
	// its LRU status. ActivityMonitoredConn also times out I/O on the port
	// forward if both reads and writes have been idle for the specified
	// duration.

	lruEntry := sshClient.tcpPortForwardLRU.Add(fwdConn)
	defer lruEntry.Remove()

	fwdConn, err = common.NewActivityMonitoredConn(
		fwdConn,
		sshClient.idleTCPPortForwardTimeout(),
		true,
		lruEntry)
	if err != nil {
		log.WithContextFields(LogFields{"error": err}).Error("NewActivityMonitoredConn failed")
		return
	}

	// Relay channel to forwarded connection.

	log.WithContextFields(LogFields{"remoteAddr": remoteAddr}).Debug("relaying")

	// TODO: relay errors to fwdChannel.Stderr()?
	relayWaitGroup := new(sync.WaitGroup)
	relayWaitGroup.Add(1)
	go func() {
		defer relayWaitGroup.Done()
		// io.Copy allocates a 32K temporary buffer, and each port forward relay uses
		// two of these buffers; using io.CopyBuffer with a smaller buffer reduces the
		// overall memory footprint.
		bytes, err := io.CopyBuffer(
			fwdChannel, fwdConn, make([]byte, SSH_TCP_PORT_FORWARD_COPY_BUFFER_SIZE))
		atomic.AddInt64(&bytesDown, bytes)
		if err != nil && err != io.EOF {
			// Debug since errors such as "connection reset by peer" occur during normal operation
			log.WithContextFields(LogFields{"error": err}).Debug("downstream TCP relay failed")
		}
		// Interrupt upstream io.Copy when downstream is shutting down.
		// TODO: this is done to quickly cleanup the port forward when
		// fwdConn has a read timeout, but is it clean -- upstream may still
		// be flowing?
		fwdChannel.Close()
	}()
	bytes, err := io.CopyBuffer(
		fwdConn, fwdChannel, make([]byte, SSH_TCP_PORT_FORWARD_COPY_BUFFER_SIZE))
	atomic.AddInt64(&bytesUp, bytes)
	if err != nil && err != io.EOF {
		log.WithContextFields(LogFields{"error": err}).Debug("upstream TCP relay failed")
	}
	// Shutdown special case: fwdChannel will be closed and return EOF when
	// the SSH connection is closed, but we need to explicitly close fwdConn
	// to interrupt the downstream io.Copy, which may be blocked on a
	// fwdConn.Read().
	fwdConn.Close()

	relayWaitGroup.Wait()

	log.WithContextFields(
		LogFields{
			"remoteAddr": remoteAddr,
			"bytesUp":    atomic.LoadInt64(&bytesUp),
			"bytesDown":  atomic.LoadInt64(&bytesDown)}).Debug("exiting")
}
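The dial in handleTCPChannel carries a TODO: when stopBroadcast fires, the abandoned net.DialTimeout keeps running in its goroutine. With contexts, the dial itself can be cancelled. A sketch of that alternative (assumed names; not the library's current code):

// dialWithStop cancels an in-flight dial when stop closes, rather than
// leaving the dial goroutine running in the background. Sketch only.
func dialWithStop(addr string, timeout time.Duration, stop <-chan struct{}) (net.Conn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	go func() {
		select {
		case <-stop:
			cancel() // interrupts DialContext below
		case <-ctx.Done():
			// dial completed or timed out; nothing to do
		}
	}()
	var dialer net.Dialer
	return dialer.DialContext(ctx, "tcp4", addr)
}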