Code example #1
// IsUntunneled takes a destination hostname or IP address and determines
// if it should be accessed through a tunnel. When a hostname is presented, it
// is first resolved to an IP address which can be matched against the routes data.
// Multiple goroutines may invoke IsUntunneled simultaneously. Multi-reader
// locks are used in the implementation to enable concurrent access, with no locks
// held during network access.
func (classifier *SplitTunnelClassifier) IsUntunneled(targetAddress string) bool {

	if !classifier.hasRoutes() {
		return false
	}

	classifier.mutex.RLock()
	cachedClassification, ok := classifier.cache[targetAddress]
	classifier.mutex.RUnlock()
	if ok && cachedClassification.expiry.After(monotime.Now()) {
		return cachedClassification.isUntunneled
	}

	ipAddr, ttl, err := tunneledLookupIP(
		classifier.dnsServerAddress, classifier.dnsTunneler, targetAddress)
	if err != nil {
		NoticeAlert("failed to resolve address for split tunnel classification: %s", err)
		return false
	}
	expiry := monotime.Now().Add(ttl)

	isUntunneled := classifier.ipAddressInRoutes(ipAddr)

	// TODO: garbage collect expired items from cache?

	classifier.mutex.Lock()
	classifier.cache[targetAddress] = &classification{isUntunneled, expiry}
	classifier.mutex.Unlock()

	if isUntunneled {
		NoticeUntunneled(targetAddress)
	}

	return isUntunneled
}
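For context, here is a minimal sketch of the cache types IsUntunneled relies on, with field names inferred from the usage above; this is an illustration, not necessarily the project's actual declarations.

type classification struct {
	isUntunneled bool
	expiry       monotime.Time
}

type SplitTunnelClassifier struct {
	mutex            sync.RWMutex
	cache            map[string]*classification // keyed by target address; entries expire per DNS TTL
	dnsServerAddress string
	dnsTunneler      Tunneler // assumed type used by tunneledLookupIP
	// routes data and related fields omitted
}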
Code example #2
File: net.go Project: adamkruger/psiphon-tunnel-core
// NewActivityMonitoredConn creates a new ActivityMonitoredConn.
func NewActivityMonitoredConn(
	conn net.Conn,
	inactivityTimeout time.Duration,
	activeOnWrite bool,
	activityUpdater ActivityUpdater,
	lruEntry *LRUConnsEntry) (*ActivityMonitoredConn, error) {

	if inactivityTimeout > 0 {
		err := conn.SetDeadline(time.Now().Add(inactivityTimeout))
		if err != nil {
			return nil, ContextError(err)
		}
	}

	now := int64(monotime.Now())

	return &ActivityMonitoredConn{
		Conn:                 conn,
		inactivityTimeout:    inactivityTimeout,
		activeOnWrite:        activeOnWrite,
		realStartTime:        time.Now(),
		monotonicStartTime:   now,
		lastReadActivityTime: now,
		activityUpdater:      activityUpdater,
		lruEntry:             lruEntry,
	}, nil
}
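A rough sketch of the struct populated above, with field types inferred from how the fields are used here and in Read(); monotonic times are held as int64 so they can be read and written atomically. This is illustrative, not the authoritative definition.

type ActivityMonitoredConn struct {
	net.Conn
	inactivityTimeout    time.Duration
	activeOnWrite        bool
	realStartTime        time.Time
	monotonicStartTime   int64 // monotime.Time stored as int64
	lastReadActivityTime int64 // updated atomically on successful reads
	activityUpdater      ActivityUpdater
	lruEntry             *LRUConnsEntry
}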
Code example #3
File: net.go Project: adamkruger/psiphon-tunnel-core
func (conn *ActivityMonitoredConn) Read(buffer []byte) (int, error) {
	n, err := conn.Conn.Read(buffer)
	if err == nil {

		if conn.inactivityTimeout > 0 {
			err = conn.Conn.SetDeadline(time.Now().Add(conn.inactivityTimeout))
			if err != nil {
				return n, ContextError(err)
			}
		}

		readActivityTime := int64(monotime.Now())

		if conn.activityUpdater != nil {
			conn.activityUpdater.UpdateProgress(
				int64(n), 0, readActivityTime-atomic.LoadInt64(&conn.lastReadActivityTime))
		}

		if conn.lruEntry != nil {
			conn.lruEntry.Touch()
		}

		atomic.StoreInt64(&conn.lastReadActivityTime, readActivityTime)

	}
	// Note: no context error to preserve error type
	return n, err
}
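The wrapper is typically applied right after dialing. A hedged usage sketch, assuming the five-argument constructor shown in code example #2; the function name, the 30-second timeout, and the nil updater/LRU arguments are illustrative only.

func dialWithIdleTimeout(address string) (net.Conn, error) {
	rawConn, err := net.Dial("tcp", address)
	if err != nil {
		return nil, err
	}
	// Time out I/O when no read activity occurs for 30 seconds; nil
	// ActivityUpdater and LRUConnsEntry mean no progress reporting or LRU tracking.
	monitoredConn, err := NewActivityMonitoredConn(rawConn, 30*time.Second, false, nil, nil)
	if err != nil {
		rawConn.Close()
		return nil, err
	}
	return monitoredConn, nil
}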
Code example #4
File: dns.go Project: adamkruger/psiphon-tunnel-core
// NewDNSResolver initializes a new DNSResolver, loading it with
// a fresh resolver value. The load must succeed, so either
// "/etc/resolv.conf" must contain a valid "nameserver" line with
// a DNS server IP address, or a valid "defaultResolver" default
// value must be provided.
// On systems without "/etc/resolv.conf", "defaultResolver" is
// required.
//
// The resolver is considered stale and reloaded if last checked
// more than 5 seconds before the last Get(), which is similar to
// frequencies in other implementations:
//
// - https://golang.org/src/net/dnsclient_unix.go,
//   resolverConfig.tryUpdate: 5 seconds
//
// - https://github.com/ambrop72/badvpn/blob/master/udpgw/udpgw.c,
//   maybe_update_dns: 2 seconds
//
func NewDNSResolver(defaultResolver string) (*DNSResolver, error) {

	dns := &DNSResolver{
		lastReloadTime: int64(monotime.Now()),
	}

	dns.ReloadableFile = common.NewReloadableFile(
		DNS_SYSTEM_CONFIG_FILENAME,
		func(fileContent []byte) error {

			resolver, err := parseResolveConf(fileContent)
			if err != nil {
				// On error, state remains the same
				return common.ContextError(err)
			}

			dns.resolver = resolver

			log.WithContextFields(
				LogFields{
					"resolver": resolver.String(),
				}).Debug("loaded system DNS resolver")

			return nil
		})

	_, err := dns.Reload()
	if err != nil {
		if defaultResolver == "" {
			return nil, common.ContextError(err)
		}

		log.WithContextFields(
			LogFields{"err": err}).Info(
			"failed to load system DNS resolver; using default")

		resolver, err := parseResolver(defaultResolver)
		if err != nil {
			return nil, common.ContextError(err)
		}

		dns.resolver = resolver
	}

	return dns, nil
}
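A hedged usage sketch: supply a fallback resolver value for systems without a usable "/etc/resolv.conf". The helper name and the fallback address are illustrative only.

func lookupSystemResolver() (net.IP, error) {
	// "8.8.8.8" stands in for whatever default resolver the caller configures.
	dnsResolver, err := NewDNSResolver("8.8.8.8")
	if err != nil {
		return nil, common.ContextError(err)
	}
	return dnsResolver.Get(), nil
}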
Code example #5
File: dns.go Project: adamkruger/psiphon-tunnel-core
// Get returns the cached resolver, first updating the cached
// value if it's stale. If reloading fails, the previous value
// is used.
func (dns *DNSResolver) Get() net.IP {

	// Every UDP DNS port forward frequently calls Get(), so this code
	// is intended to minimize blocking. Most callers will hit just the
	// atomic.LoadInt64 reload time check and the RLock (an atomic.AddInt32
	// when no write lock is pending). An atomic.CompareAndSwapInt32 is
	// used to ensure only one goroutine enters Reload() and blocks on
	// its write lock. Finally, since ReloadableFile.Reload
	// checks whether the underlying file has changed _before_ acquiring a
	// write lock, we only incur write lock blocking when "/etc/resolv.conf"
	// has actually changed.

	lastReloadTime := monotime.Time(atomic.LoadInt64(&dns.lastReloadTime))
	stale := monotime.Now().After(lastReloadTime.Add(DNS_SYSTEM_CONFIG_RELOAD_PERIOD))

	if stale {

		isReloader := atomic.CompareAndSwapInt32(&dns.isReloading, 0, 1)

		if isReloader {

			// Unconditionally set last reload time. Even on failure only
			// want to retry after another DNS_SYSTEM_CONFIG_RELOAD_PERIOD.
			atomic.StoreInt64(&dns.lastReloadTime, int64(monotime.Now()))

			_, err := dns.Reload()
			if err != nil {
				log.WithContextFields(
					LogFields{"err": err}).Info(
					"failed to reload system DNS resolver")
			}

			atomic.StoreInt32(&dns.isReloading, 0)
		}
	}

	dns.ReloadableFile.RLock()
	defer dns.ReloadableFile.RUnlock()

	return dns.resolver
}
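For reference, the DNSResolver shape these two functions imply; embedding common.ReloadableFile supplies the RLock/RUnlock and Reload used above. The field layout is an assumption inferred from usage.

type DNSResolver struct {
	common.ReloadableFile
	resolver       net.IP
	lastReloadTime int64 // monotime.Time stored as int64 for atomic access
	isReloading    int32 // CAS guard: at most one goroutine runs Reload() at a time
}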
Code example #6
// pumpWrites causes goroutines blocking on meekConn.Write() to write
// to the specified writer. This function blocks until the meek response
// body limits (size for protocol v1, turn around time for protocol v2+)
// are met, or the meekConn is closed.
// Note: channel scheme assumes only one concurrent call to pumpWrites
func (conn *meekConn) pumpWrites(writer io.Writer) error {

	startTime := monotime.Now()
	timeout := time.NewTimer(MEEK_TURN_AROUND_TIMEOUT)
	defer timeout.Stop()

	for {
		select {
		case buffer := <-conn.nextWriteBuffer:
			_, err := writer.Write(buffer)

			// Assumes that writeResult won't block.
			// Note: always send the err to writeResult,
			// as the Write() caller is blocking on this.
			conn.writeResult <- err

			if err != nil {
				return err
			}

			if conn.protocolVersion < MEEK_PROTOCOL_VERSION_2 {
				// Protocol v1 clients expect at most
				// MEEK_MAX_PAYLOAD_LENGTH response bodies
				return nil
			}
			totalElapsedTime := monotime.Since(startTime) / time.Millisecond
			if totalElapsedTime >= MEEK_EXTENDED_TURN_AROUND_TIMEOUT {
				return nil
			}
			timeout.Reset(MEEK_TURN_AROUND_TIMEOUT)
		case <-timeout.C:
			return nil
		case <-conn.closeBroadcast:
			return io.EOF
		}
	}
}
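A hedged sketch of the Write() counterpart implied by the channel scheme above: the caller hands its buffer to pumpWrites and blocks until the write result comes back. Field names are taken from pumpWrites; the logic is simplified and is not the project's actual implementation.

func (conn *meekConn) Write(buffer []byte) (int, error) {
	select {
	case conn.nextWriteBuffer <- buffer:
	case <-conn.closeBroadcast:
		return 0, io.EOF
	}
	select {
	case err := <-conn.writeResult:
		if err != nil {
			return 0, err
		}
	case <-conn.closeBroadcast:
		return 0, io.EOF
	}
	return len(buffer), nil
}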
Code example #7
// classifyImpairedProtocol tracks "impaired" protocol classifications for failed
// tunnels. A protocol is classified as impaired if a tunnel using that protocol
// fails, repeatedly, shortly after the start of the connection. During tunnel
// establishment, impaired protocols are briefly skipped.
//
// One purpose of this measure is to defend against an attack where the adversary,
// for example, tags an OSSH TCP connection as an "unidentified" protocol; allows
// it to connect; but then kills the underlying TCP connection after a short time.
// Since OSSH has less latency than other protocols that may bypass an "unidentified"
// filter, these other protocols might never be selected for use.
//
// Concurrency note: only the runTunnels() goroutine may call classifyImpairedProtocol
func (controller *Controller) classifyImpairedProtocol(failedTunnel *Tunnel) {
	if failedTunnel.establishedTime.Add(IMPAIRED_PROTOCOL_CLASSIFICATION_DURATION).After(monotime.Now()) {
		controller.impairedProtocolClassification[failedTunnel.protocol] += 1
	} else {
		controller.impairedProtocolClassification[failedTunnel.protocol] = 0
	}
	if len(controller.getImpairedProtocols()) == len(common.SupportedTunnelProtocols) {
		// Reset classification if all protocols are classified as impaired as
		// the network situation (or attack) may not be protocol-specific.
		// TODO: compare against count of distinct supported protocols for
		// current known server entries.
		controller.impairedProtocolClassification = make(map[string]int)
	}
}
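A plausible sketch of the getImpairedProtocols referenced above: a protocol counts as impaired once its failure count reaches IMPAIRED_PROTOCOL_CLASSIFICATION_THRESHOLD (the constant name appears in the test notices later in this listing; the implementation shown here is an assumption).

func (controller *Controller) getImpairedProtocols() []string {
	impairedProtocols := make([]string, 0)
	for protocol, count := range controller.impairedProtocolClassification {
		if count >= IMPAIRED_PROTOCOL_CLASSIFICATION_THRESHOLD {
			impairedProtocols = append(impairedProtocols, protocol)
		}
	}
	return impairedProtocols
}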
Code example #8
// upgradeDownloader makes periodic attempts to complete a client upgrade
// download. DownloadUpgrade() is resumable, so each attempt has potential for
// getting closer to completion, even in conditions where the download or
// tunnel is repeatedly interrupted.
// An upgrade download is triggered by either a handshake response indicating
// that a new version is available; or after failing to connect, in which case
// it's useful to check, out-of-band, for an upgrade with new circumvention
// capabilities.
// Once the download operation completes successfully, the downloader exits
// and is not run again: either there is not a newer version, or the upgrade
// has been downloaded and is ready to be applied.
// We're assuming that the upgrade will be applied and the entire system
// restarted before another upgrade is to be downloaded.
//
// TODO: refactor upgrade downloader and remote server list fetcher to use
// common code (including the resumable download routines).
//
func (controller *Controller) upgradeDownloader() {
	defer controller.runWaitGroup.Done()

	var lastDownloadTime monotime.Time

downloadLoop:
	for {
		// Wait for a signal before downloading
		var handshakeVersion string
		select {
		case handshakeVersion = <-controller.signalDownloadUpgrade:
		case <-controller.shutdownBroadcast:
			break downloadLoop
		}

		// Unless the handshake is explicitly advertising a new version, skip
		// checking entirely when a recent download was successful.
		if handshakeVersion == "" &&
			lastDownloadTime != 0 &&
			lastDownloadTime.Add(DOWNLOAD_UPGRADE_STALE_PERIOD).After(monotime.Now()) {
			continue
		}

	retryLoop:
		for {
			// Don't attempt to download while there is no network connectivity,
			// to avoid alert notice noise.
			if !WaitForNetworkConnectivity(
				controller.config.NetworkConnectivityChecker,
				controller.shutdownBroadcast) {
				break downloadLoop
			}

			// Pick any active tunnel and make the next download attempt. If there's
			// no active tunnel, the untunneledDialConfig will be used.
			tunnel := controller.getNextActiveTunnel()

			err := DownloadUpgrade(
				controller.config,
				handshakeVersion,
				tunnel,
				controller.untunneledDialConfig)

			if err == nil {
				lastDownloadTime = monotime.Now()
				break retryLoop
			}

			NoticeAlert("failed to download upgrade: %s", err)

			timeout := time.After(
				time.Duration(*controller.config.DownloadUpgradeRetryPeriodSeconds) * time.Second)
			select {
			case <-timeout:
			case <-controller.shutdownBroadcast:
				break downloadLoop
			}
		}
	}

	NoticeInfo("exiting upgrade downloader")
}
Code example #9
// remoteServerListFetcher fetches an out-of-band list of server entries
// for more tunnel candidates. It fetches when signalled, with retries
// on failure.
func (controller *Controller) remoteServerListFetcher() {
	defer controller.runWaitGroup.Done()

	if controller.config.RemoteServerListUrl == "" {
		NoticeAlert("remote server list URL is blank")
		return
	}
	if controller.config.RemoteServerListSignaturePublicKey == "" {
		NoticeAlert("remote server list signature public key is blank")
		return
	}

	var lastFetchTime monotime.Time

fetcherLoop:
	for {
		// Wait for a signal before fetching
		select {
		case <-controller.signalFetchRemoteServerList:
		case <-controller.shutdownBroadcast:
			break fetcherLoop
		}

		// Skip fetch entirely (i.e., send no request at all, even when ETag would save
		// on response size) when a recent fetch was successful
		if lastFetchTime != 0 &&
			lastFetchTime.Add(FETCH_REMOTE_SERVER_LIST_STALE_PERIOD).After(monotime.Now()) {
			continue
		}

	retryLoop:
		for {
			// Don't attempt to fetch while there is no network connectivity,
			// to avoid alert notice noise.
			if !WaitForNetworkConnectivity(
				controller.config.NetworkConnectivityChecker,
				controller.shutdownBroadcast) {
				break fetcherLoop
			}

			// Pick any active tunnel and make the next fetch attempt. If there's
			// no active tunnel, the untunneledDialConfig will be used.
			tunnel := controller.getNextActiveTunnel()

			err := FetchRemoteServerList(
				controller.config,
				tunnel,
				controller.untunneledDialConfig)

			if err == nil {
				lastFetchTime = monotime.Now()
				break retryLoop
			}

			NoticeAlert("failed to fetch remote server list: %s", err)

			timeout := time.After(
				time.Duration(*controller.config.FetchRemoteServerListRetryPeriodSeconds) * time.Second)
			select {
			case <-timeout:
			case <-controller.shutdownBroadcast:
				break fetcherLoop
			}
		}
	}

	NoticeInfo("exiting remote server list fetcher")
}
Code example #10
// operateTunnel monitors the health of the tunnel and performs
// periodic work.
//
// BytesTransferred and TotalBytesTransferred notices are emitted
// for live reporting and diagnostics reporting, respectively.
//
// Status requests are sent to the Psiphon API to report bytes
// transferred.
//
// Periodic SSH keep alive packets are sent to ensure the underlying
// TCP connection isn't terminated by NAT, or other network
// interference -- or test if it has been terminated while the device
// has been asleep. When a keep alive times out, the tunnel is
// considered failed.
//
// An immediate SSH keep alive "probe" is sent to test the tunnel and
// server responsiveness when a port forward failure is detected: a
// failed dial or failed read/write. This keep alive has a shorter
// timeout.
//
// Note that port forward failures may be due to non-failure conditions.
// For example, when the user inputs an invalid domain name and
// resolution is done by the ssh server; or trying to connect to a
// non-white-listed port; and the error message in these cases is not
// distinguishable from a true server error (a common error message,
// "ssh: rejected: administratively prohibited (open failed)", may be
// returned for these cases but also if the server has run out of
// ephemeral ports, for example).
//
// SSH keep alives are not sent when the tunnel has been recently
// active (not only does tunnel activity obviate the necessity of a keep
// alive, testing has shown that keep alives may time out for "busy"
// tunnels, especially over meek protocol and other high latency
// conditions).
//
// "Recently active" is defined as having received payload bytes. Sent
// bytes are not considered as testing has shown bytes may appear to
// send when certain NAT devices have interfered with the tunnel, while
// no bytes are received. In a pathological case, with DNS implemented
// as tunneled UDP, a browser may wait excessively for a domain name to
// resolve, while no new port forward is attempted which would otherwise
// result in a tunnel failure detection.
//
// TODO: change "recently active" to include having received any
// SSH protocol messages from the server, not just user payload?
//
func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) {
	defer tunnel.operateWaitGroup.Done()

	lastBytesReceivedTime := monotime.Now()

	lastTotalBytesTransferedTime := monotime.Now()
	totalSent := int64(0)
	totalReceived := int64(0)

	noticeBytesTransferredTicker := time.NewTicker(1 * time.Second)
	defer noticeBytesTransferredTicker.Stop()

	// The next status request and ssh keep alive times are picked at random,
	// from a range, to make the resulting traffic less fingerprintable.
	// Note: not using Tickers since these are not fixed time periods.
	nextStatusRequestPeriod := func() time.Duration {
		return makeRandomPeriod(
			PSIPHON_API_STATUS_REQUEST_PERIOD_MIN,
			PSIPHON_API_STATUS_REQUEST_PERIOD_MAX)
	}

	statsTimer := time.NewTimer(nextStatusRequestPeriod())
	defer statsTimer.Stop()

	// Schedule an immediate status request to deliver any unreported
	// persistent stats.
	// Note: this may not be effective when an outstanding
	// asynchronous untunneled final status request is holding the
	// persistent stats records. It may also conflict with other
	// tunnel candidates which attempt to send an immediate request
	// before being discarded. For now, we mitigate this with a short,
	// random delay.
	unreported := CountUnreportedPersistentStats()
	if unreported > 0 {
		NoticeInfo("Unreported persistent stats: %d", unreported)
		statsTimer.Reset(makeRandomPeriod(
			PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MIN,
			PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MAX))
	}

	nextSshKeepAlivePeriod := func() time.Duration {
		return makeRandomPeriod(
			TUNNEL_SSH_KEEP_ALIVE_PERIOD_MIN,
			TUNNEL_SSH_KEEP_ALIVE_PERIOD_MAX)
	}

	// TODO: don't initialize timer when config.DisablePeriodicSshKeepAlive is set
	sshKeepAliveTimer := time.NewTimer(nextSshKeepAlivePeriod())
	if tunnel.config.DisablePeriodicSshKeepAlive {
		sshKeepAliveTimer.Stop()
	} else {
		defer sshKeepAliveTimer.Stop()
	}

	// Perform network requests in separate goroutines so as not to block
	// other operations.
	requestsWaitGroup := new(sync.WaitGroup)

	requestsWaitGroup.Add(1)
	signalStatusRequest := make(chan struct{})
	go func() {
		defer requestsWaitGroup.Done()
		for range signalStatusRequest {
			sendStats(tunnel)
		}
	}()

	requestsWaitGroup.Add(1)
	signalSshKeepAlive := make(chan time.Duration)
	sshKeepAliveError := make(chan error, 1)
	go func() {
		defer requestsWaitGroup.Done()
		for timeout := range signalSshKeepAlive {
			err := sendSshKeepAlive(tunnel.sshClient, tunnel.conn, timeout)
			if err != nil {
				select {
				case sshKeepAliveError <- err:
				default:
				}
			}
		}
	}()

	requestsWaitGroup.Add(1)
	signalStopClientVerificationRequests := make(chan struct{})
	go func() {
		defer requestsWaitGroup.Done()

		clientVerificationRequestSuccess := true
		clientVerificationPayload := ""
		failCount := 0
		for {
			// TODO: use reflect.SelectCase?
			if clientVerificationRequestSuccess {
				failCount = 0
				select {
				case clientVerificationPayload = <-tunnel.newClientVerificationPayload:
				case <-signalStopClientVerificationRequests:
					return
				}
			} else {
				// If sendClientVerification failed to send the payload, we
				// will retry after a delay. A new payload will be used
				// instead if one arrives in the meantime.
				// If the failure count exceeds PSIPHON_API_CLIENT_VERIFICATION_REQUEST_MAX_RETRIES,
				// stop retrying for this tunnel.
				failCount += 1
				if failCount > PSIPHON_API_CLIENT_VERIFICATION_REQUEST_MAX_RETRIES {
					return
				}
				timeout := time.After(PSIPHON_API_CLIENT_VERIFICATION_REQUEST_RETRY_PERIOD)
				select {
				case <-timeout:
				case clientVerificationPayload = <-tunnel.newClientVerificationPayload:
				case <-signalStopClientVerificationRequests:
					return
				}
			}

			clientVerificationRequestSuccess = sendClientVerification(tunnel, clientVerificationPayload)
		}
	}()

	shutdown := false
	var err error
	for !shutdown && err == nil {
		select {
		case <-noticeBytesTransferredTicker.C:
			sent, received := transferstats.ReportRecentBytesTransferredForServer(
				tunnel.serverEntry.IpAddress)

			if received > 0 {
				lastBytesReceivedTime = monotime.Now()
			}

			totalSent += sent
			totalReceived += received

			if lastTotalBytesTransferedTime.Add(TOTAL_BYTES_TRANSFERRED_NOTICE_PERIOD).Before(monotime.Now()) {
				NoticeTotalBytesTransferred(tunnel.serverEntry.IpAddress, totalSent, totalReceived)
				lastTotalBytesTransferedTime = monotime.Now()
			}

			// Only emit the frequent BytesTransferred notice when tunnel is not idle.
			if tunnel.config.EmitBytesTransferred && (sent > 0 || received > 0) {
				NoticeBytesTransferred(tunnel.serverEntry.IpAddress, sent, received)
			}

		case <-statsTimer.C:
			select {
			case signalStatusRequest <- *new(struct{}):
			default:
			}
			statsTimer.Reset(nextStatusRequestPeriod())

		case <-sshKeepAliveTimer.C:
			if lastBytesReceivedTime.Add(TUNNEL_SSH_KEEP_ALIVE_PERIODIC_INACTIVE_PERIOD).Before(monotime.Now()) {
				select {
				case signalSshKeepAlive <- time.Duration(*tunnel.config.TunnelSshKeepAlivePeriodicTimeoutSeconds) * time.Second:
				default:
				}
			}
			sshKeepAliveTimer.Reset(nextSshKeepAlivePeriod())

		case <-tunnel.signalPortForwardFailure:
			// Note: no mutex on totalPortForwardFailures; only referenced here
			tunnel.totalPortForwardFailures++
			NoticeInfo("port forward failures for %s: %d",
				tunnel.serverEntry.IpAddress, tunnel.totalPortForwardFailures)

			if lastBytesReceivedTime.Add(TUNNEL_SSH_KEEP_ALIVE_PROBE_INACTIVE_PERIOD).Before(monotime.Now()) {
				select {
				case signalSshKeepAlive <- time.Duration(*tunnel.config.TunnelSshKeepAliveProbeTimeoutSeconds) * time.Second:
				default:
				}
			}
			if !tunnel.config.DisablePeriodicSshKeepAlive {
				sshKeepAliveTimer.Reset(nextSshKeepAlivePeriod())
			}

		case err = <-sshKeepAliveError:

		case serverRequest := <-tunnel.sshServerRequests:
			if serverRequest != nil {
				err := HandleServerRequest(tunnelOwner, tunnel, serverRequest.Type, serverRequest.Payload)
				if err == nil {
					serverRequest.Reply(true, nil)
				} else {
					NoticeAlert("HandleServerRequest for %s failed: %s", serverRequest.Type, err)
					serverRequest.Reply(false, nil)

				}
			}

		case <-tunnel.shutdownOperateBroadcast:
			shutdown = true
		}
	}

	close(signalSshKeepAlive)
	close(signalStatusRequest)
	close(signalStopClientVerificationRequests)
	requestsWaitGroup.Wait()

	// Capture bytes transferred since the last noticeBytesTransferredTicker tick
	sent, received := transferstats.ReportRecentBytesTransferredForServer(tunnel.serverEntry.IpAddress)
	totalSent += sent
	totalReceived += received

	// Always emit a final NoticeTotalBytesTransferred
	NoticeTotalBytesTransferred(tunnel.serverEntry.IpAddress, totalSent, totalReceived)

	// Tunnel does not have a serverContext when DisableApi is set.
	if tunnel.serverContext != nil && !tunnel.IsDiscarded() {

		// The stats for this tunnel will be reported via the next successful
		// status request.

		// Since client clocks are unreliable, we report the server's timestamp from
		// the handshake response as the absolute tunnel start time. This time
		// will be slightly earlier than the actual tunnel activation time, as the
		// client has to receive and parse the response and activate the tunnel.

		tunnelStartTime := tunnel.serverContext.serverHandshakeTimestamp

		// For the tunnel duration calculation, we use the local clock. The start time
		// is tunnel.establishedTime as recorded when the tunnel was established. For the
		// end time, we do not use the current time as we may now be long past the
		// actual termination time of the tunnel. For example, the host or device may
		// have resumed after a long sleep (it's not clear that the monotonic clock service
		// used to measure elapsed time will or will not stop during device sleep). Instead,
		// we use the last data received time as the estimated tunnel end time.
		//
		// One potential issue with using the last received time is receiving data
		// after an extended sleep because the device sleep occurred with data still in
		// the OS socket read buffer. This is not expected to happen on Android, as the
		// OS will wake a process when it has TCP data available to read. (For this reason,
		// the actual long sleep issue is only with an idle tunnel; in this case the client
		// is responsible for sending SSH keep alives but a device sleep will delay the
		// golang SSH keep alive timer.)
		//
		// Idle tunnels will only read data when a SSH keep alive is sent. As a result,
		// the last-received-time scheme can undercount tunnel durations by up to
		// TUNNEL_SSH_KEEP_ALIVE_PERIOD_MAX for idle tunnels.

		tunnelDuration := tunnel.conn.GetLastActivityMonotime().Sub(tunnel.establishedTime)

		err := RecordTunnelStat(
			tunnel.serverContext.sessionId,
			tunnel.serverContext.tunnelNumber,
			tunnel.serverEntry.IpAddress,
			fmt.Sprintf("%d", tunnel.establishDuration),
			tunnelStartTime,
			fmt.Sprintf("%d", tunnelDuration),
			totalSent,
			totalReceived)
		if err != nil {
			NoticeAlert("RecordTunnelStat failed: %s", common.ContextError(err))
		}
	}

	// Final status request notes:
	//
	// It's highly desirable to send a final status request in order to report
	// domain bytes transferred stats as well as to report tunnel stats as
	// soon as possible. For this reason, we attempt untunneled requests when
	// the tunneled request isn't possible or has failed.
	//
	// In an orderly shutdown (err == nil), the Controller is stopping and
	// everything must be wrapped up quickly. Also, we still have a working
	// tunnel. So we first attempt a tunneled status request (with a short
	// timeout) and then attempt, synchronously -- otherwise the Controller's
	// runWaitGroup.Wait() will return while a request is still in progress
	// -- untunneled requests (also with short timeouts). Note that in this
	// case the untunneled request will opt out of untunneledPendingConns so
	// that it's not inadvertently canceled by the Controller shutdown
	// sequence (see doUntunneledStatusRequest).
	//
	// If the tunnel has failed, the Controller may continue working. We want
	// to re-establish as soon as possible (so don't want to block on status
	// requests, even for a second). We may have a long time to attempt
	// untunneled requests in the background. And there is no tunnel through
	// which to attempt tunneled requests. So we spawn a goroutine to run the
	// untunneled requests, which are allowed a longer timeout. These requests
	// will be interrupted by the Controller's untunneledPendingConns.CloseAll()
	// in the case of a shutdown.

	if err == nil {
		NoticeInfo("shutdown operate tunnel")
		if !sendStats(tunnel) {
			sendUntunneledStats(tunnel, true)
		}
	} else {
		NoticeAlert("operate tunnel error for %s: %s", tunnel.serverEntry.IpAddress, err)
		go sendUntunneledStats(tunnel, false)
		tunnelOwner.SignalTunnelFailure(tunnel)
	}
}
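operateTunnel jitters its timer periods with makeRandomPeriod. A minimal sketch of such a helper, assuming math/rand; the project may well draw from a cryptographically secure source instead.

func makeRandomPeriod(min, max time.Duration) time.Duration {
	if max <= min {
		return min
	}
	// Pick a uniformly random duration in [min, max).
	return min + time.Duration(rand.Int63n(int64(max-min)))
}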
Code example #11
// EstablishTunnel first makes a network transport connection to the
// Psiphon server and then establishes an SSH client session on top of
// that transport. The SSH server is authenticated using the public
// key in the server entry.
// Depending on the server's capabilities, the connection may use
// plain SSH over TCP, obfuscated SSH over TCP, or obfuscated SSH over
// HTTP (meek protocol).
// When requiredProtocol is not blank, that protocol is used. Otherwise,
// a random supported protocol is used.
// untunneledDialConfig is used for untunneled final status requests.
func EstablishTunnel(
	config *Config,
	untunneledDialConfig *DialConfig,
	sessionId string,
	pendingConns *common.Conns,
	serverEntry *protocol.ServerEntry,
	adjustedEstablishStartTime monotime.Time,
	tunnelOwner TunnelOwner) (tunnel *Tunnel, err error) {

	selectedProtocol, err := selectProtocol(config, serverEntry)
	if err != nil {
		return nil, common.ContextError(err)
	}

	// Build transport layers and establish SSH connection. Note that
	// dialConn and monitoredConn are the same network connection.
	dialResult, err := dialSsh(
		config, pendingConns, serverEntry, selectedProtocol, sessionId)
	if err != nil {
		return nil, common.ContextError(err)
	}

	// Cleanup on error
	defer func() {
		if err != nil {
			dialResult.sshClient.Close()
			dialResult.monitoredConn.Close()
			pendingConns.Remove(dialResult.dialConn)
		}
	}()

	// The tunnel is now connected
	tunnel = &Tunnel{
		mutex:                    new(sync.Mutex),
		config:                   config,
		untunneledDialConfig:     untunneledDialConfig,
		isClosed:                 false,
		serverEntry:              serverEntry,
		protocol:                 selectedProtocol,
		conn:                     dialResult.monitoredConn,
		sshClient:                dialResult.sshClient,
		sshServerRequests:        dialResult.sshRequests,
		operateWaitGroup:         new(sync.WaitGroup),
		shutdownOperateBroadcast: make(chan struct{}),
		// A buffer allows at least one signal to be sent even when the receiver is
		// not listening. Senders should not block.
		signalPortForwardFailure: make(chan struct{}, 1),
		dialStats:                dialResult.dialStats,
		// Buffer allows SetClientVerificationPayload to submit one new payload
		// without blocking or dropping it.
		newClientVerificationPayload: make(chan string, 1),
	}

	// Create a new Psiphon API server context for this tunnel. This includes
	// performing a handshake request. If the handshake fails, this establishment
	// fails.
	if !config.DisableApi {
		NoticeInfo("starting server context for %s", tunnel.serverEntry.IpAddress)
		tunnel.serverContext, err = NewServerContext(tunnel, sessionId)
		if err != nil {
			return nil, common.ContextError(
				fmt.Errorf("error starting server context for %s: %s",
					tunnel.serverEntry.IpAddress, err))
		}
	}

	// establishDuration is the elapsed time between the controller starting tunnel
	// establishment and this tunnel being established. The reported value represents
	// how long the user waited between starting the client and having a usable tunnel;
	// or how long between the client detecting an unexpected tunnel disconnect and
	// completing automatic reestablishment.
	//
	// This time period may include time spent unsuccessfully connecting to other
	// servers. Time spent waiting for network connectivity is excluded.
	tunnel.establishDuration = monotime.Since(adjustedEstablishStartTime)

	tunnel.establishedTime = monotime.Now()

	// Now that network operations are complete, cancel interruptibility
	pendingConns.Remove(dialResult.dialConn)

	// Spawn the operateTunnel goroutine, which monitors the tunnel and handles periodic stats updates.
	tunnel.operateWaitGroup.Add(1)
	go tunnel.operateTunnel(tunnelOwner)

	return tunnel, nil
}
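The one-element buffer on signalPortForwardFailure supports a non-blocking send on the failure path; a hedged illustration of that sender-side pattern (the method name is made up for this sketch).

func (tunnel *Tunnel) notePortForwardFailure() {
	select {
	case tunnel.signalPortForwardFailure <- struct{}{}:
	default:
		// A failure signal is already pending; dropping this one is fine,
		// since the receiver only needs to know a recent failure occurred.
	}
}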
Code example #12
func (session *meekSession) touch() {
	atomic.StoreInt64(&session.lastActivity, int64(monotime.Now()))
}
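touch() stores a monotonic timestamp atomically; a hedged sketch of how a session reaper might check it for staleness (the expired method and the MAX_SESSION_STALENESS constant are assumptions for illustration).

func (session *meekSession) expired() bool {
	lastActivity := monotime.Time(atomic.LoadInt64(&session.lastActivity))
	return monotime.Since(lastActivity) > MAX_SESSION_STALENESS
}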
Code example #13
// remoteServerListFetcher fetches an out-of-band list of server entries
// for more tunnel candidates. It fetches when signalled, with retries
// on failure.
func (controller *Controller) remoteServerListFetcher(
	name string,
	fetcher RemoteServerListFetcher,
	signal <-chan struct{},
	retryPeriod, stalePeriod time.Duration) {

	defer controller.runWaitGroup.Done()

	var lastFetchTime monotime.Time

fetcherLoop:
	for {
		// Wait for a signal before fetching
		select {
		case <-signal:
		case <-controller.shutdownBroadcast:
			break fetcherLoop
		}

		// Skip fetch entirely (i.e., send no request at all, even when ETag would save
		// on response size) when a recent fetch was successful
		if lastFetchTime != 0 &&
			lastFetchTime.Add(stalePeriod).After(monotime.Now()) {
			continue
		}

	retryLoop:
		for {
			// Don't attempt to fetch while there is no network connectivity,
			// to avoid alert notice noise.
			if !WaitForNetworkConnectivity(
				controller.config.NetworkConnectivityChecker,
				controller.shutdownBroadcast) {
				break fetcherLoop
			}

			// Pick any active tunnel and make the next fetch attempt. If there's
			// no active tunnel, the untunneledDialConfig will be used.
			tunnel := controller.getNextActiveTunnel()

			err := fetcher(
				controller.config,
				tunnel,
				controller.untunneledDialConfig)

			if err == nil {
				lastFetchTime = monotime.Now()
				break retryLoop
			}

			NoticeAlert("failed to fetch %s remote server list: %s", name, err)

			timeout := time.After(retryPeriod)
			select {
			case <-timeout:
			case <-controller.shutdownBroadcast:
				break fetcherLoop
			}
		}
	}

	NoticeInfo("exiting %s remote server list fetcher", name)
}
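The fetcher parameter suggests a function type along these lines, inferred from the call site above and from the FetchRemoteServerList signature in code example #9; this is an assumption, not confirmed API.

type RemoteServerListFetcher func(
	config *Config,
	tunnel *Tunnel,
	untunneledDialConfig *DialConfig) error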
Code example #14
// establishCandidateGenerator populates the candidate queue with server entries
// from the data store. Server entries are iterated in rank order, so that promoted
// servers with higher rank are priority candidates.
func (controller *Controller) establishCandidateGenerator(impairedProtocols []string) {
	defer controller.establishWaitGroup.Done()
	defer close(controller.candidateServerEntries)

	// establishStartTime is used to calculate and report the
	// client's tunnel establishment duration.
	//
	// networkWaitDuration is the elapsed time spent waiting
	// for network connectivity. This duration will be excluded
	// from reported tunnel establishment duration.
	establishStartTime := monotime.Now()
	var networkWaitDuration time.Duration

	iterator, err := NewServerEntryIterator(controller.config)
	if err != nil {
		NoticeAlert("failed to iterate over candidates: %s", err)
		controller.SignalComponentFailure()
		return
	}
	defer iterator.Close()

	isServerAffinityCandidate := true

	// TODO: reconcile server affinity scheme with multi-tunnel mode
	if controller.config.TunnelPoolSize > 1 {
		isServerAffinityCandidate = false
		close(controller.serverAffinityDoneBroadcast)
	}

loop:
	// Repeat until stopped
	for i := 0; ; i++ {

		networkWaitStartTime := monotime.Now()

		if !WaitForNetworkConnectivity(
			controller.config.NetworkConnectivityChecker,
			controller.stopEstablishingBroadcast,
			controller.shutdownBroadcast) {
			break loop
		}

		networkWaitDuration += monotime.Since(networkWaitStartTime)

		// Send each iterator server entry to the establish workers
		startTime := monotime.Now()
		for {
			serverEntry, err := iterator.Next()
			if err != nil {
				NoticeAlert("failed to get next candidate: %s", err)
				controller.SignalComponentFailure()
				break loop
			}
			if serverEntry == nil {
				// Completed this iteration
				break
			}

			if controller.config.TargetApiProtocol == common.PSIPHON_SSH_API_PROTOCOL &&
				!serverEntry.SupportsSSHAPIRequests() {
				continue
			}

			// Disable impaired protocols. This is only done for the
			// first iteration of the ESTABLISH_TUNNEL_WORK_TIME
			// loop since (a) one iteration should be sufficient to
			// evade the attack; (b) there's a good chance of false
			// positives (such as short tunnel durations due to network
			// hopping on a mobile device).
			// Impaired protocols logic is not applied when
			// config.TunnelProtocol is specified.
			// The edited serverEntry is a temporary copy which is not
			// stored or reused.
			if i == 0 && controller.config.TunnelProtocol == "" {
				serverEntry.DisableImpairedProtocols(impairedProtocols)
				if len(serverEntry.GetSupportedProtocols()) == 0 {
					// Skip this server entry, as it has no supported
					// protocols after disabling the impaired ones
					// TODO: modify ServerEntryIterator to skip these?
					continue
				}
			}

			// adjustedEstablishStartTime is establishStartTime shifted
			// to exclude time spent waiting for network connectivity.

			candidate := &candidateServerEntry{
				serverEntry:                serverEntry,
				isServerAffinityCandidate:  isServerAffinityCandidate,
				adjustedEstablishStartTime: establishStartTime.Add(networkWaitDuration),
			}

			// Note: there must be only one server affinity candidate, as it
			// closes the serverAffinityDoneBroadcast channel.
			isServerAffinityCandidate = false

			// TODO: here we could generate multiple candidates from the
			// server entry when there are many MeekFrontingAddresses.

			select {
			case controller.candidateServerEntries <- candidate:
			case <-controller.stopEstablishingBroadcast:
				break loop
			case <-controller.shutdownBroadcast:
				break loop
			}

			if startTime.Add(ESTABLISH_TUNNEL_WORK_TIME).Before(monotime.Now()) {
				// Start over, after a brief pause, with a new shuffle of the server
				// entries, and potentially some newly fetched server entries.
				break
			}
		}
		// Free up resources now, but don't reset until after the pause.
		iterator.Close()

		// Trigger a fetch remote server list, since we may have failed to
		// connect with all known servers. Don't block sending signal, since
		// this signal may have already been sent.
		// Don't wait for fetch remote to succeed, since it may fail and
		// enter a retry loop and we're better off trying more known servers.
		// TODO: synchronize the fetch response, so it can be incorporated
		// into the server entry iterator as soon as available.
		select {
		case controller.signalFetchRemoteServerList <- *new(struct{}):
		default:
		}

		// Trigger an out-of-band upgrade availability check and download.
		// Since we may have failed to connect, we may benefit from upgrading
		// to a new client version with new circumvention capabilities.
		select {
		case controller.signalDownloadUpgrade <- "":
		default:
		}

		// After a complete iteration of candidate servers, pause before iterating again.
		// This helps avoid some busy wait loop conditions, and also allows some time for
		// network conditions to change. Also allows for fetch remote to complete,
		// in typical conditions (it isn't strictly necessary to wait for this, there will
		// be more rounds if required).
		timeout := time.After(
			time.Duration(*controller.config.EstablishTunnelPausePeriodSeconds) * time.Second)
		select {
		case <-timeout:
			// Retry iterating
		case <-controller.stopEstablishingBroadcast:
			break loop
		case <-controller.shutdownBroadcast:
			break loop
		}

		iterator.Reset()
	}

	NoticeInfo("stopped candidate generator")
}
Code example #15
func run(t *testing.T, rateLimits RateLimits) {

	// Run a local HTTP server which serves large chunks of data

	go func() {

		handler := func(w http.ResponseWriter, r *http.Request) {
			_, _ = ioutil.ReadAll(r.Body)
			testData, _ := MakeSecureRandomBytes(testDataSize)
			w.Write(testData)
		}

		server := &http.Server{
			Addr:    serverAddress,
			Handler: http.HandlerFunc(handler),
		}

		server.ListenAndServe()
	}()

	// TODO: properly synchronize with server startup
	time.Sleep(1 * time.Second)

	// Set up a HTTP client with a throttled connection

	throttledDial := func(network, addr string) (net.Conn, error) {
		conn, err := net.Dial(network, addr)
		if err != nil {
			return conn, err
		}
		return NewThrottledConn(conn, rateLimits), nil
	}

	client := &http.Client{
		Transport: &http.Transport{
			Dial: throttledDial,
		},
	}

	// Upload and download a large chunk of data, and time it

	testData, _ := MakeSecureRandomBytes(testDataSize)
	requestBody := bytes.NewReader(testData)

	startTime := monotime.Now()

	response, err := client.Post("http://"+serverAddress, "application/octet-stream", requestBody)
	if err == nil && response.StatusCode != http.StatusOK {
		response.Body.Close()
		err = fmt.Errorf("unexpected response code: %d", response.StatusCode)
	}
	if err != nil {
		t.Fatalf("request failed: %s", err)
	}
	defer response.Body.Close()

	// Test: elapsed upload time must reflect rate limit

	checkElapsedTime(t, testDataSize, rateLimits.WriteBytesPerSecond, monotime.Since(startTime))

	startTime = monotime.Now()

	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		t.Fatalf("read response failed: %s", err)
	}
	if len(body) != testDataSize {
		t.Fatalf("unexpected response size: %d", len(body))
	}

	// Test: elapsed download time must reflect rate limit

	checkElapsedTime(t, testDataSize, rateLimits.ReadBytesPerSecond, monotime.Since(startTime))
}
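A hedged sketch of the checkElapsedTime helper the test relies on: at a given rate limit, transferring n bytes should take at least roughly n/limit seconds. The signature and the tolerance factor are assumptions.

func checkElapsedTime(t *testing.T, n int, bytesPerSecond int64, elapsed time.Duration) {
	if bytesPerSecond <= 0 {
		return // unthrottled
	}
	expected := time.Duration(int64(n)) * time.Second / time.Duration(bytesPerSecond)
	// Allow some slack, but the transfer must not finish much faster than the limit allows.
	if elapsed < expected*9/10 {
		t.Fatalf("transfer faster than rate limit allows: %s < %s", elapsed, expected)
	}
}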
Code example #16
func controllerRun(t *testing.T, runConfig *controllerRunConfig) {

	configFileContents, err := ioutil.ReadFile("controller_test.config")
	if err != nil {
		// Skip, don't fail, if config file is not present
		t.Skipf("error loading configuration file: %s", err)
	}
	config, err := LoadConfig(configFileContents)
	if err != nil {
		t.Fatalf("error processing configuration file: %s", err)
	}

	if runConfig.clientIsLatestVersion {
		config.ClientVersion = "999999999"
	}

	if runConfig.disableEstablishing {
		// Clear remote server list so tunnel cannot be established.
		// TODO: also delete all server entries in the datastore.
		config.RemoteServerListUrl = ""
	}

	if runConfig.disableApi {
		config.DisableApi = true
	}

	config.TunnelPoolSize = runConfig.tunnelPoolSize

	if runConfig.disableUntunneledUpgrade {
		// Disable untunneled upgrade downloader to ensure tunneled case is tested
		config.UpgradeDownloadClientVersionHeader = ""
	}

	if runConfig.useUpstreamProxy && runConfig.disruptNetwork {
		t.Fatalf("cannot use multiple upstream proxies")
	}
	if runConfig.disruptNetwork {
		config.UpstreamProxyUrl = disruptorProxyURL
	} else if runConfig.useUpstreamProxy {
		config.UpstreamProxyUrl = upstreamProxyURL
		config.UpstreamProxyCustomHeaders = upstreamProxyCustomHeaders
	}

	if runConfig.useHostNameTransformer {
		config.HostNameTransformer = &TestHostNameTransformer{}
	}

	// Override client retry throttle values to speed up automated
	// tests and ensure tests complete within fixed deadlines.
	fetchRemoteServerListRetryPeriodSeconds := 0
	config.FetchRemoteServerListRetryPeriodSeconds = &fetchRemoteServerListRetryPeriodSeconds
	downloadUpgradeRetryPeriodSeconds := 0
	config.DownloadUpgradeRetryPeriodSeconds = &downloadUpgradeRetryPeriodSeconds
	establishTunnelPausePeriodSeconds := 1
	config.EstablishTunnelPausePeriodSeconds = &establishTunnelPausePeriodSeconds

	os.Remove(config.UpgradeDownloadFilename)

	config.TunnelProtocol = runConfig.protocol

	err = InitDataStore(config)
	if err != nil {
		t.Fatalf("error initializing datastore: %s", err)
	}

	serverEntryCount := CountServerEntries("", "")

	if runConfig.expectNoServerEntries && serverEntryCount > 0 {
		// TODO: replace expectNoServerEntries with resetServerEntries
		// so tests can run in arbitrary order
		t.Fatalf("unexpected server entries")
	}

	controller, err := NewController(config)
	if err != nil {
		t.Fatalf("error creating controller: %s", err)
	}

	// Monitor notices for "Tunnels" with count > 1, the
	// indication of tunnel establishment success.
	// Also record the selected HTTP proxy port to use
	// when fetching websites through the tunnel.

	httpProxyPort := 0

	tunnelEstablished := make(chan struct{}, 1)
	upgradeDownloaded := make(chan struct{}, 1)
	remoteServerListDownloaded := make(chan struct{}, 1)
	confirmedLatestVersion := make(chan struct{}, 1)

	var clientUpgradeDownloadedBytesCount int32
	var remoteServerListDownloadedBytesCount int32
	var impairedProtocolCount int32
	var impairedProtocolClassification = struct {
		sync.RWMutex
		classification map[string]int
	}{classification: make(map[string]int)}

	SetNoticeOutput(NewNoticeReceiver(
		func(notice []byte) {
			// TODO: log notices without logging server IPs:
			// fmt.Fprintf(os.Stderr, "%s\n", string(notice))
			noticeType, payload, err := GetNotice(notice)
			if err != nil {
				return
			}
			switch noticeType {

			case "ListeningHttpProxyPort":

				httpProxyPort = int(payload["port"].(float64))

			case "ConnectingServer":

				serverProtocol := payload["protocol"].(string)
				if runConfig.protocol != "" && serverProtocol != runConfig.protocol {
					// TODO: wrong goroutine for t.Fatalf()
					t.Fatalf("wrong protocol selected: %s", serverProtocol)
				}

			case "Tunnels":

				count := int(payload["count"].(float64))
				if count > 0 {
					if runConfig.disableEstablishing {
						// TODO: wrong goroutine for t.Fatalf()
						t.Fatalf("tunnel established unexpectedly")
					} else {
						select {
						case tunnelEstablished <- *new(struct{}):
						default:
						}
					}
				}

			case "ClientUpgradeDownloadedBytes":

				atomic.AddInt32(&clientUpgradeDownloadedBytesCount, 1)
				t.Logf("ClientUpgradeDownloadedBytes: %d", int(payload["bytes"].(float64)))

			case "ClientUpgradeDownloaded":

				select {
				case upgradeDownloaded <- *new(struct{}):
				default:
				}

			case "ClientIsLatestVersion":

				select {
				case confirmedLatestVersion <- *new(struct{}):
				default:
				}

			case "RemoteServerListDownloadedBytes":

				atomic.AddInt32(&remoteServerListDownloadedBytesCount, 1)
				t.Logf("RemoteServerListDownloadedBytes: %d", int(payload["bytes"].(float64)))

			case "RemoteServerListDownloaded":

				select {
				case remoteServerListDownloaded <- *new(struct{}):
				default:
				}

			case "ImpairedProtocolClassification":

				classification := payload["classification"].(map[string]interface{})

				impairedProtocolClassification.Lock()
				impairedProtocolClassification.classification = make(map[string]int)
				for k, v := range classification {
					count := int(v.(float64))
					if count >= IMPAIRED_PROTOCOL_CLASSIFICATION_THRESHOLD {
						atomic.AddInt32(&impairedProtocolCount, 1)
					}
					impairedProtocolClassification.classification[k] = count
				}
				impairedProtocolClassification.Unlock()

			case "ActiveTunnel":

				serverProtocol := payload["protocol"].(string)

				classification := make(map[string]int)
				impairedProtocolClassification.RLock()
				for k, v := range impairedProtocolClassification.classification {
					classification[k] = v
				}
				impairedProtocolClassification.RUnlock()

				count, ok := classification[serverProtocol]
				if ok && count >= IMPAIRED_PROTOCOL_CLASSIFICATION_THRESHOLD {
					// TODO: wrong goroutine for t.Fatalf()
					t.Fatalf("unexpected tunnel using impaired protocol: %s, %+v",
						serverProtocol, classification)
				}

			}
		}))

	// Run controller, which establishes tunnels

	shutdownBroadcast := make(chan struct{})
	controllerWaitGroup := new(sync.WaitGroup)
	controllerWaitGroup.Add(1)
	go func() {
		defer controllerWaitGroup.Done()
		controller.Run(shutdownBroadcast)
	}()

	defer func() {
		// Test: shutdown must complete within 20 seconds

		close(shutdownBroadcast)

		shutdownTimeout := time.NewTimer(20 * time.Second)

		shutdownOk := make(chan struct{}, 1)
		go func() {
			controllerWaitGroup.Wait()
			shutdownOk <- *new(struct{})
		}()

		select {
		case <-shutdownOk:
		case <-shutdownTimeout.C:
			t.Fatalf("controller shutdown timeout exceeded")
		}
	}()

	if !runConfig.disableEstablishing {

		// Test: tunnel must be established within 120 seconds

		establishTimeout := time.NewTimer(120 * time.Second)

		select {
		case <-tunnelEstablished:

		case <-establishTimeout.C:
			t.Fatalf("tunnel establish timeout exceeded")
		}

		// Test: if starting with no server entries, a fetch remote
		// server list must have succeeded. With disruptNetwork, the
		// fetch must have been resumed at least once.

		if serverEntryCount == 0 {
			select {
			case <-remoteServerListDownloaded:
			default:
				t.Fatalf("expected remote server list downloaded")
			}

			if runConfig.disruptNetwork {
				count := atomic.LoadInt32(&remoteServerListDownloadedBytesCount)
				if count <= 1 {
					t.Fatalf("unexpected remote server list download progress: %d", count)
				}
			}
		}

		// Test: fetch website through tunnel

		// Allow for known race condition described in NewHttpProxy():
		time.Sleep(1 * time.Second)

		fetchAndVerifyWebsite(t, httpProxyPort)

		// Test: run for duration, periodically using the tunnel to
		// ensure failed tunnel detection, and ultimately hitting
		// impaired protocol checks.

		startTime := monotime.Now()

		for {

			time.Sleep(1 * time.Second)
			useTunnel(t, httpProxyPort)

			if startTime.Add(runConfig.runDuration).Before(monotime.Now()) {
				break
			}
		}

		// Test: with disruptNetwork, impaired protocols should be exercised

		if runConfig.runDuration > 0 && runConfig.disruptNetwork {
			count := atomic.LoadInt32(&impairedProtocolCount)
			if count <= 0 {
				t.Fatalf("unexpected impaired protocol count: %d", count)
			} else {
				impairedProtocolClassification.RLock()
				t.Logf("impaired protocol classification: %+v",
					impairedProtocolClassification.classification)
				impairedProtocolClassification.RUnlock()
			}
		}
	}

	// Test: upgrade check/download must complete within 180 seconds

	expectUpgrade := !runConfig.disableApi && !runConfig.disableUntunneledUpgrade

	if expectUpgrade {
		upgradeTimeout := time.NewTimer(180 * time.Second)

		select {
		case <-upgradeDownloaded:
			// TODO: verify downloaded file
			if runConfig.clientIsLatestVersion {
				t.Fatalf("upgrade downloaded unexpectedly")
			}

			// Test: with disruptNetwork, must be multiple download progress notices

			if runConfig.disruptNetwork {
				count := atomic.LoadInt32(&clientUpgradeDownloadedBytesCount)
				if count <= 1 {
					t.Fatalf("unexpected upgrade download progress: %d", count)
				}
			}

		case <-confirmedLatestVersion:
			if !runConfig.clientIsLatestVersion {
				t.Fatalf("confirmed latest version unexpectedly")
			}

		case <-upgradeTimeout.C:
			t.Fatalf("upgrade download timeout exceeded")
		}
	}
}
Code example #17
func TestActivityMonitoredConn(t *testing.T) {
	buffer := make([]byte, 1024)

	conn, err := NewActivityMonitoredConn(
		&dummyConn{},
		200*time.Millisecond,
		true,
		nil)
	if err != nil {
		t.Fatalf("NewActivityMonitoredConn failed")
	}

	realStartTime := time.Now().UTC()

	monotonicStartTime := monotime.Now()

	time.Sleep(100 * time.Millisecond)

	_, err = conn.Read(buffer)
	if err != nil {
		t.Fatalf("read before initial timeout failed")
	}

	time.Sleep(100 * time.Millisecond)

	_, err = conn.Read(buffer)
	if err != nil {
		t.Fatalf("previous read failed to extend timeout")
	}

	time.Sleep(100 * time.Millisecond)

	_, err = conn.Write(buffer)
	if err != nil {
		t.Fatalf("previous read failed to extend timeout")
	}

	time.Sleep(100 * time.Millisecond)

	_, err = conn.Read(buffer)
	if err != nil {
		t.Fatalf("previous write failed to extend timeout")
	}

	lastSuccessfulReadTime := monotime.Now()

	time.Sleep(100 * time.Millisecond)

	_, err = conn.Write(buffer)
	if err != nil {
		t.Fatalf("previous read failed to extend timeout")
	}

	time.Sleep(300 * time.Millisecond)

	_, err = conn.Read(buffer)
	if err != iotest.ErrTimeout {
		t.Fatalf("failed to timeout")
	}

	if realStartTime.Round(time.Millisecond) != conn.GetStartTime().Round(time.Millisecond) {
		t.Fatalf("unexpected GetStartTime")
	}

	if int64(lastSuccessfulReadTime)/int64(time.Millisecond) !=
		int64(conn.GetLastActivityMonotime())/int64(time.Millisecond) {
		t.Fatalf("unexpected GetLastActivityTime")
	}

	diff := lastSuccessfulReadTime.Sub(monotonicStartTime).Nanoseconds() - conn.GetActiveDuration().Nanoseconds()
	if diff < 0 {
		diff = -diff
	}
	if diff > (1 * time.Millisecond).Nanoseconds() {
		t.Fatalf("unexpected GetActiveDuration")
	}
}
Code example #18
// roundTrip configures and makes the actual HTTP POST request
func (meek *MeekConn) roundTrip(sendPayload []byte) (io.ReadCloser, error) {

	// The retry mitigates intermittent failures between the client and front/server.
	//
	// Note: Retry will only be effective if entire request failed (underlying transport protocol
	// such as SSH will fail if extra bytes are replayed in either direction due to partial relay
	// success followed by retry).
	// At least one retry is always attempted. We retry when still within a brief deadline and wait
	// for a short time before re-dialing.
	//
	// TODO: in principle, we could retry for min(TUNNEL_WRITE_TIMEOUT, meek-server.MAX_SESSION_STALENESS),
	// i.e., as long as the underlying tunnel has not timed out and as long as the server has not
	// expired the current meek session. Presently not doing this to avoid excessive connection attempts
	// through the first hop. In addition, this will require additional support for timely shutdown.
	retries := uint(0)
	retryDeadline := monotime.Now().Add(MEEK_ROUND_TRIP_RETRY_DEADLINE)

	var err error
	var response *http.Response
	for {

		var request *http.Request
		request, err = http.NewRequest("POST", meek.url.String(), bytes.NewReader(sendPayload))
		if err != nil {
			// Don't retry when can't initialize a Request
			break
		}

		// Don't use the default user agent ("Go 1.1 package http").
		// For now, just omit the header (net/http/request.go: "may be blank to not send the header").
		request.Header.Set("User-Agent", "")

		request.Header.Set("Content-Type", "application/octet-stream")

		// Set additional headers to the HTTP request using the same method we use for adding
		// custom headers to HTTP proxy requests
		for name, value := range meek.additionalHeaders {
			// hack around special case of "Host" header
			// https://golang.org/src/net/http/request.go#L474
			// using URL.Opaque, see URL.RequestURI() https://golang.org/src/net/url/url.go#L915
			if name == "Host" {
				if len(value) > 0 {
					if request.URL.Opaque == "" {
						request.URL.Opaque = request.URL.Scheme + "://" + request.Host + request.URL.RequestURI()
					}
					request.Host = value[0]
				}
			} else {
				request.Header[name] = value
			}
		}

		request.AddCookie(meek.cookie)

		// The http.Transport.RoundTrip is run in a goroutine to enable cancelling a request in-flight.
		type roundTripResponse struct {
			response *http.Response
			err      error
		}
		roundTripResponseChannel := make(chan *roundTripResponse, 1)
		roundTripWaitGroup := new(sync.WaitGroup)
		roundTripWaitGroup.Add(1)
		go func() {
			defer roundTripWaitGroup.Done()
			r, err := meek.transport.RoundTrip(request)
			roundTripResponseChannel <- &roundTripResponse{r, err}
		}()
		select {
		case roundTripResponse := <-roundTripResponseChannel:
			response = roundTripResponse.response
			err = roundTripResponse.err
		case <-meek.broadcastClosed:
			meek.transport.CancelRequest(request)
			return nil, nil
		}
		roundTripWaitGroup.Wait()

		if err == nil {
			break
		}

		if retries >= 1 && monotime.Now().After(retryDeadline) {
			break
		}
		retries += 1

		time.Sleep(MEEK_ROUND_TRIP_RETRY_DELAY)
	}
	if err != nil {
		return nil, common.ContextError(err)
	}
	if response.StatusCode != http.StatusOK {
		return nil, common.ContextError(fmt.Errorf("http request failed %d", response.StatusCode))
	}
	// Observe response cookies for the meek session key token.
	// Once found, it must be used for all subsequent requests made to the server.
	for _, c := range response.Cookies() {
		if meek.cookie.Name == c.Name {
			meek.cookie.Value = c.Value
			break
		}
	}
	return response.Body, nil
}
Code example #19
func (sshClient *sshClient) handleTCPChannel(
	hostToConnect string,
	portToConnect int,
	newChannel ssh.NewChannel) {

	isWebServerPortForward := false
	config := sshClient.sshServer.support.Config
	if config.WebServerPortForwardAddress != "" {
		destination := net.JoinHostPort(hostToConnect, strconv.Itoa(portToConnect))
		if destination == config.WebServerPortForwardAddress {
			isWebServerPortForward = true
			if config.WebServerPortForwardRedirectAddress != "" {
				// Note: redirect format is validated when config is loaded
				host, portStr, _ := net.SplitHostPort(config.WebServerPortForwardRedirectAddress)
				port, _ := strconv.Atoi(portStr)
				hostToConnect = host
				portToConnect = port
			}
		}
	}

	if !isWebServerPortForward && !sshClient.isPortForwardPermitted(
		portForwardTypeTCP, hostToConnect, portToConnect) {

		sshClient.rejectNewChannel(
			newChannel, ssh.Prohibited, "port forward not permitted")
		return
	}

	var bytesUp, bytesDown int64
	sshClient.openedPortForward(portForwardTypeTCP)
	defer func() {
		sshClient.closedPortForward(
			portForwardTypeTCP, atomic.LoadInt64(&bytesUp), atomic.LoadInt64(&bytesDown))
	}()

	// TOCTOU note: important to increment the port forward count (via
	// openPortForward) _before_ checking isPortForwardLimitExceeded
	// otherwise, the client could potentially consume excess resources
	// by initiating many port forwards concurrently.
	// TODO: close LRU connection (after successful Dial) instead of
	// rejecting new connection?
	if maxCount, exceeded := sshClient.isPortForwardLimitExceeded(portForwardTypeTCP); exceeded {

		// Close the oldest TCP port forward. CloseOldest() closes
		// the conn and the port forward's goroutine will complete
		// the cleanup asynchronously.
		//
		// Some known limitations:
		//
		// - CloseOldest() closes the upstream socket but does not
		//   clean up all resources associated with the port forward. These
		//   include the goroutine(s) relaying traffic as well as the SSH
		//   channel. Closing the socket will interrupt the goroutines which
		//   will then complete the cleanup. But, since the full cleanup is
		//   asynchronous, there exists a possibility that a client can consume
		//   more than max port forward resources -- just not upstream sockets.
		//
		// - An LRU list entry for this port forward is not added until
		//   after the dial completes, but the port forward is counted
		//   towards max limits. This means many dials in progress will
		//   put established connections in jeopardy.
		//
		// - We're closing the oldest open connection _before_ successfully
		//   dialing the new port forward. This means we are potentially
		//   discarding a good connection to make way for a failed connection.
		//   We cannot simply dial first and still maintain a limit on
		//   resources used, so to address this we'd need to add some
		//   accounting for connections still establishing.

		sshClient.tcpPortForwardLRU.CloseOldest()

		log.WithContextFields(
			LogFields{
				"maxCount": maxCount,
			}).Debug("closed LRU TCP port forward")
	}

	// Dial the target remote address. This is done in a goroutine to
	// ensure the shutdown signal is handled immediately.

	remoteAddr := fmt.Sprintf("%s:%d", hostToConnect, portToConnect)

	log.WithContextFields(LogFields{"remoteAddr": remoteAddr}).Debug("dialing")

	type dialTcpResult struct {
		conn net.Conn
		err  error
	}

	resultChannel := make(chan *dialTcpResult, 1)
	dialStartTime := monotime.Now()

	go func() {
		// TODO: on EADDRNOTAVAIL, temporarily suspend new clients
		// TODO: IPv6 support
		conn, err := net.DialTimeout(
			"tcp4", remoteAddr, SSH_TCP_PORT_FORWARD_DIAL_TIMEOUT)
		resultChannel <- &dialTcpResult{conn, err}
	}()

	var result *dialTcpResult
	select {
	case result = <-resultChannel:
	case <-sshClient.stopBroadcast:
		// Note: may leave dial in progress (TODO: use DialContext to cancel)
		return
	}

	sshClient.updateQualityMetrics(
		result.err == nil, monotime.Since(dialStartTime))

	if result.err != nil {
		sshClient.rejectNewChannel(newChannel, ssh.ConnectionFailed, result.err.Error())
		return
	}

	// The upstream TCP port forward connection has been established. Schedule
	// some cleanup and notify the SSH client that the channel is accepted.

	fwdConn := result.conn
	defer fwdConn.Close()

	fwdChannel, requests, err := newChannel.Accept()
	if err != nil {
		log.WithContextFields(LogFields{"error": err}).Warning("accept new channel failed")
		return
	}
	go ssh.DiscardRequests(requests)
	defer fwdChannel.Close()

	// ActivityMonitoredConn monitors the TCP port forward I/O and updates
	// its LRU status. ActivityMonitoredConn also times out I/O on the port
	// forward if both reads and writes have been idle for the specified
	// duration.

	lruEntry := sshClient.tcpPortForwardLRU.Add(fwdConn)
	defer lruEntry.Remove()

	fwdConn, err = common.NewActivityMonitoredConn(
		fwdConn,
		sshClient.idleTCPPortForwardTimeout(),
		true,
		lruEntry)
	if err != nil {
		log.WithContextFields(LogFields{"error": err}).Error("NewActivityMonitoredConn failed")
		return
	}

	// Relay channel to forwarded connection.

	log.WithContextFields(LogFields{"remoteAddr": remoteAddr}).Debug("relaying")

	// TODO: relay errors to fwdChannel.Stderr()?
	relayWaitGroup := new(sync.WaitGroup)
	relayWaitGroup.Add(1)
	go func() {
		defer relayWaitGroup.Done()
		// io.Copy allocates a 32K temporary buffer, and each port forward relay uses
		// two of these buffers; using io.CopyBuffer with a smaller buffer reduces the
		// overall memory footprint.
		bytes, err := io.CopyBuffer(
			fwdChannel, fwdConn, make([]byte, SSH_TCP_PORT_FORWARD_COPY_BUFFER_SIZE))
		atomic.AddInt64(&bytesDown, bytes)
		if err != nil && err != io.EOF {
			// Debug since errors such as "connection reset by peer" occur during normal operation
			log.WithContextFields(LogFields{"error": err}).Debug("downstream TCP relay failed")
		}
		// Interrupt upstream io.Copy when downstream is shutting down.
		// TODO: this is done to quickly cleanup the port forward when
		// fwdConn has a read timeout, but is it clean -- upstream may still
		// be flowing?
		fwdChannel.Close()
	}()
	bytes, err := io.CopyBuffer(
		fwdConn, fwdChannel, make([]byte, SSH_TCP_PORT_FORWARD_COPY_BUFFER_SIZE))
	atomic.AddInt64(&bytesUp, bytes)
	if err != nil && err != io.EOF {
		log.WithContextFields(LogFields{"error": err}).Debug("upstream TCP relay failed")
	}
	// Shutdown special case: fwdChannel will be closed and return EOF when
	// the SSH connection is closed, but we need to explicitly close fwdConn
	// to interrupt the downstream io.Copy, which may be blocked on a
	// fwdConn.Read().
	fwdConn.Close()

	relayWaitGroup.Wait()

	log.WithContextFields(
		LogFields{
			"remoteAddr": remoteAddr,
			"bytesUp":    atomic.LoadInt64(&bytesUp),
			"bytesDown":  atomic.LoadInt64(&bytesDown)}).Debug("exiting")
}