Example #1
// NewClientServerPipe creates a sandbox communication pipe and attaches a DCCP client and a DCCP
// server to its endpoints. In addition to sending all emits to a standard DCCP log file, it sends a
// copy of all emits to the dup TraceWriter.
func NewClientServerPipe(env *dccp.Env) (clientConn, serverConn *dccp.Conn, clientToServer, serverToClient *headerHalfPipe) {
	llog := dccp.NewAmb("line", env)
	hca, hcb, _ := NewPipe(env, llog, "client", "server")
	ccid := ccid3.CCID3{}

	clog := dccp.NewAmb("client", env)
	clientConn = dccp.NewConnClient(env, clog, hca, ccid.NewSender(env, clog), ccid.NewReceiver(env, clog), 0)

	slog := dccp.NewAmb("server", env)
	serverConn = dccp.NewConnServer(env, slog, hcb, ccid.NewSender(env, slog), ccid.NewReceiver(env, slog))

	return clientConn, serverConn, hca, hcb
}
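The two *headerHalfPipe return values are what make this helper useful in tests: each one controls a single direction of the link, so a test can throttle or delay traffic per direction. Below is a minimal usage sketch; the helper name newShapedPipe is hypothetical, SetWriteRate and SetWriteLatency are the calls used by TestRate and TestRoundtripEstimation later in this section, and the rate and latency constants are assumed to be defined elsewhere in the test package.

// newShapedPipe is a hypothetical helper that builds the client/server pair and
// shapes only the client-to-server direction of the link.
func newShapedPipe(env *dccp.Env) (clientConn, serverConn *dccp.Conn) {
	clientConn, serverConn, clientToServer, serverToClient := NewClientServerPipe(env)
	// Throttle and delay the client-to-server direction only.
	clientToServer.SetWriteRate(rateInterval, ratePacketsPerInterval)
	clientToServer.SetWriteLatency(roundtripLatency)
	// Leave the server-to-client direction unshaped.
	_ = serverToClient
	return clientConn, serverConn
}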
Example #2
// TestRate tests whether a single connection's one-way client-to-server send rate converges to
// the limit imposed by the connection, in the sense that the send rate must:
//	(1) converge and stabilize, and
//	(2) once stable, the rate must
//		(2.a) either be closely below the connection limit,
//		(2.b) or be closely above the connection limit (while maintaining a drop rate below some threshold).
// A two-way test is not necessary, as the congestion mechanisms in the two directions are completely independent.
//
// NOTE: Pipe currently supports rate simulation in packets per time interval. If we want to test behavior
// under variable packet sizes, we need to implement rate simulation in bytes per interval.
func TestRate(t *testing.T) {

	env, _ := NewEnv("rate")
	clientConn, serverConn, clientToServer, _ := NewClientServerPipe(env)

	// Set rate limit on client-to-server connection
	clientToServer.SetWriteRate(rateInterval, ratePacketsPerInterval)

	cchan := make(chan int, 1)
	mtu := clientConn.GetMTU()
	buf := make([]byte, mtu)
	env.Go(func() {
		t0 := env.Now()
		for env.Now()-t0 < rateDuration {
			err := clientConn.Write(buf)
			if err != nil {
				t.Errorf("error writing (%s)", err)
				break
			}
		}
		// Close is necessary because otherwise, if no read timeout is in place, the
		// server side hangs forever on Read.
		clientConn.Close()
		close(cchan)
	}, "test client")

	schan := make(chan int, 1)
	env.Go(func() {
		for {
			_, err := serverConn.Read()
			if err == dccp.ErrEOF {
				break
			} else if err != nil {
				t.Errorf("error reading (%s)", err)
				break
			}
		}
		serverConn.Close()
		close(schan)
	}, "test server")

	_, _ = <-cchan
	_, _ = <-schan

	clientConn.Abort()
	serverConn.Abort()

	env.NewGoJoin("end-of-test", clientConn.Joiner(), serverConn.Joiner()).Join()
	dccp.NewAmb("line", env).E(dccp.EventMatch, "Server and client done.")
	if err := env.Close(); err != nil {
		t.Errorf("error closing runtime (%s)", err)
	}
}
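TestRate relies on the package-level constants rateInterval, ratePacketsPerInterval and rateDuration. A possible definition is sketched below; the names match the identifiers used above, but the concrete values are illustrative only. Times are in nanoseconds, consistent with the 10e9 (10 s) sleeps used by the other tests in this section.

const (
	rateInterval           = 1e9  // assumed: nanoseconds per rate-limiting interval (1 s)
	ratePacketsPerInterval = 100  // assumed: packets the pipe forwards per interval
	rateDuration           = 10e9 // assumed: nanoseconds the client keeps writing (10 s)
)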
Example #3
// TestIdle keeps the connection between a client and a server idle for a few seconds and
// verifies that no unusual behavior occurs.
func TestIdle(t *testing.T) {

	env, _ := NewEnv("idle")
	clientConn, serverConn, _, _ := NewClientServerPipe(env)
	payload := []byte{1, 2, 3}

	cchan := make(chan int, 1)
	env.Go(func() {
		if err := clientConn.Write(payload); err != nil {
			t.Errorf("client write (%s)", err)
		}
		env.Sleep(10e9) // Stay idle for 10 sec
		if err := clientConn.Close(); err != nil && err != dccp.ErrEOF {
			t.Errorf("client close (%s)", err)
		}
		cchan <- 1
		close(cchan)
	}, "test client")

	schan := make(chan int, 1)
	env.Go(func() {
		if err := serverConn.Write(payload); err != nil {
			t.Errorf("server write (%s)", err)
		}
		env.Sleep(10e9) // Stay idle for 10 sec
		if err := serverConn.Close(); err != nil && err != dccp.ErrEOF {
			// XXX why not EOF
			t.Logf("server close (%s)", err)
		}
		schan <- 1
		close(schan)
	}, "test server")

	<-cchan
	<-schan
	clientConn.Abort()
	serverConn.Abort()
	env.NewGoJoin("end-of-test", clientConn.Joiner(), serverConn.Joiner()).Join()

	dccp.NewAmb("line", env).E(dccp.EventMatch, "Server and client done.")
	if err := env.Close(); err != nil {
		t.Errorf("Error closing runtime (%s)", err)
	}
}
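All of the tests in this section finish with the same teardown sequence: abort both connections, wait for their lingering goroutines, emit a closing trace event, and close the runtime. The sketch below factors that sequence into a helper; the name endOfTest is hypothetical, but every call it makes is taken verbatim from the tests shown here.

// endOfTest is a hypothetical helper capturing the shared teardown sequence.
func endOfTest(t *testing.T, env *dccp.Env, clientConn, serverConn *dccp.Conn) {
	// Abort wraps up both connections quickly.
	clientConn.Abort()
	serverConn.Abort()
	// Even after aborting, connection goroutines linger briefly; wait for all of them.
	env.NewGoJoin("end-of-test", clientConn.Joiner(), serverConn.Joiner()).Join()
	// Emit the trace event the tests use to mark completion.
	dccp.NewAmb("line", env).E(dccp.EventMatch, "Server and client done.")
	if err := env.Close(); err != nil {
		t.Errorf("error closing runtime (%s)", err)
	}
}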
Example #4
// TestOpenClose verifies that connect and close handshakes function correctly
func TestOpenClose(t *testing.T) {
	env, _ := NewEnv("openclose")
	clientConn, serverConn, _, _ := NewClientServerPipe(env)

	cchan := make(chan int, 1)
	env.Go(func() {
		env.Sleep(2e9)
		_, err := clientConn.Read()
		if err != dccp.ErrEOF {
			t.Errorf("client read error (%s), expected EBADF", err)
		}
		cchan <- 1
		close(cchan)
	}, "test client")

	schan := make(chan int, 1)
	env.Go(func() {
		env.Sleep(1e9)
		if err := serverConn.Close(); err != nil {
			t.Errorf("server close error (%s)", err)
		}
		schan <- 1
		close(schan)
	}, "test server")

	<-cchan
	<-schan

	// Abort causes both connections to wrap up quickly
	clientConn.Abort()
	serverConn.Abort()
	// However, even aborting leaves various connection goroutines lingering for a short while.
	// The next line ensures that we wait until all goroutines are done.
	env.NewGoJoin("end-of-test", clientConn.Joiner(), serverConn.Joiner()).Join()

	dccp.NewAmb("line", env).E(dccp.EventMatch, "Server and client done.")
	if err := env.Close(); err != nil {
		t.Errorf("Error closing runtime (%s)", err)
	}
}
Example #5
// TestRoundtripEstimation checks that round-trip times are estimated accurately.
func TestRoundtripEstimation(t *testing.T) {
	dccp.InstallCtrlCPanic()

	env, plex := NewEnv("rtt")
	reducer := NewMeasure(env, t)
	plex.Add(reducer)
	plex.Add(newRoundtripCheckpoint(env, t))
	plex.HighlightSamples(ccid3.RoundtripElapsedSample, ccid3.RoundtripReportSample)

	clientConn, serverConn, clientToServer, _ := NewClientServerPipe(env)

	// Roundtrip estimates might be imprecise during long idle periods, as a
	// consequence of the CCID3 design, since precise estimates are not needed
	// during such periods. Therefore, to focus on roundtrip time estimation
	// without saturating the link, we generate sufficiently regular transmissions.

	payload := []byte{1, 2, 3}
	buf := make([]byte, len(payload))

	// In order to isolate roundtrip measurement testing from the complexities
	// of the send rate calculation mechanism, we fix the send rate of both
	// endpoints using the debug flag FixRate.
	clientConn.Amb().Flags().SetUint32("FixRate", roundtripRate)
	serverConn.Amb().Flags().SetUint32("FixRate", roundtripRate)

	// Increase the client-to-server latency from zero to roundtripLatency at the half-time point
	env.Go(func() {
		env.Sleep(roundtripDuration / 2)
		clientToServer.SetWriteLatency(roundtripLatency)
	}, "test controller")

	cchan := make(chan int, 1)
	env.Go(func() {
		t0 := env.Now()
		for env.Now()-t0 < roundtripDuration {
			err := clientConn.Write(buf)
			if err != nil {
				break
			}
		}
		// Close is necessary because otherwise, if no read timeout is in place, the
		// server side hangs forever on Read.
		clientConn.Close()
		close(cchan)
	}, "test client")

	schan := make(chan int, 1)
	env.Go(func() {
		for {
			_, err := serverConn.Read()
			if err != nil {
				break
			}
		}
		close(schan)
	}, "test server")

	_, _ = <-cchan
	_, _ = <-schan

	// Shutdown the connections properly
	clientConn.Abort()
	serverConn.Abort()
	env.NewGoJoin("end-of-test", clientConn.Joiner(), serverConn.Joiner()).Join()
	dccp.NewAmb("line", env).E(dccp.EventMatch, "Server and client done.")
	if err := env.Close(); err != nil {
		t.Errorf("error closing runtime (%s)", err)
	}
}
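TestRoundtripEstimation likewise assumes package-level constants roundtripRate, roundtripDuration and roundtripLatency. A possible definition is sketched below; the names match the identifiers used above, the values and the unit of the FixRate flag are assumptions for illustration, and the durations are in nanoseconds as elsewhere in this section.

const (
	roundtripRate     = 10   // assumed: value for the FixRate debug flag (packets per second)
	roundtripDuration = 20e9 // assumed: nanoseconds of traffic generation (20 s)
	roundtripLatency  = 50e6 // assumed: one-way latency added at half time (50 ms)
)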