Example #1
File: worker_test.go Project: bac/juju
func (s *statusHistoryPrunerSuite) TestWorkerWontCallPruneBeforeFiringTimer(c *gc.C) {
	fakeTimer := newMockTimer(coretesting.LongWait)

	fakeTimerFunc := func(d time.Duration) worker.PeriodicTimer {
		// The timer should be constructed with 0 because we intend it
		// to fire once before waiting.
		c.Assert(d, gc.Equals, 0*time.Nanosecond)
		return fakeTimer
	}
	facade := newFakeFacade()
	conf := statushistorypruner.Config{
		Facade:         facade,
		MaxHistoryTime: 1 * time.Second,
		MaxHistoryMB:   3,
		PruneInterval:  coretesting.ShortWait,
		NewTimer:       fakeTimerFunc,
	}

	pruner, err := statushistorypruner.New(conf)
	c.Check(err, jc.ErrorIsNil)
	s.AddCleanup(func(*gc.C) {
		c.Assert(worker.Stop(pruner), jc.ErrorIsNil)
	})

	select {
	case <-facade.passedMaxHistoryMB:
		c.Fatal("called before firing timer.")
	case <-time.After(coretesting.LongWait):
	}
}
Example #2
File: worker_test.go Project: bac/juju
func (s *Suite) TestSUCCESSMinionWaitTimeout(c *gc.C) {
	// The SUCCESS phase is special: even if some minions fail to
	// report, the migration should continue. There's no turning
	// back from SUCCESS.
	s.facade.queueStatus(s.makeStatus(coremigration.SUCCESS))

	worker, err := migrationmaster.New(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, worker)

	select {
	case <-s.clock.Alarms():
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for clock.After call")
	}

	// Move time ahead in order to trigger timeout.
	s.clock.Advance(15 * time.Minute)

	err = workertest.CheckKilled(c, worker)
	c.Assert(err, gc.Equals, migrationmaster.ErrMigrated)

	s.stub.CheckCalls(c, joinCalls(
		watchStatusLockdownCalls,
		[]jujutesting.StubCall{
			{"facade.WatchMinionReports", nil},
			{"facade.SetPhase", []interface{}{coremigration.LOGTRANSFER}},
			{"facade.SetPhase", []interface{}{coremigration.REAP}},
			{"facade.Reap", nil},
			{"facade.SetPhase", []interface{}{coremigration.DONE}},
		},
	))
}
Example #3
File: connected_test.go Project: bac/juju
func assertNoSignal(c *gc.C, signal <-chan struct{}) {
	select {
	case <-signal:
		c.Fatal("unexpected signal")
	case <-time.After(coretesting.ShortWait):
	}
}
Example #4
File: connected_test.go Project: bac/juju
func assertSignal(c *gc.C, signal <-chan struct{}) {
	select {
	case <-signal:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for signal")
	}
}
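A minimal usage sketch for helpers like assertSignal above: start the code under test in a goroutine and require its completion signal within coretesting.LongWait. The suite, test name, and done channel below are illustrative assumptions, not taken from the juju tests.
func (s *someSuite) TestWorkCompletes(c *gc.C) {
	// done is a hypothetical channel closed by the code under test.
	done := make(chan struct{})
	go func() {
		// ... run the code under test ...
		close(done)
	}()
	// Fails the test if nothing arrives within coretesting.LongWait.
	assertSignal(c, done)
}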
Example #5
File: unit_test.go Project: makyo/juju
func (s *UnitSuite) TestChangeConfig(c *gc.C) {
	config := FakeAgentConfig{}
	configChanged := voyeur.NewValue(true)
	a := UnitAgent{
		AgentConf:        config,
		configChangedVal: configChanged,
	}

	var mutateCalled bool
	mutate := func(config agent.ConfigSetter) error {
		mutateCalled = true
		return nil
	}

	configChangedCh := make(chan bool)
	watcher := configChanged.Watch()
	watcher.Next() // consume initial event
	go func() {
		configChangedCh <- watcher.Next()
	}()

	err := a.ChangeConfig(mutate)
	c.Assert(err, jc.ErrorIsNil)

	c.Check(mutateCalled, jc.IsTrue)
	select {
	case result := <-configChangedCh:
		c.Check(result, jc.IsTrue)
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for config changed signal")
	}
}
Example #6
func (s *InstanceModeSuite) TestStartWithStateOpenPortsBroken(c *gc.C) {
	svc := s.AddTestingService(c, "wordpress", s.charm)
	err := svc.SetExposed()
	c.Assert(err, jc.ErrorIsNil)
	u, m := s.addUnit(c, svc)
	inst := s.startInstance(c, m)

	err = u.OpenPort("tcp", 80)
	c.Assert(err, jc.ErrorIsNil)

	// Nothing open without firewaller.
	s.assertPorts(c, inst, m.Id(), nil)
	dummy.SetInstanceBroken(inst, "OpenPorts")

	// Starting the firewaller should attempt to open the ports,
	// and fail due to the method being broken.
	fw, err := firewaller.NewFirewaller(s.firewaller)
	c.Assert(err, jc.ErrorIsNil)

	errc := make(chan error, 1)
	go func() { errc <- fw.Wait() }()
	s.BackingState.StartSync()
	select {
	case err := <-errc:
		c.Assert(err, gc.ErrorMatches,
			`cannot respond to units changes for "machine-1": dummyInstance.OpenPorts is broken`)
	case <-time.After(coretesting.LongWait):
		fw.Kill()
		fw.Wait()
		c.Fatal("timed out waiting for firewaller to stop")
	}
}
Example #7
File: pinger_test.go Project: bac/juju
func waitForClock(c *gc.C, clock *testing.Clock) {
	select {
	case <-clock.Alarms():
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for clock")
	}
}
Example #8
File: mock_test.go Project: Pankov404/juju
func assertNoHooks(c *gc.C, hooks <-chan hook.Info) {
	select {
	case <-hooks:
		c.Fatal("unexpected hook")
	case <-time.After(testing.ShortWait):
	}
}
Example #9
File: util_test.go Project: bac/juju
// waitForWorkers waits for the given target workers to be started, returning
// all workers started while waiting.
func (r *fakeSingularRunner) waitForWorkers(c *gc.C, targets []string) []string {
	var seen []string
	seenTargets := make(map[string]bool)
	numSeenTargets := 0
	timeout := time.After(coretesting.LongWait)
	for {
		select {
		case workerName := <-r.startC:
			c.Logf("worker %q started; workers seen so far: %+v (len: %d, len(targets): %d)", workerName, seen, len(seen), len(targets))
			if seenTargets[workerName] {
				c.Fatal("worker started twice: " + workerName)
			}
			seenTargets[workerName] = true
			numSeenTargets++
			seen = append(seen, workerName)
			if numSeenTargets == len(targets) {
				c.Logf("all expected target workers started: %+v", seen)
				return seen
			}
			c.Logf("still waiting for workers %+v to start; numSeenTargets=%d", targets, numSeenTargets)
		case <-timeout:
			c.Fatalf("timed out waiting for %v", targets)
		}
	}
}
Example #10
func (s *ManifoldSuite) TestConcurrentLockers(c *gc.C) {
	worker, err := s.manifold.Start(s.getResource)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(worker, gc.NotNil)
	defer kill(worker)

	var locker charmdir.Locker
	err = s.manifold.Output(worker, &locker)
	c.Check(err, jc.ErrorIsNil)

	nlockers := 10
	ch := make(chan struct{}, nlockers)
	for i := 0; i < nlockers; i++ {
		go func() {
			locker.SetAvailable(true)
			locker.SetAvailable(false)
			ch <- struct{}{}
		}()
	}
	for i := 0; i < nlockers; i++ {
		select {
		case <-ch:
		case <-time.After(coretesting.LongWait):
			c.Fatal("timed out waiting to confirm locker worker exit")
		}
	}
}
Example #11
File: worker_test.go Project: bac/juju
func (s *suite) TestPrunesOldLogs(c *gc.C) {
	maxLogAge := 24 * time.Hour
	noPruneMB := int(1e9)
	s.StartWorker(c, maxLogAge, noPruneMB)

	now := time.Now()
	addLogsToPrune := func(count int) {
		// Add messages beyond the prune threshold.
		tPrune := now.Add(-maxLogAge - 1)
		s.addLogs(c, tPrune, "prune", count)
	}
	addLogsToKeep := func(count int) {
		// Add messages within the prune threshold.
		s.addLogs(c, now, "keep", count)
	}
	for i := 0; i < 10; i++ {
		addLogsToKeep(5)
		addLogsToPrune(5)
	}

	// Wait for all logs with the message "prune" to be removed.
	for attempt := testing.LongAttempt.Start(); attempt.Next(); {
		pruneRemaining, err := s.logsColl.Find(bson.M{"x": "prune"}).Count()
		c.Assert(err, jc.ErrorIsNil)
		if pruneRemaining == 0 {
			// All the "keep" messages should still be there.
			keepCount, err := s.logsColl.Find(bson.M{"x": "keep"}).Count()
			c.Assert(err, jc.ErrorIsNil)
			c.Assert(keepCount, gc.Equals, 50)
			return
		}
	}
	c.Fatal("pruning didn't happen as expected")
}
Example #12
File: bootstrap_test.go Project: bac/juju
func (s *bootstrapSuite) TestBootstrapBuildAgent(c *gc.C) {
	if runtime.GOOS == "windows" {
		c.Skip("issue 1403084: Currently does not work because of jujud problems")
	}

	// Patch out HostArch and FindTools to allow the test to pass on other architectures,
	// such as s390.
	s.PatchValue(&arch.HostArch, func() string { return arch.ARM64 })
	s.PatchValue(bootstrap.FindTools, func(environs.Environ, int, int, string, tools.Filter) (tools.List, error) {
		c.Fatal("should not call FindTools if BuildAgent is specified")
		return nil, errors.NotFoundf("tools")
	})

	env := newEnviron("foo", useDefaultKeys, nil)
	err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{
		BuildAgent:       true,
		AdminSecret:      "admin-secret",
		CAPrivateKey:     coretesting.CAKey,
		ControllerConfig: coretesting.FakeControllerConfig(),
		BuildAgentTarball: func(build bool, ver *version.Number, _ string) (*sync.BuiltAgent, error) {
			c.Logf("BuildAgentTarball version %s", ver)
			c.Assert(build, jc.IsTrue)
			return &sync.BuiltAgent{Dir: c.MkDir()}, nil
		},
	})
	c.Assert(err, jc.ErrorIsNil)
	// Check that the model config has the correct version set.
	cfg := env.instanceConfig.Bootstrap.ControllerModelConfig
	agentVersion, valid := cfg.AgentVersion()
	c.Check(valid, jc.IsTrue)
	c.Check(agentVersion.String(), gc.Equals, "1.99.0.1")
}
Example #13
File: kill_test.go Project: bac/juju
func (s *KillSuite) syncClockAlarm(c *gc.C) {
	select {
	case <-s.clock.Alarms():
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for test clock After call")
	}
}
Example #14
func (s *controllerSuite) TestWatchAllModels(c *gc.C) {
	watcherId, err := s.controller.WatchAllModels()
	c.Assert(err, jc.ErrorIsNil)

	watcherAPI_, err := apiserver.NewAllWatcher(facadetest.Context{
		State_:     s.State,
		Resources_: s.resources,
		Auth_:      s.authorizer,
		ID_:        watcherId.AllWatcherId,
	})
	c.Assert(err, jc.ErrorIsNil)
	watcherAPI := watcherAPI_.(*apiserver.SrvAllWatcher)
	defer func() {
		err := watcherAPI.Stop()
		c.Assert(err, jc.ErrorIsNil)
	}()

	resultC := make(chan params.AllWatcherNextResults)
	go func() {
		result, err := watcherAPI.Next()
		c.Assert(err, jc.ErrorIsNil)
		resultC <- result
	}()

	select {
	case result := <-resultC:
		// Expect to see the initial environment be reported.
		deltas := result.Deltas
		c.Assert(deltas, gc.HasLen, 1)
		envInfo := deltas[0].Entity.(*multiwatcher.ModelInfo)
		c.Assert(envInfo.ModelUUID, gc.Equals, s.State.ModelUUID())
	case <-time.After(testing.LongWait):
		c.Fatal("timed out")
	}
}
Example #15
File: syslog_test.go Project: bac/juju
func (s *syslogSuite) sendRecord(c *gc.C, rec *logsender.LogRecord) {
	select {
	case s.logsCh <- rec:
	case <-time.After(coretesting.LongWait):
		c.Fatal(`timed out "sending" message`)
	}
}
Example #16
func (s *ManifoldSuite) TestStatusWorkerRunsHookOnChanges(c *gc.C) {
	msClient := &stubMeterStatusClient{stub: s.stub, changes: make(chan struct{})}
	s.PatchValue(meterstatus.NewMeterStatusClient,
		func(_ base.APICaller, _ names.UnitTag) msapi.MeterStatusClient {
			return msClient
		})
	s.PatchValue(meterstatus.NewRunner,
		func(_ runner.Context, _ context.Paths) runner.Runner {
			return &stubRunner{stub: s.stub}
		})

	getResource := dt.StubGetResource(s.dummyResources)

	worker, err := s.manifold.Start(getResource)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(worker, gc.NotNil)

	running := make(chan struct{})
	meterstatus.PatchInit(worker, func() { close(running) })

	select {
	case <-running:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for signal")
	}
	msClient.changes <- struct{}{}
	msClient.code = "RED"

	worker.Kill()
	err = worker.Wait()
	c.Assert(err, jc.ErrorIsNil)
	s.stub.CheckCallNames(c, "MeterStatus", "RunHook", "WatchMeterStatus", "MeterStatus", "RunHook")

}
Example #17
func (s *RethinkSuite) TestTableChangesExitNoResults(c *test.C) {
	DB("test").TableDrop("changes").Exec(session)
	DB("test").TableCreate("changes").Exec(session)

	var n int

	res, err := DB("test").Table("changes").Changes().Run(session)
	if err != nil {
		c.Fatal(err.Error())
	}
	c.Assert(res.Type(), test.Equals, "Feed")

	change := make(chan ChangeResponse)

	// Close cursor after one second
	go func() {
		<-time.After(time.Second)
		res.Close()
	}()

	// Listen for changes
	res.Listen(change)
	for range change {
		n++
	}

	c.Assert(n, test.Equals, 0)
}
Example #18
func (s *RethinkSuite) TestClusterRecoverAfterNoNodes(c *test.C) {
	session, err := Connect(ConnectOpts{
		Addresses:           []string{url, url2},
		DiscoverHosts:       true,
		NodeRefreshInterval: time.Second,
	})
	c.Assert(err, test.IsNil)

	t := time.NewTimer(time.Second * 30)
	hasHadZeroNodes := false
	for {
		select {
		// Fail if deadline has passed
		case <-t.C:
			c.Fatal("No node was added to the cluster")
		default:
			// Check if there are no nodes
			if len(session.cluster.GetNodes()) == 0 {
				hasHadZeroNodes = true
			}

			// Pass if another node was added
			if len(session.cluster.GetNodes()) >= 1 && hasHadZeroNodes {
				return
			}
		}
	}
}
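The loop above spins on the empty default case while it polls the cluster. Below is a sketch of the same wait that polls on a ticker instead; nodeCount is a hypothetical func() int standing in for len(session.cluster.GetNodes()), and this helper is not part of the library:
func waitForClusterRecovery(c *test.C, nodeCount func() int) {
	deadline := time.After(30 * time.Second)
	poll := time.NewTicker(100 * time.Millisecond)
	defer poll.Stop()

	sawZeroNodes := false
	for {
		select {
		case <-deadline:
			// Fail if the deadline passes without a node joining.
			c.Fatal("No node was added to the cluster")
		case <-poll.C:
			n := nodeCount()
			if n == 0 {
				sawZeroNodes = true
			}
			if n >= 1 && sawZeroNodes {
				return
			}
		}
	}
}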
Example #19
File: conn.go Project: bac/juju
// CreateUserHome creates a home directory and Juju data home for the given
// username. It is used by setUpConn to create the 'ubuntu' user home after
// RootDir is set, and may be used again later for other users.
func (s *JujuConnSuite) CreateUserHome(c *gc.C, params *UserHomeParams) {
	if s.RootDir == "" {
		c.Fatal("JujuConnSuite.setUpConn required first for RootDir")
	}
	c.Assert(params.Username, gc.Not(gc.Equals), "")
	home := filepath.Join(s.RootDir, "home", params.Username)
	err := os.MkdirAll(home, 0777)
	c.Assert(err, jc.ErrorIsNil)
	err = utils.SetHome(home)
	c.Assert(err, jc.ErrorIsNil)

	jujuHome := filepath.Join(home, ".local", "share")
	err = os.MkdirAll(jujuHome, 0777)
	c.Assert(err, jc.ErrorIsNil)

	previousJujuXDGDataHome := osenv.SetJujuXDGDataHome(jujuHome)
	if params.SetOldHome {
		s.oldJujuXDGDataHome = previousJujuXDGDataHome
	}

	err = os.MkdirAll(s.DataDir(), 0777)
	c.Assert(err, jc.ErrorIsNil)

	jujuModelEnvKey := "JUJU_MODEL"
	if params.ModelEnvKey != "" {
		jujuModelEnvKey = params.ModelEnvKey
	}
	s.PatchEnvironment(osenv.JujuModelEnvKey, jujuModelEnvKey)

	s.ControllerStore = jujuclient.NewFileClientStore()
}
Example #20
File: txnpruner_test.go Project: bac/juju
func (s *TxnPrunerSuite) TestPrunes(c *gc.C) {
	fakePruner := newFakeTransactionPruner()
	testClock := testing.NewClock(time.Now())
	interval := time.Minute
	p := txnpruner.New(fakePruner, interval, testClock)
	defer p.Kill()

	select {
	case <-testClock.Alarms():
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for worker to stat")
	}
	c.Logf("pruner running and waiting: %s (%s)", testClock.Now(), time.Now())
	// Show that we prune every minute
	for i := 0; i < 5; i++ {
		testClock.Advance(interval)
		c.Logf("loop %d: %s (%s)", i, testClock.Now(), time.Now())
		select {
		case <-fakePruner.pruneCh:
		case <-time.After(coretesting.LongWait):
			c.Fatal("timed out waiting for pruning to happen")
		}
		// Now we need to wait for the txn pruner to call clock.After again
		// before we advance the clock, or it will be waiting for the wrong time.
		select {
		case <-testClock.Alarms():
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for worker to loop around")
		}
	}
}
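The wait-on-Alarms-then-Advance sequence in this example (and in waitForClock and syncClockAlarm above) can be captured in a small helper. This is a sketch assuming the same *testing.Clock used above; the helper is not part of juju:
// advanceTestClock waits until the worker under test is blocked on the test
// clock (i.e. it has called clock.After or equivalent), then advances it.
func advanceTestClock(c *gc.C, clock *testing.Clock, d time.Duration) {
	select {
	case <-clock.Alarms():
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for the worker to wait on the clock")
	}
	clock.Advance(d)
}
Calling it before each Advance makes the "worker must be waiting before time moves" requirement explicit at the call site.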
Example #21
func (s *LoopSuite) TestOnIdle(c *gc.C) {
	onIdleCh := make(chan interface{}, 1)
	s.onIdle = func() error {
		onIdleCh <- nil
		return nil
	}

	done := make(chan interface{}, 1)
	go func() {
		_, err := s.loop()
		done <- err
	}()

	waitChannel(c, onIdleCh, "waiting for onIdle")
	s.watcher.changes <- struct{}{}
	waitChannel(c, onIdleCh, "waiting for onIdle")
	close(s.dying)

	err := waitChannel(c, done, "waiting for loop to exit")
	c.Assert(err, gc.Equals, tomb.ErrDying)

	select {
	case <-onIdleCh:
		c.Fatal("unexpected onIdle call")
	default:
	}
}
Example #22
func (s *PatchedManifoldSuite) TestStatusWorkerDoesNotRerunAfterRestart(c *gc.C) {
	getResource := dt.StubGetResource(s.dummyResources)
	worker, err := s.manifold.Start(getResource)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(worker, gc.NotNil)

	s.msClient.changes <- struct{}{}

	// Kill worker.
	worker.Kill()
	err = worker.Wait()
	c.Assert(err, jc.ErrorIsNil)

	// Restart it.
	worker, err = s.manifold.Start(getResource)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(worker, gc.NotNil)

	running := make(chan struct{})
	meterstatus.PatchInit(worker, func() { close(running) })

	select {
	case <-running:
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for signal")
	}

	worker.Kill()
	err = worker.Wait()

	s.stub.CheckCallNames(c, "MeterStatus", "RunHook", "WatchMeterStatus", "MeterStatus", "MeterStatus", "WatchMeterStatus")
	c.Assert(err, jc.ErrorIsNil)
}
Example #23
func (t *mockStateTracker) assertDoneCalled(c *gc.C) {
	for a := coretesting.LongAttempt.Start(); a.Next(); {
		if t.isDoneCalled() {
			return
		}
	}
	c.Fatal("Done() not called on tracker")
}
Example #24
func (s *UpgradeSuite) TestDowngradeOnMasterWhenOtherStateServerDoesntStartUpgrade(c *gc.C) {
	coretesting.SkipIfWindowsBug(c, "lp:1446885")
	// This test checks that the master triggers a downgrade if one of
	// the other state servers fails to signal it is ready for upgrade.
	//
	// This test is functional, ensuring that the upgrader worker
	// terminates the machine agent with the UpgradeReadyError which
	// makes the downgrade happen.

	// Speed up the watcher frequency to make the test much faster.
	s.PatchValue(&watcher.Period, 200*time.Millisecond)

	// Provide (fake) tools so that the upgrader has something to downgrade to.
	envtesting.AssertUploadFakeToolsVersions(
		c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), s.oldVersion)

	// Only the first machine is going to be ready for upgrade.
	machineIdA, machineIdB, _ := s.createUpgradingStateServers(c)

	// One of the other state servers is ready for upgrade (but machine C isn't).
	info, err := s.State.EnsureUpgradeInfo(machineIdB, s.oldVersion.Number, version.Current)
	c.Assert(err, jc.ErrorIsNil)

	agent := s.newAgentFromMachineId(c, machineIdA)
	defer agent.Stop()

	s.machineIsMaster = true

	var agentErr error
	agentDone := make(chan bool)
	go func() {
		agentErr = agent.Run(nil)
		close(agentDone)
	}()

	select {
	case <-agentDone:
		upgradeReadyErr, ok := agentErr.(*upgrader.UpgradeReadyError)
		if !ok {
			c.Fatalf("didn't see UpgradeReadyError, instead got: %v", agentErr)
		}
		// Confirm that the downgrade is back to the previous version.
		current := version.Binary{
			Number: version.Current,
			Arch:   arch.HostArch(),
			Series: series.HostSeries(),
		}
		c.Assert(upgradeReadyErr.OldTools, gc.Equals, current)
		c.Assert(upgradeReadyErr.NewTools, gc.Equals, s.oldVersion)

	case <-time.After(coretesting.LongWait):
		c.Fatal("machine agent did not exit as expected")
	}

	// UpgradeInfo doc should now be archived.
	err = info.Refresh()
	c.Assert(err, gc.ErrorMatches, "current upgrade info not found")
}
Example #25
func (s *bufferedLogWriterSuite) receiveOne(c *gc.C) *logsender.LogRecord {
	select {
	case rec := <-s.writer.Logs():
		return rec
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for log record")
	}
	panic("should never get here")
}
Example #26
func (s *debugLogDBIntSuite) assertStops(c *gc.C, done chan error, tailer *fakeLogTailer) {
	select {
	case err := <-done:
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(tailer.stopped, jc.IsTrue)
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for request handler to stop")
	}
}
Example #27
func lastConnPointer(c *gc.C, modelUser *state.ModelUser) *time.Time {
	lastConn, err := modelUser.LastConnection()
	if err != nil {
		if state.IsNeverConnectedError(err) {
			return nil
		}
		c.Fatal(err)
	}
	return &lastConn
}
Example #28
func lastConnPointer(c *gc.C, st *state.State, modelUser permission.UserAccess) *time.Time {
	lastConn, err := st.LastModelConnection(modelUser.UserTag)
	if err != nil {
		if state.IsNeverConnectedError(err) {
			return nil
		}
		c.Fatal(err)
	}
	return &lastConn
}
Example #29
func lastLoginPointer(c *gc.C, user *state.User) *time.Time {
	lastLogin, err := user.LastLogin()
	if err != nil {
		if state.IsNeverLoggedInError(err) {
			return nil
		}
		c.Fatal(err)
	}
	return &lastLogin
}
Example #30
File: syslog_test.go Project: bac/juju
func (s *syslogSuite) nextMessage(c *gc.C, received chan rfc5424test.Message) rfc5424test.Message {
	select {
	case msg, ok := <-received:
		c.Assert(ok, jc.IsTrue)
		return msg
	case <-time.After(coretesting.LongWait):
		c.Fatal("timed out waiting for message to be forwarded")
	}
	return rfc5424test.Message{}
}