Example #1
File: occupy_test.go Project: bac/juju
func (*OccupySuite) TestStartSuccess(c *gc.C) {
	fix := newFixture(c)
	defer fix.TearDown(c)
	c.Check(fix.Guard(c).Unlock(), jc.ErrorIsNil)

	// Start a worker...
	expect := workertest.NewErrorWorker(nil)
	defer workertest.CleanKill(c, expect)
	run := func() (worker.Worker, error) {
		return expect, nil
	}
	worker, err := fortress.Occupy(fix.Guest(c), run, nil)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(worker, gc.Equals, expect)

	// ...and check we can't lockdown again...
	locked := make(chan error, 1)
	go func() {
		locked <- fix.Guard(c).Lockdown(nil)
	}()
	select {
	case err := <-locked:
		c.Fatalf("unexpected Lockdown result: %v", err)
	case <-time.After(coretesting.ShortWait):
	}

	// ...until the worker completes.
	workertest.CleanKill(c, worker)
	select {
	case err := <-locked:
		c.Check(err, jc.ErrorIsNil)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("visit never completed")
	}
}
Example #2
File: aggregate_test.go Project: bac/juju
func (s *aggregateSuite) TestPartialInstanceErrors(c *gc.C) {
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Second

	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	testGetter.err = environs.ErrPartialInstances
	testGetter.newTestInstance("foo", "not foobar", []string{"192.168.1.2"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a checker we can launch in goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string, expectedError error) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		if expectedError == nil {
			c.Check(err, jc.ErrorIsNil)
		} else {
			c.Check(err.Error(), gc.Equals, expectedError.Error())
		}
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch the checkers and wait for them below.
	wg.Add(2)
	go checkInfo("foo", "not foobar", nil)
	go checkInfo("foo2", "", errors.New("instance foo2 not found"))

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the checkers pass.
	wg.Wait()

	// Now kill the worker so we don't risk a race in the following assertions.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the correct length.
	c.Assert(len(testGetter.ids), gc.Equals, 2)

	// Ensure we called instances once.
	// TODO(redir): all this stuff is really crying out to be, e.g.
	// testGetter.CheckOneCall(c, "foo", "foo2") per
	// http://reviews.vapour.ws/r/4885/
	c.Assert(testGetter.counter, gc.Equals, int32(1))
}
Example #3
File: aggregate_test.go Project: bac/juju
// Test that several requests made in a short space of time get batched.
func (s *aggregateSuite) TestMultipleResponseHandling(c *gc.C) {
	// We set up a couple of variables here so that we can use them locally
	// without type assertions. Then we use them in the aggregatorConfig.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Minute
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	// Set up multiple instances to batch.
	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})
	testGetter.newTestInstance("foo2", "not foobar", []string{"192.168.1.2"})
	testGetter.newTestInstance("foo3", "ok-ish", []string{"192.168.1.3"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a closure for tests we can launch in goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		c.Check(err, jc.ErrorIsNil)
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch the checkers and wait for them below.
	wg.Add(2)
	go checkInfo("foo2", "not foobar")
	go checkInfo("foo3", "ok-ish")

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the tests pass.
	wg.Wait()

	// Ensure we kill the worker before looking at our testInstanceGetter to
	// ensure there's no possibility of a race.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the expected contents.
	c.Assert(testGetter.ids, jc.SameContents, []instance.Id{"foo2", "foo3"})

	// Ensure we called instances once and have no errors there.
	c.Assert(testGetter.err, jc.ErrorIsNil)
	c.Assert(testGetter.counter, gc.DeepEquals, int32(1))
}
Example #4
File: worker_test.go Project: bac/juju
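// Stop kills the worker and waits for it to exit; CheckKilled then confirms
// the worker was killed within a reasonable time.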
func (*WorkerSuite) TestStopKills(c *gc.C) {
	w := workertest.NewErrorWorker(nil)
	defer workertest.CleanKill(c, w)

	worker.Stop(w)
	workertest.CheckKilled(c, w)
}
Example #5
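// cleanup kills the worker at the end of a test, tolerating an error result
// only when the fixture is marked dirty.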
func (fix fixture) cleanup(c *gc.C, w worker.Worker) {
	if fix.dirty {
		workertest.DirtyKill(c, w)
	} else {
		workertest.CleanKill(c, w)
	}
}
Example #6
File: engine_test.go Project: bac/juju
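// Install a manifold whose Start blocks until aborted: killing the engine
// should abort the pending start and must not trigger another one.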
func (s *EngineSuite) TestStartAbortOnEngineKill(c *gc.C) {
	s.fix.run(c, func(engine *dependency.Engine) {
		starts := make(chan struct{}, 1000)
		manifold := dependency.Manifold{
			Start: func(context dependency.Context) (worker.Worker, error) {
				starts <- struct{}{}
				select {
				case <-context.Abort():
				case <-time.After(coretesting.LongWait):
					c.Errorf("timed out")
				}
				return nil, errors.New("whatever")
			},
		}
		err := engine.Install("task", manifold)
		c.Assert(err, jc.ErrorIsNil)

		select {
		case <-starts:
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out")
		}
		workertest.CleanKill(c, engine)

		select {
		case <-starts:
			c.Fatalf("unexpected start")
		default:
		}
	})
}
Example #7
File: tracker_test.go Project: bac/juju
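// newTracker returns a leadership tracker that is cleanly killed when the
// test completes.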
func (s *TrackerSuite) newTracker() *leadership.Tracker {
	tracker := s.newTrackerInner()
	s.AddCleanup(func(c *gc.C) {
		workertest.CleanKill(c, tracker)
	})
	return tracker
}
Example #8
File: tracker_test.go Project: bac/juju
func (s *TrackerSuite) TestWaitMinionBecomeMinion(c *gc.C) {
	s.claimer.Stub.SetErrors(nil, coreleadership.ErrClaimDenied, nil)
	tracker := s.newTracker()

	// Check the first ticket stays open.
	assertWaitMinion(c, tracker, false)

	// Wait long enough for a single refresh, to trigger ErrClaimDenied; then
	// check the next ticket is closed.
	s.refreshes(1)
	assertWaitMinion(c, tracker, true)

	// Stop the tracker before trying to look at its stub.
	workertest.CleanKill(c, tracker)

	// Unblock the release goroutine, lest data races.
	s.unblockRelease(c)

	s.claimer.CheckCalls(c, []testing.StubCall{{
		FuncName: "ClaimLeadership",
		Args: []interface{}{
			"led-service", "led-service/123", leaseDuration,
		},
	}, {
		FuncName: "ClaimLeadership",
		Args: []interface{}{
			"led-service", "led-service/123", leaseDuration,
		},
	}, {
		FuncName: "BlockUntilLeadershipReleased",
		Args: []interface{}{
			"led-service",
		},
	}})
}
Example #9
File: tracker_test.go Project: bac/juju
func (s *TrackerSuite) TestWaitLeaderNeverBecomeLeader(c *gc.C) {
	s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil)
	tracker := s.newTracker()

	// Check initial ticket fails.
	assertWaitLeader(c, tracker, false)

	// Get a new ticket and stop the tracker while it's pending.
	ticket := tracker.WaitLeader()
	workertest.CleanKill(c, tracker)

	// Check the ticket got closed without sending true.
	assertTicket(c, ticket, false)
	assertTicket(c, ticket, false)

	// Unblock the release goroutine and stop the tracker before trying to
	// look at its stub.
	s.unblockRelease(c)
	s.claimer.CheckCalls(c, []testing.StubCall{{
		FuncName: "ClaimLeadership",
		Args: []interface{}{
			"led-service", "led-service/123", leaseDuration,
		},
	}, {
		FuncName: "BlockUntilLeadershipReleased",
		Args: []interface{}{
			"led-service",
		},
	}})
}
Example #10
File: tracker_test.go Project: bac/juju
func (s *TrackerSuite) TestWaitLeaderBecomeLeader(c *gc.C) {
	s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil, nil)
	tracker := s.newTracker()

	// Check initial ticket fails.
	assertWaitLeader(c, tracker, false)

	// Unblock the release goroutine...
	s.unblockRelease(c)

	// advance the clock a small amount, but not enough to trigger a check
	s.refreshes(0)

	// ...then check the next ticket succeeds.
	assertWaitLeader(c, tracker, true)

	// Stop the tracker before trying to look at its stub.
	workertest.CleanKill(c, tracker)
	s.claimer.CheckCalls(c, []testing.StubCall{{
		FuncName: "ClaimLeadership",
		Args: []interface{}{
			"led-service", "led-service/123", leaseDuration,
		},
	}, {
		FuncName: "BlockUntilLeadershipReleased",
		Args: []interface{}{
			"led-service",
		},
	}, {
		FuncName: "ClaimLeadership",
		Args: []interface{}{
			"led-service", "led-service/123", leaseDuration,
		},
	}})
}
Example #11
File: tracker_test.go Project: bac/juju
func (s *TrackerSuite) TestOnLeaderFailure(c *gc.C) {
	s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil)
	tracker := s.newTracker()

	// Check the ticket fails.
	assertClaimLeader(c, tracker, false)

	// Stop the tracker before trying to look at its mocks.
	workertest.CleanKill(c, tracker)

	// Unblock the release goroutine, lest data races.
	s.unblockRelease(c)

	s.claimer.CheckCalls(c, []testing.StubCall{{
		FuncName: "ClaimLeadership",
		Args: []interface{}{
			"led-service", "led-service/123", leaseDuration,
		},
	}, {
		FuncName: "BlockUntilLeadershipReleased",
		Args: []interface{}{
			"led-service",
		},
	}})
}
Example #12
File: restart_test.go Project: bac/juju
func (*RestartWorkersSuite) TestSingularManagerRestart(c *gc.C) {
	fix := BasicFixture()
	fix.SW_errors = []error{errors.New("oof"), nil}
	fix.RunRestart(c, func(ctx Context, rw *workers.RestartWorkers) {
		origw := rw.SingularManager()
		w := NextWorker(c, ctx.SWs())
		c.Assert(w, gc.NotNil)
		AssertWorker(c, rw.SingularManager(), w)
		w.Kill()

		clock := ctx.Clock()
		WaitAlarms(c, clock, 1)
		clock.Advance(fiveSeconds)
		w2 := NextWorker(c, ctx.SWs())
		c.Assert(w2, gc.NotNil)
		WaitWorker(c, SM_getter(rw), w2)

		// The new worker should underlie the originally
		// acquired singular manager, so that restarts
		// do not require callers to acquire a new manager.
		AssertWorker(c, origw, w2)

		workertest.CleanKill(c, rw)
	})
}
Example #13
File: reporter_test.go Project: makyo/juju
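// A manifold with a missing dependency never starts; after a clean kill the
// engine report should record the stopped task and dependency.ErrMissing.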
func (s *ReportSuite) TestReportError(c *gc.C) {
	s.fix.run(c, func(engine *dependency.Engine) {
		mh1 := newManifoldHarness("missing")
		manifold := mh1.Manifold()
		err := engine.Install("task", manifold)
		c.Assert(err, jc.ErrorIsNil)
		mh1.AssertNoStart(c)

		workertest.CleanKill(c, engine)
		report := engine.Report()
		c.Check(report, jc.DeepEquals, map[string]interface{}{
			"state": "stopped",
			"error": nil,
			"manifolds": map[string]interface{}{
				"task": map[string]interface{}{
					"state":  "stopped",
					"error":  dependency.ErrMissing,
					"inputs": []string{"missing"},
					"resource-log": []map[string]interface{}{{
						"name":  "missing",
						"type":  "<nil>",
						"error": dependency.ErrMissing,
					}},
					"report": (map[string]interface{})(nil),
				},
			},
		})
	})
}
Example #14
File: worker_test.go Project: bac/juju
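// Feed the watcher a VALIDATION-phase status and verify the minion opens the
// target API, validates the migration, and reports the result.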
func (s *Suite) TestVALIDATION(c *gc.C) {
	s.client.watcher.changes <- watcher.MigrationStatus{
		MigrationId:    "id",
		Phase:          migration.VALIDATION,
		TargetAPIAddrs: addrs,
		TargetCACert:   caCert,
	}
	w, err := migrationminion.New(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)

	s.waitForStubCalls(c, []string{
		"Watch",
		"Lockdown",
		"API open",
		"ValidateMigration",
		"API close",
		"Report",
	})
	s.stub.CheckCall(c, 2, "API open", &api.Info{
		ModelTag: modelTag,
		Tag:      agentTag,
		Password: agentPassword,
		Addrs:    addrs,
		CACert:   caCert,
	})
	s.stub.CheckCall(c, 5, "Report", "id", migration.VALIDATION, true)
}
Example #15
File: pinger_test.go Project: makyo/juju
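// The Start func counts pinger launches; on the fifth start it wires up the
// notify and block channels so the test can observe the restart loop.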
func (s *WorkerSuite) TestNewLoop(c *gc.C) {
	waitChan := make(chan struct{})
	block := make(chan struct{})
	s.clock.setAfter(4)
	count := 0
	s.cfg.Start = func() (presence.Pinger, error) {
		pinger, err := s.start()
		c.Logf("%d", count)
		if count > 3 {
			s.pinger.notify = waitChan
			s.pinger.waitBlock = block
		}
		count++
		return pinger, err
	}

	w, err := presence.New(s.cfg)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)
	defer close(block)
	<-waitChan

	s.stub.CheckCallNames(c,
		"start", "Wait", "After",
		"start", "Wait", "After",
		"start", "Wait", "After",
		"start", "Wait", "After",
		"start", "Wait",
	)
}
Example #16
File: apiserver_test.go Project: bac/juju
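// newServer returns a running API server that is cleanly killed when the
// test completes.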
func (s *apiserverBaseSuite) newServer(c *gc.C, config apiserver.ServerConfig) *apiserver.Server {
	srv := s.newServerNoCleanup(c, config)
	s.AddCleanup(func(c *gc.C) {
		workertest.CleanKill(c, srv)
	})
	return srv
}
Example #17
File: worker_test.go Project: bac/juju
func (s *workerSuite) TestSetMembersErrorIsNotFatal(c *gc.C) {
	coretesting.SkipIfI386(c, "lp:1425569")

	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		st := NewFakeState()
		InitState(c, st, 3, ipVersion)
		st.session.setStatus(mkStatuses("0p 1s 2s", ipVersion))
		var setCount voyeur.Value
		st.errors.setErrorFuncFor("Session.Set", func() error {
			setCount.Set(true)
			return errors.New("sample")
		})
		s.PatchValue(&initialRetryInterval, 10*time.Microsecond)
		s.PatchValue(&maxRetryInterval, coretesting.ShortWait/4)

		w, err := newWorker(st, noPublisher{}, false)
		c.Assert(err, jc.ErrorIsNil)
		defer workertest.CleanKill(c, w)

		// See that the worker is retrying.
		setCountW := setCount.Watch()
		mustNext(c, setCountW)
		mustNext(c, setCountW)
		mustNext(c, setCountW)
	})
}
Example #18
File: worker_test.go Project: bac/juju
func (s *workerSuite) TestWorkerPublishesInstanceIds(c *gc.C) {
	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		s.PatchValue(&pollInterval, coretesting.LongWait+time.Second)
		s.PatchValue(&initialRetryInterval, 5*time.Millisecond)
		s.PatchValue(&maxRetryInterval, initialRetryInterval)

		publishCh := make(chan []instance.Id, 100)

		publish := func(apiServers [][]network.HostPort, instanceIds []instance.Id) error {
			publishCh <- instanceIds
			return nil
		}
		st := NewFakeState()
		InitState(c, st, 3, ipVersion)

		w, err := newWorker(st, PublisherFunc(publish), false)
		c.Assert(err, jc.ErrorIsNil)
		defer workertest.CleanKill(c, w)

		select {
		case instanceIds := <-publishCh:
			c.Assert(instanceIds, jc.SameContents, []instance.Id{"id-10", "id-11", "id-12"})
		case <-time.After(coretesting.LongWait):
			c.Errorf("timed out waiting for publish")
		}
	})
}
Example #19
File: worker_test.go Project: bac/juju
func (s *workerSuite) TestControllersArePublished(c *gc.C) {
	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		publishCh := make(chan [][]network.HostPort)
		publish := func(apiServers [][]network.HostPort, instanceIds []instance.Id) error {
			publishCh <- apiServers
			return nil
		}

		st := NewFakeState()
		InitState(c, st, 3, ipVersion)
		w, err := newWorker(st, PublisherFunc(publish), false)
		c.Assert(err, jc.ErrorIsNil)
		defer workertest.CleanKill(c, w)

		select {
		case servers := <-publishCh:
			AssertAPIHostPorts(c, servers, ExpectedAPIHostPorts(3, ipVersion))
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for publish")
		}

		// Change one of the servers' API addresses and check that it's published.
		newMachine10APIHostPorts := network.NewHostPorts(apiPort, ipVersion.extraHost)
		st.machine("10").setAPIHostPorts(newMachine10APIHostPorts)
		select {
		case servers := <-publishCh:
			expected := ExpectedAPIHostPorts(3, ipVersion)
			expected[0] = newMachine10APIHostPorts
			AssertAPIHostPorts(c, servers, expected)
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for publish")
		}
	})
}
Example #20
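// CleanKill on a worker that never stops should time out and flag a test
// failure; ReallyKill ensures the worker is torn down regardless.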
func (s *Suite) TestCleanKillTimeout(c *gc.C) {
	w := workertest.NewForeverWorker(nil)
	defer w.ReallyKill()

	workertest.CleanKill(c, w)
	s.CheckFailed(c)
}
Example #21
File: worker_test.go Project: bac/juju
func (s *workerSuite) TestAddressChange(c *gc.C) {
	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		st := NewFakeState()
		InitState(c, st, 3, ipVersion)

		memberWatcher := st.session.members.Watch()
		mustNext(c, memberWatcher)
		assertMembers(c, memberWatcher.Value(), mkMembers("0v", ipVersion))

		logger.Infof("starting worker")
		w, err := newWorker(st, noPublisher{}, false)
		c.Assert(err, jc.ErrorIsNil)
		defer workertest.CleanKill(c, w)

		// Wait for the worker to set the initial members.
		mustNext(c, memberWatcher)
		assertMembers(c, memberWatcher.Value(), mkMembers("0v 1 2", ipVersion))

		// Change an address and wait for it to be changed in the
		// members.
		st.machine("11").setStateHostPort(ipVersion.extraHostPort)

		mustNext(c, memberWatcher)
		expectMembers := mkMembers("0v 1 2", ipVersion)
		expectMembers[1].Address = ipVersion.extraHostPort
		assertMembers(c, memberWatcher.Value(), expectMembers)
	})
}
Example #22
File: util_test.go Project: bac/juju
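// kill stops the engine, expecting a clean exit unless the fixture is
// marked dirty.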
func (fix *engineFixture) kill(c *gc.C, engine *dependency.Engine) {
	if fix.dirty {
		workertest.DirtyKill(c, engine)
	} else {
		workertest.CleanKill(c, engine)
	}
}
Example #23
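// createRemovalWatcher starts a machine-removals watcher and registers a
// cleanup that kills it when the test completes.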
func (s *MachineRemovalSuite) createRemovalWatcher(c *gc.C, st *state.State) (
	state.NotifyWatcher, testing.NotifyWatcherC,
) {
	w := st.WatchMachineRemovals()
	s.AddCleanup(func(c *gc.C) { workertest.CleanKill(c, w) })
	return w, testing.NewNotifyWatcherC(c, st, w)
}
Example #24
File: machine_test.go Project: bac/juju
func (s *MachineSuite) TestDyingModelCleanedUp(c *gc.C) {
	st, closer := s.setUpNewModel(c)
	defer closer()

	timeout := time.After(ReallyLongWait)
	s.assertJobWithState(c, state.JobManageModel, func(agent.Config, *state.State) {
		model, err := st.Model()
		c.Assert(err, jc.ErrorIsNil)
		watch := model.Watch()
		defer workertest.CleanKill(c, watch)

		err = model.Destroy()
		c.Assert(err, jc.ErrorIsNil)
		for {
			select {
			case <-watch.Changes():
				err := model.Refresh()
				cause := errors.Cause(err)
				if err == nil {
					continue // still there
				} else if errors.IsNotFound(cause) {
					return // successfully removed
				}
				c.Assert(err, jc.ErrorIsNil) // guaranteed fail
			case <-time.After(coretesting.ShortWait):
				st.StartSync()
			case <-timeout:
				c.Fatalf("timed out waiting for workers")
			}
		}
	})
}
Example #25
File: flag_test.go Project: makyo/juju
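// A flag worker created over a still-locked gate should stay alive and
// report false from Check.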
func (*FlagSuite) TestFlagLocked(c *gc.C) {
	lock := gate.NewLock()
	worker, err := gate.NewFlag(lock)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, worker)
	workertest.CheckAlive(c, worker)
	c.Check(worker.Check(), jc.IsFalse)
}
Example #26
File: worker_test.go Project: bac/juju
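// runWorkerUntilMongoStateIs watches the fake controllers until the state
// reaches the wanted Mongo space state, then kills the worker cleanly.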
func runWorkerUntilMongoStateIs(c *gc.C, st *fakeState, w *pgWorker, mss state.MongoSpaceStates) {
	changes := st.controllers.Watch()
	changes.Next()
	for st.getMongoSpaceState() != mss {
		changes.Next()
	}
	workertest.CleanKill(c, w)
}
Example #27
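// CheckKilled on a worker that is still alive should time out, flag a test
// failure, and return the timeout error.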
func (s *Suite) TestCheckKilledTimeout(c *gc.C) {
	w := workertest.NewErrorWorker(nil)
	defer workertest.CleanKill(c, w)

	err := workertest.CheckKilled(c, w)
	s.CheckFailed(c)
	c.Check(err, gc.ErrorMatches, "workertest: worker not stopping")
}
Example #28
File: worker_test.go Project: bac/juju
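// A worker built from the default config should come up alive, stop cleanly,
// and make exactly the expected calls against the stub.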
func (*WorkerSuite) TestWorkerNoErr(c *gc.C) {
	stub := &testing.Stub{}
	worker, err := machineactions.NewMachineActionsWorker(defaultConfig(stub))
	c.Assert(err, jc.ErrorIsNil)

	workertest.CheckAlive(c, worker)
	workertest.CleanKill(c, worker)
	stub.CheckCalls(c, getSuccessfulCalls(allCalls))
}
Example #29
File: pinger_test.go Project: bac/juju
func (s *WorkerSuite) TestInitialSuccess(c *gc.C) {
	fix := NewFixture()
	stub := fix.Run(c, func(context Context, worker *presence.Worker) {
		workertest.CleanKill(c, worker)
		// Despite immediate kill, a pinger was still started.
		context.WaitPinger()
	})
	stub.CheckCallNames(c, "Start")
}
Example #30
File: aggregate_test.go Project: bac/juju
// Test that one request gets sent after a suitable delay.
func (s *aggregateSuite) TestSingleRequest(c *gc.C) {
	// We set up a couple of variables here so that we can use them locally
	// without type assertions. Then we use them in the aggregatorConfig.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Minute
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	// Add a new test instance.
	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a test in a goroutine and make sure we wait for it to finish.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		info, err := aggregator.instanceInfo("foo")
		c.Check(err, jc.ErrorIsNil)
		c.Check(info.status.Message, gc.DeepEquals, "foobar")
	}()

	// Unwind the testing clock to let our request through.
	waitAlarms(c, clock, 1)
	clock.Advance(delay)

	wg.Wait()

	// Ensure we kill the worker before looking at our testInstanceGetter to
	// ensure there's no possibility of a race.
	workertest.CleanKill(c, aggregator)

	ids := testGetter.ids
	c.Assert(ids, gc.DeepEquals, []instance.Id{"foo"})
}