Example #1
func (s *aggregateSuite) TestPartialInstanceErrors(c *gc.C) {
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Second

	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	testGetter.err = environs.ErrPartialInstances
	testGetter.newTestInstance("foo", "not foobar", []string{"192.168.1.2"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a checker closure we can launch in goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string, expectedError error) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		if expectedError == nil {
			c.Check(err, jc.ErrorIsNil)
		} else {
			c.Check(err.Error(), gc.Equals, expectedError.Error())
		}
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch and wait for these
	wg.Add(2)
	go checkInfo("foo", "not foobar", nil)
	go checkInfo("foo2", "", errors.New("instance foo2 not found"))

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the checkers pass.
	wg.Wait()

	// Now kill the worker so we don't risk a race in the following assertions.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the correct length.
	c.Assert(len(testGetter.ids), gc.Equals, 2)

	// Ensure we called instances once.
	// TODO(redir): all this stuff is really crying out to be, e.g.
	// testGetter.CheckOneCall(c, "foo", "foo2") per
	// http://reviews.vapour.ws/r/4885/
	c.Assert(testGetter.counter, gc.Equals, int32(1))
}
Example #2
func (s *WaitSuite) TestConfigError(c *gc.C) {
	fix := &fixture{
		observerErrs: []error{
			errors.New("biff zonk"),
		},
	}
	fix.Run(c, func(context *runContext) {
		abort := make(chan struct{})
		defer close(abort)

		done := make(chan struct{})
		go func() {
			defer close(done)
			env, err := environ.WaitForEnviron(context.watcher, context, nil, abort)
			c.Check(env, gc.IsNil)
			c.Check(err, gc.ErrorMatches, "cannot read environ config: biff zonk")
		}()

		context.SendNotify()
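		// The stubbed config read fails, so WaitForEnviron should return
		// the error and close done well before LongWait.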
		select {
		case <-done:
		case <-time.After(coretesting.LongWait):
			c.Errorf("timed out waiting for failure")
		}
		workertest.CheckAlive(c, context.watcher)
	})
}
Example #3
func (*FlagSuite) TestFlagLocked(c *gc.C) {
	lock := gate.NewLock()
	worker, err := gate.NewFlag(lock)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, worker)
	workertest.CheckAlive(c, worker)
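	// The gate has not been unlocked, so the flag reports false.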
	c.Check(worker.Check(), jc.IsFalse)
}
Example #4
func (s *FlagSuite) TestClaimFailure(c *gc.C) {
	fix := newFixture(c, errClaimDenied, nil)
	fix.Run(c, func(flag *singular.FlagWorker, _ *coretesting.Clock, _ func()) {
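		// The claim was denied, so the flag reports false while the worker stays alive.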
		c.Check(flag.Check(), jc.IsFalse)
		workertest.CheckAlive(c, flag)
	})
	fix.CheckClaimWait(c)
}
Example #5
// Test several requests in a short space of time get batched.
func (s *aggregateSuite) TestMultipleResponseHandling(c *gc.C) {
	// We set up a couple of variables here so that we can use them locally without
	// type assertions. Then we use them in the aggregatorConfig.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Minute
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	// Set up multiple instances to batch.
	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})
	testGetter.newTestInstance("foo2", "not foobar", []string{"192.168.1.2"})
	testGetter.newTestInstance("foo3", "ok-ish", []string{"192.168.1.3"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a closure for tests we can launch in goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		c.Check(err, jc.ErrorIsNil)
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch and wait for these
	wg.Add(2)
	go checkInfo("foo2", "not foobar")
	go checkInfo("foo3", "ok-ish")

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the tests pass.
	wg.Wait()

	// Ensure we kill the worker before looking at our testInstanceGetter to
	// ensure there's no possibility of a race.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the expected contents.
	c.Assert(testGetter.ids, jc.SameContents, []instance.Id{"foo2", "foo3"})

	// Ensure we called instances once and have no errors there.
	c.Assert(testGetter.err, jc.ErrorIsNil)
	c.Assert(testGetter.counter, gc.DeepEquals, int32(1))
}
Example #6
func (s *ManifoldSuite) TestConfigChangeWithAddrReordering(c *gc.C) {
	s.agent.conf.setAddresses("1.1.1.1:1", "2.2.2.2:2")
	w := s.startWorkerClean(c)

	// Change API address ordering - worker should stay up.
	s.agent.conf.setAddresses("2.2.2.2:2", "1.1.1.1:1")
	s.agentConfigChanged.Set(0)
	workertest.CheckAlive(c, w)
}
Example #7
func (s *ManifoldSuite) TestConfigChangeWithNoAddrChange(c *gc.C) {
	s.agent.conf.setAddresses("1.1.1.1:1")
	w := s.startWorkerClean(c)

	// Signal config change without changing API addresses - worker
	// should continue running.
	s.agentConfigChanged.Set(0)
	workertest.CheckAlive(c, w)
}
Example #8
func (s *FlagSuite) TestClaimSuccess(c *gc.C) {
	fix := newFixture(c, nil, errors.New("should not happen"))
	fix.Run(c, func(flag *singular.FlagWorker, clock *coretesting.Clock, unblock func()) {
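		// Wait for the worker's timer, then advance by less than the full
		// interval so no second claim is attempted.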
		<-clock.Alarms()
		clock.Advance(29 * time.Second)
		workertest.CheckAlive(c, flag)
	})
	fix.CheckClaims(c, 1)
}
Example #9
func (s *UndertakerSuite) TestAlreadyDeadTimeMissingWaits(c *gc.C) {
	s.fix.info.Result.Life = "dead"
	stub := s.fix.run(c, func(w worker.Worker, clock *coretesting.Clock) {
		waitAlarm(c, clock)
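		// Advance to just before the removal time; the undertaker should keep waiting.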
		clock.Advance(RIPTime - time.Second)
		workertest.CheckAlive(c, w)
	})
	stub.CheckCallNames(c, "ModelInfo", "Destroy")
}
Example #10
func (*WorkerSuite) TestWorkerNoErr(c *gc.C) {
	stub := &testing.Stub{}
	worker, err := machineactions.NewMachineActionsWorker(defaultConfig(stub))
	c.Assert(err, jc.ErrorIsNil)

	workertest.CheckAlive(c, worker)
	workertest.CleanKill(c, worker)
	stub.CheckCalls(c, getSuccessfulCalls(allCalls))
}
Example #11
func (s *Suite) checkNonRunningPhase(c *gc.C, phase migration.Phase) {
	c.Logf("checking %s", phase)
	s.stub.ResetCalls()
	s.client.watcher.changes <- watcher.MigrationStatus{Phase: phase}
	w, err := migrationminion.New(s.config)
	c.Assert(err, jc.ErrorIsNil)
	workertest.CheckAlive(c, w)
	workertest.CleanKill(c, w)
	s.stub.CheckCallNames(c, "Watch", "Unlock")
}
Example #12
func (*FlagSuite) TestFlagUnlockError(c *gc.C) {
	lock := gate.NewLock()
	worker, err := gate.NewFlag(lock)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.DirtyKill(c, worker)
	workertest.CheckAlive(c, worker)
	lock.Unlock()
	err = workertest.CheckKilled(c, worker)
	c.Check(err, gc.Equals, gate.ErrUnlocked)
}
Example #13
func (s *suite) TestRestartsErrorWorker(c *gc.C) {
	s.runTest(c, func(w worker.Worker, backend *mockBackend) {
		backend.sendModelChange("uuid")
		workers := s.waitWorkers(c, 1)
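		// Kill the model worker with an error; it should be restarted.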
		workers[0].tomb.Kill(errors.New("blaf"))

		s.assertStarts(c, "uuid")
		workertest.CheckAlive(c, w)
	})
}
Example #14
func (*ResumerSuite) TestWaitsToResume(c *gc.C) {
	fix := newFixture(nil, errors.New("unexpected"))
	stub := fix.Run(c, func(clock *testing.Clock, worker *resumer.Resumer) {
		waitAlarms(c, clock, 2)
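		// Advance to just short of the next resume; only the first
		// ResumeTransactions call should have been made.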
		clock.Advance(time.Hour - time.Nanosecond)
		workertest.CheckAlive(c, worker)
		workertest.CleanKill(c, worker)
	})
	stub.CheckCallNames(c, "ResumeTransactions")
}
Example #15
func (s *statePoolSuite) TestKillWorkers(c *gc.C) {
	// Get some State instances via the pool and extract their
	// internal workers.
	st1, err := s.Pool.Get(s.ModelUUID1)
	c.Assert(err, jc.ErrorIsNil)
	w1 := state.GetInternalWorkers(st1)
	workertest.CheckAlive(c, w1)

	st2, err := s.Pool.Get(s.ModelUUID1)
	c.Assert(err, jc.ErrorIsNil)
	w2 := state.GetInternalWorkers(st2)
	workertest.CheckAlive(c, w2)

	// Now kill their workers.
	s.Pool.KillWorkers()

	// Ensure the internal workers for each State died.
	c.Check(workertest.CheckKilled(c, w1), jc.ErrorIsNil)
	c.Check(workertest.CheckKilled(c, w2), jc.ErrorIsNil)
}
Example #16
func (s *WorkerSuite) unlockCheck(c *gc.C, check func(c *gc.C)) {
	worker, lock := s.startWorker(c)
	defer workertest.CleanKill(c, worker)
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("discovery never completed")
	case <-lock.Unlocked():
		check(c)
	}
	workertest.CheckAlive(c, worker)
}
Example #17
func (s *FlagSuite) TestClaimSuccessesThenError(c *gc.C) {
	fix := newFixture(c)
	fix.Run(c, func(flag *singular.FlagWorker, clock *coretesting.Clock, unblock func()) {
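		// Two timer expiries allow two more claim attempts after the
		// initial one, for three claims in total.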
		<-clock.Alarms()
		clock.Advance(time.Minute)
		<-clock.Alarms()
		clock.Advance(time.Minute)
		workertest.CheckAlive(c, flag)
	})
	fix.CheckClaims(c, 3)
}
Example #18
func (s *UndertakerSuite) TestAlreadyDeadTimeRecordedWaits(c *gc.C) {
	halfTime := RIPTime / 2
	diedAt := time.Now().Add(-halfTime)
	s.fix.info.Result.Life = "dead"
	s.fix.info.Result.TimeOfDeath = &diedAt
	stub := s.fix.run(c, func(w worker.Worker, clock *coretesting.Clock) {
		waitAlarm(c, clock)
		clock.Advance(halfTime - time.Second)
		workertest.CheckAlive(c, w)
	})
	stub.CheckCallNames(c, "ModelInfo", "Destroy")
}
Example #19
func (*WorkerSuite) TestFailHandlingSecondActionSendAllResults(c *gc.C) {
	stub := &testing.Stub{}
	stub.SetErrors(nil, nil, nil, nil, nil, nil, nil, nil, errors.New("kryptonite"))
	worker, err := machineactions.NewMachineActionsWorker(defaultConfig(stub))
	c.Assert(err, jc.ErrorIsNil)
	workertest.CheckAlive(c, worker)
	workertest.CleanKill(c, worker)

	successfulCalls := getSuccessfulCalls(allCalls)
	successfulCalls[9].Args = []interface{}{secondActionTag, params.ActionFailed, "kryptonite"}
	stub.CheckCalls(c, successfulCalls)
}
Example #20
func (s *suite) TestNeverRestartsFinishedWorker(c *gc.C) {
	s.runTest(c, func(w worker.Worker, backend *mockBackend) {
		backend.sendModelChange("uuid")
		workers := s.waitWorkers(c, 1)
		workers[0].tomb.Kill(nil)

		// even when we get a change for it
		backend.sendModelChange("uuid")
		workertest.CheckAlive(c, w)
		s.assertNoWorkers(c)
	})
}
Example #21
func (s *WorkerSuite) TestInitialRetryIsDelayed(c *gc.C) {
	// First start attempt fails.
	fix := NewFixture(errors.New("zap"))
	stub := fix.Run(c, func(context Context, worker *presence.Worker) {
		context.WaitAlarms(2)
		// Now we know the worker is waiting to start the next
		// pinger, advance *almost* far enough to trigger it.
		context.AdvanceClock(almostFiveSeconds)
		workertest.CheckAlive(c, worker)
	})
	stub.CheckCallNames(c, "Start")
}
Example #22
func (s *UndertakerSuite) TestImmediateSuccess(c *gc.C) {
	stub := s.fix.run(c, func(w worker.Worker, clock *coretesting.Clock) {
		waitAlarm(c, clock)
		clock.Advance(RIPTime - time.Second)
		workertest.CheckAlive(c, w)
	})
	stub.CheckCallNames(c,
		"ModelInfo",
		"WatchModelResources",
		"ProcessDyingModel",
		"Destroy",
	)
}
Example #23
func (s *WorkerSuite) TestStoppedPingerRestartIsDelayed(c *gc.C) {
	fix := NewFixture()
	stub := fix.Run(c, func(context Context, worker *presence.Worker) {
		context.WaitPinger().Kill()
		context.WaitAlarms(2)
		// Now we know the first pinger has been stopped (no
		// error), and the worker is waiting to start the next
		// one, advance *almost* far enough to trigger it.
		context.AdvanceClock(almostFiveSeconds)
		workertest.CheckAlive(c, worker)
	})
	stub.CheckCallNames(c, "Start")
}
Example #24
func (s *WorkerSuite) TestManyRestarts(c *gc.C) {
	fix := NewFixture()
	stub := fix.Run(c, func(context Context, worker *presence.Worker) {
		context.WaitAlarms(1)
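		// Repeatedly kill the pinger; after each five-second delay the
		// worker starts a new one, for five Start calls in total.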
		for i := 0; i < 4; i++ {
			context.WaitPinger().Kill()
			context.WaitAlarms(1)
			context.AdvanceClock(fiveSeconds)
		}
		workertest.CheckAlive(c, worker)
	})
	stub.CheckCallNames(c, "Start", "Start", "Start", "Start", "Start")
}
Example #25
func (s *Suite) TestPreviouslyAbortedMigration(c *gc.C) {
	masterClient := newStubMasterClient(s.stub)
	masterClient.status.Phase = migration.ABORTDONE
	s.triggerMigration(masterClient)
	worker, err := migrationmaster.New(migrationmaster.Config{
		Facade: masterClient,
		Guard:  newStubGuard(s.stub),
	})
	c.Assert(err, jc.ErrorIsNil)
	workertest.CheckAlive(c, worker)
	workertest.CleanKill(c, worker)

	// No reliable way to test stub calls in this case unfortunately.
}
Example #26
func (s *WorkerSuite) TestFailedPingerRestartIsDelayed(c *gc.C) {
	// First start succeeds; pinger will die with error when killed.
	fix := NewFixture(nil, errors.New("zap"))
	stub := fix.Run(c, func(context Context, worker *presence.Worker) {
		context.WaitPinger().Kill()
		context.WaitAlarms(2)
		// Now we know the first pinger has been stopped, and
		// the worker is waiting to start the next one, advance
		// *almost* far enough to trigger it.
		context.AdvanceClock(almostFiveSeconds)
		workertest.CheckAlive(c, worker)
	})
	stub.CheckCallNames(c, "Start")
}
Example #27
func (s *Suite) TestNONE(c *gc.C) {
	s.client.watcher.changes <- watcher.MigrationStatus{
		Phase: migration.NONE,
	}
	w, err := migrationminion.New(migrationminion.Config{
		Facade: s.client,
		Guard:  s.guard,
		Agent:  s.agent,
	})
	c.Assert(err, jc.ErrorIsNil)

	workertest.CheckAlive(c, w)
	workertest.CleanKill(c, w)
	s.stub.CheckCallNames(c, "Watch", "Unlock")
}
Example #28
func (*WorkerSuite) TestResultNoRealChange(c *gc.C) {
	stub := &testing.Stub{}
	config := lifeflag.Config{
		Facade: newMockFacade(stub, life.Alive, life.Alive, life.Dying),
		Entity: testEntity,
		Result: life.IsNotDead,
	}
	worker, err := lifeflag.New(config)
	c.Check(err, jc.ErrorIsNil)
	c.Check(worker.Check(), jc.IsTrue)

	workertest.CheckAlive(c, worker)
	workertest.CleanKill(c, worker)
	checkCalls(c, stub, "Life", "Watch", "Life", "Life")
}
Example #29
func (s *SelfSuite) TestStress(c *gc.C) {
	s.fix.run(c, func(engine dependency.Engine) {

		// Repeatedly install a manifold inside itself.
		manifold := dependency.SelfManifold(engine)
		for i := 0; i < 100; i++ {
			go engine.Install(fmt.Sprintf("self-%d", i), manifold)
		}

		// Give it a moment to screw up if it's going to
		// (injudicious implementation could induce deadlock)
		// then let the fixture worry about a clean kill.
		workertest.CheckAlive(c, engine)
	})
}
Example #30
func (*DumbWorkersSuite) TestLeadershipManagerFails(c *gc.C) {
	fix := BasicFixture()
	fix.LW_errors = []error{errors.New("zap")}
	fix.RunDumb(c, func(ctx Context, dw *workers.DumbWorkers) {
		w := NextWorker(c, ctx.LWs())
		c.Assert(w, gc.NotNil)
		AssertWorker(c, dw.LeadershipManager(), w)

		w.Kill()
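		// The container must stay alive and keep handing out the same
		// (now dead) worker.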
		workertest.CheckAlive(c, dw)
		AssertWorker(c, dw.LeadershipManager(), w)

		err := workertest.CheckKill(c, dw)
		c.Check(err, gc.ErrorMatches, "error stopping leadership lease manager: zap")
	})
}