func (s *TrackerSuite) TestWaitMinionNeverBecomeMinion(c *gc.C) { tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) ticket := tracker.WaitMinion() select { case <-time.After(refreshes(2)): case <-ticket.Ready(): c.Fatalf("got unexpected readiness: %v", ticket.Wait()) } s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }}) }
func (s *TrackerSuite) TestOnLeaderFailure(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check the ticket fails. assertClaimLeader(c, tracker, false) // Stop the tracker before trying to look at its mocks. assertStop(c, tracker) // Unblock the release goroutine, lest data races. s.unblockRelease(c) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "BlockUntilLeadershipReleased", Args: []interface{}{ "led-service", }, }}) }
func (s *TrackerSuite) TestWaitLeaderNeverBecomeLeader(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket fails. assertWaitLeader(c, tracker, false) // Get a new ticket and stop the tracker while it's pending. ticket := tracker.WaitLeader() assertStop(c, tracker) // Check the ticket got closed without sending true. assertTicket(c, ticket, false) assertTicket(c, ticket, false) // Unblock the release goroutine and stop the tracker before trying to // look at its stub. s.unblockRelease(c) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "BlockUntilLeadershipReleased", Args: []interface{}{ "led-service", }, }}) }
func (s *TrackerSuite) TestFailGainLeadership(c *gc.C) { s.claimer.Stub.SetErrors( coreleadership.ErrClaimDenied, nil, coreleadership.ErrClaimDenied, nil, ) tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket fails. assertClaimLeader(c, tracker, false) // Unblock the release goroutine... s.unblockRelease(c) // ...and, uh, voodoo sleep a bit, but not long enough to trigger a refresh... <-time.After(refreshes(0)) // ...then check the next ticket fails again. assertClaimLeader(c, tracker, false) // This time, sleep long enough that a refresh would trigger if it were // going to... <-time.After(refreshes(1)) // ...but it won't, because we Stop the tracker... assertStop(c, tracker) // ...and clear out the release goroutine before we look at the stub. s.unblockRelease(c) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "BlockUntilLeadershipReleased", Args: []interface{}{ "led-service", }, }, { FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "BlockUntilLeadershipReleased", Args: []interface{}{ "led-service", }, }}) }
func (s *TrackerSuite) TestOnLeaderSuccess(c *gc.C) { tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check the ticket succeeds. assertClaimLeader(c, tracker, true) // Stop the tracker before trying to look at its stub. assertStop(c, tracker) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }}) }
func (s *TrackerSuite) TestOnLeaderError(c *gc.C) { s.claimer.Stub.SetErrors(errors.New("pow")) tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer worker.Stop(tracker) // Check the ticket fails. assertClaimLeader(c, tracker, false) // Stop the tracker before trying to look at its mocks. err := worker.Stop(tracker) c.Check(err, gc.ErrorMatches, "leadership failure: pow") s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }}) }
func (s *TrackerSuite) TestWaitMinionAlreadyMinion(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket is closed immediately. assertWaitLeader(c, tracker, false) // Stop the tracker before trying to look at its stub. assertStop(c, tracker) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "BlockUntilLeadershipReleased", Args: []interface{}{ "led-service", }, }}) }
func (s *TrackerSuite) TestWaitMinionBecomeMinion(c *gc.C) { s.claimer.Stub.SetErrors(nil, coreleadership.ErrClaimDenied, nil) tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check the first ticket stays open. assertWaitMinion(c, tracker, false) // Wait long enough for a single refresh, to trigger ErrClaimDenied; then // check the next ticket is closed. <-time.After(refreshes(1)) assertWaitMinion(c, tracker, true) // Stop the tracker before trying to look at its stub. assertStop(c, tracker) // Unblock the release goroutine, lest data races. s.unblockRelease(c) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "BlockUntilLeadershipReleased", Args: []interface{}{ "led-service", }, }}) }
func (s *TrackerSuite) TestWaitLeaderBecomeLeader(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil, nil) tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket fails. assertWaitLeader(c, tracker, false) // Unblock the release goroutine... s.unblockRelease(c) // ...and, uh, voodoo sleep a bit, but not long enough to trigger a refresh... <-time.After(refreshes(0)) // ...then check the next ticket succeeds. assertWaitLeader(c, tracker, true) // Stop the tracker before trying to look at its stub. assertStop(c, tracker) s.claimer.CheckCalls(c, []testing.StubCall{{ FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }, { FuncName: "BlockUntilLeadershipReleased", Args: []interface{}{ "led-service", }, }, { FuncName: "ClaimLeadership", Args: []interface{}{ "led-service", "led-service/123", leaseDuration, }, }}) }
func (s *TrackerSuite) TestServiceName(c *gc.C) { tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) c.Assert(tracker.ServiceName(), gc.Equals, "led-service") }
// loop is the uniter's main run loop: it wires up the leadership
// tracker and event filter, then executes operating modes until one of
// them returns a terminal error. The named result lets the mode loop
// below drive its own termination via err.
func (u *Uniter) loop(unitTag names.UnitTag) (err error) {
	// Start tracking leadership state.
	// TODO(fwereade): ideally, this wouldn't be created here; as a worker it's
	// clearly better off being managed by a Runner. However, we haven't come up
	// with a clean way to reference one (lineage of a...) worker from another,
	// so for now the tracker is accessible only to its unit.
	leadershipTracker := leadership.NewTrackerWorker(
		unitTag, u.leadershipClaimer, leadershipGuarantee,
	)
	// Registered cleanup stops the tracker when the uniter shuts down.
	u.addCleanup(func() error { return worker.Stop(leadershipTracker) })
	u.leadershipTracker = leadershipTracker

	if err := u.init(unitTag); err != nil {
		// ErrTerminateAgent must propagate unchanged so the agent
		// machinery recognizes it; anything else gets context added.
		if err == worker.ErrTerminateAgent {
			return err
		}
		return fmt.Errorf("failed to initialize uniter for %q: %v", unitTag, err)
	}
	logger.Infof("unit %q started", u.unit)

	// Start filtering state change events for consumption by modes.
	u.f, err = filter.NewFilter(u.st, unitTag)
	if err != nil {
		return err
	}
	u.addCleanup(u.f.Stop)

	// Stop the uniter if either of these components fails.
	// Each goroutine blocks until its component dies, then kills the
	// tomb with that component's exit error.
	go func() { u.tomb.Kill(leadershipTracker.Wait()) }()
	go func() { u.tomb.Kill(u.f.Wait()) }()

	// Start handling leader settings events, or not, as appropriate.
	u.f.WantLeaderSettingsEvents(!u.operationState().Leader)

	// Run modes until we encounter an error. Each mode returns the next
	// mode; some errors are translated into mode transitions instead of
	// terminating the loop.
	mode := ModeContinue
	for err == nil {
		select {
		case <-u.tomb.Dying():
			err = tomb.ErrDying
		default:
			mode, err = mode(u)
			switch cause := errors.Cause(err); cause {
			case operation.ErrNeedsReboot:
				err = worker.ErrRebootMachine
			case tomb.ErrDying, worker.ErrTerminateAgent:
				// Unwrap to the bare sentinel so callers can compare it.
				err = cause
			case operation.ErrHookFailed:
				// A failed hook is not fatal: switch to the error mode
				// and keep looping.
				mode, err = ModeHookError, nil
			default:
				// A charm deploy conflict also continues the loop, in
				// the conflicted mode for that charm URL.
				charmURL, ok := operation.DeployConflictCharmURL(cause)
				if ok {
					mode, err = ModeConflicted(charmURL), nil
				}
			}
		}
	}
	logger.Infof("unit %q shutting down: %s", u.unit, err)
	return err
}