func (s *uniterLeadershipSuite) TestMergeLeadershipSettings(c *gc.C) { // First, the unit must be elected leader; otherwise merges will be denied. leaderClient := leadership.NewClient(s.apiState) err := leaderClient.ClaimLeadership(s.serviceId, s.unitId, 10*time.Second) c.Assert(err, jc.ErrorIsNil) client := uniter.NewState(s.apiState, names.NewUnitTag(s.unitId)) // Grab what settings exist. settings, err := client.LeadershipSettings.Read(s.serviceId) c.Assert(err, jc.ErrorIsNil) // Double check that it's empty so that we don't pass the test by // happenstance. c.Assert(settings, gc.HasLen, 0) // Toss a few settings in. settings["foo"] = "bar" settings["baz"] = "biz" err = client.LeadershipSettings.Merge(s.serviceId, settings) c.Assert(err, jc.ErrorIsNil) settings, err = client.LeadershipSettings.Read(s.serviceId) c.Assert(err, jc.ErrorIsNil) c.Check(settings["foo"], gc.Equals, "bar") c.Check(settings["baz"], gc.Equals, "biz") }
func (s *ClientSuite) TestClaimLeadershipTranslation(c *gc.C) { const claimTime = 5 * time.Hour numStubCalls := 0 apiCaller := s.apiCaller(c, func(request string, arg, result interface{}) error { numStubCalls++ c.Check(request, gc.Equals, "ClaimLeadership") c.Check(arg, jc.DeepEquals, params.ClaimLeadershipBulkParams{ Params: []params.ClaimLeadershipParams{{ ServiceTag: "service-stub-service", UnitTag: "unit-stub-unit-0", DurationSeconds: claimTime.Seconds(), }}, }) switch result := result.(type) { case *params.ClaimLeadershipBulkResults: result.Results = []params.ErrorResult{{}} default: c.Fatalf("bad result type: %T", result) } return nil }) client := leadership.NewClient(apiCaller) err := client.ClaimLeadership(StubServiceNm, StubUnitNm, claimTime) c.Check(err, jc.ErrorIsNil) c.Check(numStubCalls, gc.Equals, 1) }
func (s *leadershipSuite) TestUnblock(c *gc.C) { client := leadership.NewClient(s.apiState) err := client.ClaimLeadership(s.serviceId, s.unitId, 10*time.Second) c.Assert(err, jc.ErrorIsNil) unblocked := make(chan struct{}) go func() { err := client.BlockUntilLeadershipReleased(s.serviceId) c.Check(err, jc.ErrorIsNil) unblocked <- struct{}{} }() time.Sleep(coretesting.ShortWait) err = client.ReleaseLeadership(s.serviceId, s.unitId) c.Assert(err, jc.ErrorIsNil) select { case <-time.After(coretesting.LongWait): c.Errorf("Timed out waiting for leadership to release.") case <-unblocked: } }
func (s *leadershipSuite) TestClaimLeadership(c *gc.C) { client := leadership.NewClient(s.apiState) err := client.ClaimLeadership(s.serviceId, s.unitId, 10*time.Second) c.Assert(err, jc.ErrorIsNil) tokens, err := s.State.LeasePersistor.PersistedTokens() c.Assert(err, jc.ErrorIsNil) c.Assert(tokens, gc.HasLen, 1) c.Assert(tokens[0].Namespace, gc.Equals, "mysql-leadership") c.Assert(tokens[0].Id, gc.Equals, "mysql/0") unblocked := make(chan struct{}) go func() { err := client.BlockUntilLeadershipReleased(s.serviceId) c.Check(err, jc.ErrorIsNil) unblocked <- struct{}{} }() time.Sleep(coretesting.ShortWait) select { case <-time.After(15 * time.Second): c.Errorf("Timed out waiting for leadership to release.") case <-unblocked: } }
func (s *ClientSuite) TestBlockUntilLeadershipReleasedFacadeCallError(c *gc.C) { errMsg := "well, I just give up." numStubCalls := 0 apiCaller := s.apiCaller(c, func(_ string, _, _ interface{}) error { numStubCalls++ return errors.Errorf(errMsg) }) client := leadership.NewClient(apiCaller) err := client.BlockUntilLeadershipReleased(StubServiceNm) c.Check(numStubCalls, gc.Equals, 1) c.Check(err, gc.ErrorMatches, "error blocking on leadership release: "+errMsg) }
func (s *ClientSuite) TestClaimLeadershipFacadeCallError(c *gc.C) { errMsg := "well, I just give up." numStubCalls := 0 apiCaller := s.apiCaller(c, func(_ string, _, _ interface{}) error { numStubCalls++ return errors.Errorf(errMsg) }) client := leadership.NewClient(apiCaller) err := client.ClaimLeadership(StubServiceNm, StubUnitNm, 0) c.Check(numStubCalls, gc.Equals, 1) c.Check(err, gc.ErrorMatches, "error making a leadership claim: "+errMsg) }
func (s *leadershipSuite) TestReleaseLeadership(c *gc.C) { client := leadership.NewClient(s.apiState) err := client.ClaimLeadership(s.serviceId, s.unitId, 10*time.Second) c.Assert(err, jc.ErrorIsNil) err = client.ReleaseLeadership(s.serviceId, s.unitId) c.Assert(err, jc.ErrorIsNil) tokens, err := s.State.LeasePersistor.PersistedTokens() c.Assert(err, jc.ErrorIsNil) c.Assert(tokens, gc.HasLen, 0) }
func (s *ClientSuite) TestBlockUntilLeadershipReleasedError(c *gc.C) { numStubCalls := 0 apiCaller := s.apiCaller(c, func(_ string, _, result interface{}) error { numStubCalls++ switch result := result.(type) { case *params.ErrorResult: *result = params.ErrorResult{Error: ¶ms.Error{Message: "splat"}} default: c.Fatalf("bad result type: %T", result) } return nil }) client := leadership.NewClient(apiCaller) err := client.BlockUntilLeadershipReleased(StubServiceNm) c.Check(numStubCalls, gc.Equals, 1) c.Check(err, gc.ErrorMatches, "error blocking on leadership release: splat") }
func (s *ClientSuite) TestBlockUntilLeadershipReleasedTranslation(c *gc.C) { numStubCalls := 0 apiCaller := s.apiCaller(c, func(request string, arg, result interface{}) error { numStubCalls++ c.Check(request, gc.Equals, "BlockUntilLeadershipReleased") c.Check(arg, jc.DeepEquals, names.NewServiceTag(StubServiceNm)) switch result := result.(type) { case *params.ErrorResult: default: c.Fatalf("bad result type: %T", result) } return nil }) client := leadership.NewClient(apiCaller) err := client.BlockUntilLeadershipReleased(StubServiceNm) c.Check(numStubCalls, gc.Equals, 1) c.Check(err, jc.ErrorIsNil) }
func (s *ClientSuite) TestClaimLeadershipDeniedError(c *gc.C) { numStubCalls := 0 apiCaller := s.apiCaller(c, func(_ string, _, result interface{}) error { numStubCalls++ switch result := result.(type) { case *params.ClaimLeadershipBulkResults: result.Results = []params.ErrorResult{{Error: ¶ms.Error{ Message: "blah", Code: params.CodeLeadershipClaimDenied, }}} default: c.Fatalf("bad result type: %T", result) } return nil }) client := leadership.NewClient(apiCaller) err := client.ClaimLeadership(StubServiceNm, StubUnitNm, 0) c.Check(numStubCalls, gc.Equals, 1) c.Check(err, gc.Equals, coreleadership.ErrClaimDenied) }
func (s *ClientSuite) TestClaimLeadershipUnknownError(c *gc.C) { errMsg := "I'm trying!" numStubCalls := 0 apiCaller := s.apiCaller(c, func(_ string, _, result interface{}) error { numStubCalls++ switch result := result.(type) { case *params.ClaimLeadershipBulkResults: result.Results = []params.ErrorResult{{Error: ¶ms.Error{ Message: errMsg, }}} default: c.Fatalf("bad result type: %T", result) } return nil }) client := leadership.NewClient(apiCaller) err := client.ClaimLeadership(StubServiceNm, StubUnitNm, 0) c.Check(numStubCalls, gc.Equals, 1) c.Check(err, gc.ErrorMatches, errMsg) }
func (s *uniterLeadershipSuite) TestReadLeadershipSettings(c *gc.C) { // First, the unit must be elected leader; otherwise merges will be denied. leaderClient := leadership.NewClient(s.apiState) err := leaderClient.ClaimLeadership(s.serviceId, s.unitId, 10*time.Second) c.Assert(err, jc.ErrorIsNil) client := uniter.NewState(s.apiState, names.NewUnitTag(s.unitId)) // Toss a few settings in. desiredSettings := map[string]string{ "foo": "bar", "baz": "biz", } err = client.LeadershipSettings.Merge(s.serviceId, desiredSettings) c.Assert(err, jc.ErrorIsNil) settings, err := client.LeadershipSettings.Read(s.serviceId) c.Assert(err, jc.ErrorIsNil) c.Check(settings, gc.DeepEquals, desiredSettings) }
func (s *uniterLeadershipSuite) TestSettingsChangeNotifier(c *gc.C) { // First, the unit must be elected leader; otherwise merges will be denied. leaderClient := leadership.NewClient(s.apiState) err := leaderClient.ClaimLeadership(s.serviceId, s.unitId, 10*time.Second) c.Assert(err, jc.ErrorIsNil) client := uniter.NewState(s.apiState, names.NewUnitTag(s.unitId)) // Listen for changes watcher, err := client.LeadershipSettings.WatchLeadershipSettings(s.serviceId) c.Assert(err, jc.ErrorIsNil) defer statetesting.AssertStop(c, watcher) leadershipC := statetesting.NewNotifyWatcherC(c, s.BackingState, watcher) // Inital event leadershipC.AssertOneChange() // Make some changes err = client.LeadershipSettings.Merge(s.serviceId, map[string]string{"foo": "bar"}) c.Assert(err, jc.ErrorIsNil) leadershipC.AssertOneChange() // And check that the changes were actually applied settings, err := client.LeadershipSettings.Read(s.serviceId) c.Assert(err, jc.ErrorIsNil) c.Check(settings["foo"], gc.Equals, "bar") // Make a couple of changes, and then check that they have been // coalesced into a single event err = client.LeadershipSettings.Merge(s.serviceId, map[string]string{"foo": "baz"}) c.Assert(err, jc.ErrorIsNil) err = client.LeadershipSettings.Merge(s.serviceId, map[string]string{"bing": "bong"}) c.Assert(err, jc.ErrorIsNil) leadershipC.AssertOneChange() }
			return nil, err
		}
		// Resources resolved; hand off to the (patchable) worker constructor.
		return newManifoldWorker(agent, apiCaller, config.LeadershipGuarantee)
	}
}

// newManifoldWorker wraps NewTrackerWorker for the convenience of startFunc. It
// exists primarily to be patched out via NewManifoldWorker for ease of testing,
// and is not itself directly tested; once all NewTrackerWorker clients have been
// replaced with manifolds, the tests can be tidied up a bit.
var newManifoldWorker = func(agent agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) {
	// Leadership tracking only makes sense for unit agents; reject any
	// other tag kind up front.
	tag := agent.CurrentConfig().Tag()
	unitTag, ok := tag.(names.UnitTag)
	if !ok {
		return nil, fmt.Errorf("expected a unit tag; got %q", tag)
	}
	claimer := leadership.NewClient(apiCaller)
	return NewTrackerWorker(unitTag, claimer, guarantee), nil
}

// outputFunc extracts the Tracker from a *tracker passed in as a Worker.
func outputFunc(in worker.Worker, out interface{}) error {
	// Both ends must have the expected concrete types for the assignment
	// below to be meaningful.
	inWorker, _ := in.(*tracker)
	outPointer, _ := out.(*Tracker)
	if inWorker == nil || outPointer == nil {
		return errors.Errorf("expected %T->%T; got %T->%T", inWorker, outPointer, in, out)
	}
	*outPointer = inWorker
	return nil
}
// APIWorkers opens the API connection for this unit agent and returns a
// worker that runs all of the unit's API-backed sub-workers (proxyupdater,
// upgrader, logger, uniter, apiaddressupdater, rsyslog) under a single
// runner. The returned worker closes the API connection when it stops.
func (a *UnitAgent) APIWorkers() (_ worker.Worker, err error) {
	agentConfig := a.CurrentConfig()
	dataDir := agentConfig.DataDir()
	// The hook execution lock serializes hook execution across processes
	// sharing this data directory.
	hookLock, err := cmdutil.HookExecutionLock(dataDir)
	if err != nil {
		return nil, err
	}
	st, entity, err := OpenAPIState(agentConfig, a)
	if err != nil {
		return nil, err
	}
	unitTag, err := names.ParseUnitTag(entity.Tag())
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Ensure that the environment uuid is stored in the agent config.
	// Luckily the API has it recorded for us after we connect.
	if agentConfig.Environment().Id() == "" {
		err := a.ChangeConfig(func(setter agent.ConfigSetter) error {
			environTag, err := st.EnvironTag()
			if err != nil {
				return errors.Annotate(err, "no environment uuid set on api")
			}
			return setter.Migrate(agent.MigrateParams{
				Environment: environTag,
			})
		})
		if err != nil {
			logger.Warningf("unable to save environment uuid: %v", err)
			// Not really fatal, just annoying.
		}
	}
	// On any error from here on, close the API connection before returning
	// (err is the named return, so this covers every failure path below).
	defer func() {
		if err != nil {
			st.Close()
			reportClosedUnitAPI(st)
		}
	}()
	// Before starting any workers, ensure we record the Juju version this unit
	// agent is running.
	currentTools := &tools.Tools{Version: version.Current}
	apiStateUpgrader := a.getUpgrader(st)
	if err := apiStateUpgrader.SetVersion(agentConfig.Tag().String(), currentTools.Version); err != nil {
		return nil, errors.Annotate(err, "cannot set unit agent version")
	}
	// Any connection failure is fatal to the whole runner.
	runner := worker.NewRunner(cmdutil.ConnectionIsFatal(logger, st), cmdutil.MoreImportant)
	// start proxyupdater first to ensure proxy settings are correct
	runner.StartWorker("proxyupdater", func() (worker.Worker, error) {
		return proxyupdater.New(st.Environment(), false), nil
	})
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		return upgrader.NewAgentUpgrader(
			st.Upgrader(),
			agentConfig,
			agentConfig.UpgradedToVersion(),
			func() bool { return false },
			a.initialAgentUpgradeCheckComplete,
		), nil
	})
	runner.StartWorker("logger", func() (worker.Worker, error) {
		return workerlogger.NewLogger(st.Logger(), agentConfig), nil
	})
	// The uniter is the worker that actually runs charm hooks for the unit.
	runner.StartWorker("uniter", func() (worker.Worker, error) {
		uniterFacade, err := st.Uniter()
		if err != nil {
			return nil, errors.Trace(err)
		}
		uniterParams := uniter.UniterParams{
			uniterFacade,
			unitTag,
			leadership.NewClient(st),
			dataDir,
			hookLock,
			uniter.NewMetricsTimerChooser(),
			uniter.NewUpdateStatusTimer(),
		}
		return uniter.NewUniter(&uniterParams), nil
	})
	runner.StartWorker("apiaddressupdater", func() (worker.Worker, error) {
		uniterFacade, err := st.Uniter()
		if err != nil {
			return nil, errors.Trace(err)
		}
		return apiaddressupdater.NewAPIAddressUpdater(uniterFacade, a), nil
	})
	runner.StartWorker("rsyslog", func() (worker.Worker, error) {
		return cmdutil.NewRsyslogConfigWorker(st.Rsyslog(), agentConfig, rsyslog.RsyslogModeForwarding)
	})
	// Wrap the runner so stopping it also closes the API connection.
	return cmdutil.NewCloseWorker(logger, runner, st), nil
}
	// Resolve the API-caller resource before constructing the worker.
	if err := getResource(config.ApiCallerName, &apiCaller); err != nil {
		return nil, err
	}
	return newManifoldWorker(agent, apiCaller, config.LeadershipGuarantee)
	}
}

// newManifoldWorker wraps NewTrackerWorker for the convenience of startFunc. It
// exists primarily to be patched out via NewManifoldWorker for ease of testing,
// and is not itself directly tested; once all NewTrackerWorker clients have been
// replaced with manifolds, the tests can be tidied up a bit.
var newManifoldWorker = func(agent agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) {
	// Leadership tracking only makes sense for unit agents; reject any
	// other tag kind up front.
	unitTag, ok := agent.Tag().(names.UnitTag)
	if !ok {
		return nil, fmt.Errorf("expected a unit tag; got %q", agent.Tag())
	}
	leadershipManager := leadership.NewClient(apiCaller)
	return NewTrackerWorker(unitTag, leadershipManager, guarantee), nil
}

// outputFunc extracts the Tracker from a *tracker passed in as a Worker.
func outputFunc(in worker.Worker, out interface{}) error {
	// Both ends must have the expected concrete types for the assignment
	// below to be meaningful.
	inWorker, _ := in.(*tracker)
	outPointer, _ := out.(*Tracker)
	if inWorker == nil || outPointer == nil {
		return errors.Errorf("expected %T->%T; got %T->%T", inWorker, outPointer, in, out)
	}
	*outPointer = inWorker
	return nil
}