func (s *DeploySuite) testExecuteConflictError(c *gc.C, newDeploy newDeploy) { callbacks := NewDeployCallbacks() deployer := &MockDeployer{ MockNotifyRevert: &MockNoArgs{}, MockNotifyResolved: &MockNoArgs{}, MockStage: &MockStage{}, MockDeploy: &MockNoArgs{err: charm.ErrConflict}, } factory := operation.NewFactory(operation.FactoryParams{ Deployer: deployer, Callbacks: callbacks, }) charmURL := curl("cs:quantal/nyancat-4") op, err := newDeploy(factory, charmURL) c.Assert(err, jc.ErrorIsNil) _, err = op.Prepare(operation.State{}) c.Assert(err, jc.ErrorIsNil) newState, err := op.Execute(operation.State{}) c.Check(newState, gc.IsNil) c.Check(err, gc.ErrorMatches, "cannot deploy charm cs:quantal/nyancat-4") errURL, ok := operation.DeployConflictCharmURL(err) c.Check(ok, jc.IsTrue) c.Check(errURL, gc.DeepEquals, charmURL) c.Check(deployer.MockDeploy.called, jc.IsTrue) }
// loop initializes the uniter for the given unit, starts the state-change
// filter, and then drives the mode state machine until a terminal error
// occurs (tomb death, agent termination, or reboot request).
func (u *Uniter) loop(unitTag names.UnitTag) (err error) {
	if err := u.init(unitTag); err != nil {
		// ErrTerminateAgent is propagated untouched so the agent shuts down
		// rather than restarting the uniter.
		if err == worker.ErrTerminateAgent {
			return err
		}
		return fmt.Errorf("failed to initialize uniter for %q: %v", unitTag, err)
	}
	logger.Infof("unit %q started", u.unit)

	// Start filtering state change events for consumption by modes.
	u.f, err = filter.NewFilter(u.st, unitTag)
	if err != nil {
		return err
	}
	u.addCleanup(u.f.Stop)

	// Stop the uniter if the filter fails.
	go func() {
		u.tomb.Kill(u.f.Wait())
	}()

	// Start handling leader settings events, or not, as appropriate.
	u.f.WantLeaderSettingsEvents(!u.operationState().Leader)

	// Run modes until we encounter an error. Each mode function returns the
	// next mode to run; certain error causes are translated into mode
	// switches instead of terminating the loop.
	mode := ModeContinue
	for err == nil {
		select {
		case <-u.tomb.Dying():
			err = tomb.ErrDying
		default:
			mode, err = mode(u)
			switch cause := errors.Cause(err); cause {
			case operation.ErrNeedsReboot:
				// Translate into the worker-level reboot sentinel the agent
				// understands.
				err = worker.ErrRebootMachine
			case tomb.ErrDying, worker.ErrTerminateAgent:
				// Strip any wrapping so callers can compare these sentinels
				// directly.
				err = cause
			case operation.ErrHookFailed:
				// A failed hook is recoverable: enter the hook-error mode and
				// keep looping.
				mode, err = ModeHookError, nil
			default:
				// A charm deploy conflict is likewise recoverable via its own
				// dedicated mode, keyed on the conflicting charm URL.
				charmURL, ok := operation.DeployConflictCharmURL(cause)
				if ok {
					mode, err = ModeConflicted(charmURL), nil
				}
			}
		}
	}

	logger.Infof("unit %q shutting down: %s", u.unit, err)
	return err
}
// loop initializes the uniter for the given unit, starts the leadership
// tracker and the state-change filter, and then drives the mode state
// machine until a terminal error occurs (tomb death, agent termination,
// or reboot request).
func (u *Uniter) loop(unitTag names.UnitTag) (err error) {
	// Start tracking leadership state.
	// TODO(fwereade): ideally, this wouldn't be created here; as a worker it's
	// clearly better off being managed by a Runner. However, we haven't come up
	// with a clean way to reference one (lineage of a...) worker from another,
	// so for now the tracker is accessible only to its unit.
	leadershipTracker := leadership.NewTrackerWorker(
		unitTag, u.leadershipClaimer, leadershipGuarantee,
	)
	// Ensure the tracker is stopped when the uniter shuts down.
	u.addCleanup(func() error { return worker.Stop(leadershipTracker) })
	u.leadershipTracker = leadershipTracker

	if err := u.init(unitTag); err != nil {
		// ErrTerminateAgent is propagated untouched so the agent shuts down
		// rather than restarting the uniter.
		if err == worker.ErrTerminateAgent {
			return err
		}
		return fmt.Errorf("failed to initialize uniter for %q: %v", unitTag, err)
	}
	logger.Infof("unit %q started", u.unit)

	// Start filtering state change events for consumption by modes.
	u.f, err = filter.NewFilter(u.st, unitTag)
	if err != nil {
		return err
	}
	u.addCleanup(u.f.Stop)

	// Stop the uniter if either of these components fails.
	go func() {
		u.tomb.Kill(leadershipTracker.Wait())
	}()
	go func() {
		u.tomb.Kill(u.f.Wait())
	}()

	// Start handling leader settings events, or not, as appropriate.
	u.f.WantLeaderSettingsEvents(!u.operationState().Leader)

	// Run modes until we encounter an error. Each mode function returns the
	// next mode to run; certain error causes are translated into mode
	// switches instead of terminating the loop.
	mode := ModeContinue
	for err == nil {
		select {
		case <-u.tomb.Dying():
			err = tomb.ErrDying
		default:
			mode, err = mode(u)
			switch cause := errors.Cause(err); cause {
			case operation.ErrNeedsReboot:
				// Translate into the worker-level reboot sentinel the agent
				// understands.
				err = worker.ErrRebootMachine
			case tomb.ErrDying, worker.ErrTerminateAgent:
				// Strip any wrapping so callers can compare these sentinels
				// directly.
				err = cause
			case operation.ErrHookFailed:
				// A failed hook is recoverable: enter the hook-error mode and
				// keep looping.
				mode, err = ModeHookError, nil
			default:
				// A charm deploy conflict is likewise recoverable via its own
				// dedicated mode, keyed on the conflicting charm URL.
				charmURL, ok := operation.DeployConflictCharmURL(cause)
				if ok {
					mode, err = ModeConflicted(charmURL), nil
				}
			}
		}
	}

	logger.Infof("unit %q shutting down: %s", u.unit, err)
	return err
}