func (s *resolverSuite) SetUpTest(c *gc.C) { s.stub = testing.Stub{} s.charmURL = charm.MustParseURL("cs:precise/mysql-2") s.remoteState = remotestate.Snapshot{ CharmModifiedVersion: s.charmModifiedVersion, CharmURL: s.charmURL, } s.opFactory = operation.NewFactory(operation.FactoryParams{}) attachments, err := storage.NewAttachments(&dummyStorageAccessor{}, names.NewUnitTag("u/0"), c.MkDir(), nil) c.Assert(err, jc.ErrorIsNil) s.clearResolved = func() error { return errors.New("unexpected resolved") } s.reportHookError = func(hook.Info) error { return errors.New("unexpected report hook error") } s.resolverConfig = uniter.ResolverConfig{ ClearResolved: func() error { return s.clearResolved() }, ReportHookError: func(info hook.Info) error { return s.reportHookError(info) }, StartRetryHookTimer: func() { s.stub.AddCall("StartRetryHookTimer") }, StopRetryHookTimer: func() { s.stub.AddCall("StopRetryHookTimer") }, ShouldRetryHooks: true, Leadership: leadership.NewResolver(), Actions: uniteractions.NewResolver(), Relations: relation.NewRelationsResolver(&dummyRelations{}), Storage: storage.NewResolver(attachments), Commands: nopResolver{}, } s.resolver = uniter.NewUniterResolver(s.resolverConfig) }
func (s *attachmentsSuite) TestAttachmentsUpdateShortCircuitDeath(c *gc.C) { stateDir := c.MkDir() unitTag := names.NewUnitTag("mysql/0") abort := make(chan struct{}) storageTag0 := names.NewStorageTag("data/0") storageTag1 := names.NewStorageTag("data/1") removed := set.NewTags() st := &mockStorageAccessor{ unitStorageAttachments: func(u names.UnitTag) ([]params.StorageAttachmentId, error) { return nil, nil }, remove: func(s names.StorageTag, u names.UnitTag) error { c.Assert(u, gc.Equals, unitTag) removed.Add(s) return nil }, } att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) r := storage.NewResolver(att) // First make sure we create a storage-attached hook operation for // data/0. We do this to show that until the hook is *committed*, // we will still short-circuit removal. localState := resolver.LocalState{State: operation.State{ Kind: operation.Continue, }} _, err = r.NextOp(localState, remotestate.Snapshot{ Life: params.Alive, Storage: map[names.StorageTag]remotestate.StorageSnapshot{ storageTag0: { Life: params.Alive, Kind: params.StorageKindBlock, Location: "/dev/sdb", Attached: true, }, }, }, &mockOperations{}) c.Assert(err, jc.ErrorIsNil) for _, storageTag := range []names.StorageTag{storageTag0, storageTag1} { _, err = r.NextOp(localState, remotestate.Snapshot{ Life: params.Alive, Storage: map[names.StorageTag]remotestate.StorageSnapshot{ storageTag: {Life: params.Dying}, }, }, nil) c.Assert(err, gc.Equals, resolver.ErrNoOperation) } c.Assert(removed.SortedValues(), jc.DeepEquals, []names.Tag{ storageTag0, storageTag1, }) }
func (s *attachmentsSuite) TestAttachmentsStorage(c *gc.C) { stateDir := c.MkDir() unitTag := names.NewUnitTag("mysql/0") abort := make(chan struct{}) st := &mockStorageAccessor{ unitStorageAttachments: func(u names.UnitTag) ([]params.StorageAttachmentId, error) { return nil, nil }, } att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) r := storage.NewResolver(att) storageTag := names.NewStorageTag("data/0") _, err = att.Storage(storageTag) c.Assert(err, jc.Satisfies, errors.IsNotFound) assertStorageTags(c, att) // Inform the resolver of an attachment. localState := resolver.LocalState{State: operation.State{ Kind: operation.Continue, }} op, err := r.NextOp(localState, remotestate.Snapshot{ Life: params.Alive, Storage: map[names.StorageTag]remotestate.StorageSnapshot{ storageTag: { Kind: params.StorageKindBlock, Life: params.Alive, Location: "/dev/sdb", Attached: true, }, }, }, &mockOperations{}) c.Assert(err, jc.ErrorIsNil) c.Assert(op.String(), gc.Equals, "run hook storage-attached") assertStorageTags(c, att, storageTag) ctx, err := att.Storage(storageTag) c.Assert(err, jc.ErrorIsNil) c.Assert(ctx, gc.NotNil) c.Assert(ctx.Tag(), gc.Equals, storageTag) c.Assert(ctx.Kind(), gc.Equals, corestorage.StorageKindBlock) c.Assert(ctx.Location(), gc.Equals, "/dev/sdb") }
func (s *attachmentsSuite) TestAttachmentsWaitPending(c *gc.C) { stateDir := c.MkDir() unitTag := names.NewUnitTag("mysql/0") abort := make(chan struct{}) storageTag := names.NewStorageTag("data/0") st := &mockStorageAccessor{ unitStorageAttachments: func(u names.UnitTag) ([]params.StorageAttachmentId, error) { return nil, nil }, } att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) r := storage.NewResolver(att) nextOp := func(installed bool) error { localState := resolver.LocalState{State: operation.State{ Installed: installed, Kind: operation.Continue, }} _, err := r.NextOp(localState, remotestate.Snapshot{ Life: params.Alive, Storage: map[names.StorageTag]remotestate.StorageSnapshot{ storageTag: { Life: params.Alive, Attached: false, }, }, }, &mockOperations{}) return err } // Inform the resolver of a new, unprovisioned storage attachment. // Before install, we should wait for its completion; after install, // we should not. err = nextOp(false /* workload not installed */) c.Assert(att.Pending(), gc.Equals, 1) c.Assert(err, gc.Equals, resolver.ErrWaiting) err = nextOp(true /* workload installed */) c.Assert(err, gc.Equals, resolver.ErrNoOperation) }
func (s *resolverSuite) SetUpTest(c *gc.C) { s.charmURL = charm.MustParseURL("cs:precise/mysql-2") s.remoteState = remotestate.Snapshot{ CharmURL: s.charmURL, } s.opFactory = operation.NewFactory(operation.FactoryParams{}) attachments, err := storage.NewAttachments(&dummyStorageAccessor{}, names.NewUnitTag("u/0"), c.MkDir(), nil) c.Assert(err, jc.ErrorIsNil) s.resolver = uniter.NewUniterResolver( func() error { return errors.New("unexpected resolved") }, func(_ hook.Info) error { return errors.New("unexpected report hook error") }, func() error { return nil }, uniteractions.NewResolver(), leadership.NewResolver(), relation.NewRelationsResolver(&dummyRelations{}), storage.NewResolver(attachments), ) }
// loop is the uniter's main run loop. It resumes any pending charm
// install, then repeatedly (re)starts the remote-state watcher and runs
// the resolver loop until a terminal error (agent termination, reboot,
// catacomb death) occurs. resolver.ErrRestart re-enters the outer loop
// with refreshed charm state; all other errors break out.
func (u *Uniter) loop(unitTag names.UnitTag) (err error) {
	if err := u.init(unitTag); err != nil {
		if err == worker.ErrTerminateAgent {
			return err
		}
		return fmt.Errorf("failed to initialize uniter for %q: %v", unitTag, err)
	}
	logger.Infof("unit %q started", u.unit)

	// Install is a special case, as it must run before there
	// is any remote state, and before the remote state watcher
	// is started.
	var charmURL *corecharm.URL
	var charmModifiedVersion int
	opState := u.operationExecutor.State()
	if opState.Kind == operation.Install {
		logger.Infof("resuming charm install")
		op, err := u.operationFactory.NewInstall(opState.CharmURL)
		if err != nil {
			return errors.Trace(err)
		}
		if err := u.operationExecutor.Run(op); err != nil {
			return errors.Trace(err)
		}
		charmURL = opState.CharmURL
	} else {
		// Not installing: read the current charm URL and modified
		// version from the unit/service in state.
		curl, err := u.unit.CharmURL()
		if err != nil {
			return errors.Trace(err)
		}
		charmURL = curl
		svc, err := u.unit.Service()
		if err != nil {
			return errors.Trace(err)
		}
		charmModifiedVersion, err = svc.CharmModifiedVersion()
		if err != nil {
			return errors.Trace(err)
		}
	}

	// watcher is replaced on each pass of the outer loop; the mutex
	// guards it against the cleanup closure below.
	var (
		watcher   *remotestate.RemoteStateWatcher
		watcherMu sync.Mutex
	)

	logger.Infof("hooks are retried %v", u.hookRetryStrategy.ShouldRetry)
	retryHookChan := make(chan struct{}, 1)
	retryHookTimer := utils.NewBackoffTimer(utils.BackoffTimerConfig{
		Min:    u.hookRetryStrategy.MinRetryTime,
		Max:    u.hookRetryStrategy.MaxRetryTime,
		Jitter: u.hookRetryStrategy.JitterRetryTime,
		Factor: u.hookRetryStrategy.RetryTimeFactor,
		Func: func() {
			// Don't try to send on the channel if it's already full
			// This can happen if the timer fires off before the event is consumed
			// by the resolver loop
			select {
			case retryHookChan <- struct{}{}:
			default:
			}
		},
		Clock: u.clock,
	})
	defer func() {
		// Whenever we exit the uniter we want to stop a potentially
		// running timer so it doesn't trigger for nothing.
		retryHookTimer.Reset()
	}()

	// restartWatcher stops any existing watcher and starts a fresh one
	// registered with the catacomb.
	restartWatcher := func() error {
		watcherMu.Lock()
		defer watcherMu.Unlock()
		if watcher != nil {
			// watcher added to catacomb, will kill uniter if there's an error.
			worker.Stop(watcher)
		}
		var err error
		watcher, err = remotestate.NewWatcher(
			remotestate.WatcherConfig{
				State:               remotestate.NewAPIState(u.st),
				LeadershipTracker:   u.leadershipTracker,
				UnitTag:             unitTag,
				UpdateStatusChannel: u.updateStatusAt,
				CommandChannel:      u.commandChannel,
				RetryHookChannel:    retryHookChan,
			})
		if err != nil {
			return errors.Trace(err)
		}
		if err := u.catacomb.Add(watcher); err != nil {
			return errors.Trace(err)
		}
		return nil
	}

	onIdle := func() error {
		opState := u.operationExecutor.State()
		if opState.Kind != operation.Continue {
			// We should only set idle status if we're in
			// the "Continue" state, which indicates that
			// there is nothing to do and we're not in an
			// error state.
			return nil
		}
		return setAgentStatus(u, status.StatusIdle, "", nil)
	}

	// clearResolved clears the unit's resolved flag in state and tells
	// the watcher so its snapshot reflects the change.
	clearResolved := func() error {
		if err := u.unit.ClearResolved(); err != nil {
			return errors.Trace(err)
		}
		watcher.ClearResolvedMode()
		return nil
	}

	for {
		if err = restartWatcher(); err != nil {
			err = errors.Annotate(err, "(re)starting watcher")
			break
		}

		uniterResolver := NewUniterResolver(ResolverConfig{
			ClearResolved:       clearResolved,
			ReportHookError:     u.reportHookError,
			FixDeployer:         u.deployer.Fix,
			ShouldRetryHooks:    u.hookRetryStrategy.ShouldRetry,
			StartRetryHookTimer: retryHookTimer.Start,
			StopRetryHookTimer:  retryHookTimer.Reset,
			Actions:             actions.NewResolver(),
			Leadership:          uniterleadership.NewResolver(),
			Relations:           relation.NewRelationsResolver(u.relations),
			Storage:             storage.NewResolver(u.storage),
			Commands: runcommands.NewCommandsResolver(
				u.commands, watcher.CommandCompleted,
			),
		})

		// We should not do anything until there has been a change
		// to the remote state. The watcher will trigger at least
		// once initially.
		select {
		case <-u.catacomb.Dying():
			return u.catacomb.ErrDying()
		case <-watcher.RemoteStateChanged():
		}

		localState := resolver.LocalState{
			CharmURL:             charmURL,
			CharmModifiedVersion: charmModifiedVersion,
		}
		// Inner loop: keep running the resolver until it reports an
		// error we must react to.
		for err == nil {
			err = resolver.Loop(resolver.LoopConfig{
				Resolver:      uniterResolver,
				Watcher:       watcher,
				Executor:      u.operationExecutor,
				Factory:       u.operationFactory,
				Abort:         u.catacomb.Dying(),
				OnIdle:        onIdle,
				CharmDirGuard: u.charmDirGuard,
			}, &localState)
			switch cause := errors.Cause(err); cause {
			case nil:
				// Loop back around.
			case resolver.ErrLoopAborted:
				err = u.catacomb.ErrDying()
			case operation.ErrNeedsReboot:
				err = worker.ErrRebootMachine
			case operation.ErrHookFailed:
				// Loop back around. The resolver can tell that it is in
				// an error state by inspecting the operation state.
				err = nil
			case resolver.ErrTerminate:
				err = u.terminate()
			case resolver.ErrRestart:
				// make sure we update the two values used above in
				// creating LocalState.
				charmURL = localState.CharmURL
				charmModifiedVersion = localState.CharmModifiedVersion
				// leave err assigned, causing loop to break
			default:
				// We need to set conflicted from here, because error
				// handling is outside of the resolver's control.
				if operation.IsDeployConflictError(cause) {
					localState.Conflicted = true
					err = setAgentStatus(u, status.StatusError, "upgrade failed", nil)
				} else {
					reportAgentError(u, "resolver loop error", err)
				}
			}
		}
		// Only ErrRestart re-enters the outer loop (with a new watcher).
		if errors.Cause(err) != resolver.ErrRestart {
			break
		}
	}
	logger.Infof("unit %q shutting down: %s", u.unit, err)
	return err
}
func (s *attachmentsSuite) TestAttachmentsSetDying(c *gc.C) { stateDir := c.MkDir() unitTag := names.NewUnitTag("mysql/0") abort := make(chan struct{}) storageTag := names.NewStorageTag("data/0") var destroyed, removed bool st := &mockStorageAccessor{ unitStorageAttachments: func(u names.UnitTag) ([]params.StorageAttachmentId, error) { c.Assert(u, gc.Equals, unitTag) return []params.StorageAttachmentId{{ StorageTag: storageTag.String(), UnitTag: unitTag.String(), }}, nil }, storageAttachment: func(s names.StorageTag, u names.UnitTag) (params.StorageAttachment, error) { c.Assert(u, gc.Equals, unitTag) c.Assert(s, gc.Equals, storageTag) return params.StorageAttachment{}, ¶ms.Error{ Message: "not provisioned", Code: params.CodeNotProvisioned, } }, destroyUnitStorageAttachments: func(u names.UnitTag) error { c.Assert(u, gc.Equals, unitTag) destroyed = true return nil }, remove: func(s names.StorageTag, u names.UnitTag) error { c.Assert(removed, jc.IsFalse) c.Assert(s, gc.Equals, storageTag) c.Assert(u, gc.Equals, unitTag) removed = true return nil }, } att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) c.Assert(att.Pending(), gc.Equals, 1) r := storage.NewResolver(att) // Inform the resolver that the unit is Dying. The storage is still // Alive, and is now provisioned, but will be destroyed and removed // by the resolver. localState := resolver.LocalState{State: operation.State{ Kind: operation.Continue, }} _, err = r.NextOp(localState, remotestate.Snapshot{ Life: params.Dying, Storage: map[names.StorageTag]remotestate.StorageSnapshot{ storageTag: { Kind: params.StorageKindBlock, Life: params.Alive, Location: "/dev/sdb", Attached: true, }, }, }, &mockOperations{}) c.Assert(err, gc.Equals, resolver.ErrNoOperation) c.Assert(destroyed, jc.IsTrue) c.Assert(att.Pending(), gc.Equals, 0) c.Assert(removed, jc.IsTrue) }
func (s *attachmentsSuite) TestAttachmentsCommitHook(c *gc.C) { stateDir := c.MkDir() unitTag := names.NewUnitTag("mysql/0") abort := make(chan struct{}) var removed bool storageTag := names.NewStorageTag("data/0") st := &mockStorageAccessor{ unitStorageAttachments: func(u names.UnitTag) ([]params.StorageAttachmentId, error) { return nil, nil }, remove: func(s names.StorageTag, u names.UnitTag) error { removed = true c.Assert(s, gc.Equals, storageTag) return nil }, } att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) r := storage.NewResolver(att) // Inform the resolver of an attachment. localState := resolver.LocalState{State: operation.State{ Kind: operation.Continue, }} _, err = r.NextOp(localState, remotestate.Snapshot{ Life: params.Alive, Storage: map[names.StorageTag]remotestate.StorageSnapshot{ storageTag: { Kind: params.StorageKindBlock, Life: params.Alive, Location: "/dev/sdb", Attached: true, }, }, }, &mockOperations{}) c.Assert(err, jc.ErrorIsNil) c.Assert(att.Pending(), gc.Equals, 1) // No file exists until storage-attached is committed. stateFile := filepath.Join(stateDir, "data-0") c.Assert(stateFile, jc.DoesNotExist) err = att.CommitHook(hook.Info{ Kind: hooks.StorageAttached, StorageId: storageTag.Id(), }) c.Assert(err, jc.ErrorIsNil) data, err := ioutil.ReadFile(stateFile) c.Assert(err, jc.ErrorIsNil) c.Assert(string(data), gc.Equals, "attached: true\n") c.Assert(att.Pending(), gc.Equals, 0) c.Assert(removed, jc.IsFalse) err = att.CommitHook(hook.Info{ Kind: hooks.StorageDetaching, StorageId: storageTag.Id(), }) c.Assert(err, jc.ErrorIsNil) c.Assert(stateFile, jc.DoesNotExist) c.Assert(removed, jc.IsTrue) }
// loop is the uniter's main run loop (tomb-based variant). It resumes any
// pending charm install, then repeatedly (re)starts the remote-state
// watcher and drives the resolver loop until a terminal error occurs;
// resolver.ErrRestart re-enters the outer loop with a refreshed charm URL.
func (u *Uniter) loop(unitTag names.UnitTag) (err error) {
	if err := u.init(unitTag); err != nil {
		if err == worker.ErrTerminateAgent {
			return err
		}
		return fmt.Errorf("failed to initialize uniter for %q: %v", unitTag, err)
	}
	logger.Infof("unit %q started", u.unit)

	// Install is a special case, as it must run before there
	// is any remote state, and before the remote state watcher
	// is started.
	var charmURL *corecharm.URL
	opState := u.operationExecutor.State()
	if opState.Kind == operation.Install {
		logger.Infof("resuming charm install")
		op, err := u.operationFactory.NewInstall(opState.CharmURL)
		if err != nil {
			return errors.Trace(err)
		}
		if err := u.operationExecutor.Run(op); err != nil {
			return errors.Trace(err)
		}
		charmURL = opState.CharmURL
	} else {
		// Not installing: read the current charm URL from state.
		curl, err := u.unit.CharmURL()
		if err != nil {
			return errors.Trace(err)
		}
		charmURL = curl
	}

	// watcher is replaced on each pass of the outer loop; the mutex
	// guards it against the cleanup closure registered below.
	var (
		watcher   *remotestate.RemoteStateWatcher
		watcherMu sync.Mutex
	)

	// restartWatcher stops any existing watcher and starts a fresh one,
	// killing the uniter's tomb if the watcher dies with an error.
	restartWatcher := func() error {
		watcherMu.Lock()
		defer watcherMu.Unlock()
		if watcher != nil {
			if err := watcher.Stop(); err != nil {
				return errors.Trace(err)
			}
		}
		var err error
		watcher, err = remotestate.NewWatcher(
			remotestate.WatcherConfig{
				State:               remotestate.NewAPIState(u.st),
				LeadershipTracker:   u.leadershipTracker,
				UnitTag:             unitTag,
				UpdateStatusChannel: u.updateStatusAt,
			})
		if err != nil {
			return errors.Trace(err)
		}
		// Stop the uniter if the watcher fails. The watcher may be
		// stopped cleanly, so only kill the tomb if the error is
		// non-nil.
		go func(w *remotestate.RemoteStateWatcher) {
			if err := w.Wait(); err != nil {
				u.tomb.Kill(err)
			}
		}(watcher)
		return nil
	}
	// watcher may be replaced, so use a closure.
	u.addCleanup(func() error {
		watcherMu.Lock()
		defer watcherMu.Unlock()
		if watcher != nil {
			return watcher.Stop()
		}
		return nil
	})

	onIdle := func() error {
		opState := u.operationExecutor.State()
		if opState.Kind != operation.Continue {
			// We should only set idle status if we're in
			// the "Continue" state, which indicates that
			// there is nothing to do and we're not in an
			// error state.
			return nil
		}
		return setAgentStatus(u, params.StatusIdle, "", nil)
	}

	// clearResolved clears the unit's resolved flag in state and tells
	// the watcher so its snapshot reflects the change.
	clearResolved := func() error {
		if err := u.unit.ClearResolved(); err != nil {
			return errors.Trace(err)
		}
		watcher.ClearResolvedMode()
		return nil
	}

	for {
		if err = restartWatcher(); err != nil {
			err = errors.Annotate(err, "(re)starting watcher")
			break
		}

		uniterResolver := &uniterResolver{
			clearResolved:      clearResolved,
			reportHookError:    u.reportHookError,
			fixDeployer:        u.deployer.Fix,
			actionsResolver:    actions.NewResolver(),
			leadershipResolver: uniterleadership.NewResolver(),
			relationsResolver:  relation.NewRelationsResolver(u.relations),
			storageResolver:    storage.NewResolver(u.storage),
		}

		// We should not do anything until there has been a change
		// to the remote state. The watcher will trigger at least
		// once initially.
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case <-watcher.RemoteStateChanged():
		}

		localState := resolver.LocalState{CharmURL: charmURL}
		// Inner loop: keep running the resolver until it reports an
		// error we must react to.
		for err == nil {
			err = resolver.Loop(resolver.LoopConfig{
				Resolver:       uniterResolver,
				Watcher:        watcher,
				Executor:       u.operationExecutor,
				Factory:        u.operationFactory,
				Dying:          u.tomb.Dying(),
				OnIdle:         onIdle,
				CharmDirLocker: u.charmDirLocker,
			}, &localState)
			switch cause := errors.Cause(err); cause {
			case nil:
				// Loop back around.
			case tomb.ErrDying:
				err = tomb.ErrDying
			case operation.ErrNeedsReboot:
				err = worker.ErrRebootMachine
			case operation.ErrHookFailed:
				// Loop back around. The resolver can tell that it is in
				// an error state by inspecting the operation state.
				err = nil
			case resolver.ErrTerminate:
				err = u.terminate()
			case resolver.ErrRestart:
				// Refresh the charm URL used when re-creating LocalState.
				charmURL = localState.CharmURL
				// leave err assigned, causing loop to break
			default:
				// We need to set conflicted from here, because error
				// handling is outside of the resolver's control.
				if operation.IsDeployConflictError(cause) {
					localState.Conflicted = true
					err = setAgentStatus(u, params.StatusError, "upgrade failed", nil)
				} else {
					reportAgentError(u, "resolver loop error", err)
				}
			}
		}
		// Only ErrRestart re-enters the outer loop (with a new watcher).
		if errors.Cause(err) != resolver.ErrRestart {
			break
		}
	}
	logger.Infof("unit %q shutting down: %s", u.unit, err)
	return err
}
func (s *attachmentsSuite) TestAttachmentsStorage(c *gc.C) { stateDir := c.MkDir() unitTag := names.NewUnitTag("mysql/0") abort := make(chan struct{}) storageTag := names.NewStorageTag("data/0") attachment := params.StorageAttachment{ StorageTag: storageTag.String(), UnitTag: unitTag.String(), Life: params.Alive, Kind: params.StorageKindBlock, Location: "/dev/sdb", } st := &mockStorageAccessor{ unitStorageAttachments: func(u names.UnitTag) ([]params.StorageAttachmentId, error) { c.Assert(u, gc.Equals, unitTag) return nil, nil }, storageAttachment: func(s names.StorageTag, u names.UnitTag) (params.StorageAttachment, error) { c.Assert(s, gc.Equals, storageTag) return attachment, nil }, } att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) // There should be no context for data/0 until a required remote state change occurs. _, ok := att.Storage(storageTag) c.Assert(ok, jc.Satisfies, errors.IsNotFound) assertStorageTags(c, att) err = att.UpdateStorage([]names.StorageTag{storageTag}) c.Assert(err, jc.ErrorIsNil) assertStorageTags(c, att, storageTag) storageResolver := storage.NewResolver(att) storage.SetStorageLife(storageResolver, map[names.StorageTag]params.Life{ storageTag: params.Alive, }) localState := resolver.LocalState{ State: operation.State{ Kind: operation.Continue, }, } remoteState := remotestate.Snapshot{ Storage: map[names.StorageTag]remotestate.StorageSnapshot{ storageTag: remotestate.StorageSnapshot{ Kind: params.StorageKindBlock, Life: params.Alive, Location: "/dev/sdb", Attached: true, }, }, } op, err := storageResolver.NextOp(localState, remoteState, &mockOperations{}) c.Assert(err, jc.ErrorIsNil) c.Assert(op.String(), gc.Equals, "run hook storage-attached") ctx, err := att.Storage(storageTag) c.Assert(err, jc.ErrorIsNil) c.Assert(ctx, gc.NotNil) c.Assert(ctx.Tag(), gc.Equals, storageTag) c.Assert(ctx.Kind(), gc.Equals, corestorage.StorageKindBlock) c.Assert(ctx.Location(), gc.Equals, "/dev/sdb") }