func (ts *taskRunnerSuite) TestExternalAbort(c *C) { sb := &stateBackend{} st := state.New(sb) r := state.NewTaskRunner(st) defer r.Stop() ch := make(chan bool) r.AddHandler("blocking", func(t *state.Task, tb *tomb.Tomb) error { ch <- true <-tb.Dying() return nil }, nil) st.Lock() chg := st.NewChange("install", "...") t := st.NewTask("blocking", "...") chg.AddTask(t) st.Unlock() r.Ensure() <-ch st.Lock() chg.Abort() st.Unlock() // The Abort above must make Ensure kill the task, or this will never end. ensureChange(c, r, sb, chg) }
func (ts *taskRunnerSuite) TestStopHandlerJustFinishing(c *C) { sb := &stateBackend{} st := state.New(sb) r := state.NewTaskRunner(st) defer r.Stop() ch := make(chan bool) r.AddHandler("just-finish", func(t *state.Task, tb *tomb.Tomb) error { ch <- true <-tb.Dying() // just ignore and actually finishes return nil }, nil) st.Lock() chg := st.NewChange("install", "...") t := st.NewTask("just-finish", "...") chg.AddTask(t) st.Unlock() r.Ensure() <-ch r.Stop() st.Lock() defer st.Unlock() c.Check(t.Status(), Equals, state.DoneStatus) }
func (ts *taskRunnerSuite) TestStopAskForRetry(c *C) { sb := &stateBackend{} st := state.New(sb) r := state.NewTaskRunner(st) defer r.Stop() ch := make(chan bool) r.AddHandler("ask-for-retry", func(t *state.Task, tb *tomb.Tomb) error { ch <- true <-tb.Dying() // ask for retry return &state.Retry{} }, nil) st.Lock() chg := st.NewChange("install", "...") t := st.NewTask("ask-for-retry", "...") chg.AddTask(t) st.Unlock() r.Ensure() <-ch r.Stop() st.Lock() defer st.Unlock() c.Check(t.Status(), Equals, state.DoingStatus) }
func newRunnerManager(s *state.State) *runnerManager { rm := &runnerManager{ runner: state.NewTaskRunner(s), } rm.runner.AddHandler("runMgr1", func(t *state.Task, _ *tomb.Tomb) error { s := t.State() s.Lock() defer s.Unlock() s.Set("runMgr1Mark", 1) return nil }, nil) rm.runner.AddHandler("runMgr2", func(t *state.Task, _ *tomb.Tomb) error { s := t.State() s.Lock() defer s.Unlock() s.Set("runMgr2Mark", 1) return nil }, nil) rm.runner.AddHandler("runMgrEnsureBefore", func(t *state.Task, _ *tomb.Tomb) error { s := t.State() s.Lock() defer s.Unlock() s.EnsureBefore(20 * time.Millisecond) return nil }, nil) return rm }
// Manager returns a new snap manager.
//
// It wires every snap-related task kind to its do/undo handler pair on a
// fresh task runner owned by the returned SnapManager.
func Manager(st *state.State) (*SnapManager, error) {
	runner := state.NewTaskRunner(st)
	m := &SnapManager{
		state:   st,
		backend: backend.Backend{},
		runner:  runner,
	}

	// this handler does nothing
	runner.AddHandler("nop", func(t *state.Task, _ *tomb.Tomb) error {
		return nil
	}, nil)

	// install/update related
	runner.AddHandler("prepare-snap", m.doPrepareSnap, m.undoPrepareSnap)
	// NOTE(review): download-snap reuses undoPrepareSnap as its undo —
	// presumably undoing a download just drops the prepared state;
	// confirm this is intentional.
	runner.AddHandler("download-snap", m.doDownloadSnap, m.undoPrepareSnap)
	runner.AddHandler("mount-snap", m.doMountSnap, m.undoMountSnap)
	runner.AddHandler("unlink-current-snap", m.doUnlinkCurrentSnap, m.undoUnlinkCurrentSnap)
	runner.AddHandler("copy-snap-data", m.doCopySnapData, m.undoCopySnapData)
	// Extra cleanup step registered for copy-snap-data.
	runner.AddCleanup("copy-snap-data", m.cleanupCopySnapData)
	runner.AddHandler("link-snap", m.doLinkSnap, m.undoLinkSnap)
	runner.AddHandler("start-snap-services", m.startSnapServices, m.stopSnapServices)
	// FIXME: drop the task entirely after a while
	// (having this wart here avoids yet-another-patch)
	runner.AddHandler("cleanup", func(*state.Task, *tomb.Tomb) error { return nil }, nil)

	// remove related; note these have no undo handlers.
	runner.AddHandler("stop-snap-services", m.stopSnapServices, m.startSnapServices)
	runner.AddHandler("unlink-snap", m.doUnlinkSnap, nil)
	runner.AddHandler("clear-snap", m.doClearSnapData, nil)
	runner.AddHandler("discard-snap", m.doDiscardSnap, nil)

	// alias related
	runner.AddHandler("alias", m.doAlias, m.undoAlias)
	runner.AddHandler("clear-aliases", m.doClearAliases, m.undoClearAliases)
	runner.AddHandler("set-auto-aliases", m.doSetAutoAliases, m.undoClearAliases)
	runner.AddHandler("setup-aliases", m.doSetupAliases, m.undoSetupAliases)
	// NOTE(review): remove-aliases uses the *do* handler doSetupAliases
	// as its undo (re-creating the aliases); verify this symmetry holds.
	runner.AddHandler("remove-aliases", m.doRemoveAliases, m.doSetupAliases)

	// control serialisation
	runner.SetBlocked(m.blockedTask)

	// test handlers
	runner.AddHandler("fake-install-snap", func(t *state.Task, _ *tomb.Tomb) error {
		return nil
	}, nil)
	runner.AddHandler("fake-install-snap-error", func(t *state.Task, _ *tomb.Tomb) error {
		return fmt.Errorf("fake-install-snap-error errored")
	}, nil)

	return m, nil
}
// Manager returns a new HookManager. func Manager(s *state.State) (*HookManager, error) { runner := state.NewTaskRunner(s) manager := &HookManager{ state: s, runner: runner, repository: newRepository(), contexts: make(map[string]*Context), } runner.AddHandler("run-hook", manager.doRunHook, nil) return manager, nil }
// Manager returns a new snap manager.
//
// It wires the snap-related task kinds to their do/undo handler pairs
// on a fresh task runner owned by the returned SnapManager.
func Manager(s *state.State) (*SnapManager, error) {
	runner := state.NewTaskRunner(s)
	m := &SnapManager{
		state:   s,
		backend: backend.Backend{},
		runner:  runner,
	}

	// this handler does nothing
	runner.AddHandler("nop", func(t *state.Task, _ *tomb.Tomb) error {
		return nil
	}, nil)

	// install/update related
	runner.AddHandler("prepare-snap", m.doPrepareSnap, m.undoPrepareSnap)
	// NOTE(review): download-snap reuses undoPrepareSnap as its undo —
	// presumably undoing a download just drops the prepared state;
	// confirm this is intentional.
	runner.AddHandler("download-snap", m.doDownloadSnap, m.undoPrepareSnap)
	runner.AddHandler("mount-snap", m.doMountSnap, m.undoMountSnap)
	runner.AddHandler("unlink-current-snap", m.doUnlinkCurrentSnap, m.undoUnlinkCurrentSnap)
	runner.AddHandler("copy-snap-data", m.doCopySnapData, m.undoCopySnapData)
	// Extra cleanup step registered for copy-snap-data.
	runner.AddCleanup("copy-snap-data", m.cleanupCopySnapData)
	runner.AddHandler("link-snap", m.doLinkSnap, m.undoLinkSnap)
	runner.AddHandler("start-snap-services", m.startSnapServices, m.stopSnapServices)
	// FIXME: drop the task entirely after a while
	// (having this wart here avoids yet-another-patch)
	runner.AddHandler("cleanup", func(*state.Task, *tomb.Tomb) error { return nil }, nil)
	// FIXME: port to native tasks and rename
	//runner.AddHandler("garbage-collect", m.doGarbageCollect, nil)

	// TODO: refresh-all needs logic at this level, to properly
	// handle the logic for that mode (e.g. skip snaps installed
	// with --devmode, set jailmode from snapstate).

	// remove related; note these have no undo handlers.
	runner.AddHandler("stop-snap-services", m.stopSnapServices, m.startSnapServices)
	runner.AddHandler("unlink-snap", m.doUnlinkSnap, nil)
	runner.AddHandler("clear-snap", m.doClearSnapData, nil)
	runner.AddHandler("discard-snap", m.doDiscardSnap, nil)

	// test handlers
	runner.AddHandler("fake-install-snap", func(t *state.Task, _ *tomb.Tomb) error {
		return nil
	}, nil)
	runner.AddHandler("fake-install-snap-error", func(t *state.Task, _ *tomb.Tomb) error {
		return fmt.Errorf("fake-install-snap-error errored")
	}, nil)

	return m, nil
}
func (ts *taskRunnerSuite) TestCleanup(c *C) { sb := &stateBackend{} st := state.New(sb) r := state.NewTaskRunner(st) defer r.Stop() r.AddHandler("clean-it", func(t *state.Task, tb *tomb.Tomb) error { return nil }, nil) r.AddHandler("other", func(t *state.Task, tb *tomb.Tomb) error { return nil }, nil) called := 0 r.AddCleanup("clean-it", func(t *state.Task, tb *tomb.Tomb) error { called++ if called == 1 { return fmt.Errorf("retry me") } return nil }) st.Lock() chg := st.NewChange("install", "...") t1 := st.NewTask("clean-it", "...") t2 := st.NewTask("other", "...") chg.AddTask(t1) chg.AddTask(t2) st.Unlock() chgIsClean := func() bool { st.Lock() defer st.Unlock() return chg.IsClean() } // Mark tasks as done. ensureChange(c, r, sb, chg) // First time it errors, then it works, then it's ignored. c.Assert(chgIsClean(), Equals, false) c.Assert(called, Equals, 0) r.Ensure() r.Wait() c.Assert(chgIsClean(), Equals, false) c.Assert(called, Equals, 1) r.Ensure() r.Wait() c.Assert(chgIsClean(), Equals, true) c.Assert(called, Equals, 2) r.Ensure() r.Wait() c.Assert(chgIsClean(), Equals, true) c.Assert(called, Equals, 2) }
// TestPrematureChangeReady verifies that a change is not reported as
// ready while an undo handler is still running: after one task fails
// and a sibling's undo is started and blocked, the change must not be
// Ready and must not carry an error yet.
func (ts *taskRunnerSuite) TestPrematureChangeReady(c *C) {
	sb := &stateBackend{}
	st := state.New(sb)
	r := state.NewTaskRunner(st)
	defer r.Stop()

	// ch synchronizes with the undo handler: the handler sends when the
	// undo starts and then blocks until the test sends back.
	ch := make(chan bool)
	r.AddHandler("block-undo",
		// do: succeeds immediately
		func(t *state.Task, tb *tomb.Tomb) error { return nil },
		// undo: signal start, then park until released
		func(t *state.Task, tb *tomb.Tomb) error {
			ch <- true
			<-ch
			return nil
		})
	r.AddHandler("fail", func(t *state.Task, tb *tomb.Tomb) error {
		return errors.New("BAM")
	}, nil)

	st.Lock()
	chg := st.NewChange("install", "...")
	t1 := st.NewTask("block-undo", "...")
	t2 := st.NewTask("fail", "...")
	chg.AddTask(t1)
	chg.AddTask(t2)
	st.Unlock()

	r.Ensure() // Error
	r.Wait()
	r.Ensure() // Block on undo
	<-ch

	// Release the undo handler and wait for it once the test is done.
	defer func() {
		ch <- true
		r.Wait()
	}()

	st.Lock()
	defer st.Unlock()

	if chg.Status().Ready() {
		c.Errorf("Change considered ready prematurely")
	}
	c.Assert(chg.Err(), IsNil)

	// The Ready channel must not be closed yet either.
	select {
	case <-chg.Ready():
		c.Errorf("Change considered ready prematurely")
	default:
	}
}
// Manager returns a new assertion manager. func Manager(s *state.State) (*AssertManager, error) { runner := state.NewTaskRunner(s) runner.AddHandler("validate-snap", doValidateSnap, nil) db, err := sysdb.Open() if err != nil { return nil, err } s.Lock() ReplaceDB(s, db) s.Unlock() return &AssertManager{runner: runner}, nil }
// Manager returns a new device manager. func Manager(s *state.State) (*DeviceManager, error) { runner := state.NewTaskRunner(s) keypairMgr, err := asserts.OpenFSKeypairManager(dirs.SnapDeviceDir) if err != nil { return nil, err } m := &DeviceManager{state: s, keypairMgr: keypairMgr, runner: runner} runner.AddHandler("generate-device-key", m.doGenerateDeviceKey, nil) runner.AddHandler("request-serial", m.doRequestSerial, nil) return m, nil }
// Manager returns a new device manager. func Manager(s *state.State, hookManager *hookstate.HookManager) (*DeviceManager, error) { runner := state.NewTaskRunner(s) keypairMgr, err := asserts.OpenFSKeypairManager(dirs.SnapDeviceDir) if err != nil { return nil, err } m := &DeviceManager{state: s, keypairMgr: keypairMgr, runner: runner} hookManager.Register(regexp.MustCompile("^prepare-device$"), newPrepareDeviceHandler) runner.AddHandler("generate-device-key", m.doGenerateDeviceKey, nil) runner.AddHandler("request-serial", m.doRequestSerial, nil) runner.AddHandler("mark-seeded", m.doMarkSeeded, nil) return m, nil }
// Manager returns a new InterfaceManager. // Extra interfaces can be provided for testing. func Manager(s *state.State, extra []interfaces.Interface) (*InterfaceManager, error) { runner := state.NewTaskRunner(s) m := &InterfaceManager{ state: s, runner: runner, repo: interfaces.NewRepository(), } if err := m.initialize(extra); err != nil { return nil, err } // interface tasks might touch more than the immediate task target snap, serialize them runner.SetBlocked(func(_ *state.Task, running []*state.Task) bool { return len(running) != 0 }) runner.AddHandler("connect", m.doConnect, nil) runner.AddHandler("disconnect", m.doDisconnect, nil) runner.AddHandler("setup-profiles", m.doSetupProfiles, m.doRemoveProfiles) runner.AddHandler("remove-profiles", m.doRemoveProfiles, m.doSetupProfiles) runner.AddHandler("discard-conns", m.doDiscardConns, m.undoDiscardConns) return m, nil }
// TestTaskSerialization verifies that a SetBlocked predicate can
// serialize tasks: do2 must not start while do1 runs, and once do1
// finishes the runner signals EnsureBefore(0) so the blocked task is
// picked up on the next ensure pass.
func (ts *taskRunnerSuite) TestTaskSerialization(c *C) {
	// The backend reports EnsureBefore calls on this channel.
	ensureBeforeTick := make(chan bool, 1)
	sb := &stateBackend{
		ensureBefore:     time.Hour,
		ensureBeforeSeen: ensureBeforeTick,
	}
	st := state.New(sb)
	r := state.NewTaskRunner(st)
	defer r.Stop()

	// do1 rendezvouses twice: once on start, once to be released.
	ch1 := make(chan bool)
	ch2 := make(chan bool)
	r.AddHandler("do1", func(t *state.Task, _ *tomb.Tomb) error {
		ch1 <- true
		ch1 <- true
		return nil
	}, nil)
	r.AddHandler("do2", func(t *state.Task, _ *tomb.Tomb) error {
		ch2 <- true
		return nil
	}, nil)

	// start first do1, and then do2 when nothing else is running
	startedDo1 := false
	r.SetBlocked(func(t *state.Task, running []*state.Task) bool {
		if t.Kind() == "do2" && (len(running) != 0 || !startedDo1) {
			return true
		}
		if t.Kind() == "do1" {
			startedDo1 = true
		}
		return false
	})

	st.Lock()
	chg := st.NewChange("install", "...")
	t1 := st.NewTask("do1", "...")
	chg.AddTask(t1)
	t2 := st.NewTask("do2", "...")
	chg.AddTask(t2)
	st.Unlock()

	r.Ensure() // will start only one, do1

	select {
	case <-ch1:
	case <-time.After(2 * time.Second):
		c.Fatal("do1 wasn't called")
	}

	// do2 must still be blocked and no EnsureBefore requested yet.
	c.Check(ensureBeforeTick, HasLen, 0)
	c.Check(ch2, HasLen, 0)

	r.Ensure() // won't yet start anything new

	c.Check(ensureBeforeTick, HasLen, 0)
	c.Check(ch2, HasLen, 0)

	// finish do1
	select {
	case <-ch1:
	case <-time.After(2 * time.Second):
		c.Fatal("do1 wasn't continued")
	}

	// getting an EnsureBefore 0 call
	select {
	case <-ensureBeforeTick:
	case <-time.After(2 * time.Second):
		c.Fatal("EnsureBefore wasn't called")
	}
	c.Check(sb.ensureBefore, Equals, time.Duration(0))

	r.Ensure() // will start do2

	select {
	case <-ch2:
	case <-time.After(2 * time.Second):
		c.Fatal("do2 wasn't called")
	}

	// no more EnsureBefore calls
	c.Check(ensureBeforeTick, HasLen, 0)
}
// TestRetryAfterDuration verifies Retry{After: d} scheduling: the task
// stays in DoingStatus with an AtTime set, the runner requests
// EnsureBefore(d), a too-early Ensure re-requests the remaining delay,
// and once mocked time reaches the schedule the task runs and finishes.
func (ts *taskRunnerSuite) TestRetryAfterDuration(c *C) {
	// The backend reports EnsureBefore calls on this channel.
	ensureBeforeTick := make(chan bool, 1)
	sb := &stateBackend{
		ensureBefore:     time.Hour,
		ensureBeforeSeen: ensureBeforeTick,
	}
	st := state.New(sb)
	r := state.NewTaskRunner(st)
	defer r.Stop()

	ch := make(chan bool)
	ask := 0
	// First invocation asks for a retry in one minute; the second
	// signals the test and succeeds.
	r.AddHandler("ask-for-retry", func(t *state.Task, _ *tomb.Tomb) error {
		ask++
		if ask == 1 {
			return &state.Retry{After: time.Minute}
		}
		ch <- true
		return nil
	}, nil)

	st.Lock()
	chg := st.NewChange("install", "...")
	t := st.NewTask("ask-for-retry", "...")
	chg.AddTask(t)
	st.Unlock()

	// Freeze the clock so the retry arithmetic is deterministic.
	tock := time.Now()
	restore := state.MockTime(tock)
	defer restore()

	r.Ensure() // will run and be rescheduled in a minute

	select {
	case <-ensureBeforeTick:
	case <-time.After(2 * time.Second):
		c.Fatal("EnsureBefore wasn't called")
	}

	st.Lock()
	defer st.Unlock()

	// Still doing, scheduled one minute out.
	c.Check(t.Status(), Equals, state.DoingStatus)
	c.Check(ask, Equals, 1)
	c.Check(sb.ensureBefore, Equals, 1*time.Minute)
	schedule := t.AtTime()
	c.Check(schedule.IsZero(), Equals, false)

	// Advance only 5s: the ensure must reschedule for the remainder.
	state.MockTime(tock.Add(5 * time.Second))
	sb.ensureBefore = time.Hour
	st.Unlock()
	r.Ensure() // too soon
	st.Lock()

	c.Check(t.Status(), Equals, state.DoingStatus)
	c.Check(ask, Equals, 1)
	c.Check(sb.ensureBefore, Equals, 55*time.Second)
	c.Check(t.AtTime().Equal(schedule), Equals, true)

	// Jump to the scheduled time: the handler runs again and succeeds.
	state.MockTime(schedule)
	sb.ensureBefore = time.Hour
	st.Unlock()
	r.Ensure() // time to run again

	select {
	case <-ch:
	case <-time.After(2 * time.Second):
		c.Fatal("handler wasn't called")
	}

	// wait for handler to finish
	r.Wait()

	st.Lock()
	c.Check(t.Status(), Equals, state.DoneStatus)
	c.Check(ask, Equals, 2)
	c.Check(sb.ensureBefore, Equals, time.Hour)
	c.Check(t.AtTime().IsZero(), Equals, true)
}
// TestAbortLanes exercises Change.AbortLanes over a fixed diamond-shaped
// task graph. Each table entry's setup uses a tiny DSL:
// "name:status[:lane,lane...]" assigns a status (and lane memberships)
// to a task, and "*" expands to every task not yet mentioned. The result
// string uses the same "name:status" form with "*" expansion.
func (ts *taskRunnerSuite) TestAbortLanes(c *C) {
	names := strings.Fields("t11 t12 t21 t22 t31 t32 t41 t42")

	for _, test := range abortLanesTests {
		sb := &stateBackend{}
		st := state.New(sb)
		r := state.NewTaskRunner(st)
		// NOTE(review): these defers are inside the loop, so one runner
		// stays alive and one state stays locked per iteration until the
		// whole test returns; each iteration uses a fresh state so this
		// does not deadlock, but it does accumulate — confirm intended.
		defer r.Stop()

		st.Lock()
		defer st.Unlock()

		c.Assert(len(st.Tasks()), Equals, 0)

		// Build the fixed dependency graph shared by all table entries.
		chg := st.NewChange("install", "...")
		tasks := make(map[string]*state.Task)
		for _, name := range names {
			tasks[name] = st.NewTask("do", name)
			chg.AddTask(tasks[name])
		}
		tasks["t12"].WaitFor(tasks["t11"])
		tasks["t21"].WaitFor(tasks["t12"])
		tasks["t22"].WaitFor(tasks["t21"])
		tasks["t31"].WaitFor(tasks["t12"])
		tasks["t32"].WaitFor(tasks["t31"])
		tasks["t41"].WaitFor(tasks["t22"])
		tasks["t41"].WaitFor(tasks["t32"])
		tasks["t42"].WaitFor(tasks["t41"])

		c.Logf("-----")
		c.Logf("Testing setup: %s", test.setup)

		// Map lowercase status names back to state.Status values.
		statuses := make(map[string]state.Status)
		for s := state.DefaultStatus; s <= state.ErrorStatus; s++ {
			statuses[strings.ToLower(s.String())] = s
		}

		// Apply the setup items; "*" appends one item per unseen task.
		items := strings.Fields(test.setup)
		seen := make(map[string]bool)
		for i := 0; i < len(items); i++ {
			item := items[i]
			parts := strings.Split(item, ":")
			if parts[0] == "*" {
				for _, name := range names {
					if !seen[name] {
						parts[0] = name
						items = append(items, strings.Join(parts, ":"))
					}
				}
				continue
			}
			seen[parts[0]] = true
			task := tasks[parts[0]]
			task.SetStatus(statuses[parts[1]])
			if len(parts) > 2 {
				lanes := strings.Split(parts[2], ",")
				for _, lane := range lanes {
					n, err := strconv.Atoi(lane)
					c.Assert(err, IsNil)
					task.JoinLane(n)
				}
			}
		}

		c.Logf("Aborting with: %v", test.abort)
		chg.AbortLanes(test.abort)

		c.Logf("Expected result: %s", test.result)

		// Expand "*" in the expected result in place, then collect the
		// observed "name:status" pairs in the same order.
		seen = make(map[string]bool)
		var expected = strings.Fields(test.result)
		var obtained []string
		for i := 0; i < len(expected); i++ {
			item := expected[i]
			parts := strings.Split(item, ":")
			if parts[0] == "*" {
				var expanded []string
				for _, name := range names {
					if !seen[name] {
						parts[0] = name
						expanded = append(expanded, strings.Join(parts, ":"))
					}
				}
				expected = append(expected[:i], append(expanded, expected[i+1:]...)...)
				// Re-process the newly spliced-in items.
				i--
				continue
			}
			name := parts[0]
			seen[parts[0]] = true
			obtained = append(obtained, name+":"+strings.ToLower(tasks[name].Status().String()))
		}

		c.Assert(strings.Join(obtained, " "), Equals, strings.Join(expected, " "), Commentf("setup: %s", test.setup))
	}
}
func (ts *taskRunnerSuite) TestSequenceTests(c *C) { sb := &stateBackend{} st := state.New(sb) r := state.NewTaskRunner(st) defer r.Stop() ch := make(chan string, 256) fn := func(label string) state.HandlerFunc { return func(task *state.Task, tomb *tomb.Tomb) error { st.Lock() defer st.Unlock() ch <- task.Summary() + ":" + label var isSet bool if task.Get(label+"-block", &isSet) == nil && isSet { ch <- task.Summary() + ":" + label + "-block" st.Unlock() <-tomb.Dying() st.Lock() ch <- task.Summary() + ":" + label + "-unblock" } if task.Get(label+"-retry", &isSet) == nil && isSet { task.Set(label+"-retry", false) ch <- task.Summary() + ":" + label + "-retry" return &state.Retry{} } if task.Get(label+"-error", &isSet) == nil && isSet { ch <- task.Summary() + ":" + label + "-error" return errors.New("boom") } if task.Get(label+"-set-ready", &isSet) == nil && isSet { switch task.Status() { case state.DoingStatus: task.SetStatus(state.DoneStatus) case state.UndoingStatus: task.SetStatus(state.UndoneStatus) } } return nil } } r.AddHandler("do", fn("do"), nil) r.AddHandler("do-undo", fn("do"), fn("undo")) for _, test := range sequenceTests { st.Lock() // Delete previous changes. st.Prune(1, 1) chg := st.NewChange("install", "...") tasks := make(map[string]*state.Task) for _, name := range strings.Fields("t11 t12 t21 t31 t32") { if name == "t12" { tasks[name] = st.NewTask("do", name) } else { tasks[name] = st.NewTask("do-undo", name) } chg.AddTask(tasks[name]) } tasks["t21"].WaitFor(tasks["t11"]) tasks["t21"].WaitFor(tasks["t12"]) tasks["t31"].WaitFor(tasks["t21"]) tasks["t32"].WaitFor(tasks["t21"]) st.Unlock() c.Logf("-----") c.Logf("Testing setup: %s", test.setup) statuses := make(map[string]state.Status) for s := state.DefaultStatus; s <= state.ErrorStatus; s++ { statuses[strings.ToLower(s.String())] = s } // Reset and prepare initial task state. 
st.Lock() for _, t := range chg.Tasks() { t.SetStatus(state.DefaultStatus) t.Set("do-error", false) t.Set("do-block", false) t.Set("undo-error", false) t.Set("undo-block", false) } for _, item := range strings.Fields(test.setup) { if item == "chg:abort" { chg.Abort() continue } kv := strings.Split(item, ":") if strings.HasPrefix(kv[1], "was-") { tasks[kv[0]].SetStatus(statuses[kv[1][4:]]) } else { tasks[kv[0]].Set(kv[1], true) } } st.Unlock() // Run change until final. ensureChange(c, r, sb, chg) // Compute order of events observed. var events []string var done bool for !done { select { case ev := <-ch: events = append(events, ev) // Make t11/t12 and t31/t32 always show up in the // same order if they're next to each other. for i := len(events) - 2; i >= 0; i-- { prev := events[i] next := events[i+1] switch strings.Split(next, ":")[1] { case "do-unblock", "undo-unblock": default: if prev[1] == next[1] && prev[2] > next[2] { events[i], events[i+1] = next, prev continue } } break } default: done = true } } c.Logf("Expected result: %s", test.result) c.Assert(strings.Join(events, " "), Equals, test.result, Commentf("setup: %s", test.setup)) // Compute final expected status for tasks. finalStatus := make(map[string]state.Status) // ... default when no handler is called for tname := range tasks { finalStatus[tname] = state.HoldStatus } // ... overwrite based on relevant setup for _, item := range strings.Fields(test.setup) { if item == "chg:abort" && strings.Contains(test.setup, "t12:was-doing") { // t12 has no undo so must hold if asked to abort when was doing. 
finalStatus["t12"] = state.HoldStatus } kv := strings.Split(item, ":") if !strings.HasPrefix(kv[1], "was-") { continue } switch strings.TrimPrefix(kv[1], "was-") { case "do", "doing", "done": finalStatus[kv[0]] = state.DoneStatus case "abort", "undo", "undoing", "undone": if kv[0] == "t12" { finalStatus[kv[0]] = state.DoneStatus // no undo for t12 } else { finalStatus[kv[0]] = state.UndoneStatus } case "was-error": finalStatus[kv[0]] = state.ErrorStatus case "was-hold": finalStatus[kv[0]] = state.ErrorStatus } } // ... and overwrite based on events observed. for _, ev := range events { kv := strings.Split(ev, ":") switch kv[1] { case "do": finalStatus[kv[0]] = state.DoneStatus case "undo": finalStatus[kv[0]] = state.UndoneStatus case "do-error", "undo-error": finalStatus[kv[0]] = state.ErrorStatus case "do-retry": if kv[0] == "t12" && finalStatus["t11"] == state.ErrorStatus { // t12 has no undo so must hold if asked to abort on retry. finalStatus["t12"] = state.HoldStatus } } } st.Lock() var gotStatus, wantStatus []string for _, task := range chg.Tasks() { gotStatus = append(gotStatus, task.Summary()+":"+task.Status().String()) wantStatus = append(wantStatus, task.Summary()+":"+finalStatus[task.Summary()].String()) } st.Unlock() c.Logf("Expected statuses: %s", strings.Join(wantStatus, " ")) comment := Commentf("calls: %s", test.result) c.Assert(strings.Join(gotStatus, " "), Equals, strings.Join(wantStatus, " "), comment) } }