func (s *storageProvisionerSuite) TestSetVolumeInfoErrorResultDoesNotStopWorker(c *gc.C) {
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
		return []params.ErrorResult{{Error: &params.Error{Message: "message", Code: "code"}}}, nil
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer func() {
		err := worker.Wait()
		c.Assert(err, jc.ErrorIsNil)
	}()
	defer worker.Kill()

	done := make(chan interface{})
	go func() {
		defer close(done)
		worker.Wait()
	}()

	args.volumes.volumesWatcher.changes <- []string{"1"}
	args.environ.watcher.changes <- struct{}{}
	assertNoEvent(c, done, "worker exited")
}
func (s *storageProvisionerSuite) TestUpdateEnvironConfig(c *gc.C) {
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	s.provider.volumeSourceFunc = func(envConfig *config.Config, sourceConfig *storage.Config) (storage.VolumeSource, error) {
		c.Assert(envConfig, gc.NotNil)
		c.Assert(sourceConfig, gc.NotNil)
		c.Assert(envConfig.AllAttrs()["foo"], gc.Equals, "bar")
		return nil, errors.New("zinga")
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	newConfig, err := args.environ.cfg.Apply(map[string]interface{}{"foo": "bar"})
	c.Assert(err, jc.ErrorIsNil)

	args.environ.watcher.changes <- struct{}{}
	args.environ.setConfig(newConfig)
	args.environ.watcher.changes <- struct{}{}
	args.volumes.volumesWatcher.changes <- []string{"1", "2"}

	err = worker.Wait()
	c.Assert(err, gc.ErrorMatches, `processing pending volumes: creating volumes: getting volume source: getting storage source "dummy": zinga`)
}
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, getResource GetResourceFunc) {

	startWorkerAndWait := func() error {
		logger.Infof("starting %q manifold worker in %s...", name, delay)
		select {
		case <-time.After(delay):
		case <-engine.tomb.Dying():
			logger.Debugf("not starting %q manifold worker (shutting down)", name)
			return tomb.ErrDying
		}

		logger.Debugf("starting %q manifold worker", name)
		worker, err := start(getResource)
		if err != nil {
			logger.Warningf("failed to start %q manifold worker: %v", name, err)
			return err
		}

		logger.Debugf("running %q manifold worker", name)
		select {
		case <-engine.tomb.Dying():
			logger.Debugf("stopping %q manifold worker (shutting down)", name)
			worker.Kill()
		case engine.started <- startedTicket{name, worker}:
			logger.Debugf("registered %q manifold worker", name)
		}
		return worker.Wait()
	}

	// We may or may not send on started, but we *must* send on stopped.
	engine.stopped <- stoppedTicket{name, startWorkerAndWait()}
}
// upgradeWaiterWorker runs the specified worker after upgrades have completed.
func (a *MachineAgent) upgradeWaiterWorker(start func() (worker.Worker, error)) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		// Wait for the upgrade to complete (or for us to be stopped).
		select {
		case <-stop:
			return nil
		case <-a.upgradeWorkerContext.UpgradeComplete:
		}

		// Upgrades are done, start the worker.
		worker, err := start()
		if err != nil {
			return err
		}

		// Wait for worker to finish or for us to be stopped.
		waitCh := make(chan error)
		go func() {
			waitCh <- worker.Wait()
		}()
		select {
		case err := <-waitCh:
			return err
		case <-stop:
			worker.Kill()
		}
		return <-waitCh // Ensure worker has stopped before returning.
	})
}
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *Engine) runWorker(name string, delay time.Duration, start StartFunc, context *context) {

	errAborted := errors.New("aborted before delay elapsed")

	startAfterDelay := func() (worker.Worker, error) {
		// NOTE: the context will expire *after* the worker is started.
		// This is tolerable because
		//  1) we'll still correctly block access attempts most of the time
		//  2) failing to block them won't cause data races anyway
		//  3) it's not worth complicating the interface for every client just
		//     to eliminate the possibility of one harmlessly dumb interaction.
		defer context.expire()
		logger.Tracef("starting %q manifold worker in %s...", name, delay)
		select {
		case <-engine.tomb.Dying():
			return nil, errAborted
		case <-context.Abort():
			return nil, errAborted
		// TODO(fwereade): 2016-03-17 lp:1558657
		case <-time.After(delay):
		}
		logger.Tracef("starting %q manifold worker", name)
		return start(context)
	}

	startWorkerAndWait := func() error {
		worker, err := startAfterDelay()
		switch errors.Cause(err) {
		case errAborted:
			return nil
		case nil:
			logger.Tracef("running %q manifold worker", name)
		default:
			logger.Tracef("failed to start %q manifold worker: %v", name, err)
			return err
		}
		select {
		case <-engine.tomb.Dying():
			logger.Tracef("stopping %q manifold worker (shutting down)", name)
			// Doesn't matter whether worker == engine: if we're already Dying
			// then cleanly Kill()ing ourselves again won't hurt anything.
			worker.Kill()
		case engine.started <- startedTicket{name, worker, context.accessLog}:
			logger.Tracef("registered %q manifold worker", name)
		}
		if worker == engine {
			// We mustn't Wait() for ourselves to complete here, or we'll
			// deadlock. But we should wait until we're Dying, because we
			// need this func to keep running to keep the self manifold
			// accessible as a resource.
			<-engine.tomb.Dying()
			return tomb.ErrDying
		}
		return worker.Wait()
	}

	// We may or may not send on started, but we *must* send on stopped.
	engine.stopped <- stoppedTicket{name, startWorkerAndWait(), context.accessLog}
}
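// The composite literals above imply the rough shape of the tickets handed to
// the engine's loop goroutine. The definitions below are inferred from those
// call sites only; the field names and the resourceAccess type are assumptions
// and may not match the engine's real declarations.

type startedTicket struct {
	name      string
	worker    worker.Worker
	accessLog []resourceAccess // accesses recorded while the worker was starting
}

type stoppedTicket struct {
	name      string
	error     error
	accessLog []resourceAccess
}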
func (s *MachinerSuite) TestMachinerStorageAttached(c *gc.C) {
	// Machine is dying. We'll respond to "EnsureDead" by
	// saying that there are still storage attachments;
	// this should not cause an error.
	s.accessor.machine.life = params.Dying
	s.accessor.machine.SetErrors(
		nil, // SetMachineAddresses
		nil, // SetStatus
		nil, // Watch
		nil, // Refresh
		nil, // SetStatus
		&params.Error{Code: params.CodeMachineHasAttachedStorage},
	)

	worker := machiner.NewMachiner(s.accessor, s.agentConfig, false)
	s.accessor.machine.watcher.changes <- struct{}{}
	worker.Kill()
	c.Check(worker.Wait(), jc.ErrorIsNil)

	s.accessor.CheckCalls(c, []gitjujutesting.StubCall{{
		FuncName: "Machine",
		Args:     []interface{}{s.agentConfig.Tag()},
	}})

	s.accessor.machine.watcher.CheckCalls(c, []gitjujutesting.StubCall{
		{FuncName: "Changes"},
		{FuncName: "Changes"},
		{FuncName: "Stop"},
	})

	s.accessor.machine.CheckCalls(c, []gitjujutesting.StubCall{{
		FuncName: "SetMachineAddresses",
		Args: []interface{}{
			network.NewAddresses(
				"255.255.255.255",
				"0.0.0.0",
			),
		},
	}, {
		FuncName: "SetStatus",
		Args: []interface{}{
			params.StatusStarted,
			"",
			map[string]interface{}(nil),
		},
	}, {
		FuncName: "Watch",
	}, {
		FuncName: "Refresh",
	}, {
		FuncName: "Life",
	}, {
		FuncName: "SetStatus",
		Args: []interface{}{
			params.StatusStopped,
			"",
			map[string]interface{}(nil),
		},
	}, {
		FuncName: "EnsureDead",
	}})
}
func (s *storageProvisionerSuite) TestFilesystemAdded(c *gc.C) {
	expectedFilesystems := []params.Filesystem{{
		FilesystemTag: "filesystem-1",
		Info: params.FilesystemInfo{
			FilesystemId: "id-1",
			Size:         1024,
		},
	}, {
		FilesystemTag: "filesystem-2",
		Info: params.FilesystemInfo{
			FilesystemId: "id-2",
			Size:         1024,
		},
	}}

	filesystemInfoSet := make(chan interface{})
	filesystemAccessor := newMockFilesystemAccessor()
	filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) {
		defer close(filesystemInfoSet)
		c.Assert(filesystems, jc.SameContents, expectedFilesystems)
		return nil, nil
	}

	args := &workerArgs{filesystems: filesystemAccessor}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	// The worker should create filesystems according to ids "1" and "2".
	filesystemAccessor.filesystemsWatcher.changes <- []string{"1", "2"}
	// ... but not until the environment config is available.
	assertNoEvent(c, filesystemInfoSet, "filesystem info set")
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set")
}
func (s *storageProvisionerSuite) TestDestroyVolumes(c *gc.C) {
	provisionedVolume := names.NewVolumeTag("1")
	unprovisionedVolume := names.NewVolumeTag("2")

	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionVolume(provisionedVolume)

	life := func(tags []names.Tag) ([]params.LifeResult, error) {
		results := make([]params.LifeResult, len(tags))
		for i := range results {
			results[i].Life = params.Dead
		}
		return results, nil
	}

	destroyedChan := make(chan interface{}, 1)
	s.provider.destroyVolumesFunc = func(volumeIds []string) []error {
		destroyedChan <- volumeIds
		return make([]error, len(volumeIds))
	}

	removedChan := make(chan interface{}, 1)
	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
		removedChan <- tags
		return make([]params.ErrorResult, len(tags)), nil
	}

	args := &workerArgs{
		volumes: volumeAccessor,
		life: &mockLifecycleManager{
			life:   life,
			remove: remove,
		},
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	volumeAccessor.volumesWatcher.changes <- []string{
		provisionedVolume.Id(),
		unprovisionedVolume.Id(),
	}
	args.environ.watcher.changes <- struct{}{}

	// Both volumes should be removed; the provisioned one
	// should be deprovisioned first.
	destroyed := waitChannel(c, destroyedChan, "waiting for volume to be deprovisioned")
	assertNoEvent(c, destroyedChan, "volumes deprovisioned")
	c.Assert(destroyed, jc.DeepEquals, []string{"vol-1"})

	var removed []names.Tag
	for len(removed) < 2 {
		tags := waitChannel(c, removedChan, "waiting for volumes to be removed").([]names.Tag)
		removed = append(removed, tags...)
	}
	c.Assert(removed, jc.SameContents, []names.Tag{provisionedVolume, unprovisionedVolume})
	assertNoEvent(c, removedChan, "volumes removed")
}
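// The storage-provisioner tests above synchronize on channels through two
// helpers, waitChannel and assertNoEvent, whose definitions are not shown in
// this excerpt. The sketch below is a guess at their likely shape, assuming
// gocheck and the coretesting wait constants; the real helpers may differ in
// names, timeouts, and signatures.

func waitChannel(c *gc.C, ch <-chan interface{}, activity string) interface{} {
	// Wait for a value with a generous timeout, failing the test otherwise.
	select {
	case v := <-ch:
		return v
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out %s", activity)
		panic("unreachable")
	}
}

func assertNoEvent(c *gc.C, ch <-chan interface{}, event string) {
	// Assert that nothing arrives within a short settling window.
	select {
	case <-ch:
		c.Fatalf("unexpected %s", event)
	case <-time.After(coretesting.ShortWait):
	}
}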
func (s *WorkerSuite) TestWatchError(c *gc.C) {
	fix := newFixture(c, errors.New("zap ouch"))
	fix.Run(c, func(worker worker.Worker) {
		err := worker.Wait()
		c.Check(err, gc.ErrorMatches, "zap ouch")
	})
	fix.CheckCallNames(c, "Watch")
}
// Run just the upgrade-steps worker with the fake machine agent
// provided.
func (s *UpgradeSuite) runUpgradeWorkerUsingAgent(
	c *gc.C,
	agent *fakeUpgradingMachineAgent,
	jobs ...multiwatcher.MachineJob,
) (error, *upgradeWorkerContext) {
	s.setInstantRetryStrategy(c)
	context := NewUpgradeWorkerContext()
	worker := context.Worker(agent, nil, jobs)
	return worker.Wait(), context
}
func (s *ManifoldSuite) TestBrokenConnectionKillsWorkerWithFallbackErr(c *gc.C) {
	worker := s.setupWorkerTest(c)

	close(s.conn.broken)
	err := worker.Wait()
	c.Check(err, gc.ErrorMatches, "api connection broken unexpectedly")
	s.conn.stub.CheckCalls(c, []testing.StubCall{{
		FuncName: "Close",
	}})
}
func (s *ManifoldSuite) setupWorkerTest(c *gc.C) worker.Worker {
	worker, err := s.manifold.Start(s.resources.Context())
	c.Check(err, jc.ErrorIsNil)
	s.AddCleanup(func(c *gc.C) {
		worker.Kill()
		err := worker.Wait()
		c.Check(err, jc.ErrorIsNil)
	})
	return worker
}
func (s *ManifoldSuite) TestBrokenConnectionKillsWorkerWithCloseErr(c *gc.C) {
	s.conn.stub.SetErrors(errors.New("bad plumbing"))
	worker := s.setupWorkerTest(c)

	close(s.conn.broken)
	err := worker.Wait()
	c.Check(err, gc.ErrorMatches, "bad plumbing")
	s.conn.stub.CheckCalls(c, []testing.StubCall{{
		FuncName: "Close",
	}})
}
func (s *storageProvisionerSuite) TestCreateVolumeCreatesAttachment(c *gc.C) {
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")

	volumeAttachmentInfoSet := make(chan interface{})
	volumeAccessor.setVolumeAttachmentInfo = func(volumeAttachments []params.VolumeAttachment) ([]params.ErrorResult, error) {
		defer close(volumeAttachmentInfoSet)
		return make([]params.ErrorResult, len(volumeAttachments)), nil
	}

	s.provider.createVolumesFunc = func(args []storage.VolumeParams) ([]storage.CreateVolumesResult, error) {
		volumeAccessor.provisionedAttachments[params.MachineStorageId{
			MachineTag:    args[0].Attachment.Machine.String(),
			AttachmentTag: args[0].Attachment.Volume.String(),
		}] = params.VolumeAttachment{
			VolumeTag:  args[0].Attachment.Volume.String(),
			MachineTag: args[0].Attachment.Machine.String(),
		}
		return []storage.CreateVolumesResult{{
			Volume: &storage.Volume{
				Tag: args[0].Tag,
				VolumeInfo: storage.VolumeInfo{
					VolumeId: "vol-ume",
				},
			},
			VolumeAttachment: &storage.VolumeAttachment{
				Volume:  args[0].Attachment.Volume,
				Machine: args[0].Attachment.Machine,
			},
		}}, nil
	}

	attachVolumesCalled := make(chan interface{})
	s.provider.attachVolumesFunc = func(args []storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) {
		defer close(attachVolumesCalled)
		return nil, errors.New("should not be called")
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag:    "machine-1",
		AttachmentTag: "volume-1",
	}}
	assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment set")

	// The worker should create volumes according to ids "1".
	volumeAccessor.volumesWatcher.changes <- []string{"1"}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set")
	assertNoEvent(c, attachVolumesCalled, "AttachVolumes called")
}
func (s *storageProvisionerSuite) TestAttachVolumeBackedFilesystem(c *gc.C) {
	infoSet := make(chan interface{})
	filesystemAccessor := newMockFilesystemAccessor()
	filesystemAccessor.setFilesystemAttachmentInfo = func(attachments []params.FilesystemAttachment) ([]params.ErrorResult, error) {
		infoSet <- attachments
		return nil, nil
	}

	args := &workerArgs{
		scope:       names.NewMachineTag("0"),
		filesystems: filesystemAccessor,
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	filesystemAccessor.provisionedFilesystems["filesystem-0-0"] = params.Filesystem{
		FilesystemTag: "filesystem-0-0",
		VolumeTag:     "volume-0-0",
		Info: params.FilesystemInfo{
			FilesystemId: "whatever",
			Size:         123,
		},
	}
	filesystemAccessor.provisionedMachines["machine-0"] = instance.Id("already-provisioned-0")
	args.volumes.blockDevices[params.MachineStorageId{
		MachineTag:    "machine-0",
		AttachmentTag: "volume-0-0",
	}] = storage.BlockDevice{
		DeviceName: "xvdf1",
		Size:       123,
	}

	filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag:    "machine-0",
		AttachmentTag: "filesystem-0-0",
	}}
	assertNoEvent(c, infoSet, "filesystem attachment info set")
	args.environ.watcher.changes <- struct{}{}
	filesystemAccessor.filesystemsWatcher.changes <- []string{"0/0"}

	info := waitChannel(
		c, infoSet, "waiting for filesystem attachment info to be set",
	).([]params.FilesystemAttachment)
	c.Assert(info, jc.DeepEquals, []params.FilesystemAttachment{{
		FilesystemTag: "filesystem-0-0",
		MachineTag:    "machine-0",
		Info: params.FilesystemAttachmentInfo{
			MountPoint: "/mnt/xvdf1",
			ReadOnly:   true,
		},
	}})
}
// Run just the upgradesteps worker with a fake machine agent and
// fake agent config.
func (s *UpgradeSuite) runUpgradeWorker(c *gc.C, jobs ...multiwatcher.MachineJob) (
	error, *fakeConfigSetter, []StatusCall, gate.Lock,
) {
	s.setInstantRetryStrategy(c)
	config := s.makeFakeConfig()
	agent := NewFakeAgent(config)
	doneLock, err := NewLock(agent)
	c.Assert(err, jc.ErrorIsNil)
	machineStatus := &testStatusSetter{}
	worker, err := NewWorker(doneLock, agent, nil, jobs, s.openStateForUpgrade, s.preUpgradeSteps, machineStatus)
	c.Assert(err, jc.ErrorIsNil)
	return worker.Wait(), config, machineStatus.Calls, doneLock
}
func (s *ManifoldSuite) setupWorkerTest(c *gc.C) worker.Worker {
	context := dt.StubContext(nil, map[string]interface{}{
		"agent-name": &dummyAgent{spoolDir: s.spoolDir},
	})
	worker, err := s.manifold.Start(context)
	c.Check(err, jc.ErrorIsNil)
	s.AddCleanup(func(c *gc.C) {
		worker.Kill()
		err := worker.Wait()
		c.Check(err, jc.ErrorIsNil)
	})
	return worker
}
func (s *storageProvisionerSuite) TestStartStop(c *gc.C) {
	worker := storageprovisioner.NewStorageProvisioner(
		coretesting.EnvironmentTag,
		"dir",
		newMockVolumeAccessor(),
		newMockFilesystemAccessor(),
		&mockLifecycleManager{},
		newMockEnvironAccessor(c),
		newMockMachineAccessor(c),
	)
	worker.Kill()
	c.Assert(worker.Wait(), gc.IsNil)
}
func (s *storageProvisionerSuite) TestSetVolumeInfoErrorStopsWorker(c *gc.C) {
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1")
	volumeAccessor.setVolumeInfo = func(volumes []params.Volume) ([]params.ErrorResult, error) {
		return []params.ErrorResult{{Error: &params.Error{Message: "message", Code: "code"}}}, nil
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	done := make(chan interface{})
	go func() {
		defer close(done)
		err := worker.Wait()
		c.Assert(err, gc.ErrorMatches, "processing pending volumes: publishing volume 1 to state: message")
	}()

	args.volumes.volumesWatcher.changes <- []string{"1"}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, done, "waiting for worker to exit")
}
func (s *storageProvisionerSuite) TestDestroyFilesystems(c *gc.C) {
	provisionedFilesystem := names.NewFilesystemTag("1")
	unprovisionedFilesystem := names.NewFilesystemTag("2")

	filesystemAccessor := newMockFilesystemAccessor()
	filesystemAccessor.provisionFilesystem(provisionedFilesystem)

	life := func(tags []names.Tag) ([]params.LifeResult, error) {
		results := make([]params.LifeResult, len(tags))
		for i := range results {
			results[i].Life = params.Dead
		}
		return results, nil
	}

	removedChan := make(chan interface{}, 1)
	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
		removedChan <- tags
		return make([]params.ErrorResult, len(tags)), nil
	}

	args := &workerArgs{
		filesystems: filesystemAccessor,
		life: &mockLifecycleManager{
			life:   life,
			remove: remove,
		},
	}
	worker := newStorageProvisioner(c, args)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	filesystemAccessor.filesystemsWatcher.changes <- []string{
		provisionedFilesystem.Id(),
		unprovisionedFilesystem.Id(),
	}
	args.environ.watcher.changes <- struct{}{}

	// Both filesystems should be removed; the provisioned one
	// *should* be deprovisioned first, but we don't currently
	// have the ability to do so via the storage provider API.
	var removed []names.Tag
	for len(removed) < 2 {
		tags := waitChannel(c, removedChan, "waiting for filesystems to be removed").([]names.Tag)
		removed = append(removed, tags...)
	}
	c.Assert(removed, jc.SameContents, []names.Tag{provisionedFilesystem, unprovisionedFilesystem})
	assertNoEvent(c, removedChan, "filesystems removed")
}
func (s *ManifoldSuite) setupWorkerTest(c *gc.C) worker.Worker {
	worker, err := s.manifold.Start(s.getResource)
	c.Check(err, jc.ErrorIsNil)
	s.AddCleanup(func(c *gc.C) {
		worker.Kill()
		err := worker.Wait()
		c.Check(err, jc.ErrorIsNil)
	})
	s.CheckCalls(c, []testing.StubCall{{
		FuncName: "createLock",
		Args:     []interface{}{"/path/to/data/dir"},
	}})
	return worker
}
func (s *WorkerSuite) TestRescaleThenError(c *gc.C) {
	fix := newFixture(c, nil, nil, errors.New("pew squish"))
	fix.Run(c, func(worker worker.Worker) {
		err := worker.Wait()
		c.Check(err, gc.ErrorMatches, "pew squish")
	})
	fix.CheckCalls(c, []testing.StubCall{{
		FuncName: "Watch",
	}, {
		FuncName: "Rescale",
		Args:     []interface{}{[]string{"expected", "first"}},
	}, {
		FuncName: "Rescale",
		Args:     []interface{}{[]string{"expected", "second"}},
	}})
}
// TestStatusWorkerStarts ensures that the manifold correctly sets up the connected worker.
func (s *PatchedManifoldSuite) TestStatusWorkerStarts(c *gc.C) {
	var called bool
	s.manifoldConfig.NewConnectedStatusWorker = func(cfg meterstatus.ConnectedConfig) (worker.Worker, error) {
		called = true
		return meterstatus.NewConnectedStatusWorker(cfg)
	}
	manifold := meterstatus.Manifold(s.manifoldConfig)
	worker, err := manifold.Start(s.resources.Context())
	c.Assert(called, jc.IsTrue)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(worker, gc.NotNil)
	worker.Kill()
	err = worker.Wait()
	c.Assert(err, jc.ErrorIsNil)
	s.stub.CheckCallNames(c, "MeterStatus", "RunHook", "WatchMeterStatus")
}
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, resourceGetter *resourceGetter) {

	errAborted := errors.New("aborted before delay elapsed")

	startAfterDelay := func() (worker.Worker, error) {
		// NOTE: the resourceGetter will expire *after* the worker is started.
		// This is tolerable because
		//  1) we'll still correctly block access attempts most of the time
		//  2) failing to block them won't cause data races anyway
		//  3) it's not worth complicating the interface for every client just
		//     to eliminate the possibility of one harmlessly dumb interaction.
		defer resourceGetter.expire()
		logger.Tracef("starting %q manifold worker in %s...", name, delay)
		select {
		case <-time.After(delay):
		case <-engine.tomb.Dying():
			return nil, errAborted
		}
		logger.Tracef("starting %q manifold worker", name)
		return start(resourceGetter.getResource)
	}

	startWorkerAndWait := func() error {
		worker, err := startAfterDelay()
		switch errors.Cause(err) {
		case errAborted:
			return nil
		case nil:
			logger.Tracef("running %q manifold worker", name)
		default:
			logger.Tracef("failed to start %q manifold worker: %v", name, err)
			return err
		}
		select {
		case <-engine.tomb.Dying():
			logger.Tracef("stopping %q manifold worker (shutting down)", name)
			worker.Kill()
		case engine.started <- startedTicket{name, worker, resourceGetter.accessLog}:
			logger.Tracef("registered %q manifold worker", name)
		}
		return worker.Wait()
	}

	// We may or may not send on started, but we *must* send on stopped.
	engine.stopped <- stoppedTicket{name, startWorkerAndWait(), resourceGetter.accessLog}
}
// Occupy launches a Visit to fortress that creates a worker and holds the
// visit open until the worker completes. Like most funcs that return any
// Worker, the caller takes responsibility for its lifetime; be aware that
// the responsibility is especially heavy here, because failure to clean up
// the worker will block cleanup of the fortress.
//
// This may sound scary, but the alternative is to have multiple components
// "responsible for" a single worker's lifetime -- and Fortress itself would
// have to grow new concerns, of understanding and managing worker.Workers --
// and that scenario ends up much worse.
func Occupy(fortress Guest, start StartFunc, abort Abort) (worker.Worker, error) {

	// Create two channels to communicate success and failure of worker
	// creation; and a worker-running func that sends on exactly one
	// of them, and returns only when (1) a value has been sent and (2)
	// no worker is running. Note especially that it always returns nil.
	started := make(chan worker.Worker, 1)
	failed := make(chan error, 1)
	task := func() error {
		worker, err := start()
		if err != nil {
			failed <- err
		} else {
			started <- worker
			worker.Wait() // ignore error: worker is SEP now.
		}
		return nil
	}

	// Start a goroutine to run the task func inside the fortress. If
	// this operation succeeds, we must inspect started and failed to
	// determine what actually happened; but if it fails, we can be
	// confident that the task (which never fails) did not run, and can
	// therefore return the failure without waiting further.
	finished := make(chan error, 1)
	go func() {
		finished <- fortress.Visit(task, abort)
	}()

	// Watch all these channels to figure out what happened and inform
	// the client. A nil error from finished indicates that there will
	// be some value waiting on one of the other channels.
	for {
		select {
		case err := <-finished:
			if err != nil {
				return nil, errors.Trace(err)
			}
		case err := <-failed:
			return nil, errors.Trace(err)
		case worker := <-started:
			return worker, nil
		}
	}
}
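// A minimal sketch of how a caller might use Occupy, assuming it is exported
// from a fortress package; runOccupied, newScanWorker, and the guest/abort
// arguments are illustrative names only, not part of the real API surface.

func runOccupied(guest fortress.Guest, abort fortress.Abort) error {
	w, err := fortress.Occupy(guest, func() (worker.Worker, error) {
		return newScanWorker() // hypothetical worker constructor
	}, abort)
	if err != nil {
		return errors.Trace(err)
	}
	// The caller now owns w; per the doc comment above, forgetting to stop
	// it would hold the fortress visit open and block the fortress's own
	// cleanup.
	w.Kill()
	return w.Wait()
}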
// TestIsolatedWorker ensures that the manifold correctly sets up the isolated worker.
func (s *PatchedManifoldSuite) TestIsolatedWorker(c *gc.C) {
	delete(s.dummyResources, "apicaller-name")
	var called bool
	s.manifoldConfig.NewIsolatedStatusWorker = func(cfg meterstatus.IsolatedConfig) (worker.Worker, error) {
		called = true
		return meterstatus.NewIsolatedStatusWorker(cfg)
	}
	manifold := meterstatus.Manifold(s.manifoldConfig)
	getResource := dt.StubGetResource(s.dummyResources)
	worker, err := manifold.Start(getResource)
	c.Assert(called, jc.IsTrue)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(worker, gc.NotNil)
	worker.Kill()
	err = worker.Wait()
	c.Assert(err, jc.ErrorIsNil)
	s.stub.CheckCallNames(c, "MeterStatus", "RunHook", "WatchMeterStatus")
}
func (s *storageProvisionerSuite) TestVolumeNonDynamic(c *gc.C) {
	volumeInfoSet := make(chan interface{})
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.setVolumeInfo = func([]params.Volume) ([]params.ErrorResult, error) {
		defer close(volumeInfoSet)
		return nil, nil
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	// Volumes for non-dynamic providers should not be created.
	s.provider.dynamic = false
	args.environ.watcher.changes <- struct{}{}
	volumeAccessor.volumesWatcher.changes <- []string{"1"}
	assertNoEvent(c, volumeInfoSet, "volume info set")
}
func (s *storageProvisionerSuite) TestVolumeNeedsInstance(c *gc.C) {
	volumeInfoSet := make(chan interface{})
	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.setVolumeInfo = func([]params.Volume) ([]params.ErrorResult, error) {
		defer close(volumeInfoSet)
		return nil, nil
	}
	volumeAccessor.setVolumeAttachmentInfo = func([]params.VolumeAttachment) ([]params.ErrorResult, error) {
		return nil, nil
	}

	args := &workerArgs{volumes: volumeAccessor}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	volumeAccessor.volumesWatcher.changes <- []string{needsInstanceVolumeId}
	args.environ.watcher.changes <- struct{}{}
	assertNoEvent(c, volumeInfoSet, "volume info set")

	args.machines.instanceIds[names.NewMachineTag("1")] = "inst-id"
	args.machines.watcher.changes <- struct{}{}
	waitChannel(c, volumeInfoSet, "waiting for volume info to be set")
}
// upgradeWaiterWorker runs the specified worker after upgrades have completed.
func (a *MachineAgent) upgradeWaiterWorker(name string, start func() (worker.Worker, error)) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		// Wait for the agent upgrade and upgrade steps to complete (or for
		// us to be stopped).
		for _, ch := range []<-chan struct{}{
			a.upgradeComplete.Unlocked(),
			a.initialUpgradeCheckComplete.Unlocked(),
		} {
			select {
			case <-stop:
				return nil
			case <-ch:
			}
		}
		logger.Debugf("upgrades done, starting worker %q", name)

		// Upgrades are done, start the worker.
		worker, err := start()
		if err != nil {
			return err
		}

		// Wait for worker to finish or for us to be stopped.
		waitCh := make(chan error)
		go func() {
			waitCh <- worker.Wait()
		}()
		select {
		case err := <-waitCh:
			logger.Debugf("worker %q exited with %v", name, err)
			return err
		case <-stop:
			logger.Debugf("stopping so killing worker %q", name)
			worker.Kill()
		}
		return <-waitCh // Ensure worker has stopped before returning.
	})
}
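// A sketch of how a worker might be registered behind the upgrade gate,
// assuming a worker.Runner with a StartWorker(id, startFunc) method; the
// startAPIWorkerGated and newAPIStateWorker names are hypothetical and only
// illustrate the wrapping pattern, not the agent's actual wiring.

func (a *MachineAgent) startAPIWorkerGated(runner worker.Runner) error {
	// The inner start func is deferred until upgrades complete, because
	// upgradeWaiterWorker only calls it once both gates are unlocked.
	return runner.StartWorker("apiworker", func() (worker.Worker, error) {
		return a.upgradeWaiterWorker("apiworker", func() (worker.Worker, error) {
			return newAPIStateWorker(a) // hypothetical constructor
		}), nil
	})
}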
func (s *storageProvisionerSuite) TestDetachFilesystemsUnattached(c *gc.C) {
	removed := make(chan interface{})
	removeAttachments := func(ids []params.MachineStorageId) ([]params.ErrorResult, error) {
		defer close(removed)
		c.Assert(ids, gc.DeepEquals, []params.MachineStorageId{{
			MachineTag:    "machine-0",
			AttachmentTag: "filesystem-0",
		}})
		return make([]params.ErrorResult, len(ids)), nil
	}

	args := &workerArgs{
		life: &mockLifecycleManager{removeAttachments: removeAttachments},
	}
	worker := newStorageProvisioner(c, args)
	defer worker.Wait()
	defer worker.Kill()

	args.filesystems.attachmentsWatcher.changes <- []params.MachineStorageId{{
		MachineTag:    "machine-0",
		AttachmentTag: "filesystem-0",
	}}
	args.environ.watcher.changes <- struct{}{}
	waitChannel(c, removed, "waiting for attachment to be removed")
}