Example #1 — TestRemove: an entry removed before its time never becomes ready; the remaining entries still fire on schedule.
func (*scheduleSuite) TestRemove(c *gc.C) {
	clock := coretesting.NewClock(time.Time{})
	now := clock.Now()
	s := schedule.NewSchedule(clock)

	s.Add("k0", "v0", now.Add(3*time.Second))
	s.Add("k1", "v1", now.Add(2*time.Second))
	s.Remove("k0")
	assertReady(c, s, clock /* nothing */)

	clock.Advance(3 * time.Second)
	assertReady(c, s, clock, "v1")
}
Example #2 — TestAdd: entries become ready in timestamp order as the test clock advances, and each entry is reported ready only once.
func (*scheduleSuite) TestAdd(c *gc.C) {
	clock := coretesting.NewClock(time.Time{})
	now := clock.Now()
	s := schedule.NewSchedule(clock)

	s.Add("k0", "v0", now.Add(3*time.Second))
	s.Add("k1", "v1", now.Add(1500*time.Millisecond))
	s.Add("k2", "v2", now.Add(2*time.Second))

	clock.Advance(time.Second) // T+1
	assertReady(c, s, clock /* nothing */)

	clock.Advance(time.Second) // T+2
	assertReady(c, s, clock, "v1", "v2")
	assertReady(c, s, clock /* nothing */)

	clock.Advance(500 * time.Millisecond) // T+2.5
	assertReady(c, s, clock /* nothing */)

	clock.Advance(time.Second) // T+3.5
	assertReady(c, s, clock, "v0")
}
Example #3 — TestNext: Next signals when the earliest pending entry falls due, and Remove changes which entries subsequently become ready.
func (*scheduleSuite) TestNext(c *gc.C) {
	clock := coretesting.NewClock(time.Time{})
	now := clock.Now()
	s := schedule.NewSchedule(clock)

	s.Add("k0", "v0", now.Add(3*time.Second))
	s.Add("k1", "v1", now.Add(1500*time.Millisecond))
	s.Add("k2", "v2", now.Add(2*time.Second))
	s.Add("k3", "v3", now.Add(2500*time.Millisecond))

	assertNextOp(c, s, clock, 1500*time.Millisecond)
	clock.Advance(1500 * time.Millisecond)
	assertReady(c, s, clock, "v1")

	clock.Advance(500 * time.Millisecond)
	assertNextOp(c, s, clock, 0)
	assertReady(c, s, clock, "v2")

	s.Remove("k3")

	clock.Advance(2 * time.Second) // T+4
	assertNextOp(c, s, clock, 0)
	assertReady(c, s, clock, "v0")
}
Example #4 — TestRemoveKeyNotFound: removing an unknown key is a harmless no-op.
func (*scheduleSuite) TestRemoveKeyNotFound(c *gc.C) {
	s := schedule.NewSchedule(coretesting.NewClock(time.Time{}))
	s.Remove("0") // does not explode
}
Example #5 — TestReadyNoEvents: Ready on an empty schedule returns no entries.
func (*scheduleSuite) TestReadyNoEvents(c *gc.C) {
	s := schedule.NewSchedule(coretesting.NewClock(time.Time{}))
	ready := s.Ready(time.Now())
	c.Assert(ready, gc.HasLen, 0)
}
Example #6 — TestNextNoEvents: Next on an empty schedule returns a nil channel, and a nil channel never fires in a select.
func (*scheduleSuite) TestNextNoEvents(c *gc.C) {
	s := schedule.NewSchedule(coretesting.NewClock(time.Time{}))
	next := s.Next()
	c.Assert(next, gc.IsNil)
}
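The tests above lean on two helpers, assertReady and assertNextOp, that are not shown. The following is a minimal sketch reconstructed from the call sites, not the real test file. It assumes Ready(now) returns the due values in order, Next returns a channel that signals once the earliest entry is due, the testing clock fires timers synchronously on Advance (and accepts a negative Advance to rewind), and jc is github.com/juju/testing/checkers.

// assertReady drains the entries due at the clock's current time and
// compares them, in order, with the expected values. (Hypothetical
// reconstruction; see the note above.)
func assertReady(c *gc.C, s *schedule.Schedule, clock *coretesting.Clock, expect ...interface{}) {
	ready := s.Ready(clock.Now())
	if len(expect) == 0 {
		c.Assert(ready, gc.HasLen, 0)
		return
	}
	c.Assert(ready, jc.DeepEquals, expect)
}

// assertNextOp asserts that the channel returned by Next signals after
// exactly d: not sooner, and not never. The clock is advanced and then
// rewound so the calling test's own Advance calls stay accurate.
func assertNextOp(c *gc.C, s *schedule.Schedule, clock *coretesting.Clock, d time.Duration) {
	next := s.Next()
	c.Assert(next, gc.NotNil)
	if d > 0 {
		select {
		case <-next:
			c.Fatal("Next channel signalled too soon")
		default:
		}
	}
	clock.Advance(d)        // move time forward to the expected deadline...
	defer clock.Advance(-d) // ...and rewind it before returning
	select {
	case _, ok := <-next:
		c.Assert(ok, jc.IsTrue)
	default:
		c.Fatal("Next channel not signalled")
	}
}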
Example #7 — the catacomb-based storageProvisioner.loop: watchers are registered with the worker's catacomb for lifecycle management, and the schedule's Next channel drives pending operations from the main select loop.
func (w *storageProvisioner) loop() error {
	var (
		volumesChanges               watcher.StringsChannel
		filesystemsChanges           watcher.StringsChannel
		volumeAttachmentsChanges     watcher.MachineStorageIdsChannel
		filesystemAttachmentsChanges watcher.MachineStorageIdsChannel
		machineBlockDevicesChanges   <-chan struct{}
	)
	machineChanges := make(chan names.MachineTag)

	modelConfigWatcher, err := w.config.Environ.WatchForModelConfigChanges()
	if err != nil {
		return errors.Annotate(err, "watching model config")
	}
	if err := w.catacomb.Add(modelConfigWatcher); err != nil {
		return errors.Trace(err)
	}

	// Machine-scoped provisioners need to watch block devices, to create
	// volume-backed filesystems.
	if machineTag, ok := w.config.Scope.(names.MachineTag); ok {
		machineBlockDevicesWatcher, err := w.config.Volumes.WatchBlockDevices(machineTag)
		if err != nil {
			return errors.Annotate(err, "watching block devices")
		}
		if err := w.catacomb.Add(machineBlockDevicesWatcher); err != nil {
			return errors.Trace(err)
		}
		machineBlockDevicesChanges = machineBlockDevicesWatcher.Changes()
	}

	startWatchers := func() error {
		volumesWatcher, err := w.config.Volumes.WatchVolumes()
		if err != nil {
			return errors.Annotate(err, "watching volumes")
		}
		if err := w.catacomb.Add(volumesWatcher); err != nil {
			return errors.Trace(err)
		}
		volumesChanges = volumesWatcher.Changes()

		filesystemsWatcher, err := w.config.Filesystems.WatchFilesystems()
		if err != nil {
			return errors.Annotate(err, "watching filesystems")
		}
		if err := w.catacomb.Add(filesystemsWatcher); err != nil {
			return errors.Trace(err)
		}
		filesystemsChanges = filesystemsWatcher.Changes()

		volumeAttachmentsWatcher, err := w.config.Volumes.WatchVolumeAttachments()
		if err != nil {
			return errors.Annotate(err, "watching volume attachments")
		}
		if err := w.catacomb.Add(volumeAttachmentsWatcher); err != nil {
			return errors.Trace(err)
		}
		volumeAttachmentsChanges = volumeAttachmentsWatcher.Changes()

		filesystemAttachmentsWatcher, err := w.config.Filesystems.WatchFilesystemAttachments()
		if err != nil {
			return errors.Annotate(err, "watching filesystem attachments")
		}
		if err := w.catacomb.Add(filesystemAttachmentsWatcher); err != nil {
			return errors.Trace(err)
		}
		filesystemAttachmentsChanges = filesystemAttachmentsWatcher.Changes()
		return nil
	}

	ctx := context{
		kill:                                 w.catacomb.Kill,
		addWorker:                            w.catacomb.Add,
		config:                               w.config,
		volumes:                              make(map[names.VolumeTag]storage.Volume),
		volumeAttachments:                    make(map[params.MachineStorageId]storage.VolumeAttachment),
		volumeBlockDevices:                   make(map[names.VolumeTag]storage.BlockDevice),
		filesystems:                          make(map[names.FilesystemTag]storage.Filesystem),
		filesystemAttachments:                make(map[params.MachineStorageId]storage.FilesystemAttachment),
		machines:                             make(map[names.MachineTag]*machineWatcher),
		machineChanges:                       machineChanges,
		schedule:                             schedule.NewSchedule(w.config.Clock),
		incompleteVolumeParams:               make(map[names.VolumeTag]storage.VolumeParams),
		incompleteVolumeAttachmentParams:     make(map[params.MachineStorageId]storage.VolumeAttachmentParams),
		incompleteFilesystemParams:           make(map[names.FilesystemTag]storage.FilesystemParams),
		incompleteFilesystemAttachmentParams: make(map[params.MachineStorageId]storage.FilesystemAttachmentParams),
		pendingVolumeBlockDevices:            make(set.Tags),
	}
	ctx.managedFilesystemSource = newManagedFilesystemSource(
		ctx.volumeBlockDevices, ctx.filesystems,
	)
	for {
		// Check if block devices need to be refreshed.
		if err := processPendingVolumeBlockDevices(&ctx); err != nil {
			return errors.Annotate(err, "processing pending block devices")
		}

		select {
		case <-w.catacomb.Dying():
			return w.catacomb.ErrDying()
		case _, ok := <-modelConfigWatcher.Changes():
			if !ok {
				return errors.New("environ config watcher closed")
			}
			modelConfig, err := w.config.Environ.ModelConfig()
			if err != nil {
				return errors.Annotate(err, "getting model config")
			}
			if ctx.modelConfig == nil {
				// We've received the initial model config,
				// so we can begin provisioning storage.
				if err := startWatchers(); err != nil {
					return err
				}
			}
			ctx.modelConfig = modelConfig
		case changes, ok := <-volumesChanges:
			if !ok {
				return errors.New("volumes watcher closed")
			}
			if err := volumesChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-volumeAttachmentsChanges:
			if !ok {
				return errors.New("volume attachments watcher closed")
			}
			if err := volumeAttachmentsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-filesystemsChanges:
			if !ok {
				return errors.New("filesystems watcher closed")
			}
			if err := filesystemsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-filesystemAttachmentsChanges:
			if !ok {
				return errors.New("filesystem attachments watcher closed")
			}
			if err := filesystemAttachmentsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case _, ok := <-machineBlockDevicesChanges:
			if !ok {
				return errors.New("machine block devices watcher closed")
			}
			if err := machineBlockDevicesChanged(&ctx); err != nil {
				return errors.Trace(err)
			}
		case machineTag := <-machineChanges:
			if err := refreshMachine(&ctx, machineTag); err != nil {
				return errors.Trace(err)
			}
		case <-ctx.schedule.Next():
			// Ready to pick one or more items off the pending queue.
			if err := processSchedule(&ctx); err != nil {
				return errors.Trace(err)
			}
		}
	}
}
Example #8 — an earlier, tomb-based storageprovisioner.loop: the same structure as Example #7, but watcher lifetimes are managed manually with defers and watcher.Stop rather than a catacomb.
func (w *storageprovisioner) loop() error {
	var (
		environConfigChanges         <-chan struct{}
		volumesWatcher               apiwatcher.StringsWatcher
		filesystemsWatcher           apiwatcher.StringsWatcher
		volumesChanges               <-chan []string
		filesystemsChanges           <-chan []string
		volumeAttachmentsWatcher     apiwatcher.MachineStorageIdsWatcher
		filesystemAttachmentsWatcher apiwatcher.MachineStorageIdsWatcher
		volumeAttachmentsChanges     <-chan []params.MachineStorageId
		filesystemAttachmentsChanges <-chan []params.MachineStorageId
		machineBlockDevicesWatcher   apiwatcher.NotifyWatcher
		machineBlockDevicesChanges   <-chan struct{}
	)
	machineChanges := make(chan names.MachineTag)

	environConfigWatcher, err := w.environ.WatchForEnvironConfigChanges()
	if err != nil {
		return errors.Annotate(err, "watching environ config")
	}
	defer watcher.Stop(environConfigWatcher, &w.tomb)
	environConfigChanges = environConfigWatcher.Changes()

	// Machine-scoped provisioners need to watch block devices, to create
	// volume-backed filesystems.
	if machineTag, ok := w.scope.(names.MachineTag); ok {
		machineBlockDevicesWatcher, err = w.volumes.WatchBlockDevices(machineTag)
		if err != nil {
			return errors.Annotate(err, "watching block devices")
		}
		defer watcher.Stop(machineBlockDevicesWatcher, &w.tomb)
		machineBlockDevicesChanges = machineBlockDevicesWatcher.Changes()
	}

	// The other watchers are started dynamically; stop only if started.
	// The calls must be wrapped in closures: a deferred call's arguments
	// are evaluated when the defer statement runs, and these watchers
	// are still nil until startWatchers assigns them.
	defer func() { w.maybeStopWatcher(volumesWatcher) }()
	defer func() { w.maybeStopWatcher(volumeAttachmentsWatcher) }()
	defer func() { w.maybeStopWatcher(filesystemsWatcher) }()
	defer func() { w.maybeStopWatcher(filesystemAttachmentsWatcher) }()

	startWatchers := func() error {
		var err error
		volumesWatcher, err = w.volumes.WatchVolumes()
		if err != nil {
			return errors.Annotate(err, "watching volumes")
		}
		filesystemsWatcher, err = w.filesystems.WatchFilesystems()
		if err != nil {
			return errors.Annotate(err, "watching filesystems")
		}
		volumeAttachmentsWatcher, err = w.volumes.WatchVolumeAttachments()
		if err != nil {
			return errors.Annotate(err, "watching volume attachments")
		}
		filesystemAttachmentsWatcher, err = w.filesystems.WatchFilesystemAttachments()
		if err != nil {
			return errors.Annotate(err, "watching filesystem attachments")
		}
		volumesChanges = volumesWatcher.Changes()
		filesystemsChanges = filesystemsWatcher.Changes()
		volumeAttachmentsChanges = volumeAttachmentsWatcher.Changes()
		filesystemAttachmentsChanges = filesystemAttachmentsWatcher.Changes()
		return nil
	}

	ctx := context{
		scope:                             w.scope,
		storageDir:                        w.storageDir,
		volumeAccessor:                    w.volumes,
		filesystemAccessor:                w.filesystems,
		life:                              w.life,
		machineAccessor:                   w.machines,
		statusSetter:                      w.status,
		time:                              w.clock,
		volumes:                           make(map[names.VolumeTag]storage.Volume),
		volumeAttachments:                 make(map[params.MachineStorageId]storage.VolumeAttachment),
		volumeBlockDevices:                make(map[names.VolumeTag]storage.BlockDevice),
		filesystems:                       make(map[names.FilesystemTag]storage.Filesystem),
		filesystemAttachments:             make(map[params.MachineStorageId]storage.FilesystemAttachment),
		machines:                          make(map[names.MachineTag]*machineWatcher),
		machineChanges:                    machineChanges,
		schedule:                          schedule.NewSchedule(w.clock),
		pendingVolumeBlockDevices:         make(set.Tags),
		incompleteVolumeParams:            make(map[names.VolumeTag]storage.VolumeParams),
		incompleteVolumeAttachmentParams:  make(map[params.MachineStorageId]storage.VolumeAttachmentParams),
		pendingFilesystems:                make(map[names.FilesystemTag]storage.FilesystemParams),
		pendingFilesystemAttachments:      make(map[params.MachineStorageId]storage.FilesystemAttachmentParams),
		pendingDyingFilesystemAttachments: make(map[params.MachineStorageId]storage.FilesystemAttachmentParams),
	}
	ctx.managedFilesystemSource = newManagedFilesystemSource(
		ctx.volumeBlockDevices, ctx.filesystems,
	)
	defer func() {
		// Named mw rather than w to avoid shadowing the method receiver.
		for _, mw := range ctx.machines {
			mw.stop()
		}
	}()

	for {
		// Check if any pending operations can be fulfilled.
		if err := processPending(&ctx); err != nil {
			return errors.Trace(err)
		}

		select {
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-environConfigChanges:
			if !ok {
				return watcher.EnsureErr(environConfigWatcher)
			}
			environConfig, err := w.environ.EnvironConfig()
			if err != nil {
				return errors.Annotate(err, "getting environ config")
			}
			if ctx.environConfig == nil {
				// We've received the initial environ config,
				// so we can begin provisioning storage.
				if err := startWatchers(); err != nil {
					return err
				}
			}
			ctx.environConfig = environConfig
		case changes, ok := <-volumesChanges:
			if !ok {
				return watcher.EnsureErr(volumesWatcher)
			}
			if err := volumesChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-volumeAttachmentsChanges:
			if !ok {
				return watcher.EnsureErr(volumeAttachmentsWatcher)
			}
			if err := volumeAttachmentsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-filesystemsChanges:
			if !ok {
				return watcher.EnsureErr(filesystemsWatcher)
			}
			if err := filesystemsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case changes, ok := <-filesystemAttachmentsChanges:
			if !ok {
				return watcher.EnsureErr(filesystemAttachmentsWatcher)
			}
			if err := filesystemAttachmentsChanged(&ctx, changes); err != nil {
				return errors.Trace(err)
			}
		case _, ok := <-machineBlockDevicesChanges:
			if !ok {
				return watcher.EnsureErr(machineBlockDevicesWatcher)
			}
			if err := machineBlockDevicesChanged(&ctx); err != nil {
				return errors.Trace(err)
			}
		case machineTag := <-machineChanges:
			if err := refreshMachine(&ctx, machineTag); err != nil {
				return errors.Trace(err)
			}
		case <-ctx.schedule.Next():
			// Ready to pick one or more items off the pending queue.
			if err := processSchedule(&ctx); err != nil {
				return errors.Trace(err)
			}
		}
	}
}
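Both loops, and the tests above, exercise the same small Schedule surface: Add, Remove, Ready, and Next. For orientation, here is a minimal, self-contained sketch of those semantics. It is not the juju schedule package: the real one is clock-driven and implements Next as a timer channel, which is omitted here; the names and ordering rules are inferred from the call sites above.

package main

import (
	"fmt"
	"sort"
	"time"
)

// entry is one keyed, timestamped value in the schedule.
type entry struct {
	key   string
	value interface{}
	t     time.Time
}

// Schedule holds keyed entries that become ready once their time is at
// or before a caller-supplied now. This mirrors the behaviour the tests
// assert, but is a sketch, not the juju implementation.
type Schedule struct {
	entries map[string]entry
}

func NewSchedule() *Schedule {
	return &Schedule{entries: make(map[string]entry)}
}

// Add schedules value under key at time t, replacing any existing entry
// with the same key.
func (s *Schedule) Add(key string, value interface{}, t time.Time) {
	s.entries[key] = entry{key, value, t}
}

// Remove drops the entry with the given key; unknown keys are a no-op,
// matching TestRemoveKeyNotFound.
func (s *Schedule) Remove(key string) {
	delete(s.entries, key)
}

// Ready removes and returns all values due at or before now, ordered by
// scheduled time, matching the ordering the tests assert ("v1" before
// "v2" in TestAdd).
func (s *Schedule) Ready(now time.Time) []interface{} {
	var due []entry
	for _, e := range s.entries {
		if !e.t.After(now) {
			due = append(due, e)
		}
	}
	sort.Slice(due, func(i, j int) bool { return due[i].t.Before(due[j].t) })
	ready := make([]interface{}, 0, len(due))
	for _, e := range due {
		delete(s.entries, e.key)
		ready = append(ready, e.value)
	}
	return ready
}

func main() {
	// Replays Example #1: "k0" is removed before it fires, so only
	// "v1" is reported ready at T+3s.
	now := time.Time{}
	s := NewSchedule()
	s.Add("k0", "v0", now.Add(3*time.Second))
	s.Add("k1", "v1", now.Add(2*time.Second))
	s.Remove("k0")
	fmt.Println(s.Ready(now.Add(3 * time.Second))) // prints [v1]
}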