Example #1
func (s *FastPeriodSuite) TestStop(c *gc.C) {
	t := &tomb.Tomb{}
	watcher.Stop(&dummyWatcher{nil}, t)
	c.Assert(t.Err(), gc.Equals, tomb.ErrStillAlive)

	watcher.Stop(&dummyWatcher{errors.New("BLAM")}, t)
	c.Assert(t.Err(), gc.ErrorMatches, "BLAM")
}
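This test pins down the contract of watcher.Stop: stopping a healthy watcher leaves the tomb alive (t.Err() still reports tomb.ErrStillAlive), while a failing Stop kills the tomb with the watcher's error. A minimal sketch consistent with that contract (the Stopper interface name and the tomb import path are assumptions):

package watcher

import "gopkg.in/tomb.v1" // assumed tomb version; its API matches these examples

// Stopper is assumed to be the minimal interface Stop needs.
type Stopper interface {
	Stop() error
}

// Stop stops w; if stopping fails, it kills t with the error.
// On success the tomb is left untouched.
func Stop(w Stopper, t *tomb.Tomb) {
	if err := w.Stop(); err != nil {
		t.Kill(err)
	}
}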
Example #2
// stopWatchers stops all the firewaller's watchers.
func (fw *Firewaller) stopWatchers() {
	watcher.Stop(fw.environWatcher, &fw.tomb)
	watcher.Stop(fw.machinesWatcher, &fw.tomb)
	for _, unitd := range fw.unitds {
		watcher.Stop(unitd, &fw.tomb)
	}
	for _, serviced := range fw.serviceds {
		watcher.Stop(serviced, &fw.tomb)
	}
	for _, machined := range fw.machineds {
		watcher.Stop(machined, &fw.tomb)
	}
}
Example #3
// ModeTerminating marks the unit dead and returns ErrTerminateAgent.
func ModeTerminating(u *Uniter) (next Mode, err error) {
	defer modeContext("ModeTerminating", &err)()
	if err = u.unit.SetStatus(params.StatusStopped, "", nil); err != nil {
		return nil, err
	}
	w, err := u.unit.Watch()
	if err != nil {
		return nil, err
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return nil, tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.MustErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return nil, err
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return nil, err
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return nil, err
			}
			return nil, worker.ErrTerminateAgent
		}
	}
}
Example #4
// loop is the worker's main loop.
func (nw *Networker) loop() error {
	logger.Debugf("starting on machine %q", nw.tag)
	if !nw.CanWriteConfig() {
		logger.Warningf("running in safe mode - no commands or changes to network config will be done")
	}
	w, err := nw.init()
	if err != nil {
		if w != nil {
			// We don't bother to propagate an error, because we
			// already have one.
			w.Stop()
		}
		return err
	}
	defer watcher.Stop(w, &nw.tomb)
	logger.Debugf("initialized and started watching")
	for {
		select {
		case <-nw.tomb.Dying():
			logger.Debugf("shutting down")
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			logger.Debugf("got change notification")
			if !ok {
				return watcher.MustErr(w)
			}
			if err := nw.handle(); err != nil {
				return err
			}
		}
	}
}
Example #5
func (nw *notifyWorker) loop() error {
	w, err := nw.handler.SetUp()
	if err != nil {
		if w != nil {
			// We don't bother to propagate an error, because we
			// already have one.
			w.Stop()
		}
		return err
	}
	defer propagateTearDown(nw.handler, &nw.tomb)
	defer watcher.Stop(w, &nw.tomb)
	for {
		select {
		case <-nw.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return ensureErr(w)
			}
			if err := nw.handler.Handle(nw.tomb.Dying()); err != nil {
				return err
			}
		}
	}
}
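The loop above touches its collaborators through a narrow surface that can be written down as interfaces. The following sketch is inferred purely from the calls in this example; the names are assumptions:

// NotifyWatcher is the minimal watcher surface the loop uses.
type NotifyWatcher interface {
	Changes() <-chan struct{}
	Stop() error
}

// NotifyWatchHandler is inferred from notifyWorker.loop: SetUp may
// return a non-nil watcher even on error (it is then stopped), Handle
// receives the tomb's dying channel so long-running work can abort,
// and TearDown runs via propagateTearDown when the loop exits.
type NotifyWatchHandler interface {
	SetUp() (NotifyWatcher, error)
	Handle(abort <-chan struct{}) error
	TearDown() error
}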
Example #6
// newStorageSource creates a hook source that watches for changes to,
// and generates storage hooks for, a single storage attachment.
func newStorageSource(
	st StorageAccessor,
	unitTag names.UnitTag,
	storageTag names.StorageTag,
	attached bool,
) (*storageSource, error) {
	w, err := st.WatchStorageAttachment(storageTag, unitTag)
	if err != nil {
		return nil, errors.Annotate(err, "watching storage attachment")
	}
	s := &storageSource{
		storageHookQueue: &storageHookQueue{
			unitTag:    unitTag,
			storageTag: storageTag,
			attached:   attached,
		},
		st:      st,
		watcher: w,
		changes: make(chan hook.SourceChange),
	}
	go func() {
		defer s.tomb.Done()
		defer watcher.Stop(w, &s.tomb)
		s.tomb.Kill(s.loop())
	}()
	return s, nil
}
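Examples 6, 7, 16, 19 and 25 all share the same start-up idiom: a goroutine defers tomb.Done() so it runs last, defers watcher.Stop to clean up on the way out, and records the loop's result with Kill. A stripped-down, runnable sketch of that lifecycle, assuming gopkg.in/tomb.v1 (whose API matches these examples):

package main

import (
	"fmt"
	"time"

	"gopkg.in/tomb.v1"
)

type worker struct {
	tomb tomb.Tomb
}

func (w *worker) loop() error {
	for {
		select {
		case <-w.tomb.Dying():
			return tomb.ErrDying
		case <-time.After(10 * time.Millisecond):
			// a real worker would react to watcher changes here
		}
	}
}

func newWorker() *worker {
	w := &worker{}
	go func() {
		defer w.tomb.Done()   // signal full shutdown last
		w.tomb.Kill(w.loop()) // record why the loop exited
	}()
	return w
}

func main() {
	w := newWorker()
	w.tomb.Kill(nil)           // request a clean shutdown
	fmt.Println(w.tomb.Wait()) // blocks until Done; prints <nil>
}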
Example #7
// NewEnvironObserver waits for the environment to have a valid
// environment configuration and returns a new environment observer.
// While waiting for the first environment configuration, it will
// return with tomb.ErrDying if it receives a value on dying.
func NewEnvironObserver(st EnvironConfigObserver) (*EnvironObserver, error) {
	config, err := st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	environ, err := environs.New(config)
	if err != nil {
		return nil, errors.Annotate(err, "cannot create an environment")
	}
	environWatcher, err := st.WatchForEnvironConfigChanges()
	if err != nil {
		return nil, errors.Annotate(err, "cannot watch environment config")
	}
	obs := &EnvironObserver{
		st:             st,
		environ:        environ,
		environWatcher: environWatcher,
	}
	go func() {
		defer obs.tomb.Done()
		defer watcher.Stop(environWatcher, &obs.tomb)
		obs.tomb.Kill(obs.loop())
	}()
	return obs, nil
}
Example #8
// addRelation causes the unit agent to join the supplied relation, and to
// store persistent state in the supplied dir.
func (u *Uniter) addRelation(rel *uniter.Relation, dir *relation.StateDir) error {
	logger.Infof("joining relation %q", rel)
	ru, err := rel.Unit(u.unit)
	if err != nil {
		return err
	}
	r := NewRelationer(ru, dir, u.relationHooks)
	w, err := u.unit.Watch()
	if err != nil {
		return err
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			err := r.Join()
			if params.IsCodeCannotEnterScopeYet(err) {
				logger.Infof("cannot enter scope for relation %q; waiting for subordinate to be removed", rel)
				continue
			} else if err != nil {
				return err
			}
			logger.Infof("joined relation %q", rel)
			u.relationers[rel.Id()] = r
			return nil
		}
	}
}
Example #9
func (sw *stringsWorker) loop() error {
	w, err := sw.handler.SetUp()
	if err != nil {
		if w != nil {
			// We don't bother to propagate an error, because we
			// already have one.
			w.Stop()
		}
		return err
	}
	defer propagateTearDown(sw.handler, &sw.tomb)
	defer watcher.Stop(w, &sw.tomb)
	for {
		select {
		case <-sw.tomb.Dying():
			return tomb.ErrDying
		case changes, ok := <-w.Changes():
			if !ok {
				return mustErr(w)
			}
			if err := sw.handler.Handle(changes); err != nil {
				return err
			}
		}
	}
}
Example #10
func (u *Uniter) terminate() error {
	w, err := u.unit.Watch()
	if err != nil {
		return errors.Trace(err)
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.EnsureErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return errors.Trace(err)
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return errors.Trace(err)
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return errors.Trace(err)
			}
			return worker.ErrTerminateAgent
		}
	}
}
Example #11
func (p *environProvisioner) loop() error {
	var environConfigChanges <-chan struct{}
	environWatcher, err := p.st.WatchForEnvironConfigChanges()
	if err != nil {
		return err
	}
	environConfigChanges = environWatcher.Changes()
	defer watcher.Stop(environWatcher, &p.tomb)

	p.environ, err = worker.WaitForEnviron(environWatcher, p.st, p.tomb.Dying())
	if err != nil {
		return err
	}
	p.broker = p.environ

	safeMode := p.environ.Config().ProvisionerSafeMode()
	task, err := p.getStartTask(safeMode)
	if err != nil {
		return err
	}
	defer watcher.Stop(task, &p.tomb)

	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-task.Dying():
			err := task.Err()
			logger.Errorf("environ provisioner died: %v", err)
			return err
		case _, ok := <-environConfigChanges:
			if !ok {
				return watcher.MustErr(environWatcher)
			}
			environConfig, err := p.st.EnvironConfig()
			if err != nil {
				logger.Errorf("cannot load environment configuration: %v", err)
				return err
			}
			if err := p.setConfig(environConfig); err != nil {
				logger.Errorf("loaded invalid environment configuration: %v", err)
			}
			task.SetSafeMode(environConfig.ProvisionerSafeMode())
		}
	}
}
Example #12
func (w *relationUnitsWatcher) finish() {
	watcher.Stop(w.sw, &w.tomb)
	for _, watchedValue := range w.watching.Values() {
		w.st.watcher.Unwatch(w.st.settings.Name, watchedValue, w.updates)
	}
	close(w.updates)
	close(w.out)
	w.tomb.Done()
}
Example #13
func (task *provisionerTask) loop() error {
	logger.Infof("Starting up provisioner task %s", task.machineTag)
	defer watcher.Stop(task.machineWatcher, &task.tomb)

	// Don't allow the harvesting mode to change until we have read at
	// least one set of changes, which will populate the task.machines
	// map. Otherwise we will potentially see all legitimate instances
	// as unknown.
	var harvestModeChan chan config.HarvestMode

	// Not all provisioners have a retry channel.
	var retryChan <-chan struct{}
	if task.retryWatcher != nil {
		retryChan = task.retryWatcher.Changes()
	}

	// When the watcher starts, its initial event includes all the
	// relevant machines, and since that event is available straight
	// away, we know there will be some changes right off the bat.
	for {
		select {
		case <-task.tomb.Dying():
			logger.Infof("Shutting down provisioner task %s", task.machineTag)
			return tomb.ErrDying
		case ids, ok := <-task.machineWatcher.Changes():
			if !ok {
				return watcher.EnsureErr(task.machineWatcher)
			}
			if err := task.processMachines(ids); err != nil {
				return errors.Annotate(err, "failed to process updated machines")
			}
			// We've seen a set of changes. Enable modification of
			// harvesting mode.
			harvestModeChan = task.harvestModeChan
		case harvestMode := <-harvestModeChan:
			if harvestMode == task.harvestMode {
				break
			}

			logger.Infof("harvesting mode changed to %s", harvestMode)
			task.harvestMode = harvestMode

			if harvestMode.HarvestUnknown() {

				logger.Infof("harvesting unknown machines")
				if err := task.processMachines(nil); err != nil {
					return errors.Annotate(err, "failed to process machines after safe mode disabled")
				}
			}
		case <-retryChan:
			if err := task.processMachinesWithTransientErrors(); err != nil {
				return errors.Annotate(err, "failed to process machines with transient errors")
			}
		}
	}
}
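Note how harvestModeChan starts out nil and is only assigned after the first set of machine changes: a receive from a nil channel blocks forever, so that select case is effectively disabled until task.machines is populated. (Example 15 below uses the same trick on the send side, setting out to nil while the queue is empty.) A self-contained sketch of the idiom:

package main

import "fmt"

func main() {
	initial := make(chan struct{}, 1)
	initial <- struct{}{}

	modeChanges := make(chan string, 1)
	modeChanges <- "harvest-unknown"

	var gated chan string // nil: its case can never fire
	for i := 0; i < 2; i++ {
		select {
		case <-initial:
			fmt.Println("initial changes processed; enabling mode changes")
			gated = modeChanges // from now on the second case is live
		case m := <-gated:
			fmt.Println("mode change:", m)
		}
	}
}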
Example #14
func (p *containerProvisioner) loop() error {
	var environConfigChanges <-chan struct{}
	environWatcher, err := p.st.WatchForEnvironConfigChanges()
	if err != nil {
		return err
	}
	environConfigChanges = environWatcher.Changes()
	defer watcher.Stop(environWatcher, &p.tomb)

	config, err := p.st.EnvironConfig()
	if err != nil {
		return err
	}
	harvestMode := config.ProvisionerHarvestMode()

	task, err := p.getStartTask(harvestMode)
	if err != nil {
		return err
	}
	defer watcher.Stop(task, &p.tomb)

	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-task.Dying():
			err := task.Err()
			logger.Errorf("%s provisioner died: %v", p.containerType, err)
			return err
		case _, ok := <-environConfigChanges:
			if !ok {
				return watcher.EnsureErr(environWatcher)
			}
			environConfig, err := p.st.EnvironConfig()
			if err != nil {
				logger.Errorf("cannot load environment configuration: %v", err)
				return err
			}
			p.configObserver.notify(environConfig)
			task.SetHarvestMode(environConfig.ProvisionerHarvestMode())
		}
	}
}
Example #15
func (q *AliveHookQueue) loop(initial *State) {
	defer q.tomb.Done()
	defer watcher.Stop(q.w, &q.tomb)

	// Consume the initial event and reconcile it with the initial state
	// by inserting a new RelationUnitsChange before it; this schedules
	// every missing unit for immediate departure before anything else
	// happens (apart from a single, potentially required, post-joined
	// "changed" event).
	ch1, ok := <-q.w.Changes()
	if !ok {
		q.tomb.Kill(watcher.MustErr(q.w))
		return
	}
	if len(ch1.Departed) != 0 {
		panic("AliveHookQueue must be started with a fresh RelationUnitsWatcher")
	}
	q.changedPending = initial.ChangedPending
	ch0 := params.RelationUnitsChange{}
	for unit, version := range initial.Members {
		q.info[unit] = &unitInfo{
			unit:    unit,
			version: version,
			joined:  true,
		}
		if _, found := ch1.Changed[unit]; !found {
			ch0.Departed = append(ch0.Departed, unit)
		}
	}
	q.update(ch0)
	q.update(ch1)

	var next hook.Info
	var out chan<- hook.Info
	for {
		if q.empty() {
			out = nil
		} else {
			out = q.out
			next = q.next()
		}
		select {
		case <-q.tomb.Dying():
			return
		case ch, ok := <-q.w.Changes():
			if !ok {
				q.tomb.Kill(watcher.MustErr(q.w))
				return
			}
			q.update(ch)
		case out <- next:
			q.pop()
		}
	}
}
Example #16
File: sender.go Project: bac/juju
// NewSender starts sending hooks from source onto the out channel, and will
// continue to do so until Stop()ped (or the source is exhausted). NewSender
// takes ownership of the supplied source, and responsibility for cleaning it up;
// but it will not close the out channel.
func NewSender(out chan<- Info, source Source) Sender {
	sender := &hookSender{
		out: out,
	}
	go func() {
		defer sender.tomb.Done()
		defer watcher.Stop(source, &sender.tomb)
		sender.tomb.Kill(sender.loop(source))
	}()
	return sender
}
Example #17
func (task *provisionerTask) loop() error {
	logger.Infof("Starting up provisioner task %s", task.machineTag)
	defer watcher.Stop(task.machineWatcher, &task.tomb)

	// Don't allow the safe mode to change until we have
	// read at least one set of changes, which will populate
	// the task.machines map. Otherwise we will potentially
	// see all legitimate instances as unknown.
	var safeModeChan chan bool

	// Not all provisioners have a retry channel.
	var retryChan <-chan struct{}
	if task.retryWatcher != nil {
		retryChan = task.retryWatcher.Changes()
	}

	// When the watcher starts, its initial event includes all the
	// relevant machines, and since that event is available straight
	// away, we know there will be some changes right off the bat.
	for {
		select {
		case <-task.tomb.Dying():
			logger.Infof("Shutting down provisioner task %s", task.machineTag)
			return tomb.ErrDying
		case ids, ok := <-task.machineWatcher.Changes():
			if !ok {
				return watcher.MustErr(task.machineWatcher)
			}
			if err := task.processMachines(ids); err != nil {
				return fmt.Errorf("failed to process updated machines: %v", err)
			}
			// We've seen a set of changes. Enable safe mode change.
			safeModeChan = task.safeModeChan
		case safeMode := <-safeModeChan:
			if safeMode == task.safeMode {
				break
			}
			logger.Infof("safe mode changed to %v", safeMode)
			task.safeMode = safeMode
			if !safeMode {
				// Safe mode has been disabled, so process current machines
				// so that unknown machines will be immediately dealt with.
				if err := task.processMachines(nil); err != nil {
					return fmt.Errorf("failed to process machines after safe mode disabled: %v", err)
				}
			}
		case <-retryChan:
			if err := task.processMachinesWithTransientErrors(); err != nil {
				return fmt.Errorf("failed to process machines with transient errors: %v", err)
			}
		}
	}
}
Example #18
func (w *activeStatusWorker) loop() error {
	code, info, err := w.stateFile.Read()
	if err != nil {
		return errors.Trace(err)
	}

	// Check current meter status before entering loop.
	currentCode, currentInfo, err := w.status.MeterStatus()
	if err != nil {
		return errors.Trace(err)
	}
	if code != currentCode || info != currentInfo {
		err = w.runHook(currentCode, currentInfo)
		if err != nil {
			return errors.Trace(err)
		}
		code, info = currentCode, currentInfo
	}

	watch, err := w.status.WatchMeterStatus()
	if err != nil {
		return errors.Trace(err)
	}
	defer watcher.Stop(watch, &w.tomb)

	// This function is used in tests to signal entering the worker loop.
	if w.init != nil {
		w.init()
	}
	for {
		select {
		case _, ok := <-watch.Changes():
			logger.Debugf("got meter status change")
			if !ok {
				return watcher.EnsureErr(watch)
			}
			currentCode, currentInfo, err := w.status.MeterStatus()
			if err != nil {
				return errors.Trace(err)
			}
			if currentCode == code && currentInfo == info {
				continue
			}
			err = w.runHook(currentCode, currentInfo)
			if err != nil {
				return errors.Trace(err)
			}
			code, info = currentCode, currentInfo
		case <-w.tomb.Dying():
			return tomb.ErrDying
		}
	}
}
Example #19
File: peeker.go Project: bac/juju
// NewPeeker returns a new Peeker providing a view of the supplied source
// (of which it takes ownership).
func NewPeeker(source Source) Peeker {
	p := &peeker{
		peeks: make(chan Peek),
	}
	go func() {
		defer p.tomb.Done()
		defer close(p.peeks)
		defer watcher.Stop(source, &p.tomb)
		p.tomb.Kill(p.loop(source))
	}()
	return p
}
Example #20
// stopWatchers stops all the firewaller's watchers.
func (fw *Firewaller) stopWatchers() {
	if fw.environWatcher != nil {
		watcher.Stop(fw.environWatcher, &fw.tomb)
	}
	if fw.machinesWatcher != nil {
		watcher.Stop(fw.machinesWatcher, &fw.tomb)
	}
	if fw.portsWatcher != nil {
		watcher.Stop(fw.portsWatcher, &fw.tomb)
	}
	for _, serviced := range fw.serviceds {
		if serviced != nil {
			watcher.Stop(serviced, &fw.tomb)
		}
	}
	for _, machined := range fw.machineds {
		if machined != nil {
			watcher.Stop(machined, &fw.tomb)
		}
	}
}
Example #21
// NewWatcher returns a RemoteStateWatcher that handles state changes pertaining to the
// supplied unit.
func NewWatcher(config WatcherConfig) (*RemoteStateWatcher, error) {
	w := &RemoteStateWatcher{
		st:                        config.State,
		relations:                 make(map[names.RelationTag]*relationUnitsWatcher),
		relationUnitsChanges:      make(chan relationUnitsChange),
		storageAttachmentWatchers: make(map[names.StorageTag]*storageAttachmentWatcher),
		storageAttachmentChanges:  make(chan storageAttachmentChange),
		leadershipTracker:         config.LeadershipTracker,
		updateStatusChannel:       config.UpdateStatusChannel,
		commandChannel:            config.CommandChannel,
		retryHookChannel:          config.RetryHookChannel,
		// Note: it is important that the out channel be buffered!
		// The remote state watcher will perform a non-blocking send
		// on the channel to wake up the observer. It is non-blocking
		// so that we coalesce events while the observer is busy.
		out: make(chan struct{}, 1),
		current: Snapshot{
			Relations: make(map[int]RelationSnapshot),
			Storage:   make(map[names.StorageTag]StorageSnapshot),
		},
	}
	if err := w.init(config.UnitTag); err != nil {
		return nil, errors.Trace(err)
	}
	go func() {
		defer w.tomb.Done()
		err := w.loop(config.UnitTag)
		logger.Errorf("remote state watcher exited: %v", err)
		w.tomb.Kill(errors.Cause(err))

		// Stop all remaining sub-watchers.
		for _, w := range w.storageAttachmentWatchers {
			watcher.Stop(w, &w.tomb)
		}
		for _, w := range w.relations {
			watcher.Stop(w, &w.tomb)
		}
	}()
	return w, nil
}
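The buffered-channel note in this constructor describes the standard coalescing wake-up idiom: with capacity 1, a non-blocking send either queues exactly one pending notification or drops the new one because a wake-up is already waiting. The send itself is not part of this excerpt, but it would presumably look like this sketch:

// wake nudges the observer without ever blocking the watcher.
// out must be buffered with capacity 1: if a notification is
// already pending, the default branch drops this one, coalescing
// bursts of changes into a single wake-up.
func wake(out chan<- struct{}) {
	select {
	case out <- struct{}{}:
	default:
	}
}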
Example #22
func (u *Uniter) loop(unitTag string) (err error) {
	if err := u.init(unitTag); err != nil {
		if err == worker.ErrTerminateAgent {
			return err
		}
		return fmt.Errorf("failed to initialize uniter for %q: %v", unitTag, err)
	}
	defer u.runListener.Close()
	logger.Infof("unit %q started", u.unit)

	environWatcher, err := u.st.WatchForEnvironConfigChanges()
	if err != nil {
		return err
	}
	defer watcher.Stop(environWatcher, &u.tomb)
	u.watchForProxyChanges(environWatcher)

	// Start filtering state change events for consumption by modes.
	u.f, err = newFilter(u.st, unitTag)
	if err != nil {
		return err
	}
	defer watcher.Stop(u.f, &u.tomb)
	go func() {
		u.tomb.Kill(u.f.Wait())
	}()

	// Run modes until we encounter an error.
	mode := ModeContinue
	for err == nil {
		select {
		case <-u.tomb.Dying():
			err = tomb.ErrDying
		default:
			mode, err = mode(u)
		}
	}
	logger.Infof("unit %q shutting down: %s", u.unit, err)
	return err
}
Example #23
// ModeTerminating marks the unit dead and returns ErrTerminateAgent.
func ModeTerminating(u *Uniter) (next Mode, err error) {
	defer modeContext("ModeTerminating", &err)()
	w, err := u.unit.Watch()
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer watcher.Stop(w, &u.tomb)

	// Upon unit termination we attempt to send any leftover metrics one last time. If we fail, there is nothing
	// else we can do but log the error.
	sendErr := u.runOperation(newSendMetricsOp())
	if sendErr != nil {
		logger.Warningf("failed to send metrics: %v", sendErr)
	}

	for {
		select {
		case <-u.tomb.Dying():
			return nil, tomb.ErrDying
		case actionId := <-u.f.ActionEvents():
			creator := newActionOp(actionId)
			if err := u.runOperation(creator); err != nil {
				return nil, errors.Trace(err)
			}
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.EnsureErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return nil, errors.Trace(err)
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return nil, errors.Trace(err)
			}
			return nil, worker.ErrTerminateAgent
		}
	}
}
Example #24
func (p *containerProvisioner) loop() error {
	task, err := p.getStartTask(false)
	if err != nil {
		return err
	}
	defer watcher.Stop(task, &p.tomb)

	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-task.Dying():
			err := task.Err()
			logger.Errorf("%s provisioner died: %v", p.containerType, err)
			return err
		}
	}
}
Example #25
func newStorageAttachmentWatcher(
	st StorageAccessor,
	in apiwatcher.NotifyWatcher,
	unitTag names.UnitTag,
	storageTag names.StorageTag,
	changes chan<- storageAttachmentChange,
) *storageAttachmentWatcher {
	s := &storageAttachmentWatcher{
		st:         st,
		watcher:    in,
		changes:    changes,
		storageTag: storageTag,
		unitTag:    unitTag,
	}
	go func() {
		defer s.tomb.Done()
		defer watcher.Stop(in, &s.tomb)
		s.tomb.Kill(s.loop())
	}()
	return s
}
Example #26
// watchLoop watches the service's exposed flag for changes.
func (sd *serviceData) watchLoop(exposed bool) {
	defer sd.tomb.Done()
	w, err := sd.service.Watch()
	if err != nil {
		sd.fw.tomb.Kill(err)
		return
	}
	defer watcher.Stop(w, &sd.tomb)
	for {
		select {
		case <-sd.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				sd.fw.tomb.Kill(watcher.EnsureErr(w))
				return
			}
			if err := sd.service.Refresh(); err != nil {
				if !params.IsCodeNotFound(err) {
					sd.fw.tomb.Kill(err)
				}
				return
			}
			change, err := sd.service.IsExposed()
			if err != nil {
				sd.fw.tomb.Kill(err)
				return
			}
			if change == exposed {
				continue
			}
			exposed = change
			select {
			case sd.fw.exposedChange <- &exposedChange{sd, change}:
			case <-sd.tomb.Dying():
				return
			}
		}
	}
}
Example #27
// watchLoop watches the unit for port changes.
func (ud *unitData) watchLoop(latestPorts []network.Port) {
	defer ud.tomb.Done()
	w, err := ud.unit.Watch()
	if err != nil {
		ud.fw.tomb.Kill(err)
		return
	}
	defer watcher.Stop(w, &ud.tomb)
	for {
		select {
		case <-ud.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				ud.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := ud.unit.Refresh(); err != nil {
				if !params.IsCodeNotFound(err) {
					ud.fw.tomb.Kill(err)
				}
				return
			}
			change, err := ud.unit.OpenedPorts()
			if err != nil {
				ud.fw.tomb.Kill(err)
				return
			}
			if samePorts(change, latestPorts) {
				continue
			}
			latestPorts = append(latestPorts[:0], change...)
			select {
			case ud.fw.portsChange <- &portsChange{ud, change}:
			case <-ud.tomb.Dying():
				return
			}
		}
	}
}
Example #28
// ModeTerminating marks the unit dead and returns ErrTerminateAgent.
func ModeTerminating(u *Uniter) (next Mode, err error) {
	defer modeContext("ModeTerminating", &err)()
	w, err := u.unit.Watch()
	if err != nil {
		return nil, errors.Trace(err)
	}

	defer watcher.Stop(w, &u.tomb)

	for {
		select {
		case <-u.tomb.Dying():
			return nil, tomb.ErrDying
		case actionId := <-u.f.ActionEvents():
			creator := newActionOp(actionId)
			if err := u.runOperation(creator); err != nil {
				return nil, errors.Trace(err)
			}
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.EnsureErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return nil, errors.Trace(err)
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return nil, errors.Trace(err)
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return nil, errors.Trace(err)
			}
			return nil, worker.ErrTerminateAgent
		}
	}
}
Example #29
// NewEnvironObserver waits for the state to have a valid environment
// configuration and returns a new environment observer. While waiting
// for the first environment configuration, it will return with
// tomb.ErrDying if it receives a value on dying.
func NewEnvironObserver(st *state.State) (*EnvironObserver, error) {
	config, err := st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	environ, err := environs.New(config)
	if err != nil {
		return nil, fmt.Errorf("cannot make Environ: %v", err)
	}
	environWatcher := st.WatchForEnvironConfigChanges()
	obs := &EnvironObserver{
		st:             st,
		environ:        environ,
		environWatcher: environWatcher,
	}
	go func() {
		defer obs.tomb.Done()
		defer watcher.Stop(environWatcher, &obs.tomb)
		obs.tomb.Kill(obs.loop())
	}()
	return obs, nil
}
Example #30
// watchLoop watches the machine for units added or removed.
func (md *machineData) watchLoop(unitw apiwatcher.StringsWatcher) {
	defer md.tomb.Done()
	defer watcher.Stop(unitw, &md.tomb)
	for {
		select {
		case <-md.tomb.Dying():
			return
		case change, ok := <-unitw.Changes():
			if !ok {
				_, err := md.machine()
				if !params.IsCodeNotFound(err) {
					md.fw.tomb.Kill(watcher.EnsureErr(unitw))
				}
				return
			}
			select {
			case md.fw.unitsChange <- &unitsChange{md, change}:
			case <-md.tomb.Dying():
				return
			}
		}
	}
}