Example #1
func (s *FastPeriodSuite) TestStop(c *gc.C) {
	t := &tomb.Tomb{}
	watcher.Stop(&dummyWatcher{nil}, t)
	c.Assert(t.Err(), gc.Equals, tomb.ErrStillAlive)

	watcher.Stop(&dummyWatcher{errors.New("BLAM")}, t)
	c.Assert(t.Err(), gc.ErrorMatches, "BLAM")
}
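Example #1 pins down the helper's contract: a watcher whose Stop returns nil must leave the tomb alone (t.Err() still reports tomb.ErrStillAlive), while a stop error must kill the tomb with that error. Below is a minimal sketch of watcher.Stop and the test's dummyWatcher stub, reconstructed from those assertions and from the watcher.Stopper type used in Example #18; the real juju implementation may differ in detail.

package watcher

import "launchpad.net/tomb" // gopkg.in/tomb.v1 in later trees

// Stopper is satisfied by every watcher in these examples.
type Stopper interface {
	Stop() error
}

// Stop stops the watcher; if stopping fails, the error is recorded in
// the tomb so the owning worker dies with it. A nil error leaves the
// tomb untouched.
func Stop(w Stopper, t *tomb.Tomb) {
	if err := w.Stop(); err != nil {
		t.Kill(err)
	}
}

// dummyWatcher is a stub whose Stop returns a canned error.
type dummyWatcher struct {
	err error
}

func (w *dummyWatcher) Stop() error {
	return w.err
}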
Example #2
// stopWatchers stops all the firewaller's watchers.
func (fw *Firewaller) stopWatchers() {
	watcher.Stop(fw.environWatcher, &fw.tomb)
	watcher.Stop(fw.machinesWatcher, &fw.tomb)
	for _, unitd := range fw.unitds {
		watcher.Stop(unitd, &fw.tomb)
	}
	for _, serviced := range fw.serviceds {
		watcher.Stop(serviced, &fw.tomb)
	}
	for _, machined := range fw.machineds {
		watcher.Stop(machined, &fw.tomb)
	}
}
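Every watcher.Stop call above funnels stop errors into the firewaller's single tomb. tomb.Kill records the first non-nil reason and ignores later ones, so the first watcher to fail decides why the whole firewaller died.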
Example #3
// ModeTerminating marks the unit dead and returns ErrTerminateAgent.
func ModeTerminating(u *Uniter) (next Mode, err error) {
	defer modeContext("ModeTerminating", &err)()
	if err = u.unit.SetStatus(params.StatusStopped, "", nil); err != nil {
		return nil, err
	}
	w, err := u.unit.Watch()
	if err != nil {
		return nil, err
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return nil, tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.MustErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return nil, err
			}
			if hasSubs, err := u.unit.HasSubordinates(); err != nil {
				return nil, err
			} else if hasSubs {
				continue
			}
			// The unit is known to be Dying, so if it had no subordinates
			// just above, it cannot acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return nil, err
			}
			return nil, worker.ErrTerminateAgent
		}
	}
}
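The defer modeContext("ModeTerminating", &err)() idiom calls modeContext on entry and defers the function it returns, giving the mode a hook at both start and exit with access to the named return error. A sketch of the shape that usage implies; the logging and the error annotation are assumptions, not the actual implementation:

func modeContext(name string, err *error) func() {
	logger.Infof("%s starting", name)
	return func() {
		logger.Debugf("%s exiting", name)
		// Annotate real errors with the mode's name; tomb.ErrDying is
		// passed through untouched so a clean shutdown stays recognizable.
		if *err != nil && *err != tomb.ErrDying {
			*err = fmt.Errorf("%s: %v", name, *err)
		}
	}
}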
Example #4
// addRelation causes the unit agent to join the supplied relation, and to
// store persistent state in the supplied dir.
func (u *Uniter) addRelation(rel *uniter.Relation, dir *relation.StateDir) error {
	logger.Infof("joining relation %q", rel)
	ru, err := rel.Unit(u.unit)
	if err != nil {
		return err
	}
	r := NewRelationer(ru, dir, u.relationHooks)
	w, err := u.unit.Watch()
	if err != nil {
		return err
	}
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			err := r.Join()
			if params.IsCodeCannotEnterScopeYet(err) {
				logger.Infof("cannot enter scope for relation %q; waiting for subordinate to be removed", rel)
				continue
			} else if err != nil {
				return err
			}
			logger.Infof("joined relation %q", rel)
			u.relationers[rel.Id()] = r
			return nil
		}
	}
}
Example #5
func (nw *notifyWorker) loop() error {
	w, err := nw.handler.SetUp()
	if err != nil {
		if w != nil {
			// Stop the watcher, but don't propagate its error: we're
			// already returning the SetUp error.
			w.Stop()
		}
		return err
	}
	defer propagateTearDown(nw.handler, &nw.tomb)
	defer watcher.Stop(w, &nw.tomb)
	for {
		select {
		case <-nw.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return mustErr(w)
			}
			if err := nw.handler.Handle(); err != nil {
				return err
			}
		}
	}
}
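Two helpers carry this loop: propagateTearDown mirrors watcher.Stop for the handler's teardown, and mustErr converts a closed Changes channel into the error its watcher died with. The sketches below are inferred from how the helpers are used here, assuming the handler exposes TearDown() error and the watcher exposes Err() error:

func propagateTearDown(handler interface{ TearDown() error }, t *tomb.Tomb) {
	// Like watcher.Stop: a failed teardown kills the worker's tomb.
	if err := handler.TearDown(); err != nil {
		t.Kill(err)
	}
}

func mustErr(w interface{ Err() error }) error {
	err := w.Err()
	if err == nil || err == tomb.ErrStillAlive {
		// A Changes channel may only close because its watcher died.
		panic("watcher closed its changes channel without dying")
	}
	return err
}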
Example #6
func (p *environProvisioner) loop() error {
	environWatcher, err := p.st.WatchForEnvironConfigChanges()
	if err != nil {
		return err
	}
	environConfigChanges := environWatcher.Changes()
	defer watcher.Stop(environWatcher, &p.tomb)

	p.environ, err = worker.WaitForEnviron(environWatcher, p.st, p.tomb.Dying())
	if err != nil {
		return err
	}
	p.broker = p.environ

	safeMode := p.environ.Config().ProvisionerSafeMode()
	task, err := p.getStartTask(safeMode)
	if err != nil {
		return err
	}
	defer watcher.Stop(task, &p.tomb)

	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-task.Dying():
			err := task.Err()
			logger.Errorf("environ provisioner died: %v", err)
			return err
		case _, ok := <-environConfigChanges:
			if !ok {
				return watcher.MustErr(environWatcher)
			}
			environConfig, err := p.st.EnvironConfig()
			if err != nil {
				logger.Errorf("cannot load environment configuration: %v", err)
				return err
			}
			if err := p.setConfig(environConfig); err != nil {
				logger.Errorf("loaded invalid environment configuration: %v", err)
			}
			task.SetSafeMode(environConfig.ProvisionerSafeMode())
		}
	}
}
Example #7
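// finish tears the watcher down in order: stop the underlying settings
// watcher, deregister every watched value, close the output channels so
// readers see end-of-stream, and finally mark the tomb Done.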
func (w *relationUnitsWatcher) finish() {
	watcher.Stop(w.sw, &w.tomb)
	for _, watchedValue := range w.watching.Values() {
		w.st.watcher.Unwatch(w.st.settings.Name, watchedValue, w.updates)
	}
	close(w.updates)
	close(w.out)
	w.tomb.Done()
}
Example #8
func (q *AliveHookQueue) loop(initial *State) {
	defer q.tomb.Done()
	defer watcher.Stop(q.w, &q.tomb)

	// Consume the initial event and reconcile it with the initial state
	// by inserting a synthetic RelationUnitsChange before it, scheduling
	// every missing unit for immediate departure before anything else
	// happens (apart from, at most, one required post-joined changed event).
	ch1, ok := <-q.w.Changes()
	if !ok {
		q.tomb.Kill(watcher.MustErr(q.w))
		return
	}
	if len(ch1.Departed) != 0 {
		panic("AliveHookQueue must be started with a fresh RelationUnitsWatcher")
	}
	q.changedPending = initial.ChangedPending
	ch0 := params.RelationUnitsChange{}
	for unit, version := range initial.Members {
		q.info[unit] = &unitInfo{
			unit:    unit,
			version: version,
			joined:  true,
		}
		if _, found := ch1.Changed[unit]; !found {
			ch0.Departed = append(ch0.Departed, unit)
		}
	}
	q.update(ch0)
	q.update(ch1)

	var next hook.Info
	var out chan<- hook.Info
	for {
		if q.empty() {
			out = nil
		} else {
			out = q.out
			next = q.next()
		}
		select {
		case <-q.tomb.Dying():
			return
		case ch, ok := <-q.w.Changes():
			if !ok {
				q.tomb.Kill(watcher.MustErr(q.w))
				return
			}
			q.update(ch)
		case out <- next:
			q.pop()
		}
	}
}
Example #9
func (task *provisionerTask) loop() error {
	logger.Infof("Starting up provisioner task %s", task.machineTag)
	defer watcher.Stop(task.machineWatcher, &task.tomb)

	// Don't allow the safe mode to change until we have
	// read at least one set of changes, which will populate
	// the task.machines map. Otherwise we will potentially
	// see all legitimate instances as unknown.
	var safeModeChan chan bool

	// Not all provisioners have a retry channel.
	var retryChan <-chan struct{}
	if task.retryWatcher != nil {
		retryChan = task.retryWatcher.Changes()
	}

	// When the watcher starts, its initial event reports all the relevant
	// machines, and that event is available straight away, so we know
	// there will be changes right off the bat.
	for {
		select {
		case <-task.tomb.Dying():
			logger.Infof("Shutting down provisioner task %s", task.machineTag)
			return tomb.ErrDying
		case ids, ok := <-task.machineWatcher.Changes():
			if !ok {
				return watcher.MustErr(task.machineWatcher)
			}
			if err := task.processMachines(ids); err != nil {
				return fmt.Errorf("failed to process updated machines: %v", err)
			}
			// We've seen a set of changes. Enable safe mode change.
			safeModeChan = task.safeModeChan
		case safeMode := <-safeModeChan:
			if safeMode == task.safeMode {
				break
			}
			logger.Infof("safe mode changed to %v", safeMode)
			task.safeMode = safeMode
			if !safeMode {
				// Safe mode has been disabled, so process current machines
				// so that unknown machines will be immediately dealt with.
				if err := task.processMachines(nil); err != nil {
					return fmt.Errorf("failed to process machines after safe mode disabled: %v", err)
				}
			}
		case <-retryChan:
			if err := task.processMachinesWithTransientErrors(); err != nil {
				return fmt.Errorf("failed to process machines with transient errors: %v", err)
			}
		}
	}
}
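The safeModeChan handling relies on a property of select: receiving from a nil channel blocks forever, so that case is effectively disabled until the first batch of machine changes assigns task.safeModeChan to the local variable. The upgrader in Example #16 plays the same trick with its dying channel, and the filter in Example #17 with discardConfig.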
Example #10
func (u *Uniter) loop(unitTag string) (err error) {
	if err := u.init(unitTag); err != nil {
		if err == worker.ErrTerminateAgent {
			return err
		}
		return fmt.Errorf("failed to initialize uniter for %q: %v", unitTag, err)
	}
	defer u.runListener.Close()
	logger.Infof("unit %q started", u.unit)

	environWatcher, err := u.st.WatchForEnvironConfigChanges()
	if err != nil {
		return err
	}
	defer watcher.Stop(environWatcher, &u.tomb)
	u.watchForProxyChanges(environWatcher)

	// Start filtering state change events for consumption by modes.
	u.f, err = newFilter(u.st, unitTag)
	if err != nil {
		return err
	}
	defer watcher.Stop(u.f, &u.tomb)
	go func() {
		u.tomb.Kill(u.f.Wait())
	}()

	// Run modes until we encounter an error.
	mode := ModeContinue
	for err == nil {
		select {
		case <-u.tomb.Dying():
			err = tomb.ErrDying
		default:
			mode, err = mode(u)
		}
	}
	logger.Infof("unit %q shutting down: %s", u.unit, err)
	return err
}
Example #11
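// loop starts a single provisioning task with safe mode disabled, then
// waits until either the provisioner is asked to die or the task dies,
// in which case the task's error is logged and propagated.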
func (p *containerProvisioner) loop() error {
	task, err := p.getStartTask(false)
	if err != nil {
		return err
	}
	defer watcher.Stop(task, &p.tomb)

	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-task.Dying():
			err := task.Err()
			logger.Errorf("%s provisioner died: %v", p.containerType, err)
			return err
		}
	}
}
Example #12
// watchLoop watches the service's exposed flag for changes.
func (sd *serviceData) watchLoop(exposed bool) {
	defer sd.tomb.Done()
	w, err := sd.service.Watch()
	if err != nil {
		sd.fw.tomb.Kill(err)
		return
	}
	defer watcher.Stop(w, &sd.tomb)
	for {
		select {
		case <-sd.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				sd.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := sd.service.Refresh(); err != nil {
				if !params.IsCodeNotFound(err) {
					sd.fw.tomb.Kill(err)
				}
				return
			}
			change, err := sd.service.IsExposed()
			if err != nil {
				sd.fw.tomb.Kill(err)
				return
			}
			if change == exposed {
				continue
			}
			exposed = change
			select {
			case sd.fw.exposedChange <- &exposedChange{sd, change}:
			case <-sd.tomb.Dying():
				return
			}
		}
	}
}
Example #13
// watchLoop watches the unit for port changes.
func (ud *unitData) watchLoop(latestPorts []instance.Port) {
	defer ud.tomb.Done()
	w, err := ud.unit.Watch()
	if err != nil {
		ud.fw.tomb.Kill(err)
		return
	}
	defer watcher.Stop(w, &ud.tomb)
	for {
		select {
		case <-ud.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				ud.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := ud.unit.Refresh(); err != nil {
				if !params.IsCodeNotFound(err) {
					ud.fw.tomb.Kill(err)
				}
				return
			}
			change, err := ud.unit.OpenedPorts()
			if err != nil {
				ud.fw.tomb.Kill(err)
				return
			}
			if samePorts(change, latestPorts) {
				continue
			}
			latestPorts = append(latestPorts[:0], change...)
			select {
			case ud.fw.portsChange <- &portsChange{ud, change}:
			case <-ud.tomb.Dying():
				return
			}
		}
	}
}
Example #14
// NewEnvironObserver returns a new environment observer for the given
// state. It reads the current environment configuration up front and
// then keeps its Environ up to date with configuration changes in a
// background goroutine.
func NewEnvironObserver(st *state.State) (*EnvironObserver, error) {
	config, err := st.EnvironConfig()
	if err != nil {
		return nil, err
	}
	environ, err := environs.New(config)
	if err != nil {
		return nil, fmt.Errorf("cannot make Environ: %v", err)
	}
	environWatcher := st.WatchForEnvironConfigChanges()
	obs := &EnvironObserver{
		st:             st,
		environ:        environ,
		environWatcher: environWatcher,
	}
	go func() {
		defer obs.tomb.Done()
		defer watcher.Stop(environWatcher, &obs.tomb)
		obs.tomb.Kill(obs.loop())
	}()
	return obs, nil
}
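Note the defer order inside the goroutine: deferred calls run last-in, first-out, so watcher.Stop(environWatcher, &obs.tomb) runs before obs.tomb.Done(), and any stop error is killed into the tomb before the tomb is marked finished.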
Example #15
// watchLoop watches the machine for units added or removed.
func (md *machineData) watchLoop(unitw apiwatcher.StringsWatcher) {
	defer md.tomb.Done()
	defer watcher.Stop(unitw, &md.tomb)
	for {
		select {
		case <-md.tomb.Dying():
			return
		case change, ok := <-unitw.Changes():
			if !ok {
				_, err := md.machine()
				if !params.IsCodeNotFound(err) {
					md.fw.tomb.Kill(watcher.MustErr(unitw))
				}
				return
			}
			select {
			case md.fw.unitsChange <- &unitsChange{md, change}:
			case <-md.tomb.Dying():
				return
			}
		}
	}
}
Example #16
func (u *Upgrader) loop() error {
	currentTools := &coretools.Tools{Version: version.Current}
	err := u.st.SetVersion(u.tag, currentTools.Version)
	if err != nil {
		return err
	}
	versionWatcher, err := u.st.WatchAPIVersion(u.tag)
	if err != nil {
		return err
	}
	changes := versionWatcher.Changes()
	defer watcher.Stop(versionWatcher, &u.tomb)
	var retry <-chan time.Time
	// We don't read on the dying channel until we have received the
	// initial event from the API version watcher, thus ensuring
	// that we attempt an upgrade even if other workers are dying
	// all around us.
	var (
		dying                <-chan struct{}
		wantTools            *coretools.Tools
		wantVersion          version.Number
		hostnameVerification utils.SSLHostnameVerification
	)
	for {
		select {
		case _, ok := <-changes:
			if !ok {
				return watcher.MustErr(versionWatcher)
			}
			wantVersion, err = u.st.DesiredVersion(u.tag)
			if err != nil {
				return err
			}
			logger.Infof("desired tool version: %v", wantVersion)
			dying = u.tomb.Dying()
		case <-retry:
		case <-dying:
			return nil
		}
		if wantVersion == currentTools.Version.Number {
			continue
		} else if !allowedTargetVersion(version.Current.Number, wantVersion) {
			// See also bug #1299802: when upgrading from 1.16 to 1.18,
			// a race condition can cause the unit agent to upgrade and
			// then want to downgrade while its associated machine agent
			// has not finished upgrading.
			logger.Infof("desired tool version: %s is older than current %s, refusing to downgrade",
				wantVersion, version.Current)
			continue
		}
		logger.Infof("upgrade requested from %v to %v", currentTools.Version, wantVersion)
		// TODO(dimitern) 2013-10-03 bug #1234715
		// Add a testing HTTPS storage to verify the
		// disableSSLHostnameVerification behavior here.
		wantTools, hostnameVerification, err = u.st.Tools(u.tag)
		if err != nil {
			// Not being able to look up Tools is considered fatal
			return err
		}
		// The worker cannot be stopped while we're downloading
		// the tools - this means that even if the API is going down
		// repeatedly (causing the agent to be stopped), as long
		// as we have got as far as this, we will still be able to
		// upgrade the agent.
		err := u.ensureTools(wantTools, hostnameVerification)
		if err == nil {
			return &UpgradeReadyError{
				OldTools:  version.Current,
				NewTools:  wantTools.Version,
				AgentName: u.tag,
				DataDir:   u.dataDir,
			}
		}
		logger.Errorf("failed to fetch tools from %q: %v", wantTools.URL, err)
		retry = retryAfter()
	}
}
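The retry channel consumed above comes from retryAfter, and making it a package-level variable is what allows tests to substitute a fast timer. A sketch of the shape its use implies; the five-minute delay is an assumption:

var retryAfter = func() <-chan time.Time {
	// Assumed delay; tests can replace retryAfter wholesale.
	return time.After(5 * time.Minute)
}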
Example #17
func (f *filter) loop(unitTag string) (err error) {
	defer func() {
		if params.IsCodeNotFoundOrCodeUnauthorized(err) {
			err = worker.ErrTerminateAgent
		}
	}()
	if f.unit, err = f.st.Unit(unitTag); err != nil {
		return err
	}
	if err = f.unitChanged(); err != nil {
		return err
	}
	f.service, err = f.unit.Service()
	if err != nil {
		return err
	}
	if err = f.serviceChanged(); err != nil {
		return err
	}
	unitw, err := f.unit.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(unitw)
	servicew, err := f.service.Watch()
	if err != nil {
		return err
	}
	defer f.maybeStopWatcher(servicew)
	// configw and relationsw can get restarted, so we need to use
	// their eventual values in the defer calls.
	var configw apiwatcher.NotifyWatcher
	var configChanges <-chan struct{}
	curl, err := f.unit.CharmURL()
	if err == nil {
		configw, err = f.unit.WatchConfigSettings()
		if err != nil {
			return err
		}
		configChanges = configw.Changes()
		f.upgradeFrom.url = curl
	} else if err != uniter.ErrNoCharmURLSet {
		filterLogger.Errorf("unit charm: %v", err)
		return err
	}
	defer func() {
		if configw != nil {
			watcher.Stop(configw, &f.tomb)
		}
	}()
	relationsw, err := f.service.WatchRelations()
	if err != nil {
		return err
	}
	defer func() {
		if relationsw != nil {
			watcher.Stop(relationsw, &f.tomb)
		}
	}()

	// Config events cannot be meaningfully discarded until one is available;
	// once we receive the initial change, we unblock discard requests by
	// setting this channel to its namesake on f.
	var discardConfig chan struct{}
	for {
		var ok bool
		select {
		case <-f.tomb.Dying():
			return tomb.ErrDying

		// Handle watcher changes.
		case _, ok = <-unitw.Changes():
			filterLogger.Debugf("got unit change")
			if !ok {
				return watcher.MustErr(unitw)
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
		case _, ok = <-servicew.Changes():
			filterLogger.Debugf("got service change")
			if !ok {
				return watcher.MustErr(servicew)
			}
			if err = f.serviceChanged(); err != nil {
				return err
			}
		case _, ok = <-configChanges:
			filterLogger.Debugf("got config change")
			if !ok {
				return watcher.MustErr(configw)
			}
			filterLogger.Debugf("preparing new config event")
			f.outConfig = f.outConfigOn
			discardConfig = f.discardConfig
		case keys, ok := <-relationsw.Changes():
			filterLogger.Debugf("got relations change")
			if !ok {
				return watcher.MustErr(relationsw)
			}
			var ids []int
			for _, key := range keys {
				relationTag := names.RelationTag(key)
				rel, err := f.st.Relation(relationTag)
				if params.IsCodeNotFoundOrCodeUnauthorized(err) {
					// If it's actually gone, this unit cannot have entered
					// scope, and therefore never needs to know about it.
				} else if err != nil {
					return err
				} else {
					ids = append(ids, rel.Id())
				}
			}
			f.relationsChanged(ids)

		// Send events on active out chans.
		case f.outUpgrade <- f.upgrade:
			filterLogger.Debugf("sent upgrade event")
			f.outUpgrade = nil
		case f.outResolved <- f.resolved:
			filterLogger.Debugf("sent resolved event")
			f.outResolved = nil
		case f.outConfig <- nothing:
			filterLogger.Debugf("sent config event")
			f.outConfig = nil
		case f.outRelations <- f.relations:
			filterLogger.Debugf("sent relations event")
			f.outRelations = nil
			f.relations = nil

		// Handle explicit requests.
		case curl := <-f.setCharm:
			filterLogger.Debugf("changing charm to %q", curl)
			// We need to restart the config watcher after setting the
			// charm, because service config settings are distinct for
			// different service charms.
			if configw != nil {
				if err := configw.Stop(); err != nil {
					return err
				}
			}
			if err := f.unit.SetCharmURL(curl); err != nil {
				filterLogger.Debugf("failed setting charm url %q: %v", curl, err)
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didSetCharm <- nothing:
			}
			configw, err = f.unit.WatchConfigSettings()
			if err != nil {
				return err
			}
			configChanges = configw.Changes()

			// Restart the relations watcher.
			if err := relationsw.Stop(); err != nil {
				return err
			}
			relationsw, err = f.service.WatchRelations()
			if err != nil {
				return err
			}

			f.upgradeFrom.url = curl
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case force := <-f.wantForcedUpgrade:
			filterLogger.Debugf("want forced upgrade %v", force)
			f.upgradeFrom.force = force
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case <-f.wantResolved:
			filterLogger.Debugf("want resolved event")
			if f.resolved != params.ResolvedNone {
				f.outResolved = f.outResolvedOn
			}
		case <-f.clearResolved:
			filterLogger.Debugf("resolved event handled")
			f.outResolved = nil
			if err := f.unit.ClearResolved(); err != nil {
				return err
			}
			if err := f.unitChanged(); err != nil {
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didClearResolved <- nothing:
			}
		case <-discardConfig:
			filterLogger.Debugf("discarded config event")
			f.outConfig = nil
		}
	}
}
Example #18
func (f *filter) maybeStopWatcher(w watcher.Stopper) {
	if w != nil {
		watcher.Stop(w, &f.tomb)
	}
}