Example #1
func (s *WatcherSuite) TestStop(c *C) {
	t := &tomb.Tomb{}
	watcher.Stop(&dummyWatcher{nil}, t)
	c.Assert(t.Err(), Equals, tomb.ErrStillAlive)

	watcher.Stop(&dummyWatcher{errors.New("BLAM")}, t)
	c.Assert(t.Err(), ErrorMatches, "BLAM")
}
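
This test pins down the contract that every example below relies on: watcher.Stop stops a watcher and kills the tomb only if stopping fails. A minimal sketch consistent with the test, including the dummyWatcher test double it implies (assumed shapes, not the verbatim juju-core source):

package watcher

import "launchpad.net/tomb"

// Stopper is satisfied by all the watchers in the examples below.
type Stopper interface {
	Stop() error
}

// Stop stops the watcher and kills the tomb if stopping fails.
// A nil error leaves the tomb untouched, which is why the test
// still sees tomb.ErrStillAlive after the first call.
func Stop(w Stopper, t *tomb.Tomb) {
	if err := w.Stop(); err != nil {
		t.Kill(err)
	}
}

// dummyWatcher is the test double used above: Stop returns a
// preconfigured error.
type dummyWatcher struct {
	err error
}

func (w *dummyWatcher) Stop() error {
	return w.err
}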
Example #2
func (p *Provisioner) loop() error {
	environWatcher := p.st.WatchEnvironConfig()
	defer watcher.Stop(environWatcher, &p.tomb)

	var err error
	p.environ, err = worker.WaitForEnviron(environWatcher, p.tomb.Dying())
	if err != nil {
		return err
	}

	auth, err := NewSimpleAuthenticator(p.environ)
	if err != nil {
		return err
	}

	// Start a new provisioner task for the environment, responding to
	// changes in machines and to any further updates to the environment
	// config.
	instanceBroker, err := p.getBroker()
	if err != nil {
		return err
	}
	machineWatcher, err := p.getWatcher()
	if err != nil {
		return err
	}
	environmentProvisioner := NewProvisionerTask(
		p.machineId,
		p.st,
		machineWatcher,
		instanceBroker,
		auth)
	defer watcher.Stop(environmentProvisioner, &p.tomb)

	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case <-environmentProvisioner.Dying():
			err := environmentProvisioner.Err()
			logger.Errorf("environment provisioner died: %v", err)
			return err
		case cfg, ok := <-environWatcher.Changes():
			if !ok {
				return watcher.MustErr(environWatcher)
			}
			if err := p.setConfig(cfg); err != nil {
				logger.Errorf("loaded invalid environment configuration: %v", err)
			}
		}
	}
	panic("not reached")
}
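
Like most of the loops below, this one treats a closed Changes channel as fatal and asks watcher.MustErr why. A plausible sketch of that helper, assuming the juju convention that a watcher whose channel closed must have died with a non-nil error:

// Errer exposes the error that made a watcher stop.
type Errer interface {
	Err() error
}

// MustErr returns the error that killed the watcher. It panics if
// the watcher is still alive or stopped cleanly, since callers
// only reach it after Changes() has been closed.
func MustErr(w Errer) error {
	err := w.Err()
	if err == nil {
		panic("watcher stopped cleanly with no error")
	} else if err == tomb.ErrStillAlive {
		panic("watcher is still alive")
	}
	return err
}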
Example #3
// stopWatchers stops all the firewaller's watchers.
func (fw *Firewaller) stopWatchers() {
	watcher.Stop(fw.environWatcher, &fw.tomb)
	watcher.Stop(fw.machinesWatcher, &fw.tomb)
	for _, unitd := range fw.unitds {
		watcher.Stop(unitd, &fw.tomb)
	}
	for _, serviced := range fw.serviceds {
		watcher.Stop(serviced, &fw.tomb)
	}
	for _, machined := range fw.machineds {
		watcher.Stop(machined, &fw.tomb)
	}
}
Example #4
func (p *Provisioner) loop() error {
	environWatcher := p.st.WatchEnvironConfig()
	defer watcher.Stop(environWatcher, &p.tomb)

	var err error
	p.environ, err = worker.WaitForEnviron(environWatcher, p.tomb.Dying())
	if err != nil {
		return err
	}

	// Get a new StateInfo from the environment: the one used to
	// launch the agent may refer to localhost, which will be
	// unhelpful when attempting to run an agent on a new machine.
	if p.stateInfo, p.apiInfo, err = p.environ.StateInfo(); err != nil {
		return err
	}

	// Call processMachines to stop any unknown instances before watching machines.
	if err := p.processMachines(nil); err != nil {
		return err
	}

	// Start responding to changes in machines, and to any further updates
	// to the environment config.
	machinesWatcher := p.st.WatchMachines()
	defer watcher.Stop(machinesWatcher, &p.tomb)
	// START OMIT
	// launchpad.net/juju-core/worker/provisioner/provisioner.go
	for {
		select {
		case <-p.tomb.Dying():
			return tomb.ErrDying
		case cfg, ok := <-environWatcher.Changes():
			if !ok {
				return watcher.MustErr(environWatcher)
			}
			if err := p.setConfig(cfg); err != nil {
				log.Errorf("worker/provisioner: loaded invalid environment configuration: %v", err)
			}
		case ids, ok := <-machinesWatcher.Changes():
			if !ok {
				return watcher.MustErr(machinesWatcher)
			}
			if err := p.processMachines(ids); err != nil {
				return err
			}
		}
	}
	// END OMIT
	panic("not reached")
}
Example #5
func (nw *notifyWorker) loop() error {
	w, err := nw.handler.SetUp()
	if err != nil {
		if w != nil {
			// We don't bother to propagate an error, because we
			// already have an error
			w.Stop()
		}
		return err
	}
	defer propagateTearDown(nw.handler, &nw.tomb)
	defer watcher.Stop(w, &nw.tomb)
	for {
		select {
		case <-nw.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nw.mustErr(w)
			}
			if err := nw.handler.Handle(); err != nil {
				return err
			}
		}
	}
}
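
The notifyWorker loop is generic over a handler; judging only from the calls made here (SetUp, Handle, plus the propagateTearDown helper), the contract presumably looks like this sketch (names and signatures are inferred, not confirmed):

// NotifyWatcher matches what SetUp returns in the loop above: a
// change channel plus Stop and Err, exactly the methods the loop
// and watcher.Stop rely on.
type NotifyWatcher interface {
	Changes() <-chan struct{}
	Stop() error
	Err() error
}

// WatchHandler is the assumed handler contract driven by the loop.
type WatchHandler interface {
	// SetUp starts the watcher whose events trigger Handle. It may
	// return a non-nil watcher alongside an error, in which case
	// the loop stops the watcher before returning.
	SetUp() (NotifyWatcher, error)
	// Handle is invoked once per change notification.
	Handle() error
	// TearDown undoes SetUp; propagateTearDown presumably calls it
	// and kills the tomb with any error.
	TearDown() error
}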
Example #6
func (nw *notifyWorker) loop() error {
	var w api.NotifyWatcher
	var err error
	defer handlerTearDown(nw.handler, &nw.tomb)
	if w, err = nw.handler.SetUp(); err != nil {
		if w != nil {
			// We don't bother to propagate an error, because we
			// already have an error
			w.Stop()
		}
		return err
	}
	defer watcher.Stop(w, &nw.tomb)
	for {
		select {
		case <-nw.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nw.closedHandler(w)
			}
			if err := nw.handler.Handle(); err != nil {
				return err
			}
		}
	}
	panic("unreachable")
}
Example #7
// addRelation causes the unit agent to join the supplied relation, and to
// store persistent state in the supplied dir.
func (u *Uniter) addRelation(rel *state.Relation, dir *relation.StateDir) error {
	log.Printf("worker/uniter: joining relation %q", rel)
	ru, err := rel.Unit(u.unit)
	if err != nil {
		return err
	}
	r := NewRelationer(ru, dir, u.relationHooks)
	w := u.unit.Watch()
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			if err := r.Join(); err == state.ErrCannotEnterScopeYet {
				log.Printf("worker/uniter: cannot enter scope for relation %q; waiting for subordinate to be removed", rel)
				continue
			} else if err != nil {
				return err
			}
			log.Printf("worker/uniter: joined relation %q", rel)
			u.relationers[rel.Id()] = r
			return nil
		}
	}
	panic("unreachable")
}
Example #8
// watchLoop watches the service's exposed flag for changes.
func (sd *serviceData) watchLoop(exposed bool) {
	defer sd.tomb.Done()
	w := sd.service.Watch()
	defer watcher.Stop(w, &sd.tomb)
	for {
		select {
		case <-sd.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				sd.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := sd.service.Refresh(); err != nil {
				if !errors.IsNotFoundError(err) {
					sd.fw.tomb.Kill(err)
				}
				return
			}
			change := sd.service.IsExposed()
			if change == exposed {
				continue
			}
			exposed = change
			select {
			case sd.fw.exposedChange <- &exposedChange{sd, change}:
			case <-sd.tomb.Dying():
				return
			}
		}
	}
}
Example #9
// watchLoop watches the unit for port changes.
func (ud *unitData) watchLoop(latestPorts []instance.Port) {
	defer ud.tomb.Done()
	w := ud.unit.Watch()
	defer watcher.Stop(w, &ud.tomb)
	for {
		select {
		case <-ud.tomb.Dying():
			return
		case _, ok := <-w.Changes():
			if !ok {
				ud.fw.tomb.Kill(watcher.MustErr(w))
				return
			}
			if err := ud.unit.Refresh(); err != nil {
				if !errors.IsNotFoundError(err) {
					ud.fw.tomb.Kill(err)
				}
				return
			}
			change := ud.unit.OpenedPorts()
			if samePorts(change, latestPorts) {
				continue
			}
			latestPorts = append(latestPorts[:0], change...)
			select {
			case ud.fw.portsChange <- &portsChange{ud, change}:
			case <-ud.tomb.Dying():
				return
			}
		}
	}
}
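
This loop leans on a samePorts helper that is not shown. One hypothetical order-insensitive implementation (the multiset comparison is an assumption; the real helper might instead rely on sorted input):

// samePorts reports whether a and b contain exactly the same
// ports, ignoring order. instance.Port is a comparable struct,
// so it can be used directly as a map key.
func samePorts(a, b []instance.Port) bool {
	if len(a) != len(b) {
		return false
	}
	counts := make(map[instance.Port]int, len(a))
	for _, p := range a {
		counts[p]++
	}
	for _, p := range b {
		counts[p]--
		if counts[p] < 0 {
			return false
		}
	}
	return true
}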
Example #10
func (d *Deployer) loop() error {
	machine, err := d.st.Machine(d.machineId)
	if err != nil {
		return err
	}
	machineUnitsWatcher := machine.WatchUnits()
	defer watcher.Stop(machineUnitsWatcher, &d.tomb)

	deployed, err := d.ctx.DeployedUnits()
	if err != nil {
		return err
	}
	for _, unitName := range deployed {
		d.deployed.Add(unitName)
		if err := d.changed(unitName); err != nil {
			return err
		}
	}
	for {
		select {
		case <-d.tomb.Dying():
			return tomb.ErrDying
		case changes, ok := <-machineUnitsWatcher.Changes():
			if !ok {
				return watcher.MustErr(machineUnitsWatcher)
			}
			for _, unitName := range changes {
				if err := d.changed(unitName); err != nil {
					return err
				}
			}
		}
	}
	panic("unreachable")
}
Example #11
// ModeTerminating marks the unit dead and returns ErrTerminateAgent.
func ModeTerminating(u *Uniter) (next Mode, err error) {
	defer modeContext("ModeTerminating", &err)()
	if err = u.unit.SetStatus(params.StatusStopped, ""); err != nil {
		return nil, err
	}
	w := u.unit.Watch()
	defer watcher.Stop(w, &u.tomb)
	for {
		select {
		case <-u.tomb.Dying():
			return nil, tomb.ErrDying
		case _, ok := <-w.Changes():
			if !ok {
				return nil, watcher.MustErr(w)
			}
			if err := u.unit.Refresh(); err != nil {
				return nil, err
			}
			if len(u.unit.SubordinateNames()) > 0 {
				continue
			}
			// The unit is known to be Dying; so if it didn't have subordinates
			// just above, it can't acquire new ones before this call.
			if err := u.unit.EnsureDead(); err != nil {
				return nil, err
			}
			return nil, worker.ErrTerminateAgent
		}
	}
}
Example #12
func (task *provisionerTask) loop() error {
	logger.Infof("Starting up provisioner task %s", task.machineId)
	defer watcher.Stop(task.machineWatcher, &task.tomb)

	// When the watcher is started, its initial event contains all the
	// machines that are relevant, so we know there will be changes to
	// process right away.
	for {
		select {
		case <-task.tomb.Dying():
			logger.Infof("Shutting down provisioner task %s", task.machineId)
			return tomb.ErrDying
		case ids, ok := <-task.machineWatcher.Changes():
			if !ok {
				return watcher.MustErr(task.machineWatcher)
			}
			// TODO(dfc; lp:1042717) fire processMachines periodically to shut down unknown
			// instances.
			if err := task.processMachines(ids); err != nil {
				logger.Errorf("Process machines failed: %v", err)
				return err
			}
		}
	}
}
Example #13
func (mr *Machiner) loop() error {
	m, err := mr.st.Machine(mr.id)
	if state.IsNotFound(err) {
		return worker.ErrDead
	} else if err != nil {
		return err
	}
	w := m.Watch()
	defer watcher.Stop(w, &mr.tomb)
	for {
		select {
		case <-mr.tomb.Dying():
			return tomb.ErrDying
		case <-w.Changes():
			if err := m.Refresh(); state.IsNotFound(err) {
				return worker.ErrDead
			} else if err != nil {
				return err
			}
			if m.Life() != state.Alive {
				// If the machine is Dying, it has no units,
				// and can be safely set to Dead.
				if err := m.EnsureDead(); err != nil {
					return err
				}
				return worker.ErrDead
			}
		}
	}
	panic("unreachable")
}
Example #14
func (w *RelationUnitsWatcher) finish() {
	watcher.Stop(w.sw, &w.tomb)
	for _, watchedValue := range w.watching.Values() {
		w.st.watcher.Unwatch(w.st.settings.Name, watchedValue, w.updates)
	}
	close(w.updates)
	close(w.out)
	w.tomb.Done()
}
Example #15
func (q *AliveHookQueue) loop(initial *State) {
	defer q.tomb.Done()
	defer watcher.Stop(q.w, &q.tomb)

	// Consume initial event, and reconcile with initial state, by inserting
	// a new RelationUnitsChange before the initial event, which schedules
	// every missing unit for immediate departure before anything else happens
	// (apart from a single potentially required post-joined changed event).
	ch1, ok := <-q.w.Changes()
	if !ok {
		q.tomb.Kill(watcher.MustErr(q.w))
		return
	}
	if len(ch1.Departed) != 0 {
		panic("AliveHookQueue must be started with a fresh RelationUnitsWatcher")
	}
	q.changedPending = initial.ChangedPending
	ch0 := state.RelationUnitsChange{}
	for unit, version := range initial.Members {
		q.info[unit] = &unitInfo{
			unit:    unit,
			version: version,
			joined:  true,
		}
		if _, found := ch1.Changed[unit]; !found {
			ch0.Departed = append(ch0.Departed, unit)
		}
	}
	q.update(ch0)
	q.update(ch1)

	var next hook.Info
	var out chan<- hook.Info
	for {
		if q.empty() {
			out = nil
		} else {
			out = q.out
			next = q.next()
		}
		select {
		case <-q.tomb.Dying():
			return
		case ch, ok := <-q.w.Changes():
			if !ok {
				q.tomb.Kill(watcher.MustErr(q.w))
				return
			}
			q.update(ch)
		case out <- next:
			q.pop()
		}
	}
}
Example #16
// NewDeployer returns a Deployer that deploys and recalls unit agents via
// mgr, according to membership and lifecycle changes notified by w.
func NewDeployer(st *state.State, mgr Manager, w *state.UnitsWatcher) *Deployer {
	d := &Deployer{
		st:         st,
		mgr:        mgr,
		entityName: w.EntityName(),
		deployed:   map[string]bool{},
	}
	go func() {
		defer d.tomb.Done()
		defer watcher.Stop(w, &d.tomb)
		d.tomb.Kill(d.loop(w))
	}()
	return d
}
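
This shows the construction half of the tomb-based worker pattern used throughout; the matching shutdown half, common to all these workers, would look roughly like the following standard tomb usage (a sketch, not quoted source):

// Stop kills the deployer's tomb and waits for loop to finish,
// returning whatever error the tomb was killed with.
func (d *Deployer) Stop() error {
	d.tomb.Kill(nil) // makes d.tomb.Dying() fire inside loop
	return d.tomb.Wait()
}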
Example #17
func (u *Uniter) loop(name string) (err error) {
	if err = u.init(name); err != nil {
		return err
	}
	log.Printf("worker/uniter: unit %q started", u.unit)

	// Start filtering state change events for consumption by modes.
	u.f, err = newFilter(u.st, name)
	if err != nil {
		return err
	}
	defer watcher.Stop(u.f, &u.tomb)
	go func() {
		u.tomb.Kill(u.f.Wait())
	}()

	// Announce our presence to the world.
	pinger, err := u.unit.SetAgentAlive()
	if err != nil {
		return err
	}
	defer watcher.Stop(pinger, &u.tomb)

	// Run modes until we encounter an error.
	mode := ModeInit
	for err == nil {
		select {
		case <-u.tomb.Dying():
			err = tomb.ErrDying
		default:
			mode, err = mode(u)
		}
	}
	log.Printf("worker/uniter: unit %q shutting down: %s", u.unit, err)
	return err
}
Example #18
// watchLoop watches the machine for units added or removed.
func (md *machineData) watchLoop(unitw state.StringsWatcher) {
	defer md.tomb.Done()
	defer watcher.Stop(unitw, &md.tomb)
	for {
		select {
		case <-md.tomb.Dying():
			return
		case change, ok := <-unitw.Changes():
			if !ok {
				_, err := md.machine()
				if !errors.IsNotFoundError(err) {
					md.fw.tomb.Kill(watcher.MustErr(unitw))
				}
				return
			}
			select {
			case md.fw.unitsChange <- &unitsChange{md, change}:
			case <-md.tomb.Dying():
				return
			}
		}
	}
}
Example #19
func (u *Upgrader) run() error {
	// Let the state know the version that is currently running.
	currentTools, err := environs.ReadTools(u.dataDir, version.Current)
	if err != nil {
		// Don't abort everything because we can't find the tools directory.
		// The problem should sort itself out as we will immediately
		// download some more tools and upgrade.
		log.Printf("cmd/jujud: upgrader cannot read current tools: %v", err)
		currentTools = &state.Tools{
			Binary: version.Current,
		}
	}
	err = u.agentState.SetAgentTools(currentTools)
	if err != nil {
		return err
	}

	w := u.st.WatchEnvironConfig()
	defer watcher.Stop(w, &u.tomb)

	// Rather than using worker.WaitForEnviron, invalid environments are
	// managed explicitly so that all configuration changes are observed
	// by the loop below.
	var environ environs.Environ

	// TODO(rog) retry downloads when they fail.
	var (
		download      *downloader.Download
		downloadTools *state.Tools
		downloadDone  <-chan downloader.Status
	)
	// If we're killed early on (probably as a result of some other
	// task dying) we allow ourselves some time to try to connect to
	// the state and download a new version. We return to normal
	// undelayed behaviour when:
	// 1) We find there's no upgrade to do.
	// 2) A download fails.
	tomb := delayedTomb(&u.tomb, upgraderKillDelay)
	noDelay := func() {
		if tomb != &u.tomb {
			tomb.Kill(nil)
			tomb = &u.tomb
		}
	}
	for {
		// We wait for the tools to change while we're downloading
		// so that if something goes wrong (for instance a bad URL
		// hangs up) another change to the proposed tools can
		// potentially fix things.
		select {
		case cfg, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			var err error
			if environ == nil {
				environ, err = environs.New(cfg)
				if err != nil {
					log.Printf("cmd/jujud: upgrader loaded invalid initial environment configuration: %v", err)
					break
				}
			} else {
				err = environ.SetConfig(cfg)
				if err != nil {
					log.Printf("cmd/jujud: upgrader loaded invalid environment configuration: %v", err)
					// continue on, because the version number is still significant.
				}
			}
			vers := cfg.AgentVersion()
			if download != nil {
				// There's a download in progress, stop it if we need to.
				if vers == downloadTools.Number {
					// We are already downloading the requested tools.
					break
				}
				// Tools changed. We need to stop and restart.
				download.Stop()
				download, downloadTools, downloadDone = nil, nil, nil
			}
			// Ignore the proposed tools if we're already running the
			// proposed version.
			if vers == version.Current.Number {
				noDelay()
				break
			}
			binary := version.Current
			binary.Number = vers

			if tools, err := environs.ReadTools(u.dataDir, binary); err == nil {
				// The tools have already been downloaded, so use them.
				return u.upgradeReady(currentTools, tools)
			}
			flags := environs.CompatVersion
			if cfg.Development() {
				flags |= environs.DevVersion
			}
			tools, err := environs.FindTools(environ, binary, flags)
			if err != nil {
				log.Printf("cmd/jujud: upgrader error finding tools for %v: %v", binary, err)
				noDelay()
				// TODO(rog): poll until tools become available.
				break
			}
			if tools.Binary != binary {
				if tools.Number == version.Current.Number {
					// TODO(rog): poll until tools become available.
					log.Printf("cmd/jujud: upgrader: version %v requested but found only current version: %v", binary, tools.Number)
					noDelay()
					break
				}
				log.Printf("cmd/jujud: upgrader cannot find exact tools match for %s; using %s instead", binary, tools.Binary)
			}
			log.Printf("cmd/jujud: upgrader downloading %q", tools.URL)
			download = downloader.New(tools.URL, "")
			downloadTools = tools
			downloadDone = download.Done()
		case status := <-downloadDone:
			tools := downloadTools
			download, downloadTools, downloadDone = nil, nil, nil
			if status.Err != nil {
				log.Printf("cmd/jujud: upgrader download of %v failed: %v", tools.Binary, status.Err)
				noDelay()
				break
			}
			err := environs.UnpackTools(u.dataDir, tools, status.File)
			status.File.Close()
			if err := os.Remove(status.File.Name()); err != nil {
				log.Printf("cmd/jujud: upgrader cannot remove temporary download file: %v", err)
			}
			if err != nil {
				log.Printf("cmd/jujud: upgrader cannot unpack %v tools: %v", tools.Binary, err)
				noDelay()
				break
			}
			return u.upgradeReady(currentTools, tools)
		case <-tomb.Dying():
			if download != nil {
				return fmt.Errorf("upgrader aborted download of %q", downloadTools.URL)
			}
			return nil
		}
	}
	panic("not reached")
}
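
Both upgrader variants (this example and the next) call a delayedTomb helper that is not shown. A reconstruction from the call sites, assuming it returns a tomb that only starts dying some time after the parent does, and that killing it directly, as noDelay does, releases it early:

// delayedTomb returns a tomb that begins dying delay after t
// begins dying, giving the caller a grace period (here: time to
// finish an in-flight download). Killing the returned tomb
// directly, as noDelay does, stops the goroutine early.
func delayedTomb(t *tomb.Tomb, delay time.Duration) *tomb.Tomb {
	var dt tomb.Tomb
	go func() {
		defer dt.Done()
		select {
		case <-t.Dying():
			select {
			case <-time.After(delay):
				dt.Kill(nil)
			case <-dt.Dying():
			}
		case <-dt.Dying():
		}
	}()
	return &dt
}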
Example #20
func (u *Upgrader) run() error {
	// Let the state know the version that is currently running.
	currentTools, err := tools.ReadTools(u.dataDir, version.Current)
	if err != nil {
		// Don't abort everything because we can't find the tools directory.
		// The problem should sort itself out as we will immediately
		// download some more tools and upgrade.
		log.Warningf("upgrader cannot read current tools: %v", err)
		currentTools = &tools.Tools{
			Binary: version.Current,
		}
	}
	err = u.agentState.SetAgentTools(currentTools)
	if err != nil {
		return err
	}

	// TODO(fwereade): this whole package should be ignorant of environs,
	// so it shouldn't be watching environ config (and it shouldn't be
	// looking in storage): we should be able to find out what to download
	// from state, exactly as we do for charms.
	w := u.st.WatchEnvironConfig()
	defer watcher.Stop(w, &u.tomb)

	// Rather than using worker.WaitForEnviron, invalid environments are
	// managed explicitly so that all configuration changes are observed
	// by the loop below.
	var environ environs.Environ

	// TODO(rog) retry downloads when they fail.
	var (
		download      *downloader.Download
		downloadTools *tools.Tools
		downloadDone  <-chan downloader.Status
	)
	// If we're killed early on (probably as a result of some other
	// task dying) we allow ourselves some time to try to connect to
	// the state and download a new version. We return to normal
	// undelayed behaviour when:
	// 1) We find there's no upgrade to do.
	// 2) A download fails.
	tomb := delayedTomb(&u.tomb, upgraderKillDelay)
	noDelay := func() {
		if tomb != &u.tomb {
			tomb.Kill(nil)
			tomb = &u.tomb
		}
	}
	for {
		// We wait for the tools to change while we're downloading
		// so that if something goes wrong (for instance a bad URL
		// hangs up) another change to the proposed tools can
		// potentially fix things.
		select {
		case cfg, ok := <-w.Changes():
			if !ok {
				return watcher.MustErr(w)
			}
			var err error
			if environ == nil {
				environ, err = environs.New(cfg)
				if err != nil {
					log.Errorf("upgrader loaded invalid initial environment configuration: %v", err)
					break
				}
			} else {
				err = environ.SetConfig(cfg)
				if err != nil {
					log.Warningf("upgrader loaded invalid environment configuration: %v", err)
					// continue on, because the version number is still significant.
				}
			}
			proposed, ok := cfg.AgentVersion()
			if !ok {
				// This shouldn't be possible; but if it happens it's no reason
				// to kill this task. Just wait for the config to change again.
				continue
			}
			if download != nil {
				// There's a download in progress, stop it if we need to.
				if downloadTools.Number == proposed {
					// We are already downloading the requested tools.
					break
				}
				// Tools changed. We need to stop and restart.
				download.Stop()
				download, downloadTools, downloadDone = nil, nil, nil
			}
			// TODO: major version upgrades.
			if proposed.Major != version.Current.Major {
				log.Errorf("major version upgrades are not supported yet")
				noDelay()
				break
			}
			if proposed == version.Current.Number {
				noDelay()
				break
			}
			required := version.Binary{
				Number: proposed,
				Series: version.Current.Series,
				Arch:   version.Current.Arch,
			}
			if tools, err := tools.ReadTools(u.dataDir, required); err == nil {
				// The exact tools have already been downloaded, so use them.
				return u.upgradeReady(currentTools, tools)
			}
			tools, err := environs.FindExactTools(environ, required)
			if err != nil {
				log.Errorf("upgrader error finding tools for %v: %v", required, err)
				if !errors.IsNotFoundError(err) {
					return err
				}
				noDelay()
				// TODO(rog): poll until tools become available.
				break
			}
			log.Infof("upgrader downloading %q", tools.URL)
			download = downloader.New(tools.URL, "")
			downloadTools = tools
			downloadDone = download.Done()
		case status := <-downloadDone:
			newTools := downloadTools
			download, downloadTools, downloadDone = nil, nil, nil
			if status.Err != nil {
				log.Errorf("upgrader download of %v failed: %v", newTools.Binary, status.Err)
				noDelay()
				break
			}
			err := tools.UnpackTools(u.dataDir, newTools, status.File)
			status.File.Close()
			if err := os.Remove(status.File.Name()); err != nil {
				log.Warningf("upgrader cannot remove temporary download file: %v", err)
			}
			if err != nil {
				log.Errorf("upgrader cannot unpack %v tools: %v", newTools.Binary, err)
				noDelay()
				break
			}
			return u.upgradeReady(currentTools, newTools)
		case <-tomb.Dying():
			if download != nil {
				return fmt.Errorf("upgrader aborted download of %q", downloadTools.URL)
			}
			return nil
		}
	}
	panic("not reached")
}
Example #21
func (u *Upgrader) loop() error {
	currentTools, err := tools.ReadTools(u.dataDir, version.Current)
	if err != nil {
		// Don't abort everything because we can't find the tools directory.
		// The problem should sort itself out as we will immediately
		// download some more tools and upgrade.
		logger.Warningf("cannot read current tools: %v", err)
		currentTools = &tools.Tools{
			Version: version.Current,
		}
	}
	err = u.st.SetTools(u.tag, currentTools)
	if err != nil {
		return err
	}
	versionWatcher, err := u.st.WatchAPIVersion(u.tag)
	if err != nil {
		return err
	}
	changes := versionWatcher.Changes()
	defer watcher.Stop(versionWatcher, &u.tomb)
	var retry <-chan time.Time
	// We don't read on the dying channel until we have received the
	// initial event from the API version watcher, thus ensuring
	// that we attempt an upgrade even if other workers are dying
	// all around us.
	var dying <-chan struct{}
	var wantTools *tools.Tools
	for {
		select {
		case _, ok := <-changes:
			if !ok {
				return watcher.MustErr(versionWatcher)
			}
			wantTools, err = u.st.Tools(u.tag)
			if err != nil {
				return err
			}
			logger.Infof("required tools: %v", wantTools.Version)
			dying = u.tomb.Dying()
		case <-retry:
		case <-dying:
			return nil
		}
		if wantTools.Version.Number != currentTools.Version.Number {
			logger.Infof("upgrade required from %v to %v", currentTools.Version, wantTools.Version)
			// The worker cannot be stopped while we're downloading
			// the tools - this means that even if the API is going down
			// repeatedly (causing the agent to be stopped), as long
			// as we have got as far as this, we will still be able to
			// upgrade the agent.
			err := u.fetchTools(wantTools)
			if err == nil {
				return &UpgradeReadyError{
					OldTools:  currentTools,
					NewTools:  wantTools,
					AgentName: u.tag,
					DataDir:   u.dataDir,
				}
			}
			logger.Errorf("failed to fetch tools from %q: %v", wantTools.URL, err)
			retry = retryAfter()
		}
	}
}
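
retryAfter is likewise undefined in this snippet; in this style of worker it is conventionally a package-level variable so tests can substitute an immediate retry, along the lines of:

// retryInterval is an assumed value; the real constant may differ.
var retryInterval = 5 * time.Second

// retryAfter returns a channel that fires when a failed tools
// download should be retried. It is a variable so tests can
// replace it with a channel that fires immediately.
var retryAfter = func() <-chan time.Time {
	return time.After(retryInterval)
}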
Example #22
func (f *filter) loop(unitName string) (err error) {
	f.unit, err = f.st.Unit(unitName)
	if err != nil {
		return err
	}
	if err = f.unitChanged(); err != nil {
		return err
	}
	f.service, err = f.unit.Service()
	if err != nil {
		return err
	}
	if err = f.serviceChanged(); err != nil {
		return err
	}
	unitw := f.unit.Watch()
	defer watcher.Stop(unitw, &f.tomb)
	servicew := f.service.Watch()
	defer watcher.Stop(servicew, &f.tomb)
	// configw and relationsw can get restarted, so we need to use
	// their eventual values in the defer calls.
	var configw state.NotifyWatcher
	var configChanges <-chan struct{}
	if curl, ok := f.unit.CharmURL(); ok {
		configw, err = f.unit.WatchConfigSettings()
		if err != nil {
			return err
		}
		configChanges = configw.Changes()
		f.upgradeFrom.url = curl
	}
	defer func() {
		if configw != nil {
			watcher.Stop(configw, &f.tomb)
		}
	}()
	relationsw := f.service.WatchRelations()
	defer func() { watcher.Stop(relationsw, &f.tomb) }()

	// Config events cannot be meaningfully discarded until one is available;
	// once we receive the initial change, we unblock discard requests by
	// setting this channel to its namesake on f.
	var discardConfig chan struct{}
	for {
		var ok bool
		select {
		case <-f.tomb.Dying():
			return tomb.ErrDying

		// Handle watcher changes.
		case _, ok = <-unitw.Changes():
			log.Debugf("worker/uniter/filter: got unit change")
			if !ok {
				return watcher.MustErr(unitw)
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
		case _, ok = <-servicew.Changes():
			log.Debugf("worker/uniter/filter: got service change")
			if !ok {
				return watcher.MustErr(servicew)
			}
			if err = f.serviceChanged(); err != nil {
				return err
			}
		case _, ok = <-configChanges:
			log.Debugf("worker/uniter/filter: got config change")
			if !ok {
				return watcher.MustErr(configw)
			}
			log.Debugf("worker/uniter/filter: preparing new config event")
			f.outConfig = f.outConfigOn
			discardConfig = f.discardConfig
		case keys, ok := <-relationsw.Changes():
			log.Debugf("worker/uniter/filter: got relations change")
			if !ok {
				return watcher.MustErr(relationsw)
			}
			var ids []int
			for _, key := range keys {
				if rel, err := f.st.KeyRelation(key); errors.IsNotFoundError(err) {
					// If it's actually gone, this unit cannot have entered
					// scope, and therefore never needs to know about it.
				} else if err != nil {
					return err
				} else {
					ids = append(ids, rel.Id())
				}
			}
			f.relationsChanged(ids)

		// Send events on active out chans.
		case f.outUpgrade <- f.upgrade:
			log.Debugf("worker/uniter/filter: sent upgrade event")
			f.outUpgrade = nil
		case f.outResolved <- f.resolved:
			log.Debugf("worker/uniter/filter: sent resolved event")
			f.outResolved = nil
		case f.outConfig <- nothing:
			log.Debugf("worker/uniter/filter: sent config event")
			f.outConfig = nil
		case f.outRelations <- f.relations:
			log.Debugf("worker/uniter/filter: sent relations event")
			f.outRelations = nil
			f.relations = nil

		// Handle explicit requests.
		case curl := <-f.setCharm:
			log.Debugf("worker/uniter/filter: changing charm to %q", curl)
			// We need to restart the config watcher after setting the
			// charm, because service config settings are distinct for
			// different service charms.
			if configw != nil {
				if err := configw.Stop(); err != nil {
					return err
				}
			}
			if err := f.unit.SetCharmURL(curl); err != nil {
				log.Debugf("worker/uniter/filter: failed setting charm url %q: %v", curl, err)
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didSetCharm <- nothing:
			}
			configw, err = f.unit.WatchConfigSettings()
			if err != nil {
				return err
			}
			configChanges = configw.Changes()

			// Restart the relations watcher.
			if err := relationsw.Stop(); err != nil {
				return err
			}
			relationsw = f.service.WatchRelations()

			f.upgradeFrom.url = curl
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case force := <-f.wantForcedUpgrade:
			log.Debugf("worker/uniter/filter: want forced upgrade %v", force)
			f.upgradeFrom.force = force
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case <-f.wantResolved:
			log.Debugf("worker/uniter/filter: want resolved event")
			if f.resolved != state.ResolvedNone {
				f.outResolved = f.outResolvedOn
			}
		case <-f.clearResolved:
			log.Debugf("worker/uniter/filter: resolved event handled")
			f.outResolved = nil
			if err := f.unit.ClearResolved(); err != nil {
				return err
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
			select {
			case <-f.tomb.Dying():
				return tomb.ErrDying
			case f.didClearResolved <- nothing:
			}
		case <-discardConfig:
			log.Debugf("worker/uniter/filter: discarded config event")
			f.outConfig = nil
		}
	}
	panic("unreachable")
}
Example #23
func (f *filter) loop(unitName string) (err error) {
	f.unit, err = f.st.Unit(unitName)
	if err != nil {
		return err
	}
	if err = f.unitChanged(); err != nil {
		return err
	}
	f.service, err = f.unit.Service()
	if err != nil {
		return err
	}
	f.upgradeRequested.url, _ = f.service.CharmURL()
	if err = f.serviceChanged(); err != nil {
		return err
	}
	unitw := f.unit.Watch()
	defer watcher.Stop(unitw, &f.tomb)
	servicew := f.service.Watch()
	defer watcher.Stop(servicew, &f.tomb)
	configw := f.service.WatchConfig()
	defer watcher.Stop(configw, &f.tomb)
	relationsw := f.service.WatchRelations()
	defer watcher.Stop(relationsw, &f.tomb)

	// Config events cannot be meaningfully discarded until one is available;
	// once we receive the initial change, we unblock discard requests by
	// setting this channel to its namesake on f.
	var discardConfig chan struct{}
	for {
		var ok bool
		select {
		case <-f.tomb.Dying():
			return tomb.ErrDying

		// Handle watcher changes.
		case _, ok = <-unitw.Changes():
			log.Debugf("worker/uniter/filter: got unit change")
			if !ok {
				return watcher.MustErr(unitw)
			}
			if err = f.unitChanged(); err != nil {
				return err
			}
		case _, ok = <-servicew.Changes():
			log.Debugf("worker/uniter/filter: got service change")
			if !ok {
				return watcher.MustErr(servicew)
			}
			if err = f.serviceChanged(); err != nil {
				return err
			}
		case _, ok := <-configw.Changes():
			log.Debugf("worker/uniter/filter: got config change")
			if !ok {
				return watcher.MustErr(configw)
			}
			log.Debugf("worker/uniter/filter: preparing new config event")
			f.outConfig = f.outConfigOn
			discardConfig = f.discardConfig
		case ids, ok := <-relationsw.Changes():
			log.Debugf("filter: got relations change")
			if !ok {
				return watcher.MustErr(relationsw)
			}
			f.relationsChanged(ids)

		// Send events on active out chans.
		case f.outUpgrade <- f.upgrade:
			log.Debugf("worker/uniter/filter: sent upgrade event")
			f.upgradeRequested.url = f.upgrade.URL()
			f.outUpgrade = nil
		case f.outResolved <- f.resolved:
			log.Debugf("worker/uniter/filter: sent resolved event")
			f.outResolved = nil
		case f.outConfig <- nothing:
			log.Debugf("filter: sent config event")
			f.outConfig = nil
		case f.outRelations <- f.relations:
			log.Debugf("filter: sent relations event")
			f.outRelations = nil
			f.relations = nil

		// Handle explicit requests.
		case req := <-f.wantUpgrade:
			log.Debugf("worker/uniter/filter: want upgrade event")
			f.upgradeRequested = req
			if err = f.upgradeChanged(); err != nil {
				return err
			}
		case <-f.wantResolved:
			log.Debugf("worker/uniter/filter: want resolved event")
			if f.resolved != state.ResolvedNone {
				f.outResolved = f.outResolvedOn
			}
		case <-discardConfig:
			log.Debugf("filter: discarded config event")
			f.outConfig = nil
		}
	}
	panic("unreachable")
}