Example #1
// NewTracker loads an environment from the observer and returns a new Tracker,
// or an error if anything goes wrong. If a tracker is returned, its Environ()
// method is immediately usable.
//
// The caller is responsible for Kill()ing the returned Tracker and Wait()ing
// for any errors it might return.
func NewTracker(config Config) (*Tracker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	modelConfig, err := config.Observer.ModelConfig()
	if err != nil {
		return nil, errors.Annotate(err, "cannot read environ config")
	}
	environ, err := config.NewEnvironFunc(modelConfig)
	if err != nil {
		return nil, errors.Annotate(err, "cannot create environ")
	}

	t := &Tracker{
		config:  config,
		environ: environ,
	}
	err = catacomb.Invoke(catacomb.Plan{
		Site: &t.catacomb,
		Work: t.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return t, nil
}
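Every constructor on this page hands a long-running function to catacomb.Invoke via Plan.Work; the catacomb kills that work when the worker is Kill()ed and reports its error from Wait(). The Tracker's actual loop is not reproduced here, so the following is only a minimal sketch of the shape such a loop typically takes (the watcher handling is a placeholder, not the real implementation):

// Sketch only: select on the catacomb's Dying channel and return
// ErrDying when asked to stop; real code would also select on the
// observer's watcher channels and refresh t.environ as config changes.
func (t *Tracker) loop() error {
	for {
		select {
		case <-t.catacomb.Dying():
			return t.catacomb.ErrDying()
		}
	}
}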
Example #2
// NewNetworker returns a Worker that handles machine networking
// configuration. If there is no <configBaseDir>/interfaces file, an
// error is returned.
func NewNetworker(
	st apinetworker.State,
	agentConfig agent.Config,
	intrusiveMode bool,
	configBaseDir string,
) (*Networker, error) {
	tag, ok := agentConfig.Tag().(names.MachineTag)
	if !ok {
		// This should never happen, as there is a check for it in the
		// machine agent.
		return nil, fmt.Errorf("expected names.MachineTag, got %T", agentConfig.Tag())
	}
	nw := &Networker{
		st:            st,
		tag:           tag,
		intrusiveMode: intrusiveMode,
		configBaseDir: configBaseDir,
		configFiles:   make(map[string]*configFile),
		interfaceInfo: make(map[string]network.InterfaceInfo),
		interfaces:    make(map[string]net.Interface),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &nw.catacomb,
		Work: nw.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return nw, nil
}
Example #3
// NewWatcher returns a RemoteStateWatcher that handles state changes pertaining to the
// supplied unit.
func NewWatcher(config WatcherConfig) (*RemoteStateWatcher, error) {
	w := &RemoteStateWatcher{
		st:                        config.State,
		relations:                 make(map[names.RelationTag]*relationUnitsWatcher),
		relationUnitsChanges:      make(chan relationUnitsChange),
		storageAttachmentWatchers: make(map[names.StorageTag]*storageAttachmentWatcher),
		storageAttachmentChanges:  make(chan storageAttachmentChange),
		leadershipTracker:         config.LeadershipTracker,
		updateStatusChannel:       config.UpdateStatusChannel,
		commandChannel:            config.CommandChannel,
		retryHookChannel:          config.RetryHookChannel,
		// Note: it is important that the out channel be buffered!
		// The remote state watcher will perform a non-blocking send
		// on the channel to wake up the observer. It is non-blocking
		// so that we coalesce events while the observer is busy.
		out: make(chan struct{}, 1),
		current: Snapshot{
			Relations: make(map[int]RelationSnapshot),
			Storage:   make(map[names.StorageTag]StorageSnapshot),
		},
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: func() error {
			return w.loop(config.UnitTag)
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
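The buffered out channel noted in the comment above is what keeps the wake-up non-blocking. A hedged sketch of that send (the notify method name is hypothetical; only the out field comes from the snippet above):

// Because out has capacity 1, this send never blocks: if a wake-up is
// already pending, further changes coalesce into that single pending
// notification and the observer catches up when it next reads.
func (w *RemoteStateWatcher) notify() {
	select {
	case w.out <- struct{}{}:
	default:
	}
}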
Example #4
// NewRestartWorkers returns a worker that will live until Kill()ed,
// giving access to a set of sub-workers needed by the state package.
//
// These workers may die of their own accord at any time, and will be
// replaced after the configured delay; all active workers will be
// stopped before Wait returns.
func NewRestartWorkers(config RestartConfig) (*RestartWorkers, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}

	dw, err := NewDumbWorkers(DumbConfig{
		Factory: config.Factory,
		Logger:  config.Logger,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}

	rw := &RestartWorkers{
		config:  config,
		workers: dw,
	}
	err = catacomb.Invoke(catacomb.Plan{
		Site: &rw.catacomb,
		Work: rw.run,
		Init: []worker.Worker{dw},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return rw, nil
}
Example #5
// NewContainerProvisioner returns a new Provisioner. When new machines
// are added to the state, it allocates instances from the environment
// and assigns them to the new machines.
func NewContainerProvisioner(
	containerType instance.ContainerType,
	st *apiprovisioner.State,
	agentConfig agent.Config,
	broker environs.InstanceBroker,
	toolsFinder ToolsFinder,
) (Provisioner, error) {

	p := &containerProvisioner{
		provisioner: provisioner{
			st:          st,
			agentConfig: agentConfig,
			broker:      broker,
			toolsFinder: toolsFinder,
		},
		containerType: containerType,
	}
	p.Provisioner = p
	logger.Tracef("Starting %s provisioner for %q", p.containerType, p.agentConfig.Tag())

	err := catacomb.Invoke(catacomb.Plan{
		Site: &p.catacomb,
		Work: p.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return p, nil
}
Example #6
File: pinger.go Project: bac/juju
// New returns a Worker backed by Config. The caller is responsible for
// Kill()ing the Worker and handling any errors returned from Wait();
// but as it happens it's designed to be an apiserver/common.Resource,
// and never to exit unless Kill()ed, so in practice Stop(), which will
// call Kill() and Wait() internally, is Good Enough.
func New(config Config) (*Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	name := fmt.Sprintf("juju.apiserver.presence.%s", config.Identity)
	w := &Worker{
		config:  config,
		logger:  loggo.GetLogger(name),
		running: make(chan struct{}),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}

	// To support unhappy assumptions in apiserver/server_test.go,
	// we block New until at least one attempt to start a Pinger
	// has been made. This preserves the apparent behaviour of an
	// unwrapped Pinger under normal conditions.
	select {
	case <-w.catacomb.Dying():
		if err := w.Wait(); err != nil {
			return nil, errors.Trace(err)
		}
		return nil, errors.New("worker stopped abnormally without reporting an error")
	case <-w.running:
		return w, nil
	}
}
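As the doc comment says, the caller owns the worker's lifecycle. A minimal usage sketch (the stopWorker helper is hypothetical, not part of the package):

// Kill asks the catacomb-backed worker to shut down; Wait blocks until
// its loop has exited and returns whatever error it died with.
func stopWorker(w *Worker) error {
	w.Kill()
	return w.Wait()
}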
Example #7
// NewUniter creates a new Uniter which will install, run, and upgrade
// a charm on behalf of the unit with the given unitTag, by executing
// hooks and operations provoked by changes in st.
func NewUniter(uniterParams *UniterParams) (*Uniter, error) {
	u := &Uniter{
		st:                   uniterParams.UniterFacade,
		paths:                NewPaths(uniterParams.DataDir, uniterParams.UnitTag),
		hookLock:             uniterParams.MachineLock,
		leadershipTracker:    uniterParams.LeadershipTracker,
		charmDirGuard:        uniterParams.CharmDirGuard,
		updateStatusAt:       uniterParams.UpdateStatusSignal,
		hookRetryStrategy:    uniterParams.HookRetryStrategy,
		newOperationExecutor: uniterParams.NewOperationExecutor,
		observer:             uniterParams.Observer,
		clock:                uniterParams.Clock,
		downloader:           uniterParams.Downloader,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &u.catacomb,
		Work: func() error {
			return u.loop(uniterParams.UnitTag)
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return u, nil
}
Example #8
// New returns a worker that exposes the result of the configured
// predicate when applied to the configured entity's life value,
// and fails with ErrValueChanged when the result changes.
func New(config Config) (*Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}

	// Read it before the worker starts, so that we have a value
	// guaranteed before we return the worker. Because we read this
	// before we start the internal watcher, we'll need an additional
	// read triggered by the first change event; this will *probably*
	// be the same value, but we can't assume it.
	life, err := config.Facade.Life(config.Entity)
	if err != nil {
		return nil, filter(errors.Trace(err))
	}

	w := &Worker{
		config: config,
		life:   life,
	}
	err = catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example #9
File: pinger.go Project: makyo/juju
// New returns a Worker backed by Config. The caller is responsible for
// Kill()ing the Worker and handling any errors returned from Wait();
// but as it happens it's designed to be an apiserver/common.Resource,
// and never to exit unless Kill()ed, so in practice Stop(), which will
// call Kill() and Wait() internally, is Good Enough.
func New(config Config) (*Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	name := fmt.Sprintf("juju.apiserver.presence.%s", config.Identity)
	w := &Worker{
		config: config,
		logger: loggo.GetLogger(name),
	}
	ready := make(chan struct{})
	err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: func() error {
			// Run once to prime presence before diving into the loop.
			pinger := w.startPinger()
			if ready != nil {
				close(ready)
				ready = nil
			}
			if pinger != nil {
				w.waitOnPinger(pinger)
			}
			return w.loop()
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	<-ready
	return w, nil
}
Example #10
// startService creates a new data value for tracking details of the
// service and starts watching the service for exposure changes.
func (fw *Firewaller) startService(service *firewaller.Service) error {
	exposed, err := service.IsExposed()
	if err != nil {
		return err
	}
	serviced := &serviceData{
		fw:      fw,
		service: service,
		exposed: exposed,
		unitds:  make(map[names.UnitTag]*unitData),
	}
	err = catacomb.Invoke(catacomb.Plan{
		Site: &serviced.catacomb,
		Work: func() error {
			return serviced.watchLoop(exposed)
		},
	})
	if err != nil {
		return errors.Trace(err)
	}
	if err := fw.catacomb.Add(serviced); err != nil {
		return errors.Trace(err)
	}
	fw.serviceds[service.Tag()] = serviced
	return nil
}
Example #11
func checkInvalid(c *gc.C, plan catacomb.Plan, match string) {
	check := func(err error) {
		c.Check(err, gc.ErrorMatches, match)
		c.Check(err, jc.Satisfies, errors.IsNotValid)
	}
	check(plan.Validate())
	check(catacomb.Invoke(plan))
}
Example #12
// startMachine creates a new data value for tracking details of the
// machine and starts watching the machine for units added or removed.
func (fw *Firewaller) startMachine(tag names.MachineTag) error {
	machined := &machineData{
		fw:           fw,
		tag:          tag,
		unitds:       make(map[names.UnitTag]*unitData),
		openedPorts:  make([]network.PortRange, 0),
		definedPorts: make(map[network.PortRange]names.UnitTag),
	}
	m, err := machined.machine()
	if params.IsCodeNotFound(err) {
		return nil
	} else if err != nil {
		return errors.Annotate(err, "cannot watch machine units")
	}
	unitw, err := m.WatchUnits()
	if err != nil {
		return errors.Trace(err)
	}
	// XXX(fwereade): this is the best of a bunch of bad options. We've started
	// the watch, so we're responsible for it; but we (probably?) need to do this
	// little dance below to update the machined data on the fw loop goroutine,
	// whence it's usually accessed, before we start the machined watchLoop
	// below. That catacomb *should* be the only one responsible -- and it *is*
	// responsible -- but having it in the main fw catacomb as well does no harm,
	// and greatly simplifies the code below (which would otherwise have to
	// manage unitw lifetime and errors manually).
	if err := fw.catacomb.Add(unitw); err != nil {
		return errors.Trace(err)
	}
	select {
	case <-fw.catacomb.Dying():
		return fw.catacomb.ErrDying()
	case change, ok := <-unitw.Changes():
		if !ok {
			return errors.New("machine units watcher closed")
		}
		fw.machineds[tag] = machined
		err = fw.unitsChanged(&unitsChange{machined, change})
		if err != nil {
			delete(fw.machineds, tag)
			return errors.Annotatef(err, "cannot respond to units changes for %q", tag)
		}
	}

	err = catacomb.Invoke(catacomb.Plan{
		Site: &machined.catacomb,
		Work: func() error {
			return machined.watchLoop(unitw)
		},
	})
	if err != nil {
		delete(fw.machineds, tag)
		return errors.Trace(err)
	}

	// register the machined with the firewaller's catacomb.
	return fw.catacomb.Add(machined)
}
Example #13
func (s *CatacombSuite) TestReusedCatacomb(c *gc.C) {
	var site catacomb.Catacomb
	err := catacomb.Invoke(catacomb.Plan{
		Site: &site,
		Work: func() error { return nil },
	})
	c.Check(err, jc.ErrorIsNil)
	err = site.Wait()
	c.Check(err, jc.ErrorIsNil)

	w := s.fix.startErrorWorker(c, nil)
	err = catacomb.Invoke(catacomb.Plan{
		Site: &site,
		Work: func() error { return nil },
		Init: []worker.Worker{w},
	})
	c.Check(err, gc.ErrorMatches, "catacomb 0x[0-9a-f]+ has already been used")
	w.assertDead(c)
}
Example #14
File: dumb.go Project: bac/juju
// NewDumbWorkers returns a worker that will live until Kill()ed,
// giving access to a set of sub-workers needed by the state package.
//
// These workers may die of their own accord at any time, and will
// not be replaced; they will also all be stopped before Wait returns.
func NewDumbWorkers(config DumbConfig) (_ *DumbWorkers, err error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	logger := config.Logger

	w := &DumbWorkers{config: config}
	defer func() {
		if err == nil {
			return
		}
		// this is ok because cleanup can handle nil fields
		if cleanupErr := w.cleanup(); cleanupErr != nil {
			logger.Errorf("while aborting DumbWorkers creation: %v", cleanupErr)
		}
	}()

	logger.Debugf("starting leadership lease manager")
	w.leadershipWorker, err = config.Factory.NewLeadershipWorker()
	if err != nil {
		return nil, errors.Annotatef(err, "cannot create leadership lease manager")
	}

	logger.Debugf("starting singular lease manager")
	w.singularWorker, err = config.Factory.NewSingularWorker()
	if err != nil {
		return nil, errors.Annotatef(err, "cannot create singular lease manager")
	}

	logger.Debugf("starting transaction log watcher")
	w.txnLogWorker, err = config.Factory.NewTxnLogWorker()
	if err != nil {
		return nil, errors.Annotatef(err, "cannot create transaction log watcher")
	}

	logger.Debugf("starting presence watcher")
	w.presenceWorker, err = config.Factory.NewPresenceWorker()
	if err != nil {
		return nil, errors.Annotatef(err, "cannot create presence watcher")
	}

	// note that we specifically *don't* want to use catacomb's
	// worker-tracking features like Add and Init, because we want
	// this type to live until externally killed, regardless of the
	// state of the inner workers. We're just using catacomb because
	// it's slightly safer than tomb.
	err = catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.run,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
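The run method is not shown on this page. Given the comment above, a plausible sketch, offered as an assumption rather than the actual implementation, is to wait until the catacomb is killed and only then stop the inner workers:

// Sketch: live until externally killed, regardless of the state of the
// inner workers, then stop them all via cleanup (which tolerates nil
// fields) so that everything is down before Wait returns.
func (w *DumbWorkers) run() error {
	<-w.catacomb.Dying()
	if err := w.cleanup(); err != nil {
		return errors.Trace(err)
	}
	return w.catacomb.ErrDying()
}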
Example #15
func (s *CatacombSuite) TestPlanDataRace(c *gc.C) {
	w := s.fix.startErrorWorker(c, nil)
	plan := catacomb.Plan{
		Site: &catacomb.Catacomb{},
		Work: func() error { return nil },
		Init: []worker.Worker{w},
	}
	err := catacomb.Invoke(plan)
	c.Assert(err, jc.ErrorIsNil)

	plan.Init[0] = nil
}
Example #16
// NewWorker returns a worker that keeps track of
// the machines in the state and polls their instance
// addresses and status periodically to keep them up to date.
func NewWorker(st *instancepoller.API) (worker.Worker, error) {
	u := &updaterWorker{
		st: st,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &u.catacomb,
		Work: u.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return u, nil
}
Example #17
// NewUndertaker returns a worker which processes a dying environment.
func NewUndertaker(client apiundertaker.UndertakerClient, clock uc.Clock) (worker.Worker, error) {
	u := &undertaker{
		client: client,
		clock:  clock,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &u.catacomb,
		Work: u.run,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return u, nil
}
Example #18
File: worker.go Project: makyo/juju
// New returns a Worker backed by config, or an error.
func New(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	w := &Worker{config: config}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example #19
func (fix *fixture) run(c *gc.C, task func(), init ...worker.Worker) error {
	err := catacomb.Invoke(catacomb.Plan{
		Site: &fix.catacomb,
		Work: func() error { task(); return nil },
		Init: init,
	})
	c.Assert(err, jc.ErrorIsNil)

	select {
	case <-fix.catacomb.Dead():
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out")
	}
	return fix.catacomb.Wait()
}
Example #20
// NewStringsWorker starts a new worker that runs a StringsHandler.
func NewStringsWorker(config StringsConfig) (*StringsWorker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	sw := &StringsWorker{
		config: config,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &sw.catacomb,
		Work: sw.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return sw, nil
}
Example #21
// NewWorker returns a worker that will attempt to discover the
// configured Environ's spaces, and update the controller via the
// configured Facade. Names are sanitised with NewName, and any
// supplied Unlocker will be Unlock()ed when the first complete
// discovery and update succeeds.
//
// Once that update completes, the worker just waits to be Kill()ed.
// We should probably poll for changes, really, but I'm making an
// effort to preserve existing behaviour where possible.
func NewWorker(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	dw := &discoverspacesWorker{
		config: config,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &dw.catacomb,
		Work: dw.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return dw, nil
}
Example #22
File: worker.go Project: bac/juju
// NewWorker returns a worker that keeps track of
// the machines in the state and polls their instance
// addresses and status periodically to keep them up to date.
func NewWorker(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	u := &updaterWorker{
		config: config,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &u.catacomb,
		Work: u.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return u, nil
}
Example #23
// NewLogForwarder returns a worker that forwards logs received from
// the stream to the sender.
func NewLogForwarder(args OpenLogForwarderArgs) (*LogForwarder, error) {
	lf := &LogForwarder{
		args:      args,
		enabledCh: make(chan bool, 1),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &lf.catacomb,
		Work: func() error {
			return errors.Trace(lf.loop())
		},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return lf, nil
}
Example #24
// NewUndertaker returns a worker which processes a dying model.
func NewUndertaker(config Config) (*Undertaker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	u := &Undertaker{
		config: config,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &u.catacomb,
		Work: u.run,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return u, nil
}
Example #25
// NewNotifyWorker starts a new worker that runs a NotifyHandler.
func NewNotifyWorker(config NotifyConfig) (*NotifyWorker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	nw := &NotifyWorker{
		config: config,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &nw.catacomb,
		Work: nw.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return nw, nil
}
Example #26
func newAggregator(config aggregatorConfig) (*aggregator, error) {
	if err := config.validate(); err != nil {
		return nil, errors.Trace(err)
	}
	a := &aggregator{
		config: config,
		reqc:   make(chan instanceInfoReq),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &a.catacomb,
		Work: a.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return a, nil
}
Example #27
File: worker.go Project: makyo/juju
func newWorker(st stateInterface, pub publisherInterface, supportsSpaces bool) (worker.Worker, error) {
	w := &pgWorker{
		st:                     st,
		machineChanges:         make(chan struct{}),
		machineTrackers:        make(map[string]*machineTracker),
		publisher:              pub,
		providerSupportsSpaces: supportsSpaces,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &w.catacomb,
		Work: w.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
Example #28
func New(config Config) (worker.Worker, error) {
	if err := config.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	m := &modelWorkerManager{
		config: config,
	}

	err := catacomb.Invoke(catacomb.Plan{
		Site: &m.catacomb,
		Work: m.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return m, nil
}
Example #29
func NewProvisionerTask(
	controllerUUID string,
	machineTag names.MachineTag,
	harvestMode config.HarvestMode,
	machineGetter MachineGetter,
	toolsFinder ToolsFinder,
	machineWatcher watcher.StringsWatcher,
	retryWatcher watcher.NotifyWatcher,
	broker environs.InstanceBroker,
	auth authentication.AuthenticationProvider,
	imageStream string,
	retryStartInstanceStrategy RetryStrategy,
) (ProvisionerTask, error) {
	machineChanges := machineWatcher.Changes()
	workers := []worker.Worker{machineWatcher}
	var retryChanges watcher.NotifyChannel
	if retryWatcher != nil {
		retryChanges = retryWatcher.Changes()
		workers = append(workers, retryWatcher)
	}
	task := &provisionerTask{
		controllerUUID:             controllerUUID,
		machineTag:                 machineTag,
		machineGetter:              machineGetter,
		toolsFinder:                toolsFinder,
		machineChanges:             machineChanges,
		retryChanges:               retryChanges,
		broker:                     broker,
		auth:                       auth,
		harvestMode:                harvestMode,
		harvestModeChan:            make(chan config.HarvestMode, 1),
		machines:                   make(map[string]*apiprovisioner.Machine),
		imageStream:                imageStream,
		retryStartInstanceStrategy: retryStartInstanceStrategy,
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &task.catacomb,
		Work: task.loop,
		Init: workers,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return task, nil
}
Example #30
func newMachineTracker(stm stateMachine, notifyCh chan struct{}) (*machineTracker, error) {
	m := &machineTracker{
		notifyCh:       notifyCh,
		id:             stm.Id(),
		stm:            stm,
		apiHostPorts:   stm.APIHostPorts(),
		mongoHostPorts: stm.MongoHostPorts(),
		wantsVote:      stm.WantsVote(),
	}
	err := catacomb.Invoke(catacomb.Plan{
		Site: &m.catacomb,
		Work: m.loop,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return m, nil
}