Example #1
func (s *workerSuite) TestErrorKillsWorker(c *gc.C) {
	s.AssertConfigParameterUpdated(c, "broken", "ReleaseAddress")
	w, err := addresser.NewWorker(s.State)
	c.Assert(err, jc.ErrorIsNil)
	defer worker.Stop(w)

	// The worker should have died with an error.

	stopErr := make(chan error)
	go func() {
		w.Wait()
		stopErr <- worker.Stop(w)
	}()

	select {
	case err := <-stopErr:
		msg := "failed to release address .*: dummy.ReleaseAddress is broken"
		c.Assert(err, gc.ErrorMatches, msg)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("worker did not stop as expected")
	}

	// As we failed to release addresses, they should not have been
	// removed from state.
	for _, digit := range []int{3, 4, 5, 6} {
		rawAddr := fmt.Sprintf("0.1.2.%d", digit)
		_, err := s.State.IPAddress(rawAddr)
		c.Assert(err, jc.ErrorIsNil)
	}
}
Example #2
func (s *singularSuite) TestWithIsMasterTrue(c *gc.C) {
	// When IsMaster returns true, workers get started on the underlying
	// runner as usual.
	s.PatchValue(&singular.PingInterval, 1*time.Millisecond)
	underlyingRunner := newRunner()
	conn := &fakeConn{
		isMaster: true,
	}
	r, err := singular.New(underlyingRunner, conn)
	c.Assert(err, gc.IsNil)

	started := make(chan struct{}, 1)
	err = r.StartWorker("worker", func() (worker.Worker, error) {
		return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
			started <- struct{}{}
			<-stop
			return nil
		}), nil
	})
	c.Assert(err, gc.IsNil)
	select {
	case <-started:
	case <-time.After(testing.LongWait):
		c.Fatalf("timed out waiting for worker to start")
	}

	err = worker.Stop(r)
	c.Assert(err, gc.IsNil)
}
Example #3
// forgetUnit cleans the unit data after the unit is removed.
func (fw *Firewaller) forgetUnit(unitd *unitData) {
	serviced := unitd.serviced
	machined := unitd.machined

	// If it's the last unit in the service, we'll need to stop the serviced.
	stoppedService := false
	if len(serviced.unitds) == 1 {
		if _, found := serviced.unitds[unitd.tag]; found {
			// Unusually, it's fine to ignore this error, because we know the
			// serviced is being tracked in fw.catacomb. But we do still want
			// to wait until the watch loop has stopped before we nuke the last
			// data and return.
			worker.Stop(serviced)
			stoppedService = true
		}
	}

	// Clean up after stopping.
	delete(fw.unitds, unitd.tag)
	delete(machined.unitds, unitd.tag)
	delete(serviced.unitds, unitd.tag)
	logger.Debugf("stopped watching %q", unitd.tag)
	if stoppedService {
		serviceTag := serviced.service.Tag()
		delete(fw.serviceds, serviceTag)
		logger.Debugf("stopped watching %q", serviceTag)
	}
}
Example #4
func (s *workerSuite) TestWorkerPublishesInstanceIds(c *gc.C) {
	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		s.PatchValue(&pollInterval, coretesting.LongWait+time.Second)
		s.PatchValue(&initialRetryInterval, 5*time.Millisecond)
		s.PatchValue(&maxRetryInterval, initialRetryInterval)

		publishCh := make(chan []instance.Id, 100)

		publish := func(apiServers [][]network.HostPort, instanceIds []instance.Id) error {
			publishCh <- instanceIds
			return nil
		}
		st := NewFakeState()
		InitState(c, st, 3, ipVersion)

		w := newWorker(st, PublisherFunc(publish))
		defer func() {
			c.Check(worker.Stop(w), gc.IsNil)
		}()

		select {
		case instanceIds := <-publishCh:
			c.Assert(instanceIds, jc.SameContents, []instance.Id{"id-10", "id-11", "id-12"})
		case <-time.After(coretesting.LongWait):
			c.Errorf("timed out waiting for publish")
		}
	})
}
Example #5
func (s *workerSuite) TestSetMembersErrorIsNotFatal(c *gc.C) {
	coretesting.SkipIfI386(c, "lp:1425569")

	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		st := NewFakeState()
		InitState(c, st, 3, ipVersion)
		st.session.setStatus(mkStatuses("0p 1s 2s", ipVersion))
		var setCount voyeur.Value
		st.errors.setErrorFuncFor("Session.Set", func() error {
			setCount.Set(true)
			return errors.New("sample")
		})
		s.PatchValue(&initialRetryInterval, 10*time.Microsecond)
		s.PatchValue(&maxRetryInterval, coretesting.ShortWait/4)

		w := newWorker(st, noPublisher{})
		defer func() {
			c.Check(worker.Stop(w), gc.IsNil)
		}()

		// See that the worker is retrying.
		setCountW := setCount.Watch()
		mustNext(c, setCountW)
		mustNext(c, setCountW)
		mustNext(c, setCountW)
	})
}
Example #6
// APIWorkers returns a dependency.Engine running the unit agent's responsibilities.
func (a *UnitAgent) APIWorkers() (worker.Worker, error) {
	manifolds := unit.Manifolds(unit.ManifoldsConfig{
		Agent:               agent.APIHostPortsSetter{a},
		LogSource:           a.bufferedLogs,
		LeadershipGuarantee: 30 * time.Second,
		AgentConfigChanged:  a.configChangedVal,
	})

	config := dependency.EngineConfig{
		IsFatal:     cmdutil.IsFatal,
		WorstError:  cmdutil.MoreImportantError,
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	}
	engine, err := dependency.NewEngine(config)
	if err != nil {
		return nil, err
	}
	if err := dependency.Install(engine, manifolds); err != nil {
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, err
	}
	return engine, nil
}
Example #7
func (s *GlobalModeSuite) TestRestartUnexposedService(c *gc.C) {
	// Start firewaller and open ports.
	fw, err := firewaller.NewFirewaller(s.firewaller)
	c.Assert(err, jc.ErrorIsNil)

	svc := s.AddTestingService(c, "wordpress", s.charm)
	err = svc.SetExposed()
	c.Assert(err, jc.ErrorIsNil)

	u, m := s.addUnit(c, svc)
	s.startInstance(c, m)
	err = u.OpenPort("tcp", 80)
	c.Assert(err, jc.ErrorIsNil)
	err = u.OpenPort("tcp", 8080)
	c.Assert(err, jc.ErrorIsNil)

	s.assertEnvironPorts(c, []network.PortRange{{80, 80, "tcp"}, {8080, 8080, "tcp"}})

	// Stop firewaller and clear exposed flag on service.
	err = worker.Stop(fw)
	c.Assert(err, jc.ErrorIsNil)

	err = svc.ClearExposed()
	c.Assert(err, jc.ErrorIsNil)

	// Start firewaller and check port.
	fw, err = firewaller.NewFirewaller(s.firewaller)
	c.Assert(err, jc.ErrorIsNil)
	defer statetesting.AssertKillAndWait(c, fw)

	s.assertEnvironPorts(c, nil)
}
Example #8
File: open.go Project: bac/juju
// Close the connection to the database.
func (st *State) Close() (err error) {
	defer errors.DeferredAnnotatef(&err, "closing state failed")

	var errs []error
	handle := func(name string, err error) {
		if err != nil {
			errs = append(errs, errors.Annotatef(err, "error stopping %s", name))
		}
	}
	if st.workers != nil {
		handle("standard workers", worker.Stop(st.workers))
	}

	st.mu.Lock()
	if st.allManager != nil {
		handle("allwatcher manager", st.allManager.Stop())
	}
	if st.allModelManager != nil {
		handle("allModelWatcher manager", st.allModelManager.Stop())
	}
	if st.allModelWatcherBacking != nil {
		handle("allModelWatcher backing", st.allModelWatcherBacking.Release())
	}
	st.session.Close()
	st.mu.Unlock()

	if len(errs) > 0 {
		for _, err := range errs[1:] {
			logger.Errorf("while closing state: %v", err)
		}
		return errs[0]
	}
	logger.Debugf("closed state without error")
	return nil
}
Example #9
func (s *MachineSuite) TestJobManageModelRunsMinUnitsWorker(c *gc.C) {
	s.assertJobWithState(c, state.JobManageModel, func(_ agent.Config, agentState *state.State) {
		// Ensure that the MinUnits worker is alive by doing a simple check
		// that it responds to state changes: add a service, set its minimum
		// number of units to one, wait for the worker to add the missing unit.
		service := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
		err := service.SetMinUnits(1)
		c.Assert(err, jc.ErrorIsNil)
		w := service.Watch()
		defer worker.Stop(w)

		// Trigger a sync on the state used by the agent, and wait for the unit
		// to be created.
		agentState.StartSync()
		timeout := time.After(coretesting.LongWait)
		for {
			select {
			case <-timeout:
				c.Fatalf("unit not created")
			case <-time.After(coretesting.ShortWait):
				s.State.StartSync()
			case <-w.Changes():
				units, err := service.AllUnits()
				c.Assert(err, jc.ErrorIsNil)
				if len(units) == 1 {
					return
				}
			}
		}
	})
}
Example #10
func (s *MachineSuite) waitProvisioned(c *gc.C, unit *state.Unit) (*state.Machine, instance.Id) {
	c.Logf("waiting for unit %q to be provisioned", unit)
	machineId, err := unit.AssignedMachineId()
	c.Assert(err, jc.ErrorIsNil)
	m, err := s.State.Machine(machineId)
	c.Assert(err, jc.ErrorIsNil)
	w := m.Watch()
	defer worker.Stop(w)
	timeout := time.After(coretesting.LongWait)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for provisioning")
		case <-time.After(coretesting.ShortWait):
			s.State.StartSync()
		case _, ok := <-w.Changes():
			c.Assert(ok, jc.IsTrue)
			err := m.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			if instId, err := m.InstanceId(); err == nil {
				c.Logf("unit provisioned with instance %s", instId)
				return m, instId
			} else {
				c.Check(err, jc.Satisfies, errors.IsNotProvisioned)
			}
		}
	}
}
Example #11
func (s *ManifoldSuite) startManifold(c *gc.C) (worker.Worker, error) {
	w, err := s.manifold.Start(dt.StubGetResource(s.resources))
	if w != nil {
		s.AddCleanup(func(*gc.C) { worker.Stop(w) })
	}
	return w, err
}
Example #12
func (s *workerSuite) TestStateServersArePublished(c *gc.C) {
	publishCh := make(chan [][]network.HostPort)
	publish := func(apiServers [][]network.HostPort, instanceIds []instance.Id) error {
		publishCh <- apiServers
		return nil
	}

	st := newFakeState()
	initState(c, st, 3)
	w := newWorker(st, publisherFunc(publish))
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()
	select {
	case servers := <-publishCh:
		assertAPIHostPorts(c, servers, expectedAPIHostPorts(3))
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for publish")
	}

	// Change one of the servers' API addresses and check that it's published.

	newMachine10APIHostPorts := addressesWithPort(apiPort, "0.2.8.124")
	st.machine("10").setAPIHostPorts(newMachine10APIHostPorts)
	select {
	case servers := <-publishCh:
		expected := expectedAPIHostPorts(3)
		expected[0] = newMachine10APIHostPorts
		assertAPIHostPorts(c, servers, expected)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for publish")
	}
}
Example #13
func (s *workerJujuConnSuite) TestPublisherSetsAPIHostPorts(c *gc.C) {
	st := newFakeState()
	initState(c, st, 3)

	watcher := s.State.WatchAPIHostPorts()
	cwatch := statetesting.NewNotifyWatcherC(c, s.State, watcher)
	cwatch.AssertOneChange()

	statePublish := newPublisher(s.State)

	// Wrap the publisher so that we can call StartSync immediately
	// after the publishAPIServers method is called.
	publish := func(apiServers [][]network.HostPort, instanceIds []instance.Id) error {
		err := statePublish.publishAPIServers(apiServers, instanceIds)
		s.State.StartSync()
		return err
	}

	w := newWorker(st, publisherFunc(publish))
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()

	cwatch.AssertOneChange()
	hps, err := s.State.APIHostPorts()
	c.Assert(err, gc.IsNil)
	assertAPIHostPorts(c, hps, expectedAPIHostPorts(3))
}
Example #14
func (s *workerSuite) TestAddressChange(c *gc.C) {
	st := newFakeState()
	initState(c, st, 3)

	memberWatcher := st.session.members.Watch()
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0v"))

	logger.Infof("starting worker")
	w := newWorker(st, noPublisher{})
	defer func() {
		c.Check(worker.Stop(w), gc.IsNil)
	}()

	// Wait for the worker to set the initial members.
	mustNext(c, memberWatcher)
	assertMembers(c, memberWatcher.Value(), mkMembers("0v 1 2"))

	// Change an address and wait for it to be changed in the
	// members.
	st.machine("11").setStateHostPort("0.1.99.99:9876")

	mustNext(c, memberWatcher)
	expectMembers := mkMembers("0v 1 2")
	expectMembers[1].Address = "0.1.99.99:9876"
	assertMembers(c, memberWatcher.Value(), expectMembers)
}
Example #15
func (s *networkerSuite) TestPrimaryOrLoopbackInterfacesAreSkipped(c *gc.C) {
	c.Skip("enable once the networker is enabled again")

	// Reset what's considered up, so we can test eth0 and lo are not
	// touched.
	s.upInterfaces = make(set.Strings)
	s.interfacesWithAddress = make(set.Strings)

	nw, _ := s.newCustomNetworker(c, s.apiFacade, s.stateMachine.Id(), true, false)
	defer worker.Stop(nw)

	timeout := time.After(coretesting.LongWait)
	for {
		select {
		case <-s.lastCommands:
			if !s.vlanModuleLoaded {
				// Loading the VLAN module is one of the first things the
				// worker does, so if it hasn't happened yet, we wait a bit more.
				continue
			}
			c.Assert(s.upInterfaces.Contains("lo"), jc.IsFalse)
			c.Assert(s.upInterfaces.Contains("eth0"), jc.IsFalse)
			if s.upInterfaces.Contains("eth1") {
				// If we run ifup eth1, we successfully skipped lo and
				// eth0.
				s.assertHaveConfig(c, nw, "", "eth0", "eth1", "eth1.42", "eth0.69")
				return
			}
		case <-timeout:
			c.Fatalf("commands expected but not executed")
		}
	}
}
Example #16
func (s *notifyWorkerSuite) TestStop(c *gc.C) {
	err := worker.Stop(s.worker)
	c.Assert(err, gc.IsNil)
	// After stop, Wait should return right away
	err = waitShort(c, s.worker)
	c.Assert(err, gc.IsNil)
}
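worker.Stop, exercised here in its simplest form, is conventionally just Kill followed by Wait: ask the worker to shut down, then block until it has and return the error it died with. A minimal compilable sketch of that contract (the package name is hypothetical, and the interface is trimmed to the two methods Stop needs):

// Package workerstop is a hypothetical name for this sketch.
package workerstop

// Worker mirrors juju's worker.Worker interface: Kill asks the worker
// to shut down; Wait blocks until it has, returning the error it died with.
type Worker interface {
	Kill()
	Wait() error
}

// Stop sketches what worker.Stop does: request termination, then block
// until the worker's loop has exited, propagating its exit error.
// A nil result means a clean shutdown, which is what the test asserts.
func Stop(w Worker) error {
	w.Kill()
	return w.Wait()
}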
Example #17
// startModelWorkers starts the set of workers that run for every model
// in each controller.
func (a *MachineAgent) startModelWorkers(uuid string) (worker.Worker, error) {
	modelAgent, err := model.WrapAgent(a, uuid)
	if err != nil {
		return nil, errors.Trace(err)
	}

	engine, err := dependency.NewEngine(dependency.EngineConfig{
		IsFatal:     model.IsFatal,
		WorstError:  model.WorstError,
		Filter:      model.IgnoreErrRemoved,
		ErrorDelay:  3 * time.Second,
		BounceDelay: 10 * time.Millisecond,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}

	manifolds := modelManifolds(model.ManifoldsConfig{
		Agent:                       modelAgent,
		AgentConfigChanged:          a.configChangedVal,
		Clock:                       clock.WallClock,
		RunFlagDuration:             time.Minute,
		CharmRevisionUpdateInterval: 24 * time.Hour,
		EntityStatusHistoryCount:    100,
		EntityStatusHistoryInterval: 5 * time.Minute,
		SpacesImportedGate:          a.discoverSpacesComplete,
	})
	if err := dependency.Install(engine, manifolds); err != nil {
		if err := worker.Stop(engine); err != nil {
			logger.Errorf("while stopping engine with bad manifolds: %v", err)
		}
		return nil, errors.Trace(err)
	}
	return engine, nil
}
Example #18
// refreshMachine refreshes the specified machine's instance ID. If it is set,
// then the machine watcher is stopped and pending entities' parameters are
// updated. If the machine is not provisioned yet, this method is a no-op.
func refreshMachine(ctx *context, tag names.MachineTag) error {
	w, ok := ctx.machines[tag]
	if !ok {
		return errors.Errorf("machine %s is not being watched", tag.Id())
	}
	stopAndRemove := func() error {
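		// Note: the error from worker.Stop is discarded here; the helper
		// reports success once the watcher has been removed from tracking.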
		worker.Stop(w)
		delete(ctx.machines, tag)
		return nil
	}
	results, err := ctx.config.Machines.InstanceIds([]names.MachineTag{tag})
	if err != nil {
		return errors.Annotate(err, "getting machine instance ID")
	}
	if err := results[0].Error; err != nil {
		if params.IsCodeNotProvisioned(err) {
			return nil
		} else if params.IsCodeNotFound(err) {
			// Machine is gone, so stop watching.
			return stopAndRemove()
		}
		return errors.Annotate(err, "getting machine instance ID")
	}
	machineProvisioned(ctx, tag, instance.Id(results[0].Result))
	// machine provisioning is the only thing we care about;
	// stop the watcher.
	return stopAndRemove()
}
Example #19
func (s *ProxyUpdaterSuite) TestEnvironmentVariables(c *gc.C) {
	setenv := func(proxy, value string) {
		os.Setenv(proxy, value)
		os.Setenv(strings.ToUpper(proxy), value)
	}
	setenv("http_proxy", "foo")
	setenv("https_proxy", "foo")
	setenv("ftp_proxy", "foo")
	setenv("no_proxy", "foo")

	proxySettings, _ := s.updateConfig(c)
	updater, err := proxyupdater.NewWorker(s.config)
	c.Assert(err, jc.ErrorIsNil)
	defer worker.Stop(updater)
	s.waitProxySettings(c, proxySettings)

	assertEnv := func(proxy, value string) {
		c.Assert(os.Getenv(proxy), gc.Equals, value)
		c.Assert(os.Getenv(strings.ToUpper(proxy)), gc.Equals, value)
	}
	assertEnv("http_proxy", proxySettings.Http)
	assertEnv("https_proxy", proxySettings.Https)
	assertEnv("ftp_proxy", proxySettings.Ftp)
	assertEnv("no_proxy", proxySettings.NoProxy)
}
Example #20
func (*runnerSuite) TestOneWorkerStartWhenStopping(c *gc.C) {
	worker.RestartDelay = 3 * time.Second
	runner := worker.NewRunner(allFatal, noImportance)
	starter := newTestWorkerStarter()
	starter.stopWait = make(chan struct{})

	err := runner.StartWorker("id", testWorkerStart(starter))
	c.Assert(err, jc.ErrorIsNil)
	starter.assertStarted(c, true)
	err = runner.StopWorker("id")
	c.Assert(err, jc.ErrorIsNil)
	err = runner.StartWorker("id", testWorkerStart(starter))
	c.Assert(err, jc.ErrorIsNil)

	close(starter.stopWait)
	starter.assertStarted(c, false)
	// Check that the task is restarted immediately without
	// the usual restart timeout delay.
	t0 := time.Now()
	starter.assertStarted(c, true)
	restartDuration := time.Since(t0)
	if restartDuration > 1*time.Second {
		c.Fatalf("task did not restart immediately")
	}
	c.Assert(worker.Stop(runner), gc.IsNil)
}
Example #21
func (s *stringsWorkerSuite) TestStop(c *gc.C) {
	err := worker.Stop(s.worker)
	c.Assert(err, jc.ErrorIsNil)
	// After stop, Wait should return right away
	err = waitShort(c, s.worker)
	c.Assert(err, jc.ErrorIsNil)
}
Example #22
func (s *EngineSuite) TestIsFatal(c *gc.C) {

	// Start an engine that pays attention to fatal errors.
	fatalError := errors.New("KABOOM")
	s.stopEngine(c)
	s.startEngine(c, func(err error) bool {
		return err == fatalError
	})

	// Start two independent workers.
	mh1 := newManifoldHarness()
	err := s.engine.Install("some-task", mh1.Manifold())
	c.Assert(err, jc.ErrorIsNil)
	mh1.AssertOneStart(c)
	mh2 := newManifoldHarness()
	err = s.engine.Install("other-task", mh2.Manifold())
	c.Assert(err, jc.ErrorIsNil)
	mh2.AssertOneStart(c)

	// Bounce one worker with Just Some Error; check that worker bounces.
	mh1.InjectError(c, errors.New("splort"))
	mh1.AssertOneStart(c)
	mh2.AssertNoStart(c)

	// Bounce another worker with the fatal error; check the engine exits with
	// the right error.
	mh2.InjectError(c, fatalError)
	mh1.AssertNoStart(c)
	mh2.AssertNoStart(c)
	err = worker.Stop(s.engine)
	c.Assert(err, gc.Equals, fatalError)

	// Clear out s.engine -- lest TearDownTest freak out about the error.
	s.engine = nil
}
Example #23
func (s *workerSuite) TestAddressChange(c *gc.C) {
	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		st := NewFakeState()
		InitState(c, st, 3, ipVersion)

		memberWatcher := st.session.members.Watch()
		mustNext(c, memberWatcher)
		assertMembers(c, memberWatcher.Value(), mkMembers("0v", ipVersion))

		logger.Infof("starting worker")
		w := newWorker(st, noPublisher{})
		defer func() {
			c.Check(worker.Stop(w), gc.IsNil)
		}()

		// Wait for the worker to set the initial members.
		mustNext(c, memberWatcher)
		assertMembers(c, memberWatcher.Value(), mkMembers("0v 1 2", ipVersion))

		// Change an address and wait for it to be changed in the
		// members.
		st.machine("11").setStateHostPort(ipVersion.extraHostPort)

		mustNext(c, memberWatcher)
		expectMembers := mkMembers("0v 1 2", ipVersion)
		expectMembers[1].Address = ipVersion.extraHostPort
		assertMembers(c, memberWatcher.Value(), expectMembers)
	})
}
Example #24
// storageChanged responds to unit storage changes.
func (w *RemoteStateWatcher) storageChanged(keys []string) error {
	tags := make([]names.StorageTag, len(keys))
	for i, key := range keys {
		tags[i] = names.NewStorageTag(key)
	}
	ids := make([]params.StorageAttachmentId, len(keys))
	for i, tag := range tags {
		ids[i] = params.StorageAttachmentId{
			StorageTag: tag.String(),
			UnitTag:    w.unit.Tag().String(),
		}
	}
	results, err := w.st.StorageAttachmentLife(ids)
	if err != nil {
		return errors.Trace(err)
	}

	w.mu.Lock()
	defer w.mu.Unlock()

	for i, result := range results {
		tag := tags[i]
		if result.Error == nil {
			if storageSnapshot, ok := w.current.Storage[tag]; ok {
				// We've previously started a watcher for this storage
				// attachment, so all we needed to do was update the
				// lifecycle state.
				storageSnapshot.Life = result.Life
				w.current.Storage[tag] = storageSnapshot
				continue
			}
			// We haven't seen this storage attachment before, so start
			// a watcher now; add it to our catacomb in case of mishap;
			// and wait for the initial event.
			saw, err := w.st.WatchStorageAttachment(tag, w.unit.Tag())
			if err != nil {
				return errors.Annotate(err, "watching storage attachment")
			}
			if err := w.catacomb.Add(saw); err != nil {
				return errors.Trace(err)
			}
			if err := w.watchStorageAttachment(tag, result.Life, saw); err != nil {
				return errors.Trace(err)
			}
		} else if params.IsCodeNotFound(result.Error) {
			if watcher, ok := w.storageAttachmentWatchers[tag]; ok {
				// The watcher is already under catacomb management, so any
				// error is already tracked.
				worker.Stop(watcher)
				delete(w.storageAttachmentWatchers, tag)
			}
			delete(w.current.Storage, tag)
		} else {
			return errors.Annotatef(
				result.Error, "getting life of %s attachment",
				names.ReadableString(tag),
			)
		}
	}
	return nil
}
Example #25
func (s *workerSuite) TestStateServersArePublished(c *gc.C) {
	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		publishCh := make(chan [][]network.HostPort)
		publish := func(apiServers [][]network.HostPort, instanceIds []instance.Id) error {
			publishCh <- apiServers
			return nil
		}

		st := NewFakeState()
		InitState(c, st, 3, ipVersion)
		w := newWorker(st, PublisherFunc(publish))
		defer func() {
			c.Check(worker.Stop(w), gc.IsNil)
		}()
		select {
		case servers := <-publishCh:
			AssertAPIHostPorts(c, servers, ExpectedAPIHostPorts(3, ipVersion))
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for publish")
		}

		// Change one of the servers' API addresses and check that it's published.
		newMachine10APIHostPorts := network.NewHostPorts(apiPort, ipVersion.extraHost)
		st.machine("10").setAPIHostPorts(newMachine10APIHostPorts)
		select {
		case servers := <-publishCh:
			expected := ExpectedAPIHostPorts(3, ipVersion)
			expected[0] = newMachine10APIHostPorts
			AssertAPIHostPorts(c, servers, expected)
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for publish")
		}
	})
}
Example #26
func (s *MachinerStateSuite) TestSetDeadWithDyingUnit(c *gc.C) {
	mr := s.makeMachiner(false)
	defer worker.Stop(mr)

	// Add a service, assign to machine.
	wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
	unit, err := wordpress.AddUnit()
	c.Assert(err, jc.ErrorIsNil)
	err = unit.AssignToMachine(s.machine)
	c.Assert(err, jc.ErrorIsNil)

	// Service alive, can't destroy machine.
	err = s.machine.Destroy()
	c.Assert(err, jc.Satisfies, state.IsHasAssignedUnitsError)

	err = wordpress.Destroy()
	c.Assert(err, jc.ErrorIsNil)

	// With dying unit, machine can now be marked as dying.
	c.Assert(s.machine.Destroy(), gc.IsNil)
	s.State.StartSync()
	c.Assert(s.machine.Refresh(), gc.IsNil)
	c.Assert(s.machine.Life(), gc.Equals, state.Dying)

	// When the unit is ultimately destroyed, the machine becomes dead.
	err = unit.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	s.State.StartSync()
	c.Assert(mr.Wait(), gc.Equals, worker.ErrTerminateAgent)
}
Example #27
func (s *ProvisionerSuite) TestMachineErrorsRetainInstances(c *gc.C) {
	task := s.newProvisionerTask(c, config.HarvestAll, s.Environ, s.provisioner, mockToolsFinder{})
	defer stop(c, task)

	// create a machine
	m0, err := s.addMachine()
	c.Assert(err, jc.ErrorIsNil)
	s.checkStartInstance(c, m0)

	// create an instance out of band
	s.startUnknownInstance(c, "999")

	// start the provisioner and ensure it doesn't kill any instances if there are errors getting machines
	task = s.newProvisionerTask(
		c,
		config.HarvestAll,
		s.Environ,
		&mockMachineGetter{},
		&mockToolsFinder{},
	)
	defer func() {
		err := worker.Stop(task)
		c.Assert(err, gc.ErrorMatches, ".*failed to get machine.*")
	}()
	s.checkNoOperations(c)
}
Example #28
func (s *engineFixture) stopEngine(c *gc.C) {
	if s.engine != nil {
		err := worker.Stop(s.engine)
		s.engine = nil
		c.Check(err, jc.ErrorIsNil)
	}
}
Example #29
func (s *SelfSuite) TestActuallyWorks(c *gc.C) {

	// Create and install a manifold with an unsatisfied dependency.
	mh1 := newManifoldHarness("self")
	err := s.engine.Install("dependent", mh1.Manifold())
	c.Assert(err, jc.ErrorIsNil)
	mh1.AssertNoStart(c)

	// Install an engine inside itself; once it's "started", dependent will
	// be restarted.
	manifold := dependency.SelfManifold(s.engine)
	err = s.engine.Install("self", manifold)
	c.Assert(err, jc.ErrorIsNil)
	mh1.AssertOneStart(c)

	// Check we can still stop it (with a timeout -- injudicious
	// implementation changes could induce deadlocks).
	done := make(chan struct{})
	go func() {
		err := worker.Stop(s.engine)
		c.Check(err, jc.ErrorIsNil)
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out")
	}
}
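The timeout-guarded stop above is a recurring shape in these tests: stop in a goroutine, then select on completion versus a deadline, so a deadlocked shutdown fails the test instead of hanging it. A self-contained sketch of the same idea, with illustrative names that are not part of juju:

package main

import (
	"errors"
	"fmt"
	"time"
)

// stopWithin runs stop in its own goroutine and waits up to timeout for
// it to return, so a deadlocked shutdown surfaces as an error rather
// than blocking the caller forever.
func stopWithin(stop func() error, timeout time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- stop() }()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return errors.New("timed out waiting for worker to stop")
	}
}

func main() {
	// A stop function that returns promptly and cleanly.
	err := stopWithin(func() error { return nil }, time.Second)
	fmt.Println("stop error:", err) // prints: stop error: <nil>
}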
Example #30
// upgradeWaiterWorker runs the specified worker after upgrades have completed.
func (a *MachineAgent) upgradeWaiterWorker(name string, start func() (worker.Worker, error)) worker.Worker {
	return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
		// Wait for the agent upgrade and upgrade steps to complete (or for us to be stopped).
		for _, ch := range []<-chan struct{}{
			a.upgradeComplete.Unlocked(),
			a.initialUpgradeCheckComplete.Unlocked(),
		} {
			select {
			case <-stop:
				return nil
			case <-ch:
			}
		}
		logger.Debugf("upgrades done, starting worker %q", name)

		// Upgrades are done, start the worker.
		w, err := start()
		if err != nil {
			return err
		}
		// Wait for worker to finish or for us to be stopped.
		done := make(chan error, 1)
		go func() {
			done <- w.Wait()
		}()
		select {
		case err := <-done:
			return errors.Annotatef(err, "worker %q exited", name)
		case <-stop:
			logger.Debugf("stopping so killing worker %q", name)
			return worker.Stop(w)
		}
	})
}