Example 1
// APIWorker returns a Worker that connects to the API and starts any
// workers that need an API connection.
//
// If a state worker is necessary, APIWorker calls ensureStateWorker.
func (a *MachineAgent) APIWorker(ensureStateWorker func()) (worker.Worker, error) {
	st, entity, err := openAPIState(a.Conf.Conf, a)
	if err != nil {
		// There was an error connecting to the API. Per
		// https://launchpad.net/bugs/1199915, we may simply not have an
		// API password set yet, so force a state connection at this
		// point.
		// TODO(jam): Once we can reliably trust that we have API
		//            passwords set, and we no longer need state
		//            connections (and possibly agents will be blocked
		//            from connecting directly to state), we can remove
		//            this. Currently needed because 1.10 does not set
		//            the API password and 1.11 requires it.
		ensureStateWorker()
		return nil, err
	}
	m := entity.(*machineagent.Machine)
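	// Scan the machine's jobs to see whether any of them still
	// requires a direct state connection.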
	needsStateWorker := false
	for _, job := range m.Jobs() {
		needsStateWorker = needsStateWorker || stateJobs[job]
	}
	if needsStateWorker {
		ensureStateWorker()
	}
	runner := worker.NewRunner(allFatal, moreImportant)
	// Only the machiner currently connects to the API.
	// Add other workers here as they are ready.
	runner.StartWorker("machiner", func() (worker.Worker, error) {
		return machiner.NewMachiner(st.Machiner(), a.Tag()), nil
	})
	return newCloseWorker(runner, st), nil // Note: a worker.Runner is itself a worker.Worker.
}
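Neither stateJobs nor ensureStateWorker is defined in this excerpt. As a rough sketch only, assuming the jobs are params.MachineJob values and that MachineAgent keeps its top-level worker.Runner in a runner field (both assumptions, not shown above), they might look like this:

// Hypothetical: the set of jobs that still require a direct
// *state.State connection; the exact membership is an assumption.
var stateJobs = map[params.MachineJob]bool{
	params.JobHostUnits:     true,
	params.JobManageEnviron: true,
	params.JobManageState:   true,
}

// Hypothetical wiring: register the state workers lazily under a fixed
// name, so calling ensureStateWorker more than once is harmless.
func (a *MachineAgent) startAPIWorker() {
	ensureStateWorker := func() {
		a.runner.StartWorker("state", a.StateWorker) // a.runner: assumed field
	}
	a.runner.StartWorker("api", func() (worker.Worker, error) {
		return a.APIWorker(ensureStateWorker)
	})
}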
Example 2
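// TestSetsStatusWhenDying checks that the machiner sets the machine's
// status to Stopped once it starts dying.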
func (s *MachinerSuite) TestSetsStatusWhenDying(c *C) {
	m, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	mr := machiner.NewMachiner(s.State, m.Id())
	defer mr.Stop()
	c.Assert(m.Destroy(), IsNil)
	s.waitMachineStatus(c, m, params.StatusStopped)
}
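The waitMachineStatus helper used throughout these tests is not shown in the excerpt. A minimal sketch, assuming a poll-with-timeout loop over m.Status() (the durations and the time import are assumptions; the real suite may synchronize via watchers instead):

// Hypothetical helper: poll the machine's status until it reaches the
// expected value, failing the test on timeout.
func (s *MachinerSuite) waitMachineStatus(c *C, m *state.Machine, expect params.Status) {
	timeout := time.After(10 * time.Second)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for machine status %v", expect)
		case <-time.After(50 * time.Millisecond):
			status, _, err := m.Status()
			c.Assert(err, IsNil)
			if status == expect {
				return
			}
		}
	}
}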
Example 3
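// TestRunStop checks that a freshly started machiner stops cleanly and
// leaves the machine Alive.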
func (s *MachinerSuite) TestRunStop(c *C) {
	m, err := s.State.AddMachine(state.JobHostUnits)
	c.Assert(err, IsNil)
	mr := machiner.NewMachiner(s.State, m.Id())
	c.Assert(mr.Stop(), IsNil)
	c.Assert(m.Refresh(), IsNil)
	c.Assert(m.Life(), Equals, state.Alive)
}
Example 4
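// TestSetDead checks that destroying the machine makes the machiner
// exit with worker.ErrTerminateAgent, leaving the machine Dead.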
func (s *MachinerSuite) TestSetDead(c *C) {
	mr := machiner.NewMachiner(s.machinerState, s.machine.Tag())
	defer worker.Stop(mr)
	c.Assert(s.machine.Destroy(), IsNil)
	s.State.StartSync()
	c.Assert(mr.Wait(), Equals, worker.ErrTerminateAgent)
	c.Assert(s.machine.Refresh(), IsNil)
	c.Assert(s.machine.Life(), Equals, state.Dead)
}
Example 5
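// TestStartSetsStatus checks that the machiner moves the machine's
// status from Pending to Started.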
func (s *MachinerSuite) TestStartSetsStatus(c *C) {
	status, info, err := s.machine.Status()
	c.Assert(err, IsNil)
	c.Assert(status, Equals, params.StatusPending)
	c.Assert(info, Equals, "")

	mr := machiner.NewMachiner(s.machinerState, s.apiMachine.Tag())
	defer worker.Stop(mr)

	s.waitMachineStatus(c, s.machine, params.StatusStarted)
}
Example 6
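// TestSetDead checks that, once the machine is dying, the machiner
// exits with worker.ErrDead and the machine ends up Dead.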
func (s *MachinerSuite) TestSetDead(c *C) {
	m, err := s.State.AddMachine(state.JobHostUnits)
	c.Assert(err, IsNil)
	mr := machiner.NewMachiner(s.State, m.Id())
	defer mr.Stop()
	c.Assert(m.EnsureDying(), IsNil)
	s.State.StartSync()
	c.Assert(mr.Wait(), Equals, worker.ErrDead)
	c.Assert(m.Refresh(), IsNil)
	c.Assert(m.Life(), Equals, state.Dead)
}
Example 7
// APIWorker returns a Worker that connects to the API and starts any
// workers that need an API connection.
//
// If a state worker is necessary, APIWorker calls ensureStateWorker.
func (a *MachineAgent) APIWorker(ensureStateWorker func()) (worker.Worker, error) {
	st, entity, err := openAPIState(a.Conf.Conf, a)
	if err != nil {
		// There was an error connecting to the API. Per
		// https://launchpad.net/bugs/1199915, we may simply not have an
		// API password set yet, so force a state connection at this
		// point.
		// TODO(jam): Once we can reliably trust that we have API
		//            passwords set, and we no longer need state
		//            connections (and possibly agents will be blocked
		//            from connecting directly to state), we can remove
		//            this. Currently needed because 1.10 does not set
		//            the API password and 1.11 requires it.
		ensureStateWorker()
		return nil, err
	}
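	// Scan the entity's jobs to see whether any of them still requires
	// a direct state connection.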
	needsStateWorker := false
	for _, job := range entity.Jobs() {
		needsStateWorker = needsStateWorker || stateJobs[job]
	}
	if needsStateWorker {
		ensureStateWorker()
	}
	runner := worker.NewRunner(allFatal, moreImportant)
	// Only the machiner currently connects to the API.
	// Add other workers here as they are ready.
	runner.StartWorker("machiner", func() (worker.Worker, error) {
		return machiner.NewMachiner(st.Machiner(), a.Tag()), nil
	})
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		// TODO(rog) use id instead of *Machine (or introduce Clone method)
		return upgrader.New(st.Upgrader(), a.Tag(), a.Conf.DataDir), nil
	})
	for _, job := range entity.Jobs() {
		switch job {
		case params.JobHostUnits:
			deployerTask, err := newDeployer(st.Deployer(), a.Tag(), a.Conf.DataDir)
			if err != nil {
				return nil, err
			}
			runner.StartWorker("deployer", func() (worker.Worker, error) {
				return deployerTask, nil
			})
		case params.JobManageEnviron:
			// Not yet implemented with the API.
		case params.JobManageState:
			// Not yet implemented with the API.
		default:
			// TODO(dimitern): Once all workers moved over to using
			// the API, report "unknown job type" here.
		}
	}
	return newCloseWorker(runner, st), nil // Note: a worker.Runner is itself a worker.Worker.
}
Example 8
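// TestSetDead checks that the machiner exits with
// worker.ErrTerminateAgent once the machine is destroyed, advancing it
// to Dead.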
func (s *MachinerSuite) TestSetDead(c *C) {
	m, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)
	mr := machiner.NewMachiner(s.State, m.Id())
	defer mr.Stop()
	c.Assert(m.Destroy(), IsNil)
	s.State.StartSync()
	c.Assert(mr.Wait(), Equals, worker.ErrTerminateAgent)
	c.Assert(m.Refresh(), IsNil)
	c.Assert(m.Life(), Equals, state.Dead)
}
Example 9
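// TestStartSetsStatus checks that the machiner moves a pending machine
// to Started and establishes the agent's presence (AgentAlive).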
func (s *MachinerSuite) TestStartSetsStatus(c *C) {
	m, err := s.State.AddMachine("series", state.JobHostUnits)
	c.Assert(err, IsNil)

	status, info, err := m.Status()
	c.Assert(err, IsNil)
	c.Assert(status, Equals, params.StatusPending)
	c.Assert(info, Equals, "")

	alive, err := m.AgentAlive()
	c.Assert(err, IsNil)
	c.Assert(alive, Equals, false)

	mr := machiner.NewMachiner(s.State, m.Id())
	defer mr.Stop()

	s.waitMachineStatus(c, m, params.StatusStarted)

	s.State.Sync()
	alive, err = m.AgentAlive()
	c.Assert(err, IsNil)
	c.Assert(alive, Equals, true)
}
Example 10
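// TestRunStop checks that the machiner stops cleanly, leaving the
// machine Alive.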
func (s *MachinerSuite) TestRunStop(c *C) {
	mr := machiner.NewMachiner(s.machinerState, s.apiMachine.Tag())
	c.Assert(worker.Stop(mr), IsNil)
	c.Assert(s.apiMachine.Refresh(), IsNil)
	c.Assert(s.apiMachine.Life(), Equals, params.Alive)
}
Example 11
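// TestNotFoundOrUnauthorized checks that a machiner given an unknown
// tag exits with worker.ErrTerminateAgent.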
func (s *MachinerSuite) TestNotFoundOrUnauthorized(c *C) {
	mr := machiner.NewMachiner(s.machinerState, "eleventy-one")
	c.Assert(mr.Wait(), Equals, worker.ErrTerminateAgent)
}
Example 12
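// TestSetsStatusWhenDying checks that the machiner reports Stopped
// status for a dying machine.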
func (s *MachinerSuite) TestSetsStatusWhenDying(c *C) {
	mr := machiner.NewMachiner(s.machinerState, s.apiMachine.Tag())
	defer worker.Stop(mr)
	c.Assert(s.machine.Destroy(), IsNil)
	s.waitMachineStatus(c, s.machine, params.StatusStopped)
}
Example 13
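// TestNotFound checks that a machiner given an unknown machine id
// exits with worker.ErrDead.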
func (s *MachinerSuite) TestNotFound(c *C) {
	mr := machiner.NewMachiner(s.State, "eleventy-one")
	c.Assert(mr.Wait(), Equals, worker.ErrDead)
}
Example 14
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	st, entity, err := openState(a.Conf.Conf, a)
	if err != nil {
		return nil, err
	}
	// If this fails, other bits will fail, so we just log the error and
	// let the other failures actually restart runners.
	if err := EnsureAPIInfo(a.Conf.Conf, st, entity); err != nil {
		log.Warningf("failed to EnsureAPIInfo: %v", err)
	}
	reportOpenedState(st)
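	// For a machine agent, the entity returned by openState is the
	// machine itself.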
	m := entity.(*state.Machine)
	// TODO(rog) use more discriminating test for errors
	// rather than taking everything down indiscriminately.
	dataDir := a.Conf.DataDir
	runner := worker.NewRunner(allFatal, moreImportant)
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		// TODO(rog) use id instead of *Machine (or introduce Clone method)
		return NewUpgrader(st, m, dataDir), nil
	})
	runner.StartWorker("machiner", func() (worker.Worker, error) {
		return machiner.NewMachiner(st, m.Id()), nil
	})
	// At this stage, since we don't embed lxc containers, just start an lxc
	// provisioner task for non-lxc containers.  Since we have only LXC
	// containers and normal machines, this effectively means that we only
	// have an LXC provisioner when we have a normally provisioned machine
	// (through the environ-provisioner).  With the upcoming advent of KVM
	// containers, it is likely that we will want an LXC provisioner on a KVM
	// machine, and once we get nested LXC containers, we can remove this
	// check.
	providerType := os.Getenv("JUJU_PROVIDER_TYPE")
	if providerType != provider.Local && m.ContainerType() != instance.LXC {
		workerName := fmt.Sprintf("%s-provisioner", provisioner.LXC)
		runner.StartWorker(workerName, func() (worker.Worker, error) {
			return provisioner.NewProvisioner(provisioner.LXC, st, a.MachineId, dataDir), nil
		})
	}
	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	if providerType == provider.Local && m.Id() == bootstrapMachineId {
		runner.StartWorker("local-storage", func() (worker.Worker, error) {
			return localstorage.NewWorker(), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			runner.StartWorker("deployer", func() (worker.Worker, error) {
				return newDeployer(st, m.Id(), dataDir), nil
			})
		case state.JobManageEnviron:
			runner.StartWorker("environ-provisioner", func() (worker.Worker, error) {
				return provisioner.NewProvisioner(provisioner.ENVIRON, st, a.MachineId, dataDir), nil
			})
			runner.StartWorker("firewaller", func() (worker.Worker, error) {
				return firewaller.NewFirewaller(st), nil
			})
		case state.JobManageState:
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				if len(a.Conf.StateServerCert) == 0 || len(a.Conf.StateServerKey) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				return apiserver.NewServer(st, fmt.Sprintf(":%d", a.Conf.APIPort), a.Conf.StateServerCert, a.Conf.StateServerKey)
			})
			runner.StartWorker("cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			runner.StartWorker("resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
		default:
			log.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}
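allFatal and moreImportant, passed to worker.NewRunner in the examples above, are not shown either. Assuming NewRunner takes an is-this-error-fatal predicate and a comparator saying which of two errors matters more (signatures inferred from the call sites, not confirmed), minimal versions might be:

// Hypothetical: any worker error is fatal to the whole runner.
func allFatal(error) bool { return true }

// Hypothetical: prefer ErrTerminateAgent over any other error when the
// runner decides which failure to report.
func moreImportant(err0, err1 error) bool {
	return err0 == worker.ErrTerminateAgent && err1 != worker.ErrTerminateAgent
}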