Example #1
func (s *ProvisionerSuite) TestProvisioningStopsInstances(c *C) {
	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// create a machine
	m0, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)
	s.checkStartInstance(c, m0)

	// create a second machine
	m1, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)
	s.checkStartInstance(c, m1)
	stop(c, p)

	// mark the first machine as dead
	c.Assert(m0.EnsureDead(), IsNil)

	// remove the second machine entirely
	c.Assert(m1.EnsureDead(), IsNil)
	c.Assert(m1.Remove(), IsNil)

	// start a new provisioner to shut them both down
	p = provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)
	s.checkStopInstance(c)
	s.checkStopInstance(c)
	s.waitRemoved(c, m0)
}
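Several of these examples call a package-level stop helper that the listing does not include. Below is a plausible minimal sketch, assuming the gocheck framework these tests already use; the interface name and body are reconstructed from how the helper is called, not taken from the source.

// stopper is the minimal interface the helper needs; the provisioner
// and the other workers in these examples all satisfy it. This sketch
// is reconstructed from usage, not copied from the source.
type stopper interface {
	Stop() error
}

// stop shuts the worker down and fails the test if shutdown errors.
func stop(c *C, s stopper) {
	c.Assert(s.Stop(), IsNil)
}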
Example #2
func (s *ProvisionerSuite) TestProvisioningStopsUnknownInstances(c *C) {
	p := provisioner.NewProvisioner(s.State)
	// we are not using defer s.stopProvisioner(c, p) because we need to control when
	// the PA is restarted in this test. Methods like Fatalf and Assert should not be used.

	// create a machine
	m, err := s.State.AddMachine(state.JobHostUnits)
	c.Check(err, IsNil)

	s.checkStartInstance(c, m, "pork")

	// create a second machine
	m, err = s.State.AddMachine(state.JobHostUnits)
	c.Check(err, IsNil)

	s.checkStartInstance(c, m, "pork")

	// stop the PA
	c.Check(p.Stop(), IsNil)

	// mark the machine as dead
	c.Assert(m.EnsureDead(), IsNil)

	// start a new provisioner
	p = provisioner.NewProvisioner(s.State)

	s.checkStopInstance(c)

	c.Assert(p.Stop(), IsNil)
}
Example #3
func (s *ProvisionerSuite) TestProvisioningDoesNotProvisionTheSameMachineAfterRestart(c *C) {
	p := provisioner.NewProvisioner(s.State)
	// we are not using defer s.stopProvisioner(c, p) because we need to control when
	// the PA is restarted in this test. Methods like Fatalf and Assert should not be used.

	// create a machine
	m, err := s.State.AddMachine(state.JobHostUnits)
	c.Check(err, IsNil)

	s.checkStartInstance(c, m, "pork")

	// restart the PA
	c.Check(p.Stop(), IsNil)

	p = provisioner.NewProvisioner(s.State)

	// check that there is only one machine known
	machines, err := p.AllMachines()
	c.Check(err, IsNil)
	c.Check(len(machines), Equals, 1)
	c.Check(machines[0].Id(), Equals, "0")

	// the PA should not create it a second time
	s.checkNotStartInstance(c)

	c.Assert(p.Stop(), IsNil)
}
Example #4
func (s *ProvisionerSuite) TestProvisionerSetsErrorStatusWhenStartInstanceFailed(c *C) {
	brokenMsg := breakDummyProvider(c, s.State, "StartInstance")
	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// Check that an instance is not provisioned when the machine is created...
	m, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)
	s.checkNoOperations(c)

	// And check the machine status is set to error.
	status, info, err := m.Status()
	c.Assert(err, IsNil)
	c.Assert(status, Equals, params.StatusError)
	c.Assert(info, Equals, brokenMsg)

	// Unbreak the environ config.
	err = s.fixEnvironment()
	c.Assert(err, IsNil)

	// Restart the PA to make sure the machine is skipped again.
	stop(c, p)
	p = provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)
	s.checkNoOperations(c)
}
Example #5
func (s *ProvisionerSuite) TestDyingMachines(c *C) {
	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// provision a machine
	m0, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)
	s.checkStartInstance(c, m0)

	// stop the provisioner and make the machine dying
	stop(c, p)
	err = m0.Destroy()
	c.Assert(err, IsNil)

	// add a new, dying, unprovisioned machine
	m1, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)
	err = m1.Destroy()
	c.Assert(err, IsNil)

	// start the provisioner and wait for it to reap the useless machine
	p = provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)
	s.checkNoOperations(c)
	s.waitRemoved(c, m1)

	// verify the other one's still fine
	err = m0.Refresh()
	c.Assert(err, IsNil)
	c.Assert(m0.Life(), Equals, state.Dying)
}
Example #6
func (s *ProvisionerSuite) TestProvisioningRecoversAfterInvalidEnvironmentPublished(c *C) {
	p := provisioner.NewProvisioner(s.State)
	defer s.stopProvisioner(c, p)

	// place a new machine into the state
	m, err := s.State.AddMachine(state.JobHostUnits)
	c.Assert(err, IsNil)

	s.checkStartInstance(c, m, "pork")

	err = s.invalidateEnvironment(c)
	c.Assert(err, IsNil)

	s.State.StartSync()

	// create a second machine
	m, err = s.State.AddMachine(state.JobHostUnits)
	c.Assert(err, IsNil)

	// the PA should create it using the old environment
	s.checkStartInstance(c, m, "pork")

	err = s.fixEnvironment()
	c.Assert(err, IsNil)

	// insert our observer
	cfgObserver := make(chan *config.Config, 1)
	p.SetObserver(cfgObserver)

	cfg, err := s.State.EnvironConfig()
	c.Assert(err, IsNil)
	attrs := cfg.AllAttrs()
	attrs["secret"] = "beef"
	cfg, err = config.New(attrs)
	c.Assert(err, IsNil)
	err = s.State.SetEnvironConfig(cfg)
	c.Assert(err, IsNil)

	s.State.StartSync()

	// wait for the PA to load the new configuration
	select {
	case <-cfgObserver:
	case <-time.After(200 * time.Millisecond):
		c.Fatalf("PA did not action config change")
	}

	// create a third machine
	m, err = s.State.AddMachine(state.JobHostUnits)
	c.Assert(err, IsNil)

	// the PA should create it using the new environment
	s.checkStartInstance(c, m, "beef")
}
Example #7
func (s *ProvisionerSuite) TestConstraints(c *C) {
	// Create a machine with non-standard constraints.
	m, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)
	cons := constraints.MustParse("mem=4G arch=amd64")
	err = m.SetConstraints(cons)
	c.Assert(err, IsNil)

	// Start a provisioner and check those constraints are used.
	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)
	s.checkStartInstanceCustom(c, m, "pork", cons)
}
Example #8
func (s *ProvisionerSuite) TestSimple(c *C) {
	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// Check that an instance is provisioned when the machine is created...
	m, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)
	s.checkStartInstance(c, m)

	// ...and removed, along with the machine, when the machine is Dead.
	c.Assert(m.EnsureDead(), IsNil)
	s.checkStopInstance(c)
	s.waitRemoved(c, m)
}
Example #9
func (s *ProvisionerSuite) TestProvisioningDoesNotOccurWithAnInvalidEnvironment(c *C) {
	err := s.invalidateEnvironment(c)
	c.Assert(err, IsNil)

	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// try to create a machine
	_, err = s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)

	// the PA should not create it
	s.checkNoOperations(c)
}
Example #10
func (s *ProvisionerSuite) TestProvisioningDoesNotProvisionTheSameMachineAfterRestart(c *C) {
	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// create a machine
	m, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)
	s.checkStartInstance(c, m)

	// restart the PA
	stop(c, p)
	p = provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// check that there is only one machine known
	machines, err := p.AllMachines()
	c.Assert(err, IsNil)
	c.Check(len(machines), Equals, 1)
	c.Check(machines[0].Id(), Equals, "0")

	// the PA should not create it a second time
	s.checkNoOperations(c)
}
Example #11
func (s *ProvisionerSuite) TestProvisioningDoesNotOccurWithAnInvalidEnvironment(c *C) {
	err := s.invalidateEnvironment(c)
	c.Assert(err, IsNil)

	p := provisioner.NewProvisioner(s.State)
	defer s.stopProvisioner(c, p)

	// try to create a machine
	_, err = s.State.AddMachine(state.JobHostUnits)
	c.Assert(err, IsNil)

	// the PA should not create it
	s.checkNotStartInstance(c)
}
Example #12
// Start and stop one machine, watch the PA.
func (s *ProvisionerSuite) TestSimple(c *C) {
	p := provisioner.NewProvisioner(s.State)
	defer s.stopProvisioner(c, p)

	// place a new machine into the state
	m, err := s.State.AddMachine(state.JobHostUnits)
	c.Assert(err, IsNil)

	s.checkStartInstance(c, m, "pork")

	// now mark it as dead
	c.Assert(m.EnsureDead(), IsNil)

	// watch the PA remove it
	s.checkStopInstance(c)
}
Example #13
func (s *ProvisionerSuite) TestProvisioningOccursWithFixedEnvironment(c *C) {
	err := s.invalidateEnvironment(c)
	c.Assert(err, IsNil)

	p := provisioner.NewProvisioner(s.State)
	defer s.stopProvisioner(c, p)

	// try to create a machine
	m, err := s.State.AddMachine(state.JobHostUnits)
	c.Assert(err, IsNil)

	// the PA should not create it
	s.checkNotStartInstance(c)

	err = s.fixEnvironment()
	c.Assert(err, IsNil)

	s.checkStartInstance(c, m, "pork")
}
Example #14
func (a *MachineAgent) RunOnce(st *state.State, e AgentState) error {
	m := e.(*state.Machine)
	log.Printf("cmd/jujud: running jobs for machine agent: %v", m.Jobs())
	tasks := []task{NewUpgrader(st, m, a.Conf.DataDir)}
	for _, j := range m.Jobs() {
		switch j {
		case state.JobHostUnits:
			tasks = append(tasks,
				newDeployer(st, m.WatchPrincipalUnits(), a.Conf.DataDir))
		case state.JobManageEnviron:
			tasks = append(tasks,
				provisioner.NewProvisioner(st),
				firewaller.NewFirewaller(st))
		default:
			log.Printf("cmd/jujud: ignoring unknown job %q", j)
		}
	}
	return runTasks(a.tomb.Dying(), tasks...)
}
Example #15
func (s *ProvisionerSuite) TestProvisioningOccursWithFixedEnvironment(c *C) {
	err := s.invalidateEnvironment(c)
	c.Assert(err, IsNil)

	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// try to create a machine
	m, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)

	// the PA should not create it
	s.checkNoOperations(c)

	err = s.fixEnvironment()
	c.Assert(err, IsNil)

	s.checkStartInstance(c, m)
}
Example #16
func (s *ProvisionerSuite) TestProvisioningDoesOccurAfterInvalidEnvironmentPublished(c *C) {
	p := provisioner.NewProvisioner(s.State, "0")
	defer stop(c, p)

	// place a new machine into the state
	m, err := s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)

	s.checkStartInstance(c, m)

	err = s.invalidateEnvironment(c)
	c.Assert(err, IsNil)

	// create a second machine
	m, err = s.State.AddMachine(config.DefaultSeries, state.JobHostUnits)
	c.Assert(err, IsNil)

	// the PA should create it using the old environment
	s.checkStartInstance(c, m)
}
Example #17
func (s *ProvisionerSuite) TestProvisionerStartStop(c *C) {
	p := provisioner.NewProvisioner(s.State, "0")
	c.Assert(p.Stop(), IsNil)
}
Example #18
func (s *lxcProvisionerSuite) newLxcProvisioner() *provisioner.Provisioner {
	return provisioner.NewProvisioner(provisioner.LXC, s.State, s.machineId, s.DataDir())
}
Example #19
func (s *ProvisionerSuite) newEnvironProvisioner(machineId string) *provisioner.Provisioner {
	return provisioner.NewProvisioner(provisioner.ENVIRON, s.State, machineId, "")
}
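Examples #18 and #19 show a later constructor signature in which the first argument is a provisioner type constant (provisioner.LXC or provisioner.ENVIRON) selecting what kind of instances the worker manages, followed by the state connection, the agent's machine id, and a data directory. A hypothetical helper generalising the two suite wrappers above; the function name is an assumption, not code from the source:

// newProvisioner is a hypothetical generalisation of the two wrappers
// above: kind selects environ or LXC provisioning, and dataDir may be
// empty for the environ case, as in Example #19.
func newProvisioner(kind provisioner.ProvisionerType, st *state.State, machineId, dataDir string) *provisioner.Provisioner {
	return provisioner.NewProvisioner(kind, st, machineId, dataDir)
}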
Example #20
File: machine.go  Project: rif/golang-stuff
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	st, entity, err := openState(a.Conf.Conf, a)
	if err != nil {
		return nil, err
	}
	// If this fails, other bits will fail, so we just log the error and
	// let the other failures actually restart the runners.
	if err := EnsureAPIInfo(a.Conf.Conf, st, entity); err != nil {
		log.Warningf("failed to EnsureAPIInfo: %v", err)
	}
	reportOpenedState(st)
	m := entity.(*state.Machine)
	// TODO(rog) use more discriminating test for errors
	// rather than taking everything down indiscriminately.
	dataDir := a.Conf.DataDir
	runner := worker.NewRunner(allFatal, moreImportant)
	runner.StartWorker("upgrader", func() (worker.Worker, error) {
		// TODO(rog) use id instead of *Machine (or introduce Clone method)
		return NewUpgrader(st, m, dataDir), nil
	})
	// At this stage, since we don't embed lxc containers, just start an lxc
	// provisioner task for non-lxc containers.  Since we have only LXC
	// containers and normal machines, this effectively means that we only
	// have an LXC provisioner when we have a normally provisioned machine
	// (through the environ-provisioner).  With the upcoming advent of KVM
	// containers, it is likely that we will want an LXC provisioner on a KVM
	// machine, and once we get nested LXC containers, we can remove this
	// check.
	providerType := os.Getenv("JUJU_PROVIDER_TYPE")
	if providerType != provider.Local && m.ContainerType() != instance.LXC {
		workerName := fmt.Sprintf("%s-provisioner", provisioner.LXC)
		runner.StartWorker(workerName, func() (worker.Worker, error) {
			return provisioner.NewProvisioner(provisioner.LXC, st, a.MachineId, dataDir), nil
		})
	}
	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	if providerType == provider.Local && m.Id() == bootstrapMachineId {
		runner.StartWorker("local-storage", func() (worker.Worker, error) {
			return localstorage.NewWorker(), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			runner.StartWorker("deployer", func() (worker.Worker, error) {
				return newDeployer(st, m.Id(), dataDir), nil
			})
		case state.JobManageEnviron:
			runner.StartWorker("environ-provisioner", func() (worker.Worker, error) {
				return provisioner.NewProvisioner(provisioner.ENVIRON, st, a.MachineId, dataDir), nil
			})
			runner.StartWorker("firewaller", func() (worker.Worker, error) {
				return firewaller.NewFirewaller(st), nil
			})
		case state.JobManageState:
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				if len(a.Conf.StateServerCert) == 0 || len(a.Conf.StateServerKey) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				return apiserver.NewServer(st, fmt.Sprintf(":%d", a.Conf.APIPort), a.Conf.StateServerCert, a.Conf.StateServerKey)
			})
			runner.StartWorker("cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			runner.StartWorker("resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
		default:
			log.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}
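Example #20 registers every task through runner.StartWorker with a factory function rather than a constructed worker: when a worker dies with a non-fatal error, the runner can call the factory again to build and restart a fresh instance. The following is a self-contained sketch of that restart pattern for illustration only; it is not juju's actual worker package, and all names in it are invented.

package main

import (
	"errors"
	"fmt"
	"time"
)

// worker is the minimal contract the runner relies on: run until
// finished or failed, and report the outcome.
type worker interface {
	Run() error
}

// startFunc builds a fresh worker. Taking a factory instead of an
// instance is what lets the runner restart a failed worker.
type startFunc func() (worker, error)

// supervise runs the worker built by start, rebuilding and restarting
// it after each failure, with a fixed back-off between attempts.
func supervise(name string, start startFunc) {
	for {
		w, err := start()
		if err != nil {
			fmt.Printf("%s: cannot start: %v\n", name, err)
			return
		}
		if err := w.Run(); err == nil {
			return // clean exit: no restart
		} else {
			fmt.Printf("%s: failed: %v; restarting\n", name, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

// flaky is a demo worker that fails a set number of times before
// finally succeeding, to exercise the restart path.
type flaky struct{ failures *int }

func (f flaky) Run() error {
	if *f.failures > 0 {
		*f.failures--
		return errors.New("transient failure")
	}
	return nil
}

func main() {
	n := 2
	supervise("demo", func() (worker, error) {
		return flaky{failures: &n}, nil
	})
}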