func (s *JujuConnSuite) setUpConn(c *C) {
	if s.RootDir != "" {
		panic("JujuConnSuite.setUpConn without teardown")
	}
	s.RootDir = c.MkDir()

	// Point $HOME at a scratch directory so everything the suite writes
	// stays under the test root.
	s.oldHome = os.Getenv("HOME")
	home := filepath.Join(s.RootDir, "/home/ubuntu")
	err := os.MkdirAll(home, 0777)
	c.Assert(err, IsNil)
	os.Setenv("HOME", home)

	// Create a data directory mirroring /var/lib/juju under the test root.
	dataDir := filepath.Join(s.RootDir, "/var/lib/juju")
	err = os.MkdirAll(dataDir, 0777)
	c.Assert(err, IsNil)

	// Write a dummy environments.yaml plus the CA certificate and key
	// for the dummyenv environment it describes.
	yaml := []byte(fmt.Sprintf(envConfig, version.Current.Number))
	err = ioutil.WriteFile(config.JujuHomePath("environments.yaml"), yaml, 0600)
	c.Assert(err, IsNil)
	err = ioutil.WriteFile(config.JujuHomePath("dummyenv-cert.pem"), []byte(testing.CACert), 0666)
	c.Assert(err, IsNil)
	err = ioutil.WriteFile(config.JujuHomePath("dummyenv-private-key.pem"), []byte(testing.CAKey), 0600)
	c.Assert(err, IsNil)

	environ, err := environs.NewFromName("dummyenv")
	c.Assert(err, IsNil)
	// sanity check we've got the correct environment.
	c.Assert(environ.Name(), Equals, "dummyenv")
	c.Assert(environs.Bootstrap(environ, constraints.Value{}), IsNil)

	s.BackingState = environ.(GetStater).GetStateInAPIServer()

	conn, err := juju.NewConn(environ)
	c.Assert(err, IsNil)
	s.Conn = conn
	s.State = conn.State

	apiConn, err := juju.NewAPIConn(environ, api.DialOpts{})
	c.Assert(err, IsNil)
	s.APIConn = apiConn
	s.APIState = apiConn.State
	s.environ = environ
}
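// The environ.(GetStater) assertion above relies on the dummy provider
// exposing the *state.State that its in-process API server runs against.
// A minimal sketch of what that interface is assumed to look like, for
// orientation only; the method name comes from the call above, but the
// real definition lives with the dummy provider and may differ.
type GetStater interface {
	GetStateInAPIServer() *state.State
}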
func (t *LiveTests) TestBootstrapAndDeploy(c *C) {
	if !t.CanOpenState || !t.HasProvisioner {
		c.Skip(fmt.Sprintf("skipping provisioner test, CanOpenState: %v, HasProvisioner: %v", t.CanOpenState, t.HasProvisioner))
	}
	t.BootstrapOnce(c)

	// TODO(niemeyer): Stop growing this kitchen sink test and split it into proper parts.

	c.Logf("opening connection")
	conn, err := juju.NewConn(t.Env)
	c.Assert(err, IsNil)
	defer conn.Close()

	c.Logf("opening API connection")
	apiConn, err := juju.NewAPIConn(t.Env, api.DefaultDialOpts())
	c.Assert(err, IsNil)
	defer apiConn.Close()

	// Check that the agent version has made it through the
	// bootstrap process (it's optional in the config.Config).
	cfg, err := conn.State.EnvironConfig()
	c.Assert(err, IsNil)
	agentVersion, ok := cfg.AgentVersion()
	c.Check(ok, Equals, true)
	c.Check(agentVersion, Equals, version.CurrentNumber())

	// Check that the constraints have been set in the environment.
	cons, err := conn.State.EnvironConstraints()
	c.Assert(err, IsNil)
	c.Assert(cons.String(), Equals, "mem=2048M")

	// Wait for the machine agent to come up on the bootstrap
	// machine and find the deployed series from that.
	m0, err := conn.State.Machine("0")
	c.Assert(err, IsNil)
	instId0, err := m0.InstanceId()
	c.Assert(err, IsNil)

	// Check that the API connection is working.
	status, err := apiConn.State.Client().Status()
	c.Assert(err, IsNil)
	c.Assert(status.Machines["0"].InstanceId, Equals, string(instId0))

	mw0 := newMachineToolWaiter(m0)
	defer mw0.Stop()

	// If the series has not been specified, we expect the most recent
	// Ubuntu LTS release to be used.
	expectedVersion := version.Current
	expectedVersion.Series = config.DefaultSeries

	// The tool waiters block until the machine or unit agent reports tools
	// matching the expected version (see the waitForAgentTools sketch after
	// this test).
	mtools0 := waitAgentTools(c, mw0, expectedVersion)

	// Create a new service and deploy a unit of it.
	c.Logf("deploying service")
	repoDir := c.MkDir()
	url := coretesting.Charms.ClonedURL(repoDir, mtools0.Series, "dummy")
	sch, err := conn.PutCharm(url, &charm.LocalRepository{repoDir}, false)
	c.Assert(err, IsNil)
	svc, err := conn.State.AddService("dummy", sch)
	c.Assert(err, IsNil)
	units, err := conn.AddUnits(svc, 1, "")
	c.Assert(err, IsNil)
	unit := units[0]

	// Wait for the unit's machine and associated agent to come up
	// and announce itself.
	mid1, err := unit.AssignedMachineId()
	c.Assert(err, IsNil)
	m1, err := conn.State.Machine(mid1)
	c.Assert(err, IsNil)
	mw1 := newMachineToolWaiter(m1)
	defer mw1.Stop()
	waitAgentTools(c, mw1, mtools0.Binary)

	err = m1.Refresh()
	c.Assert(err, IsNil)
	instId1, err := m1.InstanceId()
	c.Assert(err, IsNil)

	uw := newUnitToolWaiter(unit)
	defer uw.Stop()
	utools := waitAgentTools(c, uw, expectedVersion)

	// Check that we can upgrade the environment.
	newVersion := utools.Binary
	newVersion.Patch++
	t.checkUpgrade(c, conn, newVersion, mw0, mw1, uw)

	// BUG(niemeyer): Logic below is very much wrong. Must be:
	//
	// 1. EnsureDying on the unit and EnsureDying on the machine
	// 2. Unit dies by itself
	// 3. Machine removes dead unit
	// 4. Machine dies by itself
	// 5. Provisioner removes dead machine
	//
	// Now remove the unit and its assigned machine and
	// check that the PA removes it.
c.Logf("removing unit") err = unit.Destroy() c.Assert(err, IsNil) // Wait until unit is dead uwatch := unit.Watch() defer uwatch.Stop() for unit.Life() != state.Dead { c.Logf("waiting for unit change") <-uwatch.Changes() err := unit.Refresh() c.Logf("refreshed; err %v", err) if errors.IsNotFoundError(err) { c.Logf("unit has been removed") break } c.Assert(err, IsNil) } for { c.Logf("destroying machine") err := m1.Destroy() if err == nil { break } c.Assert(err, FitsTypeOf, &state.HasAssignedUnitsError{}) time.Sleep(5 * time.Second) err = m1.Refresh() if errors.IsNotFoundError(err) { break } c.Assert(err, IsNil) } c.Logf("waiting for instance to be removed") t.assertStopInstance(c, conn.Environ, instId1) }