Example #1
File: apiconn_test.go Project: jkary/core
func (*NewAPIConnSuite) TestNewConn(c *gc.C) {
	cfg, err := config.New(config.NoDefaults, dummy.SampleConfig())
	c.Assert(err, gc.IsNil)
	ctx := coretesting.Context(c)
	env, err := environs.Prepare(cfg, ctx, configstore.NewMem())
	c.Assert(err, gc.IsNil)

	envtesting.UploadFakeTools(c, env.Storage())
	err = bootstrap.Bootstrap(ctx, env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)

	cfg = env.Config()
	cfg, err = cfg.Apply(map[string]interface{}{
		"secret": "fnord",
	})
	c.Assert(err, gc.IsNil)
	err = env.SetConfig(cfg)
	c.Assert(err, gc.IsNil)

	conn, err := juju.NewAPIConn(env, api.DefaultDialOpts())
	c.Assert(err, gc.IsNil)
	c.Assert(conn.Environ, gc.Equals, env)
	c.Assert(conn.State, gc.NotNil)

	// The secrets will not be updated, as they already exist in state:
	// the bootstrap-time value "pork" is preserved, not the later "fnord".
	attrs, err := conn.State.Client().EnvironmentGet()
	c.Assert(err, gc.IsNil)
	c.Assert(attrs["secret"], gc.Equals, "pork")

	c.Assert(conn.Close(), gc.IsNil)
}
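
The connection pattern this test exercises can be reduced to a small non-test helper: dial the API for an already-bootstrapped environment, use the client facade, and close the connection on the way out. The sketch below reuses only calls that appear in these examples (juju.NewAPIConn, api.DefaultDialOpts, State.Client().Status); the helper name connectAndStatus and the error-returning shape are assumptions, not code from the project:

func connectAndStatus(env environs.Environ) error {
	// Hypothetical helper; mirrors the test above, but propagates
	// errors instead of asserting them.
	conn, err := juju.NewAPIConn(env, api.DefaultDialOpts())
	if err != nil {
		return err
	}
	defer conn.Close()

	// Status(nil) requests the full environment status, as in example #3.
	if _, err := conn.State.Client().Status(nil); err != nil {
		return err
	}
	return nil
}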
Example #2
File: livetests.go Project: jkary/core
func (t *LiveTests) TestCheckEnvironmentOnConnect(c *gc.C) {
	// When a new connection is established to a bootstrapped environment,
	// we check that we are running against a juju-core environment.
	if !t.CanOpenState {
		c.Skip("CanOpenState is false; cannot open state connection")
	}
	t.BootstrapOnce(c)

	conn, err := juju.NewConn(t.Env)
	c.Assert(err, gc.IsNil)
	conn.Close()

	apiConn, err := juju.NewAPIConn(t.Env, api.DefaultDialOpts())
	c.Assert(err, gc.IsNil)
	apiConn.Close()
}
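
The same check-on-connect flow, distilled into a standalone form: open and close both the direct state connection and the API connection, failing on the first error. checkConnections is a hypothetical name, and returning errors in place of the gocheck asserts is an assumption:

func checkConnections(env environs.Environ) error {
	// Direct state connection, as opened by juju.NewConn above.
	conn, err := juju.NewConn(env)
	if err != nil {
		return err
	}
	conn.Close()

	// API connection with the default dial options.
	apiConn, err := juju.NewAPIConn(env, api.DefaultDialOpts())
	if err != nil {
		return err
	}
	apiConn.Close()
	return nil
}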
Example #3
File: livetests.go Project: jkary/core
func (t *LiveTests) TestBootstrapAndDeploy(c *gc.C) {
	if !t.CanOpenState || !t.HasProvisioner {
		c.Skip(fmt.Sprintf("skipping provisioner test, CanOpenState: %v, HasProvisioner: %v", t.CanOpenState, t.HasProvisioner))
	}
	t.BootstrapOnce(c)

	// TODO(niemeyer): Stop growing this kitchen sink test and split it into proper parts.

	c.Logf("opening connection")
	conn, err := juju.NewConn(t.Env)
	c.Assert(err, gc.IsNil)
	defer conn.Close()

	c.Logf("opening API connection")
	apiConn, err := juju.NewAPIConn(t.Env, api.DefaultDialOpts())
	c.Assert(err, gc.IsNil)
	defer apiConn.Close()

	// Check that the agent version has made it through the
	// bootstrap process (it's optional in the config.Config)
	cfg, err := conn.State.EnvironConfig()
	c.Assert(err, gc.IsNil)
	agentVersion, ok := cfg.AgentVersion()
	c.Check(ok, gc.Equals, true)
	c.Check(agentVersion, gc.Equals, version.Current.Number)

	// Check that the constraints have been set in the environment.
	cons, err := conn.State.EnvironConstraints()
	c.Assert(err, gc.IsNil)
	c.Assert(cons.String(), gc.Equals, "mem=2048M")

	// Wait for machine agent to come up on the bootstrap
	// machine and find the deployed series from that.
	m0, err := conn.State.Machine("0")
	c.Assert(err, gc.IsNil)

	instId0, err := m0.InstanceId()
	c.Assert(err, gc.IsNil)

	// Check that the API connection is working.
	status, err := apiConn.State.Client().Status(nil)
	c.Assert(err, gc.IsNil)
	c.Assert(status.Machines["0"].InstanceId, gc.Equals, string(instId0))

	mw0 := newMachineToolWaiter(m0)
	defer mw0.Stop()

	// If the series has not been specified, we expect the most recent Ubuntu LTS release to be used.
	expectedVersion := version.Current
	expectedVersion.Series = config.LatestLtsSeries()

	mtools0 := waitAgentTools(c, mw0, expectedVersion)

	// Create a new service and deploy a unit of it.
	c.Logf("deploying service")
	repoDir := c.MkDir()
	url := coretesting.Charms.ClonedURL(repoDir, mtools0.Version.Series, "dummy")
	sch, err := conn.PutCharm(url, &charm.LocalRepository{Path: repoDir}, false)
	c.Assert(err, gc.IsNil)
	svc, err := conn.State.AddService("dummy", "user-admin", sch, nil, nil)
	c.Assert(err, gc.IsNil)
	units, err := juju.AddUnits(conn.State, svc, 1, "")
	c.Assert(err, gc.IsNil)
	unit := units[0]

	// Wait for the unit's machine and associated agent to come up
	// and announce itself.
	mid1, err := unit.AssignedMachineId()
	c.Assert(err, gc.IsNil)
	m1, err := conn.State.Machine(mid1)
	c.Assert(err, gc.IsNil)
	mw1 := newMachineToolWaiter(m1)
	defer mw1.Stop()
	waitAgentTools(c, mw1, mtools0.Version)

	err = m1.Refresh()
	c.Assert(err, gc.IsNil)
	instId1, err := m1.InstanceId()
	c.Assert(err, gc.IsNil)
	uw := newUnitToolWaiter(unit)
	defer uw.Stop()
	utools := waitAgentTools(c, uw, expectedVersion)

	// Check that we can upgrade the environment.
	newVersion := utools.Version
	newVersion.Patch++
	t.checkUpgrade(c, conn, newVersion, mw0, mw1, uw)

	// BUG(niemeyer): Logic below is very much wrong. Must be:
	//
	// 1. EnsureDying on the unit and EnsureDying on the machine
	// 2. Unit dies by itself
	// 3. Machine removes dead unit
	// 4. Machine dies by itself
	// 5. Provisioner removes dead machine
	//

	// Now remove the unit and its assigned machine and
	// check that the PA removes it.
	c.Logf("removing unit")
	err = unit.Destroy()
	c.Assert(err, gc.IsNil)

	// Wait until unit is dead
	uwatch := unit.Watch()
	defer uwatch.Stop()
	for unit.Life() != state.Dead {
		c.Logf("waiting for unit change")
		<-uwatch.Changes()
		err := unit.Refresh()
		c.Logf("refreshed; err %v", err)
		if errors.IsNotFound(err) {
			c.Logf("unit has been removed")
			break
		}
		c.Assert(err, gc.IsNil)
	}
	for {
		c.Logf("destroying machine")
		err := m1.Destroy()
		if err == nil {
			break
		}
		c.Assert(err, gc.FitsTypeOf, &state.HasAssignedUnitsError{})
		time.Sleep(5 * time.Second)
		err = m1.Refresh()
		if errors.IsNotFound(err) {
			break
		}
		c.Assert(err, gc.IsNil)
	}
	c.Logf("waiting for instance to be removed")
	t.assertStopInstance(c, conn.Environ, instId1)
}
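
The destroy-retry loop at the end of this test is reusable on its own: Machine.Destroy returns a *state.HasAssignedUnitsError while the dying unit is still assigned to the machine, so the code polls until the machine can be destroyed or has already been removed from state. A standalone sketch under the same assumptions (hypothetical helper name, same five-second poll interval):

func destroyMachineWhenEmpty(m *state.Machine) error {
	for {
		err := m.Destroy()
		if err == nil {
			return nil
		}
		// Only HasAssignedUnitsError is worth retrying; anything else is fatal.
		if _, ok := err.(*state.HasAssignedUnitsError); !ok {
			return err
		}
		time.Sleep(5 * time.Second)
		// The machine may have been removed while we slept.
		if err := m.Refresh(); errors.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
	}
}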