Example No. 1
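// TestNewConn bootstraps a dummy environment and verifies that
// NewAPIConn returns a working connection tied to that environment.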
func (*NewAPIConnSuite) TestNewConn(c *gc.C) {
	cfg, err := config.New(config.NoDefaults, dummy.SampleConfig())
	c.Assert(err, gc.IsNil)
	ctx := coretesting.Context(c)
	env, err := environs.Prepare(cfg, ctx, configstore.NewMem())
	c.Assert(err, gc.IsNil)

	envtesting.UploadFakeTools(c, env.Storage())
	err = bootstrap.Bootstrap(ctx, env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)

	cfg = env.Config()
	cfg, err = cfg.Apply(map[string]interface{}{
		"secret": "fnord",
	})
	c.Assert(err, gc.IsNil)
	err = env.SetConfig(cfg)
	c.Assert(err, gc.IsNil)

	conn, err := juju.NewAPIConn(env, api.DefaultDialOpts())
	c.Assert(err, gc.IsNil)
	c.Assert(conn.Environ, gc.Equals, env)
	c.Assert(conn.State, gc.NotNil)

	// The secrets will not be updated, as they were already pushed to
	// state during bootstrap, so the original "pork" value from the
	// sample config wins over the later "fnord" change.
	attrs, err := conn.State.Client().EnvironmentGet()
	c.Assert(err, gc.IsNil)
	c.Assert(attrs["secret"], gc.Equals, "pork")

	c.Assert(conn.Close(), gc.IsNil)
}
Example No. 2
func (t *LiveTests) TestCheckEnvironmentOnConnect(c *gc.C) {
	// When a new connection is established to a bootstrapped
	// environment, we check that it is a juju-core environment.
	if !t.CanOpenState {
		c.Skip("CanOpenState is false; cannot open state connection")
	}
	t.BootstrapOnce(c)

	conn, err := juju.NewConn(t.Env)
	c.Assert(err, gc.IsNil)
	conn.Close()

	apiConn, err := juju.NewAPIConn(t.Env, api.DefaultDialOpts())
	c.Assert(err, gc.IsNil)
	apiConn.Close()
}
Example No. 3
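// DirectClientFactory opens the environment described by conf, dials its
// API server with the default dial options, and returns a Client wrapping
// the resulting connection.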
func DirectClientFactory(conf *config.Config) (*Client, error) {
	env, err := environs.New(conf)
	if err != nil {
		return nil, err
	}

	dialOpts := api.DefaultDialOpts()
	conn, err := juju.NewAPIConn(env, dialOpts)
	if err != nil {
		return nil, err
	}

	wrapper := &Client{}
	wrapper.client = conn.State.Client()
	wrapper.apiState = conn.State
	// Note: the API connection is deliberately left open here; closing
	// it would invalidate the returned Client.
	return wrapper, nil
}
Example No. 4
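// Run restores an environment from a backup file: it re-bootstraps the
// environment, restores the bootstrap machine from the backup (retrying
// the API and state connections while the new server comes up), and
// updates the remaining machines to point at the new state server.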
func (c *restoreCommand) Run(ctx *cmd.Context) error {
	if c.showDescription {
		fmt.Fprintf(ctx.Stdout, "%s\n", c.Info().Purpose)
		return nil
	}
	if err := c.Log.Start(ctx); err != nil {
		return err
	}
	agentConf, err := extractConfig(c.backupFile)
	if err != nil {
		return fmt.Errorf("cannot extract configuration from backup file: %v", err)
	}
	progress("extracted credentials from backup file")
	store, err := configstore.Default()
	if err != nil {
		return err
	}
	cfg, _, err := environs.ConfigForName(c.EnvName, store)
	if err != nil {
		return err
	}
	env, err := rebootstrap(cfg, ctx, c.Constraints)
	if err != nil {
		return fmt.Errorf("cannot re-bootstrap environment: %v", err)
	}
	progress("connecting to newly bootstrapped instance")
	var conn *juju.APIConn
	// The state server backend may not be ready to accept logins so we retry.
	// We'll do up to 8 retries over 2 minutes to give the server time to come up.
	// Typically we expect only 1 retry will be needed.
	attempt := utils.AttemptStrategy{Delay: 15 * time.Second, Min: 8}
	for a := attempt.Start(); a.Next(); {
		conn, err = juju.NewAPIConn(env, api.DefaultDialOpts())
		if err == nil || errors.Cause(err).Error() != "EOF" {
			break
		}
		progress("bootstrapped instance not ready - attempting to redial")
	}
	if err != nil {
		return fmt.Errorf("cannot connect to bootstrap instance: %v", err)
	}
	progress("restoring bootstrap machine")
	newInstId, machine0Addr, err := restoreBootstrapMachine(conn, c.backupFile, agentConf)
	if err != nil {
		return fmt.Errorf("cannot restore bootstrap machine: %v", err)
	}
	progress("restored bootstrap machine")
	// Update the environ state to point to the new instance.
	if err := bootstrap.SaveState(env.Storage(), &bootstrap.BootstrapState{
		StateInstances: []instance.Id{newInstId},
	}); err != nil {
		return fmt.Errorf("cannot update environ bootstrap state storage: %v", err)
	}
	// Construct our own state info rather than using juju.NewConn so
	// that we can avoid storage eventual-consistency issues
	// (and it's faster too).
	caCert, ok := cfg.CACert()
	if !ok {
		return fmt.Errorf("configuration has no CA certificate")
	}
	progress("opening state")
	// We need to retry here to allow mongo to come up on the restored state server.
	// The connection might succeed due to the mongo dial retries but there may still
	// be a problem issuing database commands.
	var st *state.State
	for a := attempt.Start(); a.Next(); {
		st, err = state.Open(&state.Info{
			Info: mongo.Info{
				Addrs:  []string{fmt.Sprintf("%s:%d", machine0Addr, cfg.StatePort())},
				CACert: caCert,
			},
			Tag:      agentConf.Credentials.Tag,
			Password: agentConf.Credentials.Password,
		}, mongo.DefaultDialOpts(), environs.NewStatePolicy())
		if err == nil {
			break
		}
		progress("state server not ready - attempting to re-connect")
	}
	if err != nil {
		return fmt.Errorf("cannot open state: %v", err)
	}
	progress("updating all machines")
	if err := updateAllMachines(st, machine0Addr); err != nil {
		return fmt.Errorf("cannot update machines: %v", err)
	}
	return nil
}
Example No. 5
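// Run restores an environment from a backup file: it re-bootstraps the
// environment, restores the bootstrap machine from the backup, and
// updates the remaining machines to point at the new state server.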
func (c *restoreCommand) Run(ctx *cmd.Context) error {
	if c.showDescription {
		fmt.Fprintf(ctx.Stdout, "%s\n", c.Info().Purpose)
		return nil
	}
	if err := c.Log.Start(ctx); err != nil {
		return err
	}
	agentConf, err := extractConfig(c.backupFile)
	if err != nil {
		return fmt.Errorf("cannot extract configuration from backup file: %v", err)
	}
	progress("extracted credentials from backup file")
	store, err := configstore.Default()
	if err != nil {
		return err
	}
	cfg, _, err := environs.ConfigForName(c.EnvName, store)
	if err != nil {
		return err
	}
	env, err := rebootstrap(cfg, ctx, c.Constraints)
	if err != nil {
		return fmt.Errorf("cannot re-bootstrap environment: %v", err)
	}
	progress("connecting to newly bootstrapped instance")
	conn, err := juju.NewAPIConn(env, api.DefaultDialOpts())
	if err != nil {
		return fmt.Errorf("cannot connect to bootstrap instance: %v", err)
	}
	progress("restoring bootstrap machine")
	newInstId, machine0Addr, err := restoreBootstrapMachine(conn, c.backupFile, agentConf)
	if err != nil {
		return fmt.Errorf("cannot restore bootstrap machine: %v", err)
	}
	progress("restored bootstrap machine")
	// Update the environ state to point to the new instance.
	if err := bootstrap.SaveState(env.Storage(), &bootstrap.BootstrapState{
		StateInstances: []instance.Id{newInstId},
	}); err != nil {
		return fmt.Errorf("cannot update environ bootstrap state storage: %v", err)
	}
	// Construct our own state info rather than using juju.NewConn so
	// that we can avoid storage eventual-consistency issues
	// (and it's faster too).
	caCert, ok := cfg.CACert()
	if !ok {
		return fmt.Errorf("configuration has no CA certificate")
	}
	progress("opening state")
	st, err := state.Open(&state.Info{
		Info: mongo.Info{
			Addrs:  []string{fmt.Sprintf("%s:%d", machine0Addr, cfg.StatePort())},
			CACert: caCert,
		},
		Tag:      agentConf.Credentials.Tag,
		Password: agentConf.Credentials.Password,
	}, mongo.DefaultDialOpts(), environs.NewStatePolicy())
	if err != nil {
		return fmt.Errorf("cannot open state: %v", err)
	}
	progress("updating all machines")
	if err := updateAllMachines(st, machine0Addr); err != nil {
		return fmt.Errorf("cannot update machines: %v", err)
	}
	return nil
}
Example No. 6
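// setUpConn creates an isolated JUJU_HOME under a fresh root directory,
// prepares and bootstraps the dummy environment, and opens both state
// and API connections for use by the suite.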
func (s *JujuConnSuite) setUpConn(c *gc.C) {
	if s.RootDir != "" {
		panic("JujuConnSuite.setUpConn without teardown")
	}
	s.RootDir = c.MkDir()
	s.oldHome = utils.Home()
	home := filepath.Join(s.RootDir, "/home/ubuntu")
	err := os.MkdirAll(home, 0777)
	c.Assert(err, gc.IsNil)
	utils.SetHome(home)
	s.oldJujuHome = osenv.SetJujuHome(filepath.Join(home, ".juju"))
	err = os.Mkdir(osenv.JujuHome(), 0777)
	c.Assert(err, gc.IsNil)

	err = os.MkdirAll(s.DataDir(), 0777)
	c.Assert(err, gc.IsNil)
	s.PatchEnvironment(osenv.JujuEnvEnvKey, "")

	// TODO(rog) remove these files and add them only when
	// the tests specifically need them (in cmd/juju for example)
	s.writeSampleConfig(c, osenv.JujuHomePath("environments.yaml"))

	err = ioutil.WriteFile(osenv.JujuHomePath("dummyenv-cert.pem"), []byte(testing.CACert), 0666)
	c.Assert(err, gc.IsNil)

	err = ioutil.WriteFile(osenv.JujuHomePath("dummyenv-private-key.pem"), []byte(testing.CAKey), 0600)
	c.Assert(err, gc.IsNil)

	store, err := configstore.Default()
	c.Assert(err, gc.IsNil)
	s.ConfigStore = store

	ctx := testing.Context(c)
	environ, err := environs.PrepareFromName("dummyenv", ctx, s.ConfigStore)
	c.Assert(err, gc.IsNil)
	// Sanity check: make sure we've got the correct environment.
	c.Assert(environ.Name(), gc.Equals, "dummyenv")
	s.PatchValue(&dummy.DataDir, s.DataDir())
	s.LogDir = c.MkDir()
	s.PatchValue(&dummy.LogDir, s.LogDir)

	versions := PreferredDefaultVersions(environ.Config(), version.Binary{Number: version.Current.Number, Series: "precise", Arch: "amd64"})
	versions = append(versions, version.Current)

	// Upload tools for both the preferred and the fake default series.
	envtesting.MustUploadFakeToolsVersions(environ.Storage(), versions...)
	c.Assert(bootstrap.Bootstrap(ctx, environ, environs.BootstrapParams{}), gc.IsNil)

	s.BackingState = environ.(GetStater).GetStateInAPIServer()

	conn, err := juju.NewConn(environ)
	c.Assert(err, gc.IsNil)
	s.Conn = conn
	s.State = conn.State

	apiConn, err := juju.NewAPIConn(environ, api.DialOpts{})
	c.Assert(err, gc.IsNil)
	s.APIConn = apiConn
	s.APIState = apiConn.State
	s.environ = environ
}
Example No. 7
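// TestBootstrapAndDeploy bootstraps a live environment, deploys a unit of
// the dummy charm, upgrades the environment, and then tears the unit and
// its machine back down, checking each step along the way.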
func (t *LiveTests) TestBootstrapAndDeploy(c *gc.C) {
	if !t.CanOpenState || !t.HasProvisioner {
		c.Skip(fmt.Sprintf("skipping provisioner test, CanOpenState: %v, HasProvisioner: %v", t.CanOpenState, t.HasProvisioner))
	}
	t.BootstrapOnce(c)

	// TODO(niemeyer): Stop growing this kitchen sink test and split it into proper parts.

	c.Logf("opening connection")
	conn, err := juju.NewConn(t.Env)
	c.Assert(err, gc.IsNil)
	defer conn.Close()

	c.Logf("opening API connection")
	apiConn, err := juju.NewAPIConn(t.Env, api.DefaultDialOpts())
	c.Assert(err, gc.IsNil)
	defer apiConn.Close()

	// Check that the agent version has made it through the
	// bootstrap process (it's optional in the config.Config)
	cfg, err := conn.State.EnvironConfig()
	c.Assert(err, gc.IsNil)
	agentVersion, ok := cfg.AgentVersion()
	c.Check(ok, gc.Equals, true)
	c.Check(agentVersion, gc.Equals, version.Current.Number)

	// Check that the constraints have been set in the environment.
	cons, err := conn.State.EnvironConstraints()
	c.Assert(err, gc.IsNil)
	c.Assert(cons.String(), gc.Equals, "mem=2048M")

	// Wait for machine agent to come up on the bootstrap
	// machine and find the deployed series from that.
	m0, err := conn.State.Machine("0")
	c.Assert(err, gc.IsNil)

	instId0, err := m0.InstanceId()
	c.Assert(err, gc.IsNil)

	// Check that the API connection is working.
	status, err := apiConn.State.Client().Status(nil)
	c.Assert(err, gc.IsNil)
	c.Assert(status.Machines["0"].InstanceId, gc.Equals, string(instId0))

	mw0 := newMachineToolWaiter(m0)
	defer mw0.Stop()

	// If the series has not been specified, we expect the most recent Ubuntu LTS release to be used.
	expectedVersion := version.Current
	expectedVersion.Series = config.LatestLtsSeries()

	mtools0 := waitAgentTools(c, mw0, expectedVersion)

	// Create a new service and deploy a unit of it.
	c.Logf("deploying service")
	repoDir := c.MkDir()
	url := charmtesting.Charms.ClonedURL(repoDir, mtools0.Version.Series, "dummy")
	sch, err := conn.PutCharm(url, &charm.LocalRepository{Path: repoDir}, false)
	c.Assert(err, gc.IsNil)
	svc, err := conn.State.AddService("dummy", "user-admin", sch, nil)
	c.Assert(err, gc.IsNil)
	units, err := juju.AddUnits(conn.State, svc, 1, "")
	c.Assert(err, gc.IsNil)
	unit := units[0]

	// Wait for the unit's machine and associated agent to come up
	// and announce itself.
	mid1, err := unit.AssignedMachineId()
	c.Assert(err, gc.IsNil)
	m1, err := conn.State.Machine(mid1)
	c.Assert(err, gc.IsNil)
	mw1 := newMachineToolWaiter(m1)
	defer mw1.Stop()
	waitAgentTools(c, mw1, mtools0.Version)

	err = m1.Refresh()
	c.Assert(err, gc.IsNil)
	instId1, err := m1.InstanceId()
	c.Assert(err, gc.IsNil)
	uw := newUnitToolWaiter(unit)
	defer uw.Stop()
	utools := waitAgentTools(c, uw, expectedVersion)

	// Check that we can upgrade the environment.
	newVersion := utools.Version
	newVersion.Patch++
	t.checkUpgrade(c, conn, newVersion, mw0, mw1, uw)

	// BUG(niemeyer): Logic below is very much wrong. Must be:
	//
	// 1. EnsureDying on the unit and EnsureDying on the machine
	// 2. Unit dies by itself
	// 3. Machine removes dead unit
	// 4. Machine dies by itself
	// 5. Provisioner removes dead machine
	//

	// Now remove the unit and its assigned machine and
	// check that the PA removes it.
	c.Logf("removing unit")
	err = unit.Destroy()
	c.Assert(err, gc.IsNil)

	// Wait until the unit is dead.
	uwatch := unit.Watch()
	defer uwatch.Stop()
	for unit.Life() != state.Dead {
		c.Logf("waiting for unit change")
		<-uwatch.Changes()
		err := unit.Refresh()
		c.Logf("refreshed; err %v", err)
		if errors.IsNotFound(err) {
			c.Logf("unit has been removed")
			break
		}
		c.Assert(err, gc.IsNil)
	}
	for {
		c.Logf("destroying machine")
		err := m1.Destroy()
		if err == nil {
			break
		}
		c.Assert(err, gc.FitsTypeOf, &state.HasAssignedUnitsError{})
		time.Sleep(5 * time.Second)
		err = m1.Refresh()
		if errors.IsNotFound(err) {
			break
		}
		c.Assert(err, gc.IsNil)
	}
	c.Logf("waiting for instance to be removed")
	t.assertStopInstance(c, conn.Environ, instId1)
}