// Run restores a juju state server from a backup file: it extracts the agent
// credentials from the backup, re-bootstraps the environment, restores the
// backup contents onto the new bootstrap machine, and finally updates every
// machine agent to point at the new state server address.
func (c *restoreCommand) Run(ctx *cmd.Context) error {
	if c.showDescription {
		// --description mode: print the command's purpose and exit.
		fmt.Fprintf(ctx.Stdout, "%s\n", c.Info().Purpose)
		return nil
	}
	if err := c.Log.Start(ctx); err != nil {
		return err
	}
	agentConf, err := extractConfig(c.backupFile)
	if err != nil {
		return errors.Annotate(err, "cannot extract configuration from backup file")
	}
	progress("extracted credentials from backup file")
	store, err := configstore.Default()
	if err != nil {
		return err
	}
	cfg, err := c.Config(store)
	if err != nil {
		return err
	}
	env, err := rebootstrap(cfg, ctx, c.Constraints)
	if err != nil {
		return errors.Annotate(err, "cannot re-bootstrap environment")
	}
	progress("connecting to newly bootstrapped instance")
	var apiState *api.State
	// The state server backend may not be ready to accept logins so we retry.
	// We'll do up to 8 retries over 2 minutes to give the server time to come up.
	// Typically we expect only 1 retry will be needed.
	attempt := utils.AttemptStrategy{Delay: 15 * time.Second, Min: 8}
	for a := attempt.Start(); a.Next(); {
		apiState, err = juju.NewAPIState(env, api.DefaultDialOpts())
		// Only an "EOF" cause is treated as retryable (server not yet up);
		// success or any other error ends the retry loop.
		if err == nil || errors.Cause(err).Error() != "EOF" {
			break
		}
		progress("bootstrapped instance not ready - attempting to redial")
	}
	if err != nil {
		return errors.Annotate(err, "cannot connect to bootstrap instance")
	}
	progress("restoring bootstrap machine")
	machine0Addr, err := restoreBootstrapMachine(apiState, c.backupFile, agentConf)
	if err != nil {
		return errors.Annotate(err, "cannot restore bootstrap machine")
	}
	progress("restored bootstrap machine")
	// Re-dial after the restore; presumably restoreBootstrapMachine restarts
	// the API server — TODO(review) confirm. NOTE(review): neither the first
	// nor this second apiState connection is ever closed.
	apiState, err = juju.NewAPIState(env, api.DefaultDialOpts())
	progress("opening state")
	if err != nil {
		return errors.Annotate(err, "cannot connect to api server")
	}
	progress("updating all machines")
	if err := updateAllMachines(apiState, machine0Addr); err != nil {
		return errors.Annotate(err, "cannot update machines")
	}
	return nil
}
func (cs *NewAPIStateSuite) TestNewAPIState(c *gc.C) { cfg, err := config.New(config.NoDefaults, dummy.SampleConfig()) c.Assert(err, jc.ErrorIsNil) ctx := envtesting.BootstrapContext(c) env, err := environs.Prepare(cfg, ctx, configstore.NewMem()) c.Assert(err, jc.ErrorIsNil) storageDir := c.MkDir() cs.PatchValue(&envtools.DefaultBaseURL, storageDir) stor, err := filestorage.NewFileStorageWriter(storageDir) c.Assert(err, jc.ErrorIsNil) envtesting.UploadFakeTools(c, stor, "released", "released") err = bootstrap.Bootstrap(ctx, env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) cfg = env.Config() cfg, err = cfg.Apply(map[string]interface{}{ "secret": "fnord", }) c.Assert(err, jc.ErrorIsNil) err = env.SetConfig(cfg) c.Assert(err, jc.ErrorIsNil) st, err := juju.NewAPIState(dummy.AdminUserTag(), env, api.DialOpts{}) c.Assert(st, gc.NotNil) // the secrets will not be updated, as they already exist attrs, err := st.Client().EnvironmentGet() c.Assert(attrs["secret"], gc.Equals, "pork") c.Assert(st.Close(), gc.IsNil) }
func (s *cmdControllerSuite) TestSystemKillCallsEnvironDestroyOnHostedEnviron(c *gc.C) { st := s.Factory.MakeEnvironment(c, &factory.EnvParams{ Name: "foo", }) defer st.Close() st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") st.Close() opc := make(chan dummy.Operation, 200) dummy.Listen(opc) conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(*gc.C) { conn.Close() }) client := undertakerapi.NewClient(conn) startTime := time.Date(2015, time.September, 1, 17, 2, 1, 0, time.UTC) mClock := testing.NewClock(startTime) undertaker.NewUndertaker(client, mClock) store, err := configstore.Default() _, err = store.ReadInfo("dummyenv") c.Assert(err, jc.ErrorIsNil) s.run(c, "kill-controller", "dummyenv", "-y") // Ensure that Destroy was called on the hosted environment ... opRecvTimeout(c, st, opc, dummy.OpDestroy{}) // ... and that the configstore was removed. _, err = store.ReadInfo("dummyenv") c.Assert(err, jc.Satisfies, errors.IsNotFound) }
func (*NewAPIStateSuite) TestNewAPIState(c *gc.C) { cfg, err := config.New(config.NoDefaults, dummy.SampleConfig()) c.Assert(err, gc.IsNil) ctx := coretesting.Context(c) env, err := environs.Prepare(cfg, ctx, configstore.NewMem()) c.Assert(err, gc.IsNil) envtesting.UploadFakeTools(c, env.Storage()) err = bootstrap.Bootstrap(ctx, env, environs.BootstrapParams{}) c.Assert(err, gc.IsNil) cfg = env.Config() cfg, err = cfg.Apply(map[string]interface{}{ "secret": "fnord", }) c.Assert(err, gc.IsNil) err = env.SetConfig(cfg) c.Assert(err, gc.IsNil) st, err := juju.NewAPIState(env, api.DialOpts{}) c.Assert(st, gc.NotNil) // the secrets will not be updated, as they already exist attrs, err := st.Client().EnvironmentGet() c.Assert(attrs["secret"], gc.Equals, "pork") c.Assert(st.Close(), gc.IsNil) }
// setUpConn builds a complete fake juju home (config files, certs, dummy
// environment), bootstraps the dummy provider, and opens both a state and an
// API connection for use by the suite's tests. It panics if called twice
// without an intervening teardown.
func (s *JujuConnSuite) setUpConn(c *gc.C) {
	if s.RootDir != "" {
		panic("JujuConnSuite.setUpConn without teardown")
	}
	s.RootDir = c.MkDir()
	// Redirect $HOME and the juju home under the per-test root directory.
	s.oldHome = utils.Home()
	home := filepath.Join(s.RootDir, "/home/ubuntu")
	err := os.MkdirAll(home, 0777)
	c.Assert(err, gc.IsNil)
	utils.SetHome(home)
	s.oldJujuHome = osenv.SetJujuHome(filepath.Join(home, ".juju"))
	err = os.Mkdir(osenv.JujuHome(), 0777)
	c.Assert(err, gc.IsNil)
	err = os.MkdirAll(s.DataDir(), 0777)
	c.Assert(err, gc.IsNil)
	// Clear any ambient environment selection from the real environment.
	s.PatchEnvironment(osenv.JujuEnvEnvKey, "")
	// TODO(rog) remove these files and add them only when
	// the tests specifically need them (in cmd/juju for example)
	s.writeSampleConfig(c, osenv.JujuHomePath("environments.yaml"))
	err = ioutil.WriteFile(osenv.JujuHomePath("dummyenv-cert.pem"), []byte(testing.CACert), 0666)
	c.Assert(err, gc.IsNil)
	err = ioutil.WriteFile(osenv.JujuHomePath("dummyenv-private-key.pem"), []byte(testing.CAKey), 0600)
	c.Assert(err, gc.IsNil)
	store, err := configstore.Default()
	c.Assert(err, gc.IsNil)
	s.ConfigStore = store
	ctx := testing.Context(c)
	environ, err := environs.PrepareFromName("dummyenv", ctx, s.ConfigStore)
	c.Assert(err, gc.IsNil)
	// sanity check we've got the correct environment.
	c.Assert(environ.Config().Name(), gc.Equals, "dummyenv")
	s.PatchValue(&dummy.DataDir, s.DataDir())
	s.LogDir = c.MkDir()
	s.PatchValue(&dummy.LogDir, s.LogDir)
	versions := PreferredDefaultVersions(environ.Config(), version.Binary{Number: version.Current.Number, Series: "precise", Arch: "amd64"})
	versions = append(versions, version.Current)
	// Upload tools for both preferred and fake default series
	envtesting.MustUploadFakeToolsVersions(environ.Storage(), versions...)
	err = bootstrap.Bootstrap(ctx, environ, bootstrap.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	// Grab the backing state directly from the API server so tests can
	// inspect and mutate state behind the API's back.
	s.BackingState = environ.(GetStater).GetStateInAPIServer()
	s.State, err = newState(environ, s.BackingState.MongoConnectionInfo())
	c.Assert(err, gc.IsNil)
	s.APIState, err = juju.NewAPIState(environ, api.DialOpts{})
	c.Assert(err, gc.IsNil)
	s.Environ = environ
}
func (s *cmdControllerSuite) createEnv(c *gc.C, envname string, isServer bool) { conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(*gc.C) { conn.Close() }) envManager := environmentmanager.NewClient(conn) _, err = envManager.CreateEnvironment(s.AdminUserTag(c).Id(), nil, map[string]interface{}{ "name": envname, "authorized-keys": "ssh-key", "state-server": isServer, }) c.Assert(err, jc.ErrorIsNil) }
func (t *LiveTests) TestCheckEnvironmentOnConnect(c *gc.C) { // When new connection is established to a bootstraped environment, // it is checked that we are running against a juju-core environment. if !t.CanOpenState { c.Skip("CanOpenState is false; cannot open state connection") } t.BootstrapOnce(c) apiState, err := juju.NewAPIState(t.Env, api.DefaultDialOpts()) c.Assert(err, gc.IsNil) apiState.Close() }
func (s *blockSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(*gc.C) { conn.Close() }) s.ctx = &mockContext{ agentConfig: &mockAgentConfig{dataDir: s.DataDir()}, apiState: conn, state: s.State, } s.blockClient = block.NewClient(conn) }
func (s *modelmanagerSuite) OpenAPI(c *gc.C) *modelmanager.Client { conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(*gc.C) { conn.Close() }) return modelmanager.NewClient(conn) }
func (c *restoreCommand) Run(ctx *cmd.Context) error { if c.showDescription { fmt.Fprintf(ctx.Stdout, "%s\n", c.Info().Purpose) return nil } if err := c.Log.Start(ctx); err != nil { return err } agentConf, err := extractConfig(c.backupFile) if err != nil { return errors.Annotate(err, "cannot extract configuration from backup file") } progress("extracted credentials from backup file") store, err := configstore.Default() if err != nil { return err } cfg, err := c.Config(store, nil) if err != nil { return err } env, err := rebootstrap(cfg, ctx, c.Constraints) if err != nil { return errors.Annotate(err, "cannot re-bootstrap environment") } progress("connecting to newly bootstrapped instance") var apiState api.Connection // The state server backend may not be ready to accept logins so we retry. // We'll do up to 8 retries over 2 minutes to give the server time to come up. // Typically we expect only 1 retry will be needed. attempt := utils.AttemptStrategy{Delay: 15 * time.Second, Min: 8} // While specifying the admin user will work for now, as soon as we allow // the users to have a different initial user name, or they have changed // the password for the admin user, this will fail. 
owner := names.NewUserTag("admin") for a := attempt.Start(); a.Next(); { apiState, err = juju.NewAPIState(owner, env, api.DefaultDialOpts()) if err == nil || errors.Cause(err).Error() != "EOF" { break } progress("bootstrapped instance not ready - attempting to redial") } if err != nil { return errors.Annotate(err, "cannot connect to bootstrap instance") } progress("restoring bootstrap machine") machine0Addr, err := restoreBootstrapMachine(apiState, c.backupFile, agentConf) if err != nil { return errors.Annotate(err, "cannot restore bootstrap machine") } progress("restored bootstrap machine") apiState, err = juju.NewAPIState(owner, env, api.DefaultDialOpts()) progress("opening state") if err != nil { return errors.Annotate(err, "cannot connect to api server") } progress("updating all machines") results, err := updateAllMachines(apiState, machine0Addr) if err != nil { return errors.Annotate(err, "cannot update machines") } var message string for _, result := range results { if result.err != nil { message = fmt.Sprintf("Update of machine %q failed: %v", result.machineName, result.err) } else { message = fmt.Sprintf("Succesful update of machine %q", result.machineName) } progress(message) } return nil }
// setUpConn builds a complete fake juju home (config files, certs, dummy
// environment), uploads fake tools, bootstraps the dummy provider, opens
// state and API connections, and records the API endpoint back into the
// configstore. It panics if called twice without an intervening teardown.
func (s *JujuConnSuite) setUpConn(c *gc.C) {
	if s.RootDir != "" {
		panic("JujuConnSuite.setUpConn without teardown")
	}
	s.RootDir = c.MkDir()
	// Redirect $HOME and the juju home under the per-test root directory.
	s.oldHome = utils.Home()
	home := filepath.Join(s.RootDir, "/home/ubuntu")
	err := os.MkdirAll(home, 0777)
	c.Assert(err, jc.ErrorIsNil)
	utils.SetHome(home)
	s.oldJujuHome = osenv.SetJujuHome(filepath.Join(home, ".juju"))
	err = os.Mkdir(osenv.JujuHome(), 0777)
	c.Assert(err, jc.ErrorIsNil)
	err = os.MkdirAll(s.DataDir(), 0777)
	c.Assert(err, jc.ErrorIsNil)
	// Clear any ambient environment selection from the real environment.
	s.PatchEnvironment(osenv.JujuEnvEnvKey, "")
	// TODO(rog) remove these files and add them only when
	// the tests specifically need them (in cmd/juju for example)
	s.writeSampleConfig(c, osenv.JujuHomePath("environments.yaml"))
	err = ioutil.WriteFile(osenv.JujuHomePath("dummyenv-cert.pem"), []byte(testing.CACert), 0666)
	c.Assert(err, jc.ErrorIsNil)
	err = ioutil.WriteFile(osenv.JujuHomePath("dummyenv-private-key.pem"), []byte(testing.CAKey), 0600)
	c.Assert(err, jc.ErrorIsNil)
	store, err := configstore.Default()
	c.Assert(err, jc.ErrorIsNil)
	s.ConfigStore = store
	ctx := testing.Context(c)
	environ, err := environs.PrepareFromName("dummyenv", envcmd.BootstrapContext(ctx), s.ConfigStore)
	c.Assert(err, jc.ErrorIsNil)
	// sanity check we've got the correct environment.
	c.Assert(environ.Config().Name(), gc.Equals, "dummyenv")
	s.PatchValue(&dummy.DataDir, s.DataDir())
	s.LogDir = c.MkDir()
	s.PatchValue(&dummy.LogDir, s.LogDir)
	versions := PreferredDefaultVersions(environ.Config(), version.Binary{Number: version.Current.Number, Series: "precise", Arch: "amd64"})
	versions = append(versions, version.Current)
	// Upload tools for both preferred and fake default series
	s.DefaultToolsStorageDir = c.MkDir()
	s.PatchValue(&tools.DefaultBaseURL, s.DefaultToolsStorageDir)
	stor, err := filestorage.NewFileStorageWriter(s.DefaultToolsStorageDir)
	c.Assert(err, jc.ErrorIsNil)
	// Upload tools to both release and devel streams since config will dictate that we
	// end up looking in both places.
	envtesting.AssertUploadFakeToolsVersions(c, stor, "released", "released", versions...)
	envtesting.AssertUploadFakeToolsVersions(c, stor, "devel", "devel", versions...)
	s.DefaultToolsStorage = stor
	err = bootstrap.Bootstrap(envcmd.BootstrapContext(ctx), environ, bootstrap.BootstrapParams{})
	c.Assert(err, jc.ErrorIsNil)
	// Grab the backing state directly from the API server so tests can
	// inspect and mutate state behind the API's back.
	s.BackingState = environ.(GetStater).GetStateInAPIServer()
	s.State, err = newState(environ, s.BackingState.MongoConnectionInfo())
	c.Assert(err, jc.ErrorIsNil)
	s.APIState, err = juju.NewAPIState(s.AdminUserTag(c), environ, api.DialOpts{})
	c.Assert(err, jc.ErrorIsNil)
	err = s.State.SetAPIHostPorts(s.APIState.APIHostPorts())
	c.Assert(err, jc.ErrorIsNil)
	// Make sure the config store has the api endpoint address set
	info, err := s.ConfigStore.ReadInfo("dummyenv")
	c.Assert(err, jc.ErrorIsNil)
	endpoint := info.APIEndpoint()
	endpoint.Addresses = []string{s.APIState.APIHostPorts()[0][0].String()}
	info.SetAPIEndpoint(endpoint)
	err = info.Write()
	c.Assert(err, jc.ErrorIsNil)
	// Make sure the jenv file has the local host ports.
	c.Logf("jenv host ports: %#v", s.APIState.APIHostPorts())
	s.Environ = environ
	// Insert expected values...
	servingInfo := state.StateServingInfo{
		PrivateKey:   testing.ServerKey,
		Cert:         testing.ServerCert,
		CAPrivateKey: testing.CAKey,
		SharedSecret: "really, really secret",
		APIPort:      4321,
		StatePort:    1234,
	}
	// NOTE(review): the return value of SetStateServingInfo is discarded
	// here, unlike every other call in this function — confirm whether the
	// error should be asserted.
	s.State.SetStateServingInfo(servingInfo)
}
// TestBootstrapAndDeploy is an end-to-end live test: it bootstraps a real
// environment, verifies agent version and constraints made it through
// bootstrap, deploys a dummy charm onto a new machine, upgrades the
// environment, and finally tears the unit and its machine down, checking that
// the provisioner removes the instance.
func (t *LiveTests) TestBootstrapAndDeploy(c *gc.C) {
	if !t.CanOpenState || !t.HasProvisioner {
		c.Skip(fmt.Sprintf("skipping provisioner test, CanOpenState: %v, HasProvisioner: %v", t.CanOpenState, t.HasProvisioner))
	}
	t.BootstrapOnce(c)
	// TODO(niemeyer): Stop growing this kitchen sink test and split it into proper parts.
	c.Logf("opening state")
	st := t.Env.(testing.GetStater).GetStateInAPIServer()
	c.Logf("opening API connection")
	apiState, err := juju.NewAPIState(t.Env, api.DefaultDialOpts())
	c.Assert(err, gc.IsNil)
	defer apiState.Close()
	// Check that the agent version has made it through the
	// bootstrap process (it's optional in the config.Config)
	cfg, err := st.EnvironConfig()
	c.Assert(err, gc.IsNil)
	agentVersion, ok := cfg.AgentVersion()
	c.Check(ok, gc.Equals, true)
	c.Check(agentVersion, gc.Equals, version.Current.Number)
	// Check that the constraints have been set in the environment.
	cons, err := st.EnvironConstraints()
	c.Assert(err, gc.IsNil)
	c.Assert(cons.String(), gc.Equals, "mem=2048M")
	// Wait for machine agent to come up on the bootstrap
	// machine and find the deployed series from that.
	m0, err := st.Machine("0")
	c.Assert(err, gc.IsNil)
	instId0, err := m0.InstanceId()
	c.Assert(err, gc.IsNil)
	// Check that the API connection is working.
	status, err := apiState.Client().Status(nil)
	c.Assert(err, gc.IsNil)
	c.Assert(status.Machines["0"].InstanceId, gc.Equals, string(instId0))
	mw0 := newMachineToolWaiter(m0)
	defer mw0.Stop()
	// If the series has not been specified, we expect the most recent Ubuntu LTS release to be used.
	expectedVersion := version.Current
	expectedVersion.Series = config.LatestLtsSeries()
	mtools0 := waitAgentTools(c, mw0, expectedVersion)
	// Create a new service and deploy a unit of it.
	c.Logf("deploying service")
	repoDir := c.MkDir()
	url := charmtesting.Charms.ClonedURL(repoDir, mtools0.Version.Series, "dummy")
	sch, err := testing.PutCharm(st, url, &charm.LocalRepository{Path: repoDir}, false)
	c.Assert(err, gc.IsNil)
	svc, err := st.AddService("dummy", "user-admin", sch, nil)
	c.Assert(err, gc.IsNil)
	units, err := juju.AddUnits(st, svc, 1, "")
	c.Assert(err, gc.IsNil)
	unit := units[0]
	// Wait for the unit's machine and associated agent to come up
	// and announce itself.
	mid1, err := unit.AssignedMachineId()
	c.Assert(err, gc.IsNil)
	m1, err := st.Machine(mid1)
	c.Assert(err, gc.IsNil)
	mw1 := newMachineToolWaiter(m1)
	defer mw1.Stop()
	waitAgentTools(c, mw1, mtools0.Version)
	err = m1.Refresh()
	c.Assert(err, gc.IsNil)
	instId1, err := m1.InstanceId()
	c.Assert(err, gc.IsNil)
	uw := newUnitToolWaiter(unit)
	defer uw.Stop()
	utools := waitAgentTools(c, uw, expectedVersion)
	// Check that we can upgrade the environment.
	newVersion := utools.Version
	newVersion.Patch++
	t.checkUpgrade(c, st, newVersion, mw0, mw1, uw)
	// BUG(niemeyer): Logic below is very much wrong. Must be:
	//
	// 1. EnsureDying on the unit and EnsureDying on the machine
	// 2. Unit dies by itself
	// 3. Machine removes dead unit
	// 4. Machine dies by itself
	// 5. Provisioner removes dead machine
	//
	// Now remove the unit and its assigned machine and
	// check that the PA removes it.
	c.Logf("removing unit")
	err = unit.Destroy()
	c.Assert(err, gc.IsNil)
	// Wait until unit is dead
	uwatch := unit.Watch()
	defer uwatch.Stop()
	for unit.Life() != state.Dead {
		c.Logf("waiting for unit change")
		<-uwatch.Changes()
		err := unit.Refresh()
		c.Logf("refreshed; err %v", err)
		// The unit may be removed entirely before we observe it Dead.
		if errors.IsNotFound(err) {
			c.Logf("unit has been removed")
			break
		}
		c.Assert(err, gc.IsNil)
	}
	// Destroy can transiently fail while the dead unit is still assigned;
	// poll until the machine is destroyable or already gone.
	for {
		c.Logf("destroying machine")
		err := m1.Destroy()
		if err == nil {
			break
		}
		c.Assert(err, gc.FitsTypeOf, &state.HasAssignedUnitsError{})
		time.Sleep(5 * time.Second)
		err = m1.Refresh()
		if errors.IsNotFound(err) {
			break
		}
		c.Assert(err, gc.IsNil)
	}
	c.Logf("waiting for instance to be removed")
	t.assertStopInstance(c, t.Env, instId1)
}
// Run restores a juju state server from a backup file: it extracts the agent
// credentials from the backup, re-bootstraps the environment, restores the
// backup onto the new bootstrap machine, then opens mongo/state directly
// (rather than going through the API) and updates every machine agent to
// point at the new state server address.
func (c *restoreCommand) Run(ctx *cmd.Context) error {
	if c.showDescription {
		// --description mode: print the command's purpose and exit.
		fmt.Fprintf(ctx.Stdout, "%s\n", c.Info().Purpose)
		return nil
	}
	if err := c.Log.Start(ctx); err != nil {
		return err
	}
	agentConf, err := extractConfig(c.backupFile)
	if err != nil {
		return fmt.Errorf("cannot extract configuration from backup file: %v", err)
	}
	progress("extracted credentials from backup file")
	store, err := configstore.Default()
	if err != nil {
		return err
	}
	cfg, err := c.Config(store)
	if err != nil {
		return err
	}
	env, err := rebootstrap(cfg, ctx, c.Constraints)
	if err != nil {
		return fmt.Errorf("cannot re-bootstrap environment: %v", err)
	}
	progress("connecting to newly bootstrapped instance")
	var apiState *api.State
	// The state server backend may not be ready to accept logins so we retry.
	// We'll do up to 8 retries over 2 minutes to give the server time to come up.
	// Typically we expect only 1 retry will be needed.
	attempt := utils.AttemptStrategy{Delay: 15 * time.Second, Min: 8}
	for a := attempt.Start(); a.Next(); {
		apiState, err = juju.NewAPIState(env, api.DefaultDialOpts())
		// Only an "EOF" cause is treated as retryable (server not yet up);
		// success or any other error ends the retry loop.
		if err == nil || errors.Cause(err).Error() != "EOF" {
			break
		}
		progress("bootstrapped instance not ready - attempting to redial")
	}
	if err != nil {
		return fmt.Errorf("cannot connect to bootstrap instance: %v", err)
	}
	progress("restoring bootstrap machine")
	// NOTE(review): apiState is never closed after this point.
	machine0Addr, err := restoreBootstrapMachine(apiState, c.backupFile, agentConf)
	if err != nil {
		return fmt.Errorf("cannot restore bootstrap machine: %v", err)
	}
	progress("restored bootstrap machine")
	// Construct our own state info rather than using juju.NewConn so
	// that we can avoid storage eventual-consistency issues
	// (and it's faster too).
	caCert, ok := cfg.CACert()
	if !ok {
		return fmt.Errorf("configuration has no CA certificate")
	}
	// TODO(dfc) agenConf.Credentials should supply a Tag
	tag, err := names.ParseTag(agentConf.Credentials.Tag)
	if err != nil {
		return err
	}
	progress("opening state")
	// We need to retry here to allow mongo to come up on the restored state server.
	// The connection might succeed due to the mongo dial retries but there may still
	// be a problem issuing database commands.
	var st *state.State
	for a := attempt.Start(); a.Next(); {
		st, err = state.Open(&authentication.MongoInfo{
			Info: mongo.Info{
				Addrs:  []string{fmt.Sprintf("%s:%d", machine0Addr, cfg.StatePort())},
				CACert: caCert,
			},
			Tag:      tag,
			Password: agentConf.Credentials.Password,
		}, mongo.DefaultDialOpts(), environs.NewStatePolicy())
		if err == nil {
			break
		}
		progress("state server not ready - attempting to re-connect")
	}
	if err != nil {
		return fmt.Errorf("cannot open state: %v", err)
	}
	// NOTE(review): st is never closed; confirm whether a deferred Close is
	// wanted here.
	progress("updating all machines")
	if err := updateAllMachines(st, machine0Addr); err != nil {
		return fmt.Errorf("cannot update machines: %v", err)
	}
	return nil
}