func newServer(s *state.State, lis *net.TCPListener, cfg ServerConfig) (_ *Server, err error) {
	tlsCert, err := tls.X509KeyPair(cfg.Cert, cfg.Key)
	if err != nil {
		return nil, err
	}
	stPool := cfg.StatePool
	if stPool == nil {
		stPool = state.NewStatePool(s)
	}
	srv := &Server{
		clock:       cfg.Clock,
		newObserver: cfg.NewObserver,
		state:       s,
		statePool:   stPool,
		lis:         newChangeCertListener(lis, cfg.CertChanged, tlsCert),
		tag:         cfg.Tag,
		dataDir:     cfg.DataDir,
		logDir:      cfg.LogDir,
		limiter:     utils.NewLimiter(loginRateLimit),
		validator:   cfg.Validator,
		adminAPIFactories: map[int]adminAPIFactory{
			3: newAdminAPIV3,
		},
	}
	srv.authCtxt, err = newAuthContext(s)
	if err != nil {
		return nil, errors.Trace(err)
	}
	go srv.run()
	return srv, nil
}
func newServer(s *state.State, lis *net.TCPListener, cfg ServerConfig) (_ *Server, err error) {
	tlsCert, err := tls.X509KeyPair(cfg.Cert, cfg.Key)
	if err != nil {
		return nil, err
	}
	// TODO(rog) check that *srvRoot is a valid type for using
	// as an RPC server.
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{tlsCert},
		MinVersion:   tls.VersionTLS10,
	}
	srv := &Server{
		state:     s,
		statePool: state.NewStatePool(s),
		lis:       newChangeCertListener(lis, cfg.CertChanged, tlsConfig),
		tag:       cfg.Tag,
		dataDir:   cfg.DataDir,
		logDir:    cfg.LogDir,
		limiter:   utils.NewLimiter(loginRateLimit),
		validator: cfg.Validator,
		adminApiFactories: map[int]adminApiFactory{
			3: newAdminApiV3,
		},
	}
	srv.authCtxt, err = newAuthContext(s)
	if err != nil {
		return nil, errors.Trace(err)
	}
	go srv.run()
	return srv, nil
}
func (s *statePoolSuite) TestClose(c *gc.C) {
	p := state.NewStatePool(s.State)
	defer p.Close()

	// Get some State instances.
	st1, err := p.Get(s.ModelUUID1)
	c.Assert(err, jc.ErrorIsNil)
	st2, err := p.Get(s.ModelUUID2)
	c.Assert(err, jc.ErrorIsNil)

	// Now close them.
	err = p.Close()
	c.Assert(err, jc.ErrorIsNil)

	// Confirm that controller State isn't closed.
	_, err = s.State.Model()
	c.Assert(err, jc.ErrorIsNil)

	// Ensure that new ones are returned if further States are
	// requested.
	st1_, err := p.Get(s.ModelUUID1)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(st1_, gc.Not(gc.Equals), st1)

	st2_, err := p.Get(s.ModelUUID2)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(st2_, gc.Not(gc.Equals), st2)
}
func newServer(s *state.State, lis *net.TCPListener, cfg ServerConfig) (_ *Server, err error) {
	logger.Infof("listening on %q", lis.Addr())
	srv := &Server{
		state:     s,
		statePool: state.NewStatePool(s),
		addr:      lis.Addr().(*net.TCPAddr), // cannot fail
		tag:       cfg.Tag,
		dataDir:   cfg.DataDir,
		logDir:    cfg.LogDir,
		limiter:   utils.NewLimiter(loginRateLimit),
		validator: cfg.Validator,
		adminApiFactories: map[int]adminApiFactory{
			0: newAdminApiV0,
			1: newAdminApiV1,
			2: newAdminApiV2,
		},
	}
	srv.authCtxt = newAuthContext(srv)
	tlsCert, err := tls.X509KeyPair(cfg.Cert, cfg.Key)
	if err != nil {
		return nil, err
	}
	// TODO(rog) check that *srvRoot is a valid type for using
	// as an RPC server.
	tlsConfig := tls.Config{
		Certificates: []tls.Certificate{tlsCert},
	}
	changeCertListener := newChangeCertListener(lis, cfg.CertChanged, tlsConfig)
	go srv.run(changeCertListener)
	return srv, nil
}
func (s *statePoolSuite) TestSystemState(c *gc.C) {
	p := state.NewStatePool(s.State)
	defer p.Close()

	st0 := p.SystemState()
	c.Assert(st0, gc.Equals, s.State)
}
func (s *statePoolSuite) TestGetWithControllerEnv(c *gc.C) {
	p := state.NewStatePool(s.State)
	defer p.Close()

	// When a State for the controller env is requested, the same
	// State that was originally passed in should be returned.
	st0, err := p.Get(s.ModelUUID)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(st0, gc.Equals, s.State)
}
func (s *statePoolSuite) SetUpTest(c *gc.C) {
	s.StateSuite.SetUpTest(c)
	s.ModelUUID = s.State.ModelUUID()

	s.State1 = s.Factory.MakeModel(c, nil)
	s.AddCleanup(func(*gc.C) { s.State1.Close() })
	s.ModelUUID1 = s.State1.ModelUUID()

	s.State2 = s.Factory.MakeModel(c, nil)
	s.AddCleanup(func(*gc.C) { s.State2.Close() })
	s.ModelUUID2 = s.State2.ModelUUID()

	s.Pool = state.NewStatePool(s.State)
	s.AddCleanup(func(*gc.C) { s.Pool.Close() })
}
func (s *serverSuite) TestClosesStateFromPool(c *gc.C) {
	pool := state.NewStatePool(s.State)
	cfg := defaultServerConfig(c)
	cfg.StatePool = pool
	_, server := newServerWithConfig(c, s.State, cfg)
	defer assertStop(c, server)

	w := s.State.WatchModels()
	defer workertest.CleanKill(c, w)
	// Initial change.
	assertChange(c, w)

	otherState := s.Factory.MakeModel(c, nil)
	defer otherState.Close()

	s.State.StartSync()
	// This ensures that the model exists for more than one of the
	// time slices that the watcher uses for coalescing
	// events. Without it the model appears and disappears quickly
	// enough that it never generates a change from WatchModels.
	// Many Bothans died to bring us this information.
	assertChange(c, w)

	model, err := otherState.Model()
	c.Assert(err, jc.ErrorIsNil)

	// Ensure the model's in the pool but not referenced.
	st, err := pool.Get(otherState.ModelUUID())
	c.Assert(err, jc.ErrorIsNil)
	err = pool.Release(otherState.ModelUUID())
	c.Assert(err, jc.ErrorIsNil)

	// Make a request for the model API to check it releases
	// state back into the pool once the connection is closed.
	addr := fmt.Sprintf("localhost:%d", server.Addr().Port)
	conn, err := dialWebsocket(c, addr, fmt.Sprintf("/model/%s/api", st.ModelUUID()), 0)
	c.Assert(err, jc.ErrorIsNil)
	conn.Close()

	// When the model goes away the API server should ensure st gets closed.
	err = model.Destroy()
	c.Assert(err, jc.ErrorIsNil)

	s.State.StartSync()
	assertStateBecomesClosed(c, st)
}
func (s *statePoolSuite) TestGet(c *gc.C) {
	p := state.NewStatePool(s.State)
	defer p.Close()

	st1, err := p.Get(s.ModelUUID1)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(st1.ModelUUID(), gc.Equals, s.ModelUUID1)

	st2, err := p.Get(s.ModelUUID2)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(st2.ModelUUID(), gc.Equals, s.ModelUUID2)

	// Check that the same instances are returned
	// when a State for the same env is re-requested.
	st1_, err := p.Get(s.ModelUUID1)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(st1_, gc.Equals, st1)

	st2_, err := p.Get(s.ModelUUID2)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(st2_, gc.Equals, st2)
}
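The pool tests above pin down the contract: Get caches one State per model UUID, requests for the controller model hand back the original State, and Close drops pooled States while leaving the controller State alone. The following is a minimal, self-contained sketch of that contract only, not the real state.StatePool; the State stub and the open callback are hypothetical stand-ins.

// poolsketch: a sketch of the pooling behaviour exercised by statePoolSuite.
package poolsketch

import "sync"

// State stands in for *state.State; only the model UUID matters here.
type State struct {
	modelUUID string
}

func (s *State) ModelUUID() string { return s.modelUUID }
func (s *State) Close() error      { return nil }

// Pool caches one State per model UUID and never owns the controller State.
type Pool struct {
	mu          sync.Mutex
	systemState *State            // controller State passed to NewPool
	pool        map[string]*State // per-model cache
	open        func(modelUUID string) (*State, error)
}

func NewPool(systemState *State, open func(string) (*State, error)) *Pool {
	return &Pool{
		systemState: systemState,
		pool:        make(map[string]*State),
		open:        open,
	}
}

// Get returns the cached State for modelUUID, opening one on first use.
// Requests for the controller model return the original State unchanged,
// matching TestGetWithControllerEnv.
func (p *Pool) Get(modelUUID string) (*State, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if modelUUID == p.systemState.ModelUUID() {
		return p.systemState, nil
	}
	if st, ok := p.pool[modelUUID]; ok {
		return st, nil
	}
	st, err := p.open(modelUUID)
	if err != nil {
		return nil, err
	}
	p.pool[modelUUID] = st
	return st, nil
}

// SystemState returns the controller State the pool was created with.
func (p *Pool) SystemState() *State { return p.systemState }

// Close closes and drops every pooled State but leaves the controller
// State open, matching the behaviour TestClose checks.
func (p *Pool) Close() error {
	p.mu.Lock()
	defer p.mu.Unlock()
	for uuid, st := range p.pool {
		st.Close()
		delete(p.pool, uuid)
	}
	return nil
}

Reference counting (pool.Release in TestClosesStateFromPool) is deliberately omitted from the sketch to keep it short.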
func newServer(s *state.State, lis net.Listener, cfg ServerConfig) (_ *Server, err error) {
	stPool := cfg.StatePool
	if stPool == nil {
		stPool = state.NewStatePool(s)
	}

	srv := &Server{
		clock:       cfg.Clock,
		pingClock:   cfg.pingClock(),
		lis:         lis,
		newObserver: cfg.NewObserver,
		state:       s,
		statePool:   stPool,
		tag:         cfg.Tag,
		dataDir:     cfg.DataDir,
		logDir:      cfg.LogDir,
		limiter:     utils.NewLimiter(loginRateLimit),
		validator:   cfg.Validator,
		adminAPIFactories: map[int]adminAPIFactory{
			3: newAdminAPIV3,
		},
		certChanged:      cfg.CertChanged,
		allowModelAccess: cfg.AllowModelAccess,
	}

	srv.tlsConfig = srv.newTLSConfig(cfg)
	srv.lis = tls.NewListener(lis, srv.tlsConfig)

	srv.authCtxt, err = newAuthContext(s)
	if err != nil {
		return nil, errors.Trace(err)
	}

	if err := srv.updateCertificate(cfg.Cert, cfg.Key); err != nil {
		return nil, errors.Annotatef(err, "cannot set initial certificate")
	}

	go srv.run()
	return srv, nil
}
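In this latest newServer revision the certificate-changing listener is gone: the plain listener is wrapped once with tls.NewListener, and updateCertificate can later install new certificates without rebuilding the listener. The sketch below is a guess at how such rotation can work using only crypto/tls (via tls.Config.GetCertificate); the actual newTLSConfig and updateCertificate implementations may differ, and rotatingCert/newTLSListener are hypothetical names.

// certsketch: rotating a TLS certificate behind a long-lived listener.
package certsketch

import (
	"crypto/tls"
	"net"
	"sync"
)

// rotatingCert holds the currently served certificate behind a mutex.
type rotatingCert struct {
	mu   sync.RWMutex
	cert *tls.Certificate
}

// set parses and installs a new certificate/key pair; a method like
// updateCertificate would call it whenever the certs change.
func (r *rotatingCert) set(certPEM, keyPEM []byte) error {
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return err
	}
	r.mu.Lock()
	r.cert = &cert
	r.mu.Unlock()
	return nil
}

// get satisfies tls.Config.GetCertificate, so every new handshake picks up
// the latest certificate without touching the listener.
func (r *rotatingCert) get(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.cert, nil
}

// newTLSListener wraps a plain listener once; later calls to rc.set take
// effect for all subsequent connections.
func newTLSListener(lis net.Listener, rc *rotatingCert) net.Listener {
	cfg := &tls.Config{
		GetCertificate: rc.get,
		MinVersion:     tls.VersionTLS12, // assumption; the server's minimum may differ
	}
	return tls.NewListener(lis, cfg)
}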
func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) { series := config.PreferredSeries(e.Config()) availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series}) if err != nil { return nil, err } arch := availableTools.Arches()[0] defer delay() if err := e.checkBroken("Bootstrap"); err != nil { return nil, err } network.SetPreferIPv6(e.Config().PreferIPv6()) password := e.Config().AdminSecret() if password == "" { return nil, fmt.Errorf("admin-secret is required for bootstrap") } if _, ok := e.Config().CACert(); !ok { return nil, fmt.Errorf("no CA certificate in model configuration") } logger.Infof("would pick tools from %s", availableTools) cfg, err := environs.BootstrapConfig(e.Config()) if err != nil { return nil, fmt.Errorf("cannot make bootstrap config: %v", err) } estate, err := e.state() if err != nil { return nil, err } estate.mu.Lock() defer estate.mu.Unlock() if estate.bootstrapped { return nil, fmt.Errorf("model is already bootstrapped") } estate.preferIPv6 = e.Config().PreferIPv6() // Create an instance for the bootstrap node. logger.Infof("creating bootstrap instance") i := &dummyInstance{ id: BootstrapInstanceId, addresses: network.NewAddresses("localhost"), ports: make(map[network.PortRange]bool), machineId: agent.BootstrapMachineId, series: series, firewallMode: e.Config().FirewallMode(), state: estate, controller: true, } estate.insts[i.id] = i if e.ecfg().controller() { // TODO(rog) factor out relevant code from cmd/jujud/bootstrap.go // so that we can call it here. info := stateInfo(estate.preferIPv6) // Since the admin user isn't setup until after here, // the password in the info structure is empty, so the admin // user is constructed with an empty password here. // It is set just below. st, err := state.Initialize( names.NewUserTag("admin@local"), info, cfg, mongotest.DialOpts(), estate.statePolicy) if err != nil { panic(err) } if err := st.SetModelConstraints(args.ModelConstraints); err != nil { panic(err) } if err := st.SetAdminMongoPassword(password); err != nil { panic(err) } if err := st.MongoSession().DB("admin").Login("admin", password); err != nil { panic(err) } env, err := st.Model() if err != nil { panic(err) } owner, err := st.User(env.Owner()) if err != nil { panic(err) } // We log this out for test purposes only. No one in real life can use // a dummy provider for anything other than testing, so logging the password // here is fine. logger.Debugf("setting password for %q to %q", owner.Name(), password) owner.SetPassword(password) estate.apiStatePool = state.NewStatePool(st) estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{ Cert: []byte(testing.ServerCert), Key: []byte(testing.ServerKey), Tag: names.NewMachineTag("0"), DataDir: DataDir, LogDir: LogDir, StatePool: estate.apiStatePool, }) if err != nil { panic(err) } estate.apiState = st } estate.bootstrapped = true estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args} finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error { estate.ops <- OpFinalizeBootstrap{Context: ctx, Env: e.name, InstanceConfig: icfg} return nil } bsResult := &environs.BootstrapResult{ Arch: arch, Series: series, Finalize: finalize, } return bsResult, nil }
func (s *utilsSuite) SetUpTest(c *gc.C) {
	s.StateSuite.SetUpTest(c)
	s.pool = state.NewStatePool(s.State)
	s.AddCleanup(func(*gc.C) { s.pool.Close() })
}
func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) { series := config.PreferredSeries(e.Config()) availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series}) if err != nil { return nil, err } arch := availableTools.Arches()[0] defer delay() if err := e.checkBroken("Bootstrap"); err != nil { return nil, err } if _, ok := args.ControllerConfig.CACert(); !ok { return nil, errors.New("no CA certificate in controller configuration") } logger.Infof("would pick tools from %s", availableTools) estate, err := e.state() if err != nil { return nil, err } estate.mu.Lock() defer estate.mu.Unlock() if estate.bootstrapped { return nil, errors.New("model is already bootstrapped") } // Create an instance for the bootstrap node. logger.Infof("creating bootstrap instance") i := &dummyInstance{ id: BootstrapInstanceId, addresses: network.NewAddresses("localhost"), ports: make(map[network.PortRange]bool), machineId: agent.BootstrapMachineId, series: series, firewallMode: e.Config().FirewallMode(), state: estate, controller: true, } estate.insts[i.id] = i estate.bootstrapped = true estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args} finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig, _ environs.BootstrapDialOpts) error { if e.ecfg().controller() { icfg.Bootstrap.BootstrapMachineInstanceId = BootstrapInstanceId if err := instancecfg.FinishInstanceConfig(icfg, e.Config()); err != nil { return err } adminUser := names.NewUserTag("admin@local") var cloudCredentialTag names.CloudCredentialTag if icfg.Bootstrap.ControllerCloudCredentialName != "" { cloudCredentialTag = names.NewCloudCredentialTag(fmt.Sprintf( "%s/%s/%s", icfg.Bootstrap.ControllerCloudName, adminUser.Id(), icfg.Bootstrap.ControllerCloudCredentialName, )) } cloudCredentials := make(map[names.CloudCredentialTag]cloud.Credential) if icfg.Bootstrap.ControllerCloudCredential != nil && icfg.Bootstrap.ControllerCloudCredentialName != "" { cloudCredentials[cloudCredentialTag] = *icfg.Bootstrap.ControllerCloudCredential } info := stateInfo() // Since the admin user isn't setup until after here, // the password in the info structure is empty, so the admin // user is constructed with an empty password here. // It is set just below. st, err := state.Initialize(state.InitializeParams{ Clock: clock.WallClock, ControllerConfig: icfg.Controller.Config, ControllerModelArgs: state.ModelArgs{ Owner: adminUser, Config: icfg.Bootstrap.ControllerModelConfig, Constraints: icfg.Bootstrap.BootstrapMachineConstraints, CloudName: icfg.Bootstrap.ControllerCloudName, CloudRegion: icfg.Bootstrap.ControllerCloudRegion, CloudCredential: cloudCredentialTag, StorageProviderRegistry: e, }, Cloud: icfg.Bootstrap.ControllerCloud, CloudName: icfg.Bootstrap.ControllerCloudName, CloudCredentials: cloudCredentials, MongoInfo: info, MongoDialOpts: mongotest.DialOpts(), NewPolicy: estate.newStatePolicy, }) if err != nil { return err } if err := st.SetModelConstraints(args.ModelConstraints); err != nil { return err } if err := st.SetAdminMongoPassword(icfg.Controller.MongoInfo.Password); err != nil { return err } if err := st.MongoSession().DB("admin").Login("admin", icfg.Controller.MongoInfo.Password); err != nil { return err } env, err := st.Model() if err != nil { return err } owner, err := st.User(env.Owner()) if err != nil { return err } // We log this out for test purposes only. 
No one in real life can use // a dummy provider for anything other than testing, so logging the password // here is fine. logger.Debugf("setting password for %q to %q", owner.Name(), icfg.Controller.MongoInfo.Password) owner.SetPassword(icfg.Controller.MongoInfo.Password) estate.apiStatePool = state.NewStatePool(st) estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{ Clock: clock.WallClock, Cert: testing.ServerCert, Key: testing.ServerKey, Tag: names.NewMachineTag("0"), DataDir: DataDir, LogDir: LogDir, StatePool: estate.apiStatePool, NewObserver: func() observer.Observer { return &fakeobserver.Instance{} }, // Should never be used but prevent external access just in case. AutocertURL: "https://0.1.2.3/no-autocert-here", }) if err != nil { panic(err) } estate.apiState = st } estate.ops <- OpFinalizeBootstrap{Context: ctx, Env: e.name, InstanceConfig: icfg} return nil } bsResult := &environs.BootstrapResult{ Arch: arch, Series: series, Finalize: finalize, } return bsResult, nil }