func (s *baseLoginSuite) setupServerForEnvironmentWithValidator(c *gc.C, envTag names.EnvironTag, validator apiserver.LoginValidator) (*api.Info, func()) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(
		s.State, listener,
		apiserver.ServerConfig{
			Cert:      []byte(coretesting.ServerCert),
			Key:       []byte(coretesting.ServerKey),
			Validator: validator,
			Tag:       names.NewMachineTag("0"),
		},
	)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.setAdminApi, gc.NotNil)
	s.setAdminApi(srv)
	info := &api.Info{
		Tag:        nil,
		Password:   "",
		EnvironTag: envTag,
		Addrs:      []string{srv.Addr().String()},
		CACert:     coretesting.CACert,
	}
	return info, func() {
		err := srv.Stop()
		c.Assert(err, jc.ErrorIsNil)
	}
}
func (s *serverSuite) TestNonCompatiblePathsAre404(c *gc.C) {
	// We expose the API at '/' for compatibility, and at
	// '/environment/ENVUUID/api' for the correct location,
	// but other paths should fail.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, gc.IsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
	})
	c.Assert(err, gc.IsNil)
	defer srv.Stop()

	// We have to use 'localhost' because that is what the TLS cert says,
	// so find just the port for the server.
	_, portString, err := net.SplitHostPort(srv.Addr())
	c.Assert(err, gc.IsNil)
	addr := "localhost:" + portString

	// '/' should be fine.
	conn, err := dialWebsocket(c, addr, "/")
	c.Assert(err, gc.IsNil)
	conn.Close()

	// '/environment/ENVIRONUUID/api' should be fine.
	conn, err = dialWebsocket(c, addr, "/environment/dead-beef-123456/api")
	c.Assert(err, gc.IsNil)
	conn.Close()

	// '/randompath' is not ok.
	conn, err = dialWebsocket(c, addr, "/randompath")
	// Unfortunately go.net/websocket just returns "bad status"; it doesn't
	// tell us whether this was a 404 Not Found, an Internal Server Error,
	// a 200 OK, etc.
	c.Assert(err, gc.ErrorMatches, `websocket.Dial wss://localhost:\d+/randompath: bad status`)
	c.Assert(conn, gc.IsNil)
}
func (s *apiserverBaseSuite) newServerNoCleanup(c *gc.C, config apiserver.ServerConfig) *apiserver.Server {
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(s.State, listener, config)
	c.Assert(err, jc.ErrorIsNil)
	return srv
}
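// A minimal sketch of how a caller might use newServerNoCleanup. The test
// name and the config values below are hypothetical (this is not a test
// from the original suite); the deferred Stop mirrors the pattern used by
// the other tests in this file, since "NoCleanup" means the caller owns
// the server's lifetime.
func (s *apiserverBaseSuite) TestExampleUsage(c *gc.C) {
	srv := s.newServerNoCleanup(c, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
	})
	// The caller, not the helper, is responsible for stopping the server.
	defer srv.Stop()
}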
func (s *baseLoginSuite) setupServerForModelWithValidator(c *gc.C, modelTag names.ModelTag, validator apiserver.LoginValidator) (*api.Info, func()) {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(
		s.State, listener,
		apiserver.ServerConfig{
			Clock:     clock.WallClock,
			Cert:      []byte(coretesting.ServerCert),
			Key:       []byte(coretesting.ServerKey),
			Validator: validator,
			Tag:       names.NewMachineTag("0"),
			LogDir:    c.MkDir(),
			NewObserver: func() observer.Observer {
				return &fakeobserver.Instance{}
			},
		},
	)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.setAdminAPI, gc.NotNil)
	s.setAdminAPI(srv)
	info := &api.Info{
		Tag:      nil,
		Password: "",
		ModelTag: modelTag,
		Addrs:    []string{srv.Addr().String()},
		CACert:   coretesting.CACert,
	}
	return info, func() {
		err := srv.Stop()
		c.Assert(err, jc.ErrorIsNil)
	}
}
func (s *loginSuite) setupServerWithValidator(c *gc.C, validator apiserver.LoginValidator) (*api.Info, func()) {
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, gc.IsNil)
	srv, err := apiserver.NewServer(
		s.State, listener,
		apiserver.ServerConfig{
			Cert:      []byte(coretesting.ServerCert),
			Key:       []byte(coretesting.ServerKey),
			Validator: validator,
		},
	)
	c.Assert(err, gc.IsNil)
	env, err := s.State.Environment()
	c.Assert(err, gc.IsNil)
	info := &api.Info{
		Tag:        nil,
		Password:   "",
		EnvironTag: env.Tag(),
		Addrs:      []string{srv.Addr()},
		CACert:     coretesting.CACert,
	}
	return info, func() {
		err := srv.Stop()
		c.Assert(err, gc.IsNil)
	}
}
func (s *serverSuite) TestAPIServerCanListenOnBothIPv4AndIPv6(c *gc.C) {
	err := s.State.SetAPIHostPorts(nil)
	c.Assert(err, jc.ErrorIsNil)

	// Start our own instance of the server listening on
	// both IPv4 and IPv6 localhost addresses and an ephemeral port.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
		Tag:  names.NewMachineTag("0"),
	})
	c.Assert(err, jc.ErrorIsNil)
	defer srv.Stop()

	port := srv.Addr().Port
	portString := fmt.Sprintf("%d", port)

	machine, password := s.Factory.MakeMachineReturningPassword(
		c, &factory.MachineParams{Nonce: "fake_nonce"})

	// Now connect twice - using IPv4 and IPv6 endpoints.
	apiInfo := &api.Info{
		Tag:        machine.Tag(),
		Password:   password,
		Nonce:      "fake_nonce",
		Addrs:      []string{net.JoinHostPort("127.0.0.1", portString)},
		CACert:     coretesting.CACert,
		EnvironTag: s.State.EnvironTag(),
	}
	ipv4State, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer ipv4State.Close()
	c.Assert(ipv4State.Addr(), gc.Equals, net.JoinHostPort("127.0.0.1", portString))
	c.Assert(ipv4State.APIHostPorts(), jc.DeepEquals, [][]network.HostPort{
		network.NewHostPorts(port, "127.0.0.1"),
	})
	_, err = ipv4State.Machiner().Machine(machine.MachineTag())
	c.Assert(err, jc.ErrorIsNil)

	apiInfo.Addrs = []string{net.JoinHostPort("::1", portString)}
	ipv6State, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer ipv6State.Close()
	c.Assert(ipv6State.Addr(), gc.Equals, net.JoinHostPort("::1", portString))
	c.Assert(ipv6State.APIHostPorts(), jc.DeepEquals, [][]network.HostPort{
		network.NewHostPorts(port, "::1"),
	})
	_, err = ipv6State.Machiner().Machine(machine.MachineTag())
	c.Assert(err, jc.ErrorIsNil)
}
// newServer returns a new running API server.
func newServer(c *gc.C, st *state.State) *apiserver.Server {
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(st, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
		Tag:  names.NewMachineTag("0"),
	})
	c.Assert(err, jc.ErrorIsNil)
	return srv
}
// newServerWithConfig is like newServer except that the entire
// server configuration may be specified (see defaultServerConfig
// for a suitable starting point).
func newServerWithConfig(c *gc.C, st *state.State, cfg apiserver.ServerConfig) (*api.Info, *apiserver.Server) {
	// Note that we can't listen on localhost here because
	// TestAPIServerCanListenOnBothIPv4AndIPv6 assumes that we listen
	// on IPv6 too, and listening on localhost does not do that.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(st, listener, cfg)
	c.Assert(err, jc.ErrorIsNil)
	return &api.Info{
		Addrs:  []string{fmt.Sprintf("localhost:%d", srv.Addr().Port)},
		CACert: coretesting.CACert,
	}, srv
}
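// A hypothetical sketch of calling newServerWithConfig. The helper
// defaultServerConfig is referenced by the doc comment above but its body
// is not shown here, so the signature defaultServerConfig(c) is an
// assumption; the returned info/srv handling mirrors the other helpers.
func exampleNewServerWithConfig(c *gc.C, st *state.State) {
	cfg := defaultServerConfig(c) // assumed signature, see comment above
	info, srv := newServerWithConfig(c, st, cfg)
	defer srv.Stop()
	_ = info // info.Addrs points at the freshly started server
}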
func (s *serverSuite) TestStop(c *gc.C) {
	// Start our own instance of the server so we have
	// a handle on it to stop it.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
		Tag:  names.NewMachineTag("0"),
	})
	c.Assert(err, jc.ErrorIsNil)
	defer srv.Stop()

	machine, password := s.Factory.MakeMachineReturningPassword(
		c, &factory.MachineParams{Nonce: "fake_nonce"})

	// A net.TCPAddr cannot be directly stringified into a valid hostname.
	address := fmt.Sprintf("localhost:%d", srv.Addr().Port)

	// Note we can't use openAs because we're not connecting to
	apiInfo := &api.Info{
		Tag:        machine.Tag(),
		Password:   password,
		Nonce:      "fake_nonce",
		Addrs:      []string{address},
		CACert:     coretesting.CACert,
		EnvironTag: s.State.EnvironTag(),
	}
	st, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer st.Close()

	_, err = st.Machiner().Machine(machine.MachineTag())
	c.Assert(err, jc.ErrorIsNil)

	err = srv.Stop()
	c.Assert(err, jc.ErrorIsNil)

	_, err = st.Machiner().Machine(machine.MachineTag())
	err = errors.Cause(err)
	// The client has not necessarily seen the server shutdown yet,
	// so there are two possible errors.
	if err != rpc.ErrShutdown && err != io.ErrUnexpectedEOF {
		c.Fatalf("unexpected error from request: %#v, expected rpc.ErrShutdown or io.ErrUnexpectedEOF", err)
	}

	// Check it can be stopped twice.
	err = srv.Stop()
	c.Assert(err, jc.ErrorIsNil)
}
// newServer returns a new running API server.
func newServer(c *gc.C, st *state.State) *apiserver.Server {
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(st, listener, apiserver.ServerConfig{
		Clock:  clock.WallClock,
		Cert:   []byte(coretesting.ServerCert),
		Key:    []byte(coretesting.ServerKey),
		Tag:    names.NewMachineTag("0"),
		LogDir: c.MkDir(),
		NewObserver: func() observer.Observer {
			return &fakeobserver.Instance{}
		},
	})
	c.Assert(err, jc.ErrorIsNil)
	return srv
}
func (s *serverSuite) TestStop(c *gc.C) {
	// Start our own instance of the server so we have
	// a handle on it to stop it.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, gc.IsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
	})
	c.Assert(err, gc.IsNil)
	defer srv.Stop()

	stm, err := s.State.AddMachine("quantal", state.JobHostUnits)
	c.Assert(err, gc.IsNil)
	err = stm.SetProvisioned("foo", "fake_nonce", nil)
	c.Assert(err, gc.IsNil)
	password, err := utils.RandomPassword()
	c.Assert(err, gc.IsNil)
	err = stm.SetPassword(password)
	c.Assert(err, gc.IsNil)

	// Note we can't use openAs because we're not connecting to
	apiInfo := &api.Info{
		Tag:      stm.Tag(),
		Password: password,
		Nonce:    "fake_nonce",
		Addrs:    []string{srv.Addr()},
		CACert:   coretesting.CACert,
	}
	st, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, gc.IsNil)
	defer st.Close()

	_, err = st.Machiner().Machine(stm.Tag().(names.MachineTag))
	c.Assert(err, gc.IsNil)

	err = srv.Stop()
	c.Assert(err, gc.IsNil)

	_, err = st.Machiner().Machine(stm.Tag().(names.MachineTag))
	// The client has not necessarily seen the server shutdown yet,
	// so there are two possible errors.
	if err != rpc.ErrShutdown && err != io.ErrUnexpectedEOF {
		c.Fatalf("unexpected error from request: %v", err)
	}

	// Check it can be stopped twice.
	err = srv.Stop()
	c.Assert(err, gc.IsNil)
}
func (s *serverSuite) TestMinTLSVersion(c *gc.C) {
	loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE)
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, jc.ErrorIsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
		Tag:  names.NewMachineTag("0"),
	})
	c.Assert(err, jc.ErrorIsNil)
	defer srv.Stop()

	// We have to use 'localhost' because that is what the TLS cert says.
	addr := fmt.Sprintf("localhost:%d", srv.Addr().Port)

	// Specify an unsupported TLS version.
	conn, err := dialWebsocket(c, addr, "/", tls.VersionSSL30)
	c.Assert(err, gc.ErrorMatches, ".*protocol version not supported")
	c.Assert(conn, gc.IsNil)
}
func (a *MachineAgent) newApiserverWorker(st *state.State, certChanged chan params.StateServingInfo) (worker.Worker, error) {
	agentConfig := a.CurrentConfig()
	// If the configuration does not have the required information,
	// it is currently not a recoverable error, so we kill the whole
	// agent, potentially enabling human intervention to fix
	// the agent's configuration file.
	info, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, &cmdutil.FatalError{"StateServingInfo not available and we need it"}
	}
	cert := []byte(info.Cert)
	key := []byte(info.PrivateKey)
	if len(cert) == 0 || len(key) == 0 {
		return nil, &cmdutil.FatalError{"configuration does not have controller cert/key"}
	}
	tag := agentConfig.Tag()
	dataDir := agentConfig.DataDir()
	logDir := agentConfig.LogDir()
	endpoint := net.JoinHostPort("", strconv.Itoa(info.APIPort))
	listener, err := net.Listen("tcp", endpoint)
	if err != nil {
		return nil, err
	}
	w, err := apiserver.NewServer(st, listener, apiserver.ServerConfig{
		Cert:        cert,
		Key:         key,
		Tag:         tag,
		DataDir:     dataDir,
		LogDir:      logDir,
		Validator:   a.limitLogins,
		CertChanged: certChanged,
	})
	if err != nil {
		return nil, errors.Annotate(err, "cannot start api server worker")
	}
	return w, nil
}
func (a *MachineAgent) newApiserverWorker(st *state.State, certChanged chan params.StateServingInfo) (worker.Worker, error) {
	agentConfig := a.CurrentConfig()
	// If the configuration does not have the required information,
	// it is currently not a recoverable error, so we kill the whole
	// agent, potentially enabling human intervention to fix
	// the agent's configuration file.
	info, ok := agentConfig.StateServingInfo()
	if !ok {
		return nil, &cmdutil.FatalError{"StateServingInfo not available and we need it"}
	}
	cert := []byte(info.Cert)
	key := []byte(info.PrivateKey)
	if len(cert) == 0 || len(key) == 0 {
		return nil, &cmdutil.FatalError{"configuration does not have controller cert/key"}
	}
	tag := agentConfig.Tag()
	dataDir := agentConfig.DataDir()
	logDir := agentConfig.LogDir()
	endpoint := net.JoinHostPort("", strconv.Itoa(info.APIPort))
	listener, err := net.Listen("tcp", endpoint)
	if err != nil {
		return nil, err
	}

	// TODO(katco): We should be doing something more serious than
	// logging audit errors. Failures in the auditing systems should
	// stop the api server until the problem can be corrected.
	auditErrorHandler := func(err error) {
		logger.Criticalf("%v", err)
	}

	controllerConfig, err := st.ControllerConfig()
	if err != nil {
		return nil, errors.Annotate(err, "cannot fetch the controller config")
	}

	server, err := apiserver.NewServer(st, listener, apiserver.ServerConfig{
		Clock:       clock.WallClock,
		Cert:        cert,
		Key:         key,
		Tag:         tag,
		DataDir:     dataDir,
		LogDir:      logDir,
		Validator:   a.limitLogins,
		CertChanged: certChanged,
		NewObserver: newObserverFn(
			controllerConfig,
			clock.WallClock,
			jujuversion.Current,
			agentConfig.Model().Id(),
			newAuditEntrySink(st, logDir),
			auditErrorHandler,
		),
	})
	if err != nil {
		return nil, errors.Annotate(err, "cannot start api server worker")
	}
	return server, nil
}
func (s *legacySuite) TestAPIServerCanShutdownWithOutstandingNext(c *gc.C) {
	lis, err := net.Listen("tcp", "localhost:0")
	c.Assert(err, jc.ErrorIsNil)

	srv, err := apiserver.NewServer(s.State, lis, apiserver.ServerConfig{
		Clock:   clock.WallClock,
		Cert:    []byte(testing.ServerCert),
		Key:     []byte(testing.ServerKey),
		Tag:     names.NewMachineTag("0"),
		DataDir: c.MkDir(),
		LogDir:  c.MkDir(),
		NewObserver: func() observer.Observer {
			return &fakeobserver.Instance{}
		},
	})
	c.Assert(err, gc.IsNil)

	// Connect to the API server we've just started.
	apiInfo := s.APIInfo(c)
	apiInfo.Addrs = []string{lis.Addr().String()}
	apiInfo.ModelTag = names.ModelTag{}
	apiState, err := api.Open(apiInfo, api.DialOpts{})
	c.Assert(err, jc.ErrorIsNil)

	sysManager := controller.NewClient(apiState)
	defer sysManager.Close()
	w, err := sysManager.WatchAllModels()
	c.Assert(err, jc.ErrorIsNil)
	defer w.Stop()

	deltasC := make(chan struct{}, 2)
	go func() {
		defer close(deltasC)
		for {
			_, err := w.Next()
			if err != nil {
				return
			}
			deltasC <- struct{}{}
		}
	}()

	// Read the first event.
	select {
	case <-deltasC:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out")
	}

	// Wait a little while for the Next call to actually arrive.
	time.Sleep(testing.ShortWait)

	// We should be able to close the server instance
	// even when there's an outstanding Next call.
	srvStopped := make(chan struct{})
	go func() {
		srv.Stop()
		close(srvStopped)
	}()
	select {
	case <-srvStopped:
	case <-time.After(testing.LongWait):
		c.Fatal("timed out waiting for server to stop")
	}

	// Check that the Next call has returned too.
	select {
	case _, ok := <-deltasC:
		if ok {
			c.Fatalf("got unexpected event from deltasC")
		}
	case <-time.After(testing.LongWait):
		c.Fatal("timed out")
	}
}
func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, _ error) {
	series = config.PreferredSeries(e.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return "", "", nil, err
	}
	arch = availableTools.Arches()[0]

	defer delay()
	if err := e.checkBroken("Bootstrap"); err != nil {
		return "", "", nil, err
	}
	network.InitializeFromConfig(e.Config())
	password := e.Config().AdminSecret()
	if password == "" {
		return "", "", nil, fmt.Errorf("admin-secret is required for bootstrap")
	}
	if _, ok := e.Config().CACert(); !ok {
		return "", "", nil, fmt.Errorf("no CA certificate in environment configuration")
	}

	logger.Infof("would pick tools from %s", availableTools)
	cfg, err := environs.BootstrapConfig(e.Config())
	if err != nil {
		return "", "", nil, fmt.Errorf("cannot make bootstrap config: %v", err)
	}

	estate, err := e.state()
	if err != nil {
		return "", "", nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if estate.bootstrapped {
		return "", "", nil, fmt.Errorf("environment is already bootstrapped")
	}
	estate.preferIPv6 = e.Config().PreferIPv6()

	// Create an instance for the bootstrap node.
	logger.Infof("creating bootstrap instance")
	i := &dummyInstance{
		id:           BootstrapInstanceId,
		addresses:    network.NewAddresses("localhost"),
		ports:        make(map[network.PortRange]bool),
		machineId:    agent.BootstrapMachineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
		stateServer:  true,
	}
	estate.insts[i.id] = i

	if e.ecfg().stateServer() {
		// TODO(rog) factor out relevant code from cmd/jujud/bootstrap.go
		// so that we can call it here.
		info := stateInfo(estate.preferIPv6)
		// Since the admin user isn't setup until after here,
		// the password in the info structure is empty, so the admin
		// user is constructed with an empty password here.
		// It is set just below.
		st, err := state.Initialize(
			AdminUserTag(), info, cfg, mongo.DefaultDialOpts(), estate.statePolicy)
		if err != nil {
			panic(err)
		}
		if err := st.SetEnvironConstraints(args.Constraints); err != nil {
			panic(err)
		}
		if err := st.SetAdminMongoPassword(password); err != nil {
			panic(err)
		}
		if err := st.MongoSession().DB("admin").Login("admin", password); err != nil {
			panic(err)
		}
		env, err := st.Environment()
		if err != nil {
			panic(err)
		}
		owner, err := st.User(env.Owner())
		if err != nil {
			panic(err)
		}
		// We log this out for test purposes only. No one in real life can use
		// a dummy provider for anything other than testing, so logging the
		// password here is fine.
		logger.Debugf("setting password for %q to %q", owner.Name(), password)
		owner.SetPassword(password)

		estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{
			Cert:    []byte(testing.ServerCert),
			Key:     []byte(testing.ServerKey),
			Tag:     names.NewMachineTag("0"),
			DataDir: DataDir,
			LogDir:  LogDir,
		})
		if err != nil {
			panic(err)
		}
		estate.apiState = st
	}
	estate.bootstrapped = true
	estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args}
	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error {
		estate.ops <- OpFinalizeBootstrap{Context: ctx, Env: e.name, InstanceConfig: icfg}
		return nil
	}
	return arch, series, finalize, nil
}
func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) {
	series := config.PreferredSeries(e.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return nil, err
	}
	arch := availableTools.Arches()[0]

	defer delay()
	if err := e.checkBroken("Bootstrap"); err != nil {
		return nil, err
	}
	if _, ok := args.ControllerConfig.CACert(); !ok {
		return nil, errors.New("no CA certificate in controller configuration")
	}

	logger.Infof("would pick tools from %s", availableTools)

	estate, err := e.state()
	if err != nil {
		return nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if estate.bootstrapped {
		return nil, errors.New("model is already bootstrapped")
	}

	// Create an instance for the bootstrap node.
	logger.Infof("creating bootstrap instance")
	i := &dummyInstance{
		id:           BootstrapInstanceId,
		addresses:    network.NewAddresses("localhost"),
		ports:        make(map[network.PortRange]bool),
		machineId:    agent.BootstrapMachineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
		controller:   true,
	}
	estate.insts[i.id] = i
	estate.bootstrapped = true
	estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args}

	finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig, _ environs.BootstrapDialOpts) error {
		if e.ecfg().controller() {
			icfg.Bootstrap.BootstrapMachineInstanceId = BootstrapInstanceId
			if err := instancecfg.FinishInstanceConfig(icfg, e.Config()); err != nil {
				return err
			}

			adminUser := names.NewUserTag("admin@local")
			var cloudCredentialTag names.CloudCredentialTag
			if icfg.Bootstrap.ControllerCloudCredentialName != "" {
				cloudCredentialTag = names.NewCloudCredentialTag(fmt.Sprintf(
					"%s/%s/%s",
					icfg.Bootstrap.ControllerCloudName,
					adminUser.Id(),
					icfg.Bootstrap.ControllerCloudCredentialName,
				))
			}

			cloudCredentials := make(map[names.CloudCredentialTag]cloud.Credential)
			if icfg.Bootstrap.ControllerCloudCredential != nil && icfg.Bootstrap.ControllerCloudCredentialName != "" {
				cloudCredentials[cloudCredentialTag] = *icfg.Bootstrap.ControllerCloudCredential
			}

			info := stateInfo()
			// Since the admin user isn't setup until after here,
			// the password in the info structure is empty, so the admin
			// user is constructed with an empty password here.
			// It is set just below.
			st, err := state.Initialize(state.InitializeParams{
				Clock:            clock.WallClock,
				ControllerConfig: icfg.Controller.Config,
				ControllerModelArgs: state.ModelArgs{
					Owner:                   adminUser,
					Config:                  icfg.Bootstrap.ControllerModelConfig,
					Constraints:             icfg.Bootstrap.BootstrapMachineConstraints,
					CloudName:               icfg.Bootstrap.ControllerCloudName,
					CloudRegion:             icfg.Bootstrap.ControllerCloudRegion,
					CloudCredential:         cloudCredentialTag,
					StorageProviderRegistry: e,
				},
				Cloud:            icfg.Bootstrap.ControllerCloud,
				CloudName:        icfg.Bootstrap.ControllerCloudName,
				CloudCredentials: cloudCredentials,
				MongoInfo:        info,
				MongoDialOpts:    mongotest.DialOpts(),
				NewPolicy:        estate.newStatePolicy,
			})
			if err != nil {
				return err
			}
			if err := st.SetModelConstraints(args.ModelConstraints); err != nil {
				return err
			}
			if err := st.SetAdminMongoPassword(icfg.Controller.MongoInfo.Password); err != nil {
				return err
			}
			if err := st.MongoSession().DB("admin").Login("admin", icfg.Controller.MongoInfo.Password); err != nil {
				return err
			}
			env, err := st.Model()
			if err != nil {
				return err
			}
			owner, err := st.User(env.Owner())
			if err != nil {
				return err
			}
			// We log this out for test purposes only. No one in real life can use
			// a dummy provider for anything other than testing, so logging the
			// password here is fine.
			logger.Debugf("setting password for %q to %q", owner.Name(), icfg.Controller.MongoInfo.Password)
			owner.SetPassword(icfg.Controller.MongoInfo.Password)

			estate.apiStatePool = state.NewStatePool(st)

			estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{
				Clock:     clock.WallClock,
				Cert:      testing.ServerCert,
				Key:       testing.ServerKey,
				Tag:       names.NewMachineTag("0"),
				DataDir:   DataDir,
				LogDir:    LogDir,
				StatePool: estate.apiStatePool,
				NewObserver: func() observer.Observer {
					return &fakeobserver.Instance{}
				},
				// Should never be used but prevent external access just in case.
				AutocertURL: "https://0.1.2.3/no-autocert-here",
			})
			if err != nil {
				panic(err)
			}
			estate.apiState = st
		}
		estate.ops <- OpFinalizeBootstrap{Context: ctx, Env: e.name, InstanceConfig: icfg}
		return nil
	}

	bsResult := &environs.BootstrapResult{
		Arch:     arch,
		Series:   series,
		Finalize: finalize,
	}
	return bsResult, nil
}
func (s *serverSuite) TestAPIServerCanListenOnBothIPv4AndIPv6(c *gc.C) {
	err := s.State.SetAPIHostPorts(nil)
	c.Assert(err, gc.IsNil)

	// Start our own instance of the server listening on
	// both IPv4 and IPv6 localhost addresses and an ephemeral port.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, gc.IsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
	})
	c.Assert(err, gc.IsNil)
	defer srv.Stop()

	// srv.Addr() always reports "localhost" together with the port as
	// address. This way it can be used as hostname to construct URLs
	// which will work for both IPv4 and IPv6-only networks, as
	// localhost resolves as both 127.0.0.1 and ::1.
	// Retrieve the port as string and integer.
	hostname, portString, err := net.SplitHostPort(srv.Addr())
	c.Assert(err, gc.IsNil)
	c.Assert(hostname, gc.Equals, "localhost")
	port, err := strconv.Atoi(portString)
	c.Assert(err, gc.IsNil)

	stm, err := s.State.AddMachine("quantal", state.JobHostUnits)
	c.Assert(err, gc.IsNil)
	err = stm.SetProvisioned("foo", "fake_nonce", nil)
	c.Assert(err, gc.IsNil)
	password, err := utils.RandomPassword()
	c.Assert(err, gc.IsNil)
	err = stm.SetPassword(password)
	c.Assert(err, gc.IsNil)

	// Now connect twice - using IPv4 and IPv6 endpoints.
	apiInfo := &api.Info{
		Tag:      stm.Tag(),
		Password: password,
		Nonce:    "fake_nonce",
		Addrs:    []string{net.JoinHostPort("127.0.0.1", portString)},
		CACert:   coretesting.CACert,
	}
	ipv4State, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, gc.IsNil)
	defer ipv4State.Close()
	c.Assert(ipv4State.Addr(), gc.Equals, net.JoinHostPort("127.0.0.1", portString))
	c.Assert(ipv4State.APIHostPorts(), jc.DeepEquals, [][]network.HostPort{
		[]network.HostPort{{network.NewAddress("127.0.0.1", network.ScopeMachineLocal), port}},
	})
	_, err = ipv4State.Machiner().Machine(stm.Tag().(names.MachineTag))
	c.Assert(err, gc.IsNil)

	apiInfo.Addrs = []string{net.JoinHostPort("::1", portString)}
	ipv6State, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, gc.IsNil)
	defer ipv6State.Close()
	c.Assert(ipv6State.Addr(), gc.Equals, net.JoinHostPort("::1", portString))
	c.Assert(ipv6State.APIHostPorts(), jc.DeepEquals, [][]network.HostPort{
		[]network.HostPort{{network.NewAddress("::1", network.ScopeMachineLocal), port}},
	})
	_, err = ipv6State.Machiner().Machine(stm.Tag().(names.MachineTag))
	c.Assert(err, gc.IsNil)
}
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	// Create system-identity file.
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return nil, err
	}

	// Start MongoDB server and dial.
	if err := a.ensureMongoServer(agentConfig); err != nil {
		return nil, err
	}
	st, m, err := openState(agentConfig, stateWorkerDialOpts)
	if err != nil {
		return nil, err
	}
	reportOpenedState(st)

	singularStateConn := singularStateConn{st.MongoSession(), m}
	runner := newRunner(connectionIsFatal(st), moreImportant)
	singularRunner, err := newSingularRunner(runner, singularStateConn)
	if err != nil {
		return nil, fmt.Errorf("cannot make singular State Runner: %v", err)
	}

	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	providerType := agentConfig.Value(agent.ProviderType)
	if (providerType == provider.Local || provider.IsManual(providerType)) && m.Id() == bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "local-storage", func() (worker.Worker, error) {
			// TODO(axw) 2013-09-24 bug #1229507
			// Make another job to enable storage.
			// There's nothing special about this.
			return localstorage.NewWorker(agentConfig), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented in APIWorker.
		case state.JobManageEnviron:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "instancepoller", func() (worker.Worker, error) {
				return instancepoller.NewWorker(st), nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				return peergrouperNew(st)
			})
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				info, ok := agentConfig.StateServingInfo()
				if !ok {
					return nil, &fatalError{"StateServingInfo not available and we need it"}
				}
				cert := []byte(info.Cert)
				key := []byte(info.PrivateKey)
				if len(cert) == 0 || len(key) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				dataDir := agentConfig.DataDir()
				logDir := agentConfig.LogDir()
				endpoint := net.JoinHostPort("", strconv.Itoa(info.APIPort))
				listener, err := net.Listen("tcp", endpoint)
				if err != nil {
					return nil, err
				}
				return apiserver.NewServer(st, listener, apiserver.ServerConfig{
					Cert:      cert,
					Key:       key,
					DataDir:   dataDir,
					LogDir:    logDir,
					Validator: a.limitLoginsDuringUpgrade,
				})
			})
			a.startWorkerAfterUpgrade(singularRunner, "cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "minunitsworker", func() (worker.Worker, error) {
				return minunitsworker.NewMinUnitsWorker(st), nil
			})
		case state.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			logger.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}
func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, _ error) {
	series = config.PreferredSeries(e.Config())
	availableTools, err := args.AvailableTools.Match(coretools.Filter{Series: series})
	if err != nil {
		return "", "", nil, err
	}
	arch = availableTools.Arches()[0]

	defer delay()
	if err := e.checkBroken("Bootstrap"); err != nil {
		return "", "", nil, err
	}
	network.InitializeFromConfig(e.Config())
	password := e.Config().AdminSecret()
	if password == "" {
		return "", "", nil, fmt.Errorf("admin-secret is required for bootstrap")
	}
	if _, ok := e.Config().CACert(); !ok {
		return "", "", nil, fmt.Errorf("no CA certificate in environment configuration")
	}

	logger.Infof("would pick tools from %s", availableTools)
	cfg, err := environs.BootstrapConfig(e.Config())
	if err != nil {
		return "", "", nil, fmt.Errorf("cannot make bootstrap config: %v", err)
	}

	estate, err := e.state()
	if err != nil {
		return "", "", nil, err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if estate.bootstrapped {
		return "", "", nil, fmt.Errorf("environment is already bootstrapped")
	}
	estate.preferIPv6 = e.Config().PreferIPv6()

	// Create an instance for the bootstrap node.
	logger.Infof("creating bootstrap instance")
	i := &dummyInstance{
		id:           BootstrapInstanceId,
		addresses:    network.NewAddresses("localhost"),
		ports:        make(map[network.PortRange]bool),
		machineId:    agent.BootstrapMachineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
		stateServer:  true,
	}
	estate.insts[i.id] = i

	if e.ecfg().stateServer() {
		// TODO(rog) factor out relevant code from cmd/jujud/bootstrap.go
		// so that we can call it here.
		info := stateInfo(estate.preferIPv6)
		st, err := state.Initialize(info, cfg, mongo.DefaultDialOpts(), estate.statePolicy)
		if err != nil {
			panic(err)
		}
		if err := st.SetEnvironConstraints(args.Constraints); err != nil {
			panic(err)
		}
		if err := st.SetAdminMongoPassword(password); err != nil {
			panic(err)
		}
		if err := st.MongoSession().DB("admin").Login("admin", password); err != nil {
			panic(err)
		}
		_, err = st.AddAdminUser(password)
		if err != nil {
			panic(err)
		}
		estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{
			Cert:    []byte(testing.ServerCert),
			Key:     []byte(testing.ServerKey),
			DataDir: DataDir,
			LogDir:  LogDir,
		})
		if err != nil {
			panic(err)
		}
		estate.apiState = st
	}
	estate.bootstrapped = true
	estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args}
	finalize := func(ctx environs.BootstrapContext, mcfg *cloudinit.MachineConfig) error {
		estate.ops <- OpFinalizeBootstrap{Context: ctx, Env: e.name, MachineConfig: mcfg}
		return nil
	}
	return arch, series, finalize, nil
}