func (s *loginSuite) setupServerWithValidator(c *gc.C, validator apiserver.LoginValidator) (*api.Info, func()) {
	srv, err := apiserver.NewServer(
		s.State,
		apiserver.ServerConfig{
			Port:      0,
			Cert:      []byte(coretesting.ServerCert),
			Key:       []byte(coretesting.ServerKey),
			Validator: validator,
		},
	)
	c.Assert(err, gc.IsNil)
	env, err := s.State.Environment()
	c.Assert(err, gc.IsNil)
	info := &api.Info{
		Tag:        "",
		Password:   "",
		EnvironTag: env.Tag().String(),
		Addrs:      []string{srv.Addr()},
		CACert:     coretesting.CACert,
	}
	return info, func() {
		err := srv.Stop()
		c.Assert(err, gc.IsNil)
	}
}
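// A minimal sketch (not part of the original suite) of how
// setupServerWithValidator might be used: the hypothetical test below installs
// a validator that rejects every login and checks that api.Open surfaces its
// error. It assumes apiserver.LoginValidator is a func(params.Creds) error,
// as the Validator field above suggests.
func (s *loginSuite) TestLoginValidatorRejectsLogins(c *gc.C) {
	validator := func(params.Creds) error {
		return fmt.Errorf("login disabled for testing")
	}
	info, cleanup := s.setupServerWithValidator(c, validator)
	defer cleanup()

	info.Tag = "user-admin"
	info.Password = "dummy-secret"
	st, err := api.Open(info, fastDialOpts)
	c.Assert(err, gc.ErrorMatches, ".*login disabled for testing")
	c.Assert(st, gc.IsNil)
}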
func (s *serverSuite) TestNonCompatiblePathsAre404(c *gc.C) {
	// We expose the API at '/' for compatibility, and at '/ENVUUID/api'
	// for the correct location, but other paths should fail.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, gc.IsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
	})
	c.Assert(err, gc.IsNil)
	defer srv.Stop()

	// We have to use 'localhost' because that is what the TLS cert says,
	// so find just the port for the server.
	_, portString, err := net.SplitHostPort(srv.Addr())
	c.Assert(err, gc.IsNil)
	addr := "localhost:" + portString

	// '/' should be fine.
	conn, err := dialWebsocket(c, addr, "/")
	c.Assert(err, gc.IsNil)
	conn.Close()

	// '/environment/ENVIRONUUID/api' should be fine.
	conn, err = dialWebsocket(c, addr, "/environment/dead-beef-123456/api")
	c.Assert(err, gc.IsNil)
	conn.Close()

	// '/randompath' is not ok.
	conn, err = dialWebsocket(c, addr, "/randompath")
	// Unfortunately go.net/websocket just returns a bad status error; it
	// doesn't tell us whether this was a 404 Not Found, an Internal
	// Server Error, a 200 OK, etc.
	c.Assert(err, gc.ErrorMatches, `websocket.Dial wss://localhost:\d+/randompath: bad status`)
	c.Assert(conn, gc.IsNil)
}
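// dialWebsocket is used above but not shown in this excerpt. A plausible
// sketch, assuming the go.net/websocket API of the era and a juju
// cert.ParseCert helper: it builds a wss:// config that trusts the test CA
// and dials the given path on the server.
func dialWebsocket(c *gc.C, addr, path string) (*websocket.Conn, error) {
	origin := "http://localhost/"
	url := fmt.Sprintf("wss://%s%s", addr, path)
	config, err := websocket.NewConfig(url, origin)
	c.Assert(err, gc.IsNil)
	pool := x509.NewCertPool()
	xcert, err := cert.ParseCert(coretesting.CACert) // assumed helper: PEM -> *x509.Certificate
	c.Assert(err, gc.IsNil)
	pool.AddCert(xcert)
	config.TlsConfig = &tls.Config{RootCAs: pool}
	return websocket.DialConfig(config)
}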
func (s *serverSuite) TestStop(c *gc.C) {
	// Start our own instance of the server so we have
	// a handle on it to stop it.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, gc.IsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
	})
	c.Assert(err, gc.IsNil)
	defer srv.Stop()

	stm, err := s.State.AddMachine("quantal", state.JobHostUnits)
	c.Assert(err, gc.IsNil)
	err = stm.SetProvisioned("foo", "fake_nonce", nil)
	c.Assert(err, gc.IsNil)
	password, err := utils.RandomPassword()
	c.Assert(err, gc.IsNil)
	err = stm.SetPassword(password)
	c.Assert(err, gc.IsNil)

	// Note we can't use openAs because we're not connecting to
	// the suite's server, but to our own instance.
	apiInfo := &api.Info{
		Tag:      stm.Tag(),
		Password: password,
		Nonce:    "fake_nonce",
		Addrs:    []string{srv.Addr()},
		CACert:   coretesting.CACert,
	}
	st, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, gc.IsNil)
	defer st.Close()

	_, err = st.Machiner().Machine(stm.Tag().(names.MachineTag))
	c.Assert(err, gc.IsNil)

	err = srv.Stop()
	c.Assert(err, gc.IsNil)

	_, err = st.Machiner().Machine(stm.Tag().(names.MachineTag))
	// The client has not necessarily seen the server shutdown yet,
	// so there are two possible errors.
	if err != rpc.ErrShutdown && err != io.ErrUnexpectedEOF {
		c.Fatalf("unexpected error from request: %v", err)
	}

	// Check it can be stopped twice.
	err = srv.Stop()
	c.Assert(err, gc.IsNil)
}
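// fastDialOpts is referenced by the tests but not defined in this excerpt.
// A minimal sketch: zero-valued DialOpts mean no retry delay, so a request
// against a stopped server fails fast instead of waiting out backoff.
var fastDialOpts = api.DialOpts{}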
func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) error {
	selectedTools, err := common.EnsureBootstrapTools(ctx, e, config.PreferredSeries(e.Config()), args.Constraints.Arch)
	if err != nil {
		return err
	}
	series := selectedTools.OneSeries()
	defer delay()
	if err := e.checkBroken("Bootstrap"); err != nil {
		return err
	}
	network.InitializeFromConfig(e.Config())
	password := e.Config().AdminSecret()
	if password == "" {
		return fmt.Errorf("admin-secret is required for bootstrap")
	}
	if _, ok := e.Config().CACert(); !ok {
		return fmt.Errorf("no CA certificate in environment configuration")
	}
	logger.Infof("would pick tools from %s", selectedTools)
	cfg, err := environs.BootstrapConfig(e.Config())
	if err != nil {
		return fmt.Errorf("cannot make bootstrap config: %v", err)
	}

	estate, err := e.state()
	if err != nil {
		return err
	}
	estate.mu.Lock()
	defer estate.mu.Unlock()
	if estate.bootstrapped {
		return fmt.Errorf("environment is already bootstrapped")
	}
	estate.preferIPv6 = e.Config().PreferIPv6()

	// Write the bootstrap file just like a normal provider. However,
	// we need to release the mutex for the save state to work, so regain
	// it after the call.
	estate.mu.Unlock()
	instIds := []instance.Id{BootstrapInstanceId}
	if err := bootstrap.SaveState(e.Storage(), &bootstrap.BootstrapState{StateInstances: instIds}); err != nil {
		logger.Errorf("failed to save state instances: %v", err)
		estate.mu.Lock() // otherwise the deferred unlock will fail
		return err
	}
	estate.mu.Lock() // back at it

	// Create an instance for the bootstrap node.
	logger.Infof("creating bootstrap instance")
	i := &dummyInstance{
		id:           BootstrapInstanceId,
		addresses:    network.NewAddresses("localhost"),
		ports:        make(map[network.Port]bool),
		machineId:    agent.BootstrapMachineId,
		series:       series,
		firewallMode: e.Config().FirewallMode(),
		state:        estate,
		stateServer:  true,
	}
	estate.insts[i.id] = i

	if e.ecfg().stateServer() {
		// TODO(rog) factor out relevant code from cmd/jujud/bootstrap.go
		// so that we can call it here.
		info := stateInfo(estate.preferIPv6)
		st, err := state.Initialize(info, cfg, mongo.DefaultDialOpts(), estate.statePolicy)
		if err != nil {
			panic(err)
		}
		if err := st.SetEnvironConstraints(args.Constraints); err != nil {
			panic(err)
		}
		if err := st.SetAdminMongoPassword(utils.UserPasswordHash(password, utils.CompatSalt)); err != nil {
			panic(err)
		}
		_, err = st.AddAdminUser(password)
		if err != nil {
			panic(err)
		}
		estate.apiServer, err = apiserver.NewServer(st, estate.apiListener, apiserver.ServerConfig{
			Cert:    []byte(testing.ServerCert),
			Key:     []byte(testing.ServerKey),
			DataDir: DataDir,
			LogDir:  LogDir,
		})
		if err != nil {
			panic(err)
		}
		estate.apiState = st
	}
	estate.bootstrapped = true
	estate.ops <- OpBootstrap{Context: ctx, Env: e.name, Args: args}
	return nil
}
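// OpBootstrap, sent on estate.ops above, lets tests observe that Bootstrap ran
// and with which arguments. Its shape here is inferred from the send site and
// shown only as a sketch:
type OpBootstrap struct {
	Context environs.BootstrapContext
	Env     string
	Args    environs.BootstrapParams
}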
func (s *serverSuite) TestAPIServerCanListenOnBothIPv4AndIPv6(c *gc.C) {
	// Start our own instance of the server listening on
	// both IPv4 and IPv6 localhost addresses and an ephemeral port.
	listener, err := net.Listen("tcp", ":0")
	c.Assert(err, gc.IsNil)
	srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{
		Cert: []byte(coretesting.ServerCert),
		Key:  []byte(coretesting.ServerKey),
	})
	c.Assert(err, gc.IsNil)
	defer srv.Stop()

	// srv.Addr() always reports "localhost" together
	// with the port as address. This way it can be used
	// as hostname to construct URLs which will work
	// for both IPv4- and IPv6-only networks, as
	// localhost resolves as both 127.0.0.1 and ::1.
	// Retrieve the port as string and integer.
	hostname, portString, err := net.SplitHostPort(srv.Addr())
	c.Assert(err, gc.IsNil)
	c.Assert(hostname, gc.Equals, "localhost")
	port, err := strconv.Atoi(portString)
	c.Assert(err, gc.IsNil)

	stm, err := s.State.AddMachine("quantal", state.JobHostUnits)
	c.Assert(err, gc.IsNil)
	err = stm.SetProvisioned("foo", "fake_nonce", nil)
	c.Assert(err, gc.IsNil)
	password, err := utils.RandomPassword()
	c.Assert(err, gc.IsNil)
	err = stm.SetPassword(password)
	c.Assert(err, gc.IsNil)

	// Now connect twice - using IPv4 and IPv6 endpoints.
	apiInfo := &api.Info{
		Tag:      stm.Tag(),
		Password: password,
		Nonce:    "fake_nonce",
		Addrs:    []string{net.JoinHostPort("127.0.0.1", portString)},
		CACert:   coretesting.CACert,
	}
	ipv4State, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, gc.IsNil)
	defer ipv4State.Close()
	c.Assert(ipv4State.Addr(), gc.Equals, net.JoinHostPort("127.0.0.1", portString))
	c.Assert(ipv4State.APIHostPorts(), jc.DeepEquals, [][]network.HostPort{
		{{network.NewAddress("127.0.0.1", network.ScopeMachineLocal), port}},
	})
	_, err = ipv4State.Machiner().Machine(stm.Tag().(names.MachineTag))
	c.Assert(err, gc.IsNil)

	apiInfo.Addrs = []string{net.JoinHostPort("::1", portString)}
	ipv6State, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, gc.IsNil)
	defer ipv6State.Close()
	c.Assert(ipv6State.Addr(), gc.Equals, net.JoinHostPort("::1", portString))
	c.Assert(ipv6State.APIHostPorts(), jc.DeepEquals, [][]network.HostPort{
		{{network.NewAddress("::1", network.ScopeMachineLocal), port}},
	})
	_, err = ipv6State.Machiner().Machine(stm.Tag().(names.MachineTag))
	c.Assert(err, gc.IsNil)
}
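// TestStop and TestAPIServerCanListenOnBothIPv4AndIPv6 share the same machine
// setup; a hypothetical helper like this sketch could deduplicate it.
func (s *serverSuite) addProvisionedMachine(c *gc.C) (*state.Machine, string) {
	stm, err := s.State.AddMachine("quantal", state.JobHostUnits)
	c.Assert(err, gc.IsNil)
	err = stm.SetProvisioned("foo", "fake_nonce", nil)
	c.Assert(err, gc.IsNil)
	password, err := utils.RandomPassword()
	c.Assert(err, gc.IsNil)
	err = stm.SetPassword(password)
	c.Assert(err, gc.IsNil)
	return stm, password
}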
// StateWorker returns a worker running all the workers that require
// a *state.State connection.
func (a *MachineAgent) StateWorker() (worker.Worker, error) {
	agentConfig := a.CurrentConfig()

	// Create the system-identity file.
	if err := agent.WriteSystemIdentityFile(agentConfig); err != nil {
		return nil, err
	}

	// Start the MongoDB server.
	if err := a.ensureMongoServer(agentConfig); err != nil {
		return nil, err
	}
	st, m, err := openState(agentConfig, mongo.DialOpts{})
	if err != nil {
		return nil, err
	}
	reportOpenedState(st)

	singularStateConn := singularStateConn{st.MongoSession(), m}
	runner := newRunner(connectionIsFatal(st), moreImportant)
	singularRunner, err := newSingularRunner(runner, singularStateConn)
	if err != nil {
		return nil, fmt.Errorf("cannot make singular runner: %v", err)
	}

	// Take advantage of special knowledge here in that we will only ever want
	// the storage provider on one machine, and that is the "bootstrap" node.
	providerType := agentConfig.Value(agent.ProviderType)
	if (providerType == provider.Local || provider.IsManual(providerType)) && m.Id() == bootstrapMachineId {
		a.startWorkerAfterUpgrade(runner, "local-storage", func() (worker.Worker, error) {
			// TODO(axw) 2013-09-24 bug #1229507
			// Make another job to enable storage.
			// There's nothing special about this.
			return localstorage.NewWorker(agentConfig), nil
		})
	}
	for _, job := range m.Jobs() {
		switch job {
		case state.JobHostUnits:
			// Implemented in APIWorker.
		case state.JobManageEnviron:
			useMultipleCPUs()
			a.startWorkerAfterUpgrade(runner, "instancepoller", func() (worker.Worker, error) {
				return instancepoller.NewWorker(st), nil
			})
			a.startWorkerAfterUpgrade(runner, "peergrouper", func() (worker.Worker, error) {
				return peergrouperNew(st)
			})
			runner.StartWorker("apiserver", func() (worker.Worker, error) {
				// If the configuration does not have the required information,
				// it is currently not a recoverable error, so we kill the whole
				// agent, potentially enabling human intervention to fix
				// the agent's configuration file. In the future, we may retrieve
				// the state server certificate and key from the state, and
				// this should then change.
				info, ok := agentConfig.StateServingInfo()
				if !ok {
					return nil, &fatalError{"StateServingInfo not available and we need it"}
				}
				port := info.APIPort
				cert := []byte(info.Cert)
				key := []byte(info.PrivateKey)
				if len(cert) == 0 || len(key) == 0 {
					return nil, &fatalError{"configuration does not have state server cert/key"}
				}
				dataDir := agentConfig.DataDir()
				logDir := agentConfig.LogDir()
				return apiserver.NewServer(st, apiserver.ServerConfig{
					Port:      port,
					Cert:      cert,
					Key:       key,
					DataDir:   dataDir,
					LogDir:    logDir,
					Validator: a.limitLoginsDuringUpgrade,
				})
			})
			a.startWorkerAfterUpgrade(singularRunner, "cleaner", func() (worker.Worker, error) {
				return cleaner.NewCleaner(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "resumer", func() (worker.Worker, error) {
				// The action of resumer is so subtle that it is not tested,
				// because we can't figure out how to do so without brutalising
				// the transaction log.
				return resumer.NewResumer(st), nil
			})
			a.startWorkerAfterUpgrade(singularRunner, "minunitsworker", func() (worker.Worker, error) {
				return minunitsworker.NewMinUnitsWorker(st), nil
			})
		case state.JobManageStateDeprecated:
			// Legacy environments may set this, but we ignore it.
		default:
			logger.Warningf("ignoring unknown job %q", job)
		}
	}
	return newCloseWorker(runner, st), nil
}
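// fatalError, returned by the apiserver worker above, is not defined in this
// excerpt. A sketch consistent with its use as &fatalError{"..."}: an error
// type that the runner's isFatal check treats as fatal to the whole agent.
type fatalError struct {
	Err string
}

func (e *fatalError) Error() string { return e.Err }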