func (s *loginSuite) TestBadLogin(c *gc.C) {
	// Start our own server so we can control when the first login
	// happens. Otherwise in JujuConnSuite.SetUpTest api.Open is
	// called with user-admin permissions automatically.
	info, cleanup := s.setupServerWithValidator(c, nil)
	defer cleanup()

	adminUser := s.AdminUserTag(c)
	for i, t := range []struct {
		tag      names.Tag
		password string
		err      error
		code     string
	}{{
		tag:      adminUser,
		password: "******",
		err: &rpc.RequestError{
			Message: "invalid entity name or password",
			Code:    "unauthorized access",
		},
		code: params.CodeUnauthorized,
	}, {
		tag:      names.NewUserTag("unknown"),
		password: "******",
		err: &rpc.RequestError{
			Message: "invalid entity name or password",
			Code:    "unauthorized access",
		},
		code: params.CodeUnauthorized,
	}} {
		c.Logf("test %d; entity %q; password %q", i, t.tag, t.password)
		func() {
			// Open the API without logging in, so we can perform
			// operations on the connection before calling Login.
			st := s.openAPIWithoutLogin(c, info)
			defer st.Close()

			_, err := apimachiner.NewState(st).Machine(names.NewMachineTag("0"))
			c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
				Message: `unknown object type "Machiner"`,
				Code:    "not implemented",
			})

			// Since these are user login tests, the nonce is empty.
			err = st.Login(t.tag, t.password, "", nil)
			c.Assert(errors.Cause(err), gc.DeepEquals, t.err)
			c.Assert(params.ErrCode(err), gc.Equals, t.code)

			_, err = apimachiner.NewState(st).Machine(names.NewMachineTag("0"))
			c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
				Message: `unknown object type "Machiner"`,
				Code:    "not implemented",
			})
		}()
	}
}
func (s *migrationSuite) TestImportingModel(c *gc.C) {
	m, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{
		Nonce: "nonce",
	})
	model, err := s.State.Model()
	c.Assert(err, jc.ErrorIsNil)
	err = model.SetMigrationMode(state.MigrationModeImporting)
	c.Assert(err, jc.ErrorIsNil)

	_, cleanup := s.setupServer(c)
	defer cleanup()

	// Users should be able to log in but RPC requests should fail.
	info := s.APIInfo(c)
	userConn := s.OpenAPIAs(c, info.Tag, info.Password)
	defer userConn.Close()
	_, err = userConn.Client().Status(nil)
	c.Check(err, gc.ErrorMatches, "migration in progress, model is importing")

	// Machines should be able to use the API.
	machineConn := s.OpenAPIAsMachine(c, m.Tag(), password, "nonce")
	defer machineConn.Close()
	_, err = apimachiner.NewState(machineConn).Machine(m.MachineTag())
	c.Check(err, jc.ErrorIsNil)
}
func (s *MachinerStateSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)
	s.st, s.machine = s.OpenAPIAsNewMachine(c)

	// Create the machiner API facade.
	s.machinerState = apimachiner.NewState(s.st)
	c.Assert(s.machinerState, gc.NotNil)

	// Get the machine through the facade.
	var err error
	s.apiMachine, err = s.machinerState.Machine(s.machine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.apiMachine.Tag(), gc.Equals, s.machine.Tag())

	// Isolate tests better by not using real interface addresses.
	s.PatchValue(machiner.InterfaceAddrs, func() ([]net.Addr, error) {
		return nil, nil
	})
	s.PatchValue(&network.InterfaceByNameAddrs, func(string) ([]net.Addr, error) {
		return nil, nil
	})
	s.PatchValue(&network.LXCNetDefaultConfig, "")
	s.PatchValue(machiner.GetObservedNetworkConfig, func() ([]params.NetworkConfig, error) {
		return nil, nil
	})
}
func (s *APIAddressUpdaterSuite) TestAddressChange(c *gc.C) {
	setter := &apiAddressSetter{servers: make(chan [][]network.HostPort, 1)}
	st, _ := s.OpenAPIAsNewMachine(c, state.JobHostUnits)
	worker, err := apiaddressupdater.NewAPIAddressUpdater(apimachiner.NewState(st), setter)
	c.Assert(err, jc.ErrorIsNil)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()
	s.BackingState.StartSync()
	updatedServers := [][]network.HostPort{
		network.NewHostPorts(1234, "localhost", "127.0.0.1"),
	}
	// SetAPIHostPorts should be called with the initial value (empty),
	// and then the updated value.
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for SetAPIHostPorts to be called initially")
	case servers := <-setter.servers:
		c.Assert(servers, gc.HasLen, 0)
	}
	err = s.State.SetAPIHostPorts(updatedServers)
	c.Assert(err, jc.ErrorIsNil)
	s.BackingState.StartSync()
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for SetAPIHostPorts to be called after update")
	case servers := <-setter.servers:
		c.Assert(servers, gc.DeepEquals, updatedServers)
	}
}
func (s *APIAddressUpdaterSuite) TestStartStop(c *gc.C) {
	st, _ := s.OpenAPIAsNewMachine(c, state.JobHostUnits)
	worker, err := apiaddressupdater.NewAPIAddressUpdater(apimachiner.NewState(st), &apiAddressSetter{})
	c.Assert(err, jc.ErrorIsNil)
	worker.Kill()
	c.Assert(worker.Wait(), gc.IsNil)
}
func (s *serverSuite) TestAPIServerCanListenOnBothIPv4AndIPv6(c *gc.C) {
	err := s.State.SetAPIHostPorts(nil)
	c.Assert(err, jc.ErrorIsNil)

	// Start our own instance of the server listening on
	// both IPv4 and IPv6 localhost addresses and an ephemeral port.
	srv := newServer(c, s.State)
	defer srv.Stop()

	port := srv.Addr().Port
	portString := fmt.Sprintf("%d", port)

	machine, password := s.Factory.MakeMachineReturningPassword(
		c, &factory.MachineParams{Nonce: "fake_nonce"})

	// Now connect twice - using IPv4 and IPv6 endpoints.
	apiInfo := &api.Info{
		Tag:      machine.Tag(),
		Password: password,
		Nonce:    "fake_nonce",
		Addrs:    []string{net.JoinHostPort("127.0.0.1", portString)},
		CACert:   coretesting.CACert,
		ModelTag: s.State.ModelTag(),
	}
	ipv4State, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer ipv4State.Close()
	c.Assert(ipv4State.Addr(), gc.Equals, net.JoinHostPort("127.0.0.1", portString))
	c.Assert(ipv4State.APIHostPorts(), jc.DeepEquals, [][]network.HostPort{
		network.NewHostPorts(port, "127.0.0.1"),
	})

	_, err = apimachiner.NewState(ipv4State).Machine(machine.MachineTag())
	c.Assert(err, jc.ErrorIsNil)

	apiInfo.Addrs = []string{net.JoinHostPort("::1", portString)}
	ipv6State, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer ipv6State.Close()
	c.Assert(ipv6State.Addr(), gc.Equals, net.JoinHostPort("::1", portString))
	c.Assert(ipv6State.APIHostPorts(), jc.DeepEquals, [][]network.HostPort{
		network.NewHostPorts(port, "::1"),
	})

	_, err = apimachiner.NewState(ipv6State).Machine(machine.MachineTag())
	c.Assert(err, jc.ErrorIsNil)
}
func (s *serverSuite) TestStop(c *gc.C) {
	// Start our own instance of the server so we have
	// a handle on it to stop it.
	srv := newServer(c, s.State)
	defer srv.Stop()

	machine, password := s.Factory.MakeMachineReturningPassword(
		c, &factory.MachineParams{Nonce: "fake_nonce"})

	// A net.TCPAddr cannot be directly stringified into a valid hostname.
	address := fmt.Sprintf("localhost:%d", srv.Addr().Port)

	// Note we can't use openAs because we're not connecting to the
	// suite's default API server, but to the instance started above.
	apiInfo := &api.Info{
		Tag:      machine.Tag(),
		Password: password,
		Nonce:    "fake_nonce",
		Addrs:    []string{address},
		CACert:   coretesting.CACert,
		ModelTag: s.State.ModelTag(),
	}
	st, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer st.Close()

	_, err = apimachiner.NewState(st).Machine(machine.MachineTag())
	c.Assert(err, jc.ErrorIsNil)

	err = srv.Stop()
	c.Assert(err, jc.ErrorIsNil)

	_, err = apimachiner.NewState(st).Machine(machine.MachineTag())
	err = errors.Cause(err)
	// The client has not necessarily seen the server shutdown yet,
	// so there are two possible errors.
	if err != rpc.ErrShutdown && err != io.ErrUnexpectedEOF {
		c.Fatalf("unexpected error from request: %#v, expected rpc.ErrShutdown or io.ErrUnexpectedEOF", err)
	}

	// Check it can be stopped twice.
	err = srv.Stop()
	c.Assert(err, jc.ErrorIsNil)
}
func (s *serverSuite) TestStop(c *gc.C) {
	// Start our own instance of the server so we have
	// a handle on it to stop it.
	_, srv := newServer(c, s.State)
	defer assertStop(c, srv)

	machine, password := s.Factory.MakeMachineReturningPassword(
		c, &factory.MachineParams{Nonce: "fake_nonce"})

	// A net.TCPAddr cannot be directly stringified into a valid hostname.
	address := fmt.Sprintf("localhost:%d", srv.Addr().Port)

	// Note we can't use openAs because we're not connecting to the
	// suite's default API server, but to the instance started above.
	apiInfo := &api.Info{
		Tag:      machine.Tag(),
		Password: password,
		Nonce:    "fake_nonce",
		Addrs:    []string{address},
		CACert:   coretesting.CACert,
		ModelTag: s.State.ModelTag(),
	}
	st, err := api.Open(apiInfo, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer st.Close()

	_, err = apimachiner.NewState(st).Machine(machine.MachineTag())
	c.Assert(err, jc.ErrorIsNil)

	err = srv.Stop()
	c.Assert(err, jc.ErrorIsNil)

	_, err = apimachiner.NewState(st).Machine(machine.MachineTag())
	// The client has not necessarily seen the server shutdown yet, so there
	// are multiple possible errors. All we should care about is that there is
	// an error, not what the error actually is.
	c.Assert(err, gc.NotNil)

	// Check it can be stopped twice.
	err = srv.Stop()
	c.Assert(err, jc.ErrorIsNil)
}
func (s *MachineSuite) TestManageModelServesAPI(c *gc.C) {
	s.assertJobWithState(c, state.JobManageModel, func(conf agent.Config, agentState *state.State) {
		apiInfo, ok := conf.APIInfo()
		c.Assert(ok, jc.IsTrue)
		st, err := api.Open(apiInfo, fastDialOpts)
		c.Assert(err, jc.ErrorIsNil)
		defer st.Close()
		m, err := apimachiner.NewState(st).Machine(conf.Tag().(names.MachineTag))
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(m.Life(), gc.Equals, params.Alive)
	})
}
func (s *machinerSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)
	m, err := s.State.AddMachine("quantal", state.JobManageModel)
	c.Assert(err, jc.ErrorIsNil)
	err = m.SetProviderAddresses(network.NewAddress("10.0.0.1"))
	c.Assert(err, jc.ErrorIsNil)

	s.st, s.machine = s.OpenAPIAsNewMachine(c)
	// Create the machiner API facade.
	s.machiner = machiner.NewState(s.st)
	c.Assert(s.machiner, gc.NotNil)
	s.APIAddresserTests = apitesting.NewAPIAddresserTests(s.machiner, s.BackingState)
}
func (a *MachineAgent) setControllerNetworkConfig(apiConn api.Connection) error {
	machinerAPI := apimachiner.NewState(apiConn)
	agentConfig := a.CurrentConfig()

	tag := agentConfig.Tag().(names.MachineTag)
	machine, err := machinerAPI.Machine(tag)
	if errors.IsNotFound(err) || (err == nil && machine.Life() == params.Dead) {
		return worker.ErrTerminateAgent
	}
	if err != nil {
		return errors.Annotatef(err, "cannot load machine %s from state", tag)
	}

	if err := machine.SetProviderNetworkConfig(); err != nil {
		return errors.Annotate(err, "cannot set controller provider network config")
	}
	return nil
}
// Manifold returns a dependency manifold that runs a machiner worker, using
// the resource names defined in the supplied config.
func Manifold(config ManifoldConfig) dependency.Manifold {
	// TODO(waigani) This function is currently covered by functional tests
	// under the machine agent. Add unit tests once infrastructure to do so is
	// in place.

	// newWorker non-trivially wraps NewMachiner to specialise a PostUpgradeManifold.
	var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) {
		currentConfig := a.CurrentConfig()

		// TODO(fwereade): this functionality should be on the
		// deployer facade instead.
		agentFacade := apiagent.NewState(apiCaller)
		envConfig, err := agentFacade.ModelConfig()
		if err != nil {
			return nil, errors.Errorf("cannot read environment config: %v", err)
		}

		ignoreMachineAddresses, _ := envConfig.IgnoreMachineAddresses()
		// Containers only have machine addresses, so we can't ignore them.
		tag := currentConfig.Tag()
		if names.IsContainerMachine(tag.Id()) {
			ignoreMachineAddresses = false
		}
		if ignoreMachineAddresses {
			logger.Infof("machine addresses not used, only addresses from provider")
		}
		accessor := APIMachineAccessor{apimachiner.NewState(apiCaller)}
		w, err := NewMachiner(Config{
			MachineAccessor:              accessor,
			Tag:                          tag.(names.MachineTag),
			ClearMachineAddressesOnStart: ignoreMachineAddresses,
			NotifyMachineDead: func() error {
				return agent.SetCanUninstall(a)
			},
		})
		if err != nil {
			return nil, errors.Annotate(err, "cannot start machiner worker")
		}
		return w, err
	}

	return util.PostUpgradeManifold(config.PostUpgradeManifoldConfig, newWorker)
}
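// Illustrative only: a sketch of how a manifold like the one above might be
// registered in a dependency engine's manifolds map. The resource names
// ("agent", "api-caller") and the PostUpgradeManifoldConfig field names are
// assumptions inferred from the code above, not taken from the Juju source.
func exampleMachinerManifolds() dependency.Manifolds {
	return dependency.Manifolds{
		"machiner": Manifold(ManifoldConfig{
			PostUpgradeManifoldConfig: util.PostUpgradeManifoldConfig{
				AgentName:     "agent",      // assumed field name
				APICallerName: "api-caller", // assumed field name
			},
		}),
	}
}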
func (s *baseLoginSuite) runLoginSetsLogIdentifier(c *gc.C) {
	info, cleanup := s.setupServerWithValidator(c, nil)
	defer cleanup()

	machine, password := s.Factory.MakeMachineReturningPassword(
		c, &factory.MachineParams{Nonce: "fake_nonce"})

	info.Tag = machine.Tag()
	info.Password = password
	info.Nonce = "fake_nonce"

	apiConn, err := api.Open(info, fastDialOpts)
	c.Assert(err, jc.ErrorIsNil)
	defer apiConn.Close()

	apiMachine, err := apimachiner.NewState(apiConn).Machine(machine.MachineTag())
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(apiMachine.Tag(), gc.Equals, machine.Tag())
}
func (s *baseLoginSuite) checkLoginWithValidator(c *gc.C, validator apiserver.LoginValidator, checker validationChecker) {
	info, cleanup := s.setupServerWithValidator(c, validator)
	defer cleanup()

	st := s.openAPIWithoutLogin(c, info)
	defer st.Close()

	// Ensure not already logged in.
	_, err := apimachiner.NewState(st).Machine(names.NewMachineTag("0"))
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: `unknown object type "Machiner"`,
		Code:    "not implemented",
	})

	adminUser := s.AdminUserTag(c)
	// Since these are user login tests, the nonce is empty.
	err = st.Login(adminUser, "dummy-secret", "", nil)
	checker(c, err, st)
}
func (s *baseLoginSuite) checkLoginWithValidator(c *gc.C, validator apiserver.LoginValidator, checker validationChecker) {
	cfg := defaultServerConfig(c)
	cfg.Validator = validator
	info, srv := newServerWithConfig(c, s.State, cfg)
	defer assertStop(c, srv)
	info.ModelTag = s.State.ModelTag()

	st := s.openAPIWithoutLogin(c, info)

	// Ensure not already logged in.
	_, err := apimachiner.NewState(st).Machine(names.NewMachineTag("0"))
	c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
		Message: `unknown object type "Machiner"`,
		Code:    "not implemented",
	})

	adminUser := s.AdminUserTag(c)
	// Since these are user login tests, the nonce is empty.
	err = st.Login(adminUser, "dummy-secret", "", nil)
	checker(c, err, st)
}
// newWorker non-trivially wraps NewMachiner to specialise a util.AgentApiManifold.
//
// TODO(waigani) This function is currently covered by functional tests
// under the machine agent. Add unit tests once infrastructure to do so is
// in place.
func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) {
	currentConfig := a.CurrentConfig()

	// TODO(fwereade): this functionality should be on the
	// machiner facade instead -- or, better yet, separate
	// the networking concerns from the lifecycle ones and
	// have completely separate workers.
	//
	// (With their own facades.)
	agentFacade := apiagent.NewState(apiCaller)
	envConfig, err := agentFacade.ModelConfig()
	if err != nil {
		return nil, errors.Errorf("cannot read environment config: %v", err)
	}

	ignoreMachineAddresses, _ := envConfig.IgnoreMachineAddresses()
	// Containers only have machine addresses, so we can't ignore them.
	tag := currentConfig.Tag()
	if names.IsContainerMachine(tag.Id()) {
		ignoreMachineAddresses = false
	}
	if ignoreMachineAddresses {
		logger.Infof("machine addresses not used, only addresses from provider")
	}
	accessor := APIMachineAccessor{apimachiner.NewState(apiCaller)}
	w, err := NewMachiner(Config{
		MachineAccessor:              accessor,
		Tag:                          tag.(names.MachineTag),
		ClearMachineAddressesOnStart: ignoreMachineAddresses,
		NotifyMachineDead: func() error {
			return agent.SetCanUninstall(a)
		},
	})
	if err != nil {
		return nil, errors.Annotate(err, "cannot start machiner worker")
	}
	return w, err
}
// Machiner returns a version of the state that provides functionality
// required by the machiner worker.
func (st *State) Machiner() *machiner.State {
	return machiner.NewState(st)
}
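// A minimal usage sketch (hypothetical helper, not in the Juju source): fetch
// the agent's machine through the machiner facade and report its life value.
// It relies only on calls shown elsewhere in this listing: machiner.NewState,
// State.Machine, and Machine.Life.
func machineLife(caller base.APICaller, tag names.MachineTag) (params.Life, error) {
	m, err := machiner.NewState(caller).Machine(tag)
	if err != nil {
		return "", errors.Trace(err)
	}
	return m.Life(), nil
}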
func (s *APIAddressUpdaterSuite) TestLXCBridgeAddressesFiltering(c *gc.C) {
	lxcFakeNetConfig := filepath.Join(c.MkDir(), "lxc-net")
	netConf := []byte(`
  # comments ignored
LXC_BR= ignored
LXC_ADDR = "fooo"
LXC_BRIDGE="foobar" # detected
anything else ignored
LXC_BRIDGE="ignored"`[1:])
	err := ioutil.WriteFile(lxcFakeNetConfig, netConf, 0644)
	c.Assert(err, jc.ErrorIsNil)
	s.PatchValue(&network.InterfaceByNameAddrs, func(name string) ([]net.Addr, error) {
		c.Assert(name, gc.Equals, "foobar")
		return []net.Addr{
			&net.IPAddr{IP: net.IPv4(10, 0, 3, 1)},
			&net.IPAddr{IP: net.IPv4(10, 0, 3, 4)},
		}, nil
	})
	s.PatchValue(&network.LXCNetDefaultConfig, lxcFakeNetConfig)

	initialServers := [][]network.HostPort{
		network.NewHostPorts(1234, "localhost", "127.0.0.1"),
		network.NewHostPorts(
			4321,
			"10.0.3.1", // filtered
			"10.0.3.3", // not filtered (not a lxc bridge address)
		),
		network.NewHostPorts(4242, "10.0.3.4"), // filtered
	}
	err = s.State.SetAPIHostPorts(initialServers)
	c.Assert(err, jc.ErrorIsNil)

	setter := &apiAddressSetter{servers: make(chan [][]network.HostPort, 1)}
	st, _ := s.OpenAPIAsNewMachine(c, state.JobHostUnits)
	worker, err := apiaddressupdater.NewAPIAddressUpdater(apimachiner.NewState(st), setter)
	c.Assert(err, jc.ErrorIsNil)
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()
	s.BackingState.StartSync()

	updatedServers := [][]network.HostPort{
		network.NewHostPorts(1234, "localhost", "127.0.0.1"),
		network.NewHostPorts(
			4001,
			"10.0.3.1", // filtered
			"10.0.3.3", // not filtered (not a lxc bridge address)
		),
		network.NewHostPorts(4200, "10.0.3.4"), // filtered
	}
	// SetAPIHostPorts should be called with the initial value, and
	// then the updated value, but filtering occurs in both cases.
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for SetAPIHostPorts to be called initially")
	case servers := <-setter.servers:
		c.Assert(servers, gc.HasLen, 2)
		c.Assert(servers, jc.DeepEquals, [][]network.HostPort{
			network.NewHostPorts(1234, "localhost", "127.0.0.1"),
			network.NewHostPorts(4321, "10.0.3.3"),
		})
	}
	err = s.State.SetAPIHostPorts(updatedServers)
	c.Assert(err, gc.IsNil)
	s.BackingState.StartSync()
	select {
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for SetAPIHostPorts to be called after update")
	case servers := <-setter.servers:
		c.Assert(servers, gc.HasLen, 2)
		c.Assert(servers, jc.DeepEquals, [][]network.HostPort{
			network.NewHostPorts(1234, "localhost", "127.0.0.1"),
			network.NewHostPorts(4001, "10.0.3.3"),
		})
	}
}
// validateMigration is called by the migrationminion to help check
// that the agent will be ok when connected to a new controller.
func (a *MachineAgent) validateMigration(apiCaller base.APICaller) error {
	// TODO(mjs) - more extensive checks to come.
	facade := apimachiner.NewState(apiCaller)
	_, err := facade.Machine(names.NewMachineTag(a.machineId))
	return errors.Trace(err)
}
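// A hedged sketch (hypothetical wiring, not the actual migrationminion code)
// of how a check like validateMigration might be driven: given a connection
// to the target controller, refuse the migration if the agent's machine
// cannot be read through it. api.Connection satisfies base.APICaller.
func checkTargetController(a *MachineAgent, targetConn api.Connection) error {
	if err := a.validateMigration(targetConn); err != nil {
		return errors.Annotate(err, "target controller failed pre-migration validation")
	}
	return nil
}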
// Manifold returns a dependency manifold that runs an API address updater worker,
// using the resource names defined in the supplied config.
func Manifold(config ManifoldConfig) dependency.Manifold {
	typedConfig := util.AgentApiManifoldConfig(config)
	return util.AgentApiManifold(typedConfig, newWorker)
}

// newWorker trivially wraps NewAPIAddressUpdater for use in a util.AgentApiManifold.
// It's not tested at the moment, because the scaffolding necessary is too
// unwieldy/distracting to introduce at this point.
var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) {
	tag := a.CurrentConfig().Tag()

	// TODO(fwereade): use appropriate facade!
	var facade APIAddresser
	switch apiTag := tag.(type) {
	case names.UnitTag:
		facade = uniter.NewState(apiCaller, apiTag)
	case names.MachineTag:
		facade = machiner.NewState(apiCaller)
	default:
		return nil, errors.Errorf("expected a unit or machine tag; got %q", tag)
	}

	setter := agent.APIHostPortsSetter{a}
	w, err := NewAPIAddressUpdater(facade, setter)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
// Manifold returns a dependency manifold that runs an upgrader
// worker, using the resource names defined in the supplied config.
func Manifold(config ManifoldConfig) dependency.Manifold {
	return dependency.Manifold{
		Inputs: []string{
			config.AgentName,
			config.APICallerName,
			config.UpgradeStepsGateName,
		},
		Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) {
			// Sanity checks
			if config.OpenStateForUpgrade == nil {
				return nil, errors.New("missing OpenStateForUpgrade in config")
			}
			if config.PreUpgradeSteps == nil {
				return nil, errors.New("missing PreUpgradeSteps in config")
			}

			// Get machine agent.
			var agent agent.Agent
			if err := getResource(config.AgentName, &agent); err != nil {
				return nil, err
			}

			// Grab the tag and ensure that it's for a machine.
			tag, ok := agent.CurrentConfig().Tag().(names.MachineTag)
			if !ok {
				return nil, errors.New("agent's tag is not a machine tag")
			}

			// Get API connection.
			// TODO(fwereade): can we make the worker use an
			// APICaller instead? should be able to depend on
			// the Engine to abort us when conn is closed...
			var apiConn api.Connection
			if err := getResource(config.APICallerName, &apiConn); err != nil {
				return nil, err
			}

			// Get the machine agent's jobs.
			// TODO(fwereade): use appropriate facade!
			agentFacade := apiagent.NewState(apiConn)
			entity, err := agentFacade.Entity(tag)
			if err != nil {
				return nil, err
			}
			jobs := entity.Jobs()

			// Get machine instance for setting status on.
			// TODO(fwereade): use appropriate facade!
			machinerFacade := apimachiner.NewState(apiConn)
			machine, err := machinerFacade.Machine(tag)
			if err != nil {
				return nil, err
			}

			// Get upgradesteps completed lock.
			var upgradeStepsLock gate.Lock
			if err := getResource(config.UpgradeStepsGateName, &upgradeStepsLock); err != nil {
				return nil, err
			}

			return NewWorker(
				upgradeStepsLock,
				agent,
				apiConn,
				jobs,
				config.OpenStateForUpgrade,
				config.PreUpgradeSteps,
				machine,
			)
		},
	}
}
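// Illustrative only: a sketch of declaring the manifold above in an engine's
// manifolds map. The resource names are hypothetical, and the
// OpenStateForUpgrade/PreUpgradeSteps fields are assumed to be populated by
// the caller, since their concrete types live with the machine agent.
func exampleUpgradeStepsManifolds(cfg ManifoldConfig) dependency.Manifolds {
	cfg.AgentName = "agent"                        // assumed resource name
	cfg.APICallerName = "api-caller"               // assumed resource name
	cfg.UpgradeStepsGateName = "upgrade-steps-gate" // assumed resource name
	return dependency.Manifolds{
		"upgrade-steps-runner": Manifold(cfg),
	}
}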
// startAPIWorkers is called to start workers which rely on the
// machine agent's API connection (via the apiworkers manifold). It
// returns a Runner with a number of workers attached to it.
//
// The workers started here need to be converted to run under the
// dependency engine. Once they have all been converted, this method -
// and the apiworkers manifold - can be removed.
func (a *MachineAgent) startAPIWorkers(apiConn api.Connection) (_ worker.Worker, outErr error) {
	agentConfig := a.CurrentConfig()

	entity, err := apiagent.NewState(apiConn).Entity(a.Tag())
	if err != nil {
		return nil, errors.Trace(err)
	}

	var isModelManager bool
	for _, job := range entity.Jobs() {
		switch job {
		case multiwatcher.JobManageModel:
			isModelManager = true
		default:
			// TODO(dimitern): Once all workers moved over to using
			// the API, report "unknown job type" here.
		}
	}

	runner := newConnRunner(apiConn)

	defer func() {
		// If startAPIWorkers exits early with an error, stop the
		// runner so that any already started workers aren't leaked.
		if outErr != nil {
			worker.Stop(runner)
		}
	}()

	modelConfig, err := apiagent.NewState(apiConn).ModelConfig()
	if err != nil {
		return nil, fmt.Errorf("cannot read model config: %v", err)
	}

	// Perform the operations needed to set up hosting for containers.
	if err := a.setupContainerSupport(runner, apiConn, agentConfig); err != nil {
		cause := errors.Cause(err)
		if params.IsCodeDead(cause) || cause == worker.ErrTerminateAgent {
			return nil, worker.ErrTerminateAgent
		}
		return nil, fmt.Errorf("setting up container support: %v", err)
	}

	if isModelManager {
		// Published image metadata for some providers is in simple streams.
		// Providers that do not depend on simple streams do not need this worker.
		env, err := newEnvirons(modelConfig)
		if err != nil {
			return nil, errors.Annotate(err, "getting environ")
		}
		if _, ok := env.(simplestreams.HasRegion); ok {
			// Start worker that stores published image metadata in state.
			runner.StartWorker("imagemetadata", func() (worker.Worker, error) {
				return newMetadataUpdater(apiConn.MetadataUpdater()), nil
			})
		}

		// Instance info (including network config) is not yet set for the
		// bootstrap machine, so update it now. All the other machines have
		// instance info, including network config, set at provisioning time.
		if err := a.setControllerNetworkConfig(apiConn); err != nil {
			return nil, errors.Annotate(err, "setting controller network config")
		}
	} else {
		runner.StartWorker("stateconverter", func() (worker.Worker, error) {
			// TODO(fwereade): this worker needs its own facade.
			facade := apimachiner.NewState(apiConn)
			handler := conv2state.New(facade, a)
			w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
				Handler: handler,
			})
			if err != nil {
				return nil, errors.Annotate(err, "cannot start controller promoter worker")
			}
			return w, nil
		})
	}
	return runner, nil
}