func newEnviron(cloud environs.CloudSpec, cfg *config.Config) (*environ, error) {
	ecfg, err := newValidConfig(cfg, configDefaults)
	if err != nil {
		return nil, errors.Annotate(err, "invalid config")
	}
	client, err := newClient(cloud)
	if err != nil {
		return nil, errors.Annotatef(err, "failed to create new client")
	}
	namespace, err := instance.NewNamespace(cfg.UUID())
	if err != nil {
		return nil, errors.Trace(err)
	}
	env := &environ{
		name:      ecfg.Name(),
		cloud:     cloud,
		ecfg:      ecfg,
		client:    client,
		namespace: namespace,
	}
	return env, nil
}
func newEnviron(spec environs.CloudSpec, cfg *config.Config, newRawProvider newRawProviderFunc) (*environ, error) {
	ecfg, err := newValidConfig(cfg)
	if err != nil {
		return nil, errors.Annotate(err, "invalid config")
	}
	namespace, err := instance.NewNamespace(cfg.UUID())
	if err != nil {
		return nil, errors.Trace(err)
	}
	raw, err := newRawProvider(spec)
	if err != nil {
		return nil, errors.Trace(err)
	}
	env := &environ{
		name:      ecfg.Name(),
		uuid:      ecfg.UUID(),
		raw:       raw,
		namespace: namespace,
		ecfg:      ecfg,
	}
	env.base = common.DefaultProvider{Env: env}

	//TODO(wwitzel3) make sure we are also cleaning up profiles during destroy
	if err := env.initProfile(); err != nil {
		return nil, errors.Trace(err)
	}
	return env, nil
}
func (s *BaseSuiteUnpatched) setConfig(c *gc.C, cfg *config.Config) {
	s.Config = cfg
	ecfg, err := newConfig(cfg, nil)
	c.Assert(err, jc.ErrorIsNil)
	s.EnvConfig = ecfg
	uuid := cfg.UUID()
	s.Env.uuid = uuid
	s.Env.ecfg = s.EnvConfig
	namespace, err := instance.NewNamespace(uuid)
	c.Assert(err, jc.ErrorIsNil)
	s.Env.namespace = namespace
}
// NewContainerManager returns a manager object that can start and stop kvm
// containers.
func NewContainerManager(conf container.ManagerConfig) (container.Manager, error) {
	modelUUID := conf.PopValue(container.ConfigModelUUID)
	if modelUUID == "" {
		return nil, errors.Errorf("model UUID is required")
	}
	namespace, err := instance.NewNamespace(modelUUID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	logDir := conf.PopValue(container.ConfigLogDir)
	if logDir == "" {
		logDir = agent.DefaultPaths.LogDir
	}
	conf.WarnAboutUnused()
	return &containerManager{namespace: namespace, logdir: logDir}, nil
}
// NewContainerManager creates the entity that knows how to create and manage
// LXD containers.
// TODO(jam): This needs to grow support for things like LXC's ImageURLGetter
// functionality.
func NewContainerManager(conf container.ManagerConfig) (container.Manager, error) {
	modelUUID := conf.PopValue(container.ConfigModelUUID)
	if modelUUID == "" {
		return nil, errors.Errorf("model UUID is required")
	}
	namespace, err := instance.NewNamespace(modelUUID)
	if err != nil {
		return nil, errors.Trace(err)
	}
	conf.WarnAboutUnused()
	return &containerManager{
		modelUUID: modelUUID,
		namespace: namespace,
	}, nil
}
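// A minimal, hypothetical sketch (not part of the sources above) of wiring a
// model UUID into one of the NewContainerManager constructors shown here.
// Assumptions: container.ManagerConfig can be built as a plain string map, the
// LXD constructor lives at github.com/juju/juju/container/lxd, and the returned
// Manager exposes the Namespace() method implemented by the fake manager
// further down. The example UUID is illustrative only.
package main

import (
	"fmt"

	"github.com/juju/juju/container"
	"github.com/juju/juju/container/lxd"
)

func main() {
	// The model UUID is the only required key; the constructor turns it into
	// an instance.Namespace and rejects anything that is not a valid UUID.
	conf := container.ManagerConfig{
		container.ConfigModelUUID: "deadbeef-0bad-400d-8000-4b1d0d06f00d",
	}
	mgr, err := lxd.NewContainerManager(conf)
	if err != nil {
		fmt.Println("manager setup failed:", err)
		return
	}
	// Namespace-derived hostnames keep container names unique per model.
	hostname, err := mgr.Namespace().Hostname("0")
	if err != nil {
		fmt.Println("hostname error:", err)
		return
	}
	fmt.Println("container hostname for machine 0:", hostname)
}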
func (s *environAvailzonesSuite) TestInstanceAvailabilityZoneNames(c *gc.C) {
	client := vsphere.ExposeEnvFakeClient(s.Env)
	client.SetPropertyProxyHandler("FakeDatacenter", vsphere.RetrieveDatacenterProperties)
	namespace, err := instance.NewNamespace(s.Env.Config().UUID())
	c.Assert(err, jc.ErrorIsNil)
	vmName, err := namespace.Hostname("1")
	c.Assert(err, jc.ErrorIsNil)
	s.FakeInstancesWithResourcePool(client, vsphere.InstRp{Inst: vmName, Rp: "rp1"})
	s.FakeAvailabilityZonesWithResourcePool(client,
		vsphere.ZoneRp{Zone: "z1", Rp: "rp1"},
		vsphere.ZoneRp{Zone: "z2", Rp: "rp2"},
	)

	zones, err := s.Env.InstanceAvailabilityZoneNames([]instance.Id{instance.Id(vmName)})

	c.Assert(err, jc.ErrorIsNil)
	c.Assert(len(zones), gc.Equals, 1)
	c.Assert(zones[0], gc.Equals, "z1")
}
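// A minimal sketch (not from the sources above) of the Namespace behaviour the
// vsphere test relies on: a namespace built from a model UUID produces a
// deterministic hostname for a machine ID, which providers then use as the
// instance/VM name. The example UUID and the hostname format mentioned in the
// final comment are assumptions for illustration.
package main

import (
	"fmt"

	"github.com/juju/juju/instance"
)

func main() {
	// Namespaces are keyed on the model UUID so that machines from different
	// models cannot produce colliding instance names.
	ns, err := instance.NewNamespace("deadbeef-0bad-400d-8000-4b1d0d06f00d")
	if err != nil {
		panic(err)
	}
	hostname, err := ns.Hostname("1")
	if err != nil {
		panic(err)
	}
	// Assumed to look something like "juju-<uuid suffix>-1".
	fmt.Println(hostname)
}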
func newEnviron(cloud environs.CloudSpec, cfg *config.Config) (*environ, error) {
	ecfg, err := newConfig(cfg, nil)
	if err != nil {
		return nil, errors.Annotate(err, "invalid config")
	}

	credAttrs := cloud.Credential.Attributes()
	if cloud.Credential.AuthType() == jujucloud.JSONFileAuthType {
		contents := credAttrs[credAttrFile]
		credential, err := parseJSONAuthFile(strings.NewReader(contents))
		if err != nil {
			return nil, errors.Trace(err)
		}
		credAttrs = credential.Attributes()
	}

	credential := &google.Credentials{
		ClientID:    credAttrs[credAttrClientID],
		ProjectID:   credAttrs[credAttrProjectID],
		ClientEmail: credAttrs[credAttrClientEmail],
		PrivateKey:  []byte(credAttrs[credAttrPrivateKey]),
	}
	connectionConfig := google.ConnectionConfig{
		Region:    cloud.Region,
		ProjectID: credential.ProjectID,
	}

	// Connect and authenticate.
	conn, err := newConnection(connectionConfig, credential)
	if err != nil {
		return nil, errors.Trace(err)
	}

	namespace, err := instance.NewNamespace(cfg.UUID())
	if err != nil {
		return nil, errors.Trace(err)
	}

	return &environ{
		name:      ecfg.config.Name(),
		uuid:      ecfg.config.UUID(),
		cloud:     cloud,
		ecfg:      ecfg,
		gce:       conn,
		namespace: namespace,
	}, nil
}
func (p EnvironProvider) Open(args environs.OpenParams) (environs.Environ, error) {
	logger.Infof("opening model %q", args.Config.Name())
	if err := validateCloudSpec(args.Cloud); err != nil {
		return nil, errors.Annotate(err, "validating cloud spec")
	}
	uuid := args.Config.UUID()
	namespace, err := instance.NewNamespace(uuid)
	if err != nil {
		return nil, errors.Annotate(err, "creating instance namespace")
	}
	e := &Environ{
		name:      args.Config.Name(),
		uuid:      uuid,
		cloud:     args.Cloud,
		namespace: namespace,
	}
	e.firewaller = p.FirewallerFactory.GetFirewaller(e)
	e.configurator = p.Configurator
	if err := e.SetConfig(args.Config); err != nil {
		return nil, err
	}
	return e, nil
}
func (s *NamespaceSuite) newNamespace(c *gc.C) instance.Namespace {
	ns, err := instance.NewNamespace(modelUUID)
	c.Assert(err, jc.ErrorIsNil)
	return ns
}
func (s *NamespaceSuite) TestInvalidModelTag(c *gc.C) {
	ns, err := instance.NewNamespace("foo")
	c.Assert(ns, gc.IsNil)
	c.Assert(err, gc.ErrorMatches, `model ID "foo" is not a valid model`)
}
func (m *fakeContainerManager) Namespace() instance.Namespace {
	ns, _ := instance.NewNamespace(coretesting.ModelTag.Id())
	return ns
}
func (s *environInstanceSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)

	namespace, err := instance.NewNamespace(s.Env.Config().UUID())
	c.Assert(err, jc.ErrorIsNil)
	s.namespace = namespace
}