func (s *BootstrapSuite) TestDefaultStoragePools(c *gc.C) { _, cmd, err := s.initBootstrapCommand( c, nil, "--model-config", s.b64yamlControllerModelConfig, "--hosted-model-config", s.b64yamlHostedModelConfig, "--instance-id", string(s.instanceId), ) c.Assert(err, jc.ErrorIsNil) err = cmd.Run(nil) c.Assert(err, jc.ErrorIsNil) st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{gitjujutesting.MgoServer.Addr()}, CACert: testing.CACert, }, Password: testPassword, }, mongotest.DialOpts(), environs.NewStatePolicy()) c.Assert(err, jc.ErrorIsNil) defer st.Close() settings := state.NewStateSettings(st) pm := poolmanager.New(settings) for _, p := range []string{"ebs-ssd"} { _, err = pm.Get(p) c.Assert(err, jc.ErrorIsNil) } }
func (s *serviceSuite) TestClientServiceDeployWithUnsupportedStoragePool(c *gc.C) { registry.RegisterProvider("hostloop", &mockStorageProvider{kind: storage.StorageKindBlock}) pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("host-loop-pool", provider.HostLoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) curl, _ := s.UploadCharm(c, "utopic/storage-block-0", "storage-block") storageConstraints := map[string]storage.Constraints{ "data": storage.Constraints{ Pool: "host-loop-pool", Count: 1, Size: 1024, }, } var cons constraints.Value args := params.ServiceDeploy{ ServiceName: "service", CharmUrl: curl.String(), NumUnits: 1, Constraints: cons, Storage: storageConstraints, } results, err := s.serviceApi.ServicesDeploy(params.ServicesDeploy{ Services: []params.ServiceDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) c.Assert(results.Results, gc.HasLen, 1) c.Assert(results.Results[0].Error, gc.ErrorMatches, `.*pool "host-loop-pool" uses storage provider "hostloop" which is not supported for environments of type "dummy"`) }
// machineVolumeParams retrieves VolumeParams for the volumes that should be // provisioned with, and attached to, the machine. The client should ignore // parameters that it does not know how to handle. func (p *ProvisionerAPI) machineVolumeParams(m *state.Machine) ([]params.VolumeParams, error) { volumeAttachments, err := m.VolumeAttachments() if err != nil { return nil, err } if len(volumeAttachments) == 0 { return nil, nil } envConfig, err := p.st.EnvironConfig() if err != nil { return nil, err } poolManager := poolmanager.New(state.NewStateSettings(p.st)) allVolumeParams := make([]params.VolumeParams, 0, len(volumeAttachments)) for _, volumeAttachment := range volumeAttachments { volumeTag := volumeAttachment.Volume() volume, err := p.st.Volume(volumeTag) if err != nil { return nil, errors.Annotatef(err, "getting volume %q", volumeTag.Id()) } storageInstance, err := storagecommon.MaybeAssignedStorageInstance( volume.StorageInstance, p.st.StorageInstance, ) if err != nil { return nil, errors.Annotatef(err, "getting volume %q storage instance", volumeTag.Id()) } volumeParams, err := storagecommon.VolumeParams(volume, storageInstance, envConfig, poolManager) if err != nil { return nil, errors.Annotatef(err, "getting volume %q parameters", volumeTag.Id()) } provider, err := registry.StorageProvider(storage.ProviderType(volumeParams.Provider)) if err != nil { return nil, errors.Annotate(err, "getting storage provider") } if provider.Dynamic() { // Leave dynamic storage to the storage provisioner. continue } volumeAttachmentParams, ok := volumeAttachment.Params() if !ok { // Attachment is already provisioned; this is an insane // state, so we should not proceed with the volume. return nil, errors.Errorf( "volume %s already attached to machine %s", volumeTag.Id(), m.Id(), ) } // Not provisioned yet, so ask the cloud provisioner do it. 
volumeParams.Attachment = ¶ms.VolumeAttachmentParams{ volumeTag.String(), m.Tag().String(), "", // we're creating the volume, so it has no volume ID. "", // we're creating the machine, so it has no instance ID. volumeParams.Provider, volumeAttachmentParams.ReadOnly, } allVolumeParams = append(allVolumeParams, volumeParams) } return allVolumeParams, nil }
func (s *EnvironSuite) TestDestroyEnvironmentWithPersistentVolumesFails(c *gc.C) { // Create a persistent volume. // TODO(wallyworld) - consider moving this to factory registry.RegisterEnvironStorageProviders("someprovider", ec2.EBS_ProviderType) pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("persistent-block", ec2.EBS_ProviderType, map[string]interface{}{"persistent": "true"}) c.Assert(err, jc.ErrorIsNil) ch := s.AddTestingCharm(c, "storage-block2") storage := map[string]state.StorageConstraints{ "multi1to10": makeStorageCons("persistent-block", 1024, 1), } service := s.AddTestingServiceWithStorage(c, "storage-block2", ch, storage) unit, err := service.AddUnit() c.Assert(err, jc.ErrorIsNil) err = s.State.AssignUnit(unit, state.AssignCleanEmpty) c.Assert(err, jc.ErrorIsNil) volume1, err := s.State.StorageInstanceVolume(names.NewStorageTag("multi1to10/0")) c.Assert(err, jc.ErrorIsNil) volumeInfoSet := state.VolumeInfo{Size: 123, Persistent: true, VolumeId: "vol-ume"} err = s.State.SetVolumeInfo(volume1.VolumeTag(), volumeInfoSet) c.Assert(err, jc.ErrorIsNil) env, err := s.State.Environment() c.Assert(err, jc.ErrorIsNil) // TODO(wallyworld) when we can destroy/remove volume, ensure env can then be destroyed c.Assert(errors.Cause(env.Destroy()), gc.Equals, state.ErrPersistentVolumesExist) }
// TODO(wallyworld) - add another test that deploy with storage fails for older environments // (need deploy client to be refactored to use API stub) func (s *DeploySuite) TestStorage(c *gc.C) { pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{"foo": "bar"}) c.Assert(err, jc.ErrorIsNil) testcharms.Repo.CharmArchivePath(s.SeriesPath, "storage-block") err = runDeploy(c, "local:storage-block", "--storage", "data=loop-pool,1G") c.Assert(err, jc.ErrorIsNil) curl := charm.MustParseURL("local:trusty/storage-block-1") service, _ := s.AssertService(c, "storage-block", curl, 1, 0) cons, err := service.StorageConstraints() c.Assert(err, jc.ErrorIsNil) c.Assert(cons, jc.DeepEquals, map[string]state.StorageConstraints{ "data": { Pool: "loop-pool", Count: 1, Size: 1024, }, "allecto": { Pool: "loop", Count: 0, Size: 1024, }, }) }
func (s *MigrationBaseSuite) makeUnitWithStorage(c *gc.C) (*state.Application, *state.Unit, names.StorageTag) { pool := "loop-pool" kind := "block" // Create a default pool for block devices. pm := poolmanager.New(state.NewStateSettings(s.State), dummy.StorageProviders()) _, err := pm.Create(pool, provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) // There are test charms called "storage-block" and // "storage-filesystem" which are what you'd expect. ch := s.AddTestingCharm(c, "storage-"+kind) storage := map[string]state.StorageConstraints{ "data": makeStorageCons(pool, 1024, 1), } service := s.AddTestingServiceWithStorage(c, "storage-"+kind, ch, storage) unit, err := service.AddUnit() machine := s.Factory.MakeMachine(c, nil) err = unit.AssignToMachine(machine) c.Assert(err, jc.ErrorIsNil) c.Assert(err, jc.ErrorIsNil) storageTag := names.NewStorageTag("data/0") agentVersion := version.MustParseBinary("2.0.1-quantal-and64") err = unit.SetAgentVersion(agentVersion) c.Assert(err, jc.ErrorIsNil) return service, unit, storageTag }
func (s *VolumeStateSuite) TestAddServiceDefaultPool(c *gc.C) { // Register a default pool. pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("default-block", provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) err = s.State.UpdateEnvironConfig(map[string]interface{}{ "storage-default-block-source": "default-block", }, nil, nil) c.Assert(err, jc.ErrorIsNil) ch := s.AddTestingCharm(c, "storage-block") storage := map[string]state.StorageConstraints{ "data": makeStorageCons("", 1024, 1), } service := s.AddTestingServiceWithStorage(c, "storage-block", ch, storage) cons, err := service.StorageConstraints() c.Assert(err, jc.ErrorIsNil) c.Assert(cons, jc.DeepEquals, map[string]state.StorageConstraints{ "data": state.StorageConstraints{ Pool: "default-block", Size: 1024, Count: 1, }, "allecto": state.StorageConstraints{ Pool: "loop", Size: 1024, Count: 0, }, }) }
func setupTestStorageSupport(c *gc.C, s *state.State) { stsetts := state.NewStateSettings(s) poolManager := poolmanager.New(stsetts, dummy.StorageProviders()) _, err := poolManager.Create(testPool, provider.LoopProviderType, map[string]interface{}{"it": "works"}) c.Assert(err, jc.ErrorIsNil) _, err = poolManager.Create(testPersistentPool, "environscoped", map[string]interface{}{"persistent": true}) c.Assert(err, jc.ErrorIsNil) }
func setupTestStorageSupport(c *gc.C, s *state.State) { stsetts := state.NewStateSettings(s) poolManager := poolmanager.New(stsetts) _, err := poolManager.Create(testPool, provider.LoopProviderType, map[string]interface{}{"it": "works"}) c.Assert(err, jc.ErrorIsNil) registry.RegisterEnvironStorageProviders("dummy", ec2.EBS_ProviderType) registry.RegisterEnvironStorageProviders("dummyenv", ec2.EBS_ProviderType) }
func (s *assignCleanSuite) SetUpTest(c *gc.C) { c.Logf("assignment policy for this test: %q", s.policy) s.ConnSuite.SetUpTest(c) wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) s.wordpress = wordpress pm := poolmanager.New(state.NewStateSettings(s.State), provider.CommonStorageProviders()) _, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) }
func (s *baseSuite) setupStoragePool(c *gc.C) { pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) err = s.State.UpdateEnvironConfig(map[string]interface{}{ "storage-default-block-source": "loop-pool", }, nil, nil) c.Assert(err, jc.ErrorIsNil) }
func newStorageProvisionerAPI(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*StorageProvisionerAPI, error) { env, err := stateenvirons.GetNewEnvironFunc(environs.New)(st) if err != nil { return nil, errors.Annotate(err, "getting environ") } registry := stateenvirons.NewStorageProviderRegistry(env) pm := poolmanager.New(state.NewStateSettings(st), registry) return NewStorageProvisionerAPI(stateShim{st}, resources, authorizer, registry, pm) }
func (s *poolSuite) SetUpTest(c *gc.C) { s.StateSuite.SetUpTest(c) s.settings = state.NewStateSettings(s.State) s.registry = storage.StaticProviderRegistry{ map[storage.ProviderType]storage.Provider{ "loop": &dummystorage.StorageProvider{}, }, } s.poolManager = poolmanager.New(s.settings, s.registry) }
func (s *MigrationImportSuite) TestStoragePools(c *gc.C) { pm := poolmanager.New(state.NewStateSettings(s.State), provider.CommonStorageProviders()) _, err := pm.Create("test-pool", provider.LoopProviderType, map[string]interface{}{ "value": 42, }) c.Assert(err, jc.ErrorIsNil) _, newSt := s.importModel(c) pm = poolmanager.New(state.NewStateSettings(newSt), provider.CommonStorageProviders()) pools, err := pm.List() c.Assert(err, jc.ErrorIsNil) c.Assert(pools, gc.HasLen, 1) pool := pools[0] c.Assert(pool.Name(), gc.Equals, "test-pool") c.Assert(pool.Provider(), gc.Equals, provider.LoopProviderType) c.Assert(pool.Attrs(), jc.DeepEquals, map[string]interface{}{ "value": 42, }) }
func (s *StorageStateSuiteBase) SetUpTest(c *gc.C) { s.ConnSuite.SetUpTest(c) // Create a default pool for block devices. pm := poolmanager.New(state.NewStateSettings(s.State), dummy.StorageProviders()) _, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) // Create a pool that creates persistent block devices. _, err = pm.Create("persistent-block", "environscoped-block", map[string]interface{}{ "persistent": true, }) c.Assert(err, jc.ErrorIsNil) }
func (s *StorageStateSuite) TestNewModelDefaultPools(c *gc.C) { st := s.Factory.MakeModel(c, &factory.ModelParams{ StorageProviderRegistry: testingStorageProviders, }) s.AddCleanup(func(*gc.C) { st.Close() }) // When a model is created, it is populated with the default // pools of each storage provider supported by the model's // cloud provider. pm := poolmanager.New(state.NewStateSettings(st), testingStorageProviders) listed, err := pm.List() c.Assert(err, jc.ErrorIsNil) sort.Sort(byStorageConfigName(listed)) c.Assert(listed, jc.DeepEquals, []*storage.Config{blackPool, radiancePool}) }
// storageConfig returns the provider type and config attributes for the // specified poolName. If no such pool exists, we check to see if poolName is // actually a provider type, in which case config will be empty. func storageConfig(st *state.State, poolName string) (storage.ProviderType, map[string]interface{}, error) { pm := poolmanager.New(state.NewStateSettings(st)) p, err := pm.Get(poolName) // If not a storage pool, then maybe a provider type. if errors.IsNotFound(err) { providerType := storage.ProviderType(poolName) if _, err1 := registry.StorageProvider(providerType); err1 != nil { return "", nil, errors.Trace(err) } return providerType, nil, nil } if err != nil { return "", nil, errors.Trace(err) } return p.Provider(), p.Attrs(), nil }
// TestProvisioningMachinesWithRequestedVolumes checks that a machine added
// with volume parameters is provisioned with the expected volumes: a
// non-persistent one from the statically-provisioned "static" provider and
// a persistent one from "persistent-pool".
func (s *ProvisionerSuite) TestProvisioningMachinesWithRequestedVolumes(c *gc.C) {
	// Set up a persistent pool.
	registry.RegisterProvider("static", &dummystorage.StorageProvider{IsDynamic: false})
	registry.RegisterEnvironStorageProviders("dummy", "static")
	// Deregister the global provider when the test finishes.
	defer registry.RegisterProvider("static", nil)
	poolManager := poolmanager.New(state.NewStateSettings(s.State))
	_, err := poolManager.Create("persistent-pool", "static", map[string]interface{}{"persistent": true})
	c.Assert(err, jc.ErrorIsNil)
	p := s.newEnvironProvisioner(c)
	defer stop(c, p)
	// Add and provision a machine with volumes specified.
	requestedVolumes := []state.MachineVolumeParams{{
		Volume:     state.VolumeParams{Pool: "static", Size: 1024},
		Attachment: state.VolumeAttachmentParams{},
	}, {
		Volume:     state.VolumeParams{Pool: "persistent-pool", Size: 2048},
		Attachment: state.VolumeAttachmentParams{},
	}}
	expectVolumeInfo := []storage.Volume{{
		names.NewVolumeTag("1"),
		storage.VolumeInfo{
			Size: 1024,
		},
	}, {
		names.NewVolumeTag("2"),
		storage.VolumeInfo{
			Size:       2048,
			Persistent: true,
		},
	}}
	m, err := s.addMachineWithRequestedVolumes(requestedVolumes, s.defaultConstraints)
	c.Assert(err, jc.ErrorIsNil)
	inst := s.checkStartInstanceCustom(
		c, m, "pork", s.defaultConstraints,
		nil, nil, nil,
		expectVolumeInfo, false,
		nil, true,
	)
	// Cleanup.
	c.Assert(m.EnsureDead(), gc.IsNil)
	s.checkStopInstances(c, inst)
	s.waitRemoved(c, m)
}
func (s *defaultStoragePoolsSuite) TestDefaultStoragePools(c *gc.C) { p1, err := storage.NewConfig("pool1", storage.ProviderType("loop"), map[string]interface{}{"1": "2"}) p2, err := storage.NewConfig("pool2", storage.ProviderType("tmpfs"), map[string]interface{}{"3": "4"}) c.Assert(err, jc.ErrorIsNil) defaultPools := []*storage.Config{p1, p2} poolmanager.RegisterDefaultStoragePools(defaultPools) settings := state.NewStateSettings(s.State) err = poolmanager.AddDefaultStoragePools(settings) c.Assert(err, jc.ErrorIsNil) pm := poolmanager.New(settings) for _, pool := range defaultPools { p, err := pm.Get(pool.Name()) c.Assert(err, jc.ErrorIsNil) c.Assert(p.Provider(), gc.Equals, pool.Provider()) c.Assert(p.Attrs(), gc.DeepEquals, pool.Attrs()) } }
func assertPoolExists(c *gc.C, st *state.State, pname, provider, attr string) { stsetts := state.NewStateSettings(st) poolManager := poolmanager.New(stsetts) found, err := poolManager.List() c.Assert(err, jc.ErrorIsNil) c.Assert(len(found) > 0, jc.IsTrue) exists := false for _, one := range found { if one.Name() == pname { exists = true c.Assert(string(one.Provider()), gc.Equals, provider) // At this stage, only 1 attr is expected and checked expectedAttrs := strings.Split(attr, "=") value, ok := one.Attrs()[expectedAttrs[0]] c.Assert(ok, jc.IsTrue) c.Assert(value, gc.Equals, expectedAttrs[1]) } } c.Assert(exists, jc.IsTrue) }
func (s *provisionerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) s.factory = factory.NewFactory(s.State) s.resources = common.NewResources() // Create the resource registry separately to track invocations to // Register. s.resources = common.NewResources() s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) env, err := stateenvirons.GetNewEnvironFunc(environs.New)(s.State) c.Assert(err, jc.ErrorIsNil) registry := stateenvirons.NewStorageProviderRegistry(env) pm := poolmanager.New(state.NewStateSettings(s.State), registry) s.authorizer = &apiservertesting.FakeAuthorizer{ Tag: names.NewMachineTag("0"), EnvironManager: true, } backend := storageprovisioner.NewStateBackend(s.State) s.api, err = storageprovisioner.NewStorageProvisionerAPI(backend, s.resources, s.authorizer, registry, pm) c.Assert(err, jc.ErrorIsNil) }
// TestSetInstanceInfo checks that SetInstanceInfo on an unprovisioned
// machine records instance data, volumes and volume attachments; that a
// second call fails; and that the volume info lands in state as expected.
func (s *provisionerSuite) TestSetInstanceInfo(c *gc.C) {
	pm := poolmanager.New(state.NewStateSettings(s.State))
	_, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{"foo": "bar"})
	c.Assert(err, jc.ErrorIsNil)
	// Create a fresh machine, since machine 0 is already provisioned.
	template := state.MachineTemplate{
		Series: "quantal",
		Jobs:   []state.MachineJob{state.JobHostUnits},
		Volumes: []state.MachineVolumeParams{{
			Volume: state.VolumeParams{
				Pool: "loop-pool",
				Size: 123,
			}},
		},
	}
	notProvisionedMachine, err := s.State.AddOneMachine(template)
	c.Assert(err, jc.ErrorIsNil)
	apiMachine, err := s.provisioner.Machine(notProvisionedMachine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	// InstanceId must report not-provisioned before SetInstanceInfo.
	instanceId, err := apiMachine.InstanceId()
	c.Assert(err, jc.Satisfies, params.IsCodeNotProvisioned)
	c.Assert(err, gc.ErrorMatches, "machine 1 not provisioned")
	c.Assert(instanceId, gc.Equals, instance.Id(""))
	hwChars := instance.MustParseHardware("cpu-cores=123", "mem=4G")
	// No networks or interfaces exist for the machine yet.
	_, err = s.State.Network("net1")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	_, err = s.State.Network("vlan42")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	ifacesMachine, err := notProvisionedMachine.NetworkInterfaces()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ifacesMachine, gc.HasLen, 0)
	volumes := []params.Volume{{
		VolumeTag: "volume-1-0",
		Info: params.VolumeInfo{
			VolumeId: "vol-123",
			Size:     124,
		},
	}}
	volumeAttachments := map[string]params.VolumeAttachmentInfo{
		"volume-1-0": {
			DeviceName: "xvdf1",
		},
	}
	err = apiMachine.SetInstanceInfo(
		"i-will", "fake_nonce", &hwChars, nil,
		volumes, volumeAttachments,
	)
	c.Assert(err, jc.ErrorIsNil)
	instanceId, err = apiMachine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-will"))
	// Try it again - should fail.
	err = apiMachine.SetInstanceInfo("i-wont", "fake", nil, nil, nil, nil)
	c.Assert(err, gc.ErrorMatches, `cannot record provisioning info for "i-wont": cannot set instance data for machine "1": already set`)
	// Now try to get machine 0's instance id.
	apiMachine, err = s.provisioner.Machine(s.machine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	instanceId, err = apiMachine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-manager"))
	// Now check volumes and volume attachments.
	volume, err := s.State.Volume(names.NewVolumeTag("1/0"))
	c.Assert(err, jc.ErrorIsNil)
	volumeInfo, err := volume.Info()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(volumeInfo, gc.Equals, state.VolumeInfo{
		VolumeId: "vol-123",
		Pool:     "loop-pool",
		Size:     124,
	})
	stateVolumeAttachments, err := s.State.MachineVolumeAttachments(names.NewMachineTag("1"))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(stateVolumeAttachments, gc.HasLen, 1)
	volumeAttachmentInfo, err := stateVolumeAttachments[0].Info()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(volumeAttachmentInfo, gc.Equals, state.VolumeAttachmentInfo{
		DeviceName: "xvdf1",
	})
}
func (s *withoutControllerSuite) TestProvisioningInfoWithStorage(c *gc.C) { pm := poolmanager.New(state.NewStateSettings(s.State), dummy.StorageProviders()) _, err := pm.Create("static-pool", "static", map[string]interface{}{"foo": "bar"}) c.Assert(err, jc.ErrorIsNil) cons := constraints.MustParse("cores=123 mem=8G") template := state.MachineTemplate{ Series: "quantal", Jobs: []state.MachineJob{state.JobHostUnits}, Constraints: cons, Placement: "valid", Volumes: []state.MachineVolumeParams{ {Volume: state.VolumeParams{Size: 1000, Pool: "static-pool"}}, {Volume: state.VolumeParams{Size: 2000, Pool: "static-pool"}}, }, } placementMachine, err := s.State.AddOneMachine(template) c.Assert(err, jc.ErrorIsNil) args := params.Entities{Entities: []params.Entity{ {Tag: s.machines[0].Tag().String()}, {Tag: placementMachine.Tag().String()}, }} result, err := s.provisioner.ProvisioningInfo(args) c.Assert(err, jc.ErrorIsNil) controllerCfg := coretesting.FakeControllerConfig() // Dummy provider uses a random port, which is added to cfg used to create environment. 
apiPort := dummy.ApiPort(s.Environ.Provider()) controllerCfg["api-port"] = apiPort expected := params.ProvisioningInfoResults{ Results: []params.ProvisioningInfoResult{ {Result: ¶ms.ProvisioningInfo{ ControllerConfig: controllerCfg, Series: "quantal", Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Tags: map[string]string{ tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, }}, {Result: ¶ms.ProvisioningInfo{ ControllerConfig: controllerCfg, Series: "quantal", Constraints: template.Constraints, Placement: template.Placement, Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Tags: map[string]string{ tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, Volumes: []params.VolumeParams{{ VolumeTag: "volume-0", Size: 1000, Provider: "static", Attributes: map[string]interface{}{"foo": "bar"}, Tags: map[string]string{ tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, Attachment: ¶ms.VolumeAttachmentParams{ MachineTag: placementMachine.Tag().String(), VolumeTag: "volume-0", Provider: "static", }, }, { VolumeTag: "volume-1", Size: 2000, Provider: "static", Attributes: map[string]interface{}{"foo": "bar"}, Tags: map[string]string{ tags.JujuController: coretesting.ControllerTag.Id(), tags.JujuModel: coretesting.ModelTag.Id(), }, Attachment: ¶ms.VolumeAttachmentParams{ MachineTag: placementMachine.Tag().String(), VolumeTag: "volume-1", Provider: "static", }, }}, }}, }, } // The order of volumes is not predictable, so we make sure we // compare the right ones. This only applies to Results[1] since // it is the only result to contain volumes. if expected.Results[1].Result.Volumes[0].VolumeTag != result.Results[1].Result.Volumes[0].VolumeTag { vols := expected.Results[1].Result.Volumes vols[0], vols[1] = vols[1], vols[0] } c.Assert(result, jc.DeepEquals, expected) }
// populateDefaultStoragePools creates the default storage pools. func (c *BootstrapCommand) populateDefaultStoragePools(st *state.State) error { settings := state.NewStateSettings(st) return poolmanager.AddDefaultStoragePools(settings) }
func addDefaultStoragePools(st *state.State) error { settings := state.NewStateSettings(st) return poolmanager.AddDefaultStoragePools(settings) }
settings poolmanager.SettingsManager resources *common.Resources authorizer common.Authorizer getScopeAuthFunc common.GetAuthFunc getStorageEntityAuthFunc common.GetAuthFunc getMachineAuthFunc common.GetAuthFunc getBlockDevicesAuthFunc common.GetAuthFunc getAttachmentAuthFunc func() (func(names.MachineTag, names.Tag) bool, error) } var getState = func(st *state.State) provisionerState { return stateShim{st} } var getSettingsManager = func(st *state.State) poolmanager.SettingsManager { return state.NewStateSettings(st) } // NewStorageProvisionerAPI creates a new server-side StorageProvisionerAPI facade. func NewStorageProvisionerAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*StorageProvisionerAPI, error) { if !authorizer.AuthMachineAgent() { return nil, common.ErrPerm } canAccessStorageMachine := func(tag names.MachineTag, allowEnvironManager bool) bool { authEntityTag := authorizer.GetAuthTag() if tag == authEntityTag { // Machine agents can access volumes // scoped to their own machine. return true } parentId := state.ParentId(tag.Id())
// poolManager returns a PoolManager backed by the state's settings.
func poolManager(st *state.State) poolmanager.PoolManager {
	settings := state.NewStateSettings(st)
	return poolmanager.New(settings)
}
// TestSetInstanceInfo exercises SetInstanceInfo on an unprovisioned
// machine: it records instance data, networks, interfaces, volumes and
// volume attachments; duplicated networks/interfaces are ignored; a
// second call fails; and everything recorded is then verified in state.
func (s *provisionerSuite) TestSetInstanceInfo(c *gc.C) {
	pm := poolmanager.New(state.NewStateSettings(s.State))
	_, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{"foo": "bar"})
	c.Assert(err, jc.ErrorIsNil)
	// Create a fresh machine, since machine 0 is already provisioned.
	template := state.MachineTemplate{
		Series: "quantal",
		Jobs:   []state.MachineJob{state.JobHostUnits},
		Volumes: []state.MachineVolumeParams{{
			Volume: state.VolumeParams{
				Pool: "loop-pool",
				Size: 123,
			}},
		},
	}
	notProvisionedMachine, err := s.State.AddOneMachine(template)
	c.Assert(err, jc.ErrorIsNil)
	apiMachine, err := s.provisioner.Machine(notProvisionedMachine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	// InstanceId must report not-provisioned before SetInstanceInfo.
	instanceId, err := apiMachine.InstanceId()
	c.Assert(err, jc.Satisfies, params.IsCodeNotProvisioned)
	c.Assert(err, gc.ErrorMatches, "machine 1 not provisioned")
	c.Assert(instanceId, gc.Equals, instance.Id(""))
	hwChars := instance.MustParseHardware("cpu-cores=123", "mem=4G")
	// No networks or interfaces exist for the machine yet.
	_, err = s.State.Network("net1")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	_, err = s.State.Network("vlan42")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	ifacesMachine, err := notProvisionedMachine.NetworkInterfaces()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ifacesMachine, gc.HasLen, 0)
	networks := []params.Network{{
		Tag:        "network-net1",
		ProviderId: "net1",
		CIDR:       "0.1.2.0/24",
		VLANTag:    0,
	}, {
		Tag:        "network-vlan42",
		ProviderId: "vlan42",
		CIDR:       "0.2.2.0/24",
		VLANTag:    42,
	}, {
		Tag:        "network-vlan69",
		ProviderId: "vlan69",
		CIDR:       "0.3.2.0/24",
		VLANTag:    69,
	}, {
		Tag:        "network-vlan42", // duplicated; ignored
		ProviderId: "vlan42",
		CIDR:       "0.2.2.0/24",
		VLANTag:    42,
	}}
	ifaces := []params.NetworkInterface{{
		MACAddress:    "aa:bb:cc:dd:ee:f0",
		NetworkTag:    "network-net1",
		InterfaceName: "eth0",
		IsVirtual:     false,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f1",
		NetworkTag:    "network-net1",
		InterfaceName: "eth1",
		IsVirtual:     false,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f1",
		NetworkTag:    "network-vlan42",
		InterfaceName: "eth1.42",
		IsVirtual:     true,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f1",
		NetworkTag:    "network-vlan69",
		InterfaceName: "eth1.69",
		IsVirtual:     true,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f1", // duplicated mac+net; ignored
		NetworkTag:    "network-vlan42",
		InterfaceName: "eth2",
		IsVirtual:     true,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f4",
		NetworkTag:    "network-net1",
		InterfaceName: "eth1", // duplicated name+machine id; ignored
		IsVirtual:     false,
	}}
	volumes := []params.Volume{{
		VolumeTag: "volume-1-0",
		Info: params.VolumeInfo{
			VolumeId: "vol-123",
			Size:     124,
		},
	}}
	volumeAttachments := map[string]params.VolumeAttachmentInfo{
		"volume-1-0": {
			DeviceName: "xvdf1",
		},
	}
	err = apiMachine.SetInstanceInfo(
		"i-will", "fake_nonce", &hwChars, networks, ifaces,
		volumes, volumeAttachments,
	)
	c.Assert(err, jc.ErrorIsNil)
	instanceId, err = apiMachine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-will"))
	// Try it again - should fail.
	err = apiMachine.SetInstanceInfo("i-wont", "fake", nil, nil, nil, nil, nil)
	c.Assert(err, gc.ErrorMatches, `cannot record provisioning info for "i-wont": cannot set instance data for machine "1": already set`)
	// Now try to get machine 0's instance id.
	apiMachine, err = s.provisioner.Machine(s.machine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	instanceId, err = apiMachine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-manager"))
	// Check the networks are created.
	for i := range networks {
		if i == 3 {
			// Last one was ignored, so skip it.
			break
		}
		tag, err := names.ParseNetworkTag(networks[i].Tag)
		c.Assert(err, jc.ErrorIsNil)
		networkName := tag.Id()
		nw, err := s.State.Network(networkName)
		c.Assert(err, jc.ErrorIsNil)
		c.Check(nw.Name(), gc.Equals, networkName)
		c.Check(nw.ProviderId(), gc.Equals, network.Id(networks[i].ProviderId))
		c.Check(nw.Tag().String(), gc.Equals, networks[i].Tag)
		c.Check(nw.VLANTag(), gc.Equals, networks[i].VLANTag)
		c.Check(nw.CIDR(), gc.Equals, networks[i].CIDR)
	}
	// And the network interfaces as well.
	ifacesMachine, err = notProvisionedMachine.NetworkInterfaces()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ifacesMachine, gc.HasLen, 4)
	actual := make([]params.NetworkInterface, len(ifacesMachine))
	for i, iface := range ifacesMachine {
		actual[i].InterfaceName = iface.InterfaceName()
		actual[i].NetworkTag = iface.NetworkTag().String()
		actual[i].MACAddress = iface.MACAddress()
		actual[i].IsVirtual = iface.IsVirtual()
		c.Check(iface.MachineTag(), gc.Equals, notProvisionedMachine.Tag())
		c.Check(iface.MachineId(), gc.Equals, notProvisionedMachine.Id())
	}
	c.Assert(actual, jc.SameContents, ifaces[:4]) // skip the rest as they are ignored.
	// Now check volumes and volume attachments.
	volume, err := s.State.Volume(names.NewVolumeTag("1/0"))
	c.Assert(err, jc.ErrorIsNil)
	volumeInfo, err := volume.Info()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(volumeInfo, gc.Equals, state.VolumeInfo{
		VolumeId: "vol-123",
		Pool:     "loop-pool",
		Size:     124,
	})
	stateVolumeAttachments, err := s.State.MachineVolumeAttachments(names.NewMachineTag("1"))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(stateVolumeAttachments, gc.HasLen, 1)
	volumeAttachmentInfo, err := stateVolumeAttachments[0].Info()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(volumeAttachmentInfo, gc.Equals, state.VolumeAttachmentInfo{
		DeviceName: "xvdf1",
	})
}
// NewProvisionerAPI creates a new server-side ProvisionerAPI facade.
func NewProvisionerAPI(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*ProvisionerAPI, error) {
	// Only machine agents and the model manager may use this facade.
	if !authorizer.AuthMachineAgent() && !authorizer.AuthModelManager() {
		return nil, common.ErrPerm
	}
	// getAuthFunc captures the caller's identity once; the returned
	// func decides per-tag whether the caller may act on that entity.
	getAuthFunc := func() (common.AuthFunc, error) {
		isModelManager := authorizer.AuthModelManager()
		isMachineAgent := authorizer.AuthMachineAgent()
		authEntityTag := authorizer.GetAuthTag()
		return func(tag names.Tag) bool {
			if isMachineAgent && tag == authEntityTag {
				// A machine agent can always access its own machine.
				return true
			}
			switch tag := tag.(type) {
			case names.MachineTag:
				parentId := state.ParentId(tag.Id())
				if parentId == "" {
					// All top-level machines are accessible by the
					// environment manager.
					return isModelManager
				}
				// All containers with the authenticated machine as a
				// parent are accessible by it.
				// TODO(dfc) sometimes authEntity tag is nil, which is fine because nil is
				// only equal to nil, but it suggests someone is passing an authorizer
				// with a nil tag.
				return isMachineAgent && names.NewMachineTag(parentId) == authEntityTag
			default:
				return false
			}
		}, nil
	}
	getAuthOwner := func() (common.AuthFunc, error) {
		return authorizer.AuthOwner, nil
	}
	model, err := st.Model()
	if err != nil {
		return nil, err
	}
	configGetter := stateenvirons.EnvironConfigGetter{st}
	env, err := environs.GetEnviron(configGetter, environs.New)
	if err != nil {
		return nil, err
	}
	urlGetter := common.NewToolsURLGetter(model.UUID(), st)
	storageProviderRegistry := stateenvirons.NewStorageProviderRegistry(env)
	// Compose the facade from the shared helpers, all gated on the
	// auth funcs built above.
	return &ProvisionerAPI{
		Remover:                 common.NewRemover(st, false, getAuthFunc),
		StatusSetter:            common.NewStatusSetter(st, getAuthFunc),
		StatusGetter:            common.NewStatusGetter(st, getAuthFunc),
		DeadEnsurer:             common.NewDeadEnsurer(st, getAuthFunc),
		PasswordChanger:         common.NewPasswordChanger(st, getAuthFunc),
		LifeGetter:              common.NewLifeGetter(st, getAuthFunc),
		StateAddresser:          common.NewStateAddresser(st),
		APIAddresser:            common.NewAPIAddresser(st, resources),
		ModelWatcher:            common.NewModelWatcher(st, resources, authorizer),
		ModelMachinesWatcher:    common.NewModelMachinesWatcher(st, resources, authorizer),
		ControllerConfigAPI:     common.NewControllerConfig(st),
		InstanceIdGetter:        common.NewInstanceIdGetter(st, getAuthFunc),
		ToolsFinder:             common.NewToolsFinder(configGetter, st, urlGetter),
		ToolsGetter:             common.NewToolsGetter(st, configGetter, st, urlGetter, getAuthOwner),
		st:                      st,
		resources:               resources,
		authorizer:              authorizer,
		configGetter:            configGetter,
		storageProviderRegistry: storageProviderRegistry,
		storagePoolManager:      poolmanager.New(state.NewStateSettings(st), storageProviderRegistry),
		getAuthFunc:             getAuthFunc,
	}, nil
}
// SetUpTest prepares fresh state settings and a pool manager for each test.
func (s *poolSuite) SetUpTest(c *gc.C) {
	s.StateSuite.SetUpTest(c)
	s.settings = state.NewStateSettings(s.State)
	s.poolManager = poolmanager.New(s.settings)
}