// createDefaultStoragePoolsOps returns txn ops that create the default
// storage pools for every provider type in the registry. Pool settings
// are accumulated in an in-memory settings manager and then converted
// into settings-document insert ops.
func (st *State) createDefaultStoragePoolsOps(registry storage.ProviderRegistry) ([]txn.Op, error) {
	m := poolmanager.MemSettings{make(map[string]map[string]interface{})}
	pm := poolmanager.New(m, registry)
	providerTypes, err := registry.StorageProviderTypes()
	if err != nil {
		return nil, errors.Trace(err)
	}
	for _, providerType := range providerTypes {
		p, err := registry.StorageProvider(providerType)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if err := poolmanager.AddDefaultStoragePools(p, pm); err != nil {
			return nil, errors.Annotatef(
				err, "adding default storage pools for %q", providerType,
			)
		}
	}
	// Every pool created above landed in the MemSettings map; turn each
	// entry into a create-settings transaction op.
	var ops []txn.Op
	for key, settings := range m.Settings {
		ops = append(ops, createSettingsOp(settingsC, key, settings))
	}
	return ops, nil
}
// TestAddServiceDefaultPool verifies that when the model's
// storage-default-block-source is set to a registered pool, a storage
// constraint with an empty pool name resolves to that default, while the
// charm's other store ("allecto") falls back to the "loop" pool.
func (s *VolumeStateSuite) TestAddServiceDefaultPool(c *gc.C) {
	// Register a default pool.
	pm := poolmanager.New(state.NewStateSettings(s.State))
	_, err := pm.Create("default-block", provider.LoopProviderType, map[string]interface{}{})
	c.Assert(err, jc.ErrorIsNil)
	err = s.State.UpdateEnvironConfig(map[string]interface{}{
		"storage-default-block-source": "default-block",
	}, nil, nil)
	c.Assert(err, jc.ErrorIsNil)

	ch := s.AddTestingCharm(c, "storage-block")
	storage := map[string]state.StorageConstraints{
		"data": makeStorageCons("", 1024, 1),
	}
	service := s.AddTestingServiceWithStorage(c, "storage-block", ch, storage)
	cons, err := service.StorageConstraints()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(cons, jc.DeepEquals, map[string]state.StorageConstraints{
		"data": state.StorageConstraints{
			Pool:  "default-block",
			Size:  1024,
			Count: 1,
		},
		"allecto": state.StorageConstraints{
			Pool:  "loop",
			Size:  1024,
			Count: 0,
		},
	})
}
// poolStorageProvider resolves poolName to its provider type and provider.
// If no pool with that name exists, poolName is tried directly as a
// provider type before the original "not found" error is surfaced.
func poolStorageProvider(st *State, poolName string) (storage.ProviderType, storage.Provider, error) {
	registry, err := st.storageProviderRegistry()
	if err != nil {
		return "", nil, errors.Annotate(err, "getting storage provider registry")
	}
	poolManager := poolmanager.New(NewStateSettings(st), registry)
	pool, err := poolManager.Get(poolName)
	if errors.IsNotFound(err) {
		// If there's no pool called poolName, maybe a provider type
		// has been specified directly.
		providerType := storage.ProviderType(poolName)
		provider, err1 := registry.StorageProvider(providerType)
		if err1 != nil {
			// The name can't be resolved as a storage provider type,
			// so return the original "pool not found" error.
			return "", nil, errors.Trace(err)
		}
		return providerType, provider, nil
	} else if err != nil {
		return "", nil, errors.Trace(err)
	}
	providerType := pool.Provider()
	provider, err := registry.StorageProvider(providerType)
	if err != nil {
		return "", nil, errors.Trace(err)
	}
	return providerType, provider, nil
}
// machineVolumeParams retrieves VolumeParams for the volumes that should be // provisioned with, and attached to, the machine. The client should ignore // parameters that it does not know how to handle. func (p *ProvisionerAPI) machineVolumeParams(m *state.Machine) ([]params.VolumeParams, error) { volumeAttachments, err := m.VolumeAttachments() if err != nil { return nil, err } if len(volumeAttachments) == 0 { return nil, nil } envConfig, err := p.st.EnvironConfig() if err != nil { return nil, err } poolManager := poolmanager.New(state.NewStateSettings(p.st)) allVolumeParams := make([]params.VolumeParams, 0, len(volumeAttachments)) for _, volumeAttachment := range volumeAttachments { volumeTag := volumeAttachment.Volume() volume, err := p.st.Volume(volumeTag) if err != nil { return nil, errors.Annotatef(err, "getting volume %q", volumeTag.Id()) } storageInstance, err := storagecommon.MaybeAssignedStorageInstance( volume.StorageInstance, p.st.StorageInstance, ) if err != nil { return nil, errors.Annotatef(err, "getting volume %q storage instance", volumeTag.Id()) } volumeParams, err := storagecommon.VolumeParams(volume, storageInstance, envConfig, poolManager) if err != nil { return nil, errors.Annotatef(err, "getting volume %q parameters", volumeTag.Id()) } provider, err := registry.StorageProvider(storage.ProviderType(volumeParams.Provider)) if err != nil { return nil, errors.Annotate(err, "getting storage provider") } if provider.Dynamic() { // Leave dynamic storage to the storage provisioner. continue } volumeAttachmentParams, ok := volumeAttachment.Params() if !ok { // Attachment is already provisioned; this is an insane // state, so we should not proceed with the volume. return nil, errors.Errorf( "volume %s already attached to machine %s", volumeTag.Id(), m.Id(), ) } // Not provisioned yet, so ask the cloud provisioner do it. 
volumeParams.Attachment = ¶ms.VolumeAttachmentParams{ volumeTag.String(), m.Tag().String(), "", // we're creating the volume, so it has no volume ID. "", // we're creating the machine, so it has no instance ID. volumeParams.Provider, volumeAttachmentParams.ReadOnly, } allVolumeParams = append(allVolumeParams, volumeParams) } return allVolumeParams, nil }
// TestClientServiceDeployWithUnsupportedStoragePool checks that deploying
// with a pool whose provider is not supported by the environment surfaces
// a descriptive per-result error.
func (s *serviceSuite) TestClientServiceDeployWithUnsupportedStoragePool(c *gc.C) {
	registry.RegisterProvider("hostloop", &mockStorageProvider{kind: storage.StorageKindBlock})
	pm := poolmanager.New(state.NewStateSettings(s.State))
	_, err := pm.Create("host-loop-pool", provider.HostLoopProviderType, map[string]interface{}{})
	c.Assert(err, jc.ErrorIsNil)

	curl, _ := s.UploadCharm(c, "utopic/storage-block-0", "storage-block")
	storageConstraints := map[string]storage.Constraints{
		"data": storage.Constraints{
			Pool:  "host-loop-pool",
			Count: 1,
			Size:  1024,
		},
	}

	var cons constraints.Value
	args := params.ServiceDeploy{
		ServiceName: "service",
		CharmUrl:    curl.String(),
		NumUnits:    1,
		Constraints: cons,
		Storage:     storageConstraints,
	}
	results, err := s.serviceApi.ServicesDeploy(params.ServicesDeploy{
		Services: []params.ServiceDeploy{args}},
	)
	// The deploy call itself succeeds; the failure is reported in the
	// per-service result.
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results.Results, gc.HasLen, 1)
	c.Assert(results.Results[0].Error, gc.ErrorMatches,
		`.*pool "host-loop-pool" uses storage provider "hostloop" which is not supported for environments of type "dummy"`)
}
// TestDefaultStoragePools runs the bootstrap command and verifies that the
// expected default pools (currently just "ebs-ssd") exist in the new state.
func (s *BootstrapSuite) TestDefaultStoragePools(c *gc.C) {
	_, cmd, err := s.initBootstrapCommand(
		c, nil,
		"--model-config", s.b64yamlControllerModelConfig,
		"--hosted-model-config", s.b64yamlHostedModelConfig,
		"--instance-id", string(s.instanceId),
	)
	c.Assert(err, jc.ErrorIsNil)
	err = cmd.Run(nil)
	c.Assert(err, jc.ErrorIsNil)

	// Open the state that bootstrap just initialized.
	st, err := state.Open(testing.ModelTag, &mongo.MongoInfo{
		Info: mongo.Info{
			Addrs:  []string{gitjujutesting.MgoServer.Addr()},
			CACert: testing.CACert,
		},
		Password: testPassword,
	}, mongotest.DialOpts(), environs.NewStatePolicy())
	c.Assert(err, jc.ErrorIsNil)
	defer st.Close()
	settings := state.NewStateSettings(st)
	pm := poolmanager.New(settings)
	for _, p := range []string{"ebs-ssd"} {
		_, err = pm.Get(p)
		c.Assert(err, jc.ErrorIsNil)
	}
}
// TestDestroyEnvironmentWithPersistentVolumesFails ensures an environment
// that holds a provisioned persistent volume cannot be destroyed.
func (s *EnvironSuite) TestDestroyEnvironmentWithPersistentVolumesFails(c *gc.C) {
	// Create a persistent volume.
	// TODO(wallyworld) - consider moving this to factory
	registry.RegisterEnvironStorageProviders("someprovider", ec2.EBS_ProviderType)
	pm := poolmanager.New(state.NewStateSettings(s.State))
	_, err := pm.Create("persistent-block", ec2.EBS_ProviderType, map[string]interface{}{"persistent": "true"})
	c.Assert(err, jc.ErrorIsNil)

	ch := s.AddTestingCharm(c, "storage-block2")
	storage := map[string]state.StorageConstraints{
		"multi1to10": makeStorageCons("persistent-block", 1024, 1),
	}
	service := s.AddTestingServiceWithStorage(c, "storage-block2", ch, storage)
	unit, err := service.AddUnit()
	c.Assert(err, jc.ErrorIsNil)
	err = s.State.AssignUnit(unit, state.AssignCleanEmpty)
	c.Assert(err, jc.ErrorIsNil)

	// Mark the volume provisioned and persistent so destroy must refuse.
	volume1, err := s.State.StorageInstanceVolume(names.NewStorageTag("multi1to10/0"))
	c.Assert(err, jc.ErrorIsNil)
	volumeInfoSet := state.VolumeInfo{Size: 123, Persistent: true, VolumeId: "vol-ume"}
	err = s.State.SetVolumeInfo(volume1.VolumeTag(), volumeInfoSet)
	c.Assert(err, jc.ErrorIsNil)

	env, err := s.State.Environment()
	c.Assert(err, jc.ErrorIsNil)
	// TODO(wallyworld) when we can destroy/remove volume, ensure env can then be destroyed
	c.Assert(errors.Cause(env.Destroy()), gc.Equals, state.ErrPersistentVolumesExist)
}
// TODO(wallyworld) - add another test that deploy with storage fails for older environments
// (need deploy client to be refactored to use API stub)
// TestStorage deploys a local charm with a --storage directive and checks
// the resulting storage constraints, including the fallback for the
// unspecified "allecto" store.
func (s *DeploySuite) TestStorage(c *gc.C) {
	pm := poolmanager.New(state.NewStateSettings(s.State))
	_, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{"foo": "bar"})
	c.Assert(err, jc.ErrorIsNil)

	testcharms.Repo.CharmArchivePath(s.SeriesPath, "storage-block")
	err = runDeploy(c, "local:storage-block", "--storage", "data=loop-pool,1G")
	c.Assert(err, jc.ErrorIsNil)
	curl := charm.MustParseURL("local:trusty/storage-block-1")
	service, _ := s.AssertService(c, "storage-block", curl, 1, 0)

	cons, err := service.StorageConstraints()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(cons, jc.DeepEquals, map[string]state.StorageConstraints{
		"data": {
			Pool:  "loop-pool",
			Count: 1,
			Size:  1024,
		},
		"allecto": {
			Pool:  "loop",
			Count: 0,
			Size:  1024,
		},
	})
}
func (s *MigrationBaseSuite) makeUnitWithStorage(c *gc.C) (*state.Application, *state.Unit, names.StorageTag) { pool := "loop-pool" kind := "block" // Create a default pool for block devices. pm := poolmanager.New(state.NewStateSettings(s.State), dummy.StorageProviders()) _, err := pm.Create(pool, provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) // There are test charms called "storage-block" and // "storage-filesystem" which are what you'd expect. ch := s.AddTestingCharm(c, "storage-"+kind) storage := map[string]state.StorageConstraints{ "data": makeStorageCons(pool, 1024, 1), } service := s.AddTestingServiceWithStorage(c, "storage-"+kind, ch, storage) unit, err := service.AddUnit() machine := s.Factory.MakeMachine(c, nil) err = unit.AssignToMachine(machine) c.Assert(err, jc.ErrorIsNil) c.Assert(err, jc.ErrorIsNil) storageTag := names.NewStorageTag("data/0") agentVersion := version.MustParseBinary("2.0.1-quantal-and64") err = unit.SetAgentVersion(agentVersion) c.Assert(err, jc.ErrorIsNil) return service, unit, storageTag }
// setupTestStorageSupport creates the suite's test pools: a loop-backed
// pool and an "environscoped" persistent pool.
func setupTestStorageSupport(c *gc.C, s *state.State) {
	stsetts := state.NewStateSettings(s)
	poolManager := poolmanager.New(stsetts, dummy.StorageProviders())
	_, err := poolManager.Create(testPool, provider.LoopProviderType, map[string]interface{}{"it": "works"})
	c.Assert(err, jc.ErrorIsNil)
	_, err = poolManager.Create(testPersistentPool, "environscoped", map[string]interface{}{"persistent": true})
	c.Assert(err, jc.ErrorIsNil)
}
func setupTestStorageSupport(c *gc.C, s *state.State) { stsetts := state.NewStateSettings(s) poolManager := poolmanager.New(stsetts) _, err := poolManager.Create(testPool, provider.LoopProviderType, map[string]interface{}{"it": "works"}) c.Assert(err, jc.ErrorIsNil) registry.RegisterEnvironStorageProviders("dummy", ec2.EBS_ProviderType) registry.RegisterEnvironStorageProviders("dummyenv", ec2.EBS_ProviderType) }
func (s *assignCleanSuite) SetUpTest(c *gc.C) { c.Logf("assignment policy for this test: %q", s.policy) s.ConnSuite.SetUpTest(c) wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) s.wordpress = wordpress pm := poolmanager.New(state.NewStateSettings(s.State), provider.CommonStorageProviders()) _, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) }
func (s *baseSuite) setupStoragePool(c *gc.C) { pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) err = s.State.UpdateEnvironConfig(map[string]interface{}{ "storage-default-block-source": "loop-pool", }, nil, nil) c.Assert(err, jc.ErrorIsNil) }
// newStorageProvisionerAPI wires up a StorageProvisionerAPI using an
// environ-backed storage provider registry and a state-settings pool
// manager.
func newStorageProvisionerAPI(st *state.State, resources facade.Resources, authorizer facade.Authorizer) (*StorageProvisionerAPI, error) {
	env, err := stateenvirons.GetNewEnvironFunc(environs.New)(st)
	if err != nil {
		return nil, errors.Annotate(err, "getting environ")
	}
	registry := stateenvirons.NewStorageProviderRegistry(env)
	pm := poolmanager.New(state.NewStateSettings(st), registry)
	return NewStorageProvisionerAPI(stateShim{st}, resources, authorizer, registry, pm)
}
// SetUpTest builds a pool manager over fresh state settings and a static
// registry containing only the "loop" provider.
func (s *poolSuite) SetUpTest(c *gc.C) {
	s.StateSuite.SetUpTest(c)
	s.settings = state.NewStateSettings(s.State)
	s.registry = storage.StaticProviderRegistry{
		map[storage.ProviderType]storage.Provider{
			"loop": &dummystorage.StorageProvider{},
		},
	}
	s.poolManager = poolmanager.New(s.settings, s.registry)
}
// TestStoragePools verifies that a storage pool round-trips through model
// export/import with its name, provider, and attributes intact.
func (s *MigrationImportSuite) TestStoragePools(c *gc.C) {
	pm := poolmanager.New(state.NewStateSettings(s.State), provider.CommonStorageProviders())
	_, err := pm.Create("test-pool", provider.LoopProviderType, map[string]interface{}{
		"value": 42,
	})
	c.Assert(err, jc.ErrorIsNil)

	_, newSt := s.importModel(c)

	// Re-read the pools from the imported model's state.
	pm = poolmanager.New(state.NewStateSettings(newSt), provider.CommonStorageProviders())
	pools, err := pm.List()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(pools, gc.HasLen, 1)

	pool := pools[0]
	c.Assert(pool.Name(), gc.Equals, "test-pool")
	c.Assert(pool.Provider(), gc.Equals, provider.LoopProviderType)
	c.Assert(pool.Attrs(), jc.DeepEquals, map[string]interface{}{
		"value": 42,
	})
}
// FilesystemParams returns the parameters for creating the filesystems
// with the specified tags.
func (s *StorageProvisionerAPI) FilesystemParams(args params.Entities) (params.FilesystemParamsResults, error) {
	canAccess, err := s.getStorageEntityAuthFunc()
	if err != nil {
		return params.FilesystemParamsResults{}, err
	}
	envConfig, err := s.st.EnvironConfig()
	if err != nil {
		return params.FilesystemParamsResults{}, err
	}
	results := params.FilesystemParamsResults{
		Results: make([]params.FilesystemParamsResult, len(args.Entities)),
	}
	poolManager := poolmanager.New(s.settings)
	// one resolves a single entity tag to its filesystem params. Unknown
	// or inaccessible filesystems are reported as ErrPerm rather than
	// not-found, so callers cannot probe for existence.
	one := func(arg params.Entity) (params.FilesystemParams, error) {
		tag, err := names.ParseFilesystemTag(arg.Tag)
		if err != nil || !canAccess(tag) {
			return params.FilesystemParams{}, common.ErrPerm
		}
		filesystem, err := s.st.Filesystem(tag)
		if errors.IsNotFound(err) {
			return params.FilesystemParams{}, common.ErrPerm
		} else if err != nil {
			return params.FilesystemParams{}, err
		}
		storageInstance, err := storagecommon.MaybeAssignedStorageInstance(
			filesystem.Storage, s.st.StorageInstance,
		)
		if err != nil {
			return params.FilesystemParams{}, err
		}
		filesystemParams, err := storagecommon.FilesystemParams(
			filesystem, storageInstance, envConfig, poolManager,
		)
		if err != nil {
			return params.FilesystemParams{}, err
		}
		return filesystemParams, nil
	}
	// Per-entity errors go into the corresponding result slot; the bulk
	// call itself only fails on the shared setup above.
	for i, arg := range args.Entities {
		var result params.FilesystemParamsResult
		filesystemParams, err := one(arg)
		if err != nil {
			result.Error = common.ServerError(err)
		} else {
			result.Result = filesystemParams
		}
		results.Results[i] = result
	}
	return results, nil
}
// SetUpTest creates the two pools shared by the storage state tests: a
// loop-backed default pool and a persistent environ-scoped block pool.
func (s *StorageStateSuiteBase) SetUpTest(c *gc.C) {
	s.ConnSuite.SetUpTest(c)
	// Create a default pool for block devices.
	pm := poolmanager.New(state.NewStateSettings(s.State), dummy.StorageProviders())
	_, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{})
	c.Assert(err, jc.ErrorIsNil)
	// Create a pool that creates persistent block devices.
	_, err = pm.Create("persistent-block", "environscoped-block", map[string]interface{}{
		"persistent": true,
	})
	c.Assert(err, jc.ErrorIsNil)
}
// TestNewModelDefaultPools checks that a freshly created model is populated
// with each storage provider's default pools.
func (s *StorageStateSuite) TestNewModelDefaultPools(c *gc.C) {
	st := s.Factory.MakeModel(c, &factory.ModelParams{
		StorageProviderRegistry: testingStorageProviders,
	})
	s.AddCleanup(func(*gc.C) { st.Close() })

	// When a model is created, it is populated with the default
	// pools of each storage provider supported by the model's
	// cloud provider.
	pm := poolmanager.New(state.NewStateSettings(st), testingStorageProviders)
	listed, err := pm.List()
	c.Assert(err, jc.ErrorIsNil)
	// Sort for a deterministic comparison against the expected pools.
	sort.Sort(byStorageConfigName(listed))
	c.Assert(listed, jc.DeepEquals, []*storage.Config{blackPool, radiancePool})
}
// storagePools recreates each storage pool described in the imported model
// using a pool manager bound to the importing state.
func (i *importer) storagePools() error {
	registry, err := i.st.storageProviderRegistry()
	if err != nil {
		return errors.Annotate(err, "getting provider registry")
	}
	pm := poolmanager.New(NewStateSettings(i.st), registry)
	for _, pool := range i.model.StoragePools() {
		_, err := pm.Create(pool.Name(), storage.ProviderType(pool.Provider()), pool.Attributes())
		if err != nil {
			return errors.Annotatef(err, "creating pool %q", pool.Name())
		}
	}
	return nil
}
// storageConfig returns the provider type and config attributes for the
// specified poolName. If no such pool exists, we check to see if poolName is
// actually a provider type, in which case config will be empty.
func storageConfig(st *state.State, poolName string) (storage.ProviderType, map[string]interface{}, error) {
	pm := poolmanager.New(state.NewStateSettings(st))
	p, err := pm.Get(poolName)
	// If not a storage pool, then maybe a provider type.
	if errors.IsNotFound(err) {
		providerType := storage.ProviderType(poolName)
		if _, err1 := registry.StorageProvider(providerType); err1 != nil {
			// Not a provider type either: surface the original
			// "pool not found" error rather than err1.
			return "", nil, errors.Trace(err)
		}
		return providerType, nil, nil
	}
	if err != nil {
		return "", nil, errors.Trace(err)
	}
	return p.Provider(), p.Attrs(), nil
}
// TestProvisioningMachinesWithRequestedVolumes provisions a machine with
// one static and one persistent volume and checks the volume information
// the provisioner passes when starting the instance.
func (s *ProvisionerSuite) TestProvisioningMachinesWithRequestedVolumes(c *gc.C) {
	// Set up a persistent pool.
	registry.RegisterProvider("static", &dummystorage.StorageProvider{IsDynamic: false})
	registry.RegisterEnvironStorageProviders("dummy", "static")
	defer registry.RegisterProvider("static", nil)
	poolManager := poolmanager.New(state.NewStateSettings(s.State))
	_, err := poolManager.Create("persistent-pool", "static", map[string]interface{}{"persistent": true})
	c.Assert(err, jc.ErrorIsNil)

	p := s.newEnvironProvisioner(c)
	defer stop(c, p)

	// Add and provision a machine with volumes specified.
	requestedVolumes := []state.MachineVolumeParams{{
		Volume:     state.VolumeParams{Pool: "static", Size: 1024},
		Attachment: state.VolumeAttachmentParams{},
	}, {
		Volume:     state.VolumeParams{Pool: "persistent-pool", Size: 2048},
		Attachment: state.VolumeAttachmentParams{},
	}}
	expectVolumeInfo := []storage.Volume{{
		names.NewVolumeTag("1"),
		storage.VolumeInfo{
			Size: 1024,
		},
	}, {
		names.NewVolumeTag("2"),
		storage.VolumeInfo{
			Size:       2048,
			Persistent: true,
		},
	}}
	m, err := s.addMachineWithRequestedVolumes(requestedVolumes, s.defaultConstraints)
	c.Assert(err, jc.ErrorIsNil)
	inst := s.checkStartInstanceCustom(
		c, m, "pork", s.defaultConstraints,
		nil, nil, nil,
		expectVolumeInfo, false,
		nil, true,
	)

	// Cleanup.
	c.Assert(m.EnsureDead(), gc.IsNil)
	s.checkStopInstances(c, inst)
	s.waitRemoved(c, m)
}
func (s *defaultStoragePoolsSuite) TestDefaultStoragePools(c *gc.C) { p1, err := storage.NewConfig("pool1", storage.ProviderType("loop"), map[string]interface{}{"1": "2"}) p2, err := storage.NewConfig("pool2", storage.ProviderType("tmpfs"), map[string]interface{}{"3": "4"}) c.Assert(err, jc.ErrorIsNil) defaultPools := []*storage.Config{p1, p2} poolmanager.RegisterDefaultStoragePools(defaultPools) settings := state.NewStateSettings(s.State) err = poolmanager.AddDefaultStoragePools(settings) c.Assert(err, jc.ErrorIsNil) pm := poolmanager.New(settings) for _, pool := range defaultPools { p, err := pm.Get(pool.Name()) c.Assert(err, jc.ErrorIsNil) c.Assert(p.Provider(), gc.Equals, pool.Provider()) c.Assert(p.Attrs(), gc.DeepEquals, pool.Attrs()) } }
// storagePools lists every storage pool via the exporter-backed settings
// manager and adds each one to the model description.
func (e *exporter) storagePools() error {
	registry, err := e.st.storageProviderRegistry()
	if err != nil {
		return errors.Annotate(err, "getting provider registry")
	}
	pm := poolmanager.New(storagePoolSettingsManager{e: e}, registry)
	poolConfigs, err := pm.List()
	if err != nil {
		return errors.Annotate(err, "listing pools")
	}
	for _, cfg := range poolConfigs {
		e.model.AddStoragePool(description.StoragePoolArgs{
			Name:       cfg.Name(),
			Provider:   string(cfg.Provider()),
			Attributes: cfg.Attrs(),
		})
	}
	return nil
}
// assertPoolExists fails unless a pool named pname exists with the given
// provider and the single "key=value" attribute described by attr.
func assertPoolExists(c *gc.C, st *state.State, pname, provider, attr string) {
	stsetts := state.NewStateSettings(st)
	poolManager := poolmanager.New(stsetts)
	found, err := poolManager.List()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(len(found) > 0, jc.IsTrue)

	exists := false
	for _, one := range found {
		if one.Name() == pname {
			exists = true
			c.Assert(string(one.Provider()), gc.Equals, provider)
			// At this stage, only 1 attr is expected and checked
			expectedAttrs := strings.Split(attr, "=")
			value, ok := one.Attrs()[expectedAttrs[0]]
			c.Assert(ok, jc.IsTrue)
			c.Assert(value, gc.Equals, expectedAttrs[1])
		}
	}
	c.Assert(exists, jc.IsTrue)
}
func (s *provisionerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) s.factory = factory.NewFactory(s.State) s.resources = common.NewResources() // Create the resource registry separately to track invocations to // Register. s.resources = common.NewResources() s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) env, err := stateenvirons.GetNewEnvironFunc(environs.New)(s.State) c.Assert(err, jc.ErrorIsNil) registry := stateenvirons.NewStorageProviderRegistry(env) pm := poolmanager.New(state.NewStateSettings(s.State), registry) s.authorizer = &apiservertesting.FakeAuthorizer{ Tag: names.NewMachineTag("0"), EnvironManager: true, } backend := storageprovisioner.NewStateBackend(s.State) s.api, err = storageprovisioner.NewStorageProvisionerAPI(backend, s.resources, s.authorizer, registry, pm) c.Assert(err, jc.ErrorIsNil) }
// TestDefaultStoragePools checks that AddDefaultStoragePools writes each of
// a provider's default pool configs into the settings under the expected
// "pool#<name>" keys, with name and type attributes merged in.
func (s *defaultStoragePoolsSuite) TestDefaultStoragePools(c *gc.C) {
	p1, err := storage.NewConfig("pool1", storage.ProviderType("whatever"), map[string]interface{}{"1": "2"})
	c.Assert(err, jc.ErrorIsNil)
	p2, err := storage.NewConfig("pool2", storage.ProviderType("whatever"), map[string]interface{}{"3": "4"})
	c.Assert(err, jc.ErrorIsNil)
	provider := &dummystorage.StorageProvider{
		DefaultPools_: []*storage.Config{p1, p2},
	}

	settings := poolmanager.MemSettings{make(map[string]map[string]interface{})}
	pm := poolmanager.New(settings, storage.StaticProviderRegistry{
		map[storage.ProviderType]storage.Provider{"whatever": provider},
	})

	err = poolmanager.AddDefaultStoragePools(provider, pm)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(settings.Settings, jc.DeepEquals, map[string]map[string]interface{}{
		"pool#pool1": map[string]interface{}{"1": "2", "name": "pool1", "type": "whatever"},
		"pool#pool2": map[string]interface{}{"3": "4", "name": "pool2", "type": "whatever"},
	})
}
// TestSetInstanceInfo provisions a fresh machine with a loop-pool volume,
// sets its instance info through the provisioner API, and verifies the
// instance id, volume info, and volume attachments. It also checks that a
// second SetInstanceInfo on the same machine fails.
func (s *provisionerSuite) TestSetInstanceInfo(c *gc.C) {
	pm := poolmanager.New(state.NewStateSettings(s.State))
	_, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{"foo": "bar"})
	c.Assert(err, jc.ErrorIsNil)

	// Create a fresh machine, since machine 0 is already provisioned.
	template := state.MachineTemplate{
		Series: "quantal",
		Jobs:   []state.MachineJob{state.JobHostUnits},
		Volumes: []state.MachineVolumeParams{{
			Volume: state.VolumeParams{
				Pool: "loop-pool",
				Size: 123,
			}},
		},
	}
	notProvisionedMachine, err := s.State.AddOneMachine(template)
	c.Assert(err, jc.ErrorIsNil)

	apiMachine, err := s.provisioner.Machine(notProvisionedMachine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)

	// Before provisioning, InstanceId must report not-provisioned.
	instanceId, err := apiMachine.InstanceId()
	c.Assert(err, jc.Satisfies, params.IsCodeNotProvisioned)
	c.Assert(err, gc.ErrorMatches, "machine 1 not provisioned")
	c.Assert(instanceId, gc.Equals, instance.Id(""))

	hwChars := instance.MustParseHardware("cpu-cores=123", "mem=4G")

	_, err = s.State.Network("net1")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	_, err = s.State.Network("vlan42")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	ifacesMachine, err := notProvisionedMachine.NetworkInterfaces()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ifacesMachine, gc.HasLen, 0)

	volumes := []params.Volume{{
		VolumeTag: "volume-1-0",
		Info: params.VolumeInfo{
			VolumeId: "vol-123",
			Size:     124,
		},
	}}
	volumeAttachments := map[string]params.VolumeAttachmentInfo{
		"volume-1-0": {
			DeviceName: "xvdf1",
		},
	}

	err = apiMachine.SetInstanceInfo(
		"i-will", "fake_nonce", &hwChars, nil,
		volumes, volumeAttachments,
	)
	c.Assert(err, jc.ErrorIsNil)

	instanceId, err = apiMachine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-will"))

	// Try it again - should fail.
	err = apiMachine.SetInstanceInfo("i-wont", "fake", nil, nil, nil, nil)
	c.Assert(err, gc.ErrorMatches, `cannot record provisioning info for "i-wont": cannot set instance data for machine "1": already set`)

	// Now try to get machine 0's instance id.
	apiMachine, err = s.provisioner.Machine(s.machine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	instanceId, err = apiMachine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-manager"))

	// Now check volumes and volume attachments.
	volume, err := s.State.Volume(names.NewVolumeTag("1/0"))
	c.Assert(err, jc.ErrorIsNil)
	volumeInfo, err := volume.Info()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(volumeInfo, gc.Equals, state.VolumeInfo{
		VolumeId: "vol-123",
		Pool:     "loop-pool",
		Size:     124,
	})
	stateVolumeAttachments, err := s.State.MachineVolumeAttachments(names.NewMachineTag("1"))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(stateVolumeAttachments, gc.HasLen, 1)
	volumeAttachmentInfo, err := stateVolumeAttachments[0].Info()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(volumeAttachmentInfo, gc.Equals, state.VolumeAttachmentInfo{
		DeviceName: "xvdf1",
	})
}
// poolManager constructs a PoolManager backed by the state's settings.
func poolManager(st *state.State) poolmanager.PoolManager {
	settings := state.NewStateSettings(st)
	return poolmanager.New(settings)
}
// TestSetInstanceInfo provisions a fresh machine with networks, network
// interfaces, and a loop-pool volume. It verifies that duplicate network
// and interface entries are ignored, that a second SetInstanceInfo fails,
// and that volume info and attachments are recorded.
func (s *provisionerSuite) TestSetInstanceInfo(c *gc.C) {
	pm := poolmanager.New(state.NewStateSettings(s.State))
	_, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{"foo": "bar"})
	c.Assert(err, jc.ErrorIsNil)

	// Create a fresh machine, since machine 0 is already provisioned.
	template := state.MachineTemplate{
		Series: "quantal",
		Jobs:   []state.MachineJob{state.JobHostUnits},
		Volumes: []state.MachineVolumeParams{{
			Volume: state.VolumeParams{
				Pool: "loop-pool",
				Size: 123,
			}},
		},
	}
	notProvisionedMachine, err := s.State.AddOneMachine(template)
	c.Assert(err, jc.ErrorIsNil)

	apiMachine, err := s.provisioner.Machine(notProvisionedMachine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)

	// Before provisioning, InstanceId must report not-provisioned.
	instanceId, err := apiMachine.InstanceId()
	c.Assert(err, jc.Satisfies, params.IsCodeNotProvisioned)
	c.Assert(err, gc.ErrorMatches, "machine 1 not provisioned")
	c.Assert(instanceId, gc.Equals, instance.Id(""))

	hwChars := instance.MustParseHardware("cpu-cores=123", "mem=4G")

	// Neither the networks nor any interfaces exist yet.
	_, err = s.State.Network("net1")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	_, err = s.State.Network("vlan42")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
	ifacesMachine, err := notProvisionedMachine.NetworkInterfaces()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ifacesMachine, gc.HasLen, 0)

	networks := []params.Network{{
		Tag:        "network-net1",
		ProviderId: "net1",
		CIDR:       "0.1.2.0/24",
		VLANTag:    0,
	}, {
		Tag:        "network-vlan42",
		ProviderId: "vlan42",
		CIDR:       "0.2.2.0/24",
		VLANTag:    42,
	}, {
		Tag:        "network-vlan69",
		ProviderId: "vlan69",
		CIDR:       "0.3.2.0/24",
		VLANTag:    69,
	}, {
		Tag:        "network-vlan42", // duplicated; ignored
		ProviderId: "vlan42",
		CIDR:       "0.2.2.0/24",
		VLANTag:    42,
	}}
	ifaces := []params.NetworkInterface{{
		MACAddress:    "aa:bb:cc:dd:ee:f0",
		NetworkTag:    "network-net1",
		InterfaceName: "eth0",
		IsVirtual:     false,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f1",
		NetworkTag:    "network-net1",
		InterfaceName: "eth1",
		IsVirtual:     false,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f1",
		NetworkTag:    "network-vlan42",
		InterfaceName: "eth1.42",
		IsVirtual:     true,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f1",
		NetworkTag:    "network-vlan69",
		InterfaceName: "eth1.69",
		IsVirtual:     true,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f1", // duplicated mac+net; ignored
		NetworkTag:    "network-vlan42",
		InterfaceName: "eth2",
		IsVirtual:     true,
	}, {
		MACAddress:    "aa:bb:cc:dd:ee:f4",
		NetworkTag:    "network-net1",
		InterfaceName: "eth1", // duplicated name+machine id; ignored
		IsVirtual:     false,
	}}
	volumes := []params.Volume{{
		VolumeTag: "volume-1-0",
		Info: params.VolumeInfo{
			VolumeId: "vol-123",
			Size:     124,
		},
	}}
	volumeAttachments := map[string]params.VolumeAttachmentInfo{
		"volume-1-0": {
			DeviceName: "xvdf1",
		},
	}

	err = apiMachine.SetInstanceInfo(
		"i-will", "fake_nonce", &hwChars, networks, ifaces,
		volumes, volumeAttachments,
	)
	c.Assert(err, jc.ErrorIsNil)

	instanceId, err = apiMachine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-will"))

	// Try it again - should fail.
	err = apiMachine.SetInstanceInfo("i-wont", "fake", nil, nil, nil, nil, nil)
	c.Assert(err, gc.ErrorMatches, `cannot record provisioning info for "i-wont": cannot set instance data for machine "1": already set`)

	// Now try to get machine 0's instance id.
	apiMachine, err = s.provisioner.Machine(s.machine.Tag().(names.MachineTag))
	c.Assert(err, jc.ErrorIsNil)
	instanceId, err = apiMachine.InstanceId()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(instanceId, gc.Equals, instance.Id("i-manager"))

	// Check the networks are created.
	for i := range networks {
		if i == 3 {
			// Last one was ignored, so skip it.
			break
		}
		tag, err := names.ParseNetworkTag(networks[i].Tag)
		c.Assert(err, jc.ErrorIsNil)
		networkName := tag.Id()
		nw, err := s.State.Network(networkName)
		c.Assert(err, jc.ErrorIsNil)
		c.Check(nw.Name(), gc.Equals, networkName)
		c.Check(nw.ProviderId(), gc.Equals, network.Id(networks[i].ProviderId))
		c.Check(nw.Tag().String(), gc.Equals, networks[i].Tag)
		c.Check(nw.VLANTag(), gc.Equals, networks[i].VLANTag)
		c.Check(nw.CIDR(), gc.Equals, networks[i].CIDR)
	}

	// And the network interfaces as well.
	ifacesMachine, err = notProvisionedMachine.NetworkInterfaces()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ifacesMachine, gc.HasLen, 4)
	actual := make([]params.NetworkInterface, len(ifacesMachine))
	for i, iface := range ifacesMachine {
		actual[i].InterfaceName = iface.InterfaceName()
		actual[i].NetworkTag = iface.NetworkTag().String()
		actual[i].MACAddress = iface.MACAddress()
		actual[i].IsVirtual = iface.IsVirtual()
		c.Check(iface.MachineTag(), gc.Equals, notProvisionedMachine.Tag())
		c.Check(iface.MachineId(), gc.Equals, notProvisionedMachine.Id())
	}
	c.Assert(actual, jc.SameContents, ifaces[:4]) // skip the rest as they are ignored.

	// Now check volumes and volume attachments.
	volume, err := s.State.Volume(names.NewVolumeTag("1/0"))
	c.Assert(err, jc.ErrorIsNil)
	volumeInfo, err := volume.Info()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(volumeInfo, gc.Equals, state.VolumeInfo{
		VolumeId: "vol-123",
		Pool:     "loop-pool",
		Size:     124,
	})
	stateVolumeAttachments, err := s.State.MachineVolumeAttachments(names.NewMachineTag("1"))
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(stateVolumeAttachments, gc.HasLen, 1)
	volumeAttachmentInfo, err := stateVolumeAttachments[0].Info()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(volumeAttachmentInfo, gc.Equals, state.VolumeAttachmentInfo{
		DeviceName: "xvdf1",
	})
}