// TestWatchEnvironVolumeAttachments verifies that the environ-scoped volume
// attachment watcher reports the initial attachment set, attachments for
// newly added units, and the dying/removed transitions of an attachment.
func (s *VolumeStateSuite) TestWatchEnvironVolumeAttachments(c *gc.C) {
	service := s.setupMixedScopeStorageService(c, "block")
	// addUnit adds a unit to the service and assigns it to a clean, empty
	// machine, which creates that machine's storage as a side effect.
	addUnit := func() {
		u, err := service.AddUnit()
		c.Assert(err, jc.ErrorIsNil)
		err = s.State.AssignUnit(u, state.AssignCleanEmpty)
		c.Assert(err, jc.ErrorIsNil)
	}
	addUnit()

	w := s.State.WatchEnvironVolumeAttachments()
	defer testing.AssertStop(c, w)
	wc := testing.NewStringsWatcherC(c, s.State, w)
	// Only the environ-scoped volume attachment ("machine:volume" id 0:0)
	// is reported; machine-scoped volumes are excluded from this watcher.
	wc.AssertChangeInSingleEvent("0:0") // initial
	wc.AssertNoChange()

	addUnit()
	wc.AssertChangeInSingleEvent("1:3")
	wc.AssertNoChange()

	err := s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0") // dying
	wc.AssertNoChange()

	err = s.State.RemoveVolumeAttachment(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0") // removed
	wc.AssertNoChange()
}
func (s *provisionerSuite) TestRemoveVolumesMachineAgent(c *gc.C) { s.setupVolumes(c) s.authorizer.EnvironManager = false args := params.Entities{Entities: []params.Entity{ {"volume-0-0"}, {"volume-0-42"}, {"volume-42"}, {"volume-invalid"}, {"machine-0"}, }} err := s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("0/0")) c.Assert(err, jc.ErrorIsNil) err = s.State.RemoveVolumeAttachment(names.NewMachineTag("0"), names.NewVolumeTag("0/0")) c.Assert(err, jc.ErrorIsNil) err = s.State.DestroyVolume(names.NewVolumeTag("0/0")) c.Assert(err, jc.ErrorIsNil) result, err := s.api.Remove(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {Error: nil}, {Error: nil}, {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: ¶ms.Error{Message: `"volume-invalid" is not a valid volume tag`}}, {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) }
// TestWatchMachineVolumeAttachments verifies the per-machine volume
// attachment watcher: it reports machine 0's initial attachments, ignores
// other machines, and reports dying/removed transitions.
func (s *VolumeStateSuite) TestWatchMachineVolumeAttachments(c *gc.C) {
	service := s.setupMixedScopeStorageService(c, "block")
	// addUnit adds a unit and assigns it to a clean, empty machine,
	// creating that machine's storage as a side effect.
	addUnit := func() {
		u, err := service.AddUnit()
		c.Assert(err, jc.ErrorIsNil)
		err = s.State.AssignUnit(u, state.AssignCleanEmpty)
		c.Assert(err, jc.ErrorIsNil)
	}
	addUnit()

	w := s.State.WatchMachineVolumeAttachments(names.NewMachineTag("0"))
	defer testing.AssertStop(c, w)
	wc := testing.NewStringsWatcherC(c, s.State, w)
	wc.AssertChangeInSingleEvent("0:0", "0:0/1", "0:0/2") // initial
	wc.AssertNoChange()

	addUnit()
	// no change, since we're only interested in the one machine.
	wc.AssertNoChange()

	err := s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0") // dying
	wc.AssertNoChange()

	err = s.State.RemoveVolumeAttachment(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0") // removed
	wc.AssertNoChange()

	// TODO(axw) respond to changes to the same machine when we support
	// dynamic storage and/or placement.
}
func (s *volumeSourceSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.provider = gce.GCEStorageProvider() var err error s.source, err = s.provider.VolumeSource(s.BaseSuite.Config, &storage.Config{}) c.Check(err, jc.ErrorIsNil) inst := gce.NewInstance(s.BaseInstance, s.Env) vTag := names.NewVolumeTag("0") mTag := names.NewMachineTag("0") s.instId = inst.Id() s.attachmentParams = &storage.VolumeAttachmentParams{ AttachmentParams: storage.AttachmentParams{ Provider: "gce", Machine: mTag, InstanceId: s.instId, }, VolumeId: s.BaseDisk.Name, Volume: names.NewVolumeTag("0"), } s.params = []storage.VolumeParams{{ Tag: vTag, Size: 1024, Provider: "gce", Attachment: s.attachmentParams, }} }
func (s *loopSuite) TestCreateVolumes(c *gc.C) { source, _ := s.loopVolumeSource(c) s.commands.expect("fallocate", "-l", "2MiB", filepath.Join(s.storageDir, "volume-0")) volumes, volumeAttachments, err := source.CreateVolumes([]storage.VolumeParams{{ Tag: names.NewVolumeTag("0"), Size: 2, Attachment: &storage.VolumeAttachmentParams{ AttachmentParams: storage.AttachmentParams{ Machine: names.NewMachineTag("1"), InstanceId: "instance-id", }, }, }}) c.Assert(err, jc.ErrorIsNil) c.Assert(volumes, gc.HasLen, 1) // volume attachments always deferred to AttachVolumes c.Assert(volumeAttachments, gc.HasLen, 0) c.Assert(volumes[0], gc.Equals, storage.Volume{ names.NewVolumeTag("0"), storage.VolumeInfo{ VolumeId: "volume-0", Size: 2, }, }) }
// assertCreateVolumes creates the suite's three standard volumes through the
// given volume source (see createVolumes) and checks both the returned
// storage.Volume values and the sizes recorded by the fake EC2 backend.
func (s *ebsVolumeSuite) assertCreateVolumes(c *gc.C, vs storage.VolumeSource, instanceId string) {
	vols, err := s.createVolumes(vs, instanceId)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(vols, gc.HasLen, 3)
	// Requested sizes of 10/20/30 * 1000 MiB come back as 10240/20480/30720
	// MiB — sizes appear to be rounded up to whole GiB (TODO confirm against
	// the EBS provider's rounding rules).
	c.Assert(vols, jc.SameContents, []storage.Volume{{
		names.NewVolumeTag("0"),
		storage.VolumeInfo{
			Size:       10240,
			VolumeId:   "vol-0",
			Persistent: true,
		},
	}, {
		names.NewVolumeTag("1"),
		storage.VolumeInfo{
			Size:       20480,
			VolumeId:   "vol-1",
			Persistent: true,
		},
	}, {
		names.NewVolumeTag("2"),
		storage.VolumeInfo{
			Size:       30720,
			VolumeId:   "vol-2",
			Persistent: false,
		},
	}})

	// Cross-check against the backend: EC2 reports sizes in GiB.
	ec2Client := ec2.StorageEC2(vs)
	ec2Vols, err := ec2Client.Volumes(nil, nil)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ec2Vols.Volumes, gc.HasLen, 3)
	sortBySize(ec2Vols.Volumes)
	c.Assert(ec2Vols.Volumes[0].Size, gc.Equals, 10)
	c.Assert(ec2Vols.Volumes[1].Size, gc.Equals, 20)
	c.Assert(ec2Vols.Volumes[2].Size, gc.Equals, 30)
}
// TestDestroyVolumes verifies that when volumes report Dead, the provisioner
// deprovisions only the volume that was actually provisioned, and removes
// both (provisioned and unprovisioned) from state.
func (s *storageProvisionerSuite) TestDestroyVolumes(c *gc.C) {
	provisionedVolume := names.NewVolumeTag("1")
	unprovisionedVolume := names.NewVolumeTag("2")

	volumeAccessor := newMockVolumeAccessor()
	volumeAccessor.provisionVolume(provisionedVolume)

	// Report every queried volume as Dead, which triggers destruction.
	life := func(tags []names.Tag) ([]params.LifeResult, error) {
		results := make([]params.LifeResult, len(tags))
		for i := range results {
			results[i].Life = params.Dead
		}
		return results, nil
	}

	// Capture the volume IDs the provider is asked to destroy.
	destroyedChan := make(chan interface{}, 1)
	s.provider.destroyVolumesFunc = func(volumeIds []string) []error {
		destroyedChan <- volumeIds
		return make([]error, len(volumeIds))
	}

	// Capture the tags passed to the lifecycle manager's remove call.
	removedChan := make(chan interface{}, 1)
	remove := func(tags []names.Tag) ([]params.ErrorResult, error) {
		removedChan <- tags
		return make([]params.ErrorResult, len(tags)), nil
	}

	args := &workerArgs{
		volumes: volumeAccessor,
		life: &mockLifecycleManager{
			life:   life,
			remove: remove,
		},
	}
	worker := newStorageProvisioner(c, args)
	// Kill first, then assert a clean exit (defers run in reverse order).
	defer func() { c.Assert(worker.Wait(), gc.IsNil) }()
	defer worker.Kill()

	// Feed both volumes through the watcher, then signal environ readiness.
	volumeAccessor.volumesWatcher.changes <- []string{
		provisionedVolume.Id(),
		unprovisionedVolume.Id(),
	}
	args.environ.watcher.changes <- struct{}{}

	// Both volumes should be removed; the provisioned one
	// should be deprovisioned first.
	destroyed := waitChannel(c, destroyedChan, "waiting for volume to be deprovisioned")
	assertNoEvent(c, destroyedChan, "volumes deprovisioned")
	c.Assert(destroyed, jc.DeepEquals, []string{"vol-1"})

	// Removals may arrive in one or two batches; accumulate until both seen.
	var removed []names.Tag
	for len(removed) < 2 {
		tags := waitChannel(c, removedChan, "waiting for volumes to be removed").([]names.Tag)
		removed = append(removed, tags...)
	}
	c.Assert(removed, jc.SameContents, []names.Tag{provisionedVolume, unprovisionedVolume})
	assertNoEvent(c, removedChan, "volumes removed")
}
func (s *volumeSuite) TestParseVolumeTag(c *gc.C) { assertParseVolumeTag(c, "volume-0", names.NewVolumeTag("0")) assertParseVolumeTag(c, "volume-88", names.NewVolumeTag("88")) assertParseVolumeTag(c, "volume-0-lxc-0-88", names.NewVolumeTag("0/lxc/0/88")) assertParseVolumeTagInvalid(c, "", names.InvalidTagError("", "")) assertParseVolumeTagInvalid(c, "one", names.InvalidTagError("one", "")) assertParseVolumeTagInvalid(c, "volume-", names.InvalidTagError("volume-", names.VolumeTagKind)) assertParseVolumeTagInvalid(c, "machine-0", names.InvalidTagError("machine-0", names.VolumeTagKind)) }
// createVolumes creates three volumes with differing attributes through the
// given volume source: an io1 persistent volume resource-tagged with the
// test env UUID, a persistent volume tagged with a different env UUID, and
// a third volume carrying only a custom resource tag. If instanceId is
// empty, a fresh test instance is started to attach the volumes to.
func (s *ebsVolumeSuite) createVolumes(vs storage.VolumeSource, instanceId string) ([]storage.Volume, error) {
	if instanceId == "" {
		instanceId = s.srv.ec2srv.NewInstances(1, "m1.medium", imageId, ec2test.Running, nil)[0]
	}
	volume0 := names.NewVolumeTag("0")
	volume1 := names.NewVolumeTag("1")
	volume2 := names.NewVolumeTag("2")
	params := []storage.VolumeParams{{
		Tag:      volume0,
		Size:     10 * 1000,
		Provider: ec2.EBS_ProviderType,
		Attributes: map[string]interface{}{
			"persistent":  true,
			"volume-type": "io1",
			"iops":        100,
		},
		Attachment: &storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				InstanceId: instance.Id(instanceId),
			},
		},
		ResourceTags: map[string]string{
			tags.JujuEnv: s.TestConfig["uuid"].(string),
		},
	}, {
		Tag:      volume1,
		Size:     20 * 1000,
		Provider: ec2.EBS_ProviderType,
		Attributes: map[string]interface{}{
			"persistent": true,
		},
		Attachment: &storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				InstanceId: instance.Id(instanceId),
			},
		},
		// Deliberately tagged with a different env UUID than the suite's.
		ResourceTags: map[string]string{
			tags.JujuEnv: "something-else",
		},
	}, {
		Tag:      volume2,
		Size:     30 * 1000,
		Provider: ec2.EBS_ProviderType,
		ResourceTags: map[string]string{
			"abc": "123",
		},
		Attachment: &storage.VolumeAttachmentParams{
			AttachmentParams: storage.AttachmentParams{
				InstanceId: instance.Id(instanceId),
			},
		},
	}}
	vols, _, err := vs.CreateVolumes(params)
	return vols, err
}
func (s *volumeSuite) TestInstanceVolumesOldMass(c *gc.C) { obj := s.testMAASObject.TestServer.NewNode(`{"system_id": "node0"}`) instance := maasInstance{&obj} volumes, attachments, err := instance.volumes(names.NewMachineTag("1"), []names.VolumeTag{ names.NewVolumeTag("1"), names.NewVolumeTag("2"), }) c.Assert(err, jc.ErrorIsNil) c.Assert(volumes, gc.HasLen, 0) c.Assert(attachments, gc.HasLen, 0) }
func (s *VolumeStateSuite) TestParseVolumeAttachmentId(c *gc.C) { assertValid := func(id string, m names.MachineTag, v names.VolumeTag) { machineTag, volumeTag, err := state.ParseVolumeAttachmentId(id) c.Assert(err, jc.ErrorIsNil) c.Assert(machineTag, gc.Equals, m) c.Assert(volumeTag, gc.Equals, v) } assertValid("0:0", names.NewMachineTag("0"), names.NewVolumeTag("0")) assertValid("0:0/1", names.NewMachineTag("0"), names.NewVolumeTag("0/1")) assertValid("0/lxc/0:1", names.NewMachineTag("0/lxc/0"), names.NewVolumeTag("1")) }
// TestDetachVolumes checks that DetachVolume is issued only for volumes that
// the server actually reports as attached: the first server has the
// attachment and is detached; the second reports none and is skipped.
func (s *cinderVolumeSourceSuite) TestDetachVolumes(c *gc.C) {
	const mockServerId2 = mockServerId + "2"

	var numListCalls, numDetachCalls int
	mockAdapter := &mockAdapter{
		listVolumeAttachments: func(serverId string) ([]nova.VolumeAttachment, error) {
			numListCalls++
			if serverId == mockServerId2 {
				// no attachments
				return nil, nil
			}
			c.Check(serverId, gc.Equals, mockServerId)
			return []nova.VolumeAttachment{{
				Id:       mockVolId,
				VolumeId: mockVolId,
				ServerId: mockServerId,
				Device:   "/dev/sda",
			}}, nil
		},
		detachVolume: func(serverId, volId string) error {
			numDetachCalls++
			c.Check(serverId, gc.Equals, mockServerId)
			c.Check(volId, gc.Equals, mockVolId)
			return nil
		},
	}

	volSource := openstack.NewCinderVolumeSource(mockAdapter)
	errs, err := volSource.DetachVolumes([]storage.VolumeAttachmentParams{{
		Volume:   names.NewVolumeTag("123"),
		VolumeId: mockVolId,
		AttachmentParams: storage.AttachmentParams{
			Machine:    names.NewMachineTag("0"),
			InstanceId: mockServerId,
		},
	}, {
		// Attachment on a server that reports no attachments: the
		// detach should be skipped without error.
		Volume:   names.NewVolumeTag("42"),
		VolumeId: "42",
		AttachmentParams: storage.AttachmentParams{
			Machine:    names.NewMachineTag("0"),
			InstanceId: mockServerId2,
		},
	}})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(errs, jc.DeepEquals, []error{nil, nil})

	// DetachVolume should only be called for existing attachments.
	mockAdapter.CheckCalls(c, []gitjujutesting.StubCall{{
		"ListVolumeAttachments", []interface{}{mockServerId},
	}, {
		"DetachVolume", []interface{}{mockServerId, mockVolId},
	}, {
		"ListVolumeAttachments", []interface{}{mockServerId2},
	}})
}
// testAttachFilesystems exercises AttachFilesystems for a volume-backed
// filesystem. readOnly mounts with "-o ro"; reattach simulates a mount
// that already exists, in which case no mount command is expected.
func (s *managedfsSuite) testAttachFilesystems(c *gc.C, readOnly, reattach bool) {
	const testMountPoint = "/in/the/place"

	source := s.initSource(c)
	// The implementation probes whether something is already mounted at
	// the mount point by comparing "df --output=source" for the parent
	// directory and the mount point itself.
	cmd := s.commands.expect("df", "--output=source", filepath.Dir(testMountPoint))
	cmd.respond("headers\n/same/as/rootfs", nil)
	cmd = s.commands.expect("df", "--output=source", testMountPoint)

	if reattach {
		// Mount point source differs from the parent's: already mounted,
		// so no "mount" command should be issued.
		cmd.respond("headers\n/different/to/rootfs", nil)
	} else {
		cmd.respond("headers\n/same/as/rootfs", nil)
		var args []string
		if readOnly {
			args = append(args, "-o", "ro")
		}
		args = append(args, "/dev/sda1", testMountPoint)
		s.commands.expect("mount", args...)
	}

	// Provide the backing block device and the filesystem built on it.
	s.blockDevices[names.NewVolumeTag("0")] = storage.BlockDevice{
		DeviceName: "sda",
		HardwareId: "capncrunch",
		Size:       2,
	}
	s.filesystems[names.NewFilesystemTag("0/0")] = storage.Filesystem{
		Tag:    names.NewFilesystemTag("0/0"),
		Volume: names.NewVolumeTag("0"),
	}

	results, err := source.AttachFilesystems([]storage.FilesystemAttachmentParams{{
		Filesystem:   names.NewFilesystemTag("0/0"),
		FilesystemId: "filesystem-0-0",
		AttachmentParams: storage.AttachmentParams{
			Machine:    names.NewMachineTag("0"),
			InstanceId: "inst-ance",
			ReadOnly:   readOnly,
		},
		Path: testMountPoint,
	}})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, jc.DeepEquals, []storage.AttachFilesystemsResult{{
		FilesystemAttachment: &storage.FilesystemAttachment{
			names.NewFilesystemTag("0/0"),
			names.NewMachineTag("0"),
			storage.FilesystemAttachmentInfo{
				Path:     testMountPoint,
				ReadOnly: readOnly,
			},
		},
	}})
}
// TestWatchMachineVolumeAttachments verifies the machine-scoped volume
// attachment watcher: only machine-scoped volumes attached to the watched
// machine are reported, through dying/removed transitions and when new
// units are placed on that same machine.
func (s *VolumeStateSuite) TestWatchMachineVolumeAttachments(c *gc.C) {
	service := s.setupMixedScopeStorageService(c, "block")
	// addUnit adds a unit; when to is non-nil the unit is placed on that
	// machine, otherwise it is assigned to a clean, empty machine. The
	// assigned machine is returned in either case.
	addUnit := func(to *state.Machine) (u *state.Unit, m *state.Machine) {
		var err error
		u, err = service.AddUnit()
		c.Assert(err, jc.ErrorIsNil)
		if to != nil {
			err = u.AssignToMachine(to)
			c.Assert(err, jc.ErrorIsNil)
			return u, to
		}
		err = s.State.AssignUnit(u, state.AssignCleanEmpty)
		c.Assert(err, jc.ErrorIsNil)
		mid, err := u.AssignedMachineId()
		c.Assert(err, jc.ErrorIsNil)
		m, err = s.State.Machine(mid)
		c.Assert(err, jc.ErrorIsNil)
		return u, m
	}
	_, m0 := addUnit(nil)

	w := s.State.WatchMachineVolumeAttachments(names.NewMachineTag("0"))
	defer testing.AssertStop(c, w)
	wc := testing.NewStringsWatcherC(c, s.State, w)
	wc.AssertChangeInSingleEvent("0:0/1", "0:0/2") // initial
	wc.AssertNoChange()

	addUnit(nil)
	// no change, since we're only interested in the one machine.
	wc.AssertNoChange()

	err := s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	// no change, since we're only interested in attachments of
	// machine-scoped volumes.
	wc.AssertNoChange()

	err = s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("0/1"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0/1") // dying
	wc.AssertNoChange()

	err = s.State.RemoveVolumeAttachment(names.NewMachineTag("0"), names.NewVolumeTag("0/1"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0/1") // removed
	wc.AssertNoChange()

	addUnit(m0)
	wc.AssertChangeInSingleEvent("0:0/7", "0:0/8") // added
}
func (s *provisionerSuite) TestEnsureDead(c *gc.C) { s.setupVolumes(c) args := params.Entities{Entities: []params.Entity{{"volume-0-0"}, {"volume-1"}, {"volume-42"}}} result, err := s.api.EnsureDead(args) c.Assert(err, jc.ErrorIsNil) // TODO(wallyworld) - this test will be updated when EnsureDead is supported c.Assert(result, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {Error: common.ServerError(common.NotSupportedError(names.NewVolumeTag("0/0"), "ensuring death"))}, {Error: common.ServerError(common.NotSupportedError(names.NewVolumeTag("1"), "ensuring death"))}, {Error: common.ServerError(errors.NotFoundf(`volume "42"`))}, }, }) }
// TestVolumeTags checks the resource tags recorded on created EBS volumes:
// a juju-env-uuid tag plus a generated Name tag, with caller-supplied
// resource tags passed through.
func (s *ebsVolumeSuite) TestVolumeTags(c *gc.C) {
	vs := s.volumeSource(c, nil)
	results, err := s.createVolumes(vs, "")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, gc.HasLen, 3)
	c.Assert(results[0].Error, jc.ErrorIsNil)
	c.Assert(results[0].Volume, jc.DeepEquals, &storage.Volume{
		names.NewVolumeTag("0"),
		storage.VolumeInfo{
			Size:       10240,
			VolumeId:   "vol-0",
			Persistent: true,
		},
	})
	c.Assert(results[1].Error, jc.ErrorIsNil)
	c.Assert(results[1].Volume, jc.DeepEquals, &storage.Volume{
		names.NewVolumeTag("1"),
		storage.VolumeInfo{
			Size:       20480,
			VolumeId:   "vol-1",
			Persistent: true,
		},
	})
	c.Assert(results[2].Error, jc.ErrorIsNil)
	c.Assert(results[2].Volume, jc.DeepEquals, &storage.Volume{
		names.NewVolumeTag("2"),
		storage.VolumeInfo{
			Size:       30720,
			VolumeId:   "vol-2",
			Persistent: true,
		},
	})

	// Inspect the tags recorded by the fake EC2 backend.
	ec2Client := ec2.StorageEC2(vs)
	ec2Vols, err := ec2Client.Volumes(nil, nil)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(ec2Vols.Volumes, gc.HasLen, 3)
	sortBySize(ec2Vols.Volumes)
	c.Assert(ec2Vols.Volumes[0].Tags, jc.SameContents, []awsec2.Tag{
		{"juju-env-uuid", "deadbeef-0bad-400d-8000-4b1d0d06f00d"},
		{"Name", "juju-sample-volume-0"},
	})
	// Volume 1 requested a different env UUID tag (see createVolumes).
	c.Assert(ec2Vols.Volumes[1].Tags, jc.SameContents, []awsec2.Tag{
		{"juju-env-uuid", "something-else"},
		{"Name", "juju-sample-volume-1"},
	})
	// Volume 2 supplied only a custom tag; no env-uuid tag was requested.
	c.Assert(ec2Vols.Volumes[2].Tags, jc.SameContents, []awsec2.Tag{
		{"Name", "juju-sample-volume-2"},
		{"abc", "123"},
	})
}
func (s *azureVolumeSuite) TestAttachVolumesNotAttached(c *gc.C) { vs := s.volumeSource(c, nil) machine := names.NewMachineTag("0") volume := names.NewVolumeTag("0") env := makeEnviron(c) prefix := env.getEnvPrefix() service := makeDeployment(env, prefix+"service") roleName := service.Deployments[0].RoleList[0].RoleName inst, err := env.getInstance(service, roleName) c.Assert(err, jc.ErrorIsNil) getRoleResponse, err := xml.Marshal(&gwacl.PersistentVMRole{}) c.Assert(err, jc.ErrorIsNil) gwacl.PatchManagementAPIResponses([]gwacl.DispatcherResponse{ gwacl.NewDispatcherResponse(getRoleResponse, http.StatusOK, nil), }) results, err := vs.AttachVolumes([]storage.VolumeAttachmentParams{{ Volume: volume, VolumeId: "volume-0.vhd", AttachmentParams: storage.AttachmentParams{ Machine: machine, InstanceId: inst.Id(), }, }}) c.Assert(err, jc.ErrorIsNil) c.Assert(results, gc.HasLen, 1) c.Assert(results[0].Error, gc.ErrorMatches, "attaching volumes not supported") }
func (s *azureVolumeSuite) TestCreateVolumesLegacyInstance(c *gc.C) { vs := s.volumeSource(c, nil) machine := names.NewMachineTag("123") volume := names.NewVolumeTag("0") env := makeEnviron(c) prefix := env.getEnvPrefix() serviceName := "service" service := makeLegacyDeployment(env, prefix+serviceName) inst, err := env.getInstance(service, "") c.Assert(err, jc.ErrorIsNil) params := []storage.VolumeParams{{ Tag: volume, Size: 10 * 1000, Provider: storageProviderType, Attachment: &storage.VolumeAttachmentParams{ AttachmentParams: storage.AttachmentParams{ Machine: machine, InstanceId: inst.Id(), }, }, }} results, err := vs.CreateVolumes(params) c.Assert(err, jc.ErrorIsNil) c.Assert(results, gc.HasLen, 1) c.Assert(results[0].Error, gc.ErrorMatches, "attaching disks to legacy instances not supported") }
func (s *provisionerSuite) TestRemoveVolumeAttachments(c *gc.C) { s.setupVolumes(c) s.authorizer.EnvironManager = false err := s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("1")) c.Assert(err, jc.ErrorIsNil) results, err := s.api.RemoveAttachment(params.MachineStorageIds{ Ids: []params.MachineStorageId{{ MachineTag: "machine-0", AttachmentTag: "volume-0-0", }, { MachineTag: "machine-0", AttachmentTag: "volume-1", }, { MachineTag: "machine-2", AttachmentTag: "volume-4", }, { MachineTag: "machine-0", AttachmentTag: "volume-42", }}, }) c.Assert(err, jc.ErrorIsNil) c.Assert(results, jc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {Error: ¶ms.Error{Message: "removing attachment of volume 0/0 from machine 0: volume attachment is not dying"}}, {Error: nil}, {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: ¶ms.Error{Message: `removing attachment of volume 42 from machine 0: volume "42" on machine "0" not found`, Code: "not found"}}, }, }) }
func (v *mockFilesystemAccessor) FilesystemParams(filesystems []names.FilesystemTag) ([]params.FilesystemParamsResult, error) { var result []params.FilesystemParamsResult for _, tag := range filesystems { if _, ok := v.provisionedFilesystems[tag.String()]; ok { result = append(result, params.FilesystemParamsResult{ Error: ¶ms.Error{Message: "already provisioned"}, }) } else { filesystemParams := params.FilesystemParams{ FilesystemTag: tag.String(), Size: 1024, Provider: "dummy", Tags: map[string]string{ "very": "fancy", }, } if _, ok := names.FilesystemMachine(tag); ok { // place all volume-backed filesystems on machine-scoped // volumes with the same ID as the filesystem. filesystemParams.VolumeTag = names.NewVolumeTag(tag.Id()).String() } result = append(result, params.FilesystemParamsResult{Result: filesystemParams}) } } return result, nil }
func (*volumesSuite) TestVolumeParamsStorageTags(c *gc.C) { volumeTag := names.NewVolumeTag("100") storageTag := names.NewStorageTag("mystore/0") unitTag := names.NewUnitTag("mysql/123") p, err := storagecommon.VolumeParams( &fakeVolume{tag: volumeTag, params: &state.VolumeParams{ Pool: "loop", Size: 1024, }}, &fakeStorageInstance{tag: storageTag, owner: unitTag}, testing.CustomModelConfig(c, nil), &fakePoolManager{}, ) c.Assert(err, jc.ErrorIsNil) c.Assert(p, jc.DeepEquals, params.VolumeParams{ VolumeTag: "volume-100", Provider: "loop", Size: 1024, Tags: map[string]string{ tags.JujuController: testing.ModelTag.Id(), tags.JujuModel: testing.ModelTag.Id(), tags.JujuStorageInstance: "mystore/0", tags.JujuStorageOwner: "mysql/123", }, }) }
// assertMachineStorageRefs ensures that the specified machine's set of volume // and filesystem references corresponds exactly to the volume and filesystem // attachments that relate to the machine. func assertMachineStorageRefs(c *gc.C, st *state.State, m names.MachineTag) { machines, closer := state.GetRawCollection(st, state.MachinesC) defer closer() var doc struct { Volumes []string `bson:"volumes,omitempty"` Filesystems []string `bson:"filesystems,omitempty"` } err := machines.FindId(state.DocID(st, m.Id())).One(&doc) c.Assert(err, jc.ErrorIsNil) have := make(set.Tags) for _, v := range doc.Volumes { have.Add(names.NewVolumeTag(v)) } for _, f := range doc.Filesystems { have.Add(names.NewFilesystemTag(f)) } expect := make(set.Tags) volumeAttachments, err := st.MachineVolumeAttachments(m) c.Assert(err, jc.ErrorIsNil) for _, a := range volumeAttachments { expect.Add(a.Volume()) } filesystemAttachments, err := st.MachineFilesystemAttachments(m) c.Assert(err, jc.ErrorIsNil) for _, a := range filesystemAttachments { expect.Add(a.Filesystem()) } c.Assert(have, jc.DeepEquals, expect) }
// SetUpTest wires up fake storage state for the watcher tests: a block-kind
// storage instance owned by a machine, its backing volume, and fake notify
// watchers (each with one event pre-queued) for the volume attachment,
// block devices, and storage attachment.
func (s *watchStorageAttachmentSuite) SetUpTest(c *gc.C) {
	s.storageTag = names.NewStorageTag("osd-devices/0")
	s.machineTag = names.NewMachineTag("0")
	s.unitTag = names.NewUnitTag("ceph/0")
	s.storageInstance = &fakeStorageInstance{
		tag:   s.storageTag,
		owner: s.machineTag,
		kind:  state.StorageKindBlock,
	}
	s.volume = &fakeVolume{tag: names.NewVolumeTag("0")}
	// Pre-fire each fake watcher with an initial event.
	s.volumeAttachmentWatcher = apiservertesting.NewFakeNotifyWatcher()
	s.volumeAttachmentWatcher.C <- struct{}{}
	s.blockDevicesWatcher = apiservertesting.NewFakeNotifyWatcher()
	s.blockDevicesWatcher.C <- struct{}{}
	s.storageAttachmentWatcher = apiservertesting.NewFakeNotifyWatcher()
	s.storageAttachmentWatcher.C <- struct{}{}
	// Fake backend: every lookup resolves to the fixtures above.
	s.st = &fakeStorage{
		storageInstance: func(tag names.StorageTag) (state.StorageInstance, error) {
			return s.storageInstance, nil
		},
		storageInstanceVolume: func(tag names.StorageTag) (state.Volume, error) {
			return s.volume, nil
		},
		watchVolumeAttachment: func(names.MachineTag, names.VolumeTag) state.NotifyWatcher {
			return s.volumeAttachmentWatcher
		},
		watchBlockDevices: func(names.MachineTag) state.NotifyWatcher {
			return s.blockDevicesWatcher
		},
		watchStorageAttachment: func(names.StorageTag, names.UnitTag) state.NotifyWatcher {
			return s.storageAttachmentWatcher
		},
	}
}
func (s *provisionerSuite) testOpWithTags( c *gc.C, opName string, apiCall func(*storageprovisioner.State, []names.Tag) ([]params.ErrorResult, error), ) { var callCount int apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "StorageProvisioner") c.Check(version, gc.Equals, 0) c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, opName) c.Check(arg, gc.DeepEquals, params.Entities{Entities: []params.Entity{{Tag: "volume-100"}}}) c.Assert(result, gc.FitsTypeOf, ¶ms.ErrorResults{}) *(result.(*params.ErrorResults)) = params.ErrorResults{ Results: []params.ErrorResult{{Error: nil}}, } callCount++ return nil }) st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) volumes := []names.Tag{names.NewVolumeTag("100")} errorResults, err := apiCall(st, volumes) c.Check(err, jc.ErrorIsNil) c.Check(callCount, gc.Equals, 1) c.Assert(errorResults, jc.DeepEquals, []params.ErrorResult{{}}) }
func (s *volumeSuite) TestCreateVolumeItemNonexistingVolume(c *gc.C) { s.state.volume = func(tag names.VolumeTag) (state.Volume, error) { return s.volume, errors.Errorf("not volume for tag %v", tag) } found := storage.CreateVolumeItem(s.api, names.NewVolumeTag("666").String(), nil) c.Assert(found.Error, gc.ErrorMatches, ".*volume for tag.*") }
func (s *volumeSuite) TestInstanceVolumesOldMass(c *gc.C) { obj := s.testMAASObject.TestServer.NewNode(`{"system_id": "node0"}`) statusGetter := func(instance.Id) (string, string) { // status, substatus or status info. return "provisioning", "substatus" } instance := maas1Instance{&obj, nil, statusGetter} volumes, attachments, err := instance.volumes(names.NewMachineTag("1"), []names.VolumeTag{ names.NewVolumeTag("1"), names.NewVolumeTag("2"), }) c.Assert(err, jc.ErrorIsNil) c.Assert(volumes, gc.HasLen, 0) c.Assert(attachments, gc.HasLen, 0) }
func (s *provisionerSuite) TestVolumeParams(c *gc.C) { var callCount int apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "StorageProvisioner") c.Check(version, gc.Equals, 0) c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "VolumeParams") c.Check(arg, gc.DeepEquals, params.Entities{Entities: []params.Entity{{"volume-100"}}}) c.Assert(result, gc.FitsTypeOf, ¶ms.VolumeParamsResults{}) *(result.(*params.VolumeParamsResults)) = params.VolumeParamsResults{ Results: []params.VolumeParamsResult{{ Result: params.VolumeParams{ VolumeTag: "volume-100", Size: 1024, Provider: "loop", }, }}, } callCount++ return nil }) st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) volumeParams, err := st.VolumeParams([]names.VolumeTag{names.NewVolumeTag("100")}) c.Check(err, jc.ErrorIsNil) c.Check(callCount, gc.Equals, 1) c.Assert(volumeParams, jc.DeepEquals, []params.VolumeParamsResult{{ Result: params.VolumeParams{ VolumeTag: "volume-100", Size: 1024, Provider: "loop", }, }}) }
func (s *loopSuite) TestDetachVolumesDetachFails(c *gc.C) { source, _ := s.loopVolumeSource(c) fileName := filepath.Join(s.storageDir, "volume-0") cmd := s.commands.expect("losetup", "-j", fileName) cmd.respond("/dev/loop0: foo\n/dev/loop1: bar\n", nil) cmd = s.commands.expect("losetup", "-d", "/dev/loop0") cmd.respond("", errors.New("oy")) err := ioutil.WriteFile(fileName, nil, 0644) c.Assert(err, jc.ErrorIsNil) err = source.DetachVolumes([]storage.VolumeAttachmentParams{{ Volume: names.NewVolumeTag("0"), VolumeId: "vol-ume0", AttachmentParams: storage.AttachmentParams{ Machine: names.NewMachineTag("0"), InstanceId: "inst-ance", }, }}) c.Assert(err, gc.ErrorMatches, `.* detaching loop device "loop0": oy`) // file should not have been removed _, err = os.Stat(fileName) c.Assert(err, jc.ErrorIsNil) }
// TestVolumeBindingMachine checks that a volume created with a machine and
// not assigned to a storage instance is life-bound to that machine: once
// its only attachment is removed, the volume becomes Dead.
func (s *VolumeStateSuite) TestVolumeBindingMachine(c *gc.C) {
	machine, err := s.State.AddOneMachine(state.MachineTemplate{
		Series: "quantal",
		Jobs:   []state.MachineJob{state.JobHostUnits},
		Volumes: []state.MachineVolumeParams{{
			Volume: state.VolumeParams{Pool: "environscoped", Size: 1024},
		}},
	})
	c.Assert(err, jc.ErrorIsNil)

	// Volumes created unassigned to a storage instance are
	// bound to the initially attached machine.
	volume := s.volume(c, names.NewVolumeTag("0"))
	c.Assert(volume.LifeBinding(), gc.Equals, machine.Tag())
	c.Assert(volume.Life(), gc.Equals, state.Alive)

	err = s.State.DetachVolume(machine.MachineTag(), volume.VolumeTag())
	c.Assert(err, jc.ErrorIsNil)
	err = s.State.RemoveVolumeAttachment(machine.MachineTag(), volume.VolumeTag())
	c.Assert(err, jc.ErrorIsNil)
	// Removing the last attachment of a machine-bound volume kills it.
	volume = s.volume(c, volume.VolumeTag())
	c.Assert(volume.Life(), gc.Equals, state.Dead)

	// TODO(axw) when we can assign storage to an existing volume, we
	// should test that a machine-bound volume is not destroyed when
	// its assigned storage instance is removed.
}
// addVolumeOp returns a txn.Op to create a new volume with the specified // parameters. If the supplied machine ID is non-empty, and the storage // provider is machine-scoped, then the volume will be scoped to that // machine. func (st *State) addVolumeOp(params VolumeParams, machineId string) (txn.Op, names.VolumeTag, error) { if params.binding == nil { params.binding = names.NewMachineTag(machineId) } params, err := st.volumeParamsWithDefaults(params) if err != nil { return txn.Op{}, names.VolumeTag{}, errors.Trace(err) } machineId, err = st.validateVolumeParams(params, machineId) if err != nil { return txn.Op{}, names.VolumeTag{}, errors.Annotate(err, "validating volume params") } name, err := newVolumeName(st, machineId) if err != nil { return txn.Op{}, names.VolumeTag{}, errors.Annotate(err, "cannot generate volume name") } op := txn.Op{ C: volumesC, Id: name, Assert: txn.DocMissing, Insert: &volumeDoc{ Name: name, StorageId: params.storage.Id(), Binding: params.binding.String(), Params: ¶ms, // Every volume is created with one attachment. AttachmentCount: 1, }, } return op, names.NewVolumeTag(name), nil }