// TestNewMetricsAdderAPIRefusesNonAgent checks that NewMetricsAdderAPI
// accepts machine agents (and, for now, unit agents) but rejects a local
// user with "permission denied".
func (s *metricsAdderSuite) TestNewMetricsAdderAPIRefusesNonAgent(c *gc.C) {
	tests := []struct {
		tag            names.Tag
		environManager bool
		expectedError  string
	}{
		// TODO(cmars): unit agent should get permission denied when callers are
		// moved to machine agent.
		{names.NewUnitTag("mysql/0"), false, ""},
		{names.NewLocalUserTag("admin"), true, "permission denied"},
		{names.NewMachineTag("0"), false, ""},
		{names.NewMachineTag("0"), true, ""},
	}
	for i, test := range tests {
		c.Logf("test %d", i)
		// Copy the suite authorizer so each case customises its own copy.
		anAuthoriser := s.authorizer
		anAuthoriser.EnvironManager = test.environManager
		anAuthoriser.Tag = test.tag
		endPoint, err := metricsadder.NewMetricsAdderAPI(s.State, nil, anAuthoriser)
		if test.expectedError == "" {
			c.Assert(err, jc.ErrorIsNil)
			c.Assert(endPoint, gc.NotNil)
		} else {
			c.Assert(err, gc.ErrorMatches, test.expectedError)
			c.Assert(endPoint, gc.IsNil)
		}
	}
}
func (s *facadeSuite) TestReportKeys(c *gc.C) { s.authorizer.Tag = names.NewMachineTag("1") args := params.SSHHostKeySet{ EntityKeys: []params.SSHHostKeys{ { Tag: names.NewMachineTag("0").String(), PublicKeys: []string{"rsa0", "dsa0"}, }, { Tag: names.NewMachineTag("1").String(), PublicKeys: []string{"rsa1", "dsa1"}, }, }, } result, err := s.facade.ReportKeys(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {Error: apiservertesting.ErrUnauthorized}, {nil}, }, }) s.backend.stub.CheckCalls(c, []jujutesting.StubCall{{ "SetSSHHostKeys", []interface{}{ names.NewMachineTag("1"), state.SSHHostKeys{"rsa1", "dsa1"}, }, }}) }
// TestBadLogin drives api.Open without credentials and checks that every
// entry in badLoginTests fails Login with the expected error and code, and
// that facades remain unavailable both before and after the failed login.
func (s *loginSuite) TestBadLogin(c *gc.C) {
	// Start our own server so we can control when the first login
	// happens. Otherwise in JujuConnSuite.SetUpTest api.Open is
	// called with user-admin permissions automatically.
	info, cleanup := s.setupServer(c)
	defer cleanup()
	for i, t := range badLoginTests {
		c.Logf("test %d; entity %q; password %q", i, t.tag, t.password)
		// Note that Open does not log in if the tag and password
		// are empty. This allows us to test operations on the connection
		// before calling Login, which we could not do if Open
		// always logged in.
		info.Tag = ""
		info.Password = ""
		// Closure so the deferred Close fires per iteration, not at
		// function exit.
		func() {
			st, err := api.Open(info, fastDialOpts)
			c.Assert(err, gc.IsNil)
			defer st.Close()
			_, err = st.Machiner().Machine(names.NewMachineTag("0"))
			c.Assert(err, gc.ErrorMatches, `unknown object type "Machiner"`)

			// Since these are user login tests, the nonce is empty.
			err = st.Login(t.tag, t.password, "")
			c.Assert(err, gc.ErrorMatches, t.err)
			c.Assert(params.ErrCode(err), gc.Equals, t.code)

			_, err = st.Machiner().Machine(names.NewMachineTag("0"))
			c.Assert(err, gc.ErrorMatches, `unknown object type "Machiner"`)
		}()
	}
}
// TestWatchEnvironVolumeAttachments exercises the environ-scoped volume
// attachment watcher through the add/detach/remove lifecycle, asserting
// the watcher emits the expected "machine:volume" ids at each step.
func (s *VolumeStateSuite) TestWatchEnvironVolumeAttachments(c *gc.C) {
	service := s.setupMixedScopeStorageService(c, "block")
	addUnit := func() {
		u, err := service.AddUnit()
		c.Assert(err, jc.ErrorIsNil)
		err = s.State.AssignUnit(u, state.AssignCleanEmpty)
		c.Assert(err, jc.ErrorIsNil)
	}
	addUnit()

	w := s.State.WatchEnvironVolumeAttachments()
	defer testing.AssertStop(c, w)
	wc := testing.NewStringsWatcherC(c, s.State, w)
	wc.AssertChangeInSingleEvent("0:0") // initial
	wc.AssertNoChange()

	addUnit()
	wc.AssertChangeInSingleEvent("1:3")
	wc.AssertNoChange()

	err := s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0") // dying
	wc.AssertNoChange()

	err = s.State.RemoveVolumeAttachment(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0") // removed
	wc.AssertNoChange()
}
func (s *provisionerSuite) TestRemoveFilesystemsMachineAgent(c *gc.C) { s.setupFilesystems(c) s.authorizer.EnvironManager = false args := params.Entities{Entities: []params.Entity{ {"filesystem-0-0"}, {"filesystem-0-42"}, {"filesystem-42"}, {"filesystem-invalid"}, {"machine-0"}, }} err := s.State.DetachFilesystem(names.NewMachineTag("0"), names.NewFilesystemTag("0/0")) c.Assert(err, jc.ErrorIsNil) err = s.State.RemoveFilesystemAttachment(names.NewMachineTag("0"), names.NewFilesystemTag("0/0")) c.Assert(err, jc.ErrorIsNil) err = s.State.DestroyFilesystem(names.NewFilesystemTag("0/0")) c.Assert(err, jc.ErrorIsNil) result, err := s.api.Remove(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {Error: nil}, {Error: nil}, {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: ¶ms.Error{Message: `"filesystem-invalid" is not a valid filesystem tag`}}, {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) }
func (c *upgradeMongoCommand) migratableMachines() (upgradeMongoParams, error) { haClient, err := c.getHAClient() if err != nil { return upgradeMongoParams{}, err } defer haClient.Close() results, err := haClient.MongoUpgradeMode(mongo.Mongo32wt) if err != nil { return upgradeMongoParams{}, errors.Annotate(err, "cannot enter mongo upgrade mode") } result := upgradeMongoParams{} result.master = migratable{ ip: results.Master.PublicAddress, machine: names.NewMachineTag(results.Master.Tag), series: results.Master.Series, } result.machines = make([]migratable, len(results.Members)) for i, member := range results.Members { result.machines[i] = migratable{ ip: member.PublicAddress, machine: names.NewMachineTag(member.Tag), series: member.Series, } } result.rsMembers = make([]replicaset.Member, len(results.RsMembers)) for i, rsMember := range results.RsMembers { result.rsMembers[i] = rsMember } return result, nil }
// TestTools checks ToolsGetter.Tools: a readable, provisioned machine gets
// its tools version and URL; an unreadable machine gets ErrUnauthorized;
// a readable but nonexistent machine gets not-found.
func (s *toolsSuite) TestTools(c *gc.C) {
	// Only machines "0" and "42" are readable by this auth func.
	getCanRead := func() (common.AuthFunc, error) {
		return func(tag names.Tag) bool {
			return tag == names.NewMachineTag("0") || tag == names.NewMachineTag("42")
		}, nil
	}
	tg := common.NewToolsGetter(s.State, s.State, s.State, sprintfURLGetter("tools:%s"), getCanRead)
	c.Assert(tg, gc.NotNil)

	err := s.machine0.SetAgentVersion(current)
	c.Assert(err, jc.ErrorIsNil)

	args := params.Entities{
		Entities: []params.Entity{
			{Tag: "machine-0"},
			{Tag: "machine-1"},
			{Tag: "machine-42"},
		}}
	result, err := tg.Tools(args)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(result.Results, gc.HasLen, 3)
	c.Assert(result.Results[0].Error, gc.IsNil)
	c.Assert(result.Results[0].Tools, gc.NotNil)
	c.Assert(result.Results[0].Tools.Version, gc.DeepEquals, current)
	c.Assert(result.Results[0].Tools.URL, gc.Equals, "tools:"+current.String())
	c.Assert(result.Results[0].DisableSSLHostnameVerification, jc.IsTrue)
	c.Assert(result.Results[1].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized)
	c.Assert(result.Results[2].Error, gc.DeepEquals, apiservertesting.NotFoundError("machine 42"))
}
// makeMachineConfig produces a valid cloudinit machine config.
// The config describes machine "0" as an environ-manager + host-units
// machine with test mongo/API endpoints and a throwaway tools URL.
func makeMachineConfig(c *gc.C) *cloudinit.MachineConfig {
	machineID := "0"
	return &cloudinit.MachineConfig{
		MachineId:          machineID,
		MachineNonce:       "gxshasqlnng",
		DataDir:            environs.DataDir,
		LogDir:             agent.DefaultLogDir,
		Jobs:               []params.MachineJob{params.JobManageEnviron, params.JobHostUnits},
		CloudInitOutputLog: environs.CloudInitOutputLog,
		// Tools URL just needs to exist; c.MkDir gives a temp path.
		Tools: &tools.Tools{URL: "file://" + c.MkDir()},
		StateInfo: &state.Info{
			Info: mongo.Info{
				CACert: testing.CACert,
				Addrs:  []string{"127.0.0.1:123"},
			},
			Tag:      names.NewMachineTag(machineID).String(),
			Password: "******",
		},
		APIInfo: &api.Info{
			CACert: testing.CACert,
			Addrs:  []string{"127.0.0.1:123"},
			Tag:    names.NewMachineTag(machineID).String(),
		},
		MachineAgentServiceName: "jujud-machine-0",
	}
}
func (s *networkerSuite) TestMachineNetworkConfigNameChange(c *gc.C) { var called bool networker.PatchFacadeCall(s, s.networker, func(request string, args, response interface{}) error { if !called { called = true c.Assert(request, gc.Equals, "MachineNetworkConfig") return ¶ms.Error{ Message: "MachineNetworkConfig", Code: params.CodeNotImplemented, } } c.Assert(request, gc.Equals, "MachineNetworkInfo") expected := params.Entities{ Entities: []params.Entity{{Tag: names.NewMachineTag("42").String()}}, } c.Assert(args, gc.DeepEquals, expected) result := response.(*params.MachineNetworkConfigResults) result.Results = make([]params.MachineNetworkConfigResult, 1) result.Results[0].Error = common.ServerError(common.ErrPerm) return nil }) // Make a call, in this case result is "permission denied". info, err := s.networker.MachineNetworkConfig(names.NewMachineTag("42")) c.Assert(err, gc.ErrorMatches, "permission denied") c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) c.Assert(info, gc.IsNil) }
// TestAttachFilesystemsMountReadOnly checks that attaching a tmpfs
// filesystem with ReadOnly set issues a mount with the "ro" option and
// returns an attachment reporting ReadOnly.
func (s *tmpfsSuite) TestAttachFilesystemsMountReadOnly(c *gc.C) {
	source := s.tmpfsFilesystemSource(c)
	_, err := source.CreateFilesystems([]storage.FilesystemParams{{
		Tag:  names.NewFilesystemTag("1"),
		Size: 1024,
	}})
	c.Assert(err, jc.ErrorIsNil)

	// df reports a different source, so the mount is expected to proceed,
	// with size and ro passed as mount options.
	cmd := s.commands.expect("df", "--output=source", "/var/lib/juju/storage/fs/foo")
	cmd.respond("header\nvalue", nil)
	s.commands.expect("mount", "-t", "tmpfs", "filesystem-1", "/var/lib/juju/storage/fs/foo", "-o", "size=1024m,ro")

	results, err := source.AttachFilesystems([]storage.FilesystemAttachmentParams{{
		Filesystem: names.NewFilesystemTag("1"),
		Path:       "/var/lib/juju/storage/fs/foo",
		AttachmentParams: storage.AttachmentParams{
			Machine:  names.NewMachineTag("2"),
			ReadOnly: true,
		},
	}})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(results, jc.DeepEquals, []storage.AttachFilesystemsResult{{
		FilesystemAttachment: &storage.FilesystemAttachment{
			Filesystem: names.NewFilesystemTag("1"),
			Machine:    names.NewMachineTag("2"),
			FilesystemAttachmentInfo: storage.FilesystemAttachmentInfo{
				Path:     "/var/lib/juju/storage/fs/foo",
				ReadOnly: true,
			},
		},
	}})
}
// TestWatchMachineVolumeAttachments exercises the machine-scoped volume
// attachment watcher for machine "0": it reports that machine's
// attachments initially, ignores other machines, and emits dying/removed
// events for detach and removal of machine 0's volume.
func (s *VolumeStateSuite) TestWatchMachineVolumeAttachments(c *gc.C) {
	service := s.setupMixedScopeStorageService(c, "block")
	addUnit := func() {
		u, err := service.AddUnit()
		c.Assert(err, jc.ErrorIsNil)
		err = s.State.AssignUnit(u, state.AssignCleanEmpty)
		c.Assert(err, jc.ErrorIsNil)
	}
	addUnit()

	w := s.State.WatchMachineVolumeAttachments(names.NewMachineTag("0"))
	defer testing.AssertStop(c, w)
	wc := testing.NewStringsWatcherC(c, s.State, w)
	wc.AssertChangeInSingleEvent("0:0", "0:0/1", "0:0/2") // initial
	wc.AssertNoChange()

	addUnit()
	// no change, since we're only interested in the one machine.
	wc.AssertNoChange()

	err := s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0") // dying
	wc.AssertNoChange()

	err = s.State.RemoveVolumeAttachment(names.NewMachineTag("0"), names.NewVolumeTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0") // removed
	wc.AssertNoChange()

	// TODO(axw) respond to changes to the same machine when we support
	// dynamic storage and/or placement.
}
func (s *provisionerSuite) TestRemoveVolumesEnvironManager(c *gc.C) { s.setupVolumes(c) args := params.Entities{Entities: []params.Entity{ {"volume-1-0"}, {"volume-1"}, {"volume-2"}, {"volume-42"}, {"volume-invalid"}, {"machine-0"}, }} err := s.State.DetachVolume(names.NewMachineTag("0"), names.NewVolumeTag("1")) c.Assert(err, jc.ErrorIsNil) err = s.State.RemoveVolumeAttachment(names.NewMachineTag("0"), names.NewVolumeTag("1")) c.Assert(err, jc.ErrorIsNil) err = s.State.DestroyVolume(names.NewVolumeTag("1")) c.Assert(err, jc.ErrorIsNil) result, err := s.api.Remove(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: nil}, {Error: ¶ms.Error{Message: "removing volume 2: volume is not dead"}}, {Error: nil}, {Error: ¶ms.Error{Message: `"volume-invalid" is not a valid volume tag`}}, {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) }
func (s *actionsSuite) TestAuthAndActionFromTagFn(c *gc.C) { notFoundActionTag := names.NewActionTag(utils.MustNewUUID().String()) authorizedActionTag := names.NewActionTag(utils.MustNewUUID().String()) authorizedMachineTag := names.NewMachineTag("1") authorizedAction := fakeAction{name: "action1", receiver: authorizedMachineTag.Id()} unauthorizedActionTag := names.NewActionTag(utils.MustNewUUID().String()) unauthorizedMachineTag := names.NewMachineTag("10") unauthorizedAction := fakeAction{name: "action2", receiver: unauthorizedMachineTag.Id()} invalidReceiverActionTag := names.NewActionTag(utils.MustNewUUID().String()) invalidReceiverAction := fakeAction{name: "action2", receiver: "masterexploder"} canAccess := makeCanAccess(map[names.Tag]bool{ authorizedMachineTag: true, }) getActionByTag := makeGetActionByTag(map[names.ActionTag]state.Action{ authorizedActionTag: authorizedAction, unauthorizedActionTag: unauthorizedAction, invalidReceiverActionTag: invalidReceiverAction, }) tagFn := common.AuthAndActionFromTagFn(canAccess, getActionByTag) for i, test := range []struct { tag string errString string err error expectedAction state.Action }{{ tag: "invalid-action-tag", errString: `"invalid-action-tag" is not a valid tag`, }, { tag: notFoundActionTag.String(), errString: "action not found", }, { tag: invalidReceiverActionTag.String(), errString: `invalid actionreceiver name "masterexploder"`, }, { tag: unauthorizedActionTag.String(), err: common.ErrPerm, }, { tag: authorizedActionTag.String(), expectedAction: authorizedAction, }} { c.Logf("test %d", i) action, err := tagFn(test.tag) if test.errString != "" { c.Check(err, gc.ErrorMatches, test.errString) c.Check(action, gc.IsNil) } else if test.err != nil { c.Check(err, gc.Equals, test.err) c.Check(action, gc.IsNil) } else { c.Check(err, jc.ErrorIsNil) c.Check(action, gc.Equals, action) } } }
// TestBadLogin checks that logins with a bad admin password or an unknown
// user fail with an unauthorized rpc.RequestError, and that facades remain
// unavailable both before and after the failed login attempt.
func (s *loginSuite) TestBadLogin(c *gc.C) {
	// Start our own server so we can control when the first login
	// happens. Otherwise in JujuConnSuite.SetUpTest api.Open is
	// called with user-admin permissions automatically.
	info, cleanup := s.setupServerWithValidator(c, nil)
	defer cleanup()

	adminUser := s.AdminUserTag(c)

	for i, t := range []struct {
		tag      names.Tag
		password string
		err      error
		code     string
	}{{
		tag:      adminUser,
		password: "******",
		err: &rpc.RequestError{
			Message: "invalid entity name or password",
			Code:    "unauthorized access",
		},
		code: params.CodeUnauthorized,
	}, {
		tag:      names.NewUserTag("unknown"),
		password: "******",
		err: &rpc.RequestError{
			Message: "invalid entity name or password",
			Code:    "unauthorized access",
		},
		code: params.CodeUnauthorized,
	}} {
		c.Logf("test %d; entity %q; password %q", i, t.tag, t.password)
		// Closure so the deferred Close fires per iteration.
		func() {
			// Open the API without logging in, so we can perform
			// operations on the connection before calling Login.
			st := s.openAPIWithoutLogin(c, info)
			defer st.Close()

			_, err := apimachiner.NewState(st).Machine(names.NewMachineTag("0"))
			c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
				Message: `unknown object type "Machiner"`,
				Code:    "not implemented",
			})

			// Since these are user login tests, the nonce is empty.
			err = st.Login(t.tag, t.password, "", nil)
			c.Assert(errors.Cause(err), gc.DeepEquals, t.err)
			c.Assert(params.ErrCode(err), gc.Equals, t.code)

			_, err = apimachiner.NewState(st).Machine(names.NewMachineTag("0"))
			c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{
				Message: `unknown object type "Machiner"`,
				Code:    "not implemented",
			})
		}()
	}
}
// TestMachineAndMachineTag checks that Machine denies access to a foreign
// machine ("42") with an unauthorized error, and returns the authenticated
// machine ("1") with the matching tag.
func (s *machinerSuite) TestMachineAndMachineTag(c *gc.C) {
	machine, err := s.machiner.Machine(names.NewMachineTag("42"))
	c.Assert(err, gc.ErrorMatches, "permission denied")
	c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized)
	c.Assert(machine, gc.IsNil)

	machine, err = s.machiner.Machine(names.NewMachineTag("1"))
	c.Assert(err, gc.IsNil)
	c.Assert(machine.Tag(), gc.Equals, "machine-1")
}
func (backend *mockBackend) GetSSHHostKeys(tag names.MachineTag) (state.SSHHostKeys, error) { backend.stub.AddCall("GetSSHHostKeys", tag) switch tag { case names.NewMachineTag("0"): return state.SSHHostKeys{"rsa0", "dsa0"}, nil case names.NewMachineTag("1"): return state.SSHHostKeys{"rsa1", "dsa1"}, nil } return nil, errors.New("machine not found") }
// TestDetachVolumes checks that DetachVolumes lists each server's
// attachments first and only calls DetachVolume for attachments that
// actually exist (the second server has none, so no detach is issued).
func (s *cinderVolumeSourceSuite) TestDetachVolumes(c *gc.C) {
	const mockServerId2 = mockServerId + "2"

	var numListCalls, numDetachCalls int
	mockAdapter := &mockAdapter{
		listVolumeAttachments: func(serverId string) ([]nova.VolumeAttachment, error) {
			numListCalls++
			if serverId == mockServerId2 {
				// no attachments
				return nil, nil
			}
			c.Check(serverId, gc.Equals, mockServerId)
			return []nova.VolumeAttachment{{
				Id:       mockVolId,
				VolumeId: mockVolId,
				ServerId: mockServerId,
				Device:   "/dev/sda",
			}}, nil
		},
		detachVolume: func(serverId, volId string) error {
			numDetachCalls++
			c.Check(serverId, gc.Equals, mockServerId)
			c.Check(volId, gc.Equals, mockVolId)
			return nil
		},
	}

	volSource := openstack.NewCinderVolumeSource(mockAdapter)
	errs, err := volSource.DetachVolumes([]storage.VolumeAttachmentParams{{
		Volume:   names.NewVolumeTag("123"),
		VolumeId: mockVolId,
		AttachmentParams: storage.AttachmentParams{
			Machine:    names.NewMachineTag("0"),
			InstanceId: mockServerId,
		},
	}, {
		Volume:   names.NewVolumeTag("42"),
		VolumeId: "42",
		AttachmentParams: storage.AttachmentParams{
			Machine:    names.NewMachineTag("0"),
			InstanceId: mockServerId2,
		},
	}})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(errs, jc.DeepEquals, []error{nil, nil})

	// DetachVolume should only be called for existing attachments.
	mockAdapter.CheckCalls(c, []gitjujutesting.StubCall{{
		"ListVolumeAttachments", []interface{}{mockServerId},
	}, {
		"DetachVolume", []interface{}{mockServerId, mockVolId},
	}, {
		"ListVolumeAttachments", []interface{}{mockServerId2},
	}})
}
// TestParseFilesystemAttachmentId checks that "machine:filesystem"
// attachment ids parse into the expected tags, including machine-scoped
// filesystem ids ("0/1") and container machine ids ("0/lxc/0").
func (s *FilesystemStateSuite) TestParseFilesystemAttachmentId(c *gc.C) {
	assertValid := func(id string, m names.MachineTag, v names.FilesystemTag) {
		machineTag, filesystemTag, err := state.ParseFilesystemAttachmentId(id)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(machineTag, gc.Equals, m)
		c.Assert(filesystemTag, gc.Equals, v)
	}
	assertValid("0:0", names.NewMachineTag("0"), names.NewFilesystemTag("0"))
	assertValid("0:0/1", names.NewMachineTag("0"), names.NewFilesystemTag("0/1"))
	assertValid("0/lxc/0:1", names.NewMachineTag("0/lxc/0"), names.NewFilesystemTag("1"))
}
// setMachineID updates MachineId, MachineAgentServiceName,
// MongoInfo.Tag, and APIInfo.Tag to match the given machine ID. If
// MongoInfo or APIInfo are nil, they're not changed.
// Returns the receiver to allow chaining.
func (cfg *testInstanceConfig) setMachineID(id string) *testInstanceConfig {
	cfg.MachineId = id
	cfg.MachineAgentServiceName = fmt.Sprintf("jujud-%s", names.NewMachineTag(id).String())
	if cfg.MongoInfo != nil {
		cfg.MongoInfo.Tag = names.NewMachineTag(id)
	}
	if cfg.APIInfo != nil {
		cfg.APIInfo.Tag = names.NewMachineTag(id)
	}
	return cfg
}
// TestBadLogin checks string-tag login failures: bad admin password,
// unknown user, and an unparseable tag, verifying the error message and
// code, and that facades stay unavailable before and after each attempt.
func (s *loginSuite) TestBadLogin(c *gc.C) {
	// Start our own server so we can control when the first login
	// happens. Otherwise in JujuConnSuite.SetUpTest api.Open is
	// called with user-admin permissions automatically.
	info, cleanup := s.setupServerWithValidator(c, nil)
	defer cleanup()

	adminUser := s.AdminUserTag(c)

	for i, t := range []struct {
		tag      string
		password string
		err      string
		code     string
	}{{
		tag:      adminUser.String(),
		password: "******",
		err:      "invalid entity name or password",
		code:     params.CodeUnauthorized,
	}, {
		tag:      "user-unknown",
		password: "******",
		err:      "invalid entity name or password",
		code:     params.CodeUnauthorized,
	}, {
		tag:      "bar",
		password: "******",
		err:      `"bar" is not a valid tag`,
	}} {
		c.Logf("test %d; entity %q; password %q", i, t.tag, t.password)
		// Note that Open does not log in if the tag and password
		// are empty. This allows us to test operations on the connection
		// before calling Login, which we could not do if Open
		// always logged in.
		info.Tag = nil
		info.Password = ""
		// Closure so the deferred Close fires per iteration.
		func() {
			st, err := api.Open(info, fastDialOpts)
			c.Assert(err, jc.ErrorIsNil)
			defer st.Close()

			_, err = st.Machiner().Machine(names.NewMachineTag("0"))
			c.Assert(err, gc.ErrorMatches, `.*unknown object type "Machiner"`)

			// Since these are user login tests, the nonce is empty.
			err = st.Login(t.tag, t.password, "")
			c.Assert(err, gc.ErrorMatches, t.err)
			c.Assert(params.ErrCode(err), gc.Equals, t.code)

			_, err = st.Machiner().Machine(names.NewMachineTag("0"))
			c.Assert(err, gc.ErrorMatches, `.*unknown object type "Machiner"`)
		}()
	}
}
// TestParseVolumeAttachmentId checks that "machine:volume" attachment ids
// parse into the expected tags, including machine-scoped volume ids
// ("0/1") and container machine ids ("0/lxc/0").
func (s *VolumeStateSuite) TestParseVolumeAttachmentId(c *gc.C) {
	assertValid := func(id string, m names.MachineTag, v names.VolumeTag) {
		machineTag, volumeTag, err := state.ParseVolumeAttachmentId(id)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(machineTag, gc.Equals, m)
		c.Assert(volumeTag, gc.Equals, v)
	}
	assertValid("0:0", names.NewMachineTag("0"), names.NewVolumeTag("0"))
	assertValid("0:0/1", names.NewMachineTag("0"), names.NewVolumeTag("0/1"))
	assertValid("0/lxc/0:1", names.NewMachineTag("0/lxc/0"), names.NewVolumeTag("1"))
}
func (s *managedfsSuite) testAttachFilesystems(c *gc.C, readOnly, reattach bool) { const testMountPoint = "/in/the/place" source := s.initSource(c) cmd := s.commands.expect("df", "--output=source", filepath.Dir(testMountPoint)) cmd.respond("headers\n/same/as/rootfs", nil) cmd = s.commands.expect("df", "--output=source", testMountPoint) if reattach { cmd.respond("headers\n/different/to/rootfs", nil) } else { cmd.respond("headers\n/same/as/rootfs", nil) var args []string if readOnly { args = append(args, "-o", "ro") } args = append(args, "/dev/sda1", testMountPoint) s.commands.expect("mount", args...) } s.blockDevices[names.NewVolumeTag("0")] = storage.BlockDevice{ DeviceName: "sda", HardwareId: "capncrunch", Size: 2, } s.filesystems[names.NewFilesystemTag("0/0")] = storage.Filesystem{ Tag: names.NewFilesystemTag("0/0"), Volume: names.NewVolumeTag("0"), } results, err := source.AttachFilesystems([]storage.FilesystemAttachmentParams{{ Filesystem: names.NewFilesystemTag("0/0"), FilesystemId: "filesystem-0-0", AttachmentParams: storage.AttachmentParams{ Machine: names.NewMachineTag("0"), InstanceId: "inst-ance", ReadOnly: readOnly, }, Path: testMountPoint, }}) c.Assert(err, jc.ErrorIsNil) c.Assert(results, jc.DeepEquals, []storage.AttachFilesystemsResult{{ FilesystemAttachment: &storage.FilesystemAttachment{ names.NewFilesystemTag("0/0"), names.NewMachineTag("0"), storage.FilesystemAttachmentInfo{ Path: testMountPoint, ReadOnly: readOnly, }, }, }}) }
// TestStartInstanceStorage starts an instance with two volume params and
// asserts the resulting volumes and volume attachments match the node's
// physical block devices (hardware id present for sda, device name for sdc).
func (s *environSuite) TestStartInstanceStorage(c *gc.C) {
	env := s.bootstrap(c)
	s.newNode(c, "thenode1", "host1", map[string]interface{}{
		"memory":                  8192,
		"physicalblockdevice_set": nodeStorageAttrs,
		"constraint_map":          storageConstraintAttrs,
	})
	s.addSubnet(c, 1, 1, "thenode1")
	params := environs.StartInstanceParams{Volumes: []storage.VolumeParams{
		{Tag: names.NewVolumeTag("1"), Size: 2000000},
		{Tag: names.NewVolumeTag("3"), Size: 2000000},
	}}
	result, err := testing.StartInstanceWithParams(env, "1", params)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(result.Volumes, jc.DeepEquals, []storage.Volume{
		{
			names.NewVolumeTag("1"),
			storage.VolumeInfo{
				Size:       238475,
				VolumeId:   "volume-1",
				HardwareId: "id_for_sda",
			},
		},
		{
			names.NewVolumeTag("3"),
			storage.VolumeInfo{
				Size:       238475,
				VolumeId:   "volume-3",
				HardwareId: "",
			},
		},
	})
	c.Assert(result.VolumeAttachments, jc.DeepEquals, []storage.VolumeAttachment{
		{
			names.NewVolumeTag("1"),
			names.NewMachineTag("1"),
			storage.VolumeAttachmentInfo{
				DeviceName: "",
				ReadOnly:   false,
			},
		},
		{
			names.NewVolumeTag("3"),
			names.NewMachineTag("1"),
			storage.VolumeAttachmentInfo{
				DeviceName: "sdc",
				ReadOnly:   false,
			},
		},
	})
}
// TestUploadBinariesTools builds a model with three distinct agent tools
// versions (machine, container, unit) and checks UploadBinaries uploads
// each version exactly once via the fake uploader.
func (s *ImportSuite) TestUploadBinariesTools(c *gc.C) {
	// Create a model that has three different tools versions:
	// one for a machine, one for a container, and one for a unit agent.
	// We don't care about the actual validity of the model (it isn't).
	model := description.NewModel(description.ModelArgs{
		Owner: names.NewUserTag("me"),
	})
	machine := model.AddMachine(description.MachineArgs{
		Id: names.NewMachineTag("0"),
	})
	machine.SetTools(description.AgentToolsArgs{
		Version: version.MustParseBinary("2.0.1-trusty-amd64"),
	})
	container := machine.AddContainer(description.MachineArgs{
		Id: names.NewMachineTag("0/lxc/0"),
	})
	container.SetTools(description.AgentToolsArgs{
		Version: version.MustParseBinary("2.0.5-trusty-amd64"),
	})
	service := model.AddService(description.ServiceArgs{
		Tag:      names.NewServiceTag("magic"),
		CharmURL: "local:trusty/magic",
	})
	unit := service.AddUnit(description.UnitArgs{
		Tag: names.NewUnitTag("magic/0"),
	})
	unit.SetTools(description.AgentToolsArgs{
		Version: version.MustParseBinary("2.0.3-trusty-amd64"),
	})

	uploader := &fakeUploader{tools: make(map[version.Binary]string)}
	// All other collaborators are no-op fakes; only tools uploads matter.
	config := migration.UploadBinariesConfig{
		State:               &fakeStateStorage{},
		Model:               model,
		Target:              &fakeAPIConnection{},
		GetCharmUploader:    func(api.Connection) migration.CharmUploader { return &noOpUploader{} },
		GetToolsUploader:    func(target api.Connection) migration.ToolsUploader { return uploader },
		GetStateStorage:     func(migration.UploadBackend) storage.Storage { return &fakeCharmsStorage{} },
		GetCharmStoragePath: func(migration.UploadBackend, *charm.URL) (string, error) { return "", nil },
	}
	err := migration.UploadBinaries(config)
	c.Assert(err, jc.ErrorIsNil)

	c.Assert(uploader.tools, jc.DeepEquals, map[version.Binary]string{
		version.MustParseBinary("2.0.1-trusty-amd64"): "fake tools 2.0.1-trusty-amd64",
		version.MustParseBinary("2.0.3-trusty-amd64"): "fake tools 2.0.3-trusty-amd64",
		version.MustParseBinary("2.0.5-trusty-amd64"): "fake tools 2.0.5-trusty-amd64",
	})
}
// TestWatchMachineFilesystemAttachments exercises the machine-scoped
// filesystem attachment watcher for machine "0": only machine-scoped
// filesystems on that machine are reported, through initial, dying,
// removed, and newly-added attachment events.
func (s *FilesystemStateSuite) TestWatchMachineFilesystemAttachments(c *gc.C) {
	service := s.setupMixedScopeStorageService(c, "filesystem")
	// addUnit adds a unit, assigning it to the given machine, or to a
	// clean machine when to is nil.
	addUnit := func(to *state.Machine) (u *state.Unit, m *state.Machine) {
		var err error
		u, err = service.AddUnit()
		c.Assert(err, jc.ErrorIsNil)
		if to != nil {
			err = u.AssignToMachine(to)
			c.Assert(err, jc.ErrorIsNil)
			return u, to
		}
		err = s.State.AssignUnit(u, state.AssignCleanEmpty)
		c.Assert(err, jc.ErrorIsNil)
		mid, err := u.AssignedMachineId()
		c.Assert(err, jc.ErrorIsNil)
		m, err = s.State.Machine(mid)
		c.Assert(err, jc.ErrorIsNil)
		return u, m
	}
	_, m0 := addUnit(nil)

	w := s.State.WatchMachineFilesystemAttachments(names.NewMachineTag("0"))
	defer testing.AssertStop(c, w)
	wc := testing.NewStringsWatcherC(c, s.State, w)
	wc.AssertChangeInSingleEvent("0:0/1", "0:0/2") // initial
	wc.AssertNoChange()

	addUnit(nil)
	// no change, since we're only interested in the one machine.
	wc.AssertNoChange()

	err := s.State.DetachFilesystem(names.NewMachineTag("0"), names.NewFilesystemTag("0"))
	c.Assert(err, jc.ErrorIsNil)
	// no change, since we're only interested in attachments of
	// machine-scoped volumes.
	wc.AssertNoChange()

	err = s.State.DetachFilesystem(names.NewMachineTag("0"), names.NewFilesystemTag("0/1"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0/1") // dying
	wc.AssertNoChange()

	err = s.State.RemoveFilesystemAttachment(names.NewMachineTag("0"), names.NewFilesystemTag("0/1"))
	c.Assert(err, jc.ErrorIsNil)
	wc.AssertChangeInSingleEvent("0:0/1") // removed
	wc.AssertNoChange()

	addUnit(m0)
	wc.AssertChangeInSingleEvent("0:0/7", "0:0/8")
	wc.AssertNoChange()
}
// TestNamespace checks how the rsyslog namespace affects config filenames
// and log directories for machine 0 versus other machines and units.
func (s *RsyslogSuite) TestNamespace(c *gc.C) {
	st := s.st
	// set the rsyslog cert
	err := s.APIState.Client().EnvironmentSet(map[string]interface{}{"rsyslog-ca-cert": coretesting.CACert})
	c.Assert(err, jc.ErrorIsNil)

	// namespace only takes effect in filenames
	// for machine-0; all others assume isolation.
	s.testNamespace(c, st, names.NewMachineTag("0"), "", "25-juju.conf", *rsyslog.LogDir)
	s.testNamespace(c, st, names.NewMachineTag("0"), "mynamespace", "25-juju-mynamespace.conf", *rsyslog.LogDir+"-mynamespace")
	s.testNamespace(c, st, names.NewMachineTag("1"), "", "25-juju.conf", *rsyslog.LogDir)
	s.testNamespace(c, st, names.NewMachineTag("1"), "mynamespace", "25-juju.conf", *rsyslog.LogDir)
	s.testNamespace(c, st, names.NewUnitTag("myservice/0"), "", "26-juju-unit-myservice-0.conf", *rsyslog.LogDir)
	s.testNamespace(c, st, names.NewUnitTag("myservice/0"), "mynamespace", "26-juju-unit-myservice-0.conf", *rsyslog.LogDir)
}
// Convert machine ids to tags. func machineIdsToTags(ids ...string) []string { var result []string for _, id := range ids { result = append(result, names.NewMachineTag(id).String()) } return result }
// TestSetsInstanceInfoInitially runs the machine poll loop with short
// intervals and checks the machine's addresses and instance status are
// recorded, with addresses set exactly once.
func (s *machineSuite) TestSetsInstanceInfoInitially(c *gc.C) {
	context := &testMachineContext{
		getInstanceInfo: instanceInfoGetter(c, "i1234", testAddrs, "running", nil),
		dyingc:          make(chan struct{}),
	}
	m := &testMachine{
		tag:        names.NewMachineTag("99"),
		instanceId: "i1234",
		refresh:    func() error { return nil },
		life:       params.Alive,
	}
	died := make(chan machine)

	// Change the poll intervals to be short, so that we know
	// that we've polled (probably) at least a few times.
	s.PatchValue(&ShortPoll, coretesting.ShortWait/10)
	s.PatchValue(&LongPoll, coretesting.ShortWait/10)

	go runMachine(context, m, nil, died)
	time.Sleep(coretesting.ShortWait)

	killMachineLoop(c, m, context.dyingc, died)
	c.Assert(context.killAllErr, gc.Equals, nil)
	c.Assert(m.addresses, gc.DeepEquals, testAddrs)
	c.Assert(m.setAddressCount, gc.Equals, 1)
	c.Assert(m.instStatus, gc.Equals, "running")
}
func (s *machineSuite) TestSinglePollWhenInstancInfoUnimplemented(c *gc.C) { s.PatchValue(&ShortPoll, 1*time.Millisecond) s.PatchValue(&LongPoll, 1*time.Millisecond) count := int32(0) getInstanceInfo := func(id instance.Id) (instanceInfo, error) { c.Check(id, gc.Equals, instance.Id("i1234")) atomic.AddInt32(&count, 1) err := ¶ms.Error{ Code: params.CodeNotImplemented, Message: "instance address not implemented", } return instanceInfo{}, err } context := &testMachineContext{ getInstanceInfo: getInstanceInfo, dyingc: make(chan struct{}), } m := &testMachine{ tag: names.NewMachineTag("99"), instanceId: "i1234", refresh: func() error { return nil }, life: params.Alive, } died := make(chan machine) go runMachine(context, m, nil, died) time.Sleep(coretesting.ShortWait) killMachineLoop(c, m, context.dyingc, died) c.Assert(context.killAllErr, gc.Equals, nil) c.Assert(count, gc.Equals, int32(1)) }
// countPolls sets up a machine loop with the given
// addresses and status to be returned from getInstanceInfo,
// waits for coretesting.ShortWait, and returns the
// number of times the instance is polled.
// A nil addrs simulates an instance whose addresses cannot be fetched.
func countPolls(c *gc.C, addrs []network.Address, instId, instStatus string, machineStatus params.Status) int {
	// count is incremented atomically because the machine loop runs in
	// its own goroutine.
	count := int32(0)
	getInstanceInfo := func(id instance.Id) (instanceInfo, error) {
		c.Check(string(id), gc.Equals, instId)
		atomic.AddInt32(&count, 1)
		if addrs == nil {
			return instanceInfo{}, fmt.Errorf("no instance addresses available")
		}
		return instanceInfo{addrs, instStatus}, nil
	}
	context := &testMachineContext{
		getInstanceInfo: getInstanceInfo,
		dyingc:          make(chan struct{}),
	}
	m := &testMachine{
		tag:        names.NewMachineTag("99"),
		instanceId: instance.Id(instId),
		refresh:    func() error { return nil },
		addresses:  addrs,
		life:       params.Alive,
		status:     machineStatus,
	}
	died := make(chan machine)

	go runMachine(context, m, nil, died)
	time.Sleep(coretesting.ShortWait)

	killMachineLoop(c, m, context.dyingc, died)
	c.Assert(context.killAllErr, gc.Equals, nil)
	return int(count)
}