// RunCommand runs the command and returns channels holding the // command's operations and errors. func RunCommand(ctx *cmd.Context, com cmd.Command, args ...string) (opc chan dummy.Operation, errc chan error) { if ctx == nil { panic("ctx == nil") } errc = make(chan error, 1) opc = make(chan dummy.Operation, 200) dummy.Listen(opc) go func() { defer func() { // signal that we're done with this ops channel. dummy.Listen(nil) // now that dummy is no longer going to send ops on // this channel, close it to signal to test cases // that we are done. close(opc) }() if err := coretesting.InitCommand(com, args); err != nil { errc <- err return } errc <- com.Run(ctx) }() return }
// TestAllocateAddress checks that the dummy environ hands out
// sequential addresses from a subnet, and that each allocation is
// reported as an operation on the listener channel.
func (s *suite) TestAllocateAddress(c *gc.C) {
	e := s.bootstrapTestEnviron(c, false)
	defer func() {
		err := e.Destroy()
		c.Assert(err, gc.IsNil)
	}()

	inst, _ := jujutesting.AssertStartInstance(c, e, "0")
	c.Assert(inst, gc.NotNil)
	netId := network.Id("net1")

	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)

	// First allocation on net1 yields .1 ...
	expectAddress := network.NewAddress("0.1.2.1", network.ScopeCloudLocal)
	address, err := e.AllocateAddress(inst.Id(), netId)
	c.Assert(err, gc.IsNil)
	c.Assert(address, gc.DeepEquals, expectAddress)

	assertAllocateAddress(c, e, opc, inst.Id(), netId, expectAddress)

	// ... and the second yields .2.
	expectAddress = network.NewAddress("0.1.2.2", network.ScopeCloudLocal)
	address, err = e.AllocateAddress(inst.Id(), netId)
	c.Assert(err, gc.IsNil)
	c.Assert(address, gc.DeepEquals, expectAddress)

	assertAllocateAddress(c, e, opc, inst.Id(), netId, expectAddress)
}
func (s *cmdControllerSuite) TestSystemKillCallsEnvironDestroyOnHostedEnviron(c *gc.C) { st := s.Factory.MakeEnvironment(c, &factory.EnvParams{ Name: "foo", }) defer st.Close() st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") st.Close() opc := make(chan dummy.Operation, 200) dummy.Listen(opc) conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(*gc.C) { conn.Close() }) client := undertakerapi.NewClient(conn) startTime := time.Date(2015, time.September, 1, 17, 2, 1, 0, time.UTC) mClock := testing.NewClock(startTime) undertaker.NewUndertaker(client, mClock) store, err := configstore.Default() _, err = store.ReadInfo("dummyenv") c.Assert(err, jc.ErrorIsNil) s.run(c, "kill-controller", "dummyenv", "-y") // Ensure that Destroy was called on the hosted environment ... opRecvTimeout(c, st, opc, dummy.OpDestroy{}) // ... and that the configstore was removed. _, err = store.ReadInfo("dummyenv") c.Assert(err, jc.Satisfies, errors.IsNotFound) }
// TestSystemKillCallsEnvironDestroyOnHostedEnviron checks that
// "kill-controller" destroys the hosted model even when a destroy
// block is switched on, and that the controller details are removed
// from the client store afterwards.
func (s *cmdControllerSuite) TestSystemKillCallsEnvironDestroyOnHostedEnviron(c *gc.C) {
	st := s.Factory.MakeModel(c, &factory.ModelParams{
		Name: "foo",
	})
	defer st.Close()

	st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel")
	st.Close()

	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)

	store := jujuclient.NewFileClientStore()
	_, err := store.ControllerByName("kontroll")
	c.Assert(err, jc.ErrorIsNil)

	s.run(c, "kill-controller", "kontroll", "-y")

	// Ensure that Destroy was called on the hosted environ ...
	// TODO(fwereade): how do we know it's the hosted environ?
	// what actual interactions made it ok to destroy any environ
	// here? (there used to be an undertaker that didn't work...)
	opRecvTimeout(c, st, opc, dummy.OpDestroy{})

	// ... and that the details were removed from the client store.
	_, err = store.ControllerByName("kontroll")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
}
// TestManageModel checks that a machine agent running JobManageModel
// starts its workers and reacts to model changes: adding a unit to an
// exposed service provisions a machine (OpStartInstance), and opening
// a port on the unit reaches the firewaller (OpOpenPorts).
func (s *MachineSuite) TestManageModel(c *gc.C) {
	usefulVersion := version.Binary{
		Number: jujuversion.Current,
		Arch:   arch.HostArch(),
		Series: "quantal", // to match the charm created below
	}
	envtesting.AssertUploadFakeToolsVersions(c, s.DefaultToolsStorage, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), usefulVersion)
	m, _, _ := s.primeAgent(c, state.JobManageModel)
	op := make(chan dummy.Operation, 200)
	dummy.Listen(op)

	a := s.newAgent(c, m)
	// Make sure the agent is stopped even if the test fails.
	defer a.Stop()
	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()
	c.Logf("started test agent, waiting for workers...")
	r0 := s.singularRecord.nextRunner(c)
	r0.waitForWorker(c, "txnpruner")

	// Check that the provisioner and firewaller are alive by doing
	// a rudimentary check that it responds to state changes.

	// Create an exposed service, and add a unit.
	charm := s.AddTestingCharm(c, "dummy")
	svc := s.AddTestingService(c, "test-service", charm)
	err := svc.SetExposed()
	c.Assert(err, jc.ErrorIsNil)
	units, err := juju.AddUnits(s.State, svc, svc.Name(), 1, nil)
	c.Assert(err, jc.ErrorIsNil)

	// It should be allocated to a machine, which should then be provisioned.
	c.Logf("service %q added with 1 unit, waiting for unit %q's machine to be started...", svc.Name(), units[0].Name())
	c.Check(opRecvTimeout(c, s.State, op, dummy.OpStartInstance{}), gc.NotNil)
	c.Logf("machine hosting unit %q started, waiting for the unit to be deployed...", units[0].Name())
	s.waitProvisioned(c, units[0])

	// Open a port on the unit; it should be handled by the firewaller.
	c.Logf("unit %q deployed, opening port tcp/999...", units[0].Name())
	err = units[0].OpenPort("tcp", 999)
	c.Assert(err, jc.ErrorIsNil)
	c.Check(opRecvTimeout(c, s.State, op, dummy.OpOpenPorts{}), gc.NotNil)
	c.Logf("unit %q port tcp/999 opened, cleaning up...", units[0].Name())

	// Stop the agent and wait (bounded) for its Run goroutine to exit.
	err = a.Stop()
	c.Assert(err, jc.ErrorIsNil)
	select {
	case err := <-done:
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for agent to terminate")
	}
	c.Logf("test agent stopped successfully.")
}
// TestManageEnviron checks that a machine agent running
// JobManageEnviron starts the expected singular workers and responds
// to state changes: adding a unit provisions a machine
// (OpStartInstance) and opening a port reaches the firewaller
// (OpOpenPorts).
func (s *MachineSuite) TestManageEnviron(c *gc.C) {
	usefulVersion := version.Current
	usefulVersion.Series = "quantal" // to match the charm created below
	envtesting.AssertUploadFakeToolsVersions(c, s.Environ.Storage(), usefulVersion)
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	op := make(chan dummy.Operation, 200)
	dummy.Listen(op)

	a := s.newAgent(c, m)
	// Make sure the agent is stopped even if the test fails.
	defer a.Stop()
	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()

	// Check that the provisioner and firewaller are alive by doing
	// a rudimentary check that it responds to state changes.

	// Add one unit to a service; it should get allocated a machine
	// and then its ports should be opened.
	charm := s.AddTestingCharm(c, "dummy")
	svc := s.AddTestingService(c, "test-service", charm)
	err := svc.SetExposed()
	c.Assert(err, gc.IsNil)
	units, err := juju.AddUnits(s.State, svc, 1, "")
	c.Assert(err, gc.IsNil)
	c.Check(opRecvTimeout(c, s.State, op, dummy.OpStartInstance{}), gc.NotNil)

	// Wait for the instance id to show up in the state.
	s.waitProvisioned(c, units[0])
	err = units[0].OpenPort("tcp", 999)
	c.Assert(err, gc.IsNil)
	c.Check(opRecvTimeout(c, s.State, op, dummy.OpOpenPorts{}), gc.NotNil)

	// Stop the agent and wait (bounded) for its Run goroutine to exit.
	err = a.Stop()
	c.Assert(err, gc.IsNil)
	select {
	case err := <-done:
		c.Assert(err, gc.IsNil)
	case <-time.After(5 * time.Second):
		c.Fatalf("timed out waiting for agent to terminate")
	}

	// The singular runner must have started exactly these workers.
	c.Assert(s.singularRecord.started(), jc.DeepEquals, []string{
		"charm-revision-updater",
		"cleaner",
		"environ-provisioner",
		"firewaller",
		"minunitsworker",
		"resumer",
	})
}
// SetUpTest prepares a provisioner test fixture: it disables the
// default state policy, wires up the dummy provider ops channel,
// registers a machine matching the dummy bootstrap instance, and
// opens an API connection as that machine.
func (s *CommonProvisionerSuite) SetUpTest(c *gc.C) {
	// Disable the default state policy, because the
	// provisioner needs to be able to test pathological
	// scenarios where a machine exists in state with
	// invalid environment config.
	dummy.SetStatePolicy(nil)

	s.JujuConnSuite.SetUpTest(c)

	// Create the operations channel with more than enough space
	// for those tests that don't listen on it.
	op := make(chan dummy.Operation, 500)
	dummy.Listen(op)
	s.op = op

	cfg, err := s.State.EnvironConfig()
	c.Assert(err, jc.ErrorIsNil)
	s.cfg = cfg

	// Create a machine for the dummy bootstrap instance,
	// so the provisioner doesn't destroy it.
	insts, err := s.Environ.Instances([]instance.Id{dummy.BootstrapInstanceId})
	c.Assert(err, jc.ErrorIsNil)
	addrs, err := insts[0].Addresses()
	c.Assert(err, jc.ErrorIsNil)
	machine, err := s.State.AddOneMachine(state.MachineTemplate{
		Addresses:  addrs,
		Series:     "quantal",
		Nonce:      agent.BootstrapNonce,
		InstanceId: dummy.BootstrapInstanceId,
		Jobs:       []state.MachineJob{state.JobManageEnviron},
	})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(machine.Id(), gc.Equals, "0")

	// Record the agent version for the bootstrap machine.
	current := version.Binary{
		Number: version.Current,
		Arch:   arch.HostArch(),
		Series: series.HostSeries(),
	}
	err = machine.SetAgentVersion(current)
	c.Assert(err, jc.ErrorIsNil)

	// Log in over the API as the bootstrap machine.
	password, err := utils.RandomPassword()
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetPassword(password)
	c.Assert(err, jc.ErrorIsNil)

	s.st = s.OpenAPIAsMachine(c, machine.Tag(), password, agent.BootstrapNonce)
	c.Assert(s.st, gc.NotNil)
	c.Logf("API: login as %q successful", machine.Tag())
	s.provisioner = s.st.Provisioner()
	c.Assert(s.provisioner, gc.NotNil)
}
func runCommand(ctx *cmd.Context, com cmd.Command, args ...string) (opc chan dummy.Operation, errc chan error) { if ctx == nil { panic("ctx == nil") } errc = make(chan error, 1) opc = make(chan dummy.Operation, 200) dummy.Listen(opc) go func() { // signal that we're done with this ops channel. defer dummy.Listen(nil) err := coretesting.InitCommand(com, args) if err != nil { errc <- err return } err = com.Run(ctx) errc <- err }() return }
// SetUpTest prepares the worker test fixture: it unbreaks the dummy
// provider, opens an API connection as a manager machine, and wires
// up the dummy provider ops channel.
func (s *workerSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)
	// Unbreak dummy provider methods.
	s.AssertConfigParameterUpdated(c, "broken", "")

	s.APIConnection, _ = s.OpenAPIAsNewMachine(c, state.JobManageModel)
	s.API = s.APIConnection.DiscoverSpaces()

	s.OpsChan = make(chan dummy.Operation, 10)
	dummy.Listen(s.OpsChan)

	// Reset any spaces recorded by a previous test.
	s.spacesDiscovered = nil
}
func (s *suite) TestListNetworks(c *gc.C) { e := s.bootstrapTestEnviron(c) opc := make(chan dummy.Operation, 200) dummy.Listen(opc) expectInfo := []network.BasicInfo{ {CIDR: "0.10.0.0/8", ProviderId: "dummy-private"}, {CIDR: "0.20.0.0/24", ProviderId: "dummy-public"}, } netInfo, err := e.ListNetworks() c.Assert(err, gc.IsNil) c.Assert(netInfo, jc.DeepEquals, expectInfo) assertListNetworks(c, e, opc, expectInfo) }
// TestDestroyEnvironmentWithContainers checks that destroying the
// environment via the API client stops exactly the non-manager
// instances.
func (s *destroyEnvironmentSuite) TestDestroyEnvironmentWithContainers(c *gc.C) {
	ops := make(chan dummy.Operation, 500)
	dummy.Listen(ops)

	_, nonManager, _ := s.setUpInstances(c)
	nonManagerId, _ := nonManager.InstanceId()

	err := s.APIState.Client().DestroyEnvironment()
	c.Assert(err, gc.IsNil)
	// Drain the ops channel until the stop is reported; only the
	// non-manager instance should be in the stopped set.
	for op := range ops {
		if op, ok := op.(dummy.OpStopInstances); ok {
			c.Assert(op.Ids, jc.SameContents, []instance.Id{nonManagerId})
			break
		}
	}
}
// TestSubnets checks Subnets on the dummy environ: with and without
// id filtering (unknown ids are silently dropped), and that broken
// methods surface the expected error.
func (s *suite) TestSubnets(c *gc.C) {
	e := s.bootstrapTestEnviron(c)
	defer func() {
		err := e.Destroy()
		c.Assert(err, jc.ErrorIsNil)
	}()

	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)

	expectInfo := []network.SubnetInfo{{
		CIDR:              "0.10.0.0/24",
		ProviderId:        "dummy-private",
		AvailabilityZones: []string{"zone1", "zone2"},
	}, {
		CIDR:       "0.20.0.0/24",
		ProviderId: "dummy-public",
	}}
	// "foo-bar" is unknown and must be ignored by the filter.
	ids := []network.Id{"dummy-private", "dummy-public", "foo-bar"}
	netInfo, err := e.Subnets("i-foo", ids)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, expectInfo)
	assertSubnets(c, e, opc, "i-foo", ids, expectInfo)

	// Test filtering by id(s).
	netInfo, err = e.Subnets("i-foo", nil)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, expectInfo)
	assertSubnets(c, e, opc, "i-foo", nil, expectInfo)

	netInfo, err = e.Subnets("i-foo", ids[0:1])
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, expectInfo[0:1])
	assertSubnets(c, e, opc, "i-foo", ids[0:1], expectInfo[0:1])

	netInfo, err = e.Subnets("i-foo", ids[1:])
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, expectInfo[1:])
	assertSubnets(c, e, opc, "i-foo", ids[1:], expectInfo[1:])

	// Test we can induce errors.
	s.breakMethods(c, e, "Subnets")
	netInfo, err = e.Subnets("i-any", nil)
	c.Assert(err, gc.ErrorMatches, `dummy\.Subnets is broken`)
	c.Assert(netInfo, gc.HasLen, 0)
}
// TestDestroyEnvironmentWithContainers checks that destroying the
// environment via common.DestroyEnvironment stops exactly the
// non-manager instances and sends a final metrics batch.
func (s *destroyEnvironmentSuite) TestDestroyEnvironmentWithContainers(c *gc.C) {
	ops := make(chan dummy.Operation, 500)
	dummy.Listen(ops)

	_, nonManager, _ := s.setUpInstances(c)
	nonManagerId, _ := nonManager.InstanceId()

	err := common.DestroyEnvironment(s.State, s.State.EnvironTag())
	c.Assert(err, jc.ErrorIsNil)
	// Drain the ops channel until the stop is reported; only the
	// non-manager instance should be in the stopped set.
	for op := range ops {
		if op, ok := op.(dummy.OpStopInstances); ok {
			c.Assert(op.Ids, jc.SameContents, []instance.Id{nonManagerId})
			break
		}
	}

	// Destroying the environment must also flush metrics.
	s.metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}})
}
// SetUpTest disables the default state policy (so pathological
// machine states can be created), wires up the dummy provider ops
// channel, and caches the current environment config.
func (s *CommonProvisionerSuite) SetUpTest(c *gc.C) {
	// Disable the default state policy, because the
	// provisioner needs to be able to test pathological
	// scenarios where a machine exists in state with
	// invalid environment config.
	dummy.SetStatePolicy(nil)

	s.JujuConnSuite.SetUpTest(c)

	// Create the operations channel with more than enough space
	// for those tests that don't listen on it.
	op := make(chan dummy.Operation, 500)
	dummy.Listen(op)
	s.op = op

	cfg, err := s.State.EnvironConfig()
	c.Assert(err, gc.IsNil)
	s.cfg = cfg
}
// TestReleaseAddress checks ReleaseAddress on the dummy environ:
// successful releases are reported as ops, broken methods surface
// the expected error, and the call is rejected when the address
// allocation feature flag is cleared.
func (s *suite) TestReleaseAddress(c *gc.C) {
	e := s.bootstrapTestEnviron(c, false)
	defer func() {
		err := e.Destroy()
		c.Assert(err, jc.ErrorIsNil)
	}()

	inst, _ := jujutesting.AssertStartInstance(c, e, "0")
	c.Assert(inst, gc.NotNil)
	subnetId := network.Id("net1")

	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)

	// Release a couple of addresses.
	address := network.NewScopedAddress("0.1.2.1", network.ScopeCloudLocal)
	macAddress := "foobar"
	hostname := "myhostname"
	err := e.ReleaseAddress(inst.Id(), subnetId, address, macAddress, hostname)
	c.Assert(err, jc.ErrorIsNil)
	assertReleaseAddress(c, e, opc, inst.Id(), subnetId, address, macAddress, hostname)

	address = network.NewScopedAddress("0.1.2.2", network.ScopeCloudLocal)
	err = e.ReleaseAddress(inst.Id(), subnetId, address, macAddress, hostname)
	c.Assert(err, jc.ErrorIsNil)
	assertReleaseAddress(c, e, opc, inst.Id(), subnetId, address, macAddress, hostname)

	// Test we can induce errors.
	s.breakMethods(c, e, "ReleaseAddress")
	address = network.NewScopedAddress("0.1.2.3", network.ScopeCloudLocal)
	err = e.ReleaseAddress(inst.Id(), subnetId, address, macAddress, hostname)
	c.Assert(err, gc.ErrorMatches, `dummy\.ReleaseAddress is broken`)

	// Finally, test the method respects the feature flag when
	// disabled.
	s.SetFeatureFlags() // clear the flags.
	err = e.ReleaseAddress(inst.Id(), subnetId, address, macAddress, hostname)
	c.Assert(err, gc.ErrorMatches, "address allocation not supported")
	c.Assert(err, jc.Satisfies, errors.IsNotSupported)
}
// SetUpTest prepares the addresser worker fixture: it optionally
// enables the address-allocation feature flag, unbreaks the dummy
// provider, creates two machines (one to be destroyed later) with
// addresses, wires up the ops channel, and starts the worker.
func (s *workerSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)
	if s.Enabled {
		s.SetFeatureFlags(feature.AddressAllocation)
	}

	// Unbreak dummy provider methods.
	s.AssertConfigParameterUpdated(c, "broken", "")

	s.APIConnection, _ = s.OpenAPIAsNewMachine(c, state.JobManageModel)
	s.API = s.APIConnection.Addresser()

	machineA, err := s.State.AddMachine("quantal", state.JobHostUnits)
	s.MachineA = machineA
	c.Assert(err, jc.ErrorIsNil)
	err = s.MachineA.SetProvisioned("foo", "fake_nonce", nil)
	c.Assert(err, jc.ErrorIsNil)

	// This machine will be destroyed after address creation to test the
	// handling of addresses for machines that have gone.
	machineB, err := s.State.AddMachine("quantal", state.JobHostUnits)
	s.MachineB = machineB
	c.Assert(err, jc.ErrorIsNil)

	s.createAddresses(c)
	s.State.StartSync()

	s.OpsChan = make(chan dummy.Operation, 10)
	dummy.Listen(s.OpsChan)

	// Start the Addresser worker.
	w, err := addresser.NewWorker(s.API)
	c.Assert(err, jc.ErrorIsNil)
	s.Worker = w
	s.waitForInitialDead(c)
}
func dummyListen() chan dummy.Operation { opsChan := make(chan dummy.Operation, 10) dummy.Listen(opsChan) return opsChan }
// TestNetworkInterfaces checks that the dummy environ reports three
// canned NICs (eth0-eth2, one per subnet, eth2 disabled) for any
// instance id, and that broken methods surface the expected error.
func (s *suite) TestNetworkInterfaces(c *gc.C) {
	e := s.bootstrapTestEnviron(c)
	defer func() {
		err := e.Destroy()
		c.Assert(err, jc.ErrorIsNil)
	}()

	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)

	expectInfo := []network.InterfaceInfo{{
		ProviderId:       "dummy-eth0",
		ProviderSubnetId: "dummy-private",
		CIDR:             "0.10.0.0/24",
		DeviceIndex:      0,
		InterfaceName:    "eth0",
		InterfaceType:    "ethernet",
		VLANTag:          0,
		MACAddress:       "aa:bb:cc:dd:ee:f0",
		Disabled:         false,
		NoAutoStart:      false,
		ConfigType:       network.ConfigDHCP,
		Address:          network.NewAddress("0.10.0.2"),
		DNSServers:       network.NewAddresses("ns1.dummy", "ns2.dummy"),
		GatewayAddress:   network.NewAddress("0.10.0.1"),
	}, {
		ProviderId:       "dummy-eth1",
		ProviderSubnetId: "dummy-public",
		CIDR:             "0.20.0.0/24",
		DeviceIndex:      1,
		InterfaceName:    "eth1",
		InterfaceType:    "ethernet",
		VLANTag:          1,
		MACAddress:       "aa:bb:cc:dd:ee:f1",
		Disabled:         false,
		NoAutoStart:      true,
		ConfigType:       network.ConfigDHCP,
		Address:          network.NewAddress("0.20.0.2"),
		DNSServers:       network.NewAddresses("ns1.dummy", "ns2.dummy"),
		GatewayAddress:   network.NewAddress("0.20.0.1"),
	}, {
		ProviderId:       "dummy-eth2",
		ProviderSubnetId: "dummy-disabled",
		CIDR:             "0.30.0.0/24",
		DeviceIndex:      2,
		InterfaceName:    "eth2",
		InterfaceType:    "ethernet",
		VLANTag:          2,
		MACAddress:       "aa:bb:cc:dd:ee:f2",
		Disabled:         true,
		NoAutoStart:      false,
		ConfigType:       network.ConfigDHCP,
		Address:          network.NewAddress("0.30.0.2"),
		DNSServers:       network.NewAddresses("ns1.dummy", "ns2.dummy"),
		GatewayAddress:   network.NewAddress("0.30.0.1"),
	}}
	info, err := e.NetworkInterfaces("i-42")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(info, jc.DeepEquals, expectInfo)
	assertInterfaces(c, e, opc, "i-42", expectInfo)

	// Test we can induce errors.
	s.breakMethods(c, e, "NetworkInterfaces")
	info, err = e.NetworkInterfaces("i-any")
	c.Assert(err, gc.ErrorMatches, `dummy\.NetworkInterfaces is broken`)
	c.Assert(info, gc.HasLen, 0)
}
// TestNetworkInterfaces checks that the dummy environ reports three
// canned NICs (eth0-eth2, one per named network, eth2 disabled), and
// that the magic instance-id prefixes "i-no-nics-",
// "i-nic-no-subnet-", and "i-disabled-nic-" change the result as
// advertised. Broken methods surface the expected error.
func (s *suite) TestNetworkInterfaces(c *gc.C) {
	e := s.bootstrapTestEnviron(c, false)
	defer func() {
		err := e.Destroy()
		c.Assert(err, jc.ErrorIsNil)
	}()

	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)

	expectInfo := []network.InterfaceInfo{{
		ProviderId:       "dummy-eth0",
		ProviderSubnetId: "dummy-private",
		NetworkName:      "juju-private",
		CIDR:             "0.10.0.0/24",
		DeviceIndex:      0,
		InterfaceName:    "eth0",
		VLANTag:          0,
		MACAddress:       "aa:bb:cc:dd:ee:f0",
		Disabled:         false,
		NoAutoStart:      false,
		ConfigType:       network.ConfigDHCP,
		Address:          network.NewAddress("0.10.0.2"),
		DNSServers:       network.NewAddresses("ns1.dummy", "ns2.dummy"),
		GatewayAddress:   network.NewAddress("0.10.0.1"),
		ExtraConfig:      nil,
	}, {
		ProviderId:       "dummy-eth1",
		ProviderSubnetId: "dummy-public",
		NetworkName:      "juju-public",
		CIDR:             "0.20.0.0/24",
		DeviceIndex:      1,
		InterfaceName:    "eth1",
		VLANTag:          1,
		MACAddress:       "aa:bb:cc:dd:ee:f1",
		Disabled:         false,
		NoAutoStart:      true,
		ConfigType:       network.ConfigDHCP,
		Address:          network.NewAddress("0.20.0.2"),
		DNSServers:       network.NewAddresses("ns1.dummy", "ns2.dummy"),
		GatewayAddress:   network.NewAddress("0.20.0.1"),
		ExtraConfig:      nil,
	}, {
		ProviderId:       "dummy-eth2",
		ProviderSubnetId: "dummy-disabled",
		NetworkName:      "juju-disabled",
		CIDR:             "0.30.0.0/24",
		DeviceIndex:      2,
		InterfaceName:    "eth2",
		VLANTag:          2,
		MACAddress:       "aa:bb:cc:dd:ee:f2",
		Disabled:         true,
		NoAutoStart:      false,
		ConfigType:       network.ConfigDHCP,
		Address:          network.NewAddress("0.30.0.2"),
		DNSServers:       network.NewAddresses("ns1.dummy", "ns2.dummy"),
		GatewayAddress:   network.NewAddress("0.30.0.1"),
		ExtraConfig:      nil,
	}}
	info, err := e.NetworkInterfaces("i-42")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(info, jc.DeepEquals, expectInfo)
	assertInterfaces(c, e, opc, "i-42", expectInfo)

	// Test that with instance id prefix "i-no-nics-" no results are
	// returned.
	info, err = e.NetworkInterfaces("i-no-nics-here")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(info, gc.HasLen, 0)
	assertInterfaces(c, e, opc, "i-no-nics-here", expectInfo[:0])

	// Test that with instance id prefix "i-nic-no-subnet-" we get a result
	// with no associated subnet.
	expectInfo = []network.InterfaceInfo{{
		DeviceIndex:   0,
		ProviderId:    network.Id("dummy-eth0"),
		NetworkName:   "juju-public",
		InterfaceName: "eth0",
		MACAddress:    "aa:bb:cc:dd:ee:f0",
		Disabled:      false,
		NoAutoStart:   false,
		ConfigType:    network.ConfigDHCP,
	}}
	info, err = e.NetworkInterfaces("i-nic-no-subnet-here")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(info, gc.HasLen, 1)
	assertInterfaces(c, e, opc, "i-nic-no-subnet-here", expectInfo)

	// Test that with instance id prefix "i-disabled-nic-" we get a result
	// with only a disabled subnet.
	expectInfo = []network.InterfaceInfo{{
		ProviderId:       "dummy-eth2",
		ProviderSubnetId: "dummy-disabled",
		NetworkName:      "juju-disabled",
		CIDR:             "0.30.0.0/24",
		DeviceIndex:      2,
		InterfaceName:    "eth2",
		VLANTag:          2,
		MACAddress:       "aa:bb:cc:dd:ee:f2",
		Disabled:         true,
		NoAutoStart:      false,
		ConfigType:       network.ConfigDHCP,
		Address:          network.NewAddress("0.30.0.2"),
		DNSServers:       network.NewAddresses("ns1.dummy", "ns2.dummy"),
		GatewayAddress:   network.NewAddress("0.30.0.1"),
		ExtraConfig:      nil,
	}}
	info, err = e.NetworkInterfaces("i-disabled-nic-here")
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(info, gc.HasLen, 1)
	assertInterfaces(c, e, opc, "i-disabled-nic-here", expectInfo)

	// Test we can induce errors.
	s.breakMethods(c, e, "NetworkInterfaces")
	info, err = e.NetworkInterfaces("i-any")
	c.Assert(err, gc.ErrorMatches, `dummy\.NetworkInterfaces is broken`)
	c.Assert(info, gc.HasLen, 0)
}
// TestSubnets checks Subnets on the dummy environ: with and without
// id filtering (unknown ids are silently dropped), the magic
// instance-id prefixes "i-no-subnets-", "i-nic-no-subnet-" (empty
// results) and "i-no-alloc-#" (clears the allocatable range of the
// #-th subnet, or all subnets for "all"), plus the error cases for a
// bad or out-of-range # and for broken methods.
func (s *suite) TestSubnets(c *gc.C) {
	e := s.bootstrapTestEnviron(c, false)
	defer func() {
		err := e.Destroy()
		c.Assert(err, jc.ErrorIsNil)
	}()

	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)

	expectInfo := []network.SubnetInfo{{
		CIDR:              "0.10.0.0/24",
		ProviderId:        "dummy-private",
		AllocatableIPLow:  net.ParseIP("0.10.0.0"),
		AllocatableIPHigh: net.ParseIP("0.10.0.255"),
		AvailabilityZones: []string{"zone1", "zone2"},
	}, {
		CIDR:              "0.20.0.0/24",
		ProviderId:        "dummy-public",
		AllocatableIPLow:  net.ParseIP("0.20.0.0"),
		AllocatableIPHigh: net.ParseIP("0.20.0.255"),
	}}

	// Prepare a version of the above with no allocatable range to
	// test the magic "i-no-alloc-" prefix below.
	noallocInfo := make([]network.SubnetInfo, len(expectInfo))
	for i, exp := range expectInfo {
		pid := string(exp.ProviderId)
		pid = strings.TrimPrefix(pid, "dummy-")
		noallocInfo[i].ProviderId = network.Id("noalloc-" + pid)
		noallocInfo[i].AllocatableIPLow = nil
		noallocInfo[i].AllocatableIPHigh = nil
		noallocInfo[i].AvailabilityZones = exp.AvailabilityZones
		noallocInfo[i].CIDR = exp.CIDR
	}

	// "foo-bar" is unknown and must be ignored by the filter.
	ids := []network.Id{"dummy-private", "dummy-public", "foo-bar"}
	netInfo, err := e.Subnets("i-foo", ids)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, expectInfo)
	assertSubnets(c, e, opc, "i-foo", ids, expectInfo)

	// Test filtering by id(s).
	netInfo, err = e.Subnets("i-foo", nil)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, expectInfo)
	assertSubnets(c, e, opc, "i-foo", nil, expectInfo)

	netInfo, err = e.Subnets("i-foo", ids[0:1])
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, expectInfo[0:1])
	assertSubnets(c, e, opc, "i-foo", ids[0:1], expectInfo[0:1])

	netInfo, err = e.Subnets("i-foo", ids[1:])
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, expectInfo[1:])
	assertSubnets(c, e, opc, "i-foo", ids[1:], expectInfo[1:])

	// Test that using an instance id with prefix of either
	// "i-no-subnets-" or "i-nic-no-subnet-"
	// returns no results, regardless whether ids are given or not.
	for _, instId := range []instance.Id{"i-no-subnets-foo", "i-nic-no-subnet-foo"} {
		netInfo, err = e.Subnets(instId, nil)
		c.Assert(err, jc.ErrorIsNil)
		c.Assert(netInfo, gc.HasLen, 0)
		assertSubnets(c, e, opc, instId, nil, expectInfo[:0])
	}

	netInfo, err = e.Subnets("i-no-subnets-foo", ids)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, gc.HasLen, 0)
	assertSubnets(c, e, opc, "i-no-subnets-foo", ids, expectInfo[:0])

	// Test the behavior with "i-no-alloc-" instance id prefix.
	// When # is "all", all returned subnets have no allocatable range
	// set and have provider ids with "noalloc-" prefix.
	netInfo, err = e.Subnets("i-no-alloc-all", nil)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(netInfo, jc.DeepEquals, noallocInfo)
	assertSubnets(c, e, opc, "i-no-alloc-all", nil, noallocInfo)

	// When # is an integer, the #-th subnet in result has no
	// allocatable range set and a provider id prefix "noalloc-".
	netInfo, err = e.Subnets("i-no-alloc-0", nil)
	c.Assert(err, jc.ErrorIsNil)
	expectResult := []network.SubnetInfo{noallocInfo[0], expectInfo[1]}
	c.Assert(netInfo, jc.DeepEquals, expectResult)
	assertSubnets(c, e, opc, "i-no-alloc-0", nil, expectResult)

	netInfo, err = e.Subnets("i-no-alloc-1", nil)
	c.Assert(err, jc.ErrorIsNil)
	expectResult = []network.SubnetInfo{expectInfo[0], noallocInfo[1]}
	c.Assert(netInfo, jc.DeepEquals, expectResult)
	assertSubnets(c, e, opc, "i-no-alloc-1", nil, expectResult)

	// For the last case above, also test the error returned when # is
	// not integer or it's out of range of the results (including when
	// filtering by ids is applied).
	netInfo, err = e.Subnets("i-no-alloc-foo", nil)
	c.Assert(err, gc.ErrorMatches, `invalid index "foo"; expected int`)
	c.Assert(netInfo, gc.HasLen, 0)

	netInfo, err = e.Subnets("i-no-alloc-1", ids[:1])
	c.Assert(err, gc.ErrorMatches, `index 1 out of range; expected 0..0`)
	c.Assert(netInfo, gc.HasLen, 0)

	netInfo, err = e.Subnets("i-no-alloc-2", ids)
	c.Assert(err, gc.ErrorMatches, `index 2 out of range; expected 0..1`)
	c.Assert(netInfo, gc.HasLen, 0)

	// Test we can induce errors.
	s.breakMethods(c, e, "Subnets")
	netInfo, err = e.Subnets("i-any", nil)
	c.Assert(err, gc.ErrorMatches, `dummy\.Subnets is broken`)
	c.Assert(netInfo, gc.HasLen, 0)
}
// SetUpTest prepares a provisioner test fixture: it disables the
// default state policy, patches the official image data sources,
// seeds one cloud image metadata record, wires up the dummy provider
// ops channel, registers a machine matching the dummy bootstrap
// instance, and opens an API connection as that machine.
func (s *CommonProvisionerSuite) SetUpTest(c *gc.C) {
	// Disable the default state policy, because the
	// provisioner needs to be able to test pathological
	// scenarios where a machine exists in state with
	// invalid environment config.
	dummy.SetStatePolicy(nil)

	s.JujuConnSuite.SetUpTest(c)

	// We do not want to pull published image metadata for tests...
	imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "")
	// We want an image to start test instances
	err := s.State.CloudImageMetadataStorage.SaveMetadata([]cloudimagemetadata.Metadata{{
		cloudimagemetadata.MetadataAttributes{
			Region:          "region",
			Series:          "trusty",
			Arch:            "amd64",
			VirtType:        "",
			RootStorageType: "",
			Source:          "test",
		},
		10,
		"-999",
	}})
	c.Assert(err, jc.ErrorIsNil)

	// Create the operations channel with more than enough space
	// for those tests that don't listen on it.
	op := make(chan dummy.Operation, 500)
	dummy.Listen(op)
	s.op = op

	cfg, err := s.State.EnvironConfig()
	c.Assert(err, jc.ErrorIsNil)
	s.cfg = cfg

	// Create a machine for the dummy bootstrap instance,
	// so the provisioner doesn't destroy it.
	insts, err := s.Environ.Instances([]instance.Id{dummy.BootstrapInstanceId})
	c.Assert(err, jc.ErrorIsNil)
	addrs, err := insts[0].Addresses()
	c.Assert(err, jc.ErrorIsNil)
	machine, err := s.State.AddOneMachine(state.MachineTemplate{
		Addresses:  addrs,
		Series:     "quantal",
		Nonce:      agent.BootstrapNonce,
		InstanceId: dummy.BootstrapInstanceId,
		Jobs:       []state.MachineJob{state.JobManageEnviron},
	})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(machine.Id(), gc.Equals, "0")

	// Record the agent version for the bootstrap machine.
	current := version.Binary{
		Number: version.Current,
		Arch:   arch.HostArch(),
		Series: series.HostSeries(),
	}
	err = machine.SetAgentVersion(current)
	c.Assert(err, jc.ErrorIsNil)

	// Log in over the API as the bootstrap machine.
	password, err := utils.RandomPassword()
	c.Assert(err, jc.ErrorIsNil)
	err = machine.SetPassword(password)
	c.Assert(err, jc.ErrorIsNil)

	s.st = s.OpenAPIAsMachine(c, machine.Tag(), password, agent.BootstrapNonce)
	c.Assert(s.st, gc.NotNil)
	c.Logf("API: login as %q successful", machine.Tag())
	s.provisioner = s.st.Provisioner()
	c.Assert(s.provisioner, gc.NotNil)
}
// testControllerDestroy runs "destroy-controller --destroy-all-models"
// against a controller with one hosted model, driving the cleanup and
// dying-model progression in a background goroutine (standing in for
// the cleaner/undertaker workers), and verifies the dummy provider
// Destroy op and removal of the controller from the client store.
// When forceAPI is true the local bootstrap config is removed first so
// the command must go through the API.
func (s *cmdControllerSuite) testControllerDestroy(c *gc.C, forceAPI bool) {
	st := s.Factory.MakeModel(c, &factory.ModelParams{
		Name:        "just-a-controller",
		ConfigAttrs: testing.Attrs{"controller": true},
		CloudRegion: "dummy-region",
	})
	defer st.Close()
	factory.NewFactory(st).MakeApplication(c, nil)

	stop := make(chan struct{})
	done := make(chan struct{})
	// In order for the destroy controller command to complete we need to run
	// the code that the cleaner and undertaker workers would be running in
	// the agent in order to progress the lifecycle of the hosted model,
	// and cleanup the documents.
	go func() {
		defer close(done)
		a := testing.LongAttempt.Start()
		for a.Next() {
			err := s.State.Cleanup()
			c.Check(err, jc.ErrorIsNil)
			err = st.Cleanup()
			c.Check(err, jc.ErrorIsNil)
			err = st.ProcessDyingModel()
			if errors.Cause(err) != state.ErrModelNotDying {
				c.Check(err, jc.ErrorIsNil)
				if err == nil {
					// success!
					return
				}
			}
			select {
			case <-stop:
				return
			default:
				// retry
			}
		}
	}()

	if forceAPI {
		// Remove bootstrap config from the client store,
		// forcing the command to use the API.
		err := os.Remove(jujuclient.JujuBootstrapConfigPath())
		c.Assert(err, jc.ErrorIsNil)
	}

	ops := make(chan dummy.Operation, 1)
	dummy.Listen(ops)

	s.run(c, "destroy-controller", "kontroll", "-y", "--destroy-all-models", "--debug")
	close(stop)
	<-done

	// The dummy provider must have seen exactly one Destroy targeting
	// the controller model in the expected cloud/region.
	destroyOp := (<-ops).(dummy.OpDestroy)
	c.Assert(destroyOp.Env, gc.Equals, "controller")
	c.Assert(destroyOp.Cloud, gc.Equals, "dummy")
	c.Assert(destroyOp.CloudRegion, gc.Equals, "dummy-region")

	// ... and the controller must be gone from the client store.
	store := jujuclient.NewFileClientStore()
	_, err := store.ControllerByName("kontroll")
	c.Assert(err, jc.Satisfies, errors.IsNotFound)
}