// TestStopInstances starts two real instances plus one fabricated one,
// stops all three, and polls until the provider reports them all gone.
func (t *LiveTests) TestStopInstances(c *gc.C) {
	t.PrepareOnce(c)
	// It would be nice if this test was in jujutest, but
	// there's no way for jujutest to fabricate a valid-looking
	// instance id.
	inst0, _ := testing.AssertStartInstance(c, t.Env, "40")
	inst1 := ec2.FabricateInstance(inst0, "i-aaaaaaaa")
	inst2, _ := testing.AssertStartInstance(c, t.Env, "41")
	err := t.Env.StopInstances(inst0.Id(), inst1.Id(), inst2.Id())
	c.Check(err, jc.ErrorIsNil)
	var insts []instance.Instance
	// We need the retry logic here because we are waiting
	// for Instances to return an error, and it will not retry
	// if it succeeds.
	gone := false
	for a := ec2.ShortAttempt.Start(); a.Next(); {
		insts, err = t.Env.Instances([]instance.Id{inst0.Id(), inst2.Id()})
		if err == environs.ErrPartialInstances {
			// instances not gone yet.
			continue
		}
		if err == environs.ErrNoInstances {
			gone = true
			break
		}
		// Any other outcome (including a nil error, meaning the
		// instances are still fully present) is fatal.
		c.Fatalf("error getting instances: %v", err)
	}
	if !gone {
		c.Errorf("after termination, instances remaining: %v", insts)
	}
}
// TestStartStop exercises the basic instance lifecycle against a
// pristine environment: start two instances, look them up individually
// and via AllInstances, stop one, and verify the partial-instances
// error behaviour of Instances.
func (t *Tests) TestStartStop(c *gc.C) {
	e := t.Prepare(c)
	t.UploadFakeTools(c, e.Storage())
	cfg, err := e.Config().Apply(map[string]interface{}{
		"agent-version": version.Current.Number.String(),
	})
	c.Assert(err, gc.IsNil)
	err = e.SetConfig(cfg)
	c.Assert(err, gc.IsNil)
	// A pristine environment has no instances.
	insts, err := e.Instances(nil)
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 0)
	inst0, hc := testing.AssertStartInstance(c, e, "0")
	c.Assert(inst0, gc.NotNil)
	id0 := inst0.Id()
	// Sanity check for hardware characteristics.
	c.Assert(hc.Arch, gc.NotNil)
	c.Assert(hc.Mem, gc.NotNil)
	c.Assert(hc.CpuCores, gc.NotNil)
	inst1, _ := testing.AssertStartInstance(c, e, "1")
	c.Assert(inst1, gc.NotNil)
	id1 := inst1.Id()
	// Instances returns results in request order.
	insts, err = e.Instances([]instance.Id{id0, id1})
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 2)
	c.Assert(insts[0].Id(), gc.Equals, id0)
	c.Assert(insts[1].Id(), gc.Equals, id1)
	// order of results is not specified
	insts, err = e.AllInstances()
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 2)
	c.Assert(insts[0].Id(), gc.Not(gc.Equals), insts[1].Id())
	err = e.StopInstances(inst0.Id())
	c.Assert(err, gc.IsNil)
	// After stopping one instance, Instances reports a partial result
	// with a nil slot for the stopped id.
	insts, err = e.Instances([]instance.Id{id0, id1})
	c.Assert(err, gc.Equals, environs.ErrPartialInstances)
	c.Assert(insts[0], gc.IsNil)
	c.Assert(insts[1].Id(), gc.Equals, id1)
	insts, err = e.AllInstances()
	c.Assert(err, gc.IsNil)
	c.Assert(insts[0].Id(), gc.Equals, id1)
}
// assertStartInstanceDefaultSecurityGroup bootstraps a fresh
// environment with use-default-secgroup set to useDefault, starts an
// instance, and asserts that the instance is in the "default" nova
// security group exactly when useDefault is true.
func (s *LiveTests) assertStartInstanceDefaultSecurityGroup(c *gc.C, useDefault bool) {
	attrs := s.TestConfig.Merge(coretesting.Attrs{
		"name":                 "sample-" + randomName(),
		"control-bucket":       "juju-test-" + randomName(),
		"use-default-secgroup": useDefault,
	})
	cfg, err := config.New(config.NoDefaults, attrs)
	c.Assert(err, gc.IsNil)
	// Set up a test environment.
	env, err := environs.New(cfg)
	c.Assert(err, gc.IsNil)
	c.Assert(env, gc.NotNil)
	// NOTE(review): Destroy's error is deliberately discarded here —
	// best-effort cleanup of a throwaway environment.
	defer env.Destroy()
	// Bootstrap and start an instance.
	err = bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	inst, _ := jujutesting.AssertStartInstance(c, env, "100")
	// Check whether the instance has the default security group assigned.
	novaClient := openstack.GetNovaClient(env)
	groups, err := novaClient.GetServerSecurityGroups(string(inst.Id()))
	c.Assert(err, gc.IsNil)
	defaultGroupFound := false
	for _, group := range groups {
		if group.Name == "default" {
			defaultGroupFound = true
			break
		}
	}
	c.Assert(defaultGroupFound, gc.Equals, useDefault)
}
// TestAllocateAddress checks that consecutive AllocateAddress calls on
// the dummy provider hand out sequential cloud-local addresses and
// emit the matching provider operations.
func (s *suite) TestAllocateAddress(c *gc.C) {
	e := s.bootstrapTestEnviron(c, false)
	defer func() {
		err := e.Destroy()
		c.Assert(err, gc.IsNil)
	}()
	inst, _ := jujutesting.AssertStartInstance(c, e, "0")
	c.Assert(inst, gc.NotNil)
	netId := network.Id("net1")
	// Listen for provider operations so the allocation can be
	// observed as well as asserted on directly.
	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)
	expectAddress := network.NewAddress("0.1.2.1", network.ScopeCloudLocal)
	address, err := e.AllocateAddress(inst.Id(), netId)
	c.Assert(err, gc.IsNil)
	c.Assert(address, gc.DeepEquals, expectAddress)
	assertAllocateAddress(c, e, opc, inst.Id(), netId, expectAddress)
	// A second allocation yields the next address in sequence.
	expectAddress = network.NewAddress("0.1.2.2", network.ScopeCloudLocal)
	address, err = e.AllocateAddress(inst.Id(), netId)
	c.Assert(err, gc.IsNil)
	c.Assert(address, gc.DeepEquals, expectAddress)
	assertAllocateAddress(c, e, opc, inst.Id(), netId, expectAddress)
}
func (t *localServerSuite) TestAddresses(c *gc.C) { env := t.Prepare(c) err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) inst, _ := testing.AssertStartInstance(c, env, "1") c.Assert(err, jc.ErrorIsNil) addrs, err := inst.Addresses() c.Assert(err, jc.ErrorIsNil) // Expected values use Address type but really contain a regexp for // the value rather than a valid ip or hostname. expected := []network.Address{{ Value: "8.0.0.*", Type: network.IPv4Address, Scope: network.ScopePublic, }, { Value: "127.0.0.*", Type: network.IPv4Address, Scope: network.ScopeCloudLocal, }} c.Assert(addrs, gc.HasLen, len(expected)) for i, addr := range addrs { c.Check(addr.Value, gc.Matches, expected[i].Value) c.Check(addr.Type, gc.Equals, expected[i].Type) c.Check(addr.Scope, gc.Equals, expected[i].Scope) } }
// primeAgent adds a new Machine to run the given jobs, and sets up the
// machine agent's directory. It returns the new machine, the
// agent's configuration and the tools currently running.
func (s *commonMachineSuite) primeAgent(
	c *gc.C, vers version.Binary,
	jobs ...state.MachineJob) (m *state.Machine, agentConfig agent.ConfigSetterWriter, tools *tools.Tools) {
	// Add a machine and ensure it is provisioned.
	m, err := s.State.AddMachine("quantal", jobs...)
	c.Assert(err, gc.IsNil)
	inst, md := jujutesting.AssertStartInstance(c, s.Environ, m.Id())
	c.Assert(m.SetProvisioned(inst.Id(), agent.BootstrapNonce, md), gc.IsNil)
	// Add an address for the tests in case the maybeInitiateMongoServer
	// codepath is exercised.
	s.setFakeMachineAddresses(c, m)
	// Set up the new machine.
	err = m.SetAgentVersion(vers)
	c.Assert(err, gc.IsNil)
	err = m.SetPassword(initialMachinePassword)
	c.Assert(err, gc.IsNil)
	tag := m.Tag()
	if m.IsManager() {
		// Manager machines additionally need a mongo password and
		// state-serving info, primed via the state-agent variant.
		err = m.SetMongoPassword(initialMachinePassword)
		c.Assert(err, gc.IsNil)
		agentConfig, tools = s.agentSuite.primeStateAgent(c, tag, initialMachinePassword, vers)
		info, ok := agentConfig.StateServingInfo()
		c.Assert(ok, jc.IsTrue)
		err = s.State.SetStateServingInfo(info)
		c.Assert(err, gc.IsNil)
	} else {
		agentConfig, tools = s.agentSuite.primeAgent(c, tag, initialMachinePassword, vers)
	}
	err = agentConfig.Write()
	c.Assert(err, gc.IsNil)
	return m, agentConfig, tools
}
// TestStartInstanceDistributionParams verifies that a
// StartInstanceParams.DistributionGroup callback's result is passed
// through to the availability-zone allocator (and that no group is
// passed when none is specified).
func (s *environSuite) TestStartInstanceDistributionParams(c *gc.C) {
	env := s.bootstrap(c)
	var mock mockAvailabilityZoneAllocations
	s.PatchValue(&availabilityZoneAllocations, mock.AvailabilityZoneAllocations)
	// no distribution group specified
	s.newNode(c, "node1", "host1", nil)
	s.addSubnet(c, 1, 1, "node1")
	testing.AssertStartInstance(c, env, s.controllerUUID, "1")
	c.Assert(mock.group, gc.HasLen, 0)
	// distribution group specified: ensure it's passed through to AvailabilityZone.
	s.newNode(c, "node2", "host2", nil)
	s.addSubnet(c, 2, 2, "node2")
	expectedInstances := []instance.Id{"i-0", "i-1"}
	params := environs.StartInstanceParams{
		ControllerUUID: s.controllerUUID,
		DistributionGroup: func() ([]instance.Id, error) {
			return expectedInstances, nil
		},
	}
	_, err := testing.StartInstanceWithParams(env, "1", params)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(mock.group, gc.DeepEquals, expectedInstances)
}
// TestStartInstanceDistributionParams verifies that the
// DistributionGroup callback's instances are forwarded to the EC2
// availability-zone allocator, and that no group is passed when the
// parameter is absent.
func (t *localServerSuite) TestStartInstanceDistributionParams(c *gc.C) {
	env := t.Prepare(c)
	err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{})
	c.Assert(err, jc.ErrorIsNil)
	mock := mockAvailabilityZoneAllocations{
		result: []common.AvailabilityZoneInstances{{ZoneName: "az1"}},
	}
	t.PatchValue(ec2.AvailabilityZoneAllocations, mock.AvailabilityZoneAllocations)
	// no distribution group specified
	testing.AssertStartInstance(c, env, "1")
	c.Assert(mock.group, gc.HasLen, 0)
	// distribution group specified: ensure it's passed through to AvailabilityZone.
	expectedInstances := []instance.Id{"i-0", "i-1"}
	params := environs.StartInstanceParams{
		DistributionGroup: func() ([]instance.Id, error) {
			return expectedInstances, nil
		},
	}
	_, err = testing.StartInstanceWithParams(env, "1", params, nil)
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(mock.group, gc.DeepEquals, expectedInstances)
}
// setupScenario creates ten machines with matching started instances,
// provisions only the odd-numbered machines, gives the first half of
// the instances addresses and a "running" status, and clears addresses
// on the second half. It returns the machines and instances for the
// caller's assertions.
func (s *workerSuite) setupScenario(c *gc.C) ([]*state.Machine, []instance.Instance) {
	var machines []*state.Machine
	var insts []instance.Instance
	for i := 0; i < 10; i++ {
		m, err := s.State.AddMachine("series", state.JobHostUnits)
		c.Assert(err, gc.IsNil)
		machines = append(machines, m)
		inst, _ := testing.AssertStartInstance(c, s.Conn.Environ, m.Id())
		insts = append(insts, inst)
	}
	// Associate the odd-numbered machines with an instance.
	for i := 1; i < len(machines); i += 2 {
		m := machines[i]
		err := m.SetProvisioned(insts[i].Id(), "nonce", nil)
		c.Assert(err, gc.IsNil)
	}
	// Associate the first half of the instances with an address and status.
	for i := 0; i < len(machines)/2; i++ {
		dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
		dummy.SetInstanceStatus(insts[i], "running")
	}
	// Make sure the second half of the instances have no addresses.
	for i := len(machines) / 2; i < len(machines); i++ {
		dummy.SetInstanceAddresses(insts[i], nil)
	}
	return machines, insts
}
func (s *localServerSuite) TestInstanceStatus(c *gc.C) { env := s.Prepare(c) inst, _ := testing.AssertStartInstance(c, env, "100") c.Assert(inst.Status(), gc.Equals, "running") err := env.StopInstances(inst.Id()) c.Assert(err, jc.ErrorIsNil) }
// If the environment is configured not to require a public IP address for nodes,
// bootstrapping and starting an instance should occur without any attempt to
// allocate a public address.
func (s *localServerSuite) TestStartInstanceWithoutPublicIP(c *gc.C) {
	// Trip the test immediately if either floating-IP entry point on
	// the nova test double is ever exercised.
	cleanup := s.srv.Service.Nova.RegisterControlPoint(
		"addFloatingIP",
		func(sc hook.ServiceControl, args ...interface{}) error {
			return fmt.Errorf("add floating IP should not have been called")
		},
	)
	defer cleanup()
	cleanup = s.srv.Service.Nova.RegisterControlPoint(
		"addServerFloatingIP",
		func(sc hook.ServiceControl, args ...interface{}) error {
			return fmt.Errorf("add server floating IP should not have been called")
		},
	)
	defer cleanup()
	// Disable floating IPs for this environment.
	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
		"use-floating-ip": false,
	}))
	c.Assert(err, gc.IsNil)
	env, err := environs.Prepare(cfg, coretesting.Context(c), s.ConfigStore)
	c.Assert(err, gc.IsNil)
	err = bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	inst, _ := testing.AssertStartInstance(c, env, "100")
	err = env.StopInstances(inst.Id())
	c.Assert(err, gc.IsNil)
}
// testStartInstanceAvailZoneOneConstrained checks that when the first
// availability zone rejects RunInstances with the given error, the
// provider falls back to the next zone (az2) and records it in the
// returned hardware characteristics.
func (t *localServerSuite) testStartInstanceAvailZoneOneConstrained(c *gc.C, runInstancesError *amzec2.Error) {
	env := t.Prepare(c)
	err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{})
	c.Assert(err, jc.ErrorIsNil)
	mock := mockAvailabilityZoneAllocations{
		result: []common.AvailabilityZoneInstances{
			{ZoneName: "az1"}, {ZoneName: "az2"},
		},
	}
	t.PatchValue(ec2.AvailabilityZoneAllocations, mock.AvailabilityZoneAllocations)
	// The first call to RunInstances fails with an error indicating the AZ
	// is constrained. The second attempt succeeds, and so allocates to az2.
	var azArgs []string
	realRunInstances := *ec2.RunInstances
	t.PatchValue(ec2.RunInstances, func(e *amzec2.EC2, ri *amzec2.RunInstances) (*amzec2.RunInstancesResp, error) {
		// Record every zone attempted so the fallback order can be
		// asserted below.
		azArgs = append(azArgs, ri.AvailZone)
		if len(azArgs) == 1 {
			return nil, runInstancesError
		}
		return realRunInstances(e, ri)
	})
	inst, hwc := testing.AssertStartInstance(c, env, "1")
	c.Assert(azArgs, gc.DeepEquals, []string{"az1", "az2"})
	c.Assert(ec2.InstanceEC2(inst).AvailZone, gc.Equals, "az2")
	c.Check(*hwc.AvailabilityZone, gc.Equals, "az2")
}
func (s *localServerSuite) TestInstanceStatus(c *gc.C) { env := s.Prepare(c) s.Tests.UploadFakeTools(c, env.Storage()) inst, _ := testing.AssertStartInstance(c, env, "100") c.Assert(inst.Status(), gc.Equals, "running") err := env.StopInstances(inst.Id()) c.Assert(err, gc.IsNil) }
// TestStartInstance is a smoke test: after bootstrapping, a fresh
// instance can be started and then stopped without error.
// NOTE(review): the previous header comment described public-IP
// allocation behaviour that this test does not exercise; it appears to
// have been copied from TestStartInstanceWithoutPublicIP.
func (s *localServerSuite) TestStartInstance(c *gc.C) {
	env := s.Prepare(c)
	err := bootstrap.Bootstrap(bootstrapContext(c), env, bootstrap.BootstrapParams{})
	c.Assert(err, jc.ErrorIsNil)
	inst, _ := testing.AssertStartInstance(c, env, "100")
	err = env.StopInstances(inst.Id())
	c.Assert(err, jc.ErrorIsNil)
}
func (s *localServerSuite) TestInstanceStatus(c *gc.C) { env := s.Prepare(c) // goose's test service always returns ACTIVE state. inst, _ := testing.AssertStartInstance(c, env, "100") c.Assert(inst.Status(), gc.Equals, nova.StatusActive) err := env.StopInstances(inst.Id()) c.Assert(err, gc.IsNil) }
// TestControllerInstances checks that ControllerInstances reports only
// the bootstrap instance, even after additional instances are started.
func (t *LiveTests) TestControllerInstances(c *gc.C) {
	t.BootstrapOnce(c)
	allInsts, err := t.Env.AllInstances()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(allInsts, gc.HasLen, 1) // bootstrap instance
	bootstrapInstId := allInsts[0].Id()
	// Start two extra instances; best-effort cleanup via defer
	// (StopInstances errors are deliberately ignored here).
	inst0, _ := testing.AssertStartInstance(c, t.Env, "98")
	defer t.Env.StopInstances(inst0.Id())
	inst1, _ := testing.AssertStartInstance(c, t.Env, "99")
	defer t.Env.StopInstances(inst1.Id())
	insts, err := t.Env.ControllerInstances()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(insts, gc.DeepEquals, []instance.Id{bootstrapInstId})
}
func (t *localServerSuite) TestInstanceStatus(c *gc.C) { env := t.Prepare(c) err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) t.srv.ec2srv.SetInitialInstanceState(ec2test.Terminated) inst, _ := testing.AssertStartInstance(c, env, "1") c.Assert(err, jc.ErrorIsNil) c.Assert(inst.Status(), gc.Equals, "terminated") }
// TestStartInstance is a smoke test: after uploading fake tools and
// bootstrapping, a fresh instance can be started and stopped cleanly.
// NOTE(review): the previous header comment described public-IP
// allocation behaviour that this test does not exercise; it appears to
// have been copied from TestStartInstanceWithoutPublicIP.
func (s *localServerSuite) TestStartInstance(c *gc.C) {
	env := s.Prepare(c)
	s.Tests.UploadFakeTools(c, env.Storage())
	err := bootstrap.Bootstrap(bootstrapContext(c), env, bootstrap.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	inst, _ := testing.AssertStartInstance(c, env, "100")
	err = env.StopInstances(inst.Id())
	c.Assert(err, gc.IsNil)
}
func (s *environSuite) TestStartInstanceDistribution(c *gc.C) { env := s.bootstrap(c) s.testMAASObject.TestServer.AddZone("test-available", "description") s.newNode(c, "node1", "host1", map[string]interface{}{"zone": "test-available"}) s.addSubnet(c, 1, 1, "node1") inst, _ := testing.AssertStartInstance(c, env, s.controllerUUID, "1") zone, err := inst.(*maas1Instance).zone() c.Assert(err, jc.ErrorIsNil) c.Assert(zone, gc.Equals, "test-available") }
func (t *localServerSuite) TestStartInstanceDistribution(c *gc.C) { env := t.Prepare(c) err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) // test-available is the only available AZ, so AvailabilityZoneAllocations // is guaranteed to return that. inst, _ := testing.AssertStartInstance(c, env, "1") c.Assert(ec2.InstanceEC2(inst).AvailZone, gc.Equals, "test-available") }
func (t *localServerSuite) TestStartInstanceHardwareCharacteristics(c *gc.C) { env := t.Prepare(c) err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) _, hc := testing.AssertStartInstance(c, env, "1") c.Check(*hc.Arch, gc.Equals, "amd64") c.Check(*hc.Mem, gc.Equals, uint64(1740)) c.Check(*hc.CpuCores, gc.Equals, uint64(1)) c.Assert(*hc.CpuPower, gc.Equals, uint64(100)) }
func (t *localServerSuite) TestInstanceStatus(c *gc.C) { env := t.Prepare(c) envtesting.UploadFakeTools(c, env.Storage()) err := bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{}) c.Assert(err, gc.IsNil) t.srv.ec2srv.SetInitialInstanceState(ec2test.Terminated) inst, _ := testing.AssertStartInstance(c, env, "1") c.Assert(err, gc.IsNil) c.Assert(inst.Status(), gc.Equals, "terminated") }
// TestInstancesGathering runs the table-driven instanceGathering cases
// against two live instances, checking Instances' result ordering,
// nil-slot behaviour, and partial/no-instance errors.
func (s *localServerSuite) TestInstancesGathering(c *gc.C) {
	env := s.Prepare(c)
	inst0, _ := testing.AssertStartInstance(c, env, s.ControllerUUID, "100")
	id0 := inst0.Id()
	inst1, _ := testing.AssertStartInstance(c, env, s.ControllerUUID, "101")
	id1 := inst1.Id()
	c.Logf("id0: %s, id1: %s", id0, id1)
	defer func() {
		// StopInstances deletes machines in parallel but the Joyent
		// API test double isn't goroutine-safe so stop them one at a
		// time. See https://pad.lv/1604514
		c.Check(env.StopInstances(inst0.Id()), jc.ErrorIsNil)
		c.Check(env.StopInstances(inst1.Id()), jc.ErrorIsNil)
	}()
	for i, test := range instanceGathering {
		c.Logf("test %d: find %v -> expect len %d, err: %v", i, test.ids, len(test.ids), test.err)
		// Map the symbolic ids in the test table to the real ids;
		// unknown entries stay "" and should yield nil results.
		ids := make([]instance.Id, len(test.ids))
		for j, id := range test.ids {
			switch id {
			case "id0":
				ids[j] = id0
			case "id1":
				ids[j] = id1
			}
		}
		insts, err := env.Instances(ids)
		c.Assert(err, gc.Equals, test.err)
		if err == environs.ErrNoInstances {
			c.Assert(insts, gc.HasLen, 0)
		} else {
			c.Assert(insts, gc.HasLen, len(test.ids))
		}
		for j, inst := range insts {
			if ids[j] != "" {
				c.Assert(inst.Id(), gc.Equals, ids[j])
			} else {
				c.Assert(inst, gc.IsNil)
			}
		}
	}
}
func (t *localServerSuite) TestStartInstanceHardwareCharacteristics(c *gc.C) { env := t.Prepare(c) envtesting.UploadFakeTools(c, env.Storage()) err := bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{}) c.Assert(err, gc.IsNil) _, hc := testing.AssertStartInstance(c, env, "1") c.Check(*hc.Arch, gc.Equals, "amd64") c.Check(*hc.Mem, gc.Equals, uint64(1740)) c.Check(*hc.CpuCores, gc.Equals, uint64(1)) c.Assert(*hc.CpuPower, gc.Equals, uint64(100)) }
func (s *suite) TestAvailabilityZone(c *gc.C) { e := s.bootstrapTestEnviron(c, true) defer func() { err := e.Destroy() c.Assert(err, jc.ErrorIsNil) }() inst, hwc := jujutesting.AssertStartInstance(c, e, "0") c.Assert(inst, gc.NotNil) c.Check(hwc.AvailabilityZone, gc.IsNil) }
func (t *localServerSuite) TestStartInstanceDistribution(c *gc.C) { env := t.Prepare(c) envtesting.UploadFakeTools(c, env.Storage()) err := bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{}) c.Assert(err, gc.IsNil) // test-available is the only available AZ, so AvailabilityZoneAllocations // is guaranteed to return that. inst, _ := testing.AssertStartInstance(c, env, "1") c.Assert(openstack.InstanceServerDetail(inst).AvailabilityZone, gc.Equals, "test-available") }
// TestStartInstanceNetwork checks that an environment configured with
// a named network label can start and stop an instance.
func (s *localServerSuite) TestStartInstanceNetwork(c *gc.C) {
	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
		// A label that corresponds to a nova test service network
		"network": "net",
	}))
	c.Assert(err, gc.IsNil)
	env, err := environs.New(cfg)
	c.Assert(err, gc.IsNil)
	inst, _ := testing.AssertStartInstance(c, env, "100")
	err = env.StopInstances(inst.Id())
	c.Assert(err, gc.IsNil)
}
// TestStartInstance is a smoke test: after bootstrapping with fake
// controller config, a fresh instance can be started and stopped
// without error.
// NOTE(review): the previous header comment described public-IP
// allocation behaviour that this test does not exercise; it appears to
// have been copied from TestStartInstanceWithoutPublicIP.
func (s *localServerSuite) TestStartInstance(c *gc.C) {
	env := s.Prepare(c)
	err := bootstrap.Bootstrap(bootstrapContext(c), env, bootstrap.BootstrapParams{
		ControllerConfig: coretesting.FakeControllerConfig(),
		AdminSecret:      testing.AdminSecret,
		CAPrivateKey:     coretesting.CAKey,
	})
	c.Assert(err, jc.ErrorIsNil)
	inst, _ := testing.AssertStartInstance(c, env, s.ControllerUUID, "100")
	err = env.StopInstances(inst.Id())
	c.Assert(err, jc.ErrorIsNil)
}
// TestStartStop is similar to Tests.TestStartStop except
// that it does not assume a pristine environment.
func (t *LiveTests) TestStartStop(c *gc.C) {
	t.PrepareOnce(c)
	t.UploadFakeTools(c, t.Env.Storage())
	inst, _ := testing.AssertStartInstance(c, t.Env, "0")
	c.Assert(inst, gc.NotNil)
	id0 := inst.Id()
	// Asking for the same id twice yields two entries for it.
	insts, err := t.Env.Instances([]instance.Id{id0, id0})
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 2)
	c.Assert(insts[0].Id(), gc.Equals, id0)
	c.Assert(insts[1].Id(), gc.Equals, id0)
	// Asserting on the return of AllInstances makes the test fragile,
	// as even comparing the before and after start values can be thrown
	// off if other instances have been created or destroyed in the same
	// time frame. Instead, just check the instance we created exists.
	insts, err = t.Env.AllInstances()
	c.Assert(err, gc.IsNil)
	found := false
	for _, inst := range insts {
		if inst.Id() == id0 {
			// The id must appear exactly once.
			c.Assert(found, gc.Equals, false, gc.Commentf("%v", insts))
			found = true
		}
	}
	c.Assert(found, gc.Equals, true, gc.Commentf("expected %v in %v", inst, insts))
	addresses, err := jujutesting.WaitInstanceAddresses(t.Env, inst.Id())
	c.Assert(err, gc.IsNil)
	c.Assert(addresses, gc.Not(gc.HasLen), 0)
	// An unknown ("") id produces a nil slot and a partial error.
	insts, err = t.Env.Instances([]instance.Id{id0, ""})
	c.Assert(err, gc.Equals, environs.ErrPartialInstances)
	c.Assert(insts, gc.HasLen, 2)
	c.Check(insts[0].Id(), gc.Equals, id0)
	c.Check(insts[1], gc.IsNil)
	err = t.Env.StopInstances(inst.Id())
	c.Assert(err, gc.IsNil)
	// The machine may not be marked as shutting down
	// immediately. Repeat a few times to ensure we get the error.
	for a := t.Attempt.Start(); a.Next(); {
		insts, err = t.Env.Instances([]instance.Id{id0})
		if err != nil {
			break
		}
	}
	c.Assert(err, gc.Equals, environs.ErrNoInstances)
	c.Assert(insts, gc.HasLen, 0)
}
// TestInstancesGathering runs the table-driven instanceGathering cases
// against two live instances, checking Instances' result ordering,
// nil-slot behaviour, and partial/no-instance errors.
func (s *localServerSuite) TestInstancesGathering(c *gc.C) {
	env := s.Prepare(c)
	s.Tests.UploadFakeTools(c, env.Storage())
	inst0, _ := testing.AssertStartInstance(c, env, "100")
	id0 := inst0.Id()
	inst1, _ := testing.AssertStartInstance(c, env, "101")
	id1 := inst1.Id()
	c.Logf("id0: %s, id1: %s", id0, id1)
	defer func() {
		err := env.StopInstances(inst0.Id(), inst1.Id())
		c.Assert(err, gc.IsNil)
	}()
	for i, test := range instanceGathering {
		c.Logf("test %d: find %v -> expect len %d, err: %v", i, test.ids, len(test.ids), test.err)
		// Map the symbolic ids in the test table to the real ids;
		// unknown entries stay "" and should yield nil results.
		ids := make([]instance.Id, len(test.ids))
		for j, id := range test.ids {
			switch id {
			case "id0":
				ids[j] = id0
			case "id1":
				ids[j] = id1
			}
		}
		insts, err := env.Instances(ids)
		c.Assert(err, gc.Equals, test.err)
		if err == environs.ErrNoInstances {
			c.Assert(insts, gc.HasLen, 0)
		} else {
			c.Assert(insts, gc.HasLen, len(test.ids))
		}
		for j, inst := range insts {
			if ids[j] != "" {
				c.Assert(inst.Id(), gc.Equals, ids[j])
			} else {
				c.Assert(inst, gc.IsNil)
			}
		}
	}
}