func (t *LiveTests) TestStopInstances(c *C) {
    // It would be nice if this test were in jujutest, but
    // there's no way for jujutest to fabricate a valid-looking
    // instance id.
    inst0, _ := testing.StartInstance(c, t.Env, "40")
    inst1 := ec2.FabricateInstance(inst0, "i-aaaaaaaa")
    inst2, _ := testing.StartInstance(c, t.Env, "41")

    err := t.Env.StopInstances([]instance.Instance{inst0, inst1, inst2})
    c.Check(err, IsNil)

    var insts []instance.Instance

    // We need the retry logic here because we are waiting
    // for Instances to return an error, and it will not retry
    // if it succeeds.
    gone := false
    for a := ec2.ShortAttempt.Start(); a.Next(); {
        insts, err = t.Env.Instances([]instance.Id{inst0.Id(), inst2.Id()})
        if err == nil || err == environs.ErrPartialInstances {
            // Instances not gone yet; keep polling.
            continue
        }
        if err == environs.ErrNoInstances {
            gone = true
            break
        }
        c.Fatalf("error getting instances: %v", err)
    }
    if !gone {
        c.Errorf("after termination, instances remaining: %v", insts)
    }
}
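// A minimal sketch of the polling pattern used above, assuming the
// utils.AttemptStrategy type from juju-core (imports: fmt, time,
// launchpad.net/juju-core/utils). The durations and the waitForGone
// helper are illustrative only, not part of the real ec2 package.
var shortAttempt = utils.AttemptStrategy{
    Total: 5 * time.Second,        // give up after this long
    Delay: 200 * time.Millisecond, // pause between polls
}

func waitForGone(env environs.Environ, ids []instance.Id) error {
    for a := shortAttempt.Start(); a.Next(); {
        _, err := env.Instances(ids)
        switch err {
        case environs.ErrNoInstances:
            return nil // every instance has been terminated
        case nil, environs.ErrPartialInstances:
            continue // some instances are still visible; poll again
        default:
            return err // a real provider error
        }
    }
    return fmt.Errorf("instances %v still present after timeout", ids)
}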
func (t *Tests) TestStartStop(c *C) {
    e := t.Open(c)
    envtesting.UploadFakeTools(c, e.Storage())
    cfg, err := e.Config().Apply(map[string]interface{}{
        "agent-version": version.Current.Number.String(),
    })
    c.Assert(err, IsNil)
    err = e.SetConfig(cfg)
    c.Assert(err, IsNil)

    insts, err := e.Instances(nil)
    c.Assert(err, IsNil)
    c.Assert(insts, HasLen, 0)

    inst0, hc := testing.StartInstance(c, e, "0")
    c.Assert(inst0, NotNil)
    id0 := inst0.Id()
    // Sanity check for hardware characteristics.
    c.Assert(hc.Arch, NotNil)
    c.Assert(hc.Mem, NotNil)
    c.Assert(hc.CpuCores, NotNil)

    inst1, _ := testing.StartInstance(c, e, "1")
    c.Assert(inst1, NotNil)
    id1 := inst1.Id()

    insts, err = e.Instances([]instance.Id{id0, id1})
    c.Assert(err, IsNil)
    c.Assert(insts, HasLen, 2)
    c.Assert(insts[0].Id(), Equals, id0)
    c.Assert(insts[1].Id(), Equals, id1)

    // The order of results from AllInstances is not specified.
    insts, err = e.AllInstances()
    c.Assert(err, IsNil)
    c.Assert(insts, HasLen, 2)
    c.Assert(insts[0].Id(), Not(Equals), insts[1].Id())

    err = e.StopInstances([]instance.Instance{inst0})
    c.Assert(err, IsNil)

    insts, err = e.Instances([]instance.Id{id0, id1})
    c.Assert(err, Equals, environs.ErrPartialInstances)
    c.Assert(insts[0], IsNil)
    c.Assert(insts[1].Id(), Equals, id1)

    insts, err = e.AllInstances()
    c.Assert(err, IsNil)
    c.Assert(insts[0].Id(), Equals, id1)
}
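// For reference, a sketch of the Instances contract exercised above
// (summarized from the assertions in this suite, not quoted from the
// interface documentation): given n requested ids, any full or partial
// result has length n, with nil entries marking instances not found.
//
//    insts, err := e.Instances(ids)
//    switch err {
//    case nil:                          // all ids found; no nil entries
//    case environs.ErrPartialInstances: // len(insts) == len(ids); nil marks missing
//    case environs.ErrNoInstances:      // none found; insts is empty
//    }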
// If the environment is configured not to require a public IP address for
// nodes, bootstrapping and starting an instance should occur without any
// attempt to allocate a public address.
func (s *localServerSuite) TestStartInstanceWithoutPublicIP(c *C) {
    cleanup := s.srv.Service.Nova.RegisterControlPoint(
        "addFloatingIP",
        func(sc hook.ServiceControl, args ...interface{}) error {
            return fmt.Errorf("add floating IP should not have been called")
        },
    )
    defer cleanup()
    cleanup = s.srv.Service.Nova.RegisterControlPoint(
        "addServerFloatingIP",
        func(sc hook.ServiceControl, args ...interface{}) error {
            return fmt.Errorf("add server floating IP should not have been called")
        },
    )
    defer cleanup()

    cfg, err := s.Env.Config().Apply(map[string]interface{}{
        "use-floating-ip": false,
    })
    c.Assert(err, IsNil)
    env, err := environs.New(cfg)
    c.Assert(err, IsNil)
    err = environs.Bootstrap(env, constraints.Value{})
    c.Assert(err, IsNil)
    inst, _ := testing.StartInstance(c, env, "100")
    err = s.Env.StopInstances([]instance.Instance{inst})
    c.Assert(err, IsNil)
}
func (s *SSHCommonSuite) makeMachines(n int, c *C) []*state.Machine {
    var machines = make([]*state.Machine, n)
    for i := 0; i < n; i++ {
        m, err := s.State.AddMachine("series", state.JobHostUnits)
        c.Assert(err, IsNil)
        // We must set an instance id, as the ssh command uses that as
        // a signal that the machine has been provisioned.
        inst, md := testing.StartInstance(c, s.Conn.Environ, m.Id())
        c.Assert(m.SetProvisioned(inst.Id(), "fake_nonce", md), IsNil)
        machines[i] = m
    }
    return machines
}
// TestStartStop is similar to Tests.TestStartStop except
// that it does not assume a pristine environment.
func (t *LiveTests) TestStartStop(c *C) {
    inst, _ := testing.StartInstance(c, t.Env, "0")
    c.Assert(inst, NotNil)
    id0 := inst.Id()

    insts, err := t.Env.Instances([]instance.Id{id0, id0})
    c.Assert(err, IsNil)
    c.Assert(insts, HasLen, 2)
    c.Assert(insts[0].Id(), Equals, id0)
    c.Assert(insts[1].Id(), Equals, id0)

    // Asserting on the return of AllInstances makes the test fragile,
    // as even comparing the before and after start values can be thrown
    // off if other instances have been created or destroyed in the same
    // time frame. Instead, just check that the instance we created exists.
    insts, err = t.Env.AllInstances()
    c.Assert(err, IsNil)
    found := false
    for _, inst := range insts {
        if inst.Id() == id0 {
            c.Assert(found, Equals, false, Commentf("%v", insts))
            found = true
        }
    }
    c.Assert(found, Equals, true, Commentf("expected %v in %v", inst, insts))

    dns, err := inst.WaitDNSName()
    c.Assert(err, IsNil)
    c.Assert(dns, Not(Equals), "")

    insts, err = t.Env.Instances([]instance.Id{id0, ""})
    c.Assert(err, Equals, environs.ErrPartialInstances)
    c.Assert(insts, HasLen, 2)
    c.Check(insts[0].Id(), Equals, id0)
    c.Check(insts[1], IsNil)

    err = t.Env.StopInstances([]instance.Instance{inst})
    c.Assert(err, IsNil)

    // The machine may not be marked as shutting down
    // immediately. Repeat a few times to ensure we get the error.
    for a := t.Attempt.Start(); a.Next(); {
        insts, err = t.Env.Instances([]instance.Id{id0})
        if err != nil {
            break
        }
    }
    c.Assert(err, Equals, environs.ErrNoInstances)
    c.Assert(insts, HasLen, 0)
}
func (s *localServerSuite) TestInstancesGathering(c *C) {
    inst0, _ := testing.StartInstance(c, s.Env, "100")
    id0 := inst0.Id()
    inst1, _ := testing.StartInstance(c, s.Env, "101")
    id1 := inst1.Id()
    defer func() {
        err := s.Env.StopInstances([]instance.Instance{inst0, inst1})
        c.Assert(err, IsNil)
    }()

    for i, test := range instanceGathering {
        c.Logf("test %d: find %v -> expect len %d, err: %v", i, test.ids, len(test.ids), test.err)
        ids := make([]instance.Id, len(test.ids))
        for j, id := range test.ids {
            switch id {
            case "id0":
                ids[j] = id0
            case "id1":
                ids[j] = id1
            }
        }
        insts, err := s.Env.Instances(ids)
        c.Assert(err, Equals, test.err)
        if err == environs.ErrNoInstances {
            c.Assert(insts, HasLen, 0)
        } else {
            c.Assert(insts, HasLen, len(test.ids))
        }
        for j, inst := range insts {
            if ids[j] != "" {
                c.Assert(inst.Id(), Equals, ids[j])
            } else {
                c.Assert(inst, IsNil)
            }
        }
    }
}
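// The instanceGathering table consumed above is defined elsewhere in the
// suite; a plausible shape, reconstructed from how the loop uses it (kept
// in comment form to avoid redefining the real symbol), is:
//
//    var instanceGathering = []struct {
//        ids []instance.Id // "id0"/"id1" placeholders, swapped for real ids
//        err error         // expected error from Instances
//    }{
//        {ids: []instance.Id{"id0"}},
//        {ids: []instance.Id{"id0", "id1"}},
//        {ids: []instance.Id{""}, err: environs.ErrNoInstances},
//        {ids: []instance.Id{"", "id1"}, err: environs.ErrPartialInstances},
//        {ids: []instance.Id{"id0", ""}, err: environs.ErrPartialInstances},
//    }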
func (t *LiveTests) TestInstanceAttributes(c *C) {
    inst, hc := testing.StartInstance(c, t.Env, "30")
    defer t.Env.StopInstances([]instance.Instance{inst})

    // Sanity check for hardware characteristics.
    c.Assert(hc.Arch, NotNil)
    c.Assert(hc.Mem, NotNil)
    c.Assert(hc.CpuCores, NotNil)
    c.Assert(hc.CpuPower, NotNil)

    dns, err := inst.WaitDNSName()
    // TODO(niemeyer): This assert sometimes fails with "no instances found".
    c.Assert(err, IsNil)
    c.Assert(dns, Not(Equals), "")

    insts, err := t.Env.Instances([]instance.Id{inst.Id()})
    c.Assert(err, IsNil)
    c.Assert(insts, HasLen, 1)

    ec2inst := ec2.InstanceEC2(insts[0])
    c.Assert(ec2inst.DNSName, Equals, dns)
    c.Assert(ec2inst.InstanceType, Equals, "m1.small")
}
func (t *LiveTests) TestInstanceGroups(c *C) {
    ec2conn := ec2.EnvironEC2(t.Env)

    groups := amzec2.SecurityGroupNames(
        ec2.JujuGroupName(t.Env),
        ec2.MachineGroupName(t.Env, "98"),
        ec2.MachineGroupName(t.Env, "99"),
    )
    info := make([]amzec2.SecurityGroupInfo, len(groups))

    // Create a group with the same name as the juju group
    // but with different permissions, to check that it's deleted
    // and recreated correctly.
    oldJujuGroup := createGroup(c, ec2conn, groups[0].Name, "old juju group")

    // Add two permissions: one is required and should be left alone;
    // the other is not and should be deleted.
    // N.B. this is unfortunately sensitive to the actual set of permissions used.
    _, err := ec2conn.AuthorizeSecurityGroup(oldJujuGroup,
        []amzec2.IPPerm{
            {
                Protocol:  "tcp",
                FromPort:  22,
                ToPort:    22,
                SourceIPs: []string{"0.0.0.0/0"},
            },
            {
                Protocol:  "udp",
                FromPort:  4321,
                ToPort:    4322,
                SourceIPs: []string{"3.4.5.6/32"},
            },
        })
    c.Assert(err, IsNil)

    inst0, _ := testing.StartInstance(c, t.Env, "98")
    defer t.Env.StopInstances([]instance.Instance{inst0})

    // Create a same-named group for the second instance
    // before starting it, to check that it's reused correctly.
    oldMachineGroup := createGroup(c, ec2conn, groups[2].Name, "old machine group")

    inst1, _ := testing.StartInstance(c, t.Env, "99")
    defer t.Env.StopInstances([]instance.Instance{inst1})

    groupsResp, err := ec2conn.SecurityGroups(groups, nil)
    c.Assert(err, IsNil)
    c.Assert(groupsResp.Groups, HasLen, len(groups))

    // For each group, check that it exists and record its id.
    for i, group := range groups {
        found := false
        for _, g := range groupsResp.Groups {
            if g.Name == group.Name {
                groups[i].Id = g.Id
                info[i] = g
                found = true
                break
            }
        }
        if !found {
            c.Fatalf("group %q not found", group.Name)
        }
    }

    // The old juju group should have been reused.
    c.Check(groups[0].Id, Equals, oldJujuGroup.Id)

    // Check that it authorizes the correct ports and there
    // are no extra permissions (in particular we are checking
    // that the unneeded permission that we added earlier
    // has been deleted).
    perms := info[0].IPPerms
    c.Assert(perms, HasLen, 6)
    checkPortAllowed(c, perms, 22)    // SSH
    checkPortAllowed(c, perms, 37017) // MongoDB
    checkPortAllowed(c, perms, 17070) // API
    checkSecurityGroupAllowed(c, perms, groups[0])

    // The old machine group should have been reused also.
    c.Check(groups[2].Id, Equals, oldMachineGroup.Id)

    // Check that each instance is part of the correct groups.
    resp, err := ec2conn.Instances([]string{string(inst0.Id()), string(inst1.Id())}, nil)
    c.Assert(err, IsNil)
    c.Assert(resp.Reservations, HasLen, 2)
    for _, r := range resp.Reservations {
        c.Assert(r.Instances, HasLen, 1)
        // Each instance must be part of the general juju group.
        msg := Commentf("reservation %#v", r)
        c.Assert(hasSecurityGroup(r, groups[0]), Equals, true, msg)
        inst := r.Instances[0]
        switch instance.Id(inst.InstanceId) {
        case inst0.Id():
            c.Assert(hasSecurityGroup(r, groups[1]), Equals, true, msg)
            c.Assert(hasSecurityGroup(r, groups[2]), Equals, false, msg)
        case inst1.Id():
            c.Assert(hasSecurityGroup(r, groups[2]), Equals, true, msg)
            c.Assert(hasSecurityGroup(r, groups[1]), Equals, false, msg)
        default:
            c.Errorf("unknown instance found: %v", inst)
        }
    }
}
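// For reference, the group-name helpers used above follow the ec2
// provider's naming convention (inferred from the provider's behaviour,
// shown here informally):
//
//    ec2.JujuGroupName(t.Env)          // "juju-" + <environment name>
//    ec2.MachineGroupName(t.Env, "99") // "juju-" + <environment name> + "-99"
//
// The juju group carries environment-wide rules (SSH, mongo, API), while
// each machine group carries that machine's individually opened ports.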
// startInstance starts a new instance for the given machine.
func (s *FirewallerSuite) startInstance(c *C, m *state.Machine) instance.Instance {
    inst, hc := testing.StartInstance(c, s.Conn.Environ, m.Id())
    err := m.SetProvisioned(inst.Id(), "fake_nonce", hc)
    c.Assert(err, IsNil)
    return inst
}
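// A sketch of how the helper above is typically used from a test in this
// suite; the function name and series value are illustrative only.
func (s *FirewallerSuite) exampleUsage(c *C) {
    m, err := s.State.AddMachine("series", state.JobHostUnits)
    c.Assert(err, IsNil)
    inst := s.startInstance(c, m)
    c.Assert(inst, NotNil)
    // m now has a provider instance id and nonce recorded, so the
    // firewaller will treat it as provisioned.
}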
func (t *LiveTests) TestGlobalPorts(c *C) {
    // Change configuration.
    oldConfig := t.Env.Config()
    defer func() {
        err := t.Env.SetConfig(oldConfig)
        c.Assert(err, IsNil)
    }()

    attrs := t.Env.Config().AllAttrs()
    attrs["firewall-mode"] = "global"
    newConfig, err := t.Env.Config().Apply(attrs)
    c.Assert(err, IsNil)
    err = t.Env.SetConfig(newConfig)
    c.Assert(err, IsNil)

    // Create instances and check open ports on both instances.
    inst1, _ := testing.StartInstance(c, t.Env, "1")
    defer t.Env.StopInstances([]instance.Instance{inst1})
    ports, err := t.Env.Ports()
    c.Assert(err, IsNil)
    c.Assert(ports, HasLen, 0)

    inst2, _ := testing.StartInstance(c, t.Env, "2")
    defer t.Env.StopInstances([]instance.Instance{inst2})
    ports, err = t.Env.Ports()
    c.Assert(err, IsNil)
    c.Assert(ports, HasLen, 0)

    err = t.Env.OpenPorts([]instance.Port{{"udp", 67}, {"tcp", 45}, {"tcp", 89}, {"tcp", 99}})
    c.Assert(err, IsNil)

    ports, err = t.Env.Ports()
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}, {"tcp", 99}, {"udp", 67}})

    // Check closing some ports.
    err = t.Env.ClosePorts([]instance.Port{{"tcp", 99}, {"udp", 67}})
    c.Assert(err, IsNil)

    ports, err = t.Env.Ports()
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}})

    // Check that we can close ports that aren't there.
    err = t.Env.ClosePorts([]instance.Port{{"tcp", 111}, {"udp", 222}})
    c.Assert(err, IsNil)

    ports, err = t.Env.Ports()
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}})

    // Check errors when acting on instances.
    err = inst1.OpenPorts("1", []instance.Port{{"tcp", 80}})
    c.Assert(err, ErrorMatches, `invalid firewall mode for opening ports on instance: "global"`)

    err = inst1.ClosePorts("1", []instance.Port{{"tcp", 80}})
    c.Assert(err, ErrorMatches, `invalid firewall mode for closing ports on instance: "global"`)

    _, err = inst1.Ports("1")
    c.Assert(err, ErrorMatches, `invalid firewall mode for retrieving ports from instance: "global"`)
}
func (t *LiveTests) TestPorts(c *C) {
    inst1, _ := testing.StartInstance(c, t.Env, "1")
    c.Assert(inst1, NotNil)
    defer t.Env.StopInstances([]instance.Instance{inst1})
    ports, err := inst1.Ports("1")
    c.Assert(err, IsNil)
    c.Assert(ports, HasLen, 0)

    inst2, _ := testing.StartInstance(c, t.Env, "2")
    c.Assert(inst2, NotNil)
    defer t.Env.StopInstances([]instance.Instance{inst2})
    ports, err = inst2.Ports("2")
    c.Assert(err, IsNil)
    c.Assert(ports, HasLen, 0)

    // Open some ports and check they're there.
    err = inst1.OpenPorts("1", []instance.Port{{"udp", 67}, {"tcp", 45}})
    c.Assert(err, IsNil)
    ports, err = inst1.Ports("1")
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 45}, {"udp", 67}})
    ports, err = inst2.Ports("2")
    c.Assert(err, IsNil)
    c.Assert(ports, HasLen, 0)

    err = inst2.OpenPorts("2", []instance.Port{{"tcp", 89}, {"tcp", 45}})
    c.Assert(err, IsNil)

    // Check there's no crosstalk to another machine.
    ports, err = inst2.Ports("2")
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}})
    ports, err = inst1.Ports("1")
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 45}, {"udp", 67}})

    // Check that opening the same port again is ok.
    oldPorts, err := inst2.Ports("2")
    c.Assert(err, IsNil)
    err = inst2.OpenPorts("2", []instance.Port{{"tcp", 45}})
    c.Assert(err, IsNil)
    ports, err = inst2.Ports("2")
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, oldPorts)

    // Check that opening the same port again and another port is ok.
    err = inst2.OpenPorts("2", []instance.Port{{"tcp", 45}, {"tcp", 99}})
    c.Assert(err, IsNil)
    ports, err = inst2.Ports("2")
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}, {"tcp", 99}})

    err = inst2.ClosePorts("2", []instance.Port{{"tcp", 45}, {"tcp", 99}})
    c.Assert(err, IsNil)

    // Check that we can close ports and that there's no crosstalk.
    ports, err = inst2.Ports("2")
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 89}})
    ports, err = inst1.Ports("1")
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 45}, {"udp", 67}})

    // Check that we can close multiple ports.
    err = inst1.ClosePorts("1", []instance.Port{{"tcp", 45}, {"udp", 67}})
    c.Assert(err, IsNil)
    ports, err = inst1.Ports("1")
    c.Assert(err, IsNil)
    c.Assert(ports, HasLen, 0)

    // Check that we can close ports that aren't there.
    err = inst2.ClosePorts("2", []instance.Port{{"tcp", 111}, {"udp", 222}})
    c.Assert(err, IsNil)
    ports, err = inst2.Ports("2")
    c.Assert(err, IsNil)
    c.Assert(ports, DeepEquals, []instance.Port{{"tcp", 89}})

    // Check errors when acting on the environment.
    err = t.Env.OpenPorts([]instance.Port{{"tcp", 80}})
    c.Assert(err, ErrorMatches, `invalid firewall mode for opening ports on environment: "instance"`)

    err = t.Env.ClosePorts([]instance.Port{{"tcp", 80}})
    c.Assert(err, ErrorMatches, `invalid firewall mode for closing ports on environment: "instance"`)

    _, err = t.Env.Ports()
    c.Assert(err, ErrorMatches, `invalid firewall mode for retrieving ports from environment: "instance"`)
}