func (s *FirewallerSuite) TestGlobalModeStartWithUnexposedService(c *C) { // Change configuration. restore := s.setGlobalMode(c) defer restore(c) m, err := s.State.AddMachine(state.JobHostUnits) c.Assert(err, IsNil) inst, err := s.Conn.Environ.StartInstance(m.Id(), testing.InvalidStateInfo(m.Id()), nil) c.Assert(err, IsNil) err = m.SetInstanceId(inst.Id()) c.Assert(err, IsNil) svc, err := s.State.AddService("wordpress", s.charm) c.Assert(err, IsNil) u, err := svc.AddUnit() c.Assert(err, IsNil) err = u.AssignToMachine(m) c.Assert(err, IsNil) err = u.OpenPort("tcp", 80) c.Assert(err, IsNil) // Starting the firewaller, no open ports. fw := firewaller.NewFirewaller(s.State) defer func() { c.Assert(fw.Stop(), IsNil) }() s.assertEnvironPorts(c, nil) // Expose service. err = svc.SetExposed() c.Assert(err, IsNil) s.assertEnvironPorts(c, []state.Port{{"tcp", 80}}) }
// startInstance starts a new instance for the given machine. func (s *FirewallerSuite) startInstance(c *C, m *state.Machine) environs.Instance { inst, err := s.Conn.Environ.StartInstance(m.Id(), testing.InvalidStateInfo(m.Id()), nil) c.Assert(err, IsNil) err = m.SetInstanceId(inst.Id()) c.Assert(err, IsNil) return inst }
func (s *FirewallerSuite) TestStartWithPartialState(c *C) { m, err := s.State.AddMachine(state.JobHostUnits) c.Assert(err, IsNil) inst, err := s.Conn.Environ.StartInstance(m.Id(), testing.InvalidStateInfo(m.Id()), nil) c.Assert(err, IsNil) err = m.SetInstanceId(inst.Id()) c.Assert(err, IsNil) svc, err := s.State.AddService("wordpress", s.charm) c.Assert(err, IsNil) err = svc.SetExposed() c.Assert(err, IsNil) // Starting the firewaller, no open ports. fw := firewaller.NewFirewaller(s.State) defer func() { c.Assert(fw.Stop(), IsNil) }() s.assertPorts(c, inst, m.Id(), nil) // Complete steps to open port. u, err := svc.AddUnit() c.Assert(err, IsNil) err = u.AssignToMachine(m) c.Assert(err, IsNil) err = u.OpenPort("tcp", 80) c.Assert(err, IsNil) s.assertPorts(c, inst, m.Id(), []state.Port{{"tcp", 80}}) }
// Check that we can't start an instance running tools // that correspond with no available platform. func (t *LiveTests) TestStartInstanceOnUnknownPlatform(c *C) { vers := version.Current // Note that we want this test to function correctly in the // dummy environment, so to avoid enumerating all possible // platforms in the dummy provider, it treats only series and/or // architectures with the "unknown" prefix as invalid. vers.Series = "unknownseries" vers.Arch = "unknownarch" name := environs.ToolsStoragePath(vers) storage := t.Env.Storage() checkPutFile(c, storage, name, []byte("fake tools on invalid series")) defer storage.Remove(name) url, err := storage.URL(name) c.Assert(err, IsNil) tools := &state.Tools{ Binary: vers, URL: url, } inst, err := t.Env.StartInstance("4", testing.InvalidStateInfo("4"), tools) if inst != nil { err := t.Env.StopInstances([]environs.Instance{inst}) c.Check(err, IsNil) } c.Assert(inst, IsNil) c.Assert(err, ErrorMatches, "cannot find image.*") }
func (t *Tests) TestStartStop(c *C) { e := t.Open(c) insts, err := e.Instances(nil) c.Assert(err, IsNil) c.Assert(insts, HasLen, 0) inst0, err := e.StartInstance("0", testing.InvalidStateInfo("0"), nil) c.Assert(err, IsNil) c.Assert(inst0, NotNil) id0 := inst0.Id() inst1, err := e.StartInstance("1", testing.InvalidStateInfo("1"), nil) c.Assert(err, IsNil) c.Assert(inst1, NotNil) id1 := inst1.Id() insts, err = e.Instances([]state.InstanceId{id0, id1}) c.Assert(err, IsNil) c.Assert(insts, HasLen, 2) c.Assert(insts[0].Id(), Equals, id0) c.Assert(insts[1].Id(), Equals, id1) // order of results is not specified insts, err = e.AllInstances() c.Assert(err, IsNil) c.Assert(insts, HasLen, 2) c.Assert(insts[0].Id(), Not(Equals), insts[1].Id()) err = e.StopInstances([]environs.Instance{inst0}) c.Assert(err, IsNil) insts, err = e.Instances([]state.InstanceId{id0, id1}) c.Assert(err, Equals, environs.ErrPartialInstances) c.Assert(insts[0], IsNil) c.Assert(insts[1].Id(), Equals, id1) insts, err = e.AllInstances() c.Assert(err, IsNil) c.Assert(insts[0].Id(), Equals, id1) }
// TestStartStop is similar to Tests.TestStartStop except
// that it does not assume a pristine environment.
func (t *LiveTests) TestStartStop(c *C) {
	inst, err := t.Env.StartInstance("0", testing.InvalidStateInfo("0"), nil)
	c.Assert(err, IsNil)
	c.Assert(inst, NotNil)
	id0 := inst.Id()

	// Requesting the same id twice must yield it in both slots.
	insts, err := t.Env.Instances([]state.InstanceId{id0, id0})
	c.Assert(err, IsNil)
	c.Assert(insts, HasLen, 2)
	c.Assert(insts[0].Id(), Equals, id0)
	c.Assert(insts[1].Id(), Equals, id0)

	// Asserting on the return of AllInstances makes the test fragile,
	// as even comparing the before and after start values can be thrown
	// off if other instances have been created or destroyed in the same
	// time frame. Instead, just check the instance we created exists.
	insts, err = t.Env.AllInstances()
	c.Assert(err, IsNil)
	found := false
	for _, inst := range insts {
		if inst.Id() == id0 {
			// The id must appear exactly once.
			c.Assert(found, Equals, false, Commentf("%v", insts))
			found = true
		}
	}
	c.Assert(found, Equals, true, Commentf("expected %v in %v", inst, insts))

	dns, err := inst.WaitDNSName()
	c.Assert(err, IsNil)
	c.Assert(dns, Not(Equals), "")

	// An unknown id produces a nil slot and ErrPartialInstances.
	insts, err = t.Env.Instances([]state.InstanceId{id0, ""})
	c.Assert(err, Equals, environs.ErrPartialInstances)
	c.Assert(insts, HasLen, 2)
	c.Check(insts[0].Id(), Equals, id0)
	c.Check(insts[1], IsNil)

	err = t.Env.StopInstances([]environs.Instance{inst})
	c.Assert(err, IsNil)

	// The machine may not be marked as shutting down
	// immediately. Repeat a few times to ensure we get the error.
	for a := t.Attempt.Start(); a.Next(); {
		insts, err = t.Env.Instances([]state.InstanceId{id0})
		if err != nil {
			break
		}
	}
	c.Assert(err, Equals, environs.ErrNoInstances)
	c.Assert(insts, HasLen, 0)
}
func (s *SSHCommonSuite) makeMachines(n int, c *C) []*state.Machine { var machines = make([]*state.Machine, n) for i := 0; i < n; i++ { m, err := s.State.AddMachine(state.JobHostUnits) c.Assert(err, IsNil) // must set an instance id as the ssh command uses that as a signal the machine // has been provisioned inst, err := s.Conn.Environ.StartInstance(m.Id(), testing.InvalidStateInfo(m.Id()), nil) c.Assert(err, IsNil) c.Assert(m.SetInstanceId(inst.Id()), IsNil) machines[i] = m } return machines }
// TestStopInstances checks that StopInstances terminates the given
// instances, tolerating a fabricated (never-started) instance in the
// list, and that the instances eventually disappear from Instances.
func (t *LiveTests) TestStopInstances(c *C) {
	// It would be nice if this test was in jujutest, but
	// there's no way for jujutest to fabricate a valid-looking
	// instance id.
	inst0, err := t.Env.StartInstance("40", testing.InvalidStateInfo("40"), nil)
	c.Assert(err, IsNil)
	// inst1 carries a fabricated id — it was never actually started,
	// so StopInstances must not fail on it.
	inst1 := ec2.FabricateInstance(inst0, "i-aaaaaaaa")
	inst2, err := t.Env.StartInstance("41", testing.InvalidStateInfo("41"), nil)
	c.Assert(err, IsNil)

	err = t.Env.StopInstances([]environs.Instance{inst0, inst1, inst2})
	c.Check(err, IsNil)

	var insts []environs.Instance

	// We need the retry logic here because we are waiting
	// for Instances to return an error, and it will not retry
	// if it succeeds.
	gone := false
	for a := ec2.ShortAttempt.Start(); a.Next(); {
		insts, err = t.Env.Instances([]state.InstanceId{inst0.Id(), inst2.Id()})
		if err == environs.ErrPartialInstances {
			// instances not gone yet.
			continue
		}
		if err == environs.ErrNoInstances {
			gone = true
			break
		}
		c.Fatalf("error getting instances: %v", err)
	}
	if !gone {
		c.Errorf("after termination, instances remaining: %v", insts)
	}
}
func (t *LiveTests) TestInstanceDNSName(c *C) { inst, err := t.Env.StartInstance("30", testing.InvalidStateInfo("30"), nil) c.Assert(err, IsNil) defer t.Env.StopInstances([]environs.Instance{inst}) dns, err := inst.WaitDNSName() // TODO(niemeyer): This assert sometimes fails with "no instances found" c.Assert(err, IsNil) c.Assert(dns, Not(Equals), "") insts, err := t.Env.Instances([]state.InstanceId{inst.Id()}) c.Assert(err, IsNil) c.Assert(len(insts), Equals, 1) ec2inst := ec2.InstanceEC2(insts[0]) c.Assert(ec2inst.DNSName, Equals, dns) }
// TestInstanceGroups checks EC2 security-group management: a
// pre-existing juju group keeps its id but ends up with exactly the
// required permissions, pre-existing machine groups are reused, and
// each instance belongs to the juju group plus only its own machine
// group.
func (t *LiveTests) TestInstanceGroups(c *C) {
	ec2conn := ec2.EnvironEC2(t.Env)

	// groups[0] is the environment-wide juju group; groups[1] and
	// groups[2] are the per-machine groups for machines "98" and "99".
	groups := amzec2.SecurityGroupNames(
		ec2.JujuGroupName(t.Env),
		ec2.MachineGroupName(t.Env, "98"),
		ec2.MachineGroupName(t.Env, "99"),
	)
	info := make([]amzec2.SecurityGroupInfo, len(groups))

	// Create a group with the same name as the juju group
	// but with different permissions, to check that it's deleted
	// and recreated correctly.
	oldJujuGroup := createGroup(c, ec2conn, groups[0].Name, "old juju group")

	// Add two permissions: one is required and should be left alone;
	// the other is not and should be deleted.
	// N.B. this is unfortunately sensitive to the actual set of permissions used.
	_, err := ec2conn.AuthorizeSecurityGroup(oldJujuGroup,
		[]amzec2.IPPerm{
			{
				Protocol:  "tcp",
				FromPort:  22,
				ToPort:    22,
				SourceIPs: []string{"0.0.0.0/0"},
			},
			{
				Protocol:  "udp",
				FromPort:  4321,
				ToPort:    4322,
				SourceIPs: []string{"3.4.5.6/32"},
			},
		})
	c.Assert(err, IsNil)

	inst0, err := t.Env.StartInstance("98", testing.InvalidStateInfo("98"), nil)
	c.Assert(err, IsNil)
	defer t.Env.StopInstances([]environs.Instance{inst0})

	// Create a same-named group for the second instance
	// before starting it, to check that it's reused correctly.
	oldMachineGroup := createGroup(c, ec2conn, groups[2].Name, "old machine group")

	inst1, err := t.Env.StartInstance("99", testing.InvalidStateInfo("99"), nil)
	c.Assert(err, IsNil)
	defer t.Env.StopInstances([]environs.Instance{inst1})

	groupsResp, err := ec2conn.SecurityGroups(groups, nil)
	c.Assert(err, IsNil)
	c.Assert(groupsResp.Groups, HasLen, len(groups))

	// For each group, check that it exists and record its id.
	for i, group := range groups {
		found := false
		for _, g := range groupsResp.Groups {
			if g.Name == group.Name {
				groups[i].Id = g.Id
				info[i] = g
				found = true
				break
			}
		}
		if !found {
			c.Fatalf("group %q not found", group.Name)
		}
	}

	// The old juju group should have been reused.
	c.Check(groups[0].Id, Equals, oldJujuGroup.Id)

	// Check that it authorizes the correct ports and there
	// are no extra permissions (in particular we are checking
	// that the unneeded permission that we added earlier
	// has been deleted).
	perms := info[0].IPPerms
	c.Assert(perms, HasLen, 5)
	checkPortAllowed(c, perms, 22)    // SSH
	checkPortAllowed(c, perms, 37017) // MongoDB
	checkSecurityGroupAllowed(c, perms, groups[0])

	// The old machine group should have been reused also.
	c.Check(groups[2].Id, Equals, oldMachineGroup.Id)

	// Check that each instance is part of the correct groups.
	resp, err := ec2conn.Instances([]string{string(inst0.Id()), string(inst1.Id())}, nil)
	c.Assert(err, IsNil)
	c.Assert(resp.Reservations, HasLen, 2)
	for _, r := range resp.Reservations {
		c.Assert(r.Instances, HasLen, 1)
		// each instance must be part of the general juju group.
		msg := Commentf("reservation %#v", r)
		c.Assert(hasSecurityGroup(r, groups[0]), Equals, true, msg)
		inst := r.Instances[0]
		switch state.InstanceId(inst.InstanceId) {
		case inst0.Id():
			// Machine 98's instance is in its own group only.
			c.Assert(hasSecurityGroup(r, groups[1]), Equals, true, msg)
			c.Assert(hasSecurityGroup(r, groups[2]), Equals, false, msg)
		case inst1.Id():
			// Machine 99's instance is in its own group only.
			c.Assert(hasSecurityGroup(r, groups[2]), Equals, true, msg)
			c.Assert(hasSecurityGroup(r, groups[1]), Equals, false, msg)
		default:
			c.Errorf("unknown instance found: %v", inst)
		}
	}
}
}, map[string]interface{}{ "machines": map[string]interface{}{ "0": map[string]interface{}{ "instance-id": "pending", }, }, "services": make(map[string]interface{}), }, }, { "simulate the PA starting an instance in response to the state change", func(st *state.State, conn *juju.Conn, c *C) { m, err := st.Machine("0") c.Assert(err, IsNil) inst, err := conn.Environ.StartInstance(m.Id(), testing.InvalidStateInfo(m.Id()), nil) c.Assert(err, IsNil) err = m.SetInstanceId(inst.Id()) c.Assert(err, IsNil) }, map[string]interface{}{ "machines": map[string]interface{}{ "0": map[string]interface{}{ "dns-name": "dummyenv-0.dns", "instance-id": "dummyenv-0", }, }, "services": make(map[string]interface{}), }, }, {
// TestGlobalPorts checks port handling in "global" firewall mode:
// ports are opened and closed on the environment as a whole, and the
// per-instance port operations are rejected.
func (t *LiveTests) TestGlobalPorts(c *C) {
	// Change configuration.
	oldConfig := t.Env.Config()
	defer func() {
		// Restore the original firewall mode when the test finishes.
		err := t.Env.SetConfig(oldConfig)
		c.Assert(err, IsNil)
	}()

	attrs := t.Env.Config().AllAttrs()
	attrs["firewall-mode"] = "global"
	newConfig, err := t.Env.Config().Apply(attrs)
	c.Assert(err, IsNil)
	err = t.Env.SetConfig(newConfig)
	c.Assert(err, IsNil)

	// Create instances and check open ports on both instances.
	inst1, err := t.Env.StartInstance("1", testing.InvalidStateInfo("1"), nil)
	c.Assert(err, IsNil)
	defer t.Env.StopInstances([]environs.Instance{inst1})
	ports, err := t.Env.Ports()
	c.Assert(err, IsNil)
	c.Assert(ports, HasLen, 0)

	inst2, err := t.Env.StartInstance("2", testing.InvalidStateInfo("2"), nil)
	c.Assert(err, IsNil)
	ports, err = t.Env.Ports()
	c.Assert(err, IsNil)
	c.Assert(ports, HasLen, 0)
	defer t.Env.StopInstances([]environs.Instance{inst2})

	// Open environment-wide ports; Ports reports them in sorted
	// order, as asserted below.
	err = t.Env.OpenPorts([]state.Port{{"udp", 67}, {"tcp", 45}, {"tcp", 89}, {"tcp", 99}})
	c.Assert(err, IsNil)
	ports, err = t.Env.Ports()
	c.Assert(err, IsNil)
	c.Assert(ports, DeepEquals, []state.Port{{"tcp", 45}, {"tcp", 89}, {"tcp", 99}, {"udp", 67}})

	// Check closing some ports.
	err = t.Env.ClosePorts([]state.Port{{"tcp", 99}, {"udp", 67}})
	c.Assert(err, IsNil)
	ports, err = t.Env.Ports()
	c.Assert(err, IsNil)
	c.Assert(ports, DeepEquals, []state.Port{{"tcp", 45}, {"tcp", 89}})

	// Check that we can close ports that aren't there.
	err = t.Env.ClosePorts([]state.Port{{"tcp", 111}, {"udp", 222}})
	c.Assert(err, IsNil)
	ports, err = t.Env.Ports()
	c.Assert(err, IsNil)
	c.Assert(ports, DeepEquals, []state.Port{{"tcp", 45}, {"tcp", 89}})

	// Check errors when acting on instances.
	err = inst1.OpenPorts("1", []state.Port{{"tcp", 80}})
	c.Assert(err, ErrorMatches, `invalid firewall mode for opening ports on instance: "global"`)

	err = inst1.ClosePorts("1", []state.Port{{"tcp", 80}})
	c.Assert(err, ErrorMatches, `invalid firewall mode for closing ports on instance: "global"`)

	_, err = inst1.Ports("1")
	c.Assert(err, ErrorMatches, `invalid firewall mode for retrieving ports from instance: "global"`)
}
func (t *LiveTests) TestPorts(c *C) { inst1, err := t.Env.StartInstance("1", testing.InvalidStateInfo("1"), nil) c.Assert(err, IsNil) c.Assert(inst1, NotNil) defer t.Env.StopInstances([]environs.Instance{inst1}) ports, err := inst1.Ports("1") c.Assert(err, IsNil) c.Assert(ports, HasLen, 0) inst2, err := t.Env.StartInstance("2", testing.InvalidStateInfo("2"), nil) c.Assert(err, IsNil) c.Assert(inst2, NotNil) ports, err = inst2.Ports("2") c.Assert(err, IsNil) c.Assert(ports, HasLen, 0) defer t.Env.StopInstances([]environs.Instance{inst2}) // Open some ports and check they're there. err = inst1.OpenPorts("1", []state.Port{{"udp", 67}, {"tcp", 45}}) c.Assert(err, IsNil) ports, err = inst1.Ports("1") c.Assert(err, IsNil) c.Assert(ports, DeepEquals, []state.Port{{"tcp", 45}, {"udp", 67}}) ports, err = inst2.Ports("2") c.Assert(err, IsNil) c.Assert(ports, HasLen, 0) err = inst2.OpenPorts("2", []state.Port{{"tcp", 89}, {"tcp", 45}}) c.Assert(err, IsNil) // Check there's no crosstalk to another machine ports, err = inst2.Ports("2") c.Assert(err, IsNil) c.Assert(ports, DeepEquals, []state.Port{{"tcp", 45}, {"tcp", 89}}) ports, err = inst1.Ports("1") c.Assert(err, IsNil) c.Assert(ports, DeepEquals, []state.Port{{"tcp", 45}, {"udp", 67}}) // Check that opening the same port again is ok. oldPorts, err := inst2.Ports("2") c.Assert(err, IsNil) err = inst2.OpenPorts("2", []state.Port{{"tcp", 45}}) c.Assert(err, IsNil) ports, err = inst2.Ports("2") c.Assert(err, IsNil) c.Assert(ports, DeepEquals, oldPorts) // Check that opening the same port again and another port is ok. err = inst2.OpenPorts("2", []state.Port{{"tcp", 45}, {"tcp", 99}}) c.Assert(err, IsNil) ports, err = inst2.Ports("2") c.Assert(err, IsNil) c.Assert(ports, DeepEquals, []state.Port{{"tcp", 45}, {"tcp", 89}, {"tcp", 99}}) err = inst2.ClosePorts("2", []state.Port{{"tcp", 45}, {"tcp", 99}}) c.Assert(err, IsNil) // Check that we can close ports and that there's no crosstalk. 
ports, err = inst2.Ports("2") c.Assert(err, IsNil) c.Assert(ports, DeepEquals, []state.Port{{"tcp", 89}}) ports, err = inst1.Ports("1") c.Assert(err, IsNil) c.Assert(ports, DeepEquals, []state.Port{{"tcp", 45}, {"udp", 67}}) // Check that we can close multiple ports. err = inst1.ClosePorts("1", []state.Port{{"tcp", 45}, {"udp", 67}}) c.Assert(err, IsNil) ports, err = inst1.Ports("1") c.Assert(ports, HasLen, 0) // Check that we can close ports that aren't there. err = inst2.ClosePorts("2", []state.Port{{"tcp", 111}, {"udp", 222}}) c.Assert(err, IsNil) ports, err = inst2.Ports("2") c.Assert(ports, DeepEquals, []state.Port{{"tcp", 89}}) // Check errors when acting on environment. err = t.Env.OpenPorts([]state.Port{{"tcp", 80}}) c.Assert(err, ErrorMatches, `invalid firewall mode for opening ports on environment: "instance"`) err = t.Env.ClosePorts([]state.Port{{"tcp", 80}}) c.Assert(err, ErrorMatches, `invalid firewall mode for closing ports on environment: "instance"`) _, err = t.Env.Ports() c.Assert(err, ErrorMatches, `invalid firewall mode for retrieving ports from environment: "instance"`) }