func (t *LiveTests) TestStopInstances(c *gc.C) {
	// It would be nice if this test was in jujutest, but
	// there's no way for jujutest to fabricate a valid-looking
	// instance id.
	inst0, _ := testing.AssertStartInstance(c, t.Env, "40")
	inst1 := ec2.FabricateInstance(inst0, "i-aaaaaaaa")
	inst2, _ := testing.AssertStartInstance(c, t.Env, "41")

	err := t.Env.StopInstances([]instance.Instance{inst0, inst1, inst2})
	c.Check(err, gc.IsNil)

	var insts []instance.Instance

	// We need the retry logic here because we are waiting
	// for Instances to return an error, and it will not retry
	// if it succeeds.
	gone := false
	for a := ec2.ShortAttempt.Start(); a.Next(); {
		insts, err = t.Env.Instances([]instance.Id{inst0.Id(), inst2.Id()})
		if err == environs.ErrPartialInstances {
			// Instances not gone yet.
			continue
		}
		if err == environs.ErrNoInstances {
			gone = true
			break
		}
		c.Fatalf("error getting instances: %v", err)
	}
	if !gone {
		c.Errorf("after termination, instances remaining: %v", insts)
	}
}
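
// The ShortAttempt polling loop above is the standard juju retry idiom: an
// attempt strategy couples a total time budget with a delay between tries.
// The following is a minimal, self-contained sketch of that pattern under
// assumed semantics (hypothetical names, not juju's utils.AttemptStrategy):

type attemptStrategy struct {
	total time.Duration // overall time budget for all attempts
	delay time.Duration // pause between consecutive attempts
}

type attempt struct {
	strategy attemptStrategy
	deadline time.Time
	started  bool
}

func (s attemptStrategy) Start() *attempt {
	return &attempt{strategy: s, deadline: time.Now().Add(s.total)}
}

// Next reports whether another attempt should be made, sleeping for the
// configured delay between attempts; the first attempt is always allowed.
func (a *attempt) Next() bool {
	if !a.started {
		a.started = true
		return true
	}
	if time.Now().After(a.deadline) {
		return false
	}
	time.Sleep(a.strategy.delay)
	return true
}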
// setUpInstances adds machines to state backed by instances:
// one manager machine, and one non-manager.
func (s *destroyEnvironmentSuite) setUpInstances(c *gc.C) (m0, m1 *state.Machine) {
	m0, err := s.State.AddMachine("precise", state.JobManageEnviron)
	c.Assert(err, gc.IsNil)
	inst, _ := testing.AssertStartInstance(c, s.APIConn.Environ, m0.Id())
	err = m0.SetProvisioned(inst.Id(), "fake_nonce", nil)
	c.Assert(err, gc.IsNil)

	m1, err = s.State.AddMachine("precise", state.JobHostUnits)
	c.Assert(err, gc.IsNil)
	inst, _ = testing.AssertStartInstance(c, s.APIConn.Environ, m1.Id())
	err = m1.SetProvisioned(inst.Id(), "fake_nonce", nil)
	c.Assert(err, gc.IsNil)

	return m0, m1
}
func (t *Tests) TestStartStop(c *gc.C) {
	e := t.Prepare(c)
	envtesting.UploadFakeTools(c, e.Storage())
	cfg, err := e.Config().Apply(map[string]interface{}{
		"agent-version": version.Current.Number.String(),
	})
	c.Assert(err, gc.IsNil)
	err = e.SetConfig(cfg)
	c.Assert(err, gc.IsNil)

	insts, err := e.Instances(nil)
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 0)

	inst0, hc := testing.AssertStartInstance(c, e, "0")
	c.Assert(inst0, gc.NotNil)
	id0 := inst0.Id()
	// Sanity check for hardware characteristics.
	c.Assert(hc.Arch, gc.NotNil)
	c.Assert(hc.Mem, gc.NotNil)
	c.Assert(hc.CpuCores, gc.NotNil)

	inst1, _ := testing.AssertStartInstance(c, e, "1")
	c.Assert(inst1, gc.NotNil)
	id1 := inst1.Id()

	insts, err = e.Instances([]instance.Id{id0, id1})
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 2)
	c.Assert(insts[0].Id(), gc.Equals, id0)
	c.Assert(insts[1].Id(), gc.Equals, id1)

	// Order of results is not specified.
	insts, err = e.AllInstances()
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 2)
	c.Assert(insts[0].Id(), gc.Not(gc.Equals), insts[1].Id())

	err = e.StopInstances([]instance.Instance{inst0})
	c.Assert(err, gc.IsNil)

	insts, err = e.Instances([]instance.Id{id0, id1})
	c.Assert(err, gc.Equals, environs.ErrPartialInstances)
	c.Assert(insts[0], gc.IsNil)
	c.Assert(insts[1].Id(), gc.Equals, id1)

	insts, err = e.AllInstances()
	c.Assert(err, gc.IsNil)
	c.Assert(insts[0].Id(), gc.Equals, id1)
}
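
// Instances returns environs.ErrPartialInstances when only some of the
// requested ids resolve, leaving a nil slot at the position of each missing
// id, and environs.ErrNoInstances when none resolve, exactly what the
// assertions above pin down. A hedged sketch of how a caller might consume a
// partial result (liveInstances is a hypothetical helper, not part of the
// environs API):

func liveInstances(e environs.Environ, ids []instance.Id) ([]instance.Instance, error) {
	insts, err := e.Instances(ids)
	switch err {
	case nil, environs.ErrPartialInstances:
		// Keep only the entries that resolved; missing ids are nil.
		var live []instance.Instance
		for _, inst := range insts {
			if inst != nil {
				live = append(live, inst)
			}
		}
		return live, nil
	default:
		return nil, err
	}
}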
func (s *workerSuite) setupScenario(c *gc.C) ([]*state.Machine, []instance.Instance) {
	var machines []*state.Machine
	var insts []instance.Instance
	for i := 0; i < 10; i++ {
		m, err := s.State.AddMachine("series", state.JobHostUnits)
		c.Assert(err, gc.IsNil)
		machines = append(machines, m)
		inst, _ := testing.AssertStartInstance(c, s.Conn.Environ, m.Id())
		insts = append(insts, inst)
	}
	// Associate the odd-numbered machines with an instance.
	for i := 1; i < len(machines); i += 2 {
		m := machines[i]
		err := m.SetProvisioned(insts[i].Id(), "nonce", nil)
		c.Assert(err, gc.IsNil)
	}
	// Associate the first half of the instances with an address and status.
	for i := 0; i < len(machines)/2; i++ {
		dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
		dummy.SetInstanceStatus(insts[i], "running")
	}
	// Make sure the second half of the instances have no addresses.
	for i := len(machines) / 2; i < len(machines); i++ {
		dummy.SetInstanceAddresses(insts[i], nil)
	}
	return machines, insts
}
// If the environment is configured not to require a public IP address for nodes,
// bootstrapping and starting an instance should occur without any attempt to
// allocate a public address.
func (s *localServerSuite) TestStartInstanceWithoutPublicIP(c *gc.C) {
	cleanup := s.srv.Service.Nova.RegisterControlPoint(
		"addFloatingIP",
		func(sc hook.ServiceControl, args ...interface{}) error {
			return fmt.Errorf("add floating IP should not have been called")
		},
	)
	defer cleanup()
	cleanup = s.srv.Service.Nova.RegisterControlPoint(
		"addServerFloatingIP",
		func(sc hook.ServiceControl, args ...interface{}) error {
			return fmt.Errorf("add server floating IP should not have been called")
		},
	)
	defer cleanup()

	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
		"use-floating-ip": false,
	}))
	c.Assert(err, gc.IsNil)
	env, err := environs.Prepare(cfg, coretesting.Context(c), s.ConfigStore)
	c.Assert(err, gc.IsNil)
	err = bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)

	inst, _ := testing.AssertStartInstance(c, env, "100")
	err = env.StopInstances([]instance.Instance{inst})
	c.Assert(err, gc.IsNil)
}
func (s *suite) TestAllocateAddress(c *gc.C) {
	cfg, err := config.New(config.NoDefaults, s.TestConfig)
	c.Assert(err, gc.IsNil)
	e, err := environs.Prepare(cfg, testing.Context(c), s.ConfigStore)
	c.Assert(err, gc.IsNil, gc.Commentf("preparing environ %#v", s.TestConfig))
	c.Assert(e, gc.NotNil)

	envtesting.UploadFakeTools(c, e.Storage())
	err = bootstrap.EnsureNotBootstrapped(e)
	c.Assert(err, gc.IsNil)
	err = bootstrap.Bootstrap(testing.Context(c), e, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)

	inst, _ := jujutesting.AssertStartInstance(c, e, "0")
	c.Assert(inst, gc.NotNil)
	netId := network.Id("net1")

	opc := make(chan dummy.Operation, 200)
	dummy.Listen(opc)

	expectAddress := instance.NewAddress("0.1.2.1", instance.NetworkCloudLocal)
	address, err := e.AllocateAddress(inst.Id(), netId)
	c.Assert(err, gc.IsNil)
	c.Assert(address, gc.DeepEquals, expectAddress)

	assertAllocateAddress(c, e, opc, inst.Id(), netId, expectAddress)

	expectAddress = instance.NewAddress("0.1.2.2", instance.NetworkCloudLocal)
	address, err = e.AllocateAddress(inst.Id(), netId)
	c.Assert(err, gc.IsNil)
	c.Assert(address, gc.DeepEquals, expectAddress)

	assertAllocateAddress(c, e, opc, inst.Id(), netId, expectAddress)
}
// primeAgent adds a new Machine to run the given jobs, and sets up the
// machine agent's directory. It returns the new machine, the
// agent's configuration and the tools currently running.
func (s *commonMachineSuite) primeAgent(
	c *gc.C, vers version.Binary,
	jobs ...state.MachineJob) (m *state.Machine, config agent.ConfigSetterWriter, tools *tools.Tools) {

	// Add a machine and ensure it is provisioned.
	m, err := s.State.AddMachine("quantal", jobs...)
	c.Assert(err, gc.IsNil)
	inst, md := jujutesting.AssertStartInstance(c, s.Conn.Environ, m.Id())
	c.Assert(m.SetProvisioned(inst.Id(), state.BootstrapNonce, md), gc.IsNil)

	// Add an address for the tests in case the maybeInitiateMongoServer
	// codepath is exercised.
	s.setFakeMachineAddresses(c, m)

	// Set up the new machine.
	err = m.SetAgentVersion(vers)
	c.Assert(err, gc.IsNil)
	err = m.SetPassword(initialMachinePassword)
	c.Assert(err, gc.IsNil)
	tag := names.MachineTag(m.Id())
	if m.IsManager() {
		err = m.SetMongoPassword(initialMachinePassword)
		c.Assert(err, gc.IsNil)
		config, tools = s.agentSuite.primeStateAgent(c, tag, initialMachinePassword, vers)
		info, ok := config.StateServingInfo()
		c.Assert(ok, jc.IsTrue)
		err = s.State.SetStateServingInfo(info)
		c.Assert(err, gc.IsNil)
	} else {
		config, tools = s.agentSuite.primeAgent(c, tag, initialMachinePassword, vers)
	}
	err = config.Write()
	c.Assert(err, gc.IsNil)
	return m, config, tools
}
func (s *LiveTests) assertStartInstanceDefaultSecurityGroup(c *gc.C, useDefault bool) {
	attrs := s.TestConfig.Merge(coretesting.Attrs{
		"name":                 "sample-" + randomName(),
		"control-bucket":       "juju-test-" + randomName(),
		"use-default-secgroup": useDefault,
	})
	cfg, err := config.New(config.NoDefaults, attrs)
	c.Assert(err, gc.IsNil)

	// Set up a test environment.
	env, err := environs.New(cfg)
	c.Assert(err, gc.IsNil)
	c.Assert(env, gc.NotNil)
	defer env.Destroy()

	// Bootstrap and start an instance.
	err = bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	inst, _ := jujutesting.AssertStartInstance(c, env, "100")

	// Check whether the instance has the default security group assigned.
	novaClient := openstack.GetNovaClient(env)
	groups, err := novaClient.GetServerSecurityGroups(string(inst.Id()))
	c.Assert(err, gc.IsNil)
	defaultGroupFound := false
	for _, group := range groups {
		if group.Name == "default" {
			defaultGroupFound = true
			break
		}
	}
	c.Assert(defaultGroupFound, gc.Equals, useDefault)
}
func (t *localServerSuite) TestAddresses(c *gc.C) {
	env := t.Prepare(c)
	envtesting.UploadFakeTools(c, env.Storage())
	err := bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	inst, _ := testing.AssertStartInstance(c, env, "1")
	c.Assert(err, gc.IsNil)
	addrs, err := inst.Addresses()
	c.Assert(err, gc.IsNil)

	// Expected values use Address type but really contain a regexp for
	// the value rather than a valid ip or hostname.
	expected := []instance.Address{{
		Value:        "*.testing.invalid",
		Type:         instance.HostName,
		NetworkScope: instance.NetworkPublic,
	}, {
		Value:        "*.internal.invalid",
		Type:         instance.HostName,
		NetworkScope: instance.NetworkCloudLocal,
	}, {
		Value:        "8.0.0.*",
		Type:         instance.Ipv4Address,
		NetworkScope: instance.NetworkPublic,
	}, {
		Value:        "127.0.0.*",
		Type:         instance.Ipv4Address,
		NetworkScope: instance.NetworkCloudLocal,
	}}

	c.Assert(addrs, gc.HasLen, len(expected))
	for i, addr := range addrs {
		c.Check(addr.Value, gc.Matches, expected[i].Value)
		c.Check(addr.Type, gc.Equals, expected[i].Type)
		c.Check(addr.NetworkScope, gc.Equals, expected[i].NetworkScope)
	}
}
func (s *localServerSuite) TestInstanceStatus(c *gc.C) {
	env := s.Prepare(c)
	// goose's test service always returns ACTIVE state.
	inst, _ := testing.AssertStartInstance(c, env, "100")
	c.Assert(inst.Status(), gc.Equals, nova.StatusActive)
	err := env.StopInstances([]instance.Instance{inst})
	c.Assert(err, gc.IsNil)
}
func (s *localServerSuite) TestInstanceStatus(c *gc.C) {
	env := s.Prepare(c)
	envtesting.UploadFakeTools(c, env.Storage())
	inst, _ := testing.AssertStartInstance(c, env, "100")
	c.Assert(inst.Status(), gc.Equals, "running")
	err := env.StopInstances([]instance.Instance{inst})
	c.Assert(err, gc.IsNil)
}
// If the environment is configured not to require a public IP address for nodes,
// bootstrapping and starting an instance should occur without any attempt to
// allocate a public address.
func (s *localServerSuite) TestStartInstance(c *gc.C) {
	env := s.Prepare(c)
	envtesting.UploadFakeTools(c, env.Storage())
	err := bootstrap.Bootstrap(bootstrapContext(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	inst, _ := testing.AssertStartInstance(c, env, "100")
	err = env.StopInstances([]instance.Instance{inst})
	c.Assert(err, gc.IsNil)
}
func (t *localServerSuite) TestInstanceStatus(c *gc.C) {
	env := t.Prepare(c)
	envtesting.UploadFakeTools(c, env.Storage())
	err := bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	t.srv.ec2srv.SetInitialInstanceState(ec2test.Terminated)
	inst, _ := testing.AssertStartInstance(c, env, "1")
	c.Assert(err, gc.IsNil)
	c.Assert(inst.Status(), gc.Equals, "terminated")
}
func (t *localServerSuite) TestStartInstanceHardwareCharacteristics(c *gc.C) {
	env := t.Prepare(c)
	envtesting.UploadFakeTools(c, env.Storage())
	err := bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)
	_, hc := testing.AssertStartInstance(c, env, "1")
	c.Check(*hc.Arch, gc.Equals, "amd64")
	c.Check(*hc.Mem, gc.Equals, uint64(1740))
	c.Check(*hc.CpuCores, gc.Equals, uint64(1))
	c.Assert(*hc.CpuPower, gc.Equals, uint64(100))
}
func (s *localServerSuite) TestStartInstanceNetwork(c *gc.C) {
	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
		// A label that corresponds to a nova test service network.
		"network": "net",
	}))
	c.Assert(err, gc.IsNil)
	env, err := environs.New(cfg)
	c.Assert(err, gc.IsNil)
	inst, _ := testing.AssertStartInstance(c, env, "100")
	err = env.StopInstances([]instance.Instance{inst})
	c.Assert(err, gc.IsNil)
}
// TestStartStop is similar to Tests.TestStartStop except
// that it does not assume a pristine environment.
func (t *LiveTests) TestStartStop(c *gc.C) {
	t.PrepareOnce(c)
	envtesting.UploadFakeTools(c, t.Env.Storage())

	inst, _ := testing.AssertStartInstance(c, t.Env, "0")
	c.Assert(inst, gc.NotNil)
	id0 := inst.Id()

	insts, err := t.Env.Instances([]instance.Id{id0, id0})
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 2)
	c.Assert(insts[0].Id(), gc.Equals, id0)
	c.Assert(insts[1].Id(), gc.Equals, id0)

	// Asserting on the return of AllInstances makes the test fragile,
	// as even comparing the before and after start values can be thrown
	// off if other instances have been created or destroyed in the same
	// time frame. Instead, just check the instance we created exists.
	insts, err = t.Env.AllInstances()
	c.Assert(err, gc.IsNil)
	found := false
	for _, inst := range insts {
		if inst.Id() == id0 {
			c.Assert(found, gc.Equals, false, gc.Commentf("%v", insts))
			found = true
		}
	}
	c.Assert(found, gc.Equals, true, gc.Commentf("expected %v in %v", inst, insts))

	dns, err := inst.WaitDNSName()
	c.Assert(err, gc.IsNil)
	c.Assert(dns, gc.Not(gc.Equals), "")

	insts, err = t.Env.Instances([]instance.Id{id0, ""})
	c.Assert(err, gc.Equals, environs.ErrPartialInstances)
	c.Assert(insts, gc.HasLen, 2)
	c.Check(insts[0].Id(), gc.Equals, id0)
	c.Check(insts[1], gc.IsNil)

	err = t.Env.StopInstances([]instance.Instance{inst})
	c.Assert(err, gc.IsNil)

	// The machine may not be marked as shutting down
	// immediately. Repeat a few times to ensure we get the error.
	for a := t.Attempt.Start(); a.Next(); {
		insts, err = t.Env.Instances([]instance.Id{id0})
		if err != nil {
			break
		}
	}
	c.Assert(err, gc.Equals, environs.ErrNoInstances)
	c.Assert(insts, gc.HasLen, 0)
}
func (s *localServerSuite) TestInstancesGathering(c *gc.C) {
	env := s.Prepare(c)
	envtesting.UploadFakeTools(c, env.Storage())
	inst0, _ := testing.AssertStartInstance(c, env, "100")
	id0 := inst0.Id()
	inst1, _ := testing.AssertStartInstance(c, env, "101")
	id1 := inst1.Id()
	c.Logf("id0: %s, id1: %s", id0, id1)
	defer func() {
		err := env.StopInstances([]instance.Instance{inst0, inst1})
		c.Assert(err, gc.IsNil)
	}()

	for i, test := range instanceGathering {
		c.Logf("test %d: find %v -> expect len %d, err: %v", i, test.ids, len(test.ids), test.err)
		ids := make([]instance.Id, len(test.ids))
		for j, id := range test.ids {
			switch id {
			case "id0":
				ids[j] = id0
			case "id1":
				ids[j] = id1
			}
		}
		insts, err := env.Instances(ids)
		c.Assert(err, gc.Equals, test.err)
		if err == environs.ErrNoInstances {
			c.Assert(insts, gc.HasLen, 0)
		} else {
			c.Assert(insts, gc.HasLen, len(test.ids))
		}
		for j, inst := range insts {
			if ids[j] != "" {
				c.Assert(inst.Id(), gc.Equals, ids[j])
			} else {
				c.Assert(inst, gc.IsNil)
			}
		}
	}
}
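
// The instanceGathering table is defined elsewhere in this suite; from the
// way the loop above consumes it, its shape is a list of id placeholders plus
// the expected error. An illustrative (not verbatim) fragment might be:
//
//	var instanceGathering = []struct {
//		ids []instance.Id
//		err error
//	}{
//		{ids: []instance.Id{"id0"}},
//		{ids: []instance.Id{"id0", "id1"}},
//		{ids: []instance.Id{"id0", ""}, err: environs.ErrPartialInstances},
//		{ids: []instance.Id{"", ""}, err: environs.ErrNoInstances},
//	}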
func (s *CommonProvisionerSuite) startUnknownInstance(c *gc.C, id string) instance.Instance {
	instance, _ := testing.AssertStartInstance(c, s.Conn.Environ, id)
	select {
	case o := <-s.op:
		switch o := o.(type) {
		case dummy.OpStartInstance:
		default:
			c.Fatalf("unexpected operation %#v", o)
		}
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for startinstance operation")
	}
	return instance
}
func (s *localServerSuite) TestDestroyEnvironmentDeletesSecurityGroupsFWModeGlobal(c *gc.C) {
	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
		"firewall-mode": "global"}))
	c.Assert(err, gc.IsNil)
	env, err := environs.New(cfg)
	c.Assert(err, gc.IsNil)
	instanceName := "100"
	testing.AssertStartInstance(c, env, instanceName)
	allSecurityGroups := []string{"default", fmt.Sprintf("juju-%v", env.Name()),
		fmt.Sprintf("juju-%v-global", env.Name())}
	assertSecurityGroups(c, env, allSecurityGroups)
	err = env.Destroy()
	c.Check(err, gc.IsNil)
	assertSecurityGroups(c, env, []string{"default"})
}
func (s *SSHCommonSuite) makeMachines(n int, c *gc.C, setAddresses bool) []*state.Machine {
	var machines = make([]*state.Machine, n)
	for i := 0; i < n; i++ {
		m, err := s.State.AddMachine("quantal", state.JobHostUnits)
		c.Assert(err, gc.IsNil)
		if setAddresses {
			s.setAddresses(m, c)
		}
		// Must set an instance id as the ssh command uses that as a signal
		// the machine has been provisioned.
		inst, md := testing.AssertStartInstance(c, s.Conn.Environ, m.Id())
		c.Assert(m.SetProvisioned(inst.Id(), "fake_nonce", md), gc.IsNil)
		machines[i] = m
	}
	return machines
}
func (s *localServerSuite) TestStopInstance(c *gc.C) {
	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
		"firewall-mode": "instance"}))
	c.Assert(err, gc.IsNil)
	env, err := environs.New(cfg)
	c.Assert(err, gc.IsNil)
	instanceName := "100"
	inst, _ := testing.AssertStartInstance(c, env, instanceName)
	// Openstack now has three security groups for the server: the default
	// group, one group for the entire environment, and another for the
	// new instance.
	assertSecurityGroups(c, env, []string{"default", fmt.Sprintf("juju-%v", env.Name()),
		fmt.Sprintf("juju-%v-%v", env.Name(), instanceName)})
	err = env.StopInstances([]instance.Instance{inst})
	c.Assert(err, gc.IsNil)
	// The security group for this instance is now removed.
	assertSecurityGroups(c, env, []string{"default", fmt.Sprintf("juju-%v", env.Name())})
}
// Due to bug #1300755 it can happen that the security group intended for
// an instance is also used as the common security group of another
// environment. If this is the case, the attempt to delete the instance's
// security group fails but StopInstance succeeds.
func (s *localServerSuite) TestStopInstanceSecurityGroupNotDeleted(c *gc.C) {
	// Force an error when a security group is deleted.
	cleanup := s.srv.Service.Nova.RegisterControlPoint(
		"removeSecurityGroup",
		func(sc hook.ServiceControl, args ...interface{}) error {
			return fmt.Errorf("failed on purpose")
		},
	)
	defer cleanup()
	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
		"firewall-mode": "instance"}))
	c.Assert(err, gc.IsNil)
	env, err := environs.New(cfg)
	c.Assert(err, gc.IsNil)
	instanceName := "100"
	inst, _ := testing.AssertStartInstance(c, env, instanceName)
	allSecurityGroups := []string{"default", fmt.Sprintf("juju-%v", env.Name()),
		fmt.Sprintf("juju-%v-%v", env.Name(), instanceName)}
	assertSecurityGroups(c, env, allSecurityGroups)
	err = env.StopInstances([]instance.Instance{inst})
	c.Assert(err, gc.IsNil)
	assertSecurityGroups(c, env, allSecurityGroups)
}
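
// The behaviour pinned down above implies best-effort cleanup inside the
// provider's StopInstances: failure to delete the instance's security group
// must not fail the stop itself. A minimal sketch of that shape, with
// hypothetical function names standing in for the provider internals:

func stopWithBestEffortCleanup(stopServer, deleteSecGroup func() error) error {
	if err := stopServer(); err != nil {
		return err // failing to stop the instance is fatal
	}
	if err := deleteSecGroup(); err != nil {
		// The group may still be in use as another environment's common
		// group (bug #1300755), so log the failure and carry on.
		log.Printf("cannot delete security group: %v", err)
	}
	return nil
}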
func (t *LiveTests) TestInstanceAttributes(c *gc.C) {
	inst, hc := testing.AssertStartInstance(c, t.Env, "30")
	defer t.Env.StopInstances([]instance.Instance{inst})
	// Sanity check for hardware characteristics.
	c.Assert(hc.Arch, gc.NotNil)
	c.Assert(hc.Mem, gc.NotNil)
	c.Assert(hc.RootDisk, gc.NotNil)
	c.Assert(hc.CpuCores, gc.NotNil)
	c.Assert(hc.CpuPower, gc.NotNil)
	dns, err := inst.WaitDNSName()
	// TODO(niemeyer): This assert sometimes fails with "no instances found"
	c.Assert(err, gc.IsNil)
	c.Assert(dns, gc.Not(gc.Equals), "")

	insts, err := t.Env.Instances([]instance.Id{inst.Id()})
	c.Assert(err, gc.IsNil)
	c.Assert(len(insts), gc.Equals, 1)

	ec2inst := ec2.InstanceEC2(insts[0])
	c.Assert(ec2inst.DNSName, gc.Equals, dns)
	c.Assert(ec2inst.InstanceType, gc.Equals, "m1.small")
}
func (s *localServerSuite) TestCollectInstances(c *gc.C) {
	env := s.Prepare(c)
	cleanup := s.srv.Service.Nova.RegisterControlPoint(
		"addServer",
		func(sc hook.ServiceControl, args ...interface{}) error {
			details := args[0].(*nova.ServerDetail)
			details.Status = "BUILD(networking)"
			return nil
		},
	)
	defer cleanup()
	stateInst, _ := testing.AssertStartInstance(c, env, "100")
	defer func() {
		err := env.StopInstances([]instance.Instance{stateInst})
		c.Assert(err, gc.IsNil)
	}()

	found := make(map[instance.Id]instance.Instance)
	missing := []instance.Id{stateInst.Id()}

	resultMissing := openstack.CollectInstances(env, missing, found)
	c.Assert(resultMissing, gc.DeepEquals, missing)
}
func (s *localServerSuite) TestInstancesBuildSpawning(c *gc.C) {
	env := s.Prepare(c)
	// HP servers are available once they are BUILD(spawning).
	cleanup := s.srv.Service.Nova.RegisterControlPoint(
		"addServer",
		func(sc hook.ServiceControl, args ...interface{}) error {
			details := args[0].(*nova.ServerDetail)
			details.Status = nova.StatusBuildSpawning
			return nil
		},
	)
	defer cleanup()
	stateInst, _ := testing.AssertStartInstance(c, env, "100")
	defer func() {
		err := env.StopInstances([]instance.Instance{stateInst})
		c.Assert(err, gc.IsNil)
	}()

	instances, err := env.Instances([]instance.Id{stateInst.Id()})
	c.Assert(err, gc.IsNil)
	c.Assert(instances, gc.HasLen, 1)
	c.Assert(instances[0].Status(), gc.Equals, nova.StatusBuildSpawning)
}
func (suite *environSuite) TestStartInstanceStartsInstance(c *gc.C) {
	suite.setupFakeTools(c)
	env := suite.makeEnviron()

	// Create node 0: it will be used as the bootstrap node.
	suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`)
	lshwXML, err := suite.generateHWTemplate(map[string]string{"aa:bb:cc:dd:ee:f0": "eth0"})
	c.Assert(err, gc.IsNil)
	suite.testMAASObject.TestServer.AddNodeDetails("node0", lshwXML)
	err = bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)

	// The bootstrap node has been acquired and started.
	operations := suite.testMAASObject.TestServer.NodeOperations()
	actions, found := operations["node0"]
	c.Check(found, gc.Equals, true)
	c.Check(actions, gc.DeepEquals, []string{"acquire", "start"})

	// Test the instance id is correctly recorded for the bootstrap node.
	// Check that the state holds the id of the bootstrap machine.
	stateData, err := bootstrap.LoadState(env.Storage())
	c.Assert(err, gc.IsNil)
	c.Assert(stateData.StateInstances, gc.HasLen, 1)
	insts, err := env.AllInstances()
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 1)
	c.Check(insts[0].Id(), gc.Equals, stateData.StateInstances[0])

	// Create node 1: it will be used as instance number 1.
	suite.testMAASObject.TestServer.NewNode(`{"system_id": "node1", "hostname": "host1"}`)
	lshwXML, err = suite.generateHWTemplate(map[string]string{"aa:bb:cc:dd:ee:f1": "eth0"})
	c.Assert(err, gc.IsNil)
	suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML)
	// TODO(wallyworld) - test instance metadata
	instance, _ := testing.AssertStartInstance(c, env, "1")
	c.Assert(err, gc.IsNil)
	c.Check(instance, gc.NotNil)

	// The instance number 1 has been acquired and started.
	actions, found = operations["node1"]
	c.Assert(found, gc.Equals, true)
	c.Check(actions, gc.DeepEquals, []string{"acquire", "start"})

	// The value of the "user data" parameter used when starting the node
	// contains the run cmd used to write the machine information onto
	// the node's filesystem.
	requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues()
	nodeRequestValues, found := requestValues["node1"]
	c.Assert(found, gc.Equals, true)
	c.Assert(len(nodeRequestValues), gc.Equals, 2)
	userData := nodeRequestValues[1].Get("user_data")
	decodedUserData, err := decodeUserData(userData)
	c.Assert(err, gc.IsNil)
	info := machineInfo{"host1"}
	cloudinitRunCmd, err := info.cloudinitRunCmd()
	c.Assert(err, gc.IsNil)
	data, err := goyaml.Marshal(cloudinitRunCmd)
	c.Assert(err, gc.IsNil)
	c.Check(string(decodedUserData), gc.Matches, "(.|\n)*"+string(data)+"(\n|.)*")

	// Trash the tools and try to start another instance.
	envtesting.RemoveTools(c, env.Storage())
	instance, _, _, err = testing.StartInstance(env, "2")
	c.Check(instance, gc.IsNil)
	c.Check(err, jc.Satisfies, errors.IsNotFound)
}
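
// decodeUserData is defined elsewhere in the provider tests. Given that juju
// gzip-compresses cloud-init user data and MAAS transports it base64-encoded,
// a plausible sketch of that helper is the following (an assumption, not the
// verbatim implementation):

func decodeUserDataSketch(userData string) ([]byte, error) {
	// Reverse the base64 transport encoding first.
	compressed, err := base64.StdEncoding.DecodeString(userData)
	if err != nil {
		return nil, err
	}
	// Then decompress the gzipped cloud-init payload.
	zr, err := gzip.NewReader(bytes.NewReader(compressed))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return ioutil.ReadAll(zr)
}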
func (t *localServerSuite) TestBootstrapInstanceUserDataAndState(c *gc.C) {
	env := t.Prepare(c)
	envtesting.UploadFakeTools(c, env.Storage())
	err := bootstrap.Bootstrap(coretesting.Context(c), env, environs.BootstrapParams{})
	c.Assert(err, gc.IsNil)

	// Check that the state holds the id of the bootstrap machine.
	bootstrapState, err := bootstrap.LoadState(env.Storage())
	c.Assert(err, gc.IsNil)
	c.Assert(bootstrapState.StateInstances, gc.HasLen, 1)

	insts, err := env.AllInstances()
	c.Assert(err, gc.IsNil)
	c.Assert(insts, gc.HasLen, 1)
	c.Check(insts[0].Id(), gc.Equals, bootstrapState.StateInstances[0])

	// Check that the user data is configured to only configure
	// authorized SSH keys and set the log output; everything
	// else happens after the machine is brought up.
	inst := t.srv.ec2srv.Instance(string(insts[0].Id()))
	c.Assert(inst, gc.NotNil)
	bootstrapDNS, err := insts[0].DNSName()
	c.Assert(err, gc.IsNil)
	c.Assert(bootstrapDNS, gc.Not(gc.Equals), "")
	userData, err := utils.Gunzip(inst.UserData)
	c.Assert(err, gc.IsNil)
	c.Logf("first instance: UserData: %q", userData)
	var userDataMap map[interface{}]interface{}
	err = goyaml.Unmarshal(userData, &userDataMap)
	c.Assert(err, gc.IsNil)
	c.Assert(userDataMap, jc.DeepEquals, map[interface{}]interface{}{
		"output": map[interface{}]interface{}{
			"all": "| tee -a /var/log/cloud-init-output.log",
		},
		"ssh_authorized_keys": splitAuthKeys(env.Config().AuthorizedKeys()),
		"runcmd": []interface{}{
			"set -xe",
			"install -D -m 644 /dev/null '/var/lib/juju/nonce.txt'",
			"printf '%s\\n' 'user-admin:bootstrap' > '/var/lib/juju/nonce.txt'",
		},
	})

	// Check that a new instance will be started with a machine agent.
	inst1, hc := testing.AssertStartInstance(c, env, "1")
	c.Check(*hc.Arch, gc.Equals, "amd64")
	c.Check(*hc.Mem, gc.Equals, uint64(1740))
	c.Check(*hc.CpuCores, gc.Equals, uint64(1))
	c.Assert(*hc.CpuPower, gc.Equals, uint64(100))
	inst = t.srv.ec2srv.Instance(string(inst1.Id()))
	c.Assert(inst, gc.NotNil)
	userData, err = utils.Gunzip(inst.UserData)
	c.Assert(err, gc.IsNil)
	c.Logf("second instance: UserData: %q", userData)
	userDataMap = nil
	err = goyaml.Unmarshal(userData, &userDataMap)
	c.Assert(err, gc.IsNil)
	CheckPackage(c, userDataMap, "git", true)
	CheckPackage(c, userDataMap, "mongodb-server", false)
	CheckScripts(c, userDataMap, "jujud bootstrap-state", false)
	CheckScripts(c, userDataMap, "/var/lib/juju/agents/machine-1/agent.conf", true)
	// TODO check for provisioning agent

	err = env.Destroy()
	c.Assert(err, gc.IsNil)

	_, err = bootstrap.LoadState(env.Storage())
	c.Assert(err, gc.NotNil)
}
func (t *LiveTests) TestInstanceGroups(c *gc.C) {
	t.PrepareOnce(c)
	ec2conn := ec2.EnvironEC2(t.Env)

	groups := amzec2.SecurityGroupNames(
		ec2.JujuGroupName(t.Env),
		ec2.MachineGroupName(t.Env, "98"),
		ec2.MachineGroupName(t.Env, "99"),
	)
	info := make([]amzec2.SecurityGroupInfo, len(groups))

	// Create a group with the same name as the juju group
	// but with different permissions, to check that it's deleted
	// and recreated correctly.
	oldJujuGroup := createGroup(c, ec2conn, groups[0].Name, "old juju group")

	// Add two permissions: one is required and should be left alone;
	// the other is not and should be deleted.
	// N.B. this is unfortunately sensitive to the actual set of permissions used.
	_, err := ec2conn.AuthorizeSecurityGroup(oldJujuGroup,
		[]amzec2.IPPerm{
			{
				Protocol:  "tcp",
				FromPort:  22,
				ToPort:    22,
				SourceIPs: []string{"0.0.0.0/0"},
			},
			{
				Protocol:  "udp",
				FromPort:  4321,
				ToPort:    4322,
				SourceIPs: []string{"3.4.5.6/32"},
			},
		})
	c.Assert(err, gc.IsNil)

	inst0, _ := testing.AssertStartInstance(c, t.Env, "98")
	defer t.Env.StopInstances([]instance.Instance{inst0})

	// Create a same-named group for the second instance
	// before starting it, to check that it's reused correctly.
	oldMachineGroup := createGroup(c, ec2conn, groups[2].Name, "old machine group")

	inst1, _ := testing.AssertStartInstance(c, t.Env, "99")
	defer t.Env.StopInstances([]instance.Instance{inst1})

	groupsResp, err := ec2conn.SecurityGroups(groups, nil)
	c.Assert(err, gc.IsNil)
	c.Assert(groupsResp.Groups, gc.HasLen, len(groups))

	// For each group, check that it exists and record its id.
	for i, group := range groups {
		found := false
		for _, g := range groupsResp.Groups {
			if g.Name == group.Name {
				groups[i].Id = g.Id
				info[i] = g
				found = true
				break
			}
		}
		if !found {
			c.Fatalf("group %q not found", group.Name)
		}
	}

	// The old juju group should have been reused.
	c.Check(groups[0].Id, gc.Equals, oldJujuGroup.Id)

	// Check that it authorizes the correct ports and there
	// are no extra permissions (in particular we are checking
	// that the unneeded permission that we added earlier
	// has been deleted).
	perms := info[0].IPPerms
	c.Assert(perms, gc.HasLen, 6)
	checkPortAllowed(c, perms, 22) // SSH
	checkPortAllowed(c, perms, coretesting.FakeConfig()["state-port"].(int))
	checkPortAllowed(c, perms, coretesting.FakeConfig()["api-port"].(int))
	checkSecurityGroupAllowed(c, perms, groups[0])

	// The old machine group should have been reused also.
	c.Check(groups[2].Id, gc.Equals, oldMachineGroup.Id)

	// Check that each instance is part of the correct groups.
	resp, err := ec2conn.Instances([]string{string(inst0.Id()), string(inst1.Id())}, nil)
	c.Assert(err, gc.IsNil)
	c.Assert(resp.Reservations, gc.HasLen, 2)
	for _, r := range resp.Reservations {
		c.Assert(r.Instances, gc.HasLen, 1)
		// Each instance must be part of the general juju group.
		inst := r.Instances[0]
		msg := gc.Commentf("instance %#v", inst)
		c.Assert(hasSecurityGroup(inst, groups[0]), gc.Equals, true, msg)
		switch instance.Id(inst.InstanceId) {
		case inst0.Id():
			c.Assert(hasSecurityGroup(inst, groups[1]), gc.Equals, true, msg)
			c.Assert(hasSecurityGroup(inst, groups[2]), gc.Equals, false, msg)
		case inst1.Id():
			c.Assert(hasSecurityGroup(inst, groups[2]), gc.Equals, true, msg)
			c.Assert(hasSecurityGroup(inst, groups[1]), gc.Equals, false, msg)
		default:
			c.Errorf("unknown instance found: %v", inst)
		}
	}

	// Check that listing those instances finds them using the groups.
	instIds := []instance.Id{inst0.Id(), inst1.Id()}
	idsFromInsts := func(insts []instance.Instance) (ids []instance.Id) {
		for _, inst := range insts {
			ids = append(ids, inst.Id())
		}
		return ids
	}
	insts, err := t.Env.Instances(instIds)
	c.Assert(err, gc.IsNil)
	c.Assert(instIds, jc.SameContents, idsFromInsts(insts))
	allInsts, err := t.Env.AllInstances()
	c.Assert(err, gc.IsNil)
	c.Assert(instIds, jc.SameContents, idsFromInsts(allInsts))
}
func (t *LiveTests) TestPorts(c *gc.C) {
	t.PrepareOnce(c)
	envtesting.UploadFakeTools(c, t.Env.Storage())

	inst1, _ := testing.AssertStartInstance(c, t.Env, "1")
	c.Assert(inst1, gc.NotNil)
	defer t.Env.StopInstances([]instance.Instance{inst1})
	ports, err := inst1.Ports("1")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.HasLen, 0)

	inst2, _ := testing.AssertStartInstance(c, t.Env, "2")
	c.Assert(inst2, gc.NotNil)
	ports, err = inst2.Ports("2")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.HasLen, 0)
	defer t.Env.StopInstances([]instance.Instance{inst2})

	// Open some ports and check they're there.
	err = inst1.OpenPorts("1", []instance.Port{{"udp", 67}, {"tcp", 45}})
	c.Assert(err, gc.IsNil)
	ports, err = inst1.Ports("1")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 45}, {"udp", 67}})
	ports, err = inst2.Ports("2")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.HasLen, 0)

	err = inst2.OpenPorts("2", []instance.Port{{"tcp", 89}, {"tcp", 45}})
	c.Assert(err, gc.IsNil)

	// Check there's no crosstalk to another machine.
	ports, err = inst2.Ports("2")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}})
	ports, err = inst1.Ports("1")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 45}, {"udp", 67}})

	// Check that opening the same port again is ok.
	oldPorts, err := inst2.Ports("2")
	c.Assert(err, gc.IsNil)
	err = inst2.OpenPorts("2", []instance.Port{{"tcp", 45}})
	c.Assert(err, gc.IsNil)
	ports, err = inst2.Ports("2")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, oldPorts)

	// Check that opening the same port again and another port is ok.
	err = inst2.OpenPorts("2", []instance.Port{{"tcp", 45}, {"tcp", 99}})
	c.Assert(err, gc.IsNil)
	ports, err = inst2.Ports("2")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}, {"tcp", 99}})

	err = inst2.ClosePorts("2", []instance.Port{{"tcp", 45}, {"tcp", 99}})
	c.Assert(err, gc.IsNil)

	// Check that we can close ports and that there's no crosstalk.
	ports, err = inst2.Ports("2")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 89}})
	ports, err = inst1.Ports("1")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 45}, {"udp", 67}})

	// Check that we can close multiple ports.
	err = inst1.ClosePorts("1", []instance.Port{{"tcp", 45}, {"udp", 67}})
	c.Assert(err, gc.IsNil)
	ports, err = inst1.Ports("1")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.HasLen, 0)

	// Check that we can close ports that aren't there.
	err = inst2.ClosePorts("2", []instance.Port{{"tcp", 111}, {"udp", 222}})
	c.Assert(err, gc.IsNil)
	ports, err = inst2.Ports("2")
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 89}})

	// Check errors when acting on environment.
	err = t.Env.OpenPorts([]instance.Port{{"tcp", 80}})
	c.Assert(err, gc.ErrorMatches, `invalid firewall mode "instance" for opening ports on environment`)

	err = t.Env.ClosePorts([]instance.Port{{"tcp", 80}})
	c.Assert(err, gc.ErrorMatches, `invalid firewall mode "instance" for closing ports on environment`)

	_, err = t.Env.Ports()
	c.Assert(err, gc.ErrorMatches, `invalid firewall mode "instance" for retrieving ports from environment`)
}
func (t *LiveTests) TestGlobalPorts(c *gc.C) {
	t.PrepareOnce(c)
	envtesting.UploadFakeTools(c, t.Env.Storage())

	// Change configuration.
	oldConfig := t.Env.Config()
	defer func() {
		err := t.Env.SetConfig(oldConfig)
		c.Assert(err, gc.IsNil)
	}()

	attrs := t.Env.Config().AllAttrs()
	attrs["firewall-mode"] = "global"
	newConfig, err := t.Env.Config().Apply(attrs)
	c.Assert(err, gc.IsNil)
	err = t.Env.SetConfig(newConfig)
	c.Assert(err, gc.IsNil)

	// Create instances and check open ports on both instances.
	inst1, _ := testing.AssertStartInstance(c, t.Env, "1")
	defer t.Env.StopInstances([]instance.Instance{inst1})
	ports, err := t.Env.Ports()
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.HasLen, 0)

	inst2, _ := testing.AssertStartInstance(c, t.Env, "2")
	ports, err = t.Env.Ports()
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.HasLen, 0)
	defer t.Env.StopInstances([]instance.Instance{inst2})

	err = t.Env.OpenPorts([]instance.Port{{"udp", 67}, {"tcp", 45}, {"tcp", 89}, {"tcp", 99}})
	c.Assert(err, gc.IsNil)
	ports, err = t.Env.Ports()
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}, {"tcp", 99}, {"udp", 67}})

	// Check closing some ports.
	err = t.Env.ClosePorts([]instance.Port{{"tcp", 99}, {"udp", 67}})
	c.Assert(err, gc.IsNil)
	ports, err = t.Env.Ports()
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}})

	// Check that we can close ports that aren't there.
	err = t.Env.ClosePorts([]instance.Port{{"tcp", 111}, {"udp", 222}})
	c.Assert(err, gc.IsNil)
	ports, err = t.Env.Ports()
	c.Assert(err, gc.IsNil)
	c.Assert(ports, gc.DeepEquals, []instance.Port{{"tcp", 45}, {"tcp", 89}})

	// Check errors when acting on instances.
	err = inst1.OpenPorts("1", []instance.Port{{"tcp", 80}})
	c.Assert(err, gc.ErrorMatches, `invalid firewall mode "global" for opening ports on instance`)

	err = inst1.ClosePorts("1", []instance.Port{{"tcp", 80}})
	c.Assert(err, gc.ErrorMatches, `invalid firewall mode "global" for closing ports on instance`)

	_, err = inst1.Ports("1")
	c.Assert(err, gc.ErrorMatches, `invalid firewall mode "global" for retrieving ports from instance`)
}
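
// Both port tests above expect Ports to return a canonical ordering: sorted
// by protocol first ("tcp" before "udp"), then ascending by port number, with
// reopened ports deduplicated. A sketch of that normalization under those
// assumed semantics (normalizePorts is a hypothetical helper, not part of the
// instance package):

func normalizePorts(ports []instance.Port) []instance.Port {
	// Drop duplicates while preserving first-seen order.
	seen := make(map[instance.Port]bool)
	var out []instance.Port
	for _, p := range ports {
		if !seen[p] {
			seen[p] = true
			out = append(out, p)
		}
	}
	// Sort by protocol, then by port number.
	sort.Slice(out, func(i, j int) bool {
		if out[i].Protocol != out[j].Protocol {
			return out[i].Protocol < out[j].Protocol
		}
		return out[i].Number < out[j].Number
	})
	return out
}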