func (s *workerSuite) setupScenario(c *gc.C) ([]*state.Machine, []instance.Instance) {
    var machines []*state.Machine
    var insts []instance.Instance
    for i := 0; i < 10; i++ {
        m, err := s.State.AddMachine("series", state.JobHostUnits)
        c.Assert(err, gc.IsNil)
        machines = append(machines, m)
        inst, _ := testing.AssertStartInstance(c, s.Conn.Environ, m.Id())
        insts = append(insts, inst)
    }
    // Associate the odd-numbered machines with an instance.
    for i := 1; i < len(machines); i += 2 {
        m := machines[i]
        err := m.SetProvisioned(insts[i].Id(), "nonce", nil)
        c.Assert(err, gc.IsNil)
    }
    // Associate the first half of the instances with an address and status.
    for i := 0; i < len(machines)/2; i++ {
        dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
        dummy.SetInstanceStatus(insts[i], "running")
    }
    // Make sure the second half of the instances have no addresses.
    for i := len(machines) / 2; i < len(machines); i++ {
        dummy.SetInstanceAddresses(insts[i], nil)
    }
    return machines, insts
}
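// setupScenario relies on an addressesForIndex helper that is not shown
// in this section. A minimal sketch consistent with how it is used (a
// deterministic, distinct address per machine index); the exact
// addressing scheme below is an assumption:
func (*workerSuite) addressesForIndex(i int) []network.Address {
    // Each index maps to a unique address so the tests can predict
    // exactly which addresses the poller should record per machine.
    return network.NewAddresses(fmt.Sprintf("127.0.0.%d", i+1))
}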
func (s *commonMachineSuite) setFakeMachineAddresses(c *gc.C, machine *state.Machine) {
    addrs := network.NewAddresses("0.1.2.3")
    err := machine.SetProviderAddresses(addrs...)
    c.Assert(err, jc.ErrorIsNil)
    // Set the addresses in the environ instance as well so that if
    // the instance poller runs it won't overwrite them.
    instId, err := machine.InstanceId()
    c.Assert(err, jc.ErrorIsNil)
    insts, err := s.Environ.Instances([]instance.Id{instId})
    c.Assert(err, jc.ErrorIsNil)
    dummy.SetInstanceAddresses(insts[0], addrs)
}
// setServerAPIAddresses sets the given addresses on the dummy
// bootstrap instance and in state.
func (s *EndpointSuite) setServerAPIAddresses(c *gc.C, addresses ...network.HostPort) {
    insts, err := s.Environ.Instances([]instance.Id{dummy.BootstrapInstanceId})
    c.Assert(err, jc.ErrorIsNil)
    err = s.State.SetAPIHostPorts([][]network.HostPort{addresses})
    c.Assert(err, jc.ErrorIsNil)
    dummy.SetInstanceAddresses(insts[0], network.HostsWithoutPort(addresses))
    instAddrs, err := insts[0].Addresses()
    c.Assert(err, jc.ErrorIsNil)
    stateAddrs, err := s.State.APIHostPorts()
    c.Assert(err, jc.ErrorIsNil)
    c.Logf("instance addresses set to %v", instAddrs)
    c.Logf("state addresses set to %v", stateAddrs)
}
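// A typical call site for setServerAPIAddresses (illustrative only; the
// port and address values here are hypothetical):
//
//     s.setServerAPIAddresses(c, network.NewHostPorts(1234, "0.1.2.24")...)
//
// network.NewHostPorts builds a []network.HostPort from one port and a
// list of address strings, matching the variadic parameter above.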
func (s *MachineSuite) TestManageModelRunsInstancePoller(c *gc.C) {
    s.AgentSuite.PatchValue(&instancepoller.ShortPoll, 500*time.Millisecond)
    usefulVersion := version.Binary{
        Number: jujuversion.Current,
        Arch:   arch.HostArch(),
        Series: "quantal", // to match the charm created below
    }
    envtesting.AssertUploadFakeToolsVersions(
        c, s.DefaultToolsStorage,
        s.Environ.Config().AgentStream(),
        s.Environ.Config().AgentStream(),
        usefulVersion,
    )
    m, _, _ := s.primeAgent(c, state.JobManageModel)
    a := s.newAgent(c, m)
    defer a.Stop()
    go func() {
        c.Check(a.Run(nil), jc.ErrorIsNil)
    }()

    // Add one unit to a service.
    charm := s.AddTestingCharm(c, "dummy")
    svc := s.AddTestingService(c, "test-service", charm)
    units, err := juju.AddUnits(s.State, svc, svc.Name(), 1, nil)
    c.Assert(err, jc.ErrorIsNil)

    m, instId := s.waitProvisioned(c, units[0])
    insts, err := s.Environ.Instances([]instance.Id{instId})
    c.Assert(err, jc.ErrorIsNil)
    addrs := network.NewAddresses("1.2.3.4")
    dummy.SetInstanceAddresses(insts[0], addrs)
    dummy.SetInstanceStatus(insts[0], "running")

    for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
        if !attempt.HasNext() {
            c.Logf("final machine addresses: %#v", m.Addresses())
            c.Fatalf("timed out waiting for machine to get address")
        }
        err := m.Refresh()
        c.Assert(err, jc.ErrorIsNil)
        instStatus, err := m.InstanceStatus()
        c.Assert(err, jc.ErrorIsNil)
        c.Logf("found status is %q %q", instStatus.Status, instStatus.Message)
        if reflect.DeepEqual(m.Addresses(), addrs) && instStatus.Message == "running" {
            c.Logf("machine %q address updated: %+v", m.Id(), addrs)
            break
        }
        c.Logf("waiting for machine %q address to be updated", m.Id())
    }
}
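// Both TestManage*RunsInstancePoller variants call s.waitProvisioned,
// which is not included in this section. A plausible sketch, assuming a
// polling wait (the real helper may use a watcher instead):
func (s *MachineSuite) waitProvisioned(c *gc.C, unit *state.Unit) (*state.Machine, instance.Id) {
    // Find the machine the unit was assigned to.
    machineId, err := unit.AssignedMachineId()
    c.Assert(err, jc.ErrorIsNil)
    m, err := s.State.Machine(machineId)
    c.Assert(err, jc.ErrorIsNil)
    // Poll until the provisioner has recorded an instance ID.
    for a := coretesting.LongAttempt.Start(); a.Next(); {
        c.Assert(m.Refresh(), jc.ErrorIsNil)
        if instId, err := m.InstanceId(); err == nil {
            return m, instId
        }
    }
    c.Fatalf("timed out waiting for unit %q to be provisioned", unit)
    panic("unreachable")
}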
func (s *commonMachineSuite) setFakeMachineAddresses(c *gc.C, machine *state.Machine) {
    addrs := []network.Address{
        network.NewAddress("0.1.2.3", network.ScopeUnknown),
    }
    err := machine.SetAddresses(addrs...)
    c.Assert(err, gc.IsNil)
    // Set the addresses in the environ instance as well so that if
    // the instance poller runs it won't overwrite them.
    instId, err := machine.InstanceId()
    c.Assert(err, gc.IsNil)
    insts, err := s.Conn.Environ.Instances([]instance.Id{instId})
    c.Assert(err, gc.IsNil)
    dummy.SetInstanceAddresses(insts[0], addrs)
}
func (s *MachineSuite) TestManageEnvironRunsInstancePoller(c *gc.C) {
    s.agentSuite.PatchValue(&instancepoller.ShortPoll, 500*time.Millisecond)
    usefulVersion := version.Current
    usefulVersion.Series = "quantal" // to match the charm created below
    envtesting.AssertUploadFakeToolsVersions(c, s.Environ.Storage(), usefulVersion)
    m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
    a := s.newAgent(c, m)
    defer a.Stop()
    go func() {
        c.Check(a.Run(nil), gc.IsNil)
    }()

    // Add one unit to a service.
    charm := s.AddTestingCharm(c, "dummy")
    svc := s.AddTestingService(c, "test-service", charm)
    units, err := juju.AddUnits(s.State, svc, 1, "")
    c.Assert(err, gc.IsNil)

    m, instId := s.waitProvisioned(c, units[0])
    insts, err := s.Environ.Instances([]instance.Id{instId})
    c.Assert(err, gc.IsNil)
    addrs := []network.Address{network.NewAddress("1.2.3.4", network.ScopeUnknown)}
    dummy.SetInstanceAddresses(insts[0], addrs)
    dummy.SetInstanceStatus(insts[0], "running")

    // Use "attempt" rather than "a" to avoid shadowing the agent above.
    for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
        if !attempt.HasNext() {
            c.Logf("final machine addresses: %#v", m.Addresses())
            c.Fatalf("timed out waiting for machine to get address")
        }
        err := m.Refresh()
        c.Assert(err, gc.IsNil)
        instStatus, err := m.InstanceStatus()
        c.Assert(err, gc.IsNil)
        if reflect.DeepEqual(m.Addresses(), addrs) && instStatus == "running" {
            break
        }
    }
}
func (s *workerSuite) TestWorker(c *gc.C) {
    // Most functionality is already tested in detail - we
    // just need to test that things are wired together
    // correctly.
    s.PatchValue(&ShortPoll, 10*time.Millisecond)
    s.PatchValue(&LongPoll, 10*time.Millisecond)
    s.PatchValue(&gatherTime, 10*time.Millisecond)
    machines, insts := s.setupScenario(c)
    s.State.StartSync()
    w := NewWorker(s.State)
    defer func() {
        c.Assert(worker.Stop(w), gc.IsNil)
    }()

    checkInstanceInfo := func(index int, m machine, expectedStatus string) bool {
        isProvisioned := true
        status, err := m.InstanceStatus()
        if state.IsNotProvisionedError(err) {
            isProvisioned = false
        } else {
            c.Assert(err, gc.IsNil)
        }
        return reflect.DeepEqual(m.Addresses(), s.addressesForIndex(index)) &&
            (!isProvisioned || status == expectedStatus)
    }

    // Wait for the odd numbered machines in the
    // first half of the machine slice to be given their
    // addresses and status.
    for a := coretesting.LongAttempt.Start(); a.Next(); {
        if !a.HasNext() {
            c.Fatalf("timed out waiting for instance info")
        }
        if machinesSatisfy(c, machines, func(i int, m *state.Machine) bool {
            if i < len(machines)/2 && i%2 == 1 {
                return checkInstanceInfo(i, m, "running")
            }
            status, err := m.InstanceStatus()
            if i%2 == 0 {
                // Even machines not provisioned yet.
                c.Assert(err, jc.Satisfies, state.IsNotProvisionedError)
            } else {
                c.Assert(status, gc.Equals, "")
            }
            return len(m.Addresses()) == 0
        }) {
            break
        }
    }
    // Now provision the even machines in the first half and watch them get addresses.
    for i := 0; i < len(insts)/2; i += 2 {
        m := machines[i]
        err := m.SetProvisioned(insts[i].Id(), "nonce", nil)
        c.Assert(err, gc.IsNil)
        dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
        dummy.SetInstanceStatus(insts[i], "running")
    }
    for a := coretesting.LongAttempt.Start(); a.Next(); {
        if !a.HasNext() {
            c.Fatalf("timed out waiting for machine instance info")
        }
        if machinesSatisfy(c, machines, func(i int, m *state.Machine) bool {
            if i < len(machines)/2 {
                return checkInstanceInfo(i, m, "running")
            }
            // Machines in second half still have no addresses, nor status.
            status, err := m.InstanceStatus()
            if i%2 == 0 {
                // Even machines not provisioned yet.
                c.Assert(err, jc.Satisfies, state.IsNotProvisionedError)
            } else {
                c.Assert(status, gc.Equals, "")
            }
            return len(m.Addresses()) == 0
        }) {
            break
        }
    }
    // Provision the remaining machines and check the address and status.
    for i := len(insts) / 2; i < len(insts); i++ {
        if i%2 == 0 {
            m := machines[i]
            err := m.SetProvisioned(insts[i].Id(), "nonce", nil)
            c.Assert(err, gc.IsNil)
        }
        dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
        dummy.SetInstanceStatus(insts[i], "running")
    }
    for a := coretesting.LongAttempt.Start(); a.Next(); {
        if !a.HasNext() {
            c.Fatalf("timed out waiting for machine instance info")
        }
        if machinesSatisfy(c, machines, func(i int, m *state.Machine) bool {
            return checkInstanceInfo(i, m, "running")
        }) {
            break
        }
    }
}
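// TestWorker leans on a machinesSatisfy helper that is not reproduced in
// this section. A plausible sketch, assuming it refreshes each machine
// and requires the predicate to hold for all of them:
func machinesSatisfy(c *gc.C, machines []*state.Machine, f func(i int, m *state.Machine) bool) bool {
    // Refresh every machine so the predicate sees current state, then
    // report whether f holds for all of them.
    for i, m := range machines {
        err := m.Refresh()
        c.Assert(err, gc.IsNil)
        if !f(i, m) {
            return false
        }
    }
    return true
}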
func (s *workerSuite) TestWorker(c *gc.C) {
    // Most functionality is already tested in detail - we
    // just need to test that things are wired together
    // correctly.
    s.PatchValue(&ShortPoll, 10*time.Millisecond)
    s.PatchValue(&LongPoll, 10*time.Millisecond)
    s.PatchValue(&gatherTime, 10*time.Millisecond)
    machines, insts := s.setupScenario(c)
    s.State.StartSync()

    w, err := NewWorker(Config{
        Facade:  s.api,
        Environ: s.Environ,
    })
    c.Assert(err, jc.ErrorIsNil)
    defer func() {
        c.Assert(worker.Stop(w), gc.IsNil)
    }()

    // TODO(perrito666) make this dependent on a juju status
    checkInstanceInfo := func(index int, m machine, expectedStatus string) bool {
        isProvisioned := true
        instanceStatus, err := m.InstanceStatus()
        if params.IsCodeNotProvisioned(err) {
            isProvisioned = false
        } else {
            c.Assert(err, jc.ErrorIsNil)
        }
        providerAddresses, err := m.ProviderAddresses()
        c.Assert(err, jc.ErrorIsNil)
        // TODO(perrito666) all providers should use juju statuses instead of message.
        return reflect.DeepEqual(providerAddresses, s.addressesForIndex(index)) &&
            (!isProvisioned || instanceStatus.Info == expectedStatus)
    }

    // Wait for the odd numbered machines in the
    // first half of the machine slice to be given their
    // addresses and status.
    for a := coretesting.LongAttempt.Start(); a.Next(); {
        if !a.HasNext() {
            c.Fatalf("timed out waiting for instance info")
        }
        if machinesSatisfy(c, machines, func(i int, m *apiinstancepoller.Machine) bool {
            if i < len(machines)/2 && i%2 == 1 {
                return checkInstanceInfo(i, m, "running")
            }
            instanceStatus, err := m.InstanceStatus()
            c.Assert(err, jc.ErrorIsNil)
            c.Logf("instance message is: %q", instanceStatus.Info)
            c.Assert(instanceStatus.Status, gc.Equals, status.StatusPending)
            stm, err := s.State.Machine(m.Id())
            c.Assert(err, jc.ErrorIsNil)
            return len(stm.Addresses()) == 0
        }) {
            break
        }
    }
    // Now provision the even machines in the first half and watch them get addresses.
    for i := 0; i < len(insts)/2; i += 2 {
        m, err := s.State.Machine(machines[i].Id())
        c.Assert(err, jc.ErrorIsNil)
        err = m.SetProvisioned(insts[i].Id(), "nonce", nil)
        c.Assert(err, jc.ErrorIsNil)
        dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
        dummy.SetInstanceStatus(insts[i], "running")
    }
    for a := coretesting.LongAttempt.Start(); a.Next(); {
        if !a.HasNext() {
            c.Fatalf("timed out waiting for machine instance info")
        }
        if machinesSatisfy(c, machines, func(i int, m *apiinstancepoller.Machine) bool {
            if i < len(machines)/2 {
                return checkInstanceInfo(i, m, "running")
            }
            // Machines in second half still have no addresses, nor status.
            instanceStatus, err := m.InstanceStatus()
            c.Assert(err, jc.ErrorIsNil)
            c.Assert(instanceStatus.Status, gc.Equals, status.StatusPending)
            stm, err := s.State.Machine(m.Id())
            c.Assert(err, jc.ErrorIsNil)
            return len(stm.Addresses()) == 0
        }) {
            break
        }
    }
    // Provision the remaining machines and check the address and status.
    for i := len(insts) / 2; i < len(insts); i++ {
        if i%2 == 0 {
            m, err := s.State.Machine(machines[i].Id())
            c.Assert(err, jc.ErrorIsNil)
            err = m.SetProvisioned(insts[i].Id(), "nonce", nil)
            c.Assert(err, jc.ErrorIsNil)
        }
        dummy.SetInstanceAddresses(insts[i], s.addressesForIndex(i))
        dummy.SetInstanceStatus(insts[i], "running")
    }
    for a := coretesting.LongAttempt.Start(); a.Next(); {
        if !a.HasNext() {
            c.Fatalf("timed out waiting for machine instance info")
        }
        if machinesSatisfy(c, machines, func(i int, m *apiinstancepoller.Machine) bool {
            return checkInstanceInfo(i, m, "running")
        }) {
            break
        }
    }
}
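// Both versions of TestWorker type checkInstanceInfo against a local
// machine interface that this section does not show. A sketch of the
// API-era variant, covering only the methods the closure actually
// calls; the return types are assumptions, not confirmed signatures:
type machine interface {
    // InstanceStatus returns the instance status recorded for the
    // machine; unprovisioned machines yield a NotProvisioned error.
    InstanceStatus() (params.StatusResult, error)
    // ProviderAddresses returns the addresses the provider reported.
    ProviderAddresses() ([]network.Address, error)
}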