func (s *IsolatedWorkerSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)
	s.stub = &testing.Stub{}
	s.dataDir = c.MkDir()
	s.hookRan = make(chan struct{})
	s.triggersCreated = make(chan struct{})

	triggerFactory := func(state meterstatus.WorkerState, status string, disconnectedAt time.Time, clk clock.Clock, amber time.Duration, red time.Duration) (<-chan time.Time, <-chan time.Time) {
		select {
		case s.triggersCreated <- struct{}{}:
		case <-time.After(coretesting.LongWait):
			c.Fatalf("failed to signal trigger creation")
		}
		return meterstatus.GetTriggers(state, status, disconnectedAt, clk, amber, red)
	}

	s.clk = testing.NewClock(time.Now())
	wrk, err := meterstatus.NewIsolatedStatusWorker(
		meterstatus.IsolatedConfig{
			Runner:           &stubRunner{stub: s.stub, ran: s.hookRan},
			StateFile:        meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
			Clock:            s.clk,
			AmberGracePeriod: AmberGracePeriod,
			RedGracePeriod:   RedGracePeriod,
			TriggerFactory:   triggerFactory,
		})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(wrk, gc.NotNil)
	s.worker = wrk
}

func (s *httpSuite) TestHTTPClient(c *gc.C) {
	var handler http.HandlerFunc
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		handler(w, req)
	}))
	defer srv.Close()
	s.client.BaseURL = srv.URL
	for i, test := range httpClientTests {
		c.Logf("test %d: %s", i, test.about)
		handler = test.handler
		var resp interface{}
		if test.expectResponse != nil {
			resp = reflect.New(reflect.TypeOf(test.expectResponse).Elem()).Interface()
		}
		err := s.client.Get("/", resp)
		if test.expectError != "" {
			c.Check(err, gc.ErrorMatches, test.expectError)
			c.Check(params.ErrCode(err), gc.Equals, test.expectErrorCode)
			if err, ok := errors.Cause(err).(*params.Error); ok {
				c.Check(err.Info, jc.DeepEquals, test.expectErrorInfo)
			} else if test.expectErrorInfo != nil {
				c.Fatalf("no error info found in error")
			}
			continue
		}
		c.Check(err, gc.IsNil)
		c.Check(resp, jc.DeepEquals, test.expectResponse)
	}
}

func (s *FilterSuite) TestServiceDeath(c *gc.C) {
	f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag))
	c.Assert(err, jc.ErrorIsNil)
	defer statetesting.AssertStop(c, f)
	dyingC := s.notifyAsserterC(c, f.UnitDying())
	dyingC.AssertNoReceive()

	err = s.unit.SetAgentStatus(state.StatusIdle, "", nil)
	c.Assert(err, jc.ErrorIsNil)
	err = s.wordpress.Destroy()
	c.Assert(err, jc.ErrorIsNil)
	timeout := time.After(coretesting.LongWait)
loop:
	for {
		select {
		case <-f.UnitDying():
			break loop
		case <-time.After(coretesting.ShortWait):
			s.BackingState.StartSync()
		case <-timeout:
			c.Fatalf("dead not detected")
		}
	}
	err = s.unit.Refresh()
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(s.unit.Life(), gc.Equals, state.Dying)

	// Can't set s.wordpress to Dead while it still has units.
}

func (waitAddresses) step(c *gc.C, ctx *context) {
	timeout := time.After(worstCase)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for unit addresses")
		case <-time.After(coretesting.ShortWait):
			err := ctx.unit.Refresh()
			if err != nil {
				c.Fatalf("unit refresh failed: %v", err)
			}
			// GZ 2013-07-10: Hardcoded values from dummy environ
			// special cased here, questionable.
			private, _ := ctx.unit.PrivateAddress()
			if private.Value != "private.address.example.com" {
				continue
			}
			public, _ := ctx.unit.PublicAddress()
			if public.Value != "public.address.example.com" {
				continue
			}
			return
		}
	}
}

func (s *ExecHelperSuite) TestExecHelperError(c *gc.C) {
	argChan := make(chan []string, 1)

	cfg := testing.PatchExecConfig{
		Stdout:   "Hellooooo stdout!",
		Stderr:   "Hellooooo stderr!",
		ExitCode: 55,
		Args:     argChan,
	}

	f := s.GetExecCommand(cfg)

	stderr := &bytes.Buffer{}
	stdout := &bytes.Buffer{}
	cmd := f("echo", "hello world!")
	cmd.Stderr = stderr
	cmd.Stdout = stdout
	err := cmd.Run()
	c.Assert(err, gc.NotNil)
	_, ok := err.(*exec.ExitError)
	if !ok {
		c.Errorf("Expected *exec.ExitError, but got %T", err)
	} else {
		c.Check(err.Error(), gc.Equals, "exit status 55")
	}
	c.Check(stderr.String(), gc.Equals, cfg.Stderr+"\n")
	c.Check(stdout.String(), gc.Equals, cfg.Stdout+"\n")

	select {
	case args := <-argChan:
		c.Assert(args, gc.DeepEquals, []string{"echo", "hello world!"})
	default:
		c.Fatalf("No arguments passed to output channel")
	}
}

func (s *lxcProvisionerSuite) expectStarted(c *gc.C, machine *state.Machine) string {
	// This check in particular leads to tests just hanging
	// indefinitely quite often on i386.
	coretesting.SkipIfI386(c, "lp:1425569")

	var event mock.Event
	s.State.StartSync()
	select {
	case event = <-s.events:
		c.Assert(event.Action, gc.Equals, mock.Created)
		argsSet := set.NewStrings(event.TemplateArgs...)
		c.Assert(argsSet.Contains("imageURL"), jc.IsTrue)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timeout while waiting for the mock container to be created")
	}

	select {
	case event = <-s.events:
		c.Assert(event.Action, gc.Equals, mock.Started)
		err := machine.Refresh()
		c.Assert(err, jc.ErrorIsNil)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timeout while waiting for the mock container to start")
	}

	s.waitInstanceId(c, machine, instance.Id(event.InstanceId))
	return event.InstanceId
}

func (s *RethinkSuite) TestCursorReuseResult(c *test.C) {
	// Test query
	query := Expr([]interface{}{
		map[string]interface{}{"A": "a"},
		map[string]interface{}{"B": 1},
		map[string]interface{}{"A": "a"},
		map[string]interface{}{"B": 1},
		map[string]interface{}{"A": "a", "B": 1},
	})
	res, err := query.Run(sess)
	c.Assert(err, test.IsNil)

	var i int
	var result SimpleT
	for res.Next(&result) {
		switch i {
		case 0:
			c.Assert(result, test.DeepEquals, SimpleT{A: "a", B: 0})
		case 1:
			c.Assert(result, test.DeepEquals, SimpleT{A: "", B: 1})
		case 2:
			c.Assert(result, test.DeepEquals, SimpleT{A: "a", B: 0})
		case 3:
			c.Assert(result, test.DeepEquals, SimpleT{A: "", B: 1})
		case 4:
			c.Assert(result, test.DeepEquals, SimpleT{A: "a", B: 1})
		default:
			c.Fatalf("Unexpected number of results")
		}
		i++
	}
	c.Assert(res.Err(), test.IsNil)
}

// assertNoMoreCalls fails the test if the mock client reports any further
// API calls within ShortWait.
func assertNoMoreCalls(c *gc.C, client *mockClient) {
	select {
	case call := <-client.calls:
		c.Fatalf("unexpected API call: %q", call)
	case <-time.After(testing.ShortWait):
	}
}

// AssertStart fails the test unless the harness observes a worker start
// within LongWait.
func (ews *manifoldHarness) AssertStart(c *gc.C) {
	select {
	case <-ews.starts:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("never started")
	}
}

func (t *LiveTests) TestStopInstances(c *gc.C) {
	t.PrepareOnce(c)
	// It would be nice if this test was in jujutest, but
	// there's no way for jujutest to fabricate a valid-looking
	// instance id.
	inst0, _ := testing.AssertStartInstance(c, t.Env, "40")
	inst1 := ec2.FabricateInstance(inst0, "i-aaaaaaaa")
	inst2, _ := testing.AssertStartInstance(c, t.Env, "41")

	err := t.Env.StopInstances(inst0.Id(), inst1.Id(), inst2.Id())
	c.Check(err, jc.ErrorIsNil)

	var insts []instance.Instance

	// We need the retry logic here because we are waiting
	// for Instances to return an error, and it will not retry
	// if it succeeds.
	gone := false
	for a := ec2.ShortAttempt.Start(); a.Next(); {
		insts, err = t.Env.Instances([]instance.Id{inst0.Id(), inst2.Id()})
		if err == environs.ErrPartialInstances {
			// instances not gone yet.
			continue
		}
		if err == environs.ErrNoInstances {
			gone = true
			break
		}
		c.Fatalf("error getting instances: %v", err)
	}
	if !gone {
		c.Errorf("after termination, instances remaining: %v", insts)
	}
}

func (s *pingerSuite) calculatePingTimeout(c *gc.C) time.Duration {
	// Try opening an API connection a few times and take the max
	// delay among the attempts.
	attempt := utils.AttemptStrategy{
		Delay: coretesting.ShortWait,
		Min:   3,
	}
	var maxTimeout time.Duration
	for a := attempt.Start(); a.Next(); {
		openStart := time.Now()
		st, _ := s.OpenAPIAsNewMachine(c)
		err := st.Ping()
		if c.Check(err, jc.ErrorIsNil) {
			openDelay := time.Since(openStart)
			c.Logf("API open and initial ping took %v", openDelay)
			if maxTimeout < openDelay {
				maxTimeout = openDelay
			}
		}
		if st != nil {
			c.Check(st.Close(), jc.ErrorIsNil)
		}
	}
	if !c.Failed() && maxTimeout > 0 {
		return maxTimeout
	}
	c.Fatalf("cannot calculate ping timeout")
	return 0
}

// assertNoEvent fails the test if anything arrives on ch within ShortWait.
func assertNoEvent(c *gc.C, ch <-chan interface{}, event string) {
	select {
	case <-ch:
		c.Fatalf("unexpected %s", event)
	case <-time.After(coretesting.ShortWait):
	}
}

func (s *workerSuite) TestMongoErrorNoCommonSpace(c *gc.C) {
	c.Skip("dimitern: test disabled as it needs refactoring")
	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		st, machines, hostPorts := mongoSpaceTestCommonSetup(c, ipVersion, false)

		for i, machine := range machines {
			// machine 10 gets a host port in space one
			// machine 11 gets a host port in space two
			// machine 12 gets a host port in space three
			st.machine(machine).setMongoHostPorts(hostPorts[i : i+1])
		}

		w := startWorkerSupportingSpaces(c, st, ipVersion)
		done := make(chan error)
		go func() {
			done <- w.Wait()
		}()
		select {
		case err := <-done:
			c.Assert(err, gc.ErrorMatches, ".*couldn't find a space containing all peer group machines")
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for worker to exit")
		}

		// Each machine is in a unique space, so the Mongo space should be empty.
		c.Assert(st.getMongoSpaceName(), gc.Equals, "")
		c.Assert(st.getMongoSpaceState(), gc.Equals, state.MongoSpaceInvalid)
	})
}

func (s *ProxyUpdaterSuite) waitForFile(c *gc.C, filename, expected string) {
	// TODO(bogdanteleaga): Find a way to test this on windows
	if runtime.GOOS == "windows" {
		c.Skip("Proxy settings are written to the registry on windows")
	}
	maxWait := time.After(coretesting.LongWait)
	for {
		select {
		case <-maxWait:
			c.Fatalf("timeout while waiting for proxy settings to change")
			return
		case <-time.After(10 * time.Millisecond):
			fileContent, err := ioutil.ReadFile(filename)
			if os.IsNotExist(err) {
				continue
			}
			c.Assert(err, jc.ErrorIsNil)
			if string(fileContent) != expected {
				c.Logf("file content not matching, still waiting")
				continue
			}
			return
		}
	}
}

func (*limiterSuite) TestAcquireWaitBlocksUntilRelease(c *gc.C) {
	l := utils.NewLimiter(2)
	calls := make([]string, 0, 10)
	start := make(chan bool, 0)
	waiting := make(chan bool, 0)
	done := make(chan bool, 0)
	go func() {
		<-start
		calls = append(calls, fmt.Sprintf("%v", l.Acquire()))
		calls = append(calls, fmt.Sprintf("%v", l.Acquire()))
		calls = append(calls, fmt.Sprintf("%v", l.Acquire()))
		waiting <- true
		l.AcquireWait()
		calls = append(calls, "waited")
		calls = append(calls, fmt.Sprintf("%v", l.Acquire()))
		done <- true
	}()
	// Start the routine, and wait for it to get to the first checkpoint.
	start <- true
	select {
	case <-waiting:
	case <-time.After(longWait):
		c.Fatalf("timed out waiting for 'waiting' to trigger")
	}
	c.Check(l.Acquire(), jc.IsFalse)
	l.Release()
	select {
	case <-done:
	case <-time.After(longWait):
		c.Fatalf("timed out waiting for 'done' to trigger")
	}
	c.Check(calls, gc.DeepEquals, []string{"true", "true", "false", "waited", "false"})
}

// AssertNoStart fails the test if the harness observes a worker start
// within ShortWait.
func (ews *manifoldHarness) AssertNoStart(c *gc.C) {
	select {
	case <-time.After(coretesting.ShortWait):
	case <-ews.starts:
		c.Fatalf("started unexpectedly")
	}
}

func (s *workerSuite) TestStateServersArePublished(c *gc.C) {
	DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) {
		publishCh := make(chan [][]network.HostPort)
		publish := func(apiServers [][]network.HostPort, instanceIds []instance.Id) error {
			publishCh <- apiServers
			return nil
		}

		st := NewFakeState()
		InitState(c, st, 3, ipVersion)

		w := newWorker(st, PublisherFunc(publish))
		defer func() {
			c.Check(worker.Stop(w), gc.IsNil)
		}()

		select {
		case servers := <-publishCh:
			AssertAPIHostPorts(c, servers, ExpectedAPIHostPorts(3, ipVersion))
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for publish")
		}

		// Change one of the servers' API addresses and check that it's published.
		var newMachine10APIHostPorts []network.HostPort
		newMachine10APIHostPorts = network.NewHostPorts(apiPort, ipVersion.extraHost)
		st.machine("10").setAPIHostPorts(newMachine10APIHostPorts)
		select {
		case servers := <-publishCh:
			expected := ExpectedAPIHostPorts(3, ipVersion)
			expected[0] = newMachine10APIHostPorts
			AssertAPIHostPorts(c, servers, expected)
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for publish")
		}
	})
}

// InjectError feeds err into the harness's error channel, failing the test
// if the send does not complete within LongWait.
func (ews *manifoldHarness) InjectError(c *gc.C, err error) {
	select {
	case ews.errors <- err:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("never sent")
	}
}

func (s *KillSuite) TestKillEarlyAPIConnectionTimeout(c *gc.C) {
	stop := make(chan struct{})
	defer close(stop)
	testDialer := func(sysName string) (*api.State, error) {
		<-stop
		return nil, errors.New("kill command waited too long")
	}

	done := make(chan struct{})
	go func() {
		defer close(done)
		cmd := system.NewKillCommand(nil, nil, nil, testDialer)
		ctx, err := testing.RunCommand(c, cmd, "test1", "-y")
		c.Check(err, jc.ErrorIsNil)
		c.Check(testing.Stderr(ctx), jc.Contains, "Unable to open API: connection to state server timed out")
		c.Check(s.api.ignoreBlocks, jc.IsFalse)
		c.Check(s.api.destroyAll, jc.IsFalse)
		checkSystemRemovedFromStore(c, "test1", s.store)
	}()
	select {
	case <-done:
	case <-time.After(1 * time.Minute):
		c.Fatalf("Kill command waited too long to open the API")
	}
}

func (s *UnitSuite) TestAgentSetsToolsVersion(c *gc.C) {
	_, unit, _, _ := s.primeAgent(c)
	vers := version.Current
	vers.Minor = version.Current.Minor + 1
	err := unit.SetAgentVersion(vers)
	c.Assert(err, jc.ErrorIsNil)

	a := s.newAgent(c, unit)
	go func() { c.Check(a.Run(nil), gc.IsNil) }()
	defer func() { c.Check(a.Stop(), gc.IsNil) }()

	timeout := time.After(coretesting.LongWait)
	for done := false; !done; {
		select {
		case <-timeout:
			c.Fatalf("timeout while waiting for agent version to be set")
		case <-time.After(coretesting.ShortWait):
			err := unit.Refresh()
			c.Assert(err, jc.ErrorIsNil)
			agentTools, err := unit.AgentTools()
			c.Assert(err, jc.ErrorIsNil)
			if agentTools.Version.Minor != version.Current.Minor {
				continue
			}
			c.Assert(agentTools.Version, gc.DeepEquals, version.Current)
			done = true
		}
	}
}

func (s addRelation) step(c *gc.C, ctx *context) {
	if ctx.relation != nil {
		panic("don't add two relations!")
	}
	if ctx.relatedSvc == nil {
		ctx.relatedSvc = ctx.s.AddTestingService(c, "mysql", ctx.s.AddTestingCharm(c, "mysql"))
	}
	eps, err := ctx.st.InferEndpoints("u", "mysql")
	c.Assert(err, jc.ErrorIsNil)
	ctx.relation, err = ctx.st.AddRelation(eps...)
	c.Assert(err, jc.ErrorIsNil)
	ctx.relationUnits = map[string]*state.RelationUnit{}
	if !s.waitJoin {
		return
	}

	// It's hard to do this properly (watching scope) without perturbing other tests.
	ru, err := ctx.relation.Unit(ctx.unit)
	c.Assert(err, jc.ErrorIsNil)
	timeout := time.After(worstCase)
	for {
		c.Logf("waiting to join relation")
		select {
		case <-timeout:
			c.Fatalf("failed to join relation")
		case <-time.After(coretesting.ShortWait):
			inScope, err := ru.InScope()
			c.Assert(err, jc.ErrorIsNil)
			if inScope {
				return
			}
		}
	}
}

func (s *UnitSuite) TestUnitAgentAPIWorkerErrorClosesAPI(c *gc.C) {
	_, unit, _, _ := s.primeAgent(c)
	a := s.newAgent(c, unit)
	a.apiStateUpgrader = &unitAgentUpgrader{}

	closedAPI := make(chan io.Closer, 1)
	s.AgentSuite.PatchValue(&reportClosedUnitAPI, func(st io.Closer) {
		select {
		case closedAPI <- st:
			close(closedAPI)
		default:
		}
	})

	worker, err := a.APIWorkers()

	select {
	case closed := <-closedAPI:
		c.Assert(closed, gc.NotNil)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("API not opened")
	}

	c.Assert(worker, gc.IsNil)
	c.Assert(err, gc.ErrorMatches, "cannot set unit agent version: test failure")
}

func (s *ExecHelperSuite) TestExecHelper(c *gc.C) {
	argChan := make(chan []string, 1)

	cfg := testing.PatchExecConfig{
		Stdout: "Hellooooo stdout!",
		Stderr: "Hellooooo stderr!",
		Args:   argChan,
	}

	f := s.GetExecCommand(cfg)

	stderr := &bytes.Buffer{}
	stdout := &bytes.Buffer{}
	cmd := f("echo", "hello world!")
	cmd.Stderr = stderr
	cmd.Stdout = stdout
	err := cmd.Run()
	c.Assert(err, jc.ErrorIsNil)
	c.Check(stderr.String(), gc.Equals, cfg.Stderr+"\n")
	c.Check(stdout.String(), gc.Equals, cfg.Stdout+"\n")

	select {
	case args := <-argChan:
		c.Assert(args, gc.DeepEquals, []string{"echo", "hello world!"})
	default:
		c.Fatalf("No arguments passed to output channel")
	}
}

func (srv *localServer) startServer(c *gc.C) {
	var err error
	srv.ec2srv, err = ec2test.NewServer()
	if err != nil {
		c.Fatalf("cannot start ec2 test server: %v", err)
	}
	srv.ec2srv.SetCreateRootDisks(srv.createRootDisks)
	srv.s3srv, err = s3test.NewServer(srv.config)
	if err != nil {
		c.Fatalf("cannot start s3 test server: %v", err)
	}
	aws.Regions["test"] = aws.Region{
		Name:                 "test",
		EC2Endpoint:          srv.ec2srv.URL(),
		S3Endpoint:           srv.s3srv.URL(),
		S3LocationConstraint: true,
	}
	srv.addSpice(c)

	zones := make([]amzec2.AvailabilityZoneInfo, 3)
	zones[0].Region = "test"
	zones[0].Name = "test-available"
	zones[0].State = "available"
	zones[1].Region = "test"
	zones[1].Name = "test-impaired"
	zones[1].State = "impaired"
	zones[2].Region = "test"
	zones[2].Name = "test-unavailable"
	zones[2].State = "unavailable"
	srv.ec2srv.SetAvailabilityZones(zones)
}

func (s *CommonProvisionerSuite) assertProvisionerObservesConfigChanges(c *gc.C, p provisioner.Provisioner) {
	// Inject our observer into the provisioner.
	cfgObserver := make(chan *config.Config, 1)
	provisioner.SetObserver(p, cfgObserver)

	// Switch to reaping on All machines.
	attrs := map[string]interface{}{
		config.ProvisionerHarvestModeKey: config.HarvestAll.String(),
	}
	err := s.State.UpdateEnvironConfig(attrs, nil, nil)
	c.Assert(err, jc.ErrorIsNil)

	s.BackingState.StartSync()

	// Wait for the PA to load the new configuration. We wait for the change we expect
	// like this because sometimes we pick up the initial harvest config (destroyed)
	// rather than the one we change to (all).
	received := []string{}
	for {
		select {
		case newCfg := <-cfgObserver:
			if newCfg.ProvisionerHarvestMode().String() == config.HarvestAll.String() {
				return
			}
			received = append(received, newCfg.ProvisionerHarvestMode().String())
		case <-time.After(coretesting.LongWait):
			if len(received) == 0 {
				c.Fatalf("PA did not action config change")
			} else {
				c.Fatalf("timed out waiting for config to change to '%s', received %+v",
					config.HarvestAll.String(), received)
			}
		}
	}
}

func (s *HookSenderSuite) TestHandlesUpdatesEmptyQueueSpam(c *gc.C) {
	source := hooktesting.NewEmptySource()
	defer statetesting.AssertStop(c, source)
	out := make(chan hook.Info)
	sender := hook.NewSender(out, source)
	defer statetesting.AssertStop(c, sender)

	// Spam all channels continuously for a bit.
	timeout := time.After(coretesting.LongWait)
	changeCount := 0
	updateCount := 0
	for i := 0; i < 100; i++ {
		select {
		case hi, ok := <-out:
			c.Fatalf("got unexpected hook: %#v %#v", hi, ok)
		case source.ChangesC <- source.NewChange("sent"):
			changeCount++
		case update, ok := <-source.UpdatesC:
			c.Assert(ok, jc.IsTrue)
			c.Assert(update, gc.Equals, "sent")
			updateCount++
		case <-timeout:
			c.Fatalf("not enough things happened in time")
		}
	}

	// Check sane end state.
	c.Check(changeCount, gc.Equals, 50)
	c.Check(updateCount, gc.Equals, 50)
}

func (s *ClientSuite) TestClaimLeadershipTranslation(c *gc.C) {
	const claimTime = 5 * time.Hour
	numStubCalls := 0

	apiCaller := s.apiCaller(c, func(request string, arg, result interface{}) error {
		numStubCalls++
		c.Check(request, gc.Equals, "ClaimLeadership")
		c.Check(arg, jc.DeepEquals, params.ClaimLeadershipBulkParams{
			Params: []params.ClaimLeadershipParams{{
				ServiceTag:      "service-stub-service",
				UnitTag:         "unit-stub-unit-0",
				DurationSeconds: claimTime.Seconds(),
			}},
		})
		switch result := result.(type) {
		case *params.ClaimLeadershipBulkResults:
			result.Results = []params.ErrorResult{{}}
		default:
			c.Fatalf("bad result type: %T", result)
		}
		return nil
	})

	client := leadership.NewClient(apiCaller)
	err := client.ClaimLeadership(StubServiceNm, StubUnitNm, claimTime)
	c.Check(err, jc.ErrorIsNil)
	c.Check(numStubCalls, gc.Equals, 1)
}

// assertEmpty fails the test if any hook.Info arrives on out within ShortWait.
func assertEmpty(c *gc.C, out chan hook.Info) {
	select {
	case <-time.After(coretesting.ShortWait):
	case actual, ok := <-out:
		c.Fatalf("got unexpected %#v %#v", actual, ok)
	}
}

func (s *SelfSuite) TestActuallyWorks(c *gc.C) {
	// Create and install a manifold with an unsatisfied dependency.
	mh1 := newManifoldHarness("self")
	err := s.engine.Install("dependent", mh1.Manifold())
	c.Assert(err, jc.ErrorIsNil)
	mh1.AssertNoStart(c)

	// Install an engine inside itself; once it's "started", dependent will
	// be restarted.
	manifold := dependency.SelfManifold(s.engine)
	err = s.engine.Install("self", manifold)
	c.Assert(err, jc.ErrorIsNil)
	mh1.AssertOneStart(c)

	// Check we can still stop it (with a timeout -- injudicious
	// implementation changes could induce deadlocks).
	done := make(chan struct{})
	go func() {
		err := worker.Stop(s.engine)
		c.Check(err, jc.ErrorIsNil)
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out")
	}
}

func (*rpcSuite) TestServerWaitsForOutstandingCalls(c *gc.C) {
	ready := make(chan struct{})
	start := make(chan string)
	root := &Root{
		delayed: map[string]*DelayedMethods{
			"1": {
				ready: ready,
				done:  start,
			},
		},
	}
	client, srvDone, _, _ := newRPCClientServer(c, root, nil, false)
	defer closeClient(c, client, srvDone)
	done := make(chan struct{})
	go func() {
		var r stringVal
		err := client.Call(rpc.Request{"DelayedMethods", 0, "1", "Delay"}, nil, &r)
		c.Check(errors.Cause(err), gc.Equals, rpc.ErrShutdown)
		done <- struct{}{}
	}()
	chanRead(c, ready, "DelayedMethods.Delay ready")
	client.Close()
	select {
	case err := <-srvDone:
		c.Fatalf("server returned while outstanding operation in progress: %v", err)
		<-done
	case <-time.After(25 * time.Millisecond):
	}
	start <- "xxx"
}