func (s *stubSender) waitBeforeClose(c *gc.C) {
	select {
	case <-s.waitCloseCh:
	case <-time.After(coretesting.LongWait):
		c.Error("timed out waiting")
	}
}
func (*APIOpenerSuite) TestTimeoutClosesAPIOnTimeout(c *gc.C) {
	var name string
	finished := make(chan struct{})
	mockConn := &mockConnection{closed: make(chan struct{})}
	open := func(connectionName string) (api.Connection, error) {
		<-finished
		name = connectionName
		return mockConn, nil
	}
	// Have the mock clock only wait a microsecond...
	clock := &mockClock{wait: time.Microsecond}
	// ...but tell the opener to wait five seconds.
	opener := envcmd.NewTimeoutOpener(open, clock, 5*time.Second)
	conn, err := opener.Open("a-name")
	c.Assert(errors.Cause(err), gc.Equals, envcmd.ErrConnTimedOut)
	c.Assert(conn, gc.IsNil)

	// Check the opener asked the clock to wait for five seconds.
	c.Assert(clock.duration, gc.Equals, 5*time.Second)

	// Tell the open func to continue now that we have timed out.
	close(finished)

	// Wait until the connection has been closed.
	select {
	case <-mockConn.closed:
		// continue
	case <-time.After(5 * time.Second):
		c.Error("API connection was not closed.")
	}
	c.Assert(name, gc.Equals, "a-name")
}
func (s *stubSender) waitAfterSend(c *gc.C) {
	select {
	case <-s.waitSendCh:
	case <-time.After(coretesting.LongWait):
		c.Error("timed out waiting")
	}
}
func (s *BootstrapSuite) TestBootstrapDestroy(c *gc.C) {
	resetJujuXDGDataHome(c)
	s.patchVersion(c)
	opc, errc := cmdtesting.RunCommand(
		cmdtesting.NullContext(c), s.newBootstrapCommand(),
		"devcontroller", "dummy-cloud/region-1",
		"--config", "broken=Bootstrap Destroy",
		"--auto-upgrade",
	)
	err := <-errc
	c.Assert(err, gc.ErrorMatches, "failed to bootstrap model: dummy.Bootstrap is broken")

	var opDestroy *dummy.OpDestroy
	for opDestroy == nil {
		select {
		case op := <-opc:
			switch op := op.(type) {
			case dummy.OpDestroy:
				opDestroy = &op
			}
		default:
			c.Error("expected call to env.Destroy")
			return
		}
	}
	c.Assert(opDestroy.Error, gc.ErrorMatches, "dummy.Destroy is broken")
}
func (s *BootstrapSuite) TestBootstrapKeepBroken(c *gc.C) { resetJujuHome(c, "devenv") devVersion := version.Current // Force a dev version by having a non zero build number. // This is because we have not uploaded any tools and auto // upload is only enabled for dev versions. devVersion.Build = 1234 s.PatchValue(&version.Current, devVersion) opc, errc := cmdtesting.RunCommand(cmdtesting.NullContext(c), envcmd.Wrap(new(BootstrapCommand)), "-e", "brokenenv", "--keep-broken") err := <-errc c.Assert(err, gc.ErrorMatches, "failed to bootstrap environment: dummy.Bootstrap is broken") done := false for !done { select { case op, ok := <-opc: if !ok { done = true break } switch op.(type) { case dummy.OpDestroy: c.Error("unexpected call to env.Destroy") break } default: break } } }
func assertUpgradeNotComplete(c *gc.C, context *upgradeWorkerContext) {
	select {
	case <-context.UpgradeComplete:
		c.Error("UpgradeComplete channel is closed but shouldn't be")
	default:
	}
}
func assertUpgradeComplete(c *gc.C, context *upgradeWorkerContext) {
	select {
	case <-context.UpgradeComplete:
	default:
		c.Error("UpgradeComplete channel is open but shouldn't be")
	}
}
func (s *BootstrapSuite) TestBootstrapKeepBroken(c *gc.C) {
	resetJujuXDGDataHome(c)
	s.patchVersion(c)
	opc, errc := cmdtesting.RunCommand(cmdtesting.NullContext(c), s.newBootstrapCommand(),
		"--keep-broken",
		"devcontroller", "dummy-cloud/region-1",
		"--config", "broken=Bootstrap Destroy",
		"--auto-upgrade",
	)
	err := <-errc
	c.Assert(err, gc.ErrorMatches, "failed to bootstrap model: dummy.Bootstrap is broken")
	done := false
	for !done {
		select {
		case op, ok := <-opc:
			if !ok {
				done = true
				break
			}
			switch op.(type) {
			case dummy.OpDestroy:
				c.Error("unexpected call to env.Destroy")
			}
		default:
		}
	}
}
func (*APIOpenerSuite) TestTimeoutClosesAPIOnTimeout(c *gc.C) {
	var controllerName, accountName, modelName string
	finished := make(chan struct{})
	mockConn := &mockConnection{closed: make(chan struct{})}
	open := func(_ jujuclient.ClientStore, controllerNameArg, accountNameArg, modelNameArg string) (api.Connection, error) {
		<-finished
		controllerName = controllerNameArg
		accountName = accountNameArg
		modelName = modelNameArg
		return mockConn, nil
	}
	// Have the mock clock only wait a microsecond...
	clock := &mockClock{wait: time.Microsecond}
	// ...but tell the opener to wait five seconds.
	opener := modelcmd.NewTimeoutOpener(modelcmd.OpenFunc(open), clock, 5*time.Second)
	conn, err := opener.Open(nil, "a-name", "b-name", "c-name")
	c.Assert(errors.Cause(err), gc.Equals, modelcmd.ErrConnTimedOut)
	c.Assert(conn, gc.IsNil)

	// Check the opener asked the clock to wait for five seconds.
	c.Assert(clock.duration, gc.Equals, 5*time.Second)

	// Tell the open func to continue now that we have timed out.
	close(finished)

	// Wait until the connection has been closed.
	select {
	case <-mockConn.closed:
		// continue
	case <-time.After(5 * time.Second):
		c.Error("API connection was not closed.")
	}
	c.Assert(controllerName, gc.Equals, "a-name")
	c.Assert(accountName, gc.Equals, "b-name")
	c.Assert(modelName, gc.Equals, "c-name")
}
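// The mockClock used by the two TestTimeoutClosesAPIOnTimeout variants above
// is not part of this excerpt. A minimal sketch consistent with how the tests
// use it: After records the duration it was asked for, then fires after the
// much shorter wait interval so the test times out quickly. The field names
// wait and duration come from the tests; the method shape is an assumption.
type mockClock struct {
	wait     time.Duration // how long After actually waits before firing
	duration time.Duration // the duration the code under test requested
}

func (c *mockClock) After(d time.Duration) <-chan time.Time {
	c.duration = d
	return time.After(c.wait)
}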
func (s *stubStream) waitAfterNext(c *gc.C) {
	select {
	case <-s.waitCh:
	case <-time.After(coretesting.LongWait):
		c.Error("timed out waiting")
	}
}
func assertUpgradeNotComplete(c *gc.C, doneCh chan struct{}) {
	select {
	case <-doneCh:
		c.Error("upgrade channel is closed but shouldn't be")
	default:
	}
}
func (s *BootstrapSuite) TestBootstrapKeepBroken(c *gc.C) { resetJujuHome(c, "devenv") s.patchVersion(c) opc, errc := cmdtesting.RunCommand(cmdtesting.NullContext(c), newBootstrapCommand(), "-e", "brokenenv", "--keep-broken", "--auto-upgrade") err := <-errc c.Assert(err, gc.ErrorMatches, "failed to bootstrap environment: dummy.Bootstrap is broken") done := false for !done { select { case op, ok := <-opc: if !ok { done = true break } switch op.(type) { case dummy.OpDestroy: c.Error("unexpected call to env.Destroy") break } default: break } } }
// NewTCPProxy runs a proxy that copies to and from
// the given remote TCP address. When the proxy
// is closed, its listener and all connections will be closed.
func NewTCPProxy(c *gc.C, remoteAddr string) *TCPProxy {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	c.Assert(err, jc.ErrorIsNil)
	p := &TCPProxy{
		listener: listener,
	}
	go func() {
		for {
			client, err := p.listener.Accept()
			if err != nil {
				if !p.isClosed() {
					c.Errorf("cannot accept: %v", err)
				}
				return
			}
			p.addConn(client)
			server, err := net.Dial("tcp", remoteAddr)
			if err != nil {
				if !p.isClosed() {
					c.Errorf("cannot dial remote address: %v", err)
				}
				return
			}
			p.addConn(server)
			go stream(client, server)
			go stream(server, client)
		}
	}()
	return p
}
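// stream, isClosed, and addConn are TCPProxy helpers that are not part of
// this excerpt. A plausible sketch of stream, assuming it is a one-way copy
// that tears down both ends on EOF or error so the peer goroutine unblocks;
// the real helper may differ.
func stream(dst, src net.Conn) {
	defer dst.Close()
	defer src.Close()
	io.Copy(dst, src) // error deliberately ignored: the proxy is best-effort
}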
func assertUpgradeComplete(c *gc.C, doneCh chan struct{}) {
	select {
	case <-doneCh:
	default:
		c.Error("upgrade channel is open but shouldn't be")
	}
}
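// Together, assertUpgradeNotComplete and assertUpgradeComplete give a
// race-free way to test the "closed channel as completion flag" pattern.
// A usage sketch; TestUpgradeSignalsDone and someSuite are hypothetical,
// the pattern is what matters.
func (s *someSuite) TestUpgradeSignalsDone(c *gc.C) {
	doneCh := make(chan struct{})
	assertUpgradeNotComplete(c, doneCh) // nothing has closed it yet
	close(doneCh)                       // the worker under test would do this on success
	assertUpgradeComplete(c, doneCh)
}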
func (s *BootstrapSuite) TestBootstrapDestroy(c *gc.C) {
	resetJujuHome(c, "devenv")
	devVersion := version.Current
	// Force a dev version by having a non zero build number.
	// This is because we have not uploaded any tools and auto
	// upload is only enabled for dev versions.
	devVersion.Build = 1234
	s.PatchValue(&version.Current, devVersion)
	opc, errc := cmdtesting.RunCommand(cmdtesting.NullContext(c),
		envcmd.Wrap(new(BootstrapCommand)), "-e", "brokenenv")
	err := <-errc
	c.Assert(err, gc.ErrorMatches, "failed to bootstrap environment: dummy.Bootstrap is broken")

	var opDestroy *dummy.OpDestroy
	for opDestroy == nil {
		select {
		case op := <-opc:
			switch op := op.(type) {
			case dummy.OpDestroy:
				opDestroy = &op
			}
		default:
			c.Error("expected call to env.Destroy")
			return
		}
	}
	c.Assert(opDestroy.Error, gc.ErrorMatches, "dummy.Destroy is broken")
}
// TestCollectionOnClosedSessionGraceful closes the session directly and checks
// we handle this cleanly without panicking.
func (s *collectionSizeSuite) TestCollectionOnClosedSessionGraceful(c *gc.C) {
	session := s.Session.Copy()
	collection := session.DB("test").C("test_collection")
	u := monitoring.NewCollectionSizeCollector("test", "test", "test", collection)
	defer u.Close()
	err := collection.Insert(bson.M{"test": true})
	c.Assert(err, jc.ErrorIsNil)
	// We close the session directly.
	// As the collector has copied the session, this should not
	// impact its behaviour - it should continue to monitor as usual.
	session.Close()
	ch := make(chan prometheus.Metric, 2)
	u.Collect(ch)
	// Read the size.
	select {
	case <-ch:
	default:
		c.Error("metric not provided by collector")
	}
	// Read the count.
	select {
	case <-ch:
	default:
		c.Error("metric not provided by collector")
	}
}
func (s *filesSuite) checkSameStrings(c *gc.C, actual, expected []string) {
	sActual := set.NewStrings(actual...)
	sExpected := set.NewStrings(expected...)
	sActualOnly := sActual.Difference(sExpected)
	sExpectedOnly := sExpected.Difference(sActual)
	if !sActualOnly.IsEmpty() || !sExpectedOnly.IsEmpty() {
		c.Error("strings mismatch")
		onlyActual := sActualOnly.Values()
		onlyExpected := sExpectedOnly.Values()
		sort.Strings(onlyActual)
		sort.Strings(onlyExpected)
		if !sActualOnly.IsEmpty() {
			c.Log("...unexpected values:")
			for _, str := range onlyActual {
				c.Log(" " + str)
			}
		}
		if !sExpectedOnly.IsEmpty() {
			c.Log("...missing values:")
			for _, str := range onlyExpected {
				c.Log(" " + str)
			}
		}
	}
}
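// Usage sketch for checkSameStrings: only membership matters, not order, so
// this comparison passes; on a mismatch the helper logs which values were
// unexpected and which were missing. The example test itself is hypothetical.
func (s *filesSuite) TestCheckSameStringsOrderInsensitive(c *gc.C) {
	s.checkSameStrings(c, []string{"b", "a", "c"}, []string{"c", "a", "b"})
}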
func (s *CacheSuite) SetUpTest(c *gc.C) {
	usr, err := user.Current()
	if err != nil {
		c.Error(err)
		return // usr is not usable if user.Current failed
	}
	os.Remove(filepath.Join(usr.HomeDir, InternalCacheFolder))
}
func verifyTransitions(model Stream, outcomes transitionCases, c *gc.C) {
	var pump chan int
	var underTest *Stream
	from := func() *Stream {
		pump = make(chan int, 1)
		underTest = &Stream{
			ID:                model.ID,
			State:             model.State,
			SendFlowAvailable: 4096,
			SendFlowPump:      pump,
		}
		return underTest
	}
	verify := func(outcome interface{}, err *Error) {
		if expected, ok := outcome.(*errCase); ok {
			c.Check(err, gc.NotNil)
			c.Check(err.Level, gc.Equals, expected.level)
			c.Check(err.Code, gc.Equals, expected.code)
			return
		}
		expected := outcome.(*successCase)
		c.Check(underTest.State, gc.Equals, expected.state)
		select {
		case r, ok := <-pump:
			if expected.pumpOpened {
				c.Check(ok, gc.Equals, true)
				c.Check(r, gc.Equals, underTest.SendFlowAvailable)
			} else if expected.pumpClosed {
				c.Check(ok, gc.Equals, false)
			} else {
				c.Error("unexpected pump update: ", r, ok)
			}
		default:
			c.Check(expected.pumpOpened, gc.Equals, false)
			c.Check(expected.pumpClosed, gc.Equals, false)
		}
		if c.Failed() {
			panic(false) // Generate a callstack.
		}
	}
	verify(outcomes.onRecvData, from().onData(Receive, false))
	verify(outcomes.onRecvDataWithFin, from().onData(Receive, true))
	verify(outcomes.onRecvHeaders, from().onHeaders(Receive, false))
	verify(outcomes.onRecvHeadersWithFin, from().onHeaders(Receive, true))
	verify(outcomes.onRecvPushPromise, from().onPushPromise(Receive))
	verify(outcomes.onRecvReset, from().onReset(Receive))
	verify(outcomes.onSendData, from().onData(Send, false))
	verify(outcomes.onSendDataWithFin, from().onData(Send, true))
	verify(outcomes.onSendHeaders, from().onHeaders(Send, false))
	verify(outcomes.onSendHeadersWithFin, from().onHeaders(Send, true))
	verify(outcomes.onSendPushPromise, from().onPushPromise(Send))
	verify(outcomes.onSendReset, from().onReset(Send))
}
func GetNewTestConfig(c *gc.C, url string, filter string) *config.Config {
	cfg, err := config.NewConfig(&MockLoader{
		url:    url,
		filter: filter,
	})
	if err != nil {
		c.Error(err)
	}
	return cfg
}
func (s *stubStream) setRecords(c *gc.C, recs []logfwd.Record) {
	recCh := make(chan logfwd.Record)
	go func() {
		for _, rec := range recs {
			select {
			case recCh <- rec:
			case <-time.After(coretesting.LongWait):
				c.Error("timed out waiting for records on the channel")
			}
		}
	}()
	s.ReturnNext = recCh
}
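// setRecords feeds an unbuffered channel from a goroutine, so each record is
// handed over only when the stub's Next is called. A sketch of the matching
// consumer side; the ReturnNext field name comes from the code above, the
// method shape is an assumption.
func (s *stubStream) Next() (logfwd.Record, error) {
	rec := <-s.ReturnNext // unblocks the feeding goroutine in setRecords
	return rec, nil
}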
func (s *MongoTests) Test_SaveUsersWithDuplicateEmailMustNotFail(c *check.C) {
	repository := NewUserRepository()
	repository.Save(&contracts.User{"*****@*****.**", "user1"}, nil)
	_, err2 := repository.Save(&contracts.User{"*****@*****.**", "user1"}, nil)
	if err2 != nil {
		c.Error("Should not have failed because the Save semantics is CreateOrUpdate")
		c.Log(err2)
	}
}
func (s *BasicSuite) TestBasic(t *c.C) {
	name := util.RandomString(30)

	t.Assert(s.Flynn("create", name), Outputs, fmt.Sprintf("Created %s\n", name))

	push := s.Git("push", "flynn", "master")
	t.Assert(push, OutputContains, "Node.js app detected")
	t.Assert(push, OutputContains, "Downloading and installing node")
	t.Assert(push, OutputContains, "Installing dependencies")
	t.Assert(push, OutputContains, "Procfile declares types -> web")
	t.Assert(push, OutputContains, "Creating release")
	t.Assert(push, OutputContains, "Application deployed")
	t.Assert(push, OutputContains, "* [new branch] master -> master")

	t.Assert(s.Flynn("scale", "web=3"), Succeeds)

	newRoute := s.Flynn("route-add-http", util.RandomString(32)+".dev")
	t.Assert(newRoute, Succeeds)

	t.Assert(s.Flynn("routes"), OutputContains, strings.TrimSpace(newRoute.Output))

	// Use Attempts to give the processes time to start.
	if err := Attempts.Run(func() error {
		ps := s.Flynn("ps")
		if ps.Err != nil {
			return ps.Err
		}
		psLines := strings.Split(strings.TrimSpace(ps.Output), "\n")
		if len(psLines) != 4 {
			return fmt.Errorf("Expected 4 ps lines, got %d", len(psLines))
		}
		for _, l := range psLines[1:] {
			idType := regexp.MustCompile(`\s+`).Split(l, 2)
			if idType[1] != "web" {
				return fmt.Errorf("Expected web type, got %s", idType[1])
			}
			log := s.Flynn("log", idType[0])
			if !strings.Contains(log.Output, "Listening on ") {
				return fmt.Errorf("Expected \"%s\" to contain \"Listening on \"", log.Output)
			}
		}
		return nil
	}); err != nil {
		t.Error(err)
	}

	// Make HTTP requests
}
func newStringsHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*stringsHandler, worker.Worker) {
	sh := &stringsHandler{
		actions:       nil,
		handled:       make(chan []string, 1),
		setupError:    setupError,
		teardownError: teardownError,
		handlerError:  handlerError,
		watcher:       newTestStringsWatcher(),
		setupDone:     make(chan struct{}),
	}
	w, err := watcher.NewStringsWorker(watcher.StringsConfig{Handler: sh})
	c.Assert(err, jc.ErrorIsNil)
	select {
	case <-sh.setupDone:
	case <-time.After(coretesting.ShortWait):
		c.Error("Failed waiting for stringsHandler.Setup to be called during SetUpTest")
	}
	return sh, w
}
func newNotifyHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*notifyHandler, worker.Worker) {
	nh := &notifyHandler{
		actions:       nil,
		handled:       make(chan struct{}, 1),
		setupError:    setupError,
		teardownError: teardownError,
		handlerError:  handlerError,
		watcher:       newTestNotifyWatcher(),
		setupDone:     make(chan struct{}),
	}
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{Handler: nh})
	c.Assert(err, jc.ErrorIsNil)
	select {
	case <-nh.setupDone:
	case <-time.After(coretesting.ShortWait):
		c.Error("Failed waiting for notifyHandler.Setup to be called during SetUpTest")
	}
	return nh, w
}
func newStringsHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*stringsHandler, worker.Worker) {
	sh := &stringsHandler{
		actions:       nil,
		handled:       make(chan []string, 1),
		setupError:    setupError,
		teardownError: teardownError,
		handlerError:  handlerError,
		watcher: &testStringsWatcher{
			changes: make(chan []string),
		},
		setupDone: make(chan struct{}),
	}
	w := legacy.NewStringsWorker(sh)
	select {
	case <-sh.setupDone:
	case <-time.After(coretesting.ShortWait):
		c.Error("Failed waiting for stringsHandler.Setup to be called during SetUpTest")
	}
	return sh, w
}
func newNotifyHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*notifyHandler, worker.Worker) {
	nh := &notifyHandler{
		actions:       nil,
		handled:       make(chan struct{}, 1),
		setupError:    setupError,
		teardownError: teardownError,
		handlerError:  handlerError,
		watcher: &testNotifyWatcher{
			changes: make(chan struct{}),
		},
		setupDone: make(chan struct{}),
	}
	w := legacy.NewNotifyWorker(nh)
	select {
	case <-nh.setupDone:
	case <-time.After(coretesting.ShortWait):
		c.Error("Failed waiting for notifyHandler.Setup to be called during SetUpTest")
	}
	return nh, w
}
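// The testNotifyWatcher built inline above only needs to expose its changes
// channel to the worker. A minimal sketch of a shape that would satisfy the
// legacy notify worker; the method set is assumed from the usual watcher
// contract (Changes/Stop/Err), not taken from the original source.
type testNotifyWatcher struct {
	changes chan struct{}
}

func (w *testNotifyWatcher) Changes() <-chan struct{} { return w.changes }
func (w *testNotifyWatcher) Stop() error              { return nil }
func (w *testNotifyWatcher) Err() error               { return nil }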
func (s *MongoTests) Test_CopyCanBeUsedConcurrently(c *check.C) {
	runtime.GOMAXPROCS(runtime.NumCPU())
	masterSession, mErr := mgo.DialWithInfo(&mgo.DialInfo{
		Database: "test",
		Username: "",
		Password: "",
		Addrs:    []string{fmt.Sprintf("%s:%s", _cfg.Host, _cfg.Port)},
		Timeout:  10000 * time.Second,
	})
	if mErr != nil {
		c.Error(mErr)
		return
	}
	defer masterSession.Close()

	num := 1000
	wg := &sync.WaitGroup{}
	wg.Add(num)
	for i := 0; i < num; i++ {
		go func() {
			defer wg.Done()
			s := masterSession.Copy()
			defer s.Close()
			err := s.Ping()
			if err != nil {
				c.Error(err)
			}
		}()
	}
	wg.Wait()
}
func (s *MongoTests) Test_CreateUser(c *check.C) {
	repository := NewUserRepository()
	user, err := repository.Save(&contracts.User{"*****@*****.**", "user1"}, nil)
	if err != nil {
		c.Error(err.Error())
	} else {
		if user.Name != "user1" {
			c.Errorf("User name was %s and should be %s", user.Name, "user1")
		}
		if user.Email != "*****@*****.**" {
			c.Errorf("User email was %s and should be %s", user.Email, "*****@*****.**")
		}
	}
}
func (t *UptimeSuite) TestUptimeReporting(c *gc.C) {
	now := time.Now()
	u, err := monitoring.NewUptimeCollector("test", "test", "test", time.Now)
	c.Assert(err, jc.ErrorIsNil)
	ch := make(chan prometheus.Metric, 1000)
	u.Collect(ch)
	var m prometheus.Metric
	select {
	case m = <-ch:
	default:
		c.Error("metric not provided by collector")
	}
	var raw prometheusinternal.Metric
	err = m.Write(&raw)
	c.Assert(err, jc.ErrorIsNil)
	cnt := raw.GetCounter()
	val := cnt.GetValue()
	c.Assert(val, gc.Equals, float64(now.Unix()))
}