func assertNotReplied(c *gc.C, req *request) {
	select {
	case v := <-req.reply:
		c.Fatalf("request was unexpectedly replied to (got %v)", v)
	default:
	}
}
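// A minimal usage sketch for assertNotReplied, hypothetical and not part of
// the original suite (the request type and the shape of its reply channel
// are assumed from the surrounding tests):
//
//	req := &request{reply: make(chan interface{}, 1)}
//	assertNotReplied(c, req) // nothing has been sent on req.reply yet
//	req.reply <- "done"      // a later reply would now be observable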
func (s *S) TestContainerCommitRemovesOldImages(c *gocheck.C) {
	appName := "commit-remove-test-app"
	cont, err := s.newContainer(&newContainerOpts{AppName: appName})
	c.Assert(err, gocheck.IsNil)
	defer s.removeTestContainer(cont)
	imageId, err := cont.commit()
	c.Assert(err, gocheck.IsNil)
	repoNamespace, _ := config.GetString("docker:repository-namespace")
	repository := repoNamespace + "/" + cont.AppName
	c.Assert(imageId, gocheck.Equals, repository)
	images, err := dockerCluster().ListImages(true)
	c.Assert(err, gocheck.IsNil)
	var toEraseID string
	for _, image := range images {
		if len(image.RepoTags) > 0 && image.RepoTags[0] == "tsuru/"+appName {
			toEraseID = image.ID
			break
		}
	}
	c.Assert(toEraseID, gocheck.Not(gocheck.Equals), "")
	cont, err = s.newContainer(&newContainerOpts{AppName: appName})
	c.Assert(err, gocheck.IsNil)
	defer s.removeTestContainer(cont)
	_, err = cont.commit()
	c.Assert(err, gocheck.IsNil)
	images, err = dockerCluster().ListImages(true)
	c.Assert(err, gocheck.IsNil)
	for _, image := range images {
		if image.ID == toEraseID {
			c.Fatalf("Image id %q shouldn't be in images list.", toEraseID)
		}
	}
}
// Test that MachineAgent enforces the API password on startup even for machine>0.
func (s *UpgradeValidationMachineSuite) TestAgentEnsuresAPIInfoOnWorkers(c *gc.C) {
	// Create a machine-0, then create a new machine-1.
	_, _ = s.Create1_10Machine(c)
	m1, _ := s.Create1_10Machine(c)

	a := &MachineAgent{}
	s.initAgent(c, a, "--machine-id", m1.Id())

	agentStates := make(chan *state.State, 1000)
	undo := sendOpenedStates(agentStates)
	defer undo()

	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()
	select {
	case agentState := <-agentStates:
		c.Assert(agentState, gc.NotNil)
		c.Assert(a.Conf.Conf.APIInfo.Password, gc.Equals, "machine-password")
	case <-time.After(testing.LongWait):
		c.Fatalf("state not opened")
	}
	err := a.Stop()
	c.Assert(err, gc.IsNil)
	c.Assert(<-done, gc.IsNil)
}
func (s *MachineSuite) TestMachineAgentRunsAPIAddressUpdaterWorker(c *gc.C) {
	// Start the machine agent.
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), gc.IsNil) }()
	defer func() { c.Check(a.Stop(), gc.IsNil) }()

	// Update the API addresses.
	updatedServers := [][]instance.HostPort{instance.AddressesWithPort(
		instance.NewAddresses("localhost"), 1234,
	)}
	err := s.BackingState.SetAPIHostPorts(updatedServers)
	c.Assert(err, gc.IsNil)

	// Wait for config to be updated.
	s.BackingState.StartSync()
	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
		addrs, err := a.CurrentConfig().APIAddresses()
		c.Assert(err, gc.IsNil)
		if reflect.DeepEqual(addrs, []string{"localhost:1234"}) {
			return
		}
	}
	c.Fatalf("timeout while waiting for agent config to change")
}
func (s *clientSuite) TestShareEnvironmentExistingUser(c *gc.C) {
	client := s.APIState.Client()
	user := s.Factory.MakeEnvUser(c, nil)
	cleanup := api.PatchClientFacadeCall(client,
		func(request string, paramsIn interface{}, response interface{}) error {
			if users, ok := paramsIn.(params.ModifyEnvironUsers); ok {
				c.Assert(users.Changes, gc.HasLen, 1)
				c.Check(string(users.Changes[0].Action), gc.Equals, string(params.AddEnvUser))
				c.Check(users.Changes[0].UserTag, gc.Equals, user.UserTag().String())
			} else {
				c.Fatalf("wrong input structure")
			}
			if result, ok := response.(*params.ErrorResults); ok {
				err := &params.Error{Message: "failed to create environment user: env user already exists"}
				*result = params.ErrorResults{Results: []params.ErrorResult{{Error: err}}}
			} else {
				c.Fatalf("wrong output structure")
			}
			return nil
		},
	)
	defer cleanup()

	result, err := client.ShareEnvironment([]names.UserTag{user.UserTag()})
	c.Assert(err, gc.IsNil)
	c.Assert(result.OneError().Error(), gc.Matches, "failed to create environment user: env user already exists")
	c.Assert(result.Results, gc.HasLen, 1)
	c.Assert(result.Results[0].Error, gc.ErrorMatches, `failed to create environment user: env user already exists`)
}
func (s *MachineSuite) waitProvisioned(c *gc.C, unit *state.Unit) (*state.Machine, instance.Id) {
	c.Logf("waiting for unit %q to be provisioned", unit)
	machineId, err := unit.AssignedMachineId()
	c.Assert(err, gc.IsNil)
	m, err := s.State.Machine(machineId)
	c.Assert(err, gc.IsNil)
	w := m.Watch()
	defer w.Stop()
	timeout := time.After(coretesting.LongWait)
	for {
		select {
		case <-timeout:
			c.Fatalf("timed out waiting for provisioning")
		case _, ok := <-w.Changes():
			c.Assert(ok, jc.IsTrue)
			err := m.Refresh()
			c.Assert(err, gc.IsNil)
			if instId, err := m.InstanceId(); err == nil {
				c.Logf("unit provisioned with instance %s", instId)
				return m, instId
			} else {
				c.Check(err, jc.Satisfies, state.IsNotProvisionedError)
			}
		}
	}
	panic("watcher died")
}
func (s *MachineSuite) TestMachineAgentRunsAuthorisedKeysWorker(c *gc.C) {
	// Start the machine agent.
	m, _, _ := s.primeAgent(c, version.Current, state.JobHostUnits)
	a := s.newAgent(c, m)
	go func() { c.Check(a.Run(nil), gc.IsNil) }()
	defer func() { c.Check(a.Stop(), gc.IsNil) }()

	// Update the keys in the environment.
	sshKey := sshtesting.ValidKeyOne.Key + " user@host"
	err := s.BackingState.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": sshKey}, nil, nil)
	c.Assert(err, gc.IsNil)

	// Wait for ssh keys file to be updated.
	s.State.StartSync()
	timeout := time.After(coretesting.LongWait)
	sshKeyWithCommentPrefix := sshtesting.ValidKeyOne.Key + " Juju:user@host"
	for {
		select {
		case <-timeout:
			c.Fatalf("timeout while waiting for authorised ssh keys to change")
		case <-time.After(coretesting.ShortWait):
			keys, err := ssh.ListKeys(authenticationworker.SSHUser, ssh.FullKeys)
			c.Assert(err, gc.IsNil)
			keysStr := strings.Join(keys, "\n")
			if sshKeyWithCommentPrefix != keysStr {
				continue
			}
			return
		}
	}
}
func (s *MachineEnvironmentWatcherSuite) waitForPostSetup(c *gc.C) {
	select {
	case <-time.After(testing.LongWait):
		c.Fatalf("timeout while waiting for setup")
	case <-s.started:
	}
}
func waitForUnitStarted(stateConn *state.State, unit *state.Unit, c *gc.C) {
	timeout := time.After(5 * time.Second)
	for {
		select {
		case <-timeout:
			c.Fatalf("no activity detected")
		case <-time.After(coretesting.ShortWait):
			err := unit.Refresh()
			c.Assert(err, gc.IsNil)
			st, info, data, err := unit.Status()
			c.Assert(err, gc.IsNil)
			switch st {
			case params.StatusPending, params.StatusInstalled:
				c.Logf("waiting...")
				continue
			case params.StatusStarted:
				c.Logf("started!")
				return
			case params.StatusDown:
				stateConn.StartSync()
				c.Logf("unit is still down")
			default:
				c.Fatalf("unexpected status %s %s %v", st, info, data)
			}
		}
	}
}
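// A hedged usage sketch for waitForUnitStarted; the state connection and the
// unit name are illustrative assumptions, not part of this file:
//
//	unit, err := stateConn.Unit("wordpress/0")
//	c.Assert(err, gc.IsNil)
//	waitForUnitStarted(stateConn, unit, c)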
func (s *PresenceSuite) TestFindAllBeings(c *gc.C) {
	w := presence.NewWatcher(s.presence)
	p := presence.NewPinger(s.presence, "a")
	defer w.Stop()
	defer p.Stop()

	ch := make(chan presence.Change)
	w.Watch("a", ch)
	assertChange(c, ch, presence.Change{"a", false})
	c.Assert(p.Start(), gc.IsNil)
	done := make(chan bool)
	go func() {
		w.Sync()
		done <- true
	}()
	assertChange(c, ch, presence.Change{"a", true})
	results, err := presence.FindAllBeings(w)
	c.Assert(err, gc.IsNil)
	c.Assert(results, gc.HasLen, 1)
	select {
	case <-done:
	case <-time.After(testing.LongWait):
		c.Fatalf("Sync failed to return")
	}
}
func assertNoChange(c *gc.C, watch <-chan presence.Change) {
	select {
	case got := <-watch:
		c.Fatalf("watch reported %v, want nothing", got)
	case <-time.After(testing.ShortWait):
	}
}
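// A hypothetical pairing of assertNoChange with the suite's assertChange
// helper, sketching the intended call pattern:
//
//	ch := make(chan presence.Change)
//	w.Watch("a", ch)
//	assertChange(c, ch, presence.Change{"a", false})
//	assertNoChange(c, ch) // no further events expected until the pinger starts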
func (s *PresenceSuite) TestStartSync(c *gc.C) {
	w := presence.NewWatcher(s.presence)
	p := presence.NewPinger(s.presence, "a")
	defer w.Stop()
	defer p.Stop()

	ch := make(chan presence.Change)
	w.Watch("a", ch)
	assertChange(c, ch, presence.Change{"a", false})
	c.Assert(p.Start(), gc.IsNil)

	done := make(chan bool)
	go func() {
		w.StartSync()
		w.StartSync()
		w.StartSync()
		done <- true
	}()

	select {
	case <-done:
	case <-time.After(testing.LongWait):
		c.Fatalf("StartSync failed to return")
	}
	assertChange(c, ch, presence.Change{"a", true})
}
// Tests
func (s *S) TestHash(c *check.C) {
	var (
		err     error
		f       *os.File
		md5hash []byte
	)
	// FIXME: This will not work on MacOS.
	if _, err = exec.LookPath("md5sum"); err != nil {
		c.Skip(err.Error())
	}
	md5sum := exec.Command("md5sum", "./files_test.go")
	b := &bytes.Buffer{}
	md5sum.Stdout = b
	if err = md5sum.Run(); err != nil {
		c.Fatal(err)
	}
	if f, err = os.Open("./files_test.go"); err != nil {
		c.Fatalf("%v %s", md5sum, err)
	}
	if md5hash, err = Hash(md5.New(), f); err != nil {
		c.Fatal(err)
	}
	md5string := fmt.Sprintf("%x .*\n", md5hash)
	c.Check(b.String(), check.Matches, md5string)
}
func (*trySuite) TestExtraResultsAreClosed(c *gc.C) {
	try := parallel.NewTry(0, nil)
	begin := make([]chan struct{}, 4)
	results := make([]*closeResult, len(begin))
	for i := range begin {
		begin[i] = make(chan struct{})
		results[i] = &closeResult{make(chan struct{})}
		i := i
		try.Start(func(<-chan struct{}) (io.Closer, error) {
			<-begin[i]
			return results[i], nil
		})
	}
	begin[0] <- struct{}{}
	val, err := try.Result()
	c.Assert(err, gc.IsNil)
	c.Assert(val, gc.Equals, results[0])

	timeout := time.After(shortWait)
	for i, r := range results[1:] {
		begin[i+1] <- struct{}{}
		select {
		case <-r.closed:
		case <-timeout:
			c.Fatalf("timed out waiting for close")
		}
	}
	select {
	case <-results[0].closed:
		c.Fatalf("result was inappropriately closed")
	case <-time.After(shortWait):
	}
}
func (*limiterSuite) TestAcquireWaitBlocksUntilRelease(c *gc.C) {
	l := utils.NewLimiter(2)
	calls := make([]string, 0, 10)
	start := make(chan bool, 0)
	waiting := make(chan bool, 0)
	done := make(chan bool, 0)
	go func() {
		<-start
		calls = append(calls, fmt.Sprintf("%v", l.Acquire()))
		calls = append(calls, fmt.Sprintf("%v", l.Acquire()))
		calls = append(calls, fmt.Sprintf("%v", l.Acquire()))
		waiting <- true
		l.AcquireWait()
		calls = append(calls, "waited")
		calls = append(calls, fmt.Sprintf("%v", l.Acquire()))
		done <- true
	}()
	// Start the routine, and wait for it to get to the first checkpoint.
	start <- true
	select {
	case <-waiting:
	case <-time.After(longWait):
		c.Fatalf("timed out waiting for 'waiting' to trigger")
	}
	c.Check(l.Acquire(), jc.IsFalse)
	l.Release()
	select {
	case <-done:
	case <-time.After(longWait):
		c.Fatalf("timed out waiting for 'done' to trigger")
	}
	c.Check(calls, gc.DeepEquals, []string{"true", "true", "false", "waited", "false"})
}
func (s *S) TestReadGFF(c *check.C) {
	obtain := []*feat.Feature{}
	if r, err := NewReaderName(G[0]); err != nil {
		c.Fatalf("Failed to open %q: %s", G[0], err)
	} else {
		for i := 0; i < 3; i++ {
			for {
				if f, err := r.Read(); err != nil {
					if err == io.EOF {
						break
					} else {
						c.Fatalf("Failed to read %q: %s", G[0], err)
					}
				} else {
					obtain = append(obtain, f)
				}
			}
			if c.Failed() {
				break
			}
			if len(obtain) == len(expect) {
				for j := range obtain {
					c.Check(*obtain[j], check.DeepEquals, expect[j])
				}
			} else {
				c.Check(len(obtain), check.Equals, len(expect))
			}
		}
		c.Check(r.Type, check.Equals, bio.Moltype(0))
		r.Close()
	}
}
func (s *MachineSuite) TestEnsureLocalEnvironDoesntRunPeergrouper(c *gc.C) {
	started := make(chan struct{}, 1)
	s.agentSuite.PatchValue(&peergrouperNew, func(st *state.State) (worker.Worker, error) {
		c.Check(st, gc.NotNil)
		select {
		case started <- struct{}{}:
		default:
		}
		return newDummyWorker(), nil
	})
	m, _, _ := s.primeAgent(c, version.Current, state.JobManageEnviron)
	a := s.newAgent(c, m)
	err := a.ChangeConfig(func(config agent.ConfigSetter) {
		config.SetValue(agent.ProviderType, "local")
	})
	c.Assert(err, gc.IsNil)
	defer func() { c.Check(a.Stop(), gc.IsNil) }()
	go func() { c.Check(a.Run(nil), gc.IsNil) }()
	select {
	case <-started:
		c.Fatalf("local environment should not start peergrouper")
	case <-time.After(coretesting.ShortWait):
	}
}
func (rateLimitSuite) TestSetAvailable(c *gc.C) {
	tb := NewBucket(250*time.Millisecond, 50)
	available := tb.SetAvailable(10)
	if available != 10 {
		c.Fatalf("SetAvailable returned %v, want 10", available)
	}
}
func (s *MachineSuite) TestJobManageEnvironRunsMinUnitsWorker(c *gc.C) {
	s.assertJobWithState(c, state.JobManageEnviron, func(conf agent.Config, agentState *state.State) {
		// Ensure that the MinUnits worker is alive by doing a simple check
		// that it responds to state changes: add a service, set its minimum
		// number of units to one, wait for the worker to add the missing unit.
		service := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
		err := service.SetMinUnits(1)
		c.Assert(err, gc.IsNil)
		w := service.Watch()
		defer w.Stop()

		// Trigger a sync on the state used by the agent, and wait for the unit
		// to be created.
		agentState.StartSync()
		timeout := time.After(coretesting.LongWait)
		for {
			select {
			case <-timeout:
				c.Fatalf("unit not created")
			case <-time.After(coretesting.ShortWait):
				s.State.StartSync()
			case <-w.Changes():
				units, err := service.AllUnits()
				c.Assert(err, gc.IsNil)
				if len(units) == 1 {
					return
				}
			}
		}
	})
}
func (s *FilterSuite) TestServiceDeath(c *gc.C) {
	f, err := newFilter(s.uniter, s.unit.Tag().String())
	c.Assert(err, gc.IsNil)
	defer statetesting.AssertStop(c, f)
	dyingAsserter := coretesting.NotifyAsserterC{
		C:       c,
		Precond: func() { s.BackingState.StartSync() },
		Chan:    f.UnitDying(),
	}
	dyingAsserter.AssertNoReceive()

	err = s.unit.SetStatus(params.StatusStarted, "", nil)
	c.Assert(err, gc.IsNil)
	err = s.wordpress.Destroy()
	c.Assert(err, gc.IsNil)
	timeout := time.After(coretesting.LongWait)
loop:
	for {
		select {
		case <-f.UnitDying():
			break loop
		case <-time.After(coretesting.ShortWait):
			s.BackingState.StartSync()
		case <-timeout:
			c.Fatalf("dead not detected")
		}
	}
	err = s.unit.Refresh()
	c.Assert(err, gc.IsNil)
	c.Assert(s.unit.Life(), gc.Equals, state.Dying)
	// Can't set s.wordpress to Dead while it still has units.
}
func (s *MachineSuite) TestMachineEnvironWorker(c *gc.C) {
	proxyDir := c.MkDir()
	s.agentSuite.PatchValue(&machineenvironmentworker.ProxyDirectory, proxyDir)
	s.agentSuite.PatchValue(&apt.ConfFile, filepath.Join(proxyDir, "juju-apt-proxy"))
	s.primeAgent(c, version.Current, state.JobHostUnits)

	// Make sure there are some proxy settings to write.
	proxySettings := proxy.Settings{
		Http:  "http proxy",
		Https: "https proxy",
		Ftp:   "ftp proxy",
	}
	updateAttrs := config.ProxyConfigMap(proxySettings)
	err := s.State.UpdateEnvironConfig(updateAttrs, nil, nil)
	c.Assert(err, gc.IsNil)

	s.assertJobWithAPI(c, state.JobHostUnits, func(conf agent.Config, st *api.State) {
		// The timeout must be created outside the loop; a time.After
		// inside the select would be reset on every poll and never fire.
		timeout := time.After(coretesting.LongWait)
		for {
			select {
			case <-timeout:
				c.Fatalf("timeout while waiting for proxy settings to change")
			case <-time.After(10 * time.Millisecond):
				_, err := os.Stat(apt.ConfFile)
				if os.IsNotExist(err) {
					continue
				}
				c.Assert(err, gc.IsNil)
				return
			}
		}
	})
}
func (s *FilterSuite) TestCharmErrorEvents(c *gc.C) {
	f, err := newFilter(s.uniter, s.unit.Tag().String())
	c.Assert(err, gc.IsNil)
	defer f.Stop() // no AssertStop, we test for an error below

	assertNoChange := func() {
		s.BackingState.StartSync()
		select {
		case <-f.ConfigEvents():
			c.Fatalf("unexpected config event")
		case <-time.After(coretesting.ShortWait):
		}
	}

	// Check setting an invalid charm URL does not send events.
	err = f.SetCharm(charm.MustParseURL("cs:missing/one-1"))
	c.Assert(err, gc.Equals, tomb.ErrDying)
	assertNoChange()
	s.assertFilterDies(c, f)

	// Filter died after the error, so restart it.
	f, err = newFilter(s.uniter, s.unit.Tag().String())
	c.Assert(err, gc.IsNil)
	defer f.Stop() // no AssertStop, we test for an error below

	// Check with a nil charm URL, again no changes.
	err = f.SetCharm(nil)
	c.Assert(err, gc.Equals, tomb.ErrDying)
	assertNoChange()
	s.assertFilterDies(c, f)
}
func (s *loginSuite) TestUsersAreNotRateLimited(c *gc.C) {
	info, cleanup := s.setupServer(c)
	info.Tag = s.AdminUserTag(c)
	info.Password = "******"
	defer cleanup()
	delayChan, cleanup := apiserver.DelayLogins()
	defer cleanup()

	// We can log in more than LoginRateLimit users.
	nLogins := apiserver.LoginRateLimit * 2
	errResults, wg := startNLogins(c, nLogins, info)
	select {
	case err := <-errResults:
		c.Fatalf("we should not have gotten any logins yet: %v", err)
	case <-time.After(coretesting.ShortWait):
	}
	c.Logf("letting %d logins complete", nLogins)
	for i := 0; i < nLogins; i++ {
		delayChan <- struct{}{}
	}
	c.Logf("waiting for original requests to finish")
	wg.Wait()
	close(errResults)
	for err := range errResults {
		c.Check(err, gc.IsNil)
	}
}
func (s *workerSuite) TestStateServersArePublished(c *gc.C) {
	publishCh := make(chan [][]instance.HostPort)
	publish := func(apiServers [][]instance.HostPort, instanceIds []instance.Id) error {
		publishCh <- apiServers
		return nil
	}
	st := newFakeState()
	initState(c, st, 3)
	w := newWorker(st, publisherFunc(publish))
	defer func() { c.Check(worker.Stop(w), gc.IsNil) }()

	select {
	case servers := <-publishCh:
		assertAPIHostPorts(c, servers, expectedAPIHostPorts(3))
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for publish")
	}

	// Change one of the servers' API addresses and check that it's published.
	newMachine10APIHostPorts := addressesWithPort(apiPort, "0.2.8.124")
	st.machine("10").setAPIHostPorts(newMachine10APIHostPorts)
	select {
	case servers := <-publishCh:
		expected := expectedAPIHostPorts(3)
		expected[0] = newMachine10APIHostPorts
		assertAPIHostPorts(c, servers, expected)
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for publish")
	}
}
func (s *singularSuite) TestWithIsMasterTrue(c *gc.C) {
	// When IsMaster returns true, workers get started on the underlying
	// runner as usual.
	s.PatchValue(&singular.PingInterval, 1*time.Millisecond)
	underlyingRunner := newRunner()
	conn := &fakeConn{
		isMaster: true,
	}
	r, err := singular.New(underlyingRunner, conn)
	c.Assert(err, gc.IsNil)

	started := make(chan struct{}, 1)
	err = r.StartWorker("worker", func() (worker.Worker, error) {
		return worker.NewSimpleWorker(func(stop <-chan struct{}) error {
			started <- struct{}{}
			<-stop
			return nil
		}), nil
	})
	c.Assert(err, gc.IsNil)
	select {
	case <-started:
	case <-time.After(testing.LongWait):
		c.Fatalf("timed out waiting for worker to start")
	}
	err = worker.Stop(r)
	c.Assert(err, gc.IsNil)
}
func (s *CheckersS) TestBug(c *gocheck.C) {
	bug := gocheck.Bug("a %d bc", 42)
	info := bug.GetBugInfo()
	if info != "a 42 bc" {
		c.Fatalf("Bug() returned %#v", info)
	}
}
// Test that MachineAgent enforces the API password on startup.
func (s *UpgradeValidationMachineSuite) TestAgentEnsuresAPIInfo(c *gc.C) {
	m, _ := s.Create1_10Machine(c)
	// This is similar to assertJobWithState, however we need to control
	// how the machine is initialized, so it looks like a 1.10 upgrade.
	a := &MachineAgent{}
	s.initAgent(c, a, "--machine-id", m.Id())

	agentStates := make(chan *state.State, 1000)
	undo := sendOpenedStates(agentStates)
	defer undo()

	done := make(chan error)
	go func() {
		done <- a.Run(nil)
	}()
	select {
	case agentState := <-agentStates:
		c.Assert(agentState, gc.NotNil)
		c.Assert(a.Conf.Conf.APIInfo.Password, gc.Equals, "machine-password")
	case <-time.After(testing.LongWait):
		c.Fatalf("state not opened")
	}
	err := a.Stop()
	c.Assert(err, gc.IsNil)
	c.Assert(<-done, gc.IsNil)
}
func (srv *localServer) startServer(c *gc.C) {
	var err error
	srv.ec2srv, err = ec2test.NewServer()
	if err != nil {
		c.Fatalf("cannot start ec2 test server: %v", err)
	}
	srv.s3srv, err = s3test.NewServer(srv.config)
	if err != nil {
		c.Fatalf("cannot start s3 test server: %v", err)
	}
	aws.Regions["test"] = aws.Region{
		Name:                 "test",
		EC2Endpoint:          srv.ec2srv.URL(),
		S3Endpoint:           srv.s3srv.URL(),
		S3LocationConstraint: true,
		Sign:                 aws.SignV2,
	}
	s3inst := s3.New(aws.Auth{}, aws.Regions["test"])
	storage := ec2.BucketStorage(s3inst.Bucket("juju-dist"))
	envtesting.UploadFakeTools(c, storage)
	srv.addSpice(c)

	zones := make([]amzec2.AvailabilityZoneInfo, 3)
	zones[0].Region = "test"
	zones[0].Name = "test-available"
	zones[0].State = "available"
	zones[1].Region = "test"
	zones[1].Name = "test-impaired"
	zones[1].State = "impaired"
	zones[2].Region = "test"
	zones[2].Name = "test-unavailable"
	zones[2].State = "unavailable"
	srv.ec2srv.SetAvailabilityZones(zones)
}
// assertCollected reads lines from the string channel linec and compares them
// with the lines passed in compare, failing if the timeout is reached before
// all lines have been collected. The optional injection function allows a test
// to interrupt the processing, e.g. by generating an error or stopping the
// tailer. If linec is closed due to a stop or an error, only the values
// received so far are compared; checking the reason for termination is left
// to the test.
func assertCollected(c *gc.C, linec chan string, compare []string, injection func([]string)) {
	if len(compare) == 0 {
		return
	}
	timeout := time.After(testing.LongWait)
	lines := []string{}
	for {
		select {
		case line, ok := <-linec:
			if ok {
				lines = append(lines, line)
				if injection != nil {
					injection(lines)
				}
				if len(lines) == len(compare) {
					// All data received.
					c.Assert(lines, gc.DeepEquals, compare)
					return
				}
			} else {
				// linec closed after stopping or error.
				c.Assert(lines, gc.DeepEquals, compare[:len(lines)])
				return
			}
		case <-timeout:
			if injection == nil {
				c.Fatalf("timeout during tailer collection")
			}
			return
		}
	}
}
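// A hedged usage sketch for assertCollected (the tailer writing into linec
// is assumed from the surrounding tests, not shown here):
//
//	linec := make(chan string)
//	// ... start a tailer that sends each tailed line on linec ...
//	assertCollected(c, linec, []string{"first", "second"}, nil)
//
// A non-nil injection function can stop the tailer mid-stream; the lines
// received before linec closes are still compared against the matching
// prefix of the expected slice.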
func (*runnerSuite) TestOneWorkerStartWhenStopping(c *gc.C) {
	worker.RestartDelay = 3 * time.Second
	runner := worker.NewRunner(allFatal, noImportance)
	starter := newTestWorkerStarter()
	starter.stopWait = make(chan struct{})

	err := runner.StartWorker("id", testWorkerStart(starter))
	c.Assert(err, gc.IsNil)
	starter.assertStarted(c, true)
	err = runner.StopWorker("id")
	c.Assert(err, gc.IsNil)
	err = runner.StartWorker("id", testWorkerStart(starter))
	c.Assert(err, gc.IsNil)

	close(starter.stopWait)
	starter.assertStarted(c, false)
	// Check that the task is restarted immediately without
	// the usual restart timeout delay.
	t0 := time.Now()
	starter.assertStarted(c, true)
	restartDuration := time.Since(t0)
	if restartDuration > 1*time.Second {
		c.Fatalf("task did not restart immediately")
	}
	c.Assert(worker.Stop(runner), gc.IsNil)
}