Example #1
func (s *IsolatedWorkerSuite) TestConfigValidation(c *gc.C) {
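	// Each config below omits exactly one required field and must fail
	// Validate with the matching error.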
	tests := []struct {
		cfg      meterstatus.IsolatedConfig
		expected string
	}{{
		cfg: meterstatus.IsolatedConfig{
			Runner:    &stubRunner{stub: s.stub},
			StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
		},
		expected: "clock not provided",
	}, {
		cfg: meterstatus.IsolatedConfig{
			Clock:     testing.NewClock(time.Now()),
			StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
		},
		expected: "hook runner not provided",
	}, {
		cfg: meterstatus.IsolatedConfig{
			Clock:  testing.NewClock(time.Now()),
			Runner: &stubRunner{stub: s.stub},
		},
		expected: "state file not provided",
	}}
	for i, test := range tests {
		c.Logf("running test %d", i)
		err := test.cfg.Validate()
		c.Assert(err, gc.ErrorMatches, test.expected)
	}
}
Example #2
func (s *TxnPrunerSuite) TestPrunes(c *gc.C) {
	fakePruner := newFakeTransactionPruner()
	testClock := testing.NewClock(time.Now())
	interval := time.Minute
	p := txnpruner.New(fakePruner, interval, testClock)
	defer p.Kill()

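	// Wait for the worker to start waiting on the clock (Alarms fires
	// whenever a timer is added) before advancing it.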
	select {
	case <-testClock.Alarms():
	case <-time.After(coretesting.LongWait):
		c.Fatalf("timed out waiting for worker to stat")
	}
	c.Logf("pruner running and waiting: %s (%s)", testClock.Now(), time.Now())
	// Show that we prune every minute
	for i := 0; i < 5; i++ {
		testClock.Advance(interval)
		c.Logf("loop %d: %s (%s)", i, testClock.Now(), time.Now())
		select {
		case <-fakePruner.pruneCh:
		case <-time.After(coretesting.LongWait):
			c.Fatal("timed out waiting for pruning to happen")
		}
		// Now we need to wait for the txn pruner to call clock.After again
		// before we advance the clock, or it will be waiting for the wrong time.
		select {
		case <-testClock.Alarms():
		case <-time.After(coretesting.LongWait):
			c.Fatalf("timed out waiting for worker to loop around")
		}
	}
}
Example #3
func (s *Suite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)

	s.clock = jujutesting.NewClock(time.Now())
	s.stub = new(jujutesting.Stub)
	s.connection = &stubConnection{
		stub:          s.stub,
		controllerTag: targetControllerTag,
	}
	s.connectionErr = nil

	s.facade = newStubMasterFacade(s.stub, s.clock.Now())

	// The default worker Config used by most of the tests. Tests may
	// tweak parts of this as needed.
	s.config = migrationmaster.Config{
		ModelUUID:       utils.MustNewUUID().String(),
		Facade:          s.facade,
		Guard:           newStubGuard(s.stub),
		APIOpen:         s.apiOpen,
		UploadBinaries:  nullUploadBinaries,
		CharmDownloader: fakeCharmDownloader,
		ToolsDownloader: fakeToolsDownloader,
		Clock:           s.clock,
	}
}
Example #4
func (s *pingerSuite) newServerWithTestClock(c *gc.C) (*apiserver.Server, *testing.Clock) {
	clock := testing.NewClock(time.Now())
	config := s.sampleConfig(c)
	config.PingClock = clock
	server := s.newServer(c, config)
	return server, clock
}
Example #5
func (s *MigrationSuite) SetUpTest(c *gc.C) {
	s.ConnSuite.SetUpTest(c)
	s.clock = jujutesting.NewClock(time.Now().Truncate(time.Second))
	err := s.State.SetClockForTesting(s.clock)
	c.Assert(err, jc.ErrorIsNil)

	// Create a hosted model to migrate.
	s.State2 = s.Factory.MakeModel(c, nil)
	s.AddCleanup(func(*gc.C) { s.State2.Close() })

	targetControllerTag := names.NewControllerTag(utils.MustNewUUID().String())

	mac, err := macaroon.New([]byte("secret"), "id", "location")
	c.Assert(err, jc.ErrorIsNil)

	// Plausible migration arguments to test with.
	s.stdSpec = state.MigrationSpec{
		InitiatedBy: names.NewUserTag("admin"),
		TargetInfo: migration.TargetInfo{
			ControllerTag: targetControllerTag,
			Addrs:         []string{"1.2.3.4:5555", "4.3.2.1:6666"},
			CACert:        "cert",
			AuthTag:       names.NewUserTag("user"),
			Password:      "******",
			Macaroons:     []macaroon.Slice{{mac}},
		},
	}
}
Example #6
func (s *SenderSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)
	meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"})
	s.meteredService = s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm})
	s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: s.meteredService, SetCharmURL: true})
	s.clock = jujutesting.NewClock(time.Now())
}
Example #7
func (r *pingSuite) TestPingTimeout(c *gc.C) {
	triggered := make(chan struct{})
	action := func() {
		close(triggered)
	}
	clock := jujutesting.NewClock(time.Now())
	timeout := apiserver.NewPingTimeout(action, clock, 50*time.Millisecond)
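	// Ping twice before the 50ms timeout can expire; each Ping resets
	// the countdown.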
	for i := 0; i < 2; i++ {
		waitAlarm(c, clock)
		clock.Advance(10 * time.Millisecond)
		timeout.Ping()
	}

	waitAlarm(c, clock)
	clock.Advance(49 * time.Millisecond)
	select {
	case <-triggered:
		c.Fatalf("action triggered early")
	case <-time.After(testing.ShortWait):
	}

	clock.Advance(time.Millisecond)
	select {
	case <-triggered:
	case <-time.After(testing.LongWait):
		c.Fatalf("action never triggered")
	}
}
Example #8
func (s *userAuthenticatorSuite) TestAuthenticateLocalLoginMacaroon(c *gc.C) {
	service := mockBakeryService{}
	clock := testing.NewClock(time.Time{})
	authenticator := &authentication.UserAuthenticator{
		Service: &service,
		Clock:   clock,
		LocalUserIdentityLocation: "https://testing.invalid:1234/auth",
	}

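	// A bakery verification failure should be translated into a
	// DischargeRequiredError for the client.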
	service.SetErrors(&bakery.VerificationError{})
	_, err := authenticator.Authenticate(
		authentication.EntityFinder(nil),
		names.NewUserTag("bobbrown"),
		params.LoginRequest{},
	)
	c.Assert(err, gc.FitsTypeOf, &common.DischargeRequiredError{})

	service.CheckCallNames(c, "CheckAny", "ExpireStorageAt", "NewMacaroon")
	calls := service.Calls()
	c.Assert(calls[1].Args, jc.DeepEquals, []interface{}{clock.Now().Add(24 * time.Hour)})
	c.Assert(calls[2].Args, jc.DeepEquals, []interface{}{
		"", []byte(nil), []checkers.Caveat{
			checkers.NeedDeclaredCaveat(
				checkers.Caveat{
					Location:  "https://testing.invalid:1234/auth",
					Condition: "is-authenticated-user bobbrown@local",
				},
				"username",
			),
			{Condition: "time-before 0001-01-02T00:00:00Z"},
		},
	})
}
Example #9
func newFixture(period time.Duration) workerFixture {
	return workerFixture{
		revisionUpdater: newMockRevisionUpdater(),
		clock:           testing.NewClock(time.Now()),
		period:          period,
	}
}
Example #10
func (s *SecurityGroupSuite) TestDeleteSecurityGroupFewCalls(c *gc.C) {
	t0 := time.Time{}
	clock := autoAdvancingClock{testing.NewClock(t0)}
	count := 0
	maxCalls := 4
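	// Retry delays double after each failure (1s, 2s, 4s, 8s), giving
	// these cumulative call times.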
	expectedTimes := []time.Time{
		t0,
		t0.Add(time.Second),
		t0.Add(3 * time.Second),
		t0.Add(7 * time.Second),
		t0.Add(15 * time.Second),
	}
	s.instanceStub.deleteSecurityGroup = func(group amzec2.SecurityGroup) (resp *amzec2.SimpleResp, err error) {
		c.Assert(clock.Now(), gc.Equals, expectedTimes[count])
		if count < maxCalls {
			count++
			return nil, &amzec2.Error{Code: "keep going"}
		}
		return nil, nil
	}
	err := s.deleteFunc(s.instanceStub, amzec2.SecurityGroup{}, clock)
	c.Assert(err, jc.ErrorIsNil)

	expectedCalls := make([]string, maxCalls+1)
	for i := 0; i < maxCalls+1; i++ {
		expectedCalls[i] = "DeleteSecurityGroup"
	}
	s.instanceStub.CheckCallNames(c, expectedCalls...)
}
Example #11
func (s *IsolatedWorkerSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)
	s.stub = &testing.Stub{}

	s.dataDir = c.MkDir()

	s.hookRan = make(chan struct{})
	s.triggersCreated = make(chan struct{})

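	// Wrap the real trigger factory so the test can observe each time the
	// worker creates its triggers.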
	triggerFactory := func(state meterstatus.WorkerState, status string, disconnectedAt time.Time, clk clock.Clock, amber time.Duration, red time.Duration) (<-chan time.Time, <-chan time.Time) {
		select {
		case s.triggersCreated <- struct{}{}:
		case <-time.After(coretesting.LongWait):
			c.Fatalf("failed to signal trigger creation")
		}
		return meterstatus.GetTriggers(state, status, disconnectedAt, clk, amber, red)
	}

	s.clk = testing.NewClock(time.Now())
	wrk, err := meterstatus.NewIsolatedStatusWorker(
		meterstatus.IsolatedConfig{
			Runner:           &stubRunner{stub: s.stub, ran: s.hookRan},
			StateFile:        meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
			Clock:            s.clk,
			AmberGracePeriod: AmberGracePeriod,
			RedGracePeriod:   RedGracePeriod,
			TriggerFactory:   triggerFactory,
		})
	c.Assert(err, jc.ErrorIsNil)
	c.Assert(wrk, gc.NotNil)
	s.worker = wrk
}
Example #12
func (s *aggregateSuite) TestPartialInstanceErrors(c *gc.C) {
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Second

	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	testGetter.err = environs.ErrPartialInstances
	testGetter.newTestInstance("foo", "not foobar", []string{"192.168.1.2"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a checker we can launch in goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string, expectedError error) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		if expectedError == nil {
			c.Check(err, jc.ErrorIsNil)
		} else {
			c.Check(err.Error(), gc.Equals, expectedError.Error())
		}
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch and wait for these
	wg.Add(2)
	go checkInfo("foo", "not foobar", nil)
	go checkInfo("foo2", "", errors.New("instance foo2 not found"))

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the checkers pass.
	wg.Wait()

	// Now kill the worker so we don't risk a race in the following assertions.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the correct length.
	c.Assert(len(testGetter.ids), gc.Equals, 2)

	// Ensure we called instances once.
	// TODO(redir): all this stuff is really crying out to be, e.g.
	// testGetter.CheckOneCall(c, "foo", "foo2") per
	// http://reviews.vapour.ws/r/4885/
	c.Assert(testGetter.counter, gc.Equals, int32(1))
}
Example #13
func (s *StatusHistorySuite) TestPruneStatusHistoryBySize(c *gc.C) {
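	// Prime a unit with 20000 status-history entries, then prune by size
	// and check that the history shrank.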
	clock := testing.NewClock(time.Now())
	err := s.State.SetClockForTesting(clock)
	c.Assert(err, jc.ErrorIsNil)
	service := s.Factory.MakeApplication(c, nil)
	unit := s.Factory.MakeUnit(c, &factory.UnitParams{Application: service})
	state.PrimeUnitStatusHistory(c, clock, unit, status.Active, 20000, 1000, nil)

	history, err := unit.StatusHistory(status.StatusHistoryFilter{Size: 25000})
	c.Assert(err, jc.ErrorIsNil)
	c.Logf("%d\n", len(history))
	c.Assert(history, gc.HasLen, 20001)

	err = state.PruneStatusHistory(s.State, 0, 1)
	c.Assert(err, jc.ErrorIsNil)

	history, err = unit.StatusHistory(status.StatusHistoryFilter{Size: 25000})
	c.Assert(err, jc.ErrorIsNil)
	historyLen := len(history)
	// When this test was written, the pruned length was 6670 entries at
	// roughly 0.00015 MB per entry, but that number is likely to change,
	// so we won't risk a flaky exact assertion here; it is enough to say
	// that if the history is suddenly no longer less than half its
	// original size, there is good reason for suspicion.
	c.Assert(historyLen, jc.LessThan, 10000)
}
Example #14
func (s *metricsManagerSuite) TestNewMetricsManagerAPIRefusesNonMachine(c *gc.C) {
	tests := []struct {
		tag            names.Tag
		environManager bool
		expectedError  string
	}{
		{names.NewUnitTag("mysql/0"), true, "permission denied"},
		{names.NewLocalUserTag("admin"), true, "permission denied"},
		{names.NewMachineTag("0"), false, "permission denied"},
		{names.NewMachineTag("0"), true, ""},
	}
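	// Only a machine agent that is also the environ manager may create
	// the API; everyone else is refused.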
	for i, test := range tests {
		c.Logf("test %d", i)

		anAuthoriser := s.authorizer
		anAuthoriser.EnvironManager = test.environManager
		anAuthoriser.Tag = test.tag
		endPoint, err := metricsmanager.NewMetricsManagerAPI(s.State, nil, anAuthoriser, jujutesting.NewClock(time.Now()))
		if test.expectedError == "" {
			c.Assert(err, jc.ErrorIsNil)
			c.Assert(endPoint, gc.NotNil)
		} else {
			c.Assert(err, gc.ErrorMatches, test.expectedError)
			c.Assert(endPoint, gc.IsNil)
		}
	}
}
Example #15
func (s *machineSuite) TestNoPollWhenNotProvisioned(c *gc.C) {
	s.PatchValue(&ShortPoll, 1*time.Millisecond)
	s.PatchValue(&LongPoll, coretesting.LongWait)

	polled := make(chan struct{}, 1)
	getInstanceInfo := func(id instance.Id) (instanceInfo, error) {
		select {
		case polled <- struct{}{}:
		default:
		}
		return instanceInfo{testAddrs, instance.InstanceStatus{Status: status.Unknown, Message: "pending"}}, nil
	}
	context := &testMachineContext{
		getInstanceInfo: getInstanceInfo,
		dyingc:          make(chan struct{}),
	}
	m := &testMachine{
		tag:        names.NewMachineTag("99"),
		instanceId: instance.Id(""),
		refresh:    func() error { return nil },
		addresses:  testAddrs,
		life:       params.Alive,
		status:     "pending",
	}
	died := make(chan machine)

	clock := gitjujutesting.NewClock(time.Time{})
	changed := make(chan struct{})
	go runMachine(context, m, changed, died, clock)

	expectPoll := func() {
		// worker should be waiting for ShortPoll
		select {
		case <-clock.Alarms():
		case <-time.After(coretesting.LongWait):
			c.Fatalf("expected time-based polling")
		}
		clock.Advance(ShortPoll)
	}

	expectPoll()
	expectPoll()
	select {
	case <-polled:
		c.Fatalf("unexpected instance poll")
	case <-time.After(coretesting.ShortWait):
	}

	m.setInstanceId("inst-ance")
	expectPoll()
	select {
	case <-polled:
	case <-time.After(coretesting.LongWait):
		c.Fatalf("expected instance poll")
	}

	killMachineLoop(c, m, context.dyingc, died)
	c.Assert(context.killErr, gc.Equals, nil)
}
Example #16
func (s *KillSuite) newKillCommandBoth() (cmd.Command, cmd.Command) {
	clock := s.clock
	if clock == nil {
		clock = testing.NewClock(time.Now())
	}
	return controller.NewKillCommandForTest(
		s.api, s.clientapi, s.store, s.apierror, clock, nil)
}
Example #17
func (s *LeadershipSuite) SetUpTest(c *gc.C) {
	s.ConnSuite.SetUpTest(c)
	s.clock = jujutesting.NewClock(time.Now())
	err := s.State.SetClockForTesting(s.clock)
	c.Assert(err, jc.ErrorIsNil)
	s.checker = s.State.LeadershipChecker()
	s.claimer = s.State.LeadershipClaimer()
}
Example #18
func (s *SecurityGroupSuite) TestDeleteSecurityGroupInvalidGroupNotFound(c *gc.C) {
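	// InvalidGroup.NotFound means the group is already gone, so deletion
	// should succeed without retrying.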
	s.instanceStub.deleteSecurityGroup = func(group amzec2.SecurityGroup) (resp *amzec2.SimpleResp, err error) {
		return nil, &amzec2.Error{Code: "InvalidGroup.NotFound"}
	}
	err := s.deleteFunc(s.instanceStub, amzec2.SecurityGroup{}, testing.NewClock(time.Time{}))
	c.Assert(err, jc.ErrorIsNil)
	s.instanceStub.CheckCallNames(c, "DeleteSecurityGroup")
}
Example #19
// Test several requests in a short space of time get batched.
func (s *aggregateSuite) TestMultipleResponseHandling(c *gc.C) {
	// We setup a couple variables here so that we can use them locally without
	// type assertions. Then we use them in the aggregatorConfig.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Minute
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	// Set up multiple instances to batch.
	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})
	testGetter.newTestInstance("foo2", "not foobar", []string{"192.168.1.2"})
	testGetter.newTestInstance("foo3", "ok-ish", []string{"192.168.1.3"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	// Ensure the worker is killed and cleaned up if the test exits early.
	defer workertest.CleanKill(c, aggregator)

	// Create a closure for tests we can launch in goroutines.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id, expectStatus string) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		c.Check(err, jc.ErrorIsNil)
		c.Check(info.status.Message, gc.Equals, expectStatus)
	}

	// Launch and wait for these
	wg.Add(2)
	go checkInfo("foo2", "not foobar")
	go checkInfo("foo3", "ok-ish")

	// Unwind the testing clock to let our requests through.
	waitAlarms(c, clock, 2)
	clock.Advance(delay)

	// Check we're still alive.
	workertest.CheckAlive(c, aggregator)

	// Wait until the tests pass.
	wg.Wait()

	// Ensure we kill the worker before looking at our testInstanceGetter to
	// ensure there's no possibility of a race.
	workertest.CleanKill(c, aggregator)

	// Ensure we got our list back with the expected contents.
	c.Assert(testGetter.ids, jc.SameContents, []instance.Id{"foo2", "foo3"})

	// Ensure we called instances once and have no errors there.
	c.Assert(testGetter.err, jc.ErrorIsNil)
	c.Assert(testGetter.counter, gc.DeepEquals, int32(1))
}
Example #20
func (s *TrackerSuite) SetUpTest(c *gc.C) {
	s.IsolationSuite.SetUpTest(c)
	s.unitTag = names.NewUnitTag("led-service/123")
	s.clock = testing.NewClock(time.Date(2016, 10, 9, 12, 0, 0, 0, time.UTC))
	s.claimer = &StubClaimer{
		Stub:     &testing.Stub{},
		releases: make(chan struct{}),
	}
}
Example #21
func (fix Fixture) newContext() *context {
	return &context{
		clock:   testing.NewClock(time.Now()),
		lwList:  newWorkerList(fix.LW_errors),
		swList:  newWorkerList(fix.SW_errors),
		tlwList: newWorkerList(fix.TLW_errors),
		pwList:  newWorkerList(fix.PW_errors),
	}
}
Example #22
func (s *ValidationSuite) TestMissingMaxSleep(c *gc.C) {
	manager, err := lease.NewManager(lease.ManagerConfig{
		Client:    NewClient(nil, nil),
		Secretary: struct{ lease.Secretary }{},
		Clock:     testing.NewClock(time.Now()),
	})
	c.Check(err, gc.ErrorMatches, "non-positive MaxSleep not valid")
	c.Check(err, jc.Satisfies, errors.IsNotValid)
	c.Check(manager, gc.IsNil)
}
Example #23
func (s *InstancePollerSuite) SetUpTest(c *gc.C) {
	s.BaseSuite.SetUpTest(c)

	s.authoriser = apiservertesting.FakeAuthorizer{
		EnvironManager: true,
	}
	s.resources = common.NewResources()
	s.AddCleanup(func(*gc.C) { s.resources.StopAll() })

	s.st = NewMockState()
	instancepoller.PatchState(s, s.st)

	var err error
	s.clock = jujutesting.NewClock(time.Now())
	s.api, err = instancepoller.NewInstancePollerAPI(nil, s.resources, s.authoriser, s.clock)
	c.Assert(err, jc.ErrorIsNil)

	s.machineEntities = params.Entities{
		Entities: []params.Entity{
			{Tag: "machine-1"},
			{Tag: "machine-2"},
			{Tag: "machine-3"},
		}}
	s.machineErrorResults = params.ErrorResults{
		Results: []params.ErrorResult{
			{Error: apiservertesting.ServerError("pow!")},
			{Error: apiservertesting.ServerError("FAIL")},
			{Error: apiservertesting.NotProvisionedError("42")},
		}}

	s.mixedEntities = params.Entities{
		Entities: []params.Entity{
			{Tag: "machine-1"},
			{Tag: "machine-2"},
			{Tag: "machine-42"},
			{Tag: "application-unknown"},
			{Tag: "invalid-tag"},
			{Tag: "unit-missing-1"},
			{Tag: ""},
			{Tag: "42"},
		}}
	s.mixedErrorResults = params.ErrorResults{
		Results: []params.ErrorResult{
			{Error: nil},
			{Error: nil},
			{Error: apiservertesting.NotFoundError("machine 42")},
			{Error: apiservertesting.ServerError(`"application-unknown" is not a valid machine tag`)},
			{Error: apiservertesting.ServerError(`"invalid-tag" is not a valid tag`)},
			{Error: apiservertesting.ServerError(`"unit-missing-1" is not a valid machine tag`)},
			{Error: apiservertesting.ServerError(`"" is not a valid tag`)},
			{Error: apiservertesting.ServerError(`"42" is not a valid tag`)},
		}}
}
Example #24
// Test that advancing the clock by delay minus one nanosecond and then
// killing the worker causes all pending requests to fail.
func (s *aggregateSuite) TestKillingWorkerKillsPendingReqs(c *gc.C) {
	// Setup local variables.
	testGetter := new(testInstanceGetter)
	clock := jujutesting.NewClock(time.Now())
	delay := time.Minute
	cfg := aggregatorConfig{
		Clock:   clock,
		Delay:   delay,
		Environ: testGetter,
	}

	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})
	testGetter.newTestInstance("foo2", "not foobar", []string{"192.168.1.2"})
	testGetter.newTestInstance("foo3", "ok-ish", []string{"192.168.1.3"})

	aggregator, err := newAggregator(cfg)
	c.Check(err, jc.ErrorIsNil)

	defer workertest.CleanKill(c, aggregator)

	// Set up a couple tests we can launch.
	var wg sync.WaitGroup
	checkInfo := func(id instance.Id) {
		defer wg.Done()
		info, err := aggregator.instanceInfo(id)
		c.Check(err.Error(), gc.Equals, "instanceInfo call aborted")
		c.Check(info.status.Message, gc.Equals, "")
	}

	// Launch a couple tests.
	wg.Add(2)
	go checkInfo("foo2")

	// Advance the clock and kill the worker.
	waitAlarms(c, clock, 1)
	clock.Advance(delay - time.Nanosecond)
	aggregator.Kill()

	go checkInfo("foo3")

	// Make sure we're dead.
	workertest.CheckKilled(c, aggregator)
	wg.Wait()

	// Make sure we have no ids, since we're dead.
	c.Assert(len(testGetter.ids), gc.DeepEquals, 0)

	// Ensure we called instances once and have no errors there.
	c.Assert(testGetter.err, jc.ErrorIsNil)
	c.Assert(testGetter.counter, gc.DeepEquals, int32(0))
}
Example #25
func (*clockSuite) TestWaitAdvance(c *gc.C) {
	t0 := time.Now()
	cl := testing.NewClock(t0)

	// Test that advancing with no timers errors out.
	err := cl.WaitAdvance(time.Millisecond, 10*time.Millisecond, 1)
	c.Check(err, gc.ErrorMatches, "got 0 timers added after waiting 10ms: wanted 1")

	// Test that a timer doesn't error.
	_ = cl.After(time.Nanosecond)
	err = cl.WaitAdvance(time.Millisecond, 10*time.Millisecond, 1)
	c.Check(err, jc.ErrorIsNil)
}
Example #26
func (s *userAuthenticatorSuite) TestCreateLocalLoginMacaroon(c *gc.C) {
	service := mockBakeryService{}
	clock := testing.NewClock(time.Time{})
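	// A clock fixed at the zero time makes the expected time-before
	// caveat below deterministic.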
	_, err := authentication.CreateLocalLoginMacaroon(
		names.NewUserTag("bobbrown"), &service, clock,
	)
	c.Assert(err, jc.ErrorIsNil)
	service.CheckCallNames(c, "NewMacaroon")
	service.CheckCall(c, 0, "NewMacaroon", "", []byte(nil), []checkers.Caveat{
		{Condition: "is-authenticated-user bobbrown@local"},
		{Condition: "time-before 0001-01-01T00:02:00Z"},
	})
}
Example #27
func (*scheduleSuite) TestRemove(c *gc.C) {
	clock := jujutesting.NewClock(time.Time{})
	now := clock.Now()
	s := schedule.NewSchedule(clock)
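	// Add two entries, then remove one before it fires; only the
	// remaining entry should become ready.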

	s.Add("k0", "v0", now.Add(3*time.Second))
	s.Add("k1", "v1", now.Add(2*time.Second))
	s.Remove("k0")
	assertReady(c, s, clock /* nothing */)

	clock.Advance(3 * time.Second)
	assertReady(c, s, clock, "v1")
}
Example #28
func (s *metricsManagerSuite) SetUpTest(c *gc.C) {
	s.JujuConnSuite.SetUpTest(c)
	s.authorizer = apiservertesting.FakeAuthorizer{
		Tag:            names.NewMachineTag("0"),
		EnvironManager: true,
	}
	s.clock = jujutesting.NewClock(time.Now())
	manager, err := metricsmanager.NewMetricsManagerAPI(s.State, nil, s.authorizer, s.clock)
	c.Assert(err, jc.ErrorIsNil)
	s.metricsmanager = manager
	meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"})
	meteredService := s.Factory.MakeApplication(c, &factory.ApplicationParams{Charm: meteredCharm})
	s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Application: meteredService, SetCharmURL: true})
}
Example #29
func (*clockSuite) TestAdvanceLogs(c *gc.C) {
	t0 := time.Now()
	cl := testing.NewClock(t0)

	// Shouldn't log anything.
	t := cl.After(time.Second)
	cl.Advance(time.Minute)
	<-t
	c.Check(c.GetTestLog(), jc.DeepEquals, "")

	// Should log since nothing's waiting.
	cl.Advance(time.Hour)
	c.Check(c.GetTestLog(), jc.Contains, "advancing a clock that has nothing waiting: cf. https://github.com/juju/juju/wiki/Intermittent-failures")
}
Example #30
func (s *pingerSuite) TestAgentConnectionsShutDownWhenAPIServerDies(c *gc.C) {
	clock := testing.NewClock(time.Now())
	config := s.sampleConfig(c)
	config.Clock = clock
	server := s.newServerDirtyKill(c, config)
	conn, _ := s.OpenAPIAsNewMachine(c, server)

	err := pingConn(conn)
	c.Assert(err, jc.ErrorIsNil)
	server.Kill()

	// We know this is less than the client ping interval.
	clock.Advance(apiserver.MongoPingInterval)
	checkConnectionDies(c, conn)
}