Example no. 1
0
// TestNewInvalidConfig verifies that presence.New rejects a zero-value
// Config with a NotValid error instead of returning a worker.
func (s *WorkerSuite) TestNewInvalidConfig(c *gc.C) {
	// A zero-value config is missing every required field.
	var blank presence.Config

	_, err := presence.New(blank)

	c.Check(err, jc.Satisfies, errors.IsNotValid)
}
Example no. 2
0
// TestNewLoop verifies the worker's main loop: each iteration starts a
// pinger, waits for it to exit, then consults the clock (After) before
// starting the next one.
func (s *WorkerSuite) TestNewLoop(c *gc.C) {
	waitChan := make(chan struct{})
	block := make(chan struct{})
	s.clock.setAfter(4)
	count := 0
	s.cfg.Start = func() (presence.Pinger, error) {
		pinger, err := s.start()
		c.Logf("%d", count)
		// On the fifth start (count 4), wire the pinger to signal the
		// test via waitChan and then block on `block`, freezing the
		// loop at a known point so the call record is stable.
		if count > 3 {
			s.pinger.notify = waitChan
			s.pinger.waitBlock = block
		}
		count++
		return pinger, err
	}

	w, err := presence.New(s.cfg)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)
	defer close(block)
	<-waitChan

	// Four complete start/Wait/After cycles, then a fifth start whose
	// Wait is still blocked when we inspect the stub.
	s.stub.CheckCallNames(c,
		"start", "Wait", "After",
		"start", "Wait", "After",
		"start", "Wait", "After",
		"start", "Wait", "After",
		"start", "Wait",
	)
}
Example no. 3
0
// checkInvalid asserts that the supplied config fails with the given
// NotValid message, both via Validate directly and via presence.New.
func checkInvalid(c *gc.C, config presence.Config, message string) {
	assertNotValid := func(err error) {
		c.Check(err, gc.ErrorMatches, message)
		c.Check(err, jc.Satisfies, errors.IsNotValid)
	}

	assertNotValid(config.Validate())

	w, err := presence.New(config)
	// If a worker was (unexpectedly) returned, stop it so it cannot
	// leak out of the test.
	if !c.Check(w, gc.IsNil) {
		workertest.CleanKill(c, w)
	}
	assertNotValid(err)
}
Example no. 4
0
// TestNewRunOnceBeforeLoop verifies that a new worker runs one full
// start/Wait cycle before it first sleeps on the clock.
func (s *WorkerSuite) TestNewRunOnceBeforeLoop(c *gc.C) {
	// Have the fake clock signal us as soon as After is called.
	done := make(chan struct{})
	s.clock.notify = done

	w, err := presence.New(s.cfg)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)
	<-done

	// Exactly one pinger was started and waited on before the sleep.
	s.stub.CheckCallNames(c, "start", "Wait", "After")
}
Example no. 5
0
// TestNewFailStart verifies that a failed pinger start does not kill
// the worker: it skips Wait and proceeds to the retry delay.
func (s *WorkerSuite) TestNewFailStart(c *gc.C) {
	done := make(chan struct{})
	s.clock.notify = done
	boom := errors.New("<failure>")
	s.stub.SetErrors(boom)

	w, err := presence.New(s.cfg)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)
	<-done

	// No Wait after the failed start; the loop continues via After.
	s.stub.CheckCallNames(c, "start", "After")
}
Example no. 6
0
// TestNewRetry verifies the retry schedule: a successful cycle restarts
// with no delay, while a failed start (or failed Wait) restarts after
// the configured RetryDelay.
func (s *WorkerSuite) TestNewRetry(c *gc.C) {
	failure := errors.New("<failure>")
	// Error schedule, consumed call-by-call by the stub:
	// cycle 1 clean; cycles 2 and 3 fail on start; cycle 4 fails on
	// Wait; cycle 5 clean; cycle 6 blocks before its Wait completes.
	s.stub.SetErrors(
		nil, nil, nil,
		failure, nil,
		failure, nil,
		nil, failure, nil,
		nil, nil,
		failure, // never reached
	)
	waitChan := make(chan struct{})
	block := make(chan struct{})
	s.clock.setAfter(5)
	delay := time.Nanosecond
	s.cfg.RetryDelay = delay
	count := 0
	s.cfg.Start = func() (presence.Pinger, error) {
		pinger, err := s.start()
		// On the sixth start, freeze the loop at a known point: the
		// pinger signals waitChan and then blocks until `block` closes.
		if count > 4 {
			s.pinger.notify = waitChan
			s.pinger.waitBlock = block
		}
		count++
		return pinger, err
	}

	w, err := presence.New(s.cfg)
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)
	defer close(block)
	<-waitChan

	s.stub.CheckCallNames(c,
		"start", "Wait", "After",
		"start", "After",
		"start", "After",
		"start", "Wait", "After",
		"start", "Wait", "After",
		"start", "Wait",
	)
	// Clean cycles restart immediately (zero delay); failed ones wait.
	var noWait time.Duration
	s.stub.CheckCall(c, 2, "After", noWait)
	s.stub.CheckCall(c, 4, "After", delay)
	s.stub.CheckCall(c, 6, "After", delay)
	s.stub.CheckCall(c, 9, "After", noWait)
	s.stub.CheckCall(c, 12, "After", noWait)
}
Example no. 7
0
File: admin.go Project: kat-co/juju
// startPingerIfAgent registers a presence worker (and a ping-timeout
// watchdog) for the connected entity, but only when that entity is an
// agent; non-agent connections are left alone.
func startPingerIfAgent(clock clock.Clock, root *apiHandler, entity state.Entity) error {
	// worker runs presence.Pingers -- absence of which will cause
	// embarrassing "agent is lost" messages to show up in status --
	// until it's stopped. It's stored in resources purely for the
	// side effects: we don't record its id, and nobody else
	// retrieves it -- we just expect it to be stopped when the
	// connection is shut down.
	agent, ok := entity.(statepresence.Agent)
	if !ok {
		return nil
	}
	worker, err := presence.New(presence.Config{
		Identity:   entity.Tag(),
		Start:      presenceShim{agent}.Start,
		Clock:      clock,
		RetryDelay: 3 * time.Second,
	})
	if err != nil {
		return err
	}
	root.getResources().Register(worker)

	// pingTimeout, by contrast, *is* used by the Pinger facade to
	// stave off the call to action() that will shut down the agent
	// connection if it gets lackadaisical about sending keepalive
	// Pings.
	//
	// Do not confuse those (apiserver) Pings with those made by
	// presence.Pinger (which *do* happen as a result of the former,
	// but only as a relatively distant consequence).
	//
	// We should have picked better names...
	action := func() {
		// Fixed typo in the log message: "timout" -> "timeout".
		logger.Debugf("closing connection due to ping timeout")
		if err := root.getRpcConn().Close(); err != nil {
			logger.Errorf("error closing the RPC connection: %v", err)
		}
	}
	pingTimeout := newPingTimeout(action, clock, maxClientPingInterval)
	return root.getResources().RegisterNamed("pingTimeout", pingTimeout)
}
Example no. 8
0
// run drives a single FixtureTest: it builds a test context, starts a
// presence worker wired to that context, and hands both to the test.
// Locals are named ctx and w so they no longer shadow the context type
// and the worker package (the original `worker := ...` hid the package
// used just above it for the channel's element type).
func run(c *gc.C, stub *testing.Stub, test FixtureTest) {
	ctx := &context{
		c:       c,
		stub:    stub,
		clock:   testing.NewClock(time.Now()),
		timeout: time.After(time.Second),
		starts:  make(chan worker.Worker, 1000),
	}
	defer ctx.checkCleanedUp()

	w, err := presence.New(presence.Config{
		Identity:   names.NewMachineTag("1"),
		Start:      ctx.startPinger,
		Clock:      ctx.clock,
		RetryDelay: fiveSeconds,
	})
	c.Assert(err, jc.ErrorIsNil)
	defer workertest.CleanKill(c, w)

	test(ctx, w)
}