Example 1: HandlerTestSuite.SetupTest creates a standalone test node, a bootstrapped four-node cluster, and a shared TChannel context.
func (s *HandlerTestSuite) SetupTest() {
	// Create a test node, on its own. Tests can join this to the cluster for
	// testing, if they want.
	s.testNode = newChannelNode(s.T())

	// Create a cluster for testing. Join these guys to each other.
	s.cluster = newSwimCluster(4)
	s.cluster.Bootstrap()

	s.ctx, s.ctxCancel = shared.NewTChannelContext(500 * time.Millisecond)
}
Example 2: requestSender.Send issues a call with a timeout, retrying forwarding failures and reporting when retries or the deadline are exhausted.
func (s *requestSender) Send() (res []byte, err error) {
	ctx, cancel := shared.NewTChannelContext(s.timeout)
	defer cancel()

	var forwardError, applicationError error

	select {
	case <-s.MakeCall(ctx, &res, &forwardError, &applicationError):
		if applicationError != nil {
			return nil, applicationError
		}

		if forwardError == nil {
			if s.retries > 0 {
				// forwarding succeeded after retries
				s.emitter.emit(RetrySuccessEvent{s.retries})
			}
			return res, nil
		}

		if s.retries < s.maxRetries {
			return s.ScheduleRetry()
		}

		identity, _ := s.sender.WhoAmI()

		s.logger.WithFields(log.Fields{
			"local":       identity,
			"destination": s.destination,
			"service":     s.service,
			"endpoint":    s.endpoint,
		}).Warn("max retries exceeded for request")

		s.emitter.emit(MaxRetriesEvent{s.maxRetries})

		return nil, errors.New("max retries exceeded")
	case <-ctx.Done(): // request timed out

		identity, _ := s.sender.WhoAmI()

		s.logger.WithFields(log.Fields{
			"local":       identity,
			"destination": s.destination,
			"service":     s.service,
			"endpoint":    s.endpoint,
		}).Warn("request timed out")

		return nil, errors.New("request timed out")
	}
}
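Send treats its two error channels differently: an application error is returned to the caller immediately, while a forwarding error is retried via ScheduleRetry until maxRetries is exhausted. Below is a compact, hedged sketch of that retry policy; the sendOnce callback and the result type are hypothetical stand-ins for the actual forwarding call, not part of the package's API.

package main

import (
	"errors"
	"fmt"
)

// result mirrors the outcomes Send distinguishes: a payload, an application
// error (never retried), or a forwarding error (retried).
type result struct {
	res    []byte
	appErr error
	fwdErr error
}

// sendWithRetries sketches the Send/ScheduleRetry policy: return application
// errors immediately, retry forwarding errors up to maxRetries times.
func sendWithRetries(sendOnce func() result, maxRetries int) ([]byte, error) {
	for retries := 0; ; retries++ {
		r := sendOnce()
		if r.appErr != nil {
			return nil, r.appErr
		}
		if r.fwdErr == nil {
			return r.res, nil
		}
		if retries >= maxRetries {
			return nil, errors.New("max retries exceeded")
		}
	}
}

func main() {
	attempts := 0
	res, err := sendWithRetries(func() result {
		attempts++
		if attempts < 3 {
			return result{fwdErr: errors.New("forwarding failed")}
		}
		return result{res: []byte("ok")}
	}, 5)
	fmt.Println(string(res), err, "attempts:", attempts)
}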
Example 3: pingSender.SendPing sends a single ping and returns an error if the context times out.
func (p *pingSender) SendPing() (*ping, error) {
	ctx, cancel := shared.NewTChannelContext(p.timeout)

	defer cancel()

	var res ping
	select {
	case err := <-p.MakeCall(ctx, &res):
		if err != nil {
			return nil, err
		}
		return &res, nil

	case <-ctx.Done(): // ping timed out
		return nil, errors.New("ping timed out")
	}
}
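SendPing is the simplest instance of the timeout shape these senders share: build a context with a deadline, defer its cancel, then select between the call's result channel and ctx.Done(). A minimal standalone sketch of that shape follows, using the standard library's context.WithTimeout in place of shared.NewTChannelContext and a hypothetical doCall helper.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// doCall is a stand-in for MakeCall: it runs the work in a goroutine and
// reports the outcome on a channel.
func doCall(ctx context.Context) <-chan error {
	done := make(chan error, 1)
	go func() {
		time.Sleep(10 * time.Millisecond) // simulated work
		done <- nil
	}()
	return done
}

// callWithTimeout mirrors the select used by SendPing and the other senders.
func callWithTimeout(timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	select {
	case err := <-doCall(ctx):
		return err
	case <-ctx.Done(): // call timed out
		return errors.New("call timed out")
	}
}

func main() {
	fmt.Println(callWithTimeout(50 * time.Millisecond))
}

Buffering the result channel (capacity 1) lets the worker goroutine finish and exit even when the timeout branch wins the select.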
Example 4: pingRequestSender.SendPingRequest sends an indirect ping and applies the membership changes carried in the response.
func (p *pingRequestSender) SendPingRequest() (*pingResponse, error) {
	p.logger.WithFields(log.Fields{
		"peer":   p.peer,
		"target": p.target,
	}).Debug("ping request send")

	ctx, cancel := shared.NewTChannelContext(p.timeout)
	defer cancel()

	var res pingResponse
	select {
	case err := <-p.MakeCall(ctx, &res):
		if err == nil {
			p.node.memberlist.Update(res.Changes)
		}
		return &res, err

	case <-ctx.Done(): // call timed out
		return nil, errors.New("ping request timed out")
	}
}
Example 5: testNode.closeAndWait closes a node's channel and polls until the node stops responding.
func (n *testNode) closeAndWait(sender *tchannel.Channel) {
	n.channel.Close()

	// wait for tchannel to respond with connection refused errors.
	for {
		ctx, cancel := shared.NewTChannelContext(time.Millisecond * 50)

		err := sender.Ping(ctx, n.node.Address())
		if err == nil {
			cancel()
			continue
		}

		_, ok := err.(tchannel.SystemError)
		if ok {
			cancel()
			continue
		}

		cancel()
		break
	}
}
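closeAndWait polls with a short per-attempt timeout until the closed node stops answering. Here is a rough standalone sketch of the same idea, with net.DialTimeout standing in for sender.Ping and an illustrative address; note the real helper additionally keeps polling while the error is a tchannel.SystemError.

package main

import (
	"fmt"
	"net"
	"time"
)

// waitUntilClosed keeps probing addr with a 50 ms timeout per attempt and
// returns once the address no longer accepts connections.
func waitUntilClosed(addr string) {
	for {
		conn, err := net.DialTimeout("tcp", addr, 50*time.Millisecond)
		if err != nil {
			return // connection refused (or similar): the node is down
		}
		conn.Close()
		time.Sleep(10 * time.Millisecond) // brief pause before the next probe
	}
}

func main() {
	fmt.Println("waiting for 127.0.0.1:3000 to stop accepting connections")
	waitUntilClosed("127.0.0.1:3000")
	fmt.Println("closed")
}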
Example 6: joinSender.JoinGroup joins a group of nodes concurrently and collects the successes and failures.
func (j *joinSender) JoinGroup(nodesJoined []string) ([]string, []string) {
	group := j.SelectGroup(nodesJoined)

	var responses struct {
		successes []string
		failures  []string
		sync.Mutex
	}

	var numNodesLeft = j.size - len(nodesJoined)
	var startTime = time.Now()

	var wg sync.WaitGroup

	j.numTries++
	j.node.emit(JoinTriesUpdateEvent{j.numTries})

	for _, node := range group {
		wg.Add(1)
		go func(n string) {
			ctx, cancel := shared.NewTChannelContext(j.timeout)
			defer cancel()

			var res joinResponse
			var failed bool

			select {
			case err := <-j.MakeCall(ctx, n, &res):
				if err != nil {
					j.logger.WithFields(log.Fields{
						"remote":  n,
						"timeout": j.timeout,
					}).Debug("attempt to join node failed")
					failed = true
					break
				}

				j.node.memberlist.AddJoinList(res.Membership)

			case <-ctx.Done():
				j.logger.WithFields(log.Fields{
					"remote":  n,
					"timeout": j.timeout,
				}).Debug("attempt to join node timed out")
				failed = true
			}

			if !failed {
				responses.Lock()
				responses.successes = append(responses.successes, n)
				responses.Unlock()
			} else {
				responses.Lock()
				responses.failures = append(responses.failures, n)
				responses.Unlock()
			}

			wg.Done()
		}(node)
	}
	// wait for joins to complete
	wg.Wait()

	// don't need to lock successes/failures since we're finished writing to them
	j.logger.WithFields(log.Fields{
		"groupSize":    len(group),
		"joinSize":     j.size,
		"joinTime":     time.Now().Sub(startTime),
		"numNodesLeft": numNodesLeft,
		"numFailures":  len(responses.failures),
		"failures":     responses.failures,
		"numSuccesses": len(responses.successes),
		"successes":    responses.successes,
	}).Debug("join group complete")

	return responses.successes, responses.failures
}
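JoinGroup fans out one goroutine per selected node, guards the shared success/failure slices with a mutex, and waits on a WaitGroup before logging the outcome. The following is a stripped-down sketch of that fan-out/aggregate pattern; tryJoin and the target addresses are hypothetical placeholders rather than the package's real join call.

package main

import (
	"fmt"
	"sync"
)

// tryJoin is a placeholder for the real join call; it simply "succeeds" for
// any non-empty target so the sketch runs standalone.
func tryJoin(target string) bool {
	return target != ""
}

// joinGroup contacts every node concurrently and partitions the targets into
// successes and failures, similar to the responses struct in JoinGroup.
func joinGroup(group []string) (successes, failures []string) {
	var mu sync.Mutex
	var wg sync.WaitGroup

	for _, node := range group {
		wg.Add(1)
		go func(n string) {
			defer wg.Done()

			ok := tryJoin(n)

			mu.Lock()
			defer mu.Unlock()
			if ok {
				successes = append(successes, n)
			} else {
				failures = append(failures, n)
			}
		}(node)
	}

	wg.Wait() // wait for all join attempts to complete
	return successes, failures
}

func main() {
	s, f := joinGroup([]string{"10.0.0.1:3000", "10.0.0.2:3000", ""})
	fmt.Println("successes:", s, "failures:", f)
}

Using defer for wg.Done and mu.Unlock keeps the bookkeeping correct even if the per-node work later gains early returns.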