Example 1
func TestBackoff(t *testing.T) {
	clock := fakeClock{}
	backoff := podBackoff{
		perPodBackoff:   map[types.NamespacedName]*backoffEntry{},
		clock:           &clock,
		defaultDuration: 1 * time.Second,
		maxDuration:     60 * time.Second,
	}

	tests := []struct {
		podID            types.NamespacedName
		expectedDuration time.Duration
		advanceClock     time.Duration
	}{
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "foo"},
			expectedDuration: 1 * time.Second,
		},
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "foo"},
			expectedDuration: 2 * time.Second,
		},
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "foo"},
			expectedDuration: 4 * time.Second,
		},
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "bar"},
			expectedDuration: 1 * time.Second,
			advanceClock:     120 * time.Second,
		},
		// 'foo' should have been gc'd here.
		{
			podID:            types.NamespacedName{Namespace: "default", Name: "foo"},
			expectedDuration: 1 * time.Second,
		},
	}

	for _, test := range tests {
		duration := backoff.getEntry(test.podID).getBackoff(backoff.maxDuration)
		if duration != test.expectedDuration {
			t.Errorf("expected: %s, got %s for %s", test.expectedDuration.String(), duration.String(), test.podID)
		}
		clock.t = clock.t.Add(test.advanceClock)
		backoff.gc()
	}
	fooID := types.NamespacedName{Namespace: "default", Name: "foo"}
	backoff.perPodBackoff[fooID].backoff = 60 * time.Second
	duration := backoff.getEntry(fooID).getBackoff(backoff.maxDuration)
	if duration != 60*time.Second {
		t.Errorf("expected: 60, got %s", duration.String())
	}
	// Verify that we split on namespaces correctly: same name, different namespace.
	fooID.Namespace = "other"
	duration = backoff.getEntry(fooID).getBackoff(backoff.maxDuration)
	if duration != 1*time.Second {
		t.Errorf("expected: 1, got %s", duration.String())
	}
}
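
The helpers this test exercises (fakeClock, podBackoff, backoffEntry and their getEntry, getBackoff, and gc methods) are not shown. A minimal sketch of what they might look like, inferred purely from how the test uses them rather than copied from the scheduler source, is:

// Sketch only: shapes and behavior inferred from the test above; the real
// scheduler implementation may differ (e.g. it abstracts the clock and locks
// the map).
type fakeClock struct {
	t time.Time
}

func (f *fakeClock) Now() time.Time { return f.t }

type backoffEntry struct {
	backoff    time.Duration
	lastUpdate time.Time
}

// getBackoff returns the current backoff for this entry and doubles it for
// the next call, capping at maxDuration.
func (b *backoffEntry) getBackoff(maxDuration time.Duration) time.Duration {
	duration := b.backoff
	newDuration := b.backoff * 2
	if newDuration > maxDuration {
		newDuration = maxDuration
	}
	b.backoff = newDuration
	return duration
}

type podBackoff struct {
	perPodBackoff   map[types.NamespacedName]*backoffEntry
	clock           *fakeClock
	defaultDuration time.Duration
	maxDuration     time.Duration
}

// getEntry returns the entry for podID, creating one at defaultDuration the
// first time a pod is seen, and stamps it with the current (fake) time.
func (p *podBackoff) getEntry(podID types.NamespacedName) *backoffEntry {
	entry, ok := p.perPodBackoff[podID]
	if !ok {
		entry = &backoffEntry{backoff: p.defaultDuration}
		p.perPodBackoff[podID] = entry
	}
	entry.lastUpdate = p.clock.Now()
	return entry
}

// gc drops entries that have not been touched within maxDuration, which is
// why advancing the fake clock by 120s removes the "foo" entry above.
func (p *podBackoff) gc() {
	now := p.clock.Now()
	for podID, entry := range p.perPodBackoff {
		if now.Sub(entry.lastUpdate) > p.maxDuration {
			delete(p.perPodBackoff, podID)
		}
	}
}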
Example 2
// Returns an error if processing the delta failed, along with a boolean
// indicator of whether the processing should be retried.
func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
	service, ok := delta.Object.(*api.Service)
	var namespacedName types.NamespacedName
	var cachedService *cachedService
	if !ok {
		// If the DeltaFIFO saw a key in our cache that it didn't know about, it
		// can send a deletion with an unknown state. Grab the service from our
		// cache for deleting.
		key, ok := delta.Object.(cache.DeletedFinalStateUnknown)
		if !ok {
			return fmt.Errorf("Delta contained object that wasn't a service or a deleted key: %+v", delta), notRetryable
		}
		cachedService, ok = s.cache.get(key.Key)
		if !ok {
			return fmt.Errorf("Service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), notRetryable
		}
		service = cachedService.lastState
		delta.Object = cachedService.lastState
		namespacedName = types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
	} else {
		namespacedName.Namespace = service.Namespace
		namespacedName.Name = service.Name
		cachedService = s.cache.getOrCreate(namespacedName.String())
	}
	glog.V(2).Infof("Got new %s delta for service: %+v", delta.Type, service)

	// Ensure that no other goroutine will interfere with our processing of the
	// service.
	cachedService.mu.Lock()
	defer cachedService.mu.Unlock()

	// Update the cached service (used above for populating synthetic deletes)
	cachedService.lastState = service

	// TODO: Handle added, updated, and sync differently?
	switch delta.Type {
	case cache.Added:
		fallthrough
	case cache.Updated:
		fallthrough
	case cache.Sync:
		err, retry := s.createLoadBalancerIfNeeded(namespacedName, service, cachedService.appliedState)
		if err != nil {
			message := "Error creating load balancer"
			if retry {
				message += " (will retry): "
			} else {
				message += " (will not retry): "
			}
			message += err.Error()
			s.eventRecorder.Event(service, "CreatingLoadBalancerFailed", message)
			return err, retry
		}
		// Always update the cache upon success.
		// NOTE: Since we update the cached service if and only if we successfully
		// processed it, a cached service being nil implies that it hasn't yet
		// been successfully processed.
		cachedService.appliedState = service
		s.cache.set(namespacedName.String(), cachedService)
	case cache.Deleted:
		s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer")
		err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region)
		if err != nil {
			message := "Error deleting load balancer (will retry): " + err.Error()
			s.eventRecorder.Event(service, "DeletingLoadBalancerFailed", message)
			return err, retryable
		}
		s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer")
		s.cache.delete(namespacedName.String())
	default:
		glog.Errorf("Unexpected delta type: %v", delta.Type)
	}
	return nil, notRetryable
}
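
retryable and notRetryable here are just named boolean values (presumably true and false). A hypothetical caller, not part of the controller shown above, might consume the (error, bool) pair like this:

// Hypothetical caller: illustrates the (error, bool) contract only; the real
// controller's retry loop and interval may differ.
const (
	retryable    = true  // presumed definitions of the flags used above
	notRetryable = false
)

func (s *ServiceController) handleDelta(delta *cache.Delta) {
	for {
		err, retry := s.processDelta(delta)
		if err == nil {
			return
		}
		if !retry {
			glog.Errorf("Dropping delta after non-retryable error: %v", err)
			return
		}
		// Arbitrary pause chosen for this sketch before trying again.
		time.Sleep(5 * time.Second)
	}
}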
Example 3
// Returns an error if processing the delta failed, along with a time.Duration
// indicating whether processing should be retried; zero means no retry; otherwise
// we should retry after that duration.
func (s *ServiceController) processDelta(delta *cache.Delta) (error, time.Duration) {
	var (
		namespacedName types.NamespacedName
		cachedService  *cachedService
	)

	deltaService, ok := delta.Object.(*api.Service)
	if ok {
		namespacedName.Namespace = deltaService.Namespace
		namespacedName.Name = deltaService.Name
		cachedService = s.cache.getOrCreate(namespacedName.String())
	} else {
		// If the DeltaFIFO saw a key in our cache that it didn't know about, it
		// can send a deletion with an unknown state. Grab the service from our
		// cache for deleting.
		key, ok := delta.Object.(cache.DeletedFinalStateUnknown)
		if !ok {
			return fmt.Errorf("delta contained object that wasn't a service or a deleted key: %+v", delta), doNotRetry
		}
		cachedService, ok = s.cache.get(key.Key)
		if !ok {
			return fmt.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), doNotRetry
		}
		deltaService = cachedService.lastState
		delta.Object = deltaService
		namespacedName = types.NamespacedName{Namespace: deltaService.Namespace, Name: deltaService.Name}
	}
	glog.V(2).Infof("Got new %s delta for service: %v", delta.Type, namespacedName)

	// Ensure that no other goroutine will interfere with our processing of the
	// service.
	cachedService.mu.Lock()
	defer cachedService.mu.Unlock()

	// Get the most recent state of the service from the API directly rather than
	// trusting the body of the delta. This avoids update re-ordering problems.
	// TODO: Handle sync delta types differently rather than doing a get on every
	// service every time we sync?
	service, err := s.kubeClient.Core().Services(namespacedName.Namespace).Get(namespacedName.Name)
	if err != nil && !errors.IsNotFound(err) {
		glog.Warningf("Failed to get most recent state of service %v from API (will retry): %v", namespacedName, err)
		return err, cachedService.nextRetryDelay()
	}
	if errors.IsNotFound(err) {
		glog.V(2).Infof("Service %v not found, ensuring load balancer is deleted", namespacedName)
		s.eventRecorder.Event(deltaService, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
		err := s.balancer.EnsureLoadBalancerDeleted(deltaService)
		if err != nil {
			message := "Error deleting load balancer (will retry): " + err.Error()
			s.eventRecorder.Event(deltaService, api.EventTypeWarning, "DeletingLoadBalancerFailed", message)
			return err, cachedService.nextRetryDelay()
		}
		s.eventRecorder.Event(deltaService, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
		s.cache.delete(namespacedName.String())

		cachedService.resetRetryDelay()
		return nil, doNotRetry
	}

	// Update the cached service (used above for populating synthetic deletes)
	cachedService.lastState = service

	err, retry := s.createLoadBalancerIfNeeded(namespacedName, service, cachedService.appliedState)
	if err != nil {
		message := "Error creating load balancer"
		if retry {
			message += " (will retry): "
		} else {
			message += " (will not retry): "
		}
		message += err.Error()
		s.eventRecorder.Event(service, api.EventTypeWarning, "CreatingLoadBalancerFailed", message)

		return err, cachedService.nextRetryDelay()
	}
	// Always update the cache upon success.
	// NOTE: Since we update the cached service if and only if we successfully
	// processed it, a cached service being nil implies that it hasn't yet
	// been successfully processed.
	cachedService.appliedState = service
	s.cache.set(namespacedName.String(), cachedService)

	cachedService.resetRetryDelay()
	return nil, doNotRetry
}
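
This later revision replaces the boolean with a time.Duration, where doNotRetry is presumably a zero duration, and delegates backoff to nextRetryDelay/resetRetryDelay on the cached service. A hedged sketch of those helpers, with the delay bounds assumed rather than taken from the source, could look like:

// Sketch only: fields and method bodies inferred from their use above; the
// bounds below are assumptions for illustration, not the controller's real
// constants.
const (
	doNotRetry    = time.Duration(0)
	minRetryDelay = 5 * time.Second // assumed lower bound
	maxRetryDelay = 5 * time.Minute // assumed upper bound
)

type cachedService struct {
	mu             sync.Mutex
	lastState      *api.Service // most recent state seen, used for synthetic deletes
	appliedState   *api.Service // last state successfully processed
	lastRetryDelay time.Duration
}

// nextRetryDelay doubles the delay on every failure, clamped to the bounds.
func (s *cachedService) nextRetryDelay() time.Duration {
	s.lastRetryDelay = s.lastRetryDelay * 2
	if s.lastRetryDelay < minRetryDelay {
		s.lastRetryDelay = minRetryDelay
	}
	if s.lastRetryDelay > maxRetryDelay {
		s.lastRetryDelay = maxRetryDelay
	}
	return s.lastRetryDelay
}

// resetRetryDelay clears the delay after a successful sync so the next
// failure starts from the minimum again.
func (s *cachedService) resetRetryDelay() {
	s.lastRetryDelay = time.Duration(0)
}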
Example 4
func chooseServiceName(tc int, hint int) types.NamespacedName {
	var svc types.NamespacedName
	svc.Namespace = fmt.Sprintf("ns_%d", tc)
	svc.Name = fmt.Sprintf("name_%d", hint)
	return svc
}
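
For reference, a types.NamespacedName built this way is usually keyed by its String() form, which joins the namespace and name with a slash. A small hypothetical usage:

func ExampleChooseServiceName() {
	svc := chooseServiceName(1, 2)
	// NamespacedName.String() joins the two fields with a "/".
	fmt.Println(svc.String())
	// Output: ns_1/name_2
}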