Example #1
func NewGarbageCollector(metaOnlyClientPool dynamic.ClientPool, clientPool dynamic.ClientPool, mapper meta.RESTMapper, deletableResources map[schema.GroupVersionResource]struct{}) (*GarbageCollector, error) {
	gc := &GarbageCollector{
		metaOnlyClientPool:               metaOnlyClientPool,
		clientPool:                       clientPool,
		restMapper:                       mapper,
		clock:                            clock.RealClock{},
		dirtyQueue:                       workqueue.NewTimedWorkQueue(),
		orphanQueue:                      workqueue.NewTimedWorkQueue(),
		registeredRateLimiter:            NewRegisteredRateLimiter(deletableResources),
		registeredRateLimiterForMonitors: NewRegisteredRateLimiter(deletableResources),
		absentOwnerCache:                 NewUIDCache(500),
	}
	gc.propagator = &Propagator{
		eventQueue: workqueue.NewTimedWorkQueue(),
		uidToNode: &concurrentUIDToNode{
			RWMutex:   &sync.RWMutex{},
			uidToNode: make(map[types.UID]*node),
		},
		gc: gc,
	}
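	// Build a monitor for each deletable resource; ignored resources are skipped.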
	for resource := range deletableResources {
		if _, ok := ignoredResources[resource]; ok {
			glog.V(6).Infof("ignore resource %#v", resource)
			continue
		}
		kind, err := gc.restMapper.KindFor(resource)
		if err != nil {
			if _, ok := err.(*meta.NoResourceMatchError); ok {
				// Ignore NoResourceMatchErrors for now: TPRs are not registered, so the
				// RESTMapper does not know about them, while deletableResources is built
				// from discovery, which does include TPRs.
				// TODO: use dynamic discovery for RestMapper and deletableResources
				glog.Warningf("ignore NoResourceMatchError for %v", resource)
				continue
			}
			return nil, err
		}
		monitor, err := gc.monitorFor(resource, kind)
		if err != nil {
			return nil, err
		}
		gc.monitors = append(gc.monitors, monitor)
	}
	return gc, nil
}
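A minimal sketch of how this constructor might be wired up. The startGC name, the hard-coded resource entry, and the Run call are assumptions for illustration, not taken from the example above; real callers derive the deletable set from API discovery.

// Hypothetical caller for the Example #1 signature.
func startGC(metaOnlyPool, pool dynamic.ClientPool, mapper meta.RESTMapper, stopCh <-chan struct{}) error {
	// In practice this set comes from discovery; one entry keeps the sketch short.
	deletable := map[schema.GroupVersionResource]struct{}{
		{Group: "", Version: "v1", Resource: "pods"}: {},
	}
	gc, err := NewGarbageCollector(metaOnlyPool, pool, mapper, deletable)
	if err != nil {
		return err
	}
	go gc.Run(5, stopCh) // Run(workers, stopCh) signature assumed from contemporaneous sources
	return nil
}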
Example #2
func NewGarbageCollector(metaOnlyClientPool dynamic.ClientPool, clientPool dynamic.ClientPool, resources []unversioned.GroupVersionResource) (*GarbageCollector, error) {
	gc := &GarbageCollector{
		metaOnlyClientPool: metaOnlyClientPool,
		clientPool:         clientPool,
		// TODO: should use a dynamic RESTMapper built from the discovery results.
		restMapper:                       registered.RESTMapper(),
		clock:                            clock.RealClock{},
		dirtyQueue:                       workqueue.NewTimedWorkQueue(),
		orphanQueue:                      workqueue.NewTimedWorkQueue(),
		registeredRateLimiter:            NewRegisteredRateLimiter(resources),
		registeredRateLimiterForMonitors: NewRegisteredRateLimiter(resources),
		absentOwnerCache:                 NewUIDCache(100),
	}
	gc.propagator = &Propagator{
		eventQueue: workqueue.NewTimedWorkQueue(),
		uidToNode: &concurrentUIDToNode{
			RWMutex:   &sync.RWMutex{},
			uidToNode: make(map[types.UID]*node),
		},
		gc: gc,
	}
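	// As above, build a monitor per resource; ignored resources are skipped.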
	for _, resource := range resources {
		if _, ok := ignoredResources[resource]; ok {
			glog.V(6).Infof("ignore resource %#v", resource)
			continue
		}
		kind, err := gc.restMapper.KindFor(resource)
		if err != nil {
			return nil, err
		}
		monitor, err := gc.monitorFor(resource, kind)
		if err != nil {
			return nil, err
		}
		gc.monitors = append(gc.monitors, monitor)
	}
	return gc, nil
}
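The two constructor signatures differ mainly in how the deletable set is passed: Example #2 takes a slice and resolves registered.RESTMapper() internally, while Example #1 takes a set plus an injected mapper. A minimal bridge between the two shapes, assuming the newer schema package (unversioned.GroupVersionResource was later renamed to schema.GroupVersionResource); the helper name is illustrative, not from the source:

// Illustrative helper: converts the slice form to the set form expected by
// the Example #1 constructor.
func toDeletableSet(resources []schema.GroupVersionResource) map[schema.GroupVersionResource]struct{} {
	set := make(map[schema.GroupVersionResource]struct{}, len(resources))
	for _, r := range resources {
		set[r] = struct{}{}
	}
	return set
}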
Example #3
func TestProcessEvent(t *testing.T) {
	var testScenarios = []struct {
		name string
		// a series of events that will be supplied to the
		// Propagator.eventQueue.
		events []event
	}{
		{
			name: "test1",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
			},
		},
		{
			name: "test2",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
		{
			name: "test3",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"3"}),
				createEvent(updateEvent, "2", []string{"4"}),
			},
		},
		{
			name: "reverse test2",
			events: []event{
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "1", []string{}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
	}

	for _, scenario := range testScenarios {
		propagator := &Propagator{
			eventQueue: workqueue.NewTimedWorkQueue(),
			uidToNode: &concurrentUIDToNode{
				RWMutex:   &sync.RWMutex{},
				uidToNode: make(map[types.UID]*node),
			},
			gc: &GarbageCollector{
				dirtyQueue:       workqueue.NewTimedWorkQueue(),
				clock:            clock.RealClock{},
				absentOwnerCache: NewUIDCache(2),
			},
		}
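		// Feed the events in order and verify the graph invariants after each one.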
		for i := 0; i < len(scenario.events); i++ {
			propagator.eventQueue.Add(&workqueue.TimedWorkQueueItem{StartTime: propagator.gc.clock.Now(), Object: &scenario.events[i]})
			propagator.processEvent()
			verifyGraphInvariants(scenario.name, propagator.uidToNode.uidToNode, t)
		}
	}
}
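Example #3 leans on a createEvent test helper defined elsewhere in the package. A plausible sketch of its shape, assuming an event wraps an object whose ownerReferences carry the given owner UIDs; the real helper and the exact API types it uses may differ:

// Hypothetical reconstruction of the helper used above; the event fields and
// the pod type are assumptions based on how the tests call it.
func createEvent(eventType eventType, selfUID string, owners []string) event {
	var ownerRefs []v1.OwnerReference
	for _, owner := range owners {
		ownerRefs = append(ownerRefs, v1.OwnerReference{UID: types.UID(owner)})
	}
	return event{
		eventType: eventType,
		obj: &v1.Pod{
			ObjectMeta: v1.ObjectMeta{
				UID:             types.UID(selfUID),
				OwnerReferences: ownerRefs,
			},
		},
	}
}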