// Example #1
0
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
	defer GinkgoRecover()
	// Create a listener for events.
	// eLock protects events: the informer goroutine appends to it while the
	// polling loop below reads its length.
	var eLock sync.Mutex
	events := make([]*api.Event, 0)
	_, controller := controllerframework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return dtc.Client.Events(dtc.Namespace).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return dtc.Client.Events(dtc.Namespace).Watch(options)
			},
		},
		&api.Event{},
		0,
		controllerframework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				eLock.Lock()
				defer eLock.Unlock()
				events = append(events, obj.(*api.Event))
			},
		},
	)
	stop := make(chan struct{})
	go controller.Run(stop)

	// Create a listener for api updates.
	// uLock protects updateCount, incremented from the informer goroutine.
	var uLock sync.Mutex
	updateCount := 0
	label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"}))
	_, updateController := controllerframework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.LabelSelector = label
				return dtc.Client.Pods(dtc.Namespace).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.LabelSelector = label
				return dtc.Client.Pods(dtc.Namespace).Watch(options)
			},
		},
		&api.Pod{},
		0,
		controllerframework.ResourceEventHandlerFuncs{
			UpdateFunc: func(_, _ interface{}) {
				uLock.Lock()
				defer uLock.Unlock()
				updateCount++
			},
		},
	)
	go updateController.Run(stop)

	// Start all replication controllers concurrently and measure how long it
	// takes until every RC reports all of its pods running.
	startTime := time.Now()
	wg := sync.WaitGroup{}
	wg.Add(len(dtc.Configs))
	for i := range dtc.Configs {
		rcConfig := dtc.Configs[i] // capture per-iteration value for the goroutine
		go func() {
			// Done via defer so wg.Wait() cannot hang if ExpectNoError fails.
			defer wg.Done()
			framework.ExpectNoError(framework.RunRC(rcConfig))
		}()
	}
	logStopCh := make(chan struct{})
	go logPodStartupStatus(dtc.Client, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
	wg.Wait()
	startupTime := time.Since(startTime)
	close(logStopCh)
	framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
	// NOTE: the duration is truncated to whole seconds here; a sub-second
	// startup logs +Inf throughput.
	framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))

	By("Waiting for all events to be recorded")
	// Poll until both counters stop growing between two consecutive samples,
	// or until dtc.Timeout elapses. The initial samples must also be taken
	// under the locks — the informer goroutines are still mutating
	// events/updateCount at this point.
	last := -1
	eLock.Lock()
	current := len(events)
	eLock.Unlock()
	lastCount := -1
	uLock.Lock()
	currentCount := updateCount
	uLock.Unlock()
	for start := time.Now(); (last < current || lastCount < currentCount) && time.Since(start) < dtc.Timeout; time.Sleep(10 * time.Second) {
		func() {
			eLock.Lock()
			defer eLock.Unlock()
			last = current
			current = len(events)
		}()
		func() {
			uLock.Lock()
			defer uLock.Unlock()
			lastCount = currentCount
			currentCount = updateCount
		}()
	}
	// Stop both informers; no further mutation of events/updateCount after this.
	close(stop)

	if current != last {
		framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
	}
	framework.Logf("Found %d events", current)
	if currentCount != lastCount {
		framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
	}
	framework.Logf("Found %d updates", currentCount)

	// Tune the threshold for allowed failures: at most 1% of pods may have
	// produced "bad" events.
	badEvents := framework.BadEvents(events)
	Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(dtc.PodCount)))))
	// Print some data about Pod to Node allocation
	By("Printing Pod to Node allocation data")
	podList, err := dtc.Client.Pods(api.NamespaceAll).List(api.ListOptions{})
	framework.ExpectNoError(err)
	pausePodAllocation := make(map[string]int)
	systemPodAllocation := make(map[string][]string)
	for _, pod := range podList.Items {
		if pod.Namespace == api.NamespaceSystem {
			systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
		} else {
			pausePodAllocation[pod.Spec.NodeName]++
		}
	}
	// Sort node names so the per-node allocation log is deterministic.
	nodeNames := make([]string, 0)
	for k := range pausePodAllocation {
		nodeNames = append(nodeNames, k)
	}
	sort.Strings(nodeNames)
	for _, node := range nodeNames {
		framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
	}
	return startupTime
}
// Example #2
0
					currentCount = updateCount
				}()
			}
			close(stop)

			if current != last {
				framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes())
			}
			framework.Logf("Found %d events", current)
			if currentCount != lastCount {
				framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", timeout.Minutes())
			}
			framework.Logf("Found %d updates", currentCount)

			// Tune the threshold for allowed failures.
			badEvents := framework.BadEvents(events)
			Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(totalPods)))))
			// Print some data about Pod to Node allocation
			By("Printing Pod to Node allocation data")
			podList, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
			framework.ExpectNoError(err)
			pausePodAllocation := make(map[string]int)
			systemPodAllocation := make(map[string][]string)
			for _, pod := range podList.Items {
				if pod.Namespace == api.NamespaceSystem {
					systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
				} else {
					pausePodAllocation[pod.Spec.NodeName]++
				}
			}
			nodeNames := make([]string, 0)