func verifyResult(c *client.Client, podName string, ns string, oldNotScheduled int) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	scheduledPods, notScheduledPods := getPodsScheduled(allPods)

	schedEvents, err := c.Events(ns).List(
		labels.Everything(),
		fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      podName,
			"involvedObject.namespace": ns,
			"source":                   "scheduler",
			"reason":                   "FailedScheduling",
		}.AsSelector())
	expectNoError(err)

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		} else {
			return ""
		}
	}

	Expect(len(notScheduledPods)).To(Equal(1+oldNotScheduled), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
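The helper getPodsScheduled is referenced but not shown above. A minimal sketch of what it could look like, assuming a pod counts as scheduled once the scheduler has set Spec.NodeName (the exact criterion is an assumption):

func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
	for _, pod := range pods.Items {
		if pod.Spec.NodeName != "" {
			// The scheduler has already bound this pod to a node.
			scheduledPods = append(scheduledPods, pod)
		} else {
			notScheduledPods = append(notScheduledPods, pod)
		}
	}
	return
}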
Example #2
// NewPetSetController creates a new petset controller.
func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"})
	pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}

	psc := &PetSetController{
		kubeClient:       kubeClient,
		blockingPetStore: newUnHealthyPetTracker(pc),
		newSyncer: func(blockingPet *pcb) *petSyncer {
			return &petSyncer{pc, blockingPet}
		},
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
	}

	podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		// lookup the petset and enqueue
		AddFunc: psc.addPod,
		// lookup current and old petset if labels changed
		UpdateFunc: psc.updatePod,
		// lookup petset accounting for deletion tombstones
		DeleteFunc: psc.deletePod,
	})
	psc.podStore.Indexer = podInformer.GetIndexer()
	psc.podController = podInformer.GetController()

	psc.psStore.Store, psc.psController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return psc.kubeClient.Apps().PetSets(api.NamespaceAll).Watch(options)
			},
		},
		&apps.PetSet{},
		petSetResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: psc.enqueuePetSet,
			UpdateFunc: func(old, cur interface{}) {
				oldPS := old.(*apps.PetSet)
				curPS := cur.(*apps.PetSet)
				if oldPS.Status.Replicas != curPS.Status.Replicas {
					glog.V(4).Infof("Observed updated replica count for PetSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas)
				}
				psc.enqueuePetSet(cur)
			},
			DeleteFunc: psc.enqueuePetSet,
		},
	)
	// TODO: Watch volumes
	psc.podStoreSynced = psc.podController.HasSynced
	psc.syncHandler = psc.Sync
	return psc
}
Example #3
func NewKubeEvents(client *kubeclient.Client, ec cache.EventsCache) api.Source {
	// Buffered channel to send/receive events from
	eventsChan := make(chan eventsUpdate, 1024)
	errorChan := make(chan error)
	es := &eventsSourceImpl{
		Client:        client,
		eventsChannel: eventsChan,
		errorChannel:  errorChan,
		ec:            ec,
	}
	go es.watchLoop(client.Events(kubeapi.NamespaceAll), eventsChan, errorChan)
	// TODO: Inject Namespace Store in here to get namespace IDs for events.
	return es
}
Example #4
// checkNoUnexpectedEvents checks that no unexpected events occurred.
// Currently only "UnexpectedJob" is checked.
func checkNoUnexpectedEvents(c *client.Client, ns, scheduledJobName string) error {
	sj, err := c.Batch().ScheduledJobs(ns).Get(scheduledJobName)
	if err != nil {
		return fmt.Errorf("error in getting scheduledjob %s/%s: %v", ns, scheduledJobName, err)
	}
	events, err := c.Events(ns).Search(sj)
	if err != nil {
		return fmt.Errorf("error in listing events: %s", err)
	}
	for _, e := range events.Items {
		if e.Reason == "UnexpectedJob" {
			return fmt.Errorf("found unexpected event: %#v", e)
		}
	}
	return nil
}
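A hypothetical call site for the helper above; the client, namespace, and job name are placeholders, not part of the original example:

if err := checkNoUnexpectedEvents(c, ns, "my-scheduledjob"); err != nil {
	Failf("unexpected events for scheduledjob: %v", err)
}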
Example #5
// GetReplicationControllerPodsEvents gets events associated with pods in the replication controller.
func GetReplicationControllerPodsEvents(client *client.Client, namespace, replicationControllerName string) ([]api.Event,
	error) {
	replicationController, err := client.ReplicationControllers(namespace).Get(replicationControllerName)

	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(namespace).List(api.ListOptions{
		LabelSelector: labels.SelectorFromSet(replicationController.Spec.Selector),
		FieldSelector: fields.Everything(),
	})

	if err != nil {
		return nil, err
	}

	eventList, err := client.Events(namespace).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})

	if err != nil {
		return nil, err
	}

	events := filterEventsByPodsUID(eventList.Items, pods.Items)

	return events, nil
}
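The filterEventsByPodsUID helper is not included in this snippet. A sketch of one plausible implementation, assuming it matches events to pods via the involved object's UID and that the UID type comes from the types package:

func filterEventsByPodsUID(events []api.Event, pods []api.Pod) []api.Event {
	result := make([]api.Event, 0)
	podUIDs := make(map[types.UID]bool, len(pods))
	for _, pod := range pods {
		podUIDs[pod.UID] = true
	}
	for _, event := range events {
		// Keep only events whose involved object is one of the given pods.
		if podUIDs[event.InvolvedObject.UID] {
			result = append(result, event)
		}
	}
	return result
}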
Example #6
func verifyResult(c *client.Client, podName string, ns string) {
	allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything(), unversioned.ListOptions{})
	expectNoError(err)
	scheduledPods, notScheduledPods := getPodsScheduled(allPods)

	schedEvents, err := c.Events(ns).List(
		labels.Everything(),
		fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      podName,
			"involvedObject.namespace": ns,
			"source":                   "scheduler",
			"reason":                   "FailedScheduling",
		}.AsSelector(),
		unversioned.ListOptions{})
	expectNoError(err)
	// If we failed to find an event whose reason starts with a capital letter,
	// try looking for one starting with a lowercase letter, for backward compatibility.
	// Otherwise we end up hitting #15806.
	// TODO: remove this block when we no longer care about supporting v1.0.
	if len(schedEvents.Items) == 0 {
		schedEvents, err = c.Events(ns).List(
			labels.Everything(),
			fields.Set{
				"involvedObject.kind":      "Pod",
				"involvedObject.name":      podName,
				"involvedObject.namespace": ns,
				"source":                   "scheduler",
				"reason":                   "failedScheduling",
			}.AsSelector(),
			unversioned.ListOptions{})
		expectNoError(err)
	}

	printed := false
	printOnce := func(msg string) string {
		if !printed {
			printed = true
			return msg
		} else {
			return ""
		}
	}

	Expect(len(notScheduledPods)).To(Equal(1), printOnce(fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods)))
	Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Scheduled Pods: %#v", scheduledPods)))
}
Example #7
// GetReplicationControllerList returns a list of all Replication Controllers in the cluster.
func GetReplicationControllerList(client *client.Client) (*ReplicationControllerList, error) {
	log.Printf("Getting list of all replication controllers in the cluster")

	listEverything := api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	}

	replicationControllers, err := client.ReplicationControllers(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	services, err := client.Services(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(api.NamespaceAll).List(listEverything)

	if err != nil {
		return nil, err
	}

	eventsList, err := client.Events(api.NamespaceAll).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything(),
	})

	if err != nil {
		return nil, err
	}

	// Anonymous callback function to get pod warnings.
	// Fulfils the GetPodsEventWarningsFunc type contract.
	// Given a list of API pods, returns the pod-related warning events.
	getPodsEventWarningsFn := func(pods []api.Pod) []Event {
		return GetPodsEventWarnings(eventsList, pods)
	}

	// Anonymous callback function to get nodes by their names.
	getNodeFn := func(nodeName string) (*api.Node, error) {
		return client.Nodes().Get(nodeName)
	}

	result, err := getReplicationControllerList(replicationControllers.Items, services.Items,
		pods.Items, getPodsEventWarningsFn, getNodeFn)

	if err != nil {
		return nil, err
	}

	return result, nil
}
Example #8
func NewScheduledJobController(kubeClient *client.Client) *ScheduledJobController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when all clients have moved to use the clientset.
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	if kubeClient != nil && kubeClient.GetRateLimiter() != nil {
		metrics.RegisterMetricAndTrackRateLimiterUsage("scheduledjob_controller", kubeClient.GetRateLimiter())
	}

	jm := &ScheduledJobController{
		kubeClient: kubeClient,
		jobControl: realJobControl{KubeClient: kubeClient},
		sjControl:  &realSJControl{KubeClient: kubeClient},
		recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduled-job-controller"}),
	}

	return jm
}
Example #9
// Gets events associated with the replication controller.
func GetReplicationControllerEvents(client *client.Client, namespace, replicationControllerName string) ([]api.Event,
	error) {
	fieldSelector, err := fields.ParseSelector("involvedObject.name=" + replicationControllerName)

	if err != nil {
		return nil, err
	}

	list, err := client.Events(namespace).List(api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fieldSelector,
	})

	if err != nil {
		return nil, err
	}

	return list.Items, nil
}
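A hypothetical caller for the function above; the namespace and controller name are placeholders:

events, err := GetReplicationControllerEvents(client, "default", "my-rc")
if err != nil {
	return err
}
for _, event := range events {
	// Each api.Event carries the reason and a human-readable message.
	log.Printf("%s: %s", event.Reason, event.Message)
}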
Example #10
// Gets events associated with the replica set.
func GetReplicaSetEvents(client *client.Client, namespace, replicaSetName string) ([]api.Event,
	error) {
	fieldSelector, err := fields.ParseSelector("involvedObject.name=" + replicaSetName)

	if err != nil {
		return nil, err
	}

	list, err := client.Events(namespace).List(unversioned.ListOptions{
		LabelSelector: unversioned.LabelSelector{labels.Everything()},
		FieldSelector: unversioned.FieldSelector{fieldSelector},
	})

	if err != nil {
		return nil, err
	}

	return list.Items, nil
}
Example #11
// Gets events associated with pods in the replica set.
func GetReplicaSetPodsEvents(client *client.Client, namespace, replicaSetName string) ([]api.Event,
	error) {
	replicaSet, err := client.ReplicationControllers(namespace).Get(replicaSetName)

	if err != nil {
		return nil, err
	}

	pods, err := client.Pods(namespace).List(unversioned.ListOptions{
		LabelSelector: unversioned.LabelSelector{labels.SelectorFromSet(replicaSet.Spec.Selector)},
		FieldSelector: unversioned.FieldSelector{fields.Everything()},
	})

	if err != nil {
		return nil, err
	}

	events := make([]api.Event, 0, 0)

	for _, pod := range pods.Items {
		fieldSelector, err := fields.ParseSelector("involvedObject.name=" + pod.Name)

		if err != nil {
			return nil, err
		}

		list, err := client.Events(namespace).List(unversioned.ListOptions{
			LabelSelector: unversioned.LabelSelector{labels.Everything()},
			FieldSelector: unversioned.FieldSelector{fieldSelector},
		})

		if err != nil {
			return nil, err
		}

		for _, event := range list.Items {
			events = append(events, event)
		}

	}

	return events, nil
}
Example #12
						return c.Events(ns).Watch(options)
					},
				},
				&api.Event{},
				0,
				controllerframework.ResourceEventHandlerFuncs{
					AddFunc: func(obj interface{}) {
						events = append(events, obj.(*api.Event))
					},
				},
			)
			stop := make(chan struct{})
			go controller.Run(stop)

			// Start the replication controller.
			startTime := time.Now()
			expectNoError(RunRC(config))
			e2eStartupTime := time.Now().Sub(startTime)
			Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime)

			By("Waiting for all events to be recorded")
			last := -1
			current := len(events)
			timeout := 10 * time.Minute
			for start := time.Now(); last < current && time.Since(start) < timeout; time.Sleep(10 * time.Second) {
				last = current
				current = len(events)
			}
			close(stop)

			if current != last {
				Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes())
			}
			Logf("Found %d events", current)

			// Tune the threshold for allowed failures.
			badEvents := BadEvents(events)
			Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(totalPods)))))

			if itArg.runLatencyTest {
				Logf("Schedling additional Pods to measure startup latencies")

				createTimes := make(map[string]unversioned.Time, 0)
				nodes := make(map[string]string, 0)
				scheduleTimes := make(map[string]unversioned.Time, 0)
				runTimes := make(map[string]unversioned.Time, 0)
				watchTimes := make(map[string]unversioned.Time, 0)

				var mutex sync.Mutex
				checkPod := func(p *api.Pod) {
					mutex.Lock()
					defer mutex.Unlock()
					defer GinkgoRecover()

					if p.Status.Phase == api.PodRunning {
						if _, found := watchTimes[p.Name]; !found {
							watchTimes[p.Name] = unversioned.Now()
							createTimes[p.Name] = p.CreationTimestamp
							nodes[p.Name] = p.Spec.NodeName
							var startTime unversioned.Time
							for _, cs := range p.Status.ContainerStatuses {
								if cs.State.Running != nil {
									if startTime.Before(cs.State.Running.StartedAt) {
										startTime = cs.State.Running.StartedAt
									}
								}
							}
							if startTime != unversioned.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
				_, controller := controllerframework.NewInformer(
					&cache.ListWatch{
						ListFunc: func() (runtime.Object, error) {
							return c.Pods(ns).List(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything(), unversioned.ListOptions{})
						},
						WatchFunc: func(options unversioned.ListOptions) (watch.Interface, error) {
							options.LabelSelector.Selector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
							return c.Pods(ns).Watch(options)
						},
					},
					&api.Pod{},
					0,
					controllerframework.ResourceEventHandlerFuncs{
						AddFunc: func(obj interface{}) {
							p, ok := obj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
						UpdateFunc: func(oldObj, newObj interface{}) {
							p, ok := newObj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
					},
				)

				stopCh := make(chan struct{})
				go controller.Run(stopCh)

				// Create some additional pods with throughput ~5 pods/sec.
				var wg sync.WaitGroup
				wg.Add(nodeCount)
				podLabels := map[string]string{
					"name": additionalPodsPrefix,
				}
				for i := 1; i <= nodeCount; i++ {
					name := additionalPodsPrefix + "-" + strconv.Itoa(i)
					go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:2.0", podLabels)
					time.Sleep(200 * time.Millisecond)
				}
				wg.Wait()

				Logf("Waiting for all Pods begin observed by the watch...")
				for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
					if time.Since(start) < timeout {
						Failf("Timeout reached waiting for all Pods being observed by the watch.")
					}
				}
				close(stopCh)

				schedEvents, err := c.Events(ns).List(
Example #13
						return c.Events(ns).Watch(options)
					},
				},
				&api.Event{},
				0,
				controllerframework.ResourceEventHandlerFuncs{
					AddFunc: func(obj interface{}) {
						eLock.Lock()
						defer eLock.Unlock()
						events = append(events, obj.(*api.Event))
					},
				},
			)
			stop := make(chan struct{})
			go controller.Run(stop)

			// Create a listener for api updates
			// uLock is a lock that protects updateCount
			var uLock sync.Mutex
			updateCount := 0
			label := labels.SelectorFromSet(labels.Set(map[string]string{"name": RCName}))
			_, updateController := controllerframework.NewInformer(
				&cache.ListWatch{
					ListFunc: func(options api.ListOptions) (runtime.Object, error) {
						options.LabelSelector = label
						return c.Pods(ns).List(options)
					},
					WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
						options.LabelSelector = label
						return c.Pods(ns).Watch(options)
					},
				},
				&api.Pod{},
				0,
				controllerframework.ResourceEventHandlerFuncs{
					UpdateFunc: func(_, _ interface{}) {
						uLock.Lock()
						defer uLock.Unlock()
						updateCount++
					},
				},
			)
			go updateController.Run(stop)

			// Start the replication controller.
			startTime := time.Now()
			expectNoError(RunRC(config))
			e2eStartupTime := time.Now().Sub(startTime)
			Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime)

			By("Waiting for all events to be recorded")
			last := -1
			current := len(events)
			lastCount := -1
			currentCount := updateCount
			timeout := 10 * time.Minute
			for start := time.Now(); (last < current || lastCount < currentCount) && time.Since(start) < timeout; time.Sleep(10 * time.Second) {
				func() {
					eLock.Lock()
					defer eLock.Unlock()
					last = current
					current = len(events)
				}()
				func() {
					uLock.Lock()
					defer uLock.Unlock()
					lastCount = currentCount
					currentCount = updateCount
				}()
			}
			close(stop)

			if current != last {
				Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes())
			}
			Logf("Found %d events", current)
			if currentCount != lastCount {
				Logf("Warning: Not all updates were recorded after waiting %.2f minutes", timeout.Minutes())
			}
			Logf("Found %d updates", currentCount)

			// Tune the threshold for allowed failures.
			badEvents := BadEvents(events)
			Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(totalPods)))))

			if itArg.runLatencyTest {
				Logf("Schedling additional Pods to measure startup latencies")

				createTimes := make(map[string]unversioned.Time, 0)
				nodes := make(map[string]string, 0)
				scheduleTimes := make(map[string]unversioned.Time, 0)
				runTimes := make(map[string]unversioned.Time, 0)
				watchTimes := make(map[string]unversioned.Time, 0)

				var mutex sync.Mutex
				checkPod := func(p *api.Pod) {
					mutex.Lock()
					defer mutex.Unlock()
					defer GinkgoRecover()

					if p.Status.Phase == api.PodRunning {
						if _, found := watchTimes[p.Name]; !found {
							watchTimes[p.Name] = unversioned.Now()
							createTimes[p.Name] = p.CreationTimestamp
							nodes[p.Name] = p.Spec.NodeName
							var startTime unversioned.Time
							for _, cs := range p.Status.ContainerStatuses {
								if cs.State.Running != nil {
									if startTime.Before(cs.State.Running.StartedAt) {
										startTime = cs.State.Running.StartedAt
									}
								}
							}
							if startTime != unversioned.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
				_, controller := controllerframework.NewInformer(
					&cache.ListWatch{
						ListFunc: func(options api.ListOptions) (runtime.Object, error) {
							options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
							return c.Pods(ns).List(options)
						},
						WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
							options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
							return c.Pods(ns).Watch(options)
						},
					},
					&api.Pod{},
					0,
					controllerframework.ResourceEventHandlerFuncs{
						AddFunc: func(obj interface{}) {
							p, ok := obj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
						UpdateFunc: func(oldObj, newObj interface{}) {
							p, ok := newObj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
					},
				)

				stopCh := make(chan struct{})
				go controller.Run(stopCh)

				// Create some additional pods with throughput ~5 pods/sec.
				var wg sync.WaitGroup
				wg.Add(nodeCount)
				podLabels := map[string]string{
					"name": additionalPodsPrefix,
				}
				for i := 1; i <= nodeCount; i++ {
					name := additionalPodsPrefix + "-" + strconv.Itoa(i)
					go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:2.0", podLabels)
					time.Sleep(200 * time.Millisecond)
				}
				wg.Wait()

				Logf("Waiting for all Pods begin observed by the watch...")
				for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
					if time.Since(start) < timeout {
						Failf("Timeout reached waiting for all Pods being observed by the watch.")
					}
				}
				close(stopCh)

				selector := fields.Set{
					"involvedObject.kind":      "Pod",
					"involvedObject.namespace": ns,
					"source":                   api.DefaultSchedulerName,
				}.AsSelector()
				options := api.ListOptions{FieldSelector: selector}
				schedEvents, err := c.Events(ns).List(options)
Example #14
// NewLoadBalancerController creates a controller for gce loadbalancers.
// - kubeClient: A kubernetes REST client.
// - clusterManager: A ClusterManager capable of creating all cloud resources
//	 required for L7 loadbalancing.
// - resyncPeriod: Watchers relist from the Kubernetes API server this often.
func NewLoadBalancerController(kubeClient *client.Client, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	lbc := LoadBalancerController{
		client:              kubeClient,
		CloudClusterManager: clusterManager,
		stopCh:              make(chan struct{}),
		recorder: eventBroadcaster.NewRecorder(
			api.EventSource{Component: "loadbalancer-controller"}),
	}
	lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
	lbc.ingQueue = NewTaskQueue(lbc.sync)
	lbc.hasSynced = lbc.storesSynced

	// Ingress watch handlers
	pathHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			if !isGCEIngress(addIng) {
				glog.Infof("Ignoring add for ingress %v based on annotation %v", addIng.Name, ingressClassKey)
				return
			}
			lbc.recorder.Eventf(addIng, api.EventTypeNormal, "ADD", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name))
			lbc.ingQueue.enqueue(obj)
		},
		DeleteFunc: func(obj interface{}) {
			delIng := obj.(*extensions.Ingress)
			if !isGCEIngress(delIng) {
				glog.Infof("Ignoring delete for ingress %v based on annotation %v", delIng.Name, ingressClassKey)
				return
			}
			glog.Infof("Delete notification received for Ingress %v/%v", delIng.Namespace, delIng.Name)
			lbc.ingQueue.enqueue(obj)
		},
		UpdateFunc: func(old, cur interface{}) {
			curIng := cur.(*extensions.Ingress)
			if !isGCEIngress(curIng) {
				return
			}
			if !reflect.DeepEqual(old, cur) {
				glog.V(3).Infof("Ingress %v changed, syncing", curIng.Name)
			}
			lbc.ingQueue.enqueue(cur)
		},
	}
	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, pathHandlers)

	// Service watch handlers
	svcHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: lbc.enqueueIngressForService,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				lbc.enqueueIngressForService(cur)
			}
		},
		// Ingress deletes matter, service deletes don't.
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, svcHandlers)

	lbc.podLister.Indexer, lbc.podController = framework.NewIndexerInformer(
		cache.NewListWatchFromClient(lbc.client, "pods", namespace, fields.Everything()),
		&api.Pod{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	nodeHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    lbc.nodeQueue.enqueue,
		DeleteFunc: lbc.nodeQueue.enqueue,
		// Nodes are updated every 10s and we don't care, so no update handler.
	}
	// Node watch handlers
	lbc.nodeLister.Store, lbc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts api.ListOptions) (runtime.Object, error) {
				return lbc.client.Get().
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Do().
					Get()
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return lbc.client.Get().
					Prefix("watch").
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Param("resourceVersion", options.ResourceVersion).Watch()
			},
		},
		&api.Node{}, 0, nodeHandlers)

	lbc.tr = &GCETranslator{&lbc}
	lbc.tlsLoader = &apiServerTLSLoader{client: lbc.client}
	glog.V(3).Infof("Created new loadbalancer controller")

	return &lbc, nil
}
Example #15
func runLatencyTest(nodeCount int, c *client.Client, ns string) {
	var (
		nodes              = make(map[string]string, 0)           // pod name -> node name
		createTimestamps   = make(map[string]unversioned.Time, 0) // pod name -> create time
		scheduleTimestamps = make(map[string]unversioned.Time, 0) // pod name -> schedule time
		startTimestamps    = make(map[string]unversioned.Time, 0) // pod name -> time to run
		watchTimestamps    = make(map[string]unversioned.Time, 0) // pod name -> time to read from informer

		additionalPodsPrefix = "latency-pod-" + string(util.NewUUID())
	)

	var mutex sync.Mutex
	readPodInfo := func(p *api.Pod) {
		mutex.Lock()
		defer mutex.Unlock()
		defer GinkgoRecover()

		if p.Status.Phase == api.PodRunning {
			if _, found := watchTimestamps[p.Name]; !found {
				watchTimestamps[p.Name] = unversioned.Now()
				createTimestamps[p.Name] = p.CreationTimestamp
				nodes[p.Name] = p.Spec.NodeName
				var startTimestamp unversioned.Time
				for _, cs := range p.Status.ContainerStatuses {
					if cs.State.Running != nil {
						if startTimestamp.Before(cs.State.Running.StartedAt) {
							startTimestamp = cs.State.Running.StartedAt
						}
					}
				}
				if startTimestamp != unversioned.NewTime(time.Time{}) {
					startTimestamps[p.Name] = startTimestamp
				} else {
					Failf("Pod %v is reported to be running, but none of its containers are", p.Name)
				}
			}
		}
	}

	// Create an informer to read timestamps for each pod
	stopCh := make(chan struct{})
	_, informer := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
				return c.Pods(ns).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
				return c.Pods(ns).Watch(options)
			},
		},
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				p, ok := obj.(*api.Pod)
				Expect(ok).To(Equal(true))
				go readPodInfo(p)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				p, ok := newObj.(*api.Pod)
				Expect(ok).To(Equal(true))
				go readPodInfo(p)
			},
		},
	)
	go informer.Run(stopCh)

	// Create additional pods with throughput ~5 pods/sec.
	var wg sync.WaitGroup
	wg.Add(nodeCount)
	podLabels := map[string]string{
		"name": additionalPodsPrefix,
	}
	for i := 1; i <= nodeCount; i++ {
		name := additionalPodsPrefix + "-" + strconv.Itoa(i)
		go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels)
		time.Sleep(200 * time.Millisecond)
	}
	wg.Wait()

	Logf("Waiting for all Pods begin observed by the watch...")
	for start := time.Now(); len(watchTimestamps) < nodeCount; time.Sleep(10 * time.Second) {
		if time.Since(start) < timeout {
			Failf("Timeout reached waiting for all Pods being observed by the watch.")
		}
	}
	close(stopCh)

	// Read the schedule timestamp by checking the scheduler event for each pod
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.namespace": ns,
		"source":                   "scheduler",
	}.AsSelector()
	options := api.ListOptions{FieldSelector: selector}
	schedEvents, err := c.Events(ns).List(options)
	expectNoError(err)
	for k := range createTimestamps {
		for _, event := range schedEvents.Items {
			if event.InvolvedObject.Name == k {
				scheduleTimestamps[k] = event.FirstTimestamp
				break
			}
		}
	}

	var (
		scheduleLatencies        = make([]podLatencyData, 0)
		startLatencies           = make([]podLatencyData, 0)
		watchLatencies           = make([]podLatencyData, 0)
		scheduleToWatchLatencies = make([]podLatencyData, 0)
		e2eLatencies             = make([]podLatencyData, 0)
	)

	for name, podNode := range nodes {
		createTs, ok := createTimestamps[name]
		Expect(ok).To(Equal(true))
		scheduleTs, ok := scheduleTimestamps[name]
		Expect(ok).To(Equal(true))
		runTs, ok := startTimestamps[name]
		Expect(ok).To(Equal(true))
		watchTs, ok := watchTimestamps[name]
		Expect(ok).To(Equal(true))

		var (
			scheduleLatency        = podLatencyData{name, podNode, scheduleTs.Time.Sub(createTs.Time)}
			startLatency           = podLatencyData{name, podNode, runTs.Time.Sub(scheduleTs.Time)}
			watchLatency           = podLatencyData{name, podNode, watchTs.Time.Sub(runTs.Time)}
			scheduleToWatchLatency = podLatencyData{name, podNode, watchTs.Time.Sub(scheduleTs.Time)}
			e2eLatency             = podLatencyData{name, podNode, watchTs.Time.Sub(createTs.Time)}
		)

		scheduleLatencies = append(scheduleLatencies, scheduleLatency)
		startLatencies = append(startLatencies, startLatency)
		watchLatencies = append(watchLatencies, watchLatency)
		scheduleToWatchLatencies = append(scheduleToWatchLatencies, scheduleToWatchLatency)
		e2eLatencies = append(e2eLatencies, e2eLatency)
	}

	sort.Sort(latencySlice(scheduleLatencies))
	sort.Sort(latencySlice(startLatencies))
	sort.Sort(latencySlice(watchLatencies))
	sort.Sort(latencySlice(scheduleToWatchLatencies))
	sort.Sort(latencySlice(e2eLatencies))

	printLatencies(scheduleLatencies, "worst schedule latencies")
	printLatencies(startLatencies, "worst run-after-schedule latencies")
	printLatencies(watchLatencies, "worst watch latencies")
	printLatencies(scheduleToWatchLatencies, "worst scheduled-to-end total latencies")
	printLatencies(e2eLatencies, "worst e2e total latencies")

	// Ensure all scheduleLatencies are under expected ceilings.
	// These numbers were guessed based on numerous Jenkins e2e runs.
	testMaximumLatencyValue(scheduleLatencies, 1*time.Second, "scheduleLatencies")
	testMaximumLatencyValue(startLatencies, 15*time.Second, "startLatencies")
	testMaximumLatencyValue(watchLatencies, 8*time.Second, "watchLatencies")
	testMaximumLatencyValue(scheduleToWatchLatencies, 5*time.Second, "scheduleToWatchLatencies")
	testMaximumLatencyValue(e2eLatencies, 5*time.Second, "e2eLatencies")

	// Test whether e2e pod startup time is acceptable.
	podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLatencies)}
	expectNoError(VerifyPodStartupLatency(podStartupLatency))

	// Log suspicious latency metrics/docker errors from all nodes that had slow startup times
	logSuspiciousLatency(startLatencies, nil, nodeCount, c)
}
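The podLatencyData and latencySlice types used for sorting above are not part of this excerpt; a minimal sketch of the definitions the code assumes, inferred from how the struct literals and sort.Sort calls are used:

type podLatencyData struct {
	// Name of the pod.
	Name string
	// Node the pod landed on.
	Node string
	// Latency being measured (schedule, startup, watch, or e2e).
	Latency time.Duration
}

type latencySlice []podLatencyData

func (a latencySlice) Len() int           { return len(a) }
func (a latencySlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a latencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }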
Example #16
// NewLoadBalancerController creates a controller for nginx loadbalancer
func NewLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc, customErrorSvc nginx.Service, namespace string, lbInfo *lbInfo) (*loadBalancerController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	lbc := loadBalancerController{
		client: kubeClient,
		stopCh: make(chan struct{}),
		recorder: eventBroadcaster.NewRecorder(
			api.EventSource{Component: "nginx-lb-controller"}),
		lbInfo: lbInfo,
	}
	lbc.ingQueue = NewTaskQueue(lbc.syncIngress)
	lbc.configQueue = NewTaskQueue(lbc.syncConfig)

	lbc.ngx = nginx.NewManager(kubeClient, defaultSvc, customErrorSvc, namespace)

	// Ingress watch handlers
	pathHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			lbc.recorder.Eventf(addIng, "ADD", addIng.Name)
			lbc.ingQueue.enqueue(obj)
		},
		DeleteFunc: lbc.ingQueue.enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				glog.V(2).Infof("Ingress %v changed, syncing", cur.(*extensions.Ingress).Name)
			}
			lbc.ingQueue.enqueue(cur)
		},
	}
	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, pathHandlers)

	// Config watch handlers
	configHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			lbc.configQueue.enqueue(obj)
		},
		DeleteFunc: lbc.configQueue.enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				glog.V(2).Infof("nginx rc changed, syncing")
				lbc.configQueue.enqueue(cur)
			}
		},
	}

	lbc.configLister.Store, lbc.configController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				rc, err := kubeClient.ReplicationControllers(lbInfo.RCNamespace).Get(lbInfo.RCName)
				return &api.ReplicationControllerList{
					Items: []api.ReplicationController{*rc},
				}, err
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": lbInfo.RCName})
				return kubeClient.ReplicationControllers(lbInfo.RCNamespace).Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.ReplicationController{}, resyncPeriod, configHandlers)

	return &lbc, nil
}
Example #17
func createEventRecorder(kubeClient *kube_client.Client) kube_record.EventRecorder {
	eventBroadcaster := kube_record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
	return eventBroadcaster.NewRecorder(kube_api.EventSource{Component: "cluster-autoscaler"})
}
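A hypothetical use of the recorder returned above, following the Eventf pattern seen in the other examples; the node object, reason, and message are placeholders:

recorder := createEventRecorder(kubeClient)
recorder.Eventf(node, kube_api.EventTypeNormal, "ScaleDown",
	"marking node %s as a scale-down candidate", node.Name)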
Example #18
// NewNetworkController returns a new *NetworkController.
func NewNetworkController(client *client.Client, provider networkprovider.Interface) *NetworkController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(client.Events(""))
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "network-controller"})

	e := &NetworkController{
		client:           client,
		queue:            workqueue.New(),
		netProvider:      provider,
		eventRecorder:    recorder,
		eventBroadcaster: broadcaster,
	}

	e.serviceStore.Store, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Services(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Services(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.Service{},
		FullServiceResyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: e.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueService(cur)
			},
			DeleteFunc: e.enqueueService,
		},
	)

	e.networkStore.Store, e.networkController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Networks().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Networks().Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.Network{},
		NetworkRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.addNetwork,
			UpdateFunc: e.updateNetwork,
			DeleteFunc: e.deleteNetwork,
		},
	)

	e.endpointStore.Store, e.endpointController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Endpoints(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Endpoints(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), options)
			},
		},
		&api.Endpoints{},
		EndpointRelistPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.addEndpoint,
			UpdateFunc: e.updateEndpoint,
			DeleteFunc: e.deleteEndpoint,
		},
	)

	return e
}
Example #19
// NewLoadBalancerController creates a controller for gce loadbalancers.
// - kubeClient: A kubernetes REST client.
// - clusterManager: A ClusterManager capable of creating all cloud resources
//	 required for L7 loadbalancing.
// - resyncPeriod: Watchers relist from the Kubernetes API server this often.
func NewLoadBalancerController(kubeClient *client.Client, clusterManager *ClusterManager, resyncPeriod time.Duration) (*loadBalancerController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	lbc := loadBalancerController{
		client:         kubeClient,
		clusterManager: clusterManager,
		stopCh:         make(chan struct{}),
		recorder: eventBroadcaster.NewRecorder(
			api.EventSource{Component: "loadbalancer-controller"}),
	}
	lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
	lbc.ingQueue = NewTaskQueue(lbc.sync)

	// Ingress watch handlers
	pathHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			lbc.recorder.Eventf(addIng, "ADD", addIng.Name)
			lbc.ingQueue.enqueue(obj)
		},
		DeleteFunc: lbc.ingQueue.enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				glog.V(3).Infof("Ingress %v changed, syncing",
					cur.(*extensions.Ingress).Name)
			}
			lbc.ingQueue.enqueue(cur)
		},
	}
	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client),
			WatchFunc: ingressWatchFunc(lbc.client),
		},
		&extensions.Ingress{}, resyncPeriod, pathHandlers)

	// Service watch handlers
	svcHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: lbc.enqueueIngressForService,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				lbc.enqueueIngressForService(cur)
			}
		},
		// Ingress deletes matter, service deletes don't.
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", api.NamespaceAll, fields.Everything()),
		&api.Service{}, resyncPeriod, svcHandlers)

	nodeHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    lbc.nodeQueue.enqueue,
		DeleteFunc: lbc.nodeQueue.enqueue,
		// Nodes are updated every 10s and we don't care, so no update handler.
	}

	// Node watch handlers
	lbc.nodeLister.Store, lbc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return lbc.client.Get().
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Do().
					Get()
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return lbc.client.Get().
					Prefix("watch").
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Param("resourceVersion", options.ResourceVersion).Watch()
			},
		},
		&api.Node{}, 0, nodeHandlers)

	lbc.tr = &gceTranslator{&lbc}
	glog.Infof("Created new loadbalancer controller")

	return &lbc, nil
}
Example #20
// newLoadBalancerController creates a controller for nginx loadbalancer
func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration,
	defaultSvc, namespace, nxgConfigMapName, tcpConfigMapName, udpConfigMapName,
	defSSLCertificate string, runtimeInfo *podInfo) (*loadBalancerController, error) {

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(namespace))

	lbc := loadBalancerController{
		client:            kubeClient,
		stopCh:            make(chan struct{}),
		podInfo:           runtimeInfo,
		nginx:             nginx.NewManager(kubeClient),
		nxgConfigMap:      nxgConfigMapName,
		tcpConfigMap:      tcpConfigMapName,
		udpConfigMap:      udpConfigMapName,
		defSSLCertificate: defSSLCertificate,
		defaultSvc:        defaultSvc,
		recorder: eventBroadcaster.NewRecorder(api.EventSource{
			Component: "nginx-ingress-controller",
		}),
	}

	lbc.syncQueue = NewTaskQueue(lbc.sync)
	lbc.ingQueue = NewTaskQueue(lbc.updateIngressStatus)

	ingEventHandler := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			if !isNGINXIngress(addIng) {
				glog.Infof("Ignoring add for ingress %v based on annotation %v", addIng.Name, ingressClassKey)
				return
			}
			lbc.recorder.Eventf(addIng, api.EventTypeNormal, "CREATE", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name))
			lbc.ingQueue.enqueue(obj)
			lbc.syncQueue.enqueue(obj)
		},
		DeleteFunc: func(obj interface{}) {
			delIng := obj.(*extensions.Ingress)
			if !isNGINXIngress(delIng) {
				glog.Infof("Ignoring add for ingress %v based on annotation %v", delIng.Name, ingressClassKey)
				return
			}
			lbc.recorder.Eventf(delIng, api.EventTypeNormal, "DELETE", fmt.Sprintf("%s/%s", delIng.Namespace, delIng.Name))
			lbc.syncQueue.enqueue(obj)
		},
		UpdateFunc: func(old, cur interface{}) {
			curIng := cur.(*extensions.Ingress)
			if !isNGINXIngress(curIng) {
				return
			}
			if !reflect.DeepEqual(old, cur) {
				upIng := cur.(*extensions.Ingress)
				lbc.recorder.Eventf(upIng, api.EventTypeNormal, "UPDATE", fmt.Sprintf("%s/%s", upIng.Namespace, upIng.Name))
				lbc.ingQueue.enqueue(cur)
				lbc.syncQueue.enqueue(cur)
			}
		},
	}

	secrEventHandler := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addSecr := obj.(*api.Secret)
			if lbc.secrReferenced(addSecr.Namespace, addSecr.Name) {
				lbc.recorder.Eventf(addSecr, api.EventTypeNormal, "CREATE", fmt.Sprintf("%s/%s", addSecr.Namespace, addSecr.Name))
				lbc.syncQueue.enqueue(obj)
			}
		},
		DeleteFunc: func(obj interface{}) {
			delSecr := obj.(*api.Secret)
			if lbc.secrReferenced(delSecr.Namespace, delSecr.Name) {
				lbc.recorder.Eventf(delSecr, api.EventTypeNormal, "DELETE", fmt.Sprintf("%s/%s", delSecr.Namespace, delSecr.Name))
				lbc.syncQueue.enqueue(obj)
			}
		},
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				upSecr := cur.(*api.Secret)
				if lbc.secrReferenced(upSecr.Namespace, upSecr.Name) {
					lbc.recorder.Eventf(upSecr, api.EventTypeNormal, "UPDATE", fmt.Sprintf("%s/%s", upSecr.Namespace, upSecr.Name))
					lbc.syncQueue.enqueue(cur)
				}
			}
		},
	}

	eventHandler := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			lbc.syncQueue.enqueue(obj)
		},
		DeleteFunc: func(obj interface{}) {
			lbc.syncQueue.enqueue(obj)
		},
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				lbc.syncQueue.enqueue(cur)
			}
		},
	}

	mapEventHandler := framework.ResourceEventHandlerFuncs{
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				upCmap := cur.(*api.ConfigMap)
				mapKey := fmt.Sprintf("%s/%s", upCmap.Namespace, upCmap.Name)
				// updates to configuration configmaps can trigger an update
				if mapKey == lbc.nxgConfigMap || mapKey == lbc.tcpConfigMap || mapKey == lbc.udpConfigMap {
					lbc.recorder.Eventf(upCmap, api.EventTypeNormal, "UPDATE", mapKey)
					lbc.syncQueue.enqueue(cur)
				}
			}
		},
	}

	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, ingEventHandler)

	lbc.endpLister.Store, lbc.endpController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  endpointsListFunc(lbc.client, namespace),
			WatchFunc: endpointsWatchFunc(lbc.client, namespace),
		},
		&api.Endpoints{}, resyncPeriod, eventHandler)

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  serviceListFunc(lbc.client, namespace),
			WatchFunc: serviceWatchFunc(lbc.client, namespace),
		},
		&api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})

	lbc.secrLister.Store, lbc.secrController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  secretsListFunc(lbc.client, namespace),
			WatchFunc: secretsWatchFunc(lbc.client, namespace),
		},
		&api.Secret{}, resyncPeriod, secrEventHandler)

	lbc.mapLister.Store, lbc.mapController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  mapListFunc(lbc.client, namespace),
			WatchFunc: mapWatchFunc(lbc.client, namespace),
		},
		&api.ConfigMap{}, resyncPeriod, mapEventHandler)

	return &lbc, nil
}
Example #21
func dumpClusterInfo(f *cmdutil.Factory, cmd *cobra.Command, args []string, out io.Writer) error {
	var c *unversioned.Client
	var err error
	if c, err = f.Client(); err != nil {
		return err
	}
	printer, _, err := kubectl.GetPrinter("json", "")
	if err != nil {
		return err
	}

	nodes, err := c.Nodes().List(api.ListOptions{})
	if err != nil {
		return err
	}

	if err := printer.PrintObj(nodes, setupOutputWriter(cmd, out, "nodes.json")); err != nil {
		return err
	}

	var namespaces []string
	if cmdutil.GetFlagBool(cmd, "all-namespaces") {
		namespaceList, err := c.Namespaces().List(api.ListOptions{})
		if err != nil {
			return err
		}
		for ix := range namespaceList.Items {
			namespaces = append(namespaces, namespaceList.Items[ix].Name)
		}
	} else {
		namespaces = cmdutil.GetFlagStringSlice(cmd, "namespaces")
		if len(namespaces) == 0 {
			cmdNamespace, _, err := f.DefaultNamespace()
			if err != nil {
				return err
			}
			namespaces = []string{
				api.NamespaceSystem,
				cmdNamespace,
			}
		}
	}
	for _, namespace := range namespaces {
		// TODO: this is repetitive in the extreme.  Use reflection or
		// something to make this a for loop.
		events, err := c.Events(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(events, setupOutputWriter(cmd, out, path.Join(namespace, "events.json"))); err != nil {
			return err
		}

		rcs, err := c.ReplicationControllers(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(rcs, setupOutputWriter(cmd, out, path.Join(namespace, "replication-controllers.json"))); err != nil {
			return err
		}

		svcs, err := c.Services(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(svcs, setupOutputWriter(cmd, out, path.Join(namespace, "services.json"))); err != nil {
			return err
		}

		sets, err := c.DaemonSets(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(sets, setupOutputWriter(cmd, out, path.Join(namespace, "daemonsets.json"))); err != nil {
			return err
		}

		deps, err := c.Deployments(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(deps, setupOutputWriter(cmd, out, path.Join(namespace, "deployments.json"))); err != nil {
			return err
		}

		rps, err := c.ReplicaSets(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}
		if err := printer.PrintObj(rps, setupOutputWriter(cmd, out, path.Join(namespace, "replicasets.json"))); err != nil {
			return err
		}

		pods, err := c.Pods(namespace).List(api.ListOptions{})
		if err != nil {
			return err
		}

		if err := printer.PrintObj(pods, setupOutputWriter(cmd, out, path.Join(namespace, "pods.json"))); err != nil {
			return err
		}

		for ix := range pods.Items {
			pod := &pods.Items[ix]
			writer := setupOutputWriter(cmd, out, path.Join(namespace, pod.Name, "logs.txt"))
			writer.Write([]byte(fmt.Sprintf("==== START logs for %s/%s ====\n", pod.Namespace, pod.Name)))
			request, err := f.LogsForObject(pod, &api.PodLogOptions{})
			if err != nil {
				return err
			}

			data, err := request.DoRaw()
			if err != nil {
				return err
			}
			writer.Write(data)
			writer.Write([]byte(fmt.Sprintf("==== END logs for %s/%s ====\n", pod.Namespace, pod.Name)))
		}
	}
	dir := cmdutil.GetFlagString(cmd, "output-directory")
	if len(dir) == 0 {
		dir = "."
	}
	if dir != "-" {
		fmt.Fprintf(out, "Cluster info dumped to %s", dir)
	}
	return nil
}
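The setupOutputWriter helper used throughout dumpClusterInfo is not shown. A sketch of plausible behavior, assuming it writes to the default writer when no output directory is given (or it is "-") and otherwise creates the file under that directory; the exact flag handling is an assumption:

func setupOutputWriter(cmd *cobra.Command, defaultWriter io.Writer, filename string) io.Writer {
	dir := cmdutil.GetFlagString(cmd, "output-directory")
	if len(dir) == 0 || dir == "-" {
		return defaultWriter
	}
	fullFile := path.Join(dir, filename)
	// Make sure the parent directory exists before creating the file.
	cmdutil.CheckErr(os.MkdirAll(path.Dir(fullFile), 0755))
	file, err := os.Create(fullFile)
	cmdutil.CheckErr(err)
	return file
}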
Example #22
						return c.Events(ns).Watch(options)
					},
				},
				&api.Event{},
				0,
				controllerframework.ResourceEventHandlerFuncs{
					AddFunc: func(obj interface{}) {
						events = append(events, obj.(*api.Event))
					},
				},
			)
			stop := make(chan struct{})
			go controller.Run(stop)

			// Start the replication controller.
			startTime := time.Now()
			expectNoError(RunRC(config))
			e2eStartupTime := time.Now().Sub(startTime)
			Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime)

			By("Waiting for all events to be recorded")
			last := -1
			current := len(events)
			timeout := 10 * time.Minute
			for start := time.Now(); last < current && time.Since(start) < timeout; time.Sleep(10 * time.Second) {
				last = current
				current = len(events)
			}
			close(stop)

			if current != last {
				Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes())
			}
			Logf("Found %d events", current)

			// Tune the threshold for allowed failures.
			badEvents := BadEvents(events)
			Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(totalPods)))))

			if itArg.runLatencyTest {
				Logf("Schedling additional Pods to measure startup latencies")

				createTimes := make(map[string]unversioned.Time, 0)
				nodes := make(map[string]string, 0)
				scheduleTimes := make(map[string]unversioned.Time, 0)
				runTimes := make(map[string]unversioned.Time, 0)
				watchTimes := make(map[string]unversioned.Time, 0)

				var mutex sync.Mutex
				checkPod := func(p *api.Pod) {
					mutex.Lock()
					defer mutex.Unlock()
					defer GinkgoRecover()

					if p.Status.Phase == api.PodRunning {
						if _, found := watchTimes[p.Name]; !found {
							watchTimes[p.Name] = unversioned.Now()
							createTimes[p.Name] = p.CreationTimestamp
							nodes[p.Name] = p.Spec.NodeName
							var startTime unversioned.Time
							for _, cs := range p.Status.ContainerStatuses {
								if cs.State.Running != nil {
									if startTime.Before(cs.State.Running.StartedAt) {
										startTime = cs.State.Running.StartedAt
									}
								}
							}
							if startTime != unversioned.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
				_, controller := controllerframework.NewInformer(
					&cache.ListWatch{
						ListFunc: func() (runtime.Object, error) {
							selector := labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
							options := unversioned.ListOptions{LabelSelector: unversioned.LabelSelector{selector}}
							return c.Pods(ns).List(options)
						},
						WatchFunc: func(options unversioned.ListOptions) (watch.Interface, error) {
							options.LabelSelector.Selector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
							return c.Pods(ns).Watch(options)
						},
					},
					&api.Pod{},
					0,
					controllerframework.ResourceEventHandlerFuncs{
						AddFunc: func(obj interface{}) {
							p, ok := obj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
						UpdateFunc: func(oldObj, newObj interface{}) {
							p, ok := newObj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
					},
				)

				stopCh := make(chan struct{})
				go controller.Run(stopCh)

				// Create some additional pods with throughput ~5 pods/sec.
				var wg sync.WaitGroup
				wg.Add(nodeCount)
				podLabels := map[string]string{
					"name": additionalPodsPrefix,
				}
				for i := 1; i <= nodeCount; i++ {
					name := additionalPodsPrefix + "-" + strconv.Itoa(i)
					go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:2.0", podLabels)
					time.Sleep(200 * time.Millisecond)
				}
				wg.Wait()

				Logf("Waiting for all Pods begin observed by the watch...")
				for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
					if time.Since(start) < timeout {
						Failf("Timeout reached waiting for all Pods being observed by the watch.")
					}
				}
				close(stopCh)

				selector := fields.Set{
					"involvedObject.kind":      "Pod",
					"involvedObject.namespace": ns,
					"source":                   "scheduler",
				}.AsSelector()
				options := unversioned.ListOptions{FieldSelector: unversioned.FieldSelector{selector}}
				schedEvents, err := c.Events(ns).List(options)
Example #23
// newLoadBalancerController creates a controller for nginx loadbalancer
func newLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc,
	namespace, nxgConfigMapName, tcpConfigMapName, udpConfigMapName string, runtimeInfo *podInfo) (*loadBalancerController, error) {

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	lbc := loadBalancerController{
		client:       kubeClient,
		stopCh:       make(chan struct{}),
		podInfo:      runtimeInfo,
		nginx:        nginx.NewManager(kubeClient),
		nxgConfigMap: nxgConfigMapName,
		tcpConfigMap: tcpConfigMapName,
		udpConfigMap: udpConfigMapName,
		defaultSvc:   defaultSvc,
		recorder:     eventBroadcaster.NewRecorder(api.EventSource{Component: "loadbalancer-controller"}),
	}

	lbc.syncQueue = NewTaskQueue(lbc.sync)
	lbc.ingQueue = NewTaskQueue(lbc.updateIngressStatus)

	ingEventHandler := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			lbc.recorder.Eventf(addIng, api.EventTypeNormal, "CREATE", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name))
			lbc.ingQueue.enqueue(obj)
			lbc.syncQueue.enqueue(obj)
		},
		DeleteFunc: func(obj interface{}) {
			upIng := obj.(*extensions.Ingress)
			lbc.recorder.Eventf(upIng, api.EventTypeNormal, "DELETE", fmt.Sprintf("%s/%s", upIng.Namespace, upIng.Name))
			lbc.syncQueue.enqueue(obj)
		},
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				upIng := cur.(*extensions.Ingress)
				lbc.recorder.Eventf(upIng, api.EventTypeNormal, "UPDATE", fmt.Sprintf("%s/%s", upIng.Namespace, upIng.Name))
				lbc.ingQueue.enqueue(cur)
				lbc.syncQueue.enqueue(cur)
			}
		},
	}

	eventHandler := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			lbc.syncQueue.enqueue(obj)
		},
		DeleteFunc: func(obj interface{}) {
			lbc.syncQueue.enqueue(obj)
		},
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				lbc.syncQueue.enqueue(cur)
			}
		},
	}

	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, ingEventHandler)

	lbc.endpLister.Store, lbc.endpController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  endpointsListFunc(lbc.client, namespace),
			WatchFunc: endpointsWatchFunc(lbc.client, namespace),
		},
		&api.Endpoints{}, resyncPeriod, eventHandler)

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  serviceListFunc(lbc.client, namespace),
			WatchFunc: serviceWatchFunc(lbc.client, namespace),
		},
		&api.Service{}, resyncPeriod, framework.ResourceEventHandlerFuncs{})

	return &lbc, nil
}
Example #24
						return c.Events(ns).Watch(labels.Everything(), fields.Everything(), rv)
					},
				},
				&api.Event{},
				0,
				framework.ResourceEventHandlerFuncs{
					AddFunc: func(obj interface{}) {
						events = append(events, obj.(*api.Event))
					},
				},
			)
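			// Collect events while the replication controller starts up; the informer is stopped once the count stabilizes.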
			stop := make(chan struct{})
			go controller.Run(stop)

			// Start the replication controller.
			startTime := time.Now()
			expectNoError(RunRC(config))
			e2eStartupTime := time.Since(startTime)
			Logf("E2E startup time for %d pods: %v", totalPods, e2eStartupTime)

			By("Waiting for all events to be recorded")
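			// Poll until the observed event count stops growing between iterations, or the timeout elapses.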
			last := -1
			current := len(events)
			timeout := 10 * time.Minute
			for start := time.Now(); last < current && time.Since(start) < timeout; time.Sleep(10 * time.Second) {
				last = current
				current = len(events)
			}
			close(stop)

			if current != last {
				Logf("Warning: Not all events were recorded after waiting %.2f minutes", timeout.Minutes())
			}
			Logf("Found %d events", current)

			// Tune the threshold for allowed failures.
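			// At most 1% of the created pods may have produced "bad" events.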
			badEvents := BadEvents(events)
			Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(totalPods)))))

			if itArg.runLatencyTest {
				Logf("Scheduling additional Pods to measure startup latencies")

				createTimes := make(map[string]util.Time)
				nodes := make(map[string]string)
				scheduleTimes := make(map[string]util.Time)
				runTimes := make(map[string]util.Time)
				watchTimes := make(map[string]util.Time)

				var mutex sync.Mutex
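				// checkPod records, once per pod, when the watch first saw it running, its creation time,
				// the node it landed on, and the latest container start time.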
				checkPod := func(p *api.Pod) {
					mutex.Lock()
					defer mutex.Unlock()
					defer GinkgoRecover()

					if p.Status.Phase == api.PodRunning {
						if _, found := watchTimes[p.Name]; !found {
							watchTimes[p.Name] = util.Now()
							createTimes[p.Name] = p.CreationTimestamp
							nodes[p.Name] = p.Spec.NodeName
							var startTime util.Time
							for _, cs := range p.Status.ContainerStatuses {
								if cs.State.Running != nil {
									if startTime.Before(cs.State.Running.StartedAt) {
										startTime = cs.State.Running.StartedAt
									}
								}
							}
							if startTime != util.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								Failf("Pod %v is reported to be running, but none of its containers is running", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
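				// Watch only the latency-measurement pods (selected by their "name" label) and feed every add/update to checkPod.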
				_, controller := framework.NewInformer(
					&cache.ListWatch{
						ListFunc: func() (runtime.Object, error) {
							return c.Pods(ns).List(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything())
						},
						WatchFunc: func(rv string) (watch.Interface, error) {
							return c.Pods(ns).Watch(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything(), rv)
						},
					},
					&api.Pod{},
					0,
					framework.ResourceEventHandlerFuncs{
						AddFunc: func(obj interface{}) {
							p, ok := obj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
						UpdateFunc: func(oldObj, newObj interface{}) {
							p, ok := newObj.(*api.Pod)
							Expect(ok).To(Equal(true))
							go checkPod(p)
						},
					},
				)

				stopCh := make(chan struct{})
				go controller.Run(stopCh)

				// Create some additional pods with throughput ~5 pods/sec.
				var wg sync.WaitGroup
				wg.Add(minionCount)
				podLabels := map[string]string{
					"name": additionalPodsPrefix,
				}
				for i := 1; i <= minionCount; i++ {
					name := additionalPodsPrefix + "-" + strconv.Itoa(i)
					go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels)
					time.Sleep(200 * time.Millisecond)
				}
				wg.Wait()

				Logf("Waiting for all Pods to be observed by the watch...")
				for start := time.Now(); len(watchTimes) < minionCount && time.Since(start) < timeout; time.Sleep(10 * time.Second) {
				}
				close(stopCh)

				schedEvents, err := c.Events(ns).List(
Example #25
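		// Create all but the last out-of-disk test pod; each is expected to land on the still-unfilled node.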
		for i := 0; i < numNodeOODPods-1; i++ {
			name := fmt.Sprintf("pod-node-outofdisk-%d", i)
			createOutOfDiskPod(c, ns, name, podCPU)

			expectNoError(framework.WaitForPodRunning(name))
			pod, err := podClient.Get(name)
			expectNoError(err)
			Expect(pod.Spec.NodeName).To(Equal(unfilledNodeName))
		}

		pendingPodName := fmt.Sprintf("pod-node-outofdisk-%d", numNodeOODPods-1)
		createOutOfDiskPod(c, ns, pendingPodName, podCPU)

		By(fmt.Sprintf("Finding a failed scheduler event for pod %s", pendingPodName))
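		// Poll until the scheduler emits a FailedScheduling event for the pending pod.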
		wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
			schedEvents, err := c.Events(ns).List(
				labels.Everything(),
				fields.Set{
					"involvedObject.kind":      "Pod",
					"involvedObject.name":      pendingPodName,
					"involvedObject.namespace": ns,
					"source":                   "scheduler",
					"reason":                   "FailedScheduling",
				}.AsSelector())
			expectNoError(err)

			return len(schedEvents.Items) > 0, nil
Example #26
		}

		pendingPodName := fmt.Sprintf("pod-node-outofdisk-%d", numNodeOODPods-1)
		createOutOfDiskPod(c, ns, pendingPodName, podCPU)

		By(fmt.Sprintf("Finding a failed scheduler event for pod %s", pendingPodName))
		wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
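			// Match FailedScheduling events emitted by the default scheduler for the pending pod.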
			selector := fields.Set{
				"involvedObject.kind":      "Pod",
				"involvedObject.name":      pendingPodName,
				"involvedObject.namespace": ns,
				"source":                   api.DefaultSchedulerName,
				"reason":                   "FailedScheduling",
			}.AsSelector()
			options := api.ListOptions{FieldSelector: selector}
			schedEvents, err := c.Events(ns).List(options)
			expectNoError(err)

			return len(schedEvents.Items) > 0, nil
		})

		nodelist := ListSchedulableNodesOrDie(c)
		Expect(len(nodelist.Items)).To(BeNumerically(">", 1))

		nodeToRecover := nodelist.Items[1]
		Expect(nodeToRecover.Name).ToNot(Equal(unfilledNodeName))
Example #27
				return verifyNoEvents(c.Events(eventNamespace), eventListOptions)
			}, pollConsistent, pollInterval).Should(Succeed())
			By("Make sure the default node condition is generated")
			Eventually(func() error {
				return verifyCondition(c.Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage)
			}, pollConsistent, pollInterval).Should(Succeed())

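			// Temporary errors should each generate an event but leave the node condition untouched.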
			num := 3
			By(fmt.Sprintf("Inject %d temporary errors", num))
			Expect(framework.IssueSSHCommand(injectCommand(tempMessage, num), framework.TestContext.Provider, node)).To(Succeed())
			By(fmt.Sprintf("Wait for %d events to be generated", num))
			Eventually(func() error {
				return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage)
			}, pollTimeout, pollInterval).Should(Succeed())
			By(fmt.Sprintf("Make sure only %d events are generated", num))
			Consistently(func() error {
				return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage)
			}, pollConsistent, pollInterval).Should(Succeed())
			By("Make sure the node condition is still false")
			Expect(verifyCondition(c.Nodes(), node.Name, condition, api.ConditionFalse, defaultReason, defaultMessage)).To(Succeed())

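			// A permanent error should flip the node condition to true without generating additional events.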
			By("Inject 1 permanent error")
			Expect(framework.IssueSSHCommand(injectCommand(permMessage, 1), framework.TestContext.Provider, node)).To(Succeed())
			By("Make sure the corresponding node condition is generated")
			Eventually(func() error {
				return verifyCondition(c.Nodes(), node.Name, condition, api.ConditionTrue, permReason, permMessage)
			}, pollTimeout, pollInterval).Should(Succeed())
			By("Make sure no new events are generated")
			Consistently(func() error {
				return verifyEvents(c.Events(eventNamespace), eventListOptions, num, tempReason, tempMessage)
			}, pollConsistent, pollInterval).Should(Succeed())
		})

		AfterEach(func() {
			By("Delete the node problem detector")
			c.Pods(ns).Delete(name, api.NewDeleteOptions(0))
			By("Wait for the node problem detector to disappear")
			Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
			By("Delete the config map")
			c.ConfigMaps(ns).Delete(configName)
			By("Clean up the events")
			Expect(c.Events(eventNamespace).DeleteCollection(api.NewDeleteOptions(0), eventListOptions)).To(Succeed())