// NewEventsSource initializes a new events source and starts a // goroutine to watch/fetch event updates. func NewEventsSource(client *kubeclient.Client) EventsSource { // Buffered channel to send/receive events from eventsChan := make(chan eventsUpdate, 1024) errorChan := make(chan error) glog.V(4).Infof("Starting event source") go watchLoop(client.Events(kubeapi.NamespaceAll), eventsChan, errorChan) glog.V(4).Infof("Finished starting event source") return &eventsSourceImpl{client, eventsChan, errorChan} }
func NewKubeEvents(client *kubeclient.Client, ec cache.EventsCache) api.Source { // Buffered channel to send/receive events from eventsChan := make(chan eventsUpdate, 1024) errorChan := make(chan error) es := &eventsSourceImpl{ Client: client, eventsChannel: eventsChan, errorChannel: errorChan, ec: ec, } go es.watchLoop(client.Events(kubeapi.NamespaceAll), eventsChan, errorChan) // TODO: Inject Namespace Store in here to get namespace IDs for events. return es }
func SetupEventSending(client *client.Client, hostname string) { glog.Infof("Sending events to api server.") record.StartRecording(client.Events(""), api.EventSource{ Component: "kubelet", Host: hostname, }) }
func verifyResult(c *client.Client, podName string, ns string) { allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything()) expectNoError(err) runningPods := 0 notRunningPods := make([]api.Pod, 0) for _, pod := range allPods.Items { if pod.Status.Phase == api.PodRunning { runningPods += 1 } else { notRunningPods = append(notRunningPods, pod) } } schedEvents, err := c.Events(ns).List( labels.Everything(), fields.Set{ "involvedObject.kind": "Pod", "involvedObject.name": podName, "involvedObject.namespace": ns, "source": "scheduler", "reason": "failedScheduling", }.AsSelector()) expectNoError(err) printed := false printOnce := func(msg string) string { if !printed { printed = true return msg } else { return "" } } Expect(len(notRunningPods)).To(Equal(1), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods))) Expect(schedEvents.Items).ToNot(BeEmpty(), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods))) Expect(notRunningPods[0].Name).To(Equal(podName), printOnce(fmt.Sprintf("Pods found in the cluster: %#v", allPods))) }
// NOTE(review): fragment — the enclosing closure/test body starts above this view.
// Survey every pod in the cluster and partition into running vs. not-running.
allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err)
runningPods := 0
notRunningPods := make([]api.Pod, 0)
for _, pod := range allPods.Items {
	if pod.Status.Phase == api.PodRunning {
		runningPods += 1
	} else {
		notRunningPods = append(notRunningPods, pod)
	}
}

// Fetch scheduler "failedScheduling" events recorded for "additional-pod".
schedEvents, err := c.Events(ns).List(
	labels.Everything(),
	fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.name":      "additional-pod",
		"involvedObject.namespace": ns,
		"source":                   "scheduler",
		"reason":                   "failedScheduling",
	}.AsSelector())
expectNoError(err)

// The cluster should be exactly at capacity, with only the extra
// "additional-pod" left unscheduled and a scheduler event explaining why.
Expect(runningPods).To(Equal(int(totalPodCapacity)))
Expect(len(notRunningPods)).To(Equal(1))
Expect(schedEvents.Items).ToNot(BeEmpty())
Expect(notRunningPods[0].Name).To(Equal("additional-pod"))
})
})
// NOTE(review): fragment — the surrounding test body starts above and
// continues below this view (the final List call is cut mid-expression).
Expect(len(pods.Items)).To(Equal(1))

By("retrieving the pod")
podWithUid, err := podClient.Get(pod.Name)
if err != nil {
	Failf("Failed to get pod: %v", err)
}
fmt.Printf("%+v\n", podWithUid)

// Check for scheduler event about the pod.
By("checking for scheduler event about the pod")
events, err := c.Events(api.NamespaceDefault).List(
	labels.Everything(),
	fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.uid":       string(podWithUid.UID),
		"involvedObject.namespace": api.NamespaceDefault,
		"source":                   "scheduler",
	}.AsSelector(),
)
if err != nil {
	Failf("Error while listing events: %v", err)
}
Expect(len(events.Items)).ToNot(BeZero(), "scheduler events from running pod")
fmt.Println("Saw scheduler event for our pod.")

// Check for kubelet event about the pod.
By("checking for kubelet event about the pod")
events, err = c.Events(api.NamespaceDefault).List(
	labels.Everything(),
	fields.Set{
// TestKubeletSendsEvent checks that kubelets and scheduler send events about pods scheduling and running. func TestKubeletSendsEvent(c *client.Client) bool { provider := os.Getenv("KUBERNETES_PROVIDER") if len(provider) > 0 && provider != "gce" && provider != "gke" { glog.Infof("skipping TestKubeletSendsEvent on cloud provider %s", provider) return true } if provider == "" { glog.Info("KUBERNETES_PROVIDER is unset; assuming \"gce\"") } podClient := c.Pods(api.NamespaceDefault) pod := loadPodOrDie(assetPath("cmd", "e2e", "pod.json")) value := strconv.Itoa(time.Now().Nanosecond()) pod.Labels["time"] = value _, err := podClient.Create(pod) if err != nil { glog.Errorf("Failed to create pod: %v", err) return false } defer podClient.Delete(pod.Name) waitForPodRunning(c, pod.Name) pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))) if len(pods.Items) != 1 { glog.Errorf("Failed to find the correct pod") return false } podWithUid, err := podClient.Get(pod.Name) if err != nil { glog.Errorf("Failed to get pod: %v", err) return false } // Check for scheduler event about the pod. glog.Infof("%+v", podWithUid) events, err := c.Events(api.NamespaceDefault).List( labels.Everything(), labels.Set{ "involvedObject.kind": "Pod", "involvedObject.uid": podWithUid.UID, "involvedObject.namespace": api.NamespaceDefault, "source": "scheduler", }.AsSelector(), ) if err != nil { glog.Error("Error while listing events:", err) return false } if len(events.Items) == 0 { glog.Error("Didn't see any scheduler events even though pod was running.") return false } glog.Info("Saw scheduler event for our pod.") // Check for kubelet event about the pod. 
events, err = c.Events(api.NamespaceDefault).List( labels.Everything(), labels.Set{ "involvedObject.uid": podWithUid.UID, "involvedObject.kind": "BoundPod", "involvedObject.namespace": api.NamespaceDefault, "source": "kubelet", }.AsSelector(), ) if err != nil { glog.Error("Error while listing events:", err) return false } if len(events.Items) == 0 { glog.Error("Didn't see any kubelet events even though pod was running.") return false } glog.Info("Saw kubelet event for our pod.") return true }
	// NOTE(review): fragment — the enclosing test-case loop and the `if`
	// guarding this rename start above this view.
	name = "[Skipped] " + name
}
// Capture the loop variable so the closure below sees a stable value.
itArg := testArg
It(name, func() {
	// Derive unique namespace / RC names from the pod count and a UUID so
	// parallel runs do not collide.
	uuid := string(util.NewUUID())
	totalPods := itArg.podsPerMinion * minionCount
	nameStr := strconv.Itoa(totalPods) + "-" + uuid
	ns = "e2e-density" + nameStr
	RCName = "my-hostname-density" + nameStr

	// Create a listener for events: an informer that appends every event
	// seen in the namespace (resync every 10s) to the `events` slice.
	events := make([](*api.Event), 0)
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Events(ns).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return c.Events(ns).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Event{},
		time.Second*10,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				events = append(events, obj.(*api.Event))
			},
		},
	)
	stop := make(chan struct{})
	go controller.Run(stop)
// NOTE(review): fragment — the enclosing closure/test body starts above
// and continues below this view.
// Replication-controller run configuration for the density test.
config := RCConfig{Client: c,
	Image:                "gcr.io/google_containers/pause:go",
	Name:                 RCName,
	Namespace:            ns,
	PollInterval:         itArg.interval,
	PodStatusFile:        fileHndl,
	Replicas:             totalPods,
	MaxContainerFailures: &MaxContainerFailures,
}

// Create a listener for events: an informer (no resync, period 0) that
// appends every event seen in the namespace to the `events` slice.
events := make([](*api.Event), 0)
_, controller := framework.NewInformer(
	&cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return c.Events(ns).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(rv string) (watch.Interface, error) {
			return c.Events(ns).Watch(labels.Everything(), fields.Everything(), rv)
		},
	},
	&api.Event{},
	0,
	framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			events = append(events, obj.(*api.Event))
		},
	},
)
stop := make(chan struct{})
go controller.Run(stop)
func SetupEventSending(client *client.Client, hostname string) { glog.Infof("Sending events to api server.") record.StartRecording(client.Events("")) }