Code Example #1
File: projectstatus.go  Project: iconoeugen/origin
func describeAdditionalBuildDetail(build *buildgraph.BuildConfigNode, lastSuccessfulBuild *buildgraph.BuildNode, lastUnsuccessfulBuild *buildgraph.BuildNode, activeBuilds []*buildgraph.BuildNode, pushTargetResolved bool, includeSuccess bool) []string {
	if build == nil {
		return nil
	}
	out := []string{}

	passTime := unversioned.Time{}
	if lastSuccessfulBuild != nil {
		passTime = buildTimestamp(lastSuccessfulBuild.Build)
	}
	failTime := unversioned.Time{}
	if lastUnsuccessfulBuild != nil {
		failTime = buildTimestamp(lastUnsuccessfulBuild.Build)
	}

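	// lastTime is the later of the last success and the last failure; it
	// decides below whether active builds are listed before or after the
	// historical entries.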
	lastTime := failTime
	if passTime.After(failTime.Time) {
		lastTime = passTime
	}

	// display the last successful build if specifically requested or we're going to display an active build for context
	if lastSuccessfulBuild != nil && (includeSuccess || len(activeBuilds) > 0) {
		out = append(out, describeBuildPhase(lastSuccessfulBuild.Build, &passTime, build.BuildConfig.Name, pushTargetResolved))
	}
	if passTime.Before(failTime) {
		out = append(out, describeBuildPhase(lastUnsuccessfulBuild.Build, &failTime, build.BuildConfig.Name, pushTargetResolved))
	}

	if len(activeBuilds) > 0 {
		activeOut := []string{}
		for i := range activeBuilds {
			activeOut = append(activeOut, describeBuildPhase(activeBuilds[i].Build, nil, build.BuildConfig.Name, pushTargetResolved))
		}

		if buildTimestamp(activeBuilds[0].Build).Before(lastTime) {
			out = append(out, activeOut...)
		} else {
			out = append(activeOut, out...)
		}
	}
	if len(out) == 0 && lastSuccessfulBuild == nil {
		out = append(out, "not built yet")
	}
	return out
}
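The comparisons above lean on the fact that unversioned.Time embeds time.Time: a zero unversioned.Time{} is never After a real timestamp, so a build that never happened simply loses the comparison. A minimal standalone sketch of that behaviour (our own example, not code from iconoeugen/origin; the import path assumes a pre-1.3 Kubernetes tree):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// A zero unversioned.Time carries the zero time.Time, so it sorts
	// before any real timestamp.
	passTime := unversioned.Time{}              // no successful build yet
	failTime := unversioned.NewTime(time.Now()) // a failure just happened

	lastTime := failTime
	if passTime.After(failTime.Time) { // false: the zero time is never later
		lastTime = passTime
	}
	fmt.Println("most recent build finished at", lastTime.Time)
}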
Code Example #2
File: main.go  Project: aclisp/kubecon
func genOnePod(pod *api.Pod) page.Pod {
	var containerBirth unversioned.Time
	restarts := 0
	totalContainers := len(pod.Spec.Containers)
	readyContainers := 0
	reason := string(pod.Status.Phase)
	conditionMap := make(map[api.PodConditionType]*api.PodCondition)
	PodAllConditions := []api.PodConditionType{api.PodReady}
	for i := range pod.Status.Conditions {
		cond := pod.Status.Conditions[i]
		conditionMap[cond.Type] = &cond
	}
	for _, validCondition := range PodAllConditions {
		if condition, ok := conditionMap[validCondition]; ok {
			if condition.Status != api.ConditionTrue {
				reason = "Not" + string(condition.Type)
			}
		}
	}
	if pod.Status.Reason != "" {
		reason = pod.Status.Reason
	}
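	// Walk the container statuses in reverse to refine the reason, count
	// ready containers, and keep the newest running-container start time.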
	for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
		container := pod.Status.ContainerStatuses[i]

		restarts += container.RestartCount
		if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
			reason = container.State.Waiting.Reason
		} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
			reason = container.State.Terminated.Reason
		} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
			if container.State.Terminated.Signal != 0 {
				reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
			} else {
				reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
			}
		} else if container.Ready && container.State.Running != nil {
			readyContainers++
			if containerBirth.Before(container.State.Running.StartedAt) {
				containerBirth = container.State.Running.StartedAt
			}
			if container.Image == PauseImage {
				reason = "Stopped"
			}
		}
	}
	if pod.DeletionTimestamp != nil {
		reason = "Terminating"
	}
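	// Host-networked pods report their host ports directly; otherwise use
	// the pod IP and any port mapping parsed from the status message.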
	podIP := ""
	portString := ""
	if pod.Spec.HostNetwork {
		podIP = ""
		for i := range pod.Spec.Containers {
			for j := range pod.Spec.Containers[i].Ports {
				port := pod.Spec.Containers[i].Ports[j]
				portString += fmt.Sprintf("%d/%s,", port.HostPort, port.Protocol)
			}
		}
		portString = strings.TrimSuffix(portString, ",")
	} else {
		podIP = pod.Status.PodIP
		matches := portMapping.FindStringSubmatch(pod.Status.Message)
		if len(matches) > 1 {
			portString = matches[1]
		}
	}
	var ports []string
	for _, p := range strings.Split(portString, ",") {
		ports = append(ports, strings.TrimSuffix(p, "/TCP"))
	}
	req, limit, _ := kube.GetSinglePodTotalRequestsAndLimits(pod)

	return page.Pod{
		Namespace:       pod.Namespace,
		Name:            pod.Name,
		Images:          populatePodImages(pod.Spec.Containers),
		TotalContainers: totalContainers,
		ReadyContainers: readyContainers,
		Status:          reason,
		Restarts:        restarts,
		Age:             kube.TranslateTimestamp(pod.CreationTimestamp),
		ContainerAge:    kube.TranslateTimestamp(containerBirth),
		ContainerBirth:  containerBirth.Time,
		HostNetwork:     pod.Spec.HostNetwork,
		HostIP:          pod.Spec.NodeName,
		PodIP:           podIP,
		Ports:           ports,
		Requests:        kube.TranslateResourseList(req),
		Limits:          kube.TranslateResourseList(limit),
	}
}
Code Example #3
File: density.go  Project: hongbin/kubernetes
				var mutex sync.Mutex
				checkPod := func(p *api.Pod) {
					mutex.Lock()
					defer mutex.Unlock()
					defer GinkgoRecover()

					if p.Status.Phase == api.PodRunning {
						if _, found := watchTimes[p.Name]; !found {
							watchTimes[p.Name] = unversioned.Now()
							createTimes[p.Name] = p.CreationTimestamp
							nodes[p.Name] = p.Spec.NodeName
							var startTime unversioned.Time
							for _, cs := range p.Status.ContainerStatuses {
								if cs.State.Running != nil {
									if startTime.Before(cs.State.Running.StartedAt) {
										startTime = cs.State.Running.StartedAt
									}
								}
							}
							if startTime != unversioned.NewTime(time.Time{}) {
								runTimes[p.Name] = startTime
							} else {
								Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
							}
						}
					}
				}

				additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
				_, controller := controllerframework.NewInformer(
Code Example #4
File: latency.go  Project: ruizeng/kubernetes
func runLatencyTest(nodeCount int, c *client.Client, ns string) {
	var (
		nodes              = make(map[string]string, 0)           // pod name -> node name
		createTimestamps   = make(map[string]unversioned.Time, 0) // pod name -> create time
		scheduleTimestamps = make(map[string]unversioned.Time, 0) // pod name -> schedule time
		startTimestamps    = make(map[string]unversioned.Time, 0) // pod name -> time to run
		watchTimestamps    = make(map[string]unversioned.Time, 0) // pod name -> time to read from informer

		additionalPodsPrefix = "latency-pod-" + string(util.NewUUID())
	)

	var mutex sync.Mutex
	readPodInfo := func(p *api.Pod) {
		mutex.Lock()
		defer mutex.Unlock()
		defer GinkgoRecover()

		if p.Status.Phase == api.PodRunning {
			if _, found := watchTimestamps[p.Name]; !found {
				watchTimestamps[p.Name] = unversioned.Now()
				createTimestamps[p.Name] = p.CreationTimestamp
				nodes[p.Name] = p.Spec.NodeName
				var startTimestamp unversioned.Time
				for _, cs := range p.Status.ContainerStatuses {
					if cs.State.Running != nil {
						if startTimestamp.Before(cs.State.Running.StartedAt) {
							startTimestamp = cs.State.Running.StartedAt
						}
					}
				}
				if startTimestamp != unversioned.NewTime(time.Time{}) {
					startTimestamps[p.Name] = startTimestamp
				} else {
					Failf("Pod %v is reported to be running, but none of its containers are", p.Name)
				}
			}
		}
	}

	// Create an informer to read timestamps for each pod
	stopCh := make(chan struct{})
	_, informer := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
				return c.Pods(ns).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
				return c.Pods(ns).Watch(options)
			},
		},
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				p, ok := obj.(*api.Pod)
				Expect(ok).To(Equal(true))
				go readPodInfo(p)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				p, ok := newObj.(*api.Pod)
				Expect(ok).To(Equal(true))
				go readPodInfo(p)
			},
		},
	)
	go informer.Run(stopCh)

	// Create additional pods with throughput ~5 pods/sec.
	var wg sync.WaitGroup
	wg.Add(nodeCount)
	podLabels := map[string]string{
		"name": additionalPodsPrefix,
	}
	for i := 1; i <= nodeCount; i++ {
		name := additionalPodsPrefix + "-" + strconv.Itoa(i)
		go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels)
		time.Sleep(200 * time.Millisecond)
	}
	wg.Wait()

	Logf("Waiting for all Pods to be observed by the watch...")
	for start := time.Now(); len(watchTimestamps) < nodeCount; time.Sleep(10 * time.Second) {
		if time.Since(start) > timeout {
			Failf("Timeout reached waiting for all Pods to be observed by the watch.")
		}
	}
	close(stopCh)

	// Read the schedule timestamp by checking the scheduler event for each pod
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.namespace": ns,
		"source":                   "scheduler",
	}.AsSelector()
	options := api.ListOptions{FieldSelector: selector}
	schedEvents, err := c.Events(ns).List(options)
	expectNoError(err)
	for k := range createTimestamps {
		for _, event := range schedEvents.Items {
			if event.InvolvedObject.Name == k {
				scheduleTimestamps[k] = event.FirstTimestamp
				break
			}
		}
	}

	var (
		scheduleLatencies        = make([]podLatencyData, 0)
		startLatencies           = make([]podLatencyData, 0)
		watchLatencies           = make([]podLatencyData, 0)
		scheduleToWatchLatencies = make([]podLatencyData, 0)
		e2eLatencies             = make([]podLatencyData, 0)
	)

	for name, podNode := range nodes {
		createTs, ok := createTimestamps[name]
		Expect(ok).To(Equal(true))
		scheduleTs, ok := scheduleTimestamps[name]
		Expect(ok).To(Equal(true))
		runTs, ok := startTimestamps[name]
		Expect(ok).To(Equal(true))
		watchTs, ok := watchTimestamps[name]
		Expect(ok).To(Equal(true))

		var (
			scheduleLatency        = podLatencyData{name, podNode, scheduleTs.Time.Sub(createTs.Time)}
			startLatency           = podLatencyData{name, podNode, runTs.Time.Sub(scheduleTs.Time)}
			watchLatency           = podLatencyData{name, podNode, watchTs.Time.Sub(runTs.Time)}
			scheduleToWatchLatency = podLatencyData{name, podNode, watchTs.Time.Sub(scheduleTs.Time)}
			e2eLatency             = podLatencyData{name, podNode, watchTs.Time.Sub(createTs.Time)}
		)

		scheduleLatencies = append(scheduleLatencies, scheduleLatency)
		startLatencies = append(startLatencies, startLatency)
		watchLatencies = append(watchLatencies, watchLatency)
		scheduleToWatchLatencies = append(scheduleToWatchLatencies, scheduleToWatchLatency)
		e2eLatencies = append(e2eLatencies, e2eLatency)
	}

	sort.Sort(latencySlice(scheduleLatencies))
	sort.Sort(latencySlice(startLatencies))
	sort.Sort(latencySlice(watchLatencies))
	sort.Sort(latencySlice(scheduleToWatchLatencies))
	sort.Sort(latencySlice(e2eLatencies))

	printLatencies(scheduleLatencies, "worst schedule latencies")
	printLatencies(startLatencies, "worst run-after-schedule latencies")
	printLatencies(watchLatencies, "worst watch latencies")
	printLatencies(scheduleToWatchLatencies, "worst scheduled-to-end total latencies")
	printLatencies(e2eLatencies, "worst e2e total latencies")

	// Ensure all latencies are under the expected ceilings.
	// These numbers were guessed based on numerous Jenkins e2e runs.
	testMaximumLatencyValue(scheduleLatencies, 1*time.Second, "scheduleLatencies")
	testMaximumLatencyValue(startLatencies, 15*time.Second, "startLatencies")
	testMaximumLatencyValue(watchLatencies, 8*time.Second, "watchLatencies")
	testMaximumLatencyValue(scheduleToWatchLatencies, 5*time.Second, "scheduleToWatchLatencies")
	testMaximumLatencyValue(e2eLatencies, 5*time.Second, "e2eLatencies")

	// Test whether e2e pod startup time is acceptable.
	podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLatencies)}
	expectNoError(VerifyPodStartupLatency(podStartupLatency))

	// Log suspicious latency metrics/docker errors from all nodes that had slow startup times
	logSuspiciousLatency(startLatencies, nil, nodeCount, c)
}
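Examples #2 through #4 all use the same idiom for deciding when a pod's containers actually started: keep a zero-valued unversioned.Time and replace it whenever a running container's StartedAt sorts later. A sketch of that idiom pulled out into a helper (latestStartTime and the sample statuses are ours, not code from the projects above; import paths again assume a pre-1.3 Kubernetes tree):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
)

// latestStartTime returns the StartedAt of the most recently started
// running container, or the zero unversioned.Time if none is running.
func latestStartTime(statuses []api.ContainerStatus) unversioned.Time {
	var latest unversioned.Time
	for _, cs := range statuses {
		if cs.State.Running != nil && latest.Before(cs.State.Running.StartedAt) {
			latest = cs.State.Running.StartedAt
		}
	}
	return latest
}

func main() {
	statuses := []api.ContainerStatus{
		{State: api.ContainerState{Running: &api.ContainerStateRunning{StartedAt: unversioned.NewTime(time.Now())}}},
		{State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ImagePullBackOff"}}},
	}
	// The zero-value check mirrors the "none of its containers is running"
	// guard in examples #3 and #4.
	if start := latestStartTime(statuses); start != unversioned.NewTime(time.Time{}) {
		fmt.Println("latest running container started at", start.Time)
	}
}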