// RecordConfigEvent records an event for the deployment config referenced by the // deployment. func RecordConfigEvent(client kclient.EventNamespacer, deployment *kapi.ReplicationController, decoder runtime.Decoder, eventType, reason, msg string) { t := unversioned.Time{Time: time.Now()} var obj runtime.Object = deployment if config, err := deployutil.DecodeDeploymentConfig(deployment, decoder); err == nil { obj = config } else { glog.Errorf("Unable to decode deployment config from %s/%s: %v", deployment.Namespace, deployment.Name, err) } ref, err := kapi.GetReference(obj) if err != nil { glog.Errorf("Unable to get reference for %#v: %v", obj, err) return } event := &kapi.Event{ ObjectMeta: kapi.ObjectMeta{ Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), Namespace: ref.Namespace, }, InvolvedObject: *ref, Reason: reason, Message: msg, Source: kapi.EventSource{ Component: deployutil.DeployerPodNameFor(deployment), }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, } if _, err := client.Events(ref.Namespace).Create(event); err != nil { glog.Errorf("Could not create event '%#v': %v", event, err) } }
// Convert_string_slice_To_unversioned_Time allows converting a URL query parameter value into an unversioned.Time; only the first element of the slice is consulted func Convert_string_slice_To_unversioned_Time(input *[]string, out *unversioned.Time, s conversion.Scope) error { str := "" if len(*input) > 0 { str = (*input)[0] } return out.UnmarshalQueryParameter(str) }
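To make the first-element-wins semantics concrete, here is a minimal stdlib-only sketch; fromQuery is a hypothetical helper, and it assumes (as UnmarshalQueryParameter does) that values are RFC3339 and that an empty parameter means the zero time:

package main

import (
	"fmt"
	"time"
)

// fromQuery mirrors the conversion above: only the first query value
// is consulted, and an empty parameter yields the zero time.
func fromQuery(values []string) (time.Time, error) {
	str := ""
	if len(values) > 0 {
		str = values[0]
	}
	if str == "" {
		return time.Time{}, nil
	}
	return time.Parse(time.RFC3339, str)
}

func main() {
	t, err := fromQuery([]string{"2016-01-02T15:04:05Z", "ignored"})
	fmt.Println(t, err) // 2016-01-02 15:04:05 +0000 UTC <nil>
}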
func (e *HookExecutor) emitEvent(deployment *kapi.ReplicationController, eventType, reason, msg string) { t := unversioned.Time{Time: time.Now()} var ref *kapi.ObjectReference if config, err := deployutil.DecodeDeploymentConfig(deployment, e.decoder); err != nil { glog.Errorf("Unable to decode deployment config from %s/%s: %v", deployment.Namespace, deployment.Name, err) if ref, err = kapi.GetReference(deployment); err != nil { glog.Errorf("Unable to get reference for %#v: %v", deployment, err) return } } else { if ref, err = kapi.GetReference(config); err != nil { glog.Errorf("Unable to get reference for %#v: %v", config, err) return } } event := &kapi.Event{ ObjectMeta: kapi.ObjectMeta{ Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), Namespace: ref.Namespace, }, InvolvedObject: *ref, Reason: reason, Message: msg, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, } if _, err := e.events.Create(event); err != nil { glog.Errorf("Could not send event '%#v': %v", event, err) } }
func (u *Unstructured) GetDeletionTimestamp() *unversioned.Time { var timestamp unversioned.Time timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp")) if timestamp.IsZero() { return nil } return &timestamp }
func containerRestartedRecently(status kapi.ContainerStatus, now unversioned.Time) bool { if status.RestartCount == 0 { return false } if status.LastTerminationState.Terminated != nil && now.Sub(status.LastTerminationState.Terminated.FinishedAt.Time) < RestartRecentDuration { return true } return false }
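The same recency check with plain time.Time values, as a runnable sketch; the restartRecentDuration value below is an illustrative assumption, not the RestartRecentDuration constant from the source:

package main

import (
	"fmt"
	"time"
)

// restartRecentDuration is an illustrative window, not the source's
// RestartRecentDuration constant.
const restartRecentDuration = 10 * time.Minute

// restartedRecently mirrors containerRestartedRecently: no restarts
// means false; otherwise the last termination must fall inside the
// recency window (a zero finishedAt models "no termination record").
func restartedRecently(restartCount int, finishedAt, now time.Time) bool {
	if restartCount == 0 {
		return false
	}
	return !finishedAt.IsZero() && now.Sub(finishedAt) < restartRecentDuration
}

func main() {
	now := time.Now()
	fmt.Println(restartedRecently(1, now.Add(-5*time.Minute), now))  // true
	fmt.Println(restartedRecently(1, now.Add(-30*time.Minute), now)) // false
}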
// hasIngressBeenTouched returns true if the route appears to have been touched since the last touch we recorded for it func (a *StatusAdmitter) hasIngressBeenTouched(route *routeapi.Route, lastTouch *unversioned.Time) bool { glog.V(4).Infof("has last touch %v for %s/%s", lastTouch, route.Namespace, route.Name) if lastTouch.IsZero() { return false } old, ok := a.expected.Get(route.UID) if !ok || old.(time.Time).Equal(lastTouch.Time) { return false } return true }
func deepCopy_unversioned_Time(in unversioned.Time, out *unversioned.Time, c *conversion.Cloner) error { if newVal, err := c.DeepCopy(in.Time); err != nil { return err } else { out.Time = newVal.(time.Time) } return nil }
func describeAdditionalBuildDetail(build *buildgraph.BuildConfigNode, lastSuccessfulBuild *buildgraph.BuildNode, lastUnsuccessfulBuild *buildgraph.BuildNode, activeBuilds []*buildgraph.BuildNode, pushTargetResolved bool, includeSuccess bool) []string { if build == nil { return nil } out := []string{} passTime := unversioned.Time{} if lastSuccessfulBuild != nil { passTime = buildTimestamp(lastSuccessfulBuild.Build) } failTime := unversioned.Time{} if lastUnsuccessfulBuild != nil { failTime = buildTimestamp(lastUnsuccessfulBuild.Build) } lastTime := failTime if passTime.After(failTime.Time) { lastTime = passTime } // display the last successful build if specifically requested or we're going to display an active build for context if lastSuccessfulBuild != nil && (includeSuccess || len(activeBuilds) > 0) { out = append(out, describeBuildPhase(lastSuccessfulBuild.Build, &passTime, build.BuildConfig.Name, pushTargetResolved)) } if passTime.Before(failTime) { out = append(out, describeBuildPhase(lastUnsuccessfulBuild.Build, &failTime, build.BuildConfig.Name, pushTargetResolved)) } if len(activeBuilds) > 0 { activeOut := []string{} for i := range activeBuilds { activeOut = append(activeOut, describeBuildPhase(activeBuilds[i].Build, nil, build.BuildConfig.Name, pushTargetResolved)) } if buildTimestamp(activeBuilds[0].Build).Before(lastTime) { out = append(out, activeOut...) } else { out = append(activeOut, out...) } } if len(out) == 0 && lastSuccessfulBuild == nil { out = append(out, "not built yet") } return out }
func printImageStream(stream *imageapi.ImageStream, w io.Writer, opts kctl.PrintOptions) error { name := formatResourceName(opts.Kind, stream.Name, opts.WithKind) tags := "" const numOfTagsShown = 3 var latest unversioned.Time for _, list := range stream.Status.Tags { if len(list.Items) > 0 { if list.Items[0].Created.After(latest.Time) { latest = list.Items[0].Created } } } latestTime := "" if !latest.IsZero() { latestTime = fmt.Sprintf("%s ago", formatRelativeTime(latest.Time)) } list := imageapi.SortStatusTags(stream.Status.Tags) more := false if len(list) > numOfTagsShown { list = list[:numOfTagsShown] more = true } tags = strings.Join(list, ",") if more { tags = fmt.Sprintf("%s + %d more...", tags, len(stream.Status.Tags)-numOfTagsShown) } if opts.WithNamespace { if _, err := fmt.Fprintf(w, "%s\t", stream.Namespace); err != nil { return err } } repo := stream.Spec.DockerImageRepository if len(repo) == 0 { repo = stream.Status.DockerImageRepository } if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", name, repo, tags, latestTime); err != nil { return err } if err := appendItemLabels(stream.Labels, w, opts.ColumnLabels, opts.ShowLabels); err != nil { return err } return nil }
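The tag-truncation rule above, isolated into a runnable sketch (summarizeTags is a hypothetical name for the inline logic):

package main

import (
	"fmt"
	"strings"
)

// summarizeTags renders at most n tags and summarizes the remainder,
// matching the "%s + %d more..." formatting above.
func summarizeTags(tags []string, n int) string {
	if len(tags) <= n {
		return strings.Join(tags, ",")
	}
	return fmt.Sprintf("%s + %d more...", strings.Join(tags[:n], ","), len(tags)-n)
}

func main() {
	tags := []string{"latest", "v1.2", "v1.1", "v1.0", "stable"}
	fmt.Println(summarizeTags(tags, 3)) // latest,v1.2,v1.1 + 2 more...
}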
func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, reason, message string) *api.Event { t := unversioned.Time{Time: recorder.clock.Now()} namespace := ref.Namespace if namespace == "" { namespace = api.NamespaceDefault } return &api.Event{ ObjectMeta: api.ObjectMeta{ Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), Namespace: namespace, }, InvolvedObject: *ref, Reason: reason, Message: message, FirstTimestamp: t, LastTimestamp: t, Count: 1, } }
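The %v.%x naming scheme in isolation; the printed value is illustrative:

package main

import (
	"fmt"
	"time"
)

func main() {
	// "%v.%x" appends the hex-encoded nanosecond timestamp, which
	// makes the event name effectively unique per emission for the
	// same object.
	name := fmt.Sprintf("%v.%x", "my-pod", time.Now().UnixNano())
	fmt.Println(name) // e.g. my-pod.14d2c1f8a3b4c5d6
}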
func describeBuildPhase(build *buildapi.Build, t *unversioned.Time, parentName string, pushTargetResolved bool) string { imageStreamFailure := "" // if we're using an image stream and that image stream is the internal registry and that registry doesn't exist if (build.Spec.Output.To != nil) && !pushTargetResolved { imageStreamFailure = " (can't push to image)" } if t == nil { ts := buildTimestamp(build) t = &ts } var time string if t.IsZero() { time = "<unknown>" } else { time = strings.ToLower(formatRelativeTime(t.Time)) } buildIdentification := fmt.Sprintf("build/%s", build.Name) prefix := parentName + "-" if strings.HasPrefix(build.Name, prefix) { suffix := build.Name[len(prefix):] if buildNumber, err := strconv.Atoi(suffix); err == nil { buildIdentification = fmt.Sprintf("build #%d", buildNumber) } } revision := describeSourceRevision(build.Spec.Revision) if len(revision) != 0 { revision = fmt.Sprintf(" - %s", revision) } switch build.Status.Phase { case buildapi.BuildPhaseComplete: return fmt.Sprintf("%s succeeded %s ago%s%s", buildIdentification, time, revision, imageStreamFailure) case buildapi.BuildPhaseError: return fmt.Sprintf("%s stopped with an error %s ago%s%s", buildIdentification, time, revision, imageStreamFailure) case buildapi.BuildPhaseFailed: return fmt.Sprintf("%s failed %s ago%s%s", buildIdentification, time, revision, imageStreamFailure) default: status := strings.ToLower(string(build.Status.Phase)) return fmt.Sprintf("%s %s for %s%s%s", buildIdentification, status, time, revision, imageStreamFailure) } }
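The build-number extraction used above, reduced to a runnable sketch (buildNumber is a hypothetical helper):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// buildNumber recovers the numeric suffix from names like "cfg-12";
// the second return value reports whether the name matched the pattern.
func buildNumber(name, parent string) (int, bool) {
	prefix := parent + "-"
	if !strings.HasPrefix(name, prefix) {
		return 0, false
	}
	n, err := strconv.Atoi(name[len(prefix):])
	return n, err == nil
}

func main() {
	fmt.Println(buildNumber("frontend-12", "frontend")) // 12 true
	fmt.Println(buildNumber("other-12", "frontend"))    // 0 false
}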
func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event { fmt.Println("make event") t := unversioned.Time{Time: f.clock.Now()} namespace := ref.Namespace if namespace == "" { namespace = v1.NamespaceDefault } return &v1.Event{ ObjectMeta: v1.ObjectMeta{ Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), Namespace: namespace, }, InvolvedObject: *ref, Reason: reason, Message: message, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventtype, } }
// hasIngressBeenTouched returns true if the route appears to have been touched since the last touch we recorded for it func (a *StatusAdmitter) hasIngressBeenTouched(route *routeapi.Route, lastTouch *unversioned.Time) bool { glog.V(4).Infof("has last touch %v for %s/%s", lastTouch, route.Namespace, route.Name) if lastTouch.IsZero() { return false } old, ok := a.expected.Get(route.UID) if ok && old.(time.Time).Before(nowFn().Add(-a.contentionInterval)) { // throw out cache entries from before the contention interval, in case this is no longer valid // (e.g. the previous updater no longer exists due to scale down) glog.V(4).Infof("expired cached last touch of %s", old.(time.Time)) a.expected.Remove(route.UID) ok = false } if !ok || old.(time.Time).Equal(lastTouch.Time) { glog.V(4).Infof("missing or equal cached last touch") return false } glog.V(4).Infof("different cached last touch of %s", old.(time.Time)) return true }
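A simplified stdlib-only model of the contention-interval expiry above; touchCache and its fields are hypothetical stand-ins for the UID-keyed expected cache:

package main

import (
	"fmt"
	"time"
)

type touchCache struct {
	entries            map[string]time.Time
	contentionInterval time.Duration
}

// touched reports whether lastTouch differs from the cached value,
// expiring entries older than the contention interval first (a
// departed writer must not pin the decision forever).
func (c *touchCache) touched(uid string, lastTouch, now time.Time) bool {
	if lastTouch.IsZero() {
		return false
	}
	old, ok := c.entries[uid]
	if ok && old.Before(now.Add(-c.contentionInterval)) {
		delete(c.entries, uid)
		ok = false
	}
	return ok && !old.Equal(lastTouch)
}

func main() {
	c := &touchCache{entries: map[string]time.Time{}, contentionInterval: time.Minute}
	now := time.Now()
	c.entries["r1"] = now.Add(-10 * time.Second)
	fmt.Println(c.touched("r1", now, now)) // true: cached touch differs
	c.entries["r2"] = now.Add(-2 * time.Minute)
	fmt.Println(c.touched("r2", now, now)) // false: cache entry expired
}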
func printImageStream(stream *imageapi.ImageStream, w io.Writer, withNamespace, wide, showAll bool, columnLabels []string) error { tags := "" const numOfTagsShown = 3 var latest unversioned.Time for _, list := range stream.Status.Tags { if len(list.Items) > 0 { if list.Items[0].Created.After(latest.Time) { latest = list.Items[0].Created } } } latestTime := "" if !latest.IsZero() { latestTime = fmt.Sprintf("%s ago", formatRelativeTime(latest.Time)) } list := imageapi.SortStatusTags(stream.Status.Tags) more := false if len(list) > numOfTagsShown { list = list[:numOfTagsShown] more = true } tags = strings.Join(list, ",") if more { tags = fmt.Sprintf("%s + %d more...", tags, len(stream.Status.Tags)-numOfTagsShown) } if withNamespace { if _, err := fmt.Fprintf(w, "%s\t", stream.Namespace); err != nil { return err } } repo := stream.Spec.DockerImageRepository if len(repo) == 0 { repo = stream.Status.DockerImageRepository } _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", stream.Name, repo, tags, latestTime) return err }
func (u *Unstructured) SetDeletionTimestamp(timestamp *unversioned.Time) { ts, _ := timestamp.MarshalQueryParameter() u.setNestedField(ts, "metadata", "deletionTimestamp") }
func TestWildcardSubDomainOwnership(t *testing.T) { p := &fakePlugin{} recorder := rejectionRecorder{rejections: make(map[string]string)} admitter := NewHostAdmitter(p, wildcardAdmitter, true, recorder) oldest := unversioned.Time{Time: time.Now()} ownerRoute := &routeapi.Route{ ObjectMeta: kapi.ObjectMeta{ CreationTimestamp: oldest, Name: "first", Namespace: "owner", }, Spec: routeapi.RouteSpec{ Host: "owner.namespace.test", WildcardPolicy: routeapi.WildcardPolicySubdomain, }, } err := admitter.HandleRoute(watch.Added, ownerRoute) if err != nil { t.Fatalf("Owner route not admitted: %v", err) } tests := []struct { createdAt unversioned.Time name string namespace string host string policy routeapi.WildcardPolicyType reason string }{ { name: "nohost", namespace: "something", }, { name: "blockedhost", namespace: "blocked", host: "www.internal." + BlockedTestDomain, reason: "RouteNotAdmitted", }, { name: "blockedhost2", namespace: "blocked", host: "www.internal." + BlockedTestDomain, policy: routeapi.WildcardPolicyNone, reason: "RouteNotAdmitted", }, { name: "blockedhostwildcard", namespace: "blocked", host: "www.wildcard." + BlockedTestDomain, policy: routeapi.WildcardPolicySubdomain, reason: "RouteNotAdmitted", }, { createdAt: unversioned.Time{Time: oldest.Add(2 * time.Hour)}, name: "diffnamespace", namespace: "notowner", host: "www.namespace.test", reason: "HostAlreadyClaimed", }, { createdAt: unversioned.Time{Time: oldest.Add(2 * time.Hour)}, name: "diffnamespace2", namespace: "notowner", host: "www.namespace.test", policy: routeapi.WildcardPolicyNone, reason: "HostAlreadyClaimed", }, { createdAt: unversioned.Time{Time: oldest.Add(2 * time.Hour)}, name: "diffnamespacewildcard", namespace: "notowner", host: "www.namespace.test", policy: routeapi.WildcardPolicySubdomain, reason: "HostAlreadyClaimed", }, { createdAt: unversioned.Time{Time: oldest.Add(2 * time.Hour)}, name: "diffns2", namespace: "fortytwo", host: "www.namespace.test", policy: routeapi.WildcardPolicyNone, reason: "HostAlreadyClaimed", }, { createdAt: unversioned.Time{Time: oldest.Add(3 * time.Hour)}, name: "host2diffns2", namespace: "fortytwo", host: "api.namespace.test", policy: routeapi.WildcardPolicyNone, reason: "HostAlreadyClaimed", }, { createdAt: unversioned.Time{Time: oldest.Add(3 * time.Hour)}, name: "host2diffns3", namespace: "fortytwo", host: "api.namespace.test", policy: routeapi.WildcardPolicySubdomain, reason: "HostAlreadyClaimed", }, { createdAt: unversioned.Time{Time: oldest.Add(4 * time.Hour)}, name: "ownernshost", namespace: "owner", host: "api.namespace.test", }, { createdAt: unversioned.Time{Time: oldest.Add(4 * time.Hour)}, name: "ownernswildcardhost", namespace: "owner", host: "wild.namespace.test", policy: routeapi.WildcardPolicySubdomain, reason: "HostAlreadyClaimed", }, { name: "tldhost", namespace: "ns1", host: "ns1.org", }, { name: "tldhost2", namespace: "ns2", host: "ns2.org", policy: routeapi.WildcardPolicyNone, }, { name: "tldhostwildcard", namespace: "wild", host: "wild.play", policy: routeapi.WildcardPolicySubdomain, }, { name: "anothertldhostwildcard", namespace: "oscarwilde", host: "oscarwilde.com", policy: routeapi.WildcardPolicySubdomain, }, { name: "yatldhostwildcard", namespace: "yap", host: "test.me", policy: routeapi.WildcardPolicySubdomain, }, { name: "yatldhost2", namespace: "yap", host: "vinyl.play", policy: routeapi.WildcardPolicyNone, }, { name: "level2sub", namespace: "l2s", host: "test.co.us", }, { name: "level2sub2", namespace: "l2s", host: "unit.co.us", policy: 
routeapi.WildcardPolicyNone, }, { name: "level2sub3", namespace: "l2s", host: "qe.co.us", policy: routeapi.WildcardPolicySubdomain, }, } for _, tc := range tests { route := &routeapi.Route{ ObjectMeta: kapi.ObjectMeta{ CreationTimestamp: tc.createdAt, Name: tc.name, Namespace: tc.namespace, }, Spec: routeapi.RouteSpec{ Host: tc.host, WildcardPolicy: tc.policy, }, } err := admitter.HandleRoute(watch.Added, route) if tc.reason != "" { if err == nil { t.Fatalf("Test case %s expected errors, got none", tc.name) } k := recorder.rejectionKey(route) if recorder.rejections[k] != tc.reason { t.Fatalf("Test case %s expected error %s, got %s", tc.name, tc.reason, recorder.rejections[k]) } } else { if err != nil { t.Fatalf("Test case %s expected no errors, got %v", tc.name, err) } } } wildcardRoute := &routeapi.Route{ ObjectMeta: kapi.ObjectMeta{ CreationTimestamp: unversioned.Time{Time: oldest.Add(time.Hour)}, Name: "wildcard-owner", Namespace: "owner", }, Spec: routeapi.RouteSpec{ Host: "wildcard.namespace.test", WildcardPolicy: routeapi.WildcardPolicySubdomain, }, } err = admitter.HandleRoute(watch.Added, wildcardRoute) if err != nil { k := recorder.rejectionKey(wildcardRoute) if recorder.rejections[k] != "HostAlreadyClaimed" { t.Fatalf("Wildcard route expected host already claimed error, got %v - error=%v", recorder.rejections[k], err) } } else { t.Fatalf("Newer wildcard route expected errors, got none") } // bounce all the routes from the namespace "owner" and claim // ownership of the subdomain for the namespace "bouncer". bouncer := &routeapi.Route{ ObjectMeta: kapi.ObjectMeta{ CreationTimestamp: unversioned.Time{Time: oldest.Add(-1 * time.Hour)}, Name: "hosted", Namespace: "bouncer", }, Spec: routeapi.RouteSpec{ Host: "api.namespace.test", }, } err = admitter.HandleRoute(watch.Added, bouncer) if err != nil { t.Fatalf("bouncer route expected no errors, got %v", err) } // The bouncer route should kick out the owner and wildcard routes. bouncedRoutes := []*routeapi.Route{ownerRoute, wildcardRoute} for _, route := range bouncedRoutes { k := recorder.rejectionKey(route) if recorder.rejections[k] != "HostAlreadyClaimed" { t.Fatalf("bounced route %s expected a subdomain already claimed error, got %s", k, recorder.rejections[k]) } } }
scheduleTimes := make(map[string]unversioned.Time, 0) runTimes := make(map[string]unversioned.Time, 0) watchTimes := make(map[string]unversioned.Time, 0) var mutex sync.Mutex checkPod := func(p *api.Pod) { mutex.Lock() defer mutex.Unlock() defer GinkgoRecover() if p.Status.Phase == api.PodRunning { if _, found := watchTimes[p.Name]; !found { watchTimes[p.Name] = unversioned.Now() createTimes[p.Name] = p.CreationTimestamp nodes[p.Name] = p.Spec.NodeName var startTime unversioned.Time for _, cs := range p.Status.ContainerStatuses { if cs.State.Running != nil { if startTime.Before(cs.State.Running.StartedAt) { startTime = cs.State.Running.StartedAt } } } if startTime != unversioned.NewTime(time.Time{}) { runTimes[p.Name] = startTime } else { Failf("Pod %v is reported to be running, but none of its containers is running", p.Name) } } } }
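Selecting the most recent container start time, as checkPod does, isolated with plain time.Time values (latestStart is a hypothetical helper):

package main

import (
	"fmt"
	"time"
)

// latestStart returns the newest of the given start times; the zero
// value is returned when the slice is empty, mirroring the "no running
// container" case above.
func latestStart(starts []time.Time) time.Time {
	var latest time.Time
	for _, s := range starts {
		if latest.Before(s) {
			latest = s
		}
	}
	return latest
}

func main() {
	now := time.Now()
	got := latestStart([]time.Time{now.Add(-time.Hour), now, now.Add(-time.Minute)})
	fmt.Println(got.Equal(now)) // true
}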
func TestValidRouteAdmissionFuzzing(t *testing.T) { p := &fakePlugin{} admitAll := func(route *routeapi.Route) error { return nil } recorder := rejectionRecorder{rejections: make(map[string]string)} admitter := NewHostAdmitter(p, RouteAdmissionFunc(admitAll), true, recorder) oldest := unversioned.Time{Time: time.Now()} makeTime := func(d time.Duration) unversioned.Time { return unversioned.Time{Time: oldest.Add(d)} } routes := []*routeapi.Route{ makeRoute("ns1", "r1", "net", "", false, makeTime(0*time.Second)), makeRoute("ns2", "r2", "com", "", false, makeTime(1*time.Second)), makeRoute("ns3", "r3", "domain1.com", "", false, makeTime(2*time.Second)), makeRoute("ns4", "r4", "domain2.com", "", false, makeTime(3*time.Second)), makeRoute("ns5", "r5", "foo.domain1.com", "", false, makeTime(4*time.Second)), makeRoute("ns6", "r6", "bar.domain1.com", "", false, makeTime(5*time.Second)), makeRoute("ns7", "r7", "sub.foo.domain1.com", "", true, makeTime(6*time.Second)), makeRoute("ns8", "r8", "sub.bar.domain1.com", "", true, makeTime(7*time.Second)), makeRoute("ns8", "r9", "sub.bar.domain1.com", "/p1", true, makeTime(8*time.Second)), makeRoute("ns8", "r10", "sub.bar.domain1.com", "/p2", true, makeTime(9*time.Second)), makeRoute("ns8", "r11", "sub.bar.domain1.com", "/p1/p2/p3", true, makeTime(10*time.Second)), makeRoute("ns9", "r12", "sub.bar.domain2.com", "", false, makeTime(11*time.Second)), makeRoute("ns9", "r13", "sub.bar.domain2.com", "/p1", false, makeTime(12*time.Second)), makeRoute("ns9", "r14", "sub.bar.domain2.com", "/p2", false, makeTime(13*time.Second)), } rand.Seed(1) existing := sets.NewInt() errors := sets.NewString() for i := 0; i < 1000; i++ { add := false switch { case len(existing) == len(routes): add = false case len(existing) == 0: add = true default: add = (rand.Intn(4) > 0) } index := 0 if add { index = rand.Intn(len(routes)) if existing.Has(index) { // t.Logf("%d: updated route %d", i, index) if err := admitter.HandleRoute(watch.Modified, routes[index]); err != nil { errors.Insert(fmt.Sprintf("error updating route %s/%s: %v", routes[index].Namespace, routes[index].Name, err.Error())) } } else { // t.Logf("%d: added route %d", i, index) if err := admitter.HandleRoute(watch.Added, routes[index]); err != nil { errors.Insert(fmt.Sprintf("error adding route %s/%s: %v", routes[index].Namespace, routes[index].Name, err.Error())) } } existing.Insert(index) } else { index = existing.List()[rand.Intn(len(existing))] // t.Logf("%d: deleted route %d", i, index) if err := admitter.HandleRoute(watch.Deleted, routes[index]); err != nil { errors.Insert(fmt.Sprintf("error deleting route %s/%s: %v", routes[index].Namespace, routes[index].Name, err.Error())) } existing.Delete(index) } } if len(errors) > 0 { t.Errorf("Unexpected errors:\n%s", strings.Join(errors.List(), "\n")) } if len(recorder.rejections) > 0 { t.Errorf("Unexpected rejections: %#v", recorder.rejections) } }
// afterOrZero checks if time t1 is after time t2; if one of them // is zero, the zero time is seen as after non-zero time. func afterOrZero(t1, t2 unversioned.Time) bool { if t1.Time.IsZero() || t2.Time.IsZero() { return t1.Time.IsZero() } return t1.After(t2.Time) }
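A runnable illustration of the zero-sorts-after ordering, using plain time.Time in place of unversioned.Time:

package main

import (
	"fmt"
	"time"
)

// afterOrZero: a zero time sorts after any non-zero time; when both
// are zero the first argument still wins.
func afterOrZero(t1, t2 time.Time) bool {
	if t1.IsZero() || t2.IsZero() {
		return t1.IsZero()
	}
	return t1.After(t2)
}

func main() {
	now := time.Now()
	fmt.Println(afterOrZero(time.Time{}, now))        // true
	fmt.Println(afterOrZero(now, time.Time{}))        // false
	fmt.Println(afterOrZero(now.Add(time.Hour), now)) // true
}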
func TestInvalidRouteAdmissionFuzzing(t *testing.T) { p := &fakePlugin{} admitAll := func(route *routeapi.Route) error { return nil } recorder := rejectionRecorder{rejections: make(map[string]string)} admitter := NewHostAdmitter(p, RouteAdmissionFunc(admitAll), true, recorder) oldest := unversioned.Time{Time: time.Now()} makeTime := func(d time.Duration) unversioned.Time { return unversioned.Time{Time: oldest.Add(d)} } routes := []struct { Route *routeapi.Route ErrIfInt sets.Int ErrIf sets.String }{ // Wildcard and explicit allowed in same namespace {Route: makeRoute("ns1", "r1", "net", "", false, makeTime(0*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns1", "r2", "net", "", true, makeTime(1*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns1", "r3", "www.same.net", "", false, makeTime(2*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns1", "r4", "www.same.net", "", true, makeTime(3*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns1", "r5", "foo.same.net", "", true, makeTime(4*time.Second)), ErrIf: sets.NewString(`ns1/r4`)}, {Route: makeRoute("ns2", "r1", "com", "", false, makeTime(10*time.Second)), ErrIf: sets.NewString(`ns1/r2`)}, {Route: makeRoute("ns2", "r2", "com", "", true, makeTime(11*time.Second)), ErrIf: sets.NewString(`ns1/r1`, `ns1/r2`)}, {Route: makeRoute("ns2", "r3", "www.same.com", "", false, makeTime(12*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns2", "r4", "www.same.com", "", true, makeTime(13*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns2", "r5", "www.same.com", "/abc", true, makeTime(13*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns2", "r6", "foo.same.com", "", true, makeTime(14*time.Second)), ErrIf: sets.NewString(`ns2/r4`)}, {Route: makeRoute("ns2", "r7", "foo.same.com", "/abc", true, makeTime(14*time.Second)), ErrIf: sets.NewString(`ns2/r5`)}, // Fails because of other namespaces {Route: makeRoute("ns3", "r1", "net", "", false, makeTime(20*time.Second)), ErrIf: sets.NewString(`ns1/r1`, `ns1/r2`, `ns2/r2`)}, {Route: makeRoute("ns3", "r2", "net", "", true, makeTime(21*time.Second)), ErrIf: sets.NewString(`ns1/r1`, `ns1/r2`, `ns2/r1`, `ns2/r2`)}, {Route: makeRoute("ns3", "r3", "net", "/p1", true, makeTime(22*time.Second)), ErrIf: sets.NewString(`ns1/r1`, `ns1/r2`, `ns2/r1`, `ns2/r2`)}, {Route: makeRoute("ns3", "r4", "com", "", false, makeTime(23*time.Second)), ErrIf: sets.NewString(`ns1/r2`, `ns2/r1`, `ns2/r2`)}, {Route: makeRoute("ns3", "r5", "com", "", true, makeTime(24*time.Second)), ErrIf: sets.NewString(`ns1/r1`, `ns1/r2`, `ns2/r1`, `ns2/r2`, `ns3/r2`)}, {Route: makeRoute("ns3", "r6", "com", "/p1/p2", true, makeTime(25*time.Second)), ErrIf: sets.NewString(`ns1/r1`, `ns1/r2`, `ns2/r1`, `ns2/r2`)}, // Interleaved ages between namespaces {Route: makeRoute("ns4", "r1", "domain1.com", "", false, makeTime(30*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns5", "r1", "domain1.com", "", false, makeTime(31*time.Second)), ErrIf: sets.NewString(`ns4/r1`)}, {Route: makeRoute("ns4", "r2", "domain1.com", "", false, makeTime(32*time.Second)), ErrIf: sets.NewString(`ns4/r1`, `ns5/r1`)}, {Route: makeRoute("ns5", "r2", "domain1.com", "", false, makeTime(33*time.Second)), ErrIf: sets.NewString(`ns4/r1`, `ns5/r1`, `ns4/r2`)}, // namespace with older wildcard wins over specific and wildcard routes in other namespaces {Route: makeRoute("ns6", "r1", "foo.domain1.com", "", true, makeTime(40*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns7", "r1", 
"bar.domain1.com", "", true, makeTime(50*time.Second)), ErrIf: sets.NewString(`ns6/r1`)}, {Route: makeRoute("ns7", "r2", "bar.domain1.com", "", false, makeTime(51*time.Second)), ErrIf: sets.NewString(`ns6/r1`)}, {Route: makeRoute("ns7", "r3", "bar.domain1.com", "/foo", false, makeTime(51*time.Second)), ErrIf: sets.NewString(`ns6/r1`)}, {Route: makeRoute("ns8", "r1", "baz.domain1.com", "", true, makeTime(60*time.Second)), ErrIf: sets.NewString(`ns6/r1`, `ns7/r1`, `ns7/r2`, `ns7/r3`)}, {Route: makeRoute("ns8", "r2", "baz.domain1.com", "", false, makeTime(61*time.Second)), ErrIf: sets.NewString(`ns6/r1`, `ns7/r1`)}, // namespace with older explicit host and wildcard wins over specific and wildcard routes in other namespaces {Route: makeRoute("ns9", "r1", "foo.domain2.com", "", false, makeTime(40*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns10", "r1", "bar.domain2.com", "", true, makeTime(50*time.Second)), ErrIf: sets.NewString(`ns9/r1`)}, {Route: makeRoute("ns10", "r2", "bar.domain2.com", "", false, makeTime(51*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns10", "r3", "foo.domain2.com", "", false, makeTime(52*time.Second)), ErrIf: sets.NewString(`ns9/r1`)}, {Route: makeRoute("ns10", "r4", "foo.domain2.com", "/p1", false, makeTime(53*time.Second)), ErrIf: sets.NewString(`ns9/r1`)}, {Route: makeRoute("ns10", "r5", "foo.domain2.com", "/p2", false, makeTime(54*time.Second)), ErrIf: sets.NewString(`ns9/r1`)}, {Route: makeRoute("ns10", "r6", "foo.domain2.com", "/p1/p2/other", false, makeTime(55*time.Second)), ErrIf: sets.NewString(`ns9/r1`)}, {Route: makeRoute("ns10", "r7", "foo.domain2.com", "/someother", false, makeTime(56*time.Second)), ErrIf: sets.NewString(`ns9/r1`)}, {Route: makeRoute("ns11", "r1", "baz.domain2.com", "", true, makeTime(60*time.Second)), ErrIf: sets.NewString(`ns9/r1`, `ns10/r1`, `ns10/r2`, `ns10/r3`, `ns10/r4`, `ns10/r5`, `ns10/r6`, `ns10/r7`)}, {Route: makeRoute("ns11", "r2", "baz.domain2.com", "", false, makeTime(61*time.Second)), ErrIf: sets.NewString(`ns10/r1`)}, // namespace with specific and wildcard route with paths wins over specific and wildcard routes in other namespaces {Route: makeRoute("ns12", "r1", "foo.domain3.com", "", false, makeTime(70*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns12", "r2", "bar.domain3.com", "/abc", false, makeTime(71*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns13", "r1", "foo.domain3.com", "", true, makeTime(80*time.Second)), ErrIf: sets.NewString(`ns12/r1`, `ns12/r2`)}, {Route: makeRoute("ns13", "r2", "bar.domain3.com", "", false, makeTime(81*time.Second)), ErrIf: sets.NewString(`ns12/r2`)}, {Route: makeRoute("ns13", "r3", "bar.domain3.com", "/abc", false, makeTime(82*time.Second)), ErrIf: sets.NewString(`ns12/r2`)}, {Route: makeRoute("ns13", "r4", "bar.domain3.com", "/def", false, makeTime(83*time.Second)), ErrIf: sets.NewString(`ns12/r2`)}, {Route: makeRoute("ns13", "r5", "wild.domain3.com", "/aces", true, makeTime(84*time.Second)), ErrIf: sets.NewString(`ns12/r1`, `ns12/r2`)}, {Route: makeRoute("ns13", "r6", "wild.domain3.com", "", true, makeTime(85*time.Second)), ErrIf: sets.NewString(`ns12/r1`, `ns12/r2`, `ns13/r1`)}, {Route: makeRoute("ns14", "r1", "foo.domain3.com", "", false, makeTime(90*time.Second)), ErrIf: sets.NewString(`ns12/r1`, `ns13/r1`, `ns13/r5`, `ns13/r6`)}, {Route: makeRoute("ns14", "r2", "bar.domain3.com", "", false, makeTime(91*time.Second)), ErrIf: sets.NewString(`ns12/r2`, `ns13/r1`, `ns13/r2`, `ns13/r3`, `ns13/r4`, `ns13/r5`, `ns13/r6`)}, // 
namespace with oldest wildcard and non-wildcard routes with same paths wins {Route: makeRoute("ns15", "r1", "foo.domain4.com", "", false, makeTime(100*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns15", "r2", "foo.domain4.com", "/abc", false, makeTime(101*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns15", "r3", "foo.domain4.com", "", false, makeTime(102*time.Second)), ErrIf: sets.NewString(`ns15/r1`)}, {Route: makeRoute("ns15", "r4", "foo.domain4.com", "/abc", false, makeTime(103*time.Second)), ErrIf: sets.NewString(`ns15/r2`)}, {Route: makeRoute("ns15", "r5", "www.domain4.com", "", true, makeTime(104*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns15", "r6", "www.domain4.com", "/abc", true, makeTime(105*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns15", "r7", "www.domain4.com", "", true, makeTime(106*time.Second)), ErrIf: sets.NewString(`ns15/r5`)}, {Route: makeRoute("ns15", "r8", "www.domain4.com", "/abc", true, makeTime(107*time.Second)), ErrIf: sets.NewString(`ns15/r6`)}, {Route: makeRoute("ns15", "r9", "www.domain4.com", "/def", true, makeTime(108*time.Second)), ErrIf: sets.NewString()}, {Route: makeRoute("ns15", "r10", "www.domain4.com", "/def", true, makeTime(109*time.Second)), ErrIf: sets.NewString(`ns15/r9`)}, } nameToIndex := map[string]int{} for i, tc := range routes { name := tc.Route.Namespace + "/" + tc.Route.Name if _, exists := nameToIndex[name]; exists { t.Fatalf("%d has a duplicate route name %s", i, name) } nameToIndex[name] = i } for i, tc := range routes { errIfInt := sets.NewInt() for name := range tc.ErrIf { if index, ok := nameToIndex[name]; ok { errIfInt.Insert(index) } else { t.Fatalf("%d references an unknown route name: %s", i, name) } } tc.ErrIfInt = errIfInt routes[i] = tc } rand.Seed(1) existing := sets.NewInt() errors := sets.NewString() for i := 0; i < 10000; i++ { add := false switch { case len(existing) == len(routes): add = false case len(existing) == 0: add = true default: add = (rand.Intn(4) > 0) } index := 0 eventType := watch.Deleted if add { index = rand.Intn(len(routes)) if existing.Has(index) { eventType = watch.Modified } else { eventType = watch.Added } } else { index = existing.List()[rand.Intn(len(existing))] eventType = watch.Deleted } route := routes[index].Route err := admitter.HandleRoute(eventType, route) if eventType != watch.Deleted && existing.HasAny(routes[index].ErrIfInt.List()...) 
{ if err == nil { errors.Insert(fmt.Sprintf("no error %s route %s/%s (existing=%v, errif=%v)", eventType, route.Namespace, route.Name, existing.List(), routes[index].ErrIfInt.List())) } } else { if err != nil { errors.Insert(fmt.Sprintf("error %s route %s/%s: %v (existing=%v, errif=%v)", eventType, route.Namespace, route.Name, err.Error(), existing.List(), routes[index].ErrIfInt.List())) } } existingNames := sets.NewString() for _, routes := range admitter.claimedHosts { for _, route := range routes { existingNames.Insert(route.Namespace + "/" + route.Name) } } for _, routes := range admitter.claimedWildcards { for _, route := range routes { existingNames.Insert(route.Namespace + "/" + route.Name) } } for _, routes := range admitter.blockedWildcards { for _, route := range routes { if !existingNames.Has(route.Namespace + "/" + route.Name) { t.Fatalf("blockedWildcards has %s/%s, not in claimedHosts or claimedWildcards", route.Namespace, route.Name) } } } existing = sets.NewInt() for name := range existingNames { index, ok := nameToIndex[name] if !ok { t.Fatalf("unknown route %s", name) } existing.Insert(index) } } if len(errors) > 0 { t.Errorf("Unexpected errors:\n%s", strings.Join(errors.List(), "\n")) } }
// TestHandleRoute test route watch events func TestHandleRoute(t *testing.T) { rejections := &fakeRejections{} router := newTestRouter(make(map[string]ServiceUnit)) templatePlugin := newDefaultTemplatePlugin(router, true) // TODO: move tests that rely on unique hosts to pkg/router/controller and remove them from // here plugin := controller.NewUniqueHost(templatePlugin, controller.HostForRoute, rejections) original := unversioned.Time{Time: time.Now()} //add route := &routeapi.Route{ ObjectMeta: kapi.ObjectMeta{ CreationTimestamp: original, Namespace: "foo", Name: "test", }, Spec: routeapi.RouteSpec{ Host: "www.example.com", To: kapi.ObjectReference{ Name: "TestService", }, }, } serviceUnitKey := fmt.Sprintf("%s/%s", route.Namespace, route.Spec.To.Name) plugin.HandleRoute(watch.Added, route) if !router.Committed { t.Errorf("Expected router to be committed after HandleRoute call") } actualSU, ok := router.FindServiceUnit(serviceUnitKey) if !ok { t.Errorf("TestHandleRoute was unable to find the service unit %s after HandleRoute was called", route.Spec.To.Name) } else { serviceAliasCfg, ok := actualSU.ServiceAliasConfigs[router.routeKey(route)] if !ok { t.Errorf("TestHandleRoute expected route key %s", router.routeKey(route)) } else { if serviceAliasCfg.Host != route.Spec.Host || serviceAliasCfg.Path != route.Spec.Path { t.Errorf("Expected route did not match service alias config %v : %v", route, serviceAliasCfg) } } } if len(rejections.rejections) > 0 { t.Fatalf("did not expect a recorded rejection: %#v", rejections) } // attempt to add a second route with a newer time, verify it is ignored duplicateRoute := &routeapi.Route{ ObjectMeta: kapi.ObjectMeta{ CreationTimestamp: unversioned.Time{Time: original.Add(time.Hour)}, Namespace: "foo", Name: "dupe", }, Spec: routeapi.RouteSpec{ Host: "www.example.com", To: kapi.ObjectReference{ Name: "TestService2", }, }, } if err := plugin.HandleRoute(watch.Added, duplicateRoute); err == nil { t.Fatal("unexpected non-error") } if _, ok := router.FindServiceUnit("foo/TestService2"); ok { t.Fatalf("unexpected second unit: %#v", router) } if r, ok := plugin.RoutesForHost("www.example.com"); !ok || r[0].Name != "test" { t.Fatalf("unexpected claimed routes: %#v", r) } if len(rejections.rejections) != 1 || rejections.rejections[0].route.Name != "dupe" || rejections.rejections[0].reason != "HostAlreadyClaimed" || rejections.rejections[0].message != "route test already exposes www.example.com and is older" { t.Fatalf("did not record rejection: %#v", rejections) } rejections.rejections = nil // attempt to remove the second route that is not being used, verify it is ignored if err := plugin.HandleRoute(watch.Deleted, duplicateRoute); err == nil { t.Fatal("unexpected non-error") } if _, ok := router.FindServiceUnit("foo/TestService2"); ok { t.Fatalf("unexpected second unit: %#v", router) } if _, ok := router.FindServiceUnit("foo/TestService"); !ok { t.Fatalf("unexpected first unit: %#v", router) } if r, ok := plugin.RoutesForHost("www.example.com"); !ok || r[0].Name != "test" { t.Fatalf("unexpected claimed routes: %#v", r) } if len(rejections.rejections) != 1 || rejections.rejections[0].route.Name != "dupe" || rejections.rejections[0].reason != "HostAlreadyClaimed" || rejections.rejections[0].message != "route test already exposes www.example.com and is older" { t.Fatalf("did not record rejection: %#v", rejections) } rejections.rejections = nil // add a second route with an older time, verify it takes effect duplicateRoute.CreationTimestamp = unversioned.Time{Time: 
original.Add(-time.Hour)} if err := plugin.HandleRoute(watch.Added, duplicateRoute); err != nil { t.Fatal("unexpected error") } otherSU, ok := router.FindServiceUnit("foo/TestService2") if !ok { t.Fatalf("missing second unit: %#v", router) } if len(actualSU.ServiceAliasConfigs) != 0 || len(otherSU.ServiceAliasConfigs) != 1 { t.Errorf("incorrect router state: %#v", router) } if _, ok := actualSU.ServiceAliasConfigs[router.routeKey(route)]; ok { t.Errorf("unexpected service alias config %s", router.routeKey(route)) } if len(rejections.rejections) != 1 || rejections.rejections[0].route.Name != "test" || rejections.rejections[0].reason != "HostAlreadyClaimed" || rejections.rejections[0].message != "replaced by older route dupe" { t.Fatalf("did not record rejection: %#v", rejections) } rejections.rejections = nil //mod route.Spec.Host = "www.example2.com" if err := plugin.HandleRoute(watch.Modified, route); err != nil { t.Fatal("unexpected error") } if !router.Committed { t.Errorf("Expected router to be committed after HandleRoute call") } actualSU, ok = router.FindServiceUnit(serviceUnitKey) if !ok { t.Errorf("TestHandleRoute was unable to find the service unit %s after HandleRoute was called", route.Spec.To.Name) } else { serviceAliasCfg, ok := actualSU.ServiceAliasConfigs[router.routeKey(route)] if !ok { t.Errorf("TestHandleRoute expected route key %s", router.routeKey(route)) } else { if serviceAliasCfg.Host != route.Spec.Host || serviceAliasCfg.Path != route.Spec.Path { t.Errorf("Expected route did not match service alias config %v : %v", route, serviceAliasCfg) } } } if plugin.HostLen() != 1 { t.Fatalf("did not clear claimed route: %#v", plugin) } if len(rejections.rejections) != 0 { t.Fatalf("unexpected rejection: %#v", rejections) } //delete if err := plugin.HandleRoute(watch.Deleted, route); err != nil { t.Fatal("unexpected error") } if !router.Committed { t.Errorf("Expected router to be committed after HandleRoute call") } actualSU, ok = router.FindServiceUnit(serviceUnitKey) if !ok { t.Errorf("TestHandleRoute was unable to find the service unit %s after HandleRoute was called", route.Spec.To.Name) } else { _, ok := actualSU.ServiceAliasConfigs[router.routeKey(route)] if ok { t.Errorf("TestHandleRoute did not expect route key %s", router.routeKey(route)) } } if plugin.HostLen() != 0 { t.Errorf("did not clear claimed route: %#v", plugin) } if len(rejections.rejections) != 0 { t.Fatalf("unexpected rejection: %#v", rejections) } }
func genOnePod(pod *api.Pod) page.Pod { var containerBirth unversioned.Time restarts := 0 totalContainers := len(pod.Spec.Containers) readyContainers := 0 reason := string(pod.Status.Phase) conditionMap := make(map[api.PodConditionType]*api.PodCondition) PodAllConditions := []api.PodConditionType{api.PodReady} for i := range pod.Status.Conditions { cond := pod.Status.Conditions[i] conditionMap[cond.Type] = &cond } for _, validCondition := range PodAllConditions { if condition, ok := conditionMap[validCondition]; ok { if condition.Status != api.ConditionTrue { reason = "Not" + string(condition.Type) } } } if pod.Status.Reason != "" { reason = pod.Status.Reason } for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { container := pod.Status.ContainerStatuses[i] restarts += container.RestartCount if container.State.Waiting != nil && container.State.Waiting.Reason != "" { reason = container.State.Waiting.Reason } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { reason = container.State.Terminated.Reason } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { if container.State.Terminated.Signal != 0 { reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) } else { reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) } } else if container.Ready && container.State.Running != nil { readyContainers++ if containerBirth.Before(container.State.Running.StartedAt) { containerBirth = container.State.Running.StartedAt } if container.Image == PauseImage { reason = "Stopped" } } } if pod.DeletionTimestamp != nil { reason = "Terminating" } podIP := "" portString := "" if pod.Spec.HostNetwork { podIP = "" for i := range pod.Spec.Containers { for j := range pod.Spec.Containers[i].Ports { port := pod.Spec.Containers[i].Ports[j] portString += fmt.Sprintf("%d/%s,", port.HostPort, port.Protocol) } } portString = strings.TrimSuffix(portString, ",") } else { podIP = pod.Status.PodIP matches := portMapping.FindStringSubmatch(pod.Status.Message) if len(matches) > 1 { portString = matches[1] } } var ports []string for _, p := range strings.Split(portString, ",") { ports = append(ports, strings.TrimSuffix(p, "/TCP")) } req, limit, _ := kube.GetSinglePodTotalRequestsAndLimits(pod) return page.Pod{ Namespace: pod.Namespace, Name: pod.Name, Images: populatePodImages(pod.Spec.Containers), TotalContainers: totalContainers, ReadyContainers: readyContainers, Status: reason, Restarts: restarts, Age: kube.TranslateTimestamp(pod.CreationTimestamp), ContainerAge: kube.TranslateTimestamp(containerBirth), ContainerBirth: containerBirth.Time, HostNetwork: pod.Spec.HostNetwork, HostIP: pod.Spec.NodeName, PodIP: podIP, Ports: ports, Requests: kube.TranslateResourseList(req), Limits: kube.TranslateResourseList(limit), } }
// translateTimestamp returns the elapsed time since timestamp in // human-readable approximation. func translateTimestamp(timestamp unversioned.Time) string { if timestamp.IsZero() { return "<unknown>" } return shortHumanDuration(time.Now().Sub(timestamp.Time)) }
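shortHumanDuration is not shown in this section; the stand-in below assumes coarse single-largest-unit rendering, which is enough to exercise translateTimestamp's contract:

package main

import (
	"fmt"
	"time"
)

// shortHumanDuration here is an assumption: render the duration in the
// single largest applicable unit.
func shortHumanDuration(d time.Duration) string {
	switch {
	case d < time.Minute:
		return fmt.Sprintf("%ds", int(d.Seconds()))
	case d < time.Hour:
		return fmt.Sprintf("%dm", int(d.Minutes()))
	case d < 24*time.Hour:
		return fmt.Sprintf("%dh", int(d.Hours()))
	default:
		return fmt.Sprintf("%dd", int(d.Hours()/24))
	}
}

func main() {
	fmt.Println(shortHumanDuration(90 * time.Second)) // 1m
	fmt.Println(shortHumanDuration(49 * time.Hour))   // 2d
}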
func runLatencyTest(nodeCount int, c *client.Client, ns string) { var ( nodes = make(map[string]string, 0) // pod name -> node name createTimestamps = make(map[string]unversioned.Time, 0) // pod name -> create time scheduleTimestamps = make(map[string]unversioned.Time, 0) // pod name -> schedule time startTimestamps = make(map[string]unversioned.Time, 0) // pod name -> time to run watchTimestamps = make(map[string]unversioned.Time, 0) // pod name -> time to read from informer additionalPodsPrefix = "latency-pod-" + string(util.NewUUID()) ) var mutex sync.Mutex readPodInfo := func(p *api.Pod) { mutex.Lock() defer mutex.Unlock() defer GinkgoRecover() if p.Status.Phase == api.PodRunning { if _, found := watchTimestamps[p.Name]; !found { watchTimestamps[p.Name] = unversioned.Now() createTimestamps[p.Name] = p.CreationTimestamp nodes[p.Name] = p.Spec.NodeName var startTimestamp unversioned.Time for _, cs := range p.Status.ContainerStatuses { if cs.State.Running != nil { if startTimestamp.Before(cs.State.Running.StartedAt) { startTimestamp = cs.State.Running.StartedAt } } } if startTimestamp != unversioned.NewTime(time.Time{}) { startTimestamps[p.Name] = startTimestamp } else { Failf("Pod %v is reported to be running, but none of its containers are running", p.Name) } } } } // Create an informer to read timestamps for each pod stopCh := make(chan struct{}) _, informer := framework.NewInformer( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}) return c.Pods(ns).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}) return c.Pods(ns).Watch(options) }, }, &api.Pod{}, 0, framework.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { p, ok := obj.(*api.Pod) Expect(ok).To(Equal(true)) go readPodInfo(p) }, UpdateFunc: func(oldObj, newObj interface{}) { p, ok := newObj.(*api.Pod) Expect(ok).To(Equal(true)) go readPodInfo(p) }, }, ) go informer.Run(stopCh) // Create additional pods with throughput ~5 pods/sec. 
var wg sync.WaitGroup wg.Add(nodeCount) podLabels := map[string]string{ "name": additionalPodsPrefix, } for i := 1; i <= nodeCount; i++ { name := additionalPodsPrefix + "-" + strconv.Itoa(i) go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels) time.Sleep(200 * time.Millisecond) } wg.Wait() Logf("Waiting for all Pods to be observed by the watch...") for start := time.Now(); len(watchTimestamps) < nodeCount; time.Sleep(10 * time.Second) { if time.Since(start) > timeout { Failf("Timeout reached waiting for all Pods to be observed by the watch.") } } close(stopCh) // Read the schedule timestamp by checking the scheduler event for each pod selector := fields.Set{ "involvedObject.kind": "Pod", "involvedObject.namespace": ns, "source": "scheduler", }.AsSelector() options := api.ListOptions{FieldSelector: selector} schedEvents, err := c.Events(ns).List(options) expectNoError(err) for k := range createTimestamps { for _, event := range schedEvents.Items { if event.InvolvedObject.Name == k { scheduleTimestamps[k] = event.FirstTimestamp break } } } var ( scheduleLatencies = make([]podLatencyData, 0) startLatencies = make([]podLatencyData, 0) watchLatencies = make([]podLatencyData, 0) scheduleToWatchLatencies = make([]podLatencyData, 0) e2eLatencies = make([]podLatencyData, 0) ) for name, podNode := range nodes { createTs, ok := createTimestamps[name] Expect(ok).To(Equal(true)) scheduleTs, ok := scheduleTimestamps[name] Expect(ok).To(Equal(true)) runTs, ok := startTimestamps[name] Expect(ok).To(Equal(true)) watchTs, ok := watchTimestamps[name] Expect(ok).To(Equal(true)) var ( scheduleLatency = podLatencyData{name, podNode, scheduleTs.Time.Sub(createTs.Time)} startLatency = podLatencyData{name, podNode, runTs.Time.Sub(scheduleTs.Time)} watchLatency = podLatencyData{name, podNode, watchTs.Time.Sub(runTs.Time)} scheduleToWatchLatency = podLatencyData{name, podNode, watchTs.Time.Sub(scheduleTs.Time)} e2eLatency = podLatencyData{name, podNode, watchTs.Time.Sub(createTs.Time)} ) scheduleLatencies = append(scheduleLatencies, scheduleLatency) startLatencies = append(startLatencies, startLatency) watchLatencies = append(watchLatencies, watchLatency) scheduleToWatchLatencies = append(scheduleToWatchLatencies, scheduleToWatchLatency) e2eLatencies = append(e2eLatencies, e2eLatency) } sort.Sort(latencySlice(scheduleLatencies)) sort.Sort(latencySlice(startLatencies)) sort.Sort(latencySlice(watchLatencies)) sort.Sort(latencySlice(scheduleToWatchLatencies)) sort.Sort(latencySlice(e2eLatencies)) printLatencies(scheduleLatencies, "worst schedule latencies") printLatencies(startLatencies, "worst run-after-schedule latencies") printLatencies(watchLatencies, "worst watch latencies") printLatencies(scheduleToWatchLatencies, "worst scheduled-to-end total latencies") printLatencies(e2eLatencies, "worst e2e total latencies") // Ensure all scheduleLatencies are under expected ceilings. // These numbers were guessed based on numerous Jenkins e2e runs. testMaximumLatencyValue(scheduleLatencies, 1*time.Second, "scheduleLatencies") testMaximumLatencyValue(startLatencies, 15*time.Second, "startLatencies") testMaximumLatencyValue(watchLatencies, 8*time.Second, "watchLatencies") testMaximumLatencyValue(scheduleToWatchLatencies, 5*time.Second, "scheduleToWatchLatencies") testMaximumLatencyValue(e2eLatencies, 5*time.Second, "e2eLatencies") // Test whether e2e pod startup time is acceptable. 
podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLatencies)} expectNoError(VerifyPodStartupLatency(podStartupLatency)) // Log suspicious latency metrics/docker errors from all nodes that had slow startup times logSuspiciousLatency(startLatencies, nil, nodeCount, c) }
// TranslateTimestamp returns the elapsed time since timestamp in // human-readable approximation. func TranslateTimestamp(timestamp api_uv.Time) string { if timestamp.IsZero() { return "<unknown>" } return shortHumanDuration(time.Now().Sub(timestamp.Time)) }
func (u *Unstructured) GetCreationTimestamp() unversioned.Time { var timestamp unversioned.Time timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) return timestamp }
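A self-contained sketch of the getNestedString traversal these accessors depend on; the real helper lives elsewhere in the package, so the miss-returns-empty-string behavior here is an assumption:

package main

import "fmt"

// getNestedString walks nested map[string]interface{} values by field
// name; any missing field or type mismatch yields the empty string.
func getNestedString(obj map[string]interface{}, fields ...string) string {
	var cur interface{} = obj
	for _, field := range fields {
		m, ok := cur.(map[string]interface{})
		if !ok {
			return ""
		}
		cur = m[field]
	}
	s, _ := cur.(string)
	return s
}

func main() {
	u := map[string]interface{}{
		"metadata": map[string]interface{}{
			"creationTimestamp": "2016-01-02T15:04:05Z",
		},
	}
	fmt.Println(getNestedString(u, "metadata", "creationTimestamp"))
}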