func tagReferenceToTagEvent(stream *api.ImageStream, tagRef api.TagReference, tagOrID string) (*api.TagEvent, error) {
	switch tagRef.From.Kind {
	case "DockerImage":
		return &api.TagEvent{
			Created:              util.Now(),
			DockerImageReference: tagRef.From.Name,
		}, nil

	case "ImageStreamImage":
		ref, err := api.DockerImageReferenceForStream(stream)
		if err != nil {
			return nil, err
		}

		resolvedIDs := api.ResolveImageID(stream, tagOrID)
		switch len(resolvedIDs) {
		case 1:
			ref.ID = resolvedIDs.List()[0]
			return &api.TagEvent{
				Created:              util.Now(),
				DockerImageReference: ref.String(),
				Image:                ref.ID,
			}, nil
		case 0:
			return nil, fmt.Errorf("no images match the prefix %q", tagOrID)
		default:
			return nil, fmt.Errorf("multiple images match the prefix %q: %s", tagOrID, strings.Join(resolvedIDs.List(), ", "))
		}

	case "ImageStreamTag":
		return api.LatestTaggedImage(stream, tagOrID), nil

	default:
		return nil, fmt.Errorf("invalid from.kind %q: it must be DockerImage, ImageStreamImage or ImageStreamTag", tagRef.From.Kind)
	}
}
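// The ImageStreamImage case above hinges on prefix resolution: the supplied ID
// must match exactly one image in the stream before a tag event can be built.
// A minimal, stdlib-only sketch of that idea (matchPrefix is a hypothetical
// helper for illustration, not the source's api.ResolveImageID):
func matchPrefix(ids []string, prefix string) []string {
	var matches []string
	for _, id := range ids {
		if strings.HasPrefix(id, prefix) {
			matches = append(matches, id)
		}
	}
	// zero matches -> "no images match"; more than one -> ambiguous prefix;
	// only exactly one match can be resolved.
	return matches
}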
func newCondition() experimental.JobCondition {
	return experimental.JobCondition{
		Type:               experimental.JobComplete,
		Status:             api.ConditionTrue,
		LastProbeTime:      util.Now(),
		LastTransitionTime: util.Now(),
	}
}
// HandlePod updates the state of the build based on the pod state
func (bc *BuildPodController) HandlePod(pod *kapi.Pod) error {
	obj, exists, err := bc.BuildStore.Get(buildKey(pod))
	if err != nil {
		glog.V(4).Infof("Error getting Build for pod %s/%s: %v", pod.Namespace, pod.Name, err)
		return err
	}
	if !exists || obj == nil {
		glog.V(5).Infof("No Build found for pod %s/%s", pod.Namespace, pod.Name)
		return nil
	}

	build := obj.(*buildapi.Build)
	nextStatus := build.Status.Phase

	switch pod.Status.Phase {
	case kapi.PodRunning:
		// The pod's still running
		nextStatus = buildapi.BuildPhaseRunning
	case kapi.PodSucceeded:
		// Check the exit codes of all the containers in the pod
		nextStatus = buildapi.BuildPhaseComplete
		if len(pod.Status.ContainerStatuses) == 0 {
			// no containers in the pod means something went badly wrong, so the build
			// should be failed.
			glog.V(2).Infof("Failing build %s/%s because the pod has no containers", build.Namespace, build.Name)
			nextStatus = buildapi.BuildPhaseFailed
		} else {
			for _, info := range pod.Status.ContainerStatuses {
				if info.State.Terminated != nil && info.State.Terminated.ExitCode != 0 {
					nextStatus = buildapi.BuildPhaseFailed
					break
				}
			}
		}
	case kapi.PodFailed:
		nextStatus = buildapi.BuildPhaseFailed
	}

	if build.Status.Phase != nextStatus {
		// capture the old phase before mutating the build, so the post-update
		// log line below reports the actual transition instead of "X -> X"
		previousPhase := build.Status.Phase
		glog.V(4).Infof("Updating Build %s/%s status %s -> %s", build.Namespace, build.Name, previousPhase, nextStatus)
		build.Status.Phase = nextStatus
		if buildutil.IsBuildComplete(build) {
			now := util.Now()
			build.Status.CompletionTimestamp = &now
		}
		if build.Status.Phase == buildapi.BuildPhaseRunning {
			now := util.Now()
			build.Status.StartTimestamp = &now
		}
		if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
			return fmt.Errorf("failed to update Build %s/%s: %v", build.Namespace, build.Name, err)
		}
		glog.V(4).Infof("Build %s/%s status was updated %s -> %s", build.Namespace, build.Name, previousPhase, nextStatus)
	}
	return nil
}
func NewEmptyPolicy(namespace string) *authorizationapi.Policy {
	policy := &authorizationapi.Policy{}
	policy.Name = authorizationapi.PolicyName
	policy.Namespace = namespace
	policy.CreationTimestamp = util.Now()
	policy.LastModified = util.Now()
	policy.Roles = make(map[string]*authorizationapi.Role)

	return policy
}
func NewEmptyPolicyBinding(namespace, policyNamespace, policyBindingName string) *authorizationapi.PolicyBinding {
	binding := &authorizationapi.PolicyBinding{}
	binding.Name = policyBindingName
	binding.Namespace = namespace
	binding.CreationTimestamp = util.Now()
	binding.LastModified = util.Now()
	binding.PolicyRef = kapi.ObjectReference{Name: authorizationapi.PolicyName, Namespace: policyNamespace}
	binding.RoleBindings = make(map[string]*authorizationapi.RoleBinding)

	return binding
}
func TestAddOrUpdateEventNoExisting(t *testing.T) {
	// Arrange
	eventTime := util.Now()
	event := api.Event{
		Reason:  "my reasons are many",
		Message: "my message is love",
		InvolvedObject: api.ObjectReference{
			Kind:       "Pod",
			Name:       "awesome.name",
			Namespace:  "betterNamespace",
			UID:        "C934D34AFB20242",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node1",
		},
		Count:          1,
		FirstTimestamp: eventTime,
		LastTimestamp:  eventTime,
	}

	// Act
	result := addOrUpdateEvent(&event)

	// Assert
	compareEventWithHistoryEntry(&event, &result, t)
}
func TestGetEventExisting(t *testing.T) {
	// Arrange
	eventTime := util.Now()
	event := api.Event{
		Reason:  "do I exist",
		Message: "I do, oh my",
		InvolvedObject: api.ObjectReference{
			Kind:       "Pod",
			Name:       "clever.name.here",
			Namespace:  "spaceOfName",
			UID:        "D933D32AFB2A238",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node4",
		},
		Count:          1,
		FirstTimestamp: eventTime,
		LastTimestamp:  eventTime,
	}
	addOrUpdateEvent(&event)

	// Act
	existingEvent := getEvent(&event)

	// Assert
	compareEventWithHistoryEntry(&event, &existingEvent, t)
}
// NewFilterBeforePredicate returns a FilterPredicate that is true when the
// replication controller was created before the current time minus the
// specified duration.
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := util.Now()
	before := util.NewTime(now.Time.Add(-1 * d))
	return func(item *kapi.ReplicationController) bool {
		return item.CreationTimestamp.Before(before)
	}
}
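// A minimal sketch (not from the source) of applying such a predicate by hand;
// it assumes the kapi import and the FilterPredicate type from the snippet
// above. applyPredicate is a hypothetical helper: items for which the
// predicate returns true (created before the cutoff) are kept as candidates.
func applyPredicate(items []*kapi.ReplicationController, pred FilterPredicate) []*kapi.ReplicationController {
	var matched []*kapi.ReplicationController
	for _, item := range items {
		if pred(item) {
			matched = append(matched, item)
		}
	}
	return matched
}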
// TestSort verifies that replication controllers are sorted by most recently created
func TestSort(t *testing.T) {
	present := util.Now()
	past := util.NewTime(present.Time.Add(-1 * time.Minute))
	controllers := []*kapi.ReplicationController{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "past",
				CreationTimestamp: past,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "present",
				CreationTimestamp: present,
			},
		},
	}
	sort.Sort(sortableReplicationControllers(controllers))
	if controllers[0].Name != "present" {
		t.Errorf("Unexpected sort order")
	}
	if controllers[1].Name != "past" {
		t.Errorf("Unexpected sort order")
	}
}
// NewFilterBeforePredicate returns a FilterPredicate that is true when the
// build was created before the current time minus the specified duration.
func NewFilterBeforePredicate(d time.Duration) FilterPredicate {
	now := util.Now()
	before := util.NewTime(now.Time.Add(-1 * d))
	return func(build *buildapi.Build) bool {
		return build.CreationTimestamp.Before(before)
	}
}
func TestFilterBeforePredicate(t *testing.T) {
	youngerThan := time.Hour
	now := util.Now()
	old := util.NewTime(now.Time.Add(-1 * youngerThan))
	builds := []*buildapi.Build{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "old",
				CreationTimestamp: old,
			},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name:              "new",
				CreationTimestamp: now,
			},
		},
	}
	filter := &andFilter{
		filterPredicates: []FilterPredicate{NewFilterBeforePredicate(youngerThan)},
	}
	result := filter.Filter(builds)
	if len(result) != 1 {
		t.Errorf("Unexpected number of results")
	}
	if expected, actual := "old", result[0].Name; expected != actual {
		t.Errorf("expected %v, actual %v", expected, actual)
	}
}
func TestDeleteNamespaceWithCompleteFinalizers(t *testing.T) {
	now := util.Now()
	fakeEtcdClient, etcdStorage := newEtcdStorage(t)
	fakeEtcdClient.ChangeIndex = 1
	key := etcdtest.AddPrefix("/namespaces/foo")
	fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Value: runtime.EncodeOrDie(latest.Codec, &api.Namespace{
					ObjectMeta: api.ObjectMeta{
						Name:              "foo",
						DeletionTimestamp: &now,
					},
					Spec: api.NamespaceSpec{
						Finalizers: []api.FinalizerName{},
					},
					Status: api.NamespaceStatus{Phase: api.NamespaceActive},
				}),
				ModifiedIndex: 1,
				CreatedIndex:  1,
			},
		},
	}
	storage, _, _ := NewStorage(etcdStorage)
	_, err := storage.Delete(api.NewContext(), "foo", nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
func (m *VirtualStorage) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
	if err := rest.BeforeCreate(m.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}

	role := obj.(*authorizationapi.Role)

	policy, err := m.EnsurePolicy(ctx)
	if err != nil {
		return nil, err
	}
	if _, exists := policy.Roles[role.Name]; exists {
		return nil, kapierrors.NewAlreadyExists("Role", role.Name)
	}

	role.ResourceVersion = policy.ResourceVersion
	policy.Roles[role.Name] = role
	policy.LastModified = util.Now()

	if err := m.PolicyStorage.UpdatePolicy(ctx, policy); err != nil {
		return nil, err
	}
	return role, nil
}
// CancelBuild updates a build status to Cancelled, after its associated pod is deleted.
func (bc *BuildController) CancelBuild(build *buildapi.Build) error {
	if !isBuildCancellable(build) {
		glog.V(4).Infof("Build %s/%s can be cancelled only if it has pending/running status, not %s.", build.Namespace, build.Name, build.Status.Phase)
		return nil
	}

	glog.V(4).Infof("Cancelling Build %s/%s.", build.Namespace, build.Name)

	pod, err := bc.PodManager.GetPod(build.Namespace, buildutil.GetBuildPodName(build))
	if err != nil {
		if !errors.IsNotFound(err) {
			return fmt.Errorf("Failed to get Pod for build %s/%s: %v", build.Namespace, build.Name, err)
		}
	} else {
		err := bc.PodManager.DeletePod(build.Namespace, pod)
		if err != nil && !errors.IsNotFound(err) {
			return fmt.Errorf("Couldn't delete Build Pod %s/%s: %v", build.Namespace, pod.Name, err)
		}
	}

	build.Status.Phase = buildapi.BuildPhaseCancelled
	now := util.Now()
	build.Status.CompletionTimestamp = &now
	if err := bc.BuildUpdater.Update(build.Namespace, build); err != nil {
		return fmt.Errorf("Failed to update Build %s/%s: %v", build.Namespace, build.Name, err)
	}

	glog.V(4).Infof("Build %s/%s was successfully cancelled.", build.Namespace, build.Name)
	return nil
}
func TestLimitedLogAndRetryFinish(t *testing.T) {
	updater := &buildUpdater{}
	err := errors.New("funky error")

	now := kutil.Now()
	retry := controller.Retry{
		Count:          0,
		StartTimestamp: kutil.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute()-31, now.Second(), now.Nanosecond(), now.Location()),
	}
	if limitedLogAndRetry(updater, 30*time.Minute)(&buildapi.Build{Status: buildapi.BuildStatus{Phase: buildapi.BuildPhaseNew}}, err, retry) {
		t.Error("Expected no more retries after reaching timeout!")
	}
	if updater.Build == nil {
		t.Fatal("BuildUpdater wasn't called!")
	}
	if updater.Build.Status.Phase != buildapi.BuildPhaseFailed {
		t.Errorf("Expected status %s, got %s!", buildapi.BuildPhaseFailed, updater.Build.Status.Phase)
	}
	if !strings.Contains(updater.Build.Status.Message, err.Error()) {
		t.Errorf("Expected message to contain %v, got %s!", err.Error(), updater.Build.Status.Message)
	}
	if updater.Build.Status.CompletionTimestamp == nil {
		t.Error("Expected CompletionTimestamp to be set!")
	}
}
func deletePods(kubeClient client.Interface, ns string, before util.Time) (int64, error) {
	items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return 0, err
	}
	expired := util.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
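// A stdlib-only sketch of the grace-period estimate computed above
// (gracefulDeleteEstimate is a hypothetical helper, not part of the source):
// the estimate is the longest TerminationGracePeriodSeconds across the pods,
// and collapses to zero once the "before" deadline has already passed, since
// deletion is then forced with a zero-second grace period.
func gracefulDeleteEstimate(graces []*int64, expired bool) int64 {
	if expired {
		return 0
	}
	estimate := int64(0)
	for _, grace := range graces {
		if grace != nil && *grace > estimate {
			estimate = *grace
		}
	}
	return estimate
}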
func TestNamespaceStatusStrategy(t *testing.T) {
	ctx := api.NewDefaultContext()
	if StatusStrategy.NamespaceScoped() {
		t.Errorf("Namespaces should not be namespace scoped")
	}
	if StatusStrategy.AllowCreateOnUpdate() {
		t.Errorf("Namespaces should not allow create on update")
	}
	now := util.Now()
	oldNamespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"},
		Spec:       api.NamespaceSpec{Finalizers: []api.FinalizerName{"kubernetes"}},
		Status:     api.NamespaceStatus{Phase: api.NamespaceActive},
	}
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "9", DeletionTimestamp: &now},
		Status:     api.NamespaceStatus{Phase: api.NamespaceTerminating},
	}
	StatusStrategy.PrepareForUpdate(namespace, oldNamespace)
	if namespace.Status.Phase != api.NamespaceTerminating {
		t.Errorf("Namespace status updates should allow change of phase: %v", namespace.Status.Phase)
	}
	if len(namespace.Spec.Finalizers) != 1 || namespace.Spec.Finalizers[0] != api.FinalizerKubernetes {
		t.Errorf("PrepareForUpdate should have preserved old finalizers")
	}
	errs := StatusStrategy.ValidateUpdate(ctx, namespace, oldNamespace)
	if len(errs) != 0 {
		t.Errorf("Unexpected error %v", errs)
	}
	if namespace.ResourceVersion != "9" {
		t.Errorf("Incoming resource version on update should not be mutated")
	}
}
// addImageStreamsToGraph adds all the streams to the graph. The most recent n
// image revisions for a tag will be preserved, where n is specified by the
// algorithm's keepTagRevisions. Image revisions older than n are candidates
// for pruning if the image stream's age is at least as old as the minimum
// threshold in algorithm. Otherwise, if the image stream is younger than the
// threshold, all image revisions for that stream are ineligible for pruning.
//
// addImageStreamsToGraph also adds references from each stream to all the
// layers it references (via each image a stream references).
func addImageStreamsToGraph(g graph.Graph, streams *imageapi.ImageStreamList, algorithm pruneAlgorithm) {
	for i := range streams.Items {
		stream := &streams.Items[i]

		glog.V(4).Infof("Examining ImageStream %s/%s", stream.Namespace, stream.Name)

		// use a weak reference for old image revisions by default
		oldImageRevisionReferenceKind := WeakReferencedImageEdgeKind

		age := util.Now().Sub(stream.CreationTimestamp.Time)
		if age < algorithm.keepYoungerThan {
			// stream's age is below threshold - use a strong reference for old image revisions instead
			glog.V(4).Infof("Stream %s/%s is below age threshold - none of its images are eligible for pruning", stream.Namespace, stream.Name)
			oldImageRevisionReferenceKind = ReferencedImageEdgeKind
		}

		glog.V(4).Infof("Adding ImageStream %s/%s to graph", stream.Namespace, stream.Name)
		isNode := imagegraph.EnsureImageStreamNode(g, stream)
		imageStreamNode := isNode.(*imagegraph.ImageStreamNode)

		for tag, history := range stream.Status.Tags {
			for i := range history.Items {
				n := imagegraph.FindImage(g, history.Items[i].Image)
				if n == nil {
					glog.V(2).Infof("Unable to find image %q in graph (from tag=%q, revision=%d, dockerImageReference=%s)", history.Items[i].Image, tag, i, history.Items[i].DockerImageReference)
					continue
				}
				imageNode := n.(*imagegraph.ImageNode)

				var kind string
				switch {
				case i < algorithm.keepTagRevisions:
					kind = ReferencedImageEdgeKind
				default:
					kind = oldImageRevisionReferenceKind
				}

				glog.V(4).Infof("Checking for existing strong reference from stream %s/%s to image %s", stream.Namespace, stream.Name, imageNode.Image.Name)
				if edge := g.Edge(imageStreamNode, imageNode); edge != nil && g.EdgeKinds(edge).Has(ReferencedImageEdgeKind) {
					glog.V(4).Infof("Strong reference found")
					continue
				}

				// kind is a string, so log it with %s rather than %d
				glog.V(4).Infof("Adding edge (kind=%s) from %q to %q", kind, imageStreamNode.UniqueName.UniqueName(), imageNode.UniqueName.UniqueName())
				g.AddEdge(imageStreamNode, imageNode, kind)

				glog.V(4).Infof("Adding stream->layer references")
				// add stream -> layer references so we can prune them later
				for _, s := range g.From(imageNode) {
					if g.Kind(s) != imagegraph.ImageLayerNodeKind {
						continue
					}
					glog.V(4).Infof("Adding reference from stream %q to layer %q", stream.Name, s.(*imagegraph.ImageLayerNode).Layer)
					g.AddEdge(imageStreamNode, s, ReferencedImageLayerEdgeKind)
				}
			}
		}
	}
}
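// A minimal sketch of the edge-kind selection above (edgeKindForRevision is a
// hypothetical helper; the constant names mirror those in the snippet): the
// first keepTagRevisions revisions of a tag always get a strong reference, and
// older revisions get a weak reference unless the stream itself is younger
// than keepYoungerThan, in which case they stay strongly referenced too.
func edgeKindForRevision(revision, keepTagRevisions int, streamAge, keepYoungerThan time.Duration) string {
	if revision < keepTagRevisions {
		return ReferencedImageEdgeKind // recent revision: always kept
	}
	if streamAge < keepYoungerThan {
		return ReferencedImageEdgeKind // stream too young: protected from pruning
	}
	return WeakReferencedImageEdgeKind // old revision of an old stream: prune candidate
}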
// getNextBuildNameFromBuild returns the name of the next build, derived from
// the given build's name with a timestamp suffix appended.
func getNextBuildNameFromBuild(build *buildapi.Build) string {
	buildName := build.Name
	if matched, _ := regexp.MatchString(`^.+-\d-\d+$`, buildName); matched {
		nameElems := strings.Split(buildName, "-")
		buildName = strings.Join(nameElems[:len(nameElems)-1], "-")
	}
	return fmt.Sprintf("%s-%d", buildName, int32(util.Now().Unix()))
}
func GetBootstrapPolicyBinding() *authorizationapi.ClusterPolicyBinding {
	policyBinding := &authorizationapi.ClusterPolicyBinding{
		ObjectMeta: kapi.ObjectMeta{
			Name:              ":Default",
			CreationTimestamp: util.Now(),
			UID:               util.NewUUID(),
		},
		LastModified: util.Now(),
		RoleBindings: make(map[string]*authorizationapi.ClusterRoleBinding),
	}

	bindings := bootstrappolicy.GetBootstrapClusterRoleBindings()
	for i := range bindings {
		policyBinding.RoleBindings[bindings[i].Name] = &bindings[i]
	}

	return policyBinding
}
func GetBootstrapPolicy() *authorizationapi.ClusterPolicy {
	policy := &authorizationapi.ClusterPolicy{
		ObjectMeta: kapi.ObjectMeta{
			Name:              authorizationapi.PolicyName,
			CreationTimestamp: util.Now(),
			UID:               util.NewUUID(),
		},
		LastModified: util.Now(),
		Roles:        make(map[string]*authorizationapi.ClusterRole),
	}

	roles := bootstrappolicy.GetBootstrapClusterRoles()
	for i := range roles {
		policy.Roles[roles[i].Name] = &roles[i]
	}

	return policy
}
func TestAddOrUpdateEventExisting(t *testing.T) {
	// Arrange
	event1Time := util.Unix(2324, 2342)
	event2Time := util.Now()
	event1 := api.Event{
		Reason:  "something happened",
		Message: "can you believe it?",
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "rs1",
		},
		InvolvedObject: api.ObjectReference{
			Kind:       "Scheduler",
			Name:       "anOkName",
			Namespace:  "someNamespace",
			UID:        "C934D3234CD0242",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node2",
		},
		Count:          1,
		FirstTimestamp: event1Time,
		LastTimestamp:  event1Time,
	}
	event2 := api.Event{
		Reason:  "something happened",
		Message: "can you believe it?",
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "rs2",
		},
		InvolvedObject: api.ObjectReference{
			Kind:       "Scheduler",
			Name:       "anOkName",
			Namespace:  "someNamespace",
			UID:        "C934D3234CD0242",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node2",
		},
		Count:          3,
		FirstTimestamp: event1Time,
		LastTimestamp:  event2Time,
	}

	// Act
	addOrUpdateEvent(&event1)
	result1 := addOrUpdateEvent(&event2)
	result2 := getEvent(&event1)

	// Assert
	compareEventWithHistoryEntry(&event2, &result1, t)
	compareEventWithHistoryEntry(&event2, &result2, t)
}
// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set the object
// should be gracefully deleted, if gracefulPending is set the object has already been gracefully deleted
// (and the provided grace period is longer than the time to deletion), and an error is returned if the
// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with
// default values if graceful is true.
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
	if strategy == nil {
		return false, false, nil
	}
	objectMeta, _, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return false, false, kerr
	}

	// if the object is already being deleted
	if objectMeta.DeletionTimestamp != nil {
		// if we are already being deleted, we may only shorten the deletion grace period
		// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
		// so we force deletion immediately
		if objectMeta.DeletionGracePeriodSeconds == nil {
			return false, false, nil
		}
		// only a shorter grace period may be provided by a user
		if options.GracePeriodSeconds != nil {
			period := int64(*options.GracePeriodSeconds)
			if period > *objectMeta.DeletionGracePeriodSeconds {
				return false, true, nil
			}
			now := util.NewTime(util.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
			objectMeta.DeletionTimestamp = &now
			objectMeta.DeletionGracePeriodSeconds = &period
			options.GracePeriodSeconds = &period
			return true, false, nil
		}
		// graceful deletion is pending, do nothing
		options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
		return false, true, nil
	}

	if !strategy.CheckGracefulDelete(obj, options) {
		return false, false, nil
	}
	now := util.NewTime(util.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
	objectMeta.DeletionTimestamp = &now
	objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
	return true, false, nil
}
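// A stdlib-only sketch of the grace-period arithmetic in BeforeDelete
// (applyGracePeriod is a hypothetical helper, not the source's API): a request
// may only shorten an already-recorded grace period, and the new deletion
// deadline is "now" pushed forward by the requested number of seconds.
func applyGracePeriod(existingSeconds *int64, requestedSeconds int64, now time.Time) (deadline time.Time, applied bool) {
	if existingSeconds != nil && requestedSeconds > *existingSeconds {
		// a longer period than the one already recorded is ignored
		return time.Time{}, false
	}
	return now.Add(time.Duration(requestedSeconds) * time.Second), true
}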
func testSyncNamespaceThatIsTerminating(t *testing.T, experimentalMode bool) {
	mockClient := &testclient.Fake{}
	now := util.Now()
	testNamespace := api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:              "test",
			ResourceVersion:   "1",
			DeletionTimestamp: &now,
		},
		Spec: api.NamespaceSpec{
			Finalizers: []api.FinalizerName{"kubernetes"},
		},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceTerminating,
		},
	}
	err := syncNamespace(mockClient, experimentalMode, testNamespace)
	if err != nil {
		t.Errorf("Unexpected error when syncing namespace %v", err)
	}

	// TODO: Reuse the constants for all these strings from testclient
	expectedActionSet := util.NewStringSet(
		strings.Join([]string{"list", "replicationcontrollers", ""}, "-"),
		strings.Join([]string{"list", "services", ""}, "-"),
		strings.Join([]string{"list", "pods", ""}, "-"),
		strings.Join([]string{"list", "resourcequotas", ""}, "-"),
		strings.Join([]string{"list", "secrets", ""}, "-"),
		strings.Join([]string{"list", "limitranges", ""}, "-"),
		strings.Join([]string{"list", "events", ""}, "-"),
		strings.Join([]string{"list", "serviceaccounts", ""}, "-"),
		strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"),
		strings.Join([]string{"create", "namespaces", "finalize"}, "-"),
		strings.Join([]string{"delete", "namespaces", ""}, "-"),
	)
	if experimentalMode {
		expectedActionSet.Insert(
			strings.Join([]string{"list", "horizontalpodautoscalers", ""}, "-"),
			strings.Join([]string{"list", "daemonsets", ""}, "-"),
			strings.Join([]string{"list", "deployments", ""}, "-"),
		)
	}

	actionSet := util.NewStringSet()
	for _, action := range mockClient.Actions() {
		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
	}
	if !actionSet.HasAll(expectedActionSet.List()...) {
		t.Errorf("Expected actions: %v, but got: %v", expectedActionSet, actionSet)
	}
	if !expectedActionSet.HasAll(actionSet.List()...) {
		t.Errorf("Expected actions: %v, but got: %v", expectedActionSet, actionSet)
	}
}
// getNextBuildNameFromBuild returns the name of the next build, derived from
// the given build's name with a timestamp-based suffix appended.
func getNextBuildNameFromBuild(build *buildapi.Build) string {
	buildName := build.Name
	if matched, _ := regexp.MatchString(`^.+-\d-\d+$`, buildName); matched {
		nameElems := strings.Split(buildName, "-")
		buildName = strings.Join(nameElems[:len(nameElems)-1], "-")
	}
	suffix := fmt.Sprintf("%v", util.Now().UnixNano())
	if len(suffix) > 10 {
		suffix = suffix[len(suffix)-10:]
	}
	return namer.GetName(buildName, suffix, util.DNS1123SubdomainMaxLength)
}
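// A runnable, stdlib-only sketch of the name trimming above: a name that
// already ends in "-<digit>-<digits>" (a previous clone) is trimmed back
// before the new suffix is appended, so repeated clones don't accumulate
// timestamp suffixes. trimBuildName and the sample names are illustrative.
func trimBuildName(name string) string {
	if matched, _ := regexp.MatchString(`^.+-\d-\d+$`, name); matched {
		elems := strings.Split(name, "-")
		return strings.Join(elems[:len(elems)-1], "-")
	}
	return name
}

// trimBuildName("frontend-1-1443734926") == "frontend-1"
// trimBuildName("frontend") == "frontend"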
func TestSyncNamespaceThatIsTerminating(t *testing.T) {
	mockKubeClient := &ktestclient.Fake{}
	mockOriginClient := &testclient.Fake{}
	nm := NamespaceController{
		KubeClient: mockKubeClient,
		Client:     mockOriginClient,
	}
	now := util.Now()
	testNamespace := &kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{
			Name:              "test",
			ResourceVersion:   "1",
			DeletionTimestamp: &now,
		},
		Spec: kapi.NamespaceSpec{
			Finalizers: []kapi.FinalizerName{kapi.FinalizerKubernetes, api.FinalizerOrigin},
		},
		Status: kapi.NamespaceStatus{
			Phase: kapi.NamespaceTerminating,
		},
	}
	err := nm.Handle(testNamespace)
	if err != nil {
		t.Errorf("Unexpected error when handling namespace %v", err)
	}

	// TODO: we will expect a finalize namespace call after rebase
	expectedActionSet := []ktestclient.Action{
		ktestclient.NewListAction("buildconfigs", "", nil, nil),
		ktestclient.NewListAction("policies", "", nil, nil),
		ktestclient.NewListAction("imagestreams", "", nil, nil),
		ktestclient.NewListAction("policybindings", "", nil, nil),
		ktestclient.NewListAction("rolebindings", "", nil, nil),
		ktestclient.NewListAction("roles", "", nil, nil),
		ktestclient.NewListAction("routes", "", nil, nil),
		ktestclient.NewListAction("templates", "", nil, nil),
		ktestclient.NewListAction("builds", "", nil, nil),
		ktestclient.NewListAction("namespace", "", nil, nil),
		ktestclient.NewListAction("deploymentconfig", "", nil, nil),
	}
	actionSet := []ktestclient.Action{}
	actionSet = append(actionSet, mockKubeClient.Actions()...)
	actionSet = append(actionSet, mockOriginClient.Actions()...)
	if len(actionSet) != len(expectedActionSet) {
		t.Errorf("Expected actions: %v, but got: %v", expectedActionSet, actionSet)
	}
}
func TestLimitedLogAndRetryProcessing(t *testing.T) {
	updater := &buildUpdater{}
	err := errors.New("funky error")

	now := kutil.Now()
	retry := controller.Retry{
		Count:          0,
		StartTimestamp: kutil.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute()-10, now.Second(), now.Nanosecond(), now.Location()),
	}
	if !limitedLogAndRetry(updater, 30*time.Minute)(&buildapi.Build{Status: buildapi.BuildStatus{Phase: buildapi.BuildPhaseNew}}, err, retry) {
		t.Error("Expected more retries!")
	}
	if updater.Build != nil {
		t.Fatal("BuildUpdater shouldn't be called!")
	}
}
func TestSyncNamespaceThatIsTerminating(t *testing.T) {
	mockKubeClient := &ktestclient.Fake{}
	mockOriginClient := &testclient.Fake{}
	nm := NamespaceController{
		KubeClient: mockKubeClient,
		Client:     mockOriginClient,
	}
	now := util.Now()
	testNamespace := &kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{
			Name:              "test",
			ResourceVersion:   "1",
			DeletionTimestamp: &now,
		},
		Spec: kapi.NamespaceSpec{
			Finalizers: []kapi.FinalizerName{kapi.FinalizerKubernetes, api.FinalizerOrigin},
		},
		Status: kapi.NamespaceStatus{
			Phase: kapi.NamespaceTerminating,
		},
	}
	err := nm.Handle(testNamespace)
	if err != nil {
		t.Errorf("Unexpected error when handling namespace %v", err)
	}

	// TODO: we will expect a finalize namespace call after rebase
	expectedActionSet := util.NewStringSet(
		"list-buildconfig",
		"list-policies",
		"list-imagestreams",
		"list-policyBindings",
		"list-roleBinding",
		"list-role",
		"list-routes",
		"list-templates",
		"list-builds",
		"finalize-namespace",
		"list-deploymentconfig",
	)
	actionSet := util.NewStringSet()
	// both fakes expose the recorded actions through the Actions field in this
	// vintage of the test clients; the original mixed field and method access
	for i := range mockKubeClient.Actions {
		actionSet.Insert(mockKubeClient.Actions[i].Action)
	}
	for i := range mockOriginClient.Actions {
		actionSet.Insert(mockOriginClient.Actions[i].Action)
	}
	if !(actionSet.HasAll(expectedActionSet.List()...) && (len(actionSet) == len(expectedActionSet))) {
		t.Errorf("Expected actions: %v, but got: %v", expectedActionSet, actionSet)
	}
}
// newPodList creates count pods with the given phase for the given job
func newPodList(count int, status api.PodPhase, job *experimental.Job) []api.Pod {
	pods := []api.Pod{}
	for i := 0; i < count; i++ {
		newPod := api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:      fmt.Sprintf("pod-%v", util.Now().UnixNano()),
				Labels:    job.Spec.Selector,
				Namespace: job.Namespace,
			},
			Status: api.PodStatus{Phase: status},
		}
		pods = append(pods, newPod)
	}
	return pods
}
func agedImage(id, ref string, ageInMinutes int64) imageapi.Image {
	image := imageWithLayers(id, ref,
		"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		"tarsum.dev+sha256:b194de3772ebbcdc8f244f663669799ac1cb141834b7cb8b69100285d357a2b0",
		"tarsum.dev+sha256:c937c4bb1c1a21cc6d94340812262c6472092028972ae69b551b1a70d4276171",
		"tarsum.dev+sha256:2aaacc362ac6be2b9e9ae8c6029f6f616bb50aec63746521858e47841b90fabd",
		"tarsum.dev+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	)

	if ageInMinutes >= 0 {
		image.CreationTimestamp = util.NewTime(util.Now().Add(time.Duration(-1*ageInMinutes) * time.Minute))
	}

	return image
}