func newCondition() extensions.JobCondition {
	return extensions.JobCondition{
		Type:               extensions.JobComplete,
		Status:             api.ConditionTrue,
		LastProbeTime:      unversioned.Now(),
		LastTransitionTime: unversioned.Now(),
	}
}
func deletePods(kubeClient client.Interface, ns string, before unversioned.Time) (int64, error) {
	items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return 0, err
	}
	expired := unversioned.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
func TestNamespaceStatusStrategy(t *testing.T) {
	ctx := api.NewDefaultContext()
	if StatusStrategy.NamespaceScoped() {
		t.Errorf("Namespaces should not be namespace scoped")
	}
	if StatusStrategy.AllowCreateOnUpdate() {
		t.Errorf("Namespaces should not allow create on update")
	}
	now := unversioned.Now()
	oldNamespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "10"},
		Spec:       api.NamespaceSpec{Finalizers: []api.FinalizerName{"kubernetes"}},
		Status:     api.NamespaceStatus{Phase: api.NamespaceActive},
	}
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "9", DeletionTimestamp: &now},
		Status:     api.NamespaceStatus{Phase: api.NamespaceTerminating},
	}
	StatusStrategy.PrepareForUpdate(namespace, oldNamespace)
	if namespace.Status.Phase != api.NamespaceTerminating {
		t.Errorf("Namespace status updates should allow change of phase: %v", namespace.Status.Phase)
	}
	if len(namespace.Spec.Finalizers) != 1 || namespace.Spec.Finalizers[0] != api.FinalizerKubernetes {
		t.Errorf("PrepareForUpdate should have preserved old finalizers")
	}
	errs := StatusStrategy.ValidateUpdate(ctx, namespace, oldNamespace)
	if len(errs) != 0 {
		t.Errorf("Unexpected error %v", errs)
	}
	if namespace.ResourceVersion != "9" {
		t.Errorf("Incoming resource version on update should not be mutated")
	}
}
func TestGetEventExisting(t *testing.T) {
	// Arrange
	eventCache := NewEventCache()
	eventTime := unversioned.Now()
	event := api.Event{
		Reason:  "do I exist",
		Message: "I do, oh my",
		InvolvedObject: api.ObjectReference{
			Kind:       "Pod",
			Name:       "clever.name.here",
			Namespace:  "spaceOfName",
			UID:        "D933D32AFB2A238",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node4",
		},
		Count:          1,
		FirstTimestamp: eventTime,
		LastTimestamp:  eventTime,
	}
	eventCache.addOrUpdateEvent(&event)

	// Act
	existingEvent := eventCache.getEvent(&event)

	// Assert
	compareEventWithHistoryEntry(&event, &existingEvent, t)
}
func TestAddOrUpdateEventNoExisting(t *testing.T) {
	// Arrange
	eventCache := NewEventCache()
	eventTime := unversioned.Now()
	event := api.Event{
		Reason:  "my reasons are many",
		Message: "my message is love",
		InvolvedObject: api.ObjectReference{
			Kind:       "Pod",
			Name:       "awesome.name",
			Namespace:  "betterNamespace",
			UID:        "C934D34AFB20242",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node1",
		},
		Count:          1,
		FirstTimestamp: eventTime,
		LastTimestamp:  eventTime,
	}

	// Act
	result := eventCache.addOrUpdateEvent(&event)

	// Assert
	compareEventWithHistoryEntry(&event, &result, t)
}
func TestAddOrUpdateEventExisting(t *testing.T) {
	// Arrange
	eventCache := NewEventCache()
	event1Time := unversioned.Unix(2324, 2342)
	event2Time := unversioned.Now()
	event1 := api.Event{
		Reason:  "something happened",
		Message: "can you believe it?",
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "rs1",
		},
		InvolvedObject: api.ObjectReference{
			Kind:       "Scheduler",
			Name:       "anOkName",
			Namespace:  "someNamespace",
			UID:        "C934D3234CD0242",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node2",
		},
		Count:          1,
		FirstTimestamp: event1Time,
		LastTimestamp:  event1Time,
	}
	event2 := api.Event{
		Reason:  "something happened",
		Message: "can you believe it?",
		ObjectMeta: api.ObjectMeta{
			ResourceVersion: "rs2",
		},
		InvolvedObject: api.ObjectReference{
			Kind:       "Scheduler",
			Name:       "anOkName",
			Namespace:  "someNamespace",
			UID:        "C934D3234CD0242",
			APIVersion: "version",
		},
		Source: api.EventSource{
			Component: "kubelet",
			Host:      "kublet.node2",
		},
		Count:          3,
		FirstTimestamp: event1Time,
		LastTimestamp:  event2Time,
	}

	// Act
	eventCache.addOrUpdateEvent(&event1)
	result1 := eventCache.addOrUpdateEvent(&event2)
	result2 := eventCache.getEvent(&event1)

	// Assert
	compareEventWithHistoryEntry(&event2, &result1, t)
	compareEventWithHistoryEntry(&event2, &result2, t)
}
// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set the object
// should be gracefully deleted, if gracefulPending is set the object has already been gracefully deleted
// (and the provided grace period is longer than the time to deletion), and an error is returned if the
// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with
// default values if graceful is true.
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
	if strategy == nil {
		return false, false, nil
	}
	objectMeta, _, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return false, false, kerr
	}

	// if the object is already being deleted
	if objectMeta.DeletionTimestamp != nil {
		// if we are already being deleted, we may only shorten the deletion grace period
		// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
		// so we force deletion immediately
		if objectMeta.DeletionGracePeriodSeconds == nil {
			return false, false, nil
		}
		// only a shorter grace period may be provided by a user
		if options.GracePeriodSeconds != nil {
			period := int64(*options.GracePeriodSeconds)
			if period > *objectMeta.DeletionGracePeriodSeconds {
				return false, true, nil
			}
			now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
			objectMeta.DeletionTimestamp = &now
			objectMeta.DeletionGracePeriodSeconds = &period
			options.GracePeriodSeconds = &period
			return true, false, nil
		}
		// graceful deletion is pending, do nothing
		options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
		return false, true, nil
	}

	if !strategy.CheckGracefulDelete(obj, options) {
		return false, false, nil
	}
	now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
	objectMeta.DeletionTimestamp = &now
	objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
	return true, false, nil
}
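// NOTE: a minimal caller sketch, not from this repository, showing how a generic
// registry could branch on BeforeDelete's three results. The names `store`,
// `persistGracefulUpdate`, and `deleteImmediately` are hypothetical placeholders.
//
//	graceful, gracefulPending, err := BeforeDelete(store.DeleteStrategy, ctx, obj, options)
//	switch {
//	case err != nil:
//		return nil, err
//	case gracefulPending:
//		// a graceful deletion is already in progress; nothing to change
//		return obj, nil
//	case graceful:
//		// BeforeDelete stamped DeletionTimestamp and DeletionGracePeriodSeconds;
//		// persist the updated object instead of removing it from storage
//		return persistGracefulUpdate(ctx, obj)
//	default:
//		// no graceful semantics apply: remove the object now
//		return deleteImmediately(ctx, obj)
//	}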
func TestChangedStatusKeepsStartTime(t *testing.T) {
	syncer := newTestManager()
	now := unversioned.Now()
	firstStatus := getRandomPodStatus()
	firstStatus.StartTime = &now
	syncer.SetPodStatus(testPod, firstStatus)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	verifyUpdates(t, syncer, 2)
	finalStatus, _ := syncer.GetPodStatus(testPod.UID)
	if finalStatus.StartTime.IsZero() {
		t.Errorf("StartTime should not be zero")
	}
	if !finalStatus.StartTime.Time.Equal(now.Time) {
		t.Errorf("Expected %v, but got %v", now.Time, finalStatus.StartTime.Time)
	}
}
func (t *Tester) testCreateResetsUserData(valid runtime.Object) {
	objectMeta := t.getObjectMetaOrFail(valid)
	now := unversioned.Now()
	objectMeta.UID = "bad-uid"
	objectMeta.CreationTimestamp = now

	obj, err := t.storage.(rest.Creater).Create(t.TestContext(), valid)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if obj == nil {
		t.Fatalf("Unexpected object from result: %#v", obj)
	}
	if objectMeta.UID == "bad-uid" || objectMeta.CreationTimestamp == now {
		t.Errorf("ObjectMeta did not reset basic fields: %#v", objectMeta)
	}
}
func TestEventList(t *testing.T) {
	ns := api.NamespaceDefault
	objReference := &api.ObjectReference{
		Kind:            "foo",
		Namespace:       ns,
		Name:            "objref1",
		UID:             "uid",
		APIVersion:      "apiv1",
		ResourceVersion: "1",
	}
	timeStamp := unversioned.Now()
	eventList := &api.EventList{
		Items: []api.Event{
			{
				InvolvedObject: *objReference,
				FirstTimestamp: timeStamp,
				LastTimestamp:  timeStamp,
				Count:          1,
			},
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath("events", ns, ""),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: eventList},
	}
	response, err := c.Setup(t).Events(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("%#v should be nil.", err)
	}
	if len(response.Items) != 1 {
		t.Errorf("%#v response.Items should have len 1.", response.Items)
	}
	responseEvent := response.Items[0]
	if e, r := eventList.Items[0].InvolvedObject, responseEvent.InvolvedObject; !reflect.DeepEqual(e, r) {
		t.Errorf("%#v != %#v.", e, r)
	}
}
func makeEvent(ref *api.ObjectReference, reason, message string) *api.Event {
	t := unversioned.Now()
	namespace := ref.Namespace
	if namespace == "" {
		namespace = api.NamespaceDefault
	}
	return &api.Event{
		ObjectMeta: api.ObjectMeta{
			Name:      fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
			Namespace: namespace,
		},
		InvolvedObject: *ref,
		Reason:         reason,
		Message:        message,
		FirstTimestamp: t,
		LastTimestamp:  t,
		Count:          1,
	}
}
func TestNewStatusPreservesPodStartTime(t *testing.T) {
	syncer := newTestManager()
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Status: api.PodStatus{},
	}
	now := unversioned.Now()
	startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
	pod.Status.StartTime = &startTime
	syncer.SetPodStatus(pod, getRandomPodStatus())

	status, _ := syncer.GetPodStatus(pod.UID)
	if !status.StartTime.Time.Equal(startTime.Time) {
		t.Errorf("Unexpected start time, expected %v, actual %v", startTime, status.StartTime)
	}
}
func TestDeleteNamespaceWithCompleteFinalizers(t *testing.T) {
	storage, fakeClient := newStorage(t)
	key := etcdtest.AddPrefix("namespaces/foo")
	ctx := api.NewContext()
	now := unversioned.Now()
	namespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:              "foo",
			DeletionTimestamp: &now,
		},
		Spec: api.NamespaceSpec{
			Finalizers: []api.FinalizerName{},
		},
		Status: api.NamespaceStatus{Phase: api.NamespaceActive},
	}
	if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), namespace), 0); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if _, err := storage.Delete(ctx, "foo", nil); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
func (m *manager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	m.podStatusesLock.Lock()
	defer m.podStatusesLock.Unlock()
	oldStatus, found := m.podStatuses[pod.UID]

	// ensure that the start time does not change across updates.
	if found && oldStatus.StartTime != nil {
		status.StartTime = oldStatus.StartTime
	}

	// if the status has no start time, we need to set an initial time
	// TODO(yujuhong): Consider setting StartTime when generating the pod
	// status instead, which would allow manager to become a simple cache
	// again.
	if status.StartTime.IsZero() {
		if pod.Status.StartTime.IsZero() {
			// the pod did not have a previously recorded value so set to now
			now := unversioned.Now()
			status.StartTime = &now
		} else {
			// the pod had a recorded value, but the kubelet restarted so we need to rebuild cache
			// based on last observed value
			status.StartTime = pod.Status.StartTime
		}
	}

	// TODO: Holding a lock during blocking operations is dangerous. Refactor so this isn't necessary.
	// The intent here is to prevent concurrent updates to a pod's status from
	// clobbering each other so the phase of a pod progresses monotonically.
	// Currently this routine is not called for the same pod from multiple
	// workers and/or the kubelet but dropping the lock before sending the
	// status down the channel feels like an easy way to get a bullet in foot.
	if !found || !isStatusEqual(&oldStatus, &status) || pod.DeletionTimestamp != nil {
		m.podStatuses[pod.UID] = status
		m.podStatusChannel <- podStatusSyncRequest{pod, status}
	} else {
		glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", kubeletUtil.FormatPodName(pod), status)
	}
}
func TestEventGet(t *testing.T) {
	objReference := &api.ObjectReference{
		Kind:            "foo",
		Namespace:       "nm",
		Name:            "objref1",
		UID:             "uid",
		APIVersion:      "apiv1",
		ResourceVersion: "1",
	}
	timeStamp := unversioned.Now()
	event := &api.Event{
		ObjectMeta: api.ObjectMeta{
			Namespace: "other",
		},
		InvolvedObject: *objReference,
		FirstTimestamp: timeStamp,
		LastTimestamp:  timeStamp,
		Count:          1,
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath("events", "other", "1"),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: event},
	}
	response, err := c.Setup(t).Events("other").Get("1")
	if err != nil {
		t.Fatalf("%v should be nil.", err)
	}
	if e, r := event.InvolvedObject, response.InvolvedObject; !reflect.DeepEqual(e, r) {
		t.Errorf("%#v != %#v.", e, r)
	}
}
// Delete enforces life-cycle rules for namespace termination
func (r *REST) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
	nsObj, err := r.Get(ctx, name)
	if err != nil {
		return nil, err
	}

	namespace := nsObj.(*api.Namespace)

	// upon first request to delete, we switch the phase to start namespace termination
	if namespace.DeletionTimestamp.IsZero() {
		now := unversioned.Now()
		namespace.DeletionTimestamp = &now
		namespace.Status.Phase = api.NamespaceTerminating
		result, _, err := r.status.Update(ctx, namespace)
		return result, err
	}

	// prior to final deletion, we must ensure that finalizers is empty
	if len(namespace.Spec.Finalizers) != 0 {
		err = apierrors.NewConflict("Namespace", namespace.Name, fmt.Errorf("The system is ensuring all content is removed from this namespace. Upon completion, this namespace will automatically be purged by the system."))
		return nil, err
	}
	return r.Etcd.Delete(ctx, name, nil)
}
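// NOTE: a hypothetical sketch (not from this repository) of the two-phase
// termination the Delete method above enforces:
//
//	// 1st DELETE: the namespace is only marked for termination.
//	obj, _ := r.Delete(ctx, "demo", nil)
//	ns := obj.(*api.Namespace)
//	// ns.DeletionTimestamp != nil, ns.Status.Phase == api.NamespaceTerminating
//
//	// The namespace controller then drains the namespace's content and clears
//	// Spec.Finalizers through the finalize subresource.
//
//	// 2nd DELETE: finalizers are now empty, so the object is removed from etcd.
//	// A DELETE issued while finalizers remain returns a Conflict error instead.
//	_, err := r.Delete(ctx, "demo", nil)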
func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIVersions) {
	mockClient := &testclient.Fake{}
	now := unversioned.Now()
	testNamespace := &api.Namespace{
		ObjectMeta: api.ObjectMeta{
			Name:              "test",
			ResourceVersion:   "1",
			DeletionTimestamp: &now,
		},
		Spec: api.NamespaceSpec{
			Finalizers: []api.FinalizerName{"kubernetes"},
		},
		Status: api.NamespaceStatus{
			Phase: api.NamespaceTerminating,
		},
	}
	if containsVersion(versions, "extensions/v1beta1") {
		resources := []unversioned.APIResource{}
		for _, resource := range []string{"daemonsets", "deployments", "jobs", "horizontalpodautoscalers", "ingresses"} {
			resources = append(resources, unversioned.APIResource{Name: resource})
		}
		mockClient.Resources = map[string]*unversioned.APIResourceList{
			"extensions/v1beta1": {
				GroupVersion: "extensions/v1beta1",
				APIResources: resources,
			},
		}
	}
	err := syncNamespace(mockClient, versions, testNamespace)
	if err != nil {
		t.Errorf("Unexpected error when syncing namespace %v", err)
	}
	// TODO: Reuse the constants for all these strings from testclient
	expectedActionSet := sets.NewString(
		strings.Join([]string{"list", "replicationcontrollers", ""}, "-"),
		strings.Join([]string{"list", "services", ""}, "-"),
		strings.Join([]string{"list", "pods", ""}, "-"),
		strings.Join([]string{"list", "resourcequotas", ""}, "-"),
		strings.Join([]string{"list", "secrets", ""}, "-"),
		strings.Join([]string{"list", "limitranges", ""}, "-"),
		strings.Join([]string{"list", "events", ""}, "-"),
		strings.Join([]string{"list", "serviceaccounts", ""}, "-"),
		strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"),
		strings.Join([]string{"create", "namespaces", "finalize"}, "-"),
		strings.Join([]string{"delete", "namespaces", ""}, "-"),
	)
	if containsVersion(versions, "extensions/v1beta1") {
		expectedActionSet.Insert(
			strings.Join([]string{"list", "daemonsets", ""}, "-"),
			strings.Join([]string{"list", "deployments", ""}, "-"),
			strings.Join([]string{"list", "jobs", ""}, "-"),
			strings.Join([]string{"list", "horizontalpodautoscalers", ""}, "-"),
			strings.Join([]string{"list", "ingresses", ""}, "-"),
			strings.Join([]string{"get", "resource", ""}, "-"),
		)
	}
	actionSet := sets.NewString()
	for _, action := range mockClient.Actions() {
		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
	}
	if !actionSet.HasAll(expectedActionSet.List()...) {
		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
	}
	if !expectedActionSet.HasAll(actionSet.List()...) {
		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, actionSet.Difference(expectedActionSet))
	}
}
// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta.
func FillObjectMetaSystemFields(ctx Context, meta *ObjectMeta) {
	meta.CreationTimestamp = unversioned.Now()
	meta.UID = util.NewUUID()
	meta.SelfLink = ""
}
func (f fakeRecorder) Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
	f.events = append(f.events, fakeEvent{object, unversioned.Now(), reason, fmt.Sprintf(messageFmt, args...)})
}
func (f fakeRecorder) Event(object runtime.Object, reason, message string) {
	f.events = append(f.events, fakeEvent{object, unversioned.Now(), reason, message})
}
func (recorder *recorderImpl) Event(object runtime.Object, reason, message string) {
	recorder.generateEvent(object, unversioned.Now(), reason, message)
}
createTimes := make(map[string]unversioned.Time, 0)
nodes := make(map[string]string, 0)
scheduleTimes := make(map[string]unversioned.Time, 0)
runTimes := make(map[string]unversioned.Time, 0)
watchTimes := make(map[string]unversioned.Time, 0)

var mutex sync.Mutex
checkPod := func(p *api.Pod) {
	mutex.Lock()
	defer mutex.Unlock()
	defer GinkgoRecover()

	if p.Status.Phase == api.PodRunning {
		if _, found := watchTimes[p.Name]; !found {
			watchTimes[p.Name] = unversioned.Now()
			createTimes[p.Name] = p.CreationTimestamp
			nodes[p.Name] = p.Spec.NodeName
			var startTime unversioned.Time
			for _, cs := range p.Status.ContainerStatuses {
				if cs.State.Running != nil {
					if startTime.Before(cs.State.Running.StartedAt) {
						startTime = cs.State.Running.StartedAt
					}
				}
			}
			if startTime != unversioned.NewTime(time.Time{}) {
				runTimes[p.Name] = startTime
			} else {
				Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
			}
func TestWatchJobs(t *testing.T) {
	fakeWatch := watch.NewFake()
	client := &testclient.Fake{}
	client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
	manager := NewJobController(client, controller.NoResyncPeriodFunc)
	manager.podStoreSynced = alwaysReady

	var testJob extensions.Job
	received := make(chan string)

	// The update sent through the fakeWatcher should make its way into the workqueue,
	// and eventually into the syncHandler.
	manager.syncHandler = func(key string) error {
		obj, exists, err := manager.jobStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find job under key %v", key)
		}
		job := *obj.(*extensions.Job)
		if !api.Semantic.DeepDerivative(job, testJob) {
			t.Errorf("Expected %#v, but got %#v", testJob, job)
		}
		received <- key
		return nil
	}
	// Start only the job watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go manager.jobController.Run(stopCh)
	go util.Until(manager.worker, 10*time.Millisecond, stopCh)

	// We're sending new job to see if it reaches syncHandler.
	testJob.Name = "foo"
	fakeWatch.Add(&testJob)

	select {
	case <-received:
	case <-time.After(controllerTimeout):
		t.Errorf("Expected 1 call but got 0")
	}

	// We're sending fake finished job, to see if it reaches syncHandler - it should not,
	// since we're filtering out finished jobs.
	testJobv2 := extensions.Job{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Status: extensions.JobStatus{
			Conditions: []extensions.JobCondition{{
				Type:               extensions.JobComplete,
				Status:             api.ConditionTrue,
				LastProbeTime:      unversioned.Now(),
				LastTransitionTime: unversioned.Now(),
			}},
		},
	}
	fakeWatch.Modify(&testJobv2)

	select {
	case <-received:
		t.Errorf("Expected 0 call but got 1")
	case <-time.After(controllerTimeout):
	}
}