// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set, the object
// should be gracefully deleted. If gracefulPending is set, the object has already been gracefully
// deleted (and the provided grace period is longer than the time to deletion). An error is returned
// if the condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may
// be updated with default values if graceful is true.
func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) {
	if strategy == nil {
		return false, false, nil
	}
	objectMeta, _, kerr := objectMetaAndKind(strategy, obj)
	if kerr != nil {
		return false, false, kerr
	}

	// if the object is already being deleted
	if objectMeta.DeletionTimestamp != nil {
		// the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
		// so force deletion immediately
		if objectMeta.DeletionGracePeriodSeconds == nil {
			return false, false, nil
		}
		// a user may only shorten the grace period; a longer period leaves the pending deletion unchanged
		if options.GracePeriodSeconds != nil {
			period := int64(*options.GracePeriodSeconds)
			if period > *objectMeta.DeletionGracePeriodSeconds {
				return false, true, nil
			}
			now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
			objectMeta.DeletionTimestamp = &now
			objectMeta.DeletionGracePeriodSeconds = &period
			options.GracePeriodSeconds = &period
			return true, false, nil
		}
		// graceful deletion is pending, do nothing
		options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds
		return false, true, nil
	}

	if !strategy.CheckGracefulDelete(obj, options) {
		return false, false, nil
	}
	now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds)))
	objectMeta.DeletionTimestamp = &now
	objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds
	return true, false, nil
}
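For context, a hedged sketch of how a caller might consume the three return values of BeforeDelete. Every name below other than BeforeDelete and the api types is a placeholder invented for this sketch, not taken from the source above.

// decideDeletion is a hypothetical call site showing how graceful, gracefulPending,
// and err drive the handling of a delete request.
func decideDeletion(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object) (string, error) {
	period := int64(30) // assumed caller-supplied grace period for this sketch
	options := &api.DeleteOptions{GracePeriodSeconds: &period}
	graceful, gracefulPending, err := BeforeDelete(strategy, ctx, obj, options)
	switch {
	case err != nil:
		return "", err
	case gracefulPending:
		// Deletion was already requested with an equal or shorter grace period.
		return "already pending", nil
	case graceful:
		// BeforeDelete stamped DeletionTimestamp/DeletionGracePeriodSeconds on the
		// object; a caller would persist that update rather than delete outright.
		return "update with deletion timestamp", nil
	default:
		return "delete immediately", nil
	}
}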
// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
// The return value can be ignored or used to stop recording, if desired.
// TODO: make me an object with parameterizable queue length and retry interval
func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface {
	// The default math/rand package functions aren't thread safe, so create a
	// new Rand object for each StartRecording call.
	randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
	eventCache := NewEventCache()
	return eventBroadcaster.StartEventWatcher(
		func(event *api.Event) {
			// Make a copy before modification, because there could be multiple listeners.
			// Events are safe to copy like this.
			eventCopy := *event
			event = &eventCopy

			var patch []byte
			previousEvent := eventCache.getEvent(event)
			updateExistingEvent := previousEvent.Count > 0
			if updateExistingEvent {
				// we still need to copy Name because the patch relies on Name to find the target event
				event.Name = previousEvent.Name
				event.Count = previousEvent.Count + 1

				// make sure Count and LastTimestamp are the only differences between event and eventCopy2,
				// so the generated patch carries only those two fields
				eventCopy2 := *event
				eventCopy2.Count = 0
				eventCopy2.LastTimestamp = unversioned.NewTime(time.Unix(0, 0))

				newData, _ := json.Marshal(event)
				oldData, _ := json.Marshal(eventCopy2)
				patch, _ = strategicpatch.CreateStrategicMergePatch(oldData, newData, event)
			}

			tries := 0
			for {
				if recordEvent(sink, event, patch, updateExistingEvent, eventCache) {
					break
				}
				tries++
				if tries >= maxTriesPerEvent {
					glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
					break
				}
				// Randomize the first sleep so that various clients won't all be
				// synced up if the master goes down.
				if tries == 1 {
					time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64()))
				} else {
					time.Sleep(sleepDuration)
				}
			}
		})
}
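A minimal usage sketch, assuming a NewBroadcaster constructor for eventBroadcasterImpl and an EventSink implementation are available in this package; those surrounding names are assumptions, not shown in the source above.

// recordEventsTo is a hypothetical wiring of a broadcaster to a sink. The
// returned watcher can be kept and its Stop method called to end recording.
func recordEventsTo(sink EventSink) watch.Interface {
	broadcaster := NewBroadcaster() // assumed constructor
	return broadcaster.StartRecordingToSink(sink)
}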
func TestNewStatusPreservesPodStartTime(t *testing.T) {
	syncer := newTestManager()
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Status: api.PodStatus{},
	}
	now := unversioned.Now()
	startTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
	pod.Status.StartTime = &startTime
	syncer.SetPodStatus(pod, getRandomPodStatus())

	status, _ := syncer.GetPodStatus(pod.UID)
	if !status.StartTime.Time.Equal(startTime.Time) {
		t.Errorf("Unexpected start time, expected %v, actual %v", startTime, status.StartTime)
	}
}
func TestPrintEventsResultSorted(t *testing.T) {
	// Arrange
	printer := NewHumanReadablePrinter(false /* noHeaders */, false, false, false, []string{})
	obj := api.EventList{
		Items: []api.Event{
			{
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 1",
				FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			{
				Source:         api.EventSource{Component: "scheduler"},
				Message:        "Item 2",
				FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			{
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 3",
				FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
		},
	}
	buffer := &bytes.Buffer{}

	// Act
	err := printer.PrintObj(&obj, buffer)

	// Assert
	if err != nil {
		t.Fatalf("An error occurred printing the EventList: %#v", err)
	}
	out := buffer.String()
	VerifyDatesInOrder(out, "\n" /* rowDelimiter */, " " /* columnDelimiter */, t)
}
func TestPodDescribeResultsSorted(t *testing.T) {
	// Arrange
	fake := testclient.NewSimpleFake(&api.EventList{
		Items: []api.Event{
			{
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 1",
				FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			{
				Source:         api.EventSource{Component: "scheduler"},
				Message:        "Item 2",
				FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			{
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 3",
				FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
		},
	})
	c := &describeClient{T: t, Namespace: "foo", Interface: fake}
	d := PodDescriber{c}

	// Act
	out, err := d.Describe("foo", "bar")

	// Assert
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	VerifyDatesInOrder(out, "\n" /* rowDelimiter */, "\t" /* columnDelimiter */, t)
}
func TestSortableEvents(t *testing.T) {
	// Arrange
	list := SortableEvents([]api.Event{
		{
			Source:         api.EventSource{Component: "kubelet"},
			Message:        "Item 1",
			FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
			LastTimestamp:  unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
			Count:          1,
		},
		{
			Source:         api.EventSource{Component: "scheduler"},
			Message:        "Item 2",
			FirstTimestamp: unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
			LastTimestamp:  unversioned.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)),
			Count:          1,
		},
		{
			Source:         api.EventSource{Component: "kubelet"},
			Message:        "Item 3",
			FirstTimestamp: unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
			LastTimestamp:  unversioned.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)),
			Count:          1,
		},
	})

	// Act
	sort.Sort(list)

	// Assert: events should come out ordered by timestamp, oldest first.
	if list[0].Message != "Item 2" ||
		list[1].Message != "Item 3" ||
		list[2].Message != "Item 1" {
		t.Fatal("List is not sorted by time. List: ", list)
	}
}
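For reference, a sketch of the sort.Interface implementation this test implies. Only the resulting order is visible above, so sorting on LastTimestamp is an inference consistent with the expected ordering, not the confirmed implementation.

// SortableEvents sorts api.Event values oldest-first by LastTimestamp.
// Inferred from the expected ordering in TestSortableEvents; assumed, not copied.
type SortableEvents []api.Event

func (list SortableEvents) Len() int      { return len(list) }
func (list SortableEvents) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
func (list SortableEvents) Less(i, j int) bool {
	return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time)
}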
func TestDescribeContainers(t *testing.T) {
	testCases := []struct {
		container        api.Container
		status           api.ContainerStatus
		expectedElements []string
	}{
		// Running state.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name: "test",
				State: api.ContainerState{
					Running: &api.ContainerStateRunning{
						StartedAt: unversioned.NewTime(time.Now()),
					},
				},
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Running", "Ready", "True", "Restart Count", "7", "Image", "image", "Started"},
		},
		// Waiting state.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name: "test",
				State: api.ContainerState{
					Waiting: &api.ContainerStateWaiting{
						Reason: "potato",
					},
				},
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image", "Reason", "potato"},
		},
		// Terminated state.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name: "test",
				State: api.ContainerState{
					Terminated: &api.ContainerStateTerminated{
						StartedAt:  unversioned.NewTime(time.Now()),
						FinishedAt: unversioned.NewTime(time.Now()),
						Reason:     "potato",
						ExitCode:   2,
					},
				},
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Terminated", "Ready", "True", "Restart Count", "7", "Image", "image", "Reason", "potato", "Started", "Finished", "Exit Code", "2"},
		},
		// Last termination state.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name: "test",
				State: api.ContainerState{
					Running: &api.ContainerStateRunning{
						StartedAt: unversioned.NewTime(time.Now()),
					},
				},
				LastTerminationState: api.ContainerState{
					Terminated: &api.ContainerStateTerminated{
						StartedAt:  unversioned.NewTime(time.Now().Add(time.Second * 3)),
						FinishedAt: unversioned.NewTime(time.Now()),
						Reason:     "crashing",
						ExitCode:   3,
					},
				},
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Terminated", "Ready", "True", "Restart Count", "7", "Image", "image", "Started", "Finished", "Exit Code", "3", "crashing"},
		},
		// No state defaults to waiting.
		{
			container: api.Container{Name: "test", Image: "image"},
			status: api.ContainerStatus{
				Name:         "test",
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image"},
		},
		// Env.
		{
			container: api.Container{Name: "test", Image: "image", Env: []api.EnvVar{{Name: "envname", Value: "xyz"}}},
			status: api.ContainerStatus{
				Name:         "test",
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"test", "State", "Waiting", "Ready", "True", "Restart Count", "7", "Image", "image", "envname", "xyz"},
		},
		// Using limits.
		{
			container: api.Container{
				Name:  "test",
				Image: "image",
				Resources: api.ResourceRequirements{
					Limits: api.ResourceList{
						api.ResourceName(api.ResourceCPU):     resource.MustParse("1000"),
						api.ResourceName(api.ResourceMemory):  resource.MustParse("4G"),
						api.ResourceName(api.ResourceStorage): resource.MustParse("20G"),
					},
				},
			},
			status: api.ContainerStatus{
				Name:         "test",
				Ready:        true,
				RestartCount: 7,
			},
			expectedElements: []string{"cpu", "1k", "memory", "4G", "storage", "20G"},
		},
	}

	for i, testCase := range testCases {
		out := new(bytes.Buffer)
		pod := api.Pod{
			Spec: api.PodSpec{
				Containers: []api.Container{testCase.container},
			},
			Status: api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{testCase.status},
			},
		}
		describeContainers(&pod, out)
		output := out.String()
		for _, expected := range testCase.expectedElements {
			if !strings.Contains(output, expected) {
				t.Errorf("Test case %d: expected to find %q in output: %q", i, expected, output)
			}
		}
	}
}
			defer GinkgoRecover()
			if p.Status.Phase == api.PodRunning {
				if _, found := watchTimes[p.Name]; !found {
					watchTimes[p.Name] = unversioned.Now()
					createTimes[p.Name] = p.CreationTimestamp
					nodes[p.Name] = p.Spec.NodeName
					// Take the latest container start time as the pod's run time.
					var startTime unversioned.Time
					for _, cs := range p.Status.ContainerStatuses {
						if cs.State.Running != nil {
							if startTime.Before(cs.State.Running.StartedAt) {
								startTime = cs.State.Running.StartedAt
							}
						}
					}
					if startTime != unversioned.NewTime(time.Time{}) {
						runTimes[p.Name] = startTime
					} else {
						Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
					}
				}
			}
		}

		additionalPodsPrefix = "density-latency-pod-" + string(util.NewUUID())
		_, controller := controllerFramework.NewInformer(
			&cache.ListWatch{
				ListFunc: func() (runtime.Object, error) {
					return c.Pods(ns).List(labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix}), fields.Everything())
				},
				WatchFunc: func(rv string) (watch.Interface, error) {
func TestPrintHumanReadableWithNamespace(t *testing.T) {
	namespaceName := "testnamespace"
	name := "test"
	table := []struct {
		obj          runtime.Object
		isNamespaced bool
	}{
		{
			obj: &api.Pod{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
			},
			isNamespaced: true,
		},
		{
			obj: &api.ReplicationController{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Spec: api.ReplicationControllerSpec{
					Replicas: 2,
					Template: &api.PodTemplateSpec{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"name": "foo",
								"type": "production",
							},
						},
						Spec: api.PodSpec{
							Containers: []api.Container{
								{
									Image:                  "foo/bar",
									TerminationMessagePath: api.TerminationMessagePathDefault,
									ImagePullPolicy:        api.PullIfNotPresent,
								},
							},
							RestartPolicy: api.RestartPolicyAlways,
							DNSPolicy:     api.DNSDefault,
							NodeSelector: map[string]string{
								"baz": "blah",
							},
						},
					},
				},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Service{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Spec: api.ServiceSpec{
					ClusterIP: "1.2.3.4",
					Ports: []api.ServicePort{
						{
							Port:     80,
							Protocol: "TCP",
						},
					},
				},
				Status: api.ServiceStatus{
					LoadBalancer: api.LoadBalancerStatus{
						Ingress: []api.LoadBalancerIngress{
							{
								IP: "2.3.4.5",
							},
						},
					},
				},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Endpoints{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Subsets: []api.EndpointSubset{{
					Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}, {IP: "localhost"}},
					Ports:     []api.EndpointPort{{Port: 8080}},
				}},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Namespace{
				ObjectMeta: api.ObjectMeta{Name: name},
			},
			isNamespaced: false,
		},
		{
			obj: &api.Secret{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
			},
			isNamespaced: true,
		},
		{
			obj: &api.ServiceAccount{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Secrets:    []api.ObjectReference{},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Node{
				ObjectMeta: api.ObjectMeta{Name: name},
				Status:     api.NodeStatus{},
			},
			isNamespaced: false,
		},
		{
			obj: &api.PersistentVolume{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Spec:       api.PersistentVolumeSpec{},
			},
			isNamespaced: false,
		},
		{
			obj: &api.PersistentVolumeClaim{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
				Spec:       api.PersistentVolumeClaimSpec{},
			},
			isNamespaced: true,
		},
		{
			obj: &api.Event{
				ObjectMeta:     api.ObjectMeta{Name: name, Namespace: namespaceName},
				Source:         api.EventSource{Component: "kubelet"},
				Message:        "Item 1",
				FirstTimestamp: unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				LastTimestamp:  unversioned.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)),
				Count:          1,
			},
			isNamespaced: true,
		},
		{
			obj: &api.LimitRange{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
			},
			isNamespaced: true,
		},
		{
			obj: &api.ResourceQuota{
				ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName},
			},
			isNamespaced: true,
		},
		{
			obj: &api.ComponentStatus{
				Conditions: []api.ComponentCondition{
					{Type: api.ComponentHealthy, Status: api.ConditionTrue, Message: "ok", Error: ""},
				},
			},
			isNamespaced: false,
		},
	}

	for _, test := range table {
		if test.isNamespaced {
			// Expect output to include namespace when requested.
			printer := NewHumanReadablePrinter(false, true, false, false, []string{})
			buffer := &bytes.Buffer{}
			err := printer.PrintObj(test.obj, buffer)
			if err != nil {
				t.Fatalf("An error occurred printing object: %#v", err)
			}
			matched := contains(strings.Fields(buffer.String()), namespaceName)
			if !matched {
				t.Errorf("Expect printing object to contain namespace: %#v", test.obj)
			}
		} else {
			// Expect error when trying to get all namespaces for un-namespaced object.
			printer := NewHumanReadablePrinter(false, true, false, false, []string{})
			buffer := &bytes.Buffer{}
			err := printer.PrintObj(test.obj, buffer)
			if err == nil {
				t.Errorf("Expected error when printing un-namespaced type")
			}
		}
	}
}
func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Namespace, hpa.Spec.ScaleRef.Name)

	scale, err := a.client.Extensions().Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
	if err != nil {
		a.eventRecorder.Event(&hpa, "FailedGetScale", err.Error())
		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
	}
	currentReplicas := scale.Status.Replicas

	desiredReplicas, currentUtilization, timestamp, err := a.computeReplicasForCPUUtilization(hpa, scale)
	if err != nil {
		a.eventRecorder.Event(&hpa, "FailedComputeReplicas", err.Error())
		return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
	}

	if hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas {
		desiredReplicas = *hpa.Spec.MinReplicas
	}
	// TODO: remove when pod idling is done.
	if desiredReplicas == 0 {
		desiredReplicas = 1
	}
	if desiredReplicas > hpa.Spec.MaxReplicas {
		desiredReplicas = hpa.Spec.MaxReplicas
	}

	rescale := false
	if desiredReplicas != currentReplicas {
		// Scale down only if the usage ratio dropped significantly below the target
		// and there was no rescaling in the last downscaleForbiddenWindow.
		if desiredReplicas < currentReplicas &&
			(hpa.Status.LastScaleTime == nil ||
				hpa.Status.LastScaleTime.Add(downscaleForbiddenWindow).Before(timestamp)) {
			rescale = true
		}

		// Scale up only if the usage ratio increased significantly above the target
		// and there was no rescaling in the last upscaleForbiddenWindow.
		if desiredReplicas > currentReplicas &&
			(hpa.Status.LastScaleTime == nil ||
				hpa.Status.LastScaleTime.Add(upscaleForbiddenWindow).Before(timestamp)) {
			rescale = true
		}
	}

	if rescale {
		scale.Spec.Replicas = desiredReplicas
		_, err = a.client.Extensions().Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
		if err != nil {
			a.eventRecorder.Eventf(&hpa, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
			return fmt.Errorf("failed to rescale %s: %v", reference, err)
		}
		a.eventRecorder.Eventf(&hpa, "SuccessfulRescale", "New size: %d", desiredReplicas)
		glog.Infof("Successful rescale of %s, old size: %d, new size: %d", hpa.Name, currentReplicas, desiredReplicas)
	} else {
		desiredReplicas = currentReplicas
	}

	hpa.Status = extensions.HorizontalPodAutoscalerStatus{
		CurrentReplicas:                 currentReplicas,
		DesiredReplicas:                 desiredReplicas,
		CurrentCPUUtilizationPercentage: currentUtilization,
		LastScaleTime:                   hpa.Status.LastScaleTime,
	}
	if rescale {
		now := unversioned.NewTime(time.Now())
		hpa.Status.LastScaleTime = &now
	}

	_, err = a.client.Extensions().HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa)
	if err != nil {
		a.eventRecorder.Event(&hpa, "FailedUpdateStatus", err.Error())
		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
	}
	return nil
}
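To isolate the anti-flapping rule used above, here is a small hedged sketch of the cool-down window check on its own; shouldRescale is a hypothetical helper distilled from the controller, not part of the source.

// shouldRescale reports whether scaling is permitted, enforcing a cool-down
// window since the last scale event. Hypothetical helper for illustration only;
// the real controller inlines this logic in reconcileAutoscaler.
func shouldRescale(desired, current int, lastScaleTime *time.Time, window time.Duration, now time.Time) bool {
	if desired == current {
		return false
	}
	// Permit the change if we never scaled before, or the window has elapsed.
	return lastScaleTime == nil || lastScaleTime.Add(window).Before(now)
}

A caller would pass downscaleForbiddenWindow when desired < current and upscaleForbiddenWindow when desired > current, mirroring the two branches in reconcileAutoscaler.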