// TestGracefulStoreCanDeleteIfExistingGracePeriodZero tests recovery from // a race condition where a prior graceful delete was unable to complete, // but the pod remains with its deletion timestamp // and grace period set to 0. func TestGracefulStoreCanDeleteIfExistingGracePeriodZero(t *testing.T) { deletionTimestamp := metav1.NewTime(time.Now()) deletionGracePeriodSeconds := int64(0) initialGeneration := int64(1) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", Generation: initialGeneration, DeletionGracePeriodSeconds: &deletionGracePeriodSeconds, DeletionTimestamp: &deletionTimestamp, }, Spec: api.PodSpec{NodeName: "machine"}, } testContext := api.WithNamespace(api.NewContext(), "test") destroyFunc, registry := NewTestGenericStoreRegistry(t) registry.EnableGarbageCollection = false defaultDeleteStrategy := testRESTStrategy{api.Scheme, api.SimpleNameGenerator, true, false, true} registry.DeleteStrategy = testGracefulStrategy{defaultDeleteStrategy} defer destroyFunc() graceful, gracefulPending, err := rest.BeforeDelete(registry.DeleteStrategy, testContext, pod, api.NewDeleteOptions(0)) if err != nil { t.Fatalf("Unexpected error: %v", err) } if graceful { t.Fatalf("graceful should be false if object has DeletionTimestamp and DeletionGracePeriodSeconds is 0") } if gracefulPending { t.Fatalf("gracefulPending should be false if object has DeletionTimestamp and DeletionGracePeriodSeconds is 0") } }
func TestNodeConditionsObservedSince(t *testing.T) { now := metav1.Now() observedTime := metav1.NewTime(now.Time.Add(-1 * time.Minute)) testCases := map[string]struct { observedAt nodeConditionsObservedAt period time.Duration now time.Time result []v1.NodeConditionType }{ "in-period": { observedAt: nodeConditionsObservedAt{ v1.NodeMemoryPressure: observedTime.Time, }, period: 2 * time.Minute, now: now.Time, result: []v1.NodeConditionType{v1.NodeMemoryPressure}, }, "out-of-period": { observedAt: nodeConditionsObservedAt{ v1.NodeMemoryPressure: observedTime.Time, }, period: 30 * time.Second, now: now.Time, result: []v1.NodeConditionType{}, }, } for testName, testCase := range testCases { actual := nodeConditionsObservedSince(testCase.observedAt, testCase.period, testCase.now) if !nodeConditionList(actual).Equal(nodeConditionList(testCase.result)) { t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) } } }
func (sb *summaryBuilder) containerInfoV2ToStats( name string, info *cadvisorapiv2.ContainerInfo) stats.ContainerStats { cStats := stats.ContainerStats{ StartTime: metav1.NewTime(info.Spec.CreationTime), Name: name, } cstat, found := sb.latestContainerStats(info) if !found { return cStats } if info.Spec.HasCpu { cpuStats := stats.CPUStats{ Time: metav1.NewTime(cstat.Timestamp), } if cstat.CpuInst != nil { cpuStats.UsageNanoCores = &cstat.CpuInst.Usage.Total } if cstat.Cpu != nil { cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total } cStats.CPU = &cpuStats } if info.Spec.HasMemory { pageFaults := cstat.Memory.ContainerData.Pgfault majorPageFaults := cstat.Memory.ContainerData.Pgmajfault cStats.Memory = &stats.MemoryStats{ Time: metav1.NewTime(cstat.Timestamp), UsageBytes: &cstat.Memory.Usage, WorkingSetBytes: &cstat.Memory.WorkingSet, RSSBytes: &cstat.Memory.RSS, PageFaults: &pageFaults, MajorPageFaults: &majorPageFaults, } // availableBytes = memory limit (if known) - workingset if !isMemoryUnlimited(info.Spec.Memory.Limit) { availableBytes := info.Spec.Memory.Limit - cstat.Memory.WorkingSet cStats.Memory.AvailableBytes = &availableBytes } } sb.containerInfoV2FsStats(info, &cStats) cStats.UserDefinedMetrics = sb.containerInfoV2ToUserDefinedMetrics(info) return cStats }
func TestThresholdsMetGracePeriod(t *testing.T) { now := metav1.Now() hardThreshold := Threshold{ Signal: SignalMemoryAvailable, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("1Gi"), }, } softThreshold := Threshold{ Signal: SignalMemoryAvailable, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("2Gi"), }, GracePeriod: 1 * time.Minute, } oldTime := metav1.NewTime(now.Time.Add(-2 * time.Minute)) testCases := map[string]struct { observedAt thresholdsObservedAt now time.Time result []Threshold }{ "empty": { observedAt: thresholdsObservedAt{}, now: now.Time, result: []Threshold{}, }, "hard-threshold-met": { observedAt: thresholdsObservedAt{ hardThreshold: now.Time, }, now: now.Time, result: []Threshold{hardThreshold}, }, "soft-threshold-not-met": { observedAt: thresholdsObservedAt{ softThreshold: now.Time, }, now: now.Time, result: []Threshold{}, }, "soft-threshold-met": { observedAt: thresholdsObservedAt{ softThreshold: oldTime.Time, }, now: now.Time, result: []Threshold{softThreshold}, }, } for testName, testCase := range testCases { actual := thresholdsMetGracePeriod(testCase.observedAt, now.Time) if !thresholdList(actual).Equal(thresholdList(testCase.result)) { t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) } } }
// setNodeDiskPressureCondition for the node. // TODO: this needs to move somewhere centralized... func (kl *Kubelet) setNodeDiskPressureCondition(node *v1.Node) { currentTime := metav1.NewTime(kl.clock.Now()) var condition *v1.NodeCondition // Check if NodeDiskPressure condition already exists and if it does, just pick it up for update. for i := range node.Status.Conditions { if node.Status.Conditions[i].Type == v1.NodeDiskPressure { condition = &node.Status.Conditions[i] } } newCondition := false // If the NodeDiskPressure condition doesn't exist, create one if condition == nil { condition = &v1.NodeCondition{ Type: v1.NodeDiskPressure, Status: v1.ConditionUnknown, } // cannot be appended to node.Status.Conditions here because it gets // copied to the slice. So if we append to the slice here none of the // updates we make below are reflected in the slice. newCondition = true } // Update the heartbeat time condition.LastHeartbeatTime = currentTime // Note: The conditions below take care of the case when a new NodeDiskPressure condition is // created as well as the case when the condition already exists. When a new condition // is created its status is set to v1.ConditionUnknown which matches either // condition.Status != v1.ConditionTrue or // condition.Status != v1.ConditionFalse in the conditions below depending on whether // the kubelet is under disk pressure or not. if kl.evictionManager.IsUnderDiskPressure() { if condition.Status != v1.ConditionTrue { condition.Status = v1.ConditionTrue condition.Reason = "KubeletHasDiskPressure" condition.Message = "kubelet has disk pressure" condition.LastTransitionTime = currentTime kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasDiskPressure") } } else { if condition.Status != v1.ConditionFalse { condition.Status = v1.ConditionFalse condition.Reason = "KubeletHasNoDiskPressure" condition.Message = "kubelet has no disk pressure" condition.LastTransitionTime = currentTime kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasNoDiskPressure") } } if newCondition { node.Status.Conditions = append(node.Status.Conditions, *condition) } }
// TestActiveDeadlineHandler verifies the active deadline handler functions as expected. func TestActiveDeadlineHandler(t *testing.T) { pods := newTestPods(4) fakeClock := clock.NewFakeClock(time.Now()) podStatusProvider := &mockPodStatusProvider{pods: pods} fakeRecorder := &record.FakeRecorder{} handler, err := newActiveDeadlineHandler(podStatusProvider, fakeRecorder, fakeClock) if err != nil { t.Fatalf("unexpected error: %v", err) } now := metav1.Now() startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute)) // this pod has exceeded its active deadline exceededActiveDeadlineSeconds := int64(30) pods[0].Status.StartTime = &startTime pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds // this pod has not exceeded its active deadline notYetActiveDeadlineSeconds := int64(120) pods[1].Status.StartTime = &startTime pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds // this pod has no deadline pods[2].Status.StartTime = &startTime pods[2].Spec.ActiveDeadlineSeconds = nil testCases := []struct { pod *v1.Pod expected bool }{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}} for i, testCase := range testCases { if actual := handler.ShouldSync(testCase.pod); actual != testCase.expected { t.Errorf("[%d] ShouldSync expected %#v, got %#v", i, testCase.expected, actual) } actual := handler.ShouldEvict(testCase.pod) if actual.Evict != testCase.expected { t.Errorf("[%d] ShouldEvict.Evict expected %#v, got %#v", i, testCase.expected, actual.Evict) } if testCase.expected { if actual.Reason != reason { t.Errorf("[%d] ShouldEvict.Reason expected %#v, got %#v", i, reason, actual.Reason) } if actual.Message != message { t.Errorf("[%d] ShouldEvict.Message expected %#v, got %#v", i, message, actual.Message) } } } }
// markAsDeleting sets the obj's DeletionGracePeriodSeconds to 0, and sets the // DeletionTimestamp to "now". Finalizers are watching for such updates and will // finalize the object if their IDs are present in the object's Finalizers list. func markAsDeleting(obj runtime.Object) (err error) { objectMeta, kerr := api.ObjectMetaFor(obj) if kerr != nil { return kerr } now := metav1.NewTime(time.Now()) // This handles the Generation bump for resources that don't support graceful deletion; // for resources that do, the bump is handled in pkg/api/rest/delete.go. if objectMeta.DeletionTimestamp == nil && objectMeta.Generation > 0 { objectMeta.Generation++ } objectMeta.DeletionTimestamp = &now var zero int64 = 0 objectMeta.DeletionGracePeriodSeconds = &zero return nil }
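// The sketch below is illustrative and not part of the original file. It shows how
// markAsDeleting might be exercised, assuming it lives in the same package so the
// unexported function, the api types, and an fmt import are all in scope.
func exampleMarkPodAsDeleting(pod *api.Pod) error {
	if err := markAsDeleting(pod); err != nil {
		return err
	}
	// After the call the object carries a zero grace period and a deletion
	// timestamp of "now", which downstream finalizer logic watches for.
	if pod.ObjectMeta.DeletionTimestamp == nil || *pod.ObjectMeta.DeletionGracePeriodSeconds != 0 {
		return fmt.Errorf("markAsDeleting did not set deletion metadata")
	}
	return nil
}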
func (sb *summaryBuilder) containerInfoV2ToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats.UserDefinedMetric { type specVal struct { ref stats.UserDefinedMetricDescriptor valType cadvisorapiv1.DataType time time.Time value float64 } udmMap := map[string]*specVal{} for _, spec := range info.Spec.CustomMetrics { udmMap[spec.Name] = &specVal{ ref: stats.UserDefinedMetricDescriptor{ Name: spec.Name, Type: stats.UserDefinedMetricType(spec.Type), Units: spec.Units, }, valType: spec.Format, } } for _, stat := range info.Stats { for name, values := range stat.CustomMetrics { specVal, ok := udmMap[name] if !ok { glog.Warningf("spec for custom metric %q is missing from cAdvisor output. Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics) continue } for _, value := range values { // Pick the most recent value if value.Timestamp.Before(specVal.time) { continue } specVal.time = value.Timestamp specVal.value = value.FloatValue if specVal.valType == cadvisorapiv1.IntType { specVal.value = float64(value.IntValue) } } } } var udm []stats.UserDefinedMetric for _, specVal := range udmMap { udm = append(udm, stats.UserDefinedMetric{ UserDefinedMetricDescriptor: specVal.ref, Time: metav1.NewTime(specVal.time), Value: specVal.value, }) } return udm }
func TestThresholdsFirstObservedAt(t *testing.T) { hardThreshold := Threshold{ Signal: SignalMemoryAvailable, Operator: OpLessThan, Value: ThresholdValue{ Quantity: quantityMustParse("1Gi"), }, } now := metav1.Now() oldTime := metav1.NewTime(now.Time.Add(-1 * time.Minute)) testCases := map[string]struct { thresholds []Threshold lastObservedAt thresholdsObservedAt now time.Time result thresholdsObservedAt }{ "empty": { thresholds: []Threshold{}, lastObservedAt: thresholdsObservedAt{}, now: now.Time, result: thresholdsObservedAt{}, }, "no-previous-observation": { thresholds: []Threshold{hardThreshold}, lastObservedAt: thresholdsObservedAt{}, now: now.Time, result: thresholdsObservedAt{ hardThreshold: now.Time, }, }, "previous-observation": { thresholds: []Threshold{hardThreshold}, lastObservedAt: thresholdsObservedAt{ hardThreshold: oldTime.Time, }, now: now.Time, result: thresholdsObservedAt{ hardThreshold: oldTime.Time, }, }, } for testName, testCase := range testCases { actual := thresholdsFirstObservedAt(testCase.thresholds, testCase.lastObservedAt, testCase.now) if !reflect.DeepEqual(actual, testCase.result) { t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) } } }
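// A minimal sketch (not from the original tests) of how the two helpers exercised
// above compose inside the eviction flow: record when each threshold was first
// observed, then keep only the thresholds whose grace period has elapsed. It assumes
// the same package, types, and function signatures as the tests in this file.
func exampleThresholdPipeline(observed []Threshold, last thresholdsObservedAt, now time.Time) []Threshold {
	// Remember the earliest time each currently observed threshold was seen.
	observedAt := thresholdsFirstObservedAt(observed, last, now)
	// Report only the thresholds that have been met longer than their grace period.
	return thresholdsMetGracePeriod(observedAt, now)
}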
// SetNodeCondition updates a specific node condition via a patch operation. func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error { generatePatch := func(condition v1.NodeCondition) ([]byte, error) { raw, err := json.Marshal(&[]v1.NodeCondition{condition}) if err != nil { return nil, err } return []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw)), nil } condition.LastHeartbeatTime = metav1.NewTime(time.Now()) patch, err := generatePatch(condition) if err != nil { return err } _, err = c.Core().Nodes().PatchStatus(string(node), patch) return err }
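// A hedged usage sketch (not from the source): patching a DiskPressure condition onto
// a node via SetNodeCondition above. It assumes the same imports as the surrounding
// file (clientset, types, v1, metav1, time); the function name is hypothetical.
func exampleSetDiskPressureCondition(c clientset.Interface, nodeName types.NodeName) error {
	cond := v1.NodeCondition{
		Type:               v1.NodeDiskPressure,
		Status:             v1.ConditionFalse,
		Reason:             "KubeletHasNoDiskPressure",
		Message:            "kubelet has no disk pressure",
		LastTransitionTime: metav1.NewTime(time.Now()),
	}
	// SetNodeCondition overwrites LastHeartbeatTime itself before building the patch.
	return SetNodeCondition(c, nodeName, cond)
}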
func newPod(now time.Time, ready bool, beforeSec int) v1.Pod { conditionStatus := v1.ConditionFalse if ready { conditionStatus = v1.ConditionTrue } return v1.Pod{ Status: v1.PodStatus{ Conditions: []v1.PodCondition{ { Type: v1.PodReady, LastTransitionTime: metav1.NewTime(now.Add(-1 * time.Duration(beforeSec) * time.Second)), Status: conditionStatus, }, }, }, } }
// buildSummaryPods aggregates and returns the container stats in cinfos by the Pod managing the container. // Containers not managed by a Pod are omitted. func (sb *summaryBuilder) buildSummaryPods() []stats.PodStats { // Map each container to a pod and update the PodStats with container data podToStats := map[stats.PodReference]*stats.PodStats{} for key, cinfo := range sb.infos { // on systemd using devicemapper each mount into the container has an associated cgroup. // we ignore them to ensure we do not get duplicate entries in our summary. // for details on .mount units: http://man7.org/linux/man-pages/man5/systemd.mount.5.html if strings.HasSuffix(key, ".mount") { continue } // Build the Pod key if this container is managed by a Pod if !sb.isPodManagedContainer(&cinfo) { continue } ref := sb.buildPodRef(&cinfo) // Lookup the PodStats for the pod using the PodRef. If none exists, initialize a new entry. podStats, found := podToStats[ref] if !found { podStats = &stats.PodStats{PodRef: ref} podToStats[ref] = podStats } // Update the PodStats entry with the stats from the container by adding it to stats.Containers containerName := types.GetContainerName(cinfo.Spec.Labels) if containerName == leaky.PodInfraContainerName { // Special case for infrastructure container which is hidden from the user and has network stats podStats.Network = sb.containerInfoV2ToNetworkStats("pod:"+ref.Namespace+"_"+ref.Name, &cinfo) podStats.StartTime = metav1.NewTime(cinfo.Spec.CreationTime) } else { podStats.Containers = append(podStats.Containers, sb.containerInfoV2ToStats(containerName, &cinfo)) } } // Add each PodStats to the result result := make([]stats.PodStats, 0, len(podToStats)) for _, podStats := range podToStats { // Lookup the volume stats for each pod podUID := kubetypes.UID(podStats.PodRef.UID) if vstats, found := sb.fsResourceAnalyzer.GetPodVolumeStats(podUID); found { podStats.VolumeStats = vstats.Volumes } result = append(result, *podStats) } return result }
// EventAggregate identifies similar events and groups into a common event if required func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, error) { aggregateKey, localKey := e.keyFunc(newEvent) now := metav1.NewTime(e.clock.Now()) record := aggregateRecord{localKeys: sets.NewString(), lastTimestamp: now} e.Lock() defer e.Unlock() value, found := e.cache.Get(aggregateKey) if found { record = value.(aggregateRecord) } // if the last event was far enough in the past, it is not aggregated, and we must reset state maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second interval := now.Time.Sub(record.lastTimestamp.Time) if interval > maxInterval { record = aggregateRecord{localKeys: sets.NewString()} } record.localKeys.Insert(localKey) record.lastTimestamp = now e.cache.Add(aggregateKey, record) if record.localKeys.Len() < e.maxEvents { return newEvent, nil } // do not grow our local key set any larger than max record.localKeys.PopAny() // create a new aggregate event eventCopy := &v1.Event{ ObjectMeta: v1.ObjectMeta{ Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), Namespace: newEvent.Namespace, }, Count: 1, FirstTimestamp: now, InvolvedObject: newEvent.InvolvedObject, LastTimestamp: now, Message: e.messageFunc(newEvent), Type: newEvent.Type, Reason: newEvent.Reason, Source: newEvent.Source, } return eventCopy, nil }
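// A minimal sketch (not part of the source) of driving the aggregator above: pass each
// incoming event through EventAggregate and emit whatever comes back, which is either
// the original event or a freshly created aggregate once the per-key limit is exceeded.
// Construction of the EventAggregator is omitted because its constructor is not shown here.
func exampleAggregateEvents(agg *EventAggregator, incoming []*v1.Event) ([]*v1.Event, error) {
	out := make([]*v1.Event, 0, len(incoming))
	for _, ev := range incoming {
		aggregated, err := agg.EventAggregate(ev)
		if err != nil {
			return nil, err
		}
		out = append(out, aggregated)
	}
	return out, nil
}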
func TestNodeConditionsLastObservedAt(t *testing.T) { now := metav1.Now() oldTime := metav1.NewTime(now.Time.Add(-1 * time.Minute)) testCases := map[string]struct { nodeConditions []v1.NodeConditionType lastObservedAt nodeConditionsObservedAt now time.Time result nodeConditionsObservedAt }{ "no-previous-observation": { nodeConditions: []v1.NodeConditionType{v1.NodeMemoryPressure}, lastObservedAt: nodeConditionsObservedAt{}, now: now.Time, result: nodeConditionsObservedAt{ v1.NodeMemoryPressure: now.Time, }, }, "previous-observation": { nodeConditions: []v1.NodeConditionType{v1.NodeMemoryPressure}, lastObservedAt: nodeConditionsObservedAt{ v1.NodeMemoryPressure: oldTime.Time, }, now: now.Time, result: nodeConditionsObservedAt{ v1.NodeMemoryPressure: now.Time, }, }, "old-observation": { nodeConditions: []v1.NodeConditionType{}, lastObservedAt: nodeConditionsObservedAt{ v1.NodeMemoryPressure: oldTime.Time, }, now: now.Time, result: nodeConditionsObservedAt{ v1.NodeMemoryPressure: oldTime.Time, }, }, } for testName, testCase := range testCases { actual := nodeConditionsLastObservedAt(testCase.nodeConditions, testCase.lastObservedAt, testCase.now) if !reflect.DeepEqual(actual, testCase.result) { t.Errorf("Test case: %s, expected: %v, actual: %v", testName, testCase.result, actual) } } }
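// A minimal sketch (not from the original tests) of how the two node-condition helpers
// tested in this file compose: refresh the last-observed times for the currently
// reported conditions, then return the conditions still seen within the given period.
// It assumes the same package and signatures as the tests above.
func exampleNodeConditionPipeline(current []v1.NodeConditionType, last nodeConditionsObservedAt, period time.Duration, now time.Time) []v1.NodeConditionType {
	observedAt := nodeConditionsLastObservedAt(current, last, now)
	return nodeConditionsObservedSince(observedAt, period, now)
}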
func TestNewPodAddedDelete(t *testing.T) { channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental) // should register an add addedPod := CreateValidPod("foo", "new") podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, addedPod) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, addedPod)) // mark this pod as deleted timestamp := metav1.NewTime(time.Now()) deletedPod := CreateValidPod("foo", "new") deletedPod.ObjectMeta.DeletionTimestamp = &timestamp podUpdate = CreatePodUpdate(kubetypes.DELETE, TestSource, deletedPod) channel <- podUpdate // the existing pod should be gracefully deleted expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.DELETE, TestSource, addedPod)) }
// eventObserve records the event, and determines if its frequency should update func (e *eventLogger) eventObserve(newEvent *v1.Event) (*v1.Event, []byte, error) { var ( patch []byte err error ) key := getEventKey(newEvent) eventCopy := *newEvent event := &eventCopy e.Lock() defer e.Unlock() lastObservation := e.lastEventObservationFromCache(key) // we have seen this event before, so we must prepare a patch if lastObservation.count > 0 { // update the event based on the last observation so patch will work as desired event.Name = lastObservation.name event.ResourceVersion = lastObservation.resourceVersion event.FirstTimestamp = lastObservation.firstTimestamp event.Count = int32(lastObservation.count) + 1 eventCopy2 := *event eventCopy2.Count = 0 eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) newData, _ := json.Marshal(event) oldData, _ := json.Marshal(eventCopy2) patch, err = strategicpatch.CreateStrategicMergePatch(oldData, newData, event) } // record our new observation e.cache.Add( key, eventLog{ count: int(event.Count), firstTimestamp: event.FirstTimestamp, name: event.Name, resourceVersion: event.ResourceVersion, }, ) return event, patch, err }
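// A hedged, standalone restatement (not from the source) of the patch construction used
// in eventObserve above: marshal the previously observed and the updated event, then let
// the strategic-merge helper compute the delta to send to the API server. It assumes the
// json and strategicpatch imports used by the surrounding file; the name is hypothetical.
func exampleEventPatch(previous, updated *v1.Event) ([]byte, error) {
	oldData, err := json.Marshal(previous)
	if err != nil {
		return nil, err
	}
	newData, err := json.Marshal(updated)
	if err != nil {
		return nil, err
	}
	return strategicpatch.CreateStrategicMergePatch(oldData, newData, updated)
}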
func TestNewStatusPreservesPodStartTime(t *testing.T) { syncer := newTestManager(&fake.Clientset{}) pod := &v1.Pod{ ObjectMeta: v1.ObjectMeta{ UID: "12345678", Name: "foo", Namespace: "new", }, Status: v1.PodStatus{}, } now := metav1.Now() startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute)) pod.Status.StartTime = &startTime syncer.SetPodStatus(pod, getRandomPodStatus()) status := expectPodStatus(t, syncer, pod) if !status.StartTime.Time.Equal(startTime.Time) { t.Errorf("Unexpected start time, expected %v, actual %v", startTime, status.StartTime) } }
func TestPodDescribeResultsSorted(t *testing.T) { // Arrange fake := fake.NewSimpleClientset( &api.EventList{ Items: []api.Event{ { ObjectMeta: api.ObjectMeta{Name: "one"}, Source: api.EventSource{Component: "kubelet"}, Message: "Item 1", FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, { ObjectMeta: api.ObjectMeta{Name: "two"}, Source: api.EventSource{Component: "scheduler"}, Message: "Item 2", FirstTimestamp: metav1.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, { ObjectMeta: api.ObjectMeta{Name: "three"}, Source: api.EventSource{Component: "kubelet"}, Message: "Item 3", FirstTimestamp: metav1.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, }, }, &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "foo", Name: "bar"}}, ) c := &describeClient{T: t, Namespace: "foo", Interface: fake} d := PodDescriber{c} // Act out, err := d.Describe("foo", "bar", DescriberSettings{ShowEvents: true}) // Assert if err != nil { t.Errorf("unexpected error: %v", err) } VerifyDatesInOrder(out, "\n" /* rowDelimiter */, "\t" /* columnDelimiter */, t) }
func (sb *summaryBuilder) containerInfoV2ToNetworkStats(name string, info *cadvisorapiv2.ContainerInfo) *stats.NetworkStats { if !info.Spec.HasNetwork { return nil } cstat, found := sb.latestContainerStats(info) if !found { return nil } for _, inter := range cstat.Network.Interfaces { if inter.Name == network.DefaultInterfaceName { return &stats.NetworkStats{ Time: metav1.NewTime(cstat.Timestamp), RxBytes: &inter.RxBytes, RxErrors: &inter.RxErrors, TxBytes: &inter.TxBytes, TxErrors: &inter.TxErrors, } } } glog.V(4).Infof("Missing default interface %q for %s", network.DefaultInterfaceName, name) return nil }
func TestPrintEventsResultSorted(t *testing.T) { // Arrange printer := NewHumanReadablePrinter(PrintOptions{ ColumnLabels: []string{}, }) obj := api.EventList{ Items: []api.Event{ { Source: api.EventSource{Component: "kubelet"}, Message: "Item 1", FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, { Source: api.EventSource{Component: "scheduler"}, Message: "Item 2", FirstTimestamp: metav1.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, { Source: api.EventSource{Component: "kubelet"}, Message: "Item 3", FirstTimestamp: metav1.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, }, } buffer := &bytes.Buffer{} // Act err := printer.PrintObj(&obj, buffer) // Assert if err != nil { t.Fatalf("An error occurred printing the EventList: %#v", err) } out := buffer.String() VerifyDatesInOrder(out, "\n" /* rowDelimiter */, " " /* columnDelimiter */, t) }
func (a *HorizontalController) updateStatus(hpa *autoscaling.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, cpuCurrentUtilization *int32, cmStatus string, rescale bool) error { hpa.Status = autoscaling.HorizontalPodAutoscalerStatus{ CurrentReplicas: currentReplicas, DesiredReplicas: desiredReplicas, CurrentCPUUtilizationPercentage: cpuCurrentUtilization, LastScaleTime: hpa.Status.LastScaleTime, } if cmStatus != "" { hpa.Annotations[HpaCustomMetricsStatusAnnotationName] = cmStatus } if rescale { now := metav1.NewTime(time.Now()) hpa.Status.LastScaleTime = &now } _, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(hpa) if err != nil { a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error()) return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err) } glog.V(2).Infof("Successfully updated status for %s", hpa.Name) return nil }
func TestSortableEvents(t *testing.T) { // Arrange list := SortableEvents([]api.Event{ { Source: api.EventSource{Component: "kubelet"}, Message: "Item 1", FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, { Source: api.EventSource{Component: "scheduler"}, Message: "Item 2", FirstTimestamp: metav1.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(1987, time.June, 17, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, { Source: api.EventSource{Component: "kubelet"}, Message: "Item 3", FirstTimestamp: metav1.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(2002, time.December, 25, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, }) // Act sort.Sort(list) // Assert if list[0].Message != "Item 2" || list[1].Message != "Item 3" || list[2].Message != "Item 1" { t.Fatal("List is not sorted by time. List: ", list) } }
func TestPrintHumanReadableWithNamespace(t *testing.T) { namespaceName := "testnamespace" name := "test" table := []struct { obj runtime.Object isNamespaced bool }{ { obj: &api.Pod{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, }, isNamespaced: true, }, { obj: &api.ReplicationController{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, Spec: api.ReplicationControllerSpec{ Replicas: 2, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ Labels: map[string]string{ "name": "foo", "type": "production", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Image: "foo/bar", TerminationMessagePath: api.TerminationMessagePathDefault, ImagePullPolicy: api.PullIfNotPresent, }, }, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSDefault, NodeSelector: map[string]string{ "baz": "blah", }, }, }, }, }, isNamespaced: true, }, { obj: &api.Service{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, Spec: api.ServiceSpec{ ClusterIP: "1.2.3.4", Ports: []api.ServicePort{ { Port: 80, Protocol: "TCP", }, }, }, Status: api.ServiceStatus{ LoadBalancer: api.LoadBalancerStatus{ Ingress: []api.LoadBalancerIngress{ { IP: "2.3.4.5", }, }, }, }, }, isNamespaced: true, }, { obj: &api.Endpoints{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, Subsets: []api.EndpointSubset{{ Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}, {IP: "localhost"}}, Ports: []api.EndpointPort{{Port: 8080}}, }, }}, isNamespaced: true, }, { obj: &api.Namespace{ ObjectMeta: api.ObjectMeta{Name: name}, }, isNamespaced: false, }, { obj: &api.Secret{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, }, isNamespaced: true, }, { obj: &api.ServiceAccount{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, Secrets: []api.ObjectReference{}, }, isNamespaced: true, }, { obj: &api.Node{ ObjectMeta: api.ObjectMeta{Name: name}, Status: api.NodeStatus{}, }, isNamespaced: false, }, { obj: &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, Spec: api.PersistentVolumeSpec{}, }, isNamespaced: false, }, { obj: &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, Spec: api.PersistentVolumeClaimSpec{}, }, isNamespaced: true, }, { obj: &api.Event{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, Source: api.EventSource{Component: "kubelet"}, Message: "Item 1", FirstTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), LastTimestamp: metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)), Count: 1, Type: api.EventTypeNormal, }, isNamespaced: true, }, { obj: &api.LimitRange{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, }, isNamespaced: true, }, { obj: &api.ResourceQuota{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, }, isNamespaced: true, }, { obj: &api.ComponentStatus{ Conditions: []api.ComponentCondition{ {Type: api.ComponentHealthy, Status: api.ConditionTrue, Message: "ok", Error: ""}, }, }, isNamespaced: false, }, } for _, test := range table { if test.isNamespaced { // Expect output to include namespace when requested. 
printer := NewHumanReadablePrinter(PrintOptions{ WithNamespace: true, ColumnLabels: []string{}, }) buffer := &bytes.Buffer{} err := printer.PrintObj(test.obj, buffer) if err != nil { t.Fatalf("An error occurred printing object: %#v", err) } matched := contains(strings.Fields(buffer.String()), fmt.Sprintf("%s", namespaceName)) if !matched { t.Errorf("Expect printing object to contain namespace: %#v", test.obj) } } else { // Expect error when trying to get all namespaces for un-namespaced object. printer := NewHumanReadablePrinter(PrintOptions{ WithNamespace: true, ColumnLabels: []string{}, }) buffer := &bytes.Buffer{} err := printer.PrintObj(test.obj, buffer) if err == nil { t.Errorf("Expected error when printing un-namespaced type") } } } }
defer GinkgoRecover() if p.Status.Phase == v1.PodRunning { if _, found := watchTimes[p.Name]; !found { watchTimes[p.Name] = metav1.Now() createTimes[p.Name] = p.CreationTimestamp nodeNames[p.Name] = p.Spec.NodeName var startTime metav1.Time for _, cs := range p.Status.ContainerStatuses { if cs.State.Running != nil { if startTime.Before(cs.State.Running.StartedAt) { startTime = cs.State.Running.StartedAt } } } if startTime != metav1.NewTime(time.Time{}) { runTimes[p.Name] = startTime } else { framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name) } } } } additionalPodsPrefix = "density-latency-pod" stopCh := make(chan struct{}) latencyPodStores := make([]cache.Store, len(namespaces)) for i := 0; i < len(namespaces); i++ { nsName := namespaces[i].Name latencyPodsStore, controller := cache.NewInformer(
Name: podName, }, Spec: v1.PodSpec{ // Don't restart the Pod since it is expected to exit RestartPolicy: v1.RestartPolicyNever, Containers: []v1.Container{ { Image: "gcr.io/google_containers/busybox:1.24", Name: podName, Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"}, }, }, }, }) Eventually(func() string { sinceTime := apiunversioned.NewTime(time.Now().Add(time.Duration(-1 * time.Hour))) rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream() if err != nil { return "" } defer rc.Close() buf := new(bytes.Buffer) buf.ReadFrom(rc) return buf.String() }, time.Minute, time.Second*4).Should(Equal("Hello World\n")) }) }) Context("when scheduling a busybox command that always fails in a pod", func() { var podName string BeforeEach(func() {
// TestEventCorrelator validates proper counting, aggregation of events func TestEventCorrelator(t *testing.T) { firstEvent := makeEvent("first", "i am first", makeObjectReference("Pod", "my-pod", "my-ns")) duplicateEvent := makeEvent("duplicate", "me again", makeObjectReference("Pod", "my-pod", "my-ns")) uniqueEvent := makeEvent("unique", "snowflake", makeObjectReference("Pod", "my-pod", "my-ns")) similarEvent := makeEvent("similar", "similar message", makeObjectReference("Pod", "my-pod", "my-ns")) aggregateEvent := makeEvent(similarEvent.Reason, EventAggregatorByReasonMessageFunc(&similarEvent), similarEvent.InvolvedObject) scenario := map[string]struct { previousEvents []v1.Event newEvent v1.Event expectedEvent v1.Event intervalSeconds int }{ "create-a-single-event": { previousEvents: []v1.Event{}, newEvent: firstEvent, expectedEvent: setCount(firstEvent, 1), intervalSeconds: 5, }, "the-same-event-should-just-count": { previousEvents: makeEvents(1, duplicateEvent), newEvent: duplicateEvent, expectedEvent: setCount(duplicateEvent, 2), intervalSeconds: 5, }, "the-same-event-should-just-count-even-if-more-than-aggregate": { previousEvents: makeEvents(defaultAggregateMaxEvents, duplicateEvent), newEvent: duplicateEvent, expectedEvent: setCount(duplicateEvent, defaultAggregateMaxEvents+1), intervalSeconds: 5, }, "create-many-unique-events": { previousEvents: makeUniqueEvents(30), newEvent: uniqueEvent, expectedEvent: setCount(uniqueEvent, 1), intervalSeconds: 5, }, "similar-events-should-aggregate-event": { previousEvents: makeSimilarEvents(defaultAggregateMaxEvents-1, similarEvent, similarEvent.Message), newEvent: similarEvent, expectedEvent: setCount(aggregateEvent, 1), intervalSeconds: 5, }, "similar-events-many-times-should-count-the-aggregate": { previousEvents: makeSimilarEvents(defaultAggregateMaxEvents, similarEvent, similarEvent.Message), newEvent: similarEvent, expectedEvent: setCount(aggregateEvent, 2), intervalSeconds: 5, }, "similar-events-whose-interval-is-greater-than-aggregate-interval-do-not-aggregate": { previousEvents: makeSimilarEvents(defaultAggregateMaxEvents-1, similarEvent, similarEvent.Message), newEvent: similarEvent, expectedEvent: setCount(similarEvent, 1), intervalSeconds: defaultAggregateIntervalInSeconds, }, } for testScenario, testInput := range scenario { eventInterval := time.Duration(testInput.intervalSeconds) * time.Second clock := clock.IntervalClock{Time: time.Now(), Duration: eventInterval} correlator := NewEventCorrelator(&clock) for i := range testInput.previousEvents { event := testInput.previousEvents[i] now := metav1.NewTime(clock.Now()) event.FirstTimestamp = now event.LastTimestamp = now result, err := correlator.EventCorrelate(&event) if err != nil { t.Errorf("scenario %v: unexpected error playing back prevEvents %v", testScenario, err) } correlator.UpdateState(result.Event) } // update the input to current clock value now := metav1.NewTime(clock.Now()) testInput.newEvent.FirstTimestamp = now testInput.newEvent.LastTimestamp = now result, err := correlator.EventCorrelate(&testInput.newEvent) if err != nil { t.Errorf("scenario %v: unexpected error correlating input event %v", testScenario, err) } _, err = validateEvent(testScenario, result.Event, &testInput.expectedEvent, t) if err != nil { t.Errorf("scenario %v: unexpected error validating result %v", testScenario, err) } } }
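// A minimal sketch (not part of the test above) of the correlation flow the test
// exercises: correlate an incoming event, record the chosen result so later lookups
// see it, and hand back the possibly rewritten event. It assumes the same package as
// the correlator; the function name is illustrative.
func exampleCorrelateAndRecord(correlator *EventCorrelator, ev *v1.Event) (*v1.Event, error) {
	result, err := correlator.EventCorrelate(ev)
	if err != nil {
		return nil, err
	}
	correlator.UpdateState(result.Event)
	return result.Event, nil
}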
func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) kubelet := testKubelet.kubelet clock := testKubelet.fakeClock // Do not set nano second, because apiserver function doesn't support nano second. (Only support // RFC3339). clock.SetTime(time.Unix(123456, 0)) kubeClient := testKubelet.fakeKubeClient existingNode := v1.Node{ ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}, Spec: v1.NodeSpec{}, Status: v1.NodeStatus{ Conditions: []v1.NodeCondition{ { Type: v1.NodeReady, Status: v1.ConditionTrue, Reason: "KubeletReady", Message: fmt.Sprintf("kubelet is posting ready status"), LastHeartbeatTime: metav1.NewTime(clock.Now()), LastTransitionTime: metav1.NewTime(clock.Now()), }, { Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, Reason: "KubeletOutOfDisk", Message: "out of disk space", LastHeartbeatTime: metav1.NewTime(clock.Now()), LastTransitionTime: metav1.NewTime(clock.Now()), }, }, }, } kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain mockCadvisor := testKubelet.fakeCadvisor machineInfo := &cadvisorapi.MachineInfo{ MachineID: "123", SystemUUID: "abc", BootID: "1b3", NumCores: 2, MemoryCapacity: 1024, } fsInfo := cadvisorapiv2.FsInfo{ Device: "123", } mockCadvisor.On("Start").Return(nil) mockCadvisor.On("MachineInfo").Return(machineInfo, nil) mockCadvisor.On("ImagesFsInfo").Return(fsInfo, nil) mockCadvisor.On("RootFsInfo").Return(fsInfo, nil) versionInfo := &cadvisorapi.VersionInfo{ KernelVersion: "3.16.0-0.bpo.4-amd64", ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)", DockerVersion: "1.5.0", } mockCadvisor.On("VersionInfo").Return(versionInfo, nil) kubelet.outOfDiskTransitionFrequency = 5 * time.Second ood := v1.NodeCondition{ Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, Reason: "KubeletOutOfDisk", Message: "out of disk space", LastHeartbeatTime: metav1.NewTime(clock.Now()), // placeholder LastTransitionTime: metav1.NewTime(clock.Now()), // placeholder } noOod := v1.NodeCondition{ Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse, Reason: "KubeletHasSufficientDisk", Message: fmt.Sprintf("kubelet has sufficient disk space available"), LastHeartbeatTime: metav1.NewTime(clock.Now()), // placeholder LastTransitionTime: metav1.NewTime(clock.Now()), // placeholder } testCases := []struct { rootFsAvail uint64 dockerFsAvail uint64 expected v1.NodeCondition }{ { // NodeOutOfDisk==false rootFsAvail: 200, dockerFsAvail: 200, expected: ood, }, { // NodeOutOfDisk==true rootFsAvail: 50, dockerFsAvail: 200, expected: ood, }, { // NodeOutOfDisk==false rootFsAvail: 200, dockerFsAvail: 200, expected: ood, }, { // NodeOutOfDisk==true rootFsAvail: 200, dockerFsAvail: 50, expected: ood, }, { // NodeOutOfDisk==false rootFsAvail: 200, dockerFsAvail: 200, expected: noOod, }, } kubelet.updateRuntimeUp() for tcIdx, tc := range testCases { // Step by a second clock.Step(1 * time.Second) // Setup expected times. 
tc.expected.LastHeartbeatTime = metav1.NewTime(clock.Now()) // In the last case, there should be a status transition for NodeOutOfDisk if tcIdx == len(testCases)-1 { tc.expected.LastTransitionTime = metav1.NewTime(clock.Now()) } // Report the test case's root and docker filesystem availability to the disk space manager if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100); err != nil { t.Fatalf("can't update disk space manager: %v", err) } if err := kubelet.updateNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } actions := kubeClient.Actions() if len(actions) != 2 { t.Errorf("%d. unexpected actions: %v", tcIdx, actions) } patchAction, ok := actions[1].(core.PatchActionImpl) if !ok { t.Errorf("%d. unexpected action type. expected PatchActionImpl, got %#v", tcIdx, actions[1]) } updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch()) if err != nil { t.Fatalf("can't apply node status patch: %v", err) } kubeClient.ClearActions() var oodCondition v1.NodeCondition for i, cond := range updatedNode.Status.Conditions { if cond.Type == v1.NodeOutOfDisk { oodCondition = updatedNode.Status.Conditions[i] } } if !reflect.DeepEqual(tc.expected, oodCondition) { t.Errorf("%d.\nunexpected objects: %s", tcIdx, diff.ObjectDiff(tc.expected, oodCondition)) } } }
// Set the OutOfDisk condition for the node. func (kl *Kubelet) setNodeOODCondition(node *v1.Node) { currentTime := metav1.NewTime(kl.clock.Now()) var nodeOODCondition *v1.NodeCondition // Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update. for i := range node.Status.Conditions { if node.Status.Conditions[i].Type == v1.NodeOutOfDisk { nodeOODCondition = &node.Status.Conditions[i] } } newOODCondition := false // If the NodeOutOfDisk condition doesn't exist, create one. if nodeOODCondition == nil { nodeOODCondition = &v1.NodeCondition{ Type: v1.NodeOutOfDisk, Status: v1.ConditionUnknown, } // nodeOODCondition cannot be appended to node.Status.Conditions here because it gets // copied to the slice. So if we append nodeOODCondition to the slice here none of the // updates we make to nodeOODCondition below are reflected in the slice. newOODCondition = true } // Update the heartbeat time irrespective of all the conditions. nodeOODCondition.LastHeartbeatTime = currentTime // Note: The conditions below take care of the case when a new NodeOutOfDisk condition is // created as well as the case when the condition already exists. When a new condition // is created its status is set to v1.ConditionUnknown which matches either // nodeOODCondition.Status != v1.ConditionTrue or // nodeOODCondition.Status != v1.ConditionFalse in the conditions below depending on whether // the kubelet is out of disk or not. if kl.isOutOfDisk() { if nodeOODCondition.Status != v1.ConditionTrue { nodeOODCondition.Status = v1.ConditionTrue nodeOODCondition.Reason = "KubeletOutOfDisk" nodeOODCondition.Message = "out of disk space" nodeOODCondition.LastTransitionTime = currentTime kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeOutOfDisk") } } else { if nodeOODCondition.Status != v1.ConditionFalse { // Update the out of disk condition when the condition status is unknown even if we // are within the outOfDiskTransitionFrequency duration. We do this to set the // condition status correctly at kubelet startup. if nodeOODCondition.Status == v1.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency { nodeOODCondition.Status = v1.ConditionFalse nodeOODCondition.Reason = "KubeletHasSufficientDisk" nodeOODCondition.Message = "kubelet has sufficient disk space available" nodeOODCondition.LastTransitionTime = currentTime kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientDisk") } else { glog.Infof("Node condition status for OutOfDisk is false, but time since the last transition is less than %s", kl.outOfDiskTransitionFrequency) } } } if newOODCondition { node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition) } }
// initialNode constructs the initial v1.Node for this Kubelet, incorporating node // labels, information from the cloud provider, and Kubelet configuration. func (kl *Kubelet) initialNode() (*v1.Node, error) { node := &v1.Node{ ObjectMeta: v1.ObjectMeta{ Name: string(kl.nodeName), Labels: map[string]string{ metav1.LabelHostname: kl.hostname, metav1.LabelOS: goruntime.GOOS, metav1.LabelArch: goruntime.GOARCH, }, }, Spec: v1.NodeSpec{ Unschedulable: !kl.registerSchedulable, }, } if len(kl.kubeletConfiguration.RegisterWithTaints) > 0 { annotations := make(map[string]string) b, err := json.Marshal(kl.kubeletConfiguration.RegisterWithTaints) if err != nil { return nil, err } annotations[v1.TaintsAnnotationKey] = string(b) node.ObjectMeta.Annotations = annotations } // Initially, set NodeNetworkUnavailable to true. if kl.providerRequiresNetworkingConfiguration() { node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{ Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue, Reason: "NoRouteCreated", Message: "Node created without a route", LastTransitionTime: metav1.NewTime(kl.clock.Now()), }) } if kl.enableControllerAttachDetach { if node.Annotations == nil { node.Annotations = make(map[string]string) } glog.Infof("Setting node annotation to enable volume controller attach/detach") node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true" } else { glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes") } // @question: should this be place after the call to the cloud provider? which also applies labels for k, v := range kl.nodeLabels { if cv, found := node.ObjectMeta.Labels[k]; found { glog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv) } node.ObjectMeta.Labels[k] = v } if kl.cloud != nil { instances, ok := kl.cloud.Instances() if !ok { return nil, fmt.Errorf("failed to get instances from cloud provider") } // TODO(roberthbailey): Can we do this without having credentials to talk // to the cloud provider? // TODO: ExternalID is deprecated, we'll have to drop this code externalID, err := instances.ExternalID(kl.nodeName) if err != nil { return nil, fmt.Errorf("failed to get external ID from cloud provider: %v", err) } node.Spec.ExternalID = externalID // TODO: We can't assume that the node has credentials to talk to the // cloudprovider from arbitrary nodes. At most, we should talk to a // local metadata server here. 
node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(kl.cloud, kl.nodeName) if err != nil { return nil, err } instanceType, err := instances.InstanceType(kl.nodeName) if err != nil { return nil, err } if instanceType != "" { glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelInstanceType, instanceType) node.ObjectMeta.Labels[metav1.LabelInstanceType] = instanceType } // If the cloud has zone information, label the node with the zone information zones, ok := kl.cloud.Zones() if ok { zone, err := zones.GetZone() if err != nil { return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err) } if zone.FailureDomain != "" { glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelZoneFailureDomain, zone.FailureDomain) node.ObjectMeta.Labels[metav1.LabelZoneFailureDomain] = zone.FailureDomain } if zone.Region != "" { glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelZoneRegion, zone.Region) node.ObjectMeta.Labels[metav1.LabelZoneRegion] = zone.Region } } } else { node.Spec.ExternalID = kl.hostname if kl.autoDetectCloudProvider { // If no cloud provider is defined - use the one detected by cadvisor info, err := kl.GetCachedMachineInfo() if err == nil { kl.updateCloudProviderFromMachineInfo(node, info) } } } if err := kl.setNodeStatus(node); err != nil { return nil, err } return node, nil }
// Set Ready condition for the node. func (kl *Kubelet) setNodeReadyCondition(node *v1.Node) { // NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions. // This is due to an issue with version skewed kubelet and master components. // ref: https://github.com/kubernetes/kubernetes/issues/16961 currentTime := metav1.NewTime(kl.clock.Now()) var newNodeReadyCondition v1.NodeCondition rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...) if len(rs) == 0 { newNodeReadyCondition = v1.NodeCondition{ Type: v1.NodeReady, Status: v1.ConditionTrue, Reason: "KubeletReady", Message: "kubelet is posting ready status", LastHeartbeatTime: currentTime, } } else { newNodeReadyCondition = v1.NodeCondition{ Type: v1.NodeReady, Status: v1.ConditionFalse, Reason: "KubeletNotReady", Message: strings.Join(rs, ","), LastHeartbeatTime: currentTime, } } // Append AppArmor status if it's enabled. // TODO(timstclair): This is a temporary message until node feature reporting is added. if newNodeReadyCondition.Status == v1.ConditionTrue && kl.appArmorValidator != nil && kl.appArmorValidator.ValidateHost() == nil { newNodeReadyCondition.Message = fmt.Sprintf("%s. AppArmor enabled", newNodeReadyCondition.Message) } // Record any soft requirements that were not met in the container manager. status := kl.containerManager.Status() if status.SoftRequirements != nil { newNodeReadyCondition.Message = fmt.Sprintf("%s. WARNING: %s", newNodeReadyCondition.Message, status.SoftRequirements.Error()) } readyConditionUpdated := false needToRecordEvent := false for i := range node.Status.Conditions { if node.Status.Conditions[i].Type == v1.NodeReady { if node.Status.Conditions[i].Status == newNodeReadyCondition.Status { newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime } else { newNodeReadyCondition.LastTransitionTime = currentTime needToRecordEvent = true } node.Status.Conditions[i] = newNodeReadyCondition readyConditionUpdated = true break } } if !readyConditionUpdated { newNodeReadyCondition.LastTransitionTime = currentTime node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition) } if needToRecordEvent { if newNodeReadyCondition.Status == v1.ConditionTrue { kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeReady) } else { kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotReady) glog.Infof("Node became not ready: %+v", newNodeReadyCondition) } } }
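// A hedged, illustrative distillation (not from the source) of the update pattern shared
// by setNodeDiskPressureCondition, setNodeOODCondition, and setNodeReadyCondition above:
// locate the condition by type, preserve the transition time when the status is unchanged,
// and append only when the condition did not previously exist. Names here are hypothetical.
func upsertNodeCondition(conds []v1.NodeCondition, updated v1.NodeCondition) []v1.NodeCondition {
	for i := range conds {
		if conds[i].Type != updated.Type {
			continue
		}
		if conds[i].Status == updated.Status {
			// No status change: keep the original transition time.
			updated.LastTransitionTime = conds[i].LastTransitionTime
		}
		conds[i] = updated
		return conds
	}
	return append(conds, updated)
}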